path: root/tools/lib/python/kdoc/python_version.py
author     WangYuli <wangyuli@uniontech.com>          2024-11-25 13:26:16 +0800
committer  Benjamin Tissoires <bentiss@kernel.org>    2024-11-28 14:40:42 +0100
commit     59548215b76be98cf3422eea9a67d6ea578aca3d (patch)
tree       861b613fb4f988dea2f8a283a16c82ae713f765f   /tools/lib/python/kdoc/python_version.py
parent     34851431ceca1bf457d85895bd38a4e7967e2055 (diff)
HID: wacom: fix when get product name maybe null pointer
Due to incorrect dev->product reporting by certain devices, null pointer dereferences occur when dev->product is empty, leading to potential system crashes. This issue was found on EXCELSIOR DL37-D05 device with Loongson-LS3A6000-7A2000-DL37 motherboard.

Kernel logs:

[   56.470885] usb 4-3: new full-speed USB device number 4 using ohci-pci
[   56.671638] usb 4-3: string descriptor 0 read error: -22
[   56.671644] usb 4-3: New USB device found, idVendor=056a, idProduct=0374, bcdDevice= 1.07
[   56.671647] usb 4-3: New USB device strings: Mfr=1, Product=2, SerialNumber=3
[   56.678839] hid-generic 0003:056A:0374.0004: hiddev0,hidraw3: USB HID v1.10 Device [HID 056a:0374] on usb-0000:00:05.0-3/input0
[   56.697719] CPU 2 Unable to handle kernel paging request at virtual address 0000000000000000, era == 90000000066e35c8, ra == ffff800004f98a80
[   56.697732] Oops[#1]:
[   56.697734] CPU: 2 PID: 2742 Comm: (udev-worker) Tainted: G           OE      6.6.0-loong64-desktop #25.00.2000.015
[   56.697737] Hardware name: Inspur CE520L2/C09901N000000000, BIOS 2.09.00 10/11/2024
[   56.697739] pc 90000000066e35c8 ra ffff800004f98a80 tp 9000000125478000 sp 900000012547b8a0
[   56.697741] a0 0000000000000000 a1 ffff800004818b28 a2 0000000000000000 a3 0000000000000000
[   56.697743] a4 900000012547b8f0 a5 0000000000000000 a6 0000000000000000 a7 0000000000000000
[   56.697745] t0 ffff800004818b2d t1 0000000000000000 t2 0000000000000003 t3 0000000000000005
[   56.697747] t4 0000000000000000 t5 0000000000000000 t6 0000000000000000 t7 0000000000000000
[   56.697748] t8 0000000000000000 u0 0000000000000000 s9 0000000000000000 s0 900000011aa48028
[   56.697750] s1 0000000000000000 s2 0000000000000000 s3 ffff800004818e80 s4 ffff800004810000
[   56.697751] s5 90000001000b98d0 s6 ffff800004811f88 s7 ffff800005470440 s8 0000000000000000
[   56.697753]    ra: ffff800004f98a80 wacom_update_name+0xe0/0x300 [wacom]
[   56.697802]   ERA: 90000000066e35c8 strstr+0x28/0x120
[   56.697806]  CRMD: 000000b0 (PLV0 -IE -DA +PG DACF=CC DACM=CC -WE)
[   56.697816]  PRMD: 0000000c (PPLV0 +PIE +PWE)
[   56.697821]  EUEN: 00000000 (-FPE -SXE -ASXE -BTE)
[   56.697827]  ECFG: 00071c1d (LIE=0,2-4,10-12 VS=7)
[   56.697831] ESTAT: 00010000 [PIL] (IS= ECode=1 EsubCode=0)
[   56.697835]  BADV: 0000000000000000
[   56.697836]  PRID: 0014d000 (Loongson-64bit, Loongson-3A6000)
[   56.697838] Modules linked in: wacom(+) bnep bluetooth rfkill qrtr nls_iso8859_1 nls_cp437 snd_hda_codec_conexant snd_hda_codec_generic ledtrig_audio snd_hda_codec_hdmi snd_hda_intel snd_intel_dspcfg snd_hda_codec snd_hda_core snd_hwdep snd_pcm snd_timer snd soundcore input_leds mousedev led_class joydev deepin_netmonitor(OE) fuse nfnetlink dmi_sysfs ip_tables x_tables overlay amdgpu amdxcp drm_exec gpu_sched drm_buddy radeon drm_suballoc_helper i2c_algo_bit drm_ttm_helper r8169 ttm drm_display_helper spi_loongson_pci xhci_pci cec xhci_pci_renesas spi_loongson_core hid_generic realtek gpio_loongson_64bit
[   56.697887] Process (udev-worker) (pid: 2742, threadinfo=00000000aee0d8b4, task=00000000a9eff1f3)
[   56.697890] Stack : 0000000000000000 ffff800004817e00 0000000000000000 0000251c00000000
[   56.697896]         0000000000000000 00000011fffffffd 0000000000000000 0000000000000000
[   56.697901]         0000000000000000 1b67a968695184b9 0000000000000000 90000001000b98d0
[   56.697906]         90000001000bb8d0 900000011aa48028 0000000000000000 ffff800004f9d74c
[   56.697911]         90000001000ba000 ffff800004f9ce58 0000000000000000 ffff800005470440
[   56.697916]         ffff800004811f88 90000001000b98d0 9000000100da2aa8 90000001000bb8d0
[   56.697921]         0000000000000000 90000001000ba000 900000011aa48028 ffff800004f9d74c
[   56.697926]         ffff8000054704e8 90000001000bb8b8 90000001000ba000 0000000000000000
[   56.697931]         90000001000bb8d0 9000000006307564 9000000005e666e0 90000001752359b8
[   56.697936]         9000000008cbe400 900000000804d000 9000000005e666e0 0000000000000000
[   56.697941]         ...
[   56.697944] Call Trace:
[   56.697945] [<90000000066e35c8>] strstr+0x28/0x120
[   56.697950] [<ffff800004f98a80>] wacom_update_name+0xe0/0x300 [wacom]
[   56.698000] [<ffff800004f9ce58>] wacom_parse_and_register+0x338/0x900 [wacom]
[   56.698050] [<ffff800004f9d74c>] wacom_probe+0x32c/0x420 [wacom]
[   56.698099] [<9000000006307564>] hid_device_probe+0x144/0x260
[   56.698103] [<9000000005e65d68>] really_probe+0x208/0x540
[   56.698109] [<9000000005e661dc>] __driver_probe_device+0x13c/0x1e0
[   56.698112] [<9000000005e66620>] driver_probe_device+0x40/0x100
[   56.698116] [<9000000005e6680c>] __device_attach_driver+0x12c/0x180
[   56.698119] [<9000000005e62bc8>] bus_for_each_drv+0x88/0x160
[   56.698123] [<9000000005e66468>] __device_attach+0x108/0x260
[   56.698126] [<9000000005e63918>] device_reprobe+0x78/0x100
[   56.698129] [<9000000005e62a68>] bus_for_each_dev+0x88/0x160
[   56.698132] [<9000000006304e54>] __hid_bus_driver_added+0x34/0x80
[   56.698134] [<9000000005e62bc8>] bus_for_each_drv+0x88/0x160
[   56.698137] [<9000000006304df0>] __hid_register_driver+0x70/0xa0
[   56.698142] [<9000000004e10fe4>] do_one_initcall+0x104/0x320
[   56.698146] [<9000000004f38150>] do_init_module+0x90/0x2c0
[   56.698151] [<9000000004f3a3d8>] init_module_from_file+0xb8/0x120
[   56.698155] [<9000000004f3a590>] idempotent_init_module+0x150/0x3a0
[   56.698159] [<9000000004f3a890>] sys_finit_module+0xb0/0x140
[   56.698163] [<900000000671e4e8>] do_syscall+0x88/0xc0
[   56.698166] [<9000000004e12404>] handle_syscall+0xc4/0x160
[   56.698171] Code: 0011958f 00150224 5800cd85 <2a00022c> 00150004 4000c180 0015022c 03400000 03400000
[   56.698192] ---[ end trace 0000000000000000 ]---

Fixes: 09dc28acaec7 ("HID: wacom: Improve generic name generation")
Reported-by: Zhenxing Chen <chenzhenxing@uniontech.com>
Co-developed-by: Xu Rao <raoxu@uniontech.com>
Signed-off-by: Xu Rao <raoxu@uniontech.com>
Signed-off-by: WangYuli <wangyuli@uniontech.com>
Link: https://patch.msgid.link/B31757FE8E1544CF+20241125052616.18261-1-wangyuli@uniontech.com
Cc: stable@vger.kernel.org
Signed-off-by: Benjamin Tissoires <bentiss@kernel.org>
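The wacom patch body itself is not shown on this page (the diffstat below is filtered to an unrelated path). Purely as an illustration of the failure mode in the trace above, where strstr() is reached with a NULL dev->product after the string descriptor read fails, a guard of the kind such a fix needs could look like the sketch below; the helper name is hypothetical and this is not the upstream diff:

/* Hypothetical helper, for illustration only -- not the upstream patch.
 * The oops above comes from passing a NULL dev->product to strstr(),
 * so a missing product string must be treated as "no match".
 */
#include <linux/string.h>
#include <linux/usb.h>

static bool product_name_contains(const struct usb_device *dev,
				  const char *needle)
{
	if (!dev->product)	/* string descriptor 0 read error */
		return false;
	return strstr(dev->product, needle) != NULL;
}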
Diffstat (limited to 'tools/lib/python/kdoc/python_version.py')
0 files changed, 0 insertions, 0 deletions
-rw-r--r--drivers/infiniband/core/fmr_pool.c544
-rw-r--r--drivers/infiniband/core/ib_core_uverbs.c367
-rw-r--r--drivers/infiniband/core/iwcm.c401
-rw-r--r--drivers/infiniband/core/iwcm.h4
-rw-r--r--drivers/infiniband/core/iwpm_msg.c323
-rw-r--r--drivers/infiniband/core/iwpm_util.c238
-rw-r--r--drivers/infiniband/core/iwpm_util.h42
-rw-r--r--drivers/infiniband/core/lag.c135
-rw-r--r--drivers/infiniband/core/mad.c1468
-rw-r--r--drivers/infiniband/core/mad_priv.h102
-rw-r--r--drivers/infiniband/core/mad_rmpp.c99
-rw-r--r--drivers/infiniband/core/mr_pool.c82
-rw-r--r--drivers/infiniband/core/multicast.c142
-rw-r--r--drivers/infiniband/core/netlink.c375
-rw-r--r--drivers/infiniband/core/nldev.c2920
-rw-r--r--drivers/infiniband/core/opa_smi.h8
-rw-r--r--drivers/infiniband/core/packer.c14
-rw-r--r--drivers/infiniband/core/rdma_core.c1050
-rw-r--r--drivers/infiniband/core/rdma_core.h192
-rw-r--r--drivers/infiniband/core/restrack.c314
-rw-r--r--drivers/infiniband/core/restrack.h36
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c596
-rw-r--r--drivers/infiniband/core/rw.c734
-rw-r--r--drivers/infiniband/core/sa.h8
-rw-r--r--drivers/infiniband/core/sa_query.c1675
-rw-r--r--drivers/infiniband/core/security.c750
-rw-r--r--drivers/infiniband/core/smi.c12
-rw-r--r--drivers/infiniband/core/smi.h8
-rw-r--r--drivers/infiniband/core/sysfs.c1542
-rw-r--r--drivers/infiniband/core/trace.c12
-rw-r--r--drivers/infiniband/core/ucaps.c267
-rw-r--r--drivers/infiniband/core/ucm.c1368
-rw-r--r--drivers/infiniband/core/ucma.c1120
-rw-r--r--drivers/infiniband/core/ud_header.c244
-rw-r--r--drivers/infiniband/core/umem.c385
-rw-r--r--drivers/infiniband/core/umem_dmabuf.c275
-rw-r--r--drivers/infiniband/core/umem_odp.c884
-rw-r--r--drivers/infiniband/core/umem_rbtree.c94
-rw-r--r--drivers/infiniband/core/user_mad.c658
-rw-r--r--drivers/infiniband/core/uverbs.h259
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c4609
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c849
-rw-r--r--drivers/infiniband/core/uverbs_main.c1546
-rw-r--r--drivers/infiniband/core/uverbs_marshall.c127
-rw-r--r--drivers/infiniband/core/uverbs_std_types.c269
-rw-r--r--drivers/infiniband/core/uverbs_std_types_async_fd.c79
-rw-r--r--drivers/infiniband/core/uverbs_std_types_counters.c163
-rw-r--r--drivers/infiniband/core/uverbs_std_types_cq.c297
-rw-r--r--drivers/infiniband/core/uverbs_std_types_device.c508
-rw-r--r--drivers/infiniband/core/uverbs_std_types_dm.c116
-rw-r--r--drivers/infiniband/core/uverbs_std_types_dmah.c145
-rw-r--r--drivers/infiniband/core/uverbs_std_types_flow_action.c (renamed from drivers/infiniband/hw/cxgb3/iwch_user.h)66
-rw-r--r--drivers/infiniband/core/uverbs_std_types_mr.c553
-rw-r--r--drivers/infiniband/core/uverbs_std_types_qp.c380
-rw-r--r--drivers/infiniband/core/uverbs_std_types_srq.c234
-rw-r--r--drivers/infiniband/core/uverbs_std_types_wq.c194
-rw-r--r--drivers/infiniband/core/uverbs_uapi.c735
-rw-r--r--drivers/infiniband/core/verbs.c2843
-rw-r--r--drivers/infiniband/hw/Makefile14
-rw-r--r--drivers/infiniband/hw/bnxt_re/Kconfig9
-rw-r--r--drivers/infiniband/hw/bnxt_re/Makefile8
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h290
-rw-r--r--drivers/infiniband/hw/bnxt_re/debugfs.c396
-rw-r--r--drivers/infiniband/hw/bnxt_re/debugfs.h36
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c433
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.h165
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c4941
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h296
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c2613
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c3225
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h688
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c1202
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h294
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c955
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h626
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c1145
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h373
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_tlv.h162
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h4729
-rw-r--r--drivers/infiniband/hw/cxgb3/Kconfig27
-rw-r--r--drivers/infiniband/hw/cxgb3/Makefile8
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_dbg.c207
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c1343
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.h211
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_resource.c343
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_resource.h69
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_wr.h802
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.c292
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.h180
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c2272
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.h233
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cq.c233
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_ev.c232
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_mem.c203
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c1508
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.h360
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c1163
-rw-r--r--drivers/infiniband/hw/cxgb3/tcb.h632
-rw-r--r--drivers/infiniband/hw/cxgb4/Kconfig7
-rw-r--r--drivers/infiniband/hw/cxgb4/Makefile7
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c2521
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c575
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c762
-rw-r--r--drivers/infiniband/hw/cxgb4/ev.c76
-rw-r--r--drivers/infiniband/hw/cxgb4/id_table.c22
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h492
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c760
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c609
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c1757
-rw-r--r--drivers/infiniband/hw/cxgb4/resource.c159
-rw-r--r--drivers/infiniband/hw/cxgb4/restrack.c487
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h250
-rw-r--r--drivers/infiniband/hw/cxgb4/t4fw_ri_api.h205
-rw-r--r--drivers/infiniband/hw/cxgb4/user.h80
-rw-r--r--drivers/infiniband/hw/efa/Kconfig15
-rw-r--r--drivers/infiniband/hw/efa/Makefile9
-rw-r--r--drivers/infiniband/hw/efa/efa.h198
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h1140
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_defs.h175
-rw-r--r--drivers/infiniband/hw/efa/efa_com.c1255
-rw-r--r--drivers/infiniband/hw/efa/efa_com.h180
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c838
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.h353
-rw-r--r--drivers/infiniband/hw/efa/efa_common_defs.h29
-rw-r--r--drivers/infiniband/hw/efa/efa_io_defs.h391
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c706
-rw-r--r--drivers/infiniband/hw/efa/efa_regs_defs.h101
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c2350
-rw-r--r--drivers/infiniband/hw/erdma/Kconfig12
-rw-r--r--drivers/infiniband/hw/erdma/Makefile4
-rw-r--r--drivers/infiniband/hw/erdma/erdma.h283
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cm.c1431
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cm.h167
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cmdq.c452
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cq.c268
-rw-r--r--drivers/infiniband/hw/erdma/erdma_eq.c326
-rw-r--r--drivers/infiniband/hw/erdma/erdma_hw.h753
-rw-r--r--drivers/infiniband/hw/erdma/erdma_main.c684
-rw-r--r--drivers/infiniband/hw/erdma/erdma_qp.c757
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.c2300
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.h491
-rw-r--r--drivers/infiniband/hw/hfi1/Kconfig23
-rw-r--r--drivers/infiniband/hw/hfi1/Makefile68
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c1170
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.h88
-rw-r--r--drivers/infiniband/hw/hfi1/aspm.c270
-rw-r--r--drivers/infiniband/hw/hfi1/aspm.h35
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c15484
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h1428
-rw-r--r--drivers/infiniband/hw/hfi1/chip_registers.h1295
-rw-r--r--drivers/infiniband/hw/hfi1/common.h304
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.c1335
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.h65
-rw-r--r--drivers/infiniband/hw/hfi1/device.c132
-rw-r--r--drivers/infiniband/hw/hfi1/device.h19
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c1906
-rw-r--r--drivers/infiniband/hw/hfi1/efivar.c138
-rw-r--r--drivers/infiniband/hw/hfi1/efivar.h16
-rw-r--r--drivers/infiniband/hw/hfi1/eprom.c450
-rw-r--r--drivers/infiniband/hw/hfi1/eprom.h10
-rw-r--r--drivers/infiniband/hw/hfi1/exp_rcv.c78
-rw-r--r--drivers/infiniband/hw/hfi1/exp_rcv.h171
-rw-r--r--drivers/infiniband/hw/hfi1/fault.c319
-rw-r--r--drivers/infiniband/hw/hfi1/fault.h69
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c1714
-rw-r--r--drivers/infiniband/hw/hfi1/firmware.c2253
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h2632
-rw-r--r--drivers/infiniband/hw/hfi1/init.c1978
-rw-r--r--drivers/infiniband/hw/hfi1/intr.c190
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.c128
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.h457
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib.h171
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_main.c250
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_rx.c92
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_tx.c868
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c4896
-rw-r--r--drivers/infiniband/hw/hfi1/mad.h436
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.c312
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.h67
-rw-r--r--drivers/infiniband/hw/hfi1/msix.c348
-rw-r--r--drivers/infiniband/hw/hfi1/msix.h24
-rw-r--r--drivers/infiniband/hw/hfi1/netdev.h105
-rw-r--r--drivers/infiniband/hw/hfi1/netdev_rx.c487
-rw-r--r--drivers/infiniband/hw/hfi1/opa_compat.h86
-rw-r--r--drivers/infiniband/hw/hfi1/opfn.c323
-rw-r--r--drivers/infiniband/hw/hfi1/opfn.h87
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c1382
-rw-r--r--drivers/infiniband/hw/hfi1/pin_system.c474
-rw-r--r--drivers/infiniband/hw/hfi1/pinning.h20
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c2135
-rw-r--r--drivers/infiniband/hw/hfi1/pio.h292
-rw-r--r--drivers/infiniband/hw/hfi1/pio_copy.c715
-rw-r--r--drivers/infiniband/hw/hfi1/platform.c1035
-rw-r--r--drivers/infiniband/hw/hfi1/platform.h371
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c925
-rw-r--r--drivers/infiniband/hw/hfi1/qp.h107
-rw-r--r--drivers/infiniband/hw/hfi1/qsfp.c796
-rw-r--r--drivers/infiniband/hw/hfi1/qsfp.h202
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c3244
-rw-r--r--drivers/infiniband/hw/hfi1/rc.h59
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c575
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c3364
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h1055
-rw-r--r--drivers/infiniband/hw/hfi1/sdma_txreq.h97
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c697
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c5534
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.h319
-rw-r--r--drivers/infiniband/hw/hfi1/trace.c532
-rw-r--r--drivers/infiniband/hw/hfi1/trace.h24
-rw-r--r--drivers/infiniband/hw/hfi1/trace_ctxts.h115
-rw-r--r--drivers/infiniband/hw/hfi1/trace_dbg.h118
-rw-r--r--drivers/infiniband/hw/hfi1/trace_ibhdrs.h455
-rw-r--r--drivers/infiniband/hw/hfi1/trace_iowait.h54
-rw-r--r--drivers/infiniband/hw/hfi1/trace_misc.h108
-rw-r--r--drivers/infiniband/hw/hfi1/trace_mmu.h72
-rw-r--r--drivers/infiniband/hw/hfi1/trace_rc.h125
-rw-r--r--drivers/infiniband/hw/hfi1/trace_rx.h112
-rw-r--r--drivers/infiniband/hw/hfi1/trace_tid.h1642
-rw-r--r--drivers/infiniband/hw/hfi1/trace_tx.h1065
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c (renamed from drivers/infiniband/hw/qib/qib_uc.c)356
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c1023
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c966
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.h66
-rw-r--r--drivers/infiniband/hw/hfi1/user_pages.c103
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c1224
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.h201
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c1948
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h487
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.c99
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.h79
-rw-r--r--drivers/infiniband/hw/hfi1/vnic.h126
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_main.c615
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_sdma.c282
-rw-r--r--drivers/infiniband/hw/hns/Kconfig11
-rw-r--r--drivers/infiniband/hw/hns/Makefile14
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c142
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c187
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.c291
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.h158
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_common.h199
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c542
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_db.c180
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_debugfs.c111
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_debugfs.h33
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h1309
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c1467
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.h120
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c7274
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h1481
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c1180
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c1104
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c174
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c1609
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_restrack.c234
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c550
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_trace.h216
-rw-r--r--drivers/infiniband/hw/ionic/Kconfig15
-rw-r--r--drivers/infiniband/hw/ionic/Makefile9
-rw-r--r--drivers/infiniband/hw/ionic/ionic_admin.c1229
-rw-r--r--drivers/infiniband/hw/ionic/ionic_controlpath.c2679
-rw-r--r--drivers/infiniband/hw/ionic/ionic_datapath.c1399
-rw-r--r--drivers/infiniband/hw/ionic/ionic_fw.h1029
-rw-r--r--drivers/infiniband/hw/ionic/ionic_hw_stats.c484
-rw-r--r--drivers/infiniband/hw/ionic/ionic_ibdev.c440
-rw-r--r--drivers/infiniband/hw/ionic/ionic_ibdev.h517
-rw-r--r--drivers/infiniband/hw/ionic/ionic_lif_cfg.c111
-rw-r--r--drivers/infiniband/hw/ionic/ionic_lif_cfg.h66
-rw-r--r--drivers/infiniband/hw/ionic/ionic_pgtbl.c143
-rw-r--r--drivers/infiniband/hw/ionic/ionic_queue.c52
-rw-r--r--drivers/infiniband/hw/ionic/ionic_queue.h234
-rw-r--r--drivers/infiniband/hw/ionic/ionic_res.h154
-rw-r--r--drivers/infiniband/hw/irdma/Kconfig14
-rw-r--r--drivers/infiniband/hw/irdma/Makefile31
-rw-r--r--drivers/infiniband/hw/irdma/cm.c4434
-rw-r--r--drivers/infiniband/hw/irdma/cm.h416
-rw-r--r--drivers/infiniband/hw/irdma/ctrl.c6697
-rw-r--r--drivers/infiniband/hw/irdma/defs.h1184
-rw-r--r--drivers/infiniband/hw/irdma/hmc.c709
-rw-r--r--drivers/infiniband/hw/irdma/hmc.h186
-rw-r--r--drivers/infiniband/hw/irdma/hw.c2826
-rw-r--r--drivers/infiniband/hw/irdma/i40iw_hw.c261
-rw-r--r--drivers/infiniband/hw/irdma/i40iw_hw.h162
-rw-r--r--drivers/infiniband/hw/irdma/i40iw_if.c220
-rw-r--r--drivers/infiniband/hw/irdma/icrdma_hw.c205
-rw-r--r--drivers/infiniband/hw/irdma/icrdma_hw.h73
-rw-r--r--drivers/infiniband/hw/irdma/icrdma_if.c343
-rw-r--r--drivers/infiniband/hw/irdma/ig3rdma_hw.c170
-rw-r--r--drivers/infiniband/hw/irdma/ig3rdma_hw.h32
-rw-r--r--drivers/infiniband/hw/irdma/ig3rdma_if.c232
-rw-r--r--drivers/infiniband/hw/irdma/irdma.h174
-rw-r--r--drivers/infiniband/hw/irdma/main.c211
-rw-r--r--drivers/infiniband/hw/irdma/main.h579
-rw-r--r--drivers/infiniband/hw/irdma/osdep.h74
-rw-r--r--drivers/infiniband/hw/irdma/pble.c517
-rw-r--r--drivers/infiniband/hw/irdma/pble.h132
-rw-r--r--drivers/infiniband/hw/irdma/protos.h97
-rw-r--r--drivers/infiniband/hw/irdma/puda.c1734
-rw-r--r--drivers/infiniband/hw/irdma/puda.h182
-rw-r--r--drivers/infiniband/hw/irdma/trace.c112
-rw-r--r--drivers/infiniband/hw/irdma/trace.h3
-rw-r--r--drivers/infiniband/hw/irdma/trace_cm.h460
-rw-r--r--drivers/infiniband/hw/irdma/type.h1679
-rw-r--r--drivers/infiniband/hw/irdma/uda.c265
-rw-r--r--drivers/infiniband/hw/irdma/uda.h87
-rw-r--r--drivers/infiniband/hw/irdma/uda_d.h127
-rw-r--r--drivers/infiniband/hw/irdma/uk.c1925
-rw-r--r--drivers/infiniband/hw/irdma/user.h674
-rw-r--r--drivers/infiniband/hw/irdma/utils.c2528
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c5505
-rw-r--r--drivers/infiniband/hw/irdma/verbs.h340
-rw-r--r--drivers/infiniband/hw/irdma/virtchnl.c618
-rw-r--r--drivers/infiniband/hw/irdma/virtchnl.h176
-rw-r--r--drivers/infiniband/hw/irdma/ws.c406
-rw-r--r--drivers/infiniband/hw/irdma/ws.h41
-rw-r--r--drivers/infiniband/hw/mana/Kconfig10
-rw-r--r--drivers/infiniband/hw/mana/Makefile4
-rw-r--r--drivers/infiniband/hw/mana/ah.c58
-rw-r--r--drivers/infiniband/hw/mana/counters.c179
-rw-r--r--drivers/infiniband/hw/mana/counters.h62
-rw-r--r--drivers/infiniband/hw/mana/cq.c342
-rw-r--r--drivers/infiniband/hw/mana/device.c261
-rw-r--r--drivers/infiniband/hw/mana/main.c1134
-rw-r--r--drivers/infiniband/hw/mana/mana_ib.h738
-rw-r--r--drivers/infiniband/hw/mana/mr.c319
-rw-r--r--drivers/infiniband/hw/mana/qp.c921
-rw-r--r--drivers/infiniband/hw/mana/shadow_queue.h115
-rw-r--r--drivers/infiniband/hw/mana/wq.c92
-rw-r--r--drivers/infiniband/hw/mana/wr.c168
-rw-r--r--drivers/infiniband/hw/mlx4/Kconfig3
-rw-r--r--drivers/infiniband/hw/mlx4/Makefile1
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c240
-rw-r--r--drivers/infiniband/hw/mlx4/alias_GUID.c44
-rw-r--r--drivers/infiniband/hw/mlx4/cm.c252
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c204
-rw-r--r--drivers/infiniband/hw/mlx4/doorbell.c12
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c696
-rw-r--r--drivers/infiniband/hw/mlx4/main.c1888
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c149
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h309
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c391
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c2674
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c117
-rw-r--r--drivers/infiniband/hw/mlx4/sysfs.c91
-rw-r--r--drivers/infiniband/hw/mlx4/user.h107
-rw-r--r--drivers/infiniband/hw/mlx5/Kconfig5
-rw-r--r--drivers/infiniband/hw/mlx5/Makefile33
-rw-r--r--drivers/infiniband/hw/mlx5/ah.c130
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c268
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.h63
-rw-r--r--drivers/infiniband/hw/mlx5/cong.c491
-rw-r--r--drivers/infiniband/hw/mlx5/counters.c1279
-rw-r--r--drivers/infiniband/hw/mlx5/counters.h19
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c896
-rw-r--r--drivers/infiniband/hw/mlx5/data_direct.c227
-rw-r--r--drivers/infiniband/hw/mlx5/data_direct.h23
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c3228
-rw-r--r--drivers/infiniband/hw/mlx5/devx.h50
-rw-r--r--drivers/infiniband/hw/mlx5/dm.c612
-rw-r--r--drivers/infiniband/hw/mlx5/dm.h68
-rw-r--r--drivers/infiniband/hw/mlx5/dmah.c54
-rw-r--r--drivers/infiniband/hw/mlx5/dmah.h23
-rw-r--r--drivers/infiniband/hw/mlx5/doorbell.c15
-rw-r--r--drivers/infiniband/hw/mlx5/fs.c3501
-rw-r--r--drivers/infiniband/hw/mlx5/fs.h36
-rw-r--r--drivers/infiniband/hw/mlx5/gsi.c492
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.c276
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.h40
-rw-r--r--drivers/infiniband/hw/mlx5/ib_virt.c224
-rw-r--r--drivers/infiniband/hw/mlx5/macsec.c364
-rw-r--r--drivers/infiniband/hw/mlx5/macsec.h29
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c433
-rw-r--r--drivers/infiniband/hw/mlx5/main.c5213
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c226
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h1806
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c3350
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c2163
-rw-r--r--drivers/infiniband/hw/mlx5/qos.c133
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c6616
-rw-r--r--drivers/infiniband/hw/mlx5/qp.h60
-rw-r--r--drivers/infiniband/hw/mlx5/qpc.c697
-rw-r--r--drivers/infiniband/hw/mlx5/restrack.c217
-rw-r--r--drivers/infiniband/hw/mlx5/restrack.h13
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c340
-rw-r--r--drivers/infiniband/hw/mlx5/srq.h69
-rw-r--r--drivers/infiniband/hw/mlx5/srq_cmd.c774
-rw-r--r--drivers/infiniband/hw/mlx5/std_types.c277
-rw-r--r--drivers/infiniband/hw/mlx5/umr.c1129
-rw-r--r--drivers/infiniband/hw/mlx5/umr.h113
-rw-r--r--drivers/infiniband/hw/mlx5/user.h133
-rw-r--r--drivers/infiniband/hw/mlx5/wr.c1284
-rw-r--r--drivers/infiniband/hw/mlx5/wr.h136
-rw-r--r--drivers/infiniband/hw/mthca/Kconfig5
-rw-r--r--drivers/infiniband/hw/mthca/Makefile1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_allocator.c31
-rw-r--r--drivers/infiniband/hw/mthca/mthca_av.c94
-rw-r--r--drivers/infiniband/hw/mthca/mthca_catas.c12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c44
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cq.c22
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h70
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c43
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c106
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c66
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c53
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.h2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c291
-rw-r--r--drivers/infiniband/hw/mthca/mthca_profile.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c923
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.h50
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c338
-rw-r--r--drivers/infiniband/hw/mthca/mthca_reset.c46
-rw-r--r--drivers/infiniband/hw/mthca/mthca_srq.c58
-rw-r--r--drivers/infiniband/hw/mthca/mthca_user.h112
-rw-r--r--drivers/infiniband/hw/nes/Kconfig16
-rw-r--r--drivers/infiniband/hw/nes/Makefile3
-rw-r--r--drivers/infiniband/hw/nes/nes.c1270
-rw-r--r--drivers/infiniband/hw/nes/nes.h582
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c4192
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.h478
-rw-r--r--drivers/infiniband/hw/nes/nes_context.h193
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c3937
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h1392
-rw-r--r--drivers/infiniband/hw/nes/nes_mgt.c1160
-rw-r--r--drivers/infiniband/hw/nes/nes_mgt.h97
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c1883
-rw-r--r--drivers/infiniband/hw/nes/nes_user.h114
-rw-r--r--drivers/infiniband/hw/nes/nes_utils.c972
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c4090
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.h189
-rw-r--r--drivers/infiniband/hw/ocrdma/Kconfig3
-rw-r--r--drivers/infiniband/hw/ocrdma/Makefile3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h34
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_abi.h149
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c204
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.h23
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c281
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.h12
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c394
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_sli.h84
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c136
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.h5
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c996
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h83
-rw-r--r--drivers/infiniband/hw/qedr/Kconfig11
-rw-r--r--drivers/infiniband/hw/qedr/Makefile4
-rw-r--r--drivers/infiniband/hw/qedr/main.c1056
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h642
-rw-r--r--drivers/infiniband/hw/qedr/qedr_hsi_rdma.h751
-rw-r--r--drivers/infiniband/hw/qedr/qedr_iw_cm.c819
-rw-r--r--drivers/infiniband/hw/qedr/qedr_iw_cm.h (renamed from drivers/infiniband/hw/qib/qib_debugfs.h)32
-rw-r--r--drivers/infiniband/hw/qedr/qedr_roce_cm.c729
-rw-r--r--drivers/infiniband/hw/qedr/qedr_roce_cm.h (renamed from drivers/infiniband/hw/qib/qib_pio_copy.c)57
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c4499
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h103
-rw-r--r--drivers/infiniband/hw/qib/Kconfig15
-rw-r--r--drivers/infiniband/hw/qib/Makefile16
-rw-r--r--drivers/infiniband/hw/qib/qib.h1543
-rw-r--r--drivers/infiniband/hw/qib/qib_6120_regs.h977
-rw-r--r--drivers/infiniband/hw/qib/qib_7220.h149
-rw-r--r--drivers/infiniband/hw/qib/qib_7220_regs.h1496
-rw-r--r--drivers/infiniband/hw/qib/qib_7322_regs.h3163
-rw-r--r--drivers/infiniband/hw/qib/qib_common.h812
-rw-r--r--drivers/infiniband/hw/qib/qib_cq.c545
-rw-r--r--drivers/infiniband/hw/qib/qib_debugfs.c283
-rw-r--r--drivers/infiniband/hw/qib/qib_diag.c916
-rw-r--r--drivers/infiniband/hw/qib/qib_dma.c169
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c820
-rw-r--r--drivers/infiniband/hw/qib/qib_eeprom.c276
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c2418
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c621
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c3600
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c4657
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c8574
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c1847
-rw-r--r--drivers/infiniband/hw/qib/qib_intr.c240
-rw-r--r--drivers/infiniband/hw/qib/qib_keys.c391
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c2542
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.h300
-rw-r--r--drivers/infiniband/hw/qib/qib_mmap.c174
-rw-r--r--drivers/infiniband/hw/qib/qib_mr.c537
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c719
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c1376
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.c559
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.h189
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c2290
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c820
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c1454
-rw-r--r--drivers/infiniband/hw/qib/qib_sdma.c1039
-rw-r--r--drivers/infiniband/hw/qib/qib_srq.c380
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c818
-rw-r--r--drivers/infiniband/hw/qib/qib_twsi.c501
-rw-r--r--drivers/infiniband/hw/qib/qib_tx.c572
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c590
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c157
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c1465
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.h52
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c2367
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h1180
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs_mcast.c368
-rw-r--r--drivers/infiniband/hw/qib/qib_wc_ppc64.c62
-rw-r--r--drivers/infiniband/hw/qib/qib_wc_x86_64.c150
-rw-r--r--drivers/infiniband/hw/usnic/Kconfig5
-rw-r--r--drivers/infiniband/hw/usnic/Makefile3
-rw-r--r--drivers/infiniband/hw/usnic/usnic.h21
-rw-r--r--drivers/infiniband/hw/usnic/usnic_abi.h23
-rw-r--r--drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h22
-rw-r--r--drivers/infiniband/hw/usnic/usnic_common_util.h59
-rw-r--r--drivers/infiniband/hw/usnic/usnic_debugfs.c50
-rw-r--r--drivers/infiniband/hw/usnic/usnic_debugfs.h21
-rw-r--r--drivers/infiniband/hw/usnic/usnic_fwd.c43
-rw-r--r--drivers/infiniband/hw/usnic/usnic_fwd.h30
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib.h27
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c364
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c96
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h56
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.c240
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.h23
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c417
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.h72
-rw-r--r--drivers/infiniband/hw/usnic/usnic_log.h21
-rw-r--r--drivers/infiniband/hw/usnic/usnic_transport.c33
-rw-r--r--drivers/infiniband/hw/usnic/usnic_transport.h21
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c170
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.h36
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c39
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h33
-rw-r--r--drivers/infiniband/hw/usnic/usnic_vnic.c96
-rw-r--r--drivers/infiniband/hw/usnic/usnic_vnic.h21
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/Kconfig8
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/Makefile4
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma.h555
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cmd.c119
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c407
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h685
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c127
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c1160
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c306
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c332
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c1046
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h114
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c303
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c529
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h404
-rw-r--r--drivers/infiniband/sw/Makefile4
-rw-r--r--drivers/infiniband/sw/rdmavt/Kconfig8
-rw-r--r--drivers/infiniband/sw/rdmavt/Makefile14
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.c139
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.h17
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c536
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.h20
-rw-r--r--drivers/infiniband/sw/rdmavt/mad.c130
-rw-r--r--drivers/infiniband/sw/rdmavt/mad.h18
-rw-r--r--drivers/infiniband/sw/rdmavt/mcast.c401
-rw-r--r--drivers/infiniband/sw/rdmavt/mcast.h16
-rw-r--r--drivers/infiniband/sw/rdmavt/mmap.c169
-rw-r--r--drivers/infiniband/sw/rdmavt/mmap.h19
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c922
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.h37
-rw-r--r--drivers/infiniband/sw/rdmavt/pd.c64
-rw-r--r--drivers/infiniband/sw/rdmavt/pd.h14
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c3218
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.h30
-rw-r--r--drivers/infiniband/sw/rdmavt/rc.c172
-rw-r--r--drivers/infiniband/sw/rdmavt/srq.c306
-rw-r--r--drivers/infiniband/sw/rdmavt/srq.h19
-rw-r--r--drivers/infiniband/sw/rdmavt/trace.c7
-rw-r--r--drivers/infiniband/sw/rdmavt/trace.h14
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_cq.h124
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_mr.h182
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_qp.h96
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_rc.h67
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_rvt.h39
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_tx.h163
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c619
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.h62
-rw-r--r--drivers/infiniband/sw/rxe/Kconfig27
-rw-r--r--drivers/infiniband/sw/rxe/Makefile27
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c280
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h124
-rw-r--r--drivers/infiniband/sw/rxe/rxe_av.c173
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c845
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c130
-rw-r--r--drivers/infiniband/sw/rxe/rxe_hdr.h977
-rw-r--r--drivers/infiniband/sw/rxe/rxe_hw_counters.c51
-rw-r--r--drivers/infiniband/sw/rxe/rxe_hw_counters.h37
-rw-r--r--drivers/infiniband/sw/rxe/rxe_icrc.c137
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h248
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mcast.c497
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mmap.c148
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c730
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mw.c338
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c693
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.h24
-rw-r--r--drivers/infiniband/sw/rxe/rxe_odp.c578
-rw-r--r--drivers/infiniband/sw/rxe/rxe_opcode.c975
-rw-r--r--drivers/infiniband/sw/rxe/rxe_opcode.h111
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h153
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c255
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.h82
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c876
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.c201
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.h284
-rw-r--r--drivers/infiniband/sw/rxe/rxe_recv.c359
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c848
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c1709
-rw-r--r--drivers/infiniband/sw/rxe/rxe_srq.c199
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c299
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.h58
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c1565
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h519
-rw-r--r--drivers/infiniband/sw/siw/Kconfig20
-rw-r--r--drivers/infiniband/sw/siw/Makefile11
-rw-r--r--drivers/infiniband/sw/siw/iwarp.h367
-rw-r--r--drivers/infiniband/sw/siw/siw.h729
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c1958
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.h133
-rw-r--r--drivers/infiniband/sw/siw/siw_cq.c122
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c508
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.c400
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.h69
-rw-r--r--drivers/infiniband/sw/siw/siw_qp.c1318
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c1455
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_tx.c1306
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c1891
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.h91
-rw-r--r--drivers/infiniband/ulp/Makefile3
-rw-r--r--drivers/infiniband/ulp/ipoib/Kconfig11
-rw-r--r--drivers/infiniband/ulp/ipoib/Makefile1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h172
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c421
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c172
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_fs.c74
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c828
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c1687
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c318
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_netlink.c78
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c132
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c257
-rw-r--r--drivers/infiniband/ulp/iser/Kconfig7
-rw-r--r--drivers/infiniband/ulp/iser/Makefile1
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c281
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h387
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c553
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c852
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c988
-rw-r--r--drivers/infiniband/ulp/isert/Kconfig3
-rw-r--r--drivers/infiniband/ulp/isert/Makefile2
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c2243
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h203
-rw-r--r--drivers/infiniband/ulp/isert/isert_proto.h47
-rw-r--r--drivers/infiniband/ulp/opa_vnic/Kconfig9
-rw-r--r--drivers/infiniband/ulp/opa_vnic/Makefile9
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c513
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h524
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c183
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h329
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c400
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c1056
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c390
-rw-r--r--drivers/infiniband/ulp/rtrs/Kconfig27
-rw-r--r--drivers/infiniband/ulp/rtrs/Makefile21
-rw-r--r--drivers/infiniband/ulp/rtrs/README213
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c198
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c514
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-trace.c15
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-trace.h86
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c3207
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.h252
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-log.h28
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-pri.h408
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c51
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c319
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-trace.c16
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-trace.h88
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c2346
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.h156
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.c645
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.h188
-rw-r--r--drivers/infiniband/ulp/srp/Kbuild1
-rw-r--r--drivers/infiniband/ulp/srp/Kconfig5
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c2499
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h136
-rw-r--r--drivers/infiniband/ulp/srpt/Kconfig5
-rw-r--r--drivers/infiniband/ulp/srpt/Makefile2
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c4016
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h330
705 files changed, 333610 insertions, 123174 deletions
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index aa26f3c3416b..f0323f1d6f01 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -1,11 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
menuconfig INFINIBAND
tristate "InfiniBand support"
- depends on PCI || BROKEN
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && HAS_DMA
depends on NET
depends on INET
depends on m || IPV6 != m
- ---help---
+ depends on !ALPHA
+ select IRQ_POLL
+ select DIMLIB
+ help
Core support for InfiniBand (IB). Make sure to also select
any protocols you wish to use as well as drivers for your
InfiniBand hardware.
@@ -15,54 +18,88 @@ if INFINIBAND
config INFINIBAND_USER_MAD
tristate "InfiniBand userspace MAD support"
depends on INFINIBAND
- ---help---
+ help
Userspace InfiniBand Management Datagram (MAD) support. This
is the kernel side of the userspace MAD support, which allows
userspace processes to send and receive MADs. You will also
- need libibumad from <http://www.openfabrics.org/downloads/management/>.
+ need libibumad from rdma-core
+ <https://github.com/linux-rdma/rdma-core>.
config INFINIBAND_USER_ACCESS
tristate "InfiniBand userspace access (verbs and CM)"
- select ANON_INODES
- ---help---
+ depends on MMU
+ help
Userspace InfiniBand access support. This enables the
kernel side of userspace verbs and the userspace
communication manager (CM). This allows userspace processes
to set up connections and directly access InfiniBand
hardware for fast-path operations. You will also need
libibverbs, libibcm and a hardware driver library from
- <http://www.openfabrics.org/git/>.
+ rdma-core <https://github.com/linux-rdma/rdma-core>.
config INFINIBAND_USER_MEM
bool
depends on INFINIBAND_USER_ACCESS != n
+ depends on MMU
+ select DMA_SHARED_BUFFER
default y
config INFINIBAND_ON_DEMAND_PAGING
bool "InfiniBand on-demand paging support"
depends on INFINIBAND_USER_MEM
select MMU_NOTIFIER
+ select INTERVAL_TREE
+ select HMM_MIRROR
default y
- ---help---
+ help
On demand paging support for the InfiniBand subsystem.
Together with driver support this allows registration of
memory regions without pinning their pages, fetching the
pages on demand instead.
config INFINIBAND_ADDR_TRANS
- bool
+ bool "RDMA/CM"
depends on INFINIBAND
default y
+ help
+ Support for RDMA communication manager (CM).
+ This allows for a generic connection abstraction over RDMA.
-source "drivers/infiniband/hw/mthca/Kconfig"
-source "drivers/infiniband/hw/qib/Kconfig"
-source "drivers/infiniband/hw/cxgb3/Kconfig"
+config INFINIBAND_ADDR_TRANS_CONFIGFS
+ bool
+ depends on INFINIBAND_ADDR_TRANS && CONFIGFS_FS && !(INFINIBAND=y && CONFIGFS_FS=m)
+ default y
+ help
+ ConfigFS support for RDMA communication manager (CM).
+ This allows the user to config the default GID type that the CM
+ uses for each device, when initiaing new connections.
+
+config INFINIBAND_VIRT_DMA
+ def_bool !HIGHMEM
+
+if INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
+if !UML
+source "drivers/infiniband/hw/bnxt_re/Kconfig"
source "drivers/infiniband/hw/cxgb4/Kconfig"
+source "drivers/infiniband/hw/efa/Kconfig"
+source "drivers/infiniband/hw/erdma/Kconfig"
+source "drivers/infiniband/hw/hfi1/Kconfig"
+source "drivers/infiniband/hw/hns/Kconfig"
+source "drivers/infiniband/hw/ionic/Kconfig"
+source "drivers/infiniband/hw/irdma/Kconfig"
+source "drivers/infiniband/hw/mana/Kconfig"
source "drivers/infiniband/hw/mlx4/Kconfig"
source "drivers/infiniband/hw/mlx5/Kconfig"
-source "drivers/infiniband/hw/nes/Kconfig"
+source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/ocrdma/Kconfig"
+source "drivers/infiniband/hw/qedr/Kconfig"
source "drivers/infiniband/hw/usnic/Kconfig"
+source "drivers/infiniband/hw/vmw_pvrdma/Kconfig"
+source "drivers/infiniband/sw/rdmavt/Kconfig"
+endif # !UML
+source "drivers/infiniband/sw/rxe/Kconfig"
+source "drivers/infiniband/sw/siw/Kconfig"
+endif
source "drivers/infiniband/ulp/ipoib/Kconfig"
@@ -71,5 +108,8 @@ source "drivers/infiniband/ulp/srpt/Kconfig"
source "drivers/infiniband/ulp/iser/Kconfig"
source "drivers/infiniband/ulp/isert/Kconfig"
+source "drivers/infiniband/ulp/rtrs/Kconfig"
+
+source "drivers/infiniband/ulp/opa_vnic/Kconfig"
endif # INFINIBAND
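As a usage note that is not part of the diff: with the restructured Kconfig above, a build that wants the core stack plus userspace verbs can enable it through a config fragment along these lines. This is an illustrative selection only; the hardware driver symbol shown (CONFIG_MLX5_INFINIBAND) is just one example and the right set depends on your devices and ULPs.

# Illustrative kernel config fragment (not part of the patch)
CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_MAD=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_INFINIBAND_USER_MEM=y
CONFIG_INFINIBAND_ON_DEMAND_PAGING=y
CONFIG_INFINIBAND_ADDR_TRANS=y
CONFIG_MLX5_INFINIBAND=m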
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index dc21836b5a8d..8603cdfcfdcb 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_INFINIBAND) += core/
obj-$(CONFIG_INFINIBAND) += hw/
obj-$(CONFIG_INFINIBAND) += ulp/
+obj-$(CONFIG_INFINIBAND) += sw/
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index d43a8994ac5c..f483e0c12444 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -1,35 +1,46 @@
+# SPDX-License-Identifier: GPL-2.0
infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_cm.o
user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o
-obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
- ib_cm.o iw_cm.o ib_addr.o \
+obj-$(CONFIG_INFINIBAND) += ib_core.o ib_cm.o iw_cm.o \
$(infiniband-y)
obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
-obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
- $(user_access-y)
+obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o $(user_access-y)
-ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
- device.o fmr_pool.o cache.o netlink.o \
- roce_gid_mgmt.o
-ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
-ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o
+ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
+ device.o cache.o netlink.o \
+ roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
+ multicast.o mad.o smi.o agent.o mad_rmpp.o \
+ nldev.o restrack.o counters.o ib_core_uverbs.o \
+ trace.o lag.o
-ib_mad-y := mad.o smi.o agent.o mad_rmpp.o
+ib_core-$(CONFIG_SECURITY_INFINIBAND) += security.o
+ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o
-ib_sa-y := sa_query.o multicast.o
-
-ib_cm-y := cm.o
+ib_cm-y := cm.o cm_trace.o
iw_cm-y := iwcm.o iwpm_util.o iwpm_msg.o
-rdma_cm-y := cma.o
+CFLAGS_cma_trace.o += -I$(src)
+rdma_cm-y := cma.o cma_trace.o
-rdma_ucm-y := ucma.o
+rdma_cm-$(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS) += cma_configfs.o
-ib_addr-y := addr.o
+rdma_ucm-y := ucma.o
ib_umad-y := user_mad.o
-ib_ucm-y := ucm.o
-
-ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o
+ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o \
+ rdma_core.o uverbs_std_types.o uverbs_ioctl.o \
+ uverbs_std_types_cq.o \
+ uverbs_std_types_dmah.o \
+ uverbs_std_types_flow_action.o uverbs_std_types_dm.o \
+ uverbs_std_types_mr.o uverbs_std_types_counters.o \
+ uverbs_uapi.o uverbs_std_types_device.o \
+ uverbs_std_types_async_fd.o \
+ uverbs_std_types_srq.o \
+ uverbs_std_types_wq.o \
+ uverbs_std_types_qp.o \
+ ucaps.o
+ib_uverbs-$(CONFIG_INFINIBAND_USER_MEM) += umem.o umem_dmabuf.o
+ib_uverbs-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 746cdf56bc76..61596cda2b65 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -37,41 +37,162 @@
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
-#include <linux/module.h>
#include <net/arp.h>
#include <net/neighbour.h>
#include <net/route.h>
#include <net/netevent.h>
-#include <net/addrconf.h>
+#include <net/ipv6_stubs.h>
#include <net/ip6_route.h>
#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+#include <rdma/ib_sa.h>
#include <rdma/ib.h>
+#include <rdma/rdma_netlink.h>
+#include <net/netlink.h>
-MODULE_AUTHOR("Sean Hefty");
-MODULE_DESCRIPTION("IB Address Translation");
-MODULE_LICENSE("Dual BSD/GPL");
+#include "core_priv.h"
struct addr_req {
struct list_head list;
struct sockaddr_storage src_addr;
struct sockaddr_storage dst_addr;
struct rdma_dev_addr *addr;
- struct rdma_addr_client *client;
void *context;
void (*callback)(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context);
unsigned long timeout;
+ struct delayed_work work;
+ bool resolve_by_gid_attr; /* Consider gid attr in resolve phase */
int status;
+ u32 seq;
};
-static void process_req(struct work_struct *work);
+static atomic_t ib_nl_addr_request_seq = ATOMIC_INIT(0);
-static DEFINE_MUTEX(lock);
+static DEFINE_SPINLOCK(lock);
static LIST_HEAD(req_list);
-static DECLARE_DELAYED_WORK(work, process_req);
static struct workqueue_struct *addr_wq;
-int rdma_addr_size(struct sockaddr *addr)
+static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
+ [LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
+ .len = sizeof(struct rdma_nla_ls_gid),
+ .validation_type = NLA_VALIDATE_MIN,
+ .min = sizeof(struct rdma_nla_ls_gid)},
+};
+
+static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
+{
+ struct nlattr *tb[LS_NLA_TYPE_MAX] = {};
+ int ret;
+
+ if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
+ return false;
+
+ ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
+ nlmsg_len(nlh), ib_nl_addr_policy, NULL);
+ if (ret)
+ return false;
+
+ return true;
+}
+
+static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh)
+{
+ const struct nlattr *head, *curr;
+ union ib_gid gid;
+ struct addr_req *req;
+ int len, rem;
+ int found = 0;
+
+ head = (const struct nlattr *)nlmsg_data(nlh);
+ len = nlmsg_len(nlh);
+
+ nla_for_each_attr(curr, head, len, rem) {
+ if (curr->nla_type == LS_NLA_TYPE_DGID)
+ memcpy(&gid, nla_data(curr), nla_len(curr));
+ }
+
+ spin_lock_bh(&lock);
+ list_for_each_entry(req, &req_list, list) {
+ if (nlh->nlmsg_seq != req->seq)
+ continue;
+ /* We set the DGID part, the rest was set earlier */
+ rdma_addr_set_dgid(req->addr, &gid);
+ req->status = 0;
+ found = 1;
+ break;
+ }
+ spin_unlock_bh(&lock);
+
+ if (!found)
+ pr_info("Couldn't find request waiting for DGID: %pI6\n",
+ &gid);
+}
+
+int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
+ struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
+ !(NETLINK_CB(skb).sk))
+ return -EPERM;
+
+ if (ib_nl_is_good_ip_resp(nlh))
+ ib_nl_process_good_ip_rsep(nlh);
+
+ return 0;
+}
+
+static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
+ const void *daddr,
+ u32 seq, u16 family)
+{
+ struct sk_buff *skb = NULL;
+ struct nlmsghdr *nlh;
+ struct rdma_ls_ip_resolve_header *header;
+ void *data;
+ size_t size;
+ int attrtype;
+ int len;
+
+ if (family == AF_INET) {
+ size = sizeof(struct in_addr);
+ attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV4;
+ } else {
+ size = sizeof(struct in6_addr);
+ attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV6;
+ }
+
+ len = nla_total_size(sizeof(size));
+ len += NLMSG_ALIGN(sizeof(*header));
+
+ skb = nlmsg_new(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ data = ibnl_put_msg(skb, &nlh, seq, 0, RDMA_NL_LS,
+ RDMA_NL_LS_OP_IP_RESOLVE, NLM_F_REQUEST);
+ if (!data) {
+ nlmsg_free(skb);
+ return -ENODATA;
+ }
+
+ /* Construct the family header first */
+ header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
+ header->ifindex = dev_addr->bound_dev_if;
+ nla_put(skb, attrtype, size, daddr);
+
+ /* Repair the nlmsg header length */
+ nlmsg_end(skb, nlh);
+ rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, GFP_KERNEL);
+
+ /* Make the request retry, so when we get the response from userspace
+ * we will have something.
+ */
+ return -ENODATA;
+}
+
+int rdma_addr_size(const struct sockaddr *addr)
{
switch (addr->sa_family) {
case AF_INET:
@@ -86,92 +207,95 @@ int rdma_addr_size(struct sockaddr *addr)
}
EXPORT_SYMBOL(rdma_addr_size);
-static struct rdma_addr_client self;
-
-void rdma_addr_register_client(struct rdma_addr_client *client)
+int rdma_addr_size_in6(struct sockaddr_in6 *addr)
{
- atomic_set(&client->refcount, 1);
- init_completion(&client->comp);
-}
-EXPORT_SYMBOL(rdma_addr_register_client);
+ int ret = rdma_addr_size((struct sockaddr *) addr);
-static inline void put_client(struct rdma_addr_client *client)
-{
- if (atomic_dec_and_test(&client->refcount))
- complete(&client->comp);
+ return ret <= sizeof(*addr) ? ret : 0;
}
+EXPORT_SYMBOL(rdma_addr_size_in6);
-void rdma_addr_unregister_client(struct rdma_addr_client *client)
+int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr)
{
- put_client(client);
- wait_for_completion(&client->comp);
+ int ret = rdma_addr_size((struct sockaddr *) addr);
+
+ return ret <= sizeof(*addr) ? ret : 0;
}
-EXPORT_SYMBOL(rdma_addr_unregister_client);
+EXPORT_SYMBOL(rdma_addr_size_kss);
-int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
- const unsigned char *dst_dev_addr)
+/**
+ * rdma_copy_src_l2_addr - Copy netdevice source addresses
+ * @dev_addr: Destination address pointer where to copy the addresses
+ * @dev: Netdevice whose source addresses to copy
+ *
+ * rdma_copy_src_l2_addr() copies source addresses from the specified netdevice.
+ * This includes unicast address, broadcast address, device type and
+ * interface index.
+ */
+void rdma_copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
+ const struct net_device *dev)
{
dev_addr->dev_type = dev->type;
memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
memcpy(dev_addr->broadcast, dev->broadcast, MAX_ADDR_LEN);
- if (dst_dev_addr)
- memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN);
dev_addr->bound_dev_if = dev->ifindex;
- return 0;
}
-EXPORT_SYMBOL(rdma_copy_addr);
+EXPORT_SYMBOL(rdma_copy_src_l2_addr);
-int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr,
- u16 *vlan_id)
+static struct net_device *
+rdma_find_ndev_for_src_ip_rcu(struct net *net, const struct sockaddr *src_in)
{
- struct net_device *dev;
+ struct net_device *dev = NULL;
int ret = -EADDRNOTAVAIL;
- if (dev_addr->bound_dev_if) {
- dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
- if (!dev)
- return -ENODEV;
- ret = rdma_copy_addr(dev_addr, dev, NULL);
- dev_put(dev);
- return ret;
- }
-
- switch (addr->sa_family) {
+ switch (src_in->sa_family) {
case AF_INET:
- dev = ip_dev_find(&init_net,
- ((struct sockaddr_in *) addr)->sin_addr.s_addr);
-
- if (!dev)
- return ret;
-
- ret = rdma_copy_addr(dev_addr, dev, NULL);
- if (vlan_id)
- *vlan_id = rdma_vlan_dev_vlan_id(dev);
- dev_put(dev);
+ dev = __ip_dev_find(net,
+ ((const struct sockaddr_in *)src_in)->sin_addr.s_addr,
+ false);
+ if (dev)
+ ret = 0;
break;
-
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- rcu_read_lock();
- for_each_netdev_rcu(&init_net, dev) {
- if (ipv6_chk_addr(&init_net,
- &((struct sockaddr_in6 *) addr)->sin6_addr,
+ for_each_netdev_rcu(net, dev) {
+ if (ipv6_chk_addr(net,
+ &((const struct sockaddr_in6 *)src_in)->sin6_addr,
dev, 1)) {
- ret = rdma_copy_addr(dev_addr, dev, NULL);
- if (vlan_id)
- *vlan_id = rdma_vlan_dev_vlan_id(dev);
+ ret = 0;
break;
}
}
- rcu_read_unlock();
break;
#endif
}
- return ret;
+ return ret ? ERR_PTR(ret) : dev;
+}
+
+int rdma_translate_ip(const struct sockaddr *addr,
+ struct rdma_dev_addr *dev_addr)
+{
+ struct net_device *dev;
+
+ if (dev_addr->bound_dev_if) {
+ dev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
+ if (!dev)
+ return -ENODEV;
+ rdma_copy_src_l2_addr(dev_addr, dev);
+ dev_put(dev);
+ return 0;
+ }
+
+ rcu_read_lock();
+ dev = rdma_find_ndev_for_src_ip_rcu(dev_addr->net, addr);
+ if (!IS_ERR(dev))
+ rdma_copy_src_l2_addr(dev_addr, dev);
+ rcu_read_unlock();
+ return PTR_ERR_OR_ZERO(dev);
}
EXPORT_SYMBOL(rdma_translate_ip);
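A hedged sketch of the updated calling convention: the namespace now comes from dev_addr->net and the vlan_id out-parameter is gone; translate_src_addr() is a hypothetical wrapper.

static int translate_src_addr(const struct sockaddr *src,
			      struct rdma_dev_addr *dev_addr)
{
	memset(dev_addr, 0, sizeof(*dev_addr));
	dev_addr->net = &init_net;	/* or the caller's own namespace */
	return rdma_translate_ip(src, dev_addr);
}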
-static void set_timeout(unsigned long time)
+static void set_timeout(struct addr_req *req, unsigned long time)
{
unsigned long delay;
@@ -179,53 +303,87 @@ static void set_timeout(unsigned long time)
if ((long)delay < 0)
delay = 0;
- mod_delayed_work(addr_wq, &work, delay);
+ mod_delayed_work(addr_wq, &req->work, delay);
}
static void queue_req(struct addr_req *req)
{
- struct addr_req *temp_req;
-
- mutex_lock(&lock);
- list_for_each_entry_reverse(temp_req, &req_list, list) {
- if (time_after_eq(req->timeout, temp_req->timeout))
- break;
- }
+ spin_lock_bh(&lock);
+ list_add_tail(&req->list, &req_list);
+ set_timeout(req, req->timeout);
+ spin_unlock_bh(&lock);
+}
- list_add(&req->list, &temp_req->list);
+static int ib_nl_fetch_ha(struct rdma_dev_addr *dev_addr,
+ const void *daddr, u32 seq, u16 family)
+{
+ if (!rdma_nl_chk_listeners(RDMA_NL_GROUP_LS))
+ return -EADDRNOTAVAIL;
- if (req_list.next == &req->list)
- set_timeout(req->timeout);
- mutex_unlock(&lock);
+ return ib_nl_ip_send_msg(dev_addr, daddr, seq, family);
}
-static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr, void *daddr)
+static int dst_fetch_ha(const struct dst_entry *dst,
+ struct rdma_dev_addr *dev_addr,
+ const void *daddr)
{
struct neighbour *n;
- int ret;
+ int ret = 0;
n = dst_neigh_lookup(dst, daddr);
+ if (!n)
+ return -ENODATA;
- rcu_read_lock();
- if (!n || !(n->nud_state & NUD_VALID)) {
- if (n)
- neigh_event_send(n, NULL);
+ if (!(n->nud_state & NUD_VALID)) {
+ neigh_event_send(n, NULL);
ret = -ENODATA;
} else {
- ret = rdma_copy_addr(dev_addr, dst->dev, n->ha);
+ neigh_ha_snapshot(dev_addr->dst_dev_addr, n, dst->dev);
}
- rcu_read_unlock();
- if (n)
- neigh_release(n);
+ neigh_release(n);
return ret;
}
-static int addr4_resolve(struct sockaddr_in *src_in,
- struct sockaddr_in *dst_in,
- struct rdma_dev_addr *addr)
+static bool has_gateway(const struct dst_entry *dst, sa_family_t family)
+{
+ if (family == AF_INET)
+ return dst_rtable(dst)->rt_uses_gateway;
+
+ return dst_rt6_info(dst)->rt6i_flags & RTF_GATEWAY;
+}
+
+static int fetch_ha(const struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
+ const struct sockaddr *dst_in, u32 seq)
{
+ const struct sockaddr_in *dst_in4 =
+ (const struct sockaddr_in *)dst_in;
+ const struct sockaddr_in6 *dst_in6 =
+ (const struct sockaddr_in6 *)dst_in;
+ const void *daddr = (dst_in->sa_family == AF_INET) ?
+ (const void *)&dst_in4->sin_addr.s_addr :
+ (const void *)&dst_in6->sin6_addr;
+ sa_family_t family = dst_in->sa_family;
+
+ might_sleep();
+
+ /* If we have a gateway in IB mode then it must be an IB network */
+ if (has_gateway(dst, family) && dev_addr->network == RDMA_NETWORK_IB)
+ return ib_nl_fetch_ha(dev_addr, daddr, seq, family);
+ else
+ return dst_fetch_ha(dst, dev_addr, daddr);
+}
+
+static int addr4_resolve(struct sockaddr *src_sock,
+ const struct sockaddr *dst_sock,
+ struct rdma_dev_addr *addr,
+ struct rtable **prt)
+{
+ struct sockaddr_in *src_in = (struct sockaddr_in *)src_sock;
+ const struct sockaddr_in *dst_in =
+ (const struct sockaddr_in *)dst_sock;
+
__be32 src_ip = src_in->sin_addr.s_addr;
__be32 dst_ip = dst_in->sin_addr.s_addr;
struct rtable *rt;
@@ -236,144 +394,266 @@ static int addr4_resolve(struct sockaddr_in *src_in,
fl4.daddr = dst_ip;
fl4.saddr = src_ip;
fl4.flowi4_oif = addr->bound_dev_if;
- rt = ip_route_output_key(&init_net, &fl4);
- if (IS_ERR(rt)) {
- ret = PTR_ERR(rt);
- goto out;
- }
- src_in->sin_family = AF_INET;
- src_in->sin_addr.s_addr = fl4.saddr;
+ rt = ip_route_output_key(addr->net, &fl4);
+ ret = PTR_ERR_OR_ZERO(rt);
+ if (ret)
+ return ret;
- if (rt->dst.dev->flags & IFF_LOOPBACK) {
- ret = rdma_translate_ip((struct sockaddr *)dst_in, addr, NULL);
- if (!ret)
- memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
- goto put;
- }
+ src_in->sin_addr.s_addr = fl4.saddr;
- /* If the device does ARP internally, return 'done' */
- if (rt->dst.dev->flags & IFF_NOARP) {
- ret = rdma_copy_addr(addr, rt->dst.dev, NULL);
- goto put;
- }
+ addr->hoplimit = ip4_dst_hoplimit(&rt->dst);
- ret = dst_fetch_ha(&rt->dst, addr, &fl4.daddr);
-put:
- ip_rt_put(rt);
-out:
- return ret;
+ *prt = rt;
+ return 0;
}
#if IS_ENABLED(CONFIG_IPV6)
-static int addr6_resolve(struct sockaddr_in6 *src_in,
- struct sockaddr_in6 *dst_in,
- struct rdma_dev_addr *addr)
+static int addr6_resolve(struct sockaddr *src_sock,
+ const struct sockaddr *dst_sock,
+ struct rdma_dev_addr *addr,
+ struct dst_entry **pdst)
{
+ struct sockaddr_in6 *src_in = (struct sockaddr_in6 *)src_sock;
+ const struct sockaddr_in6 *dst_in =
+ (const struct sockaddr_in6 *)dst_sock;
struct flowi6 fl6;
struct dst_entry *dst;
- int ret;
memset(&fl6, 0, sizeof fl6);
fl6.daddr = dst_in->sin6_addr;
fl6.saddr = src_in->sin6_addr;
fl6.flowi6_oif = addr->bound_dev_if;
- dst = ip6_route_output(&init_net, NULL, &fl6);
- if ((ret = dst->error))
- goto put;
+ dst = ipv6_stub->ipv6_dst_lookup_flow(addr->net, NULL, &fl6, NULL);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
- if (ipv6_addr_any(&fl6.saddr)) {
- ret = ipv6_dev_get_saddr(&init_net, ip6_dst_idev(dst)->dev,
- &fl6.daddr, 0, &fl6.saddr);
- if (ret)
- goto put;
-
- src_in->sin6_family = AF_INET6;
+ if (ipv6_addr_any(&src_in->sin6_addr))
src_in->sin6_addr = fl6.saddr;
- }
- if (dst->dev->flags & IFF_LOOPBACK) {
- ret = rdma_translate_ip((struct sockaddr *)dst_in, addr, NULL);
- if (!ret)
- memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
- goto put;
- }
-
- /* If the device does ARP internally, return 'done' */
- if (dst->dev->flags & IFF_NOARP) {
- ret = rdma_copy_addr(addr, dst->dev, NULL);
- goto put;
- }
+ addr->hoplimit = ip6_dst_hoplimit(dst);
- ret = dst_fetch_ha(dst, addr, &fl6.daddr);
-put:
- dst_release(dst);
- return ret;
+ *pdst = dst;
+ return 0;
}
#else
-static int addr6_resolve(struct sockaddr_in6 *src_in,
- struct sockaddr_in6 *dst_in,
- struct rdma_dev_addr *addr)
+static int addr6_resolve(struct sockaddr *src_sock,
+ const struct sockaddr *dst_sock,
+ struct rdma_dev_addr *addr,
+ struct dst_entry **pdst)
{
return -EADDRNOTAVAIL;
}
#endif
-static int addr_resolve(struct sockaddr *src_in,
- struct sockaddr *dst_in,
- struct rdma_dev_addr *addr)
+static bool is_dst_local(const struct dst_entry *dst)
{
- if (src_in->sa_family == AF_INET) {
- return addr4_resolve((struct sockaddr_in *) src_in,
- (struct sockaddr_in *) dst_in, addr);
- } else
- return addr6_resolve((struct sockaddr_in6 *) src_in,
- (struct sockaddr_in6 *) dst_in, addr);
+ if (dst->ops->family == AF_INET)
+ return !!(dst_rtable(dst)->rt_type & RTN_LOCAL);
+ else if (dst->ops->family == AF_INET6)
+ return !!(dst_rt6_info(dst)->rt6i_flags & RTF_LOCAL);
+ else
+ return false;
}
-static void process_req(struct work_struct *work)
+static int addr_resolve_neigh(const struct dst_entry *dst,
+ const struct sockaddr *dst_in,
+ struct rdma_dev_addr *addr,
+ u32 seq)
{
- struct addr_req *req, *temp_req;
- struct sockaddr *src_in, *dst_in;
- struct list_head done_list;
+ if (is_dst_local(dst)) {
+ /* When the destination is a local entry, source and destination
+ * are the same. Skip the neighbour lookup.
+ */
+ memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
+ return 0;
+ }
- INIT_LIST_HEAD(&done_list);
+ return fetch_ha(dst, addr, dst_in, seq);
+}
- mutex_lock(&lock);
- list_for_each_entry_safe(req, temp_req, &req_list, list) {
- if (req->status == -ENODATA) {
- src_in = (struct sockaddr *) &req->src_addr;
- dst_in = (struct sockaddr *) &req->dst_addr;
- req->status = addr_resolve(src_in, dst_in, req->addr);
- if (req->status && time_after_eq(jiffies, req->timeout))
- req->status = -ETIMEDOUT;
- else if (req->status == -ENODATA)
- continue;
+static int rdma_set_src_addr_rcu(struct rdma_dev_addr *dev_addr,
+ const struct sockaddr *dst_in,
+ const struct dst_entry *dst)
+{
+ struct net_device *ndev = READ_ONCE(dst->dev);
+
+ /* A physical device must be the RDMA device to use */
+ if (is_dst_local(dst)) {
+ int ret;
+ /*
+ * RDMA (IB/RoCE, iWarp) doesn't run on lo interface or
+ * loopback IP address. So if route is resolved to loopback
+ * interface, translate that to a real ndev based on non
+ * loopback IP address.
+ */
+ ndev = rdma_find_ndev_for_src_ip_rcu(dev_net(ndev), dst_in);
+ if (IS_ERR(ndev))
+ return -ENODEV;
+ ret = rdma_translate_ip(dst_in, dev_addr);
+ if (ret)
+ return ret;
+ } else {
+ rdma_copy_src_l2_addr(dev_addr, dst->dev);
+ }
+
+ /*
+ * If there's a gateway and the device type is not ARPHRD_INFINIBAND,
+ * we're definitely in RoCE v2 (as RoCE v1 isn't routable), so set the
+ * network type accordingly.
+ */
+ if (has_gateway(dst, dst_in->sa_family) &&
+ ndev->type != ARPHRD_INFINIBAND)
+ dev_addr->network = dst_in->sa_family == AF_INET ?
+ RDMA_NETWORK_IPV4 :
+ RDMA_NETWORK_IPV6;
+ else
+ dev_addr->network = RDMA_NETWORK_IB;
+
+ return 0;
+}
+
+static int set_addr_netns_by_gid_rcu(struct rdma_dev_addr *addr)
+{
+ struct net_device *ndev;
+
+ ndev = rdma_read_gid_attr_ndev_rcu(addr->sgid_attr);
+ if (IS_ERR(ndev))
+ return PTR_ERR(ndev);
+
+ /*
+ * Since we are holding the rcu read lock, reading net and ifindex
+ * is safe without any additional reference, because
+ * change_net_namespace() in net/core/dev.c does an rcu sync
+ * after it changes the state to IFF_DOWN and before
+ * updating netdev fields {net, ifindex}.
+ */
+ addr->net = dev_net(ndev);
+ addr->bound_dev_if = ndev->ifindex;
+ return 0;
+}
+
+static void rdma_addr_set_net_defaults(struct rdma_dev_addr *addr)
+{
+ addr->net = &init_net;
+ addr->bound_dev_if = 0;
+}
+
+static int addr_resolve(struct sockaddr *src_in,
+ const struct sockaddr *dst_in,
+ struct rdma_dev_addr *addr,
+ bool resolve_neigh,
+ bool resolve_by_gid_attr,
+ u32 seq)
+{
+ struct dst_entry *dst = NULL;
+ struct rtable *rt = NULL;
+ int ret;
+
+ if (!addr->net) {
+ pr_warn_ratelimited("%s: missing namespace\n", __func__);
+ return -EINVAL;
+ }
+
+ rcu_read_lock();
+ if (resolve_by_gid_attr) {
+ if (!addr->sgid_attr) {
+ rcu_read_unlock();
+ pr_warn_ratelimited("%s: missing gid_attr\n", __func__);
+ return -EINVAL;
+ }
+ /*
+ * If the request is for a specific gid attribute of the
+ * rdma_dev_addr, derive net from the netdevice of the
+ * GID attribute.
+ */
+ ret = set_addr_netns_by_gid_rcu(addr);
+ if (ret) {
+ rcu_read_unlock();
+ return ret;
}
- list_move_tail(&req->list, &done_list);
}
+ if (src_in->sa_family == AF_INET) {
+ ret = addr4_resolve(src_in, dst_in, addr, &rt);
+ dst = &rt->dst;
+ } else {
+ ret = addr6_resolve(src_in, dst_in, addr, &dst);
+ }
+ if (ret) {
+ rcu_read_unlock();
+ goto done;
+ }
+ ret = rdma_set_src_addr_rcu(addr, dst_in, dst);
+ rcu_read_unlock();
- if (!list_empty(&req_list)) {
- req = list_entry(req_list.next, struct addr_req, list);
- set_timeout(req->timeout);
+ /*
+ * Resolve neighbor destination address if requested and
+ * only if src addr translation didn't fail.
+ */
+ if (!ret && resolve_neigh)
+ ret = addr_resolve_neigh(dst, dst_in, addr, seq);
+
+ if (src_in->sa_family == AF_INET)
+ ip_rt_put(rt);
+ else
+ dst_release(dst);
+done:
+ /*
+ * Clear the addr net to go back to its original state, only if it was
+ * derived from GID attribute in this context.
+ */
+ if (resolve_by_gid_attr)
+ rdma_addr_set_net_defaults(addr);
+ return ret;
+}
+
+static void process_one_req(struct work_struct *_work)
+{
+ struct addr_req *req;
+ struct sockaddr *src_in, *dst_in;
+
+ req = container_of(_work, struct addr_req, work.work);
+
+ if (req->status == -ENODATA) {
+ src_in = (struct sockaddr *)&req->src_addr;
+ dst_in = (struct sockaddr *)&req->dst_addr;
+ req->status = addr_resolve(src_in, dst_in, req->addr,
+ true, req->resolve_by_gid_attr,
+ req->seq);
+ if (req->status && time_after_eq(jiffies, req->timeout)) {
+ req->status = -ETIMEDOUT;
+ } else if (req->status == -ENODATA) {
+ /* requeue the work to retry later */
+ spin_lock_bh(&lock);
+ if (!list_empty(&req->list))
+ set_timeout(req, req->timeout);
+ spin_unlock_bh(&lock);
+ return;
+ }
}
- mutex_unlock(&lock);
- list_for_each_entry_safe(req, temp_req, &done_list, list) {
- list_del(&req->list);
- req->callback(req->status, (struct sockaddr *) &req->src_addr,
- req->addr, req->context);
- put_client(req->client);
+ req->callback(req->status, (struct sockaddr *)&req->src_addr,
+ req->addr, req->context);
+ req->callback = NULL;
+
+ spin_lock_bh(&lock);
+ /*
+ * Although the work will normally have been canceled by the workqueue,
+ * it can still be requeued as long as it is on the req_list.
+ */
+ cancel_delayed_work(&req->work);
+ if (!list_empty(&req->list)) {
+ list_del_init(&req->list);
kfree(req);
}
+ spin_unlock_bh(&lock);
}
-int rdma_resolve_ip(struct rdma_addr_client *client,
- struct sockaddr *src_addr, struct sockaddr *dst_addr,
- struct rdma_dev_addr *addr, int timeout_ms,
+int rdma_resolve_ip(struct sockaddr *src_addr, const struct sockaddr *dst_addr,
+ struct rdma_dev_addr *addr, unsigned long timeout_ms,
void (*callback)(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context),
- void *context)
+ bool resolve_by_gid_attr, void *context)
{
struct sockaddr *src_in, *dst_in;
struct addr_req *req;
@@ -401,10 +681,12 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
req->addr = addr;
req->callback = callback;
req->context = context;
- req->client = client;
- atomic_inc(&client->refcount);
+ req->resolve_by_gid_attr = resolve_by_gid_attr;
+ INIT_DELAYED_WORK(&req->work, process_one_req);
+ req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);
- req->status = addr_resolve(src_in, dst_in, addr);
+ req->status = addr_resolve(src_in, dst_in, addr, true,
+ req->resolve_by_gid_attr, req->seq);
switch (req->status) {
case 0:
req->timeout = jiffies;
@@ -416,7 +698,6 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
break;
default:
ret = req->status;
- atomic_dec(&client->refcount);
goto err;
}
return ret;
@@ -426,107 +707,149 @@ err:
}
EXPORT_SYMBOL(rdma_resolve_ip);
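A hedged sketch of the updated rdma_resolve_ip() calling convention (the client argument is gone, resolve_by_gid_attr is added); it mirrors the resolve_cb pattern used by rdma_addr_find_l2_eth_by_grh() further down, and struct my_ctx / resolve_sync() are hypothetical names.

struct my_ctx {
	struct completion comp;
	int status;
};

static void my_resolve_cb(int status, struct sockaddr *src_addr,
			  struct rdma_dev_addr *addr, void *context)
{
	struct my_ctx *ctx = context;

	ctx->status = status;
	complete(&ctx->comp);
}

static int resolve_sync(struct sockaddr *src, const struct sockaddr *dst,
			struct rdma_dev_addr *dev_addr)
{
	struct my_ctx ctx = {};
	int ret;

	init_completion(&ctx.comp);
	ret = rdma_resolve_ip(src, dst, dev_addr, 1000, my_resolve_cb,
			      false, &ctx);
	if (ret)
		return ret;
	wait_for_completion(&ctx.comp);
	return ctx.status;
}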
+int roce_resolve_route_from_path(struct sa_path_rec *rec,
+ const struct ib_gid_attr *attr)
+{
+ union {
+ struct sockaddr _sockaddr;
+ struct sockaddr_in _sockaddr_in;
+ struct sockaddr_in6 _sockaddr_in6;
+ } sgid, dgid;
+ struct rdma_dev_addr dev_addr = {};
+ int ret;
+
+ might_sleep();
+
+ if (rec->roce.route_resolved)
+ return 0;
+
+ rdma_gid2ip((struct sockaddr *)&sgid, &rec->sgid);
+ rdma_gid2ip((struct sockaddr *)&dgid, &rec->dgid);
+
+ if (sgid._sockaddr.sa_family != dgid._sockaddr.sa_family)
+ return -EINVAL;
+
+ if (!attr || !attr->ndev)
+ return -EINVAL;
+
+ dev_addr.net = &init_net;
+ dev_addr.sgid_attr = attr;
+
+ ret = addr_resolve((struct sockaddr *)&sgid, (struct sockaddr *)&dgid,
+ &dev_addr, false, true, 0);
+ if (ret)
+ return ret;
+
+ if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
+ dev_addr.network == RDMA_NETWORK_IPV6) &&
+ rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2)
+ return -EINVAL;
+
+ rec->roce.route_resolved = true;
+ return 0;
+}
+
+/**
+ * rdma_addr_cancel - Cancel resolve ip request
+ * @addr: Pointer to address structure given previously
+ * during rdma_resolve_ip().
+ * rdma_addr_cancel() is a synchronous function that cancels any pending
+ * request, if there is one.
+ */
void rdma_addr_cancel(struct rdma_dev_addr *addr)
{
struct addr_req *req, *temp_req;
+ struct addr_req *found = NULL;
- mutex_lock(&lock);
+ spin_lock_bh(&lock);
list_for_each_entry_safe(req, temp_req, &req_list, list) {
if (req->addr == addr) {
- req->status = -ECANCELED;
- req->timeout = jiffies;
- list_move(&req->list, &req_list);
- set_timeout(req->timeout);
+ /*
+ * Removing from the list means we take ownership of
+ * the req
+ */
+ list_del_init(&req->list);
+ found = req;
break;
}
}
- mutex_unlock(&lock);
+ spin_unlock_bh(&lock);
+
+ if (!found)
+ return;
+
+ /*
+ * sync canceling the work after removing it from the req_list
+ * guarantees no work is running and none will be started.
+ */
+ cancel_delayed_work_sync(&found->work);
+ kfree(found);
}
EXPORT_SYMBOL(rdma_addr_cancel);
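A small hedged sketch (not in the diff): because cancellation is now synchronous and keyed by the rdma_dev_addr, a caller can quiesce outstanding work before freeing the containing object; struct my_conn is hypothetical.

struct my_conn {
	struct rdma_dev_addr dev_addr;
	/* ... */
};

static void my_conn_destroy(struct my_conn *conn)
{
	/* returns only after any in-flight resolve work has finished */
	rdma_addr_cancel(&conn->dev_addr);
	kfree(conn);
}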
struct resolve_cb_context {
- struct rdma_dev_addr *addr;
struct completion comp;
+ int status;
};
static void resolve_cb(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context)
{
- memcpy(((struct resolve_cb_context *)context)->addr, addr, sizeof(struct
- rdma_dev_addr));
+ ((struct resolve_cb_context *)context)->status = status;
complete(&((struct resolve_cb_context *)context)->comp);
}
-int rdma_addr_find_dmac_by_grh(const union ib_gid *sgid, const union ib_gid *dgid,
- u8 *dmac, u16 *vlan_id)
+int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
+ const union ib_gid *dgid,
+ u8 *dmac, const struct ib_gid_attr *sgid_attr,
+ int *hoplimit)
{
- int ret = 0;
struct rdma_dev_addr dev_addr;
struct resolve_cb_context ctx;
- struct net_device *dev;
-
union {
- struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
struct sockaddr_in6 _sockaddr_in6;
} sgid_addr, dgid_addr;
+ int ret;
-
- rdma_gid2ip(&sgid_addr._sockaddr, sgid);
- rdma_gid2ip(&dgid_addr._sockaddr, dgid);
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid);
+ rdma_gid2ip((struct sockaddr *)&dgid_addr, dgid);
memset(&dev_addr, 0, sizeof(dev_addr));
+ dev_addr.net = &init_net;
+ dev_addr.sgid_attr = sgid_attr;
- ctx.addr = &dev_addr;
init_completion(&ctx.comp);
- ret = rdma_resolve_ip(&self, &sgid_addr._sockaddr, &dgid_addr._sockaddr,
- &dev_addr, 1000, resolve_cb, &ctx);
+ ret = rdma_resolve_ip((struct sockaddr *)&sgid_addr,
+ (struct sockaddr *)&dgid_addr, &dev_addr, 1000,
+ resolve_cb, true, &ctx);
if (ret)
return ret;
wait_for_completion(&ctx.comp);
- memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
- dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if);
- if (!dev)
- return -ENODEV;
- if (vlan_id)
- *vlan_id = rdma_vlan_dev_vlan_id(dev);
- dev_put(dev);
- return ret;
-}
-EXPORT_SYMBOL(rdma_addr_find_dmac_by_grh);
-
-int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
-{
- int ret = 0;
- struct rdma_dev_addr dev_addr;
- union {
- struct sockaddr _sockaddr;
- struct sockaddr_in _sockaddr_in;
- struct sockaddr_in6 _sockaddr_in6;
- } gid_addr;
-
- rdma_gid2ip(&gid_addr._sockaddr, sgid);
-
- memset(&dev_addr, 0, sizeof(dev_addr));
- ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
+ ret = ctx.status;
if (ret)
return ret;
- memcpy(smac, dev_addr.src_dev_addr, ETH_ALEN);
- return ret;
+ memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
+ *hoplimit = dev_addr.hoplimit;
+ return 0;
}
-EXPORT_SYMBOL(rdma_addr_find_smac_by_sgid);
static int netevent_callback(struct notifier_block *self, unsigned long event,
void *ctx)
{
+ struct addr_req *req;
+
if (event == NETEVENT_NEIGH_UPDATE) {
struct neighbour *neigh = ctx;
if (neigh->nud_state & NUD_VALID) {
- set_timeout(jiffies);
+ spin_lock_bh(&lock);
+ list_for_each_entry(req, &req_list, list)
+ set_timeout(req, jiffies);
+ spin_unlock_bh(&lock);
}
}
return 0;
@@ -536,23 +859,20 @@ static struct notifier_block nb = {
.notifier_call = netevent_callback
};
-static int __init addr_init(void)
+int addr_init(void)
{
- addr_wq = create_singlethread_workqueue("ib_addr");
+ addr_wq = alloc_ordered_workqueue("ib_addr", 0);
if (!addr_wq)
return -ENOMEM;
register_netevent_notifier(&nb);
- rdma_addr_register_client(&self);
+
return 0;
}
-static void __exit addr_cleanup(void)
+void addr_cleanup(void)
{
- rdma_addr_unregister_client(&self);
unregister_netevent_notifier(&nb);
destroy_workqueue(addr_wq);
+ WARN_ON(!list_empty(&req_list));
}
-
-module_init(addr_init);
-module_exit(addr_cleanup);
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index 0429040304fd..25a060a28301 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -59,7 +59,16 @@ __ib_get_agent_port(const struct ib_device *device, int port_num)
struct ib_agent_port_private *entry;
list_for_each_entry(entry, &ib_agent_port_list, port_list) {
- if (entry->agent[1]->device == device &&
+ /* Need to check both agent[0] and agent[1], as an agent port
+ * may only have one of them
+ */
+ if (entry->agent[0] &&
+ entry->agent[0]->device == device &&
+ entry->agent[0]->port_num == port_num)
+ return entry;
+
+ if (entry->agent[1] &&
+ entry->agent[1]->device == device &&
entry->agent[1]->port_num == port_num)
return entry;
}
@@ -101,8 +110,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
agent = port_priv->agent[qpn];
ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num);
if (IS_ERR(ah)) {
- dev_err(&device->dev, "ib_create_ah_from_wc error %ld\n",
- PTR_ERR(ah));
+ dev_err(&device->dev, "ib_create_ah_from_wc error %pe\n", ah);
return;
}
@@ -126,7 +134,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
mad_send_wr = container_of(send_buf,
struct ib_mad_send_wr_private,
send_buf);
- mad_send_wr->send_wr.wr.ud.port_num = port_num;
+ mad_send_wr->send_wr.port_num = port_num;
}
if (ib_post_send_mad(send_buf, NULL)) {
@@ -137,13 +145,13 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
err2:
ib_free_send_mad(send_buf);
err1:
- ib_destroy_ah(ah);
+ rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
}
static void agent_send_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_send_wc *mad_send_wc)
{
- ib_destroy_ah(mad_send_wc->send_buf->ah);
+ rdma_destroy_ah(mad_send_wc->send_buf->ah, RDMA_DESTROY_AH_SLEEPABLE);
ib_free_send_mad(mad_send_wc->send_buf);
}
@@ -156,7 +164,6 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
/* Create new device info */
port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
if (!port_priv) {
- dev_err(&device->dev, "No memory for ib_agent_port_private\n");
ret = -ENOMEM;
goto error1;
}
@@ -173,14 +180,16 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
}
}
- /* Obtain send only MAD agent for GSI QP */
- port_priv->agent[1] = ib_register_mad_agent(device, port_num,
- IB_QPT_GSI, NULL, 0,
- &agent_send_handler,
- NULL, NULL, 0);
- if (IS_ERR(port_priv->agent[1])) {
- ret = PTR_ERR(port_priv->agent[1]);
- goto error3;
+ if (rdma_cap_ib_cm(device, port_num)) {
+ /* Obtain send only MAD agent for GSI QP */
+ port_priv->agent[1] = ib_register_mad_agent(device, port_num,
+ IB_QPT_GSI, NULL, 0,
+ &agent_send_handler,
+ NULL, NULL, 0);
+ if (IS_ERR(port_priv->agent[1])) {
+ ret = PTR_ERR(port_priv->agent[1]);
+ goto error3;
+ }
}
spin_lock_irqsave(&ib_agent_port_list_lock, flags);
@@ -213,7 +222,8 @@ int ib_agent_port_close(struct ib_device *device, int port_num)
list_del(&port_priv->port_list);
spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
- ib_unregister_mad_agent(port_priv->agent[1]);
+ if (port_priv->agent[1])
+ ib_unregister_mad_agent(port_priv->agent[1]);
if (port_priv->agent[0])
ib_unregister_mad_agent(port_priv->agent[0]);
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 8f66c67ff0df..81cf3c902e81 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -33,7 +33,7 @@
* SOFTWARE.
*/
-#include <linux/module.h>
+#include <linux/if_vlan.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
@@ -46,53 +46,58 @@
struct ib_pkey_cache {
int table_len;
- u16 table[0];
+ u16 table[] __counted_by(table_len);
};
struct ib_update_work {
struct work_struct work;
- struct ib_device *device;
- u8 port_num;
+ struct ib_event event;
+ bool enforce_security;
};
union ib_gid zgid;
EXPORT_SYMBOL(zgid);
-static const struct ib_gid_attr zattr;
-
enum gid_attr_find_mask {
GID_ATTR_FIND_MASK_GID = 1UL << 0,
GID_ATTR_FIND_MASK_NETDEV = 1UL << 1,
GID_ATTR_FIND_MASK_DEFAULT = 1UL << 2,
+ GID_ATTR_FIND_MASK_GID_TYPE = 1UL << 3,
};
-enum gid_table_entry_props {
- GID_TABLE_ENTRY_INVALID = 1UL << 0,
- GID_TABLE_ENTRY_DEFAULT = 1UL << 1,
+enum gid_table_entry_state {
+ GID_TABLE_ENTRY_INVALID = 1,
+ GID_TABLE_ENTRY_VALID = 2,
+ /*
+ * Indicates that entry is pending to be removed, there may
+ * be active users of this GID entry.
+ * When last user of the GID entry releases reference to it,
+ * GID entry is detached from the table.
+ */
+ GID_TABLE_ENTRY_PENDING_DEL = 3,
};
-enum gid_table_write_action {
- GID_TABLE_WRITE_ACTION_ADD,
- GID_TABLE_WRITE_ACTION_DEL,
- /* MODIFY only updates the GID table. Currently only used by
- * ib_cache_update.
- */
- GID_TABLE_WRITE_ACTION_MODIFY
+struct roce_gid_ndev_storage {
+ struct rcu_head rcu_head;
+ struct net_device *ndev;
};
struct ib_gid_table_entry {
- /* This lock protects an entry from being
- * read and written simultaneously.
+ struct kref kref;
+ struct work_struct del_work;
+ struct ib_gid_attr attr;
+ void *context;
+ /* Store the ndev pointer to release reference later on in
+ * call_rcu context because by that time gid_table_entry
+ * and attr might already have been freed. So keep a copy of it.
+ * ndev_storage is freed by rcu callback.
*/
- rwlock_t lock;
- unsigned long props;
- union ib_gid gid;
- struct ib_gid_attr attr;
- void *context;
+ struct roce_gid_ndev_storage *ndev_storage;
+ enum gid_table_entry_state state;
};
struct ib_gid_table {
- int sz;
+ int sz;
/* In RoCE, adding a GID to the table requires:
* (a) Find if this GID is already exists.
* (b) Find a free space.
@@ -102,140 +107,434 @@ struct ib_gid_table {
* (a) Find the GID
* (b) Delete it.
*
- * Add/delete should be carried out atomically.
- * This is done by locking this mutex from multiple
- * writers. We don't need this lock for IB, as the MAD
- * layer replaces all entries. All data_vec entries
- * are locked by this lock.
**/
- struct mutex lock;
- struct ib_gid_table_entry *data_vec;
+ /* Any writer to data_vec must hold this lock and the write side of
+ * rwlock. Readers must hold only rwlock. All writers must be in a
+ * sleepable context.
+ */
+ struct mutex lock;
+ /* rwlock protects data_vec[ix]->state and entry pointer.
+ */
+ rwlock_t rwlock;
+ struct ib_gid_table_entry **data_vec;
+ /* bit field, each bit indicates the index of default GID */
+ u32 default_gid_indices;
};
-static int write_gid(struct ib_device *ib_dev, u8 port,
- struct ib_gid_table *table, int ix,
- const union ib_gid *gid,
- const struct ib_gid_attr *attr,
- enum gid_table_write_action action,
- bool default_gid)
+static void dispatch_gid_change_event(struct ib_device *ib_dev, u32 port)
{
- int ret = 0;
- struct net_device *old_net_dev;
- unsigned long flags;
+ struct ib_event event;
- /* in rdma_cap_roce_gid_table, this funciton should be protected by a
- * sleep-able lock.
+ event.device = ib_dev;
+ event.element.port_num = port;
+ event.event = IB_EVENT_GID_CHANGE;
+
+ ib_dispatch_event_clients(&event);
+}
+
+static const char * const gid_type_str[] = {
+ /* IB/RoCE v1 value is set for IB_GID_TYPE_IB and IB_GID_TYPE_ROCE for
+ * user space compatibility reasons.
*/
- write_lock_irqsave(&table->data_vec[ix].lock, flags);
+ [IB_GID_TYPE_IB] = "IB/RoCE v1",
+ [IB_GID_TYPE_ROCE] = "IB/RoCE v1",
+ [IB_GID_TYPE_ROCE_UDP_ENCAP] = "RoCE v2",
+};
- if (rdma_cap_roce_gid_table(ib_dev, port)) {
- table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
- write_unlock_irqrestore(&table->data_vec[ix].lock, flags);
- /* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
- * RoCE providers and thus only updates the cache.
- */
- if (action == GID_TABLE_WRITE_ACTION_ADD)
- ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
- &table->data_vec[ix].context);
- else if (action == GID_TABLE_WRITE_ACTION_DEL)
- ret = ib_dev->del_gid(ib_dev, port, ix,
- &table->data_vec[ix].context);
- write_lock_irqsave(&table->data_vec[ix].lock, flags);
- }
+const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
+{
+ if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
+ return gid_type_str[gid_type];
- old_net_dev = table->data_vec[ix].attr.ndev;
- if (old_net_dev && old_net_dev != attr->ndev)
- dev_put(old_net_dev);
- /* if modify_gid failed, just delete the old gid */
- if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
- gid = &zgid;
- attr = &zattr;
- table->data_vec[ix].context = NULL;
- }
- if (default_gid)
- table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
- memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
- memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
- if (table->data_vec[ix].attr.ndev &&
- table->data_vec[ix].attr.ndev != old_net_dev)
- dev_hold(table->data_vec[ix].attr.ndev);
+ return "Invalid GID type";
+}
+EXPORT_SYMBOL(ib_cache_gid_type_str);
+
+/** rdma_is_zero_gid - Check if given GID is zero or not.
+ * @gid: GID to check
+ * Returns true if given GID is zero, returns false otherwise.
+ */
+bool rdma_is_zero_gid(const union ib_gid *gid)
+{
+ return !memcmp(gid, &zgid, sizeof(*gid));
+}
+EXPORT_SYMBOL(rdma_is_zero_gid);
+
+/** is_gid_index_default - Check if a given index belongs to
+ * reserved default GIDs or not.
+ * @table: GID table pointer
+ * @index: Index to check in GID table
+ * Returns true if index is one of the reserved default GID index otherwise
+ * returns false.
+ */
+static bool is_gid_index_default(const struct ib_gid_table *table,
+ unsigned int index)
+{
+ return index < 32 && (BIT(index) & table->default_gid_indices);
+}
+
+int ib_cache_gid_parse_type_str(const char *buf)
+{
+ unsigned int i;
+ size_t len;
+ int err = -EINVAL;
+
+ len = strlen(buf);
+ if (len == 0)
+ return -EINVAL;
+
+ if (buf[len - 1] == '\n')
+ len--;
+
+ for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
+ if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
+ len == strlen(gid_type_str[i])) {
+ err = i;
+ break;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
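A hedged example (not part of the diff) of the string/enum round trip used by the GID-type helpers; parse_gid_type_buf() is a hypothetical name.

static int parse_gid_type_buf(const char *buf, enum ib_gid_type *gid_type)
{
	int ret = ib_cache_gid_parse_type_str(buf);	/* trailing '\n' is tolerated */

	if (ret < 0)
		return -EINVAL;
	*gid_type = ret;
	pr_debug("parsed gid type: %s\n", ib_cache_gid_type_str(*gid_type));
	return 0;
}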
+
+static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u32 port)
+{
+ return device->port_data[port].cache.gid;
+}
+
+static bool is_gid_entry_free(const struct ib_gid_table_entry *entry)
+{
+ return !entry;
+}
+
+static bool is_gid_entry_valid(const struct ib_gid_table_entry *entry)
+{
+ return entry && entry->state == GID_TABLE_ENTRY_VALID;
+}
+
+static void schedule_free_gid(struct kref *kref)
+{
+ struct ib_gid_table_entry *entry =
+ container_of(kref, struct ib_gid_table_entry, kref);
+
+ queue_work(ib_wq, &entry->del_work);
+}
+
+static void put_gid_ndev(struct rcu_head *head)
+{
+ struct roce_gid_ndev_storage *storage =
+ container_of(head, struct roce_gid_ndev_storage, rcu_head);
+
+ WARN_ON(!storage->ndev);
+ /* At this point it's safe to release the netdev reference,
+ * as all callers working on gid_attr->ndev are done
+ * using this netdev.
+ */
+ dev_put(storage->ndev);
+ kfree(storage);
+}
+
+static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
+{
+ struct ib_device *device = entry->attr.device;
+ u32 port_num = entry->attr.port_num;
+ struct ib_gid_table *table = rdma_gid_table(device, port_num);
+
+ dev_dbg(&device->dev, "%s port=%u index=%u gid %pI6\n", __func__,
+ port_num, entry->attr.index, entry->attr.gid.raw);
+
+ write_lock_irq(&table->rwlock);
+
+ /*
+ * The only way to avoid overwriting a NULL in the table is
+ * to check whether this is still the same entry in the table.
+ * If a new entry was added to this index by the time we free here,
+ * don't overwrite the table entry.
+ */
+ if (entry == table->data_vec[entry->attr.index])
+ table->data_vec[entry->attr.index] = NULL;
+ /* Now this index is ready to be allocated */
+ write_unlock_irq(&table->rwlock);
+
+ if (entry->ndev_storage)
+ call_rcu(&entry->ndev_storage->rcu_head, put_gid_ndev);
+ kfree(entry);
+}
- table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;
+static void free_gid_entry(struct kref *kref)
+{
+ struct ib_gid_table_entry *entry =
+ container_of(kref, struct ib_gid_table_entry, kref);
- write_unlock_irqrestore(&table->data_vec[ix].lock, flags);
+ free_gid_entry_locked(entry);
+}
- if (!ret && rdma_cap_roce_gid_table(ib_dev, port)) {
- struct ib_event event;
+/**
+ * free_gid_work - Release reference to the GID entry
+ * @work: Work structure to refer to GID entry which needs to be
+ * deleted.
+ *
+ * free_gid_work() frees the entry from the HCA's hardware table
+ * if the provider supports it. It releases the reference to the netdevice.
+ */
+static void free_gid_work(struct work_struct *work)
+{
+ struct ib_gid_table_entry *entry =
+ container_of(work, struct ib_gid_table_entry, del_work);
+ struct ib_device *device = entry->attr.device;
+ u32 port_num = entry->attr.port_num;
+ struct ib_gid_table *table = rdma_gid_table(device, port_num);
- event.device = ib_dev;
- event.element.port_num = port;
- event.event = IB_EVENT_GID_CHANGE;
+ mutex_lock(&table->lock);
+ free_gid_entry_locked(entry);
+ mutex_unlock(&table->lock);
+}
+
+static struct ib_gid_table_entry *
+alloc_gid_entry(const struct ib_gid_attr *attr)
+{
+ struct ib_gid_table_entry *entry;
+ struct net_device *ndev;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return NULL;
- ib_dispatch_event(&event);
+ ndev = rcu_dereference_protected(attr->ndev, 1);
+ if (ndev) {
+ entry->ndev_storage = kzalloc(sizeof(*entry->ndev_storage),
+ GFP_KERNEL);
+ if (!entry->ndev_storage) {
+ kfree(entry);
+ return NULL;
+ }
+ dev_hold(ndev);
+ entry->ndev_storage->ndev = ndev;
}
- return ret;
+ kref_init(&entry->kref);
+ memcpy(&entry->attr, attr, sizeof(*attr));
+ INIT_WORK(&entry->del_work, free_gid_work);
+ entry->state = GID_TABLE_ENTRY_INVALID;
+ return entry;
}
-static int add_gid(struct ib_device *ib_dev, u8 port,
- struct ib_gid_table *table, int ix,
- const union ib_gid *gid,
- const struct ib_gid_attr *attr,
- bool default_gid) {
- return write_gid(ib_dev, port, table, ix, gid, attr,
- GID_TABLE_WRITE_ACTION_ADD, default_gid);
+static void store_gid_entry(struct ib_gid_table *table,
+ struct ib_gid_table_entry *entry)
+{
+ entry->state = GID_TABLE_ENTRY_VALID;
+
+ dev_dbg(&entry->attr.device->dev, "%s port=%u index=%u gid %pI6\n",
+ __func__, entry->attr.port_num, entry->attr.index,
+ entry->attr.gid.raw);
+
+ lockdep_assert_held(&table->lock);
+ write_lock_irq(&table->rwlock);
+ table->data_vec[entry->attr.index] = entry;
+ write_unlock_irq(&table->rwlock);
}
-static int modify_gid(struct ib_device *ib_dev, u8 port,
- struct ib_gid_table *table, int ix,
- const union ib_gid *gid,
- const struct ib_gid_attr *attr,
- bool default_gid) {
- return write_gid(ib_dev, port, table, ix, gid, attr,
- GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
+static void get_gid_entry(struct ib_gid_table_entry *entry)
+{
+ kref_get(&entry->kref);
}
-static int del_gid(struct ib_device *ib_dev, u8 port,
- struct ib_gid_table *table, int ix,
- bool default_gid) {
- return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
- GID_TABLE_WRITE_ACTION_DEL, default_gid);
+static void put_gid_entry(struct ib_gid_table_entry *entry)
+{
+ kref_put(&entry->kref, schedule_free_gid);
}
+static void put_gid_entry_locked(struct ib_gid_table_entry *entry)
+{
+ kref_put(&entry->kref, free_gid_entry);
+}
+
+static int add_roce_gid(struct ib_gid_table_entry *entry)
+{
+ const struct ib_gid_attr *attr = &entry->attr;
+ int ret;
+
+ if (!attr->ndev) {
+ dev_err(&attr->device->dev, "%s NULL netdev port=%u index=%u\n",
+ __func__, attr->port_num, attr->index);
+ return -EINVAL;
+ }
+ if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
+ ret = attr->device->ops.add_gid(attr, &entry->context);
+ if (ret) {
+ dev_err(&attr->device->dev,
+ "%s GID add failed port=%u index=%u\n",
+ __func__, attr->port_num, attr->index);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/**
+ * del_gid - Delete GID table entry
+ *
+ * @ib_dev: IB device whose GID entry to be deleted
+ * @port: Port number of the IB device
+ * @table: GID table of the IB device for a port
+ * @ix: GID entry index to delete
+ *
+ */
+static void del_gid(struct ib_device *ib_dev, u32 port,
+ struct ib_gid_table *table, int ix)
+{
+ struct roce_gid_ndev_storage *ndev_storage;
+ struct ib_gid_table_entry *entry;
+
+ lockdep_assert_held(&table->lock);
+
+ dev_dbg(&ib_dev->dev, "%s port=%u index=%d gid %pI6\n", __func__, port,
+ ix, table->data_vec[ix]->attr.gid.raw);
+
+ write_lock_irq(&table->rwlock);
+ entry = table->data_vec[ix];
+ entry->state = GID_TABLE_ENTRY_PENDING_DEL;
+ /*
+ * For non RoCE protocol, GID entry slot is ready to use.
+ */
+ if (!rdma_protocol_roce(ib_dev, port))
+ table->data_vec[ix] = NULL;
+ write_unlock_irq(&table->rwlock);
+
+ if (rdma_cap_roce_gid_table(ib_dev, port))
+ ib_dev->ops.del_gid(&entry->attr, &entry->context);
+
+ ndev_storage = entry->ndev_storage;
+ if (ndev_storage) {
+ entry->ndev_storage = NULL;
+ rcu_assign_pointer(entry->attr.ndev, NULL);
+ call_rcu(&ndev_storage->rcu_head, put_gid_ndev);
+ }
+
+ put_gid_entry_locked(entry);
+}
+
+/**
+ * add_modify_gid - Add or modify GID table entry
+ *
+ * @table: GID table in which GID to be added or modified
+ * @attr: Attributes of the GID
+ *
+ * Returns 0 on success or appropriate error code. It accepts zero
+ * GID addition for non-RoCE ports for HCAs that report them as a valid
+ * GID. However, such zero GIDs are not added to the cache.
+ */
+static int add_modify_gid(struct ib_gid_table *table,
+ const struct ib_gid_attr *attr)
+{
+ struct ib_gid_table_entry *entry;
+ int ret = 0;
+
+ /*
+ * Invalidate any old entry in the table to make it safe to write to
+ * this index.
+ */
+ if (is_gid_entry_valid(table->data_vec[attr->index]))
+ del_gid(attr->device, attr->port_num, table, attr->index);
+
+ /*
+ * Some HCAs report multiple GID entries with only one valid GID, and
+ * leave other unused entries as the zero GID. Convert zero GIDs to
+ * empty table entries instead of storing them.
+ */
+ if (rdma_is_zero_gid(&attr->gid))
+ return 0;
+
+ entry = alloc_gid_entry(attr);
+ if (!entry)
+ return -ENOMEM;
+
+ if (rdma_protocol_roce(attr->device, attr->port_num)) {
+ ret = add_roce_gid(entry);
+ if (ret)
+ goto done;
+ }
+
+ store_gid_entry(table, entry);
+ return 0;
+
+done:
+ put_gid_entry(entry);
+ return ret;
+}
+
+/* rwlock should be read locked, or lock should be held */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
const struct ib_gid_attr *val, bool default_gid,
- unsigned long mask)
+ unsigned long mask, int *pempty)
{
- int i;
+ int i = 0;
+ int found = -1;
+ int empty = pempty ? -1 : 0;
- for (i = 0; i < table->sz; i++) {
- unsigned long flags;
- struct ib_gid_attr *attr = &table->data_vec[i].attr;
+ while (i < table->sz && (found < 0 || empty < 0)) {
+ struct ib_gid_table_entry *data = table->data_vec[i];
+ struct ib_gid_attr *attr;
+ int curr_index = i;
- read_lock_irqsave(&table->data_vec[i].lock, flags);
+ i++;
- if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
- goto next;
+ /* find_gid() is used during GID addition, where it is expected
+ * to return a free entry slot that is not a duplicate.
+ * A free entry slot is requested and returned if pempty is set,
+ * so look up a free slot only if requested.
+ */
+ if (pempty && empty < 0) {
+ if (is_gid_entry_free(data) &&
+ default_gid ==
+ is_gid_index_default(table, curr_index)) {
+ /*
+ * Found an invalid (free) entry; allocate it.
+ * If default GID is requested, then our
+ * found slot must be one of the DEFAULT
+ * reserved slots or we fail.
+ * This ensures that only DEFAULT reserved
+ * slots are used for default property GIDs.
+ */
+ empty = curr_index;
+ }
+ }
+
+ /*
+ * Additionally, find_gid() is used to find a valid entry during a
+ * lookup operation, so ignore the entries that are marked as
+ * pending removal and the entries that are marked as
+ * invalid.
+ */
+ if (!is_gid_entry_valid(data))
+ continue;
+
+ if (found >= 0)
+ continue;
+
+ attr = &data->attr;
+ if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
+ attr->gid_type != val->gid_type)
+ continue;
if (mask & GID_ATTR_FIND_MASK_GID &&
- memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
- goto next;
+ memcmp(gid, &data->attr.gid, sizeof(*gid)))
+ continue;
if (mask & GID_ATTR_FIND_MASK_NETDEV &&
attr->ndev != val->ndev)
- goto next;
+ continue;
if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
- !!(table->data_vec[i].props & GID_TABLE_ENTRY_DEFAULT) !=
- default_gid)
- goto next;
-
- read_unlock_irqrestore(&table->data_vec[i].lock, flags);
- return i;
-next:
- read_unlock_irqrestore(&table->data_vec[i].lock, flags);
+ is_gid_index_default(table, curr_index) != default_gid)
+ continue;
+
+ found = curr_index;
}
- return -1;
+ if (pempty)
+ *pempty = empty;
+
+ return found;
}
static void make_default_gid(struct net_device *dev, union ib_gid *gid)
@@ -244,206 +543,236 @@ static void make_default_gid(struct net_device *dev, union ib_gid *gid)
addrconf_ifid_eui48(&gid->raw[8], dev);
}
-int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
- union ib_gid *gid, struct ib_gid_attr *attr)
+static int __ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
+ union ib_gid *gid, struct ib_gid_attr *attr,
+ unsigned long mask, bool default_gid)
{
- struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
struct ib_gid_table *table;
- int ix;
int ret = 0;
- struct net_device *idev;
-
- table = ports_table[port - rdma_start_port(ib_dev)];
+ int empty;
+ int ix;
- if (!memcmp(gid, &zgid, sizeof(*gid)))
+ /* Do not allow adding zero GID in support of
+ * IB spec version 1.3 section 4.1.1 point (6) and
+ * section 12.7.10 and section 12.7.20
+ */
+ if (rdma_is_zero_gid(gid))
return -EINVAL;
- if (ib_dev->get_netdev) {
- idev = ib_dev->get_netdev(ib_dev, port);
- if (idev && attr->ndev != idev) {
- union ib_gid default_gid;
-
- /* Adding default GIDs in not permitted */
- make_default_gid(idev, &default_gid);
- if (!memcmp(gid, &default_gid, sizeof(*gid))) {
- dev_put(idev);
- return -EPERM;
- }
- }
- if (idev)
- dev_put(idev);
- }
+ table = rdma_gid_table(ib_dev, port);
mutex_lock(&table->lock);
- ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
- GID_ATTR_FIND_MASK_NETDEV);
+ ix = find_gid(table, gid, attr, default_gid, mask, &empty);
if (ix >= 0)
goto out_unlock;
- ix = find_gid(table, &zgid, NULL, false, GID_ATTR_FIND_MASK_GID |
- GID_ATTR_FIND_MASK_DEFAULT);
- if (ix < 0) {
+ if (empty < 0) {
ret = -ENOSPC;
goto out_unlock;
}
-
- add_gid(ib_dev, port, table, ix, gid, attr, false);
+ attr->device = ib_dev;
+ attr->index = empty;
+ attr->port_num = port;
+ attr->gid = *gid;
+ ret = add_modify_gid(table, attr);
+ if (!ret)
+ dispatch_gid_change_event(ib_dev, port);
out_unlock:
mutex_unlock(&table->lock);
+ if (ret)
+ pr_warn_ratelimited("%s: unable to add gid %pI6 error=%d\n",
+ __func__, gid->raw, ret);
return ret;
}
-int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
+int ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
union ib_gid *gid, struct ib_gid_attr *attr)
{
- struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+ unsigned long mask = GID_ATTR_FIND_MASK_GID |
+ GID_ATTR_FIND_MASK_GID_TYPE |
+ GID_ATTR_FIND_MASK_NETDEV;
+
+ return __ib_cache_gid_add(ib_dev, port, gid, attr, mask, false);
+}
+
+static int
+_ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
+ union ib_gid *gid, struct ib_gid_attr *attr,
+ unsigned long mask, bool default_gid)
+{
struct ib_gid_table *table;
+ int ret = 0;
int ix;
- table = ports_table[port - rdma_start_port(ib_dev)];
+ table = rdma_gid_table(ib_dev, port);
mutex_lock(&table->lock);
- ix = find_gid(table, gid, attr, false,
- GID_ATTR_FIND_MASK_GID |
- GID_ATTR_FIND_MASK_NETDEV |
- GID_ATTR_FIND_MASK_DEFAULT);
- if (ix < 0)
+ ix = find_gid(table, gid, attr, default_gid, mask, NULL);
+ if (ix < 0) {
+ ret = -EINVAL;
goto out_unlock;
+ }
- del_gid(ib_dev, port, table, ix, false);
+ del_gid(ib_dev, port, table, ix);
+ dispatch_gid_change_event(ib_dev, port);
out_unlock:
mutex_unlock(&table->lock);
- return 0;
+ if (ret)
+ pr_debug("%s: can't delete gid %pI6 error=%d\n",
+ __func__, gid->raw, ret);
+ return ret;
}
-int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
+int ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
+ union ib_gid *gid, struct ib_gid_attr *attr)
+{
+ unsigned long mask = GID_ATTR_FIND_MASK_GID |
+ GID_ATTR_FIND_MASK_GID_TYPE |
+ GID_ATTR_FIND_MASK_DEFAULT |
+ GID_ATTR_FIND_MASK_NETDEV;
+
+ return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false);
+}
+
+int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
struct net_device *ndev)
{
- struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
struct ib_gid_table *table;
int ix;
+ bool deleted = false;
- table = ports_table[port - rdma_start_port(ib_dev)];
+ table = rdma_gid_table(ib_dev, port);
mutex_lock(&table->lock);
- for (ix = 0; ix < table->sz; ix++)
- if (table->data_vec[ix].attr.ndev == ndev)
- del_gid(ib_dev, port, table, ix, false);
+ for (ix = 0; ix < table->sz; ix++) {
+ if (is_gid_entry_valid(table->data_vec[ix]) &&
+ table->data_vec[ix]->attr.ndev == ndev) {
+ del_gid(ib_dev, port, table, ix);
+ deleted = true;
+ }
+ }
mutex_unlock(&table->lock);
+
+ if (deleted)
+ dispatch_gid_change_event(ib_dev, port);
+
return 0;
}
-static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
- union ib_gid *gid, struct ib_gid_attr *attr)
+/**
+ * rdma_find_gid_by_port - Returns the GID entry attributes when it finds
+ * a valid GID entry for given search parameters. It searches for the specified
+ * GID value in the local software cache.
+ * @ib_dev: The device to query.
+ * @gid: The GID value to search for.
+ * @gid_type: The GID type to search for.
+ * @port: The port number of the device where the GID value should be searched.
+ * @ndev: In RoCE, the net device of the device. NULL means ignore.
+ *
+ * Returns sgid attributes if the GID is found with valid reference or
+ * returns ERR_PTR for the error.
+ * The caller must invoke rdma_put_gid_attr() to release the reference.
+ */
+const struct ib_gid_attr *
+rdma_find_gid_by_port(struct ib_device *ib_dev,
+ const union ib_gid *gid,
+ enum ib_gid_type gid_type,
+ u32 port, struct net_device *ndev)
{
- struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+ int local_index;
struct ib_gid_table *table;
+ unsigned long mask = GID_ATTR_FIND_MASK_GID |
+ GID_ATTR_FIND_MASK_GID_TYPE;
+ struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
+ const struct ib_gid_attr *attr;
unsigned long flags;
- table = ports_table[port - rdma_start_port(ib_dev)];
+ if (!rdma_is_port_valid(ib_dev, port))
+ return ERR_PTR(-ENOENT);
- if (index < 0 || index >= table->sz)
- return -EINVAL;
+ table = rdma_gid_table(ib_dev, port);
- read_lock_irqsave(&table->data_vec[index].lock, flags);
- if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID) {
- read_unlock_irqrestore(&table->data_vec[index].lock, flags);
- return -EAGAIN;
- }
+ if (ndev)
+ mask |= GID_ATTR_FIND_MASK_NETDEV;
- memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
- if (attr) {
- memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
- if (attr->ndev)
- dev_hold(attr->ndev);
+ read_lock_irqsave(&table->rwlock, flags);
+ local_index = find_gid(table, gid, &val, false, mask, NULL);
+ if (local_index >= 0) {
+ get_gid_entry(table->data_vec[local_index]);
+ attr = &table->data_vec[local_index]->attr;
+ read_unlock_irqrestore(&table->rwlock, flags);
+ return attr;
}
- read_unlock_irqrestore(&table->data_vec[index].lock, flags);
- return 0;
+ read_unlock_irqrestore(&table->rwlock, flags);
+ return ERR_PTR(-ENOENT);
}
-
-static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
- const union ib_gid *gid,
- const struct ib_gid_attr *val,
- unsigned long mask,
- u8 *port, u16 *index)
+EXPORT_SYMBOL(rdma_find_gid_by_port);
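A hedged usage sketch (not from the diff): the kernel-doc above states the returned attribute carries a reference, so the caller pairs the lookup with rdma_put_gid_attr(); gid_entry_index() is a hypothetical helper.

static int gid_entry_index(struct ib_device *dev, const union ib_gid *gid,
			   u32 port)
{
	const struct ib_gid_attr *attr;
	int index;

	attr = rdma_find_gid_by_port(dev, gid, IB_GID_TYPE_ROCE_UDP_ENCAP,
				     port, NULL);
	if (IS_ERR(attr))
		return PTR_ERR(attr);
	index = attr->index;
	rdma_put_gid_attr(attr);	/* drop the reference taken by the lookup */
	return index;
}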
+
+/**
+ * rdma_find_gid_by_filter - Returns the GID table attribute where a
+ * specified GID value occurs
+ * @ib_dev: The device to query.
+ * @gid: The GID value to search for.
+ * @port: The port number of the device where the GID value could be
+ * searched.
+ * @filter: The filter function is executed on any matching GID in the table.
+ * If the filter function returns true, the corresponding GID attribute is returned,
+ * otherwise, we continue searching the GID table. It's guaranteed that
+ * while filter is executed, ndev field is valid and the structure won't
+ * change. filter is executed in an atomic context. filter must not be NULL.
+ * @context: Private data to pass into the call-back.
+ *
+ * rdma_find_gid_by_filter() searches for the specified GID value
+ * of which the filter function returns true in the port's GID table.
+ *
+ */
+const struct ib_gid_attr *rdma_find_gid_by_filter(
+ struct ib_device *ib_dev, const union ib_gid *gid, u32 port,
+ bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *,
+ void *),
+ void *context)
{
- struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+ const struct ib_gid_attr *res = ERR_PTR(-ENOENT);
struct ib_gid_table *table;
- u8 p;
- int local_index;
-
- for (p = 0; p < ib_dev->phys_port_cnt; p++) {
- table = ports_table[p];
- local_index = find_gid(table, gid, val, false, mask);
- if (local_index >= 0) {
- if (index)
- *index = local_index;
- if (port)
- *port = p + rdma_start_port(ib_dev);
- return 0;
- }
- }
-
- return -ENOENT;
-}
-
-static int ib_cache_gid_find(struct ib_device *ib_dev,
- const union ib_gid *gid,
- struct net_device *ndev, u8 *port,
- u16 *index)
-{
- unsigned long mask = GID_ATTR_FIND_MASK_GID;
- struct ib_gid_attr gid_attr_val = {.ndev = ndev};
-
- if (ndev)
- mask |= GID_ATTR_FIND_MASK_NETDEV;
+ unsigned long flags;
+ unsigned int i;
- return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
- mask, port, index);
-}
+ if (!rdma_is_port_valid(ib_dev, port))
+ return ERR_PTR(-EINVAL);
-int ib_cache_gid_find_by_port(struct ib_device *ib_dev,
- const union ib_gid *gid,
- u8 port, struct net_device *ndev,
- u16 *index)
-{
- int local_index;
- struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
- struct ib_gid_table *table;
- unsigned long mask = GID_ATTR_FIND_MASK_GID;
- struct ib_gid_attr val = {.ndev = ndev};
+ table = rdma_gid_table(ib_dev, port);
- if (port < rdma_start_port(ib_dev) ||
- port > rdma_end_port(ib_dev))
- return -ENOENT;
+ read_lock_irqsave(&table->rwlock, flags);
+ for (i = 0; i < table->sz; i++) {
+ struct ib_gid_table_entry *entry = table->data_vec[i];
- table = ports_table[port - rdma_start_port(ib_dev)];
+ if (!is_gid_entry_valid(entry))
+ continue;
- if (ndev)
- mask |= GID_ATTR_FIND_MASK_NETDEV;
+ if (memcmp(gid, &entry->attr.gid, sizeof(*gid)))
+ continue;
- local_index = find_gid(table, gid, &val, false, mask);
- if (local_index >= 0) {
- if (index)
- *index = local_index;
- return 0;
+ if (filter(gid, &entry->attr, context)) {
+ get_gid_entry(entry);
+ res = &entry->attr;
+ break;
+ }
}
-
- return -ENOENT;
+ read_unlock_irqrestore(&table->rwlock, flags);
+ return res;
}
static struct ib_gid_table *alloc_gid_table(int sz)
{
- unsigned int i;
- struct ib_gid_table *table =
- kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);
+ struct ib_gid_table *table = kzalloc(sizeof(*table), GFP_KERNEL);
+
if (!table)
return NULL;
@@ -454,10 +783,7 @@ static struct ib_gid_table *alloc_gid_table(int sz)
mutex_init(&table->lock);
table->sz = sz;
-
- for (i = 0; i < sz; i++)
- rwlock_init(&table->data_vec[i].lock);
-
+ rwlock_init(&table->rwlock);
return table;
err_free_table:
@@ -465,15 +791,30 @@ err_free_table:
return NULL;
}
-static void release_gid_table(struct ib_gid_table *table)
+static void release_gid_table(struct ib_device *device,
+ struct ib_gid_table *table)
{
- if (table) {
- kfree(table->data_vec);
- kfree(table);
+ int i;
+
+ if (!table)
+ return;
+
+ for (i = 0; i < table->sz; i++) {
+ if (is_gid_entry_free(table->data_vec[i]))
+ continue;
+
+ WARN_ONCE(true,
+ "GID entry ref leak for dev %s index %d ref=%u\n",
+ dev_name(&device->dev), i,
+ kref_read(&table->data_vec[i]->kref));
}
+
+ mutex_destroy(&table->lock);
+ kfree(table->data_vec);
+ kfree(table);
}
-static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
+static void cleanup_gid_table_port(struct ib_device *ib_dev, u32 port,
struct ib_gid_table *table)
{
int i;
@@ -481,150 +822,100 @@ static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
if (!table)
return;
+ mutex_lock(&table->lock);
for (i = 0; i < table->sz; ++i) {
- if (memcmp(&table->data_vec[i].gid, &zgid,
- sizeof(table->data_vec[i].gid)))
- del_gid(ib_dev, port, table, i,
- table->data_vec[i].props &
- GID_ATTR_FIND_MASK_DEFAULT);
+ if (is_gid_entry_valid(table->data_vec[i]))
+ del_gid(ib_dev, port, table, i);
}
+ mutex_unlock(&table->lock);
}
-void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
+void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port,
struct net_device *ndev,
+ unsigned long gid_type_mask,
enum ib_cache_gid_default_mode mode)
{
- struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
- union ib_gid gid;
+ union ib_gid gid = { };
struct ib_gid_attr gid_attr;
- struct ib_gid_table *table;
- int ix;
- union ib_gid current_gid;
- struct ib_gid_attr current_gid_attr = {};
-
- table = ports_table[port - rdma_start_port(ib_dev)];
+ unsigned int gid_type;
+ unsigned long mask;
- make_default_gid(ndev, &gid);
+ mask = GID_ATTR_FIND_MASK_GID_TYPE |
+ GID_ATTR_FIND_MASK_DEFAULT |
+ GID_ATTR_FIND_MASK_NETDEV;
memset(&gid_attr, 0, sizeof(gid_attr));
gid_attr.ndev = ndev;
- ix = find_gid(table, NULL, NULL, true, GID_ATTR_FIND_MASK_DEFAULT);
+ for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
+ if (1UL << gid_type & ~gid_type_mask)
+ continue;
- /* Coudn't find default GID location */
- WARN_ON(ix < 0);
+ gid_attr.gid_type = gid_type;
- mutex_lock(&table->lock);
- if (!__ib_cache_gid_get(ib_dev, port, ix,
- &current_gid, &current_gid_attr) &&
- mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
- !memcmp(&gid, &current_gid, sizeof(gid)) &&
- !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
- goto unlock;
-
- if ((memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
- memcmp(&current_gid_attr, &zattr,
- sizeof(current_gid_attr))) &&
- del_gid(ib_dev, port, table, ix, true)) {
- pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
- ix, gid.raw);
- goto unlock;
+ if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
+ make_default_gid(ndev, &gid);
+ __ib_cache_gid_add(ib_dev, port, &gid,
+ &gid_attr, mask, true);
+ } else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) {
+ _ib_cache_gid_del(ib_dev, port, &gid,
+ &gid_attr, mask, true);
+ }
}
+}
- if (mode == IB_CACHE_GID_DEFAULT_MODE_SET)
- if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
- pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
- gid.raw);
-
-unlock:
- if (current_gid_attr.ndev)
- dev_put(current_gid_attr.ndev);
- mutex_unlock(&table->lock);
+static void gid_table_reserve_default(struct ib_device *ib_dev, u32 port,
+ struct ib_gid_table *table)
+{
+ unsigned int i;
+ unsigned long roce_gid_type_mask;
+ unsigned int num_default_gids;
+
+ roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
+ num_default_gids = hweight_long(roce_gid_type_mask);
+ /* Reserve starting indices for default GIDs */
+ for (i = 0; i < num_default_gids && i < table->sz; i++)
+ table->default_gid_indices |= BIT(i);
}
-static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
- struct ib_gid_table *table)
+
+static void gid_table_release_one(struct ib_device *ib_dev)
{
- if (rdma_protocol_roce(ib_dev, port)) {
- struct ib_gid_table_entry *entry = &table->data_vec[0];
+ u32 p;
- entry->props |= GID_TABLE_ENTRY_DEFAULT;
+ rdma_for_each_port (ib_dev, p) {
+ release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid);
+ ib_dev->port_data[p].cache.gid = NULL;
}
-
- return 0;
}
static int _gid_table_setup_one(struct ib_device *ib_dev)
{
- u8 port;
- struct ib_gid_table **table;
- int err = 0;
-
- table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);
-
- if (!table) {
- pr_warn("failed to allocate ib gid cache for %s\n",
- ib_dev->name);
- return -ENOMEM;
- }
-
- for (port = 0; port < ib_dev->phys_port_cnt; port++) {
- u8 rdma_port = port + rdma_start_port(ib_dev);
+ struct ib_gid_table *table;
+ u32 rdma_port;
- table[port] =
- alloc_gid_table(
- ib_dev->port_immutable[rdma_port].gid_tbl_len);
- if (!table[port]) {
- err = -ENOMEM;
+ rdma_for_each_port (ib_dev, rdma_port) {
+ table = alloc_gid_table(
+ ib_dev->port_data[rdma_port].immutable.gid_tbl_len);
+ if (!table)
goto rollback_table_setup;
- }
- err = gid_table_reserve_default(ib_dev,
- port + rdma_start_port(ib_dev),
- table[port]);
- if (err)
- goto rollback_table_setup;
+ gid_table_reserve_default(ib_dev, rdma_port, table);
+ ib_dev->port_data[rdma_port].cache.gid = table;
}
-
- ib_dev->cache.gid_cache = table;
return 0;
rollback_table_setup:
- for (port = 0; port < ib_dev->phys_port_cnt; port++) {
- cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
- table[port]);
- release_gid_table(table[port]);
- }
-
- kfree(table);
- return err;
-}
-
-static void gid_table_release_one(struct ib_device *ib_dev)
-{
- struct ib_gid_table **table = ib_dev->cache.gid_cache;
- u8 port;
-
- if (!table)
- return;
-
- for (port = 0; port < ib_dev->phys_port_cnt; port++)
- release_gid_table(table[port]);
-
- kfree(table);
- ib_dev->cache.gid_cache = NULL;
+ gid_table_release_one(ib_dev);
+ return -ENOMEM;
}
static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
- struct ib_gid_table **table = ib_dev->cache.gid_cache;
- u8 port;
+ u32 p;
- if (!table)
- return;
-
- for (port = 0; port < ib_dev->phys_port_cnt; port++)
- cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
- table[port]);
+ rdma_for_each_port (ib_dev, p)
+ cleanup_gid_table_port(ib_dev, p,
+ ib_dev->port_data[p].cache.gid);
}
static int gid_table_setup_one(struct ib_device *ib_dev)
@@ -636,39 +927,125 @@ static int gid_table_setup_one(struct ib_device *ib_dev)
if (err)
return err;
- err = roce_rescan_device(ib_dev);
-
- if (err) {
- gid_table_cleanup_one(ib_dev);
- gid_table_release_one(ib_dev);
- }
+ rdma_roce_rescan_device(ib_dev);
return err;
}
-int ib_get_cached_gid(struct ib_device *device,
- u8 port_num,
- int index,
- union ib_gid *gid)
+/**
+ * rdma_query_gid - Read the GID content from the GID software cache
+ * @device: Device to query the GID
+ * @port_num: Port number of the device
+ * @index: Index of the GID table entry to read
+ * @gid: Pointer to GID where to store the entry's GID
+ *
+ * rdma_query_gid() only reads the GID entry content for the requested device,
+ * port and index. It reads for IB, RoCE and iWarp link layers. It doesn't
+ * hold any reference to the GID table entry in the HCA or software cache.
+ *
+ * Returns 0 on success or appropriate error code.
+ *
+ */
+int rdma_query_gid(struct ib_device *device, u32 port_num,
+ int index, union ib_gid *gid)
{
- if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
+ struct ib_gid_table *table;
+ unsigned long flags;
+ int res;
+
+ if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
- return __ib_cache_gid_get(device, port_num, index, gid, NULL);
+ table = rdma_gid_table(device, port_num);
+ read_lock_irqsave(&table->rwlock, flags);
+
+ if (index < 0 || index >= table->sz) {
+ res = -EINVAL;
+ goto done;
+ }
+
+ if (!is_gid_entry_valid(table->data_vec[index])) {
+ res = -ENOENT;
+ goto done;
+ }
+
+ memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid));
+ res = 0;
+
+done:
+ read_unlock_irqrestore(&table->rwlock, flags);
+ return res;
}
-EXPORT_SYMBOL(ib_get_cached_gid);
+EXPORT_SYMBOL(rdma_query_gid);
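/*
 * Usage sketch (illustration only, not part of this diff): how a caller might
 * read a single cached GID with rdma_query_gid() as added above. The device
 * pointer and port number are hypothetical; the declaration is assumed to
 * come from <rdma/ib_cache.h>.
 */
static int example_read_gid0(struct ib_device *dev, u32 port)
{
	union ib_gid gid;
	int ret;

	/* Read the GID at index 0 of the software cache for this port. */
	ret = rdma_query_gid(dev, port, 0, &gid);
	if (ret)
		return ret;	/* -EINVAL or -ENOENT, per the code above */

	pr_info("port %u gid[0] %pI6\n", port, gid.raw);
	return 0;
}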
-int ib_find_cached_gid(struct ib_device *device,
- const union ib_gid *gid,
- u8 *port_num,
- u16 *index)
+/**
+ * rdma_read_gid_hw_context - Read the HW GID context from GID attribute
+ * @attr: Pointer to the GID attribute
+ *
+ * rdma_read_gid_hw_context() reads the driver's GID HW context corresponding
+ * to the SGID attr. Callers are required to already be holding the reference
+ * to an existing GID entry.
+ *
+ * Returns the HW GID context
+ *
+ */
+void *rdma_read_gid_hw_context(const struct ib_gid_attr *attr)
{
- return ib_cache_gid_find(device, gid, NULL, port_num, index);
+ return container_of(attr, struct ib_gid_table_entry, attr)->context;
}
-EXPORT_SYMBOL(ib_find_cached_gid);
+EXPORT_SYMBOL(rdma_read_gid_hw_context);
+
+/**
+ * rdma_find_gid - Returns SGID attributes if the matching GID is found.
+ * @device: The device to query.
+ * @gid: The GID value to search for.
+ * @gid_type: The GID type to search for.
+ * @ndev: In RoCE, the net device of the device. NULL means ignore.
+ *
+ * rdma_find_gid() searches for the specified GID value in the software cache.
+ *
+ * Returns the GID attributes if a valid GID is found, or an ERR_PTR on
+ * error. The caller must invoke rdma_put_gid_attr() to release the reference.
+ *
+ */
+const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
+ const union ib_gid *gid,
+ enum ib_gid_type gid_type,
+ struct net_device *ndev)
+{
+ unsigned long mask = GID_ATTR_FIND_MASK_GID |
+ GID_ATTR_FIND_MASK_GID_TYPE;
+ struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
+ u32 p;
+
+ if (ndev)
+ mask |= GID_ATTR_FIND_MASK_NETDEV;
+
+ rdma_for_each_port(device, p) {
+ struct ib_gid_table *table;
+ unsigned long flags;
+ int index;
+
+ table = device->port_data[p].cache.gid;
+ read_lock_irqsave(&table->rwlock, flags);
+ index = find_gid(table, gid, &gid_attr_val, false, mask, NULL);
+ if (index >= 0) {
+ const struct ib_gid_attr *attr;
+
+ get_gid_entry(table->data_vec[index]);
+ attr = &table->data_vec[index]->attr;
+ read_unlock_irqrestore(&table->rwlock, flags);
+ return attr;
+ }
+ read_unlock_irqrestore(&table->rwlock, flags);
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL(rdma_find_gid);
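/*
 * Usage sketch (illustration only, not part of this diff): the lookup/put
 * pairing that rdma_find_gid() requires. The GID value is supplied by the
 * hypothetical caller and IB_GID_TYPE_IB is just an example gid_type.
 */
static bool example_gid_exists(struct ib_device *dev, const union ib_gid *gid)
{
	const struct ib_gid_attr *attr;

	/* NULL ndev: do not restrict the search to a particular netdev. */
	attr = rdma_find_gid(dev, gid, IB_GID_TYPE_IB, NULL);
	if (IS_ERR(attr))
		return false;

	/* ... use attr->port_num, attr->index, attr->gid_type ... */

	/* Every successful lookup must be balanced with a put. */
	rdma_put_gid_attr(attr);
	return true;
}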
int ib_get_cached_pkey(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
int index,
u16 *pkey)
{
@@ -676,28 +1053,37 @@ int ib_get_cached_pkey(struct ib_device *device,
unsigned long flags;
int ret = 0;
- if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
+ if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
- read_lock_irqsave(&device->cache.lock, flags);
+ read_lock_irqsave(&device->cache_lock, flags);
- cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
+ cache = device->port_data[port_num].cache.pkey;
- if (index < 0 || index >= cache->table_len)
+ if (!cache || index < 0 || index >= cache->table_len)
ret = -EINVAL;
else
*pkey = cache->table[index];
- read_unlock_irqrestore(&device->cache.lock, flags);
+ read_unlock_irqrestore(&device->cache_lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);
-int ib_find_cached_pkey(struct ib_device *device,
- u8 port_num,
- u16 pkey,
- u16 *index)
+void ib_get_cached_subnet_prefix(struct ib_device *device, u32 port_num,
+ u64 *sn_pfx)
+{
+ unsigned long flags;
+
+ read_lock_irqsave(&device->cache_lock, flags);
+ *sn_pfx = device->port_data[port_num].cache.subnet_prefix;
+ read_unlock_irqrestore(&device->cache_lock, flags);
+}
+EXPORT_SYMBOL(ib_get_cached_subnet_prefix);
+
+int ib_find_cached_pkey(struct ib_device *device, u32 port_num,
+ u16 pkey, u16 *index)
{
struct ib_pkey_cache *cache;
unsigned long flags;
@@ -705,12 +1091,16 @@ int ib_find_cached_pkey(struct ib_device *device,
int ret = -ENOENT;
int partial_ix = -1;
- if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
+ if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
- read_lock_irqsave(&device->cache.lock, flags);
+ read_lock_irqsave(&device->cache_lock, flags);
- cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
+ cache = device->port_data[port_num].cache.pkey;
+ if (!cache) {
+ ret = -EINVAL;
+ goto err;
+ }
*index = -1;
@@ -720,8 +1110,9 @@ int ib_find_cached_pkey(struct ib_device *device,
*index = i;
ret = 0;
break;
- } else
+ } else {
partial_ix = i;
+ }
}
if (ret && partial_ix >= 0) {
@@ -729,234 +1120,509 @@ int ib_find_cached_pkey(struct ib_device *device,
ret = 0;
}
- read_unlock_irqrestore(&device->cache.lock, flags);
+err:
+ read_unlock_irqrestore(&device->cache_lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
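/*
 * Usage sketch (illustration only, not part of this diff): the cached P_Key
 * helpers side by side. The value 0xffff (the default full-membership P_Key)
 * is only an example; the device and port are hypothetical.
 */
static void example_pkey_lookup(struct ib_device *dev, u32 port)
{
	u16 pkey, index;

	/* Read the P_Key stored at table index 0. */
	if (!ib_get_cached_pkey(dev, port, 0, &pkey))
		pr_info("pkey[0] = 0x%04x\n", pkey);

	/* Find which index holds the default P_Key, if any. */
	if (!ib_find_cached_pkey(dev, port, 0xffff, &index))
		pr_info("default pkey at index %u\n", index);
}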
-int ib_find_exact_cached_pkey(struct ib_device *device,
- u8 port_num,
- u16 pkey,
- u16 *index)
+int ib_get_cached_lmc(struct ib_device *device, u32 port_num, u8 *lmc)
{
- struct ib_pkey_cache *cache;
unsigned long flags;
- int i;
- int ret = -ENOENT;
+ int ret = 0;
- if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
+ if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
- read_lock_irqsave(&device->cache.lock, flags);
+ read_lock_irqsave(&device->cache_lock, flags);
+ *lmc = device->port_data[port_num].cache.lmc;
+ read_unlock_irqrestore(&device->cache_lock, flags);
- cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
+ return ret;
+}
+EXPORT_SYMBOL(ib_get_cached_lmc);
- *index = -1;
+int ib_get_cached_port_state(struct ib_device *device, u32 port_num,
+ enum ib_port_state *port_state)
+{
+ unsigned long flags;
+ int ret = 0;
- for (i = 0; i < cache->table_len; ++i)
- if (cache->table[i] == pkey) {
- *index = i;
- ret = 0;
- break;
- }
+ if (!rdma_is_port_valid(device, port_num))
+ return -EINVAL;
+
+ read_lock_irqsave(&device->cache_lock, flags);
+ *port_state = device->port_data[port_num].cache.port_state;
+ read_unlock_irqrestore(&device->cache_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(ib_get_cached_port_state);
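/*
 * Usage sketch (illustration only, not part of this diff): the LMC,
 * port-state and subnet-prefix accessors above all read the per-port software
 * cache under device->cache_lock; a hypothetical caller might combine them
 * like this.
 */
static void example_port_snapshot(struct ib_device *dev, u32 port)
{
	enum ib_port_state state;
	u64 sn_pfx;
	u8 lmc;

	if (ib_get_cached_lmc(dev, port, &lmc))
		return;
	if (ib_get_cached_port_state(dev, port, &state))
		return;
	ib_get_cached_subnet_prefix(dev, port, &sn_pfx);

	pr_info("port %u: lmc %u state %d subnet_prefix 0x%llx\n",
		port, lmc, state, sn_pfx);
}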
+
+/**
+ * rdma_get_gid_attr - Returns GID attributes for a port of a device
+ * at a requested gid_index, if a valid GID entry exists.
+ * @device: The device to query.
+ * @port_num: The port number on the device where the GID value
+ * is to be queried.
+ * @index: Index of the GID table entry whose attributes are to
+ * be queried.
+ *
+ * rdma_get_gid_attr() acquires a reference to the GID attribute from the
+ * cached GID table. The caller must invoke rdma_put_gid_attr() to release
+ * the reference to the GID attribute, regardless of link layer.
+ *
+ * Returns a pointer to a valid GID attribute or an ERR_PTR with the
+ * appropriate error code.
+ */
+const struct ib_gid_attr *
+rdma_get_gid_attr(struct ib_device *device, u32 port_num, int index)
+{
+ const struct ib_gid_attr *attr = ERR_PTR(-ENODATA);
+ struct ib_gid_table *table;
+ unsigned long flags;
+
+ if (!rdma_is_port_valid(device, port_num))
+ return ERR_PTR(-EINVAL);
+
+ table = rdma_gid_table(device, port_num);
+ if (index < 0 || index >= table->sz)
+ return ERR_PTR(-EINVAL);
- read_unlock_irqrestore(&device->cache.lock, flags);
+ read_lock_irqsave(&table->rwlock, flags);
+ if (!is_gid_entry_valid(table->data_vec[index]))
+ goto done;
+ get_gid_entry(table->data_vec[index]);
+ attr = &table->data_vec[index]->attr;
+done:
+ read_unlock_irqrestore(&table->rwlock, flags);
+ return attr;
+}
+EXPORT_SYMBOL(rdma_get_gid_attr);
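/*
 * Usage sketch (illustration only, not part of this diff): rdma_get_gid_attr()
 * hands back a reference-counted entry, so the usual pattern is
 * get -> use -> put, as in this hypothetical helper.
 */
static int example_print_gid_type(struct ib_device *dev, u32 port, int index)
{
	const struct ib_gid_attr *attr;

	attr = rdma_get_gid_attr(dev, port, index);
	if (IS_ERR(attr))
		return PTR_ERR(attr);

	pr_info("port %u gid[%d] type %d\n", port, index, attr->gid_type);

	rdma_put_gid_attr(attr);
	return 0;
}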
+
+/**
+ * rdma_query_gid_table - Reads GID table entries of all the ports of a device up to max_entries.
+ * @device: The device to query.
+ * @entries: Array in which the GID entries are returned.
+ * @max_entries: Maximum number of entries that can be returned.
+ * Entries array must be allocated to hold max_entries number of entries.
+ *
+ * Returns number of entries on success or appropriate error code.
+ */
+ssize_t rdma_query_gid_table(struct ib_device *device,
+ struct ib_uverbs_gid_entry *entries,
+ size_t max_entries)
+{
+ const struct ib_gid_attr *gid_attr;
+ ssize_t num_entries = 0, ret;
+ struct ib_gid_table *table;
+ u32 port_num, i;
+ struct net_device *ndev;
+ unsigned long flags;
+
+ rdma_for_each_port(device, port_num) {
+ table = rdma_gid_table(device, port_num);
+ read_lock_irqsave(&table->rwlock, flags);
+ for (i = 0; i < table->sz; i++) {
+ if (!is_gid_entry_valid(table->data_vec[i]))
+ continue;
+ if (num_entries >= max_entries) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ gid_attr = &table->data_vec[i]->attr;
+
+ memcpy(&entries->gid, &gid_attr->gid,
+ sizeof(gid_attr->gid));
+ entries->gid_index = gid_attr->index;
+ entries->port_num = gid_attr->port_num;
+ entries->gid_type = gid_attr->gid_type;
+ ndev = rcu_dereference_protected(
+ gid_attr->ndev,
+ lockdep_is_held(&table->rwlock));
+ if (ndev)
+ entries->netdev_ifindex = ndev->ifindex;
+
+ num_entries++;
+ entries++;
+ }
+ read_unlock_irqrestore(&table->rwlock, flags);
+ }
+
+ return num_entries;
+err:
+ read_unlock_irqrestore(&table->rwlock, flags);
return ret;
}
-EXPORT_SYMBOL(ib_find_exact_cached_pkey);
+EXPORT_SYMBOL(rdma_query_gid_table);
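/*
 * Usage sketch (illustration only, not part of this diff): rdma_query_gid_table()
 * fills a caller-provided array. The fixed size of 32 entries is an arbitrary
 * example, and the snippet assumes the usual kernel headers for kcalloc() and
 * struct ib_uverbs_gid_entry.
 */
static void example_dump_gid_table(struct ib_device *dev)
{
	struct ib_uverbs_gid_entry *entries;
	ssize_t n, i;

	entries = kcalloc(32, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return;

	n = rdma_query_gid_table(dev, entries, 32);
	for (i = 0; i < n; i++)
		pr_info("port %u index %u type %u\n",
			entries[i].port_num, entries[i].gid_index,
			entries[i].gid_type);

	kfree(entries);
}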
-int ib_get_cached_lmc(struct ib_device *device,
- u8 port_num,
- u8 *lmc)
+/**
+ * rdma_put_gid_attr - Release reference to the GID attribute
+ * @attr: Pointer to the GID attribute whose reference
+ * needs to be released.
+ *
+ * rdma_put_gid_attr() must be used to release a reference that was
+ * acquired using rdma_get_gid_attr() or any other API that returns a
+ * pointer to an ib_gid_attr, regardless of whether the link layer is
+ * IB or RoCE.
+ *
+ */
+void rdma_put_gid_attr(const struct ib_gid_attr *attr)
+{
+ struct ib_gid_table_entry *entry =
+ container_of(attr, struct ib_gid_table_entry, attr);
+
+ put_gid_entry(entry);
+}
+EXPORT_SYMBOL(rdma_put_gid_attr);
+
+/**
+ * rdma_hold_gid_attr - Get reference to existing GID attribute
+ *
+ * @attr: Pointer to the GID attribute whose reference
+ * needs to be taken.
+ *
+ * Increase the reference count to a GID attribute to keep it from being
+ * freed. Callers are required to already be holding a reference to the attribute.
+ *
+ */
+void rdma_hold_gid_attr(const struct ib_gid_attr *attr)
+{
+ struct ib_gid_table_entry *entry =
+ container_of(attr, struct ib_gid_table_entry, attr);
+
+ get_gid_entry(entry);
+}
+EXPORT_SYMBOL(rdma_hold_gid_attr);
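/*
 * Usage sketch (illustration only, not part of this diff): hold and put are
 * symmetric, so stashing a GID attribute beyond the scope in which it was
 * obtained looks like this. Both helper names are hypothetical.
 */
static void example_stash_gid_attr(const struct ib_gid_attr *attr,
				   const struct ib_gid_attr **slot)
{
	/* The caller already holds a reference; take one more for *slot. */
	rdma_hold_gid_attr(attr);
	*slot = attr;
}

static void example_release_gid_attr(const struct ib_gid_attr **slot)
{
	if (*slot) {
		rdma_put_gid_attr(*slot);
		*slot = NULL;
	}
}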
+
+/**
+ * rdma_read_gid_attr_ndev_rcu - Read GID attribute netdevice
+ * which must be in UP state.
+ *
+ * @attr:Pointer to the GID attribute
+ *
+ * Returns a pointer to the netdevice if a netdevice was attached to the GID
+ * and the netdevice is in UP state. The caller must hold the RCU read lock,
+ * as this API reads netdev flags which can change while the netdevice
+ * migrates to a different net namespace. Returns ERR_PTR with an error code
+ * otherwise.
+ *
+ */
+struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
{
+ struct ib_gid_table_entry *entry =
+ container_of(attr, struct ib_gid_table_entry, attr);
+ struct ib_device *device = entry->attr.device;
+ struct net_device *ndev = ERR_PTR(-EINVAL);
+ u32 port_num = entry->attr.port_num;
+ struct ib_gid_table *table;
unsigned long flags;
+ bool valid;
+
+ table = rdma_gid_table(device, port_num);
+
+ read_lock_irqsave(&table->rwlock, flags);
+ valid = is_gid_entry_valid(table->data_vec[attr->index]);
+ if (valid) {
+ ndev = rcu_dereference(attr->ndev);
+ if (!ndev)
+ ndev = ERR_PTR(-ENODEV);
+ }
+ read_unlock_irqrestore(&table->rwlock, flags);
+ return ndev;
+}
+EXPORT_SYMBOL(rdma_read_gid_attr_ndev_rcu);
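/*
 * Usage sketch (illustration only, not part of this diff): the _rcu suffix
 * means the caller provides the RCU read-side critical section. The helper
 * name is hypothetical.
 */
static int example_gid_ifindex(const struct ib_gid_attr *attr)
{
	struct net_device *ndev;
	int ifindex = -ENODEV;

	rcu_read_lock();
	ndev = rdma_read_gid_attr_ndev_rcu(attr);
	if (!IS_ERR(ndev))
		ifindex = ndev->ifindex;	/* read while still under RCU */
	rcu_read_unlock();

	return ifindex;
}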
+
+static int get_lower_dev_vlan(struct net_device *lower_dev,
+ struct netdev_nested_priv *priv)
+{
+ u16 *vlan_id = (u16 *)priv->data;
+
+ if (is_vlan_dev(lower_dev))
+ *vlan_id = vlan_dev_vlan_id(lower_dev);
+
+ /* We are interested only in the first-level vlan device, so
+ * always return 1 to stop iterating over next-level devices.
+ */
+ return 1;
+}
+
+/**
+ * rdma_read_gid_l2_fields - Read the vlan ID and source MAC address
+ * of a GID entry.
+ *
+ * @attr: GID attribute pointer whose L2 fields to be read
+ * @vlan_id: Pointer to the vlan id to fill in if the GID entry has a
+ * vlan id. It is optional.
+ * @smac: Pointer to the smac to fill in for the GID entry. It is optional.
+ *
+ * rdma_read_gid_l2_fields() returns 0 on success, filling in the vlan id
+ * (if the gid entry has a vlan) and the source MAC, or returns an error.
+ */
+int rdma_read_gid_l2_fields(const struct ib_gid_attr *attr,
+ u16 *vlan_id, u8 *smac)
+{
+ struct netdev_nested_priv priv = {
+ .data = (void *)vlan_id,
+ };
+ struct net_device *ndev;
+
+ rcu_read_lock();
+ ndev = rcu_dereference(attr->ndev);
+ if (!ndev) {
+ rcu_read_unlock();
+ return -ENODEV;
+ }
+ if (smac)
+ ether_addr_copy(smac, ndev->dev_addr);
+ if (vlan_id) {
+ *vlan_id = 0xffff;
+ if (is_vlan_dev(ndev)) {
+ *vlan_id = vlan_dev_vlan_id(ndev);
+ } else {
+ /* If the netdev is an upper device and its lower
+ * device is a vlan device, use the vlan id of
+ * the lower vlan device for this gid entry.
+ */
+ netdev_walk_all_lower_dev_rcu(attr->ndev,
+ get_lower_dev_vlan, &priv);
+ }
+ }
+ rcu_read_unlock();
+ return 0;
+}
+EXPORT_SYMBOL(rdma_read_gid_l2_fields);
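/*
 * Usage sketch (illustration only, not part of this diff): both output
 * parameters are optional, so a RoCE caller that only needs the VLAN might
 * use the helper like this. 0xffff as "no vlan" follows the code above.
 */
static u16 example_gid_vlan(const struct ib_gid_attr *attr)
{
	u16 vlan_id = 0xffff;

	if (rdma_read_gid_l2_fields(attr, &vlan_id, NULL))
		return 0xffff;	/* no netdev attached to this GID */
	return vlan_id;
}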
+
+static int config_non_roce_gid_cache(struct ib_device *device,
+ u32 port, struct ib_port_attr *tprops)
+{
+ struct ib_gid_attr gid_attr = {};
+ struct ib_gid_table *table;
int ret = 0;
+ int i;
- if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
- return -EINVAL;
+ gid_attr.device = device;
+ gid_attr.port_num = port;
+ table = rdma_gid_table(device, port);
- read_lock_irqsave(&device->cache.lock, flags);
- *lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
- read_unlock_irqrestore(&device->cache.lock, flags);
+ mutex_lock(&table->lock);
+ for (i = 0; i < tprops->gid_tbl_len; ++i) {
+ if (!device->ops.query_gid)
+ continue;
+ ret = device->ops.query_gid(device, port, i, &gid_attr.gid);
+ if (ret) {
+ dev_warn(&device->dev,
+ "query_gid failed (%d) for index %d\n", ret,
+ i);
+ goto err;
+ }
+
+ if (rdma_protocol_iwarp(device, port)) {
+ struct net_device *ndev;
+
+ ndev = ib_device_get_netdev(device, port);
+ if (!ndev)
+ continue;
+ RCU_INIT_POINTER(gid_attr.ndev, ndev);
+ dev_put(ndev);
+ }
+ gid_attr.index = i;
+ tprops->subnet_prefix =
+ be64_to_cpu(gid_attr.gid.global.subnet_prefix);
+ add_modify_gid(table, &gid_attr);
+ }
+err:
+ mutex_unlock(&table->lock);
return ret;
}
-EXPORT_SYMBOL(ib_get_cached_lmc);
-static void ib_cache_update(struct ib_device *device,
- u8 port)
+static int
+ib_cache_update(struct ib_device *device, u32 port, bool update_gids,
+ bool update_pkeys, bool enforce_security)
{
struct ib_port_attr *tprops = NULL;
- struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
- struct ib_gid_cache {
- int table_len;
- union ib_gid table[0];
- } *gid_cache = NULL;
+ struct ib_pkey_cache *pkey_cache = NULL;
+ struct ib_pkey_cache *old_pkey_cache = NULL;
int i;
int ret;
- struct ib_gid_table *table;
- struct ib_gid_table **ports_table = device->cache.gid_cache;
- bool use_roce_gid_table =
- rdma_cap_roce_gid_table(device, port);
- if (port < rdma_start_port(device) || port > rdma_end_port(device))
- return;
-
- table = ports_table[port - rdma_start_port(device)];
+ if (!rdma_is_port_valid(device, port))
+ return -EINVAL;
tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
if (!tprops)
- return;
+ return -ENOMEM;
ret = ib_query_port(device, port, tprops);
if (ret) {
- printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
- ret, device->name);
+ dev_warn(&device->dev, "ib_query_port failed (%d)\n", ret);
goto err;
}
- pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
- sizeof *pkey_cache->table, GFP_KERNEL);
- if (!pkey_cache)
- goto err;
-
- pkey_cache->table_len = tprops->pkey_tbl_len;
-
- if (!use_roce_gid_table) {
- gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
- sizeof(*gid_cache->table), GFP_KERNEL);
- if (!gid_cache)
+ if (!rdma_protocol_roce(device, port) && update_gids) {
+ ret = config_non_roce_gid_cache(device, port,
+ tprops);
+ if (ret)
goto err;
-
- gid_cache->table_len = tprops->gid_tbl_len;
}
- for (i = 0; i < pkey_cache->table_len; ++i) {
- ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
- if (ret) {
- printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
- ret, device->name, i);
+ update_pkeys &= !!tprops->pkey_tbl_len;
+
+ if (update_pkeys) {
+ pkey_cache = kmalloc(struct_size(pkey_cache, table,
+ tprops->pkey_tbl_len),
+ GFP_KERNEL);
+ if (!pkey_cache) {
+ ret = -ENOMEM;
goto err;
}
- }
- if (!use_roce_gid_table) {
- for (i = 0; i < gid_cache->table_len; ++i) {
- ret = ib_query_gid(device, port, i,
- gid_cache->table + i);
+ pkey_cache->table_len = tprops->pkey_tbl_len;
+
+ for (i = 0; i < pkey_cache->table_len; ++i) {
+ ret = ib_query_pkey(device, port, i,
+ pkey_cache->table + i);
if (ret) {
- printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
- ret, device->name, i);
+ dev_warn(&device->dev,
+ "ib_query_pkey failed (%d) for index %d\n",
+ ret, i);
goto err;
}
}
}
- write_lock_irq(&device->cache.lock);
+ write_lock_irq(&device->cache_lock);
- old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];
-
- device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
- if (!use_roce_gid_table) {
- for (i = 0; i < gid_cache->table_len; i++) {
- modify_gid(device, port, table, i, gid_cache->table + i,
- &zattr, false);
- }
+ if (update_pkeys) {
+ old_pkey_cache = device->port_data[port].cache.pkey;
+ device->port_data[port].cache.pkey = pkey_cache;
}
+ device->port_data[port].cache.lmc = tprops->lmc;
- device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;
+ if (device->port_data[port].cache.port_state != IB_PORT_NOP &&
+ device->port_data[port].cache.port_state != tprops->state)
+ ibdev_info(device, "Port: %d Link %s\n", port,
+ ib_port_state_to_str(tprops->state));
- write_unlock_irq(&device->cache.lock);
+ device->port_data[port].cache.port_state = tprops->state;
+
+ device->port_data[port].cache.subnet_prefix = tprops->subnet_prefix;
+ write_unlock_irq(&device->cache_lock);
+
+ if (enforce_security)
+ ib_security_cache_change(device,
+ port,
+ tprops->subnet_prefix);
- kfree(gid_cache);
kfree(old_pkey_cache);
kfree(tprops);
- return;
+ return 0;
err:
kfree(pkey_cache);
- kfree(gid_cache);
kfree(tprops);
+ return ret;
+}
+
+static void ib_cache_event_task(struct work_struct *_work)
+{
+ struct ib_update_work *work =
+ container_of(_work, struct ib_update_work, work);
+ int ret;
+
+ /* Before distributing the cache update event, first sync
+ * the cache.
+ */
+ ret = ib_cache_update(work->event.device, work->event.element.port_num,
+ work->event.event == IB_EVENT_GID_CHANGE,
+ work->event.event == IB_EVENT_PKEY_CHANGE,
+ work->enforce_security);
+
+ /* A GID change event is already notified for individual GID entries
+ * by dispatch_gid_change_event(). Hence, notify only for the remaining
+ * events.
+ */
+ if (!ret && work->event.event != IB_EVENT_GID_CHANGE)
+ ib_dispatch_event_clients(&work->event);
+
+ kfree(work);
}
-static void ib_cache_task(struct work_struct *_work)
+static void ib_generic_event_task(struct work_struct *_work)
{
struct ib_update_work *work =
container_of(_work, struct ib_update_work, work);
- ib_cache_update(work->device, work->port_num);
+ ib_dispatch_event_clients(&work->event);
kfree(work);
}
-static void ib_cache_event(struct ib_event_handler *handler,
- struct ib_event *event)
+static bool is_cache_update_event(const struct ib_event *event)
+{
+ return (event->event == IB_EVENT_PORT_ERR ||
+ event->event == IB_EVENT_PORT_ACTIVE ||
+ event->event == IB_EVENT_LID_CHANGE ||
+ event->event == IB_EVENT_PKEY_CHANGE ||
+ event->event == IB_EVENT_CLIENT_REREGISTER ||
+ event->event == IB_EVENT_GID_CHANGE);
+}
+
+/**
+ * ib_dispatch_event - Dispatch an asynchronous event
+ * @event:Event to dispatch
+ *
+ * Low-level drivers must call ib_dispatch_event() to dispatch the
+ * event to all registered event handlers when an asynchronous event
+ * occurs.
+ */
+void ib_dispatch_event(const struct ib_event *event)
{
struct ib_update_work *work;
- if (event->event == IB_EVENT_PORT_ERR ||
- event->event == IB_EVENT_PORT_ACTIVE ||
- event->event == IB_EVENT_LID_CHANGE ||
- event->event == IB_EVENT_PKEY_CHANGE ||
- event->event == IB_EVENT_SM_CHANGE ||
- event->event == IB_EVENT_CLIENT_REREGISTER ||
- event->event == IB_EVENT_GID_CHANGE) {
- work = kmalloc(sizeof *work, GFP_ATOMIC);
- if (work) {
- INIT_WORK(&work->work, ib_cache_task);
- work->device = event->device;
- work->port_num = event->element.port_num;
- queue_work(ib_wq, &work->work);
- }
- }
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ if (is_cache_update_event(event))
+ INIT_WORK(&work->work, ib_cache_event_task);
+ else
+ INIT_WORK(&work->work, ib_generic_event_task);
+
+ work->event = *event;
+ if (event->event == IB_EVENT_PKEY_CHANGE ||
+ event->event == IB_EVENT_GID_CHANGE)
+ work->enforce_security = true;
+
+ queue_work(ib_wq, &work->work);
}
+EXPORT_SYMBOL(ib_dispatch_event);
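/*
 * Usage sketch (illustration only, not part of this diff): with the rework
 * above, a provider driver still reports asynchronous events the same way;
 * cache-affecting events are now synced before being fanned out. The helper
 * name and call site are hypothetical.
 */
static void example_report_port_active(struct ib_device *dev, u32 port)
{
	struct ib_event event = {
		.device = dev,
		.element.port_num = port,
		.event = IB_EVENT_PORT_ACTIVE,
	};

	/* Queued to ib_wq; the cache is refreshed before clients see it. */
	ib_dispatch_event(&event);
}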
int ib_cache_setup_one(struct ib_device *device)
{
- int p;
+ u32 p;
int err;
- rwlock_init(&device->cache.lock);
-
- device->cache.pkey_cache =
- kzalloc(sizeof *device->cache.pkey_cache *
- (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
- device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
- (rdma_end_port(device) -
- rdma_start_port(device) + 1),
- GFP_KERNEL);
- if (!device->cache.pkey_cache ||
- !device->cache.lmc_cache) {
- printk(KERN_WARNING "Couldn't allocate cache "
- "for %s\n", device->name);
- return -ENOMEM;
- }
-
err = gid_table_setup_one(device);
if (err)
- /* Allocated memory will be cleaned in the release function */
return err;
- for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
- ib_cache_update(device, p + rdma_start_port(device));
-
- INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
- device, ib_cache_event);
- err = ib_register_event_handler(&device->cache.event_handler);
- if (err)
- goto err;
+ rdma_for_each_port (device, p) {
+ err = ib_cache_update(device, p, true, true, true);
+ if (err) {
+ gid_table_cleanup_one(device);
+ return err;
+ }
+ }
return 0;
-
-err:
- gid_table_cleanup_one(device);
- return err;
}
void ib_cache_release_one(struct ib_device *device)
{
- int p;
+ u32 p;
/*
* The release function frees all the cache elements.
@@ -964,36 +1630,25 @@ void ib_cache_release_one(struct ib_device *device)
* all the device's resources when the cache could no
* longer be accessed.
*/
- if (device->cache.pkey_cache)
- for (p = 0;
- p <= rdma_end_port(device) - rdma_start_port(device); ++p)
- kfree(device->cache.pkey_cache[p]);
+ rdma_for_each_port (device, p)
+ kfree(device->port_data[p].cache.pkey);
gid_table_release_one(device);
- kfree(device->cache.pkey_cache);
- kfree(device->cache.lmc_cache);
}
void ib_cache_cleanup_one(struct ib_device *device)
{
- /* The cleanup function unregisters the event handler,
- * waits for all in-progress workqueue elements and cleans
- * up the GID cache. This function should be called after
- * the device was removed from the devices list and all
- * clients were removed, so the cache exists but is
+ /* The cleanup function waits for all in-progress workqueue
+ * elements and cleans up the GID cache. This function should be
+ * called after the device was removed from the devices list and
+ * all clients were removed, so the cache exists but is
* non-functional and shouldn't be updated anymore.
*/
- ib_unregister_event_handler(&device->cache.event_handler);
flush_workqueue(ib_wq);
gid_table_cleanup_one(device);
-}
-void __init ib_cache_setup(void)
-{
- roce_gid_mgmt_init();
-}
-
-void __exit ib_cache_cleanup(void)
-{
- roce_gid_mgmt_cleanup();
+ /*
+ * Flush the wq a second time for any pending GID delete work.
+ */
+ flush_workqueue(ib_wq);
}
diff --git a/drivers/infiniband/core/cgroup.c b/drivers/infiniband/core/cgroup.c
new file mode 100644
index 000000000000..1f037fe01450
--- /dev/null
+++ b/drivers/infiniband/core/cgroup.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2016 Parav Pandit <pandit.parav@gmail.com>
+ */
+
+#include "core_priv.h"
+
+/**
+ * ib_device_register_rdmacg - register with rdma cgroup.
+ * @device: device to register to participate in resource
+ * accounting by rdma cgroup.
+ *
+ * Register with the rdma cgroup. Should be called before
+ * exposing the rdma device to user space applications to avoid a
+ * resource accounting leak.
+ */
+void ib_device_register_rdmacg(struct ib_device *device)
+{
+ device->cg_device.name = device->name;
+ rdmacg_register_device(&device->cg_device);
+}
+
+/**
+ * ib_device_unregister_rdmacg - unregister with rdma cgroup.
+ * @device: device to unregister.
+ *
+ * Unregister with the rdma cgroup. Should be called after
+ * all the resources are deallocated, and after the point at which no
+ * further resource allocation by user applications can occur for this
+ * device, to avoid any leak in accounting.
+ */
+void ib_device_unregister_rdmacg(struct ib_device *device)
+{
+ rdmacg_unregister_device(&device->cg_device);
+}
+
+int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj,
+ struct ib_device *device,
+ enum rdmacg_resource_type resource_index)
+{
+ return rdmacg_try_charge(&cg_obj->cg, &device->cg_device,
+ resource_index);
+}
+EXPORT_SYMBOL(ib_rdmacg_try_charge);
+
+void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj,
+ struct ib_device *device,
+ enum rdmacg_resource_type resource_index)
+{
+ rdmacg_uncharge(cg_obj->cg, &device->cg_device,
+ resource_index);
+}
+EXPORT_SYMBOL(ib_rdmacg_uncharge);
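/*
 * Usage sketch (illustration only, not part of this diff): charge and
 * uncharge must be balanced, so an object-creation path typically charges
 * first and uncharges on failure. The function name and the creation_failed
 * flag are hypothetical; RDMACG_RESOURCE_HCA_OBJECT is assumed to come from
 * the rdma cgroup controller's resource types.
 */
static int example_charged_alloc(struct ib_device *dev,
				 struct ib_rdmacg_object *cg_obj,
				 bool creation_failed)
{
	int ret;

	ret = ib_rdmacg_try_charge(cg_obj, dev, RDMACG_RESOURCE_HCA_OBJECT);
	if (ret)
		return ret;

	/* ... create the HCA object here ... */

	if (creation_failed) {
		/* Undo the charge if the object could not be created. */
		ib_rdmacg_uncharge(cg_obj, dev, RDMACG_RESOURCE_HCA_OBJECT);
		return -ENOMEM;
	}
	return 0;
}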
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index ea4db9c1d44f..01bede8ba105 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1,36 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2004-2007 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
*/
#include <linux/completion.h>
@@ -51,14 +25,84 @@
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
+#include <rdma/ib_sysfs.h>
#include "cm_msgs.h"
+#include "core_priv.h"
+#include "cm_trace.h"
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
-static void cm_add_one(struct ib_device *device);
+#define CM_DESTROY_ID_WAIT_TIMEOUT 10000 /* msecs */
+#define CM_DIRECT_RETRY_CTX ((void *) 1UL)
+#define CM_MRA_SETTING 24 /* 4.096us * 2^24 = ~68.7 seconds */
+
+static const char * const ibcm_rej_reason_strs[] = {
+ [IB_CM_REJ_NO_QP] = "no QP",
+ [IB_CM_REJ_NO_EEC] = "no EEC",
+ [IB_CM_REJ_NO_RESOURCES] = "no resources",
+ [IB_CM_REJ_TIMEOUT] = "timeout",
+ [IB_CM_REJ_UNSUPPORTED] = "unsupported",
+ [IB_CM_REJ_INVALID_COMM_ID] = "invalid comm ID",
+ [IB_CM_REJ_INVALID_COMM_INSTANCE] = "invalid comm instance",
+ [IB_CM_REJ_INVALID_SERVICE_ID] = "invalid service ID",
+ [IB_CM_REJ_INVALID_TRANSPORT_TYPE] = "invalid transport type",
+ [IB_CM_REJ_STALE_CONN] = "stale conn",
+ [IB_CM_REJ_RDC_NOT_EXIST] = "RDC not exist",
+ [IB_CM_REJ_INVALID_GID] = "invalid GID",
+ [IB_CM_REJ_INVALID_LID] = "invalid LID",
+ [IB_CM_REJ_INVALID_SL] = "invalid SL",
+ [IB_CM_REJ_INVALID_TRAFFIC_CLASS] = "invalid traffic class",
+ [IB_CM_REJ_INVALID_HOP_LIMIT] = "invalid hop limit",
+ [IB_CM_REJ_INVALID_PACKET_RATE] = "invalid packet rate",
+ [IB_CM_REJ_INVALID_ALT_GID] = "invalid alt GID",
+ [IB_CM_REJ_INVALID_ALT_LID] = "invalid alt LID",
+ [IB_CM_REJ_INVALID_ALT_SL] = "invalid alt SL",
+ [IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS] = "invalid alt traffic class",
+ [IB_CM_REJ_INVALID_ALT_HOP_LIMIT] = "invalid alt hop limit",
+ [IB_CM_REJ_INVALID_ALT_PACKET_RATE] = "invalid alt packet rate",
+ [IB_CM_REJ_PORT_CM_REDIRECT] = "port CM redirect",
+ [IB_CM_REJ_PORT_REDIRECT] = "port redirect",
+ [IB_CM_REJ_INVALID_MTU] = "invalid MTU",
+ [IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES] = "insufficient resp resources",
+ [IB_CM_REJ_CONSUMER_DEFINED] = "consumer defined",
+ [IB_CM_REJ_INVALID_RNR_RETRY] = "invalid RNR retry",
+ [IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID] = "duplicate local comm ID",
+ [IB_CM_REJ_INVALID_CLASS_VERSION] = "invalid class version",
+ [IB_CM_REJ_INVALID_FLOW_LABEL] = "invalid flow label",
+ [IB_CM_REJ_INVALID_ALT_FLOW_LABEL] = "invalid alt flow label",
+ [IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED] =
+ "vendor option is not supported",
+};
+
+const char *__attribute_const__ ibcm_reject_msg(int reason)
+{
+ size_t index = reason;
+
+ if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
+ ibcm_rej_reason_strs[index])
+ return ibcm_rej_reason_strs[index];
+ else
+ return "unrecognized reason";
+}
+EXPORT_SYMBOL(ibcm_reject_msg);
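/*
 * Usage sketch (illustration only, not part of this diff): ibcm_reject_msg()
 * maps a REJ reason code to a human-readable string and falls back to
 * "unrecognized reason" for out-of-range values, which makes it convenient
 * for logging a received REJ. The caller is hypothetical.
 */
static void example_log_rej(struct ib_device *dev, int reason)
{
	dev_warn(&dev->dev, "connection rejected: %s (%d)\n",
		 ibcm_reject_msg(reason), reason);
}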
+
+struct cm_id_private;
+struct cm_work;
+static int cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);
+static void cm_process_work(struct cm_id_private *cm_id_priv,
+ struct cm_work *work);
+static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
+ struct ib_cm_sidr_rep_param *param);
+static void cm_issue_dreq(struct cm_id_private *cm_id_priv);
+static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
+ void *private_data, u8 private_data_len);
+static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
+ enum ib_cm_rej_reason reason, void *ari,
+ u8 ari_length, const void *private_data,
+ u8 private_data_len);
static struct ib_client cm_client = {
.name = "cm",
@@ -76,7 +120,8 @@ static struct ib_cm {
struct rb_root remote_qp_table;
struct rb_root remote_id_table;
struct rb_root remote_sidr_table;
- struct idr local_id_table;
+ struct xarray local_id_table;
+ u32 local_id_next;
__be32 random_id_operand;
struct list_head timewait_list;
struct workqueue_struct *wq;
@@ -107,80 +152,36 @@ enum {
CM_COUNTER_GROUPS
};
-static char const counter_group_names[CM_COUNTER_GROUPS]
- [sizeof("cm_rx_duplicates")] = {
- "cm_tx_msgs", "cm_tx_retries",
- "cm_rx_msgs", "cm_rx_duplicates"
-};
-
-struct cm_counter_group {
- struct kobject obj;
- atomic_long_t counter[CM_ATTR_COUNT];
-};
-
struct cm_counter_attribute {
- struct attribute attr;
- int index;
-};
-
-#define CM_COUNTER_ATTR(_name, _index) \
-struct cm_counter_attribute cm_##_name##_counter_attr = { \
- .attr = { .name = __stringify(_name), .mode = 0444 }, \
- .index = _index \
-}
-
-static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
-static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
-static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
-static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
-static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
-static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
-static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
-static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
-static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
-static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
-static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);
-
-static struct attribute *cm_counter_default_attrs[] = {
- &cm_req_counter_attr.attr,
- &cm_mra_counter_attr.attr,
- &cm_rej_counter_attr.attr,
- &cm_rep_counter_attr.attr,
- &cm_rtu_counter_attr.attr,
- &cm_dreq_counter_attr.attr,
- &cm_drep_counter_attr.attr,
- &cm_sidr_req_counter_attr.attr,
- &cm_sidr_rep_counter_attr.attr,
- &cm_lap_counter_attr.attr,
- &cm_apr_counter_attr.attr,
- NULL
+ struct ib_port_attribute attr;
+ unsigned short group;
+ unsigned short index;
};
struct cm_port {
struct cm_device *cm_dev;
struct ib_mad_agent *mad_agent;
- struct kobject port_obj;
- u8 port_num;
- struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
+ struct ib_mad_agent *rep_agent;
+ u32 port_num;
+ atomic_long_t counters[CM_COUNTER_GROUPS][CM_ATTR_COUNT];
};
struct cm_device {
+ struct kref kref;
struct list_head list;
+ rwlock_t mad_agent_lock;
struct ib_device *ib_device;
- struct device *device;
u8 ack_delay;
int going_down;
- struct cm_port *port[0];
+ struct cm_port *port[];
};
struct cm_av {
struct cm_port *port;
- union ib_gid dgid;
- struct ib_ah_attr ah_attr;
+ struct rdma_ah_attr ah_attr;
+ u16 dlid_datapath;
u16 pkey_index;
u8 timeout;
- u8 valid;
- u8 smac[ETH_ALEN];
};
struct cm_work {
@@ -191,11 +192,11 @@ struct cm_work {
__be32 local_id; /* Established / timewait */
__be32 remote_id;
struct ib_cm_event cm_event;
- struct ib_sa_path_rec path[0];
+ struct sa_path_rec path[];
};
struct cm_timewait_info {
- struct cm_work work; /* Must be first. */
+ struct cm_work work;
struct list_head list;
struct rb_node remote_qp_node;
struct rb_node remote_id_node;
@@ -210,12 +211,15 @@ struct cm_id_private {
struct rb_node service_node;
struct rb_node sidr_id_node;
+ u32 sidr_slid;
spinlock_t lock; /* Do not acquire inside cm.lock */
struct completion comp;
- atomic_t refcount;
+ refcount_t refcount;
/* Number of clients sharing this ib_cm_id. Only valid for listeners.
- * Protected by the cm.lock spinlock. */
+ * Protected by the cm.lock spinlock.
+ */
int listen_sharecount;
+ struct rcu_head rcu;
struct ib_mad_send_buf *msg;
struct cm_timewait_info *timewait_info;
@@ -235,37 +239,67 @@ struct cm_id_private {
__be16 pkey;
u8 private_data_len;
u8 max_cm_retries;
- u8 peer_to_peer;
u8 responder_resources;
u8 initiator_depth;
u8 retry_count;
u8 rnr_retry_count;
- u8 service_timeout;
u8 target_ack_delay;
struct list_head work_list;
atomic_t work_count;
+
+ struct rdma_ucm_ece ece;
};
+static void cm_dev_release(struct kref *kref)
+{
+ struct cm_device *cm_dev = container_of(kref, struct cm_device, kref);
+ u32 i;
+
+ rdma_for_each_port(cm_dev->ib_device, i)
+ kfree(cm_dev->port[i - 1]);
+
+ kfree(cm_dev);
+}
+
+static void cm_device_put(struct cm_device *cm_dev)
+{
+ kref_put(&cm_dev->kref, cm_dev_release);
+}
+
static void cm_work_handler(struct work_struct *work);
static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
- if (atomic_dec_and_test(&cm_id_priv->refcount))
+ if (refcount_dec_and_test(&cm_id_priv->refcount))
complete(&cm_id_priv->comp);
}
-static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
- struct ib_mad_send_buf **msg)
+static struct ib_mad_send_buf *
+cm_alloc_msg_agent(struct cm_id_private *cm_id_priv, bool rep_agent)
{
struct ib_mad_agent *mad_agent;
struct ib_mad_send_buf *m;
struct ib_ah *ah;
- mad_agent = cm_id_priv->av.port->mad_agent;
- ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
- if (IS_ERR(ah))
- return PTR_ERR(ah);
+ lockdep_assert_held(&cm_id_priv->lock);
+
+ if (!cm_id_priv->av.port)
+ return ERR_PTR(-EINVAL);
+
+ read_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ mad_agent = rep_agent ? cm_id_priv->av.port->rep_agent :
+ cm_id_priv->av.port->mad_agent;
+ if (!mad_agent) {
+ m = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ ah = rdma_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr, 0);
+ if (IS_ERR(ah)) {
+ m = ERR_CAST(ah);
+ goto out;
+ }
m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
cm_id_priv->av.pkey_index,
@@ -273,25 +307,93 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
GFP_ATOMIC,
IB_MGMT_BASE_VERSION);
if (IS_ERR(m)) {
- ib_destroy_ah(ah);
- return PTR_ERR(m);
+ rdma_destroy_ah(ah, 0);
+ goto out;
}
- /* Timeout set by caller if response is expected. */
m->ah = ah;
- m->retries = cm_id_priv->max_cm_retries;
- atomic_inc(&cm_id_priv->refcount);
- m->context[0] = cm_id_priv;
- *msg = m;
- return 0;
+out:
+ read_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ return m;
}
-static int cm_alloc_response_msg(struct cm_port *port,
- struct ib_mad_recv_wc *mad_recv_wc,
- struct ib_mad_send_buf **msg)
+static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
+{
+ return cm_alloc_msg_agent(cm_id_priv, false);
+}
+
+static void cm_free_msg(struct ib_mad_send_buf *msg)
+{
+ if (msg->ah)
+ rdma_destroy_ah(msg->ah, 0);
+ ib_free_send_mad(msg);
+}
+
+static struct ib_mad_send_buf *
+cm_alloc_priv_msg_rep(struct cm_id_private *cm_id_priv, enum ib_cm_state state,
+ bool rep_agent)
+{
+ struct ib_mad_send_buf *msg;
+
+ lockdep_assert_held(&cm_id_priv->lock);
+
+ msg = cm_alloc_msg_agent(cm_id_priv, rep_agent);
+ if (IS_ERR(msg))
+ return msg;
+
+ cm_id_priv->msg = msg;
+ refcount_inc(&cm_id_priv->refcount);
+ msg->context[0] = cm_id_priv;
+ msg->context[1] = (void *) (unsigned long) state;
+
+ msg->retries = cm_id_priv->max_cm_retries;
+ msg->timeout_ms = cm_id_priv->timeout_ms;
+
+ return msg;
+}
+
+static struct ib_mad_send_buf *
+cm_alloc_priv_msg(struct cm_id_private *cm_id_priv, enum ib_cm_state state)
+{
+ return cm_alloc_priv_msg_rep(cm_id_priv, state, false);
+}
+
+static void cm_free_priv_msg(struct ib_mad_send_buf *msg)
+{
+ struct cm_id_private *cm_id_priv = msg->context[0];
+
+ lockdep_assert_held(&cm_id_priv->lock);
+
+ if (!WARN_ON(cm_id_priv->msg != msg))
+ cm_id_priv->msg = NULL;
+
+ if (msg->ah)
+ rdma_destroy_ah(msg->ah, 0);
+ cm_deref_id(cm_id_priv);
+ ib_free_send_mad(msg);
+}
+
+static struct ib_mad_send_buf *
+cm_alloc_response_msg_no_ah(struct cm_port *port,
+ struct ib_mad_recv_wc *mad_recv_wc,
+ bool direct_retry)
{
struct ib_mad_send_buf *m;
+
+ m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
+ 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
+ GFP_ATOMIC, IB_MGMT_BASE_VERSION);
+ if (!IS_ERR(m))
+ m->context[0] = direct_retry ? CM_DIRECT_RETRY_CTX : NULL;
+
+ return m;
+}
+
+static int cm_create_response_msg_ah(struct cm_port *port,
+ struct ib_mad_recv_wc *mad_recv_wc,
+ struct ib_mad_send_buf *msg)
+{
struct ib_ah *ah;
ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
@@ -299,29 +401,33 @@ static int cm_alloc_response_msg(struct cm_port *port,
if (IS_ERR(ah))
return PTR_ERR(ah);
- m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
- 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
- GFP_ATOMIC,
- IB_MGMT_BASE_VERSION);
- if (IS_ERR(m)) {
- ib_destroy_ah(ah);
- return PTR_ERR(m);
- }
- m->ah = ah;
- *msg = m;
+ msg->ah = ah;
return 0;
}
-static void cm_free_msg(struct ib_mad_send_buf *msg)
+static int cm_alloc_response_msg(struct cm_port *port,
+ struct ib_mad_recv_wc *mad_recv_wc,
+ bool direct_retry,
+ struct ib_mad_send_buf **msg)
{
- ib_destroy_ah(msg->ah);
- if (msg->context[0])
- cm_deref_id(msg->context[0]);
- ib_free_send_mad(msg);
+ struct ib_mad_send_buf *m;
+ int ret;
+
+ m = cm_alloc_response_msg_no_ah(port, mad_recv_wc, direct_retry);
+ if (IS_ERR(m))
+ return PTR_ERR(m);
+
+ ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
+ if (ret) {
+ ib_free_send_mad(m);
+ return ret;
+ }
+
+ *msg = m;
+ return 0;
}
-static void * cm_copy_private_data(const void *private_data,
- u8 private_data_len)
+static void *cm_copy_private_data(const void *private_data, u8 private_data_len)
{
void *data;
@@ -345,99 +451,150 @@ static void cm_set_private_data(struct cm_id_private *cm_id_priv,
cm_id_priv->private_data_len = private_data_len;
}
-static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
- struct ib_grh *grh, struct cm_av *av)
+static void cm_set_av_port(struct cm_av *av, struct cm_port *port)
{
+ struct cm_port *old_port = av->port;
+
+ if (old_port == port)
+ return;
+
av->port = port;
+ if (old_port)
+ cm_device_put(old_port->cm_dev);
+ if (port)
+ kref_get(&port->cm_dev->kref);
+}
+
+static void cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
+ struct rdma_ah_attr *ah_attr, struct cm_av *av)
+{
+ cm_set_av_port(av, port);
+ av->pkey_index = wc->pkey_index;
+ rdma_move_ah_attr(&av->ah_attr, ah_attr);
+}
+
+static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
+ struct ib_grh *grh, struct cm_av *av)
+{
+ cm_set_av_port(av, port);
av->pkey_index = wc->pkey_index;
- ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
- grh, &av->ah_attr);
+ return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
+ port->port_num, wc,
+ grh, &av->ah_attr);
}
-static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
+static struct cm_port *
+get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr)
{
struct cm_device *cm_dev;
struct cm_port *port = NULL;
unsigned long flags;
- int ret;
- u8 p;
- read_lock_irqsave(&cm.device_lock, flags);
- list_for_each_entry(cm_dev, &cm.device_list, list) {
- if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid,
- &p, NULL)) {
- port = cm_dev->port[p-1];
- break;
+ if (attr) {
+ read_lock_irqsave(&cm.device_lock, flags);
+ list_for_each_entry(cm_dev, &cm.device_list, list) {
+ if (cm_dev->ib_device == attr->device) {
+ port = cm_dev->port[attr->port_num - 1];
+ break;
+ }
+ }
+ read_unlock_irqrestore(&cm.device_lock, flags);
+ } else {
+ /* The SGID attribute can be NULL in the following
+ * conditions:
+ * (a) Alternative path
+ * (b) IB link layer without GRH
+ * (c) LAP send messages
+ */
+ read_lock_irqsave(&cm.device_lock, flags);
+ list_for_each_entry(cm_dev, &cm.device_list, list) {
+ attr = rdma_find_gid(cm_dev->ib_device,
+ &path->sgid,
+ sa_conv_pathrec_to_gid_type(path),
+ NULL);
+ if (!IS_ERR(attr)) {
+ port = cm_dev->port[attr->port_num - 1];
+ break;
+ }
}
+ read_unlock_irqrestore(&cm.device_lock, flags);
+ if (port)
+ rdma_put_gid_attr(attr);
}
- read_unlock_irqrestore(&cm.device_lock, flags);
+ return port;
+}
+
+static int cm_init_av_by_path(struct sa_path_rec *path,
+ const struct ib_gid_attr *sgid_attr,
+ struct cm_av *av)
+{
+ struct rdma_ah_attr new_ah_attr;
+ struct cm_device *cm_dev;
+ struct cm_port *port;
+ int ret;
+ port = get_cm_port_from_path(path, sgid_attr);
if (!port)
return -EINVAL;
+ cm_dev = port->cm_dev;
ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
be16_to_cpu(path->pkey), &av->pkey_index);
if (ret)
return ret;
- av->port = port;
- ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
- &av->ah_attr);
- av->timeout = path->packet_life_time + 1;
- memcpy(av->smac, path->smac, sizeof(av->smac));
+ cm_set_av_port(av, port);
+
+ /*
+ * av->ah_attr might have been initialized from a wc or during
+ * request processing, in which case it may hold a reference to
+ * sgid_attr. So initialize a new ah_attr on the stack. If
+ * initialization fails, the old ah_attr is still used for sending
+ * any response; if it succeeds, the new ah_attr overwrites the old
+ * one, so the right ah_attr can always be used to return an error
+ * response.
+ */
+ ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
+ &new_ah_attr, sgid_attr);
+ if (ret)
+ return ret;
- av->valid = 1;
+ av->timeout = path->packet_life_time + 1;
+ rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
return 0;
}
-static int cm_alloc_id(struct cm_id_private *cm_id_priv)
+/* Move av created by cm_init_av_by_path(), so av.dgid is not moved */
+static void cm_move_av_from_path(struct cm_av *dest, struct cm_av *src)
{
- unsigned long flags;
- int id;
-
- idr_preload(GFP_KERNEL);
- spin_lock_irqsave(&cm.lock, flags);
-
- id = idr_alloc_cyclic(&cm.local_id_table, cm_id_priv, 0, 0, GFP_NOWAIT);
-
- spin_unlock_irqrestore(&cm.lock, flags);
- idr_preload_end();
-
- cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
- return id < 0 ? id : 0;
+ cm_set_av_port(dest, src->port);
+ cm_set_av_port(src, NULL);
+ dest->pkey_index = src->pkey_index;
+ rdma_move_ah_attr(&dest->ah_attr, &src->ah_attr);
+ dest->timeout = src->timeout;
}
-static void cm_free_id(__be32 local_id)
+static void cm_destroy_av(struct cm_av *av)
{
- spin_lock_irq(&cm.lock);
- idr_remove(&cm.local_id_table,
- (__force int) (local_id ^ cm.random_id_operand));
- spin_unlock_irq(&cm.lock);
+ rdma_destroy_ah_attr(&av->ah_attr);
+ cm_set_av_port(av, NULL);
}
-static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
+static u32 cm_local_id(__be32 local_id)
{
- struct cm_id_private *cm_id_priv;
-
- cm_id_priv = idr_find(&cm.local_id_table,
- (__force int) (local_id ^ cm.random_id_operand));
- if (cm_id_priv) {
- if (cm_id_priv->id.remote_id == remote_id)
- atomic_inc(&cm_id_priv->refcount);
- else
- cm_id_priv = NULL;
- }
-
- return cm_id_priv;
+ return (__force u32) (local_id ^ cm.random_id_operand);
}
-static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
+static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
{
struct cm_id_private *cm_id_priv;
- spin_lock_irq(&cm.lock);
- cm_id_priv = cm_get_id(local_id, remote_id);
- spin_unlock_irq(&cm.lock);
+ rcu_read_lock();
+ cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
+ if (!cm_id_priv || cm_id_priv->id.remote_id != remote_id ||
+ !refcount_inc_not_zero(&cm_id_priv->refcount))
+ cm_id_priv = NULL;
+ rcu_read_unlock();
return cm_id_priv;
}
@@ -467,22 +624,25 @@ static int be64_gt(__be64 a, __be64 b)
return (__force u64) a > (__force u64) b;
}
-static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
+/*
+ * Inserts a new cm_id_priv into the listen_service_table. Returns cm_id_priv
+ * if the new ID was inserted, NULL if it could not be inserted due to a
+ * collision, or the existing cm_id_priv ready for shared usage.
+ */
+static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
+ ib_cm_handler shared_handler)
{
struct rb_node **link = &cm.listen_service_table.rb_node;
struct rb_node *parent = NULL;
struct cm_id_private *cur_cm_id_priv;
__be64 service_id = cm_id_priv->id.service_id;
- __be64 service_mask = cm_id_priv->id.service_mask;
+ unsigned long flags;
+ spin_lock_irqsave(&cm.lock, flags);
while (*link) {
parent = *link;
cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
service_node);
- if ((cur_cm_id_priv->id.service_mask & service_id) ==
- (service_mask & cur_cm_id_priv->id.service_id) &&
- (cm_id_priv->id.device == cur_cm_id_priv->id.device))
- return cur_cm_id_priv;
if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
link = &(*link)->rb_left;
@@ -492,26 +652,38 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
link = &(*link)->rb_left;
else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
link = &(*link)->rb_right;
- else
- link = &(*link)->rb_right;
+ else {
+ /*
+ * Sharing an ib_cm_id with different handlers is not
+ * supported
+ */
+ if (cur_cm_id_priv->id.cm_handler != shared_handler ||
+ cur_cm_id_priv->id.context ||
+ WARN_ON(!cur_cm_id_priv->id.cm_handler)) {
+ spin_unlock_irqrestore(&cm.lock, flags);
+ return NULL;
+ }
+ refcount_inc(&cur_cm_id_priv->refcount);
+ cur_cm_id_priv->listen_sharecount++;
+ spin_unlock_irqrestore(&cm.lock, flags);
+ return cur_cm_id_priv;
+ }
}
+ cm_id_priv->listen_sharecount++;
rb_link_node(&cm_id_priv->service_node, parent, link);
rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
- return NULL;
+ spin_unlock_irqrestore(&cm.lock, flags);
+ return cm_id_priv;
}
-static struct cm_id_private * cm_find_listen(struct ib_device *device,
- __be64 service_id)
+static struct cm_id_private *cm_find_listen(struct ib_device *device,
+ __be64 service_id)
{
struct rb_node *node = cm.listen_service_table.rb_node;
struct cm_id_private *cm_id_priv;
while (node) {
cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
- if ((cm_id_priv->id.service_mask & service_id) ==
- cm_id_priv->id.service_id &&
- (cm_id_priv->id.device == device))
- return cm_id_priv;
if (device < cm_id_priv->id.device)
node = node->rb_left;
@@ -521,14 +693,16 @@ static struct cm_id_private * cm_find_listen(struct ib_device *device,
node = node->rb_left;
else if (be64_gt(service_id, cm_id_priv->id.service_id))
node = node->rb_right;
- else
- node = node->rb_right;
+ else {
+ refcount_inc(&cm_id_priv->refcount);
+ return cm_id_priv;
+ }
}
return NULL;
}
-static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
- *timewait_info)
+static struct cm_timewait_info *
+cm_insert_remote_id(struct cm_timewait_info *timewait_info)
{
struct rb_node **link = &cm.remote_id_table.rb_node;
struct rb_node *parent = NULL;
@@ -557,12 +731,14 @@ static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
return NULL;
}
-static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
- __be32 remote_id)
+static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid,
+ __be32 remote_id)
{
struct rb_node *node = cm.remote_id_table.rb_node;
struct cm_timewait_info *timewait_info;
+ struct cm_id_private *res = NULL;
+ spin_lock_irq(&cm.lock);
while (node) {
timewait_info = rb_entry(node, struct cm_timewait_info,
remote_id_node);
@@ -574,14 +750,18 @@ static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
node = node->rb_left;
else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
node = node->rb_right;
- else
- return timewait_info;
+ else {
+ res = cm_acquire_id(timewait_info->work.local_id,
+ timewait_info->work.remote_id);
+ break;
+ }
}
- return NULL;
+ spin_unlock_irq(&cm.lock);
+ return res;
}
-static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
- *timewait_info)
+static struct cm_timewait_info *
+cm_insert_remote_qpn(struct cm_timewait_info *timewait_info)
{
struct rb_node **link = &cm.remote_qp_table.rb_node;
struct rb_node *parent = NULL;
@@ -610,13 +790,12 @@ static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
return NULL;
}
-static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
- *cm_id_priv)
+static struct cm_id_private *
+cm_insert_remote_sidr(struct cm_id_private *cm_id_priv)
{
struct rb_node **link = &cm.remote_sidr_table.rb_node;
struct rb_node *parent = NULL;
struct cm_id_private *cur_cm_id_priv;
- union ib_gid *port_gid = &cm_id_priv->av.dgid;
__be32 remote_id = cm_id_priv->id.remote_id;
while (*link) {
@@ -628,12 +807,9 @@ static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
link = &(*link)->rb_right;
else {
- int cmp;
- cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
- sizeof *port_gid);
- if (cmp < 0)
+ if (cur_cm_id_priv->sidr_slid < cm_id_priv->sidr_slid)
link = &(*link)->rb_left;
- else if (cmp > 0)
+ else if (cur_cm_id_priv->sidr_slid > cm_id_priv->sidr_slid)
link = &(*link)->rb_right;
else
return cur_cm_id_priv;
@@ -644,21 +820,12 @@ static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
return NULL;
}
-static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
- enum ib_cm_sidr_status status)
-{
- struct ib_cm_sidr_rep_param param;
-
- memset(&param, 0, sizeof param);
- param.status = status;
- ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
-}
-
-struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
- ib_cm_handler cm_handler,
- void *context)
+static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
+ ib_cm_handler cm_handler,
+ void *context)
{
struct cm_id_private *cm_id_priv;
+ u32 id;
int ret;
cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
@@ -670,24 +837,54 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
cm_id_priv->id.cm_handler = cm_handler;
cm_id_priv->id.context = context;
cm_id_priv->id.remote_cm_qpn = 1;
- ret = cm_alloc_id(cm_id_priv);
- if (ret)
- goto error;
+ RB_CLEAR_NODE(&cm_id_priv->service_node);
+ RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
spin_lock_init(&cm_id_priv->lock);
init_completion(&cm_id_priv->comp);
INIT_LIST_HEAD(&cm_id_priv->work_list);
atomic_set(&cm_id_priv->work_count, -1);
- atomic_set(&cm_id_priv->refcount, 1);
- return &cm_id_priv->id;
+ refcount_set(&cm_id_priv->refcount, 1);
+
+ ret = xa_alloc_cyclic(&cm.local_id_table, &id, NULL, xa_limit_32b,
+ &cm.local_id_next, GFP_KERNEL);
+ if (ret < 0)
+ goto error;
+ cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
+
+ return cm_id_priv;
error:
kfree(cm_id_priv);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(ret);
+}
+
+/*
+ * Make the ID visible to the MAD handlers and other threads that use the
+ * xarray.
+ */
+static void cm_finalize_id(struct cm_id_private *cm_id_priv)
+{
+ xa_store(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
+ cm_id_priv, GFP_ATOMIC);
+}
+
+struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
+ ib_cm_handler cm_handler,
+ void *context)
+{
+ struct cm_id_private *cm_id_priv;
+
+ cm_id_priv = cm_alloc_id_priv(device, cm_handler, context);
+ if (IS_ERR(cm_id_priv))
+ return ERR_CAST(cm_id_priv);
+
+ cm_finalize_id(cm_id_priv);
+ return &cm_id_priv->id;
}
EXPORT_SYMBOL(ib_create_cm_id);
-static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
+static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
struct cm_work *work;
@@ -706,6 +903,36 @@ static void cm_free_work(struct cm_work *work)
kfree(work);
}
+static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
+ struct cm_work *work)
+ __releases(&cm_id_priv->lock)
+{
+ bool immediate;
+
+ /*
+ * To deliver the event to the user callback we have to drop the
+ * spinlock; however, we need to ensure that the user callback is
+ * single threaded and receives events in temporal order. If there are
+ * already events being processed, then thread new events onto a list;
+ * the thread currently processing will pick them up.
+ */
+ immediate = atomic_inc_and_test(&cm_id_priv->work_count);
+ if (!immediate) {
+ list_add_tail(&work->list, &cm_id_priv->work_list);
+ /*
+ * This routine always consumes incoming reference. Once queued
+ * to the work_list then a reference is held by the thread
+ * currently running cm_process_work() and this reference is not
+ * needed.
+ */
+ cm_deref_id(cm_id_priv);
+ }
+ spin_unlock_irq(&cm_id_priv->lock);
+
+ if (immediate)
+ cm_process_work(cm_id_priv, work);
+}
+
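The work_count gate used by cm_queue_work_unlock() above is easy to misread: the counter starts at -1, so only the first increment reaches zero and is allowed to run the handler directly; every later arrival parks its work on the list for that first thread to drain. A single-threaded sketch of the gate (a plain int stands in for the kernel's atomic_inc_and_test(), and a counter stands in for the work_list; the names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    static int work_count = -1; /* the kernel uses an atomic_t initialized to -1 */
    static int queued;          /* stands in for the work_list */

    /* Returns true when the caller should deliver the event itself. */
    static bool queue_or_process(void)
    {
            bool immediate = (++work_count == 0); /* atomic_inc_and_test() analogue */
            if (!immediate)
                    queued++;                     /* defer to the thread already running */
            return immediate;
    }

    int main(void)
    {
            printf("first caller immediate?  %d\n", queue_or_process()); /* 1 */
            printf("second caller immediate? %d\n", queue_or_process()); /* 0, queued */
            printf("items left for the running thread: %d\n", queued);
            return 0;
    }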
static inline int cm_convert_to_ms(int iba_time)
{
/* approximate conversion to ms from 4.096us x 2^iba_time */
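The body of cm_convert_to_ms() is cut off by the hunk above, but its comment fixes the unit: an IBA time code t stands for 4.096 us x 2^t, which the helper approximates in milliseconds (in practice with a shift rather than a division; treat that as an assumption here). The exact arithmetic, for comparison:

    #include <stdint.h>
    #include <stdio.h>

    /* Exact value of 4.096 us * 2^t, expressed in milliseconds (t <= 31). */
    static uint64_t iba_time_to_ms_exact(unsigned int t)
    {
            return (4096ULL << t) / 1000000; /* 4.096 us == 4096 ns */
    }

    int main(void)
    {
            for (unsigned int t = 8; t <= 20; t += 4)
                    printf("t=%u -> ~%llu ms\n", t,
                           (unsigned long long)iba_time_to_ms_exact(t));
            return 0;
    }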
@@ -731,8 +958,10 @@ static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
return min(31, ack_timeout);
}
-static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
+static void cm_remove_remote(struct cm_id_private *cm_id_priv)
{
+ struct cm_timewait_info *timewait_info = cm_id_priv->timewait_info;
+
if (timewait_info->inserted_remote_id) {
rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
timewait_info->inserted_remote_id = 0;
@@ -744,7 +973,7 @@ static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
}
}
-static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
+static struct cm_timewait_info *cm_create_timewait_info(__be32 local_id)
{
struct cm_timewait_info *timewait_info;
@@ -764,12 +993,14 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
unsigned long flags;
struct cm_device *cm_dev;
+ lockdep_assert_held(&cm_id_priv->lock);
+
cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
if (!cm_dev)
return;
spin_lock_irqsave(&cm.lock, flags);
- cm_cleanup_timewait(cm_id_priv->timewait_info);
+ cm_remove_remote(cm_id_priv);
list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
spin_unlock_irqrestore(&cm.lock, flags);
@@ -782,12 +1013,17 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
/* Check if the device started its remove_one */
- spin_lock_irq(&cm.lock);
+ spin_lock_irqsave(&cm.lock, flags);
if (!cm_dev->going_down)
queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
msecs_to_jiffies(wait_time));
- spin_unlock_irq(&cm.lock);
+ spin_unlock_irqrestore(&cm.lock, flags);
+ /*
+ * The timewait_info is converted into a work and gets freed during
+ * cm_free_work() in cm_timewait_handler().
+ */
+ BUILD_BUG_ON(offsetof(struct cm_timewait_info, work) != 0);
cm_id_priv->timewait_info = NULL;
}
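The BUILD_BUG_ON added above is the whole reason the hand-off works: because the embedded work is the first member of struct cm_timewait_info, the pointer passed around as a work item is also a valid pointer to the timewait_info, so freeing the work frees the containing object. A minimal sketch of that compile-time guarantee with made-up struct layouts (the real fields differ):

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct fake_work { int pending; };

    struct fake_timewait_info {
            struct fake_work work; /* must stay the first member */
            int remote_qpn;
    };

    /* Userspace analogue of BUILD_BUG_ON(offsetof(...) != 0). */
    static_assert(offsetof(struct fake_timewait_info, work) == 0,
                  "work must be first so freeing the work frees the whole object");

    int main(void)
    {
            struct fake_timewait_info *ti = malloc(sizeof(*ti));
            struct fake_work *w = &ti->work;   /* same address as ti */
            printf("same address? %d\n", (void *)w == (void *)ti);
            free(w);                           /* legal only because the offset is 0 */
            return 0;
    }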
@@ -795,105 +1031,157 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
unsigned long flags;
+ lockdep_assert_held(&cm_id_priv->lock);
+
cm_id_priv->id.state = IB_CM_IDLE;
if (cm_id_priv->timewait_info) {
spin_lock_irqsave(&cm.lock, flags);
- cm_cleanup_timewait(cm_id_priv->timewait_info);
+ cm_remove_remote(cm_id_priv);
spin_unlock_irqrestore(&cm.lock, flags);
kfree(cm_id_priv->timewait_info);
cm_id_priv->timewait_info = NULL;
}
}
+static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
+ enum ib_cm_state old_state)
+{
+ struct cm_id_private *cm_id_priv;
+
+ cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+ pr_err_ratelimited("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
+ cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
+}
+
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
struct cm_id_private *cm_id_priv;
+ enum ib_cm_state old_state;
struct cm_work *work;
+ int ret;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
-retest:
spin_lock_irq(&cm_id_priv->lock);
+ old_state = cm_id->state;
+retest:
switch (cm_id->state) {
case IB_CM_LISTEN:
- spin_unlock_irq(&cm_id_priv->lock);
-
- spin_lock_irq(&cm.lock);
+ spin_lock(&cm.lock);
if (--cm_id_priv->listen_sharecount > 0) {
/* The id is still shared. */
+ WARN_ON(refcount_read(&cm_id_priv->refcount) == 1);
+ spin_unlock(&cm.lock);
+ spin_unlock_irq(&cm_id_priv->lock);
cm_deref_id(cm_id_priv);
- spin_unlock_irq(&cm.lock);
return;
}
+ cm_id->state = IB_CM_IDLE;
rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
- spin_unlock_irq(&cm.lock);
+ RB_CLEAR_NODE(&cm_id_priv->service_node);
+ spin_unlock(&cm.lock);
break;
case IB_CM_SIDR_REQ_SENT:
cm_id->state = IB_CM_IDLE;
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- spin_unlock_irq(&cm_id_priv->lock);
+ ib_cancel_mad(cm_id_priv->msg);
break;
case IB_CM_SIDR_REQ_RCVD:
- spin_unlock_irq(&cm_id_priv->lock);
- cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
+ cm_send_sidr_rep_locked(cm_id_priv,
+ &(struct ib_cm_sidr_rep_param){
+ .status = IB_SIDR_REJECT });
+ /* cm_send_sidr_rep_locked will not move to IDLE if it fails */
+ cm_id->state = IB_CM_IDLE;
break;
case IB_CM_REQ_SENT:
case IB_CM_MRA_REQ_RCVD:
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- spin_unlock_irq(&cm_id_priv->lock);
- ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
- &cm_id_priv->id.device->node_guid,
- sizeof cm_id_priv->id.device->node_guid,
- NULL, 0);
+ ib_cancel_mad(cm_id_priv->msg);
+ cm_send_rej_locked(cm_id_priv, IB_CM_REJ_TIMEOUT,
+ &cm_id_priv->id.device->node_guid,
+ sizeof(cm_id_priv->id.device->node_guid),
+ NULL, 0);
break;
case IB_CM_REQ_RCVD:
if (err == -ENOMEM) {
/* Do not reject to allow future retries. */
cm_reset_to_idle(cm_id_priv);
- spin_unlock_irq(&cm_id_priv->lock);
} else {
- spin_unlock_irq(&cm_id_priv->lock);
- ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
- NULL, 0, NULL, 0);
+ cm_send_rej_locked(cm_id_priv,
+ IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
+ NULL, 0);
}
break;
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- /* Fall through */
+ ib_cancel_mad(cm_id_priv->msg);
+ cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
+ 0, NULL, 0);
+ goto retest;
case IB_CM_MRA_REQ_SENT:
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
- spin_unlock_irq(&cm_id_priv->lock);
- ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
- NULL, 0, NULL, 0);
+ cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
+ 0, NULL, 0);
break;
case IB_CM_ESTABLISHED:
- spin_unlock_irq(&cm_id_priv->lock);
- if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
+ if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
+ cm_id->state = IB_CM_IDLE;
break;
- ib_send_cm_dreq(cm_id, NULL, 0);
+ }
+ cm_issue_dreq(cm_id_priv);
+ cm_enter_timewait(cm_id_priv);
goto retest;
case IB_CM_DREQ_SENT:
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+ ib_cancel_mad(cm_id_priv->msg);
cm_enter_timewait(cm_id_priv);
- spin_unlock_irq(&cm_id_priv->lock);
- break;
+ goto retest;
case IB_CM_DREQ_RCVD:
- spin_unlock_irq(&cm_id_priv->lock);
- ib_send_cm_drep(cm_id, NULL, 0);
+ cm_send_drep_locked(cm_id_priv, NULL, 0);
+ WARN_ON(cm_id->state != IB_CM_TIMEWAIT);
+ goto retest;
+ case IB_CM_TIMEWAIT:
+ /*
+ * The cm_acquire_id in cm_timewait_handler will stop working
+ * once we do xa_erase below, so just move to idle here for
+ * consistency.
+ */
+ cm_id->state = IB_CM_IDLE;
break;
- default:
- spin_unlock_irq(&cm_id_priv->lock);
+ case IB_CM_IDLE:
break;
}
+ WARN_ON(cm_id->state != IB_CM_IDLE);
+
+ spin_lock(&cm.lock);
+ /* Required for cleanup paths related to cm_req_handler() */
+ if (cm_id_priv->timewait_info) {
+ cm_remove_remote(cm_id_priv);
+ kfree(cm_id_priv->timewait_info);
+ cm_id_priv->timewait_info = NULL;
+ }
+
+ WARN_ON(cm_id_priv->listen_sharecount);
+ WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node));
+ if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
+ rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
+ spin_unlock(&cm.lock);
+ spin_unlock_irq(&cm_id_priv->lock);
- cm_free_id(cm_id->local_id);
+ xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id));
cm_deref_id(cm_id_priv);
- wait_for_completion(&cm_id_priv->comp);
+ do {
+ ret = wait_for_completion_timeout(&cm_id_priv->comp,
+ msecs_to_jiffies(
+ CM_DESTROY_ID_WAIT_TIMEOUT));
+ if (!ret) /* timeout happened */
+ cm_destroy_id_wait_timeout(cm_id, old_state);
+ } while (!ret);
+
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
cm_free_work(work);
+
+ cm_destroy_av(&cm_id_priv->av);
+ cm_destroy_av(&cm_id_priv->alt_av);
kfree(cm_id_priv->private_data);
- kfree(cm_id_priv);
+ kfree_rcu(cm_id_priv, rcu);
}
void ib_destroy_cm_id(struct ib_cm_id *cm_id)
@@ -902,70 +1190,63 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id)
}
EXPORT_SYMBOL(ib_destroy_cm_id);
+static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id)
+{
+ if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
+ (service_id != IB_CM_ASSIGN_SERVICE_ID))
+ return -EINVAL;
+
+ if (service_id == IB_CM_ASSIGN_SERVICE_ID)
+ cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++);
+ else
+ cm_id_priv->id.service_id = service_id;
+
+ return 0;
+}
+
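The range check in cm_init_listen() above deserves a word: a service ID whose high byte falls in the CM-assigned range is rejected unless it is exactly the IB_CM_ASSIGN_SERVICE_ID sentinel, in which case the CM picks an ID itself. A sketch of the predicate in host byte order, with the mask and sentinel written out as assumed values (believed to match the rdma/ib_cm.h definitions, but treat them as illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed values, shown in host order for readability. */
    #define SERVICE_ID_AGN_MASK  0xFF00000000000000ULL
    #define ASSIGN_SERVICE_ID    0x0200000000000000ULL

    static bool listen_service_id_ok(uint64_t service_id)
    {
            /* In the CM-assigned range but not the sentinel itself: reject. */
            if ((service_id & SERVICE_ID_AGN_MASK) == ASSIGN_SERVICE_ID &&
                service_id != ASSIGN_SERVICE_ID)
                    return false;
            return true;
    }

    int main(void)
    {
            printf("%d\n", listen_service_id_ok(0x0200000000000000ULL)); /* 1: CM assigns */
            printf("%d\n", listen_service_id_ok(0x0200000000001234ULL)); /* 0: reserved range */
            printf("%d\n", listen_service_id_ok(0x1000000000005678ULL)); /* 1: caller-chosen */
            return 0;
    }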
/**
- * __ib_cm_listen - Initiates listening on the specified service ID for
+ * ib_cm_listen - Initiates listening on the specified service ID for
* connection and service ID resolution requests.
* @cm_id: Connection identifier associated with the listen request.
* @service_id: Service identifier matched against incoming connection
* and service ID resolution requests. The service ID should be specified
* in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
* assign a service ID to the caller.
- * @service_mask: Mask applied to service ID used to listen across a
- * range of service IDs. If set to 0, the service ID is matched
- * exactly. This parameter is ignored if %service_id is set to
- * IB_CM_ASSIGN_SERVICE_ID.
*/
-static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
- __be64 service_mask)
+int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id)
{
- struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
- int ret = 0;
-
- service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
- service_id &= service_mask;
- if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
- (service_id != IB_CM_ASSIGN_SERVICE_ID))
- return -EINVAL;
-
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- if (cm_id->state != IB_CM_IDLE)
- return -EINVAL;
-
- cm_id->state = IB_CM_LISTEN;
- ++cm_id_priv->listen_sharecount;
+ struct cm_id_private *cm_id_priv =
+ container_of(cm_id, struct cm_id_private, id);
+ unsigned long flags;
+ int ret;
- if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
- cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
- cm_id->service_mask = ~cpu_to_be64(0);
- } else {
- cm_id->service_id = service_id;
- cm_id->service_mask = service_mask;
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ if (cm_id_priv->id.state != IB_CM_IDLE) {
+ ret = -EINVAL;
+ goto out;
}
- cur_cm_id_priv = cm_insert_listen(cm_id_priv);
- if (cur_cm_id_priv) {
- cm_id->state = IB_CM_IDLE;
- --cm_id_priv->listen_sharecount;
+ ret = cm_init_listen(cm_id_priv, service_id);
+ if (ret)
+ goto out;
+
+ if (!cm_insert_listen(cm_id_priv, NULL)) {
ret = -EBUSY;
+ goto out;
}
- return ret;
-}
-
-int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
-{
- unsigned long flags;
- int ret;
- spin_lock_irqsave(&cm.lock, flags);
- ret = __ib_cm_listen(cm_id, service_id, service_mask);
- spin_unlock_irqrestore(&cm.lock, flags);
+ cm_id_priv->id.state = IB_CM_LISTEN;
+ ret = 0;
+out:
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
/**
- * Create a new listening ib_cm_id and listen on the given service ID.
+ * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on
+ * the given service ID.
*
* If there's an existing ID listening on that same device and service ID,
* return it.
@@ -984,61 +1265,57 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
ib_cm_handler cm_handler,
__be64 service_id)
{
+ struct cm_id_private *listen_id_priv;
struct cm_id_private *cm_id_priv;
- struct ib_cm_id *cm_id;
- unsigned long flags;
int err = 0;
/* Create an ID in advance, since the creation may sleep */
- cm_id = ib_create_cm_id(device, cm_handler, NULL);
- if (IS_ERR(cm_id))
- return cm_id;
+ cm_id_priv = cm_alloc_id_priv(device, cm_handler, NULL);
+ if (IS_ERR(cm_id_priv))
+ return ERR_CAST(cm_id_priv);
- spin_lock_irqsave(&cm.lock, flags);
+ err = cm_init_listen(cm_id_priv, service_id);
+ if (err) {
+ ib_destroy_cm_id(&cm_id_priv->id);
+ return ERR_PTR(err);
+ }
- if (service_id == IB_CM_ASSIGN_SERVICE_ID)
- goto new_id;
-
- /* Find an existing ID */
- cm_id_priv = cm_find_listen(device, service_id);
- if (cm_id_priv) {
- if (cm_id->cm_handler != cm_handler || cm_id->context) {
- /* Sharing an ib_cm_id with different handlers is not
- * supported */
- spin_unlock_irqrestore(&cm.lock, flags);
+ spin_lock_irq(&cm_id_priv->lock);
+ listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler);
+ if (listen_id_priv != cm_id_priv) {
+ spin_unlock_irq(&cm_id_priv->lock);
+ ib_destroy_cm_id(&cm_id_priv->id);
+ if (!listen_id_priv)
return ERR_PTR(-EINVAL);
- }
- atomic_inc(&cm_id_priv->refcount);
- ++cm_id_priv->listen_sharecount;
- spin_unlock_irqrestore(&cm.lock, flags);
-
- ib_destroy_cm_id(cm_id);
- cm_id = &cm_id_priv->id;
- return cm_id;
+ return &listen_id_priv->id;
}
+ cm_id_priv->id.state = IB_CM_LISTEN;
+ spin_unlock_irq(&cm_id_priv->lock);
-new_id:
- /* Use newly created ID */
- err = __ib_cm_listen(cm_id, service_id, 0);
-
- spin_unlock_irqrestore(&cm.lock, flags);
+ /*
+ * A listen ID does not need to be in the xarray since it does not
+ * receive MADs, is not placed in the remote_id or remote_qpn rbtree,
+ * and does not enter timewait.
+ */
- if (err) {
- ib_destroy_cm_id(cm_id);
- return ERR_PTR(err);
- }
- return cm_id;
+ return &cm_id_priv->id;
}
EXPORT_SYMBOL(ib_cm_insert_listen);
-static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
- enum cm_msg_sequence msg_seq)
+static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
{
- u64 hi_tid, low_tid;
+ u64 hi_tid = 0, low_tid;
+
+ lockdep_assert_held(&cm_id_priv->lock);
- hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
- low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
- (msg_seq << 30));
+ low_tid = (u64)cm_id_priv->id.local_id;
+ if (!cm_id_priv->av.port)
+ return cpu_to_be64(low_tid);
+
+ read_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ if (cm_id_priv->av.port->mad_agent)
+ hi_tid = ((u64)cm_id_priv->av.port->mad_agent->hi_tid) << 32;
+ read_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
return cpu_to_be64(hi_tid | low_tid);
}
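The reworked cm_form_tid() above degrades gracefully when the port or MAD agent is gone: the low 32 bits always carry the local communication ID, and the high 32 bits carry the agent's hi_tid only when it can be read under mad_agent_lock; the result is then converted to big endian. The packing itself is a plain shift-and-or, sketched here with arbitrary values:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t form_tid(uint32_t agent_hi_tid, uint32_t local_id)
    {
            return ((uint64_t)agent_hi_tid << 32) | local_id;
    }

    int main(void)
    {
            uint64_t tid = form_tid(0x2a, 0xdeadbeef);
            printf("tid=%#llx (hi=%#x, lo=%#x)\n",
                   (unsigned long long)tid,
                   (unsigned)(tid >> 32), (unsigned)(tid & 0xffffffff));
            return 0;
    }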
@@ -1053,92 +1330,166 @@ static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
hdr->tid = tid;
}
+static void cm_format_mad_ece_hdr(struct ib_mad_hdr *hdr, __be16 attr_id,
+ __be64 tid, u32 attr_mod)
+{
+ cm_format_mad_hdr(hdr, attr_id, tid);
+ hdr->attr_mod = cpu_to_be32(attr_mod);
+}
+
static void cm_format_req(struct cm_req_msg *req_msg,
struct cm_id_private *cm_id_priv,
struct ib_cm_req_param *param)
{
- struct ib_sa_path_rec *pri_path = param->primary_path;
- struct ib_sa_path_rec *alt_path = param->alternate_path;
-
- cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
- cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));
-
- req_msg->local_comm_id = cm_id_priv->id.local_id;
- req_msg->service_id = param->service_id;
- req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
- cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
- cm_req_set_init_depth(req_msg, param->initiator_depth);
- cm_req_set_remote_resp_timeout(req_msg,
- param->remote_cm_response_timeout);
+ struct sa_path_rec *pri_path = param->primary_path;
+ struct sa_path_rec *alt_path = param->alternate_path;
+ bool pri_ext = false;
+ __be16 lid;
+
+ if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
+ pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
+ pri_path->opa.slid);
+
+ cm_format_mad_ece_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
+ cm_form_tid(cm_id_priv), param->ece.attr_mod);
+
+ IBA_SET(CM_REQ_LOCAL_COMM_ID, req_msg,
+ be32_to_cpu(cm_id_priv->id.local_id));
+ IBA_SET(CM_REQ_SERVICE_ID, req_msg, be64_to_cpu(param->service_id));
+ IBA_SET(CM_REQ_LOCAL_CA_GUID, req_msg,
+ be64_to_cpu(cm_id_priv->id.device->node_guid));
+ IBA_SET(CM_REQ_LOCAL_QPN, req_msg, param->qp_num);
+ IBA_SET(CM_REQ_INITIATOR_DEPTH, req_msg, param->initiator_depth);
+ IBA_SET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg,
+ param->remote_cm_response_timeout);
cm_req_set_qp_type(req_msg, param->qp_type);
- cm_req_set_flow_ctrl(req_msg, param->flow_control);
- cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
- cm_req_set_local_resp_timeout(req_msg,
- param->local_cm_response_timeout);
- req_msg->pkey = param->primary_path->pkey;
- cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
- cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
+ IBA_SET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg, param->flow_control);
+ IBA_SET(CM_REQ_STARTING_PSN, req_msg, param->starting_psn);
+ IBA_SET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg,
+ param->local_cm_response_timeout);
+ IBA_SET(CM_REQ_PARTITION_KEY, req_msg,
+ be16_to_cpu(param->primary_path->pkey));
+ IBA_SET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg,
+ param->primary_path->mtu);
+ IBA_SET(CM_REQ_MAX_CM_RETRIES, req_msg, param->max_cm_retries);
if (param->qp_type != IB_QPT_XRC_INI) {
- cm_req_set_resp_res(req_msg, param->responder_resources);
- cm_req_set_retry_count(req_msg, param->retry_count);
- cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
- cm_req_set_srq(req_msg, param->srq);
+ IBA_SET(CM_REQ_RESPONDER_RESOURCES, req_msg,
+ param->responder_resources);
+ IBA_SET(CM_REQ_RETRY_COUNT, req_msg, param->retry_count);
+ IBA_SET(CM_REQ_RNR_RETRY_COUNT, req_msg,
+ param->rnr_retry_count);
+ IBA_SET(CM_REQ_SRQ, req_msg, param->srq);
}
+ *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg) =
+ pri_path->sgid;
+ *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg) =
+ pri_path->dgid;
+ if (pri_ext) {
+ IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg)
+ ->global.interface_id =
+ OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
+ IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg)
+ ->global.interface_id =
+ OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
+ }
if (pri_path->hop_limit <= 1) {
- req_msg->primary_local_lid = pri_path->slid;
- req_msg->primary_remote_lid = pri_path->dlid;
+ IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
+ be16_to_cpu(pri_ext ? 0 :
+ htons(ntohl(sa_path_get_slid(
+ pri_path)))));
+ IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
+ be16_to_cpu(pri_ext ? 0 :
+ htons(ntohl(sa_path_get_dlid(
+ pri_path)))));
} else {
+
+ if (param->primary_path_inbound) {
+ lid = param->primary_path_inbound->ib.dlid;
+ IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
+ be16_to_cpu(lid));
+ } else
+ IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
+ be16_to_cpu(IB_LID_PERMISSIVE));
+
/* Work-around until there's a way to obtain remote LID info */
- req_msg->primary_local_lid = IB_LID_PERMISSIVE;
- req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
- }
- req_msg->primary_local_gid = pri_path->sgid;
- req_msg->primary_remote_gid = pri_path->dgid;
- cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
- cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
- req_msg->primary_traffic_class = pri_path->traffic_class;
- req_msg->primary_hop_limit = pri_path->hop_limit;
- cm_req_set_primary_sl(req_msg, pri_path->sl);
- cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
- cm_req_set_primary_local_ack_timeout(req_msg,
+ IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
+ be16_to_cpu(IB_LID_PERMISSIVE));
+ }
+ IBA_SET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg,
+ be32_to_cpu(pri_path->flow_label));
+ IBA_SET(CM_REQ_PRIMARY_PACKET_RATE, req_msg, pri_path->rate);
+ IBA_SET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg, pri_path->traffic_class);
+ IBA_SET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg, pri_path->hop_limit);
+ IBA_SET(CM_REQ_PRIMARY_SL, req_msg, pri_path->sl);
+ IBA_SET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg,
+ (pri_path->hop_limit <= 1));
+ IBA_SET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg,
cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
pri_path->packet_life_time));
if (alt_path) {
+ bool alt_ext = false;
+
+ if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA)
+ alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
+ alt_path->opa.slid);
+
+ *IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg) =
+ alt_path->sgid;
+ *IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg) =
+ alt_path->dgid;
+ if (alt_ext) {
+ IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
+ req_msg)
+ ->global.interface_id =
+ OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
+ IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID,
+ req_msg)
+ ->global.interface_id =
+ OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
+ }
if (alt_path->hop_limit <= 1) {
- req_msg->alt_local_lid = alt_path->slid;
- req_msg->alt_remote_lid = alt_path->dlid;
+ IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
+ be16_to_cpu(
+ alt_ext ? 0 :
+ htons(ntohl(sa_path_get_slid(
+ alt_path)))));
+ IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
+ be16_to_cpu(
+ alt_ext ? 0 :
+ htons(ntohl(sa_path_get_dlid(
+ alt_path)))));
} else {
- req_msg->alt_local_lid = IB_LID_PERMISSIVE;
- req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
+ IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
+ be16_to_cpu(IB_LID_PERMISSIVE));
+ IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
+ be16_to_cpu(IB_LID_PERMISSIVE));
}
- req_msg->alt_local_gid = alt_path->sgid;
- req_msg->alt_remote_gid = alt_path->dgid;
- cm_req_set_alt_flow_label(req_msg,
- alt_path->flow_label);
- cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
- req_msg->alt_traffic_class = alt_path->traffic_class;
- req_msg->alt_hop_limit = alt_path->hop_limit;
- cm_req_set_alt_sl(req_msg, alt_path->sl);
- cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
- cm_req_set_alt_local_ack_timeout(req_msg,
+ IBA_SET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg,
+ be32_to_cpu(alt_path->flow_label));
+ IBA_SET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg, alt_path->rate);
+ IBA_SET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg,
+ alt_path->traffic_class);
+ IBA_SET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg,
+ alt_path->hop_limit);
+ IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, alt_path->sl);
+ IBA_SET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg,
+ (alt_path->hop_limit <= 1));
+ IBA_SET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg,
cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
alt_path->packet_life_time));
}
+ IBA_SET(CM_REQ_VENDOR_ID, req_msg, param->ece.vendor_id);
if (param->private_data && param->private_data_len)
- memcpy(req_msg->private_data, param->private_data,
- param->private_data_len);
+ IBA_SET_MEM(CM_REQ_PRIVATE_DATA, req_msg, param->private_data,
+ param->private_data_len);
}
static int cm_validate_req_param(struct ib_cm_req_param *param)
{
- /* peer-to-peer not supported */
- if (param->peer_to_peer)
- return -EINVAL;
-
if (!param->primary_path)
return -EINVAL;
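One pattern worth calling out in the converted cm_format_req(): the IBA_SET()/IBA_GET() accessors appear to take and return CPU byte order, which is why every big-endian wire value is wrapped in be16/be32/be64_to_cpu() on the way in and cpu_to_be*() on the way out. The round trip itself is ordinary byte swapping, sketched with the standard htonl()/ntohl() as stand-ins for the kernel helpers:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t host_qpn = 0x00abcdef;      /* value the caller works with */
            uint32_t wire_qpn = htonl(host_qpn); /* cpu_to_be32(): on-the-wire order */

            /* A setter that expects host order must be fed ntohl(wire value),
             * just as the diff feeds be32_to_cpu(...) into IBA_SET(). */
            printf("host=%#x wire=%#x back-to-host=%#x\n",
                   (unsigned)host_qpn, (unsigned)wire_qpn, (unsigned)ntohl(wire_qpn));
            return 0;
    }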
@@ -1161,7 +1512,9 @@ static int cm_validate_req_param(struct ib_cm_req_param *param)
int ib_send_cm_req(struct ib_cm_id *cm_id,
struct ib_cm_req_param *param)
{
+ struct cm_av av = {}, alt_av = {};
struct cm_id_private *cm_id_priv;
+ struct ib_mad_send_buf *msg;
struct cm_req_msg *req_msg;
unsigned long flags;
int ret;
@@ -1173,10 +1526,9 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
/* Verify that we're not in timewait. */
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id->state != IB_CM_IDLE) {
+ if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -1184,20 +1536,23 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
id.local_id);
if (IS_ERR(cm_id_priv->timewait_info)) {
ret = PTR_ERR(cm_id_priv->timewait_info);
- goto out;
+ cm_id_priv->timewait_info = NULL;
+ return ret;
}
- ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
+ ret = cm_init_av_by_path(param->primary_path,
+ param->ppath_sgid_attr, &av);
if (ret)
- goto error1;
+ return ret;
if (param->alternate_path) {
- ret = cm_init_av_by_path(param->alternate_path,
- &cm_id_priv->alt_av);
- if (ret)
- goto error1;
+ ret = cm_init_av_by_path(param->alternate_path, NULL,
+ &alt_av);
+ if (ret) {
+ cm_destroy_av(&av);
+ return ret;
+ }
}
cm_id->service_id = param->service_id;
- cm_id->service_mask = ~cpu_to_be64(0);
cm_id_priv->timeout_ms = cm_convert_to_ms(
param->primary_path->packet_life_time) * 2 +
cm_convert_to_ms(
@@ -1210,33 +1565,42 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
cm_id_priv->pkey = param->primary_path->pkey;
cm_id_priv->qp_type = param->qp_type;
- ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
- if (ret)
- goto error1;
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+
+ cm_move_av_from_path(&cm_id_priv->av, &av);
+ if (param->primary_path_outbound)
+ cm_id_priv->av.dlid_datapath =
+ be16_to_cpu(param->primary_path_outbound->ib.dlid);
+
+ if (param->alternate_path)
+ cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av);
+
+ msg = cm_alloc_priv_msg(cm_id_priv, IB_CM_REQ_SENT);
+ if (IS_ERR(msg)) {
+ ret = PTR_ERR(msg);
+ goto out_unlock;
+ }
- req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
+ req_msg = (struct cm_req_msg *)msg->mad;
cm_format_req(req_msg, cm_id_priv, param);
cm_id_priv->tid = req_msg->hdr.tid;
- cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
- cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;
- cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
- cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
+ cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
+ cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
- spin_lock_irqsave(&cm_id_priv->lock, flags);
- ret = ib_post_send_mad(cm_id_priv->msg, NULL);
- if (ret) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- goto error2;
- }
+ trace_icm_send_req(&cm_id_priv->id);
+ ret = ib_post_send_mad(msg, NULL);
+ if (ret)
+ goto out_free;
BUG_ON(cm_id->state != IB_CM_IDLE);
cm_id->state = IB_CM_REQ_SENT;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return 0;
-
-error2: cm_free_msg(cm_id_priv->msg);
-error1: kfree(cm_id_priv->timewait_info);
-out: return ret;
+out_free:
+ cm_free_priv_msg(msg);
+out_unlock:
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
@@ -1250,7 +1614,7 @@ static int cm_issue_rej(struct cm_port *port,
struct cm_rej_msg *rej_msg, *rcv_msg;
int ret;
- ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
+ ret = cm_alloc_response_msg(port, mad_recv_wc, false, &msg);
if (ret)
return ret;
@@ -1259,16 +1623,21 @@ static int cm_issue_rej(struct cm_port *port,
rej_msg = (struct cm_rej_msg *) msg->mad;
cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
- rej_msg->remote_comm_id = rcv_msg->local_comm_id;
- rej_msg->local_comm_id = rcv_msg->remote_comm_id;
- cm_rej_set_msg_rejected(rej_msg, msg_rejected);
- rej_msg->reason = cpu_to_be16(reason);
+ IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
+ IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg));
+ IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
+ IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
+ IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, msg_rejected);
+ IBA_SET(CM_REJ_REASON, rej_msg, reason);
if (ari && ari_length) {
- cm_rej_set_reject_info_len(rej_msg, ari_length);
- memcpy(rej_msg->ari, ari, ari_length);
+ IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
+ IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
}
+ trace_icm_issue_rej(
+ IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg),
+ IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
ret = ib_post_send_mad(msg, NULL);
if (ret)
cm_free_msg(msg);
@@ -1276,74 +1645,141 @@ static int cm_issue_rej(struct cm_port *port,
return ret;
}
-static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
- __be32 local_qpn, __be32 remote_qpn)
+static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
+{
+ return ((cpu_to_be16(
+ IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg))) ||
+ (ib_is_opa_gid(IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
+ req_msg))));
+}
+
+static void cm_path_set_rec_type(struct ib_device *ib_device, u32 port_num,
+ struct sa_path_rec *path, union ib_gid *gid)
+{
+ if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
+ path->rec_type = SA_PATH_REC_TYPE_OPA;
+ else
+ path->rec_type = SA_PATH_REC_TYPE_IB;
+}
+
+static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
+ struct sa_path_rec *primary_path,
+ struct sa_path_rec *alt_path,
+ struct ib_wc *wc)
{
- return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
- ((local_ca_guid == remote_ca_guid) &&
- (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
+ u32 lid;
+
+ if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
+ sa_path_set_dlid(primary_path, wc->slid);
+ sa_path_set_slid(primary_path,
+ IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
+ req_msg));
+ } else {
+ lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
+ CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg));
+ sa_path_set_dlid(primary_path, lid);
+
+ lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
+ CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg));
+ sa_path_set_slid(primary_path, lid);
+ }
+
+ if (!cm_req_has_alt_path(req_msg))
+ return;
+
+ if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
+ sa_path_set_dlid(alt_path,
+ IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
+ req_msg));
+ sa_path_set_slid(alt_path,
+ IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
+ req_msg));
+ } else {
+ lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
+ CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg));
+ sa_path_set_dlid(alt_path, lid);
+
+ lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
+ CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg));
+ sa_path_set_slid(alt_path, lid);
+ }
}
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
- struct ib_sa_path_rec *primary_path,
- struct ib_sa_path_rec *alt_path)
-{
- memset(primary_path, 0, sizeof *primary_path);
- primary_path->dgid = req_msg->primary_local_gid;
- primary_path->sgid = req_msg->primary_remote_gid;
- primary_path->dlid = req_msg->primary_local_lid;
- primary_path->slid = req_msg->primary_remote_lid;
- primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
- primary_path->hop_limit = req_msg->primary_hop_limit;
- primary_path->traffic_class = req_msg->primary_traffic_class;
+ struct sa_path_rec *primary_path,
+ struct sa_path_rec *alt_path,
+ struct ib_wc *wc)
+{
+ primary_path->dgid =
+ *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg);
+ primary_path->sgid =
+ *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg);
+ primary_path->flow_label =
+ cpu_to_be32(IBA_GET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg));
+ primary_path->hop_limit = IBA_GET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg);
+ primary_path->traffic_class =
+ IBA_GET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg);
primary_path->reversible = 1;
- primary_path->pkey = req_msg->pkey;
- primary_path->sl = cm_req_get_primary_sl(req_msg);
+ primary_path->pkey =
+ cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
+ primary_path->sl = IBA_GET(CM_REQ_PRIMARY_SL, req_msg);
primary_path->mtu_selector = IB_SA_EQ;
- primary_path->mtu = cm_req_get_path_mtu(req_msg);
+ primary_path->mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
primary_path->rate_selector = IB_SA_EQ;
- primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
+ primary_path->rate = IBA_GET(CM_REQ_PRIMARY_PACKET_RATE, req_msg);
primary_path->packet_life_time_selector = IB_SA_EQ;
primary_path->packet_life_time =
- cm_req_get_primary_local_ack_timeout(req_msg);
+ IBA_GET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg);
primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
- primary_path->service_id = req_msg->service_id;
-
- if (req_msg->alt_local_lid) {
- memset(alt_path, 0, sizeof *alt_path);
- alt_path->dgid = req_msg->alt_local_gid;
- alt_path->sgid = req_msg->alt_remote_gid;
- alt_path->dlid = req_msg->alt_local_lid;
- alt_path->slid = req_msg->alt_remote_lid;
- alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
- alt_path->hop_limit = req_msg->alt_hop_limit;
- alt_path->traffic_class = req_msg->alt_traffic_class;
+ primary_path->service_id =
+ cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
+ if (sa_path_is_roce(primary_path))
+ primary_path->roce.route_resolved = false;
+
+ if (cm_req_has_alt_path(req_msg)) {
+ alt_path->dgid = *IBA_GET_MEM_PTR(
+ CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg);
+ alt_path->sgid = *IBA_GET_MEM_PTR(
+ CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg);
+ alt_path->flow_label = cpu_to_be32(
+ IBA_GET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg));
+ alt_path->hop_limit =
+ IBA_GET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg);
+ alt_path->traffic_class =
+ IBA_GET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg);
alt_path->reversible = 1;
- alt_path->pkey = req_msg->pkey;
- alt_path->sl = cm_req_get_alt_sl(req_msg);
+ alt_path->pkey =
+ cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
+ alt_path->sl = IBA_GET(CM_REQ_ALTERNATE_SL, req_msg);
alt_path->mtu_selector = IB_SA_EQ;
- alt_path->mtu = cm_req_get_path_mtu(req_msg);
+ alt_path->mtu =
+ IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
alt_path->rate_selector = IB_SA_EQ;
- alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
+ alt_path->rate = IBA_GET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg);
alt_path->packet_life_time_selector = IB_SA_EQ;
alt_path->packet_life_time =
- cm_req_get_alt_local_ack_timeout(req_msg);
+ IBA_GET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg);
alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
- alt_path->service_id = req_msg->service_id;
+ alt_path->service_id =
+ cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
+
+ if (sa_path_is_roce(alt_path))
+ alt_path->roce.route_resolved = false;
}
+ cm_format_path_lid_from_req(req_msg, primary_path, alt_path, wc);
}
static u16 cm_get_bth_pkey(struct cm_work *work)
{
struct ib_device *ib_dev = work->port->cm_dev->ib_device;
- u8 port_num = work->port->port_num;
+ u32 port_num = work->port->port_num;
u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
u16 pkey;
int ret;
ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
if (ret) {
- dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
+ dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %u, pkey index %u). %d\n",
port_num, pkey_index, ret);
return 0;
}
@@ -1351,6 +1787,35 @@ static u16 cm_get_bth_pkey(struct cm_work *work)
return pkey;
}
+/**
+ * cm_opa_to_ib_sgid - Convert OPA SGID to IB SGID
+ * ULPs (such as IPoIB) do not understand OPA GIDs and will
+ * reject them as the local_gid will not match the sgid. Therefore,
+ * change the pathrec's SGID to an IB SGID.
+ *
+ * @work: Work completion
+ * @path: Path record
+ */
+static void cm_opa_to_ib_sgid(struct cm_work *work,
+ struct sa_path_rec *path)
+{
+ struct ib_device *dev = work->port->cm_dev->ib_device;
+ u32 port_num = work->port->port_num;
+
+ if (rdma_cap_opa_ah(dev, port_num) &&
+ (ib_is_opa_gid(&path->sgid))) {
+ union ib_gid sgid;
+
+ if (rdma_query_gid(dev, port_num, 0, &sgid)) {
+ dev_warn(&dev->dev,
+ "Error updating sgid in CM request\n");
+ return;
+ }
+
+ path->sgid = sgid;
+ }
+}
+
static void cm_format_req_event(struct cm_work *work,
struct cm_id_private *cm_id_priv,
struct ib_cm_id *listen_id)
@@ -1364,26 +1829,35 @@ static void cm_format_req_event(struct cm_work *work,
param->bth_pkey = cm_get_bth_pkey(work);
param->port = cm_id_priv->av.port->port_num;
param->primary_path = &work->path[0];
- if (req_msg->alt_local_lid)
+ cm_opa_to_ib_sgid(work, param->primary_path);
+ if (cm_req_has_alt_path(req_msg)) {
param->alternate_path = &work->path[1];
- else
+ cm_opa_to_ib_sgid(work, param->alternate_path);
+ } else {
param->alternate_path = NULL;
- param->remote_ca_guid = req_msg->local_ca_guid;
- param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
- param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
+ }
+ param->remote_ca_guid =
+ cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
+ param->remote_qkey = IBA_GET(CM_REQ_LOCAL_Q_KEY, req_msg);
+ param->remote_qpn = IBA_GET(CM_REQ_LOCAL_QPN, req_msg);
param->qp_type = cm_req_get_qp_type(req_msg);
- param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
- param->responder_resources = cm_req_get_init_depth(req_msg);
- param->initiator_depth = cm_req_get_resp_res(req_msg);
+ param->starting_psn = IBA_GET(CM_REQ_STARTING_PSN, req_msg);
+ param->responder_resources = IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
+ param->initiator_depth = IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
param->local_cm_response_timeout =
- cm_req_get_remote_resp_timeout(req_msg);
- param->flow_control = cm_req_get_flow_ctrl(req_msg);
+ IBA_GET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg);
+ param->flow_control = IBA_GET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg);
param->remote_cm_response_timeout =
- cm_req_get_local_resp_timeout(req_msg);
- param->retry_count = cm_req_get_retry_count(req_msg);
- param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
- param->srq = cm_req_get_srq(req_msg);
- work->cm_event.private_data = &req_msg->private_data;
+ IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg);
+ param->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
+ param->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
+ param->srq = IBA_GET(CM_REQ_SRQ, req_msg);
+ param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
+ param->ece.vendor_id = IBA_GET(CM_REQ_VENDOR_ID, req_msg);
+ param->ece.attr_mod = be32_to_cpu(req_msg->hdr.attr_mod);
+
+ work->cm_event.private_data =
+ IBA_GET_MEM_PTR(CM_REQ_PRIVATE_DATA, req_msg);
}
static void cm_process_work(struct cm_id_private *cm_id_priv,
@@ -1399,7 +1873,9 @@ static void cm_process_work(struct cm_id_private *cm_id_priv,
spin_lock_irq(&cm_id_priv->lock);
work = cm_dequeue_work(cm_id_priv);
spin_unlock_irq(&cm_id_priv->lock);
- BUG_ON(!work);
+ if (!work)
+ return;
+
ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
&work->cm_event);
cm_free_work(work);
@@ -1411,58 +1887,67 @@ static void cm_process_work(struct cm_id_private *cm_id_priv,
static void cm_format_mra(struct cm_mra_msg *mra_msg,
struct cm_id_private *cm_id_priv,
- enum cm_msg_response msg_mraed, u8 service_timeout,
+ enum cm_msg_response msg_mraed,
const void *private_data, u8 private_data_len)
{
cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
- cm_mra_set_msg_mraed(mra_msg, msg_mraed);
- mra_msg->local_comm_id = cm_id_priv->id.local_id;
- mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
- cm_mra_set_service_timeout(mra_msg, service_timeout);
+ IBA_SET(CM_MRA_MESSAGE_MRAED, mra_msg, msg_mraed);
+ IBA_SET(CM_MRA_LOCAL_COMM_ID, mra_msg,
+ be32_to_cpu(cm_id_priv->id.local_id));
+ IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg,
+ be32_to_cpu(cm_id_priv->id.remote_id));
+ IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, CM_MRA_SETTING);
if (private_data && private_data_len)
- memcpy(mra_msg->private_data, private_data, private_data_len);
+ IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data,
+ private_data_len);
}
static void cm_format_rej(struct cm_rej_msg *rej_msg,
struct cm_id_private *cm_id_priv,
- enum ib_cm_rej_reason reason,
- void *ari,
- u8 ari_length,
- const void *private_data,
- u8 private_data_len)
+ enum ib_cm_rej_reason reason, void *ari,
+ u8 ari_length, const void *private_data,
+ u8 private_data_len, enum ib_cm_state state)
{
+ lockdep_assert_held(&cm_id_priv->lock);
+
cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
- rej_msg->remote_comm_id = cm_id_priv->id.remote_id;
+ IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
+ be32_to_cpu(cm_id_priv->id.remote_id));
- switch(cm_id_priv->id.state) {
+ switch (state) {
case IB_CM_REQ_RCVD:
- rej_msg->local_comm_id = 0;
- cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
+ IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0));
+ IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
break;
case IB_CM_MRA_REQ_SENT:
- rej_msg->local_comm_id = cm_id_priv->id.local_id;
- cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
+ IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
+ be32_to_cpu(cm_id_priv->id.local_id));
+ IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
break;
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
- rej_msg->local_comm_id = cm_id_priv->id.local_id;
- cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
+ IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
+ be32_to_cpu(cm_id_priv->id.local_id));
+ IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REP);
break;
default:
- rej_msg->local_comm_id = cm_id_priv->id.local_id;
- cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
+ IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
+ be32_to_cpu(cm_id_priv->id.local_id));
+ IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg,
+ CM_MSG_RESPONSE_OTHER);
break;
}
- rej_msg->reason = cpu_to_be16(reason);
+ IBA_SET(CM_REJ_REASON, rej_msg, reason);
if (ari && ari_length) {
- cm_rej_set_reject_info_len(rej_msg, ari_length);
- memcpy(rej_msg->ari, ari, ari_length);
+ IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
+ IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
}
if (private_data && private_data_len)
- memcpy(rej_msg->private_data, private_data, private_data_len);
+ IBA_SET_MEM(CM_REJ_PRIVATE_DATA, rej_msg, private_data,
+ private_data_len);
}
static void cm_dup_req_handler(struct cm_work *work,
@@ -1471,14 +1956,18 @@ static void cm_dup_req_handler(struct cm_work *work,
struct ib_mad_send_buf *msg = NULL;
int ret;
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_REQ_COUNTER]);
+ atomic_long_inc(
+ &work->port->counters[CM_RECV_DUPLICATES][CM_REQ_COUNTER]);
/* Quick state check to discard duplicate REQs. */
- if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
+ spin_lock_irq(&cm_id_priv->lock);
+ if (cm_id_priv->id.state == IB_CM_REQ_RCVD) {
+ spin_unlock_irq(&cm_id_priv->lock);
return;
+ }
+ spin_unlock_irq(&cm_id_priv->lock);
- ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
+ ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, true, &msg);
if (ret)
return;
@@ -1486,19 +1975,21 @@ static void cm_dup_req_handler(struct cm_work *work,
switch (cm_id_priv->id.state) {
case IB_CM_MRA_REQ_SENT:
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
- CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
+ CM_MSG_RESPONSE_REQ,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
break;
case IB_CM_TIMEWAIT:
- cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
- IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
+ cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv,
+ IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0,
+ IB_CM_TIMEWAIT);
break;
default:
goto unlock;
}
spin_unlock_irq(&cm_id_priv->lock);
+ trace_icm_send_dup_req(&cm_id_priv->id);
ret = ib_post_send_mad(msg, NULL);
if (ret)
goto free;
@@ -1508,8 +1999,8 @@ unlock: spin_unlock_irq(&cm_id_priv->lock);
free: cm_free_msg(msg);
}
-static struct cm_id_private * cm_match_req(struct cm_work *work,
- struct cm_id_private *cm_id_priv)
+static struct cm_id_private *cm_match_req(struct cm_work *work,
+ struct cm_id_private *cm_id_priv)
{
struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
struct cm_timewait_info *timewait_info;
@@ -1521,7 +2012,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
spin_lock_irq(&cm.lock);
timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
if (timewait_info) {
- cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
+ cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
timewait_info->work.remote_id);
spin_unlock_irq(&cm.lock);
if (cur_cm_id_priv) {
@@ -1534,31 +2025,34 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
/* Check for stale connections. */
timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
if (timewait_info) {
- cm_cleanup_timewait(cm_id_priv->timewait_info);
+ cm_remove_remote(cm_id_priv);
+ cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
+ timewait_info->work.remote_id);
+
spin_unlock_irq(&cm.lock);
cm_issue_rej(work->port, work->mad_recv_wc,
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
NULL, 0);
+ if (cur_cm_id_priv) {
+ ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
+ cm_deref_id(cur_cm_id_priv);
+ }
return NULL;
}
/* Find matching listen request. */
- listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
- req_msg->service_id);
+ listen_cm_id_priv = cm_find_listen(
+ cm_id_priv->id.device,
+ cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)));
if (!listen_cm_id_priv) {
- cm_cleanup_timewait(cm_id_priv->timewait_info);
+ cm_remove_remote(cm_id_priv);
spin_unlock_irq(&cm.lock);
cm_issue_rej(work->port, work->mad_recv_wc,
IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
NULL, 0);
- goto out;
+ return NULL;
}
- atomic_inc(&listen_cm_id_priv->refcount);
- atomic_inc(&cm_id_priv->refcount);
- cm_id_priv->id.state = IB_CM_REQ_RCVD;
- atomic_inc(&cm_id_priv->work_count);
spin_unlock_irq(&cm.lock);
-out:
return listen_cm_id_priv;
}
@@ -1569,114 +2063,187 @@ out:
*/
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
- if (!cm_req_get_primary_subnet_local(req_msg)) {
- if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
- req_msg->primary_local_lid = cpu_to_be16(wc->slid);
- cm_req_set_primary_sl(req_msg, wc->sl);
+ if (!IBA_GET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg)) {
+ if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
+ req_msg)) == IB_LID_PERMISSIVE) {
+ IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
+ be16_to_cpu(ib_lid_be16(wc->slid)));
+ IBA_SET(CM_REQ_PRIMARY_SL, req_msg, wc->sl);
}
- if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
- req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
+ if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
+ req_msg)) == IB_LID_PERMISSIVE)
+ IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
+ wc->dlid_path_bits);
}
- if (!cm_req_get_alt_subnet_local(req_msg)) {
- if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
- req_msg->alt_local_lid = cpu_to_be16(wc->slid);
- cm_req_set_alt_sl(req_msg, wc->sl);
+ if (!IBA_GET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg)) {
+ if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
+ req_msg)) == IB_LID_PERMISSIVE) {
+ IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
+ be16_to_cpu(ib_lid_be16(wc->slid)));
+ IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, wc->sl);
}
- if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
- req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
+ if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
+ req_msg)) == IB_LID_PERMISSIVE)
+ IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
+ wc->dlid_path_bits);
}
}
static int cm_req_handler(struct cm_work *work)
{
- struct ib_cm_id *cm_id;
struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
struct cm_req_msg *req_msg;
+ const struct ib_global_route *grh;
+ const struct ib_gid_attr *gid_attr;
int ret;
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
- cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
- if (IS_ERR(cm_id))
- return PTR_ERR(cm_id);
+ cm_id_priv =
+ cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
+ if (IS_ERR(cm_id_priv))
+ return PTR_ERR(cm_id_priv);
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- cm_id_priv->id.remote_id = req_msg->local_comm_id;
- cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
- work->mad_recv_wc->recv_buf.grh,
- &cm_id_priv->av);
+ cm_id_priv->id.remote_id =
+ cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
+ cm_id_priv->id.service_id =
+ cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
+ cm_id_priv->tid = req_msg->hdr.tid;
+ cm_id_priv->timeout_ms = cm_convert_to_ms(
+ IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg));
+ cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg);
+ cm_id_priv->remote_qpn =
+ cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
+ cm_id_priv->initiator_depth =
+ IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
+ cm_id_priv->responder_resources =
+ IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
+ cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
+ cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
+ cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
+ cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
+ cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
+ cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
+
+ ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+ work->mad_recv_wc->recv_buf.grh,
+ &cm_id_priv->av);
+ if (ret)
+ goto destroy;
cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
id.local_id);
if (IS_ERR(cm_id_priv->timewait_info)) {
ret = PTR_ERR(cm_id_priv->timewait_info);
+ cm_id_priv->timewait_info = NULL;
goto destroy;
}
- cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
- cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
- cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);
+ cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id;
+ cm_id_priv->timewait_info->remote_ca_guid =
+ cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
+ cm_id_priv->timewait_info->remote_qpn = cm_id_priv->remote_qpn;
+
+ /*
+ * Note that the ID pointer is not in the xarray at this point,
+ * so this set is only visible to the local thread.
+ */
+ cm_id_priv->id.state = IB_CM_REQ_RCVD;
listen_cm_id_priv = cm_match_req(work, cm_id_priv);
if (!listen_cm_id_priv) {
+ trace_icm_no_listener_err(&cm_id_priv->id);
+ cm_id_priv->id.state = IB_CM_IDLE;
ret = -EINVAL;
- kfree(cm_id_priv->timewait_info);
goto destroy;
}
- cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
- cm_id_priv->id.context = listen_cm_id_priv->id.context;
- cm_id_priv->id.service_id = req_msg->service_id;
- cm_id_priv->id.service_mask = ~cpu_to_be64(0);
-
- cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
- cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
+ memset(&work->path[0], 0, sizeof(work->path[0]));
+ if (cm_req_has_alt_path(req_msg))
+ memset(&work->path[1], 0, sizeof(work->path[1]));
+ grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
+ gid_attr = grh->sgid_attr;
- memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, ETH_ALEN);
- work->path[0].vlan_id = cm_id_priv->av.ah_attr.vlan_id;
- ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
+ if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) {
+ work->path[0].rec_type =
+ sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
+ } else {
+ cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
+ cm_path_set_rec_type(
+ work->port->cm_dev->ib_device, work->port->port_num,
+ &work->path[0],
+ IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID,
+ req_msg));
+ }
+ if (cm_req_has_alt_path(req_msg))
+ work->path[1].rec_type = work->path[0].rec_type;
+ cm_format_paths_from_req(req_msg, &work->path[0],
+ &work->path[1], work->mad_recv_wc->wc);
+ if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
+ sa_path_set_dmac(&work->path[0],
+ cm_id_priv->av.ah_attr.roce.dmac);
+ work->path[0].hop_limit = grh->hop_limit;
+
+ /* This destroy call is needed to pair with cm_init_av_for_response */
+ cm_destroy_av(&cm_id_priv->av);
+ ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av);
if (ret) {
- ib_get_cached_gid(work->port->cm_dev->ib_device,
- work->port->port_num, 0, &work->path[0].sgid);
- ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
- &work->path[0].sgid, sizeof work->path[0].sgid,
- NULL, 0);
+ int err;
+
+ err = rdma_query_gid(work->port->cm_dev->ib_device,
+ work->port->port_num, 0,
+ &work->path[0].sgid);
+ if (err)
+ ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
+ NULL, 0, NULL, 0);
+ else
+ ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
+ &work->path[0].sgid,
+ sizeof(work->path[0].sgid),
+ NULL, 0);
goto rejected;
}
- if (req_msg->alt_local_lid) {
- ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
+ if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_IB)
+ cm_id_priv->av.dlid_datapath =
+ IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg);
+
+ if (cm_req_has_alt_path(req_msg)) {
+ ret = cm_init_av_by_path(&work->path[1], NULL,
+ &cm_id_priv->alt_av);
if (ret) {
- ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
+ ib_send_cm_rej(&cm_id_priv->id,
+ IB_CM_REJ_INVALID_ALT_GID,
&work->path[0].sgid,
- sizeof work->path[0].sgid, NULL, 0);
+ sizeof(work->path[0].sgid), NULL, 0);
goto rejected;
}
}
- cm_id_priv->tid = req_msg->hdr.tid;
- cm_id_priv->timeout_ms = cm_convert_to_ms(
- cm_req_get_local_resp_timeout(req_msg));
- cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
- cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
- cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
- cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
- cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
- cm_id_priv->pkey = req_msg->pkey;
- cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
- cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
- cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
- cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
+ cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
+ cm_id_priv->id.context = listen_cm_id_priv->id.context;
cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
- cm_process_work(cm_id_priv, work);
+
+ /* Now MAD handlers can see the new ID */
+ spin_lock_irq(&cm_id_priv->lock);
+ cm_finalize_id(cm_id_priv);
+
+ /* Refcount belongs to the event, pairs with cm_process_work() */
+ refcount_inc(&cm_id_priv->refcount);
+ cm_queue_work_unlock(cm_id_priv, work);
+ /*
+ * Since this ID was just created and was not made visible to other MAD
+ * handlers until the cm_finalize_id() above, we know that the
+ * cm_process_work() will deliver the event and the listen_cm_id
+ * embedded in the event can be dereferenced here.
+ */
cm_deref_id(listen_cm_id_priv);
return 0;
rejected:
- atomic_dec(&cm_id_priv->refcount);
cm_deref_id(listen_cm_id_priv);
destroy:
- ib_destroy_cm_id(cm_id);
+ ib_destroy_cm_id(&cm_id_priv->id);
return ret;
}
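Stepping back, the reworked cm_req_handler() follows an init-then-publish pattern: the new ID is fully initialized while it is unreachable (not yet stored in the xarray), and only cm_finalize_id() makes it visible to the MAD handlers, so a concurrent lookup can never observe a half-built ID. A small C11 sketch of the same idea, with a release/acquire pointer standing in for the xarray store and lookup:

    #include <stdatomic.h>
    #include <stdio.h>

    struct conn { int state; int remote_id; };

    static _Atomic(struct conn *) published; /* plays the role of the xarray slot */

    static void publish(struct conn *c)
    {
            c->state = 1;     /* finish all initialization first... */
            c->remote_id = 42;
            atomic_store_explicit(&published, c, memory_order_release); /* ...then expose it */
    }

    static struct conn *lookup(void)
    {
            /* Readers see either NULL or a fully initialized object, never a partial one. */
            return atomic_load_explicit(&published, memory_order_acquire);
    }

    int main(void)
    {
            static struct conn c;
            publish(&c);
            struct conn *seen = lookup();
            printf("state=%d remote_id=%d\n", seen->state, seen->remote_id);
            return 0;
    }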
@@ -1684,30 +2251,41 @@ static void cm_format_rep(struct cm_rep_msg *rep_msg,
struct cm_id_private *cm_id_priv,
struct ib_cm_rep_param *param)
{
- cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
- rep_msg->local_comm_id = cm_id_priv->id.local_id;
- rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
- cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
- rep_msg->resp_resources = param->responder_resources;
- cm_rep_set_target_ack_delay(rep_msg,
- cm_id_priv->av.port->cm_dev->ack_delay);
- cm_rep_set_failover(rep_msg, param->failover_accepted);
- cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
- rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
+ cm_format_mad_ece_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid,
+ param->ece.attr_mod);
+ IBA_SET(CM_REP_LOCAL_COMM_ID, rep_msg,
+ be32_to_cpu(cm_id_priv->id.local_id));
+ IBA_SET(CM_REP_REMOTE_COMM_ID, rep_msg,
+ be32_to_cpu(cm_id_priv->id.remote_id));
+ IBA_SET(CM_REP_STARTING_PSN, rep_msg, param->starting_psn);
+ IBA_SET(CM_REP_RESPONDER_RESOURCES, rep_msg,
+ param->responder_resources);
+ IBA_SET(CM_REP_TARGET_ACK_DELAY, rep_msg,
+ cm_id_priv->av.port->cm_dev->ack_delay);
+ IBA_SET(CM_REP_FAILOVER_ACCEPTED, rep_msg, param->failover_accepted);
+ IBA_SET(CM_REP_RNR_RETRY_COUNT, rep_msg, param->rnr_retry_count);
+ IBA_SET(CM_REP_LOCAL_CA_GUID, rep_msg,
+ be64_to_cpu(cm_id_priv->id.device->node_guid));
if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
- rep_msg->initiator_depth = param->initiator_depth;
- cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
- cm_rep_set_srq(rep_msg, param->srq);
- cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
+ IBA_SET(CM_REP_INITIATOR_DEPTH, rep_msg,
+ param->initiator_depth);
+ IBA_SET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg,
+ param->flow_control);
+ IBA_SET(CM_REP_SRQ, rep_msg, param->srq);
+ IBA_SET(CM_REP_LOCAL_QPN, rep_msg, param->qp_num);
} else {
- cm_rep_set_srq(rep_msg, 1);
- cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
+ IBA_SET(CM_REP_SRQ, rep_msg, 1);
+ IBA_SET(CM_REP_LOCAL_EE_CONTEXT_NUMBER, rep_msg, param->qp_num);
}
+ IBA_SET(CM_REP_VENDOR_ID_L, rep_msg, param->ece.vendor_id);
+ IBA_SET(CM_REP_VENDOR_ID_M, rep_msg, param->ece.vendor_id >> 8);
+ IBA_SET(CM_REP_VENDOR_ID_H, rep_msg, param->ece.vendor_id >> 16);
+
if (param->private_data && param->private_data_len)
- memcpy(rep_msg->private_data, param->private_data,
- param->private_data_len);
+ IBA_SET_MEM(CM_REP_PRIVATE_DATA, rep_msg, param->private_data,
+ param->private_data_len);
}
int ib_send_cm_rep(struct ib_cm_id *cm_id,
@@ -1727,34 +2305,40 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_REQ_RCVD &&
cm_id->state != IB_CM_MRA_REQ_SENT) {
+ trace_icm_send_rep_err(cm_id_priv->id.local_id, cm_id->state);
ret = -EINVAL;
goto out;
}
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (ret)
+ msg = cm_alloc_priv_msg_rep(cm_id_priv, IB_CM_REP_SENT, true);
+ if (IS_ERR(msg)) {
+ ret = PTR_ERR(msg);
goto out;
+ }
rep_msg = (struct cm_rep_msg *) msg->mad;
cm_format_rep(rep_msg, cm_id_priv, param);
- msg->timeout_ms = cm_id_priv->timeout_ms;
- msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
+ trace_icm_send_rep(cm_id);
ret = ib_post_send_mad(msg, NULL);
- if (ret) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- cm_free_msg(msg);
- return ret;
- }
+ if (ret)
+ goto out_free;
cm_id->state = IB_CM_REP_SENT;
- cm_id_priv->msg = msg;
cm_id_priv->initiator_depth = param->initiator_depth;
cm_id_priv->responder_resources = param->responder_resources;
- cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
+ cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
+ WARN_ONCE(param->qp_num & 0xFF000000,
+ "IBTA declares QPN to be 24 bits, but it is 0x%X\n",
+ param->qp_num);
cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ return 0;
-out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+out_free:
+ cm_free_priv_msg(msg);
+out:
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);
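/*
 * Usage sketch, illustrative only: filling the new ECE fields of
 * struct ib_cm_rep_param before calling ib_send_cm_rep(). The .ece member
 * names follow the accessors above; 'my_qp' and 'ece_attr_mod' are
 * hypothetical, and all numeric values are placeholders.
 *
 *	struct ib_cm_rep_param rep = {
 *		.qp_num			= my_qp->qp_num,
 *		.starting_psn		= 0x100,
 *		.responder_resources	= 4,
 *		.rnr_retry_count	= 7,
 *		.ece.vendor_id		= 0x123456,	// 24 bits on the wire
 *		.ece.attr_mod		= ece_attr_mod,	// negotiated ECE options (assumed)
 *	};
 *	ret = ib_send_cm_rep(cm_id, &rep);
 */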
@@ -1765,11 +2349,14 @@ static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
u8 private_data_len)
{
cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
- rtu_msg->local_comm_id = cm_id_priv->id.local_id;
- rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
+ IBA_SET(CM_RTU_LOCAL_COMM_ID, rtu_msg,
+ be32_to_cpu(cm_id_priv->id.local_id));
+ IBA_SET(CM_RTU_REMOTE_COMM_ID, rtu_msg,
+ be32_to_cpu(cm_id_priv->id.remote_id));
if (private_data && private_data_len)
- memcpy(rtu_msg->private_data, private_data, private_data_len);
+ IBA_SET_MEM(CM_RTU_PRIVATE_DATA, rtu_msg, private_data,
+ private_data_len);
}
int ib_send_cm_rtu(struct ib_cm_id *cm_id,
@@ -1793,17 +2380,21 @@ int ib_send_cm_rtu(struct ib_cm_id *cm_id,
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_REP_RCVD &&
cm_id->state != IB_CM_MRA_REP_SENT) {
+ trace_icm_send_cm_rtu_err(cm_id);
ret = -EINVAL;
goto error;
}
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (ret)
+ msg = cm_alloc_msg(cm_id_priv);
+ if (IS_ERR(msg)) {
+ ret = PTR_ERR(msg);
goto error;
+ }
cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
private_data, private_data_len);
+ trace_icm_send_rtu(cm_id);
ret = ib_post_send_mad(msg, NULL);
if (ret) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -1830,18 +2421,25 @@ static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
param = &work->cm_event.param.rep_rcvd;
- param->remote_ca_guid = rep_msg->local_ca_guid;
- param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
+ param->remote_ca_guid =
+ cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
+ param->remote_qkey = IBA_GET(CM_REP_LOCAL_Q_KEY, rep_msg);
param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
- param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
- param->responder_resources = rep_msg->initiator_depth;
- param->initiator_depth = rep_msg->resp_resources;
- param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
- param->failover_accepted = cm_rep_get_failover(rep_msg);
- param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
- param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
- param->srq = cm_rep_get_srq(rep_msg);
- work->cm_event.private_data = &rep_msg->private_data;
+ param->starting_psn = IBA_GET(CM_REP_STARTING_PSN, rep_msg);
+ param->responder_resources = IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
+ param->initiator_depth = IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
+ param->target_ack_delay = IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
+ param->failover_accepted = IBA_GET(CM_REP_FAILOVER_ACCEPTED, rep_msg);
+ param->flow_control = IBA_GET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg);
+ param->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
+ param->srq = IBA_GET(CM_REP_SRQ, rep_msg);
+ param->ece.vendor_id = IBA_GET(CM_REP_VENDOR_ID_H, rep_msg) << 16;
+ param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_M, rep_msg) << 8;
+ param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_L, rep_msg);
+ param->ece.attr_mod = be32_to_cpu(rep_msg->hdr.attr_mod);
+
+ work->cm_event.private_data =
+ IBA_GET_MEM_PTR(CM_REP_PRIVATE_DATA, rep_msg);
}
static void cm_dup_rep_handler(struct cm_work *work)
@@ -1852,14 +2450,15 @@ static void cm_dup_rep_handler(struct cm_work *work)
int ret;
rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
- cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
- rep_msg->local_comm_id);
+ cm_id_priv = cm_acquire_id(
+ cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)),
+ cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)));
if (!cm_id_priv)
return;
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_REP_COUNTER]);
- ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
+ atomic_long_inc(
+ &work->port->counters[CM_RECV_DUPLICATES][CM_REP_COUNTER]);
+ ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, true, &msg);
if (ret)
goto deref;
@@ -1870,13 +2469,14 @@ static void cm_dup_rep_handler(struct cm_work *work)
cm_id_priv->private_data_len);
else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
- CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
+ CM_MSG_RESPONSE_REP,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
else
goto unlock;
spin_unlock_irq(&cm_id_priv->lock);
+ trace_icm_send_dup_rep(&cm_id_priv->id);
ret = ib_post_send_mad(msg, NULL);
if (ret)
goto free;
@@ -1892,11 +2492,16 @@ static int cm_rep_handler(struct cm_work *work)
struct cm_id_private *cm_id_priv;
struct cm_rep_msg *rep_msg;
int ret;
+ struct cm_id_private *cur_cm_id_priv;
+ struct cm_timewait_info *timewait_info;
rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
- cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
+ cm_id_priv = cm_acquire_id(
+ cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), 0);
if (!cm_id_priv) {
cm_dup_rep_handler(work);
+ trace_icm_remote_no_priv_err(
+ IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
return -EINVAL;
}
@@ -1908,13 +2513,19 @@ static int cm_rep_handler(struct cm_work *work)
case IB_CM_MRA_REQ_RCVD:
break;
default:
- spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
+ trace_icm_rep_unknown_err(
+ IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
+ IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg),
+ cm_id_priv->id.state);
+ spin_unlock_irq(&cm_id_priv->lock);
goto error;
}
- cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
- cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
+ cm_id_priv->timewait_info->work.remote_id =
+ cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
+ cm_id_priv->timewait_info->remote_ca_guid =
+ cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
spin_lock(&cm.lock);
@@ -1923,31 +2534,48 @@ static int cm_rep_handler(struct cm_work *work)
spin_unlock(&cm.lock);
spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
+ trace_icm_insert_failed_err(
+ IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
goto error;
}
/* Check for a stale connection. */
- if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
- rb_erase(&cm_id_priv->timewait_info->remote_id_node,
- &cm.remote_id_table);
- cm_id_priv->timewait_info->inserted_remote_id = 0;
+ timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
+ if (timewait_info) {
+ cm_remove_remote(cm_id_priv);
+ cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
+ timewait_info->work.remote_id);
+
spin_unlock(&cm.lock);
spin_unlock_irq(&cm_id_priv->lock);
cm_issue_rej(work->port, work->mad_recv_wc,
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
NULL, 0);
ret = -EINVAL;
+ trace_icm_staleconn_err(
+ IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
+ IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
+
+ if (cur_cm_id_priv) {
+ ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
+ cm_deref_id(cur_cm_id_priv);
+ }
+
goto error;
}
spin_unlock(&cm.lock);
cm_id_priv->id.state = IB_CM_REP_RCVD;
- cm_id_priv->id.remote_id = rep_msg->local_comm_id;
+ cm_id_priv->id.remote_id =
+ cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
- cm_id_priv->initiator_depth = rep_msg->resp_resources;
- cm_id_priv->responder_resources = rep_msg->initiator_depth;
- cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
- cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
- cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
+ cm_id_priv->initiator_depth =
+ IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
+ cm_id_priv->responder_resources =
+ IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
+ cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
+ cm_id_priv->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
+ cm_id_priv->target_ack_delay =
+ IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
cm_id_priv->av.timeout =
cm_ack_timeout(cm_id_priv->target_ack_delay,
cm_id_priv->av.timeout - 1);
@@ -1955,18 +2583,8 @@ static int cm_rep_handler(struct cm_work *work)
cm_ack_timeout(cm_id_priv->target_ack_delay,
cm_id_priv->alt_av.timeout - 1);
- /* todo: handle peer_to_peer */
-
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ ib_cancel_mad(cm_id_priv->msg);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
error:
@@ -1977,7 +2595,6 @@ error:
static int cm_establish_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
- int ret;
/* See comment in cm_establish about lookup. */
cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
@@ -1990,16 +2607,8 @@ static int cm_establish_handler(struct cm_work *work)
goto out;
}
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ ib_cancel_mad(cm_id_priv->msg);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
@@ -2010,36 +2619,29 @@ static int cm_rtu_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_rtu_msg *rtu_msg;
- int ret;
rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
- cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
- rtu_msg->local_comm_id);
+ cm_id_priv = cm_acquire_id(
+ cpu_to_be32(IBA_GET(CM_RTU_REMOTE_COMM_ID, rtu_msg)),
+ cpu_to_be32(IBA_GET(CM_RTU_LOCAL_COMM_ID, rtu_msg)));
if (!cm_id_priv)
return -EINVAL;
- work->cm_event.private_data = &rtu_msg->private_data;
+ work->cm_event.private_data =
+ IBA_GET_MEM_PTR(CM_RTU_PRIVATE_DATA, rtu_msg);
spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_REP_SENT &&
cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
spin_unlock_irq(&cm_id_priv->lock);
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_RTU_COUNTER]);
+ atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
+ [CM_RTU_COUNTER]);
goto out;
}
cm_id_priv->id.state = IB_CM_ESTABLISHED;
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ ib_cancel_mad(cm_id_priv->msg);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
@@ -2052,20 +2654,43 @@ static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
u8 private_data_len)
{
cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
- cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
- dreq_msg->local_comm_id = cm_id_priv->id.local_id;
- dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
- cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
+ cm_form_tid(cm_id_priv));
+ IBA_SET(CM_DREQ_LOCAL_COMM_ID, dreq_msg,
+ be32_to_cpu(cm_id_priv->id.local_id));
+ IBA_SET(CM_DREQ_REMOTE_COMM_ID, dreq_msg,
+ be32_to_cpu(cm_id_priv->id.remote_id));
+ IBA_SET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg,
+ be32_to_cpu(cm_id_priv->remote_qpn));
if (private_data && private_data_len)
- memcpy(dreq_msg->private_data, private_data, private_data_len);
+ IBA_SET_MEM(CM_DREQ_PRIVATE_DATA, dreq_msg, private_data,
+ private_data_len);
+}
+
+static void cm_issue_dreq(struct cm_id_private *cm_id_priv)
+{
+ struct ib_mad_send_buf *msg;
+ int ret;
+
+ lockdep_assert_held(&cm_id_priv->lock);
+
+ msg = cm_alloc_msg(cm_id_priv);
+ if (IS_ERR(msg))
+ return;
+
+ cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv, NULL, 0);
+
+ trace_icm_send_dreq(&cm_id_priv->id);
+ ret = ib_post_send_mad(msg, NULL);
+ if (ret)
+ cm_free_msg(msg);
}
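/*
 * Illustrative note, not part of the patch: unlike ib_send_cm_dreq() below,
 * cm_issue_dreq() runs with cm_id_priv->lock already held (see the
 * lockdep_assert_held() above), sends no private data, and silently gives
 * up if the MAD cannot be allocated or posted. It is presumably meant for
 * internal teardown paths where issuing the DREQ is best-effort.
 */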
-int ib_send_cm_dreq(struct ib_cm_id *cm_id,
- const void *private_data,
+int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data,
u8 private_data_len)
{
- struct cm_id_private *cm_id_priv;
+ struct cm_id_private *cm_id_priv =
+ container_of(cm_id, struct cm_id_private, id);
struct ib_mad_send_buf *msg;
unsigned long flags;
int ret;
@@ -2073,39 +2698,38 @@ int ib_send_cm_dreq(struct ib_cm_id *cm_id,
if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
return -EINVAL;
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id->state != IB_CM_ESTABLISHED) {
+ if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
+ trace_icm_dreq_skipped(&cm_id_priv->id);
ret = -EINVAL;
- goto out;
+ goto unlock;
}
- if (cm_id->lap_state == IB_CM_LAP_SENT ||
- cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+ if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
+ cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
+ ib_cancel_mad(cm_id_priv->msg);
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (ret) {
+ msg = cm_alloc_priv_msg(cm_id_priv, IB_CM_DREQ_SENT);
+ if (IS_ERR(msg)) {
cm_enter_timewait(cm_id_priv);
- goto out;
+ ret = PTR_ERR(msg);
+ goto unlock;
}
cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
private_data, private_data_len);
- msg->timeout_ms = cm_id_priv->timeout_ms;
- msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
+ trace_icm_send_dreq(&cm_id_priv->id);
ret = ib_post_send_mad(msg, NULL);
if (ret) {
cm_enter_timewait(cm_id_priv);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- cm_free_msg(msg);
- return ret;
+ cm_free_priv_msg(msg);
+ goto unlock;
}
- cm_id->state = IB_CM_DREQ_SENT;
- cm_id_priv->msg = msg;
-out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ cm_id_priv->id.state = IB_CM_DREQ_SENT;
+unlock:
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);
@@ -2116,56 +2740,68 @@ static void cm_format_drep(struct cm_drep_msg *drep_msg,
u8 private_data_len)
{
cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
- drep_msg->local_comm_id = cm_id_priv->id.local_id;
- drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
+ IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
+ be32_to_cpu(cm_id_priv->id.local_id));
+ IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
+ be32_to_cpu(cm_id_priv->id.remote_id));
if (private_data && private_data_len)
- memcpy(drep_msg->private_data, private_data, private_data_len);
+ IBA_SET_MEM(CM_DREP_PRIVATE_DATA, drep_msg, private_data,
+ private_data_len);
}
-int ib_send_cm_drep(struct ib_cm_id *cm_id,
- const void *private_data,
- u8 private_data_len)
+static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
+ void *private_data, u8 private_data_len)
{
- struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
- unsigned long flags;
- void *data;
int ret;
+ lockdep_assert_held(&cm_id_priv->lock);
+
if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
return -EINVAL;
- data = cm_copy_private_data(private_data, private_data_len);
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id->state != IB_CM_DREQ_RCVD) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- kfree(data);
+ if (cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
+ trace_icm_send_drep_err(&cm_id_priv->id);
+ kfree(private_data);
return -EINVAL;
}
- cm_set_private_data(cm_id_priv, data, private_data_len);
+ cm_set_private_data(cm_id_priv, private_data, private_data_len);
cm_enter_timewait(cm_id_priv);
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (ret)
- goto out;
+ msg = cm_alloc_msg(cm_id_priv);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
private_data, private_data_len);
+ trace_icm_send_drep(&cm_id_priv->id);
ret = ib_post_send_mad(msg, NULL);
if (ret) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_free_msg(msg);
return ret;
}
+ return 0;
+}
+
+int ib_send_cm_drep(struct ib_cm_id *cm_id, const void *private_data,
+ u8 private_data_len)
+{
+ struct cm_id_private *cm_id_priv =
+ container_of(cm_id, struct cm_id_private, id);
+ unsigned long flags;
+ void *data;
+ int ret;
+
+ data = cm_copy_private_data(private_data, private_data_len);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
-out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ ret = cm_send_drep_locked(cm_id_priv, data, private_data_len);
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);
@@ -2178,7 +2814,7 @@ static int cm_issue_drep(struct cm_port *port,
struct cm_drep_msg *drep_msg;
int ret;
- ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
+ ret = cm_alloc_response_msg(port, mad_recv_wc, true, &msg);
if (ret)
return ret;
@@ -2186,9 +2822,14 @@ static int cm_issue_drep(struct cm_port *port,
drep_msg = (struct cm_drep_msg *) msg->mad;
cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
- drep_msg->remote_comm_id = dreq_msg->local_comm_id;
- drep_msg->local_comm_id = dreq_msg->remote_comm_id;
-
+ IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
+ IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg));
+ IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
+ IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
+
+ trace_icm_issue_drep(
+ IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
+ IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
ret = ib_post_send_mad(msg, NULL);
if (ret)
cm_free_msg(msg);
@@ -2201,40 +2842,46 @@ static int cm_dreq_handler(struct cm_work *work)
struct cm_id_private *cm_id_priv;
struct cm_dreq_msg *dreq_msg;
struct ib_mad_send_buf *msg = NULL;
- int ret;
dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
- cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
- dreq_msg->local_comm_id);
+ cm_id_priv = cm_acquire_id(
+ cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)),
+ cpu_to_be32(IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg)));
if (!cm_id_priv) {
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_DREQ_COUNTER]);
+ atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
+ [CM_DREQ_COUNTER]);
cm_issue_drep(work->port, work->mad_recv_wc);
+ trace_icm_no_priv_err(
+ IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
+ IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
return -EINVAL;
}
- work->cm_event.private_data = &dreq_msg->private_data;
+ work->cm_event.private_data =
+ IBA_GET_MEM_PTR(CM_DREQ_PRIVATE_DATA, dreq_msg);
spin_lock_irq(&cm_id_priv->lock);
- if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
+ if (cm_id_priv->local_qpn !=
+ cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg)))
goto unlock;
switch (cm_id_priv->id.state) {
case IB_CM_REP_SENT:
case IB_CM_DREQ_SENT:
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+ case IB_CM_MRA_REP_RCVD:
+ ib_cancel_mad(cm_id_priv->msg);
break;
case IB_CM_ESTABLISHED:
if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- break;
- case IB_CM_MRA_REP_RCVD:
+ ib_cancel_mad(cm_id_priv->msg);
break;
case IB_CM_TIMEWAIT:
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_DREQ_COUNTER]);
- if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
+ atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
+ [CM_DREQ_COUNTER]);
+ msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc,
+ true);
+ if (IS_ERR(msg))
goto unlock;
cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
@@ -2242,27 +2889,21 @@ static int cm_dreq_handler(struct cm_work *work)
cm_id_priv->private_data_len);
spin_unlock_irq(&cm_id_priv->lock);
- if (ib_post_send_mad(msg, NULL))
+ if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
+ ib_post_send_mad(msg, NULL))
cm_free_msg(msg);
goto deref;
case IB_CM_DREQ_RCVD:
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_DREQ_COUNTER]);
+ atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
+ [CM_DREQ_COUNTER]);
goto unlock;
default:
+ trace_icm_dreq_unknown_err(&cm_id_priv->id);
goto unlock;
}
cm_id_priv->id.state = IB_CM_DREQ_RCVD;
cm_id_priv->tid = dreq_msg->hdr.tid;
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
unlock: spin_unlock_irq(&cm_id_priv->lock);
@@ -2274,15 +2915,16 @@ static int cm_drep_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_drep_msg *drep_msg;
- int ret;
drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
- cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
- drep_msg->local_comm_id);
+ cm_id_priv = cm_acquire_id(
+ cpu_to_be32(IBA_GET(CM_DREP_REMOTE_COMM_ID, drep_msg)),
+ cpu_to_be32(IBA_GET(CM_DREP_LOCAL_COMM_ID, drep_msg)));
if (!cm_id_priv)
return -EINVAL;
- work->cm_event.private_data = &drep_msg->private_data;
+ work->cm_event.private_data =
+ IBA_GET_MEM_PTR(CM_DREP_PRIVATE_DATA, drep_msg);
spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
@@ -2292,79 +2934,83 @@ static int cm_drep_handler(struct cm_work *work)
}
cm_enter_timewait(cm_id_priv);
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ ib_cancel_mad(cm_id_priv->msg);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
return -EINVAL;
}
-int ib_send_cm_rej(struct ib_cm_id *cm_id,
- enum ib_cm_rej_reason reason,
- void *ari,
- u8 ari_length,
- const void *private_data,
- u8 private_data_len)
+static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
+ enum ib_cm_rej_reason reason, void *ari,
+ u8 ari_length, const void *private_data,
+ u8 private_data_len)
{
- struct cm_id_private *cm_id_priv;
+ enum ib_cm_state state = cm_id_priv->id.state;
struct ib_mad_send_buf *msg;
- unsigned long flags;
int ret;
+ lockdep_assert_held(&cm_id_priv->lock);
+
if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
(ari && ari_length > IB_CM_REJ_ARI_LENGTH))
return -EINVAL;
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+ trace_icm_send_rej(&cm_id_priv->id, reason);
- spin_lock_irqsave(&cm_id_priv->lock, flags);
- switch (cm_id->state) {
+ switch (state) {
case IB_CM_REQ_SENT:
case IB_CM_MRA_REQ_RCVD:
case IB_CM_REQ_RCVD:
case IB_CM_MRA_REQ_SENT:
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (!ret)
- cm_format_rej((struct cm_rej_msg *) msg->mad,
- cm_id_priv, reason, ari, ari_length,
- private_data, private_data_len);
-
cm_reset_to_idle(cm_id_priv);
+ msg = cm_alloc_msg(cm_id_priv);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+ cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
+ ari, ari_length, private_data, private_data_len,
+ state);
break;
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (!ret)
- cm_format_rej((struct cm_rej_msg *) msg->mad,
- cm_id_priv, reason, ari, ari_length,
- private_data, private_data_len);
-
cm_enter_timewait(cm_id_priv);
+ msg = cm_alloc_msg(cm_id_priv);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+ cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
+ ari, ari_length, private_data, private_data_len,
+ state);
break;
default:
- ret = -EINVAL;
- goto out;
+ trace_icm_send_unknown_rej_err(&cm_id_priv->id);
+ return -EINVAL;
}
- if (ret)
- goto out;
-
ret = ib_post_send_mad(msg, NULL);
- if (ret)
+ if (ret) {
cm_free_msg(msg);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ib_send_cm_rej(struct ib_cm_id *cm_id, enum ib_cm_rej_reason reason,
+ void *ari, u8 ari_length, const void *private_data,
+ u8 private_data_len)
+{
+ struct cm_id_private *cm_id_priv =
+ container_of(cm_id, struct cm_id_private, id);
+ unsigned long flags;
+ int ret;
-out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ ret = cm_send_rej_locked(cm_id_priv, reason, ari, ari_length,
+ private_data, private_data_len);
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);
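/*
 * Usage sketch, illustrative only: a ULP event handler rejecting an
 * incoming REQ with an ARI payload. 'my_cm_handler' and 'my_can_accept'
 * are hypothetical and the handler prototype is assumed to match
 * ib_cm_handler; ib_send_cm_rej(), IB_CM_REJ_CONSUMER_DEFINED and
 * IB_CM_REJ_ARI_LENGTH come from <rdma/ib_cm.h>.
 *
 *	static int my_cm_handler(struct ib_cm_id *cm_id,
 *				 const struct ib_cm_event *event)
 *	{
 *		u8 ari[IB_CM_REJ_ARI_LENGTH] = {};
 *
 *		if (event->event == IB_CM_REQ_RECEIVED && !my_can_accept(event))
 *			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
 *				       ari, sizeof(ari), NULL, 0);
 *		return 0;
 *	}
 */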
@@ -2376,42 +3022,33 @@ static void cm_format_rej_event(struct cm_work *work)
rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
param = &work->cm_event.param.rej_rcvd;
- param->ari = rej_msg->ari;
- param->ari_length = cm_rej_get_reject_info_len(rej_msg);
- param->reason = __be16_to_cpu(rej_msg->reason);
- work->cm_event.private_data = &rej_msg->private_data;
+ param->ari = IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg);
+ param->ari_length = IBA_GET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg);
+ param->reason = IBA_GET(CM_REJ_REASON, rej_msg);
+ work->cm_event.private_data =
+ IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg);
}
-static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
+static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
- struct cm_timewait_info *timewait_info;
struct cm_id_private *cm_id_priv;
__be32 remote_id;
- remote_id = rej_msg->local_comm_id;
-
- if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
- spin_lock_irq(&cm.lock);
- timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
- remote_id);
- if (!timewait_info) {
- spin_unlock_irq(&cm.lock);
- return NULL;
- }
- cm_id_priv = idr_find(&cm.local_id_table, (__force int)
- (timewait_info->work.local_id ^
- cm.random_id_operand));
- if (cm_id_priv) {
- if (cm_id_priv->id.remote_id == remote_id)
- atomic_inc(&cm_id_priv->refcount);
- else
- cm_id_priv = NULL;
- }
- spin_unlock_irq(&cm.lock);
- } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
- cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
+ remote_id = cpu_to_be32(IBA_GET(CM_REJ_LOCAL_COMM_ID, rej_msg));
+
+ if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_TIMEOUT) {
+ cm_id_priv = cm_find_remote_id(
+ *((__be64 *)IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg)),
+ remote_id);
+ } else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) ==
+ CM_MSG_RESPONSE_REQ)
+ cm_id_priv = cm_acquire_id(
+ cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
+ 0);
else
- cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
+ cm_id_priv = cm_acquire_id(
+ cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
+ remote_id);
return cm_id_priv;
}
@@ -2420,7 +3057,6 @@ static int cm_rej_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_rej_msg *rej_msg;
- int ret;
rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_rejected_id(rej_msg);
@@ -2435,18 +3071,18 @@ static int cm_rej_handler(struct cm_work *work)
case IB_CM_MRA_REQ_RCVD:
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- /* fall through */
+ ib_cancel_mad(cm_id_priv->msg);
+ fallthrough;
case IB_CM_REQ_RCVD:
case IB_CM_MRA_REQ_SENT:
- if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
+ if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN)
cm_enter_timewait(cm_id_priv);
else
cm_reset_to_idle(cm_id_priv);
break;
case IB_CM_DREQ_SENT:
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- /* fall through */
+ ib_cancel_mad(cm_id_priv->msg);
+ fallthrough;
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
cm_enter_timewait(cm_id_priv);
@@ -2455,120 +3091,79 @@ static int cm_rej_handler(struct cm_work *work)
if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
- ib_cancel_mad(cm_id_priv->av.port->mad_agent,
- cm_id_priv->msg);
+ ib_cancel_mad(cm_id_priv->msg);
cm_enter_timewait(cm_id_priv);
break;
}
- /* fall through */
+ fallthrough;
default:
+ trace_icm_rej_unknown_err(&cm_id_priv->id);
spin_unlock_irq(&cm_id_priv->lock);
- ret = -EINVAL;
goto out;
}
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
return -EINVAL;
}
-int ib_send_cm_mra(struct ib_cm_id *cm_id,
- u8 service_timeout,
- const void *private_data,
- u8 private_data_len)
+int ib_prepare_cm_mra(struct ib_cm_id *cm_id)
{
struct cm_id_private *cm_id_priv;
- struct ib_mad_send_buf *msg;
enum ib_cm_state cm_state;
enum ib_cm_lap_state lap_state;
- enum cm_msg_response msg_response;
- void *data;
unsigned long flags;
- int ret;
-
- if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
- return -EINVAL;
-
- data = cm_copy_private_data(private_data, private_data_len);
- if (IS_ERR(data))
- return PTR_ERR(data);
+ int ret = 0;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
- switch(cm_id_priv->id.state) {
+ switch (cm_id_priv->id.state) {
case IB_CM_REQ_RCVD:
cm_state = IB_CM_MRA_REQ_SENT;
lap_state = cm_id->lap_state;
- msg_response = CM_MSG_RESPONSE_REQ;
break;
case IB_CM_REP_RCVD:
cm_state = IB_CM_MRA_REP_SENT;
lap_state = cm_id->lap_state;
- msg_response = CM_MSG_RESPONSE_REP;
break;
case IB_CM_ESTABLISHED:
if (cm_id->lap_state == IB_CM_LAP_RCVD) {
cm_state = cm_id->state;
lap_state = IB_CM_MRA_LAP_SENT;
- msg_response = CM_MSG_RESPONSE_OTHER;
break;
}
+ fallthrough;
default:
+ trace_icm_prepare_mra_unknown_err(&cm_id_priv->id);
ret = -EINVAL;
- goto error1;
- }
-
- if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (ret)
- goto error1;
-
- cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
- msg_response, service_timeout,
- private_data, private_data_len);
- ret = ib_post_send_mad(msg, NULL);
- if (ret)
- goto error2;
+ goto error_unlock;
}
cm_id->state = cm_state;
cm_id->lap_state = lap_state;
- cm_id_priv->service_timeout = service_timeout;
- cm_set_private_data(cm_id_priv, data, private_data_len);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- return 0;
-
-error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- kfree(data);
- return ret;
+ cm_set_private_data(cm_id_priv, NULL, 0);
-error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- kfree(data);
- cm_free_msg(msg);
+error_unlock:
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
-EXPORT_SYMBOL(ib_send_cm_mra);
+EXPORT_SYMBOL(ib_prepare_cm_mra);
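/*
 * Migration sketch, illustrative only: callers that used to emit an MRA
 * themselves now only mark the cm_id as delayed; judging from the code
 * above, ib_prepare_cm_mra() records the new state and clears any pending
 * private data, and no MRA MAD is built at this point.
 *
 *	// before
 *	ib_send_cm_mra(cm_id, service_timeout, NULL, 0);
 *	// after
 *	ib_prepare_cm_mra(cm_id);
 */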
-static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
+static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
- switch (cm_mra_get_msg_mraed(mra_msg)) {
+ switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) {
case CM_MSG_RESPONSE_REQ:
- return cm_acquire_id(mra_msg->remote_comm_id, 0);
+ return cm_acquire_id(
+ cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
+ 0);
case CM_MSG_RESPONSE_REP:
case CM_MSG_RESPONSE_OTHER:
- return cm_acquire_id(mra_msg->remote_comm_id,
- mra_msg->local_comm_id);
+ return cm_acquire_id(
+ cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
+ cpu_to_be32(IBA_GET(CM_MRA_LOCAL_COMM_ID, mra_msg)));
default:
return NULL;
}
@@ -2578,68 +3173,62 @@ static int cm_mra_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_mra_msg *mra_msg;
- int timeout, ret;
+ int timeout;
mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_mraed_id(mra_msg);
if (!cm_id_priv)
return -EINVAL;
- work->cm_event.private_data = &mra_msg->private_data;
+ work->cm_event.private_data =
+ IBA_GET_MEM_PTR(CM_MRA_PRIVATE_DATA, mra_msg);
work->cm_event.param.mra_rcvd.service_timeout =
- cm_mra_get_service_timeout(mra_msg);
- timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
+ IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg);
+ timeout = cm_convert_to_ms(IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg)) +
cm_convert_to_ms(cm_id_priv->av.timeout);
spin_lock_irq(&cm_id_priv->lock);
switch (cm_id_priv->id.state) {
case IB_CM_REQ_SENT:
- if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
- ib_modify_mad(cm_id_priv->av.port->mad_agent,
- cm_id_priv->msg, timeout))
+ if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
+ CM_MSG_RESPONSE_REQ ||
+ ib_modify_mad(cm_id_priv->msg, timeout))
goto out;
cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
break;
case IB_CM_REP_SENT:
- if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
- ib_modify_mad(cm_id_priv->av.port->mad_agent,
- cm_id_priv->msg, timeout))
+ if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
+ CM_MSG_RESPONSE_REP ||
+ ib_modify_mad(cm_id_priv->msg, timeout))
goto out;
cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
break;
case IB_CM_ESTABLISHED:
- if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
+ if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
+ CM_MSG_RESPONSE_OTHER ||
cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
- ib_modify_mad(cm_id_priv->av.port->mad_agent,
- cm_id_priv->msg, timeout)) {
+ ib_modify_mad(cm_id_priv->msg, timeout)) {
if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
- atomic_long_inc(&work->port->
- counter_group[CM_RECV_DUPLICATES].
- counter[CM_MRA_COUNTER]);
+ atomic_long_inc(
+ &work->port->counters[CM_RECV_DUPLICATES]
+ [CM_MRA_COUNTER]);
goto out;
}
cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
break;
case IB_CM_MRA_REQ_RCVD:
case IB_CM_MRA_REP_RCVD:
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_MRA_COUNTER]);
- /* fall through */
+ atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
+ [CM_MRA_COUNTER]);
+ fallthrough;
default:
+ trace_icm_mra_unknown_err(&cm_id_priv->id);
goto out;
}
cm_id_priv->msg->context[1] = (void *) (unsigned long)
cm_id_priv->id.state;
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
spin_unlock_irq(&cm_id_priv->lock);
@@ -2647,112 +3236,50 @@ out:
return -EINVAL;
}
-static void cm_format_lap(struct cm_lap_msg *lap_msg,
- struct cm_id_private *cm_id_priv,
- struct ib_sa_path_rec *alternate_path,
- const void *private_data,
- u8 private_data_len)
-{
- cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
- cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
- lap_msg->local_comm_id = cm_id_priv->id.local_id;
- lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
- cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
- /* todo: need remote CM response timeout */
- cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
- lap_msg->alt_local_lid = alternate_path->slid;
- lap_msg->alt_remote_lid = alternate_path->dlid;
- lap_msg->alt_local_gid = alternate_path->sgid;
- lap_msg->alt_remote_gid = alternate_path->dgid;
- cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
- cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
- lap_msg->alt_hop_limit = alternate_path->hop_limit;
- cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
- cm_lap_set_sl(lap_msg, alternate_path->sl);
- cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
- cm_lap_set_local_ack_timeout(lap_msg,
- cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
- alternate_path->packet_life_time));
-
- if (private_data && private_data_len)
- memcpy(lap_msg->private_data, private_data, private_data_len);
-}
-
-int ib_send_cm_lap(struct ib_cm_id *cm_id,
- struct ib_sa_path_rec *alternate_path,
- const void *private_data,
- u8 private_data_len)
+static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
+ struct sa_path_rec *path)
{
- struct cm_id_private *cm_id_priv;
- struct ib_mad_send_buf *msg;
- unsigned long flags;
- int ret;
-
- if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
- return -EINVAL;
-
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id->state != IB_CM_ESTABLISHED ||
- (cm_id->lap_state != IB_CM_LAP_UNINIT &&
- cm_id->lap_state != IB_CM_LAP_IDLE)) {
- ret = -EINVAL;
- goto out;
- }
+ u32 lid;
- ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
- if (ret)
- goto out;
- cm_id_priv->alt_av.timeout =
- cm_ack_timeout(cm_id_priv->target_ack_delay,
- cm_id_priv->alt_av.timeout - 1);
-
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (ret)
- goto out;
-
- cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
- alternate_path, private_data, private_data_len);
- msg->timeout_ms = cm_id_priv->timeout_ms;
- msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
+ if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
+ sa_path_set_dlid(path, IBA_GET(CM_LAP_ALTERNATE_LOCAL_PORT_LID,
+ lap_msg));
+ sa_path_set_slid(path, IBA_GET(CM_LAP_ALTERNATE_REMOTE_PORT_LID,
+ lap_msg));
+ } else {
+ lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
+ CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg));
+ sa_path_set_dlid(path, lid);
- ret = ib_post_send_mad(msg, NULL);
- if (ret) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- cm_free_msg(msg);
- return ret;
+ lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
+ CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg));
+ sa_path_set_slid(path, lid);
}
-
- cm_id->lap_state = IB_CM_LAP_SENT;
- cm_id_priv->msg = msg;
-
-out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- return ret;
}
-EXPORT_SYMBOL(ib_send_cm_lap);
static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
- struct ib_sa_path_rec *path,
+ struct sa_path_rec *path,
struct cm_lap_msg *lap_msg)
{
- memset(path, 0, sizeof *path);
- path->dgid = lap_msg->alt_local_gid;
- path->sgid = lap_msg->alt_remote_gid;
- path->dlid = lap_msg->alt_local_lid;
- path->slid = lap_msg->alt_remote_lid;
- path->flow_label = cm_lap_get_flow_label(lap_msg);
- path->hop_limit = lap_msg->alt_hop_limit;
- path->traffic_class = cm_lap_get_traffic_class(lap_msg);
+ path->dgid = *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg);
+ path->sgid =
+ *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg);
+ path->flow_label =
+ cpu_to_be32(IBA_GET(CM_LAP_ALTERNATE_FLOW_LABEL, lap_msg));
+ path->hop_limit = IBA_GET(CM_LAP_ALTERNATE_HOP_LIMIT, lap_msg);
+ path->traffic_class = IBA_GET(CM_LAP_ALTERNATE_TRAFFIC_CLASS, lap_msg);
path->reversible = 1;
path->pkey = cm_id_priv->pkey;
- path->sl = cm_lap_get_sl(lap_msg);
+ path->sl = IBA_GET(CM_LAP_ALTERNATE_SL, lap_msg);
path->mtu_selector = IB_SA_EQ;
path->mtu = cm_id_priv->path_mtu;
path->rate_selector = IB_SA_EQ;
- path->rate = cm_lap_get_packet_rate(lap_msg);
+ path->rate = IBA_GET(CM_LAP_ALTERNATE_PACKET_RATE, lap_msg);
path->packet_life_time_selector = IB_SA_EQ;
- path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
+ path->packet_life_time =
+ IBA_GET(CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT, lap_msg);
path->packet_life_time -= (path->packet_life_time > 0);
+ cm_format_path_lid_from_lap(lap_msg, path);
}
static int cm_lap_handler(struct cm_work *work)
@@ -2761,21 +3288,55 @@ static int cm_lap_handler(struct cm_work *work)
struct cm_lap_msg *lap_msg;
struct ib_cm_lap_event_param *param;
struct ib_mad_send_buf *msg = NULL;
+ struct rdma_ah_attr ah_attr;
+ struct cm_av alt_av = {};
int ret;
+ /* Alternate path messages are currently not supported on the
+ * RoCE link layer.
+ */
+ if (rdma_protocol_roce(work->port->cm_dev->ib_device,
+ work->port->port_num))
+ return -EINVAL;
+
/* todo: verify LAP request and send reject APR if invalid. */
lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
- cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
- lap_msg->local_comm_id);
+ cm_id_priv = cm_acquire_id(
+ cpu_to_be32(IBA_GET(CM_LAP_REMOTE_COMM_ID, lap_msg)),
+ cpu_to_be32(IBA_GET(CM_LAP_LOCAL_COMM_ID, lap_msg)));
if (!cm_id_priv)
return -EINVAL;
param = &work->cm_event.param.lap_rcvd;
+ memset(&work->path[0], 0, sizeof(work->path[0]));
+ cm_path_set_rec_type(work->port->cm_dev->ib_device,
+ work->port->port_num, &work->path[0],
+ IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID,
+ lap_msg));
param->alternate_path = &work->path[0];
cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
- work->cm_event.private_data = &lap_msg->private_data;
+ work->cm_event.private_data =
+ IBA_GET_MEM_PTR(CM_LAP_PRIVATE_DATA, lap_msg);
+
+ ret = ib_init_ah_attr_from_wc(work->port->cm_dev->ib_device,
+ work->port->port_num,
+ work->mad_recv_wc->wc,
+ work->mad_recv_wc->recv_buf.grh,
+ &ah_attr);
+ if (ret)
+ goto deref;
+
+ ret = cm_init_av_by_path(param->alternate_path, NULL, &alt_av);
+ if (ret) {
+ rdma_destroy_ah_attr(&ah_attr);
+ goto deref;
+ }
spin_lock_irq(&cm_id_priv->lock);
+ cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
+ &ah_attr, &cm_id_priv->av);
+ cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av);
+
if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
goto unlock;
@@ -2784,24 +3345,26 @@ static int cm_lap_handler(struct cm_work *work)
case IB_CM_LAP_IDLE:
break;
case IB_CM_MRA_LAP_SENT:
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_LAP_COUNTER]);
- if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
+ atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
+ [CM_LAP_COUNTER]);
+ msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc,
+ true);
+ if (IS_ERR(msg))
goto unlock;
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
CM_MSG_RESPONSE_OTHER,
- cm_id_priv->service_timeout,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
spin_unlock_irq(&cm_id_priv->lock);
- if (ib_post_send_mad(msg, NULL))
+ if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
+ ib_post_send_mad(msg, NULL))
cm_free_msg(msg);
goto deref;
case IB_CM_LAP_RCVD:
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_LAP_COUNTER]);
+ atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
+ [CM_LAP_COUNTER]);
goto unlock;
default:
goto unlock;
@@ -2809,19 +3372,7 @@ static int cm_lap_handler(struct cm_work *work)
cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
cm_id_priv->tid = lap_msg->hdr.tid;
- cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
- work->mad_recv_wc->recv_buf.grh,
- &cm_id_priv->av);
- cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
unlock: spin_unlock_irq(&cm_id_priv->lock);
@@ -2829,88 +3380,33 @@ deref: cm_deref_id(cm_id_priv);
return -EINVAL;
}
-static void cm_format_apr(struct cm_apr_msg *apr_msg,
- struct cm_id_private *cm_id_priv,
- enum ib_cm_apr_status status,
- void *info,
- u8 info_length,
- const void *private_data,
- u8 private_data_len)
-{
- cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
- apr_msg->local_comm_id = cm_id_priv->id.local_id;
- apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
- apr_msg->ap_status = (u8) status;
-
- if (info && info_length) {
- apr_msg->info_length = info_length;
- memcpy(apr_msg->info, info, info_length);
- }
-
- if (private_data && private_data_len)
- memcpy(apr_msg->private_data, private_data, private_data_len);
-}
-
-int ib_send_cm_apr(struct ib_cm_id *cm_id,
- enum ib_cm_apr_status status,
- void *info,
- u8 info_length,
- const void *private_data,
- u8 private_data_len)
-{
- struct cm_id_private *cm_id_priv;
- struct ib_mad_send_buf *msg;
- unsigned long flags;
- int ret;
-
- if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
- (info && info_length > IB_CM_APR_INFO_LENGTH))
- return -EINVAL;
-
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id->state != IB_CM_ESTABLISHED ||
- (cm_id->lap_state != IB_CM_LAP_RCVD &&
- cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (ret)
- goto out;
-
- cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
- info, info_length, private_data, private_data_len);
- ret = ib_post_send_mad(msg, NULL);
- if (ret) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- cm_free_msg(msg);
- return ret;
- }
-
- cm_id->lap_state = IB_CM_LAP_IDLE;
-out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(ib_send_cm_apr);
-
static int cm_apr_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_apr_msg *apr_msg;
- int ret;
+
+ /* Alternate path messages are currently not supported on the
+ * RoCE link layer.
+ */
+ if (rdma_protocol_roce(work->port->cm_dev->ib_device,
+ work->port->port_num))
+ return -EINVAL;
apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
- cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
- apr_msg->local_comm_id);
+ cm_id_priv = cm_acquire_id(
+ cpu_to_be32(IBA_GET(CM_APR_REMOTE_COMM_ID, apr_msg)),
+ cpu_to_be32(IBA_GET(CM_APR_LOCAL_COMM_ID, apr_msg)));
if (!cm_id_priv)
return -EINVAL; /* Unmatched reply. */
- work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
- work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
- work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
- work->cm_event.private_data = &apr_msg->private_data;
+ work->cm_event.param.apr_rcvd.ap_status =
+ IBA_GET(CM_APR_AR_STATUS, apr_msg);
+ work->cm_event.param.apr_rcvd.apr_info =
+ IBA_GET_MEM_PTR(CM_APR_ADDITIONAL_INFORMATION, apr_msg);
+ work->cm_event.param.apr_rcvd.info_len =
+ IBA_GET(CM_APR_ADDITIONAL_INFORMATION_LENGTH, apr_msg);
+ work->cm_event.private_data =
+ IBA_GET_MEM_PTR(CM_APR_PRIVATE_DATA, apr_msg);
spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
@@ -2920,18 +3416,8 @@ static int cm_apr_handler(struct cm_work *work)
goto out;
}
cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- cm_id_priv->msg = NULL;
-
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ ib_cancel_mad(cm_id_priv->msg);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
@@ -2942,9 +3428,8 @@ static int cm_timewait_handler(struct cm_work *work)
{
struct cm_timewait_info *timewait_info;
struct cm_id_private *cm_id_priv;
- int ret;
- timewait_info = (struct cm_timewait_info *)work;
+ timewait_info = container_of(work, struct cm_timewait_info, work);
spin_lock_irq(&cm.lock);
list_del(&timewait_info->list);
spin_unlock_irq(&cm.lock);
@@ -2961,15 +3446,7 @@ static int cm_timewait_handler(struct cm_work *work)
goto out;
}
cm_id_priv->id.state = IB_CM_IDLE;
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
@@ -2981,14 +3458,17 @@ static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
struct ib_cm_sidr_req_param *param)
{
cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
- cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
- sidr_req_msg->request_id = cm_id_priv->id.local_id;
- sidr_req_msg->pkey = param->path->pkey;
- sidr_req_msg->service_id = param->service_id;
+ cm_form_tid(cm_id_priv));
+ IBA_SET(CM_SIDR_REQ_REQUESTID, sidr_req_msg,
+ be32_to_cpu(cm_id_priv->id.local_id));
+ IBA_SET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg,
+ be16_to_cpu(param->path->pkey));
+ IBA_SET(CM_SIDR_REQ_SERVICEID, sidr_req_msg,
+ be64_to_cpu(param->service_id));
if (param->private_data && param->private_data_len)
- memcpy(sidr_req_msg->private_data, param->private_data,
- param->private_data_len);
+ IBA_SET_MEM(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg,
+ param->private_data, param->private_data_len);
}
int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
@@ -2996,6 +3476,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
{
struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
+ struct cm_av av = {};
unsigned long flags;
int ret;
@@ -3004,43 +3485,46 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
+ ret = cm_init_av_by_path(param->path, param->sgid_attr, &av);
if (ret)
- goto out;
+ return ret;
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ cm_move_av_from_path(&cm_id_priv->av, &av);
cm_id->service_id = param->service_id;
- cm_id->service_mask = ~cpu_to_be64(0);
cm_id_priv->timeout_ms = param->timeout_ms;
cm_id_priv->max_cm_retries = param->max_cm_retries;
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (ret)
- goto out;
-
- cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
- param);
- msg->timeout_ms = cm_id_priv->timeout_ms;
- msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
-
- spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id->state == IB_CM_IDLE)
- ret = ib_post_send_mad(msg, NULL);
- else
+ if (cm_id->state != IB_CM_IDLE) {
ret = -EINVAL;
+ goto out_unlock;
+ }
- if (ret) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- cm_free_msg(msg);
- goto out;
+ msg = cm_alloc_priv_msg(cm_id_priv, IB_CM_SIDR_REQ_SENT);
+ if (IS_ERR(msg)) {
+ ret = PTR_ERR(msg);
+ goto out_unlock;
}
+
+ cm_format_sidr_req((struct cm_sidr_req_msg *)msg->mad, cm_id_priv,
+ param);
+
+ trace_icm_send_sidr_req(&cm_id_priv->id);
+ ret = ib_post_send_mad(msg, NULL);
+ if (ret)
+ goto out_free;
cm_id->state = IB_CM_SIDR_REQ_SENT;
- cm_id_priv->msg = msg;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-out:
+ return 0;
+out_free:
+ cm_free_priv_msg(msg);
+out_unlock:
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);
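/*
 * Usage sketch, illustrative only: issuing a SIDR REQ. 'rec', 'sgid_attr'
 * and MY_SERVICE_ID are placeholders; the field names follow the
 * cm_format_sidr_req() and ib_send_cm_sidr_req() code above.
 *
 *	struct ib_cm_sidr_req_param sreq = {
 *		.path		= &rec,			// resolved sa_path_rec
 *		.sgid_attr	= sgid_attr,		// source GID entry for the path
 *		.service_id	= cpu_to_be64(MY_SERVICE_ID),
 *		.timeout_ms	= 1000,
 *		.max_cm_retries	= 3,
 *	};
 *	ret = ib_send_cm_sidr_req(cm_id, &sreq);
 */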
static void cm_format_sidr_req_event(struct cm_work *work,
+ const struct cm_id_private *rx_cm_id,
struct ib_cm_id *listen_id)
{
struct cm_sidr_req_msg *sidr_req_msg;
@@ -3049,67 +3533,86 @@ static void cm_format_sidr_req_event(struct cm_work *work,
sidr_req_msg = (struct cm_sidr_req_msg *)
work->mad_recv_wc->recv_buf.mad;
param = &work->cm_event.param.sidr_req_rcvd;
- param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
+ param->pkey = IBA_GET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg);
param->listen_id = listen_id;
- param->service_id = sidr_req_msg->service_id;
+ param->service_id =
+ cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
param->bth_pkey = cm_get_bth_pkey(work);
param->port = work->port->port_num;
- work->cm_event.private_data = &sidr_req_msg->private_data;
+ param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr;
+ work->cm_event.private_data =
+ IBA_GET_MEM_PTR(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg);
}
static int cm_sidr_req_handler(struct cm_work *work)
{
- struct ib_cm_id *cm_id;
- struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
+ struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
struct cm_sidr_req_msg *sidr_req_msg;
struct ib_wc *wc;
+ int ret;
- cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
- if (IS_ERR(cm_id))
- return PTR_ERR(cm_id);
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+ cm_id_priv =
+ cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
+ if (IS_ERR(cm_id_priv))
+ return PTR_ERR(cm_id_priv);
/* Record SGID/SLID and request ID for lookup. */
sidr_req_msg = (struct cm_sidr_req_msg *)
work->mad_recv_wc->recv_buf.mad;
- wc = work->mad_recv_wc->wc;
- cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
- cm_id_priv->av.dgid.global.interface_id = 0;
- cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
- work->mad_recv_wc->recv_buf.grh,
- &cm_id_priv->av);
- cm_id_priv->id.remote_id = sidr_req_msg->request_id;
+
+ cm_id_priv->id.remote_id =
+ cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
+ cm_id_priv->id.service_id =
+ cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
cm_id_priv->tid = sidr_req_msg->hdr.tid;
- atomic_inc(&cm_id_priv->work_count);
+
+ wc = work->mad_recv_wc->wc;
+ cm_id_priv->sidr_slid = wc->slid;
+ ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+ work->mad_recv_wc->recv_buf.grh,
+ &cm_id_priv->av);
+ if (ret)
+ goto out;
spin_lock_irq(&cm.lock);
- cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
- if (cur_cm_id_priv) {
+ listen_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
+ if (listen_cm_id_priv) {
spin_unlock_irq(&cm.lock);
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_SIDR_REQ_COUNTER]);
+ atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
+ [CM_SIDR_REQ_COUNTER]);
goto out; /* Duplicate message. */
}
cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
- cur_cm_id_priv = cm_find_listen(cm_id->device,
- sidr_req_msg->service_id);
- if (!cur_cm_id_priv) {
+ listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
+ cm_id_priv->id.service_id);
+ if (!listen_cm_id_priv) {
spin_unlock_irq(&cm.lock);
- cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
+ ib_send_cm_sidr_rep(&cm_id_priv->id,
+ &(struct ib_cm_sidr_rep_param){
+ .status = IB_SIDR_UNSUPPORTED });
goto out; /* No match. */
}
- atomic_inc(&cur_cm_id_priv->refcount);
- atomic_inc(&cm_id_priv->refcount);
spin_unlock_irq(&cm.lock);
- cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
- cm_id_priv->id.context = cur_cm_id_priv->id.context;
- cm_id_priv->id.service_id = sidr_req_msg->service_id;
- cm_id_priv->id.service_mask = ~cpu_to_be64(0);
+ cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
+ cm_id_priv->id.context = listen_cm_id_priv->id.context;
- cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
- cm_process_work(cm_id_priv, work);
- cm_deref_id(cur_cm_id_priv);
+ /*
+ * A SIDR ID does not need to be in the xarray since it does not receive
+ * MADs, is not placed in the remote_id or remote_qpn rbtrees, and does
+ * not enter timewait.
+ */
+
+ cm_format_sidr_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
+ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
+ cm_free_work(work);
+ /*
+ * A pointer to the listen_cm_id is held in the event, so this dereference
+ * must happen only after the event has been delivered above.
+ */
+ cm_deref_id(listen_cm_id_priv);
+ if (ret)
+ cm_destroy_id(&cm_id_priv->id, ret);
return 0;
out:
ib_destroy_cm_id(&cm_id_priv->id);
@@ -3120,68 +3623,85 @@ static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
struct cm_id_private *cm_id_priv,
struct ib_cm_sidr_rep_param *param)
{
- cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
- cm_id_priv->tid);
- sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
- sidr_rep_msg->status = param->status;
- cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
- sidr_rep_msg->service_id = cm_id_priv->id.service_id;
- sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
+ cm_format_mad_ece_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
+ cm_id_priv->tid, param->ece.attr_mod);
+ IBA_SET(CM_SIDR_REP_REQUESTID, sidr_rep_msg,
+ be32_to_cpu(cm_id_priv->id.remote_id));
+ IBA_SET(CM_SIDR_REP_STATUS, sidr_rep_msg, param->status);
+ IBA_SET(CM_SIDR_REP_QPN, sidr_rep_msg, param->qp_num);
+ IBA_SET(CM_SIDR_REP_SERVICEID, sidr_rep_msg,
+ be64_to_cpu(cm_id_priv->id.service_id));
+ IBA_SET(CM_SIDR_REP_Q_KEY, sidr_rep_msg, param->qkey);
+ IBA_SET(CM_SIDR_REP_VENDOR_ID_L, sidr_rep_msg,
+ param->ece.vendor_id & 0xFF);
+ IBA_SET(CM_SIDR_REP_VENDOR_ID_H, sidr_rep_msg,
+ (param->ece.vendor_id >> 8) & 0xFF);
if (param->info && param->info_length)
- memcpy(sidr_rep_msg->info, param->info, param->info_length);
+ IBA_SET_MEM(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg,
+ param->info, param->info_length);
if (param->private_data && param->private_data_len)
- memcpy(sidr_rep_msg->private_data, param->private_data,
- param->private_data_len);
+ IBA_SET_MEM(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg,
+ param->private_data, param->private_data_len);
}
-int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
- struct ib_cm_sidr_rep_param *param)
+static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
+ struct ib_cm_sidr_rep_param *param)
{
- struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
unsigned long flags;
int ret;
+ lockdep_assert_held(&cm_id_priv->lock);
+
if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
(param->private_data &&
param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
return -EINVAL;
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
- ret = -EINVAL;
- goto error;
- }
+ if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD)
+ return -EINVAL;
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (ret)
- goto error;
+ msg = cm_alloc_msg(cm_id_priv);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
param);
+ trace_icm_send_sidr_rep(&cm_id_priv->id);
ret = ib_post_send_mad(msg, NULL);
if (ret) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_free_msg(msg);
return ret;
}
- cm_id->state = IB_CM_IDLE;
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-
+ cm_id_priv->id.state = IB_CM_IDLE;
spin_lock_irqsave(&cm.lock, flags);
- rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
+ if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
+ rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
+ RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
+ }
spin_unlock_irqrestore(&cm.lock, flags);
return 0;
+}
-error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
+ struct ib_cm_sidr_rep_param *param)
+{
+ struct cm_id_private *cm_id_priv =
+ container_of(cm_id, struct cm_id_private, id);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ ret = cm_send_sidr_rep_locked(cm_id_priv, param);
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);
-static void cm_format_sidr_rep_event(struct cm_work *work)
+static void cm_format_sidr_rep_event(struct cm_work *work,
+ const struct cm_id_private *cm_id_priv)
{
struct cm_sidr_rep_msg *sidr_rep_msg;
struct ib_cm_sidr_rep_event_param *param;
@@ -3189,12 +3709,16 @@ static void cm_format_sidr_rep_event(struct cm_work *work)
sidr_rep_msg = (struct cm_sidr_rep_msg *)
work->mad_recv_wc->recv_buf.mad;
param = &work->cm_event.param.sidr_rep_rcvd;
- param->status = sidr_rep_msg->status;
- param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
- param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
- param->info = &sidr_rep_msg->info;
- param->info_len = sidr_rep_msg->info_length;
- work->cm_event.private_data = &sidr_rep_msg->private_data;
+ param->status = IBA_GET(CM_SIDR_REP_STATUS, sidr_rep_msg);
+ param->qkey = IBA_GET(CM_SIDR_REP_Q_KEY, sidr_rep_msg);
+ param->qpn = IBA_GET(CM_SIDR_REP_QPN, sidr_rep_msg);
+ param->info = IBA_GET_MEM_PTR(CM_SIDR_REP_ADDITIONAL_INFORMATION,
+ sidr_rep_msg);
+ param->info_len = IBA_GET(CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH,
+ sidr_rep_msg);
+ param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
+ work->cm_event.private_data =
+ IBA_GET_MEM_PTR(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg);
}
static int cm_sidr_rep_handler(struct cm_work *work)
@@ -3204,7 +3728,8 @@ static int cm_sidr_rep_handler(struct cm_work *work)
sidr_rep_msg = (struct cm_sidr_rep_msg *)
work->mad_recv_wc->recv_buf.mad;
- cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
+ cm_id_priv = cm_acquire_id(
+ cpu_to_be32(IBA_GET(CM_SIDR_REP_REQUESTID, sidr_rep_msg)), 0);
if (!cm_id_priv)
return -EINVAL; /* Unmatched reply. */
@@ -3214,10 +3739,10 @@ static int cm_sidr_rep_handler(struct cm_work *work)
goto out;
}
cm_id_priv->id.state = IB_CM_IDLE;
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+ ib_cancel_mad(cm_id_priv->msg);
spin_unlock_irq(&cm_id_priv->lock);
- cm_format_sidr_rep_event(work);
+ cm_format_sidr_rep_event(work, cm_id_priv);
cm_process_work(cm_id_priv, work);
return 0;
out:
@@ -3225,23 +3750,29 @@ out:
return -EINVAL;
}
-static void cm_process_send_error(struct ib_mad_send_buf *msg,
+static void cm_process_send_error(struct cm_id_private *cm_id_priv,
+ struct ib_mad_send_buf *msg,
enum ib_wc_status wc_status)
{
- struct cm_id_private *cm_id_priv;
- struct ib_cm_event cm_event;
- enum ib_cm_state state;
+ enum ib_cm_state state = (unsigned long) msg->context[1];
+ struct ib_cm_event cm_event = {};
int ret;
- memset(&cm_event, 0, sizeof cm_event);
- cm_id_priv = msg->context[0];
-
- /* Discard old sends or ones without a response. */
+ /* Discard old sends. */
spin_lock_irq(&cm_id_priv->lock);
- state = (enum ib_cm_state) (unsigned long) msg->context[1];
- if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
- goto discard;
+ if (msg != cm_id_priv->msg) {
+ spin_unlock_irq(&cm_id_priv->lock);
+ cm_free_msg(msg);
+ cm_deref_id(cm_id_priv);
+ return;
+ }
+ cm_free_priv_msg(msg);
+ if (state != cm_id_priv->id.state || wc_status == IB_WC_SUCCESS ||
+ wc_status == IB_WC_WR_FLUSH_ERR)
+ goto out_unlock;
+
+ trace_icm_mad_send_err(state, wc_status);
switch (state) {
case IB_CM_REQ_SENT:
case IB_CM_MRA_REQ_RCVD:
@@ -3262,26 +3793,25 @@ static void cm_process_send_error(struct ib_mad_send_buf *msg,
cm_event.event = IB_CM_SIDR_REQ_ERROR;
break;
default:
- goto discard;
+ goto out_unlock;
}
spin_unlock_irq(&cm_id_priv->lock);
cm_event.param.send_status = wc_status;
/* No other events can occur on the cm_id at this point. */
ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
- cm_free_msg(msg);
if (ret)
ib_destroy_cm_id(&cm_id_priv->id);
return;
-discard:
+out_unlock:
spin_unlock_irq(&cm_id_priv->lock);
- cm_free_msg(msg);
}
static void cm_send_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_send_wc *mad_send_wc)
{
struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
+ struct cm_id_private *cm_id_priv;
struct cm_port *port;
u16 attr_index;
@@ -3289,33 +3819,22 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
attr_index = be16_to_cpu(((struct ib_mad_hdr *)
msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
- /*
- * If the send was in response to a received message (context[0] is not
- * set to a cm_id), and is not a REJ, then it is a send that was
- * manually retried.
- */
- if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
+ if (msg->context[0] == CM_DIRECT_RETRY_CTX) {
msg->retries = 1;
+ cm_id_priv = NULL;
+ } else {
+ cm_id_priv = msg->context[0];
+ }
- atomic_long_add(1 + msg->retries,
- &port->counter_group[CM_XMIT].counter[attr_index]);
+ atomic_long_add(1 + msg->retries, &port->counters[CM_XMIT][attr_index]);
if (msg->retries)
atomic_long_add(msg->retries,
- &port->counter_group[CM_XMIT_RETRIES].
- counter[attr_index]);
+ &port->counters[CM_XMIT_RETRIES][attr_index]);
- switch (mad_send_wc->status) {
- case IB_WC_SUCCESS:
- case IB_WC_WR_FLUSH_ERR:
+ if (cm_id_priv)
+ cm_process_send_error(cm_id_priv, msg, mad_send_wc->status);
+ else
cm_free_msg(msg);
- break;
- default:
- if (msg->context[0] && msg->context[1])
- cm_process_send_error(msg, mad_send_wc->status);
- else
- cm_free_msg(msg);
- break;
- }
}
static void cm_work_handler(struct work_struct *_work)
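
Illustrative sketch (not part of the patch): cm_send_handler() above now indexes a flat two-dimensional array of atomic counters instead of the old counter_group kobjects. Assuming the CM_COUNTER_GROUPS and CM_ATTR_COUNT enum sizes this file already defines, the layout and increment pattern are roughly as follows; the example_* names are hypothetical.

/* Hypothetical container for the per-port statistics; the real array
 * lives inside struct cm_port and is read back out via sysfs below.
 */
struct example_cm_port_stats {
	atomic_long_t counters[CM_COUNTER_GROUPS][CM_ATTR_COUNT];
};

static void example_count_xmit(struct example_cm_port_stats *stats,
			       u16 attr_index, int retries)
{
	/* One increment for the transmit itself plus any MAD-layer retries. */
	atomic_long_add(1 + retries, &stats->counters[CM_XMIT][attr_index]);
	if (retries)
		atomic_long_add(retries,
				&stats->counters[CM_XMIT_RETRIES][attr_index]);
}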
@@ -3364,6 +3883,7 @@ static void cm_work_handler(struct work_struct *_work)
ret = cm_timewait_handler(work);
break;
default:
+ trace_icm_handler_err(work->cm_event.event);
ret = -EINVAL;
break;
}
@@ -3389,8 +3909,7 @@ static int cm_establish(struct ib_cm_id *cm_id)
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
- switch (cm_id->state)
- {
+ switch (cm_id->state) {
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
cm_id->state = IB_CM_ESTABLISHED;
@@ -3399,6 +3918,7 @@ static int cm_establish(struct ib_cm_id *cm_id)
ret = -EISCONN;
break;
default:
+ trace_icm_establish_err(cm_id);
ret = -EINVAL;
break;
}
@@ -3422,14 +3942,14 @@ static int cm_establish(struct ib_cm_id *cm_id)
work->cm_event.event = IB_CM_USER_ESTABLISHED;
/* Check if the device started its remove_one */
- spin_lock_irq(&cm.lock);
+ spin_lock_irqsave(&cm.lock, flags);
if (!cm_dev->going_down) {
queue_delayed_work(cm.wq, &work->work, 0);
} else {
kfree(work);
ret = -ENODEV;
}
- spin_unlock_irq(&cm.lock);
+ spin_unlock_irqrestore(&cm.lock, flags);
out:
return ret;
@@ -3474,19 +3994,22 @@ int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
EXPORT_SYMBOL(ib_cm_notify);
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
+ struct ib_mad_send_buf *send_buf,
struct ib_mad_recv_wc *mad_recv_wc)
{
struct cm_port *port = mad_agent->context;
struct cm_work *work;
enum ib_cm_event_type event;
+ bool alt_path = false;
u16 attr_id;
int paths = 0;
int going_down = 0;
switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
case CM_REQ_ATTR_ID:
- paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
- alt_local_lid != 0);
+ alt_path = cm_req_has_alt_path((struct cm_req_msg *)
+ mad_recv_wc->recv_buf.mad);
+ paths = 1 + (alt_path != 0);
event = IB_CM_REQ_RECEIVED;
break;
case CM_MRA_ATTR_ID:
@@ -3526,11 +4049,9 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
}
attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
- atomic_long_inc(&port->counter_group[CM_RECV].
- counter[attr_id - CM_ATTR_ID_OFFSET]);
+ atomic_long_inc(&port->counters[CM_RECV][attr_id - CM_ATTR_ID_OFFSET]);
- work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
- GFP_KERNEL);
+ work = kmalloc(struct_size(work, path, paths), GFP_KERNEL);
if (!work) {
ib_free_recv_mad(mad_recv_wc);
return;
@@ -3576,14 +4097,25 @@ static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
IB_QP_PKEY_INDEX | IB_QP_PORT;
qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
- if (cm_id_priv->responder_resources)
+ if (cm_id_priv->responder_resources) {
+ struct ib_device *ib_dev = cm_id_priv->id.device;
+ u64 support_flush = ib_dev->attrs.device_cap_flags &
+ (IB_DEVICE_FLUSH_GLOBAL | IB_DEVICE_FLUSH_PERSISTENT);
+ u32 flushable = support_flush ?
+ (IB_ACCESS_FLUSH_GLOBAL |
+ IB_ACCESS_FLUSH_PERSISTENT) : 0;
+
qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
- IB_ACCESS_REMOTE_ATOMIC;
+ IB_ACCESS_REMOTE_ATOMIC |
+ flushable;
+ }
qp_attr->pkey_index = cm_id_priv->av.pkey_index;
- qp_attr->port_num = cm_id_priv->av.port->port_num;
+ if (cm_id_priv->av.port)
+ qp_attr->port_num = cm_id_priv->av.port->port_num;
ret = 0;
break;
default:
+ trace_icm_qp_init_err(&cm_id_priv->id);
ret = -EINVAL;
break;
}
@@ -3610,32 +4142,10 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
IB_QP_DEST_QPN | IB_QP_RQ_PSN;
qp_attr->ah_attr = cm_id_priv->av.ah_attr;
- if (!cm_id_priv->av.valid) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- return -EINVAL;
- }
- if (cm_id_priv->av.ah_attr.vlan_id != 0xffff) {
- qp_attr->vlan_id = cm_id_priv->av.ah_attr.vlan_id;
- *qp_attr_mask |= IB_QP_VID;
- }
- if (!is_zero_ether_addr(cm_id_priv->av.smac)) {
- memcpy(qp_attr->smac, cm_id_priv->av.smac,
- sizeof(qp_attr->smac));
- *qp_attr_mask |= IB_QP_SMAC;
- }
- if (cm_id_priv->alt_av.valid) {
- if (cm_id_priv->alt_av.ah_attr.vlan_id != 0xffff) {
- qp_attr->alt_vlan_id =
- cm_id_priv->alt_av.ah_attr.vlan_id;
- *qp_attr_mask |= IB_QP_ALT_VID;
- }
- if (!is_zero_ether_addr(cm_id_priv->alt_av.smac)) {
- memcpy(qp_attr->alt_smac,
- cm_id_priv->alt_av.smac,
- sizeof(qp_attr->alt_smac));
- *qp_attr_mask |= IB_QP_ALT_SMAC;
- }
- }
+ if ((qp_attr->ah_attr.type == RDMA_AH_ATTR_TYPE_IB) &&
+ cm_id_priv->av.dlid_datapath &&
+ (cm_id_priv->av.dlid_datapath != 0xffff))
+ qp_attr->ah_attr.ib.dlid = cm_id_priv->av.dlid_datapath;
qp_attr->path_mtu = cm_id_priv->path_mtu;
qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
@@ -3647,7 +4157,8 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
cm_id_priv->responder_resources;
qp_attr->min_rnr_timer = 0;
}
- if (cm_id_priv->alt_av.ah_attr.dlid) {
+ if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr) &&
+ cm_id_priv->alt_av.port) {
*qp_attr_mask |= IB_QP_ALT_PATH;
qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
@@ -3657,6 +4168,7 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
ret = 0;
break;
default:
+ trace_icm_qp_rtr_err(&cm_id_priv->id);
ret = -EINVAL;
break;
}
@@ -3693,7 +4205,7 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
qp_attr->retry_cnt = cm_id_priv->retry_count;
qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
- /* fall through */
+ fallthrough;
case IB_QPT_XRC_TGT:
*qp_attr_mask |= IB_QP_TIMEOUT;
qp_attr->timeout = cm_id_priv->av.timeout;
@@ -3701,13 +4213,15 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
default:
break;
}
- if (cm_id_priv->alt_av.ah_attr.dlid) {
+ if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
qp_attr->path_mig_state = IB_MIG_REARM;
}
} else {
*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
- qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
+ if (cm_id_priv->alt_av.port)
+ qp_attr->alt_port_num =
+ cm_id_priv->alt_av.port->port_num;
qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
@@ -3716,6 +4230,7 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
ret = 0;
break;
default:
+ trace_icm_qp_rts_err(&cm_id_priv->id);
ret = -EINVAL;
break;
}
@@ -3749,106 +4264,76 @@ int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
-static void cm_get_ack_delay(struct cm_device *cm_dev)
+static ssize_t cm_show_counter(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
{
- struct ib_device_attr attr;
-
- if (ib_query_device(cm_dev->ib_device, &attr))
- cm_dev->ack_delay = 0; /* acks will rely on packet life time */
- else
- cm_dev->ack_delay = attr.local_ca_ack_delay;
-}
-
-static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
- char *buf)
-{
- struct cm_counter_group *group;
- struct cm_counter_attribute *cm_attr;
-
- group = container_of(obj, struct cm_counter_group, obj);
- cm_attr = container_of(attr, struct cm_counter_attribute, attr);
-
- return sprintf(buf, "%ld\n",
- atomic_long_read(&group->counter[cm_attr->index]));
-}
-
-static const struct sysfs_ops cm_counter_ops = {
- .show = cm_show_counter
-};
-
-static struct kobj_type cm_counter_obj_type = {
- .sysfs_ops = &cm_counter_ops,
- .default_attrs = cm_counter_default_attrs
-};
-
-static void cm_release_port_obj(struct kobject *obj)
-{
- struct cm_port *cm_port;
-
- cm_port = container_of(obj, struct cm_port, port_obj);
- kfree(cm_port);
-}
+ struct cm_counter_attribute *cm_attr =
+ container_of(attr, struct cm_counter_attribute, attr);
+ struct cm_device *cm_dev = ib_get_client_data(ibdev, &cm_client);
-static struct kobj_type cm_port_obj_type = {
- .release = cm_release_port_obj
-};
+ if (WARN_ON(!cm_dev))
+ return -EINVAL;
-static char *cm_devnode(struct device *dev, umode_t *mode)
-{
- if (mode)
- *mode = 0666;
- return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
+ return sysfs_emit(
+ buf, "%ld\n",
+ atomic_long_read(
+ &cm_dev->port[port_num - 1]
+ ->counters[cm_attr->group][cm_attr->index]));
}
-struct class cm_class = {
- .owner = THIS_MODULE,
- .name = "infiniband_cm",
- .devnode = cm_devnode,
-};
-EXPORT_SYMBOL(cm_class);
-
-static int cm_create_port_fs(struct cm_port *port)
-{
- int i, ret;
-
- ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
- &port->cm_dev->device->kobj,
- "%d", port->port_num);
- if (ret) {
- kfree(port);
- return ret;
- }
-
- for (i = 0; i < CM_COUNTER_GROUPS; i++) {
- ret = kobject_init_and_add(&port->counter_group[i].obj,
- &cm_counter_obj_type,
- &port->port_obj,
- "%s", counter_group_names[i]);
- if (ret)
- goto error;
+#define CM_COUNTER_ATTR(_name, _group, _index) \
+ { \
+ .attr = __ATTR(_name, 0444, cm_show_counter, NULL), \
+ .group = _group, .index = _index \
}
- return 0;
-
-error:
- while (i--)
- kobject_put(&port->counter_group[i].obj);
- kobject_put(&port->port_obj);
- return ret;
-
-}
-
-static void cm_remove_port_fs(struct cm_port *port)
-{
- int i;
-
- for (i = 0; i < CM_COUNTER_GROUPS; i++)
- kobject_put(&port->counter_group[i].obj);
+#define CM_COUNTER_GROUP(_group, _name) \
+ static struct cm_counter_attribute cm_counter_attr_##_group[] = { \
+ CM_COUNTER_ATTR(req, _group, CM_REQ_COUNTER), \
+ CM_COUNTER_ATTR(mra, _group, CM_MRA_COUNTER), \
+ CM_COUNTER_ATTR(rej, _group, CM_REJ_COUNTER), \
+ CM_COUNTER_ATTR(rep, _group, CM_REP_COUNTER), \
+ CM_COUNTER_ATTR(rtu, _group, CM_RTU_COUNTER), \
+ CM_COUNTER_ATTR(dreq, _group, CM_DREQ_COUNTER), \
+ CM_COUNTER_ATTR(drep, _group, CM_DREP_COUNTER), \
+ CM_COUNTER_ATTR(sidr_req, _group, CM_SIDR_REQ_COUNTER), \
+ CM_COUNTER_ATTR(sidr_rep, _group, CM_SIDR_REP_COUNTER), \
+ CM_COUNTER_ATTR(lap, _group, CM_LAP_COUNTER), \
+ CM_COUNTER_ATTR(apr, _group, CM_APR_COUNTER), \
+ }; \
+ static struct attribute *cm_counter_attrs_##_group[] = { \
+ &cm_counter_attr_##_group[0].attr.attr, \
+ &cm_counter_attr_##_group[1].attr.attr, \
+ &cm_counter_attr_##_group[2].attr.attr, \
+ &cm_counter_attr_##_group[3].attr.attr, \
+ &cm_counter_attr_##_group[4].attr.attr, \
+ &cm_counter_attr_##_group[5].attr.attr, \
+ &cm_counter_attr_##_group[6].attr.attr, \
+ &cm_counter_attr_##_group[7].attr.attr, \
+ &cm_counter_attr_##_group[8].attr.attr, \
+ &cm_counter_attr_##_group[9].attr.attr, \
+ &cm_counter_attr_##_group[10].attr.attr, \
+ NULL, \
+ }; \
+ static const struct attribute_group cm_counter_group_##_group = { \
+ .name = _name, \
+ .attrs = cm_counter_attrs_##_group, \
+ };
- kobject_put(&port->port_obj);
-}
+CM_COUNTER_GROUP(CM_XMIT, "cm_tx_msgs")
+CM_COUNTER_GROUP(CM_XMIT_RETRIES, "cm_tx_retries")
+CM_COUNTER_GROUP(CM_RECV, "cm_rx_msgs")
+CM_COUNTER_GROUP(CM_RECV_DUPLICATES, "cm_rx_duplicates")
+
+static const struct attribute_group *cm_counter_groups[] = {
+ &cm_counter_group_CM_XMIT,
+ &cm_counter_group_CM_XMIT_RETRIES,
+ &cm_counter_group_CM_RECV,
+ &cm_counter_group_CM_RECV_DUPLICATES,
+ NULL,
+};
-static void cm_add_one(struct ib_device *ib_device)
+static int cm_add_one(struct ib_device *ib_device)
{
struct cm_device *cm_dev;
struct cm_port *port;
@@ -3862,38 +4347,38 @@ static void cm_add_one(struct ib_device *ib_device)
unsigned long flags;
int ret;
int count = 0;
- u8 i;
+ u32 i;
- cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
- ib_device->phys_port_cnt, GFP_KERNEL);
+ cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
+ GFP_KERNEL);
if (!cm_dev)
- return;
+ return -ENOMEM;
+ kref_init(&cm_dev->kref);
+ rwlock_init(&cm_dev->mad_agent_lock);
cm_dev->ib_device = ib_device;
- cm_get_ack_delay(cm_dev);
+ cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
cm_dev->going_down = 0;
- cm_dev->device = device_create(&cm_class, &ib_device->dev,
- MKDEV(0, 0), NULL,
- "%s", ib_device->name);
- if (IS_ERR(cm_dev->device)) {
- kfree(cm_dev);
- return;
- }
+
+ ib_set_client_data(ib_device, &cm_client, cm_dev);
set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
- for (i = 1; i <= ib_device->phys_port_cnt; i++) {
+ rdma_for_each_port (ib_device, i) {
if (!rdma_cap_ib_cm(ib_device, i))
continue;
port = kzalloc(sizeof *port, GFP_KERNEL);
- if (!port)
+ if (!port) {
+ ret = -ENOMEM;
goto error1;
+ }
cm_dev->port[i-1] = port;
port->cm_dev = cm_dev;
port->port_num = i;
- ret = cm_create_port_fs(port);
+ ret = ib_port_register_client_groups(ib_device, i,
+ cm_counter_groups);
if (ret)
goto error1;
@@ -3905,30 +4390,47 @@ static void cm_add_one(struct ib_device *ib_device)
cm_recv_handler,
port,
0);
- if (IS_ERR(port->mad_agent))
+ if (IS_ERR(port->mad_agent)) {
+ ret = PTR_ERR(port->mad_agent);
goto error2;
+ }
+
+ port->rep_agent = ib_register_mad_agent(ib_device, i,
+ IB_QPT_GSI,
+ NULL,
+ 0,
+ cm_send_handler,
+ NULL,
+ port,
+ 0);
+ if (IS_ERR(port->rep_agent)) {
+ ret = PTR_ERR(port->rep_agent);
+ goto error3;
+ }
ret = ib_modify_port(ib_device, i, 0, &port_modify);
if (ret)
- goto error3;
+ goto error4;
count++;
}
- if (!count)
+ if (!count) {
+ ret = -EOPNOTSUPP;
goto free;
-
- ib_set_client_data(ib_device, &cm_client, cm_dev);
+ }
write_lock_irqsave(&cm.device_lock, flags);
list_add_tail(&cm_dev->list, &cm.device_list);
write_unlock_irqrestore(&cm.device_lock, flags);
- return;
+ return 0;
+error4:
+ ib_unregister_mad_agent(port->rep_agent);
error3:
ib_unregister_mad_agent(port->mad_agent);
error2:
- cm_remove_port_fs(port);
+ ib_port_unregister_client_groups(ib_device, i, cm_counter_groups);
error1:
port_modify.set_port_cap_mask = 0;
port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
@@ -3938,12 +4440,14 @@ error1:
port = cm_dev->port[i-1];
ib_modify_port(ib_device, port->port_num, 0, &port_modify);
+ ib_unregister_mad_agent(port->rep_agent);
ib_unregister_mad_agent(port->mad_agent);
- cm_remove_port_fs(port);
+ ib_port_unregister_client_groups(ib_device, i,
+ cm_counter_groups);
}
free:
- device_unregister(cm_dev->device);
- kfree(cm_dev);
+ cm_device_put(cm_dev);
+ return ret;
}
static void cm_remove_one(struct ib_device *ib_device, void *client_data)
@@ -3954,10 +4458,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
.clr_port_cap_mask = IB_PORT_CM_SUP
};
unsigned long flags;
- int i;
-
- if (!cm_dev)
- return;
+ u32 i;
write_lock_irqsave(&cm.device_lock, flags);
list_del(&cm_dev->list);
@@ -3967,11 +4468,16 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
cm_dev->going_down = 1;
spin_unlock_irq(&cm.lock);
- for (i = 1; i <= ib_device->phys_port_cnt; i++) {
+ rdma_for_each_port (ib_device, i) {
+ struct ib_mad_agent *mad_agent;
+ struct ib_mad_agent *rep_agent;
+
if (!rdma_cap_ib_cm(ib_device, i))
continue;
port = cm_dev->port[i-1];
+ mad_agent = port->mad_agent;
+ rep_agent = port->rep_agent;
ib_modify_port(ib_device, port->port_num, 0, &port_modify);
/*
* We flush the queue here after the going_down set, this
@@ -3979,18 +4485,27 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
* after that we can call the unregister_mad_agent
*/
flush_workqueue(cm.wq);
- ib_unregister_mad_agent(port->mad_agent);
- cm_remove_port_fs(port);
+ /*
+ * The above ensures no call paths from the work are running,
+ * the remaining paths all take the mad_agent_lock.
+ */
+ write_lock(&cm_dev->mad_agent_lock);
+ port->mad_agent = NULL;
+ port->rep_agent = NULL;
+ write_unlock(&cm_dev->mad_agent_lock);
+ ib_unregister_mad_agent(mad_agent);
+ ib_unregister_mad_agent(rep_agent);
+ ib_port_unregister_client_groups(ib_device, i,
+ cm_counter_groups);
}
- device_unregister(cm_dev->device);
- kfree(cm_dev);
+
+ cm_device_put(cm_dev);
}
static int __init ib_cm_init(void)
{
int ret;
- memset(&cm, 0, sizeof cm);
INIT_LIST_HEAD(&cm.device_list);
rwlock_init(&cm.device_lock);
spin_lock_init(&cm.lock);
@@ -3999,17 +4514,11 @@ static int __init ib_cm_init(void)
cm.remote_id_table = RB_ROOT;
cm.remote_qp_table = RB_ROOT;
cm.remote_sidr_table = RB_ROOT;
- idr_init(&cm.local_id_table);
+ xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC);
get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
INIT_LIST_HEAD(&cm.timewait_list);
- ret = class_register(&cm_class);
- if (ret) {
- ret = -ENOMEM;
- goto error1;
- }
-
- cm.wq = create_workqueue("ib_cm");
+ cm.wq = alloc_workqueue("ib_cm", 0, 1);
if (!cm.wq) {
ret = -ENOMEM;
goto error2;
@@ -4023,9 +4532,6 @@ static int __init ib_cm_init(void)
error3:
destroy_workqueue(cm.wq);
error2:
- class_unregister(&cm_class);
-error1:
- idr_destroy(&cm.local_id_table);
return ret;
}
@@ -4046,10 +4552,8 @@ static void __exit ib_cm_cleanup(void)
kfree(timewait_info);
}
- class_unregister(&cm_class);
- idr_destroy(&cm.local_id_table);
+ WARN_ON(!xa_empty(&cm.local_id_table));
}
module_init(ib_cm_init);
module_exit(ib_cm_cleanup);
-
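
Illustrative sketch (not part of the patch): throughout the reworked cm.c, hand-rolled shift/mask helpers are replaced by the spec-derived IBA_GET()/IBA_SET() accessors from the new <rdma/ibta_vol1_c12.h> header included via cm_msgs.h below. Roughly equivalent open-coded helpers for a single field would look like this; the example_* names are hypothetical.

#include <rdma/ibta_vol1_c12.h>	/* CM_* IBA field definitions */

/* Hypothetical stand-ins for the removed cm_sidr_rep_{set,get}_qpn()
 * helpers, expressed with the generic accessors the patch uses.
 */
static void example_set_sidr_rep_qpn(struct cm_sidr_rep_msg *msg, u32 qpn)
{
	IBA_SET(CM_SIDR_REP_QPN, msg, qpn);	/* shift, mask and endianness handled */
}

static u32 example_get_sidr_rep_qpn(struct cm_sidr_rep_msg *msg)
{
	return IBA_GET(CM_SIDR_REP_QPN, msg);
}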
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 8b76f0ef965e..8462de7ca26e 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -1,39 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2004, 2011 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004 Voltaire Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING the madirectory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use source and binary forms, with or
- * withmodification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retathe above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHWARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS THE
- * SOFTWARE.
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
*/
-#if !defined(CM_MSGS_H)
+#ifndef CM_MSGS_H
#define CM_MSGS_H
+#include <rdma/ibta_vol1_c12.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cm.h>
@@ -44,127 +19,14 @@
#define IB_CM_CLASS_VERSION 2 /* IB specification 1.2 */
-enum cm_msg_sequence {
- CM_MSG_SEQUENCE_REQ,
- CM_MSG_SEQUENCE_LAP,
- CM_MSG_SEQUENCE_DREQ,
- CM_MSG_SEQUENCE_SIDR
-};
-
-struct cm_req_msg {
- struct ib_mad_hdr hdr;
-
- __be32 local_comm_id;
- __be32 rsvd4;
- __be64 service_id;
- __be64 local_ca_guid;
- __be32 rsvd24;
- __be32 local_qkey;
- /* local QPN:24, responder resources:8 */
- __be32 offset32;
- /* local EECN:24, initiator depth:8 */
- __be32 offset36;
- /*
- * remote EECN:24, remote CM response timeout:5,
- * transport service type:2, end-to-end flow control:1
- */
- __be32 offset40;
- /* starting PSN:24, local CM response timeout:5, retry count:3 */
- __be32 offset44;
- __be16 pkey;
- /* path MTU:4, RDC exists:1, RNR retry count:3. */
- u8 offset50;
- /* max CM Retries:4, SRQ:1, extended transport type:3 */
- u8 offset51;
-
- __be16 primary_local_lid;
- __be16 primary_remote_lid;
- union ib_gid primary_local_gid;
- union ib_gid primary_remote_gid;
- /* flow label:20, rsvd:6, packet rate:6 */
- __be32 primary_offset88;
- u8 primary_traffic_class;
- u8 primary_hop_limit;
- /* SL:4, subnet local:1, rsvd:3 */
- u8 primary_offset94;
- /* local ACK timeout:5, rsvd:3 */
- u8 primary_offset95;
-
- __be16 alt_local_lid;
- __be16 alt_remote_lid;
- union ib_gid alt_local_gid;
- union ib_gid alt_remote_gid;
- /* flow label:20, rsvd:6, packet rate:6 */
- __be32 alt_offset132;
- u8 alt_traffic_class;
- u8 alt_hop_limit;
- /* SL:4, subnet local:1, rsvd:3 */
- u8 alt_offset138;
- /* local ACK timeout:5, rsvd:3 */
- u8 alt_offset139;
-
- u32 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
-
-} __attribute__ ((packed));
-
-static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
-{
- return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
-}
-
-static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)
-{
- req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
- (be32_to_cpu(req_msg->offset32) &
- 0x000000FF));
-}
-
-static inline u8 cm_req_get_resp_res(struct cm_req_msg *req_msg)
-{
- return (u8) be32_to_cpu(req_msg->offset32);
-}
-
-static inline void cm_req_set_resp_res(struct cm_req_msg *req_msg, u8 resp_res)
-{
- req_msg->offset32 = cpu_to_be32(resp_res |
- (be32_to_cpu(req_msg->offset32) &
- 0xFFFFFF00));
-}
-
-static inline u8 cm_req_get_init_depth(struct cm_req_msg *req_msg)
-{
- return (u8) be32_to_cpu(req_msg->offset36);
-}
-
-static inline void cm_req_set_init_depth(struct cm_req_msg *req_msg,
- u8 init_depth)
-{
- req_msg->offset36 = cpu_to_be32(init_depth |
- (be32_to_cpu(req_msg->offset36) &
- 0xFFFFFF00));
-}
-
-static inline u8 cm_req_get_remote_resp_timeout(struct cm_req_msg *req_msg)
-{
- return (u8) ((be32_to_cpu(req_msg->offset40) & 0xF8) >> 3);
-}
-
-static inline void cm_req_set_remote_resp_timeout(struct cm_req_msg *req_msg,
- u8 resp_timeout)
-{
- req_msg->offset40 = cpu_to_be32((resp_timeout << 3) |
- (be32_to_cpu(req_msg->offset40) &
- 0xFFFFFF07));
-}
-
static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
{
- u8 transport_type = (u8) (be32_to_cpu(req_msg->offset40) & 0x06) >> 1;
- switch(transport_type) {
+ u8 transport_type = IBA_GET(CM_REQ_TRANSPORT_SERVICE_TYPE, req_msg);
+ switch (transport_type) {
case 0: return IB_QPT_RC;
case 1: return IB_QPT_UC;
case 3:
- switch (req_msg->offset51 & 0x7) {
+ switch (IBA_GET(CM_REQ_EXTENDED_TRANSPORT_TYPE, req_msg)) {
case 1: return IB_QPT_XRC_TGT;
default: return 0;
}
@@ -175,242 +37,19 @@ static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
enum ib_qp_type qp_type)
{
- switch(qp_type) {
+ switch (qp_type) {
case IB_QPT_UC:
- req_msg->offset40 = cpu_to_be32((be32_to_cpu(
- req_msg->offset40) &
- 0xFFFFFFF9) | 0x2);
+ IBA_SET(CM_REQ_TRANSPORT_SERVICE_TYPE, req_msg, 1);
break;
case IB_QPT_XRC_INI:
- req_msg->offset40 = cpu_to_be32((be32_to_cpu(
- req_msg->offset40) &
- 0xFFFFFFF9) | 0x6);
- req_msg->offset51 = (req_msg->offset51 & 0xF8) | 1;
+ IBA_SET(CM_REQ_TRANSPORT_SERVICE_TYPE, req_msg, 3);
+ IBA_SET(CM_REQ_EXTENDED_TRANSPORT_TYPE, req_msg, 1);
break;
default:
- req_msg->offset40 = cpu_to_be32(be32_to_cpu(
- req_msg->offset40) &
- 0xFFFFFFF9);
+ IBA_SET(CM_REQ_TRANSPORT_SERVICE_TYPE, req_msg, 0);
}
}
-static inline u8 cm_req_get_flow_ctrl(struct cm_req_msg *req_msg)
-{
- return be32_to_cpu(req_msg->offset40) & 0x1;
-}
-
-static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
- u8 flow_ctrl)
-{
- req_msg->offset40 = cpu_to_be32((flow_ctrl & 0x1) |
- (be32_to_cpu(req_msg->offset40) &
- 0xFFFFFFFE));
-}
-
-static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
-{
- return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
-}
-
-static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
- __be32 starting_psn)
-{
- req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
- (be32_to_cpu(req_msg->offset44) & 0x000000FF));
-}
-
-static inline u8 cm_req_get_local_resp_timeout(struct cm_req_msg *req_msg)
-{
- return (u8) ((be32_to_cpu(req_msg->offset44) & 0xF8) >> 3);
-}
-
-static inline void cm_req_set_local_resp_timeout(struct cm_req_msg *req_msg,
- u8 resp_timeout)
-{
- req_msg->offset44 = cpu_to_be32((resp_timeout << 3) |
- (be32_to_cpu(req_msg->offset44) & 0xFFFFFF07));
-}
-
-static inline u8 cm_req_get_retry_count(struct cm_req_msg *req_msg)
-{
- return (u8) (be32_to_cpu(req_msg->offset44) & 0x7);
-}
-
-static inline void cm_req_set_retry_count(struct cm_req_msg *req_msg,
- u8 retry_count)
-{
- req_msg->offset44 = cpu_to_be32((retry_count & 0x7) |
- (be32_to_cpu(req_msg->offset44) & 0xFFFFFFF8));
-}
-
-static inline u8 cm_req_get_path_mtu(struct cm_req_msg *req_msg)
-{
- return req_msg->offset50 >> 4;
-}
-
-static inline void cm_req_set_path_mtu(struct cm_req_msg *req_msg, u8 path_mtu)
-{
- req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF) | (path_mtu << 4));
-}
-
-static inline u8 cm_req_get_rnr_retry_count(struct cm_req_msg *req_msg)
-{
- return req_msg->offset50 & 0x7;
-}
-
-static inline void cm_req_set_rnr_retry_count(struct cm_req_msg *req_msg,
- u8 rnr_retry_count)
-{
- req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF8) |
- (rnr_retry_count & 0x7));
-}
-
-static inline u8 cm_req_get_max_cm_retries(struct cm_req_msg *req_msg)
-{
- return req_msg->offset51 >> 4;
-}
-
-static inline void cm_req_set_max_cm_retries(struct cm_req_msg *req_msg,
- u8 retries)
-{
- req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF) | (retries << 4));
-}
-
-static inline u8 cm_req_get_srq(struct cm_req_msg *req_msg)
-{
- return (req_msg->offset51 & 0x8) >> 3;
-}
-
-static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
-{
- req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF7) |
- ((srq & 0x1) << 3));
-}
-
-static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
-{
- return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12);
-}
-
-static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
- __be32 flow_label)
-{
- req_msg->primary_offset88 = cpu_to_be32(
- (be32_to_cpu(req_msg->primary_offset88) &
- 0x00000FFF) |
- (be32_to_cpu(flow_label) << 12));
-}
-
-static inline u8 cm_req_get_primary_packet_rate(struct cm_req_msg *req_msg)
-{
- return (u8) (be32_to_cpu(req_msg->primary_offset88) & 0x3F);
-}
-
-static inline void cm_req_set_primary_packet_rate(struct cm_req_msg *req_msg,
- u8 rate)
-{
- req_msg->primary_offset88 = cpu_to_be32(
- (be32_to_cpu(req_msg->primary_offset88) &
- 0xFFFFFFC0) | (rate & 0x3F));
-}
-
-static inline u8 cm_req_get_primary_sl(struct cm_req_msg *req_msg)
-{
- return (u8) (req_msg->primary_offset94 >> 4);
-}
-
-static inline void cm_req_set_primary_sl(struct cm_req_msg *req_msg, u8 sl)
-{
- req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0x0F) |
- (sl << 4));
-}
-
-static inline u8 cm_req_get_primary_subnet_local(struct cm_req_msg *req_msg)
-{
- return (u8) ((req_msg->primary_offset94 & 0x08) >> 3);
-}
-
-static inline void cm_req_set_primary_subnet_local(struct cm_req_msg *req_msg,
- u8 subnet_local)
-{
- req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0xF7) |
- ((subnet_local & 0x1) << 3));
-}
-
-static inline u8 cm_req_get_primary_local_ack_timeout(struct cm_req_msg *req_msg)
-{
- return (u8) (req_msg->primary_offset95 >> 3);
-}
-
-static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_msg,
- u8 local_ack_timeout)
-{
- req_msg->primary_offset95 = (u8) ((req_msg->primary_offset95 & 0x07) |
- (local_ack_timeout << 3));
-}
-
-static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
-{
- return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12);
-}
-
-static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
- __be32 flow_label)
-{
- req_msg->alt_offset132 = cpu_to_be32(
- (be32_to_cpu(req_msg->alt_offset132) &
- 0x00000FFF) |
- (be32_to_cpu(flow_label) << 12));
-}
-
-static inline u8 cm_req_get_alt_packet_rate(struct cm_req_msg *req_msg)
-{
- return (u8) (be32_to_cpu(req_msg->alt_offset132) & 0x3F);
-}
-
-static inline void cm_req_set_alt_packet_rate(struct cm_req_msg *req_msg,
- u8 rate)
-{
- req_msg->alt_offset132 = cpu_to_be32(
- (be32_to_cpu(req_msg->alt_offset132) &
- 0xFFFFFFC0) | (rate & 0x3F));
-}
-
-static inline u8 cm_req_get_alt_sl(struct cm_req_msg *req_msg)
-{
- return (u8) (req_msg->alt_offset138 >> 4);
-}
-
-static inline void cm_req_set_alt_sl(struct cm_req_msg *req_msg, u8 sl)
-{
- req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0x0F) |
- (sl << 4));
-}
-
-static inline u8 cm_req_get_alt_subnet_local(struct cm_req_msg *req_msg)
-{
- return (u8) ((req_msg->alt_offset138 & 0x08) >> 3);
-}
-
-static inline void cm_req_set_alt_subnet_local(struct cm_req_msg *req_msg,
- u8 subnet_local)
-{
- req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0xF7) |
- ((subnet_local & 0x1) << 3));
-}
-
-static inline u8 cm_req_get_alt_local_ack_timeout(struct cm_req_msg *req_msg)
-{
- return (u8) (req_msg->alt_offset139 >> 3);
-}
-
-static inline void cm_req_set_alt_local_ack_timeout(struct cm_req_msg *req_msg,
- u8 local_ack_timeout)
-{
- req_msg->alt_offset139 = (u8) ((req_msg->alt_offset139 & 0x07) |
- (local_ack_timeout << 3));
-}
-
/* Message REJected or MRAed */
enum cm_msg_response {
CM_MSG_RESPONSE_REQ = 0x0,
@@ -418,419 +57,12 @@ enum cm_msg_response {
CM_MSG_RESPONSE_OTHER = 0x2
};
-struct cm_mra_msg {
- struct ib_mad_hdr hdr;
-
- __be32 local_comm_id;
- __be32 remote_comm_id;
- /* message MRAed:2, rsvd:6 */
- u8 offset8;
- /* service timeout:5, rsvd:3 */
- u8 offset9;
-
- u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE];
-
-} __attribute__ ((packed));
-
-static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg)
-{
- return (u8) (mra_msg->offset8 >> 6);
-}
-
-static inline void cm_mra_set_msg_mraed(struct cm_mra_msg *mra_msg, u8 msg)
-{
- mra_msg->offset8 = (u8) ((mra_msg->offset8 & 0x3F) | (msg << 6));
-}
-
-static inline u8 cm_mra_get_service_timeout(struct cm_mra_msg *mra_msg)
-{
- return (u8) (mra_msg->offset9 >> 3);
-}
-
-static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
- u8 service_timeout)
-{
- mra_msg->offset9 = (u8) ((mra_msg->offset9 & 0x07) |
- (service_timeout << 3));
-}
-
-struct cm_rej_msg {
- struct ib_mad_hdr hdr;
-
- __be32 local_comm_id;
- __be32 remote_comm_id;
- /* message REJected:2, rsvd:6 */
- u8 offset8;
- /* reject info length:7, rsvd:1. */
- u8 offset9;
- __be16 reason;
- u8 ari[IB_CM_REJ_ARI_LENGTH];
-
- u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];
-
-} __attribute__ ((packed));
-
-static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg)
-{
- return (u8) (rej_msg->offset8 >> 6);
-}
-
-static inline void cm_rej_set_msg_rejected(struct cm_rej_msg *rej_msg, u8 msg)
-{
- rej_msg->offset8 = (u8) ((rej_msg->offset8 & 0x3F) | (msg << 6));
-}
-
-static inline u8 cm_rej_get_reject_info_len(struct cm_rej_msg *rej_msg)
-{
- return (u8) (rej_msg->offset9 >> 1);
-}
-
-static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
- u8 len)
-{
- rej_msg->offset9 = (u8) ((rej_msg->offset9 & 0x1) | (len << 1));
-}
-
-struct cm_rep_msg {
- struct ib_mad_hdr hdr;
-
- __be32 local_comm_id;
- __be32 remote_comm_id;
- __be32 local_qkey;
- /* local QPN:24, rsvd:8 */
- __be32 offset12;
- /* local EECN:24, rsvd:8 */
- __be32 offset16;
- /* starting PSN:24 rsvd:8 */
- __be32 offset20;
- u8 resp_resources;
- u8 initiator_depth;
- /* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
- u8 offset26;
- /* RNR retry count:3, SRQ:1, rsvd:5 */
- u8 offset27;
- __be64 local_ca_guid;
-
- u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];
-
-} __attribute__ ((packed));
-
-static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
-{
- return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
-}
-
-static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
-{
- rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
- (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
-}
-
-static inline __be32 cm_rep_get_local_eecn(struct cm_rep_msg *rep_msg)
-{
- return cpu_to_be32(be32_to_cpu(rep_msg->offset16) >> 8);
-}
-
-static inline void cm_rep_set_local_eecn(struct cm_rep_msg *rep_msg, __be32 eecn)
-{
- rep_msg->offset16 = cpu_to_be32((be32_to_cpu(eecn) << 8) |
- (be32_to_cpu(rep_msg->offset16) & 0x000000FF));
-}
-
static inline __be32 cm_rep_get_qpn(struct cm_rep_msg *rep_msg, enum ib_qp_type qp_type)
{
return (qp_type == IB_QPT_XRC_INI) ?
- cm_rep_get_local_eecn(rep_msg) : cm_rep_get_local_qpn(rep_msg);
-}
-
-static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
-{
- return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
-}
-
-static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
- __be32 starting_psn)
-{
- rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
- (be32_to_cpu(rep_msg->offset20) & 0x000000FF));
-}
-
-static inline u8 cm_rep_get_target_ack_delay(struct cm_rep_msg *rep_msg)
-{
- return (u8) (rep_msg->offset26 >> 3);
-}
-
-static inline void cm_rep_set_target_ack_delay(struct cm_rep_msg *rep_msg,
- u8 target_ack_delay)
-{
- rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0x07) |
- (target_ack_delay << 3));
-}
-
-static inline u8 cm_rep_get_failover(struct cm_rep_msg *rep_msg)
-{
- return (u8) ((rep_msg->offset26 & 0x06) >> 1);
-}
-
-static inline void cm_rep_set_failover(struct cm_rep_msg *rep_msg, u8 failover)
-{
- rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xF9) |
- ((failover & 0x3) << 1));
-}
-
-static inline u8 cm_rep_get_flow_ctrl(struct cm_rep_msg *rep_msg)
-{
- return (u8) (rep_msg->offset26 & 0x01);
-}
-
-static inline void cm_rep_set_flow_ctrl(struct cm_rep_msg *rep_msg,
- u8 flow_ctrl)
-{
- rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xFE) |
- (flow_ctrl & 0x1));
-}
-
-static inline u8 cm_rep_get_rnr_retry_count(struct cm_rep_msg *rep_msg)
-{
- return (u8) (rep_msg->offset27 >> 5);
-}
-
-static inline void cm_rep_set_rnr_retry_count(struct cm_rep_msg *rep_msg,
- u8 rnr_retry_count)
-{
- rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0x1F) |
- (rnr_retry_count << 5));
-}
-
-static inline u8 cm_rep_get_srq(struct cm_rep_msg *rep_msg)
-{
- return (u8) ((rep_msg->offset27 >> 4) & 0x1);
-}
-
-static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
-{
- rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0xEF) |
- ((srq & 0x1) << 4));
-}
-
-struct cm_rtu_msg {
- struct ib_mad_hdr hdr;
-
- __be32 local_comm_id;
- __be32 remote_comm_id;
-
- u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];
-
-} __attribute__ ((packed));
-
-struct cm_dreq_msg {
- struct ib_mad_hdr hdr;
-
- __be32 local_comm_id;
- __be32 remote_comm_id;
- /* remote QPN/EECN:24, rsvd:8 */
- __be32 offset8;
-
- u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];
-
-} __attribute__ ((packed));
-
-static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
-{
- return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
-}
-
-static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)
-{
- dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
- (be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
-}
-
-struct cm_drep_msg {
- struct ib_mad_hdr hdr;
-
- __be32 local_comm_id;
- __be32 remote_comm_id;
-
- u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];
-
-} __attribute__ ((packed));
-
-struct cm_lap_msg {
- struct ib_mad_hdr hdr;
-
- __be32 local_comm_id;
- __be32 remote_comm_id;
-
- __be32 rsvd8;
- /* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
- __be32 offset12;
- __be32 rsvd16;
-
- __be16 alt_local_lid;
- __be16 alt_remote_lid;
- union ib_gid alt_local_gid;
- union ib_gid alt_remote_gid;
- /* flow label:20, rsvd:4, traffic class:8 */
- __be32 offset56;
- u8 alt_hop_limit;
- /* rsvd:2, packet rate:6 */
- u8 offset61;
- /* SL:4, subnet local:1, rsvd:3 */
- u8 offset62;
- /* local ACK timeout:5, rsvd:3 */
- u8 offset63;
-
- u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
-} __attribute__ ((packed));
-
-static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
-{
- return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
-}
-
-static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)
-{
- lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
- (be32_to_cpu(lap_msg->offset12) &
- 0x000000FF));
-}
-
-static inline u8 cm_lap_get_remote_resp_timeout(struct cm_lap_msg *lap_msg)
-{
- return (u8) ((be32_to_cpu(lap_msg->offset12) & 0xF8) >> 3);
-}
-
-static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
- u8 resp_timeout)
-{
- lap_msg->offset12 = cpu_to_be32((resp_timeout << 3) |
- (be32_to_cpu(lap_msg->offset12) &
- 0xFFFFFF07));
-}
-
-static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
-{
- return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12);
-}
-
-static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
- __be32 flow_label)
-{
- lap_msg->offset56 = cpu_to_be32(
- (be32_to_cpu(lap_msg->offset56) & 0x00000FFF) |
- (be32_to_cpu(flow_label) << 12));
-}
-
-static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
-{
- return (u8) be32_to_cpu(lap_msg->offset56);
-}
-
-static inline void cm_lap_set_traffic_class(struct cm_lap_msg *lap_msg,
- u8 traffic_class)
-{
- lap_msg->offset56 = cpu_to_be32(traffic_class |
- (be32_to_cpu(lap_msg->offset56) &
- 0xFFFFFF00));
-}
-
-static inline u8 cm_lap_get_packet_rate(struct cm_lap_msg *lap_msg)
-{
- return lap_msg->offset61 & 0x3F;
-}
-
-static inline void cm_lap_set_packet_rate(struct cm_lap_msg *lap_msg,
- u8 packet_rate)
-{
- lap_msg->offset61 = (packet_rate & 0x3F) | (lap_msg->offset61 & 0xC0);
-}
-
-static inline u8 cm_lap_get_sl(struct cm_lap_msg *lap_msg)
-{
- return lap_msg->offset62 >> 4;
-}
-
-static inline void cm_lap_set_sl(struct cm_lap_msg *lap_msg, u8 sl)
-{
- lap_msg->offset62 = (sl << 4) | (lap_msg->offset62 & 0x0F);
-}
-
-static inline u8 cm_lap_get_subnet_local(struct cm_lap_msg *lap_msg)
-{
- return (lap_msg->offset62 >> 3) & 0x1;
-}
-
-static inline void cm_lap_set_subnet_local(struct cm_lap_msg *lap_msg,
- u8 subnet_local)
-{
- lap_msg->offset62 = ((subnet_local & 0x1) << 3) |
- (lap_msg->offset61 & 0xF7);
-}
-static inline u8 cm_lap_get_local_ack_timeout(struct cm_lap_msg *lap_msg)
-{
- return lap_msg->offset63 >> 3;
-}
-
-static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
- u8 local_ack_timeout)
-{
- lap_msg->offset63 = (local_ack_timeout << 3) |
- (lap_msg->offset63 & 0x07);
-}
-
-struct cm_apr_msg {
- struct ib_mad_hdr hdr;
-
- __be32 local_comm_id;
- __be32 remote_comm_id;
-
- u8 info_length;
- u8 ap_status;
- __be16 rsvd;
- u8 info[IB_CM_APR_INFO_LENGTH];
-
- u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
-} __attribute__ ((packed));
-
-struct cm_sidr_req_msg {
- struct ib_mad_hdr hdr;
-
- __be32 request_id;
- __be16 pkey;
- __be16 rsvd;
- __be64 service_id;
-
- u32 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
-} __attribute__ ((packed));
-
-struct cm_sidr_rep_msg {
- struct ib_mad_hdr hdr;
-
- __be32 request_id;
- u8 status;
- u8 info_length;
- __be16 rsvd;
- /* QPN:24, rsvd:8 */
- __be32 offset8;
- __be64 service_id;
- __be32 qkey;
- u8 info[IB_CM_SIDR_REP_INFO_LENGTH];
-
- u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
-} __attribute__ ((packed));
-
-static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
-{
- return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
-}
-
-static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
- __be32 qpn)
-{
- sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
- (be32_to_cpu(sidr_rep_msg->offset8) &
- 0x000000FF));
+ cpu_to_be32(IBA_GET(CM_REP_LOCAL_EE_CONTEXT_NUMBER,
+ rep_msg)) :
+ cpu_to_be32(IBA_GET(CM_REP_LOCAL_QPN, rep_msg));
}
#endif /* CM_MSGS_H */
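
Illustrative usage sketch (not part of the patch) for the two helpers kept above: the active side encodes IB_QPT_XRC_INI as transport service type 3 with extended transport type 1, which the passive side reads back as IB_QPT_XRC_TGT. The example_* function name is hypothetical.

/* Round-trip the QP type through the REQ encoding defined above. */
static void example_encode_req_qp_type(struct cm_req_msg *req_msg)
{
	cm_req_set_qp_type(req_msg, IB_QPT_XRC_INI);
	/* transport service type = 3, extended transport type = 1 */
	WARN_ON(cm_req_get_qp_type(req_msg) != IB_QPT_XRC_TGT);
}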
diff --git a/drivers/infiniband/core/cm_trace.c b/drivers/infiniband/core/cm_trace.c
new file mode 100644
index 000000000000..8f3482f66338
--- /dev/null
+++ b/drivers/infiniband/core/cm_trace.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Trace points for the IB Connection Manager.
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2020, Oracle and/or its affiliates.
+ */
+
+#include <rdma/rdma_cm.h>
+#include "cma_priv.h"
+
+#define CREATE_TRACE_POINTS
+
+#include "cm_trace.h"
diff --git a/drivers/infiniband/core/cm_trace.h b/drivers/infiniband/core/cm_trace.h
new file mode 100644
index 000000000000..4a4987da69d4
--- /dev/null
+++ b/drivers/infiniband/core/cm_trace.h
@@ -0,0 +1,414 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Trace point definitions for the RDMA Connect Manager.
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2020 Oracle and/or its affiliates.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ib_cma
+
+#if !defined(_TRACE_IB_CMA_H) || defined(TRACE_HEADER_MULTI_READ)
+
+#define _TRACE_IB_CMA_H
+
+#include <linux/tracepoint.h>
+#include <rdma/ib_cm.h>
+#include <trace/misc/rdma.h>
+
+/*
+ * enum ib_cm_state, from include/rdma/ib_cm.h
+ */
+#define IB_CM_STATE_LIST \
+ ib_cm_state(IDLE) \
+ ib_cm_state(LISTEN) \
+ ib_cm_state(REQ_SENT) \
+ ib_cm_state(REQ_RCVD) \
+ ib_cm_state(MRA_REQ_SENT) \
+ ib_cm_state(MRA_REQ_RCVD) \
+ ib_cm_state(REP_SENT) \
+ ib_cm_state(REP_RCVD) \
+ ib_cm_state(MRA_REP_SENT) \
+ ib_cm_state(MRA_REP_RCVD) \
+ ib_cm_state(ESTABLISHED) \
+ ib_cm_state(DREQ_SENT) \
+ ib_cm_state(DREQ_RCVD) \
+ ib_cm_state(TIMEWAIT) \
+ ib_cm_state(SIDR_REQ_SENT) \
+ ib_cm_state_end(SIDR_REQ_RCVD)
+
+#undef ib_cm_state
+#undef ib_cm_state_end
+#define ib_cm_state(x) TRACE_DEFINE_ENUM(IB_CM_##x);
+#define ib_cm_state_end(x) TRACE_DEFINE_ENUM(IB_CM_##x);
+
+IB_CM_STATE_LIST
+
+#undef ib_cm_state
+#undef ib_cm_state_end
+#define ib_cm_state(x) { IB_CM_##x, #x },
+#define ib_cm_state_end(x) { IB_CM_##x, #x }
+
+#define show_ib_cm_state(x) \
+ __print_symbolic(x, IB_CM_STATE_LIST)
+
+/*
+ * enum ib_cm_lap_state, from include/rdma/ib_cm.h
+ */
+#define IB_CM_LAP_STATE_LIST \
+ ib_cm_lap_state(LAP_UNINIT) \
+ ib_cm_lap_state(LAP_IDLE) \
+ ib_cm_lap_state(LAP_SENT) \
+ ib_cm_lap_state(LAP_RCVD) \
+ ib_cm_lap_state(MRA_LAP_SENT) \
+ ib_cm_lap_state_end(MRA_LAP_RCVD)
+
+#undef ib_cm_lap_state
+#undef ib_cm_lap_state_end
+#define ib_cm_lap_state(x) TRACE_DEFINE_ENUM(IB_CM_##x);
+#define ib_cm_lap_state_end(x) TRACE_DEFINE_ENUM(IB_CM_##x);
+
+IB_CM_LAP_STATE_LIST
+
+#undef ib_cm_lap_state
+#undef ib_cm_lap_state_end
+#define ib_cm_lap_state(x) { IB_CM_##x, #x },
+#define ib_cm_lap_state_end(x) { IB_CM_##x, #x }
+
+#define show_ib_cm_lap_state(x) \
+ __print_symbolic(x, IB_CM_LAP_STATE_LIST)
+
+/*
+ * enum ib_cm_rej_reason, from include/rdma/ib_cm.h
+ */
+#define IB_CM_REJ_REASON_LIST \
+ ib_cm_rej_reason(REJ_NO_QP) \
+ ib_cm_rej_reason(REJ_NO_EEC) \
+ ib_cm_rej_reason(REJ_NO_RESOURCES) \
+ ib_cm_rej_reason(REJ_TIMEOUT) \
+ ib_cm_rej_reason(REJ_UNSUPPORTED) \
+ ib_cm_rej_reason(REJ_INVALID_COMM_ID) \
+ ib_cm_rej_reason(REJ_INVALID_COMM_INSTANCE) \
+ ib_cm_rej_reason(REJ_INVALID_SERVICE_ID) \
+ ib_cm_rej_reason(REJ_INVALID_TRANSPORT_TYPE) \
+ ib_cm_rej_reason(REJ_STALE_CONN) \
+ ib_cm_rej_reason(REJ_RDC_NOT_EXIST) \
+ ib_cm_rej_reason(REJ_INVALID_GID) \
+ ib_cm_rej_reason(REJ_INVALID_LID) \
+ ib_cm_rej_reason(REJ_INVALID_SL) \
+ ib_cm_rej_reason(REJ_INVALID_TRAFFIC_CLASS) \
+ ib_cm_rej_reason(REJ_INVALID_HOP_LIMIT) \
+ ib_cm_rej_reason(REJ_INVALID_PACKET_RATE) \
+ ib_cm_rej_reason(REJ_INVALID_ALT_GID) \
+ ib_cm_rej_reason(REJ_INVALID_ALT_LID) \
+ ib_cm_rej_reason(REJ_INVALID_ALT_SL) \
+ ib_cm_rej_reason(REJ_INVALID_ALT_TRAFFIC_CLASS) \
+ ib_cm_rej_reason(REJ_INVALID_ALT_HOP_LIMIT) \
+ ib_cm_rej_reason(REJ_INVALID_ALT_PACKET_RATE) \
+ ib_cm_rej_reason(REJ_PORT_CM_REDIRECT) \
+ ib_cm_rej_reason(REJ_PORT_REDIRECT) \
+ ib_cm_rej_reason(REJ_INVALID_MTU) \
+ ib_cm_rej_reason(REJ_INSUFFICIENT_RESP_RESOURCES) \
+ ib_cm_rej_reason(REJ_CONSUMER_DEFINED) \
+ ib_cm_rej_reason(REJ_INVALID_RNR_RETRY) \
+ ib_cm_rej_reason(REJ_DUPLICATE_LOCAL_COMM_ID) \
+ ib_cm_rej_reason(REJ_INVALID_CLASS_VERSION) \
+ ib_cm_rej_reason(REJ_INVALID_FLOW_LABEL) \
+ ib_cm_rej_reason(REJ_INVALID_ALT_FLOW_LABEL) \
+ ib_cm_rej_reason_end(REJ_VENDOR_OPTION_NOT_SUPPORTED)
+
+#undef ib_cm_rej_reason
+#undef ib_cm_rej_reason_end
+#define ib_cm_rej_reason(x) TRACE_DEFINE_ENUM(IB_CM_##x);
+#define ib_cm_rej_reason_end(x) TRACE_DEFINE_ENUM(IB_CM_##x);
+
+IB_CM_REJ_REASON_LIST
+
+#undef ib_cm_rej_reason
+#undef ib_cm_rej_reason_end
+#define ib_cm_rej_reason(x) { IB_CM_##x, #x },
+#define ib_cm_rej_reason_end(x) { IB_CM_##x, #x }
+
+#define show_ib_cm_rej_reason(x) \
+ __print_symbolic(x, IB_CM_REJ_REASON_LIST)
+
+DECLARE_EVENT_CLASS(icm_id_class,
+ TP_PROTO(
+ const struct ib_cm_id *cm_id
+ ),
+
+ TP_ARGS(cm_id),
+
+ TP_STRUCT__entry(
+ __field(const void *, cm_id) /* for eBPF scripts */
+ __field(unsigned int, local_id)
+ __field(unsigned int, remote_id)
+ __field(unsigned long, state)
+ __field(unsigned long, lap_state)
+ ),
+
+ TP_fast_assign(
+ __entry->cm_id = cm_id;
+ __entry->local_id = be32_to_cpu(cm_id->local_id);
+ __entry->remote_id = be32_to_cpu(cm_id->remote_id);
+ __entry->state = cm_id->state;
+ __entry->lap_state = cm_id->lap_state;
+ ),
+
+ TP_printk("local_id=%u remote_id=%u state=%s lap_state=%s",
+ __entry->local_id, __entry->remote_id,
+ show_ib_cm_state(__entry->state),
+ show_ib_cm_lap_state(__entry->lap_state)
+ )
+);
+
+#define DEFINE_CM_SEND_EVENT(name) \
+ DEFINE_EVENT(icm_id_class, \
+ icm_send_##name, \
+ TP_PROTO( \
+ const struct ib_cm_id *cm_id \
+ ), \
+ TP_ARGS(cm_id))
+
+DEFINE_CM_SEND_EVENT(req);
+DEFINE_CM_SEND_EVENT(rep);
+DEFINE_CM_SEND_EVENT(dup_req);
+DEFINE_CM_SEND_EVENT(dup_rep);
+DEFINE_CM_SEND_EVENT(rtu);
+DEFINE_CM_SEND_EVENT(mra);
+DEFINE_CM_SEND_EVENT(sidr_req);
+DEFINE_CM_SEND_EVENT(sidr_rep);
+DEFINE_CM_SEND_EVENT(dreq);
+DEFINE_CM_SEND_EVENT(drep);
+
+TRACE_EVENT(icm_send_rej,
+ TP_PROTO(
+ const struct ib_cm_id *cm_id,
+ enum ib_cm_rej_reason reason
+ ),
+
+ TP_ARGS(cm_id, reason),
+
+ TP_STRUCT__entry(
+ __field(const void *, cm_id)
+ __field(u32, local_id)
+ __field(u32, remote_id)
+ __field(unsigned long, state)
+ __field(unsigned long, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->cm_id = cm_id;
+ __entry->local_id = be32_to_cpu(cm_id->local_id);
+ __entry->remote_id = be32_to_cpu(cm_id->remote_id);
+ __entry->state = cm_id->state;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("local_id=%u remote_id=%u state=%s reason=%s",
+ __entry->local_id, __entry->remote_id,
+ show_ib_cm_state(__entry->state),
+ show_ib_cm_rej_reason(__entry->reason)
+ )
+);
+
+#define DEFINE_CM_ERR_EVENT(name) \
+ DEFINE_EVENT(icm_id_class, \
+ icm_##name##_err, \
+ TP_PROTO( \
+ const struct ib_cm_id *cm_id \
+ ), \
+ TP_ARGS(cm_id))
+
+DEFINE_CM_ERR_EVENT(send_cm_rtu);
+DEFINE_CM_ERR_EVENT(establish);
+DEFINE_CM_ERR_EVENT(no_listener);
+DEFINE_CM_ERR_EVENT(send_drep);
+DEFINE_CM_ERR_EVENT(dreq_unknown);
+DEFINE_CM_ERR_EVENT(send_unknown_rej);
+DEFINE_CM_ERR_EVENT(rej_unknown);
+DEFINE_CM_ERR_EVENT(prepare_mra_unknown);
+DEFINE_CM_ERR_EVENT(mra_unknown);
+DEFINE_CM_ERR_EVENT(qp_init);
+DEFINE_CM_ERR_EVENT(qp_rtr);
+DEFINE_CM_ERR_EVENT(qp_rts);
+
+DEFINE_EVENT(icm_id_class, \
+ icm_dreq_skipped, \
+ TP_PROTO( \
+ const struct ib_cm_id *cm_id \
+ ), \
+ TP_ARGS(cm_id) \
+);
+
+DECLARE_EVENT_CLASS(icm_local_class,
+ TP_PROTO(
+ unsigned int local_id,
+ unsigned int remote_id
+ ),
+
+ TP_ARGS(local_id, remote_id),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, local_id)
+ __field(unsigned int, remote_id)
+ ),
+
+ TP_fast_assign(
+ __entry->local_id = local_id;
+ __entry->remote_id = remote_id;
+ ),
+
+ TP_printk("local_id=%u remote_id=%u",
+ __entry->local_id, __entry->remote_id
+ )
+);
+
+#define DEFINE_CM_LOCAL_EVENT(name) \
+ DEFINE_EVENT(icm_local_class, \
+ icm_##name, \
+ TP_PROTO( \
+ unsigned int local_id, \
+ unsigned int remote_id \
+ ), \
+ TP_ARGS(local_id, remote_id))
+
+DEFINE_CM_LOCAL_EVENT(issue_rej);
+DEFINE_CM_LOCAL_EVENT(issue_drep);
+DEFINE_CM_LOCAL_EVENT(staleconn_err);
+DEFINE_CM_LOCAL_EVENT(no_priv_err);
+
+DECLARE_EVENT_CLASS(icm_remote_class,
+ TP_PROTO(
+ u32 remote_id
+ ),
+
+ TP_ARGS(remote_id),
+
+ TP_STRUCT__entry(
+ __field(u32, remote_id)
+ ),
+
+ TP_fast_assign(
+ __entry->remote_id = remote_id;
+ ),
+
+ TP_printk("remote_id=%u",
+ __entry->remote_id
+ )
+);
+
+#define DEFINE_CM_REMOTE_EVENT(name) \
+ DEFINE_EVENT(icm_remote_class, \
+ icm_##name, \
+ TP_PROTO( \
+ u32 remote_id \
+ ), \
+ TP_ARGS(remote_id))
+
+DEFINE_CM_REMOTE_EVENT(remote_no_priv_err);
+DEFINE_CM_REMOTE_EVENT(insert_failed_err);
+
+TRACE_EVENT(icm_send_rep_err,
+ TP_PROTO(
+ __be32 local_id,
+ enum ib_cm_state state
+ ),
+
+ TP_ARGS(local_id, state),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, local_id)
+ __field(unsigned long, state)
+ ),
+
+ TP_fast_assign(
+ __entry->local_id = be32_to_cpu(local_id);
+ __entry->state = state;
+ ),
+
+ TP_printk("local_id=%u state=%s",
+ __entry->local_id, show_ib_cm_state(__entry->state)
+ )
+);
+
+TRACE_EVENT(icm_rep_unknown_err,
+ TP_PROTO(
+ unsigned int local_id,
+ unsigned int remote_id,
+ enum ib_cm_state state
+ ),
+
+ TP_ARGS(local_id, remote_id, state),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, local_id)
+ __field(unsigned int, remote_id)
+ __field(unsigned long, state)
+ ),
+
+ TP_fast_assign(
+ __entry->local_id = local_id;
+ __entry->remote_id = remote_id;
+ __entry->state = state;
+ ),
+
+ TP_printk("local_id=%u remote_id=%u state=%s",
+ __entry->local_id, __entry->remote_id,
+ show_ib_cm_state(__entry->state)
+ )
+);
+
+TRACE_EVENT(icm_handler_err,
+ TP_PROTO(
+ enum ib_cm_event_type event
+ ),
+
+ TP_ARGS(event),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, event)
+ ),
+
+ TP_fast_assign(
+ __entry->event = event;
+ ),
+
+ TP_printk("unhandled event=%s",
+ rdma_show_ib_cm_event(__entry->event)
+ )
+);
+
+TRACE_EVENT(icm_mad_send_err,
+ TP_PROTO(
+ enum ib_cm_state state,
+ enum ib_wc_status wc_status
+ ),
+
+ TP_ARGS(state, wc_status),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, state)
+ __field(unsigned long, wc_status)
+ ),
+
+ TP_fast_assign(
+ __entry->state = state;
+ __entry->wc_status = wc_status;
+ ),
+
+ TP_printk("state=%s completion status=%s",
+ show_ib_cm_state(__entry->state),
+ rdma_show_wc_status(__entry->wc_status)
+ )
+);
+
+#endif /* _TRACE_IB_CMA_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/infiniband/core
+#define TRACE_INCLUDE_FILE cm_trace
+
+#include <trace/define_trace.h>
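Each DEFINE_CM_SEND_EVENT(name) and DEFINE_CM_ERR_EVENT(name) above stamps out a DEFINE_EVENT() against the shared icm_id_class, so this header ultimately generates trace_icm_send_<name>() / trace_icm_<name>_err() hooks that take only an ib_cm_id. A minimal caller sketch follows, assuming the header has been instantiated the usual way for ftrace headers (a .c file defining CREATE_TRACE_POINTS before including it); the helper name is hypothetical and not part of the patch:

	/* Hypothetical illustration: firing one of the generated tracepoints. */
	static void example_trace_req_sent(const struct ib_cm_id *cm_id)
	{
		/* Records local_id, remote_id, state and lap_state per icm_id_class. */
		trace_icm_send_req(cm_id);
	}
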
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index b1ab13f3e182..5b2d3ae3f9fc 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1,36 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2005 Voltaire Inc. All rights reserved.
* Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
- * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
+ * Copyright (c) 1999-2019, Mellanox Technologies, Inc. All rights reserved.
* Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/completion.h>
@@ -38,12 +11,17 @@
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
-#include <linux/idr.h>
+#include <linux/rbtree.h>
+#include <linux/igmp.h>
+#include <linux/xarray.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/route.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/netevent.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip_fib.h>
@@ -58,14 +36,18 @@
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>
+#include "core_priv.h"
+#include "cma_priv.h"
+#include "cma_trace.h"
+
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");
#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
-#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
-#define CMA_IBOE_PACKET_LIFETIME 18
+#define CMA_IBOE_PACKET_LIFETIME 16
+#define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP
static const char * const cma_events[] = {
[RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved",
@@ -86,7 +68,12 @@ static const char * const cma_events[] = {
[RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit",
};
-const char *rdma_event_msg(enum rdma_cm_event_type event)
+static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
+ enum ib_gid_type gid_type);
+
+static void cma_netevent_work_handler(struct work_struct *_work);
+
+const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
{
size_t index = event;
@@ -95,7 +82,70 @@ const char *rdma_event_msg(enum rdma_cm_event_type event)
}
EXPORT_SYMBOL(rdma_event_msg);
-static void cma_add_one(struct ib_device *device);
+const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
+ int reason)
+{
+ if (rdma_ib_or_roce(id->device, id->port_num))
+ return ibcm_reject_msg(reason);
+
+ if (rdma_protocol_iwarp(id->device, id->port_num))
+ return iwcm_reject_msg(reason);
+
+ WARN_ON_ONCE(1);
+ return "unrecognized transport";
+}
+EXPORT_SYMBOL(rdma_reject_msg);
+
+/**
+ * rdma_is_consumer_reject - return true if the consumer rejected the connect
+ * request.
+ * @id: Communication identifier that received the REJECT event.
+ * @reason: Value returned in the REJECT event status field.
+ */
+static bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
+{
+ if (rdma_ib_or_roce(id->device, id->port_num))
+ return reason == IB_CM_REJ_CONSUMER_DEFINED;
+
+ if (rdma_protocol_iwarp(id->device, id->port_num))
+ return reason == -ECONNREFUSED;
+
+ WARN_ON_ONCE(1);
+ return false;
+}
+
+const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
+ struct rdma_cm_event *ev, u8 *data_len)
+{
+ const void *p;
+
+ if (rdma_is_consumer_reject(id, ev->status)) {
+ *data_len = ev->param.conn.private_data_len;
+ p = ev->param.conn.private_data;
+ } else {
+ *data_len = 0;
+ p = NULL;
+ }
+ return p;
+}
+EXPORT_SYMBOL(rdma_consumer_reject_data);
+
+/**
+ * rdma_iw_cm_id() - return the iw_cm_id pointer for this cm_id.
+ * @id: Communication Identifier
+ */
+struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id)
+{
+ struct rdma_id_private *id_priv;
+
+ id_priv = container_of(id, struct rdma_id_private, id);
+ if (id->device->node_type == RDMA_NODE_RNIC)
+ return id_priv->cm_id.iw;
+ return NULL;
+}
+EXPORT_SYMBOL(rdma_iw_cm_id);
+
+static int cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);
static struct ib_client cma_client = {
@@ -105,126 +155,203 @@ static struct ib_client cma_client = {
};
static struct ib_sa_client sa_client;
-static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
+static struct rb_root id_table = RB_ROOT;
+/* Serialize operations of id_table tree */
+static DEFINE_SPINLOCK(id_table_lock);
static struct workqueue_struct *cma_wq;
-static DEFINE_IDR(tcp_ps);
-static DEFINE_IDR(udp_ps);
-static DEFINE_IDR(ipoib_ps);
-static DEFINE_IDR(ib_ps);
+static unsigned int cma_pernet_id;
+
+struct cma_pernet {
+ struct xarray tcp_ps;
+ struct xarray udp_ps;
+ struct xarray ipoib_ps;
+ struct xarray ib_ps;
+};
+
+static struct cma_pernet *cma_pernet(struct net *net)
+{
+ return net_generic(net, cma_pernet_id);
+}
-static struct idr *cma_idr(enum rdma_port_space ps)
+static
+struct xarray *cma_pernet_xa(struct net *net, enum rdma_ucm_port_space ps)
{
+ struct cma_pernet *pernet = cma_pernet(net);
+
switch (ps) {
case RDMA_PS_TCP:
- return &tcp_ps;
+ return &pernet->tcp_ps;
case RDMA_PS_UDP:
- return &udp_ps;
+ return &pernet->udp_ps;
case RDMA_PS_IPOIB:
- return &ipoib_ps;
+ return &pernet->ipoib_ps;
case RDMA_PS_IB:
- return &ib_ps;
+ return &pernet->ib_ps;
default:
return NULL;
}
}
+struct id_table_entry {
+ struct list_head id_list;
+ struct rb_node rb_node;
+};
+
struct cma_device {
struct list_head list;
struct ib_device *device;
struct completion comp;
- atomic_t refcount;
+ refcount_t refcount;
struct list_head id_list;
+ enum ib_gid_type *default_gid_type;
+ u8 *default_roce_tos;
};
struct rdma_bind_list {
- enum rdma_port_space ps;
+ enum rdma_ucm_port_space ps;
struct hlist_head owners;
unsigned short port;
};
-static int cma_ps_alloc(enum rdma_port_space ps,
+static int cma_ps_alloc(struct net *net, enum rdma_ucm_port_space ps,
struct rdma_bind_list *bind_list, int snum)
{
- struct idr *idr = cma_idr(ps);
+ struct xarray *xa = cma_pernet_xa(net, ps);
- return idr_alloc(idr, bind_list, snum, snum + 1, GFP_KERNEL);
+ return xa_insert(xa, snum, bind_list, GFP_KERNEL);
}
-static struct rdma_bind_list *cma_ps_find(enum rdma_port_space ps, int snum)
+static struct rdma_bind_list *cma_ps_find(struct net *net,
+ enum rdma_ucm_port_space ps, int snum)
{
- struct idr *idr = cma_idr(ps);
+ struct xarray *xa = cma_pernet_xa(net, ps);
- return idr_find(idr, snum);
+ return xa_load(xa, snum);
}
-static void cma_ps_remove(enum rdma_port_space ps, int snum)
+static void cma_ps_remove(struct net *net, enum rdma_ucm_port_space ps,
+ int snum)
{
- struct idr *idr = cma_idr(ps);
+ struct xarray *xa = cma_pernet_xa(net, ps);
- idr_remove(idr, snum);
+ xa_erase(xa, snum);
}
enum {
CMA_OPTION_AFONLY,
};
+void cma_dev_get(struct cma_device *cma_dev)
+{
+ refcount_inc(&cma_dev->refcount);
+}
+
+void cma_dev_put(struct cma_device *cma_dev)
+{
+ if (refcount_dec_and_test(&cma_dev->refcount))
+ complete(&cma_dev->comp);
+}
+
+struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
+ void *cookie)
+{
+ struct cma_device *cma_dev;
+ struct cma_device *found_cma_dev = NULL;
+
+ mutex_lock(&lock);
+
+ list_for_each_entry(cma_dev, &dev_list, list)
+ if (filter(cma_dev->device, cookie)) {
+ found_cma_dev = cma_dev;
+ break;
+ }
+
+ if (found_cma_dev)
+ cma_dev_get(found_cma_dev);
+ mutex_unlock(&lock);
+ return found_cma_dev;
+}
+
+int cma_get_default_gid_type(struct cma_device *cma_dev,
+ u32 port)
+{
+ if (!rdma_is_port_valid(cma_dev->device, port))
+ return -EINVAL;
+
+ return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)];
+}
+
+int cma_set_default_gid_type(struct cma_device *cma_dev,
+ u32 port,
+ enum ib_gid_type default_gid_type)
+{
+ unsigned long supported_gids;
+
+ if (!rdma_is_port_valid(cma_dev->device, port))
+ return -EINVAL;
+
+ if (default_gid_type == IB_GID_TYPE_IB &&
+ rdma_protocol_roce_eth_encap(cma_dev->device, port))
+ default_gid_type = IB_GID_TYPE_ROCE;
+
+ supported_gids = roce_gid_type_mask_support(cma_dev->device, port);
+
+ if (!(supported_gids & 1 << default_gid_type))
+ return -EINVAL;
+
+ cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] =
+ default_gid_type;
+
+ return 0;
+}
+
+int cma_get_default_roce_tos(struct cma_device *cma_dev, u32 port)
+{
+ if (!rdma_is_port_valid(cma_dev->device, port))
+ return -EINVAL;
+
+ return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)];
+}
+
+int cma_set_default_roce_tos(struct cma_device *cma_dev, u32 port,
+ u8 default_roce_tos)
+{
+ if (!rdma_is_port_valid(cma_dev->device, port))
+ return -EINVAL;
+
+ cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)] =
+ default_roce_tos;
+
+ return 0;
+}
+struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
+{
+ return cma_dev->device;
+}
+
/*
 * Device removal can occur at any time, so we need extra handling to
* serialize notifying the user of device removal with other callbacks.
* We do this by disabling removal notification while a callback is in process,
* and reporting it after the callback completes.
*/
-struct rdma_id_private {
- struct rdma_cm_id id;
-
- struct rdma_bind_list *bind_list;
- struct hlist_node node;
- struct list_head list; /* listen_any_list or cma_device.list */
- struct list_head listen_list; /* per device listens */
- struct cma_device *cma_dev;
- struct list_head mc_list;
-
- int internal_id;
- enum rdma_cm_state state;
- spinlock_t lock;
- struct mutex qp_mutex;
-
- struct completion comp;
- atomic_t refcount;
- struct mutex handler_mutex;
-
- int backlog;
- int timeout_ms;
- struct ib_sa_query *query;
- int query_id;
- union {
- struct ib_cm_id *ib;
- struct iw_cm_id *iw;
- } cm_id;
-
- u32 seq_num;
- u32 qkey;
- u32 qp_num;
- pid_t owner;
- u32 options;
- u8 srq;
- u8 tos;
- u8 reuseaddr;
- u8 afonly;
-};
struct cma_multicast {
struct rdma_id_private *id_priv;
union {
- struct ib_sa_multicast *ib;
- } multicast;
+ struct ib_sa_multicast *sa_mc;
+ struct {
+ struct work_struct work;
+ struct rdma_cm_event event;
+ } iboe_join;
+ };
struct list_head list;
void *context;
struct sockaddr_storage addr;
- struct kref mcref;
+ u8 join_state;
};
struct cma_work {
@@ -235,18 +362,6 @@ struct cma_work {
struct rdma_cm_event event;
};
-struct cma_ndev_work {
- struct work_struct work;
- struct rdma_id_private *id;
- struct rdma_cm_event event;
-};
-
-struct iboe_mcast_work {
- struct work_struct work;
- struct rdma_id_private *id;
- struct cma_multicast *mc;
-};
-
union cma_ip_addr {
struct in6_addr ip6;
struct {
@@ -266,31 +381,31 @@ struct cma_hdr {
#define CMA_VERSION 0x00
struct cma_req_info {
+ struct sockaddr_storage listen_addr_storage;
+ struct sockaddr_storage src_addr_storage;
struct ib_device *device;
- int port;
union ib_gid local_gid;
__be64 service_id;
+ int port;
+ bool has_gid;
u16 pkey;
- bool has_gid:1;
};
-static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&id_priv->lock, flags);
- ret = (id_priv->state == comp);
- spin_unlock_irqrestore(&id_priv->lock, flags);
- return ret;
-}
-
static int cma_comp_exch(struct rdma_id_private *id_priv,
enum rdma_cm_state comp, enum rdma_cm_state exch)
{
unsigned long flags;
int ret;
+ /*
+ * The FSM uses a funny double locking where state is protected by both
+ * the handler_mutex and the spinlock. State is not allowed to change
+ * to/from a handler_mutex protected value without also holding
+ * handler_mutex.
+ */
+ if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT)
+ lockdep_assert_held(&id_priv->handler_mutex);
+
spin_lock_irqsave(&id_priv->lock, flags);
if ((ret = (id_priv->state == comp)))
id_priv->state = exch;
@@ -298,71 +413,198 @@ static int cma_comp_exch(struct rdma_id_private *id_priv,
return ret;
}
-static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
- enum rdma_cm_state exch)
+static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
{
- unsigned long flags;
- enum rdma_cm_state old;
+ return hdr->ip_version >> 4;
+}
- spin_lock_irqsave(&id_priv->lock, flags);
- old = id_priv->state;
- id_priv->state = exch;
- spin_unlock_irqrestore(&id_priv->lock, flags);
- return old;
+static void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
+{
+ hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}
-static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
+static struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
{
- return hdr->ip_version >> 4;
+ return (struct sockaddr *)&id_priv->id.route.addr.src_addr;
}
-static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
+static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
{
- hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
+ return (struct sockaddr *)&id_priv->id.route.addr.dst_addr;
}
-static void cma_attach_to_dev(struct rdma_id_private *id_priv,
- struct cma_device *cma_dev)
+static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
{
- atomic_inc(&cma_dev->refcount);
- id_priv->cma_dev = cma_dev;
- id_priv->id.device = cma_dev->device;
- id_priv->id.route.addr.dev_addr.transport =
- rdma_node_get_transport(cma_dev->device->node_type);
- list_add_tail(&id_priv->list, &cma_dev->id_list);
+ struct in_device *in_dev = NULL;
+
+ if (ndev) {
+ rtnl_lock();
+ in_dev = __in_dev_get_rtnl(ndev);
+ if (in_dev) {
+ if (join)
+ ip_mc_inc_group(in_dev,
+ *(__be32 *)(mgid->raw + 12));
+ else
+ ip_mc_dec_group(in_dev,
+ *(__be32 *)(mgid->raw + 12));
+ }
+ rtnl_unlock();
+ }
+ return (in_dev) ? 0 : -ENODEV;
}
-static inline void cma_deref_dev(struct cma_device *cma_dev)
+static int compare_netdev_and_ip(int ifindex_a, struct sockaddr *sa,
+ struct id_table_entry *entry_b)
{
- if (atomic_dec_and_test(&cma_dev->refcount))
- complete(&cma_dev->comp);
+ struct rdma_id_private *id_priv = list_first_entry(
+ &entry_b->id_list, struct rdma_id_private, id_list_entry);
+ int ifindex_b = id_priv->id.route.addr.dev_addr.bound_dev_if;
+ struct sockaddr *sb = cma_dst_addr(id_priv);
+
+ if (ifindex_a != ifindex_b)
+ return (ifindex_a > ifindex_b) ? 1 : -1;
+
+ if (sa->sa_family != sb->sa_family)
+ return sa->sa_family - sb->sa_family;
+
+ if (sa->sa_family == AF_INET &&
+ __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in)) {
+ return memcmp(&((struct sockaddr_in *)sa)->sin_addr,
+ &((struct sockaddr_in *)sb)->sin_addr,
+ sizeof(((struct sockaddr_in *)sa)->sin_addr));
+ }
+
+ if (sa->sa_family == AF_INET6 &&
+ __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in6)) {
+ return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
+ &((struct sockaddr_in6 *)sb)->sin6_addr);
+ }
+
+ return -1;
}
-static inline void release_mc(struct kref *kref)
+static int cma_add_id_to_tree(struct rdma_id_private *node_id_priv)
{
- struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);
+ struct rb_node **new, *parent = NULL;
+ struct id_table_entry *this, *node;
+ unsigned long flags;
+ int result;
- kfree(mc->multicast.ib);
- kfree(mc);
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&id_table_lock, flags);
+ new = &id_table.rb_node;
+ while (*new) {
+ this = container_of(*new, struct id_table_entry, rb_node);
+ result = compare_netdev_and_ip(
+ node_id_priv->id.route.addr.dev_addr.bound_dev_if,
+ cma_dst_addr(node_id_priv), this);
+
+ parent = *new;
+ if (result < 0)
+ new = &((*new)->rb_left);
+ else if (result > 0)
+ new = &((*new)->rb_right);
+ else {
+ list_add_tail(&node_id_priv->id_list_entry,
+ &this->id_list);
+ kfree(node);
+ goto unlock;
+ }
+ }
+
+ INIT_LIST_HEAD(&node->id_list);
+ list_add_tail(&node_id_priv->id_list_entry, &node->id_list);
+
+ rb_link_node(&node->rb_node, parent, new);
+ rb_insert_color(&node->rb_node, &id_table);
+
+unlock:
+ spin_unlock_irqrestore(&id_table_lock, flags);
+ return 0;
}
-static void cma_release_dev(struct rdma_id_private *id_priv)
+static struct id_table_entry *
+node_from_ndev_ip(struct rb_root *root, int ifindex, struct sockaddr *sa)
{
- mutex_lock(&lock);
- list_del(&id_priv->list);
- cma_deref_dev(id_priv->cma_dev);
- id_priv->cma_dev = NULL;
- mutex_unlock(&lock);
+ struct rb_node *node = root->rb_node;
+ struct id_table_entry *data;
+ int result;
+
+ while (node) {
+ data = container_of(node, struct id_table_entry, rb_node);
+ result = compare_netdev_and_ip(ifindex, sa, data);
+ if (result < 0)
+ node = node->rb_left;
+ else if (result > 0)
+ node = node->rb_right;
+ else
+ return data;
+ }
+
+ return NULL;
}
-static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
+static void cma_remove_id_from_tree(struct rdma_id_private *id_priv)
{
- return (struct sockaddr *) &id_priv->id.route.addr.src_addr;
+ struct id_table_entry *data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&id_table_lock, flags);
+ if (list_empty(&id_priv->id_list_entry))
+ goto out;
+
+ data = node_from_ndev_ip(&id_table,
+ id_priv->id.route.addr.dev_addr.bound_dev_if,
+ cma_dst_addr(id_priv));
+ if (!data)
+ goto out;
+
+ list_del_init(&id_priv->id_list_entry);
+ if (list_empty(&data->id_list)) {
+ rb_erase(&data->rb_node, &id_table);
+ kfree(data);
+ }
+out:
+ spin_unlock_irqrestore(&id_table_lock, flags);
}
-static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
+static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
+ struct cma_device *cma_dev)
+{
+ cma_dev_get(cma_dev);
+ id_priv->cma_dev = cma_dev;
+ id_priv->id.device = cma_dev->device;
+ id_priv->id.route.addr.dev_addr.transport =
+ rdma_node_get_transport(cma_dev->device->node_type);
+ list_add_tail(&id_priv->device_item, &cma_dev->id_list);
+
+ trace_cm_id_attach(id_priv, cma_dev->device);
+}
+
+static void cma_attach_to_dev(struct rdma_id_private *id_priv,
+ struct cma_device *cma_dev)
{
- return (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
+ _cma_attach_to_dev(id_priv, cma_dev);
+ id_priv->gid_type =
+ cma_dev->default_gid_type[id_priv->id.port_num -
+ rdma_start_port(cma_dev->device)];
+}
+
+static void cma_release_dev(struct rdma_id_private *id_priv)
+{
+ mutex_lock(&lock);
+ list_del_init(&id_priv->device_item);
+ cma_dev_put(id_priv->cma_dev);
+ id_priv->cma_dev = NULL;
+ id_priv->id.device = NULL;
+ if (id_priv->id.route.addr.dev_addr.sgid_attr) {
+ rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
+ id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
+ }
+ mutex_unlock(&lock);
}
static inline unsigned short cma_family(struct rdma_id_private *id_priv)
@@ -370,22 +612,11 @@ static inline unsigned short cma_family(struct rdma_id_private *id_priv)
return id_priv->id.route.addr.src_addr.ss_family;
}
-static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
+static int cma_set_default_qkey(struct rdma_id_private *id_priv)
{
struct ib_sa_mcmember_rec rec;
int ret = 0;
- if (id_priv->qkey) {
- if (qkey && id_priv->qkey != qkey)
- return -EINVAL;
- return 0;
- }
-
- if (qkey) {
- id_priv->qkey = qkey;
- return 0;
- }
-
switch (id_priv->id.ps) {
case RDMA_PS_UDP:
case RDMA_PS_IB:
@@ -405,6 +636,16 @@ static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
return ret;
}
+static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
+{
+ if (!qkey ||
+ (id_priv->qkey && (id_priv->qkey != qkey)))
+ return -EINVAL;
+
+ id_priv->qkey = qkey;
+ return 0;
+}
+
static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
dev_addr->dev_type = ARPHRD_INFINIBAND;
@@ -417,7 +658,7 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
int ret;
if (addr->sa_family != AF_IB) {
- ret = rdma_translate_ip(addr, dev_addr, NULL);
+ ret = rdma_translate_ip(addr, dev_addr);
} else {
cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
ret = 0;
@@ -426,81 +667,257 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
return ret;
}
-static inline int cma_validate_port(struct ib_device *device, u8 port,
- union ib_gid *gid, int dev_type)
+static const struct ib_gid_attr *
+cma_validate_port(struct ib_device *device, u32 port,
+ enum ib_gid_type gid_type,
+ union ib_gid *gid,
+ struct rdma_id_private *id_priv)
{
- u8 found_port;
- int ret = -ENODEV;
+ struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+ const struct ib_gid_attr *sgid_attr = ERR_PTR(-ENODEV);
+ int bound_if_index = dev_addr->bound_dev_if;
+ int dev_type = dev_addr->dev_type;
+ struct net_device *ndev = NULL;
+ struct net_device *pdev = NULL;
+
+ if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
+ goto out;
if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
- return ret;
+ goto out;
if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
- return ret;
+ goto out;
- ret = ib_find_cached_gid(device, gid, &found_port, NULL);
- if (port != found_port)
- return -ENODEV;
+ /*
+ * For drivers that do not associate more than one net device with
+ * their gid tables, such as iWARP drivers, it is sufficient to
+ * return the first table entry.
+ *
+ * Other driver classes might be included in the future.
+ */
+ if (rdma_protocol_iwarp(device, port)) {
+ sgid_attr = rdma_get_gid_attr(device, port, 0);
+ if (IS_ERR(sgid_attr))
+ goto out;
- return ret;
+ rcu_read_lock();
+ ndev = rcu_dereference(sgid_attr->ndev);
+ if (ndev->ifindex != bound_if_index) {
+ pdev = dev_get_by_index_rcu(dev_addr->net, bound_if_index);
+ if (pdev) {
+ if (is_vlan_dev(pdev)) {
+ pdev = vlan_dev_real_dev(pdev);
+ if (ndev->ifindex == pdev->ifindex)
+ bound_if_index = pdev->ifindex;
+ }
+ if (is_vlan_dev(ndev)) {
+ pdev = vlan_dev_real_dev(ndev);
+ if (bound_if_index == pdev->ifindex)
+ bound_if_index = ndev->ifindex;
+ }
+ }
+ }
+ if (!net_eq(dev_net(ndev), dev_addr->net) ||
+ ndev->ifindex != bound_if_index) {
+ rdma_put_gid_attr(sgid_attr);
+ sgid_attr = ERR_PTR(-ENODEV);
+ }
+ rcu_read_unlock();
+ goto out;
+ }
+
+ /*
+ * For an RXE device, it should work with TUN devices and normal ethernet
+ * devices. Use driver_id to check whether a device is an RXE device.
+ * ARPHRD_NONE means a TUN device.
+ */
+ if (device->ops.driver_id == RDMA_DRIVER_RXE) {
+ if ((dev_type == ARPHRD_NONE || dev_type == ARPHRD_ETHER)
+ && rdma_protocol_roce(device, port)) {
+ ndev = dev_get_by_index(dev_addr->net, bound_if_index);
+ if (!ndev)
+ goto out;
+ }
+ } else {
+ if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
+ ndev = dev_get_by_index(dev_addr->net, bound_if_index);
+ if (!ndev)
+ goto out;
+ } else {
+ gid_type = IB_GID_TYPE_IB;
+ }
+ }
+
+ sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev);
+ dev_put(ndev);
+out:
+ return sgid_attr;
+}
+
+static void cma_bind_sgid_attr(struct rdma_id_private *id_priv,
+ const struct ib_gid_attr *sgid_attr)
+{
+ WARN_ON(id_priv->id.route.addr.dev_addr.sgid_attr);
+ id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr;
}
-static int cma_acquire_dev(struct rdma_id_private *id_priv,
- struct rdma_id_private *listen_id_priv)
+/**
+ * cma_acquire_dev_by_src_ip - Acquire cma device, port, gid attribute
+ * based on source ip address.
+ * @id_priv: cm_id which should be bound to cma device
+ *
+ * cma_acquire_dev_by_src_ip() binds cm id to cma device, port and GID attribute
+ * based on the source IP address. It returns 0 on success or an error code otherwise.
+ * It is applicable to active and passive side cm_id.
+ */
+static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
{
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
- struct cma_device *cma_dev;
+ const struct ib_gid_attr *sgid_attr;
union ib_gid gid, iboe_gid, *gidp;
+ struct cma_device *cma_dev;
+ enum ib_gid_type gid_type;
int ret = -ENODEV;
- u8 port;
+ u32 port;
if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
id_priv->id.ps == RDMA_PS_IPOIB)
return -EINVAL;
- mutex_lock(&lock);
rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
&iboe_gid);
memcpy(&gid, dev_addr->src_dev_addr +
- rdma_addr_gid_offset(dev_addr), sizeof gid);
-
- if (listen_id_priv) {
- cma_dev = listen_id_priv->cma_dev;
- port = listen_id_priv->id.port_num;
- gidp = rdma_protocol_roce(cma_dev->device, port) ?
- &iboe_gid : &gid;
-
- ret = cma_validate_port(cma_dev->device, port, gidp,
- dev_addr->dev_type);
- if (!ret) {
- id_priv->id.port_num = port;
- goto out;
+ rdma_addr_gid_offset(dev_addr), sizeof(gid));
+
+ mutex_lock(&lock);
+ list_for_each_entry(cma_dev, &dev_list, list) {
+ rdma_for_each_port (cma_dev->device, port) {
+ gidp = rdma_protocol_roce(cma_dev->device, port) ?
+ &iboe_gid : &gid;
+ gid_type = cma_dev->default_gid_type[port - 1];
+ sgid_attr = cma_validate_port(cma_dev->device, port,
+ gid_type, gidp, id_priv);
+ if (!IS_ERR(sgid_attr)) {
+ id_priv->id.port_num = port;
+ cma_bind_sgid_attr(id_priv, sgid_attr);
+ cma_attach_to_dev(id_priv, cma_dev);
+ ret = 0;
+ goto out;
+ }
}
}
+out:
+ mutex_unlock(&lock);
+ return ret;
+}
+
+/**
+ * cma_ib_acquire_dev - Acquire cma device, port and SGID attribute
+ * @id_priv: cm id to bind to cma device
+ * @listen_id_priv: listener cm id to match against
+ * @req: Pointer to req structure containing incoming
+ * request information
+ * cma_ib_acquire_dev() acquires the cma device, port and SGID attribute when
+ * the rdma device matches the listen_id and the incoming request. It also
+ * verifies that a GID table entry is present for the source address.
+ * Returns 0 on success, or an error code otherwise.
+ */
+static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
+ const struct rdma_id_private *listen_id_priv,
+ struct cma_req_info *req)
+{
+ struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+ const struct ib_gid_attr *sgid_attr;
+ enum ib_gid_type gid_type;
+ union ib_gid gid;
+
+ if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
+ id_priv->id.ps == RDMA_PS_IPOIB)
+ return -EINVAL;
+
+ if (rdma_protocol_roce(req->device, req->port))
+ rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+ &gid);
+ else
+ memcpy(&gid, dev_addr->src_dev_addr +
+ rdma_addr_gid_offset(dev_addr), sizeof(gid));
+
+ gid_type = listen_id_priv->cma_dev->default_gid_type[req->port - 1];
+ sgid_attr = cma_validate_port(req->device, req->port,
+ gid_type, &gid, id_priv);
+ if (IS_ERR(sgid_attr))
+ return PTR_ERR(sgid_attr);
+
+ id_priv->id.port_num = req->port;
+ cma_bind_sgid_attr(id_priv, sgid_attr);
+ /* Need to acquire the lock to protect against readers
+ * of cma_dev->id_list such as cma_netdev_callback() and
+ * cma_process_remove().
+ */
+ mutex_lock(&lock);
+ cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
+ mutex_unlock(&lock);
+ rdma_restrack_add(&id_priv->res);
+ return 0;
+}
+
+static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
+ const struct rdma_id_private *listen_id_priv)
+{
+ struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+ const struct ib_gid_attr *sgid_attr;
+ struct cma_device *cma_dev;
+ enum ib_gid_type gid_type;
+ int ret = -ENODEV;
+ union ib_gid gid;
+ u32 port;
+
+ if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
+ id_priv->id.ps == RDMA_PS_IPOIB)
+ return -EINVAL;
+
+ memcpy(&gid, dev_addr->src_dev_addr +
+ rdma_addr_gid_offset(dev_addr), sizeof(gid));
+
+ mutex_lock(&lock);
+
+ cma_dev = listen_id_priv->cma_dev;
+ port = listen_id_priv->id.port_num;
+ gid_type = listen_id_priv->gid_type;
+ sgid_attr = cma_validate_port(cma_dev->device, port,
+ gid_type, &gid, id_priv);
+ if (!IS_ERR(sgid_attr)) {
+ id_priv->id.port_num = port;
+ cma_bind_sgid_attr(id_priv, sgid_attr);
+ ret = 0;
+ goto out;
+ }
list_for_each_entry(cma_dev, &dev_list, list) {
- for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
- if (listen_id_priv &&
- listen_id_priv->cma_dev == cma_dev &&
+ rdma_for_each_port (cma_dev->device, port) {
+ if (listen_id_priv->cma_dev == cma_dev &&
listen_id_priv->id.port_num == port)
continue;
- gidp = rdma_protocol_roce(cma_dev->device, port) ?
- &iboe_gid : &gid;
-
- ret = cma_validate_port(cma_dev->device, port, gidp,
- dev_addr->dev_type);
- if (!ret) {
+ gid_type = cma_dev->default_gid_type[port - 1];
+ sgid_attr = cma_validate_port(cma_dev->device, port,
+ gid_type, &gid, id_priv);
+ if (!IS_ERR(sgid_attr)) {
id_priv->id.port_num = port;
+ cma_bind_sgid_attr(id_priv, sgid_attr);
+ ret = 0;
goto out;
}
}
}
out:
- if (!ret)
+ if (!ret) {
cma_attach_to_dev(id_priv, cma_dev);
+ rdma_restrack_add(&id_priv->res);
+ }
mutex_unlock(&lock);
return ret;
@@ -514,8 +931,10 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
struct cma_device *cma_dev, *cur_dev;
struct sockaddr_ib *addr;
union ib_gid gid, sgid, *dgid;
+ unsigned int p;
u16 pkey, index;
- u8 p;
+ enum ib_port_state port_state;
+ int ret;
int i;
cma_dev = NULL;
@@ -523,15 +942,25 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
dgid = (union ib_gid *) &addr->sib_addr;
pkey = ntohs(addr->sib_pkey);
+ mutex_lock(&lock);
list_for_each_entry(cur_dev, &dev_list, list) {
- for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
+ rdma_for_each_port (cur_dev->device, p) {
if (!rdma_cap_af_ib(cur_dev->device, p))
continue;
if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
continue;
- for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i, &gid); i++) {
+ if (ib_get_cached_port_state(cur_dev->device, p, &port_state))
+ continue;
+
+ for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len;
+ ++i) {
+ ret = rdma_query_gid(cur_dev->device, p, i,
+ &gid);
+ if (ret)
+ continue;
+
if (!memcmp(&gid, dgid, sizeof(gid))) {
cma_dev = cur_dev;
sgid = gid;
@@ -540,46 +969,44 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
}
if (!cma_dev && (gid.global.subnet_prefix ==
- dgid->global.subnet_prefix)) {
+ dgid->global.subnet_prefix) &&
+ port_state == IB_PORT_ACTIVE) {
cma_dev = cur_dev;
sgid = gid;
id_priv->id.port_num = p;
+ goto found;
}
}
}
}
-
- if (!cma_dev)
- return -ENODEV;
+ mutex_unlock(&lock);
+ return -ENODEV;
found:
cma_attach_to_dev(id_priv, cma_dev);
- addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
- memcpy(&addr->sib_addr, &sgid, sizeof sgid);
+ rdma_restrack_add(&id_priv->res);
+ mutex_unlock(&lock);
+ addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
+ memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
return 0;
}
-static void cma_deref_id(struct rdma_id_private *id_priv)
+static void cma_id_get(struct rdma_id_private *id_priv)
{
- if (atomic_dec_and_test(&id_priv->refcount))
- complete(&id_priv->comp);
+ refcount_inc(&id_priv->refcount);
}
-static int cma_disable_callback(struct rdma_id_private *id_priv,
- enum rdma_cm_state state)
+static void cma_id_put(struct rdma_id_private *id_priv)
{
- mutex_lock(&id_priv->handler_mutex);
- if (id_priv->state != state) {
- mutex_unlock(&id_priv->handler_mutex);
- return -EINVAL;
- }
- return 0;
+ if (refcount_dec_and_test(&id_priv->refcount))
+ complete(&id_priv->comp);
}
-struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
- void *context, enum rdma_port_space ps,
- enum ib_qp_type qp_type)
+static struct rdma_id_private *
+__rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
+ void *context, enum rdma_ucm_port_space ps,
+ enum ib_qp_type qp_type, const struct rdma_id_private *parent)
{
struct rdma_id_private *id_priv;
@@ -587,24 +1014,68 @@ struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
if (!id_priv)
return ERR_PTR(-ENOMEM);
- id_priv->owner = task_pid_nr(current);
id_priv->state = RDMA_CM_IDLE;
id_priv->id.context = context;
id_priv->id.event_handler = event_handler;
id_priv->id.ps = ps;
id_priv->id.qp_type = qp_type;
+ id_priv->tos_set = false;
+ id_priv->timeout_set = false;
+ id_priv->min_rnr_timer_set = false;
+ id_priv->gid_type = IB_GID_TYPE_IB;
spin_lock_init(&id_priv->lock);
mutex_init(&id_priv->qp_mutex);
init_completion(&id_priv->comp);
- atomic_set(&id_priv->refcount, 1);
+ refcount_set(&id_priv->refcount, 1);
mutex_init(&id_priv->handler_mutex);
+ INIT_LIST_HEAD(&id_priv->device_item);
+ INIT_LIST_HEAD(&id_priv->id_list_entry);
INIT_LIST_HEAD(&id_priv->listen_list);
INIT_LIST_HEAD(&id_priv->mc_list);
get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
+ id_priv->id.route.addr.dev_addr.net = get_net(net);
+ id_priv->seq_num &= 0x00ffffff;
+ INIT_WORK(&id_priv->id.net_work, cma_netevent_work_handler);
+
+ rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID);
+ if (parent)
+ rdma_restrack_parent_name(&id_priv->res, &parent->res);
+
+ return id_priv;
+}
+
+struct rdma_cm_id *
+__rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler,
+ void *context, enum rdma_ucm_port_space ps,
+ enum ib_qp_type qp_type, const char *caller)
+{
+ struct rdma_id_private *ret;
+
+ ret = __rdma_create_id(net, event_handler, context, ps, qp_type, NULL);
+ if (IS_ERR(ret))
+ return ERR_CAST(ret);
+
+ rdma_restrack_set_name(&ret->res, caller);
+ return &ret->id;
+}
+EXPORT_SYMBOL(__rdma_create_kernel_id);
+
+struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler,
+ void *context,
+ enum rdma_ucm_port_space ps,
+ enum ib_qp_type qp_type)
+{
+ struct rdma_id_private *ret;
+
+ ret = __rdma_create_id(current->nsproxy->net_ns, event_handler, context,
+ ps, qp_type, NULL);
+ if (IS_ERR(ret))
+ return ERR_CAST(ret);
- return &id_priv->id;
+ rdma_restrack_set_name(&ret->res, NULL);
+ return &ret->id;
}
-EXPORT_SYMBOL(rdma_create_id);
+EXPORT_SYMBOL(rdma_create_user_id);
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
@@ -653,26 +1124,34 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
- if (id->device != pd->device)
- return -EINVAL;
+ if (id->device != pd->device) {
+ ret = -EINVAL;
+ goto out_err;
+ }
+ qp_init_attr->port_num = id->port_num;
qp = ib_create_qp(pd, qp_init_attr);
- if (IS_ERR(qp))
- return PTR_ERR(qp);
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
+ goto out_err;
+ }
if (id->qp_type == IB_QPT_UD)
ret = cma_init_ud_qp(id_priv, qp);
else
ret = cma_init_conn_qp(id_priv, qp);
if (ret)
- goto err;
+ goto out_destroy;
id->qp = qp;
id_priv->qp_num = qp->qp_num;
id_priv->srq = (qp->srq != NULL);
+ trace_cm_qp_create(id_priv, pd, qp_init_attr, 0);
return 0;
-err:
+out_destroy:
ib_destroy_qp(qp);
+out_err:
+ trace_cm_qp_create(id_priv, pd, qp_init_attr, ret);
return ret;
}
EXPORT_SYMBOL(rdma_create_qp);
@@ -682,6 +1161,7 @@ void rdma_destroy_qp(struct rdma_cm_id *id)
struct rdma_id_private *id_priv;
id_priv = container_of(id, struct rdma_id_private, id);
+ trace_cm_qp_destroy(id_priv);
mutex_lock(&id_priv->qp_mutex);
ib_destroy_qp(id_priv->id.qp);
id_priv->id.qp = NULL;
@@ -694,7 +1174,6 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
{
struct ib_qp_attr qp_attr;
int qp_attr_mask, ret;
- union ib_gid sgid;
mutex_lock(&id_priv->qp_mutex);
if (!id_priv->id.qp) {
@@ -717,19 +1196,8 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
if (ret)
goto out;
- ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
- qp_attr.ah_attr.grh.sgid_index, &sgid);
- if (ret)
- goto out;
-
BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
- if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
- ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);
-
- if (ret)
- goto out;
- }
if (conn_param)
qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
@@ -802,7 +1270,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
if (id_priv->id.qp_type == IB_QPT_UD) {
- ret = cma_set_qkey(id_priv, 0);
+ ret = cma_set_default_qkey(id_priv);
if (ret)
return ret;
@@ -838,65 +1306,90 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
} else
ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
qp_attr_mask);
- } else
+ qp_attr->port_num = id_priv->id.port_num;
+ *qp_attr_mask |= IB_QP_PORT;
+ } else {
ret = -ENOSYS;
+ }
+
+ if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set)
+ qp_attr->timeout = id_priv->timeout;
+
+ if ((*qp_attr_mask & IB_QP_MIN_RNR_TIMER) && id_priv->min_rnr_timer_set)
+ qp_attr->min_rnr_timer = id_priv->min_rnr_timer;
return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);
-static inline int cma_zero_addr(struct sockaddr *addr)
+static inline bool cma_zero_addr(const struct sockaddr *addr)
{
switch (addr->sa_family) {
case AF_INET:
return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
case AF_INET6:
- return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr);
+ return ipv6_addr_any(&((struct sockaddr_in6 *)addr)->sin6_addr);
case AF_IB:
- return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr);
+ return ib_addr_any(&((struct sockaddr_ib *)addr)->sib_addr);
default:
- return 0;
+ return false;
}
}
-static inline int cma_loopback_addr(struct sockaddr *addr)
+static inline bool cma_loopback_addr(const struct sockaddr *addr)
{
switch (addr->sa_family) {
case AF_INET:
- return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
+ return ipv4_is_loopback(
+ ((struct sockaddr_in *)addr)->sin_addr.s_addr);
case AF_INET6:
- return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr);
+ return ipv6_addr_loopback(
+ &((struct sockaddr_in6 *)addr)->sin6_addr);
case AF_IB:
- return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr);
+ return ib_addr_loopback(
+ &((struct sockaddr_ib *)addr)->sib_addr);
default:
- return 0;
+ return false;
}
}
-static inline int cma_any_addr(struct sockaddr *addr)
+static inline bool cma_any_addr(const struct sockaddr *addr)
{
return cma_zero_addr(addr) || cma_loopback_addr(addr);
}
-static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
+static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst)
{
if (src->sa_family != dst->sa_family)
return -1;
switch (src->sa_family) {
case AF_INET:
- return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
- ((struct sockaddr_in *) dst)->sin_addr.s_addr;
- case AF_INET6:
- return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
- &((struct sockaddr_in6 *) dst)->sin6_addr);
+ return ((struct sockaddr_in *)src)->sin_addr.s_addr !=
+ ((struct sockaddr_in *)dst)->sin_addr.s_addr;
+ case AF_INET6: {
+ struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)src;
+ struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst;
+ bool link_local;
+
+ if (ipv6_addr_cmp(&src_addr6->sin6_addr,
+ &dst_addr6->sin6_addr))
+ return 1;
+ link_local = ipv6_addr_type(&dst_addr6->sin6_addr) &
+ IPV6_ADDR_LINKLOCAL;
+ /* Link local must match their scope_ids */
+ return link_local ? (src_addr6->sin6_scope_id !=
+ dst_addr6->sin6_scope_id) :
+ 0;
+ }
+
default:
return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
&((struct sockaddr_ib *) dst)->sib_addr);
}
}
-static __be16 cma_port(struct sockaddr *addr)
+static __be16 cma_port(const struct sockaddr *addr)
{
struct sockaddr_ib *sib;
@@ -914,15 +1407,15 @@ static __be16 cma_port(struct sockaddr *addr)
}
}
-static inline int cma_any_port(struct sockaddr *addr)
+static inline int cma_any_port(const struct sockaddr *addr)
{
return !cma_port(addr);
}
static void cma_save_ib_info(struct sockaddr *src_addr,
struct sockaddr *dst_addr,
- struct rdma_cm_id *listen_id,
- struct ib_sa_path_rec *path)
+ const struct rdma_cm_id *listen_id,
+ const struct sa_path_rec *path)
{
struct sockaddr_ib *listen_ib, *ib;
@@ -956,47 +1449,47 @@ static void cma_save_ib_info(struct sockaddr *src_addr,
}
}
-static void cma_save_ip4_info(struct sockaddr *src_addr,
- struct sockaddr *dst_addr,
+static void cma_save_ip4_info(struct sockaddr_in *src_addr,
+ struct sockaddr_in *dst_addr,
struct cma_hdr *hdr,
__be16 local_port)
{
- struct sockaddr_in *ip4;
-
if (src_addr) {
- ip4 = (struct sockaddr_in *)src_addr;
- ip4->sin_family = AF_INET;
- ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
- ip4->sin_port = local_port;
+ *src_addr = (struct sockaddr_in) {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = hdr->dst_addr.ip4.addr,
+ .sin_port = local_port,
+ };
}
if (dst_addr) {
- ip4 = (struct sockaddr_in *)dst_addr;
- ip4->sin_family = AF_INET;
- ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
- ip4->sin_port = hdr->port;
+ *dst_addr = (struct sockaddr_in) {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = hdr->src_addr.ip4.addr,
+ .sin_port = hdr->port,
+ };
}
}
-static void cma_save_ip6_info(struct sockaddr *src_addr,
- struct sockaddr *dst_addr,
+static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
+ struct sockaddr_in6 *dst_addr,
struct cma_hdr *hdr,
__be16 local_port)
{
- struct sockaddr_in6 *ip6;
-
if (src_addr) {
- ip6 = (struct sockaddr_in6 *)src_addr;
- ip6->sin6_family = AF_INET6;
- ip6->sin6_addr = hdr->dst_addr.ip6;
- ip6->sin6_port = local_port;
+ *src_addr = (struct sockaddr_in6) {
+ .sin6_family = AF_INET6,
+ .sin6_addr = hdr->dst_addr.ip6,
+ .sin6_port = local_port,
+ };
}
if (dst_addr) {
- ip6 = (struct sockaddr_in6 *)dst_addr;
- ip6->sin6_family = AF_INET6;
- ip6->sin6_addr = hdr->src_addr.ip6;
- ip6->sin6_port = hdr->port;
+ *dst_addr = (struct sockaddr_in6) {
+ .sin6_family = AF_INET6,
+ .sin6_addr = hdr->src_addr.ip6,
+ .sin6_port = hdr->port,
+ };
}
}
@@ -1007,7 +1500,7 @@ static u16 cma_port_from_service_id(__be64 service_id)
static int cma_save_ip_info(struct sockaddr *src_addr,
struct sockaddr *dst_addr,
- struct ib_cm_event *ib_event,
+ const struct ib_cm_event *ib_event,
__be64 service_id)
{
struct cma_hdr *hdr;
@@ -1021,10 +1514,12 @@ static int cma_save_ip_info(struct sockaddr *src_addr,
switch (cma_get_ip_ver(hdr)) {
case 4:
- cma_save_ip4_info(src_addr, dst_addr, hdr, port);
+ cma_save_ip4_info((struct sockaddr_in *)src_addr,
+ (struct sockaddr_in *)dst_addr, hdr, port);
break;
case 6:
- cma_save_ip6_info(src_addr, dst_addr, hdr, port);
+ cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
+ (struct sockaddr_in6 *)dst_addr, hdr, port);
break;
default:
return -EAFNOSUPPORT;
@@ -1035,8 +1530,8 @@ static int cma_save_ip_info(struct sockaddr *src_addr,
static int cma_save_net_info(struct sockaddr *src_addr,
struct sockaddr *dst_addr,
- struct rdma_cm_id *listen_id,
- struct ib_cm_event *ib_event,
+ const struct rdma_cm_id *listen_id,
+ const struct ib_cm_event *ib_event,
sa_family_t sa_family, __be64 service_id)
{
if (sa_family == AF_IB) {
@@ -1066,15 +1561,23 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event,
memcpy(&req->local_gid, &req_param->primary_path->sgid,
sizeof(req->local_gid));
req->has_gid = true;
- req->service_id = req_param->primary_path->service_id;
- req->pkey = req_param->bth_pkey;
+ req->service_id = req_param->primary_path->service_id;
+ req->pkey = be16_to_cpu(req_param->primary_path->pkey);
+ if (req->pkey != req_param->bth_pkey)
+ pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
+ "RDMA CMA: in the future this may cause the request to be dropped\n",
+ req_param->bth_pkey, req->pkey);
break;
case IB_CM_SIDR_REQ_RECEIVED:
req->device = sidr_param->listen_id->device;
req->port = sidr_param->port;
req->has_gid = false;
req->service_id = sidr_param->service_id;
- req->pkey = sidr_param->bth_pkey;
+ req->pkey = sidr_param->pkey;
+ if (req->pkey != sidr_param->bth_pkey)
+ pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n"
+ "RDMA CMA: in the future this may cause the request to be dropped\n",
+ sidr_param->bth_pkey, req->pkey);
break;
default:
return -EINVAL;
@@ -1101,16 +1604,13 @@ static bool validate_ipv4_net_dev(struct net_device *net_dev,
return false;
memset(&fl4, 0, sizeof(fl4));
- fl4.flowi4_iif = net_dev->ifindex;
+ fl4.flowi4_oif = net_dev->ifindex;
fl4.daddr = daddr;
fl4.saddr = saddr;
rcu_read_lock();
err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
- if (err)
- return false;
-
- ret = FIB_RES_DEV(res) == net_dev;
+ ret = err == 0 && FIB_RES_DEV(res) == net_dev;
rcu_read_unlock();
return ret;
@@ -1125,7 +1625,7 @@ static bool validate_ipv6_net_dev(struct net_device *net_dev,
IPV6_ADDR_LINKLOCAL;
struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
&src_addr->sin6_addr, net_dev->ifindex,
- strict);
+ NULL, strict);
bool ret;
if (!rt)
@@ -1163,12 +1663,36 @@ static bool validate_net_dev(struct net_device *net_dev,
}
}
-static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
- const struct cma_req_info *req)
+static struct net_device *
+roce_get_net_dev_by_cm_event(const struct ib_cm_event *ib_event)
{
- struct sockaddr_storage listen_addr_storage, src_addr_storage;
- struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage,
- *src_addr = (struct sockaddr *)&src_addr_storage;
+ const struct ib_gid_attr *sgid_attr = NULL;
+ struct net_device *ndev;
+
+ if (ib_event->event == IB_CM_REQ_RECEIVED)
+ sgid_attr = ib_event->param.req_rcvd.ppath_sgid_attr;
+ else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
+ sgid_attr = ib_event->param.sidr_req_rcvd.sgid_attr;
+
+ if (!sgid_attr)
+ return NULL;
+
+ rcu_read_lock();
+ ndev = rdma_read_gid_attr_ndev_rcu(sgid_attr);
+ if (IS_ERR(ndev))
+ ndev = NULL;
+ else
+ dev_hold(ndev);
+ rcu_read_unlock();
+ return ndev;
+}
+
+static struct net_device *cma_get_net_dev(const struct ib_cm_event *ib_event,
+ struct cma_req_info *req)
+{
+ struct sockaddr *listen_addr =
+ (struct sockaddr *)&req->listen_addr_storage;
+ struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage;
struct net_device *net_dev;
const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
int err;
@@ -1178,20 +1702,19 @@ static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
if (err)
return ERR_PTR(err);
- net_dev = ib_get_net_dev_by_params(req->device, req->port, req->pkey,
- gid, listen_addr);
+ if (rdma_protocol_roce(req->device, req->port))
+ net_dev = roce_get_net_dev_by_cm_event(ib_event);
+ else
+ net_dev = ib_get_net_dev_by_params(req->device, req->port,
+ req->pkey,
+ gid, listen_addr);
if (!net_dev)
return ERR_PTR(-ENODEV);
- if (!validate_net_dev(net_dev, listen_addr, src_addr)) {
- dev_put(net_dev);
- return ERR_PTR(-EHOSTUNREACH);
- }
-
return net_dev;
}
-static enum rdma_port_space rdma_ps_from_service_id(__be64 service_id)
+static enum rdma_ucm_port_space rdma_ps_from_service_id(__be64 service_id)
{
return (be64_to_cpu(service_id) >> 16) & 0xffff;
}
@@ -1232,18 +1755,52 @@ static bool cma_match_private_data(struct rdma_id_private *id_priv,
return true;
}
-static bool cma_match_net_dev(const struct rdma_id_private *id_priv,
- const struct net_device *net_dev)
+static bool cma_protocol_roce(const struct rdma_cm_id *id)
+{
+ struct ib_device *device = id->device;
+ const u32 port_num = id->port_num ?: rdma_start_port(device);
+
+ return rdma_protocol_roce(device, port_num);
+}
+
+static bool cma_is_req_ipv6_ll(const struct cma_req_info *req)
+{
+ const struct sockaddr *daddr =
+ (const struct sockaddr *)&req->listen_addr_storage;
+ const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
+
+ /* Returns true if the req is for IPv6 link local */
+ return (daddr->sa_family == AF_INET6 &&
+ (ipv6_addr_type(&daddr6->sin6_addr) & IPV6_ADDR_LINKLOCAL));
+}
+
+static bool cma_match_net_dev(const struct rdma_cm_id *id,
+ const struct net_device *net_dev,
+ const struct cma_req_info *req)
{
- const struct rdma_addr *addr = &id_priv->id.route.addr;
+ const struct rdma_addr *addr = &id->route.addr;
if (!net_dev)
/* This request is an AF_IB request */
- return addr->src_addr.ss_family == AF_IB;
+ return (!id->port_num || id->port_num == req->port) &&
+ (addr->src_addr.ss_family == AF_IB);
- return !addr->dev_addr.bound_dev_if ||
- (net_eq(dev_net(net_dev), &init_net) &&
- addr->dev_addr.bound_dev_if == net_dev->ifindex);
+ /*
+ * If the request is not for IPv6 link local, allow the request to match
+ * any netdevice of the single-port or multi-port rdma device.
+ */
+ if (!cma_is_req_ipv6_ll(req))
+ return true;
+ /*
+ * Net namespaces must match, and if the listener is listening
+ * on a specific netdevice then the netdevice must match as well.
+ */
+ if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
+ (!!addr->dev_addr.bound_dev_if ==
+ (addr->dev_addr.bound_dev_if == net_dev->ifindex)))
+ return true;
+ else
+ return false;
}
static struct rdma_id_private *cma_find_listener(
@@ -1255,19 +1812,22 @@ static struct rdma_id_private *cma_find_listener(
{
struct rdma_id_private *id_priv, *id_priv_dev;
+ lockdep_assert_held(&lock);
+
if (!bind_list)
return ERR_PTR(-EINVAL);
hlist_for_each_entry(id_priv, &bind_list->owners, node) {
if (cma_match_private_data(id_priv, ib_event->private_data)) {
if (id_priv->id.device == cm_id->device &&
- cma_match_net_dev(id_priv, net_dev))
+ cma_match_net_dev(&id_priv->id, net_dev, req))
return id_priv;
list_for_each_entry(id_priv_dev,
&id_priv->listen_list,
- listen_list) {
+ listen_item) {
if (id_priv_dev->id.device == cm_id->device &&
- cma_match_net_dev(id_priv_dev, net_dev))
+ cma_match_net_dev(&id_priv_dev->id,
+ net_dev, req))
return id_priv_dev;
}
}
@@ -1276,20 +1836,21 @@ static struct rdma_id_private *cma_find_listener(
return ERR_PTR(-EINVAL);
}
-static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
- struct ib_cm_event *ib_event,
- struct net_device **net_dev)
+static struct rdma_id_private *
+cma_ib_id_from_event(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *ib_event,
+ struct cma_req_info *req,
+ struct net_device **net_dev)
{
- struct cma_req_info req;
struct rdma_bind_list *bind_list;
struct rdma_id_private *id_priv;
int err;
- err = cma_save_req_info(ib_event, &req);
+ err = cma_save_req_info(ib_event, req);
if (err)
return ERR_PTR(err);
- *net_dev = cma_get_net_dev(ib_event, &req);
+ *net_dev = cma_get_net_dev(ib_event, req);
if (IS_ERR(*net_dev)) {
if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
/* Assuming the protocol is AF_IB */
@@ -1299,18 +1860,57 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
}
}
- bind_list = cma_ps_find(rdma_ps_from_service_id(req.service_id),
- cma_port_from_service_id(req.service_id));
- id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
- if (IS_ERR(id_priv)) {
+ mutex_lock(&lock);
+ /*
+ * The net namespace might be getting deleted while the route lookup and
+ * cm_id lookup are in progress. Therefore, perform the netdevice
+ * validation and cm_id lookup under the rcu lock.
+ * The rcu lock, along with the netdevice state check, synchronizes with a
+ * netdevice migrating to a different net namespace and also avoids the
+ * case where the net namespace gets deleted while the lookup is in
+ * progress.
+ * If the device state is not IFF_UP, its properties such as ifindex
+ * and nd_net cannot be trusted to remain valid without the rcu lock.
+ * net/core/dev.c change_net_namespace() synchronizes with ongoing
+ * operations on a net device after the device is closed, using
+ * synchronize_net().
+ */
+ rcu_read_lock();
+ if (*net_dev) {
+ /*
+ * If the netdevice is down, it is likely administratively down or it
+ * might be migrating to a different namespace.
+ * In that case avoid further processing, as the net namespace
+ * or ifindex may change.
+ */
+ if (((*net_dev)->flags & IFF_UP) == 0) {
+ id_priv = ERR_PTR(-EHOSTUNREACH);
+ goto err;
+ }
+
+ if (!validate_net_dev(*net_dev,
+ (struct sockaddr *)&req->src_addr_storage,
+ (struct sockaddr *)&req->listen_addr_storage)) {
+ id_priv = ERR_PTR(-EHOSTUNREACH);
+ goto err;
+ }
+ }
+
+ bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
+ rdma_ps_from_service_id(req->service_id),
+ cma_port_from_service_id(req->service_id));
+ id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
+err:
+ rcu_read_unlock();
+ mutex_unlock(&lock);
+ if (IS_ERR(id_priv) && *net_dev) {
dev_put(*net_dev);
*net_dev = NULL;
}
-
return id_priv;
}
-static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
+static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv)
{
return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
}
@@ -1323,28 +1923,36 @@ static void cma_cancel_route(struct rdma_id_private *id_priv)
}
}
-static void cma_cancel_listens(struct rdma_id_private *id_priv)
+static void _cma_cancel_listens(struct rdma_id_private *id_priv)
{
struct rdma_id_private *dev_id_priv;
+ lockdep_assert_held(&lock);
+
/*
* Remove from listen_any_list to prevent added devices from spawning
* additional listen requests.
*/
- mutex_lock(&lock);
- list_del(&id_priv->list);
+ list_del_init(&id_priv->listen_any_item);
while (!list_empty(&id_priv->listen_list)) {
- dev_id_priv = list_entry(id_priv->listen_list.next,
- struct rdma_id_private, listen_list);
+ dev_id_priv =
+ list_first_entry(&id_priv->listen_list,
+ struct rdma_id_private, listen_item);
/* sync with device removal to avoid duplicate destruction */
- list_del_init(&dev_id_priv->list);
- list_del(&dev_id_priv->listen_list);
+ list_del_init(&dev_id_priv->device_item);
+ list_del_init(&dev_id_priv->listen_item);
mutex_unlock(&lock);
rdma_destroy_id(&dev_id_priv->id);
mutex_lock(&lock);
}
+}
+
+static void cma_cancel_listens(struct rdma_id_private *id_priv)
+{
+ mutex_lock(&lock);
+ _cma_cancel_listens(id_priv);
mutex_unlock(&lock);
}
@@ -1353,6 +1961,14 @@ static void cma_cancel_operation(struct rdma_id_private *id_priv,
{
switch (state) {
case RDMA_CM_ADDR_QUERY:
+ /*
+ * We can avoid calling rdma_addr_cancel() based on the state;
+ * only RDMA_CM_ADDR_QUERY has work that could still execute.
+ * Note that the addr_handler work could still be exiting
+ * outside this state; however, due to the interaction with the
+ * handler_mutex the work is guaranteed not to touch id_priv
+ * during exit.
+ */
rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
break;
case RDMA_CM_ROUTE_QUERY:
@@ -1370,6 +1986,7 @@ static void cma_cancel_operation(struct rdma_id_private *id_priv,
static void cma_release_port(struct rdma_id_private *id_priv)
{
struct rdma_bind_list *bind_list = id_priv->bind_list;
+ struct net *net = id_priv->id.route.addr.dev_addr.net;
if (!bind_list)
return;
@@ -1377,45 +1994,66 @@ static void cma_release_port(struct rdma_id_private *id_priv)
mutex_lock(&lock);
hlist_del(&id_priv->node);
if (hlist_empty(&bind_list->owners)) {
- cma_ps_remove(bind_list->ps, bind_list->port);
+ cma_ps_remove(net, bind_list->ps, bind_list->port);
kfree(bind_list);
}
mutex_unlock(&lock);
}
+static void destroy_mc(struct rdma_id_private *id_priv,
+ struct cma_multicast *mc)
+{
+ bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
+
+ if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
+ ib_sa_free_multicast(mc->sa_mc);
+
+ if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
+ struct rdma_dev_addr *dev_addr =
+ &id_priv->id.route.addr.dev_addr;
+ struct net_device *ndev = NULL;
+
+ if (dev_addr->bound_dev_if)
+ ndev = dev_get_by_index(dev_addr->net,
+ dev_addr->bound_dev_if);
+ if (ndev && !send_only) {
+ enum ib_gid_type gid_type;
+ union ib_gid mgid;
+
+ gid_type = id_priv->cma_dev->default_gid_type
+ [id_priv->id.port_num -
+ rdma_start_port(
+ id_priv->cma_dev->device)];
+ cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid,
+ gid_type);
+ cma_igmp_send(ndev, &mgid, false);
+ }
+ dev_put(ndev);
+
+ cancel_work_sync(&mc->iboe_join.work);
+ }
+ kfree(mc);
+}
+
static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
struct cma_multicast *mc;
while (!list_empty(&id_priv->mc_list)) {
- mc = container_of(id_priv->mc_list.next,
- struct cma_multicast, list);
+ mc = list_first_entry(&id_priv->mc_list, struct cma_multicast,
+ list);
list_del(&mc->list);
- if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
- id_priv->id.port_num)) {
- ib_sa_free_multicast(mc->multicast.ib);
- kfree(mc);
- } else
- kref_put(&mc->mcref, release_mc);
+ destroy_mc(id_priv, mc);
}
}
-void rdma_destroy_id(struct rdma_cm_id *id)
+static void _destroy_id(struct rdma_id_private *id_priv,
+ enum rdma_cm_state state)
{
- struct rdma_id_private *id_priv;
- enum rdma_cm_state state;
-
- id_priv = container_of(id, struct rdma_id_private, id);
- state = cma_exch(id_priv, RDMA_CM_DESTROYING);
cma_cancel_operation(id_priv, state);
- /*
- * Wait for any active callback to finish. New callbacks will find
- * the id_priv state set to destroying and abort.
- */
- mutex_lock(&id_priv->handler_mutex);
- mutex_unlock(&id_priv->handler_mutex);
-
+ rdma_restrack_del(&id_priv->res);
+ cma_remove_id_from_tree(id_priv);
if (id_priv->cma_dev) {
if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
if (id_priv->cm_id.ib)
@@ -1429,15 +2067,56 @@ void rdma_destroy_id(struct rdma_cm_id *id)
}
cma_release_port(id_priv);
- cma_deref_id(id_priv);
+ cma_id_put(id_priv);
wait_for_completion(&id_priv->comp);
if (id_priv->internal_id)
- cma_deref_id(id_priv->id.context);
+ cma_id_put(id_priv->id.context);
kfree(id_priv->id.route.path_rec);
+ kfree(id_priv->id.route.path_rec_inbound);
+ kfree(id_priv->id.route.path_rec_outbound);
+ kfree(id_priv->id.route.service_recs);
+
+ put_net(id_priv->id.route.addr.dev_addr.net);
kfree(id_priv);
}
+
+/*
+ * destroy an ID from within the handler_mutex. This ensures that no other
+ * handlers can start running concurrently.
+ */
+static void destroy_id_handler_unlock(struct rdma_id_private *id_priv)
+ __releases(&id_priv->handler_mutex)
+{
+ enum rdma_cm_state state;
+ unsigned long flags;
+
+ trace_cm_id_destroy(id_priv);
+
+ /*
+ * Setting the state to destroyed under the handler mutex provides a
+ * fence against calling handler callbacks. If this is invoked due to
+ * the failure of a handler callback, it guarantees that no future
+ * handlers will be called.
+ */
+ lockdep_assert_held(&id_priv->handler_mutex);
+ spin_lock_irqsave(&id_priv->lock, flags);
+ state = id_priv->state;
+ id_priv->state = RDMA_CM_DESTROYING;
+ spin_unlock_irqrestore(&id_priv->lock, flags);
+ mutex_unlock(&id_priv->handler_mutex);
+ _destroy_id(id_priv, state);
+}
+
+void rdma_destroy_id(struct rdma_cm_id *id)
+{
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
+
+ mutex_lock(&id_priv->handler_mutex);
+ destroy_id_handler_unlock(id_priv);
+}
EXPORT_SYMBOL(rdma_destroy_id);
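A hedged illustration of the destroy flow above (not part of the patch): a ULP event handler must not call rdma_destroy_id() on the id it was invoked for; returning a non-zero value instead lets the core destroy the id through destroy_id_handler_unlock() while handler_mutex is still held. A minimal sketch with a hypothetical handler name:

	/* sketch only; assumes <rdma/rdma_cm.h> and an id created with this handler */
	static int my_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *ev)
	{
		if (ev->event == RDMA_CM_EVENT_REJECTED)
			return -ECONNRESET;	/* non-zero: core destroys the id */
		return 0;			/* zero: id stays alive */
	}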
static int cma_rep_recv(struct rdma_id_private *id_priv)
@@ -1452,20 +2131,23 @@ static int cma_rep_recv(struct rdma_id_private *id_priv)
if (ret)
goto reject;
+ trace_cm_send_rtu(id_priv);
ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
if (ret)
goto reject;
return 0;
reject:
+ pr_debug_ratelimited("RDMA CM: CONNECT_ERROR: failed to handle reply. status %d\n", ret);
cma_modify_qp_err(id_priv);
+ trace_cm_send_rej(id_priv);
ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
NULL, 0, NULL, 0);
return ret;
}
static void cma_set_rep_event_data(struct rdma_cm_event *event,
- struct ib_cm_rep_event_param *rep_data,
+ const struct ib_cm_rep_event_param *rep_data,
void *private_data)
{
event->param.conn.private_data = private_data;
@@ -1476,21 +2158,40 @@ static void cma_set_rep_event_data(struct rdma_cm_event *event,
event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
event->param.conn.srq = rep_data->srq;
event->param.conn.qp_num = rep_data->remote_qpn;
+
+ event->ece.vendor_id = rep_data->ece.vendor_id;
+ event->ece.attr_mod = rep_data->ece.attr_mod;
}
-static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
+static int cma_cm_event_handler(struct rdma_id_private *id_priv,
+ struct rdma_cm_event *event)
+{
+ int ret;
+
+ lockdep_assert_held(&id_priv->handler_mutex);
+
+ trace_cm_event_handler(id_priv, event);
+ ret = id_priv->id.event_handler(&id_priv->id, event);
+ trace_cm_event_done(id_priv, event, ret);
+ return ret;
+}
+
+static int cma_ib_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *ib_event)
{
struct rdma_id_private *id_priv = cm_id->context;
- struct rdma_cm_event event;
- int ret = 0;
+ struct rdma_cm_event event = {};
+ enum rdma_cm_state state;
+ int ret;
+ mutex_lock(&id_priv->handler_mutex);
+ state = READ_ONCE(id_priv->state);
if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
- cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
+ state != RDMA_CM_CONNECT) ||
(ib_event->event == IB_CM_TIMEWAIT_EXIT &&
- cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
- return 0;
+ state != RDMA_CM_DISCONNECT))
+ goto out;
- memset(&event, 0, sizeof event);
switch (ib_event->event) {
case IB_CM_REQ_ERROR:
case IB_CM_REP_ERROR:
@@ -1498,6 +2199,11 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
event.status = -ETIMEDOUT;
break;
case IB_CM_REP_RECEIVED:
+ if (state == RDMA_CM_CONNECT &&
+ (id_priv->id.qp_type != IB_QPT_UD)) {
+ trace_cm_prepare_mra(id_priv);
+ ib_prepare_cm_mra(cm_id);
+ }
if (id_priv->id.qp) {
event.status = cma_rep_recv(id_priv);
event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
@@ -1513,7 +2219,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
event.event = RDMA_CM_EVENT_ESTABLISHED;
break;
case IB_CM_DREQ_ERROR:
- event.status = -ETIMEDOUT; /* fall through */
+ event.status = -ETIMEDOUT;
+ fallthrough;
case IB_CM_DREQ_RECEIVED:
case IB_CM_DREP_RECEIVED:
if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
@@ -1528,6 +2235,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
/* ignore event */
goto out;
case IB_CM_REJ_RECEIVED:
+ pr_debug_ratelimited("RDMA CM: REJECTED: %s\n", rdma_reject_msg(&id_priv->id,
+ ib_event->param.rej_rcvd.reason));
cma_modify_qp_err(id_priv);
event.status = ib_event->param.rej_rcvd.reason;
event.event = RDMA_CM_EVENT_REJECTED;
@@ -1535,69 +2244,77 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
break;
default:
- printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
+ pr_err("RDMA CMA: unexpected IB CM event: %d\n",
ib_event->event);
goto out;
}
- ret = id_priv->id.event_handler(&id_priv->id, &event);
+ ret = cma_cm_event_handler(id_priv, &event);
if (ret) {
/* Destroy the CM ID by returning a non-zero value. */
id_priv->cm_id.ib = NULL;
- cma_exch(id_priv, RDMA_CM_DESTROYING);
- mutex_unlock(&id_priv->handler_mutex);
- rdma_destroy_id(&id_priv->id);
+ destroy_id_handler_unlock(id_priv);
return ret;
}
out:
mutex_unlock(&id_priv->handler_mutex);
- return ret;
+ return 0;
}
-static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
- struct ib_cm_event *ib_event,
- struct net_device *net_dev)
+static struct rdma_id_private *
+cma_ib_new_conn_id(const struct rdma_cm_id *listen_id,
+ const struct ib_cm_event *ib_event,
+ struct net_device *net_dev)
{
+ struct rdma_id_private *listen_id_priv;
struct rdma_id_private *id_priv;
struct rdma_cm_id *id;
struct rdma_route *rt;
const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
+ struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path;
const __be64 service_id =
- ib_event->param.req_rcvd.primary_path->service_id;
+ ib_event->param.req_rcvd.primary_path->service_id;
int ret;
- id = rdma_create_id(listen_id->event_handler, listen_id->context,
- listen_id->ps, ib_event->param.req_rcvd.qp_type);
- if (IS_ERR(id))
+ listen_id_priv = container_of(listen_id, struct rdma_id_private, id);
+ id_priv = __rdma_create_id(listen_id->route.addr.dev_addr.net,
+ listen_id->event_handler, listen_id->context,
+ listen_id->ps,
+ ib_event->param.req_rcvd.qp_type,
+ listen_id_priv);
+ if (IS_ERR(id_priv))
return NULL;
- id_priv = container_of(id, struct rdma_id_private, id);
+ id = &id_priv->id;
if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
(struct sockaddr *)&id->route.addr.dst_addr,
listen_id, ib_event, ss_family, service_id))
goto err;
rt = &id->route;
- rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
- rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
- GFP_KERNEL);
+ rt->num_pri_alt_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
+ rt->path_rec = kmalloc_array(rt->num_pri_alt_paths,
+ sizeof(*rt->path_rec), GFP_KERNEL);
if (!rt->path_rec)
goto err;
- rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
- if (rt->num_paths == 2)
+ rt->path_rec[0] = *path;
+ if (rt->num_pri_alt_paths == 2)
rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
if (net_dev) {
- ret = rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL);
- if (ret)
- goto err;
+ rdma_copy_src_l2_addr(&rt->addr.dev_addr, net_dev);
} else {
- /* An AF_IB connection */
- WARN_ON_ONCE(ss_family != AF_IB);
-
- cma_translate_ib((struct sockaddr_ib *)cma_src_addr(id_priv),
- &rt->addr.dev_addr);
+ if (!cma_protocol_roce(listen_id) &&
+ cma_any_addr(cma_src_addr(id_priv))) {
+ rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
+ rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
+ ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
+ } else if (!cma_any_addr(cma_src_addr(id_priv))) {
+ ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
+ if (ret)
+ goto err;
+ }
}
rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
@@ -1609,21 +2326,26 @@ err:
return NULL;
}
-static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
- struct ib_cm_event *ib_event,
- struct net_device *net_dev)
+static struct rdma_id_private *
+cma_ib_new_udp_id(const struct rdma_cm_id *listen_id,
+ const struct ib_cm_event *ib_event,
+ struct net_device *net_dev)
{
+ const struct rdma_id_private *listen_id_priv;
struct rdma_id_private *id_priv;
struct rdma_cm_id *id;
const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
+ struct net *net = listen_id->route.addr.dev_addr.net;
int ret;
- id = rdma_create_id(listen_id->event_handler, listen_id->context,
- listen_id->ps, IB_QPT_UD);
- if (IS_ERR(id))
+ listen_id_priv = container_of(listen_id, struct rdma_id_private, id);
+ id_priv = __rdma_create_id(net, listen_id->event_handler,
+ listen_id->context, listen_id->ps, IB_QPT_UD,
+ listen_id_priv);
+ if (IS_ERR(id_priv))
return NULL;
- id_priv = container_of(id, struct rdma_id_private, id);
+ id = &id_priv->id;
if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
(struct sockaddr *)&id->route.addr.dst_addr,
listen_id, ib_event, ss_family,
@@ -1631,17 +2353,14 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
goto err;
if (net_dev) {
- ret = rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL);
- if (ret)
- goto err;
+ rdma_copy_src_l2_addr(&id->route.addr.dev_addr, net_dev);
} else {
- /* An AF_IB connection */
- WARN_ON_ONCE(ss_family != AF_IB);
-
- if (!cma_any_addr(cma_src_addr(id_priv)))
- cma_translate_ib((struct sockaddr_ib *)
- cma_src_addr(id_priv),
- &id->route.addr.dev_addr);
+ if (!cma_any_addr(cma_src_addr(id_priv))) {
+ ret = cma_translate_addr(cma_src_addr(id_priv),
+ &id->route.addr.dev_addr);
+ if (ret)
+ goto err;
+ }
}
id_priv->state = RDMA_CM_CONNECT;
@@ -1652,7 +2371,7 @@ err:
}
static void cma_set_req_event_data(struct rdma_cm_event *event,
- struct ib_cm_req_event_param *req_data,
+ const struct ib_cm_req_event_param *req_data,
void *private_data, int offset)
{
event->param.conn.private_data = private_data + offset;
@@ -1664,9 +2383,13 @@ static void cma_set_req_event_data(struct rdma_cm_event *event,
event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
event->param.conn.srq = req_data->srq;
event->param.conn.qp_num = req_data->remote_qpn;
+
+ event->ece.vendor_id = req_data->ece.vendor_id;
+ event->ece.attr_mod = req_data->ece.attr_mod;
}
-static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
+static int cma_ib_check_req_qp_type(const struct rdma_cm_id *id,
+ const struct ib_cm_event *ib_event)
{
return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
(ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
@@ -1675,93 +2398,81 @@ static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_e
(!id->qp_type));
}
-static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
+static int cma_ib_req_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *ib_event)
{
- struct rdma_id_private *listen_id, *conn_id;
- struct rdma_cm_event event;
+ struct rdma_id_private *listen_id, *conn_id = NULL;
+ struct rdma_cm_event event = {};
+ struct cma_req_info req = {};
struct net_device *net_dev;
- int offset, ret;
+ u8 offset;
+ int ret;
- listen_id = cma_id_from_event(cm_id, ib_event, &net_dev);
+ listen_id = cma_ib_id_from_event(cm_id, ib_event, &req, &net_dev);
if (IS_ERR(listen_id))
return PTR_ERR(listen_id);
- if (!cma_check_req_qp_type(&listen_id->id, ib_event)) {
+ trace_cm_req_handler(listen_id, ib_event->event);
+ if (!cma_ib_check_req_qp_type(&listen_id->id, ib_event)) {
ret = -EINVAL;
goto net_dev_put;
}
- if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) {
+ mutex_lock(&listen_id->handler_mutex);
+ if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) {
ret = -ECONNABORTED;
- goto net_dev_put;
+ goto err_unlock;
}
- memset(&event, 0, sizeof event);
offset = cma_user_data_offset(listen_id);
event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
- conn_id = cma_new_udp_id(&listen_id->id, ib_event, net_dev);
+ conn_id = cma_ib_new_udp_id(&listen_id->id, ib_event, net_dev);
event.param.ud.private_data = ib_event->private_data + offset;
event.param.ud.private_data_len =
IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
} else {
- conn_id = cma_new_conn_id(&listen_id->id, ib_event, net_dev);
+ conn_id = cma_ib_new_conn_id(&listen_id->id, ib_event, net_dev);
cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
ib_event->private_data, offset);
}
if (!conn_id) {
ret = -ENOMEM;
- goto err1;
+ goto err_unlock;
}
mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
- ret = cma_acquire_dev(conn_id, listen_id);
- if (ret)
- goto err2;
+ ret = cma_ib_acquire_dev(conn_id, listen_id, &req);
+ if (ret) {
+ destroy_id_handler_unlock(conn_id);
+ goto err_unlock;
+ }
conn_id->cm_id.ib = cm_id;
cm_id->context = conn_id;
cm_id->cm_handler = cma_ib_handler;
- /*
- * Protect against the user destroying conn_id from another thread
- * until we're done accessing it.
- */
- atomic_inc(&conn_id->refcount);
- ret = conn_id->id.event_handler(&conn_id->id, &event);
- if (ret)
- goto err3;
- /*
- * Acquire mutex to prevent user executing rdma_destroy_id()
- * while we're accessing the cm_id.
- */
- mutex_lock(&lock);
- if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
- (conn_id->id.qp_type != IB_QPT_UD))
- ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
- mutex_unlock(&lock);
- mutex_unlock(&conn_id->handler_mutex);
- mutex_unlock(&listen_id->handler_mutex);
- cma_deref_id(conn_id);
- if (net_dev)
- dev_put(net_dev);
- return 0;
+ ret = cma_cm_event_handler(conn_id, &event);
+ if (ret) {
+ /* Destroy the CM ID by returning a non-zero value. */
+ conn_id->cm_id.ib = NULL;
+ mutex_unlock(&listen_id->handler_mutex);
+ destroy_id_handler_unlock(conn_id);
+ goto net_dev_put;
+ }
-err3:
- cma_deref_id(conn_id);
- /* Destroy the CM ID by returning a non-zero value. */
- conn_id->cm_id.ib = NULL;
-err2:
- cma_exch(conn_id, RDMA_CM_DESTROYING);
+ if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT &&
+ conn_id->id.qp_type != IB_QPT_UD) {
+ trace_cm_prepare_mra(cm_id->context);
+ ib_prepare_cm_mra(cm_id);
+ }
mutex_unlock(&conn_id->handler_mutex);
-err1:
+
+err_unlock:
mutex_unlock(&listen_id->handler_mutex);
- if (conn_id)
- rdma_destroy_id(&conn_id->id);
net_dev_put:
- if (net_dev)
- dev_put(net_dev);
+ dev_put(net_dev);
return ret;
}
@@ -1775,18 +2486,45 @@ __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
}
EXPORT_SYMBOL(rdma_get_service_id);
+void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid,
+ union ib_gid *dgid)
+{
+ struct rdma_addr *addr = &cm_id->route.addr;
+
+ if (!cm_id->device) {
+ if (sgid)
+ memset(sgid, 0, sizeof(*sgid));
+ if (dgid)
+ memset(dgid, 0, sizeof(*dgid));
+ return;
+ }
+
+ if (rdma_protocol_roce(cm_id->device, cm_id->port_num)) {
+ if (sgid)
+ rdma_ip2gid((struct sockaddr *)&addr->src_addr, sgid);
+ if (dgid)
+ rdma_ip2gid((struct sockaddr *)&addr->dst_addr, dgid);
+ } else {
+ if (sgid)
+ rdma_addr_get_sgid(&addr->dev_addr, sgid);
+ if (dgid)
+ rdma_addr_get_dgid(&addr->dev_addr, dgid);
+ }
+}
+EXPORT_SYMBOL(rdma_read_gids);
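A brief usage sketch for the newly exported rdma_read_gids() (illustrative, not part of the diff): either output pointer may be NULL, and for RoCE the GIDs are derived from the resolved IP addresses, so the call is meaningful once address resolution has completed.

	union ib_gid sgid, dgid;

	rdma_read_gids(cm_id, &sgid, &dgid);	/* both source and destination GID */
	rdma_read_gids(cm_id, NULL, &dgid);	/* destination GID only */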
+
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
struct rdma_id_private *id_priv = iw_id->context;
- struct rdma_cm_event event;
+ struct rdma_cm_event event = {};
int ret = 0;
struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
- if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
- return 0;
+ mutex_lock(&id_priv->handler_mutex);
+ if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
+ goto out;
- memset(&event, 0, sizeof event);
switch (iw_event->event) {
case IW_CM_EVENT_CLOSE:
event.event = RDMA_CM_EVENT_DISCONNECTED;
@@ -1820,22 +2558,21 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
event.param.conn.responder_resources = iw_event->ord;
break;
default:
- BUG_ON(1);
+ goto out;
}
event.status = iw_event->status;
event.param.conn.private_data = iw_event->private_data;
event.param.conn.private_data_len = iw_event->private_data_len;
- ret = id_priv->id.event_handler(&id_priv->id, &event);
+ ret = cma_cm_event_handler(id_priv, &event);
if (ret) {
/* Destroy the CM ID by returning a non-zero value. */
id_priv->cm_id.iw = NULL;
- cma_exch(id_priv, RDMA_CM_DESTROYING);
- mutex_unlock(&id_priv->handler_mutex);
- rdma_destroy_id(&id_priv->id);
+ destroy_id_handler_unlock(id_priv);
return ret;
}
+out:
mutex_unlock(&id_priv->handler_mutex);
return ret;
}
@@ -1843,42 +2580,48 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
static int iw_conn_req_handler(struct iw_cm_id *cm_id,
struct iw_cm_event *iw_event)
{
- struct rdma_cm_id *new_cm_id;
struct rdma_id_private *listen_id, *conn_id;
- struct rdma_cm_event event;
- int ret;
- struct ib_device_attr attr;
+ struct rdma_cm_event event = {};
+ int ret = -ECONNABORTED;
struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
+ event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
+ event.param.conn.private_data = iw_event->private_data;
+ event.param.conn.private_data_len = iw_event->private_data_len;
+ event.param.conn.initiator_depth = iw_event->ird;
+ event.param.conn.responder_resources = iw_event->ord;
+
listen_id = cm_id->context;
- if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
- return -ECONNABORTED;
+
+ mutex_lock(&listen_id->handler_mutex);
+ if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN)
+ goto out;
/* Create a new RDMA id for the new IW CM ID */
- new_cm_id = rdma_create_id(listen_id->id.event_handler,
- listen_id->id.context,
- RDMA_PS_TCP, IB_QPT_RC);
- if (IS_ERR(new_cm_id)) {
+ conn_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net,
+ listen_id->id.event_handler,
+ listen_id->id.context, RDMA_PS_TCP,
+ IB_QPT_RC, listen_id);
+ if (IS_ERR(conn_id)) {
ret = -ENOMEM;
goto out;
}
- conn_id = container_of(new_cm_id, struct rdma_id_private, id);
mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
conn_id->state = RDMA_CM_CONNECT;
- ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL);
+ ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
if (ret) {
- mutex_unlock(&conn_id->handler_mutex);
- rdma_destroy_id(new_cm_id);
- goto out;
+ mutex_unlock(&listen_id->handler_mutex);
+ destroy_id_handler_unlock(conn_id);
+ return ret;
}
- ret = cma_acquire_dev(conn_id, listen_id);
+ ret = cma_iw_acquire_dev(conn_id, listen_id);
if (ret) {
- mutex_unlock(&conn_id->handler_mutex);
- rdma_destroy_id(new_cm_id);
- goto out;
+ mutex_unlock(&listen_id->handler_mutex);
+ destroy_id_handler_unlock(conn_id);
+ return ret;
}
conn_id->cm_id.iw = cm_id;
@@ -1888,38 +2631,16 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));
- ret = ib_query_device(conn_id->id.device, &attr);
- if (ret) {
- mutex_unlock(&conn_id->handler_mutex);
- rdma_destroy_id(new_cm_id);
- goto out;
- }
-
- memset(&event, 0, sizeof event);
- event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
- event.param.conn.private_data = iw_event->private_data;
- event.param.conn.private_data_len = iw_event->private_data_len;
- event.param.conn.initiator_depth = iw_event->ird;
- event.param.conn.responder_resources = iw_event->ord;
-
- /*
- * Protect against the user destroying conn_id from another thread
- * until we're done accessing it.
- */
- atomic_inc(&conn_id->refcount);
- ret = conn_id->id.event_handler(&conn_id->id, &event);
+ ret = cma_cm_event_handler(conn_id, &event);
if (ret) {
/* User wants to destroy the CM ID */
conn_id->cm_id.iw = NULL;
- cma_exch(conn_id, RDMA_CM_DESTROYING);
- mutex_unlock(&conn_id->handler_mutex);
- cma_deref_id(conn_id);
- rdma_destroy_id(&conn_id->id);
- goto out;
+ mutex_unlock(&listen_id->handler_mutex);
+ destroy_id_handler_unlock(conn_id);
+ return ret;
}
mutex_unlock(&conn_id->handler_mutex);
- cma_deref_id(conn_id);
out:
mutex_unlock(&listen_id->handler_mutex);
@@ -1934,7 +2655,8 @@ static int cma_ib_listen(struct rdma_id_private *id_priv)
addr = cma_src_addr(id_priv);
svc_id = rdma_get_service_id(&id_priv->id, addr);
- id = ib_cm_insert_listen(id_priv->id.device, cma_req_handler, svc_id);
+ id = ib_cm_insert_listen(id_priv->id.device,
+ cma_ib_req_handler, svc_id);
if (IS_ERR(id))
return PTR_ERR(id);
id_priv->cm_id.ib = id;
@@ -1953,7 +2675,11 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
if (IS_ERR(id))
return PTR_ERR(id);
+ mutex_lock(&id_priv->qp_mutex);
id->tos = id_priv->tos;
+ id->tos_set = id_priv->tos_set;
+ mutex_unlock(&id_priv->qp_mutex);
+ id->afonly = id_priv->afonly;
id_priv->cm_id.iw = id;
memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
@@ -1974,53 +2700,88 @@ static int cma_listen_handler(struct rdma_cm_id *id,
{
struct rdma_id_private *id_priv = id->context;
+ /* Listening IDs are always destroyed on removal */
+ if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
+ return -1;
+
id->context = id_priv->id.context;
id->event_handler = id_priv->id.event_handler;
+ trace_cm_event_handler(id_priv, event);
return id_priv->id.event_handler(id, event);
}
-static void cma_listen_on_dev(struct rdma_id_private *id_priv,
- struct cma_device *cma_dev)
+static int cma_listen_on_dev(struct rdma_id_private *id_priv,
+ struct cma_device *cma_dev,
+ struct rdma_id_private **to_destroy)
{
struct rdma_id_private *dev_id_priv;
- struct rdma_cm_id *id;
+ struct net *net = id_priv->id.route.addr.dev_addr.net;
int ret;
- if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
- return;
+ lockdep_assert_held(&lock);
- id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
- id_priv->id.qp_type);
- if (IS_ERR(id))
- return;
+ *to_destroy = NULL;
+ if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
+ return 0;
- dev_id_priv = container_of(id, struct rdma_id_private, id);
+ dev_id_priv =
+ __rdma_create_id(net, cma_listen_handler, id_priv,
+ id_priv->id.ps, id_priv->id.qp_type, id_priv);
+ if (IS_ERR(dev_id_priv))
+ return PTR_ERR(dev_id_priv);
dev_id_priv->state = RDMA_CM_ADDR_BOUND;
memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
rdma_addr_size(cma_src_addr(id_priv)));
- cma_attach_to_dev(dev_id_priv, cma_dev);
- list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
- atomic_inc(&id_priv->refcount);
+ _cma_attach_to_dev(dev_id_priv, cma_dev);
+ rdma_restrack_add(&dev_id_priv->res);
+ cma_id_get(id_priv);
dev_id_priv->internal_id = 1;
dev_id_priv->afonly = id_priv->afonly;
+ mutex_lock(&id_priv->qp_mutex);
+ dev_id_priv->tos_set = id_priv->tos_set;
+ dev_id_priv->tos = id_priv->tos;
+ mutex_unlock(&id_priv->qp_mutex);
- ret = rdma_listen(id, id_priv->backlog);
+ ret = rdma_listen(&dev_id_priv->id, id_priv->backlog);
if (ret)
- printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
- "listening on device %s\n", ret, cma_dev->device->name);
+ goto err_listen;
+ list_add_tail(&dev_id_priv->listen_item, &id_priv->listen_list);
+ return 0;
+err_listen:
+ /* Caller must destroy this after releasing lock */
+ *to_destroy = dev_id_priv;
+ dev_warn(&cma_dev->device->dev, "RDMA CMA: %s, error %d\n", __func__, ret);
+ return ret;
}
-static void cma_listen_on_all(struct rdma_id_private *id_priv)
+static int cma_listen_on_all(struct rdma_id_private *id_priv)
{
+ struct rdma_id_private *to_destroy;
struct cma_device *cma_dev;
+ int ret;
mutex_lock(&lock);
- list_add_tail(&id_priv->list, &listen_any_list);
- list_for_each_entry(cma_dev, &dev_list, list)
- cma_listen_on_dev(id_priv, cma_dev);
+ list_add_tail(&id_priv->listen_any_item, &listen_any_list);
+ list_for_each_entry(cma_dev, &dev_list, list) {
+ ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
+ if (ret) {
+ /* Prevent racing with cma_process_remove() */
+ if (to_destroy)
+ list_del_init(&to_destroy->device_item);
+ goto err_listen;
+ }
+ }
mutex_unlock(&lock);
+ return 0;
+
+err_listen:
+ _cma_cancel_listens(id_priv);
+ mutex_unlock(&lock);
+ if (to_destroy)
+ rdma_destroy_id(&to_destroy->id);
+ return ret;
}
void rdma_set_service_type(struct rdma_cm_id *id, int tos)
@@ -2028,47 +2789,179 @@ void rdma_set_service_type(struct rdma_cm_id *id, int tos)
struct rdma_id_private *id_priv;
id_priv = container_of(id, struct rdma_id_private, id);
+ mutex_lock(&id_priv->qp_mutex);
id_priv->tos = (u8) tos;
+ id_priv->tos_set = true;
+ mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_set_service_type);
-static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
- void *context)
+/**
+ * rdma_set_ack_timeout() - Set the ack timeout of QP associated
+ * with a connection identifier.
+ * @id: Communication identifier whose QP the ack timeout applies to.
+ * @timeout: Ack timeout to set a QP, expressed as 4.096 * 2^(timeout) usec.
+ *
+ * This function should be called before rdma_connect() on active side,
+ * and on passive side before rdma_accept(). It is applicable to primary
+ * path only. The timeout affects the local side of the QP; it is not
+ * negotiated with the remote side, and zero disables the timer. If it is
+ * set before rdma_resolve_route(), the value is also used to determine
+ * PacketLifeTime for RoCE.
+ *
+ * Return: 0 for success
+ */
+int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
+{
+ struct rdma_id_private *id_priv;
+
+ if (id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_INI)
+ return -EINVAL;
+
+ id_priv = container_of(id, struct rdma_id_private, id);
+ mutex_lock(&id_priv->qp_mutex);
+ id_priv->timeout = timeout;
+ id_priv->timeout_set = true;
+ mutex_unlock(&id_priv->qp_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(rdma_set_ack_timeout);
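A hedged usage sketch (assumes an RC rdma_cm_id named "id" created by the caller): the timeout is the IBTA-encoded exponent, so 14 corresponds to roughly 4.096 us * 2^14, about 67 ms of local ACK timeout.

	/* on the active side, before rdma_connect() */
	ret = rdma_set_ack_timeout(id, 14);
	if (ret)	/* only IB_QPT_RC / IB_QPT_XRC_INI ids are accepted */
		goto err;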
+
+/**
+ * rdma_set_min_rnr_timer() - Set the minimum RNR Retry timer of the
+ * QP associated with a connection identifier.
+ * @id: Communication identifier whose QP the RNR NAK timer applies to.
+ * @min_rnr_timer: 5-bit value encoded as Table 45: "Encoding for RNR NAK
+ * Timer Field" in the IBTA specification.
+ *
+ * This function should be called before rdma_connect() on active
+ * side, and on passive side before rdma_accept(). The timer value
+ * will be associated with the local QP. When the QP receives a send it
+ * is not ready to handle, typically because the receive queue is empty,
+ * an RNR NAK is returned to the requester with the min_rnr_timer value
+ * encoded. The requester will then wait at least the time specified
+ * in the NAK before retrying. The default is zero, which translates
+ * to a minimum RNR Timer value of 655 ms.
+ *
+ * Return: 0 for success
+ */
+int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer)
+{
+ struct rdma_id_private *id_priv;
+
+ /* It is a five-bit value */
+ if (min_rnr_timer & 0xe0)
+ return -EINVAL;
+
+ if (WARN_ON(id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_TGT))
+ return -EINVAL;
+
+ id_priv = container_of(id, struct rdma_id_private, id);
+ mutex_lock(&id_priv->qp_mutex);
+ id_priv->min_rnr_timer = min_rnr_timer;
+ id_priv->min_rnr_timer_set = true;
+ mutex_unlock(&id_priv->qp_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(rdma_set_min_rnr_timer);
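A similar hedged sketch for the minimum RNR NAK timer: only the low five bits are meaningful, so values of 32 or more are rejected with -EINVAL, and the id must be RC or XRC_TGT.

	/* on the passive side, before rdma_accept(); 5-bit IBTA Table 45 encoding */
	ret = rdma_set_min_rnr_timer(id, 12);
	if (ret)
		goto err;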
+
+static int route_set_path_rec_inbound(struct cma_work *work,
+ struct sa_path_rec *path_rec)
+{
+ struct rdma_route *route = &work->id->id.route;
+
+ if (!route->path_rec_inbound) {
+ route->path_rec_inbound =
+ kzalloc(sizeof(*route->path_rec_inbound), GFP_KERNEL);
+ if (!route->path_rec_inbound)
+ return -ENOMEM;
+ }
+
+ *route->path_rec_inbound = *path_rec;
+ return 0;
+}
+
+static int route_set_path_rec_outbound(struct cma_work *work,
+ struct sa_path_rec *path_rec)
+{
+ struct rdma_route *route = &work->id->id.route;
+
+ if (!route->path_rec_outbound) {
+ route->path_rec_outbound =
+ kzalloc(sizeof(*route->path_rec_outbound), GFP_KERNEL);
+ if (!route->path_rec_outbound)
+ return -ENOMEM;
+ }
+
+ *route->path_rec_outbound = *path_rec;
+ return 0;
+}
+
+static void cma_query_handler(int status, struct sa_path_rec *path_rec,
+ unsigned int num_prs, void *context)
{
struct cma_work *work = context;
struct rdma_route *route;
+ int i;
route = &work->id->id.route;
- if (!status) {
- route->num_paths = 1;
- *route->path_rec = *path_rec;
- } else {
- work->old_state = RDMA_CM_ROUTE_QUERY;
- work->new_state = RDMA_CM_ADDR_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
- work->event.status = status;
+ if (status)
+ goto fail;
+
+ for (i = 0; i < num_prs; i++) {
+ if (!path_rec[i].flags || (path_rec[i].flags & IB_PATH_GMP))
+ *route->path_rec = path_rec[i];
+ else if (path_rec[i].flags & IB_PATH_INBOUND)
+ status = route_set_path_rec_inbound(work, &path_rec[i]);
+ else if (path_rec[i].flags & IB_PATH_OUTBOUND)
+ status = route_set_path_rec_outbound(work,
+ &path_rec[i]);
+ else
+ status = -EINVAL;
+
+ if (status)
+ goto fail;
}
+ route->num_pri_alt_paths = 1;
+ queue_work(cma_wq, &work->work);
+ return;
+
+fail:
+ work->old_state = RDMA_CM_ROUTE_QUERY;
+ work->new_state = RDMA_CM_ADDR_RESOLVED;
+ work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
+ work->event.status = status;
+ pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. status %d\n",
+ status);
queue_work(cma_wq, &work->work);
}
-static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
- struct cma_work *work)
+static int cma_query_ib_route(struct rdma_id_private *id_priv,
+ unsigned long timeout_ms, struct cma_work *work)
{
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
- struct ib_sa_path_rec path_rec;
+ struct sa_path_rec path_rec;
ib_sa_comp_mask comp_mask;
struct sockaddr_in6 *sin6;
struct sockaddr_ib *sib;
memset(&path_rec, 0, sizeof path_rec);
+
+ if (rdma_cap_opa_ah(id_priv->id.device, id_priv->id.port_num))
+ path_rec.rec_type = SA_PATH_REC_TYPE_OPA;
+ else
+ path_rec.rec_type = SA_PATH_REC_TYPE_IB;
rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
path_rec.numb_path = 1;
path_rec.reversible = 1;
- path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
+ path_rec.service_id = rdma_get_service_id(&id_priv->id,
+ cma_dst_addr(id_priv));
comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
@@ -2100,53 +2993,84 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}
-static void cma_work_handler(struct work_struct *_work)
+static void cma_iboe_join_work_handler(struct work_struct *work)
{
- struct cma_work *work = container_of(_work, struct cma_work, work);
- struct rdma_id_private *id_priv = work->id;
- int destroy = 0;
+ struct cma_multicast *mc =
+ container_of(work, struct cma_multicast, iboe_join.work);
+ struct rdma_cm_event *event = &mc->iboe_join.event;
+ struct rdma_id_private *id_priv = mc->id_priv;
+ int ret;
mutex_lock(&id_priv->handler_mutex);
- if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
- goto out;
+ if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
+ READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
+ goto out_unlock;
- if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
- cma_exch(id_priv, RDMA_CM_DESTROYING);
- destroy = 1;
- }
-out:
+ ret = cma_cm_event_handler(id_priv, event);
+ WARN_ON(ret);
+
+out_unlock:
mutex_unlock(&id_priv->handler_mutex);
- cma_deref_id(id_priv);
- if (destroy)
- rdma_destroy_id(&id_priv->id);
- kfree(work);
+ if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN)
+ rdma_destroy_ah_attr(&event->param.ud.ah_attr);
}
-static void cma_ndev_work_handler(struct work_struct *_work)
+static void cma_work_handler(struct work_struct *_work)
{
- struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
+ struct cma_work *work = container_of(_work, struct cma_work, work);
struct rdma_id_private *id_priv = work->id;
- int destroy = 0;
mutex_lock(&id_priv->handler_mutex);
- if (id_priv->state == RDMA_CM_DESTROYING ||
- id_priv->state == RDMA_CM_DEVICE_REMOVAL)
- goto out;
+ if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
+ READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
+ goto out_unlock;
+ if (work->old_state != 0 || work->new_state != 0) {
+ if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
+ goto out_unlock;
+ }
- if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
- cma_exch(id_priv, RDMA_CM_DESTROYING);
- destroy = 1;
+ if (cma_cm_event_handler(id_priv, &work->event)) {
+ cma_id_put(id_priv);
+ destroy_id_handler_unlock(id_priv);
+ goto out_free;
}
-out:
+out_unlock:
mutex_unlock(&id_priv->handler_mutex);
- cma_deref_id(id_priv);
- if (destroy)
- rdma_destroy_id(&id_priv->id);
+ cma_id_put(id_priv);
+out_free:
+ if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN)
+ rdma_destroy_ah_attr(&work->event.param.ud.ah_attr);
kfree(work);
}
-static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
+static void cma_init_resolve_route_work(struct cma_work *work,
+ struct rdma_id_private *id_priv)
+{
+ work->id = id_priv;
+ INIT_WORK(&work->work, cma_work_handler);
+ work->old_state = RDMA_CM_ROUTE_QUERY;
+ work->new_state = RDMA_CM_ROUTE_RESOLVED;
+ work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+}
+
+static void enqueue_resolve_addr_work(struct cma_work *work,
+ struct rdma_id_private *id_priv)
+{
+ /* Balances with cma_id_put() in cma_work_handler */
+ cma_id_get(id_priv);
+
+ work->id = id_priv;
+ INIT_WORK(&work->work, cma_work_handler);
+ work->old_state = RDMA_CM_ADDR_QUERY;
+ work->new_state = RDMA_CM_ADDR_RESOLVED;
+ work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
+
+ queue_work(cma_wq, &work->work);
+}
+
+static int cma_resolve_ib_route(struct rdma_id_private *id_priv,
+ unsigned long timeout_ms)
{
struct rdma_route *route = &id_priv->id.route;
struct cma_work *work;
@@ -2156,13 +3080,10 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
if (!work)
return -ENOMEM;
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
- work->old_state = RDMA_CM_ROUTE_QUERY;
- work->new_state = RDMA_CM_ROUTE_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+ cma_init_resolve_route_work(work, id_priv);
- route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
+ if (!route->path_rec)
+ route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
if (!route->path_rec) {
ret = -ENOMEM;
goto err1;
@@ -2181,10 +3102,62 @@ err1:
return ret;
}
-int rdma_set_ib_paths(struct rdma_cm_id *id,
- struct ib_sa_path_rec *path_rec, int num_paths)
+static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
+ unsigned long supported_gids,
+ enum ib_gid_type default_gid)
+{
+ if ((network_type == RDMA_NETWORK_IPV4 ||
+ network_type == RDMA_NETWORK_IPV6) &&
+ test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
+ return IB_GID_TYPE_ROCE_UDP_ENCAP;
+
+ return default_gid;
+}
+
+/*
+ * cma_iboe_set_path_rec_l2_fields() is a helper function that sets the
+ * path record type based on the GID type.
+ * It also sets up the other L2 fields of the path record, including the
+ * destination MAC address and the netdev ifindex.
+ * It returns the netdev of the bound interface for this path record entry.
+ */
+static struct net_device *
+cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv)
+{
+ struct rdma_route *route = &id_priv->id.route;
+ enum ib_gid_type gid_type = IB_GID_TYPE_ROCE;
+ struct rdma_addr *addr = &route->addr;
+ unsigned long supported_gids;
+ struct net_device *ndev;
+
+ if (!addr->dev_addr.bound_dev_if)
+ return NULL;
+
+ ndev = dev_get_by_index(addr->dev_addr.net,
+ addr->dev_addr.bound_dev_if);
+ if (!ndev)
+ return NULL;
+
+ supported_gids = roce_gid_type_mask_support(id_priv->id.device,
+ id_priv->id.port_num);
+ gid_type = cma_route_gid_type(addr->dev_addr.network,
+ supported_gids,
+ id_priv->gid_type);
+ /* Use the hint from IP Stack to select GID Type */
+ if (gid_type < ib_network_to_gid_type(addr->dev_addr.network))
+ gid_type = ib_network_to_gid_type(addr->dev_addr.network);
+ route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type);
+
+ route->path_rec->roce.route_resolved = true;
+ sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr);
+ return ndev;
+}
+
+int rdma_set_ib_path(struct rdma_cm_id *id,
+ struct sa_path_rec *path_rec)
{
struct rdma_id_private *id_priv;
+ struct net_device *ndev;
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
@@ -2192,22 +3165,35 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
RDMA_CM_ROUTE_RESOLVED))
return -EINVAL;
- id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
+ id->route.path_rec = kmemdup(path_rec, sizeof(*path_rec),
GFP_KERNEL);
if (!id->route.path_rec) {
ret = -ENOMEM;
goto err;
}
- id->route.num_paths = num_paths;
+ if (rdma_protocol_roce(id->device, id->port_num)) {
+ ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
+ if (!ndev) {
+ ret = -ENODEV;
+ goto err_free;
+ }
+ dev_put(ndev);
+ }
+
+ id->route.num_pri_alt_paths = 1;
return 0;
+
+err_free:
+ kfree(id->route.path_rec);
+ id->route.path_rec = NULL;
err:
cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
return ret;
}
-EXPORT_SYMBOL(rdma_set_ib_paths);
+EXPORT_SYMBOL(rdma_set_ib_path);
-static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
+static int cma_resolve_iw_route(struct rdma_id_private *id_priv)
{
struct cma_work *work;
@@ -2215,33 +3201,91 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
if (!work)
return -ENOMEM;
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
- work->old_state = RDMA_CM_ROUTE_QUERY;
- work->new_state = RDMA_CM_ROUTE_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+ cma_init_resolve_route_work(work, id_priv);
queue_work(cma_wq, &work->work);
return 0;
}
-static int iboe_tos_to_sl(struct net_device *ndev, int tos)
+static int get_vlan_ndev_tc(struct net_device *vlan_ndev, int prio)
{
- int prio;
struct net_device *dev;
- prio = rt_tos2priority(tos);
- dev = ndev->priv_flags & IFF_802_1Q_VLAN ?
- vlan_dev_real_dev(ndev) : ndev;
-
+ dev = vlan_dev_real_dev(vlan_ndev);
if (dev->num_tc)
return netdev_get_prio_tc_map(dev, prio);
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
- if (ndev->priv_flags & IFF_802_1Q_VLAN)
- return (vlan_dev_get_egress_qos_mask(ndev, prio) &
- VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
-#endif
- return 0;
+ return (vlan_dev_get_egress_qos_mask(vlan_ndev, prio) &
+ VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+}
+
+struct iboe_prio_tc_map {
+ int input_prio;
+ int output_tc;
+ bool found;
+};
+
+static int get_lower_vlan_dev_tc(struct net_device *dev,
+ struct netdev_nested_priv *priv)
+{
+ struct iboe_prio_tc_map *map = (struct iboe_prio_tc_map *)priv->data;
+
+ if (is_vlan_dev(dev))
+ map->output_tc = get_vlan_ndev_tc(dev, map->input_prio);
+ else if (dev->num_tc)
+ map->output_tc = netdev_get_prio_tc_map(dev, map->input_prio);
+ else
+ map->output_tc = 0;
+ /* We are interested only in the first-level VLAN device, so always
+ * return 1 to stop iterating over lower-level devices.
+ */
+ map->found = true;
+ return 1;
+}
+
+static int iboe_tos_to_sl(struct net_device *ndev, int tos)
+{
+ struct iboe_prio_tc_map prio_tc_map = {};
+ int prio = rt_tos2priority(tos);
+ struct netdev_nested_priv priv;
+
+ /* If VLAN device, get it directly from the VLAN netdev */
+ if (is_vlan_dev(ndev))
+ return get_vlan_ndev_tc(ndev, prio);
+
+ prio_tc_map.input_prio = prio;
+ priv.data = (void *)&prio_tc_map;
+ rcu_read_lock();
+ netdev_walk_all_lower_dev_rcu(ndev,
+ get_lower_vlan_dev_tc,
+ &priv);
+ rcu_read_unlock();
+ /* If a map is found from a lower device, use it; otherwise
+ * fall back to the current netdevice's priority-to-TC map.
+ */
+ if (prio_tc_map.found)
+ return prio_tc_map.output_tc;
+ else if (ndev->num_tc)
+ return netdev_get_prio_tc_map(ndev, prio);
+ else
+ return 0;
+}
+
+static __be32 cma_get_roce_udp_flow_label(struct rdma_id_private *id_priv)
+{
+ struct sockaddr_in6 *addr6;
+ u16 dport, sport;
+ u32 hash, fl;
+
+ addr6 = (struct sockaddr_in6 *)cma_src_addr(id_priv);
+ fl = be32_to_cpu(addr6->sin6_flowinfo) & IB_GRH_FLOWLABEL_MASK;
+ if ((cma_family(id_priv) != AF_INET6) || !fl) {
+ dport = be16_to_cpu(cma_port(cma_dst_addr(id_priv)));
+ sport = be16_to_cpu(cma_port(cma_src_addr(id_priv)));
+ hash = (u32)sport * 31 + dport;
+ fl = hash & IB_GRH_FLOWLABEL_MASK;
+ }
+
+ return cpu_to_be32(fl);
}
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
@@ -2250,61 +3294,79 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
struct rdma_addr *addr = &route->addr;
struct cma_work *work;
int ret;
- struct net_device *ndev = NULL;
+ struct net_device *ndev;
+
+ u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num -
+ rdma_start_port(id_priv->cma_dev->device)];
+ u8 tos;
+ mutex_lock(&id_priv->qp_mutex);
+ tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
+ mutex_unlock(&id_priv->qp_mutex);
work = kzalloc(sizeof *work, GFP_KERNEL);
if (!work)
return -ENOMEM;
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
-
route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
if (!route->path_rec) {
ret = -ENOMEM;
goto err1;
}
- route->num_paths = 1;
+ route->num_pri_alt_paths = 1;
- if (addr->dev_addr.bound_dev_if)
- ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
+ ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
if (!ndev) {
ret = -ENODEV;
goto err2;
}
- route->path_rec->vlan_id = rdma_vlan_dev_vlan_id(ndev);
- memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN);
- memcpy(route->path_rec->smac, ndev->dev_addr, ndev->addr_len);
-
rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
&route->path_rec->sgid);
rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
&route->path_rec->dgid);
- route->path_rec->hop_limit = 1;
+ if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB)
+ /* TODO: get the hoplimit from the inet/inet6 device */
+ route->path_rec->hop_limit = addr->dev_addr.hoplimit;
+ else
+ route->path_rec->hop_limit = 1;
route->path_rec->reversible = 1;
route->path_rec->pkey = cpu_to_be16(0xffff);
route->path_rec->mtu_selector = IB_SA_EQ;
- route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos);
+ route->path_rec->sl = iboe_tos_to_sl(ndev, tos);
+ route->path_rec->traffic_class = tos;
route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
route->path_rec->rate_selector = IB_SA_EQ;
- route->path_rec->rate = iboe_get_rate(ndev);
+ route->path_rec->rate = IB_RATE_PORT_CURRENT;
dev_put(ndev);
route->path_rec->packet_life_time_selector = IB_SA_EQ;
- route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
+ /* In case ACK timeout is set, use this value to calculate
+ * PacketLifeTime. As per IBTA 12.7.34,
+ * local ACK timeout = (2 * PacketLifeTime + Local CA’s ACK delay).
+ * Assuming a negligible local ACK delay, we can use
+ * PacketLifeTime = local ACK timeout/2
+ * as a reasonable approximation for RoCE networks.
+ */
+ mutex_lock(&id_priv->qp_mutex);
+ if (id_priv->timeout_set && id_priv->timeout)
+ route->path_rec->packet_life_time = id_priv->timeout - 1;
+ else
+ route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
+ mutex_unlock(&id_priv->qp_mutex);
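A quick numeric check of the conversion above (not in the patch): both fields use the 4.096 us * 2^v encoding, so an ACK timeout of 14 (about 67 ms) yields packet_life_time = 13 (about 33.5 ms), i.e. roughly local ACK timeout / 2 as the preceding comment derives.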
+
if (!route->path_rec->mtu) {
ret = -EINVAL;
goto err2;
}
- work->old_state = RDMA_CM_ROUTE_QUERY;
- work->new_state = RDMA_CM_ROUTE_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
- work->event.status = 0;
+ if (rdma_protocol_roce_udp_encap(id_priv->id.device,
+ id_priv->id.port_num))
+ route->path_rec->flow_label =
+ cma_get_roce_udp_flow_label(id_priv);
+ cma_init_resolve_route_work(work, id_priv);
queue_work(cma_wq, &work->work);
return 0;
@@ -2312,27 +3374,39 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
err2:
kfree(route->path_rec);
route->path_rec = NULL;
+ route->num_pri_alt_paths = 0;
err1:
kfree(work);
return ret;
}
-int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
+int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
{
struct rdma_id_private *id_priv;
+ enum rdma_cm_state state;
int ret;
+ if (!timeout_ms)
+ return -EINVAL;
+
id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
+ state = id_priv->state;
+ if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
+ RDMA_CM_ROUTE_QUERY) &&
+ !cma_comp_exch(id_priv, RDMA_CM_ADDRINFO_RESOLVED,
+ RDMA_CM_ROUTE_QUERY))
return -EINVAL;
- atomic_inc(&id_priv->refcount);
+ cma_id_get(id_priv);
if (rdma_cap_ib_sa(id->device, id->port_num))
ret = cma_resolve_ib_route(id_priv, timeout_ms);
- else if (rdma_protocol_roce(id->device, id->port_num))
+ else if (rdma_protocol_roce(id->device, id->port_num)) {
ret = cma_resolve_iboe_route(id_priv);
+ if (!ret)
+ cma_add_id_to_tree(id_priv);
+ }
else if (rdma_protocol_iwarp(id->device, id->port_num))
- ret = cma_resolve_iw_route(id_priv, timeout_ms);
+ ret = cma_resolve_iw_route(id_priv);
else
ret = -ENOSYS;
@@ -2341,8 +3415,8 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
return 0;
err:
- cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
- cma_deref_id(id_priv);
+ cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, state);
+ cma_id_put(id_priv);
return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);
@@ -2367,11 +3441,11 @@ static void cma_set_loopback(struct sockaddr *addr)
static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
struct cma_device *cma_dev, *cur_dev;
- struct ib_port_attr port_attr;
union ib_gid gid;
+ enum ib_port_state port_state;
+ unsigned int p;
u16 pkey;
int ret;
- u8 p;
cma_dev = NULL;
mutex_lock(&lock);
@@ -2383,9 +3457,9 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
if (!cma_dev)
cma_dev = cur_dev;
- for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
- if (!ib_query_port(cur_dev->device, p, &port_attr) &&
- port_attr.state == IB_PORT_ACTIVE) {
+ rdma_for_each_port (cur_dev->device, p) {
+ if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) &&
+ port_state == IB_PORT_ACTIVE) {
cma_dev = cur_dev;
goto port_found;
}
@@ -2400,7 +3474,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
p = 1;
port_found:
- ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
+ ret = rdma_query_gid(cma_dev->device, p, 0, &gid);
if (ret)
goto out;
@@ -2416,6 +3490,7 @@ port_found:
ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
id_priv->id.port_num = p;
cma_attach_to_dev(id_priv, cma_dev);
+ rdma_restrack_add(&id_priv->res);
cma_set_loopback(cma_src_addr(id_priv));
out:
mutex_unlock(&lock);
@@ -2426,19 +3501,36 @@ static void addr_handler(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *dev_addr, void *context)
{
struct rdma_id_private *id_priv = context;
- struct rdma_cm_event event;
+ struct rdma_cm_event event = {};
+ struct sockaddr *addr;
+ struct sockaddr_storage old_addr;
- memset(&event, 0, sizeof event);
mutex_lock(&id_priv->handler_mutex);
if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
RDMA_CM_ADDR_RESOLVED))
goto out;
- memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
- if (!status && !id_priv->cma_dev)
- status = cma_acquire_dev(id_priv, NULL);
+ /*
+ * Store the previous src address, so that if we fail to acquire
+ * matching rdma device, old address can be restored back, which helps
+ * to cancel the cma listen operation correctly.
+ */
+ addr = cma_src_addr(id_priv);
+ memcpy(&old_addr, addr, rdma_addr_size(addr));
+ memcpy(addr, src_addr, rdma_addr_size(src_addr));
+ if (!status && !id_priv->cma_dev) {
+ status = cma_acquire_dev_by_src_ip(id_priv);
+ if (status)
+ pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
+ status);
+ rdma_restrack_add(&id_priv->res);
+ } else if (status) {
+ pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
+ }
if (status) {
+ memcpy(addr, &old_addr,
+ rdma_addr_size((struct sockaddr *)&old_addr));
if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
RDMA_CM_ADDR_BOUND))
goto out;
@@ -2447,16 +3539,12 @@ static void addr_handler(int status, struct sockaddr *src_addr,
} else
event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
- if (id_priv->id.event_handler(&id_priv->id, &event)) {
- cma_exch(id_priv, RDMA_CM_DESTROYING);
- mutex_unlock(&id_priv->handler_mutex);
- cma_deref_id(id_priv);
- rdma_destroy_id(&id_priv->id);
+ if (cma_cm_event_handler(id_priv, &event)) {
+ destroy_id_handler_unlock(id_priv);
return;
}
out:
mutex_unlock(&id_priv->handler_mutex);
- cma_deref_id(id_priv);
}
static int cma_resolve_loopback(struct rdma_id_private *id_priv)
@@ -2478,12 +3566,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
- work->old_state = RDMA_CM_ADDR_QUERY;
- work->new_state = RDMA_CM_ADDR_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
- queue_work(cma_wq, &work->work);
+ enqueue_resolve_addr_work(work, id_priv);
return 0;
err:
kfree(work);
@@ -2508,81 +3591,13 @@ static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
&(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
- work->old_state = RDMA_CM_ADDR_QUERY;
- work->new_state = RDMA_CM_ADDR_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
- queue_work(cma_wq, &work->work);
+ enqueue_resolve_addr_work(work, id_priv);
return 0;
err:
kfree(work);
return ret;
}
-static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
- struct sockaddr *dst_addr)
-{
- if (!src_addr || !src_addr->sa_family) {
- src_addr = (struct sockaddr *) &id->route.addr.src_addr;
- src_addr->sa_family = dst_addr->sa_family;
- if (dst_addr->sa_family == AF_INET6) {
- struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
- struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
- src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
- if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
- id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id;
- } else if (dst_addr->sa_family == AF_IB) {
- ((struct sockaddr_ib *) src_addr)->sib_pkey =
- ((struct sockaddr_ib *) dst_addr)->sib_pkey;
- }
- }
- return rdma_bind_addr(id, src_addr);
-}
-
-int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
- struct sockaddr *dst_addr, int timeout_ms)
-{
- struct rdma_id_private *id_priv;
- int ret;
-
- id_priv = container_of(id, struct rdma_id_private, id);
- if (id_priv->state == RDMA_CM_IDLE) {
- ret = cma_bind_addr(id, src_addr, dst_addr);
- if (ret)
- return ret;
- }
-
- if (cma_family(id_priv) != dst_addr->sa_family)
- return -EINVAL;
-
- if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
- return -EINVAL;
-
- atomic_inc(&id_priv->refcount);
- memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
- if (cma_any_addr(dst_addr)) {
- ret = cma_resolve_loopback(id_priv);
- } else {
- if (dst_addr->sa_family == AF_IB) {
- ret = cma_resolve_ib_addr(id_priv);
- } else {
- ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv),
- dst_addr, &id->route.addr.dev_addr,
- timeout_ms, addr_handler, id_priv);
- }
- }
- if (ret)
- goto err;
-
- return 0;
-err:
- cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
- cma_deref_id(id_priv);
- return ret;
-}
-EXPORT_SYMBOL(rdma_resolve_addr);
-
int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
{
struct rdma_id_private *id_priv;
@@ -2591,7 +3606,8 @@ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
id_priv = container_of(id, struct rdma_id_private, id);
spin_lock_irqsave(&id_priv->lock, flags);
- if (reuse || id_priv->state == RDMA_CM_IDLE) {
+ if ((reuse && id_priv->state != RDMA_CM_LISTEN) ||
+ id_priv->state == RDMA_CM_IDLE) {
id_priv->reuseaddr = reuse;
ret = 0;
} else {
@@ -2630,6 +3646,8 @@ static void cma_bind_port(struct rdma_bind_list *bind_list,
u64 sid, mask;
__be16 port;
+ lockdep_assert_held(&lock);
+
addr = cma_src_addr(id_priv);
port = htons(bind_list->port);
@@ -2652,22 +3670,25 @@ static void cma_bind_port(struct rdma_bind_list *bind_list,
hlist_add_head(&id_priv->node, &bind_list->owners);
}
-static int cma_alloc_port(enum rdma_port_space ps,
+static int cma_alloc_port(enum rdma_ucm_port_space ps,
struct rdma_id_private *id_priv, unsigned short snum)
{
struct rdma_bind_list *bind_list;
int ret;
+ lockdep_assert_held(&lock);
+
bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
if (!bind_list)
return -ENOMEM;
- ret = cma_ps_alloc(ps, bind_list, snum);
+ ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list,
+ snum);
if (ret < 0)
goto err;
bind_list->ps = ps;
- bind_list->port = (unsigned short)ret;
+ bind_list->port = snum;
cma_bind_port(bind_list, id_priv);
return 0;
err:
@@ -2675,20 +3696,74 @@ err:
return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
}
-static int cma_alloc_any_port(enum rdma_port_space ps,
+static int cma_port_is_unique(struct rdma_bind_list *bind_list,
+ struct rdma_id_private *id_priv)
+{
+ struct rdma_id_private *cur_id;
+ struct sockaddr *daddr = cma_dst_addr(id_priv);
+ struct sockaddr *saddr = cma_src_addr(id_priv);
+ __be16 dport = cma_port(daddr);
+
+ lockdep_assert_held(&lock);
+
+ hlist_for_each_entry(cur_id, &bind_list->owners, node) {
+ struct sockaddr *cur_daddr = cma_dst_addr(cur_id);
+ struct sockaddr *cur_saddr = cma_src_addr(cur_id);
+ __be16 cur_dport = cma_port(cur_daddr);
+
+ if (id_priv == cur_id)
+ continue;
+
+ /* different dest port -> unique */
+ if (!cma_any_port(daddr) &&
+ !cma_any_port(cur_daddr) &&
+ (dport != cur_dport))
+ continue;
+
+ /* different src address -> unique */
+ if (!cma_any_addr(saddr) &&
+ !cma_any_addr(cur_saddr) &&
+ cma_addr_cmp(saddr, cur_saddr))
+ continue;
+
+ /* different dst address -> unique */
+ if (!cma_any_addr(daddr) &&
+ !cma_any_addr(cur_daddr) &&
+ cma_addr_cmp(daddr, cur_daddr))
+ continue;
+
+ return -EADDRNOTAVAIL;
+ }
+ return 0;
+}
+
+static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
struct rdma_id_private *id_priv)
{
static unsigned int last_used_port;
int low, high, remaining;
unsigned int rover;
+ struct net *net = id_priv->id.route.addr.dev_addr.net;
+
+ lockdep_assert_held(&lock);
- inet_get_local_port_range(&init_net, &low, &high);
+ inet_get_local_port_range(net, &low, &high);
remaining = (high - low) + 1;
- rover = prandom_u32() % remaining + low;
+ rover = get_random_u32_inclusive(low, remaining + low - 1);
retry:
- if (last_used_port != rover &&
- !cma_ps_find(ps, (unsigned short)rover)) {
- int ret = cma_alloc_port(ps, id_priv, rover);
+ if (last_used_port != rover) {
+ struct rdma_bind_list *bind_list;
+ int ret;
+
+ bind_list = cma_ps_find(net, ps, (unsigned short)rover);
+
+ if (!bind_list) {
+ ret = cma_alloc_port(ps, id_priv, rover);
+ } else {
+ ret = cma_port_is_unique(bind_list, id_priv);
+ if (!ret)
+ cma_bind_port(bind_list, id_priv);
+ }
/*
* Remember previously used port number in order to avoid
* re-using same port immediately after it is closed.
@@ -2719,13 +3794,14 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
struct rdma_id_private *cur_id;
struct sockaddr *addr, *cur_addr;
+ lockdep_assert_held(&lock);
+
addr = cma_src_addr(id_priv);
hlist_for_each_entry(cur_id, &bind_list->owners, node) {
if (id_priv == cur_id)
continue;
- if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr &&
- cur_id->reuseaddr)
+ if (reuseaddr && cur_id->reuseaddr)
continue;
cur_addr = cma_src_addr(cur_id);
@@ -2742,18 +3818,20 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
return 0;
}
-static int cma_use_port(enum rdma_port_space ps,
+static int cma_use_port(enum rdma_ucm_port_space ps,
struct rdma_id_private *id_priv)
{
struct rdma_bind_list *bind_list;
unsigned short snum;
int ret;
+ lockdep_assert_held(&lock);
+
snum = ntohs(cma_port(cma_src_addr(id_priv)));
if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
return -EACCES;
- bind_list = cma_ps_find(ps, snum);
+ bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum);
if (!bind_list) {
ret = cma_alloc_port(ps, id_priv, snum);
} else {
@@ -2764,20 +3842,8 @@ static int cma_use_port(enum rdma_port_space ps,
return ret;
}
-static int cma_bind_listen(struct rdma_id_private *id_priv)
-{
- struct rdma_bind_list *bind_list = id_priv->bind_list;
- int ret = 0;
-
- mutex_lock(&lock);
- if (bind_list->owners.first->next)
- ret = cma_check_port(bind_list, id_priv, 0);
- mutex_unlock(&lock);
- return ret;
-}
-
-static enum rdma_port_space cma_select_inet_ps(
- struct rdma_id_private *id_priv)
+static enum rdma_ucm_port_space
+cma_select_inet_ps(struct rdma_id_private *id_priv)
{
switch (id_priv->id.ps) {
case RDMA_PS_TCP:
@@ -2791,9 +3857,10 @@ static enum rdma_port_space cma_select_inet_ps(
}
}
-static enum rdma_port_space cma_select_ib_ps(struct rdma_id_private *id_priv)
+static enum rdma_ucm_port_space
+cma_select_ib_ps(struct rdma_id_private *id_priv)
{
- enum rdma_port_space ps = 0;
+ enum rdma_ucm_port_space ps = 0;
struct sockaddr_ib *sib;
u64 sid_ps, mask, sid;
@@ -2824,7 +3891,7 @@ static enum rdma_port_space cma_select_ib_ps(struct rdma_id_private *id_priv)
static int cma_get_port(struct rdma_id_private *id_priv)
{
- enum rdma_port_space ps;
+ enum rdma_ucm_port_space ps;
int ret;
if (cma_family(id_priv) != AF_IB)
@@ -2868,28 +3935,41 @@ static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
- struct rdma_id_private *id_priv;
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
int ret;
- id_priv = container_of(id, struct rdma_id_private, id);
- if (id_priv->state == RDMA_CM_IDLE) {
- id->route.addr.src_addr.ss_family = AF_INET;
- ret = rdma_bind_addr(id, cma_src_addr(id_priv));
+ if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) {
+ struct sockaddr_in any_in = {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = htonl(INADDR_ANY),
+ };
+
+ /* For a well behaved ULP state will be RDMA_CM_IDLE */
+ ret = rdma_bind_addr(id, (struct sockaddr *)&any_in);
if (ret)
return ret;
+ if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
+ RDMA_CM_LISTEN)))
+ return -EINVAL;
}
- if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
- return -EINVAL;
-
+ /*
+ * Once the ID reaches RDMA_CM_LISTEN it is not allowed to be reusable
+ * any more, and has to be unique in the bind list.
+ */
if (id_priv->reuseaddr) {
- ret = cma_bind_listen(id_priv);
+ mutex_lock(&lock);
+ ret = cma_check_port(id_priv->bind_list, id_priv, 0);
+ if (!ret)
+ id_priv->reuseaddr = 0;
+ mutex_unlock(&lock);
if (ret)
goto err;
}
id_priv->backlog = backlog;
- if (id->device) {
+ if (id_priv->cma_dev) {
if (rdma_cap_ib_cm(id->device, 1)) {
ret = cma_ib_listen(id_priv);
if (ret)
@@ -2902,41 +3982,48 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
ret = -ENOSYS;
goto err;
}
- } else
- cma_listen_on_all(id_priv);
+ } else {
+ ret = cma_listen_on_all(id_priv);
+ if (ret)
+ goto err;
+ }
return 0;
err:
id_priv->backlog = 0;
+ /*
+	 * All the failure paths that lead here will not allow the req handlers
+	 * to have run.
+ */
cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
return ret;
}
EXPORT_SYMBOL(rdma_listen);
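
With this change a ULP may call rdma_listen() on a freshly created, still-idle ID and the core binds to the IPv4 any-address itself. A minimal kernel-side sketch of the listen flow follows; it assumes the current rdma_create_id()/rdma_destroy_id() signatures and a trivial event handler, neither of which is part of this diff.

#include <linux/in.h>
#include <rdma/rdma_cm.h>

static int example_listen_handler(struct rdma_cm_id *id,
				  struct rdma_cm_event *ev)
{
	/* A real listener would handle RDMA_CM_EVENT_CONNECT_REQUEST here. */
	return 0;
}

/* Sketch only: bind an RC listener to 0.0.0.0:port and start listening. */
static int example_listen(struct net *net, __be16 port,
			  struct rdma_cm_id **out)
{
	struct sockaddr_in sin = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
		.sin_port = port,
	};
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(net, example_listen_handler, NULL,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return PTR_ERR(id);

	/*
	 * Bind explicitly only to pick the port; after this patch an idle ID
	 * passed straight to rdma_listen() is bound to the any-address for it.
	 */
	ret = rdma_bind_addr(id, (struct sockaddr *)&sin);
	if (!ret)
		ret = rdma_listen(id, 16);
	if (ret) {
		rdma_destroy_id(id);
		return ret;
	}
	*out = id;
	return 0;
}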
-int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
+static int rdma_bind_addr_dst(struct rdma_id_private *id_priv,
+ struct sockaddr *addr, const struct sockaddr *daddr)
{
- struct rdma_id_private *id_priv;
+ struct sockaddr *id_daddr;
int ret;
if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
addr->sa_family != AF_IB)
return -EAFNOSUPPORT;
- id_priv = container_of(id, struct rdma_id_private, id);
if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
return -EINVAL;
- ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
+ ret = cma_check_linklocal(&id_priv->id.route.addr.dev_addr, addr);
if (ret)
goto err1;
memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
if (!cma_any_addr(addr)) {
- ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
+ ret = cma_translate_addr(addr, &id_priv->id.route.addr.dev_addr);
if (ret)
goto err1;
- ret = cma_acquire_dev(id_priv, NULL);
+ ret = cma_acquire_dev_by_src_ip(id_priv);
if (ret)
goto err1;
}
@@ -2945,14 +4032,24 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
if (addr->sa_family == AF_INET)
id_priv->afonly = 1;
#if IS_ENABLED(CONFIG_IPV6)
- else if (addr->sa_family == AF_INET6)
- id_priv->afonly = init_net.ipv6.sysctl.bindv6only;
+ else if (addr->sa_family == AF_INET6) {
+ struct net *net = id_priv->id.route.addr.dev_addr.net;
+
+ id_priv->afonly = net->ipv6.sysctl.bindv6only;
+ }
#endif
}
+ id_daddr = cma_dst_addr(id_priv);
+ if (daddr != id_daddr)
+ memcpy(id_daddr, daddr, rdma_addr_size(addr));
+ id_daddr->sa_family = addr->sa_family;
+
ret = cma_get_port(id_priv);
if (ret)
goto err2;
+ if (!cma_any_addr(addr))
+ rdma_restrack_add(&id_priv->res);
return 0;
err2:
if (id_priv->cma_dev)
@@ -2961,6 +4058,129 @@ err1:
cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
return ret;
}
+
+static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
+ const struct sockaddr *dst_addr)
+{
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
+ struct sockaddr_storage zero_sock = {};
+
+ if (src_addr && src_addr->sa_family)
+ return rdma_bind_addr_dst(id_priv, src_addr, dst_addr);
+
+ /*
+ * When the src_addr is not specified, automatically supply an any addr
+ */
+ zero_sock.ss_family = dst_addr->sa_family;
+ if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *src_addr6 =
+ (struct sockaddr_in6 *)&zero_sock;
+ struct sockaddr_in6 *dst_addr6 =
+ (struct sockaddr_in6 *)dst_addr;
+
+ src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
+ if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+ id->route.addr.dev_addr.bound_dev_if =
+ dst_addr6->sin6_scope_id;
+ } else if (dst_addr->sa_family == AF_IB) {
+ ((struct sockaddr_ib *)&zero_sock)->sib_pkey =
+ ((struct sockaddr_ib *)dst_addr)->sib_pkey;
+ }
+ return rdma_bind_addr_dst(id_priv, (struct sockaddr *)&zero_sock, dst_addr);
+}
+
+/*
+ * If required, resolve the source address for bind and leave the id_priv in
+ * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior
+ * calls made by the ULP; a previously bound ID will not be re-bound and
+ * src_addr is ignored.
+ */
+static int resolve_prepare_src(struct rdma_id_private *id_priv,
+ struct sockaddr *src_addr,
+ const struct sockaddr *dst_addr)
+{
+ int ret;
+
+ if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
+ /* For a well behaved ULP state will be RDMA_CM_IDLE */
+ ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
+ if (ret)
+ return ret;
+ if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
+ RDMA_CM_ADDR_QUERY)))
+ return -EINVAL;
+
+ } else {
+ memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
+ }
+
+ if (cma_family(id_priv) != dst_addr->sa_family) {
+ ret = -EINVAL;
+ goto err_state;
+ }
+ return 0;
+
+err_state:
+ cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
+ return ret;
+}
+
+int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
+ const struct sockaddr *dst_addr, unsigned long timeout_ms)
+{
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
+ int ret;
+
+ ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
+ if (ret)
+ return ret;
+
+ if (cma_any_addr(dst_addr)) {
+ ret = cma_resolve_loopback(id_priv);
+ } else {
+ if (dst_addr->sa_family == AF_IB) {
+ ret = cma_resolve_ib_addr(id_priv);
+ } else {
+ /*
+			 * The FSM can return to RDMA_CM_ADDR_BOUND after
+			 * rdma_resolve_ip() is called, e.g. through the error
+			 * path in addr_handler(). If this happens the existing
+			 * request must be canceled before issuing a new one.
+			 * Since canceling a request is a bit slow and this
+			 * oddball path is rare, just remember that a request
+			 * has been issued. The flag is never cleared because
+			 * the only place the cancel is needed is right here,
+			 * immediately before rdma_resolve_ip().
+ */
+ if (id_priv->used_resolve_ip)
+ rdma_addr_cancel(&id->route.addr.dev_addr);
+ else
+ id_priv->used_resolve_ip = 1;
+ ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
+ &id->route.addr.dev_addr,
+ timeout_ms, addr_handler,
+ false, id_priv);
+ }
+ }
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
+ return ret;
+}
+EXPORT_SYMBOL(rdma_resolve_addr);
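
Because resolve_prepare_src() supplies a wildcard source when none is bound, a ULP can start resolution with a NULL src_addr. The sketch below shows the active-side kickoff and the matching handler continuation; rdma_resolve_route() and the error event are assumed from the existing rdma_cm API rather than shown in this hunk.

/* Sketch: start address resolution toward dst; the source is derived. */
static int example_resolve(struct rdma_cm_id *id, struct sockaddr *dst)
{
	/* NULL src_addr: cma_bind_addr() binds the any-address of dst's family. */
	return rdma_resolve_addr(id, NULL, dst, 2000 /* ms */);
}

static int example_active_handler(struct rdma_cm_id *id,
				  struct rdma_cm_event *ev)
{
	switch (ev->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		return rdma_resolve_route(id, 2000 /* ms */);
	case RDMA_CM_EVENT_ADDR_ERROR:
		pr_err("address resolution failed: %d\n", ev->status);
		return 0;
	default:
		return 0;
	}
}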
+
+int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
+{
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
+
+ return rdma_bind_addr_dst(id_priv, addr, cma_dst_addr(id_priv));
+}
EXPORT_SYMBOL(rdma_bind_addr);
static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
@@ -2994,17 +4214,18 @@ static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
}
static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
- struct ib_cm_event *ib_event)
+ const struct ib_cm_event *ib_event)
{
struct rdma_id_private *id_priv = cm_id->context;
- struct rdma_cm_event event;
- struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
- int ret = 0;
+ struct rdma_cm_event event = {};
+ const struct ib_cm_sidr_rep_event_param *rep =
+ &ib_event->param.sidr_rep_rcvd;
+ int ret;
- if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
- return 0;
+ mutex_lock(&id_priv->handler_mutex);
+ if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
+ goto out;
- memset(&event, 0, sizeof event);
switch (ib_event->event) {
case IB_CM_SIDR_REQ_ERROR:
event.event = RDMA_CM_EVENT_UNREACHABLE;
@@ -3016,40 +4237,45 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
if (rep->status != IB_SIDR_SUCCESS) {
event.event = RDMA_CM_EVENT_UNREACHABLE;
event.status = ib_event->param.sidr_rep_rcvd.status;
+ pr_debug_ratelimited("RDMA CM: UNREACHABLE: bad SIDR reply. status %d\n",
+ event.status);
break;
}
ret = cma_set_qkey(id_priv, rep->qkey);
if (ret) {
+ pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to set qkey. status %d\n", ret);
event.event = RDMA_CM_EVENT_ADDR_ERROR;
event.status = ret;
break;
}
- ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
- id_priv->id.route.path_rec,
- &event.param.ud.ah_attr);
+ ib_init_ah_attr_from_path(id_priv->id.device,
+ id_priv->id.port_num,
+ id_priv->id.route.path_rec,
+ &event.param.ud.ah_attr,
+ rep->sgid_attr);
event.param.ud.qp_num = rep->qpn;
event.param.ud.qkey = rep->qkey;
event.event = RDMA_CM_EVENT_ESTABLISHED;
event.status = 0;
break;
default:
- printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
+ pr_err("RDMA CMA: unexpected IB CM event: %d\n",
ib_event->event);
goto out;
}
- ret = id_priv->id.event_handler(&id_priv->id, &event);
+ ret = cma_cm_event_handler(id_priv, &event);
+
+ rdma_destroy_ah_attr(&event.param.ud.ah_attr);
if (ret) {
/* Destroy the CM ID by returning a non-zero value. */
id_priv->cm_id.ib = NULL;
- cma_exch(id_priv, RDMA_CM_DESTROYING);
- mutex_unlock(&id_priv->handler_mutex);
- rdma_destroy_id(&id_priv->id);
+ destroy_id_handler_unlock(id_priv);
return ret;
}
out:
mutex_unlock(&id_priv->handler_mutex);
- return ret;
+ return 0;
}
static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
@@ -3058,12 +4284,12 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
struct ib_cm_sidr_req_param req;
struct ib_cm_id *id;
void *private_data;
- int offset, ret;
+ u8 offset;
+ int ret;
memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv);
- req.private_data_len = offset + conn_param->private_data_len;
- if (req.private_data_len < conn_param->private_data_len)
+ if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len))
return -EINVAL;
if (req.private_data_len) {
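
The SIDR and CM REQ private_data_len fields are u8, so the old "result smaller than the addend" wrap check is replaced by check_add_overflow(), which performs the addition in the destination's type and returns true when it does not fit. A small illustration of the semantics (not part of the patch, names hypothetical):

#include <linux/overflow.h>

/*
 * Mirrors the cma_resolve_ib_udp()/cma_connect_ib() checks: reject the
 * request when offset + len cannot be represented in the u8 total.
 */
static int example_total_priv_len(u8 offset, u8 len, u8 *total)
{
	if (check_add_overflow(offset, len, total))
		return -EINVAL;
	return 0;
}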
@@ -3094,10 +4320,12 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
id_priv->cm_id.ib = id;
req.path = id_priv->id.route.path_rec;
+ req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
req.max_cm_retries = CMA_MAX_CM_RETRIES;
+ trace_cm_send_sidr_req(id_priv);
ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
if (ret) {
ib_destroy_cm_id(id_priv->cm_id.ib);
@@ -3115,12 +4343,12 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
struct rdma_route *route;
void *private_data;
struct ib_cm_id *id;
- int offset, ret;
+ u8 offset;
+ int ret;
memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv);
- req.private_data_len = offset + conn_param->private_data_len;
- if (req.private_data_len < conn_param->private_data_len)
+ if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len))
return -EINVAL;
if (req.private_data_len) {
@@ -3151,9 +4379,13 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
}
req.primary_path = &route->path_rec[0];
- if (route->num_paths == 2)
+ req.primary_path_inbound = route->path_rec_inbound;
+ req.primary_path_outbound = route->path_rec_outbound;
+ if (route->num_pri_alt_paths == 2)
req.alternate_path = &route->path_rec[1];
+ req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
+ /* Alternate path SGID attribute currently unsupported */
req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
req.qp_num = id_priv->qp_num;
req.qp_type = id_priv->id.qp_type;
@@ -3167,7 +4399,10 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
req.max_cm_retries = CMA_MAX_CM_RETRIES;
req.srq = id_priv->srq ? 1 : 0;
+ req.ece.vendor_id = id_priv->ece.vendor_id;
+ req.ece.attr_mod = id_priv->ece.attr_mod;
+ trace_cm_send_req(id_priv);
ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
if (ret && !IS_ERR(id)) {
@@ -3190,7 +4425,11 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
if (IS_ERR(cm_id))
return PTR_ERR(cm_id);
+ mutex_lock(&id_priv->qp_mutex);
cm_id->tos = id_priv->tos;
+ cm_id->tos_set = id_priv->tos_set;
+ mutex_unlock(&id_priv->qp_mutex);
+
id_priv->cm_id.iw = cm_id;
memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
@@ -3221,12 +4460,21 @@ out:
return ret;
}
-int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
+/**
+ * rdma_connect_locked - Initiate an active connection request.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
+ *
+ * Same as rdma_connect() but can only be called from the
+ * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback.
+ */
+int rdma_connect_locked(struct rdma_cm_id *id,
+ struct rdma_conn_param *conn_param)
{
- struct rdma_id_private *id_priv;
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
int ret;
- id_priv = container_of(id, struct rdma_id_private, id);
if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
return -EINVAL;
@@ -3240,20 +4488,66 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
ret = cma_resolve_ib_udp(id_priv, conn_param);
else
ret = cma_connect_ib(id_priv, conn_param);
- } else if (rdma_cap_iw_cm(id->device, id->port_num))
+ } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
ret = cma_connect_iw(id_priv, conn_param);
- else
+ } else {
ret = -ENOSYS;
+ }
if (ret)
- goto err;
-
+ goto err_state;
return 0;
-err:
+err_state:
cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
return ret;
}
+EXPORT_SYMBOL(rdma_connect_locked);
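
rdma_connect_locked() exists because rdma_connect() now takes the handler_mutex itself; calling rdma_connect() from inside the event handler, where that mutex is already held, would deadlock. A hedged sketch of the intended call site, for a ULP that connects as soon as the route is resolved:

static int example_connect_handler(struct rdma_cm_id *id,
				   struct rdma_cm_event *ev)
{
	struct rdma_conn_param param = {
		.retry_count = 7,
		.rnr_retry_count = 7,
	};

	if (ev->event == RDMA_CM_EVENT_ROUTE_RESOLVED)
		/*
		 * handler_mutex is held here, so the _locked variant is
		 * required. A non-zero return makes the core destroy the ID.
		 */
		return rdma_connect_locked(id, &param);
	return 0;
}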
+
+/**
+ * rdma_connect - Initiate an active connection request.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
+ *
+ * Users must have resolved a route for the rdma_cm_id to connect with by having
+ * called rdma_resolve_route before calling this routine.
+ *
+ * This call will either connect to a remote QP or obtain remote QP information
+ * for unconnected rdma_cm_id's. The actual operation is based on the
+ * rdma_cm_id's port space.
+ */
+int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
+{
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
+ int ret;
+
+ mutex_lock(&id_priv->handler_mutex);
+ ret = rdma_connect_locked(id, conn_param);
+ mutex_unlock(&id_priv->handler_mutex);
+ return ret;
+}
EXPORT_SYMBOL(rdma_connect);
+/**
+ * rdma_connect_ece - Initiate an active connection request with ECE data.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
+ * @ece: ECE parameters
+ *
+ * See rdma_connect() explanation.
+ */
+int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
+ struct rdma_ucm_ece *ece)
+{
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
+
+ id_priv->ece.vendor_id = ece->vendor_id;
+ id_priv->ece.attr_mod = ece->attr_mod;
+
+ return rdma_connect(id, conn_param);
+}
+EXPORT_SYMBOL(rdma_connect_ece);
+
static int cma_accept_ib(struct rdma_id_private *id_priv,
struct rdma_conn_param *conn_param)
{
@@ -3279,7 +4573,10 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
rep.flow_control = conn_param->flow_control;
rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
rep.srq = id_priv->srq ? 1 : 0;
+ rep.ece.vendor_id = id_priv->ece.vendor_id;
+ rep.ece.attr_mod = id_priv->ece.attr_mod;
+ trace_cm_send_rep(id_priv);
ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
return ret;
@@ -3291,6 +4588,9 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,
struct iw_cm_conn_param iw_param;
int ret;
+ if (!conn_param)
+ return -EINVAL;
+
ret = cma_modify_qp_rtr(id_priv, conn_param);
if (ret)
return ret;
@@ -3299,9 +4599,9 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,
iw_param.ird = conn_param->responder_resources;
iw_param.private_data = conn_param->private_data;
iw_param.private_data_len = conn_param->private_data_len;
- if (id_priv->id.qp) {
+ if (id_priv->id.qp)
iw_param.qpn = id_priv->qp_num;
- } else
+ else
iw_param.qpn = conn_param->qp_num;
return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
@@ -3317,28 +4617,53 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
memset(&rep, 0, sizeof rep);
rep.status = status;
if (status == IB_SIDR_SUCCESS) {
- ret = cma_set_qkey(id_priv, qkey);
+ if (qkey)
+ ret = cma_set_qkey(id_priv, qkey);
+ else
+ ret = cma_set_default_qkey(id_priv);
if (ret)
return ret;
rep.qp_num = id_priv->qp_num;
rep.qkey = id_priv->qkey;
+
+ rep.ece.vendor_id = id_priv->ece.vendor_id;
+ rep.ece.attr_mod = id_priv->ece.attr_mod;
}
+
rep.private_data = private_data;
rep.private_data_len = private_data_len;
+ trace_cm_send_sidr_rep(id_priv);
return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}
+/**
+ * rdma_accept - Called to accept a connection request or response.
+ * @id: Connection identifier associated with the request.
+ * @conn_param: Information needed to establish the connection. This must be
+ * provided if accepting a connection request. If accepting a connection
+ * response, this parameter must be NULL.
+ *
+ * Typically, this routine is only called by the listener to accept a connection
+ * request. It must also be called on the active side of a connection if the
+ * user is performing their own QP transitions.
+ *
+ * In the case of error, a reject message is sent to the remote side and the
+ * state of the qp associated with the id is modified to error, such that any
+ * previously posted receive buffers would be flushed.
+ *
+ * This function is for use by kernel ULPs and must be called from under the
+ * handler callback.
+ */
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
- struct rdma_id_private *id_priv;
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
int ret;
- id_priv = container_of(id, struct rdma_id_private, id);
-
- id_priv->owner = task_pid_nr(current);
+ lockdep_assert_held(&id_priv->handler_mutex);
- if (!cma_comp(id_priv, RDMA_CM_CONNECT))
+ if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
return -EINVAL;
if (!id->qp && conn_param) {
@@ -3362,22 +4687,53 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
else
ret = cma_rep_recv(id_priv);
}
- } else if (rdma_cap_iw_cm(id->device, id->port_num))
+ } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
ret = cma_accept_iw(id_priv, conn_param);
- else
+ } else {
ret = -ENOSYS;
-
+ }
if (ret)
goto reject;
return 0;
reject:
cma_modify_qp_err(id_priv);
- rdma_reject(id, NULL, 0);
+ rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
return ret;
}
EXPORT_SYMBOL(rdma_accept);
+int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
+ struct rdma_ucm_ece *ece)
+{
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
+
+ id_priv->ece.vendor_id = ece->vendor_id;
+ id_priv->ece.attr_mod = ece->attr_mod;
+
+ return rdma_accept(id, conn_param);
+}
+EXPORT_SYMBOL(rdma_accept_ece);
+
+void rdma_lock_handler(struct rdma_cm_id *id)
+{
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
+
+ mutex_lock(&id_priv->handler_mutex);
+}
+EXPORT_SYMBOL(rdma_lock_handler);
+
+void rdma_unlock_handler(struct rdma_cm_id *id)
+{
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
+
+ mutex_unlock(&id_priv->handler_mutex);
+}
+EXPORT_SYMBOL(rdma_unlock_handler);
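
rdma_lock_handler()/rdma_unlock_handler() let a ULP that defers the accept to its own context still satisfy the lockdep_assert_held() now placed in rdma_accept(). A sketch of such a deferred accept; the assumption is that the connect request handler saved the new ID and returned 0 without accepting it there.

static int example_deferred_accept(struct rdma_cm_id *id,
				   struct rdma_conn_param *param)
{
	int ret;

	rdma_lock_handler(id);
	ret = rdma_accept(id, param);
	rdma_unlock_handler(id);
	return ret;
}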
+
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
struct rdma_id_private *id_priv;
@@ -3400,7 +4756,7 @@ int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
EXPORT_SYMBOL(rdma_notify);
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
- u8 private_data_len)
+ u8 private_data_len, u8 reason)
{
struct rdma_id_private *id_priv;
int ret;
@@ -3410,18 +4766,20 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
return -EINVAL;
if (rdma_cap_ib_cm(id->device, id->port_num)) {
- if (id->qp_type == IB_QPT_UD)
+ if (id->qp_type == IB_QPT_UD) {
ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
private_data, private_data_len);
- else
- ret = ib_send_cm_rej(id_priv->cm_id.ib,
- IB_CM_REJ_CONSUMER_DEFINED, NULL,
- 0, private_data, private_data_len);
+ } else {
+ trace_cm_send_rej(id_priv);
+ ret = ib_send_cm_rej(id_priv->cm_id.ib, reason, NULL, 0,
+ private_data, private_data_len);
+ }
} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
ret = iw_cm_reject(id_priv->cm_id.iw,
private_data, private_data_len);
- } else
+ } else {
ret = -ENOSYS;
+ }
return ret;
}
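
rdma_reject() now takes the IB CM reject reason from the caller instead of hard-coding IB_CM_REJ_CONSUMER_DEFINED, so a listener can distinguish consumer-defined rejects from other reasons. A sketch of a connect-request path rejecting with a consumer reason and a byte of private data (the payload value is purely illustrative):

#include <rdma/ib_cm.h>		/* IB_CM_REJ_CONSUMER_DEFINED */

static int example_reject_request(struct rdma_cm_id *id)
{
	const u8 why = 42;	/* hypothetical ULP-specific code */

	return rdma_reject(id, &why, sizeof(why), IB_CM_REJ_CONSUMER_DEFINED);
}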
@@ -3441,8 +4799,13 @@ int rdma_disconnect(struct rdma_cm_id *id)
if (ret)
goto out;
/* Initiate or respond to a disconnect. */
- if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
- ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
+ trace_cm_disconnect(id_priv);
+ if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) {
+ if (!ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0))
+ trace_cm_sent_drep(id_priv);
+ } else {
+ trace_cm_sent_dreq(id_priv);
+ }
} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
} else
@@ -3453,47 +4816,70 @@ out:
}
EXPORT_SYMBOL(rdma_disconnect);
-static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
+static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
+ struct ib_sa_multicast *multicast,
+ struct rdma_cm_event *event,
+ struct cma_multicast *mc)
{
- struct rdma_id_private *id_priv;
- struct cma_multicast *mc = multicast->context;
- struct rdma_cm_event event;
- int ret;
+ struct rdma_dev_addr *dev_addr;
+ enum ib_gid_type gid_type;
+ struct net_device *ndev;
- id_priv = mc->id_priv;
- if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
- cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
- return 0;
+ if (status)
+ pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
+ status);
- if (!status)
- status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
- mutex_lock(&id_priv->qp_mutex);
- if (!status && id_priv->id.qp)
- status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
- be16_to_cpu(multicast->rec.mlid));
- mutex_unlock(&id_priv->qp_mutex);
+ event->status = status;
+ event->param.ud.private_data = mc->context;
+ if (status) {
+ event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
+ return;
+ }
- memset(&event, 0, sizeof event);
- event.status = status;
- event.param.ud.private_data = mc->context;
- if (!status) {
- event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
- ib_init_ah_from_mcmember(id_priv->id.device,
- id_priv->id.port_num, &multicast->rec,
- &event.param.ud.ah_attr);
- event.param.ud.qp_num = 0xFFFFFF;
- event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
- } else
- event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
+ dev_addr = &id_priv->id.route.addr.dev_addr;
+ ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
+ gid_type =
+ id_priv->cma_dev
+ ->default_gid_type[id_priv->id.port_num -
+ rdma_start_port(
+ id_priv->cma_dev->device)];
+
+ event->event = RDMA_CM_EVENT_MULTICAST_JOIN;
+ if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num,
+ &multicast->rec, ndev, gid_type,
+ &event->param.ud.ah_attr)) {
+ event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
+ goto out;
+ }
- ret = id_priv->id.event_handler(&id_priv->id, &event);
- if (ret) {
- cma_exch(id_priv, RDMA_CM_DESTROYING);
- mutex_unlock(&id_priv->handler_mutex);
- rdma_destroy_id(&id_priv->id);
- return 0;
+ event->param.ud.qp_num = 0xFFFFFF;
+ event->param.ud.qkey = id_priv->qkey;
+
+out:
+ dev_put(ndev);
+}
+
+static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
+{
+ struct cma_multicast *mc = multicast->context;
+ struct rdma_id_private *id_priv = mc->id_priv;
+ struct rdma_cm_event event = {};
+ int ret = 0;
+
+ mutex_lock(&id_priv->handler_mutex);
+ if (READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL ||
+ READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING)
+ goto out;
+
+ ret = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
+ if (!ret) {
+ cma_make_mc_event(status, id_priv, multicast, &event, mc);
+ ret = cma_cm_event_handler(id_priv, &event);
}
+ rdma_destroy_ah_attr(&event.param.ud.ah_attr);
+ WARN_ON(ret);
+out:
mutex_unlock(&id_priv->handler_mutex);
return 0;
}
@@ -3515,7 +4901,7 @@ static void cma_set_mgid(struct rdma_id_private *id_priv,
memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
} else if (addr->sa_family == AF_IB) {
memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
- } else if ((addr->sa_family == AF_INET6)) {
+ } else if (addr->sa_family == AF_INET6) {
ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
if (id_priv->id.ps == RDMA_PS_UDP)
mc_map[7] = 0x01; /* Use RDMA CM signature */
@@ -3542,15 +4928,17 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
if (ret)
return ret;
- ret = cma_set_qkey(id_priv, 0);
- if (ret)
- return ret;
+ if (!id_priv->qkey) {
+ ret = cma_set_default_qkey(id_priv);
+ if (ret)
+ return ret;
+ }
cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
rec.qkey = cpu_to_be32(id_priv->qkey);
rdma_addr_get_sgid(dev_addr, &rec.port_gid);
rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
- rec.join_state = 1;
+ rec.join_state = mc->join_state;
comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
@@ -3565,26 +4953,14 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
IB_SA_MCMEMBER_REC_MTU |
IB_SA_MCMEMBER_REC_HOP_LIMIT;
- mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
- id_priv->id.port_num, &rec,
- comp_mask, GFP_KERNEL,
- cma_ib_mc_handler, mc);
- return PTR_ERR_OR_ZERO(mc->multicast.ib);
+ mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device,
+ id_priv->id.port_num, &rec, comp_mask,
+ GFP_KERNEL, cma_ib_mc_handler, mc);
+ return PTR_ERR_OR_ZERO(mc->sa_mc);
}
-static void iboe_mcast_work_handler(struct work_struct *work)
-{
- struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
- struct cma_multicast *mc = mw->mc;
- struct ib_sa_multicast *m = mc->multicast.ib;
-
- mc->multicast.ib->context = mc;
- cma_ib_mc_handler(0, m);
- kref_put(&mc->mcref, release_mc);
- kfree(mw);
-}
-
-static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
+static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
+ enum ib_gid_type gid_type)
{
struct sockaddr_in *sin = (struct sockaddr_in *)addr;
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
@@ -3594,8 +4970,10 @@ static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
} else if (addr->sa_family == AF_INET6) {
memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
} else {
- mgid->raw[0] = 0xff;
- mgid->raw[1] = 0x0e;
+ mgid->raw[0] =
+ (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0xff;
+ mgid->raw[1] =
+ (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0x0e;
mgid->raw[2] = 0;
mgid->raw[3] = 0;
mgid->raw[4] = 0;
@@ -3613,100 +4991,109 @@ static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
struct cma_multicast *mc)
{
- struct iboe_mcast_work *work;
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
- int err;
+ int err = 0;
struct sockaddr *addr = (struct sockaddr *)&mc->addr;
struct net_device *ndev = NULL;
+ struct ib_sa_multicast ib = {};
+ enum ib_gid_type gid_type;
+ bool send_only;
- if (cma_zero_addr((struct sockaddr *)&mc->addr))
- return -EINVAL;
+ send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
- work = kzalloc(sizeof *work, GFP_KERNEL);
- if (!work)
- return -ENOMEM;
+ if (cma_zero_addr(addr))
+ return -EINVAL;
- mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
- if (!mc->multicast.ib) {
- err = -ENOMEM;
- goto out1;
- }
+ gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
+ rdma_start_port(id_priv->cma_dev->device)];
+ cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
- cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);
+ ib.rec.pkey = cpu_to_be16(0xffff);
+ if (dev_addr->bound_dev_if)
+ ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
+ if (!ndev)
+ return -ENODEV;
- mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
- if (id_priv->id.ps == RDMA_PS_UDP)
- mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
+ ib.rec.rate = IB_RATE_PORT_CURRENT;
+ ib.rec.hop_limit = 1;
+ ib.rec.mtu = iboe_get_mtu(ndev->mtu);
- if (dev_addr->bound_dev_if)
- ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
- if (!ndev) {
- err = -ENODEV;
- goto out2;
+ if (addr->sa_family == AF_INET) {
+ if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
+ ib.rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
+ if (!send_only) {
+ err = cma_igmp_send(ndev, &ib.rec.mgid,
+ true);
+ }
+ }
+ } else {
+ if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+ err = -ENOTSUPP;
}
- mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
- mc->multicast.ib->rec.hop_limit = 1;
- mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
dev_put(ndev);
- if (!mc->multicast.ib->rec.mtu) {
- err = -EINVAL;
- goto out2;
- }
- rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
- &mc->multicast.ib->rec.port_gid);
- work->id = id_priv;
- work->mc = mc;
- INIT_WORK(&work->work, iboe_mcast_work_handler);
- kref_get(&mc->mcref);
- queue_work(cma_wq, &work->work);
+ if (err || !ib.rec.mtu)
+ return err ?: -EINVAL;
- return 0;
+ if (!id_priv->qkey)
+ cma_set_default_qkey(id_priv);
-out2:
- kfree(mc->multicast.ib);
-out1:
- kfree(work);
- return err;
+ rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+ &ib.rec.port_gid);
+ INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler);
+ cma_make_mc_event(0, id_priv, &ib, &mc->iboe_join.event, mc);
+ queue_work(cma_wq, &mc->iboe_join.work);
+ return 0;
}
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
- void *context)
+ u8 join_state, void *context)
{
- struct rdma_id_private *id_priv;
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
struct cma_multicast *mc;
int ret;
- id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
- !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
+ /* Not supported for kernel QPs */
+ if (WARN_ON(id->qp))
+ return -EINVAL;
+
+ /* ULP is calling this wrong. */
+ if (!id->device || (READ_ONCE(id_priv->state) != RDMA_CM_ADDR_BOUND &&
+ READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED))
+ return -EINVAL;
+
+ if (id_priv->id.qp_type != IB_QPT_UD)
return -EINVAL;
- mc = kmalloc(sizeof *mc, GFP_KERNEL);
+ mc = kzalloc(sizeof(*mc), GFP_KERNEL);
if (!mc)
return -ENOMEM;
memcpy(&mc->addr, addr, rdma_addr_size(addr));
mc->context = context;
mc->id_priv = id_priv;
-
- spin_lock(&id_priv->lock);
- list_add(&mc->list, &id_priv->mc_list);
- spin_unlock(&id_priv->lock);
+ mc->join_state = join_state;
if (rdma_protocol_roce(id->device, id->port_num)) {
- kref_init(&mc->mcref);
ret = cma_iboe_join_multicast(id_priv, mc);
- } else if (rdma_cap_ib_mcast(id->device, id->port_num))
+ if (ret)
+ goto out_err;
+ } else if (rdma_cap_ib_mcast(id->device, id->port_num)) {
ret = cma_join_ib_multicast(id_priv, mc);
- else
+ if (ret)
+ goto out_err;
+ } else {
ret = -ENOSYS;
-
- if (ret) {
- spin_lock_irq(&id_priv->lock);
- list_del(&mc->list);
- spin_unlock_irq(&id_priv->lock);
- kfree(mc);
+ goto out_err;
}
+
+ spin_lock(&id_priv->lock);
+ list_add(&mc->list, &id_priv->mc_list);
+ spin_unlock(&id_priv->lock);
+
+ return 0;
+out_err:
+ kfree(mc);
return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);
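
The new join_state argument is propagated into rec.join_state, so a sender-only ULP can ask for a send-only full-member join instead of the former unconditional full-member join. A sketch, assuming a UD ID that is already address-bound or resolved and SENDONLY_FULLMEMBER_JOIN as used in cma_iboe_join_multicast() above; completion is reported via RDMA_CM_EVENT_MULTICAST_JOIN or _ERROR and undone with rdma_leave_multicast().

#include <rdma/ib_sa.h>		/* SENDONLY_FULLMEMBER_JOIN */

static int example_join(struct rdma_cm_id *id, struct sockaddr *mcast_addr,
			void *ctx, bool send_only)
{
	/* Full member join is join_state 1 (BIT(FULLMEMBER_JOIN)). */
	u8 join_state = send_only ? BIT(SENDONLY_FULLMEMBER_JOIN) : 1;

	return rdma_join_multicast(id, mcast_addr, join_state, ctx);
}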
@@ -3719,25 +5106,14 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
id_priv = container_of(id, struct rdma_id_private, id);
spin_lock_irq(&id_priv->lock);
list_for_each_entry(mc, &id_priv->mc_list, list) {
- if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
- list_del(&mc->list);
- spin_unlock_irq(&id_priv->lock);
-
- if (id->qp)
- ib_detach_mcast(id->qp,
- &mc->multicast.ib->rec.mgid,
- be16_to_cpu(mc->multicast.ib->rec.mlid));
-
- BUG_ON(id_priv->cma_dev->device != id->device);
-
- if (rdma_cap_ib_mcast(id->device, id->port_num)) {
- ib_sa_free_multicast(mc->multicast.ib);
- kfree(mc);
- } else if (rdma_protocol_roce(id->device, id->port_num))
- kref_put(&mc->mcref, release_mc);
+ if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0)
+ continue;
+ list_del(&mc->list);
+ spin_unlock_irq(&id_priv->lock);
- return;
- }
+ WARN_ON(id_priv->cma_dev->device != id->device);
+ destroy_mc(id_priv, mc);
+ return;
}
spin_unlock_irq(&id_priv->lock);
}
@@ -3746,22 +5122,23 @@ EXPORT_SYMBOL(rdma_leave_multicast);
static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
struct rdma_dev_addr *dev_addr;
- struct cma_ndev_work *work;
+ struct cma_work *work;
dev_addr = &id_priv->id.route.addr.dev_addr;
if ((dev_addr->bound_dev_if == ndev->ifindex) &&
+ (net_eq(dev_net(ndev), dev_addr->net)) &&
memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
- printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
- ndev->name, &id_priv->id);
+ pr_info("RDMA CM addr change for ndev %s used by id %p\n",
+ ndev->name, &id_priv->id);
work = kzalloc(sizeof *work, GFP_KERNEL);
if (!work)
return -ENOMEM;
- INIT_WORK(&work->work, cma_ndev_work_handler);
+ INIT_WORK(&work->work, cma_work_handler);
work->id = id_priv;
work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
- atomic_inc(&id_priv->refcount);
+ cma_id_get(id_priv);
queue_work(cma_wq, &work->work);
}
@@ -3776,18 +5153,15 @@ static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
struct rdma_id_private *id_priv;
int ret = NOTIFY_DONE;
- if (dev_net(ndev) != &init_net)
- return NOTIFY_DONE;
-
if (event != NETDEV_BONDING_FAILOVER)
return NOTIFY_DONE;
- if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
+ if (!netif_is_bond_master(ndev))
return NOTIFY_DONE;
mutex_lock(&lock);
list_for_each_entry(cma_dev, &dev_list, list)
- list_for_each_entry(id_priv, &cma_dev->id_list, list) {
+ list_for_each_entry(id_priv, &cma_dev->id_list, device_item) {
ret = cma_netdev_change(ndev, id_priv);
if (ret)
goto out;
@@ -3798,223 +5172,469 @@ out:
return ret;
}
-static struct notifier_block cma_nb = {
- .notifier_call = cma_netdev_callback
-};
-
-static void cma_add_one(struct ib_device *device)
+static void cma_netevent_work_handler(struct work_struct *_work)
{
- struct cma_device *cma_dev;
- struct rdma_id_private *id_priv;
+ struct rdma_id_private *id_priv =
+ container_of(_work, struct rdma_id_private, id.net_work);
+ struct rdma_cm_event event = {};
- cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
- if (!cma_dev)
- return;
+ mutex_lock(&id_priv->handler_mutex);
- cma_dev->device = device;
+ if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
+ READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
+ goto out_unlock;
- init_completion(&cma_dev->comp);
- atomic_set(&cma_dev->refcount, 1);
- INIT_LIST_HEAD(&cma_dev->id_list);
- ib_set_client_data(device, &cma_client, cma_dev);
+ event.event = RDMA_CM_EVENT_UNREACHABLE;
+ event.status = -ETIMEDOUT;
- mutex_lock(&lock);
- list_add_tail(&cma_dev->list, &dev_list);
- list_for_each_entry(id_priv, &listen_any_list, list)
- cma_listen_on_dev(id_priv, cma_dev);
- mutex_unlock(&lock);
+ if (cma_cm_event_handler(id_priv, &event)) {
+ __acquire(&id_priv->handler_mutex);
+ id_priv->cm_id.ib = NULL;
+ cma_id_put(id_priv);
+ destroy_id_handler_unlock(id_priv);
+ return;
+ }
+
+out_unlock:
+ mutex_unlock(&id_priv->handler_mutex);
+ cma_id_put(id_priv);
}
-static int cma_remove_id_dev(struct rdma_id_private *id_priv)
+static int cma_netevent_callback(struct notifier_block *self,
+ unsigned long event, void *ctx)
{
- struct rdma_cm_event event;
- enum rdma_cm_state state;
- int ret = 0;
+ struct id_table_entry *ips_node = NULL;
+ struct rdma_id_private *current_id;
+ struct neighbour *neigh = ctx;
+ unsigned long flags;
- /* Record that we want to remove the device */
- state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
- if (state == RDMA_CM_DESTROYING)
- return 0;
+ if (event != NETEVENT_NEIGH_UPDATE)
+ return NOTIFY_DONE;
- cma_cancel_operation(id_priv, state);
- mutex_lock(&id_priv->handler_mutex);
+ spin_lock_irqsave(&id_table_lock, flags);
+ if (neigh->tbl->family == AF_INET6) {
+ struct sockaddr_in6 neigh_sock_6;
+
+ neigh_sock_6.sin6_family = AF_INET6;
+ neigh_sock_6.sin6_addr = *(struct in6_addr *)neigh->primary_key;
+ ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex,
+ (struct sockaddr *)&neigh_sock_6);
+ } else if (neigh->tbl->family == AF_INET) {
+ struct sockaddr_in neigh_sock_4;
+
+ neigh_sock_4.sin_family = AF_INET;
+ neigh_sock_4.sin_addr.s_addr = *(__be32 *)(neigh->primary_key);
+ ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex,
+ (struct sockaddr *)&neigh_sock_4);
+ } else
+ goto out;
- /* Check for destruction from another callback. */
- if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
+ if (!ips_node)
goto out;
- memset(&event, 0, sizeof event);
- event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
- ret = id_priv->id.event_handler(&id_priv->id, &event);
+ list_for_each_entry(current_id, &ips_node->id_list, id_list_entry) {
+ if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr,
+ neigh->ha, ETH_ALEN))
+ continue;
+ cma_id_get(current_id);
+ if (!queue_work(cma_wq, &current_id->id.net_work))
+ cma_id_put(current_id);
+ }
out:
+ spin_unlock_irqrestore(&id_table_lock, flags);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cma_nb = {
+ .notifier_call = cma_netdev_callback
+};
+
+static struct notifier_block cma_netevent_cb = {
+ .notifier_call = cma_netevent_callback
+};
+
+static void cma_send_device_removal_put(struct rdma_id_private *id_priv)
+{
+ struct rdma_cm_event event = { .event = RDMA_CM_EVENT_DEVICE_REMOVAL };
+ enum rdma_cm_state state;
+ unsigned long flags;
+
+ mutex_lock(&id_priv->handler_mutex);
+ /* Record that we want to remove the device */
+ spin_lock_irqsave(&id_priv->lock, flags);
+ state = id_priv->state;
+ if (state == RDMA_CM_DESTROYING || state == RDMA_CM_DEVICE_REMOVAL) {
+ spin_unlock_irqrestore(&id_priv->lock, flags);
+ mutex_unlock(&id_priv->handler_mutex);
+ cma_id_put(id_priv);
+ return;
+ }
+ id_priv->state = RDMA_CM_DEVICE_REMOVAL;
+ spin_unlock_irqrestore(&id_priv->lock, flags);
+
+ if (cma_cm_event_handler(id_priv, &event)) {
+ /*
+ * At this point the ULP promises it won't call
+ * rdma_destroy_id() concurrently
+ */
+ cma_id_put(id_priv);
+ mutex_unlock(&id_priv->handler_mutex);
+ trace_cm_id_destroy(id_priv);
+ _destroy_id(id_priv, state);
+ return;
+ }
mutex_unlock(&id_priv->handler_mutex);
- return ret;
+
+ /*
+	 * If this races with destroy then the thread that first sets the state
+	 * to destroying does the cancel.
+ */
+ cma_cancel_operation(id_priv, state);
+ cma_id_put(id_priv);
}
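
As cma_send_device_removal_put() shows, RDMA_CM_EVENT_DEVICE_REMOVAL is delivered with the handler_mutex held and the ULP has two choices: return 0 and later call rdma_destroy_id() itself, or return non-zero and let the core destroy the ID, never both. A minimal sketch of the second option:

static int example_removal_handler(struct rdma_cm_id *id,
				   struct rdma_cm_event *ev)
{
	if (ev->event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
		/*
		 * Returning non-zero asks rdma_cm to destroy the ID; the ULP
		 * must then not call rdma_destroy_id() on it afterwards.
		 */
		return 1;
	}
	return 0;
}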
static void cma_process_remove(struct cma_device *cma_dev)
{
- struct rdma_id_private *id_priv;
- int ret;
-
mutex_lock(&lock);
while (!list_empty(&cma_dev->id_list)) {
- id_priv = list_entry(cma_dev->id_list.next,
- struct rdma_id_private, list);
+ struct rdma_id_private *id_priv = list_first_entry(
+ &cma_dev->id_list, struct rdma_id_private, device_item);
- list_del(&id_priv->listen_list);
- list_del_init(&id_priv->list);
- atomic_inc(&id_priv->refcount);
+ list_del_init(&id_priv->listen_item);
+ list_del_init(&id_priv->device_item);
+ cma_id_get(id_priv);
mutex_unlock(&lock);
- ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
- cma_deref_id(id_priv);
- if (ret)
- rdma_destroy_id(&id_priv->id);
+ cma_send_device_removal_put(id_priv);
mutex_lock(&lock);
}
mutex_unlock(&lock);
- cma_deref_dev(cma_dev);
+ cma_dev_put(cma_dev);
wait_for_completion(&cma_dev->comp);
}
-static void cma_remove_one(struct ib_device *device, void *client_data)
+static bool cma_supported(struct ib_device *device)
{
- struct cma_device *cma_dev = client_data;
+ u32 i;
+
+ rdma_for_each_port(device, i) {
+ if (rdma_cap_ib_cm(device, i) || rdma_cap_iw_cm(device, i))
+ return true;
+ }
+ return false;
+}
+
+static int cma_add_one(struct ib_device *device)
+{
+ struct rdma_id_private *to_destroy;
+ struct cma_device *cma_dev;
+ struct rdma_id_private *id_priv;
+ unsigned long supported_gids = 0;
+ int ret;
+ u32 i;
+
+ if (!cma_supported(device))
+ return -EOPNOTSUPP;
+ cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL);
if (!cma_dev)
- return;
+ return -ENOMEM;
+
+ cma_dev->device = device;
+ cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
+ sizeof(*cma_dev->default_gid_type),
+ GFP_KERNEL);
+ if (!cma_dev->default_gid_type) {
+ ret = -ENOMEM;
+ goto free_cma_dev;
+ }
+
+ cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt,
+ sizeof(*cma_dev->default_roce_tos),
+ GFP_KERNEL);
+ if (!cma_dev->default_roce_tos) {
+ ret = -ENOMEM;
+ goto free_gid_type;
+ }
+
+ rdma_for_each_port (device, i) {
+ supported_gids = roce_gid_type_mask_support(device, i);
+ WARN_ON(!supported_gids);
+ if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE))
+ cma_dev->default_gid_type[i - rdma_start_port(device)] =
+ CMA_PREFERRED_ROCE_GID_TYPE;
+ else
+ cma_dev->default_gid_type[i - rdma_start_port(device)] =
+ find_first_bit(&supported_gids, BITS_PER_LONG);
+ cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0;
+ }
+
+ init_completion(&cma_dev->comp);
+ refcount_set(&cma_dev->refcount, 1);
+ INIT_LIST_HEAD(&cma_dev->id_list);
+ ib_set_client_data(device, &cma_client, cma_dev);
mutex_lock(&lock);
+ list_add_tail(&cma_dev->list, &dev_list);
+ list_for_each_entry(id_priv, &listen_any_list, listen_any_item) {
+ ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
+ if (ret)
+ goto free_listen;
+ }
+ mutex_unlock(&lock);
+
+ trace_cm_add_one(device);
+ return 0;
+
+free_listen:
list_del(&cma_dev->list);
mutex_unlock(&lock);
+ /* cma_process_remove() will delete to_destroy */
cma_process_remove(cma_dev);
+ kfree(cma_dev->default_roce_tos);
+free_gid_type:
+ kfree(cma_dev->default_gid_type);
+
+free_cma_dev:
kfree(cma_dev);
+ return ret;
}
-static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
+static void cma_remove_one(struct ib_device *device, void *client_data)
{
- struct nlmsghdr *nlh;
- struct rdma_cm_id_stats *id_stats;
- struct rdma_id_private *id_priv;
- struct rdma_cm_id *id = NULL;
- struct cma_device *cma_dev;
- int i_dev = 0, i_id = 0;
-
- /*
- * We export all of the IDs as a sequence of messages. Each
- * ID gets its own netlink message.
- */
- mutex_lock(&lock);
+ struct cma_device *cma_dev = client_data;
- list_for_each_entry(cma_dev, &dev_list, list) {
- if (i_dev < cb->args[0]) {
- i_dev++;
- continue;
- }
+ trace_cm_remove_one(device);
- i_id = 0;
- list_for_each_entry(id_priv, &cma_dev->id_list, list) {
- if (i_id < cb->args[1]) {
- i_id++;
- continue;
- }
-
- id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
- sizeof *id_stats, RDMA_NL_RDMA_CM,
- RDMA_NL_RDMA_CM_ID_STATS,
- NLM_F_MULTI);
- if (!id_stats)
- goto out;
+ mutex_lock(&lock);
+ list_del(&cma_dev->list);
+ mutex_unlock(&lock);
- memset(id_stats, 0, sizeof *id_stats);
- id = &id_priv->id;
- id_stats->node_type = id->route.addr.dev_addr.dev_type;
- id_stats->port_num = id->port_num;
- id_stats->bound_dev_if =
- id->route.addr.dev_addr.bound_dev_if;
-
- if (ibnl_put_attr(skb, nlh,
- rdma_addr_size(cma_src_addr(id_priv)),
- cma_src_addr(id_priv),
- RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
- goto out;
- if (ibnl_put_attr(skb, nlh,
- rdma_addr_size(cma_src_addr(id_priv)),
- cma_dst_addr(id_priv),
- RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
- goto out;
+ cma_process_remove(cma_dev);
+ kfree(cma_dev->default_roce_tos);
+ kfree(cma_dev->default_gid_type);
+ kfree(cma_dev);
+}
- id_stats->pid = id_priv->owner;
- id_stats->port_space = id->ps;
- id_stats->cm_state = id_priv->state;
- id_stats->qp_num = id_priv->qp_num;
- id_stats->qp_type = id->qp_type;
+static int cma_init_net(struct net *net)
+{
+ struct cma_pernet *pernet = cma_pernet(net);
- i_id++;
- }
+ xa_init(&pernet->tcp_ps);
+ xa_init(&pernet->udp_ps);
+ xa_init(&pernet->ipoib_ps);
+ xa_init(&pernet->ib_ps);
- cb->args[1] = 0;
- i_dev++;
- }
+ return 0;
+}
-out:
- mutex_unlock(&lock);
- cb->args[0] = i_dev;
- cb->args[1] = i_id;
+static void cma_exit_net(struct net *net)
+{
+ struct cma_pernet *pernet = cma_pernet(net);
- return skb->len;
+ WARN_ON(!xa_empty(&pernet->tcp_ps));
+ WARN_ON(!xa_empty(&pernet->udp_ps));
+ WARN_ON(!xa_empty(&pernet->ipoib_ps));
+ WARN_ON(!xa_empty(&pernet->ib_ps));
}
-static const struct ibnl_client_cbs cma_cb_table[] = {
- [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
- .module = THIS_MODULE },
+static struct pernet_operations cma_pernet_operations = {
+ .init = cma_init_net,
+ .exit = cma_exit_net,
+ .id = &cma_pernet_id,
+ .size = sizeof(struct cma_pernet),
};
static int __init cma_init(void)
{
int ret;
- cma_wq = create_singlethread_workqueue("rdma_cm");
+ /*
+ * There is a rare lock ordering dependency in cma_netdev_callback()
+ * that only happens when bonding is enabled. Teach lockdep that rtnl
+ * must never be nested under lock so it can find these without having
+ * to test with bonding.
+ */
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ rtnl_lock();
+ mutex_lock(&lock);
+ mutex_unlock(&lock);
+ rtnl_unlock();
+ }
+
+ cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
if (!cma_wq)
return -ENOMEM;
+ ret = register_pernet_subsys(&cma_pernet_operations);
+ if (ret)
+ goto err_wq;
+
ib_sa_register_client(&sa_client);
- rdma_addr_register_client(&addr_client);
register_netdevice_notifier(&cma_nb);
+ register_netevent_notifier(&cma_netevent_cb);
ret = ib_register_client(&cma_client);
if (ret)
goto err;
- if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
- printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");
+ ret = cma_configfs_init();
+ if (ret)
+ goto err_ib;
return 0;
+err_ib:
+ ib_unregister_client(&cma_client);
err:
+ unregister_netevent_notifier(&cma_netevent_cb);
unregister_netdevice_notifier(&cma_nb);
- rdma_addr_unregister_client(&addr_client);
ib_sa_unregister_client(&sa_client);
+ unregister_pernet_subsys(&cma_pernet_operations);
+err_wq:
destroy_workqueue(cma_wq);
return ret;
}
static void __exit cma_cleanup(void)
{
- ibnl_remove_client(RDMA_NL_RDMA_CM);
+ cma_configfs_exit();
ib_unregister_client(&cma_client);
+ unregister_netevent_notifier(&cma_netevent_cb);
unregister_netdevice_notifier(&cma_nb);
- rdma_addr_unregister_client(&addr_client);
ib_sa_unregister_client(&sa_client);
+ unregister_pernet_subsys(&cma_pernet_operations);
destroy_workqueue(cma_wq);
- idr_destroy(&tcp_ps);
- idr_destroy(&udp_ps);
- idr_destroy(&ipoib_ps);
- idr_destroy(&ib_ps);
}
module_init(cma_init);
module_exit(cma_cleanup);
+
+static void cma_query_ib_service_handler(int status,
+ struct sa_service_rec *recs,
+ unsigned int num_recs, void *context)
+{
+ struct cma_work *work = context;
+ struct rdma_id_private *id_priv = work->id;
+ struct sockaddr_ib *addr;
+
+ if (status)
+ goto fail;
+
+ if (!num_recs) {
+ status = -ENOENT;
+ goto fail;
+ }
+
+ if (id_priv->id.route.service_recs) {
+ status = -EALREADY;
+ goto fail;
+ }
+
+ id_priv->id.route.service_recs =
+ kmalloc_array(num_recs, sizeof(*recs), GFP_KERNEL);
+ if (!id_priv->id.route.service_recs) {
+ status = -ENOMEM;
+ goto fail;
+ }
+
+ id_priv->id.route.num_service_recs = num_recs;
+ memcpy(id_priv->id.route.service_recs, recs, sizeof(*recs) * num_recs);
+
+ addr = (struct sockaddr_ib *)&id_priv->id.route.addr.dst_addr;
+ addr->sib_family = AF_IB;
+ addr->sib_addr = *(struct ib_addr *)&recs->gid;
+ addr->sib_pkey = recs->pkey;
+ addr->sib_sid = recs->id;
+ rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr,
+ (union ib_gid *)&addr->sib_addr);
+ ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr,
+ ntohs(addr->sib_pkey));
+
+ queue_work(cma_wq, &work->work);
+ return;
+
+fail:
+ work->old_state = RDMA_CM_ADDRINFO_QUERY;
+ work->new_state = RDMA_CM_ADDR_BOUND;
+ work->event.event = RDMA_CM_EVENT_ADDRINFO_ERROR;
+ work->event.status = status;
+ pr_debug_ratelimited(
+ "RDMA CM: SERVICE_ERROR: failed to query service record. status %d\n",
+ status);
+ queue_work(cma_wq, &work->work);
+}
+
+static int cma_resolve_ib_service(struct rdma_id_private *id_priv,
+ struct rdma_ucm_ib_service *ibs)
+{
+ struct sa_service_rec sr = {};
+ ib_sa_comp_mask mask = 0;
+ struct cma_work *work;
+
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
+
+ cma_id_get(id_priv);
+
+ work->id = id_priv;
+ INIT_WORK(&work->work, cma_work_handler);
+ work->old_state = RDMA_CM_ADDRINFO_QUERY;
+ work->new_state = RDMA_CM_ADDRINFO_RESOLVED;
+ work->event.event = RDMA_CM_EVENT_ADDRINFO_RESOLVED;
+
+ if (ibs->flags & RDMA_USER_CM_IB_SERVICE_FLAG_ID) {
+ sr.id = cpu_to_be64(ibs->service_id);
+ mask |= IB_SA_SERVICE_REC_SERVICE_ID;
+ }
+ if (ibs->flags & RDMA_USER_CM_IB_SERVICE_FLAG_NAME) {
+ strscpy(sr.name, ibs->service_name, sizeof(sr.name));
+ mask |= IB_SA_SERVICE_REC_SERVICE_NAME;
+ }
+
+ id_priv->query_id = ib_sa_service_rec_get(&sa_client,
+ id_priv->id.device,
+ id_priv->id.port_num,
+ &sr, mask,
+ 2000, GFP_KERNEL,
+ cma_query_ib_service_handler,
+ work, &id_priv->query);
+
+ if (id_priv->query_id < 0) {
+ cma_id_put(id_priv);
+ kfree(work);
+ return id_priv->query_id;
+ }
+
+ return 0;
+}
+
+int rdma_resolve_ib_service(struct rdma_cm_id *id,
+ struct rdma_ucm_ib_service *ibs)
+{
+ struct rdma_id_private *id_priv;
+ int ret;
+
+ id_priv = container_of(id, struct rdma_id_private, id);
+ if (!id_priv->cma_dev ||
+ !cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDRINFO_QUERY))
+ return -EINVAL;
+
+ if (rdma_cap_ib_sa(id->device, id->port_num))
+ ret = cma_resolve_ib_service(id_priv, ibs);
+ else
+ ret = -EOPNOTSUPP;
+
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ cma_comp_exch(id_priv, RDMA_CM_ADDRINFO_QUERY, RDMA_CM_ADDR_BOUND);
+ return ret;
+}
+EXPORT_SYMBOL(rdma_resolve_ib_service);
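
rdma_resolve_ib_service() queries SA service records by ID and/or name and, on success, stores them in route.service_recs and rewrites the destination as an AF_IB address taken from the first record; completion is reported as RDMA_CM_EVENT_ADDRINFO_RESOLVED or _ERROR. A hedged caller sketch using only the flags and fields referenced above (the API is introduced by this series, so treat the usage as illustrative):

static int example_resolve_service_by_id(struct rdma_cm_id *id, u64 service_id)
{
	struct rdma_ucm_ib_service ibs = {
		.flags = RDMA_USER_CM_IB_SERVICE_FLAG_ID,
		.service_id = service_id,
	};

	/*
	 * The ID must be in RDMA_CM_ADDR_BOUND with a device attached;
	 * the result arrives asynchronously on the ID's event handler.
	 */
	return rdma_resolve_ib_service(id, &ibs);
}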
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
new file mode 100644
index 000000000000..f2fb2d8a6597
--- /dev/null
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/configfs.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+
+#include "core_priv.h"
+#include "cma_priv.h"
+
+struct cma_device;
+
+struct cma_dev_group;
+
+struct cma_dev_port_group {
+ u32 port_num;
+ struct cma_dev_group *cma_dev_group;
+ struct config_group group;
+};
+
+struct cma_dev_group {
+ char name[IB_DEVICE_NAME_MAX];
+ struct config_group device_group;
+ struct config_group ports_group;
+ struct cma_dev_port_group *ports;
+};
+
+static struct cma_dev_port_group *to_dev_port_group(struct config_item *item)
+{
+ struct config_group *group;
+
+ if (!item)
+ return NULL;
+
+ group = container_of(item, struct config_group, cg_item);
+ return container_of(group, struct cma_dev_port_group, group);
+}
+
+static bool filter_by_name(struct ib_device *ib_dev, void *cookie)
+{
+ return !strcmp(dev_name(&ib_dev->dev), cookie);
+}
+
+static int cma_configfs_params_get(struct config_item *item,
+ struct cma_device **pcma_dev,
+ struct cma_dev_port_group **pgroup)
+{
+ struct cma_dev_port_group *group = to_dev_port_group(item);
+ struct cma_device *cma_dev;
+
+ if (!group)
+ return -ENODEV;
+
+ cma_dev = cma_enum_devices_by_ibdev(filter_by_name,
+ group->cma_dev_group->name);
+ if (!cma_dev)
+ return -ENODEV;
+
+ *pcma_dev = cma_dev;
+ *pgroup = group;
+
+ return 0;
+}
+
+static void cma_configfs_params_put(struct cma_device *cma_dev)
+{
+ cma_dev_put(cma_dev);
+}
+
+static ssize_t default_roce_mode_show(struct config_item *item,
+ char *buf)
+{
+ struct cma_device *cma_dev;
+ struct cma_dev_port_group *group;
+ int gid_type;
+ ssize_t ret;
+
+ ret = cma_configfs_params_get(item, &cma_dev, &group);
+ if (ret)
+ return ret;
+
+ gid_type = cma_get_default_gid_type(cma_dev, group->port_num);
+ cma_configfs_params_put(cma_dev);
+
+ if (gid_type < 0)
+ return gid_type;
+
+ return sysfs_emit(buf, "%s\n", ib_cache_gid_type_str(gid_type));
+}
+
+static ssize_t default_roce_mode_store(struct config_item *item,
+ const char *buf, size_t count)
+{
+ struct cma_device *cma_dev;
+ struct cma_dev_port_group *group;
+ int gid_type;
+ ssize_t ret;
+
+ ret = cma_configfs_params_get(item, &cma_dev, &group);
+ if (ret)
+ return ret;
+
+ gid_type = ib_cache_gid_parse_type_str(buf);
+ if (gid_type < 0) {
+ cma_configfs_params_put(cma_dev);
+ return -EINVAL;
+ }
+
+ ret = cma_set_default_gid_type(cma_dev, group->port_num, gid_type);
+
+ cma_configfs_params_put(cma_dev);
+
+ return !ret ? strnlen(buf, count) : ret;
+}
+
+CONFIGFS_ATTR(, default_roce_mode);
+
+static ssize_t default_roce_tos_show(struct config_item *item, char *buf)
+{
+ struct cma_device *cma_dev;
+ struct cma_dev_port_group *group;
+ ssize_t ret;
+ u8 tos;
+
+ ret = cma_configfs_params_get(item, &cma_dev, &group);
+ if (ret)
+ return ret;
+
+ tos = cma_get_default_roce_tos(cma_dev, group->port_num);
+ cma_configfs_params_put(cma_dev);
+
+ return sysfs_emit(buf, "%u\n", tos);
+}
+
+static ssize_t default_roce_tos_store(struct config_item *item,
+ const char *buf, size_t count)
+{
+ struct cma_device *cma_dev;
+ struct cma_dev_port_group *group;
+ ssize_t ret;
+ u8 tos;
+
+ ret = kstrtou8(buf, 0, &tos);
+ if (ret)
+ return ret;
+
+ ret = cma_configfs_params_get(item, &cma_dev, &group);
+ if (ret)
+ return ret;
+
+ ret = cma_set_default_roce_tos(cma_dev, group->port_num, tos);
+ cma_configfs_params_put(cma_dev);
+
+ return ret ? ret : strnlen(buf, count);
+}
+
+CONFIGFS_ATTR(, default_roce_tos);
+
+static struct configfs_attribute *cma_configfs_attributes[] = {
+ &attr_default_roce_mode,
+ &attr_default_roce_tos,
+ NULL,
+};
+
+static const struct config_item_type cma_port_group_type = {
+ .ct_attrs = cma_configfs_attributes,
+ .ct_owner = THIS_MODULE
+};
+
+static int make_cma_ports(struct cma_dev_group *cma_dev_group,
+ struct cma_device *cma_dev)
+{
+ struct cma_dev_port_group *ports;
+ struct ib_device *ibdev;
+ u32 ports_num;
+ u32 i;
+
+ ibdev = cma_get_ib_dev(cma_dev);
+
+ if (!ibdev)
+ return -ENODEV;
+
+ ports_num = ibdev->phys_port_cnt;
+ ports = kcalloc(ports_num, sizeof(*cma_dev_group->ports),
+ GFP_KERNEL);
+
+ if (!ports)
+ return -ENOMEM;
+
+ for (i = 0; i < ports_num; i++) {
+ char port_str[11];
+
+ ports[i].port_num = i + 1;
+ snprintf(port_str, sizeof(port_str), "%u", i + 1);
+ ports[i].cma_dev_group = cma_dev_group;
+ config_group_init_type_name(&ports[i].group,
+ port_str,
+ &cma_port_group_type);
+ configfs_add_default_group(&ports[i].group,
+ &cma_dev_group->ports_group);
+
+ }
+ cma_dev_group->ports = ports;
+ return 0;
+}
+
+static void release_cma_dev(struct config_item *item)
+{
+ struct config_group *group = container_of(item, struct config_group,
+ cg_item);
+ struct cma_dev_group *cma_dev_group = container_of(group,
+ struct cma_dev_group,
+ device_group);
+
+ kfree(cma_dev_group);
+};
+
+static void release_cma_ports_group(struct config_item *item)
+{
+ struct config_group *group = container_of(item, struct config_group,
+ cg_item);
+ struct cma_dev_group *cma_dev_group = container_of(group,
+ struct cma_dev_group,
+ ports_group);
+
+ kfree(cma_dev_group->ports);
+ cma_dev_group->ports = NULL;
+};
+
+static struct configfs_item_operations cma_ports_item_ops = {
+ .release = release_cma_ports_group
+};
+
+static const struct config_item_type cma_ports_group_type = {
+ .ct_item_ops = &cma_ports_item_ops,
+ .ct_owner = THIS_MODULE
+};
+
+static struct configfs_item_operations cma_device_item_ops = {
+ .release = release_cma_dev
+};
+
+static const struct config_item_type cma_device_group_type = {
+ .ct_item_ops = &cma_device_item_ops,
+ .ct_owner = THIS_MODULE
+};
+
+static struct config_group *make_cma_dev(struct config_group *group,
+ const char *name)
+{
+ int err = -ENODEV;
+ struct cma_device *cma_dev = cma_enum_devices_by_ibdev(filter_by_name,
+ (void *)name);
+ struct cma_dev_group *cma_dev_group = NULL;
+
+ if (!cma_dev)
+ goto fail;
+
+ cma_dev_group = kzalloc(sizeof(*cma_dev_group), GFP_KERNEL);
+
+ if (!cma_dev_group) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ strscpy(cma_dev_group->name, name, sizeof(cma_dev_group->name));
+
+ config_group_init_type_name(&cma_dev_group->ports_group, "ports",
+ &cma_ports_group_type);
+
+ err = make_cma_ports(cma_dev_group, cma_dev);
+ if (err)
+ goto fail;
+
+ config_group_init_type_name(&cma_dev_group->device_group, name,
+ &cma_device_group_type);
+ configfs_add_default_group(&cma_dev_group->ports_group,
+ &cma_dev_group->device_group);
+
+ cma_dev_put(cma_dev);
+ return &cma_dev_group->device_group;
+
+fail:
+ if (cma_dev)
+ cma_dev_put(cma_dev);
+ kfree(cma_dev_group);
+ return ERR_PTR(err);
+}
+
+static void drop_cma_dev(struct config_group *cgroup, struct config_item *item)
+{
+ struct config_group *group =
+ container_of(item, struct config_group, cg_item);
+ struct cma_dev_group *cma_dev_group =
+ container_of(group, struct cma_dev_group, device_group);
+
+ configfs_remove_default_groups(&cma_dev_group->ports_group);
+ configfs_remove_default_groups(&cma_dev_group->device_group);
+ config_item_put(item);
+}
+
+static struct configfs_group_operations cma_subsys_group_ops = {
+ .make_group = make_cma_dev,
+ .drop_item = drop_cma_dev,
+};
+
+static const struct config_item_type cma_subsys_type = {
+ .ct_group_ops = &cma_subsys_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem cma_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "rdma_cm",
+ .ci_type = &cma_subsys_type,
+ },
+ },
+};
+
+int __init cma_configfs_init(void)
+{
+ int ret;
+
+ config_group_init(&cma_subsys.su_group);
+ mutex_init(&cma_subsys.su_mutex);
+ ret = configfs_register_subsystem(&cma_subsys);
+ if (ret)
+ mutex_destroy(&cma_subsys.su_mutex);
+ return ret;
+}
+
+void __exit cma_configfs_exit(void)
+{
+ configfs_unregister_subsystem(&cma_subsys);
+ mutex_destroy(&cma_subsys.su_mutex);
+}
diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h
new file mode 100644
index 000000000000..c604b601f4d9
--- /dev/null
+++ b/drivers/infiniband/core/cma_priv.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2005 Voltaire Inc. All rights reserved.
+ * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
+ * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
+ * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _CMA_PRIV_H
+#define _CMA_PRIV_H
+
+enum rdma_cm_state {
+ RDMA_CM_IDLE,
+ RDMA_CM_ADDR_QUERY,
+ RDMA_CM_ADDR_RESOLVED,
+ RDMA_CM_ROUTE_QUERY,
+ RDMA_CM_ROUTE_RESOLVED,
+ RDMA_CM_CONNECT,
+ RDMA_CM_DISCONNECT,
+ RDMA_CM_ADDR_BOUND,
+ RDMA_CM_LISTEN,
+ RDMA_CM_DEVICE_REMOVAL,
+ RDMA_CM_DESTROYING,
+ RDMA_CM_ADDRINFO_QUERY,
+ RDMA_CM_ADDRINFO_RESOLVED
+};
+
+struct rdma_id_private {
+ struct rdma_cm_id id;
+
+ struct rdma_bind_list *bind_list;
+ struct hlist_node node;
+ union {
+ struct list_head device_item; /* On cma_device->id_list */
+ struct list_head listen_any_item; /* On listen_any_list */
+ };
+ union {
+ /* On rdma_id_private->listen_list */
+ struct list_head listen_item;
+ struct list_head listen_list;
+ };
+ struct list_head id_list_entry;
+ struct cma_device *cma_dev;
+ struct list_head mc_list;
+
+ int internal_id;
+ enum rdma_cm_state state;
+ spinlock_t lock;
+ struct mutex qp_mutex;
+
+ struct completion comp;
+ refcount_t refcount;
+ struct mutex handler_mutex;
+
+ int backlog;
+ int timeout_ms;
+ struct ib_sa_query *query;
+ int query_id;
+ union {
+ struct ib_cm_id *ib;
+ struct iw_cm_id *iw;
+ } cm_id;
+
+ u32 seq_num;
+ u32 qkey;
+ u32 qp_num;
+ u32 options;
+ u8 srq;
+ u8 tos;
+ u8 tos_set:1;
+ u8 timeout_set:1;
+ u8 min_rnr_timer_set:1;
+ u8 reuseaddr;
+ u8 afonly;
+ u8 timeout;
+ u8 min_rnr_timer;
+ u8 used_resolve_ip;
+ enum ib_gid_type gid_type;
+
+ /*
+ * Internal to RDMA/core, don't use in the drivers
+ */
+ struct rdma_restrack_entry res;
+ struct rdma_ucm_ece ece;
+};
+
+#if IS_ENABLED(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS)
+int cma_configfs_init(void);
+void cma_configfs_exit(void);
+#else
+static inline int cma_configfs_init(void)
+{
+ return 0;
+}
+
+static inline void cma_configfs_exit(void)
+{
+}
+#endif
+
+void cma_dev_get(struct cma_device *dev);
+void cma_dev_put(struct cma_device *dev);
+typedef bool (*cma_device_filter)(struct ib_device *, void *);
+struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
+ void *cookie);
+int cma_get_default_gid_type(struct cma_device *dev, u32 port);
+int cma_set_default_gid_type(struct cma_device *dev, u32 port,
+ enum ib_gid_type default_gid_type);
+int cma_get_default_roce_tos(struct cma_device *dev, u32 port);
+int cma_set_default_roce_tos(struct cma_device *dev, u32 port,
+ u8 default_roce_tos);
+struct ib_device *cma_get_ib_dev(struct cma_device *dev);
+
+#endif /* _CMA_PRIV_H */
diff --git a/drivers/infiniband/core/cma_trace.c b/drivers/infiniband/core/cma_trace.c
new file mode 100644
index 000000000000..b314a281e10e
--- /dev/null
+++ b/drivers/infiniband/core/cma_trace.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Trace points for the RDMA Connection Manager.
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#define CREATE_TRACE_POINTS
+
+#include <rdma/rdma_cm.h>
+#include <rdma/ib_cm.h>
+#include "cma_priv.h"
+
+#include "cma_trace.h"
diff --git a/drivers/infiniband/core/cma_trace.h b/drivers/infiniband/core/cma_trace.h
new file mode 100644
index 000000000000..3456d5f3aa47
--- /dev/null
+++ b/drivers/infiniband/core/cma_trace.h
@@ -0,0 +1,361 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Trace point definitions for the RDMA Connection Manager.
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rdma_cma
+
+#if !defined(_TRACE_RDMA_CMA_H) || defined(TRACE_HEADER_MULTI_READ)
+
+#define _TRACE_RDMA_CMA_H
+
+#include <linux/tracepoint.h>
+#include <trace/misc/rdma.h>
+
+
+DECLARE_EVENT_CLASS(cma_fsm_class,
+ TP_PROTO(
+ const struct rdma_id_private *id_priv
+ ),
+
+ TP_ARGS(id_priv),
+
+ TP_STRUCT__entry(
+ __field(u32, cm_id)
+ __field(u32, tos)
+ __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+ __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __entry->cm_id = id_priv->res.id;
+ __entry->tos = id_priv->tos;
+ memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr,
+ sizeof(struct sockaddr_in6));
+ memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+
+ TP_printk("cm.id=%u src=%pISpc dst=%pISpc tos=%u",
+ __entry->cm_id, __entry->srcaddr, __entry->dstaddr, __entry->tos
+ )
+);
+
+#define DEFINE_CMA_FSM_EVENT(name) \
+ DEFINE_EVENT(cma_fsm_class, cm_##name, \
+ TP_PROTO( \
+ const struct rdma_id_private *id_priv \
+ ), \
+ TP_ARGS(id_priv))
+
+DEFINE_CMA_FSM_EVENT(send_rtu);
+DEFINE_CMA_FSM_EVENT(send_rej);
+DEFINE_CMA_FSM_EVENT(prepare_mra);
+DEFINE_CMA_FSM_EVENT(send_sidr_req);
+DEFINE_CMA_FSM_EVENT(send_sidr_rep);
+DEFINE_CMA_FSM_EVENT(disconnect);
+DEFINE_CMA_FSM_EVENT(sent_drep);
+DEFINE_CMA_FSM_EVENT(sent_dreq);
+DEFINE_CMA_FSM_EVENT(id_destroy);
+
+TRACE_EVENT(cm_id_attach,
+ TP_PROTO(
+ const struct rdma_id_private *id_priv,
+ const struct ib_device *device
+ ),
+
+ TP_ARGS(id_priv, device),
+
+ TP_STRUCT__entry(
+ __field(u32, cm_id)
+ __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+ __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
+ __string(devname, device->name)
+ ),
+
+ TP_fast_assign(
+ __entry->cm_id = id_priv->res.id;
+ memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr,
+ sizeof(struct sockaddr_in6));
+ memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
+ __assign_str(devname);
+ ),
+
+ TP_printk("cm.id=%u src=%pISpc dst=%pISpc device=%s",
+ __entry->cm_id, __entry->srcaddr, __entry->dstaddr,
+ __get_str(devname)
+ )
+);
+
+DECLARE_EVENT_CLASS(cma_qp_class,
+ TP_PROTO(
+ const struct rdma_id_private *id_priv
+ ),
+
+ TP_ARGS(id_priv),
+
+ TP_STRUCT__entry(
+ __field(u32, cm_id)
+ __field(u32, tos)
+ __field(u32, qp_num)
+ __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+ __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __entry->cm_id = id_priv->res.id;
+ __entry->tos = id_priv->tos;
+ __entry->qp_num = id_priv->qp_num;
+ memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr,
+ sizeof(struct sockaddr_in6));
+ memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+
+ TP_printk("cm.id=%u src=%pISpc dst=%pISpc tos=%u qp_num=%u",
+ __entry->cm_id, __entry->srcaddr, __entry->dstaddr, __entry->tos,
+ __entry->qp_num
+ )
+);
+
+#define DEFINE_CMA_QP_EVENT(name) \
+ DEFINE_EVENT(cma_qp_class, cm_##name, \
+ TP_PROTO( \
+ const struct rdma_id_private *id_priv \
+ ), \
+ TP_ARGS(id_priv))
+
+DEFINE_CMA_QP_EVENT(send_req);
+DEFINE_CMA_QP_EVENT(send_rep);
+DEFINE_CMA_QP_EVENT(qp_destroy);
+
+/*
+ * enum ib_qp_type, from include/rdma/ib_verbs.h
+ */
+#define IB_QP_TYPE_LIST \
+ ib_qp_type(SMI) \
+ ib_qp_type(GSI) \
+ ib_qp_type(RC) \
+ ib_qp_type(UC) \
+ ib_qp_type(UD) \
+ ib_qp_type(RAW_IPV6) \
+ ib_qp_type(RAW_ETHERTYPE) \
+ ib_qp_type(RAW_PACKET) \
+ ib_qp_type(XRC_INI) \
+ ib_qp_type_end(XRC_TGT)
+
+#undef ib_qp_type
+#undef ib_qp_type_end
+
+#define ib_qp_type(x) TRACE_DEFINE_ENUM(IB_QPT_##x);
+#define ib_qp_type_end(x) TRACE_DEFINE_ENUM(IB_QPT_##x);
+
+IB_QP_TYPE_LIST
+
+#undef ib_qp_type
+#undef ib_qp_type_end
+
+#define ib_qp_type(x) { IB_QPT_##x, #x },
+#define ib_qp_type_end(x) { IB_QPT_##x, #x }
+
+#define rdma_show_qp_type(x) \
+ __print_symbolic(x, IB_QP_TYPE_LIST)
+
+
+TRACE_EVENT(cm_qp_create,
+ TP_PROTO(
+ const struct rdma_id_private *id_priv,
+ const struct ib_pd *pd,
+ const struct ib_qp_init_attr *qp_init_attr,
+ int rc
+ ),
+
+ TP_ARGS(id_priv, pd, qp_init_attr, rc),
+
+ TP_STRUCT__entry(
+ __field(u32, cm_id)
+ __field(u32, pd_id)
+ __field(u32, tos)
+ __field(u32, qp_num)
+ __field(u32, send_wr)
+ __field(u32, recv_wr)
+ __field(int, rc)
+ __field(unsigned long, qp_type)
+ __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+ __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __entry->cm_id = id_priv->res.id;
+ __entry->pd_id = pd->res.id;
+ __entry->tos = id_priv->tos;
+ __entry->send_wr = qp_init_attr->cap.max_send_wr;
+ __entry->recv_wr = qp_init_attr->cap.max_recv_wr;
+ __entry->rc = rc;
+ if (!rc) {
+ __entry->qp_num = id_priv->qp_num;
+ __entry->qp_type = id_priv->id.qp_type;
+ } else {
+ __entry->qp_num = 0;
+ __entry->qp_type = 0;
+ }
+ memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr,
+ sizeof(struct sockaddr_in6));
+ memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+
+ TP_printk("cm.id=%u src=%pISpc dst=%pISpc tos=%u pd.id=%u qp_type=%s"
+ " send_wr=%u recv_wr=%u qp_num=%u rc=%d",
+ __entry->cm_id, __entry->srcaddr, __entry->dstaddr,
+ __entry->tos, __entry->pd_id,
+ rdma_show_qp_type(__entry->qp_type), __entry->send_wr,
+ __entry->recv_wr, __entry->qp_num, __entry->rc
+ )
+);
+
+TRACE_EVENT(cm_req_handler,
+ TP_PROTO(
+ const struct rdma_id_private *id_priv,
+ int event
+ ),
+
+ TP_ARGS(id_priv, event),
+
+ TP_STRUCT__entry(
+ __field(u32, cm_id)
+ __field(u32, tos)
+ __field(unsigned long, event)
+ __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+ __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __entry->cm_id = id_priv->res.id;
+ __entry->tos = id_priv->tos;
+ __entry->event = event;
+ memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr,
+ sizeof(struct sockaddr_in6));
+ memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+
+ TP_printk("cm.id=%u src=%pISpc dst=%pISpc tos=%u %s (%lu)",
+ __entry->cm_id, __entry->srcaddr, __entry->dstaddr, __entry->tos,
+ rdma_show_ib_cm_event(__entry->event), __entry->event
+ )
+);
+
+TRACE_EVENT(cm_event_handler,
+ TP_PROTO(
+ const struct rdma_id_private *id_priv,
+ const struct rdma_cm_event *event
+ ),
+
+ TP_ARGS(id_priv, event),
+
+ TP_STRUCT__entry(
+ __field(u32, cm_id)
+ __field(u32, tos)
+ __field(unsigned long, event)
+ __field(int, status)
+ __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+ __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __entry->cm_id = id_priv->res.id;
+ __entry->tos = id_priv->tos;
+ __entry->event = event->event;
+ __entry->status = event->status;
+ memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr,
+ sizeof(struct sockaddr_in6));
+ memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+
+ TP_printk("cm.id=%u src=%pISpc dst=%pISpc tos=%u %s (%lu/%d)",
+ __entry->cm_id, __entry->srcaddr, __entry->dstaddr, __entry->tos,
+ rdma_show_cm_event(__entry->event), __entry->event,
+ __entry->status
+ )
+);
+
+TRACE_EVENT(cm_event_done,
+ TP_PROTO(
+ const struct rdma_id_private *id_priv,
+ const struct rdma_cm_event *event,
+ int result
+ ),
+
+ TP_ARGS(id_priv, event, result),
+
+ TP_STRUCT__entry(
+ __field(u32, cm_id)
+ __field(u32, tos)
+ __field(unsigned long, event)
+ __field(int, result)
+ __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+ __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __entry->cm_id = id_priv->res.id;
+ __entry->tos = id_priv->tos;
+ __entry->event = event->event;
+ __entry->result = result;
+ memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr,
+ sizeof(struct sockaddr_in6));
+ memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+
+ TP_printk("cm.id=%u src=%pISpc dst=%pISpc tos=%u %s consumer returns %d",
+ __entry->cm_id, __entry->srcaddr, __entry->dstaddr, __entry->tos,
+ rdma_show_cm_event(__entry->event), __entry->result
+ )
+);
+
+DECLARE_EVENT_CLASS(cma_client_class,
+ TP_PROTO(
+ const struct ib_device *device
+ ),
+
+ TP_ARGS(device),
+
+ TP_STRUCT__entry(
+ __string(name, device->name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ ),
+
+ TP_printk("device name=%s",
+ __get_str(name)
+ )
+);
+
+#define DEFINE_CMA_CLIENT_EVENT(name) \
+ DEFINE_EVENT(cma_client_class, cm_##name, \
+ TP_PROTO( \
+ const struct ib_device *device \
+ ), \
+ TP_ARGS(device))
+
+DEFINE_CMA_CLIENT_EVENT(add_one);
+DEFINE_CMA_CLIENT_EVENT(remove_one);
+
+#endif /* _TRACE_RDMA_CMA_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE cma_trace
+
+#include <trace/define_trace.h>
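The DEFINE_EVENT() wrappers above generate trace_cm_*() entry points that cma.c fires at the matching state transitions; they show up under the rdma_cma trace system (e.g. /sys/kernel/tracing/events/rdma_cma/). A short sketch of the call-site pattern for the event-handler pair, with the surrounding code reduced to its essentials:

	/* wrap the consumer callback so both the delivery and the
	 * consumer's verdict are visible in the trace stream */
	trace_cm_event_handler(id_priv, event);
	ret = id_priv->id.event_handler(&id_priv->id, event);
	trace_cm_event_done(id_priv, event, ret);

	/* single-shot FSM events take only the id, e.g. after sending
	 * an RTU for this connection: */
	trace_cm_send_rtu(id_priv);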
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 70bb36ebb03b..05102769a918 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -35,25 +35,60 @@
#include <linux/list.h>
#include <linux/spinlock.h>
+#include <linux/cgroup_rdma.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#include <rdma/ib_verbs.h>
+#include <rdma/opa_addr.h>
+#include <rdma/ib_mad.h>
+#include <rdma/restrack.h>
+#include "mad_priv.h"
+#include "restrack.h"
-int ib_device_register_sysfs(struct ib_device *device,
- int (*port_callback)(struct ib_device *,
- u8, struct kobject *));
-void ib_device_unregister_sysfs(struct ib_device *device);
+/* Total number of ports combined across all struct ib_device instances */
+#define RDMA_MAX_PORTS 8192
-void ib_cache_setup(void);
-void ib_cache_cleanup(void);
+struct pkey_index_qp_list {
+ struct list_head pkey_index_list;
+ u16 pkey_index;
+ /* Lock to hold while iterating the qp_list. */
+ spinlock_t qp_list_lock;
+ struct list_head qp_list;
+};
+
+/**
+ * struct rdma_dev_net - rdma net namespace metadata for a net
+ * @nl_sock: Pointer to netlink socket
+ * @net: Pointer to owner net namespace
+ * @id: xarray id to identify the net namespace.
+ */
+struct rdma_dev_net {
+ struct sock *nl_sock;
+ possible_net_t net;
+ u32 id;
+};
-int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
- struct ib_qp_attr *qp_attr, int *qp_attr_mask);
+extern const struct attribute_group ib_dev_attr_group;
+extern bool ib_devices_shared_netns;
+extern unsigned int rdma_dev_net_id;
-typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
+static inline struct rdma_dev_net *rdma_net_to_dev_net(struct net *net)
+{
+ return net_generic(net, rdma_dev_net_id);
+}
+
+int ib_device_rename(struct ib_device *ibdev, const char *name);
+int ib_device_set_dim(struct ib_device *ibdev, u8 use_dim);
+
+typedef void (*roce_netdev_callback)(struct ib_device *device, u32 port,
struct net_device *idev, void *cookie);
-typedef int (*roce_netdev_filter)(struct ib_device *device, u8 port,
- struct net_device *idev, void *cookie);
+typedef bool (*roce_netdev_filter)(struct ib_device *device, u32 port,
+ struct net_device *idev, void *cookie);
+
+struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
+ u32 port);
void ib_enum_roce_netdev(struct ib_device *ib_dev,
roce_netdev_filter filter,
@@ -65,36 +100,275 @@ void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
roce_netdev_callback cb,
void *cookie);
-int ib_cache_gid_find_by_port(struct ib_device *ib_dev,
- const union ib_gid *gid,
- u8 port, struct net_device *ndev,
- u16 *index);
+typedef int (*nldev_callback)(struct ib_device *device,
+ struct sk_buff *skb,
+ struct netlink_callback *cb,
+ unsigned int idx);
+
+int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
+ struct netlink_callback *cb);
+
+struct ib_client_nl_info {
+ struct sk_buff *nl_msg;
+ struct device *cdev;
+ u32 port;
+ u64 abi;
+};
+int ib_get_client_nl_info(struct ib_device *ibdev, const char *client_name,
+ struct ib_client_nl_info *res);
enum ib_cache_gid_default_mode {
IB_CACHE_GID_DEFAULT_MODE_SET,
IB_CACHE_GID_DEFAULT_MODE_DELETE
};
-void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
+int ib_cache_gid_parse_type_str(const char *buf);
+
+const char *ib_cache_gid_type_str(enum ib_gid_type gid_type);
+
+void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port,
struct net_device *ndev,
+ unsigned long gid_type_mask,
enum ib_cache_gid_default_mode mode);
-int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
+int ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
union ib_gid *gid, struct ib_gid_attr *attr);
-int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
+int ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
union ib_gid *gid, struct ib_gid_attr *attr);
-int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
+int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
struct net_device *ndev);
int roce_gid_mgmt_init(void);
void roce_gid_mgmt_cleanup(void);
-int roce_rescan_device(struct ib_device *ib_dev);
+unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u32 port);
int ib_cache_setup_one(struct ib_device *device);
void ib_cache_cleanup_one(struct ib_device *device);
void ib_cache_release_one(struct ib_device *device);
+void ib_dispatch_event_clients(struct ib_event *event);
+
+#ifdef CONFIG_CGROUP_RDMA
+void ib_device_register_rdmacg(struct ib_device *device);
+void ib_device_unregister_rdmacg(struct ib_device *device);
+
+int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj,
+ struct ib_device *device,
+ enum rdmacg_resource_type resource_index);
+
+void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj,
+ struct ib_device *device,
+ enum rdmacg_resource_type resource_index);
+#else
+static inline void ib_device_register_rdmacg(struct ib_device *device)
+{
+}
+
+static inline void ib_device_unregister_rdmacg(struct ib_device *device)
+{
+}
+
+static inline int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj,
+ struct ib_device *device,
+ enum rdmacg_resource_type resource_index)
+{
+ return 0;
+}
+
+static inline void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj,
+ struct ib_device *device,
+ enum rdmacg_resource_type resource_index)
+{
+}
+#endif
+
+static inline bool rdma_is_upper_dev_rcu(struct net_device *dev,
+ struct net_device *upper)
+{
+ return netdev_has_upper_dev_all_rcu(dev, upper);
+}
+
+int addr_init(void);
+void addr_cleanup(void);
+
+int ib_mad_init(void);
+void ib_mad_cleanup(void);
+
+int ib_sa_init(void);
+void ib_sa_cleanup(void);
+
+void rdma_nl_init(void);
+void rdma_nl_exit(void);
+
+int ib_nl_handle_resolve_resp(struct sk_buff *skb,
+ struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack);
+int ib_nl_handle_set_timeout(struct sk_buff *skb,
+ struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack);
+int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
+ struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack);
+
+void ib_get_cached_subnet_prefix(struct ib_device *device,
+ u32 port_num,
+ u64 *sn_pfx);
+
+#ifdef CONFIG_SECURITY_INFINIBAND
+void ib_security_release_port_pkey_list(struct ib_device *device);
+
+void ib_security_cache_change(struct ib_device *device,
+ u32 port_num,
+ u64 subnet_prefix);
+
+int ib_security_modify_qp(struct ib_qp *qp,
+ struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_udata *udata);
+
+int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev);
+void ib_destroy_qp_security_begin(struct ib_qp_security *sec);
+void ib_destroy_qp_security_abort(struct ib_qp_security *sec);
+void ib_destroy_qp_security_end(struct ib_qp_security *sec);
+int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev);
+void ib_close_shared_qp_security(struct ib_qp_security *sec);
+int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
+ enum ib_qp_type qp_type);
+void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent);
+int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index);
+void ib_mad_agent_security_change(void);
+#else
+static inline void ib_security_release_port_pkey_list(struct ib_device *device)
+{
+}
+
+static inline void ib_security_cache_change(struct ib_device *device,
+ u32 port_num,
+ u64 subnet_prefix)
+{
+}
+
+static inline int ib_security_modify_qp(struct ib_qp *qp,
+ struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_udata *udata)
+{
+ return qp->device->ops.modify_qp(qp->real_qp,
+ qp_attr,
+ qp_attr_mask,
+ udata);
+}
+
+static inline int ib_create_qp_security(struct ib_qp *qp,
+ struct ib_device *dev)
+{
+ return 0;
+}
+
+static inline void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
+{
+}
+
+static inline void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
+{
+}
+
+static inline void ib_destroy_qp_security_end(struct ib_qp_security *sec)
+{
+}
+
+static inline int ib_open_shared_qp_security(struct ib_qp *qp,
+ struct ib_device *dev)
+{
+ return 0;
+}
+
+static inline void ib_close_shared_qp_security(struct ib_qp_security *sec)
+{
+}
+
+static inline int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
+ enum ib_qp_type qp_type)
+{
+ return 0;
+}
+
+static inline void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
+{
+}
+
+static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
+ u16 pkey_index)
+{
+ return 0;
+}
+
+static inline void ib_mad_agent_security_change(void)
+{
+}
+#endif
+
+struct ib_device *ib_device_get_by_index(const struct net *net, u32 index);
+
+/* RDMA device netlink */
+void nldev_init(void);
+void nldev_exit(void);
+
+struct ib_qp *ib_create_qp_user(struct ib_device *dev, struct ib_pd *pd,
+ struct ib_qp_init_attr *attr,
+ struct ib_udata *udata,
+ struct ib_uqp_object *uobj, const char *caller);
+
+void ib_qp_usecnt_inc(struct ib_qp *qp);
+void ib_qp_usecnt_dec(struct ib_qp *qp);
+
+struct rdma_dev_addr;
+
+int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
+ const union ib_gid *dgid,
+ u8 *dmac, const struct ib_gid_attr *sgid_attr,
+ int *hoplimit);
+void rdma_copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
+ const struct net_device *dev);
+
+struct sa_path_rec;
+int roce_resolve_route_from_path(struct sa_path_rec *rec,
+ const struct ib_gid_attr *attr);
+
+struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr);
+
+void ib_free_port_attrs(struct ib_core_device *coredev);
+int ib_setup_port_attrs(struct ib_core_device *coredev);
+struct rdma_hw_stats *ib_get_hw_stats_port(struct ib_device *ibdev, u32 port_num);
+void ib_device_release_hw_stats(struct hw_stats_device_data *data);
+int ib_setup_device_attrs(struct ib_device *ibdev);
+
+int rdma_compatdev_set(u8 enable);
+
+int ib_port_register_client_groups(struct ib_device *ibdev, u32 port_num,
+ const struct attribute_group **groups);
+void ib_port_unregister_client_groups(struct ib_device *ibdev, u32 port_num,
+ const struct attribute_group **groups);
+
+int ib_device_set_netns_put(struct sk_buff *skb,
+ struct ib_device *dev, u32 ns_fd);
+
+int rdma_nl_net_init(struct rdma_dev_net *rnet);
+void rdma_nl_net_exit(struct rdma_dev_net *rnet);
+
+struct rdma_umap_priv {
+ struct vm_area_struct *vma;
+ struct list_head list;
+ struct rdma_user_mmap_entry *entry;
+};
+
+void rdma_umap_priv_init(struct rdma_umap_priv *priv,
+ struct vm_area_struct *vma,
+ struct rdma_user_mmap_entry *entry);
+
+void ib_cq_pool_cleanup(struct ib_device *dev);
+bool rdma_nl_get_privileged_qkey(void);
#endif /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
new file mode 100644
index 000000000000..c3aa6d7fc66b
--- /dev/null
+++ b/drivers/infiniband/core/counters.c
@@ -0,0 +1,681 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2019 Mellanox Technologies. All rights reserved.
+ */
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_counter.h>
+
+#include "core_priv.h"
+#include "restrack.h"
+
+#define ALL_AUTO_MODE_MASKS (RDMA_COUNTER_MASK_QP_TYPE | RDMA_COUNTER_MASK_PID)
+
+static int __counter_set_mode(struct rdma_port_counter *port_counter,
+ enum rdma_nl_counter_mode new_mode,
+ enum rdma_nl_counter_mask new_mask,
+ bool bind_opcnt)
+{
+ if (new_mode == RDMA_COUNTER_MODE_AUTO) {
+ if (new_mask & (~ALL_AUTO_MODE_MASKS))
+ return -EINVAL;
+ if (port_counter->num_counters)
+ return -EBUSY;
+ }
+
+ port_counter->mode.mode = new_mode;
+ port_counter->mode.mask = new_mask;
+ port_counter->mode.bind_opcnt = bind_opcnt;
+ return 0;
+}
+
+/*
+ * rdma_counter_set_auto_mode() - Turn on/off per-port auto mode
+ *
+ * @dev: Device to operate
+ * @port: Port to use
+ * @mask: Mask to configure
+ * @extack: Message to the user
+ *
+ * Return 0 on success. If the counter mode was not changed, that is also
+ * considered a success.
+ * Return -EBUSY when changing to auto mode while there are bound counters.
+ *
+ */
+int rdma_counter_set_auto_mode(struct ib_device *dev, u32 port,
+ enum rdma_nl_counter_mask mask,
+ bool bind_opcnt,
+ struct netlink_ext_ack *extack)
+{
+ struct rdma_port_counter *port_counter;
+ enum rdma_nl_counter_mode mode;
+ int ret;
+
+ port_counter = &dev->port_data[port].port_counter;
+ if (!port_counter->hstats)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&port_counter->lock);
+ if (mask)
+ mode = RDMA_COUNTER_MODE_AUTO;
+ else
+ mode = (port_counter->num_counters) ? RDMA_COUNTER_MODE_MANUAL :
+ RDMA_COUNTER_MODE_NONE;
+
+ if (port_counter->mode.mode == mode &&
+ port_counter->mode.mask == mask &&
+ port_counter->mode.bind_opcnt == bind_opcnt) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = __counter_set_mode(port_counter, mode, mask, bind_opcnt);
+
+out:
+ mutex_unlock(&port_counter->lock);
+ if (ret == -EBUSY)
+ NL_SET_ERR_MSG(
+ extack,
+ "Modifying auto mode is not allowed when there is a bound QP");
+ return ret;
+}
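Auto mode is normally configured through the nldev netlink interface, but the semantics are easiest to see from the kernel side. A hedged sketch of a direct caller (the wrapper function is illustrative only):

	/* One counter per (QP type, owning PID) on @port, with per-op
	 * counters bound as well.  Passing mask == 0 instead turns auto
	 * mode back off (MANUAL if counters remain bound, otherwise NONE). */
	static int example_enable_auto_counters(struct ib_device *dev, u32 port,
						struct netlink_ext_ack *extack)
	{
		return rdma_counter_set_auto_mode(dev, port,
						  RDMA_COUNTER_MASK_QP_TYPE |
						  RDMA_COUNTER_MASK_PID,
						  true, extack);
	}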
+
+static void auto_mode_init_counter(struct rdma_counter *counter,
+ const struct ib_qp *qp,
+ enum rdma_nl_counter_mask new_mask)
+{
+ struct auto_mode_param *param = &counter->mode.param;
+
+ counter->mode.mode = RDMA_COUNTER_MODE_AUTO;
+ counter->mode.mask = new_mask;
+
+ if (new_mask & RDMA_COUNTER_MASK_QP_TYPE)
+ param->qp_type = qp->qp_type;
+}
+
+static int __rdma_counter_bind_qp(struct rdma_counter *counter,
+ struct ib_qp *qp, u32 port)
+{
+ int ret;
+
+ if (qp->counter)
+ return -EINVAL;
+
+ if (!qp->device->ops.counter_bind_qp)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&counter->lock);
+ ret = qp->device->ops.counter_bind_qp(counter, qp, port);
+ mutex_unlock(&counter->lock);
+
+ return ret;
+}
+
+int rdma_counter_modify(struct ib_device *dev, u32 port,
+ unsigned int index, bool enable)
+{
+ struct rdma_hw_stats *stats;
+ int ret = 0;
+
+ if (!dev->ops.modify_hw_stat)
+ return -EOPNOTSUPP;
+
+ stats = ib_get_hw_stats_port(dev, port);
+ if (!stats || index >= stats->num_counters ||
+ !(stats->descs[index].flags & IB_STAT_FLAG_OPTIONAL))
+ return -EINVAL;
+
+ mutex_lock(&stats->lock);
+
+ if (enable != test_bit(index, stats->is_disabled))
+ goto out;
+
+ ret = dev->ops.modify_hw_stat(dev, port, index, enable);
+ if (ret)
+ goto out;
+
+ if (enable)
+ clear_bit(index, stats->is_disabled);
+ else
+ set_bit(index, stats->is_disabled);
+out:
+ mutex_unlock(&stats->lock);
+ return ret;
+}
+
+static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u32 port,
+ struct ib_qp *qp,
+ enum rdma_nl_counter_mode mode,
+ bool bind_opcnt)
+{
+ struct rdma_port_counter *port_counter;
+ struct rdma_counter *counter;
+ int ret;
+
+ if (!dev->ops.counter_dealloc || !dev->ops.counter_alloc_stats)
+ return NULL;
+
+ counter = rdma_zalloc_drv_obj(dev, rdma_counter);
+ if (!counter)
+ return NULL;
+
+ counter->device = dev;
+ counter->port = port;
+
+ dev->ops.counter_init(counter);
+
+ rdma_restrack_new(&counter->res, RDMA_RESTRACK_COUNTER);
+ counter->stats = dev->ops.counter_alloc_stats(counter);
+ if (!counter->stats)
+ goto err_stats;
+
+ port_counter = &dev->port_data[port].port_counter;
+ mutex_lock(&port_counter->lock);
+ switch (mode) {
+ case RDMA_COUNTER_MODE_MANUAL:
+ ret = __counter_set_mode(port_counter, RDMA_COUNTER_MODE_MANUAL,
+ 0, bind_opcnt);
+ if (ret) {
+ mutex_unlock(&port_counter->lock);
+ goto err_mode;
+ }
+ break;
+ case RDMA_COUNTER_MODE_AUTO:
+ auto_mode_init_counter(counter, qp, port_counter->mode.mask);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ mutex_unlock(&port_counter->lock);
+ goto err_mode;
+ }
+
+ port_counter->num_counters++;
+ mutex_unlock(&port_counter->lock);
+
+ counter->mode.mode = mode;
+ counter->mode.bind_opcnt = bind_opcnt;
+ kref_init(&counter->kref);
+ mutex_init(&counter->lock);
+
+ ret = __rdma_counter_bind_qp(counter, qp, port);
+ if (ret)
+ goto err_mode;
+
+ rdma_restrack_parent_name(&counter->res, &qp->res);
+ rdma_restrack_add(&counter->res);
+ return counter;
+
+err_mode:
+ rdma_free_hw_stats_struct(counter->stats);
+err_stats:
+ rdma_restrack_put(&counter->res);
+ kfree(counter);
+ return NULL;
+}
+
+static void rdma_counter_free(struct rdma_counter *counter)
+{
+ struct rdma_port_counter *port_counter;
+
+ port_counter = &counter->device->port_data[counter->port].port_counter;
+ mutex_lock(&port_counter->lock);
+ port_counter->num_counters--;
+ if (!port_counter->num_counters &&
+ (port_counter->mode.mode == RDMA_COUNTER_MODE_MANUAL))
+ __counter_set_mode(port_counter, RDMA_COUNTER_MODE_NONE, 0,
+ false);
+
+ mutex_unlock(&port_counter->lock);
+
+ rdma_restrack_del(&counter->res);
+ rdma_free_hw_stats_struct(counter->stats);
+ kfree(counter);
+}
+
+static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter,
+ enum rdma_nl_counter_mask auto_mask)
+{
+ struct auto_mode_param *param = &counter->mode.param;
+ bool match = true;
+
+ if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE)
+ match &= (param->qp_type == qp->qp_type);
+
+ if (auto_mask & RDMA_COUNTER_MASK_PID)
+ match &= (task_pid_nr(counter->res.task) ==
+ task_pid_nr(qp->res.task));
+
+ return match;
+}
+
+static int __rdma_counter_unbind_qp(struct ib_qp *qp, u32 port)
+{
+ struct rdma_counter *counter = qp->counter;
+ int ret;
+
+ if (!qp->device->ops.counter_unbind_qp)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&counter->lock);
+ ret = qp->device->ops.counter_unbind_qp(qp, port);
+ mutex_unlock(&counter->lock);
+
+ return ret;
+}
+
+static void counter_history_stat_update(struct rdma_counter *counter)
+{
+ struct ib_device *dev = counter->device;
+ struct rdma_port_counter *port_counter;
+ int i;
+
+ port_counter = &dev->port_data[counter->port].port_counter;
+ if (!port_counter->hstats)
+ return;
+
+ rdma_counter_query_stats(counter);
+
+ for (i = 0; i < counter->stats->num_counters; i++)
+ port_counter->hstats->value[i] += counter->stats->value[i];
+}
+
+/*
+ * rdma_get_counter_auto_mode - Find the counter to which @qp should be
+ * bound in auto mode
+ *
+ * Return: The counter (with ref-count increased) if found
+ */
+static struct rdma_counter *rdma_get_counter_auto_mode(struct ib_qp *qp,
+ u32 port)
+{
+ struct rdma_port_counter *port_counter;
+ struct rdma_counter *counter = NULL;
+ struct ib_device *dev = qp->device;
+ struct rdma_restrack_entry *res;
+ struct rdma_restrack_root *rt;
+ unsigned long id = 0;
+
+ port_counter = &dev->port_data[port].port_counter;
+ rt = &dev->res[RDMA_RESTRACK_COUNTER];
+ xa_lock(&rt->xa);
+ xa_for_each(&rt->xa, id, res) {
+ counter = container_of(res, struct rdma_counter, res);
+ if ((counter->device != qp->device) || (counter->port != port))
+ goto next;
+
+ if (auto_mode_match(qp, counter, port_counter->mode.mask))
+ break;
+next:
+ counter = NULL;
+ }
+
+ if (counter && !kref_get_unless_zero(&counter->kref))
+ counter = NULL;
+
+ xa_unlock(&rt->xa);
+ return counter;
+}
+
+static void counter_release(struct kref *kref)
+{
+ struct rdma_counter *counter;
+
+ counter = container_of(kref, struct rdma_counter, kref);
+ counter_history_stat_update(counter);
+ counter->device->ops.counter_dealloc(counter);
+ rdma_counter_free(counter);
+}
+
+/*
+ * rdma_counter_bind_qp_auto - Check and bind the QP to a counter based on
+ * the auto-mode rule
+ */
+int rdma_counter_bind_qp_auto(struct ib_qp *qp, u32 port)
+{
+ struct rdma_port_counter *port_counter;
+ struct ib_device *dev = qp->device;
+ struct rdma_counter *counter;
+ int ret;
+
+ if (!rdma_restrack_is_tracked(&qp->res) || rdma_is_kernel_res(&qp->res))
+ return 0;
+
+ if (!rdma_is_port_valid(dev, port))
+ return -EINVAL;
+
+ port_counter = &dev->port_data[port].port_counter;
+ if (port_counter->mode.mode != RDMA_COUNTER_MODE_AUTO)
+ return 0;
+
+ counter = rdma_get_counter_auto_mode(qp, port);
+ if (counter) {
+ ret = __rdma_counter_bind_qp(counter, qp, port);
+ if (ret) {
+ kref_put(&counter->kref, counter_release);
+ return ret;
+ }
+ } else {
+ counter = alloc_and_bind(dev, port, qp, RDMA_COUNTER_MODE_AUTO,
+ port_counter->mode.bind_opcnt);
+ if (!counter)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * rdma_counter_unbind_qp - Unbind a qp from a counter
+ * @force:
+ * true - Decrease the counter ref-count anyway (e.g., qp destroy)
+ */
+int rdma_counter_unbind_qp(struct ib_qp *qp, u32 port, bool force)
+{
+ struct rdma_counter *counter = qp->counter;
+ int ret;
+
+ if (!counter)
+ return -EINVAL;
+
+ ret = __rdma_counter_unbind_qp(qp, port);
+ if (ret && !force)
+ return ret;
+
+ kref_put(&counter->kref, counter_release);
+ return 0;
+}
+
+int rdma_counter_query_stats(struct rdma_counter *counter)
+{
+ struct ib_device *dev = counter->device;
+ int ret;
+
+ if (!dev->ops.counter_update_stats)
+ return -EINVAL;
+
+ mutex_lock(&counter->lock);
+ ret = dev->ops.counter_update_stats(counter);
+ mutex_unlock(&counter->lock);
+
+ return ret;
+}
+
+static u64 get_running_counters_hwstat_sum(struct ib_device *dev,
+ u32 port, u32 index)
+{
+ struct rdma_restrack_entry *res;
+ struct rdma_restrack_root *rt;
+ struct rdma_counter *counter;
+ unsigned long id = 0;
+ u64 sum = 0;
+
+ rt = &dev->res[RDMA_RESTRACK_COUNTER];
+ xa_lock(&rt->xa);
+ xa_for_each(&rt->xa, id, res) {
+ if (!rdma_restrack_get(res))
+ continue;
+
+ xa_unlock(&rt->xa);
+
+ counter = container_of(res, struct rdma_counter, res);
+ if ((counter->device != dev) || (counter->port != port) ||
+ rdma_counter_query_stats(counter))
+ goto next;
+
+ sum += counter->stats->value[index];
+
+next:
+ xa_lock(&rt->xa);
+ rdma_restrack_put(res);
+ }
+
+ xa_unlock(&rt->xa);
+ return sum;
+}
+
+/*
+ * rdma_counter_get_hwstat_value() - Get the sum value of all counters on a
+ * specific port, including the running ones and history data
+ */
+u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u32 port, u32 index)
+{
+ struct rdma_port_counter *port_counter;
+ u64 sum;
+
+ port_counter = &dev->port_data[port].port_counter;
+ if (!port_counter->hstats)
+ return 0;
+
+ sum = get_running_counters_hwstat_sum(dev, port, index);
+ sum += port_counter->hstats->value[index];
+
+ return sum;
+}
+
+static struct ib_qp *rdma_counter_get_qp(struct ib_device *dev, u32 qp_num)
+{
+ struct rdma_restrack_entry *res = NULL;
+ struct ib_qp *qp = NULL;
+
+ res = rdma_restrack_get_byid(dev, RDMA_RESTRACK_QP, qp_num);
+ if (IS_ERR(res))
+ return NULL;
+
+ qp = container_of(res, struct ib_qp, res);
+ if (qp->qp_type == IB_QPT_RAW_PACKET && !rdma_dev_has_raw_cap(dev))
+ goto err;
+
+ return qp;
+
+err:
+ rdma_restrack_put(res);
+ return NULL;
+}
+
+static struct rdma_counter *rdma_get_counter_by_id(struct ib_device *dev,
+ u32 counter_id)
+{
+ struct rdma_restrack_entry *res;
+ struct rdma_counter *counter;
+
+ res = rdma_restrack_get_byid(dev, RDMA_RESTRACK_COUNTER, counter_id);
+ if (IS_ERR(res))
+ return NULL;
+
+ counter = container_of(res, struct rdma_counter, res);
+ kref_get(&counter->kref);
+ rdma_restrack_put(res);
+
+ return counter;
+}
+
+/*
+ * rdma_counter_bind_qpn() - Bind QP @qp_num to counter @counter_id
+ */
+int rdma_counter_bind_qpn(struct ib_device *dev, u32 port,
+ u32 qp_num, u32 counter_id)
+{
+ struct rdma_port_counter *port_counter;
+ struct rdma_counter *counter;
+ struct ib_qp *qp;
+ int ret;
+
+ port_counter = &dev->port_data[port].port_counter;
+ if (port_counter->mode.mode == RDMA_COUNTER_MODE_AUTO)
+ return -EINVAL;
+
+ qp = rdma_counter_get_qp(dev, qp_num);
+ if (!qp)
+ return -ENOENT;
+
+ counter = rdma_get_counter_by_id(dev, counter_id);
+ if (!counter) {
+ ret = -ENOENT;
+ goto err;
+ }
+
+ if (rdma_is_kernel_res(&counter->res) != rdma_is_kernel_res(&qp->res)) {
+ ret = -EINVAL;
+ goto err_task;
+ }
+
+ if ((counter->device != qp->device) || (counter->port != qp->port)) {
+ ret = -EINVAL;
+ goto err_task;
+ }
+
+ ret = __rdma_counter_bind_qp(counter, qp, port);
+ if (ret)
+ goto err_task;
+
+ rdma_restrack_put(&qp->res);
+ return 0;
+
+err_task:
+ kref_put(&counter->kref, counter_release);
+err:
+ rdma_restrack_put(&qp->res);
+ return ret;
+}
+
+/*
+ * rdma_counter_bind_qpn_alloc() - Allocate a counter and bind QP @qp_num to it
+ * The ID of the new counter is returned in @counter_id
+ */
+int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u32 port,
+ u32 qp_num, u32 *counter_id)
+{
+ struct rdma_port_counter *port_counter;
+ struct rdma_counter *counter;
+ struct ib_qp *qp;
+ int ret;
+
+ if (!rdma_is_port_valid(dev, port))
+ return -EINVAL;
+
+ port_counter = &dev->port_data[port].port_counter;
+ if (!port_counter->hstats)
+ return -EOPNOTSUPP;
+
+ if (port_counter->mode.mode == RDMA_COUNTER_MODE_AUTO)
+ return -EINVAL;
+
+ qp = rdma_counter_get_qp(dev, qp_num);
+ if (!qp)
+ return -ENOENT;
+
+ if (rdma_is_port_valid(dev, qp->port) && (qp->port != port)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ counter = alloc_and_bind(dev, port, qp, RDMA_COUNTER_MODE_MANUAL, true);
+ if (!counter) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (counter_id)
+ *counter_id = counter->id;
+
+ rdma_restrack_put(&qp->res);
+ return 0;
+
+err:
+ rdma_restrack_put(&qp->res);
+ return ret;
+}
+
+/*
+ * rdma_counter_unbind_qpn() - Unbind QP @qp_num from a counter
+ */
+int rdma_counter_unbind_qpn(struct ib_device *dev, u32 port,
+ u32 qp_num, u32 counter_id)
+{
+ struct rdma_port_counter *port_counter;
+ struct ib_qp *qp;
+ int ret;
+
+ if (!rdma_is_port_valid(dev, port))
+ return -EINVAL;
+
+ qp = rdma_counter_get_qp(dev, qp_num);
+ if (!qp)
+ return -ENOENT;
+
+ if (rdma_is_port_valid(dev, qp->port) && (qp->port != port)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ port_counter = &dev->port_data[port].port_counter;
+ if (!qp->counter || qp->counter->id != counter_id ||
+ port_counter->mode.mode != RDMA_COUNTER_MODE_MANUAL) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = rdma_counter_unbind_qp(qp, port, false);
+
+out:
+ rdma_restrack_put(&qp->res);
+ return ret;
+}
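Taken together, the three manual-mode helpers above implement the allocate/bind/unbind lifecycle that the nldev statistics commands drive. A sketch of that flow with two QPs sharing one counter (function and variable names are illustrative):

	static int example_manual_counters(struct ib_device *dev, u32 port,
					   u32 qpn_a, u32 qpn_b)
	{
		u32 counter_id;
		int ret;

		/* allocate a fresh counter and bind the first QP to it */
		ret = rdma_counter_bind_qpn_alloc(dev, port, qpn_a, &counter_id);
		if (ret)
			return ret;

		/* aggregate a second QP onto the same counter */
		ret = rdma_counter_bind_qpn(dev, port, qpn_b, counter_id);
		if (ret)
			goto unbind_a;

		return 0;

	unbind_a:
		rdma_counter_unbind_qpn(dev, port, qpn_a, counter_id);
		return ret;
	}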
+
+int rdma_counter_get_mode(struct ib_device *dev, u32 port,
+ enum rdma_nl_counter_mode *mode,
+ enum rdma_nl_counter_mask *mask,
+ bool *opcnt)
+{
+ struct rdma_port_counter *port_counter;
+
+ port_counter = &dev->port_data[port].port_counter;
+ *mode = port_counter->mode.mode;
+ *mask = port_counter->mode.mask;
+ *opcnt = port_counter->mode.bind_opcnt;
+
+ return 0;
+}
+
+void rdma_counter_init(struct ib_device *dev)
+{
+ struct rdma_port_counter *port_counter;
+ u32 port, i;
+
+ if (!dev->port_data)
+ return;
+
+ rdma_for_each_port(dev, port) {
+ port_counter = &dev->port_data[port].port_counter;
+ port_counter->mode.mode = RDMA_COUNTER_MODE_NONE;
+ mutex_init(&port_counter->lock);
+
+ if (!dev->ops.alloc_hw_port_stats)
+ continue;
+
+ port_counter->hstats = dev->ops.alloc_hw_port_stats(dev, port);
+ if (!port_counter->hstats)
+ goto fail;
+ }
+
+ return;
+
+fail:
+ for (i = port; i >= rdma_start_port(dev); i--) {
+ port_counter = &dev->port_data[i].port_counter; /* use the loop index, not the failing port */
+ rdma_free_hw_stats_struct(port_counter->hstats);
+ port_counter->hstats = NULL;
+ mutex_destroy(&port_counter->lock);
+ }
+}
+
+void rdma_counter_release(struct ib_device *dev)
+{
+ struct rdma_port_counter *port_counter;
+ u32 port;
+
+ rdma_for_each_port(dev, port) {
+ port_counter = &dev->port_data[port].port_counter;
+ rdma_free_hw_stats_struct(port_counter->hstats);
+ mutex_destroy(&port_counter->lock);
+ }
+}
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
new file mode 100644
index 000000000000..584537c71545
--- /dev/null
+++ b/drivers/infiniband/core/cq.c
@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015 HGST, a Western Digital Company.
+ */
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <rdma/ib_verbs.h>
+
+#include "core_priv.h"
+
+#include <trace/events/rdma_core.h>
+/* Max size for shared CQ, may require tuning */
+#define IB_MAX_SHARED_CQ_SZ 4096U
+
+/* # of WCs to poll for with a single call to ib_poll_cq */
+#define IB_POLL_BATCH 16
+#define IB_POLL_BATCH_DIRECT 8
+
+/* # of WCs to iterate over before yielding */
+#define IB_POLL_BUDGET_IRQ 256
+#define IB_POLL_BUDGET_WORKQUEUE 65536
+
+#define IB_POLL_FLAGS \
+ (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)
+
+static const struct dim_cq_moder
+rdma_dim_prof[RDMA_DIM_PARAMS_NUM_PROFILES] = {
+ {1, 0, 1, 0},
+ {1, 0, 4, 0},
+ {2, 0, 4, 0},
+ {2, 0, 8, 0},
+ {4, 0, 8, 0},
+ {16, 0, 8, 0},
+ {16, 0, 16, 0},
+ {32, 0, 16, 0},
+ {32, 0, 32, 0},
+};
+
+static void ib_cq_rdma_dim_work(struct work_struct *w)
+{
+ struct dim *dim = container_of(w, struct dim, work);
+ struct ib_cq *cq = dim->priv;
+
+ u16 usec = rdma_dim_prof[dim->profile_ix].usec;
+ u16 comps = rdma_dim_prof[dim->profile_ix].comps;
+
+ dim->state = DIM_START_MEASURE;
+
+ trace_cq_modify(cq, comps, usec);
+ cq->device->ops.modify_cq(cq, comps, usec);
+}
+
+static void rdma_dim_init(struct ib_cq *cq)
+{
+ struct dim *dim;
+
+ if (!cq->device->ops.modify_cq || !cq->device->use_cq_dim ||
+ cq->poll_ctx == IB_POLL_DIRECT)
+ return;
+
+ dim = kzalloc(sizeof(struct dim), GFP_KERNEL);
+ if (!dim)
+ return;
+
+ dim->state = DIM_START_MEASURE;
+ dim->tune_state = DIM_GOING_RIGHT;
+ dim->profile_ix = RDMA_DIM_START_PROFILE;
+ dim->priv = cq;
+ cq->dim = dim;
+
+ INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
+}
+
+static void rdma_dim_destroy(struct ib_cq *cq)
+{
+ if (!cq->dim)
+ return;
+
+ cancel_work_sync(&cq->dim->work);
+ kfree(cq->dim);
+}
+
+static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
+{
+ int rc;
+
+ rc = ib_poll_cq(cq, num_entries, wc);
+ trace_cq_poll(cq, num_entries, rc);
+ return rc;
+}
+
+static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
+ int batch)
+{
+ int i, n, completed = 0;
+
+ trace_cq_process(cq);
+
+ /*
+ * budget might be (-1) if the caller does not
+ * want to bound this call, thus we need an unsigned
+ * minimum here.
+ */
+ while ((n = __poll_cq(cq, min_t(u32, batch,
+ budget - completed), wcs)) > 0) {
+ for (i = 0; i < n; i++) {
+ struct ib_wc *wc = &wcs[i];
+
+ if (wc->wr_cqe)
+ wc->wr_cqe->done(cq, wc);
+ else
+ WARN_ON_ONCE(wc->status == IB_WC_SUCCESS);
+ }
+
+ completed += n;
+
+ if (n != batch || (budget != -1 && completed >= budget))
+ break;
+ }
+
+ return completed;
+}
+
+/**
+ * ib_process_cq_direct - process a CQ in caller context
+ * @cq: CQ to process
+ * @budget: number of CQEs to poll for
+ *
+ * This function is used to process all outstanding CQ entries.
+ * It does not offload CQ processing to a different context and does
+ * not ask for completion interrupts from the HCA.
+ * Using direct processing on a CQ whose poll context is not IB_POLL_DIRECT
+ * may trigger concurrent processing.
+ *
+ * Note: do not pass -1 as %budget unless it is guaranteed that the number
+ * of completions that will be processed is small.
+ */
+int ib_process_cq_direct(struct ib_cq *cq, int budget)
+{
+ struct ib_wc wcs[IB_POLL_BATCH_DIRECT];
+
+ return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);
+}
+EXPORT_SYMBOL(ib_process_cq_direct);
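Because ib_process_cq_direct() neither offloads processing to another context nor arms the CQ, a ULP that chose IB_POLL_DIRECT has to drain the queue itself. A minimal sketch (names illustrative):

	/* each call dispatches up to 16 completions to their
	 * wc->wr_cqe->done() callbacks; loop until the CQ is empty */
	static void ulp_drain_cq(struct ib_cq *cq)
	{
		while (ib_process_cq_direct(cq, 16) > 0)
			;
	}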
+
+static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
+{
+ WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);
+}
+
+static int ib_poll_handler(struct irq_poll *iop, int budget)
+{
+ struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
+ struct dim *dim = cq->dim;
+ int completed;
+
+ completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
+ if (completed < budget) {
+ irq_poll_complete(&cq->iop);
+ if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) {
+ trace_cq_reschedule(cq);
+ irq_poll_sched(&cq->iop);
+ }
+ }
+
+ if (dim)
+ rdma_dim(dim, completed);
+
+ return completed;
+}
+
+static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
+{
+ trace_cq_schedule(cq);
+ irq_poll_sched(&cq->iop);
+}
+
+static void ib_cq_poll_work(struct work_struct *work)
+{
+ struct ib_cq *cq = container_of(work, struct ib_cq, work);
+ int completed;
+
+ completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
+ IB_POLL_BATCH);
+ if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
+ ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
+ queue_work(cq->comp_wq, &cq->work);
+ else if (cq->dim)
+ rdma_dim(cq->dim, completed);
+}
+
+static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
+{
+ trace_cq_schedule(cq);
+ queue_work(cq->comp_wq, &cq->work);
+}
+
+/**
+ * __ib_alloc_cq - allocate a completion queue
+ * @dev: device to allocate the CQ for
+ * @private: driver private data, accessible from cq->cq_context
+ * @nr_cqe: number of CQEs to allocate
+ * @comp_vector: HCA completion vectors for this CQ
+ * @poll_ctx: context to poll the CQ from.
+ * @caller: module owner name.
+ *
+ * This is the proper interface to allocate a CQ for in-kernel users. A
+ * CQ allocated with this interface will automatically be polled from the
+ * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
+ * to use this CQ abstraction.
+ */
+struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
+ int comp_vector, enum ib_poll_context poll_ctx,
+ const char *caller)
+{
+ struct ib_cq_init_attr cq_attr = {
+ .cqe = nr_cqe,
+ .comp_vector = comp_vector,
+ };
+ struct ib_cq *cq;
+ int ret = -ENOMEM;
+
+ cq = rdma_zalloc_drv_obj(dev, ib_cq);
+ if (!cq)
+ return ERR_PTR(ret);
+
+ cq->device = dev;
+ cq->cq_context = private;
+ cq->poll_ctx = poll_ctx;
+ atomic_set(&cq->usecnt, 0);
+ cq->comp_vector = comp_vector;
+
+ cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL);
+ if (!cq->wc)
+ goto out_free_cq;
+
+ rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
+ rdma_restrack_set_name(&cq->res, caller);
+
+ ret = dev->ops.create_cq(cq, &cq_attr, NULL);
+ if (ret)
+ goto out_free_wc;
+
+ rdma_dim_init(cq);
+
+ switch (cq->poll_ctx) {
+ case IB_POLL_DIRECT:
+ cq->comp_handler = ib_cq_completion_direct;
+ break;
+ case IB_POLL_SOFTIRQ:
+ cq->comp_handler = ib_cq_completion_softirq;
+
+ irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
+ ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+ break;
+ case IB_POLL_WORKQUEUE:
+ case IB_POLL_UNBOUND_WORKQUEUE:
+ cq->comp_handler = ib_cq_completion_workqueue;
+ INIT_WORK(&cq->work, ib_cq_poll_work);
+ ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+ cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ?
+ ib_comp_wq : ib_comp_unbound_wq;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_destroy_cq;
+ }
+
+ rdma_restrack_add(&cq->res);
+ trace_cq_alloc(cq, nr_cqe, comp_vector, poll_ctx);
+ return cq;
+
+out_destroy_cq:
+ rdma_dim_destroy(cq);
+ cq->device->ops.destroy_cq(cq, NULL);
+out_free_wc:
+ rdma_restrack_put(&cq->res);
+ kfree(cq->wc);
+out_free_cq:
+ kfree(cq);
+ trace_cq_alloc_error(nr_cqe, comp_vector, poll_ctx, ret);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(__ib_alloc_cq);
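The wr_cqe contract mentioned in the kernel-doc above is what lets the core dispatch completions without ULP-specific polling code: the ULP embeds a struct ib_cqe in its per-request state and points each work request at it. A brief sketch under those assumptions (all ulp_* names are illustrative):

	struct ulp_request {
		struct ib_cqe	cqe;
		/* per-request ULP state */
	};

	static void ulp_send_done(struct ib_cq *cq, struct ib_wc *wc)
	{
		struct ulp_request *req =
			container_of(wc->wr_cqe, struct ulp_request, cqe);

		if (wc->status != IB_WC_SUCCESS)
			pr_err("send failed: status %d\n", wc->status);
		/* release or complete @req here */
	}

	static struct ib_cq *ulp_create_cq(struct ib_device *dev)
	{
		/* 128 CQEs on completion vector 0, polled from softirq;
		 * the ib_alloc_cq() wrapper supplies the module name */
		return ib_alloc_cq(dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
	}

	/* posting side:  req->cqe.done = ulp_send_done;
	 *                wr.wr_cqe    = &req->cqe;
	 * teardown:      ib_free_cq(cq);
	 */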
+
+/**
+ * __ib_alloc_cq_any - allocate a completion queue
+ * @dev: device to allocate the CQ for
+ * @private: driver private data, accessible from cq->cq_context
+ * @nr_cqe: number of CQEs to allocate
+ * @poll_ctx: context to poll the CQ from
+ * @caller: module owner name
+ *
+ * Attempt to spread ULP Completion Queues over each device's interrupt
+ * vectors. A simple best-effort mechanism is used.
+ */
+struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
+ int nr_cqe, enum ib_poll_context poll_ctx,
+ const char *caller)
+{
+ static atomic_t counter;
+ int comp_vector = 0;
+
+ if (dev->num_comp_vectors > 1)
+ comp_vector =
+ atomic_inc_return(&counter) %
+ min_t(int, dev->num_comp_vectors, num_online_cpus());
+
+ return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
+ caller);
+}
+EXPORT_SYMBOL(__ib_alloc_cq_any);
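Where the ULP has no vector affinity preference, the round-robin variant above can be used instead. A one-function sketch, assuming the ib_alloc_cq_any() wrapper and a hypothetical name:

static struct ib_cq *my_alloc_cq_no_affinity(struct ib_device *dev, void *priv)
{
	/* The core spreads CQs round-robin across comp vectors / online CPUs. */
	return ib_alloc_cq_any(dev, priv, 256, IB_POLL_WORKQUEUE);
}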
+
+/**
+ * ib_free_cq - free a completion queue
+ * @cq: completion queue to free.
+ */
+void ib_free_cq(struct ib_cq *cq)
+{
+ int ret = 0;
+
+ if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
+ return;
+ if (WARN_ON_ONCE(cq->cqe_used))
+ return;
+
+ if (cq->device->ops.pre_destroy_cq) {
+ ret = cq->device->ops.pre_destroy_cq(cq);
+ WARN_ONCE(ret, "Disable of kernel CQ shouldn't fail");
+ }
+
+ switch (cq->poll_ctx) {
+ case IB_POLL_DIRECT:
+ break;
+ case IB_POLL_SOFTIRQ:
+ irq_poll_disable(&cq->iop);
+ break;
+ case IB_POLL_WORKQUEUE:
+ case IB_POLL_UNBOUND_WORKQUEUE:
+ cancel_work_sync(&cq->work);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ rdma_dim_destroy(cq);
+ trace_cq_free(cq);
+ if (cq->device->ops.post_destroy_cq)
+ cq->device->ops.post_destroy_cq(cq);
+ else
+ ret = cq->device->ops.destroy_cq(cq, NULL);
+ WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
+ rdma_restrack_del(&cq->res);
+ kfree(cq->wc);
+ kfree(cq);
+}
+EXPORT_SYMBOL(ib_free_cq);
+
+void ib_cq_pool_cleanup(struct ib_device *dev)
+{
+ struct ib_cq *cq, *n;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->cq_pools); i++) {
+ list_for_each_entry_safe(cq, n, &dev->cq_pools[i],
+ pool_entry) {
+ WARN_ON(cq->cqe_used);
+ list_del(&cq->pool_entry);
+ cq->shared = false;
+ ib_free_cq(cq);
+ }
+ }
+}
+
+static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes,
+ enum ib_poll_context poll_ctx)
+{
+ LIST_HEAD(tmp_list);
+ unsigned int nr_cqs, i;
+ struct ib_cq *cq, *n;
+ int ret;
+
+ if (poll_ctx > IB_POLL_LAST_POOL_TYPE) {
+ WARN_ON_ONCE(poll_ctx > IB_POLL_LAST_POOL_TYPE);
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate at least as many CQEs as requested, and otherwise
+ * a reasonable batch size so that we can share CQs between
+ * multiple users instead of allocating a larger number of CQs.
+ */
+ nr_cqes = min_t(unsigned int, dev->attrs.max_cqe,
+ max(nr_cqes, IB_MAX_SHARED_CQ_SZ));
+ nr_cqs = min_t(unsigned int, dev->num_comp_vectors, num_online_cpus());
+ for (i = 0; i < nr_cqs; i++) {
+ cq = ib_alloc_cq(dev, NULL, nr_cqes, i, poll_ctx);
+ if (IS_ERR(cq)) {
+ ret = PTR_ERR(cq);
+ goto out_free_cqs;
+ }
+ cq->shared = true;
+ list_add_tail(&cq->pool_entry, &tmp_list);
+ }
+
+ spin_lock_irq(&dev->cq_pools_lock);
+ list_splice(&tmp_list, &dev->cq_pools[poll_ctx]);
+ spin_unlock_irq(&dev->cq_pools_lock);
+
+ return 0;
+
+out_free_cqs:
+ list_for_each_entry_safe(cq, n, &tmp_list, pool_entry) {
+ cq->shared = false;
+ ib_free_cq(cq);
+ }
+ return ret;
+}
+
+/**
+ * ib_cq_pool_get() - Find the least used completion queue that matches
+ * a given cpu hint (or least used for wild card affinity) and fits
+ * nr_cqe.
+ * @dev: rdma device
+ * @nr_cqe: number of needed cqe entries
+ * @comp_vector_hint: completion vector hint (-1) for the driver to assign
+ * a comp vector based on internal counter
+ * @poll_ctx: cq polling context
+ *
+ * Finds a cq that satisfies @comp_vector_hint and @nr_cqe requirements and
+ * claims entries in it for us. In case there is no available cq, a new cq
+ * is allocated with the given requirements and added to the device pool.
+ * IB_POLL_DIRECT cannot be used for shared cqs so it is not a valid value
+ * for @poll_ctx.
+ */
+struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
+ int comp_vector_hint,
+ enum ib_poll_context poll_ctx)
+{
+ static unsigned int default_comp_vector;
+ unsigned int vector, num_comp_vectors;
+ struct ib_cq *cq, *found = NULL;
+ int ret;
+
+ if (poll_ctx > IB_POLL_LAST_POOL_TYPE) {
+ WARN_ON_ONCE(poll_ctx > IB_POLL_LAST_POOL_TYPE);
+ return ERR_PTR(-EINVAL);
+ }
+
+ num_comp_vectors =
+ min_t(unsigned int, dev->num_comp_vectors, num_online_cpus());
+ /* Project the affinity to the device completion vector range */
+ if (comp_vector_hint < 0) {
+ comp_vector_hint =
+ (READ_ONCE(default_comp_vector) + 1) % num_comp_vectors;
+ WRITE_ONCE(default_comp_vector, comp_vector_hint);
+ }
+ vector = comp_vector_hint % num_comp_vectors;
+
+ /*
+ * Find the least used CQ with correct affinity and
+ * enough free CQ entries
+ */
+ while (!found) {
+ spin_lock_irq(&dev->cq_pools_lock);
+ list_for_each_entry(cq, &dev->cq_pools[poll_ctx],
+ pool_entry) {
+ /*
+ * Check to see if we have found a CQ with the
+ * correct completion vector
+ */
+ if (vector != cq->comp_vector)
+ continue;
+ if (cq->cqe_used + nr_cqe > cq->cqe)
+ continue;
+ found = cq;
+ break;
+ }
+
+ if (found) {
+ found->cqe_used += nr_cqe;
+ spin_unlock_irq(&dev->cq_pools_lock);
+
+ return found;
+ }
+ spin_unlock_irq(&dev->cq_pools_lock);
+
+ /*
+ * Didn't find a match or ran out of CQs in the device
+ * pool, allocate a new array of CQs.
+ */
+ ret = ib_alloc_cqs(dev, nr_cqe, poll_ctx);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ return found;
+}
+EXPORT_SYMBOL(ib_cq_pool_get);
+
+/**
+ * ib_cq_pool_put - Return a CQ taken from a shared pool.
+ * @cq: The CQ to return.
+ * @nr_cqe: The max number of cqes that the user had requested.
+ */
+void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe)
+{
+ if (WARN_ON_ONCE(nr_cqe > cq->cqe_used))
+ return;
+
+ spin_lock_irq(&cq->device->cq_pools_lock);
+ cq->cqe_used -= nr_cqe;
+ spin_unlock_irq(&cq->device->cq_pools_lock);
+}
+EXPORT_SYMBOL(ib_cq_pool_put);
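A hedged sketch of the pool API from a consumer's point of view: claim nr_cqe entries from the shared pool, and return exactly that many when done. The my_* names are hypothetical.

static int my_attach(struct ib_device *dev, unsigned int nr_cqe, int cpu_hint,
		     struct ib_cq **out_cq)
{
	struct ib_cq *cq;

	/* IB_POLL_DIRECT is not allowed for pooled CQs. */
	cq = ib_cq_pool_get(dev, nr_cqe, cpu_hint, IB_POLL_WORKQUEUE);
	if (IS_ERR(cq))
		return PTR_ERR(cq);

	*out_cq = cq;
	return 0;
}

static void my_detach(struct ib_cq *cq, unsigned int nr_cqe)
{
	/* Return exactly the entries claimed in my_attach(). */
	ib_cq_pool_put(cq, nr_cqe);
}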
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 17639117afc6..b4f3c835844a 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -37,66 +37,254 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
-#include <linux/mutex.h>
#include <linux/netdevice.h>
+#include <net/net_namespace.h>
+#include <linux/security.h>
+#include <linux/notifier.h>
+#include <linux/hashtable.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
+#include <rdma/rdma_counter.h>
#include "core_priv.h"
+#include "restrack.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");
-struct ib_client_data {
- struct list_head list;
- struct ib_client *client;
- void * data;
- /* The device or client is going down. Do not call client or device
- * callbacks other than remove(). */
- bool going_down;
-};
-
+struct workqueue_struct *ib_comp_wq;
+struct workqueue_struct *ib_comp_unbound_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);
+static struct workqueue_struct *ib_unreg_wq;
-/* The device_list and client_list contain devices and clients after their
- * registration has completed, and the devices and clients are removed
- * during unregistration. */
-static LIST_HEAD(device_list);
-static LIST_HEAD(client_list);
+/*
+ * Each of the three rwsem locks (devices, clients, client_data) protects the
+ * xarray of the same name. Specifically it allows the caller to assert that
+ * the MARK will/will not be changing under the lock, and for devices and
+ * clients, that the value in the xarray is still a valid pointer. Change of
+ * the MARK is linked to the object state, so holding the lock and testing the
+ * MARK also asserts that the contained object is in a certain state.
+ *
+ * This is used to build a two stage register/unregister flow where objects
+ * can continue to be in the xarray even though they are still in progress to
+ * register/unregister.
+ *
+ * The xarray itself provides additional locking, and restartable iteration,
+ * which is also relied on.
+ *
+ * Locks should not be nested, with the exception of client_data, which is
+ * allowed to nest under the read side of the other two locks.
+ *
+ * The devices_rwsem also protects the device name list, any change or
+ * assignment of device name must also hold the write side to guarantee unique
+ * names.
+ */
/*
- * device_mutex and lists_rwsem protect access to both device_list and
- * client_list. device_mutex protects writer access by device and client
- * registration / de-registration. lists_rwsem protects reader access to
- * these lists. Iterators of these lists must lock it for read, while updates
- * to the lists must be done with a write lock. A special case is when the
- * device_mutex is locked. In this case locking the lists for read access is
- * not necessary as the device_mutex implies it.
+ * devices contains devices that have had their names assigned. The
+ * devices may not be registered. Users that care about the registration
+ * status need to call ib_device_try_get() on the device to ensure it is
+ * registered, and keep it registered, for the required duration.
+ *
+ */
+static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC);
+static DECLARE_RWSEM(devices_rwsem);
+#define DEVICE_REGISTERED XA_MARK_1
+
+static u32 highest_client_id;
+#define CLIENT_REGISTERED XA_MARK_1
+static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
+static DECLARE_RWSEM(clients_rwsem);
+
+static void ib_client_put(struct ib_client *client)
+{
+ if (refcount_dec_and_test(&client->uses))
+ complete(&client->uses_zero);
+}
+
+/*
+ * If client_data is registered then the corresponding client must also still
+ * be registered.
+ */
+#define CLIENT_DATA_REGISTERED XA_MARK_1
+
+unsigned int rdma_dev_net_id;
+
+/*
+ * A list of net namespaces is maintained in an xarray. This is necessary
+ * because we can't get the locking right using the existing net ns list. We
+ * would require an init_net callback after the list is updated.
+ */
+static DEFINE_XARRAY_FLAGS(rdma_nets, XA_FLAGS_ALLOC);
+/*
+ * rwsem to protect accessing the rdma_nets xarray entries.
+ */
+static DECLARE_RWSEM(rdma_nets_rwsem);
+
+bool ib_devices_shared_netns = true;
+module_param_named(netns_mode, ib_devices_shared_netns, bool, 0444);
+MODULE_PARM_DESC(netns_mode,
+ "Share device among net namespaces; default=1 (shared)");
+/**
+ * rdma_dev_access_netns() - Return whether an rdma device can be accessed
+ * from a specified net namespace or not.
+ * @dev: Pointer to rdma device which needs to be checked
+ * @net: Pointer to net namespace for which access is to be checked
*
- * lists_rwsem also protects access to the client data list.
+ * When the rdma device is in shared mode, it ignores the net namespace.
+ * When the rdma device is exclusive to a net namespace, rdma device net
+ * namespace is checked against the specified one.
*/
-static DEFINE_MUTEX(device_mutex);
-static DECLARE_RWSEM(lists_rwsem);
+bool rdma_dev_access_netns(const struct ib_device *dev, const struct net *net)
+{
+ return (ib_devices_shared_netns ||
+ net_eq(read_pnet(&dev->coredev.rdma_net), net));
+}
+EXPORT_SYMBOL(rdma_dev_access_netns);
+
+/**
+ * rdma_dev_has_raw_cap() - Returns whether a specified rdma device has
+ * CAP_NET_RAW capability or not.
+ *
+ * @dev: Pointer to rdma device whose capability to be checked
+ *
+ * Returns true if an rdma device's owning user namespace has CAP_NET_RAW
+ * capability, otherwise false. When the rdma subsystem is in legacy shared
+ * network namespace mode, the default net namespace is considered.
+ */
+bool rdma_dev_has_raw_cap(const struct ib_device *dev)
+{
+ const struct net *net;
+
+ /* Network namespace is the resource whose user namespace
+ * is to be considered. When in shared mode, there is no reliable
+ * network namespace resource, so consider the default net namespace.
+ */
+ if (ib_devices_shared_netns)
+ net = &init_net;
+ else
+ net = read_pnet(&dev->coredev.rdma_net);
+
+ return ns_capable(net->user_ns, CAP_NET_RAW);
+}
+EXPORT_SYMBOL(rdma_dev_has_raw_cap);
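As a hedged illustration, a provider or core path that must honour CAP_NET_RAW could gate on this helper; my_create_raw_qp() is hypothetical.

static int my_create_raw_qp(struct ib_device *dev)
{
	if (!rdma_dev_has_raw_cap(dev))
		return -EPERM;
	/* ... proceed with raw packet QP creation ... */
	return 0;
}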
+
+/*
+ * xarray has this behavior where it won't iterate over NULL values stored in
+ * allocated arrays. So we need our own iterator to see all values stored in
+ * the array. This does the same thing as xa_for_each except that it also
+ * returns NULL valued entries if the array is allocating. Simplified to only
+ * work on simple xarrays.
+ */
+static void *xan_find_marked(struct xarray *xa, unsigned long *indexp,
+ xa_mark_t filter)
+{
+ XA_STATE(xas, xa, *indexp);
+ void *entry;
+
+ rcu_read_lock();
+ do {
+ entry = xas_find_marked(&xas, ULONG_MAX, filter);
+ if (xa_is_zero(entry))
+ break;
+ } while (xas_retry(&xas, entry));
+ rcu_read_unlock();
+
+ if (entry) {
+ *indexp = xas.xa_index;
+ if (xa_is_zero(entry))
+ return NULL;
+ return entry;
+ }
+ return XA_ERROR(-ENOENT);
+}
+#define xan_for_each_marked(xa, index, entry, filter) \
+ for (index = 0, entry = xan_find_marked(xa, &(index), filter); \
+ !xa_is_err(entry); \
+ (index)++, entry = xan_find_marked(xa, &(index), filter))
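A hedged sketch of what this iterator buys over plain xa_for_each(): NULL values stored at marked indices are still visited. The function below is hypothetical and assumes the caller holds client_data_rwsem for read, as ib_device_rename() does further down.

static void my_dump_client_data(struct ib_device *dev)
{
	unsigned long index;
	void *client_data;

	xan_for_each_marked(&dev->client_data, index, client_data,
			    CLIENT_DATA_REGISTERED) {
		/* client_data may legitimately be NULL and is still visited */
		pr_debug("client id %lu -> %p\n", index, client_data);
	}
}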
+
+/* RCU hash table mapping netdevice pointers to struct ib_port_data */
+static DEFINE_SPINLOCK(ndev_hash_lock);
+static DECLARE_HASHTABLE(ndev_hash, 5);
+
+static void free_netdevs(struct ib_device *ib_dev);
+static void ib_unregister_work(struct work_struct *work);
+static void __ib_unregister_device(struct ib_device *device);
+static int ib_security_change(struct notifier_block *nb, unsigned long event,
+ void *lsm_data);
+static void ib_policy_change_task(struct work_struct *work);
+static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);
+
+static void __ibdev_printk(const char *level, const struct ib_device *ibdev,
+ struct va_format *vaf)
+{
+ if (ibdev && ibdev->dev.parent)
+ dev_printk_emit(level[1] - '0',
+ ibdev->dev.parent,
+ "%s %s %s: %pV",
+ dev_driver_string(ibdev->dev.parent),
+ dev_name(ibdev->dev.parent),
+ dev_name(&ibdev->dev),
+ vaf);
+ else if (ibdev)
+ printk("%s%s: %pV",
+ level, dev_name(&ibdev->dev), vaf);
+ else
+ printk("%s(NULL ib_device): %pV", level, vaf);
+}
+
+#define define_ibdev_printk_level(func, level) \
+void func(const struct ib_device *ibdev, const char *fmt, ...) \
+{ \
+ struct va_format vaf; \
+ va_list args; \
+ \
+ va_start(args, fmt); \
+ \
+ vaf.fmt = fmt; \
+ vaf.va = &args; \
+ \
+ __ibdev_printk(level, ibdev, &vaf); \
+ \
+ va_end(args); \
+} \
+EXPORT_SYMBOL(func);
+
+define_ibdev_printk_level(ibdev_emerg, KERN_EMERG);
+define_ibdev_printk_level(ibdev_alert, KERN_ALERT);
+define_ibdev_printk_level(ibdev_crit, KERN_CRIT);
+define_ibdev_printk_level(ibdev_err, KERN_ERR);
+define_ibdev_printk_level(ibdev_warn, KERN_WARNING);
+define_ibdev_printk_level(ibdev_notice, KERN_NOTICE);
+define_ibdev_printk_level(ibdev_info, KERN_INFO);
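A hedged usage sketch of the generated helpers (hypothetical function name); they take an ib_device plus a printf-style format and prefix the message with the parent device and ibdev names.

static void my_report(struct ib_device *ibdev, int err)
{
	if (err)
		ibdev_err(ibdev, "command failed: %d\n", err);
	else
		ibdev_info(ibdev, "command completed\n");
}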
+
+static struct notifier_block ibdev_lsm_nb = {
+ .notifier_call = ib_security_change,
+};
+static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
+ struct net *net);
-static int ib_device_check_mandatory(struct ib_device *device)
+/* Pointer to the RCU head at the start of the ib_port_data array */
+struct ib_port_data_rcu {
+ struct rcu_head rcu_head;
+ struct ib_port_data pdata[];
+};
+
+static void ib_device_check_mandatory(struct ib_device *device)
{
-#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
+#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device_ops, x), #x }
static const struct {
size_t offset;
char *name;
} mandatory_table[] = {
IB_MANDATORY_FUNC(query_device),
IB_MANDATORY_FUNC(query_port),
- IB_MANDATORY_FUNC(query_pkey),
- IB_MANDATORY_FUNC(query_gid),
IB_MANDATORY_FUNC(alloc_pd),
IB_MANDATORY_FUNC(dealloc_pd),
- IB_MANDATORY_FUNC(create_ah),
- IB_MANDATORY_FUNC(destroy_ah),
IB_MANDATORY_FUNC(create_qp),
IB_MANDATORY_FUNC(modify_qp),
IB_MANDATORY_FUNC(destroy_qp),
@@ -107,81 +295,238 @@ static int ib_device_check_mandatory(struct ib_device *device)
IB_MANDATORY_FUNC(poll_cq),
IB_MANDATORY_FUNC(req_notify_cq),
IB_MANDATORY_FUNC(get_dma_mr),
+ IB_MANDATORY_FUNC(reg_user_mr),
IB_MANDATORY_FUNC(dereg_mr),
IB_MANDATORY_FUNC(get_port_immutable)
};
int i;
+ device->kverbs_provider = true;
for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
- if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
- printk(KERN_WARNING "Device %s is missing mandatory function %s\n",
- device->name, mandatory_table[i].name);
- return -EINVAL;
+ if (!*(void **) ((void *) &device->ops +
+ mandatory_table[i].offset)) {
+ device->kverbs_provider = false;
+ break;
}
}
+}
- return 0;
+/*
+ * Caller must perform ib_device_put() to return the device reference count
+ * when ib_device_get_by_index() returns valid device pointer.
+ */
+struct ib_device *ib_device_get_by_index(const struct net *net, u32 index)
+{
+ struct ib_device *device;
+
+ down_read(&devices_rwsem);
+ device = xa_load(&devices, index);
+ if (device) {
+ if (!rdma_dev_access_netns(device, net)) {
+ device = NULL;
+ goto out;
+ }
+
+ if (!ib_device_try_get(device))
+ device = NULL;
+ }
+out:
+ up_read(&devices_rwsem);
+ return device;
+}
+
+/**
+ * ib_device_put - Release IB device reference
+ * @device: device whose reference to be released
+ *
+ * ib_device_put() releases reference to the IB device to allow it to be
+ * unregistered and eventually free.
+ */
+void ib_device_put(struct ib_device *device)
+{
+ if (refcount_dec_and_test(&device->refcount))
+ complete(&device->unreg_completion);
}
+EXPORT_SYMBOL(ib_device_put);
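A hedged sketch of the lookup/put pairing that core callers such as the netlink handlers follow; my_query() is hypothetical.

static int my_query(struct net *net, u32 ifindex)
{
	struct ib_device *dev;

	dev = ib_device_get_by_index(net, ifindex);
	if (!dev)
		return -ENODEV;

	/* ... dev cannot complete unregistration while the get is held ... */

	ib_device_put(dev);
	return 0;
}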
static struct ib_device *__ib_device_get_by_name(const char *name)
{
struct ib_device *device;
+ unsigned long index;
- list_for_each_entry(device, &device_list, core_list)
- if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
+ xa_for_each (&devices, index, device)
+ if (!strcmp(name, dev_name(&device->dev)))
return device;
return NULL;
}
+/**
+ * ib_device_get_by_name - Find an IB device by name
+ * @name: The name to look for
+ * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
+ *
+ * Find and hold an ib_device by its name. The caller must call
+ * ib_device_put() on the returned pointer.
+ */
+struct ib_device *ib_device_get_by_name(const char *name,
+ enum rdma_driver_id driver_id)
+{
+ struct ib_device *device;
+
+ down_read(&devices_rwsem);
+ device = __ib_device_get_by_name(name);
+ if (device && driver_id != RDMA_DRIVER_UNKNOWN &&
+ device->ops.driver_id != driver_id)
+ device = NULL;
+
+ if (device) {
+ if (!ib_device_try_get(device))
+ device = NULL;
+ }
+ up_read(&devices_rwsem);
+ return device;
+}
+EXPORT_SYMBOL(ib_device_get_by_name);
+
+static int rename_compat_devs(struct ib_device *device)
+{
+ struct ib_core_device *cdev;
+ unsigned long index;
+ int ret = 0;
+
+ mutex_lock(&device->compat_devs_mutex);
+ xa_for_each (&device->compat_devs, index, cdev) {
+ ret = device_rename(&cdev->dev, dev_name(&device->dev));
+ if (ret) {
+ dev_warn(&cdev->dev,
+ "Fail to rename compatdev to new name %s\n",
+ dev_name(&device->dev));
+ break;
+ }
+ }
+ mutex_unlock(&device->compat_devs_mutex);
+ return ret;
+}
+
+int ib_device_rename(struct ib_device *ibdev, const char *name)
+{
+ unsigned long index;
+ void *client_data;
+ int ret;
+
+ down_write(&devices_rwsem);
+ if (!strcmp(name, dev_name(&ibdev->dev))) {
+ up_write(&devices_rwsem);
+ return 0;
+ }
+
+ if (__ib_device_get_by_name(name)) {
+ up_write(&devices_rwsem);
+ return -EEXIST;
+ }
+
+ ret = device_rename(&ibdev->dev, name);
+ if (ret) {
+ up_write(&devices_rwsem);
+ return ret;
+ }
+
+ strscpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
+ ret = rename_compat_devs(ibdev);
+
+ downgrade_write(&devices_rwsem);
+ down_read(&ibdev->client_data_rwsem);
+ xan_for_each_marked(&ibdev->client_data, index, client_data,
+ CLIENT_DATA_REGISTERED) {
+ struct ib_client *client = xa_load(&clients, index);
+
+ if (!client || !client->rename)
+ continue;
+
+ client->rename(ibdev, client_data);
+ }
+ up_read(&ibdev->client_data_rwsem);
+ rdma_nl_notify_event(ibdev, 0, RDMA_RENAME_EVENT);
+ up_read(&devices_rwsem);
+ return 0;
+}
+
+int ib_device_set_dim(struct ib_device *ibdev, u8 use_dim)
+{
+ if (use_dim > 1)
+ return -EINVAL;
+ ibdev->use_cq_dim = use_dim;
-static int alloc_name(char *name)
+ return 0;
+}
+
+static int alloc_name(struct ib_device *ibdev, const char *name)
{
- unsigned long *inuse;
- char buf[IB_DEVICE_NAME_MAX];
struct ib_device *device;
+ unsigned long index;
+ struct ida inuse;
+ int rc;
int i;
- inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
- if (!inuse)
- return -ENOMEM;
+ lockdep_assert_held_write(&devices_rwsem);
+ ida_init(&inuse);
+ xa_for_each (&devices, index, device) {
+ char buf[IB_DEVICE_NAME_MAX];
- list_for_each_entry(device, &device_list, core_list) {
- if (!sscanf(device->name, name, &i))
+ if (sscanf(dev_name(&device->dev), name, &i) != 1)
continue;
- if (i < 0 || i >= PAGE_SIZE * 8)
+ if (i < 0 || i >= INT_MAX)
continue;
snprintf(buf, sizeof buf, name, i);
- if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
- set_bit(i, inuse);
- }
+ if (strcmp(buf, dev_name(&device->dev)) != 0)
+ continue;
- i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
- free_page((unsigned long) inuse);
- snprintf(buf, sizeof buf, name, i);
+ rc = ida_alloc_range(&inuse, i, i, GFP_KERNEL);
+ if (rc < 0)
+ goto out;
+ }
- if (__ib_device_get_by_name(buf))
- return -ENFILE;
+ rc = ida_alloc(&inuse, GFP_KERNEL);
+ if (rc < 0)
+ goto out;
- strlcpy(name, buf, IB_DEVICE_NAME_MAX);
- return 0;
+ rc = dev_set_name(&ibdev->dev, name, rc);
+out:
+ ida_destroy(&inuse);
+ return rc;
}
static void ib_device_release(struct device *device)
{
struct ib_device *dev = container_of(device, struct ib_device, dev);
- ib_cache_release_one(dev);
- kfree(dev->port_immutable);
- kfree(dev);
+ free_netdevs(dev);
+ WARN_ON(refcount_read(&dev->refcount));
+ if (dev->hw_stats_data)
+ ib_device_release_hw_stats(dev->hw_stats_data);
+ if (dev->port_data) {
+ ib_cache_release_one(dev);
+ ib_security_release_port_pkey_list(dev);
+ rdma_counter_release(dev);
+ kfree_rcu(container_of(dev->port_data, struct ib_port_data_rcu,
+ pdata[0]),
+ rcu_head);
+ }
+
+ mutex_destroy(&dev->subdev_lock);
+ mutex_destroy(&dev->unregistration_lock);
+ mutex_destroy(&dev->compat_devs_mutex);
+
+ xa_destroy(&dev->compat_devs);
+ xa_destroy(&dev->client_data);
+ kfree_rcu(dev, rcu_head);
}
-static int ib_device_uevent(struct device *device,
+static int ib_device_uevent(const struct device *device,
struct kobj_uevent_env *env)
{
- struct ib_device *dev = container_of(device, struct ib_device, dev);
-
- if (add_uevent_var(env, "NAME=%s", dev->name))
+ if (add_uevent_var(env, "NAME=%s", dev_name(device)))
return -ENOMEM;
/*
@@ -191,15 +536,56 @@ static int ib_device_uevent(struct device *device,
return 0;
}
+static const void *net_namespace(const struct device *d)
+{
+ const struct ib_core_device *coredev =
+ container_of(d, struct ib_core_device, dev);
+
+ return read_pnet(&coredev->rdma_net);
+}
+
static struct class ib_class = {
.name = "infiniband",
.dev_release = ib_device_release,
.dev_uevent = ib_device_uevent,
+ .ns_type = &net_ns_type_operations,
+ .namespace = net_namespace,
};
+static void rdma_init_coredev(struct ib_core_device *coredev,
+ struct ib_device *dev, struct net *net)
+{
+ bool is_full_dev = &dev->coredev == coredev;
+
+ /* This BUILD_BUG_ON is intended to catch layout change
+ * of union of ib_core_device and device.
+ * dev must be the first element as ib_core and providers
+ * driver uses it. Adding anything in ib_core_device before
+ * device will break this assumption.
+ */
+ BUILD_BUG_ON(offsetof(struct ib_device, coredev.dev) !=
+ offsetof(struct ib_device, dev));
+
+ coredev->dev.class = &ib_class;
+ coredev->dev.groups = dev->groups;
+
+ /*
+ * Don't expose hw counters outside of the init namespace.
+ */
+ if (!is_full_dev && dev->hw_stats_attr_index)
+ coredev->dev.groups[dev->hw_stats_attr_index] = NULL;
+
+ device_initialize(&coredev->dev);
+ coredev->owner = dev;
+ INIT_LIST_HEAD(&coredev->port_list);
+ write_pnet(&coredev->rdma_net, net);
+}
+
/**
- * ib_alloc_device - allocate an IB device struct
+ * _ib_alloc_device - allocate an IB device struct
* @size:size of structure to allocate
+ * @net: network namespace device should be located in, namespace
+ * must stay valid until ib_register_device() is completed.
*
* Low-level drivers should use ib_alloc_device() to allocate &struct
* ib_device. @size is the size of the structure to be allocated,
@@ -207,9 +593,10 @@ static struct class ib_class = {
* ib_dealloc_device() must be used to free structures allocated with
* ib_alloc_device().
*/
-struct ib_device *ib_alloc_device(size_t size)
+struct ib_device *_ib_alloc_device(size_t size, struct net *net)
{
struct ib_device *device;
+ unsigned int i;
if (WARN_ON(size < sizeof(struct ib_device)))
return NULL;
@@ -218,20 +605,81 @@ struct ib_device *ib_alloc_device(size_t size)
if (!device)
return NULL;
- device->dev.class = &ib_class;
- device_initialize(&device->dev);
+ if (rdma_restrack_init(device)) {
+ kfree(device);
+ return NULL;
+ }
- dev_set_drvdata(&device->dev, device);
+ /* ib_devices_shared_netns can't change while we have active namespaces
+ * in the system which means either init_net is passed or the user has
+ * no idea what they are doing.
+ *
+ * To avoid breaking backward compatibility, when in shared mode,
+ * force to init the device in the init_net.
+ */
+ net = ib_devices_shared_netns ? &init_net : net;
+ rdma_init_coredev(&device->coredev, device, net);
INIT_LIST_HEAD(&device->event_handler_list);
- spin_lock_init(&device->event_handler_lock);
- spin_lock_init(&device->client_data_lock);
- INIT_LIST_HEAD(&device->client_data_list);
- INIT_LIST_HEAD(&device->port_list);
+ spin_lock_init(&device->qp_open_list_lock);
+ init_rwsem(&device->event_handler_rwsem);
+ mutex_init(&device->unregistration_lock);
+ /*
+ * client_data needs to be alloc because we don't want our mark to be
+ * destroyed if the user stores NULL in the client data.
+ */
+ xa_init_flags(&device->client_data, XA_FLAGS_ALLOC);
+ init_rwsem(&device->client_data_rwsem);
+ xa_init_flags(&device->compat_devs, XA_FLAGS_ALLOC);
+ mutex_init(&device->compat_devs_mutex);
+ init_completion(&device->unreg_completion);
+ INIT_WORK(&device->unregistration_work, ib_unregister_work);
+
+ spin_lock_init(&device->cq_pools_lock);
+ for (i = 0; i < ARRAY_SIZE(device->cq_pools); i++)
+ INIT_LIST_HEAD(&device->cq_pools[i]);
+
+ rwlock_init(&device->cache_lock);
+
+ device->uverbs_cmd_mask =
+ BIT_ULL(IB_USER_VERBS_CMD_ALLOC_MW) |
+ BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD) |
+ BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST) |
+ BIT_ULL(IB_USER_VERBS_CMD_CLOSE_XRCD) |
+ BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH) |
+ BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ) |
+ BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP) |
+ BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ) |
+ BIT_ULL(IB_USER_VERBS_CMD_CREATE_XSRQ) |
+ BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_MW) |
+ BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD) |
+ BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR) |
+ BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH) |
+ BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ) |
+ BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP) |
+ BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ) |
+ BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST) |
+ BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) |
+ BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP) |
+ BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ) |
+ BIT_ULL(IB_USER_VERBS_CMD_OPEN_QP) |
+ BIT_ULL(IB_USER_VERBS_CMD_OPEN_XRCD) |
+ BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT) |
+ BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP) |
+ BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ) |
+ BIT_ULL(IB_USER_VERBS_CMD_REG_MR) |
+ BIT_ULL(IB_USER_VERBS_CMD_REREG_MR) |
+ BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ);
+
+ mutex_init(&device->subdev_lock);
+ INIT_LIST_HEAD(&device->subdev_list_head);
+ INIT_LIST_HEAD(&device->subdev_list);
return device;
}
-EXPORT_SYMBOL(ib_alloc_device);
+EXPORT_SYMBOL(_ib_alloc_device);
/**
* ib_dealloc_device - free an IB device struct
@@ -241,66 +689,200 @@ EXPORT_SYMBOL(ib_alloc_device);
*/
void ib_dealloc_device(struct ib_device *device)
{
- WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
- device->reg_state != IB_DEV_UNINITIALIZED);
- kobject_put(&device->dev.kobj);
+ if (device->ops.dealloc_driver)
+ device->ops.dealloc_driver(device);
+
+ /*
+ * ib_unregister_driver() requires all devices to remain in the xarray
+ * while their ops are callable. The last op we call is dealloc_driver
+ * above. This is needed to create a fence on op callbacks prior to
+ * allowing the driver module to unload.
+ */
+ down_write(&devices_rwsem);
+ if (xa_load(&devices, device->index) == device)
+ xa_erase(&devices, device->index);
+ up_write(&devices_rwsem);
+
+ /* Expedite releasing netdev references */
+ free_netdevs(device);
+
+ WARN_ON(!xa_empty(&device->compat_devs));
+ WARN_ON(!xa_empty(&device->client_data));
+ WARN_ON(refcount_read(&device->refcount));
+ rdma_restrack_clean(device);
+ /* Balances with device_initialize */
+ put_device(&device->dev);
}
EXPORT_SYMBOL(ib_dealloc_device);
-static int add_client_context(struct ib_device *device, struct ib_client *client)
+/*
+ * add_client_context() and remove_client_context() must be safe against
+ * parallel calls on the same device - registration/unregistration of both the
+ * device and client can be occurring in parallel.
+ *
+ * The routines need to be a fence, any caller must not return until the add
+ * or remove is fully completed.
+ */
+static int add_client_context(struct ib_device *device,
+ struct ib_client *client)
{
- struct ib_client_data *context;
- unsigned long flags;
+ int ret = 0;
- context = kmalloc(sizeof *context, GFP_KERNEL);
- if (!context) {
- printk(KERN_WARNING "Couldn't allocate client context for %s/%s\n",
- device->name, client->name);
- return -ENOMEM;
- }
+ if (!device->kverbs_provider && !client->no_kverbs_req)
+ return 0;
- context->client = client;
- context->data = NULL;
- context->going_down = false;
+ down_write(&device->client_data_rwsem);
+ /*
+ * So long as the client is registered hold both the client and device
+ * unregistration locks.
+ */
+ if (!refcount_inc_not_zero(&client->uses))
+ goto out_unlock;
+ refcount_inc(&device->refcount);
- down_write(&lists_rwsem);
- spin_lock_irqsave(&device->client_data_lock, flags);
- list_add(&context->list, &device->client_data_list);
- spin_unlock_irqrestore(&device->client_data_lock, flags);
- up_write(&lists_rwsem);
+ /*
+ * Another caller to add_client_context got here first and has already
+ * completely initialized context.
+ */
+ if (xa_get_mark(&device->client_data, client->client_id,
+ CLIENT_DATA_REGISTERED))
+ goto out;
+ ret = xa_err(xa_store(&device->client_data, client->client_id, NULL,
+ GFP_KERNEL));
+ if (ret)
+ goto out;
+ downgrade_write(&device->client_data_rwsem);
+ if (client->add) {
+ if (client->add(device)) {
+ /*
+ * If a client fails to add then the error code is
+ * ignored, but we won't call any more ops on this
+ * client.
+ */
+ xa_erase(&device->client_data, client->client_id);
+ up_read(&device->client_data_rwsem);
+ ib_device_put(device);
+ ib_client_put(client);
+ return 0;
+ }
+ }
+
+ /* Readers shall not see a client until add has been completed */
+ xa_set_mark(&device->client_data, client->client_id,
+ CLIENT_DATA_REGISTERED);
+ up_read(&device->client_data_rwsem);
return 0;
+
+out:
+ ib_device_put(device);
+ ib_client_put(client);
+out_unlock:
+ up_write(&device->client_data_rwsem);
+ return ret;
}
-static int verify_immutable(const struct ib_device *dev, u8 port)
+static void remove_client_context(struct ib_device *device,
+ unsigned int client_id)
{
- return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
- rdma_max_mad_size(dev, port) != 0);
+ struct ib_client *client;
+ void *client_data;
+
+ down_write(&device->client_data_rwsem);
+ if (!xa_get_mark(&device->client_data, client_id,
+ CLIENT_DATA_REGISTERED)) {
+ up_write(&device->client_data_rwsem);
+ return;
+ }
+ client_data = xa_load(&device->client_data, client_id);
+ xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED);
+ client = xa_load(&clients, client_id);
+ up_write(&device->client_data_rwsem);
+
+ /*
+ * Notice we cannot be holding any exclusive locks when calling the
+ * remove callback as the remove callback can recurse back into any
+ * public functions in this module and thus try for any locks those
+ * functions take.
+ *
+ * For this reason clients and drivers should not call the
+ * unregistration functions while holding any locks.
+ */
+ if (client->remove)
+ client->remove(device, client_data);
+
+ xa_erase(&device->client_data, client_id);
+ ib_device_put(device);
+ ib_client_put(client);
}
-static int read_port_immutable(struct ib_device *device)
+static int alloc_port_data(struct ib_device *device)
{
- int ret;
- u8 start_port = rdma_start_port(device);
- u8 end_port = rdma_end_port(device);
- u8 port;
+ struct ib_port_data_rcu *pdata_rcu;
+ u32 port;
- /**
- * device->port_immutable is indexed directly by the port number to make
+ if (device->port_data)
+ return 0;
+
+ /* This can only be called once the physical port range is defined */
+ if (WARN_ON(!device->phys_port_cnt))
+ return -EINVAL;
+
+ /* Reserve U32_MAX so the logic to go over all the ports is sane */
+ if (WARN_ON(device->phys_port_cnt == U32_MAX))
+ return -EINVAL;
+
+ /*
+ * device->port_data is indexed directly by the port number to make
* access to this data as efficient as possible.
*
- * Therefore port_immutable is declared as a 1 based array with
- * potential empty slots at the beginning.
+ * Therefore port_data is declared as a 1 based array with potential
+ * empty slots at the beginning.
*/
- device->port_immutable = kzalloc(sizeof(*device->port_immutable)
- * (end_port + 1),
- GFP_KERNEL);
- if (!device->port_immutable)
+ pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata,
+ size_add(rdma_end_port(device), 1)),
+ GFP_KERNEL);
+ if (!pdata_rcu)
return -ENOMEM;
+ /*
+ * The rcu_head is put in front of the port data array and the stored
+ * pointer is adjusted since we never need to see that member until
+ * kfree_rcu.
+ */
+ device->port_data = pdata_rcu->pdata;
+
+ rdma_for_each_port (device, port) {
+ struct ib_port_data *pdata = &device->port_data[port];
+
+ pdata->ib_dev = device;
+ spin_lock_init(&pdata->pkey_list_lock);
+ INIT_LIST_HEAD(&pdata->pkey_list);
+ spin_lock_init(&pdata->netdev_lock);
+ INIT_HLIST_NODE(&pdata->ndev_hash_link);
+ }
+ return 0;
+}
+
+static int verify_immutable(const struct ib_device *dev, u32 port)
+{
+ return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
+ rdma_max_mad_size(dev, port) != 0);
+}
- for (port = start_port; port <= end_port; ++port) {
- ret = device->get_port_immutable(device, port,
- &device->port_immutable[port]);
+static int setup_port_data(struct ib_device *device)
+{
+ u32 port;
+ int ret;
+
+ ret = alloc_port_data(device);
+ if (ret)
+ return ret;
+
+ rdma_for_each_port (device, port) {
+ struct ib_port_data *pdata = &device->port_data[port];
+
+ ret = device->ops.get_port_immutable(device, port,
+ &pdata->immutable);
if (ret)
return ret;
@@ -311,113 +893,949 @@ static int read_port_immutable(struct ib_device *device)
}
/**
- * ib_register_device - Register an IB device with IB core
- * @device:Device to register
- *
- * Low-level drivers use ib_register_device() to register their
- * devices with the IB core. All registered clients will receive a
- * callback for each device that is added. @device must be allocated
- * with ib_alloc_device().
+ * ib_port_immutable_read() - Read rdma port's immutable data
+ * @dev: IB device
+ * @port: port number whose immutable data to read. Port numbers start at 1
+ * and are valid up to and including rdma_end_port().
*/
-int ib_register_device(struct ib_device *device,
- int (*port_callback)(struct ib_device *,
- u8, struct kobject *))
+const struct ib_port_immutable*
+ib_port_immutable_read(struct ib_device *dev, unsigned int port)
+{
+ WARN_ON(!rdma_is_port_valid(dev, port));
+ return &dev->port_data[port].immutable;
+}
+EXPORT_SYMBOL(ib_port_immutable_read);
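A hedged usage sketch with a hypothetical helper; port numbers are 1-based as documented above.

static bool my_port_has_smi(struct ib_device *dev, u32 port)
{
	const struct ib_port_immutable *immutable =
		ib_port_immutable_read(dev, port);

	return immutable->core_cap_flags & RDMA_CORE_CAP_IB_SMI;
}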
+
+void ib_get_device_fw_str(struct ib_device *dev, char *str)
+{
+ if (dev->ops.get_dev_fw_str)
+ dev->ops.get_dev_fw_str(dev, str);
+ else
+ str[0] = '\0';
+}
+EXPORT_SYMBOL(ib_get_device_fw_str);
+
+static void ib_policy_change_task(struct work_struct *work)
+{
+ struct ib_device *dev;
+ unsigned long index;
+
+ down_read(&devices_rwsem);
+ xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
+ unsigned int i;
+
+ rdma_for_each_port (dev, i) {
+ u64 sp;
+ ib_get_cached_subnet_prefix(dev, i, &sp);
+ ib_security_cache_change(dev, i, sp);
+ }
+ }
+ up_read(&devices_rwsem);
+}
+
+static int ib_security_change(struct notifier_block *nb, unsigned long event,
+ void *lsm_data)
{
+ if (event != LSM_POLICY_CHANGE)
+ return NOTIFY_DONE;
+
+ schedule_work(&ib_policy_change_work);
+ ib_mad_agent_security_change();
+
+ return NOTIFY_OK;
+}
+
+static void compatdev_release(struct device *dev)
+{
+ struct ib_core_device *cdev =
+ container_of(dev, struct ib_core_device, dev);
+
+ kfree(cdev);
+}
+
+static int add_one_compat_dev(struct ib_device *device,
+ struct rdma_dev_net *rnet)
+{
+ struct ib_core_device *cdev;
int ret;
- struct ib_client *client;
- mutex_lock(&device_mutex);
+ lockdep_assert_held(&rdma_nets_rwsem);
+ if (!ib_devices_shared_netns)
+ return 0;
+
+ /*
+ * Create and add compat device in all namespaces other than where it
+ * is currently bound to.
+ */
+ if (net_eq(read_pnet(&rnet->net),
+ read_pnet(&device->coredev.rdma_net)))
+ return 0;
+
+ /*
+ * The first of init_net() or ib_register_device() to take the
+ * compat_devs_mutex wins and gets to add the device. Others will wait
+ * for completion here.
+ */
+ mutex_lock(&device->compat_devs_mutex);
+ cdev = xa_load(&device->compat_devs, rnet->id);
+ if (cdev) {
+ ret = 0;
+ goto done;
+ }
+ ret = xa_reserve(&device->compat_devs, rnet->id, GFP_KERNEL);
+ if (ret)
+ goto done;
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev) {
+ ret = -ENOMEM;
+ goto cdev_err;
+ }
+
+ cdev->dev.parent = device->dev.parent;
+ rdma_init_coredev(cdev, device, read_pnet(&rnet->net));
+ cdev->dev.release = compatdev_release;
+ ret = dev_set_name(&cdev->dev, "%s", dev_name(&device->dev));
+ if (ret)
+ goto add_err;
+
+ ret = device_add(&cdev->dev);
+ if (ret)
+ goto add_err;
+ ret = ib_setup_port_attrs(cdev);
+ if (ret)
+ goto port_err;
+
+ ret = xa_err(xa_store(&device->compat_devs, rnet->id,
+ cdev, GFP_KERNEL));
+ if (ret)
+ goto insert_err;
+
+ mutex_unlock(&device->compat_devs_mutex);
+ return 0;
+
+insert_err:
+ ib_free_port_attrs(cdev);
+port_err:
+ device_del(&cdev->dev);
+add_err:
+ put_device(&cdev->dev);
+cdev_err:
+ xa_release(&device->compat_devs, rnet->id);
+done:
+ mutex_unlock(&device->compat_devs_mutex);
+ return ret;
+}
+
+static void remove_one_compat_dev(struct ib_device *device, u32 id)
+{
+ struct ib_core_device *cdev;
+
+ mutex_lock(&device->compat_devs_mutex);
+ cdev = xa_erase(&device->compat_devs, id);
+ mutex_unlock(&device->compat_devs_mutex);
+ if (cdev) {
+ ib_free_port_attrs(cdev);
+ device_del(&cdev->dev);
+ put_device(&cdev->dev);
+ }
+}
- if (strchr(device->name, '%')) {
- ret = alloc_name(device->name);
+static void remove_compat_devs(struct ib_device *device)
+{
+ struct ib_core_device *cdev;
+ unsigned long index;
+
+ xa_for_each (&device->compat_devs, index, cdev)
+ remove_one_compat_dev(device, index);
+}
+
+static int add_compat_devs(struct ib_device *device)
+{
+ struct rdma_dev_net *rnet;
+ unsigned long index;
+ int ret = 0;
+
+ lockdep_assert_held(&devices_rwsem);
+
+ down_read(&rdma_nets_rwsem);
+ xa_for_each (&rdma_nets, index, rnet) {
+ ret = add_one_compat_dev(device, rnet);
if (ret)
- goto out;
+ break;
+ }
+ up_read(&rdma_nets_rwsem);
+ return ret;
+}
+
+static void remove_all_compat_devs(void)
+{
+ struct ib_compat_device *cdev;
+ struct ib_device *dev;
+ unsigned long index;
+
+ down_read(&devices_rwsem);
+ xa_for_each (&devices, index, dev) {
+ unsigned long c_index = 0;
+
+ /* Hold nets_rwsem so that any other thread modifying this
+ * system param can sync with this thread.
+ */
+ down_read(&rdma_nets_rwsem);
+ xa_for_each (&dev->compat_devs, c_index, cdev)
+ remove_one_compat_dev(dev, c_index);
+ up_read(&rdma_nets_rwsem);
+ }
+ up_read(&devices_rwsem);
+}
+
+static int add_all_compat_devs(void)
+{
+ struct rdma_dev_net *rnet;
+ struct ib_device *dev;
+ unsigned long index;
+ int ret = 0;
+
+ down_read(&devices_rwsem);
+ xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
+ unsigned long net_index = 0;
+
+ /* Hold nets_rwsem so that any other thread modifying this
+ * system param can sync with this thread.
+ */
+ down_read(&rdma_nets_rwsem);
+ xa_for_each (&rdma_nets, net_index, rnet) {
+ ret = add_one_compat_dev(dev, rnet);
+ if (ret)
+ break;
+ }
+ up_read(&rdma_nets_rwsem);
}
+ up_read(&devices_rwsem);
+ if (ret)
+ remove_all_compat_devs();
+ return ret;
+}
- if (ib_device_check_mandatory(device)) {
- ret = -EINVAL;
+int rdma_compatdev_set(u8 enable)
+{
+ struct rdma_dev_net *rnet;
+ unsigned long index;
+ int ret = 0;
+
+ down_write(&rdma_nets_rwsem);
+ if (ib_devices_shared_netns == enable) {
+ up_write(&rdma_nets_rwsem);
+ return 0;
+ }
+
+ /* enable/disable of compat devices is not supported
+ * when more than default init_net exists.
+ */
+ xa_for_each (&rdma_nets, index, rnet) {
+ ret++;
+ break;
+ }
+ if (!ret)
+ ib_devices_shared_netns = enable;
+ up_write(&rdma_nets_rwsem);
+ if (ret)
+ return -EBUSY;
+
+ if (enable)
+ ret = add_all_compat_devs();
+ else
+ remove_all_compat_devs();
+ return ret;
+}
+
+static void rdma_dev_exit_net(struct net *net)
+{
+ struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
+ struct ib_device *dev;
+ unsigned long index;
+ int ret;
+
+ down_write(&rdma_nets_rwsem);
+ /*
+ * Prevent the ID from being re-used and hide the id from xa_for_each.
+ */
+ ret = xa_err(xa_store(&rdma_nets, rnet->id, NULL, GFP_KERNEL));
+ WARN_ON(ret);
+ up_write(&rdma_nets_rwsem);
+
+ down_read(&devices_rwsem);
+ xa_for_each (&devices, index, dev) {
+ get_device(&dev->dev);
+ /*
+ * Release the devices_rwsem so that a potentially blocking
+ * device_del() doesn't hold the devices_rwsem for too long.
+ */
+ up_read(&devices_rwsem);
+
+ remove_one_compat_dev(dev, rnet->id);
+
+ /*
+ * If the real device is in the NS then move it back to init.
+ */
+ rdma_dev_change_netns(dev, net, &init_net);
+
+ put_device(&dev->dev);
+ down_read(&devices_rwsem);
+ }
+ up_read(&devices_rwsem);
+
+ rdma_nl_net_exit(rnet);
+ xa_erase(&rdma_nets, rnet->id);
+}
+
+static __net_init int rdma_dev_init_net(struct net *net)
+{
+ struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
+ unsigned long index;
+ struct ib_device *dev;
+ int ret;
+
+ write_pnet(&rnet->net, net);
+
+ ret = rdma_nl_net_init(rnet);
+ if (ret)
+ return ret;
+
+ /* No need to create any compat devices in default init_net. */
+ if (net_eq(net, &init_net))
+ return 0;
+
+ ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL);
+ if (ret) {
+ rdma_nl_net_exit(rnet);
+ return ret;
+ }
+
+ down_read(&devices_rwsem);
+ xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
+ /* Hold nets_rwsem so that netlink command cannot change
+ * system configuration for device sharing mode.
+ */
+ down_read(&rdma_nets_rwsem);
+ ret = add_one_compat_dev(dev, rnet);
+ up_read(&rdma_nets_rwsem);
+ if (ret)
+ break;
+ }
+ up_read(&devices_rwsem);
+
+ if (ret)
+ rdma_dev_exit_net(net);
+
+ return ret;
+}
+
+/*
+ * Assign the unique string device name and the unique device index. This is
+ * undone by ib_dealloc_device.
+ */
+static int assign_name(struct ib_device *device, const char *name)
+{
+ static u32 last_id;
+ int ret;
+
+ down_write(&devices_rwsem);
+ /* Assign a unique name to the device */
+ if (strchr(name, '%'))
+ ret = alloc_name(device, name);
+ else
+ ret = dev_set_name(&device->dev, name);
+ if (ret)
+ goto out;
+
+ if (__ib_device_get_by_name(dev_name(&device->dev))) {
+ ret = -ENFILE;
goto out;
}
+ strscpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);
- ret = read_port_immutable(device);
+ ret = xa_alloc_cyclic(&devices, &device->index, device, xa_limit_31b,
+ &last_id, GFP_KERNEL);
+ if (ret > 0)
+ ret = 0;
+
+out:
+ up_write(&devices_rwsem);
+ return ret;
+}
+
+/*
+ * setup_device() allocates memory and sets up data that requires calling the
+ * device ops, this is the only reason these actions are not done during
+ * ib_alloc_device. It is undone by ib_dealloc_device().
+ */
+static int setup_device(struct ib_device *device)
+{
+ struct ib_udata uhw = {.outlen = 0, .inlen = 0};
+ int ret;
+
+ ib_device_check_mandatory(device);
+
+ ret = setup_port_data(device);
if (ret) {
- printk(KERN_WARNING "Couldn't create per port immutable data %s\n",
- device->name);
+ dev_warn(&device->dev, "Couldn't create per-port data\n");
+ return ret;
+ }
+
+ memset(&device->attrs, 0, sizeof(device->attrs));
+ ret = device->ops.query_device(device, &device->attrs, &uhw);
+ if (ret) {
+ dev_warn(&device->dev,
+ "Couldn't query the device attributes\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void disable_device(struct ib_device *device)
+{
+ u32 cid;
+
+ WARN_ON(!refcount_read(&device->refcount));
+
+ down_write(&devices_rwsem);
+ xa_clear_mark(&devices, device->index, DEVICE_REGISTERED);
+ up_write(&devices_rwsem);
+
+ /*
+ * Remove clients in LIFO order, see assign_client_id. This could be
+ * more efficient if xarray learns to reverse iterate. Since no new
+ * clients can be added to this ib_device past this point we only need
+ * the maximum possible client_id value here.
+ */
+ down_read(&clients_rwsem);
+ cid = highest_client_id;
+ up_read(&clients_rwsem);
+ while (cid) {
+ cid--;
+ remove_client_context(device, cid);
+ }
+
+ ib_cq_pool_cleanup(device);
+
+ /* Pairs with refcount_set in enable_device */
+ ib_device_put(device);
+ wait_for_completion(&device->unreg_completion);
+
+ /*
+ * compat devices must be removed after device refcount drops to zero.
+ * Otherwise init_net() may add more compatdevs after removing compat
+ * devices and before device is disabled.
+ */
+ remove_compat_devs(device);
+}
+
+/*
+ * An enabled device is visible to all clients and to all the public facing
+ * APIs that return a device pointer. This always returns with a new get, even
+ * if it fails.
+ */
+static int enable_device_and_get(struct ib_device *device)
+{
+ struct ib_client *client;
+ unsigned long index;
+ int ret = 0;
+
+ /*
+ * One ref belongs to the xa and the other belongs to this
+ * thread. This is needed to guard against parallel unregistration.
+ */
+ refcount_set(&device->refcount, 2);
+ down_write(&devices_rwsem);
+ xa_set_mark(&devices, device->index, DEVICE_REGISTERED);
+
+ /*
+ * By using downgrade_write() we ensure that no other thread can clear
+ * DEVICE_REGISTERED while we are completing the client setup.
+ */
+ downgrade_write(&devices_rwsem);
+
+ if (device->ops.enable_driver) {
+ ret = device->ops.enable_driver(device);
+ if (ret)
+ goto out;
+ }
+
+ down_read(&clients_rwsem);
+ xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
+ ret = add_client_context(device, client);
+ if (ret)
+ break;
+ }
+ up_read(&clients_rwsem);
+ if (!ret)
+ ret = add_compat_devs(device);
+out:
+ up_read(&devices_rwsem);
+ return ret;
+}
+
+static void prevent_dealloc_device(struct ib_device *ib_dev)
+{
+}
+
+static void ib_device_notify_register(struct ib_device *device)
+{
+ struct net_device *netdev;
+ u32 port;
+ int ret;
+
+ down_read(&devices_rwsem);
+
+ /* Mark for userspace that device is ready */
+ kobject_uevent(&device->dev.kobj, KOBJ_ADD);
+
+ ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT);
+ if (ret)
goto out;
+
+ rdma_for_each_port(device, port) {
+ netdev = ib_device_get_netdev(device, port);
+ if (!netdev)
+ continue;
+
+ ret = rdma_nl_notify_event(device, port,
+ RDMA_NETDEV_ATTACH_EVENT);
+ dev_put(netdev);
+ if (ret)
+ goto out;
}
+out:
+ up_read(&devices_rwsem);
+}
+
+/**
+ * ib_register_device - Register an IB device with IB core
+ * @device: Device to register
+ * @name: unique string device name. This may include a '%' which will
+ * cause a unique index to be added to the passed device name.
+ * @dma_device: pointer to a DMA-capable device. If %NULL, then the IB
+ * device will be used. In this case the caller should fully
+ * setup the ibdev for DMA. This usually means using dma_virt_ops.
+ *
+ * Low-level drivers use ib_register_device() to register their
+ * devices with the IB core. All registered clients will receive a
+ * callback for each device that is added. @device must be allocated
+ * with ib_alloc_device().
+ *
+ * If the driver uses ops.dealloc_driver and calls any ib_unregister_device()
+ * asynchronously then the device pointer may become freed as soon as this
+ * function returns.
+ */
+int ib_register_device(struct ib_device *device, const char *name,
+ struct device *dma_device)
+{
+ int ret;
+
+ ret = assign_name(device, name);
+ if (ret)
+ return ret;
+
+ /*
+ * If the caller does not provide a DMA capable device then the IB core
+ * will set up ib_sge and scatterlist structures that stash the kernel
+ * virtual address into the address field.
+ */
+ WARN_ON(dma_device && !dma_device->dma_parms);
+ device->dma_device = dma_device;
+
+ ret = setup_device(device);
+ if (ret)
+ return ret;
+
ret = ib_cache_setup_one(device);
if (ret) {
- printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
- goto out;
+ dev_warn(&device->dev,
+ "Couldn't set up InfiniBand P_Key/GID cache\n");
+ return ret;
}
- ret = ib_device_register_sysfs(device, port_callback);
+ device->groups[0] = &ib_dev_attr_group;
+ device->groups[1] = device->ops.device_group;
+ ret = ib_setup_device_attrs(device);
+ if (ret)
+ goto cache_cleanup;
+
+ ib_device_register_rdmacg(device);
+
+ rdma_counter_init(device);
+
+ /*
+ * Ensure that ADD uevent is not fired because it
+ * is too early and the device is not initialized yet.
+ */
+ dev_set_uevent_suppress(&device->dev, true);
+ ret = device_add(&device->dev);
+ if (ret)
+ goto cg_cleanup;
+
+ ret = ib_setup_port_attrs(&device->coredev);
if (ret) {
- printk(KERN_WARNING "Couldn't register device %s with driver model\n",
- device->name);
- ib_cache_cleanup_one(device);
- goto out;
+ dev_warn(&device->dev,
+ "Couldn't register device with driver model\n");
+ goto dev_cleanup;
+ }
+
+ ret = enable_device_and_get(device);
+ if (ret) {
+ void (*dealloc_fn)(struct ib_device *);
+
+ /*
+ * If we hit this error flow then we don't want to
+ * automatically dealloc the device since the caller is
+ * expected to call ib_dealloc_device() after
+ * ib_register_device() fails. This is tricky due to the
+ * possibility for a parallel unregistration along with this
+ * error flow. Since we have a refcount here we know any
+ * parallel flow is stopped in disable_device and will see the
+ * special dealloc_driver pointer, causing the responsibility to
+ * ib_dealloc_device() to revert back to this thread.
+ */
+ dealloc_fn = device->ops.dealloc_driver;
+ device->ops.dealloc_driver = prevent_dealloc_device;
+ ib_device_put(device);
+ __ib_unregister_device(device);
+ device->ops.dealloc_driver = dealloc_fn;
+ dev_set_uevent_suppress(&device->dev, false);
+ return ret;
}
+ dev_set_uevent_suppress(&device->dev, false);
- device->reg_state = IB_DEV_REGISTERED;
+ ib_device_notify_register(device);
- list_for_each_entry(client, &client_list, list)
- if (client->add && !add_client_context(device, client))
- client->add(device);
+ ib_device_put(device);
- down_write(&lists_rwsem);
- list_add_tail(&device->core_list, &device_list);
- up_write(&lists_rwsem);
-out:
- mutex_unlock(&device_mutex);
+ return 0;
+
+dev_cleanup:
+ device_del(&device->dev);
+cg_cleanup:
+ dev_set_uevent_suppress(&device->dev, false);
+ ib_device_unregister_rdmacg(device);
+cache_cleanup:
+ ib_cache_cleanup_one(device);
return ret;
}
EXPORT_SYMBOL(ib_register_device);
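A hedged sketch of provider-side registration with the three-argument ib_register_device() and a '%d' name template resolved by alloc_name(). The my_* names are invented, and the two-argument ib_alloc_device() wrapper is an assumption, since its exact form lives in ib_verbs.h outside this hunk.

#include <linux/pci.h>
#include <rdma/ib_verbs.h>

struct my_dev {
	struct ib_device ibdev;		/* embedded ib_device */
	/* ... provider private state ... */
};

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct my_dev *mdev;
	int ret;

	mdev = ib_alloc_device(my_dev, ibdev);	/* wraps _ib_alloc_device() */
	if (!mdev)
		return -ENOMEM;

	mdev->ibdev.phys_port_cnt = 1;
	/* ... fill in mdev->ibdev.ops, num_comp_vectors, etc. ... */

	ret = ib_register_device(&mdev->ibdev, "mydev_%d", &pdev->dev);
	if (ret)
		ib_dealloc_device(&mdev->ibdev);
	return ret;
}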
+/* Callers must hold a get on the device. */
+static void __ib_unregister_device(struct ib_device *ib_dev)
+{
+ struct ib_device *sub, *tmp;
+
+ mutex_lock(&ib_dev->subdev_lock);
+ list_for_each_entry_safe_reverse(sub, tmp,
+ &ib_dev->subdev_list_head,
+ subdev_list) {
+ list_del(&sub->subdev_list);
+ ib_dev->ops.del_sub_dev(sub);
+ ib_device_put(ib_dev);
+ }
+ mutex_unlock(&ib_dev->subdev_lock);
+
+ /*
+ * We have a registration lock so that all the calls to unregister are
+ * fully fenced, once any unregister returns the device is truly
+ * unregistered even if multiple callers are unregistering it at the
+ * same time. This also interacts with the registration flow and
+ * provides sane semantics if register and unregister are racing.
+ */
+ mutex_lock(&ib_dev->unregistration_lock);
+ if (!refcount_read(&ib_dev->refcount))
+ goto out;
+
+ disable_device(ib_dev);
+ rdma_nl_notify_event(ib_dev, 0, RDMA_UNREGISTER_EVENT);
+
+ /* Expedite removing unregistered pointers from the hash table */
+ free_netdevs(ib_dev);
+
+ ib_free_port_attrs(&ib_dev->coredev);
+ device_del(&ib_dev->dev);
+ ib_device_unregister_rdmacg(ib_dev);
+ ib_cache_cleanup_one(ib_dev);
+
+ /*
+ * Drivers using the new flow may not call ib_dealloc_device except
+ * in error unwind prior to registration success.
+ */
+ if (ib_dev->ops.dealloc_driver &&
+ ib_dev->ops.dealloc_driver != prevent_dealloc_device) {
+ WARN_ON(kref_read(&ib_dev->dev.kobj.kref) <= 1);
+ ib_dealloc_device(ib_dev);
+ }
+out:
+ mutex_unlock(&ib_dev->unregistration_lock);
+}
+
/**
* ib_unregister_device - Unregister an IB device
- * @device:Device to unregister
+ * @ib_dev: The device to unregister
*
* Unregister an IB device. All clients will receive a remove callback.
+ *
+ * Callers should call this routine only once, and protect against races with
+ * registration. Typically it should only be called as part of a remove
+ * callback in an implementation of driver core's struct device_driver and
+ * related.
+ *
+ * If ops.dealloc_driver is used then ib_dev will be freed upon return from
+ * this function.
*/
-void ib_unregister_device(struct ib_device *device)
+void ib_unregister_device(struct ib_device *ib_dev)
{
- struct ib_client_data *context, *tmp;
- unsigned long flags;
+ get_device(&ib_dev->dev);
+ __ib_unregister_device(ib_dev);
+ put_device(&ib_dev->dev);
+}
+EXPORT_SYMBOL(ib_unregister_device);
- mutex_lock(&device_mutex);
+/**
+ * ib_unregister_device_and_put - Unregister a device while holding a 'get'
+ * @ib_dev: The device to unregister
+ *
+ * This is the same as ib_unregister_device(), except it includes an internal
+ * ib_device_put() that should match a 'get' obtained by the caller.
+ *
+ * It is safe to call this routine concurrently from multiple threads while
+ * holding the 'get'. When the function returns the device is fully
+ * unregistered.
+ *
+ * Drivers using this flow MUST use the driver_unregister callback to clean up
+ * their resources associated with the device and dealloc it.
+ */
+void ib_unregister_device_and_put(struct ib_device *ib_dev)
+{
+ WARN_ON(!ib_dev->ops.dealloc_driver);
+ get_device(&ib_dev->dev);
+ ib_device_put(ib_dev);
+ __ib_unregister_device(ib_dev);
+ put_device(&ib_dev->dev);
+}
+EXPORT_SYMBOL(ib_unregister_device_and_put);
- down_write(&lists_rwsem);
- list_del(&device->core_list);
- spin_lock_irqsave(&device->client_data_lock, flags);
- list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
- context->going_down = true;
- spin_unlock_irqrestore(&device->client_data_lock, flags);
- downgrade_write(&lists_rwsem);
+/**
+ * ib_unregister_driver - Unregister all IB devices for a driver
+ * @driver_id: The driver to unregister
+ *
+ * This implements a fence for device unregistration. It only returns once all
+ * devices associated with the driver_id have fully completed their
+ * unregistration and returned from ib_unregister_device*().
+ *
+ * If devices are not yet unregistered, it goes ahead and starts unregistering
+ * them.
+ *
+ * This does not block creation of new devices with the given driver_id, that
+ * is the responsibility of the caller.
+ */
+void ib_unregister_driver(enum rdma_driver_id driver_id)
+{
+ struct ib_device *ib_dev;
+ unsigned long index;
+
+ down_read(&devices_rwsem);
+ xa_for_each (&devices, index, ib_dev) {
+ if (ib_dev->ops.driver_id != driver_id)
+ continue;
- list_for_each_entry_safe(context, tmp, &device->client_data_list,
- list) {
- if (context->client->remove)
- context->client->remove(device, context->data);
+ get_device(&ib_dev->dev);
+ up_read(&devices_rwsem);
+
+ WARN_ON(!ib_dev->ops.dealloc_driver);
+ __ib_unregister_device(ib_dev);
+
+ put_device(&ib_dev->dev);
+ down_read(&devices_rwsem);
}
- up_read(&lists_rwsem);
+ up_read(&devices_rwsem);
+}
+EXPORT_SYMBOL(ib_unregister_driver);
- mutex_unlock(&device_mutex);
+static void ib_unregister_work(struct work_struct *work)
+{
+ struct ib_device *ib_dev =
+ container_of(work, struct ib_device, unregistration_work);
- ib_device_unregister_sysfs(device);
- ib_cache_cleanup_one(device);
+ __ib_unregister_device(ib_dev);
+ put_device(&ib_dev->dev);
+}
+
+/**
+ * ib_unregister_device_queued - Unregister a device using a work queue
+ * @ib_dev: The device to unregister
+ *
+ * This schedules an asynchronous unregistration using a WQ for the device. A
+ * driver should use this to avoid holding locks while doing unregistration,
+ * such as holding the RTNL lock.
+ *
+ * Drivers using this API must use ib_unregister_driver before module unload
+ * to ensure that all scheduled unregistrations have completed.
+ */
+void ib_unregister_device_queued(struct ib_device *ib_dev)
+{
+ WARN_ON(!refcount_read(&ib_dev->refcount));
+ WARN_ON(!ib_dev->ops.dealloc_driver);
+ get_device(&ib_dev->dev);
+ if (!queue_work(ib_unreg_wq, &ib_dev->unregistration_work))
+ put_device(&ib_dev->dev);
+}
+EXPORT_SYMBOL(ib_unregister_device_queued);
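/*
 * Editorial usage sketch -- not part of this patch. It shows how a driver
 * whose ib_device_ops sets .dealloc_driver might combine the queued
 * unregistration above (called from a context that cannot sleep on locks
 * such as the RTNL) with ib_unregister_driver() as a fence at module
 * unload. The mydrv_* names are hypothetical; a real driver passes its own
 * enum rdma_driver_id value instead of RDMA_DRIVER_RXE.
 */
#include <linux/module.h>
#include <rdma/ib_verbs.h>

struct mydrv_device {
	struct ib_device ibdev;		/* allocated with ib_alloc_device() */
	/* ... hardware state ... */
};

static void mydrv_hot_unplug(struct mydrv_device *mdev)
{
	/* Defer the teardown to the unregistration workqueue */
	ib_unregister_device_queued(&mdev->ibdev);
}

static void __exit mydrv_exit(void)
{
	/* Waits until every queued or in-flight unregistration has finished */
	ib_unregister_driver(RDMA_DRIVER_RXE);
}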
+
+/*
+ * The caller must pass in a device that has the kref held and the refcount
+ * released. If the device is in cur_net and still registered then it is moved
+ * into net.
+ */
+static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
+ struct net *net)
+{
+ int ret2 = -EINVAL;
+ int ret;
+
+ mutex_lock(&device->unregistration_lock);
+
+ /*
+ * If the device is not under ib_device_get(), or the unregistration_lock
+ * is not held, the namespace can be changed or the device can be
+ * unregistered. Check again under the lock.
+ */
+ if (refcount_read(&device->refcount) == 0 ||
+ !net_eq(cur_net, read_pnet(&device->coredev.rdma_net))) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ kobject_uevent(&device->dev.kobj, KOBJ_REMOVE);
+ disable_device(device);
+
+ /*
+ * At this point no one can be using the device, so it is safe to
+ * change the namespace.
+ */
+ write_pnet(&device->coredev.rdma_net, net);
+
+ down_read(&devices_rwsem);
+ /*
+ * Currently rdma devices are system wide unique. So the device name
+ * is guaranteed free in the new namespace. Publish the new namespace
+ * at the sysfs level.
+ */
+ ret = device_rename(&device->dev, dev_name(&device->dev));
+ up_read(&devices_rwsem);
+ if (ret) {
+ dev_warn(&device->dev,
+ "%s: Couldn't rename device after namespace change\n",
+ __func__);
+ /* Try and put things back and re-enable the device */
+ write_pnet(&device->coredev.rdma_net, cur_net);
+ }
+
+ ret2 = enable_device_and_get(device);
+ if (ret2) {
+ /*
+ * This shouldn't really happen, but if it does, let the user
+ * retry at a later point. So don't disable the device.
+ */
+ dev_warn(&device->dev,
+ "%s: Couldn't re-enable device after namespace change\n",
+ __func__);
+ }
+ kobject_uevent(&device->dev.kobj, KOBJ_ADD);
- down_write(&lists_rwsem);
- spin_lock_irqsave(&device->client_data_lock, flags);
- list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
- kfree(context);
- spin_unlock_irqrestore(&device->client_data_lock, flags);
- up_write(&lists_rwsem);
+ ib_device_put(device);
+out:
+ mutex_unlock(&device->unregistration_lock);
+ if (ret)
+ return ret;
+ return ret2;
+}
- device->reg_state = IB_DEV_UNREGISTERED;
+int ib_device_set_netns_put(struct sk_buff *skb,
+ struct ib_device *dev, u32 ns_fd)
+{
+ struct net *net;
+ int ret;
+
+ net = get_net_ns_by_fd(ns_fd);
+ if (IS_ERR(net)) {
+ ret = PTR_ERR(net);
+ goto net_err;
+ }
+
+ if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ goto ns_err;
+ }
+
+ /*
+ * All the ib_clients, including uverbs, are reset when the namespace is
+ * changed and this cannot be blocked waiting for userspace to do
+ * something, so disassociation is mandatory.
+ */
+ if (!dev->ops.disassociate_ucontext || ib_devices_shared_netns) {
+ ret = -EOPNOTSUPP;
+ goto ns_err;
+ }
+
+ get_device(&dev->dev);
+ ib_device_put(dev);
+ ret = rdma_dev_change_netns(dev, current->nsproxy->net_ns, net);
+ put_device(&dev->dev);
+
+ put_net(net);
+ return ret;
+
+ns_err:
+ put_net(net);
+net_err:
+ ib_device_put(dev);
+ return ret;
+}
+
+static struct pernet_operations rdma_dev_net_ops = {
+ .init = rdma_dev_init_net,
+ .exit = rdma_dev_exit_net,
+ .id = &rdma_dev_net_id,
+ .size = sizeof(struct rdma_dev_net),
+};
+
+static int assign_client_id(struct ib_client *client)
+{
+ int ret;
+
+ lockdep_assert_held(&clients_rwsem);
+ /*
+ * The add/remove callbacks must be called in FIFO/LIFO order. To
+ * achieve this we assign client_ids so they are sorted in
+ * registration order.
+ */
+ client->client_id = highest_client_id;
+ ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ highest_client_id++;
+ xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
+ return 0;
+}
+
+static void remove_client_id(struct ib_client *client)
+{
+ down_write(&clients_rwsem);
+ xa_erase(&clients, client->client_id);
+ for (; highest_client_id; highest_client_id--)
+ if (xa_load(&clients, highest_client_id - 1))
+ break;
+ up_write(&clients_rwsem);
}
-EXPORT_SYMBOL(ib_unregister_device);
/**
* ib_register_client - Register an IB client
@@ -435,20 +1853,36 @@ EXPORT_SYMBOL(ib_unregister_device);
int ib_register_client(struct ib_client *client)
{
struct ib_device *device;
+ unsigned long index;
+ bool need_unreg = false;
+ int ret;
- mutex_lock(&device_mutex);
-
- list_for_each_entry(device, &device_list, core_list)
- if (client->add && !add_client_context(device, client))
- client->add(device);
-
- down_write(&lists_rwsem);
- list_add_tail(&client->list, &client_list);
- up_write(&lists_rwsem);
+ refcount_set(&client->uses, 1);
+ init_completion(&client->uses_zero);
- mutex_unlock(&device_mutex);
+ /*
+ * The devices_rwsem is held in write mode to ensure that a racing
+ * ib_register_device() sees a consistent view of clients and devices.
+ */
+ down_write(&devices_rwsem);
+ down_write(&clients_rwsem);
+ ret = assign_client_id(client);
+ if (ret)
+ goto out;
- return 0;
+ need_unreg = true;
+ xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) {
+ ret = add_client_context(device, client);
+ if (ret)
+ goto out;
+ }
+ ret = 0;
+out:
+ up_write(&clients_rwsem);
+ up_write(&devices_rwsem);
+ if (need_unreg && ret)
+ ib_unregister_client(client);
+ return ret;
}
EXPORT_SYMBOL(ib_register_client);
@@ -459,80 +1893,140 @@ EXPORT_SYMBOL(ib_register_client);
* Upper level users use ib_unregister_client() to remove their client
* registration. When ib_unregister_client() is called, the client
* will receive a remove callback for each IB device still registered.
+ *
+ * This is a full fence, once it returns no client callbacks will be called,
+ * or are running in another thread.
*/
void ib_unregister_client(struct ib_client *client)
{
- struct ib_client_data *context, *tmp;
struct ib_device *device;
- unsigned long flags;
+ unsigned long index;
- mutex_lock(&device_mutex);
+ down_write(&clients_rwsem);
+ ib_client_put(client);
+ xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED);
+ up_write(&clients_rwsem);
- down_write(&lists_rwsem);
- list_del(&client->list);
- up_write(&lists_rwsem);
+ /* We do not want to have locks while calling client->remove() */
+ rcu_read_lock();
+ xa_for_each (&devices, index, device) {
+ if (!ib_device_try_get(device))
+ continue;
+ rcu_read_unlock();
- list_for_each_entry(device, &device_list, core_list) {
- struct ib_client_data *found_context = NULL;
+ remove_client_context(device, client->client_id);
- down_write(&lists_rwsem);
- spin_lock_irqsave(&device->client_data_lock, flags);
- list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
- if (context->client == client) {
- context->going_down = true;
- found_context = context;
- break;
- }
- spin_unlock_irqrestore(&device->client_data_lock, flags);
- up_write(&lists_rwsem);
+ ib_device_put(device);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
- if (client->remove)
- client->remove(device, found_context ?
- found_context->data : NULL);
+ /*
+ * remove_client_context() is not a fence; it can return even though a
+ * removal is ongoing. Wait until all removals are completed.
+ */
+ wait_for_completion(&client->uses_zero);
+ remove_client_id(client);
+}
+EXPORT_SYMBOL(ib_unregister_client);
- if (!found_context) {
- pr_warn("No client context found for %s/%s\n",
- device->name, client->name);
+static int __ib_get_global_client_nl_info(const char *client_name,
+ struct ib_client_nl_info *res)
+{
+ struct ib_client *client;
+ unsigned long index;
+ int ret = -ENOENT;
+
+ down_read(&clients_rwsem);
+ xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
+ if (strcmp(client->name, client_name) != 0)
continue;
+ if (!client->get_global_nl_info) {
+ ret = -EOPNOTSUPP;
+ break;
}
+ ret = client->get_global_nl_info(res);
+ if (WARN_ON(ret == -ENOENT))
+ ret = -EINVAL;
+ if (!ret && res->cdev)
+ get_device(res->cdev);
+ break;
+ }
+ up_read(&clients_rwsem);
+ return ret;
+}
- down_write(&lists_rwsem);
- spin_lock_irqsave(&device->client_data_lock, flags);
- list_del(&found_context->list);
- kfree(found_context);
- spin_unlock_irqrestore(&device->client_data_lock, flags);
- up_write(&lists_rwsem);
+static int __ib_get_client_nl_info(struct ib_device *ibdev,
+ const char *client_name,
+ struct ib_client_nl_info *res)
+{
+ unsigned long index;
+ void *client_data;
+ int ret = -ENOENT;
+
+ down_read(&ibdev->client_data_rwsem);
+ xan_for_each_marked (&ibdev->client_data, index, client_data,
+ CLIENT_DATA_REGISTERED) {
+ struct ib_client *client = xa_load(&clients, index);
+
+ if (!client || strcmp(client->name, client_name) != 0)
+ continue;
+ if (!client->get_nl_info) {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ ret = client->get_nl_info(ibdev, client_data, res);
+ if (WARN_ON(ret == -ENOENT))
+ ret = -EINVAL;
+
+ /*
+ * The cdev is guaranteed valid as long as we are inside the
+ * client_data_rwsem as remove_one can't be called. Keep it
+ * valid for the caller.
+ */
+ if (!ret && res->cdev)
+ get_device(res->cdev);
+ break;
}
+ up_read(&ibdev->client_data_rwsem);
- mutex_unlock(&device_mutex);
+ return ret;
}
-EXPORT_SYMBOL(ib_unregister_client);
/**
- * ib_get_client_data - Get IB client context
- * @device:Device to get context for
- * @client:Client to get context for
- *
- * ib_get_client_data() returns client context set with
- * ib_set_client_data().
+ * ib_get_client_nl_info - Fetch the nl_info from a client
+ * @ibdev: IB device
+ * @client_name: Name of the client
+ * @res: Result of the query
*/
-void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
+int ib_get_client_nl_info(struct ib_device *ibdev, const char *client_name,
+ struct ib_client_nl_info *res)
{
- struct ib_client_data *context;
- void *ret = NULL;
- unsigned long flags;
+ int ret;
- spin_lock_irqsave(&device->client_data_lock, flags);
- list_for_each_entry(context, &device->client_data_list, list)
- if (context->client == client) {
- ret = context->data;
- break;
- }
- spin_unlock_irqrestore(&device->client_data_lock, flags);
+ if (ibdev)
+ ret = __ib_get_client_nl_info(ibdev, client_name, res);
+ else
+ ret = __ib_get_global_client_nl_info(client_name, res);
+#ifdef CONFIG_MODULES
+ if (ret == -ENOENT) {
+ request_module("rdma-client-%s", client_name);
+ if (ibdev)
+ ret = __ib_get_client_nl_info(ibdev, client_name, res);
+ else
+ ret = __ib_get_global_client_nl_info(client_name, res);
+ }
+#endif
+ if (ret) {
+ if (ret == -ENOENT)
+ return -EOPNOTSUPP;
+ return ret;
+ }
- return ret;
+ if (WARN_ON(!res->cdev))
+ return -EINVAL;
+ return 0;
}
-EXPORT_SYMBOL(ib_get_client_data);
/**
* ib_set_client_data - Set IB client context
@@ -540,27 +2034,22 @@ EXPORT_SYMBOL(ib_get_client_data);
* @client:Client to set context for
* @data:Context to set
*
- * ib_set_client_data() sets client context that can be retrieved with
- * ib_get_client_data().
+ * ib_set_client_data() sets client context data that can be retrieved with
+ * ib_get_client_data(). This can only be called while the client is
+ * registered to the device, once the ib_client remove() callback returns this
+ * cannot be called.
*/
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
void *data)
{
- struct ib_client_data *context;
- unsigned long flags;
-
- spin_lock_irqsave(&device->client_data_lock, flags);
- list_for_each_entry(context, &device->client_data_list, list)
- if (context->client == client) {
- context->data = data;
- goto out;
- }
+ void *rc;
- printk(KERN_WARNING "No client context found for %s/%s\n",
- device->name, client->name);
+ if (WARN_ON(IS_ERR(data)))
+ data = NULL;
-out:
- spin_unlock_irqrestore(&device->client_data_lock, flags);
+ rc = xa_store(&device->client_data, client->client_id, data,
+ GFP_KERNEL);
+ WARN_ON(xa_is_err(rc));
}
EXPORT_SYMBOL(ib_set_client_data);
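/*
 * Editorial usage sketch -- not part of this patch. A minimal ib_client
 * that attaches per-device state from its add callback via
 * ib_set_client_data() and frees it in remove. All myclient_* names are
 * hypothetical; the int-returning add callback matches recent kernels
 * (older trees used a void add callback). The client is registered once
 * from module init with ib_register_client(&myclient).
 */
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct myclient_data {
	u32 users;			/* whatever per-device state is needed */
};

static struct ib_client myclient;	/* forward declaration for the add callback */

static int myclient_add(struct ib_device *ibdev)
{
	struct myclient_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;
	ib_set_client_data(ibdev, &myclient, data);
	return 0;
}

static void myclient_remove(struct ib_device *ibdev, void *client_data)
{
	kfree(client_data);		/* same pointer stored in the add callback */
}

static struct ib_client myclient = {
	.name	= "myclient",
	.add	= myclient_add,
	.remove	= myclient_remove,
};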
@@ -570,19 +2059,15 @@ EXPORT_SYMBOL(ib_set_client_data);
*
* ib_register_event_handler() registers an event handler that will be
* called back when asynchronous IB events occur (as defined in
- * chapter 11 of the InfiniBand Architecture Specification). This
- * callback may occur in interrupt context.
+ * chapter 11 of the InfiniBand Architecture Specification). This
+ * callback occurs in workqueue context.
*/
-int ib_register_event_handler (struct ib_event_handler *event_handler)
+void ib_register_event_handler(struct ib_event_handler *event_handler)
{
- unsigned long flags;
-
- spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
+ down_write(&event_handler->device->event_handler_rwsem);
list_add_tail(&event_handler->list,
&event_handler->device->event_handler_list);
- spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
-
- return 0;
+ up_write(&event_handler->device->event_handler_rwsem);
}
EXPORT_SYMBOL(ib_register_event_handler);
@@ -593,58 +2078,85 @@ EXPORT_SYMBOL(ib_register_event_handler);
* Unregister an event handler registered with
* ib_register_event_handler().
*/
-int ib_unregister_event_handler(struct ib_event_handler *event_handler)
+void ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
- unsigned long flags;
-
- spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
+ down_write(&event_handler->device->event_handler_rwsem);
list_del(&event_handler->list);
- spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
-
- return 0;
+ up_write(&event_handler->device->event_handler_rwsem);
}
EXPORT_SYMBOL(ib_unregister_event_handler);
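/*
 * Editorial usage sketch -- not part of this patch. Registering an async
 * event handler with the INIT_IB_EVENT_HANDLER() helper from
 * <rdma/ib_verbs.h>; with this patch the handler runs from a workqueue
 * rather than interrupt context. The mywatch_* names are hypothetical.
 */
#include <rdma/ib_verbs.h>

struct mywatch {
	struct ib_event_handler event_handler;
};

static void mywatch_event(struct ib_event_handler *handler,
			  struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ACTIVE)
		pr_info("%s: port %u is active\n",
			dev_name(&event->device->dev),
			event->element.port_num);
}

static void mywatch_start(struct mywatch *w, struct ib_device *ibdev)
{
	INIT_IB_EVENT_HANDLER(&w->event_handler, ibdev, mywatch_event);
	ib_register_event_handler(&w->event_handler);
}

static void mywatch_stop(struct mywatch *w)
{
	ib_unregister_event_handler(&w->event_handler);
}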
-/**
- * ib_dispatch_event - Dispatch an asynchronous event
- * @event:Event to dispatch
- *
- * Low-level drivers must call ib_dispatch_event() to dispatch the
- * event to all registered event handlers when an asynchronous event
- * occurs.
- */
-void ib_dispatch_event(struct ib_event *event)
+void ib_dispatch_event_clients(struct ib_event *event)
{
- unsigned long flags;
struct ib_event_handler *handler;
- spin_lock_irqsave(&event->device->event_handler_lock, flags);
+ down_read(&event->device->event_handler_rwsem);
list_for_each_entry(handler, &event->device->event_handler_list, list)
handler->handler(handler, event);
- spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
+ up_read(&event->device->event_handler_rwsem);
}
-EXPORT_SYMBOL(ib_dispatch_event);
-/**
- * ib_query_device - Query IB device attributes
- * @device:Device to query
- * @device_attr:Device attributes
- *
- * ib_query_device() returns the attributes of a device through the
- * @device_attr pointer.
- */
-int ib_query_device(struct ib_device *device,
- struct ib_device_attr *device_attr)
+static int iw_query_port(struct ib_device *device,
+ u32 port_num,
+ struct ib_port_attr *port_attr)
{
- struct ib_udata uhw = {.outlen = 0, .inlen = 0};
+ struct in_device *inetdev;
+ struct net_device *netdev;
+
+ memset(port_attr, 0, sizeof(*port_attr));
+
+ netdev = ib_device_get_netdev(device, port_num);
+ if (!netdev)
+ return -ENODEV;
+
+ port_attr->max_mtu = IB_MTU_4096;
+ port_attr->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
+
+ if (!netif_carrier_ok(netdev)) {
+ port_attr->state = IB_PORT_DOWN;
+ port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+ } else {
+ rcu_read_lock();
+ inetdev = __in_dev_get_rcu(netdev);
+
+ if (inetdev && inetdev->ifa_list) {
+ port_attr->state = IB_PORT_ACTIVE;
+ port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+ } else {
+ port_attr->state = IB_PORT_INIT;
+ port_attr->phys_state =
+ IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING;
+ }
- memset(device_attr, 0, sizeof(*device_attr));
+ rcu_read_unlock();
+ }
- return device->query_device(device, device_attr, &uhw);
+ dev_put(netdev);
+ return device->ops.query_port(device, port_num, port_attr);
+}
+
+static int __ib_query_port(struct ib_device *device,
+ u32 port_num,
+ struct ib_port_attr *port_attr)
+{
+ int err;
+
+ memset(port_attr, 0, sizeof(*port_attr));
+
+ err = device->ops.query_port(device, port_num, port_attr);
+ if (err || port_attr->subnet_prefix)
+ return err;
+
+ if (rdma_port_get_link_layer(device, port_num) !=
+ IB_LINK_LAYER_INFINIBAND)
+ return 0;
+
+ ib_get_cached_subnet_prefix(device, port_num,
+ &port_attr->subnet_prefix);
+ return 0;
}
-EXPORT_SYMBOL(ib_query_device);
/**
* ib_query_port - Query IB port attributes
@@ -656,34 +2168,227 @@ EXPORT_SYMBOL(ib_query_device);
* @port_attr pointer.
*/
int ib_query_port(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
struct ib_port_attr *port_attr)
{
- if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
+ if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
- return device->query_port(device, port_num, port_attr);
+ if (rdma_protocol_iwarp(device, port_num))
+ return iw_query_port(device, port_num, port_attr);
+ else
+ return __ib_query_port(device, port_num, port_attr);
}
EXPORT_SYMBOL(ib_query_port);
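/*
 * Editorial usage sketch -- not part of this patch. A ULP-style check of
 * the link state through ib_query_port(); the attribute struct lives on
 * the stack and is fully written by the query. The function name is
 * hypothetical.
 */
#include <rdma/ib_verbs.h>

static bool my_port_is_active(struct ib_device *ibdev, u32 port_num)
{
	struct ib_port_attr attr;

	if (ib_query_port(ibdev, port_num, &attr))
		return false;
	return attr.state == IB_PORT_ACTIVE;
}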
+static void add_ndev_hash(struct ib_port_data *pdata)
+{
+ unsigned long flags;
+
+ might_sleep();
+
+ spin_lock_irqsave(&ndev_hash_lock, flags);
+ if (hash_hashed(&pdata->ndev_hash_link)) {
+ hash_del_rcu(&pdata->ndev_hash_link);
+ spin_unlock_irqrestore(&ndev_hash_lock, flags);
+ /*
+ * We cannot do hash_add_rcu after a hash_del_rcu until the
+ * grace period has elapsed.
+ */
+ synchronize_rcu();
+ spin_lock_irqsave(&ndev_hash_lock, flags);
+ }
+ if (pdata->netdev)
+ hash_add_rcu(ndev_hash, &pdata->ndev_hash_link,
+ (uintptr_t)pdata->netdev);
+ spin_unlock_irqrestore(&ndev_hash_lock, flags);
+}
+
/**
- * ib_query_gid - Get GID table entry
- * @device:Device to query
- * @port_num:Port number to query
- * @index:GID table index to query
- * @gid:Returned GID
+ * ib_device_set_netdev - Associate the ib_dev with an underlying net_device
+ * @ib_dev: Device to modify
+ * @ndev: net_device to affiliate, may be NULL
+ * @port: IB port the net_device is connected to
+ *
+ * Drivers should use this to link the ib_device to a netdev so the netdev
+ * shows up in interfaces like ib_enum_roce_netdev. Only one netdev may be
+ * affiliated with any port.
+ *
+ * The caller must ensure that the given ndev is not unregistered or
+ * unregistering, and that either the ib_device is unregistered or
+ * ib_device_set_netdev() is called with NULL when the ndev sends a
+ * NETDEV_UNREGISTER event.
+ */
+int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
+ u32 port)
+{
+ enum rdma_nl_notify_event_type etype;
+ struct net_device *old_ndev;
+ struct ib_port_data *pdata;
+ unsigned long flags;
+ int ret;
+
+ if (!rdma_is_port_valid(ib_dev, port))
+ return -EINVAL;
+
+ /*
+ * Drivers wish to call this before ib_register_device(), so we have to
+ * set up the port data early.
+ */
+ ret = alloc_port_data(ib_dev);
+ if (ret)
+ return ret;
+
+ pdata = &ib_dev->port_data[port];
+ spin_lock_irqsave(&pdata->netdev_lock, flags);
+ old_ndev = rcu_dereference_protected(
+ pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
+ if (old_ndev == ndev) {
+ spin_unlock_irqrestore(&pdata->netdev_lock, flags);
+ return 0;
+ }
+
+ rcu_assign_pointer(pdata->netdev, ndev);
+ netdev_put(old_ndev, &pdata->netdev_tracker);
+ netdev_hold(ndev, &pdata->netdev_tracker, GFP_ATOMIC);
+ spin_unlock_irqrestore(&pdata->netdev_lock, flags);
+
+ add_ndev_hash(pdata);
+
+ /* Make sure that the device is registered before we send events */
+ if (xa_load(&devices, ib_dev->index) != ib_dev)
+ return 0;
+
+ etype = ndev ? RDMA_NETDEV_ATTACH_EVENT : RDMA_NETDEV_DETACH_EVENT;
+ rdma_nl_notify_event(ib_dev, port, etype);
+
+ return 0;
+}
+EXPORT_SYMBOL(ib_device_set_netdev);
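/*
 * Editorial usage sketch -- not part of this patch. A RoCE-style driver
 * attaching its net_device to port 1 once both are set up, and detaching
 * it again before the netdev goes away, as the comment above requires.
 * The mydrv_* names are hypothetical.
 */
#include <rdma/ib_verbs.h>

static int mydrv_attach_netdev(struct ib_device *ibdev,
			       struct net_device *ndev)
{
	return ib_device_set_netdev(ibdev, ndev, 1);
}

static void mydrv_on_netdev_unregister(struct ib_device *ibdev)
{
	/* Drop the association before the netdev is unregistered */
	ib_device_set_netdev(ibdev, NULL, 1);
}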
+
+static void free_netdevs(struct ib_device *ib_dev)
+{
+ unsigned long flags;
+ u32 port;
+
+ if (!ib_dev->port_data)
+ return;
+
+ rdma_for_each_port (ib_dev, port) {
+ struct ib_port_data *pdata = &ib_dev->port_data[port];
+ struct net_device *ndev;
+
+ spin_lock_irqsave(&pdata->netdev_lock, flags);
+ ndev = rcu_dereference_protected(
+ pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
+ if (ndev) {
+ spin_lock(&ndev_hash_lock);
+ hash_del_rcu(&pdata->ndev_hash_link);
+ spin_unlock(&ndev_hash_lock);
+
+ /*
+ * If this is the last dev_put there is still a
+ * synchronize_rcu before the netdev is kfreed, so we
+ * can continue to rely on unlocked pointer
+ * comparisons after the put
+ */
+ rcu_assign_pointer(pdata->netdev, NULL);
+ netdev_put(ndev, &pdata->netdev_tracker);
+ }
+ spin_unlock_irqrestore(&pdata->netdev_lock, flags);
+ }
+}
+
+struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
+ u32 port)
+{
+ struct ib_port_data *pdata;
+ struct net_device *res;
+
+ if (!rdma_is_port_valid(ib_dev, port))
+ return NULL;
+
+ if (!ib_dev->port_data)
+ return NULL;
+
+ pdata = &ib_dev->port_data[port];
+
+ /*
+ * New drivers should use ib_device_set_netdev() not the legacy
+ * get_netdev().
+ */
+ if (ib_dev->ops.get_netdev)
+ res = ib_dev->ops.get_netdev(ib_dev, port);
+ else {
+ spin_lock(&pdata->netdev_lock);
+ res = rcu_dereference_protected(
+ pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
+ dev_hold(res);
+ spin_unlock(&pdata->netdev_lock);
+ }
+
+ return res;
+}
+EXPORT_SYMBOL(ib_device_get_netdev);
+
+/**
+ * ib_query_netdev_port - Query the port number of a net_device
+ * associated with an ibdev
+ * @ibdev: IB device
+ * @ndev: Network device
+ * @port: IB port the net_device is connected to
+ */
+int ib_query_netdev_port(struct ib_device *ibdev, struct net_device *ndev,
+ u32 *port)
+{
+ struct net_device *ib_ndev;
+ u32 port_num;
+
+ rdma_for_each_port(ibdev, port_num) {
+ ib_ndev = ib_device_get_netdev(ibdev, port_num);
+ if (ndev == ib_ndev) {
+ *port = port_num;
+ dev_put(ib_ndev);
+ return 0;
+ }
+ dev_put(ib_ndev);
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(ib_query_netdev_port);
+
+/**
+ * ib_device_get_by_netdev - Find an IB device associated with a netdev
+ * @ndev: netdev to locate
+ * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
*
- * ib_query_gid() fetches the specified GID table entry.
+ * Find and hold an ib_device that is associated with a netdev via
+ * ib_device_set_netdev(). The caller must call ib_device_put() on the
+ * returned pointer.
*/
-int ib_query_gid(struct ib_device *device,
- u8 port_num, int index, union ib_gid *gid)
+struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
+ enum rdma_driver_id driver_id)
{
- if (rdma_cap_roce_gid_table(device, port_num))
- return ib_get_cached_gid(device, port_num, index, gid);
+ struct ib_device *res = NULL;
+ struct ib_port_data *cur;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu (ndev_hash, cur, ndev_hash_link,
+ (uintptr_t)ndev) {
+ if (rcu_access_pointer(cur->netdev) == ndev &&
+ (driver_id == RDMA_DRIVER_UNKNOWN ||
+ cur->ib_dev->ops.driver_id == driver_id) &&
+ ib_device_try_get(cur->ib_dev)) {
+ res = cur->ib_dev;
+ break;
+ }
+ }
+ rcu_read_unlock();
- return device->query_gid(device, port_num, index, gid);
+ return res;
}
-EXPORT_SYMBOL(ib_query_gid);
+EXPORT_SYMBOL(ib_device_get_by_netdev);
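/*
 * Editorial usage sketch -- not part of this patch. Resolving a netdev to
 * its ib_device; every successful lookup must be balanced by
 * ib_device_put(). The function name is hypothetical.
 */
#include <rdma/ib_verbs.h>

static bool netdev_has_rdma(struct net_device *ndev)
{
	struct ib_device *ibdev;

	ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
	if (!ibdev)
		return false;
	ib_device_put(ibdev);
	return true;
}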
/**
* ib_enum_roce_netdev - enumerate all RoCE ports
@@ -703,27 +2408,16 @@ void ib_enum_roce_netdev(struct ib_device *ib_dev,
roce_netdev_callback cb,
void *cookie)
{
- u8 port;
+ u32 port;
- for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev);
- port++)
+ rdma_for_each_port (ib_dev, port)
if (rdma_protocol_roce(ib_dev, port)) {
- struct net_device *idev = NULL;
-
- if (ib_dev->get_netdev)
- idev = ib_dev->get_netdev(ib_dev, port);
-
- if (idev &&
- idev->reg_state >= NETREG_UNREGISTERED) {
- dev_put(idev);
- idev = NULL;
- }
+ struct net_device *idev =
+ ib_device_get_netdev(ib_dev, port);
if (filter(ib_dev, port, idev, filter_cookie))
cb(ib_dev, port, idev, cookie);
-
- if (idev)
- dev_put(idev);
+ dev_put(idev);
}
}
@@ -744,11 +2438,40 @@ void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
void *cookie)
{
struct ib_device *dev;
+ unsigned long index;
- down_read(&lists_rwsem);
- list_for_each_entry(dev, &device_list, core_list)
+ down_read(&devices_rwsem);
+ xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED)
ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
- up_read(&lists_rwsem);
+ up_read(&devices_rwsem);
+}
+
+/*
+ * ib_enum_all_devs - enumerate all ib_devices
+ * @cb: Callback to call for each found ib_device
+ *
+ * Enumerates all ib_devices and calls callback() on each device.
+ */
+int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ unsigned long index;
+ struct ib_device *dev;
+ unsigned int idx = 0;
+ int ret = 0;
+
+ down_read(&devices_rwsem);
+ xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
+ if (!rdma_dev_access_netns(dev, sock_net(skb->sk)))
+ continue;
+
+ ret = nldev_cb(dev, skb, cb, idx);
+ if (ret)
+ break;
+ idx++;
+ }
+ up_read(&devices_rwsem);
+ return ret;
}
/**
@@ -761,9 +2484,15 @@ void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
* ib_query_pkey() fetches the specified P_Key table entry.
*/
int ib_query_pkey(struct ib_device *device,
- u8 port_num, u16 index, u16 *pkey)
+ u32 port_num, u16 index, u16 *pkey)
{
- return device->query_pkey(device, port_num, index, pkey);
+ if (!rdma_is_port_valid(device, port_num))
+ return -EINVAL;
+
+ if (!device->ops.query_pkey)
+ return -EOPNOTSUPP;
+
+ return device->ops.query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);
@@ -780,11 +2509,11 @@ int ib_modify_device(struct ib_device *device,
int device_modify_mask,
struct ib_device_modify *device_modify)
{
- if (!device->modify_device)
- return -ENOSYS;
+ if (!device->ops.modify_device)
+ return -EOPNOTSUPP;
- return device->modify_device(device, device_modify_mask,
- device_modify);
+ return device->ops.modify_device(device, device_modify_mask,
+ device_modify);
}
EXPORT_SYMBOL(ib_modify_device);
@@ -800,23 +2529,31 @@ EXPORT_SYMBOL(ib_modify_device);
* @port_modify_mask and @port_modify structure.
*/
int ib_modify_port(struct ib_device *device,
- u8 port_num, int port_modify_mask,
+ u32 port_num, int port_modify_mask,
struct ib_port_modify *port_modify)
{
- if (!device->modify_port)
- return -ENOSYS;
+ int rc;
- if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
+ if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
- return device->modify_port(device, port_num, port_modify_mask,
- port_modify);
+ if (device->ops.modify_port)
+ rc = device->ops.modify_port(device, port_num,
+ port_modify_mask,
+ port_modify);
+ else if (rdma_protocol_roce(device, port_num) &&
+ ((port_modify->set_port_cap_mask & ~IB_PORT_CM_SUP) == 0 ||
+ (port_modify->clr_port_cap_mask & ~IB_PORT_CM_SUP) == 0))
+ rc = 0;
+ else
+ rc = -EOPNOTSUPP;
+ return rc;
}
EXPORT_SYMBOL(ib_modify_port);
/**
* ib_find_gid - Returns the port number and GID table index where
- * a specified GID value occurs.
- * a specified GID value occurs.
+ * a specified GID value occurs. It searches only the IB link layer.
* @device: The device to query.
* @gid: The GID value to search for.
* @port_num: The port number of the device where the GID value was found.
@@ -824,24 +2561,22 @@ EXPORT_SYMBOL(ib_modify_port);
* parameter may be NULL.
*/
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
- u8 *port_num, u16 *index)
+ u32 *port_num, u16 *index)
{
union ib_gid tmp_gid;
- int ret, port, i;
+ u32 port;
+ int ret, i;
- for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
- if (rdma_cap_roce_gid_table(device, port)) {
- if (!ib_cache_gid_find_by_port(device, gid, port,
- NULL, index)) {
- *port_num = port;
- return 0;
- }
- }
+ rdma_for_each_port (device, port) {
+ if (!rdma_protocol_ib(device, port))
+ continue;
- for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
- ret = ib_query_gid(device, port, i, &tmp_gid);
+ for (i = 0; i < device->port_data[port].immutable.gid_tbl_len;
+ ++i) {
+ ret = rdma_query_gid(device, port, i, &tmp_gid);
if (ret)
- return ret;
+ continue;
+
if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
*port_num = port;
if (index)
@@ -864,13 +2599,14 @@ EXPORT_SYMBOL(ib_find_gid);
* @index: The index into the PKey table where the PKey was found.
*/
int ib_find_pkey(struct ib_device *device,
- u8 port_num, u16 pkey, u16 *index)
+ u32 port_num, u16 pkey, u16 *index)
{
int ret, i;
u16 tmp_pkey;
int partial_ix = -1;
- for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
+ for (i = 0; i < device->port_data[port_num].immutable.pkey_tbl_len;
+ ++i) {
ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
if (ret)
return ret;
@@ -903,69 +2639,479 @@ EXPORT_SYMBOL(ib_find_pkey);
* @gid: A GID that the net_dev uses to communicate.
* @addr: Contains the IP address that the request specified as its
* destination.
+ *
*/
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
- u8 port,
+ u32 port,
u16 pkey,
const union ib_gid *gid,
const struct sockaddr *addr)
{
struct net_device *net_dev = NULL;
- struct ib_client_data *context;
+ unsigned long index;
+ void *client_data;
if (!rdma_protocol_ib(dev, port))
return NULL;
- down_read(&lists_rwsem);
-
- list_for_each_entry(context, &dev->client_data_list, list) {
- struct ib_client *client = context->client;
+ /*
+ * Holding the read side guarantees that the client will not become
+ * unregistered while we are calling get_net_dev_by_params()
+ */
+ down_read(&dev->client_data_rwsem);
+ xan_for_each_marked (&dev->client_data, index, client_data,
+ CLIENT_DATA_REGISTERED) {
+ struct ib_client *client = xa_load(&clients, index);
- if (context->going_down)
+ if (!client || !client->get_net_dev_by_params)
continue;
- if (client->get_net_dev_by_params) {
- net_dev = client->get_net_dev_by_params(dev, port, pkey,
- gid, addr,
- context->data);
- if (net_dev)
- break;
- }
+ net_dev = client->get_net_dev_by_params(dev, port, pkey, gid,
+ addr, client_data);
+ if (net_dev)
+ break;
}
-
- up_read(&lists_rwsem);
+ up_read(&dev->client_data_rwsem);
return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);
+void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
+{
+ struct ib_device_ops *dev_ops = &dev->ops;
+#define SET_DEVICE_OP(ptr, name) \
+ do { \
+ if (ops->name) \
+ if (!((ptr)->name)) \
+ (ptr)->name = ops->name; \
+ } while (0)
+
+#define SET_OBJ_SIZE(ptr, name) SET_DEVICE_OP(ptr, size_##name)
+
+ if (ops->driver_id != RDMA_DRIVER_UNKNOWN) {
+ WARN_ON(dev_ops->driver_id != RDMA_DRIVER_UNKNOWN &&
+ dev_ops->driver_id != ops->driver_id);
+ dev_ops->driver_id = ops->driver_id;
+ }
+ if (ops->owner) {
+ WARN_ON(dev_ops->owner && dev_ops->owner != ops->owner);
+ dev_ops->owner = ops->owner;
+ }
+ if (ops->uverbs_abi_ver)
+ dev_ops->uverbs_abi_ver = ops->uverbs_abi_ver;
+
+ dev_ops->uverbs_no_driver_id_binding |=
+ ops->uverbs_no_driver_id_binding;
+
+ SET_DEVICE_OP(dev_ops, add_gid);
+ SET_DEVICE_OP(dev_ops, add_sub_dev);
+ SET_DEVICE_OP(dev_ops, advise_mr);
+ SET_DEVICE_OP(dev_ops, alloc_dm);
+ SET_DEVICE_OP(dev_ops, alloc_dmah);
+ SET_DEVICE_OP(dev_ops, alloc_hw_device_stats);
+ SET_DEVICE_OP(dev_ops, alloc_hw_port_stats);
+ SET_DEVICE_OP(dev_ops, alloc_mr);
+ SET_DEVICE_OP(dev_ops, alloc_mr_integrity);
+ SET_DEVICE_OP(dev_ops, alloc_mw);
+ SET_DEVICE_OP(dev_ops, alloc_pd);
+ SET_DEVICE_OP(dev_ops, alloc_rdma_netdev);
+ SET_DEVICE_OP(dev_ops, alloc_ucontext);
+ SET_DEVICE_OP(dev_ops, alloc_xrcd);
+ SET_DEVICE_OP(dev_ops, attach_mcast);
+ SET_DEVICE_OP(dev_ops, check_mr_status);
+ SET_DEVICE_OP(dev_ops, counter_alloc_stats);
+ SET_DEVICE_OP(dev_ops, counter_bind_qp);
+ SET_DEVICE_OP(dev_ops, counter_dealloc);
+ SET_DEVICE_OP(dev_ops, counter_init);
+ SET_DEVICE_OP(dev_ops, counter_unbind_qp);
+ SET_DEVICE_OP(dev_ops, counter_update_stats);
+ SET_DEVICE_OP(dev_ops, create_ah);
+ SET_DEVICE_OP(dev_ops, create_counters);
+ SET_DEVICE_OP(dev_ops, create_cq);
+ SET_DEVICE_OP(dev_ops, create_cq_umem);
+ SET_DEVICE_OP(dev_ops, create_flow);
+ SET_DEVICE_OP(dev_ops, create_qp);
+ SET_DEVICE_OP(dev_ops, create_rwq_ind_table);
+ SET_DEVICE_OP(dev_ops, create_srq);
+ SET_DEVICE_OP(dev_ops, create_user_ah);
+ SET_DEVICE_OP(dev_ops, create_wq);
+ SET_DEVICE_OP(dev_ops, dealloc_dm);
+ SET_DEVICE_OP(dev_ops, dealloc_dmah);
+ SET_DEVICE_OP(dev_ops, dealloc_driver);
+ SET_DEVICE_OP(dev_ops, dealloc_mw);
+ SET_DEVICE_OP(dev_ops, dealloc_pd);
+ SET_DEVICE_OP(dev_ops, dealloc_ucontext);
+ SET_DEVICE_OP(dev_ops, dealloc_xrcd);
+ SET_DEVICE_OP(dev_ops, del_gid);
+ SET_DEVICE_OP(dev_ops, del_sub_dev);
+ SET_DEVICE_OP(dev_ops, dereg_mr);
+ SET_DEVICE_OP(dev_ops, destroy_ah);
+ SET_DEVICE_OP(dev_ops, destroy_counters);
+ SET_DEVICE_OP(dev_ops, destroy_cq);
+ SET_DEVICE_OP(dev_ops, destroy_flow);
+ SET_DEVICE_OP(dev_ops, destroy_flow_action);
+ SET_DEVICE_OP(dev_ops, destroy_qp);
+ SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table);
+ SET_DEVICE_OP(dev_ops, destroy_srq);
+ SET_DEVICE_OP(dev_ops, destroy_wq);
+ SET_DEVICE_OP(dev_ops, device_group);
+ SET_DEVICE_OP(dev_ops, detach_mcast);
+ SET_DEVICE_OP(dev_ops, disassociate_ucontext);
+ SET_DEVICE_OP(dev_ops, drain_rq);
+ SET_DEVICE_OP(dev_ops, drain_sq);
+ SET_DEVICE_OP(dev_ops, enable_driver);
+ SET_DEVICE_OP(dev_ops, fill_res_cm_id_entry);
+ SET_DEVICE_OP(dev_ops, fill_res_cq_entry);
+ SET_DEVICE_OP(dev_ops, fill_res_cq_entry_raw);
+ SET_DEVICE_OP(dev_ops, fill_res_mr_entry);
+ SET_DEVICE_OP(dev_ops, fill_res_mr_entry_raw);
+ SET_DEVICE_OP(dev_ops, fill_res_qp_entry);
+ SET_DEVICE_OP(dev_ops, fill_res_qp_entry_raw);
+ SET_DEVICE_OP(dev_ops, fill_res_srq_entry);
+ SET_DEVICE_OP(dev_ops, fill_res_srq_entry_raw);
+ SET_DEVICE_OP(dev_ops, fill_stat_mr_entry);
+ SET_DEVICE_OP(dev_ops, get_dev_fw_str);
+ SET_DEVICE_OP(dev_ops, get_dma_mr);
+ SET_DEVICE_OP(dev_ops, get_hw_stats);
+ SET_DEVICE_OP(dev_ops, get_link_layer);
+ SET_DEVICE_OP(dev_ops, get_netdev);
+ SET_DEVICE_OP(dev_ops, get_numa_node);
+ SET_DEVICE_OP(dev_ops, get_port_immutable);
+ SET_DEVICE_OP(dev_ops, get_vector_affinity);
+ SET_DEVICE_OP(dev_ops, get_vf_config);
+ SET_DEVICE_OP(dev_ops, get_vf_guid);
+ SET_DEVICE_OP(dev_ops, get_vf_stats);
+ SET_DEVICE_OP(dev_ops, iw_accept);
+ SET_DEVICE_OP(dev_ops, iw_add_ref);
+ SET_DEVICE_OP(dev_ops, iw_connect);
+ SET_DEVICE_OP(dev_ops, iw_create_listen);
+ SET_DEVICE_OP(dev_ops, iw_destroy_listen);
+ SET_DEVICE_OP(dev_ops, iw_get_qp);
+ SET_DEVICE_OP(dev_ops, iw_reject);
+ SET_DEVICE_OP(dev_ops, iw_rem_ref);
+ SET_DEVICE_OP(dev_ops, map_mr_sg);
+ SET_DEVICE_OP(dev_ops, map_mr_sg_pi);
+ SET_DEVICE_OP(dev_ops, mmap);
+ SET_DEVICE_OP(dev_ops, mmap_free);
+ SET_DEVICE_OP(dev_ops, modify_ah);
+ SET_DEVICE_OP(dev_ops, modify_cq);
+ SET_DEVICE_OP(dev_ops, modify_device);
+ SET_DEVICE_OP(dev_ops, modify_hw_stat);
+ SET_DEVICE_OP(dev_ops, modify_port);
+ SET_DEVICE_OP(dev_ops, modify_qp);
+ SET_DEVICE_OP(dev_ops, modify_srq);
+ SET_DEVICE_OP(dev_ops, modify_wq);
+ SET_DEVICE_OP(dev_ops, peek_cq);
+ SET_DEVICE_OP(dev_ops, pre_destroy_cq);
+ SET_DEVICE_OP(dev_ops, poll_cq);
+ SET_DEVICE_OP(dev_ops, port_groups);
+ SET_DEVICE_OP(dev_ops, post_destroy_cq);
+ SET_DEVICE_OP(dev_ops, post_recv);
+ SET_DEVICE_OP(dev_ops, post_send);
+ SET_DEVICE_OP(dev_ops, post_srq_recv);
+ SET_DEVICE_OP(dev_ops, process_mad);
+ SET_DEVICE_OP(dev_ops, query_ah);
+ SET_DEVICE_OP(dev_ops, query_device);
+ SET_DEVICE_OP(dev_ops, query_gid);
+ SET_DEVICE_OP(dev_ops, query_pkey);
+ SET_DEVICE_OP(dev_ops, query_port);
+ SET_DEVICE_OP(dev_ops, query_qp);
+ SET_DEVICE_OP(dev_ops, query_srq);
+ SET_DEVICE_OP(dev_ops, query_ucontext);
+ SET_DEVICE_OP(dev_ops, rdma_netdev_get_params);
+ SET_DEVICE_OP(dev_ops, read_counters);
+ SET_DEVICE_OP(dev_ops, reg_dm_mr);
+ SET_DEVICE_OP(dev_ops, reg_user_mr);
+ SET_DEVICE_OP(dev_ops, reg_user_mr_dmabuf);
+ SET_DEVICE_OP(dev_ops, req_notify_cq);
+ SET_DEVICE_OP(dev_ops, rereg_user_mr);
+ SET_DEVICE_OP(dev_ops, resize_cq);
+ SET_DEVICE_OP(dev_ops, set_vf_guid);
+ SET_DEVICE_OP(dev_ops, set_vf_link_state);
+ SET_DEVICE_OP(dev_ops, ufile_hw_cleanup);
+ SET_DEVICE_OP(dev_ops, report_port_event);
+
+ SET_OBJ_SIZE(dev_ops, ib_ah);
+ SET_OBJ_SIZE(dev_ops, ib_counters);
+ SET_OBJ_SIZE(dev_ops, ib_cq);
+ SET_OBJ_SIZE(dev_ops, ib_dmah);
+ SET_OBJ_SIZE(dev_ops, ib_mw);
+ SET_OBJ_SIZE(dev_ops, ib_pd);
+ SET_OBJ_SIZE(dev_ops, ib_qp);
+ SET_OBJ_SIZE(dev_ops, ib_rwq_ind_table);
+ SET_OBJ_SIZE(dev_ops, ib_srq);
+ SET_OBJ_SIZE(dev_ops, ib_ucontext);
+ SET_OBJ_SIZE(dev_ops, ib_xrcd);
+ SET_OBJ_SIZE(dev_ops, rdma_counter);
+}
+EXPORT_SYMBOL(ib_set_device_ops);
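/*
 * Editorial usage sketch -- not part of this patch. Because SET_DEVICE_OP()
 * above never overwrites an already-set op, a driver can layer several
 * const ops tables: the more specific table is installed first and a
 * common table then fills only the remaining gaps. The mydrv_* tables and
 * helper are hypothetical and deliberately minimal.
 */
#include <linux/module.h>
#include <rdma/ib_verbs.h>

static const struct ib_device_ops mydrv_v2_ops = {
	/* v2-only entry points would be listed here */
};

static const struct ib_device_ops mydrv_common_ops = {
	.owner = THIS_MODULE,
	/* a real driver also sets .driver_id and its verbs entry points */
};

static void mydrv_install_ops(struct ib_device *ibdev, bool is_v2_hw)
{
	if (is_v2_hw)
		ib_set_device_ops(ibdev, &mydrv_v2_ops);	/* earlier call wins */
	ib_set_device_ops(ibdev, &mydrv_common_ops);	/* fills what is still unset */
}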
+
+int ib_add_sub_device(struct ib_device *parent,
+ enum rdma_nl_dev_type type,
+ const char *name)
+{
+ struct ib_device *sub;
+ int ret = 0;
+
+ if (!parent->ops.add_sub_dev || !parent->ops.del_sub_dev)
+ return -EOPNOTSUPP;
+
+ if (!ib_device_try_get(parent))
+ return -EINVAL;
+
+ sub = parent->ops.add_sub_dev(parent, type, name);
+ if (IS_ERR(sub)) {
+ ib_device_put(parent);
+ return PTR_ERR(sub);
+ }
+
+ sub->type = type;
+ sub->parent = parent;
+
+ mutex_lock(&parent->subdev_lock);
+ list_add_tail(&parent->subdev_list_head, &sub->subdev_list);
+ mutex_unlock(&parent->subdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(ib_add_sub_device);
+
+int ib_del_sub_device_and_put(struct ib_device *sub)
+{
+ struct ib_device *parent = sub->parent;
+
+ if (!parent)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&parent->subdev_lock);
+ list_del(&sub->subdev_list);
+ mutex_unlock(&parent->subdev_lock);
+
+ ib_device_put(sub);
+ parent->ops.del_sub_dev(sub);
+ ib_device_put(parent);
+
+ return 0;
+}
+EXPORT_SYMBOL(ib_del_sub_device_and_put);
+
+#ifdef CONFIG_INFINIBAND_VIRT_DMA
+int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i) {
+ sg_dma_address(s) = (uintptr_t)sg_virt(s);
+ sg_dma_len(s) = s->length;
+ }
+ return nents;
+}
+EXPORT_SYMBOL(ib_dma_virt_map_sg);
+#endif /* CONFIG_INFINIBAND_VIRT_DMA */
+
+static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
+ [RDMA_NL_LS_OP_RESOLVE] = {
+ .doit = ib_nl_handle_resolve_resp,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
+ [RDMA_NL_LS_OP_SET_TIMEOUT] = {
+ .doit = ib_nl_handle_set_timeout,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
+ [RDMA_NL_LS_OP_IP_RESOLVE] = {
+ .doit = ib_nl_handle_ip_res_resp,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
+};
+
+void ib_dispatch_port_state_event(struct ib_device *ibdev, struct net_device *ndev)
+{
+ enum ib_port_state curr_state;
+ struct ib_event ibevent = {};
+ u32 port;
+
+ if (ib_query_netdev_port(ibdev, ndev, &port))
+ return;
+
+ curr_state = ib_get_curr_port_state(ndev);
+
+ write_lock_irq(&ibdev->cache_lock);
+ if (ibdev->port_data[port].cache.last_port_state == curr_state) {
+ write_unlock_irq(&ibdev->cache_lock);
+ return;
+ }
+ ibdev->port_data[port].cache.last_port_state = curr_state;
+ write_unlock_irq(&ibdev->cache_lock);
+
+ ibevent.event = (curr_state == IB_PORT_DOWN) ?
+ IB_EVENT_PORT_ERR : IB_EVENT_PORT_ACTIVE;
+ ibevent.device = ibdev;
+ ibevent.element.port_num = port;
+ ib_dispatch_event(&ibevent);
+}
+EXPORT_SYMBOL(ib_dispatch_port_state_event);
+
+static void handle_port_event(struct net_device *ndev, unsigned long event)
+{
+ struct ib_device *ibdev;
+
+ /* Currently, link events in bonding scenarios are still
+ * reported by drivers that support bonding.
+ */
+ if (netif_is_lag_master(ndev) || netif_is_lag_port(ndev))
+ return;
+
+ ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
+ if (!ibdev)
+ return;
+
+ if (ibdev->ops.report_port_event) {
+ ibdev->ops.report_port_event(ibdev, ndev, event);
+ goto put_ibdev;
+ }
+
+ ib_dispatch_port_state_event(ibdev, ndev);
+
+put_ibdev:
+ ib_device_put(ibdev);
+};
+
+static int ib_netdevice_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct ib_device *ibdev;
+ u32 port;
+
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
+ if (!ibdev)
+ return NOTIFY_DONE;
+
+ if (ib_query_netdev_port(ibdev, ndev, &port)) {
+ ib_device_put(ibdev);
+ break;
+ }
+
+ rdma_nl_notify_event(ibdev, port, RDMA_NETDEV_RENAME_EVENT);
+ ib_device_put(ibdev);
+ break;
+
+ case NETDEV_UP:
+ case NETDEV_CHANGE:
+ case NETDEV_DOWN:
+ handle_port_event(ndev, event);
+ break;
+
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block nb_netdevice = {
+ .notifier_call = ib_netdevice_event,
+};
+
static int __init ib_core_init(void)
{
- int ret;
+ int ret = -ENOMEM;
ib_wq = alloc_workqueue("infiniband", 0, 0);
if (!ib_wq)
return -ENOMEM;
+ ib_unreg_wq = alloc_workqueue("ib-unreg-wq", WQ_UNBOUND,
+ WQ_UNBOUND_MAX_ACTIVE);
+ if (!ib_unreg_wq)
+ goto err;
+
+ ib_comp_wq = alloc_workqueue("ib-comp-wq",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ if (!ib_comp_wq)
+ goto err_unbound;
+
+ ib_comp_unbound_wq =
+ alloc_workqueue("ib-comp-unb-wq",
+ WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
+ WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
+ if (!ib_comp_unbound_wq)
+ goto err_comp;
+
ret = class_register(&ib_class);
if (ret) {
- printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
- goto err;
+ pr_warn("Couldn't create InfiniBand device class\n");
+ goto err_comp_unbound;
+ }
+
+ rdma_nl_init();
+
+ ret = addr_init();
+ if (ret) {
+ pr_warn("Couldn't init IB address resolution\n");
+ goto err_ibnl;
+ }
+
+ ret = ib_mad_init();
+ if (ret) {
+ pr_warn("Couldn't init IB MAD\n");
+ goto err_addr;
}
- ret = ibnl_init();
+ ret = ib_sa_init();
if (ret) {
- printk(KERN_WARNING "Couldn't init IB netlink interface\n");
- goto err_sysfs;
+ pr_warn("Couldn't init SA\n");
+ goto err_mad;
}
- ib_cache_setup();
+ ret = register_blocking_lsm_notifier(&ibdev_lsm_nb);
+ if (ret) {
+ pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
+ goto err_sa;
+ }
+
+ ret = register_pernet_device(&rdma_dev_net_ops);
+ if (ret) {
+ pr_warn("Couldn't init compat dev. ret %d\n", ret);
+ goto err_compat;
+ }
+
+ nldev_init();
+ rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
+ ret = roce_gid_mgmt_init();
+ if (ret) {
+ pr_warn("Couldn't init RoCE GID management\n");
+ goto err_parent;
+ }
+
+ register_netdevice_notifier(&nb_netdevice);
return 0;
-err_sysfs:
+err_parent:
+ rdma_nl_unregister(RDMA_NL_LS);
+ nldev_exit();
+ unregister_pernet_device(&rdma_dev_net_ops);
+err_compat:
+ unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
+err_sa:
+ ib_sa_cleanup();
+err_mad:
+ ib_mad_cleanup();
+err_addr:
+ addr_cleanup();
+err_ibnl:
class_unregister(&ib_class);
-
+err_comp_unbound:
+ destroy_workqueue(ib_comp_unbound_wq);
+err_comp:
+ destroy_workqueue(ib_comp_wq);
+err_unbound:
+ destroy_workqueue(ib_unreg_wq);
err:
destroy_workqueue(ib_wq);
return ret;
@@ -973,12 +3119,30 @@ err:
static void __exit ib_core_cleanup(void)
{
- ib_cache_cleanup();
- ibnl_cleanup();
+ unregister_netdevice_notifier(&nb_netdevice);
+ roce_gid_mgmt_cleanup();
+ rdma_nl_unregister(RDMA_NL_LS);
+ nldev_exit();
+ unregister_pernet_device(&rdma_dev_net_ops);
+ unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
+ ib_sa_cleanup();
+ ib_mad_cleanup();
+ addr_cleanup();
+ rdma_nl_exit();
class_unregister(&ib_class);
+ destroy_workqueue(ib_comp_unbound_wq);
+ destroy_workqueue(ib_comp_wq);
/* Make sure that any pending umem accounting work is done. */
destroy_workqueue(ib_wq);
+ destroy_workqueue(ib_unreg_wq);
+ WARN_ON(!xa_empty(&clients));
+ WARN_ON(!xa_empty(&devices));
}
-module_init(ib_core_init);
+MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);
+
+/* The ib core relies on the netdev stack registering the net_ns_type_operations
+ * ns kobject type before ib_core initializes.
+ */
+fs_initcall(ib_core_init);
module_exit(ib_core_cleanup);
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
deleted file mode 100644
index 9f5ad7cc33c8..000000000000
--- a/drivers/infiniband/core/fmr_pool.c
+++ /dev/null
@@ -1,544 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/errno.h>
-#include <linux/spinlock.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/jhash.h>
-#include <linux/kthread.h>
-
-#include <rdma/ib_fmr_pool.h>
-
-#include "core_priv.h"
-
-#define PFX "fmr_pool: "
-
-enum {
- IB_FMR_MAX_REMAPS = 32,
-
- IB_FMR_HASH_BITS = 8,
- IB_FMR_HASH_SIZE = 1 << IB_FMR_HASH_BITS,
- IB_FMR_HASH_MASK = IB_FMR_HASH_SIZE - 1
-};
-
-/*
- * If an FMR is not in use, then the list member will point to either
- * its pool's free_list (if the FMR can be mapped again; that is,
- * remap_count < pool->max_remaps) or its pool's dirty_list (if the
- * FMR needs to be unmapped before being remapped). In either of
- * these cases it is a bug if the ref_count is not 0. In other words,
- * if ref_count is > 0, then the list member must not be linked into
- * either free_list or dirty_list.
- *
- * The cache_node member is used to link the FMR into a cache bucket
- * (if caching is enabled). This is independent of the reference
- * count of the FMR. When a valid FMR is released, its ref_count is
- * decremented, and if ref_count reaches 0, the FMR is placed in
- * either free_list or dirty_list as appropriate. However, it is not
- * removed from the cache and may be "revived" if a call to
- * ib_fmr_register_physical() occurs before the FMR is remapped. In
- * this case we just increment the ref_count and remove the FMR from
- * free_list/dirty_list.
- *
- * Before we remap an FMR from free_list, we remove it from the cache
- * (to prevent another user from obtaining a stale FMR). When an FMR
- * is released, we add it to the tail of the free list, so that our
- * cache eviction policy is "least recently used."
- *
- * All manipulation of ref_count, list and cache_node is protected by
- * pool_lock to maintain consistency.
- */
-
-struct ib_fmr_pool {
- spinlock_t pool_lock;
-
- int pool_size;
- int max_pages;
- int max_remaps;
- int dirty_watermark;
- int dirty_len;
- struct list_head free_list;
- struct list_head dirty_list;
- struct hlist_head *cache_bucket;
-
- void (*flush_function)(struct ib_fmr_pool *pool,
- void * arg);
- void *flush_arg;
-
- struct task_struct *thread;
-
- atomic_t req_ser;
- atomic_t flush_ser;
-
- wait_queue_head_t force_wait;
-};
-
-static inline u32 ib_fmr_hash(u64 first_page)
-{
- return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) &
- (IB_FMR_HASH_SIZE - 1);
-}
-
-/* Caller must hold pool_lock */
-static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
- u64 *page_list,
- int page_list_len,
- u64 io_virtual_address)
-{
- struct hlist_head *bucket;
- struct ib_pool_fmr *fmr;
-
- if (!pool->cache_bucket)
- return NULL;
-
- bucket = pool->cache_bucket + ib_fmr_hash(*page_list);
-
- hlist_for_each_entry(fmr, bucket, cache_node)
- if (io_virtual_address == fmr->io_virtual_address &&
- page_list_len == fmr->page_list_len &&
- !memcmp(page_list, fmr->page_list,
- page_list_len * sizeof *page_list))
- return fmr;
-
- return NULL;
-}
-
-static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
-{
- int ret;
- struct ib_pool_fmr *fmr;
- LIST_HEAD(unmap_list);
- LIST_HEAD(fmr_list);
-
- spin_lock_irq(&pool->pool_lock);
-
- list_for_each_entry(fmr, &pool->dirty_list, list) {
- hlist_del_init(&fmr->cache_node);
- fmr->remap_count = 0;
- list_add_tail(&fmr->fmr->list, &fmr_list);
-
-#ifdef DEBUG
- if (fmr->ref_count !=0) {
- printk(KERN_WARNING PFX "Unmapping FMR 0x%08x with ref count %d\n",
- fmr, fmr->ref_count);
- }
-#endif
- }
-
- list_splice_init(&pool->dirty_list, &unmap_list);
- pool->dirty_len = 0;
-
- spin_unlock_irq(&pool->pool_lock);
-
- if (list_empty(&unmap_list)) {
- return;
- }
-
- ret = ib_unmap_fmr(&fmr_list);
- if (ret)
- printk(KERN_WARNING PFX "ib_unmap_fmr returned %d\n", ret);
-
- spin_lock_irq(&pool->pool_lock);
- list_splice(&unmap_list, &pool->free_list);
- spin_unlock_irq(&pool->pool_lock);
-}
-
-static int ib_fmr_cleanup_thread(void *pool_ptr)
-{
- struct ib_fmr_pool *pool = pool_ptr;
-
- do {
- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
- ib_fmr_batch_release(pool);
-
- atomic_inc(&pool->flush_ser);
- wake_up_interruptible(&pool->force_wait);
-
- if (pool->flush_function)
- pool->flush_function(pool, pool->flush_arg);
- }
-
- set_current_state(TASK_INTERRUPTIBLE);
- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
- !kthread_should_stop())
- schedule();
- __set_current_state(TASK_RUNNING);
- } while (!kthread_should_stop());
-
- return 0;
-}
-
-/**
- * ib_create_fmr_pool - Create an FMR pool
- * @pd:Protection domain for FMRs
- * @params:FMR pool parameters
- *
- * Create a pool of FMRs. Return value is pointer to new pool or
- * error code if creation failed.
- */
-struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
- struct ib_fmr_pool_param *params)
-{
- struct ib_device *device;
- struct ib_fmr_pool *pool;
- struct ib_device_attr *attr;
- int i;
- int ret;
- int max_remaps;
-
- if (!params)
- return ERR_PTR(-EINVAL);
-
- device = pd->device;
- if (!device->alloc_fmr || !device->dealloc_fmr ||
- !device->map_phys_fmr || !device->unmap_fmr) {
- printk(KERN_INFO PFX "Device %s does not support FMRs\n",
- device->name);
- return ERR_PTR(-ENOSYS);
- }
-
- attr = kmalloc(sizeof *attr, GFP_KERNEL);
- if (!attr) {
- printk(KERN_WARNING PFX "couldn't allocate device attr struct\n");
- return ERR_PTR(-ENOMEM);
- }
-
- ret = ib_query_device(device, attr);
- if (ret) {
- printk(KERN_WARNING PFX "couldn't query device: %d\n", ret);
- kfree(attr);
- return ERR_PTR(ret);
- }
-
- if (!attr->max_map_per_fmr)
- max_remaps = IB_FMR_MAX_REMAPS;
- else
- max_remaps = attr->max_map_per_fmr;
-
- kfree(attr);
-
- pool = kmalloc(sizeof *pool, GFP_KERNEL);
- if (!pool) {
- printk(KERN_WARNING PFX "couldn't allocate pool struct\n");
- return ERR_PTR(-ENOMEM);
- }
-
- pool->cache_bucket = NULL;
-
- pool->flush_function = params->flush_function;
- pool->flush_arg = params->flush_arg;
-
- INIT_LIST_HEAD(&pool->free_list);
- INIT_LIST_HEAD(&pool->dirty_list);
-
- if (params->cache) {
- pool->cache_bucket =
- kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
- GFP_KERNEL);
- if (!pool->cache_bucket) {
- printk(KERN_WARNING PFX "Failed to allocate cache in pool\n");
- ret = -ENOMEM;
- goto out_free_pool;
- }
-
- for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
- INIT_HLIST_HEAD(pool->cache_bucket + i);
- }
-
- pool->pool_size = 0;
- pool->max_pages = params->max_pages_per_fmr;
- pool->max_remaps = max_remaps;
- pool->dirty_watermark = params->dirty_watermark;
- pool->dirty_len = 0;
- spin_lock_init(&pool->pool_lock);
- atomic_set(&pool->req_ser, 0);
- atomic_set(&pool->flush_ser, 0);
- init_waitqueue_head(&pool->force_wait);
-
- pool->thread = kthread_run(ib_fmr_cleanup_thread,
- pool,
- "ib_fmr(%s)",
- device->name);
- if (IS_ERR(pool->thread)) {
- printk(KERN_WARNING PFX "couldn't start cleanup thread\n");
- ret = PTR_ERR(pool->thread);
- goto out_free_pool;
- }
-
- {
- struct ib_pool_fmr *fmr;
- struct ib_fmr_attr fmr_attr = {
- .max_pages = params->max_pages_per_fmr,
- .max_maps = pool->max_remaps,
- .page_shift = params->page_shift
- };
- int bytes_per_fmr = sizeof *fmr;
-
- if (pool->cache_bucket)
- bytes_per_fmr += params->max_pages_per_fmr * sizeof (u64);
-
- for (i = 0; i < params->pool_size; ++i) {
- fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
- if (!fmr) {
- printk(KERN_WARNING PFX "failed to allocate fmr "
- "struct for FMR %d\n", i);
- goto out_fail;
- }
-
- fmr->pool = pool;
- fmr->remap_count = 0;
- fmr->ref_count = 0;
- INIT_HLIST_NODE(&fmr->cache_node);
-
- fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
- if (IS_ERR(fmr->fmr)) {
- printk(KERN_WARNING PFX "fmr_create failed "
- "for FMR %d\n", i);
- kfree(fmr);
- goto out_fail;
- }
-
- list_add_tail(&fmr->list, &pool->free_list);
- ++pool->pool_size;
- }
- }
-
- return pool;
-
- out_free_pool:
- kfree(pool->cache_bucket);
- kfree(pool);
-
- return ERR_PTR(ret);
-
- out_fail:
- ib_destroy_fmr_pool(pool);
-
- return ERR_PTR(-ENOMEM);
-}
-EXPORT_SYMBOL(ib_create_fmr_pool);
-
-/**
- * ib_destroy_fmr_pool - Free FMR pool
- * @pool:FMR pool to free
- *
- * Destroy an FMR pool and free all associated resources.
- */
-void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
-{
- struct ib_pool_fmr *fmr;
- struct ib_pool_fmr *tmp;
- LIST_HEAD(fmr_list);
- int i;
-
- kthread_stop(pool->thread);
- ib_fmr_batch_release(pool);
-
- i = 0;
- list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
- if (fmr->remap_count) {
- INIT_LIST_HEAD(&fmr_list);
- list_add_tail(&fmr->fmr->list, &fmr_list);
- ib_unmap_fmr(&fmr_list);
- }
- ib_dealloc_fmr(fmr->fmr);
- list_del(&fmr->list);
- kfree(fmr);
- ++i;
- }
-
- if (i < pool->pool_size)
- printk(KERN_WARNING PFX "pool still has %d regions registered\n",
- pool->pool_size - i);
-
- kfree(pool->cache_bucket);
- kfree(pool);
-}
-EXPORT_SYMBOL(ib_destroy_fmr_pool);
-
-/**
- * ib_flush_fmr_pool - Invalidate all unmapped FMRs
- * @pool:FMR pool to flush
- *
- * Ensure that all unmapped FMRs are fully invalidated.
- */
-int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
-{
- int serial;
- struct ib_pool_fmr *fmr, *next;
-
- /*
- * The free_list holds FMRs that may have been used
- * but have not been remapped enough times to be dirty.
- * Put them on the dirty list now so that the cleanup
- * thread will reap them too.
- */
- spin_lock_irq(&pool->pool_lock);
- list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
- if (fmr->remap_count > 0)
- list_move(&fmr->list, &pool->dirty_list);
- }
- spin_unlock_irq(&pool->pool_lock);
-
- serial = atomic_inc_return(&pool->req_ser);
- wake_up_process(pool->thread);
-
- if (wait_event_interruptible(pool->force_wait,
- atomic_read(&pool->flush_ser) - serial >= 0))
- return -EINTR;
-
- return 0;
-}
-EXPORT_SYMBOL(ib_flush_fmr_pool);
-
-/**
- * ib_fmr_pool_map_phys -
- * @pool:FMR pool to allocate FMR from
- * @page_list:List of pages to map
- * @list_len:Number of pages in @page_list
- * @io_virtual_address:I/O virtual address for new FMR
- *
- * Map an FMR from an FMR pool.
- */
-struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
- u64 *page_list,
- int list_len,
- u64 io_virtual_address)
-{
- struct ib_fmr_pool *pool = pool_handle;
- struct ib_pool_fmr *fmr;
- unsigned long flags;
- int result;
-
- if (list_len < 1 || list_len > pool->max_pages)
- return ERR_PTR(-EINVAL);
-
- spin_lock_irqsave(&pool->pool_lock, flags);
- fmr = ib_fmr_cache_lookup(pool,
- page_list,
- list_len,
- io_virtual_address);
- if (fmr) {
- /* found in cache */
- ++fmr->ref_count;
- if (fmr->ref_count == 1) {
- list_del(&fmr->list);
- }
-
- spin_unlock_irqrestore(&pool->pool_lock, flags);
-
- return fmr;
- }
-
- if (list_empty(&pool->free_list)) {
- spin_unlock_irqrestore(&pool->pool_lock, flags);
- return ERR_PTR(-EAGAIN);
- }
-
- fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
- list_del(&fmr->list);
- hlist_del_init(&fmr->cache_node);
- spin_unlock_irqrestore(&pool->pool_lock, flags);
-
- result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
- io_virtual_address);
-
- if (result) {
- spin_lock_irqsave(&pool->pool_lock, flags);
- list_add(&fmr->list, &pool->free_list);
- spin_unlock_irqrestore(&pool->pool_lock, flags);
-
- printk(KERN_WARNING PFX "fmr_map returns %d\n", result);
-
- return ERR_PTR(result);
- }
-
- ++fmr->remap_count;
- fmr->ref_count = 1;
-
- if (pool->cache_bucket) {
- fmr->io_virtual_address = io_virtual_address;
- fmr->page_list_len = list_len;
- memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));
-
- spin_lock_irqsave(&pool->pool_lock, flags);
- hlist_add_head(&fmr->cache_node,
- pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
- spin_unlock_irqrestore(&pool->pool_lock, flags);
- }
-
- return fmr;
-}
-EXPORT_SYMBOL(ib_fmr_pool_map_phys);
-
-/**
- * ib_fmr_pool_unmap - Unmap FMR
- * @fmr:FMR to unmap
- *
- * Unmap an FMR. The FMR mapping may remain valid until the FMR is
- * reused (or until ib_flush_fmr_pool() is called).
- */
-int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
-{
- struct ib_fmr_pool *pool;
- unsigned long flags;
-
- pool = fmr->pool;
-
- spin_lock_irqsave(&pool->pool_lock, flags);
-
- --fmr->ref_count;
- if (!fmr->ref_count) {
- if (fmr->remap_count < pool->max_remaps) {
- list_add_tail(&fmr->list, &pool->free_list);
- } else {
- list_add_tail(&fmr->list, &pool->dirty_list);
- if (++pool->dirty_len >= pool->dirty_watermark) {
- atomic_inc(&pool->req_ser);
- wake_up_process(pool->thread);
- }
- }
- }
-
-#ifdef DEBUG
- if (fmr->ref_count < 0)
- printk(KERN_WARNING PFX "FMR %p has ref count %d < 0\n",
- fmr, fmr->ref_count);
-#endif
-
- spin_unlock_irqrestore(&pool->pool_lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL(ib_fmr_pool_unmap);
diff --git a/drivers/infiniband/core/ib_core_uverbs.c b/drivers/infiniband/core/ib_core_uverbs.c
new file mode 100644
index 000000000000..b51bd7087a88
--- /dev/null
+++ b/drivers/infiniband/core/ib_core_uverbs.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2019 Marvell. All rights reserved.
+ */
+#include <linux/xarray.h>
+#include "uverbs.h"
+#include "core_priv.h"
+
+/**
+ * rdma_umap_priv_init() - Initialize the private data of a vma
+ *
+ * @priv: The already allocated private data
+ * @vma: The vm area struct that needs private data
+ * @entry: entry into the mmap_xa that needs to be linked with
+ * this vma
+ *
+ * Each time we map IO memory into user space this keeps track of the
+ * mapping. When the device is hot-unplugged we 'zap' the mmaps in user space
+ * to point to the zero page and allow the hot unplug to proceed.
+ *
+ * This is necessary for cases like PCI physical hot unplug as the actual BAR
+ * memory may vanish after this and access to it from userspace could MCE.
+ *
+ * RDMA drivers supporting disassociation must have their user space designed
+ * to cope in some way with their IO pages going to the zero page.
+ *
+ */
+void rdma_umap_priv_init(struct rdma_umap_priv *priv,
+ struct vm_area_struct *vma,
+ struct rdma_user_mmap_entry *entry)
+{
+ struct ib_uverbs_file *ufile = vma->vm_file->private_data;
+
+ priv->vma = vma;
+ if (entry) {
+ kref_get(&entry->ref);
+ priv->entry = entry;
+ }
+ vma->vm_private_data = priv;
+ /* vm_ops is setup in ib_uverbs_mmap() to avoid module dependencies */
+
+ mutex_lock(&ufile->umap_lock);
+ list_add(&priv->list, &ufile->umaps);
+ mutex_unlock(&ufile->umap_lock);
+}
+EXPORT_SYMBOL(rdma_umap_priv_init);
+
+/**
+ * rdma_user_mmap_io() - Map IO memory into a process
+ *
+ * @ucontext: associated user context
+ * @vma: the vma related to the current mmap call
+ * @pfn: pfn to map
+ * @size: size to map
+ * @prot: pgprot to use in remap call
+ * @entry: mmap_entry retrieved from rdma_user_mmap_entry_get(), or NULL
+ * if mmap_entry is not used by the driver
+ *
+ * This is to be called by drivers as part of their mmap() functions if they
+ * wish to send something like PCI-E BAR memory to userspace.
+ *
+ * Return -EINVAL on wrong flags or size, -EAGAIN on failure to map. 0 on
+ * success.
+ */
+int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size, pgprot_t prot,
+ struct rdma_user_mmap_entry *entry)
+{
+ struct ib_uverbs_file *ufile = ucontext->ufile;
+ struct rdma_umap_priv *priv;
+
+ if (!(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
+
+ if (vma->vm_end - vma->vm_start != size)
+ return -EINVAL;
+
+ /* Driver is using this wrong, must be called by ib_uverbs_mmap */
+ if (WARN_ON(!vma->vm_file ||
+ vma->vm_file->private_data != ufile))
+ return -EINVAL;
+ lockdep_assert_held(&ufile->device->disassociate_srcu);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ vma->vm_page_prot = prot;
+ if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
+ kfree(priv);
+ return -EAGAIN;
+ }
+
+ rdma_umap_priv_init(priv, vma, entry);
+ return 0;
+}
+EXPORT_SYMBOL(rdma_user_mmap_io);
+
+/**
+ * rdma_user_mmap_entry_get_pgoff() - Get an entry from the mmap_xa
+ *
+ * @ucontext: associated user context
+ * @pgoff: The mmap offset >> PAGE_SHIFT
+ *
+ * This function is called when a user tries to mmap with an offset (returned
+ * by rdma_user_mmap_get_offset()) it initially received from the driver. The
+ * rdma_user_mmap_entry was created by the function
+ * rdma_user_mmap_entry_insert(). This function increases the refcnt of the
+ * entry so that it won't be deleted from the xarray in the meantime.
+ *
+ * Return a reference to an entry if one exists or NULL if there is no
+ * match. rdma_user_mmap_entry_put() must be called to put the reference.
+ */
+struct rdma_user_mmap_entry *
+rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
+ unsigned long pgoff)
+{
+ struct rdma_user_mmap_entry *entry;
+
+ if (pgoff > U32_MAX)
+ return NULL;
+
+ xa_lock(&ucontext->mmap_xa);
+
+ entry = xa_load(&ucontext->mmap_xa, pgoff);
+
+ /*
+ * If refcount is zero, the entry is already being deleted; driver_removed
+ * indicates that no further mmaps are possible and we are waiting for
+ * the active VMAs to be closed.
+ */
+ if (!entry || entry->start_pgoff != pgoff || entry->driver_removed ||
+ !kref_get_unless_zero(&entry->ref))
+ goto err;
+
+ xa_unlock(&ucontext->mmap_xa);
+
+ ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#zx] returned\n",
+ pgoff, entry->npages);
+
+ return entry;
+
+err:
+ xa_unlock(&ucontext->mmap_xa);
+ return NULL;
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_get_pgoff);
+
+/**
+ * rdma_user_mmap_entry_get() - Get an entry from the mmap_xa
+ *
+ * @ucontext: associated user context
+ * @vma: the vma being mmap'd into
+ *
+ * This function is like rdma_user_mmap_entry_get_pgoff() except that it also
+ * checks that the VMA is correct.
+ */
+struct rdma_user_mmap_entry *
+rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
+ struct vm_area_struct *vma)
+{
+ struct rdma_user_mmap_entry *entry;
+
+ if (!(vma->vm_flags & VM_SHARED))
+ return NULL;
+ entry = rdma_user_mmap_entry_get_pgoff(ucontext, vma->vm_pgoff);
+ if (!entry)
+ return NULL;
+ if (entry->npages * PAGE_SIZE != vma->vm_end - vma->vm_start) {
+ rdma_user_mmap_entry_put(entry);
+ return NULL;
+ }
+ return entry;
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_get);
+
+static void rdma_user_mmap_entry_free(struct kref *kref)
+{
+ struct rdma_user_mmap_entry *entry =
+ container_of(kref, struct rdma_user_mmap_entry, ref);
+ struct ib_ucontext *ucontext = entry->ucontext;
+ unsigned long i;
+
+ /*
+ * Erase all entries occupied by this single entry; this is deferred
+ * until all VMAs are closed so that the mmap offsets remain unique.
+ */
+ xa_lock(&ucontext->mmap_xa);
+ for (i = 0; i < entry->npages; i++)
+ __xa_erase(&ucontext->mmap_xa, entry->start_pgoff + i);
+ xa_unlock(&ucontext->mmap_xa);
+
+ ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#zx] removed\n",
+ entry->start_pgoff, entry->npages);
+
+ if (ucontext->device->ops.mmap_free)
+ ucontext->device->ops.mmap_free(entry);
+}
+
+/**
+ * rdma_user_mmap_entry_put() - Drop reference to the mmap entry
+ *
+ * @entry: an entry in the mmap_xa
+ *
+ * This function is called when the mapping is closed if it was
+ * an io mapping or when the driver is done with the entry for
+ * some other reason.
+ * Should be called after rdma_user_mmap_entry_get() was called
+ * and the entry is no longer needed. This function will erase the
+ * entry and free it if its refcnt reaches zero.
+ */
+void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry)
+{
+ kref_put(&entry->ref, rdma_user_mmap_entry_free);
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_put);
+
+/**
+ * rdma_user_mmap_entry_remove() - Drop reference to entry and
+ * mark it as unmmapable
+ *
+ * @entry: the entry to remove from the mmap_xa
+ *
+ * Drivers can call this to prevent userspace from creating more mappings for
+ * entry, however existing mmaps continue to exist and ops->mmap_free() will
+ * not be called until all user mmaps are destroyed.
+ */
+void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry)
+{
+ if (!entry)
+ return;
+
+ xa_lock(&entry->ucontext->mmap_xa);
+ entry->driver_removed = true;
+ xa_unlock(&entry->ucontext->mmap_xa);
+ kref_put(&entry->ref, rdma_user_mmap_entry_free);
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_remove);
+
+/**
+ * rdma_user_mmap_entry_insert_range() - Insert an entry to the mmap_xa
+ * in a given range.
+ *
+ * @ucontext: associated user context.
+ * @entry: the entry to insert into the mmap_xa
+ * @length: length of the address that will be mmapped
+ * @min_pgoff: minimum pgoff to be returned
+ * @max_pgoff: maximum pgoff to be returned
+ *
+ * This function should be called by drivers that use the rdma_user_mmap
+ * interface for implementing their mmap syscall. A database of mmap offsets is
+ * handled in the core and helper functions are provided to insert entries
+ * into the database and extract entries when the user calls mmap with the
+ * given offset. The function allocates a unique page offset in a given range
+ * that should be provided to the user; the user will use the offset to retrieve
+ * information such as the address to be mapped and how.
+ *
+ * Return: 0 on success and -ENOMEM on failure
+ */
+int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
+ struct rdma_user_mmap_entry *entry,
+ size_t length, u32 min_pgoff,
+ u32 max_pgoff)
+{
+ struct ib_uverbs_file *ufile = ucontext->ufile;
+ XA_STATE(xas, &ucontext->mmap_xa, min_pgoff);
+ u32 xa_first, xa_last, npages;
+ int err;
+ u32 i;
+
+ if (!entry)
+ return -EINVAL;
+
+ kref_init(&entry->ref);
+ entry->ucontext = ucontext;
+
+ /*
+ * We want the whole allocation to be done without interruption from a
+ * different thread. The allocation requires finding a free range and
+ * storing. During the xa_insert the lock could be released, possibly
+ * allowing another thread to choose the same range.
+ */
+ mutex_lock(&ufile->umap_lock);
+
+ xa_lock(&ucontext->mmap_xa);
+
+ /* We want to find an empty range */
+ npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE);
+ entry->npages = npages;
+ while (true) {
+ /* First find an empty index */
+ xas_find_marked(&xas, max_pgoff, XA_FREE_MARK);
+ if (xas.xa_node == XAS_RESTART)
+ goto err_unlock;
+
+ xa_first = xas.xa_index;
+
+ /* Is there enough room to have the range? */
+ if (check_add_overflow(xa_first, npages, &xa_last))
+ goto err_unlock;
+
+ /*
+ * Now look for the next present entry. If an entry doesn't
+ * exist, we found an empty range and can proceed.
+ */
+ xas_next_entry(&xas, xa_last - 1);
+ if (xas.xa_node == XAS_BOUNDS || xas.xa_index >= xa_last)
+ break;
+ }
+
+ for (i = xa_first; i < xa_last; i++) {
+ err = __xa_insert(&ucontext->mmap_xa, i, entry, GFP_KERNEL);
+ if (err)
+ goto err_undo;
+ }
+
+ /*
+ * Internally the kernel uses a page offset, in libc this is a byte
+ * offset. Drivers should not return pgoff to userspace.
+ */
+ entry->start_pgoff = xa_first;
+ xa_unlock(&ucontext->mmap_xa);
+ mutex_unlock(&ufile->umap_lock);
+
+ ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#x] inserted\n",
+ entry->start_pgoff, npages);
+
+ return 0;
+
+err_undo:
+ for (; i > xa_first; i--)
+ __xa_erase(&ucontext->mmap_xa, i - 1);
+
+err_unlock:
+ xa_unlock(&ucontext->mmap_xa);
+ mutex_unlock(&ufile->umap_lock);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_insert_range);
+
+/**
+ * rdma_user_mmap_entry_insert() - Insert an entry to the mmap_xa.
+ *
+ * @ucontext: associated user context.
+ * @entry: the entry to insert into the mmap_xa
+ * @length: length of the address that will be mmapped
+ *
+ * This function should be called by drivers that use the rdma_user_mmap
+ * interface for handling user mmapped addresses. The database is handled in
+ * the core and helper functions are provided to insert entries into the
+ * database and extract entries when the user calls mmap with the given offset.
+ * The function allocates a unique page offset that should be provided to the
+ * user; the user will use the offset to retrieve information such as the
+ * address to be mapped and how.
+ *
+ * Return: 0 on success and -ENOMEM on failure
+ */
+int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
+ struct rdma_user_mmap_entry *entry,
+ size_t length)
+{
+ return rdma_user_mmap_entry_insert_range(ucontext, entry, length, 0,
+ U32_MAX);
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_insert);
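A minimal usage sketch (not part of this patch): a hypothetical driver registers a doorbell page with rdma_user_mmap_entry_insert(), returns the offset from rdma_user_mmap_get_offset() (referenced in the kernel-doc above) to userspace, and later maps it in its mmap verb via rdma_user_mmap_entry_get() and rdma_user_mmap_io(). The mydrv_* names, the db_phys_addr field and the single-page length are illustrative assumptions, not driver code from this series.

#include <rdma/ib_verbs.h>
#include <linux/slab.h>
#include <linux/mm.h>

/* Illustrative driver-private wrapper; the rdma_entry member is what the
 * core tracks in the ucontext's mmap_xa. */
struct mydrv_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 db_phys_addr;		/* assumed doorbell BAR address */
};

/* Register one doorbell page and hand its mmap offset back to userspace. */
static int mydrv_alloc_db_offset(struct ib_ucontext *uctx, u64 db_phys_addr,
				 u64 *mmap_offset)
{
	struct mydrv_mmap_entry *entry;
	int ret;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;
	entry->db_phys_addr = db_phys_addr;

	ret = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry, PAGE_SIZE);
	if (ret) {
		kfree(entry);
		return ret;
	}
	/* Byte offset that userspace passes to mmap(); pgoff stays internal. */
	*mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
	return 0;
}

/* The driver's .mmap verb: look the entry up by VMA, then map the page. */
static int mydrv_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *rdma_entry;
	struct mydrv_mmap_entry *entry;
	int ret;

	rdma_entry = rdma_user_mmap_entry_get(uctx, vma);
	if (!rdma_entry)
		return -EINVAL;
	entry = container_of(rdma_entry, struct mydrv_mmap_entry, rdma_entry);

	ret = rdma_user_mmap_io(uctx, vma, entry->db_phys_addr >> PAGE_SHIFT,
				PAGE_SIZE, pgprot_noncached(vma->vm_page_prot),
				rdma_entry);
	rdma_user_mmap_entry_put(rdma_entry);
	return ret;
}

A real driver would also implement the mmap_free device op to kfree() the containing structure once the entry's refcount drops to zero, and call rdma_user_mmap_entry_remove() when the object backing the mapping goes away.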
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index ff9163dc1596..62410578dec3 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -50,6 +50,8 @@
#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>
+#include <rdma/iw_portmap.h>
+#include <rdma/rdma_netlink.h>
#include "iwcm.h"
@@ -57,6 +59,38 @@ MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");
+static const char * const iwcm_rej_reason_strs[] = {
+ [ECONNRESET] = "reset by remote host",
+ [ECONNREFUSED] = "refused by remote application",
+ [ETIMEDOUT] = "setup timeout",
+};
+
+const char *__attribute_const__ iwcm_reject_msg(int reason)
+{
+ size_t index;
+
+ /* iWARP uses negative errnos */
+ index = -reason;
+
+ if (index < ARRAY_SIZE(iwcm_rej_reason_strs) &&
+ iwcm_rej_reason_strs[index])
+ return iwcm_rej_reason_strs[index];
+ else
+ return "unrecognized reason";
+}
+EXPORT_SYMBOL(iwcm_reject_msg);
+
+static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = {
+ [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
+ [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
+ [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
+ [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
+ [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
+ [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
+ [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb},
+ [RDMA_NL_IWPM_HELLO] = {.dump = iwpm_hello_cb}
+};
+
static struct workqueue_struct *iwcm_wq;
struct iwcm_work {
struct work_struct work;
@@ -75,9 +109,10 @@ static struct ctl_table iwcm_ctl_table[] = {
.data = &default_backlog,
.maxlen = sizeof(default_backlog),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_INT_MAX,
},
- { }
};
/*
@@ -110,8 +145,8 @@ static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
if (list_empty(&cm_id_priv->work_free_list))
return NULL;
- work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
- free_list);
+ work = list_first_entry(&cm_id_priv->work_free_list, struct iwcm_work,
+ free_list);
list_del_init(&work->free_list);
return work;
}
@@ -125,8 +160,10 @@ static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
struct list_head *e, *tmp;
- list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
+ list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) {
+ list_del(e);
kfree(list_entry(e, struct iwcm_work, free_list));
+ }
}
static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
@@ -171,44 +208,33 @@ static void free_cm_id(struct iwcm_id_private *cm_id_priv)
/*
* Release a reference on cm_id. If the last reference is being
- * released, enable the waiting thread (in iw_destroy_cm_id) to
- * get woken up, and return 1 if a thread is already waiting.
+ * released, free the cm_id and return 'true'.
*/
-static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
+static bool iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
- BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
- if (atomic_dec_and_test(&cm_id_priv->refcount)) {
+ if (refcount_dec_and_test(&cm_id_priv->refcount)) {
BUG_ON(!list_empty(&cm_id_priv->work_list));
- complete(&cm_id_priv->destroy_comp);
- return 1;
+ free_cm_id(cm_id_priv);
+ return true;
}
- return 0;
+ return false;
}
static void add_ref(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
}
static void rem_ref(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
- int cb_destroy;
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
- /*
- * Test bit before deref in case the cm_id gets freed on another
- * thread.
- */
- cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
- if (iwcm_deref_id(cm_id_priv) && cb_destroy) {
- BUG_ON(!list_empty(&cm_id_priv->work_list));
- free_cm_id(cm_id_priv);
- }
+ (void)iwcm_deref_id(cm_id_priv);
}
static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
@@ -231,7 +257,7 @@ struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
cm_id_priv->id.add_ref = add_ref;
cm_id_priv->id.rem_ref = rem_ref;
spin_lock_init(&cm_id_priv->lock);
- atomic_set(&cm_id_priv->refcount, 1);
+ refcount_set(&cm_id_priv->refcount, 1);
init_waitqueue_head(&cm_id_priv->connect_wait);
init_completion(&cm_id_priv->destroy_comp);
INIT_LIST_HEAD(&cm_id_priv->work_list);
@@ -342,12 +368,12 @@ EXPORT_SYMBOL(iw_cm_disconnect);
/*
* CM_ID <-- DESTROYING
*
- * Clean up all resources associated with the connection and release
- * the initial reference taken by iw_create_cm_id.
+ * Clean up all resources associated with the connection.
*/
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
+ struct ib_qp *qp;
unsigned long flags;
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
@@ -358,20 +384,29 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
wait_event(cm_id_priv->connect_wait,
!test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));
+ /*
+ * Since we're deleting the cm_id, drop any events that
+ * might arrive before the last dereference.
+ */
+ set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);
+
spin_lock_irqsave(&cm_id_priv->lock, flags);
+ qp = cm_id_priv->qp;
+ cm_id_priv->qp = NULL;
+
switch (cm_id_priv->state) {
case IW_CM_STATE_LISTEN:
cm_id_priv->state = IW_CM_STATE_DESTROYING;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
/* destroy the listening endpoint */
- cm_id->device->iwcm->destroy_listen(cm_id);
+ cm_id->device->ops.iw_destroy_listen(cm_id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
break;
case IW_CM_STATE_ESTABLISHED:
cm_id_priv->state = IW_CM_STATE_DESTROYING;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
/* Abrupt close of the connection */
- (void)iwcm_modify_qp_err(cm_id_priv->qp);
+ (void)iwcm_modify_qp_err(qp);
spin_lock_irqsave(&cm_id_priv->lock, flags);
break;
case IW_CM_STATE_IDLE:
@@ -387,7 +422,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
*/
cm_id_priv->state = IW_CM_STATE_DESTROYING;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- cm_id->device->iwcm->reject(cm_id, NULL, 0);
+ cm_id->device->ops.iw_reject(cm_id, NULL, 0);
spin_lock_irqsave(&cm_id_priv->lock, flags);
break;
case IW_CM_STATE_CONN_SENT:
@@ -396,35 +431,129 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
BUG();
break;
}
- if (cm_id_priv->qp) {
- cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
- cm_id_priv->qp = NULL;
- }
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ if (qp)
+ cm_id_priv->id.device->ops.iw_rem_ref(qp);
- (void)iwcm_deref_id(cm_id_priv);
+ if (cm_id->mapped) {
+ iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
+ iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
+ }
}
/*
- * This function is only called by the application thread and cannot
- * be called by the event thread. The function will wait for all
- * references to be released on the cm_id and then kfree the cm_id
- * object.
+ * Destroy cm_id. If the cm_id still has other references, wait for all
+ * references to be released on the cm_id and then release the initial
+ * reference taken by iw_create_cm_id.
*/
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
- BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags));
-
destroy_cm_id(cm_id);
+ if (refcount_read(&cm_id_priv->refcount) > 1)
+ flush_workqueue(iwcm_wq);
+ iwcm_deref_id(cm_id_priv);
+}
+EXPORT_SYMBOL(iw_destroy_cm_id);
+
+/**
+ * iw_cm_check_wildcard - If IP address is 0 then use original
+ * @pm_addr: sockaddr containing the ip to check for wildcard
+ * @cm_addr: sockaddr containing the actual IP address
+ * @cm_outaddr: sockaddr in which to set the IP address, leaving the port unchanged
+ *
+ * Checks the pm_addr for wildcard and then sets cm_outaddr's
+ * IP to the actual (cm_addr).
+ */
+static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr,
+ struct sockaddr_storage *cm_addr,
+ struct sockaddr_storage *cm_outaddr)
+{
+ if (pm_addr->ss_family == AF_INET) {
+ struct sockaddr_in *pm4_addr = (struct sockaddr_in *)pm_addr;
+
+ if (pm4_addr->sin_addr.s_addr == htonl(INADDR_ANY)) {
+ struct sockaddr_in *cm4_addr =
+ (struct sockaddr_in *)cm_addr;
+ struct sockaddr_in *cm4_outaddr =
+ (struct sockaddr_in *)cm_outaddr;
+
+ cm4_outaddr->sin_addr = cm4_addr->sin_addr;
+ }
+ } else {
+ struct sockaddr_in6 *pm6_addr = (struct sockaddr_in6 *)pm_addr;
+
+ if (ipv6_addr_type(&pm6_addr->sin6_addr) == IPV6_ADDR_ANY) {
+ struct sockaddr_in6 *cm6_addr =
+ (struct sockaddr_in6 *)cm_addr;
+ struct sockaddr_in6 *cm6_outaddr =
+ (struct sockaddr_in6 *)cm_outaddr;
+
+ cm6_outaddr->sin6_addr = cm6_addr->sin6_addr;
+ }
+ }
+}
- wait_for_completion(&cm_id_priv->destroy_comp);
+/**
+ * iw_cm_map - Use portmapper to map the ports
+ * @cm_id: connection manager pointer
+ * @active: Indicates the active side when true
+ * returns nonzero for error only if iwpm_create_mapinfo() fails
+ *
+ * Tries to add a mapping for a port using the Portmapper. If
+ * successful in mapping the IP/Port, it will check the remote
+ * mapped IP address for a wildcard IP address and replace the
+ * zero IP address with the remote_addr.
+ */
+static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
+{
+ const char *devname = dev_name(&cm_id->device->dev);
+ const char *ifname = cm_id->device->iw_ifname;
+ struct iwpm_dev_data pm_reg_msg = {};
+ struct iwpm_sa_data pm_msg;
+ int status;
+
+ if (strlen(devname) >= sizeof(pm_reg_msg.dev_name) ||
+ strlen(ifname) >= sizeof(pm_reg_msg.if_name))
+ return -EINVAL;
- free_cm_id(cm_id_priv);
+ cm_id->m_local_addr = cm_id->local_addr;
+ cm_id->m_remote_addr = cm_id->remote_addr;
+
+ strcpy(pm_reg_msg.dev_name, devname);
+ strcpy(pm_reg_msg.if_name, ifname);
+
+ if (iwpm_register_pid(&pm_reg_msg, RDMA_NL_IWCM) ||
+ !iwpm_valid_pid())
+ return 0;
+
+ cm_id->mapped = true;
+ pm_msg.loc_addr = cm_id->local_addr;
+ pm_msg.rem_addr = cm_id->remote_addr;
+ pm_msg.flags = (cm_id->device->iw_driver_flags & IW_F_NO_PORT_MAP) ?
+ IWPM_FLAGS_NO_PORT_MAP : 0;
+ if (active)
+ status = iwpm_add_and_query_mapping(&pm_msg,
+ RDMA_NL_IWCM);
+ else
+ status = iwpm_add_mapping(&pm_msg, RDMA_NL_IWCM);
+
+ if (!status) {
+ cm_id->m_local_addr = pm_msg.mapped_loc_addr;
+ if (active) {
+ cm_id->m_remote_addr = pm_msg.mapped_rem_addr;
+ iw_cm_check_wildcard(&pm_msg.mapped_rem_addr,
+ &cm_id->remote_addr,
+ &cm_id->m_remote_addr);
+ }
+ }
+
+ return iwpm_create_mapinfo(&cm_id->local_addr,
+ &cm_id->m_local_addr,
+ RDMA_NL_IWCM, pm_msg.flags);
}
-EXPORT_SYMBOL(iw_destroy_cm_id);
/*
* CM_ID <-- LISTEN
@@ -452,7 +581,10 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
case IW_CM_STATE_IDLE:
cm_id_priv->state = IW_CM_STATE_LISTEN;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
+ ret = iw_cm_map(cm_id, false);
+ if (!ret)
+ ret = cm_id->device->ops.iw_create_listen(cm_id,
+ backlog);
if (ret)
cm_id_priv->state = IW_CM_STATE_IDLE;
spin_lock_irqsave(&cm_id_priv->lock, flags);
@@ -492,7 +624,7 @@ int iw_cm_reject(struct iw_cm_id *cm_id,
cm_id_priv->state = IW_CM_STATE_IDLE;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ret = cm_id->device->iwcm->reject(cm_id, private_data,
+ ret = cm_id->device->ops.iw_reject(cm_id, private_data,
private_data_len);
clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
@@ -528,28 +660,28 @@ int iw_cm_accept(struct iw_cm_id *cm_id,
return -EINVAL;
}
/* Get the ib_qp given the QPN */
- qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
+ qp = cm_id->device->ops.iw_get_qp(cm_id->device, iw_param->qpn);
if (!qp) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
wake_up_all(&cm_id_priv->connect_wait);
return -EINVAL;
}
- cm_id->device->iwcm->add_ref(qp);
+ cm_id->device->ops.iw_add_ref(qp);
cm_id_priv->qp = qp;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ret = cm_id->device->iwcm->accept(cm_id, iw_param);
+ ret = cm_id->device->ops.iw_accept(cm_id, iw_param);
if (ret) {
/* An error on accept precludes provider events */
BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
cm_id_priv->state = IW_CM_STATE_IDLE;
spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id_priv->qp) {
- cm_id->device->iwcm->rem_ref(qp);
- cm_id_priv->qp = NULL;
- }
+ qp = cm_id_priv->qp;
+ cm_id_priv->qp = NULL;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ if (qp)
+ cm_id->device->ops.iw_rem_ref(qp);
clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
wake_up_all(&cm_id_priv->connect_wait);
}
@@ -570,7 +702,7 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
struct iwcm_id_private *cm_id_priv;
int ret;
unsigned long flags;
- struct ib_qp *qp;
+ struct ib_qp *qp = NULL;
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
@@ -582,39 +714,37 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id_priv->state != IW_CM_STATE_IDLE) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
- wake_up_all(&cm_id_priv->connect_wait);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
/* Get the ib_qp given the QPN */
- qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
+ qp = cm_id->device->ops.iw_get_qp(cm_id->device, iw_param->qpn);
if (!qp) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
- wake_up_all(&cm_id_priv->connect_wait);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
- cm_id->device->iwcm->add_ref(qp);
+ cm_id->device->ops.iw_add_ref(qp);
cm_id_priv->qp = qp;
cm_id_priv->state = IW_CM_STATE_CONN_SENT;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ret = cm_id->device->iwcm->connect(cm_id, iw_param);
- if (ret) {
- spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id_priv->qp) {
- cm_id->device->iwcm->rem_ref(qp);
- cm_id_priv->qp = NULL;
- }
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
- cm_id_priv->state = IW_CM_STATE_IDLE;
- clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
- wake_up_all(&cm_id_priv->connect_wait);
- }
+ ret = iw_cm_map(cm_id, true);
+ if (!ret)
+ ret = cm_id->device->ops.iw_connect(cm_id, iw_param);
+ if (!ret)
+ return 0; /* success */
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ qp = cm_id_priv->qp;
+ cm_id_priv->qp = NULL;
+ cm_id_priv->state = IW_CM_STATE_IDLE;
+err:
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ if (qp)
+ cm_id->device->ops.iw_rem_ref(qp);
+ clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+ wake_up_all(&cm_id_priv->connect_wait);
return ret;
}
EXPORT_SYMBOL(iw_cm_connect);
@@ -656,8 +786,23 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
goto out;
cm_id->provider_data = iw_event->provider_data;
- cm_id->local_addr = iw_event->local_addr;
- cm_id->remote_addr = iw_event->remote_addr;
+ cm_id->m_local_addr = iw_event->local_addr;
+ cm_id->m_remote_addr = iw_event->remote_addr;
+ cm_id->local_addr = listen_id_priv->id.local_addr;
+
+ ret = iwpm_get_remote_info(&listen_id_priv->id.m_local_addr,
+ &iw_event->remote_addr,
+ &cm_id->remote_addr,
+ RDMA_NL_IWCM);
+ if (ret) {
+ cm_id->remote_addr = iw_event->remote_addr;
+ } else {
+ iw_cm_check_wildcard(&listen_id_priv->id.m_local_addr,
+ &iw_event->local_addr,
+ &cm_id->local_addr);
+ iw_event->local_addr = cm_id->local_addr;
+ iw_event->remote_addr = cm_id->remote_addr;
+ }
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
cm_id_priv->state = IW_CM_STATE_CONN_RECV;
@@ -686,10 +831,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
ret = cm_id->cm_handler(cm_id, iw_event);
if (ret) {
iw_cm_reject(cm_id, NULL, 0);
- set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
- destroy_cm_id(cm_id);
- if (atomic_read(&cm_id_priv->refcount)==0)
- free_cm_id(cm_id_priv);
+ iw_destroy_cm_id(cm_id);
}
out:
@@ -742,6 +884,7 @@ static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
struct iw_cm_event *iw_event)
{
+ struct ib_qp *qp = NULL;
unsigned long flags;
int ret;
@@ -753,16 +896,20 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
if (iw_event->status == 0) {
- cm_id_priv->id.local_addr = iw_event->local_addr;
- cm_id_priv->id.remote_addr = iw_event->remote_addr;
+ cm_id_priv->id.m_local_addr = iw_event->local_addr;
+ cm_id_priv->id.m_remote_addr = iw_event->remote_addr;
+ iw_event->local_addr = cm_id_priv->id.local_addr;
+ iw_event->remote_addr = cm_id_priv->id.remote_addr;
cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
} else {
/* REJECTED or RESET */
- cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
+ qp = cm_id_priv->qp;
cm_id_priv->qp = NULL;
cm_id_priv->state = IW_CM_STATE_IDLE;
}
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ if (qp)
+ cm_id_priv->id.device->ops.iw_rem_ref(qp);
ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
if (iw_event->private_data_len)
@@ -804,21 +951,18 @@ static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
struct iw_cm_event *iw_event)
{
+ struct ib_qp *qp;
unsigned long flags;
- int ret = 0;
+ int ret = 0, notify_event = 0;
spin_lock_irqsave(&cm_id_priv->lock, flags);
+ qp = cm_id_priv->qp;
+ cm_id_priv->qp = NULL;
- if (cm_id_priv->qp) {
- cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
- cm_id_priv->qp = NULL;
- }
switch (cm_id_priv->state) {
case IW_CM_STATE_ESTABLISHED:
case IW_CM_STATE_CLOSING:
cm_id_priv->state = IW_CM_STATE_IDLE;
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
- spin_lock_irqsave(&cm_id_priv->lock, flags);
+ notify_event = 1;
break;
case IW_CM_STATE_DESTROYING:
break;
@@ -827,6 +971,10 @@ static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
}
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ if (qp)
+ cm_id_priv->id.device->ops.iw_rem_ref(qp);
+ if (notify_event)
+ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
return ret;
}
@@ -873,36 +1021,26 @@ static void cm_work_handler(struct work_struct *_work)
struct iw_cm_event levent;
struct iwcm_id_private *cm_id_priv = work->cm_id;
unsigned long flags;
- int empty;
int ret = 0;
- int destroy_id;
spin_lock_irqsave(&cm_id_priv->lock, flags);
- empty = list_empty(&cm_id_priv->work_list);
- while (!empty) {
- work = list_entry(cm_id_priv->work_list.next,
- struct iwcm_work, list);
+ while (!list_empty(&cm_id_priv->work_list)) {
+ work = list_first_entry(&cm_id_priv->work_list,
+ struct iwcm_work, list);
list_del_init(&work->list);
- empty = list_empty(&cm_id_priv->work_list);
levent = work->event;
put_work(work);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ret = process_event(cm_id_priv, &levent);
- if (ret) {
- set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
- destroy_cm_id(&cm_id_priv->id);
- }
- BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
- destroy_id = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
- if (iwcm_deref_id(cm_id_priv)) {
- if (destroy_id) {
- BUG_ON(!list_empty(&cm_id_priv->work_list));
- free_cm_id(cm_id_priv);
+ if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
+ ret = process_event(cm_id_priv, &levent);
+ if (ret) {
+ destroy_cm_id(&cm_id_priv->id);
+ WARN_ON_ONCE(iwcm_deref_id(cm_id_priv));
}
- return;
- }
- if (empty)
+ } else
+ pr_debug("dropping event %d\n", levent.event);
+ if (iwcm_deref_id(cm_id_priv))
return;
spin_lock_irqsave(&cm_id_priv->lock, flags);
}
@@ -955,12 +1093,9 @@ static int cm_event_handler(struct iw_cm_id *cm_id,
}
}
- atomic_inc(&cm_id_priv->refcount);
- if (list_empty(&cm_id_priv->work_list)) {
- list_add_tail(&work->list, &cm_id_priv->work_list);
- queue_work(iwcm_wq, &work->work);
- } else
- list_add_tail(&work->list, &cm_id_priv->work_list);
+ refcount_inc(&cm_id_priv->refcount);
+ list_add_tail(&work->list, &cm_id_priv->work_list);
+ queue_work(iwcm_wq, &work->work);
out:
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
@@ -1044,26 +1179,42 @@ EXPORT_SYMBOL(iw_cm_init_qp_attr);
static int __init iw_cm_init(void)
{
- iwcm_wq = create_singlethread_workqueue("iw_cm_wq");
+ int ret;
+
+ ret = iwpm_init(RDMA_NL_IWCM);
+ if (ret)
+ return ret;
+
+ iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", WQ_MEM_RECLAIM);
if (!iwcm_wq)
- return -ENOMEM;
+ goto err_alloc;
iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
iwcm_ctl_table);
if (!iwcm_ctl_table_hdr) {
pr_err("iw_cm: couldn't register sysctl paths\n");
- destroy_workqueue(iwcm_wq);
- return -ENOMEM;
+ goto err_sysctl;
}
+ rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table);
return 0;
+
+err_sysctl:
+ destroy_workqueue(iwcm_wq);
+err_alloc:
+ iwpm_exit(RDMA_NL_IWCM);
+ return -ENOMEM;
}
static void __exit iw_cm_cleanup(void)
{
+ rdma_nl_unregister(RDMA_NL_IWCM);
unregister_net_sysctl_table(iwcm_ctl_table_hdr);
destroy_workqueue(iwcm_wq);
+ iwpm_exit(RDMA_NL_IWCM);
}
+MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_IWCM, 2);
+
module_init(iw_cm_init);
module_exit(iw_cm_cleanup);
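The refcount rework above replaces the old "destroyer waits on a completion" scheme with the common kernel pattern of freeing the object from whichever path drops the last reference. A self-contained sketch of that pattern, with illustrative names rather than the iwcm ones:

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t refcount;
	/* ... payload ... */
};

static struct obj *obj_create(void)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (o)
		refcount_set(&o->refcount, 1);	/* initial reference */
	return o;
}

static void obj_get(struct obj *o)
{
	refcount_inc(&o->refcount);
}

/* Returns true when this call dropped the last reference and freed the object. */
static bool obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->refcount)) {
		kfree(o);
		return true;
	}
	return false;
}

In the patch, iwcm_deref_id() plays the obj_put() role; since each queued event work item holds its own reference, iw_destroy_cm_id() only has to flush iwcm_wq when other references remain instead of sleeping on destroy_comp.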
diff --git a/drivers/infiniband/core/iwcm.h b/drivers/infiniband/core/iwcm.h
index 3f6cc82564c8..bf74639be128 100644
--- a/drivers/infiniband/core/iwcm.h
+++ b/drivers/infiniband/core/iwcm.h
@@ -52,11 +52,11 @@ struct iwcm_id_private {
wait_queue_head_t connect_wait;
struct list_head work_list;
spinlock_t lock;
- atomic_t refcount;
+ refcount_t refcount;
struct list_head work_free_list;
};
-#define IWCM_F_CALLBACK_DESTROY 1
+#define IWCM_F_DROP_EVENTS 1
#define IWCM_F_CONNECT_WAIT 2
#endif /* IWCM_H */
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 22a3abee2a54..3c9a9869212b 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -34,19 +34,25 @@
#include "iwpm_util.h"
static const char iwpm_ulib_name[IWPM_ULIBNAME_SIZE] = "iWarpPortMapperUser";
-static int iwpm_ulib_version = 3;
+u16 iwpm_ulib_version = IWPM_UABI_VERSION_MIN;
static int iwpm_user_pid = IWPM_PID_UNDEFINED;
static atomic_t echo_nlmsg_seq;
+/**
+ * iwpm_valid_pid - Check if the userspace iwarp port mapper pid is valid
+ *
+ * Returns true if the pid is greater than zero, otherwise returns false
+ */
int iwpm_valid_pid(void)
{
return iwpm_user_pid > 0;
}
-EXPORT_SYMBOL(iwpm_valid_pid);
-/*
- * iwpm_register_pid - Send a netlink query to user space
- * for the iwarp port mapper pid
+/**
+ * iwpm_register_pid - Send a netlink query to userspace
+ * to get the iwarp port mapper pid
+ * @pm_msg: Contains driver info to send to the userspace port mapper
+ * @nl_client: The index of the netlink client
*
* nlmsg attributes:
* [IWPM_NLA_REG_PID_SEQ]
@@ -63,10 +69,6 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
const char *err_str = "";
int ret = -EINVAL;
- if (!iwpm_valid_client(nl_client)) {
- err_str = "Invalid port mapper client";
- goto pid_query_error;
- }
if (iwpm_check_registration(nl_client, IWPM_REG_VALID) ||
iwpm_user_pid == IWPM_PID_UNAVAILABLE)
return 0;
@@ -88,8 +90,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq, IWPM_NLA_REG_PID_SEQ);
if (ret)
goto pid_query_error;
- ret = ibnl_put_attr(skb, nlh, IWPM_IFNAME_SIZE,
- pm_msg->if_name, IWPM_NLA_REG_IF_NAME);
+ ret = ibnl_put_attr(skb, nlh, IFNAMSIZ,
+ pm_msg->if_name, IWPM_NLA_REG_IF_NAME);
if (ret)
goto pid_query_error;
ret = ibnl_put_attr(skb, nlh, IWPM_DEVNAME_SIZE,
@@ -101,10 +103,12 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
if (ret)
goto pid_query_error;
+ nlmsg_end(skb, nlh);
+
pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n",
__func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name);
- ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
+ ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
if (ret) {
skb = NULL; /* skb is freed in the netlink send-op handling */
iwpm_user_pid = IWPM_PID_UNAVAILABLE;
@@ -115,21 +119,26 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
ret = iwpm_wait_complete_req(nlmsg_request);
return ret;
pid_query_error:
- pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
- if (skb)
- dev_kfree_skb(skb);
+ pr_info("%s: %s (client = %u)\n", __func__, err_str, nl_client);
+ dev_kfree_skb(skb);
if (nlmsg_request)
iwpm_free_nlmsg_request(&nlmsg_request->kref);
return ret;
}
-EXPORT_SYMBOL(iwpm_register_pid);
-/*
- * iwpm_add_mapping - Send a netlink add mapping message
- * to the port mapper
+/**
+ * iwpm_add_mapping - Send a netlink add mapping request to
+ * the userspace port mapper
+ * @pm_msg: Contains the local ip/tcp address info to send
+ * @nl_client: The index of the netlink client
+ *
* nlmsg attributes:
* [IWPM_NLA_MANAGE_MAPPING_SEQ]
* [IWPM_NLA_MANAGE_ADDR]
+ * [IWPM_NLA_MANAGE_FLAGS]
+ *
+ * If the request is successful, the pm_msg stores
+ * the port mapper response (mapped address info)
*/
int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
{
@@ -140,10 +149,6 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
const char *err_str = "";
int ret = -EINVAL;
- if (!iwpm_valid_client(nl_client)) {
- err_str = "Invalid port mapper client";
- goto add_mapping_error;
- }
if (!iwpm_valid_pid())
return 0;
if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
@@ -172,9 +177,23 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
&pm_msg->loc_addr, IWPM_NLA_MANAGE_ADDR);
if (ret)
goto add_mapping_error;
+
+ /* If flags are required and we're not V4, then return a quiet error */
+ if (pm_msg->flags && iwpm_ulib_version == IWPM_UABI_VERSION_MIN) {
+ ret = -EINVAL;
+ goto add_mapping_error_nowarn;
+ }
+ if (iwpm_ulib_version > IWPM_UABI_VERSION_MIN) {
+ ret = ibnl_put_attr(skb, nlh, sizeof(u32), &pm_msg->flags,
+ IWPM_NLA_MANAGE_FLAGS);
+ if (ret)
+ goto add_mapping_error;
+ }
+
+ nlmsg_end(skb, nlh);
nlmsg_request->req_buffer = pm_msg;
- ret = ibnl_unicast(skb, nlh, iwpm_user_pid);
+ ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid);
if (ret) {
skb = NULL; /* skb is freed in the netlink send-op handling */
iwpm_user_pid = IWPM_PID_UNDEFINED;
@@ -184,22 +203,25 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
ret = iwpm_wait_complete_req(nlmsg_request);
return ret;
add_mapping_error:
- pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
- if (skb)
- dev_kfree_skb(skb);
+ pr_info("%s: %s (client = %u)\n", __func__, err_str, nl_client);
+add_mapping_error_nowarn:
+ dev_kfree_skb(skb);
if (nlmsg_request)
iwpm_free_nlmsg_request(&nlmsg_request->kref);
return ret;
}
-EXPORT_SYMBOL(iwpm_add_mapping);
-/*
- * iwpm_add_and_query_mapping - Send a netlink add and query
- * mapping message to the port mapper
+/**
+ * iwpm_add_and_query_mapping - Send a netlink add and query mapping request
+ * to the userspace port mapper
+ * @pm_msg: Contains the local and remote ip/tcp address info to send
+ * @nl_client: The index of the netlink client
+ *
* nlmsg attributes:
* [IWPM_NLA_QUERY_MAPPING_SEQ]
* [IWPM_NLA_QUERY_LOCAL_ADDR]
* [IWPM_NLA_QUERY_REMOTE_ADDR]
+ * [IWPM_NLA_QUERY_FLAGS]
*/
int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
{
@@ -210,10 +232,6 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
const char *err_str = "";
int ret = -EINVAL;
- if (!iwpm_valid_client(nl_client)) {
- err_str = "Invalid port mapper client";
- goto query_mapping_error;
- }
if (!iwpm_valid_pid())
return 0;
if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
@@ -249,9 +267,23 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
&pm_msg->rem_addr, IWPM_NLA_QUERY_REMOTE_ADDR);
if (ret)
goto query_mapping_error;
+
+ /* If flags are required and we're not V4, then return a quiet error */
+ if (pm_msg->flags && iwpm_ulib_version == IWPM_UABI_VERSION_MIN) {
+ ret = -EINVAL;
+ goto query_mapping_error_nowarn;
+ }
+ if (iwpm_ulib_version > IWPM_UABI_VERSION_MIN) {
+ ret = ibnl_put_attr(skb, nlh, sizeof(u32), &pm_msg->flags,
+ IWPM_NLA_QUERY_FLAGS);
+ if (ret)
+ goto query_mapping_error;
+ }
+
+ nlmsg_end(skb, nlh);
nlmsg_request->req_buffer = pm_msg;
- ret = ibnl_unicast(skb, nlh, iwpm_user_pid);
+ ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid);
if (ret) {
skb = NULL; /* skb is freed in the netlink send-op handling */
err_str = "Unable to send a nlmsg";
@@ -260,18 +292,21 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
ret = iwpm_wait_complete_req(nlmsg_request);
return ret;
query_mapping_error:
- pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
- if (skb)
- dev_kfree_skb(skb);
+ pr_info("%s: %s (client = %u)\n", __func__, err_str, nl_client);
+query_mapping_error_nowarn:
+ dev_kfree_skb(skb);
if (nlmsg_request)
iwpm_free_nlmsg_request(&nlmsg_request->kref);
return ret;
}
-EXPORT_SYMBOL(iwpm_add_and_query_mapping);
-/*
- * iwpm_remove_mapping - Send a netlink remove mapping message
- * to the port mapper
+/**
+ * iwpm_remove_mapping - Send a netlink remove mapping request
+ * to the userspace port mapper
+ *
+ * @local_addr: Local ip/tcp address to remove
+ * @nl_client: The index of the netlink client
+ *
* nlmsg attributes:
* [IWPM_NLA_MANAGE_MAPPING_SEQ]
* [IWPM_NLA_MANAGE_ADDR]
@@ -284,10 +319,6 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
const char *err_str = "";
int ret = -EINVAL;
- if (!iwpm_valid_client(nl_client)) {
- err_str = "Invalid port mapper client";
- goto remove_mapping_error;
- }
if (!iwpm_valid_pid())
return 0;
if (iwpm_check_registration(nl_client, IWPM_REG_UNDEF)) {
@@ -312,7 +343,9 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
if (ret)
goto remove_mapping_error;
- ret = ibnl_unicast(skb, nlh, iwpm_user_pid);
+ nlmsg_end(skb, nlh);
+
+ ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid);
if (ret) {
skb = NULL; /* skb is freed in the netlink send-op handling */
iwpm_user_pid = IWPM_PID_UNDEFINED;
@@ -323,12 +356,11 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
"remove_mapping: Local sockaddr:");
return 0;
remove_mapping_error:
- pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
+ pr_info("%s: %s (client = %u)\n", __func__, err_str, nl_client);
if (skb)
dev_kfree_skb_any(skb);
return ret;
}
-EXPORT_SYMBOL(iwpm_remove_mapping);
/* netlink attribute policy for the received response to register pid request */
static const struct nla_policy resp_reg_policy[IWPM_NLA_RREG_PID_MAX] = {
@@ -341,9 +373,14 @@ static const struct nla_policy resp_reg_policy[IWPM_NLA_RREG_PID_MAX] = {
[IWPM_NLA_RREG_PID_ERR] = { .type = NLA_U16 }
};
-/*
- * iwpm_register_pid_cb - Process a port mapper response to
- * iwpm_register_pid()
+/**
+ * iwpm_register_pid_cb - Process the port mapper response to
+ * iwpm_register_pid query
+ * @skb: The socket buffer
+ * @cb: Contains the received message (payload and netlink header)
+ *
+ * If successful, the function receives the userspace port mapper pid
+ * which is used in future communication with the port mapper
*/
int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb)
{
@@ -376,40 +413,46 @@ int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb)
/* check device name, ulib name and version */
if (strcmp(pm_msg->dev_name, dev_name) ||
strcmp(iwpm_ulib_name, iwpm_name) ||
- iwpm_version != iwpm_ulib_version) {
+ iwpm_version < IWPM_UABI_VERSION_MIN) {
- pr_info("%s: Incorrect info (dev = %s name = %s version = %d)\n",
+ pr_info("%s: Incorrect info (dev = %s name = %s version = %u)\n",
__func__, dev_name, iwpm_name, iwpm_version);
nlmsg_request->err_code = IWPM_USER_LIB_INFO_ERR;
goto register_pid_response_exit;
}
iwpm_user_pid = cb->nlh->nlmsg_pid;
+ iwpm_ulib_version = iwpm_version;
+ if (iwpm_ulib_version < IWPM_UABI_VERSION)
+ pr_warn_once("%s: Down level iwpmd/pid %d. Continuing...",
+ __func__, iwpm_user_pid);
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
__func__, iwpm_user_pid);
- if (iwpm_valid_client(nl_client))
- iwpm_set_registration(nl_client, IWPM_REG_VALID);
+ iwpm_set_registration(nl_client, IWPM_REG_VALID);
register_pid_response_exit:
nlmsg_request->request_done = 1;
/* always for found nlmsg_request */
kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
barrier();
- wake_up(&nlmsg_request->waitq);
+ up(&nlmsg_request->sem);
return 0;
}
-EXPORT_SYMBOL(iwpm_register_pid_cb);
/* netlink attribute policy for the received response to add mapping request */
static const struct nla_policy resp_add_policy[IWPM_NLA_RMANAGE_MAPPING_MAX] = {
- [IWPM_NLA_MANAGE_MAPPING_SEQ] = { .type = NLA_U32 },
- [IWPM_NLA_MANAGE_ADDR] = { .len = sizeof(struct sockaddr_storage) },
- [IWPM_NLA_MANAGE_MAPPED_LOC_ADDR] = { .len = sizeof(struct sockaddr_storage) },
- [IWPM_NLA_RMANAGE_MAPPING_ERR] = { .type = NLA_U16 }
+ [IWPM_NLA_RMANAGE_MAPPING_SEQ] = { .type = NLA_U32 },
+ [IWPM_NLA_RMANAGE_ADDR] = {
+ .len = sizeof(struct sockaddr_storage) },
+ [IWPM_NLA_RMANAGE_MAPPED_LOC_ADDR] = {
+ .len = sizeof(struct sockaddr_storage) },
+ [IWPM_NLA_RMANAGE_MAPPING_ERR] = { .type = NLA_U16 }
};
-/*
- * iwpm_add_mapping_cb - Process a port mapper response to
- * iwpm_add_mapping()
+/**
+ * iwpm_add_mapping_cb - Process the port mapper response to
+ * iwpm_add_mapping request
+ * @skb: The socket buffer
+ * @cb: Contains the received message (payload and netlink header)
*/
int iwpm_add_mapping_cb(struct sk_buff *skb, struct netlink_callback *cb)
{
@@ -428,7 +471,7 @@ int iwpm_add_mapping_cb(struct sk_buff *skb, struct netlink_callback *cb)
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
- msg_seq = nla_get_u32(nltb[IWPM_NLA_MANAGE_MAPPING_SEQ]);
+ msg_seq = nla_get_u32(nltb[IWPM_NLA_RMANAGE_MAPPING_SEQ]);
nlmsg_request = iwpm_find_nlmsg_request(msg_seq);
if (!nlmsg_request) {
pr_info("%s: Could not find a matching request (seq = %u)\n",
@@ -437,9 +480,9 @@ int iwpm_add_mapping_cb(struct sk_buff *skb, struct netlink_callback *cb)
}
pm_msg = nlmsg_request->req_buffer;
local_sockaddr = (struct sockaddr_storage *)
- nla_data(nltb[IWPM_NLA_MANAGE_ADDR]);
+ nla_data(nltb[IWPM_NLA_RMANAGE_ADDR]);
mapped_sockaddr = (struct sockaddr_storage *)
- nla_data(nltb[IWPM_NLA_MANAGE_MAPPED_LOC_ADDR]);
+ nla_data(nltb[IWPM_NLA_RMANAGE_MAPPED_LOC_ADDR]);
if (iwpm_compare_sockaddr(local_sockaddr, &pm_msg->loc_addr)) {
nlmsg_request->err_code = IWPM_USER_LIB_INFO_ERR;
@@ -463,25 +506,31 @@ add_mapping_response_exit:
/* always for found request */
kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
barrier();
- wake_up(&nlmsg_request->waitq);
+ up(&nlmsg_request->sem);
return 0;
}
-EXPORT_SYMBOL(iwpm_add_mapping_cb);
/* netlink attribute policy for the response to add and query mapping request
- * and response with remote address info */
+ * and response with remote address info
+ */
static const struct nla_policy resp_query_policy[IWPM_NLA_RQUERY_MAPPING_MAX] = {
- [IWPM_NLA_QUERY_MAPPING_SEQ] = { .type = NLA_U32 },
- [IWPM_NLA_QUERY_LOCAL_ADDR] = { .len = sizeof(struct sockaddr_storage) },
- [IWPM_NLA_QUERY_REMOTE_ADDR] = { .len = sizeof(struct sockaddr_storage) },
- [IWPM_NLA_RQUERY_MAPPED_LOC_ADDR] = { .len = sizeof(struct sockaddr_storage) },
- [IWPM_NLA_RQUERY_MAPPED_REM_ADDR] = { .len = sizeof(struct sockaddr_storage) },
+ [IWPM_NLA_RQUERY_MAPPING_SEQ] = { .type = NLA_U32 },
+ [IWPM_NLA_RQUERY_LOCAL_ADDR] = {
+ .len = sizeof(struct sockaddr_storage) },
+ [IWPM_NLA_RQUERY_REMOTE_ADDR] = {
+ .len = sizeof(struct sockaddr_storage) },
+ [IWPM_NLA_RQUERY_MAPPED_LOC_ADDR] = {
+ .len = sizeof(struct sockaddr_storage) },
+ [IWPM_NLA_RQUERY_MAPPED_REM_ADDR] = {
+ .len = sizeof(struct sockaddr_storage) },
[IWPM_NLA_RQUERY_MAPPING_ERR] = { .type = NLA_U16 }
};
-/*
- * iwpm_add_and_query_mapping_cb - Process a port mapper response to
- * iwpm_add_and_query_mapping()
+/**
+ * iwpm_add_and_query_mapping_cb - Process the port mapper response to
+ * iwpm_add_and_query_mapping request
+ * @skb: The socket buffer
+ * @cb: Contains the received message (payload and netlink header)
*/
int iwpm_add_and_query_mapping_cb(struct sk_buff *skb,
struct netlink_callback *cb)
@@ -501,18 +550,18 @@ int iwpm_add_and_query_mapping_cb(struct sk_buff *skb,
return -EINVAL;
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
- msg_seq = nla_get_u32(nltb[IWPM_NLA_QUERY_MAPPING_SEQ]);
+ msg_seq = nla_get_u32(nltb[IWPM_NLA_RQUERY_MAPPING_SEQ]);
nlmsg_request = iwpm_find_nlmsg_request(msg_seq);
if (!nlmsg_request) {
pr_info("%s: Could not find a matching request (seq = %u)\n",
__func__, msg_seq);
- return -EINVAL;
+ return -EINVAL;
}
pm_msg = nlmsg_request->req_buffer;
local_sockaddr = (struct sockaddr_storage *)
- nla_data(nltb[IWPM_NLA_QUERY_LOCAL_ADDR]);
+ nla_data(nltb[IWPM_NLA_RQUERY_LOCAL_ADDR]);
remote_sockaddr = (struct sockaddr_storage *)
- nla_data(nltb[IWPM_NLA_QUERY_REMOTE_ADDR]);
+ nla_data(nltb[IWPM_NLA_RQUERY_REMOTE_ADDR]);
mapped_loc_sockaddr = (struct sockaddr_storage *)
nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_LOC_ADDR]);
mapped_rem_sockaddr = (struct sockaddr_storage *)
@@ -555,14 +604,17 @@ query_mapping_response_exit:
/* always for found request */
kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
barrier();
- wake_up(&nlmsg_request->waitq);
+ up(&nlmsg_request->sem);
return 0;
}
-EXPORT_SYMBOL(iwpm_add_and_query_mapping_cb);
-/*
- * iwpm_remote_info_cb - Process a port mapper message, containing
- * the remote connecting peer address info
+/**
+ * iwpm_remote_info_cb - Process remote connecting peer address info, which
+ * the port mapper has received from the connecting peer
+ * @skb: The socket buffer
+ * @cb: Contains the received message (payload and netlink header)
+ *
+ * Stores the IPv4/IPv6 address info in a hash table
*/
int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
{
@@ -580,17 +632,12 @@ int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
return ret;
nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
- if (!iwpm_valid_client(nl_client)) {
- pr_info("%s: Invalid port mapper client = %d\n",
- __func__, nl_client);
- return ret;
- }
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
local_sockaddr = (struct sockaddr_storage *)
- nla_data(nltb[IWPM_NLA_QUERY_LOCAL_ADDR]);
+ nla_data(nltb[IWPM_NLA_RQUERY_LOCAL_ADDR]);
remote_sockaddr = (struct sockaddr_storage *)
- nla_data(nltb[IWPM_NLA_QUERY_REMOTE_ADDR]);
+ nla_data(nltb[IWPM_NLA_RQUERY_REMOTE_ADDR]);
mapped_loc_sockaddr = (struct sockaddr_storage *)
nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_LOC_ADDR]);
mapped_rem_sockaddr = (struct sockaddr_storage *)
@@ -604,7 +651,6 @@ int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
}
rem_info = kzalloc(sizeof(struct iwpm_remote_info), GFP_ATOMIC);
if (!rem_info) {
- pr_err("%s: Unable to allocate a remote info\n", __func__);
ret = -ENOMEM;
return ret;
}
@@ -628,7 +674,6 @@ int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
"remote_info: Mapped remote sockaddr:");
return ret;
}
-EXPORT_SYMBOL(iwpm_remote_info_cb);
/* netlink attribute policy for the received request for mapping info */
static const struct nla_policy resp_mapinfo_policy[IWPM_NLA_MAPINFO_REQ_MAX] = {
@@ -637,8 +682,14 @@ static const struct nla_policy resp_mapinfo_policy[IWPM_NLA_MAPINFO_REQ_MAX] = {
[IWPM_NLA_MAPINFO_ULIB_VER] = { .type = NLA_U16 }
};
-/*
- * iwpm_mapping_info_cb - Process a port mapper request for mapping info
+/**
+ * iwpm_mapping_info_cb - Process a notification that the userspace
+ * port mapper daemon is started
+ * @skb: The socket buffer
+ * @cb: Contains the received message (payload and netlink header)
+ *
+ * Using the received port mapper pid, send all the local mapping
+ * info records to the userspace port mapper
*/
int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
{
@@ -657,20 +708,20 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
iwpm_name = (char *)nla_data(nltb[IWPM_NLA_MAPINFO_ULIB_NAME]);
iwpm_version = nla_get_u16(nltb[IWPM_NLA_MAPINFO_ULIB_VER]);
if (strcmp(iwpm_ulib_name, iwpm_name) ||
- iwpm_version != iwpm_ulib_version) {
- pr_info("%s: Invalid port mapper name = %s version = %d\n",
+ iwpm_version < IWPM_UABI_VERSION_MIN) {
+ pr_info("%s: Invalid port mapper name = %s version = %u\n",
__func__, iwpm_name, iwpm_version);
return ret;
}
nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
- if (!iwpm_valid_client(nl_client)) {
- pr_info("%s: Invalid port mapper client = %d\n",
- __func__, nl_client);
- return ret;
- }
iwpm_set_registration(nl_client, IWPM_REG_INCOMPL);
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
iwpm_user_pid = cb->nlh->nlmsg_pid;
+
+ if (iwpm_ulib_version < IWPM_UABI_VERSION)
+ pr_warn_once("%s: Down level iwpmd/pid %d. Continuing...",
+ __func__, iwpm_user_pid);
+
if (!iwpm_mapinfo_available())
return 0;
pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
@@ -678,7 +729,6 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
ret = iwpm_send_mapinfo(nl_client, iwpm_user_pid);
return ret;
}
-EXPORT_SYMBOL(iwpm_mapping_info_cb);
/* netlink attribute policy for the received mapping info ack */
static const struct nla_policy ack_mapinfo_policy[IWPM_NLA_MAPINFO_NUM_MAX] = {
@@ -687,9 +737,11 @@ static const struct nla_policy ack_mapinfo_policy[IWPM_NLA_MAPINFO_NUM_MAX] = {
[IWPM_NLA_MAPINFO_ACK_NUM] = { .type = NLA_U32 }
};
-/*
- * iwpm_ack_mapping_info_cb - Process a port mapper ack for
- * the provided mapping info records
+/**
+ * iwpm_ack_mapping_info_cb - Process the port mapper ack for
+ * the provided local mapping info records
+ * @skb: The socket buffer
+ * @cb: Contains the received message (payload and netlink header)
*/
int iwpm_ack_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
{
@@ -708,7 +760,6 @@ int iwpm_ack_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
return 0;
}
-EXPORT_SYMBOL(iwpm_ack_mapping_info_cb);
/* netlink attribute policy for the received port mapper error message */
static const struct nla_policy map_error_policy[IWPM_NLA_ERR_MAX] = {
@@ -716,8 +767,11 @@ static const struct nla_policy map_error_policy[IWPM_NLA_ERR_MAX] = {
[IWPM_NLA_ERR_CODE] = { .type = NLA_U16 },
};
-/*
- * iwpm_mapping_error_cb - Process a port mapper error message
+/**
+ * iwpm_mapping_error_cb - Process port mapper notification for error
+ *
+ * @skb: The socket buffer
+ * @cb: Contains the received message (payload and netlink header)
*/
int iwpm_mapping_error_cb(struct sk_buff *skb, struct netlink_callback *cb)
{
@@ -749,7 +803,44 @@ int iwpm_mapping_error_cb(struct sk_buff *skb, struct netlink_callback *cb)
/* always for found request */
kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
barrier();
- wake_up(&nlmsg_request->waitq);
+ up(&nlmsg_request->sem);
return 0;
}
-EXPORT_SYMBOL(iwpm_mapping_error_cb);
+
+/* netlink attribute policy for the received hello request */
+static const struct nla_policy hello_policy[IWPM_NLA_HELLO_MAX] = {
+ [IWPM_NLA_HELLO_ABI_VERSION] = { .type = NLA_U16 }
+};
+
+/**
+ * iwpm_hello_cb - Process a hello message from iwpmd
+ *
+ * @skb: The socket buffer
+ * @cb: Contains the received message (payload and netlink header)
+ *
+ * Using the received port mapper pid, send the kernel's abi_version
+ * after adjusting it to support the iwpmd version.
+ */
+int iwpm_hello_cb(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct nlattr *nltb[IWPM_NLA_HELLO_MAX];
+ const char *msg_type = "Hello request";
+ u8 nl_client;
+ u16 abi_version;
+ int ret = -EINVAL;
+
+ if (iwpm_parse_nlmsg(cb, IWPM_NLA_HELLO_MAX, hello_policy, nltb,
+ msg_type)) {
+ pr_info("%s: Unable to parse nlmsg\n", __func__);
+ return ret;
+ }
+ abi_version = nla_get_u16(nltb[IWPM_NLA_HELLO_ABI_VERSION]);
+ nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
+ iwpm_set_registration(nl_client, IWPM_REG_INCOMPL);
+ atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
+ iwpm_ulib_version = min_t(u16, IWPM_UABI_VERSION, abi_version);
+ pr_debug("Using ABI version %u\n", iwpm_ulib_version);
+ iwpm_user_pid = cb->nlh->nlmsg_pid;
+ ret = iwpm_send_hello(nl_client, iwpm_user_pid, iwpm_ulib_version);
+ return ret;
+}
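The version negotiated here is what later gates the optional IWPM_NLA_MAPINFO_FLAGS attribute in iwpm_send_mapinfo() further down. A minimal sketch of that gating, with hypothetical example_* helpers (only min_t() comes from the kernel headers; the constants are passed in rather than re-declared):

#include <linux/kernel.h>

/* Illustrative only; example_* names are not part of this patch. */
static u16 example_negotiate_abi(u16 kernel_abi, u16 iwpmd_abi)
{
	/* Keep the lower of the two so both ends understand every message. */
	return min_t(u16, kernel_abi, iwpmd_abi);
}

static bool example_send_map_flags(u16 negotiated_abi, u16 abi_version_min)
{
	/* Pre-hello iwpmd daemons do not know IWPM_NLA_MAPINFO_FLAGS. */
	return negotiated_abi > abi_version_min;
}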
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 5fb089e91353..eecb369898f5 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -37,6 +37,7 @@
#define IWPM_MAPINFO_HASH_MASK (IWPM_MAPINFO_HASH_SIZE - 1)
#define IWPM_REMINFO_HASH_SIZE 64
#define IWPM_REMINFO_HASH_MASK (IWPM_REMINFO_HASH_SIZE - 1)
+#define IWPM_MSG_SIZE 512
static LIST_HEAD(iwpm_nlmsg_req_list);
static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);
@@ -47,95 +48,81 @@ static DEFINE_SPINLOCK(iwpm_mapinfo_lock);
static struct hlist_head *iwpm_reminfo_bucket;
static DEFINE_SPINLOCK(iwpm_reminfo_lock);
-static DEFINE_MUTEX(iwpm_admin_lock);
static struct iwpm_admin_data iwpm_admin;
+/**
+ * iwpm_init - Allocate resources for the iwarp port mapper
+ * @nl_client: The index of the netlink client
+ *
+ * Should be called when network interface goes up.
+ */
int iwpm_init(u8 nl_client)
{
- int ret = 0;
- if (iwpm_valid_client(nl_client))
- return -EINVAL;
- mutex_lock(&iwpm_admin_lock);
- if (atomic_read(&iwpm_admin.refcount) == 0) {
- iwpm_hash_bucket = kzalloc(IWPM_MAPINFO_HASH_SIZE *
- sizeof(struct hlist_head), GFP_KERNEL);
- if (!iwpm_hash_bucket) {
- ret = -ENOMEM;
- pr_err("%s Unable to create mapinfo hash table\n", __func__);
- goto init_exit;
- }
- iwpm_reminfo_bucket = kzalloc(IWPM_REMINFO_HASH_SIZE *
- sizeof(struct hlist_head), GFP_KERNEL);
- if (!iwpm_reminfo_bucket) {
- kfree(iwpm_hash_bucket);
- ret = -ENOMEM;
- pr_err("%s Unable to create reminfo hash table\n", __func__);
- goto init_exit;
- }
- }
- atomic_inc(&iwpm_admin.refcount);
-init_exit:
- mutex_unlock(&iwpm_admin_lock);
- if (!ret) {
- iwpm_set_valid(nl_client, 1);
- iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
- pr_debug("%s: Mapinfo and reminfo tables are created\n",
- __func__);
+ iwpm_hash_bucket = kcalloc(IWPM_MAPINFO_HASH_SIZE,
+ sizeof(struct hlist_head), GFP_KERNEL);
+ if (!iwpm_hash_bucket)
+ return -ENOMEM;
+
+ iwpm_reminfo_bucket = kcalloc(IWPM_REMINFO_HASH_SIZE,
+ sizeof(struct hlist_head), GFP_KERNEL);
+ if (!iwpm_reminfo_bucket) {
+ kfree(iwpm_hash_bucket);
+ return -ENOMEM;
}
- return ret;
+
+ iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
+ pr_debug("%s: Mapinfo and reminfo tables are created\n", __func__);
+ return 0;
}
-EXPORT_SYMBOL(iwpm_init);
static void free_hash_bucket(void);
static void free_reminfo_bucket(void);
+/**
+ * iwpm_exit - Deallocate resources for the iwarp port mapper
+ * @nl_client: The index of the netlink client
+ *
+ * Should be called when network interface goes down.
+ */
int iwpm_exit(u8 nl_client)
{
-
- if (!iwpm_valid_client(nl_client))
- return -EINVAL;
- mutex_lock(&iwpm_admin_lock);
- if (atomic_read(&iwpm_admin.refcount) == 0) {
- mutex_unlock(&iwpm_admin_lock);
- pr_err("%s Incorrect usage - negative refcount\n", __func__);
- return -EINVAL;
- }
- if (atomic_dec_and_test(&iwpm_admin.refcount)) {
- free_hash_bucket();
- free_reminfo_bucket();
- pr_debug("%s: Resources are destroyed\n", __func__);
- }
- mutex_unlock(&iwpm_admin_lock);
- iwpm_set_valid(nl_client, 0);
+ free_hash_bucket();
+ free_reminfo_bucket();
+ pr_debug("%s: Resources are destroyed\n", __func__);
iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
return 0;
}
-EXPORT_SYMBOL(iwpm_exit);
static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage *,
struct sockaddr_storage *);
+/**
+ * iwpm_create_mapinfo - Store local and mapped IPv4/IPv6 address
+ * info in a hash table
+ * @local_sockaddr: Local ip/tcp address
+ * @mapped_sockaddr: Mapped local ip/tcp address
+ * @nl_client: The index of the netlink client
+ * @map_flags: IWPM mapping flags
+ */
int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
struct sockaddr_storage *mapped_sockaddr,
- u8 nl_client)
+ u8 nl_client, u32 map_flags)
{
- struct hlist_head *hash_bucket_head;
+ struct hlist_head *hash_bucket_head = NULL;
struct iwpm_mapping_info *map_info;
unsigned long flags;
int ret = -EINVAL;
- if (!iwpm_valid_client(nl_client))
- return ret;
map_info = kzalloc(sizeof(struct iwpm_mapping_info), GFP_KERNEL);
- if (!map_info) {
- pr_err("%s: Unable to allocate a mapping info\n", __func__);
+ if (!map_info)
return -ENOMEM;
- }
+
memcpy(&map_info->local_sockaddr, local_sockaddr,
sizeof(struct sockaddr_storage));
memcpy(&map_info->mapped_sockaddr, mapped_sockaddr,
sizeof(struct sockaddr_storage));
map_info->nl_client = nl_client;
+ map_info->map_flags = map_flags;
spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
if (iwpm_hash_bucket) {
@@ -148,10 +135,21 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
}
}
spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
+
+ if (!hash_bucket_head)
+ kfree(map_info);
return ret;
}
-EXPORT_SYMBOL(iwpm_create_mapinfo);
+/**
+ * iwpm_remove_mapinfo - Remove local and mapped IPv4/IPv6 address
+ * info from the hash table
+ * @local_sockaddr: Local ip/tcp address
+ * @mapped_local_addr: Mapped local ip/tcp address
+ *
+ * Returns err code if mapping info is not found in the hash table,
+ * otherwise returns 0
+ */
int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr,
struct sockaddr_storage *mapped_local_addr)
{
@@ -186,7 +184,6 @@ remove_mapinfo_exit:
spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
return ret;
}
-EXPORT_SYMBOL(iwpm_remove_mapinfo);
static void free_hash_bucket(void)
{
@@ -253,10 +250,21 @@ void iwpm_add_remote_info(struct iwpm_remote_info *rem_info)
spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
}
+/**
+ * iwpm_get_remote_info - Get the remote connecting peer address info
+ *
+ * @mapped_loc_addr: Mapped local address of the listening peer
+ * @mapped_rem_addr: Mapped remote address of the connecting peer
+ * @remote_addr: To store the remote address of the connecting peer
+ * @nl_client: The index of the netlink client
+ *
+ * The remote address info is retrieved and provided to the client in
+ * the remote_addr. After that it is removed from the hash table
+ */
int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr,
- struct sockaddr_storage *mapped_rem_addr,
- struct sockaddr_storage *remote_addr,
- u8 nl_client)
+ struct sockaddr_storage *mapped_rem_addr,
+ struct sockaddr_storage *remote_addr,
+ u8 nl_client)
{
struct hlist_node *tmp_hlist_node;
struct hlist_head *hash_bucket_head;
@@ -264,10 +272,6 @@ int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr,
unsigned long flags;
int ret = -EINVAL;
- if (!iwpm_valid_client(nl_client)) {
- pr_info("%s: Invalid client = %d\n", __func__, nl_client);
- return ret;
- }
spin_lock_irqsave(&iwpm_reminfo_lock, flags);
if (iwpm_reminfo_bucket) {
hash_bucket_head = get_reminfo_hash_bucket(
@@ -299,19 +303,17 @@ get_remote_info_exit:
spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
return ret;
}
-EXPORT_SYMBOL(iwpm_get_remote_info);
struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
u8 nl_client, gfp_t gfp)
{
- struct iwpm_nlmsg_request *nlmsg_request = NULL;
+ struct iwpm_nlmsg_request *nlmsg_request;
unsigned long flags;
nlmsg_request = kzalloc(sizeof(struct iwpm_nlmsg_request), gfp);
- if (!nlmsg_request) {
- pr_err("%s Unable to allocate a nlmsg_request\n", __func__);
+ if (!nlmsg_request)
return NULL;
- }
+
spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags);
list_add_tail(&nlmsg_request->inprocess_list, &iwpm_nlmsg_req_list);
spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags);
@@ -322,6 +324,8 @@ struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
nlmsg_request->nl_client = nl_client;
nlmsg_request->request_done = 0;
nlmsg_request->err_code = 0;
+ sema_init(&nlmsg_request->sem, 1);
+ down(&nlmsg_request->sem);
return nlmsg_request;
}
@@ -364,11 +368,9 @@ struct iwpm_nlmsg_request *iwpm_find_nlmsg_request(__u32 echo_seq)
int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request)
{
int ret;
- init_waitqueue_head(&nlmsg_request->waitq);
- ret = wait_event_timeout(nlmsg_request->waitq,
- (nlmsg_request->request_done != 0), IWPM_NL_TIMEOUT);
- if (!ret) {
+ ret = down_timeout(&nlmsg_request->sem, IWPM_NL_TIMEOUT);
+ if (ret) {
ret = -EINVAL;
pr_info("%s: Timeout %d sec for netlink request (seq = %u)\n",
__func__, (IWPM_NL_TIMEOUT/HZ), nlmsg_request->nlmsg_seq);
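The conversion above replaces the open-coded waitqueue with a semaphore that is created already held: iwpm_get_nlmsg_request() initialises it to 1 and immediately takes it, the netlink callbacks release it with up(), and the requester blocks in down_timeout(). A minimal self-contained sketch of that pattern (example_* names are illustrative, not part of the patch):

#include <linux/errno.h>
#include <linux/semaphore.h>

struct example_request {
	struct semaphore sem;
	int err_code;
};

static void example_request_init(struct example_request *req)
{
	sema_init(&req->sem, 1);
	down(&req->sem);		/* hold it until the response arrives */
	req->err_code = 0;
}

/* Called from the netlink response handler, like up(&nlmsg_request->sem). */
static void example_request_complete(struct example_request *req, int err)
{
	req->err_code = err;
	up(&req->sem);
}

/* Called by the requester; maps a timeout to -EINVAL as the code above does. */
static int example_request_wait(struct example_request *req, long timeout)
{
	if (down_timeout(&req->sem, timeout))
		return -EINVAL;
	return req->err_code;
}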
@@ -384,20 +386,6 @@ int iwpm_get_nlmsg_seq(void)
return atomic_inc_return(&iwpm_admin.nlmsg_seq);
}
-int iwpm_valid_client(u8 nl_client)
-{
- if (nl_client >= RDMA_NL_NUM_CLIENTS)
- return 0;
- return iwpm_admin.client_list[nl_client];
-}
-
-void iwpm_set_valid(u8 nl_client, int valid)
-{
- if (nl_client >= RDMA_NL_NUM_CLIENTS)
- return;
- iwpm_admin.client_list[nl_client] = valid;
-}
-
/* valid client */
u32 iwpm_get_registration(u8 nl_client)
{
@@ -452,11 +440,10 @@ struct sk_buff *iwpm_create_nlmsg(u32 nl_op, struct nlmsghdr **nlh,
{
struct sk_buff *skb = NULL;
- skb = dev_alloc_skb(NLMSG_GOODSIZE);
- if (!skb) {
- pr_err("%s Unable to allocate skb\n", __func__);
+ skb = dev_alloc_skb(IWPM_MSG_SIZE);
+ if (!skb)
goto create_nlmsg_exit;
- }
+
if (!(ibnl_put_msg(skb, nlh, 0, 0, nl_client, nl_op,
NLM_F_REQUEST))) {
pr_warn("%s: Unable to put the nlmsg header\n", __func__);
@@ -475,12 +462,14 @@ int iwpm_parse_nlmsg(struct netlink_callback *cb, int policy_max,
int ret;
const char *err_str = "";
- ret = nlmsg_validate(cb->nlh, nlh_len, policy_max-1, nlmsg_policy);
+ ret = nlmsg_validate_deprecated(cb->nlh, nlh_len, policy_max - 1,
+ nlmsg_policy, NULL);
if (ret) {
err_str = "Invalid attribute";
goto parse_nlmsg_error;
}
- ret = nlmsg_parse(cb->nlh, nlh_len, nltb, policy_max-1, nlmsg_policy);
+ ret = nlmsg_parse_deprecated(cb->nlh, nlh_len, nltb, policy_max - 1,
+ nlmsg_policy, NULL);
if (ret) {
err_str = "Unable to parse the nlmsg";
goto parse_nlmsg_error;
@@ -609,18 +598,20 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
&mapping_num, IWPM_NLA_MAPINFO_SEND_NUM);
if (ret)
goto mapinfo_num_error;
- ret = ibnl_unicast(skb, nlh, iwpm_pid);
+
+ nlmsg_end(skb, nlh);
+
+ ret = rdma_nl_unicast(&init_net, skb, iwpm_pid);
if (ret) {
skb = NULL;
err_str = "Unable to send a nlmsg";
goto mapinfo_num_error;
}
- pr_debug("%s: Sent mapping number = %d\n", __func__, mapping_num);
+ pr_debug("%s: Sent mapping number = %u\n", __func__, mapping_num);
return 0;
mapinfo_num_error:
pr_info("%s: %s\n", __func__, err_str);
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
return ret;
}
@@ -634,10 +625,11 @@ static int send_nlmsg_done(struct sk_buff *skb, u8 nl_client, int iwpm_pid)
if (!(ibnl_put_msg(skb, &nlh, 0, 0, nl_client,
RDMA_NL_IWPM_MAPINFO, NLM_F_MULTI))) {
pr_warn("%s Unable to put NLMSG_DONE\n", __func__);
+ dev_kfree_skb(skb);
return -ENOMEM;
}
nlh->nlmsg_type = NLMSG_DONE;
- ret = ibnl_unicast(skb, (struct nlmsghdr *)skb->data, iwpm_pid);
+ ret = rdma_nl_unicast(&init_net, skb, iwpm_pid);
if (ret)
pr_warn("%s Unable to send a nlmsg\n", __func__);
return ret;
@@ -662,6 +654,7 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
}
skb_num++;
spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
+ ret = -EINVAL;
for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
hlist_for_each_entry(map_info, &iwpm_hash_bucket[i],
hlist_node) {
@@ -689,6 +682,16 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
if (ret)
goto send_mapping_info_unlock;
+ if (iwpm_ulib_version > IWPM_UABI_VERSION_MIN) {
+ ret = ibnl_put_attr(skb, nlh, sizeof(u32),
+ &map_info->map_flags,
+ IWPM_NLA_MAPINFO_FLAGS);
+ if (ret)
+ goto send_mapping_info_unlock;
+ }
+
+ nlmsg_end(skb, nlh);
+
iwpm_print_sockaddr(&map_info->local_sockaddr,
"send_mapping_info: Local sockaddr:");
iwpm_print_sockaddr(&map_info->mapped_sockaddr,
@@ -730,8 +733,7 @@ send_mapping_info_unlock:
send_mapping_info_exit:
if (ret) {
pr_warn("%s: %s (ret = %d)\n", __func__, err_str, ret);
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
return ret;
}
send_nlmsg_done(skb, nl_client, iwpm_pid);
@@ -755,3 +757,37 @@ int iwpm_mapinfo_available(void)
spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
return full_bucket;
}
+
+int iwpm_send_hello(u8 nl_client, int iwpm_pid, u16 abi_version)
+{
+ struct sk_buff *skb = NULL;
+ struct nlmsghdr *nlh;
+ const char *err_str;
+ int ret = -EINVAL;
+
+ skb = iwpm_create_nlmsg(RDMA_NL_IWPM_HELLO, &nlh, nl_client);
+ if (!skb) {
+ err_str = "Unable to create a nlmsg";
+ goto hello_num_error;
+ }
+ nlh->nlmsg_seq = iwpm_get_nlmsg_seq();
+ err_str = "Unable to put attribute of abi_version into nlmsg";
+ ret = ibnl_put_attr(skb, nlh, sizeof(u16), &abi_version,
+ IWPM_NLA_HELLO_ABI_VERSION);
+ if (ret)
+ goto hello_num_error;
+ nlmsg_end(skb, nlh);
+
+ ret = rdma_nl_unicast(&init_net, skb, iwpm_pid);
+ if (ret) {
+ skb = NULL;
+ err_str = "Unable to send a nlmsg";
+ goto hello_num_error;
+ }
+ pr_debug("%s: Sent hello abi_version = %u\n", __func__, abi_version);
+ return 0;
+hello_num_error:
+ pr_info("%s: %s\n", __func__, err_str);
+ dev_kfree_skb(skb);
+ return ret;
+}
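Several error paths in this file now call dev_kfree_skb() without the old "if (skb)" guard and set skb to NULL once rdma_nl_unicast() has taken ownership; the free routines accept a NULL pointer, so the common cleanup stays a single line. A minimal sketch of that convention (the example_ name is hypothetical):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <rdma/rdma_netlink.h>

static int example_send(struct sk_buff *skb, struct nlmsghdr *nlh, int pid)
{
	int ret = -EINVAL;

	if (!nlh)
		goto err;	/* the skb is still ours on this path */

	nlmsg_end(skb, nlh);
	ret = rdma_nl_unicast(&init_net, skb, pid);
	if (ret) {
		/* The netlink core has already consumed the skb here. */
		skb = NULL;
		goto err;
	}
	return 0;
err:
	dev_kfree_skb(skb);	/* NULL-safe, so no "if (skb)" guard is needed */
	return ret;
}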
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
index b7b9e194ce81..d6fc8402158a 100644
--- a/drivers/infiniband/core/iwpm_util.h
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -33,7 +33,6 @@
#ifndef _IWPM_UTIL_H
#define _IWPM_UTIL_H
-#include <linux/module.h>
#include <linux/io.h>
#include <linux/in.h>
#include <linux/in6.h>
@@ -69,7 +68,7 @@ struct iwpm_nlmsg_request {
u8 nl_client;
u8 request_done;
u16 err_code;
- wait_queue_head_t waitq;
+ struct semaphore sem;
struct kref kref;
};
@@ -78,6 +77,7 @@ struct iwpm_mapping_info {
struct sockaddr_storage local_sockaddr;
struct sockaddr_storage mapped_sockaddr;
u8 nl_client;
+ u32 map_flags;
};
struct iwpm_remote_info {
@@ -89,9 +89,7 @@ struct iwpm_remote_info {
};
struct iwpm_admin_data {
- atomic_t refcount;
atomic_t nlmsg_seq;
- int client_list[RDMA_NL_NUM_CLIENTS];
u32 reg_list[RDMA_NL_NUM_CLIENTS];
};
@@ -140,29 +138,13 @@ int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request);
int iwpm_get_nlmsg_seq(void);
/**
- * iwpm_add_reminfo - Add remote address info of the connecting peer
+ * iwpm_add_remote_info - Add remote address info of the connecting peer
* to the remote info hash table
* @reminfo: The remote info to be added
*/
void iwpm_add_remote_info(struct iwpm_remote_info *reminfo);
/**
- * iwpm_valid_client - Check if the port mapper client is valid
- * @nl_client: The index of the netlink client
- *
- * Valid clients need to call iwpm_init() before using
- * the port mapper
- */
-int iwpm_valid_client(u8 nl_client);
-
-/**
- * iwpm_set_valid - Set the port mapper client to valid or not
- * @nl_client: The index of the netlink client
- * @valid: 1 if valid or 0 if invalid
- */
-void iwpm_set_valid(u8 nl_client, int valid);
-
-/**
* iwpm_check_registration - Check if the client registration
* matches the given one
* @nl_client: The index of the netlink client
@@ -182,7 +164,7 @@ u32 iwpm_check_registration(u8 nl_client, u32 reg);
void iwpm_set_registration(u8 nl_client, u32 reg);
/**
- * iwpm_get_registration
+ * iwpm_get_registration - Get the client registration
* @nl_client: The index of the netlink client
*
* Returns the client registration type
@@ -209,8 +191,10 @@ int iwpm_mapinfo_available(void);
/**
* iwpm_compare_sockaddr - Compare two sockaddr storage structs
+ * @a_sockaddr: first sockaddr to compare
+ * @b_sockaddr: second sockaddr to compare
*
- * Returns 0 if they are holding the same ip/tcp address info,
+ * Return: 0 if they are holding the same ip/tcp address info,
* otherwise returns 1
*/
int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr,
@@ -266,4 +250,16 @@ int iwpm_parse_nlmsg(struct netlink_callback *cb, int policy_max,
* @msg: Message to print
*/
void iwpm_print_sockaddr(struct sockaddr_storage *sockaddr, char *msg);
+
+/**
+ * iwpm_send_hello - Send hello response to iwpmd
+ *
+ * @nl_client: The index of the netlink client
+ * @iwpm_pid: The pid of the user space port mapper
+ * @abi_version: The kernel's abi_version
+ *
+ * Returns 0 on success or a negative error code
+ */
+int iwpm_send_hello(u8 nl_client, int iwpm_pid, u16 abi_version);
+extern u16 iwpm_ulib_version;
#endif
diff --git a/drivers/infiniband/core/lag.c b/drivers/infiniband/core/lag.c
new file mode 100644
index 000000000000..8fd80adfe833
--- /dev/null
+++ b/drivers/infiniband/core/lag.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020 Mellanox Technologies. All rights reserved.
+ */
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_cache.h>
+#include <rdma/lag.h>
+
+static struct sk_buff *rdma_build_skb(struct net_device *netdev,
+ struct rdma_ah_attr *ah_attr,
+ gfp_t flags)
+{
+ struct ipv6hdr *ip6h;
+ struct sk_buff *skb;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ struct udphdr *uh;
+ u8 smac[ETH_ALEN];
+ bool is_ipv4;
+ int hdr_len;
+
+ is_ipv4 = ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw);
+ hdr_len = ETH_HLEN + sizeof(struct udphdr) + LL_RESERVED_SPACE(netdev);
+ hdr_len += is_ipv4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr);
+
+ skb = alloc_skb(hdr_len, flags);
+ if (!skb)
+ return NULL;
+
+ skb->dev = netdev;
+ skb_reserve(skb, hdr_len);
+ skb_push(skb, sizeof(struct udphdr));
+ skb_reset_transport_header(skb);
+ uh = udp_hdr(skb);
+ uh->source =
+ htons(rdma_flow_label_to_udp_sport(ah_attr->grh.flow_label));
+ uh->dest = htons(ROCE_V2_UDP_DPORT);
+ uh->len = htons(sizeof(struct udphdr));
+
+ if (is_ipv4) {
+ skb_push(skb, sizeof(struct iphdr));
+ skb_reset_network_header(skb);
+ iph = ip_hdr(skb);
+ iph->frag_off = 0;
+ iph->version = 4;
+ iph->protocol = IPPROTO_UDP;
+ iph->ihl = 0x5;
+ iph->tot_len = htons(sizeof(struct udphdr) + sizeof(struct iphdr));
+ memcpy(&iph->saddr, ah_attr->grh.sgid_attr->gid.raw + 12,
+ sizeof(struct in_addr));
+ memcpy(&iph->daddr, ah_attr->grh.dgid.raw + 12,
+ sizeof(struct in_addr));
+ } else {
+ skb_push(skb, sizeof(struct ipv6hdr));
+ skb_reset_network_header(skb);
+ ip6h = ipv6_hdr(skb);
+ ip6h->version = 6;
+ ip6h->nexthdr = IPPROTO_UDP;
+ memcpy(&ip6h->flow_lbl, &ah_attr->grh.flow_label,
+ sizeof(*ip6h->flow_lbl));
+ memcpy(&ip6h->saddr, ah_attr->grh.sgid_attr->gid.raw,
+ sizeof(struct in6_addr));
+ memcpy(&ip6h->daddr, ah_attr->grh.dgid.raw,
+ sizeof(struct in6_addr));
+ }
+
+ skb_push(skb, sizeof(struct ethhdr));
+ skb_reset_mac_header(skb);
+ eth = eth_hdr(skb);
+ skb->protocol = eth->h_proto = htons(is_ipv4 ? ETH_P_IP : ETH_P_IPV6);
+ rdma_read_gid_l2_fields(ah_attr->grh.sgid_attr, NULL, smac);
+ memcpy(eth->h_source, smac, ETH_ALEN);
+ memcpy(eth->h_dest, ah_attr->roce.dmac, ETH_ALEN);
+
+ return skb;
+}
+
+static struct net_device *rdma_get_xmit_slave_udp(struct ib_device *device,
+ struct net_device *master,
+ struct rdma_ah_attr *ah_attr,
+ gfp_t flags)
+{
+ struct net_device *slave;
+ struct sk_buff *skb;
+
+ skb = rdma_build_skb(master, ah_attr, flags);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ rcu_read_lock();
+ slave = netdev_get_xmit_slave(master, skb,
+ !!(device->lag_flags &
+ RDMA_LAG_FLAGS_HASH_ALL_SLAVES));
+ dev_hold(slave);
+ rcu_read_unlock();
+ kfree_skb(skb);
+ return slave;
+}
+
+void rdma_lag_put_ah_roce_slave(struct net_device *xmit_slave)
+{
+ dev_put(xmit_slave);
+}
+
+struct net_device *rdma_lag_get_ah_roce_slave(struct ib_device *device,
+ struct rdma_ah_attr *ah_attr,
+ gfp_t flags)
+{
+ struct net_device *slave = NULL;
+ struct net_device *master;
+
+ if (!(ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE &&
+ ah_attr->grh.sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP &&
+ ah_attr->grh.flow_label))
+ return NULL;
+
+ rcu_read_lock();
+ master = rdma_read_gid_attr_ndev_rcu(ah_attr->grh.sgid_attr);
+ if (IS_ERR(master)) {
+ rcu_read_unlock();
+ return master;
+ }
+ dev_hold(master);
+ rcu_read_unlock();
+
+ if (!netif_is_bond_master(master))
+ goto put;
+
+ slave = rdma_get_xmit_slave_udp(device, master, ah_attr, flags);
+put:
+ dev_put(master);
+ return slave;
+}
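The new helpers are meant to be called from a RoCE driver's address-handle path: rdma_lag_get_ah_roce_slave() returns NULL when there is nothing to pin (no bond or no flow label), an ERR_PTR on failure, or a held slave netdev that the driver programs and then releases. A minimal sketch of such a caller, with a hypothetical example_program_slave() standing in for the device-specific part:

#include <linux/err.h>
#include <rdma/ib_verbs.h>
#include <rdma/lag.h>

/* Illustrative stand-in for driver-specific steering of the AH's traffic. */
static void example_program_slave(struct ib_device *ibdev,
				  struct rdma_ah_attr *ah_attr,
				  struct net_device *slave)
{
}

static int example_resolve_lag_slave(struct ib_device *ibdev,
				     struct rdma_ah_attr *ah_attr)
{
	struct net_device *slave;

	slave = rdma_lag_get_ah_roce_slave(ibdev, ah_attr, GFP_KERNEL);
	if (IS_ERR(slave))
		return PTR_ERR(slave);

	if (slave) {
		example_program_slave(ibdev, ah_attr, slave);
		rdma_lag_put_ah_roce_slave(slave);	/* drops the dev_hold() */
	}
	return 0;
}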
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 4b5c72311deb..8f26bfb69586 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -3,7 +3,7 @@
* Copyright (c) 2005 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2009 HNR Consulting. All rights reserved.
- * Copyright (c) 2014 Intel Corporation. All rights reserved.
+ * Copyright (c) 2014,2018 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -40,18 +40,37 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/security.h>
+#include <linux/xarray.h>
#include <rdma/ib_cache.h>
#include "mad_priv.h"
+#include "core_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("kernel IB MAD API");
-MODULE_AUTHOR("Hal Rosenstock");
-MODULE_AUTHOR("Sean Hefty");
+#define CREATE_TRACE_POINTS
+#include <trace/events/ib_mad.h>
+
+#ifdef CONFIG_TRACEPOINTS
+static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
+ struct ib_mad_qp_info *qp_info,
+ struct trace_event_raw_ib_mad_send_template *entry)
+{
+ struct ib_ud_wr *wr = &mad_send_wr->send_wr;
+ struct rdma_ah_attr attr = {};
+
+ rdma_query_ah(wr->ah, &attr);
+
+ /* These are common */
+ entry->sl = attr.sl;
+ entry->rqpn = wr->remote_qpn;
+ entry->rqkey = wr->remote_qkey;
+ entry->dlid = rdma_ah_get_dlid(&attr);
+}
+#endif
static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
@@ -61,8 +80,9 @@ MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
+static DEFINE_XARRAY_ALLOC1(ib_mad_clients);
+static u32 ib_mad_client_next;
static struct list_head ib_mad_port_list;
-static u32 ib_mad_client_id = 0;
/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);
@@ -84,13 +104,16 @@ static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
struct ib_mad_agent_private *agent_priv);
+static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
+ struct ib_wc *wc);
+static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);
/*
* Returns a ib_mad_port_private structure or NULL for a device/port
* Assumes ib_mad_port_list_lock is being held
*/
static inline struct ib_mad_port_private *
-__ib_get_mad_port(struct ib_device *device, int port_num)
+__ib_get_mad_port(struct ib_device *device, u32 port_num)
{
struct ib_mad_port_private *entry;
@@ -106,7 +129,7 @@ __ib_get_mad_port(struct ib_device *device, int port_num)
* for a device/port
*/
static inline struct ib_mad_port_private *
-ib_get_mad_port(struct ib_device *device, int port_num)
+ib_get_mad_port(struct ib_device *device, u32 port_num)
{
struct ib_mad_port_private *entry;
unsigned long flags;
@@ -127,8 +150,7 @@ static inline u8 convert_mgmt_class(u8 mgmt_class)
static int get_spl_qp_index(enum ib_qp_type qp_type)
{
- switch (qp_type)
- {
+ switch (qp_type) {
case IB_QPT_SMI:
return 0;
case IB_QPT_GSI:
@@ -188,11 +210,36 @@ int ib_response_mad(const struct ib_mad_hdr *hdr)
}
EXPORT_SYMBOL(ib_response_mad);
+#define SOL_FC_MAX_DEFAULT_FRAC 4
+#define SOL_FC_MAX_SA_FRAC 32
+
+static int get_sol_fc_max_outstanding(struct ib_mad_reg_req *mad_reg_req)
+{
+ if (!mad_reg_req)
+ /* Send only agent */
+ return mad_recvq_size / SOL_FC_MAX_DEFAULT_FRAC;
+
+ switch (mad_reg_req->mgmt_class) {
+ case IB_MGMT_CLASS_CM:
+ return mad_recvq_size / SOL_FC_MAX_DEFAULT_FRAC;
+ case IB_MGMT_CLASS_SUBN_ADM:
+ return mad_recvq_size / SOL_FC_MAX_SA_FRAC;
+ case IB_MGMT_CLASS_SUBN_LID_ROUTED:
+ case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
+ return min(mad_recvq_size, IB_MAD_QP_RECV_SIZE) /
+ SOL_FC_MAX_DEFAULT_FRAC;
+ default:
+ return 0;
+ }
+}
+
/*
* ib_register_mad_agent - Register to send/receive MADs
+ *
+ * Context: Process context.
*/
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
enum ib_qp_type qp_type,
struct ib_mad_reg_req *mad_reg_req,
u8 rmpp_version,
@@ -210,36 +257,39 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
struct ib_mad_mgmt_vendor_class *vendor_class;
struct ib_mad_mgmt_method_table *method;
int ret2, qpn;
- unsigned long flags;
u8 mgmt_class, vclass;
+ if ((qp_type == IB_QPT_SMI && !rdma_cap_ib_smi(device, port_num)) ||
+ (qp_type == IB_QPT_GSI && !rdma_cap_ib_cm(device, port_num)))
+ return ERR_PTR(-EPROTONOSUPPORT);
+
/* Validate parameters */
qpn = get_spl_qp_index(qp_type);
if (qpn == -1) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: invalid QP Type %d\n",
- qp_type);
+ dev_dbg_ratelimited(&device->dev, "%s: invalid QP Type %d\n",
+ __func__, qp_type);
goto error1;
}
if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: invalid RMPP Version %u\n",
- rmpp_version);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: invalid RMPP Version %u\n",
+ __func__, rmpp_version);
goto error1;
}
/* Validate MAD registration request if supplied */
if (mad_reg_req) {
if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: invalid Class Version %u\n",
- mad_reg_req->mgmt_class_version);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: invalid Class Version %u\n",
+ __func__,
+ mad_reg_req->mgmt_class_version);
goto error1;
}
if (!recv_handler) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: no recv_handler\n");
+ dev_dbg_ratelimited(&device->dev,
+ "%s: no recv_handler\n", __func__);
goto error1;
}
if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
@@ -249,9 +299,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
*/
if (mad_reg_req->mgmt_class !=
IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
- mad_reg_req->mgmt_class);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: Invalid Mgmt Class 0x%x\n",
+ __func__, mad_reg_req->mgmt_class);
goto error1;
}
} else if (mad_reg_req->mgmt_class == 0) {
@@ -259,8 +309,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
* Class 0 is reserved in IBA and is used for
* aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
*/
- dev_notice(&device->dev,
- "ib_register_mad_agent: Invalid Mgmt Class 0\n");
+ dev_dbg_ratelimited(&device->dev,
+ "%s: Invalid Mgmt Class 0\n",
+ __func__);
goto error1;
} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
/*
@@ -268,18 +319,19 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
* ensure supplied OUI is not zero
*/
if (!is_vendor_oui(mad_reg_req->oui)) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: No OUI specified for class 0x%x\n",
- mad_reg_req->mgmt_class);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: No OUI specified for class 0x%x\n",
+ __func__,
+ mad_reg_req->mgmt_class);
goto error1;
}
}
/* Make sure class supplied is consistent with RMPP */
if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
if (rmpp_version) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
- mad_reg_req->mgmt_class);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: RMPP version for non-RMPP class 0x%x\n",
+ __func__, mad_reg_req->mgmt_class);
goto error1;
}
}
@@ -290,9 +342,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
(mad_reg_req->mgmt_class !=
IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
- mad_reg_req->mgmt_class);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: Invalid SM QP type: class 0x%x\n",
+ __func__, mad_reg_req->mgmt_class);
goto error1;
}
} else {
@@ -300,9 +352,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
(mad_reg_req->mgmt_class ==
IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
- mad_reg_req->mgmt_class);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: Invalid GS QP type: class 0x%x\n",
+ __func__, mad_reg_req->mgmt_class);
goto error1;
}
}
@@ -317,16 +369,18 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
/* Validate device and port */
port_priv = ib_get_mad_port(device, port_num);
if (!port_priv) {
- dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
+ dev_dbg_ratelimited(&device->dev, "%s: Invalid port %u\n",
+ __func__, port_num);
ret = ERR_PTR(-ENODEV);
goto error1;
}
- /* Verify the QP requested is supported. For example, Ethernet devices
- * will not have QP0 */
+ /* Verify the QP requested is supported. For example, Ethernet devices
+ * will not have QP0.
+ */
if (!port_priv->qp_info[qpn].qp) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: QP %d not supported\n", qpn);
+ dev_dbg_ratelimited(&device->dev, "%s: QP %d not supported\n",
+ __func__, qpn);
ret = ERR_PTR(-EPROTONOSUPPORT);
goto error1;
}
@@ -360,21 +414,41 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
spin_lock_init(&mad_agent_priv->lock);
INIT_LIST_HEAD(&mad_agent_priv->send_list);
INIT_LIST_HEAD(&mad_agent_priv->wait_list);
- INIT_LIST_HEAD(&mad_agent_priv->done_list);
INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
+ INIT_LIST_HEAD(&mad_agent_priv->backlog_list);
INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
INIT_LIST_HEAD(&mad_agent_priv->local_list);
INIT_WORK(&mad_agent_priv->local_work, local_completions);
- atomic_set(&mad_agent_priv->refcount, 1);
+ refcount_set(&mad_agent_priv->refcount, 1);
init_completion(&mad_agent_priv->comp);
+ mad_agent_priv->sol_fc_send_count = 0;
+ mad_agent_priv->sol_fc_wait_count = 0;
+ mad_agent_priv->sol_fc_max =
+ recv_handler ? get_sol_fc_max_outstanding(mad_reg_req) : 0;
+
+ ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);
+ if (ret2) {
+ ret = ERR_PTR(ret2);
+ goto error4;
+ }
- spin_lock_irqsave(&port_priv->reg_lock, flags);
- mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
+ /*
+ * The mlx4 driver uses the top byte to distinguish which virtual
+ * function generated the MAD, so we must avoid using it.
+ */
+ ret2 = xa_alloc_cyclic(&ib_mad_clients, &mad_agent_priv->agent.hi_tid,
+ mad_agent_priv, XA_LIMIT(0, (1 << 24) - 1),
+ &ib_mad_client_next, GFP_KERNEL);
+ if (ret2 < 0) {
+ ret = ERR_PTR(ret2);
+ goto error5;
+ }
/*
* Make sure MAD registration (if supplied)
* is non overlapping with any existing ones
*/
+ spin_lock_irq(&port_priv->reg_lock);
if (mad_reg_req) {
mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
if (!is_vendor_class(mgmt_class)) {
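The agent ID now comes from an allocating xarray instead of the old ++ib_mad_client_id under reg_lock; the 24-bit limit keeps the top byte free for mlx4's virtual-function encoding, and the kfree_rcu() in unregister_mad_agent() above suggests lookups by hi_tid are meant to happen under RCU (e.g. via xa_load()). A minimal self-contained sketch of the same cyclic-allocation scheme, with hypothetical example_* names:

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC1(example_clients);
static u32 example_next_id;

struct example_client {
	u32 id;
};

static int example_register(struct example_client *c)
{
	int err;

	/* IDs stay below 1 << 24 so the top byte remains available. */
	err = xa_alloc_cyclic(&example_clients, &c->id, c,
			      XA_LIMIT(0, (1 << 24) - 1),
			      &example_next_id, GFP_KERNEL);
	/* xa_alloc_cyclic() returns 1 on wrap-around; only < 0 is an error. */
	return err < 0 ? err : 0;
}

static void example_unregister(struct example_client *c)
{
	xa_erase(&example_clients, c->id);
}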
@@ -385,7 +459,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
if (method) {
if (method_in_use(&method,
mad_reg_req))
- goto error4;
+ goto error6;
}
}
ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
@@ -401,25 +475,26 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
if (is_vendor_method_in_use(
vendor_class,
mad_reg_req))
- goto error4;
+ goto error6;
}
}
ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
}
if (ret2) {
ret = ERR_PTR(ret2);
- goto error4;
+ goto error6;
}
}
+ spin_unlock_irq(&port_priv->reg_lock);
- /* Add mad agent into port's agent list */
- list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
- spin_unlock_irqrestore(&port_priv->reg_lock, flags);
-
+ trace_ib_mad_create_agent(mad_agent_priv);
return &mad_agent_priv->agent;
-
+error6:
+ spin_unlock_irq(&port_priv->reg_lock);
+ xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
+error5:
+ ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
error4:
- spin_unlock_irqrestore(&port_priv->reg_lock, flags);
kfree(reg_req);
error3:
kfree(mad_agent_priv);
@@ -428,138 +503,18 @@ error1:
}
EXPORT_SYMBOL(ib_register_mad_agent);
-static inline int is_snooping_sends(int mad_snoop_flags)
-{
- return (mad_snoop_flags &
- (/*IB_MAD_SNOOP_POSTED_SENDS |
- IB_MAD_SNOOP_RMPP_SENDS |*/
- IB_MAD_SNOOP_SEND_COMPLETIONS /*|
- IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
-}
-
-static inline int is_snooping_recvs(int mad_snoop_flags)
-{
- return (mad_snoop_flags &
- (IB_MAD_SNOOP_RECVS /*|
- IB_MAD_SNOOP_RMPP_RECVS*/));
-}
-
-static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
- struct ib_mad_snoop_private *mad_snoop_priv)
-{
- struct ib_mad_snoop_private **new_snoop_table;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- /* Check for empty slot in array. */
- for (i = 0; i < qp_info->snoop_table_size; i++)
- if (!qp_info->snoop_table[i])
- break;
-
- if (i == qp_info->snoop_table_size) {
- /* Grow table. */
- new_snoop_table = krealloc(qp_info->snoop_table,
- sizeof mad_snoop_priv *
- (qp_info->snoop_table_size + 1),
- GFP_ATOMIC);
- if (!new_snoop_table) {
- i = -ENOMEM;
- goto out;
- }
-
- qp_info->snoop_table = new_snoop_table;
- qp_info->snoop_table_size++;
- }
- qp_info->snoop_table[i] = mad_snoop_priv;
- atomic_inc(&qp_info->snoop_count);
-out:
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
- return i;
-}
-
-struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
- u8 port_num,
- enum ib_qp_type qp_type,
- int mad_snoop_flags,
- ib_mad_snoop_handler snoop_handler,
- ib_mad_recv_handler recv_handler,
- void *context)
-{
- struct ib_mad_port_private *port_priv;
- struct ib_mad_agent *ret;
- struct ib_mad_snoop_private *mad_snoop_priv;
- int qpn;
-
- /* Validate parameters */
- if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
- (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
- ret = ERR_PTR(-EINVAL);
- goto error1;
- }
- qpn = get_spl_qp_index(qp_type);
- if (qpn == -1) {
- ret = ERR_PTR(-EINVAL);
- goto error1;
- }
- port_priv = ib_get_mad_port(device, port_num);
- if (!port_priv) {
- ret = ERR_PTR(-ENODEV);
- goto error1;
- }
- /* Allocate structures */
- mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
- if (!mad_snoop_priv) {
- ret = ERR_PTR(-ENOMEM);
- goto error1;
- }
-
- /* Now, fill in the various structures */
- mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
- mad_snoop_priv->agent.device = device;
- mad_snoop_priv->agent.recv_handler = recv_handler;
- mad_snoop_priv->agent.snoop_handler = snoop_handler;
- mad_snoop_priv->agent.context = context;
- mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
- mad_snoop_priv->agent.port_num = port_num;
- mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
- init_completion(&mad_snoop_priv->comp);
- mad_snoop_priv->snoop_index = register_snoop_agent(
- &port_priv->qp_info[qpn],
- mad_snoop_priv);
- if (mad_snoop_priv->snoop_index < 0) {
- ret = ERR_PTR(mad_snoop_priv->snoop_index);
- goto error2;
- }
-
- atomic_set(&mad_snoop_priv->refcount, 1);
- return &mad_snoop_priv->agent;
-
-error2:
- kfree(mad_snoop_priv);
-error1:
- return ret;
-}
-EXPORT_SYMBOL(ib_register_mad_snoop);
-
static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
- if (atomic_dec_and_test(&mad_agent_priv->refcount))
+ if (refcount_dec_and_test(&mad_agent_priv->refcount))
complete(&mad_agent_priv->comp);
}
-static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
-{
- if (atomic_dec_and_test(&mad_snoop_priv->refcount))
- complete(&mad_snoop_priv->comp);
-}
-
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
struct ib_mad_port_private *port_priv;
- unsigned long flags;
/* Note that we could still be handling received MADs */
+ trace_ib_mad_unregister_agent(mad_agent_priv);
/*
* Canceling all sends results in dropping received response
@@ -569,59 +524,36 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
port_priv = mad_agent_priv->qp_info->port_priv;
cancel_delayed_work(&mad_agent_priv->timed_work);
- spin_lock_irqsave(&port_priv->reg_lock, flags);
+ spin_lock_irq(&port_priv->reg_lock);
remove_mad_reg_req(mad_agent_priv);
- list_del(&mad_agent_priv->agent_list);
- spin_unlock_irqrestore(&port_priv->reg_lock, flags);
+ spin_unlock_irq(&port_priv->reg_lock);
+ xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
flush_workqueue(port_priv->wq);
- ib_cancel_rmpp_recvs(mad_agent_priv);
deref_mad_agent(mad_agent_priv);
wait_for_completion(&mad_agent_priv->comp);
+ ib_cancel_rmpp_recvs(mad_agent_priv);
- kfree(mad_agent_priv->reg_req);
- kfree(mad_agent_priv);
-}
-
-static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
-{
- struct ib_mad_qp_info *qp_info;
- unsigned long flags;
-
- qp_info = mad_snoop_priv->qp_info;
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
- atomic_dec(&qp_info->snoop_count);
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
-
- deref_snoop_agent(mad_snoop_priv);
- wait_for_completion(&mad_snoop_priv->comp);
+ ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
- kfree(mad_snoop_priv);
+ kfree(mad_agent_priv->reg_req);
+ kfree_rcu(mad_agent_priv, rcu);
}
/*
* ib_unregister_mad_agent - Unregisters a client from using MAD services
+ *
+ * Context: Process context.
*/
-int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
+void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
struct ib_mad_agent_private *mad_agent_priv;
- struct ib_mad_snoop_private *mad_snoop_priv;
-
- /* If the TID is zero, the agent can only snoop. */
- if (mad_agent->hi_tid) {
- mad_agent_priv = container_of(mad_agent,
- struct ib_mad_agent_private,
- agent);
- unregister_mad_agent(mad_agent_priv);
- } else {
- mad_snoop_priv = container_of(mad_agent,
- struct ib_mad_snoop_private,
- agent);
- unregister_mad_snoop(mad_snoop_priv);
- }
- return 0;
+
+ mad_agent_priv = container_of(mad_agent,
+ struct ib_mad_agent_private,
+ agent);
+ unregister_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_unregister_mad_agent);
@@ -630,7 +562,6 @@ static void dequeue_mad(struct ib_mad_list_head *mad_list)
struct ib_mad_queue *mad_queue;
unsigned long flags;
- BUG_ON(!mad_list->mad_queue);
mad_queue = mad_list->mad_queue;
spin_lock_irqsave(&mad_queue->lock, flags);
list_del(&mad_list->list);
@@ -638,63 +569,11 @@ static void dequeue_mad(struct ib_mad_list_head *mad_list)
spin_unlock_irqrestore(&mad_queue->lock, flags);
}
-static void snoop_send(struct ib_mad_qp_info *qp_info,
- struct ib_mad_send_buf *send_buf,
- struct ib_mad_send_wc *mad_send_wc,
- int mad_snoop_flags)
-{
- struct ib_mad_snoop_private *mad_snoop_priv;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- for (i = 0; i < qp_info->snoop_table_size; i++) {
- mad_snoop_priv = qp_info->snoop_table[i];
- if (!mad_snoop_priv ||
- !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
- continue;
-
- atomic_inc(&mad_snoop_priv->refcount);
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
- mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
- send_buf, mad_send_wc);
- deref_snoop_agent(mad_snoop_priv);
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- }
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
-}
-
-static void snoop_recv(struct ib_mad_qp_info *qp_info,
- struct ib_mad_recv_wc *mad_recv_wc,
- int mad_snoop_flags)
-{
- struct ib_mad_snoop_private *mad_snoop_priv;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- for (i = 0; i < qp_info->snoop_table_size; i++) {
- mad_snoop_priv = qp_info->snoop_table[i];
- if (!mad_snoop_priv ||
- !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
- continue;
-
- atomic_inc(&mad_snoop_priv->refcount);
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
- mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
- mad_recv_wc);
- deref_snoop_agent(mad_snoop_priv);
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- }
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
-}
-
-static void build_smp_wc(struct ib_qp *qp,
- u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
- struct ib_wc *wc)
+static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
+ u16 pkey_index, u32 port_num, struct ib_wc *wc)
{
memset(wc, 0, sizeof *wc);
- wc->wr_id = wr_id;
+ wc->wr_cqe = cqe;
wc->status = IB_WC_SUCCESS;
wc->opcode = IB_WC_RECV;
wc->pkey_index = pkey_index;
@@ -750,9 +629,9 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
struct ib_mad_port_private *port_priv;
struct ib_mad_agent_private *recv_mad_agent = NULL;
struct ib_device *device = mad_agent_priv->agent.device;
- u8 port_num;
+ u32 port_num;
struct ib_wc mad_wc;
- struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
+ struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
u16 out_mad_pkey_index = 0;
u16 drslid;
@@ -761,7 +640,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
if (rdma_cap_ib_switch(device) &&
smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- port_num = send_wr->wr.ud.port_num;
+ port_num = send_wr->port_num;
else
port_num = mad_agent_priv->agent.port_num;
@@ -771,9 +650,11 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
* If we are at the start of the LID routed part, don't update the
* hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
*/
- if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) {
+ if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
u32 opa_drslid;
+ trace_ib_mad_handle_out_opa_smi(opa_smp);
+
if ((opa_get_smp_direction(opa_smp)
? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
OPA_LID_PERMISSIVE &&
@@ -799,6 +680,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
goto out;
} else {
+ trace_ib_mad_handle_out_ib_smi(smp);
+
if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
IB_LID_PERMISSIVE &&
smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
@@ -818,7 +701,6 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
local = kmalloc(sizeof *local, GFP_ATOMIC);
if (!local) {
ret = -ENOMEM;
- dev_err(&device->dev, "No memory for ib_mad_local_private\n");
goto out;
}
local->mad_priv = NULL;
@@ -826,15 +708,14 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
if (!mad_priv) {
ret = -ENOMEM;
- dev_err(&device->dev, "No memory for local response MAD\n");
kfree(local);
goto out;
}
build_smp_wc(mad_agent_priv->agent.qp,
- send_wr->wr_id, drslid,
- send_wr->wr.ud.pkey_index,
- send_wr->wr.ud.port_num, &mad_wc);
+ send_wr->wr.wr_cqe, drslid,
+ send_wr->pkey_index,
+ send_wr->port_num, &mad_wc);
if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
@@ -843,12 +724,11 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
}
/* No GRH for DR SMP */
- ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
- (const struct ib_mad_hdr *)smp, mad_size,
- (struct ib_mad_hdr *)mad_priv->mad,
- &mad_size, &out_mad_pkey_index);
- switch (ret)
- {
+ ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL,
+ (const struct ib_mad *)smp,
+ (struct ib_mad *)mad_priv->mad, &mad_size,
+ &out_mad_pkey_index);
+ switch (ret) {
case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
mad_agent_priv->agent.recv_handler) {
@@ -858,7 +738,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
* Reference MAD agent until receive
* side of local completion handled
*/
- atomic_inc(&mad_agent_priv->refcount);
+ refcount_inc(&mad_agent_priv->refcount);
} else
kfree(mad_priv);
break;
@@ -894,11 +774,11 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
local->mad_send_wr = mad_send_wr;
if (opa) {
- local->mad_send_wr->send_wr.wr.ud.pkey_index = out_mad_pkey_index;
+ local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
local->return_wc_byte_len = mad_size;
}
/* Reference MAD agent until send side of local completion handled */
- atomic_inc(&mad_agent_priv->refcount);
+ refcount_inc(&mad_agent_priv->refcount);
/* Queue local completion to local list */
spin_lock_irqsave(&mad_agent_priv->lock, flags);
list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
@@ -947,11 +827,8 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
/* Allocate data segments. */
for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
- seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
+ seg = kmalloc(sizeof(*seg) + seg_size, gfp_mask);
if (!seg) {
- dev_err(&send_buf->mad_agent->device->dev,
- "alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
- sizeof (*seg) + seg_size, gfp_mask);
free_send_rmpp_list(send_wr);
return -ENOMEM;
}
@@ -980,12 +857,11 @@ int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
-struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
- u32 remote_qpn, u16 pkey_index,
- int rmpp_active,
- int hdr_len, int data_len,
- gfp_t gfp_mask,
- u8 base_version)
+struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
+ u32 remote_qpn, u16 pkey_index,
+ int rmpp_active, int hdr_len,
+ int data_len, gfp_t gfp_mask,
+ u8 base_version)
{
struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_send_wr_private *mad_send_wr;
@@ -1039,14 +915,16 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;
- mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
- mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
- mad_send_wr->send_wr.num_sge = 2;
- mad_send_wr->send_wr.opcode = IB_WR_SEND;
- mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
- mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
- mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
- mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
+ mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
+
+ mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
+ mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
+ mad_send_wr->send_wr.wr.num_sge = 2;
+ mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
+ mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
+ mad_send_wr->send_wr.remote_qpn = remote_qpn;
+ mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
+ mad_send_wr->send_wr.pkey_index = pkey_index;
if (rmpp_active) {
ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
@@ -1057,7 +935,7 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
}
mad_send_wr->send_buf.mad_agent = mad_agent;
- atomic_inc(&mad_agent_priv->refcount);
+ refcount_inc(&mad_agent_priv->refcount);
return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
@@ -1143,7 +1021,6 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
struct ib_mad_qp_info *qp_info;
struct list_head *list;
- struct ib_send_wr *bad_send_wr;
struct ib_mad_agent *mad_agent;
struct ib_sge *sge;
unsigned long flags;
@@ -1151,8 +1028,9 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
/* Set WR ID to find mad_send_wr upon completion */
qp_info = mad_send_wr->mad_agent_priv->qp_info;
- mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
+ mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
+ mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
mad_agent = mad_send_wr->send_buf.mad_agent;
sge = mad_send_wr->sg_list;
@@ -1179,8 +1057,9 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
spin_lock_irqsave(&qp_info->send_queue.lock, flags);
if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
- ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
- &bad_send_wr);
+ trace_ib_mad_ib_send_mad(mad_send_wr, qp_info);
+ ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
+ NULL);
list = &qp_info->send_queue.list;
} else {
ret = 0;
@@ -1203,6 +1082,180 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
return ret;
}
+static void handle_queued_state(struct ib_mad_send_wr_private *mad_send_wr,
+ struct ib_mad_agent_private *mad_agent_priv)
+{
+ if (mad_send_wr->state == IB_MAD_STATE_WAIT_RESP) {
+ mad_agent_priv->sol_fc_wait_count--;
+ list_move_tail(&mad_send_wr->agent_list,
+ &mad_agent_priv->backlog_list);
+ } else {
+ expect_mad_state(mad_send_wr, IB_MAD_STATE_INIT);
+ list_add_tail(&mad_send_wr->agent_list,
+ &mad_agent_priv->backlog_list);
+ }
+}
+
+static void handle_send_state(struct ib_mad_send_wr_private *mad_send_wr,
+ struct ib_mad_agent_private *mad_agent_priv)
+{
+ if (mad_send_wr->state == IB_MAD_STATE_INIT) {
+ list_add_tail(&mad_send_wr->agent_list,
+ &mad_agent_priv->send_list);
+ } else {
+ expect_mad_state2(mad_send_wr, IB_MAD_STATE_WAIT_RESP,
+ IB_MAD_STATE_QUEUED);
+ list_move_tail(&mad_send_wr->agent_list,
+ &mad_agent_priv->send_list);
+ }
+
+ if (mad_send_wr->is_solicited_fc) {
+ if (mad_send_wr->state == IB_MAD_STATE_WAIT_RESP)
+ mad_agent_priv->sol_fc_wait_count--;
+ mad_agent_priv->sol_fc_send_count++;
+ }
+}
+
+static void handle_wait_state(struct ib_mad_send_wr_private *mad_send_wr,
+ struct ib_mad_agent_private *mad_agent_priv)
+{
+ struct ib_mad_send_wr_private *temp_mad_send_wr;
+ struct list_head *list_item;
+ unsigned long delay;
+
+ expect_mad_state3(mad_send_wr, IB_MAD_STATE_SEND_START,
+ IB_MAD_STATE_WAIT_RESP, IB_MAD_STATE_CANCELED);
+ if (mad_send_wr->state == IB_MAD_STATE_SEND_START &&
+ mad_send_wr->is_solicited_fc) {
+ mad_agent_priv->sol_fc_send_count--;
+ mad_agent_priv->sol_fc_wait_count++;
+ }
+
+ list_del_init(&mad_send_wr->agent_list);
+ delay = mad_send_wr->timeout;
+ mad_send_wr->timeout += jiffies;
+
+ if (delay) {
+ list_for_each_prev(list_item,
+ &mad_agent_priv->wait_list) {
+ temp_mad_send_wr = list_entry(
+ list_item,
+ struct ib_mad_send_wr_private,
+ agent_list);
+ if (time_after(mad_send_wr->timeout,
+ temp_mad_send_wr->timeout))
+ break;
+ }
+ } else {
+ list_item = &mad_agent_priv->wait_list;
+ }
+
+ list_add(&mad_send_wr->agent_list, list_item);
+}
+
+static void handle_early_resp_state(struct ib_mad_send_wr_private *mad_send_wr,
+ struct ib_mad_agent_private *mad_agent_priv)
+{
+ expect_mad_state(mad_send_wr, IB_MAD_STATE_SEND_START);
+ mad_agent_priv->sol_fc_send_count -= mad_send_wr->is_solicited_fc;
+}
+
+static void handle_canceled_state(struct ib_mad_send_wr_private *mad_send_wr,
+ struct ib_mad_agent_private *mad_agent_priv)
+{
+ not_expect_mad_state(mad_send_wr, IB_MAD_STATE_DONE);
+ if (mad_send_wr->is_solicited_fc) {
+ if (mad_send_wr->state == IB_MAD_STATE_SEND_START)
+ mad_agent_priv->sol_fc_send_count--;
+ else if (mad_send_wr->state == IB_MAD_STATE_WAIT_RESP)
+ mad_agent_priv->sol_fc_wait_count--;
+ }
+}
+
+static void handle_done_state(struct ib_mad_send_wr_private *mad_send_wr,
+ struct ib_mad_agent_private *mad_agent_priv)
+{
+ if (mad_send_wr->is_solicited_fc) {
+ if (mad_send_wr->state == IB_MAD_STATE_SEND_START)
+ mad_agent_priv->sol_fc_send_count--;
+ else if (mad_send_wr->state == IB_MAD_STATE_WAIT_RESP)
+ mad_agent_priv->sol_fc_wait_count--;
+ }
+
+ list_del_init(&mad_send_wr->agent_list);
+}
+
+void change_mad_state(struct ib_mad_send_wr_private *mad_send_wr,
+ enum ib_mad_state new_state)
+{
+ struct ib_mad_agent_private *mad_agent_priv =
+ mad_send_wr->mad_agent_priv;
+
+ switch (new_state) {
+ case IB_MAD_STATE_INIT:
+ break;
+ case IB_MAD_STATE_QUEUED:
+ handle_queued_state(mad_send_wr, mad_agent_priv);
+ break;
+ case IB_MAD_STATE_SEND_START:
+ handle_send_state(mad_send_wr, mad_agent_priv);
+ break;
+ case IB_MAD_STATE_WAIT_RESP:
+ handle_wait_state(mad_send_wr, mad_agent_priv);
+ if (mad_send_wr->state == IB_MAD_STATE_CANCELED)
+ return;
+ break;
+ case IB_MAD_STATE_EARLY_RESP:
+ handle_early_resp_state(mad_send_wr, mad_agent_priv);
+ break;
+ case IB_MAD_STATE_CANCELED:
+ handle_canceled_state(mad_send_wr, mad_agent_priv);
+ break;
+ case IB_MAD_STATE_DONE:
+ handle_done_state(mad_send_wr, mad_agent_priv);
+ break;
+ }
+
+ mad_send_wr->state = new_state;
+}
+
+static bool is_solicited_fc_mad(struct ib_mad_send_wr_private *mad_send_wr)
+{
+ struct ib_rmpp_mad *rmpp_mad;
+ u8 mgmt_class;
+
+ if (!mad_send_wr->timeout)
+ return 0;
+
+ rmpp_mad = mad_send_wr->send_buf.mad;
+ if (mad_send_wr->mad_agent_priv->agent.rmpp_version &&
+ (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE))
+ return 0;
+
+ mgmt_class =
+ ((struct ib_mad_hdr *)mad_send_wr->send_buf.mad)->mgmt_class;
+ return mgmt_class == IB_MGMT_CLASS_CM ||
+ mgmt_class == IB_MGMT_CLASS_SUBN_ADM ||
+ mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+ mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
+}
+
+static bool mad_is_for_backlog(struct ib_mad_send_wr_private *mad_send_wr)
+{
+ struct ib_mad_agent_private *mad_agent_priv =
+ mad_send_wr->mad_agent_priv;
+
+ if (!mad_send_wr->is_solicited_fc || !mad_agent_priv->sol_fc_max)
+ return false;
+
+ if (!list_empty(&mad_agent_priv->backlog_list))
+ return true;
+
+ return mad_agent_priv->sol_fc_send_count +
+ mad_agent_priv->sol_fc_wait_count >=
+ mad_agent_priv->sol_fc_max;
+}
+
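To make the admission check concrete, a worked example follows; the 512-entry receive queue is an assumed default for the recv_queue_size module parameter, not a figure taken from this hunk:

/*
 * With mad_recvq_size == 512:
 *
 *   CM or send-only agent:  sol_fc_max = 512 / SOL_FC_MAX_DEFAULT_FRAC = 128
 *   SA agent:               sol_fc_max = 512 / SOL_FC_MAX_SA_FRAC      =  16
 *
 * A newly posted solicited MAD is parked on backlog_list once
 *
 *   sol_fc_send_count + sol_fc_wait_count >= sol_fc_max
 *
 * or whenever the backlog is already non-empty, so queued MADs keep their
 * original posting order.
 */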
/*
* ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
* with the registered client
@@ -1218,15 +1271,17 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
/* Walk list of send WRs and post each on send list */
for (; send_buf; send_buf = next_send_buf) {
-
mad_send_wr = container_of(send_buf,
struct ib_mad_send_wr_private,
send_buf);
mad_agent_priv = mad_send_wr->mad_agent_priv;
- if (!send_buf->mad_agent->send_handler ||
- (send_buf->timeout_ms &&
- !send_buf->mad_agent->recv_handler)) {
+ ret = ib_mad_enforce_security(mad_agent_priv,
+ mad_send_wr->send_wr.pkey_index);
+ if (ret)
+ goto error;
+
+ if (!send_buf->mad_agent->send_handler) {
ret = -EINVAL;
goto error;
}
@@ -1244,7 +1299,7 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
* request associated with the completion
*/
next_send_buf = send_buf->next;
- mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
+ mad_send_wr->send_wr.ah = send_buf->ah;
if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
@@ -1262,15 +1317,19 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
mad_send_wr->max_retries = send_buf->retries;
mad_send_wr->retries_left = send_buf->retries;
send_buf->retries = 0;
- /* Reference for work request to QP + response */
- mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
- mad_send_wr->status = IB_WC_SUCCESS;
+ change_mad_state(mad_send_wr, IB_MAD_STATE_INIT);
/* Reference MAD agent until send completes */
- atomic_inc(&mad_agent_priv->refcount);
+ refcount_inc(&mad_agent_priv->refcount);
spin_lock_irqsave(&mad_agent_priv->lock, flags);
- list_add_tail(&mad_send_wr->agent_list,
- &mad_agent_priv->send_list);
+ mad_send_wr->is_solicited_fc = is_solicited_fc_mad(mad_send_wr);
+ if (mad_is_for_backlog(mad_send_wr)) {
+ change_mad_state(mad_send_wr, IB_MAD_STATE_QUEUED);
+ spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+ return 0;
+ }
+
+ change_mad_state(mad_send_wr, IB_MAD_STATE_SEND_START);
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
@@ -1282,9 +1341,9 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
if (ret < 0) {
/* Fail send request */
spin_lock_irqsave(&mad_agent_priv->lock, flags);
- list_del(&mad_send_wr->agent_list);
+ change_mad_state(mad_send_wr, IB_MAD_STATE_DONE);
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
- atomic_dec(&mad_agent_priv->refcount);
+ deref_mad_agent(mad_agent_priv);
goto error;
}
}
@@ -1324,25 +1383,6 @@ void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
}
EXPORT_SYMBOL(ib_free_recv_mad);
-struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
- u8 rmpp_version,
- ib_mad_send_handler send_handler,
- ib_mad_recv_handler recv_handler,
- void *context)
-{
- return ERR_PTR(-EINVAL); /* XXX: for now */
-}
-EXPORT_SYMBOL(ib_redirect_mad_qp);
-
-int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
- struct ib_wc *wc)
-{
- dev_err(&mad_agent->device->dev,
- "ib_process_mad_wc() not implemented yet\n");
- return 0;
-}
-EXPORT_SYMBOL(ib_process_mad_wc);
-
static int method_in_use(struct ib_mad_mgmt_method_table **method,
struct ib_mad_reg_req *mad_reg_req)
{
@@ -1361,12 +1401,7 @@ static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
/* Allocate management method table */
*method = kzalloc(sizeof **method, GFP_ATOMIC);
- if (!*method) {
- pr_err("No memory for ib_mad_mgmt_method_table\n");
- return -ENOMEM;
- }
-
- return 0;
+ return (*method) ? 0 : (-ENOMEM);
}
/*
@@ -1435,11 +1470,9 @@ static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
int i;
/* Remove any methods for this mad agent */
- for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
- if (method->agent[i] == agent) {
+ for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
+ if (method->agent[i] == agent)
method->agent[i] = NULL;
- }
- }
}
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
@@ -1457,8 +1490,6 @@ static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
/* Allocate management class table for "new" class version */
*class = kzalloc(sizeof **class, GFP_ATOMIC);
if (!*class) {
- dev_err(&agent_priv->agent.device->dev,
- "No memory for ib_mad_mgmt_class_table\n");
ret = -ENOMEM;
goto error1;
}
@@ -1523,22 +1554,16 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
if (!*vendor_table) {
/* Allocate mgmt vendor class table for "new" class version */
vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
- if (!vendor) {
- dev_err(&agent_priv->agent.device->dev,
- "No memory for ib_mad_mgmt_vendor_class_table\n");
+ if (!vendor)
goto error1;
- }
*vendor_table = vendor;
}
if (!(*vendor_table)->vendor_class[vclass]) {
/* Allocate table for this management vendor class */
vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
- if (!vendor_class) {
- dev_err(&agent_priv->agent.device->dev,
- "No memory for ib_mad_mgmt_vendor_class\n");
+ if (!vendor_class)
goto error2;
- }
(*vendor_table)->vendor_class[vclass] = vendor_class;
}
@@ -1548,7 +1573,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
mad_reg_req->oui, 3)) {
method = &(*vendor_table)->vendor_class[
vclass]->method_table[i];
- BUG_ON(!*method);
+ if (!*method)
+ goto error3;
goto check_in_use;
}
}
@@ -1558,10 +1584,12 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
vclass]->oui[i])) {
method = &(*vendor_table)->vendor_class[
vclass]->method_table[i];
- BUG_ON(*method);
/* Allocate method table for this OUI */
- if ((ret = allocate_method_table(method)))
- goto error3;
+ if (!*method) {
+ ret = allocate_method_table(method);
+ if (ret)
+ goto error3;
+ }
memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
mad_reg_req->oui, 3);
goto check_in_use;
@@ -1619,9 +1647,8 @@ static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
* Was MAD registration request supplied
* with original registration ?
*/
- if (!agent_priv->reg_req) {
+ if (!agent_priv->reg_req)
goto out;
- }
port_priv = agent_priv->qp_info->port_priv;
mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
@@ -1637,9 +1664,9 @@ static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
/* Now, check to see if there are any methods still in use */
if (!check_method_table(method)) {
/* If not, release management method table */
- kfree(method);
- class->method_table[mgmt_class] = NULL;
- /* Any management classes left ? */
+ kfree(method);
+ class->method_table[mgmt_class] = NULL;
+ /* Any management classes left ? */
if (!check_class_table(class)) {
/* If not, release management class table */
kfree(class);
@@ -1709,22 +1736,19 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
struct ib_mad_agent_private *mad_agent = NULL;
unsigned long flags;
- spin_lock_irqsave(&port_priv->reg_lock, flags);
if (ib_response_mad(mad_hdr)) {
u32 hi_tid;
- struct ib_mad_agent_private *entry;
/*
* Routing is based on high 32 bits of transaction ID
* of MAD.
*/
hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
- list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
- if (entry->agent.hi_tid == hi_tid) {
- mad_agent = entry;
- break;
- }
- }
+ rcu_read_lock();
+ mad_agent = xa_load(&ib_mad_clients, hi_tid);
+ if (mad_agent && !refcount_inc_not_zero(&mad_agent->refcount))
+ mad_agent = NULL;
+ rcu_read_unlock();
} else {
struct ib_mad_mgmt_class_table *class;
struct ib_mad_mgmt_method_table *method;
@@ -1733,6 +1757,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
const struct ib_vendor_mad *vendor_mad;
int index;
+ spin_lock_irqsave(&port_priv->reg_lock, flags);
/*
* Routing is based on version, class, and method
* For "newer" vendor MADs, also based on OUI
@@ -1745,7 +1770,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
if (!class)
goto out;
if (convert_mgmt_class(mad_hdr->mgmt_class) >=
- IB_MGMT_MAX_METHODS)
+ ARRAY_SIZE(class->method_table))
goto out;
method = class->method_table[convert_mgmt_class(
mad_hdr->mgmt_class)];
@@ -1772,20 +1797,19 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
~IB_MGMT_METHOD_RESP];
}
}
+ if (mad_agent)
+ refcount_inc(&mad_agent->refcount);
+out:
+ spin_unlock_irqrestore(&port_priv->reg_lock, flags);
}
- if (mad_agent) {
- if (mad_agent->agent.recv_handler)
- atomic_inc(&mad_agent->refcount);
- else {
- dev_notice(&port_priv->device->dev,
- "No receive handler for client %p on port %d\n",
- &mad_agent->agent, port_priv->port_num);
- mad_agent = NULL;
- }
+ if (mad_agent && !mad_agent->agent.recv_handler) {
+ dev_notice(&port_priv->device->dev,
+ "No receive handler for client %p on port %u\n",
+ &mad_agent->agent, port_priv->port_num);
+ deref_mad_agent(mad_agent);
+ mad_agent = NULL;
}
-out:
- spin_unlock_irqrestore(&port_priv->reg_lock, flags);
return mad_agent;
}
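
For response MADs, find_mad_agent() now resolves the owning agent from a global xarray keyed by the high 32 bits of the TID instead of walking a per-port agent list under reg_lock; the lookup runs under RCU and only succeeds if the agent's refcount can still be raised. A kernel-context sketch of that lookup idiom is below; the xarray name and the struct are stand-ins, not the driver's own.

#include <linux/types.h>
#include <linux/xarray.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>

struct client {				/* illustrative stand-in */
	refcount_t refcount;
};

static DEFINE_XARRAY_ALLOC(client_xa);	/* keyed by hi 32 bits of the TID */

static struct client *lookup_client(u32 hi_tid)
{
	struct client *c;

	rcu_read_lock();
	c = xa_load(&client_xa, hi_tid);
	/* The entry may be on its way out; use it only if a ref is taken. */
	if (c && !refcount_inc_not_zero(&c->refcount))
		c = NULL;
	rcu_read_unlock();

	return c;			/* caller drops the reference */
}
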
@@ -1800,7 +1824,7 @@ static int validate_mad(const struct ib_mad_hdr *mad_hdr,
/* Make sure MAD base version is understood */
if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
(!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
- pr_err("MAD received with unsupported base version %d %s\n",
+ pr_err("MAD received with unsupported base version %u %s\n",
mad_hdr->base_version, opa ? "(opa)" : "");
goto out;
}
@@ -1811,6 +1835,11 @@ static int validate_mad(const struct ib_mad_hdr *mad_hdr,
if (qp_num == 0)
valid = 1;
} else {
+ /* CM attributes other than ClassPortInfo only use Send method */
+ if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
+ (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
+ (mad_hdr->method != IB_MGMT_METHOD_SEND))
+ goto out;
/* Filter GSI packets sent to QP0 */
if (qp_num != 0)
valid = 1;
@@ -1840,16 +1869,18 @@ static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
rwc->recv_buf.mad->mad_hdr.mgmt_class;
}
-static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
- const struct ib_mad_send_wr_private *wr,
- const struct ib_mad_recv_wc *rwc )
+static inline int
+rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
+ const struct ib_mad_send_wr_private *wr,
+ const struct ib_mad_recv_wc *rwc)
{
- struct ib_ah_attr attr;
+ struct rdma_ah_attr attr;
u8 send_resp, rcv_resp;
union ib_gid sgid;
struct ib_device *device = mad_agent_priv->agent.device;
- u8 port_num = mad_agent_priv->agent.port_num;
+ u32 port_num = mad_agent_priv->agent.port_num;
u8 lmc;
+ bool has_grh;
send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
@@ -1858,36 +1889,40 @@ static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_
/* both requests, or both responses. GIDs different */
return 0;
- if (ib_query_ah(wr->send_buf.ah, &attr))
+ if (rdma_query_ah(wr->send_buf.ah, &attr))
/* Assume not equal, to avoid false positives. */
return 0;
- if (!!(attr.ah_flags & IB_AH_GRH) !=
- !!(rwc->wc->wc_flags & IB_WC_GRH))
+ has_grh = !!(rdma_ah_get_ah_flags(&attr) & IB_AH_GRH);
+ if (has_grh != !!(rwc->wc->wc_flags & IB_WC_GRH))
/* one has GID, other does not. Assume different */
return 0;
if (!send_resp && rcv_resp) {
/* is request/response. */
- if (!(attr.ah_flags & IB_AH_GRH)) {
+ if (!has_grh) {
if (ib_get_cached_lmc(device, port_num, &lmc))
return 0;
- return (!lmc || !((attr.src_path_bits ^
+ return (!lmc || !((rdma_ah_get_path_bits(&attr) ^
rwc->wc->dlid_path_bits) &
((1 << lmc) - 1)));
} else {
- if (ib_get_cached_gid(device, port_num,
- attr.grh.sgid_index, &sgid))
+ const struct ib_global_route *grh =
+ rdma_ah_read_grh(&attr);
+
+ if (rdma_query_gid(device, port_num,
+ grh->sgid_index, &sgid))
return 0;
return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
16);
}
}
- if (!(attr.ah_flags & IB_AH_GRH))
- return attr.dlid == rwc->wc->slid;
+ if (!has_grh)
+ return rdma_ah_get_dlid(&attr) == rwc->wc->slid;
else
- return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
+ return !memcmp(rdma_ah_read_grh(&attr)->dgid.raw,
+ rwc->recv_buf.grh->sgid.raw,
16);
}
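
The hunk above is part of the wider conversion from struct ib_ah_attr to struct rdma_ah_attr: fields are read through accessors (rdma_ah_get_ah_flags(), rdma_ah_get_path_bits(), rdma_ah_get_dlid(), rdma_ah_read_grh()), rdma_query_ah() replaces ib_query_ah(), and rdma_query_gid() replaces ib_get_cached_gid(). A compressed kernel-context sketch of the accessor usage is below, reduced to the GRH comparison only; the function name is invented.

#include <rdma/ib_verbs.h>
#include <linux/string.h>

/* Sketch: compare a sent AH's GRH destination GID with the source GID of
 * a received GRH, using the rdma_ah_attr accessors. */
static bool same_gid_grh(struct ib_ah *ah, const struct ib_grh *grh)
{
	struct rdma_ah_attr attr;

	if (rdma_query_ah(ah, &attr))
		return false;		/* assume different on error */

	if (!(rdma_ah_get_ah_flags(&attr) & IB_AH_GRH))
		return false;		/* LID-routed, no GID to compare */

	return !memcmp(rdma_ah_read_grh(&attr)->dgid.raw, grh->sgid.raw, 16);
}
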
@@ -1914,7 +1949,19 @@ ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
*/
(is_direct(mad_hdr->mgmt_class) ||
rcv_has_same_gid(mad_agent_priv, wr, wc)))
- return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
+ return (wr->state != IB_MAD_STATE_CANCELED) ? wr : NULL;
+ }
+
+ list_for_each_entry(wr, &mad_agent_priv->backlog_list, agent_list) {
+ if ((wr->tid == mad_hdr->tid) &&
+ rcv_has_same_class(wr, wc) &&
+ /*
+ * Don't check GID for direct routed MADs.
+ * These might have permissive LIDs.
+ */
+ (is_direct(mad_hdr->mgmt_class) ||
+ rcv_has_same_gid(mad_agent_priv, wr, wc)))
+ return (wr->state != IB_MAD_STATE_CANCELED) ? wr : NULL;
}
/*
@@ -1933,17 +1980,55 @@ ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
(is_direct(mad_hdr->mgmt_class) ||
rcv_has_same_gid(mad_agent_priv, wr, wc)))
/* Verify request has not been canceled */
- return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
+ return (wr->state != IB_MAD_STATE_CANCELED) ? wr : NULL;
}
return NULL;
}
+static void
+process_backlog_mads(struct ib_mad_agent_private *mad_agent_priv)
+{
+ struct ib_mad_send_wr_private *mad_send_wr;
+ struct ib_mad_send_wc mad_send_wc = {};
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mad_agent_priv->lock, flags);
+ while (!list_empty(&mad_agent_priv->backlog_list) &&
+ (mad_agent_priv->sol_fc_send_count +
+ mad_agent_priv->sol_fc_wait_count <
+ mad_agent_priv->sol_fc_max)) {
+ mad_send_wr = list_entry(mad_agent_priv->backlog_list.next,
+ struct ib_mad_send_wr_private,
+ agent_list);
+ change_mad_state(mad_send_wr, IB_MAD_STATE_SEND_START);
+ spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+ ret = ib_send_mad(mad_send_wr);
+ if (ret) {
+ spin_lock_irqsave(&mad_agent_priv->lock, flags);
+ deref_mad_agent(mad_agent_priv);
+ change_mad_state(mad_send_wr, IB_MAD_STATE_DONE);
+ spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+ mad_send_wc.send_buf = &mad_send_wr->send_buf;
+ mad_send_wc.status = IB_WC_LOC_QP_OP_ERR;
+ mad_agent_priv->agent.send_handler(
+ &mad_agent_priv->agent, &mad_send_wc);
+ }
+
+ spin_lock_irqsave(&mad_agent_priv->lock, flags);
+ }
+
+ spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+}
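
process_backlog_mads() drains the backlog while the combined send/wait count stays under the limit, dropping the agent lock around the actual ib_send_mad() and completing failed entries with IB_WC_LOC_QP_OP_ERR. The stand-alone model below captures only the locking shape of that loop (take lock, pop while under budget, unlock around the expensive call, re-lock); names are invented, a plain array stands in for the list, and it pops LIFO for brevity where the driver uses a FIFO list.

#include <pthread.h>
#include <stdbool.h>

#define LIMIT 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int backlog[16], backlog_len;
static int in_flight;

static bool post_one(int req) { (void)req; return true; }	/* stub "send" */

static void drain_backlog(void)
{
	pthread_mutex_lock(&lock);
	while (backlog_len && in_flight < LIMIT) {
		int req = backlog[--backlog_len];

		in_flight++;
		/* Do the expensive post without holding the lock. */
		pthread_mutex_unlock(&lock);
		if (!post_one(req)) {
			pthread_mutex_lock(&lock);
			in_flight--;	/* failed send completes immediately */
			continue;	/* condition re-checked under the lock */
		}
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}
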
+
void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
mad_send_wr->timeout = 0;
- if (mad_send_wr->refcount == 1)
- list_move_tail(&mad_send_wr->agent_list,
- &mad_send_wr->mad_agent_priv->done_list);
+ if (mad_send_wr->state == IB_MAD_STATE_WAIT_RESP ||
+ mad_send_wr->state == IB_MAD_STATE_QUEUED)
+ change_mad_state(mad_send_wr, IB_MAD_STATE_DONE);
+ else
+ change_mad_state(mad_send_wr, IB_MAD_STATE_EARLY_RESP);
}
static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
@@ -1952,8 +2037,18 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
struct ib_mad_send_wr_private *mad_send_wr;
struct ib_mad_send_wc mad_send_wc;
unsigned long flags;
+ bool is_mad_done;
+ int ret;
INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
+ ret = ib_mad_enforce_security(mad_agent_priv,
+ mad_recv_wc->wc->pkey_index);
+ if (ret) {
+ ib_free_recv_mad(mad_recv_wc);
+ deref_mad_agent(mad_agent_priv);
+ return;
+ }
+
list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
@@ -1977,34 +2072,40 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
/* user rmpp is in effect
* and this is an active RMPP MAD
*/
- mad_recv_wc->wc->wr_id = 0;
- mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
- mad_recv_wc);
- atomic_dec(&mad_agent_priv->refcount);
+ mad_agent_priv->agent.recv_handler(
+ &mad_agent_priv->agent, NULL,
+ mad_recv_wc);
+ deref_mad_agent(mad_agent_priv);
} else {
/* not user rmpp, revert to normal behavior and
- * drop the mad */
+ * drop the mad
+ */
ib_free_recv_mad(mad_recv_wc);
deref_mad_agent(mad_agent_priv);
return;
}
} else {
ib_mark_mad_done(mad_send_wr);
+ is_mad_done = (mad_send_wr->state == IB_MAD_STATE_DONE);
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
/* Defined behavior is to complete response before request */
- mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
- mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
- mad_recv_wc);
- atomic_dec(&mad_agent_priv->refcount);
+ mad_agent_priv->agent.recv_handler(
+ &mad_agent_priv->agent,
+ &mad_send_wr->send_buf,
+ mad_recv_wc);
+ deref_mad_agent(mad_agent_priv);
- mad_send_wc.status = IB_WC_SUCCESS;
- mad_send_wc.vendor_err = 0;
- mad_send_wc.send_buf = &mad_send_wr->send_buf;
- ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
+ if (is_mad_done) {
+ mad_send_wc.status = IB_WC_SUCCESS;
+ mad_send_wc.vendor_err = 0;
+ mad_send_wc.send_buf = &mad_send_wr->send_buf;
+ ib_mad_complete_send_wr(mad_send_wr,
+ &mad_send_wc);
+ }
}
} else {
- mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
+ mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
mad_recv_wc);
deref_mad_agent(mad_agent_priv);
}
@@ -2013,13 +2114,15 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
const struct ib_mad_qp_info *qp_info,
const struct ib_wc *wc,
- int port_num,
+ u32 port_num,
struct ib_mad_private *recv,
struct ib_mad_private *response)
{
enum smi_forward_action retsmi;
struct ib_smp *smp = (struct ib_smp *)recv->mad;
+ trace_ib_mad_handle_ib_smi(smp);
+
if (smi_handle_dr_smp_recv(smp,
rdma_cap_ib_switch(port_priv->device),
port_num,
@@ -2098,13 +2201,15 @@ static enum smi_action
handle_opa_smi(struct ib_mad_port_private *port_priv,
struct ib_mad_qp_info *qp_info,
struct ib_wc *wc,
- int port_num,
+ u32 port_num,
struct ib_mad_private *recv,
struct ib_mad_private *response)
{
enum smi_forward_action retsmi;
struct opa_smp *smp = (struct opa_smp *)recv->mad;
+ trace_ib_mad_handle_opa_smi(smp);
+
if (opa_smi_handle_dr_smp_recv(smp,
rdma_cap_ib_switch(port_priv->device),
port_num,
@@ -2152,7 +2257,7 @@ static enum smi_action
handle_smi(struct ib_mad_port_private *port_priv,
struct ib_mad_qp_info *qp_info,
struct ib_wc *wc,
- int port_num,
+ u32 port_num,
struct ib_mad_private *recv,
struct ib_mad_private *response,
bool opa)
@@ -2160,28 +2265,39 @@ handle_smi(struct ib_mad_port_private *port_priv,
struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;
if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
- mad_hdr->class_version == OPA_SMI_CLASS_VERSION)
+ mad_hdr->class_version == OPA_SM_CLASS_VERSION)
return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
response);
return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
}
-static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
- struct ib_wc *wc)
+static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
+ struct ib_mad_port_private *port_priv = cq->cq_context;
+ struct ib_mad_list_head *mad_list =
+ container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
struct ib_mad_qp_info *qp_info;
struct ib_mad_private_header *mad_priv_hdr;
struct ib_mad_private *recv, *response = NULL;
- struct ib_mad_list_head *mad_list;
struct ib_mad_agent_private *mad_agent;
- int port_num;
+ u32 port_num;
int ret = IB_MAD_RESULT_SUCCESS;
size_t mad_size;
u16 resp_mad_pkey_index = 0;
bool opa;
- mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
+ if (list_empty_careful(&port_priv->port_list))
+ return;
+
+ if (wc->status != IB_WC_SUCCESS) {
+ /*
+ * Receive errors indicate that the QP has entered the error
+ * state - error handling/shutdown code will cleanup
+ */
+ return;
+ }
+
qp_info = mad_list->mad_queue->qp_info;
dequeue_mad(mad_list);
@@ -2211,20 +2327,17 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
recv->header.recv_wc.recv_buf.grh = &recv->grh;
- if (atomic_read(&qp_info->snoop_count))
- snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
-
/* Validate MAD */
if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
goto out;
+ trace_ib_mad_recv_done_handler(qp_info, wc,
+ (struct ib_mad_hdr *)recv->mad);
+
mad_size = recv->mad_size;
response = alloc_mad_private(mad_size, GFP_KERNEL);
- if (!response) {
- dev_err(&port_priv->device->dev,
- "ib_mad_recv_done_handler no memory for response buffer\n");
+ if (!response)
goto out;
- }
if (rdma_cap_ib_switch(port_priv->device))
port_num = wc->port_num;
@@ -2240,14 +2353,12 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
}
/* Give driver "right of first refusal" on incoming MAD */
- if (port_priv->device->process_mad) {
- ret = port_priv->device->process_mad(port_priv->device, 0,
- port_priv->port_num,
- wc, &recv->grh,
- (const struct ib_mad_hdr *)recv->mad,
- recv->mad_size,
- (struct ib_mad_hdr *)response->mad,
- &mad_size, &resp_mad_pkey_index);
+ if (port_priv->device->ops.process_mad) {
+ ret = port_priv->device->ops.process_mad(
+ port_priv->device, 0, port_priv->port_num, wc,
+ &recv->grh, (const struct ib_mad *)recv->mad,
+ (struct ib_mad *)response->mad, &mad_size,
+ &resp_mad_pkey_index);
if (opa)
wc->pkey_index = resp_mad_pkey_index;
@@ -2269,6 +2380,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
if (mad_agent) {
+ trace_ib_mad_recv_done_agent(mad_agent);
ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
/*
* recv is freed up in error cases in ib_mad_complete_recv
@@ -2318,29 +2430,11 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
struct ib_mad_agent_private *mad_agent_priv;
- struct ib_mad_send_wr_private *temp_mad_send_wr;
- struct list_head *list_item;
unsigned long delay;
mad_agent_priv = mad_send_wr->mad_agent_priv;
- list_del(&mad_send_wr->agent_list);
-
delay = mad_send_wr->timeout;
- mad_send_wr->timeout += jiffies;
-
- if (delay) {
- list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
- temp_mad_send_wr = list_entry(list_item,
- struct ib_mad_send_wr_private,
- agent_list);
- if (time_after(mad_send_wr->timeout,
- temp_mad_send_wr->timeout))
- break;
- }
- }
- else
- list_item = &mad_agent_priv->wait_list;
- list_add(&mad_send_wr->agent_list, list_item);
+ change_mad_state(mad_send_wr, IB_MAD_STATE_WAIT_RESP);
/* Reschedule a work item if we have a shorter timeout */
if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
@@ -2349,7 +2443,7 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
}
void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
- int timeout_ms)
+ unsigned long timeout_ms)
{
mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
wait_for_response(mad_send_wr);
@@ -2374,32 +2468,28 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
} else
ret = IB_RMPP_RESULT_UNHANDLED;
- if (mad_send_wc->status != IB_WC_SUCCESS &&
- mad_send_wr->status == IB_WC_SUCCESS) {
- mad_send_wr->status = mad_send_wc->status;
- mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
- }
-
- if (--mad_send_wr->refcount > 0) {
- if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
- mad_send_wr->status == IB_WC_SUCCESS) {
- wait_for_response(mad_send_wr);
- }
+ if (mad_send_wr->state == IB_MAD_STATE_CANCELED)
+ mad_send_wc->status = IB_WC_WR_FLUSH_ERR;
+ else if (mad_send_wr->state == IB_MAD_STATE_SEND_START &&
+ mad_send_wr->timeout) {
+ wait_for_response(mad_send_wr);
goto done;
}
/* Remove send from MAD agent and notify client of completion */
- list_del(&mad_send_wr->agent_list);
+ if (mad_send_wr->state != IB_MAD_STATE_DONE)
+ change_mad_state(mad_send_wr, IB_MAD_STATE_DONE);
adjust_timeout(mad_agent_priv);
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
- if (mad_send_wr->status != IB_WC_SUCCESS )
- mad_send_wc->status = mad_send_wr->status;
- if (ret == IB_RMPP_RESULT_INTERNAL)
+ if (ret == IB_RMPP_RESULT_INTERNAL) {
ib_rmpp_send_handler(mad_send_wc);
- else
+ } else {
+ if (mad_send_wr->is_solicited_fc)
+ process_backlog_mads(mad_agent_priv);
mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
mad_send_wc);
+ }
/* Release reference on agent taken when sending */
deref_mad_agent(mad_agent_priv);
@@ -2408,24 +2498,34 @@ done:
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
-static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
- struct ib_wc *wc)
+static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
+ struct ib_mad_port_private *port_priv = cq->cq_context;
+ struct ib_mad_list_head *mad_list =
+ container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
- struct ib_mad_list_head *mad_list;
struct ib_mad_qp_info *qp_info;
struct ib_mad_queue *send_queue;
- struct ib_send_wr *bad_send_wr;
struct ib_mad_send_wc mad_send_wc;
unsigned long flags;
int ret;
- mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
+ if (list_empty_careful(&port_priv->port_list))
+ return;
+
+ if (wc->status != IB_WC_SUCCESS) {
+ if (!ib_mad_send_error(port_priv, wc))
+ return;
+ }
+
mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
mad_list);
send_queue = mad_list->mad_queue;
qp_info = send_queue->qp_info;
+ trace_ib_mad_send_done_agent(mad_send_wr->mad_agent_priv);
+ trace_ib_mad_send_done_handler(mad_send_wr, wc);
+
retry:
ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
mad_send_wr->header_mapping,
@@ -2451,14 +2551,12 @@ retry:
mad_send_wc.send_buf = &mad_send_wr->send_buf;
mad_send_wc.status = wc->status;
mad_send_wc.vendor_err = wc->vendor_err;
- if (atomic_read(&qp_info->snoop_count))
- snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
- IB_MAD_SNOOP_SEND_COMPLETIONS);
ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
if (queued_send_wr) {
- ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
- &bad_send_wr);
+ trace_ib_mad_send_done_resend(queued_send_wr, qp_info);
+ ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
+ NULL);
if (ret) {
dev_err(&port_priv->device->dev,
"ib_post_send failed: %d\n", ret);
@@ -2485,24 +2583,15 @@ static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}
-static void mad_error_handler(struct ib_mad_port_private *port_priv,
- struct ib_wc *wc)
+static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
+ struct ib_wc *wc)
{
- struct ib_mad_list_head *mad_list;
- struct ib_mad_qp_info *qp_info;
+ struct ib_mad_list_head *mad_list =
+ container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
+ struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
struct ib_mad_send_wr_private *mad_send_wr;
int ret;
- /* Determine if failure was a send or receive */
- mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
- qp_info = mad_list->mad_queue->qp_info;
- if (mad_list->mad_queue == &qp_info->recv_queue)
- /*
- * Receive errors indicate that the QP has entered the error
- * state - error handling/shutdown code will cleanup
- */
- return;
-
/*
* Send errors will transition the QP to SQE - move
* QP to RTS and repost flushed work requests
@@ -2512,15 +2601,13 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv,
if (wc->status == IB_WC_WR_FLUSH_ERR) {
if (mad_send_wr->retry) {
/* Repost send */
- struct ib_send_wr *bad_send_wr;
-
mad_send_wr->retry = 0;
- ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
- &bad_send_wr);
- if (ret)
- ib_mad_send_done_handler(port_priv, wc);
- } else
- ib_mad_send_done_handler(port_priv, wc);
+ trace_ib_mad_error_handler(mad_send_wr, qp_info);
+ ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
+ NULL);
+ if (!ret)
+ return false;
+ }
} else {
struct ib_qp_attr *attr;
@@ -2534,41 +2621,31 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv,
kfree(attr);
if (ret)
dev_err(&port_priv->device->dev,
- "mad_error_handler - ib_modify_qp to RTS : %d\n",
- ret);
+ "%s - ib_modify_qp to RTS: %d\n",
+ __func__, ret);
else
mark_sends_for_retry(qp_info);
}
- ib_mad_send_done_handler(port_priv, wc);
}
+
+ return true;
}
-/*
- * IB MAD completion callback
- */
-static void ib_mad_completion_handler(struct work_struct *work)
+static void clear_mad_error_list(struct list_head *list,
+ enum ib_wc_status wc_status,
+ struct ib_mad_agent_private *mad_agent_priv)
{
- struct ib_mad_port_private *port_priv;
- struct ib_wc wc;
+ struct ib_mad_send_wr_private *mad_send_wr, *n;
+ struct ib_mad_send_wc mad_send_wc;
- port_priv = container_of(work, struct ib_mad_port_private, work);
- ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
+ mad_send_wc.status = wc_status;
+ mad_send_wc.vendor_err = 0;
- while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
- if (wc.status == IB_WC_SUCCESS) {
- switch (wc.opcode) {
- case IB_WC_SEND:
- ib_mad_send_done_handler(port_priv, &wc);
- break;
- case IB_WC_RECV:
- ib_mad_recv_done_handler(port_priv, &wc);
- break;
- default:
- BUG_ON(1);
- break;
- }
- } else
- mad_error_handler(port_priv, &wc);
+ list_for_each_entry_safe(mad_send_wr, n, list, agent_list) {
+ mad_send_wc.send_buf = &mad_send_wr->send_buf;
+ mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
+ &mad_send_wc);
+ deref_mad_agent(mad_agent_priv);
}
}
@@ -2576,36 +2653,31 @@ static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
unsigned long flags;
struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
- struct ib_mad_send_wc mad_send_wc;
struct list_head cancel_list;
INIT_LIST_HEAD(&cancel_list);
spin_lock_irqsave(&mad_agent_priv->lock, flags);
list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
- &mad_agent_priv->send_list, agent_list) {
- if (mad_send_wr->status == IB_WC_SUCCESS) {
- mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
- mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
- }
- }
+ &mad_agent_priv->send_list, agent_list)
+ change_mad_state(mad_send_wr, IB_MAD_STATE_CANCELED);
- /* Empty wait list to prevent receives from finding a request */
- list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
-
- /* Report all cancelled requests */
- mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
- mad_send_wc.vendor_err = 0;
+ /* Empty wait & backlog list to prevent receives from finding request */
+ list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
+ &mad_agent_priv->wait_list, agent_list) {
+ change_mad_state(mad_send_wr, IB_MAD_STATE_DONE);
+ list_add_tail(&mad_send_wr->agent_list, &cancel_list);
+ }
list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
- &cancel_list, agent_list) {
- mad_send_wc.send_buf = &mad_send_wr->send_buf;
- list_del(&mad_send_wr->agent_list);
- mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
- &mad_send_wc);
- atomic_dec(&mad_agent_priv->refcount);
+ &mad_agent_priv->backlog_list, agent_list) {
+ change_mad_state(mad_send_wr, IB_MAD_STATE_DONE);
+ list_add_tail(&mad_send_wr->agent_list, &cancel_list);
}
+
+ spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+ /* Report all cancelled requests */
+ clear_mad_error_list(&cancel_list, IB_WC_WR_FLUSH_ERR, mad_agent_priv);
}
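
cancel_mads() now marks in-flight sends CANCELED, moves waiting and backlogged sends onto a private cancel_list while holding the agent lock, and only then reports the flush completions through clear_mad_error_list() with the lock dropped. The kernel-context sketch below shows that "detach under the lock, call back outside it" shape with the list API; the struct and function names are illustrative.

#include <linux/list.h>
#include <linux/spinlock.h>

struct req {
	struct list_head node;
};

static void cancel_all(struct list_head *pending, spinlock_t *lock,
		       void (*complete)(struct req *r))
{
	struct req *r, *tmp;
	LIST_HEAD(cancel_list);
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(r, tmp, pending, node)
		list_move_tail(&r->node, &cancel_list);
	spin_unlock_irqrestore(lock, flags);

	/* Completion callbacks may re-take the lock; run them outside it. */
	list_for_each_entry_safe(r, tmp, &cancel_list, node)
		complete(r);
}
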
static struct ib_mad_send_wr_private*
@@ -2627,31 +2699,40 @@ find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
&mad_send_wr->send_buf == send_buf)
return mad_send_wr;
}
+
+ list_for_each_entry(mad_send_wr, &mad_agent_priv->backlog_list,
+ agent_list) {
+ if (&mad_send_wr->send_buf == send_buf)
+ return mad_send_wr;
+ }
+
return NULL;
}
-int ib_modify_mad(struct ib_mad_agent *mad_agent,
- struct ib_mad_send_buf *send_buf, u32 timeout_ms)
+int ib_modify_mad(struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_send_wr_private *mad_send_wr;
unsigned long flags;
int active;
- mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
- agent);
+ if (!send_buf)
+ return -EINVAL;
+
+ mad_agent_priv = container_of(send_buf->mad_agent,
+ struct ib_mad_agent_private, agent);
spin_lock_irqsave(&mad_agent_priv->lock, flags);
mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
- if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
+ if (!mad_send_wr || mad_send_wr->state == IB_MAD_STATE_CANCELED) {
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
return -EINVAL;
}
- active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
- if (!timeout_ms) {
- mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
- mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
- }
+ active = ((mad_send_wr->state == IB_MAD_STATE_SEND_START) ||
+ (mad_send_wr->state == IB_MAD_STATE_EARLY_RESP) ||
+ (mad_send_wr->state == IB_MAD_STATE_QUEUED && timeout_ms));
+ if (!timeout_ms)
+ change_mad_state(mad_send_wr, IB_MAD_STATE_CANCELED);
mad_send_wr->send_buf.timeout_ms = timeout_ms;
if (active)
@@ -2664,13 +2745,6 @@ int ib_modify_mad(struct ib_mad_agent *mad_agent,
}
EXPORT_SYMBOL(ib_modify_mad);
-void ib_cancel_mad(struct ib_mad_agent *mad_agent,
- struct ib_mad_send_buf *send_buf)
-{
- ib_modify_mad(mad_agent, send_buf, 0);
-}
-EXPORT_SYMBOL(ib_cancel_mad);
-
static void local_completions(struct work_struct *work)
{
struct ib_mad_agent_private *mad_agent_priv;
@@ -2711,9 +2785,9 @@ static void local_completions(struct work_struct *work)
* before request
*/
build_smp_wc(recv_mad_agent->agent.qp,
- (unsigned long) local->mad_send_wr,
+ local->mad_send_wr->send_wr.wr.wr_cqe,
be16_to_cpu(IB_LID_PERMISSIVE),
- local->mad_send_wr->send_wr.wr.ud.pkey_index,
+ local->mad_send_wr->send_wr.pkey_index,
recv_mad_agent->agent.port_num, &wc);
local->mad_priv->header.recv_wc.wc = &wc;
@@ -2733,15 +2807,12 @@ static void local_completions(struct work_struct *work)
local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
local->mad_priv->header.recv_wc.recv_buf.mad =
(struct ib_mad *)local->mad_priv->mad;
- if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
- snoop_recv(recv_mad_agent->qp_info,
- &local->mad_priv->header.recv_wc,
- IB_MAD_SNOOP_RECVS);
recv_mad_agent->agent.recv_handler(
&recv_mad_agent->agent,
+ &local->mad_send_wr->send_buf,
&local->mad_priv->header.recv_wc);
spin_lock_irqsave(&recv_mad_agent->lock, flags);
- atomic_dec(&recv_mad_agent->refcount);
+ deref_mad_agent(recv_mad_agent);
spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
}
@@ -2750,15 +2821,11 @@ local_send_completion:
mad_send_wc.status = IB_WC_SUCCESS;
mad_send_wc.vendor_err = 0;
mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
- if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
- snoop_send(mad_agent_priv->qp_info,
- &local->mad_send_wr->send_buf,
- &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
&mad_send_wc);
spin_lock_irqsave(&mad_agent_priv->lock, flags);
- atomic_dec(&mad_agent_priv->refcount);
+ deref_mad_agent(mad_agent_priv);
if (free_mad)
kfree(local->mad_priv);
kfree(local);
@@ -2777,6 +2844,11 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
mad_send_wr->send_buf.retries++;
mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
+ if (mad_send_wr->is_solicited_fc &&
+ !list_empty(&mad_send_wr->mad_agent_priv->backlog_list)) {
+ change_mad_state(mad_send_wr, IB_MAD_STATE_QUEUED);
+ return 0;
+ }
if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
ret = ib_retry_rmpp(mad_send_wr);
@@ -2794,24 +2866,25 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
} else
ret = ib_send_mad(mad_send_wr);
- if (!ret) {
- mad_send_wr->refcount++;
- list_add_tail(&mad_send_wr->agent_list,
- &mad_send_wr->mad_agent_priv->send_list);
- }
+ if (!ret)
+ change_mad_state(mad_send_wr, IB_MAD_STATE_SEND_START);
+
return ret;
}
static void timeout_sends(struct work_struct *work)
{
- struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_send_wr_private *mad_send_wr;
- struct ib_mad_send_wc mad_send_wc;
+ struct ib_mad_agent_private *mad_agent_priv;
+ struct list_head timeout_list;
+ struct list_head cancel_list;
+ struct list_head *list_item;
unsigned long flags, delay;
mad_agent_priv = container_of(work, struct ib_mad_agent_private,
timed_work.work);
- mad_send_wc.vendor_err = 0;
+ INIT_LIST_HEAD(&timeout_list);
+ INIT_LIST_HEAD(&cancel_list);
spin_lock_irqsave(&mad_agent_priv->lock, flags);
while (!list_empty(&mad_agent_priv->wait_list)) {
@@ -2829,36 +2902,22 @@ static void timeout_sends(struct work_struct *work)
break;
}
- list_del(&mad_send_wr->agent_list);
- if (mad_send_wr->status == IB_WC_SUCCESS &&
- !retry_send(mad_send_wr))
- continue;
-
- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
-
- if (mad_send_wr->status == IB_WC_SUCCESS)
- mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
+ if (mad_send_wr->state == IB_MAD_STATE_CANCELED)
+ list_item = &cancel_list;
+ else if (retry_send(mad_send_wr))
+ list_item = &timeout_list;
else
- mad_send_wc.status = mad_send_wr->status;
- mad_send_wc.send_buf = &mad_send_wr->send_buf;
- mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
- &mad_send_wc);
+ continue;
- atomic_dec(&mad_agent_priv->refcount);
- spin_lock_irqsave(&mad_agent_priv->lock, flags);
+ change_mad_state(mad_send_wr, IB_MAD_STATE_DONE);
+ list_add_tail(&mad_send_wr->agent_list, list_item);
}
- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
-}
-static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
-{
- struct ib_mad_port_private *port_priv = cq->cq_context;
- unsigned long flags;
-
- spin_lock_irqsave(&ib_mad_port_list_lock, flags);
- if (!list_empty(&port_priv->port_list))
- queue_work(port_priv->wq, &port_priv->work);
- spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
+ spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+ process_backlog_mads(mad_agent_priv);
+ clear_mad_error_list(&timeout_list, IB_WC_RESP_TIMEOUT_ERR,
+ mad_agent_priv);
+ clear_mad_error_list(&cancel_list, IB_WC_WR_FLUSH_ERR, mad_agent_priv);
}
/*
@@ -2868,11 +2927,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
struct ib_mad_private *mad)
{
unsigned long flags;
- int post, ret;
struct ib_mad_private *mad_priv;
struct ib_sge sg_list;
- struct ib_recv_wr recv_wr, *bad_recv_wr;
+ struct ib_recv_wr recv_wr;
struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
+ int ret = 0;
/* Initialize common scatter list fields */
sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
@@ -2882,7 +2941,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
recv_wr.sg_list = &sg_list;
recv_wr.num_sge = 1;
- do {
+ while (true) {
/* Allocate and map receive buffer */
if (mad) {
mad_priv = mad;
@@ -2890,12 +2949,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
} else {
mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
GFP_ATOMIC);
- if (!mad_priv) {
- dev_err(&qp_info->port_priv->device->dev,
- "No memory for receive buffer\n");
- ret = -ENOMEM;
- break;
- }
+ if (!mad_priv)
+ return -ENOMEM;
}
sg_list.length = mad_priv_dma_size(mad_priv);
sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
@@ -2905,34 +2960,40 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
sg_list.addr))) {
ret = -ENOMEM;
- break;
+ goto free_mad_priv;
}
mad_priv->header.mapping = sg_list.addr;
- recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
mad_priv->header.mad_list.mad_queue = recv_queue;
-
- /* Post receive WR */
+ mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
+ recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;
spin_lock_irqsave(&recv_queue->lock, flags);
- post = (++recv_queue->count < recv_queue->max_active);
- list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
+ if (recv_queue->count >= recv_queue->max_active) {
+ /* Fully populated the receive queue */
+ spin_unlock_irqrestore(&recv_queue->lock, flags);
+ break;
+ }
+ recv_queue->count++;
+ list_add_tail(&mad_priv->header.mad_list.list,
+ &recv_queue->list);
spin_unlock_irqrestore(&recv_queue->lock, flags);
- ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
+
+ ret = ib_post_recv(qp_info->qp, &recv_wr, NULL);
if (ret) {
spin_lock_irqsave(&recv_queue->lock, flags);
list_del(&mad_priv->header.mad_list.list);
recv_queue->count--;
spin_unlock_irqrestore(&recv_queue->lock, flags);
- ib_dma_unmap_single(qp_info->port_priv->device,
- mad_priv->header.mapping,
- mad_priv_dma_size(mad_priv),
- DMA_FROM_DEVICE);
- kfree(mad_priv);
dev_err(&qp_info->port_priv->device->dev,
"ib_post_recv failed: %d\n", ret);
break;
}
- } while (post);
+ }
+ ib_dma_unmap_single(qp_info->port_priv->device,
+ mad_priv->header.mapping,
+ mad_priv_dma_size(mad_priv), DMA_FROM_DEVICE);
+free_mad_priv:
+ kfree(mad_priv);
return ret;
}
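
The receive path above switches from stuffing a pointer into wc->wr_id to the newer CQ API: every work request embeds a struct ib_cqe whose .done callback the core invokes on completion, and the handler recovers its container with container_of(). A kernel-context sketch of that pattern for a receive buffer follows; the wrapper struct and function names are illustrative.

#include <rdma/ib_verbs.h>

struct rx_buf {				/* illustrative wrapper */
	struct ib_cqe cqe;
	struct ib_sge sge;
};

static void rx_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rx_buf *buf = container_of(wc->wr_cqe, struct rx_buf, cqe);

	if (wc->status != IB_WC_SUCCESS)
		return;			/* QP in error; teardown reclaims buf */
	/* ... hand buf off for processing ... */
	(void)buf;
}

static int post_rx(struct ib_qp *qp, struct rx_buf *buf)
{
	struct ib_recv_wr wr = {};

	buf->cqe.done = rx_done;	/* completion routes back via wr_cqe */
	wr.wr_cqe = &buf->cqe;
	wr.sg_list = &buf->sge;
	wr.num_sge = 1;

	return ib_post_recv(qp, &wr, NULL);
}
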
@@ -2982,11 +3043,8 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
u16 pkey_index;
attr = kmalloc(sizeof *attr, GFP_KERNEL);
- if (!attr) {
- dev_err(&port_priv->device->dev,
- "Couldn't kmalloc ib_qp_attr\n");
+ if (!attr)
return -ENOMEM;
- }
ret = ib_find_pkey(port_priv->device, port_priv->port_num,
IB_DEFAULT_PKEY_FULL, &pkey_index);
@@ -3064,7 +3122,7 @@ static void qp_event_handler(struct ib_event *event, void *qp_context)
/* It's worse than that! He's dead, Jim! */
dev_err(&qp_info->port_priv->device->dev,
- "Fatal error (%d) on MAD QP (%d)\n",
+ "Fatal error (%d) on MAD QP (%u)\n",
event->event, qp_info->qp->qp_num);
}
@@ -3084,10 +3142,6 @@ static void init_mad_qp(struct ib_mad_port_private *port_priv,
init_mad_queue(qp_info, &qp_info->send_queue);
init_mad_queue(qp_info, &qp_info->recv_queue);
INIT_LIST_HEAD(&qp_info->overflow_list);
- spin_lock_init(&qp_info->snoop_lock);
- qp_info->snoop_table = NULL;
- qp_info->snoop_table_size = 0;
- atomic_set(&qp_info->snoop_count, 0);
}
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
@@ -3131,7 +3185,6 @@ static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
return;
ib_destroy_qp(qp_info->qp);
- kfree(qp_info->snoop_table);
}
/*
@@ -3139,14 +3192,12 @@ static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
* Create the QP, PD, MR, and CQ if needed
*/
static int ib_mad_port_open(struct ib_device *device,
- int port_num)
+ u32 port_num)
{
int ret, cq_size;
struct ib_mad_port_private *port_priv;
unsigned long flags;
- char name[sizeof "ib_mad123"];
int has_smi;
- struct ib_cq_init_attr cq_attr = {};
if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
return -EFAULT;
@@ -3157,15 +3208,12 @@ static int ib_mad_port_open(struct ib_device *device,
/* Create new device info */
port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
- if (!port_priv) {
- dev_err(&device->dev, "No memory for ib_mad_port_private\n");
+ if (!port_priv)
return -ENOMEM;
- }
port_priv->device = device;
port_priv->port_num = port_num;
spin_lock_init(&port_priv->reg_lock);
- INIT_LIST_HEAD(&port_priv->agent_list);
init_mad_qp(port_priv, &port_priv->qp_info[0]);
init_mad_qp(port_priv, &port_priv->qp_info[1]);
@@ -3174,20 +3222,18 @@ static int ib_mad_port_open(struct ib_device *device,
if (has_smi)
cq_size *= 2;
- cq_attr.cqe = cq_size;
- port_priv->cq = ib_create_cq(port_priv->device,
- ib_mad_thread_completion_handler,
- NULL, port_priv, &cq_attr);
- if (IS_ERR(port_priv->cq)) {
- dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
- ret = PTR_ERR(port_priv->cq);
- goto error3;
- }
-
- port_priv->pd = ib_alloc_pd(device);
+ port_priv->pd = ib_alloc_pd(device, 0);
if (IS_ERR(port_priv->pd)) {
dev_err(&device->dev, "Couldn't create ib_mad PD\n");
ret = PTR_ERR(port_priv->pd);
+ goto error3;
+ }
+
+ port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
+ IB_POLL_UNBOUND_WORKQUEUE);
+ if (IS_ERR(port_priv->cq)) {
+ dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
+ ret = PTR_ERR(port_priv->cq);
goto error4;
}
@@ -3196,17 +3242,19 @@ static int ib_mad_port_open(struct ib_device *device,
if (ret)
goto error6;
}
- ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
- if (ret)
- goto error7;
- snprintf(name, sizeof name, "ib_mad%d", port_num);
- port_priv->wq = create_singlethread_workqueue(name);
+ if (rdma_cap_ib_cm(device, port_num)) {
+ ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
+ if (ret)
+ goto error7;
+ }
+
+ port_priv->wq = alloc_ordered_workqueue("ib_mad%u", WQ_MEM_RECLAIM,
+ port_num);
if (!port_priv->wq) {
ret = -ENOMEM;
goto error8;
}
- INIT_WORK(&port_priv->work, ib_mad_completion_handler);
spin_lock_irqsave(&ib_mad_port_list_lock, flags);
list_add_tail(&port_priv->port_list, &ib_mad_port_list);
@@ -3231,11 +3279,11 @@ error8:
error7:
destroy_mad_qp(&port_priv->qp_info[0]);
error6:
- ib_dealloc_pd(port_priv->pd);
-error4:
- ib_destroy_cq(port_priv->cq);
+ ib_free_cq(port_priv->cq);
cleanup_recv_queue(&port_priv->qp_info[1]);
cleanup_recv_queue(&port_priv->qp_info[0]);
+error4:
+ ib_dealloc_pd(port_priv->pd);
error3:
kfree(port_priv);
@@ -3247,7 +3295,7 @@ error3:
* If there are no classes using the port, free the port
* resources (CQ, MR, PD, QP) and remove the port's info structure
*/
-static int ib_mad_port_close(struct ib_device *device, int port_num)
+static int ib_mad_port_close(struct ib_device *device, u32 port_num)
{
struct ib_mad_port_private *port_priv;
unsigned long flags;
@@ -3256,7 +3304,7 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
port_priv = __ib_get_mad_port(device, port_num);
if (port_priv == NULL) {
spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
- dev_err(&device->dev, "Port %d not found\n", port_num);
+ dev_err(&device->dev, "Port %u not found\n", port_num);
return -ENODEV;
}
list_del_init(&port_priv->port_list);
@@ -3265,8 +3313,8 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
destroy_workqueue(port_priv->wq);
destroy_mad_qp(&port_priv->qp_info[1]);
destroy_mad_qp(&port_priv->qp_info[0]);
+ ib_free_cq(port_priv->cq);
ib_dealloc_pd(port_priv->pd);
- ib_destroy_cq(port_priv->cq);
cleanup_recv_queue(&port_priv->qp_info[1]);
cleanup_recv_queue(&port_priv->qp_info[0]);
/* XXX: Handle deallocation of MAD registration tables */
@@ -3276,9 +3324,11 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
return 0;
}
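
With the per-port completion work item gone, the port allocates its CQ through ib_alloc_cq() with IB_POLL_UNBOUND_WORKQUEUE, letting the RDMA core poll the CQ and invoke each WR's .done callback from a workqueue instead of the driver's own ib_req_notify_cq()/ib_poll_cq() loop; teardown order changes accordingly (free the CQ before the PD). A minimal allocation/teardown sketch follows, with made-up names.

#include <rdma/ib_verbs.h>

/* Sketch only: core-managed CQ polling; "dev" and "nr_cqe" are placeholders. */
static struct ib_cq *alloc_port_cq(struct ib_device *dev, void *ctx, int nr_cqe)
{
	return ib_alloc_cq(dev, ctx, nr_cqe, 0 /* comp_vector */,
			   IB_POLL_UNBOUND_WORKQUEUE);
}

static void free_port_cq(struct ib_cq *cq)
{
	ib_free_cq(cq);			/* pairs with ib_alloc_cq() */
}
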
-static void ib_mad_init_device(struct ib_device *device)
+static int ib_mad_init_device(struct ib_device *device)
{
int start, i;
+ unsigned int count = 0;
+ int ret;
start = rdma_start_port(device);
@@ -3286,17 +3336,23 @@ static void ib_mad_init_device(struct ib_device *device)
if (!rdma_cap_ib_mad(device, i))
continue;
- if (ib_mad_port_open(device, i)) {
+ ret = ib_mad_port_open(device, i);
+ if (ret) {
dev_err(&device->dev, "Couldn't open port %d\n", i);
goto error;
}
- if (ib_agent_port_open(device, i)) {
+ ret = ib_agent_port_open(device, i);
+ if (ret) {
dev_err(&device->dev,
"Couldn't open port %d for agents\n", i);
goto error_agent;
}
+ count++;
}
- return;
+ if (!count)
+ return -EOPNOTSUPP;
+
+ return 0;
error_agent:
if (ib_mad_port_close(device, i))
@@ -3313,21 +3369,22 @@ error:
if (ib_mad_port_close(device, i))
dev_err(&device->dev, "Couldn't close port %d\n", i);
}
+ return ret;
}
static void ib_mad_remove_device(struct ib_device *device, void *client_data)
{
- int i;
+ unsigned int i;
- for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
+ rdma_for_each_port (device, i) {
if (!rdma_cap_ib_mad(device, i))
continue;
if (ib_agent_port_close(device, i))
dev_err(&device->dev,
- "Couldn't close port %d for agents\n", i);
+ "Couldn't close port %u for agents\n", i);
if (ib_mad_port_close(device, i))
- dev_err(&device->dev, "Couldn't close port %d\n", i);
+ dev_err(&device->dev, "Couldn't close port %u\n", i);
}
}
@@ -3337,7 +3394,7 @@ static struct ib_client mad_client = {
.remove = ib_mad_remove_device
};
-static int __init ib_mad_init_module(void)
+int ib_mad_init(void)
{
mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
@@ -3355,10 +3412,7 @@ static int __init ib_mad_init_module(void)
return 0;
}
-static void __exit ib_mad_cleanup_module(void)
+void ib_mad_cleanup(void)
{
ib_unregister_client(&mad_client);
}
-
-module_init(ib_mad_init_module);
-module_exit(ib_mad_cleanup_module);
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 4a4f7aad0978..f444357d33f4 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -64,6 +64,7 @@
struct ib_mad_list_head {
struct list_head list;
+ struct ib_cqe cqe;
struct ib_mad_queue *mad_queue;
};
@@ -72,39 +73,44 @@ struct ib_mad_private_header {
struct ib_mad_recv_wc recv_wc;
struct ib_wc wc;
u64 mapping;
-} __attribute__ ((packed));
+} __packed;
struct ib_mad_private {
struct ib_mad_private_header header;
size_t mad_size;
struct ib_grh grh;
- u8 mad[0];
-} __attribute__ ((packed));
+ u8 mad[];
+} __packed;
struct ib_rmpp_segment {
struct list_head list;
u32 num;
- u8 data[0];
+ u8 data[];
};
struct ib_mad_agent_private {
- struct list_head agent_list;
struct ib_mad_agent agent;
struct ib_mad_reg_req *reg_req;
struct ib_mad_qp_info *qp_info;
spinlock_t lock;
struct list_head send_list;
+ unsigned int sol_fc_send_count;
struct list_head wait_list;
- struct list_head done_list;
+ unsigned int sol_fc_wait_count;
struct delayed_work timed_work;
unsigned long timeout;
struct list_head local_list;
struct work_struct local_work;
struct list_head rmpp_list;
-
- atomic_t refcount;
- struct completion comp;
+ unsigned int sol_fc_max;
+ struct list_head backlog_list;
+
+ refcount_t refcount;
+ union {
+ struct completion comp;
+ struct rcu_head rcu;
+ };
};
struct ib_mad_snoop_private {
@@ -112,10 +118,35 @@ struct ib_mad_snoop_private {
struct ib_mad_qp_info *qp_info;
int snoop_index;
int mad_snoop_flags;
- atomic_t refcount;
struct completion comp;
};
+enum ib_mad_state {
+ /* MAD is in the making and is not yet in any list */
+ IB_MAD_STATE_INIT,
+ /* MAD is in backlog list */
+ IB_MAD_STATE_QUEUED,
+ /*
+ * MAD was sent to the QP and is waiting for completion
+ * notification in send list.
+ */
+ IB_MAD_STATE_SEND_START,
+ /*
+ * MAD send completed successfully, waiting for a response
+ * in wait list.
+ */
+ IB_MAD_STATE_WAIT_RESP,
+ /*
+ * Response came early, before send completion notification,
+ * in send list.
+ */
+ IB_MAD_STATE_EARLY_RESP,
+ /* MAD was canceled while in wait or send list */
+ IB_MAD_STATE_CANCELED,
+ /* MAD processing completed, MAD in no list */
+ IB_MAD_STATE_DONE
+};
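
The comments on enum ib_mad_state double as a map from state to the list a request lives on, which is what replaces the old refcount/status pair. A tiny stand-alone helper restating that mapping (a mental model, not driver code) is below.

enum mad_state { INIT, QUEUED, SEND_START, WAIT_RESP, EARLY_RESP,
		 CANCELED, DONE };

/* Which list holds a send work request in each state, per the comments. */
static const char *state_list(enum mad_state s)
{
	switch (s) {
	case INIT:	 return "none (being built)";
	case QUEUED:	 return "backlog_list";
	case SEND_START: return "send_list";
	case WAIT_RESP:	 return "wait_list";
	case EARLY_RESP: return "send_list";
	case CANCELED:	 return "send_list or wait_list";
	case DONE:	 return "none";
	}
	return "unknown";
}
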
+
struct ib_mad_send_wr_private {
struct ib_mad_list_head mad_list;
struct list_head agent_list;
@@ -123,15 +154,13 @@ struct ib_mad_send_wr_private {
struct ib_mad_send_buf send_buf;
u64 header_mapping;
u64 payload_mapping;
- struct ib_send_wr send_wr;
+ struct ib_ud_wr send_wr;
struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
__be64 tid;
unsigned long timeout;
int max_retries;
int retries_left;
int retry;
- int refcount;
- enum ib_wc_status status;
/* RMPP control */
struct list_head rmpp_list;
@@ -141,8 +170,48 @@ struct ib_mad_send_wr_private {
int seg_num;
int newwin;
int pad;
+
+ enum ib_mad_state state;
+
+ /* Solicited MAD flow control */
+ bool is_solicited_fc;
};
+static inline void expect_mad_state(struct ib_mad_send_wr_private *mad_send_wr,
+ enum ib_mad_state expected_state)
+{
+ if (IS_ENABLED(CONFIG_LOCKDEP))
+ WARN_ON(mad_send_wr->state != expected_state);
+}
+
+static inline void expect_mad_state2(struct ib_mad_send_wr_private *mad_send_wr,
+ enum ib_mad_state expected_state1,
+ enum ib_mad_state expected_state2)
+{
+ if (IS_ENABLED(CONFIG_LOCKDEP))
+ WARN_ON(mad_send_wr->state != expected_state1 &&
+ mad_send_wr->state != expected_state2);
+}
+
+static inline void expect_mad_state3(struct ib_mad_send_wr_private *mad_send_wr,
+ enum ib_mad_state expected_state1,
+ enum ib_mad_state expected_state2,
+ enum ib_mad_state expected_state3)
+{
+ if (IS_ENABLED(CONFIG_LOCKDEP))
+ WARN_ON(mad_send_wr->state != expected_state1 &&
+ mad_send_wr->state != expected_state2 &&
+ mad_send_wr->state != expected_state3);
+}
+
+static inline void
+not_expect_mad_state(struct ib_mad_send_wr_private *mad_send_wr,
+ enum ib_mad_state wrong_state)
+{
+ if (IS_ENABLED(CONFIG_LOCKDEP))
+ WARN_ON(mad_send_wr->state == wrong_state);
+}
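
The expect_mad_state*() helpers compile away unless CONFIG_LOCKDEP is enabled, turning the state machine's preconditions into debug-only WARNs. A hypothetical caller (the handler name is invented, shown only to illustrate the asserts) would check the state it expects to be leaving before doing its list bookkeeping:

/* Hypothetical transition handler, not part of the patch. */
static void handle_requeue(struct ib_mad_send_wr_private *mad_send_wr)
{
	/* Only a started or waiting send may be pushed back to the backlog. */
	expect_mad_state2(mad_send_wr, IB_MAD_STATE_SEND_START,
			  IB_MAD_STATE_WAIT_RESP);
	not_expect_mad_state(mad_send_wr, IB_MAD_STATE_DONE);

	/* ...move to backlog_list, update sol_fc_* counters... */
}
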
+
struct ib_mad_local_private {
struct list_head completion_list;
struct ib_mad_private *mad_priv;
@@ -202,9 +271,7 @@ struct ib_mad_port_private {
spinlock_t reg_lock;
struct ib_mad_mgmt_version_table version[MAX_MGMT_VERSION];
- struct list_head agent_list;
struct workqueue_struct *wq;
- struct work_struct work;
struct ib_mad_qp_info qp_info[IB_MAD_QPS_CORE];
};
@@ -220,6 +287,9 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr);
void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
- int timeout_ms);
+ unsigned long timeout_ms);
+
+void change_mad_state(struct ib_mad_send_wr_private *mad_send_wr,
+ enum ib_mad_state new_state);
#endif /* __IB_MAD_PRIV_H__ */
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 382941b46e43..1c5e0eaf1c94 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -40,8 +40,7 @@
enum rmpp_state {
RMPP_STATE_ACTIVE,
RMPP_STATE_TIMEOUT,
- RMPP_STATE_COMPLETE,
- RMPP_STATE_CANCELING
+ RMPP_STATE_COMPLETE
};
struct mad_rmpp_recv {
@@ -52,7 +51,7 @@ struct mad_rmpp_recv {
struct completion comp;
enum rmpp_state state;
spinlock_t lock;
- atomic_t refcount;
+ refcount_t refcount;
struct ib_ah *ah;
struct ib_mad_recv_wc *rmpp_wc;
@@ -64,7 +63,7 @@ struct mad_rmpp_recv {
__be64 tid;
u32 src_qp;
- u16 slid;
+ u32 slid;
u8 mgmt_class;
u8 class_version;
u8 method;
@@ -73,7 +72,7 @@ struct mad_rmpp_recv {
static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
- if (atomic_dec_and_test(&rmpp_recv->refcount))
+ if (refcount_dec_and_test(&rmpp_recv->refcount))
complete(&rmpp_recv->comp);
}
@@ -81,7 +80,7 @@ static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
deref_rmpp_recv(rmpp_recv);
wait_for_completion(&rmpp_recv->comp);
- ib_destroy_ah(rmpp_recv->ah);
+ rdma_destroy_ah(rmpp_recv->ah, RDMA_DESTROY_AH_SLEEPABLE);
kfree(rmpp_recv);
}
@@ -92,22 +91,18 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
spin_lock_irqsave(&agent->lock, flags);
list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
- if (rmpp_recv->state != RMPP_STATE_COMPLETE)
- ib_free_recv_mad(rmpp_recv->rmpp_wc);
- rmpp_recv->state = RMPP_STATE_CANCELING;
- }
- spin_unlock_irqrestore(&agent->lock, flags);
-
- list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
cancel_delayed_work(&rmpp_recv->timeout_work);
cancel_delayed_work(&rmpp_recv->cleanup_work);
}
+ spin_unlock_irqrestore(&agent->lock, flags);
flush_workqueue(agent->qp_info->port_priv->wq);
list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
&agent->rmpp_list, list) {
list_del(&rmpp_recv->list);
+ if (rmpp_recv->state != RMPP_STATE_COMPLETE)
+ ib_free_recv_mad(rmpp_recv->rmpp_wc);
destroy_rmpp_recv(rmpp_recv);
}
}
@@ -163,7 +158,7 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
recv_wc->recv_buf.grh, agent->port_num);
if (IS_ERR(ah))
- return (void *) ah;
+ return ERR_CAST(ah);
hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
@@ -171,7 +166,7 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
hdr_len, 0, GFP_KERNEL,
IB_MGMT_BASE_VERSION);
if (IS_ERR(msg))
- ib_destroy_ah(ah);
+ rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
else {
msg->ah = ah;
msg->context[0] = ah;
@@ -201,7 +196,7 @@ static void ack_ds_ack(struct ib_mad_agent_private *agent,
ret = ib_post_send_mad(msg, NULL);
if (ret) {
- ib_destroy_ah(msg->ah);
+ rdma_destroy_ah(msg->ah, RDMA_DESTROY_AH_SLEEPABLE);
ib_free_send_mad(msg);
}
}
@@ -209,7 +204,8 @@ static void ack_ds_ack(struct ib_mad_agent_private *agent,
void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
{
if (mad_send_wc->send_buf->context[0] == mad_send_wc->send_buf->ah)
- ib_destroy_ah(mad_send_wc->send_buf->ah);
+ rdma_destroy_ah(mad_send_wc->send_buf->ah,
+ RDMA_DESTROY_AH_SLEEPABLE);
ib_free_send_mad(mad_send_wc->send_buf);
}
@@ -237,7 +233,7 @@ static void nack_recv(struct ib_mad_agent_private *agent,
ret = ib_post_send_mad(msg, NULL);
if (ret) {
- ib_destroy_ah(msg->ah);
+ rdma_destroy_ah(msg->ah, RDMA_DESTROY_AH_SLEEPABLE);
ib_free_send_mad(msg);
}
}
@@ -271,10 +267,6 @@ static void recv_cleanup_handler(struct work_struct *work)
unsigned long flags;
spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
- if (rmpp_recv->state == RMPP_STATE_CANCELING) {
- spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
- return;
- }
list_del(&rmpp_recv->list);
spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
destroy_rmpp_recv(rmpp_recv);
@@ -304,7 +296,7 @@ create_rmpp_recv(struct ib_mad_agent_private *agent,
INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
spin_lock_init(&rmpp_recv->lock);
rmpp_recv->state = RMPP_STATE_ACTIVE;
- atomic_set(&rmpp_recv->refcount, 1);
+ refcount_set(&rmpp_recv->refcount, 1);
rmpp_recv->rmpp_wc = mad_recv_wc;
rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf;
@@ -356,7 +348,7 @@ acquire_rmpp_recv(struct ib_mad_agent_private *agent,
spin_lock_irqsave(&agent->lock, flags);
rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
if (rmpp_recv)
- atomic_inc(&rmpp_recv->refcount);
+ refcount_inc(&rmpp_recv->refcount);
spin_unlock_irqrestore(&agent->lock, flags);
return rmpp_recv;
}
@@ -390,8 +382,8 @@ static inline int get_seg_num(struct ib_mad_recv_buf *seg)
return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
}
-static inline struct ib_mad_recv_buf * get_next_seg(struct list_head *rmpp_list,
- struct ib_mad_recv_buf *seg)
+static inline struct ib_mad_recv_buf *get_next_seg(struct list_head *rmpp_list,
+ struct ib_mad_recv_buf *seg)
{
if (seg->list.next == rmpp_list)
return NULL;
@@ -404,8 +396,8 @@ static inline int window_size(struct ib_mad_agent_private *agent)
return max(agent->qp_info->recv_queue.max_active >> 3, 1);
}
-static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
- int seg_num)
+static struct ib_mad_recv_buf *find_seg_location(struct list_head *rmpp_list,
+ int seg_num)
{
struct ib_mad_recv_buf *seg_buf;
int cur_seg_num;
@@ -457,7 +449,7 @@ static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
return hdr_size + rmpp_recv->seg_num * data_size - pad;
}
-static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
+static struct ib_mad_recv_wc *complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
{
struct ib_mad_recv_wc *rmpp_wc;
@@ -552,7 +544,7 @@ start_rmpp(struct ib_mad_agent_private *agent,
destroy_rmpp_recv(rmpp_recv);
return continue_rmpp(agent, mad_recv_wc);
}
- atomic_inc(&rmpp_recv->refcount);
+ refcount_inc(&rmpp_recv->refcount);
if (get_last_flag(&mad_recv_wc->recv_buf)) {
rmpp_recv->state = RMPP_STATE_COMPLETE;
@@ -616,16 +608,20 @@ static void abort_send(struct ib_mad_agent_private *agent,
goto out; /* Unmatched send */
if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
- (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
+ (!mad_send_wr->timeout) ||
+ (mad_send_wr->state == IB_MAD_STATE_CANCELED))
goto out; /* Send is already done */
ib_mark_mad_done(mad_send_wr);
+ if (mad_send_wr->state == IB_MAD_STATE_DONE) {
+ spin_unlock_irqrestore(&agent->lock, flags);
+ wc.status = IB_WC_REM_ABORT_ERR;
+ wc.vendor_err = rmpp_status;
+ wc.send_buf = &mad_send_wr->send_buf;
+ ib_mad_complete_send_wr(mad_send_wr, &wc);
+ return;
+ }
spin_unlock_irqrestore(&agent->lock, flags);
-
- wc.status = IB_WC_REM_ABORT_ERR;
- wc.vendor_err = rmpp_status;
- wc.send_buf = &mad_send_wr->send_buf;
- ib_mad_complete_send_wr(mad_send_wr, &wc);
return;
out:
spin_unlock_irqrestore(&agent->lock, flags);
@@ -692,7 +688,8 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
}
if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
- (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
+ (!mad_send_wr->timeout) ||
+ (mad_send_wr->state == IB_MAD_STATE_CANCELED))
goto out; /* Send is already done */
if (seg_num > mad_send_wr->send_buf.seg_count ||
@@ -717,21 +714,24 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
struct ib_mad_send_wc wc;
ib_mark_mad_done(mad_send_wr);
+ if (mad_send_wr->state == IB_MAD_STATE_DONE) {
+ spin_unlock_irqrestore(&agent->lock, flags);
+ wc.status = IB_WC_SUCCESS;
+ wc.vendor_err = 0;
+ wc.send_buf = &mad_send_wr->send_buf;
+ ib_mad_complete_send_wr(mad_send_wr, &wc);
+ return;
+ }
spin_unlock_irqrestore(&agent->lock, flags);
-
- wc.status = IB_WC_SUCCESS;
- wc.vendor_err = 0;
- wc.send_buf = &mad_send_wr->send_buf;
- ib_mad_complete_send_wr(mad_send_wr, &wc);
return;
}
- if (mad_send_wr->refcount == 1)
+ if (mad_send_wr->state == IB_MAD_STATE_WAIT_RESP)
ib_reset_mad_timeout(mad_send_wr,
mad_send_wr->send_buf.timeout_ms);
spin_unlock_irqrestore(&agent->lock, flags);
ack_ds_ack(agent, mad_recv_wc);
return;
- } else if (mad_send_wr->refcount == 1 &&
+ } else if (mad_send_wr->state == IB_MAD_STATE_WAIT_RESP &&
mad_send_wr->seg_num < mad_send_wr->newwin &&
mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) {
/* Send failure will just result in a timeout/retry */
@@ -739,7 +739,7 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
if (ret)
goto out;
- mad_send_wr->refcount++;
+ change_mad_state(mad_send_wr, IB_MAD_STATE_SEND_START);
list_move_tail(&mad_send_wr->agent_list,
&mad_send_wr->mad_agent_priv->send_list);
}
@@ -852,7 +852,7 @@ static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr)
struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv;
struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad;
struct mad_rmpp_recv *rmpp_recv;
- struct ib_ah_attr ah_attr;
+ struct rdma_ah_attr ah_attr;
unsigned long flags;
int newwin = 1;
@@ -867,10 +867,10 @@ static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr)
(rmpp_recv->method & IB_MGMT_METHOD_RESP))
continue;
- if (ib_query_ah(mad_send_wr->send_buf.ah, &ah_attr))
+ if (rdma_query_ah(mad_send_wr->send_buf.ah, &ah_attr))
continue;
- if (rmpp_recv->slid == ah_attr.dlid) {
+ if (rmpp_recv->slid == rdma_ah_get_dlid(&ah_attr)) {
newwin = rmpp_recv->repwin;
break;
}
@@ -898,7 +898,6 @@ int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
mad_send_wr->newwin = init_newwin(mad_send_wr);
/* We need to wait for the final ACK even if there isn't a response */
- mad_send_wr->refcount += (mad_send_wr->timeout == 0);
ret = send_next_seg(mad_send_wr);
if (!ret)
return IB_RMPP_RESULT_CONSUMED;
@@ -920,7 +919,7 @@ int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */
if (mad_send_wc->status != IB_WC_SUCCESS ||
- mad_send_wr->status != IB_WC_SUCCESS)
+ mad_send_wr->state == IB_MAD_STATE_CANCELED)
return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */
if (!mad_send_wr->timeout)
diff --git a/drivers/infiniband/core/mr_pool.c b/drivers/infiniband/core/mr_pool.c
new file mode 100644
index 000000000000..c0e2df128b34
--- /dev/null
+++ b/drivers/infiniband/core/mr_pool.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016 HGST, a Western Digital Company.
+ */
+#include <rdma/ib_verbs.h>
+#include <rdma/mr_pool.h>
+
+struct ib_mr *ib_mr_pool_get(struct ib_qp *qp, struct list_head *list)
+{
+ struct ib_mr *mr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->mr_lock, flags);
+ mr = list_first_entry_or_null(list, struct ib_mr, qp_entry);
+ if (mr) {
+ list_del(&mr->qp_entry);
+ qp->mrs_used++;
+ }
+ spin_unlock_irqrestore(&qp->mr_lock, flags);
+
+ return mr;
+}
+EXPORT_SYMBOL(ib_mr_pool_get);
+
+void ib_mr_pool_put(struct ib_qp *qp, struct list_head *list, struct ib_mr *mr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->mr_lock, flags);
+ list_add(&mr->qp_entry, list);
+ qp->mrs_used--;
+ spin_unlock_irqrestore(&qp->mr_lock, flags);
+}
+EXPORT_SYMBOL(ib_mr_pool_put);
+
+int ib_mr_pool_init(struct ib_qp *qp, struct list_head *list, int nr,
+ enum ib_mr_type type, u32 max_num_sg, u32 max_num_meta_sg)
+{
+ struct ib_mr *mr;
+ unsigned long flags;
+ int ret, i;
+
+ for (i = 0; i < nr; i++) {
+ if (type == IB_MR_TYPE_INTEGRITY)
+ mr = ib_alloc_mr_integrity(qp->pd, max_num_sg,
+ max_num_meta_sg);
+ else
+ mr = ib_alloc_mr(qp->pd, type, max_num_sg);
+ if (IS_ERR(mr)) {
+ ret = PTR_ERR(mr);
+ goto out;
+ }
+
+ spin_lock_irqsave(&qp->mr_lock, flags);
+ list_add_tail(&mr->qp_entry, list);
+ spin_unlock_irqrestore(&qp->mr_lock, flags);
+ }
+
+ return 0;
+out:
+ ib_mr_pool_destroy(qp, list);
+ return ret;
+}
+EXPORT_SYMBOL(ib_mr_pool_init);
+
+void ib_mr_pool_destroy(struct ib_qp *qp, struct list_head *list)
+{
+ struct ib_mr *mr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->mr_lock, flags);
+ while (!list_empty(list)) {
+ mr = list_first_entry(list, struct ib_mr, qp_entry);
+ list_del(&mr->qp_entry);
+
+ spin_unlock_irqrestore(&qp->mr_lock, flags);
+ ib_dereg_mr(mr);
+ spin_lock_irqsave(&qp->mr_lock, flags);
+ }
+ spin_unlock_irqrestore(&qp->mr_lock, flags);
+}
+EXPORT_SYMBOL(ib_mr_pool_destroy);
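The four helpers above form the whole MR-pool API: ib_mr_pool_init() pre-allocates a fixed number of MRs onto a caller-owned list under qp->mr_lock, ib_mr_pool_get()/ib_mr_pool_put() recycle them without further allocation, and ib_mr_pool_destroy() deregisters whatever remains. A minimal usage sketch (the list head, pool size, and SG limit below are assumptions for illustration, not values from this patch):

#include <rdma/ib_verbs.h>
#include <rdma/mr_pool.h>

/* Illustrative consumer: the pool lives on a list owned by the caller. */
static int example_pool_setup(struct ib_qp *qp, struct list_head *mr_list)
{
        /* 16 MEM_REG MRs, each mapping up to 32 SG entries (assumed values) */
        return ib_mr_pool_init(qp, mr_list, 16, IB_MR_TYPE_MEM_REG, 32, 0);
}

static void example_pool_io(struct ib_qp *qp, struct list_head *mr_list)
{
        struct ib_mr *mr = ib_mr_pool_get(qp, mr_list);

        if (!mr)
                return;         /* pool exhausted */
        /* ... map an SG list and post work requests using mr ... */
        ib_mr_pool_put(qp, mr_list, mr);
}

static void example_pool_teardown(struct ib_qp *qp, struct list_head *mr_list)
{
        ib_mr_pool_destroy(qp, mr_list);
}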
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index d38d8b2b2979..a236532a9026 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -42,7 +42,7 @@
#include <rdma/ib_cache.h>
#include "sa.h"
-static void mcast_add_one(struct ib_device *device);
+static int mcast_add_one(struct ib_device *device);
static void mcast_remove_one(struct ib_device *device, void *client_data);
static struct ib_client mcast_client = {
@@ -61,9 +61,9 @@ struct mcast_port {
struct mcast_device *dev;
spinlock_t lock;
struct rb_root table;
- atomic_t refcount;
+ refcount_t refcount;
struct completion comp;
- u8 port_num;
+ u32 port_num;
};
struct mcast_device {
@@ -71,7 +71,7 @@ struct mcast_device {
struct ib_event_handler event_handler;
int start_port;
int end_port;
- struct mcast_port port[0];
+ struct mcast_port port[];
};
enum mcast_state {
@@ -102,11 +102,10 @@ struct mcast_group {
struct list_head pending_list;
struct list_head active_list;
struct mcast_member *last_join;
- int members[3];
+ int members[NUM_JOIN_MEMBERSHIP_TYPES];
atomic_t refcount;
enum mcast_group_state state;
struct ib_sa_query *query;
- int query_id;
u16 pkey_index;
u8 leave_state;
int retries;
@@ -118,7 +117,7 @@ struct mcast_member {
struct mcast_group *group;
struct list_head list;
enum mcast_state state;
- atomic_t refcount;
+ refcount_t refcount;
struct completion comp;
};
@@ -179,7 +178,7 @@ static struct mcast_group *mcast_insert(struct mcast_port *port,
static void deref_port(struct mcast_port *port)
{
- if (atomic_dec_and_test(&port->refcount))
+ if (refcount_dec_and_test(&port->refcount))
complete(&port->comp);
}
@@ -200,7 +199,7 @@ static void release_group(struct mcast_group *group)
static void deref_member(struct mcast_member *member)
{
- if (atomic_dec_and_test(&member->refcount))
+ if (refcount_dec_and_test(&member->refcount))
complete(&member->comp);
}
@@ -220,8 +219,9 @@ static void queue_join(struct mcast_member *member)
}
/*
- * A multicast group has three types of members: full member, non member, and
- * send only member. We need to keep track of the number of members of each
+ * A multicast group has four types of members: full member, non member,
+ * sendonly non member and sendonly full member.
+ * We need to keep track of the number of members of each
* type based on their join state. Adjust the number of members that belong to
* the specified join states.
*/
@@ -229,7 +229,7 @@ static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
int i;
- for (i = 0; i < 3; i++, join_state >>= 1)
+ for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++, join_state >>= 1)
if (join_state & 0x1)
group->members[i] += inc;
}
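For clarity, a short worked example of the bit walk above, assuming (per the comment) that bit i of join_state corresponds to members[i] in the order the comment lists the membership types:

        /* join_state = 0x5 has bits 0 and 2 set */
        adjust_membership(group, 0x5, 1);
        /* members[0] (full member) and members[2] (sendonly non member) each grow by 1 */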
@@ -245,7 +245,7 @@ static u8 get_leave_state(struct mcast_group *group)
u8 leave_state = 0;
int i;
- for (i = 0; i < 3; i++)
+ for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++)
if (!group->members[i])
leave_state |= (0x1 << i);
@@ -339,11 +339,7 @@ static int send_join(struct mcast_group *group, struct mcast_member *member)
member->multicast.comp_mask,
3000, GFP_KERNEL, join_handler, group,
&group->query);
- if (ret >= 0) {
- group->query_id = ret;
- ret = 0;
- }
- return ret;
+ return (ret > 0) ? 0 : ret;
}
static int send_leave(struct mcast_group *group, u8 leave_state)
@@ -363,11 +359,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state)
IB_SA_MCMEMBER_REC_JOIN_STATE,
3000, GFP_KERNEL, leave_handler,
group, &group->query);
- if (ret >= 0) {
- group->query_id = ret;
- ret = 0;
- }
- return ret;
+ return (ret > 0) ? 0 : ret;
}
static void join_group(struct mcast_group *group, struct mcast_member *member,
@@ -409,7 +401,7 @@ static void process_group_error(struct mcast_group *group)
while (!list_empty(&group->active_list)) {
member = list_entry(group->active_list.next,
struct mcast_member, list);
- atomic_inc(&member->refcount);
+ refcount_inc(&member->refcount);
list_del_init(&member->list);
adjust_membership(group, member->multicast.rec.join_state, -1);
member->state = MCAST_ERROR;
@@ -453,7 +445,7 @@ retest:
struct mcast_member, list);
multicast = &member->multicast;
join_state = multicast->rec.join_state;
- atomic_inc(&member->refcount);
+ refcount_inc(&member->refcount);
if (join_state == (group->rec.join_state & join_state)) {
status = cmp_rec(&group->rec, &multicast->rec,
@@ -505,7 +497,7 @@ static void process_join_error(struct mcast_group *group, int status)
member = list_entry(group->pending_list.next,
struct mcast_member, list);
if (group->last_join == member) {
- atomic_inc(&member->refcount);
+ refcount_inc(&member->refcount);
list_del_init(&member->list);
spin_unlock_irq(&group->lock);
ret = member->multicast.callback(status, &member->multicast);
@@ -526,8 +518,11 @@ static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
process_join_error(group, status);
else {
int mgids_changed, is_mgid0;
- ib_find_pkey(group->port->dev->device, group->port->port_num,
- be16_to_cpu(rec->pkey), &pkey_index);
+
+ if (ib_find_pkey(group->port->dev->device,
+ group->port->port_num, be16_to_cpu(rec->pkey),
+ &pkey_index))
+ pkey_index = MCAST_INVALID_PKEY_INDEX;
spin_lock_irq(&group->port->lock);
if (group->state == MCAST_BUSY &&
@@ -594,7 +589,7 @@ static struct mcast_group *acquire_group(struct mcast_port *port,
kfree(group);
group = cur_group;
} else
- atomic_inc(&port->refcount);
+ refcount_inc(&port->refcount);
found:
atomic_inc(&group->refcount);
spin_unlock_irqrestore(&port->lock, flags);
@@ -610,7 +605,7 @@ found:
*/
struct ib_sa_multicast *
ib_sa_join_multicast(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
+ struct ib_device *device, u32 port_num,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
int (*callback)(int status,
@@ -637,7 +632,7 @@ ib_sa_join_multicast(struct ib_sa_client *client,
member->multicast.callback = callback;
member->multicast.context = context;
init_completion(&member->comp);
- atomic_set(&member->refcount, 1);
+ refcount_set(&member->refcount, 1);
member->state = MCAST_JOINING;
member->group = acquire_group(&dev->port[port_num - dev->start_port],
@@ -695,7 +690,7 @@ void ib_sa_free_multicast(struct ib_sa_multicast *multicast)
}
EXPORT_SYMBOL(ib_sa_free_multicast);
-int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num,
+int ib_sa_get_mcmember_rec(struct ib_device *device, u32 port_num,
union ib_gid *mgid, struct ib_sa_mcmember_rec *rec)
{
struct mcast_device *dev;
@@ -721,32 +716,54 @@ int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num,
}
EXPORT_SYMBOL(ib_sa_get_mcmember_rec);
-int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
+/**
+ * ib_init_ah_from_mcmember - Initialize AH attribute from multicast
+ * member record and gid of the device.
+ * @device: RDMA device
+ * @port_num: Port of the rdma device to consider
+ * @rec: Multicast member record to use
+ * @ndev: Optional netdevice, applicable only for RoCE
+ * @gid_type: GID type to consider
+ * @ah_attr: AH attribute to fill in on successful completion
+ *
+ * ib_init_ah_from_mcmember() initializes AH attribute based on multicast
+ * member record and other device properties. On success the caller is
+ * responsible to call rdma_destroy_ah_attr on the ah_attr. Returns 0 on
+ * success or appropriate error code.
+ *
+ */
+int ib_init_ah_from_mcmember(struct ib_device *device, u32 port_num,
struct ib_sa_mcmember_rec *rec,
- struct ib_ah_attr *ah_attr)
+ struct net_device *ndev,
+ enum ib_gid_type gid_type,
+ struct rdma_ah_attr *ah_attr)
{
- int ret;
- u16 gid_index;
- u8 p;
-
- ret = ib_find_cached_gid(device, &rec->port_gid, &p, &gid_index);
- if (ret)
- return ret;
-
- memset(ah_attr, 0, sizeof *ah_attr);
- ah_attr->dlid = be16_to_cpu(rec->mlid);
- ah_attr->sl = rec->sl;
- ah_attr->port_num = port_num;
- ah_attr->static_rate = rec->rate;
-
- ah_attr->ah_flags = IB_AH_GRH;
- ah_attr->grh.dgid = rec->mgid;
+ const struct ib_gid_attr *sgid_attr;
- ah_attr->grh.sgid_index = (u8) gid_index;
- ah_attr->grh.flow_label = be32_to_cpu(rec->flow_label);
- ah_attr->grh.hop_limit = rec->hop_limit;
- ah_attr->grh.traffic_class = rec->traffic_class;
+ /* GID table is not based on the netdevice for IB link layer,
+ * so ignore ndev during search.
+ */
+ if (rdma_protocol_ib(device, port_num))
+ ndev = NULL;
+ else if (!rdma_protocol_roce(device, port_num))
+ return -EINVAL;
+ sgid_attr = rdma_find_gid_by_port(device, &rec->port_gid,
+ gid_type, port_num, ndev);
+ if (IS_ERR(sgid_attr))
+ return PTR_ERR(sgid_attr);
+
+ memset(ah_attr, 0, sizeof(*ah_attr));
+ ah_attr->type = rdma_ah_find_type(device, port_num);
+
+ rdma_ah_set_dlid(ah_attr, be16_to_cpu(rec->mlid));
+ rdma_ah_set_sl(ah_attr, rec->sl);
+ rdma_ah_set_port_num(ah_attr, port_num);
+ rdma_ah_set_static_rate(ah_attr, rec->rate);
+ rdma_move_grh_sgid_attr(ah_attr, &rec->mgid,
+ be32_to_cpu(rec->flow_label),
+ rec->hop_limit, rec->traffic_class,
+ sgid_attr);
return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_mcmember);
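As the new kernel-doc states, a successful call leaves a GID-table reference inside ah_attr (moved in via rdma_move_grh_sgid_attr()) that the caller must drop with rdma_destroy_ah_attr(). A hedged sketch of the expected calling pattern (the surrounding variables and the chosen GID type are assumptions for illustration):

        struct rdma_ah_attr ah_attr;
        int ret;

        ret = ib_init_ah_from_mcmember(device, port_num, &rec, ndev,
                                       IB_GID_TYPE_IB, &ah_attr);
        if (ret)
                return ret;
        /* ... use ah_attr, e.g. to create an address handle ... */
        rdma_destroy_ah_attr(&ah_attr);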
@@ -788,7 +805,6 @@ static void mcast_event_handler(struct ib_event_handler *handler,
switch (event->event) {
case IB_EVENT_PORT_ERR:
case IB_EVENT_LID_CHANGE:
- case IB_EVENT_SM_CHANGE:
case IB_EVENT_CLIENT_REREGISTER:
mcast_groups_event(&dev->port[index], MCAST_GROUP_ERROR);
break;
@@ -800,17 +816,17 @@ static void mcast_event_handler(struct ib_event_handler *handler,
}
}
-static void mcast_add_one(struct ib_device *device)
+static int mcast_add_one(struct ib_device *device)
{
struct mcast_device *dev;
struct mcast_port *port;
int i;
int count = 0;
- dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port,
+ dev = kmalloc(struct_size(dev, port, device->phys_port_cnt),
GFP_KERNEL);
if (!dev)
- return;
+ return -ENOMEM;
dev->start_port = rdma_start_port(device);
dev->end_port = rdma_end_port(device);
@@ -824,13 +840,13 @@ static void mcast_add_one(struct ib_device *device)
spin_lock_init(&port->lock);
port->table = RB_ROOT;
init_completion(&port->comp);
- atomic_set(&port->refcount, 1);
+ refcount_set(&port->refcount, 1);
++count;
}
if (!count) {
kfree(dev);
- return;
+ return -EOPNOTSUPP;
}
dev->device = device;
@@ -838,6 +854,7 @@ static void mcast_add_one(struct ib_device *device)
INIT_IB_EVENT_HANDLER(&dev->event_handler, device, mcast_event_handler);
ib_register_event_handler(&dev->event_handler);
+ return 0;
}
static void mcast_remove_one(struct ib_device *device, void *client_data)
@@ -846,9 +863,6 @@ static void mcast_remove_one(struct ib_device *device, void *client_data)
struct mcast_port *port;
int i;
- if (!dev)
- return;
-
ib_unregister_event_handler(&dev->event_handler);
flush_workqueue(mcast_wq);
@@ -867,7 +881,7 @@ int mcast_init(void)
{
int ret;
- mcast_wq = create_singlethread_workqueue("ib_mcast");
+ mcast_wq = alloc_ordered_workqueue("ib_mcast", WQ_MEM_RECLAIM);
if (!mcast_wq)
return -ENOMEM;
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index d47df9356779..def14c54b648 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2017 Mellanox Technologies Inc. All rights reserved.
* Copyright (c) 2010 Voltaire Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -35,237 +36,297 @@
#include <linux/export.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#include <net/sock.h>
#include <rdma/rdma_netlink.h>
+#include <linux/module.h>
+#include "core_priv.h"
-struct ibnl_client {
- struct list_head list;
- int index;
- int nops;
- const struct ibnl_client_cbs *cb_table;
-};
-
-static DEFINE_MUTEX(ibnl_mutex);
-static struct sock *nls;
-static LIST_HEAD(client_list);
+static struct {
+ const struct rdma_nl_cbs *cb_table;
+ /* Synchronizes between ongoing netlink commands and netlink client
+ * unregistration.
+ */
+ struct rw_semaphore sem;
+} rdma_nl_types[RDMA_NL_NUM_CLIENTS];
-int ibnl_chk_listeners(unsigned int group)
+bool rdma_nl_chk_listeners(unsigned int group)
{
- if (netlink_has_listeners(nls, group) == 0)
- return -1;
- return 0;
+ struct rdma_dev_net *rnet = rdma_net_to_dev_net(&init_net);
+
+ return netlink_has_listeners(rnet->nl_sock, group);
}
-EXPORT_SYMBOL(ibnl_chk_listeners);
+EXPORT_SYMBOL(rdma_nl_chk_listeners);
-int ibnl_add_client(int index, int nops,
- const struct ibnl_client_cbs cb_table[])
+static bool is_nl_msg_valid(unsigned int type, unsigned int op)
{
- struct ibnl_client *cur;
- struct ibnl_client *nl_client;
+ static const unsigned int max_num_ops[RDMA_NL_NUM_CLIENTS] = {
+ [RDMA_NL_IWCM] = RDMA_NL_IWPM_NUM_OPS,
+ [RDMA_NL_LS] = RDMA_NL_LS_NUM_OPS,
+ [RDMA_NL_NLDEV] = RDMA_NLDEV_NUM_OPS,
+ };
- nl_client = kmalloc(sizeof *nl_client, GFP_KERNEL);
- if (!nl_client)
- return -ENOMEM;
+ /*
+ * This BUILD_BUG_ON is intended to catch the addition of a new
+ * RDMA netlink protocol without updating the array above.
+ */
+ BUILD_BUG_ON(RDMA_NL_NUM_CLIENTS != 6);
- nl_client->index = index;
- nl_client->nops = nops;
- nl_client->cb_table = cb_table;
+ if (type >= RDMA_NL_NUM_CLIENTS)
+ return false;
- mutex_lock(&ibnl_mutex);
+ return op < max_num_ops[type];
+}
- list_for_each_entry(cur, &client_list, list) {
- if (cur->index == index) {
- pr_warn("Client for %d already exists\n", index);
- mutex_unlock(&ibnl_mutex);
- kfree(nl_client);
- return -EINVAL;
- }
- }
+static const struct rdma_nl_cbs *
+get_cb_table(const struct sk_buff *skb, unsigned int type, unsigned int op)
+{
+ const struct rdma_nl_cbs *cb_table;
- list_add_tail(&nl_client->list, &client_list);
+ /*
+ * Currently only the NLDEV client supports netlink commands in a
+ * non-init_net network namespace.
+ */
+ if (sock_net(skb->sk) != &init_net && type != RDMA_NL_NLDEV)
+ return NULL;
- mutex_unlock(&ibnl_mutex);
+ cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
+ if (!cb_table) {
+ /*
+ * Didn't get a valid reference to the table; attempt a module
+ * load once.
+ */
+ up_read(&rdma_nl_types[type].sem);
- return 0;
+ request_module("rdma-netlink-subsys-%u", type);
+
+ down_read(&rdma_nl_types[type].sem);
+ cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
+ }
+ if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit))
+ return NULL;
+ return cb_table;
}
-EXPORT_SYMBOL(ibnl_add_client);
-int ibnl_remove_client(int index)
+void rdma_nl_register(unsigned int index,
+ const struct rdma_nl_cbs cb_table[])
{
- struct ibnl_client *cur, *next;
-
- mutex_lock(&ibnl_mutex);
- list_for_each_entry_safe(cur, next, &client_list, list) {
- if (cur->index == index) {
- list_del(&(cur->list));
- mutex_unlock(&ibnl_mutex);
- kfree(cur);
- return 0;
- }
- }
- pr_warn("Can't remove callback for client idx %d. Not found\n", index);
- mutex_unlock(&ibnl_mutex);
+ if (WARN_ON(!is_nl_msg_valid(index, 0)) ||
+ WARN_ON(READ_ONCE(rdma_nl_types[index].cb_table)))
+ return;
+
+ /* Pairs with the READ_ONCE of cb_table in get_cb_table() */
+ smp_store_release(&rdma_nl_types[index].cb_table, cb_table);
+}
+EXPORT_SYMBOL(rdma_nl_register);
- return -EINVAL;
+void rdma_nl_unregister(unsigned int index)
+{
+ down_write(&rdma_nl_types[index].sem);
+ rdma_nl_types[index].cb_table = NULL;
+ up_write(&rdma_nl_types[index].sem);
}
-EXPORT_SYMBOL(ibnl_remove_client);
+EXPORT_SYMBOL(rdma_nl_unregister);
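rdma_nl_register() replaces the old ibnl client list with a fixed table indexed by client id, so a client now supplies an rdma_nl_cbs array sized by its op count. A minimal sketch of the registration pattern, using the NLDEV client index as the example (the handler body and the specific op, RDMA_NLDEV_CMD_GET, are assumptions for illustration):

static int example_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        /* fill skb with one message per device ... */
        return skb->len;
}

static const struct rdma_nl_cbs example_cb_table[RDMA_NLDEV_NUM_OPS] = {
        [RDMA_NLDEV_CMD_GET] = {
                .dump = example_get_dumpit,
        },
};

/* at subsystem init */
rdma_nl_register(RDMA_NL_NLDEV, example_cb_table);
/* at subsystem exit */
rdma_nl_unregister(RDMA_NL_NLDEV);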
void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
int len, int client, int op, int flags)
{
- unsigned char *prev_tail;
-
- prev_tail = skb_tail_pointer(skb);
- *nlh = nlmsg_put(skb, 0, seq, RDMA_NL_GET_TYPE(client, op),
- len, flags);
+ *nlh = nlmsg_put(skb, 0, seq, RDMA_NL_GET_TYPE(client, op), len, flags);
if (!*nlh)
- goto out_nlmsg_trim;
- (*nlh)->nlmsg_len = skb_tail_pointer(skb) - prev_tail;
+ return NULL;
return nlmsg_data(*nlh);
-
-out_nlmsg_trim:
- nlmsg_trim(skb, prev_tail);
- return NULL;
}
EXPORT_SYMBOL(ibnl_put_msg);
int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
int len, void *data, int type)
{
- unsigned char *prev_tail;
-
- prev_tail = skb_tail_pointer(skb);
- if (nla_put(skb, type, len, data))
- goto nla_put_failure;
- nlh->nlmsg_len += skb_tail_pointer(skb) - prev_tail;
+ if (nla_put(skb, type, len, data)) {
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+ }
return 0;
-
-nla_put_failure:
- nlmsg_trim(skb, prev_tail - nlh->nlmsg_len);
- return -EMSGSIZE;
}
EXPORT_SYMBOL(ibnl_put_attr);
-static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
{
- struct ibnl_client *client;
int type = nlh->nlmsg_type;
- int index = RDMA_NL_GET_CLIENT(type);
- int op = RDMA_NL_GET_OP(type);
-
- list_for_each_entry(client, &client_list, list) {
- if (client->index == index) {
- if (op < 0 || op >= client->nops ||
- !client->cb_table[op].dump)
- return -EINVAL;
-
- /*
- * For response or local service set_timeout request,
- * there is no need to use netlink_dump_start.
- */
- if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
- (index == RDMA_NL_LS &&
- op == RDMA_NL_LS_OP_SET_TIMEOUT)) {
- struct netlink_callback cb = {
- .skb = skb,
- .nlh = nlh,
- .dump = client->cb_table[op].dump,
- .module = client->cb_table[op].module,
- };
-
- return cb.dump(skb, &cb);
- }
-
- {
- struct netlink_dump_control c = {
- .dump = client->cb_table[op].dump,
- .module = client->cb_table[op].module,
- };
- return netlink_dump_start(nls, skb, nlh, &c);
- }
- }
+ unsigned int index = RDMA_NL_GET_CLIENT(type);
+ unsigned int op = RDMA_NL_GET_OP(type);
+ const struct rdma_nl_cbs *cb_table;
+ int err = -EINVAL;
+
+ if (!is_nl_msg_valid(index, op))
+ return -EINVAL;
+
+ down_read(&rdma_nl_types[index].sem);
+ cb_table = get_cb_table(skb, index, op);
+ if (!cb_table)
+ goto done;
+
+ if ((cb_table[op].flags & RDMA_NL_ADMIN_PERM) &&
+ !netlink_capable(skb, CAP_NET_ADMIN)) {
+ err = -EPERM;
+ goto done;
+ }
+
+ /*
+ * LS responses overload the 0x100 (NLM_F_ROOT) flag. Don't
+ * mistakenly call the .dump() function.
+ */
+ if (index == RDMA_NL_LS) {
+ if (cb_table[op].doit)
+ err = cb_table[op].doit(skb, nlh, extack);
+ goto done;
+ }
+ /* FIXME: Convert IWCM to properly handle doit callbacks */
+ if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_IWCM) {
+ struct netlink_dump_control c = {
+ .dump = cb_table[op].dump,
+ };
+ if (c.dump)
+ err = netlink_dump_start(skb->sk, skb, nlh, &c);
+ goto done;
}
- pr_info("Index %d wasn't found in client list\n", index);
- return -EINVAL;
+ if (cb_table[op].doit)
+ err = cb_table[op].doit(skb, nlh, extack);
+done:
+ up_read(&rdma_nl_types[index].sem);
+ return err;
}
-static void ibnl_rcv_reply_skb(struct sk_buff *skb)
+/*
+ * This function is similar to netlink_rcv_skb with one exception:
+ * It calls to the callback for the netlink messages without NLM_F_REQUEST
+ * flag. These messages are intended for RDMA_NL_LS consumer, so it is allowed
+ * for that consumer only.
+ */
+static int rdma_nl_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
+ struct nlmsghdr *,
+ struct netlink_ext_ack *))
{
+ struct netlink_ext_ack extack = {};
struct nlmsghdr *nlh;
- int msglen;
+ int err;
- /*
- * Process responses until there is no more message or the first
- * request. Generally speaking, it is not recommended to mix responses
- * with requests.
- */
while (skb->len >= nlmsg_total_size(0)) {
+ int msglen;
+
nlh = nlmsg_hdr(skb);
+ err = 0;
if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
- return;
-
- /* Handle response only */
- if (nlh->nlmsg_flags & NLM_F_REQUEST)
- return;
-
- ibnl_rcv_msg(skb, nlh);
+ return 0;
+ /*
+ * Generally speaking, netlink requests are only handled
+ * by the kernel, but RDMA_NL_LS is different: it runs a
+ * reversed netlink scheme in which the kernel initiates
+ * messages and waits for replies with data to keep its
+ * pathrecord cache in sync.
+ */
+ if (!(nlh->nlmsg_flags & NLM_F_REQUEST) &&
+ (RDMA_NL_GET_CLIENT(nlh->nlmsg_type) != RDMA_NL_LS))
+ goto ack;
+
+ /* Skip control messages */
+ if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
+ goto ack;
+
+ err = cb(skb, nlh, &extack);
+ if (err == -EINTR)
+ goto skip;
+
+ack:
+ if (nlh->nlmsg_flags & NLM_F_ACK || err)
+ netlink_ack(skb, nlh, err, &extack);
+
+skip:
msglen = NLMSG_ALIGN(nlh->nlmsg_len);
if (msglen > skb->len)
msglen = skb->len;
skb_pull(skb, msglen);
}
+
+ return 0;
+}
+
+static void rdma_nl_rcv(struct sk_buff *skb)
+{
+ rdma_nl_rcv_skb(skb, &rdma_nl_rcv_msg);
+}
+
+int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid)
+{
+ struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
+ int err;
+
+ err = netlink_unicast(rnet->nl_sock, skb, pid, MSG_DONTWAIT);
+ return (err < 0) ? err : 0;
}
+EXPORT_SYMBOL(rdma_nl_unicast);
-static void ibnl_rcv(struct sk_buff *skb)
+int rdma_nl_unicast_wait(struct net *net, struct sk_buff *skb, __u32 pid)
{
- mutex_lock(&ibnl_mutex);
- ibnl_rcv_reply_skb(skb);
- netlink_rcv_skb(skb, &ibnl_rcv_msg);
- mutex_unlock(&ibnl_mutex);
+ struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
+ int err;
+
+ err = netlink_unicast(rnet->nl_sock, skb, pid, 0);
+ return (err < 0) ? err : 0;
}
+EXPORT_SYMBOL(rdma_nl_unicast_wait);
-int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh,
- __u32 pid)
+int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
+ unsigned int group, gfp_t flags)
{
- return nlmsg_unicast(nls, skb, pid);
+ struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
+
+ return nlmsg_multicast(rnet->nl_sock, skb, 0, group, flags);
}
-EXPORT_SYMBOL(ibnl_unicast);
+EXPORT_SYMBOL(rdma_nl_multicast);
-int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh,
- unsigned int group, gfp_t flags)
+void rdma_nl_init(void)
{
- return nlmsg_multicast(nls, skb, 0, group, flags);
+ int idx;
+
+ for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
+ init_rwsem(&rdma_nl_types[idx].sem);
}
-EXPORT_SYMBOL(ibnl_multicast);
-int __init ibnl_init(void)
+void rdma_nl_exit(void)
{
+ int idx;
+
+ for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
+ WARN(rdma_nl_types[idx].cb_table,
+ "Netlink client %d wasn't released prior to unloading %s\n",
+ idx, KBUILD_MODNAME);
+}
+
+int rdma_nl_net_init(struct rdma_dev_net *rnet)
+{
+ struct net *net = read_pnet(&rnet->net);
struct netlink_kernel_cfg cfg = {
- .input = ibnl_rcv,
+ .input = rdma_nl_rcv,
+ .flags = NL_CFG_F_NONROOT_RECV,
};
+ struct sock *nls;
- nls = netlink_kernel_create(&init_net, NETLINK_RDMA, &cfg);
- if (!nls) {
- pr_warn("Failed to create netlink socket\n");
+ nls = netlink_kernel_create(net, NETLINK_RDMA, &cfg);
+ if (!nls)
return -ENOMEM;
- }
+ nls->sk_sndtimeo = 10 * HZ;
+ rnet->nl_sock = nls;
return 0;
}
-void ibnl_cleanup(void)
+void rdma_nl_net_exit(struct rdma_dev_net *rnet)
{
- struct ibnl_client *cur, *next;
-
- mutex_lock(&ibnl_mutex);
- list_for_each_entry_safe(cur, next, &client_list, list) {
- list_del(&(cur->list));
- kfree(cur);
- }
- mutex_unlock(&ibnl_mutex);
-
- netlink_kernel_release(nls);
+ netlink_kernel_release(rnet->nl_sock);
}
+
+MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_RDMA);
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
new file mode 100644
index 000000000000..2220a2dfab24
--- /dev/null
+++ b/drivers/infiniband/core/nldev.c
@@ -0,0 +1,2920 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/pid.h>
+#include <linux/pid_namespace.h>
+#include <linux/mutex.h>
+#include <net/netlink.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/rdma_netlink.h>
+
+#include "core_priv.h"
+#include "cma_priv.h"
+#include "restrack.h"
+#include "uverbs.h"
+
+/*
+ * This determines whether a non-privileged user is allowed to specify a
+ * controlled QKEY or not, when true non-privileged user is allowed to specify
+ * a controlled QKEY.
+ */
+static bool privileged_qkey;
+
+typedef int (*res_fill_func_t)(struct sk_buff*, bool,
+ struct rdma_restrack_entry*, uint32_t);
+
+/*
+ * Sort array elements by the netlink attribute name
+ */
+static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
+ [RDMA_NLDEV_ATTR_CHARDEV] = { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_CHARDEV_ABI] = { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_CHARDEV_NAME] = { .type = NLA_NUL_STRING,
+ .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
+ [RDMA_NLDEV_ATTR_CHARDEV_TYPE] = { .type = NLA_NUL_STRING,
+ .len = RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE },
+ [RDMA_NLDEV_ATTR_DEV_DIM] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_DEV_INDEX] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING,
+ .len = IB_DEVICE_NAME_MAX },
+ [RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_DEV_PROTOCOL] = { .type = NLA_NUL_STRING,
+ .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
+ [RDMA_NLDEV_ATTR_DRIVER] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_DRIVER_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_DRIVER_STRING] = { .type = NLA_NUL_STRING,
+ .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
+ [RDMA_NLDEV_ATTR_DRIVER_S32] = { .type = NLA_S32 },
+ [RDMA_NLDEV_ATTR_DRIVER_S64] = { .type = NLA_S64 },
+ [RDMA_NLDEV_ATTR_DRIVER_U32] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_DRIVER_U64] = { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_FW_VERSION] = { .type = NLA_NUL_STRING,
+ .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
+ [RDMA_NLDEV_ATTR_LID] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_LINK_TYPE] = { .type = NLA_NUL_STRING,
+ .len = IFNAMSIZ },
+ [RDMA_NLDEV_ATTR_LMC] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_NDEV_INDEX] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_NDEV_NAME] = { .type = NLA_NUL_STRING,
+ .len = IFNAMSIZ },
+ [RDMA_NLDEV_ATTR_NODE_GUID] = { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_PORT_INDEX] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_PORT_STATE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_RES_CM_ID] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_CM_IDN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_CQ] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_CQE] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_CQN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_CQ_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_CTX] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_CTXN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_CTX_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_DST_ADDR] = {
+ .len = sizeof(struct __kernel_sockaddr_storage) },
+ [RDMA_NLDEV_ATTR_RES_IOVA] = { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_RES_KERN_NAME] = { .type = NLA_NUL_STRING,
+ .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
+ [RDMA_NLDEV_ATTR_RES_LKEY] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_LQPN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_MR] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_MRLEN] = { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_RES_MRN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_MR_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_RES_PD] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_PDN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_PD_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_PID] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_POLL_CTX] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_RES_PS] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_QP] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_QP_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_RAW] = { .type = NLA_BINARY },
+ [RDMA_NLDEV_ATTR_RES_RKEY] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_RQPN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_RQ_PSN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_SQ_PSN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_SRC_ADDR] = {
+ .len = sizeof(struct __kernel_sockaddr_storage) },
+ [RDMA_NLDEV_ATTR_RES_STATE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_RES_SUMMARY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR]= { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME]= { .type = NLA_NUL_STRING,
+ .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
+ [RDMA_NLDEV_ATTR_RES_TYPE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_RES_SUBTYPE] = { .type = NLA_NUL_STRING,
+ .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
+ [RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY]= { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_USECNT] = { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_RES_SRQ] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_SRQN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_SRQ_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_MIN_RANGE] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_MAX_RANGE] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_SM_LID] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_SUBNET_PREFIX] = { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_STAT_MODE] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_STAT_RES] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_STAT_COUNTER] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_STAT_COUNTER_ID] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_STAT_HWCOUNTERS] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME] = { .type = NLA_NUL_STRING },
+ [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE] = { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID] = { .type = NLA_U32 },
+ [RDMA_NLDEV_NET_NS_FD] = { .type = NLA_U32 },
+ [RDMA_NLDEV_SYS_ATTR_NETNS_MODE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC] = { .type = NLA_U8 },
+ [RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_DRIVER_DETAILS] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_DEV_TYPE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_PARENT_NAME] = { .type = NLA_NUL_STRING },
+ [RDMA_NLDEV_ATTR_NAME_ASSIGN_TYPE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_EVENT_TYPE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_STAT_OPCOUNTER_ENABLED] = { .type = NLA_U8 },
+};
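Handlers in this file validate and extract attributes against nldev_policy before acting on them. A hedged sketch of the common pattern inside a doit handler (error handling trimmed; nlmsg_parse() is the standard netlink helper, assumed here rather than quoted from this patch):

        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        u32 index;
        int err;

        err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                          nldev_policy, extack);
        if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        /* look up the ib_device by index and build the reply ... */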
+
+static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
+ enum rdma_nldev_print_type print_type)
+{
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name))
+ return -EMSGSIZE;
+ if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC &&
+ nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name,
+ enum rdma_nldev_print_type print_type,
+ u32 value)
+{
+ if (put_driver_name_print_type(msg, name, print_type))
+ return -EMSGSIZE;
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
+ enum rdma_nldev_print_type print_type,
+ u64 value)
+{
+ if (put_driver_name_print_type(msg, name, print_type))
+ return -EMSGSIZE;
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value,
+ RDMA_NLDEV_ATTR_PAD))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+int rdma_nl_put_driver_string(struct sk_buff *msg, const char *name,
+ const char *str)
+{
+ if (put_driver_name_print_type(msg, name,
+ RDMA_NLDEV_PRINT_TYPE_UNSPEC))
+ return -EMSGSIZE;
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, str))
+ return -EMSGSIZE;
+
+ return 0;
+}
+EXPORT_SYMBOL(rdma_nl_put_driver_string);
+
+int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
+{
+ return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
+ value);
+}
+EXPORT_SYMBOL(rdma_nl_put_driver_u32);
+
+int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
+ u32 value)
+{
+ return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
+ value);
+}
+EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex);
+
+int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value)
+{
+ return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
+ value);
+}
+EXPORT_SYMBOL(rdma_nl_put_driver_u64);
+
+int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
+{
+ return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
+ value);
+}
+EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);
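These rdma_nl_put_driver_*() helpers let a device driver append named, typed values to a resource dump when one of its fill_res_*_entry() ops is invoked (see, e.g., fill_res_qp_entry_query() below). A hedged sketch of a driver-side callback (the attribute names and values are made up for the sketch):

static int example_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *qp)
{
        /* "sq_depth" and the constants are illustrative only */
        if (rdma_nl_put_driver_u32(msg, "sq_depth", 128))
                return -EMSGSIZE;
        if (rdma_nl_put_driver_u32_hex(msg, "flags", 0x3))
                return -EMSGSIZE;
        return 0;
}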
+
+bool rdma_nl_get_privileged_qkey(void)
+{
+ return privileged_qkey;
+}
+EXPORT_SYMBOL(rdma_nl_get_privileged_qkey);
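rdma_nl_get_privileged_qkey() merely exposes the module-wide toggle declared above. A hedged sketch of how a consumer might gate the use of a controlled QKEY on it (the capability check and what counts as "controlled" are assumptions for illustration, not taken from this patch):

        /* Illustrative only: allow a controlled QKEY if the global toggle
         * permits it or the caller holds a suitable capability.
         */
        if (!rdma_nl_get_privileged_qkey() && !capable(CAP_NET_RAW))
                return -EPERM;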
+
+static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
+{
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
+ return -EMSGSIZE;
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
+ dev_name(&device->dev)))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
+{
+ char fw[IB_FW_VERSION_NAME_MAX];
+ int ret = 0;
+ u32 port;
+
+ if (fill_nldev_handle(msg, device))
+ return -EMSGSIZE;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
+ return -EMSGSIZE;
+
+ BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
+ device->attrs.device_cap_flags,
+ RDMA_NLDEV_ATTR_PAD))
+ return -EMSGSIZE;
+
+ ib_get_device_fw_str(device, fw);
+ /* Device without FW has strlen(fw) = 0 */
+ if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
+ return -EMSGSIZE;
+
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
+ be64_to_cpu(device->node_guid),
+ RDMA_NLDEV_ATTR_PAD))
+ return -EMSGSIZE;
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
+ be64_to_cpu(device->attrs.sys_image_guid),
+ RDMA_NLDEV_ATTR_PAD))
+ return -EMSGSIZE;
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
+ return -EMSGSIZE;
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, device->use_cq_dim))
+ return -EMSGSIZE;
+
+ if (device->type &&
+ nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_TYPE, device->type))
+ return -EMSGSIZE;
+
+ if (device->parent &&
+ nla_put_string(msg, RDMA_NLDEV_ATTR_PARENT_NAME,
+ dev_name(&device->parent->dev)))
+ return -EMSGSIZE;
+
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_NAME_ASSIGN_TYPE,
+ device->name_assign_type))
+ return -EMSGSIZE;
+
+ /*
+ * The link type is determined from the first port. mlx4 devices,
+ * which can potentially have two different link types on the same
+ * IB device, are considered something to avoid in the future.
+ */
+ port = rdma_start_port(device);
+ if (rdma_cap_opa_mad(device, port))
+ ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa");
+ else if (rdma_protocol_ib(device, port))
+ ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib");
+ else if (rdma_protocol_iwarp(device, port))
+ ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw");
+ else if (rdma_protocol_roce(device, port))
+ ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce");
+ else if (rdma_protocol_usnic(device, port))
+ ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL,
+ "usnic");
+ return ret;
+}
+
+static int fill_port_info(struct sk_buff *msg,
+ struct ib_device *device, u32 port,
+ const struct net *net)
+{
+ struct net_device *netdev = NULL;
+ struct ib_port_attr attr;
+ int ret;
+ u64 cap_flags = 0;
+
+ if (fill_nldev_handle(msg, device))
+ return -EMSGSIZE;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
+ return -EMSGSIZE;
+
+ ret = ib_query_port(device, port, &attr);
+ if (ret)
+ return ret;
+
+ if (rdma_protocol_ib(device, port)) {
+ BUILD_BUG_ON((sizeof(attr.port_cap_flags) +
+ sizeof(attr.port_cap_flags2)) > sizeof(u64));
+ cap_flags = attr.port_cap_flags |
+ ((u64)attr.port_cap_flags2 << 32);
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
+ cap_flags, RDMA_NLDEV_ATTR_PAD))
+ return -EMSGSIZE;
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
+ attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
+ return -EMSGSIZE;
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
+ return -EMSGSIZE;
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
+ return -EMSGSIZE;
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
+ return -EMSGSIZE;
+ }
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
+ return -EMSGSIZE;
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
+ return -EMSGSIZE;
+
+ netdev = ib_device_get_netdev(device, port);
+ if (netdev && net_eq(dev_net(netdev), net)) {
+ ret = nla_put_u32(msg,
+ RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
+ if (ret)
+ goto out;
+ ret = nla_put_string(msg,
+ RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
+ }
+
+out:
+ dev_put(netdev);
+ return ret;
+}
+
+static int fill_res_info_entry(struct sk_buff *msg,
+ const char *name, u64 curr)
+{
+ struct nlattr *entry_attr;
+
+ entry_attr = nla_nest_start_noflag(msg,
+ RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
+ if (!entry_attr)
+ return -EMSGSIZE;
+
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
+ goto err;
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
+ RDMA_NLDEV_ATTR_PAD))
+ goto err;
+
+ nla_nest_end(msg, entry_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, entry_attr);
+ return -EMSGSIZE;
+}
+
+static int fill_res_info(struct sk_buff *msg, struct ib_device *device,
+ bool show_details)
+{
+ static const char * const names[RDMA_RESTRACK_MAX] = {
+ [RDMA_RESTRACK_PD] = "pd",
+ [RDMA_RESTRACK_CQ] = "cq",
+ [RDMA_RESTRACK_QP] = "qp",
+ [RDMA_RESTRACK_CM_ID] = "cm_id",
+ [RDMA_RESTRACK_MR] = "mr",
+ [RDMA_RESTRACK_CTX] = "ctx",
+ [RDMA_RESTRACK_SRQ] = "srq",
+ };
+
+ struct nlattr *table_attr;
+ int ret, i, curr;
+
+ if (fill_nldev_handle(msg, device))
+ return -EMSGSIZE;
+
+ table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
+ if (!names[i])
+ continue;
+ curr = rdma_restrack_count(device, i, show_details);
+ ret = fill_res_info_entry(msg, names[i], curr);
+ if (ret)
+ goto err;
+ }
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+ return ret;
+}
+
+static int fill_res_name_pid(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ int err = 0;
+
+ /*
+ * For user resources, userspace should read /proc/PID/comm to get
+ * the task name.
+ */
+ if (rdma_is_kernel_res(res)) {
+ err = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
+ res->kern_name);
+ } else {
+ pid_t pid;
+
+ pid = task_pid_vnr(res->task);
+ /*
+ * The task is dead and in the zombie state; there is no
+ * need to print its PID anymore.
+ */
+ if (pid)
+ /*
+ * This part is racy: the task can be killed and the PID will
+ * become zero right here, but that is OK, the next query won't
+ * return a PID. We don't promise real-time reflection of SW
+ * objects.
+ */
+ err = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, pid);
+ }
+
+ return err ? -EMSGSIZE : 0;
+}
+
+static int fill_res_qp_entry_query(struct sk_buff *msg,
+ struct rdma_restrack_entry *res,
+ struct ib_device *dev,
+ struct ib_qp *qp)
+{
+ struct ib_qp_init_attr qp_init_attr;
+ struct ib_qp_attr qp_attr;
+ int ret;
+
+ ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
+ if (ret)
+ return ret;
+
+ if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
+ qp_attr.dest_qp_num))
+ goto err;
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
+ qp_attr.rq_psn))
+ goto err;
+ }
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
+ goto err;
+
+ if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
+ qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
+ qp_attr.path_mig_state))
+ goto err;
+ }
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
+ goto err;
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
+ goto err;
+
+ if (dev->ops.fill_res_qp_entry)
+ return dev->ops.fill_res_qp_entry(msg, qp);
+ return 0;
+
+err: return -EMSGSIZE;
+}
+
+static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ struct rdma_restrack_entry *res, uint32_t port)
+{
+ struct ib_qp *qp = container_of(res, struct ib_qp, res);
+ struct ib_device *dev = qp->device;
+ int ret;
+
+ if (port && port != qp->port)
+ return -EAGAIN;
+
+ /* In create_qp() port is not set yet */
+ if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port))
+ return -EMSGSIZE;
+
+ ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num);
+ if (ret)
+ return -EMSGSIZE;
+
+ if (!rdma_is_kernel_res(res) &&
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id))
+ return -EMSGSIZE;
+
+ ret = fill_res_name_pid(msg, res);
+ if (ret)
+ return -EMSGSIZE;
+
+ return fill_res_qp_entry_query(msg, res, dev, qp);
+}
+
+static int fill_res_qp_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ struct rdma_restrack_entry *res, uint32_t port)
+{
+ struct ib_qp *qp = container_of(res, struct ib_qp, res);
+ struct ib_device *dev = qp->device;
+
+ if (port && port != qp->port)
+ return -EAGAIN;
+ if (!dev->ops.fill_res_qp_entry_raw)
+ return -EINVAL;
+ return dev->ops.fill_res_qp_entry_raw(msg, qp);
+}
+
+static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ struct rdma_restrack_entry *res, uint32_t port)
+{
+ struct rdma_id_private *id_priv =
+ container_of(res, struct rdma_id_private, res);
+ struct ib_device *dev = id_priv->id.device;
+ struct rdma_cm_id *cm_id = &id_priv->id;
+
+ if (port && port != cm_id->port_num)
+ return -EAGAIN;
+
+ if (cm_id->port_num &&
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
+ goto err;
+
+ if (id_priv->qp_num) {
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num))
+ goto err;
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))
+ goto err;
+ }
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))
+ goto err;
+
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state))
+ goto err;
+
+ if (cm_id->route.addr.src_addr.ss_family &&
+ nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR,
+ sizeof(cm_id->route.addr.src_addr),
+ &cm_id->route.addr.src_addr))
+ goto err;
+ if (cm_id->route.addr.dst_addr.ss_family &&
+ nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR,
+ sizeof(cm_id->route.addr.dst_addr),
+ &cm_id->route.addr.dst_addr))
+ goto err;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id))
+ goto err;
+
+ if (fill_res_name_pid(msg, res))
+ goto err;
+
+ if (dev->ops.fill_res_cm_id_entry)
+ return dev->ops.fill_res_cm_id_entry(msg, cm_id);
+ return 0;
+
+err: return -EMSGSIZE;
+}
+
+static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ struct rdma_restrack_entry *res, uint32_t port)
+{
+ struct ib_cq *cq = container_of(res, struct ib_cq, res);
+ struct ib_device *dev = cq->device;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
+ return -EMSGSIZE;
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
+ atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
+ return -EMSGSIZE;
+
+ /* Poll context is only valid for kernel CQs */
+ if (rdma_is_kernel_res(res) &&
+ nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
+ return -EMSGSIZE;
+
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, (cq->dim != NULL)))
+ return -EMSGSIZE;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id))
+ return -EMSGSIZE;
+ if (!rdma_is_kernel_res(res) &&
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
+ cq->uobject->uevent.uobject.context->res.id))
+ return -EMSGSIZE;
+
+ if (fill_res_name_pid(msg, res))
+ return -EMSGSIZE;
+
+ return (dev->ops.fill_res_cq_entry) ?
+ dev->ops.fill_res_cq_entry(msg, cq) : 0;
+}
+
+static int fill_res_cq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ struct rdma_restrack_entry *res, uint32_t port)
+{
+ struct ib_cq *cq = container_of(res, struct ib_cq, res);
+ struct ib_device *dev = cq->device;
+
+ if (!dev->ops.fill_res_cq_entry_raw)
+ return -EINVAL;
+ return dev->ops.fill_res_cq_entry_raw(msg, cq);
+}
+
+static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ struct rdma_restrack_entry *res, uint32_t port)
+{
+ struct ib_mr *mr = container_of(res, struct ib_mr, res);
+ struct ib_device *dev = mr->pd->device;
+
+ if (has_cap_net_admin) {
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey))
+ return -EMSGSIZE;
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
+ return -EMSGSIZE;
+ }
+
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length,
+ RDMA_NLDEV_ATTR_PAD))
+ return -EMSGSIZE;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
+ return -EMSGSIZE;
+
+ if (!rdma_is_kernel_res(res) &&
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id))
+ return -EMSGSIZE;
+
+ if (fill_res_name_pid(msg, res))
+ return -EMSGSIZE;
+
+ return (dev->ops.fill_res_mr_entry) ?
+ dev->ops.fill_res_mr_entry(msg, mr) :
+ 0;
+}
+
+static int fill_res_mr_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ struct rdma_restrack_entry *res, uint32_t port)
+{
+ struct ib_mr *mr = container_of(res, struct ib_mr, res);
+ struct ib_device *dev = mr->pd->device;
+
+ if (!dev->ops.fill_res_mr_entry_raw)
+ return -EINVAL;
+ return dev->ops.fill_res_mr_entry_raw(msg, mr);
+}
+
+static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ struct rdma_restrack_entry *res, uint32_t port)
+{
+ struct ib_pd *pd = container_of(res, struct ib_pd, res);
+
+ if (has_cap_net_admin) {
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
+ pd->local_dma_lkey))
+ goto err;
+ if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
+ pd->unsafe_global_rkey))
+ goto err;
+ }
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
+ atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
+ goto err;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id))
+ goto err;
+
+ if (!rdma_is_kernel_res(res) &&
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
+ pd->uobject->context->res.id))
+ goto err;
+
+ return fill_res_name_pid(msg, res);
+
+err: return -EMSGSIZE;
+}
+
+static int fill_res_ctx_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ struct rdma_restrack_entry *res, uint32_t port)
+{
+ struct ib_ucontext *ctx = container_of(res, struct ib_ucontext, res);
+
+ if (rdma_is_kernel_res(res))
+ return 0;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, ctx->res.id))
+ return -EMSGSIZE;
+
+ return fill_res_name_pid(msg, res);
+}
+
+static int fill_res_range_qp_entry(struct sk_buff *msg, uint32_t min_range,
+ uint32_t max_range)
+{
+ struct nlattr *entry_attr;
+
+ if (!min_range)
+ return 0;
+
+ entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
+ if (!entry_attr)
+ return -EMSGSIZE;
+
+ if (min_range == max_range) {
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, min_range))
+ goto err;
+ } else {
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MIN_RANGE, min_range))
+ goto err;
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MAX_RANGE, max_range))
+ goto err;
+ }
+ nla_nest_end(msg, entry_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, entry_attr);
+ return -EMSGSIZE;
+}
+
+static int fill_res_srq_qps(struct sk_buff *msg, struct ib_srq *srq)
+{
+ uint32_t min_range = 0, prev = 0;
+ struct rdma_restrack_entry *res;
+ struct rdma_restrack_root *rt;
+ struct nlattr *table_attr;
+ struct ib_qp *qp = NULL;
+ unsigned long id = 0;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ rt = &srq->device->res[RDMA_RESTRACK_QP];
+ xa_lock(&rt->xa);
+ xa_for_each(&rt->xa, id, res) {
+ if (!rdma_restrack_get(res))
+ continue;
+
+ qp = container_of(res, struct ib_qp, res);
+ if (!qp->srq || (qp->srq->res.id != srq->res.id)) {
+ rdma_restrack_put(res);
+ continue;
+ }
+
+ if (qp->qp_num < prev)
+ /* qp_num should be ascending */
+ goto err_loop;
+
+ if (min_range == 0) {
+ min_range = qp->qp_num;
+ } else if (qp->qp_num > (prev + 1)) {
+ if (fill_res_range_qp_entry(msg, min_range, prev))
+ goto err_loop;
+
+ min_range = qp->qp_num;
+ }
+ prev = qp->qp_num;
+ rdma_restrack_put(res);
+ }
+
+ xa_unlock(&rt->xa);
+
+ if (fill_res_range_qp_entry(msg, min_range, prev))
+ goto err;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err_loop:
+ rdma_restrack_put(res);
+ xa_unlock(&rt->xa);
+err:
+ nla_nest_cancel(msg, table_attr);
+ return -EMSGSIZE;
+}
+
+static int fill_res_srq_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ struct rdma_restrack_entry *res, uint32_t port)
+{
+ struct ib_srq *srq = container_of(res, struct ib_srq, res);
+ struct ib_device *dev = srq->device;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SRQN, srq->res.id))
+ goto err;
+
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, srq->srq_type))
+ goto err;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, srq->pd->res.id))
+ goto err;
+
+ if (ib_srq_has_cq(srq->srq_type)) {
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN,
+ srq->ext.cq->res.id))
+ goto err;
+ }
+
+ if (fill_res_srq_qps(msg, srq))
+ goto err;
+
+ if (fill_res_name_pid(msg, res))
+ goto err;
+
+ if (dev->ops.fill_res_srq_entry)
+ return dev->ops.fill_res_srq_entry(msg, srq);
+
+ return 0;
+
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_res_srq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ struct rdma_restrack_entry *res, uint32_t port)
+{
+ struct ib_srq *srq = container_of(res, struct ib_srq, res);
+ struct ib_device *dev = srq->device;
+
+ if (!dev->ops.fill_res_srq_entry_raw)
+ return -EINVAL;
+ return dev->ops.fill_res_srq_entry_raw(msg, srq);
+}
+
+static int fill_stat_counter_mode(struct sk_buff *msg,
+ struct rdma_counter *counter)
+{
+ struct rdma_counter_mode *m = &counter->mode;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, m->mode))
+ return -EMSGSIZE;
+
+ if (m->mode == RDMA_COUNTER_MODE_AUTO) {
+ if ((m->mask & RDMA_COUNTER_MASK_QP_TYPE) &&
+ nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, m->param.qp_type))
+ return -EMSGSIZE;
+
+ if ((m->mask & RDMA_COUNTER_MASK_PID) &&
+ fill_res_name_pid(msg, &counter->res))
+ return -EMSGSIZE;
+ }
+
+ return 0;
+}
+
+static int fill_stat_counter_qp_entry(struct sk_buff *msg, u32 qpn)
+{
+ struct nlattr *entry_attr;
+
+ entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
+ if (!entry_attr)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn))
+ goto err;
+
+ nla_nest_end(msg, entry_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, entry_attr);
+ return -EMSGSIZE;
+}
+
+static int fill_stat_counter_qps(struct sk_buff *msg,
+ struct rdma_counter *counter)
+{
+ struct rdma_restrack_entry *res;
+ struct rdma_restrack_root *rt;
+ struct nlattr *table_attr;
+ struct ib_qp *qp = NULL;
+ unsigned long id = 0;
+ int ret = 0;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ rt = &counter->device->res[RDMA_RESTRACK_QP];
+ xa_lock(&rt->xa);
+ xa_for_each(&rt->xa, id, res) {
+ qp = container_of(res, struct ib_qp, res);
+ if (!qp->counter || (qp->counter->id != counter->id))
+ continue;
+
+ ret = fill_stat_counter_qp_entry(msg, qp->qp_num);
+ if (ret)
+ goto err;
+ }
+
+ xa_unlock(&rt->xa);
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err:
+ xa_unlock(&rt->xa);
+ nla_nest_cancel(msg, table_attr);
+ return ret;
+}
+
+int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name,
+ u64 value)
+{
+ struct nlattr *entry_attr;
+
+ entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY);
+ if (!entry_attr)
+ return -EMSGSIZE;
+
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME,
+ name))
+ goto err;
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE,
+ value, RDMA_NLDEV_ATTR_PAD))
+ goto err;
+
+ nla_nest_end(msg, entry_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, entry_attr);
+ return -EMSGSIZE;
+}
+EXPORT_SYMBOL(rdma_nl_stat_hwcounter_entry);
+
+static int fill_stat_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ struct rdma_restrack_entry *res, uint32_t port)
+{
+ struct ib_mr *mr = container_of(res, struct ib_mr, res);
+ struct ib_device *dev = mr->pd->device;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
+ goto err;
+
+ if (dev->ops.fill_stat_mr_entry)
+ return dev->ops.fill_stat_mr_entry(msg, mr);
+ return 0;
+
+err:
+ return -EMSGSIZE;
+}
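
fill_stat_mr_entry() above hands off to the driver's fill_stat_mr_entry op; such an op typically nests a RDMA_NLDEV_ATTR_STAT_HWCOUNTERS table and emits rows through the exported rdma_nl_stat_hwcounter_entry() helper. A minimal sketch of a hypothetical driver op follows (not part of this patch; the counter name and the zero value are placeholders a real driver would derive from ibmr):

static int example_fill_stat_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
{
	struct nlattr *table;

	table = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
	if (!table)
		return -EMSGSIZE;

	/* One row per counter: a name string plus a 64-bit value */
	if (rdma_nl_stat_hwcounter_entry(msg, "example_mr_counter", 0))
		goto err;

	nla_nest_end(msg, table);
	return 0;

err:
	nla_nest_cancel(msg, table);
	return -EMSGSIZE;
}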
+
+static int fill_stat_counter_hwcounters(struct sk_buff *msg,
+ struct rdma_counter *counter)
+{
+ struct rdma_hw_stats *st = counter->stats;
+ struct nlattr *table_attr;
+ int i;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ mutex_lock(&st->lock);
+ for (i = 0; i < st->num_counters; i++) {
+ if (test_bit(i, st->is_disabled))
+ continue;
+ if (rdma_nl_stat_hwcounter_entry(msg, st->descs[i].name,
+ st->value[i]))
+ goto err;
+ }
+ mutex_unlock(&st->lock);
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err:
+ mutex_unlock(&st->lock);
+ nla_nest_cancel(msg, table_attr);
+ return -EMSGSIZE;
+}
+
+static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ struct rdma_restrack_entry *res,
+ uint32_t port)
+{
+ struct rdma_counter *counter =
+ container_of(res, struct rdma_counter, res);
+
+ if (port && port != counter->port)
+ return -EAGAIN;
+
+ /* Dump it even if the query failed */
+ rdma_counter_query_stats(counter);
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, counter->port) ||
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, counter->id) ||
+ fill_stat_counter_mode(msg, counter) ||
+ fill_stat_counter_qps(msg, counter) ||
+ fill_stat_counter_hwcounters(msg, counter))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct ib_device *device;
+ struct sk_buff *msg;
+ u32 index;
+ int err;
+
+ err = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, extack);
+ if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+
+ device = ib_device_get_by_index(sock_net(skb->sk), index);
+ if (!device)
+ return -EINVAL;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
+ 0, 0);
+ if (!nlh) {
+ err = -EMSGSIZE;
+ goto err_free;
+ }
+
+ err = fill_dev_info(msg, device);
+ if (err)
+ goto err_free;
+
+ nlmsg_end(msg, nlh);
+
+ ib_device_put(device);
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
+
+err_free:
+ nlmsg_free(msg);
+err:
+ ib_device_put(device);
+ return err;
+}
+
+static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct ib_device *device;
+ u32 index;
+ int err;
+
+ err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
+ if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(sock_net(skb->sk), index);
+ if (!device)
+ return -EINVAL;
+
+ if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) {
+ char name[IB_DEVICE_NAME_MAX] = {};
+
+ nla_strscpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
+ IB_DEVICE_NAME_MAX);
+ if (strlen(name) == 0) {
+ err = -EINVAL;
+ goto done;
+ }
+ err = ib_device_rename(device, name);
+ goto done;
+ }
+
+ if (tb[RDMA_NLDEV_NET_NS_FD]) {
+ u32 ns_fd;
+
+ ns_fd = nla_get_u32(tb[RDMA_NLDEV_NET_NS_FD]);
+ err = ib_device_set_netns_put(skb, device, ns_fd);
+ goto put_done;
+ }
+
+ if (tb[RDMA_NLDEV_ATTR_DEV_DIM]) {
+ u8 use_dim;
+
+ use_dim = nla_get_u8(tb[RDMA_NLDEV_ATTR_DEV_DIM]);
+ err = ib_device_set_dim(device, use_dim);
+ goto done;
+ }
+
+done:
+ ib_device_put(device);
+put_done:
+ return err;
+}
+
+static int _nldev_get_dumpit(struct ib_device *device,
+ struct sk_buff *skb,
+ struct netlink_callback *cb,
+ unsigned int idx)
+{
+ int start = cb->args[0];
+ struct nlmsghdr *nlh;
+
+ if (idx < start)
+ return 0;
+
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
+ 0, NLM_F_MULTI);
+
+ if (!nlh || fill_dev_info(skb, device)) {
+ nlmsg_cancel(skb, nlh);
+ goto out;
+ }
+
+ nlmsg_end(skb, nlh);
+
+ idx++;
+
+out: cb->args[0] = idx;
+ return skb->len;
+}
+
+static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ /*
+ * There is no need to take a lock here; we rely on ib_core's locking.
+ */
+ return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
+}
+
+static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct ib_device *device;
+ struct sk_buff *msg;
+ u32 index;
+ u32 port;
+ int err;
+
+ err = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, extack);
+ if (err ||
+ !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
+ !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(sock_net(skb->sk), index);
+ if (!device)
+ return -EINVAL;
+
+ port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
+ if (!rdma_is_port_valid(device, port)) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
+ 0, 0);
+ if (!nlh) {
+ err = -EMSGSIZE;
+ goto err_free;
+ }
+
+ err = fill_port_info(msg, device, port, sock_net(skb->sk));
+ if (err)
+ goto err_free;
+
+ nlmsg_end(msg, nlh);
+ ib_device_put(device);
+
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
+
+err_free:
+ nlmsg_free(msg);
+err:
+ ib_device_put(device);
+ return err;
+}
+
+static int nldev_port_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct ib_device *device;
+ int start = cb->args[0];
+ struct nlmsghdr *nlh;
+ u32 idx = 0;
+ u32 ifindex;
+ int err;
+ unsigned int p;
+
+ err = __nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, NULL);
+ if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+ return -EINVAL;
+
+ ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(sock_net(skb->sk), ifindex);
+ if (!device)
+ return -EINVAL;
+
+ rdma_for_each_port (device, p) {
+ /*
+ * The dumpit function returns all information starting from a
+ * specific index. That index is taken from the netlink request
+ * sent by the user and is available in cb->args[0].
+ *
+ * Usually the user does not fill this field, which causes
+ * everything to be returned.
+ */
+ if (idx < start) {
+ idx++;
+ continue;
+ }
+
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+ RDMA_NLDEV_CMD_PORT_GET),
+ 0, NLM_F_MULTI);
+
+ if (!nlh || fill_port_info(skb, device, p, sock_net(skb->sk))) {
+ nlmsg_cancel(skb, nlh);
+ goto out;
+ }
+ idx++;
+ nlmsg_end(skb, nlh);
+ }
+
+out:
+ ib_device_put(device);
+ cb->args[0] = idx;
+ return skb->len;
+}
+
+static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ bool show_details = false;
+ struct ib_device *device;
+ struct sk_buff *msg;
+ u32 index;
+ int ret;
+
+ ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, extack);
+ if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(sock_net(skb->sk), index);
+ if (!device)
+ return -EINVAL;
+
+ if (tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS])
+ show_details = nla_get_u8(tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS]);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
+ 0, 0);
+ if (!nlh) {
+ ret = -EMSGSIZE;
+ goto err_free;
+ }
+
+ ret = fill_res_info(msg, device, show_details);
+ if (ret)
+ goto err_free;
+
+ nlmsg_end(msg, nlh);
+ ib_device_put(device);
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
+
+err_free:
+ nlmsg_free(msg);
+err:
+ ib_device_put(device);
+ return ret;
+}
+
+static int _nldev_res_get_dumpit(struct ib_device *device,
+ struct sk_buff *skb,
+ struct netlink_callback *cb,
+ unsigned int idx)
+{
+ int start = cb->args[0];
+ struct nlmsghdr *nlh;
+
+ if (idx < start)
+ return 0;
+
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
+ 0, NLM_F_MULTI);
+
+ if (!nlh || fill_res_info(skb, device, false)) {
+ nlmsg_cancel(skb, nlh);
+ goto out;
+ }
+ nlmsg_end(skb, nlh);
+
+ idx++;
+
+out:
+ cb->args[0] = idx;
+ return skb->len;
+}
+
+static int nldev_res_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
+}
+
+struct nldev_fill_res_entry {
+ enum rdma_nldev_attr nldev_attr;
+ u8 flags;
+ u32 entry;
+ u32 id;
+};
+
+enum nldev_res_flags {
+ NLDEV_PER_DEV = 1 << 0,
+};
+
+static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
+ [RDMA_RESTRACK_QP] = {
+ .nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
+ .entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY,
+ .id = RDMA_NLDEV_ATTR_RES_LQPN,
+ },
+ [RDMA_RESTRACK_CM_ID] = {
+ .nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
+ .entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY,
+ .id = RDMA_NLDEV_ATTR_RES_CM_IDN,
+ },
+ [RDMA_RESTRACK_CQ] = {
+ .nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
+ .flags = NLDEV_PER_DEV,
+ .entry = RDMA_NLDEV_ATTR_RES_CQ_ENTRY,
+ .id = RDMA_NLDEV_ATTR_RES_CQN,
+ },
+ [RDMA_RESTRACK_MR] = {
+ .nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
+ .flags = NLDEV_PER_DEV,
+ .entry = RDMA_NLDEV_ATTR_RES_MR_ENTRY,
+ .id = RDMA_NLDEV_ATTR_RES_MRN,
+ },
+ [RDMA_RESTRACK_PD] = {
+ .nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
+ .flags = NLDEV_PER_DEV,
+ .entry = RDMA_NLDEV_ATTR_RES_PD_ENTRY,
+ .id = RDMA_NLDEV_ATTR_RES_PDN,
+ },
+ [RDMA_RESTRACK_COUNTER] = {
+ .nldev_attr = RDMA_NLDEV_ATTR_STAT_COUNTER,
+ .entry = RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY,
+ .id = RDMA_NLDEV_ATTR_STAT_COUNTER_ID,
+ },
+ [RDMA_RESTRACK_CTX] = {
+ .nldev_attr = RDMA_NLDEV_ATTR_RES_CTX,
+ .flags = NLDEV_PER_DEV,
+ .entry = RDMA_NLDEV_ATTR_RES_CTX_ENTRY,
+ .id = RDMA_NLDEV_ATTR_RES_CTXN,
+ },
+ [RDMA_RESTRACK_SRQ] = {
+ .nldev_attr = RDMA_NLDEV_ATTR_RES_SRQ,
+ .flags = NLDEV_PER_DEV,
+ .entry = RDMA_NLDEV_ATTR_RES_SRQ_ENTRY,
+ .id = RDMA_NLDEV_ATTR_RES_SRQN,
+ },
+
+};
+
+static noinline_for_stack int
+res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack,
+ enum rdma_restrack_type res_type,
+ res_fill_func_t fill_func)
+{
+ const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct rdma_restrack_entry *res;
+ struct ib_device *device;
+ u32 index, id, port = 0;
+ bool has_cap_net_admin;
+ struct sk_buff *msg;
+ int ret;
+
+ ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, extack);
+ if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(sock_net(skb->sk), index);
+ if (!device)
+ return -EINVAL;
+
+ if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
+ port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
+ if (!rdma_is_port_valid(device, port)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ if ((port && fe->flags & NLDEV_PER_DEV) ||
+ (!port && ~fe->flags & NLDEV_PER_DEV)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ id = nla_get_u32(tb[fe->id]);
+ res = rdma_restrack_get_byid(device, res_type, id);
+ if (IS_ERR(res)) {
+ ret = PTR_ERR(res);
+ goto err;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto err_get;
+ }
+
+ nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+ RDMA_NL_GET_OP(nlh->nlmsg_type)),
+ 0, 0);
+
+ if (!nlh || fill_nldev_handle(msg, device)) {
+ ret = -EMSGSIZE;
+ goto err_free;
+ }
+
+ has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
+
+ ret = fill_func(msg, has_cap_net_admin, res, port);
+ if (ret)
+ goto err_free;
+
+ rdma_restrack_put(res);
+ nlmsg_end(msg, nlh);
+ ib_device_put(device);
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
+
+err_free:
+ nlmsg_free(msg);
+err_get:
+ rdma_restrack_put(res);
+err:
+ ib_device_put(device);
+ return ret;
+}
+
+static int res_get_common_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ enum rdma_restrack_type res_type,
+ res_fill_func_t fill_func)
+{
+ const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct rdma_restrack_entry *res;
+ struct rdma_restrack_root *rt;
+ int err, ret = 0, idx = 0;
+ bool show_details = false;
+ struct nlattr *table_attr;
+ struct nlattr *entry_attr;
+ struct ib_device *device;
+ int start = cb->args[0];
+ bool has_cap_net_admin;
+ struct nlmsghdr *nlh;
+ unsigned long id;
+ u32 index, port = 0;
+ bool filled = false;
+
+ err = __nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, NULL);
+ /*
+ * Right now we require the device index to get resource information,
+ * but this code could be extended to return all devices in one shot
+ * by checking for the presence of RDMA_NLDEV_ATTR_DEV_INDEX: if it is
+ * absent, iterate over all devices. That is not needed for now.
+ */
+ if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(sock_net(skb->sk), index);
+ if (!device)
+ return -EINVAL;
+
+ if (tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS])
+ show_details = nla_get_u8(tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS]);
+
+ /*
+ * If no PORT_INDEX is supplied, all resources of this type on the
+ * device are returned.
+ */
+ if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
+ port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
+ if (!rdma_is_port_valid(device, port)) {
+ ret = -EINVAL;
+ goto err_index;
+ }
+ }
+
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+ RDMA_NL_GET_OP(cb->nlh->nlmsg_type)),
+ 0, NLM_F_MULTI);
+
+ if (!nlh || fill_nldev_handle(skb, device)) {
+ ret = -EMSGSIZE;
+ goto err;
+ }
+
+ table_attr = nla_nest_start_noflag(skb, fe->nldev_attr);
+ if (!table_attr) {
+ ret = -EMSGSIZE;
+ goto err;
+ }
+
+ has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN);
+
+ rt = &device->res[res_type];
+ xa_lock(&rt->xa);
+ /*
+ * FIXME: if skipping ahead turns out to be common, this loop should
+ * use xas_for_each & xas_pause to optimize; there can be a lot of
+ * objects.
+ */
+ xa_for_each(&rt->xa, id, res) {
+ if (xa_get_mark(&rt->xa, res->id, RESTRACK_DD) && !show_details)
+ goto next;
+
+ if (idx < start || !rdma_restrack_get(res))
+ goto next;
+
+ xa_unlock(&rt->xa);
+
+ filled = true;
+
+ entry_attr = nla_nest_start_noflag(skb, fe->entry);
+ if (!entry_attr) {
+ ret = -EMSGSIZE;
+ rdma_restrack_put(res);
+ goto msg_full;
+ }
+
+ ret = fill_func(skb, has_cap_net_admin, res, port);
+
+ rdma_restrack_put(res);
+
+ if (ret) {
+ nla_nest_cancel(skb, entry_attr);
+ if (ret == -EMSGSIZE)
+ goto msg_full;
+ if (ret == -EAGAIN)
+ goto again;
+ goto res_err;
+ }
+ nla_nest_end(skb, entry_attr);
+again: xa_lock(&rt->xa);
+next: idx++;
+ }
+ xa_unlock(&rt->xa);
+
+msg_full:
+ nla_nest_end(skb, table_attr);
+ nlmsg_end(skb, nlh);
+ cb->args[0] = idx;
+
+ /*
+ * No more entries to fill: cancel the message and
+ * return 0 to mark the end of the dump.
+ */
+ if (!filled)
+ goto err;
+
+ ib_device_put(device);
+ return skb->len;
+
+res_err:
+ nla_nest_cancel(skb, table_attr);
+
+err:
+ nlmsg_cancel(skb, nlh);
+
+err_index:
+ ib_device_put(device);
+ return ret;
+}
+
+#define RES_GET_FUNCS(name, type) \
+ static int nldev_res_get_##name##_dumpit(struct sk_buff *skb, \
+ struct netlink_callback *cb) \
+ { \
+ return res_get_common_dumpit(skb, cb, type, \
+ fill_res_##name##_entry); \
+ } \
+ static int nldev_res_get_##name##_doit(struct sk_buff *skb, \
+ struct nlmsghdr *nlh, \
+ struct netlink_ext_ack *extack) \
+ { \
+ return res_get_common_doit(skb, nlh, extack, type, \
+ fill_res_##name##_entry); \
+ }
+
+RES_GET_FUNCS(qp, RDMA_RESTRACK_QP);
+RES_GET_FUNCS(qp_raw, RDMA_RESTRACK_QP);
+RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID);
+RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ);
+RES_GET_FUNCS(cq_raw, RDMA_RESTRACK_CQ);
+RES_GET_FUNCS(pd, RDMA_RESTRACK_PD);
+RES_GET_FUNCS(mr, RDMA_RESTRACK_MR);
+RES_GET_FUNCS(mr_raw, RDMA_RESTRACK_MR);
+RES_GET_FUNCS(counter, RDMA_RESTRACK_COUNTER);
+RES_GET_FUNCS(ctx, RDMA_RESTRACK_CTX);
+RES_GET_FUNCS(srq, RDMA_RESTRACK_SRQ);
+RES_GET_FUNCS(srq_raw, RDMA_RESTRACK_SRQ);
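
For reference, each RES_GET_FUNCS() line above expands to a thin doit/dumpit pair around the common helpers; e.g. RES_GET_FUNCS(qp, RDMA_RESTRACK_QP) produces the following (expansion shown for illustration only):

static int nldev_res_get_qp_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_QP,
				     fill_res_qp_entry);
}
static int nldev_res_get_qp_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
				 struct netlink_ext_ack *extack)
{
	return res_get_common_doit(skb, nlh, extack, RDMA_RESTRACK_QP,
				   fill_res_qp_entry);
}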
+
+static LIST_HEAD(link_ops);
+static DECLARE_RWSEM(link_ops_rwsem);
+
+static const struct rdma_link_ops *link_ops_get(const char *type)
+{
+ const struct rdma_link_ops *ops;
+
+ list_for_each_entry(ops, &link_ops, list) {
+ if (!strcmp(ops->type, type))
+ goto out;
+ }
+ ops = NULL;
+out:
+ return ops;
+}
+
+void rdma_link_register(struct rdma_link_ops *ops)
+{
+ down_write(&link_ops_rwsem);
+ if (WARN_ON_ONCE(link_ops_get(ops->type)))
+ goto out;
+ list_add(&ops->list, &link_ops);
+out:
+ up_write(&link_ops_rwsem);
+}
+EXPORT_SYMBOL(rdma_link_register);
+
+void rdma_link_unregister(struct rdma_link_ops *ops)
+{
+ down_write(&link_ops_rwsem);
+ list_del(&ops->list);
+ up_write(&link_ops_rwsem);
+}
+EXPORT_SYMBOL(rdma_link_unregister);
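
The link_ops registry above is how software RDMA providers hook into RDMA_NLDEV_CMD_NEWLINK. A minimal sketch of the provider side is shown below (hypothetical module, not part of this patch; the "example" type string and the empty newlink body are placeholders):

static int example_newlink(const char *ibdev_name, struct net_device *ndev)
{
	/*
	 * Allocate an ib_device named ibdev_name, bind it to ndev and
	 * register it (provider specific).
	 */
	return 0;
}

static struct rdma_link_ops example_link_ops = {
	.type = "example",
	.newlink = example_newlink,
};

static int __init example_init(void)
{
	rdma_link_register(&example_link_ops);
	return 0;
}

static void __exit example_exit(void)
{
	rdma_link_unregister(&example_link_ops);
}

module_init(example_init);
module_exit(example_exit);
/* Matches the request_module("rdma-link-%s", type) in nldev_newlink() */
MODULE_ALIAS("rdma-link-example");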
+
+static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ char ibdev_name[IB_DEVICE_NAME_MAX];
+ const struct rdma_link_ops *ops;
+ char ndev_name[IFNAMSIZ];
+ struct net_device *ndev;
+ char type[IFNAMSIZ];
+ int err;
+
+ err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
+ if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
+ !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
+ return -EINVAL;
+
+ nla_strscpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
+ sizeof(ibdev_name));
+ if (strchr(ibdev_name, '%') || strlen(ibdev_name) == 0)
+ return -EINVAL;
+
+ nla_strscpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
+ nla_strscpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME],
+ sizeof(ndev_name));
+
+ ndev = dev_get_by_name(sock_net(skb->sk), ndev_name);
+ if (!ndev)
+ return -ENODEV;
+
+ down_read(&link_ops_rwsem);
+ ops = link_ops_get(type);
+#ifdef CONFIG_MODULES
+ if (!ops) {
+ up_read(&link_ops_rwsem);
+ request_module("rdma-link-%s", type);
+ down_read(&link_ops_rwsem);
+ ops = link_ops_get(type);
+ }
+#endif
+ err = ops ? ops->newlink(ibdev_name, ndev) : -EINVAL;
+ up_read(&link_ops_rwsem);
+ dev_put(ndev);
+
+ return err;
+}
+
+static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct ib_device *device;
+ u32 index;
+ int err;
+
+ err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
+ if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(sock_net(skb->sk), index);
+ if (!device)
+ return -EINVAL;
+
+ if (!(device->attrs.kernel_cap_flags & IBK_ALLOW_USER_UNREG)) {
+ ib_device_put(device);
+ return -EINVAL;
+ }
+
+ ib_unregister_device_and_put(device);
+ return 0;
+}
+
+static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ char client_name[RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE];
+ struct ib_client_nl_info data = {};
+ struct ib_device *ibdev = NULL;
+ struct sk_buff *msg;
+ u32 index;
+ int err;
+
+ err = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
+ NL_VALIDATE_LIBERAL, extack);
+ if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE])
+ return -EINVAL;
+
+ nla_strscpy(client_name, tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE],
+ sizeof(client_name));
+
+ if (tb[RDMA_NLDEV_ATTR_DEV_INDEX]) {
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ ibdev = ib_device_get_by_index(sock_net(skb->sk), index);
+ if (!ibdev)
+ return -EINVAL;
+
+ if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
+ data.port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
+ if (!rdma_is_port_valid(ibdev, data.port)) {
+ err = -EINVAL;
+ goto out_put;
+ }
+ } else {
+ data.port = -1;
+ }
+ } else if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
+ return -EINVAL;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ err = -ENOMEM;
+ goto out_put;
+ }
+ nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+ RDMA_NLDEV_CMD_GET_CHARDEV),
+ 0, 0);
+ if (!nlh) {
+ err = -EMSGSIZE;
+ goto out_nlmsg;
+ }
+
+ data.nl_msg = msg;
+ err = ib_get_client_nl_info(ibdev, client_name, &data);
+ if (err)
+ goto out_nlmsg;
+
+ err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV,
+ huge_encode_dev(data.cdev->devt),
+ RDMA_NLDEV_ATTR_PAD);
+ if (err)
+ goto out_data;
+ err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV_ABI, data.abi,
+ RDMA_NLDEV_ATTR_PAD);
+ if (err)
+ goto out_data;
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_CHARDEV_NAME,
+ dev_name(data.cdev))) {
+ err = -EMSGSIZE;
+ goto out_data;
+ }
+
+ nlmsg_end(msg, nlh);
+ put_device(data.cdev);
+ if (ibdev)
+ ib_device_put(ibdev);
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
+
+out_data:
+ put_device(data.cdev);
+out_nlmsg:
+ nlmsg_free(msg);
+out_put:
+ if (ibdev)
+ ib_device_put(ibdev);
+ return err;
+}
+
+static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct sk_buff *msg;
+ int err;
+
+ err = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, extack);
+ if (err)
+ return err;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+ RDMA_NLDEV_CMD_SYS_GET),
+ 0, 0);
+ if (!nlh) {
+ nlmsg_free(msg);
+ return -EMSGSIZE;
+ }
+
+ err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE,
+ (u8)ib_devices_shared_netns);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE,
+ (u8)privileged_qkey);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_MONITOR_MODE, 1);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+ /*
+ * Copy-on-fork is supported.
+ * See commits:
+ * 70e806e4e645 ("mm: Do early cow for pinned pages during fork() for ptes")
+ * 4eae4efa2c29 ("hugetlb: do early cow when page pinned on src mm")
+ * for more details. Don't backport this without them.
+ *
+ * The return value is ignored on purpose: on failure, assume
+ * copy-on-fork is not supported.
+ */
+ nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK, 1);
+
+ nlmsg_end(msg, nlh);
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
+}
+
+static int nldev_set_sys_set_netns_doit(struct nlattr *tb[])
+{
+ u8 enable;
+ int err;
+
+ enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]);
+ /* Only 0 and 1 are supported */
+ if (enable > 1)
+ return -EINVAL;
+
+ err = rdma_compatdev_set(enable);
+ return err;
+}
+
+static int nldev_set_sys_set_pqkey_doit(struct nlattr *tb[])
+{
+ u8 enable;
+
+ enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE]);
+ /* Only 0 and 1 are supported */
+ if (enable > 1)
+ return -EINVAL;
+
+ privileged_qkey = enable;
+ return 0;
+}
+
+static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ int err;
+
+ err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
+ if (err)
+ return -EINVAL;
+
+ if (tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE])
+ return nldev_set_sys_set_netns_doit(tb);
+
+ if (tb[RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE])
+ return nldev_set_sys_set_pqkey_doit(tb);
+
+ return -EINVAL;
+}
+
+static int nldev_stat_set_mode_doit(struct sk_buff *msg,
+ struct netlink_ext_ack *extack,
+ struct nlattr *tb[],
+ struct ib_device *device, u32 port)
+{
+ u32 mode, mask = 0, qpn, cntn = 0;
+ bool opcnt = false;
+ int ret;
+
+ /* Currently only counters for QPs are supported */
+ if (!tb[RDMA_NLDEV_ATTR_STAT_RES] ||
+ nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
+ return -EINVAL;
+
+ if (tb[RDMA_NLDEV_ATTR_STAT_OPCOUNTER_ENABLED])
+ opcnt = !!nla_get_u8(
+ tb[RDMA_NLDEV_ATTR_STAT_OPCOUNTER_ENABLED]);
+
+ mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]);
+ if (mode == RDMA_COUNTER_MODE_AUTO) {
+ if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK])
+ mask = nla_get_u32(
+ tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]);
+ return rdma_counter_set_auto_mode(device, port, mask, opcnt,
+ extack);
+ }
+
+ if (!tb[RDMA_NLDEV_ATTR_RES_LQPN])
+ return -EINVAL;
+
+ qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
+ if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
+ cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
+ ret = rdma_counter_bind_qpn(device, port, qpn, cntn);
+ if (ret)
+ return ret;
+ } else {
+ ret = rdma_counter_bind_qpn_alloc(device, port, qpn, &cntn);
+ if (ret)
+ return ret;
+ }
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
+ ret = -EMSGSIZE;
+ goto err_fill;
+ }
+
+ return 0;
+
+err_fill:
+ rdma_counter_unbind_qpn(device, port, qpn, cntn);
+ return ret;
+}
+
+static int nldev_stat_set_counter_dynamic_doit(struct nlattr *tb[],
+ struct ib_device *device,
+ u32 port)
+{
+ struct rdma_hw_stats *stats;
+ struct nlattr *entry_attr;
+ unsigned long *target;
+ int rem, i, ret = 0;
+ u32 index;
+
+ stats = ib_get_hw_stats_port(device, port);
+ if (!stats)
+ return -EINVAL;
+
+ target = kcalloc(BITS_TO_LONGS(stats->num_counters),
+ sizeof(*stats->is_disabled), GFP_KERNEL);
+ if (!target)
+ return -ENOMEM;
+
+ nla_for_each_nested(entry_attr, tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS],
+ rem) {
+ index = nla_get_u32(entry_attr);
+ if ((index >= stats->num_counters) ||
+ !(stats->descs[index].flags & IB_STAT_FLAG_OPTIONAL)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ set_bit(index, target);
+ }
+
+ for (i = 0; i < stats->num_counters; i++) {
+ if (!(stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL))
+ continue;
+
+ ret = rdma_counter_modify(device, port, i, test_bit(i, target));
+ if (ret)
+ goto out;
+ }
+
+out:
+ kfree(target);
+ return ret;
+}
+
+static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct ib_device *device;
+ struct sk_buff *msg;
+ u32 index, port;
+ int ret;
+
+ ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
+ extack);
+ if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
+ !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(sock_net(skb->sk), index);
+ if (!device)
+ return -EINVAL;
+
+ port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
+ if (!rdma_is_port_valid(device, port)) {
+ ret = -EINVAL;
+ goto err_put_device;
+ }
+
+ if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] &&
+ !tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]) {
+ ret = -EINVAL;
+ goto err_put_device;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto err_put_device;
+ }
+ nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+ RDMA_NLDEV_CMD_STAT_SET),
+ 0, 0);
+ if (!nlh || fill_nldev_handle(msg, device) ||
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) {
+ ret = -EMSGSIZE;
+ goto err_free_msg;
+ }
+
+ if (tb[RDMA_NLDEV_ATTR_STAT_MODE]) {
+ ret = nldev_stat_set_mode_doit(msg, extack, tb, device, port);
+ if (ret)
+ goto err_free_msg;
+ }
+
+ if (tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]) {
+ ret = nldev_stat_set_counter_dynamic_doit(tb, device, port);
+ if (ret)
+ goto err_free_msg;
+ }
+
+ nlmsg_end(msg, nlh);
+ ib_device_put(device);
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
+
+err_free_msg:
+ nlmsg_free(msg);
+err_put_device:
+ ib_device_put(device);
+ return ret;
+}
+
+static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct ib_device *device;
+ struct sk_buff *msg;
+ u32 index, port, qpn, cntn;
+ int ret;
+
+ ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
+ if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
+ !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX] ||
+ !tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID] ||
+ !tb[RDMA_NLDEV_ATTR_RES_LQPN])
+ return -EINVAL;
+
+ if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(sock_net(skb->sk), index);
+ if (!device)
+ return -EINVAL;
+
+ port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
+ if (!rdma_is_port_valid(device, port)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+ RDMA_NLDEV_CMD_STAT_SET),
+ 0, 0);
+ if (!nlh) {
+ ret = -EMSGSIZE;
+ goto err_fill;
+ }
+
+ cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
+ qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
+ if (fill_nldev_handle(msg, device) ||
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
+ ret = -EMSGSIZE;
+ goto err_fill;
+ }
+
+ ret = rdma_counter_unbind_qpn(device, port, qpn, cntn);
+ if (ret)
+ goto err_fill;
+
+ nlmsg_end(msg, nlh);
+ ib_device_put(device);
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
+
+err_fill:
+ nlmsg_free(msg);
+err:
+ ib_device_put(device);
+ return ret;
+}
+
+static noinline_for_stack int
+stat_get_doit_default_counter(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack,
+ struct nlattr *tb[])
+{
+ struct rdma_hw_stats *stats;
+ struct nlattr *table_attr;
+ struct ib_device *device;
+ int ret, num_cnts, i;
+ struct sk_buff *msg;
+ u32 index, port;
+ u64 v;
+
+ if (!tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(sock_net(skb->sk), index);
+ if (!device)
+ return -EINVAL;
+
+ if (!device->ops.alloc_hw_port_stats || !device->ops.get_hw_stats) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
+ stats = ib_get_hw_stats_port(device, port);
+ if (!stats) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+ RDMA_NLDEV_CMD_STAT_GET),
+ 0, 0);
+
+ if (!nlh || fill_nldev_handle(msg, device) ||
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) {
+ ret = -EMSGSIZE;
+ goto err_msg;
+ }
+
+ mutex_lock(&stats->lock);
+
+ num_cnts = device->ops.get_hw_stats(device, stats, port, 0);
+ if (num_cnts < 0) {
+ ret = -EINVAL;
+ goto err_stats;
+ }
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
+ if (!table_attr) {
+ ret = -EMSGSIZE;
+ goto err_stats;
+ }
+ for (i = 0; i < num_cnts; i++) {
+ if (test_bit(i, stats->is_disabled))
+ continue;
+
+ v = stats->value[i] +
+ rdma_counter_get_hwstat_value(device, port, i);
+ if (rdma_nl_stat_hwcounter_entry(msg,
+ stats->descs[i].name, v)) {
+ ret = -EMSGSIZE;
+ goto err_table;
+ }
+ }
+ nla_nest_end(msg, table_attr);
+
+ mutex_unlock(&stats->lock);
+ nlmsg_end(msg, nlh);
+ ib_device_put(device);
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
+
+err_table:
+ nla_nest_cancel(msg, table_attr);
+err_stats:
+ mutex_unlock(&stats->lock);
+err_msg:
+ nlmsg_free(msg);
+err:
+ ib_device_put(device);
+ return ret;
+}
+
+static noinline_for_stack int
+stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack, struct nlattr *tb[])
+
+{
+ static enum rdma_nl_counter_mode mode;
+ static enum rdma_nl_counter_mask mask;
+ struct ib_device *device;
+ struct sk_buff *msg;
+ u32 index, port;
+ bool opcnt;
+ int ret;
+
+ if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID])
+ return nldev_res_get_counter_doit(skb, nlh, extack);
+
+ if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] ||
+ !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(sock_net(skb->sk), index);
+ if (!device)
+ return -EINVAL;
+
+ port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
+ if (!rdma_is_port_valid(device, port)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+ RDMA_NLDEV_CMD_STAT_GET),
+ 0, 0);
+ if (!nlh) {
+ ret = -EMSGSIZE;
+ goto err_msg;
+ }
+
+ ret = rdma_counter_get_mode(device, port, &mode, &mask, &opcnt);
+ if (ret)
+ goto err_msg;
+
+ if (fill_nldev_handle(msg, device) ||
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) {
+ ret = -EMSGSIZE;
+ goto err_msg;
+ }
+
+ if ((mode == RDMA_COUNTER_MODE_AUTO) &&
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) {
+ ret = -EMSGSIZE;
+ goto err_msg;
+ }
+
+ if ((mode == RDMA_COUNTER_MODE_AUTO) &&
+ nla_put_u8(msg, RDMA_NLDEV_ATTR_STAT_OPCOUNTER_ENABLED, opcnt)) {
+ ret = -EMSGSIZE;
+ goto err_msg;
+ }
+
+ nlmsg_end(msg, nlh);
+ ib_device_put(device);
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
+
+err_msg:
+ nlmsg_free(msg);
+err:
+ ib_device_put(device);
+ return ret;
+}
+
+static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ int ret;
+
+ ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, extack);
+ if (ret)
+ return -EINVAL;
+
+ if (!tb[RDMA_NLDEV_ATTR_STAT_RES])
+ return stat_get_doit_default_counter(skb, nlh, extack, tb);
+
+ switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
+ case RDMA_NLDEV_ATTR_RES_QP:
+ ret = stat_get_doit_qp(skb, nlh, extack, tb);
+ break;
+ case RDMA_NLDEV_ATTR_RES_MR:
+ ret = res_get_common_doit(skb, nlh, extack, RDMA_RESTRACK_MR,
+ fill_stat_mr_entry);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int nldev_stat_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ int ret;
+
+ ret = __nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, NULL);
+ if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES])
+ return -EINVAL;
+
+ switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
+ case RDMA_NLDEV_ATTR_RES_QP:
+ ret = nldev_res_get_counter_dumpit(skb, cb);
+ break;
+ case RDMA_NLDEV_ATTR_RES_MR:
+ ret = res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR,
+ fill_stat_mr_entry);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int nldev_stat_get_counter_status_doit(struct sk_buff *skb,
+ struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX], *table, *entry;
+ struct rdma_hw_stats *stats;
+ struct ib_device *device;
+ struct sk_buff *msg;
+ u32 devid, port;
+ int ret, i;
+
+ ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, extack);
+ if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
+ !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
+ return -EINVAL;
+
+ devid = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(sock_net(skb->sk), devid);
+ if (!device)
+ return -EINVAL;
+
+ port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
+ if (!rdma_is_port_valid(device, port)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ stats = ib_get_hw_stats_port(device, port);
+ if (!stats) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ nlh = nlmsg_put(
+ msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_GET_STATUS),
+ 0, 0);
+
+ ret = -EMSGSIZE;
+ if (!nlh || fill_nldev_handle(msg, device) ||
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
+ goto err_msg;
+
+ table = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
+ if (!table)
+ goto err_msg;
+
+ mutex_lock(&stats->lock);
+ for (i = 0; i < stats->num_counters; i++) {
+ entry = nla_nest_start(msg,
+ RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY);
+ if (!entry)
+ goto err_msg_table;
+
+ if (nla_put_string(msg,
+ RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME,
+ stats->descs[i].name) ||
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX, i))
+ goto err_msg_entry;
+
+ if ((stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL) &&
+ (nla_put_u8(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC,
+ !test_bit(i, stats->is_disabled))))
+ goto err_msg_entry;
+
+ nla_nest_end(msg, entry);
+ }
+ mutex_unlock(&stats->lock);
+
+ nla_nest_end(msg, table);
+ nlmsg_end(msg, nlh);
+ ib_device_put(device);
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
+
+err_msg_entry:
+ nla_nest_cancel(msg, entry);
+err_msg_table:
+ mutex_unlock(&stats->lock);
+ nla_nest_cancel(msg, table);
+err_msg:
+ nlmsg_free(msg);
+err:
+ ib_device_put(device);
+ return ret;
+}
+
+static int nldev_newdev(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ enum rdma_nl_dev_type type;
+ struct ib_device *parent;
+ char name[IFNAMSIZ] = {};
+ u32 parentid;
+ int ret;
+
+ ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
+ if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
+ !tb[RDMA_NLDEV_ATTR_DEV_NAME] || !tb[RDMA_NLDEV_ATTR_DEV_TYPE])
+ return -EINVAL;
+
+ nla_strscpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME], sizeof(name));
+ type = nla_get_u8(tb[RDMA_NLDEV_ATTR_DEV_TYPE]);
+ parentid = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ parent = ib_device_get_by_index(sock_net(skb->sk), parentid);
+ if (!parent)
+ return -EINVAL;
+
+ ret = ib_add_sub_device(parent, type, name);
+ ib_device_put(parent);
+
+ return ret;
+}
+
+static int nldev_deldev(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct ib_device *device;
+ u32 devid;
+ int ret;
+
+ ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
+ if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+ return -EINVAL;
+
+ devid = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(sock_net(skb->sk), devid);
+ if (!device)
+ return -EINVAL;
+
+ return ib_del_sub_device_and_put(device);
+}
+
+static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
+ [RDMA_NLDEV_CMD_GET] = {
+ .doit = nldev_get_doit,
+ .dump = nldev_get_dumpit,
+ },
+ [RDMA_NLDEV_CMD_GET_CHARDEV] = {
+ .doit = nldev_get_chardev,
+ },
+ [RDMA_NLDEV_CMD_SET] = {
+ .doit = nldev_set_doit,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
+ [RDMA_NLDEV_CMD_NEWLINK] = {
+ .doit = nldev_newlink,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
+ [RDMA_NLDEV_CMD_DELLINK] = {
+ .doit = nldev_dellink,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
+ [RDMA_NLDEV_CMD_PORT_GET] = {
+ .doit = nldev_port_get_doit,
+ .dump = nldev_port_get_dumpit,
+ },
+ [RDMA_NLDEV_CMD_RES_GET] = {
+ .doit = nldev_res_get_doit,
+ .dump = nldev_res_get_dumpit,
+ },
+ [RDMA_NLDEV_CMD_RES_QP_GET] = {
+ .doit = nldev_res_get_qp_doit,
+ .dump = nldev_res_get_qp_dumpit,
+ },
+ [RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
+ .doit = nldev_res_get_cm_id_doit,
+ .dump = nldev_res_get_cm_id_dumpit,
+ },
+ [RDMA_NLDEV_CMD_RES_CQ_GET] = {
+ .doit = nldev_res_get_cq_doit,
+ .dump = nldev_res_get_cq_dumpit,
+ },
+ [RDMA_NLDEV_CMD_RES_MR_GET] = {
+ .doit = nldev_res_get_mr_doit,
+ .dump = nldev_res_get_mr_dumpit,
+ },
+ [RDMA_NLDEV_CMD_RES_PD_GET] = {
+ .doit = nldev_res_get_pd_doit,
+ .dump = nldev_res_get_pd_dumpit,
+ },
+ [RDMA_NLDEV_CMD_RES_CTX_GET] = {
+ .doit = nldev_res_get_ctx_doit,
+ .dump = nldev_res_get_ctx_dumpit,
+ },
+ [RDMA_NLDEV_CMD_RES_SRQ_GET] = {
+ .doit = nldev_res_get_srq_doit,
+ .dump = nldev_res_get_srq_dumpit,
+ },
+ [RDMA_NLDEV_CMD_SYS_GET] = {
+ .doit = nldev_sys_get_doit,
+ },
+ [RDMA_NLDEV_CMD_SYS_SET] = {
+ .doit = nldev_set_sys_set_doit,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
+ [RDMA_NLDEV_CMD_STAT_SET] = {
+ .doit = nldev_stat_set_doit,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
+ [RDMA_NLDEV_CMD_STAT_GET] = {
+ .doit = nldev_stat_get_doit,
+ .dump = nldev_stat_get_dumpit,
+ },
+ [RDMA_NLDEV_CMD_STAT_DEL] = {
+ .doit = nldev_stat_del_doit,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
+ [RDMA_NLDEV_CMD_RES_QP_GET_RAW] = {
+ .doit = nldev_res_get_qp_raw_doit,
+ .dump = nldev_res_get_qp_raw_dumpit,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
+ [RDMA_NLDEV_CMD_RES_CQ_GET_RAW] = {
+ .doit = nldev_res_get_cq_raw_doit,
+ .dump = nldev_res_get_cq_raw_dumpit,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
+ [RDMA_NLDEV_CMD_RES_MR_GET_RAW] = {
+ .doit = nldev_res_get_mr_raw_doit,
+ .dump = nldev_res_get_mr_raw_dumpit,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
+ [RDMA_NLDEV_CMD_RES_SRQ_GET_RAW] = {
+ .doit = nldev_res_get_srq_raw_doit,
+ .dump = nldev_res_get_srq_raw_dumpit,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
+ [RDMA_NLDEV_CMD_STAT_GET_STATUS] = {
+ .doit = nldev_stat_get_counter_status_doit,
+ },
+ [RDMA_NLDEV_CMD_NEWDEV] = {
+ .doit = nldev_newdev,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
+ [RDMA_NLDEV_CMD_DELDEV] = {
+ .doit = nldev_deldev,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
+};
+
+static int fill_mon_netdev_rename(struct sk_buff *msg,
+ struct ib_device *device, u32 port,
+ const struct net *net)
+{
+ struct net_device *netdev = ib_device_get_netdev(device, port);
+ int ret = 0;
+
+ if (!netdev || !net_eq(dev_net(netdev), net))
+ goto out;
+
+ ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
+ if (ret)
+ goto out;
+ ret = nla_put_string(msg, RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
+out:
+ dev_put(netdev);
+ return ret;
+}
+
+static int fill_mon_netdev_association(struct sk_buff *msg,
+ struct ib_device *device, u32 port,
+ const struct net *net)
+{
+ struct net_device *netdev = ib_device_get_netdev(device, port);
+ int ret = 0;
+
+ if (netdev && !net_eq(dev_net(netdev), net))
+ goto out;
+
+ ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index);
+ if (ret)
+ goto out;
+
+ ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
+ dev_name(&device->dev));
+ if (ret)
+ goto out;
+
+ ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port);
+ if (ret)
+ goto out;
+
+ if (netdev) {
+ ret = nla_put_u32(msg,
+ RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
+ if (ret)
+ goto out;
+
+ ret = nla_put_string(msg,
+ RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
+ }
+
+out:
+ dev_put(netdev);
+ return ret;
+}
+
+static void rdma_nl_notify_err_msg(struct ib_device *device, u32 port_num,
+ enum rdma_nl_notify_event_type type)
+{
+ struct net_device *netdev;
+
+ switch (type) {
+ case RDMA_REGISTER_EVENT:
+ dev_warn_ratelimited(&device->dev,
+ "Failed to send RDMA monitor register device event\n");
+ break;
+ case RDMA_UNREGISTER_EVENT:
+ dev_warn_ratelimited(&device->dev,
+ "Failed to send RDMA monitor unregister device event\n");
+ break;
+ case RDMA_NETDEV_ATTACH_EVENT:
+ netdev = ib_device_get_netdev(device, port_num);
+ dev_warn_ratelimited(&device->dev,
+ "Failed to send RDMA monitor netdev attach event: port %d netdev %d\n",
+ port_num, netdev->ifindex);
+ dev_put(netdev);
+ break;
+ case RDMA_NETDEV_DETACH_EVENT:
+ dev_warn_ratelimited(&device->dev,
+ "Failed to send RDMA monitor netdev detach event: port %d\n",
+ port_num);
+ break;
+ case RDMA_RENAME_EVENT:
+ dev_warn_ratelimited(&device->dev,
+ "Failed to send RDMA monitor rename device event\n");
+ break;
+
+ case RDMA_NETDEV_RENAME_EVENT:
+ netdev = ib_device_get_netdev(device, port_num);
+ dev_warn_ratelimited(&device->dev,
+ "Failed to send RDMA monitor netdev rename event: port %d netdev %d\n",
+ port_num, netdev->ifindex);
+ dev_put(netdev);
+ break;
+ default:
+ break;
+ }
+}
+
+int rdma_nl_notify_event(struct ib_device *device, u32 port_num,
+ enum rdma_nl_notify_event_type type)
+{
+ struct sk_buff *skb;
+ int ret = -EMSGSIZE;
+ struct net *net;
+ void *nlh;
+
+ net = read_pnet(&device->coredev.rdma_net);
+ if (!net)
+ return -EINVAL;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ nlh = nlmsg_put(skb, 0, 0,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_MONITOR),
+ 0, 0);
+ if (!nlh)
+ goto err_free;
+
+ switch (type) {
+ case RDMA_REGISTER_EVENT:
+ case RDMA_UNREGISTER_EVENT:
+ case RDMA_RENAME_EVENT:
+ ret = fill_nldev_handle(skb, device);
+ if (ret)
+ goto err_free;
+ break;
+ case RDMA_NETDEV_ATTACH_EVENT:
+ case RDMA_NETDEV_DETACH_EVENT:
+ ret = fill_mon_netdev_association(skb, device, port_num, net);
+ if (ret)
+ goto err_free;
+ break;
+ case RDMA_NETDEV_RENAME_EVENT:
+ ret = fill_mon_netdev_rename(skb, device, port_num, net);
+ if (ret)
+ goto err_free;
+ break;
+ default:
+ break;
+ }
+
+ ret = nla_put_u8(skb, RDMA_NLDEV_ATTR_EVENT_TYPE, type);
+ if (ret)
+ goto err_free;
+
+ nlmsg_end(skb, nlh);
+ ret = rdma_nl_multicast(net, skb, RDMA_NL_GROUP_NOTIFY, GFP_KERNEL);
+ if (ret && ret != -ESRCH) {
+ skb = NULL; /* skb is freed in the netlink send-op handling */
+ goto err_free;
+ }
+ return 0;
+
+err_free:
+ rdma_nl_notify_err_msg(device, port_num, type);
+ nlmsg_free(skb);
+ return ret;
+}
+
+void __init nldev_init(void)
+{
+ rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
+}
+
+void nldev_exit(void)
+{
+ rdma_nl_unregister(RDMA_NL_NLDEV);
+}
+
+MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);
diff --git a/drivers/infiniband/core/opa_smi.h b/drivers/infiniband/core/opa_smi.h
index 3bfab3505a29..64e2822af70f 100644
--- a/drivers/infiniband/core/opa_smi.h
+++ b/drivers/infiniband/core/opa_smi.h
@@ -40,11 +40,11 @@
#include "smi.h"
enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
- int port_num, int phys_port_cnt);
+ u32 port_num, int phys_port_cnt);
int opa_smi_get_fwd_port(struct opa_smp *smp);
extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp);
extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
- bool is_switch, int port_num);
+ bool is_switch, u32 port_num);
/*
* Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
@@ -55,7 +55,7 @@ static inline enum smi_action opa_smi_check_local_smp(struct opa_smp *smp,
{
/* C14-9:3 -- We're at the end of the DR segment of path */
/* C14-9:4 -- Hop Pointer = Hop Count + 1 -> give to SMA/SM */
- return (device->process_mad &&
+ return (device->ops.process_mad &&
!opa_get_smp_direction(smp) &&
(smp->hop_ptr == smp->hop_cnt + 1)) ?
IB_SMI_HANDLE : IB_SMI_DISCARD;
@@ -70,7 +70,7 @@ static inline enum smi_action opa_smi_check_local_returning_smp(struct opa_smp *
{
/* C14-13:3 -- We're at the end of the DR segment of path */
/* C14-13:4 -- Hop Pointer == 0 -> give to SM */
- return (device->process_mad &&
+ return (device->ops.process_mad &&
opa_get_smp_direction(smp) &&
!smp->hop_ptr) ? IB_SMI_HANDLE : IB_SMI_DISCARD;
}
diff --git a/drivers/infiniband/core/packer.c b/drivers/infiniband/core/packer.c
index 1b65986c0be3..19b1ee3279b4 100644
--- a/drivers/infiniband/core/packer.c
+++ b/drivers/infiniband/core/packer.c
@@ -44,7 +44,7 @@ static u64 value_read(int offset, int size, void *structure)
case 4: return be32_to_cpup((__be32 *) (structure + offset));
case 8: return be64_to_cpup((__be64 *) (structure + offset));
default:
- printk(KERN_WARNING "Field size %d bits not handled\n", size * 8);
+ pr_warn("Field size %d bits not handled\n", size * 8);
return 0;
}
}
@@ -104,9 +104,8 @@ void ib_pack(const struct ib_field *desc,
} else {
if (desc[i].offset_bits % 8 ||
desc[i].size_bits % 8) {
- printk(KERN_WARNING "Structure field %s of size %d "
- "bits is not byte-aligned\n",
- desc[i].field_name, desc[i].size_bits);
+ pr_warn("Structure field %s of size %d bits is not byte-aligned\n",
+ desc[i].field_name, desc[i].size_bits);
}
if (desc[i].struct_size_bytes)
@@ -132,7 +131,7 @@ static void value_write(int offset, int size, u64 val, void *structure)
case 32: *(__be32 *) (structure + offset) = cpu_to_be32(val); break;
case 64: *(__be64 *) (structure + offset) = cpu_to_be64(val); break;
default:
- printk(KERN_WARNING "Field size %d bits not handled\n", size * 8);
+ pr_warn("Field size %d bits not handled\n", size * 8);
}
}
@@ -188,9 +187,8 @@ void ib_unpack(const struct ib_field *desc,
} else {
if (desc[i].offset_bits % 8 ||
desc[i].size_bits % 8) {
- printk(KERN_WARNING "Structure field %s of size %d "
- "bits is not byte-aligned\n",
- desc[i].field_name, desc[i].size_bits);
+ pr_warn("Structure field %s of size %d bits is not byte-aligned\n",
+ desc[i].field_name, desc[i].size_bits);
}
memcpy(structure + desc[i].struct_offset_bytes,
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
new file mode 100644
index 000000000000..18918f463361
--- /dev/null
+++ b/drivers/infiniband/core/rdma_core.c
@@ -0,0 +1,1050 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/file.h>
+#include <linux/anon_inodes.h>
+#include <linux/sched/mm.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/uverbs_types.h>
+#include <linux/rcupdate.h>
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/rdma_user_ioctl.h>
+#include "uverbs.h"
+#include "core_priv.h"
+#include "rdma_core.h"
+
+static void uverbs_uobject_free(struct kref *ref)
+{
+ kfree_rcu(container_of(ref, struct ib_uobject, ref), rcu);
+}
+
+/*
+ * In order to indicate we no longer need this uobject, uverbs_uobject_put
+ * is called. When the reference count drops to zero, the uobject is freed.
+ * For example, this is used when attaching a completion channel to a CQ.
+ */
+void uverbs_uobject_put(struct ib_uobject *uobject)
+{
+ kref_put(&uobject->ref, uverbs_uobject_free);
+}
+EXPORT_SYMBOL(uverbs_uobject_put);
+
+int uverbs_try_lock_object(struct ib_uobject *uobj,
+ enum rdma_lookup_mode mode)
+{
+ /*
+ * When shared access is required, we use a positive counter. Each
+ * shared access request checks that the value != -1 and increments it.
+ * Exclusive access is required for operations like write or destroy.
+ * In exclusive access mode, we check that the counter is zero (nobody
+ * claimed this object) and set it to -1. Releasing a shared access
+ * lock is done simply by decrementing the counter. As for exclusive
+ * access locks, since only a single one of them is allowed
+ * concurrently, setting the counter to zero is enough to release
+ * this lock.
+ */
+ switch (mode) {
+ case UVERBS_LOOKUP_READ:
+ return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ?
+ -EBUSY : 0;
+ case UVERBS_LOOKUP_WRITE:
+ /* lock is exclusive */
+ return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
+ case UVERBS_LOOKUP_DESTROY:
+ return 0;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(uverbs_try_lock_object);
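
To make the counter protocol described above concrete, the sketch below shows the transitions a caller relies on (illustration only, not part of this patch; in the real code the releases are performed by the corresponding put/unlock paths rather than open-coded like this):

static void demo_usecnt_protocol(struct ib_uobject *uobj)
{
	/*
	 * Reader: usecnt goes 0, 1, 2, ... and the attempt fails only
	 * while a writer holds the object (usecnt == -1).
	 */
	if (!uverbs_try_lock_object(uobj, UVERBS_LOOKUP_READ)) {
		/* ... inspect the object ... */
		atomic_dec(&uobj->usecnt);	/* release shared access */
	}

	/*
	 * Writer: succeeds only when nobody holds the object (usecnt == 0)
	 * and marks it exclusively owned by setting usecnt to -1.
	 */
	if (!uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE)) {
		/* ... modify or destroy the object ... */
		atomic_set(&uobj->usecnt, 0);	/* release exclusive access */
	}
}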
+
+static void assert_uverbs_usecnt(struct ib_uobject *uobj,
+ enum rdma_lookup_mode mode)
+{
+#ifdef CONFIG_LOCKDEP
+ switch (mode) {
+ case UVERBS_LOOKUP_READ:
+ WARN_ON(atomic_read(&uobj->usecnt) <= 0);
+ break;
+ case UVERBS_LOOKUP_WRITE:
+ WARN_ON(atomic_read(&uobj->usecnt) != -1);
+ break;
+ case UVERBS_LOOKUP_DESTROY:
+ break;
+ }
+#endif
+}
+
+/*
+ * This must be called with the hw_destroy_rwsem locked for read or write,
+ * also the uobject itself must be locked for write.
+ *
+ * Upon return the HW object is guaranteed to be destroyed.
+ *
+ * For RDMA_REMOVE_ABORT, the hw_destroy_rwsem is not required to be held,
+ * however the type's alloc_commit function cannot have been called and the
+ * uobject cannot be on the uobjects list.
+ *
+ * For RDMA_REMOVE_DESTROY the caller should be holding a kref (eg via
+ * rdma_lookup_get_uobject) and the object is left in a state where the caller
+ * needs to call rdma_lookup_put_uobject.
+ *
+ * For all other destroy modes this function internally unlocks the uobject
+ * and consumes the kref on the uobj.
+ */
+static int uverbs_destroy_uobject(struct ib_uobject *uobj,
+ enum rdma_remove_reason reason,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uverbs_file *ufile = attrs->ufile;
+ unsigned long flags;
+ int ret;
+
+ lockdep_assert_held(&ufile->hw_destroy_rwsem);
+ assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE);
+
+ if (reason == RDMA_REMOVE_ABORT) {
+ WARN_ON(!list_empty(&uobj->list));
+ WARN_ON(!uobj->context);
+ uobj->uapi_object->type_class->alloc_abort(uobj);
+ } else if (uobj->object) {
+ ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason,
+ attrs);
+ if (ret)
+ /* Nothing to be done, wait until the ucontext cleans it up */
+ return ret;
+
+ uobj->object = NULL;
+ }
+
+ uobj->context = NULL;
+
+ /*
+ * For DESTROY the usecnt is not changed, the caller is expected to
+ * manage it via uobj_put_destroy(). Only DESTROY can remove the IDR
+ * handle.
+ */
+ if (reason != RDMA_REMOVE_DESTROY)
+ atomic_set(&uobj->usecnt, 0);
+ else
+ uobj->uapi_object->type_class->remove_handle(uobj);
+
+ if (!list_empty(&uobj->list)) {
+ spin_lock_irqsave(&ufile->uobjects_lock, flags);
+ list_del_init(&uobj->list);
+ spin_unlock_irqrestore(&ufile->uobjects_lock, flags);
+
+ /*
+ * Pairs with the get in rdma_alloc_commit_uobject(), could
+ * destroy uobj.
+ */
+ uverbs_uobject_put(uobj);
+ }
+
+ /*
+ * When aborting the stack kref remains owned by the core code, and is
+ * not transferred into the type. Pairs with the get in alloc_uobj
+ */
+ if (reason == RDMA_REMOVE_ABORT)
+ uverbs_uobject_put(uobj);
+
+ return 0;
+}
+
+/*
+ * This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY
+ * sequence. It should only be used from command callbacks. On success the
+ * caller must pair this with uobj_put_destroy(). This
+ * version requires the caller to have already obtained an
+ * LOOKUP_DESTROY uobject kref.
+ */
+int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uverbs_file *ufile = attrs->ufile;
+ int ret;
+
+ down_read(&ufile->hw_destroy_rwsem);
+
+ /*
+ * Once the uobject is destroyed by RDMA_REMOVE_DESTROY then it is left
+ * write locked as the callers put it back with UVERBS_LOOKUP_DESTROY.
+ * This is because any other concurrent thread can still see the object
+ * in the xarray due to RCU. Leaving it locked ensures nothing else will
+ * touch it.
+ */
+ ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE);
+ if (ret)
+ goto out_unlock;
+
+ ret = uverbs_destroy_uobject(uobj, RDMA_REMOVE_DESTROY, attrs);
+ if (ret) {
+ atomic_set(&uobj->usecnt, 0);
+ goto out_unlock;
+ }
+
+out_unlock:
+ up_read(&ufile->hw_destroy_rwsem);
+ return ret;
+}
+
+/*
+ * uobj_get_destroy destroys the HW object and returns a handle to the uobj
+ * with a NULL object pointer. The caller must pair this with
+ * uobj_put_destroy().
+ */
+struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
+ u32 id, struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj;
+ int ret;
+
+ uobj = rdma_lookup_get_uobject(obj, attrs->ufile, id,
+ UVERBS_LOOKUP_DESTROY, attrs);
+ if (IS_ERR(uobj))
+ return uobj;
+
+ ret = uobj_destroy(uobj, attrs);
+ if (ret) {
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
+ return ERR_PTR(ret);
+ }
+
+ return uobj;
+}
+
+/*
+ * Does both uobj_get_destroy() and uobj_put_destroy(). Returns 0 on success
+ * (negative errno on failure). For use by callers that do not need the uobj.
+ */
+int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj;
+
+ uobj = __uobj_get_destroy(obj, id, attrs);
+ if (IS_ERR(uobj))
+ return PTR_ERR(uobj);
+ uobj_put_destroy(uobj);
+ return 0;
+}
+
+/* alloc_uobj must be undone by uverbs_destroy_uobject() */
+static struct ib_uobject *alloc_uobj(struct uverbs_attr_bundle *attrs,
+ const struct uverbs_api_object *obj)
+{
+ struct ib_uverbs_file *ufile = attrs->ufile;
+ struct ib_uobject *uobj;
+
+ if (!attrs->context) {
+ struct ib_ucontext *ucontext =
+ ib_uverbs_get_ucontext_file(ufile);
+
+ if (IS_ERR(ucontext))
+ return ERR_CAST(ucontext);
+ attrs->context = ucontext;
+ }
+
+ uobj = kzalloc(obj->type_attrs->obj_size, GFP_KERNEL);
+ if (!uobj)
+ return ERR_PTR(-ENOMEM);
+ /*
+ * user_handle should be filled by the handler; the object is added to the
+ * list in the commit stage.
+ */
+ uobj->ufile = ufile;
+ uobj->context = attrs->context;
+ INIT_LIST_HEAD(&uobj->list);
+ uobj->uapi_object = obj;
+ /*
+ * Allocated objects start out as write locked to deny any other
+ * syscalls from accessing them until they are committed. See
+ * rdma_alloc_commit_uobject
+ */
+ atomic_set(&uobj->usecnt, -1);
+ kref_init(&uobj->ref);
+
+ return uobj;
+}
+
+static int idr_add_uobj(struct ib_uobject *uobj)
+{
+ /*
+ * We start by allocating an idr entry pointing to NULL. This represents an
+ * object which isn't initialized yet. We'll replace it later on with
+ * the real object once we commit.
+ */
+ return xa_alloc(&uobj->ufile->idr, &uobj->id, NULL, xa_limit_32b,
+ GFP_KERNEL);
+}
+
+/* Returns the ib_uobject or an error. The caller should check for IS_ERR. */
+static struct ib_uobject *
+lookup_get_idr_uobject(const struct uverbs_api_object *obj,
+ struct ib_uverbs_file *ufile, s64 id,
+ enum rdma_lookup_mode mode)
+{
+ struct ib_uobject *uobj;
+
+ if (id < 0 || id > ULONG_MAX)
+ return ERR_PTR(-EINVAL);
+
+ rcu_read_lock();
+ /*
+ * The xa_load() is guaranteed to return a pointer to something that
+ * isn't freed yet, or NULL, as the free after xa_erase() goes through
+ * kfree_rcu(). However the object may still have been released and
+ * kfree() could be called at any time.
+ */
+ uobj = xa_load(&ufile->idr, id);
+ if (!uobj || !kref_get_unless_zero(&uobj->ref))
+ uobj = ERR_PTR(-ENOENT);
+ rcu_read_unlock();
+ return uobj;
+}
+
+static struct ib_uobject *
+lookup_get_fd_uobject(const struct uverbs_api_object *obj,
+ struct ib_uverbs_file *ufile, s64 id,
+ enum rdma_lookup_mode mode)
+{
+ const struct uverbs_obj_fd_type *fd_type;
+ struct file *f;
+ struct ib_uobject *uobject;
+ int fdno = id;
+
+ if (fdno != id)
+ return ERR_PTR(-EINVAL);
+
+ if (mode != UVERBS_LOOKUP_READ)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (!obj->type_attrs)
+ return ERR_PTR(-EIO);
+ fd_type =
+ container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
+
+ f = fget(fdno);
+ if (!f)
+ return ERR_PTR(-EBADF);
+
+ uobject = f->private_data;
+ /*
+ * fget(id) ensures we are not currently running
+ * uverbs_uobject_fd_release(), and the caller is expected to ensure
+ * that release is never done while a call to lookup is possible.
+ */
+ if (f->f_op != fd_type->fops || uobject->ufile != ufile) {
+ fput(f);
+ return ERR_PTR(-EBADF);
+ }
+
+ uverbs_uobject_get(uobject);
+ return uobject;
+}
+
+struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj,
+ struct ib_uverbs_file *ufile, s64 id,
+ enum rdma_lookup_mode mode,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj;
+ int ret;
+
+ if (obj == ERR_PTR(-ENOMSG)) {
+ /* must be UVERBS_IDR_ANY_OBJECT, see uapi_get_object() */
+ uobj = lookup_get_idr_uobject(NULL, ufile, id, mode);
+ if (IS_ERR(uobj))
+ return uobj;
+ } else {
+ if (IS_ERR(obj))
+ return ERR_PTR(-EINVAL);
+
+ uobj = obj->type_class->lookup_get(obj, ufile, id, mode);
+ if (IS_ERR(uobj))
+ return uobj;
+
+ if (uobj->uapi_object != obj) {
+ ret = -EINVAL;
+ goto free;
+ }
+ }
+
+ /*
+ * If we have been disassociated block every command except for
+ * DESTROY based commands.
+ */
+ if (mode != UVERBS_LOOKUP_DESTROY &&
+ !srcu_dereference(ufile->device->ib_dev,
+ &ufile->device->disassociate_srcu)) {
+ ret = -EIO;
+ goto free;
+ }
+
+ ret = uverbs_try_lock_object(uobj, mode);
+ if (ret)
+ goto free;
+ if (attrs)
+ attrs->context = uobj->context;
+
+ return uobj;
+free:
+ uobj->uapi_object->type_class->lookup_put(uobj, mode);
+ uverbs_uobject_put(uobj);
+ return ERR_PTR(ret);
+}
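+
+/*
+ * A minimal usage sketch for the lookup API, assuming a caller that only
+ * needs shared access to an existing object (uverbs_get_uobject_from_file()
+ * below is the real entry point used by the syscall machinery):
+ *
+ *     uobj = rdma_lookup_get_uobject(obj, attrs->ufile, id,
+ *                                    UVERBS_LOOKUP_READ, attrs);
+ *     if (IS_ERR(uobj))
+ *             return PTR_ERR(uobj);
+ *     ... read through uobj->object ...
+ *     rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ);
+ */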
+
+static struct ib_uobject *
+alloc_begin_idr_uobject(const struct uverbs_api_object *obj,
+ struct uverbs_attr_bundle *attrs)
+{
+ int ret;
+ struct ib_uobject *uobj;
+
+ uobj = alloc_uobj(attrs, obj);
+ if (IS_ERR(uobj))
+ return uobj;
+
+ ret = idr_add_uobj(uobj);
+ if (ret)
+ goto uobj_put;
+
+ ret = ib_rdmacg_try_charge(&uobj->cg_obj, uobj->context->device,
+ RDMACG_RESOURCE_HCA_OBJECT);
+ if (ret)
+ goto remove;
+
+ return uobj;
+
+remove:
+ xa_erase(&attrs->ufile->idr, uobj->id);
+uobj_put:
+ uverbs_uobject_put(uobj);
+ return ERR_PTR(ret);
+}
+
+static struct ib_uobject *
+alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
+ struct uverbs_attr_bundle *attrs)
+{
+ const struct uverbs_obj_fd_type *fd_type;
+ int new_fd;
+ struct ib_uobject *uobj, *ret;
+ struct file *filp;
+
+ uobj = alloc_uobj(attrs, obj);
+ if (IS_ERR(uobj))
+ return uobj;
+
+ fd_type =
+ container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
+ if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release &&
+ fd_type->fops->release != &uverbs_async_event_release)) {
+ ret = ERR_PTR(-EINVAL);
+ goto err_fd;
+ }
+
+ new_fd = get_unused_fd_flags(O_CLOEXEC);
+ if (new_fd < 0) {
+ ret = ERR_PTR(new_fd);
+ goto err_fd;
+ }
+
+ /* Note that uverbs_uobject_fd_release() is called during abort */
+ filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
+ fd_type->flags);
+ if (IS_ERR(filp)) {
+ ret = ERR_CAST(filp);
+ goto err_getfile;
+ }
+ uobj->object = filp;
+
+ uobj->id = new_fd;
+ return uobj;
+
+err_getfile:
+ put_unused_fd(new_fd);
+err_fd:
+ uverbs_uobject_put(uobj);
+ return ret;
+}
+
+struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uverbs_file *ufile = attrs->ufile;
+ struct ib_uobject *ret;
+
+ if (IS_ERR(obj))
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * The hw_destroy_rwsem is held across the entire object creation and
+ * released during rdma_alloc_commit_uobject or
+ * rdma_alloc_abort_uobject
+ */
+ if (!down_read_trylock(&ufile->hw_destroy_rwsem))
+ return ERR_PTR(-EIO);
+
+ ret = obj->type_class->alloc_begin(obj, attrs);
+ if (IS_ERR(ret)) {
+ up_read(&ufile->hw_destroy_rwsem);
+ return ret;
+ }
+ return ret;
+}
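+
+/*
+ * A sketch of the expected allocation flow (this is what the NEW access
+ * handling in uverbs_get_uobject_from_file() and uverbs_finalize_object()
+ * below implements):
+ *
+ *     uobj = rdma_alloc_begin_uobject(obj, attrs);
+ *     if (IS_ERR(uobj))
+ *             return PTR_ERR(uobj);
+ *     ... create the HW object and store it in uobj->object ...
+ *     on failure:  rdma_alloc_abort_uobject(uobj, attrs, false);
+ *     on success:  rdma_alloc_commit_uobject(uobj, attrs);
+ */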
+
+static void alloc_abort_idr_uobject(struct ib_uobject *uobj)
+{
+ ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
+ RDMACG_RESOURCE_HCA_OBJECT);
+
+ xa_erase(&uobj->ufile->idr, uobj->id);
+}
+
+static int __must_check destroy_hw_idr_uobject(struct ib_uobject *uobj,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ const struct uverbs_obj_idr_type *idr_type =
+ container_of(uobj->uapi_object->type_attrs,
+ struct uverbs_obj_idr_type, type);
+ int ret = idr_type->destroy_object(uobj, why, attrs);
+
+ if (ret)
+ return ret;
+
+ if (why == RDMA_REMOVE_ABORT)
+ return 0;
+
+ ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
+ RDMACG_RESOURCE_HCA_OBJECT);
+
+ return 0;
+}
+
+static void remove_handle_idr_uobject(struct ib_uobject *uobj)
+{
+ xa_erase(&uobj->ufile->idr, uobj->id);
+ /* Matches the kref in alloc_commit_idr_uobject */
+ uverbs_uobject_put(uobj);
+}
+
+static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
+{
+ struct file *filp = uobj->object;
+
+ fput(filp);
+ put_unused_fd(uobj->id);
+}
+
+static int __must_check destroy_hw_fd_uobject(struct ib_uobject *uobj,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ const struct uverbs_obj_fd_type *fd_type = container_of(
+ uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type);
+
+ fd_type->destroy_object(uobj, why);
+ return 0;
+}
+
+static void remove_handle_fd_uobject(struct ib_uobject *uobj)
+{
+}
+
+static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
+{
+ struct ib_uverbs_file *ufile = uobj->ufile;
+ void *old;
+
+ /*
+ * We already allocated this IDR with a NULL object, so
+ * this shouldn't fail.
+ *
+ * NOTE: Storing the uobj transfers our kref on uobj to the XArray.
+ * It will be put by remove_handle_idr_uobject().
+ */
+ old = xa_store(&ufile->idr, uobj->id, uobj, GFP_KERNEL);
+ WARN_ON(old != NULL);
+}
+
+static void swap_idr_uobjects(struct ib_uobject *obj_old,
+ struct ib_uobject *obj_new)
+{
+ struct ib_uverbs_file *ufile = obj_old->ufile;
+ void *old;
+
+ /*
+ * New must be an object that has been allocated but not yet committed;
+ * this moves the pre-committed state to obj_old, and new must still be
+ * committed.
+ */
+ old = xa_cmpxchg(&ufile->idr, obj_old->id, obj_old, XA_ZERO_ENTRY,
+ GFP_KERNEL);
+ if (WARN_ON(old != obj_old))
+ return;
+
+ swap(obj_old->id, obj_new->id);
+
+ old = xa_cmpxchg(&ufile->idr, obj_old->id, NULL, obj_old, GFP_KERNEL);
+ WARN_ON(old != NULL);
+}
+
+static void alloc_commit_fd_uobject(struct ib_uobject *uobj)
+{
+ int fd = uobj->id;
+ struct file *filp = uobj->object;
+
+ /* Matching put will be done in uverbs_uobject_fd_release() */
+ kref_get(&uobj->ufile->ref);
+
+ /* This shouldn't be used anymore. Use the file object instead */
+ uobj->id = 0;
+
+ /*
+ * NOTE: Once we install the file we lose ownership of our kref on
+ * uobj. It will be put by uverbs_uobject_fd_release()
+ */
+ filp->private_data = uobj;
+ fd_install(fd, filp);
+}
+
+/*
+ * In all cases rdma_alloc_commit_uobject() consumes the kref to uobj and the
+ * caller can no longer assume uobj is valid. If this function fails it
+ * destroys the uobject, including the attached HW object.
+ */
+void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uverbs_file *ufile = attrs->ufile;
+
+ /* kref is held so long as the uobj is on the uobj list. */
+ uverbs_uobject_get(uobj);
+ spin_lock_irq(&ufile->uobjects_lock);
+ list_add(&uobj->list, &ufile->uobjects);
+ spin_unlock_irq(&ufile->uobjects_lock);
+
+ /* matches atomic_set(-1) in alloc_uobj */
+ atomic_set(&uobj->usecnt, 0);
+
+ /* alloc_commit consumes the uobj kref */
+ uobj->uapi_object->type_class->alloc_commit(uobj);
+
+ /* Matches the down_read in rdma_alloc_begin_uobject */
+ up_read(&ufile->hw_destroy_rwsem);
+}
+
+/*
+ * new_uobj will be assigned to the handle currently used by to_uobj, and
+ * to_uobj will be destroyed.
+ *
+ * Upon return the caller must do:
+ * rdma_alloc_commit_uobject(new_uobj)
+ * uobj_put_destroy(to_uobj)
+ *
+ * to_uobj must have a write get but the put mode switches to destroy once
+ * this is called.
+ */
+void rdma_assign_uobject(struct ib_uobject *to_uobj, struct ib_uobject *new_uobj,
+ struct uverbs_attr_bundle *attrs)
+{
+ assert_uverbs_usecnt(new_uobj, UVERBS_LOOKUP_WRITE);
+
+ if (WARN_ON(to_uobj->uapi_object != new_uobj->uapi_object ||
+ !to_uobj->uapi_object->type_class->swap_uobjects))
+ return;
+
+ to_uobj->uapi_object->type_class->swap_uobjects(to_uobj, new_uobj);
+
+ /*
+ * If this fails then the uobject is still completely valid (though with
+ * a new ID) and we leak it until context close.
+ */
+ uverbs_destroy_uobject(to_uobj, RDMA_REMOVE_DESTROY, attrs);
+}
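+
+/*
+ * A sketch of the caller sequence described above, assuming new_uobj came
+ * from rdma_alloc_begin_uobject() and to_uobj from a write-mode lookup:
+ *
+ *     rdma_assign_uobject(to_uobj, new_uobj, attrs);
+ *     rdma_alloc_commit_uobject(new_uobj, attrs);
+ *     uobj_put_destroy(to_uobj);
+ */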
+
+/*
+ * This consumes the kref for uobj. It is up to the caller to unwind the HW
+ * object and anything else connected to uobj before calling this.
+ */
+void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
+ struct uverbs_attr_bundle *attrs,
+ bool hw_obj_valid)
+{
+ struct ib_uverbs_file *ufile = uobj->ufile;
+ int ret;
+
+ if (hw_obj_valid) {
+ ret = uobj->uapi_object->type_class->destroy_hw(
+ uobj, RDMA_REMOVE_ABORT, attrs);
+ /*
+ * If the driver couldn't destroy the object then go ahead and
+ * commit it. Leaking objects that can't be destroyed is only
+ * done during FD close after the driver has a few more tries to
+ * destroy it.
+ */
+ if (WARN_ON(ret))
+ return rdma_alloc_commit_uobject(uobj, attrs);
+ }
+
+ uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT, attrs);
+
+ /* Matches the down_read in rdma_alloc_begin_uobject */
+ up_read(&ufile->hw_destroy_rwsem);
+}
+
+static void lookup_put_idr_uobject(struct ib_uobject *uobj,
+ enum rdma_lookup_mode mode)
+{
+}
+
+static void lookup_put_fd_uobject(struct ib_uobject *uobj,
+ enum rdma_lookup_mode mode)
+{
+ struct file *filp = uobj->object;
+
+ WARN_ON(mode != UVERBS_LOOKUP_READ);
+ /*
+ * This indirectly calls uverbs_uobject_fd_release() and frees the
+ * object.
+ */
+ fput(filp);
+}
+
+void rdma_lookup_put_uobject(struct ib_uobject *uobj,
+ enum rdma_lookup_mode mode)
+{
+ assert_uverbs_usecnt(uobj, mode);
+ /*
+ * In order to unlock an object, either decrease its usecnt for
+ * read access or zero it in case of exclusive access. See
+ * uverbs_try_lock_object for locking schema information.
+ */
+ switch (mode) {
+ case UVERBS_LOOKUP_READ:
+ atomic_dec(&uobj->usecnt);
+ break;
+ case UVERBS_LOOKUP_WRITE:
+ atomic_set(&uobj->usecnt, 0);
+ break;
+ case UVERBS_LOOKUP_DESTROY:
+ break;
+ }
+
+ uobj->uapi_object->type_class->lookup_put(uobj, mode);
+ /* Pairs with the kref obtained by type->lookup_get */
+ uverbs_uobject_put(uobj);
+}
+
+void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile)
+{
+ xa_init_flags(&ufile->idr, XA_FLAGS_ALLOC);
+}
+
+void release_ufile_idr_uobject(struct ib_uverbs_file *ufile)
+{
+ struct ib_uobject *entry;
+ unsigned long id;
+
+ /*
+ * At this point uverbs_cleanup_ufile() is guaranteed to have run, and
+ * there are no HW objects left, however the xarray is still populated
+ * with anything that has not been cleaned up by userspace. Since the
+ * kref on ufile is 0, nothing is allowed to call lookup_get.
+ *
+ * This is an optimized equivalent to remove_handle_idr_uobject
+ */
+ xa_for_each(&ufile->idr, id, entry) {
+ WARN_ON(entry->object);
+ uverbs_uobject_put(entry);
+ }
+
+ xa_destroy(&ufile->idr);
+}
+
+const struct uverbs_obj_type_class uverbs_idr_class = {
+ .alloc_begin = alloc_begin_idr_uobject,
+ .lookup_get = lookup_get_idr_uobject,
+ .alloc_commit = alloc_commit_idr_uobject,
+ .alloc_abort = alloc_abort_idr_uobject,
+ .lookup_put = lookup_put_idr_uobject,
+ .destroy_hw = destroy_hw_idr_uobject,
+ .remove_handle = remove_handle_idr_uobject,
+ .swap_uobjects = swap_idr_uobjects,
+};
+EXPORT_SYMBOL(uverbs_idr_class);
+
+/*
+ * Users of UVERBS_TYPE_ALLOC_FD should set this function as the struct
+ * file_operations release method.
+ */
+int uverbs_uobject_fd_release(struct inode *inode, struct file *filp)
+{
+ struct ib_uverbs_file *ufile;
+ struct ib_uobject *uobj;
+
+ /*
+ * This can only happen if the fput came from alloc_abort_fd_uobject()
+ */
+ if (!filp->private_data)
+ return 0;
+ uobj = filp->private_data;
+ ufile = uobj->ufile;
+
+ if (down_read_trylock(&ufile->hw_destroy_rwsem)) {
+ struct uverbs_attr_bundle attrs = {
+ .context = uobj->context,
+ .ufile = ufile,
+ };
+
+ /*
+ * lookup_get_fd_uobject holds the kref on the struct file any
+ * time a FD uobj is locked, which prevents this release
+ * method from being invoked. Meaning we can always get the
+ * write lock here, or we have a kernel bug.
+ */
+ WARN_ON(uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE));
+ uverbs_destroy_uobject(uobj, RDMA_REMOVE_CLOSE, &attrs);
+ up_read(&ufile->hw_destroy_rwsem);
+ }
+
+ /* Matches the get in alloc_commit_fd_uobject() */
+ kref_put(&ufile->ref, ib_uverbs_release_file);
+
+ /* Pairs with filp->private_data in alloc_begin_fd_uobject */
+ uverbs_uobject_put(uobj);
+ return 0;
+}
+EXPORT_SYMBOL(uverbs_uobject_fd_release);
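+
+/*
+ * A sketch of how an FD object type is expected to wire this up, assuming a
+ * hypothetical "foo" type; the .release assignment is what
+ * alloc_begin_fd_uobject() checks for (uverbs_async_event_release is the
+ * only other accepted release method):
+ *
+ *     static const struct file_operations foo_fops = {
+ *             .owner   = THIS_MODULE,
+ *             .release = uverbs_uobject_fd_release,
+ *     };
+ */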
+
+/*
+ * Drop the ucontext off the ufile and completely disconnect it from the
+ * ib_device
+ */
+static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
+ enum rdma_remove_reason reason)
+{
+ struct ib_ucontext *ucontext = ufile->ucontext;
+ struct ib_device *ib_dev = ucontext->device;
+
+ /*
+ * If we are closing the FD then the user mmap VMAs must have
+ * already been destroyed as they hold on to the filep, otherwise
+ * they need to be zap'd.
+ */
+ if (reason == RDMA_REMOVE_DRIVER_REMOVE) {
+ uverbs_user_mmap_disassociate(ufile);
+ if (ib_dev->ops.disassociate_ucontext)
+ ib_dev->ops.disassociate_ucontext(ucontext);
+ }
+
+ ib_rdmacg_uncharge(&ucontext->cg_obj, ib_dev,
+ RDMACG_RESOURCE_HCA_HANDLE);
+
+ rdma_restrack_del(&ucontext->res);
+
+ ib_dev->ops.dealloc_ucontext(ucontext);
+ WARN_ON(!xa_empty(&ucontext->mmap_xa));
+ kfree(ucontext);
+
+ ufile->ucontext = NULL;
+}
+
+static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
+ enum rdma_remove_reason reason)
+{
+ struct uverbs_attr_bundle attrs = { .ufile = ufile };
+ struct ib_ucontext *ucontext = ufile->ucontext;
+ struct ib_device *ib_dev = ucontext->device;
+ struct ib_uobject *obj, *next_obj;
+ int ret = -EINVAL;
+
+ if (ib_dev->ops.ufile_hw_cleanup)
+ ib_dev->ops.ufile_hw_cleanup(ufile);
+
+ /*
+ * This shouldn't run while executing other commands on this
+ * context. Thus, the only thing we should take care of is
+ * releasing a FD while traversing this list. The FD could be
+ * closed and released from the _release fop of this FD.
+ * In order to mitigate this, we add a lock.
+ * We take and release the lock per traversal in order to give
+ * other threads (which might still use the FDs) a chance to run.
+ */
+ list_for_each_entry_safe(obj, next_obj, &ufile->uobjects, list) {
+ attrs.context = obj->context;
+ /*
+ * if we hit this WARN_ON, that means we are
+ * racing with a lookup_get.
+ */
+ WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE));
+ if (reason == RDMA_REMOVE_DRIVER_FAILURE)
+ obj->object = NULL;
+ if (!uverbs_destroy_uobject(obj, reason, &attrs))
+ ret = 0;
+ else
+ atomic_set(&obj->usecnt, 0);
+ }
+
+ if (reason == RDMA_REMOVE_DRIVER_FAILURE) {
+ WARN_ON(!list_empty(&ufile->uobjects));
+ return 0;
+ }
+ return ret;
+}
+
+/*
+ * Destroy the ucontext and every uobject associated with it.
+ *
+ * This is internally locked and can be called in parallel from multiple
+ * contexts.
+ */
+void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
+ enum rdma_remove_reason reason)
+{
+ down_write(&ufile->hw_destroy_rwsem);
+
+ /*
+ * If a ucontext was never created then we can't have any uobjects to
+ * cleanup, nothing to do.
+ */
+ if (!ufile->ucontext)
+ goto done;
+
+ while (!list_empty(&ufile->uobjects) &&
+ !__uverbs_cleanup_ufile(ufile, reason)) {
+ }
+
+ if (WARN_ON(!list_empty(&ufile->uobjects)))
+ __uverbs_cleanup_ufile(ufile, RDMA_REMOVE_DRIVER_FAILURE);
+ ufile_destroy_ucontext(ufile, reason);
+
+done:
+ up_write(&ufile->hw_destroy_rwsem);
+}
+
+const struct uverbs_obj_type_class uverbs_fd_class = {
+ .alloc_begin = alloc_begin_fd_uobject,
+ .lookup_get = lookup_get_fd_uobject,
+ .alloc_commit = alloc_commit_fd_uobject,
+ .alloc_abort = alloc_abort_fd_uobject,
+ .lookup_put = lookup_put_fd_uobject,
+ .destroy_hw = destroy_hw_fd_uobject,
+ .remove_handle = remove_handle_fd_uobject,
+};
+EXPORT_SYMBOL(uverbs_fd_class);
+
+struct ib_uobject *
+uverbs_get_uobject_from_file(u16 object_id, enum uverbs_obj_access access,
+ s64 id, struct uverbs_attr_bundle *attrs)
+{
+ const struct uverbs_api_object *obj =
+ uapi_get_object(attrs->ufile->device->uapi, object_id);
+
+ switch (access) {
+ case UVERBS_ACCESS_READ:
+ return rdma_lookup_get_uobject(obj, attrs->ufile, id,
+ UVERBS_LOOKUP_READ, attrs);
+ case UVERBS_ACCESS_DESTROY:
+ /* Actual destruction is done inside uverbs_handle_method */
+ return rdma_lookup_get_uobject(obj, attrs->ufile, id,
+ UVERBS_LOOKUP_DESTROY, attrs);
+ case UVERBS_ACCESS_WRITE:
+ return rdma_lookup_get_uobject(obj, attrs->ufile, id,
+ UVERBS_LOOKUP_WRITE, attrs);
+ case UVERBS_ACCESS_NEW:
+ return rdma_alloc_begin_uobject(obj, attrs);
+ default:
+ WARN_ON(true);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+}
+
+void uverbs_finalize_object(struct ib_uobject *uobj,
+ enum uverbs_obj_access access, bool hw_obj_valid,
+ bool commit, struct uverbs_attr_bundle *attrs)
+{
+ /*
+ * refcounts should be handled at the object level and not at the
+ * uobject level. Refcounts of the objects themselves are done in
+ * handlers.
+ */
+
+ switch (access) {
+ case UVERBS_ACCESS_READ:
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ);
+ break;
+ case UVERBS_ACCESS_WRITE:
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
+ break;
+ case UVERBS_ACCESS_DESTROY:
+ if (uobj)
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
+ break;
+ case UVERBS_ACCESS_NEW:
+ if (commit)
+ rdma_alloc_commit_uobject(uobj, attrs);
+ else
+ rdma_alloc_abort_uobject(uobj, attrs, hw_obj_valid);
+ break;
+ default:
+ WARN_ON(true);
+ }
+}
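+
+/*
+ * Together, uverbs_get_uobject_from_file() and uverbs_finalize_object()
+ * bracket a handler invocation; roughly (a sketch, error handling omitted):
+ *
+ *     uobj = uverbs_get_uobject_from_file(object_id, access, id, attrs);
+ *     ret = handler(attrs);
+ *     uverbs_finalize_object(uobj, access, hw_obj_valid, !ret, attrs);
+ */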
+
+/**
+ * rdma_uattrs_has_raw_cap() - Return whether the rdma device linked to the
+ *                             uverbs attributes file has the CAP_NET_RAW
+ *                             capability.
+ *
+ * @attrs: Pointer to uverbs attributes
+ *
+ * Returns true if the rdma device's owning user namespace has the
+ * CAP_NET_RAW capability, otherwise false.
+ */
+bool rdma_uattrs_has_raw_cap(const struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uverbs_file *ufile = attrs->ufile;
+ struct ib_ucontext *ucontext;
+ bool has_cap = false;
+ int srcu_key;
+
+ srcu_key = srcu_read_lock(&ufile->device->disassociate_srcu);
+ ucontext = ib_uverbs_get_ucontext_file(ufile);
+ if (IS_ERR(ucontext))
+ goto out;
+ has_cap = rdma_dev_has_raw_cap(ucontext->device);
+
+out:
+ srcu_read_unlock(&ufile->device->disassociate_srcu, srcu_key);
+ return has_cap;
+}
+EXPORT_SYMBOL(rdma_uattrs_has_raw_cap);
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h
new file mode 100644
index 000000000000..a59b087611cb
--- /dev/null
+++ b/drivers/infiniband/core/rdma_core.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005-2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RDMA_CORE_H
+#define RDMA_CORE_H
+
+#include <linux/idr.h>
+#include <rdma/uverbs_types.h>
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/ib_verbs.h>
+#include <linux/mutex.h>
+
+struct ib_uverbs_device;
+
+void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
+ enum rdma_remove_reason reason);
+
+int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs);
+
+/*
+ * Get an ib_uobject that corresponds to the given id from ufile, assuming
+ * the object is from the given type. Lock it to the required access when
+ * applicable.
+ * This function could create (access == NEW), destroy (access == DESTROY)
+ * or lock (access == READ || access == WRITE) objects if required.
+ * The action will be finalized only when uverbs_finalize_object or
+ * uverbs_finalize_objects are called.
+ */
+struct ib_uobject *
+uverbs_get_uobject_from_file(u16 object_id, enum uverbs_obj_access access,
+ s64 id, struct uverbs_attr_bundle *attrs);
+
+void uverbs_finalize_object(struct ib_uobject *uobj,
+ enum uverbs_obj_access access, bool hw_obj_valid,
+ bool commit, struct uverbs_attr_bundle *attrs);
+
+int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx);
+
+void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile);
+void release_ufile_idr_uobject(struct ib_uverbs_file *ufile);
+
+struct ib_udata *uverbs_get_cleared_udata(struct uverbs_attr_bundle *attrs);
+
+/*
+ * This is the runtime description of the uverbs API, used by the syscall
+ * machinery to validate and dispatch calls.
+ */
+
+/*
+ * Depending on ID the slot pointer in the radix tree points at one of these
+ * structs.
+ */
+
+struct uverbs_api_ioctl_method {
+ int(__rcu *handler)(struct uverbs_attr_bundle *attrs);
+ DECLARE_BITMAP(attr_mandatory, UVERBS_API_ATTR_BKEY_LEN);
+ u16 bundle_size;
+ u8 use_stack:1;
+ u8 driver_method:1;
+ u8 disabled:1;
+ u8 has_udata:1;
+ u8 key_bitmap_len;
+ u8 destroy_bkey;
+};
+
+struct uverbs_api_write_method {
+ int (*handler)(struct uverbs_attr_bundle *attrs);
+ u8 disabled:1;
+ u8 is_ex:1;
+ u8 has_udata:1;
+ u8 has_resp:1;
+ u8 req_size;
+ u8 resp_size;
+};
+
+struct uverbs_api_attr {
+ struct uverbs_attr_spec spec;
+};
+
+struct uverbs_api {
+ /* radix tree contains struct uverbs_api_* pointers */
+ struct radix_tree_root radix;
+ enum rdma_driver_id driver_id;
+
+ unsigned int num_write;
+ unsigned int num_write_ex;
+ struct uverbs_api_write_method notsupp_method;
+ const struct uverbs_api_write_method **write_methods;
+ const struct uverbs_api_write_method **write_ex_methods;
+};
+
+/*
+ * Get an uverbs_api_object that corresponds to the given object_id.
+ * Note:
+ * -ENOMSG means that any object is allowed to match during lookup.
+ */
+static inline const struct uverbs_api_object *
+uapi_get_object(struct uverbs_api *uapi, u16 object_id)
+{
+ const struct uverbs_api_object *res;
+
+ if (object_id == UVERBS_IDR_ANY_OBJECT)
+ return ERR_PTR(-ENOMSG);
+
+ res = radix_tree_lookup(&uapi->radix, uapi_key_obj(object_id));
+ if (!res)
+ return ERR_PTR(-ENOENT);
+
+ return res;
+}
+
+char *uapi_key_format(char *S, unsigned int key);
+struct uverbs_api *uverbs_alloc_api(struct ib_device *ibdev);
+void uverbs_disassociate_api_pre(struct ib_uverbs_device *uverbs_dev);
+void uverbs_disassociate_api(struct uverbs_api *uapi);
+void uverbs_destroy_api(struct uverbs_api *uapi);
+void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm,
+ unsigned int num_attrs);
+void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile);
+
+extern const struct uapi_definition uverbs_def_obj_async_fd[];
+extern const struct uapi_definition uverbs_def_obj_counters[];
+extern const struct uapi_definition uverbs_def_obj_cq[];
+extern const struct uapi_definition uverbs_def_obj_device[];
+extern const struct uapi_definition uverbs_def_obj_dm[];
+extern const struct uapi_definition uverbs_def_obj_dmah[];
+extern const struct uapi_definition uverbs_def_obj_flow_action[];
+extern const struct uapi_definition uverbs_def_obj_intf[];
+extern const struct uapi_definition uverbs_def_obj_mr[];
+extern const struct uapi_definition uverbs_def_obj_qp[];
+extern const struct uapi_definition uverbs_def_obj_srq[];
+extern const struct uapi_definition uverbs_def_obj_wq[];
+extern const struct uapi_definition uverbs_def_write_intf[];
+
+static inline const struct uverbs_api_write_method *
+uapi_get_method(const struct uverbs_api *uapi, u32 command)
+{
+ u32 cmd_idx = command & IB_USER_VERBS_CMD_COMMAND_MASK;
+
+ if (command & ~(u32)(IB_USER_VERBS_CMD_FLAG_EXTENDED |
+ IB_USER_VERBS_CMD_COMMAND_MASK))
+ return ERR_PTR(-EINVAL);
+
+ if (command & IB_USER_VERBS_CMD_FLAG_EXTENDED) {
+ if (cmd_idx >= uapi->num_write_ex)
+ return ERR_PTR(-EOPNOTSUPP);
+ return uapi->write_ex_methods[cmd_idx];
+ }
+
+ if (cmd_idx >= uapi->num_write)
+ return ERR_PTR(-EOPNOTSUPP);
+ return uapi->write_methods[cmd_idx];
+}
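+
+/*
+ * Decoding summary: the low bits of the command word
+ * (IB_USER_VERBS_CMD_COMMAND_MASK) index write_methods[], or, when
+ * IB_USER_VERBS_CMD_FLAG_EXTENDED is set, write_ex_methods[]; any other set
+ * bit is rejected with -EINVAL.
+ */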
+
+void uverbs_fill_udata(struct uverbs_attr_bundle *bundle,
+ struct ib_udata *udata, unsigned int attr_in,
+ unsigned int attr_out);
+
+#endif /* RDMA_CORE_H */
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
new file mode 100644
index 000000000000..a7de6f403fca
--- /dev/null
+++ b/drivers/infiniband/core/restrack.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ */
+
+#include <rdma/rdma_cm.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/restrack.h>
+#include <rdma/rdma_counter.h>
+#include <linux/mutex.h>
+#include <linux/sched/task.h>
+#include <linux/pid_namespace.h>
+
+#include "cma_priv.h"
+#include "restrack.h"
+
+/**
+ * rdma_restrack_init() - initialize and allocate resource tracking
+ * @dev: IB device
+ *
+ * Return: 0 on success
+ */
+int rdma_restrack_init(struct ib_device *dev)
+{
+ struct rdma_restrack_root *rt;
+ int i;
+
+ dev->res = kcalloc(RDMA_RESTRACK_MAX, sizeof(*rt), GFP_KERNEL);
+ if (!dev->res)
+ return -ENOMEM;
+
+ rt = dev->res;
+
+ for (i = 0; i < RDMA_RESTRACK_MAX; i++)
+ xa_init_flags(&rt[i].xa, XA_FLAGS_ALLOC);
+
+ return 0;
+}
+
+/**
+ * rdma_restrack_clean() - clean resource tracking
+ * @dev: IB device
+ */
+void rdma_restrack_clean(struct ib_device *dev)
+{
+ struct rdma_restrack_root *rt = dev->res;
+ int i;
+
+ for (i = 0 ; i < RDMA_RESTRACK_MAX; i++) {
+ struct xarray *xa = &dev->res[i].xa;
+
+ WARN_ON(!xa_empty(xa));
+ xa_destroy(xa);
+ }
+ kfree(rt);
+}
+
+/**
+ * rdma_restrack_count() - return the current usage count of a specific object type
+ * @dev: IB device
+ * @type: actual type of the objects to count
+ * @show_details: whether to also count driver-specific objects
+ */
+int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type,
+ bool show_details)
+{
+ struct rdma_restrack_root *rt = &dev->res[type];
+ struct rdma_restrack_entry *e;
+ XA_STATE(xas, &rt->xa, 0);
+ u32 cnt = 0;
+
+ xa_lock(&rt->xa);
+ xas_for_each(&xas, e, U32_MAX) {
+ if (xa_get_mark(&rt->xa, e->id, RESTRACK_DD) && !show_details)
+ continue;
+ cnt++;
+ }
+ xa_unlock(&rt->xa);
+ return cnt;
+}
+EXPORT_SYMBOL(rdma_restrack_count);
+
+static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
+{
+ switch (res->type) {
+ case RDMA_RESTRACK_PD:
+ return container_of(res, struct ib_pd, res)->device;
+ case RDMA_RESTRACK_CQ:
+ return container_of(res, struct ib_cq, res)->device;
+ case RDMA_RESTRACK_QP:
+ return container_of(res, struct ib_qp, res)->device;
+ case RDMA_RESTRACK_CM_ID:
+ return container_of(res, struct rdma_id_private,
+ res)->id.device;
+ case RDMA_RESTRACK_MR:
+ return container_of(res, struct ib_mr, res)->device;
+ case RDMA_RESTRACK_CTX:
+ return container_of(res, struct ib_ucontext, res)->device;
+ case RDMA_RESTRACK_COUNTER:
+ return container_of(res, struct rdma_counter, res)->device;
+ case RDMA_RESTRACK_SRQ:
+ return container_of(res, struct ib_srq, res)->device;
+ case RDMA_RESTRACK_DMAH:
+ return container_of(res, struct ib_dmah, res)->device;
+ default:
+ WARN_ONCE(true, "Wrong resource tracking type %u\n", res->type);
+ return NULL;
+ }
+}
+
+/**
+ * rdma_restrack_attach_task() - attach the task onto this resource,
+ * valid for user space restrack entries.
+ * @res: resource entry
+ * @task: the task to attach
+ */
+static void rdma_restrack_attach_task(struct rdma_restrack_entry *res,
+ struct task_struct *task)
+{
+ if (WARN_ON_ONCE(!task))
+ return;
+
+ if (res->task)
+ put_task_struct(res->task);
+ get_task_struct(task);
+ res->task = task;
+ res->user = true;
+}
+
+/**
+ * rdma_restrack_set_name() - set the task for this resource
+ * @res: resource entry
+ * @caller: kernel name, the current task will be used if the caller is NULL.
+ */
+void rdma_restrack_set_name(struct rdma_restrack_entry *res, const char *caller)
+{
+ if (caller) {
+ res->kern_name = caller;
+ return;
+ }
+
+ rdma_restrack_attach_task(res, current);
+}
+EXPORT_SYMBOL(rdma_restrack_set_name);
+
+/**
+ * rdma_restrack_parent_name() - set the restrack name properties based
+ * on parent restrack
+ * @dst: destination resource entry
+ * @parent: parent resource entry
+ */
+void rdma_restrack_parent_name(struct rdma_restrack_entry *dst,
+ const struct rdma_restrack_entry *parent)
+{
+ if (rdma_is_kernel_res(parent))
+ dst->kern_name = parent->kern_name;
+ else
+ rdma_restrack_attach_task(dst, parent->task);
+}
+EXPORT_SYMBOL(rdma_restrack_parent_name);
+
+/**
+ * rdma_restrack_new() - Initialize a new restrack entry so that the _put()
+ * interface can release the memory in a fully automatic way.
+ * @res: Entry to initialize
+ * @type: Restrack type
+ */
+void rdma_restrack_new(struct rdma_restrack_entry *res,
+ enum rdma_restrack_type type)
+{
+ kref_init(&res->kref);
+ init_completion(&res->comp);
+ res->type = type;
+}
+EXPORT_SYMBOL(rdma_restrack_new);
+
+/**
+ * rdma_restrack_add() - add object to the resource tracking database
+ * @res: resource entry
+ */
+void rdma_restrack_add(struct rdma_restrack_entry *res)
+{
+ struct ib_device *dev = res_to_dev(res);
+ struct rdma_restrack_root *rt;
+ int ret = 0;
+
+ if (!dev)
+ return;
+
+ if (res->no_track)
+ goto out;
+
+ rt = &dev->res[res->type];
+
+ if (res->type == RDMA_RESTRACK_QP) {
+ /* Special case to ensure that LQPN points to right QP */
+ struct ib_qp *qp = container_of(res, struct ib_qp, res);
+
+ WARN_ONCE(qp->qp_num >> 24 || qp->port >> 8,
+ "QP number 0x%0X and port 0x%0X", qp->qp_num,
+ qp->port);
+ res->id = qp->qp_num;
+ if (qp->qp_type == IB_QPT_SMI || qp->qp_type == IB_QPT_GSI)
+ res->id |= qp->port << 24;
+ ret = xa_insert(&rt->xa, res->id, res, GFP_KERNEL);
+ if (ret)
+ res->id = 0;
+
+ if (qp->qp_type >= IB_QPT_DRIVER)
+ xa_set_mark(&rt->xa, res->id, RESTRACK_DD);
+ } else if (res->type == RDMA_RESTRACK_COUNTER) {
+ /* Special case to ensure that cntn points to right counter */
+ struct rdma_counter *counter;
+
+ counter = container_of(res, struct rdma_counter, res);
+ ret = xa_insert(&rt->xa, counter->id, res, GFP_KERNEL);
+ res->id = ret ? 0 : counter->id;
+ } else {
+ ret = xa_alloc_cyclic(&rt->xa, &res->id, res, xa_limit_32b,
+ &rt->next_id, GFP_KERNEL);
+ ret = (ret < 0) ? ret : 0;
+ }
+
+out:
+ if (!ret)
+ res->valid = true;
+}
+EXPORT_SYMBOL(rdma_restrack_add);
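+
+/*
+ * A minimal lifecycle sketch, assuming a kernel-owned resource ("caller_name"
+ * is illustrative; user space resources pass a NULL caller so the current
+ * task is attached instead):
+ *
+ *     rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD);
+ *     rdma_restrack_set_name(&pd->res, "caller_name");
+ *     rdma_restrack_add(&pd->res);
+ *     ...
+ *     rdma_restrack_del(&pd->res);
+ */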
+
+int __must_check rdma_restrack_get(struct rdma_restrack_entry *res)
+{
+ return kref_get_unless_zero(&res->kref);
+}
+EXPORT_SYMBOL(rdma_restrack_get);
+
+/**
+ * rdma_restrack_get_byid() - translate from ID to restrack object
+ * @dev: IB device
+ * @type: resource track type
+ * @id: ID to take a look
+ *
+ * Return: Pointer to restrack entry or -ENOENT in case of error.
+ */
+struct rdma_restrack_entry *
+rdma_restrack_get_byid(struct ib_device *dev,
+ enum rdma_restrack_type type, u32 id)
+{
+ struct rdma_restrack_root *rt = &dev->res[type];
+ struct rdma_restrack_entry *res;
+
+ xa_lock(&rt->xa);
+ res = xa_load(&rt->xa, id);
+ if (!res || !rdma_restrack_get(res))
+ res = ERR_PTR(-ENOENT);
+ xa_unlock(&rt->xa);
+
+ return res;
+}
+EXPORT_SYMBOL(rdma_restrack_get_byid);
+
+static void restrack_release(struct kref *kref)
+{
+ struct rdma_restrack_entry *res;
+
+ res = container_of(kref, struct rdma_restrack_entry, kref);
+ if (res->task) {
+ put_task_struct(res->task);
+ res->task = NULL;
+ }
+ complete(&res->comp);
+}
+
+int rdma_restrack_put(struct rdma_restrack_entry *res)
+{
+ return kref_put(&res->kref, restrack_release);
+}
+EXPORT_SYMBOL(rdma_restrack_put);
+
+/**
+ * rdma_restrack_del() - delete object from the resource tracking database
+ * @res: resource entry
+ */
+void rdma_restrack_del(struct rdma_restrack_entry *res)
+{
+ struct rdma_restrack_entry *old;
+ struct rdma_restrack_root *rt;
+ struct ib_device *dev;
+
+ if (!res->valid) {
+ if (res->task) {
+ put_task_struct(res->task);
+ res->task = NULL;
+ }
+ return;
+ }
+
+ if (res->no_track)
+ goto out;
+
+ dev = res_to_dev(res);
+ if (WARN_ON(!dev))
+ return;
+
+ rt = &dev->res[res->type];
+
+ old = xa_erase(&rt->xa, res->id);
+ WARN_ON(old != res);
+
+out:
+ res->valid = false;
+ rdma_restrack_put(res);
+ wait_for_completion(&res->comp);
+}
+EXPORT_SYMBOL(rdma_restrack_del);
diff --git a/drivers/infiniband/core/restrack.h b/drivers/infiniband/core/restrack.h
new file mode 100644
index 000000000000..6a04fc41f738
--- /dev/null
+++ b/drivers/infiniband/core/restrack.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef _RDMA_CORE_RESTRACK_H_
+#define _RDMA_CORE_RESTRACK_H_
+
+#include <linux/mutex.h>
+
+/**
+ * struct rdma_restrack_root - main resource tracking management
+ * entity, per-device
+ */
+struct rdma_restrack_root {
+ /**
+ * @xa: Array of XArray structure to hold restrack entries.
+ */
+ struct xarray xa;
+ /**
+ * @next_id: Next ID to support cyclic allocation
+ */
+ u32 next_id;
+};
+
+int rdma_restrack_init(struct ib_device *dev);
+void rdma_restrack_clean(struct ib_device *dev);
+void rdma_restrack_add(struct rdma_restrack_entry *res);
+void rdma_restrack_del(struct rdma_restrack_entry *res);
+void rdma_restrack_new(struct rdma_restrack_entry *res,
+ enum rdma_restrack_type type);
+void rdma_restrack_set_name(struct rdma_restrack_entry *res,
+ const char *caller);
+void rdma_restrack_parent_name(struct rdma_restrack_entry *dst,
+ const struct rdma_restrack_entry *parent);
+#endif /* _RDMA_CORE_RESTRACK_H_ */
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 6b24cba1e474..a9f2c6b1b29e 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -42,6 +42,8 @@
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
+static struct workqueue_struct *gid_cache_wq;
+
enum gid_op_type {
GID_DEL = 0,
GID_ADD
@@ -67,17 +69,53 @@ struct netdev_event_work {
struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ];
};
+static const struct {
+ bool (*is_supported)(const struct ib_device *device, u32 port_num);
+ enum ib_gid_type gid_type;
+} PORT_CAP_TO_GID_TYPE[] = {
+ {rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE},
+ {rdma_protocol_roce_udp_encap, IB_GID_TYPE_ROCE_UDP_ENCAP},
+};
+
+#define CAP_TO_GID_TABLE_SIZE ARRAY_SIZE(PORT_CAP_TO_GID_TYPE)
+
+unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u32 port)
+{
+ int i;
+ unsigned int ret_flags = 0;
+
+ if (!rdma_protocol_roce(ib_dev, port))
+ return 1UL << IB_GID_TYPE_IB;
+
+ for (i = 0; i < CAP_TO_GID_TABLE_SIZE; i++)
+ if (PORT_CAP_TO_GID_TYPE[i].is_supported(ib_dev, port))
+ ret_flags |= 1UL << PORT_CAP_TO_GID_TYPE[i].gid_type;
+
+ return ret_flags;
+}
+EXPORT_SYMBOL(roce_gid_type_mask_support);
+
static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev,
- u8 port, union ib_gid *gid,
+ u32 port, union ib_gid *gid,
struct ib_gid_attr *gid_attr)
{
- switch (gid_op) {
- case GID_ADD:
- ib_cache_gid_add(ib_dev, port, gid, gid_attr);
- break;
- case GID_DEL:
- ib_cache_gid_del(ib_dev, port, gid, gid_attr);
- break;
+ int i;
+ unsigned long gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
+
+ for (i = 0; i < IB_GID_TYPE_SIZE; i++) {
+ if ((1UL << i) & gid_type_mask) {
+ gid_attr->gid_type = i;
+ switch (gid_op) {
+ case GID_ADD:
+ ib_cache_gid_add(ib_dev, port,
+ gid, gid_attr);
+ break;
+ case GID_DEL:
+ ib_cache_gid_del(ib_dev, port,
+ gid, gid_attr);
+ break;
+ }
+ }
}
}
@@ -103,36 +141,24 @@ static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_de
return BONDING_SLAVE_STATE_NA;
}
-static bool is_upper_dev_rcu(struct net_device *dev, struct net_device *upper)
-{
- struct net_device *_upper = NULL;
- struct list_head *iter;
-
- netdev_for_each_all_upper_dev_rcu(dev, _upper, iter)
- if (_upper == upper)
- break;
-
- return _upper == upper;
-}
-
#define REQUIRED_BOND_STATES (BONDING_SLAVE_STATE_ACTIVE | \
BONDING_SLAVE_STATE_NA)
-static int is_eth_port_of_netdev(struct ib_device *ib_dev, u8 port,
- struct net_device *rdma_ndev, void *cookie)
+static bool
+is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u32 port,
+ struct net_device *rdma_ndev, void *cookie)
{
- struct net_device *event_ndev = (struct net_device *)cookie;
struct net_device *real_dev;
- int res;
+ bool res;
if (!rdma_ndev)
- return 0;
+ return false;
rcu_read_lock();
- real_dev = rdma_vlan_dev_real_dev(event_ndev);
+ real_dev = rdma_vlan_dev_real_dev(cookie);
if (!real_dev)
- real_dev = event_ndev;
+ real_dev = cookie;
- res = ((is_upper_dev_rcu(rdma_ndev, event_ndev) &&
+ res = ((rdma_is_upper_dev_rcu(rdma_ndev, cookie) &&
(is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) &
REQUIRED_BOND_STATES)) ||
real_dev == rdma_ndev);
@@ -141,14 +167,15 @@ static int is_eth_port_of_netdev(struct ib_device *ib_dev, u8 port,
return res;
}
-static int is_eth_port_inactive_slave(struct ib_device *ib_dev, u8 port,
- struct net_device *rdma_ndev, void *cookie)
+static bool
+is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u32 port,
+ struct net_device *rdma_ndev, void *cookie)
{
struct net_device *master_dev;
- int res;
+ bool res;
if (!rdma_ndev)
- return 0;
+ return false;
rcu_read_lock();
master_dev = netdev_master_upper_dev_get_rcu(rdma_ndev);
@@ -159,34 +186,102 @@ static int is_eth_port_inactive_slave(struct ib_device *ib_dev, u8 port,
return res;
}
-static int pass_all_filter(struct ib_device *ib_dev, u8 port,
- struct net_device *rdma_ndev, void *cookie)
+/**
+ * is_ndev_for_default_gid_filter - Check if a given netdevice
+ * can be considered for default GIDs or not.
+ * @ib_dev: IB device to check
+ * @port: Port to consider for adding default GID
+ * @rdma_ndev: rdma netdevice pointer
+ * @cookie: Netdevice to consider to form a default GID
+ *
+ * is_ndev_for_default_gid_filter() returns true if a given netdevice can be
+ * considered for deriving default RoCE GID, returns false otherwise.
+ */
+static bool
+is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u32 port,
+ struct net_device *rdma_ndev, void *cookie)
{
- return 1;
+ struct net_device *cookie_ndev = cookie;
+ bool res;
+
+ if (!rdma_ndev)
+ return false;
+
+ rcu_read_lock();
+
+ /*
+ * When an rdma netdevice is used in bonding, the bonding master netdevice
+ * should be considered for default GIDs. Therefore, ignore slave rdma
+ * netdevices when bonding is considered.
+ * Additionally, when the event (cookie) netdevice is the bond master
+ * device, make sure that it is an upper netdevice of the rdma netdevice.
+ */
+ res = ((cookie_ndev == rdma_ndev && !netif_is_bond_slave(rdma_ndev)) ||
+ (netif_is_bond_master(cookie_ndev) &&
+ rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev)));
+
+ rcu_read_unlock();
+ return res;
}
-static int upper_device_filter(struct ib_device *ib_dev, u8 port,
- struct net_device *rdma_ndev, void *cookie)
+static bool pass_all_filter(struct ib_device *ib_dev, u32 port,
+ struct net_device *rdma_ndev, void *cookie)
+{
+ return true;
+}
+
+static bool upper_device_filter(struct ib_device *ib_dev, u32 port,
+ struct net_device *rdma_ndev, void *cookie)
{
- struct net_device *event_ndev = (struct net_device *)cookie;
- int res;
+ bool res;
if (!rdma_ndev)
- return 0;
+ return false;
- if (rdma_ndev == event_ndev)
- return 1;
+ if (rdma_ndev == cookie)
+ return true;
rcu_read_lock();
- res = is_upper_dev_rcu(rdma_ndev, event_ndev);
+ res = rdma_is_upper_dev_rcu(rdma_ndev, cookie);
rcu_read_unlock();
return res;
}
+/**
+ * is_upper_ndev_bond_master_filter - Check if a given netdevice
+ * is bond master device of netdevice of the RDMA device of port.
+ * @ib_dev: IB device to check
+ * @port: Port to consider for adding default GID
+ * @rdma_ndev: Pointer to rdma netdevice
+ * @cookie: Netdevice to consider to form a default GID
+ *
+ * is_upper_ndev_bond_master_filter() returns true if the cookie netdevice
+ * is a bond master device and rdma_ndev is its lower netdevice. It might
+ * not have been established as a slave device yet.
+ */
+static bool
+is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u32 port,
+ struct net_device *rdma_ndev,
+ void *cookie)
+{
+ struct net_device *cookie_ndev = cookie;
+ bool match = false;
+
+ if (!rdma_ndev)
+ return false;
+
+ rcu_read_lock();
+ if (netif_is_bond_master(cookie_ndev) &&
+ rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev))
+ match = true;
+ rcu_read_unlock();
+ return match;
+}
+
static void update_gid_ip(enum gid_op_type gid_op,
struct ib_device *ib_dev,
- u8 port, struct net_device *ndev,
+ u32 port, struct net_device *ndev,
struct sockaddr *addr)
{
union ib_gid gid;
@@ -199,32 +294,13 @@ static void update_gid_ip(enum gid_op_type gid_op,
update_gid(gid_op, ib_dev, port, &gid, &gid_attr);
}
-static void enum_netdev_default_gids(struct ib_device *ib_dev,
- u8 port, struct net_device *event_ndev,
- struct net_device *rdma_ndev)
-{
- rcu_read_lock();
- if (!rdma_ndev ||
- ((rdma_ndev != event_ndev &&
- !is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
- is_eth_active_slave_of_bonding_rcu(rdma_ndev,
- netdev_master_upper_dev_get_rcu(rdma_ndev)) ==
- BONDING_SLAVE_STATE_INACTIVE)) {
- rcu_read_unlock();
- return;
- }
- rcu_read_unlock();
-
- ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
- IB_CACHE_GID_DEFAULT_MODE_SET);
-}
-
static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
- u8 port,
- struct net_device *event_ndev,
- struct net_device *rdma_ndev)
+ u32 port,
+ struct net_device *rdma_ndev,
+ struct net_device *event_ndev)
{
struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev);
+ unsigned long gid_type_mask;
if (!rdma_ndev)
return;
@@ -234,45 +310,70 @@ static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
rcu_read_lock();
- if (is_upper_dev_rcu(rdma_ndev, event_ndev) &&
- is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) ==
- BONDING_SLAVE_STATE_INACTIVE) {
- rcu_read_unlock();
-
- ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
- IB_CACHE_GID_DEFAULT_MODE_DELETE);
- } else {
+ if ((rdma_ndev != event_ndev &&
+      !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
+     is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) ==
+     BONDING_SLAVE_STATE_INACTIVE) {
rcu_read_unlock();
+ return;
}
+
+ rcu_read_unlock();
+
+ gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
+
+ ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
+ gid_type_mask,
+ IB_CACHE_GID_DEFAULT_MODE_DELETE);
}
static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
- u8 port, struct net_device *ndev)
+ u32 port, struct net_device *ndev)
{
+ const struct in_ifaddr *ifa;
struct in_device *in_dev;
+ struct sin_list {
+ struct list_head list;
+ struct sockaddr_in ip;
+ };
+ struct sin_list *sin_iter;
+ struct sin_list *sin_temp;
+ LIST_HEAD(sin_list);
if (ndev->reg_state >= NETREG_UNREGISTERING)
return;
- in_dev = in_dev_get(ndev);
- if (!in_dev)
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(ndev);
+ if (!in_dev) {
+ rcu_read_unlock();
return;
+ }
- for_ifa(in_dev) {
- struct sockaddr_in ip;
+ in_dev_for_each_ifa_rcu(ifa, in_dev) {
+ struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
- ip.sin_family = AF_INET;
- ip.sin_addr.s_addr = ifa->ifa_address;
- update_gid_ip(GID_ADD, ib_dev, port, ndev,
- (struct sockaddr *)&ip);
+ if (!entry)
+ continue;
+
+ entry->ip.sin_family = AF_INET;
+ entry->ip.sin_addr.s_addr = ifa->ifa_address;
+ list_add_tail(&entry->list, &sin_list);
}
- endfor_ifa(in_dev);
- in_dev_put(in_dev);
+ rcu_read_unlock();
+
+ list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) {
+ update_gid_ip(GID_ADD, ib_dev, port, ndev,
+ (struct sockaddr *)&sin_iter->ip);
+ list_del(&sin_iter->list);
+ kfree(sin_iter);
+ }
}
static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
- u8 port, struct net_device *ndev)
+ u32 port, struct net_device *ndev)
{
struct inet6_ifaddr *ifp;
struct inet6_dev *in6_dev;
@@ -296,10 +397,8 @@ static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
- if (!entry) {
- pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv6 update\n");
+ if (!entry)
continue;
- }
entry->sin6.sin6_family = AF_INET6;
entry->sin6.sin6_addr = ifp->addr;
@@ -319,7 +418,7 @@ static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
}
}
-static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
+static void _add_netdev_ips(struct ib_device *ib_dev, u32 port,
struct net_device *ndev)
{
enum_netdev_ipv4_ips(ib_dev, port, ndev);
@@ -327,25 +426,52 @@ static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
enum_netdev_ipv6_ips(ib_dev, port, ndev);
}
-static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
+static void add_netdev_ips(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
- struct net_device *event_ndev = (struct net_device *)cookie;
-
- enum_netdev_default_gids(ib_dev, port, event_ndev, rdma_ndev);
- _add_netdev_ips(ib_dev, port, event_ndev);
+ _add_netdev_ips(ib_dev, port, cookie);
}
-static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
+static void del_netdev_ips(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
- struct net_device *event_ndev = (struct net_device *)cookie;
+ ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie);
+}
+
+/**
+ * del_default_gids - Delete default GIDs of the event/cookie netdevice
+ * @ib_dev: RDMA device pointer
+ * @port: Port of the RDMA device whose GID table to consider
+ * @rdma_ndev: Unused rdma netdevice
+ * @cookie: Pointer to event netdevice
+ *
+ * del_default_gids() deletes the default GIDs of the event/cookie netdevice.
+ */
+static void del_default_gids(struct ib_device *ib_dev, u32 port,
+ struct net_device *rdma_ndev, void *cookie)
+{
+ struct net_device *cookie_ndev = cookie;
+ unsigned long gid_type_mask;
+
+ gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
+
+ ib_cache_gid_set_default_gid(ib_dev, port, cookie_ndev, gid_type_mask,
+ IB_CACHE_GID_DEFAULT_MODE_DELETE);
+}
+
+static void add_default_gids(struct ib_device *ib_dev, u32 port,
+ struct net_device *rdma_ndev, void *cookie)
+{
+ struct net_device *event_ndev = cookie;
+ unsigned long gid_type_mask;
- ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
+ gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
+ ib_cache_gid_set_default_gid(ib_dev, port, event_ndev, gid_type_mask,
+ IB_CACHE_GID_DEFAULT_MODE_SET);
}
static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
- u8 port,
+ u32 port,
struct net_device *rdma_ndev,
void *cookie)
{
@@ -356,25 +482,62 @@ static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
* our feet
*/
rtnl_lock();
+ down_read(&net_rwsem);
for_each_net(net)
- for_each_netdev(net, ndev)
- if (is_eth_port_of_netdev(ib_dev, port, rdma_ndev, ndev))
- add_netdev_ips(ib_dev, port, rdma_ndev, ndev);
+ for_each_netdev(net, ndev) {
+ /*
+ * Filter and add default GIDs of the primary netdevice
+ * when not in bonding mode, or add the default GIDs
+ * of the bond master device when in bonding mode.
+ */
+ if (is_ndev_for_default_gid_filter(ib_dev, port,
+ rdma_ndev, ndev))
+ add_default_gids(ib_dev, port, rdma_ndev, ndev);
+
+ if (is_eth_port_of_netdev_filter(ib_dev, port,
+ rdma_ndev, ndev))
+ _add_netdev_ips(ib_dev, port, ndev);
+ }
+ up_read(&net_rwsem);
rtnl_unlock();
}
-/* This function will rescan all of the network devices in the system
- * and add their gids, as needed, to the relevant RoCE devices. */
-int roce_rescan_device(struct ib_device *ib_dev)
+/**
+ * rdma_roce_rescan_device - Rescan all of the network devices in the system
+ * and add their gids, as needed, to the relevant RoCE devices.
+ *
+ * @ib_dev: the rdma device
+ */
+void rdma_roce_rescan_device(struct ib_device *ib_dev)
{
ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL,
enum_all_gids_of_dev_cb, NULL);
+}
+EXPORT_SYMBOL(rdma_roce_rescan_device);
- return 0;
+/**
+ * rdma_roce_rescan_port - Rescan all of the network devices in the system
+ * and add their gids if relevant to the port of the RoCE device.
+ *
+ * @ib_dev: IB device
+ * @port: Port number
+ */
+void rdma_roce_rescan_port(struct ib_device *ib_dev, u32 port)
+{
+ struct net_device *ndev = NULL;
+
+ if (rdma_protocol_roce(ib_dev, port)) {
+ ndev = ib_device_get_netdev(ib_dev, port);
+ if (!ndev)
+ return;
+ enum_all_gids_of_dev_cb(ib_dev, port, ndev, ndev);
+ dev_put(ndev);
+ }
}
+EXPORT_SYMBOL(rdma_roce_rescan_port);
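/*
 * Usage sketch (editor's annotation, not part of the patch): a minimal
 * example of how a RoCE driver might repopulate its GID table with the
 * two helpers exported above. Only rdma_roce_rescan_port() and
 * rdma_roce_rescan_device() are real; "example_port_up" and its calling
 * context are assumptions for illustration.
 */
static void example_port_up(struct ib_device *ibdev, u32 port)
{
	/* Re-add default and IP-based GIDs for this port only ... */
	rdma_roce_rescan_port(ibdev, port);

	/* ... or rescan every RoCE port of the device after a full reset. */
	rdma_roce_rescan_device(ibdev);
}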
static void callback_for_addr_gid_device_scan(struct ib_device *device,
- u8 port,
+ u32 port,
struct net_device *rdma_ndev,
void *cookie)
{
@@ -385,37 +548,42 @@ static void callback_for_addr_gid_device_scan(struct ib_device *device,
&parsed->gid_attr);
}
-static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
+struct upper_list {
+ struct list_head list;
+ struct net_device *upper;
+};
+
+static int netdev_upper_walk(struct net_device *upper,
+ struct netdev_nested_priv *priv)
+{
+ struct upper_list *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ struct list_head *upper_list = (struct list_head *)priv->data;
+
+ if (!entry)
+ return 0;
+
+ list_add_tail(&entry->list, upper_list);
+ dev_hold(upper);
+ entry->upper = upper;
+
+ return 0;
+}
+
+static void handle_netdev_upper(struct ib_device *ib_dev, u32 port,
void *cookie,
void (*handle_netdev)(struct ib_device *ib_dev,
- u8 port,
+ u32 port,
struct net_device *ndev))
{
- struct net_device *ndev = (struct net_device *)cookie;
- struct upper_list {
- struct list_head list;
- struct net_device *upper;
- };
- struct net_device *upper;
- struct list_head *iter;
+ struct net_device *ndev = cookie;
+ struct netdev_nested_priv priv;
struct upper_list *upper_iter;
struct upper_list *upper_temp;
LIST_HEAD(upper_list);
+ priv.data = &upper_list;
rcu_read_lock();
- netdev_for_each_all_upper_dev_rcu(ndev, upper, iter) {
- struct upper_list *entry = kmalloc(sizeof(*entry),
- GFP_ATOMIC);
-
- if (!entry) {
- pr_info("roce_gid_mgmt: couldn't allocate entry to delete ndev\n");
- continue;
- }
-
- list_add_tail(&entry->list, &upper_list);
- dev_hold(upper);
- entry->upper = upper;
- }
+ netdev_walk_all_upper_dev_rcu(ndev, netdev_upper_walk, &priv);
rcu_read_unlock();
handle_netdev(ib_dev, port, ndev);
@@ -428,25 +596,26 @@ static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
}
}
-static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
- struct net_device *event_ndev)
+void roce_del_all_netdev_gids(struct ib_device *ib_dev,
+ u32 port, struct net_device *ndev)
{
- ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
+ ib_cache_gid_del_all_netdev_gids(ib_dev, port, ndev);
}
+EXPORT_SYMBOL(roce_del_all_netdev_gids);
-static void del_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
+static void del_netdev_upper_ips(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
- handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids);
+ handle_netdev_upper(ib_dev, port, cookie, roce_del_all_netdev_gids);
}
-static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
+static void add_netdev_upper_ips(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
}
-static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
+static void del_netdev_default_ips_join(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev,
void *cookie)
{
@@ -454,25 +623,16 @@ static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
rcu_read_lock();
master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev);
- if (master_ndev)
- dev_hold(master_ndev);
+ dev_hold(master_ndev);
rcu_read_unlock();
if (master_ndev) {
- bond_delete_netdev_default_gids(ib_dev, port, master_ndev,
- rdma_ndev);
+ bond_delete_netdev_default_gids(ib_dev, port, rdma_ndev,
+ master_ndev);
dev_put(master_ndev);
}
}
-static void del_netdev_default_ips(struct ib_device *ib_dev, u8 port,
- struct net_device *rdma_ndev, void *cookie)
-{
- struct net_device *event_ndev = (struct net_device *)cookie;
-
- bond_delete_netdev_default_gids(ib_dev, port, event_ndev, rdma_ndev);
-}
-
/* The following functions operate on all IB devices. netdevice_event and
* addr_event execute ib_enum_all_roce_netdevs through a work.
* ib_enum_all_roce_netdevs iterates through all IB devices.
@@ -503,10 +663,8 @@ static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
struct netdev_event_work *ndev_work =
kmalloc(sizeof(*ndev_work), GFP_KERNEL);
- if (!ndev_work) {
- pr_warn("roce_gid_mgmt: can't allocate work for netdevice_event\n");
+ if (!ndev_work)
return NOTIFY_DONE;
- }
memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
@@ -519,46 +677,100 @@ static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
}
INIT_WORK(&ndev_work->work, netdevice_event_work_handler);
- queue_work(ib_wq, &ndev_work->work);
+ queue_work(gid_cache_wq, &ndev_work->work);
return NOTIFY_DONE;
}
static const struct netdev_event_work_cmd add_cmd = {
- .cb = add_netdev_ips, .filter = is_eth_port_of_netdev};
+ .cb = add_netdev_ips,
+ .filter = is_eth_port_of_netdev_filter
+};
+
static const struct netdev_event_work_cmd add_cmd_upper_ips = {
- .cb = add_netdev_upper_ips, .filter = is_eth_port_of_netdev};
+ .cb = add_netdev_upper_ips,
+ .filter = is_eth_port_of_netdev_filter
+};
-static void netdevice_event_changeupper(struct netdev_notifier_changeupper_info *changeupper_info,
- struct netdev_event_work_cmd *cmds)
+static void
+ndev_event_unlink(struct netdev_notifier_changeupper_info *changeupper_info,
+ struct netdev_event_work_cmd *cmds)
{
- static const struct netdev_event_work_cmd upper_ips_del_cmd = {
- .cb = del_netdev_upper_ips, .filter = upper_device_filter};
- static const struct netdev_event_work_cmd bonding_default_del_cmd = {
- .cb = del_netdev_default_ips, .filter = is_eth_port_inactive_slave};
-
- if (changeupper_info->linking == false) {
- cmds[0] = upper_ips_del_cmd;
- cmds[0].ndev = changeupper_info->upper_dev;
- cmds[1] = add_cmd;
- } else {
- cmds[0] = bonding_default_del_cmd;
- cmds[0].ndev = changeupper_info->upper_dev;
- cmds[1] = add_cmd_upper_ips;
- cmds[1].ndev = changeupper_info->upper_dev;
- cmds[1].filter_ndev = changeupper_info->upper_dev;
- }
+ static const struct netdev_event_work_cmd
+ upper_ips_del_cmd = {
+ .cb = del_netdev_upper_ips,
+ .filter = upper_device_filter
+ };
+
+ cmds[0] = upper_ips_del_cmd;
+ cmds[0].ndev = changeupper_info->upper_dev;
+ cmds[1] = add_cmd;
+}
+
+static const struct netdev_event_work_cmd bonding_default_add_cmd = {
+ .cb = add_default_gids,
+ .filter = is_upper_ndev_bond_master_filter
+};
+
+static void
+ndev_event_link(struct net_device *event_ndev,
+ struct netdev_notifier_changeupper_info *changeupper_info,
+ struct netdev_event_work_cmd *cmds)
+{
+ static const struct netdev_event_work_cmd
+ bonding_default_del_cmd = {
+ .cb = del_default_gids,
+ .filter = is_upper_ndev_bond_master_filter
+ };
+ /*
+ * When a lower netdev is linked to its upper bonding
+ * netdev, delete the lower slave netdev's default GIDs.
+ */
+ cmds[0] = bonding_default_del_cmd;
+ cmds[0].ndev = event_ndev;
+ cmds[0].filter_ndev = changeupper_info->upper_dev;
+
+ /* Now add bonding upper device default GIDs */
+ cmds[1] = bonding_default_add_cmd;
+ cmds[1].ndev = changeupper_info->upper_dev;
+ cmds[1].filter_ndev = changeupper_info->upper_dev;
+
+ /* Now add bonding upper device IP based GIDs */
+ cmds[2] = add_cmd_upper_ips;
+ cmds[2].ndev = changeupper_info->upper_dev;
+ cmds[2].filter_ndev = changeupper_info->upper_dev;
}
+static void netdevice_event_changeupper(struct net_device *event_ndev,
+ struct netdev_notifier_changeupper_info *changeupper_info,
+ struct netdev_event_work_cmd *cmds)
+{
+ if (changeupper_info->linking)
+ ndev_event_link(event_ndev, changeupper_info, cmds);
+ else
+ ndev_event_unlink(changeupper_info, cmds);
+}
+
+static const struct netdev_event_work_cmd add_default_gid_cmd = {
+ .cb = add_default_gids,
+ .filter = is_ndev_for_default_gid_filter,
+};
+
static int netdevice_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
static const struct netdev_event_work_cmd del_cmd = {
.cb = del_netdev_ips, .filter = pass_all_filter};
- static const struct netdev_event_work_cmd bonding_default_del_cmd_join = {
- .cb = del_netdev_default_ips_join, .filter = is_eth_port_inactive_slave};
- static const struct netdev_event_work_cmd default_del_cmd = {
- .cb = del_netdev_default_ips, .filter = pass_all_filter};
+ static const struct netdev_event_work_cmd
+ bonding_default_del_cmd_join = {
+ .cb = del_netdev_default_ips_join,
+ .filter = is_eth_port_inactive_slave_filter
+ };
+ static const struct netdev_event_work_cmd
+ netdev_del_cmd = {
+ .cb = del_netdev_ips,
+ .filter = is_eth_port_of_netdev_filter
+ };
static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
.cb = del_netdev_upper_ips, .filter = upper_device_filter};
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
@@ -571,7 +783,8 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
case NETDEV_REGISTER:
case NETDEV_UP:
cmds[0] = bonding_default_del_cmd_join;
- cmds[1] = add_cmd;
+ cmds[1] = add_default_gid_cmd;
+ cmds[2] = add_cmd;
break;
case NETDEV_UNREGISTER:
@@ -582,19 +795,24 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
break;
case NETDEV_CHANGEADDR:
- cmds[0] = default_del_cmd;
- cmds[1] = add_cmd;
+ cmds[0] = netdev_del_cmd;
+ if (ndev->reg_state == NETREG_REGISTERED) {
+ cmds[1] = add_default_gid_cmd;
+ cmds[2] = add_cmd;
+ }
break;
case NETDEV_CHANGEUPPER:
- netdevice_event_changeupper(
+ netdevice_event_changeupper(ndev,
container_of(ptr, struct netdev_notifier_changeupper_info, info),
cmds);
break;
case NETDEV_BONDING_FAILOVER:
cmds[0] = bonding_event_ips_del_cmd;
- cmds[1] = bonding_default_del_cmd_join;
+ /* Add default GIDs of the bond device */
+ cmds[1] = bonding_default_add_cmd;
+ /* Add IP based GIDs of the bond device */
cmds[2] = add_cmd_upper_ips;
break;
@@ -610,7 +828,8 @@ static void update_gid_event_work_handler(struct work_struct *_work)
struct update_gid_event_work *work =
container_of(_work, struct update_gid_event_work, work);
- ib_enum_all_roce_netdevs(is_eth_port_of_netdev, work->gid_attr.ndev,
+ ib_enum_all_roce_netdevs(is_eth_port_of_netdev_filter,
+ work->gid_attr.ndev,
callback_for_addr_gid_device_scan, work);
dev_put(work->gid_attr.ndev);
@@ -640,10 +859,8 @@ static int addr_event(struct notifier_block *this, unsigned long event,
}
work = kmalloc(sizeof(*work), GFP_ATOMIC);
- if (!work) {
- pr_warn("roce_gid_mgmt: Couldn't allocate work for addr_event\n");
+ if (!work)
return NOTIFY_DONE;
- }
INIT_WORK(&work->work, update_gid_event_work_handler);
@@ -654,7 +871,7 @@ static int addr_event(struct notifier_block *this, unsigned long event,
dev_hold(ndev);
work->gid_attr.ndev = ndev;
- queue_work(ib_wq, &work->work);
+ queue_work(gid_cache_wq, &work->work);
return NOTIFY_DONE;
}
@@ -701,6 +918,10 @@ static struct notifier_block nb_inet6addr = {
int __init roce_gid_mgmt_init(void)
{
+ gid_cache_wq = alloc_ordered_workqueue("gid-cache-wq", 0);
+ if (!gid_cache_wq)
+ return -ENOMEM;
+
register_inetaddr_notifier(&nb_inetaddr);
if (IS_ENABLED(CONFIG_IPV6))
register_inet6addr_notifier(&nb_inet6addr);
@@ -725,4 +946,5 @@ void __exit roce_gid_mgmt_cleanup(void)
* ib-core is removed, all physical devices have been removed,
* so no issue with remaining hardware contexts.
*/
+ destroy_workqueue(gid_cache_wq);
}
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
new file mode 100644
index 000000000000..6354ddf2a274
--- /dev/null
+++ b/drivers/infiniband/core/rw.c
@@ -0,0 +1,734 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016 HGST, a Western Digital Company.
+ */
+#include <linux/memremap.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/pci-p2pdma.h>
+#include <rdma/mr_pool.h>
+#include <rdma/rw.h>
+
+enum {
+ RDMA_RW_SINGLE_WR,
+ RDMA_RW_MULTI_WR,
+ RDMA_RW_MR,
+ RDMA_RW_SIG_MR,
+};
+
+static bool rdma_rw_force_mr;
+module_param_named(force_mr, rdma_rw_force_mr, bool, 0);
+MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");
+
+/*
+ * Report whether memory registration should be used. Memory registration must
+ * be used for iWARP devices because of iWARP-specific limitations. Memory
+ * registration is also enabled if registering memory might yield better
+ * performance than using multiple SGE entries; see rdma_rw_io_needs_mr().
+ */
+static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u32 port_num)
+{
+ if (rdma_protocol_iwarp(dev, port_num))
+ return true;
+ if (dev->attrs.max_sgl_rd)
+ return true;
+ if (unlikely(rdma_rw_force_mr))
+ return true;
+ return false;
+}
+
+/*
+ * Check if the device will use memory registration for this RW operation.
+ * For RDMA READs we must use MRs on iWARP and can optionally use them as an
+ * optimization otherwise. Additionally we have a debug option to force usage
+ * of MRs to help testing this code path.
+ */
+static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u32 port_num,
+ enum dma_data_direction dir, int dma_nents)
+{
+ if (dir == DMA_FROM_DEVICE) {
+ if (rdma_protocol_iwarp(dev, port_num))
+ return true;
+ if (dev->attrs.max_sgl_rd && dma_nents > dev->attrs.max_sgl_rd)
+ return true;
+ }
+ if (unlikely(rdma_rw_force_mr))
+ return true;
+ return false;
+}
+
+static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev,
+ bool pi_support)
+{
+ u32 max_pages;
+
+ if (pi_support)
+ max_pages = dev->attrs.max_pi_fast_reg_page_list_len;
+ else
+ max_pages = dev->attrs.max_fast_reg_page_list_len;
+
+ /* arbitrary limit to avoid allocating gigantic resources */
+ return min_t(u32, max_pages, 256);
+}
+
+static inline int rdma_rw_inv_key(struct rdma_rw_reg_ctx *reg)
+{
+ int count = 0;
+
+ if (reg->mr->need_inval) {
+ reg->inv_wr.opcode = IB_WR_LOCAL_INV;
+ reg->inv_wr.ex.invalidate_rkey = reg->mr->lkey;
+ reg->inv_wr.next = &reg->reg_wr.wr;
+ count++;
+ } else {
+ reg->inv_wr.next = NULL;
+ }
+
+ return count;
+}
+
+/* Caller must have zero-initialized *reg. */
+static int rdma_rw_init_one_mr(struct ib_qp *qp, u32 port_num,
+ struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
+ u32 sg_cnt, u32 offset)
+{
+ u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
+ qp->integrity_en);
+ u32 nents = min(sg_cnt, pages_per_mr);
+ int count = 0, ret;
+
+ reg->mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
+ if (!reg->mr)
+ return -EAGAIN;
+
+ count += rdma_rw_inv_key(reg);
+
+ ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
+ if (ret < 0 || ret < nents) {
+ ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);
+ return -EINVAL;
+ }
+
+ reg->reg_wr.wr.opcode = IB_WR_REG_MR;
+ reg->reg_wr.mr = reg->mr;
+ reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
+ if (rdma_protocol_iwarp(qp->device, port_num))
+ reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
+ count++;
+
+ reg->sge.addr = reg->mr->iova;
+ reg->sge.length = reg->mr->length;
+ return count;
+}
+
+static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ u32 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
+ u64 remote_addr, u32 rkey, enum dma_data_direction dir)
+{
+ struct rdma_rw_reg_ctx *prev = NULL;
+ u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
+ qp->integrity_en);
+ int i, j, ret = 0, count = 0;
+
+ ctx->nr_ops = DIV_ROUND_UP(sg_cnt, pages_per_mr);
+ ctx->reg = kcalloc(ctx->nr_ops, sizeof(*ctx->reg), GFP_KERNEL);
+ if (!ctx->reg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < ctx->nr_ops; i++) {
+ struct rdma_rw_reg_ctx *reg = &ctx->reg[i];
+ u32 nents = min(sg_cnt, pages_per_mr);
+
+ ret = rdma_rw_init_one_mr(qp, port_num, reg, sg, sg_cnt,
+ offset);
+ if (ret < 0)
+ goto out_free;
+ count += ret;
+
+ if (prev) {
+ if (reg->mr->need_inval)
+ prev->wr.wr.next = &reg->inv_wr;
+ else
+ prev->wr.wr.next = &reg->reg_wr.wr;
+ }
+
+ reg->reg_wr.wr.next = &reg->wr.wr;
+
+ reg->wr.wr.sg_list = &reg->sge;
+ reg->wr.wr.num_sge = 1;
+ reg->wr.remote_addr = remote_addr;
+ reg->wr.rkey = rkey;
+ if (dir == DMA_TO_DEVICE) {
+ reg->wr.wr.opcode = IB_WR_RDMA_WRITE;
+ } else if (!rdma_cap_read_inv(qp->device, port_num)) {
+ reg->wr.wr.opcode = IB_WR_RDMA_READ;
+ } else {
+ reg->wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
+ reg->wr.wr.ex.invalidate_rkey = reg->mr->lkey;
+ }
+ count++;
+
+ remote_addr += reg->sge.length;
+ sg_cnt -= nents;
+ for (j = 0; j < nents; j++)
+ sg = sg_next(sg);
+ prev = reg;
+ offset = 0;
+ }
+
+ if (prev)
+ prev->wr.wr.next = NULL;
+
+ ctx->type = RDMA_RW_MR;
+ return count;
+
+out_free:
+ while (--i >= 0)
+ ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
+ kfree(ctx->reg);
+out:
+ return ret;
+}
+
+static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ struct scatterlist *sg, u32 sg_cnt, u32 offset,
+ u64 remote_addr, u32 rkey, enum dma_data_direction dir)
+{
+ u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
+ qp->max_read_sge;
+ struct ib_sge *sge;
+ u32 total_len = 0, i, j;
+
+ ctx->nr_ops = DIV_ROUND_UP(sg_cnt, max_sge);
+
+ ctx->map.sges = sge = kcalloc(sg_cnt, sizeof(*sge), GFP_KERNEL);
+ if (!ctx->map.sges)
+ goto out;
+
+ ctx->map.wrs = kcalloc(ctx->nr_ops, sizeof(*ctx->map.wrs), GFP_KERNEL);
+ if (!ctx->map.wrs)
+ goto out_free_sges;
+
+ for (i = 0; i < ctx->nr_ops; i++) {
+ struct ib_rdma_wr *rdma_wr = &ctx->map.wrs[i];
+ u32 nr_sge = min(sg_cnt, max_sge);
+
+ if (dir == DMA_TO_DEVICE)
+ rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
+ else
+ rdma_wr->wr.opcode = IB_WR_RDMA_READ;
+ rdma_wr->remote_addr = remote_addr + total_len;
+ rdma_wr->rkey = rkey;
+ rdma_wr->wr.num_sge = nr_sge;
+ rdma_wr->wr.sg_list = sge;
+
+ for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
+ sge->addr = sg_dma_address(sg) + offset;
+ sge->length = sg_dma_len(sg) - offset;
+ sge->lkey = qp->pd->local_dma_lkey;
+
+ total_len += sge->length;
+ sge++;
+ sg_cnt--;
+ offset = 0;
+ }
+
+ rdma_wr->wr.next = i + 1 < ctx->nr_ops ?
+ &ctx->map.wrs[i + 1].wr : NULL;
+ }
+
+ ctx->type = RDMA_RW_MULTI_WR;
+ return ctx->nr_ops;
+
+out_free_sges:
+ kfree(ctx->map.sges);
+out:
+ return -ENOMEM;
+}
+
+static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey,
+ enum dma_data_direction dir)
+{
+ struct ib_rdma_wr *rdma_wr = &ctx->single.wr;
+
+ ctx->nr_ops = 1;
+
+ ctx->single.sge.lkey = qp->pd->local_dma_lkey;
+ ctx->single.sge.addr = sg_dma_address(sg) + offset;
+ ctx->single.sge.length = sg_dma_len(sg) - offset;
+
+ memset(rdma_wr, 0, sizeof(*rdma_wr));
+ if (dir == DMA_TO_DEVICE)
+ rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
+ else
+ rdma_wr->wr.opcode = IB_WR_RDMA_READ;
+ rdma_wr->wr.sg_list = &ctx->single.sge;
+ rdma_wr->wr.num_sge = 1;
+ rdma_wr->remote_addr = remote_addr;
+ rdma_wr->rkey = rkey;
+
+ ctx->type = RDMA_RW_SINGLE_WR;
+ return 1;
+}
+
+/**
+ * rdma_rw_ctx_init - initialize an RDMA READ/WRITE context
+ * @ctx: context to initialize
+ * @qp: queue pair to operate on
+ * @port_num: port num to which the connection is bound
+ * @sg: scatterlist to READ/WRITE from/to
+ * @sg_cnt: number of entries in @sg
+ * @sg_offset: current byte offset into @sg
+ * @remote_addr: remote address to read/write (relative to @rkey)
+ * @rkey: remote key to operate on
+ * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
+ *
+ * Returns the number of WQEs that will be needed on the QP's send queue if
+ * successful, or a negative error code.
+ */
+int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
+ struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
+ u64 remote_addr, u32 rkey, enum dma_data_direction dir)
+{
+ struct ib_device *dev = qp->pd->device;
+ struct sg_table sgt = {
+ .sgl = sg,
+ .orig_nents = sg_cnt,
+ };
+ int ret;
+
+ ret = ib_dma_map_sgtable_attrs(dev, &sgt, dir, 0);
+ if (ret)
+ return ret;
+ sg_cnt = sgt.nents;
+
+ /*
+ * Skip to the S/G entry that sg_offset falls into:
+ */
+ for (;;) {
+ u32 len = sg_dma_len(sg);
+
+ if (sg_offset < len)
+ break;
+
+ sg = sg_next(sg);
+ sg_offset -= len;
+ sg_cnt--;
+ }
+
+ ret = -EIO;
+ if (WARN_ON_ONCE(sg_cnt == 0))
+ goto out_unmap_sg;
+
+ if (rdma_rw_io_needs_mr(qp->device, port_num, dir, sg_cnt)) {
+ ret = rdma_rw_init_mr_wrs(ctx, qp, port_num, sg, sg_cnt,
+ sg_offset, remote_addr, rkey, dir);
+ } else if (sg_cnt > 1) {
+ ret = rdma_rw_init_map_wrs(ctx, qp, sg, sg_cnt, sg_offset,
+ remote_addr, rkey, dir);
+ } else {
+ ret = rdma_rw_init_single_wr(ctx, qp, sg, sg_offset,
+ remote_addr, rkey, dir);
+ }
+
+ if (ret < 0)
+ goto out_unmap_sg;
+ return ret;
+
+out_unmap_sg:
+ ib_dma_unmap_sgtable_attrs(dev, &sgt, dir, 0);
+ return ret;
+}
+EXPORT_SYMBOL(rdma_rw_ctx_init);
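/*
 * Usage sketch (editor's annotation, not part of the patch): a minimal
 * ULP write path built only on the calls defined in this file. The qp,
 * scatterlist and remote buffer coordinates come from the caller, @ctx
 * must live in the caller's request structure until the completion
 * handler runs, and "example_rdma_write"/"done_cqe" are invented names.
 */
static int example_rdma_write(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
			      u32 port_num, struct scatterlist *sg,
			      u32 sg_cnt, u64 remote_addr, u32 rkey,
			      struct ib_cqe *done_cqe)
{
	int ret;

	/* Maps @sg and picks single-WR, multi-WR or MR mode internally. */
	ret = rdma_rw_ctx_init(ctx, qp, port_num, sg, sg_cnt, 0,
			       remote_addr, rkey, DMA_TO_DEVICE);
	if (ret < 0)
		return ret;

	/* Posts the whole chain; done_cqe->done() fires on the last WR. */
	ret = rdma_rw_ctx_post(ctx, qp, port_num, done_cqe, NULL);
	if (ret)
		rdma_rw_ctx_destroy(ctx, qp, port_num, sg, sg_cnt,
				    DMA_TO_DEVICE);
	return ret;
}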
+
+/**
+ * rdma_rw_ctx_signature_init - initialize a RW context with signature offload
+ * @ctx: context to initialize
+ * @qp: queue pair to operate on
+ * @port_num: port num to which the connection is bound
+ * @sg: scatterlist to READ/WRITE from/to
+ * @sg_cnt: number of entries in @sg
+ * @prot_sg: scatterlist to READ/WRITE protection information from/to
+ * @prot_sg_cnt: number of entries in @prot_sg
+ * @sig_attrs: signature offloading algorithms
+ * @remote_addr: remote address to read/write (relative to @rkey)
+ * @rkey: remote key to operate on
+ * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
+ *
+ * Returns the number of WQEs that will be needed on the QP's send queue if
+ * successful, or a negative error code.
+ */
+int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ u32 port_num, struct scatterlist *sg, u32 sg_cnt,
+ struct scatterlist *prot_sg, u32 prot_sg_cnt,
+ struct ib_sig_attrs *sig_attrs,
+ u64 remote_addr, u32 rkey, enum dma_data_direction dir)
+{
+ struct ib_device *dev = qp->pd->device;
+ u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
+ qp->integrity_en);
+ struct sg_table sgt = {
+ .sgl = sg,
+ .orig_nents = sg_cnt,
+ };
+ struct sg_table prot_sgt = {
+ .sgl = prot_sg,
+ .orig_nents = prot_sg_cnt,
+ };
+ struct ib_rdma_wr *rdma_wr;
+ int count = 0, ret;
+
+ if (sg_cnt > pages_per_mr || prot_sg_cnt > pages_per_mr) {
+ pr_err("SG count too large: sg_cnt=%u, prot_sg_cnt=%u, pages_per_mr=%u\n",
+ sg_cnt, prot_sg_cnt, pages_per_mr);
+ return -EINVAL;
+ }
+
+ ret = ib_dma_map_sgtable_attrs(dev, &sgt, dir, 0);
+ if (ret)
+ return ret;
+
+ if (prot_sg_cnt) {
+ ret = ib_dma_map_sgtable_attrs(dev, &prot_sgt, dir, 0);
+ if (ret)
+ goto out_unmap_sg;
+ }
+
+ ctx->type = RDMA_RW_SIG_MR;
+ ctx->nr_ops = 1;
+ ctx->reg = kzalloc(sizeof(*ctx->reg), GFP_KERNEL);
+ if (!ctx->reg) {
+ ret = -ENOMEM;
+ goto out_unmap_prot_sg;
+ }
+
+ ctx->reg->mr = ib_mr_pool_get(qp, &qp->sig_mrs);
+ if (!ctx->reg->mr) {
+ ret = -EAGAIN;
+ goto out_free_ctx;
+ }
+
+ count += rdma_rw_inv_key(ctx->reg);
+
+ memcpy(ctx->reg->mr->sig_attrs, sig_attrs, sizeof(struct ib_sig_attrs));
+
+ ret = ib_map_mr_sg_pi(ctx->reg->mr, sg, sgt.nents, NULL, prot_sg,
+ prot_sgt.nents, NULL, SZ_4K);
+ if (unlikely(ret)) {
+ pr_err("failed to map PI sg (%u)\n",
+ sgt.nents + prot_sgt.nents);
+ goto out_destroy_sig_mr;
+ }
+
+ ctx->reg->reg_wr.wr.opcode = IB_WR_REG_MR_INTEGRITY;
+ ctx->reg->reg_wr.wr.wr_cqe = NULL;
+ ctx->reg->reg_wr.wr.num_sge = 0;
+ ctx->reg->reg_wr.wr.send_flags = 0;
+ ctx->reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
+ if (rdma_protocol_iwarp(qp->device, port_num))
+ ctx->reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
+ ctx->reg->reg_wr.mr = ctx->reg->mr;
+ ctx->reg->reg_wr.key = ctx->reg->mr->lkey;
+ count++;
+
+ ctx->reg->sge.addr = ctx->reg->mr->iova;
+ ctx->reg->sge.length = ctx->reg->mr->length;
+ if (sig_attrs->wire.sig_type == IB_SIG_TYPE_NONE)
+ ctx->reg->sge.length -= ctx->reg->mr->sig_attrs->meta_length;
+
+ rdma_wr = &ctx->reg->wr;
+ rdma_wr->wr.sg_list = &ctx->reg->sge;
+ rdma_wr->wr.num_sge = 1;
+ rdma_wr->remote_addr = remote_addr;
+ rdma_wr->rkey = rkey;
+ if (dir == DMA_TO_DEVICE)
+ rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
+ else
+ rdma_wr->wr.opcode = IB_WR_RDMA_READ;
+ ctx->reg->reg_wr.wr.next = &rdma_wr->wr;
+ count++;
+
+ return count;
+
+out_destroy_sig_mr:
+ ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);
+out_free_ctx:
+ kfree(ctx->reg);
+out_unmap_prot_sg:
+ if (prot_sgt.nents)
+ ib_dma_unmap_sgtable_attrs(dev, &prot_sgt, dir, 0);
+out_unmap_sg:
+ ib_dma_unmap_sgtable_attrs(dev, &sgt, dir, 0);
+ return ret;
+}
+EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
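/*
 * Usage sketch (editor's annotation, not part of the patch): the
 * signature variant follows the same pattern as the plain context, with
 * a second scatterlist for protection information. The QP is assumed to
 * have been created with IB_QP_CREATE_INTEGRITY_EN so that sig_mrs were
 * allocated (see rdma_rw_init_mrs() below), and @sig_attrs (e.g. a
 * T10-DIF profile) is assumed to be filled in by the caller. All names
 * are invented for illustration.
 */
static int example_pi_write(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
			    u32 port_num, struct scatterlist *sg, u32 sg_cnt,
			    struct scatterlist *prot_sg, u32 prot_sg_cnt,
			    struct ib_sig_attrs *sig_attrs, u64 remote_addr,
			    u32 rkey, struct ib_cqe *done_cqe)
{
	int ret;

	ret = rdma_rw_ctx_signature_init(ctx, qp, port_num, sg, sg_cnt,
					 prot_sg, prot_sg_cnt, sig_attrs,
					 remote_addr, rkey, DMA_TO_DEVICE);
	if (ret < 0)
		return ret;

	ret = rdma_rw_ctx_post(ctx, qp, port_num, done_cqe, NULL);
	if (ret)
		rdma_rw_ctx_destroy_signature(ctx, qp, port_num, sg, sg_cnt,
					      prot_sg, prot_sg_cnt,
					      DMA_TO_DEVICE);
	return ret;
}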
+
+/*
+ * Now that we are going to post the WRs we can update the lkey and need_inval
+ * state on the MRs. If we were doing this at init time, we would get double
+ * or missing invalidations if a context was initialized but not actually
+ * posted.
+ */
+static void rdma_rw_update_lkey(struct rdma_rw_reg_ctx *reg, bool need_inval)
+{
+ reg->mr->need_inval = need_inval;
+ ib_update_fast_reg_key(reg->mr, ib_inc_rkey(reg->mr->lkey));
+ reg->reg_wr.key = reg->mr->lkey;
+ reg->sge.lkey = reg->mr->lkey;
+}
+
+/**
+ * rdma_rw_ctx_wrs - return chain of WRs for an RDMA READ or WRITE operation
+ * @ctx: context to operate on
+ * @qp: queue pair to operate on
+ * @port_num: port num to which the connection is bound
+ * @cqe: completion queue entry for the last WR
+ * @chain_wr: WR to append to the posted chain
+ *
+ * Return the WR chain for the set of RDMA READ/WRITE operations described by
+ * @ctx, as well as any memory registration operations needed. If @chain_wr
+ * is non-NULL, the WR it points to will be appended to the chain of WRs posted.
+ * If @chain_wr is not set, @cqe must be set so that the caller gets a
+ * completion notification.
+ */
+struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
+{
+ struct ib_send_wr *first_wr, *last_wr;
+ int i;
+
+ switch (ctx->type) {
+ case RDMA_RW_SIG_MR:
+ case RDMA_RW_MR:
+ for (i = 0; i < ctx->nr_ops; i++) {
+ rdma_rw_update_lkey(&ctx->reg[i],
+ ctx->reg[i].wr.wr.opcode !=
+ IB_WR_RDMA_READ_WITH_INV);
+ }
+
+ if (ctx->reg[0].inv_wr.next)
+ first_wr = &ctx->reg[0].inv_wr;
+ else
+ first_wr = &ctx->reg[0].reg_wr.wr;
+ last_wr = &ctx->reg[ctx->nr_ops - 1].wr.wr;
+ break;
+ case RDMA_RW_MULTI_WR:
+ first_wr = &ctx->map.wrs[0].wr;
+ last_wr = &ctx->map.wrs[ctx->nr_ops - 1].wr;
+ break;
+ case RDMA_RW_SINGLE_WR:
+ first_wr = &ctx->single.wr.wr;
+ last_wr = &ctx->single.wr.wr;
+ break;
+ default:
+ BUG();
+ }
+
+ if (chain_wr) {
+ last_wr->next = chain_wr;
+ } else {
+ last_wr->wr_cqe = cqe;
+ last_wr->send_flags |= IB_SEND_SIGNALED;
+ }
+
+ return first_wr;
+}
+EXPORT_SYMBOL(rdma_rw_ctx_wrs);
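/*
 * Usage sketch (editor's annotation, not part of the patch): chaining a
 * ULP reply SEND behind the RDMA WRITE chain so that everything goes out
 * with a single ib_post_send(). @send_wr is assumed to be fully set up
 * by the caller and to carry the completion cqe, which is why no @cqe is
 * passed to rdma_rw_ctx_wrs() here.
 */
static int example_post_with_reply(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
				   u32 port_num, struct ib_send_wr *send_wr)
{
	struct ib_send_wr *first_wr;

	first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, NULL, send_wr);
	return ib_post_send(qp, first_wr, NULL);
}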
+
+/**
+ * rdma_rw_ctx_post - post an RDMA READ or RDMA WRITE operation
+ * @ctx: context to operate on
+ * @qp: queue pair to operate on
+ * @port_num: port num to which the connection is bound
+ * @cqe: completion queue entry for the last WR
+ * @chain_wr: WR to append to the posted chain
+ *
+ * Post the set of RDMA READ/WRITE operations described by @ctx, as well as
+ * any memory registration operations needed. If @chain_wr is non-NULL, the
+ * WR it points to will be appended to the chain of WRs posted. If @chain_wr
+ * is not set, @cqe must be set so that the caller gets a completion
+ * notification.
+ */
+int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
+ struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
+{
+ struct ib_send_wr *first_wr;
+
+ first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr);
+ return ib_post_send(qp, first_wr, NULL);
+}
+EXPORT_SYMBOL(rdma_rw_ctx_post);
+
+/**
+ * rdma_rw_ctx_destroy - release all resources allocated by rdma_rw_ctx_init
+ * @ctx: context to release
+ * @qp: queue pair to operate on
+ * @port_num: port num to which the connection is bound
+ * @sg: scatterlist that was used for the READ/WRITE
+ * @sg_cnt: number of entries in @sg
+ * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
+ */
+void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ u32 port_num, struct scatterlist *sg, u32 sg_cnt,
+ enum dma_data_direction dir)
+{
+ int i;
+
+ switch (ctx->type) {
+ case RDMA_RW_MR:
+ for (i = 0; i < ctx->nr_ops; i++)
+ ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
+ kfree(ctx->reg);
+ break;
+ case RDMA_RW_MULTI_WR:
+ kfree(ctx->map.wrs);
+ kfree(ctx->map.sges);
+ break;
+ case RDMA_RW_SINGLE_WR:
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+}
+EXPORT_SYMBOL(rdma_rw_ctx_destroy);
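/*
 * Usage sketch (editor's annotation, not part of the patch): tearing the
 * context down from the completion handler of the last WR. The request
 * structure and its members are invented; wc->qp and wc->wr_cqe are
 * standard struct ib_wc fields.
 */
struct example_req {
	struct rdma_rw_ctx	rw;
	struct ib_cqe		cqe;
	struct scatterlist	*sg;
	u32			sg_cnt;
	u32			port_num;
};

static void example_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct example_req *req =
		container_of(wc->wr_cqe, struct example_req, cqe);

	rdma_rw_ctx_destroy(&req->rw, wc->qp, req->port_num,
			    req->sg, req->sg_cnt, DMA_TO_DEVICE);
	/* ... check wc->status and complete the ULP request here ... */
}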
+
+/**
+ * rdma_rw_ctx_destroy_signature - release all resources allocated by
+ * rdma_rw_ctx_signature_init
+ * @ctx: context to release
+ * @qp: queue pair to operate on
+ * @port_num: port num to which the connection is bound
+ * @sg: scatterlist that was used for the READ/WRITE
+ * @sg_cnt: number of entries in @sg
+ * @prot_sg: scatterlist that was used for the READ/WRITE of the PI
+ * @prot_sg_cnt: number of entries in @prot_sg
+ * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
+ */
+void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ u32 port_num, struct scatterlist *sg, u32 sg_cnt,
+ struct scatterlist *prot_sg, u32 prot_sg_cnt,
+ enum dma_data_direction dir)
+{
+ if (WARN_ON_ONCE(ctx->type != RDMA_RW_SIG_MR))
+ return;
+
+ ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);
+ kfree(ctx->reg);
+
+ if (prot_sg_cnt)
+ ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
+ ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+}
+EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);
+
+/**
+ * rdma_rw_mr_factor - return number of MRs required for a payload
+ * @device: device handling the connection
+ * @port_num: port num to which the connection is bound
+ * @maxpages: maximum payload pages per rdma_rw_ctx
+ *
+ * Returns the number of MRs the device requires to move @maxpages
+ * pages of payload. The returned value is used during transport creation
+ * to compute max_rdma_ctxs and the size of the transport's Send and
+ * Send Completion Queues.
+ */
+unsigned int rdma_rw_mr_factor(struct ib_device *device, u32 port_num,
+ unsigned int maxpages)
+{
+ unsigned int mr_pages;
+
+ if (rdma_rw_can_use_mr(device, port_num))
+ mr_pages = rdma_rw_fr_page_list_len(device, false);
+ else
+ mr_pages = device->attrs.max_sge_rd;
+ return DIV_ROUND_UP(maxpages, mr_pages);
+}
+EXPORT_SYMBOL(rdma_rw_mr_factor);
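/*
 * Usage sketch (editor's annotation, not part of the patch): sizing a
 * transport's rdma_rw context pool from rdma_rw_mr_factor(). The
 * queue_depth/max_payload_pages limits are assumed to come from the
 * ULP's own configuration.
 */
static u32 example_max_rdma_ctxs(struct ib_device *dev, u32 port_num,
				 u32 queue_depth, u32 max_payload_pages)
{
	unsigned int ctxs_per_io;

	/* How many MR-sized chunks does one maximally sized I/O need? */
	ctxs_per_io = rdma_rw_mr_factor(dev, port_num, max_payload_pages);
	return queue_depth * ctxs_per_io;
}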
+
+void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
+{
+ u32 factor;
+
+ WARN_ON_ONCE(attr->port_num == 0);
+
+ /*
+ * Each context needs at least one RDMA READ or WRITE WR.
+ *
+ * For some hardware we might need more; eventually we should ask the
+ * HCA driver for a multiplier here.
+ */
+ factor = 1;
+
+ /*
+ * If the device needs MRs to perform RDMA READ or WRITE operations,
+ * we'll need two additional WRs per context for the registration and
+ * the invalidation.
+ */
+ if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN ||
+ rdma_rw_can_use_mr(dev, attr->port_num))
+ factor += 2; /* inv + reg */
+
+ attr->cap.max_send_wr += factor * attr->cap.max_rdma_ctxs;
+
+ /*
+ * The device may not support as many send WRs as we just asked for;
+ * clamp to the maximum it actually advertises.
+ */
+ attr->cap.max_send_wr =
+ min_t(u32, attr->cap.max_send_wr, dev->attrs.max_qp_wr);
+}
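/*
 * Usage sketch (editor's annotation, not part of the patch): a ULP only
 * has to fill cap.max_rdma_ctxs (plus port_num) before ib_create_qp();
 * the core then calls rdma_rw_init_qp()/rdma_rw_init_mrs() to grow
 * max_send_wr and pre-allocate the MR pools. The queue sizes below are
 * arbitrary placeholders.
 */
static struct ib_qp *example_create_rw_qp(struct ib_pd *pd, struct ib_cq *cq,
					  u32 port_num, u32 nr_rw_ctxs)
{
	struct ib_qp_init_attr init_attr = {
		.qp_type	= IB_QPT_RC,
		.send_cq	= cq,
		.recv_cq	= cq,
		.sq_sig_type	= IB_SIGNAL_REQ_WR,
		.port_num	= port_num,
		.cap = {
			.max_send_wr	= 16,	/* the ULP's own sends */
			.max_recv_wr	= 16,
			.max_send_sge	= 1,
			.max_recv_sge	= 1,
			.max_rdma_ctxs	= nr_rw_ctxs,
		},
	};

	return ib_create_qp(pd, &init_attr);
}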
+
+int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
+{
+ struct ib_device *dev = qp->pd->device;
+ u32 nr_mrs = 0, nr_sig_mrs = 0, max_num_sg = 0;
+ int ret = 0;
+
+ if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) {
+ nr_sig_mrs = attr->cap.max_rdma_ctxs;
+ nr_mrs = attr->cap.max_rdma_ctxs;
+ max_num_sg = rdma_rw_fr_page_list_len(dev, true);
+ } else if (rdma_rw_can_use_mr(dev, attr->port_num)) {
+ nr_mrs = attr->cap.max_rdma_ctxs;
+ max_num_sg = rdma_rw_fr_page_list_len(dev, false);
+ }
+
+ if (nr_mrs) {
+ ret = ib_mr_pool_init(qp, &qp->rdma_mrs, nr_mrs,
+ IB_MR_TYPE_MEM_REG,
+ max_num_sg, 0);
+ if (ret) {
+ pr_err("%s: failed to allocated %u MRs\n",
+ __func__, nr_mrs);
+ return ret;
+ }
+ }
+
+ if (nr_sig_mrs) {
+ ret = ib_mr_pool_init(qp, &qp->sig_mrs, nr_sig_mrs,
+ IB_MR_TYPE_INTEGRITY, max_num_sg, max_num_sg);
+ if (ret) {
+ pr_err("%s: failed to allocated %u SIG MRs\n",
+ __func__, nr_sig_mrs);
+ goto out_free_rdma_mrs;
+ }
+ }
+
+ return 0;
+
+out_free_rdma_mrs:
+ ib_mr_pool_destroy(qp, &qp->rdma_mrs);
+ return ret;
+}
+
+void rdma_rw_cleanup_mrs(struct ib_qp *qp)
+{
+ ib_mr_pool_destroy(qp, &qp->sig_mrs);
+ ib_mr_pool_destroy(qp, &qp->rdma_mrs);
+}
diff --git a/drivers/infiniband/core/sa.h b/drivers/infiniband/core/sa.h
index b1d4bbf4ce5c..143de37ae598 100644
--- a/drivers/infiniband/core/sa.h
+++ b/drivers/infiniband/core/sa.h
@@ -49,16 +49,14 @@ static inline void ib_sa_client_put(struct ib_sa_client *client)
}
int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
- u8 method,
+ struct ib_device *device, u32 port_num, u8 method,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask,
- int timeout_ms, gfp_t gfp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_mcmember_rec *resp,
void *context),
- void *context,
- struct ib_sa_query **sa_query);
+ void *context, struct ib_sa_query **sa_query);
int mcast_init(void);
void mcast_cleanup(void);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 8c014b33d8e0..c23e9c847314 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -32,7 +32,6 @@
* SOFTWARE.
*/
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
@@ -40,7 +39,7 @@
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
@@ -49,15 +48,17 @@
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
+#include <rdma/ib_addr.h>
+#include <rdma/opa_addr.h>
+#include <rdma/rdma_cm.h>
#include "sa.h"
-
-MODULE_AUTHOR("Roland Dreier");
-MODULE_DESCRIPTION("InfiniBand subnet administration query support");
-MODULE_LICENSE("Dual BSD/GPL");
+#include "core_priv.h"
#define IB_SA_LOCAL_SVC_TIMEOUT_MIN 100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT 2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX 200000
+#define IB_SA_CPI_MAX_RETRY_CNT 3
+#define IB_SA_CPI_RETRY_WAIT 1000 /* msecs */
static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;
struct ib_sa_sm_ah {
@@ -67,22 +68,47 @@ struct ib_sa_sm_ah {
u8 src_path_mask;
};
+enum rdma_class_port_info_type {
+ RDMA_CLASS_PORT_INFO_IB,
+ RDMA_CLASS_PORT_INFO_OPA
+};
+
+struct rdma_class_port_info {
+ enum rdma_class_port_info_type type;
+ union {
+ struct ib_class_port_info ib;
+ struct opa_class_port_info opa;
+ };
+};
+
+struct ib_sa_classport_cache {
+ bool valid;
+ int retry_cnt;
+ struct rdma_class_port_info data;
+};
+
struct ib_sa_port {
struct ib_mad_agent *agent;
struct ib_sa_sm_ah *sm_ah;
struct work_struct update_task;
+ struct ib_sa_classport_cache classport_info;
+ struct delayed_work ib_cpi_work;
+ spinlock_t classport_lock; /* protects class port info set */
spinlock_t ah_lock;
- u8 port_num;
+ u32 port_num;
};
struct ib_sa_device {
int start_port, end_port;
struct ib_event_handler event_handler;
- struct ib_sa_port port[0];
+ struct ib_sa_port port[];
};
struct ib_sa_query {
- void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
+ void (*callback)(struct ib_sa_query *sa_query, int status,
+ struct ib_sa_mad *mad);
+ void (*rmpp_callback)(struct ib_sa_query *sa_query, int status,
+ struct ib_mad_recv_wc *mad);
void (*release)(struct ib_sa_query *);
struct ib_sa_client *client;
struct ib_sa_port *port;
@@ -98,21 +124,24 @@ struct ib_sa_query {
#define IB_SA_ENABLE_LOCAL_SERVICE 0x00000001
#define IB_SA_CANCEL 0x00000002
+#define IB_SA_QUERY_OPA 0x00000004
-struct ib_sa_service_query {
- void (*callback)(int, struct ib_sa_service_rec *, void *);
+struct ib_sa_path_query {
+ void (*callback)(int status, struct sa_path_rec *rec,
+ unsigned int num_paths, void *context);
void *context;
struct ib_sa_query sa_query;
+ struct sa_path_rec *conv_pr;
};
-struct ib_sa_path_query {
- void (*callback)(int, struct ib_sa_path_rec *, void *);
+struct ib_sa_guidinfo_query {
+ void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
void *context;
struct ib_sa_query sa_query;
};
-struct ib_sa_guidinfo_query {
- void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
+struct ib_sa_classport_info_query {
+ void (*callback)(void *);
void *context;
struct ib_sa_query sa_query;
};
@@ -123,6 +152,13 @@ struct ib_sa_mcmember_query {
struct ib_sa_query sa_query;
};
+struct ib_sa_service_query {
+ void (*callback)(int status, struct sa_service_rec *rec,
+ unsigned int num_services, void *context);
+ void *context;
+ struct ib_sa_query sa_query;
+};
+
static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
@@ -143,7 +179,7 @@ static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
};
-static void ib_sa_add_one(struct ib_device *device);
+static int ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);
static struct ib_client sa_client = {
@@ -152,15 +188,14 @@ static struct ib_client sa_client = {
.remove = ib_sa_remove_one
};
-static DEFINE_SPINLOCK(idr_lock);
-static DEFINE_IDR(query_idr);
+static DEFINE_XARRAY_FLAGS(queries, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
static DEFINE_SPINLOCK(tid_lock);
static u32 tid;
#define PATH_REC_FIELD(field) \
- .struct_offset_bytes = offsetof(struct ib_sa_path_rec, field), \
- .struct_size_bytes = sizeof ((struct ib_sa_path_rec *) 0)->field, \
+ .struct_offset_bytes = offsetof(struct sa_path_rec, field), \
+ .struct_size_bytes = sizeof_field(struct sa_path_rec, field), \
.field_name = "sa_path_rec:" #field
static const struct ib_field path_rec_table[] = {
@@ -176,15 +211,15 @@ static const struct ib_field path_rec_table[] = {
.offset_words = 6,
.offset_bits = 0,
.size_bits = 128 },
- { PATH_REC_FIELD(dlid),
+ { PATH_REC_FIELD(ib.dlid),
.offset_words = 10,
.offset_bits = 0,
.size_bits = 16 },
- { PATH_REC_FIELD(slid),
+ { PATH_REC_FIELD(ib.slid),
.offset_words = 10,
.offset_bits = 16,
.size_bits = 16 },
- { PATH_REC_FIELD(raw_traffic),
+ { PATH_REC_FIELD(ib.raw_traffic),
.offset_words = 11,
.offset_bits = 0,
.size_bits = 1 },
@@ -258,9 +293,139 @@ static const struct ib_field path_rec_table[] = {
.size_bits = 48 },
};
+#define OPA_PATH_REC_FIELD(field) \
+ .struct_offset_bytes = \
+ offsetof(struct sa_path_rec, field), \
+ .struct_size_bytes = \
+ sizeof_field(struct sa_path_rec, field), \
+ .field_name = "sa_path_rec:" #field
+
+static const struct ib_field opa_path_rec_table[] = {
+ { OPA_PATH_REC_FIELD(service_id),
+ .offset_words = 0,
+ .offset_bits = 0,
+ .size_bits = 64 },
+ { OPA_PATH_REC_FIELD(dgid),
+ .offset_words = 2,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { OPA_PATH_REC_FIELD(sgid),
+ .offset_words = 6,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { OPA_PATH_REC_FIELD(opa.dlid),
+ .offset_words = 10,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { OPA_PATH_REC_FIELD(opa.slid),
+ .offset_words = 11,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { OPA_PATH_REC_FIELD(opa.raw_traffic),
+ .offset_words = 12,
+ .offset_bits = 0,
+ .size_bits = 1 },
+ { RESERVED,
+ .offset_words = 12,
+ .offset_bits = 1,
+ .size_bits = 3 },
+ { OPA_PATH_REC_FIELD(flow_label),
+ .offset_words = 12,
+ .offset_bits = 4,
+ .size_bits = 20 },
+ { OPA_PATH_REC_FIELD(hop_limit),
+ .offset_words = 12,
+ .offset_bits = 24,
+ .size_bits = 8 },
+ { OPA_PATH_REC_FIELD(traffic_class),
+ .offset_words = 13,
+ .offset_bits = 0,
+ .size_bits = 8 },
+ { OPA_PATH_REC_FIELD(reversible),
+ .offset_words = 13,
+ .offset_bits = 8,
+ .size_bits = 1 },
+ { OPA_PATH_REC_FIELD(numb_path),
+ .offset_words = 13,
+ .offset_bits = 9,
+ .size_bits = 7 },
+ { OPA_PATH_REC_FIELD(pkey),
+ .offset_words = 13,
+ .offset_bits = 16,
+ .size_bits = 16 },
+ { OPA_PATH_REC_FIELD(opa.l2_8B),
+ .offset_words = 14,
+ .offset_bits = 0,
+ .size_bits = 1 },
+ { OPA_PATH_REC_FIELD(opa.l2_10B),
+ .offset_words = 14,
+ .offset_bits = 1,
+ .size_bits = 1 },
+ { OPA_PATH_REC_FIELD(opa.l2_9B),
+ .offset_words = 14,
+ .offset_bits = 2,
+ .size_bits = 1 },
+ { OPA_PATH_REC_FIELD(opa.l2_16B),
+ .offset_words = 14,
+ .offset_bits = 3,
+ .size_bits = 1 },
+ { RESERVED,
+ .offset_words = 14,
+ .offset_bits = 4,
+ .size_bits = 2 },
+ { OPA_PATH_REC_FIELD(opa.qos_type),
+ .offset_words = 14,
+ .offset_bits = 6,
+ .size_bits = 2 },
+ { OPA_PATH_REC_FIELD(opa.qos_priority),
+ .offset_words = 14,
+ .offset_bits = 8,
+ .size_bits = 8 },
+ { RESERVED,
+ .offset_words = 14,
+ .offset_bits = 16,
+ .size_bits = 3 },
+ { OPA_PATH_REC_FIELD(sl),
+ .offset_words = 14,
+ .offset_bits = 19,
+ .size_bits = 5 },
+ { RESERVED,
+ .offset_words = 14,
+ .offset_bits = 24,
+ .size_bits = 8 },
+ { OPA_PATH_REC_FIELD(mtu_selector),
+ .offset_words = 15,
+ .offset_bits = 0,
+ .size_bits = 2 },
+ { OPA_PATH_REC_FIELD(mtu),
+ .offset_words = 15,
+ .offset_bits = 2,
+ .size_bits = 6 },
+ { OPA_PATH_REC_FIELD(rate_selector),
+ .offset_words = 15,
+ .offset_bits = 8,
+ .size_bits = 2 },
+ { OPA_PATH_REC_FIELD(rate),
+ .offset_words = 15,
+ .offset_bits = 10,
+ .size_bits = 6 },
+ { OPA_PATH_REC_FIELD(packet_life_time_selector),
+ .offset_words = 15,
+ .offset_bits = 16,
+ .size_bits = 2 },
+ { OPA_PATH_REC_FIELD(packet_life_time),
+ .offset_words = 15,
+ .offset_bits = 18,
+ .size_bits = 6 },
+ { OPA_PATH_REC_FIELD(preference),
+ .offset_words = 15,
+ .offset_bits = 24,
+ .size_bits = 8 },
+};
+
#define MCMEMBER_REC_FIELD(field) \
.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field), \
- .struct_size_bytes = sizeof ((struct ib_sa_mcmember_rec *) 0)->field, \
+ .struct_size_bytes = sizeof_field(struct ib_sa_mcmember_rec, field), \
.field_name = "sa_mcmember_rec:" #field
static const struct ib_field mcmember_rec_table[] = {
@@ -342,57 +507,167 @@ static const struct ib_field mcmember_rec_table[] = {
.size_bits = 23 },
};
-#define SERVICE_REC_FIELD(field) \
- .struct_offset_bytes = offsetof(struct ib_sa_service_rec, field), \
- .struct_size_bytes = sizeof ((struct ib_sa_service_rec *) 0)->field, \
- .field_name = "sa_service_rec:" #field
+#define CLASSPORTINFO_REC_FIELD(field) \
+ .struct_offset_bytes = offsetof(struct ib_class_port_info, field), \
+ .struct_size_bytes = sizeof_field(struct ib_class_port_info, field), \
+ .field_name = "ib_class_port_info:" #field
-static const struct ib_field service_rec_table[] = {
- { SERVICE_REC_FIELD(id),
+static const struct ib_field ib_classport_info_rec_table[] = {
+ { CLASSPORTINFO_REC_FIELD(base_version),
.offset_words = 0,
.offset_bits = 0,
- .size_bits = 64 },
- { SERVICE_REC_FIELD(gid),
+ .size_bits = 8 },
+ { CLASSPORTINFO_REC_FIELD(class_version),
+ .offset_words = 0,
+ .offset_bits = 8,
+ .size_bits = 8 },
+ { CLASSPORTINFO_REC_FIELD(capability_mask),
+ .offset_words = 0,
+ .offset_bits = 16,
+ .size_bits = 16 },
+ { CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
+ .offset_words = 1,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { CLASSPORTINFO_REC_FIELD(redirect_gid),
.offset_words = 2,
.offset_bits = 0,
.size_bits = 128 },
- { SERVICE_REC_FIELD(pkey),
+ { CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
.offset_words = 6,
.offset_bits = 0,
+ .size_bits = 32 },
+ { CLASSPORTINFO_REC_FIELD(redirect_lid),
+ .offset_words = 7,
+ .offset_bits = 0,
.size_bits = 16 },
- { SERVICE_REC_FIELD(lease),
+ { CLASSPORTINFO_REC_FIELD(redirect_pkey),
.offset_words = 7,
+ .offset_bits = 16,
+ .size_bits = 16 },
+
+ { CLASSPORTINFO_REC_FIELD(redirect_qp),
+ .offset_words = 8,
.offset_bits = 0,
.size_bits = 32 },
- { SERVICE_REC_FIELD(key),
+ { CLASSPORTINFO_REC_FIELD(redirect_qkey),
+ .offset_words = 9,
+ .offset_bits = 0,
+ .size_bits = 32 },
+
+ { CLASSPORTINFO_REC_FIELD(trap_gid),
+ .offset_words = 10,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { CLASSPORTINFO_REC_FIELD(trap_tcslfl),
+ .offset_words = 14,
+ .offset_bits = 0,
+ .size_bits = 32 },
+
+ { CLASSPORTINFO_REC_FIELD(trap_lid),
+ .offset_words = 15,
+ .offset_bits = 0,
+ .size_bits = 16 },
+ { CLASSPORTINFO_REC_FIELD(trap_pkey),
+ .offset_words = 15,
+ .offset_bits = 16,
+ .size_bits = 16 },
+
+ { CLASSPORTINFO_REC_FIELD(trap_hlqp),
+ .offset_words = 16,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { CLASSPORTINFO_REC_FIELD(trap_qkey),
+ .offset_words = 17,
+ .offset_bits = 0,
+ .size_bits = 32 },
+};
+
+#define OPA_CLASSPORTINFO_REC_FIELD(field) \
+ .struct_offset_bytes =\
+ offsetof(struct opa_class_port_info, field), \
+ .struct_size_bytes = \
+ sizeof_field(struct opa_class_port_info, field), \
+ .field_name = "opa_class_port_info:" #field
+
+static const struct ib_field opa_classport_info_rec_table[] = {
+ { OPA_CLASSPORTINFO_REC_FIELD(base_version),
+ .offset_words = 0,
+ .offset_bits = 0,
+ .size_bits = 8 },
+ { OPA_CLASSPORTINFO_REC_FIELD(class_version),
+ .offset_words = 0,
+ .offset_bits = 8,
+ .size_bits = 8 },
+ { OPA_CLASSPORTINFO_REC_FIELD(cap_mask),
+ .offset_words = 0,
+ .offset_bits = 16,
+ .size_bits = 16 },
+ { OPA_CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
+ .offset_words = 1,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { OPA_CLASSPORTINFO_REC_FIELD(redirect_gid),
+ .offset_words = 2,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { OPA_CLASSPORTINFO_REC_FIELD(redirect_tc_fl),
+ .offset_words = 6,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { OPA_CLASSPORTINFO_REC_FIELD(redirect_lid),
+ .offset_words = 7,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { OPA_CLASSPORTINFO_REC_FIELD(redirect_sl_qp),
.offset_words = 8,
.offset_bits = 0,
+ .size_bits = 32 },
+ { OPA_CLASSPORTINFO_REC_FIELD(redirect_qkey),
+ .offset_words = 9,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { OPA_CLASSPORTINFO_REC_FIELD(trap_gid),
+ .offset_words = 10,
+ .offset_bits = 0,
.size_bits = 128 },
- { SERVICE_REC_FIELD(name),
- .offset_words = 12,
+ { OPA_CLASSPORTINFO_REC_FIELD(trap_tc_fl),
+ .offset_words = 14,
.offset_bits = 0,
- .size_bits = 64*8 },
- { SERVICE_REC_FIELD(data8),
- .offset_words = 28,
+ .size_bits = 32 },
+ { OPA_CLASSPORTINFO_REC_FIELD(trap_lid),
+ .offset_words = 15,
.offset_bits = 0,
- .size_bits = 16*8 },
- { SERVICE_REC_FIELD(data16),
- .offset_words = 32,
+ .size_bits = 32 },
+ { OPA_CLASSPORTINFO_REC_FIELD(trap_hl_qp),
+ .offset_words = 16,
.offset_bits = 0,
- .size_bits = 8*16 },
- { SERVICE_REC_FIELD(data32),
- .offset_words = 36,
+ .size_bits = 32 },
+ { OPA_CLASSPORTINFO_REC_FIELD(trap_qkey),
+ .offset_words = 17,
.offset_bits = 0,
- .size_bits = 4*32 },
- { SERVICE_REC_FIELD(data64),
- .offset_words = 40,
+ .size_bits = 32 },
+ { OPA_CLASSPORTINFO_REC_FIELD(trap_pkey),
+ .offset_words = 18,
.offset_bits = 0,
- .size_bits = 2*64 },
+ .size_bits = 16 },
+ { OPA_CLASSPORTINFO_REC_FIELD(redirect_pkey),
+ .offset_words = 18,
+ .offset_bits = 16,
+ .size_bits = 16 },
+ { OPA_CLASSPORTINFO_REC_FIELD(trap_sl_rsvd),
+ .offset_words = 19,
+ .offset_bits = 0,
+ .size_bits = 8 },
+ { RESERVED,
+ .offset_words = 19,
+ .offset_bits = 8,
+ .size_bits = 24 },
};
#define GUIDINFO_REC_FIELD(field) \
.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \
- .struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \
+ .struct_size_bytes = sizeof_field(struct ib_sa_guidinfo_rec, field), \
.field_name = "sa_guidinfo_rec:" #field
static const struct ib_field guidinfo_rec_table[] = {
@@ -418,6 +693,60 @@ static const struct ib_field guidinfo_rec_table[] = {
.size_bits = 512 },
};
+#define SERVICE_REC_FIELD(field) \
+ .struct_offset_bytes = offsetof(struct sa_service_rec, field), \
+ .struct_size_bytes = sizeof_field(struct sa_service_rec, field), \
+ .field_name = "sa_service_rec:" #field
+
+static const struct ib_field service_rec_table[] = {
+ { SERVICE_REC_FIELD(id),
+ .offset_words = 0,
+ .offset_bits = 0,
+ .size_bits = 64 },
+ { SERVICE_REC_FIELD(gid),
+ .offset_words = 2,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { SERVICE_REC_FIELD(pkey),
+ .offset_words = 6,
+ .offset_bits = 0,
+ .size_bits = 16 },
+ { RESERVED,
+ .offset_words = 6,
+ .offset_bits = 16,
+ .size_bits = 16 },
+ { SERVICE_REC_FIELD(lease),
+ .offset_words = 7,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { SERVICE_REC_FIELD(key),
+ .offset_words = 8,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { SERVICE_REC_FIELD(name),
+ .offset_words = 12,
+ .offset_bits = 0,
+ .size_bits = 512 },
+ { SERVICE_REC_FIELD(data_8),
+ .offset_words = 28,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { SERVICE_REC_FIELD(data_16),
+ .offset_words = 32,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { SERVICE_REC_FIELD(data_32),
+ .offset_words = 36,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { SERVICE_REC_FIELD(data_64),
+ .offset_words = 40,
+ .offset_bits = 0,
+ .size_bits = 128 },
+};
+
+#define RDMA_PRIMARY_PATH_MAX_REC_NUM 3
+
static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
@@ -431,7 +760,7 @@ static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
struct ib_sa_query *query)
{
- struct ib_sa_path_rec *sa_rec = query->mad_buf->context[1];
+ struct sa_path_rec *sa_rec = query->mad_buf->context[1];
struct ib_sa_mad *mad = query->mad_buf->mad;
ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
u16 val16;
@@ -441,15 +770,15 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
query->mad_buf->context[1] = NULL;
/* Construct the family header first */
- header = (struct rdma_ls_resolve_header *)
- skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
- memcpy(header->device_name, query->port->agent->device->name,
- LS_DEVICE_NAME_MAX);
+ header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
+ strscpy_pad(header->device_name,
+ dev_name(&query->port->agent->device->dev),
+ LS_DEVICE_NAME_MAX);
header->port_num = query->port->port_num;
if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
sa_rec->reversible != 0)
- query->path_use = LS_RESOLVE_PATH_USE_GMP;
+ query->path_use = LS_RESOLVE_PATH_USE_ALL;
else
query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
header->path_use = query->path_use;
@@ -512,21 +841,27 @@ static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
return len;
}
-static int ib_nl_send_msg(struct ib_sa_query *query)
+static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
struct sk_buff *skb = NULL;
struct nlmsghdr *nlh;
void *data;
- int ret = 0;
struct ib_sa_mad *mad;
int len;
+ unsigned long flags;
+ unsigned long delay;
+ gfp_t gfp_flag;
+ int ret;
+
+ INIT_LIST_HEAD(&query->list);
+ query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
mad = query->mad_buf->mad;
len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
if (len <= 0)
return -EMSGSIZE;
- skb = nlmsg_new(len, GFP_KERNEL);
+ skb = nlmsg_new(len, gfp_mask);
if (!skb)
return -ENOMEM;
@@ -534,7 +869,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query)
data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
if (!data) {
- kfree_skb(skb);
+ nlmsg_free(skb);
return -EMSGSIZE;
}
@@ -544,33 +879,16 @@ static int ib_nl_send_msg(struct ib_sa_query *query)
/* Repair the nlmsg header length */
nlmsg_end(skb, nlh);
- ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, GFP_KERNEL);
- if (!ret)
- ret = len;
- else
- ret = 0;
-
- return ret;
-}
-
-static int ib_nl_make_request(struct ib_sa_query *query)
-{
- unsigned long flags;
- unsigned long delay;
- int ret;
-
- INIT_LIST_HEAD(&query->list);
- query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
+ gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC :
+ GFP_NOWAIT;
spin_lock_irqsave(&ib_nl_request_lock, flags);
- ret = ib_nl_send_msg(query);
- if (ret <= 0) {
- ret = -EIO;
- goto request_out;
- } else {
- ret = 0;
- }
+ ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag);
+
+ if (ret)
+ goto out;
+ /* Put the request on the list.*/
delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
query->timeout = delay + jiffies;
list_add_tail(&query->list, &ib_nl_request_list);
@@ -578,7 +896,7 @@ static int ib_nl_make_request(struct ib_sa_query *query)
if (ib_nl_request_list.next == &query->list)
queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
-request_out:
+out:
spin_unlock_irqrestore(&ib_nl_request_lock, flags);
return ret;
@@ -613,50 +931,77 @@ static void send_handler(struct ib_mad_agent *agent,
static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
const struct nlmsghdr *nlh)
{
+ struct sa_path_rec recs[RDMA_PRIMARY_PATH_MAX_REC_NUM];
+ struct ib_sa_path_query *path_query;
+ struct ib_path_rec_data *rec_data;
struct ib_mad_send_wc mad_send_wc;
- struct ib_sa_mad *mad = NULL;
const struct nlattr *head, *curr;
- struct ib_path_rec_data *rec;
- int len, rem;
+ struct ib_sa_mad *mad = NULL;
+ int len, rem, status = -EIO;
+ unsigned int num_prs = 0;
u32 mask = 0;
- int status = -EIO;
-
- if (query->callback) {
- head = (const struct nlattr *) nlmsg_data(nlh);
- len = nlmsg_len(nlh);
- switch (query->path_use) {
- case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
- mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
- break;
- case LS_RESOLVE_PATH_USE_ALL:
- case LS_RESOLVE_PATH_USE_GMP:
- default:
- mask = IB_PATH_PRIMARY | IB_PATH_GMP |
- IB_PATH_BIDIRECTIONAL;
- break;
- }
- nla_for_each_attr(curr, head, len, rem) {
- if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
- rec = nla_data(curr);
- /*
- * Get the first one. In the future, we may
- * need to get up to 6 pathrecords.
- */
- if ((rec->flags & mask) == mask) {
- mad = query->mad_buf->mad;
- mad->mad_hdr.method |=
- IB_MGMT_METHOD_RESP;
- memcpy(mad->data, rec->path_rec,
- sizeof(rec->path_rec));
- status = 0;
- break;
- }
- }
+ if (!query->callback)
+ goto out;
+
+ path_query = container_of(query, struct ib_sa_path_query, sa_query);
+ mad = query->mad_buf->mad;
+
+ head = (const struct nlattr *) nlmsg_data(nlh);
+ len = nlmsg_len(nlh);
+ switch (query->path_use) {
+ case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
+ mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
+ break;
+
+ case LS_RESOLVE_PATH_USE_ALL:
+ mask = IB_PATH_PRIMARY;
+ break;
+
+ case LS_RESOLVE_PATH_USE_GMP:
+ default:
+ mask = IB_PATH_PRIMARY | IB_PATH_GMP |
+ IB_PATH_BIDIRECTIONAL;
+ break;
+ }
+
+ nla_for_each_attr(curr, head, len, rem) {
+ if (curr->nla_type != LS_NLA_TYPE_PATH_RECORD)
+ continue;
+
+ rec_data = nla_data(curr);
+ if ((rec_data->flags & mask) != mask)
+ continue;
+
+ if ((query->flags & IB_SA_QUERY_OPA) ||
+ path_query->conv_pr) {
+ mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
+ memcpy(mad->data, rec_data->path_rec,
+ sizeof(rec_data->path_rec));
+ query->callback(query, 0, mad);
+ goto out;
}
- query->callback(query, status, mad);
+
+ status = 0;
+ ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
+ rec_data->path_rec, &recs[num_prs]);
+ recs[num_prs].flags = rec_data->flags;
+ recs[num_prs].rec_type = SA_PATH_REC_TYPE_IB;
+ sa_path_set_dmac_zero(&recs[num_prs]);
+
+ num_prs++;
+ if (num_prs >= RDMA_PRIMARY_PATH_MAX_REC_NUM)
+ break;
}
+ if (!status) {
+ mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
+ path_query->callback(status, recs, num_prs,
+ path_query->context);
+ } else
+ query->callback(query, status, mad);
+
+out:
mad_send_wc.send_buf = query->mad_buf;
mad_send_wc.status = IB_WC_SUCCESS;
send_handler(query->mad_buf->mad_agent, &mad_send_wc);
@@ -701,10 +1046,10 @@ static void ib_nl_request_timeout(struct work_struct *work)
spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}
-static int ib_nl_handle_set_timeout(struct sk_buff *skb,
- struct netlink_callback *cb)
+int ib_nl_handle_set_timeout(struct sk_buff *skb,
+ struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
{
- const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
int timeout, delta, abs_delta;
const struct nlattr *attr;
unsigned long flags;
@@ -713,11 +1058,12 @@ static int ib_nl_handle_set_timeout(struct sk_buff *skb,
struct nlattr *tb[LS_NLA_TYPE_MAX];
int ret;
- if (!netlink_capable(skb, CAP_NET_ADMIN))
+ if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
+ !(NETLINK_CB(skb).sk))
return -EPERM;
- ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
- nlmsg_len(nlh), ib_nl_policy);
+ ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
+ nlmsg_len(nlh), ib_nl_policy, NULL);
attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
if (ret || !attr)
goto settimeout_out;
@@ -728,6 +1074,8 @@ static int ib_nl_handle_set_timeout(struct sk_buff *skb,
if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;
+ spin_lock_irqsave(&ib_nl_request_lock, flags);
+
delta = timeout - sa_local_svc_timeout_ms;
if (delta < 0)
abs_delta = -delta;
@@ -735,7 +1083,6 @@ static int ib_nl_handle_set_timeout(struct sk_buff *skb,
abs_delta = delta;
if (delta != 0) {
- spin_lock_irqsave(&ib_nl_request_lock, flags);
sa_local_svc_timeout_ms = timeout;
list_for_each_entry(query, &ib_nl_request_list, list) {
if (delta < 0 && abs_delta > query->timeout)
@@ -753,11 +1100,12 @@ static int ib_nl_handle_set_timeout(struct sk_buff *skb,
if (delay)
mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
(unsigned long)delay);
- spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}
+ spin_unlock_irqrestore(&ib_nl_request_lock, flags);
+
settimeout_out:
- return skb->len;
+ return 0;
}
static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
@@ -768,43 +1116,44 @@ static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
return 0;
- ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
- nlmsg_len(nlh), ib_nl_policy);
+ ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
+ nlmsg_len(nlh), ib_nl_policy, NULL);
if (ret)
return 0;
return 1;
}
-static int ib_nl_handle_resolve_resp(struct sk_buff *skb,
- struct netlink_callback *cb)
+int ib_nl_handle_resolve_resp(struct sk_buff *skb,
+ struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
{
- const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
unsigned long flags;
- struct ib_sa_query *query;
+ struct ib_sa_query *query = NULL, *iter;
struct ib_mad_send_buf *send_buf;
struct ib_mad_send_wc mad_send_wc;
- int found = 0;
int ret;
- if (!netlink_capable(skb, CAP_NET_ADMIN))
+ if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
+ !(NETLINK_CB(skb).sk))
return -EPERM;
spin_lock_irqsave(&ib_nl_request_lock, flags);
- list_for_each_entry(query, &ib_nl_request_list, list) {
+ list_for_each_entry(iter, &ib_nl_request_list, list) {
/*
* If the query is cancelled, let the timeout routine
* take care of it.
*/
- if (nlh->nlmsg_seq == query->seq) {
- found = !ib_sa_query_cancelled(query);
- if (found)
- list_del(&query->list);
+ if (nlh->nlmsg_seq == iter->seq) {
+ if (!ib_sa_query_cancelled(iter)) {
+ list_del(&iter->list);
+ query = iter;
+ }
break;
}
}
- if (!found) {
+ if (!query) {
spin_unlock_irqrestore(&ib_nl_request_lock, flags);
goto resp_out;
}
@@ -827,101 +1176,17 @@ static int ib_nl_handle_resolve_resp(struct sk_buff *skb,
}
resp_out:
- return skb->len;
+ return 0;
}
-static struct ibnl_client_cbs ib_sa_cb_table[] = {
- [RDMA_NL_LS_OP_RESOLVE] = {
- .dump = ib_nl_handle_resolve_resp,
- .module = THIS_MODULE },
- [RDMA_NL_LS_OP_SET_TIMEOUT] = {
- .dump = ib_nl_handle_set_timeout,
- .module = THIS_MODULE },
-};
-
static void free_sm_ah(struct kref *kref)
{
struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);
- ib_destroy_ah(sm_ah->ah);
+ rdma_destroy_ah(sm_ah->ah, 0);
kfree(sm_ah);
}
-static void update_sm_ah(struct work_struct *work)
-{
- struct ib_sa_port *port =
- container_of(work, struct ib_sa_port, update_task);
- struct ib_sa_sm_ah *new_ah;
- struct ib_port_attr port_attr;
- struct ib_ah_attr ah_attr;
-
- if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
- printk(KERN_WARNING "Couldn't query port\n");
- return;
- }
-
- new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
- if (!new_ah) {
- printk(KERN_WARNING "Couldn't allocate new SM AH\n");
- return;
- }
-
- kref_init(&new_ah->ref);
- new_ah->src_path_mask = (1 << port_attr.lmc) - 1;
-
- new_ah->pkey_index = 0;
- if (ib_find_pkey(port->agent->device, port->port_num,
- IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
- printk(KERN_ERR "Couldn't find index for default PKey\n");
-
- memset(&ah_attr, 0, sizeof ah_attr);
- ah_attr.dlid = port_attr.sm_lid;
- ah_attr.sl = port_attr.sm_sl;
- ah_attr.port_num = port->port_num;
-
- new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
- if (IS_ERR(new_ah->ah)) {
- printk(KERN_WARNING "Couldn't create new SM AH\n");
- kfree(new_ah);
- return;
- }
-
- spin_lock_irq(&port->ah_lock);
- if (port->sm_ah)
- kref_put(&port->sm_ah->ref, free_sm_ah);
- port->sm_ah = new_ah;
- spin_unlock_irq(&port->ah_lock);
-
-}
-
-static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)
-{
- if (event->event == IB_EVENT_PORT_ERR ||
- event->event == IB_EVENT_PORT_ACTIVE ||
- event->event == IB_EVENT_LID_CHANGE ||
- event->event == IB_EVENT_PKEY_CHANGE ||
- event->event == IB_EVENT_SM_CHANGE ||
- event->event == IB_EVENT_CLIENT_REREGISTER) {
- unsigned long flags;
- struct ib_sa_device *sa_dev =
- container_of(handler, typeof(*sa_dev), event_handler);
- struct ib_sa_port *port =
- &sa_dev->port[event->element.port_num - sa_dev->start_port];
-
- if (!rdma_cap_ib_sa(handler->device, port->port_num))
- return;
-
- spin_lock_irqsave(&port->ah_lock, flags);
- if (port->sm_ah)
- kref_put(&port->sm_ah->ref, free_sm_ah);
- port->sm_ah = NULL;
- spin_unlock_irqrestore(&port->ah_lock, flags);
-
- queue_work(ib_wq, &sa_dev->port[event->element.port_num -
- sa_dev->start_port].update_task);
- }
-}
-
void ib_sa_register_client(struct ib_sa_client *client)
{
atomic_set(&client->users, 1);
@@ -948,17 +1213,15 @@ EXPORT_SYMBOL(ib_sa_unregister_client);
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
unsigned long flags;
- struct ib_mad_agent *agent;
struct ib_mad_send_buf *mad_buf;
- spin_lock_irqsave(&idr_lock, flags);
- if (idr_find(&query_idr, id) != query) {
- spin_unlock_irqrestore(&idr_lock, flags);
+ xa_lock_irqsave(&queries, flags);
+ if (xa_load(&queries, id) != query) {
+ xa_unlock_irqrestore(&queries, flags);
return;
}
- agent = query->port->agent;
mad_buf = query->mad_buf;
- spin_unlock_irqrestore(&idr_lock, flags);
+ xa_unlock_irqrestore(&queries, flags);
/*
* If the query is still on the netlink request list, schedule
@@ -966,11 +1229,11 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query)
* sent to the MAD layer and has to be cancelled from there.
*/
if (!ib_nl_cancel_request(query))
- ib_cancel_mad(agent, mad_buf);
+ ib_cancel_mad(mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);
-static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
+static u8 get_src_path_mask(struct ib_device *device, u32 port_num)
{
struct ib_sa_device *sa_dev;
struct ib_sa_port *port;
@@ -989,50 +1252,84 @@ static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
return src_path_mask;
}
-int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
- struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)
+static int init_ah_attr_grh_fields(struct ib_device *device, u32 port_num,
+ struct sa_path_rec *rec,
+ struct rdma_ah_attr *ah_attr,
+ const struct ib_gid_attr *gid_attr)
{
- int ret;
- u16 gid_index;
- int force_grh;
+ enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec);
- memset(ah_attr, 0, sizeof *ah_attr);
- ah_attr->dlid = be16_to_cpu(rec->dlid);
- ah_attr->sl = rec->sl;
- ah_attr->src_path_bits = be16_to_cpu(rec->slid) &
- get_src_path_mask(device, port_num);
- ah_attr->port_num = port_num;
- ah_attr->static_rate = rec->rate;
+ if (!gid_attr) {
+ gid_attr = rdma_find_gid_by_port(device, &rec->sgid, type,
+ port_num, NULL);
+ if (IS_ERR(gid_attr))
+ return PTR_ERR(gid_attr);
+ } else
+ rdma_hold_gid_attr(gid_attr);
- force_grh = rdma_cap_eth_ah(device, port_num);
+ rdma_move_grh_sgid_attr(ah_attr, &rec->dgid,
+ be32_to_cpu(rec->flow_label),
+ rec->hop_limit, rec->traffic_class,
+ gid_attr);
+ return 0;
+}
- if (rec->hop_limit > 1 || force_grh) {
- ah_attr->ah_flags = IB_AH_GRH;
- ah_attr->grh.dgid = rec->dgid;
+/**
+ * ib_init_ah_attr_from_path - Initialize address handle attributes based on
+ * an SA path record.
+ * @device: Device associated with the ah attributes initialization.
+ * @port_num: Port on the specified device.
+ * @rec: path record entry to use for ah attributes initialization.
+ * @ah_attr: address handle attributes to initialize from the path record.
+ * @gid_attr: SGID attribute to consider during initialization.
+ *
+ * When ib_init_ah_attr_from_path() returns success,
+ * (a) for the IB link layer, ah_attr optionally holds a reference to the
+ * SGID attribute when a GRH is present.
+ * (b) for the RoCE link layer, ah_attr holds a reference to the SGID
+ * attribute.
+ * User must invoke rdma_destroy_ah_attr() to release reference to SGID
+ * attributes which are initialized using ib_init_ah_attr_from_path().
+ */
+int ib_init_ah_attr_from_path(struct ib_device *device, u32 port_num,
+ struct sa_path_rec *rec,
+ struct rdma_ah_attr *ah_attr,
+ const struct ib_gid_attr *gid_attr)
+{
+ int ret = 0;
+
+ memset(ah_attr, 0, sizeof(*ah_attr));
+ ah_attr->type = rdma_ah_find_type(device, port_num);
+ rdma_ah_set_sl(ah_attr, rec->sl);
+ rdma_ah_set_port_num(ah_attr, port_num);
+ rdma_ah_set_static_rate(ah_attr, rec->rate);
- ret = ib_find_cached_gid(device, &rec->sgid, &port_num,
- &gid_index);
+ if (sa_path_is_roce(rec)) {
+ ret = roce_resolve_route_from_path(rec, gid_attr);
if (ret)
return ret;
- ah_attr->grh.sgid_index = gid_index;
- ah_attr->grh.flow_label = be32_to_cpu(rec->flow_label);
- ah_attr->grh.hop_limit = rec->hop_limit;
- ah_attr->grh.traffic_class = rec->traffic_class;
- }
- if (force_grh) {
- memcpy(ah_attr->dmac, rec->dmac, ETH_ALEN);
- ah_attr->vlan_id = rec->vlan_id;
+ memcpy(ah_attr->roce.dmac, sa_path_get_dmac(rec), ETH_ALEN);
} else {
- ah_attr->vlan_id = 0xffff;
+ rdma_ah_set_dlid(ah_attr, be32_to_cpu(sa_path_get_dlid(rec)));
+ if (sa_path_is_opa(rec) &&
+ rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE))
+ rdma_ah_set_make_grd(ah_attr, true);
+
+ rdma_ah_set_path_bits(ah_attr,
+ be32_to_cpu(sa_path_get_slid(rec)) &
+ get_src_path_mask(device, port_num));
}
- return 0;
+ if (rec->hop_limit > 0 || sa_path_is_roce(rec))
+ ret = init_ah_attr_grh_fields(device, port_num,
+ rec, ah_attr, gid_attr);
+ return ret;
}
-EXPORT_SYMBOL(ib_init_ah_from_path);
+EXPORT_SYMBOL(ib_init_ah_attr_from_path);
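Since the kernel-doc above requires callers to release the SGID attribute reference with rdma_destroy_ah_attr(), here is a minimal caller sketch (illustrative only, not part of the patch; error handling beyond the basics is omitted):

/* Resolve a path record into AH attributes, create the AH, then drop
 * the SGID attribute reference taken by ib_init_ah_attr_from_path().
 */
static struct ib_ah *sketch_ah_from_path(struct ib_pd *pd,
					 struct ib_device *device,
					 u32 port_num,
					 struct sa_path_rec *rec)
{
	struct rdma_ah_attr ah_attr;
	struct ib_ah *ah;
	int ret;

	ret = ib_init_ah_attr_from_path(device, port_num, rec, &ah_attr, NULL);
	if (ret)
		return ERR_PTR(ret);

	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);

	/* Release the SGID attribute reference in both the success and
	 * failure cases, as required by the kernel-doc above.
	 */
	rdma_destroy_ah_attr(&ah_attr);
	return ah;
}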
static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
+ struct rdma_ah_attr ah_attr;
unsigned long flags;
spin_lock_irqsave(&query->port->ah_lock, flags);
@@ -1044,11 +1341,22 @@ static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
query->sm_ah = query->port->sm_ah;
spin_unlock_irqrestore(&query->port->ah_lock, flags);
+ /*
+	 * Always check that sm_ah has a valid dlid assigned
+	 * before querying for class port info.
+ */
+ if ((rdma_query_ah(query->sm_ah->ah, &ah_attr) < 0) ||
+ !rdma_is_valid_unicast_lid(&ah_attr)) {
+ kref_put(&query->sm_ah->ref, free_sm_ah);
+ return -EAGAIN;
+ }
query->mad_buf = ib_create_send_mad(query->port->agent, 1,
query->sm_ah->pkey_index,
0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
gfp_mask,
- IB_MGMT_BASE_VERSION);
+ ((query->flags & IB_SA_QUERY_OPA) ?
+ OPA_MGMT_BASE_VERSION :
+ IB_MGMT_BASE_VERSION));
if (IS_ERR(query->mad_buf)) {
kref_put(&query->sm_ah->ref, free_sm_ah);
return -ENOMEM;
@@ -1065,47 +1373,54 @@ static void free_mad(struct ib_sa_query *query)
kref_put(&query->sm_ah->ref, free_sm_ah);
}
-static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
+static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
{
+ struct ib_sa_mad *mad = query->mad_buf->mad;
unsigned long flags;
memset(mad, 0, sizeof *mad);
- mad->mad_hdr.base_version = IB_MGMT_BASE_VERSION;
+ if (query->flags & IB_SA_QUERY_OPA) {
+ mad->mad_hdr.base_version = OPA_MGMT_BASE_VERSION;
+ mad->mad_hdr.class_version = OPA_SA_CLASS_VERSION;
+ } else {
+ mad->mad_hdr.base_version = IB_MGMT_BASE_VERSION;
+ mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;
+ }
mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
- mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;
-
spin_lock_irqsave(&tid_lock, flags);
mad->mad_hdr.tid =
cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
spin_unlock_irqrestore(&tid_lock, flags);
}
-static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
+static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms,
+ gfp_t gfp_mask)
{
- bool preload = !!(gfp_mask & __GFP_WAIT);
unsigned long flags;
int ret, id;
+ const int nmbr_sa_query_retries = 10;
- if (preload)
- idr_preload(gfp_mask);
- spin_lock_irqsave(&idr_lock, flags);
-
- id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);
-
- spin_unlock_irqrestore(&idr_lock, flags);
- if (preload)
- idr_preload_end();
- if (id < 0)
- return id;
-
- query->mad_buf->timeout_ms = timeout_ms;
+ xa_lock_irqsave(&queries, flags);
+ ret = __xa_alloc(&queries, &id, query, xa_limit_32b, gfp_mask);
+ xa_unlock_irqrestore(&queries, flags);
+ if (ret < 0)
+ return ret;
+
+ query->mad_buf->timeout_ms = timeout_ms / nmbr_sa_query_retries;
+ query->mad_buf->retries = nmbr_sa_query_retries;
+ if (!query->mad_buf->timeout_ms) {
+ /* Special case, very small timeout_ms */
+ query->mad_buf->timeout_ms = 1;
+ query->mad_buf->retries = timeout_ms;
+ }
query->mad_buf->context[0] = query;
query->id = id;
- if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) {
- if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
- if (!ib_nl_make_request(query))
+ if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) &&
+ (!(query->flags & IB_SA_QUERY_OPA))) {
+ if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
+ if (!ib_nl_make_request(query, gfp_mask))
return id;
}
ib_sa_disable_local_svc(query);
@@ -1113,9 +1428,9 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
ret = ib_post_send_mad(query->mad_buf, NULL);
if (ret) {
- spin_lock_irqsave(&idr_lock, flags);
- idr_remove(&query_idr, id);
- spin_unlock_irqrestore(&idr_lock, flags);
+ xa_lock_irqsave(&queries, flags);
+ __xa_erase(&queries, id);
+ xa_unlock_irqrestore(&queries, flags);
}
/*
@@ -1126,41 +1441,198 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
return ret ? ret : id;
}
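The send_mad() changes above divide the caller-supplied timeout across a fixed number of MAD-layer retries: with ten retries, timeout_ms = 1000 becomes a 100 ms per-send timeout, while a timeout_ms smaller than the retry count falls into the special case of a 1 ms per-send timeout with timeout_ms retries. A small restatement of the arithmetic (illustrative only, not part of the patch):

/* Illustrative restatement of the timeout split performed by send_mad(). */
static void sketch_split_timeout(unsigned long timeout_ms,
				 unsigned long *per_send_ms, int *retries)
{
	const int nmbr_sa_query_retries = 10;

	*per_send_ms = timeout_ms / nmbr_sa_query_retries;
	*retries = nmbr_sa_query_retries;
	if (!*per_send_ms) {
		/* Very small timeout_ms: one 1 ms attempt per millisecond. */
		*per_send_ms = 1;
		*retries = timeout_ms;
	}
}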
-void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec)
+void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec)
{
ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);
-void ib_sa_pack_path(struct ib_sa_path_rec *rec, void *attribute)
+void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
{
ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);
+void ib_sa_pack_service(struct sa_service_rec *rec, void *attribute)
+{
+ ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table), rec,
+ attribute);
+}
+EXPORT_SYMBOL(ib_sa_pack_service);
+
+void ib_sa_unpack_service(void *attribute, struct sa_service_rec *rec)
+{
+ ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table), attribute,
+ rec);
+}
+EXPORT_SYMBOL(ib_sa_unpack_service);
+
+static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
+ struct ib_sa_device *sa_dev,
+ u32 port_num)
+{
+ struct ib_sa_port *port;
+ unsigned long flags;
+ bool ret = false;
+
+ port = &sa_dev->port[port_num - sa_dev->start_port];
+ spin_lock_irqsave(&port->classport_lock, flags);
+ if (!port->classport_info.valid)
+ goto ret;
+
+ if (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_OPA)
+ ret = opa_get_cpi_capmask2(&port->classport_info.data.opa) &
+ OPA_CLASS_PORT_INFO_PR_SUPPORT;
+ret:
+ spin_unlock_irqrestore(&port->classport_lock, flags);
+ return ret;
+}
+
+enum opa_pr_supported {
+ PR_NOT_SUPPORTED,
+ PR_OPA_SUPPORTED,
+ PR_IB_SUPPORTED
+};
+
+/*
+ * opa_pr_query_possible - Check if current PR query can be an OPA query.
+ *
+ * Returns PR_NOT_SUPPORTED if a path record query is not
+ * possible, PR_OPA_SUPPORTED if an OPA path record query
+ * is possible and PR_IB_SUPPORTED if an IB path record
+ * query is possible.
+ */
+static int opa_pr_query_possible(struct ib_sa_client *client,
+ struct ib_sa_device *sa_dev,
+ struct ib_device *device, u32 port_num)
+{
+ struct ib_port_attr port_attr;
+
+ if (ib_query_port(device, port_num, &port_attr))
+ return PR_NOT_SUPPORTED;
+
+ if (ib_sa_opa_pathrecord_support(client, sa_dev, port_num))
+ return PR_OPA_SUPPORTED;
+
+ if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
+ return PR_NOT_SUPPORTED;
+ else
+ return PR_IB_SUPPORTED;
+}
+
static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
- int status,
- struct ib_sa_mad *mad)
+ int status, struct ib_sa_mad *mad)
{
struct ib_sa_path_query *query =
container_of(sa_query, struct ib_sa_path_query, sa_query);
+ struct sa_path_rec rec = {};
- if (mad) {
- struct ib_sa_path_rec rec;
+ if (!mad) {
+ query->callback(status, NULL, 0, query->context);
+ return;
+ }
- ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
+ if (sa_query->flags & IB_SA_QUERY_OPA) {
+ ib_unpack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
mad->data, &rec);
- rec.vlan_id = 0xffff;
- memset(rec.dmac, 0, ETH_ALEN);
- memset(rec.smac, 0, ETH_ALEN);
- query->callback(status, &rec, query->context);
- } else
- query->callback(status, NULL, query->context);
+ rec.rec_type = SA_PATH_REC_TYPE_OPA;
+ query->callback(status, &rec, 1, query->context);
+ return;
+ }
+
+ ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
+ mad->data, &rec);
+ rec.rec_type = SA_PATH_REC_TYPE_IB;
+ sa_path_set_dmac_zero(&rec);
+
+ if (query->conv_pr) {
+ struct sa_path_rec opa;
+
+ memset(&opa, 0, sizeof(struct sa_path_rec));
+ sa_convert_path_ib_to_opa(&opa, &rec);
+ query->callback(status, &opa, 1, query->context);
+ } else {
+ query->callback(status, &rec, 1, query->context);
+ }
+}
+
+#define IB_SA_DATA_OFFS 56
+#define IB_SERVICE_REC_SZ 176
+
+static void ib_unpack_service_rmpp(struct sa_service_rec *rec,
+ struct ib_mad_recv_wc *mad_wc,
+ int num_services)
+{
+ unsigned int cp_sz, data_i, data_size, rec_i = 0, buf_i = 0;
+ struct ib_mad_recv_buf *mad_buf;
+ u8 buf[IB_SERVICE_REC_SZ];
+ u8 *data;
+
+ data_size = sizeof(((struct ib_sa_mad *) mad_buf->mad)->data);
+
+ list_for_each_entry(mad_buf, &mad_wc->rmpp_list, list) {
+ data = ((struct ib_sa_mad *) mad_buf->mad)->data;
+ data_i = 0;
+ while (data_i < data_size && rec_i < num_services) {
+ cp_sz = min(IB_SERVICE_REC_SZ - buf_i,
+ data_size - data_i);
+ memcpy(buf + buf_i, data + data_i, cp_sz);
+ data_i += cp_sz;
+ buf_i += cp_sz;
+ if (buf_i == IB_SERVICE_REC_SZ) {
+ ib_sa_unpack_service(buf, rec + rec_i);
+ buf_i = 0;
+ rec_i++;
+ }
+ }
+ }
+}
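A worked example of the reassembly above (illustrative only), assuming the usual 200-byte SA data area per RMPP segment and IB_SERVICE_REC_SZ == 176:

/*
 *   segment 0: bytes  0..175 of record 0, bytes 0..23 of record 1
 *   segment 1: bytes 24..175 of record 1, bytes 0..47 of record 2
 *   segment 2: bytes 48..175 of record 2, bytes 0..71 of record 3
 *   ...
 * Each partially received record is staged in the 176-byte bounce buffer
 * and unpacked with ib_sa_unpack_service() once the buffer fills.
 */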
+
+static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query, int status,
+ struct ib_mad_recv_wc *mad_wc)
+{
+ struct ib_sa_service_query *query =
+ container_of(sa_query, struct ib_sa_service_query, sa_query);
+ struct sa_service_rec *rec;
+ int num_services;
+
+ if (!mad_wc || !mad_wc->recv_buf.mad) {
+ query->callback(status, NULL, 0, query->context);
+ return;
+ }
+
+ num_services = (mad_wc->mad_len - IB_SA_DATA_OFFS) / IB_SERVICE_REC_SZ;
+ if (!num_services) {
+ query->callback(-ENODATA, NULL, 0, query->context);
+ return;
+ }
+
+ rec = kmalloc_array(num_services, sizeof(*rec), GFP_KERNEL);
+ if (!rec) {
+ query->callback(-ENOMEM, NULL, 0, query->context);
+ return;
+ }
+
+ ib_unpack_service_rmpp(rec, mad_wc, num_services);
+ query->callback(status, rec, num_services, query->context);
+ kfree(rec);
}
static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
- kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
+ struct ib_sa_path_query *query =
+ container_of(sa_query, struct ib_sa_path_query, sa_query);
+
+ kfree(query->conv_pr);
+ kfree(query);
+}
+
+static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
+{
+ struct ib_sa_service_query *query =
+ container_of(sa_query, struct ib_sa_service_query, sa_query);
+
+ kfree(query);
}
/**
@@ -1189,13 +1661,13 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
* the query.
*/
int ib_sa_path_rec_get(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
- struct ib_sa_path_rec *rec,
+ struct ib_device *device, u32 port_num,
+ struct sa_path_rec *rec,
ib_sa_comp_mask comp_mask,
- int timeout_ms, gfp_t gfp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
- struct ib_sa_path_rec *resp,
- void *context),
+ struct sa_path_rec *resp,
+ unsigned int num_paths, void *context),
void *context,
struct ib_sa_query **sa_query)
{
@@ -1204,11 +1676,16 @@ int ib_sa_path_rec_get(struct ib_sa_client *client,
struct ib_sa_port *port;
struct ib_mad_agent *agent;
struct ib_sa_mad *mad;
+ enum opa_pr_supported status;
int ret;
if (!sa_dev)
return -ENODEV;
+ if ((rec->rec_type != SA_PATH_REC_TYPE_IB) &&
+ (rec->rec_type != SA_PATH_REC_TYPE_OPA))
+ return -EINVAL;
+
port = &sa_dev->port[port_num - sa_dev->start_port];
agent = port->agent;
@@ -1217,9 +1694,26 @@ int ib_sa_path_rec_get(struct ib_sa_client *client,
return -ENOMEM;
query->sa_query.port = port;
+ if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
+ status = opa_pr_query_possible(client, sa_dev, device, port_num);
+ if (status == PR_NOT_SUPPORTED) {
+ ret = -EINVAL;
+ goto err1;
+ } else if (status == PR_OPA_SUPPORTED) {
+ query->sa_query.flags |= IB_SA_QUERY_OPA;
+ } else {
+ query->conv_pr =
+ kmalloc(sizeof(*query->conv_pr), gfp_mask);
+ if (!query->conv_pr) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+ }
+ }
+
ret = alloc_mad(&query->sa_query, gfp_mask);
if (ret)
- goto err1;
+ goto err2;
ib_sa_client_get(client);
query->sa_query.client = client;
@@ -1227,7 +1721,7 @@ int ib_sa_path_rec_get(struct ib_sa_client *client,
query->context = context;
mad = query->sa_query.mad_buf->mad;
- init_mad(mad, agent);
+ init_mad(&query->sa_query, agent);
query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
query->sa_query.release = ib_sa_path_rec_release;
@@ -1235,113 +1729,97 @@ int ib_sa_path_rec_get(struct ib_sa_client *client,
mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC);
mad->sa_hdr.comp_mask = comp_mask;
- ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);
+ if (query->sa_query.flags & IB_SA_QUERY_OPA) {
+ ib_pack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
+ rec, mad->data);
+ } else if (query->conv_pr) {
+ sa_convert_path_opa_to_ib(query->conv_pr, rec);
+ ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
+ query->conv_pr, mad->data);
+ } else {
+ ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
+ rec, mad->data);
+ }
*sa_query = &query->sa_query;
query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
- query->sa_query.mad_buf->context[1] = rec;
+ query->sa_query.mad_buf->context[1] = (query->conv_pr) ?
+ query->conv_pr : rec;
ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
if (ret < 0)
- goto err2;
+ goto err3;
return ret;
-err2:
+err3:
*sa_query = NULL;
ib_sa_client_put(query->sa_query.client);
free_mad(&query->sa_query);
-
+err2:
+ kfree(query->conv_pr);
err1:
kfree(query);
return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
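A minimal caller sketch for the reworked ib_sa_path_rec_get() callback signature, which now passes the number of resolved records (illustrative only; the client registration, completion handling, and comp_mask choice are assumptions):

static void sketch_path_rec_cb(int status, struct sa_path_rec *resp,
			       unsigned int num_paths, void *context)
{
	if (!status && num_paths)
		pr_info("resolved %u path record(s)\n", num_paths);
}

static int sketch_path_rec_query(struct ib_sa_client *client,
				 struct ib_device *device, u32 port_num,
				 struct sa_path_rec *rec,
				 struct ib_sa_query **sa_query)
{
	return ib_sa_path_rec_get(client, device, port_num, rec,
				  IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
				  3000, GFP_KERNEL, sketch_path_rec_cb,
				  NULL, sa_query);
}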
-static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
- int status,
- struct ib_sa_mad *mad)
-{
- struct ib_sa_service_query *query =
- container_of(sa_query, struct ib_sa_service_query, sa_query);
-
- if (mad) {
- struct ib_sa_service_rec rec;
-
- ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
- mad->data, &rec);
- query->callback(status, &rec, query->context);
- } else
- query->callback(status, NULL, query->context);
-}
-
-static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
-{
- kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
-}
-
/**
- * ib_sa_service_rec_query - Start Service Record operation
- * @client:SA client
- * @device:device to send request on
- * @port_num: port number to send request on
- * @method:SA method - should be get, set, or delete
- * @rec:Service Record to send in request
- * @comp_mask:component mask to send in request
- * @timeout_ms:time to wait for response
- * @gfp_mask:GFP mask to use for internal allocations
- * @callback:function called when request completes, times out or is
+ * ib_sa_service_rec_get - Start a Service get query
+ * @client: SA client
+ * @device: device to send query on
+ * @port_num: port number to send query on
+ * @rec: Service Record to send in query
+ * @comp_mask: component mask to send in query
+ * @timeout_ms: time to wait for response
+ * @gfp_mask: GFP mask to use for internal allocations
+ * @callback: function called when query completes, times out or is
* canceled
- * @context:opaque user context passed to callback
- * @sa_query:request context, used to cancel request
+ * @context: opaque user context passed to callback
+ * @sa_query: query context, used to cancel query
*
- * Send a Service Record set/get/delete to the SA to register,
- * unregister or query a service record.
- * The callback function will be called when the request completes (or
+ * Send a Service Record Get query to the SA to look up service records. The
+ * callback function will be called when the query completes (or
* fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
* occurred sending the query. The resp parameter of the callback is
* only valid if status is 0.
*
- * If the return value of ib_sa_service_rec_query() is negative, it is an
- * error code. Otherwise it is a request ID that can be used to cancel
+ * If the return value of ib_sa_service_rec_get() is negative, it is an
+ * error code. Otherwise it is a query ID that can be used to cancel
* the query.
*/
-int ib_sa_service_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num, u8 method,
- struct ib_sa_service_rec *rec,
- ib_sa_comp_mask comp_mask,
- int timeout_ms, gfp_t gfp_mask,
- void (*callback)(int status,
- struct ib_sa_service_rec *resp,
- void *context),
- void *context,
- struct ib_sa_query **sa_query)
+int ib_sa_service_rec_get(struct ib_sa_client *client,
+ struct ib_device *device, u32 port_num,
+ struct sa_service_rec *rec,
+ ib_sa_comp_mask comp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
+ void (*callback)(int status,
+ struct sa_service_rec *resp,
+ unsigned int num_services,
+ void *context),
+ void *context, struct ib_sa_query **sa_query)
{
- struct ib_sa_service_query *query;
struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
- struct ib_sa_port *port;
+ struct ib_sa_service_query *query;
struct ib_mad_agent *agent;
+ struct ib_sa_port *port;
struct ib_sa_mad *mad;
int ret;
if (!sa_dev)
return -ENODEV;
- port = &sa_dev->port[port_num - sa_dev->start_port];
+ port = &sa_dev->port[port_num - sa_dev->start_port];
agent = port->agent;
- if (method != IB_MGMT_METHOD_GET &&
- method != IB_MGMT_METHOD_SET &&
- method != IB_SA_METHOD_DELETE)
- return -EINVAL;
-
query = kzalloc(sizeof(*query), gfp_mask);
if (!query)
return -ENOMEM;
- query->sa_query.port = port;
+ query->sa_query.port = port;
+
ret = alloc_mad(&query->sa_query, gfp_mask);
if (ret)
goto err1;
@@ -1352,18 +1830,19 @@ int ib_sa_service_rec_query(struct ib_sa_client *client,
query->context = context;
mad = query->sa_query.mad_buf->mad;
- init_mad(mad, agent);
+ init_mad(&query->sa_query, agent);
- query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
- query->sa_query.release = ib_sa_service_rec_release;
- mad->mad_hdr.method = method;
- mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
- mad->sa_hdr.comp_mask = comp_mask;
+ query->sa_query.rmpp_callback = callback ? ib_sa_service_rec_callback :
+ NULL;
+ query->sa_query.release = ib_sa_service_rec_release;
+ mad->mad_hdr.method = IB_MGMT_METHOD_GET_TABLE;
+ mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
+ mad->sa_hdr.comp_mask = comp_mask;
- ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
- rec, mad->data);
+ ib_sa_pack_service(rec, mad->data);
*sa_query = &query->sa_query;
+ query->sa_query.mad_buf->context[1] = rec;
ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
if (ret < 0)
@@ -1375,16 +1854,14 @@ err2:
*sa_query = NULL;
ib_sa_client_put(query->sa_query.client);
free_mad(&query->sa_query);
-
err1:
kfree(query);
return ret;
}
-EXPORT_SYMBOL(ib_sa_service_rec_query);
+EXPORT_SYMBOL(ib_sa_service_rec_get);
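A matching caller sketch for ib_sa_service_rec_get(), whose callback now receives an array of unpacked service records that is only valid for the duration of the callback (illustrative only; comp_mask and record contents are left to the caller):

static void sketch_service_rec_cb(int status, struct sa_service_rec *resp,
				  unsigned int num_services, void *context)
{
	if (!status)
		pr_info("received %u service record(s)\n", num_services);
}

static int sketch_service_rec_query(struct ib_sa_client *client,
				    struct ib_device *device, u32 port_num,
				    struct sa_service_rec *rec,
				    ib_sa_comp_mask comp_mask,
				    struct ib_sa_query **sa_query)
{
	return ib_sa_service_rec_get(client, device, port_num, rec, comp_mask,
				     3000, GFP_KERNEL, sketch_service_rec_cb,
				     NULL, sa_query);
}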
static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
- int status,
- struct ib_sa_mad *mad)
+ int status, struct ib_sa_mad *mad)
{
struct ib_sa_mcmember_query *query =
container_of(sa_query, struct ib_sa_mcmember_query, sa_query);
@@ -1405,11 +1882,11 @@ static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
}
int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
+ struct ib_device *device, u32 port_num,
u8 method,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask,
- int timeout_ms, gfp_t gfp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_mcmember_rec *resp,
void *context),
@@ -1444,7 +1921,7 @@ int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
query->context = context;
mad = query->sa_query.mad_buf->mad;
- init_mad(mad, agent);
+ init_mad(&query->sa_query, agent);
query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
query->sa_query.release = ib_sa_mcmember_rec_release;
@@ -1475,8 +1952,7 @@ err1:
/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
- int status,
- struct ib_sa_mad *mad)
+ int status, struct ib_sa_mad *mad)
{
struct ib_sa_guidinfo_query *query =
container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);
@@ -1497,10 +1973,10 @@ static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
}
int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
+ struct ib_device *device, u32 port_num,
struct ib_sa_guidinfo_rec *rec,
ib_sa_comp_mask comp_mask, u8 method,
- int timeout_ms, gfp_t gfp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_guidinfo_rec *resp,
void *context),
@@ -1541,7 +2017,7 @@ int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
query->context = context;
mad = query->sa_query.mad_buf->mad;
- init_mad(mad, agent);
+ init_mad(&query->sa_query, agent);
query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
query->sa_query.release = ib_sa_guidinfo_rec_release;
@@ -1572,52 +2048,244 @@ err1:
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
+struct ib_classport_info_context {
+ struct completion done;
+ struct ib_sa_query *sa_query;
+};
+
+static void ib_classportinfo_cb(void *context)
+{
+ struct ib_classport_info_context *cb_ctx = context;
+
+ complete(&cb_ctx->done);
+}
+
+static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
+ int status, struct ib_sa_mad *mad)
+{
+ unsigned long flags;
+ struct ib_sa_classport_info_query *query =
+ container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
+ struct ib_sa_classport_cache *info = &sa_query->port->classport_info;
+
+ if (mad) {
+ if (sa_query->flags & IB_SA_QUERY_OPA) {
+ struct opa_class_port_info rec;
+
+ ib_unpack(opa_classport_info_rec_table,
+ ARRAY_SIZE(opa_classport_info_rec_table),
+ mad->data, &rec);
+
+ spin_lock_irqsave(&sa_query->port->classport_lock,
+ flags);
+ if (!status && !info->valid) {
+ memcpy(&info->data.opa, &rec,
+ sizeof(info->data.opa));
+
+ info->valid = true;
+ info->data.type = RDMA_CLASS_PORT_INFO_OPA;
+ }
+ spin_unlock_irqrestore(&sa_query->port->classport_lock,
+ flags);
+
+ } else {
+ struct ib_class_port_info rec;
+
+ ib_unpack(ib_classport_info_rec_table,
+ ARRAY_SIZE(ib_classport_info_rec_table),
+ mad->data, &rec);
+
+ spin_lock_irqsave(&sa_query->port->classport_lock,
+ flags);
+ if (!status && !info->valid) {
+ memcpy(&info->data.ib, &rec,
+ sizeof(info->data.ib));
+
+ info->valid = true;
+ info->data.type = RDMA_CLASS_PORT_INFO_IB;
+ }
+ spin_unlock_irqrestore(&sa_query->port->classport_lock,
+ flags);
+ }
+ }
+ query->callback(query->context);
+}
+
+static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query)
+{
+ kfree(container_of(sa_query, struct ib_sa_classport_info_query,
+ sa_query));
+}
+
+static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
+ unsigned long timeout_ms,
+ void (*callback)(void *context),
+ void *context,
+ struct ib_sa_query **sa_query)
+{
+ struct ib_mad_agent *agent;
+ struct ib_sa_classport_info_query *query;
+ struct ib_sa_mad *mad;
+ gfp_t gfp_mask = GFP_KERNEL;
+ int ret;
+
+ agent = port->agent;
+
+ query = kzalloc(sizeof(*query), gfp_mask);
+ if (!query)
+ return -ENOMEM;
+
+ query->sa_query.port = port;
+ query->sa_query.flags |= rdma_cap_opa_ah(port->agent->device,
+ port->port_num) ?
+ IB_SA_QUERY_OPA : 0;
+ ret = alloc_mad(&query->sa_query, gfp_mask);
+ if (ret)
+ goto err_free;
+
+ query->callback = callback;
+ query->context = context;
+
+ mad = query->sa_query.mad_buf->mad;
+ init_mad(&query->sa_query, agent);
+
+ query->sa_query.callback = ib_sa_classport_info_rec_callback;
+ query->sa_query.release = ib_sa_classport_info_rec_release;
+ mad->mad_hdr.method = IB_MGMT_METHOD_GET;
+ mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
+ mad->sa_hdr.comp_mask = 0;
+ *sa_query = &query->sa_query;
+
+ ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
+ if (ret < 0)
+ goto err_free_mad;
+
+ return ret;
+
+err_free_mad:
+ *sa_query = NULL;
+ free_mad(&query->sa_query);
+
+err_free:
+ kfree(query);
+ return ret;
+}
+
+static void update_ib_cpi(struct work_struct *work)
+{
+ struct ib_sa_port *port =
+ container_of(work, struct ib_sa_port, ib_cpi_work.work);
+ struct ib_classport_info_context *cb_context;
+ unsigned long flags;
+ int ret;
+
+ /* If the classport info is valid, nothing
+ * to do here.
+ */
+ spin_lock_irqsave(&port->classport_lock, flags);
+ if (port->classport_info.valid) {
+ spin_unlock_irqrestore(&port->classport_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&port->classport_lock, flags);
+
+ cb_context = kmalloc(sizeof(*cb_context), GFP_KERNEL);
+ if (!cb_context)
+ goto err_nomem;
+
+ init_completion(&cb_context->done);
+
+ ret = ib_sa_classport_info_rec_query(port, 3000,
+ ib_classportinfo_cb, cb_context,
+ &cb_context->sa_query);
+ if (ret < 0)
+ goto free_cb_err;
+ wait_for_completion(&cb_context->done);
+free_cb_err:
+ kfree(cb_context);
+ spin_lock_irqsave(&port->classport_lock, flags);
+
+ /* If the classport info is still not valid, the query should have
+ * failed for some reason. Retry issuing the query
+ */
+ if (!port->classport_info.valid) {
+ port->classport_info.retry_cnt++;
+ if (port->classport_info.retry_cnt <=
+ IB_SA_CPI_MAX_RETRY_CNT) {
+ unsigned long delay =
+ msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);
+
+ queue_delayed_work(ib_wq, &port->ib_cpi_work, delay);
+ }
+ }
+ spin_unlock_irqrestore(&port->classport_lock, flags);
+
+err_nomem:
+ return;
+}
+
static void send_handler(struct ib_mad_agent *agent,
struct ib_mad_send_wc *mad_send_wc)
{
struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
unsigned long flags;
+ int status = 0;
- if (query->callback)
+ if (query->callback || query->rmpp_callback) {
switch (mad_send_wc->status) {
case IB_WC_SUCCESS:
/* No callback -- already got recv */
break;
case IB_WC_RESP_TIMEOUT_ERR:
- query->callback(query, -ETIMEDOUT, NULL);
+ status = -ETIMEDOUT;
break;
case IB_WC_WR_FLUSH_ERR:
- query->callback(query, -EINTR, NULL);
+ status = -EINTR;
break;
default:
- query->callback(query, -EIO, NULL);
+ status = -EIO;
break;
}
- spin_lock_irqsave(&idr_lock, flags);
- idr_remove(&query_idr, query->id);
- spin_unlock_irqrestore(&idr_lock, flags);
+ if (status)
+ query->callback ? query->callback(query, status, NULL) :
+ query->rmpp_callback(query, status, NULL);
+ }
+
+ xa_lock_irqsave(&queries, flags);
+ __xa_erase(&queries, query->id);
+ xa_unlock_irqrestore(&queries, flags);
free_mad(query);
- ib_sa_client_put(query->client);
+ if (query->client)
+ ib_sa_client_put(query->client);
query->release(query);
}
static void recv_handler(struct ib_mad_agent *mad_agent,
+ struct ib_mad_send_buf *send_buf,
struct ib_mad_recv_wc *mad_recv_wc)
{
struct ib_sa_query *query;
- struct ib_mad_send_buf *mad_buf;
+ struct ib_mad *mad;
- mad_buf = (void *) (unsigned long) mad_recv_wc->wc->wr_id;
- query = mad_buf->context[0];
- if (query->callback) {
+ if (!send_buf)
+ return;
+
+ query = send_buf->context[0];
+ mad = mad_recv_wc->recv_buf.mad;
+
+ if (query->rmpp_callback) {
if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
- query->callback(query,
- mad_recv_wc->recv_buf.mad->mad_hdr.status ?
- -EINVAL : 0,
- (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
+ query->rmpp_callback(query, mad->mad_hdr.status ?
+ -EINVAL : 0, mad_recv_wc);
+ else
+ query->rmpp_callback(query, -EIO, NULL);
+ } else if (query->callback) {
+ if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
+ query->callback(query, mad->mad_hdr.status ?
+ -EINVAL : 0, (struct ib_sa_mad *)mad);
else
query->callback(query, -EIO, NULL);
}
@@ -1625,20 +2293,133 @@ static void recv_handler(struct ib_mad_agent *mad_agent,
ib_free_recv_mad(mad_recv_wc);
}
-static void ib_sa_add_one(struct ib_device *device)
+static void update_sm_ah(struct work_struct *work)
+{
+ struct ib_sa_port *port =
+ container_of(work, struct ib_sa_port, update_task);
+ struct ib_sa_sm_ah *new_ah;
+ struct ib_port_attr port_attr;
+ struct rdma_ah_attr ah_attr;
+ bool grh_required;
+
+ if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
+ pr_warn("Couldn't query port\n");
+ return;
+ }
+
+ new_ah = kmalloc(sizeof(*new_ah), GFP_KERNEL);
+ if (!new_ah)
+ return;
+
+ kref_init(&new_ah->ref);
+ new_ah->src_path_mask = (1 << port_attr.lmc) - 1;
+
+ new_ah->pkey_index = 0;
+ if (ib_find_pkey(port->agent->device, port->port_num,
+ IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
+ pr_err("Couldn't find index for default PKey\n");
+
+ memset(&ah_attr, 0, sizeof(ah_attr));
+ ah_attr.type = rdma_ah_find_type(port->agent->device,
+ port->port_num);
+ rdma_ah_set_dlid(&ah_attr, port_attr.sm_lid);
+ rdma_ah_set_sl(&ah_attr, port_attr.sm_sl);
+ rdma_ah_set_port_num(&ah_attr, port->port_num);
+
+ grh_required = rdma_is_grh_required(port->agent->device,
+ port->port_num);
+
+ /*
+ * The OPA sm_lid of 0xFFFF needs special handling so that it can be
+ * differentiated from a permissive LID of 0xFFFF. We set the
+ * grh_required flag here so the SA can program the DGID in the
+ * address handle appropriately
+ */
+ if (ah_attr.type == RDMA_AH_ATTR_TYPE_OPA &&
+ (grh_required ||
+ port_attr.sm_lid == be16_to_cpu(IB_LID_PERMISSIVE)))
+ rdma_ah_set_make_grd(&ah_attr, true);
+
+ if (ah_attr.type == RDMA_AH_ATTR_TYPE_IB && grh_required) {
+ rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);
+ rdma_ah_set_subnet_prefix(&ah_attr,
+ cpu_to_be64(port_attr.subnet_prefix));
+ rdma_ah_set_interface_id(&ah_attr,
+ cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
+ }
+
+ new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr,
+ RDMA_CREATE_AH_SLEEPABLE);
+ if (IS_ERR(new_ah->ah)) {
+ pr_warn("Couldn't create new SM AH\n");
+ kfree(new_ah);
+ return;
+ }
+
+ spin_lock_irq(&port->ah_lock);
+ if (port->sm_ah)
+ kref_put(&port->sm_ah->ref, free_sm_ah);
+ port->sm_ah = new_ah;
+ spin_unlock_irq(&port->ah_lock);
+}
+
+static void ib_sa_event(struct ib_event_handler *handler,
+ struct ib_event *event)
+{
+ if (event->event == IB_EVENT_PORT_ERR ||
+ event->event == IB_EVENT_PORT_ACTIVE ||
+ event->event == IB_EVENT_LID_CHANGE ||
+ event->event == IB_EVENT_PKEY_CHANGE ||
+ event->event == IB_EVENT_SM_CHANGE ||
+ event->event == IB_EVENT_CLIENT_REREGISTER) {
+ unsigned long flags;
+ struct ib_sa_device *sa_dev =
+ container_of(handler, typeof(*sa_dev), event_handler);
+ u32 port_num = event->element.port_num - sa_dev->start_port;
+ struct ib_sa_port *port = &sa_dev->port[port_num];
+
+ if (!rdma_cap_ib_sa(handler->device, port->port_num))
+ return;
+
+ spin_lock_irqsave(&port->ah_lock, flags);
+ if (port->sm_ah)
+ kref_put(&port->sm_ah->ref, free_sm_ah);
+ port->sm_ah = NULL;
+ spin_unlock_irqrestore(&port->ah_lock, flags);
+
+ if (event->event == IB_EVENT_SM_CHANGE ||
+ event->event == IB_EVENT_CLIENT_REREGISTER ||
+ event->event == IB_EVENT_LID_CHANGE ||
+ event->event == IB_EVENT_PORT_ACTIVE) {
+ unsigned long delay =
+ msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);
+
+ spin_lock_irqsave(&port->classport_lock, flags);
+ port->classport_info.valid = false;
+ port->classport_info.retry_cnt = 0;
+ spin_unlock_irqrestore(&port->classport_lock, flags);
+ queue_delayed_work(ib_wq,
+ &port->ib_cpi_work, delay);
+ }
+ queue_work(ib_wq, &sa_dev->port[port_num].update_task);
+ }
+}
+
+static int ib_sa_add_one(struct ib_device *device)
{
struct ib_sa_device *sa_dev;
int s, e, i;
int count = 0;
+ int ret;
s = rdma_start_port(device);
e = rdma_end_port(device);
- sa_dev = kzalloc(sizeof *sa_dev +
- (e - s + 1) * sizeof (struct ib_sa_port),
+ sa_dev = kzalloc(struct_size(sa_dev, port,
+ size_add(size_sub(e, s), 1)),
GFP_KERNEL);
if (!sa_dev)
- return;
+ return -ENOMEM;
sa_dev->start_port = s;
sa_dev->end_port = e;
@@ -1651,20 +2432,30 @@ static void ib_sa_add_one(struct ib_device *device)
sa_dev->port[i].sm_ah = NULL;
sa_dev->port[i].port_num = i + s;
+ spin_lock_init(&sa_dev->port[i].classport_lock);
+ sa_dev->port[i].classport_info.valid = false;
+
sa_dev->port[i].agent =
ib_register_mad_agent(device, i + s, IB_QPT_GSI,
- NULL, 0, send_handler,
- recv_handler, sa_dev, 0);
- if (IS_ERR(sa_dev->port[i].agent))
+ NULL, IB_MGMT_RMPP_VERSION,
+ send_handler, recv_handler,
+ sa_dev, 0);
+ if (IS_ERR(sa_dev->port[i].agent)) {
+ ret = PTR_ERR(sa_dev->port[i].agent);
goto err;
+ }
INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
+ INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work,
+ update_ib_cpi);
count++;
}
- if (!count)
+ if (!count) {
+ ret = -EOPNOTSUPP;
goto free;
+ }
ib_set_client_data(device, &sa_client, sa_dev);
@@ -1676,15 +2467,14 @@ static void ib_sa_add_one(struct ib_device *device)
*/
INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
- if (ib_register_event_handler(&sa_dev->event_handler))
- goto err;
+ ib_register_event_handler(&sa_dev->event_handler);
for (i = 0; i <= e - s; ++i) {
if (rdma_cap_ib_sa(device, i + 1))
update_sm_ah(&sa_dev->port[i].update_task);
}
- return;
+ return 0;
err:
while (--i >= 0) {
@@ -1693,7 +2483,7 @@ err:
}
free:
kfree(sa_dev);
- return;
+ return ret;
}
static void ib_sa_remove_one(struct ib_device *device, void *client_data)
@@ -1701,15 +2491,12 @@ static void ib_sa_remove_one(struct ib_device *device, void *client_data)
struct ib_sa_device *sa_dev = client_data;
int i;
- if (!sa_dev)
- return;
-
ib_unregister_event_handler(&sa_dev->event_handler);
-
flush_workqueue(ib_wq);
for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
if (rdma_cap_ib_sa(device, i + 1)) {
+ cancel_delayed_work_sync(&sa_dev->port[i].ib_cpi_work);
ib_unregister_mad_agent(sa_dev->port[i].agent);
if (sa_dev->port[i].sm_ah)
kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
@@ -1720,7 +2507,7 @@ static void ib_sa_remove_one(struct ib_device *device, void *client_data)
kfree(sa_dev);
}
-static int __init ib_sa_init(void)
+int ib_sa_init(void)
{
int ret;
@@ -1730,33 +2517,26 @@ static int __init ib_sa_init(void)
ret = ib_register_client(&sa_client);
if (ret) {
- printk(KERN_ERR "Couldn't register ib_sa client\n");
+ pr_err("Couldn't register ib_sa client\n");
goto err1;
}
ret = mcast_init();
if (ret) {
- printk(KERN_ERR "Couldn't initialize multicast handling\n");
+ pr_err("Couldn't initialize multicast handling\n");
goto err2;
}
- ib_nl_wq = create_singlethread_workqueue("ib_nl_sa_wq");
+ ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
if (!ib_nl_wq) {
ret = -ENOMEM;
goto err3;
}
- if (ibnl_add_client(RDMA_NL_LS, RDMA_NL_LS_NUM_OPS,
- ib_sa_cb_table)) {
- pr_err("Failed to add netlink callback\n");
- ret = -EINVAL;
- goto err4;
- }
INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);
return 0;
-err4:
- destroy_workqueue(ib_nl_wq);
+
err3:
mcast_cleanup();
err2:
@@ -1765,16 +2545,11 @@ err1:
return ret;
}
-static void __exit ib_sa_cleanup(void)
+void ib_sa_cleanup(void)
{
- ibnl_remove_client(RDMA_NL_LS);
cancel_delayed_work(&ib_nl_timed_work);
- flush_workqueue(ib_nl_wq);
destroy_workqueue(ib_nl_wq);
mcast_cleanup();
ib_unregister_client(&sa_client);
- idr_destroy(&query_idr);
+ WARN_ON(!xa_empty(&queries));
}
-
-module_init(ib_sa_init);
-module_exit(ib_sa_cleanup);
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
new file mode 100644
index 000000000000..3512c2e54efc
--- /dev/null
+++ b/drivers/infiniband/core/security.c
@@ -0,0 +1,750 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/security.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_cache.h>
+#include "core_priv.h"
+#include "mad_priv.h"
+
+static LIST_HEAD(mad_agent_list);
+/* Lock to protect mad_agent_list */
+static DEFINE_SPINLOCK(mad_agent_list_lock);
+
+static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp)
+{
+ struct pkey_index_qp_list *pkey = NULL;
+ struct pkey_index_qp_list *tmp_pkey;
+ struct ib_device *dev = pp->sec->dev;
+
+ spin_lock(&dev->port_data[pp->port_num].pkey_list_lock);
+ list_for_each_entry (tmp_pkey, &dev->port_data[pp->port_num].pkey_list,
+ pkey_index_list) {
+ if (tmp_pkey->pkey_index == pp->pkey_index) {
+ pkey = tmp_pkey;
+ break;
+ }
+ }
+ spin_unlock(&dev->port_data[pp->port_num].pkey_list_lock);
+ return pkey;
+}
+
+static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp,
+ u16 *pkey,
+ u64 *subnet_prefix)
+{
+ struct ib_device *dev = pp->sec->dev;
+ int ret;
+
+ ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey);
+ if (ret)
+ return ret;
+
+ ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix);
+
+ return ret;
+}
+
+static int enforce_qp_pkey_security(u16 pkey,
+ u64 subnet_prefix,
+ struct ib_qp_security *qp_sec)
+{
+ struct ib_qp_security *shared_qp_sec;
+ int ret;
+
+ ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(shared_qp_sec,
+ &qp_sec->shared_qp_list,
+ shared_qp_list) {
+ ret = security_ib_pkey_access(shared_qp_sec->security,
+ subnet_prefix,
+ pkey);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/* The caller of this function must hold the QP security
+ * mutex of the QP of the security structure in *pps.
+ *
+ * It takes separate ports_pkeys and security structure
+ * because in some cases the pps will be for a new settings
+ * or the pps will be for the real QP and security structure
+ * will be for a shared QP.
+ */
+static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps,
+ struct ib_qp_security *sec)
+{
+ u64 subnet_prefix;
+ u16 pkey;
+ int ret = 0;
+
+ if (!pps)
+ return 0;
+
+ if (pps->main.state != IB_PORT_PKEY_NOT_VALID) {
+ ret = get_pkey_and_subnet_prefix(&pps->main,
+ &pkey,
+ &subnet_prefix);
+ if (ret)
+ return ret;
+
+ ret = enforce_qp_pkey_security(pkey,
+ subnet_prefix,
+ sec);
+ if (ret)
+ return ret;
+ }
+
+ if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) {
+ ret = get_pkey_and_subnet_prefix(&pps->alt,
+ &pkey,
+ &subnet_prefix);
+ if (ret)
+ return ret;
+
+ ret = enforce_qp_pkey_security(pkey,
+ subnet_prefix,
+ sec);
+ }
+
+ return ret;
+}
+
+/* The caller of this function must hold the QP security
+ * mutex.
+ */
+static void qp_to_error(struct ib_qp_security *sec)
+{
+ struct ib_qp_security *shared_qp_sec;
+ struct ib_qp_attr attr = {
+ .qp_state = IB_QPS_ERR
+ };
+ struct ib_event event = {
+ .event = IB_EVENT_QP_FATAL
+ };
+
+ /* If the QP is in the process of being destroyed
+ * the qp pointer in the security structure is
+ * undefined. It cannot be modified now.
+ */
+ if (sec->destroying)
+ return;
+
+ ib_modify_qp(sec->qp,
+ &attr,
+ IB_QP_STATE);
+
+ if (sec->qp->event_handler && sec->qp->qp_context) {
+ event.element.qp = sec->qp;
+ sec->qp->event_handler(&event,
+ sec->qp->qp_context);
+ }
+
+ list_for_each_entry(shared_qp_sec,
+ &sec->shared_qp_list,
+ shared_qp_list) {
+ struct ib_qp *qp = shared_qp_sec->qp;
+
+ if (qp->event_handler && qp->qp_context) {
+ event.element.qp = qp;
+ event.device = qp->device;
+ qp->event_handler(&event,
+ qp->qp_context);
+ }
+ }
+}
+
+static inline void check_pkey_qps(struct pkey_index_qp_list *pkey,
+ struct ib_device *device,
+ u32 port_num,
+ u64 subnet_prefix)
+{
+ struct ib_port_pkey *pp, *tmp_pp;
+ bool comp;
+ LIST_HEAD(to_error_list);
+ u16 pkey_val;
+
+ if (!ib_get_cached_pkey(device,
+ port_num,
+ pkey->pkey_index,
+ &pkey_val)) {
+ spin_lock(&pkey->qp_list_lock);
+ list_for_each_entry(pp, &pkey->qp_list, qp_list) {
+ if (atomic_read(&pp->sec->error_list_count))
+ continue;
+
+ if (enforce_qp_pkey_security(pkey_val,
+ subnet_prefix,
+ pp->sec)) {
+ atomic_inc(&pp->sec->error_list_count);
+ list_add(&pp->to_error_list,
+ &to_error_list);
+ }
+ }
+ spin_unlock(&pkey->qp_list_lock);
+ }
+
+ list_for_each_entry_safe(pp,
+ tmp_pp,
+ &to_error_list,
+ to_error_list) {
+ mutex_lock(&pp->sec->mutex);
+ qp_to_error(pp->sec);
+ list_del(&pp->to_error_list);
+ atomic_dec(&pp->sec->error_list_count);
+ comp = pp->sec->destroying;
+ mutex_unlock(&pp->sec->mutex);
+
+ if (comp)
+ complete(&pp->sec->error_complete);
+ }
+}
+
+/* The caller of this function must hold the QP security
+ * mutex.
+ */
+static int port_pkey_list_insert(struct ib_port_pkey *pp)
+{
+ struct pkey_index_qp_list *tmp_pkey;
+ struct pkey_index_qp_list *pkey;
+ struct ib_device *dev;
+ u32 port_num = pp->port_num;
+ int ret = 0;
+
+ if (pp->state != IB_PORT_PKEY_VALID)
+ return 0;
+
+ dev = pp->sec->dev;
+
+ pkey = get_pkey_idx_qp_list(pp);
+
+ if (!pkey) {
+ bool found = false;
+
+ pkey = kzalloc(sizeof(*pkey), GFP_KERNEL);
+ if (!pkey)
+ return -ENOMEM;
+
+ spin_lock(&dev->port_data[port_num].pkey_list_lock);
+ /* Check for the PKey again. A racing process may
+ * have created it.
+ */
+ list_for_each_entry(tmp_pkey,
+ &dev->port_data[port_num].pkey_list,
+ pkey_index_list) {
+ if (tmp_pkey->pkey_index == pp->pkey_index) {
+ kfree(pkey);
+ pkey = tmp_pkey;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ pkey->pkey_index = pp->pkey_index;
+ spin_lock_init(&pkey->qp_list_lock);
+ INIT_LIST_HEAD(&pkey->qp_list);
+ list_add(&pkey->pkey_index_list,
+ &dev->port_data[port_num].pkey_list);
+ }
+ spin_unlock(&dev->port_data[port_num].pkey_list_lock);
+ }
+
+ spin_lock(&pkey->qp_list_lock);
+ list_add(&pp->qp_list, &pkey->qp_list);
+ spin_unlock(&pkey->qp_list_lock);
+
+ pp->state = IB_PORT_PKEY_LISTED;
+
+ return ret;
+}
+
+/* The caller of this function must hold the QP security
+ * mutex.
+ */
+static void port_pkey_list_remove(struct ib_port_pkey *pp)
+{
+ struct pkey_index_qp_list *pkey;
+
+ if (pp->state != IB_PORT_PKEY_LISTED)
+ return;
+
+ pkey = get_pkey_idx_qp_list(pp);
+
+ spin_lock(&pkey->qp_list_lock);
+ list_del(&pp->qp_list);
+ spin_unlock(&pkey->qp_list_lock);
+
+ /* The setting may still be valid, i.e. after
+ * a destroy has failed for example.
+ */
+ pp->state = IB_PORT_PKEY_VALID;
+}
+
+static void destroy_qp_security(struct ib_qp_security *sec)
+{
+ security_ib_free_security(sec->security);
+ kfree(sec->ports_pkeys);
+ kfree(sec);
+}
+
+/* The caller of this function must hold the QP security
+ * mutex.
+ */
+static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
+ const struct ib_qp_attr *qp_attr,
+ int qp_attr_mask)
+{
+ struct ib_ports_pkeys *new_pps;
+ struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys;
+
+ new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL);
+ if (!new_pps)
+ return NULL;
+
+ if (qp_attr_mask & IB_QP_PORT)
+ new_pps->main.port_num = qp_attr->port_num;
+ else if (qp_pps)
+ new_pps->main.port_num = qp_pps->main.port_num;
+
+ if (qp_attr_mask & IB_QP_PKEY_INDEX)
+ new_pps->main.pkey_index = qp_attr->pkey_index;
+ else if (qp_pps)
+ new_pps->main.pkey_index = qp_pps->main.pkey_index;
+
+ if (((qp_attr_mask & IB_QP_PKEY_INDEX) &&
+ (qp_attr_mask & IB_QP_PORT)) ||
+ (qp_pps && qp_pps->main.state != IB_PORT_PKEY_NOT_VALID))
+ new_pps->main.state = IB_PORT_PKEY_VALID;
+
+ if (qp_attr_mask & IB_QP_ALT_PATH) {
+ new_pps->alt.port_num = qp_attr->alt_port_num;
+ new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
+ new_pps->alt.state = IB_PORT_PKEY_VALID;
+ } else if (qp_pps) {
+ new_pps->alt.port_num = qp_pps->alt.port_num;
+ new_pps->alt.pkey_index = qp_pps->alt.pkey_index;
+ if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID)
+ new_pps->alt.state = IB_PORT_PKEY_VALID;
+ }
+
+ new_pps->main.sec = qp->qp_sec;
+ new_pps->alt.sec = qp->qp_sec;
+ return new_pps;
+}
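A short illustration of the merge rules implemented by get_new_pps() above (an explanatory comment, not part of the patch):

/*
 *   qp_attr_mask = IB_QP_PKEY_INDEX only:
 *     main.pkey_index comes from qp_attr, main.port_num is carried over
 *     from the existing ports_pkeys, and main.state becomes
 *     IB_PORT_PKEY_VALID only if the existing main state was not
 *     IB_PORT_PKEY_NOT_VALID.
 *
 *   qp_attr_mask = IB_QP_PKEY_INDEX | IB_QP_PORT:
 *     both fields come from qp_attr and main.state is set to
 *     IB_PORT_PKEY_VALID unconditionally.
 */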
+
+int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
+{
+ struct ib_qp *real_qp = qp->real_qp;
+ int ret;
+
+ ret = ib_create_qp_security(qp, dev);
+
+ if (ret)
+ return ret;
+
+ if (!qp->qp_sec)
+ return 0;
+
+ mutex_lock(&real_qp->qp_sec->mutex);
+ ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
+ qp->qp_sec);
+
+ if (ret)
+ goto ret;
+
+ if (qp != real_qp)
+ list_add(&qp->qp_sec->shared_qp_list,
+ &real_qp->qp_sec->shared_qp_list);
+ret:
+ mutex_unlock(&real_qp->qp_sec->mutex);
+ if (ret)
+ destroy_qp_security(qp->qp_sec);
+
+ return ret;
+}
+
+void ib_close_shared_qp_security(struct ib_qp_security *sec)
+{
+ struct ib_qp *real_qp = sec->qp->real_qp;
+
+ mutex_lock(&real_qp->qp_sec->mutex);
+ list_del(&sec->shared_qp_list);
+ mutex_unlock(&real_qp->qp_sec->mutex);
+
+ destroy_qp_security(sec);
+}
+
+int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
+{
+ unsigned int i;
+ bool is_ib = false;
+ int ret;
+
+ rdma_for_each_port (dev, i) {
+ is_ib = rdma_protocol_ib(dev, i);
+ if (is_ib)
+ break;
+ }
+
+ /* If this isn't an IB device don't create the security context */
+ if (!is_ib)
+ return 0;
+
+ qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
+ if (!qp->qp_sec)
+ return -ENOMEM;
+
+ qp->qp_sec->qp = qp;
+ qp->qp_sec->dev = dev;
+ mutex_init(&qp->qp_sec->mutex);
+ INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list);
+ atomic_set(&qp->qp_sec->error_list_count, 0);
+ init_completion(&qp->qp_sec->error_complete);
+ ret = security_ib_alloc_security(&qp->qp_sec->security);
+ if (ret) {
+ kfree(qp->qp_sec);
+ qp->qp_sec = NULL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ib_create_qp_security);
+
+void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
+{
+ /* Return if not IB */
+ if (!sec)
+ return;
+
+ mutex_lock(&sec->mutex);
+
+ /* Remove the QP from the lists so it won't get added to
+ * a to_error_list during the destroy process.
+ */
+ if (sec->ports_pkeys) {
+ port_pkey_list_remove(&sec->ports_pkeys->main);
+ port_pkey_list_remove(&sec->ports_pkeys->alt);
+ }
+
+ /* If the QP is already in one or more of those lists
+ * the destroying flag will ensure the to error flow
+ * doesn't operate on an undefined QP.
+ */
+ sec->destroying = true;
+
+ /* Record the error list count to know how many completions
+ * to wait for.
+ */
+ sec->error_comps_pending = atomic_read(&sec->error_list_count);
+
+ mutex_unlock(&sec->mutex);
+}
+
+void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
+{
+ int ret;
+ int i;
+
+ /* Return if not IB */
+ if (!sec)
+ return;
+
+ /* If a concurrent cache update is in progress this
+ * QP security could be marked for an error state
+ * transition. Wait for this to complete.
+ */
+ for (i = 0; i < sec->error_comps_pending; i++)
+ wait_for_completion(&sec->error_complete);
+
+ mutex_lock(&sec->mutex);
+ sec->destroying = false;
+
+ /* Restore the position in the lists and verify
+ * access is still allowed in case a cache update
+ * occurred while attempting to destroy.
+ *
+	 * Because these settings were listed already
+ * and removed during ib_destroy_qp_security_begin
+ * we know the pkey_index_qp_list for the PKey
+ * already exists so port_pkey_list_insert won't fail.
+ */
+ if (sec->ports_pkeys) {
+ port_pkey_list_insert(&sec->ports_pkeys->main);
+ port_pkey_list_insert(&sec->ports_pkeys->alt);
+ }
+
+ ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec);
+ if (ret)
+ qp_to_error(sec);
+
+ mutex_unlock(&sec->mutex);
+}
+
+void ib_destroy_qp_security_end(struct ib_qp_security *sec)
+{
+ int i;
+
+ /* Return if not IB */
+ if (!sec)
+ return;
+
+ /* If a concurrent cache update is occurring, we must
+ * wait until this QP security structure is processed
+ * in the QP to-error flow before destroying it, because
+ * the to_error_list is in use.
+ */
+ for (i = 0; i < sec->error_comps_pending; i++)
+ wait_for_completion(&sec->error_complete);
+
+ destroy_qp_security(sec);
+}
+
+void ib_security_cache_change(struct ib_device *device,
+ u32 port_num,
+ u64 subnet_prefix)
+{
+ struct pkey_index_qp_list *pkey;
+
+ list_for_each_entry (pkey, &device->port_data[port_num].pkey_list,
+ pkey_index_list) {
+ check_pkey_qps(pkey,
+ device,
+ port_num,
+ subnet_prefix);
+ }
+}
+
+void ib_security_release_port_pkey_list(struct ib_device *device)
+{
+ struct pkey_index_qp_list *pkey, *tmp_pkey;
+ unsigned int i;
+
+ rdma_for_each_port (device, i) {
+ list_for_each_entry_safe(pkey,
+ tmp_pkey,
+ &device->port_data[i].pkey_list,
+ pkey_index_list) {
+ list_del(&pkey->pkey_index_list);
+ kfree(pkey);
+ }
+ }
+}
+
+int ib_security_modify_qp(struct ib_qp *qp,
+ struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_udata *udata)
+{
+ int ret = 0;
+ struct ib_ports_pkeys *tmp_pps;
+ struct ib_ports_pkeys *new_pps = NULL;
+ struct ib_qp *real_qp = qp->real_qp;
+ bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
+ real_qp->qp_type == IB_QPT_GSI ||
+ real_qp->qp_type >= IB_QPT_RESERVED1);
+ bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
+ (qp_attr_mask & IB_QP_ALT_PATH));
+
+ WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
+ rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
+ !real_qp->qp_sec),
+ "%s: QP security is not initialized for IB QP: %u\n",
+ __func__, real_qp->qp_num);
+
+ /* The port/pkey settings are maintained only for the real QP. Open
+ * handles on the real QP will be in the shared_qp_list. When
+ * enforcing security on the real QP all the shared QPs will be
+ * checked as well.
+ */
+
+ if (pps_change && !special_qp && real_qp->qp_sec) {
+ mutex_lock(&real_qp->qp_sec->mutex);
+ new_pps = get_new_pps(real_qp,
+ qp_attr,
+ qp_attr_mask);
+ if (!new_pps) {
+ mutex_unlock(&real_qp->qp_sec->mutex);
+ return -ENOMEM;
+ }
+ /* Add this QP to the lists for the new port
+ * and pkey settings before checking for permission
+ * in case there is a concurrent cache update
+ * occurring. Walking the list for a cache change
+ * doesn't acquire the security mutex unless it's
+ * sending the QP to error.
+ */
+ ret = port_pkey_list_insert(&new_pps->main);
+
+ if (!ret)
+ ret = port_pkey_list_insert(&new_pps->alt);
+
+ if (!ret)
+ ret = check_qp_port_pkey_settings(new_pps,
+ real_qp->qp_sec);
+ }
+
+ if (!ret)
+ ret = real_qp->device->ops.modify_qp(real_qp,
+ qp_attr,
+ qp_attr_mask,
+ udata);
+
+ if (new_pps) {
+ /* Clean up the lists and free the appropriate
+ * ports_pkeys structure.
+ */
+ if (ret) {
+ tmp_pps = new_pps;
+ } else {
+ tmp_pps = real_qp->qp_sec->ports_pkeys;
+ real_qp->qp_sec->ports_pkeys = new_pps;
+ }
+
+ if (tmp_pps) {
+ port_pkey_list_remove(&tmp_pps->main);
+ port_pkey_list_remove(&tmp_pps->alt);
+ }
+ kfree(tmp_pps);
+ mutex_unlock(&real_qp->qp_sec->mutex);
+ }
+ return ret;
+}
+
+static int ib_security_pkey_access(struct ib_device *dev,
+ u32 port_num,
+ u16 pkey_index,
+ void *sec)
+{
+ u64 subnet_prefix;
+ u16 pkey;
+ int ret;
+
+ if (!rdma_protocol_ib(dev, port_num))
+ return 0;
+
+ ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
+ if (ret)
+ return ret;
+
+ ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix);
+
+ return security_ib_pkey_access(sec, subnet_prefix, pkey);
+}
+
+void ib_mad_agent_security_change(void)
+{
+ struct ib_mad_agent *ag;
+
+ spin_lock(&mad_agent_list_lock);
+ list_for_each_entry(ag,
+ &mad_agent_list,
+ mad_agent_sec_list)
+ WRITE_ONCE(ag->smp_allowed,
+ !security_ib_endport_manage_subnet(ag->security,
+ dev_name(&ag->device->dev), ag->port_num));
+ spin_unlock(&mad_agent_list_lock);
+}
+
+int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
+ enum ib_qp_type qp_type)
+{
+ int ret;
+
+ if (!rdma_protocol_ib(agent->device, agent->port_num))
+ return 0;
+
+ INIT_LIST_HEAD(&agent->mad_agent_sec_list);
+
+ ret = security_ib_alloc_security(&agent->security);
+ if (ret)
+ return ret;
+
+ if (qp_type != IB_QPT_SMI)
+ return 0;
+
+ spin_lock(&mad_agent_list_lock);
+ ret = security_ib_endport_manage_subnet(agent->security,
+ dev_name(&agent->device->dev),
+ agent->port_num);
+ if (ret)
+ goto free_security;
+
+ WRITE_ONCE(agent->smp_allowed, true);
+ list_add(&agent->mad_agent_sec_list, &mad_agent_list);
+ spin_unlock(&mad_agent_list_lock);
+ return 0;
+
+free_security:
+ spin_unlock(&mad_agent_list_lock);
+ security_ib_free_security(agent->security);
+ return ret;
+}
+
+void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
+{
+ if (!rdma_protocol_ib(agent->device, agent->port_num))
+ return;
+
+ if (agent->qp->qp_type == IB_QPT_SMI) {
+ spin_lock(&mad_agent_list_lock);
+ list_del(&agent->mad_agent_sec_list);
+ spin_unlock(&mad_agent_list_lock);
+ }
+
+ security_ib_free_security(agent->security);
+}
+
+int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
+{
+ if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
+ return 0;
+
+ if (map->agent.qp->qp_type == IB_QPT_SMI) {
+ if (!READ_ONCE(map->agent.smp_allowed))
+ return -EACCES;
+ return 0;
+ }
+
+ return ib_security_pkey_access(map->agent.device,
+ map->agent.port_num,
+ pkey_index,
+ map->agent.security);
+}
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c
index f19b23817c2b..45f09b75c893 100644
--- a/drivers/infiniband/core/smi.c
+++ b/drivers/infiniband/core/smi.c
@@ -41,7 +41,7 @@
#include "smi.h"
#include "opa_smi.h"
-static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num,
+static enum smi_action __smi_handle_dr_smp_send(bool is_switch, u32 port_num,
u8 *hop_ptr, u8 hop_cnt,
const u8 *initial_path,
const u8 *return_path,
@@ -127,7 +127,7 @@ static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num,
* Return IB_SMI_DISCARD if the SMP should be discarded
*/
enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
- bool is_switch, int port_num)
+ bool is_switch, u32 port_num)
{
return __smi_handle_dr_smp_send(is_switch, port_num,
&smp->hop_ptr, smp->hop_cnt,
@@ -139,7 +139,7 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
}
enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
- bool is_switch, int port_num)
+ bool is_switch, u32 port_num)
{
return __smi_handle_dr_smp_send(is_switch, port_num,
&smp->hop_ptr, smp->hop_cnt,
@@ -152,7 +152,7 @@ enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
OPA_LID_PERMISSIVE);
}
-static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num,
+static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, u32 port_num,
int phys_port_cnt,
u8 *hop_ptr, u8 hop_cnt,
const u8 *initial_path,
@@ -238,7 +238,7 @@ static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num,
* Return IB_SMI_DISCARD if the SMP should be dropped
*/
enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
- int port_num, int phys_port_cnt)
+ u32 port_num, int phys_port_cnt)
{
return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
&smp->hop_ptr, smp->hop_cnt,
@@ -254,7 +254,7 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
* Return IB_SMI_DISCARD if the SMP should be dropped
*/
enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
- int port_num, int phys_port_cnt)
+ u32 port_num, int phys_port_cnt)
{
return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
&smp->hop_ptr, smp->hop_cnt,
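
The smi.c hunks above only widen port_num from int to u32 in the directed-route SMP helpers. The fragment below is an illustrative sketch, not taken from the patch: example_accept_dr_smp is a hypothetical name, and it assumes compilation inside drivers/infiniband/core where the private smi.h header is available.

#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include "smi.h"

/* Hypothetical fragment mirroring a MAD receive path: with port_num now
 * u32, ports above 255 pass through the SMI helpers without truncation.
 */
static bool example_accept_dr_smp(struct ib_device *dev, struct ib_smp *smp,
				  u32 port_num, int phys_port_cnt)
{
	if (smi_handle_dr_smp_recv(smp, rdma_cap_ib_switch(dev),
				   port_num, phys_port_cnt) == IB_SMI_DISCARD)
		return false;

	/* smi_check_local_smp() decides whether the local SMA handles it */
	return smi_check_local_smp(smp, dev) == IB_SMI_HANDLE;
}
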
diff --git a/drivers/infiniband/core/smi.h b/drivers/infiniband/core/smi.h
index 33c91c8a16e9..e350ed623c45 100644
--- a/drivers/infiniband/core/smi.h
+++ b/drivers/infiniband/core/smi.h
@@ -52,11 +52,11 @@ enum smi_forward_action {
};
enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
- int port_num, int phys_port_cnt);
+ u32 port_num, int phys_port_cnt);
int smi_get_fwd_port(struct ib_smp *smp);
extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp);
extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
- bool is_switch, int port_num);
+ bool is_switch, u32 port_num);
/*
* Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
@@ -67,7 +67,7 @@ static inline enum smi_action smi_check_local_smp(struct ib_smp *smp,
{
/* C14-9:3 -- We're at the end of the DR segment of path */
/* C14-9:4 -- Hop Pointer = Hop Count + 1 -> give to SMA/SM */
- return ((device->process_mad &&
+ return ((device->ops.process_mad &&
!ib_get_smp_direction(smp) &&
(smp->hop_ptr == smp->hop_cnt + 1)) ?
IB_SMI_HANDLE : IB_SMI_DISCARD);
@@ -82,7 +82,7 @@ static inline enum smi_action smi_check_local_returning_smp(struct ib_smp *smp,
{
/* C14-13:3 -- We're at the end of the DR segment of path */
/* C14-13:4 -- Hop Pointer == 0 -> give to SM */
- return ((device->process_mad &&
+ return ((device->ops.process_mad &&
ib_get_smp_direction(smp) &&
!smp->hop_ptr) ? IB_SMI_HANDLE : IB_SMI_DISCARD);
}
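
The smi.h change above reflects process_mad having moved from struct ib_device into struct ib_device_ops, which is why smi_check_local_smp() now tests device->ops.process_mad. A hedged provider-side sketch follows; example_process_mad, example_dev_ops and example_wire_up_ops are hypothetical names, and the callback signature is inferred from the get_perf_mad() call site in the sysfs.c hunk below, so it may differ between kernel versions.

#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>

/* Hypothetical provider callback: a real driver would build the reply
 * MAD in out_mad before returning.
 */
static int example_process_mad(struct ib_device *ibdev, int mad_flags,
			       u32 port_num, const struct ib_wc *in_wc,
			       const struct ib_grh *in_grh,
			       const struct ib_mad *in_mad,
			       struct ib_mad *out_mad, size_t *out_mad_size,
			       u16 *out_mad_pkey_index)
{
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static const struct ib_device_ops example_dev_ops = {
	.process_mad = example_process_mad,
};

/* Wiring the ops table up makes dev->ops.process_mad non-NULL, so the
 * SMI/PMA paths treat the device as capable of local MAD processing.
 */
static void example_wire_up_ops(struct ib_device *ibdev)
{
	ib_set_device_ops(ibdev, &example_dev_ops);
}
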
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 34cdd74b0a17..0ed862b38b44 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -37,152 +37,267 @@
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
#include <rdma/ib_mad.h>
+#include <rdma/ib_pma.h>
+#include <rdma/ib_cache.h>
+#include <rdma/rdma_counter.h>
+#include <rdma/ib_sysfs.h>
+
+struct port_table_attribute {
+ struct ib_port_attribute attr;
+ char name[8];
+ int index;
+ __be16 attr_id;
+};
+
+struct gid_attr_group {
+ struct ib_port *port;
+ struct kobject kobj;
+ struct attribute_group groups[2];
+ const struct attribute_group *groups_list[3];
+ struct port_table_attribute attrs_list[];
+};
struct ib_port {
- struct kobject kobj;
- struct ib_device *ibdev;
- struct attribute_group gid_group;
- struct attribute_group pkey_group;
- u8 port_num;
+ struct kobject kobj;
+ struct ib_device *ibdev;
+ struct gid_attr_group *gid_attr_group;
+ struct hw_stats_port_data *hw_stats_data;
+
+ struct attribute_group groups[3];
+ const struct attribute_group *groups_list[5];
+ u32 port_num;
+ struct port_table_attribute attrs_list[];
};
-struct port_attribute {
- struct attribute attr;
- ssize_t (*show)(struct ib_port *, struct port_attribute *, char *buf);
- ssize_t (*store)(struct ib_port *, struct port_attribute *,
+struct hw_stats_device_attribute {
+ struct device_attribute attr;
+ ssize_t (*show)(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ unsigned int index, unsigned int port_num, char *buf);
+ ssize_t (*store)(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ unsigned int index, unsigned int port_num,
const char *buf, size_t count);
};
-#define PORT_ATTR(_name, _mode, _show, _store) \
-struct port_attribute port_attr_##_name = __ATTR(_name, _mode, _show, _store)
+struct hw_stats_port_attribute {
+ struct ib_port_attribute attr;
+ ssize_t (*show)(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ unsigned int index, unsigned int port_num, char *buf);
+ ssize_t (*store)(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ unsigned int index, unsigned int port_num,
+ const char *buf, size_t count);
+};
-#define PORT_ATTR_RO(_name) \
-struct port_attribute port_attr_##_name = __ATTR_RO(_name)
+struct hw_stats_device_data {
+ struct attribute_group group;
+ struct rdma_hw_stats *stats;
+ struct hw_stats_device_attribute attrs[];
+};
-struct port_table_attribute {
- struct port_attribute attr;
- char name[8];
- int index;
+struct hw_stats_port_data {
+ struct rdma_hw_stats *stats;
+ struct hw_stats_port_attribute attrs[];
};
static ssize_t port_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
- struct port_attribute *port_attr =
- container_of(attr, struct port_attribute, attr);
+ struct ib_port_attribute *port_attr =
+ container_of(attr, struct ib_port_attribute, attr);
struct ib_port *p = container_of(kobj, struct ib_port, kobj);
if (!port_attr->show)
return -EIO;
- return port_attr->show(p, port_attr, buf);
+ return port_attr->show(p->ibdev, p->port_num, port_attr, buf);
+}
+
+static ssize_t port_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ib_port_attribute *port_attr =
+ container_of(attr, struct ib_port_attribute, attr);
+ struct ib_port *p = container_of(kobj, struct ib_port, kobj);
+
+ if (!port_attr->store)
+ return -EIO;
+ return port_attr->store(p->ibdev, p->port_num, port_attr, buf, count);
+}
+
+struct ib_device *ib_port_sysfs_get_ibdev_kobj(struct kobject *kobj,
+ u32 *port_num)
+{
+ struct ib_port *port = container_of(kobj, struct ib_port, kobj);
+
+ *port_num = port->port_num;
+ return port->ibdev;
}
+EXPORT_SYMBOL(ib_port_sysfs_get_ibdev_kobj);
static const struct sysfs_ops port_sysfs_ops = {
- .show = port_attr_show
+ .show = port_attr_show,
+ .store = port_attr_store
+};
+
+static ssize_t hw_stat_device_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hw_stats_device_attribute *stat_attr =
+ container_of(attr, struct hw_stats_device_attribute, attr);
+ struct ib_device *ibdev = container_of(dev, struct ib_device, dev);
+
+ return stat_attr->show(ibdev, ibdev->hw_stats_data->stats,
+ stat_attr - ibdev->hw_stats_data->attrs, 0, buf);
+}
+
+static ssize_t hw_stat_device_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hw_stats_device_attribute *stat_attr =
+ container_of(attr, struct hw_stats_device_attribute, attr);
+ struct ib_device *ibdev = container_of(dev, struct ib_device, dev);
+
+ return stat_attr->store(ibdev, ibdev->hw_stats_data->stats,
+ stat_attr - ibdev->hw_stats_data->attrs, 0, buf,
+ count);
+}
+
+static ssize_t hw_stat_port_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
+{
+ struct hw_stats_port_attribute *stat_attr =
+ container_of(attr, struct hw_stats_port_attribute, attr);
+ struct ib_port *port = ibdev->port_data[port_num].sysfs;
+
+ return stat_attr->show(ibdev, port->hw_stats_data->stats,
+ stat_attr - port->hw_stats_data->attrs,
+ port->port_num, buf);
+}
+
+static ssize_t hw_stat_port_store(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hw_stats_port_attribute *stat_attr =
+ container_of(attr, struct hw_stats_port_attribute, attr);
+ struct ib_port *port = ibdev->port_data[port_num].sysfs;
+
+ return stat_attr->store(ibdev, port->hw_stats_data->stats,
+ stat_attr - port->hw_stats_data->attrs,
+ port->port_num, buf, count);
+}
+
+static ssize_t gid_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct ib_port_attribute *port_attr =
+ container_of(attr, struct ib_port_attribute, attr);
+ struct ib_port *p = container_of(kobj, struct gid_attr_group,
+ kobj)->port;
+
+ if (!port_attr->show)
+ return -EIO;
+
+ return port_attr->show(p->ibdev, p->port_num, port_attr, buf);
+}
+
+static const struct sysfs_ops gid_attr_sysfs_ops = {
+ .show = gid_attr_show
};
-static ssize_t state_show(struct ib_port *p, struct port_attribute *unused,
- char *buf)
+static ssize_t state_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *unused, char *buf)
{
struct ib_port_attr attr;
ssize_t ret;
- static const char *state_name[] = {
- [IB_PORT_NOP] = "NOP",
- [IB_PORT_DOWN] = "DOWN",
- [IB_PORT_INIT] = "INIT",
- [IB_PORT_ARMED] = "ARMED",
- [IB_PORT_ACTIVE] = "ACTIVE",
- [IB_PORT_ACTIVE_DEFER] = "ACTIVE_DEFER"
- };
-
- ret = ib_query_port(p->ibdev, p->port_num, &attr);
+ ret = ib_query_port(ibdev, port_num, &attr);
if (ret)
return ret;
- return sprintf(buf, "%d: %s\n", attr.state,
- attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ?
- state_name[attr.state] : "UNKNOWN");
+ return sysfs_emit(buf, "%d: %s\n", attr.state,
+ ib_port_state_to_str(attr.state));
}
-static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
- char *buf)
+static ssize_t lid_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *unused, char *buf)
{
struct ib_port_attr attr;
ssize_t ret;
- ret = ib_query_port(p->ibdev, p->port_num, &attr);
+ ret = ib_query_port(ibdev, port_num, &attr);
if (ret)
return ret;
- return sprintf(buf, "0x%x\n", attr.lid);
+ return sysfs_emit(buf, "0x%x\n", attr.lid);
}
-static ssize_t lid_mask_count_show(struct ib_port *p,
- struct port_attribute *unused,
- char *buf)
+static ssize_t lid_mask_count_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *unused, char *buf)
{
struct ib_port_attr attr;
ssize_t ret;
- ret = ib_query_port(p->ibdev, p->port_num, &attr);
+ ret = ib_query_port(ibdev, port_num, &attr);
if (ret)
return ret;
- return sprintf(buf, "%d\n", attr.lmc);
+ return sysfs_emit(buf, "%u\n", attr.lmc);
}
-static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
- char *buf)
+static ssize_t sm_lid_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *unused, char *buf)
{
struct ib_port_attr attr;
ssize_t ret;
- ret = ib_query_port(p->ibdev, p->port_num, &attr);
+ ret = ib_query_port(ibdev, port_num, &attr);
if (ret)
return ret;
- return sprintf(buf, "0x%x\n", attr.sm_lid);
+ return sysfs_emit(buf, "0x%x\n", attr.sm_lid);
}
-static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
- char *buf)
+static ssize_t sm_sl_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *unused, char *buf)
{
struct ib_port_attr attr;
ssize_t ret;
- ret = ib_query_port(p->ibdev, p->port_num, &attr);
+ ret = ib_query_port(ibdev, port_num, &attr);
if (ret)
return ret;
- return sprintf(buf, "%d\n", attr.sm_sl);
+ return sysfs_emit(buf, "%u\n", attr.sm_sl);
}
-static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
- char *buf)
+static ssize_t cap_mask_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *unused, char *buf)
{
struct ib_port_attr attr;
ssize_t ret;
- ret = ib_query_port(p->ibdev, p->port_num, &attr);
+ ret = ib_query_port(ibdev, port_num, &attr);
if (ret)
return ret;
- return sprintf(buf, "0x%08x\n", attr.port_cap_flags);
+ return sysfs_emit(buf, "0x%08x\n", attr.port_cap_flags);
}
-static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
- char *buf)
+static ssize_t rate_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *unused, char *buf)
{
struct ib_port_attr attr;
char *speed = "";
int rate; /* in deci-Gb/sec */
ssize_t ret;
- ret = ib_query_port(p->ibdev, p->port_num, &attr);
+ ret = ib_query_port(ibdev, port_num, &attr);
if (ret)
return ret;
@@ -207,8 +322,21 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
speed = " EDR";
rate = 250;
break;
+ case IB_SPEED_HDR:
+ speed = " HDR";
+ rate = 500;
+ break;
+ case IB_SPEED_NDR:
+ speed = " NDR";
+ rate = 1000;
+ break;
+ case IB_SPEED_XDR:
+ speed = " XDR";
+ rate = 2000;
+ break;
case IB_SPEED_SDR:
default: /* default to SDR for invalid rates */
+ speed = " SDR";
rate = 25;
break;
}
@@ -217,124 +345,218 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
if (rate < 0)
return -EINVAL;
- return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
- rate / 10, rate % 10 ? ".5" : "",
- ib_width_enum_to_int(attr.active_width), speed);
+ return sysfs_emit(buf, "%d%s Gb/sec (%dX%s)\n", rate / 10,
+ rate % 10 ? ".5" : "",
+ ib_width_enum_to_int(attr.active_width), speed);
}
-static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
- char *buf)
+static const char *phys_state_to_str(enum ib_port_phys_state phys_state)
+{
+ static const char *phys_state_str[] = {
+ "<unknown>",
+ "Sleep",
+ "Polling",
+ "Disabled",
+ "PortConfigurationTraining",
+ "LinkUp",
+ "LinkErrorRecovery",
+ "Phy Test",
+ };
+
+ if (phys_state < ARRAY_SIZE(phys_state_str))
+ return phys_state_str[phys_state];
+ return "<unknown>";
+}
+
+static ssize_t phys_state_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *unused, char *buf)
{
struct ib_port_attr attr;
ssize_t ret;
- ret = ib_query_port(p->ibdev, p->port_num, &attr);
+ ret = ib_query_port(ibdev, port_num, &attr);
if (ret)
return ret;
- switch (attr.phys_state) {
- case 1: return sprintf(buf, "1: Sleep\n");
- case 2: return sprintf(buf, "2: Polling\n");
- case 3: return sprintf(buf, "3: Disabled\n");
- case 4: return sprintf(buf, "4: PortConfigurationTraining\n");
- case 5: return sprintf(buf, "5: LinkUp\n");
- case 6: return sprintf(buf, "6: LinkErrorRecovery\n");
- case 7: return sprintf(buf, "7: Phy Test\n");
- default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
- }
+ return sysfs_emit(buf, "%u: %s\n", attr.phys_state,
+ phys_state_to_str(attr.phys_state));
}
-static ssize_t link_layer_show(struct ib_port *p, struct port_attribute *unused,
- char *buf)
+static ssize_t link_layer_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *unused, char *buf)
{
- switch (rdma_port_get_link_layer(p->ibdev, p->port_num)) {
+ const char *output;
+
+ switch (rdma_port_get_link_layer(ibdev, port_num)) {
case IB_LINK_LAYER_INFINIBAND:
- return sprintf(buf, "%s\n", "InfiniBand");
+ output = "InfiniBand";
+ break;
case IB_LINK_LAYER_ETHERNET:
- return sprintf(buf, "%s\n", "Ethernet");
+ output = "Ethernet";
+ break;
default:
- return sprintf(buf, "%s\n", "Unknown");
+ output = "Unknown";
+ break;
}
+
+ return sysfs_emit(buf, "%s\n", output);
}
-static PORT_ATTR_RO(state);
-static PORT_ATTR_RO(lid);
-static PORT_ATTR_RO(lid_mask_count);
-static PORT_ATTR_RO(sm_lid);
-static PORT_ATTR_RO(sm_sl);
-static PORT_ATTR_RO(cap_mask);
-static PORT_ATTR_RO(rate);
-static PORT_ATTR_RO(phys_state);
-static PORT_ATTR_RO(link_layer);
+static IB_PORT_ATTR_RO(state);
+static IB_PORT_ATTR_RO(lid);
+static IB_PORT_ATTR_RO(lid_mask_count);
+static IB_PORT_ATTR_RO(sm_lid);
+static IB_PORT_ATTR_RO(sm_sl);
+static IB_PORT_ATTR_RO(cap_mask);
+static IB_PORT_ATTR_RO(rate);
+static IB_PORT_ATTR_RO(phys_state);
+static IB_PORT_ATTR_RO(link_layer);
static struct attribute *port_default_attrs[] = {
- &port_attr_state.attr,
- &port_attr_lid.attr,
- &port_attr_lid_mask_count.attr,
- &port_attr_sm_lid.attr,
- &port_attr_sm_sl.attr,
- &port_attr_cap_mask.attr,
- &port_attr_rate.attr,
- &port_attr_phys_state.attr,
- &port_attr_link_layer.attr,
+ &ib_port_attr_state.attr,
+ &ib_port_attr_lid.attr,
+ &ib_port_attr_lid_mask_count.attr,
+ &ib_port_attr_sm_lid.attr,
+ &ib_port_attr_sm_sl.attr,
+ &ib_port_attr_cap_mask.attr,
+ &ib_port_attr_rate.attr,
+ &ib_port_attr_phys_state.attr,
+ &ib_port_attr_link_layer.attr,
NULL
};
+ATTRIBUTE_GROUPS(port_default);
-static ssize_t show_port_gid(struct ib_port *p, struct port_attribute *attr,
- char *buf)
+static ssize_t print_ndev(const struct ib_gid_attr *gid_attr, char *buf)
+{
+ struct net_device *ndev;
+ int ret = -EINVAL;
+
+ rcu_read_lock();
+ ndev = rcu_dereference(gid_attr->ndev);
+ if (ndev)
+ ret = sysfs_emit(buf, "%s\n", ndev->name);
+ rcu_read_unlock();
+ return ret;
+}
+
+static ssize_t print_gid_type(const struct ib_gid_attr *gid_attr, char *buf)
+{
+ return sysfs_emit(buf, "%s\n",
+ ib_cache_gid_type_str(gid_attr->gid_type));
+}
+
+static ssize_t _show_port_gid_attr(
+ struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *attr,
+ char *buf,
+ ssize_t (*print)(const struct ib_gid_attr *gid_attr, char *buf))
{
struct port_table_attribute *tab_attr =
container_of(attr, struct port_table_attribute, attr);
- union ib_gid gid;
+ const struct ib_gid_attr *gid_attr;
ssize_t ret;
- ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
- if (ret)
- return ret;
+ gid_attr = rdma_get_gid_attr(ibdev, port_num, tab_attr->index);
+ if (IS_ERR(gid_attr))
+ /* -EINVAL is returned for user space compatibility reasons. */
+ return -EINVAL;
+
+ ret = print(gid_attr, buf);
+ rdma_put_gid_attr(gid_attr);
+ return ret;
+}
- return sprintf(buf, "%pI6\n", gid.raw);
+static ssize_t show_port_gid(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
+{
+ struct port_table_attribute *tab_attr =
+ container_of(attr, struct port_table_attribute, attr);
+ const struct ib_gid_attr *gid_attr;
+ int len;
+
+ gid_attr = rdma_get_gid_attr(ibdev, port_num, tab_attr->index);
+ if (IS_ERR(gid_attr)) {
+ const union ib_gid zgid = {};
+
+ /* If reading the GID fails, it is likely because the GID entry
+ * is empty (invalid) or reserved in the table. User space
+ * expects to read GID table entries as long as the given index
+ * is within the GID table size; an administrative/debugging tool
+ * would otherwise stop querying the remaining GID entries once it
+ * hit an error at a given index. To avoid user space treating a
+ * failed GID read as an error, return the zero GID as before.
+ * This maintains backward compatibility.
+ */
+ return sysfs_emit(buf, "%pI6\n", zgid.raw);
+ }
+
+ len = sysfs_emit(buf, "%pI6\n", gid_attr->gid.raw);
+ rdma_put_gid_attr(gid_attr);
+ return len;
+}
+
+static ssize_t show_port_gid_attr_ndev(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr,
+ char *buf)
+{
+ return _show_port_gid_attr(ibdev, port_num, attr, buf, print_ndev);
+}
+
+static ssize_t show_port_gid_attr_gid_type(struct ib_device *ibdev,
+ u32 port_num,
+ struct ib_port_attribute *attr,
+ char *buf)
+{
+ return _show_port_gid_attr(ibdev, port_num, attr, buf, print_gid_type);
}
-static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
- char *buf)
+static ssize_t show_port_pkey(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
{
struct port_table_attribute *tab_attr =
container_of(attr, struct port_table_attribute, attr);
u16 pkey;
- ssize_t ret;
+ int ret;
- ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
+ ret = ib_query_pkey(ibdev, port_num, tab_attr->index, &pkey);
if (ret)
return ret;
- return sprintf(buf, "0x%04x\n", pkey);
+ return sysfs_emit(buf, "0x%04x\n", pkey);
}
#define PORT_PMA_ATTR(_name, _counter, _width, _offset) \
struct port_table_attribute port_pma_attr_##_name = { \
.attr = __ATTR(_name, S_IRUGO, show_pma_counter, NULL), \
- .index = (_offset) | ((_width) << 16) | ((_counter) << 24) \
+ .index = (_offset) | ((_width) << 16) | ((_counter) << 24), \
+ .attr_id = IB_PMA_PORT_COUNTERS, \
+}
+
+#define PORT_PMA_ATTR_EXT(_name, _width, _offset) \
+struct port_table_attribute port_pma_attr_ext_##_name = { \
+ .attr = __ATTR(_name, S_IRUGO, show_pma_counter, NULL), \
+ .index = (_offset) | ((_width) << 16), \
+ .attr_id = IB_PMA_PORT_COUNTERS_EXT, \
}
-static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
- char *buf)
+/*
+ * Get a Perfmgmt MAD block of data.
+ * Returns error code or the number of bytes retrieved.
+ */
+static int get_perf_mad(struct ib_device *dev, int port_num, __be16 attr,
+ void *data, int offset, size_t size)
{
- struct port_table_attribute *tab_attr =
- container_of(attr, struct port_table_attribute, attr);
- int offset = tab_attr->index & 0xffff;
- int width = (tab_attr->index >> 16) & 0xff;
- struct ib_mad *in_mad = NULL;
- struct ib_mad *out_mad = NULL;
+ struct ib_mad *in_mad;
+ struct ib_mad *out_mad;
size_t mad_size = sizeof(*out_mad);
u16 out_mad_pkey_index = 0;
ssize_t ret;
- if (!p->ibdev->process_mad)
- return sprintf(buf, "N/A (no PMA)\n");
+ if (!dev->ops.process_mad)
+ return -ENOSYS;
- in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
- out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kzalloc(sizeof(*out_mad), GFP_KERNEL);
if (!in_mad || !out_mad) {
ret = -ENOMEM;
goto out;
@@ -344,46 +566,66 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
in_mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
in_mad->mad_hdr.class_version = 1;
in_mad->mad_hdr.method = IB_MGMT_METHOD_GET;
- in_mad->mad_hdr.attr_id = cpu_to_be16(0x12); /* PortCounters */
+ in_mad->mad_hdr.attr_id = attr;
- in_mad->data[41] = p->port_num; /* PortSelect field */
+ if (attr != IB_PMA_CLASS_PORT_INFO)
+ in_mad->data[41] = port_num; /* PortSelect field */
- if ((p->ibdev->process_mad(p->ibdev, IB_MAD_IGNORE_MKEY,
- p->port_num, NULL, NULL,
- (const struct ib_mad_hdr *)in_mad, mad_size,
- (struct ib_mad_hdr *)out_mad, &mad_size,
- &out_mad_pkey_index) &
+ if ((dev->ops.process_mad(dev, IB_MAD_IGNORE_MKEY, port_num, NULL, NULL,
+ in_mad, out_mad, &mad_size,
+ &out_mad_pkey_index) &
(IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) !=
(IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) {
ret = -EINVAL;
goto out;
}
+ memcpy(data, out_mad->data + offset, size);
+ ret = size;
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return ret;
+}
+
+static ssize_t show_pma_counter(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
+{
+ struct port_table_attribute *tab_attr =
+ container_of(attr, struct port_table_attribute, attr);
+ int offset = tab_attr->index & 0xffff;
+ int width = (tab_attr->index >> 16) & 0xff;
+ int ret;
+ u8 data[8];
+ int len;
+
+ ret = get_perf_mad(ibdev, port_num, tab_attr->attr_id, &data,
+ 40 + offset / 8, sizeof(data));
+ if (ret < 0)
+ return ret;
switch (width) {
case 4:
- ret = sprintf(buf, "%u\n", (out_mad->data[40 + offset / 8] >>
- (4 - (offset % 8))) & 0xf);
+ len = sysfs_emit(buf, "%d\n",
+ (*data >> (4 - (offset % 8))) & 0xf);
break;
case 8:
- ret = sprintf(buf, "%u\n", out_mad->data[40 + offset / 8]);
+ len = sysfs_emit(buf, "%u\n", *data);
break;
case 16:
- ret = sprintf(buf, "%u\n",
- be16_to_cpup((__be16 *)(out_mad->data + 40 + offset / 8)));
+ len = sysfs_emit(buf, "%u\n", be16_to_cpup((__be16 *)data));
break;
case 32:
- ret = sprintf(buf, "%u\n",
- be32_to_cpup((__be32 *)(out_mad->data + 40 + offset / 8)));
+ len = sysfs_emit(buf, "%u\n", be32_to_cpup((__be32 *)data));
+ break;
+ case 64:
+ len = sysfs_emit(buf, "%llu\n", be64_to_cpup((__be64 *)data));
break;
default:
- ret = 0;
+ len = 0;
+ break;
}
-out:
- kfree(in_mad);
- kfree(out_mad);
-
- return ret;
+ return len;
}
static PORT_PMA_ATTR(symbol_error , 0, 16, 32);
@@ -402,6 +644,19 @@ static PORT_PMA_ATTR(port_xmit_data , 12, 32, 192);
static PORT_PMA_ATTR(port_rcv_data , 13, 32, 224);
static PORT_PMA_ATTR(port_xmit_packets , 14, 32, 256);
static PORT_PMA_ATTR(port_rcv_packets , 15, 32, 288);
+static PORT_PMA_ATTR(port_xmit_wait , 0, 32, 320);
+
+/*
+ * Counters added by extended set
+ */
+static PORT_PMA_ATTR_EXT(port_xmit_data , 64, 64);
+static PORT_PMA_ATTR_EXT(port_rcv_data , 64, 128);
+static PORT_PMA_ATTR_EXT(port_xmit_packets , 64, 192);
+static PORT_PMA_ATTR_EXT(port_rcv_packets , 64, 256);
+static PORT_PMA_ATTR_EXT(unicast_xmit_packets , 64, 320);
+static PORT_PMA_ATTR_EXT(unicast_rcv_packets , 64, 384);
+static PORT_PMA_ATTR_EXT(multicast_xmit_packets , 64, 448);
+static PORT_PMA_ATTR_EXT(multicast_rcv_packets , 64, 512);
static struct attribute *pma_attrs[] = {
&port_pma_attr_symbol_error.attr.attr,
@@ -420,468 +675,799 @@ static struct attribute *pma_attrs[] = {
&port_pma_attr_port_rcv_data.attr.attr,
&port_pma_attr_port_xmit_packets.attr.attr,
&port_pma_attr_port_rcv_packets.attr.attr,
+ &port_pma_attr_port_xmit_wait.attr.attr,
NULL
};
-static struct attribute_group pma_group = {
+static struct attribute *pma_attrs_ext[] = {
+ &port_pma_attr_symbol_error.attr.attr,
+ &port_pma_attr_link_error_recovery.attr.attr,
+ &port_pma_attr_link_downed.attr.attr,
+ &port_pma_attr_port_rcv_errors.attr.attr,
+ &port_pma_attr_port_rcv_remote_physical_errors.attr.attr,
+ &port_pma_attr_port_rcv_switch_relay_errors.attr.attr,
+ &port_pma_attr_port_xmit_discards.attr.attr,
+ &port_pma_attr_port_xmit_constraint_errors.attr.attr,
+ &port_pma_attr_port_rcv_constraint_errors.attr.attr,
+ &port_pma_attr_local_link_integrity_errors.attr.attr,
+ &port_pma_attr_excessive_buffer_overrun_errors.attr.attr,
+ &port_pma_attr_VL15_dropped.attr.attr,
+ &port_pma_attr_ext_port_xmit_data.attr.attr,
+ &port_pma_attr_ext_port_rcv_data.attr.attr,
+ &port_pma_attr_ext_port_xmit_packets.attr.attr,
+ &port_pma_attr_port_xmit_wait.attr.attr,
+ &port_pma_attr_ext_port_rcv_packets.attr.attr,
+ &port_pma_attr_ext_unicast_rcv_packets.attr.attr,
+ &port_pma_attr_ext_unicast_xmit_packets.attr.attr,
+ &port_pma_attr_ext_multicast_rcv_packets.attr.attr,
+ &port_pma_attr_ext_multicast_xmit_packets.attr.attr,
+ NULL
+};
+
+static struct attribute *pma_attrs_noietf[] = {
+ &port_pma_attr_symbol_error.attr.attr,
+ &port_pma_attr_link_error_recovery.attr.attr,
+ &port_pma_attr_link_downed.attr.attr,
+ &port_pma_attr_port_rcv_errors.attr.attr,
+ &port_pma_attr_port_rcv_remote_physical_errors.attr.attr,
+ &port_pma_attr_port_rcv_switch_relay_errors.attr.attr,
+ &port_pma_attr_port_xmit_discards.attr.attr,
+ &port_pma_attr_port_xmit_constraint_errors.attr.attr,
+ &port_pma_attr_port_rcv_constraint_errors.attr.attr,
+ &port_pma_attr_local_link_integrity_errors.attr.attr,
+ &port_pma_attr_excessive_buffer_overrun_errors.attr.attr,
+ &port_pma_attr_VL15_dropped.attr.attr,
+ &port_pma_attr_ext_port_xmit_data.attr.attr,
+ &port_pma_attr_ext_port_rcv_data.attr.attr,
+ &port_pma_attr_ext_port_xmit_packets.attr.attr,
+ &port_pma_attr_ext_port_rcv_packets.attr.attr,
+ &port_pma_attr_port_xmit_wait.attr.attr,
+ NULL
+};
+
+static const struct attribute_group pma_group = {
.name = "counters",
.attrs = pma_attrs
};
+static const struct attribute_group pma_group_ext = {
+ .name = "counters",
+ .attrs = pma_attrs_ext
+};
+
+static const struct attribute_group pma_group_noietf = {
+ .name = "counters",
+ .attrs = pma_attrs_noietf
+};
+
static void ib_port_release(struct kobject *kobj)
{
- struct ib_port *p = container_of(kobj, struct ib_port, kobj);
- struct attribute *a;
+ struct ib_port *port = container_of(kobj, struct ib_port, kobj);
int i;
- if (p->gid_group.attrs) {
- for (i = 0; (a = p->gid_group.attrs[i]); ++i)
- kfree(a);
-
- kfree(p->gid_group.attrs);
- }
-
- if (p->pkey_group.attrs) {
- for (i = 0; (a = p->pkey_group.attrs[i]); ++i)
- kfree(a);
+ for (i = 0; i != ARRAY_SIZE(port->groups); i++)
+ kfree(port->groups[i].attrs);
+ if (port->hw_stats_data)
+ rdma_free_hw_stats_struct(port->hw_stats_data->stats);
+ kfree(port->hw_stats_data);
+ kvfree(port);
+}
- kfree(p->pkey_group.attrs);
- }
+static void ib_port_gid_attr_release(struct kobject *kobj)
+{
+ struct gid_attr_group *gid_attr_group =
+ container_of(kobj, struct gid_attr_group, kobj);
+ int i;
- kfree(p);
+ for (i = 0; i != ARRAY_SIZE(gid_attr_group->groups); i++)
+ kfree(gid_attr_group->groups[i].attrs);
+ kfree(gid_attr_group);
}
static struct kobj_type port_type = {
.release = ib_port_release,
.sysfs_ops = &port_sysfs_ops,
- .default_attrs = port_default_attrs
+ .default_groups = port_default_groups,
+};
+
+static struct kobj_type gid_attr_type = {
+ .sysfs_ops = &gid_attr_sysfs_ops,
+ .release = ib_port_gid_attr_release
};
-static struct attribute **
-alloc_group_attrs(ssize_t (*show)(struct ib_port *,
- struct port_attribute *, char *buf),
- int len)
+/*
+ * Figure out which counter table to use depending on
+ * the device capabilities.
+ */
+static const struct attribute_group *get_counter_table(struct ib_device *dev,
+ int port_num)
{
- struct attribute **tab_attr;
- struct port_table_attribute *element;
- int i;
+ struct ib_class_port_info cpi;
- tab_attr = kcalloc(1 + len, sizeof(struct attribute *), GFP_KERNEL);
- if (!tab_attr)
- return NULL;
+ if (get_perf_mad(dev, port_num, IB_PMA_CLASS_PORT_INFO,
+ &cpi, 40, sizeof(cpi)) >= 0) {
+ if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH)
+ /* We have extended counters */
+ return &pma_group_ext;
- for (i = 0; i < len; i++) {
- element = kzalloc(sizeof(struct port_table_attribute),
- GFP_KERNEL);
- if (!element)
- goto err;
+ if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF)
+ /* But not the IETF ones */
+ return &pma_group_noietf;
+ }
- if (snprintf(element->name, sizeof(element->name),
- "%d", i) >= sizeof(element->name)) {
- kfree(element);
- goto err;
- }
+ /* Fall back to normal counters */
+ return &pma_group;
+}
- element->attr.attr.name = element->name;
- element->attr.attr.mode = S_IRUGO;
- element->attr.show = show;
- element->index = i;
- sysfs_attr_init(&element->attr.attr);
+static int update_hw_stats(struct ib_device *dev, struct rdma_hw_stats *stats,
+ u32 port_num, int index)
+{
+ int ret;
- tab_attr[i] = &element->attr.attr;
- }
+ if (time_is_after_eq_jiffies(stats->timestamp + stats->lifespan))
+ return 0;
+ ret = dev->ops.get_hw_stats(dev, stats, port_num, index);
+ if (ret < 0)
+ return ret;
+ if (ret == stats->num_counters)
+ stats->timestamp = jiffies;
- return tab_attr;
+ return 0;
+}
-err:
- while (--i >= 0)
- kfree(tab_attr[i]);
- kfree(tab_attr);
- return NULL;
+static int print_hw_stat(struct ib_device *dev, int port_num,
+ struct rdma_hw_stats *stats, int index, char *buf)
+{
+ u64 v = rdma_counter_get_hwstat_value(dev, port_num, index);
+
+ return sysfs_emit(buf, "%llu\n", stats->value[index] + v);
}
-static int add_port(struct ib_device *device, int port_num,
- int (*port_callback)(struct ib_device *,
- u8, struct kobject *))
+static ssize_t show_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats, unsigned int index,
+ unsigned int port_num, char *buf)
{
- struct ib_port *p;
- struct ib_port_attr attr;
- int i;
int ret;
- ret = ib_query_port(device, port_num, &attr);
+ mutex_lock(&stats->lock);
+ ret = update_hw_stats(ibdev, stats, port_num, index);
+ if (ret)
+ goto unlock;
+ ret = print_hw_stat(ibdev, port_num, stats, index, buf);
+unlock:
+ mutex_unlock(&stats->lock);
+
+ return ret;
+}
+
+static ssize_t show_stats_lifespan(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ unsigned int index, unsigned int port_num,
+ char *buf)
+{
+ int msecs;
+
+ mutex_lock(&stats->lock);
+ msecs = jiffies_to_msecs(stats->lifespan);
+ mutex_unlock(&stats->lock);
+
+ return sysfs_emit(buf, "%d\n", msecs);
+}
+
+static ssize_t set_stats_lifespan(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ unsigned int index, unsigned int port_num,
+ const char *buf, size_t count)
+{
+ int msecs;
+ int jiffies;
+ int ret;
+
+ ret = kstrtoint(buf, 10, &msecs);
if (ret)
return ret;
+ if (msecs < 0 || msecs > 10000)
+ return -EINVAL;
+ jiffies = msecs_to_jiffies(msecs);
- p = kzalloc(sizeof *p, GFP_KERNEL);
- if (!p)
- return -ENOMEM;
+ mutex_lock(&stats->lock);
+ stats->lifespan = jiffies;
+ mutex_unlock(&stats->lock);
+
+ return count;
+}
- p->ibdev = device;
- p->port_num = port_num;
+static struct hw_stats_device_data *
+alloc_hw_stats_device(struct ib_device *ibdev)
+{
+ struct hw_stats_device_data *data;
+ struct rdma_hw_stats *stats;
+
+ if (!ibdev->ops.alloc_hw_device_stats)
+ return ERR_PTR(-EOPNOTSUPP);
+ stats = ibdev->ops.alloc_hw_device_stats(ibdev);
+ if (!stats)
+ return ERR_PTR(-ENOMEM);
+ if (!stats->descs || stats->num_counters <= 0)
+ goto err_free_stats;
+
+ /*
+ * Two extra attribute elements here, one for the lifespan entry and
+ * one to NULL terminate the list for the sysfs core code
+ */
+ data = kzalloc(struct_size(data, attrs, size_add(stats->num_counters, 1)),
+ GFP_KERNEL);
+ if (!data)
+ goto err_free_stats;
+ data->group.attrs = kcalloc(stats->num_counters + 2,
+ sizeof(*data->group.attrs), GFP_KERNEL);
+ if (!data->group.attrs)
+ goto err_free_data;
+
+ data->group.name = "hw_counters";
+ data->stats = stats;
+ return data;
+
+err_free_data:
+ kfree(data);
+err_free_stats:
+ rdma_free_hw_stats_struct(stats);
+ return ERR_PTR(-ENOMEM);
+}
+
+void ib_device_release_hw_stats(struct hw_stats_device_data *data)
+{
+ kfree(data->group.attrs);
+ rdma_free_hw_stats_struct(data->stats);
+ kfree(data);
+}
- ret = kobject_init_and_add(&p->kobj, &port_type,
- device->ports_parent,
- "%d", port_num);
- if (ret) {
- kfree(p);
+int ib_setup_device_attrs(struct ib_device *ibdev)
+{
+ struct hw_stats_device_attribute *attr;
+ struct hw_stats_device_data *data;
+ bool opstat_skipped = false;
+ int i, ret, pos = 0;
+
+ data = alloc_hw_stats_device(ibdev);
+ if (IS_ERR(data)) {
+ if (PTR_ERR(data) == -EOPNOTSUPP)
+ return 0;
+ return PTR_ERR(data);
+ }
+ ibdev->hw_stats_data = data;
+
+ ret = ibdev->ops.get_hw_stats(ibdev, data->stats, 0,
+ data->stats->num_counters);
+ if (ret != data->stats->num_counters) {
+ if (WARN_ON(ret >= 0))
+ return -EINVAL;
return ret;
}
- ret = sysfs_create_group(&p->kobj, &pma_group);
- if (ret)
- goto err_put;
+ data->stats->timestamp = jiffies;
- p->gid_group.name = "gids";
- p->gid_group.attrs = alloc_group_attrs(show_port_gid, attr.gid_tbl_len);
- if (!p->gid_group.attrs) {
- ret = -ENOMEM;
- goto err_remove_pma;
+ for (i = 0; i < data->stats->num_counters; i++) {
+ if (data->stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL) {
+ opstat_skipped = true;
+ continue;
+ }
+
+ WARN_ON(opstat_skipped);
+ attr = &data->attrs[pos];
+ sysfs_attr_init(&attr->attr.attr);
+ attr->attr.attr.name = data->stats->descs[i].name;
+ attr->attr.attr.mode = 0444;
+ attr->attr.show = hw_stat_device_show;
+ attr->show = show_hw_stats;
+ data->group.attrs[pos] = &attr->attr.attr;
+ pos++;
}
- ret = sysfs_create_group(&p->kobj, &p->gid_group);
- if (ret)
- goto err_free_gid;
+ attr = &data->attrs[pos];
+ sysfs_attr_init(&attr->attr.attr);
+ attr->attr.attr.name = "lifespan";
+ attr->attr.attr.mode = 0644;
+ attr->attr.show = hw_stat_device_show;
+ attr->show = show_stats_lifespan;
+ attr->attr.store = hw_stat_device_store;
+ attr->store = set_stats_lifespan;
+ data->group.attrs[pos] = &attr->attr.attr;
+ for (i = 0; i != ARRAY_SIZE(ibdev->groups); i++)
+ if (!ibdev->groups[i]) {
+ ibdev->groups[i] = &data->group;
+ ibdev->hw_stats_attr_index = i;
+ return 0;
+ }
+ WARN(true, "struct ib_device->groups is too small");
+ return -EINVAL;
+}
- p->pkey_group.name = "pkeys";
- p->pkey_group.attrs = alloc_group_attrs(show_port_pkey,
- attr.pkey_tbl_len);
- if (!p->pkey_group.attrs) {
- ret = -ENOMEM;
- goto err_remove_gid;
+static struct hw_stats_port_data *
+alloc_hw_stats_port(struct ib_port *port, struct attribute_group *group)
+{
+ struct ib_device *ibdev = port->ibdev;
+ struct hw_stats_port_data *data;
+ struct rdma_hw_stats *stats;
+
+ if (!ibdev->ops.alloc_hw_port_stats)
+ return ERR_PTR(-EOPNOTSUPP);
+ stats = ibdev->ops.alloc_hw_port_stats(port->ibdev, port->port_num);
+ if (!stats)
+ return ERR_PTR(-ENOMEM);
+ if (!stats->descs || stats->num_counters <= 0)
+ goto err_free_stats;
+
+ /*
+ * Two extra attribute elements here, one for the lifespan entry and
+ * one to NULL terminate the list for the sysfs core code
+ */
+ data = kzalloc(struct_size(data, attrs, size_add(stats->num_counters, 1)),
+ GFP_KERNEL);
+ if (!data)
+ goto err_free_stats;
+ group->attrs = kcalloc(stats->num_counters + 2,
+ sizeof(*group->attrs), GFP_KERNEL);
+ if (!group->attrs)
+ goto err_free_data;
+
+ group->name = "hw_counters";
+ data->stats = stats;
+ return data;
+
+err_free_data:
+ kfree(data);
+err_free_stats:
+ rdma_free_hw_stats_struct(stats);
+ return ERR_PTR(-ENOMEM);
+}
+
+static int setup_hw_port_stats(struct ib_port *port,
+ struct attribute_group *group)
+{
+ struct hw_stats_port_attribute *attr;
+ struct hw_stats_port_data *data;
+ bool opstat_skipped = false;
+ int i, ret, pos = 0;
+
+ data = alloc_hw_stats_port(port, group);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ ret = port->ibdev->ops.get_hw_stats(port->ibdev, data->stats,
+ port->port_num,
+ data->stats->num_counters);
+ if (ret != data->stats->num_counters) {
+ if (WARN_ON(ret >= 0))
+ return -EINVAL;
+ return ret;
}
- ret = sysfs_create_group(&p->kobj, &p->pkey_group);
- if (ret)
- goto err_free_pkey;
+ data->stats->timestamp = jiffies;
- if (port_callback) {
- ret = port_callback(device, port_num, &p->kobj);
- if (ret)
- goto err_remove_pkey;
- }
+ for (i = 0; i < data->stats->num_counters; i++) {
+ if (data->stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL) {
+ opstat_skipped = true;
+ continue;
+ }
- list_add_tail(&p->kobj.entry, &device->port_list);
+ WARN_ON(opstat_skipped);
+ attr = &data->attrs[pos];
+ sysfs_attr_init(&attr->attr.attr);
+ attr->attr.attr.name = data->stats->descs[i].name;
+ attr->attr.attr.mode = 0444;
+ attr->attr.show = hw_stat_port_show;
+ attr->show = show_hw_stats;
+ group->attrs[pos] = &attr->attr.attr;
+ pos++;
+ }
- kobject_uevent(&p->kobj, KOBJ_ADD);
+ attr = &data->attrs[pos];
+ sysfs_attr_init(&attr->attr.attr);
+ attr->attr.attr.name = "lifespan";
+ attr->attr.attr.mode = 0644;
+ attr->attr.show = hw_stat_port_show;
+ attr->show = show_stats_lifespan;
+ attr->attr.store = hw_stat_port_store;
+ attr->store = set_stats_lifespan;
+ group->attrs[pos] = &attr->attr.attr;
+
+ port->hw_stats_data = data;
return 0;
+}
-err_remove_pkey:
- sysfs_remove_group(&p->kobj, &p->pkey_group);
+struct rdma_hw_stats *ib_get_hw_stats_port(struct ib_device *ibdev,
+ u32 port_num)
+{
+ if (!ibdev->port_data || !rdma_is_port_valid(ibdev, port_num) ||
+ !ibdev->port_data[port_num].sysfs->hw_stats_data)
+ return NULL;
+ return ibdev->port_data[port_num].sysfs->hw_stats_data->stats;
+}
-err_free_pkey:
- for (i = 0; i < attr.pkey_tbl_len; ++i)
- kfree(p->pkey_group.attrs[i]);
+static int
+alloc_port_table_group(const char *name, struct attribute_group *group,
+ struct port_table_attribute *attrs, size_t num,
+ ssize_t (*show)(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *, char *buf))
+{
+ struct attribute **attr_list;
+ int i;
- kfree(p->pkey_group.attrs);
- p->pkey_group.attrs = NULL;
+ attr_list = kcalloc(num + 1, sizeof(*attr_list), GFP_KERNEL);
+ if (!attr_list)
+ return -ENOMEM;
-err_remove_gid:
- sysfs_remove_group(&p->kobj, &p->gid_group);
+ for (i = 0; i < num; i++) {
+ struct port_table_attribute *element = &attrs[i];
-err_free_gid:
- for (i = 0; i < attr.gid_tbl_len; ++i)
- kfree(p->gid_group.attrs[i]);
+ if (snprintf(element->name, sizeof(element->name), "%d", i) >=
+ sizeof(element->name))
+ goto err;
+
+ sysfs_attr_init(&element->attr.attr);
+ element->attr.attr.name = element->name;
+ element->attr.attr.mode = 0444;
+ element->attr.show = show;
+ element->index = i;
+
+ attr_list[i] = &element->attr.attr;
+ }
+ group->name = name;
+ group->attrs = attr_list;
+ return 0;
+err:
+ kfree(attr_list);
+ return -EINVAL;
+}
+
+/*
+ * Create the sysfs:
+ * ibp0s9/ports/XX/gid_attrs/{ndevs,types}/YYY
+ * YYY is the gid table index in decimal
+ */
+static int setup_gid_attrs(struct ib_port *port,
+ const struct ib_port_attr *attr)
+{
+ struct gid_attr_group *gid_attr_group;
+ int ret;
- kfree(p->gid_group.attrs);
- p->gid_group.attrs = NULL;
+ gid_attr_group = kzalloc(struct_size(gid_attr_group, attrs_list,
+ size_mul(attr->gid_tbl_len, 2)),
+ GFP_KERNEL);
+ if (!gid_attr_group)
+ return -ENOMEM;
+ gid_attr_group->port = port;
+ kobject_init(&gid_attr_group->kobj, &gid_attr_type);
-err_remove_pma:
- sysfs_remove_group(&p->kobj, &pma_group);
+ ret = alloc_port_table_group("ndevs", &gid_attr_group->groups[0],
+ gid_attr_group->attrs_list,
+ attr->gid_tbl_len,
+ show_port_gid_attr_ndev);
+ if (ret)
+ goto err_put;
+ gid_attr_group->groups_list[0] = &gid_attr_group->groups[0];
+ ret = alloc_port_table_group(
+ "types", &gid_attr_group->groups[1],
+ gid_attr_group->attrs_list + attr->gid_tbl_len,
+ attr->gid_tbl_len, show_port_gid_attr_gid_type);
+ if (ret)
+ goto err_put;
+ gid_attr_group->groups_list[1] = &gid_attr_group->groups[1];
+
+ ret = kobject_add(&gid_attr_group->kobj, &port->kobj, "gid_attrs");
+ if (ret)
+ goto err_put;
+ ret = sysfs_create_groups(&gid_attr_group->kobj,
+ gid_attr_group->groups_list);
+ if (ret)
+ goto err_del;
+ port->gid_attr_group = gid_attr_group;
+ return 0;
+
+err_del:
+ kobject_del(&gid_attr_group->kobj);
err_put:
- kobject_put(&p->kobj);
+ kobject_put(&gid_attr_group->kobj);
return ret;
}
-static ssize_t show_node_type(struct device *device,
- struct device_attribute *attr, char *buf)
+static void destroy_gid_attrs(struct ib_port *port)
{
- struct ib_device *dev = container_of(device, struct ib_device, dev);
+ struct gid_attr_group *gid_attr_group = port->gid_attr_group;
- switch (dev->node_type) {
- case RDMA_NODE_IB_CA: return sprintf(buf, "%d: CA\n", dev->node_type);
- case RDMA_NODE_RNIC: return sprintf(buf, "%d: RNIC\n", dev->node_type);
- case RDMA_NODE_USNIC: return sprintf(buf, "%d: usNIC\n", dev->node_type);
- case RDMA_NODE_USNIC_UDP: return sprintf(buf, "%d: usNIC UDP\n", dev->node_type);
- case RDMA_NODE_IB_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type);
- case RDMA_NODE_IB_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type);
- default: return sprintf(buf, "%d: <unknown>\n", dev->node_type);
- }
+ if (!gid_attr_group)
+ return;
+ sysfs_remove_groups(&gid_attr_group->kobj, gid_attr_group->groups_list);
+ kobject_del(&gid_attr_group->kobj);
+ kobject_put(&gid_attr_group->kobj);
}
-static ssize_t show_sys_image_guid(struct device *device,
- struct device_attribute *dev_attr, char *buf)
+/*
+ * Create the sysfs:
+ * ibp0s9/ports/XX/{gids,pkeys,counters}/YYY
+ */
+static struct ib_port *setup_port(struct ib_core_device *coredev, int port_num,
+ const struct ib_port_attr *attr)
{
- struct ib_device *dev = container_of(device, struct ib_device, dev);
- struct ib_device_attr attr;
- ssize_t ret;
+ struct ib_device *device = rdma_device_to_ibdev(&coredev->dev);
+ bool is_full_dev = &device->coredev == coredev;
+ const struct attribute_group **cur_group;
+ struct ib_port *p;
+ int ret;
- ret = ib_query_device(dev, &attr);
+ p = kvzalloc(struct_size(p, attrs_list,
+ size_add(attr->gid_tbl_len, attr->pkey_tbl_len)),
+ GFP_KERNEL);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+ p->ibdev = device;
+ p->port_num = port_num;
+ kobject_init(&p->kobj, &port_type);
+
+ if (device->port_data && is_full_dev)
+ device->port_data[port_num].sysfs = p;
+
+ cur_group = p->groups_list;
+ ret = alloc_port_table_group("gids", &p->groups[0], p->attrs_list,
+ attr->gid_tbl_len, show_port_gid);
if (ret)
- return ret;
+ goto err_put;
+ *cur_group++ = &p->groups[0];
+
+ if (attr->pkey_tbl_len) {
+ ret = alloc_port_table_group("pkeys", &p->groups[1],
+ p->attrs_list + attr->gid_tbl_len,
+ attr->pkey_tbl_len, show_port_pkey);
+ if (ret)
+ goto err_put;
+ *cur_group++ = &p->groups[1];
+ }
+
+ /*
+ * If port == 0, it means hw_counters are per device and not per
+ * port, so the holder should be the device. Therefore skip per-port
+ * counter initialization.
+ */
+ if (port_num && is_full_dev) {
+ ret = setup_hw_port_stats(p, &p->groups[2]);
+ if (ret && ret != -EOPNOTSUPP)
+ goto err_put;
+ if (!ret)
+ *cur_group++ = &p->groups[2];
+ }
- return sprintf(buf, "%04x:%04x:%04x:%04x\n",
- be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
- be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
- be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
- be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
+ if (device->ops.process_mad && is_full_dev)
+ *cur_group++ = get_counter_table(device, port_num);
+
+ ret = kobject_add(&p->kobj, coredev->ports_kobj, "%d", port_num);
+ if (ret)
+ goto err_put;
+ ret = sysfs_create_groups(&p->kobj, p->groups_list);
+ if (ret)
+ goto err_del;
+ if (is_full_dev) {
+ ret = sysfs_create_groups(&p->kobj, device->ops.port_groups);
+ if (ret)
+ goto err_groups;
+ }
+
+ list_add_tail(&p->kobj.entry, &coredev->port_list);
+ return p;
+
+err_groups:
+ sysfs_remove_groups(&p->kobj, p->groups_list);
+err_del:
+ kobject_del(&p->kobj);
+err_put:
+ if (device->port_data && is_full_dev)
+ device->port_data[port_num].sysfs = NULL;
+ kobject_put(&p->kobj);
+ return ERR_PTR(ret);
+}
+
+static void destroy_port(struct ib_core_device *coredev, struct ib_port *port)
+{
+ bool is_full_dev = &port->ibdev->coredev == coredev;
+
+ list_del(&port->kobj.entry);
+ if (is_full_dev)
+ sysfs_remove_groups(&port->kobj, port->ibdev->ops.port_groups);
+
+ sysfs_remove_groups(&port->kobj, port->groups_list);
+ kobject_del(&port->kobj);
+
+ if (port->ibdev->port_data &&
+ port->ibdev->port_data[port->port_num].sysfs == port)
+ port->ibdev->port_data[port->port_num].sysfs = NULL;
+
+ kobject_put(&port->kobj);
}
-static ssize_t show_node_guid(struct device *device,
+static const char *node_type_string(int node_type)
+{
+ switch (node_type) {
+ case RDMA_NODE_IB_CA:
+ return "CA";
+ case RDMA_NODE_IB_SWITCH:
+ return "switch";
+ case RDMA_NODE_IB_ROUTER:
+ return "router";
+ case RDMA_NODE_RNIC:
+ return "RNIC";
+ case RDMA_NODE_USNIC:
+ return "usNIC";
+ case RDMA_NODE_USNIC_UDP:
+ return "usNIC UDP";
+ case RDMA_NODE_UNSPECIFIED:
+ return "unspecified";
+ }
+ return "<unknown>";
+}
+
+static ssize_t node_type_show(struct device *device,
struct device_attribute *attr, char *buf)
{
- struct ib_device *dev = container_of(device, struct ib_device, dev);
+ struct ib_device *dev = rdma_device_to_ibdev(device);
- return sprintf(buf, "%04x:%04x:%04x:%04x\n",
- be16_to_cpu(((__be16 *) &dev->node_guid)[0]),
- be16_to_cpu(((__be16 *) &dev->node_guid)[1]),
- be16_to_cpu(((__be16 *) &dev->node_guid)[2]),
- be16_to_cpu(((__be16 *) &dev->node_guid)[3]));
+ return sysfs_emit(buf, "%u: %s\n", dev->node_type,
+ node_type_string(dev->node_type));
}
+static DEVICE_ATTR_RO(node_type);
-static ssize_t show_node_desc(struct device *device,
+static ssize_t sys_image_guid_show(struct device *device,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct ib_device *dev = rdma_device_to_ibdev(device);
+ __be16 *guid = (__be16 *)&dev->attrs.sys_image_guid;
+
+ return sysfs_emit(buf, "%04x:%04x:%04x:%04x\n",
+ be16_to_cpu(guid[0]),
+ be16_to_cpu(guid[1]),
+ be16_to_cpu(guid[2]),
+ be16_to_cpu(guid[3]));
+}
+static DEVICE_ATTR_RO(sys_image_guid);
+
+static ssize_t node_guid_show(struct device *device,
struct device_attribute *attr, char *buf)
{
- struct ib_device *dev = container_of(device, struct ib_device, dev);
+ struct ib_device *dev = rdma_device_to_ibdev(device);
+ __be16 *node_guid = (__be16 *)&dev->node_guid;
+
+ return sysfs_emit(buf, "%04x:%04x:%04x:%04x\n",
+ be16_to_cpu(node_guid[0]),
+ be16_to_cpu(node_guid[1]),
+ be16_to_cpu(node_guid[2]),
+ be16_to_cpu(node_guid[3]));
+}
+static DEVICE_ATTR_RO(node_guid);
- return sprintf(buf, "%.64s\n", dev->node_desc);
+static ssize_t node_desc_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ib_device *dev = rdma_device_to_ibdev(device);
+
+ return sysfs_emit(buf, "%.64s\n", dev->node_desc);
}
-static ssize_t set_node_desc(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t node_desc_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct ib_device *dev = container_of(device, struct ib_device, dev);
+ struct ib_device *dev = rdma_device_to_ibdev(device);
struct ib_device_modify desc = {};
int ret;
- if (!dev->modify_device)
- return -EIO;
+ if (!dev->ops.modify_device)
+ return -EOPNOTSUPP;
- memcpy(desc.node_desc, buf, min_t(int, count, 64));
+ memcpy(desc.node_desc, buf, min_t(int, count, IB_DEVICE_NODE_DESC_MAX));
ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
if (ret)
return ret;
return count;
}
+static DEVICE_ATTR_RW(node_desc);
-static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
-static DEVICE_ATTR(sys_image_guid, S_IRUGO, show_sys_image_guid, NULL);
-static DEVICE_ATTR(node_guid, S_IRUGO, show_node_guid, NULL);
-static DEVICE_ATTR(node_desc, S_IRUGO | S_IWUSR, show_node_desc, set_node_desc);
-
-static struct device_attribute *ib_class_attributes[] = {
- &dev_attr_node_type,
- &dev_attr_sys_image_guid,
- &dev_attr_node_guid,
- &dev_attr_node_desc
-};
-
-/* Show a given an attribute in the statistics group */
-static ssize_t show_protocol_stat(const struct device *device,
- struct device_attribute *attr, char *buf,
- unsigned offset)
+static ssize_t fw_ver_show(struct device *device, struct device_attribute *attr,
+ char *buf)
{
- struct ib_device *dev = container_of(device, struct ib_device, dev);
- union rdma_protocol_stats stats;
- ssize_t ret;
+ struct ib_device *dev = rdma_device_to_ibdev(device);
+ char version[IB_FW_VERSION_NAME_MAX] = {};
- ret = dev->get_protocol_stats(dev, &stats);
- if (ret)
- return ret;
+ ib_get_device_fw_str(dev, version);
- return sprintf(buf, "%llu\n",
- (unsigned long long) ((u64 *) &stats)[offset]);
-}
-
-/* generate a read-only iwarp statistics attribute */
-#define IW_STATS_ENTRY(name) \
-static ssize_t show_##name(struct device *device, \
- struct device_attribute *attr, char *buf) \
-{ \
- return show_protocol_stat(device, attr, buf, \
- offsetof(struct iw_protocol_stats, name) / \
- sizeof (u64)); \
-} \
-static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
-
-IW_STATS_ENTRY(ipInReceives);
-IW_STATS_ENTRY(ipInHdrErrors);
-IW_STATS_ENTRY(ipInTooBigErrors);
-IW_STATS_ENTRY(ipInNoRoutes);
-IW_STATS_ENTRY(ipInAddrErrors);
-IW_STATS_ENTRY(ipInUnknownProtos);
-IW_STATS_ENTRY(ipInTruncatedPkts);
-IW_STATS_ENTRY(ipInDiscards);
-IW_STATS_ENTRY(ipInDelivers);
-IW_STATS_ENTRY(ipOutForwDatagrams);
-IW_STATS_ENTRY(ipOutRequests);
-IW_STATS_ENTRY(ipOutDiscards);
-IW_STATS_ENTRY(ipOutNoRoutes);
-IW_STATS_ENTRY(ipReasmTimeout);
-IW_STATS_ENTRY(ipReasmReqds);
-IW_STATS_ENTRY(ipReasmOKs);
-IW_STATS_ENTRY(ipReasmFails);
-IW_STATS_ENTRY(ipFragOKs);
-IW_STATS_ENTRY(ipFragFails);
-IW_STATS_ENTRY(ipFragCreates);
-IW_STATS_ENTRY(ipInMcastPkts);
-IW_STATS_ENTRY(ipOutMcastPkts);
-IW_STATS_ENTRY(ipInBcastPkts);
-IW_STATS_ENTRY(ipOutBcastPkts);
-IW_STATS_ENTRY(tcpRtoAlgorithm);
-IW_STATS_ENTRY(tcpRtoMin);
-IW_STATS_ENTRY(tcpRtoMax);
-IW_STATS_ENTRY(tcpMaxConn);
-IW_STATS_ENTRY(tcpActiveOpens);
-IW_STATS_ENTRY(tcpPassiveOpens);
-IW_STATS_ENTRY(tcpAttemptFails);
-IW_STATS_ENTRY(tcpEstabResets);
-IW_STATS_ENTRY(tcpCurrEstab);
-IW_STATS_ENTRY(tcpInSegs);
-IW_STATS_ENTRY(tcpOutSegs);
-IW_STATS_ENTRY(tcpRetransSegs);
-IW_STATS_ENTRY(tcpInErrs);
-IW_STATS_ENTRY(tcpOutRsts);
-
-static struct attribute *iw_proto_stats_attrs[] = {
- &dev_attr_ipInReceives.attr,
- &dev_attr_ipInHdrErrors.attr,
- &dev_attr_ipInTooBigErrors.attr,
- &dev_attr_ipInNoRoutes.attr,
- &dev_attr_ipInAddrErrors.attr,
- &dev_attr_ipInUnknownProtos.attr,
- &dev_attr_ipInTruncatedPkts.attr,
- &dev_attr_ipInDiscards.attr,
- &dev_attr_ipInDelivers.attr,
- &dev_attr_ipOutForwDatagrams.attr,
- &dev_attr_ipOutRequests.attr,
- &dev_attr_ipOutDiscards.attr,
- &dev_attr_ipOutNoRoutes.attr,
- &dev_attr_ipReasmTimeout.attr,
- &dev_attr_ipReasmReqds.attr,
- &dev_attr_ipReasmOKs.attr,
- &dev_attr_ipReasmFails.attr,
- &dev_attr_ipFragOKs.attr,
- &dev_attr_ipFragFails.attr,
- &dev_attr_ipFragCreates.attr,
- &dev_attr_ipInMcastPkts.attr,
- &dev_attr_ipOutMcastPkts.attr,
- &dev_attr_ipInBcastPkts.attr,
- &dev_attr_ipOutBcastPkts.attr,
- &dev_attr_tcpRtoAlgorithm.attr,
- &dev_attr_tcpRtoMin.attr,
- &dev_attr_tcpRtoMax.attr,
- &dev_attr_tcpMaxConn.attr,
- &dev_attr_tcpActiveOpens.attr,
- &dev_attr_tcpPassiveOpens.attr,
- &dev_attr_tcpAttemptFails.attr,
- &dev_attr_tcpEstabResets.attr,
- &dev_attr_tcpCurrEstab.attr,
- &dev_attr_tcpInSegs.attr,
- &dev_attr_tcpOutSegs.attr,
- &dev_attr_tcpRetransSegs.attr,
- &dev_attr_tcpInErrs.attr,
- &dev_attr_tcpOutRsts.attr,
- NULL
+ return sysfs_emit(buf, "%s\n", version);
+}
+static DEVICE_ATTR_RO(fw_ver);
+
+static struct attribute *ib_dev_attrs[] = {
+ &dev_attr_node_type.attr,
+ &dev_attr_node_guid.attr,
+ &dev_attr_sys_image_guid.attr,
+ &dev_attr_fw_ver.attr,
+ &dev_attr_node_desc.attr,
+ NULL,
};
-static struct attribute_group iw_stats_group = {
- .name = "proto_stats",
- .attrs = iw_proto_stats_attrs,
+const struct attribute_group ib_dev_attr_group = {
+ .attrs = ib_dev_attrs,
};
-static void free_port_list_attributes(struct ib_device *device)
+void ib_free_port_attrs(struct ib_core_device *coredev)
{
struct kobject *p, *t;
- list_for_each_entry_safe(p, t, &device->port_list, entry) {
+ list_for_each_entry_safe(p, t, &coredev->port_list, entry) {
struct ib_port *port = container_of(p, struct ib_port, kobj);
- list_del(&p->entry);
- sysfs_remove_group(p, &pma_group);
- sysfs_remove_group(p, &port->pkey_group);
- sysfs_remove_group(p, &port->gid_group);
- kobject_put(p);
+
+ destroy_gid_attrs(port);
+ destroy_port(coredev, port);
}
- kobject_put(device->ports_parent);
+ kobject_put(coredev->ports_kobj);
}
-int ib_device_register_sysfs(struct ib_device *device,
- int (*port_callback)(struct ib_device *,
- u8, struct kobject *))
+int ib_setup_port_attrs(struct ib_core_device *coredev)
{
- struct device *class_dev = &device->dev;
+ struct ib_device *device = rdma_device_to_ibdev(&coredev->dev);
+ u32 port_num;
int ret;
- int i;
- device->dev.parent = device->dma_device;
- ret = dev_set_name(class_dev, "%s", device->name);
- if (ret)
- return ret;
+ coredev->ports_kobj = kobject_create_and_add("ports",
+ &coredev->dev.kobj);
+ if (!coredev->ports_kobj)
+ return -ENOMEM;
- ret = device_add(class_dev);
- if (ret)
- goto err;
+ rdma_for_each_port (device, port_num) {
+ struct ib_port_attr attr;
+ struct ib_port *port;
- for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i) {
- ret = device_create_file(class_dev, ib_class_attributes[i]);
+ ret = ib_query_port(device, port_num, &attr);
if (ret)
- goto err_unregister;
- }
-
- device->ports_parent = kobject_create_and_add("ports",
- &class_dev->kobj);
- if (!device->ports_parent) {
- ret = -ENOMEM;
- goto err_put;
- }
+ goto err_put;
- if (rdma_cap_ib_switch(device)) {
- ret = add_port(device, 0, port_callback);
- if (ret)
+ port = setup_port(coredev, port_num, &attr);
+ if (IS_ERR(port)) {
+ ret = PTR_ERR(port);
goto err_put;
- } else {
- for (i = 1; i <= device->phys_port_cnt; ++i) {
- ret = add_port(device, i, port_callback);
- if (ret)
- goto err_put;
}
- }
- if (device->node_type == RDMA_NODE_RNIC && device->get_protocol_stats) {
- ret = sysfs_create_group(&class_dev->kobj, &iw_stats_group);
+ ret = setup_gid_attrs(port, &attr);
if (ret)
goto err_put;
}
-
return 0;
err_put:
- free_port_list_attributes(device);
-
-err_unregister:
- device_unregister(class_dev);
-
-err:
+ ib_free_port_attrs(coredev);
return ret;
}
-void ib_device_unregister_sysfs(struct ib_device *device)
+/**
+ * ib_port_register_client_groups - Add an ib_client's attributes to the port
+ *
+ * @ibdev: IB device to add the attribute groups to
+ * @port_num: valid port number
+ * @groups: NULL-terminated list of attribute groups
+ *
+ * Do not use. Only for legacy sysfs compatibility.
+ */
+int ib_port_register_client_groups(struct ib_device *ibdev, u32 port_num,
+ const struct attribute_group **groups)
{
- /* Hold kobject until ib_dealloc_device() */
- struct kobject *kobj_dev = kobject_get(&device->dev.kobj);
- int i;
-
- if (device->node_type == RDMA_NODE_RNIC && device->get_protocol_stats)
- sysfs_remove_group(kobj_dev, &iw_stats_group);
-
- free_port_list_attributes(device);
-
- for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i)
- device_remove_file(&device->dev, ib_class_attributes[i]);
+ return sysfs_create_groups(&ibdev->port_data[port_num].sysfs->kobj,
+ groups);
+}
+EXPORT_SYMBOL(ib_port_register_client_groups);
- device_unregister(&device->dev);
+void ib_port_unregister_client_groups(struct ib_device *ibdev, u32 port_num,
+ const struct attribute_group **groups)
+{
+ return sysfs_remove_groups(&ibdev->port_data[port_num].sysfs->kobj,
+ groups);
}
+EXPORT_SYMBOL(ib_port_unregister_client_groups);
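For context, a legacy ib_client would typically pair these two helpers in its
add/remove callbacks, roughly as in the sketch below. The client name, the demo
attribute, and the IB_PORT_ATTR_RO/ib_port_attribute helpers (assumed to come
from rdma/ib_sysfs.h, with the client add callback returning int as on current
kernels) are illustrative assumptions, not something this patch introduces.

#include <rdma/ib_verbs.h>
#include <rdma/ib_sysfs.h>

/* Hypothetical per-port attribute: just echoes the port number. */
static ssize_t demo_flag_show(struct ib_device *ibdev, u32 port_num,
                              struct ib_port_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%u\n", port_num);
}
static IB_PORT_ATTR_RO(demo_flag);

static struct attribute *demo_port_attrs[] = {
        &ib_port_attr_demo_flag.attr,
        NULL,
};
static const struct attribute_group demo_port_group = {
        .name  = "demo",
        .attrs = demo_port_attrs,
};
static const struct attribute_group *demo_port_groups[] = {
        &demo_port_group,
        NULL,
};

static int demo_add_device(struct ib_device *ibdev)
{
        u32 port_num;
        int ret;

        rdma_for_each_port(ibdev, port_num) {
                ret = ib_port_register_client_groups(ibdev, port_num,
                                                     demo_port_groups);
                if (ret)
                        return ret; /* a real client would unwind earlier ports */
        }
        return 0;
}

static void demo_remove_device(struct ib_device *ibdev, void *client_data)
{
        u32 port_num;

        rdma_for_each_port(ibdev, port_num)
                ib_port_unregister_client_groups(ibdev, port_num,
                                                 demo_port_groups);
}

static struct ib_client demo_client = {
        .name   = "demo_port_attrs",
        .add    = demo_add_device,
        .remove = demo_remove_device,
};
/* Registered from module init with ib_register_client(&demo_client). */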
diff --git a/drivers/infiniband/core/trace.c b/drivers/infiniband/core/trace.c
new file mode 100644
index 000000000000..31e7860d35bf
--- /dev/null
+++ b/drivers/infiniband/core/trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Trace points for core RDMA functions.
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#define CREATE_TRACE_POINTS
+
+#include <trace/events/rdma_core.h>
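The file above is intentionally tiny: defining CREATE_TRACE_POINTS before
including the trace header is the standard kernel tracepoint idiom, and it must
be done in exactly one translation unit so the tracepoint bodies are emitted
only once; every other user simply includes the same header and calls the
trace_*() wrappers. A schematic trace header of the kind this relies on might
look like the sketch below; "demo_event" is invented for illustration and is
not an actual event from <trace/events/rdma_core.h>.

#undef TRACE_SYSTEM
#define TRACE_SYSTEM rdma_core

#if !defined(_TRACE_RDMA_CORE_DEMO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RDMA_CORE_DEMO_H

#include <linux/tracepoint.h>

TRACE_EVENT(demo_event,
        TP_PROTO(u32 value),
        TP_ARGS(value),
        TP_STRUCT__entry(
                __field(u32, value)
        ),
        TP_fast_assign(
                __entry->value = value;
        ),
        TP_printk("value=%u", __entry->value)
);

#endif /* _TRACE_RDMA_CORE_DEMO_H */

/* Must stay last: expands into the tracepoint definitions when the single .c
 * file (here trace.c) has defined CREATE_TRACE_POINTS. */
#include <trace/define_trace.h>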
diff --git a/drivers/infiniband/core/ucaps.c b/drivers/infiniband/core/ucaps.c
new file mode 100644
index 000000000000..de5cb8bf0a61
--- /dev/null
+++ b/drivers/infiniband/core/ucaps.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#include <linux/kref.h>
+#include <linux/cdev.h>
+#include <linux/mutex.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <rdma/ib_ucaps.h>
+
+#define RDMA_UCAP_FIRST RDMA_UCAP_MLX5_CTRL_LOCAL
+
+static DEFINE_MUTEX(ucaps_mutex);
+static struct ib_ucap *ucaps_list[RDMA_UCAP_MAX];
+static bool ucaps_class_is_registered;
+static dev_t ucaps_base_dev;
+
+struct ib_ucap {
+ struct cdev cdev;
+ struct device dev;
+ struct kref ref;
+};
+
+static const char *ucap_names[RDMA_UCAP_MAX] = {
+ [RDMA_UCAP_MLX5_CTRL_LOCAL] = "mlx5_perm_ctrl_local",
+ [RDMA_UCAP_MLX5_CTRL_OTHER_VHCA] = "mlx5_perm_ctrl_other_vhca"
+};
+
+static char *ucaps_devnode(const struct device *dev, umode_t *mode)
+{
+ if (mode)
+ *mode = 0600;
+
+ return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
+}
+
+static const struct class ucaps_class = {
+ .name = "infiniband_ucaps",
+ .devnode = ucaps_devnode,
+};
+
+static const struct file_operations ucaps_cdev_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+};
+
+/**
+ * ib_cleanup_ucaps - cleanup all API resources and class.
+ *
+ * This is called once, when removing the ib_uverbs module.
+ */
+void ib_cleanup_ucaps(void)
+{
+ mutex_lock(&ucaps_mutex);
+ if (!ucaps_class_is_registered) {
+ mutex_unlock(&ucaps_mutex);
+ return;
+ }
+
+ for (int i = RDMA_UCAP_FIRST; i < RDMA_UCAP_MAX; i++)
+ WARN_ON(ucaps_list[i]);
+
+ class_unregister(&ucaps_class);
+ ucaps_class_is_registered = false;
+ unregister_chrdev_region(ucaps_base_dev, RDMA_UCAP_MAX);
+ mutex_unlock(&ucaps_mutex);
+}
+
+static int get_ucap_from_devt(dev_t devt, u64 *idx_mask)
+{
+ for (int type = RDMA_UCAP_FIRST; type < RDMA_UCAP_MAX; type++) {
+ if (ucaps_list[type] && ucaps_list[type]->dev.devt == devt) {
+ *idx_mask |= 1 << type;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int get_devt_from_fd(unsigned int fd, dev_t *ret_dev)
+{
+ struct file *file;
+
+ file = fget(fd);
+ if (!file)
+ return -EBADF;
+
+ *ret_dev = file_inode(file)->i_rdev;
+ fput(file);
+ return 0;
+}
+
+/**
+ * ib_ucaps_init - Initialization required before ucap creation.
+ *
+ * Return: 0 on success, or a negative errno value on failure
+ */
+static int ib_ucaps_init(void)
+{
+ int ret = 0;
+
+ if (ucaps_class_is_registered)
+ return ret;
+
+ ret = class_register(&ucaps_class);
+ if (ret)
+ return ret;
+
+ ret = alloc_chrdev_region(&ucaps_base_dev, 0, RDMA_UCAP_MAX,
+ ucaps_class.name);
+ if (ret < 0) {
+ class_unregister(&ucaps_class);
+ return ret;
+ }
+
+ ucaps_class_is_registered = true;
+
+ return 0;
+}
+
+static void ucap_dev_release(struct device *device)
+{
+ struct ib_ucap *ucap = container_of(device, struct ib_ucap, dev);
+
+ kfree(ucap);
+}
+
+/**
+ * ib_create_ucap - Add a ucap character device
+ * @type: UCAP type
+ *
+ * Creates a ucap character device in the /dev/infiniband directory. By default,
+ * the device has root-only read-write access.
+ *
+ * A driver may call this multiple times with the same UCAP type. A reference
+ * count tracks creations and deletions.
+ *
+ * Return: 0 on success, or a negative errno value on failure
+ */
+int ib_create_ucap(enum rdma_user_cap type)
+{
+ struct ib_ucap *ucap;
+ int ret;
+
+ if (type >= RDMA_UCAP_MAX)
+ return -EINVAL;
+
+ mutex_lock(&ucaps_mutex);
+ ret = ib_ucaps_init();
+ if (ret)
+ goto unlock;
+
+ ucap = ucaps_list[type];
+ if (ucap) {
+ kref_get(&ucap->ref);
+ mutex_unlock(&ucaps_mutex);
+ return 0;
+ }
+
+ ucap = kzalloc(sizeof(*ucap), GFP_KERNEL);
+ if (!ucap) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ device_initialize(&ucap->dev);
+ ucap->dev.class = &ucaps_class;
+ ucap->dev.devt = MKDEV(MAJOR(ucaps_base_dev), type);
+ ucap->dev.release = ucap_dev_release;
+ ret = dev_set_name(&ucap->dev, "%s", ucap_names[type]);
+ if (ret)
+ goto err_device;
+
+ cdev_init(&ucap->cdev, &ucaps_cdev_fops);
+ ucap->cdev.owner = THIS_MODULE;
+
+ ret = cdev_device_add(&ucap->cdev, &ucap->dev);
+ if (ret)
+ goto err_device;
+
+ kref_init(&ucap->ref);
+ ucaps_list[type] = ucap;
+ mutex_unlock(&ucaps_mutex);
+
+ return 0;
+
+err_device:
+ put_device(&ucap->dev);
+unlock:
+ mutex_unlock(&ucaps_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(ib_create_ucap);
+
+static void ib_release_ucap(struct kref *ref)
+{
+ struct ib_ucap *ucap = container_of(ref, struct ib_ucap, ref);
+ enum rdma_user_cap type;
+
+ for (type = RDMA_UCAP_FIRST; type < RDMA_UCAP_MAX; type++) {
+ if (ucaps_list[type] == ucap)
+ break;
+ }
+ WARN_ON(type == RDMA_UCAP_MAX);
+
+ ucaps_list[type] = NULL;
+ cdev_device_del(&ucap->cdev, &ucap->dev);
+ put_device(&ucap->dev);
+}
+
+/**
+ * ib_remove_ucap - Remove a ucap character device
+ * @type: User cap type
+ *
+ * Removes the ucap character device for the given type. The device is only
+ * removed from the filesystem once its reference count drops to zero.
+ */
+void ib_remove_ucap(enum rdma_user_cap type)
+{
+ struct ib_ucap *ucap;
+
+ mutex_lock(&ucaps_mutex);
+ ucap = ucaps_list[type];
+ if (WARN_ON(!ucap))
+ goto end;
+
+ kref_put(&ucap->ref, ib_release_ucap);
+end:
+ mutex_unlock(&ucaps_mutex);
+}
+EXPORT_SYMBOL(ib_remove_ucap);
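A driver that wants to expose one of these capabilities would typically bracket
its feature setup and teardown with the two exported calls, roughly as sketched
below. The wrapper names and the flag used to track creation are hypothetical;
only ib_create_ucap()/ib_remove_ucap() and RDMA_UCAP_MLX5_CTRL_LOCAL come from
this file.

#include <rdma/ib_ucaps.h>

static bool demo_ucap_created;

static int demo_enable_local_ctrl(void)
{
        int ret;

        /* The first creator makes /dev/infiniband/mlx5_perm_ctrl_local appear
         * (mode 0600, via ucaps_devnode()); later calls only take another
         * reference. */
        ret = ib_create_ucap(RDMA_UCAP_MLX5_CTRL_LOCAL);
        if (ret)
                return ret;
        demo_ucap_created = true;
        return 0;
}

static void demo_disable_local_ctrl(void)
{
        if (!demo_ucap_created)
                return;
        /* Drops one reference; the char device is deleted only when the last
         * creator calls this. */
        ib_remove_ucap(RDMA_UCAP_MLX5_CTRL_LOCAL);
        demo_ucap_created = false;
}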
+
+/**
+ * ib_get_ucaps - Get bitmask of ucap types from file descriptors
+ * @fds: Array of file descriptors
+ * @fd_count: Number of file descriptors in the array
+ * @idx_mask: Bitmask to be updated based on the ucaps in the fd list
+ *
+ * Given an array of file descriptors, set a bit in @idx_mask for each ucap
+ * type whose character device one of the file descriptors refers to.
+ *
+ * Return: 0 on success, or a negative errno value on failure
+ */
+int ib_get_ucaps(int *fds, int fd_count, uint64_t *idx_mask)
+{
+ int ret = 0;
+ dev_t dev;
+
+ *idx_mask = 0;
+ mutex_lock(&ucaps_mutex);
+ for (int i = 0; i < fd_count; i++) {
+ ret = get_devt_from_fd(fds[i], &dev);
+ if (ret)
+ goto end;
+
+ ret = get_ucap_from_devt(dev, idx_mask);
+ if (ret)
+ goto end;
+ }
+
+end:
+ mutex_unlock(&ucaps_mutex);
+ return ret;
+}
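On the consumer side (ib_get_ucaps() is not exported, so in practice the caller
sits inside ib_uverbs), the resulting mask is interpreted with the same
1 << type encoding used by get_ucap_from_devt(). A hedged sketch, assuming the
prototypes live in rdma/ib_ucaps.h as ucaps.c's include suggests, and with the
policy check and -EPERM return invented for illustration:

#include <rdma/ib_ucaps.h>

static int demo_require_local_ctrl(int *fds, int fd_count)
{
        uint64_t mask;
        int ret;

        ret = ib_get_ucaps(fds, fd_count, &mask);
        if (ret)
                return ret;     /* -EBADF for a bad fd, -EINVAL for a non-ucap fd */

        if (!(mask & (1ULL << RDMA_UCAP_MLX5_CTRL_LOCAL)))
                return -EPERM;  /* hypothetical policy decision */
        return 0;
}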
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
deleted file mode 100644
index 6b4e8a008bc0..000000000000
--- a/drivers/infiniband/core/ucm.c
+++ /dev/null
@@ -1,1368 +0,0 @@
-/*
- * Copyright (c) 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/completion.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/poll.h>
-#include <linux/sched.h>
-#include <linux/file.h>
-#include <linux/mount.h>
-#include <linux/cdev.h>
-#include <linux/idr.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-
-#include <asm/uaccess.h>
-
-#include <rdma/ib_cm.h>
-#include <rdma/ib_user_cm.h>
-#include <rdma/ib_marshall.h>
-
-MODULE_AUTHOR("Libor Michalek");
-MODULE_DESCRIPTION("InfiniBand userspace Connection Manager access");
-MODULE_LICENSE("Dual BSD/GPL");
-
-struct ib_ucm_device {
- int devnum;
- struct cdev cdev;
- struct device dev;
- struct ib_device *ib_dev;
-};
-
-struct ib_ucm_file {
- struct mutex file_mutex;
- struct file *filp;
- struct ib_ucm_device *device;
-
- struct list_head ctxs;
- struct list_head events;
- wait_queue_head_t poll_wait;
-};
-
-struct ib_ucm_context {
- int id;
- struct completion comp;
- atomic_t ref;
- int events_reported;
-
- struct ib_ucm_file *file;
- struct ib_cm_id *cm_id;
- __u64 uid;
-
- struct list_head events; /* list of pending events. */
- struct list_head file_list; /* member in file ctx list */
-};
-
-struct ib_ucm_event {
- struct ib_ucm_context *ctx;
- struct list_head file_list; /* member in file event list */
- struct list_head ctx_list; /* member in ctx event list */
-
- struct ib_cm_id *cm_id;
- struct ib_ucm_event_resp resp;
- void *data;
- void *info;
- int data_len;
- int info_len;
-};
-
-enum {
- IB_UCM_MAJOR = 231,
- IB_UCM_BASE_MINOR = 224,
- IB_UCM_MAX_DEVICES = 32
-};
-
-#define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR)
-
-static void ib_ucm_add_one(struct ib_device *device);
-static void ib_ucm_remove_one(struct ib_device *device, void *client_data);
-
-static struct ib_client ucm_client = {
- .name = "ucm",
- .add = ib_ucm_add_one,
- .remove = ib_ucm_remove_one
-};
-
-static DEFINE_MUTEX(ctx_id_mutex);
-static DEFINE_IDR(ctx_id_table);
-static DECLARE_BITMAP(dev_map, IB_UCM_MAX_DEVICES);
-
-static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
-{
- struct ib_ucm_context *ctx;
-
- mutex_lock(&ctx_id_mutex);
- ctx = idr_find(&ctx_id_table, id);
- if (!ctx)
- ctx = ERR_PTR(-ENOENT);
- else if (ctx->file != file)
- ctx = ERR_PTR(-EINVAL);
- else
- atomic_inc(&ctx->ref);
- mutex_unlock(&ctx_id_mutex);
-
- return ctx;
-}
-
-static void ib_ucm_ctx_put(struct ib_ucm_context *ctx)
-{
- if (atomic_dec_and_test(&ctx->ref))
- complete(&ctx->comp);
-}
-
-static inline int ib_ucm_new_cm_id(int event)
-{
- return event == IB_CM_REQ_RECEIVED || event == IB_CM_SIDR_REQ_RECEIVED;
-}
-
-static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx)
-{
- struct ib_ucm_event *uevent;
-
- mutex_lock(&ctx->file->file_mutex);
- list_del(&ctx->file_list);
- while (!list_empty(&ctx->events)) {
-
- uevent = list_entry(ctx->events.next,
- struct ib_ucm_event, ctx_list);
- list_del(&uevent->file_list);
- list_del(&uevent->ctx_list);
- mutex_unlock(&ctx->file->file_mutex);
-
- /* clear incoming connections. */
- if (ib_ucm_new_cm_id(uevent->resp.event))
- ib_destroy_cm_id(uevent->cm_id);
-
- kfree(uevent);
- mutex_lock(&ctx->file->file_mutex);
- }
- mutex_unlock(&ctx->file->file_mutex);
-}
-
-static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
-{
- struct ib_ucm_context *ctx;
-
- ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
- if (!ctx)
- return NULL;
-
- atomic_set(&ctx->ref, 1);
- init_completion(&ctx->comp);
- ctx->file = file;
- INIT_LIST_HEAD(&ctx->events);
-
- mutex_lock(&ctx_id_mutex);
- ctx->id = idr_alloc(&ctx_id_table, ctx, 0, 0, GFP_KERNEL);
- mutex_unlock(&ctx_id_mutex);
- if (ctx->id < 0)
- goto error;
-
- list_add_tail(&ctx->file_list, &file->ctxs);
- return ctx;
-
-error:
- kfree(ctx);
- return NULL;
-}
-
-static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq,
- struct ib_cm_req_event_param *kreq)
-{
- ureq->remote_ca_guid = kreq->remote_ca_guid;
- ureq->remote_qkey = kreq->remote_qkey;
- ureq->remote_qpn = kreq->remote_qpn;
- ureq->qp_type = kreq->qp_type;
- ureq->starting_psn = kreq->starting_psn;
- ureq->responder_resources = kreq->responder_resources;
- ureq->initiator_depth = kreq->initiator_depth;
- ureq->local_cm_response_timeout = kreq->local_cm_response_timeout;
- ureq->flow_control = kreq->flow_control;
- ureq->remote_cm_response_timeout = kreq->remote_cm_response_timeout;
- ureq->retry_count = kreq->retry_count;
- ureq->rnr_retry_count = kreq->rnr_retry_count;
- ureq->srq = kreq->srq;
- ureq->port = kreq->port;
-
- ib_copy_path_rec_to_user(&ureq->primary_path, kreq->primary_path);
- if (kreq->alternate_path)
- ib_copy_path_rec_to_user(&ureq->alternate_path,
- kreq->alternate_path);
-}
-
-static void ib_ucm_event_rep_get(struct ib_ucm_rep_event_resp *urep,
- struct ib_cm_rep_event_param *krep)
-{
- urep->remote_ca_guid = krep->remote_ca_guid;
- urep->remote_qkey = krep->remote_qkey;
- urep->remote_qpn = krep->remote_qpn;
- urep->starting_psn = krep->starting_psn;
- urep->responder_resources = krep->responder_resources;
- urep->initiator_depth = krep->initiator_depth;
- urep->target_ack_delay = krep->target_ack_delay;
- urep->failover_accepted = krep->failover_accepted;
- urep->flow_control = krep->flow_control;
- urep->rnr_retry_count = krep->rnr_retry_count;
- urep->srq = krep->srq;
-}
-
-static void ib_ucm_event_sidr_rep_get(struct ib_ucm_sidr_rep_event_resp *urep,
- struct ib_cm_sidr_rep_event_param *krep)
-{
- urep->status = krep->status;
- urep->qkey = krep->qkey;
- urep->qpn = krep->qpn;
-};
-
-static int ib_ucm_event_process(struct ib_cm_event *evt,
- struct ib_ucm_event *uvt)
-{
- void *info = NULL;
-
- switch (evt->event) {
- case IB_CM_REQ_RECEIVED:
- ib_ucm_event_req_get(&uvt->resp.u.req_resp,
- &evt->param.req_rcvd);
- uvt->data_len = IB_CM_REQ_PRIVATE_DATA_SIZE;
- uvt->resp.present = IB_UCM_PRES_PRIMARY;
- uvt->resp.present |= (evt->param.req_rcvd.alternate_path ?
- IB_UCM_PRES_ALTERNATE : 0);
- break;
- case IB_CM_REP_RECEIVED:
- ib_ucm_event_rep_get(&uvt->resp.u.rep_resp,
- &evt->param.rep_rcvd);
- uvt->data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
- break;
- case IB_CM_RTU_RECEIVED:
- uvt->data_len = IB_CM_RTU_PRIVATE_DATA_SIZE;
- uvt->resp.u.send_status = evt->param.send_status;
- break;
- case IB_CM_DREQ_RECEIVED:
- uvt->data_len = IB_CM_DREQ_PRIVATE_DATA_SIZE;
- uvt->resp.u.send_status = evt->param.send_status;
- break;
- case IB_CM_DREP_RECEIVED:
- uvt->data_len = IB_CM_DREP_PRIVATE_DATA_SIZE;
- uvt->resp.u.send_status = evt->param.send_status;
- break;
- case IB_CM_MRA_RECEIVED:
- uvt->resp.u.mra_resp.timeout =
- evt->param.mra_rcvd.service_timeout;
- uvt->data_len = IB_CM_MRA_PRIVATE_DATA_SIZE;
- break;
- case IB_CM_REJ_RECEIVED:
- uvt->resp.u.rej_resp.reason = evt->param.rej_rcvd.reason;
- uvt->data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
- uvt->info_len = evt->param.rej_rcvd.ari_length;
- info = evt->param.rej_rcvd.ari;
- break;
- case IB_CM_LAP_RECEIVED:
- ib_copy_path_rec_to_user(&uvt->resp.u.lap_resp.path,
- evt->param.lap_rcvd.alternate_path);
- uvt->data_len = IB_CM_LAP_PRIVATE_DATA_SIZE;
- uvt->resp.present = IB_UCM_PRES_ALTERNATE;
- break;
- case IB_CM_APR_RECEIVED:
- uvt->resp.u.apr_resp.status = evt->param.apr_rcvd.ap_status;
- uvt->data_len = IB_CM_APR_PRIVATE_DATA_SIZE;
- uvt->info_len = evt->param.apr_rcvd.info_len;
- info = evt->param.apr_rcvd.apr_info;
- break;
- case IB_CM_SIDR_REQ_RECEIVED:
- uvt->resp.u.sidr_req_resp.pkey =
- evt->param.sidr_req_rcvd.pkey;
- uvt->resp.u.sidr_req_resp.port =
- evt->param.sidr_req_rcvd.port;
- uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE;
- break;
- case IB_CM_SIDR_REP_RECEIVED:
- ib_ucm_event_sidr_rep_get(&uvt->resp.u.sidr_rep_resp,
- &evt->param.sidr_rep_rcvd);
- uvt->data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
- uvt->info_len = evt->param.sidr_rep_rcvd.info_len;
- info = evt->param.sidr_rep_rcvd.info;
- break;
- default:
- uvt->resp.u.send_status = evt->param.send_status;
- break;
- }
-
- if (uvt->data_len) {
- uvt->data = kmemdup(evt->private_data, uvt->data_len, GFP_KERNEL);
- if (!uvt->data)
- goto err1;
-
- uvt->resp.present |= IB_UCM_PRES_DATA;
- }
-
- if (uvt->info_len) {
- uvt->info = kmemdup(info, uvt->info_len, GFP_KERNEL);
- if (!uvt->info)
- goto err2;
-
- uvt->resp.present |= IB_UCM_PRES_INFO;
- }
- return 0;
-
-err2:
- kfree(uvt->data);
-err1:
- return -ENOMEM;
-}
-
-static int ib_ucm_event_handler(struct ib_cm_id *cm_id,
- struct ib_cm_event *event)
-{
- struct ib_ucm_event *uevent;
- struct ib_ucm_context *ctx;
- int result = 0;
-
- ctx = cm_id->context;
-
- uevent = kzalloc(sizeof *uevent, GFP_KERNEL);
- if (!uevent)
- goto err1;
-
- uevent->ctx = ctx;
- uevent->cm_id = cm_id;
- uevent->resp.uid = ctx->uid;
- uevent->resp.id = ctx->id;
- uevent->resp.event = event->event;
-
- result = ib_ucm_event_process(event, uevent);
- if (result)
- goto err2;
-
- mutex_lock(&ctx->file->file_mutex);
- list_add_tail(&uevent->file_list, &ctx->file->events);
- list_add_tail(&uevent->ctx_list, &ctx->events);
- wake_up_interruptible(&ctx->file->poll_wait);
- mutex_unlock(&ctx->file->file_mutex);
- return 0;
-
-err2:
- kfree(uevent);
-err1:
- /* Destroy new cm_id's */
- return ib_ucm_new_cm_id(event->event);
-}
-
-static ssize_t ib_ucm_event(struct ib_ucm_file *file,
- const char __user *inbuf,
- int in_len, int out_len)
-{
- struct ib_ucm_context *ctx;
- struct ib_ucm_event_get cmd;
- struct ib_ucm_event *uevent;
- int result = 0;
-
- if (out_len < sizeof(struct ib_ucm_event_resp))
- return -ENOSPC;
-
- if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
- return -EFAULT;
-
- mutex_lock(&file->file_mutex);
- while (list_empty(&file->events)) {
- mutex_unlock(&file->file_mutex);
-
- if (file->filp->f_flags & O_NONBLOCK)
- return -EAGAIN;
-
- if (wait_event_interruptible(file->poll_wait,
- !list_empty(&file->events)))
- return -ERESTARTSYS;
-
- mutex_lock(&file->file_mutex);
- }
-
- uevent = list_entry(file->events.next, struct ib_ucm_event, file_list);
-
- if (ib_ucm_new_cm_id(uevent->resp.event)) {
- ctx = ib_ucm_ctx_alloc(file);
- if (!ctx) {
- result = -ENOMEM;
- goto done;
- }
-
- ctx->cm_id = uevent->cm_id;
- ctx->cm_id->context = ctx;
- uevent->resp.id = ctx->id;
- }
-
- if (copy_to_user((void __user *)(unsigned long)cmd.response,
- &uevent->resp, sizeof(uevent->resp))) {
- result = -EFAULT;
- goto done;
- }
-
- if (uevent->data) {
- if (cmd.data_len < uevent->data_len) {
- result = -ENOMEM;
- goto done;
- }
- if (copy_to_user((void __user *)(unsigned long)cmd.data,
- uevent->data, uevent->data_len)) {
- result = -EFAULT;
- goto done;
- }
- }
-
- if (uevent->info) {
- if (cmd.info_len < uevent->info_len) {
- result = -ENOMEM;
- goto done;
- }
- if (copy_to_user((void __user *)(unsigned long)cmd.info,
- uevent->info, uevent->info_len)) {
- result = -EFAULT;
- goto done;
- }
- }
-
- list_del(&uevent->file_list);
- list_del(&uevent->ctx_list);
- uevent->ctx->events_reported++;
-
- kfree(uevent->data);
- kfree(uevent->info);
- kfree(uevent);
-done:
- mutex_unlock(&file->file_mutex);
- return result;
-}
-
-static ssize_t ib_ucm_create_id(struct ib_ucm_file *file,
- const char __user *inbuf,
- int in_len, int out_len)
-{
- struct ib_ucm_create_id cmd;
- struct ib_ucm_create_id_resp resp;
- struct ib_ucm_context *ctx;
- int result;
-
- if (out_len < sizeof(resp))
- return -ENOSPC;
-
- if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
- return -EFAULT;
-
- mutex_lock(&file->file_mutex);
- ctx = ib_ucm_ctx_alloc(file);
- mutex_unlock(&file->file_mutex);
- if (!ctx)
- return -ENOMEM;
-
- ctx->uid = cmd.uid;
- ctx->cm_id = ib_create_cm_id(file->device->ib_dev,
- ib_ucm_event_handler, ctx);
- if (IS_ERR(ctx->cm_id)) {
- result = PTR_ERR(ctx->cm_id);
- goto err1;
- }
-
- resp.id = ctx->id;
- if (copy_to_user((void __user *)(unsigned long)cmd.response,
- &resp, sizeof(resp))) {
- result = -EFAULT;
- goto err2;
- }
- return 0;
-
-err2:
- ib_destroy_cm_id(ctx->cm_id);
-err1:
- mutex_lock(&ctx_id_mutex);
- idr_remove(&ctx_id_table, ctx->id);
- mutex_unlock(&ctx_id_mutex);
- kfree(ctx);
- return result;
-}
-
-static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
- const char __user *inbuf,
- int in_len, int out_len)
-{
- struct ib_ucm_destroy_id cmd;
- struct ib_ucm_destroy_id_resp resp;
- struct ib_ucm_context *ctx;
- int result = 0;
-
- if (out_len < sizeof(resp))
- return -ENOSPC;
-
- if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
- return -EFAULT;
-
- mutex_lock(&ctx_id_mutex);
- ctx = idr_find(&ctx_id_table, cmd.id);
- if (!ctx)
- ctx = ERR_PTR(-ENOENT);
- else if (ctx->file != file)
- ctx = ERR_PTR(-EINVAL);
- else
- idr_remove(&ctx_id_table, ctx->id);
- mutex_unlock(&ctx_id_mutex);
-
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
- ib_ucm_ctx_put(ctx);
- wait_for_completion(&ctx->comp);
-
- /* No new events will be generated after destroying the cm_id. */
- ib_destroy_cm_id(ctx->cm_id);
- /* Cleanup events not yet reported to the user. */
- ib_ucm_cleanup_events(ctx);
-
- resp.events_reported = ctx->events_reported;
- if (copy_to_user((void __user *)(unsigned long)cmd.response,
- &resp, sizeof(resp)))
- result = -EFAULT;
-
- kfree(ctx);
- return result;
-}
-
-static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file,
- const char __user *inbuf,
- int in_len, int out_len)
-{
- struct ib_ucm_attr_id_resp resp;
- struct ib_ucm_attr_id cmd;
- struct ib_ucm_context *ctx;
- int result = 0;
-
- if (out_len < sizeof(resp))
- return -ENOSPC;
-
- if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
- return -EFAULT;
-
- ctx = ib_ucm_ctx_get(file, cmd.id);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
- resp.service_id = ctx->cm_id->service_id;
- resp.service_mask = ctx->cm_id->service_mask;
- resp.local_id = ctx->cm_id->local_id;
- resp.remote_id = ctx->cm_id->remote_id;
-
- if (copy_to_user((void __user *)(unsigned long)cmd.response,
- &resp, sizeof(resp)))
- result = -EFAULT;
-
- ib_ucm_ctx_put(ctx);
- return result;
-}
-
-static ssize_t ib_ucm_init_qp_attr(struct ib_ucm_file *file,
- const char __user *inbuf,
- int in_len, int out_len)
-{
- struct ib_uverbs_qp_attr resp;
- struct ib_ucm_init_qp_attr cmd;
- struct ib_ucm_context *ctx;
- struct ib_qp_attr qp_attr;
- int result = 0;
-
- if (out_len < sizeof(resp))
- return -ENOSPC;
-
- if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
- return -EFAULT;
-
- ctx = ib_ucm_ctx_get(file, cmd.id);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
- resp.qp_attr_mask = 0;
- memset(&qp_attr, 0, sizeof qp_attr);
- qp_attr.qp_state = cmd.qp_state;
- result = ib_cm_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
- if (result)
- goto out;
-
- ib_copy_qp_attr_to_user(&resp, &qp_attr);
-
- if (copy_to_user((void __user *)(unsigned long)cmd.response,
- &resp, sizeof(resp)))
- result = -EFAULT;
-
-out:
- ib_ucm_ctx_put(ctx);
- return result;
-}
-
-static int ucm_validate_listen(__be64 service_id, __be64 service_mask)
-{
- service_id &= service_mask;
-
- if (((service_id & IB_CMA_SERVICE_ID_MASK) == IB_CMA_SERVICE_ID) ||
- ((service_id & IB_SDP_SERVICE_ID_MASK) == IB_SDP_SERVICE_ID))
- return -EINVAL;
-
- return 0;
-}
-
-static ssize_t ib_ucm_listen(struct ib_ucm_file *file,
- const char __user *inbuf,
- int in_len, int out_len)
-{
- struct ib_ucm_listen cmd;
- struct ib_ucm_context *ctx;
- int result;
-
- if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
- return -EFAULT;
-
- ctx = ib_ucm_ctx_get(file, cmd.id);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
- result = ucm_validate_listen(cmd.service_id, cmd.service_mask);
- if (result)
- goto out;
-
- result = ib_cm_listen(ctx->cm_id, cmd.service_id, cmd.service_mask);
-out:
- ib_ucm_ctx_put(ctx);
- return result;
-}
-
-static ssize_t ib_ucm_notify(struct ib_ucm_file *file,
- const char __user *inbuf,
- int in_len, int out_len)
-{
- struct ib_ucm_notify cmd;
- struct ib_ucm_context *ctx;
- int result;
-
- if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
- return -EFAULT;
-
- ctx = ib_ucm_ctx_get(file, cmd.id);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
- result = ib_cm_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
- ib_ucm_ctx_put(ctx);
- return result;
-}
-
-static int ib_ucm_alloc_data(const void **dest, u64 src, u32 len)
-{
- void *data;
-
- *dest = NULL;
-
- if (!len)
- return 0;
-
- data = memdup_user((void __user *)(unsigned long)src, len);
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- *dest = data;
- return 0;
-}
-
-static int ib_ucm_path_get(struct ib_sa_path_rec **path, u64 src)
-{
- struct ib_user_path_rec upath;
- struct ib_sa_path_rec *sa_path;
-
- *path = NULL;
-
- if (!src)
- return 0;
-
- sa_path = kmalloc(sizeof(*sa_path), GFP_KERNEL);
- if (!sa_path)
- return -ENOMEM;
-
- if (copy_from_user(&upath, (void __user *)(unsigned long)src,
- sizeof(upath))) {
-
- kfree(sa_path);
- return -EFAULT;
- }
-
- ib_copy_path_rec_from_user(sa_path, &upath);
- *path = sa_path;
- return 0;
-}
-
-static ssize_t ib_ucm_send_req(struct ib_ucm_file *file,
- const char __user *inbuf,
- int in_len, int out_len)
-{
- struct ib_cm_req_param param;
- struct ib_ucm_context *ctx;
- struct ib_ucm_req cmd;
- int result;
-
- param.private_data = NULL;
- param.primary_path = NULL;
- param.alternate_path = NULL;
-
- if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
- return -EFAULT;
-
- result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len);
- if (result)
- goto done;
-
- result = ib_ucm_path_get(&param.primary_path, cmd.primary_path);
- if (result)
- goto done;
-
- result = ib_ucm_path_get(&param.alternate_path, cmd.alternate_path);
- if (result)
- goto done;
-
- param.private_data_len = cmd.len;
- param.service_id = cmd.sid;
- param.qp_num = cmd.qpn;
- param.qp_type = cmd.qp_type;
- param.starting_psn = cmd.psn;
- param.peer_to_peer = cmd.peer_to_peer;
- param.responder_resources = cmd.responder_resources;
- param.initiator_depth = cmd.initiator_depth;
- param.remote_cm_response_timeout = cmd.remote_cm_response_timeout;
- param.flow_control = cmd.flow_control;
- param.local_cm_response_timeout = cmd.local_cm_response_timeout;
- param.retry_count = cmd.retry_count;
- param.rnr_retry_count = cmd.rnr_retry_count;
- param.max_cm_retries = cmd.max_cm_retries;
- param.srq = cmd.srq;
-
- ctx = ib_ucm_ctx_get(file, cmd.id);
- if (!IS_ERR(ctx)) {
- result = ib_send_cm_req(ctx->cm_id, &param);
- ib_ucm_ctx_put(ctx);
- } else
- result = PTR_ERR(ctx);
-
-done:
- kfree(param.private_data);
- kfree(param.primary_path);
- kfree(param.alternate_path);
- return result;
-}
-
-static ssize_t ib_ucm_send_rep(struct ib_ucm_file *file,
- const char __user *inbuf,
- int in_len, int out_len)
-{
- struct ib_cm_rep_param param;
- struct ib_ucm_context *ctx;
- struct ib_ucm_rep cmd;
- int result;
-
- param.private_data = NULL;
-
- if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
- return -EFAULT;
-
- result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len);
- if (result)
- return result;
-
- param.qp_num = cmd.qpn;
- param.starting_psn = cmd.psn;
- param.private_data_len = cmd.len;
- param.responder_resources = cmd.responder_resources;
- param.initiator_depth = cmd.initiator_depth;
- param.failover_accepted = cmd.failover_accepted;
- param.flow_control = cmd.flow_control;
- param.rnr_retry_count = cmd.rnr_retry_count;
- param.srq = cmd.srq;
-
- ctx = ib_ucm_ctx_get(file, cmd.id);
- if (!IS_ERR(ctx)) {
- ctx->uid = cmd.uid;
- result = ib_send_cm_rep(ctx->cm_id, &param);
- ib_ucm_ctx_put(ctx);
- } else
- result = PTR_ERR(ctx);
-
- kfree(param.private_data);
- return result;
-}
-
-static ssize_t ib_ucm_send_private_data(struct ib_ucm_file *file,
- const char __user *inbuf, int in_len,
- int (*func)(struct ib_cm_id *cm_id,
- const void *private_data,
- u8 private_data_len))
-{
- struct ib_ucm_private_data cmd;
- struct ib_ucm_context *ctx;
- const void *private_data = NULL;
- int result;
-
- if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
- return -EFAULT;
-
- result = ib_ucm_alloc_data(&private_data, cmd.data, cmd.len);
- if (result)
- return result;
-
- ctx = ib_ucm_ctx_get(file, cmd.id);
- if (!IS_ERR(ctx)) {
- result = func(ctx->cm_id, private_data, cmd.len);
- ib_ucm_ctx_put(ctx);
- } else
- result = PTR_ERR(ctx);
-
- kfree(private_data);
- return result;
-}
-
-static ssize_t ib_ucm_send_rtu(struct ib_ucm_file *file,
- const char __user *inbuf,
- int in_len, int out_len)
-{
- return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_rtu);
-}
-
-static ssize_t ib_ucm_send_dreq(struct ib_ucm_file *file,
- const char __user *inbuf,
- int in_len, int out_len)
-{
- return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_dreq);
-}
-
-static ssize_t ib_ucm_send_drep(struct ib_ucm_file *file,
- const char __user *inbuf,
- int in_len, int out_len)
-{
- return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_drep);
-}
-
-static ssize_t ib_ucm_send_info(struct ib_ucm_file *file,
- const char __user *inbuf, int in_len,
- int (*func)(struct ib_cm_id *cm_id,
- int status,
- const void *info,
- u8 info_len,
- const void *data,
- u8 data_len))
-{
- struct ib_ucm_context *ctx;
- struct ib_ucm_info cmd;
- const void *data = NULL;
- const void *info = NULL;
- int result;
-
- if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
- return -EFAULT;
-
- result = ib_ucm_alloc_data(&data, cmd.data, cmd.data_len);
- if (result)
- goto done;
-
- result = ib_ucm_alloc_data(&info, cmd.info, cmd.info_len);
- if (result)
- goto done;
-
- ctx = ib_ucm_ctx_get(file, cmd.id);
- if (!IS_ERR(ctx)) {
- result = func(ctx->cm_id, cmd.status, info, cmd.info_len,
- data, cmd.data_len);
- ib_ucm_ctx_put(ctx);
- } else
- result = PTR_ERR(ctx);
-
-done:
- kfree(data);
- kfree(info);
- return result;
-}
-
-static ssize_t ib_ucm_send_rej(struct ib_ucm_file *file,
- const char __user *inbuf,
- int in_len, int out_len)
-{
- return ib_ucm_send_info(file, inbuf, in_len, (void *)ib_send_cm_rej);
-}
-
-static ssize_t ib_ucm_send_apr(struct ib_ucm_file *file,
- const char __user *inbuf,
- int in_len, int out_len)
-{
- return ib_ucm_send_info(file, inbuf, in_len, (void *)ib_send_cm_apr);
-}
-
-static ssize_t ib_ucm_send_mra(struct ib_ucm_file *file,
- const char __user *inbuf,
- int in_len, int out_len)
-{
- struct ib_ucm_context *ctx;
- struct ib_ucm_mra cmd;
- const void *data = NULL;
- int result;
-
- if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
- return -EFAULT;
-
- result = ib_ucm_alloc_data(&data, cmd.data, cmd.len);
- if (result)
- return result;
-
- ctx = ib_ucm_ctx_get(file, cmd.id);
- if (!IS_ERR(ctx)) {
- result = ib_send_cm_mra(ctx->cm_id, cmd.timeout, data, cmd.len);
- ib_ucm_ctx_put(ctx);
- } else
- result = PTR_ERR(ctx);
-
- kfree(data);
- return result;
-}
-
-static ssize_t ib_ucm_send_lap(struct ib_ucm_file *file,
- const char __user *inbuf,
- int in_len, int out_len)
-{
- struct ib_ucm_context *ctx;
- struct ib_sa_path_rec *path = NULL;
- struct ib_ucm_lap cmd;
- const void *data = NULL;
- int result;
-
- if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
- return -EFAULT;
-
- result = ib_ucm_alloc_data(&data, cmd.data, cmd.len);
- if (result)
- goto done;
-
- result = ib_ucm_path_get(&path, cmd.path);
- if (result)
- goto done;
-
- ctx = ib_ucm_ctx_get(file, cmd.id);
- if (!IS_ERR(ctx)) {
- result = ib_send_cm_lap(ctx->cm_id, path, data, cmd.len);
- ib_ucm_ctx_put(ctx);
- } else
- result = PTR_ERR(ctx);
-
-done:
- kfree(data);
- kfree(path);
- return result;
-}
-
-static ssize_t ib_ucm_send_sidr_req(struct ib_ucm_file *file,
- const char __user *inbuf,
- int in_len, int out_len)
-{
- struct ib_cm_sidr_req_param param;
- struct ib_ucm_context *ctx;
- struct ib_ucm_sidr_req cmd;
- int result;
-
- param.private_data = NULL;
- param.path = NULL;
-
- if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
- return -EFAULT;
-
- result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len);
- if (result)
- goto done;
-
- result = ib_ucm_path_get(&param.path, cmd.path);
- if (result)
- goto done;
-
- param.private_data_len = cmd.len;
- param.service_id = cmd.sid;
- param.timeout_ms = cmd.timeout;
- param.max_cm_retries = cmd.max_cm_retries;
-
- ctx = ib_ucm_ctx_get(file, cmd.id);
- if (!IS_ERR(ctx)) {
- result = ib_send_cm_sidr_req(ctx->cm_id, &param);
- ib_ucm_ctx_put(ctx);
- } else
- result = PTR_ERR(ctx);
-
-done:
- kfree(param.private_data);
- kfree(param.path);
- return result;
-}
-
-static ssize_t ib_ucm_send_sidr_rep(struct ib_ucm_file *file,
- const char __user *inbuf,
- int in_len, int out_len)
-{
- struct ib_cm_sidr_rep_param param;
- struct ib_ucm_sidr_rep cmd;
- struct ib_ucm_context *ctx;
- int result;
-
- param.info = NULL;
-
- if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
- return -EFAULT;
-
- result = ib_ucm_alloc_data(&param.private_data,
- cmd.data, cmd.data_len);
- if (result)
- goto done;
-
- result = ib_ucm_alloc_data(&param.info, cmd.info, cmd.info_len);
- if (result)
- goto done;
-
- param.qp_num = cmd.qpn;
- param.qkey = cmd.qkey;
- param.status = cmd.status;
- param.info_length = cmd.info_len;
- param.private_data_len = cmd.data_len;
-
- ctx = ib_ucm_ctx_get(file, cmd.id);
- if (!IS_ERR(ctx)) {
- result = ib_send_cm_sidr_rep(ctx->cm_id, &param);
- ib_ucm_ctx_put(ctx);
- } else
- result = PTR_ERR(ctx);
-
-done:
- kfree(param.private_data);
- kfree(param.info);
- return result;
-}
-
-static ssize_t (*ucm_cmd_table[])(struct ib_ucm_file *file,
- const char __user *inbuf,
- int in_len, int out_len) = {
- [IB_USER_CM_CMD_CREATE_ID] = ib_ucm_create_id,
- [IB_USER_CM_CMD_DESTROY_ID] = ib_ucm_destroy_id,
- [IB_USER_CM_CMD_ATTR_ID] = ib_ucm_attr_id,
- [IB_USER_CM_CMD_LISTEN] = ib_ucm_listen,
- [IB_USER_CM_CMD_NOTIFY] = ib_ucm_notify,
- [IB_USER_CM_CMD_SEND_REQ] = ib_ucm_send_req,
- [IB_USER_CM_CMD_SEND_REP] = ib_ucm_send_rep,
- [IB_USER_CM_CMD_SEND_RTU] = ib_ucm_send_rtu,
- [IB_USER_CM_CMD_SEND_DREQ] = ib_ucm_send_dreq,
- [IB_USER_CM_CMD_SEND_DREP] = ib_ucm_send_drep,
- [IB_USER_CM_CMD_SEND_REJ] = ib_ucm_send_rej,
- [IB_USER_CM_CMD_SEND_MRA] = ib_ucm_send_mra,
- [IB_USER_CM_CMD_SEND_LAP] = ib_ucm_send_lap,
- [IB_USER_CM_CMD_SEND_APR] = ib_ucm_send_apr,
- [IB_USER_CM_CMD_SEND_SIDR_REQ] = ib_ucm_send_sidr_req,
- [IB_USER_CM_CMD_SEND_SIDR_REP] = ib_ucm_send_sidr_rep,
- [IB_USER_CM_CMD_EVENT] = ib_ucm_event,
- [IB_USER_CM_CMD_INIT_QP_ATTR] = ib_ucm_init_qp_attr,
-};
-
-static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
- size_t len, loff_t *pos)
-{
- struct ib_ucm_file *file = filp->private_data;
- struct ib_ucm_cmd_hdr hdr;
- ssize_t result;
-
- if (len < sizeof(hdr))
- return -EINVAL;
-
- if (copy_from_user(&hdr, buf, sizeof(hdr)))
- return -EFAULT;
-
- if (hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))
- return -EINVAL;
-
- if (hdr.in + sizeof(hdr) > len)
- return -EINVAL;
-
- result = ucm_cmd_table[hdr.cmd](file, buf + sizeof(hdr),
- hdr.in, hdr.out);
- if (!result)
- result = len;
-
- return result;
-}
-
-static unsigned int ib_ucm_poll(struct file *filp,
- struct poll_table_struct *wait)
-{
- struct ib_ucm_file *file = filp->private_data;
- unsigned int mask = 0;
-
- poll_wait(filp, &file->poll_wait, wait);
-
- if (!list_empty(&file->events))
- mask = POLLIN | POLLRDNORM;
-
- return mask;
-}
-
-/*
- * ib_ucm_open() does not need the BKL:
- *
- * - no global state is referred to;
- * - there is no ioctl method to race against;
- * - no further module initialization is required for open to work
- * after the device is registered.
- */
-static int ib_ucm_open(struct inode *inode, struct file *filp)
-{
- struct ib_ucm_file *file;
-
- file = kmalloc(sizeof(*file), GFP_KERNEL);
- if (!file)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&file->events);
- INIT_LIST_HEAD(&file->ctxs);
- init_waitqueue_head(&file->poll_wait);
-
- mutex_init(&file->file_mutex);
-
- filp->private_data = file;
- file->filp = filp;
- file->device = container_of(inode->i_cdev, struct ib_ucm_device, cdev);
-
- return nonseekable_open(inode, filp);
-}
-
-static int ib_ucm_close(struct inode *inode, struct file *filp)
-{
- struct ib_ucm_file *file = filp->private_data;
- struct ib_ucm_context *ctx;
-
- mutex_lock(&file->file_mutex);
- while (!list_empty(&file->ctxs)) {
- ctx = list_entry(file->ctxs.next,
- struct ib_ucm_context, file_list);
- mutex_unlock(&file->file_mutex);
-
- mutex_lock(&ctx_id_mutex);
- idr_remove(&ctx_id_table, ctx->id);
- mutex_unlock(&ctx_id_mutex);
-
- ib_destroy_cm_id(ctx->cm_id);
- ib_ucm_cleanup_events(ctx);
- kfree(ctx);
-
- mutex_lock(&file->file_mutex);
- }
- mutex_unlock(&file->file_mutex);
- kfree(file);
- return 0;
-}
-
-static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
-static void ib_ucm_release_dev(struct device *dev)
-{
- struct ib_ucm_device *ucm_dev;
-
- ucm_dev = container_of(dev, struct ib_ucm_device, dev);
- cdev_del(&ucm_dev->cdev);
- if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
- clear_bit(ucm_dev->devnum, dev_map);
- else
- clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, overflow_map);
- kfree(ucm_dev);
-}
-
-static const struct file_operations ucm_fops = {
- .owner = THIS_MODULE,
- .open = ib_ucm_open,
- .release = ib_ucm_close,
- .write = ib_ucm_write,
- .poll = ib_ucm_poll,
- .llseek = no_llseek,
-};
-
-static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct ib_ucm_device *ucm_dev;
-
- ucm_dev = container_of(dev, struct ib_ucm_device, dev);
- return sprintf(buf, "%s\n", ucm_dev->ib_dev->name);
-}
-static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
-
-static dev_t overflow_maj;
-static int find_overflow_devnum(void)
-{
- int ret;
-
- if (!overflow_maj) {
- ret = alloc_chrdev_region(&overflow_maj, 0, IB_UCM_MAX_DEVICES,
- "infiniband_cm");
- if (ret) {
- printk(KERN_ERR "ucm: couldn't register dynamic device number\n");
- return ret;
- }
- }
-
- ret = find_first_zero_bit(overflow_map, IB_UCM_MAX_DEVICES);
- if (ret >= IB_UCM_MAX_DEVICES)
- return -1;
-
- return ret;
-}
-
-static void ib_ucm_add_one(struct ib_device *device)
-{
- int devnum;
- dev_t base;
- struct ib_ucm_device *ucm_dev;
-
- if (!device->alloc_ucontext || !rdma_cap_ib_cm(device, 1))
- return;
-
- ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL);
- if (!ucm_dev)
- return;
-
- ucm_dev->ib_dev = device;
-
- devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
- if (devnum >= IB_UCM_MAX_DEVICES) {
- devnum = find_overflow_devnum();
- if (devnum < 0)
- goto err;
-
- ucm_dev->devnum = devnum + IB_UCM_MAX_DEVICES;
- base = devnum + overflow_maj;
- set_bit(devnum, overflow_map);
- } else {
- ucm_dev->devnum = devnum;
- base = devnum + IB_UCM_BASE_DEV;
- set_bit(devnum, dev_map);
- }
-
- cdev_init(&ucm_dev->cdev, &ucm_fops);
- ucm_dev->cdev.owner = THIS_MODULE;
- kobject_set_name(&ucm_dev->cdev.kobj, "ucm%d", ucm_dev->devnum);
- if (cdev_add(&ucm_dev->cdev, base, 1))
- goto err;
-
- ucm_dev->dev.class = &cm_class;
- ucm_dev->dev.parent = device->dma_device;
- ucm_dev->dev.devt = ucm_dev->cdev.dev;
- ucm_dev->dev.release = ib_ucm_release_dev;
- dev_set_name(&ucm_dev->dev, "ucm%d", ucm_dev->devnum);
- if (device_register(&ucm_dev->dev))
- goto err_cdev;
-
- if (device_create_file(&ucm_dev->dev, &dev_attr_ibdev))
- goto err_dev;
-
- ib_set_client_data(device, &ucm_client, ucm_dev);
- return;
-
-err_dev:
- device_unregister(&ucm_dev->dev);
-err_cdev:
- cdev_del(&ucm_dev->cdev);
- if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
- clear_bit(devnum, dev_map);
- else
- clear_bit(devnum, overflow_map);
-err:
- kfree(ucm_dev);
- return;
-}
-
-static void ib_ucm_remove_one(struct ib_device *device, void *client_data)
-{
- struct ib_ucm_device *ucm_dev = client_data;
-
- if (!ucm_dev)
- return;
-
- device_unregister(&ucm_dev->dev);
-}
-
-static CLASS_ATTR_STRING(abi_version, S_IRUGO,
- __stringify(IB_USER_CM_ABI_VERSION));
-
-static int __init ib_ucm_init(void)
-{
- int ret;
-
- ret = register_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES,
- "infiniband_cm");
- if (ret) {
- printk(KERN_ERR "ucm: couldn't register device number\n");
- goto error1;
- }
-
- ret = class_create_file(&cm_class, &class_attr_abi_version.attr);
- if (ret) {
- printk(KERN_ERR "ucm: couldn't create abi_version attribute\n");
- goto error2;
- }
-
- ret = ib_register_client(&ucm_client);
- if (ret) {
- printk(KERN_ERR "ucm: couldn't register client\n");
- goto error3;
- }
- return 0;
-
-error3:
- class_remove_file(&cm_class, &class_attr_abi_version.attr);
-error2:
- unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
-error1:
- return ret;
-}
-
-static void __exit ib_ucm_cleanup(void)
-{
- ib_unregister_client(&ucm_client);
- class_remove_file(&cm_class, &class_attr_abi_version.attr);
- unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
- if (overflow_maj)
- unregister_chrdev_region(overflow_maj, IB_UCM_MAX_DEVICES);
- idr_destroy(&ctx_id_table);
-}
-
-module_init(ib_ucm_init);
-module_exit(ib_ucm_cleanup);
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index a53fc9b01c69..f86ece701db6 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -42,6 +42,9 @@
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>
+#include <linux/nsproxy.h>
+
+#include <linux/nospec.h>
#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
@@ -49,6 +52,9 @@
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>
+#include <rdma/ib_cm.h>
+#include <rdma/rdma_netlink.h>
+#include "core_priv.h"
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
@@ -63,9 +69,10 @@ static struct ctl_table ucma_ctl_table[] = {
.data = &max_backlog,
.maxlen = sizeof max_backlog,
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_INT_MAX,
},
- { }
};
struct ucma_file {
@@ -74,60 +81,56 @@ struct ucma_file {
struct list_head ctx_list;
struct list_head event_list;
wait_queue_head_t poll_wait;
- struct workqueue_struct *close_wq;
};
struct ucma_context {
- int id;
+ u32 id;
struct completion comp;
- atomic_t ref;
+ refcount_t ref;
int events_reported;
- int backlog;
+ atomic_t backlog;
struct ucma_file *file;
struct rdma_cm_id *cm_id;
+ struct mutex mutex;
u64 uid;
struct list_head list;
struct list_head mc_list;
- /* mark that device is in process of destroying the internal HW
- * resources, protected by the global mut
- */
- int closing;
- /* sync between removal event and id destroy, protected by file mut */
- int destroying;
struct work_struct close_work;
};
struct ucma_multicast {
struct ucma_context *ctx;
- int id;
+ u32 id;
int events_reported;
u64 uid;
+ u8 join_state;
struct list_head list;
struct sockaddr_storage addr;
};
struct ucma_event {
struct ucma_context *ctx;
+ struct ucma_context *conn_req_ctx;
struct ucma_multicast *mc;
struct list_head list;
- struct rdma_cm_id *cm_id;
struct rdma_ucm_event_resp resp;
- struct work_struct close_work;
};
-static DEFINE_MUTEX(mut);
-static DEFINE_IDR(ctx_idr);
-static DEFINE_IDR(multicast_idr);
+static DEFINE_XARRAY_ALLOC(ctx_table);
+static DEFINE_XARRAY_ALLOC(multicast_table);
+
+static const struct file_operations ucma_fops;
+static int ucma_destroy_private_ctx(struct ucma_context *ctx);
static inline struct ucma_context *_ucma_find_context(int id,
struct ucma_file *file)
{
struct ucma_context *ctx;
- ctx = idr_find(&ctx_idr, id);
+ ctx = xa_load(&ctx_table, id);
if (!ctx)
ctx = ERR_PTR(-ENOENT);
else if (ctx->file != file)
@@ -139,30 +142,36 @@ static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
struct ucma_context *ctx;
- mutex_lock(&mut);
+ xa_lock(&ctx_table);
ctx = _ucma_find_context(id, file);
- if (!IS_ERR(ctx)) {
- if (ctx->closing)
- ctx = ERR_PTR(-EIO);
- else
- atomic_inc(&ctx->ref);
- }
- mutex_unlock(&mut);
+ if (!IS_ERR(ctx))
+ if (!refcount_inc_not_zero(&ctx->ref))
+ ctx = ERR_PTR(-ENXIO);
+ xa_unlock(&ctx_table);
return ctx;
}
static void ucma_put_ctx(struct ucma_context *ctx)
{
- if (atomic_dec_and_test(&ctx->ref))
+ if (refcount_dec_and_test(&ctx->ref))
complete(&ctx->comp);
}
-static void ucma_close_event_id(struct work_struct *work)
+/*
+ * Same as ucma_get_ctx() but additionally requires that ->cm_id->device is
+ * valid, i.e. that the CM_ID is bound to a device.
+ */
+static struct ucma_context *ucma_get_ctx_dev(struct ucma_file *file, int id)
{
- struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);
+ struct ucma_context *ctx = ucma_get_ctx(file, id);
- rdma_destroy_id(uevent_close->cm_id);
- kfree(uevent_close);
+ if (IS_ERR(ctx))
+ return ctx;
+ if (!ctx->cm_id->device) {
+ ucma_put_ctx(ctx);
+ return ERR_PTR(-EINVAL);
+ }
+ return ctx;
}
static void ucma_close_id(struct work_struct *work)
@@ -171,12 +180,15 @@ static void ucma_close_id(struct work_struct *work)
/* once all inflight tasks are finished, we close all underlying
* resources. The context is still alive till its explicit destruction
- * by its creator.
+ * by its creator. This puts back the xarray's reference.
*/
ucma_put_ctx(ctx);
wait_for_completion(&ctx->comp);
/* No new events will be generated after destroying the id. */
rdma_destroy_id(ctx->cm_id);
+
+ /* Reading the cm_id without holding a positive ref is not allowed */
+ ctx->cm_id = NULL;
}
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
@@ -188,46 +200,32 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
return NULL;
INIT_WORK(&ctx->close_work, ucma_close_id);
- atomic_set(&ctx->ref, 1);
init_completion(&ctx->comp);
INIT_LIST_HEAD(&ctx->mc_list);
+ /* So list_del() will work if we don't do ucma_finish_ctx() */
+ INIT_LIST_HEAD(&ctx->list);
ctx->file = file;
+ mutex_init(&ctx->mutex);
- mutex_lock(&mut);
- ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
- mutex_unlock(&mut);
- if (ctx->id < 0)
- goto error;
-
- list_add_tail(&ctx->list, &file->ctx_list);
+ if (xa_alloc(&ctx_table, &ctx->id, NULL, xa_limit_32b, GFP_KERNEL)) {
+ kfree(ctx);
+ return NULL;
+ }
return ctx;
-
-error:
- kfree(ctx);
- return NULL;
}
-static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
+static void ucma_set_ctx_cm_id(struct ucma_context *ctx,
+ struct rdma_cm_id *cm_id)
{
- struct ucma_multicast *mc;
-
- mc = kzalloc(sizeof(*mc), GFP_KERNEL);
- if (!mc)
- return NULL;
-
- mutex_lock(&mut);
- mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
- mutex_unlock(&mut);
- if (mc->id < 0)
- goto error;
-
- mc->ctx = ctx;
- list_add_tail(&mc->list, &ctx->mc_list);
- return mc;
+ refcount_set(&ctx->ref, 1);
+ ctx->cm_id = cm_id;
+}
-error:
- kfree(mc);
- return NULL;
+static void ucma_finish_ctx(struct ucma_context *ctx)
+{
+ lockdep_assert_held(&ctx->file->mut);
+ list_add_tail(&ctx->list, &ctx->file->ctx_list);
+ xa_store(&ctx_table, ctx->id, ctx, GFP_KERNEL);
}
static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
@@ -237,7 +235,7 @@ static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
memcpy(dst->private_data, src->private_data,
src->private_data_len);
dst->private_data_len = src->private_data_len;
- dst->responder_resources =src->responder_resources;
+ dst->responder_resources = src->responder_resources;
dst->initiator_depth = src->initiator_depth;
dst->flow_control = src->flow_control;
dst->retry_count = src->retry_count;
@@ -246,22 +244,28 @@ static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
dst->qp_num = src->qp_num;
}
-static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
+static void ucma_copy_ud_event(struct ib_device *device,
+ struct rdma_ucm_ud_param *dst,
struct rdma_ud_param *src)
{
if (src->private_data_len)
memcpy(dst->private_data, src->private_data,
src->private_data_len);
dst->private_data_len = src->private_data_len;
- ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
+ ib_copy_ah_attr_to_user(device, &dst->ah_attr, &src->ah_attr);
dst->qp_num = src->qp_num;
dst->qkey = src->qkey;
}
-static void ucma_set_event_context(struct ucma_context *ctx,
- struct rdma_cm_event *event,
- struct ucma_event *uevent)
+static struct ucma_event *ucma_create_uevent(struct ucma_context *ctx,
+ struct rdma_cm_event *event)
{
+ struct ucma_event *uevent;
+
+ uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
+ if (!uevent)
+ return NULL;
+
uevent->ctx = ctx;
switch (event->event) {
case RDMA_CM_EVENT_MULTICAST_JOIN:
@@ -276,44 +280,60 @@ static void ucma_set_event_context(struct ucma_context *ctx,
uevent->resp.id = ctx->id;
break;
}
+ uevent->resp.event = event->event;
+ uevent->resp.status = event->status;
+
+ if (event->event == RDMA_CM_EVENT_ADDRINFO_RESOLVED)
+ goto out;
+
+ if (ctx->cm_id->qp_type == IB_QPT_UD)
+ ucma_copy_ud_event(ctx->cm_id->device, &uevent->resp.param.ud,
+ &event->param.ud);
+ else
+ ucma_copy_conn_event(&uevent->resp.param.conn,
+ &event->param.conn);
+
+out:
+ uevent->resp.ece.vendor_id = event->ece.vendor_id;
+ uevent->resp.ece.attr_mod = event->ece.attr_mod;
+ return uevent;
}
-/* Called with file->mut locked for the relevant context. */
-static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
+static int ucma_connect_event_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event)
{
- struct ucma_context *ctx = cm_id->context;
- struct ucma_event *con_req_eve;
- int event_found = 0;
+ struct ucma_context *listen_ctx = cm_id->context;
+ struct ucma_context *ctx;
+ struct ucma_event *uevent;
- if (ctx->destroying)
- return;
+ if (!atomic_add_unless(&listen_ctx->backlog, -1, 0))
+ return -ENOMEM;
+ ctx = ucma_alloc_ctx(listen_ctx->file);
+ if (!ctx)
+ goto err_backlog;
+ ucma_set_ctx_cm_id(ctx, cm_id);
- /* only if context is pointing to cm_id that it owns it and can be
- * queued to be closed, otherwise that cm_id is an inflight one that
- * is part of that context event list pending to be detached and
- * reattached to its new context as part of ucma_get_event,
- * handled separately below.
- */
- if (ctx->cm_id == cm_id) {
- mutex_lock(&mut);
- ctx->closing = 1;
- mutex_unlock(&mut);
- queue_work(ctx->file->close_wq, &ctx->close_work);
- return;
- }
+ uevent = ucma_create_uevent(listen_ctx, event);
+ if (!uevent)
+ goto err_alloc;
+ uevent->conn_req_ctx = ctx;
+ uevent->resp.id = ctx->id;
- list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
- if (con_req_eve->cm_id == cm_id &&
- con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
- list_del(&con_req_eve->list);
- INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
- queue_work(ctx->file->close_wq, &con_req_eve->close_work);
- event_found = 1;
- break;
- }
- }
- if (!event_found)
- printk(KERN_ERR "ucma_removal_event_handler: warning: connect request event wasn't found\n");
+ ctx->cm_id->context = ctx;
+
+ mutex_lock(&ctx->file->mut);
+ ucma_finish_ctx(ctx);
+ list_add_tail(&uevent->list, &ctx->file->event_list);
+ mutex_unlock(&ctx->file->mut);
+ wake_up_interruptible(&ctx->file->poll_wait);
+ return 0;
+
+err_alloc:
+ ucma_destroy_private_ctx(ctx);
+err_backlog:
+ atomic_inc(&listen_ctx->backlog);
+ /* Returning error causes the new ID to be destroyed */
+ return -ENOMEM;
}
static int ucma_event_handler(struct rdma_cm_id *cm_id,
@@ -321,64 +341,49 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
{
struct ucma_event *uevent;
struct ucma_context *ctx = cm_id->context;
- int ret = 0;
- uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
- if (!uevent)
- return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;
+ if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST)
+ return ucma_connect_event_handler(cm_id, event);
- mutex_lock(&ctx->file->mut);
- uevent->cm_id = cm_id;
- ucma_set_event_context(ctx, event, uevent);
- uevent->resp.event = event->event;
- uevent->resp.status = event->status;
- if (cm_id->qp_type == IB_QPT_UD)
- ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
- else
- ucma_copy_conn_event(&uevent->resp.param.conn,
- &event->param.conn);
-
- if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
- if (!ctx->backlog) {
- ret = -ENOMEM;
- kfree(uevent);
- goto out;
- }
- ctx->backlog--;
- } else if (!ctx->uid || ctx->cm_id != cm_id) {
- /*
- * We ignore events for new connections until userspace has set
- * their context. This can only happen if an error occurs on a
- * new connection before the user accepts it. This is okay,
- * since the accept will just fail later. However, we do need
- * to release the underlying HW resources in case of a device
- * removal event.
- */
- if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
- ucma_removal_event_handler(cm_id);
-
- kfree(uevent);
- goto out;
+ /*
+ * We ignore events for new connections until userspace has set their
+ * context. This can only happen if an error occurs on a new connection
+ * before the user accepts it. This is okay, since the accept will just
+ * fail later. However, we do need to release the underlying HW
+ * resources in case of a device removal event.
+ */
+ if (ctx->uid) {
+ uevent = ucma_create_uevent(ctx, event);
+ if (!uevent)
+ return 0;
+
+ mutex_lock(&ctx->file->mut);
+ list_add_tail(&uevent->list, &ctx->file->event_list);
+ mutex_unlock(&ctx->file->mut);
+ wake_up_interruptible(&ctx->file->poll_wait);
}
- list_add_tail(&uevent->list, &ctx->file->event_list);
- wake_up_interruptible(&ctx->file->poll_wait);
- if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
- ucma_removal_event_handler(cm_id);
-out:
- mutex_unlock(&ctx->file->mut);
- return ret;
+ if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
+ xa_lock(&ctx_table);
+ if (xa_load(&ctx_table, ctx->id) == ctx)
+ queue_work(system_unbound_wq, &ctx->close_work);
+ xa_unlock(&ctx_table);
+ }
+ return 0;
}
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
int in_len, int out_len)
{
- struct ucma_context *ctx;
struct rdma_ucm_get_event cmd;
struct ucma_event *uevent;
- int ret = 0;
- if (out_len < sizeof uevent->resp)
+ /*
+ * Old 32 bit user space does not send the 4 byte padding in the
+ * reserved field. We don't care, allow it to keep working.
+ */
+ if (out_len < sizeof(uevent->resp) - sizeof(uevent->resp.reserved) -
+ sizeof(uevent->resp.ece))
return -ENOSPC;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
@@ -398,34 +403,25 @@ static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
mutex_lock(&file->mut);
}
- uevent = list_entry(file->event_list.next, struct ucma_event, list);
-
- if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
- ctx = ucma_alloc_ctx(file);
- if (!ctx) {
- ret = -ENOMEM;
- goto done;
- }
- uevent->ctx->backlog++;
- ctx->cm_id = uevent->cm_id;
- ctx->cm_id->context = ctx;
- uevent->resp.id = ctx->id;
- }
+ uevent = list_first_entry(&file->event_list, struct ucma_event, list);
- if (copy_to_user((void __user *)(unsigned long)cmd.response,
- &uevent->resp, sizeof uevent->resp)) {
- ret = -EFAULT;
- goto done;
+ if (copy_to_user(u64_to_user_ptr(cmd.response),
+ &uevent->resp,
+ min_t(size_t, out_len, sizeof(uevent->resp)))) {
+ mutex_unlock(&file->mut);
+ return -EFAULT;
}
list_del(&uevent->list);
uevent->ctx->events_reported++;
if (uevent->mc)
uevent->mc->events_reported++;
- kfree(uevent);
-done:
+ if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
+ atomic_inc(&uevent->ctx->backlog);
mutex_unlock(&file->mut);
- return ret;
+
+ kfree(uevent);
+ return 0;
}
static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
@@ -452,6 +448,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
struct rdma_ucm_create_id cmd;
struct rdma_ucm_create_id_resp resp;
struct ucma_context *ctx;
+ struct rdma_cm_id *cm_id;
enum ib_qp_type qp_type;
int ret;
@@ -465,34 +462,32 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
if (ret)
return ret;
- mutex_lock(&file->mut);
ctx = ucma_alloc_ctx(file);
- mutex_unlock(&file->mut);
if (!ctx)
return -ENOMEM;
ctx->uid = cmd.uid;
- ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type);
- if (IS_ERR(ctx->cm_id)) {
- ret = PTR_ERR(ctx->cm_id);
+ cm_id = rdma_create_user_id(ucma_event_handler, ctx, cmd.ps, qp_type);
+ if (IS_ERR(cm_id)) {
+ ret = PTR_ERR(cm_id);
goto err1;
}
+ ucma_set_ctx_cm_id(ctx, cm_id);
resp.id = ctx->id;
- if (copy_to_user((void __user *)(unsigned long)cmd.response,
+ if (copy_to_user(u64_to_user_ptr(cmd.response),
&resp, sizeof(resp))) {
ret = -EFAULT;
- goto err2;
+ goto err1;
}
+
+ mutex_lock(&file->mut);
+ ucma_finish_ctx(ctx);
+ mutex_unlock(&file->mut);
return 0;
-err2:
- rdma_destroy_id(ctx->cm_id);
err1:
- mutex_lock(&mut);
- idr_remove(&ctx_idr, ctx->id);
- mutex_unlock(&mut);
- kfree(ctx);
+ ucma_destroy_private_ctx(ctx);
return ret;
}
@@ -500,19 +495,25 @@ static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
struct ucma_multicast *mc, *tmp;
- mutex_lock(&mut);
+ xa_lock(&multicast_table);
list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
list_del(&mc->list);
- idr_remove(&multicast_idr, mc->id);
+ /*
+ * At this point mc->ctx->ref is 0, so no reader can grab the mc
+ * while we hold the lock; this is enough serialization
+ */
+ __xa_erase(&multicast_table, mc->id);
kfree(mc);
}
- mutex_unlock(&mut);
+ xa_unlock(&multicast_table);
}
static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
struct ucma_event *uevent, *tmp;
+ rdma_lock_handler(mc->ctx->cm_id);
+ mutex_lock(&mc->ctx->file->mut);
list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
if (uevent->mc != mc)
continue;
@@ -520,45 +521,75 @@ static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
list_del(&uevent->list);
kfree(uevent);
}
+ mutex_unlock(&mc->ctx->file->mut);
+ rdma_unlock_handler(mc->ctx->cm_id);
}
-/*
- * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
- * this point, no new events will be reported from the hardware. However, we
- * still need to cleanup the UCMA context for this ID. Specifically, there
- * might be events that have not yet been consumed by the user space software.
- * These might include pending connect requests which we have not completed
- * processing. We cannot call rdma_destroy_id while holding the lock of the
- * context (file->mut), as it might cause a deadlock. We therefore extract all
- * relevant events from the context pending events list while holding the
- * mutex. After that we release them as needed.
- */
-static int ucma_free_ctx(struct ucma_context *ctx)
+static int ucma_cleanup_ctx_events(struct ucma_context *ctx)
{
int events_reported;
struct ucma_event *uevent, *tmp;
LIST_HEAD(list);
-
- ucma_cleanup_multicast(ctx);
-
- /* Cleanup events not yet reported to the user. */
+ /* Cleanup events not yet reported to the user. */
mutex_lock(&ctx->file->mut);
list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
- if (uevent->ctx == ctx)
+ if (uevent->ctx != ctx)
+ continue;
+
+ if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST &&
+ xa_cmpxchg(&ctx_table, uevent->conn_req_ctx->id,
+ uevent->conn_req_ctx, XA_ZERO_ENTRY,
+ GFP_KERNEL) == uevent->conn_req_ctx) {
list_move_tail(&uevent->list, &list);
+ continue;
+ }
+ list_del(&uevent->list);
+ kfree(uevent);
}
list_del(&ctx->list);
+ events_reported = ctx->events_reported;
mutex_unlock(&ctx->file->mut);
+ /*
+ * If this was a listening ID then any connections spawned from it that
+ * have not been delivered to userspace are cleaned up too. Must be done
+ * outside any locks.
+ */
list_for_each_entry_safe(uevent, tmp, &list, list) {
- list_del(&uevent->list);
- if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
- rdma_destroy_id(uevent->cm_id);
+ ucma_destroy_private_ctx(uevent->conn_req_ctx);
kfree(uevent);
}
+ return events_reported;
+}
- events_reported = ctx->events_reported;
+/*
+ * When this is called the xarray must have an XA_ZERO_ENTRY in the ctx->id
+ * (i.e. the ctx is not public to the user). This is either because:
+ * - ucma_finish_ctx() hasn't been called
+ * - xa_cmpxchg() succeeded in removing the entry (only one thread can succeed)
+ */
+static int ucma_destroy_private_ctx(struct ucma_context *ctx)
+{
+ int events_reported;
+
+ /*
+ * Destroy the underlying cm_id. New work queuing is prevented now by
+ * the removal from the xarray. Once the work is cancelled, ref will either
+ * be 0 because the work ran to completion and consumed the ref from the
+ * xarray, or it will be positive because we still have the ref from the
+ * xarray. This can also be 0 in cases where cm_id was never set.
+ */
+ cancel_work_sync(&ctx->close_work);
+ if (refcount_read(&ctx->ref))
+ ucma_close_id(&ctx->close_work);
+
+ events_reported = ucma_cleanup_ctx_events(ctx);
+ ucma_cleanup_multicast(ctx);
+
+ WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, XA_ZERO_ENTRY, NULL,
+ GFP_KERNEL) != NULL);
+ mutex_destroy(&ctx->mutex);
kfree(ctx);
return events_reported;
}
@@ -577,34 +608,20 @@ static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- mutex_lock(&mut);
+ xa_lock(&ctx_table);
ctx = _ucma_find_context(cmd.id, file);
- if (!IS_ERR(ctx))
- idr_remove(&ctx_idr, ctx->id);
- mutex_unlock(&mut);
+ if (!IS_ERR(ctx)) {
+ if (__xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY,
+ GFP_KERNEL) != ctx)
+ ctx = ERR_PTR(-ENOENT);
+ }
+ xa_unlock(&ctx_table);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- mutex_lock(&ctx->file->mut);
- ctx->destroying = 1;
- mutex_unlock(&ctx->file->mut);
-
- flush_workqueue(ctx->file->close_wq);
- /* At this point it's guaranteed that there is no inflight
- * closing task */
- mutex_lock(&mut);
- if (!ctx->closing) {
- mutex_unlock(&mut);
- ucma_put_ctx(ctx);
- wait_for_completion(&ctx->comp);
- rdma_destroy_id(ctx->cm_id);
- } else {
- mutex_unlock(&mut);
- }
-
- resp.events_reported = ucma_free_ctx(ctx);
- if (copy_to_user((void __user *)(unsigned long)cmd.response,
+ resp.events_reported = ucma_destroy_private_ctx(ctx);
+ if (copy_to_user(u64_to_user_ptr(cmd.response),
&resp, sizeof(resp)))
ret = -EFAULT;
@@ -621,11 +638,17 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
+ if (!rdma_addr_size_in6(&cmd.addr))
+ return -EINVAL;
+
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
+ mutex_unlock(&ctx->mutex);
+
ucma_put_ctx(ctx);
return ret;
}
@@ -634,22 +657,23 @@ static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
int in_len, int out_len)
{
struct rdma_ucm_bind cmd;
- struct sockaddr *addr;
struct ucma_context *ctx;
int ret;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- addr = (struct sockaddr *) &cmd.addr;
- if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr)))
+ if (cmd.reserved || !cmd.addr_size ||
+ cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
return -EINVAL;
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- ret = rdma_bind_addr(ctx->cm_id, addr);
+ mutex_lock(&ctx->mutex);
+ ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -665,13 +689,18 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
+ if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
+ !rdma_addr_size_in6(&cmd.dst_addr))
+ return -EINVAL;
+
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
- (struct sockaddr *) &cmd.dst_addr,
- cmd.timeout_ms);
+ (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -681,24 +710,47 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file,
int in_len, int out_len)
{
struct rdma_ucm_resolve_addr cmd;
- struct sockaddr *src, *dst;
struct ucma_context *ctx;
int ret;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- src = (struct sockaddr *) &cmd.src_addr;
- dst = (struct sockaddr *) &cmd.dst_addr;
- if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) ||
- !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst)))
+ if (cmd.reserved ||
+ (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
+ !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
return -EINVAL;
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
+ mutex_lock(&ctx->mutex);
+ ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
+ (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
+ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
+static ssize_t ucma_resolve_ib_service(struct ucma_file *file,
+ const char __user *inbuf, int in_len,
+ int out_len)
+{
+ struct rdma_ucm_resolve_ib_service cmd;
+ struct ucma_context *ctx;
+ int ret;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ mutex_lock(&ctx->mutex);
+ ret = rdma_resolve_ib_service(ctx->cm_id, &cmd.ibs);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -714,11 +766,13 @@ static ssize_t ucma_resolve_route(struct ucma_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- ctx = ucma_get_ctx(file, cmd.id);
+ ctx = ucma_get_ctx_dev(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -728,8 +782,8 @@ static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
{
struct rdma_dev_addr *dev_addr;
- resp->num_paths = route->num_paths;
- switch (route->num_paths) {
+ resp->num_paths = route->num_pri_alt_paths;
+ switch (route->num_pri_alt_paths) {
case 0:
dev_addr = &route->addr.dev_addr;
rdma_addr_get_dgid(dev_addr,
@@ -741,7 +795,7 @@ static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
case 2:
ib_copy_path_rec_to_user(&resp->ib_route[1],
&route->path_rec[1]);
- /* fall through */
+ fallthrough;
case 1:
ib_copy_path_rec_to_user(&resp->ib_route[0],
&route->path_rec[0]);
@@ -755,8 +809,8 @@ static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
struct rdma_route *route)
{
- resp->num_paths = route->num_paths;
- switch (route->num_paths) {
+ resp->num_paths = route->num_pri_alt_paths;
+ switch (route->num_pri_alt_paths) {
case 0:
rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
(union ib_gid *)&resp->ib_route[0].dgid);
@@ -767,7 +821,7 @@ static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
case 2:
ib_copy_path_rec_to_user(&resp->ib_route[1],
&route->path_rec[1]);
- /* fall through */
+ fallthrough;
case 1:
ib_copy_path_rec_to_user(&resp->ib_route[0],
&route->path_rec[0]);
@@ -797,7 +851,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
struct sockaddr *addr;
int ret = 0;
- if (out_len < sizeof(resp))
+ if (out_len < offsetof(struct rdma_ucm_query_route_resp, ibdev_index))
return -ENOSPC;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
@@ -807,6 +861,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
memset(&resp, 0, sizeof resp);
addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
@@ -820,6 +875,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
goto out;
resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
+ resp.ibdev_index = ctx->cm_id->device->index;
resp.port_num = ctx->cm_id->port_num;
if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
@@ -830,8 +886,9 @@ static ssize_t ucma_query_route(struct ucma_file *file,
ucma_copy_iw_route(&resp, &ctx->cm_id->route);
out:
- if (copy_to_user((void __user *)(unsigned long)cmd.response,
- &resp, sizeof(resp)))
+ mutex_unlock(&ctx->mutex);
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp,
+ min_t(size_t, out_len, sizeof(resp))))
ret = -EFAULT;
ucma_put_ctx(ctx);
@@ -845,6 +902,7 @@ static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
return;
resp->node_guid = (__force __u64) cm_id->device->node_guid;
+ resp->ibdev_index = cm_id->device->index;
resp->port_num = cm_id->port_num;
resp->pkey = (__force __u16) cpu_to_be16(
ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
@@ -857,7 +915,7 @@ static ssize_t ucma_query_addr(struct ucma_context *ctx,
struct sockaddr *addr;
int ret = 0;
- if (out_len < sizeof(resp))
+ if (out_len < offsetof(struct rdma_ucm_query_addr_resp, ibdev_index))
return -ENOSPC;
memset(&resp, 0, sizeof resp);
@@ -872,7 +930,7 @@ static ssize_t ucma_query_addr(struct ucma_context *ctx,
ucma_query_device_addr(ctx->cm_id, &resp);
- if (copy_to_user(response, &resp, sizeof(resp)))
+ if (copy_to_user(response, &resp, min_t(size_t, out_len, sizeof(resp))))
ret = -EFAULT;
return ret;
@@ -891,19 +949,26 @@ static ssize_t ucma_query_path(struct ucma_context *ctx,
if (!resp)
return -ENOMEM;
- resp->num_paths = ctx->cm_id->route.num_paths;
+ resp->num_paths = ctx->cm_id->route.num_pri_alt_paths;
for (i = 0, out_len -= sizeof(*resp);
i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
i++, out_len -= sizeof(struct ib_path_rec_data)) {
+ struct sa_path_rec *rec = &ctx->cm_id->route.path_rec[i];
resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
IB_PATH_BIDIRECTIONAL;
- ib_sa_pack_path(&ctx->cm_id->route.path_rec[i],
- &resp->path_data[i].path_rec);
+ if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
+ struct sa_path_rec ib;
+
+ sa_convert_path_opa_to_ib(&ib, rec);
+ ib_sa_pack_path(&ib, &resp->path_data[i].path_rec);
+
+ } else {
+ ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
+ }
}
- if (copy_to_user(response, resp,
- sizeof(*resp) + (i * sizeof(struct ib_path_rec_data))))
+ if (copy_to_user(response, resp, struct_size(resp, path_data, i)))
ret = -EFAULT;
kfree(resp);
@@ -917,7 +982,7 @@ static ssize_t ucma_query_gid(struct ucma_context *ctx,
struct sockaddr_ib *addr;
int ret = 0;
- if (out_len < sizeof(resp))
+ if (out_len < offsetof(struct rdma_ucm_query_addr_resp, ibdev_index))
return -ENOSPC;
memset(&resp, 0, sizeof resp);
@@ -931,8 +996,8 @@ static ssize_t ucma_query_gid(struct ucma_context *ctx,
} else {
addr->sib_family = AF_IB;
addr->sib_pkey = (__force __be16) resp.pkey;
- rdma_addr_get_sgid(&ctx->cm_id->route.addr.dev_addr,
- (union ib_gid *) &addr->sib_addr);
+ rdma_read_gids(ctx->cm_id, (union ib_gid *)&addr->sib_addr,
+ NULL);
addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
&ctx->cm_id->route.addr.src_addr);
}
@@ -944,15 +1009,52 @@ static ssize_t ucma_query_gid(struct ucma_context *ctx,
} else {
addr->sib_family = AF_IB;
addr->sib_pkey = (__force __be16) resp.pkey;
- rdma_addr_get_dgid(&ctx->cm_id->route.addr.dev_addr,
- (union ib_gid *) &addr->sib_addr);
+ rdma_read_gids(ctx->cm_id, NULL,
+ (union ib_gid *)&addr->sib_addr);
addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
&ctx->cm_id->route.addr.dst_addr);
}
- if (copy_to_user(response, &resp, sizeof(resp)))
+ if (copy_to_user(response, &resp, min_t(size_t, out_len, sizeof(resp))))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+static ssize_t ucma_query_ib_service(struct ucma_context *ctx,
+ void __user *response, int out_len)
+{
+ struct rdma_ucm_query_ib_service_resp *resp;
+ int n, ret = 0;
+
+ if (out_len < sizeof(struct rdma_ucm_query_ib_service_resp))
+ return -ENOSPC;
+
+ if (!ctx->cm_id->route.service_recs)
+ return -ENODATA;
+
+ resp = kzalloc(out_len, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ resp->num_service_recs = ctx->cm_id->route.num_service_recs;
+
+ n = (out_len - sizeof(struct rdma_ucm_query_ib_service_resp)) /
+ sizeof(struct ib_user_service_rec);
+
+ if (!n)
+ goto out;
+
+ if (n > ctx->cm_id->route.num_service_recs)
+ n = ctx->cm_id->route.num_service_recs;
+
+ memcpy(resp->recs, ctx->cm_id->route.service_recs,
+ sizeof(*resp->recs) * n);
+ if (copy_to_user(response, resp, struct_size(resp, recs, n)))
ret = -EFAULT;
+out:
+ kfree(resp);
return ret;
}
@@ -968,11 +1070,12 @@ static ssize_t ucma_query(struct ucma_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- response = (void __user *)(unsigned long) cmd.response;
+ response = u64_to_user_ptr(cmd.response);
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
switch (cmd.option) {
case RDMA_USER_CM_QUERY_ADDR:
ret = ucma_query_addr(ctx, response, out_len);
@@ -983,10 +1086,14 @@ static ssize_t ucma_query(struct ucma_file *file,
case RDMA_USER_CM_QUERY_GID:
ret = ucma_query_gid(ctx, response, out_len);
break;
+ case RDMA_USER_CM_QUERY_IB_SERVICE:
+ ret = ucma_query_ib_service(ctx, response, out_len);
+ break;
default:
ret = -ENOSYS;
break;
}
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
@@ -998,36 +1105,48 @@ static void ucma_copy_conn_param(struct rdma_cm_id *id,
{
dst->private_data = src->private_data;
dst->private_data_len = src->private_data_len;
- dst->responder_resources =src->responder_resources;
+ dst->responder_resources = src->responder_resources;
dst->initiator_depth = src->initiator_depth;
dst->flow_control = src->flow_control;
dst->retry_count = src->retry_count;
dst->rnr_retry_count = src->rnr_retry_count;
dst->srq = src->srq;
- dst->qp_num = src->qp_num;
+ dst->qp_num = src->qp_num & 0xFFFFFF;
dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}
static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
int in_len, int out_len)
{
- struct rdma_ucm_connect cmd;
struct rdma_conn_param conn_param;
+ struct rdma_ucm_ece ece = {};
+ struct rdma_ucm_connect cmd;
struct ucma_context *ctx;
+ size_t in_size;
int ret;
- if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ if (in_len < offsetofend(typeof(cmd), reserved))
+ return -EINVAL;
+ in_size = min_t(size_t, in_len, sizeof(cmd));
+ if (copy_from_user(&cmd, inbuf, in_size))
return -EFAULT;
if (!cmd.conn_param.valid)
return -EINVAL;
- ctx = ucma_get_ctx(file, cmd.id);
+ ctx = ucma_get_ctx_dev(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
- ret = rdma_connect(ctx->cm_id, &conn_param);
+ if (offsetofend(typeof(cmd), ece) <= in_size) {
+ ece.vendor_id = cmd.ece.vendor_id;
+ ece.attr_mod = cmd.ece.attr_mod;
+ }
+
+ mutex_lock(&ctx->mutex);
+ ret = rdma_connect_ece(ctx->cm_id, &conn_param, &ece);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -1046,9 +1165,13 @@ static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
- cmd.backlog : max_backlog;
- ret = rdma_listen(ctx->cm_id, ctx->backlog);
+ if (cmd.backlog <= 0 || cmd.backlog > max_backlog)
+ cmd.backlog = max_backlog;
+ atomic_set(&ctx->backlog, cmd.backlog);
+
+ mutex_lock(&ctx->mutex);
+ ret = rdma_listen(ctx->cm_id, cmd.backlog);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -1058,26 +1181,44 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
{
struct rdma_ucm_accept cmd;
struct rdma_conn_param conn_param;
+ struct rdma_ucm_ece ece = {};
struct ucma_context *ctx;
+ size_t in_size;
int ret;
- if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ if (in_len < offsetofend(typeof(cmd), reserved))
+ return -EINVAL;
+ in_size = min_t(size_t, in_len, sizeof(cmd));
+ if (copy_from_user(&cmd, inbuf, in_size))
return -EFAULT;
- ctx = ucma_get_ctx(file, cmd.id);
+ ctx = ucma_get_ctx_dev(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ if (offsetofend(typeof(cmd), ece) <= in_size) {
+ ece.vendor_id = cmd.ece.vendor_id;
+ ece.attr_mod = cmd.ece.attr_mod;
+ }
+
if (cmd.conn_param.valid) {
ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
- mutex_lock(&file->mut);
- ret = rdma_accept(ctx->cm_id, &conn_param);
- if (!ret)
+ mutex_lock(&ctx->mutex);
+ rdma_lock_handler(ctx->cm_id);
+ ret = rdma_accept_ece(ctx->cm_id, &conn_param, &ece);
+ if (!ret) {
+ /* The uid must be set atomically with the handler */
ctx->uid = cmd.uid;
- mutex_unlock(&file->mut);
- } else
- ret = rdma_accept(ctx->cm_id, NULL);
-
+ }
+ rdma_unlock_handler(ctx->cm_id);
+ mutex_unlock(&ctx->mutex);
+ } else {
+ mutex_lock(&ctx->mutex);
+ rdma_lock_handler(ctx->cm_id);
+ ret = rdma_accept_ece(ctx->cm_id, NULL, &ece);
+ rdma_unlock_handler(ctx->cm_id);
+ mutex_unlock(&ctx->mutex);
+ }
ucma_put_ctx(ctx);
return ret;
}
@@ -1092,11 +1233,25 @@ static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- ctx = ucma_get_ctx(file, cmd.id);
+ if (!cmd.reason)
+ cmd.reason = IB_CM_REJ_CONSUMER_DEFINED;
+
+ switch (cmd.reason) {
+ case IB_CM_REJ_CONSUMER_DEFINED:
+ case IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ctx = ucma_get_ctx_dev(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
+ mutex_lock(&ctx->mutex);
+ ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len,
+ cmd.reason);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -1111,11 +1266,13 @@ static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- ctx = ucma_get_ctx(file, cmd.id);
+ ctx = ucma_get_ctx_dev(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
ret = rdma_disconnect(ctx->cm_id);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -1136,19 +1293,24 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- ctx = ucma_get_ctx(file, cmd.id);
+ if (cmd.qp_state > IB_QPS_ERR)
+ return -EINVAL;
+
+ ctx = ucma_get_ctx_dev(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
resp.qp_attr_mask = 0;
memset(&qp_attr, 0, sizeof qp_attr);
qp_attr.qp_state = cmd.qp_state;
+ mutex_lock(&ctx->mutex);
ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
+ mutex_unlock(&ctx->mutex);
if (ret)
goto out;
- ib_copy_qp_attr_to_user(&resp, &qp_attr);
- if (copy_to_user((void __user *)(unsigned long)cmd.response,
+ ib_copy_qp_attr_to_user(ctx->cm_id->device, &resp, &qp_attr);
+ if (copy_to_user(u64_to_user_ptr(cmd.response),
&resp, sizeof(resp)))
ret = -EFAULT;
@@ -1184,6 +1346,13 @@ static int ucma_set_option_id(struct ucma_context *ctx, int optname,
}
ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
break;
+ case RDMA_OPTION_ID_ACK_TIMEOUT:
+ if (optlen != sizeof(u8)) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = rdma_set_ack_timeout(ctx->cm_id, *((u8 *)optval));
+ break;
default:
ret = -ENOSYS;
}
@@ -1194,7 +1363,7 @@ static int ucma_set_option_id(struct ucma_context *ctx, int optname,
static int ucma_set_ib_path(struct ucma_context *ctx,
struct ib_path_rec_data *path_data, size_t optlen)
{
- struct ib_sa_path_rec sa_path;
+ struct sa_path_rec sa_path;
struct rdma_cm_event event;
int ret;
@@ -1210,11 +1379,26 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
if (!optlen)
return -EINVAL;
+ if (!ctx->cm_id->device)
+ return -EINVAL;
+
memset(&sa_path, 0, sizeof(sa_path));
- sa_path.vlan_id = 0xffff;
+ sa_path.rec_type = SA_PATH_REC_TYPE_IB;
ib_sa_unpack_path(path_data->path_rec, &sa_path);
- ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
+
+ if (rdma_cap_opa_ah(ctx->cm_id->device, ctx->cm_id->port_num)) {
+ struct sa_path_rec opa;
+
+ sa_convert_path_ib_to_opa(&opa, &sa_path);
+ mutex_lock(&ctx->mutex);
+ ret = rdma_set_ib_path(ctx->cm_id, &opa);
+ mutex_unlock(&ctx->mutex);
+ } else {
+ mutex_lock(&ctx->mutex);
+ ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
+ mutex_unlock(&ctx->mutex);
+ }
if (ret)
return ret;
@@ -1246,7 +1430,9 @@ static int ucma_set_option_level(struct ucma_context *ctx, int level,
switch (level) {
case RDMA_OPTION_ID:
+ mutex_lock(&ctx->mutex);
ret = ucma_set_option_id(ctx, optname, optval, optlen);
+ mutex_unlock(&ctx->mutex);
break;
case RDMA_OPTION_IB:
ret = ucma_set_option_ib(ctx, optname, optval, optlen);
@@ -1269,11 +1455,14 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
+ if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
+ return -EINVAL;
+
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- optval = memdup_user((void __user *) (unsigned long) cmd.optval,
+ optval = memdup_user(u64_to_user_ptr(cmd.optval),
cmd.optlen);
if (IS_ERR(optval)) {
ret = PTR_ERR(optval);
@@ -1294,7 +1483,7 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
{
struct rdma_ucm_notify cmd;
struct ucma_context *ctx;
- int ret;
+ int ret = -EINVAL;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
@@ -1303,7 +1492,11 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
+ mutex_lock(&ctx->mutex);
+ if (ctx->cm_id->device)
+ ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
+ mutex_unlock(&ctx->mutex);
+
ucma_put_ctx(ctx);
return ret;
}
@@ -1316,53 +1509,79 @@ static ssize_t ucma_process_join(struct ucma_file *file,
struct ucma_multicast *mc;
struct sockaddr *addr;
int ret;
+ u8 join_state;
if (out_len < sizeof(resp))
return -ENOSPC;
addr = (struct sockaddr *) &cmd->addr;
- if (cmd->reserved || !cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
+ if (cmd->addr_size != rdma_addr_size(addr))
+ return -EINVAL;
+
+ if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
+ join_state = BIT(FULLMEMBER_JOIN);
+ else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
+ join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
+ else
return -EINVAL;
- ctx = ucma_get_ctx(file, cmd->id);
+ ctx = ucma_get_ctx_dev(file, cmd->id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- mutex_lock(&file->mut);
- mc = ucma_alloc_multicast(ctx);
+ mc = kzalloc(sizeof(*mc), GFP_KERNEL);
if (!mc) {
ret = -ENOMEM;
- goto err1;
+ goto err_put_ctx;
}
+ mc->ctx = ctx;
+ mc->join_state = join_state;
mc->uid = cmd->uid;
memcpy(&mc->addr, addr, cmd->addr_size);
- ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
+
+ xa_lock(&multicast_table);
+ if (__xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b,
+ GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto err_free_mc;
+ }
+
+ list_add_tail(&mc->list, &ctx->mc_list);
+ xa_unlock(&multicast_table);
+
+ mutex_lock(&ctx->mutex);
+ ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
+ join_state, mc);
+ mutex_unlock(&ctx->mutex);
if (ret)
- goto err2;
+ goto err_xa_erase;
resp.id = mc->id;
- if (copy_to_user((void __user *)(unsigned long) cmd->response,
+ if (copy_to_user(u64_to_user_ptr(cmd->response),
&resp, sizeof(resp))) {
ret = -EFAULT;
- goto err3;
+ goto err_leave_multicast;
}
- mutex_unlock(&file->mut);
+ xa_store(&multicast_table, mc->id, mc, 0);
+
ucma_put_ctx(ctx);
return 0;
-err3:
+err_leave_multicast:
+ mutex_lock(&ctx->mutex);
rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
+ mutex_unlock(&ctx->mutex);
ucma_cleanup_mc_events(mc);
-err2:
- mutex_lock(&mut);
- idr_remove(&multicast_idr, mc->id);
- mutex_unlock(&mut);
+err_xa_erase:
+ xa_lock(&multicast_table);
list_del(&mc->list);
+ __xa_erase(&multicast_table, mc->id);
+err_free_mc:
+ xa_unlock(&multicast_table);
kfree(mc);
-err1:
- mutex_unlock(&file->mut);
+err_put_ctx:
ucma_put_ctx(ctx);
return ret;
}
@@ -1380,8 +1599,11 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
join_cmd.response = cmd.response;
join_cmd.uid = cmd.uid;
join_cmd.id = cmd.id;
- join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
- join_cmd.reserved = 0;
+ join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
+ if (!join_cmd.addr_size)
+ return -EINVAL;
+
+ join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);
return ucma_process_join(file, &join_cmd, out_len);
@@ -1396,6 +1618,9 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
+ if (!rdma_addr_size_kss(&cmd.addr))
+ return -EINVAL;
+
return ucma_process_join(file, &cmd, out_len);
}
@@ -1414,80 +1639,51 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- mutex_lock(&mut);
- mc = idr_find(&multicast_idr, cmd.id);
+ xa_lock(&multicast_table);
+ mc = xa_load(&multicast_table, cmd.id);
if (!mc)
mc = ERR_PTR(-ENOENT);
- else if (mc->ctx->file != file)
+ else if (READ_ONCE(mc->ctx->file) != file)
mc = ERR_PTR(-EINVAL);
- else if (!atomic_inc_not_zero(&mc->ctx->ref))
+ else if (!refcount_inc_not_zero(&mc->ctx->ref))
mc = ERR_PTR(-ENXIO);
- else
- idr_remove(&multicast_idr, mc->id);
- mutex_unlock(&mut);
if (IS_ERR(mc)) {
+ xa_unlock(&multicast_table);
ret = PTR_ERR(mc);
goto out;
}
+ list_del(&mc->list);
+ __xa_erase(&multicast_table, mc->id);
+ xa_unlock(&multicast_table);
+
+ mutex_lock(&mc->ctx->mutex);
rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
- mutex_lock(&mc->ctx->file->mut);
+ mutex_unlock(&mc->ctx->mutex);
+
ucma_cleanup_mc_events(mc);
- list_del(&mc->list);
- mutex_unlock(&mc->ctx->file->mut);
ucma_put_ctx(mc->ctx);
resp.events_reported = mc->events_reported;
kfree(mc);
- if (copy_to_user((void __user *)(unsigned long)cmd.response,
+ if (copy_to_user(u64_to_user_ptr(cmd.response),
&resp, sizeof(resp)))
ret = -EFAULT;
out:
return ret;
}
-static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
-{
- /* Acquire mutex's based on pointer comparison to prevent deadlock. */
- if (file1 < file2) {
- mutex_lock(&file1->mut);
- mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
- } else {
- mutex_lock(&file2->mut);
- mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
- }
-}
-
-static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
-{
- if (file1 < file2) {
- mutex_unlock(&file2->mut);
- mutex_unlock(&file1->mut);
- } else {
- mutex_unlock(&file1->mut);
- mutex_unlock(&file2->mut);
- }
-}
-
-static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
-{
- struct ucma_event *uevent, *tmp;
-
- list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
- if (uevent->ctx == ctx)
- list_move_tail(&uevent->list, &file->event_list);
-}
-
static ssize_t ucma_migrate_id(struct ucma_file *new_file,
const char __user *inbuf,
int in_len, int out_len)
{
struct rdma_ucm_migrate_id cmd;
struct rdma_ucm_migrate_resp resp;
+ struct ucma_event *uevent, *tmp;
struct ucma_context *ctx;
- struct fd f;
+ LIST_HEAD(event_list);
struct ucma_file *cur_file;
int ret = 0;
@@ -1495,46 +1691,106 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
return -EFAULT;
/* Get current fd to protect against it being closed */
- f = fdget(cmd.fd);
- if (!f.file)
+ CLASS(fd, f)(cmd.fd);
+ if (fd_empty(f))
return -ENOENT;
+ if (fd_file(f)->f_op != &ucma_fops)
+ return -EINVAL;
+ cur_file = fd_file(f)->private_data;
/* Validate current fd and prevent destruction of id. */
- ctx = ucma_get_ctx(f.file->private_data, cmd.id);
- if (IS_ERR(ctx)) {
- ret = PTR_ERR(ctx);
- goto file_put;
- }
+ ctx = ucma_get_ctx(cur_file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
- cur_file = ctx->file;
- if (cur_file == new_file) {
- resp.events_reported = ctx->events_reported;
- goto response;
+ rdma_lock_handler(ctx->cm_id);
+ /*
+ * ctx->file can only be changed under the handler & xa_lock. xa_load()
+ * must be checked again to ensure the ctx hasn't begun destruction
+ * since the ucma_get_ctx().
+ */
+ xa_lock(&ctx_table);
+ if (_ucma_find_context(cmd.id, cur_file) != ctx) {
+ xa_unlock(&ctx_table);
+ ret = -ENOENT;
+ goto err_unlock;
}
+ ctx->file = new_file;
+ xa_unlock(&ctx_table);
+ mutex_lock(&cur_file->mut);
+ list_del(&ctx->list);
/*
- * Migrate events between fd's, maintaining order, and avoiding new
- * events being added before existing events.
+ * At this point lock_handler() prevents addition of new uevents for
+ * this ctx.
*/
- ucma_lock_files(cur_file, new_file);
- mutex_lock(&mut);
-
- list_move_tail(&ctx->list, &new_file->ctx_list);
- ucma_move_events(ctx, new_file);
- ctx->file = new_file;
+ list_for_each_entry_safe(uevent, tmp, &cur_file->event_list, list)
+ if (uevent->ctx == ctx)
+ list_move_tail(&uevent->list, &event_list);
resp.events_reported = ctx->events_reported;
+ mutex_unlock(&cur_file->mut);
- mutex_unlock(&mut);
- ucma_unlock_files(cur_file, new_file);
+ mutex_lock(&new_file->mut);
+ list_add_tail(&ctx->list, &new_file->ctx_list);
+ list_splice_tail(&event_list, &new_file->event_list);
+ mutex_unlock(&new_file->mut);
-response:
- if (copy_to_user((void __user *)(unsigned long)cmd.response,
+ if (copy_to_user(u64_to_user_ptr(cmd.response),
&resp, sizeof(resp)))
ret = -EFAULT;
+err_unlock:
+ rdma_unlock_handler(ctx->cm_id);
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
+static ssize_t ucma_write_cm_event(struct ucma_file *file,
+ const char __user *inbuf, int in_len,
+ int out_len)
+{
+ struct rdma_ucm_write_cm_event cmd;
+ struct rdma_cm_event event = {};
+ struct ucma_event *uevent;
+ struct ucma_context *ctx;
+ int ret = 0;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ if ((cmd.event != RDMA_CM_EVENT_USER) &&
+ (cmd.event != RDMA_CM_EVENT_INTERNAL))
+ return -EINVAL;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ event.event = cmd.event;
+ event.status = cmd.status;
+ event.param.arg = cmd.param.arg;
+
+ uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
+ if (!uevent) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ uevent->ctx = ctx;
+ uevent->resp.uid = ctx->uid;
+ uevent->resp.id = ctx->id;
+ uevent->resp.event = event.event;
+ uevent->resp.status = event.status;
+ memcpy(uevent->resp.param.arg32, &event.param.arg,
+ sizeof(event.param.arg));
+
+ mutex_lock(&ctx->file->mut);
+ list_add_tail(&uevent->list, &ctx->file->event_list);
+ mutex_unlock(&ctx->file->mut);
+ wake_up_interruptible(&ctx->file->poll_wait);
+
+out:
ucma_put_ctx(ctx);
-file_put:
- fdput(f);
return ret;
}
@@ -1563,7 +1819,9 @@ static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
[RDMA_USER_CM_CMD_QUERY] = ucma_query,
[RDMA_USER_CM_CMD_BIND] = ucma_bind,
[RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr,
- [RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast
+ [RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast,
+ [RDMA_USER_CM_CMD_RESOLVE_IB_SERVICE] = ucma_resolve_ib_service,
+ [RDMA_USER_CM_CMD_WRITE_CM_EVENT] = ucma_write_cm_event,
};
static ssize_t ucma_write(struct file *filp, const char __user *buf,
@@ -1573,6 +1831,12 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
struct rdma_ucm_cmd_hdr hdr;
ssize_t ret;
+ if (!ib_safe_file_access(filp)) {
+ pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+ __func__, task_tgid_vnr(current), current->comm);
+ return -EACCES;
+ }
+
if (len < sizeof(hdr))
return -EINVAL;
@@ -1581,6 +1845,7 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
return -EINVAL;
+ hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucma_cmd_table));
if (hdr.in + sizeof(hdr) > len)
return -EINVAL;
@@ -1595,15 +1860,15 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
return ret;
}
-static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
+static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
struct ucma_file *file = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &file->poll_wait, wait);
if (!list_empty(&file->event_list))
- mask = POLLIN | POLLRDNORM;
+ mask = EPOLLIN | EPOLLRDNORM;
return mask;
}
@@ -1628,49 +1893,33 @@ static int ucma_open(struct inode *inode, struct file *filp)
INIT_LIST_HEAD(&file->ctx_list);
init_waitqueue_head(&file->poll_wait);
mutex_init(&file->mut);
- file->close_wq = create_singlethread_workqueue("ucma_close_id");
filp->private_data = file;
file->filp = filp;
- return nonseekable_open(inode, filp);
+ return stream_open(inode, filp);
}
static int ucma_close(struct inode *inode, struct file *filp)
{
struct ucma_file *file = filp->private_data;
- struct ucma_context *ctx, *tmp;
- mutex_lock(&file->mut);
- list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
- ctx->destroying = 1;
- mutex_unlock(&file->mut);
-
- mutex_lock(&mut);
- idr_remove(&ctx_idr, ctx->id);
- mutex_unlock(&mut);
-
- flush_workqueue(file->close_wq);
- /* At that step once ctx was marked as destroying and workqueue
- * was flushed we are safe from any inflights handlers that
- * might put other closing task.
- */
- mutex_lock(&mut);
- if (!ctx->closing) {
- mutex_unlock(&mut);
- /* rdma_destroy_id ensures that no event handlers are
- * inflight for that id before releasing it.
- */
- rdma_destroy_id(ctx->cm_id);
- } else {
- mutex_unlock(&mut);
- }
+ /*
+ * All paths that touch ctx_list or event_list starting from write() are
+ * prevented by this being a FD release function. The list_add_tail() in
+ * ucma_connect_event_handler() can run concurrently, however it only
+ * adds to the list *after* a listening ID. By only reading the first of
+ * the list, and relying on ucma_destroy_private_ctx() to block
+ * ucma_connect_event_handler(), no additional locking is needed.
+ */
+ while (!list_empty(&file->ctx_list)) {
+ struct ucma_context *ctx = list_first_entry(
+ &file->ctx_list, struct ucma_context, list);
- ucma_free_ctx(ctx);
- mutex_lock(&file->mut);
+ WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY,
+ GFP_KERNEL) != ctx);
+ ucma_destroy_private_ctx(ctx);
}
- mutex_unlock(&file->mut);
- destroy_workqueue(file->close_wq);
kfree(file);
return 0;
}
@@ -1681,7 +1930,6 @@ static const struct file_operations ucma_fops = {
.release = ucma_close,
.write = ucma_write,
.poll = ucma_poll,
- .llseek = no_llseek,
};
static struct miscdevice ucma_misc = {
@@ -1692,13 +1940,25 @@ static struct miscdevice ucma_misc = {
.fops = &ucma_fops,
};
-static ssize_t show_abi_version(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int ucma_get_global_nl_info(struct ib_client_nl_info *res)
{
- return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
+ res->abi = RDMA_USER_CM_ABI_VERSION;
+ res->cdev = ucma_misc.this_device;
+ return 0;
}
-static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
+
+static struct ib_client rdma_cma_client = {
+ .name = "rdma_cm",
+ .get_global_nl_info = ucma_get_global_nl_info,
+};
+MODULE_ALIAS_RDMA_CLIENT("rdma_cm");
+
+static ssize_t abi_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
+}
+static DEVICE_ATTR_RO(abi_version);
static int __init ucma_init(void)
{
@@ -1710,17 +1970,24 @@ static int __init ucma_init(void)
ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
if (ret) {
- printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
+ pr_err("rdma_ucm: couldn't create abi_version attr\n");
goto err1;
}
ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
if (!ucma_ctl_table_hdr) {
- printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
+ pr_err("rdma_ucm: couldn't register sysctl paths\n");
ret = -ENOMEM;
goto err2;
}
+
+ ret = ib_register_client(&rdma_cma_client);
+ if (ret)
+ goto err3;
+
return 0;
+err3:
+ unregister_net_sysctl_table(ucma_ctl_table_hdr);
err2:
device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
@@ -1730,11 +1997,10 @@ err1:
static void __exit ucma_cleanup(void)
{
+ ib_unregister_client(&rdma_cma_client);
unregister_net_sysctl_table(ucma_ctl_table_hdr);
device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
misc_deregister(&ucma_misc);
- idr_destroy(&ctx_idr);
- idr_destroy(&multicast_idr);
}
module_init(ucma_init);
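
For readers following the locking conversion above: the ucma changes rely on the xarray's ability to keep an ID reserved while hiding the object from lookups. Below is a minimal, self-contained sketch of that reserve/publish/hide lifecycle, not code from this patch; struct foo, foo_table and the helper names are hypothetical and only mirror the calls the patch itself uses (xa_alloc with a NULL entry, xa_store, xa_cmpxchg against XA_ZERO_ENTRY).

#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/xarray.h>

struct foo {
	u32 id;
	refcount_t ref;
};

static DEFINE_XARRAY_ALLOC(foo_table);

/* Reserve an id without publishing the object: xa_load() still returns NULL. */
static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	refcount_set(&f->ref, 1);
	/* A NULL entry is stored as a zero entry, which reserves the id. */
	if (xa_alloc(&foo_table, &f->id, NULL, xa_limit_32b, GFP_KERNEL)) {
		kfree(f);
		return NULL;
	}
	return f;
}

/* Publish: from here on xa_load(&foo_table, f->id) returns f. */
static void foo_finish(struct foo *f)
{
	xa_store(&foo_table, f->id, f, GFP_KERNEL);
}

/* Hide again; only the one thread whose cmpxchg succeeds may destroy f. */
static bool foo_hide(struct foo *f)
{
	return xa_cmpxchg(&foo_table, f->id, f, XA_ZERO_ENTRY,
			  GFP_KERNEL) == f;
}

/* Final teardown: drop the reserved (zero) entry and free the memory. */
static void foo_free(struct foo *f)
{
	WARN_ON(xa_cmpxchg(&foo_table, f->id, XA_ZERO_ENTRY, NULL,
			   GFP_KERNEL) != NULL);
	kfree(f);
}

In those terms, ucma_create_id() corresponds to foo_alloc() followed by foo_finish(), ucma_destroy_id() is the thread that wins the foo_hide() cmpxchg, and ucma_destroy_private_ctx() ends with the foo_free() step.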
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index 72feee620ebf..8d3dfef9ebaa 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -35,12 +35,13 @@
#include <linux/string.h>
#include <linux/export.h>
#include <linux/if_ether.h>
+#include <linux/ip.h>
#include <rdma/ib_pack.h>
#define STRUCT_FIELD(header, field) \
.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
- .struct_size_bytes = sizeof ((struct ib_unpacked_ ## header *) 0)->field, \
+ .struct_size_bytes = sizeof_field(struct ib_unpacked_ ## header, field), \
.field_name = #header ":" #field
static const struct ib_field lrh_table[] = {
@@ -116,6 +117,72 @@ static const struct ib_field vlan_table[] = {
.size_bits = 16 }
};
+static const struct ib_field ip4_table[] = {
+ { STRUCT_FIELD(ip4, ver),
+ .offset_words = 0,
+ .offset_bits = 0,
+ .size_bits = 4 },
+ { STRUCT_FIELD(ip4, hdr_len),
+ .offset_words = 0,
+ .offset_bits = 4,
+ .size_bits = 4 },
+ { STRUCT_FIELD(ip4, tos),
+ .offset_words = 0,
+ .offset_bits = 8,
+ .size_bits = 8 },
+ { STRUCT_FIELD(ip4, tot_len),
+ .offset_words = 0,
+ .offset_bits = 16,
+ .size_bits = 16 },
+ { STRUCT_FIELD(ip4, id),
+ .offset_words = 1,
+ .offset_bits = 0,
+ .size_bits = 16 },
+ { STRUCT_FIELD(ip4, frag_off),
+ .offset_words = 1,
+ .offset_bits = 16,
+ .size_bits = 16 },
+ { STRUCT_FIELD(ip4, ttl),
+ .offset_words = 2,
+ .offset_bits = 0,
+ .size_bits = 8 },
+ { STRUCT_FIELD(ip4, protocol),
+ .offset_words = 2,
+ .offset_bits = 8,
+ .size_bits = 8 },
+ { STRUCT_FIELD(ip4, check),
+ .offset_words = 2,
+ .offset_bits = 16,
+ .size_bits = 16 },
+ { STRUCT_FIELD(ip4, saddr),
+ .offset_words = 3,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { STRUCT_FIELD(ip4, daddr),
+ .offset_words = 4,
+ .offset_bits = 0,
+ .size_bits = 32 }
+};
+
+static const struct ib_field udp_table[] = {
+ { STRUCT_FIELD(udp, sport),
+ .offset_words = 0,
+ .offset_bits = 0,
+ .size_bits = 16 },
+ { STRUCT_FIELD(udp, dport),
+ .offset_words = 0,
+ .offset_bits = 16,
+ .size_bits = 16 },
+ { STRUCT_FIELD(udp, length),
+ .offset_words = 1,
+ .offset_bits = 0,
+ .size_bits = 16 },
+ { STRUCT_FIELD(udp, csum),
+ .offset_words = 1,
+ .offset_bits = 16,
+ .size_bits = 16 }
+};
+
static const struct ib_field grh_table[] = {
{ STRUCT_FIELD(grh, ip_version),
.offset_words = 0,
@@ -213,26 +280,59 @@ static const struct ib_field deth_table[] = {
.size_bits = 24 }
};
+__sum16 ib_ud_ip4_csum(struct ib_ud_header *header)
+{
+ struct iphdr iph;
+
+ iph.ihl = 5;
+ iph.version = 4;
+ iph.tos = header->ip4.tos;
+ iph.tot_len = header->ip4.tot_len;
+ iph.id = header->ip4.id;
+ iph.frag_off = header->ip4.frag_off;
+ iph.ttl = header->ip4.ttl;
+ iph.protocol = header->ip4.protocol;
+ iph.check = 0;
+ iph.saddr = header->ip4.saddr;
+ iph.daddr = header->ip4.daddr;
+
+ return ip_fast_csum((u8 *)&iph, iph.ihl);
+}
+EXPORT_SYMBOL(ib_ud_ip4_csum);
+
/**
* ib_ud_header_init - Initialize UD header structure
* @payload_bytes:Length of packet payload
* @lrh_present: specify if LRH is present
* @eth_present: specify if Eth header is present
* @vlan_present: packet is tagged vlan
- * @grh_present:GRH flag (if non-zero, GRH will be included)
+ * @grh_present: GRH flag (if non-zero, GRH will be included)
+ * @ip_version: if non-zero, IP header, V4 or V6, will be included
+ * @udp_present: if non-zero, UDP header will be included
* @immediate_present: specify if immediate data is present
* @header:Structure to initialize
*/
-void ib_ud_header_init(int payload_bytes,
- int lrh_present,
- int eth_present,
- int vlan_present,
- int grh_present,
- int immediate_present,
- struct ib_ud_header *header)
+int ib_ud_header_init(int payload_bytes,
+ int lrh_present,
+ int eth_present,
+ int vlan_present,
+ int grh_present,
+ int ip_version,
+ int udp_present,
+ int immediate_present,
+ struct ib_ud_header *header)
{
+ size_t udp_bytes = udp_present ? IB_UDP_BYTES : 0;
+
+ grh_present = grh_present && !ip_version;
memset(header, 0, sizeof *header);
+ /*
+ * UDP header without IP header doesn't make sense
+ */
+ if (udp_present && ip_version != 4 && ip_version != 6)
+ return -EINVAL;
+
if (lrh_present) {
u16 packet_length;
@@ -252,16 +352,37 @@ void ib_ud_header_init(int payload_bytes,
if (vlan_present)
header->eth.type = cpu_to_be16(ETH_P_8021Q);
- if (grh_present) {
+ if (ip_version == 6 || grh_present) {
header->grh.ip_version = 6;
header->grh.payload_length =
- cpu_to_be16((IB_BTH_BYTES +
+ cpu_to_be16((udp_bytes +
+ IB_BTH_BYTES +
IB_DETH_BYTES +
payload_bytes +
4 + /* ICRC */
3) & ~3); /* round up */
- header->grh.next_header = 0x1b;
+ header->grh.next_header = udp_present ? IPPROTO_UDP : 0x1b;
+ }
+
+ if (ip_version == 4) {
+ header->ip4.ver = 4; /* version 4 */
+ header->ip4.hdr_len = 5; /* 5 words */
+ header->ip4.tot_len =
+ cpu_to_be16(IB_IP4_BYTES +
+ udp_bytes +
+ IB_BTH_BYTES +
+ IB_DETH_BYTES +
+ payload_bytes +
+ 4); /* ICRC */
+ header->ip4.protocol = IPPROTO_UDP;
}
+ if (udp_present && ip_version)
+ header->udp.length =
+ cpu_to_be16(IB_UDP_BYTES +
+ IB_BTH_BYTES +
+ IB_DETH_BYTES +
+ payload_bytes +
+ 4); /* ICRC */
if (immediate_present)
header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
@@ -273,8 +394,11 @@ void ib_ud_header_init(int payload_bytes,
header->lrh_present = lrh_present;
header->eth_present = eth_present;
header->vlan_present = vlan_present;
- header->grh_present = grh_present;
+ header->grh_present = grh_present || (ip_version == 6);
+ header->ipv4_present = ip_version == 4;
+ header->udp_present = udp_present;
header->immediate_present = immediate_present;
+ return 0;
}
EXPORT_SYMBOL(ib_ud_header_init);
@@ -311,6 +435,16 @@ int ib_ud_header_pack(struct ib_ud_header *header,
&header->grh, buf + len);
len += IB_GRH_BYTES;
}
+ if (header->ipv4_present) {
+ ib_pack(ip4_table, ARRAY_SIZE(ip4_table),
+ &header->ip4, buf + len);
+ len += IB_IP4_BYTES;
+ }
+ if (header->udp_present) {
+ ib_pack(udp_table, ARRAY_SIZE(udp_table),
+ &header->udp, buf + len);
+ len += IB_UDP_BYTES;
+ }
ib_pack(bth_table, ARRAY_SIZE(bth_table),
&header->bth, buf + len);
@@ -328,87 +462,3 @@ int ib_ud_header_pack(struct ib_ud_header *header,
return len;
}
EXPORT_SYMBOL(ib_ud_header_pack);
-
-/**
- * ib_ud_header_unpack - Unpack UD header struct from wire format
- * @header:UD header struct
- * @buf:Buffer to pack into
- *
- * ib_ud_header_pack() unpacks the UD header structure @header from wire
- * format in the buffer @buf.
- */
-int ib_ud_header_unpack(void *buf,
- struct ib_ud_header *header)
-{
- ib_unpack(lrh_table, ARRAY_SIZE(lrh_table),
- buf, &header->lrh);
- buf += IB_LRH_BYTES;
-
- if (header->lrh.link_version != 0) {
- printk(KERN_WARNING "Invalid LRH.link_version %d\n",
- header->lrh.link_version);
- return -EINVAL;
- }
-
- switch (header->lrh.link_next_header) {
- case IB_LNH_IBA_LOCAL:
- header->grh_present = 0;
- break;
-
- case IB_LNH_IBA_GLOBAL:
- header->grh_present = 1;
- ib_unpack(grh_table, ARRAY_SIZE(grh_table),
- buf, &header->grh);
- buf += IB_GRH_BYTES;
-
- if (header->grh.ip_version != 6) {
- printk(KERN_WARNING "Invalid GRH.ip_version %d\n",
- header->grh.ip_version);
- return -EINVAL;
- }
- if (header->grh.next_header != 0x1b) {
- printk(KERN_WARNING "Invalid GRH.next_header 0x%02x\n",
- header->grh.next_header);
- return -EINVAL;
- }
- break;
-
- default:
- printk(KERN_WARNING "Invalid LRH.link_next_header %d\n",
- header->lrh.link_next_header);
- return -EINVAL;
- }
-
- ib_unpack(bth_table, ARRAY_SIZE(bth_table),
- buf, &header->bth);
- buf += IB_BTH_BYTES;
-
- switch (header->bth.opcode) {
- case IB_OPCODE_UD_SEND_ONLY:
- header->immediate_present = 0;
- break;
- case IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE:
- header->immediate_present = 1;
- break;
- default:
- printk(KERN_WARNING "Invalid BTH.opcode 0x%02x\n",
- header->bth.opcode);
- return -EINVAL;
- }
-
- if (header->bth.transport_header_version != 0) {
- printk(KERN_WARNING "Invalid BTH.transport_header_version %d\n",
- header->bth.transport_header_version);
- return -EINVAL;
- }
-
- ib_unpack(deth_table, ARRAY_SIZE(deth_table),
- buf, &header->deth);
- buf += IB_DETH_BYTES;
-
- if (header->immediate_present)
- memcpy(&header->immediate_data, buf, sizeof header->immediate_data);
-
- return 0;
-}
-EXPORT_SYMBOL(ib_ud_header_unpack);
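
The extended ib_ud_header_init() above takes ip_version and udp_present so that RoCEv2-style headers (IPv4 plus UDP encapsulation) can be built with the same helper. The following is a rough usage sketch, not taken from any driver: build_roce_v2_ud_header() and its field choices (TTL, ports) are made up for illustration, L2 framing is omitted, and a real caller would also fill the bth/deth fields (destination QPN, Q_Key, PSN).

#include <rdma/ib_pack.h>

/* Hypothetical helper: build an IPv4+UDP (RoCEv2-style) UD header and pack
 * it into a wire buffer. Returns the packed length or a negative errno.
 */
static int build_roce_v2_ud_header(void *buf, int payload_bytes,
				   __be32 saddr, __be32 daddr)
{
	struct ib_ud_header hdr;
	int ret;

	ret = ib_ud_header_init(payload_bytes,
				0,		/* lrh_present */
				0,		/* eth_present */
				0,		/* vlan_present */
				0,		/* grh_present */
				4,		/* ip_version */
				1,		/* udp_present */
				0,		/* immediate_present */
				&hdr);
	if (ret)
		return ret;

	/* ver, hdr_len, tot_len and protocol were set by ib_ud_header_init() */
	hdr.ip4.ttl = 64;
	hdr.ip4.saddr = saddr;
	hdr.ip4.daddr = daddr;
	hdr.ip4.check = ib_ud_ip4_csum(&hdr);

	hdr.udp.sport = cpu_to_be16(0xc000);	/* illustrative source port */
	hdr.udp.dport = cpu_to_be16(4791);	/* RoCEv2 UDP port */
	/* udp.length was set by ib_ud_header_init() */

	return ib_ud_header_pack(&hdr, buf);
}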
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 38acb3cfc545..c5b686394760 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -2,6 +2,7 @@
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2020 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -34,73 +35,139 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
-#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
#include <linux/export.h>
-#include <linux/hugetlb.h>
-#include <linux/dma-attrs.h>
#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/count_zeros.h>
#include <rdma/ib_umem_odp.h>
#include "uverbs.h"
-
static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
+ bool make_dirty = umem->writable && dirty;
+ struct scatterlist *sg;
+ unsigned int i;
+
+ if (dirty)
+ ib_dma_unmap_sgtable_attrs(dev, &umem->sgt_append.sgt,
+ DMA_BIDIRECTIONAL, 0);
+
+ for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i)
+ unpin_user_page_range_dirty_lock(sg_page(sg),
+ DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty);
+
+ sg_free_append_table(&umem->sgt_append);
+}
+
+/**
+ * ib_umem_find_best_pgsz - Find best HW page size to use for this MR
+ *
+ * @umem: umem struct
+ * @pgsz_bitmap: bitmap of HW supported page sizes
+ * @virt: IOVA
+ *
+ * This helper is intended for HW that support multiple page
+ * sizes but can do only a single page size in an MR.
+ *
+ * Returns 0 if the umem requires page sizes not supported by
+ * the driver to be mapped. Drivers always supporting PAGE_SIZE
+ * or smaller will never see a 0 result.
+ */
+unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
+ unsigned long pgsz_bitmap,
+ unsigned long virt)
+{
+ unsigned long curr_len = 0;
+ dma_addr_t curr_base = ~0;
+ unsigned long va, pgoff;
struct scatterlist *sg;
- struct page *page;
+ dma_addr_t mask;
+ dma_addr_t end;
int i;
- if (umem->nmap > 0)
- ib_dma_unmap_sg(dev, umem->sg_head.sgl,
- umem->nmap,
- DMA_BIDIRECTIONAL);
+ umem->iova = va = virt;
- for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
+ if (umem->is_odp) {
+ unsigned int page_size = BIT(to_ib_umem_odp(umem)->page_shift);
- page = sg_page(sg);
- if (umem->writable && dirty)
- set_page_dirty_lock(page);
- put_page(page);
+ /* ODP must always be self consistent. */
+ if (!(pgsz_bitmap & page_size))
+ return 0;
+ return page_size;
}
- sg_free_table(&umem->sg_head);
- return;
+ /* The best result is the smallest page size that results in the minimum
+ * number of required pages. Compute the largest page size that could
+ * work based on VA address bits that don't change.
+ */
+ mask = pgsz_bitmap &
+ GENMASK(BITS_PER_LONG - 1,
+ bits_per((umem->length - 1 + virt) ^ virt));
+ /* offset into first SGL */
+ pgoff = umem->address & ~PAGE_MASK;
+
+ for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
+ /* If the current entry is physically contiguous with the previous
+ * one, no need to take its start addresses into consideration.
+ */
+ if (check_add_overflow(curr_base, curr_len, &end) ||
+ end != sg_dma_address(sg)) {
+
+ curr_base = sg_dma_address(sg);
+ curr_len = 0;
+
+ /* Reduce max page size if VA/PA bits differ */
+ mask |= (curr_base + pgoff) ^ va;
+
+ /* The alignment of any VA matching a discontinuity point
+ * in the physical memory sets the maximum possible page
+ * size as this must be a starting point of a new page that
+ * needs to be aligned.
+ */
+ if (i != 0)
+ mask |= va;
+ }
+
+ curr_len += sg_dma_len(sg);
+ va += sg_dma_len(sg) - pgoff;
+
+ pgoff = 0;
+ }
+ /* The mask accumulates 1's in each position where the VA and physical
+ * address differ; thus the number of trailing zeros gives the largest
+ * page size that can pass the VA through to the physical.
+ */
+ if (mask)
+ pgsz_bitmap &= GENMASK(count_trailing_zeros(mask), 0);
+ return pgsz_bitmap ? rounddown_pow_of_two(pgsz_bitmap) : 0;
}
+EXPORT_SYMBOL(ib_umem_find_best_pgsz);
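
The XOR/trailing-zero trick above is easiest to see with numbers (illustrative only): for a 2 MiB MR at IOVA 0x1200000 whose pages form one physically contiguous run starting at 0x8200000, every bit below the 2 MiB boundary agrees between the IOVA and the DMA address, so a 2 MiB entry in pgsz_bitmap survives and is returned; shift the physical run to 0x8201000 and bit 12 differs, so the mask's trailing zeros drop to 12 and 4 KiB is the best answer.
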
/**
* ib_umem_get - Pin and DMA map userspace memory.
*
- * If access flags indicate ODP memory, avoid pinning. Instead, stores
- * the mm for future page fault handling in conjunction with MMU notifiers.
- *
- * @context: userspace context to pin memory for
+ * @device: IB device to connect UMEM
* @addr: userspace virtual address to start at
* @size: length of region to pin
* @access: IB_ACCESS_xxx flags for memory being pinned
- * @dmasync: flush in-flight DMA when the memory region is written
*/
-struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
- size_t size, int access, int dmasync)
+struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
+ size_t size, int access)
{
struct ib_umem *umem;
struct page **page_list;
- struct vm_area_struct **vma_list;
- unsigned long locked;
unsigned long lock_limit;
+ unsigned long new_pinned;
unsigned long cur_base;
+ unsigned long dma_attr = 0;
+ struct mm_struct *mm;
unsigned long npages;
- int ret;
- int i;
- DEFINE_DMA_ATTRS(attrs);
- struct scatterlist *sg, *sg_list_start;
- int need_release = 0;
-
- if (dmasync)
- dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
-
- if (!size)
- return ERR_PTR(-EINVAL);
+ int pinned, ret;
+ unsigned int gup_flags = FOLL_LONGTERM;
/*
* If the combination of the addr and size requested for this memory
@@ -113,223 +180,118 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
if (!can_do_mlock())
return ERR_PTR(-EPERM);
- umem = kzalloc(sizeof *umem, GFP_KERNEL);
+ if (access & IB_ACCESS_ON_DEMAND)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ umem = kzalloc(sizeof(*umem), GFP_KERNEL);
if (!umem)
return ERR_PTR(-ENOMEM);
-
- umem->context = context;
- umem->length = size;
- umem->address = addr;
- umem->page_size = PAGE_SIZE;
- umem->pid = get_task_pid(current, PIDTYPE_PID);
+ umem->ibdev = device;
+ umem->length = size;
+ umem->address = addr;
/*
- * We ask for writable memory if any of the following
- * access flags are set. "Local write" and "remote write"
- * obviously require write access. "Remote atomic" can do
- * things like fetch and add, which will modify memory, and
- * "MW bind" can change permissions by binding a window.
+ * Drivers should call ib_umem_find_best_pgsz() to set the iova
+ * correctly.
*/
- umem->writable = !!(access &
- (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
-
- if (access & IB_ACCESS_ON_DEMAND) {
- ret = ib_umem_odp_get(context, umem);
- if (ret) {
- kfree(umem);
- return ERR_PTR(ret);
- }
- return umem;
- }
-
- umem->odp_data = NULL;
-
- /* We assume the memory is from hugetlb until proved otherwise */
- umem->hugetlb = 1;
+ umem->iova = addr;
+ umem->writable = ib_access_writable(access);
+ umem->owning_mm = mm = current->mm;
+ mmgrab(mm);
page_list = (struct page **) __get_free_page(GFP_KERNEL);
if (!page_list) {
- kfree(umem);
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto umem_kfree;
}
- /*
- * if we can't alloc the vma_list, it's not so bad;
- * just assume the memory is not hugetlb memory
- */
- vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
- if (!vma_list)
- umem->hugetlb = 0;
-
npages = ib_umem_num_pages(umem);
+ if (npages == 0 || npages > UINT_MAX) {
+ ret = -EINVAL;
+ goto out;
+ }
- down_write(&current->mm->mmap_sem);
-
- locked = npages + current->mm->pinned_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
+ new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
+ if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
+ atomic64_sub(npages, &mm->pinned_vm);
ret = -ENOMEM;
goto out;
}
cur_base = addr & PAGE_MASK;
- if (npages == 0) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
- if (ret)
- goto out;
-
- need_release = 1;
- sg_list_start = umem->sg_head.sgl;
+ if (umem->writable)
+ gup_flags |= FOLL_WRITE;
while (npages) {
- ret = get_user_pages(current, current->mm, cur_base,
- min_t(unsigned long, npages,
- PAGE_SIZE / sizeof (struct page *)),
- 1, !umem->writable, page_list, vma_list);
-
- if (ret < 0)
- goto out;
-
- umem->npages += ret;
- cur_base += ret * PAGE_SIZE;
- npages -= ret;
-
- for_each_sg(sg_list_start, sg, ret, i) {
- if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
- umem->hugetlb = 0;
-
- sg_set_page(sg, page_list[i], PAGE_SIZE, 0);
+ cond_resched();
+ pinned = pin_user_pages_fast(cur_base,
+ min_t(unsigned long, npages,
+ PAGE_SIZE /
+ sizeof(struct page *)),
+ gup_flags, page_list);
+ if (pinned < 0) {
+ ret = pinned;
+ goto umem_release;
}
- /* preparing for next loop */
- sg_list_start = sg;
+ cur_base += pinned * PAGE_SIZE;
+ npages -= pinned;
+ ret = sg_alloc_append_table_from_pages(
+ &umem->sgt_append, page_list, pinned, 0,
+ pinned << PAGE_SHIFT, ib_dma_max_seg_size(device),
+ npages, GFP_KERNEL);
+ if (ret) {
+ unpin_user_pages_dirty_lock(page_list, pinned, 0);
+ goto umem_release;
+ }
}
- umem->nmap = ib_dma_map_sg_attrs(context->device,
- umem->sg_head.sgl,
- umem->npages,
- DMA_BIDIRECTIONAL,
- &attrs);
-
- if (umem->nmap <= 0) {
- ret = -ENOMEM;
- goto out;
- }
+ if (access & IB_ACCESS_RELAXED_ORDERING)
+ dma_attr |= DMA_ATTR_WEAK_ORDERING;
- ret = 0;
+ ret = ib_dma_map_sgtable_attrs(device, &umem->sgt_append.sgt,
+ DMA_BIDIRECTIONAL, dma_attr);
+ if (ret)
+ goto umem_release;
+ goto out;
+umem_release:
+ __ib_umem_release(device, umem, 0);
+ atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
out:
- if (ret < 0) {
- if (need_release)
- __ib_umem_release(context->device, umem, 0);
- put_pid(umem->pid);
- kfree(umem);
- } else
- current->mm->pinned_vm = locked;
-
- up_write(&current->mm->mmap_sem);
- if (vma_list)
- free_page((unsigned long) vma_list);
free_page((unsigned long) page_list);
-
- return ret < 0 ? ERR_PTR(ret) : umem;
+umem_kfree:
+ if (ret) {
+ mmdrop(umem->owning_mm);
+ kfree(umem);
+ }
+ return ret ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
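
Putting the pieces together, the new calling convention from a driver's reg_user_mr path looks roughly like the sketch below; pd, hw_pgsz_bitmap and the final HW-programming step are placeholders, and real drivers differ in detail.

	/* hedged sketch, not part of this patch */
	static struct ib_umem *sketch_pin_user_mr(struct ib_pd *pd, u64 start,
						  u64 length, u64 iova,
						  int access,
						  unsigned long hw_pgsz_bitmap)
	{
		struct ib_umem *umem;
		unsigned long page_size;

		umem = ib_umem_get(pd->device, start, length, access);
		if (IS_ERR(umem))
			return umem;

		page_size = ib_umem_find_best_pgsz(umem, hw_pgsz_bitmap, iova);
		if (!page_size) {
			ib_umem_release(umem);
			return ERR_PTR(-EINVAL);
		}

		/* walk umem->sgt_append.sgt in page_size steps and program the MR */
		return umem;
	}
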
-static void ib_umem_account(struct work_struct *work)
-{
- struct ib_umem *umem = container_of(work, struct ib_umem, work);
-
- down_write(&umem->mm->mmap_sem);
- umem->mm->pinned_vm -= umem->diff;
- up_write(&umem->mm->mmap_sem);
- mmput(umem->mm);
- kfree(umem);
-}
-
/**
* ib_umem_release - release memory pinned with ib_umem_get
* @umem: umem struct to release
*/
void ib_umem_release(struct ib_umem *umem)
{
- struct ib_ucontext *context = umem->context;
- struct mm_struct *mm;
- struct task_struct *task;
- unsigned long diff;
-
- if (umem->odp_data) {
- ib_umem_odp_release(umem);
+ if (!umem)
return;
- }
-
- __ib_umem_release(umem->context->device, umem, 1);
+ if (umem->is_dmabuf)
+ return ib_umem_dmabuf_release(to_ib_umem_dmabuf(umem));
+ if (umem->is_odp)
+ return ib_umem_odp_release(to_ib_umem_odp(umem));
- task = get_pid_task(umem->pid, PIDTYPE_PID);
- put_pid(umem->pid);
- if (!task)
- goto out;
- mm = get_task_mm(task);
- put_task_struct(task);
- if (!mm)
- goto out;
-
- diff = ib_umem_num_pages(umem);
+ __ib_umem_release(umem->ibdev, umem, 1);
- /*
- * We may be called with the mm's mmap_sem already held. This
- * can happen when a userspace munmap() is the call that drops
- * the last reference to our file and calls our release
- * method. If there are memory regions to destroy, we'll end
- * up here and not be able to take the mmap_sem. In that case
- * we defer the vm_locked accounting to the system workqueue.
- */
- if (context->closing) {
- if (!down_write_trylock(&mm->mmap_sem)) {
- INIT_WORK(&umem->work, ib_umem_account);
- umem->mm = mm;
- umem->diff = diff;
-
- queue_work(ib_wq, &umem->work);
- return;
- }
- } else
- down_write(&mm->mmap_sem);
-
- mm->pinned_vm -= diff;
- up_write(&mm->mmap_sem);
- mmput(mm);
-out:
+ atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
+ mmdrop(umem->owning_mm);
kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);
-int ib_umem_page_count(struct ib_umem *umem)
-{
- int shift;
- int i;
- int n;
- struct scatterlist *sg;
-
- if (umem->odp_data)
- return ib_umem_num_pages(umem);
-
- shift = ilog2(umem->page_size);
-
- n = 0;
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
- n += sg_dma_len(sg) >> shift;
-
- return n;
-}
-EXPORT_SYMBOL(ib_umem_page_count);
-
/*
* Copy from the given ib_umem's pages to the given buffer.
*
@@ -347,12 +309,13 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
int ret;
if (offset > umem->length || length > umem->length - offset) {
- pr_err("ib_umem_copy_from not in range. offset: %zd umem length: %zd end: %zd\n",
- offset, umem->length, end);
+ pr_err("%s not in range. offset: %zd umem length: %zd end: %zd\n",
+ __func__, offset, umem->length, end);
return -EINVAL;
}
- ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length,
+ ret = sg_pcopy_to_buffer(umem->sgt_append.sgt.sgl,
+ umem->sgt_append.sgt.orig_nents, dst, length,
offset + ib_umem_offset(umem));
if (ret < 0)
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
new file mode 100644
index 000000000000..0ec2e4120cc9
--- /dev/null
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright (c) 2020 Intel Corporation. All rights reserved.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/dma-resv.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+
+#include "uverbs.h"
+
+MODULE_IMPORT_NS("DMA_BUF");
+
+int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
+{
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ unsigned long start, end, cur = 0;
+ unsigned int nmap = 0;
+ long ret;
+ int i;
+
+ dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
+
+ if (umem_dmabuf->revoked)
+ return -EINVAL;
+
+ if (umem_dmabuf->sgt)
+ goto wait_fence;
+
+ sgt = dma_buf_map_attachment(umem_dmabuf->attach,
+ DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ /* modify the sg list in-place to match umem address and length */
+
+ start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE);
+ end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length,
+ PAGE_SIZE);
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ if (start < cur + sg_dma_len(sg) && cur < end)
+ nmap++;
+ if (cur <= start && start < cur + sg_dma_len(sg)) {
+ unsigned long offset = start - cur;
+
+ umem_dmabuf->first_sg = sg;
+ umem_dmabuf->first_sg_offset = offset;
+ sg_dma_address(sg) += offset;
+ sg_dma_len(sg) -= offset;
+ cur += offset;
+ }
+ if (cur < end && end <= cur + sg_dma_len(sg)) {
+ unsigned long trim = cur + sg_dma_len(sg) - end;
+
+ umem_dmabuf->last_sg = sg;
+ umem_dmabuf->last_sg_trim = trim;
+ sg_dma_len(sg) -= trim;
+ break;
+ }
+ cur += sg_dma_len(sg);
+ }
+
+ umem_dmabuf->umem.sgt_append.sgt.sgl = umem_dmabuf->first_sg;
+ umem_dmabuf->umem.sgt_append.sgt.nents = nmap;
+ umem_dmabuf->sgt = sgt;
+
+wait_fence:
+ /*
+ * Although the sg list is valid now, the content of the pages
+ * may not be up-to-date. Wait for the exporter to finish
+ * the migration.
+ */
+ ret = dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
+ DMA_RESV_USAGE_KERNEL,
+ false, MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ return -ETIMEDOUT;
+ return 0;
+}
+EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
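
To make the trimming above concrete (the numbers are illustrative only): suppose umem.address is 0x3000 and umem.length is 0x5000, so start = 0x3000 and end = 0x8000, and the exporter returns a single 64 KiB entry covering dma-buf offsets 0x0-0x10000. The entry containing start gets first_sg_offset = 0x3000, its DMA address advances and its length shrinks by that much; the same entry also contains end, so last_sg_trim = 0x10000 - 0x8000 = 0x8000 and the length shrinks again. The surviving entry then describes exactly the 0x5000 bytes backing the umem, with nmap = 1, and ib_umem_dmabuf_unmap_pages() undoes both adjustments before handing the sg table back to the exporter.
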
+
+void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
+{
+ dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
+
+ if (!umem_dmabuf->sgt)
+ return;
+
+ /* restore the original sg list */
+ if (umem_dmabuf->first_sg) {
+ sg_dma_address(umem_dmabuf->first_sg) -=
+ umem_dmabuf->first_sg_offset;
+ sg_dma_len(umem_dmabuf->first_sg) +=
+ umem_dmabuf->first_sg_offset;
+ umem_dmabuf->first_sg = NULL;
+ umem_dmabuf->first_sg_offset = 0;
+ }
+ if (umem_dmabuf->last_sg) {
+ sg_dma_len(umem_dmabuf->last_sg) +=
+ umem_dmabuf->last_sg_trim;
+ umem_dmabuf->last_sg = NULL;
+ umem_dmabuf->last_sg_trim = 0;
+ }
+
+ dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt,
+ DMA_BIDIRECTIONAL);
+
+ umem_dmabuf->sgt = NULL;
+}
+EXPORT_SYMBOL(ib_umem_dmabuf_unmap_pages);
+
+static struct ib_umem_dmabuf *
+ib_umem_dmabuf_get_with_dma_device(struct ib_device *device,
+ struct device *dma_device,
+ unsigned long offset, size_t size,
+ int fd, int access,
+ const struct dma_buf_attach_ops *ops)
+{
+ struct dma_buf *dmabuf;
+ struct ib_umem_dmabuf *umem_dmabuf;
+ struct ib_umem *umem;
+ unsigned long end;
+ struct ib_umem_dmabuf *ret = ERR_PTR(-EINVAL);
+
+ if (check_add_overflow(offset, (unsigned long)size, &end))
+ return ret;
+
+ if (unlikely(!ops || !ops->move_notify))
+ return ret;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf))
+ return ERR_CAST(dmabuf);
+
+ if (dmabuf->size < end)
+ goto out_release_dmabuf;
+
+ umem_dmabuf = kzalloc(sizeof(*umem_dmabuf), GFP_KERNEL);
+ if (!umem_dmabuf) {
+ ret = ERR_PTR(-ENOMEM);
+ goto out_release_dmabuf;
+ }
+
+ umem = &umem_dmabuf->umem;
+ umem->ibdev = device;
+ umem->length = size;
+ umem->address = offset;
+ umem->writable = ib_access_writable(access);
+ umem->is_dmabuf = 1;
+
+ if (!ib_umem_num_pages(umem))
+ goto out_free_umem;
+
+ umem_dmabuf->attach = dma_buf_dynamic_attach(
+ dmabuf,
+ dma_device,
+ ops,
+ umem_dmabuf);
+ if (IS_ERR(umem_dmabuf->attach)) {
+ ret = ERR_CAST(umem_dmabuf->attach);
+ goto out_free_umem;
+ }
+ return umem_dmabuf;
+
+out_free_umem:
+ kfree(umem_dmabuf);
+
+out_release_dmabuf:
+ dma_buf_put(dmabuf);
+ return ret;
+}
+
+struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
+ unsigned long offset, size_t size,
+ int fd, int access,
+ const struct dma_buf_attach_ops *ops)
+{
+ return ib_umem_dmabuf_get_with_dma_device(device, device->dma_device,
+ offset, size, fd, access, ops);
+}
+EXPORT_SYMBOL(ib_umem_dmabuf_get);
+
+static void
+ib_umem_dmabuf_unsupported_move_notify(struct dma_buf_attachment *attach)
+{
+ struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
+
+ ibdev_warn_ratelimited(umem_dmabuf->umem.ibdev,
+ "Invalidate callback should not be called when memory is pinned\n");
+}
+
+static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = {
+ .allow_peer2peer = true,
+ .move_notify = ib_umem_dmabuf_unsupported_move_notify,
+};
+
+struct ib_umem_dmabuf *
+ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
+ struct device *dma_device,
+ unsigned long offset, size_t size,
+ int fd, int access)
+{
+ struct ib_umem_dmabuf *umem_dmabuf;
+ int err;
+
+ umem_dmabuf = ib_umem_dmabuf_get_with_dma_device(device, dma_device, offset,
+ size, fd, access,
+ &ib_umem_dmabuf_attach_pinned_ops);
+ if (IS_ERR(umem_dmabuf))
+ return umem_dmabuf;
+
+ dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
+ err = dma_buf_pin(umem_dmabuf->attach);
+ if (err)
+ goto err_release;
+ umem_dmabuf->pinned = 1;
+
+ err = ib_umem_dmabuf_map_pages(umem_dmabuf);
+ if (err)
+ goto err_unpin;
+ dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
+
+ return umem_dmabuf;
+
+err_unpin:
+ dma_buf_unpin(umem_dmabuf->attach);
+err_release:
+ dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
+ ib_umem_release(&umem_dmabuf->umem);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned_with_dma_device);
+
+struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
+ unsigned long offset,
+ size_t size, int fd,
+ int access)
+{
+ return ib_umem_dmabuf_get_pinned_with_dma_device(device, device->dma_device,
+ offset, size, fd, access);
+}
+EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned);
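
A hedged sketch of the pinned import path as a driver without move_notify support might use it; fd, offset, length and access would come from the user's command and are placeholders here.

	struct ib_umem_dmabuf *umem_dmabuf;

	umem_dmabuf = ib_umem_dmabuf_get_pinned(pd->device, offset, length,
						fd, access);
	if (IS_ERR(umem_dmabuf))
		return ERR_CAST(umem_dmabuf);
	/* umem_dmabuf->umem.sgt_append.sgt is now mapped and stays valid
	 * until ib_umem_release(&umem_dmabuf->umem) detaches the dma-buf.
	 */
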
+
+void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf)
+{
+ struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;
+
+ dma_resv_lock(dmabuf->resv, NULL);
+ if (umem_dmabuf->revoked)
+ goto end;
+ ib_umem_dmabuf_unmap_pages(umem_dmabuf);
+ if (umem_dmabuf->pinned) {
+ dma_buf_unpin(umem_dmabuf->attach);
+ umem_dmabuf->pinned = 0;
+ }
+ umem_dmabuf->revoked = 1;
+end:
+ dma_resv_unlock(dmabuf->resv);
+}
+EXPORT_SYMBOL(ib_umem_dmabuf_revoke);
+
+void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
+{
+ struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;
+
+ ib_umem_dmabuf_revoke(umem_dmabuf);
+
+ dma_buf_detach(dmabuf, umem_dmabuf->attach);
+ dma_buf_put(dmabuf);
+ kfree(umem_dmabuf);
+}
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 40becdb3196e..572a91a62a7b 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -32,324 +32,248 @@
#include <linux/types.h>
#include <linux/sched.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/task.h>
#include <linux/pid.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
+#include <linux/hugetlb.h>
+#include <linux/interval_tree.h>
+#include <linux/hmm.h>
+#include <linux/hmm-dma.h>
+#include <linux/pagemap.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
-static void ib_umem_notifier_start_account(struct ib_umem *item)
-{
- mutex_lock(&item->odp_data->umem_mutex);
-
- /* Only update private counters for this umem if it has them.
- * Otherwise skip it. All page faults will be delayed for this umem. */
- if (item->odp_data->mn_counters_active) {
- int notifiers_count = item->odp_data->notifiers_count++;
-
- if (notifiers_count == 0)
- /* Initialize the completion object for waiting on
- * notifiers. Since notifier_count is zero, no one
- * should be waiting right now. */
- reinit_completion(&item->odp_data->notifier_completion);
- }
- mutex_unlock(&item->odp_data->umem_mutex);
-}
-
-static void ib_umem_notifier_end_account(struct ib_umem *item)
-{
- mutex_lock(&item->odp_data->umem_mutex);
-
- /* Only update private counters for this umem if it has them.
- * Otherwise skip it. All page faults will be delayed for this umem. */
- if (item->odp_data->mn_counters_active) {
- /*
- * This sequence increase will notify the QP page fault that
- * the page that is going to be mapped in the spte could have
- * been freed.
- */
- ++item->odp_data->notifiers_seq;
- if (--item->odp_data->notifiers_count == 0)
- complete_all(&item->odp_data->notifier_completion);
- }
- mutex_unlock(&item->odp_data->umem_mutex);
-}
+#include "uverbs.h"
-/* Account for a new mmu notifier in an ib_ucontext. */
-static void ib_ucontext_notifier_start_account(struct ib_ucontext *context)
+static void ib_init_umem_implicit_odp(struct ib_umem_odp *umem_odp)
{
- atomic_inc(&context->notifier_count);
+ umem_odp->is_implicit_odp = 1;
+ umem_odp->umem.is_odp = 1;
+ mutex_init(&umem_odp->umem_mutex);
}
-/* Account for a terminating mmu notifier in an ib_ucontext.
- *
- * Must be called with the ib_ucontext->umem_rwsem semaphore unlocked, since
- * the function takes the semaphore itself. */
-static void ib_ucontext_notifier_end_account(struct ib_ucontext *context)
+static int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
+ const struct mmu_interval_notifier_ops *ops)
{
- int zero_notifiers = atomic_dec_and_test(&context->notifier_count);
-
- if (zero_notifiers &&
- !list_empty(&context->no_private_counters)) {
- /* No currently running mmu notifiers. Now is the chance to
- * add private accounting to all previously added umems. */
- struct ib_umem_odp *odp_data, *next;
-
- /* Prevent concurrent mmu notifiers from working on the
- * no_private_counters list. */
- down_write(&context->umem_rwsem);
-
- /* Read the notifier_count again, with the umem_rwsem
- * semaphore taken for write. */
- if (!atomic_read(&context->notifier_count)) {
- list_for_each_entry_safe(odp_data, next,
- &context->no_private_counters,
- no_private_counters) {
- mutex_lock(&odp_data->umem_mutex);
- odp_data->mn_counters_active = true;
- list_del(&odp_data->no_private_counters);
- complete_all(&odp_data->notifier_completion);
- mutex_unlock(&odp_data->umem_mutex);
- }
- }
+ struct ib_device *dev = umem_odp->umem.ibdev;
+ size_t page_size = 1UL << umem_odp->page_shift;
+ struct hmm_dma_map *map;
+ unsigned long start;
+ unsigned long end;
+ size_t nr_entries;
+ int ret = 0;
- up_write(&context->umem_rwsem);
- }
-}
+ umem_odp->umem.is_odp = 1;
+ mutex_init(&umem_odp->umem_mutex);
-static int ib_umem_notifier_release_trampoline(struct ib_umem *item, u64 start,
- u64 end, void *cookie) {
+ start = ALIGN_DOWN(umem_odp->umem.address, page_size);
+ if (check_add_overflow(umem_odp->umem.address,
+ (unsigned long)umem_odp->umem.length, &end))
+ return -EOVERFLOW;
+ end = ALIGN(end, page_size);
+ if (unlikely(end < page_size))
+ return -EOVERFLOW;
/*
- * Increase the number of notifiers running, to
- * prevent any further fault handling on this MR.
+ * The mmu notifier can be called within reclaim contexts and takes the
+ * umem_mutex. This is rare to trigger in testing, so teach lockdep
+ * about it.
*/
- ib_umem_notifier_start_account(item);
- item->odp_data->dying = 1;
- /* Make sure that the fact the umem is dying is out before we release
- * all pending page faults. */
- smp_wmb();
- complete_all(&item->odp_data->notifier_completion);
- item->context->invalidate_range(item, ib_umem_start(item),
- ib_umem_end(item));
- return 0;
-}
-
-static void ib_umem_notifier_release(struct mmu_notifier *mn,
- struct mm_struct *mm)
-{
- struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
-
- if (!context->invalidate_range)
- return;
-
- ib_ucontext_notifier_start_account(context);
- down_read(&context->umem_rwsem);
- rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
- ULLONG_MAX,
- ib_umem_notifier_release_trampoline,
- NULL);
- up_read(&context->umem_rwsem);
-}
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ fs_reclaim_acquire(GFP_KERNEL);
+ mutex_lock(&umem_odp->umem_mutex);
+ mutex_unlock(&umem_odp->umem_mutex);
+ fs_reclaim_release(GFP_KERNEL);
+ }
-static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
- u64 end, void *cookie)
-{
- ib_umem_notifier_start_account(item);
- item->context->invalidate_range(item, start, start + PAGE_SIZE);
- ib_umem_notifier_end_account(item);
- return 0;
-}
+ nr_entries = (end - start) >> PAGE_SHIFT;
+ if (!(nr_entries * PAGE_SIZE / page_size))
+ return -EINVAL;
-static void ib_umem_notifier_invalidate_page(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address)
-{
- struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
-
- if (!context->invalidate_range)
- return;
-
- ib_ucontext_notifier_start_account(context);
- down_read(&context->umem_rwsem);
- rbt_ib_umem_for_each_in_range(&context->umem_tree, address,
- address + PAGE_SIZE,
- invalidate_page_trampoline, NULL);
- up_read(&context->umem_rwsem);
- ib_ucontext_notifier_end_account(context);
-}
+ map = &umem_odp->map;
+ if (ib_uses_virt_dma(dev)) {
+ map->pfn_list = kvcalloc(nr_entries, sizeof(*map->pfn_list),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!map->pfn_list)
+ ret = -ENOMEM;
+ } else
+ ret = hmm_dma_map_alloc(dev->dma_device, map,
+ (end - start) >> PAGE_SHIFT,
+ 1 << umem_odp->page_shift);
+ if (ret)
+ return ret;
+
+ ret = mmu_interval_notifier_insert(&umem_odp->notifier,
+ umem_odp->umem.owning_mm, start,
+ end - start, ops);
+ if (ret)
+ goto out_free_map;
-static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
- u64 end, void *cookie)
-{
- ib_umem_notifier_start_account(item);
- item->context->invalidate_range(item, start, end);
return 0;
-}
-
-static void ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
-{
- struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
-
- if (!context->invalidate_range)
- return;
- ib_ucontext_notifier_start_account(context);
- down_read(&context->umem_rwsem);
- rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
- end,
- invalidate_range_start_trampoline, NULL);
- up_read(&context->umem_rwsem);
-}
-
-static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
- u64 end, void *cookie)
-{
- ib_umem_notifier_end_account(item);
- return 0;
+out_free_map:
+ if (ib_uses_virt_dma(dev))
+ kvfree(map->pfn_list);
+ else
+ hmm_dma_map_free(dev->dma_device, map);
+ return ret;
}
-static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
+/**
+ * ib_umem_odp_alloc_implicit - Allocate a parent implicit ODP umem
+ *
+ * Implicit ODP umems do not have a VA range and do not have any page lists.
+ * They exist only to hold the per_mm reference to help the driver create
+ * children umems.
+ *
+ * @device: IB device to create UMEM
+ * @access: ib_reg_mr access flags
+ */
+struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
+ int access)
{
- struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
-
- if (!context->invalidate_range)
- return;
-
- down_read(&context->umem_rwsem);
- rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
- end,
- invalidate_range_end_trampoline, NULL);
- up_read(&context->umem_rwsem);
- ib_ucontext_notifier_end_account(context);
+ struct ib_umem *umem;
+ struct ib_umem_odp *umem_odp;
+
+ if (access & IB_ACCESS_HUGETLB)
+ return ERR_PTR(-EINVAL);
+
+ umem_odp = kzalloc(sizeof(*umem_odp), GFP_KERNEL);
+ if (!umem_odp)
+ return ERR_PTR(-ENOMEM);
+ umem = &umem_odp->umem;
+ umem->ibdev = device;
+ umem->writable = ib_access_writable(access);
+ umem->owning_mm = current->mm;
+ umem_odp->page_shift = PAGE_SHIFT;
+
+ umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
+ ib_init_umem_implicit_odp(umem_odp);
+ return umem_odp;
}
+EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);
-static struct mmu_notifier_ops ib_umem_notifiers = {
- .release = ib_umem_notifier_release,
- .invalidate_page = ib_umem_notifier_invalidate_page,
- .invalidate_range_start = ib_umem_notifier_invalidate_range_start,
- .invalidate_range_end = ib_umem_notifier_invalidate_range_end,
-};
-
-int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
+/**
+ * ib_umem_odp_alloc_child - Allocate a child ODP umem under an implicit
+ * parent ODP umem
+ *
+ * @root: The parent umem enclosing the child. This must be allocated using
+ * ib_umem_odp_alloc_implicit()
+ * @addr: The starting userspace VA
+ * @size: The length of the userspace VA
+ * @ops: MMU interval ops, currently only @invalidate
+ */
+struct ib_umem_odp *
+ib_umem_odp_alloc_child(struct ib_umem_odp *root, unsigned long addr,
+ size_t size,
+ const struct mmu_interval_notifier_ops *ops)
{
- int ret_val;
- struct pid *our_pid;
- struct mm_struct *mm = get_task_mm(current);
-
- if (!mm)
- return -EINVAL;
-
- /* Prevent creating ODP MRs in child processes */
- rcu_read_lock();
- our_pid = get_task_pid(current->group_leader, PIDTYPE_PID);
- rcu_read_unlock();
- put_pid(our_pid);
- if (context->tgid != our_pid) {
- ret_val = -EINVAL;
- goto out_mm;
- }
-
- umem->hugetlb = 0;
- umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
- if (!umem->odp_data) {
- ret_val = -ENOMEM;
- goto out_mm;
- }
- umem->odp_data->umem = umem;
-
- mutex_init(&umem->odp_data->umem_mutex);
-
- init_completion(&umem->odp_data->notifier_completion);
-
- umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) *
- sizeof(*umem->odp_data->page_list));
- if (!umem->odp_data->page_list) {
- ret_val = -ENOMEM;
- goto out_odp_data;
- }
-
- umem->odp_data->dma_list = vzalloc(ib_umem_num_pages(umem) *
- sizeof(*umem->odp_data->dma_list));
- if (!umem->odp_data->dma_list) {
- ret_val = -ENOMEM;
- goto out_page_list;
- }
-
/*
- * When using MMU notifiers, we will get a
- * notification before the "current" task (and MM) is
- * destroyed. We use the umem_rwsem semaphore to synchronize.
+ * Caller must ensure that root cannot be freed during the call to
+ * ib_umem_odp_alloc_child().
*/
- down_write(&context->umem_rwsem);
- context->odp_mrs_count++;
- if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
- rbt_ib_umem_insert(&umem->odp_data->interval_tree,
- &context->umem_tree);
- if (likely(!atomic_read(&context->notifier_count)) ||
- context->odp_mrs_count == 1)
- umem->odp_data->mn_counters_active = true;
- else
- list_add(&umem->odp_data->no_private_counters,
- &context->no_private_counters);
- downgrade_write(&context->umem_rwsem);
-
- if (context->odp_mrs_count == 1) {
- /*
- * Note that at this point, no MMU notifier is running
- * for this context!
- */
- atomic_set(&context->notifier_count, 0);
- INIT_HLIST_NODE(&context->mn.hlist);
- context->mn.ops = &ib_umem_notifiers;
- /*
- * Lock-dep detects a false positive for mmap_sem vs.
- * umem_rwsem, due to not grasping downgrade_write correctly.
- */
- lockdep_off();
- ret_val = mmu_notifier_register(&context->mn, mm);
- lockdep_on();
- if (ret_val) {
- pr_err("Failed to register mmu_notifier %d\n", ret_val);
- ret_val = -EBUSY;
- goto out_mutex;
- }
- }
-
- up_read(&context->umem_rwsem);
+ struct ib_umem_odp *odp_data;
+ struct ib_umem *umem;
+ int ret;
+
+ if (WARN_ON(!root->is_implicit_odp))
+ return ERR_PTR(-EINVAL);
+
+ odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
+ if (!odp_data)
+ return ERR_PTR(-ENOMEM);
+ umem = &odp_data->umem;
+ umem->ibdev = root->umem.ibdev;
+ umem->length = size;
+ umem->address = addr;
+ umem->writable = root->umem.writable;
+ umem->owning_mm = root->umem.owning_mm;
+ odp_data->page_shift = PAGE_SHIFT;
+ odp_data->notifier.ops = ops;
/*
- * Note that doing an mmput can cause a notifier for the relevant mm.
- * If the notifier is called while we hold the umem_rwsem, this will
- * cause a deadlock. Therefore, we release the reference only after we
- * released the semaphore.
+ * An mmget must be held when registering a notifier; the owning_mm only
+ * has a mm_grab at this point.
*/
- mmput(mm);
- return 0;
+ if (!mmget_not_zero(umem->owning_mm)) {
+ ret = -EFAULT;
+ goto out_free;
+ }
-out_mutex:
- up_read(&context->umem_rwsem);
- vfree(umem->odp_data->dma_list);
-out_page_list:
- vfree(umem->odp_data->page_list);
-out_odp_data:
- kfree(umem->odp_data);
-out_mm:
- mmput(mm);
- return ret_val;
+ odp_data->tgid = get_pid(root->tgid);
+ ret = ib_init_umem_odp(odp_data, ops);
+ if (ret)
+ goto out_tgid;
+ mmput(umem->owning_mm);
+ return odp_data;
+
+out_tgid:
+ put_pid(odp_data->tgid);
+ mmput(umem->owning_mm);
+out_free:
+ kfree(odp_data);
+ return ERR_PTR(ret);
}
+EXPORT_SYMBOL(ib_umem_odp_alloc_child);
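
The implicit-ODP split reads most naturally as a two-step pattern; a hedged sketch follows, where driver_odp_ops and the fault parameters are placeholders rather than symbols defined by this patch.

	struct ib_umem_odp *parent, *child;

	parent = ib_umem_odp_alloc_implicit(ibdev, access_flags);
	if (IS_ERR(parent))
		return PTR_ERR(parent);

	/* later, while handling a fault inside [fault_va, fault_va + fault_len) */
	child = ib_umem_odp_alloc_child(parent, fault_va, fault_len,
					&driver_odp_ops);
	if (IS_ERR(child))
		return PTR_ERR(child);
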
-void ib_umem_odp_release(struct ib_umem *umem)
+/**
+ * ib_umem_odp_get - Create a umem_odp for a userspace va
+ *
+ * @device: IB device struct to get UMEM
+ * @addr: userspace virtual address to start at
+ * @size: length of region to pin
+ * @access: IB_ACCESS_xxx flags for memory being pinned
+ * @ops: MMU interval ops, currently only @invalidate
+ *
+ * The driver should use this when the access flags indicate ODP memory. It
+ * avoids pinning; instead, it stores the mm for future page fault handling in
+ * conjunction with MMU notifiers.
+ */
+struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
+ unsigned long addr, size_t size, int access,
+ const struct mmu_interval_notifier_ops *ops)
{
- struct ib_ucontext *context = umem->context;
+ struct ib_umem_odp *umem_odp;
+ int ret;
+
+ if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)))
+ return ERR_PTR(-EINVAL);
+
+ umem_odp = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
+ if (!umem_odp)
+ return ERR_PTR(-ENOMEM);
+
+ umem_odp->umem.ibdev = device;
+ umem_odp->umem.length = size;
+ umem_odp->umem.address = addr;
+ umem_odp->umem.writable = ib_access_writable(access);
+ umem_odp->umem.owning_mm = current->mm;
+ umem_odp->notifier.ops = ops;
+
+ umem_odp->page_shift = PAGE_SHIFT;
+#ifdef CONFIG_HUGETLB_PAGE
+ if (access & IB_ACCESS_HUGETLB)
+ umem_odp->page_shift = HPAGE_SHIFT;
+#endif
+
+ umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
+ ret = ib_init_umem_odp(umem_odp, ops);
+ if (ret)
+ goto err_put_pid;
+ return umem_odp;
+
+err_put_pid:
+ put_pid(umem_odp->tgid);
+ kfree(umem_odp);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(ib_umem_odp_get);
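
A hedged sketch of how a driver's reg_user_mr path might branch on the ODP access flag; driver_odp_ops is a placeholder mmu_interval_notifier_ops table, not something this patch defines.

	struct ib_umem *umem;

	if (access_flags & IB_ACCESS_ON_DEMAND) {
		struct ib_umem_odp *odp;

		odp = ib_umem_odp_get(pd->device, start, length, access_flags,
				      &driver_odp_ops);
		if (IS_ERR(odp))
			return ERR_CAST(odp);
		umem = &odp->umem;
	} else {
		umem = ib_umem_get(pd->device, start, length, access_flags);
		if (IS_ERR(umem))
			return ERR_CAST(umem);
	}
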
+
+static void ib_umem_odp_free(struct ib_umem_odp *umem_odp)
+{
+ struct ib_device *dev = umem_odp->umem.ibdev;
/*
* Ensure that no more pages are mapped in the umem.
@@ -357,156 +281,37 @@ void ib_umem_odp_release(struct ib_umem *umem)
* It is the driver's responsibility to ensure, before calling us,
* that the hardware will not attempt to access the MR any more.
*/
- ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
- ib_umem_end(umem));
-
- down_write(&context->umem_rwsem);
- if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
- rbt_ib_umem_remove(&umem->odp_data->interval_tree,
- &context->umem_tree);
- context->odp_mrs_count--;
- if (!umem->odp_data->mn_counters_active) {
- list_del(&umem->odp_data->no_private_counters);
- complete_all(&umem->odp_data->notifier_completion);
- }
-
- /*
- * Downgrade the lock to a read lock. This ensures that the notifiers
- * (who lock the mutex for reading) will be able to finish, and we
- * will be able to enventually obtain the mmu notifiers SRCU. Note
- * that since we are doing it atomically, no other user could register
- * and unregister while we do the check.
- */
- downgrade_write(&context->umem_rwsem);
- if (!context->odp_mrs_count) {
- struct task_struct *owning_process = NULL;
- struct mm_struct *owning_mm = NULL;
-
- owning_process = get_pid_task(context->tgid,
- PIDTYPE_PID);
- if (owning_process == NULL)
- /*
- * The process is already dead, notifier were removed
- * already.
- */
- goto out;
-
- owning_mm = get_task_mm(owning_process);
- if (owning_mm == NULL)
- /*
- * The process' mm is already dead, notifier were
- * removed already.
- */
- goto out_put_task;
- mmu_notifier_unregister(&context->mn, owning_mm);
-
- mmput(owning_mm);
-
-out_put_task:
- put_task_struct(owning_process);
- }
-out:
- up_read(&context->umem_rwsem);
-
- vfree(umem->odp_data->dma_list);
- vfree(umem->odp_data->page_list);
- kfree(umem->odp_data);
- kfree(umem);
+ mutex_lock(&umem_odp->umem_mutex);
+ ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
+ ib_umem_end(umem_odp));
+ mutex_unlock(&umem_odp->umem_mutex);
+ mmu_interval_notifier_remove(&umem_odp->notifier);
+ if (ib_uses_virt_dma(dev))
+ kvfree(umem_odp->map.pfn_list);
+ else
+ hmm_dma_map_free(dev->dma_device, &umem_odp->map);
}
-/*
- * Map for DMA and insert a single page into the on-demand paging page tables.
- *
- * @umem: the umem to insert the page to.
- * @page_index: index in the umem to add the page to.
- * @page: the page struct to map and add.
- * @access_mask: access permissions needed for this page.
- * @current_seq: sequence number for synchronization with invalidations.
- * the sequence number is taken from
- * umem->odp_data->notifiers_seq.
- *
- * The function returns -EFAULT if the DMA mapping operation fails. It returns
- * -EAGAIN if a concurrent invalidation prevents us from updating the page.
- *
- * The page is released via put_page even if the operation failed. For
- * on-demand pinning, the page is released whenever it isn't stored in the
- * umem.
- */
-static int ib_umem_odp_map_dma_single_page(
- struct ib_umem *umem,
- int page_index,
- u64 base_virt_addr,
- struct page *page,
- u64 access_mask,
- unsigned long current_seq)
+void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
- struct ib_device *dev = umem->context->device;
- dma_addr_t dma_addr;
- int stored_page = 0;
- int remove_existing_mapping = 0;
- int ret = 0;
-
- /*
- * Note: we avoid writing if seq is different from the initial seq, to
- * handle case of a racing notifier. This check also allows us to bail
- * early if we have a notifier running in parallel with us.
- */
- if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
- ret = -EAGAIN;
- goto out;
- }
- if (!(umem->odp_data->dma_list[page_index])) {
- dma_addr = ib_dma_map_page(dev,
- page,
- 0, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
- if (ib_dma_mapping_error(dev, dma_addr)) {
- ret = -EFAULT;
- goto out;
- }
- umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
- umem->odp_data->page_list[page_index] = page;
- stored_page = 1;
- } else if (umem->odp_data->page_list[page_index] == page) {
- umem->odp_data->dma_list[page_index] |= access_mask;
- } else {
- pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
- umem->odp_data->page_list[page_index], page);
- /* Better remove the mapping now, to prevent any further
- * damage. */
- remove_existing_mapping = 1;
- }
-
-out:
- /* On Demand Paging - avoid pinning the page */
- if (umem->context->invalidate_range || !stored_page)
- put_page(page);
-
- if (remove_existing_mapping && umem->context->invalidate_range) {
- invalidate_page_trampoline(
- umem,
- base_virt_addr + (page_index * PAGE_SIZE),
- base_virt_addr + ((page_index+1)*PAGE_SIZE),
- NULL);
- ret = -EAGAIN;
- }
+ if (!umem_odp->is_implicit_odp)
+ ib_umem_odp_free(umem_odp);
- return ret;
+ put_pid(umem_odp->tgid);
+ kfree(umem_odp);
}
+EXPORT_SYMBOL(ib_umem_odp_release);
/**
- * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
+ * ib_umem_odp_map_dma_and_lock - DMA map userspace memory in an ODP MR and lock it.
*
- * Pins the range of pages passed in the argument, and maps them to
- * DMA addresses. The DMA addresses of the mapped pages is updated in
- * umem->odp_data->dma_list.
+ * Maps the range passed in the argument to DMA addresses.
+ * Upon success the ODP MR will be locked to let the caller complete its
+ * device page table update.
*
* Returns the number of pages mapped in success, negative error code
* for failure.
- * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
- * the function from completing its task.
- *
- * @umem: the umem to map and pin
+ * @umem_odp: the umem to map and pin
* @user_virt: the address from which we need to map.
* @bcnt: the minimal number of bytes to pin and map. The mapping might be
* bigger due to alignment, and may also be smaller in case of an error
@@ -514,156 +319,151 @@ out:
* the return value.
* @access_mask: bit mask of the requested access permissions for the given
* range.
- * @current_seq: the MMU notifiers sequance value for synchronization with
- * invalidations. the sequance number is read from
- * umem->odp_data->notifiers_seq before calling this function
+ * @fault: is faulting required for the given range
*/
-int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
- u64 access_mask, unsigned long current_seq)
+int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
+ u64 bcnt, u64 access_mask, bool fault)
+ __acquires(&umem_odp->umem_mutex)
{
struct task_struct *owning_process = NULL;
- struct mm_struct *owning_mm = NULL;
- struct page **local_page_list = NULL;
- u64 off;
- int j, k, ret = 0, start_idx, npages = 0;
- u64 base_virt_addr;
-
- if (access_mask == 0)
- return -EINVAL;
-
- if (user_virt < ib_umem_start(umem) ||
- user_virt + bcnt > ib_umem_end(umem))
+ struct mm_struct *owning_mm = umem_odp->umem.owning_mm;
+ int pfn_index, dma_index, ret = 0, start_idx;
+ unsigned int page_shift, hmm_order, pfn_start_idx;
+ unsigned long num_pfns, current_seq;
+ struct hmm_range range = {};
+ unsigned long timeout;
+
+ if (user_virt < ib_umem_start(umem_odp) ||
+ user_virt + bcnt > ib_umem_end(umem_odp))
return -EFAULT;
- local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
- if (!local_page_list)
- return -ENOMEM;
-
- off = user_virt & (~PAGE_MASK);
- user_virt = user_virt & PAGE_MASK;
- base_virt_addr = user_virt;
- bcnt += off; /* Charge for the first page offset as well. */
+ page_shift = umem_odp->page_shift;
- owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
- if (owning_process == NULL) {
+ /*
+ * owning_process is allowed to be NULL; this means the mm somehow outlived
+ * the originating process. Presumably mmget_not_zero will fail in this
+ * case.
+ */
+ owning_process = get_pid_task(umem_odp->tgid, PIDTYPE_PID);
+ if (!owning_process || !mmget_not_zero(owning_mm)) {
ret = -EINVAL;
- goto out_no_task;
+ goto out_put_task;
}
- owning_mm = get_task_mm(owning_process);
- if (owning_mm == NULL) {
- ret = -EINVAL;
- goto out_put_task;
+ range.notifier = &umem_odp->notifier;
+ range.start = ALIGN_DOWN(user_virt, 1UL << page_shift);
+ range.end = ALIGN(user_virt + bcnt, 1UL << page_shift);
+ pfn_start_idx = (range.start - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
+ num_pfns = (range.end - range.start) >> PAGE_SHIFT;
+ if (fault) {
+ range.default_flags = HMM_PFN_REQ_FAULT;
+
+ if (access_mask & HMM_PFN_WRITE)
+ range.default_flags |= HMM_PFN_REQ_WRITE;
}
- start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
- k = start_idx;
+ range.hmm_pfns = &(umem_odp->map.pfn_list[pfn_start_idx]);
+ timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+
+retry:
+ current_seq = range.notifier_seq =
+ mmu_interval_read_begin(&umem_odp->notifier);
+
+ mmap_read_lock(owning_mm);
+ ret = hmm_range_fault(&range);
+ mmap_read_unlock(owning_mm);
+ if (unlikely(ret)) {
+ if (ret == -EBUSY && !time_after(jiffies, timeout))
+ goto retry;
+ goto out_put_mm;
+ }
- while (bcnt > 0) {
- const size_t gup_num_pages =
- min_t(size_t, ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
- PAGE_SIZE / sizeof(struct page *));
+ start_idx = (range.start - ib_umem_start(umem_odp)) >> page_shift;
+ dma_index = start_idx;
+
+ mutex_lock(&umem_odp->umem_mutex);
+ if (mmu_interval_read_retry(&umem_odp->notifier, current_seq)) {
+ mutex_unlock(&umem_odp->umem_mutex);
+ goto retry;
+ }
+
+ for (pfn_index = 0; pfn_index < num_pfns;
+ pfn_index += 1 << (page_shift - PAGE_SHIFT), dma_index++) {
- down_read(&owning_mm->mmap_sem);
/*
- * Note: this might result in redundent page getting. We can
- * avoid this by checking dma_list to be 0 before calling
- * get_user_pages. However, this make the code much more
- * complex (and doesn't gain us much performance in most use
- * cases).
+ * Since we asked hmm_range_fault() to populate pages, it
+ * shouldn't return an error entry on success.
*/
- npages = get_user_pages(owning_process, owning_mm, user_virt,
- gup_num_pages,
- access_mask & ODP_WRITE_ALLOWED_BIT, 0,
- local_page_list, NULL);
- up_read(&owning_mm->mmap_sem);
+ WARN_ON(fault && range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
+ WARN_ON(fault && !(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
+ if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID))
+ continue;
- if (npages < 0)
- break;
+ if (range.hmm_pfns[pfn_index] & HMM_PFN_DMA_MAPPED)
+ continue;
- bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
- user_virt += npages << PAGE_SHIFT;
- mutex_lock(&umem->odp_data->umem_mutex);
- for (j = 0; j < npages; ++j) {
- ret = ib_umem_odp_map_dma_single_page(
- umem, k, base_virt_addr, local_page_list[j],
- access_mask, current_seq);
- if (ret < 0)
- break;
- k++;
- }
- mutex_unlock(&umem->odp_data->umem_mutex);
-
- if (ret < 0) {
- /* Release left over pages when handling errors. */
- for (++j; j < npages; ++j)
- put_page(local_page_list[j]);
+ hmm_order = hmm_pfn_to_map_order(range.hmm_pfns[pfn_index]);
+ /* If a hugepage was detected but the umem wasn't set up for hugepages, the
+ * smaller umem page_shift is simply used; the opposite case is an error.
+ */
+ if (hmm_order + PAGE_SHIFT < page_shift) {
+ ret = -EINVAL;
+ ibdev_dbg(umem_odp->umem.ibdev,
+ "%s: un-expected hmm_order %u, page_shift %u\n",
+ __func__, hmm_order, page_shift);
break;
}
}
+ /* upon success the lock stays held for the caller */
+ if (!ret)
+ ret = dma_index - start_idx;
+ else
+ mutex_unlock(&umem_odp->umem_mutex);
- if (ret >= 0) {
- if (npages < 0 && k == start_idx)
- ret = npages;
- else
- ret = k - start_idx;
- }
-
- mmput(owning_mm);
+out_put_mm:
+ mmput_async(owning_mm);
out_put_task:
- put_task_struct(owning_process);
-out_no_task:
- free_page((unsigned long)local_page_list);
+ if (owning_process)
+ put_task_struct(owning_process);
return ret;
}
-EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
+EXPORT_SYMBOL(ib_umem_odp_map_dma_and_lock);
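
The locking contract is the important part: on a non-negative return the umem_mutex is still held, and the caller drops it only after its device page tables are updated. A hedged sketch of the caller side, with io_virt, bcnt, write and the page-table update as placeholders:

	int npages;

	npages = ib_umem_odp_map_dma_and_lock(umem_odp, io_virt, bcnt,
					      write ? HMM_PFN_WRITE : 0, true);
	if (npages < 0)
		return npages;
	/* translate umem_odp->map into device page-table entries here */
	mutex_unlock(&umem_odp->umem_mutex);
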
-void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
+void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
u64 bound)
{
- int idx;
+ struct ib_device *dev = umem_odp->umem.ibdev;
u64 addr;
- struct ib_device *dev = umem->context->device;
-
- virt = max_t(u64, virt, ib_umem_start(umem));
- bound = min_t(u64, bound, ib_umem_end(umem));
- /* Note that during the run of this function, the
- * notifiers_count of the MR is > 0, preventing any racing
- * faults from completion. We might be racing with other
- * invalidations, so we must make sure we free each page only
- * once. */
- mutex_lock(&umem->odp_data->umem_mutex);
- for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
- idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
- if (umem->odp_data->page_list[idx]) {
- struct page *page = umem->odp_data->page_list[idx];
- dma_addr_t dma = umem->odp_data->dma_list[idx];
- dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;
-
- WARN_ON(!dma_addr);
-
- ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
- if (dma & ODP_WRITE_ALLOWED_BIT) {
- struct page *head_page = compound_head(page);
- /*
- * set_page_dirty prefers being called with
- * the page lock. However, MMU notifiers are
- * called sometimes with and sometimes without
- * the lock. We rely on the umem_mutex instead
- * to prevent other mmu notifiers from
- * continuing and allowing the page mapping to
- * be removed.
- */
- set_page_dirty(head_page);
- }
- /* on demand pinning support */
- if (!umem->context->invalidate_range)
- put_page(page);
- umem->odp_data->page_list[idx] = NULL;
- umem->odp_data->dma_list[idx] = 0;
+
+ lockdep_assert_held(&umem_odp->umem_mutex);
+
+ virt = max_t(u64, virt, ib_umem_start(umem_odp));
+ bound = min_t(u64, bound, ib_umem_end(umem_odp));
+ for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
+ u64 offset = addr - ib_umem_start(umem_odp);
+ size_t idx = offset >> umem_odp->page_shift;
+ unsigned long pfn = umem_odp->map.pfn_list[idx];
+
+ if (!hmm_dma_unmap_pfn(dev->dma_device, &umem_odp->map, idx))
+ goto clear;
+
+ if (pfn & HMM_PFN_WRITE) {
+ struct page *page = hmm_pfn_to_page(pfn);
+ struct page *head_page = compound_head(page);
+ /*
+ * set_page_dirty prefers being called with
+ * the page lock. However, MMU notifiers are
+ * called sometimes with and sometimes without
+ * the lock. We rely on the umem_mutex instead
+ * to prevent other mmu notifiers from
+ * continuing and allowing the page mapping to
+ * be removed.
+ */
+ set_page_dirty(head_page);
}
+ umem_odp->npages--;
+clear:
+ umem_odp->map.pfn_list[idx] &= ~HMM_PFN_FLAGS;
}
- mutex_unlock(&umem->odp_data->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
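
Because the unmap now asserts that umem_mutex is held, it pairs with the driver-side mmu_interval_notifier invalidate callback roughly as below; this is a hedged sketch modeled on existing ODP drivers, not code from this patch.

	static bool sketch_odp_invalidate(struct mmu_interval_notifier *mni,
					  const struct mmu_notifier_range *range,
					  unsigned long cur_seq)
	{
		struct ib_umem_odp *umem_odp =
			container_of(mni, struct ib_umem_odp, notifier);

		if (!mmu_notifier_range_blockable(range))
			return false;

		mutex_lock(&umem_odp->umem_mutex);
		mmu_interval_set_seq(mni, cur_seq);
		/* zap the matching device page-table entries first, then ... */
		ib_umem_odp_unmap_dma_pages(umem_odp, range->start, range->end);
		mutex_unlock(&umem_odp->umem_mutex);
		return true;
	}
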
diff --git a/drivers/infiniband/core/umem_rbtree.c b/drivers/infiniband/core/umem_rbtree.c
deleted file mode 100644
index 727d788448f5..000000000000
--- a/drivers/infiniband/core/umem_rbtree.c
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interval_tree_generic.h>
-#include <linux/sched.h>
-#include <linux/gfp.h>
-#include <rdma/ib_umem_odp.h>
-
-/*
- * The ib_umem list keeps track of memory regions for which the HW
- * device request to receive notification when the related memory
- * mapping is changed.
- *
- * ib_umem_lock protects the list.
- */
-
-static inline u64 node_start(struct umem_odp_node *n)
-{
- struct ib_umem_odp *umem_odp =
- container_of(n, struct ib_umem_odp, interval_tree);
-
- return ib_umem_start(umem_odp->umem);
-}
-
-/* Note that the representation of the intervals in the interval tree
- * considers the ending point as contained in the interval, while the
- * function ib_umem_end returns the first address which is not contained
- * in the umem.
- */
-static inline u64 node_last(struct umem_odp_node *n)
-{
- struct ib_umem_odp *umem_odp =
- container_of(n, struct ib_umem_odp, interval_tree);
-
- return ib_umem_end(umem_odp->umem) - 1;
-}
-
-INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
- node_start, node_last, , rbt_ib_umem)
-
-/* @last is not a part of the interval. See comment for function
- * node_last.
- */
-int rbt_ib_umem_for_each_in_range(struct rb_root *root,
- u64 start, u64 last,
- umem_call_back cb,
- void *cookie)
-{
- int ret_val = 0;
- struct umem_odp_node *node;
- struct ib_umem_odp *umem;
-
- if (unlikely(start == last))
- return ret_val;
-
- for (node = rbt_ib_umem_iter_first(root, start, last - 1); node;
- node = rbt_ib_umem_iter_next(node, start, last - 1)) {
- umem = container_of(node, struct ib_umem_odp, interval_tree);
- ret_val = cb(umem->umem, start, last, cookie) || ret_val;
- }
-
- return ret_val;
-}
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 57f281f8d686..fd67fc9fe85a 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -49,22 +49,31 @@
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_user_mad.h>
+#include <rdma/rdma_netlink.h>
+
+#include "core_priv.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL");
+#define MAX_UMAD_RECV_LIST_SIZE 200000
+
enum {
- IB_UMAD_MAX_PORTS = 64,
+ IB_UMAD_MAX_PORTS = RDMA_MAX_PORTS,
IB_UMAD_MAX_AGENTS = 32,
IB_UMAD_MAJOR = 231,
- IB_UMAD_MINOR_BASE = 0
+ IB_UMAD_MINOR_BASE = 0,
+ IB_UMAD_NUM_FIXED_MINOR = 64,
+ IB_UMAD_NUM_DYNAMIC_MINOR = IB_UMAD_MAX_PORTS - IB_UMAD_NUM_FIXED_MINOR,
+ IB_ISSM_MINOR_BASE = IB_UMAD_NUM_FIXED_MINOR,
};
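
For orientation, inferred from these constants and so to be treated as illustrative: the first 64 umad and issm devices keep the historic static minors under major 231 (0-63 and 64-127 respectively), while ports beyond the first 64 are served from a separately allocated dynamic character-device region; the device numbered 4, for example, would sit at MKDEV(231, 4) with its issm companion at MKDEV(231, 68).
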
/*
@@ -83,10 +92,9 @@ enum {
struct ib_umad_port {
struct cdev cdev;
- struct device *dev;
-
+ struct device dev;
struct cdev sm_cdev;
- struct device *sm_dev;
+ struct device sm_dev;
struct semaphore sm_sem;
struct mutex file_mutex;
@@ -95,18 +103,19 @@ struct ib_umad_port {
struct ib_device *ib_dev;
struct ib_umad_device *umad_dev;
int dev_num;
- u8 port_num;
+ u32 port_num;
};
struct ib_umad_device {
- struct kobject kobj;
- struct ib_umad_port port[0];
+ struct kref kref;
+ struct ib_umad_port ports[];
};
struct ib_umad_file {
struct mutex mutex;
struct ib_umad_port *port;
struct list_head recv_list;
+ atomic_t recv_list_size;
struct list_head send_list;
struct list_head port_list;
spinlock_t send_lock;
@@ -125,32 +134,47 @@ struct ib_umad_packet {
struct ib_user_mad mad;
};
-static struct class *umad_class;
+struct ib_rmpp_mad_hdr {
+ struct ib_mad_hdr mad_hdr;
+ struct ib_rmpp_hdr rmpp_hdr;
+} __packed;
-static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
+#define CREATE_TRACE_POINTS
+#include <trace/events/ib_umad.h>
-static DEFINE_SPINLOCK(port_lock);
-static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
+static const dev_t base_umad_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
+static const dev_t base_issm_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE) +
+ IB_UMAD_NUM_FIXED_MINOR;
+static dev_t dynamic_umad_dev;
+static dev_t dynamic_issm_dev;
-static void ib_umad_add_one(struct ib_device *device);
+static DEFINE_IDA(umad_ida);
+
+static int ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device, void *client_data);
-static void ib_umad_release_dev(struct kobject *kobj)
+static void ib_umad_dev_free(struct kref *kref)
{
struct ib_umad_device *dev =
- container_of(kobj, struct ib_umad_device, kobj);
+ container_of(kref, struct ib_umad_device, kref);
kfree(dev);
}
-static struct kobj_type ib_umad_dev_ktype = {
- .release = ib_umad_release_dev,
-};
+static void ib_umad_dev_get(struct ib_umad_device *dev)
+{
+ kref_get(&dev->kref);
+}
+
+static void ib_umad_dev_put(struct ib_umad_device *dev)
+{
+ kref_put(&dev->kref, ib_umad_dev_free);
+}
static int hdr_size(struct ib_umad_file *file)
{
- return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) :
- sizeof (struct ib_user_mad_hdr_old);
+ return file->use_pkey_index ? sizeof(struct ib_user_mad_hdr) :
+ sizeof(struct ib_user_mad_hdr_old);
}
/* caller must hold file->mutex */
@@ -159,24 +183,28 @@ static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
return file->agents_dead ? NULL : file->agent[id];
}
-static int queue_packet(struct ib_umad_file *file,
- struct ib_mad_agent *agent,
- struct ib_umad_packet *packet)
+static int queue_packet(struct ib_umad_file *file, struct ib_mad_agent *agent,
+ struct ib_umad_packet *packet, bool is_recv_mad)
{
int ret = 1;
mutex_lock(&file->mutex);
+ if (is_recv_mad &&
+ atomic_read(&file->recv_list_size) > MAX_UMAD_RECV_LIST_SIZE)
+ goto unlock;
+
for (packet->mad.hdr.id = 0;
packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
packet->mad.hdr.id++)
if (agent == __get_agent(file, packet->mad.hdr.id)) {
list_add_tail(&packet->list, &file->recv_list);
+ atomic_inc(&file->recv_list_size);
wake_up_interruptible(&file->recv_wait);
ret = 0;
break;
}
-
+unlock:
mutex_unlock(&file->mutex);
return ret;
@@ -197,19 +225,20 @@ static void send_handler(struct ib_mad_agent *agent,
struct ib_umad_packet *packet = send_wc->send_buf->context[0];
dequeue_send(file, packet);
- ib_destroy_ah(packet->msg->ah);
+ rdma_destroy_ah(packet->msg->ah, RDMA_DESTROY_AH_SLEEPABLE);
ib_free_send_mad(packet->msg);
if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
packet->length = IB_MGMT_MAD_HDR;
packet->mad.hdr.status = ETIMEDOUT;
- if (!queue_packet(file, agent, packet))
+ if (!queue_packet(file, agent, packet, false))
return;
}
kfree(packet);
}
static void recv_handler(struct ib_mad_agent *agent,
+ struct ib_mad_send_buf *send_buf,
struct ib_mad_recv_wc *mad_recv_wc)
{
struct ib_umad_file *file = agent->context;
@@ -228,26 +257,41 @@ static void recv_handler(struct ib_mad_agent *agent,
packet->mad.hdr.status = 0;
packet->mad.hdr.length = hdr_size(file) + mad_recv_wc->mad_len;
packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp);
- packet->mad.hdr.lid = cpu_to_be16(mad_recv_wc->wc->slid);
+ /*
+ * On OPA devices it is okay to lose the upper 16 bits of LID as this
+ * information is obtained elsewhere. Mask off the upper 16 bits.
+ */
+ if (rdma_cap_opa_mad(agent->device, agent->port_num))
+ packet->mad.hdr.lid = ib_lid_be16(0xFFFF &
+ mad_recv_wc->wc->slid);
+ else
+ packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid);
packet->mad.hdr.sl = mad_recv_wc->wc->sl;
packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits;
packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index;
packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
if (packet->mad.hdr.grh_present) {
- struct ib_ah_attr ah_attr;
-
- ib_init_ah_from_wc(agent->device, agent->port_num,
- mad_recv_wc->wc, mad_recv_wc->recv_buf.grh,
- &ah_attr);
-
- packet->mad.hdr.gid_index = ah_attr.grh.sgid_index;
- packet->mad.hdr.hop_limit = ah_attr.grh.hop_limit;
- packet->mad.hdr.traffic_class = ah_attr.grh.traffic_class;
- memcpy(packet->mad.hdr.gid, &ah_attr.grh.dgid, 16);
- packet->mad.hdr.flow_label = cpu_to_be32(ah_attr.grh.flow_label);
+ struct rdma_ah_attr ah_attr;
+ const struct ib_global_route *grh;
+ int ret;
+
+ ret = ib_init_ah_attr_from_wc(agent->device, agent->port_num,
+ mad_recv_wc->wc,
+ mad_recv_wc->recv_buf.grh,
+ &ah_attr);
+ if (ret)
+ goto err2;
+
+ grh = rdma_ah_read_grh(&ah_attr);
+ packet->mad.hdr.gid_index = grh->sgid_index;
+ packet->mad.hdr.hop_limit = grh->hop_limit;
+ packet->mad.hdr.traffic_class = grh->traffic_class;
+ memcpy(packet->mad.hdr.gid, &grh->dgid, 16);
+ packet->mad.hdr.flow_label = cpu_to_be32(grh->flow_label);
+ rdma_destroy_ah_attr(&ah_attr);
}
- if (queue_packet(file, agent, packet))
+ if (queue_packet(file, agent, packet, true))
goto err2;
return;
@@ -307,6 +351,9 @@ static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf,
return -EFAULT;
}
}
+
+ trace_ib_umad_read_recv(file, &packet->mad.hdr, &recv_buf->mad->mad_hdr);
+
return hdr_size(file) + packet->length;
}
@@ -326,6 +373,9 @@ static ssize_t copy_send_mad(struct ib_umad_file *file, char __user *buf,
if (copy_to_user(buf, packet->mad.data, packet->length))
return -EFAULT;
+ trace_ib_umad_read_send(file, &packet->mad.hdr,
+ (struct ib_mad_hdr *)&packet->mad.data);
+
return size;
}
@@ -341,6 +391,11 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
mutex_lock(&file->mutex);
+ if (file->agents_dead) {
+ mutex_unlock(&file->mutex);
+ return -EIO;
+ }
+
while (list_empty(&file->recv_list)) {
mutex_unlock(&file->mutex);
@@ -354,8 +409,14 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
mutex_lock(&file->mutex);
}
+ if (file->agents_dead) {
+ mutex_unlock(&file->mutex);
+ return -EIO;
+ }
+
packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
list_del(&packet->list);
+ atomic_dec(&file->recv_list_size);
mutex_unlock(&file->mutex);
@@ -368,6 +429,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
/* Requeue packet */
mutex_lock(&file->mutex);
list_add(&packet->list, &file->recv_list);
+ atomic_inc(&file->recv_list_size);
mutex_unlock(&file->mutex);
} else {
if (packet->recv_wc)
@@ -446,11 +508,11 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
struct ib_umad_file *file = filp->private_data;
+ struct ib_rmpp_mad_hdr *rmpp_mad_hdr;
struct ib_umad_packet *packet;
struct ib_mad_agent *agent;
- struct ib_ah_attr ah_attr;
+ struct rdma_ah_attr ah_attr;
struct ib_ah *ah;
- struct ib_rmpp_mad *rmpp_mad;
__be64 *tid;
int ret, data_len, hdr_len, copy_offset, rmpp_active;
u8 base_version;
@@ -458,7 +520,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
if (count < hdr_size(file) + IB_MGMT_RMPP_HDR)
return -EINVAL;
- packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
+ packet = kzalloc(sizeof(*packet) + IB_MGMT_RMPP_HDR, GFP_KERNEL);
if (!packet)
return -ENOMEM;
@@ -481,39 +543,44 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
mutex_lock(&file->mutex);
+ trace_ib_umad_write(file, &packet->mad.hdr,
+ (struct ib_mad_hdr *)&packet->mad.data);
+
agent = __get_agent(file, packet->mad.hdr.id);
if (!agent) {
- ret = -EINVAL;
+ ret = -EIO;
goto err_up;
}
memset(&ah_attr, 0, sizeof ah_attr);
- ah_attr.dlid = be16_to_cpu(packet->mad.hdr.lid);
- ah_attr.sl = packet->mad.hdr.sl;
- ah_attr.src_path_bits = packet->mad.hdr.path_bits;
- ah_attr.port_num = file->port->port_num;
+ ah_attr.type = rdma_ah_find_type(agent->device,
+ file->port->port_num);
+ rdma_ah_set_dlid(&ah_attr, be16_to_cpu(packet->mad.hdr.lid));
+ rdma_ah_set_sl(&ah_attr, packet->mad.hdr.sl);
+ rdma_ah_set_path_bits(&ah_attr, packet->mad.hdr.path_bits);
+ rdma_ah_set_port_num(&ah_attr, file->port->port_num);
if (packet->mad.hdr.grh_present) {
- ah_attr.ah_flags = IB_AH_GRH;
- memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
- ah_attr.grh.sgid_index = packet->mad.hdr.gid_index;
- ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label);
- ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit;
- ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
+ rdma_ah_set_grh(&ah_attr, NULL,
+ be32_to_cpu(packet->mad.hdr.flow_label),
+ packet->mad.hdr.gid_index,
+ packet->mad.hdr.hop_limit,
+ packet->mad.hdr.traffic_class);
+ rdma_ah_set_dgid_raw(&ah_attr, packet->mad.hdr.gid);
}
- ah = ib_create_ah(agent->qp->pd, &ah_attr);
+ ah = rdma_create_user_ah(agent->qp->pd, &ah_attr, NULL);
if (IS_ERR(ah)) {
ret = PTR_ERR(ah);
goto err_up;
}
- rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
- hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
+ rmpp_mad_hdr = (struct ib_rmpp_mad_hdr *)packet->mad.data;
+ hdr_len = ib_get_mad_data_offset(rmpp_mad_hdr->mad_hdr.mgmt_class);
- if (ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
+ if (ib_is_mad_class_rmpp(rmpp_mad_hdr->mad_hdr.mgmt_class)
&& ib_mad_kernel_rmpp_agent(agent)) {
copy_offset = IB_MGMT_RMPP_HDR;
- rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
+ rmpp_active = ib_get_rmpp_flags(&rmpp_mad_hdr->rmpp_hdr) &
IB_MGMT_RMPP_FLAG_ACTIVE;
} else {
copy_offset = IB_MGMT_MAD_HDR;
@@ -562,12 +629,12 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
(be64_to_cpup(tid) & 0xffffffff));
- rmpp_mad->mad_hdr.tid = *tid;
+ rmpp_mad_hdr->mad_hdr.tid = *tid;
}
if (!ib_mad_kernel_rmpp_agent(agent)
- && ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
- && (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
+ && ib_is_mad_class_rmpp(rmpp_mad_hdr->mad_hdr.mgmt_class)
+ && (ib_get_rmpp_flags(&rmpp_mad_hdr->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
spin_lock_irq(&file->send_lock);
list_add_tail(&packet->list, &file->send_list);
spin_unlock_irq(&file->send_lock);
@@ -595,7 +662,7 @@ err_send:
err_msg:
ib_free_send_mad(packet->msg);
err_ah:
- ib_destroy_ah(ah);
+ rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
err_up:
mutex_unlock(&file->mutex);
err:
@@ -603,17 +670,21 @@ err:
return ret;
}
-static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
+static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
{
struct ib_umad_file *file = filp->private_data;
/* we will always be able to post a MAD send */
- unsigned int mask = POLLOUT | POLLWRNORM;
+ __poll_t mask = EPOLLOUT | EPOLLWRNORM;
+ mutex_lock(&file->mutex);
poll_wait(filp, &file->recv_wait, wait);
if (!list_empty(&file->recv_list))
- mask |= POLLIN | POLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
+ if (file->agents_dead)
+ mask = EPOLLERR;
+ mutex_unlock(&file->mutex);
return mask;
}
@@ -631,8 +702,7 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
mutex_lock(&file->mutex);
if (!file->port->ib_dev) {
- dev_notice(file->port->dev,
- "ib_umad_reg_agent: invalid device\n");
+ dev_notice(&file->port->dev, "%s: invalid device\n", __func__);
ret = -EPIPE;
goto out;
}
@@ -643,8 +713,8 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
}
if (ureq.qpn != 0 && ureq.qpn != 1) {
- dev_notice(file->port->dev,
- "ib_umad_reg_agent: invalid QPN %d specified\n",
+ dev_notice(&file->port->dev,
+ "%s: invalid QPN %u specified\n", __func__,
ureq.qpn);
ret = -EINVAL;
goto out;
@@ -654,9 +724,9 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
if (!__get_agent(file, agent_id))
goto found;
- dev_notice(file->port->dev,
- "ib_umad_reg_agent: Max Agents (%u) reached\n",
+ dev_notice(&file->port->dev, "%s: Max Agents (%u) reached\n", __func__,
IB_UMAD_MAX_AGENTS);
+
ret = -ENOMEM;
goto out;
@@ -699,11 +769,11 @@ found:
if (!file->already_used) {
file->already_used = 1;
if (!file->use_pkey_index) {
- dev_warn(file->port->dev,
+ dev_warn(&file->port->dev,
"process %s did not enable P_Key index support.\n",
current->comm);
- dev_warn(file->port->dev,
- " Documentation/infiniband/user_mad.txt has info on the new ABI.\n");
+ dev_warn(&file->port->dev,
+ " Documentation/infiniband/user_mad.rst has info on the new ABI.\n");
}
}
@@ -733,8 +803,7 @@ static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg)
mutex_lock(&file->mutex);
if (!file->port->ib_dev) {
- dev_notice(file->port->dev,
- "ib_umad_reg_agent2: invalid device\n");
+ dev_notice(&file->port->dev, "%s: invalid device\n", __func__);
ret = -EPIPE;
goto out;
}
@@ -745,17 +814,16 @@ static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg)
}
if (ureq.qpn != 0 && ureq.qpn != 1) {
- dev_notice(file->port->dev,
- "ib_umad_reg_agent2: invalid QPN %d specified\n",
- ureq.qpn);
+ dev_notice(&file->port->dev, "%s: invalid QPN %u specified\n",
+ __func__, ureq.qpn);
ret = -EINVAL;
goto out;
}
if (ureq.flags & ~IB_USER_MAD_REG_FLAGS_CAP) {
- dev_notice(file->port->dev,
- "ib_umad_reg_agent2 failed: invalid registration flags specified 0x%x; supported 0x%x\n",
- ureq.flags, IB_USER_MAD_REG_FLAGS_CAP);
+ dev_notice(&file->port->dev,
+ "%s failed: invalid registration flags specified 0x%x; supported 0x%x\n",
+ __func__, ureq.flags, IB_USER_MAD_REG_FLAGS_CAP);
ret = -EINVAL;
if (put_user((u32)IB_USER_MAD_REG_FLAGS_CAP,
@@ -770,8 +838,7 @@ static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg)
if (!__get_agent(file, agent_id))
goto found;
- dev_notice(file->port->dev,
- "ib_umad_reg_agent2: Max Agents (%u) reached\n",
+ dev_notice(&file->port->dev, "%s: Max Agents (%u) reached\n", __func__,
IB_UMAD_MAX_AGENTS);
ret = -ENOMEM;
goto out;
@@ -782,8 +849,8 @@ found:
req.mgmt_class = ureq.mgmt_class;
req.mgmt_class_version = ureq.mgmt_class_version;
if (ureq.oui & 0xff000000) {
- dev_notice(file->port->dev,
- "ib_umad_reg_agent2 failed: oui invalid 0x%08x\n",
+ dev_notice(&file->port->dev,
+ "%s failed: oui invalid 0x%08x\n", __func__,
ureq.oui);
ret = -EINVAL;
goto out;
@@ -842,11 +909,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
if (get_user(id, arg))
return -EFAULT;
+ if (id >= IB_UMAD_MAX_AGENTS)
+ return -EINVAL;
mutex_lock(&file->port->file_mutex);
mutex_lock(&file->mutex);
- if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
+ id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);
+ if (!__get_agent(file, id)) {
ret = -EINVAL;
goto out;
}
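The reworked ib_umad_unreg_agent() above validates the user-supplied agent id before taking any locks and then passes it through array_index_nospec(), so a speculatively executed out-of-bounds index cannot be used to index the agent table. The userspace illustration below shows only the bounds-check-then-sanitize ordering; a plain conditional stands in for the kernel's array_index_nospec() helper, and the names are made up.

/* Userspace sketch: validate an untrusted index first, then clamp it
 * before using it to index a fixed-size table.  The kernel uses
 * array_index_nospec() for the clamping step; this model shows only
 * the ordering, not the speculation barrier itself. */
#include <stdio.h>

#define MODEL_MAX_AGENTS 32

static void *agents[MODEL_MAX_AGENTS];

static int unreg_agent_model(unsigned int id)
{
	if (id >= MODEL_MAX_AGENTS)
		return -1;			/* reject before any other work */

	/* stand-in for array_index_nospec(id, MODEL_MAX_AGENTS) */
	id = id < MODEL_MAX_AGENTS ? id : 0;

	if (!agents[id])
		return -1;			/* nothing registered at this slot */

	agents[id] = NULL;			/* "unregister" */
	return 0;
}

int main(void)
{
	agents[3] = &agents[3];			/* pretend slot 3 is registered */
	printf("unreg 3 -> %d, unreg 99 -> %d\n",
	       unreg_agent_model(3), unreg_agent_model(99));
	return 0;
}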
@@ -928,19 +998,27 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
{
struct ib_umad_port *port;
struct ib_umad_file *file;
- int ret = -ENXIO;
+ int ret = 0;
port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
mutex_lock(&port->file_mutex);
- if (!port->ib_dev)
+ if (!port->ib_dev) {
+ ret = -ENXIO;
goto out;
+ }
- ret = -ENOMEM;
- file = kzalloc(sizeof *file, GFP_KERNEL);
- if (!file)
+ if (!rdma_dev_access_netns(port->ib_dev, current->nsproxy->net_ns)) {
+ ret = -EPERM;
goto out;
+ }
+
+ file = kzalloc(sizeof(*file), GFP_KERNEL);
+ if (!file) {
+ ret = -ENOMEM;
+ goto out;
+ }
mutex_init(&file->mutex);
spin_lock_init(&file->send_lock);
@@ -953,15 +1031,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
list_add_tail(&file->port_list, &port->file_list);
- ret = nonseekable_open(inode, filp);
- if (ret) {
- list_del(&file->port_list);
- kfree(file);
- goto out;
- }
-
- kobject_get(&port->umad_dev->kobj);
-
+ stream_open(inode, filp);
out:
mutex_unlock(&port->file_mutex);
return ret;
@@ -970,7 +1040,6 @@ out:
static int ib_umad_close(struct inode *inode, struct file *filp)
{
struct ib_umad_file *file = filp->private_data;
- struct ib_umad_device *dev = file->port->umad_dev;
struct ib_umad_packet *packet, *tmp;
int already_dead;
int i;
@@ -997,10 +1066,8 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
ib_unregister_mad_agent(file->agent[i]);
mutex_unlock(&file->port->file_mutex);
-
+ mutex_destroy(&file->mutex);
kfree(file);
- kobject_put(&dev->kobj);
-
return 0;
}
@@ -1015,7 +1082,6 @@ static const struct file_operations umad_fops = {
#endif
.open = ib_umad_open,
.release = ib_umad_close,
- .llseek = no_llseek,
};
static int ib_umad_sm_open(struct inode *inode, struct file *filp)
@@ -1040,24 +1106,20 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
}
}
+ if (!rdma_dev_access_netns(port->ib_dev, current->nsproxy->net_ns)) {
+ ret = -EPERM;
+ goto err_up_sem;
+ }
+
ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
if (ret)
goto err_up_sem;
filp->private_data = port;
- ret = nonseekable_open(inode, filp);
- if (ret)
- goto err_clr_sm_cap;
-
- kobject_get(&port->umad_dev->kobj);
-
+ nonseekable_open(inode, filp);
return 0;
-err_clr_sm_cap:
- swap(props.set_port_cap_mask, props.clr_port_cap_mask);
- ib_modify_port(port->ib_dev, port->port_num, 0, &props);
-
err_up_sem:
up(&port->sm_sem);
@@ -1080,8 +1142,6 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
up(&port->sm_sem);
- kobject_put(&port->umad_dev->kobj);
-
return ret;
}
@@ -1089,16 +1149,63 @@ static const struct file_operations umad_sm_fops = {
.owner = THIS_MODULE,
.open = ib_umad_sm_open,
.release = ib_umad_sm_close,
- .llseek = no_llseek,
};
+static struct ib_umad_port *get_port(struct ib_device *ibdev,
+ struct ib_umad_device *umad_dev,
+ u32 port)
+{
+ if (!umad_dev)
+ return ERR_PTR(-EOPNOTSUPP);
+ if (!rdma_is_port_valid(ibdev, port))
+ return ERR_PTR(-EINVAL);
+ if (!rdma_cap_ib_mad(ibdev, port))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return &umad_dev->ports[port - rdma_start_port(ibdev)];
+}
+
+static int ib_umad_get_nl_info(struct ib_device *ibdev, void *client_data,
+ struct ib_client_nl_info *res)
+{
+ struct ib_umad_port *port = get_port(ibdev, client_data, res->port);
+
+ if (IS_ERR(port))
+ return PTR_ERR(port);
+
+ res->abi = IB_USER_MAD_ABI_VERSION;
+ res->cdev = &port->dev;
+ return 0;
+}
+
static struct ib_client umad_client = {
.name = "umad",
.add = ib_umad_add_one,
- .remove = ib_umad_remove_one
+ .remove = ib_umad_remove_one,
+ .get_nl_info = ib_umad_get_nl_info,
+};
+MODULE_ALIAS_RDMA_CLIENT("umad");
+
+static int ib_issm_get_nl_info(struct ib_device *ibdev, void *client_data,
+ struct ib_client_nl_info *res)
+{
+ struct ib_umad_port *port = get_port(ibdev, client_data, res->port);
+
+ if (IS_ERR(port))
+ return PTR_ERR(port);
+
+ res->abi = IB_USER_MAD_ABI_VERSION;
+ res->cdev = &port->sm_dev;
+ return 0;
+}
+
+static struct ib_client issm_client = {
+ .name = "issm",
+ .get_nl_info = ib_issm_get_nl_info,
};
+MODULE_ALIAS_RDMA_CLIENT("issm");
-static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
+static ssize_t ibdev_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct ib_umad_port *port = dev_get_drvdata(dev);
@@ -1106,11 +1213,11 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
if (!port)
return -ENODEV;
- return sprintf(buf, "%s\n", port->ib_dev->name);
+ return sysfs_emit(buf, "%s\n", dev_name(&port->ib_dev->dev));
}
-static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
+static DEVICE_ATTR_RO(ibdev);
-static ssize_t show_port(struct device *dev, struct device_attribute *attr,
+static ssize_t port_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct ib_umad_port *port = dev_get_drvdata(dev);
@@ -1118,34 +1225,60 @@ static ssize_t show_port(struct device *dev, struct device_attribute *attr,
if (!port)
return -ENODEV;
- return sprintf(buf, "%d\n", port->port_num);
+ return sysfs_emit(buf, "%d\n", port->port_num);
}
-static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
+static DEVICE_ATTR_RO(port);
-static CLASS_ATTR_STRING(abi_version, S_IRUGO,
- __stringify(IB_USER_MAD_ABI_VERSION));
+static struct attribute *umad_class_dev_attrs[] = {
+ &dev_attr_ibdev.attr,
+ &dev_attr_port.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(umad_class_dev);
-static dev_t overflow_maj;
-static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS);
-static int find_overflow_devnum(struct ib_device *device)
+static char *umad_devnode(const struct device *dev, umode_t *mode)
{
- int ret;
+ return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
+}
- if (!overflow_maj) {
- ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2,
- "infiniband_mad");
- if (ret) {
- dev_err(&device->dev,
- "couldn't register dynamic device number\n");
- return ret;
- }
- }
+static ssize_t abi_version_show(const struct class *class,
+ const struct class_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", IB_USER_MAD_ABI_VERSION);
+}
+static CLASS_ATTR_RO(abi_version);
- ret = find_first_zero_bit(overflow_map, IB_UMAD_MAX_PORTS);
- if (ret >= IB_UMAD_MAX_PORTS)
- return -1;
+static struct attribute *umad_class_attrs[] = {
+ &class_attr_abi_version.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(umad_class);
- return ret;
+static struct class umad_class = {
+ .name = "infiniband_mad",
+ .devnode = umad_devnode,
+ .class_groups = umad_class_groups,
+ .dev_groups = umad_class_dev_groups,
+};
+
+static void ib_umad_release_port(struct device *device)
+{
+ struct ib_umad_port *port = dev_get_drvdata(device);
+ struct ib_umad_device *umad_dev = port->umad_dev;
+
+ ib_umad_dev_put(umad_dev);
+}
+
+static void ib_umad_init_port_dev(struct device *dev,
+ struct ib_umad_port *port,
+ const struct ib_device *device)
+{
+ device_initialize(dev);
+ ib_umad_dev_get(port->umad_dev);
+ dev->class = &umad_class;
+ dev->parent = device->dev.parent;
+ dev_set_drvdata(dev, port);
+ dev->release = ib_umad_release_port;
}
static int ib_umad_init_port(struct ib_device *device, int port_num,
@@ -1153,112 +1286,85 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
struct ib_umad_port *port)
{
int devnum;
- dev_t base;
-
- spin_lock(&port_lock);
- devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
- if (devnum >= IB_UMAD_MAX_PORTS) {
- spin_unlock(&port_lock);
- devnum = find_overflow_devnum(device);
- if (devnum < 0)
- return -1;
-
- spin_lock(&port_lock);
- port->dev_num = devnum + IB_UMAD_MAX_PORTS;
- base = devnum + overflow_maj;
- set_bit(devnum, overflow_map);
+ dev_t base_umad;
+ dev_t base_issm;
+ int ret;
+
+ devnum = ida_alloc_max(&umad_ida, IB_UMAD_MAX_PORTS - 1, GFP_KERNEL);
+ if (devnum < 0)
+ return -1;
+ port->dev_num = devnum;
+ if (devnum >= IB_UMAD_NUM_FIXED_MINOR) {
+ base_umad = dynamic_umad_dev + devnum - IB_UMAD_NUM_FIXED_MINOR;
+ base_issm = dynamic_issm_dev + devnum - IB_UMAD_NUM_FIXED_MINOR;
} else {
- port->dev_num = devnum;
- base = devnum + base_dev;
- set_bit(devnum, dev_map);
+ base_umad = devnum + base_umad_dev;
+ base_issm = devnum + base_issm_dev;
}
- spin_unlock(&port_lock);
port->ib_dev = device;
+ port->umad_dev = umad_dev;
port->port_num = port_num;
sema_init(&port->sm_sem, 1);
mutex_init(&port->file_mutex);
INIT_LIST_HEAD(&port->file_list);
+ ib_umad_init_port_dev(&port->dev, port, device);
+ port->dev.devt = base_umad;
+ dev_set_name(&port->dev, "umad%d", port->dev_num);
cdev_init(&port->cdev, &umad_fops);
port->cdev.owner = THIS_MODULE;
- port->cdev.kobj.parent = &umad_dev->kobj;
- kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num);
- if (cdev_add(&port->cdev, base, 1))
- goto err_cdev;
- port->dev = device_create(umad_class, device->dma_device,
- port->cdev.dev, port,
- "umad%d", port->dev_num);
- if (IS_ERR(port->dev))
+ ret = cdev_device_add(&port->cdev, &port->dev);
+ if (ret)
goto err_cdev;
- if (device_create_file(port->dev, &dev_attr_ibdev))
- goto err_dev;
- if (device_create_file(port->dev, &dev_attr_port))
- goto err_dev;
-
- base += IB_UMAD_MAX_PORTS;
- cdev_init(&port->sm_cdev, &umad_sm_fops);
- port->sm_cdev.owner = THIS_MODULE;
- port->sm_cdev.kobj.parent = &umad_dev->kobj;
- kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num);
- if (cdev_add(&port->sm_cdev, base, 1))
- goto err_sm_cdev;
-
- port->sm_dev = device_create(umad_class, device->dma_device,
- port->sm_cdev.dev, port,
- "issm%d", port->dev_num);
- if (IS_ERR(port->sm_dev))
- goto err_sm_cdev;
-
- if (device_create_file(port->sm_dev, &dev_attr_ibdev))
- goto err_sm_dev;
- if (device_create_file(port->sm_dev, &dev_attr_port))
- goto err_sm_dev;
-
- return 0;
+ if (rdma_cap_ib_smi(device, port_num)) {
+ ib_umad_init_port_dev(&port->sm_dev, port, device);
+ port->sm_dev.devt = base_issm;
+ dev_set_name(&port->sm_dev, "issm%d", port->dev_num);
+ cdev_init(&port->sm_cdev, &umad_sm_fops);
+ port->sm_cdev.owner = THIS_MODULE;
-err_sm_dev:
- device_destroy(umad_class, port->sm_cdev.dev);
+ ret = cdev_device_add(&port->sm_cdev, &port->sm_dev);
+ if (ret)
+ goto err_dev;
+ }
-err_sm_cdev:
- cdev_del(&port->sm_cdev);
+ return 0;
err_dev:
- device_destroy(umad_class, port->cdev.dev);
-
+ put_device(&port->sm_dev);
+ cdev_device_del(&port->cdev, &port->dev);
err_cdev:
- cdev_del(&port->cdev);
- if (port->dev_num < IB_UMAD_MAX_PORTS)
- clear_bit(devnum, dev_map);
- else
- clear_bit(devnum, overflow_map);
-
- return -1;
+ put_device(&port->dev);
+ ida_free(&umad_ida, devnum);
+ return ret;
}
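ib_umad_init_port() above now draws its per-port number from an IDA and maps it onto either the fixed chardev region (minors below IB_UMAD_NUM_FIXED_MINOR) or the dynamically allocated one. The toy userspace model below sketches that split; the region bases and the bitmap allocator are stand-ins, not the real register_chrdev_region()/alloc_chrdev_region() plus IDA logic.

/* Userspace sketch of the fixed/dynamic minor split used above. */
#include <stdio.h>

#define MODEL_FIXED_MINORS 4		/* stand-in for IB_UMAD_NUM_FIXED_MINOR */
#define MODEL_MAX_PORTS    8

static unsigned long used_map;		/* toy replacement for the IDA */
static const int fixed_base   = 100;	/* assumed fixed region base */
static const int dynamic_base = 500;	/* assumed dynamic region base */

static int alloc_devnum(void)
{
	for (int i = 0; i < MODEL_MAX_PORTS; i++) {
		if (!(used_map & (1UL << i))) {
			used_map |= 1UL << i;
			return i;
		}
	}
	return -1;
}

static int devnum_to_minor(int devnum)
{
	if (devnum >= MODEL_FIXED_MINORS)
		return dynamic_base + (devnum - MODEL_FIXED_MINORS);
	return fixed_base + devnum;
}

int main(void)
{
	for (int i = 0; i < 6; i++) {
		int devnum = alloc_devnum();

		printf("port %d -> devnum %d -> minor %d\n",
		       i, devnum, devnum_to_minor(devnum));
	}
	return 0;
}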
static void ib_umad_kill_port(struct ib_umad_port *port)
{
struct ib_umad_file *file;
+ bool has_smi = false;
int id;
- dev_set_drvdata(port->dev, NULL);
- dev_set_drvdata(port->sm_dev, NULL);
-
- device_destroy(umad_class, port->cdev.dev);
- device_destroy(umad_class, port->sm_cdev.dev);
-
- cdev_del(&port->cdev);
- cdev_del(&port->sm_cdev);
+ if (rdma_cap_ib_smi(port->ib_dev, port->port_num)) {
+ cdev_device_del(&port->sm_cdev, &port->sm_dev);
+ has_smi = true;
+ }
+ cdev_device_del(&port->cdev, &port->dev);
mutex_lock(&port->file_mutex);
+ /* Mark ib_dev NULL and block ioctl or other file ops from
+ * progressing any further.
+ */
port->ib_dev = NULL;
list_for_each_entry(file, &port->file_list, port_list) {
mutex_lock(&file->mutex);
file->agents_dead = 1;
+ wake_up_interruptible(&file->recv_wait);
mutex_unlock(&file->mutex);
for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
@@ -1268,120 +1374,128 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
mutex_unlock(&port->file_mutex);
- if (port->dev_num < IB_UMAD_MAX_PORTS)
- clear_bit(port->dev_num, dev_map);
- else
- clear_bit(port->dev_num - IB_UMAD_MAX_PORTS, overflow_map);
+ ida_free(&umad_ida, port->dev_num);
+
+ /* balances device_initialize() */
+ if (has_smi)
+ put_device(&port->sm_dev);
+ put_device(&port->dev);
}
-static void ib_umad_add_one(struct ib_device *device)
+static int ib_umad_add_one(struct ib_device *device)
{
struct ib_umad_device *umad_dev;
int s, e, i;
int count = 0;
+ int ret;
s = rdma_start_port(device);
e = rdma_end_port(device);
- umad_dev = kzalloc(sizeof *umad_dev +
- (e - s + 1) * sizeof (struct ib_umad_port),
+ umad_dev = kzalloc(struct_size(umad_dev, ports,
+ size_add(size_sub(e, s), 1)),
GFP_KERNEL);
if (!umad_dev)
- return;
-
- kobject_init(&umad_dev->kobj, &ib_umad_dev_ktype);
+ return -ENOMEM;
+ kref_init(&umad_dev->kref);
for (i = s; i <= e; ++i) {
if (!rdma_cap_ib_mad(device, i))
continue;
- umad_dev->port[i - s].umad_dev = umad_dev;
-
- if (ib_umad_init_port(device, i, umad_dev,
- &umad_dev->port[i - s]))
+ ret = ib_umad_init_port(device, i, umad_dev,
+ &umad_dev->ports[i - s]);
+ if (ret)
goto err;
count++;
}
- if (!count)
+ if (!count) {
+ ret = -EOPNOTSUPP;
goto free;
+ }
ib_set_client_data(device, &umad_client, umad_dev);
- return;
+ return 0;
err:
while (--i >= s) {
if (!rdma_cap_ib_mad(device, i))
continue;
- ib_umad_kill_port(&umad_dev->port[i - s]);
+ ib_umad_kill_port(&umad_dev->ports[i - s]);
}
free:
- kobject_put(&umad_dev->kobj);
+ /* balances kref_init() */
+ ib_umad_dev_put(umad_dev);
+ return ret;
}
static void ib_umad_remove_one(struct ib_device *device, void *client_data)
{
struct ib_umad_device *umad_dev = client_data;
- int i;
-
- if (!umad_dev)
- return;
+ unsigned int i;
- for (i = 0; i <= rdma_end_port(device) - rdma_start_port(device); ++i) {
- if (rdma_cap_ib_mad(device, i + rdma_start_port(device)))
- ib_umad_kill_port(&umad_dev->port[i]);
+ rdma_for_each_port (device, i) {
+ if (rdma_cap_ib_mad(device, i))
+ ib_umad_kill_port(
+ &umad_dev->ports[i - rdma_start_port(device)]);
}
-
- kobject_put(&umad_dev->kobj);
-}
-
-static char *umad_devnode(struct device *dev, umode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
+ /* balances kref_init() */
+ ib_umad_dev_put(umad_dev);
}
static int __init ib_umad_init(void)
{
int ret;
- ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2,
- "infiniband_mad");
+ ret = register_chrdev_region(base_umad_dev,
+ IB_UMAD_NUM_FIXED_MINOR * 2,
+ umad_class.name);
if (ret) {
pr_err("couldn't register device number\n");
goto out;
}
- umad_class = class_create(THIS_MODULE, "infiniband_mad");
- if (IS_ERR(umad_class)) {
- ret = PTR_ERR(umad_class);
- pr_err("couldn't create class infiniband_mad\n");
- goto out_chrdev;
+ ret = alloc_chrdev_region(&dynamic_umad_dev, 0,
+ IB_UMAD_NUM_DYNAMIC_MINOR * 2,
+ umad_class.name);
+ if (ret) {
+ pr_err("couldn't register dynamic device number\n");
+ goto out_alloc;
}
+ dynamic_issm_dev = dynamic_umad_dev + IB_UMAD_NUM_DYNAMIC_MINOR;
- umad_class->devnode = umad_devnode;
-
- ret = class_create_file(umad_class, &class_attr_abi_version.attr);
+ ret = class_register(&umad_class);
if (ret) {
- pr_err("couldn't create abi_version attribute\n");
- goto out_class;
+ pr_err("couldn't create class infiniband_mad\n");
+ goto out_chrdev;
}
ret = ib_register_client(&umad_client);
- if (ret) {
- pr_err("couldn't register ib_umad client\n");
+ if (ret)
goto out_class;
- }
+
+ ret = ib_register_client(&issm_client);
+ if (ret)
+ goto out_client;
return 0;
+out_client:
+ ib_unregister_client(&umad_client);
out_class:
- class_destroy(umad_class);
+ class_unregister(&umad_class);
out_chrdev:
- unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
+ unregister_chrdev_region(dynamic_umad_dev,
+ IB_UMAD_NUM_DYNAMIC_MINOR * 2);
+
+out_alloc:
+ unregister_chrdev_region(base_umad_dev,
+ IB_UMAD_NUM_FIXED_MINOR * 2);
out:
return ret;
@@ -1389,11 +1503,13 @@ out:
static void __exit ib_umad_cleanup(void)
{
+ ib_unregister_client(&issm_client);
ib_unregister_client(&umad_client);
- class_destroy(umad_class);
- unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
- if (overflow_maj)
- unregister_chrdev_region(overflow_maj, IB_UMAD_MAX_PORTS * 2);
+ class_unregister(&umad_class);
+ unregister_chrdev_region(base_umad_dev,
+ IB_UMAD_NUM_FIXED_MINOR * 2);
+ unregister_chrdev_region(dynamic_umad_dev,
+ IB_UMAD_NUM_DYNAMIC_MINOR * 2);
}
module_init(ib_umad_init);
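ib_umad_init() above registers the fixed chardev region, the dynamic region, the class, and then the two IB clients, unwinding in reverse order through the goto labels on failure; ib_umad_cleanup() tears the same things down. A compact userspace sketch of that register-then-unwind shape follows, with hypothetical step_*()/undo_*() functions standing in for the real registrations.

/* Userspace sketch of goto-based error unwinding as used in
 * ib_umad_init() above.  All step/undo functions are hypothetical. */
#include <stdio.h>

static int step_fixed_region(void)  { return 0; }
static int step_dyn_region(void)    { return 0; }
static int step_class(void)         { return 0; }
static int step_umad_client(void)   { return 0; }
static int step_issm_client(void)   { return -1; }	/* pretend this fails */

static void undo_fixed_region(void) { puts("undo fixed region"); }
static void undo_dyn_region(void)   { puts("undo dynamic region"); }
static void undo_class(void)        { puts("undo class"); }
static void undo_umad_client(void)  { puts("undo umad client"); }

static int init_model(void)
{
	int ret;

	ret = step_fixed_region();
	if (ret)
		goto out;
	ret = step_dyn_region();
	if (ret)
		goto out_fixed;
	ret = step_class();
	if (ret)
		goto out_dyn;
	ret = step_umad_client();
	if (ret)
		goto out_class;
	ret = step_issm_client();
	if (ret)
		goto out_client;
	return 0;

out_client:
	undo_umad_client();
out_class:
	undo_class();
out_dyn:
	undo_dyn_region();
out_fixed:
	undo_fixed_region();
out:
	return ret;
}

int main(void)
{
	return init_model() ? 1 : 0;
}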
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 3863d33c243d..797e2fcc8072 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -46,22 +46,33 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/uverbs_std_types.h>
-#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
- do { \
- (udata)->inbuf = (const void __user *) (ibuf); \
- (udata)->outbuf = (void __user *) (obuf); \
- (udata)->inlen = (ilen); \
- (udata)->outlen = (olen); \
- } while (0)
-
-#define INIT_UDATA_BUF_OR_NULL(udata, ibuf, obuf, ilen, olen) \
- do { \
- (udata)->inbuf = (ilen) ? (const void __user *) (ibuf) : NULL; \
- (udata)->outbuf = (olen) ? (void __user *) (obuf) : NULL; \
- (udata)->inlen = (ilen); \
- (udata)->outlen = (olen); \
- } while (0)
+#define UVERBS_MODULE_NAME ib_uverbs
+#include <rdma/uverbs_named_ioctl.h>
+
+static inline void
+ib_uverbs_init_udata(struct ib_udata *udata,
+ const void __user *ibuf,
+ void __user *obuf,
+ size_t ilen, size_t olen)
+{
+ udata->inbuf = ibuf;
+ udata->outbuf = obuf;
+ udata->inlen = ilen;
+ udata->outlen = olen;
+}
+
+static inline void
+ib_uverbs_init_udata_buf_or_null(struct ib_udata *udata,
+ const void __user *ibuf,
+ void __user *obuf,
+ size_t ilen, size_t olen)
+{
+ ib_uverbs_init_udata(udata,
+ ilen ? ibuf : NULL, olen ? obuf : NULL,
+ ilen, olen);
+}
/*
* Our lifetime rules for these structs are the following:
@@ -76,52 +87,50 @@
* an asynchronous event queue file is created and released when the
* event file is closed.
*
- * struct ib_uverbs_event_file: One reference is held by the VFS and
- * released when the file is closed. For asynchronous event files,
- * another reference is held by the corresponding main context file
- * and released when that file is closed. For completion event files,
- * a reference is taken when a CQ is created that uses the file, and
- * released when the CQ is destroyed.
+ * struct ib_uverbs_event_queue: Base structure for
+ * struct ib_uverbs_async_event_file and struct ib_uverbs_completion_event_file.
+ * One reference is held by the VFS and released when the file is closed.
+ * For asynchronous event files, another reference is held by the corresponding
+ * main context file and released when that file is closed. For completion
+ * event files, a reference is taken when a CQ is created that uses the file,
+ * and released when the CQ is destroyed.
*/
struct ib_uverbs_device {
- atomic_t refcount;
- int num_comp_vectors;
+ refcount_t refcount;
+ u32 num_comp_vectors;
struct completion comp;
- struct device *dev;
+ struct device dev;
+ /* First group for device attributes, NULL terminated array */
+ const struct attribute_group *groups[2];
struct ib_device __rcu *ib_dev;
int devnum;
struct cdev cdev;
struct rb_root xrcd_tree;
struct mutex xrcd_tree_mutex;
- struct kobject kobj;
struct srcu_struct disassociate_srcu;
struct mutex lists_mutex; /* protect lists */
struct list_head uverbs_file_list;
- struct list_head uverbs_events_file_list;
+ struct uverbs_api *uapi;
};
-struct ib_uverbs_event_file {
- struct kref ref;
- int is_async;
- struct ib_uverbs_file *uverbs_file;
+struct ib_uverbs_event_queue {
spinlock_t lock;
int is_closed;
wait_queue_head_t poll_wait;
struct fasync_struct *async_queue;
struct list_head event_list;
- struct list_head list;
};
-struct ib_uverbs_file {
- struct kref ref;
- struct mutex mutex;
- struct ib_uverbs_device *device;
- struct ib_ucontext *ucontext;
+struct ib_uverbs_async_event_file {
+ struct ib_uobject uobj;
+ struct ib_uverbs_event_queue ev_queue;
struct ib_event_handler event_handler;
- struct ib_uverbs_event_file *async_file;
- struct list_head list;
- int is_closed;
+};
+
+struct ib_uverbs_completion_event_file {
+ struct ib_uobject uobj;
+ struct ib_uverbs_event_queue ev_queue;
};
struct ib_uverbs_event {
@@ -142,6 +151,8 @@ struct ib_uverbs_mcast_entry {
struct ib_uevent_object {
struct ib_uobject uobject;
+ struct ib_uverbs_async_event_file *event_file;
+ /* List member for ib_uverbs_async_event_file list */
struct list_head event_list;
u32 events_reported;
};
@@ -158,51 +169,55 @@ struct ib_usrq_object {
struct ib_uqp_object {
struct ib_uevent_object uevent;
+ /* lock for mcast list */
+ struct mutex mcast_lock;
struct list_head mcast_list;
struct ib_uxrcd_object *uxrcd;
};
+struct ib_uwq_object {
+ struct ib_uevent_object uevent;
+};
+
struct ib_ucq_object {
- struct ib_uobject uobject;
- struct ib_uverbs_file *uverbs_file;
+ struct ib_uevent_object uevent;
struct list_head comp_list;
- struct list_head async_list;
u32 comp_events_reported;
- u32 async_events_reported;
};
-extern spinlock_t ib_uverbs_idr_lock;
-extern struct idr ib_uverbs_pd_idr;
-extern struct idr ib_uverbs_mr_idr;
-extern struct idr ib_uverbs_mw_idr;
-extern struct idr ib_uverbs_ah_idr;
-extern struct idr ib_uverbs_cq_idr;
-extern struct idr ib_uverbs_qp_idr;
-extern struct idr ib_uverbs_srq_idr;
-extern struct idr ib_uverbs_xrcd_idr;
-extern struct idr ib_uverbs_rule_idr;
-
-void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj);
-
-struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
- struct ib_device *ib_dev,
- int is_async);
-void ib_uverbs_free_async_event_file(struct ib_uverbs_file *uverbs_file);
-struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd);
-
-void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
- struct ib_uverbs_event_file *ev_file,
+extern const struct file_operations uverbs_event_fops;
+extern const struct file_operations uverbs_async_event_fops;
+void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue);
+void ib_uverbs_init_async_event_file(struct ib_uverbs_async_event_file *ev_file);
+void ib_uverbs_free_event_queue(struct ib_uverbs_event_queue *event_queue);
+void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res);
+int uverbs_async_event_release(struct inode *inode, struct file *filp);
+
+int ib_alloc_ucontext(struct uverbs_attr_bundle *attrs);
+int ib_init_ucontext(struct uverbs_attr_bundle *attrs);
+
+void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file,
struct ib_ucq_object *uobj);
-void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
- struct ib_uevent_object *uobj);
+void ib_uverbs_release_uevent(struct ib_uevent_object *uobj);
+void ib_uverbs_release_file(struct kref *ref);
+void ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
+ __u64 element, __u64 event,
+ struct list_head *obj_list, u32 *counter);
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context);
void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr);
+void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
-void ib_uverbs_event_handler(struct ib_event_handler *handler,
- struct ib_event *event);
-void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev, struct ib_xrcd *xrcd);
+int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs);
+
+int uverbs_dealloc_mw(struct ib_mw *mw);
+void ib_uverbs_detach_umcast(struct ib_qp *qp,
+ struct ib_uqp_object *uobj);
+
+long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
struct ib_uverbs_flow_spec {
union {
@@ -216,61 +231,63 @@ struct ib_uverbs_flow_spec {
};
struct ib_uverbs_flow_spec_eth eth;
struct ib_uverbs_flow_spec_ipv4 ipv4;
+ struct ib_uverbs_flow_spec_esp esp;
struct ib_uverbs_flow_spec_tcp_udp tcp_udp;
+ struct ib_uverbs_flow_spec_ipv6 ipv6;
+ struct ib_uverbs_flow_spec_action_tag flow_tag;
+ struct ib_uverbs_flow_spec_action_drop drop;
+ struct ib_uverbs_flow_spec_action_handle action;
+ struct ib_uverbs_flow_spec_action_count flow_count;
};
};
-#define IB_UVERBS_DECLARE_CMD(name) \
- ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \
- struct ib_device *ib_dev, \
- const char __user *buf, int in_len, \
- int out_len)
-
-IB_UVERBS_DECLARE_CMD(get_context);
-IB_UVERBS_DECLARE_CMD(query_device);
-IB_UVERBS_DECLARE_CMD(query_port);
-IB_UVERBS_DECLARE_CMD(alloc_pd);
-IB_UVERBS_DECLARE_CMD(dealloc_pd);
-IB_UVERBS_DECLARE_CMD(reg_mr);
-IB_UVERBS_DECLARE_CMD(rereg_mr);
-IB_UVERBS_DECLARE_CMD(dereg_mr);
-IB_UVERBS_DECLARE_CMD(alloc_mw);
-IB_UVERBS_DECLARE_CMD(dealloc_mw);
-IB_UVERBS_DECLARE_CMD(create_comp_channel);
-IB_UVERBS_DECLARE_CMD(create_cq);
-IB_UVERBS_DECLARE_CMD(resize_cq);
-IB_UVERBS_DECLARE_CMD(poll_cq);
-IB_UVERBS_DECLARE_CMD(req_notify_cq);
-IB_UVERBS_DECLARE_CMD(destroy_cq);
-IB_UVERBS_DECLARE_CMD(create_qp);
-IB_UVERBS_DECLARE_CMD(open_qp);
-IB_UVERBS_DECLARE_CMD(query_qp);
-IB_UVERBS_DECLARE_CMD(modify_qp);
-IB_UVERBS_DECLARE_CMD(destroy_qp);
-IB_UVERBS_DECLARE_CMD(post_send);
-IB_UVERBS_DECLARE_CMD(post_recv);
-IB_UVERBS_DECLARE_CMD(post_srq_recv);
-IB_UVERBS_DECLARE_CMD(create_ah);
-IB_UVERBS_DECLARE_CMD(destroy_ah);
-IB_UVERBS_DECLARE_CMD(attach_mcast);
-IB_UVERBS_DECLARE_CMD(detach_mcast);
-IB_UVERBS_DECLARE_CMD(create_srq);
-IB_UVERBS_DECLARE_CMD(modify_srq);
-IB_UVERBS_DECLARE_CMD(query_srq);
-IB_UVERBS_DECLARE_CMD(destroy_srq);
-IB_UVERBS_DECLARE_CMD(create_xsrq);
-IB_UVERBS_DECLARE_CMD(open_xrcd);
-IB_UVERBS_DECLARE_CMD(close_xrcd);
-
-#define IB_UVERBS_DECLARE_EX_CMD(name) \
- int ib_uverbs_ex_##name(struct ib_uverbs_file *file, \
- struct ib_device *ib_dev, \
- struct ib_udata *ucore, \
- struct ib_udata *uhw)
-
-IB_UVERBS_DECLARE_EX_CMD(create_flow);
-IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
-IB_UVERBS_DECLARE_EX_CMD(query_device);
-IB_UVERBS_DECLARE_EX_CMD(create_cq);
+int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
+ const void *kern_spec_mask,
+ const void *kern_spec_val,
+ size_t kern_filter_sz,
+ union ib_flow_spec *ib_spec);
+
+/*
+ * ib_uverbs_query_port_resp.port_cap_flags started out as just a copy of the
+ * PortInfo CapabilityMask, but was extended with unique bits.
+ */
+static inline u32 make_port_cap_flags(const struct ib_port_attr *attr)
+{
+ u32 res;
+
+ /* All IBA CapabilityMask bits are passed through here, except bit 26,
+ * which is overridden with IP_BASED_GIDS. This is due to a historical
+ * mistake in the implementation of IP_BASED_GIDS. Otherwise all other
+ * bits match the IBA definition across all kernel versions.
+ */
+ res = attr->port_cap_flags & ~(u32)IB_UVERBS_PCF_IP_BASED_GIDS;
+
+ if (attr->ip_gids)
+ res |= IB_UVERBS_PCF_IP_BASED_GIDS;
+
+ return res;
+}
+
+static inline struct ib_uverbs_async_event_file *
+ib_uverbs_get_async_event(struct uverbs_attr_bundle *attrs,
+ u16 id)
+{
+ struct ib_uobject *async_ev_file_uobj;
+ struct ib_uverbs_async_event_file *async_ev_file;
+
+ async_ev_file_uobj = uverbs_attr_get_uobject(attrs, id);
+ if (IS_ERR(async_ev_file_uobj))
+ async_ev_file = READ_ONCE(attrs->ufile->default_async_file);
+ else
+ async_ev_file = container_of(async_ev_file_uobj,
+ struct ib_uverbs_async_event_file,
+ uobj);
+ if (async_ev_file)
+ uverbs_uobject_get(&async_ev_file->uobj);
+ return async_ev_file;
+}
+void copy_port_attr_to_resp(struct ib_port_attr *attr,
+ struct ib_uverbs_query_port_resp *resp,
+ struct ib_device *ib_dev, u8 port_num);
#endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index be4cb9f04be3..ce16404cdfb8 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -38,364 +38,314 @@
#include <linux/slab.h>
#include <linux/sched.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
+
+#include <rdma/uverbs_types.h>
+#include <rdma/uverbs_std_types.h>
+#include <rdma/ib_ucaps.h>
+#include "rdma_core.h"
#include "uverbs.h"
#include "core_priv.h"
-struct uverbs_lock_class {
- struct lock_class_key key;
- char name[16];
-};
-
-static struct uverbs_lock_class pd_lock_class = { .name = "PD-uobj" };
-static struct uverbs_lock_class mr_lock_class = { .name = "MR-uobj" };
-static struct uverbs_lock_class mw_lock_class = { .name = "MW-uobj" };
-static struct uverbs_lock_class cq_lock_class = { .name = "CQ-uobj" };
-static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" };
-static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
-static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
-static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
-static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
-
/*
- * The ib_uobject locking scheme is as follows:
- *
- * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
- * needs to be held during all idr operations. When an object is
- * looked up, a reference must be taken on the object's kref before
- * dropping this lock.
+ * Copy a response to userspace. If the provided 'resp' is larger than the
+ * user buffer it is silently truncated. If the user provided a larger buffer
+ * then the trailing portion is zero filled.
*
- * - Each object also has an rwsem. This rwsem must be held for
- * reading while an operation that uses the object is performed.
- * For example, while registering an MR, the associated PD's
- * uobject.mutex must be held for reading. The rwsem must be held
- * for writing while initializing or destroying an object.
- *
- * - In addition, each object has a "live" flag. If this flag is not
- * set, then lookups of the object will fail even if it is found in
- * the idr. This handles a reader that blocks and does not acquire
- * the rwsem until after the object is destroyed. The destroy
- * operation will set the live flag to 0 and then drop the rwsem;
- * this will allow the reader to acquire the rwsem, see that the
- * live flag is 0, and then drop the rwsem and its reference to
- * object. The underlying storage will not be freed until the last
- * reference to the object is dropped.
+ * These semantics are intended to support future extension of the output
+ * structures.
*/
-
-static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
- struct ib_ucontext *context, struct uverbs_lock_class *c)
-{
- uobj->user_handle = user_handle;
- uobj->context = context;
- kref_init(&uobj->ref);
- init_rwsem(&uobj->mutex);
- lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
- uobj->live = 0;
-}
-
-static void release_uobj(struct kref *kref)
-{
- kfree(container_of(kref, struct ib_uobject, ref));
-}
-
-static void put_uobj(struct ib_uobject *uobj)
-{
- kref_put(&uobj->ref, release_uobj);
-}
-
-static void put_uobj_read(struct ib_uobject *uobj)
-{
- up_read(&uobj->mutex);
- put_uobj(uobj);
-}
-
-static void put_uobj_write(struct ib_uobject *uobj)
-{
- up_write(&uobj->mutex);
- put_uobj(uobj);
-}
-
-static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
+static int uverbs_response(struct uverbs_attr_bundle *attrs, const void *resp,
+ size_t resp_len)
{
int ret;
- idr_preload(GFP_KERNEL);
- spin_lock(&ib_uverbs_idr_lock);
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
+ return uverbs_copy_to_struct_or_zero(
+ attrs, UVERBS_ATTR_CORE_OUT, resp, resp_len);
- ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
- if (ret >= 0)
- uobj->id = ret;
-
- spin_unlock(&ib_uverbs_idr_lock);
- idr_preload_end();
+ if (copy_to_user(attrs->ucore.outbuf, resp,
+ min(attrs->ucore.outlen, resp_len)))
+ return -EFAULT;
- return ret < 0 ? ret : 0;
-}
+ if (resp_len < attrs->ucore.outlen) {
+ /*
+ * Zero fill any extra memory that user
+ * space might have provided.
+ */
+ ret = clear_user(attrs->ucore.outbuf + resp_len,
+ attrs->ucore.outlen - resp_len);
+ if (ret)
+ return -EFAULT;
+ }
-void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
-{
- spin_lock(&ib_uverbs_idr_lock);
- idr_remove(idr, uobj->id);
- spin_unlock(&ib_uverbs_idr_lock);
+ return 0;
}
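uverbs_response() above implements the extensible-ABI copy described in its comment: copy min(user buffer, kernel response), then zero-fill any extra space userspace supplied so newer userspace sees zeroed fields from older kernels. The userspace model below copies into a plain buffer, with memcpy()/memset() standing in for copy_to_user()/clear_user(); it is a sketch of the semantics, not the kernel code.

/* Userspace model of the truncate-or-zero-fill response copy above. */
#include <stdio.h>
#include <string.h>

static int response_copy_model(void *ubuf, size_t ulen,
			       const void *resp, size_t resp_len)
{
	size_t n = ulen < resp_len ? ulen : resp_len;

	memcpy(ubuf, resp, n);			/* possibly truncated */
	if (resp_len < ulen)
		memset((char *)ubuf + resp_len, 0, ulen - resp_len);
	return 0;
}

int main(void)
{
	const char resp[4] = { 1, 2, 3, 4 };
	char small[2], big[8];

	response_copy_model(small, sizeof(small), resp, sizeof(resp));
	response_copy_model(big, sizeof(big), resp, sizeof(resp));
	printf("small[1]=%d big[7]=%d\n", small[1], big[7]);	/* 2 and 0 */
	return 0;
}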
-static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
- struct ib_ucontext *context)
+/*
+ * Copy a request from userspace. If the provided 'req' is larger than the
+ * user buffer then the user buffer is zero extended into the 'req'. If 'req'
+ * is smaller than the user buffer then the uncopied bytes in the user buffer
+ * must be zero.
+ */
+static int uverbs_request(struct uverbs_attr_bundle *attrs, void *req,
+ size_t req_len)
{
- struct ib_uobject *uobj;
+ if (copy_from_user(req, attrs->ucore.inbuf,
+ min(attrs->ucore.inlen, req_len)))
+ return -EFAULT;
- spin_lock(&ib_uverbs_idr_lock);
- uobj = idr_find(idr, id);
- if (uobj) {
- if (uobj->context == context)
- kref_get(&uobj->ref);
- else
- uobj = NULL;
+ if (attrs->ucore.inlen < req_len) {
+ memset(req + attrs->ucore.inlen, 0,
+ req_len - attrs->ucore.inlen);
+ } else if (attrs->ucore.inlen > req_len) {
+ if (!ib_is_buffer_cleared(attrs->ucore.inbuf + req_len,
+ attrs->ucore.inlen - req_len))
+ return -EOPNOTSUPP;
}
- spin_unlock(&ib_uverbs_idr_lock);
-
- return uobj;
+ return 0;
}
-static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
- struct ib_ucontext *context, int nested)
+/*
+ * Generate the value for the 'response_length' protocol used by write_ex.
+ * This is the number of bytes the kernel actually wrote. Userspace can use
+ * this to detect what structure members in the response the kernel
+ * understood.
+ */
+static u32 uverbs_response_length(struct uverbs_attr_bundle *attrs,
+ size_t resp_len)
{
- struct ib_uobject *uobj;
-
- uobj = __idr_get_uobj(idr, id, context);
- if (!uobj)
- return NULL;
-
- if (nested)
- down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
- else
- down_read(&uobj->mutex);
- if (!uobj->live) {
- put_uobj_read(uobj);
- return NULL;
- }
-
- return uobj;
+ return min_t(size_t, attrs->ucore.outlen, resp_len);
}
-static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
- struct ib_ucontext *context)
-{
- struct ib_uobject *uobj;
+/*
+ * The iterator version of the request interface is for handlers that need to
+ * step over a flex array at the end of a command header.
+ */
+struct uverbs_req_iter {
+ const void __user *cur;
+ const void __user *end;
+};
- uobj = __idr_get_uobj(idr, id, context);
- if (!uobj)
- return NULL;
+static int uverbs_request_start(struct uverbs_attr_bundle *attrs,
+ struct uverbs_req_iter *iter,
+ void *req,
+ size_t req_len)
+{
+ if (attrs->ucore.inlen < req_len)
+ return -ENOSPC;
- down_write(&uobj->mutex);
- if (!uobj->live) {
- put_uobj_write(uobj);
- return NULL;
- }
+ if (copy_from_user(req, attrs->ucore.inbuf, req_len))
+ return -EFAULT;
- return uobj;
+ iter->cur = attrs->ucore.inbuf + req_len;
+ iter->end = attrs->ucore.inbuf + attrs->ucore.inlen;
+ return 0;
}
-static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
- int nested)
+static int uverbs_request_next(struct uverbs_req_iter *iter, void *val,
+ size_t len)
{
- struct ib_uobject *uobj;
+ if (iter->cur + len > iter->end)
+ return -ENOSPC;
- uobj = idr_read_uobj(idr, id, context, nested);
- return uobj ? uobj->object : NULL;
-}
+ if (copy_from_user(val, iter->cur, len))
+ return -EFAULT;
-static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
-{
- return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
+ iter->cur += len;
+ return 0;
}
-static void put_pd_read(struct ib_pd *pd)
+static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter,
+ size_t len)
{
- put_uobj_read(pd->uobject);
-}
+ const void __user *res = iter->cur;
-static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
-{
- return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
+ if (len > iter->end - iter->cur)
+ return (void __force __user *)ERR_PTR(-ENOSPC);
+ iter->cur += len;
+ return res;
}
-static void put_cq_read(struct ib_cq *cq)
+static int uverbs_request_finish(struct uverbs_req_iter *iter)
{
- put_uobj_read(cq->uobject);
+ if (!ib_is_buffer_cleared(iter->cur, iter->end - iter->cur))
+ return -EOPNOTSUPP;
+ return 0;
}
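The uverbs_req_iter helpers above let a handler walk a flex array that follows a fixed command header: start copies the header and records cursor/end, next pulls fixed-size elements with bounds checks, and finish insists that any trailing bytes are zero. Below is a simplified userspace iterator over an in-memory buffer sketching the same cur/end bookkeeping; no __user pointers or kernel copy helpers are involved.

/* Userspace sketch of the request-iterator pattern above: a fixed
 * header followed by a variable number of fixed-size elements. */
#include <stdio.h>
#include <string.h>

struct req_iter {
	const char *cur;
	const char *end;
};

static int iter_start(struct req_iter *it, const void *buf, size_t len,
		      void *hdr, size_t hdr_len)
{
	if (len < hdr_len)
		return -1;			/* like -ENOSPC */
	memcpy(hdr, buf, hdr_len);
	it->cur = (const char *)buf + hdr_len;
	it->end = (const char *)buf + len;
	return 0;
}

static int iter_next(struct req_iter *it, void *val, size_t len)
{
	if ((size_t)(it->end - it->cur) < len)
		return -1;			/* ran past the buffer */
	memcpy(val, it->cur, len);
	it->cur += len;
	return 0;
}

static int iter_finish(const struct req_iter *it)
{
	for (const char *p = it->cur; p < it->end; p++)
		if (*p)
			return -1;		/* trailing bytes must be zero */
	return 0;
}

int main(void)
{
	unsigned char buf[8] = { 0xAA, 0xBB, 1, 2, 3, 4, 0, 0 };
	unsigned char hdr[2], elem[4];
	struct req_iter it;

	if (iter_start(&it, buf, sizeof(buf), hdr, sizeof(hdr)) ||
	    iter_next(&it, elem, sizeof(elem)) ||
	    iter_finish(&it))
		return 1;
	printf("hdr=%02x%02x first elem byte=%d\n", hdr[0], hdr[1], elem[0]);
	return 0;
}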
-static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
+/*
+ * When calling a destroy function during an error unwind we need to pass in
+ * udata that is sanitized of all user arguments; i.e., from the driver's
+ * perspective it looks like no udata was passed.
+ */
+struct ib_udata *uverbs_get_cleared_udata(struct uverbs_attr_bundle *attrs)
{
- return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
+ attrs->driver_udata = (struct ib_udata){};
+ return &attrs->driver_udata;
}
-static void put_ah_read(struct ib_ah *ah)
+static struct ib_uverbs_completion_event_file *
+_ib_uverbs_lookup_comp_file(s32 fd, struct uverbs_attr_bundle *attrs)
{
- put_uobj_read(ah->uobject);
-}
+ struct ib_uobject *uobj = ufd_get_read(UVERBS_OBJECT_COMP_CHANNEL,
+ fd, attrs);
-static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
-{
- return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
-}
+ if (IS_ERR(uobj))
+ return ERR_CAST(uobj);
-static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
-{
- struct ib_uobject *uobj;
+ uverbs_uobject_get(uobj);
+ uobj_put_read(uobj);
- uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
- return uobj ? uobj->object : NULL;
+ return container_of(uobj, struct ib_uverbs_completion_event_file,
+ uobj);
}
+#define ib_uverbs_lookup_comp_file(_fd, _ufile) \
+ _ib_uverbs_lookup_comp_file((_fd)*typecheck(s32, _fd), _ufile)
-static void put_qp_read(struct ib_qp *qp)
+int ib_alloc_ucontext(struct uverbs_attr_bundle *attrs)
{
- put_uobj_read(qp->uobject);
-}
+ struct ib_uverbs_file *ufile = attrs->ufile;
+ struct ib_ucontext *ucontext;
+ struct ib_device *ib_dev;
-static void put_qp_write(struct ib_qp *qp)
-{
- put_uobj_write(qp->uobject);
-}
-
-static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
-{
- return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
-}
+ ib_dev = srcu_dereference(ufile->device->ib_dev,
+ &ufile->device->disassociate_srcu);
+ if (!ib_dev)
+ return -EIO;
-static void put_srq_read(struct ib_srq *srq)
-{
- put_uobj_read(srq->uobject);
-}
+ ucontext = rdma_zalloc_drv_obj(ib_dev, ib_ucontext);
+ if (!ucontext)
+ return -ENOMEM;
-static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
- struct ib_uobject **uobj)
-{
- *uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
- return *uobj ? (*uobj)->object : NULL;
-}
+ ucontext->device = ib_dev;
+ ucontext->ufile = ufile;
+ xa_init_flags(&ucontext->mmap_xa, XA_FLAGS_ALLOC);
-static void put_xrcd_read(struct ib_uobject *uobj)
-{
- put_uobj_read(uobj);
+ rdma_restrack_new(&ucontext->res, RDMA_RESTRACK_CTX);
+ rdma_restrack_set_name(&ucontext->res, NULL);
+ attrs->context = ucontext;
+ return 0;
}
-ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf,
- int in_len, int out_len)
+int ib_init_ucontext(struct uverbs_attr_bundle *attrs)
{
- struct ib_uverbs_get_context cmd;
- struct ib_uverbs_get_context_resp resp;
- struct ib_udata udata;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- struct ib_device_attr dev_attr;
-#endif
- struct ib_ucontext *ucontext;
- struct file *filp;
+ struct ib_ucontext *ucontext = attrs->context;
+ struct ib_uverbs_file *file = attrs->ufile;
+ int *fd_array;
+ int fd_count;
int ret;
- if (out_len < sizeof resp)
- return -ENOSPC;
-
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
-
- mutex_lock(&file->mutex);
-
+ if (!down_read_trylock(&file->hw_destroy_rwsem))
+ return -EIO;
+ mutex_lock(&file->ucontext_lock);
if (file->ucontext) {
ret = -EINVAL;
goto err;
}
- INIT_UDATA(&udata, buf + sizeof cmd,
- (unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd, out_len - sizeof resp);
-
- ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
- if (IS_ERR(ucontext)) {
- ret = PTR_ERR(ucontext);
+ ret = ib_rdmacg_try_charge(&ucontext->cg_obj, ucontext->device,
+ RDMACG_RESOURCE_HCA_HANDLE);
+ if (ret)
goto err;
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_GET_CONTEXT_FD_ARR)) {
+ fd_count = uverbs_attr_ptr_get_array_size(attrs,
+ UVERBS_ATTR_GET_CONTEXT_FD_ARR,
+ sizeof(int));
+ if (fd_count < 0) {
+ ret = fd_count;
+ goto err_uncharge;
+ }
+
+ fd_array = uverbs_attr_get_alloced_ptr(attrs,
+ UVERBS_ATTR_GET_CONTEXT_FD_ARR);
+ ret = ib_get_ucaps(fd_array, fd_count, &ucontext->enabled_caps);
+ if (ret)
+ goto err_uncharge;
}
- ucontext->device = ib_dev;
- INIT_LIST_HEAD(&ucontext->pd_list);
- INIT_LIST_HEAD(&ucontext->mr_list);
- INIT_LIST_HEAD(&ucontext->mw_list);
- INIT_LIST_HEAD(&ucontext->cq_list);
- INIT_LIST_HEAD(&ucontext->qp_list);
- INIT_LIST_HEAD(&ucontext->srq_list);
- INIT_LIST_HEAD(&ucontext->ah_list);
- INIT_LIST_HEAD(&ucontext->xrcd_list);
- INIT_LIST_HEAD(&ucontext->rule_list);
- rcu_read_lock();
- ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
- rcu_read_unlock();
- ucontext->closing = 0;
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- ucontext->umem_tree = RB_ROOT;
- init_rwsem(&ucontext->umem_rwsem);
- ucontext->odp_mrs_count = 0;
- INIT_LIST_HEAD(&ucontext->no_private_counters);
-
- ret = ib_query_device(ib_dev, &dev_attr);
+ ret = ucontext->device->ops.alloc_ucontext(ucontext,
+ &attrs->driver_udata);
if (ret)
- goto err_free;
- if (!(dev_attr.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
- ucontext->invalidate_range = NULL;
-
-#endif
+ goto err_uncharge;
- resp.num_comp_vectors = file->device->num_comp_vectors;
+ rdma_restrack_add(&ucontext->res);
- ret = get_unused_fd_flags(O_CLOEXEC);
- if (ret < 0)
- goto err_free;
- resp.async_fd = ret;
+ /*
+ * Make sure that ib_uverbs_get_ucontext() sees the pointer update
+ * only after all writes to setup the ucontext have completed
+ */
+ smp_store_release(&file->ucontext, ucontext);
- filp = ib_uverbs_alloc_event_file(file, ib_dev, 1);
- if (IS_ERR(filp)) {
- ret = PTR_ERR(filp);
- goto err_fd;
- }
+ mutex_unlock(&file->ucontext_lock);
+ up_read(&file->hw_destroy_rwsem);
+ return 0;
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp)) {
- ret = -EFAULT;
- goto err_file;
- }
+err_uncharge:
+ ib_rdmacg_uncharge(&ucontext->cg_obj, ucontext->device,
+ RDMACG_RESOURCE_HCA_HANDLE);
+err:
+ mutex_unlock(&file->ucontext_lock);
+ up_read(&file->hw_destroy_rwsem);
+ return ret;
+}
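ib_init_ucontext() above publishes the fully initialized ucontext with smp_store_release(), so the reader side (ib_uverbs_get_ucontext(), per the in-code comment) only ever observes either NULL or a completely set-up object. Below is a small userspace model of that publish-after-init pattern using C11 release/acquire atomics; the types and field are invented for the example.

/* Userspace model of publish-after-init: the writer fully initializes
 * the object, then publishes the pointer with a release store; readers
 * load it with acquire and see either NULL or a fully built object. */
#include <stdatomic.h>
#include <stdio.h>

struct ctx_model {
	int ready_field;
};

static struct ctx_model the_ctx;
static _Atomic(struct ctx_model *) published;

static void publish_ctx(void)
{
	the_ctx.ready_field = 42;			/* all setup first */
	atomic_store_explicit(&published, &the_ctx,
			      memory_order_release);	/* like smp_store_release() */
}

static struct ctx_model *get_ctx(void)
{
	return atomic_load_explicit(&published, memory_order_acquire);
}

int main(void)
{
	publish_ctx();
	struct ctx_model *ctx = get_ctx();

	printf("%d\n", ctx ? ctx->ready_field : -1);
	return 0;
}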
- file->ucontext = ucontext;
+static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uverbs_get_context_resp resp;
+ struct ib_uverbs_get_context cmd;
+ struct ib_device *ib_dev;
+ struct ib_uobject *uobj;
+ int ret;
- fd_install(resp.async_fd, filp);
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
- mutex_unlock(&file->mutex);
+ ret = ib_alloc_ucontext(attrs);
+ if (ret)
+ return ret;
- return in_len;
+ uobj = uobj_alloc(UVERBS_OBJECT_ASYNC_EVENT, attrs, &ib_dev);
+ if (IS_ERR(uobj)) {
+ ret = PTR_ERR(uobj);
+ goto err_ucontext;
+ }
-err_file:
- ib_uverbs_free_async_event_file(file);
- fput(filp);
+ resp = (struct ib_uverbs_get_context_resp){
+ .num_comp_vectors = attrs->ufile->device->num_comp_vectors,
+ .async_fd = uobj->id,
+ };
+ ret = uverbs_response(attrs, &resp, sizeof(resp));
+ if (ret)
+ goto err_uobj;
-err_fd:
- put_unused_fd(resp.async_fd);
+ ret = ib_init_ucontext(attrs);
+ if (ret)
+ goto err_uobj;
-err_free:
- put_pid(ucontext->tgid);
- ib_dev->dealloc_ucontext(ucontext);
+ ib_uverbs_init_async_event_file(
+ container_of(uobj, struct ib_uverbs_async_event_file, uobj));
+ rdma_alloc_commit_uobject(uobj, attrs);
+ return 0;
-err:
- mutex_unlock(&file->mutex);
+err_uobj:
+ rdma_alloc_abort_uobject(uobj, attrs, false);
+err_ucontext:
+ rdma_restrack_put(&attrs->context->res);
+ kfree(attrs->context);
+ attrs->context = NULL;
return ret;
}
-static void copy_query_dev_fields(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
+static void copy_query_dev_fields(struct ib_ucontext *ucontext,
struct ib_uverbs_query_device_resp *resp,
struct ib_device_attr *attr)
{
+ struct ib_device *ib_dev = ucontext->device;
+
resp->fw_ver = attr->fw_ver;
resp->node_guid = ib_dev->node_guid;
resp->sys_image_guid = attr->sys_image_guid;
@@ -406,8 +356,8 @@ static void copy_query_dev_fields(struct ib_uverbs_file *file,
resp->hw_ver = attr->hw_ver;
resp->max_qp = attr->max_qp;
resp->max_qp_wr = attr->max_qp_wr;
- resp->device_cap_flags = attr->device_cap_flags;
- resp->max_sge = attr->max_sge;
+ resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
+ resp->max_sge = min(attr->max_send_sge, attr->max_recv_sge);
resp->max_sge_rd = attr->max_sge_rd;
resp->max_cq = attr->max_cq;
resp->max_cqe = attr->max_cqe;
@@ -428,216 +378,122 @@ static void copy_query_dev_fields(struct ib_uverbs_file *file,
resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
resp->max_ah = attr->max_ah;
- resp->max_fmr = attr->max_fmr;
- resp->max_map_per_fmr = attr->max_map_per_fmr;
resp->max_srq = attr->max_srq;
resp->max_srq_wr = attr->max_srq_wr;
resp->max_srq_sge = attr->max_srq_sge;
resp->max_pkeys = attr->max_pkeys;
resp->local_ca_ack_delay = attr->local_ca_ack_delay;
- resp->phys_port_cnt = ib_dev->phys_port_cnt;
+ resp->phys_port_cnt = min_t(u32, ib_dev->phys_port_cnt, U8_MAX);
}
-ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf,
- int in_len, int out_len)
+static int ib_uverbs_query_device(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_query_device cmd;
struct ib_uverbs_query_device_resp resp;
- struct ib_device_attr attr;
- int ret;
+ struct ib_ucontext *ucontext;
+ int ret;
- if (out_len < sizeof resp)
- return -ENOSPC;
+ ucontext = ib_uverbs_get_ucontext(attrs);
+ if (IS_ERR(ucontext))
+ return PTR_ERR(ucontext);
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
-
- ret = ib_query_device(ib_dev, &attr);
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
if (ret)
return ret;
memset(&resp, 0, sizeof resp);
- copy_query_dev_fields(file, ib_dev, &resp, &attr);
+ copy_query_dev_fields(ucontext, &resp, &ucontext->device->attrs);
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
- return -EFAULT;
-
- return in_len;
+ return uverbs_response(attrs, &resp, sizeof(resp));
}
-ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf,
- int in_len, int out_len)
+static int ib_uverbs_query_port(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_query_port cmd;
struct ib_uverbs_query_port_resp resp;
struct ib_port_attr attr;
int ret;
+ struct ib_ucontext *ucontext;
+ struct ib_device *ib_dev;
- if (out_len < sizeof resp)
- return -ENOSPC;
+ ucontext = ib_uverbs_get_ucontext(attrs);
+ if (IS_ERR(ucontext))
+ return PTR_ERR(ucontext);
+ ib_dev = ucontext->device;
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
ret = ib_query_port(ib_dev, cmd.port_num, &attr);
if (ret)
return ret;
memset(&resp, 0, sizeof resp);
+ copy_port_attr_to_resp(&attr, &resp, ib_dev, cmd.port_num);
- resp.state = attr.state;
- resp.max_mtu = attr.max_mtu;
- resp.active_mtu = attr.active_mtu;
- resp.gid_tbl_len = attr.gid_tbl_len;
- resp.port_cap_flags = attr.port_cap_flags;
- resp.max_msg_sz = attr.max_msg_sz;
- resp.bad_pkey_cntr = attr.bad_pkey_cntr;
- resp.qkey_viol_cntr = attr.qkey_viol_cntr;
- resp.pkey_tbl_len = attr.pkey_tbl_len;
- resp.lid = attr.lid;
- resp.sm_lid = attr.sm_lid;
- resp.lmc = attr.lmc;
- resp.max_vl_num = attr.max_vl_num;
- resp.sm_sl = attr.sm_sl;
- resp.subnet_timeout = attr.subnet_timeout;
- resp.init_type_reply = attr.init_type_reply;
- resp.active_width = attr.active_width;
- resp.active_speed = attr.active_speed;
- resp.phys_state = attr.phys_state;
- resp.link_layer = rdma_port_get_link_layer(ib_dev,
- cmd.port_num);
-
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
- return -EFAULT;
-
- return in_len;
+ return uverbs_response(attrs, &resp, sizeof(resp));
}
-ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf,
- int in_len, int out_len)
+static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
{
+ struct ib_uverbs_alloc_pd_resp resp = {};
struct ib_uverbs_alloc_pd cmd;
- struct ib_uverbs_alloc_pd_resp resp;
- struct ib_udata udata;
struct ib_uobject *uobj;
struct ib_pd *pd;
int ret;
+ struct ib_device *ib_dev;
- if (out_len < sizeof resp)
- return -ENOSPC;
-
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
-
- INIT_UDATA(&udata, buf + sizeof cmd,
- (unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd, out_len - sizeof resp);
-
- uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
- if (!uobj)
- return -ENOMEM;
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
- init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
- down_write(&uobj->mutex);
+ uobj = uobj_alloc(UVERBS_OBJECT_PD, attrs, &ib_dev);
+ if (IS_ERR(uobj))
+ return PTR_ERR(uobj);
- pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
- if (IS_ERR(pd)) {
- ret = PTR_ERR(pd);
+ pd = rdma_zalloc_drv_obj(ib_dev, ib_pd);
+ if (!pd) {
+ ret = -ENOMEM;
goto err;
}
pd->device = ib_dev;
pd->uobject = uobj;
- pd->local_mr = NULL;
atomic_set(&pd->usecnt, 0);
- uobj->object = pd;
- ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
- if (ret)
- goto err_idr;
+ rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD);
+ rdma_restrack_set_name(&pd->res, NULL);
- memset(&resp, 0, sizeof resp);
- resp.pd_handle = uobj->id;
-
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp)) {
- ret = -EFAULT;
- goto err_copy;
- }
-
- mutex_lock(&file->mutex);
- list_add_tail(&uobj->list, &file->ucontext->pd_list);
- mutex_unlock(&file->mutex);
-
- uobj->live = 1;
-
- up_write(&uobj->mutex);
-
- return in_len;
+ ret = ib_dev->ops.alloc_pd(pd, &attrs->driver_udata);
+ if (ret)
+ goto err_alloc;
+ rdma_restrack_add(&pd->res);
-err_copy:
- idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
+ uobj->object = pd;
+ uobj_finalize_uobj_create(uobj, attrs);
-err_idr:
- ib_dealloc_pd(pd);
+ resp.pd_handle = uobj->id;
+ return uverbs_response(attrs, &resp, sizeof(resp));
+err_alloc:
+ rdma_restrack_put(&pd->res);
+ kfree(pd);
err:
- put_uobj_write(uobj);
+ uobj_alloc_abort(uobj, attrs);
return ret;
}
-ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf,
- int in_len, int out_len)
+static int ib_uverbs_dealloc_pd(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_dealloc_pd cmd;
- struct ib_uobject *uobj;
- struct ib_pd *pd;
- int ret;
-
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
-
- uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
- if (!uobj)
- return -EINVAL;
- pd = uobj->object;
-
- if (atomic_read(&pd->usecnt)) {
- ret = -EBUSY;
- goto err_put;
- }
+ int ret;
- ret = pd->device->dealloc_pd(uobj->object);
- WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
if (ret)
- goto err_put;
-
- uobj->live = 0;
- put_uobj_write(uobj);
-
- idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
-
- mutex_lock(&file->mutex);
- list_del(&uobj->list);
- mutex_unlock(&file->mutex);
-
- put_uobj(uobj);
-
- return in_len;
+ return ret;
-err_put:
- put_uobj_write(uobj);
- return ret;
+ return uobj_perform_destroy(UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
}
struct xrcd_table_entry {
@@ -725,43 +581,35 @@ static void xrcd_table_delete(struct ib_uverbs_device *dev,
}
}
-ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
{
+ struct ib_uverbs_device *ibudev = attrs->ufile->device;
+ struct ib_uverbs_open_xrcd_resp resp = {};
struct ib_uverbs_open_xrcd cmd;
- struct ib_uverbs_open_xrcd_resp resp;
- struct ib_udata udata;
struct ib_uxrcd_object *obj;
struct ib_xrcd *xrcd = NULL;
- struct fd f = {NULL, 0};
struct inode *inode = NULL;
- int ret = 0;
int new_xrcd = 0;
+ struct ib_device *ib_dev;
+ struct fd f = EMPTY_FD;
+ int ret;
- if (out_len < sizeof resp)
- return -ENOSPC;
-
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
-
- INIT_UDATA(&udata, buf + sizeof cmd,
- (unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd, out_len - sizeof resp);
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
- mutex_lock(&file->device->xrcd_tree_mutex);
+ mutex_lock(&ibudev->xrcd_tree_mutex);
if (cmd.fd != -1) {
/* search for file descriptor */
f = fdget(cmd.fd);
- if (!f.file) {
+ if (fd_empty(f)) {
ret = -EBADF;
goto err_tree_mutex_unlock;
}
- inode = file_inode(f.file);
- xrcd = find_xrcd(file->device, inode);
+ inode = file_inode(fd_file(f));
+ xrcd = find_xrcd(ibudev, inode);
if (!xrcd && !(cmd.oflags & O_CREAT)) {
/* no file descriptor. Need CREATE flag */
ret = -EAGAIN;
@@ -774,229 +622,127 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
}
}
- obj = kmalloc(sizeof *obj, GFP_KERNEL);
- if (!obj) {
- ret = -ENOMEM;
+ obj = (struct ib_uxrcd_object *)uobj_alloc(UVERBS_OBJECT_XRCD, attrs,
+ &ib_dev);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
goto err_tree_mutex_unlock;
}
- init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);
-
- down_write(&obj->uobject.mutex);
-
if (!xrcd) {
- xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
+ xrcd = ib_alloc_xrcd_user(ib_dev, inode, &attrs->driver_udata);
if (IS_ERR(xrcd)) {
ret = PTR_ERR(xrcd);
goto err;
}
-
- xrcd->inode = inode;
- xrcd->device = ib_dev;
- atomic_set(&xrcd->usecnt, 0);
- mutex_init(&xrcd->tgt_qp_mutex);
- INIT_LIST_HEAD(&xrcd->tgt_qp_list);
new_xrcd = 1;
}
atomic_set(&obj->refcnt, 0);
obj->uobject.object = xrcd;
- ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
- if (ret)
- goto err_idr;
-
- memset(&resp, 0, sizeof resp);
- resp.xrcd_handle = obj->uobject.id;
if (inode) {
if (new_xrcd) {
/* create new inode/xrcd table entry */
- ret = xrcd_table_insert(file->device, inode, xrcd);
+ ret = xrcd_table_insert(ibudev, inode, xrcd);
if (ret)
- goto err_insert_xrcd;
+ goto err_dealloc_xrcd;
}
atomic_inc(&xrcd->usecnt);
}
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp)) {
- ret = -EFAULT;
- goto err_copy;
- }
-
- if (f.file)
- fdput(f);
+ fdput(f);
- mutex_lock(&file->mutex);
- list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
- mutex_unlock(&file->mutex);
+ mutex_unlock(&ibudev->xrcd_tree_mutex);
+ uobj_finalize_uobj_create(&obj->uobject, attrs);
- obj->uobject.live = 1;
- up_write(&obj->uobject.mutex);
-
- mutex_unlock(&file->device->xrcd_tree_mutex);
- return in_len;
-
-err_copy:
- if (inode) {
- if (new_xrcd)
- xrcd_table_delete(file->device, inode);
- atomic_dec(&xrcd->usecnt);
- }
-
-err_insert_xrcd:
- idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
+ resp.xrcd_handle = obj->uobject.id;
+ return uverbs_response(attrs, &resp, sizeof(resp));
-err_idr:
- ib_dealloc_xrcd(xrcd);
+err_dealloc_xrcd:
+ ib_dealloc_xrcd_user(xrcd, uverbs_get_cleared_udata(attrs));
err:
- put_uobj_write(&obj->uobject);
+ uobj_alloc_abort(&obj->uobject, attrs);
err_tree_mutex_unlock:
- if (f.file)
- fdput(f);
+ fdput(f);
- mutex_unlock(&file->device->xrcd_tree_mutex);
+ mutex_unlock(&ibudev->xrcd_tree_mutex);
return ret;
}
-ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int ib_uverbs_close_xrcd(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_close_xrcd cmd;
- struct ib_uobject *uobj;
- struct ib_xrcd *xrcd = NULL;
- struct inode *inode = NULL;
- struct ib_uxrcd_object *obj;
- int live;
- int ret = 0;
-
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
-
- mutex_lock(&file->device->xrcd_tree_mutex);
- uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
- if (!uobj) {
- ret = -EINVAL;
- goto out;
- }
-
- xrcd = uobj->object;
- inode = xrcd->inode;
- obj = container_of(uobj, struct ib_uxrcd_object, uobject);
- if (atomic_read(&obj->refcnt)) {
- put_uobj_write(uobj);
- ret = -EBUSY;
- goto out;
- }
-
- if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
- ret = ib_dealloc_xrcd(uobj->object);
- if (!ret)
- uobj->live = 0;
- }
-
- live = uobj->live;
- if (inode && ret)
- atomic_inc(&xrcd->usecnt);
-
- put_uobj_write(uobj);
+ int ret;
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
if (ret)
- goto out;
-
- if (inode && !live)
- xrcd_table_delete(file->device, inode);
-
- idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
- mutex_lock(&file->mutex);
- list_del(&uobj->list);
- mutex_unlock(&file->mutex);
-
- put_uobj(uobj);
- ret = in_len;
+ return ret;
-out:
- mutex_unlock(&file->device->xrcd_tree_mutex);
- return ret;
+ return uobj_perform_destroy(UVERBS_OBJECT_XRCD, cmd.xrcd_handle, attrs);
}
-void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
- struct ib_xrcd *xrcd)
+int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
{
struct inode *inode;
+ int ret;
+ struct ib_uverbs_device *dev = attrs->ufile->device;
inode = xrcd->inode;
if (inode && !atomic_dec_and_test(&xrcd->usecnt))
- return;
+ return 0;
- ib_dealloc_xrcd(xrcd);
+ ret = ib_dealloc_xrcd_user(xrcd, &attrs->driver_udata);
+ if (ret) {
+ atomic_inc(&xrcd->usecnt);
+ return ret;
+ }
if (inode)
xrcd_table_delete(dev, inode);
+
+ return 0;
}
-ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
{
+ struct ib_uverbs_reg_mr_resp resp = {};
struct ib_uverbs_reg_mr cmd;
- struct ib_uverbs_reg_mr_resp resp;
- struct ib_udata udata;
struct ib_uobject *uobj;
struct ib_pd *pd;
struct ib_mr *mr;
int ret;
+ struct ib_device *ib_dev;
- if (out_len < sizeof resp)
- return -ENOSPC;
-
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
-
- INIT_UDATA(&udata, buf + sizeof cmd,
- (unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd, out_len - sizeof resp);
-
- if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
- return -EINVAL;
-
- ret = ib_check_mr_access(cmd.access_flags);
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
if (ret)
return ret;
- uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
- if (!uobj)
- return -ENOMEM;
+ if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
+ return -EINVAL;
- init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
- down_write(&uobj->mutex);
+ uobj = uobj_alloc(UVERBS_OBJECT_MR, attrs, &ib_dev);
+ if (IS_ERR(uobj))
+ return PTR_ERR(uobj);
- pd = idr_read_pd(cmd.pd_handle, file->ucontext);
- if (!pd) {
- ret = -EINVAL;
+ ret = ib_check_mr_access(ib_dev, cmd.access_flags);
+ if (ret)
goto err_free;
- }
-
- if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
- struct ib_device_attr attr;
- ret = ib_query_device(pd->device, &attr);
- if (ret || !(attr.device_cap_flags &
- IB_DEVICE_ON_DEMAND_PAGING)) {
- pr_debug("ODP support not available\n");
- ret = -EINVAL;
- goto err_put;
- }
+ pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
+ if (IS_ERR(pd)) {
+ ret = PTR_ERR(pd);
+ goto err_free;
}
- mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
- cmd.access_flags, &udata);
+ mr = pd->device->ops.reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
+ cmd.access_flags, NULL,
+ &attrs->driver_udata);
if (IS_ERR(mr)) {
ret = PTR_ERR(mr);
goto err_put;
@@ -1004,485 +750,360 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
mr->device = pd->device;
mr->pd = pd;
+ mr->type = IB_MR_TYPE_USER;
+ mr->dm = NULL;
+ mr->sig_attrs = NULL;
mr->uobject = uobj;
atomic_inc(&pd->usecnt);
- atomic_set(&mr->usecnt, 0);
+ mr->iova = cmd.hca_va;
+ mr->length = cmd.length;
+
+ rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
+ rdma_restrack_set_name(&mr->res, NULL);
+ rdma_restrack_add(&mr->res);
uobj->object = mr;
- ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
- if (ret)
- goto err_unreg;
+ uobj_put_obj_read(pd);
+ uobj_finalize_uobj_create(uobj, attrs);
- memset(&resp, 0, sizeof resp);
- resp.lkey = mr->lkey;
- resp.rkey = mr->rkey;
+ resp.lkey = mr->lkey;
+ resp.rkey = mr->rkey;
resp.mr_handle = uobj->id;
-
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp)) {
- ret = -EFAULT;
- goto err_copy;
- }
-
- put_pd_read(pd);
-
- mutex_lock(&file->mutex);
- list_add_tail(&uobj->list, &file->ucontext->mr_list);
- mutex_unlock(&file->mutex);
-
- uobj->live = 1;
-
- up_write(&uobj->mutex);
-
- return in_len;
-
-err_copy:
- idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
-
-err_unreg:
- ib_dereg_mr(mr);
+ return uverbs_response(attrs, &resp, sizeof(resp));
err_put:
- put_pd_read(pd);
-
+ uobj_put_obj_read(pd);
err_free:
- put_uobj_write(uobj);
+ uobj_alloc_abort(uobj, attrs);
return ret;
}
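/*
 * Illustrative sketch, an assumption drawn from the converted creation
 * handlers (alloc_pd, reg_mr, alloc_mw, create_cq, ...), not text from the
 * patch itself; the PD structs are reused here only to keep it concrete.
 * The point is the ordering: the response is written only after
 * uobj_finalize_uobj_create(), and any earlier failure releases the
 * reserved handle with uobj_alloc_abort().
 */
static int ib_uverbs_example_create(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_alloc_pd_resp resp = {};
	struct ib_device *ib_dev;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	/* 1. reserve a handle; the uobject is not yet visible to userspace */
	uobj = uobj_alloc(UVERBS_OBJECT_PD, attrs, &ib_dev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/* 2. build the kernel object and call the driver op */
	pd = rdma_zalloc_drv_obj(ib_dev, ib_pd);
	if (!pd) {
		ret = -ENOMEM;
		goto err_abort;
	}
	/* ... initialize pd, ops.alloc_pd(), restrack, drop read refs ... */

	/* 3. commit: the handle becomes live and creation can no longer fail */
	uobj->object = pd;
	uobj_finalize_uobj_create(uobj, attrs);

	/* 4. only now copy the response; a copy failure does not unwind */
	resp.pd_handle = uobj->id;
	return uverbs_response(attrs, &resp, sizeof(resp));

err_abort:
	uobj_alloc_abort(uobj, attrs);
	return ret;
}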
-ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_rereg_mr cmd;
struct ib_uverbs_rereg_mr_resp resp;
- struct ib_udata udata;
- struct ib_pd *pd = NULL;
struct ib_mr *mr;
- struct ib_pd *old_pd;
int ret;
struct ib_uobject *uobj;
+ struct ib_uobject *new_uobj;
+ struct ib_device *ib_dev;
+ struct ib_pd *orig_pd;
+ struct ib_pd *new_pd;
+ struct ib_mr *new_mr;
- if (out_len < sizeof(resp))
- return -ENOSPC;
-
- if (copy_from_user(&cmd, buf, sizeof(cmd)))
- return -EFAULT;
-
- INIT_UDATA(&udata, buf + sizeof(cmd),
- (unsigned long) cmd.response + sizeof(resp),
- in_len - sizeof(cmd), out_len - sizeof(resp));
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
- if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
+ if (!cmd.flags)
return -EINVAL;
- if ((cmd.flags & IB_MR_REREG_TRANS) &&
- (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
- (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
- return -EINVAL;
-
- uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
- file->ucontext);
+ if (cmd.flags & ~IB_MR_REREG_SUPPORTED)
+ return -EOPNOTSUPP;
- if (!uobj)
+ if ((cmd.flags & IB_MR_REREG_TRANS) &&
+ (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
return -EINVAL;
+ uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle, attrs);
+ if (IS_ERR(uobj))
+ return PTR_ERR(uobj);
+
mr = uobj->object;
+ if (mr->dm) {
+ ret = -EINVAL;
+ goto put_uobjs;
+ }
+
if (cmd.flags & IB_MR_REREG_ACCESS) {
- ret = ib_check_mr_access(cmd.access_flags);
+ ret = ib_check_mr_access(mr->device, cmd.access_flags);
if (ret)
goto put_uobjs;
}
+ orig_pd = mr->pd;
if (cmd.flags & IB_MR_REREG_PD) {
- pd = idr_read_pd(cmd.pd_handle, file->ucontext);
- if (!pd) {
- ret = -EINVAL;
+ new_pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle,
+ attrs);
+ if (IS_ERR(new_pd)) {
+ ret = PTR_ERR(new_pd);
goto put_uobjs;
}
+ } else {
+ new_pd = mr->pd;
}
- if (atomic_read(&mr->usecnt)) {
- ret = -EBUSY;
+ /*
+ * The driver might create a new HW object as part of the rereg, so we
+ * need to have a uobject ready to hold it.
+ */
+ new_uobj = uobj_alloc(UVERBS_OBJECT_MR, attrs, &ib_dev);
+ if (IS_ERR(new_uobj)) {
+ ret = PTR_ERR(new_uobj);
goto put_uobj_pd;
}
- old_pd = mr->pd;
- ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
- cmd.length, cmd.hca_va,
- cmd.access_flags, pd, &udata);
- if (!ret) {
+ new_mr = ib_dev->ops.rereg_user_mr(mr, cmd.flags, cmd.start, cmd.length,
+ cmd.hca_va, cmd.access_flags, new_pd,
+ &attrs->driver_udata);
+ if (IS_ERR(new_mr)) {
+ ret = PTR_ERR(new_mr);
+ goto put_new_uobj;
+ }
+ if (new_mr) {
+ new_mr->device = new_pd->device;
+ new_mr->pd = new_pd;
+ new_mr->type = IB_MR_TYPE_USER;
+ new_mr->uobject = uobj;
+ atomic_inc(&new_pd->usecnt);
+ new_uobj->object = new_mr;
+
+ rdma_restrack_new(&new_mr->res, RDMA_RESTRACK_MR);
+ rdma_restrack_set_name(&new_mr->res, NULL);
+ rdma_restrack_add(&new_mr->res);
+
+ /*
+ * The new uobj for the new HW object is put into the same spot
+ * in the IDR and the old uobj & HW object are deleted.
+ */
+ rdma_assign_uobject(uobj, new_uobj, attrs);
+ rdma_alloc_commit_uobject(new_uobj, attrs);
+ uobj_put_destroy(uobj);
+ new_uobj = NULL;
+ uobj = NULL;
+ mr = new_mr;
+ } else {
if (cmd.flags & IB_MR_REREG_PD) {
- atomic_inc(&pd->usecnt);
- mr->pd = pd;
- atomic_dec(&old_pd->usecnt);
+ atomic_dec(&orig_pd->usecnt);
+ mr->pd = new_pd;
+ atomic_inc(&new_pd->usecnt);
+ }
+ if (cmd.flags & IB_MR_REREG_TRANS) {
+ mr->iova = cmd.hca_va;
+ mr->length = cmd.length;
}
- } else {
- goto put_uobj_pd;
}
memset(&resp, 0, sizeof(resp));
resp.lkey = mr->lkey;
resp.rkey = mr->rkey;
- if (copy_to_user((void __user *)(unsigned long)cmd.response,
- &resp, sizeof(resp)))
- ret = -EFAULT;
- else
- ret = in_len;
+ ret = uverbs_response(attrs, &resp, sizeof(resp));
+put_new_uobj:
+ if (new_uobj)
+ uobj_alloc_abort(new_uobj, attrs);
put_uobj_pd:
if (cmd.flags & IB_MR_REREG_PD)
- put_pd_read(pd);
+ uobj_put_obj_read(new_pd);
put_uobjs:
-
- put_uobj_write(mr->uobject);
+ if (uobj)
+ uobj_put_write(uobj);
return ret;
}
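/*
 * Illustrative sketch, not from the patch: when the driver's rereg_user_mr()
 * returns a brand-new MR, ib_uverbs_rereg_mr() above swaps it into the
 * existing handle, so userspace keeps the same mr_handle while the old
 * uobject and HW object are torn down.  The helper name is hypothetical;
 * the three calls are the ones used above.
 */
static void example_swap_uobject(struct ib_uobject *old_uobj,
				 struct ib_uobject *new_uobj,
				 struct uverbs_attr_bundle *attrs)
{
	/*
	 * Together these put the new uobject into the IDR spot of the old
	 * handle, commit it as live, and then destroy the old uobject,
	 * mirroring the sequence in ib_uverbs_rereg_mr() above.
	 */
	rdma_assign_uobject(old_uobj, new_uobj, attrs);
	rdma_alloc_commit_uobject(new_uobj, attrs);
	uobj_put_destroy(old_uobj);
}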
-ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int ib_uverbs_dereg_mr(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_dereg_mr cmd;
- struct ib_mr *mr;
- struct ib_uobject *uobj;
- int ret = -EINVAL;
-
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
-
- uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
- if (!uobj)
- return -EINVAL;
-
- mr = uobj->object;
-
- ret = ib_dereg_mr(mr);
- if (!ret)
- uobj->live = 0;
-
- put_uobj_write(uobj);
+ int ret;
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
if (ret)
return ret;
- idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
-
- mutex_lock(&file->mutex);
- list_del(&uobj->list);
- mutex_unlock(&file->mutex);
-
- put_uobj(uobj);
-
- return in_len;
+ return uobj_perform_destroy(UVERBS_OBJECT_MR, cmd.mr_handle, attrs);
}
-ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_alloc_mw cmd;
- struct ib_uverbs_alloc_mw_resp resp;
+ struct ib_uverbs_alloc_mw_resp resp = {};
struct ib_uobject *uobj;
struct ib_pd *pd;
struct ib_mw *mw;
int ret;
+ struct ib_device *ib_dev;
- if (out_len < sizeof(resp))
- return -ENOSPC;
-
- if (copy_from_user(&cmd, buf, sizeof(cmd)))
- return -EFAULT;
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
- uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
- if (!uobj)
- return -ENOMEM;
+ uobj = uobj_alloc(UVERBS_OBJECT_MW, attrs, &ib_dev);
+ if (IS_ERR(uobj))
+ return PTR_ERR(uobj);
- init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
- down_write(&uobj->mutex);
+ pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
+ if (IS_ERR(pd)) {
+ ret = PTR_ERR(pd);
+ goto err_free;
+ }
- pd = idr_read_pd(cmd.pd_handle, file->ucontext);
- if (!pd) {
+ if (cmd.mw_type != IB_MW_TYPE_1 && cmd.mw_type != IB_MW_TYPE_2) {
ret = -EINVAL;
- goto err_free;
+ goto err_put;
}
- mw = pd->device->alloc_mw(pd, cmd.mw_type);
- if (IS_ERR(mw)) {
- ret = PTR_ERR(mw);
+ mw = rdma_zalloc_drv_obj(ib_dev, ib_mw);
+ if (!mw) {
+ ret = -ENOMEM;
goto err_put;
}
- mw->device = pd->device;
- mw->pd = pd;
+ mw->device = ib_dev;
+ mw->pd = pd;
mw->uobject = uobj;
- atomic_inc(&pd->usecnt);
+ mw->type = cmd.mw_type;
- uobj->object = mw;
- ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
+ ret = pd->device->ops.alloc_mw(mw, &attrs->driver_udata);
if (ret)
- goto err_unalloc;
+ goto err_alloc;
- memset(&resp, 0, sizeof(resp));
- resp.rkey = mw->rkey;
- resp.mw_handle = uobj->id;
-
- if (copy_to_user((void __user *)(unsigned long)cmd.response,
- &resp, sizeof(resp))) {
- ret = -EFAULT;
- goto err_copy;
- }
-
- put_pd_read(pd);
-
- mutex_lock(&file->mutex);
- list_add_tail(&uobj->list, &file->ucontext->mw_list);
- mutex_unlock(&file->mutex);
-
- uobj->live = 1;
-
- up_write(&uobj->mutex);
-
- return in_len;
+ atomic_inc(&pd->usecnt);
-err_copy:
- idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
+ uobj->object = mw;
+ uobj_put_obj_read(pd);
+ uobj_finalize_uobj_create(uobj, attrs);
-err_unalloc:
- ib_dealloc_mw(mw);
+ resp.rkey = mw->rkey;
+ resp.mw_handle = uobj->id;
+ return uverbs_response(attrs, &resp, sizeof(resp));
+err_alloc:
+ kfree(mw);
err_put:
- put_pd_read(pd);
-
+ uobj_put_obj_read(pd);
err_free:
- put_uobj_write(uobj);
+ uobj_alloc_abort(uobj, attrs);
return ret;
}
-ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int ib_uverbs_dealloc_mw(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_dealloc_mw cmd;
- struct ib_mw *mw;
- struct ib_uobject *uobj;
- int ret = -EINVAL;
-
- if (copy_from_user(&cmd, buf, sizeof(cmd)))
- return -EFAULT;
-
- uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
- if (!uobj)
- return -EINVAL;
-
- mw = uobj->object;
-
- ret = ib_dealloc_mw(mw);
- if (!ret)
- uobj->live = 0;
-
- put_uobj_write(uobj);
+ int ret;
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
if (ret)
return ret;
- idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
-
- mutex_lock(&file->mutex);
- list_del(&uobj->list);
- mutex_unlock(&file->mutex);
-
- put_uobj(uobj);
-
- return in_len;
+ return uobj_perform_destroy(UVERBS_OBJECT_MW, cmd.mw_handle, attrs);
}
-ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int ib_uverbs_create_comp_channel(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_create_comp_channel cmd;
struct ib_uverbs_create_comp_channel_resp resp;
- struct file *filp;
+ struct ib_uobject *uobj;
+ struct ib_uverbs_completion_event_file *ev_file;
+ struct ib_device *ib_dev;
int ret;
- if (out_len < sizeof resp)
- return -ENOSPC;
-
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
-
- ret = get_unused_fd_flags(O_CLOEXEC);
- if (ret < 0)
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
return ret;
- resp.fd = ret;
- filp = ib_uverbs_alloc_event_file(file, ib_dev, 0);
- if (IS_ERR(filp)) {
- put_unused_fd(resp.fd);
- return PTR_ERR(filp);
- }
+ uobj = uobj_alloc(UVERBS_OBJECT_COMP_CHANNEL, attrs, &ib_dev);
+ if (IS_ERR(uobj))
+ return PTR_ERR(uobj);
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp)) {
- put_unused_fd(resp.fd);
- fput(filp);
- return -EFAULT;
- }
+ ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
+ uobj);
+ ib_uverbs_init_event_queue(&ev_file->ev_queue);
+ uobj_finalize_uobj_create(uobj, attrs);
- fd_install(resp.fd, filp);
- return in_len;
+ resp.fd = uobj->id;
+ return uverbs_response(attrs, &resp, sizeof(resp));
}
-static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- struct ib_udata *ucore,
- struct ib_udata *uhw,
- struct ib_uverbs_ex_create_cq *cmd,
- size_t cmd_sz,
- int (*cb)(struct ib_uverbs_file *file,
- struct ib_ucq_object *obj,
- struct ib_uverbs_ex_create_cq_resp *resp,
- struct ib_udata *udata,
- void *context),
- void *context)
+static int create_cq(struct uverbs_attr_bundle *attrs,
+ struct ib_uverbs_ex_create_cq *cmd)
{
struct ib_ucq_object *obj;
- struct ib_uverbs_event_file *ev_file = NULL;
+ struct ib_uverbs_completion_event_file *ev_file = NULL;
struct ib_cq *cq;
int ret;
- struct ib_uverbs_ex_create_cq_resp resp;
+ struct ib_uverbs_ex_create_cq_resp resp = {};
struct ib_cq_init_attr attr = {};
+ struct ib_device *ib_dev;
- if (cmd->comp_vector >= file->device->num_comp_vectors)
- return ERR_PTR(-EINVAL);
-
- obj = kmalloc(sizeof *obj, GFP_KERNEL);
- if (!obj)
- return ERR_PTR(-ENOMEM);
+ if (cmd->comp_vector >= attrs->ufile->device->num_comp_vectors)
+ return -EINVAL;
- init_uobj(&obj->uobject, cmd->user_handle, file->ucontext, &cq_lock_class);
- down_write(&obj->uobject.mutex);
+ obj = (struct ib_ucq_object *)uobj_alloc(UVERBS_OBJECT_CQ, attrs,
+ &ib_dev);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
if (cmd->comp_channel >= 0) {
- ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel);
- if (!ev_file) {
- ret = -EINVAL;
+ ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel, attrs);
+ if (IS_ERR(ev_file)) {
+ ret = PTR_ERR(ev_file);
goto err;
}
}
- obj->uverbs_file = file;
- obj->comp_events_reported = 0;
- obj->async_events_reported = 0;
+ obj->uevent.uobject.user_handle = cmd->user_handle;
INIT_LIST_HEAD(&obj->comp_list);
- INIT_LIST_HEAD(&obj->async_list);
+ INIT_LIST_HEAD(&obj->uevent.event_list);
attr.cqe = cmd->cqe;
attr.comp_vector = cmd->comp_vector;
+ attr.flags = cmd->flags;
- if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
- attr.flags = cmd->flags;
-
- cq = ib_dev->create_cq(ib_dev, &attr,
- file->ucontext, uhw);
- if (IS_ERR(cq)) {
- ret = PTR_ERR(cq);
+ cq = rdma_zalloc_drv_obj(ib_dev, ib_cq);
+ if (!cq) {
+ ret = -ENOMEM;
goto err_file;
}
-
cq->device = ib_dev;
- cq->uobject = &obj->uobject;
+ cq->uobject = obj;
cq->comp_handler = ib_uverbs_comp_handler;
cq->event_handler = ib_uverbs_cq_event_handler;
- cq->cq_context = ev_file;
+ cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
atomic_set(&cq->usecnt, 0);
- obj->uobject.object = cq;
- ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
- if (ret)
- goto err_free;
+ rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
+ rdma_restrack_set_name(&cq->res, NULL);
- memset(&resp, 0, sizeof resp);
- resp.base.cq_handle = obj->uobject.id;
- resp.base.cqe = cq->cqe;
-
- resp.response_length = offsetof(typeof(resp), response_length) +
- sizeof(resp.response_length);
-
- ret = cb(file, obj, &resp, ucore, context);
+ ret = ib_dev->ops.create_cq(cq, &attr, attrs);
if (ret)
- goto err_cb;
-
- mutex_lock(&file->mutex);
- list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
- mutex_unlock(&file->mutex);
-
- obj->uobject.live = 1;
-
- up_write(&obj->uobject.mutex);
+ goto err_free;
+ rdma_restrack_add(&cq->res);
- return obj;
+ obj->uevent.uobject.object = cq;
+ obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
+ if (obj->uevent.event_file)
+ uverbs_uobject_get(&obj->uevent.event_file->uobj);
+ uobj_finalize_uobj_create(&obj->uevent.uobject, attrs);
-err_cb:
- idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);
+ resp.base.cq_handle = obj->uevent.uobject.id;
+ resp.base.cqe = cq->cqe;
+ resp.response_length = uverbs_response_length(attrs, sizeof(resp));
+ return uverbs_response(attrs, &resp, sizeof(resp));
err_free:
- ib_destroy_cq(cq);
-
+ rdma_restrack_put(&cq->res);
+ kfree(cq);
err_file:
if (ev_file)
- ib_uverbs_release_ucq(file, ev_file, obj);
-
+ ib_uverbs_release_ucq(ev_file, obj);
err:
- put_uobj_write(&obj->uobject);
-
- return ERR_PTR(ret);
-}
-
-static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
- struct ib_ucq_object *obj,
- struct ib_uverbs_ex_create_cq_resp *resp,
- struct ib_udata *ucore, void *context)
-{
- if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
- return -EFAULT;
-
- return 0;
+ uobj_alloc_abort(&obj->uevent.uobject, attrs);
+ return ret;
}
-ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int ib_uverbs_create_cq(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_create_cq cmd;
struct ib_uverbs_ex_create_cq cmd_ex;
- struct ib_uverbs_create_cq_resp resp;
- struct ib_udata ucore;
- struct ib_udata uhw;
- struct ib_ucq_object *obj;
-
- if (out_len < sizeof(resp))
- return -ENOSPC;
-
- if (copy_from_user(&cmd, buf, sizeof(cmd)))
- return -EFAULT;
-
- INIT_UDATA(&ucore, buf, cmd.response, sizeof(cmd), sizeof(resp));
+ int ret;
- INIT_UDATA(&uhw, buf + sizeof(cmd),
- (unsigned long)cmd.response + sizeof(resp),
- in_len - sizeof(cmd), out_len - sizeof(resp));
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
memset(&cmd_ex, 0, sizeof(cmd_ex));
cmd_ex.user_handle = cmd.user_handle;
@@ -1490,44 +1111,17 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
cmd_ex.comp_vector = cmd.comp_vector;
cmd_ex.comp_channel = cmd.comp_channel;
- obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
- offsetof(typeof(cmd_ex), comp_channel) +
- sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
- NULL);
-
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- return in_len;
-}
-
-static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
- struct ib_ucq_object *obj,
- struct ib_uverbs_ex_create_cq_resp *resp,
- struct ib_udata *ucore, void *context)
-{
- if (ib_copy_to_udata(ucore, resp, resp->response_length))
- return -EFAULT;
-
- return 0;
+ return create_cq(attrs, &cmd_ex);
}
-int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- struct ib_udata *ucore,
- struct ib_udata *uhw)
+static int ib_uverbs_ex_create_cq(struct uverbs_attr_bundle *attrs)
{
- struct ib_uverbs_ex_create_cq_resp resp;
struct ib_uverbs_ex_create_cq cmd;
- struct ib_ucq_object *obj;
- int err;
-
- if (ucore->inlen < sizeof(cmd))
- return -EINVAL;
+ int ret;
- err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
- if (err)
- return err;
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
if (cmd.comp_mask)
return -EINVAL;
@@ -1535,59 +1129,40 @@ int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
if (cmd.reserved)
return -EINVAL;
- if (ucore->outlen < (offsetof(typeof(resp), response_length) +
- sizeof(resp.response_length)))
- return -ENOSPC;
-
- obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
- min(ucore->inlen, sizeof(cmd)),
- ib_uverbs_ex_create_cq_cb, NULL);
-
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- return 0;
+ return create_cq(attrs, &cmd);
}
-ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_resize_cq cmd;
- struct ib_uverbs_resize_cq_resp resp;
- struct ib_udata udata;
+ struct ib_uverbs_resize_cq_resp resp = {};
struct ib_cq *cq;
- int ret = -EINVAL;
-
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
+ int ret;
- INIT_UDATA(&udata, buf + sizeof cmd,
- (unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd, out_len - sizeof resp);
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
- cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
- if (!cq)
- return -EINVAL;
+ cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
+ if (IS_ERR(cq))
+ return PTR_ERR(cq);
- ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
+ ret = cq->device->ops.resize_cq(cq, cmd.cqe, &attrs->driver_udata);
if (ret)
goto out;
resp.cqe = cq->cqe;
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp.cqe))
- ret = -EFAULT;
-
+ ret = uverbs_response(attrs, &resp, sizeof(resp));
out:
- put_cq_read(cq);
+ rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
- return ret ? ret : in_len;
+ return ret;
}
-static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
+static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
+ struct ib_wc *wc)
{
struct ib_uverbs_wc tmp;
@@ -1596,12 +1171,15 @@ static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
tmp.opcode = wc->opcode;
tmp.vendor_err = wc->vendor_err;
tmp.byte_len = wc->byte_len;
- tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
+ tmp.ex.imm_data = wc->ex.imm_data;
tmp.qp_num = wc->qp->qp_num;
tmp.src_qp = wc->src_qp;
tmp.wc_flags = wc->wc_flags;
tmp.pkey_index = wc->pkey_index;
- tmp.slid = wc->slid;
+ if (rdma_cap_opa_ah(ib_dev, wc->port_num))
+ tmp.slid = OPA_TO_IB_UCAST_LID(wc->slid);
+ else
+ tmp.slid = ib_lid_cpu16(wc->slid);
tmp.sl = wc->sl;
tmp.dlid_path_bits = wc->dlid_path_bits;
tmp.port_num = wc->port_num;
@@ -1613,10 +1191,7 @@ static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
return 0;
}
-ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_poll_cq cmd;
struct ib_uverbs_poll_cq_resp resp;
@@ -1626,15 +1201,16 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
struct ib_wc wc;
int ret;
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
- cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
- if (!cq)
- return -EINVAL;
+ cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
+ if (IS_ERR(cq))
+ return PTR_ERR(cq);
/* we copy a struct ib_uverbs_poll_cq_resp to user space */
- header_ptr = (void __user *)(unsigned long) cmd.response;
+ header_ptr = attrs->ucore.outbuf;
data_ptr = header_ptr + sizeof resp;
memset(&resp, 0, sizeof resp);
@@ -1645,7 +1221,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
if (!ret)
break;
- ret = copy_wc_to_user(data_ptr, &wc);
+ ret = copy_wc_to_user(cq->device, data_ptr, &wc);
if (ret)
goto out_put;
@@ -1657,162 +1233,186 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
ret = -EFAULT;
goto out_put;
}
+ ret = 0;
- ret = in_len;
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
+ ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT);
out_put:
- put_cq_read(cq);
+ rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
return ret;
}
-ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int ib_uverbs_req_notify_cq(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_req_notify_cq cmd;
struct ib_cq *cq;
+ int ret;
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
- cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
- if (!cq)
- return -EINVAL;
+ cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
+ if (IS_ERR(cq))
+ return PTR_ERR(cq);
ib_req_notify_cq(cq, cmd.solicited_only ?
IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
- put_cq_read(cq);
-
- return in_len;
+ rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
+ return 0;
}
-ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int ib_uverbs_destroy_cq(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_destroy_cq cmd;
struct ib_uverbs_destroy_cq_resp resp;
struct ib_uobject *uobj;
- struct ib_cq *cq;
struct ib_ucq_object *obj;
- struct ib_uverbs_event_file *ev_file;
- int ret = -EINVAL;
-
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
-
- uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
- if (!uobj)
- return -EINVAL;
- cq = uobj->object;
- ev_file = cq->cq_context;
- obj = container_of(cq->uobject, struct ib_ucq_object, uobject);
-
- ret = ib_destroy_cq(cq);
- if (!ret)
- uobj->live = 0;
-
- put_uobj_write(uobj);
+ int ret;
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
if (ret)
return ret;
- idr_remove_uobj(&ib_uverbs_cq_idr, uobj);
-
- mutex_lock(&file->mutex);
- list_del(&uobj->list);
- mutex_unlock(&file->mutex);
+ uobj = uobj_get_destroy(UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
+ if (IS_ERR(uobj))
+ return PTR_ERR(uobj);
- ib_uverbs_release_ucq(file, ev_file, obj);
-
- memset(&resp, 0, sizeof resp);
+ obj = container_of(uobj, struct ib_ucq_object, uevent.uobject);
+ memset(&resp, 0, sizeof(resp));
resp.comp_events_reported = obj->comp_events_reported;
- resp.async_events_reported = obj->async_events_reported;
+ resp.async_events_reported = obj->uevent.events_reported;
- put_uobj(uobj);
-
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
- return -EFAULT;
+ uobj_put_destroy(uobj);
- return in_len;
+ return uverbs_response(attrs, &resp, sizeof(resp));
}
-ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int create_qp(struct uverbs_attr_bundle *attrs,
+ struct ib_uverbs_ex_create_qp *cmd)
{
- struct ib_uverbs_create_qp cmd;
- struct ib_uverbs_create_qp_resp resp;
- struct ib_udata udata;
- struct ib_uqp_object *obj;
- struct ib_device *device;
- struct ib_pd *pd = NULL;
- struct ib_xrcd *xrcd = NULL;
- struct ib_uobject *uninitialized_var(xrcd_uobj);
- struct ib_cq *scq = NULL, *rcq = NULL;
- struct ib_srq *srq = NULL;
- struct ib_qp *qp;
- struct ib_qp_init_attr attr;
- int ret;
+ struct ib_uqp_object *obj;
+ struct ib_device *device;
+ struct ib_pd *pd = NULL;
+ struct ib_xrcd *xrcd = NULL;
+ struct ib_uobject *xrcd_uobj = ERR_PTR(-ENOENT);
+ struct ib_cq *scq = NULL, *rcq = NULL;
+ struct ib_srq *srq = NULL;
+ struct ib_qp *qp;
+ struct ib_qp_init_attr attr = {};
+ struct ib_uverbs_ex_create_qp_resp resp = {};
+ int ret;
+ struct ib_rwq_ind_table *ind_tbl = NULL;
+ bool has_sq = true;
+ struct ib_device *ib_dev;
+
+ switch (cmd->qp_type) {
+ case IB_QPT_RAW_PACKET:
+ if (!rdma_uattrs_has_raw_cap(attrs))
+ return -EPERM;
+ fallthrough;
+ case IB_QPT_RC:
+ case IB_QPT_UC:
+ case IB_QPT_UD:
+ case IB_QPT_XRC_INI:
+ case IB_QPT_XRC_TGT:
+ case IB_QPT_DRIVER:
+ break;
+ default:
+ return -EINVAL;
+ }
- if (out_len < sizeof resp)
- return -ENOSPC;
+ obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs,
+ &ib_dev);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+ obj->uxrcd = NULL;
+ obj->uevent.uobject.user_handle = cmd->user_handle;
+ mutex_init(&obj->mcast_lock);
+
+ if (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE) {
+ ind_tbl = uobj_get_obj_read(rwq_ind_table,
+ UVERBS_OBJECT_RWQ_IND_TBL,
+ cmd->rwq_ind_tbl_handle, attrs);
+ if (IS_ERR(ind_tbl)) {
+ ret = PTR_ERR(ind_tbl);
+ goto err_put;
+ }
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
+ attr.rwq_ind_tbl = ind_tbl;
+ }
- if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
- return -EPERM;
+ if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
+ ret = -EINVAL;
+ goto err_put;
+ }
- INIT_UDATA(&udata, buf + sizeof cmd,
- (unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd, out_len - sizeof resp);
+ if (ind_tbl && !cmd->max_send_wr)
+ has_sq = false;
- obj = kzalloc(sizeof *obj, GFP_KERNEL);
- if (!obj)
- return -ENOMEM;
+ if (cmd->qp_type == IB_QPT_XRC_TGT) {
+ xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->pd_handle,
+ attrs);
- init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
- down_write(&obj->uevent.uobject.mutex);
+ if (IS_ERR(xrcd_uobj)) {
+ ret = -EINVAL;
+ goto err_put;
+ }
- if (cmd.qp_type == IB_QPT_XRC_TGT) {
- xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
+ xrcd = (struct ib_xrcd *)xrcd_uobj->object;
if (!xrcd) {
ret = -EINVAL;
goto err_put;
}
device = xrcd->device;
} else {
- if (cmd.qp_type == IB_QPT_XRC_INI) {
- cmd.max_recv_wr = cmd.max_recv_sge = 0;
+ if (cmd->qp_type == IB_QPT_XRC_INI) {
+ cmd->max_recv_wr = 0;
+ cmd->max_recv_sge = 0;
} else {
- if (cmd.is_srq) {
- srq = idr_read_srq(cmd.srq_handle, file->ucontext);
- if (!srq || srq->srq_type != IB_SRQT_BASIC) {
- ret = -EINVAL;
+ if (cmd->is_srq) {
+ srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ,
+ cmd->srq_handle, attrs);
+ if (IS_ERR(srq) ||
+ srq->srq_type == IB_SRQT_XRC) {
+ ret = IS_ERR(srq) ? PTR_ERR(srq) :
+ -EINVAL;
goto err_put;
}
}
- if (cmd.recv_cq_handle != cmd.send_cq_handle) {
- rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
- if (!rcq) {
- ret = -EINVAL;
- goto err_put;
+ if (!ind_tbl) {
+ if (cmd->recv_cq_handle != cmd->send_cq_handle) {
+ rcq = uobj_get_obj_read(
+ cq, UVERBS_OBJECT_CQ,
+ cmd->recv_cq_handle, attrs);
+ if (IS_ERR(rcq)) {
+ ret = PTR_ERR(rcq);
+ goto err_put;
+ }
}
}
}
- scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
- rcq = rcq ?: scq;
- pd = idr_read_pd(cmd.pd_handle, file->ucontext);
- if (!pd || !scq) {
- ret = -EINVAL;
+ if (has_sq) {
+ scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
+ cmd->send_cq_handle, attrs);
+ if (IS_ERR(scq)) {
+ ret = PTR_ERR(scq);
+ goto err_put;
+ }
+ }
+
+ if (!ind_tbl && cmd->qp_type != IB_QPT_XRC_INI)
+ rcq = rcq ?: scq;
+ pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle,
+ attrs);
+ if (IS_ERR(pd)) {
+ ret = PTR_ERR(pd);
goto err_put;
}
@@ -1820,222 +1420,246 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
}
attr.event_handler = ib_uverbs_qp_event_handler;
- attr.qp_context = file;
attr.send_cq = scq;
attr.recv_cq = rcq;
attr.srq = srq;
attr.xrcd = xrcd;
- attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
- attr.qp_type = cmd.qp_type;
- attr.create_flags = 0;
+ attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
+ IB_SIGNAL_REQ_WR;
+ attr.qp_type = cmd->qp_type;
- attr.cap.max_send_wr = cmd.max_send_wr;
- attr.cap.max_recv_wr = cmd.max_recv_wr;
- attr.cap.max_send_sge = cmd.max_send_sge;
- attr.cap.max_recv_sge = cmd.max_recv_sge;
- attr.cap.max_inline_data = cmd.max_inline_data;
+ attr.cap.max_send_wr = cmd->max_send_wr;
+ attr.cap.max_recv_wr = cmd->max_recv_wr;
+ attr.cap.max_send_sge = cmd->max_send_sge;
+ attr.cap.max_recv_sge = cmd->max_recv_sge;
+ attr.cap.max_inline_data = cmd->max_inline_data;
- obj->uevent.events_reported = 0;
INIT_LIST_HEAD(&obj->uevent.event_list);
INIT_LIST_HEAD(&obj->mcast_list);
- if (cmd.qp_type == IB_QPT_XRC_TGT)
- qp = ib_create_qp(pd, &attr);
- else
- qp = device->create_qp(pd, &attr, &udata);
+ attr.create_flags = cmd->create_flags;
+ if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
+ IB_QP_CREATE_CROSS_CHANNEL |
+ IB_QP_CREATE_MANAGED_SEND |
+ IB_QP_CREATE_MANAGED_RECV |
+ IB_QP_CREATE_SCATTER_FCS |
+ IB_QP_CREATE_CVLAN_STRIPPING |
+ IB_QP_CREATE_SOURCE_QPN |
+ IB_QP_CREATE_PCI_WRITE_END_PADDING)) {
+ ret = -EINVAL;
+ goto err_put;
+ }
+
+ if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
+ if (!rdma_uattrs_has_raw_cap(attrs)) {
+ ret = -EPERM;
+ goto err_put;
+ }
+
+ attr.source_qpn = cmd->source_qpn;
+ }
+ qp = ib_create_qp_user(device, pd, &attr, &attrs->driver_udata, obj,
+ KBUILD_MODNAME);
if (IS_ERR(qp)) {
ret = PTR_ERR(qp);
goto err_put;
}
-
- if (cmd.qp_type != IB_QPT_XRC_TGT) {
- qp->real_qp = qp;
- qp->device = device;
- qp->pd = pd;
- qp->send_cq = attr.send_cq;
- qp->recv_cq = attr.recv_cq;
- qp->srq = attr.srq;
- qp->event_handler = attr.event_handler;
- qp->qp_context = attr.qp_context;
- qp->qp_type = attr.qp_type;
- atomic_set(&qp->usecnt, 0);
- atomic_inc(&pd->usecnt);
- atomic_inc(&attr.send_cq->usecnt);
- if (attr.recv_cq)
- atomic_inc(&attr.recv_cq->usecnt);
- if (attr.srq)
- atomic_inc(&attr.srq->usecnt);
- }
- qp->uobject = &obj->uevent.uobject;
+ ib_qp_usecnt_inc(qp);
obj->uevent.uobject.object = qp;
- ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
- if (ret)
- goto err_destroy;
-
- memset(&resp, 0, sizeof resp);
- resp.qpn = qp->qp_num;
- resp.qp_handle = obj->uevent.uobject.id;
- resp.max_recv_sge = attr.cap.max_recv_sge;
- resp.max_send_sge = attr.cap.max_send_sge;
- resp.max_recv_wr = attr.cap.max_recv_wr;
- resp.max_send_wr = attr.cap.max_send_wr;
- resp.max_inline_data = attr.cap.max_inline_data;
-
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp)) {
- ret = -EFAULT;
- goto err_copy;
- }
+ obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
+ if (obj->uevent.event_file)
+ uverbs_uobject_get(&obj->uevent.event_file->uobj);
if (xrcd) {
obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
uobject);
atomic_inc(&obj->uxrcd->refcnt);
- put_xrcd_read(xrcd_uobj);
+ uobj_put_read(xrcd_uobj);
}
if (pd)
- put_pd_read(pd);
+ uobj_put_obj_read(pd);
if (scq)
- put_cq_read(scq);
+ rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
if (rcq && rcq != scq)
- put_cq_read(rcq);
+ rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
if (srq)
- put_srq_read(srq);
+ rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
+ if (ind_tbl)
+ uobj_put_obj_read(ind_tbl);
+ uobj_finalize_uobj_create(&obj->uevent.uobject, attrs);
+
+ resp.base.qpn = qp->qp_num;
+ resp.base.qp_handle = obj->uevent.uobject.id;
+ resp.base.max_recv_sge = attr.cap.max_recv_sge;
+ resp.base.max_send_sge = attr.cap.max_send_sge;
+ resp.base.max_recv_wr = attr.cap.max_recv_wr;
+ resp.base.max_send_wr = attr.cap.max_send_wr;
+ resp.base.max_inline_data = attr.cap.max_inline_data;
+ resp.response_length = uverbs_response_length(attrs, sizeof(resp));
+ return uverbs_response(attrs, &resp, sizeof(resp));
+
+err_put:
+ if (!IS_ERR(xrcd_uobj))
+ uobj_put_read(xrcd_uobj);
+ if (!IS_ERR_OR_NULL(pd))
+ uobj_put_obj_read(pd);
+ if (!IS_ERR_OR_NULL(scq))
+ rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
+ if (!IS_ERR_OR_NULL(rcq) && rcq != scq)
+ rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
+ if (!IS_ERR_OR_NULL(srq))
+ rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
+ if (!IS_ERR_OR_NULL(ind_tbl))
+ uobj_put_obj_read(ind_tbl);
+
+ uobj_alloc_abort(&obj->uevent.uobject, attrs);
+ return ret;
+}
- mutex_lock(&file->mutex);
- list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
- mutex_unlock(&file->mutex);
+static int ib_uverbs_create_qp(struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uverbs_create_qp cmd;
+ struct ib_uverbs_ex_create_qp cmd_ex;
+ int ret;
- obj->uevent.uobject.live = 1;
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
- up_write(&obj->uevent.uobject.mutex);
+ memset(&cmd_ex, 0, sizeof(cmd_ex));
+ cmd_ex.user_handle = cmd.user_handle;
+ cmd_ex.pd_handle = cmd.pd_handle;
+ cmd_ex.send_cq_handle = cmd.send_cq_handle;
+ cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
+ cmd_ex.srq_handle = cmd.srq_handle;
+ cmd_ex.max_send_wr = cmd.max_send_wr;
+ cmd_ex.max_recv_wr = cmd.max_recv_wr;
+ cmd_ex.max_send_sge = cmd.max_send_sge;
+ cmd_ex.max_recv_sge = cmd.max_recv_sge;
+ cmd_ex.max_inline_data = cmd.max_inline_data;
+ cmd_ex.sq_sig_all = cmd.sq_sig_all;
+ cmd_ex.qp_type = cmd.qp_type;
+ cmd_ex.is_srq = cmd.is_srq;
- return in_len;
+ return create_qp(attrs, &cmd_ex);
+}
-err_copy:
- idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
+static int ib_uverbs_ex_create_qp(struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uverbs_ex_create_qp cmd;
+ int ret;
-err_destroy:
- ib_destroy_qp(qp);
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
-err_put:
- if (xrcd)
- put_xrcd_read(xrcd_uobj);
- if (pd)
- put_pd_read(pd);
- if (scq)
- put_cq_read(scq);
- if (rcq && rcq != scq)
- put_cq_read(rcq);
- if (srq)
- put_srq_read(srq);
+ if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
+ return -EINVAL;
- put_uobj_write(&obj->uevent.uobject);
- return ret;
+ if (cmd.reserved)
+ return -EINVAL;
+
+ return create_qp(attrs, &cmd);
}
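/*
 * Illustrative sketch, not from the patch: create_qp() above initializes
 * every looked-up pointer to NULL (and the XRCD uobject to ERR_PTR) so a
 * single err_put label can use IS_ERR_OR_NULL() to release only the
 * references that were actually taken.  Reduced here to two objects; the
 * helper name and parameters are hypothetical.
 */
static int example_err_put_idiom(struct uverbs_attr_bundle *attrs,
				 u32 pd_handle, u32 cq_handle)
{
	struct ib_pd *pd = NULL;
	struct ib_cq *cq = NULL;
	int ret;

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, pd_handle, attrs);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err_put;
	}

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cq_handle, attrs);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_put;
	}

	/* ... use pd and cq, then fall through to drop the read refs ... */
	ret = 0;

err_put:
	if (!IS_ERR_OR_NULL(cq))
		rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
					UVERBS_LOOKUP_READ);
	if (!IS_ERR_OR_NULL(pd))
		uobj_put_obj_read(pd);
	return ret;
}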
-ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len, int out_len)
+static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs)
{
+ struct ib_uverbs_create_qp_resp resp = {};
struct ib_uverbs_open_qp cmd;
- struct ib_uverbs_create_qp_resp resp;
- struct ib_udata udata;
struct ib_uqp_object *obj;
struct ib_xrcd *xrcd;
- struct ib_uobject *uninitialized_var(xrcd_uobj);
struct ib_qp *qp;
- struct ib_qp_open_attr attr;
+ struct ib_qp_open_attr attr = {};
int ret;
+ struct ib_uobject *xrcd_uobj;
+ struct ib_device *ib_dev;
- if (out_len < sizeof resp)
- return -ENOSPC;
-
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
-
- INIT_UDATA(&udata, buf + sizeof cmd,
- (unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd, out_len - sizeof resp);
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
- obj = kmalloc(sizeof *obj, GFP_KERNEL);
- if (!obj)
- return -ENOMEM;
+ obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs,
+ &ib_dev);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
- init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
- down_write(&obj->uevent.uobject.mutex);
+ xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd.pd_handle, attrs);
+ if (IS_ERR(xrcd_uobj)) {
+ ret = -EINVAL;
+ goto err_put;
+ }
- xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
+ xrcd = (struct ib_xrcd *)xrcd_uobj->object;
if (!xrcd) {
ret = -EINVAL;
- goto err_put;
+ goto err_xrcd;
}
attr.event_handler = ib_uverbs_qp_event_handler;
- attr.qp_context = file;
attr.qp_num = cmd.qpn;
attr.qp_type = cmd.qp_type;
- obj->uevent.events_reported = 0;
INIT_LIST_HEAD(&obj->uevent.event_list);
INIT_LIST_HEAD(&obj->mcast_list);
qp = ib_open_qp(xrcd, &attr);
if (IS_ERR(qp)) {
ret = PTR_ERR(qp);
- goto err_put;
+ goto err_xrcd;
}
- qp->uobject = &obj->uevent.uobject;
-
obj->uevent.uobject.object = qp;
- ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
- if (ret)
- goto err_destroy;
-
- memset(&resp, 0, sizeof resp);
- resp.qpn = qp->qp_num;
- resp.qp_handle = obj->uevent.uobject.id;
-
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp)) {
- ret = -EFAULT;
- goto err_remove;
- }
+ obj->uevent.uobject.user_handle = cmd.user_handle;
obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
atomic_inc(&obj->uxrcd->refcnt);
- put_xrcd_read(xrcd_uobj);
-
- mutex_lock(&file->mutex);
- list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
- mutex_unlock(&file->mutex);
-
- obj->uevent.uobject.live = 1;
+ qp->uobject = obj;
+ uobj_put_read(xrcd_uobj);
+ uobj_finalize_uobj_create(&obj->uevent.uobject, attrs);
- up_write(&obj->uevent.uobject.mutex);
-
- return in_len;
-
-err_remove:
- idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
-
-err_destroy:
- ib_destroy_qp(qp);
+ resp.qpn = qp->qp_num;
+ resp.qp_handle = obj->uevent.uobject.id;
+ return uverbs_response(attrs, &resp, sizeof(resp));
+err_xrcd:
+ uobj_put_read(xrcd_uobj);
err_put:
- put_xrcd_read(xrcd_uobj);
- put_uobj_write(&obj->uevent.uobject);
+ uobj_alloc_abort(&obj->uevent.uobject, attrs);
return ret;
}
-ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
+ struct rdma_ah_attr *rdma_attr)
+{
+ const struct ib_global_route *grh;
+
+ uverb_attr->dlid = rdma_ah_get_dlid(rdma_attr);
+ uverb_attr->sl = rdma_ah_get_sl(rdma_attr);
+ uverb_attr->src_path_bits = rdma_ah_get_path_bits(rdma_attr);
+ uverb_attr->static_rate = rdma_ah_get_static_rate(rdma_attr);
+ uverb_attr->is_global = !!(rdma_ah_get_ah_flags(rdma_attr) &
+ IB_AH_GRH);
+ if (uverb_attr->is_global) {
+ grh = rdma_ah_read_grh(rdma_attr);
+ memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
+ uverb_attr->flow_label = grh->flow_label;
+ uverb_attr->sgid_index = grh->sgid_index;
+ uverb_attr->hop_limit = grh->hop_limit;
+ uverb_attr->traffic_class = grh->traffic_class;
+ }
+ uverb_attr->port_num = rdma_ah_get_port_num(rdma_attr);
+}
+
+static int ib_uverbs_query_qp(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_query_qp cmd;
struct ib_uverbs_query_qp_resp resp;
@@ -2044,8 +1668,9 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
struct ib_qp_init_attr *init_attr;
int ret;
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
attr = kmalloc(sizeof *attr, GFP_KERNEL);
init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
@@ -2054,15 +1679,16 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
goto out;
}
- qp = idr_read_qp(cmd.qp_handle, file->ucontext);
- if (!qp) {
- ret = -EINVAL;
+ qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
goto out;
}
ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
- put_qp_read(qp);
+ rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
if (ret)
goto out;
@@ -2091,29 +1717,8 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
resp.alt_port_num = attr->alt_port_num;
resp.alt_timeout = attr->alt_timeout;
- memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
- resp.dest.flow_label = attr->ah_attr.grh.flow_label;
- resp.dest.sgid_index = attr->ah_attr.grh.sgid_index;
- resp.dest.hop_limit = attr->ah_attr.grh.hop_limit;
- resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
- resp.dest.dlid = attr->ah_attr.dlid;
- resp.dest.sl = attr->ah_attr.sl;
- resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
- resp.dest.static_rate = attr->ah_attr.static_rate;
- resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
- resp.dest.port_num = attr->ah_attr.port_num;
-
- memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
- resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label;
- resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index;
- resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit;
- resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
- resp.alt_dest.dlid = attr->alt_ah_attr.dlid;
- resp.alt_dest.sl = attr->alt_ah_attr.sl;
- resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
- resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate;
- resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
- resp.alt_dest.port_num = attr->alt_ah_attr.port_num;
+ copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
+ copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);
resp.max_send_wr = init_attr->cap.max_send_wr;
resp.max_recv_wr = init_attr->cap.max_recv_wr;
@@ -2122,15 +1727,13 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
resp.max_inline_data = init_attr->cap.max_inline_data;
resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
- ret = -EFAULT;
+ ret = uverbs_response(attrs, &resp, sizeof(resp));
out:
kfree(attr);
kfree(init_attr);
- return ret ? ret : in_len;
+ return ret;
}
/* Remove ignored fields set in the attribute mask */
@@ -2147,200 +1750,320 @@ static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
}
}
-ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static void copy_ah_attr_from_uverbs(struct ib_device *dev,
+ struct rdma_ah_attr *rdma_attr,
+ struct ib_uverbs_qp_dest *uverb_attr)
{
- struct ib_uverbs_modify_qp cmd;
- struct ib_udata udata;
- struct ib_qp *qp;
- struct ib_qp_attr *attr;
- int ret;
-
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
+ rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
+ if (uverb_attr->is_global) {
+ rdma_ah_set_grh(rdma_attr, NULL,
+ uverb_attr->flow_label,
+ uverb_attr->sgid_index,
+ uverb_attr->hop_limit,
+ uverb_attr->traffic_class);
+ rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
+ } else {
+ rdma_ah_set_ah_flags(rdma_attr, 0);
+ }
+ rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
+ rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
+ rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
+ rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
+ rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
+ rdma_ah_set_make_grd(rdma_attr, false);
+}
- INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
- out_len);
+static int modify_qp(struct uverbs_attr_bundle *attrs,
+ struct ib_uverbs_ex_modify_qp *cmd)
+{
+ struct ib_qp_attr *attr;
+ struct ib_qp *qp;
+ int ret;
- attr = kmalloc(sizeof *attr, GFP_KERNEL);
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
if (!attr)
return -ENOMEM;
- qp = idr_read_qp(cmd.qp_handle, file->ucontext);
- if (!qp) {
- ret = -EINVAL;
+ qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle,
+ attrs);
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
goto out;
}
- attr->qp_state = cmd.qp_state;
- attr->cur_qp_state = cmd.cur_qp_state;
- attr->path_mtu = cmd.path_mtu;
- attr->path_mig_state = cmd.path_mig_state;
- attr->qkey = cmd.qkey;
- attr->rq_psn = cmd.rq_psn;
- attr->sq_psn = cmd.sq_psn;
- attr->dest_qp_num = cmd.dest_qp_num;
- attr->qp_access_flags = cmd.qp_access_flags;
- attr->pkey_index = cmd.pkey_index;
- attr->alt_pkey_index = cmd.alt_pkey_index;
- attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
- attr->max_rd_atomic = cmd.max_rd_atomic;
- attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
- attr->min_rnr_timer = cmd.min_rnr_timer;
- attr->port_num = cmd.port_num;
- attr->timeout = cmd.timeout;
- attr->retry_cnt = cmd.retry_cnt;
- attr->rnr_retry = cmd.rnr_retry;
- attr->alt_port_num = cmd.alt_port_num;
- attr->alt_timeout = cmd.alt_timeout;
-
- memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
- attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
- attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
- attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
- attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
- attr->ah_attr.dlid = cmd.dest.dlid;
- attr->ah_attr.sl = cmd.dest.sl;
- attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
- attr->ah_attr.static_rate = cmd.dest.static_rate;
- attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
- attr->ah_attr.port_num = cmd.dest.port_num;
-
- memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
- attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
- attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
- attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
- attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
- attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
- attr->alt_ah_attr.sl = cmd.alt_dest.sl;
- attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
- attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
- attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
- attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;
-
- if (qp->real_qp == qp) {
- ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
- if (ret)
+ if ((cmd->base.attr_mask & IB_QP_PORT) &&
+ !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
+ ret = -EINVAL;
+ goto release_qp;
+ }
+
+ if ((cmd->base.attr_mask & IB_QP_AV)) {
+ if (!rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
+ ret = -EINVAL;
goto release_qp;
- ret = qp->device->modify_qp(qp, attr,
- modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
- } else {
- ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
+ }
+
+ if (cmd->base.attr_mask & IB_QP_STATE &&
+ cmd->base.qp_state == IB_QPS_RTR) {
+ /* We are in INIT->RTR TRANSITION (if we are not,
+ * this transition will be rejected in subsequent checks).
+ * In the INIT->RTR transition, we cannot have IB_QP_PORT set,
+ * but the IB_QP_STATE flag is required.
+ *
+ * Since kernel 3.14 (commit dbf727de7440), the uverbs driver,
+ * when IB_QP_AV is set, has required inclusion of a valid
+ * port number in the primary AV. (AVs are created and handled
+ * differently for infiniband and ethernet (RoCE) ports).
+ *
+ * Check the port number included in the primary AV against
+ * the port number in the qp struct, which was set (and saved)
+ * in the RST->INIT transition.
+ */
+ if (cmd->base.dest.port_num != qp->real_qp->port) {
+ ret = -EINVAL;
+ goto release_qp;
+ }
+ } else {
+ /* We are in SQD->SQD. (If we are not, this transition will
+ * be rejected later in the verbs layer checks).
+ * Check for both IB_QP_PORT and IB_QP_AV, these can be set
+ * together in the SQD->SQD transition.
+ *
+ * If only IB_QP_AV was set, add in IB_QP_PORT as well (the
+ * verbs layer driver does not track primary port changes
+ * resulting from path migration. Thus, in SQD, if the primary
+ * AV is modified, the primary port should also be modified).
+ *
+ * Note that in this transition, the IB_QP_STATE flag
+ * is not allowed.
+ */
+ if (((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
+ == (IB_QP_AV | IB_QP_PORT)) &&
+ cmd->base.port_num != cmd->base.dest.port_num) {
+ ret = -EINVAL;
+ goto release_qp;
+ }
+ if ((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
+ == IB_QP_AV) {
+ cmd->base.attr_mask |= IB_QP_PORT;
+ cmd->base.port_num = cmd->base.dest.port_num;
+ }
+ }
}
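/*
 * Worked example (sketch, not part of the patch): for an SQD->SQD modify
 * with only IB_QP_AV set and cmd->base.dest.port_num == 2, the branch above
 * adds IB_QP_PORT to the mask and sets cmd->base.port_num = 2, so the
 * primary port follows the new primary AV, as the comment describes.
 */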
- if (ret)
+ if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
+ (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
+ !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num) ||
+ cmd->base.alt_port_num != cmd->base.alt_dest.port_num)) {
+ ret = -EINVAL;
+ goto release_qp;
+ }
+
+ if ((cmd->base.attr_mask & IB_QP_CUR_STATE &&
+ cmd->base.cur_qp_state > IB_QPS_ERR) ||
+ (cmd->base.attr_mask & IB_QP_STATE &&
+ cmd->base.qp_state > IB_QPS_ERR)) {
+ ret = -EINVAL;
goto release_qp;
+ }
- ret = in_len;
+ if (cmd->base.attr_mask & IB_QP_STATE)
+ attr->qp_state = cmd->base.qp_state;
+ if (cmd->base.attr_mask & IB_QP_CUR_STATE)
+ attr->cur_qp_state = cmd->base.cur_qp_state;
+ if (cmd->base.attr_mask & IB_QP_PATH_MTU)
+ attr->path_mtu = cmd->base.path_mtu;
+ if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
+ attr->path_mig_state = cmd->base.path_mig_state;
+ if (cmd->base.attr_mask & IB_QP_QKEY) {
+ if (cmd->base.qkey & IB_QP_SET_QKEY &&
+ !(rdma_nl_get_privileged_qkey() ||
+ rdma_uattrs_has_raw_cap(attrs))) {
+ ret = -EPERM;
+ goto release_qp;
+ }
+ attr->qkey = cmd->base.qkey;
+ }
+ if (cmd->base.attr_mask & IB_QP_RQ_PSN)
+ attr->rq_psn = cmd->base.rq_psn;
+ if (cmd->base.attr_mask & IB_QP_SQ_PSN)
+ attr->sq_psn = cmd->base.sq_psn;
+ if (cmd->base.attr_mask & IB_QP_DEST_QPN)
+ attr->dest_qp_num = cmd->base.dest_qp_num;
+ if (cmd->base.attr_mask & IB_QP_ACCESS_FLAGS)
+ attr->qp_access_flags = cmd->base.qp_access_flags;
+ if (cmd->base.attr_mask & IB_QP_PKEY_INDEX)
+ attr->pkey_index = cmd->base.pkey_index;
+ if (cmd->base.attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
+ attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
+ if (cmd->base.attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
+ attr->max_rd_atomic = cmd->base.max_rd_atomic;
+ if (cmd->base.attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
+ if (cmd->base.attr_mask & IB_QP_MIN_RNR_TIMER)
+ attr->min_rnr_timer = cmd->base.min_rnr_timer;
+ if (cmd->base.attr_mask & IB_QP_PORT)
+ attr->port_num = cmd->base.port_num;
+ if (cmd->base.attr_mask & IB_QP_TIMEOUT)
+ attr->timeout = cmd->base.timeout;
+ if (cmd->base.attr_mask & IB_QP_RETRY_CNT)
+ attr->retry_cnt = cmd->base.retry_cnt;
+ if (cmd->base.attr_mask & IB_QP_RNR_RETRY)
+ attr->rnr_retry = cmd->base.rnr_retry;
+ if (cmd->base.attr_mask & IB_QP_ALT_PATH) {
+ attr->alt_port_num = cmd->base.alt_port_num;
+ attr->alt_timeout = cmd->base.alt_timeout;
+ attr->alt_pkey_index = cmd->base.alt_pkey_index;
+ }
+ if (cmd->base.attr_mask & IB_QP_RATE_LIMIT)
+ attr->rate_limit = cmd->rate_limit;
+
+ if (cmd->base.attr_mask & IB_QP_AV)
+ copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
+ &cmd->base.dest);
+
+ if (cmd->base.attr_mask & IB_QP_ALT_PATH)
+ copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
+ &cmd->base.alt_dest);
+
+ ret = ib_modify_qp_with_udata(qp, attr,
+ modify_qp_mask(qp->qp_type,
+ cmd->base.attr_mask),
+ &attrs->driver_udata);
release_qp:
- put_qp_read(qp);
-
+ rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
out:
kfree(attr);
return ret;
}
-ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int ib_uverbs_modify_qp(struct uverbs_attr_bundle *attrs)
{
- struct ib_uverbs_destroy_qp cmd;
- struct ib_uverbs_destroy_qp_resp resp;
- struct ib_uobject *uobj;
- struct ib_qp *qp;
- struct ib_uqp_object *obj;
- int ret = -EINVAL;
+ struct ib_uverbs_ex_modify_qp cmd;
+ int ret;
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
+ ret = uverbs_request(attrs, &cmd.base, sizeof(cmd.base));
+ if (ret)
+ return ret;
- memset(&resp, 0, sizeof resp);
+ if (cmd.base.attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
- uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
- if (!uobj)
- return -EINVAL;
- qp = uobj->object;
- obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
+ return modify_qp(attrs, &cmd);
+}
- if (!list_empty(&obj->mcast_list)) {
- put_uobj_write(uobj);
- return -EBUSY;
- }
+static int ib_uverbs_ex_modify_qp(struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uverbs_ex_modify_qp cmd;
+ struct ib_uverbs_ex_modify_qp_resp resp = {
+ .response_length = uverbs_response_length(attrs, sizeof(resp))
+ };
+ int ret;
- ret = ib_destroy_qp(qp);
- if (!ret)
- uobj->live = 0;
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
- put_uobj_write(uobj);
+ /*
+ * Last bit is reserved for extending the attr_mask by
+ * using another field.
+ */
+ if (cmd.base.attr_mask & ~(IB_QP_ATTR_STANDARD_BITS | IB_QP_RATE_LIMIT))
+ return -EOPNOTSUPP;
+ ret = modify_qp(attrs, &cmd);
if (ret)
return ret;
- if (obj->uxrcd)
- atomic_dec(&obj->uxrcd->refcnt);
+ return uverbs_response(attrs, &resp, sizeof(resp));
+}
- idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
+static int ib_uverbs_destroy_qp(struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uverbs_destroy_qp cmd;
+ struct ib_uverbs_destroy_qp_resp resp;
+ struct ib_uobject *uobj;
+ struct ib_uqp_object *obj;
+ int ret;
- mutex_lock(&file->mutex);
- list_del(&uobj->list);
- mutex_unlock(&file->mutex);
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
- ib_uverbs_release_uevent(file, &obj->uevent);
+ uobj = uobj_get_destroy(UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
+ if (IS_ERR(uobj))
+ return PTR_ERR(uobj);
+ obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
+ memset(&resp, 0, sizeof(resp));
resp.events_reported = obj->uevent.events_reported;
- put_uobj(uobj);
+ uobj_put_destroy(uobj);
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
- return -EFAULT;
+ return uverbs_response(attrs, &resp, sizeof(resp));
+}
- return in_len;
+static void *alloc_wr(size_t wr_size, __u32 num_sge)
+{
+ if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof(struct ib_sge))) /
+ sizeof(struct ib_sge))
+ return NULL;
+
+ return kmalloc(ALIGN(wr_size, sizeof(struct ib_sge)) +
+ num_sge * sizeof(struct ib_sge),
+ GFP_KERNEL);
}
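/*
 * Illustrative sketch, not part of the patch: the guard in alloc_wr() above
 * rejects num_sge values for which
 *     ALIGN(wr_size, sizeof(struct ib_sge)) + num_sge * sizeof(struct ib_sge)
 * would overflow. An equivalent, more explicit formulation using the
 * check_*_overflow() helpers from <linux/overflow.h> could look like the
 * hypothetical helper below (alloc_wr_checked is not a real kernel symbol).
 */
static void *alloc_wr_checked(size_t wr_size, __u32 num_sge)
{
	size_t sgl_bytes, total;

	if (check_mul_overflow((size_t)num_sge, sizeof(struct ib_sge),
			       &sgl_bytes) ||
	    check_add_overflow(ALIGN(wr_size, sizeof(struct ib_sge)),
			       sgl_bytes, &total))
		return NULL;	/* allocation size would overflow */

	return kmalloc(total, GFP_KERNEL);
}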
-ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_post_send cmd;
struct ib_uverbs_post_send_resp resp;
struct ib_uverbs_send_wr *user_wr;
- struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
+ struct ib_send_wr *wr = NULL, *last, *next;
+ const struct ib_send_wr *bad_wr;
struct ib_qp *qp;
int i, sg_ind;
int is_ud;
- ssize_t ret = -EINVAL;
+ int ret, ret2;
+ size_t next_size;
+ const struct ib_sge __user *sgls;
+ const void __user *wqes;
+ struct uverbs_req_iter iter;
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
-
- if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
- cmd.sge_count * sizeof (struct ib_uverbs_sge))
- return -EINVAL;
-
- if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
- return -EINVAL;
+ ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
+ wqes = uverbs_request_next_ptr(&iter, size_mul(cmd.wqe_size,
+ cmd.wr_count));
+ if (IS_ERR(wqes))
+ return PTR_ERR(wqes);
+ sgls = uverbs_request_next_ptr(&iter,
+ size_mul(cmd.sge_count,
+ sizeof(struct ib_uverbs_sge)));
+ if (IS_ERR(sgls))
+ return PTR_ERR(sgls);
+ ret = uverbs_request_finish(&iter);
+ if (ret)
+ return ret;
user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
if (!user_wr)
return -ENOMEM;
- qp = idr_read_qp(cmd.qp_handle, file->ucontext);
- if (!qp)
+ qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
goto out;
+ }
is_ud = qp->qp_type == IB_QPT_UD;
sg_ind = 0;
last = NULL;
for (i = 0; i < cmd.wr_count; ++i) {
- if (copy_from_user(user_wr,
- buf + sizeof cmd + i * cmd.wqe_size,
+ if (copy_from_user(user_wr, wqes + i * cmd.wqe_size,
cmd.wqe_size)) {
ret = -EFAULT;
goto out_put;
@@ -2351,14 +2074,88 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
goto out_put;
}
- next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
- user_wr->num_sge * sizeof (struct ib_sge),
- GFP_KERNEL);
- if (!next) {
- ret = -ENOMEM;
+ if (is_ud) {
+ struct ib_ud_wr *ud;
+
+ if (user_wr->opcode != IB_WR_SEND &&
+ user_wr->opcode != IB_WR_SEND_WITH_IMM) {
+ ret = -EINVAL;
+ goto out_put;
+ }
+
+ next_size = sizeof(*ud);
+ ud = alloc_wr(next_size, user_wr->num_sge);
+ if (!ud) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+
+ ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH,
+ user_wr->wr.ud.ah, attrs);
+ if (IS_ERR(ud->ah)) {
+ ret = PTR_ERR(ud->ah);
+ kfree(ud);
+ goto out_put;
+ }
+ ud->remote_qpn = user_wr->wr.ud.remote_qpn;
+ ud->remote_qkey = user_wr->wr.ud.remote_qkey;
+
+ next = &ud->wr;
+ } else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
+ user_wr->opcode == IB_WR_RDMA_WRITE ||
+ user_wr->opcode == IB_WR_RDMA_READ) {
+ struct ib_rdma_wr *rdma;
+
+ next_size = sizeof(*rdma);
+ rdma = alloc_wr(next_size, user_wr->num_sge);
+ if (!rdma) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+
+ rdma->remote_addr = user_wr->wr.rdma.remote_addr;
+ rdma->rkey = user_wr->wr.rdma.rkey;
+
+ next = &rdma->wr;
+ } else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+ struct ib_atomic_wr *atomic;
+
+ next_size = sizeof(*atomic);
+ atomic = alloc_wr(next_size, user_wr->num_sge);
+ if (!atomic) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+
+ atomic->remote_addr = user_wr->wr.atomic.remote_addr;
+ atomic->compare_add = user_wr->wr.atomic.compare_add;
+ atomic->swap = user_wr->wr.atomic.swap;
+ atomic->rkey = user_wr->wr.atomic.rkey;
+
+ next = &atomic->wr;
+ } else if (user_wr->opcode == IB_WR_SEND ||
+ user_wr->opcode == IB_WR_SEND_WITH_IMM ||
+ user_wr->opcode == IB_WR_SEND_WITH_INV) {
+ next_size = sizeof(*next);
+ next = alloc_wr(next_size, user_wr->num_sge);
+ if (!next) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+ } else {
+ ret = -EINVAL;
goto out_put;
}
+ if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
+ user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
+ next->ex.imm_data =
+ (__be32 __force) user_wr->ex.imm_data;
+ } else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
+ next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
+ }
+
if (!last)
wr = next;
else
@@ -2371,68 +2168,12 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
next->opcode = user_wr->opcode;
next->send_flags = user_wr->send_flags;
- if (is_ud) {
- if (next->opcode != IB_WR_SEND &&
- next->opcode != IB_WR_SEND_WITH_IMM) {
- ret = -EINVAL;
- goto out_put;
- }
-
- next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
- file->ucontext);
- if (!next->wr.ud.ah) {
- ret = -EINVAL;
- goto out_put;
- }
- next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn;
- next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
- if (next->opcode == IB_WR_SEND_WITH_IMM)
- next->ex.imm_data =
- (__be32 __force) user_wr->ex.imm_data;
- } else {
- switch (next->opcode) {
- case IB_WR_RDMA_WRITE_WITH_IMM:
- next->ex.imm_data =
- (__be32 __force) user_wr->ex.imm_data;
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_READ:
- next->wr.rdma.remote_addr =
- user_wr->wr.rdma.remote_addr;
- next->wr.rdma.rkey =
- user_wr->wr.rdma.rkey;
- break;
- case IB_WR_SEND_WITH_IMM:
- next->ex.imm_data =
- (__be32 __force) user_wr->ex.imm_data;
- break;
- case IB_WR_SEND_WITH_INV:
- next->ex.invalidate_rkey =
- user_wr->ex.invalidate_rkey;
- break;
- case IB_WR_ATOMIC_CMP_AND_SWP:
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- next->wr.atomic.remote_addr =
- user_wr->wr.atomic.remote_addr;
- next->wr.atomic.compare_add =
- user_wr->wr.atomic.compare_add;
- next->wr.atomic.swap = user_wr->wr.atomic.swap;
- next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
- case IB_WR_SEND:
- break;
- default:
- ret = -EINVAL;
- goto out_put;
- }
- }
-
if (next->num_sge) {
next->sg_list = (void *) next +
- ALIGN(sizeof *next, sizeof (struct ib_sge));
- if (copy_from_user(next->sg_list,
- buf + sizeof cmd +
- cmd.wr_count * cmd.wqe_size +
- sg_ind * sizeof (struct ib_sge),
- next->num_sge * sizeof (struct ib_sge))) {
+ ALIGN(next_size, sizeof(struct ib_sge));
+ if (copy_from_user(next->sg_list, sgls + sg_ind,
+ next->num_sge *
+ sizeof(struct ib_sge))) {
ret = -EFAULT;
goto out_put;
}
@@ -2442,7 +2183,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
}
resp.bad_wr = 0;
- ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
+ ret = qp->device->ops.post_send(qp->real_qp, wr, &bad_wr);
if (ret)
for (next = wr; next; next = next->next) {
++resp.bad_wr;
@@ -2450,16 +2191,17 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
break;
}
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
- ret = -EFAULT;
+ ret2 = uverbs_response(attrs, &resp, sizeof(resp));
+ if (ret2)
+ ret = ret2;
out_put:
- put_qp_read(qp);
+ rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
while (wr) {
- if (is_ud && wr->wr.ud.ah)
- put_ah_read(wr->wr.ud.ah);
+ if (is_ud && ud_wr(wr)->ah)
+ uobj_put_obj_read(ud_wr(wr)->ah);
next = wr->next;
kfree(wr);
wr = next;
@@ -2468,27 +2210,34 @@ out_put:
out:
kfree(user_wr);
- return ret ? ret : in_len;
+ return ret;
}
-static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
- int in_len,
- u32 wr_count,
- u32 sge_count,
- u32 wqe_size)
+static struct ib_recv_wr *
+ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
+ u32 wqe_size, u32 sge_count)
{
struct ib_uverbs_recv_wr *user_wr;
struct ib_recv_wr *wr = NULL, *last, *next;
int sg_ind;
int i;
int ret;
+ const struct ib_sge __user *sgls;
+ const void __user *wqes;
- if (in_len < wqe_size * wr_count +
- sge_count * sizeof (struct ib_uverbs_sge))
+ if (wqe_size < sizeof(struct ib_uverbs_recv_wr))
return ERR_PTR(-EINVAL);
- if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
- return ERR_PTR(-EINVAL);
+ wqes = uverbs_request_next_ptr(iter, size_mul(wqe_size, wr_count));
+ if (IS_ERR(wqes))
+ return ERR_CAST(wqes);
+ sgls = uverbs_request_next_ptr(iter, size_mul(sge_count,
+ sizeof(struct ib_uverbs_sge)));
+ if (IS_ERR(sgls))
+ return ERR_CAST(sgls);
+ ret = uverbs_request_finish(iter);
+ if (ret)
+ return ERR_PTR(ret);
user_wr = kmalloc(wqe_size, GFP_KERNEL);
if (!user_wr)
@@ -2497,7 +2246,7 @@ static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
sg_ind = 0;
last = NULL;
for (i = 0; i < wr_count; ++i) {
- if (copy_from_user(user_wr, buf + i * wqe_size,
+ if (copy_from_user(user_wr, wqes + i * wqe_size,
wqe_size)) {
ret = -EFAULT;
goto err;
@@ -2508,8 +2257,15 @@ static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
goto err;
}
- next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
- user_wr->num_sge * sizeof (struct ib_sge),
+ if (user_wr->num_sge >=
+ (U32_MAX - ALIGN(sizeof(*next), sizeof(struct ib_sge))) /
+ sizeof(struct ib_sge)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ next = kmalloc(ALIGN(sizeof(*next), sizeof(struct ib_sge)) +
+ user_wr->num_sge * sizeof(struct ib_sge),
GFP_KERNEL);
if (!next) {
ret = -ENOMEM;
@@ -2527,12 +2283,11 @@ static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
next->num_sge = user_wr->num_sge;
if (next->num_sge) {
- next->sg_list = (void *) next +
- ALIGN(sizeof *next, sizeof (struct ib_sge));
- if (copy_from_user(next->sg_list,
- buf + wr_count * wqe_size +
- sg_ind * sizeof (struct ib_sge),
- next->num_sge * sizeof (struct ib_sge))) {
+ next->sg_list = (void *)next +
+ ALIGN(sizeof(*next), sizeof(struct ib_sge));
+ if (copy_from_user(next->sg_list, sgls + sg_ind,
+ next->num_sge *
+ sizeof(struct ib_sge))) {
ret = -EFAULT;
goto err;
}
@@ -2556,46 +2311,47 @@ err:
return ERR_PTR(ret);
}
-ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_post_recv cmd;
struct ib_uverbs_post_recv_resp resp;
- struct ib_recv_wr *wr, *next, *bad_wr;
+ struct ib_recv_wr *wr, *next;
+ const struct ib_recv_wr *bad_wr;
struct ib_qp *qp;
- ssize_t ret = -EINVAL;
+ int ret, ret2;
+ struct uverbs_req_iter iter;
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
+ ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
- wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
- in_len - sizeof cmd, cmd.wr_count,
- cmd.sge_count, cmd.wqe_size);
+ wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size,
+ cmd.sge_count);
if (IS_ERR(wr))
return PTR_ERR(wr);
- qp = idr_read_qp(cmd.qp_handle, file->ucontext);
- if (!qp)
+ qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
goto out;
+ }
resp.bad_wr = 0;
- ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
+ ret = qp->device->ops.post_recv(qp->real_qp, wr, &bad_wr);
- put_qp_read(qp);
-
- if (ret)
+ rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
+ if (ret) {
for (next = wr; next; next = next->next) {
++resp.bad_wr;
if (next == bad_wr)
break;
}
+ }
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
- ret = -EFAULT;
-
+ ret2 = uverbs_response(attrs, &resp, sizeof(resp));
+ if (ret2)
+ ret = ret2;
out:
while (wr) {
next = wr->next;
@@ -2603,37 +2359,39 @@ out:
wr = next;
}
- return ret ? ret : in_len;
+ return ret;
}
-ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_post_srq_recv cmd;
struct ib_uverbs_post_srq_recv_resp resp;
- struct ib_recv_wr *wr, *next, *bad_wr;
+ struct ib_recv_wr *wr, *next;
+ const struct ib_recv_wr *bad_wr;
struct ib_srq *srq;
- ssize_t ret = -EINVAL;
+ int ret, ret2;
+ struct uverbs_req_iter iter;
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
+ ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
- wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
- in_len - sizeof cmd, cmd.wr_count,
- cmd.sge_count, cmd.wqe_size);
+ wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size,
+ cmd.sge_count);
if (IS_ERR(wr))
return PTR_ERR(wr);
- srq = idr_read_srq(cmd.srq_handle, file->ucontext);
- if (!srq)
+ srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
+ if (IS_ERR(srq)) {
+ ret = PTR_ERR(srq);
goto out;
+ }
resp.bad_wr = 0;
- ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
+ ret = srq->device->ops.post_srq_recv(srq, wr, &bad_wr);
- put_srq_read(srq);
+ rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
if (ret)
for (next = wr; next; next = next->next) {
@@ -2642,9 +2400,9 @@ ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
break;
}
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
- ret = -EFAULT;
+ ret2 = uverbs_response(attrs, &resp, sizeof(resp));
+ if (ret2)
+ ret = ret2;
out:
while (wr) {
@@ -2653,143 +2411,92 @@ out:
wr = next;
}
- return ret ? ret : in_len;
+ return ret;
}
-ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_create_ah cmd;
struct ib_uverbs_create_ah_resp resp;
struct ib_uobject *uobj;
struct ib_pd *pd;
struct ib_ah *ah;
- struct ib_ah_attr attr;
+ struct rdma_ah_attr attr = {};
int ret;
+ struct ib_device *ib_dev;
- if (out_len < sizeof resp)
- return -ENOSPC;
-
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
-
- uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
- if (!uobj)
- return -ENOMEM;
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
- init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
- down_write(&uobj->mutex);
+ uobj = uobj_alloc(UVERBS_OBJECT_AH, attrs, &ib_dev);
+ if (IS_ERR(uobj))
+ return PTR_ERR(uobj);
- pd = idr_read_pd(cmd.pd_handle, file->ucontext);
- if (!pd) {
+ if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num)) {
ret = -EINVAL;
goto err;
}
- attr.dlid = cmd.attr.dlid;
- attr.sl = cmd.attr.sl;
- attr.src_path_bits = cmd.attr.src_path_bits;
- attr.static_rate = cmd.attr.static_rate;
- attr.ah_flags = cmd.attr.is_global ? IB_AH_GRH : 0;
- attr.port_num = cmd.attr.port_num;
- attr.grh.flow_label = cmd.attr.grh.flow_label;
- attr.grh.sgid_index = cmd.attr.grh.sgid_index;
- attr.grh.hop_limit = cmd.attr.grh.hop_limit;
- attr.grh.traffic_class = cmd.attr.grh.traffic_class;
- attr.vlan_id = 0;
- memset(&attr.dmac, 0, sizeof(attr.dmac));
- memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);
-
- ah = ib_create_ah(pd, &attr);
+ pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
+ if (IS_ERR(pd)) {
+ ret = PTR_ERR(pd);
+ goto err;
+ }
+
+ attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
+ rdma_ah_set_make_grd(&attr, false);
+ rdma_ah_set_dlid(&attr, cmd.attr.dlid);
+ rdma_ah_set_sl(&attr, cmd.attr.sl);
+ rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits);
+ rdma_ah_set_static_rate(&attr, cmd.attr.static_rate);
+ rdma_ah_set_port_num(&attr, cmd.attr.port_num);
+
+ if (cmd.attr.is_global) {
+ rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label,
+ cmd.attr.grh.sgid_index,
+ cmd.attr.grh.hop_limit,
+ cmd.attr.grh.traffic_class);
+ rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid);
+ } else {
+ rdma_ah_set_ah_flags(&attr, 0);
+ }
+
+ ah = rdma_create_user_ah(pd, &attr, &attrs->driver_udata);
if (IS_ERR(ah)) {
ret = PTR_ERR(ah);
goto err_put;
}
ah->uobject = uobj;
+ uobj->user_handle = cmd.user_handle;
uobj->object = ah;
-
- ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
- if (ret)
- goto err_destroy;
+ uobj_put_obj_read(pd);
+ uobj_finalize_uobj_create(uobj, attrs);
resp.ah_handle = uobj->id;
-
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp)) {
- ret = -EFAULT;
- goto err_copy;
- }
-
- put_pd_read(pd);
-
- mutex_lock(&file->mutex);
- list_add_tail(&uobj->list, &file->ucontext->ah_list);
- mutex_unlock(&file->mutex);
-
- uobj->live = 1;
-
- up_write(&uobj->mutex);
-
- return in_len;
-
-err_copy:
- idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
-
-err_destroy:
- ib_destroy_ah(ah);
+ return uverbs_response(attrs, &resp, sizeof(resp));
err_put:
- put_pd_read(pd);
-
+ uobj_put_obj_read(pd);
err:
- put_uobj_write(uobj);
+ uobj_alloc_abort(uobj, attrs);
return ret;
}
-ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len, int out_len)
+static int ib_uverbs_destroy_ah(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_destroy_ah cmd;
- struct ib_ah *ah;
- struct ib_uobject *uobj;
- int ret;
-
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
-
- uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
- if (!uobj)
- return -EINVAL;
- ah = uobj->object;
-
- ret = ib_destroy_ah(ah);
- if (!ret)
- uobj->live = 0;
-
- put_uobj_write(uobj);
+ int ret;
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
if (ret)
return ret;
- idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
-
- mutex_lock(&file->mutex);
- list_del(&uobj->list);
- mutex_unlock(&file->mutex);
-
- put_uobj(uobj);
-
- return in_len;
+ return uobj_perform_destroy(UVERBS_OBJECT_AH, cmd.ah_handle, attrs);
}
-ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int ib_uverbs_attach_mcast(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_attach_mcast cmd;
struct ib_qp *qp;
@@ -2797,15 +2504,17 @@ ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
struct ib_uverbs_mcast_entry *mcast;
int ret;
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
- qp = idr_write_qp(cmd.qp_handle, file->ucontext);
- if (!qp)
- return -EINVAL;
+ qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
+ if (IS_ERR(qp))
+ return PTR_ERR(qp);
- obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
+ obj = qp->uobject;
+ mutex_lock(&obj->mcast_lock);
list_for_each_entry(mcast, &obj->mcast_list, list)
if (cmd.mlid == mcast->lid &&
!memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
@@ -2829,85 +2538,328 @@ ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
kfree(mcast);
out_put:
- put_qp_write(qp);
+ mutex_unlock(&obj->mcast_lock);
+ rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
- return ret ? ret : in_len;
+ return ret;
}
-ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int ib_uverbs_detach_mcast(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_detach_mcast cmd;
struct ib_uqp_object *obj;
struct ib_qp *qp;
struct ib_uverbs_mcast_entry *mcast;
- int ret = -EINVAL;
-
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
-
- qp = idr_write_qp(cmd.qp_handle, file->ucontext);
- if (!qp)
- return -EINVAL;
+ int ret;
+ bool found = false;
- ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
if (ret)
- goto out_put;
+ return ret;
+
+ qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
+ if (IS_ERR(qp))
+ return PTR_ERR(qp);
- obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
+ obj = qp->uobject;
+ mutex_lock(&obj->mcast_lock);
list_for_each_entry(mcast, &obj->mcast_list, list)
if (cmd.mlid == mcast->lid &&
!memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
list_del(&mcast->list);
kfree(mcast);
+ found = true;
break;
}
+ if (!found) {
+ ret = -EINVAL;
+ goto out_put;
+ }
+
+ ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);
+
out_put:
- put_qp_write(qp);
+ mutex_unlock(&obj->mcast_lock);
+ rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
+ return ret;
+}
+
+struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
+{
+ struct ib_uflow_resources *resources;
+
+ resources = kzalloc(sizeof(*resources), GFP_KERNEL);
+
+ if (!resources)
+ return NULL;
+
+ if (!num_specs)
+ goto out;
+
+ resources->counters =
+ kcalloc(num_specs, sizeof(*resources->counters), GFP_KERNEL);
+ resources->collection =
+ kcalloc(num_specs, sizeof(*resources->collection), GFP_KERNEL);
+
+ if (!resources->counters || !resources->collection)
+ goto err;
+
+out:
+ resources->max = num_specs;
+ return resources;
+
+err:
+ kfree(resources->counters);
+ kfree(resources);
- return ret ? ret : in_len;
+ return NULL;
}
+EXPORT_SYMBOL(flow_resources_alloc);
-static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
- union ib_flow_spec *ib_spec)
+void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
{
- if (kern_spec->reserved)
- return -EINVAL;
+ unsigned int i;
- ib_spec->type = kern_spec->type;
+ if (!uflow_res)
+ return;
+
+ for (i = 0; i < uflow_res->collection_num; i++)
+ atomic_dec(&uflow_res->collection[i]->usecnt);
+
+ for (i = 0; i < uflow_res->counters_num; i++)
+ atomic_dec(&uflow_res->counters[i]->usecnt);
+
+ kfree(uflow_res->collection);
+ kfree(uflow_res->counters);
+ kfree(uflow_res);
+}
+EXPORT_SYMBOL(ib_uverbs_flow_resources_free);
+
+void flow_resources_add(struct ib_uflow_resources *uflow_res,
+ enum ib_flow_spec_type type,
+ void *ibobj)
+{
+ WARN_ON(uflow_res->num >= uflow_res->max);
+
+ switch (type) {
+ case IB_FLOW_SPEC_ACTION_HANDLE:
+ atomic_inc(&((struct ib_flow_action *)ibobj)->usecnt);
+ uflow_res->collection[uflow_res->collection_num++] =
+ (struct ib_flow_action *)ibobj;
+ break;
+ case IB_FLOW_SPEC_ACTION_COUNT:
+ atomic_inc(&((struct ib_counters *)ibobj)->usecnt);
+ uflow_res->counters[uflow_res->counters_num++] =
+ (struct ib_counters *)ibobj;
+ break;
+ default:
+ WARN_ON(1);
+ }
+ uflow_res->num++;
+}
+EXPORT_SYMBOL(flow_resources_add);
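/*
 * Illustrative sketch, not part of the patch: intended life cycle of the
 * exported flow-resource helpers above. track_flow_objects_example() and
 * its arguments are hypothetical; only the flow_resources_*() and
 * ib_uverbs_flow_resources_free() calls come from this file.
 */
static int track_flow_objects_example(struct ib_counters *counters,
				      size_t num_specs)
{
	struct ib_uflow_resources *res;

	res = flow_resources_alloc(num_specs);
	if (!res)
		return -ENOMEM;

	/* take a reference on each object a flow spec points at */
	flow_resources_add(res, IB_FLOW_SPEC_ACTION_COUNT, counters);

	/* ... normally handed to ib_set_flow() and kept until destroy ... */

	/* on destroy: drop the references and free the bookkeeping */
	ib_uverbs_flow_resources_free(res);
	return 0;
}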
+
+static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
+ struct ib_uverbs_flow_spec *kern_spec,
+ union ib_flow_spec *ib_spec,
+ struct ib_uflow_resources *uflow_res)
+{
+ ib_spec->type = kern_spec->type;
switch (ib_spec->type) {
+ case IB_FLOW_SPEC_ACTION_TAG:
+ if (kern_spec->flow_tag.size !=
+ sizeof(struct ib_uverbs_flow_spec_action_tag))
+ return -EINVAL;
+
+ ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
+ ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
+ break;
+ case IB_FLOW_SPEC_ACTION_DROP:
+ if (kern_spec->drop.size !=
+ sizeof(struct ib_uverbs_flow_spec_action_drop))
+ return -EINVAL;
+
+ ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
+ break;
+ case IB_FLOW_SPEC_ACTION_HANDLE:
+ if (kern_spec->action.size !=
+ sizeof(struct ib_uverbs_flow_spec_action_handle))
+ return -EOPNOTSUPP;
+ ib_spec->action.act = uobj_get_obj_read(flow_action,
+ UVERBS_OBJECT_FLOW_ACTION,
+ kern_spec->action.handle,
+ attrs);
+ if (IS_ERR(ib_spec->action.act))
+ return PTR_ERR(ib_spec->action.act);
+ ib_spec->action.size =
+ sizeof(struct ib_flow_spec_action_handle);
+ flow_resources_add(uflow_res,
+ IB_FLOW_SPEC_ACTION_HANDLE,
+ ib_spec->action.act);
+ uobj_put_obj_read(ib_spec->action.act);
+ break;
+ case IB_FLOW_SPEC_ACTION_COUNT:
+ if (kern_spec->flow_count.size !=
+ sizeof(struct ib_uverbs_flow_spec_action_count))
+ return -EINVAL;
+ ib_spec->flow_count.counters =
+ uobj_get_obj_read(counters,
+ UVERBS_OBJECT_COUNTERS,
+ kern_spec->flow_count.handle,
+ attrs);
+ if (IS_ERR(ib_spec->flow_count.counters))
+ return PTR_ERR(ib_spec->flow_count.counters);
+ ib_spec->flow_count.size =
+ sizeof(struct ib_flow_spec_action_count);
+ flow_resources_add(uflow_res,
+ IB_FLOW_SPEC_ACTION_COUNT,
+ ib_spec->flow_count.counters);
+ uobj_put_obj_read(ib_spec->flow_count.counters);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size,
+ u16 ib_real_filter_sz)
+{
+ /*
+ * User space filter structures must be 64 bit aligned, otherwise this
+ * may pass, but we won't handle additional new attributes.
+ */
+
+ if (kern_filter_size > ib_real_filter_sz) {
+ if (memchr_inv(kern_spec_filter +
+ ib_real_filter_sz, 0,
+ kern_filter_size - ib_real_filter_sz))
+ return -EINVAL;
+ return ib_real_filter_sz;
+ }
+ return kern_filter_size;
+}
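/*
 * Illustrative sketch, not part of the patch: spec_filter_size() specialised
 * to the ETH filter, showing the trailing-bytes rule for filters coming from
 * a newer userspace. eth_filter_size_example() is a hypothetical name.
 */
static ssize_t eth_filter_size_example(const void *kern_filter,
				       u16 kern_filter_size)
{
	const u16 known = sizeof(struct ib_flow_eth_filter);

	if (kern_filter_size <= known)
		return kern_filter_size;	/* same or older userspace layout */

	/* newer userspace: unknown trailing attributes must be zero */
	if (memchr_inv(kern_filter + known, 0, kern_filter_size - known))
		return -EINVAL;
	return known;
}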
+
+int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
+ const void *kern_spec_mask,
+ const void *kern_spec_val,
+ size_t kern_filter_sz,
+ union ib_flow_spec *ib_spec)
+{
+ ssize_t actual_filter_sz;
+ ssize_t ib_filter_sz;
+
+ /* User flow spec size must be aligned to 4 bytes */
+ if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
+ return -EINVAL;
+
+ ib_spec->type = type;
+
+ if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
+ return -EINVAL;
+
+ switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
case IB_FLOW_SPEC_ETH:
- ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
- if (ib_spec->eth.size != kern_spec->eth.size)
+ ib_filter_sz = sizeof(struct ib_flow_eth_filter);
+ actual_filter_sz = spec_filter_size(kern_spec_mask,
+ kern_filter_sz,
+ ib_filter_sz);
+ if (actual_filter_sz <= 0)
return -EINVAL;
- memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
- sizeof(struct ib_flow_eth_filter));
- memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
- sizeof(struct ib_flow_eth_filter));
+ ib_spec->size = sizeof(struct ib_flow_spec_eth);
+ memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
+ memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
break;
case IB_FLOW_SPEC_IPV4:
- ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
- if (ib_spec->ipv4.size != kern_spec->ipv4.size)
+ ib_filter_sz = sizeof(struct ib_flow_ipv4_filter);
+ actual_filter_sz = spec_filter_size(kern_spec_mask,
+ kern_filter_sz,
+ ib_filter_sz);
+ if (actual_filter_sz <= 0)
+ return -EINVAL;
+ ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
+ memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
+ memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
+ break;
+ case IB_FLOW_SPEC_IPV6:
+ ib_filter_sz = sizeof(struct ib_flow_ipv6_filter);
+ actual_filter_sz = spec_filter_size(kern_spec_mask,
+ kern_filter_sz,
+ ib_filter_sz);
+ if (actual_filter_sz <= 0)
+ return -EINVAL;
+ ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
+ memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
+ memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);
+
+ if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
+ (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
return -EINVAL;
- memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
- sizeof(struct ib_flow_ipv4_filter));
- memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
- sizeof(struct ib_flow_ipv4_filter));
break;
case IB_FLOW_SPEC_TCP:
case IB_FLOW_SPEC_UDP:
- ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
- if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
+ ib_filter_sz = sizeof(struct ib_flow_tcp_udp_filter);
+ actual_filter_sz = spec_filter_size(kern_spec_mask,
+ kern_filter_sz,
+ ib_filter_sz);
+ if (actual_filter_sz <= 0)
return -EINVAL;
- memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
- sizeof(struct ib_flow_tcp_udp_filter));
- memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
- sizeof(struct ib_flow_tcp_udp_filter));
+ ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
+ memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
+ memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
+ break;
+ case IB_FLOW_SPEC_VXLAN_TUNNEL:
+ ib_filter_sz = sizeof(struct ib_flow_tunnel_filter);
+ actual_filter_sz = spec_filter_size(kern_spec_mask,
+ kern_filter_sz,
+ ib_filter_sz);
+ if (actual_filter_sz <= 0)
+ return -EINVAL;
+ ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
+ memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
+ memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);
+
+ if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
+ (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
+ return -EINVAL;
+ break;
+ case IB_FLOW_SPEC_ESP:
+ ib_filter_sz = sizeof(struct ib_flow_esp_filter);
+ actual_filter_sz = spec_filter_size(kern_spec_mask,
+ kern_filter_sz,
+ ib_filter_sz);
+ if (actual_filter_sz <= 0)
+ return -EINVAL;
+ ib_spec->esp.size = sizeof(struct ib_flow_spec_esp);
+ memcpy(&ib_spec->esp.val, kern_spec_val, actual_filter_sz);
+ memcpy(&ib_spec->esp.mask, kern_spec_mask, actual_filter_sz);
+ break;
+ case IB_FLOW_SPEC_GRE:
+ ib_filter_sz = sizeof(struct ib_flow_gre_filter);
+ actual_filter_sz = spec_filter_size(kern_spec_mask,
+ kern_filter_sz,
+ ib_filter_sz);
+ if (actual_filter_sz <= 0)
+ return -EINVAL;
+ ib_spec->gre.size = sizeof(struct ib_flow_spec_gre);
+ memcpy(&ib_spec->gre.val, kern_spec_val, actual_filter_sz);
+ memcpy(&ib_spec->gre.mask, kern_spec_mask, actual_filter_sz);
+ break;
+ case IB_FLOW_SPEC_MPLS:
+ ib_filter_sz = sizeof(struct ib_flow_mpls_filter);
+ actual_filter_sz = spec_filter_size(kern_spec_mask,
+ kern_filter_sz,
+ ib_filter_sz);
+ if (actual_filter_sz <= 0)
+ return -EINVAL;
+ ib_spec->mpls.size = sizeof(struct ib_flow_spec_mpls);
+ memcpy(&ib_spec->mpls.val, kern_spec_val, actual_filter_sz);
+ memcpy(&ib_spec->mpls.mask, kern_spec_mask, actual_filter_sz);
break;
default:
return -EINVAL;
@@ -2915,48 +2867,380 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
return 0;
}
-int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- struct ib_udata *ucore,
- struct ib_udata *uhw)
+static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
+ union ib_flow_spec *ib_spec)
+{
+ size_t kern_filter_sz;
+ void *kern_spec_mask;
+ void *kern_spec_val;
+
+ if (check_sub_overflow((size_t)kern_spec->hdr.size,
+ sizeof(struct ib_uverbs_flow_spec_hdr),
+ &kern_filter_sz))
+ return -EINVAL;
+
+ kern_filter_sz /= 2;
+
+ kern_spec_val = (void *)kern_spec +
+ sizeof(struct ib_uverbs_flow_spec_hdr);
+ kern_spec_mask = kern_spec_val + kern_filter_sz;
+
+ return ib_uverbs_kern_spec_to_ib_spec_filter(kern_spec->type,
+ kern_spec_mask,
+ kern_spec_val,
+ kern_filter_sz, ib_spec);
+}
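/*
 * Illustrative sketch, not part of the patch: memory layout assumed by
 * kern_spec_to_ib_spec_filter() for a filter spec of hdr.size bytes:
 *
 *   +--------------------------------+  <- kern_spec
 *   | struct ib_uverbs_flow_spec_hdr |
 *   +--------------------------------+  <- kern_spec_val
 *   | filter value (kern_filter_sz)  |
 *   +--------------------------------+  <- kern_spec_mask
 *   | filter mask  (kern_filter_sz)  |
 *   +--------------------------------+
 *
 * with kern_filter_sz = (hdr.size - sizeof(hdr)) / 2; check_sub_overflow()
 * above rejects specs shorter than the header itself.
 */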
+
+static int kern_spec_to_ib_spec(struct uverbs_attr_bundle *attrs,
+ struct ib_uverbs_flow_spec *kern_spec,
+ union ib_flow_spec *ib_spec,
+ struct ib_uflow_resources *uflow_res)
+{
+ if (kern_spec->reserved)
+ return -EINVAL;
+
+ if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
+ return kern_spec_to_ib_spec_action(attrs, kern_spec, ib_spec,
+ uflow_res);
+ else
+ return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
+}
+
+static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uverbs_ex_create_wq cmd;
+ struct ib_uverbs_ex_create_wq_resp resp = {};
+ struct ib_uwq_object *obj;
+ int err = 0;
+ struct ib_cq *cq;
+ struct ib_pd *pd;
+ struct ib_wq *wq;
+ struct ib_wq_init_attr wq_init_attr = {};
+ struct ib_device *ib_dev;
+
+ err = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (err)
+ return err;
+
+ if (cmd.comp_mask)
+ return -EOPNOTSUPP;
+
+ obj = (struct ib_uwq_object *)uobj_alloc(UVERBS_OBJECT_WQ, attrs,
+ &ib_dev);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
+ if (IS_ERR(pd)) {
+ err = PTR_ERR(pd);
+ goto err_uobj;
+ }
+
+ cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
+ if (IS_ERR(cq)) {
+ err = PTR_ERR(cq);
+ goto err_put_pd;
+ }
+
+ wq_init_attr.cq = cq;
+ wq_init_attr.max_sge = cmd.max_sge;
+ wq_init_attr.max_wr = cmd.max_wr;
+ wq_init_attr.wq_type = cmd.wq_type;
+ wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
+ wq_init_attr.create_flags = cmd.create_flags;
+ INIT_LIST_HEAD(&obj->uevent.event_list);
+ obj->uevent.uobject.user_handle = cmd.user_handle;
+
+ wq = pd->device->ops.create_wq(pd, &wq_init_attr, &attrs->driver_udata);
+ if (IS_ERR(wq)) {
+ err = PTR_ERR(wq);
+ goto err_put_cq;
+ }
+
+ wq->uobject = obj;
+ obj->uevent.uobject.object = wq;
+ wq->wq_type = wq_init_attr.wq_type;
+ wq->cq = cq;
+ wq->pd = pd;
+ wq->device = pd->device;
+ atomic_set(&wq->usecnt, 0);
+ atomic_inc(&pd->usecnt);
+ atomic_inc(&cq->usecnt);
+ obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
+ if (obj->uevent.event_file)
+ uverbs_uobject_get(&obj->uevent.event_file->uobj);
+
+ uobj_put_obj_read(pd);
+ rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
+ uobj_finalize_uobj_create(&obj->uevent.uobject, attrs);
+
+ resp.wq_handle = obj->uevent.uobject.id;
+ resp.max_sge = wq_init_attr.max_sge;
+ resp.max_wr = wq_init_attr.max_wr;
+ resp.wqn = wq->wq_num;
+ resp.response_length = uverbs_response_length(attrs, sizeof(resp));
+ return uverbs_response(attrs, &resp, sizeof(resp));
+
+err_put_cq:
+ rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
+err_put_pd:
+ uobj_put_obj_read(pd);
+err_uobj:
+ uobj_alloc_abort(&obj->uevent.uobject, attrs);
+
+ return err;
+}
+
+static int ib_uverbs_ex_destroy_wq(struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uverbs_ex_destroy_wq cmd;
+ struct ib_uverbs_ex_destroy_wq_resp resp = {};
+ struct ib_uobject *uobj;
+ struct ib_uwq_object *obj;
+ int ret;
+
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
+
+ if (cmd.comp_mask)
+ return -EOPNOTSUPP;
+
+ resp.response_length = uverbs_response_length(attrs, sizeof(resp));
+ uobj = uobj_get_destroy(UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
+ if (IS_ERR(uobj))
+ return PTR_ERR(uobj);
+
+ obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
+ resp.events_reported = obj->uevent.events_reported;
+
+ uobj_put_destroy(uobj);
+
+ return uverbs_response(attrs, &resp, sizeof(resp));
+}
+
+static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uverbs_ex_modify_wq cmd;
+ struct ib_wq *wq;
+ struct ib_wq_attr wq_attr = {};
+ int ret;
+
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
+
+ if (!cmd.attr_mask)
+ return -EINVAL;
+
+ if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
+ return -EINVAL;
+
+ wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
+ if (IS_ERR(wq))
+ return PTR_ERR(wq);
+
+ if (cmd.attr_mask & IB_WQ_FLAGS) {
+ wq_attr.flags = cmd.flags;
+ wq_attr.flags_mask = cmd.flags_mask;
+ }
+
+ if (cmd.attr_mask & IB_WQ_CUR_STATE) {
+ if (cmd.curr_wq_state > IB_WQS_ERR)
+ return -EINVAL;
+
+ wq_attr.curr_wq_state = cmd.curr_wq_state;
+ } else {
+ wq_attr.curr_wq_state = wq->state;
+ }
+
+ if (cmd.attr_mask & IB_WQ_STATE) {
+ if (cmd.wq_state > IB_WQS_ERR)
+ return -EINVAL;
+
+ wq_attr.wq_state = cmd.wq_state;
+ } else {
+ wq_attr.wq_state = wq_attr.curr_wq_state;
+ }
+
+ ret = wq->device->ops.modify_wq(wq, &wq_attr, cmd.attr_mask,
+ &attrs->driver_udata);
+ rdma_lookup_put_uobject(&wq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
+ return ret;
+}
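/*
 * Worked example (sketch, not part of the patch): a modify request with
 * attr_mask = IB_WQ_FLAGS only resolves above to
 *   wq_attr.curr_wq_state = wq->state          (no IB_WQ_CUR_STATE given)
 *   wq_attr.wq_state      = wq_attr.curr_wq_state
 * so the WQ state is left unchanged and only the flags are updated.
 */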
+
+static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uverbs_ex_create_rwq_ind_table cmd;
+ struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
+ struct ib_uobject *uobj;
+ int err;
+ struct ib_rwq_ind_table_init_attr init_attr = {};
+ struct ib_rwq_ind_table *rwq_ind_tbl;
+ struct ib_wq **wqs = NULL;
+ u32 *wqs_handles = NULL;
+ struct ib_wq *wq = NULL;
+ int i, num_read_wqs;
+ u32 num_wq_handles;
+ struct uverbs_req_iter iter;
+ struct ib_device *ib_dev;
+
+ err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
+ if (err)
+ return err;
+
+ if (cmd.comp_mask)
+ return -EOPNOTSUPP;
+
+ if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
+ return -EINVAL;
+
+ num_wq_handles = 1 << cmd.log_ind_tbl_size;
+ wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
+ GFP_KERNEL);
+ if (!wqs_handles)
+ return -ENOMEM;
+
+ err = uverbs_request_next(&iter, wqs_handles,
+ num_wq_handles * sizeof(__u32));
+ if (err)
+ goto err_free;
+
+ err = uverbs_request_finish(&iter);
+ if (err)
+ goto err_free;
+
+ wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
+ if (!wqs) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
+ num_read_wqs++) {
+ wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ,
+ wqs_handles[num_read_wqs], attrs);
+ if (IS_ERR(wq)) {
+ err = PTR_ERR(wq);
+ goto put_wqs;
+ }
+
+ wqs[num_read_wqs] = wq;
+ atomic_inc(&wqs[num_read_wqs]->usecnt);
+ }
+
+ uobj = uobj_alloc(UVERBS_OBJECT_RWQ_IND_TBL, attrs, &ib_dev);
+ if (IS_ERR(uobj)) {
+ err = PTR_ERR(uobj);
+ goto put_wqs;
+ }
+
+ rwq_ind_tbl = rdma_zalloc_drv_obj(ib_dev, ib_rwq_ind_table);
+ if (!rwq_ind_tbl) {
+ err = -ENOMEM;
+ goto err_uobj;
+ }
+
+ init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
+ init_attr.ind_tbl = wqs;
+
+ rwq_ind_tbl->ind_tbl = wqs;
+ rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
+ rwq_ind_tbl->uobject = uobj;
+ uobj->object = rwq_ind_tbl;
+ rwq_ind_tbl->device = ib_dev;
+ atomic_set(&rwq_ind_tbl->usecnt, 0);
+
+ err = ib_dev->ops.create_rwq_ind_table(rwq_ind_tbl, &init_attr,
+ &attrs->driver_udata);
+ if (err)
+ goto err_create;
+
+ for (i = 0; i < num_wq_handles; i++)
+ rdma_lookup_put_uobject(&wqs[i]->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
+ kfree(wqs_handles);
+ uobj_finalize_uobj_create(uobj, attrs);
+
+ resp.ind_tbl_handle = uobj->id;
+ resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
+ resp.response_length = uverbs_response_length(attrs, sizeof(resp));
+ return uverbs_response(attrs, &resp, sizeof(resp));
+
+err_create:
+ kfree(rwq_ind_tbl);
+err_uobj:
+ uobj_alloc_abort(uobj, attrs);
+put_wqs:
+ for (i = 0; i < num_read_wqs; i++) {
+ rdma_lookup_put_uobject(&wqs[i]->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
+ atomic_dec(&wqs[i]->usecnt);
+ }
+err_free:
+ kfree(wqs_handles);
+ kfree(wqs);
+ return err;
+}
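/*
 * Worked example (sketch, not part of the patch): for a request with
 * cmd.log_ind_tbl_size = 3, the handler above reads
 *   num_wq_handles = 1 << 3 = 8
 * WQ handles, i.e. a trailing array of 8 * sizeof(__u32) = 32 bytes, via
 * uverbs_request_next(), before building the indirection table.
 */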
+
+static int ib_uverbs_ex_destroy_rwq_ind_table(struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uverbs_ex_destroy_rwq_ind_table cmd;
+ int ret;
+
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
+
+ if (cmd.comp_mask)
+ return -EOPNOTSUPP;
+
+ return uobj_perform_destroy(UVERBS_OBJECT_RWQ_IND_TBL,
+ cmd.ind_tbl_handle, attrs);
+}
+
+static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_create_flow cmd;
- struct ib_uverbs_create_flow_resp resp;
+ struct ib_uverbs_create_flow_resp resp = {};
struct ib_uobject *uobj;
struct ib_flow *flow_id;
struct ib_uverbs_flow_attr *kern_flow_attr;
struct ib_flow_attr *flow_attr;
struct ib_qp *qp;
- int err = 0;
- void *kern_spec;
+ struct ib_uflow_resources *uflow_res;
+ struct ib_uverbs_flow_spec_hdr *kern_spec;
+ struct uverbs_req_iter iter;
+ int err;
void *ib_spec;
int i;
+ struct ib_device *ib_dev;
- if (ucore->inlen < sizeof(cmd))
- return -EINVAL;
-
- if (ucore->outlen < sizeof(resp))
- return -ENOSPC;
-
- err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
+ err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
if (err)
return err;
- ucore->inbuf += sizeof(cmd);
- ucore->inlen -= sizeof(cmd);
-
if (cmd.comp_mask)
return -EINVAL;
- if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
- !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
+ if (!rdma_uattrs_has_raw_cap(attrs))
return -EPERM;
+ if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
+ return -EINVAL;
+
+ if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
+ ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
+ (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
+ return -EINVAL;
+
if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
return -EINVAL;
- if (cmd.flow_attr.size > ucore->inlen ||
- cmd.flow_attr.size >
+ if (cmd.flow_attr.size >
(cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
return -EINVAL;
@@ -2970,34 +3254,52 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
if (!kern_flow_attr)
return -ENOMEM;
- memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
- err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
- cmd.flow_attr.size);
+ *kern_flow_attr = cmd.flow_attr;
+ err = uverbs_request_next(&iter, &kern_flow_attr->flow_specs,
+ cmd.flow_attr.size);
if (err)
goto err_free_attr;
} else {
kern_flow_attr = &cmd.flow_attr;
}
- uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
- if (!uobj) {
- err = -ENOMEM;
+ err = uverbs_request_finish(&iter);
+ if (err)
+ goto err_free_attr;
+
+ uobj = uobj_alloc(UVERBS_OBJECT_FLOW, attrs, &ib_dev);
+ if (IS_ERR(uobj)) {
+ err = PTR_ERR(uobj);
goto err_free_attr;
}
- init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
- down_write(&uobj->mutex);
- qp = idr_read_qp(cmd.qp_handle, file->ucontext);
- if (!qp) {
+ if (!rdma_is_port_valid(uobj->context->device, cmd.flow_attr.port)) {
err = -EINVAL;
goto err_uobj;
}
- flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
+ qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
+ if (IS_ERR(qp)) {
+ err = PTR_ERR(qp);
+ goto err_uobj;
+ }
+
+ if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) {
+ err = -EINVAL;
+ goto err_put;
+ }
+
+ flow_attr = kzalloc(struct_size(flow_attr, flows,
+ cmd.flow_attr.num_of_specs), GFP_KERNEL);
if (!flow_attr) {
err = -ENOMEM;
goto err_put;
}
+ uflow_res = flow_resources_alloc(cmd.flow_attr.num_of_specs);
+ if (!uflow_res) {
+ err = -ENOMEM;
+ goto err_free_flow_attr;
+ }
flow_attr->type = kern_flow_attr->type;
flow_attr->priority = kern_flow_attr->priority;
@@ -3006,270 +3308,204 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
flow_attr->flags = kern_flow_attr->flags;
flow_attr->size = sizeof(*flow_attr);
- kern_spec = kern_flow_attr + 1;
+ kern_spec = kern_flow_attr->flow_specs;
ib_spec = flow_attr + 1;
for (i = 0; i < flow_attr->num_of_specs &&
- cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
- cmd.flow_attr.size >=
- ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
- err = kern_spec_to_ib_spec(kern_spec, ib_spec);
+ cmd.flow_attr.size >= sizeof(*kern_spec) &&
+ cmd.flow_attr.size >= kern_spec->size;
+ i++) {
+ err = kern_spec_to_ib_spec(
+ attrs, (struct ib_uverbs_flow_spec *)kern_spec,
+ ib_spec, uflow_res);
if (err)
goto err_free;
+
flow_attr->size +=
((union ib_flow_spec *) ib_spec)->size;
- cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
- kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
+ cmd.flow_attr.size -= kern_spec->size;
+ kern_spec = ((void *)kern_spec) + kern_spec->size;
ib_spec += ((union ib_flow_spec *) ib_spec)->size;
}
if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
- pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
+ pr_warn("create flow failed, flow %d: %u bytes left from uverb cmd\n",
i, cmd.flow_attr.size);
err = -EINVAL;
goto err_free;
}
- flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
+
+ flow_id = qp->device->ops.create_flow(qp, flow_attr,
+ &attrs->driver_udata);
+
if (IS_ERR(flow_id)) {
err = PTR_ERR(flow_id);
goto err_free;
}
- flow_id->qp = qp;
- flow_id->uobject = uobj;
- uobj->object = flow_id;
-
- err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
- if (err)
- goto destroy_flow;
-
- memset(&resp, 0, sizeof(resp));
- resp.flow_handle = uobj->id;
-
- err = ib_copy_to_udata(ucore,
- &resp, sizeof(resp));
- if (err)
- goto err_copy;
-
- put_qp_read(qp);
- mutex_lock(&file->mutex);
- list_add_tail(&uobj->list, &file->ucontext->rule_list);
- mutex_unlock(&file->mutex);
- uobj->live = 1;
+ ib_set_flow(uobj, flow_id, qp, qp->device, uflow_res);
- up_write(&uobj->mutex);
+ rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
kfree(flow_attr);
+
if (cmd.flow_attr.num_of_specs)
kfree(kern_flow_attr);
- return 0;
-err_copy:
- idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
-destroy_flow:
- ib_destroy_flow(flow_id);
+ uobj_finalize_uobj_create(uobj, attrs);
+
+ resp.flow_handle = uobj->id;
+ return uverbs_response(attrs, &resp, sizeof(resp));
+
err_free:
+ ib_uverbs_flow_resources_free(uflow_res);
+err_free_flow_attr:
kfree(flow_attr);
err_put:
- put_qp_read(qp);
+ rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
err_uobj:
- put_uobj_write(uobj);
+ uobj_alloc_abort(uobj, attrs);
err_free_attr:
if (cmd.flow_attr.num_of_specs)
kfree(kern_flow_attr);
return err;
}
-int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- struct ib_udata *ucore,
- struct ib_udata *uhw)
+static int ib_uverbs_ex_destroy_flow(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_destroy_flow cmd;
- struct ib_flow *flow_id;
- struct ib_uobject *uobj;
int ret;
- if (ucore->inlen < sizeof(cmd))
- return -EINVAL;
-
- ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
if (ret)
return ret;
if (cmd.comp_mask)
return -EINVAL;
- uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
- file->ucontext);
- if (!uobj)
- return -EINVAL;
- flow_id = uobj->object;
-
- ret = ib_destroy_flow(flow_id);
- if (!ret)
- uobj->live = 0;
-
- put_uobj_write(uobj);
-
- idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
-
- mutex_lock(&file->mutex);
- list_del(&uobj->list);
- mutex_unlock(&file->mutex);
-
- put_uobj(uobj);
-
- return ret;
+ return uobj_perform_destroy(UVERBS_OBJECT_FLOW, cmd.flow_handle, attrs);
}
-static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
+static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
struct ib_uverbs_create_xsrq *cmd,
struct ib_udata *udata)
{
- struct ib_uverbs_create_srq_resp resp;
+ struct ib_uverbs_create_srq_resp resp = {};
struct ib_usrq_object *obj;
struct ib_pd *pd;
struct ib_srq *srq;
- struct ib_uobject *uninitialized_var(xrcd_uobj);
struct ib_srq_init_attr attr;
int ret;
+ struct ib_uobject *xrcd_uobj;
+ struct ib_device *ib_dev;
- obj = kmalloc(sizeof *obj, GFP_KERNEL);
- if (!obj)
- return -ENOMEM;
+ obj = (struct ib_usrq_object *)uobj_alloc(UVERBS_OBJECT_SRQ, attrs,
+ &ib_dev);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
- init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
- down_write(&obj->uevent.uobject.mutex);
+ if (cmd->srq_type == IB_SRQT_TM)
+ attr.ext.tag_matching.max_num_tags = cmd->max_num_tags;
if (cmd->srq_type == IB_SRQT_XRC) {
- attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
- if (!attr.ext.xrc.xrcd) {
+ xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->xrcd_handle,
+ attrs);
+ if (IS_ERR(xrcd_uobj)) {
ret = -EINVAL;
goto err;
}
+ attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
+ if (!attr.ext.xrc.xrcd) {
+ ret = -EINVAL;
+ goto err_put_xrcd;
+ }
+
obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
atomic_inc(&obj->uxrcd->refcnt);
+ }
- attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
- if (!attr.ext.xrc.cq) {
- ret = -EINVAL;
+ if (ib_srq_has_cq(cmd->srq_type)) {
+ attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
+ cmd->cq_handle, attrs);
+ if (IS_ERR(attr.ext.cq)) {
+ ret = PTR_ERR(attr.ext.cq);
goto err_put_xrcd;
}
}
- pd = idr_read_pd(cmd->pd_handle, file->ucontext);
- if (!pd) {
- ret = -EINVAL;
+ pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, attrs);
+ if (IS_ERR(pd)) {
+ ret = PTR_ERR(pd);
goto err_put_cq;
}
attr.event_handler = ib_uverbs_srq_event_handler;
- attr.srq_context = file;
attr.srq_type = cmd->srq_type;
attr.attr.max_wr = cmd->max_wr;
attr.attr.max_sge = cmd->max_sge;
attr.attr.srq_limit = cmd->srq_limit;
- obj->uevent.events_reported = 0;
INIT_LIST_HEAD(&obj->uevent.event_list);
+ obj->uevent.uobject.user_handle = cmd->user_handle;
- srq = pd->device->create_srq(pd, &attr, udata);
+ srq = ib_create_srq_user(pd, &attr, obj, udata);
if (IS_ERR(srq)) {
ret = PTR_ERR(srq);
- goto err_put;
- }
-
- srq->device = pd->device;
- srq->pd = pd;
- srq->srq_type = cmd->srq_type;
- srq->uobject = &obj->uevent.uobject;
- srq->event_handler = attr.event_handler;
- srq->srq_context = attr.srq_context;
-
- if (cmd->srq_type == IB_SRQT_XRC) {
- srq->ext.xrc.cq = attr.ext.xrc.cq;
- srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
- atomic_inc(&attr.ext.xrc.cq->usecnt);
- atomic_inc(&attr.ext.xrc.xrcd->usecnt);
+ goto err_put_pd;
}
- atomic_inc(&pd->usecnt);
- atomic_set(&srq->usecnt, 0);
-
obj->uevent.uobject.object = srq;
- ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
- if (ret)
- goto err_destroy;
+ obj->uevent.uobject.user_handle = cmd->user_handle;
+ obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
+ if (obj->uevent.event_file)
+ uverbs_uobject_get(&obj->uevent.event_file->uobj);
- memset(&resp, 0, sizeof resp);
- resp.srq_handle = obj->uevent.uobject.id;
- resp.max_wr = attr.attr.max_wr;
- resp.max_sge = attr.attr.max_sge;
if (cmd->srq_type == IB_SRQT_XRC)
resp.srqn = srq->ext.xrc.srq_num;
- if (copy_to_user((void __user *) (unsigned long) cmd->response,
- &resp, sizeof resp)) {
- ret = -EFAULT;
- goto err_copy;
- }
-
- if (cmd->srq_type == IB_SRQT_XRC) {
- put_uobj_read(xrcd_uobj);
- put_cq_read(attr.ext.xrc.cq);
- }
- put_pd_read(pd);
-
- mutex_lock(&file->mutex);
- list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
- mutex_unlock(&file->mutex);
-
- obj->uevent.uobject.live = 1;
-
- up_write(&obj->uevent.uobject.mutex);
-
- return 0;
+ if (cmd->srq_type == IB_SRQT_XRC)
+ uobj_put_read(xrcd_uobj);
-err_copy:
- idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
+ if (ib_srq_has_cq(cmd->srq_type))
+ rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
-err_destroy:
- ib_destroy_srq(srq);
+ uobj_put_obj_read(pd);
+ uobj_finalize_uobj_create(&obj->uevent.uobject, attrs);
-err_put:
- put_pd_read(pd);
+ resp.srq_handle = obj->uevent.uobject.id;
+ resp.max_wr = attr.attr.max_wr;
+ resp.max_sge = attr.attr.max_sge;
+ return uverbs_response(attrs, &resp, sizeof(resp));
+err_put_pd:
+ uobj_put_obj_read(pd);
err_put_cq:
- if (cmd->srq_type == IB_SRQT_XRC)
- put_cq_read(attr.ext.xrc.cq);
+ if (ib_srq_has_cq(cmd->srq_type))
+ rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
err_put_xrcd:
if (cmd->srq_type == IB_SRQT_XRC) {
atomic_dec(&obj->uxrcd->refcnt);
- put_uobj_read(xrcd_uobj);
+ uobj_put_read(xrcd_uobj);
}
err:
- put_uobj_write(&obj->uevent.uobject);
+ uobj_alloc_abort(&obj->uevent.uobject, attrs);
return ret;
}
-ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int ib_uverbs_create_srq(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_create_srq cmd;
struct ib_uverbs_create_xsrq xcmd;
- struct ib_uverbs_create_srq_resp resp;
- struct ib_udata udata;
int ret;
- if (out_len < sizeof resp)
- return -ENOSPC;
-
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
+ memset(&xcmd, 0, sizeof(xcmd));
xcmd.response = cmd.response;
xcmd.user_handle = cmd.user_handle;
xcmd.srq_type = IB_SRQT_BASIC;
@@ -3278,78 +3514,49 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
xcmd.max_sge = cmd.max_sge;
xcmd.srq_limit = cmd.srq_limit;
- INIT_UDATA(&udata, buf + sizeof cmd,
- (unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd, out_len - sizeof resp);
-
- ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
- if (ret)
- return ret;
-
- return in_len;
+ return __uverbs_create_xsrq(attrs, &xcmd, &attrs->driver_udata);
}
-ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len, int out_len)
+static int ib_uverbs_create_xsrq(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_create_xsrq cmd;
- struct ib_uverbs_create_srq_resp resp;
- struct ib_udata udata;
int ret;
- if (out_len < sizeof resp)
- return -ENOSPC;
-
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
-
- INIT_UDATA(&udata, buf + sizeof cmd,
- (unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd, out_len - sizeof resp);
-
- ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
if (ret)
return ret;
- return in_len;
+ return __uverbs_create_xsrq(attrs, &cmd, &attrs->driver_udata);
}
-ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_modify_srq cmd;
- struct ib_udata udata;
struct ib_srq *srq;
struct ib_srq_attr attr;
int ret;
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
- INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
- out_len);
-
- srq = idr_read_srq(cmd.srq_handle, file->ucontext);
- if (!srq)
- return -EINVAL;
+ srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
+ if (IS_ERR(srq))
+ return PTR_ERR(srq);
attr.max_wr = cmd.max_wr;
attr.srq_limit = cmd.srq_limit;
- ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
+ ret = srq->device->ops.modify_srq(srq, &attr, cmd.attr_mask,
+ &attrs->driver_udata);
- put_srq_read(srq);
+ rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
- return ret ? ret : in_len;
+ return ret;
}
-ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf,
- int in_len, int out_len)
+static int ib_uverbs_query_srq(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_query_srq cmd;
struct ib_uverbs_query_srq_resp resp;
@@ -3357,19 +3564,18 @@ ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
struct ib_srq *srq;
int ret;
- if (out_len < sizeof resp)
- return -ENOSPC;
-
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
- srq = idr_read_srq(cmd.srq_handle, file->ucontext);
- if (!srq)
- return -EINVAL;
+ srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
+ if (IS_ERR(srq))
+ return PTR_ERR(srq);
ret = ib_query_srq(srq, &attr);
- put_srq_read(srq);
+ rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
if (ret)
return ret;
@@ -3380,85 +3586,49 @@ ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
resp.max_sge = attr.max_sge;
resp.srq_limit = attr.srq_limit;
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
- return -EFAULT;
-
- return in_len;
+ return uverbs_response(attrs, &resp, sizeof(resp));
}
-ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int ib_uverbs_destroy_srq(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_destroy_srq cmd;
struct ib_uverbs_destroy_srq_resp resp;
struct ib_uobject *uobj;
- struct ib_srq *srq;
struct ib_uevent_object *obj;
- int ret = -EINVAL;
- struct ib_usrq_object *us;
- enum ib_srq_type srq_type;
-
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
-
- uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
- if (!uobj)
- return -EINVAL;
- srq = uobj->object;
- obj = container_of(uobj, struct ib_uevent_object, uobject);
- srq_type = srq->srq_type;
-
- ret = ib_destroy_srq(srq);
- if (!ret)
- uobj->live = 0;
-
- put_uobj_write(uobj);
+ int ret;
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
if (ret)
return ret;
- if (srq_type == IB_SRQT_XRC) {
- us = container_of(obj, struct ib_usrq_object, uevent);
- atomic_dec(&us->uxrcd->refcnt);
- }
-
- idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
-
- mutex_lock(&file->mutex);
- list_del(&uobj->list);
- mutex_unlock(&file->mutex);
-
- ib_uverbs_release_uevent(file, obj);
+ uobj = uobj_get_destroy(UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
+ if (IS_ERR(uobj))
+ return PTR_ERR(uobj);
- memset(&resp, 0, sizeof resp);
+ obj = container_of(uobj, struct ib_uevent_object, uobject);
+ memset(&resp, 0, sizeof(resp));
resp.events_reported = obj->events_reported;
- put_uobj(uobj);
-
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
- ret = -EFAULT;
+ uobj_put_destroy(uobj);
- return ret ? ret : in_len;
+ return uverbs_response(attrs, &resp, sizeof(resp));
}
-int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- struct ib_udata *ucore,
- struct ib_udata *uhw)
+static int ib_uverbs_ex_query_device(struct uverbs_attr_bundle *attrs)
{
- struct ib_uverbs_ex_query_device_resp resp;
+ struct ib_uverbs_ex_query_device_resp resp = {};
struct ib_uverbs_ex_query_device cmd;
- struct ib_device_attr attr;
+ struct ib_device_attr attr = {0};
+ struct ib_ucontext *ucontext;
+ struct ib_device *ib_dev;
int err;
- if (ucore->inlen < sizeof(cmd))
- return -EINVAL;
+ ucontext = ib_uverbs_get_ucontext(attrs);
+ if (IS_ERR(ucontext))
+ return PTR_ERR(ucontext);
+ ib_dev = ucontext->device;
- err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
+ err = uverbs_request(attrs, &cmd, sizeof(cmd));
if (err)
return err;
@@ -3468,24 +3638,12 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
if (cmd.reserved)
return -EINVAL;
- resp.response_length = offsetof(typeof(resp), odp_caps);
-
- if (ucore->outlen < resp.response_length)
- return -ENOSPC;
-
- memset(&attr, 0, sizeof(attr));
-
- err = ib_dev->query_device(ib_dev, &attr, uhw);
+ err = ib_dev->ops.query_device(ib_dev, &attr, &attrs->driver_udata);
if (err)
return err;
- copy_query_dev_fields(file, ib_dev, &resp.base, &attr);
- resp.comp_mask = 0;
+ copy_query_dev_fields(ucontext, &resp.base, &attr);
- if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
- goto end;
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
resp.odp_caps.general_caps = attr.odp_caps.general_caps;
resp.odp_caps.per_transport_caps.rc_odp_caps =
attr.odp_caps.per_transport_caps.rc_odp_caps;
@@ -3493,28 +3651,431 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
attr.odp_caps.per_transport_caps.uc_odp_caps;
resp.odp_caps.per_transport_caps.ud_odp_caps =
attr.odp_caps.per_transport_caps.ud_odp_caps;
- resp.odp_caps.reserved = 0;
-#else
- memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
-#endif
- resp.response_length += sizeof(resp.odp_caps);
-
- if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
- goto end;
+ resp.xrc_odp_caps = attr.odp_caps.per_transport_caps.xrc_odp_caps;
resp.timestamp_mask = attr.timestamp_mask;
- resp.response_length += sizeof(resp.timestamp_mask);
+ resp.hca_core_clock = attr.hca_core_clock;
+ resp.device_cap_flags_ex = attr.device_cap_flags;
+ resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
+ resp.rss_caps.max_rwq_indirection_tables =
+ attr.rss_caps.max_rwq_indirection_tables;
+ resp.rss_caps.max_rwq_indirection_table_size =
+ attr.rss_caps.max_rwq_indirection_table_size;
+ resp.max_wq_type_rq = attr.max_wq_type_rq;
+ resp.raw_packet_caps = attr.raw_packet_caps;
+ resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size;
+ resp.tm_caps.max_num_tags = attr.tm_caps.max_num_tags;
+ resp.tm_caps.max_ops = attr.tm_caps.max_ops;
+ resp.tm_caps.max_sge = attr.tm_caps.max_sge;
+ resp.tm_caps.flags = attr.tm_caps.flags;
+ resp.cq_moderation_caps.max_cq_moderation_count =
+ attr.cq_caps.max_cq_moderation_count;
+ resp.cq_moderation_caps.max_cq_moderation_period =
+ attr.cq_caps.max_cq_moderation_period;
+ resp.max_dm_size = attr.max_dm_size;
+ resp.response_length = uverbs_response_length(attrs, sizeof(resp));
+
+ return uverbs_response(attrs, &resp, sizeof(resp));
+}
+
+static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uverbs_ex_modify_cq cmd;
+ struct ib_cq *cq;
+ int ret;
- if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
- goto end;
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
- resp.hca_core_clock = attr.hca_core_clock;
- resp.response_length += sizeof(resp.hca_core_clock);
+ if (!cmd.attr_mask || cmd.reserved)
+ return -EINVAL;
-end:
- err = ib_copy_to_udata(ucore, &resp, resp.response_length);
- if (err)
- return err;
+ if (cmd.attr_mask > IB_CQ_MODERATE)
+ return -EOPNOTSUPP;
- return 0;
+ cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
+ if (IS_ERR(cq))
+ return PTR_ERR(cq);
+
+ ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);
+
+ rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
+ return ret;
}
+
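
The converted handlers above all share one shape: the old copy_from_user() on the raw write() buffer and the in_len/out_len bookkeeping become a uverbs_request()/uverbs_response() pair on the attribute bundle. A minimal sketch of that shape, with an invented command name and hypothetical request/response structs that are not part of this patch (note the response u64 leading the request, as the comment below requires):

struct ib_uverbs_example {            /* hypothetical request */
        __u64 response;
        __u32 handle;
        __u32 reserved;
};

struct ib_uverbs_example_resp {       /* hypothetical response */
        __u32 value;
        __u32 reserved;
};

static int ib_uverbs_example_cmd(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_example cmd;
        struct ib_uverbs_example_resp resp = {};
        int ret;

        /* Replaces copy_from_user() on the raw write() buffer. */
        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        /* ... look up objects with uobj_get_obj_read() and do the work ... */

        /* Replaces copy_to_user() and the old "return in_len" convention. */
        return uverbs_response(attrs, &resp, sizeof(resp));
}
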
+/*
+ * Describe the input structs for write(). Some write methods have an
+ * input-only struct; most have both an input and an output. If the method
+ * has an output then the 'response' u64 must be the first field in the
+ * request structure.
+ *
+ * If udata is present then both the request and response structs have a
+ * trailing driver_data flex array. In this case the size of the base struct
+ * cannot be changed.
+ */
+#define UAPI_DEF_WRITE_IO(req, resp) \
+ .write.has_resp = 1 + \
+ BUILD_BUG_ON_ZERO(offsetof(req, response) != 0) + \
+ BUILD_BUG_ON_ZERO(sizeof_field(req, response) != \
+ sizeof(u64)), \
+ .write.req_size = sizeof(req), .write.resp_size = sizeof(resp)
+
+#define UAPI_DEF_WRITE_I(req) .write.req_size = sizeof(req)
+
+#define UAPI_DEF_WRITE_UDATA_IO(req, resp) \
+ UAPI_DEF_WRITE_IO(req, resp), \
+ .write.has_udata = \
+ 1 + \
+ BUILD_BUG_ON_ZERO(offsetof(req, driver_data) != \
+ sizeof(req)) + \
+ BUILD_BUG_ON_ZERO(offsetof(resp, driver_data) != \
+ sizeof(resp))
+
+#define UAPI_DEF_WRITE_UDATA_I(req) \
+ UAPI_DEF_WRITE_I(req), \
+ .write.has_udata = \
+ 1 + BUILD_BUG_ON_ZERO(offsetof(req, driver_data) != \
+ sizeof(req))
+
+/*
+ * The _EX versions are for use with WRITE_EX and allow the last struct member
+ * to be specified. Buffers that do not include that member will be rejected.
+ */
+#define UAPI_DEF_WRITE_IO_EX(req, req_last_member, resp, resp_last_member) \
+ .write.has_resp = 1, \
+ .write.req_size = offsetofend(req, req_last_member), \
+ .write.resp_size = offsetofend(resp, resp_last_member)
+
+#define UAPI_DEF_WRITE_I_EX(req, req_last_member) \
+ .write.req_size = offsetofend(req, req_last_member)
+
+const struct uapi_definition uverbs_def_write_intf[] = {
+ DECLARE_UVERBS_OBJECT(
+ UVERBS_OBJECT_AH,
+ DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_AH,
+ ib_uverbs_create_ah,
+ UAPI_DEF_WRITE_UDATA_IO(
+ struct ib_uverbs_create_ah,
+ struct ib_uverbs_create_ah_resp)),
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_DESTROY_AH,
+ ib_uverbs_destroy_ah,
+ UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_ah)),
+ UAPI_DEF_OBJ_NEEDS_FN(create_user_ah),
+ UAPI_DEF_OBJ_NEEDS_FN(destroy_ah)),
+
+ DECLARE_UVERBS_OBJECT(
+ UVERBS_OBJECT_COMP_CHANNEL,
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL,
+ ib_uverbs_create_comp_channel,
+ UAPI_DEF_WRITE_IO(
+ struct ib_uverbs_create_comp_channel,
+ struct ib_uverbs_create_comp_channel_resp))),
+
+ DECLARE_UVERBS_OBJECT(
+ UVERBS_OBJECT_CQ,
+ DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_CQ,
+ ib_uverbs_create_cq,
+ UAPI_DEF_WRITE_UDATA_IO(
+ struct ib_uverbs_create_cq,
+ struct ib_uverbs_create_cq_resp),
+ UAPI_DEF_METHOD_NEEDS_FN(create_cq)),
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_DESTROY_CQ,
+ ib_uverbs_destroy_cq,
+ UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_cq,
+ struct ib_uverbs_destroy_cq_resp),
+ UAPI_DEF_METHOD_NEEDS_FN(destroy_cq)),
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_POLL_CQ,
+ ib_uverbs_poll_cq,
+ UAPI_DEF_WRITE_IO(struct ib_uverbs_poll_cq,
+ struct ib_uverbs_poll_cq_resp),
+ UAPI_DEF_METHOD_NEEDS_FN(poll_cq)),
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_REQ_NOTIFY_CQ,
+ ib_uverbs_req_notify_cq,
+ UAPI_DEF_WRITE_I(struct ib_uverbs_req_notify_cq),
+ UAPI_DEF_METHOD_NEEDS_FN(req_notify_cq)),
+ DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_RESIZE_CQ,
+ ib_uverbs_resize_cq,
+ UAPI_DEF_WRITE_UDATA_IO(
+ struct ib_uverbs_resize_cq,
+ struct ib_uverbs_resize_cq_resp),
+ UAPI_DEF_METHOD_NEEDS_FN(resize_cq)),
+ DECLARE_UVERBS_WRITE_EX(
+ IB_USER_VERBS_EX_CMD_CREATE_CQ,
+ ib_uverbs_ex_create_cq,
+ UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_cq,
+ reserved,
+ struct ib_uverbs_ex_create_cq_resp,
+ response_length),
+ UAPI_DEF_METHOD_NEEDS_FN(create_cq)),
+ DECLARE_UVERBS_WRITE_EX(
+ IB_USER_VERBS_EX_CMD_MODIFY_CQ,
+ ib_uverbs_ex_modify_cq,
+ UAPI_DEF_WRITE_I(struct ib_uverbs_ex_modify_cq),
+ UAPI_DEF_METHOD_NEEDS_FN(modify_cq))),
+
+ DECLARE_UVERBS_OBJECT(
+ UVERBS_OBJECT_DEVICE,
+ DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_GET_CONTEXT,
+ ib_uverbs_get_context,
+ UAPI_DEF_WRITE_UDATA_IO(
+ struct ib_uverbs_get_context,
+ struct ib_uverbs_get_context_resp)),
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_QUERY_DEVICE,
+ ib_uverbs_query_device,
+ UAPI_DEF_WRITE_IO(struct ib_uverbs_query_device,
+ struct ib_uverbs_query_device_resp)),
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_QUERY_PORT,
+ ib_uverbs_query_port,
+ UAPI_DEF_WRITE_IO(struct ib_uverbs_query_port,
+ struct ib_uverbs_query_port_resp),
+ UAPI_DEF_METHOD_NEEDS_FN(query_port)),
+ DECLARE_UVERBS_WRITE_EX(
+ IB_USER_VERBS_EX_CMD_QUERY_DEVICE,
+ ib_uverbs_ex_query_device,
+ UAPI_DEF_WRITE_IO_EX(
+ struct ib_uverbs_ex_query_device,
+ reserved,
+ struct ib_uverbs_ex_query_device_resp,
+ response_length),
+ UAPI_DEF_METHOD_NEEDS_FN(query_device)),
+ UAPI_DEF_OBJ_NEEDS_FN(alloc_ucontext),
+ UAPI_DEF_OBJ_NEEDS_FN(dealloc_ucontext)),
+
+ DECLARE_UVERBS_OBJECT(
+ UVERBS_OBJECT_FLOW,
+ DECLARE_UVERBS_WRITE_EX(
+ IB_USER_VERBS_EX_CMD_CREATE_FLOW,
+ ib_uverbs_ex_create_flow,
+ UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_create_flow,
+ flow_attr,
+ struct ib_uverbs_create_flow_resp,
+ flow_handle),
+ UAPI_DEF_METHOD_NEEDS_FN(create_flow)),
+ DECLARE_UVERBS_WRITE_EX(
+ IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
+ ib_uverbs_ex_destroy_flow,
+ UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_flow),
+ UAPI_DEF_METHOD_NEEDS_FN(destroy_flow))),
+
+ DECLARE_UVERBS_OBJECT(
+ UVERBS_OBJECT_MR,
+ DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_DEREG_MR,
+ ib_uverbs_dereg_mr,
+ UAPI_DEF_WRITE_I(struct ib_uverbs_dereg_mr),
+ UAPI_DEF_METHOD_NEEDS_FN(dereg_mr)),
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_REG_MR,
+ ib_uverbs_reg_mr,
+ UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_reg_mr,
+ struct ib_uverbs_reg_mr_resp),
+ UAPI_DEF_METHOD_NEEDS_FN(reg_user_mr)),
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_REREG_MR,
+ ib_uverbs_rereg_mr,
+ UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_rereg_mr,
+ struct ib_uverbs_rereg_mr_resp),
+ UAPI_DEF_METHOD_NEEDS_FN(rereg_user_mr))),
+
+ DECLARE_UVERBS_OBJECT(
+ UVERBS_OBJECT_MW,
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_ALLOC_MW,
+ ib_uverbs_alloc_mw,
+ UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_alloc_mw,
+ struct ib_uverbs_alloc_mw_resp),
+ UAPI_DEF_METHOD_NEEDS_FN(alloc_mw)),
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_DEALLOC_MW,
+ ib_uverbs_dealloc_mw,
+ UAPI_DEF_WRITE_I(struct ib_uverbs_dealloc_mw),
+ UAPI_DEF_METHOD_NEEDS_FN(dealloc_mw))),
+
+ DECLARE_UVERBS_OBJECT(
+ UVERBS_OBJECT_PD,
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_ALLOC_PD,
+ ib_uverbs_alloc_pd,
+ UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_alloc_pd,
+ struct ib_uverbs_alloc_pd_resp),
+ UAPI_DEF_METHOD_NEEDS_FN(alloc_pd)),
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_DEALLOC_PD,
+ ib_uverbs_dealloc_pd,
+ UAPI_DEF_WRITE_I(struct ib_uverbs_dealloc_pd),
+ UAPI_DEF_METHOD_NEEDS_FN(dealloc_pd))),
+
+ DECLARE_UVERBS_OBJECT(
+ UVERBS_OBJECT_QP,
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_ATTACH_MCAST,
+ ib_uverbs_attach_mcast,
+ UAPI_DEF_WRITE_I(struct ib_uverbs_attach_mcast),
+ UAPI_DEF_METHOD_NEEDS_FN(attach_mcast),
+ UAPI_DEF_METHOD_NEEDS_FN(detach_mcast)),
+ DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_QP,
+ ib_uverbs_create_qp,
+ UAPI_DEF_WRITE_UDATA_IO(
+ struct ib_uverbs_create_qp,
+ struct ib_uverbs_create_qp_resp),
+ UAPI_DEF_METHOD_NEEDS_FN(create_qp)),
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_DESTROY_QP,
+ ib_uverbs_destroy_qp,
+ UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_qp,
+ struct ib_uverbs_destroy_qp_resp),
+ UAPI_DEF_METHOD_NEEDS_FN(destroy_qp)),
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_DETACH_MCAST,
+ ib_uverbs_detach_mcast,
+ UAPI_DEF_WRITE_I(struct ib_uverbs_detach_mcast),
+ UAPI_DEF_METHOD_NEEDS_FN(detach_mcast)),
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_MODIFY_QP,
+ ib_uverbs_modify_qp,
+ UAPI_DEF_WRITE_I(struct ib_uverbs_modify_qp),
+ UAPI_DEF_METHOD_NEEDS_FN(modify_qp)),
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_POST_RECV,
+ ib_uverbs_post_recv,
+ UAPI_DEF_WRITE_IO(struct ib_uverbs_post_recv,
+ struct ib_uverbs_post_recv_resp),
+ UAPI_DEF_METHOD_NEEDS_FN(post_recv)),
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_POST_SEND,
+ ib_uverbs_post_send,
+ UAPI_DEF_WRITE_IO(struct ib_uverbs_post_send,
+ struct ib_uverbs_post_send_resp),
+ UAPI_DEF_METHOD_NEEDS_FN(post_send)),
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_QUERY_QP,
+ ib_uverbs_query_qp,
+ UAPI_DEF_WRITE_IO(struct ib_uverbs_query_qp,
+ struct ib_uverbs_query_qp_resp),
+ UAPI_DEF_METHOD_NEEDS_FN(query_qp)),
+ DECLARE_UVERBS_WRITE_EX(
+ IB_USER_VERBS_EX_CMD_CREATE_QP,
+ ib_uverbs_ex_create_qp,
+ UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_qp,
+ comp_mask,
+ struct ib_uverbs_ex_create_qp_resp,
+ response_length),
+ UAPI_DEF_METHOD_NEEDS_FN(create_qp)),
+ DECLARE_UVERBS_WRITE_EX(
+ IB_USER_VERBS_EX_CMD_MODIFY_QP,
+ ib_uverbs_ex_modify_qp,
+ UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_modify_qp,
+ base,
+ struct ib_uverbs_ex_modify_qp_resp,
+ response_length),
+ UAPI_DEF_METHOD_NEEDS_FN(modify_qp))),
+
+ DECLARE_UVERBS_OBJECT(
+ UVERBS_OBJECT_RWQ_IND_TBL,
+ DECLARE_UVERBS_WRITE_EX(
+ IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL,
+ ib_uverbs_ex_create_rwq_ind_table,
+ UAPI_DEF_WRITE_IO_EX(
+ struct ib_uverbs_ex_create_rwq_ind_table,
+ log_ind_tbl_size,
+ struct ib_uverbs_ex_create_rwq_ind_table_resp,
+ ind_tbl_num),
+ UAPI_DEF_METHOD_NEEDS_FN(create_rwq_ind_table)),
+ DECLARE_UVERBS_WRITE_EX(
+ IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL,
+ ib_uverbs_ex_destroy_rwq_ind_table,
+ UAPI_DEF_WRITE_I(
+ struct ib_uverbs_ex_destroy_rwq_ind_table),
+ UAPI_DEF_METHOD_NEEDS_FN(destroy_rwq_ind_table))),
+
+ DECLARE_UVERBS_OBJECT(
+ UVERBS_OBJECT_WQ,
+ DECLARE_UVERBS_WRITE_EX(
+ IB_USER_VERBS_EX_CMD_CREATE_WQ,
+ ib_uverbs_ex_create_wq,
+ UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_wq,
+ max_sge,
+ struct ib_uverbs_ex_create_wq_resp,
+ wqn),
+ UAPI_DEF_METHOD_NEEDS_FN(create_wq)),
+ DECLARE_UVERBS_WRITE_EX(
+ IB_USER_VERBS_EX_CMD_DESTROY_WQ,
+ ib_uverbs_ex_destroy_wq,
+ UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_destroy_wq,
+ wq_handle,
+ struct ib_uverbs_ex_destroy_wq_resp,
+ reserved),
+ UAPI_DEF_METHOD_NEEDS_FN(destroy_wq)),
+ DECLARE_UVERBS_WRITE_EX(
+ IB_USER_VERBS_EX_CMD_MODIFY_WQ,
+ ib_uverbs_ex_modify_wq,
+ UAPI_DEF_WRITE_I_EX(struct ib_uverbs_ex_modify_wq,
+ curr_wq_state),
+ UAPI_DEF_METHOD_NEEDS_FN(modify_wq))),
+
+ DECLARE_UVERBS_OBJECT(
+ UVERBS_OBJECT_SRQ,
+ DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_SRQ,
+ ib_uverbs_create_srq,
+ UAPI_DEF_WRITE_UDATA_IO(
+ struct ib_uverbs_create_srq,
+ struct ib_uverbs_create_srq_resp),
+ UAPI_DEF_METHOD_NEEDS_FN(create_srq)),
+ DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_XSRQ,
+ ib_uverbs_create_xsrq,
+ UAPI_DEF_WRITE_UDATA_IO(
+ struct ib_uverbs_create_xsrq,
+ struct ib_uverbs_create_srq_resp),
+ UAPI_DEF_METHOD_NEEDS_FN(create_srq)),
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_DESTROY_SRQ,
+ ib_uverbs_destroy_srq,
+ UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_srq,
+ struct ib_uverbs_destroy_srq_resp),
+ UAPI_DEF_METHOD_NEEDS_FN(destroy_srq)),
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_MODIFY_SRQ,
+ ib_uverbs_modify_srq,
+ UAPI_DEF_WRITE_UDATA_I(struct ib_uverbs_modify_srq),
+ UAPI_DEF_METHOD_NEEDS_FN(modify_srq)),
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_POST_SRQ_RECV,
+ ib_uverbs_post_srq_recv,
+ UAPI_DEF_WRITE_IO(struct ib_uverbs_post_srq_recv,
+ struct ib_uverbs_post_srq_recv_resp),
+ UAPI_DEF_METHOD_NEEDS_FN(post_srq_recv)),
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_QUERY_SRQ,
+ ib_uverbs_query_srq,
+ UAPI_DEF_WRITE_IO(struct ib_uverbs_query_srq,
+ struct ib_uverbs_query_srq_resp),
+ UAPI_DEF_METHOD_NEEDS_FN(query_srq))),
+
+ DECLARE_UVERBS_OBJECT(
+ UVERBS_OBJECT_XRCD,
+ DECLARE_UVERBS_WRITE(
+ IB_USER_VERBS_CMD_CLOSE_XRCD,
+ ib_uverbs_close_xrcd,
+ UAPI_DEF_WRITE_I(struct ib_uverbs_close_xrcd)),
+ DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_QP,
+ ib_uverbs_open_qp,
+ UAPI_DEF_WRITE_UDATA_IO(
+ struct ib_uverbs_open_qp,
+ struct ib_uverbs_create_qp_resp)),
+ DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_XRCD,
+ ib_uverbs_open_xrcd,
+ UAPI_DEF_WRITE_UDATA_IO(
+ struct ib_uverbs_open_xrcd,
+ struct ib_uverbs_open_xrcd_resp)),
+ UAPI_DEF_OBJ_NEEDS_FN(alloc_xrcd),
+ UAPI_DEF_OBJ_NEEDS_FN(dealloc_xrcd)),
+
+ {},
+};
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
new file mode 100644
index 000000000000..f80da6a67e24
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -0,0 +1,849 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/rdma_user_ioctl.h>
+#include <rdma/uverbs_ioctl.h>
+#include "rdma_core.h"
+#include "uverbs.h"
+
+struct bundle_alloc_head {
+ struct_group_tagged(bundle_alloc_head_hdr, hdr,
+ struct bundle_alloc_head *next;
+ );
+ u8 data[];
+};
+
+struct bundle_priv {
+ /* Must be first */
+ struct bundle_alloc_head_hdr alloc_head;
+ struct bundle_alloc_head *allocated_mem;
+ size_t internal_avail;
+ size_t internal_used;
+
+ struct radix_tree_root *radix;
+ const struct uverbs_api_ioctl_method *method_elm;
+ void __rcu **radix_slots;
+ unsigned long radix_slots_len;
+ u32 method_key;
+
+ struct ib_uverbs_attr __user *user_attrs;
+ struct ib_uverbs_attr *uattrs;
+
+ DECLARE_BITMAP(uobj_finalize, UVERBS_API_ATTR_BKEY_LEN);
+ DECLARE_BITMAP(spec_finalize, UVERBS_API_ATTR_BKEY_LEN);
+ DECLARE_BITMAP(uobj_hw_obj_valid, UVERBS_API_ATTR_BKEY_LEN);
+
+ /*
+ * Must be last. bundle ends in a flex array which overlaps
+ * internal_buffer.
+ */
+ struct uverbs_attr_bundle_hdr bundle;
+ u64 internal_buffer[32];
+};
+
+/*
+ * Each method has an absolute minimum amount of memory it needs to
+ * allocate; precompute that amount and determine whether the on-stack
+ * memory can be used or an allocation is needed.
+ */
+void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm,
+ unsigned int num_attrs)
+{
+ struct bundle_priv *pbundle;
+ struct uverbs_attr_bundle *bundle;
+ size_t bundle_size =
+ offsetof(struct bundle_priv, internal_buffer) +
+ sizeof(*bundle->attrs) * method_elm->key_bitmap_len +
+ sizeof(*pbundle->uattrs) * num_attrs;
+
+ method_elm->use_stack = bundle_size <= sizeof(*pbundle);
+ method_elm->bundle_size =
+ ALIGN(bundle_size + 256, sizeof(*pbundle->internal_buffer));
+
+ /* Do not want order-2 allocations for this. */
+ WARN_ON_ONCE(method_elm->bundle_size > PAGE_SIZE);
+}
+
+/**
+ * _uverbs_alloc() - Quickly allocate memory for use with a bundle
+ * @bundle: The bundle
+ * @size: Number of bytes to allocate
+ * @flags: Allocator flags
+ *
+ * The bundle allocator is intended for allocations that are connected with
+ * processing the system call related to the bundle. The allocated memory is
+ * always freed once the system call completes, and cannot be freed any other
+ * way.
+ *
+ * This tries to use a small pool of pre-allocated memory for performance.
+ */
+__malloc void *_uverbs_alloc(struct uverbs_attr_bundle *bundle, size_t size,
+ gfp_t flags)
+{
+ struct bundle_priv *pbundle =
+ container_of(&bundle->hdr, struct bundle_priv, bundle);
+ size_t new_used;
+ void *res;
+
+ if (check_add_overflow(size, pbundle->internal_used, &new_used))
+ return ERR_PTR(-EOVERFLOW);
+
+ if (new_used > pbundle->internal_avail) {
+ struct bundle_alloc_head *buf;
+
+ buf = kvmalloc(struct_size(buf, data, size), flags);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+ buf->next = pbundle->allocated_mem;
+ pbundle->allocated_mem = buf;
+ return buf->data;
+ }
+
+ res = (void *)pbundle->internal_buffer + pbundle->internal_used;
+ pbundle->internal_used =
+ ALIGN(new_used, sizeof(*pbundle->internal_buffer));
+ if (want_init_on_alloc(flags))
+ memset(res, 0, size);
+ return res;
+}
+EXPORT_SYMBOL(_uverbs_alloc);
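
A hedged usage sketch for the bundle allocator: uverbs_alloc() is assumed here to be the GFP_KERNEL convenience wrapper around _uverbs_alloc() that the rest of this file calls; the handler name and buffer size are invented.

static int example_handler(struct uverbs_attr_bundle *attrs)
{
        u32 *scratch;

        /* Scratch memory whose lifetime is the system call itself. */
        scratch = uverbs_alloc(attrs, 64 * sizeof(*scratch));
        if (IS_ERR(scratch))
                return PTR_ERR(scratch);

        /* ... use scratch; it is released in bundle_destroy(), never by
         * the handler itself ... */
        return 0;
}
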
+
+static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr,
+ u16 len)
+{
+ if (uattr->len > sizeof_field(struct ib_uverbs_attr, data))
+ return ib_is_buffer_cleared(u64_to_user_ptr(uattr->data) + len,
+ uattr->len - len);
+
+ return !memchr_inv((const void *)&uattr->data + len,
+ 0, uattr->len - len);
+}
+
+static int uverbs_set_output(const struct uverbs_attr_bundle *bundle,
+ const struct uverbs_attr *attr)
+{
+ struct bundle_priv *pbundle =
+ container_of(&bundle->hdr, struct bundle_priv, bundle);
+ u16 flags;
+
+ flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags |
+ UVERBS_ATTR_F_VALID_OUTPUT;
+ if (put_user(flags,
+ &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags))
+ return -EFAULT;
+ return 0;
+}
+
+static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
+ const struct uverbs_api_attr *attr_uapi,
+ struct uverbs_objs_arr_attr *attr,
+ struct ib_uverbs_attr *uattr,
+ u32 attr_bkey)
+{
+ struct uverbs_attr_bundle *bundle =
+ container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr);
+ const struct uverbs_attr_spec *spec = &attr_uapi->spec;
+ size_t array_len;
+ u32 *idr_vals;
+ int ret = 0;
+ size_t i;
+
+ if (uattr->attr_data.reserved)
+ return -EINVAL;
+
+ if (uattr->len % sizeof(u32))
+ return -EINVAL;
+
+ array_len = uattr->len / sizeof(u32);
+ if (array_len < spec->u2.objs_arr.min_len ||
+ array_len > spec->u2.objs_arr.max_len)
+ return -EINVAL;
+
+ attr->uobjects =
+ uverbs_alloc(bundle,
+ array_size(array_len, sizeof(*attr->uobjects)));
+ if (IS_ERR(attr->uobjects))
+ return PTR_ERR(attr->uobjects);
+
+ /*
+ * Since an idr value is 4B and *uobjects is >= 4B, we can use
+ * attr->uobjects to store the idr array and avoid an additional memory
+ * allocation. The idr values are placed at the end of the uobjects array
+ * so we can read each idr and replace it with a pointer.
+ */
+ idr_vals = (u32 *)(attr->uobjects + array_len) - array_len;
+
+ if (uattr->len > sizeof(uattr->data)) {
+ ret = copy_from_user(idr_vals, u64_to_user_ptr(uattr->data),
+ uattr->len);
+ if (ret)
+ return -EFAULT;
+ } else {
+ memcpy(idr_vals, &uattr->data, uattr->len);
+ }
+
+ for (i = 0; i != array_len; i++) {
+ attr->uobjects[i] = uverbs_get_uobject_from_file(
+ spec->u2.objs_arr.obj_type, spec->u2.objs_arr.access,
+ idr_vals[i], bundle);
+ if (IS_ERR(attr->uobjects[i])) {
+ ret = PTR_ERR(attr->uobjects[i]);
+ break;
+ }
+ }
+
+ attr->len = i;
+ __set_bit(attr_bkey, pbundle->spec_finalize);
+ return ret;
+}
+
+static void uverbs_free_idrs_array(const struct uverbs_api_attr *attr_uapi,
+ struct uverbs_objs_arr_attr *attr,
+ bool commit,
+ struct uverbs_attr_bundle *attrs)
+{
+ const struct uverbs_attr_spec *spec = &attr_uapi->spec;
+ size_t i;
+
+ for (i = 0; i != attr->len; i++)
+ uverbs_finalize_object(attr->uobjects[i],
+ spec->u2.objs_arr.access, false, commit,
+ attrs);
+}
+
+static int uverbs_process_attr(struct bundle_priv *pbundle,
+ const struct uverbs_api_attr *attr_uapi,
+ struct ib_uverbs_attr *uattr, u32 attr_bkey)
+{
+ const struct uverbs_attr_spec *spec = &attr_uapi->spec;
+ struct uverbs_attr_bundle *bundle =
+ container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr);
+ struct uverbs_attr *e = &bundle->attrs[attr_bkey];
+ const struct uverbs_attr_spec *val_spec = spec;
+ struct uverbs_obj_attr *o_attr;
+
+ switch (spec->type) {
+ case UVERBS_ATTR_TYPE_ENUM_IN:
+ if (uattr->attr_data.enum_data.elem_id >= spec->u.enum_def.num_elems)
+ return -EOPNOTSUPP;
+
+ if (uattr->attr_data.enum_data.reserved)
+ return -EINVAL;
+
+ val_spec = &spec->u2.enum_def.ids[uattr->attr_data.enum_data.elem_id];
+
+ /* Currently we only support PTR_IN based enums */
+ if (val_spec->type != UVERBS_ATTR_TYPE_PTR_IN)
+ return -EOPNOTSUPP;
+
+ e->ptr_attr.enum_id = uattr->attr_data.enum_data.elem_id;
+ fallthrough;
+ case UVERBS_ATTR_TYPE_PTR_IN:
+ /* Ensure that any data provided by userspace beyond the known
+ * struct is zero. Userspace that knows how to use some future
+ * longer struct will fail here if used with an old kernel and
+ * non-zero content, making ABI compat/discovery simpler.
+ */
+ if (uattr->len > val_spec->u.ptr.len &&
+ val_spec->zero_trailing &&
+ !uverbs_is_attr_cleared(uattr, val_spec->u.ptr.len))
+ return -EOPNOTSUPP;
+
+ fallthrough;
+ case UVERBS_ATTR_TYPE_PTR_OUT:
+ if (uattr->len < val_spec->u.ptr.min_len ||
+ (!val_spec->zero_trailing &&
+ uattr->len > val_spec->u.ptr.len))
+ return -EINVAL;
+
+ if (spec->type != UVERBS_ATTR_TYPE_ENUM_IN &&
+ uattr->attr_data.reserved)
+ return -EINVAL;
+
+ e->ptr_attr.uattr_idx = uattr - pbundle->uattrs;
+ e->ptr_attr.len = uattr->len;
+
+ if (val_spec->alloc_and_copy && !uverbs_attr_ptr_is_inline(e)) {
+ void *p;
+
+ p = uverbs_alloc(bundle, uattr->len);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+
+ e->ptr_attr.ptr = p;
+
+ if (copy_from_user(p, u64_to_user_ptr(uattr->data),
+ uattr->len))
+ return -EFAULT;
+ } else {
+ e->ptr_attr.data = uattr->data;
+ }
+ break;
+
+ case UVERBS_ATTR_TYPE_IDR:
+ case UVERBS_ATTR_TYPE_FD:
+ if (uattr->attr_data.reserved)
+ return -EINVAL;
+
+ if (uattr->len != 0)
+ return -EINVAL;
+
+ o_attr = &e->obj_attr;
+ o_attr->attr_elm = attr_uapi;
+
+ /*
+ * The type of uattr->data is u64 for UVERBS_ATTR_TYPE_IDR and
+ * s64 for UVERBS_ATTR_TYPE_FD. We can cast the u64 to s64
+ * here without caring about truncation as we know that the
+ * IDR implementation today rejects negative IDs
+ */
+ o_attr->uobject = uverbs_get_uobject_from_file(
+ spec->u.obj.obj_type, spec->u.obj.access,
+ uattr->data_s64, bundle);
+ if (IS_ERR(o_attr->uobject))
+ return PTR_ERR(o_attr->uobject);
+ __set_bit(attr_bkey, pbundle->uobj_finalize);
+
+ if (spec->u.obj.access == UVERBS_ACCESS_NEW) {
+ unsigned int uattr_idx = uattr - pbundle->uattrs;
+ s64 id = o_attr->uobject->id;
+
+ /* Copy the allocated id to the user-space */
+ if (put_user(id, &pbundle->user_attrs[uattr_idx].data))
+ return -EFAULT;
+ }
+
+ break;
+
+ case UVERBS_ATTR_TYPE_RAW_FD:
+ if (uattr->attr_data.reserved || uattr->len != 0 ||
+ uattr->data_s64 < INT_MIN || uattr->data_s64 > INT_MAX)
+ return -EINVAL;
+ /* _uverbs_get_const_signed() is the accessor */
+ e->ptr_attr.data = uattr->data_s64;
+ break;
+
+ case UVERBS_ATTR_TYPE_IDRS_ARRAY:
+ return uverbs_process_idrs_array(pbundle, attr_uapi,
+ &e->objs_arr_attr, uattr,
+ attr_bkey);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/*
+ * We searched the radix tree with the method prefix and now want to
+ * fast-search the suffix bits to get a particular attribute pointer. It is
+ * not entirely clear whether this breaks the radix tree encapsulation, but
+ * it uses the iter data to determine if the method iter points at the same
+ * chunk that will store the attribute; if so it just dereferences it
+ * directly. By construction, in most kernel configs the method and attrs
+ * all fit in a single radix chunk, so in most cases there is no search.
+ * Otherwise this falls back to a full lookup.
+ */
+static void __rcu **uapi_get_attr_for_method(struct bundle_priv *pbundle,
+ u32 attr_key)
+{
+ void __rcu **slot;
+
+ if (likely(attr_key < pbundle->radix_slots_len)) {
+ void *entry;
+
+ slot = pbundle->radix_slots + attr_key;
+ entry = rcu_dereference_raw(*slot);
+ if (likely(!radix_tree_is_internal_node(entry) && entry))
+ return slot;
+ }
+
+ return radix_tree_lookup_slot(pbundle->radix,
+ pbundle->method_key | attr_key);
+}
+
+static int uverbs_set_attr(struct bundle_priv *pbundle,
+ struct ib_uverbs_attr *uattr)
+{
+ u32 attr_key = uapi_key_attr(uattr->attr_id);
+ u32 attr_bkey = uapi_bkey_attr(attr_key);
+ const struct uverbs_api_attr *attr;
+ void __rcu **slot;
+ int ret;
+
+ slot = uapi_get_attr_for_method(pbundle, attr_key);
+ if (!slot) {
+ /*
+ * Kernel does not support the attribute but user-space says it
+ * is mandatory
+ */
+ if (uattr->flags & UVERBS_ATTR_F_MANDATORY)
+ return -EPROTONOSUPPORT;
+ return 0;
+ }
+ attr = rcu_dereference_protected(*slot, true);
+
+ /* Reject duplicate attributes from user-space */
+ if (test_bit(attr_bkey, pbundle->bundle.attr_present))
+ return -EINVAL;
+
+ ret = uverbs_process_attr(pbundle, attr, uattr, attr_bkey);
+ if (ret)
+ return ret;
+
+ __set_bit(attr_bkey, pbundle->bundle.attr_present);
+
+ return 0;
+}
+
+static int ib_uverbs_run_method(struct bundle_priv *pbundle,
+ unsigned int num_attrs)
+{
+ int (*handler)(struct uverbs_attr_bundle *attrs);
+ struct uverbs_attr_bundle *bundle =
+ container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr);
+ size_t uattrs_size = array_size(sizeof(*pbundle->uattrs), num_attrs);
+ unsigned int destroy_bkey = pbundle->method_elm->destroy_bkey;
+ unsigned int i;
+ int ret;
+
+ /* See uverbs_disassociate_api() */
+ handler = srcu_dereference(
+ pbundle->method_elm->handler,
+ &pbundle->bundle.ufile->device->disassociate_srcu);
+ if (!handler)
+ return -EIO;
+
+ pbundle->uattrs = uverbs_alloc(bundle, uattrs_size);
+ if (IS_ERR(pbundle->uattrs))
+ return PTR_ERR(pbundle->uattrs);
+ if (copy_from_user(pbundle->uattrs, pbundle->user_attrs, uattrs_size))
+ return -EFAULT;
+
+ for (i = 0; i != num_attrs; i++) {
+ ret = uverbs_set_attr(pbundle, &pbundle->uattrs[i]);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ /* User space did not provide all the mandatory attributes */
+ if (unlikely(!bitmap_subset(pbundle->method_elm->attr_mandatory,
+ pbundle->bundle.attr_present,
+ pbundle->method_elm->key_bitmap_len)))
+ return -EINVAL;
+
+ if (pbundle->method_elm->has_udata)
+ uverbs_fill_udata(bundle, &pbundle->bundle.driver_udata,
+ UVERBS_ATTR_UHW_IN, UVERBS_ATTR_UHW_OUT);
+ else
+ pbundle->bundle.driver_udata = (struct ib_udata){};
+
+ if (destroy_bkey != UVERBS_API_ATTR_BKEY_LEN) {
+ struct uverbs_obj_attr *destroy_attr = &bundle->attrs[destroy_bkey].obj_attr;
+
+ ret = uobj_destroy(destroy_attr->uobject, bundle);
+ if (ret)
+ return ret;
+ __clear_bit(destroy_bkey, pbundle->uobj_finalize);
+
+ ret = handler(bundle);
+ uobj_put_destroy(destroy_attr->uobject);
+ } else {
+ ret = handler(bundle);
+ }
+
+ /*
+ * Until the drivers are revised to use the bundle directly we have to
+ * assume that the driver wrote to its UHW_OUT and flag userspace
+ * appropriately.
+ */
+ if (!ret && pbundle->method_elm->has_udata) {
+ const struct uverbs_attr *attr =
+ uverbs_attr_get(bundle, UVERBS_ATTR_UHW_OUT);
+
+ if (!IS_ERR(attr))
+ ret = uverbs_set_output(bundle, attr);
+ }
+
+ /*
+ * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can
+ * not invoke the method because the request is not supported. No
+ * other cases should return this code.
+ */
+ if (WARN_ON_ONCE(ret == -EPROTONOSUPPORT))
+ return -EINVAL;
+
+ return ret;
+}
+
+static void bundle_destroy(struct bundle_priv *pbundle, bool commit)
+{
+ unsigned int key_bitmap_len = pbundle->method_elm->key_bitmap_len;
+ struct uverbs_attr_bundle *bundle =
+ container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr);
+ struct bundle_alloc_head *memblock;
+ unsigned int i;
+
+ /* fast path for simple uobjects */
+ i = -1;
+ while ((i = find_next_bit(pbundle->uobj_finalize, key_bitmap_len,
+ i + 1)) < key_bitmap_len) {
+ struct uverbs_attr *attr = &bundle->attrs[i];
+
+ uverbs_finalize_object(
+ attr->obj_attr.uobject,
+ attr->obj_attr.attr_elm->spec.u.obj.access,
+ test_bit(i, pbundle->uobj_hw_obj_valid),
+ commit, bundle);
+ }
+
+ i = -1;
+ while ((i = find_next_bit(pbundle->spec_finalize, key_bitmap_len,
+ i + 1)) < key_bitmap_len) {
+ struct uverbs_attr *attr = &bundle->attrs[i];
+ const struct uverbs_api_attr *attr_uapi;
+ void __rcu **slot;
+
+ slot = uapi_get_attr_for_method(
+ pbundle,
+ pbundle->method_key | uapi_bkey_to_key_attr(i));
+ if (WARN_ON(!slot))
+ continue;
+
+ attr_uapi = rcu_dereference_protected(*slot, true);
+
+ if (attr_uapi->spec.type == UVERBS_ATTR_TYPE_IDRS_ARRAY) {
+ uverbs_free_idrs_array(attr_uapi, &attr->objs_arr_attr,
+ commit, bundle);
+ }
+ }
+
+ for (memblock = pbundle->allocated_mem; memblock;) {
+ struct bundle_alloc_head *tmp = memblock;
+
+ memblock = memblock->next;
+ kvfree(tmp);
+ }
+}
+
+static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
+ struct ib_uverbs_ioctl_hdr *hdr,
+ struct ib_uverbs_attr __user *user_attrs)
+{
+ const struct uverbs_api_ioctl_method *method_elm;
+ struct uverbs_api *uapi = ufile->device->uapi;
+ struct radix_tree_iter attrs_iter;
+ struct bundle_priv *pbundle;
+ struct bundle_priv onstack;
+ void __rcu **slot;
+ int ret;
+
+ if (unlikely(hdr->driver_id != uapi->driver_id))
+ return -EINVAL;
+
+ slot = radix_tree_iter_lookup(
+ &uapi->radix, &attrs_iter,
+ uapi_key_obj(hdr->object_id) |
+ uapi_key_ioctl_method(hdr->method_id));
+ if (unlikely(!slot))
+ return -EPROTONOSUPPORT;
+ method_elm = rcu_dereference_protected(*slot, true);
+
+ if (!method_elm->use_stack) {
+ pbundle = kmalloc(method_elm->bundle_size, GFP_KERNEL);
+ if (!pbundle)
+ return -ENOMEM;
+ pbundle->internal_avail =
+ method_elm->bundle_size -
+ offsetof(struct bundle_priv, internal_buffer);
+ pbundle->alloc_head.next = NULL;
+ pbundle->allocated_mem = container_of(&pbundle->alloc_head,
+ struct bundle_alloc_head, hdr);
+ } else {
+ pbundle = &onstack;
+ pbundle->internal_avail = sizeof(pbundle->internal_buffer);
+ pbundle->allocated_mem = NULL;
+ }
+
+ /* Space for the pbundle->bundle.attrs flex array */
+ pbundle->method_elm = method_elm;
+ pbundle->method_key = attrs_iter.index;
+ pbundle->bundle.ufile = ufile;
+ pbundle->bundle.context = NULL; /* only valid if bundle has uobject */
+ pbundle->radix = &uapi->radix;
+ pbundle->radix_slots = slot;
+ pbundle->radix_slots_len = radix_tree_chunk_size(&attrs_iter);
+ pbundle->user_attrs = user_attrs;
+
+ pbundle->internal_used = ALIGN(pbundle->method_elm->key_bitmap_len *
+ sizeof(*container_of(&pbundle->bundle,
+ struct uverbs_attr_bundle, hdr)->attrs),
+ sizeof(*pbundle->internal_buffer));
+ memset(pbundle->bundle.attr_present, 0,
+ sizeof(pbundle->bundle.attr_present));
+ memset(pbundle->uobj_finalize, 0, sizeof(pbundle->uobj_finalize));
+ memset(pbundle->spec_finalize, 0, sizeof(pbundle->spec_finalize));
+ memset(pbundle->uobj_hw_obj_valid, 0,
+ sizeof(pbundle->uobj_hw_obj_valid));
+
+ ret = ib_uverbs_run_method(pbundle, hdr->num_attrs);
+ bundle_destroy(pbundle, ret == 0);
+ return ret;
+}
+
+long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct ib_uverbs_file *file = filp->private_data;
+ struct ib_uverbs_ioctl_hdr __user *user_hdr =
+ (struct ib_uverbs_ioctl_hdr __user *)arg;
+ struct ib_uverbs_ioctl_hdr hdr;
+ int srcu_key;
+ int err;
+
+ if (unlikely(cmd != RDMA_VERBS_IOCTL))
+ return -ENOIOCTLCMD;
+
+ err = copy_from_user(&hdr, user_hdr, sizeof(hdr));
+ if (err)
+ return -EFAULT;
+
+ if (hdr.length > PAGE_SIZE ||
+ hdr.length != struct_size(&hdr, attrs, hdr.num_attrs))
+ return -EINVAL;
+
+ if (hdr.reserved1 || hdr.reserved2)
+ return -EPROTONOSUPPORT;
+
+ srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
+ err = ib_uverbs_cmd_verbs(file, &hdr, user_hdr->attrs);
+ srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
+ return err;
+}
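
The header checks above can be read as one predicate; the hypothetical helper below (not part of the patch) restates them using only fields that appear in this function. The real paths differ in errno (-EINVAL for a bad length, -EPROTONOSUPPORT for non-zero reserved fields, -EINVAL for a driver_id mismatch in ib_uverbs_cmd_verbs()); the sketch only shows what must hold for the call to proceed.

static bool example_hdr_is_valid(const struct uverbs_api *uapi,
                                 const struct ib_uverbs_ioctl_hdr *hdr)
{
        /* Length must cover exactly num_attrs attribute slots, within a page. */
        return hdr->length <= PAGE_SIZE &&
               hdr->length == struct_size(hdr, attrs, hdr->num_attrs) &&
               !hdr->reserved1 && !hdr->reserved2 &&
               hdr->driver_id == uapi->driver_id;
}
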
+
+int uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, u64 allowed_bits)
+{
+ const struct uverbs_attr *attr;
+ u64 flags;
+
+ attr = uverbs_attr_get(attrs_bundle, idx);
+ /* Missing attribute means 0 flags */
+ if (IS_ERR(attr)) {
+ *to = 0;
+ return 0;
+ }
+
+ /*
+ * New userspace code should use 8 bytes to pass flags, but we
+ * transparently support old userspaces that were using 4 bytes as
+ * well.
+ */
+ if (attr->ptr_attr.len == 8)
+ flags = attr->ptr_attr.data;
+ else if (attr->ptr_attr.len == 4)
+ flags = *(u32 *)&attr->ptr_attr.data;
+ else
+ return -EINVAL;
+
+ if (flags & ~allowed_bits)
+ return -EINVAL;
+
+ *to = flags;
+ return 0;
+}
+EXPORT_SYMBOL(uverbs_get_flags64);
+
+int uverbs_get_flags32(u32 *to, const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, u64 allowed_bits)
+{
+ u64 flags;
+ int ret;
+
+ ret = uverbs_get_flags64(&flags, attrs_bundle, idx, allowed_bits);
+ if (ret)
+ return ret;
+
+ if (flags > U32_MAX)
+ return -EINVAL;
+ *to = flags;
+
+ return 0;
+}
+EXPORT_SYMBOL(uverbs_get_flags32);
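
A hedged usage sketch for the two flags helpers; the attribute ID and flag bits are invented for illustration.

enum { EXAMPLE_ATTR_FLAGS = 1 };
enum { EXAMPLE_FLAG_A = 1 << 0, EXAMPLE_FLAG_B = 1 << 1 };

static int example_get_flags(struct uverbs_attr_bundle *attrs)
{
        u32 flags;
        int ret;

        /* A missing attribute yields flags == 0; unknown bits fail -EINVAL. */
        ret = uverbs_get_flags32(&flags, attrs, EXAMPLE_ATTR_FLAGS,
                                 EXAMPLE_FLAG_A | EXAMPLE_FLAG_B);
        if (ret)
                return ret;

        return 0;
}
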
+
+/*
+ * Fill an ib_udata struct (core or uhw) using the given attribute IDs.
+ * This is primarily used to convert the UVERBS_ATTR_UHW() into the
+ * ib_udata format used by the drivers.
+ */
+void uverbs_fill_udata(struct uverbs_attr_bundle *bundle,
+ struct ib_udata *udata, unsigned int attr_in,
+ unsigned int attr_out)
+{
+ struct bundle_priv *pbundle =
+ container_of(&bundle->hdr, struct bundle_priv, bundle);
+ struct uverbs_attr_bundle *bundle_aux =
+ container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr);
+ const struct uverbs_attr *in =
+ uverbs_attr_get(bundle_aux, attr_in);
+ const struct uverbs_attr *out =
+ uverbs_attr_get(bundle_aux, attr_out);
+
+ if (!IS_ERR(in)) {
+ udata->inlen = in->ptr_attr.len;
+ if (uverbs_attr_ptr_is_inline(in))
+ udata->inbuf =
+ &pbundle->user_attrs[in->ptr_attr.uattr_idx]
+ .data;
+ else
+ udata->inbuf = u64_to_user_ptr(in->ptr_attr.data);
+ } else {
+ udata->inbuf = NULL;
+ udata->inlen = 0;
+ }
+
+ if (!IS_ERR(out)) {
+ udata->outbuf = u64_to_user_ptr(out->ptr_attr.data);
+ udata->outlen = out->ptr_attr.len;
+ } else {
+ udata->outbuf = NULL;
+ udata->outlen = 0;
+ }
+}
+
+int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
+ const void *from, size_t size)
+{
+ const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
+ size_t min_size;
+
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
+
+ min_size = min_t(size_t, attr->ptr_attr.len, size);
+ if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size))
+ return -EFAULT;
+
+ return uverbs_set_output(bundle, attr);
+}
+EXPORT_SYMBOL(uverbs_copy_to);
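
For illustration, a hypothetical caller writing a fixed-size result to a PTR_OUT attribute; the attribute ID and response layout are invented. Because the copy is capped at the attribute length, userspace passing a shorter (older) response struct still works.

enum { EXAMPLE_ATTR_RESP = 2 };

struct example_resp {
        __u32 handle;
        __u32 reserved;
};

static int example_reply(struct uverbs_attr_bundle *attrs, u32 handle)
{
        struct example_resp resp = { .handle = handle };

        /* Copies min(attr len, sizeof(resp)) and flags the output as valid. */
        return uverbs_copy_to(attrs, EXAMPLE_ATTR_RESP, &resp, sizeof(resp));
}
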
+
+/*
+ * This is only used if the caller has directly used copy_to_user to write the
+ * data. It signals to user space that the buffer is filled in.
+ */
+int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx)
+{
+ const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
+
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
+
+ return uverbs_set_output(bundle, attr);
+}
+
+int _uverbs_get_const_signed(s64 *to,
+ const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, s64 lower_bound, u64 upper_bound,
+ s64 *def_val)
+{
+ const struct uverbs_attr *attr;
+
+ attr = uverbs_attr_get(attrs_bundle, idx);
+ if (IS_ERR(attr)) {
+ if ((PTR_ERR(attr) != -ENOENT) || !def_val)
+ return PTR_ERR(attr);
+
+ *to = *def_val;
+ } else {
+ *to = attr->ptr_attr.data;
+ }
+
+ if (*to < lower_bound || (*to > 0 && (u64)*to > upper_bound))
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(_uverbs_get_const_signed);
+
+int _uverbs_get_const_unsigned(u64 *to,
+ const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, u64 upper_bound, u64 *def_val)
+{
+ const struct uverbs_attr *attr;
+
+ attr = uverbs_attr_get(attrs_bundle, idx);
+ if (IS_ERR(attr)) {
+ if ((PTR_ERR(attr) != -ENOENT) || !def_val)
+ return PTR_ERR(attr);
+
+ *to = *def_val;
+ } else {
+ *to = attr->ptr_attr.data;
+ }
+
+ if (*to > upper_bound)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(_uverbs_get_const_unsigned);
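
A hedged sketch of reading a bounded constant with a default value. In-tree code normally reaches these through higher-level uverbs_get_const*() wrappers, so the direct call below only illustrates the signature defined here; the attribute ID is invented.

enum { EXAMPLE_ATTR_TIMEOUT = 3 };

static int example_get_timeout(struct uverbs_attr_bundle *attrs, s64 *timeout)
{
        s64 def_val = 0;

        /* Accepts 0..S64_MAX; falls back to def_val if the attr is absent. */
        return _uverbs_get_const_signed(timeout, attrs, EXAMPLE_ATTR_TIMEOUT,
                                        0, S64_MAX, &def_val);
}
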
+
+int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
+ size_t idx, const void *from, size_t size)
+{
+ const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
+
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
+
+ if (size < attr->ptr_attr.len) {
+ if (clear_user(u64_to_user_ptr(attr->ptr_attr.data) + size,
+ attr->ptr_attr.len - size))
+ return -EFAULT;
+ }
+ return uverbs_copy_to(bundle, idx, from, size);
+}
+EXPORT_SYMBOL(uverbs_copy_to_struct_or_zero);
+
+/* Once called, an abort will call through to the type's destroy_hw() */
+void uverbs_finalize_uobj_create(const struct uverbs_attr_bundle *bundle,
+ u16 idx)
+{
+ struct bundle_priv *pbundle =
+ container_of(&bundle->hdr, struct bundle_priv, bundle);
+
+ __set_bit(uapi_bkey_attr(uapi_key_attr(idx)),
+ pbundle->uobj_hw_obj_valid);
+}
+EXPORT_SYMBOL(uverbs_finalize_uobj_create);
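
A hedged sketch of where a creating method would call uverbs_finalize_uobj_create(); the attribute ID is invented, and uverbs_attr_get_uobject() is assumed to be the usual accessor for an object attribute in the bundle.

enum { EXAMPLE_ATTR_HANDLE = 1 };

static int example_create(struct uverbs_attr_bundle *attrs)
{
        struct ib_uobject *uobj =
                uverbs_attr_get_uobject(attrs, EXAMPLE_ATTR_HANDLE);

        if (IS_ERR(uobj))
                return PTR_ERR(uobj);

        /* ... create the HW object and store it in uobj->object ... */

        /* From here on, an abort path will invoke the type's destroy_hw(). */
        uverbs_finalize_uobj_create(attrs, EXAMPLE_ATTR_HANDLE);
        return 0;
}
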
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index c29a660c72fe..973fe2c7ef53 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -45,10 +45,18 @@
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
#include <linux/slab.h>
+#include <linux/sched/mm.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
+
+#include <rdma/ib.h>
+#include <rdma/uverbs_std_types.h>
+#include <rdma/rdma_netlink.h>
+#include <rdma/ib_ucaps.h>
#include "uverbs.h"
+#include "core_priv.h"
+#include "rdma_core.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace verbs access");
@@ -57,142 +65,120 @@ MODULE_LICENSE("Dual BSD/GPL");
enum {
IB_UVERBS_MAJOR = 231,
IB_UVERBS_BASE_MINOR = 192,
- IB_UVERBS_MAX_DEVICES = 32
+ IB_UVERBS_MAX_DEVICES = RDMA_MAX_PORTS,
+ IB_UVERBS_NUM_FIXED_MINOR = 32,
+ IB_UVERBS_NUM_DYNAMIC_MINOR = IB_UVERBS_MAX_DEVICES - IB_UVERBS_NUM_FIXED_MINOR,
};
#define IB_UVERBS_BASE_DEV MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)
-static struct class *uverbs_class;
-
-DEFINE_SPINLOCK(ib_uverbs_idr_lock);
-DEFINE_IDR(ib_uverbs_pd_idr);
-DEFINE_IDR(ib_uverbs_mr_idr);
-DEFINE_IDR(ib_uverbs_mw_idr);
-DEFINE_IDR(ib_uverbs_ah_idr);
-DEFINE_IDR(ib_uverbs_cq_idr);
-DEFINE_IDR(ib_uverbs_qp_idr);
-DEFINE_IDR(ib_uverbs_srq_idr);
-DEFINE_IDR(ib_uverbs_xrcd_idr);
-DEFINE_IDR(ib_uverbs_rule_idr);
-
-static DEFINE_SPINLOCK(map_lock);
-static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
-
-static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len) = {
- [IB_USER_VERBS_CMD_GET_CONTEXT] = ib_uverbs_get_context,
- [IB_USER_VERBS_CMD_QUERY_DEVICE] = ib_uverbs_query_device,
- [IB_USER_VERBS_CMD_QUERY_PORT] = ib_uverbs_query_port,
- [IB_USER_VERBS_CMD_ALLOC_PD] = ib_uverbs_alloc_pd,
- [IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd,
- [IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr,
- [IB_USER_VERBS_CMD_REREG_MR] = ib_uverbs_rereg_mr,
- [IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr,
- [IB_USER_VERBS_CMD_ALLOC_MW] = ib_uverbs_alloc_mw,
- [IB_USER_VERBS_CMD_DEALLOC_MW] = ib_uverbs_dealloc_mw,
- [IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel,
- [IB_USER_VERBS_CMD_CREATE_CQ] = ib_uverbs_create_cq,
- [IB_USER_VERBS_CMD_RESIZE_CQ] = ib_uverbs_resize_cq,
- [IB_USER_VERBS_CMD_POLL_CQ] = ib_uverbs_poll_cq,
- [IB_USER_VERBS_CMD_REQ_NOTIFY_CQ] = ib_uverbs_req_notify_cq,
- [IB_USER_VERBS_CMD_DESTROY_CQ] = ib_uverbs_destroy_cq,
- [IB_USER_VERBS_CMD_CREATE_QP] = ib_uverbs_create_qp,
- [IB_USER_VERBS_CMD_QUERY_QP] = ib_uverbs_query_qp,
- [IB_USER_VERBS_CMD_MODIFY_QP] = ib_uverbs_modify_qp,
- [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp,
- [IB_USER_VERBS_CMD_POST_SEND] = ib_uverbs_post_send,
- [IB_USER_VERBS_CMD_POST_RECV] = ib_uverbs_post_recv,
- [IB_USER_VERBS_CMD_POST_SRQ_RECV] = ib_uverbs_post_srq_recv,
- [IB_USER_VERBS_CMD_CREATE_AH] = ib_uverbs_create_ah,
- [IB_USER_VERBS_CMD_DESTROY_AH] = ib_uverbs_destroy_ah,
- [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast,
- [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast,
- [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq,
- [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq,
- [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq,
- [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq,
- [IB_USER_VERBS_CMD_OPEN_XRCD] = ib_uverbs_open_xrcd,
- [IB_USER_VERBS_CMD_CLOSE_XRCD] = ib_uverbs_close_xrcd,
- [IB_USER_VERBS_CMD_CREATE_XSRQ] = ib_uverbs_create_xsrq,
- [IB_USER_VERBS_CMD_OPEN_QP] = ib_uverbs_open_qp,
-};
-
-static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- struct ib_udata *ucore,
- struct ib_udata *uhw) = {
- [IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow,
- [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow,
- [IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device,
- [IB_USER_VERBS_EX_CMD_CREATE_CQ] = ib_uverbs_ex_create_cq,
-};
+static dev_t dynamic_uverbs_dev;
-static void ib_uverbs_add_one(struct ib_device *device);
+static DEFINE_IDA(uverbs_ida);
+static int ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);
+static struct ib_client uverbs_client;
-static void ib_uverbs_release_dev(struct kobject *kobj)
+static char *uverbs_devnode(const struct device *dev, umode_t *mode)
{
- struct ib_uverbs_device *dev =
- container_of(kobj, struct ib_uverbs_device, kobj);
-
- cleanup_srcu_struct(&dev->disassociate_srcu);
- kfree(dev);
+ if (mode)
+ *mode = 0666;
+ return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
-static struct kobj_type ib_uverbs_dev_ktype = {
- .release = ib_uverbs_release_dev,
+static const struct class uverbs_class = {
+ .name = "infiniband_verbs",
+ .devnode = uverbs_devnode,
};
-static void ib_uverbs_release_event_file(struct kref *ref)
+/*
+ * Must be called with the ufile->device->disassociate_srcu held, and the lock
+ * must be held until use of the ucontext is finished.
+ */
+struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile)
{
- struct ib_uverbs_event_file *file =
- container_of(ref, struct ib_uverbs_event_file, ref);
+ /*
+ * We do not hold the hw_destroy_rwsem lock for this flow; instead,
+ * srcu is used. It does not matter if someone races this with
+ * get_context: we get either NULL or a valid ucontext.
+ */
+ struct ib_ucontext *ucontext = smp_load_acquire(&ufile->ucontext);
- kfree(file);
+ if (!srcu_dereference(ufile->device->ib_dev,
+ &ufile->device->disassociate_srcu))
+ return ERR_PTR(-EIO);
+
+ if (!ucontext)
+ return ERR_PTR(-EINVAL);
+
+ return ucontext;
+}
+EXPORT_SYMBOL(ib_uverbs_get_ucontext_file);
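
As the comment above spells out, a caller has to enter the device's disassociate_srcu before calling ib_uverbs_get_ucontext_file() and stay inside the read-side section for as long as it uses the returned ucontext; ib_uverbs_mmap() later in this patch follows exactly this pattern. A minimal caller sketch, assuming a hypothetical helper name (example_use_ucontext) and abbreviated error handling, neither of which is part of this patch:

static int example_use_ucontext(struct ib_uverbs_file *ufile)
{
	struct ib_ucontext *ucontext;
	int srcu_key, ret = 0;

	/* Hold the SRCU read lock for the whole lifetime of the ucontext use */
	srcu_key = srcu_read_lock(&ufile->device->disassociate_srcu);
	ucontext = ib_uverbs_get_ucontext_file(ufile);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);	/* -EIO or -EINVAL */
		goto out;
	}
	/* ... use ucontext and ucontext->device here ... */
out:
	srcu_read_unlock(&ufile->device->disassociate_srcu, srcu_key);
	return ret;
}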
+
+int uverbs_dealloc_mw(struct ib_mw *mw)
+{
+ struct ib_pd *pd = mw->pd;
+ int ret;
+
+ ret = mw->device->ops.dealloc_mw(mw);
+ if (ret)
+ return ret;
+
+ atomic_dec(&pd->usecnt);
+ kfree(mw);
+ return ret;
}
-void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
- struct ib_uverbs_event_file *ev_file,
- struct ib_ucq_object *uobj)
+static void ib_uverbs_release_dev(struct device *device)
+{
+ struct ib_uverbs_device *dev =
+ container_of(device, struct ib_uverbs_device, dev);
+
+ uverbs_destroy_api(dev->uapi);
+ cleanup_srcu_struct(&dev->disassociate_srcu);
+ mutex_destroy(&dev->lists_mutex);
+ mutex_destroy(&dev->xrcd_tree_mutex);
+ kfree(dev);
+}
+
+void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file,
+ struct ib_ucq_object *uobj)
{
struct ib_uverbs_event *evt, *tmp;
if (ev_file) {
- spin_lock_irq(&ev_file->lock);
+ spin_lock_irq(&ev_file->ev_queue.lock);
list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
list_del(&evt->list);
kfree(evt);
}
- spin_unlock_irq(&ev_file->lock);
+ spin_unlock_irq(&ev_file->ev_queue.lock);
- kref_put(&ev_file->ref, ib_uverbs_release_event_file);
+ uverbs_uobject_put(&ev_file->uobj);
}
- spin_lock_irq(&file->async_file->lock);
- list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
- list_del(&evt->list);
- kfree(evt);
- }
- spin_unlock_irq(&file->async_file->lock);
+ ib_uverbs_release_uevent(&uobj->uevent);
}
-void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
- struct ib_uevent_object *uobj)
+void ib_uverbs_release_uevent(struct ib_uevent_object *uobj)
{
+ struct ib_uverbs_async_event_file *async_file = uobj->event_file;
struct ib_uverbs_event *evt, *tmp;
- spin_lock_irq(&file->async_file->lock);
+ if (!async_file)
+ return;
+
+ spin_lock_irq(&async_file->ev_queue.lock);
list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
list_del(&evt->list);
kfree(evt);
}
- spin_unlock_irq(&file->async_file->lock);
+ spin_unlock_irq(&async_file->ev_queue.lock);
+ uverbs_uobject_put(&async_file->uobj);
}
-static void ib_uverbs_detach_umcast(struct ib_qp *qp,
- struct ib_uqp_object *uobj)
+void ib_uverbs_detach_umcast(struct ib_qp *qp,
+ struct ib_uqp_object *uobj)
{
struct ib_uverbs_mcast_entry *mcast, *tmp;
@@ -203,187 +189,84 @@ static void ib_uverbs_detach_umcast(struct ib_qp *qp,
}
}
-static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
- struct ib_ucontext *context)
-{
- struct ib_uobject *uobj, *tmp;
-
- context->closing = 1;
-
- list_for_each_entry_safe(uobj, tmp, &context->ah_list, list) {
- struct ib_ah *ah = uobj->object;
-
- idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
- ib_destroy_ah(ah);
- kfree(uobj);
- }
-
- /* Remove MWs before QPs, in order to support type 2A MWs. */
- list_for_each_entry_safe(uobj, tmp, &context->mw_list, list) {
- struct ib_mw *mw = uobj->object;
-
- idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
- ib_dealloc_mw(mw);
- kfree(uobj);
- }
-
- list_for_each_entry_safe(uobj, tmp, &context->rule_list, list) {
- struct ib_flow *flow_id = uobj->object;
-
- idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
- ib_destroy_flow(flow_id);
- kfree(uobj);
- }
-
- list_for_each_entry_safe(uobj, tmp, &context->qp_list, list) {
- struct ib_qp *qp = uobj->object;
- struct ib_uqp_object *uqp =
- container_of(uobj, struct ib_uqp_object, uevent.uobject);
-
- idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
- if (qp != qp->real_qp) {
- ib_close_qp(qp);
- } else {
- ib_uverbs_detach_umcast(qp, uqp);
- ib_destroy_qp(qp);
- }
- ib_uverbs_release_uevent(file, &uqp->uevent);
- kfree(uqp);
- }
-
- list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) {
- struct ib_srq *srq = uobj->object;
- struct ib_uevent_object *uevent =
- container_of(uobj, struct ib_uevent_object, uobject);
-
- idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
- ib_destroy_srq(srq);
- ib_uverbs_release_uevent(file, uevent);
- kfree(uevent);
- }
-
- list_for_each_entry_safe(uobj, tmp, &context->cq_list, list) {
- struct ib_cq *cq = uobj->object;
- struct ib_uverbs_event_file *ev_file = cq->cq_context;
- struct ib_ucq_object *ucq =
- container_of(uobj, struct ib_ucq_object, uobject);
-
- idr_remove_uobj(&ib_uverbs_cq_idr, uobj);
- ib_destroy_cq(cq);
- ib_uverbs_release_ucq(file, ev_file, ucq);
- kfree(ucq);
- }
-
- list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) {
- struct ib_mr *mr = uobj->object;
-
- idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
- ib_dereg_mr(mr);
- kfree(uobj);
- }
-
- mutex_lock(&file->device->xrcd_tree_mutex);
- list_for_each_entry_safe(uobj, tmp, &context->xrcd_list, list) {
- struct ib_xrcd *xrcd = uobj->object;
- struct ib_uxrcd_object *uxrcd =
- container_of(uobj, struct ib_uxrcd_object, uobject);
-
- idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
- ib_uverbs_dealloc_xrcd(file->device, xrcd);
- kfree(uxrcd);
- }
- mutex_unlock(&file->device->xrcd_tree_mutex);
-
- list_for_each_entry_safe(uobj, tmp, &context->pd_list, list) {
- struct ib_pd *pd = uobj->object;
-
- idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
- ib_dealloc_pd(pd);
- kfree(uobj);
- }
-
- put_pid(context->tgid);
-
- return context->device->dealloc_ucontext(context);
-}
-
static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
{
complete(&dev->comp);
}
-static void ib_uverbs_release_file(struct kref *ref)
+void ib_uverbs_release_file(struct kref *ref)
{
struct ib_uverbs_file *file =
container_of(ref, struct ib_uverbs_file, ref);
struct ib_device *ib_dev;
int srcu_key;
+ release_ufile_idr_uobject(file);
+
srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
ib_dev = srcu_dereference(file->device->ib_dev,
&file->device->disassociate_srcu);
- if (ib_dev && !ib_dev->disassociate_ucontext)
- module_put(ib_dev->owner);
+ if (ib_dev && !ib_dev->ops.disassociate_ucontext)
+ module_put(ib_dev->ops.owner);
srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
- if (atomic_dec_and_test(&file->device->refcount))
+ if (refcount_dec_and_test(&file->device->refcount))
ib_uverbs_comp_dev(file->device);
+ if (file->default_async_file)
+ uverbs_uobject_put(&file->default_async_file->uobj);
+ put_device(&file->device->dev);
+
+ if (file->disassociate_page)
+ __free_pages(file->disassociate_page, 0);
+ mutex_destroy(&file->disassociation_lock);
+ mutex_destroy(&file->umap_lock);
+ mutex_destroy(&file->ucontext_lock);
kfree(file);
}
-static ssize_t ib_uverbs_event_read(struct file *filp, char __user *buf,
- size_t count, loff_t *pos)
+static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
+ struct file *filp, char __user *buf,
+ size_t count, loff_t *pos,
+ size_t eventsz)
{
- struct ib_uverbs_event_file *file = filp->private_data;
struct ib_uverbs_event *event;
- int eventsz;
int ret = 0;
- spin_lock_irq(&file->lock);
+ spin_lock_irq(&ev_queue->lock);
- while (list_empty(&file->event_list)) {
- spin_unlock_irq(&file->lock);
+ while (list_empty(&ev_queue->event_list)) {
+ if (ev_queue->is_closed) {
+ spin_unlock_irq(&ev_queue->lock);
+ return -EIO;
+ }
+ spin_unlock_irq(&ev_queue->lock);
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
- if (wait_event_interruptible(file->poll_wait,
- (!list_empty(&file->event_list) ||
- /* The barriers built into wait_event_interruptible()
- * and wake_up() guarentee this will see the null set
- * without using RCU
- */
- !file->uverbs_file->device->ib_dev)))
+ if (wait_event_interruptible(ev_queue->poll_wait,
+ (!list_empty(&ev_queue->event_list) ||
+ ev_queue->is_closed)))
return -ERESTARTSYS;
- /* If device was disassociated and no event exists set an error */
- if (list_empty(&file->event_list) &&
- !file->uverbs_file->device->ib_dev)
- return -EIO;
-
- spin_lock_irq(&file->lock);
+ spin_lock_irq(&ev_queue->lock);
}
- event = list_entry(file->event_list.next, struct ib_uverbs_event, list);
-
- if (file->is_async)
- eventsz = sizeof (struct ib_uverbs_async_event_desc);
- else
- eventsz = sizeof (struct ib_uverbs_comp_event_desc);
+ event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);
if (eventsz > count) {
ret = -EINVAL;
event = NULL;
} else {
- list_del(file->event_list.next);
+ list_del(ev_queue->event_list.next);
if (event->counter) {
++(*event->counter);
list_del(&event->obj_list);
}
}
- spin_unlock_irq(&file->lock);
+ spin_unlock_irq(&ev_queue->lock);
if (event) {
if (copy_to_user(buf, event, eventsz))
@@ -397,466 +280,641 @@ static ssize_t ib_uverbs_event_read(struct file *filp, char __user *buf,
return ret;
}
-static unsigned int ib_uverbs_event_poll(struct file *filp,
+static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct ib_uverbs_async_event_file *file = filp->private_data;
+
+ return ib_uverbs_event_read(&file->ev_queue, filp, buf, count, pos,
+ sizeof(struct ib_uverbs_async_event_desc));
+}
+
+static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct ib_uverbs_completion_event_file *comp_ev_file =
+ filp->private_data;
+
+ return ib_uverbs_event_read(&comp_ev_file->ev_queue, filp, buf, count,
+ pos,
+ sizeof(struct ib_uverbs_comp_event_desc));
+}
+
+static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
+ struct file *filp,
struct poll_table_struct *wait)
{
- unsigned int pollflags = 0;
- struct ib_uverbs_event_file *file = filp->private_data;
+ __poll_t pollflags = 0;
- poll_wait(filp, &file->poll_wait, wait);
+ poll_wait(filp, &ev_queue->poll_wait, wait);
- spin_lock_irq(&file->lock);
- if (!list_empty(&file->event_list))
- pollflags = POLLIN | POLLRDNORM;
- spin_unlock_irq(&file->lock);
+ spin_lock_irq(&ev_queue->lock);
+ if (!list_empty(&ev_queue->event_list))
+ pollflags = EPOLLIN | EPOLLRDNORM;
+ else if (ev_queue->is_closed)
+ pollflags = EPOLLERR;
+ spin_unlock_irq(&ev_queue->lock);
return pollflags;
}
-static int ib_uverbs_event_fasync(int fd, struct file *filp, int on)
+static __poll_t ib_uverbs_async_event_poll(struct file *filp,
+ struct poll_table_struct *wait)
{
- struct ib_uverbs_event_file *file = filp->private_data;
+ struct ib_uverbs_async_event_file *file = filp->private_data;
- return fasync_helper(fd, filp, on, &file->async_queue);
+ return ib_uverbs_event_poll(&file->ev_queue, filp, wait);
}
-static int ib_uverbs_event_close(struct inode *inode, struct file *filp)
+static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
+ struct poll_table_struct *wait)
{
- struct ib_uverbs_event_file *file = filp->private_data;
- struct ib_uverbs_event *entry, *tmp;
- int closed_already = 0;
-
- mutex_lock(&file->uverbs_file->device->lists_mutex);
- spin_lock_irq(&file->lock);
- closed_already = file->is_closed;
- file->is_closed = 1;
- list_for_each_entry_safe(entry, tmp, &file->event_list, list) {
- if (entry->counter)
- list_del(&entry->obj_list);
- kfree(entry);
- }
- spin_unlock_irq(&file->lock);
- if (!closed_already) {
- list_del(&file->list);
- if (file->is_async)
- ib_unregister_event_handler(&file->uverbs_file->
- event_handler);
- }
- mutex_unlock(&file->uverbs_file->device->lists_mutex);
+ struct ib_uverbs_completion_event_file *comp_ev_file =
+ filp->private_data;
- kref_put(&file->uverbs_file->ref, ib_uverbs_release_file);
- kref_put(&file->ref, ib_uverbs_release_event_file);
+ return ib_uverbs_event_poll(&comp_ev_file->ev_queue, filp, wait);
+}
- return 0;
+static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
+{
+ struct ib_uverbs_async_event_file *file = filp->private_data;
+
+ return fasync_helper(fd, filp, on, &file->ev_queue.async_queue);
}
-static const struct file_operations uverbs_event_fops = {
+static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)
+{
+ struct ib_uverbs_completion_event_file *comp_ev_file =
+ filp->private_data;
+
+ return fasync_helper(fd, filp, on, &comp_ev_file->ev_queue.async_queue);
+}
+
+const struct file_operations uverbs_event_fops = {
+ .owner = THIS_MODULE,
+ .read = ib_uverbs_comp_event_read,
+ .poll = ib_uverbs_comp_event_poll,
+ .release = uverbs_uobject_fd_release,
+ .fasync = ib_uverbs_comp_event_fasync,
+};
+
+const struct file_operations uverbs_async_event_fops = {
.owner = THIS_MODULE,
- .read = ib_uverbs_event_read,
- .poll = ib_uverbs_event_poll,
- .release = ib_uverbs_event_close,
- .fasync = ib_uverbs_event_fasync,
- .llseek = no_llseek,
+ .read = ib_uverbs_async_event_read,
+ .poll = ib_uverbs_async_event_poll,
+ .release = uverbs_async_event_release,
+ .fasync = ib_uverbs_async_event_fasync,
};
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
{
- struct ib_uverbs_event_file *file = cq_context;
+ struct ib_uverbs_event_queue *ev_queue = cq_context;
struct ib_ucq_object *uobj;
struct ib_uverbs_event *entry;
unsigned long flags;
- if (!file)
+ if (!ev_queue)
return;
- spin_lock_irqsave(&file->lock, flags);
- if (file->is_closed) {
- spin_unlock_irqrestore(&file->lock, flags);
+ spin_lock_irqsave(&ev_queue->lock, flags);
+ if (ev_queue->is_closed) {
+ spin_unlock_irqrestore(&ev_queue->lock, flags);
return;
}
- entry = kmalloc(sizeof *entry, GFP_ATOMIC);
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry) {
- spin_unlock_irqrestore(&file->lock, flags);
+ spin_unlock_irqrestore(&ev_queue->lock, flags);
return;
}
- uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);
+ uobj = cq->uobject;
- entry->desc.comp.cq_handle = cq->uobject->user_handle;
+ entry->desc.comp.cq_handle = cq->uobject->uevent.uobject.user_handle;
entry->counter = &uobj->comp_events_reported;
- list_add_tail(&entry->list, &file->event_list);
+ list_add_tail(&entry->list, &ev_queue->event_list);
list_add_tail(&entry->obj_list, &uobj->comp_list);
- spin_unlock_irqrestore(&file->lock, flags);
+ spin_unlock_irqrestore(&ev_queue->lock, flags);
- wake_up_interruptible(&file->poll_wait);
- kill_fasync(&file->async_queue, SIGIO, POLL_IN);
+ wake_up_interruptible(&ev_queue->poll_wait);
+ kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN);
}
-static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
- __u64 element, __u64 event,
- struct list_head *obj_list,
- u32 *counter)
+void ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
+ __u64 element, __u64 event,
+ struct list_head *obj_list, u32 *counter)
{
struct ib_uverbs_event *entry;
unsigned long flags;
- spin_lock_irqsave(&file->async_file->lock, flags);
- if (file->async_file->is_closed) {
- spin_unlock_irqrestore(&file->async_file->lock, flags);
+ if (!async_file)
+ return;
+
+ spin_lock_irqsave(&async_file->ev_queue.lock, flags);
+ if (async_file->ev_queue.is_closed) {
+ spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);
return;
}
- entry = kmalloc(sizeof *entry, GFP_ATOMIC);
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry) {
- spin_unlock_irqrestore(&file->async_file->lock, flags);
+ spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);
return;
}
- entry->desc.async.element = element;
+ entry->desc.async.element = element;
entry->desc.async.event_type = event;
- entry->desc.async.reserved = 0;
- entry->counter = counter;
+ entry->desc.async.reserved = 0;
+ entry->counter = counter;
- list_add_tail(&entry->list, &file->async_file->event_list);
+ list_add_tail(&entry->list, &async_file->ev_queue.event_list);
if (obj_list)
list_add_tail(&entry->obj_list, obj_list);
- spin_unlock_irqrestore(&file->async_file->lock, flags);
+ spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);
- wake_up_interruptible(&file->async_file->poll_wait);
- kill_fasync(&file->async_file->async_queue, SIGIO, POLL_IN);
+ wake_up_interruptible(&async_file->ev_queue.poll_wait);
+ kill_fasync(&async_file->ev_queue.async_queue, SIGIO, POLL_IN);
}
-void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
+static void uverbs_uobj_event(struct ib_uevent_object *eobj,
+ struct ib_event *event)
{
- struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
- struct ib_ucq_object, uobject);
+ ib_uverbs_async_handler(eobj->event_file,
+ eobj->uobject.user_handle, event->event,
+ &eobj->event_list, &eobj->events_reported);
+}
- ib_uverbs_async_handler(uobj->uverbs_file, uobj->uobject.user_handle,
- event->event, &uobj->async_list,
- &uobj->async_events_reported);
+void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
+{
+ uverbs_uobj_event(&event->element.cq->uobject->uevent, event);
}
void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
{
- struct ib_uevent_object *uobj;
-
/* for XRC target qp's, check that qp is live */
- if (!event->element.qp->uobject || !event->element.qp->uobject->live)
+ if (!event->element.qp->uobject)
return;
- uobj = container_of(event->element.qp->uobject,
- struct ib_uevent_object, uobject);
+ uverbs_uobj_event(&event->element.qp->uobject->uevent, event);
+}
- ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
- event->event, &uobj->event_list,
- &uobj->events_reported);
+void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr)
+{
+ uverbs_uobj_event(&event->element.wq->uobject->uevent, event);
}
void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
{
- struct ib_uevent_object *uobj;
-
- uobj = container_of(event->element.srq->uobject,
- struct ib_uevent_object, uobject);
-
- ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
- event->event, &uobj->event_list,
- &uobj->events_reported);
+ uverbs_uobj_event(&event->element.srq->uobject->uevent, event);
}
-void ib_uverbs_event_handler(struct ib_event_handler *handler,
- struct ib_event *event)
+static void ib_uverbs_event_handler(struct ib_event_handler *handler,
+ struct ib_event *event)
{
- struct ib_uverbs_file *file =
- container_of(handler, struct ib_uverbs_file, event_handler);
-
- ib_uverbs_async_handler(file, event->element.port_num, event->event,
- NULL, NULL);
+ ib_uverbs_async_handler(
+ container_of(handler, struct ib_uverbs_async_event_file,
+ event_handler),
+ event->element.port_num, event->event, NULL, NULL);
}
-void ib_uverbs_free_async_event_file(struct ib_uverbs_file *file)
+void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue)
{
- kref_put(&file->async_file->ref, ib_uverbs_release_event_file);
- file->async_file = NULL;
+ spin_lock_init(&ev_queue->lock);
+ INIT_LIST_HEAD(&ev_queue->event_list);
+ init_waitqueue_head(&ev_queue->poll_wait);
+ ev_queue->is_closed = 0;
+ ev_queue->async_queue = NULL;
}
-struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
- struct ib_device *ib_dev,
- int is_async)
+void ib_uverbs_init_async_event_file(
+ struct ib_uverbs_async_event_file *async_file)
{
- struct ib_uverbs_event_file *ev_file;
- struct file *filp;
- int ret;
+ struct ib_uverbs_file *uverbs_file = async_file->uobj.ufile;
+ struct ib_device *ib_dev = async_file->uobj.context->device;
- ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL);
- if (!ev_file)
- return ERR_PTR(-ENOMEM);
-
- kref_init(&ev_file->ref);
- spin_lock_init(&ev_file->lock);
- INIT_LIST_HEAD(&ev_file->event_list);
- init_waitqueue_head(&ev_file->poll_wait);
- ev_file->uverbs_file = uverbs_file;
- kref_get(&ev_file->uverbs_file->ref);
- ev_file->async_queue = NULL;
- ev_file->is_closed = 0;
-
- filp = anon_inode_getfile("[infinibandevent]", &uverbs_event_fops,
- ev_file, O_RDONLY);
- if (IS_ERR(filp))
- goto err_put_refs;
-
- mutex_lock(&uverbs_file->device->lists_mutex);
- list_add_tail(&ev_file->list,
- &uverbs_file->device->uverbs_events_file_list);
- mutex_unlock(&uverbs_file->device->lists_mutex);
-
- if (is_async) {
- WARN_ON(uverbs_file->async_file);
- uverbs_file->async_file = ev_file;
- kref_get(&uverbs_file->async_file->ref);
- INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler,
- ib_dev,
- ib_uverbs_event_handler);
- ret = ib_register_event_handler(&uverbs_file->event_handler);
- if (ret)
- goto err_put_file;
+ ib_uverbs_init_event_queue(&async_file->ev_queue);
- /* At that point async file stuff was fully set */
- ev_file->is_async = 1;
+ /* The first async_event_file becomes the default one for the file. */
+ mutex_lock(&uverbs_file->ucontext_lock);
+ if (!uverbs_file->default_async_file) {
+ /* Pairs with the put in ib_uverbs_release_file */
+ uverbs_uobject_get(&async_file->uobj);
+ smp_store_release(&uverbs_file->default_async_file, async_file);
}
+ mutex_unlock(&uverbs_file->ucontext_lock);
- return filp;
-
-err_put_file:
- fput(filp);
- kref_put(&uverbs_file->async_file->ref, ib_uverbs_release_event_file);
- uverbs_file->async_file = NULL;
- return ERR_PTR(ret);
-
-err_put_refs:
- kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file);
- kref_put(&ev_file->ref, ib_uverbs_release_event_file);
- return filp;
+ INIT_IB_EVENT_HANDLER(&async_file->event_handler, ib_dev,
+ ib_uverbs_event_handler);
+ ib_register_event_handler(&async_file->event_handler);
}
-/*
- * Look up a completion event file by FD. If lookup is successful,
- * takes a ref to the event file struct that it returns; if
- * unsuccessful, returns NULL.
- */
-struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd)
+static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
+ struct ib_uverbs_ex_cmd_hdr *ex_hdr, size_t count,
+ const struct uverbs_api_write_method *method_elm)
{
- struct ib_uverbs_event_file *ev_file = NULL;
- struct fd f = fdget(fd);
+ if (method_elm->is_ex) {
+ count -= sizeof(*hdr) + sizeof(*ex_hdr);
- if (!f.file)
- return NULL;
+ if ((hdr->in_words + ex_hdr->provider_in_words) * 8 != count)
+ return -EINVAL;
- if (f.file->f_op != &uverbs_event_fops)
- goto out;
+ if (hdr->in_words * 8 < method_elm->req_size)
+ return -ENOSPC;
- ev_file = f.file->private_data;
- if (ev_file->is_async) {
- ev_file = NULL;
- goto out;
+ if (ex_hdr->cmd_hdr_reserved)
+ return -EINVAL;
+
+ if (ex_hdr->response) {
+ if (!hdr->out_words && !ex_hdr->provider_out_words)
+ return -EINVAL;
+
+ if (hdr->out_words * 8 < method_elm->resp_size)
+ return -ENOSPC;
+
+ if (!access_ok(u64_to_user_ptr(ex_hdr->response),
+ (hdr->out_words + ex_hdr->provider_out_words) * 8))
+ return -EFAULT;
+ } else {
+ if (hdr->out_words || ex_hdr->provider_out_words)
+ return -EINVAL;
+ }
+
+ return 0;
}
- kref_get(&ev_file->ref);
+ /* not extended command */
+ if (hdr->in_words * 4 != count)
+ return -EINVAL;
-out:
- fdput(f);
- return ev_file;
+ if (count < method_elm->req_size + sizeof(*hdr)) {
+ /*
+ * rdma-core v18 and v19 have a bug where they send DESTROY_CQ
+ * with a 16 byte write instead of 24. Old kernels didn't
+ * check the size, so they allowed this. Now that the size is
+ * checked, provide a compatibility workaround to not break
+ * those userspaces.
+ */
+ if (hdr->command == IB_USER_VERBS_CMD_DESTROY_CQ &&
+ count == 16) {
+ hdr->in_words = 6;
+ return 0;
+ }
+ return -ENOSPC;
+ }
+ if (hdr->out_words * 4 < method_elm->resp_size)
+ return -ENOSPC;
+
+ return 0;
}
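
verify_hdr() encodes the ABI sizing rules: a classic (non-extended) command counts the whole write, header included, in 4-byte words via hdr.in_words and its response buffer in 4-byte words via hdr.out_words, while an extended command counts the payload following both headers in 8-byte words. A rough userspace-side sketch of the classic sizing, under the assumption of a hypothetical request layout (struct example_cmd, struct example_req and example_fill_hdr are placeholders, not part of this patch):

#include <rdma/ib_user_verbs.h>	/* struct ib_uverbs_cmd_hdr */

/* Hypothetical request body; real layouts live in <rdma/ib_user_verbs.h>. */
struct example_cmd {
	__aligned_u64 response;		/* user pointer for the reply, when one is expected */
	__u32	      some_field;
	__u32	      reserved;
};

struct example_req {
	struct ib_uverbs_cmd_hdr hdr;
	struct example_cmd	 cmd;
};

static void example_fill_hdr(struct example_req *req, __u32 command,
			     size_t resp_size)
{
	req->hdr.command   = command;
	/* in_words covers the header plus the request, in 4-byte words */
	req->hdr.in_words  = sizeof(*req) / 4;
	/* out_words covers the response buffer, in 4-byte words */
	req->hdr.out_words = resp_size / 4;
}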
static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
struct ib_uverbs_file *file = filp->private_data;
- struct ib_device *ib_dev;
+ const struct uverbs_api_write_method *method_elm;
+ struct uverbs_api *uapi = file->device->uapi;
+ struct ib_uverbs_ex_cmd_hdr ex_hdr;
struct ib_uverbs_cmd_hdr hdr;
- __u32 flags;
+ struct uverbs_attr_bundle bundle;
int srcu_key;
ssize_t ret;
- if (count < sizeof hdr)
+ if (!ib_safe_file_access(filp)) {
+ pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+ task_tgid_vnr(current), current->comm);
+ return -EACCES;
+ }
+
+ if (count < sizeof(hdr))
return -EINVAL;
- if (copy_from_user(&hdr, buf, sizeof hdr))
+ if (copy_from_user(&hdr, buf, sizeof(hdr)))
return -EFAULT;
- srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
- ib_dev = srcu_dereference(file->device->ib_dev,
- &file->device->disassociate_srcu);
- if (!ib_dev) {
- ret = -EIO;
- goto out;
+ method_elm = uapi_get_method(uapi, hdr.command);
+ if (IS_ERR(method_elm))
+ return PTR_ERR(method_elm);
+
+ if (method_elm->is_ex) {
+ if (count < (sizeof(hdr) + sizeof(ex_hdr)))
+ return -EINVAL;
+ if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr)))
+ return -EFAULT;
}
- flags = (hdr.command &
- IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
+ ret = verify_hdr(&hdr, &ex_hdr, count, method_elm);
+ if (ret)
+ return ret;
- if (!flags) {
- __u32 command;
+ srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
- if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
- IB_USER_VERBS_CMD_COMMAND_MASK)) {
- ret = -EINVAL;
- goto out;
+ buf += sizeof(hdr);
+
+ memset(bundle.attr_present, 0, sizeof(bundle.attr_present));
+ bundle.ufile = file;
+ bundle.context = NULL; /* only valid if bundle has uobject */
+ bundle.uobject = NULL;
+ if (!method_elm->is_ex) {
+ size_t in_len = hdr.in_words * 4 - sizeof(hdr);
+ size_t out_len = hdr.out_words * 4;
+ u64 response = 0;
+
+ if (method_elm->has_udata) {
+ bundle.driver_udata.inlen =
+ in_len - method_elm->req_size;
+ in_len = method_elm->req_size;
+ if (bundle.driver_udata.inlen)
+ bundle.driver_udata.inbuf = buf + in_len;
+ else
+ bundle.driver_udata.inbuf = NULL;
+ } else {
+ memset(&bundle.driver_udata, 0,
+ sizeof(bundle.driver_udata));
}
- command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
-
- if (command >= ARRAY_SIZE(uverbs_cmd_table) ||
- !uverbs_cmd_table[command]) {
- ret = -EINVAL;
- goto out;
+ if (method_elm->has_resp) {
+ /*
+ * The macros check that if has_resp is set
+ * then the command request structure starts
+ * with a '__aligned u64 response' member.
+ */
+ ret = get_user(response, (const u64 __user *)buf);
+ if (ret)
+ goto out_unlock;
+
+ if (method_elm->has_udata) {
+ bundle.driver_udata.outlen =
+ out_len - method_elm->resp_size;
+ out_len = method_elm->resp_size;
+ if (bundle.driver_udata.outlen)
+ bundle.driver_udata.outbuf =
+ u64_to_user_ptr(response +
+ out_len);
+ else
+ bundle.driver_udata.outbuf = NULL;
+ }
+ } else {
+ bundle.driver_udata.outlen = 0;
+ bundle.driver_udata.outbuf = NULL;
}
- if (!file->ucontext &&
- command != IB_USER_VERBS_CMD_GET_CONTEXT) {
- ret = -EINVAL;
- goto out;
- }
+ ib_uverbs_init_udata_buf_or_null(
+ &bundle.ucore, buf, u64_to_user_ptr(response),
+ in_len, out_len);
+ } else {
+ buf += sizeof(ex_hdr);
- if (!(ib_dev->uverbs_cmd_mask & (1ull << command))) {
- ret = -ENOSYS;
- goto out;
- }
+ ib_uverbs_init_udata_buf_or_null(&bundle.ucore, buf,
+ u64_to_user_ptr(ex_hdr.response),
+ hdr.in_words * 8, hdr.out_words * 8);
- if (hdr.in_words * 4 != count) {
- ret = -EINVAL;
- goto out;
- }
+ ib_uverbs_init_udata_buf_or_null(
+ &bundle.driver_udata, buf + bundle.ucore.inlen,
+ u64_to_user_ptr(ex_hdr.response) + bundle.ucore.outlen,
+ ex_hdr.provider_in_words * 8,
+ ex_hdr.provider_out_words * 8);
- ret = uverbs_cmd_table[command](file, ib_dev,
- buf + sizeof(hdr),
- hdr.in_words * 4,
- hdr.out_words * 4);
+ }
- } else if (flags == IB_USER_VERBS_CMD_FLAG_EXTENDED) {
- __u32 command;
+ ret = method_elm->handler(&bundle);
+ if (bundle.uobject)
+ uverbs_finalize_object(bundle.uobject, UVERBS_ACCESS_NEW, true,
+ !ret, &bundle);
+out_unlock:
+ srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
+ return (ret) ? : count;
+}
- struct ib_uverbs_ex_cmd_hdr ex_hdr;
- struct ib_udata ucore;
- struct ib_udata uhw;
- size_t written_count = count;
+static const struct vm_operations_struct rdma_umap_ops;
- if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
- IB_USER_VERBS_CMD_COMMAND_MASK)) {
- ret = -EINVAL;
- goto out;
- }
+static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct ib_uverbs_file *file = filp->private_data;
+ struct ib_ucontext *ucontext;
+ int ret = 0;
+ int srcu_key;
+
+ srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
+ ucontext = ib_uverbs_get_ucontext_file(file);
+ if (IS_ERR(ucontext)) {
+ ret = PTR_ERR(ucontext);
+ goto out;
+ }
- command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
+ mutex_lock(&file->disassociation_lock);
- if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) ||
- !uverbs_ex_cmd_table[command]) {
- ret = -ENOSYS;
- goto out;
- }
+ vma->vm_ops = &rdma_umap_ops;
+ ret = ucontext->device->ops.mmap(ucontext, vma);
- if (!file->ucontext) {
- ret = -EINVAL;
- goto out;
- }
+ mutex_unlock(&file->disassociation_lock);
+out:
+ srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
+ return ret;
+}
- if (!(ib_dev->uverbs_ex_cmd_mask & (1ull << command))) {
- ret = -ENOSYS;
- goto out;
- }
+/*
+ * The VMA has been dup'd; initialize the vm_private_data with a new
+ * tracking struct.
+ */
+static void rdma_umap_open(struct vm_area_struct *vma)
+{
+ struct ib_uverbs_file *ufile = vma->vm_file->private_data;
+ struct rdma_umap_priv *opriv = vma->vm_private_data;
+ struct rdma_umap_priv *priv;
- if (count < (sizeof(hdr) + sizeof(ex_hdr))) {
- ret = -EINVAL;
- goto out;
- }
+ if (!opriv)
+ return;
- if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr))) {
- ret = -EFAULT;
- goto out;
- }
+ /* We are racing with disassociation */
+ if (!down_read_trylock(&ufile->hw_destroy_rwsem))
+ goto out_zap;
+ mutex_lock(&ufile->disassociation_lock);
- count -= sizeof(hdr) + sizeof(ex_hdr);
- buf += sizeof(hdr) + sizeof(ex_hdr);
+ /*
+ * Disassociation already completed, the VMA should already be zapped.
+ */
+ if (!ufile->ucontext)
+ goto out_unlock;
- if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count) {
- ret = -EINVAL;
- goto out;
- }
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ goto out_unlock;
+ rdma_umap_priv_init(priv, vma, opriv->entry);
- if (ex_hdr.cmd_hdr_reserved) {
- ret = -EINVAL;
- goto out;
- }
+ mutex_unlock(&ufile->disassociation_lock);
+ up_read(&ufile->hw_destroy_rwsem);
+ return;
- if (ex_hdr.response) {
- if (!hdr.out_words && !ex_hdr.provider_out_words) {
- ret = -EINVAL;
- goto out;
- }
+out_unlock:
+ mutex_unlock(&ufile->disassociation_lock);
+ up_read(&ufile->hw_destroy_rwsem);
+out_zap:
+ /*
+ * We can't allow the VMA to be created with the actual IO pages, that
+ * would break our API contract, and it can't be stopped at this
+ * point, so zap it.
+ */
+ vma->vm_private_data = NULL;
+ zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+}
- if (!access_ok(VERIFY_WRITE,
- (void __user *) (unsigned long) ex_hdr.response,
- (hdr.out_words + ex_hdr.provider_out_words) * 8)) {
- ret = -EFAULT;
- goto out;
- }
- } else {
- if (hdr.out_words || ex_hdr.provider_out_words) {
- ret = -EINVAL;
- goto out;
- }
- }
+static void rdma_umap_close(struct vm_area_struct *vma)
+{
+ struct ib_uverbs_file *ufile = vma->vm_file->private_data;
+ struct rdma_umap_priv *priv = vma->vm_private_data;
- INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
- hdr.in_words * 8, hdr.out_words * 8);
-
- INIT_UDATA_BUF_OR_NULL(&uhw,
- buf + ucore.inlen,
- (unsigned long) ex_hdr.response + ucore.outlen,
- ex_hdr.provider_in_words * 8,
- ex_hdr.provider_out_words * 8);
-
- ret = uverbs_ex_cmd_table[command](file,
- ib_dev,
- &ucore,
- &uhw);
- if (!ret)
- ret = written_count;
+ if (!priv)
+ return;
+
+ /*
+ * The vma holds a reference on the struct file that created it, which
+ * in turn means that the ib_uverbs_file is guaranteed to exist at
+ * this point.
+ */
+ mutex_lock(&ufile->umap_lock);
+ if (priv->entry)
+ rdma_user_mmap_entry_put(priv->entry);
+
+ list_del(&priv->list);
+ mutex_unlock(&ufile->umap_lock);
+ kfree(priv);
+}
+
+/*
+ * Once zap_vma_ptes() has been called, touches to the VMA will come here and
+ * we return a dummy writable zero page for all the pfns.
+ */
+static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)
+{
+ struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data;
+ struct rdma_umap_priv *priv = vmf->vma->vm_private_data;
+ vm_fault_t ret = 0;
+
+ if (!priv)
+ return VM_FAULT_SIGBUS;
+
+ /* Read only pages can just use the system zero page. */
+ if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
+ vmf->page = ZERO_PAGE(vmf->address);
+ get_page(vmf->page);
+ return 0;
+ }
+
+ mutex_lock(&ufile->umap_lock);
+ if (!ufile->disassociate_page)
+ ufile->disassociate_page =
+ alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);
+
+ if (ufile->disassociate_page) {
+ /*
+ * This VMA is forced to always be shared, so we don't have
+ * to worry about COW.
+ */
+ vmf->page = ufile->disassociate_page;
+ get_page(vmf->page);
} else {
- ret = -ENOSYS;
+ ret = VM_FAULT_SIGBUS;
}
+ mutex_unlock(&ufile->umap_lock);
-out:
- srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
return ret;
}
-static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
+static const struct vm_operations_struct rdma_umap_ops = {
+ .open = rdma_umap_open,
+ .close = rdma_umap_close,
+ .fault = rdma_umap_fault,
+};
+
+void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
{
- struct ib_uverbs_file *file = filp->private_data;
- struct ib_device *ib_dev;
- int ret = 0;
- int srcu_key;
+ struct rdma_umap_priv *priv, *next_priv;
+
+ mutex_lock(&ufile->disassociation_lock);
+
+ while (1) {
+ struct mm_struct *mm = NULL;
+
+ /* Get an arbitrary mm pointer that hasn't been cleaned yet */
+ mutex_lock(&ufile->umap_lock);
+ while (!list_empty(&ufile->umaps)) {
+ int ret;
+
+ priv = list_first_entry(&ufile->umaps,
+ struct rdma_umap_priv, list);
+ mm = priv->vma->vm_mm;
+ ret = mmget_not_zero(mm);
+ if (!ret) {
+ list_del_init(&priv->list);
+ if (priv->entry) {
+ rdma_user_mmap_entry_put(priv->entry);
+ priv->entry = NULL;
+ }
+ mm = NULL;
+ continue;
+ }
+ break;
+ }
+ mutex_unlock(&ufile->umap_lock);
+ if (!mm) {
+ mutex_unlock(&ufile->disassociation_lock);
+ return;
+ }
- srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
- ib_dev = srcu_dereference(file->device->ib_dev,
- &file->device->disassociate_srcu);
- if (!ib_dev) {
- ret = -EIO;
- goto out;
+ /*
+ * The umap_lock is nested under mmap_lock since it is used within
+ * the vma_ops callbacks, so we have to clean the list one mm
+ * at a time to get the lock ordering right. Typically there
+ * will only be one mm, so no big deal.
+ */
+ mmap_read_lock(mm);
+ mutex_lock(&ufile->umap_lock);
+ list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
+ list) {
+ struct vm_area_struct *vma = priv->vma;
+
+ if (vma->vm_mm != mm)
+ continue;
+ list_del_init(&priv->list);
+
+ zap_vma_ptes(vma, vma->vm_start,
+ vma->vm_end - vma->vm_start);
+
+ if (priv->entry) {
+ rdma_user_mmap_entry_put(priv->entry);
+ priv->entry = NULL;
+ }
+ }
+ mutex_unlock(&ufile->umap_lock);
+ mmap_read_unlock(mm);
+ mmput(mm);
}
- if (!file->ucontext)
- ret = -ENODEV;
- else
- ret = ib_dev->mmap(file->ucontext, vma);
-out:
- srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
- return ret;
+ mutex_unlock(&ufile->disassociation_lock);
+}
+
+/**
+ * rdma_user_mmap_disassociate() - Revoke mmaps for a device
+ * @device: device to revoke
+ *
+ * This function should be called by drivers that need to disable mmaps for the
+ * device, for instance because it is going to be reset.
+ */
+void rdma_user_mmap_disassociate(struct ib_device *device)
+{
+ struct ib_uverbs_device *uverbs_dev =
+ ib_get_client_data(device, &uverbs_client);
+ struct ib_uverbs_file *ufile;
+
+ mutex_lock(&uverbs_dev->lists_mutex);
+ list_for_each_entry(ufile, &uverbs_dev->uverbs_file_list, list) {
+ if (ufile->ucontext)
+ uverbs_user_mmap_disassociate(ufile);
+ }
+ mutex_unlock(&uverbs_dev->lists_mutex);
}
+EXPORT_SYMBOL(rdma_user_mmap_disassociate);
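
Per the kernel-doc above, a driver that is about to reset its hardware can revoke every userspace mapping first so that stale PFNs are never touched again. A hedged driver-side sketch, where example_reset_device() and example_hw_reset() are hypothetical placeholders rather than anything defined by this patch:

static void example_reset_device(struct ib_device *ibdev)
{
	/* Zap all user mmaps that belong to this device ... */
	rdma_user_mmap_disassociate(ibdev);

	/* ... after which it is safe to reset the hardware. */
	example_hw_reset(ibdev);
}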
/*
* ib_uverbs_open() does not need the BKL:
@@ -878,9 +936,10 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
int srcu_key;
dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
- if (!atomic_inc_not_zero(&dev->refcount))
+ if (!refcount_inc_not_zero(&dev->refcount))
return -ENXIO;
+ get_device(&dev->dev);
srcu_key = srcu_read_lock(&dev->disassociate_srcu);
mutex_lock(&dev->lists_mutex);
ib_dev = srcu_dereference(dev->ib_dev,
@@ -890,13 +949,18 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
goto err;
}
+ if (!rdma_dev_access_netns(ib_dev, current->nsproxy->net_ns)) {
+ ret = -EPERM;
+ goto err;
+ }
+
/* In case IB device supports disassociate ucontext, there is no hard
* dependency between uverbs device and its low level device.
*/
- module_dependent = !(ib_dev->disassociate_ucontext);
+ module_dependent = !(ib_dev->ops.disassociate_ucontext);
if (module_dependent) {
- if (!try_module_get(ib_dev->owner)) {
+ if (!try_module_get(ib_dev->ops.owner)) {
ret = -ENODEV;
goto err;
}
@@ -912,53 +976,50 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
}
file->device = dev;
- file->ucontext = NULL;
- file->async_file = NULL;
kref_init(&file->ref);
- mutex_init(&file->mutex);
+ mutex_init(&file->ucontext_lock);
+
+ spin_lock_init(&file->uobjects_lock);
+ INIT_LIST_HEAD(&file->uobjects);
+ init_rwsem(&file->hw_destroy_rwsem);
+ mutex_init(&file->umap_lock);
+ INIT_LIST_HEAD(&file->umaps);
+
+ mutex_init(&file->disassociation_lock);
filp->private_data = file;
- kobject_get(&dev->kobj);
list_add_tail(&file->list, &dev->uverbs_file_list);
mutex_unlock(&dev->lists_mutex);
srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
- return nonseekable_open(inode, filp);
+ setup_ufile_idr_uobject(file);
+
+ return stream_open(inode, filp);
err_module:
- module_put(ib_dev->owner);
+ module_put(ib_dev->ops.owner);
err:
mutex_unlock(&dev->lists_mutex);
srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
- if (atomic_dec_and_test(&dev->refcount))
+ if (refcount_dec_and_test(&dev->refcount))
ib_uverbs_comp_dev(dev);
+ put_device(&dev->dev);
return ret;
}
static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
struct ib_uverbs_file *file = filp->private_data;
- struct ib_uverbs_device *dev = file->device;
- struct ib_ucontext *ucontext = NULL;
+
+ uverbs_destroy_ufile_hw(file, RDMA_REMOVE_CLOSE);
mutex_lock(&file->device->lists_mutex);
- ucontext = file->ucontext;
- file->ucontext = NULL;
- if (!file->is_closed) {
- list_del(&file->list);
- file->is_closed = 1;
- }
+ list_del_init(&file->list);
mutex_unlock(&file->device->lists_mutex);
- if (ucontext)
- ib_uverbs_cleanup_ucontext(file, ucontext);
-
- if (file->async_file)
- kref_put(&file->async_file->ref, ib_uverbs_release_event_file);
kref_put(&file->ref, ib_uverbs_release_file);
- kobject_put(&dev->kobj);
return 0;
}
@@ -968,7 +1029,8 @@ static const struct file_operations uverbs_fops = {
.write = ib_uverbs_write,
.open = ib_uverbs_open,
.release = ib_uverbs_close,
- .llseek = no_llseek,
+ .unlocked_ioctl = ib_uverbs_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static const struct file_operations uverbs_mmap_fops = {
@@ -977,239 +1039,216 @@ static const struct file_operations uverbs_mmap_fops = {
.mmap = ib_uverbs_mmap,
.open = ib_uverbs_open,
.release = ib_uverbs_close,
- .llseek = no_llseek,
+ .unlocked_ioctl = ib_uverbs_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
+static int ib_uverbs_get_nl_info(struct ib_device *ibdev, void *client_data,
+ struct ib_client_nl_info *res)
+{
+ struct ib_uverbs_device *uverbs_dev = client_data;
+ int ret;
+
+ if (res->port != -1)
+ return -EINVAL;
+
+ res->abi = ibdev->ops.uverbs_abi_ver;
+ res->cdev = &uverbs_dev->dev;
+
+ /*
+ * To support DRIVER_ID binding in userspace, some of the drivers need
+ * upgrading to expose their PCI dependent revision information
+ * through get_context instead of relying on modalias matching. When
+ * the drivers are fixed they can drop this flag.
+ */
+ if (!ibdev->ops.uverbs_no_driver_id_binding) {
+ ret = nla_put_u32(res->nl_msg, RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID,
+ ibdev->ops.driver_id);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
static struct ib_client uverbs_client = {
.name = "uverbs",
+ .no_kverbs_req = true,
.add = ib_uverbs_add_one,
- .remove = ib_uverbs_remove_one
+ .remove = ib_uverbs_remove_one,
+ .get_nl_info = ib_uverbs_get_nl_info,
};
+MODULE_ALIAS_RDMA_CLIENT("uverbs");
-static ssize_t show_ibdev(struct device *device, struct device_attribute *attr,
+static ssize_t ibdev_show(struct device *device, struct device_attribute *attr,
char *buf)
{
+ struct ib_uverbs_device *dev =
+ container_of(device, struct ib_uverbs_device, dev);
int ret = -ENODEV;
int srcu_key;
- struct ib_uverbs_device *dev = dev_get_drvdata(device);
struct ib_device *ib_dev;
- if (!dev)
- return -ENODEV;
-
srcu_key = srcu_read_lock(&dev->disassociate_srcu);
ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
if (ib_dev)
- ret = sprintf(buf, "%s\n", ib_dev->name);
+ ret = sysfs_emit(buf, "%s\n", dev_name(&ib_dev->dev));
srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
return ret;
}
-static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
+static DEVICE_ATTR_RO(ibdev);
-static ssize_t show_dev_abi_version(struct device *device,
- struct device_attribute *attr, char *buf)
+static ssize_t abi_version_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
- struct ib_uverbs_device *dev = dev_get_drvdata(device);
+ struct ib_uverbs_device *dev =
+ container_of(device, struct ib_uverbs_device, dev);
int ret = -ENODEV;
int srcu_key;
struct ib_device *ib_dev;
- if (!dev)
- return -ENODEV;
srcu_key = srcu_read_lock(&dev->disassociate_srcu);
ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
if (ib_dev)
- ret = sprintf(buf, "%d\n", ib_dev->uverbs_abi_ver);
+ ret = sysfs_emit(buf, "%u\n", ib_dev->ops.uverbs_abi_ver);
srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
return ret;
}
-static DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL);
+static DEVICE_ATTR_RO(abi_version);
+
+static struct attribute *ib_dev_attrs[] = {
+ &dev_attr_abi_version.attr,
+ &dev_attr_ibdev.attr,
+ NULL,
+};
+
+static const struct attribute_group dev_attr_group = {
+ .attrs = ib_dev_attrs,
+};
static CLASS_ATTR_STRING(abi_version, S_IRUGO,
__stringify(IB_USER_VERBS_ABI_VERSION));
-static dev_t overflow_maj;
-static DECLARE_BITMAP(overflow_map, IB_UVERBS_MAX_DEVICES);
-
-/*
- * If we have more than IB_UVERBS_MAX_DEVICES, dynamically overflow by
- * requesting a new major number and doubling the number of max devices we
- * support. It's stupid, but simple.
- */
-static int find_overflow_devnum(void)
+static int ib_uverbs_create_uapi(struct ib_device *device,
+ struct ib_uverbs_device *uverbs_dev)
{
- int ret;
-
- if (!overflow_maj) {
- ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES,
- "infiniband_verbs");
- if (ret) {
- printk(KERN_ERR "user_verbs: couldn't register dynamic device number\n");
- return ret;
- }
- }
+ struct uverbs_api *uapi;
- ret = find_first_zero_bit(overflow_map, IB_UVERBS_MAX_DEVICES);
- if (ret >= IB_UVERBS_MAX_DEVICES)
- return -1;
+ uapi = uverbs_alloc_api(device);
+ if (IS_ERR(uapi))
+ return PTR_ERR(uapi);
- return ret;
+ uverbs_dev->uapi = uapi;
+ return 0;
}
-static void ib_uverbs_add_one(struct ib_device *device)
+static int ib_uverbs_add_one(struct ib_device *device)
{
int devnum;
dev_t base;
struct ib_uverbs_device *uverbs_dev;
int ret;
- if (!device->alloc_ucontext)
- return;
+ if (!device->ops.alloc_ucontext ||
+ device->type == RDMA_DEVICE_TYPE_SMI)
+ return -EOPNOTSUPP;
- uverbs_dev = kzalloc(sizeof *uverbs_dev, GFP_KERNEL);
+ uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL);
if (!uverbs_dev)
- return;
+ return -ENOMEM;
ret = init_srcu_struct(&uverbs_dev->disassociate_srcu);
if (ret) {
kfree(uverbs_dev);
- return;
+ return -ENOMEM;
}
- atomic_set(&uverbs_dev->refcount, 1);
+ device_initialize(&uverbs_dev->dev);
+ uverbs_dev->dev.class = &uverbs_class;
+ uverbs_dev->dev.parent = device->dev.parent;
+ uverbs_dev->dev.release = ib_uverbs_release_dev;
+ uverbs_dev->groups[0] = &dev_attr_group;
+ uverbs_dev->dev.groups = uverbs_dev->groups;
+ refcount_set(&uverbs_dev->refcount, 1);
init_completion(&uverbs_dev->comp);
uverbs_dev->xrcd_tree = RB_ROOT;
mutex_init(&uverbs_dev->xrcd_tree_mutex);
- kobject_init(&uverbs_dev->kobj, &ib_uverbs_dev_ktype);
mutex_init(&uverbs_dev->lists_mutex);
INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
- INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list);
-
- spin_lock(&map_lock);
- devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
- if (devnum >= IB_UVERBS_MAX_DEVICES) {
- spin_unlock(&map_lock);
- devnum = find_overflow_devnum();
- if (devnum < 0)
- goto err;
-
- spin_lock(&map_lock);
- uverbs_dev->devnum = devnum + IB_UVERBS_MAX_DEVICES;
- base = devnum + overflow_maj;
- set_bit(devnum, overflow_map);
- } else {
- uverbs_dev->devnum = devnum;
- base = devnum + IB_UVERBS_BASE_DEV;
- set_bit(devnum, dev_map);
- }
- spin_unlock(&map_lock);
-
rcu_assign_pointer(uverbs_dev->ib_dev, device);
uverbs_dev->num_comp_vectors = device->num_comp_vectors;
- cdev_init(&uverbs_dev->cdev, NULL);
- uverbs_dev->cdev.owner = THIS_MODULE;
- uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
- uverbs_dev->cdev.kobj.parent = &uverbs_dev->kobj;
- kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
- if (cdev_add(&uverbs_dev->cdev, base, 1))
- goto err_cdev;
-
- uverbs_dev->dev = device_create(uverbs_class, device->dma_device,
- uverbs_dev->cdev.dev, uverbs_dev,
- "uverbs%d", uverbs_dev->devnum);
- if (IS_ERR(uverbs_dev->dev))
- goto err_cdev;
-
- if (device_create_file(uverbs_dev->dev, &dev_attr_ibdev))
- goto err_class;
- if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version))
- goto err_class;
+ devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1,
+ GFP_KERNEL);
+ if (devnum < 0) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ uverbs_dev->devnum = devnum;
+ if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
+ base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
+ else
+ base = IB_UVERBS_BASE_DEV + devnum;
- ib_set_client_data(device, &uverbs_client, uverbs_dev);
+ ret = ib_uverbs_create_uapi(device, uverbs_dev);
+ if (ret)
+ goto err_uapi;
- return;
+ uverbs_dev->dev.devt = base;
+ dev_set_name(&uverbs_dev->dev, "uverbs%d", uverbs_dev->devnum);
-err_class:
- device_destroy(uverbs_class, uverbs_dev->cdev.dev);
+ cdev_init(&uverbs_dev->cdev,
+ device->ops.mmap ? &uverbs_mmap_fops : &uverbs_fops);
+ uverbs_dev->cdev.owner = THIS_MODULE;
-err_cdev:
- cdev_del(&uverbs_dev->cdev);
- if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
- clear_bit(devnum, dev_map);
- else
- clear_bit(devnum, overflow_map);
+ ret = cdev_device_add(&uverbs_dev->cdev, &uverbs_dev->dev);
+ if (ret)
+ goto err_uapi;
+ ib_set_client_data(device, &uverbs_client, uverbs_dev);
+ return 0;
+
+err_uapi:
+ ida_free(&uverbs_ida, devnum);
err:
- if (atomic_dec_and_test(&uverbs_dev->refcount))
+ if (refcount_dec_and_test(&uverbs_dev->refcount))
ib_uverbs_comp_dev(uverbs_dev);
wait_for_completion(&uverbs_dev->comp);
- kobject_put(&uverbs_dev->kobj);
- return;
+ put_device(&uverbs_dev->dev);
+ return ret;
}
static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
struct ib_device *ib_dev)
{
struct ib_uverbs_file *file;
- struct ib_uverbs_event_file *event_file;
- struct ib_event event;
/* Pending running commands to terminate */
- synchronize_srcu(&uverbs_dev->disassociate_srcu);
- event.event = IB_EVENT_DEVICE_FATAL;
- event.element.port_num = 0;
- event.device = ib_dev;
+ uverbs_disassociate_api_pre(uverbs_dev);
mutex_lock(&uverbs_dev->lists_mutex);
while (!list_empty(&uverbs_dev->uverbs_file_list)) {
- struct ib_ucontext *ucontext;
-
file = list_first_entry(&uverbs_dev->uverbs_file_list,
struct ib_uverbs_file, list);
- file->is_closed = 1;
- ucontext = file->ucontext;
- list_del(&file->list);
- file->ucontext = NULL;
+ list_del_init(&file->list);
kref_get(&file->ref);
- mutex_unlock(&uverbs_dev->lists_mutex);
+
/* We must release the mutex before going ahead and calling
- * disassociate_ucontext. disassociate_ucontext might end up
- * indirectly calling uverbs_close, for example due to freeing
- * the resources (e.g mmput).
+ * uverbs_cleanup_ufile, as it might end up indirectly calling
+ * uverbs_close, for example due to freeing the resources (e.g.
+ * mmput).
*/
- ib_uverbs_event_handler(&file->event_handler, &event);
- if (ucontext) {
- ib_dev->disassociate_ucontext(ucontext);
- ib_uverbs_cleanup_ucontext(file, ucontext);
- }
+ mutex_unlock(&uverbs_dev->lists_mutex);
- mutex_lock(&uverbs_dev->lists_mutex);
+ uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
kref_put(&file->ref, ib_uverbs_release_file);
- }
- while (!list_empty(&uverbs_dev->uverbs_events_file_list)) {
- event_file = list_first_entry(&uverbs_dev->
- uverbs_events_file_list,
- struct ib_uverbs_event_file,
- list);
- spin_lock_irq(&event_file->lock);
- event_file->is_closed = 1;
- spin_unlock_irq(&event_file->lock);
-
- list_del(&event_file->list);
- if (event_file->is_async) {
- ib_unregister_event_handler(&event_file->uverbs_file->
- event_handler);
- event_file->uverbs_file->event_handler.device = NULL;
- }
-
- wake_up_interruptible(&event_file->poll_wait);
- kill_fasync(&event_file->async_queue, SIGIO, POLL_IN);
+ mutex_lock(&uverbs_dev->lists_mutex);
}
mutex_unlock(&uverbs_dev->lists_mutex);
+
+ uverbs_disassociate_api(uverbs_dev->uapi);
}
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
@@ -1217,19 +1256,10 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
struct ib_uverbs_device *uverbs_dev = client_data;
int wait_clients = 1;
- if (!uverbs_dev)
- return;
-
- dev_set_drvdata(uverbs_dev->dev, NULL);
- device_destroy(uverbs_class, uverbs_dev->cdev.dev);
- cdev_del(&uverbs_dev->cdev);
+ cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
+ ida_free(&uverbs_ida, uverbs_dev->devnum);
- if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
- clear_bit(uverbs_dev->devnum, dev_map);
- else
- clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);
-
- if (device->disassociate_ucontext) {
+ if (device->ops.disassociate_ucontext) {
/* We disassociate HW resources and immediately return.
* Userspace will see a EIO errno for all future access.
* Upon returning, ib_device may be freed internally and is not
@@ -1241,64 +1271,68 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
* cdev was deleted, however active clients can still issue
* commands and close their open files.
*/
- rcu_assign_pointer(uverbs_dev->ib_dev, NULL);
ib_uverbs_free_hw_resources(uverbs_dev, device);
wait_clients = 0;
}
- if (atomic_dec_and_test(&uverbs_dev->refcount))
+ if (refcount_dec_and_test(&uverbs_dev->refcount))
ib_uverbs_comp_dev(uverbs_dev);
if (wait_clients)
wait_for_completion(&uverbs_dev->comp);
- kobject_put(&uverbs_dev->kobj);
-}
-static char *uverbs_devnode(struct device *dev, umode_t *mode)
-{
- if (mode)
- *mode = 0666;
- return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
+ put_device(&uverbs_dev->dev);
}
static int __init ib_uverbs_init(void)
{
int ret;
- ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
+ ret = register_chrdev_region(IB_UVERBS_BASE_DEV,
+ IB_UVERBS_NUM_FIXED_MINOR,
"infiniband_verbs");
if (ret) {
- printk(KERN_ERR "user_verbs: couldn't register device number\n");
+ pr_err("user_verbs: couldn't register device number\n");
goto out;
}
- uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
- if (IS_ERR(uverbs_class)) {
- ret = PTR_ERR(uverbs_class);
- printk(KERN_ERR "user_verbs: couldn't create class infiniband_verbs\n");
- goto out_chrdev;
+ ret = alloc_chrdev_region(&dynamic_uverbs_dev, 0,
+ IB_UVERBS_NUM_DYNAMIC_MINOR,
+ "infiniband_verbs");
+ if (ret) {
+ pr_err("couldn't register dynamic device number\n");
+ goto out_alloc;
}
- uverbs_class->devnode = uverbs_devnode;
+ ret = class_register(&uverbs_class);
+ if (ret) {
+ pr_err("user_verbs: couldn't create class infiniband_verbs\n");
+ goto out_chrdev;
+ }
- ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
+ ret = class_create_file(&uverbs_class, &class_attr_abi_version.attr);
if (ret) {
- printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n");
+ pr_err("user_verbs: couldn't create abi_version attribute\n");
goto out_class;
}
ret = ib_register_client(&uverbs_client);
if (ret) {
- printk(KERN_ERR "user_verbs: couldn't register client\n");
+ pr_err("user_verbs: couldn't register client\n");
goto out_class;
}
return 0;
out_class:
- class_destroy(uverbs_class);
+ class_unregister(&uverbs_class);
out_chrdev:
- unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
+ unregister_chrdev_region(dynamic_uverbs_dev,
+ IB_UVERBS_NUM_DYNAMIC_MINOR);
+
+out_alloc:
+ unregister_chrdev_region(IB_UVERBS_BASE_DEV,
+ IB_UVERBS_NUM_FIXED_MINOR);
out:
return ret;
@@ -1307,17 +1341,13 @@ out:
static void __exit ib_uverbs_cleanup(void)
{
ib_unregister_client(&uverbs_client);
- class_destroy(uverbs_class);
- unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
- if (overflow_maj)
- unregister_chrdev_region(overflow_maj, IB_UVERBS_MAX_DEVICES);
- idr_destroy(&ib_uverbs_pd_idr);
- idr_destroy(&ib_uverbs_mr_idr);
- idr_destroy(&ib_uverbs_mw_idr);
- idr_destroy(&ib_uverbs_ah_idr);
- idr_destroy(&ib_uverbs_cq_idr);
- idr_destroy(&ib_uverbs_qp_idr);
- idr_destroy(&ib_uverbs_srq_idr);
+ class_unregister(&uverbs_class);
+ unregister_chrdev_region(IB_UVERBS_BASE_DEV,
+ IB_UVERBS_NUM_FIXED_MINOR);
+ unregister_chrdev_region(dynamic_uverbs_dev,
+ IB_UVERBS_NUM_DYNAMIC_MINOR);
+ ib_cleanup_ucaps();
+ mmu_notifier_synchronize();
}
module_init(ib_uverbs_init);
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
index abd97247443e..e803f609ec87 100644
--- a/drivers/infiniband/core/uverbs_marshall.c
+++ b/drivers/infiniband/core/uverbs_marshall.c
@@ -33,26 +33,68 @@
#include <linux/export.h>
#include <rdma/ib_marshall.h>
-void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
- struct ib_ah_attr *src)
+#define OPA_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL)
+static int rdma_ah_conv_opa_to_ib(struct ib_device *dev,
+ struct rdma_ah_attr *ib,
+ struct rdma_ah_attr *opa)
{
- memcpy(dst->grh.dgid, src->grh.dgid.raw, sizeof src->grh.dgid);
- dst->grh.flow_label = src->grh.flow_label;
- dst->grh.sgid_index = src->grh.sgid_index;
- dst->grh.hop_limit = src->grh.hop_limit;
- dst->grh.traffic_class = src->grh.traffic_class;
- memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
- dst->dlid = src->dlid;
- dst->sl = src->sl;
- dst->src_path_bits = src->src_path_bits;
- dst->static_rate = src->static_rate;
- dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
- dst->port_num = src->port_num;
+ struct ib_port_attr port_attr;
+ int ret = 0;
+
+ /* Do a structure copy, then over-write the relevant fields */
+ *ib = *opa;
+
+ ib->type = RDMA_AH_ATTR_TYPE_IB;
+ rdma_ah_set_grh(ib, NULL, 0, 0, 1, 0);
+
+ if (ib_query_port(dev, opa->port_num, &port_attr)) {
+ /* Set to default subnet to indicate error */
+ rdma_ah_set_subnet_prefix(ib, OPA_DEFAULT_GID_PREFIX);
+ ret = -EINVAL;
+ } else {
+ rdma_ah_set_subnet_prefix(ib,
+ cpu_to_be64(port_attr.subnet_prefix));
+ }
+ rdma_ah_set_interface_id(ib, OPA_MAKE_ID(rdma_ah_get_dlid(opa)));
+ return ret;
+}
+
+void ib_copy_ah_attr_to_user(struct ib_device *device,
+ struct ib_uverbs_ah_attr *dst,
+ struct rdma_ah_attr *ah_attr)
+{
+ struct rdma_ah_attr *src = ah_attr;
+ struct rdma_ah_attr conv_ah;
+
+ memset(&dst->grh, 0, sizeof(dst->grh));
+
+ if ((ah_attr->type == RDMA_AH_ATTR_TYPE_OPA) &&
+ (rdma_ah_get_dlid(ah_attr) > be16_to_cpu(IB_LID_PERMISSIVE)) &&
+ (!rdma_ah_conv_opa_to_ib(device, &conv_ah, ah_attr)))
+ src = &conv_ah;
+
+ dst->dlid = rdma_ah_get_dlid(src);
+ dst->sl = rdma_ah_get_sl(src);
+ dst->src_path_bits = rdma_ah_get_path_bits(src);
+ dst->static_rate = rdma_ah_get_static_rate(src);
+ dst->is_global = rdma_ah_get_ah_flags(src) &
+ IB_AH_GRH ? 1 : 0;
+ if (dst->is_global) {
+ const struct ib_global_route *grh = rdma_ah_read_grh(src);
+
+ memcpy(dst->grh.dgid, grh->dgid.raw, sizeof(grh->dgid));
+ dst->grh.flow_label = grh->flow_label;
+ dst->grh.sgid_index = grh->sgid_index;
+ dst->grh.hop_limit = grh->hop_limit;
+ dst->grh.traffic_class = grh->traffic_class;
+ }
+ dst->port_num = rdma_ah_get_port_num(src);
dst->reserved = 0;
}
EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
-void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
+void ib_copy_qp_attr_to_user(struct ib_device *device,
+ struct ib_uverbs_qp_attr *dst,
struct ib_qp_attr *src)
{
dst->qp_state = src->qp_state;
@@ -71,8 +113,8 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
dst->max_recv_sge = src->cap.max_recv_sge;
dst->max_inline_data = src->cap.max_inline_data;
- ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
- ib_copy_ah_attr_to_user(&dst->alt_ah_attr, &src->alt_ah_attr);
+ ib_copy_ah_attr_to_user(device, &dst->ah_attr, &src->ah_attr);
+ ib_copy_ah_attr_to_user(device, &dst->alt_ah_attr, &src->alt_ah_attr);
dst->pkey_index = src->pkey_index;
dst->alt_pkey_index = src->alt_pkey_index;
@@ -91,15 +133,15 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
}
EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
-void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
- struct ib_sa_path_rec *src)
+static void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
+ struct sa_path_rec *src)
{
- memcpy(dst->dgid, src->dgid.raw, sizeof src->dgid);
- memcpy(dst->sgid, src->sgid.raw, sizeof src->sgid);
+ memcpy(dst->dgid, src->dgid.raw, sizeof(src->dgid));
+ memcpy(dst->sgid, src->sgid.raw, sizeof(src->sgid));
- dst->dlid = src->dlid;
- dst->slid = src->slid;
- dst->raw_traffic = src->raw_traffic;
+ dst->dlid = htons(ntohl(sa_path_get_dlid(src)));
+ dst->slid = htons(ntohl(sa_path_get_slid(src)));
+ dst->raw_traffic = sa_path_get_raw_traffic(src);
dst->flow_label = src->flow_label;
dst->hop_limit = src->hop_limit;
dst->traffic_class = src->traffic_class;
@@ -115,34 +157,17 @@ void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
dst->preference = src->preference;
dst->packet_life_time_selector = src->packet_life_time_selector;
}
-EXPORT_SYMBOL(ib_copy_path_rec_to_user);
-void ib_copy_path_rec_from_user(struct ib_sa_path_rec *dst,
- struct ib_user_path_rec *src)
+void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
+ struct sa_path_rec *src)
{
- memcpy(dst->dgid.raw, src->dgid, sizeof dst->dgid);
- memcpy(dst->sgid.raw, src->sgid, sizeof dst->sgid);
+ struct sa_path_rec rec;
- dst->dlid = src->dlid;
- dst->slid = src->slid;
- dst->raw_traffic = src->raw_traffic;
- dst->flow_label = src->flow_label;
- dst->hop_limit = src->hop_limit;
- dst->traffic_class = src->traffic_class;
- dst->reversible = src->reversible;
- dst->numb_path = src->numb_path;
- dst->pkey = src->pkey;
- dst->sl = src->sl;
- dst->mtu_selector = src->mtu_selector;
- dst->mtu = src->mtu;
- dst->rate_selector = src->rate_selector;
- dst->rate = src->rate;
- dst->packet_life_time = src->packet_life_time;
- dst->preference = src->preference;
- dst->packet_life_time_selector = src->packet_life_time_selector;
-
- memset(dst->smac, 0, sizeof(dst->smac));
- memset(dst->dmac, 0, sizeof(dst->dmac));
- dst->vlan_id = 0xffff;
+ if (src->rec_type == SA_PATH_REC_TYPE_OPA) {
+ sa_convert_path_opa_to_ib(&rec, src);
+ __ib_copy_path_rec_to_user(dst, &rec);
+ return;
+ }
+ __ib_copy_path_rec_to_user(dst, src);
}
-EXPORT_SYMBOL(ib_copy_path_rec_from_user);
+EXPORT_SYMBOL(ib_copy_path_rec_to_user);
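
The new path-record copy above converts LIDs with htons(ntohl(...)) because sa_path_rec stores them as 32-bit big-endian values while the user ABI field is 16-bit big-endian. A userspace sketch of that narrowing (assumption: plain libc byte-order helpers stand in for the kernel ones):

#include <arpa/inet.h>
#include <stdint.h>

/* Convert a 32-bit big-endian LID into the 16-bit big-endian field the
 * uverbs ABI expects: to host order, truncate, back to network order. */
static uint16_t pack_user_lid(uint32_t lid_be32)
{
	return htons((uint16_t)ntohl(lid_be32));
}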
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
new file mode 100644
index 000000000000..13776a66e2e4
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
+#include <linux/bug.h>
+#include <linux/file.h>
+#include <rdma/restrack.h>
+#include "rdma_core.h"
+#include "uverbs.h"
+
+static int uverbs_free_ah(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ return rdma_destroy_ah_user((struct ib_ah *)uobject->object,
+ RDMA_DESTROY_AH_SLEEPABLE,
+ &attrs->driver_udata);
+}
+
+static int uverbs_free_flow(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_flow *flow = (struct ib_flow *)uobject->object;
+ struct ib_uflow_object *uflow =
+ container_of(uobject, struct ib_uflow_object, uobject);
+ struct ib_qp *qp = flow->qp;
+ int ret;
+
+ ret = flow->device->ops.destroy_flow(flow);
+ if (!ret) {
+ if (qp)
+ atomic_dec(&qp->usecnt);
+ ib_uverbs_flow_resources_free(uflow->resources);
+ }
+
+ return ret;
+}
+
+static int uverbs_free_mw(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ return uverbs_dealloc_mw((struct ib_mw *)uobject->object);
+}
+
+static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_rwq_ind_table *rwq_ind_tbl = uobject->object;
+ struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl;
+ u32 table_size = (1 << rwq_ind_tbl->log_ind_tbl_size);
+ int ret, i;
+
+ if (atomic_read(&rwq_ind_tbl->usecnt))
+ return -EBUSY;
+
+ ret = rwq_ind_tbl->device->ops.destroy_rwq_ind_table(rwq_ind_tbl);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < table_size; i++)
+ atomic_dec(&ind_tbl[i]->usecnt);
+
+ kfree(rwq_ind_tbl);
+ kfree(ind_tbl);
+ return 0;
+}
+
+static int uverbs_free_xrcd(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_xrcd *xrcd = uobject->object;
+ struct ib_uxrcd_object *uxrcd =
+ container_of(uobject, struct ib_uxrcd_object, uobject);
+ int ret;
+
+ if (atomic_read(&uxrcd->refcnt))
+ return -EBUSY;
+
+ mutex_lock(&attrs->ufile->device->xrcd_tree_mutex);
+ ret = ib_uverbs_dealloc_xrcd(uobject, xrcd, why, attrs);
+ mutex_unlock(&attrs->ufile->device->xrcd_tree_mutex);
+
+ return ret;
+}
+
+static int uverbs_free_pd(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_pd *pd = uobject->object;
+
+ if (atomic_read(&pd->usecnt))
+ return -EBUSY;
+
+ return ib_dealloc_pd_user(pd, &attrs->driver_udata);
+}
+
+void ib_uverbs_free_event_queue(struct ib_uverbs_event_queue *event_queue)
+{
+ struct ib_uverbs_event *entry, *tmp;
+
+ spin_lock_irq(&event_queue->lock);
+ /*
+ * The user must ensure that no new items are added to the event_list
+ * once is_closed is set.
+ */
+ event_queue->is_closed = 1;
+ spin_unlock_irq(&event_queue->lock);
+ wake_up_interruptible(&event_queue->poll_wait);
+ kill_fasync(&event_queue->async_queue, SIGIO, POLL_IN);
+
+ spin_lock_irq(&event_queue->lock);
+ list_for_each_entry_safe(entry, tmp, &event_queue->event_list, list) {
+ if (entry->counter)
+ list_del(&entry->obj_list);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ spin_unlock_irq(&event_queue->lock);
+}
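
ib_uverbs_free_event_queue() above follows a common shutdown sequence: mark the queue closed under the lock, wake any sleepers, then reacquire the lock and drain whatever is still queued. A userspace sketch of the same ordering (assumption: pthreads stand in for the kernel spinlock and wait queue):

#include <pthread.h>
#include <stdlib.h>

struct ev { struct ev *next; };

struct ev_queue {
	pthread_mutex_t lock;
	pthread_cond_t  wait;
	int             is_closed;
	struct ev      *head;      /* pending events, singly linked */
};

static void ev_queue_close(struct ev_queue *q)
{
	struct ev *e, *next;

	pthread_mutex_lock(&q->lock);
	q->is_closed = 1;                 /* producers must check this */
	pthread_mutex_unlock(&q->lock);

	pthread_cond_broadcast(&q->wait); /* wake blocked readers */

	pthread_mutex_lock(&q->lock);
	for (e = q->head; e; e = next) {  /* free anything still queued */
		next = e->next;
		free(e);
	}
	q->head = NULL;
	pthread_mutex_unlock(&q->lock);
}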
+
+static void
+uverbs_completion_event_file_destroy_uobj(struct ib_uobject *uobj,
+ enum rdma_remove_reason why)
+{
+ struct ib_uverbs_completion_event_file *file =
+ container_of(uobj, struct ib_uverbs_completion_event_file,
+ uobj);
+
+ ib_uverbs_free_event_queue(&file->ev_queue);
+}
+
+int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs)
+{
+ return 0;
+}
+EXPORT_SYMBOL(uverbs_destroy_def_handler);
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_COMP_CHANNEL,
+ UVERBS_TYPE_ALLOC_FD(sizeof(struct ib_uverbs_completion_event_file),
+ uverbs_completion_event_file_destroy_uobj,
+ &uverbs_event_fops,
+ "[infinibandevent]",
+ O_RDONLY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ UVERBS_METHOD_MW_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_MW_HANDLE,
+ UVERBS_OBJECT_MW,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_MW,
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_mw),
+ &UVERBS_METHOD(UVERBS_METHOD_MW_DESTROY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ UVERBS_METHOD_AH_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_AH_HANDLE,
+ UVERBS_OBJECT_AH,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_AH,
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_ah),
+ &UVERBS_METHOD(UVERBS_METHOD_AH_DESTROY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ UVERBS_METHOD_FLOW_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_FLOW_HANDLE,
+ UVERBS_OBJECT_FLOW,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_FLOW,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uflow_object),
+ uverbs_free_flow),
+ &UVERBS_METHOD(UVERBS_METHOD_FLOW_DESTROY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ UVERBS_METHOD_RWQ_IND_TBL_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_RWQ_IND_TBL_HANDLE,
+ UVERBS_OBJECT_RWQ_IND_TBL,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL,
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_rwq_ind_tbl),
+ &UVERBS_METHOD(UVERBS_METHOD_RWQ_IND_TBL_DESTROY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ UVERBS_METHOD_XRCD_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_XRCD_HANDLE,
+ UVERBS_OBJECT_XRCD,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_XRCD,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uxrcd_object),
+ uverbs_free_xrcd),
+ &UVERBS_METHOD(UVERBS_METHOD_XRCD_DESTROY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ UVERBS_METHOD_PD_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_PD_HANDLE,
+ UVERBS_OBJECT_PD,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_PD,
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_pd),
+ &UVERBS_METHOD(UVERBS_METHOD_PD_DESTROY));
+
+const struct uapi_definition uverbs_def_obj_intf[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_PD,
+ UAPI_DEF_OBJ_NEEDS_FN(dealloc_pd)),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_COMP_CHANNEL,
+ UAPI_DEF_OBJ_NEEDS_FN(dealloc_pd)),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_AH,
+ UAPI_DEF_OBJ_NEEDS_FN(destroy_ah)),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_MW,
+ UAPI_DEF_OBJ_NEEDS_FN(dealloc_mw)),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_FLOW,
+ UAPI_DEF_OBJ_NEEDS_FN(destroy_flow)),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ UVERBS_OBJECT_RWQ_IND_TBL,
+ UAPI_DEF_OBJ_NEEDS_FN(destroy_rwq_ind_table)),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_XRCD,
+ UAPI_DEF_OBJ_NEEDS_FN(dealloc_xrcd)),
+ {}
+};
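
The uverbs_def_obj_intf table above relies on UAPI_DEF_OBJ_NEEDS_FN() so that a whole object disappears from the ioctl parse tree when the driver does not implement the named op. A loose restatement of that idea (hypothetical types, not the real macros):

struct example_driver_ops {
	int (*dealloc_pd)(void *pd);
	int (*destroy_ah)(void *ah);
};

/* Expose an object's methods only when the op it depends on is wired up. */
static int pd_object_supported(const struct example_driver_ops *ops)
{
	return ops->dealloc_pd != NULL;
}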
diff --git a/drivers/infiniband/core/uverbs_std_types_async_fd.c b/drivers/infiniband/core/uverbs_std_types_async_fd.c
new file mode 100644
index 000000000000..cc24cfdf7aee
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_async_fd.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include <rdma/uverbs_ioctl.h>
+#include "rdma_core.h"
+#include "uverbs.h"
+
+static int UVERBS_HANDLER(UVERBS_METHOD_ASYNC_EVENT_ALLOC)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, UVERBS_METHOD_ASYNC_EVENT_ALLOC);
+
+ ib_uverbs_init_async_event_file(
+ container_of(uobj, struct ib_uverbs_async_event_file, uobj));
+ return 0;
+}
+
+static void uverbs_async_event_destroy_uobj(struct ib_uobject *uobj,
+ enum rdma_remove_reason why)
+{
+ struct ib_uverbs_async_event_file *event_file =
+ container_of(uobj, struct ib_uverbs_async_event_file, uobj);
+
+ ib_unregister_event_handler(&event_file->event_handler);
+
+ if (why == RDMA_REMOVE_DRIVER_REMOVE)
+ ib_uverbs_async_handler(event_file, 0, IB_EVENT_DEVICE_FATAL,
+ NULL, NULL);
+}
+
+int uverbs_async_event_release(struct inode *inode, struct file *filp)
+{
+ struct ib_uverbs_async_event_file *event_file;
+ struct ib_uobject *uobj = filp->private_data;
+ int ret;
+
+ if (!uobj)
+ return uverbs_uobject_fd_release(inode, filp);
+
+ event_file =
+ container_of(uobj, struct ib_uverbs_async_event_file, uobj);
+
+ /*
+ * The async event FD has to deliver IB_EVENT_DEVICE_FATAL even after
+ * disassociation, so cleaning the event list must only happen after
+ * release. The user knows it has reached the end of the event stream
+ * when it sees IB_EVENT_DEVICE_FATAL.
+ */
+ uverbs_uobject_get(uobj);
+ ret = uverbs_uobject_fd_release(inode, filp);
+ ib_uverbs_free_event_queue(&event_file->ev_queue);
+ uverbs_uobject_put(uobj);
+ return ret;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_ASYNC_EVENT_ALLOC,
+ UVERBS_ATTR_FD(UVERBS_ATTR_ASYNC_EVENT_ALLOC_FD_HANDLE,
+ UVERBS_OBJECT_ASYNC_EVENT,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_ASYNC_EVENT,
+ UVERBS_TYPE_ALLOC_FD(sizeof(struct ib_uverbs_async_event_file),
+ uverbs_async_event_destroy_uobj,
+ &uverbs_async_event_fops,
+ "[infinibandevent]",
+ O_RDONLY),
+ &UVERBS_METHOD(UVERBS_METHOD_ASYNC_EVENT_ALLOC));
+
+const struct uapi_definition uverbs_def_obj_async_fd[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_ASYNC_EVENT),
+ {}
+};
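
uverbs_async_event_release() above takes an extra reference before the generic fd release so the event queue can still be drained afterwards. A tiny sketch of holding a reference across a teardown step (simplified, no kernel types):

#include <stdlib.h>

struct obj {
	int refcount;
	/* payload omitted */
};

static void obj_get(struct obj *o) { o->refcount++; }

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		free(o);
}

/* Keep the object alive across release() so the queue drain that follows
 * still has something valid to operate on. */
static void release_and_drain(struct obj *o, void (*release)(struct obj *),
			      void (*drain)(struct obj *))
{
	obj_get(o);
	release(o);   /* may drop the caller's reference */
	drain(o);     /* safe: we still hold ours */
	obj_put(o);
}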
diff --git a/drivers/infiniband/core/uverbs_std_types_counters.c b/drivers/infiniband/core/uverbs_std_types_counters.c
new file mode 100644
index 000000000000..381aa5797641
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_counters.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rdma_core.h"
+#include "uverbs.h"
+#include <rdma/uverbs_std_types.h>
+
+static int uverbs_free_counters(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_counters *counters = uobject->object;
+ int ret;
+
+ if (atomic_read(&counters->usecnt))
+ return -EBUSY;
+
+ ret = counters->device->ops.destroy_counters(counters);
+ if (ret)
+ return ret;
+ kfree(counters);
+ return 0;
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, UVERBS_ATTR_CREATE_COUNTERS_HANDLE);
+ struct ib_device *ib_dev = attrs->context->device;
+ struct ib_counters *counters;
+ int ret;
+
+ /*
+ * This check should be removed once the infrastructure can drop
+ * methods from the parse tree when this condition is met.
+ */
+ if (!ib_dev->ops.create_counters)
+ return -EOPNOTSUPP;
+
+ counters = rdma_zalloc_drv_obj(ib_dev, ib_counters);
+ if (!counters)
+ return -ENOMEM;
+
+ counters->device = ib_dev;
+ counters->uobject = uobj;
+ uobj->object = counters;
+ atomic_set(&counters->usecnt, 0);
+
+ ret = ib_dev->ops.create_counters(counters, attrs);
+ if (ret)
+ kfree(counters);
+
+ return ret;
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_counters_read_attr read_attr = {};
+ const struct uverbs_attr *uattr;
+ struct ib_counters *counters =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_READ_COUNTERS_HANDLE);
+ int ret;
+
+ if (!counters->device->ops.read_counters)
+ return -EOPNOTSUPP;
+
+ if (!atomic_read(&counters->usecnt))
+ return -EINVAL;
+
+ ret = uverbs_get_flags32(&read_attr.flags, attrs,
+ UVERBS_ATTR_READ_COUNTERS_FLAGS,
+ IB_UVERBS_READ_COUNTERS_PREFER_CACHED);
+ if (ret)
+ return ret;
+
+ uattr = uverbs_attr_get(attrs, UVERBS_ATTR_READ_COUNTERS_BUFF);
+ if (IS_ERR(uattr))
+ return PTR_ERR(uattr);
+ read_attr.ncounters = uattr->ptr_attr.len / sizeof(u64);
+ read_attr.counters_buff = uverbs_zalloc(
+ attrs, array_size(read_attr.ncounters, sizeof(u64)));
+ if (IS_ERR(read_attr.counters_buff))
+ return PTR_ERR(read_attr.counters_buff);
+
+ ret = counters->device->ops.read_counters(counters, &read_attr, attrs);
+ if (ret)
+ return ret;
+
+ return uverbs_copy_to(attrs, UVERBS_ATTR_READ_COUNTERS_BUFF,
+ read_attr.counters_buff,
+ read_attr.ncounters * sizeof(u64));
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_COUNTERS_CREATE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_COUNTERS_HANDLE,
+ UVERBS_OBJECT_COUNTERS,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ UVERBS_METHOD_COUNTERS_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_COUNTERS_HANDLE,
+ UVERBS_OBJECT_COUNTERS,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_COUNTERS_READ,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_READ_COUNTERS_HANDLE,
+ UVERBS_OBJECT_COUNTERS,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_READ_COUNTERS_BUFF,
+ UVERBS_ATTR_MIN_SIZE(0),
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_READ_COUNTERS_FLAGS,
+ enum ib_uverbs_read_counters_flags));
+
+DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_COUNTERS,
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_counters),
+ &UVERBS_METHOD(UVERBS_METHOD_COUNTERS_CREATE),
+ &UVERBS_METHOD(UVERBS_METHOD_COUNTERS_DESTROY),
+ &UVERBS_METHOD(UVERBS_METHOD_COUNTERS_READ));
+
+const struct uapi_definition uverbs_def_obj_counters[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_COUNTERS,
+ UAPI_DEF_OBJ_NEEDS_FN(destroy_counters)),
+ {}
+};
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
new file mode 100644
index 000000000000..37cd37556510
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_cq.c
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include "rdma_core.h"
+#include "uverbs.h"
+#include "restrack.h"
+
+static int uverbs_free_cq(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_cq *cq = uobject->object;
+ struct ib_uverbs_event_queue *ev_queue = cq->cq_context;
+ struct ib_ucq_object *ucq =
+ container_of(uobject, struct ib_ucq_object, uevent.uobject);
+ int ret;
+
+ ret = ib_destroy_cq_user(cq, &attrs->driver_udata);
+ if (ret)
+ return ret;
+
+ ib_uverbs_release_ucq(
+ ev_queue ? container_of(ev_queue,
+ struct ib_uverbs_completion_event_file,
+ ev_queue) :
+ NULL,
+ ucq);
+ return 0;
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_ucq_object *obj = container_of(
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_CQ_HANDLE),
+ typeof(*obj), uevent.uobject);
+ struct ib_uverbs_completion_event_file *ev_file = NULL;
+ struct ib_device *ib_dev = attrs->context->device;
+ struct ib_umem_dmabuf *umem_dmabuf;
+ struct ib_cq_init_attr attr = {};
+ struct ib_uobject *ev_file_uobj;
+ struct ib_umem *umem = NULL;
+ u64 buffer_length;
+ u64 buffer_offset;
+ struct ib_cq *cq;
+ u64 user_handle;
+ u64 buffer_va;
+ int buffer_fd;
+ int ret;
+
+ if ((!ib_dev->ops.create_cq && !ib_dev->ops.create_cq_umem) || !ib_dev->ops.destroy_cq)
+ return -EOPNOTSUPP;
+
+ ret = uverbs_copy_from(&attr.comp_vector, attrs,
+ UVERBS_ATTR_CREATE_CQ_COMP_VECTOR);
+ if (!ret)
+ ret = uverbs_copy_from(&attr.cqe, attrs,
+ UVERBS_ATTR_CREATE_CQ_CQE);
+ if (!ret)
+ ret = uverbs_copy_from(&user_handle, attrs,
+ UVERBS_ATTR_CREATE_CQ_USER_HANDLE);
+ if (ret)
+ return ret;
+
+ ret = uverbs_get_flags32(&attr.flags, attrs,
+ UVERBS_ATTR_CREATE_CQ_FLAGS,
+ IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION |
+ IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN);
+ if (ret)
+ return ret;
+
+ ev_file_uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL);
+ if (!IS_ERR(ev_file_uobj)) {
+ ev_file = container_of(ev_file_uobj,
+ struct ib_uverbs_completion_event_file,
+ uobj);
+ uverbs_uobject_get(ev_file_uobj);
+ }
+
+ obj->uevent.event_file = ib_uverbs_get_async_event(
+ attrs, UVERBS_ATTR_CREATE_CQ_EVENT_FD);
+
+ if (attr.comp_vector >= attrs->ufile->device->num_comp_vectors) {
+ ret = -EINVAL;
+ goto err_event_file;
+ }
+
+ INIT_LIST_HEAD(&obj->comp_list);
+ INIT_LIST_HEAD(&obj->uevent.event_list);
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_VA)) {
+
+ ret = uverbs_copy_from(&buffer_va, attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_VA);
+ if (ret)
+ goto err_event_file;
+
+ ret = uverbs_copy_from(&buffer_length, attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_LENGTH);
+ if (ret)
+ goto err_event_file;
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_FD) ||
+ uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_OFFSET) ||
+ !ib_dev->ops.create_cq_umem) {
+ ret = -EINVAL;
+ goto err_event_file;
+ }
+
+ umem = ib_umem_get(ib_dev, buffer_va, buffer_length, IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(umem)) {
+ ret = PTR_ERR(umem);
+ goto err_event_file;
+ }
+ } else if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_FD)) {
+
+ ret = uverbs_get_raw_fd(&buffer_fd, attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_FD);
+ if (ret)
+ goto err_event_file;
+
+ ret = uverbs_copy_from(&buffer_offset, attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_OFFSET);
+ if (ret)
+ goto err_event_file;
+
+ ret = uverbs_copy_from(&buffer_length, attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_LENGTH);
+ if (ret)
+ goto err_event_file;
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_VA) ||
+ !ib_dev->ops.create_cq_umem) {
+ ret = -EINVAL;
+ goto err_event_file;
+ }
+
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(ib_dev, buffer_offset, buffer_length,
+ buffer_fd, IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(umem_dmabuf)) {
+ ret = PTR_ERR(umem_dmabuf);
+ goto err_event_file;
+ }
+ umem = &umem_dmabuf->umem;
+ } else if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_OFFSET) ||
+ uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_LENGTH) ||
+ !ib_dev->ops.create_cq) {
+ ret = -EINVAL;
+ goto err_event_file;
+ }
+
+ cq = rdma_zalloc_drv_obj(ib_dev, ib_cq);
+ if (!cq) {
+ ret = -ENOMEM;
+ ib_umem_release(umem);
+ goto err_event_file;
+ }
+
+ cq->device = ib_dev;
+ cq->uobject = obj;
+ cq->comp_handler = ib_uverbs_comp_handler;
+ cq->event_handler = ib_uverbs_cq_event_handler;
+ cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
+ atomic_set(&cq->usecnt, 0);
+
+ rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
+ rdma_restrack_set_name(&cq->res, NULL);
+
+ ret = umem ? ib_dev->ops.create_cq_umem(cq, &attr, umem, attrs) :
+ ib_dev->ops.create_cq(cq, &attr, attrs);
+ if (ret)
+ goto err_free;
+
+ obj->uevent.uobject.object = cq;
+ obj->uevent.uobject.user_handle = user_handle;
+ rdma_restrack_add(&cq->res);
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_CQ_HANDLE);
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_CQ_RESP_CQE, &cq->cqe,
+ sizeof(cq->cqe));
+ return ret;
+
+err_free:
+ rdma_restrack_put(&cq->res);
+ kfree(cq);
+err_event_file:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
+ if (ev_file)
+ uverbs_uobject_put(ev_file_uobj);
+ return ret;
+};
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_CQ_CREATE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_CQ_HANDLE,
+ UVERBS_OBJECT_CQ,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_CQE,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_USER_HANDLE,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL,
+ UVERBS_OBJECT_COMP_CHANNEL,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_COMP_VECTOR,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_CREATE_CQ_FLAGS,
+ enum ib_uverbs_ex_create_cq_flags),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_CQ_RESP_CQE,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_CQ_EVENT_FD,
+ UVERBS_OBJECT_ASYNC_EVENT,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_BUFFER_VA,
+ UVERBS_ATTR_TYPE(u64),
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_BUFFER_LENGTH,
+ UVERBS_ATTR_TYPE(u64),
+ UA_OPTIONAL),
+ UVERBS_ATTR_RAW_FD(UVERBS_ATTR_CREATE_CQ_BUFFER_FD,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_BUFFER_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_OPTIONAL),
+ UVERBS_ATTR_UHW());
+
+static int UVERBS_HANDLER(UVERBS_METHOD_CQ_DESTROY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_CQ_HANDLE);
+ struct ib_ucq_object *obj =
+ container_of(uobj, struct ib_ucq_object, uevent.uobject);
+ struct ib_uverbs_destroy_cq_resp resp = {
+ .comp_events_reported = obj->comp_events_reported,
+ .async_events_reported = obj->uevent.events_reported
+ };
+
+ return uverbs_copy_to(attrs, UVERBS_ATTR_DESTROY_CQ_RESP, &resp,
+ sizeof(resp));
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_CQ_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_CQ_HANDLE,
+ UVERBS_OBJECT_CQ,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_DESTROY_CQ_RESP,
+ UVERBS_ATTR_TYPE(struct ib_uverbs_destroy_cq_resp),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_CQ,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_ucq_object), uverbs_free_cq),
+ &UVERBS_METHOD(UVERBS_METHOD_CQ_CREATE),
+ &UVERBS_METHOD(UVERBS_METHOD_CQ_DESTROY)
+);
+
+const struct uapi_definition uverbs_def_obj_cq[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_CQ,
+ UAPI_DEF_OBJ_NEEDS_FN(destroy_cq)),
+ {}
+};
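
The CQ_CREATE handler above accepts the optional buffer attributes only in two consistent combinations: a VA plus length (plain user memory) or a dmabuf fd plus offset and length, rejecting any other mix. A compact restatement of that validation (illustrative only, hypothetical helper):

enum cq_buf_mode { CQ_BUF_NONE, CQ_BUF_VA, CQ_BUF_DMABUF, CQ_BUF_INVALID };

static enum cq_buf_mode classify_cq_buf(int has_va, int has_fd,
					int has_offset, int has_length)
{
	if (has_va && has_length && !has_fd && !has_offset)
		return CQ_BUF_VA;        /* needs ops.create_cq_umem */
	if (has_fd && has_offset && has_length && !has_va)
		return CQ_BUF_DMABUF;    /* needs ops.create_cq_umem */
	if (!has_va && !has_fd && !has_offset && !has_length)
		return CQ_BUF_NONE;      /* needs ops.create_cq */
	return CQ_BUF_INVALID;
}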
diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c
new file mode 100644
index 000000000000..c0fd283d9d6c
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_device.c
@@ -0,0 +1,508 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <linux/overflow.h>
+#include <rdma/uverbs_std_types.h>
+#include "rdma_core.h"
+#include "uverbs.h"
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/opa_addr.h>
+#include <rdma/ib_cache.h>
+
+/*
+ * This ioctl method allows calling any defined write or write_ex
+ * handler. This essentially replaces the hdr/ex_hdr system with the ioctl
+ * marshalling, and brings the non-ex path into the same marshalling as the ex
+ * path.
+ */
+static int UVERBS_HANDLER(UVERBS_METHOD_INVOKE_WRITE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct uverbs_api *uapi = attrs->ufile->device->uapi;
+ const struct uverbs_api_write_method *method_elm;
+ u32 cmd;
+ int rc;
+
+ rc = uverbs_get_const(&cmd, attrs, UVERBS_ATTR_WRITE_CMD);
+ if (rc)
+ return rc;
+
+ method_elm = uapi_get_method(uapi, cmd);
+ if (IS_ERR(method_elm))
+ return PTR_ERR(method_elm);
+
+ uverbs_fill_udata(attrs, &attrs->ucore, UVERBS_ATTR_CORE_IN,
+ UVERBS_ATTR_CORE_OUT);
+
+ if (attrs->ucore.inlen < method_elm->req_size ||
+ attrs->ucore.outlen < method_elm->resp_size)
+ return -ENOSPC;
+
+ attrs->uobject = NULL;
+ rc = method_elm->handler(attrs);
+ if (attrs->uobject)
+ uverbs_finalize_object(attrs->uobject, UVERBS_ACCESS_NEW, true,
+ !rc, attrs);
+ return rc;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_INVOKE_WRITE,
+ UVERBS_ATTR_CONST_IN(UVERBS_ATTR_WRITE_CMD,
+ enum ib_uverbs_write_cmds,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CORE_IN,
+ UVERBS_ATTR_MIN_SIZE(sizeof(u32)),
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CORE_OUT,
+ UVERBS_ATTR_MIN_SIZE(0),
+ UA_OPTIONAL),
+ UVERBS_ATTR_UHW());
+
+static uint32_t *
+gather_objects_handle(struct ib_uverbs_file *ufile,
+ const struct uverbs_api_object *uapi_object,
+ struct uverbs_attr_bundle *attrs,
+ ssize_t out_len,
+ u64 *total)
+{
+ u64 max_count = out_len / sizeof(u32);
+ struct ib_uobject *obj;
+ u64 count = 0;
+ u32 *handles;
+
+ /* Allocate memory that cannot be paged out; all object ids are
+ * gathered into it while holding the spin_lock.
+ */
+ handles = uverbs_zalloc(attrs, out_len);
+ if (IS_ERR(handles))
+ return handles;
+
+ spin_lock_irq(&ufile->uobjects_lock);
+ list_for_each_entry(obj, &ufile->uobjects, list) {
+ u32 obj_id = obj->id;
+
+ if (obj->uapi_object != uapi_object)
+ continue;
+
+ if (count >= max_count)
+ break;
+
+ handles[count] = obj_id;
+ count++;
+ }
+ spin_unlock_irq(&ufile->uobjects_lock);
+
+ *total = count;
+ return handles;
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_INFO_HANDLES)(
+ struct uverbs_attr_bundle *attrs)
+{
+ const struct uverbs_api_object *uapi_object;
+ ssize_t out_len;
+ u64 total = 0;
+ u16 object_id;
+ u32 *handles;
+ int ret;
+
+ out_len = uverbs_attr_get_len(attrs, UVERBS_ATTR_INFO_HANDLES_LIST);
+ if (out_len <= 0 || (out_len % sizeof(u32) != 0))
+ return -EINVAL;
+
+ ret = uverbs_get_const(&object_id, attrs, UVERBS_ATTR_INFO_OBJECT_ID);
+ if (ret)
+ return ret;
+
+ uapi_object = uapi_get_object(attrs->ufile->device->uapi, object_id);
+ if (IS_ERR(uapi_object))
+ return PTR_ERR(uapi_object);
+
+ handles = gather_objects_handle(attrs->ufile, uapi_object, attrs,
+ out_len, &total);
+ if (IS_ERR(handles))
+ return PTR_ERR(handles);
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_INFO_HANDLES_LIST, handles,
+ sizeof(u32) * total);
+ if (ret)
+ goto err;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_INFO_TOTAL_HANDLES, &total,
+ sizeof(total));
+err:
+ return ret;
+}
+
+void copy_port_attr_to_resp(struct ib_port_attr *attr,
+ struct ib_uverbs_query_port_resp *resp,
+ struct ib_device *ib_dev, u8 port_num)
+{
+ resp->state = attr->state;
+ resp->max_mtu = attr->max_mtu;
+ resp->active_mtu = attr->active_mtu;
+ resp->gid_tbl_len = attr->gid_tbl_len;
+ resp->port_cap_flags = make_port_cap_flags(attr);
+ resp->max_msg_sz = attr->max_msg_sz;
+ resp->bad_pkey_cntr = attr->bad_pkey_cntr;
+ resp->qkey_viol_cntr = attr->qkey_viol_cntr;
+ resp->pkey_tbl_len = attr->pkey_tbl_len;
+
+ if (rdma_is_grh_required(ib_dev, port_num))
+ resp->flags |= IB_UVERBS_QPF_GRH_REQUIRED;
+
+ if (rdma_cap_opa_ah(ib_dev, port_num)) {
+ resp->lid = OPA_TO_IB_UCAST_LID(attr->lid);
+ resp->sm_lid = OPA_TO_IB_UCAST_LID(attr->sm_lid);
+ } else {
+ resp->lid = ib_lid_cpu16(attr->lid);
+ resp->sm_lid = ib_lid_cpu16(attr->sm_lid);
+ }
+
+ resp->lmc = attr->lmc;
+ resp->max_vl_num = attr->max_vl_num;
+ resp->sm_sl = attr->sm_sl;
+ resp->subnet_timeout = attr->subnet_timeout;
+ resp->init_type_reply = attr->init_type_reply;
+ resp->active_width = attr->active_width;
+ /* This ABI needs to be extended to report speeds faster than IB_SPEED_NDR */
+ resp->active_speed = min_t(u16, attr->active_speed, IB_SPEED_NDR);
+ resp->phys_state = attr->phys_state;
+ resp->link_layer = rdma_port_get_link_layer(ib_dev, port_num);
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_device *ib_dev;
+ struct ib_port_attr attr = {};
+ struct ib_uverbs_query_port_resp_ex resp = {};
+ struct ib_ucontext *ucontext;
+ int ret;
+ u8 port_num;
+
+ ucontext = ib_uverbs_get_ucontext(attrs);
+ if (IS_ERR(ucontext))
+ return PTR_ERR(ucontext);
+ ib_dev = ucontext->device;
+
+ /* FIXME: Extend the UAPI_DEF_OBJ_NEEDS_FN stuff.. */
+ if (!ib_dev->ops.query_port)
+ return -EOPNOTSUPP;
+
+ ret = uverbs_get_const(&port_num, attrs,
+ UVERBS_ATTR_QUERY_PORT_PORT_NUM);
+ if (ret)
+ return ret;
+
+ ret = ib_query_port(ib_dev, port_num, &attr);
+ if (ret)
+ return ret;
+
+ copy_port_attr_to_resp(&attr, &resp.legacy_resp, ib_dev, port_num);
+ resp.port_cap_flags2 = attr.port_cap_flags2;
+ resp.active_speed_ex = attr.active_speed;
+
+ return uverbs_copy_to_struct_or_zero(attrs, UVERBS_ATTR_QUERY_PORT_RESP,
+ &resp, sizeof(resp));
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_GET_CONTEXT)(
+ struct uverbs_attr_bundle *attrs)
+{
+ u32 num_comp = attrs->ufile->device->num_comp_vectors;
+ u64 core_support = IB_UVERBS_CORE_SUPPORT_OPTIONAL_MR_ACCESS;
+ int ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS,
+ &num_comp, sizeof(num_comp));
+ if (IS_UVERBS_COPY_ERR(ret))
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_GET_CONTEXT_CORE_SUPPORT,
+ &core_support, sizeof(core_support));
+ if (IS_UVERBS_COPY_ERR(ret))
+ return ret;
+
+ ret = ib_alloc_ucontext(attrs);
+ if (ret)
+ return ret;
+ ret = ib_init_ucontext(attrs);
+ if (ret) {
+ kfree(attrs->context);
+ attrs->context = NULL;
+ return ret;
+ }
+ return 0;
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_CONTEXT)(
+ struct uverbs_attr_bundle *attrs)
+{
+ u64 core_support = IB_UVERBS_CORE_SUPPORT_OPTIONAL_MR_ACCESS;
+ struct ib_ucontext *ucontext;
+ struct ib_device *ib_dev;
+ u32 num_comp;
+ int ret;
+
+ ucontext = ib_uverbs_get_ucontext(attrs);
+ if (IS_ERR(ucontext))
+ return PTR_ERR(ucontext);
+ ib_dev = ucontext->device;
+
+ if (!ib_dev->ops.query_ucontext)
+ return -EOPNOTSUPP;
+
+ num_comp = attrs->ufile->device->num_comp_vectors;
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_CONTEXT_NUM_COMP_VECTORS,
+ &num_comp, sizeof(num_comp));
+ if (IS_UVERBS_COPY_ERR(ret))
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_CONTEXT_CORE_SUPPORT,
+ &core_support, sizeof(core_support));
+ if (IS_UVERBS_COPY_ERR(ret))
+ return ret;
+
+ return ucontext->device->ops.query_ucontext(ucontext, attrs);
+}
+
+static int copy_gid_entries_to_user(struct uverbs_attr_bundle *attrs,
+ struct ib_uverbs_gid_entry *entries,
+ size_t num_entries, size_t user_entry_size)
+{
+ const struct uverbs_attr *attr;
+ void __user *user_entries;
+ size_t copy_len;
+ int ret;
+ int i;
+
+ if (user_entry_size == sizeof(*entries)) {
+ ret = uverbs_copy_to(attrs,
+ UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
+ entries, sizeof(*entries) * num_entries);
+ return ret;
+ }
+
+ copy_len = min_t(size_t, user_entry_size, sizeof(*entries));
+ attr = uverbs_attr_get(attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES);
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
+
+ user_entries = u64_to_user_ptr(attr->ptr_attr.data);
+ for (i = 0; i < num_entries; i++) {
+ if (copy_to_user(user_entries, entries, copy_len))
+ return -EFAULT;
+
+ if (user_entry_size > sizeof(*entries)) {
+ if (clear_user(user_entries + sizeof(*entries),
+ user_entry_size - sizeof(*entries)))
+ return -EFAULT;
+ }
+
+ entries++;
+ user_entries += user_entry_size;
+ }
+
+ return uverbs_output_written(attrs,
+ UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES);
+}
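
copy_gid_entries_to_user() above has to hand a fixed-size kernel struct to userspace entries whose size may differ across ABI revisions, so it copies the common prefix and zeroes any extra user-side tail. A standalone sketch of that forward/backward-compatible copy:

#include <string.h>
#include <stdint.h>
#include <stddef.h>

static void compat_copy_entry(void *dst, size_t dst_size,
			      const void *src, size_t src_size)
{
	size_t n = dst_size < src_size ? dst_size : src_size;

	memcpy(dst, src, n);              /* common prefix */
	if (dst_size > n)                 /* newer, larger destination */
		memset((uint8_t *)dst + n, 0, dst_size - n);
}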
+
+static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_TABLE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uverbs_gid_entry *entries;
+ struct ib_ucontext *ucontext;
+ struct ib_device *ib_dev;
+ size_t user_entry_size;
+ ssize_t num_entries;
+ int max_entries;
+ u32 flags;
+ int ret;
+
+ ret = uverbs_get_flags32(&flags, attrs,
+ UVERBS_ATTR_QUERY_GID_TABLE_FLAGS, 0);
+ if (ret)
+ return ret;
+
+ ret = uverbs_get_const(&user_entry_size, attrs,
+ UVERBS_ATTR_QUERY_GID_TABLE_ENTRY_SIZE);
+ if (ret)
+ return ret;
+
+ if (!user_entry_size)
+ return -EINVAL;
+
+ max_entries = uverbs_attr_ptr_get_array_size(
+ attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
+ user_entry_size);
+ if (max_entries <= 0)
+ return max_entries ?: -EINVAL;
+
+ ucontext = ib_uverbs_get_ucontext(attrs);
+ if (IS_ERR(ucontext))
+ return PTR_ERR(ucontext);
+ ib_dev = ucontext->device;
+
+ entries = uverbs_kcalloc(attrs, max_entries, sizeof(*entries));
+ if (IS_ERR(entries))
+ return PTR_ERR(entries);
+
+ num_entries = rdma_query_gid_table(ib_dev, entries, max_entries);
+ if (num_entries < 0)
+ return -EINVAL;
+
+ ret = copy_gid_entries_to_user(attrs, entries, num_entries,
+ user_entry_size);
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs,
+ UVERBS_ATTR_QUERY_GID_TABLE_RESP_NUM_ENTRIES,
+ &num_entries, sizeof(num_entries));
+ return ret;
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_ENTRY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uverbs_gid_entry entry = {};
+ const struct ib_gid_attr *gid_attr;
+ struct ib_ucontext *ucontext;
+ struct ib_device *ib_dev;
+ struct net_device *ndev;
+ u32 gid_index;
+ u32 port_num;
+ u32 flags;
+ int ret;
+
+ ret = uverbs_get_flags32(&flags, attrs,
+ UVERBS_ATTR_QUERY_GID_ENTRY_FLAGS, 0);
+ if (ret)
+ return ret;
+
+ ret = uverbs_get_const(&port_num, attrs,
+ UVERBS_ATTR_QUERY_GID_ENTRY_PORT);
+ if (ret)
+ return ret;
+
+ ret = uverbs_get_const(&gid_index, attrs,
+ UVERBS_ATTR_QUERY_GID_ENTRY_GID_INDEX);
+ if (ret)
+ return ret;
+
+ ucontext = ib_uverbs_get_ucontext(attrs);
+ if (IS_ERR(ucontext))
+ return PTR_ERR(ucontext);
+ ib_dev = ucontext->device;
+
+ if (!rdma_is_port_valid(ib_dev, port_num))
+ return -EINVAL;
+
+ gid_attr = rdma_get_gid_attr(ib_dev, port_num, gid_index);
+ if (IS_ERR(gid_attr))
+ return PTR_ERR(gid_attr);
+
+ memcpy(&entry.gid, &gid_attr->gid, sizeof(gid_attr->gid));
+ entry.gid_index = gid_attr->index;
+ entry.port_num = gid_attr->port_num;
+ entry.gid_type = gid_attr->gid_type;
+
+ rcu_read_lock();
+ ndev = rdma_read_gid_attr_ndev_rcu(gid_attr);
+ if (IS_ERR(ndev)) {
+ if (PTR_ERR(ndev) != -ENODEV) {
+ ret = PTR_ERR(ndev);
+ rcu_read_unlock();
+ goto out;
+ }
+ } else {
+ entry.netdev_ifindex = ndev->ifindex;
+ }
+ rcu_read_unlock();
+
+ ret = uverbs_copy_to_struct_or_zero(
+ attrs, UVERBS_ATTR_QUERY_GID_ENTRY_RESP_ENTRY, &entry,
+ sizeof(entry));
+out:
+ rdma_put_gid_attr(gid_attr);
+ return ret;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_GET_CONTEXT,
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS,
+ UVERBS_ATTR_TYPE(u32), UA_OPTIONAL),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_GET_CONTEXT_CORE_SUPPORT,
+ UVERBS_ATTR_TYPE(u64), UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_GET_CONTEXT_FD_ARR,
+ UVERBS_ATTR_MIN_SIZE(sizeof(int)),
+ UA_OPTIONAL,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_UHW());
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_QUERY_CONTEXT,
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_CONTEXT_NUM_COMP_VECTORS,
+ UVERBS_ATTR_TYPE(u32), UA_OPTIONAL),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_CONTEXT_CORE_SUPPORT,
+ UVERBS_ATTR_TYPE(u64), UA_OPTIONAL));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_INFO_HANDLES,
+ /* Also includes any device specific object ids */
+ UVERBS_ATTR_CONST_IN(UVERBS_ATTR_INFO_OBJECT_ID,
+ enum uverbs_default_objects, UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_INFO_TOTAL_HANDLES,
+ UVERBS_ATTR_TYPE(u32), UA_OPTIONAL),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_INFO_HANDLES_LIST,
+ UVERBS_ATTR_MIN_SIZE(sizeof(u32)), UA_OPTIONAL));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_QUERY_PORT,
+ UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_PORT_PORT_NUM, u8, UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(
+ UVERBS_ATTR_QUERY_PORT_RESP,
+ UVERBS_ATTR_STRUCT(struct ib_uverbs_query_port_resp_ex,
+ active_speed_ex),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_QUERY_GID_TABLE,
+ UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_GID_TABLE_ENTRY_SIZE, u64,
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_QUERY_GID_TABLE_FLAGS, u32,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
+ UVERBS_ATTR_MIN_SIZE(0), UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_GID_TABLE_RESP_NUM_ENTRIES,
+ UVERBS_ATTR_TYPE(u64), UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_QUERY_GID_ENTRY,
+ UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_GID_ENTRY_PORT, u32,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_GID_ENTRY_GID_INDEX, u32,
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_QUERY_GID_ENTRY_FLAGS, u32,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_GID_ENTRY_RESP_ENTRY,
+ UVERBS_ATTR_STRUCT(struct ib_uverbs_gid_entry,
+ netdev_ifindex),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DEVICE,
+ &UVERBS_METHOD(UVERBS_METHOD_GET_CONTEXT),
+ &UVERBS_METHOD(UVERBS_METHOD_INVOKE_WRITE),
+ &UVERBS_METHOD(UVERBS_METHOD_INFO_HANDLES),
+ &UVERBS_METHOD(UVERBS_METHOD_QUERY_PORT),
+ &UVERBS_METHOD(UVERBS_METHOD_QUERY_CONTEXT),
+ &UVERBS_METHOD(UVERBS_METHOD_QUERY_GID_TABLE),
+ &UVERBS_METHOD(UVERBS_METHOD_QUERY_GID_ENTRY));
+
+const struct uapi_definition uverbs_def_obj_device[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DEVICE),
+ {},
+};
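
Several response attributes above are declared with UVERBS_ATTR_STRUCT(type, last_field), which (an assumption drawn from how the macro is used here, not from its definition) fixes the minimum acceptable length at the end of that field so older userspace with a shorter struct is still served. A userspace restatement of the size calculation (hypothetical struct; offsetofend() re-derived because it is a kernel helper):

#include <stddef.h>

struct example_resp {
	unsigned int   a;
	unsigned int   b;
	unsigned short last_mandatory;
	unsigned short newer_optional;
};

#define offsetofend(type, member) \
	(offsetof(type, member) + sizeof(((type *)0)->member))

/* Responses at least this long are acceptable; longer ones get their
 * extra bytes zeroed by the copy helpers. */
static const size_t min_resp_len = offsetofend(struct example_resp,
					       last_mandatory);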
diff --git a/drivers/infiniband/core/uverbs_std_types_dm.c b/drivers/infiniband/core/uverbs_std_types_dm.c
new file mode 100644
index 000000000000..98c522cf86d6
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_dm.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rdma_core.h"
+#include "uverbs.h"
+#include <rdma/uverbs_std_types.h>
+
+static int uverbs_free_dm(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_dm *dm = uobject->object;
+
+ if (atomic_read(&dm->usecnt))
+ return -EBUSY;
+
+ return dm->device->ops.dealloc_dm(dm, attrs);
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_dm_alloc_attr attr = {};
+ struct ib_uobject *uobj =
+ uverbs_attr_get(attrs, UVERBS_ATTR_ALLOC_DM_HANDLE)
+ ->obj_attr.uobject;
+ struct ib_device *ib_dev = attrs->context->device;
+ struct ib_dm *dm;
+ int ret;
+
+ if (!ib_dev->ops.alloc_dm)
+ return -EOPNOTSUPP;
+
+ ret = uverbs_copy_from(&attr.length, attrs,
+ UVERBS_ATTR_ALLOC_DM_LENGTH);
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_from(&attr.alignment, attrs,
+ UVERBS_ATTR_ALLOC_DM_ALIGNMENT);
+ if (ret)
+ return ret;
+
+ dm = ib_dev->ops.alloc_dm(ib_dev, attrs->context, &attr, attrs);
+ if (IS_ERR(dm))
+ return PTR_ERR(dm);
+
+ dm->device = ib_dev;
+ dm->length = attr.length;
+ dm->uobject = uobj;
+ atomic_set(&dm->usecnt, 0);
+
+ uobj->object = dm;
+
+ return 0;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_DM_ALLOC,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_ALLOC_DM_HANDLE,
+ UVERBS_OBJECT_DM,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_ALLOC_DM_LENGTH,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_ALLOC_DM_ALIGNMENT,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ UVERBS_METHOD_DM_FREE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_FREE_DM_HANDLE,
+ UVERBS_OBJECT_DM,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_DM,
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_dm),
+ &UVERBS_METHOD(UVERBS_METHOD_DM_ALLOC),
+ &UVERBS_METHOD(UVERBS_METHOD_DM_FREE));
+
+const struct uapi_definition uverbs_def_obj_dm[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DM,
+ UAPI_DEF_OBJ_NEEDS_FN(dealloc_dm)),
+ {}
+};
diff --git a/drivers/infiniband/core/uverbs_std_types_dmah.c b/drivers/infiniband/core/uverbs_std_types_dmah.c
new file mode 100644
index 000000000000..453ce656c6f2
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_dmah.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#include "rdma_core.h"
+#include "uverbs.h"
+#include <rdma/uverbs_std_types.h>
+#include "restrack.h"
+
+static int uverbs_free_dmah(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_dmah *dmah = uobject->object;
+ int ret;
+
+ if (atomic_read(&dmah->usecnt))
+ return -EBUSY;
+
+ ret = dmah->device->ops.dealloc_dmah(dmah, attrs);
+ if (ret)
+ return ret;
+
+ rdma_restrack_del(&dmah->res);
+ kfree(dmah);
+ return 0;
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_DMAH_ALLOC)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get(attrs, UVERBS_ATTR_ALLOC_DMAH_HANDLE)
+ ->obj_attr.uobject;
+ struct ib_device *ib_dev = attrs->context->device;
+ struct ib_dmah *dmah;
+ int ret;
+
+ dmah = rdma_zalloc_drv_obj(ib_dev, ib_dmah);
+ if (!dmah)
+ return -ENOMEM;
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_ALLOC_DMAH_CPU_ID)) {
+ ret = uverbs_copy_from(&dmah->cpu_id, attrs,
+ UVERBS_ATTR_ALLOC_DMAH_CPU_ID);
+ if (ret)
+ goto err;
+
+ if (!cpumask_test_cpu(dmah->cpu_id, current->cpus_ptr)) {
+ ret = -EPERM;
+ goto err;
+ }
+
+ dmah->valid_fields |= BIT(IB_DMAH_CPU_ID_EXISTS);
+ }
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_ALLOC_DMAH_TPH_MEM_TYPE)) {
+ dmah->mem_type = uverbs_attr_get_enum_id(attrs,
+ UVERBS_ATTR_ALLOC_DMAH_TPH_MEM_TYPE);
+ dmah->valid_fields |= BIT(IB_DMAH_MEM_TYPE_EXISTS);
+ }
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_ALLOC_DMAH_PH)) {
+ ret = uverbs_copy_from(&dmah->ph, attrs,
+ UVERBS_ATTR_ALLOC_DMAH_PH);
+ if (ret)
+ goto err;
+
+ /* Per PCIe spec 6.2-1.0, only the lowest two bits are applicable */
+ if (dmah->ph & 0xFC) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ dmah->valid_fields |= BIT(IB_DMAH_PH_EXISTS);
+ }
+
+ dmah->device = ib_dev;
+ dmah->uobject = uobj;
+ atomic_set(&dmah->usecnt, 0);
+
+ rdma_restrack_new(&dmah->res, RDMA_RESTRACK_DMAH);
+ rdma_restrack_set_name(&dmah->res, NULL);
+
+ ret = ib_dev->ops.alloc_dmah(dmah, attrs);
+ if (ret) {
+ rdma_restrack_put(&dmah->res);
+ goto err;
+ }
+
+ uobj->object = dmah;
+ rdma_restrack_add(&dmah->res);
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_ALLOC_DMAH_HANDLE);
+ return 0;
+err:
+ kfree(dmah);
+ return ret;
+}
+
+static const struct uverbs_attr_spec uverbs_dmah_mem_type[] = {
+ [TPH_MEM_TYPE_VM] = {
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_NO_DATA(),
+ },
+ [TPH_MEM_TYPE_PM] = {
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_NO_DATA(),
+ },
+};
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_DMAH_ALLOC,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_ALLOC_DMAH_HANDLE,
+ UVERBS_OBJECT_DMAH,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_ALLOC_DMAH_CPU_ID,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL),
+ UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_ALLOC_DMAH_TPH_MEM_TYPE,
+ uverbs_dmah_mem_type,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_ALLOC_DMAH_PH,
+ UVERBS_ATTR_TYPE(u8),
+ UA_OPTIONAL));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ UVERBS_METHOD_DMAH_FREE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_FREE_DMA_HANDLE,
+ UVERBS_OBJECT_DMAH,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_DMAH,
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_dmah),
+ &UVERBS_METHOD(UVERBS_METHOD_DMAH_ALLOC),
+ &UVERBS_METHOD(UVERBS_METHOD_DMAH_FREE));
+
+const struct uapi_definition uverbs_def_obj_dmah[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DMAH,
+ UAPI_DEF_OBJ_NEEDS_FN(dealloc_dmah),
+ UAPI_DEF_OBJ_NEEDS_FN(alloc_dmah)),
+ {}
+};
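
The DMAH_ALLOC handler above rejects any TPH processing hint with bits above the lowest two set (the 0xFC mask). The check reduces to the following, shown here only to make the bit range explicit:

#include <stdint.h>

/* Only PH values 0..3 are meaningful; anything with bits 2-7 set is
 * rejected, mirroring the (dmah->ph & 0xFC) test above. */
static int ph_is_valid(uint8_t ph)
{
	return (ph & 0xFC) == 0;
}

/* ph_is_valid(0x3) == 1, ph_is_valid(0x4) == 0 */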
diff --git a/drivers/infiniband/hw/cxgb3/iwch_user.h b/drivers/infiniband/core/uverbs_std_types_flow_action.c
index a277c31fcaf7..0ddcf6da66c4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_user.h
+++ b/drivers/infiniband/core/uverbs_std_types_flow_action.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -29,46 +29,38 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef __IWCH_USER_H__
-#define __IWCH_USER_H__
-#define IWCH_UVERBS_ABI_VERSION 1
+#include "rdma_core.h"
+#include "uverbs.h"
+#include <rdma/uverbs_std_types.h>
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
- */
-struct iwch_create_cq_req {
- __u64 user_rptr_addr;
-};
+static int uverbs_free_flow_action(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_flow_action *action = uobject->object;
-struct iwch_create_cq_resp_v0 {
- __u64 key;
- __u32 cqid;
- __u32 size_log2;
-};
+ if (atomic_read(&action->usecnt))
+ return -EBUSY;
-struct iwch_create_cq_resp {
- __u64 key;
- __u32 cqid;
- __u32 size_log2;
- __u32 memsize;
- __u32 reserved;
-};
+ return action->device->ops.destroy_flow_action(action);
+}
-struct iwch_create_qp_resp {
- __u64 key;
- __u64 db_key;
- __u32 qpid;
- __u32 size_log2;
- __u32 sq_size_log2;
- __u32 rq_size_log2;
-};
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ UVERBS_METHOD_FLOW_ACTION_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_flow_action),
+ &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY));
-struct iwch_reg_user_mr_resp {
- __u32 pbl_addr;
+const struct uapi_definition uverbs_def_obj_flow_action[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ UVERBS_OBJECT_FLOW_ACTION,
+ UAPI_DEF_OBJ_NEEDS_FN(destroy_flow_action)),
+ {}
};
-#endif
diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c
new file mode 100644
index 000000000000..570b9656801d
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_mr.c
@@ -0,0 +1,553 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2020, Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rdma_core.h"
+#include "uverbs.h"
+#include <rdma/uverbs_std_types.h>
+#include "restrack.h"
+
+static int uverbs_free_mr(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ return ib_dereg_mr_user((struct ib_mr *)uobject->object,
+ &attrs->driver_udata);
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_ADVISE_MR)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_pd *pd =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_ADVISE_MR_PD_HANDLE);
+ enum ib_uverbs_advise_mr_advice advice;
+ struct ib_device *ib_dev = pd->device;
+ struct ib_sge *sg_list;
+ int num_sge;
+ u32 flags;
+ int ret;
+
+ /* FIXME: Extend the UAPI_DEF_OBJ_NEEDS_FN stuff.. */
+ if (!ib_dev->ops.advise_mr)
+ return -EOPNOTSUPP;
+
+ ret = uverbs_get_const(&advice, attrs, UVERBS_ATTR_ADVISE_MR_ADVICE);
+ if (ret)
+ return ret;
+
+ ret = uverbs_get_flags32(&flags, attrs, UVERBS_ATTR_ADVISE_MR_FLAGS,
+ IB_UVERBS_ADVISE_MR_FLAG_FLUSH);
+ if (ret)
+ return ret;
+
+ num_sge = uverbs_attr_ptr_get_array_size(
+ attrs, UVERBS_ATTR_ADVISE_MR_SGE_LIST, sizeof(struct ib_sge));
+ if (num_sge <= 0)
+ return num_sge;
+
+ sg_list = uverbs_attr_get_alloced_ptr(attrs,
+ UVERBS_ATTR_ADVISE_MR_SGE_LIST);
+ return ib_dev->ops.advise_mr(pd, advice, flags, sg_list, num_sge,
+ attrs);
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_dm_mr_attr attr = {};
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_REG_DM_MR_HANDLE);
+ struct ib_dm *dm =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_REG_DM_MR_DM_HANDLE);
+ struct ib_pd *pd =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_REG_DM_MR_PD_HANDLE);
+ struct ib_device *ib_dev = pd->device;
+
+ struct ib_mr *mr;
+ int ret;
+
+ if (!ib_dev->ops.reg_dm_mr)
+ return -EOPNOTSUPP;
+
+ ret = uverbs_copy_from(&attr.offset, attrs, UVERBS_ATTR_REG_DM_MR_OFFSET);
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_from(&attr.length, attrs,
+ UVERBS_ATTR_REG_DM_MR_LENGTH);
+ if (ret)
+ return ret;
+
+ ret = uverbs_get_flags32(&attr.access_flags, attrs,
+ UVERBS_ATTR_REG_DM_MR_ACCESS_FLAGS,
+ IB_ACCESS_SUPPORTED);
+ if (ret)
+ return ret;
+
+ if (!(attr.access_flags & IB_ZERO_BASED))
+ return -EINVAL;
+
+ ret = ib_check_mr_access(ib_dev, attr.access_flags);
+ if (ret)
+ return ret;
+
+ if (attr.offset > dm->length || attr.length > dm->length ||
+ attr.length > dm->length - attr.offset)
+ return -EINVAL;
+
+ mr = pd->device->ops.reg_dm_mr(pd, dm, &attr, attrs);
+ if (IS_ERR(mr))
+ return PTR_ERR(mr);
+
+ mr->device = pd->device;
+ mr->pd = pd;
+ mr->type = IB_MR_TYPE_DM;
+ mr->dm = dm;
+ mr->uobject = uobj;
+ atomic_inc(&pd->usecnt);
+ atomic_inc(&dm->usecnt);
+
+ rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
+ rdma_restrack_set_name(&mr->res, NULL);
+ rdma_restrack_add(&mr->res);
+ uobj->object = mr;
+
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_REG_DM_MR_HANDLE);
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_DM_MR_RESP_LKEY, &mr->lkey,
+ sizeof(mr->lkey));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_DM_MR_RESP_RKEY,
+ &mr->rkey, sizeof(mr->rkey));
+ return ret;
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_MR)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_mr *mr =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_QUERY_MR_HANDLE);
+ int ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_MR_RESP_LKEY, &mr->lkey,
+ sizeof(mr->lkey));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_MR_RESP_RKEY,
+ &mr->rkey, sizeof(mr->rkey));
+
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_MR_RESP_LENGTH,
+ &mr->length, sizeof(mr->length));
+
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_MR_RESP_IOVA,
+ &mr->iova, sizeof(mr->iova));
+
+ return IS_UVERBS_COPY_ERR(ret) ? ret : 0;
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_REG_DMABUF_MR)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_REG_DMABUF_MR_HANDLE);
+ struct ib_pd *pd =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_REG_DMABUF_MR_PD_HANDLE);
+ struct ib_device *ib_dev = pd->device;
+
+ u64 offset, length, iova;
+ u32 fd, access_flags;
+ struct ib_mr *mr;
+ int ret;
+
+ if (!ib_dev->ops.reg_user_mr_dmabuf)
+ return -EOPNOTSUPP;
+
+ ret = uverbs_copy_from(&offset, attrs,
+ UVERBS_ATTR_REG_DMABUF_MR_OFFSET);
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_from(&length, attrs,
+ UVERBS_ATTR_REG_DMABUF_MR_LENGTH);
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_from(&iova, attrs,
+ UVERBS_ATTR_REG_DMABUF_MR_IOVA);
+ if (ret)
+ return ret;
+
+ if ((offset & ~PAGE_MASK) != (iova & ~PAGE_MASK))
+ return -EINVAL;
+
+ ret = uverbs_copy_from(&fd, attrs,
+ UVERBS_ATTR_REG_DMABUF_MR_FD);
+ if (ret)
+ return ret;
+
+ ret = uverbs_get_flags32(&access_flags, attrs,
+ UVERBS_ATTR_REG_DMABUF_MR_ACCESS_FLAGS,
+ IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_ATOMIC |
+ IB_ACCESS_RELAXED_ORDERING);
+ if (ret)
+ return ret;
+
+ ret = ib_check_mr_access(ib_dev, access_flags);
+ if (ret)
+ return ret;
+
+ mr = pd->device->ops.reg_user_mr_dmabuf(pd, offset, length, iova, fd,
+ access_flags, NULL,
+ attrs);
+ if (IS_ERR(mr))
+ return PTR_ERR(mr);
+
+ mr->device = pd->device;
+ mr->pd = pd;
+ mr->type = IB_MR_TYPE_USER;
+ mr->uobject = uobj;
+ atomic_inc(&pd->usecnt);
+
+ rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
+ rdma_restrack_set_name(&mr->res, NULL);
+ rdma_restrack_add(&mr->res);
+ uobj->object = mr;
+
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_REG_DMABUF_MR_HANDLE);
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_DMABUF_MR_RESP_LKEY,
+ &mr->lkey, sizeof(mr->lkey));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_DMABUF_MR_RESP_RKEY,
+ &mr->rkey, sizeof(mr->rkey));
+ return ret;
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_REG_MR)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_REG_MR_HANDLE);
+ struct ib_pd *pd =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_REG_MR_PD_HANDLE);
+ u32 valid_access_flags = IB_ACCESS_SUPPORTED;
+ u64 length, iova, fd_offset = 0, addr = 0;
+ struct ib_device *ib_dev = pd->device;
+ struct ib_dmah *dmah = NULL;
+ bool has_fd_offset = false;
+ bool has_addr = false;
+ bool has_fd = false;
+ u32 access_flags;
+ struct ib_mr *mr;
+ int fd;
+ int ret;
+
+ ret = uverbs_copy_from(&iova, attrs, UVERBS_ATTR_REG_MR_IOVA);
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_from(&length, attrs, UVERBS_ATTR_REG_MR_LENGTH);
+ if (ret)
+ return ret;
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_REG_MR_ADDR)) {
+ ret = uverbs_copy_from(&addr, attrs,
+ UVERBS_ATTR_REG_MR_ADDR);
+ if (ret)
+ return ret;
+ has_addr = true;
+ }
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_REG_MR_FD_OFFSET)) {
+ ret = uverbs_copy_from(&fd_offset, attrs,
+ UVERBS_ATTR_REG_MR_FD_OFFSET);
+ if (ret)
+ return ret;
+ has_fd_offset = true;
+ }
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_REG_MR_FD)) {
+ ret = uverbs_get_raw_fd(&fd, attrs,
+ UVERBS_ATTR_REG_MR_FD);
+ if (ret)
+ return ret;
+ has_fd = true;
+ }
+
+ if (has_fd) {
+ if (!ib_dev->ops.reg_user_mr_dmabuf)
+ return -EOPNOTSUPP;
+
+ /* FD requires offset and can't come with addr */
+ if (!has_fd_offset || has_addr)
+ return -EINVAL;
+
+ if ((fd_offset & ~PAGE_MASK) != (iova & ~PAGE_MASK))
+ return -EINVAL;
+
+ valid_access_flags = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_ATOMIC |
+ IB_ACCESS_RELAXED_ORDERING;
+ } else {
+ if (!has_addr || has_fd_offset)
+ return -EINVAL;
+
+ if ((addr & ~PAGE_MASK) != (iova & ~PAGE_MASK))
+ return -EINVAL;
+ }
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_REG_MR_DMA_HANDLE)) {
+ dmah = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_REG_MR_DMA_HANDLE);
+ if (IS_ERR(dmah))
+ return PTR_ERR(dmah);
+ }
+
+ ret = uverbs_get_flags32(&access_flags, attrs,
+ UVERBS_ATTR_REG_MR_ACCESS_FLAGS,
+ valid_access_flags);
+ if (ret)
+ return ret;
+
+ ret = ib_check_mr_access(ib_dev, access_flags);
+ if (ret)
+ return ret;
+
+ if (has_fd)
+ mr = pd->device->ops.reg_user_mr_dmabuf(pd, fd_offset, length,
+ iova, fd, access_flags,
+ dmah, attrs);
+ else
+ mr = pd->device->ops.reg_user_mr(pd, addr, length, iova,
+ access_flags, dmah, NULL);
+
+ if (IS_ERR(mr))
+ return PTR_ERR(mr);
+
+ mr->device = pd->device;
+ mr->pd = pd;
+ mr->type = IB_MR_TYPE_USER;
+ mr->uobject = uobj;
+ atomic_inc(&pd->usecnt);
+ if (dmah) {
+ mr->dmah = dmah;
+ atomic_inc(&dmah->usecnt);
+ }
+ rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
+ rdma_restrack_set_name(&mr->res, NULL);
+ rdma_restrack_add(&mr->res);
+ uobj->object = mr;
+
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_REG_MR_HANDLE);
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_MR_RESP_LKEY,
+ &mr->lkey, sizeof(mr->lkey));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_MR_RESP_RKEY,
+ &mr->rkey, sizeof(mr->rkey));
+ return ret;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_ADVISE_MR,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_ADVISE_MR_PD_HANDLE,
+ UVERBS_OBJECT_PD,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(UVERBS_ATTR_ADVISE_MR_ADVICE,
+ enum ib_uverbs_advise_mr_advice,
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_ADVISE_MR_FLAGS,
+ enum ib_uverbs_advise_mr_flag,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_ADVISE_MR_SGE_LIST,
+ UVERBS_ATTR_MIN_SIZE(sizeof(struct ib_uverbs_sge)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_QUERY_MR,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_QUERY_MR_HANDLE,
+ UVERBS_OBJECT_MR,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_MR_RESP_RKEY,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_MR_RESP_LKEY,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_MR_RESP_LENGTH,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_MR_RESP_IOVA,
+ UVERBS_ATTR_TYPE(u64),
+ UA_OPTIONAL));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_DM_MR_REG,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_HANDLE,
+ UVERBS_OBJECT_MR,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DM_MR_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DM_MR_LENGTH,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_PD_HANDLE,
+ UVERBS_OBJECT_PD,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_REG_DM_MR_ACCESS_FLAGS,
+ enum ib_access_flags),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_DM_HANDLE,
+ UVERBS_OBJECT_DM,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DM_MR_RESP_LKEY,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DM_MR_RESP_RKEY,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_REG_DMABUF_MR,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DMABUF_MR_HANDLE,
+ UVERBS_OBJECT_MR,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DMABUF_MR_PD_HANDLE,
+ UVERBS_OBJECT_PD,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DMABUF_MR_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DMABUF_MR_LENGTH,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DMABUF_MR_IOVA,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DMABUF_MR_FD,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_REG_DMABUF_MR_ACCESS_FLAGS,
+ enum ib_access_flags),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DMABUF_MR_RESP_LKEY,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DMABUF_MR_RESP_RKEY,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_REG_MR,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_REG_MR_HANDLE,
+ UVERBS_OBJECT_MR,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_REG_MR_PD_HANDLE,
+ UVERBS_OBJECT_PD,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_REG_MR_DMA_HANDLE,
+ UVERBS_OBJECT_DMAH,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_MR_IOVA,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_MR_LENGTH,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_REG_MR_ACCESS_FLAGS,
+ enum ib_access_flags,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_MR_ADDR,
+ UVERBS_ATTR_TYPE(u64),
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_MR_FD_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_OPTIONAL),
+ UVERBS_ATTR_RAW_FD(UVERBS_ATTR_REG_MR_FD,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_MR_RESP_LKEY,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_MR_RESP_RKEY,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ UVERBS_METHOD_MR_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_MR_HANDLE,
+ UVERBS_OBJECT_MR,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_MR,
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_mr),
+ &UVERBS_METHOD(UVERBS_METHOD_ADVISE_MR),
+ &UVERBS_METHOD(UVERBS_METHOD_DM_MR_REG),
+ &UVERBS_METHOD(UVERBS_METHOD_MR_DESTROY),
+ &UVERBS_METHOD(UVERBS_METHOD_QUERY_MR),
+ &UVERBS_METHOD(UVERBS_METHOD_REG_DMABUF_MR),
+ &UVERBS_METHOD(UVERBS_METHOD_REG_MR));
+
+const struct uapi_definition uverbs_def_obj_mr[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_MR,
+ UAPI_DEF_OBJ_NEEDS_FN(dereg_mr)),
+ {}
+};
diff --git a/drivers/infiniband/core/uverbs_std_types_qp.c b/drivers/infiniband/core/uverbs_std_types_qp.c
new file mode 100644
index 000000000000..be0730e8509e
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_qp.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include "rdma_core.h"
+#include "uverbs.h"
+#include "core_priv.h"
+
+static int uverbs_free_qp(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_qp *qp = uobject->object;
+ struct ib_uqp_object *uqp =
+ container_of(uobject, struct ib_uqp_object, uevent.uobject);
+ int ret;
+
+ /*
+ * If this is a user triggered destroy then do not allow destruction
+ * until the user cleans up all the mcast bindings. Unlike in other
+ * places we forcibly clean up the mcast attachments for !DESTROY
+ * because the mcast attachments are not uobjects and will not be
+ * destroyed by anything else during cleanup processing.
+ */
+ if (why == RDMA_REMOVE_DESTROY) {
+ if (!list_empty(&uqp->mcast_list))
+ return -EBUSY;
+ } else if (qp == qp->real_qp) {
+ ib_uverbs_detach_umcast(qp, uqp);
+ }
+
+ ret = ib_destroy_qp_user(qp, &attrs->driver_udata);
+ if (ret)
+ return ret;
+
+ if (uqp->uxrcd)
+ atomic_dec(&uqp->uxrcd->refcnt);
+
+ ib_uverbs_release_uevent(&uqp->uevent);
+ return 0;
+}
+
+static int check_creation_flags(enum ib_qp_type qp_type,
+ u32 create_flags)
+{
+ create_flags &= ~IB_UVERBS_QP_CREATE_SQ_SIG_ALL;
+
+ if (!create_flags || qp_type == IB_QPT_DRIVER)
+ return 0;
+
+ if (qp_type != IB_QPT_RAW_PACKET && qp_type != IB_QPT_UD)
+ return -EINVAL;
+
+ if ((create_flags & IB_UVERBS_QP_CREATE_SCATTER_FCS ||
+ create_flags & IB_UVERBS_QP_CREATE_CVLAN_STRIPPING) &&
+ qp_type != IB_QPT_RAW_PACKET)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void set_caps(struct ib_qp_init_attr *attr,
+ struct ib_uverbs_qp_cap *cap, bool req)
+{
+ if (req) {
+ attr->cap.max_send_wr = cap->max_send_wr;
+ attr->cap.max_recv_wr = cap->max_recv_wr;
+ attr->cap.max_send_sge = cap->max_send_sge;
+ attr->cap.max_recv_sge = cap->max_recv_sge;
+ attr->cap.max_inline_data = cap->max_inline_data;
+ } else {
+ cap->max_send_wr = attr->cap.max_send_wr;
+ cap->max_recv_wr = attr->cap.max_recv_wr;
+ cap->max_send_sge = attr->cap.max_send_sge;
+ cap->max_recv_sge = attr->cap.max_recv_sge;
+ cap->max_inline_data = attr->cap.max_inline_data;
+ }
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_QP_CREATE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uqp_object *obj = container_of(
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_QP_HANDLE),
+ typeof(*obj), uevent.uobject);
+ struct ib_qp_init_attr attr = {};
+ struct ib_uverbs_qp_cap cap = {};
+ struct ib_rwq_ind_table *rwq_ind_tbl = NULL;
+ struct ib_qp *qp;
+ struct ib_pd *pd = NULL;
+ struct ib_srq *srq = NULL;
+ struct ib_cq *recv_cq = NULL;
+ struct ib_cq *send_cq = NULL;
+ struct ib_xrcd *xrcd = NULL;
+ struct ib_uobject *xrcd_uobj = NULL;
+ struct ib_device *device;
+ u64 user_handle;
+ int ret;
+
+ ret = uverbs_copy_from_or_zero(&cap, attrs,
+ UVERBS_ATTR_CREATE_QP_CAP);
+ if (!ret)
+ ret = uverbs_copy_from(&user_handle, attrs,
+ UVERBS_ATTR_CREATE_QP_USER_HANDLE);
+ if (!ret)
+ ret = uverbs_get_const(&attr.qp_type, attrs,
+ UVERBS_ATTR_CREATE_QP_TYPE);
+ if (ret)
+ return ret;
+
+ switch (attr.qp_type) {
+ case IB_QPT_XRC_TGT:
+ if (uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE) ||
+ uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE) ||
+ uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_PD_HANDLE) ||
+ uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_IND_TABLE_HANDLE))
+ return -EINVAL;
+
+ xrcd_uobj = uverbs_attr_get_uobject(attrs,
+ UVERBS_ATTR_CREATE_QP_XRCD_HANDLE);
+ if (IS_ERR(xrcd_uobj))
+ return PTR_ERR(xrcd_uobj);
+
+ xrcd = (struct ib_xrcd *)xrcd_uobj->object;
+ if (!xrcd)
+ return -EINVAL;
+ device = xrcd->device;
+ break;
+ case IB_UVERBS_QPT_RAW_PACKET:
+ if (!rdma_uattrs_has_raw_cap(attrs))
+ return -EPERM;
+ fallthrough;
+ case IB_UVERBS_QPT_RC:
+ case IB_UVERBS_QPT_UC:
+ case IB_UVERBS_QPT_UD:
+ case IB_UVERBS_QPT_XRC_INI:
+ case IB_UVERBS_QPT_DRIVER:
+ if (uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_XRCD_HANDLE) ||
+ (uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_SRQ_HANDLE) &&
+ attr.qp_type == IB_QPT_XRC_INI))
+ return -EINVAL;
+
+ pd = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_QP_PD_HANDLE);
+ if (IS_ERR(pd))
+ return PTR_ERR(pd);
+
+ rwq_ind_tbl = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_QP_IND_TABLE_HANDLE);
+ if (!IS_ERR(rwq_ind_tbl)) {
+ if (cap.max_recv_wr || cap.max_recv_sge ||
+ uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE) ||
+ uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_SRQ_HANDLE))
+ return -EINVAL;
+
+ /* send_cq is optional */
+ if (cap.max_send_wr) {
+ send_cq = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE);
+ if (IS_ERR(send_cq))
+ return PTR_ERR(send_cq);
+ }
+ attr.rwq_ind_tbl = rwq_ind_tbl;
+ } else {
+ send_cq = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE);
+ if (IS_ERR(send_cq))
+ return PTR_ERR(send_cq);
+
+ if (attr.qp_type != IB_QPT_XRC_INI) {
+ recv_cq = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE);
+ if (IS_ERR(recv_cq))
+ return PTR_ERR(recv_cq);
+ }
+ }
+
+ device = pd->device;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = uverbs_get_flags32(&attr.create_flags, attrs,
+ UVERBS_ATTR_CREATE_QP_FLAGS,
+ IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
+ IB_UVERBS_QP_CREATE_SCATTER_FCS |
+ IB_UVERBS_QP_CREATE_CVLAN_STRIPPING |
+ IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING |
+ IB_UVERBS_QP_CREATE_SQ_SIG_ALL);
+ if (ret)
+ return ret;
+
+ ret = check_creation_flags(attr.qp_type, attr.create_flags);
+ if (ret)
+ return ret;
+
+ if (uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_SOURCE_QPN)) {
+ ret = uverbs_copy_from(&attr.source_qpn, attrs,
+ UVERBS_ATTR_CREATE_QP_SOURCE_QPN);
+ if (ret)
+ return ret;
+ attr.create_flags |= IB_QP_CREATE_SOURCE_QPN;
+ }
+
+ srq = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_QP_SRQ_HANDLE);
+ if (!IS_ERR(srq)) {
+ if ((srq->srq_type == IB_SRQT_XRC &&
+ attr.qp_type != IB_QPT_XRC_TGT) ||
+ (srq->srq_type != IB_SRQT_XRC &&
+ attr.qp_type == IB_QPT_XRC_TGT))
+ return -EINVAL;
+ attr.srq = srq;
+ }
+
+ obj->uevent.event_file = ib_uverbs_get_async_event(attrs,
+ UVERBS_ATTR_CREATE_QP_EVENT_FD);
+ INIT_LIST_HEAD(&obj->uevent.event_list);
+ INIT_LIST_HEAD(&obj->mcast_list);
+ obj->uevent.uobject.user_handle = user_handle;
+ attr.event_handler = ib_uverbs_qp_event_handler;
+ attr.send_cq = send_cq;
+ attr.recv_cq = recv_cq;
+ attr.xrcd = xrcd;
+ if (attr.create_flags & IB_UVERBS_QP_CREATE_SQ_SIG_ALL) {
+ /* This create flag is a uverbs-only bit and must be masked out
+ * before calling drivers. It was added to avoid requiring an extra
+ * user attr just for this when using ioctl.
+ */
+ attr.create_flags &= ~IB_UVERBS_QP_CREATE_SQ_SIG_ALL;
+ attr.sq_sig_type = IB_SIGNAL_ALL_WR;
+ } else {
+ attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+ }
+
+ set_caps(&attr, &cap, true);
+ mutex_init(&obj->mcast_lock);
+
+ qp = ib_create_qp_user(device, pd, &attr, &attrs->driver_udata, obj,
+ KBUILD_MODNAME);
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
+ goto err_put;
+ }
+ ib_qp_usecnt_inc(qp);
+
+ if (attr.qp_type == IB_QPT_XRC_TGT) {
+ obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
+ uobject);
+ atomic_inc(&obj->uxrcd->refcnt);
+ }
+
+ obj->uevent.uobject.object = qp;
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_QP_HANDLE);
+
+ set_caps(&attr, &cap, false);
+ ret = uverbs_copy_to_struct_or_zero(attrs,
+ UVERBS_ATTR_CREATE_QP_RESP_CAP, &cap,
+ sizeof(cap));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_QP_RESP_QP_NUM,
+ &qp->qp_num,
+ sizeof(qp->qp_num));
+
+ return ret;
+err_put:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
+ return ret;
+};
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_QP_CREATE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_HANDLE,
+ UVERBS_OBJECT_QP,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_XRCD_HANDLE,
+ UVERBS_OBJECT_XRCD,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_PD_HANDLE,
+ UVERBS_OBJECT_PD,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_SRQ_HANDLE,
+ UVERBS_OBJECT_SRQ,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE,
+ UVERBS_OBJECT_CQ,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE,
+ UVERBS_OBJECT_CQ,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_IND_TABLE_HANDLE,
+ UVERBS_OBJECT_RWQ_IND_TBL,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_QP_USER_HANDLE,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_QP_CAP,
+ UVERBS_ATTR_STRUCT(struct ib_uverbs_qp_cap,
+ max_inline_data),
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(UVERBS_ATTR_CREATE_QP_TYPE,
+ enum ib_uverbs_qp_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_CREATE_QP_FLAGS,
+ enum ib_uverbs_qp_create_flags,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_QP_SOURCE_QPN,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL),
+ UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_QP_EVENT_FD,
+ UVERBS_OBJECT_ASYNC_EVENT,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_QP_RESP_CAP,
+ UVERBS_ATTR_STRUCT(struct ib_uverbs_qp_cap,
+ max_inline_data),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_QP_RESP_QP_NUM,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_UHW());
+
+static int UVERBS_HANDLER(UVERBS_METHOD_QP_DESTROY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_QP_HANDLE);
+ struct ib_uqp_object *obj =
+ container_of(uobj, struct ib_uqp_object, uevent.uobject);
+ struct ib_uverbs_destroy_qp_resp resp = {
+ .events_reported = obj->uevent.events_reported
+ };
+
+ return uverbs_copy_to(attrs, UVERBS_ATTR_DESTROY_QP_RESP, &resp,
+ sizeof(resp));
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_QP_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_QP_HANDLE,
+ UVERBS_OBJECT_QP,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_DESTROY_QP_RESP,
+ UVERBS_ATTR_TYPE(struct ib_uverbs_destroy_qp_resp),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_QP,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object), uverbs_free_qp),
+ &UVERBS_METHOD(UVERBS_METHOD_QP_CREATE),
+ &UVERBS_METHOD(UVERBS_METHOD_QP_DESTROY));
+
+const struct uapi_definition uverbs_def_obj_qp[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_QP,
+ UAPI_DEF_OBJ_NEEDS_FN(destroy_qp)),
+ {}
+};
diff --git a/drivers/infiniband/core/uverbs_std_types_srq.c b/drivers/infiniband/core/uverbs_std_types_srq.c
new file mode 100644
index 000000000000..e5513f828bdc
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_srq.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include "rdma_core.h"
+#include "uverbs.h"
+
+static int uverbs_free_srq(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_srq *srq = uobject->object;
+ struct ib_uevent_object *uevent =
+ container_of(uobject, struct ib_uevent_object, uobject);
+ enum ib_srq_type srq_type = srq->srq_type;
+ int ret;
+
+ ret = ib_destroy_srq_user(srq, &attrs->driver_udata);
+ if (ret)
+ return ret;
+
+ if (srq_type == IB_SRQT_XRC) {
+ struct ib_usrq_object *us =
+ container_of(uobject, struct ib_usrq_object,
+ uevent.uobject);
+
+ atomic_dec(&us->uxrcd->refcnt);
+ }
+
+ ib_uverbs_release_uevent(uevent);
+ return 0;
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_SRQ_CREATE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_usrq_object *obj = container_of(
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_SRQ_HANDLE),
+ typeof(*obj), uevent.uobject);
+ struct ib_pd *pd =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_CREATE_SRQ_PD_HANDLE);
+ struct ib_srq_init_attr attr = {};
+ struct ib_uobject *xrcd_uobj;
+ struct ib_srq *srq;
+ u64 user_handle;
+ int ret;
+
+ ret = uverbs_copy_from(&attr.attr.max_sge, attrs,
+ UVERBS_ATTR_CREATE_SRQ_MAX_SGE);
+ if (!ret)
+ ret = uverbs_copy_from(&attr.attr.max_wr, attrs,
+ UVERBS_ATTR_CREATE_SRQ_MAX_WR);
+ if (!ret)
+ ret = uverbs_copy_from(&attr.attr.srq_limit, attrs,
+ UVERBS_ATTR_CREATE_SRQ_LIMIT);
+ if (!ret)
+ ret = uverbs_copy_from(&user_handle, attrs,
+ UVERBS_ATTR_CREATE_SRQ_USER_HANDLE);
+ if (!ret)
+ ret = uverbs_get_const(&attr.srq_type, attrs,
+ UVERBS_ATTR_CREATE_SRQ_TYPE);
+ if (ret)
+ return ret;
+
+ if (ib_srq_has_cq(attr.srq_type)) {
+ attr.ext.cq = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_SRQ_CQ_HANDLE);
+ if (IS_ERR(attr.ext.cq))
+ return PTR_ERR(attr.ext.cq);
+ }
+
+ switch (attr.srq_type) {
+ case IB_UVERBS_SRQT_XRC:
+ xrcd_uobj = uverbs_attr_get_uobject(attrs,
+ UVERBS_ATTR_CREATE_SRQ_XRCD_HANDLE);
+ if (IS_ERR(xrcd_uobj))
+ return PTR_ERR(xrcd_uobj);
+
+ attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
+ if (!attr.ext.xrc.xrcd)
+ return -EINVAL;
+ obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
+ uobject);
+ atomic_inc(&obj->uxrcd->refcnt);
+ break;
+ case IB_UVERBS_SRQT_TM:
+ ret = uverbs_copy_from(&attr.ext.tag_matching.max_num_tags,
+ attrs,
+ UVERBS_ATTR_CREATE_SRQ_MAX_NUM_TAGS);
+ if (ret)
+ return ret;
+ break;
+ case IB_UVERBS_SRQT_BASIC:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ obj->uevent.event_file = ib_uverbs_get_async_event(attrs,
+ UVERBS_ATTR_CREATE_SRQ_EVENT_FD);
+ INIT_LIST_HEAD(&obj->uevent.event_list);
+ attr.event_handler = ib_uverbs_srq_event_handler;
+ obj->uevent.uobject.user_handle = user_handle;
+
+ srq = ib_create_srq_user(pd, &attr, obj, &attrs->driver_udata);
+ if (IS_ERR(srq)) {
+ ret = PTR_ERR(srq);
+ goto err;
+ }
+
+ obj->uevent.uobject.object = srq;
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_SRQ_HANDLE);
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_SRQ_RESP_MAX_WR,
+ &attr.attr.max_wr,
+ sizeof(attr.attr.max_wr));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_SRQ_RESP_MAX_SGE,
+ &attr.attr.max_sge,
+ sizeof(attr.attr.max_sge));
+ if (ret)
+ return ret;
+
+ if (attr.srq_type == IB_SRQT_XRC) {
+ ret = uverbs_copy_to(attrs,
+ UVERBS_ATTR_CREATE_SRQ_RESP_SRQ_NUM,
+ &srq->ext.xrc.srq_num,
+ sizeof(srq->ext.xrc.srq_num));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+err:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
+ if (attr.srq_type == IB_SRQT_XRC)
+ atomic_dec(&obj->uxrcd->refcnt);
+ return ret;
+};
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_SRQ_CREATE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_SRQ_HANDLE,
+ UVERBS_OBJECT_SRQ,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_SRQ_PD_HANDLE,
+ UVERBS_OBJECT_PD,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(UVERBS_ATTR_CREATE_SRQ_TYPE,
+ enum ib_uverbs_srq_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_USER_HANDLE,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_MAX_WR,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_MAX_SGE,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_LIMIT,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_SRQ_XRCD_HANDLE,
+ UVERBS_OBJECT_XRCD,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_SRQ_CQ_HANDLE,
+ UVERBS_OBJECT_CQ,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_MAX_NUM_TAGS,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL),
+ UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_SRQ_EVENT_FD,
+ UVERBS_OBJECT_ASYNC_EVENT,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_SRQ_RESP_MAX_WR,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_SRQ_RESP_MAX_SGE,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_SRQ_RESP_SRQ_NUM,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL),
+ UVERBS_ATTR_UHW());
+
+static int UVERBS_HANDLER(UVERBS_METHOD_SRQ_DESTROY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_SRQ_HANDLE);
+ struct ib_usrq_object *obj =
+ container_of(uobj, struct ib_usrq_object, uevent.uobject);
+ struct ib_uverbs_destroy_srq_resp resp = {
+ .events_reported = obj->uevent.events_reported
+ };
+
+ return uverbs_copy_to(attrs, UVERBS_ATTR_DESTROY_SRQ_RESP, &resp,
+ sizeof(resp));
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_SRQ_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_SRQ_HANDLE,
+ UVERBS_OBJECT_SRQ,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_DESTROY_SRQ_RESP,
+ UVERBS_ATTR_TYPE(struct ib_uverbs_destroy_srq_resp),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_SRQ,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object),
+ uverbs_free_srq),
+ &UVERBS_METHOD(UVERBS_METHOD_SRQ_CREATE),
+ &UVERBS_METHOD(UVERBS_METHOD_SRQ_DESTROY)
+);
+
+const struct uapi_definition uverbs_def_obj_srq[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_SRQ,
+ UAPI_DEF_OBJ_NEEDS_FN(destroy_srq)),
+ {}
+};
diff --git a/drivers/infiniband/core/uverbs_std_types_wq.c b/drivers/infiniband/core/uverbs_std_types_wq.c
new file mode 100644
index 000000000000..7ded8339346f
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_wq.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include "rdma_core.h"
+#include "uverbs.h"
+
+static int uverbs_free_wq(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_wq *wq = uobject->object;
+ struct ib_uwq_object *uwq =
+ container_of(uobject, struct ib_uwq_object, uevent.uobject);
+ int ret;
+
+ ret = ib_destroy_wq_user(wq, &attrs->driver_udata);
+ if (ret)
+ return ret;
+
+ ib_uverbs_release_uevent(&uwq->uevent);
+ return 0;
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_WQ_CREATE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uwq_object *obj = container_of(
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_WQ_HANDLE),
+ typeof(*obj), uevent.uobject);
+ struct ib_pd *pd =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_CREATE_WQ_PD_HANDLE);
+ struct ib_cq *cq =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_CREATE_WQ_CQ_HANDLE);
+ struct ib_wq_init_attr wq_init_attr = {};
+ struct ib_wq *wq;
+ u64 user_handle;
+ int ret;
+
+ ret = uverbs_get_flags32(&wq_init_attr.create_flags, attrs,
+ UVERBS_ATTR_CREATE_WQ_FLAGS,
+ IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING |
+ IB_UVERBS_WQ_FLAGS_SCATTER_FCS |
+ IB_UVERBS_WQ_FLAGS_DELAY_DROP |
+ IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING);
+ if (!ret)
+ ret = uverbs_copy_from(&wq_init_attr.max_sge, attrs,
+ UVERBS_ATTR_CREATE_WQ_MAX_SGE);
+ if (!ret)
+ ret = uverbs_copy_from(&wq_init_attr.max_wr, attrs,
+ UVERBS_ATTR_CREATE_WQ_MAX_WR);
+ if (!ret)
+ ret = uverbs_copy_from(&user_handle, attrs,
+ UVERBS_ATTR_CREATE_WQ_USER_HANDLE);
+ if (!ret)
+ ret = uverbs_get_const(&wq_init_attr.wq_type, attrs,
+ UVERBS_ATTR_CREATE_WQ_TYPE);
+ if (ret)
+ return ret;
+
+ if (wq_init_attr.wq_type != IB_WQT_RQ)
+ return -EINVAL;
+
+ obj->uevent.event_file = ib_uverbs_get_async_event(attrs,
+ UVERBS_ATTR_CREATE_WQ_EVENT_FD);
+ obj->uevent.uobject.user_handle = user_handle;
+ INIT_LIST_HEAD(&obj->uevent.event_list);
+ wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
+ wq_init_attr.wq_context = attrs->ufile;
+ wq_init_attr.cq = cq;
+
+ wq = pd->device->ops.create_wq(pd, &wq_init_attr, &attrs->driver_udata);
+ if (IS_ERR(wq)) {
+ ret = PTR_ERR(wq);
+ goto err;
+ }
+
+ obj->uevent.uobject.object = wq;
+ wq->wq_type = wq_init_attr.wq_type;
+ wq->cq = cq;
+ wq->pd = pd;
+ wq->device = pd->device;
+ wq->wq_context = wq_init_attr.wq_context;
+ atomic_set(&wq->usecnt, 0);
+ atomic_inc(&pd->usecnt);
+ atomic_inc(&cq->usecnt);
+ wq->uobject = obj;
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_WQ_HANDLE);
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_WQ_RESP_MAX_WR,
+ &wq_init_attr.max_wr,
+ sizeof(wq_init_attr.max_wr));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_WQ_RESP_MAX_SGE,
+ &wq_init_attr.max_sge,
+ sizeof(wq_init_attr.max_sge));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_WQ_RESP_WQ_NUM,
+ &wq->wq_num,
+ sizeof(wq->wq_num));
+ return ret;
+
+err:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
+ return ret;
+};
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_WQ_CREATE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_WQ_HANDLE,
+ UVERBS_OBJECT_WQ,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_WQ_PD_HANDLE,
+ UVERBS_OBJECT_PD,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(UVERBS_ATTR_CREATE_WQ_TYPE,
+ enum ib_wq_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_WQ_USER_HANDLE,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_WQ_MAX_WR,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_WQ_MAX_SGE,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_CREATE_WQ_FLAGS,
+ enum ib_uverbs_wq_flags,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_WQ_CQ_HANDLE,
+ UVERBS_OBJECT_CQ,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_WQ_EVENT_FD,
+ UVERBS_OBJECT_ASYNC_EVENT,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_WQ_RESP_MAX_WR,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_WQ_RESP_MAX_SGE,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_WQ_RESP_WQ_NUM,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL),
+ UVERBS_ATTR_UHW());
+
+static int UVERBS_HANDLER(UVERBS_METHOD_WQ_DESTROY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_WQ_HANDLE);
+ struct ib_uwq_object *obj =
+ container_of(uobj, struct ib_uwq_object, uevent.uobject);
+
+ return uverbs_copy_to(attrs, UVERBS_ATTR_DESTROY_WQ_RESP,
+ &obj->uevent.events_reported,
+ sizeof(obj->uevent.events_reported));
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_WQ_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_WQ_HANDLE,
+ UVERBS_OBJECT_WQ,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_DESTROY_WQ_RESP,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_WQ,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), uverbs_free_wq),
+ &UVERBS_METHOD(UVERBS_METHOD_WQ_CREATE),
+ &UVERBS_METHOD(UVERBS_METHOD_WQ_DESTROY)
+);
+
+const struct uapi_definition uverbs_def_obj_wq[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_WQ,
+ UAPI_DEF_OBJ_NEEDS_FN(destroy_wq)),
+ {}
+};
diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c
new file mode 100644
index 000000000000..e00ea63175bd
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_uapi.c
@@ -0,0 +1,735 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
+ */
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/rdma_user_ioctl.h>
+#include <linux/bitops.h>
+#include "rdma_core.h"
+#include "uverbs.h"
+
+static int ib_uverbs_notsupp(struct uverbs_attr_bundle *attrs)
+{
+ return -EOPNOTSUPP;
+}
+
+static void *uapi_add_elm(struct uverbs_api *uapi, u32 key, size_t alloc_size)
+{
+ void *elm;
+ int rc;
+
+ if (key == UVERBS_API_KEY_ERR)
+ return ERR_PTR(-EOVERFLOW);
+
+ elm = kzalloc(alloc_size, GFP_KERNEL);
+ if (!elm)
+ return ERR_PTR(-ENOMEM);
+ rc = radix_tree_insert(&uapi->radix, key, elm);
+ if (rc) {
+ kfree(elm);
+ return ERR_PTR(rc);
+ }
+
+ return elm;
+}
+
+static void *uapi_add_get_elm(struct uverbs_api *uapi, u32 key,
+ size_t alloc_size, bool *exists)
+{
+ void *elm;
+
+ elm = uapi_add_elm(uapi, key, alloc_size);
+ if (!IS_ERR(elm)) {
+ *exists = false;
+ return elm;
+ }
+
+ if (elm != ERR_PTR(-EEXIST))
+ return elm;
+
+ elm = radix_tree_lookup(&uapi->radix, key);
+ if (WARN_ON(!elm))
+ return ERR_PTR(-EINVAL);
+ *exists = true;
+ return elm;
+}
+
+static int uapi_create_write(struct uverbs_api *uapi,
+ struct ib_device *ibdev,
+ const struct uapi_definition *def,
+ u32 obj_key,
+ u32 *cur_method_key)
+{
+ struct uverbs_api_write_method *method_elm;
+ u32 method_key = obj_key;
+ bool exists;
+
+ if (def->write.is_ex)
+ method_key |= uapi_key_write_ex_method(def->write.command_num);
+ else
+ method_key |= uapi_key_write_method(def->write.command_num);
+
+ method_elm = uapi_add_get_elm(uapi, method_key, sizeof(*method_elm),
+ &exists);
+ if (IS_ERR(method_elm))
+ return PTR_ERR(method_elm);
+
+ if (WARN_ON(exists && (def->write.is_ex != method_elm->is_ex)))
+ return -EINVAL;
+
+ method_elm->is_ex = def->write.is_ex;
+ method_elm->handler = def->func_write;
+ if (!def->write.is_ex)
+ method_elm->disabled = !(ibdev->uverbs_cmd_mask &
+ BIT_ULL(def->write.command_num));
+
+ if (!def->write.is_ex && def->func_write) {
+ method_elm->has_udata = def->write.has_udata;
+ method_elm->has_resp = def->write.has_resp;
+ method_elm->req_size = def->write.req_size;
+ method_elm->resp_size = def->write.resp_size;
+ }
+
+ *cur_method_key = method_key;
+ return 0;
+}
+
+static int uapi_merge_method(struct uverbs_api *uapi,
+ struct uverbs_api_object *obj_elm, u32 obj_key,
+ const struct uverbs_method_def *method,
+ bool is_driver)
+{
+ u32 method_key = obj_key | uapi_key_ioctl_method(method->id);
+ struct uverbs_api_ioctl_method *method_elm;
+ unsigned int i;
+ bool exists;
+
+ if (!method->attrs)
+ return 0;
+
+ method_elm = uapi_add_get_elm(uapi, method_key, sizeof(*method_elm),
+ &exists);
+ if (IS_ERR(method_elm))
+ return PTR_ERR(method_elm);
+ if (exists) {
+ /*
+ * This occurs when a driver uses ADD_UVERBS_ATTRIBUTES_SIMPLE
+ */
+ if (WARN_ON(method->handler))
+ return -EINVAL;
+ } else {
+ WARN_ON(!method->handler);
+ rcu_assign_pointer(method_elm->handler, method->handler);
+ if (method->handler != uverbs_destroy_def_handler)
+ method_elm->driver_method = is_driver;
+ }
+
+ for (i = 0; i != method->num_attrs; i++) {
+ const struct uverbs_attr_def *attr = (*method->attrs)[i];
+ struct uverbs_api_attr *attr_slot;
+
+ if (!attr)
+ continue;
+
+ /*
+ * ENUM_IN contains the 'ids' pointer to the driver's .rodata,
+ * so if it is specified by a driver then it always makes this
+ * into a driver method.
+ */
+ if (attr->attr.type == UVERBS_ATTR_TYPE_ENUM_IN)
+ method_elm->driver_method |= is_driver;
+
+ /*
+ * Like other uobject based things we only support a single
+ * uobject being NEW'd or DESTROY'd
+ */
+ if (attr->attr.type == UVERBS_ATTR_TYPE_IDRS_ARRAY) {
+ u8 access = attr->attr.u2.objs_arr.access;
+
+ if (WARN_ON(access == UVERBS_ACCESS_NEW ||
+ access == UVERBS_ACCESS_DESTROY))
+ return -EINVAL;
+ }
+
+ attr_slot =
+ uapi_add_elm(uapi, method_key | uapi_key_attr(attr->id),
+ sizeof(*attr_slot));
+ /* Attributes are not allowed to be modified by drivers */
+ if (IS_ERR(attr_slot))
+ return PTR_ERR(attr_slot);
+
+ attr_slot->spec = attr->attr;
+ }
+
+ return 0;
+}
+
+static int uapi_merge_obj_tree(struct uverbs_api *uapi,
+ const struct uverbs_object_def *obj,
+ bool is_driver)
+{
+ struct uverbs_api_object *obj_elm;
+ unsigned int i;
+ u32 obj_key;
+ bool exists;
+ int rc;
+
+ obj_key = uapi_key_obj(obj->id);
+ obj_elm = uapi_add_get_elm(uapi, obj_key, sizeof(*obj_elm), &exists);
+ if (IS_ERR(obj_elm))
+ return PTR_ERR(obj_elm);
+
+ if (obj->type_attrs) {
+ if (WARN_ON(obj_elm->type_attrs))
+ return -EINVAL;
+
+ obj_elm->id = obj->id;
+ obj_elm->type_attrs = obj->type_attrs;
+ obj_elm->type_class = obj->type_attrs->type_class;
+ /*
+ * Today drivers are only permitted to use idr_class and
+ * fd_class types. We can revoke the IDR types during
+ * disassociation, and the FD types require the driver to use
+ * struct file_operations.owner to prevent the driver module
+ * code from unloading while the file is open. This provides
+ * enough safety that uverbs_uobject_fd_release() will
+ * continue to work. Drivers using FD are responsible to
+ * handle disassociation of the device on their own.
+ */
+ if (WARN_ON(is_driver &&
+ obj->type_attrs->type_class != &uverbs_idr_class &&
+ obj->type_attrs->type_class != &uverbs_fd_class))
+ return -EINVAL;
+ }
+
+ if (!obj->methods)
+ return 0;
+
+ for (i = 0; i != obj->num_methods; i++) {
+ const struct uverbs_method_def *method = (*obj->methods)[i];
+
+ if (!method)
+ continue;
+
+ rc = uapi_merge_method(uapi, obj_elm, obj_key, method,
+ is_driver);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int uapi_disable_elm(struct uverbs_api *uapi,
+ const struct uapi_definition *def,
+ u32 obj_key,
+ u32 method_key)
+{
+ bool exists;
+
+ if (def->scope == UAPI_SCOPE_OBJECT) {
+ struct uverbs_api_object *obj_elm;
+
+ obj_elm = uapi_add_get_elm(
+ uapi, obj_key, sizeof(*obj_elm), &exists);
+ if (IS_ERR(obj_elm))
+ return PTR_ERR(obj_elm);
+ obj_elm->disabled = 1;
+ return 0;
+ }
+
+ if (def->scope == UAPI_SCOPE_METHOD &&
+ uapi_key_is_ioctl_method(method_key)) {
+ struct uverbs_api_ioctl_method *method_elm;
+
+ method_elm = uapi_add_get_elm(uapi, method_key,
+ sizeof(*method_elm), &exists);
+ if (IS_ERR(method_elm))
+ return PTR_ERR(method_elm);
+ method_elm->disabled = 1;
+ return 0;
+ }
+
+ if (def->scope == UAPI_SCOPE_METHOD &&
+ (uapi_key_is_write_method(method_key) ||
+ uapi_key_is_write_ex_method(method_key))) {
+ struct uverbs_api_write_method *write_elm;
+
+ write_elm = uapi_add_get_elm(uapi, method_key,
+ sizeof(*write_elm), &exists);
+ if (IS_ERR(write_elm))
+ return PTR_ERR(write_elm);
+ write_elm->disabled = 1;
+ return 0;
+ }
+
+ WARN_ON(true);
+ return -EINVAL;
+}
+
+static int uapi_merge_def(struct uverbs_api *uapi, struct ib_device *ibdev,
+ const struct uapi_definition *def_list,
+ bool is_driver)
+{
+ const struct uapi_definition *def = def_list;
+ u32 cur_obj_key = UVERBS_API_KEY_ERR;
+ u32 cur_method_key = UVERBS_API_KEY_ERR;
+ bool exists;
+ int rc;
+
+ if (!def_list)
+ return 0;
+
+ for (;; def++) {
+ switch ((enum uapi_definition_kind)def->kind) {
+ case UAPI_DEF_CHAIN:
+ rc = uapi_merge_def(uapi, ibdev, def->chain, is_driver);
+ if (rc)
+ return rc;
+ continue;
+
+ case UAPI_DEF_CHAIN_OBJ_TREE:
+ if (WARN_ON(def->object_start.object_id !=
+ def->chain_obj_tree->id))
+ return -EINVAL;
+
+ cur_obj_key = uapi_key_obj(def->object_start.object_id);
+ rc = uapi_merge_obj_tree(uapi, def->chain_obj_tree,
+ is_driver);
+ if (rc)
+ return rc;
+ continue;
+
+ case UAPI_DEF_END:
+ return 0;
+
+ case UAPI_DEF_IS_SUPPORTED_DEV_FN: {
+ void **ibdev_fn =
+ (void *)(&ibdev->ops) + def->needs_fn_offset;
+
+ if (*ibdev_fn)
+ continue;
+ rc = uapi_disable_elm(
+ uapi, def, cur_obj_key, cur_method_key);
+ if (rc)
+ return rc;
+ continue;
+ }
+
+ case UAPI_DEF_IS_SUPPORTED_FUNC:
+ if (def->func_is_supported(ibdev))
+ continue;
+ rc = uapi_disable_elm(
+ uapi, def, cur_obj_key, cur_method_key);
+ if (rc)
+ return rc;
+ continue;
+
+ case UAPI_DEF_OBJECT_START: {
+ struct uverbs_api_object *obj_elm;
+
+ cur_obj_key = uapi_key_obj(def->object_start.object_id);
+ obj_elm = uapi_add_get_elm(uapi, cur_obj_key,
+ sizeof(*obj_elm), &exists);
+ if (IS_ERR(obj_elm))
+ return PTR_ERR(obj_elm);
+ continue;
+ }
+
+ case UAPI_DEF_WRITE:
+ rc = uapi_create_write(
+ uapi, ibdev, def, cur_obj_key, &cur_method_key);
+ if (rc)
+ return rc;
+ continue;
+ }
+ WARN_ON(true);
+ return -EINVAL;
+ }
+}
+
+static int
+uapi_finalize_ioctl_method(struct uverbs_api *uapi,
+ struct uverbs_api_ioctl_method *method_elm,
+ u32 method_key)
+{
+ struct radix_tree_iter iter;
+ unsigned int num_attrs = 0;
+ unsigned int max_bkey = 0;
+ bool single_uobj = false;
+ void __rcu **slot;
+
+ method_elm->destroy_bkey = UVERBS_API_ATTR_BKEY_LEN;
+ radix_tree_for_each_slot (slot, &uapi->radix, &iter,
+ uapi_key_attrs_start(method_key)) {
+ struct uverbs_api_attr *elm =
+ rcu_dereference_protected(*slot, true);
+ u32 attr_key = iter.index & UVERBS_API_ATTR_KEY_MASK;
+ u32 attr_bkey = uapi_bkey_attr(attr_key);
+ u8 type = elm->spec.type;
+
+ if (uapi_key_attr_to_ioctl_method(iter.index) !=
+ uapi_key_attr_to_ioctl_method(method_key))
+ break;
+
+ if (elm->spec.mandatory)
+ __set_bit(attr_bkey, method_elm->attr_mandatory);
+
+ if (elm->spec.is_udata)
+ method_elm->has_udata = true;
+
+ if (type == UVERBS_ATTR_TYPE_IDR ||
+ type == UVERBS_ATTR_TYPE_FD) {
+ u8 access = elm->spec.u.obj.access;
+
+ /*
+ * Verbs specs may only have one NEW/DESTROY; we don't
+ * have the infrastructure to abort multiple NEWs or
+ * cope with multiple DESTROY failures.
+ */
+ if (access == UVERBS_ACCESS_NEW ||
+ access == UVERBS_ACCESS_DESTROY) {
+ if (WARN_ON(single_uobj))
+ return -EINVAL;
+
+ single_uobj = true;
+ if (WARN_ON(!elm->spec.mandatory))
+ return -EINVAL;
+ }
+
+ if (access == UVERBS_ACCESS_DESTROY)
+ method_elm->destroy_bkey = attr_bkey;
+ }
+
+ max_bkey = max(max_bkey, attr_bkey);
+ num_attrs++;
+ }
+
+ method_elm->key_bitmap_len = max_bkey + 1;
+ WARN_ON(method_elm->key_bitmap_len > UVERBS_API_ATTR_BKEY_LEN);
+
+ uapi_compute_bundle_size(method_elm, num_attrs);
+ return 0;
+}
+
+static int uapi_finalize(struct uverbs_api *uapi)
+{
+ const struct uverbs_api_write_method **data;
+ unsigned long max_write_ex = 0;
+ unsigned long max_write = 0;
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+ int rc;
+ int i;
+
+ radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) {
+ struct uverbs_api_ioctl_method *method_elm =
+ rcu_dereference_protected(*slot, true);
+
+ if (uapi_key_is_ioctl_method(iter.index)) {
+ rc = uapi_finalize_ioctl_method(uapi, method_elm,
+ iter.index);
+ if (rc)
+ return rc;
+ }
+
+ if (uapi_key_is_write_method(iter.index))
+ max_write = max(max_write,
+ iter.index & UVERBS_API_ATTR_KEY_MASK);
+ if (uapi_key_is_write_ex_method(iter.index))
+ max_write_ex =
+ max(max_write_ex,
+ iter.index & UVERBS_API_ATTR_KEY_MASK);
+ }
+
+ uapi->notsupp_method.handler = ib_uverbs_notsupp;
+ uapi->num_write = max_write + 1;
+ uapi->num_write_ex = max_write_ex + 1;
+ data = kmalloc_array(uapi->num_write + uapi->num_write_ex,
+ sizeof(*uapi->write_methods), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ for (i = 0; i != uapi->num_write + uapi->num_write_ex; i++)
+ data[i] = &uapi->notsupp_method;
+ uapi->write_methods = data;
+ uapi->write_ex_methods = data + uapi->num_write;
+
+ radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) {
+ if (uapi_key_is_write_method(iter.index))
+ uapi->write_methods[iter.index &
+ UVERBS_API_ATTR_KEY_MASK] =
+ rcu_dereference_protected(*slot, true);
+ if (uapi_key_is_write_ex_method(iter.index))
+ uapi->write_ex_methods[iter.index &
+ UVERBS_API_ATTR_KEY_MASK] =
+ rcu_dereference_protected(*slot, true);
+ }
+
+ return 0;
+}
+
+static void uapi_remove_range(struct uverbs_api *uapi, u32 start, u32 last)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+
+ radix_tree_for_each_slot (slot, &uapi->radix, &iter, start) {
+ if (iter.index > last)
+ return;
+ kfree(rcu_dereference_protected(*slot, true));
+ radix_tree_iter_delete(&uapi->radix, &iter, slot);
+ }
+}
+
+static void uapi_remove_object(struct uverbs_api *uapi, u32 obj_key)
+{
+ uapi_remove_range(uapi, obj_key,
+ obj_key | UVERBS_API_METHOD_KEY_MASK |
+ UVERBS_API_ATTR_KEY_MASK);
+}
+
+static void uapi_remove_method(struct uverbs_api *uapi, u32 method_key)
+{
+ uapi_remove_range(uapi, method_key,
+ method_key | UVERBS_API_ATTR_KEY_MASK);
+}
+
+
+static u32 uapi_get_obj_id(struct uverbs_attr_spec *spec)
+{
+ if (spec->type == UVERBS_ATTR_TYPE_IDR ||
+ spec->type == UVERBS_ATTR_TYPE_FD)
+ return spec->u.obj.obj_type;
+ if (spec->type == UVERBS_ATTR_TYPE_IDRS_ARRAY)
+ return spec->u2.objs_arr.obj_type;
+ return UVERBS_API_KEY_ERR;
+}
+
+static void uapi_key_okay(u32 key)
+{
+ unsigned int count = 0;
+
+ if (uapi_key_is_object(key))
+ count++;
+ if (uapi_key_is_ioctl_method(key))
+ count++;
+ if (uapi_key_is_write_method(key))
+ count++;
+ if (uapi_key_is_write_ex_method(key))
+ count++;
+ if (uapi_key_is_attr(key))
+ count++;
+ WARN(count != 1, "Bad count %u key=%x", count, key);
+}
+
+static void uapi_finalize_disable(struct uverbs_api *uapi)
+{
+ struct radix_tree_iter iter;
+ u32 starting_key = 0;
+ bool scan_again = false;
+ void __rcu **slot;
+
+again:
+ radix_tree_for_each_slot (slot, &uapi->radix, &iter, starting_key) {
+ uapi_key_okay(iter.index);
+
+ if (uapi_key_is_object(iter.index)) {
+ struct uverbs_api_object *obj_elm =
+ rcu_dereference_protected(*slot, true);
+
+ if (obj_elm->disabled) {
+ /* Have to check all the attrs again */
+ scan_again = true;
+ starting_key = iter.index;
+ uapi_remove_object(uapi, iter.index);
+ goto again;
+ }
+ continue;
+ }
+
+ if (uapi_key_is_ioctl_method(iter.index)) {
+ struct uverbs_api_ioctl_method *method_elm =
+ rcu_dereference_protected(*slot, true);
+
+ if (method_elm->disabled) {
+ starting_key = iter.index;
+ uapi_remove_method(uapi, iter.index);
+ goto again;
+ }
+ continue;
+ }
+
+ if (uapi_key_is_write_method(iter.index) ||
+ uapi_key_is_write_ex_method(iter.index)) {
+ struct uverbs_api_write_method *method_elm =
+ rcu_dereference_protected(*slot, true);
+
+ if (method_elm->disabled) {
+ kfree(method_elm);
+ radix_tree_iter_delete(&uapi->radix, &iter, slot);
+ }
+ continue;
+ }
+
+ if (uapi_key_is_attr(iter.index)) {
+ struct uverbs_api_attr *attr_elm =
+ rcu_dereference_protected(*slot, true);
+ const struct uverbs_api_object *tmp_obj;
+ u32 obj_key;
+
+ /*
+ * If the method has a mandatory object handle
+ * attribute which relies on an object which is not
+ * present then the entire method is uncallable.
+ */
+ if (!attr_elm->spec.mandatory)
+ continue;
+ obj_key = uapi_get_obj_id(&attr_elm->spec);
+ if (obj_key == UVERBS_API_KEY_ERR)
+ continue;
+ tmp_obj = uapi_get_object(uapi, obj_key);
+ if (IS_ERR(tmp_obj)) {
+ if (PTR_ERR(tmp_obj) == -ENOMSG)
+ continue;
+ } else {
+ if (!tmp_obj->disabled)
+ continue;
+ }
+
+ starting_key = iter.index;
+ uapi_remove_method(
+ uapi,
+ iter.index & (UVERBS_API_OBJ_KEY_MASK |
+ UVERBS_API_METHOD_KEY_MASK));
+ goto again;
+ }
+
+ WARN_ON(false);
+ }
+
+ if (!scan_again)
+ return;
+ scan_again = false;
+ starting_key = 0;
+ goto again;
+}
+
+void uverbs_destroy_api(struct uverbs_api *uapi)
+{
+ if (!uapi)
+ return;
+
+ uapi_remove_range(uapi, 0, U32_MAX);
+ kfree(uapi->write_methods);
+ kfree(uapi);
+}
+
+static const struct uapi_definition uverbs_core_api[] = {
+ UAPI_DEF_CHAIN(uverbs_def_obj_async_fd),
+ UAPI_DEF_CHAIN(uverbs_def_obj_counters),
+ UAPI_DEF_CHAIN(uverbs_def_obj_cq),
+ UAPI_DEF_CHAIN(uverbs_def_obj_device),
+ UAPI_DEF_CHAIN(uverbs_def_obj_dm),
+ UAPI_DEF_CHAIN(uverbs_def_obj_dmah),
+ UAPI_DEF_CHAIN(uverbs_def_obj_flow_action),
+ UAPI_DEF_CHAIN(uverbs_def_obj_intf),
+ UAPI_DEF_CHAIN(uverbs_def_obj_mr),
+ UAPI_DEF_CHAIN(uverbs_def_obj_qp),
+ UAPI_DEF_CHAIN(uverbs_def_obj_srq),
+ UAPI_DEF_CHAIN(uverbs_def_obj_wq),
+ UAPI_DEF_CHAIN(uverbs_def_write_intf),
+ {},
+};
+
+struct uverbs_api *uverbs_alloc_api(struct ib_device *ibdev)
+{
+ struct uverbs_api *uapi;
+ int rc;
+
+ uapi = kzalloc(sizeof(*uapi), GFP_KERNEL);
+ if (!uapi)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_RADIX_TREE(&uapi->radix, GFP_KERNEL);
+ uapi->driver_id = ibdev->ops.driver_id;
+
+ rc = uapi_merge_def(uapi, ibdev, uverbs_core_api, false);
+ if (rc)
+ goto err;
+ rc = uapi_merge_def(uapi, ibdev, ibdev->driver_def, true);
+ if (rc)
+ goto err;
+
+ uapi_finalize_disable(uapi);
+ rc = uapi_finalize(uapi);
+ if (rc)
+ goto err;
+
+ return uapi;
+err:
+ if (rc != -ENOMEM)
+ dev_err(&ibdev->dev,
+ "Setup of uverbs_api failed, kernel parsing tree description is not valid (%d)??\n",
+ rc);
+
+ uverbs_destroy_api(uapi);
+ return ERR_PTR(rc);
+}
+
+/*
+ * The pre version is done before destroying the HW objects; it only blocks
+ * off method access. All methods that require the ib_dev or the module data
+ * must test one of these assignments prior to continuing.
+ */
+void uverbs_disassociate_api_pre(struct ib_uverbs_device *uverbs_dev)
+{
+ struct uverbs_api *uapi = uverbs_dev->uapi;
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+
+ rcu_assign_pointer(uverbs_dev->ib_dev, NULL);
+
+ radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) {
+ if (uapi_key_is_ioctl_method(iter.index)) {
+ struct uverbs_api_ioctl_method *method_elm =
+ rcu_dereference_protected(*slot, true);
+
+ if (method_elm->driver_method)
+ rcu_assign_pointer(method_elm->handler, NULL);
+ }
+ }
+
+ synchronize_srcu(&uverbs_dev->disassociate_srcu);
+}
+
+/*
+ * Called when a driver disassociates from the ib_uverbs_device. The
+ * assumption is that the driver module will unload after. Replace everything
+ * related to the driver with NULL as a safety measure.
+ */
+void uverbs_disassociate_api(struct uverbs_api *uapi)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+
+ radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) {
+ if (uapi_key_is_object(iter.index)) {
+ struct uverbs_api_object *object_elm =
+ rcu_dereference_protected(*slot, true);
+
+ /*
+ * Some type_attrs are in the driver module. We don't
+ * bother to keep track of which since there should be
+ * no use of this after disassociation.
+ */
+ object_elm->type_attrs = NULL;
+ } else if (uapi_key_is_attr(iter.index)) {
+ struct uverbs_api_attr *elm =
+ rcu_dereference_protected(*slot, true);
+
+ if (elm->spec.type == UVERBS_ATTR_TYPE_ENUM_IN)
+ elm->spec.u2.enum_def.ids = NULL;
+ }
+ }
+}
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index e1f2c9887f3f..3a5f81402d2f 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -41,12 +41,22 @@
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <net/addrconf.h>
+#include <linux/security.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
+#include <rdma/rw.h>
+#include <rdma/lag.h>
#include "core_priv.h"
+#include <trace/events/rdma_core.h>
+
+static int ib_resolve_eth_dmac(struct ib_device *device,
+ struct rdma_ah_attr *ah_attr);
static const char * const ib_events[] = {
[IB_EVENT_CQ_ERR] = "CQ error",
@@ -70,7 +80,7 @@ static const char * const ib_events[] = {
[IB_EVENT_GID_CHANGE] = "GID changed",
};
-const char *ib_event_msg(enum ib_event_type event)
+const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
size_t index = event;
@@ -86,10 +96,10 @@ static const char * const wc_statuses[] = {
[IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error",
[IB_WC_LOC_PROT_ERR] = "local protection error",
[IB_WC_WR_FLUSH_ERR] = "WR flushed",
- [IB_WC_MW_BIND_ERR] = "memory management operation error",
+ [IB_WC_MW_BIND_ERR] = "memory bind operation error",
[IB_WC_BAD_RESP_ERR] = "bad response error",
[IB_WC_LOC_ACCESS_ERR] = "local access error",
- [IB_WC_REM_INV_REQ_ERR] = "invalid request error",
+ [IB_WC_REM_INV_REQ_ERR] = "remote invalid request error",
[IB_WC_REM_ACCESS_ERR] = "remote access error",
[IB_WC_REM_OP_ERR] = "remote operation error",
[IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded",
@@ -104,7 +114,7 @@ static const char * const wc_statuses[] = {
[IB_WC_GENERAL_ERR] = "general error",
};
-const char *ib_wc_status_msg(enum ib_wc_status status)
+const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{
size_t index = status;
@@ -116,16 +126,29 @@ EXPORT_SYMBOL(ib_wc_status_msg);
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
switch (rate) {
- case IB_RATE_2_5_GBPS: return 1;
- case IB_RATE_5_GBPS: return 2;
- case IB_RATE_10_GBPS: return 4;
- case IB_RATE_20_GBPS: return 8;
- case IB_RATE_30_GBPS: return 12;
- case IB_RATE_40_GBPS: return 16;
- case IB_RATE_60_GBPS: return 24;
- case IB_RATE_80_GBPS: return 32;
- case IB_RATE_120_GBPS: return 48;
- default: return -1;
+ case IB_RATE_2_5_GBPS: return 1;
+ case IB_RATE_5_GBPS: return 2;
+ case IB_RATE_10_GBPS: return 4;
+ case IB_RATE_20_GBPS: return 8;
+ case IB_RATE_30_GBPS: return 12;
+ case IB_RATE_40_GBPS: return 16;
+ case IB_RATE_60_GBPS: return 24;
+ case IB_RATE_80_GBPS: return 32;
+ case IB_RATE_120_GBPS: return 48;
+ case IB_RATE_14_GBPS: return 6;
+ case IB_RATE_56_GBPS: return 22;
+ case IB_RATE_112_GBPS: return 45;
+ case IB_RATE_168_GBPS: return 67;
+ case IB_RATE_25_GBPS: return 10;
+ case IB_RATE_100_GBPS: return 40;
+ case IB_RATE_200_GBPS: return 80;
+ case IB_RATE_300_GBPS: return 120;
+ case IB_RATE_28_GBPS: return 11;
+ case IB_RATE_50_GBPS: return 20;
+ case IB_RATE_400_GBPS: return 160;
+ case IB_RATE_600_GBPS: return 240;
+ case IB_RATE_800_GBPS: return 320;
+ default: return -1;
}
}
EXPORT_SYMBOL(ib_rate_to_mult);
@@ -133,16 +156,29 @@ EXPORT_SYMBOL(ib_rate_to_mult);
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
switch (mult) {
- case 1: return IB_RATE_2_5_GBPS;
- case 2: return IB_RATE_5_GBPS;
- case 4: return IB_RATE_10_GBPS;
- case 8: return IB_RATE_20_GBPS;
- case 12: return IB_RATE_30_GBPS;
- case 16: return IB_RATE_40_GBPS;
- case 24: return IB_RATE_60_GBPS;
- case 32: return IB_RATE_80_GBPS;
- case 48: return IB_RATE_120_GBPS;
- default: return IB_RATE_PORT_CURRENT;
+ case 1: return IB_RATE_2_5_GBPS;
+ case 2: return IB_RATE_5_GBPS;
+ case 4: return IB_RATE_10_GBPS;
+ case 8: return IB_RATE_20_GBPS;
+ case 12: return IB_RATE_30_GBPS;
+ case 16: return IB_RATE_40_GBPS;
+ case 24: return IB_RATE_60_GBPS;
+ case 32: return IB_RATE_80_GBPS;
+ case 48: return IB_RATE_120_GBPS;
+ case 6: return IB_RATE_14_GBPS;
+ case 22: return IB_RATE_56_GBPS;
+ case 45: return IB_RATE_112_GBPS;
+ case 67: return IB_RATE_168_GBPS;
+ case 10: return IB_RATE_25_GBPS;
+ case 40: return IB_RATE_100_GBPS;
+ case 80: return IB_RATE_200_GBPS;
+ case 120: return IB_RATE_300_GBPS;
+ case 11: return IB_RATE_28_GBPS;
+ case 20: return IB_RATE_50_GBPS;
+ case 160: return IB_RATE_400_GBPS;
+ case 240: return IB_RATE_600_GBPS;
+ case 320: return IB_RATE_800_GBPS;
+ default: return IB_RATE_PORT_CURRENT;
}
}
EXPORT_SYMBOL(mult_to_ib_rate);
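The multiplier used by these two helpers is simply the rate expressed in 2.5 Gbps lane units, rounded to the nearest integer (100 Gbps maps to 40, 56 Gbps to 22, 14 Gbps to 6). A minimal sketch of a round-trip check a kernel caller could make, using only the two helpers defined above; the function name is a hypothetical example:

#include <rdma/ib_verbs.h>

/* Sketch only: an exact multiple of 2.5 Gbps should survive the
 * rate -> multiplier -> rate round trip added above.
 */
static void example_rate_mult_roundtrip(void)
{
	int mult = ib_rate_to_mult(IB_RATE_100_GBPS);	/* 40 lanes of 2.5 Gbps */

	WARN_ON(mult != 40);
	WARN_ON(mult_to_ib_rate(mult) != IB_RATE_100_GBPS);
}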
@@ -167,55 +203,55 @@ __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
case IB_RATE_100_GBPS: return 103125;
case IB_RATE_200_GBPS: return 206250;
case IB_RATE_300_GBPS: return 309375;
+ case IB_RATE_28_GBPS: return 28125;
+ case IB_RATE_50_GBPS: return 53125;
+ case IB_RATE_400_GBPS: return 425000;
+ case IB_RATE_600_GBPS: return 637500;
+ case IB_RATE_800_GBPS: return 850000;
default: return -1;
}
}
EXPORT_SYMBOL(ib_rate_to_mbps);
__attribute_const__ enum rdma_transport_type
-rdma_node_get_transport(enum rdma_node_type node_type)
-{
- switch (node_type) {
- case RDMA_NODE_IB_CA:
- case RDMA_NODE_IB_SWITCH:
- case RDMA_NODE_IB_ROUTER:
- return RDMA_TRANSPORT_IB;
- case RDMA_NODE_RNIC:
- return RDMA_TRANSPORT_IWARP;
- case RDMA_NODE_USNIC:
+rdma_node_get_transport(unsigned int node_type)
+{
+
+ if (node_type == RDMA_NODE_USNIC)
return RDMA_TRANSPORT_USNIC;
- case RDMA_NODE_USNIC_UDP:
+ if (node_type == RDMA_NODE_USNIC_UDP)
return RDMA_TRANSPORT_USNIC_UDP;
- default:
- BUG();
- return 0;
- }
+ if (node_type == RDMA_NODE_RNIC)
+ return RDMA_TRANSPORT_IWARP;
+ if (node_type == RDMA_NODE_UNSPECIFIED)
+ return RDMA_TRANSPORT_UNSPECIFIED;
+
+ return RDMA_TRANSPORT_IB;
}
EXPORT_SYMBOL(rdma_node_get_transport);
-enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
+enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
+ u32 port_num)
{
- if (device->get_link_layer)
- return device->get_link_layer(device, port_num);
+ enum rdma_transport_type lt;
+ if (device->ops.get_link_layer)
+ return device->ops.get_link_layer(device, port_num);
- switch (rdma_node_get_transport(device->node_type)) {
- case RDMA_TRANSPORT_IB:
+ lt = rdma_node_get_transport(device->node_type);
+ if (lt == RDMA_TRANSPORT_IB)
return IB_LINK_LAYER_INFINIBAND;
- case RDMA_TRANSPORT_IWARP:
- case RDMA_TRANSPORT_USNIC:
- case RDMA_TRANSPORT_USNIC_UDP:
- return IB_LINK_LAYER_ETHERNET;
- default:
- return IB_LINK_LAYER_UNSPECIFIED;
- }
+
+ return IB_LINK_LAYER_ETHERNET;
}
EXPORT_SYMBOL(rdma_port_get_link_layer);
/* Protection domains */
/**
- * ib_alloc_pd - Allocates an unused protection domain.
+ * __ib_alloc_pd - Allocates an unused protection domain.
* @device: The device on which to allocate the protection domain.
+ * @flags: protection domain flags
+ * @caller: caller's build-time module name
*
* A protection domain object provides an association between QPs, shared
* receive queues, address handles, memory regions, and memory windows.
@@ -223,287 +259,868 @@ EXPORT_SYMBOL(rdma_port_get_link_layer);
* Every PD has a local_dma_lkey which can be used as the lkey value for local
* memory operations.
*/
-struct ib_pd *ib_alloc_pd(struct ib_device *device)
+struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
+ const char *caller)
{
struct ib_pd *pd;
- struct ib_device_attr devattr;
- int rc;
-
- rc = ib_query_device(device, &devattr);
- if (rc)
- return ERR_PTR(rc);
+ int mr_access_flags = 0;
+ int ret;
- pd = device->alloc_pd(device, NULL, NULL);
- if (IS_ERR(pd))
- return pd;
+ pd = rdma_zalloc_drv_obj(device, ib_pd);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
pd->device = device;
- pd->uobject = NULL;
- pd->local_mr = NULL;
- atomic_set(&pd->usecnt, 0);
+ pd->flags = flags;
+
+ rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD);
+ rdma_restrack_set_name(&pd->res, caller);
+
+ ret = device->ops.alloc_pd(pd, NULL);
+ if (ret) {
+ rdma_restrack_put(&pd->res);
+ kfree(pd);
+ return ERR_PTR(ret);
+ }
+ rdma_restrack_add(&pd->res);
- if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
+ if (device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY)
pd->local_dma_lkey = device->local_dma_lkey;
- else {
+ else
+ mr_access_flags |= IB_ACCESS_LOCAL_WRITE;
+
+ if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
+ pr_warn("%s: enabling unsafe global rkey\n", caller);
+ mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
+ }
+
+ if (mr_access_flags) {
struct ib_mr *mr;
- mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
+ mr = pd->device->ops.get_dma_mr(pd, mr_access_flags);
if (IS_ERR(mr)) {
ib_dealloc_pd(pd);
- return (struct ib_pd *)mr;
+ return ERR_CAST(mr);
}
- pd->local_mr = mr;
- pd->local_dma_lkey = pd->local_mr->lkey;
+ mr->device = pd->device;
+ mr->pd = pd;
+ mr->type = IB_MR_TYPE_DMA;
+ mr->uobject = NULL;
+ mr->need_inval = false;
+
+ pd->__internal_mr = mr;
+
+ if (!(device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY))
+ pd->local_dma_lkey = pd->__internal_mr->lkey;
+
+ if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
+ pd->unsafe_global_rkey = pd->__internal_mr->rkey;
}
+
return pd;
}
-EXPORT_SYMBOL(ib_alloc_pd);
+EXPORT_SYMBOL(__ib_alloc_pd);
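Kernel ULPs are not expected to call __ib_alloc_pd() directly; it normally sits behind an ib_alloc_pd(device, flags) wrapper that supplies KBUILD_MODNAME as @caller, paired with an udata-less ib_dealloc_pd(). A hedged sketch of the usual allocate/free pairing, assuming those mainline wrappers; the function name is a placeholder:

#include <rdma/ib_verbs.h>

/* Sketch: allocate a PD for a kernel client and release it again.
 * ib_alloc_pd() is assumed to expand to __ib_alloc_pd(dev, flags,
 * KBUILD_MODNAME); ib_dealloc_pd() is the kernel (NULL udata) teardown.
 */
static int example_pd_lifecycle(struct ib_device *dev)
{
	struct ib_pd *pd;

	pd = ib_alloc_pd(dev, 0);	/* no IB_PD_UNSAFE_GLOBAL_RKEY */
	if (IS_ERR(pd))
		return PTR_ERR(pd);

	/* ... create QPs/CQs/MRs against pd here ... */

	ib_dealloc_pd(pd);
	return 0;
}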
/**
- * ib_dealloc_pd - Deallocates a protection domain.
+ * ib_dealloc_pd_user - Deallocates a protection domain.
* @pd: The protection domain to deallocate.
+ * @udata: Valid user data or NULL for kernel object
*
* It is an error to call this function while any resources in the pd still
* exist. The caller is responsible to synchronously destroy them and
* guarantee no new allocations will happen.
*/
-void ib_dealloc_pd(struct ib_pd *pd)
+int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
{
int ret;
- if (pd->local_mr) {
- ret = ib_dereg_mr(pd->local_mr);
+ if (pd->__internal_mr) {
+ ret = pd->device->ops.dereg_mr(pd->__internal_mr, NULL);
WARN_ON(ret);
- pd->local_mr = NULL;
+ pd->__internal_mr = NULL;
}
- /* uverbs manipulates usecnt with proper locking, while the kabi
- requires the caller to guarantee we can't race here. */
- WARN_ON(atomic_read(&pd->usecnt));
+ ret = pd->device->ops.dealloc_pd(pd, udata);
+ if (ret)
+ return ret;
- /* Making delalloc_pd a void return is a WIP, no driver should return
- an error here. */
- ret = pd->device->dealloc_pd(pd);
- WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
+ rdma_restrack_del(&pd->res);
+ kfree(pd);
+ return ret;
}
-EXPORT_SYMBOL(ib_dealloc_pd);
+EXPORT_SYMBOL(ib_dealloc_pd_user);
/* Address handles */
-struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+/**
+ * rdma_copy_ah_attr - Copy rdma ah attribute from source to destination.
+ * @dest: Pointer to destination ah_attr. Contents of the destination
+ * pointer are assumed to be invalid and attributes are overwritten.
+ * @src: Pointer to source ah_attr.
+ */
+void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
+ const struct rdma_ah_attr *src)
{
+ *dest = *src;
+ if (dest->grh.sgid_attr)
+ rdma_hold_gid_attr(dest->grh.sgid_attr);
+}
+EXPORT_SYMBOL(rdma_copy_ah_attr);
+
+/**
+ * rdma_replace_ah_attr - Replace valid ah_attr with new one.
+ * @old: Pointer to existing ah_attr which needs to be replaced.
+ * old is assumed to be valid or zero'd
+ * @new: Pointer to the new ah_attr.
+ *
+ * rdma_replace_ah_attr() first releases any reference in the old ah_attr if
+ * the old ah_attr is valid; after that it copies the new attribute and holds
+ * the reference to the replaced ah_attr.
+ */
+void rdma_replace_ah_attr(struct rdma_ah_attr *old,
+ const struct rdma_ah_attr *new)
+{
+ rdma_destroy_ah_attr(old);
+ *old = *new;
+ if (old->grh.sgid_attr)
+ rdma_hold_gid_attr(old->grh.sgid_attr);
+}
+EXPORT_SYMBOL(rdma_replace_ah_attr);
+
+/**
+ * rdma_move_ah_attr - Move ah_attr pointed by source to destination.
+ * @dest: Pointer to destination ah_attr to copy to.
+ * dest is assumed to be valid or zero'd
+ * @src: Pointer to the new ah_attr.
+ *
+ * rdma_move_ah_attr() first releases any reference in the destination ah_attr
+ * if it is valid. This also transfers ownership of internal references from
+ * src to dest, making src invalid in the process. No new reference of the src
+ * ah_attr is taken.
+ */
+void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src)
+{
+ rdma_destroy_ah_attr(dest);
+ *dest = *src;
+ src->grh.sgid_attr = NULL;
+}
+EXPORT_SYMBOL(rdma_move_ah_attr);
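The three helpers above encode the SGID-attribute reference rules: copy takes an extra reference, replace drops the old reference before copying, and move transfers ownership without touching the refcount. A minimal sketch using only the helpers in this hunk plus rdma_destroy_ah_attr() defined later in the patch; the function name is a hypothetical example:

#include <rdma/ib_verbs.h>

/* Sketch of the reference rules: a copied attribute must be destroyed by
 * the copier, while a moved attribute leaves the source with nothing to
 * release.
 */
static void example_ah_attr_ownership(struct rdma_ah_attr *src)
{
	struct rdma_ah_attr copy = {};
	struct rdma_ah_attr moved = {};

	rdma_copy_ah_attr(&copy, src);	/* src and copy each hold a reference */
	rdma_destroy_ah_attr(&copy);	/* drop the copy's reference */

	rdma_move_ah_attr(&moved, src);	/* ownership moves to 'moved' */
	rdma_destroy_ah_attr(&moved);	/* src->grh.sgid_attr is now NULL */
}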
+
+/*
+ * Validate that the rdma_ah_attr is valid for the device before passing it
+ * off to the driver.
+ */
+static int rdma_check_ah_attr(struct ib_device *device,
+ struct rdma_ah_attr *ah_attr)
+{
+ if (!rdma_is_port_valid(device, ah_attr->port_num))
+ return -EINVAL;
+
+ if ((rdma_is_grh_required(device, ah_attr->port_num) ||
+ ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) &&
+ !(ah_attr->ah_flags & IB_AH_GRH))
+ return -EINVAL;
+
+ if (ah_attr->grh.sgid_attr) {
+ /*
+ * Make sure the passed sgid_attr is consistent with the
+ * parameters
+ */
+ if (ah_attr->grh.sgid_attr->index != ah_attr->grh.sgid_index ||
+ ah_attr->grh.sgid_attr->port_num != ah_attr->port_num)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * If the ah requires a GRH then ensure that sgid_attr pointer is filled in.
+ * On success the caller is responsible to call rdma_unfill_sgid_attr().
+ */
+static int rdma_fill_sgid_attr(struct ib_device *device,
+ struct rdma_ah_attr *ah_attr,
+ const struct ib_gid_attr **old_sgid_attr)
+{
+ const struct ib_gid_attr *sgid_attr;
+ struct ib_global_route *grh;
+ int ret;
+
+ *old_sgid_attr = ah_attr->grh.sgid_attr;
+
+ ret = rdma_check_ah_attr(device, ah_attr);
+ if (ret)
+ return ret;
+
+ if (!(ah_attr->ah_flags & IB_AH_GRH))
+ return 0;
+
+ grh = rdma_ah_retrieve_grh(ah_attr);
+ if (grh->sgid_attr)
+ return 0;
+
+ sgid_attr =
+ rdma_get_gid_attr(device, ah_attr->port_num, grh->sgid_index);
+ if (IS_ERR(sgid_attr))
+ return PTR_ERR(sgid_attr);
+
+ /* Move ownership of the kref into the ah_attr */
+ grh->sgid_attr = sgid_attr;
+ return 0;
+}
+
+static void rdma_unfill_sgid_attr(struct rdma_ah_attr *ah_attr,
+ const struct ib_gid_attr *old_sgid_attr)
+{
+ /*
+ * Fill didn't change anything, the caller retains ownership of
+ * whatever it passed
+ */
+ if (ah_attr->grh.sgid_attr == old_sgid_attr)
+ return;
+
+ /*
+ * Otherwise, we need to undo what rdma_fill_sgid_attr so the caller
+ * doesn't see any change in the rdma_ah_attr. If we get here
+ * old_sgid_attr is NULL.
+ */
+ rdma_destroy_ah_attr(ah_attr);
+}
+
+static const struct ib_gid_attr *
+rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr,
+ const struct ib_gid_attr *old_attr)
+{
+ if (old_attr)
+ rdma_put_gid_attr(old_attr);
+ if (ah_attr->ah_flags & IB_AH_GRH) {
+ rdma_hold_gid_attr(ah_attr->grh.sgid_attr);
+ return ah_attr->grh.sgid_attr;
+ }
+ return NULL;
+}
+
+static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
+ struct rdma_ah_attr *ah_attr,
+ u32 flags,
+ struct ib_udata *udata,
+ struct net_device *xmit_slave)
+{
+ struct rdma_ah_init_attr init_attr = {};
+ struct ib_device *device = pd->device;
struct ib_ah *ah;
+ int ret;
- ah = pd->device->create_ah(pd, ah_attr);
+ might_sleep_if(flags & RDMA_CREATE_AH_SLEEPABLE);
- if (!IS_ERR(ah)) {
- ah->device = pd->device;
- ah->pd = pd;
- ah->uobject = NULL;
- atomic_inc(&pd->usecnt);
+ if (!udata && !device->ops.create_ah)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ ah = rdma_zalloc_drv_obj_gfp(
+ device, ib_ah,
+ (flags & RDMA_CREATE_AH_SLEEPABLE) ? GFP_KERNEL : GFP_ATOMIC);
+ if (!ah)
+ return ERR_PTR(-ENOMEM);
+
+ ah->device = device;
+ ah->pd = pd;
+ ah->type = ah_attr->type;
+ ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL);
+ init_attr.ah_attr = ah_attr;
+ init_attr.flags = flags;
+ init_attr.xmit_slave = xmit_slave;
+
+ if (udata)
+ ret = device->ops.create_user_ah(ah, &init_attr, udata);
+ else
+ ret = device->ops.create_ah(ah, &init_attr, NULL);
+ if (ret) {
+ if (ah->sgid_attr)
+ rdma_put_gid_attr(ah->sgid_attr);
+ kfree(ah);
+ return ERR_PTR(ret);
+ }
+
+ atomic_inc(&pd->usecnt);
+ return ah;
+}
+
+/**
+ * rdma_create_ah - Creates an address handle for the
+ * given address vector.
+ * @pd: The protection domain associated with the address handle.
+ * @ah_attr: The attributes of the address vector.
+ * @flags: Create address handle flags (see enum rdma_create_ah_flags).
+ *
+ * It returns a newly allocated address handle on success or an ERR_PTR on error.
+ * The address handle is used to reference a local or global destination
+ * in all UD QP post sends.
+ */
+struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
+ u32 flags)
+{
+ const struct ib_gid_attr *old_sgid_attr;
+ struct net_device *slave;
+ struct ib_ah *ah;
+ int ret;
+
+ ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
+ if (ret)
+ return ERR_PTR(ret);
+ slave = rdma_lag_get_ah_roce_slave(pd->device, ah_attr,
+ (flags & RDMA_CREATE_AH_SLEEPABLE) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (IS_ERR(slave)) {
+ rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
+ return ERR_CAST(slave);
+ }
+ ah = _rdma_create_ah(pd, ah_attr, flags, NULL, slave);
+ rdma_lag_put_ah_roce_slave(slave);
+ rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
+ return ah;
+}
+EXPORT_SYMBOL(rdma_create_ah);
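For in-kernel users the typical pattern is to fill an rdma_ah_attr with the setters from <rdma/ib_verbs.h> and pair rdma_create_ah() with rdma_destroy_ah(). A minimal IB-flavoured sketch, assuming rdma_destroy_ah() is the kernel wrapper around rdma_destroy_ah_user() introduced later in this patch; the function name and the dlid/sl values are placeholders:

#include <rdma/ib_verbs.h>

/* Sketch: build an IB address handle toward a remote LID. The numeric
 * values are hypothetical placeholders.
 */
static struct ib_ah *example_create_ib_ah(struct ib_pd *pd, u32 port, u16 dlid)
{
	struct rdma_ah_attr attr = {};

	attr.type = RDMA_AH_ATTR_TYPE_IB;
	rdma_ah_set_dlid(&attr, dlid);
	rdma_ah_set_sl(&attr, 0);
	rdma_ah_set_port_num(&attr, port);

	return rdma_create_ah(pd, &attr, RDMA_CREATE_AH_SLEEPABLE);
}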
+
+/**
+ * rdma_create_user_ah - Creates an address handle for the
+ * given address vector.
+ * It resolves destination mac address for ah attribute of RoCE type.
+ * @pd: The protection domain associated with the address handle.
+ * @ah_attr: The attributes of the address vector.
+ * @udata: pointer to user's input output buffer information needed by
+ * the provider driver.
+ *
+ * It returns a newly allocated address handle on success or an ERR_PTR on error.
+ * The address handle is used to reference a local or global destination
+ * in all UD QP post sends.
+ */
+struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
+ struct rdma_ah_attr *ah_attr,
+ struct ib_udata *udata)
+{
+ const struct ib_gid_attr *old_sgid_attr;
+ struct ib_ah *ah;
+ int err;
+
+ err = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
+ if (err)
+ return ERR_PTR(err);
+
+ if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
+ err = ib_resolve_eth_dmac(pd->device, ah_attr);
+ if (err) {
+ ah = ERR_PTR(err);
+ goto out;
+ }
}
+ ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE,
+ udata, NULL);
+
+out:
+ rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
return ah;
}
-EXPORT_SYMBOL(ib_create_ah);
+EXPORT_SYMBOL(rdma_create_user_ah);
+
+int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
+{
+ const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
+ struct iphdr ip4h_checked;
+ const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;
+
+ /* If it's IPv6, the version must be 6, otherwise, the first
+ * 20 bytes (before the IPv4 header) are garbled.
+ */
+ if (ip6h->version != 6)
+ return (ip4h->version == 4) ? 4 : 0;
+ /* version may be 6 or 4 because the first 20 bytes could be garbled */
+
+ /* RoCE v2 requires no options, thus header length
+ * must be 5 words
+ */
+ if (ip4h->ihl != 5)
+ return 6;
+
+ /* Verify checksum.
+ * We can't write on scattered buffers so we need to copy to
+ * temp buffer.
+ */
+ memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
+ ip4h_checked.check = 0;
+ ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
+ /* if IPv4 header checksum is OK, believe it */
+ if (ip4h->check == ip4h_checked.check)
+ return 4;
+ return 6;
+}
+EXPORT_SYMBOL(ib_get_rdma_header_version);
+
+static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
+ u32 port_num,
+ const struct ib_grh *grh)
+{
+ int grh_version;
+
+ if (rdma_protocol_ib(device, port_num))
+ return RDMA_NETWORK_IB;
+
+ grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);
+
+ if (grh_version == 4)
+ return RDMA_NETWORK_IPV4;
+
+ if (grh->next_hdr == IPPROTO_UDP)
+ return RDMA_NETWORK_IPV6;
+
+ return RDMA_NETWORK_ROCE_V1;
+}
+
+struct find_gid_index_context {
+ u16 vlan_id;
+ enum ib_gid_type gid_type;
+};
+
+static bool find_gid_index(const union ib_gid *gid,
+ const struct ib_gid_attr *gid_attr,
+ void *context)
+{
+ struct find_gid_index_context *ctx = context;
+ u16 vlan_id = 0xffff;
+ int ret;
+
+ if (ctx->gid_type != gid_attr->gid_type)
+ return false;
+
+ ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
+ if (ret)
+ return false;
+
+ return ctx->vlan_id == vlan_id;
+}
+
+static const struct ib_gid_attr *
+get_sgid_attr_from_eth(struct ib_device *device, u32 port_num,
+ u16 vlan_id, const union ib_gid *sgid,
+ enum ib_gid_type gid_type)
+{
+ struct find_gid_index_context context = {.vlan_id = vlan_id,
+ .gid_type = gid_type};
+
+ return rdma_find_gid_by_filter(device, sgid, port_num, find_gid_index,
+ &context);
+}
+
+int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
+ enum rdma_network_type net_type,
+ union ib_gid *sgid, union ib_gid *dgid)
+{
+ struct sockaddr_in src_in;
+ struct sockaddr_in dst_in;
+ __be32 src_saddr, dst_saddr;
+
+ if (!sgid || !dgid)
+ return -EINVAL;
+
+ if (net_type == RDMA_NETWORK_IPV4) {
+ memcpy(&src_in.sin_addr.s_addr,
+ &hdr->roce4grh.saddr, 4);
+ memcpy(&dst_in.sin_addr.s_addr,
+ &hdr->roce4grh.daddr, 4);
+ src_saddr = src_in.sin_addr.s_addr;
+ dst_saddr = dst_in.sin_addr.s_addr;
+ ipv6_addr_set_v4mapped(src_saddr,
+ (struct in6_addr *)sgid);
+ ipv6_addr_set_v4mapped(dst_saddr,
+ (struct in6_addr *)dgid);
+ return 0;
+ } else if (net_type == RDMA_NETWORK_IPV6 ||
+ net_type == RDMA_NETWORK_IB || net_type == RDMA_NETWORK_ROCE_V1) {
+ *dgid = hdr->ibgrh.dgid;
+ *sgid = hdr->ibgrh.sgid;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
+
+/* Resolve destination mac address and hop limit for unicast destination
+ * GID entry, considering the source GID entry as well.
+ * ah_attribute must have valid port_num, sgid_index.
+ */
+static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
+ struct rdma_ah_attr *ah_attr)
+{
+ struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
+ const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
+ int hop_limit = 0xff;
+ int ret = 0;
+
+ /* If destination is link local and source GID is RoCEv1,
+ * IP stack is not used.
+ */
+ if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
+ sgid_attr->gid_type == IB_GID_TYPE_ROCE) {
+ rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
+ ah_attr->roce.dmac);
+ return ret;
+ }
+
+ ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid,
+ ah_attr->roce.dmac,
+ sgid_attr, &hop_limit);
+
+ grh->hop_limit = hop_limit;
+ return ret;
+}
-int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
- const struct ib_wc *wc, const struct ib_grh *grh,
- struct ib_ah_attr *ah_attr)
+/*
+ * This function initializes address handle attributes from the incoming packet.
+ * The incoming packet carries the dgid of the receiver node on which this
+ * code is executing, and sgid contains the GID of the sender.
+ *
+ * When resolving the destination mac address, the arrived dgid is used as
+ * sgid, and sgid is used as dgid, because sgid contains the destination's
+ * GID to respond to.
+ *
+ * On success the caller is responsible to call rdma_destroy_ah_attr on the
+ * attr.
+ */
+int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
+ const struct ib_wc *wc, const struct ib_grh *grh,
+ struct rdma_ah_attr *ah_attr)
{
u32 flow_class;
- u16 gid_index;
int ret;
+ enum rdma_network_type net_type = RDMA_NETWORK_IB;
+ enum ib_gid_type gid_type = IB_GID_TYPE_IB;
+ const struct ib_gid_attr *sgid_attr;
+ int hoplimit = 0xff;
+ union ib_gid dgid;
+ union ib_gid sgid;
+
+ might_sleep();
memset(ah_attr, 0, sizeof *ah_attr);
+ ah_attr->type = rdma_ah_find_type(device, port_num);
if (rdma_cap_eth_ah(device, port_num)) {
+ if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
+ net_type = wc->network_hdr_type;
+ else
+ net_type = ib_get_net_type_by_grh(device, port_num, grh);
+ gid_type = ib_network_to_gid_type(net_type);
+ }
+ ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
+ &sgid, &dgid);
+ if (ret)
+ return ret;
+
+ rdma_ah_set_sl(ah_attr, wc->sl);
+ rdma_ah_set_port_num(ah_attr, port_num);
+
+ if (rdma_protocol_roce(device, port_num)) {
+ u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
+ wc->vlan_id : 0xffff;
+
if (!(wc->wc_flags & IB_WC_GRH))
return -EPROTOTYPE;
- if (wc->wc_flags & IB_WC_WITH_SMAC &&
- wc->wc_flags & IB_WC_WITH_VLAN) {
- memcpy(ah_attr->dmac, wc->smac, ETH_ALEN);
- ah_attr->vlan_id = wc->vlan_id;
- } else {
- ret = rdma_addr_find_dmac_by_grh(&grh->dgid, &grh->sgid,
- ah_attr->dmac, &ah_attr->vlan_id);
- if (ret)
- return ret;
- }
- } else {
- ah_attr->vlan_id = 0xffff;
- }
+ sgid_attr = get_sgid_attr_from_eth(device, port_num,
+ vlan_id, &dgid,
+ gid_type);
+ if (IS_ERR(sgid_attr))
+ return PTR_ERR(sgid_attr);
- ah_attr->dlid = wc->slid;
- ah_attr->sl = wc->sl;
- ah_attr->src_path_bits = wc->dlid_path_bits;
- ah_attr->port_num = port_num;
+ flow_class = be32_to_cpu(grh->version_tclass_flow);
+ rdma_move_grh_sgid_attr(ah_attr,
+ &sgid,
+ flow_class & 0xFFFFF,
+ hoplimit,
+ (flow_class >> 20) & 0xFF,
+ sgid_attr);
+
+ ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
+ if (ret)
+ rdma_destroy_ah_attr(ah_attr);
- if (wc->wc_flags & IB_WC_GRH) {
- ah_attr->ah_flags = IB_AH_GRH;
- ah_attr->grh.dgid = grh->sgid;
+ return ret;
+ } else {
+ rdma_ah_set_dlid(ah_attr, wc->slid);
+ rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
- ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
- &gid_index);
- if (ret)
- return ret;
+ if ((wc->wc_flags & IB_WC_GRH) == 0)
+ return 0;
+
+ if (dgid.global.interface_id !=
+ cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
+ sgid_attr = rdma_find_gid_by_port(
+ device, &dgid, IB_GID_TYPE_IB, port_num, NULL);
+ } else
+ sgid_attr = rdma_get_gid_attr(device, port_num, 0);
- ah_attr->grh.sgid_index = (u8) gid_index;
+ if (IS_ERR(sgid_attr))
+ return PTR_ERR(sgid_attr);
flow_class = be32_to_cpu(grh->version_tclass_flow);
- ah_attr->grh.flow_label = flow_class & 0xFFFFF;
- ah_attr->grh.hop_limit = 0xFF;
- ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
+ rdma_move_grh_sgid_attr(ah_attr,
+ &sgid,
+ flow_class & 0xFFFFF,
+ hoplimit,
+ (flow_class >> 20) & 0xFF,
+ sgid_attr);
+
+ return 0;
}
- return 0;
}
-EXPORT_SYMBOL(ib_init_ah_from_wc);
+EXPORT_SYMBOL(ib_init_ah_attr_from_wc);
+
+/**
+ * rdma_move_grh_sgid_attr - Sets the sgid attribute of GRH, taking ownership
+ * of the reference
+ *
+ * @attr: Pointer to AH attribute structure
+ * @dgid: Destination GID
+ * @flow_label: Flow label
+ * @hop_limit: Hop limit
+ * @traffic_class: traffic class
+ * @sgid_attr: Pointer to SGID attribute
+ *
+ * This takes ownership of the sgid_attr reference. The caller must ensure
+ * rdma_destroy_ah_attr() is called before destroying the rdma_ah_attr after
+ * calling this function.
+ */
+void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
+ u32 flow_label, u8 hop_limit, u8 traffic_class,
+ const struct ib_gid_attr *sgid_attr)
+{
+ rdma_ah_set_grh(attr, dgid, flow_label, sgid_attr->index, hop_limit,
+ traffic_class);
+ attr->grh.sgid_attr = sgid_attr;
+}
+EXPORT_SYMBOL(rdma_move_grh_sgid_attr);
+
+/**
+ * rdma_destroy_ah_attr - Release reference to SGID attribute of
+ * ah attribute.
+ * @ah_attr: Pointer to ah attribute
+ *
+ * Release reference to the SGID attribute of the ah attribute if it is
+ * non NULL. It is safe to call this multiple times, and safe to call it on
+ * a zero initialized ah_attr.
+ */
+void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr)
+{
+ if (ah_attr->grh.sgid_attr) {
+ rdma_put_gid_attr(ah_attr->grh.sgid_attr);
+ ah_attr->grh.sgid_attr = NULL;
+ }
+}
+EXPORT_SYMBOL(rdma_destroy_ah_attr);
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
- const struct ib_grh *grh, u8 port_num)
+ const struct ib_grh *grh, u32 port_num)
{
- struct ib_ah_attr ah_attr;
+ struct rdma_ah_attr ah_attr;
+ struct ib_ah *ah;
int ret;
- ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
+ ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
if (ret)
return ERR_PTR(ret);
- return ib_create_ah(pd, &ah_attr);
+ ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
+
+ rdma_destroy_ah_attr(&ah_attr);
+ return ah;
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
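A common use of ib_create_ah_from_wc() is replying on a UD QP: build an AH from the receive completion, post the reply, then drop the AH. A hedged sketch, assuming the rdma_destroy_ah() kernel wrapper around rdma_destroy_ah_user(); the function name is a hypothetical example and error handling is abbreviated:

#include <rdma/ib_verbs.h>

/* Sketch: create a reply AH from a received work completion on a UD QP.
 * 'grh' points at the GRH the HCA scattered into the first 40 bytes of
 * the receive buffer when IB_WC_GRH is set.
 */
static void example_reply_ah(struct ib_pd *pd, const struct ib_wc *wc,
			     const struct ib_grh *grh, u32 port_num)
{
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
	if (IS_ERR(ah))
		return;

	/* ... post the UD reply referencing 'ah' and wait for it ... */

	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
}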
-int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
+int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
- return ah->device->modify_ah ?
- ah->device->modify_ah(ah, ah_attr) :
- -ENOSYS;
+ const struct ib_gid_attr *old_sgid_attr;
+ int ret;
+
+ if (ah->type != ah_attr->type)
+ return -EINVAL;
+
+ ret = rdma_fill_sgid_attr(ah->device, ah_attr, &old_sgid_attr);
+ if (ret)
+ return ret;
+
+ ret = ah->device->ops.modify_ah ?
+ ah->device->ops.modify_ah(ah, ah_attr) :
+ -EOPNOTSUPP;
+
+ ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr);
+ rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
+ return ret;
}
-EXPORT_SYMBOL(ib_modify_ah);
+EXPORT_SYMBOL(rdma_modify_ah);
-int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
+int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
- return ah->device->query_ah ?
- ah->device->query_ah(ah, ah_attr) :
- -ENOSYS;
+ ah_attr->grh.sgid_attr = NULL;
+
+ return ah->device->ops.query_ah ?
+ ah->device->ops.query_ah(ah, ah_attr) :
+ -EOPNOTSUPP;
}
-EXPORT_SYMBOL(ib_query_ah);
+EXPORT_SYMBOL(rdma_query_ah);
-int ib_destroy_ah(struct ib_ah *ah)
+int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata)
{
+ const struct ib_gid_attr *sgid_attr = ah->sgid_attr;
struct ib_pd *pd;
int ret;
+ might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE);
+
pd = ah->pd;
- ret = ah->device->destroy_ah(ah);
- if (!ret)
- atomic_dec(&pd->usecnt);
+ ret = ah->device->ops.destroy_ah(ah, flags);
+ if (ret)
+ return ret;
+
+ atomic_dec(&pd->usecnt);
+ if (sgid_attr)
+ rdma_put_gid_attr(sgid_attr);
+
+ kfree(ah);
return ret;
}
-EXPORT_SYMBOL(ib_destroy_ah);
+EXPORT_SYMBOL(rdma_destroy_ah_user);
/* Shared receive queues */
-struct ib_srq *ib_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *srq_init_attr)
+/**
+ * ib_create_srq_user - Creates a SRQ associated with the specified protection
+ * domain.
+ * @pd: The protection domain associated with the SRQ.
+ * @srq_init_attr: A list of initial attributes required to create the
+ * SRQ. If SRQ creation succeeds, then the attributes are updated to
+ * the actual capabilities of the created SRQ.
+ * @uobject: uobject pointer if this is not a kernel SRQ
+ * @udata: udata pointer if this is not a kernel SRQ
+ *
+ * srq_attr->max_wr and srq_attr->max_sge are read to determine the
+ * requested size of the SRQ, and set to the actual values allocated
+ * on return. If ib_create_srq() succeeds, then max_wr and max_sge
+ * will always be at least as large as the requested values.
+ */
+struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_usrq_object *uobject,
+ struct ib_udata *udata)
{
struct ib_srq *srq;
+ int ret;
+
+ srq = rdma_zalloc_drv_obj(pd->device, ib_srq);
+ if (!srq)
+ return ERR_PTR(-ENOMEM);
+
+ srq->device = pd->device;
+ srq->pd = pd;
+ srq->event_handler = srq_init_attr->event_handler;
+ srq->srq_context = srq_init_attr->srq_context;
+ srq->srq_type = srq_init_attr->srq_type;
+ srq->uobject = uobject;
- if (!pd->device->create_srq)
- return ERR_PTR(-ENOSYS);
-
- srq = pd->device->create_srq(pd, srq_init_attr, NULL);
-
- if (!IS_ERR(srq)) {
- srq->device = pd->device;
- srq->pd = pd;
- srq->uobject = NULL;
- srq->event_handler = srq_init_attr->event_handler;
- srq->srq_context = srq_init_attr->srq_context;
- srq->srq_type = srq_init_attr->srq_type;
- if (srq->srq_type == IB_SRQT_XRC) {
- srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
- srq->ext.xrc.cq = srq_init_attr->ext.xrc.cq;
+ if (ib_srq_has_cq(srq->srq_type)) {
+ srq->ext.cq = srq_init_attr->ext.cq;
+ atomic_inc(&srq->ext.cq->usecnt);
+ }
+ if (srq->srq_type == IB_SRQT_XRC) {
+ srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
+ if (srq->ext.xrc.xrcd)
atomic_inc(&srq->ext.xrc.xrcd->usecnt);
- atomic_inc(&srq->ext.xrc.cq->usecnt);
- }
- atomic_inc(&pd->usecnt);
- atomic_set(&srq->usecnt, 0);
}
+ atomic_inc(&pd->usecnt);
+
+ rdma_restrack_new(&srq->res, RDMA_RESTRACK_SRQ);
+ rdma_restrack_parent_name(&srq->res, &pd->res);
+
+ ret = pd->device->ops.create_srq(srq, srq_init_attr, udata);
+ if (ret) {
+ rdma_restrack_put(&srq->res);
+ atomic_dec(&pd->usecnt);
+ if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd)
+ atomic_dec(&srq->ext.xrc.xrcd->usecnt);
+ if (ib_srq_has_cq(srq->srq_type))
+ atomic_dec(&srq->ext.cq->usecnt);
+ kfree(srq);
+ return ERR_PTR(ret);
+ }
+
+ rdma_restrack_add(&srq->res);
return srq;
}
-EXPORT_SYMBOL(ib_create_srq);
+EXPORT_SYMBOL(ib_create_srq_user);
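Kernel callers normally reach this through udata-less ib_create_srq()/ib_destroy_srq() wrappers that pass NULL for @uobject and @udata. A hedged sketch under that assumption; the function name and the work-request limits are placeholders:

#include <rdma/ib_verbs.h>

/* Sketch: basic kernel SRQ creation. ib_create_srq() is assumed to be the
 * udata-less wrapper around ib_create_srq_user() shown above.
 */
static struct ib_srq *example_create_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr attr = {
		.srq_type = IB_SRQT_BASIC,
		.attr = {
			.max_wr		= 64,
			.max_sge	= 1,
		},
	};

	return ib_create_srq(pd, &attr);
}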
int ib_modify_srq(struct ib_srq *srq,
struct ib_srq_attr *srq_attr,
enum ib_srq_attr_mask srq_attr_mask)
{
- return srq->device->modify_srq ?
- srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
- -ENOSYS;
+ return srq->device->ops.modify_srq ?
+ srq->device->ops.modify_srq(srq, srq_attr, srq_attr_mask,
+ NULL) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(ib_modify_srq);
int ib_query_srq(struct ib_srq *srq,
struct ib_srq_attr *srq_attr)
{
- return srq->device->query_srq ?
- srq->device->query_srq(srq, srq_attr) : -ENOSYS;
+ return srq->device->ops.query_srq ?
+ srq->device->ops.query_srq(srq, srq_attr) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(ib_query_srq);
-int ib_destroy_srq(struct ib_srq *srq)
+int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
{
- struct ib_pd *pd;
- enum ib_srq_type srq_type;
- struct ib_xrcd *uninitialized_var(xrcd);
- struct ib_cq *uninitialized_var(cq);
int ret;
if (atomic_read(&srq->usecnt))
return -EBUSY;
- pd = srq->pd;
- srq_type = srq->srq_type;
- if (srq_type == IB_SRQT_XRC) {
- xrcd = srq->ext.xrc.xrcd;
- cq = srq->ext.xrc.cq;
- }
+ ret = srq->device->ops.destroy_srq(srq, udata);
+ if (ret)
+ return ret;
- ret = srq->device->destroy_srq(srq);
- if (!ret) {
- atomic_dec(&pd->usecnt);
- if (srq_type == IB_SRQT_XRC) {
- atomic_dec(&xrcd->usecnt);
- atomic_dec(&cq->usecnt);
- }
- }
+ atomic_dec(&srq->pd->usecnt);
+ if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd)
+ atomic_dec(&srq->ext.xrc.xrcd->usecnt);
+ if (ib_srq_has_cq(srq->srq_type))
+ atomic_dec(&srq->ext.cq->usecnt);
+ rdma_restrack_del(&srq->res);
+ kfree(srq);
return ret;
}
-EXPORT_SYMBOL(ib_destroy_srq);
+EXPORT_SYMBOL(ib_destroy_srq_user);
/* Queue pairs */
+static void __ib_qp_event_handler(struct ib_event *event, void *context)
+{
+ struct ib_qp *qp = event->element.qp;
+
+ if (event->event == IB_EVENT_QP_LAST_WQE_REACHED)
+ complete(&qp->srq_completion);
+ if (qp->registered_event_handler)
+ qp->registered_event_handler(event, qp->qp_context);
+}
+
static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
struct ib_qp *qp = context;
unsigned long flags;
- spin_lock_irqsave(&qp->device->event_handler_lock, flags);
+ spin_lock_irqsave(&qp->device->qp_open_list_lock, flags);
list_for_each_entry(event->element.qp, &qp->open_list, open_list)
if (event->element.qp->event_handler)
event->element.qp->event_handler(event, event->element.qp->qp_context);
- spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
-}
-
-static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
-{
- mutex_lock(&xrcd->tgt_qp_mutex);
- list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
- mutex_unlock(&xrcd->tgt_qp_mutex);
+ spin_unlock_irqrestore(&qp->device->qp_open_list_lock, flags);
}
static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
@@ -512,12 +1129,20 @@ static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
{
struct ib_qp *qp;
unsigned long flags;
+ int err;
qp = kzalloc(sizeof *qp, GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
qp->real_qp = real_qp;
+ err = ib_open_shared_qp_security(qp, real_qp->device);
+ if (err) {
+ kfree(qp);
+ return ERR_PTR(err);
+ }
+
+ qp->real_qp = real_qp;
atomic_inc(&real_qp->usecnt);
qp->device = real_qp->device;
qp->event_handler = event_handler;
@@ -525,9 +1150,9 @@ static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
qp->qp_num = real_qp->qp_num;
qp->qp_type = real_qp->qp_type;
- spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
+ spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags);
list_add(&qp->open_list, &real_qp->open_list);
- spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
+ spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);
return qp;
}
@@ -540,86 +1165,235 @@ struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
return ERR_PTR(-EINVAL);
- qp = ERR_PTR(-EINVAL);
- mutex_lock(&xrcd->tgt_qp_mutex);
- list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
- if (real_qp->qp_num == qp_open_attr->qp_num) {
- qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
- qp_open_attr->qp_context);
- break;
- }
+ down_read(&xrcd->tgt_qps_rwsem);
+ real_qp = xa_load(&xrcd->tgt_qps, qp_open_attr->qp_num);
+ if (!real_qp) {
+ up_read(&xrcd->tgt_qps_rwsem);
+ return ERR_PTR(-EINVAL);
}
- mutex_unlock(&xrcd->tgt_qp_mutex);
+ qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
+ qp_open_attr->qp_context);
+ up_read(&xrcd->tgt_qps_rwsem);
return qp;
}
EXPORT_SYMBOL(ib_open_qp);
-struct ib_qp *ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *qp_init_attr)
+static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
+ struct ib_qp_init_attr *qp_init_attr)
{
- struct ib_qp *qp, *real_qp;
- struct ib_device *device;
-
- device = pd ? pd->device : qp_init_attr->xrcd->device;
- qp = device->create_qp(pd, qp_init_attr, NULL);
-
- if (!IS_ERR(qp)) {
- qp->device = device;
- qp->real_qp = qp;
- qp->uobject = NULL;
- qp->qp_type = qp_init_attr->qp_type;
-
- atomic_set(&qp->usecnt, 0);
- if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
- qp->event_handler = __ib_shared_qp_event_handler;
- qp->qp_context = qp;
- qp->pd = NULL;
- qp->send_cq = qp->recv_cq = NULL;
- qp->srq = NULL;
- qp->xrcd = qp_init_attr->xrcd;
- atomic_inc(&qp_init_attr->xrcd->usecnt);
- INIT_LIST_HEAD(&qp->open_list);
-
- real_qp = qp;
- qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
- qp_init_attr->qp_context);
- if (!IS_ERR(qp))
- __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
- else
- real_qp->device->destroy_qp(real_qp);
- } else {
- qp->event_handler = qp_init_attr->event_handler;
- qp->qp_context = qp_init_attr->qp_context;
- if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
- qp->recv_cq = NULL;
- qp->srq = NULL;
- } else {
- qp->recv_cq = qp_init_attr->recv_cq;
- atomic_inc(&qp_init_attr->recv_cq->usecnt);
- qp->srq = qp_init_attr->srq;
- if (qp->srq)
- atomic_inc(&qp_init_attr->srq->usecnt);
- }
+ struct ib_qp *real_qp = qp;
+ int err;
+
+ qp->event_handler = __ib_shared_qp_event_handler;
+ qp->qp_context = qp;
+ qp->pd = NULL;
+ qp->send_cq = qp->recv_cq = NULL;
+ qp->srq = NULL;
+ qp->xrcd = qp_init_attr->xrcd;
+ atomic_inc(&qp_init_attr->xrcd->usecnt);
+ INIT_LIST_HEAD(&qp->open_list);
+
+ qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
+ qp_init_attr->qp_context);
+ if (IS_ERR(qp))
+ return qp;
+
+ err = xa_err(xa_store(&qp_init_attr->xrcd->tgt_qps, real_qp->qp_num,
+ real_qp, GFP_KERNEL));
+ if (err) {
+ ib_close_qp(qp);
+ return ERR_PTR(err);
+ }
+ return qp;
+}
- qp->pd = pd;
- qp->send_cq = qp_init_attr->send_cq;
- qp->xrcd = NULL;
+static struct ib_qp *create_qp(struct ib_device *dev, struct ib_pd *pd,
+ struct ib_qp_init_attr *attr,
+ struct ib_udata *udata,
+ struct ib_uqp_object *uobj, const char *caller)
+{
+ struct ib_udata dummy = {};
+ struct ib_qp *qp;
+ int ret;
- atomic_inc(&pd->usecnt);
- atomic_inc(&qp_init_attr->send_cq->usecnt);
- }
+ if (!dev->ops.create_qp)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ qp = rdma_zalloc_drv_obj_numa(dev, ib_qp);
+ if (!qp)
+ return ERR_PTR(-ENOMEM);
+
+ qp->device = dev;
+ qp->pd = pd;
+ qp->uobject = uobj;
+ qp->real_qp = qp;
+
+ qp->qp_type = attr->qp_type;
+ qp->rwq_ind_tbl = attr->rwq_ind_tbl;
+ qp->srq = attr->srq;
+ qp->event_handler = __ib_qp_event_handler;
+ qp->registered_event_handler = attr->event_handler;
+ qp->port = attr->port_num;
+ qp->qp_context = attr->qp_context;
+
+ spin_lock_init(&qp->mr_lock);
+ INIT_LIST_HEAD(&qp->rdma_mrs);
+ INIT_LIST_HEAD(&qp->sig_mrs);
+ init_completion(&qp->srq_completion);
+
+ qp->send_cq = attr->send_cq;
+ qp->recv_cq = attr->recv_cq;
+
+ rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP);
+ WARN_ONCE(!udata && !caller, "Missing kernel QP owner");
+ rdma_restrack_set_name(&qp->res, udata ? NULL : caller);
+ ret = dev->ops.create_qp(qp, attr, udata);
+ if (ret)
+ goto err_create;
+
+ /*
+ * TODO: The mlx4 internally overwrites send_cq and recv_cq.
+ * Unfortunately, it is not an easy task to fix that driver.
+ */
+ qp->send_cq = attr->send_cq;
+ qp->recv_cq = attr->recv_cq;
+
+ ret = ib_create_qp_security(qp, dev);
+ if (ret)
+ goto err_security;
+
+ rdma_restrack_add(&qp->res);
+ return qp;
+
+err_security:
+ qp->device->ops.destroy_qp(qp, udata ? &dummy : NULL);
+err_create:
+ rdma_restrack_put(&qp->res);
+ kfree(qp);
+ return ERR_PTR(ret);
+
+}
+
+/**
+ * ib_create_qp_user - Creates a QP associated with the specified protection
+ * domain.
+ * @dev: IB device
+ * @pd: The protection domain associated with the QP.
+ * @attr: A list of initial attributes required to create the
+ * QP. If QP creation succeeds, then the attributes are updated to
+ * the actual capabilities of the created QP.
+ * @udata: User data
+ * @uobj: uverbs object
+ * @caller: caller's build-time module name
+ */
+struct ib_qp *ib_create_qp_user(struct ib_device *dev, struct ib_pd *pd,
+ struct ib_qp_init_attr *attr,
+ struct ib_udata *udata,
+ struct ib_uqp_object *uobj, const char *caller)
+{
+ struct ib_qp *qp, *xrc_qp;
+
+ if (attr->qp_type == IB_QPT_XRC_TGT)
+ qp = create_qp(dev, pd, attr, NULL, NULL, caller);
+ else
+ qp = create_qp(dev, pd, attr, udata, uobj, NULL);
+ if (attr->qp_type != IB_QPT_XRC_TGT || IS_ERR(qp))
+ return qp;
+
+ xrc_qp = create_xrc_qp_user(qp, attr);
+ if (IS_ERR(xrc_qp)) {
+ ib_destroy_qp(qp);
+ return xrc_qp;
+ }
+
+ xrc_qp->uobject = uobj;
+ return xrc_qp;
+}
+EXPORT_SYMBOL(ib_create_qp_user);
+
+void ib_qp_usecnt_inc(struct ib_qp *qp)
+{
+ if (qp->pd)
+ atomic_inc(&qp->pd->usecnt);
+ if (qp->send_cq)
+ atomic_inc(&qp->send_cq->usecnt);
+ if (qp->recv_cq)
+ atomic_inc(&qp->recv_cq->usecnt);
+ if (qp->srq)
+ atomic_inc(&qp->srq->usecnt);
+ if (qp->rwq_ind_tbl)
+ atomic_inc(&qp->rwq_ind_tbl->usecnt);
+}
+EXPORT_SYMBOL(ib_qp_usecnt_inc);
+
+void ib_qp_usecnt_dec(struct ib_qp *qp)
+{
+ if (qp->rwq_ind_tbl)
+ atomic_dec(&qp->rwq_ind_tbl->usecnt);
+ if (qp->srq)
+ atomic_dec(&qp->srq->usecnt);
+ if (qp->recv_cq)
+ atomic_dec(&qp->recv_cq->usecnt);
+ if (qp->send_cq)
+ atomic_dec(&qp->send_cq->usecnt);
+ if (qp->pd)
+ atomic_dec(&qp->pd->usecnt);
+}
+EXPORT_SYMBOL(ib_qp_usecnt_dec);
+
+struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd,
+ struct ib_qp_init_attr *qp_init_attr,
+ const char *caller)
+{
+ struct ib_device *device = pd->device;
+ struct ib_qp *qp;
+ int ret;
+
+ /*
+ * If the caller is using the RDMA API, calculate the resources
+ * needed for the RDMA READ/WRITE operations.
+ *
+ * Note that these callers need to pass in a port number.
+ */
+ if (qp_init_attr->cap.max_rdma_ctxs)
+ rdma_rw_init_qp(device, qp_init_attr);
+
+ qp = create_qp(device, pd, qp_init_attr, NULL, NULL, caller);
+ if (IS_ERR(qp))
+ return qp;
+
+ ib_qp_usecnt_inc(qp);
+
+ if (qp_init_attr->cap.max_rdma_ctxs) {
+ ret = rdma_rw_init_mrs(qp, qp_init_attr);
+ if (ret)
+ goto err;
}
+ /*
+ * Note: all hw drivers guarantee that max_send_sge is lower than
+ * the device RDMA WRITE SGE limit but not all hw drivers ensure that
+ * max_send_sge <= max_sge_rd.
+ */
+ qp->max_write_sge = qp_init_attr->cap.max_send_sge;
+ qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
+ device->attrs.max_sge_rd);
+ if (qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN)
+ qp->integrity_en = true;
+
return qp;
+
+err:
+ ib_destroy_qp(qp);
+ return ERR_PTR(ret);
+
}
-EXPORT_SYMBOL(ib_create_qp);
+EXPORT_SYMBOL(ib_create_qp_kernel);
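In-kernel consumers are expected to reach ib_create_qp_kernel() through an ib_create_qp() wrapper that supplies KBUILD_MODNAME as @caller. A hedged sketch of creating an RC QP over an existing CQ under that assumption; the function name and the capability numbers are placeholders:

#include <rdma/ib_verbs.h>

/* Sketch: minimal RC QP creation for a kernel ULP. ib_create_qp() is
 * assumed to wrap ib_create_qp_kernel() with the module name as the
 * restrack owner; tear down with ib_destroy_qp() when done.
 */
static struct ib_qp *example_create_rc_qp(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_qp_init_attr attr = {
		.send_cq	= cq,
		.recv_cq	= cq,
		.qp_type	= IB_QPT_RC,
		.sq_sig_type	= IB_SIGNAL_REQ_WR,
		.cap = {
			.max_send_wr	= 16,
			.max_recv_wr	= 16,
			.max_send_sge	= 1,
			.max_recv_sge	= 1,
		},
	};

	return ib_create_qp(pd, &attr);
}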
static const struct {
int valid;
enum ib_qp_attr_mask req_param[IB_QPT_MAX];
- enum ib_qp_attr_mask req_param_add_eth[IB_QPT_MAX];
enum ib_qp_attr_mask opt_param[IB_QPT_MAX];
- enum ib_qp_attr_mask opt_param_add_eth[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
[IB_QPS_RESET] = {
[IB_QPS_RESET] = { .valid = 1 },
@@ -700,12 +1474,6 @@ static const struct {
IB_QP_MAX_DEST_RD_ATOMIC |
IB_QP_MIN_RNR_TIMER),
},
- .req_param_add_eth = {
- [IB_QPT_RC] = (IB_QP_SMAC),
- [IB_QPT_UC] = (IB_QP_SMAC),
- [IB_QPT_XRC_INI] = (IB_QP_SMAC),
- [IB_QPT_XRC_TGT] = (IB_QP_SMAC)
- },
.opt_param = {
[IB_QPT_UD] = (IB_QP_PKEY_INDEX |
IB_QP_QKEY),
@@ -726,21 +1494,7 @@ static const struct {
[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
IB_QP_QKEY),
},
- .opt_param_add_eth = {
- [IB_QPT_RC] = (IB_QP_ALT_SMAC |
- IB_QP_VID |
- IB_QP_ALT_VID),
- [IB_QPT_UC] = (IB_QP_ALT_SMAC |
- IB_QP_VID |
- IB_QP_ALT_VID),
- [IB_QPT_XRC_INI] = (IB_QP_ALT_SMAC |
- IB_QP_VID |
- IB_QP_ALT_VID),
- [IB_QPT_XRC_TGT] = (IB_QP_ALT_SMAC |
- IB_QP_VID |
- IB_QP_ALT_VID)
- }
- }
+ },
},
[IB_QPS_RTR] = {
[IB_QPS_RESET] = { .valid = 1 },
@@ -790,6 +1544,7 @@ static const struct {
IB_QP_QKEY),
[IB_QPT_GSI] = (IB_QP_CUR_STATE |
IB_QP_QKEY),
+ [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
}
}
},
@@ -823,6 +1578,7 @@ static const struct {
IB_QP_QKEY),
[IB_QPT_GSI] = (IB_QP_CUR_STATE |
IB_QP_QKEY),
+ [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
}
},
[IB_QPS_SQD] = {
@@ -941,91 +1697,328 @@ static const struct {
}
};
-int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
- enum ib_qp_type type, enum ib_qp_attr_mask mask,
- enum rdma_link_layer ll)
+bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
+ enum ib_qp_type type, enum ib_qp_attr_mask mask)
{
enum ib_qp_attr_mask req_param, opt_param;
- if (cur_state < 0 || cur_state > IB_QPS_ERR ||
- next_state < 0 || next_state > IB_QPS_ERR)
- return 0;
-
if (mask & IB_QP_CUR_STATE &&
cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
- return 0;
+ return false;
if (!qp_state_table[cur_state][next_state].valid)
- return 0;
+ return false;
req_param = qp_state_table[cur_state][next_state].req_param[type];
opt_param = qp_state_table[cur_state][next_state].opt_param[type];
- if (ll == IB_LINK_LAYER_ETHERNET) {
- req_param |= qp_state_table[cur_state][next_state].
- req_param_add_eth[type];
- opt_param |= qp_state_table[cur_state][next_state].
- opt_param_add_eth[type];
- }
-
if ((mask & req_param) != req_param)
- return 0;
+ return false;
if (mask & ~(req_param | opt_param | IB_QP_STATE))
- return 0;
+ return false;
- return 1;
+ return true;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
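Drivers call ib_modify_qp_is_ok() from their modify_qp handlers to validate the requested state transition and attribute mask before touching hardware. A small sketch of that kind of check, assuming the canonical RESET->INIT requirement for RC QPs (pkey index, port and access flags); the function name is a hypothetical example:

#include <rdma/ib_verbs.h>

/* Sketch: the sort of validation a driver's modify_qp handler performs.
 * For an RC QP, RESET->INIT requires at least IB_QP_PKEY_INDEX,
 * IB_QP_PORT and IB_QP_ACCESS_FLAGS in the mask.
 */
static int example_check_reset_to_init(enum ib_qp_attr_mask mask)
{
	if (!ib_modify_qp_is_ok(IB_QPS_RESET, IB_QPS_INIT, IB_QPT_RC,
				mask | IB_QP_STATE))
		return -EINVAL;

	return 0;
}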
-int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
- struct ib_qp_attr *qp_attr, int *qp_attr_mask)
+/**
+ * ib_resolve_eth_dmac - Resolve destination mac address
+ * @device: Device to consider
+ * @ah_attr: address handle attribute which describes the
+ * source and destination parameters
+ * ib_resolve_eth_dmac() resolves the destination mac address and L3 hop limit.
+ * It returns 0 on success or an appropriate error code. It initializes the
+ * necessary ah_attr fields when the call is successful.
+ */
+static int ib_resolve_eth_dmac(struct ib_device *device,
+ struct rdma_ah_attr *ah_attr)
+{
+ int ret = 0;
+
+ if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
+ if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
+ __be32 addr = 0;
+
+ memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
+ ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac);
+ } else {
+ ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
+ (char *)ah_attr->roce.dmac);
+ }
+ } else {
+ ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
+ }
+ return ret;
+}
+
+static bool is_qp_type_connected(const struct ib_qp *qp)
{
- int ret = 0;
- union ib_gid sgid;
+ return (qp->qp_type == IB_QPT_UC ||
+ qp->qp_type == IB_QPT_RC ||
+ qp->qp_type == IB_QPT_XRC_INI ||
+ qp->qp_type == IB_QPT_XRC_TGT);
+}
+
+/*
+ * IB core internal function to perform QP attributes modification.
+ */
+static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ u32 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+ const struct ib_gid_attr *old_sgid_attr_av;
+ const struct ib_gid_attr *old_sgid_attr_alt_av;
+ int ret;
- if ((*qp_attr_mask & IB_QP_AV) &&
- (rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))) {
- ret = ib_query_gid(qp->device, qp_attr->ah_attr.port_num,
- qp_attr->ah_attr.grh.sgid_index, &sgid);
+ attr->xmit_slave = NULL;
+ if (attr_mask & IB_QP_AV) {
+ ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr,
+ &old_sgid_attr_av);
if (ret)
+ return ret;
+
+ if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
+ is_qp_type_connected(qp)) {
+ struct net_device *slave;
+
+ /*
+ * If the user provided the qp_attr then we have to
+ * resolve it. Kernel users have to provide already
+ * resolved rdma_ah_attr's.
+ */
+ if (udata) {
+ ret = ib_resolve_eth_dmac(qp->device,
+ &attr->ah_attr);
+ if (ret)
+ goto out_av;
+ }
+ slave = rdma_lag_get_ah_roce_slave(qp->device,
+ &attr->ah_attr,
+ GFP_KERNEL);
+ if (IS_ERR(slave)) {
+ ret = PTR_ERR(slave);
+ goto out_av;
+ }
+ attr->xmit_slave = slave;
+ }
+ }
+ if (attr_mask & IB_QP_ALT_PATH) {
+ /*
+ * FIXME: This does not track the migration state, so if the
+ * user loads a new alternate path after the HW has migrated
+ * from primary->alternate we will keep the wrong
+ * references. This is OK for IB because the reference
+ * counting does not serve any functional purpose.
+ */
+ ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr,
+ &old_sgid_attr_alt_av);
+ if (ret)
+ goto out_av;
+
+ /*
+ * Today the core code can only handle alternate paths and APM
+ * for IB. Ban them in roce mode.
+ */
+ if (!(rdma_protocol_ib(qp->device,
+ attr->alt_ah_attr.port_num) &&
+ rdma_protocol_ib(qp->device, port))) {
+ ret = -EINVAL;
goto out;
- if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
- rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw, qp_attr->ah_attr.dmac);
- rdma_get_ll_mac((struct in6_addr *)sgid.raw, qp_attr->smac);
- if (!(*qp_attr_mask & IB_QP_VID))
- qp_attr->vlan_id = rdma_get_vlan_id(&sgid);
- } else {
- ret = rdma_addr_find_dmac_by_grh(&sgid, &qp_attr->ah_attr.grh.dgid,
- qp_attr->ah_attr.dmac, &qp_attr->vlan_id);
- if (ret)
- goto out;
- ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr->smac, NULL);
- if (ret)
- goto out;
}
- *qp_attr_mask |= IB_QP_SMAC;
- if (qp_attr->vlan_id < 0xFFFF)
- *qp_attr_mask |= IB_QP_VID;
}
+
+ if (rdma_ib_or_roce(qp->device, port)) {
+ if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
+ dev_warn(&qp->device->dev,
+ "%s rq_psn overflow, masking to 24 bits\n",
+ __func__);
+ attr->rq_psn &= 0xffffff;
+ }
+
+ if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
+ dev_warn(&qp->device->dev,
+ " %s sq_psn overflow, masking to 24 bits\n",
+ __func__);
+ attr->sq_psn &= 0xffffff;
+ }
+ }
+
+ /*
+ * Bind this qp to a counter automatically based on the rdma counter
+ * rules. This only set in RST2INIT with port specified
+ */
+ if (!qp->counter && (attr_mask & IB_QP_PORT) &&
+ ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT))
+ rdma_counter_bind_qp_auto(qp, attr->port_num);
+
+ ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
+ if (ret)
+ goto out;
+
+ if (attr_mask & IB_QP_PORT)
+ qp->port = attr->port_num;
+ if (attr_mask & IB_QP_AV)
+ qp->av_sgid_attr =
+ rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr);
+ if (attr_mask & IB_QP_ALT_PATH)
+ qp->alt_path_sgid_attr = rdma_update_sgid_attr(
+ &attr->alt_ah_attr, qp->alt_path_sgid_attr);
+
out:
+ if (attr_mask & IB_QP_ALT_PATH)
+ rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av);
+out_av:
+ if (attr_mask & IB_QP_AV) {
+ rdma_lag_put_ah_roce_slave(attr->xmit_slave);
+ rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av);
+ }
return ret;
}
-EXPORT_SYMBOL(ib_resolve_eth_l2_attrs);
+/**
+ * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
+ * @ib_qp: The QP to modify.
+ * @attr: On input, specifies the QP attributes to modify. On output,
+ * the current values of selected QP attributes are returned.
+ * @attr_mask: A bit-mask used to specify which attributes of the QP
+ * are being modified.
+ * @udata: pointer to user's input output buffer information
+ * It returns 0 on success and returns appropriate error code on error.
+ */
+int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ return _ib_modify_qp(ib_qp->real_qp, attr, attr_mask, udata);
+}
+EXPORT_SYMBOL(ib_modify_qp_with_udata);
+
+static void ib_get_width_and_speed(u32 netdev_speed, u32 lanes,
+ u16 *speed, u8 *width)
+{
+ if (!lanes) {
+ if (netdev_speed <= SPEED_1000) {
+ *width = IB_WIDTH_1X;
+ *speed = IB_SPEED_SDR;
+ } else if (netdev_speed <= SPEED_10000) {
+ *width = IB_WIDTH_1X;
+ *speed = IB_SPEED_FDR10;
+ } else if (netdev_speed <= SPEED_20000) {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_DDR;
+ } else if (netdev_speed <= SPEED_25000) {
+ *width = IB_WIDTH_1X;
+ *speed = IB_SPEED_EDR;
+ } else if (netdev_speed <= SPEED_40000) {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_FDR10;
+ } else if (netdev_speed <= SPEED_50000) {
+ *width = IB_WIDTH_2X;
+ *speed = IB_SPEED_EDR;
+ } else if (netdev_speed <= SPEED_100000) {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_EDR;
+ } else if (netdev_speed <= SPEED_200000) {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_HDR;
+ } else {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_NDR;
+ }
+
+ return;
+ }
+
+ switch (lanes) {
+ case 1:
+ *width = IB_WIDTH_1X;
+ break;
+ case 2:
+ *width = IB_WIDTH_2X;
+ break;
+ case 4:
+ *width = IB_WIDTH_4X;
+ break;
+ case 8:
+ *width = IB_WIDTH_8X;
+ break;
+ case 12:
+ *width = IB_WIDTH_12X;
+ break;
+ default:
+ *width = IB_WIDTH_1X;
+ }
+
+ switch (netdev_speed / lanes) {
+ case SPEED_2500:
+ *speed = IB_SPEED_SDR;
+ break;
+ case SPEED_5000:
+ *speed = IB_SPEED_DDR;
+ break;
+ case SPEED_10000:
+ *speed = IB_SPEED_FDR10;
+ break;
+ case SPEED_14000:
+ *speed = IB_SPEED_FDR;
+ break;
+ case SPEED_25000:
+ *speed = IB_SPEED_EDR;
+ break;
+ case SPEED_50000:
+ *speed = IB_SPEED_HDR;
+ break;
+ case SPEED_100000:
+ *speed = IB_SPEED_NDR;
+ break;
+ default:
+ *speed = IB_SPEED_SDR;
+ }
+}
+
+int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width)
+{
+ int rc;
+ u32 netdev_speed;
+ struct net_device *netdev;
+ struct ethtool_link_ksettings lksettings = {};
+
+ if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
+ return -EINVAL;
+
+ netdev = ib_device_get_netdev(dev, port_num);
+ if (!netdev)
+ return -ENODEV;
+
+ rtnl_lock();
+ rc = __ethtool_get_link_ksettings(netdev, &lksettings);
+ rtnl_unlock();
+
+ dev_put(netdev);
+
+ if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) {
+ netdev_speed = lksettings.base.speed;
+ } else {
+ netdev_speed = SPEED_1000;
+ if (rc)
+ pr_warn("%s speed is unknown, defaulting to %u\n",
+ netdev->name, netdev_speed);
+ }
+
+ ib_get_width_and_speed(netdev_speed, lksettings.lanes,
+ speed, width);
+
+ return 0;
+}
+EXPORT_SYMBOL(ib_get_eth_speed);
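RoCE drivers typically call ib_get_eth_speed() from their query_port handler to translate the underlying netdev link into the closest IB width/speed pair. A hedged sketch of such a callback; the function name is hypothetical, and IB_PORT_PHYS_STATE_LINK_UP is assumed from the mainline headers:

#include <linux/string.h>
#include <rdma/ib_verbs.h>

/* Sketch of a hypothetical RoCE driver's query_port callback using the
 * helper above to fill in active_speed/active_width.
 */
static int example_query_port(struct ib_device *ibdev, u32 port_num,
			      struct ib_port_attr *attr)
{
	int ret;

	memset(attr, 0, sizeof(*attr));

	ret = ib_get_eth_speed(ibdev, port_num,
			       &attr->active_speed, &attr->active_width);
	if (ret)
		return ret;

	attr->state = IB_PORT_ACTIVE;
	attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	return 0;
}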
int ib_modify_qp(struct ib_qp *qp,
struct ib_qp_attr *qp_attr,
int qp_attr_mask)
{
- int ret;
-
- ret = ib_resolve_eth_l2_attrs(qp, qp_attr, &qp_attr_mask);
- if (ret)
- return ret;
-
- return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
+ return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
@@ -1034,9 +2027,12 @@ int ib_query_qp(struct ib_qp *qp,
int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr)
{
- return qp->device->query_qp ?
- qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
- -ENOSYS;
+ qp_attr->ah_attr.grh.sgid_attr = NULL;
+ qp_attr->alt_ah_attr.grh.sgid_attr = NULL;
+
+ return qp->device->ops.query_qp ?
+ qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask,
+ qp_init_attr) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(ib_query_qp);
@@ -1049,11 +2045,13 @@ int ib_close_qp(struct ib_qp *qp)
if (real_qp == qp)
return -EINVAL;
- spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
+ spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags);
list_del(&qp->open_list);
- spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
+ spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);
atomic_dec(&real_qp->usecnt);
+ if (qp->qp_sec)
+ ib_close_shared_qp_security(qp->qp_sec);
kfree(qp);
return 0;
@@ -1068,156 +2066,221 @@ static int __ib_destroy_shared_qp(struct ib_qp *qp)
real_qp = qp->real_qp;
xrcd = real_qp->xrcd;
-
- mutex_lock(&xrcd->tgt_qp_mutex);
+ down_write(&xrcd->tgt_qps_rwsem);
ib_close_qp(qp);
if (atomic_read(&real_qp->usecnt) == 0)
- list_del(&real_qp->xrcd_list);
+ xa_erase(&xrcd->tgt_qps, real_qp->qp_num);
else
real_qp = NULL;
- mutex_unlock(&xrcd->tgt_qp_mutex);
+ up_write(&xrcd->tgt_qps_rwsem);
if (real_qp) {
ret = ib_destroy_qp(real_qp);
if (!ret)
atomic_dec(&xrcd->usecnt);
- else
- __ib_insert_xrcd_qp(xrcd, real_qp);
}
return 0;
}
-int ib_destroy_qp(struct ib_qp *qp)
+int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
{
- struct ib_pd *pd;
- struct ib_cq *scq, *rcq;
- struct ib_srq *srq;
+ const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr;
+ const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr;
+ struct ib_qp_security *sec;
int ret;
+ WARN_ON_ONCE(qp->mrs_used > 0);
+
if (atomic_read(&qp->usecnt))
return -EBUSY;
if (qp->real_qp != qp)
return __ib_destroy_shared_qp(qp);
- pd = qp->pd;
- scq = qp->send_cq;
- rcq = qp->recv_cq;
- srq = qp->srq;
+ sec = qp->qp_sec;
+ if (sec)
+ ib_destroy_qp_security_begin(sec);
- ret = qp->device->destroy_qp(qp);
- if (!ret) {
- if (pd)
- atomic_dec(&pd->usecnt);
- if (scq)
- atomic_dec(&scq->usecnt);
- if (rcq)
- atomic_dec(&rcq->usecnt);
- if (srq)
- atomic_dec(&srq->usecnt);
+ if (!qp->uobject)
+ rdma_rw_cleanup_mrs(qp);
+
+ rdma_counter_unbind_qp(qp, qp->port, true);
+ ret = qp->device->ops.destroy_qp(qp, udata);
+ if (ret) {
+ if (sec)
+ ib_destroy_qp_security_abort(sec);
+ return ret;
}
+ if (alt_path_sgid_attr)
+ rdma_put_gid_attr(alt_path_sgid_attr);
+ if (av_sgid_attr)
+ rdma_put_gid_attr(av_sgid_attr);
+
+ ib_qp_usecnt_dec(qp);
+ if (sec)
+ ib_destroy_qp_security_end(sec);
+
+ rdma_restrack_del(&qp->res);
+ kfree(qp);
return ret;
}
-EXPORT_SYMBOL(ib_destroy_qp);
+EXPORT_SYMBOL(ib_destroy_qp_user);
/* Completion queues */
-struct ib_cq *ib_create_cq(struct ib_device *device,
- ib_comp_handler comp_handler,
- void (*event_handler)(struct ib_event *, void *),
- void *cq_context,
- const struct ib_cq_init_attr *cq_attr)
+struct ib_cq *__ib_create_cq(struct ib_device *device,
+ ib_comp_handler comp_handler,
+ void (*event_handler)(struct ib_event *, void *),
+ void *cq_context,
+ const struct ib_cq_init_attr *cq_attr,
+ const char *caller)
{
struct ib_cq *cq;
+ int ret;
+
+ cq = rdma_zalloc_drv_obj(device, ib_cq);
+ if (!cq)
+ return ERR_PTR(-ENOMEM);
+
+ cq->device = device;
+ cq->uobject = NULL;
+ cq->comp_handler = comp_handler;
+ cq->event_handler = event_handler;
+ cq->cq_context = cq_context;
+ atomic_set(&cq->usecnt, 0);
- cq = device->create_cq(device, cq_attr, NULL, NULL);
+ rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
+ rdma_restrack_set_name(&cq->res, caller);
- if (!IS_ERR(cq)) {
- cq->device = device;
- cq->uobject = NULL;
- cq->comp_handler = comp_handler;
- cq->event_handler = event_handler;
- cq->cq_context = cq_context;
- atomic_set(&cq->usecnt, 0);
+ ret = device->ops.create_cq(cq, cq_attr, NULL);
+ if (ret) {
+ rdma_restrack_put(&cq->res);
+ kfree(cq);
+ return ERR_PTR(ret);
}
+ rdma_restrack_add(&cq->res);
return cq;
}
-EXPORT_SYMBOL(ib_create_cq);
+EXPORT_SYMBOL(__ib_create_cq);
-int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
- return cq->device->modify_cq ?
- cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
+ if (cq->shared)
+ return -EOPNOTSUPP;
+
+ return cq->device->ops.modify_cq ?
+ cq->device->ops.modify_cq(cq, cq_count,
+ cq_period) : -EOPNOTSUPP;
}
-EXPORT_SYMBOL(ib_modify_cq);
+EXPORT_SYMBOL(rdma_set_cq_moderation);
-int ib_destroy_cq(struct ib_cq *cq)
+int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
{
+ int ret;
+
+ if (WARN_ON_ONCE(cq->shared))
+ return -EOPNOTSUPP;
+
if (atomic_read(&cq->usecnt))
return -EBUSY;
- return cq->device->destroy_cq(cq);
+ ret = cq->device->ops.destroy_cq(cq, udata);
+ if (ret)
+ return ret;
+
+ rdma_restrack_del(&cq->res);
+ kfree(cq);
+ return ret;
}
-EXPORT_SYMBOL(ib_destroy_cq);
+EXPORT_SYMBOL(ib_destroy_cq_user);
int ib_resize_cq(struct ib_cq *cq, int cqe)
{
- return cq->device->resize_cq ?
- cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
+ if (cq->shared)
+ return -EOPNOTSUPP;
+
+ return cq->device->ops.resize_cq ?
+ cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(ib_resize_cq);
/* Memory regions */
-struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
+struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags)
{
struct ib_mr *mr;
- int err;
- err = ib_check_mr_access(mr_access_flags);
- if (err)
- return ERR_PTR(err);
+ if (access_flags & IB_ACCESS_ON_DEMAND) {
+ if (!(pd->device->attrs.kernel_cap_flags &
+ IBK_ON_DEMAND_PAGING)) {
+ pr_debug("ODP support not available\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
- mr = pd->device->get_dma_mr(pd, mr_access_flags);
+ mr = pd->device->ops.reg_user_mr(pd, start, length, virt_addr,
+ access_flags, NULL, NULL);
- if (!IS_ERR(mr)) {
- mr->device = pd->device;
- mr->pd = pd;
- mr->uobject = NULL;
- atomic_inc(&pd->usecnt);
- atomic_set(&mr->usecnt, 0);
- }
+ if (IS_ERR(mr))
+ return mr;
+
+ mr->device = pd->device;
+ mr->type = IB_MR_TYPE_USER;
+ mr->pd = pd;
+ mr->dm = NULL;
+ atomic_inc(&pd->usecnt);
+ mr->iova = virt_addr;
+ mr->length = length;
+
+ rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
+ rdma_restrack_parent_name(&mr->res, &pd->res);
+ rdma_restrack_add(&mr->res);
return mr;
}
-EXPORT_SYMBOL(ib_get_dma_mr);
+EXPORT_SYMBOL(ib_reg_user_mr);
-int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
+int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
+ u32 flags, struct ib_sge *sg_list, u32 num_sge)
{
- return mr->device->query_mr ?
- mr->device->query_mr(mr, mr_attr) : -ENOSYS;
+ if (!pd->device->ops.advise_mr)
+ return -EOPNOTSUPP;
+
+ if (!num_sge)
+ return 0;
+
+ return pd->device->ops.advise_mr(pd, advice, flags, sg_list, num_sge,
+ NULL);
}
-EXPORT_SYMBOL(ib_query_mr);
+EXPORT_SYMBOL(ib_advise_mr);
-int ib_dereg_mr(struct ib_mr *mr)
+int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
{
- struct ib_pd *pd;
+ struct ib_pd *pd = mr->pd;
+ struct ib_dm *dm = mr->dm;
+ struct ib_dmah *dmah = mr->dmah;
+ struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
int ret;
- if (atomic_read(&mr->usecnt))
- return -EBUSY;
-
- pd = mr->pd;
- ret = mr->device->dereg_mr(mr);
- if (!ret)
+ trace_mr_dereg(mr);
+ rdma_restrack_del(&mr->res);
+ ret = mr->device->ops.dereg_mr(mr, udata);
+ if (!ret) {
atomic_dec(&pd->usecnt);
+ if (dm)
+ atomic_dec(&dm->usecnt);
+ if (dmah)
+ atomic_dec(&dmah->usecnt);
+ kfree(sig_attrs);
+ }
return ret;
}
-EXPORT_SYMBOL(ib_dereg_mr);
+EXPORT_SYMBOL(ib_dereg_mr_user);
/**
* ib_alloc_mr() - Allocates a memory region
@@ -1231,241 +2294,891 @@ EXPORT_SYMBOL(ib_dereg_mr);
* max_num_sg * used_page_size.
*
*/
-struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
+struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg)
{
struct ib_mr *mr;
- if (!pd->device->alloc_mr)
- return ERR_PTR(-ENOSYS);
+ if (!pd->device->ops.alloc_mr) {
+ mr = ERR_PTR(-EOPNOTSUPP);
+ goto out;
+ }
- mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
- if (!IS_ERR(mr)) {
- mr->device = pd->device;
- mr->pd = pd;
- mr->uobject = NULL;
- atomic_inc(&pd->usecnt);
- atomic_set(&mr->usecnt, 0);
+ if (mr_type == IB_MR_TYPE_INTEGRITY) {
+ WARN_ON_ONCE(1);
+ mr = ERR_PTR(-EINVAL);
+ goto out;
}
+ mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg);
+ if (IS_ERR(mr))
+ goto out;
+
+ mr->device = pd->device;
+ mr->pd = pd;
+ mr->dm = NULL;
+ mr->uobject = NULL;
+ atomic_inc(&pd->usecnt);
+ mr->need_inval = false;
+ mr->type = mr_type;
+ mr->sig_attrs = NULL;
+
+ rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
+ rdma_restrack_parent_name(&mr->res, &pd->res);
+ rdma_restrack_add(&mr->res);
+out:
+ trace_mr_alloc(pd, mr_type, max_num_sg, mr);
return mr;
}
EXPORT_SYMBOL(ib_alloc_mr);
-struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device,
- int max_page_list_len)
+/**
+ * ib_alloc_mr_integrity() - Allocates an integrity memory region
+ * @pd: protection domain associated with the region
+ * @max_num_data_sg: maximum data sg entries available for registration
+ * @max_num_meta_sg: maximum metadata sg entries available for
+ * registration
+ *
+ * Notes:
+ * Memory registration page/sg lists must not exceed max_num_data_sg;
+ * likewise, the integrity page/sg lists must not exceed max_num_meta_sg.
+ *
+ */
+struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
+ u32 max_num_data_sg,
+ u32 max_num_meta_sg)
{
- struct ib_fast_reg_page_list *page_list;
+ struct ib_mr *mr;
+ struct ib_sig_attrs *sig_attrs;
+
+ if (!pd->device->ops.alloc_mr_integrity ||
+ !pd->device->ops.map_mr_sg_pi) {
+ mr = ERR_PTR(-EOPNOTSUPP);
+ goto out;
+ }
- if (!device->alloc_fast_reg_page_list)
- return ERR_PTR(-ENOSYS);
+ if (!max_num_meta_sg) {
+ mr = ERR_PTR(-EINVAL);
+ goto out;
+ }
- page_list = device->alloc_fast_reg_page_list(device, max_page_list_len);
+ sig_attrs = kzalloc(sizeof(struct ib_sig_attrs), GFP_KERNEL);
+ if (!sig_attrs) {
+ mr = ERR_PTR(-ENOMEM);
+ goto out;
+ }
- if (!IS_ERR(page_list)) {
- page_list->device = device;
- page_list->max_page_list_len = max_page_list_len;
+ mr = pd->device->ops.alloc_mr_integrity(pd, max_num_data_sg,
+ max_num_meta_sg);
+ if (IS_ERR(mr)) {
+ kfree(sig_attrs);
+ goto out;
}
- return page_list;
+ mr->device = pd->device;
+ mr->pd = pd;
+ mr->dm = NULL;
+ mr->uobject = NULL;
+ atomic_inc(&pd->usecnt);
+ mr->need_inval = false;
+ mr->type = IB_MR_TYPE_INTEGRITY;
+ mr->sig_attrs = sig_attrs;
+
+ rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
+ rdma_restrack_parent_name(&mr->res, &pd->res);
+ rdma_restrack_add(&mr->res);
+out:
+ trace_mr_integ_alloc(pd, max_num_data_sg, max_num_meta_sg, mr);
+ return mr;
}
-EXPORT_SYMBOL(ib_alloc_fast_reg_page_list);
+EXPORT_SYMBOL(ib_alloc_mr_integrity);
+
+/* Multicast groups */
-void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
+static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
{
- page_list->device->free_fast_reg_page_list(page_list);
-}
-EXPORT_SYMBOL(ib_free_fast_reg_page_list);
+ struct ib_qp_init_attr init_attr = {};
+ struct ib_qp_attr attr = {};
+ int num_eth_ports = 0;
+ unsigned int port;
+
+ /* If QP state >= init, it is assigned to a port and we can check this
+ * port only.
+ */
+ if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
+ if (attr.qp_state >= IB_QPS_INIT) {
+ if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
+ IB_LINK_LAYER_INFINIBAND)
+ return true;
+ goto lid_check;
+ }
+ }
-/* Memory windows */
+ /* Can't get a quick answer, iterate over all ports */
+ rdma_for_each_port(qp->device, port)
+ if (rdma_port_get_link_layer(qp->device, port) !=
+ IB_LINK_LAYER_INFINIBAND)
+ num_eth_ports++;
+
+ /* If we have at least one Ethernet port, RoCE annex declares that
+ * multicast LID should be ignored. We can't tell at this step if the
+ * QP belongs to an IB or Ethernet port.
+ */
+ if (num_eth_ports)
+ return true;
+
+ /* If all the ports are IB, we can check according to IB spec. */
+lid_check:
+ return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
+ lid == be16_to_cpu(IB_LID_PERMISSIVE));
+}
-struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
+int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
- struct ib_mw *mw;
+ int ret;
- if (!pd->device->alloc_mw)
- return ERR_PTR(-ENOSYS);
+ if (!qp->device->ops.attach_mcast)
+ return -EOPNOTSUPP;
- mw = pd->device->alloc_mw(pd, type);
- if (!IS_ERR(mw)) {
- mw->device = pd->device;
- mw->pd = pd;
- mw->uobject = NULL;
- mw->type = type;
- atomic_inc(&pd->usecnt);
- }
+ if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
+ qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
+ return -EINVAL;
- return mw;
+ ret = qp->device->ops.attach_mcast(qp, gid, lid);
+ if (!ret)
+ atomic_inc(&qp->usecnt);
+ return ret;
}
-EXPORT_SYMBOL(ib_alloc_mw);
+EXPORT_SYMBOL(ib_attach_mcast);
-int ib_dealloc_mw(struct ib_mw *mw)
+int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
- struct ib_pd *pd;
int ret;
- pd = mw->pd;
- ret = mw->device->dealloc_mw(mw);
- if (!ret)
- atomic_dec(&pd->usecnt);
+ if (!qp->device->ops.detach_mcast)
+ return -EOPNOTSUPP;
+
+ if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
+ qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
+ return -EINVAL;
+ ret = qp->device->ops.detach_mcast(qp, gid, lid);
+ if (!ret)
+ atomic_dec(&qp->usecnt);
return ret;
}
-EXPORT_SYMBOL(ib_dealloc_mw);
+EXPORT_SYMBOL(ib_detach_mcast);
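
A minimal sketch of how a UD consumer might use the two verbs above (not part of this patch; the example_* naming is hypothetical). Note that attaching the QP at the HCA is distinct from joining the group with the SA, which consumers typically do separately (e.g. via ib_sa_join_multicast()):

static int example_join_mcast(struct ib_qp *ud_qp, union ib_gid *mgid, u16 mlid)
{
	int ret;

	/* Attach the UD QP to the multicast GID/LID at the HCA. */
	ret = ib_attach_mcast(ud_qp, mgid, mlid);
	if (ret)
		return ret;

	/* ... receive multicast traffic on ud_qp ... */

	/* Detach again on teardown. */
	return ib_detach_mcast(ud_qp, mgid, mlid);
}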
+
+/**
+ * ib_alloc_xrcd_user - Allocates an XRC domain.
+ * @device: The device on which to allocate the XRC domain.
+ * @inode: inode to connect XRCD
+ * @udata: Valid user data or NULL for kernel object
+ */
+struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
+ struct inode *inode, struct ib_udata *udata)
+{
+ struct ib_xrcd *xrcd;
+ int ret;
+
+ if (!device->ops.alloc_xrcd)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ xrcd = rdma_zalloc_drv_obj(device, ib_xrcd);
+ if (!xrcd)
+ return ERR_PTR(-ENOMEM);
-/* "Fast" memory regions */
+ xrcd->device = device;
+ xrcd->inode = inode;
+ atomic_set(&xrcd->usecnt, 0);
+ init_rwsem(&xrcd->tgt_qps_rwsem);
+ xa_init(&xrcd->tgt_qps);
-struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
- int mr_access_flags,
- struct ib_fmr_attr *fmr_attr)
+ ret = device->ops.alloc_xrcd(xrcd, udata);
+ if (ret)
+ goto err;
+ return xrcd;
+err:
+ kfree(xrcd);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(ib_alloc_xrcd_user);
+
+/**
+ * ib_dealloc_xrcd_user - Deallocates an XRC domain.
+ * @xrcd: The XRC domain to deallocate.
+ * @udata: Valid user data or NULL for kernel object
+ */
+int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata)
{
- struct ib_fmr *fmr;
+ int ret;
- if (!pd->device->alloc_fmr)
- return ERR_PTR(-ENOSYS);
+ if (atomic_read(&xrcd->usecnt))
+ return -EBUSY;
- fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
- if (!IS_ERR(fmr)) {
- fmr->device = pd->device;
- fmr->pd = pd;
+ WARN_ON(!xa_empty(&xrcd->tgt_qps));
+ ret = xrcd->device->ops.dealloc_xrcd(xrcd, udata);
+ if (ret)
+ return ret;
+ kfree(xrcd);
+ return ret;
+}
+EXPORT_SYMBOL(ib_dealloc_xrcd_user);
+
+/**
+ * ib_create_wq - Creates a WQ associated with the specified protection
+ * domain.
+ * @pd: The protection domain associated with the WQ.
+ * @wq_attr: A list of initial attributes required to create the
+ * WQ. If WQ creation succeeds, then the attributes are updated to
+ * the actual capabilities of the created WQ.
+ *
+ * wq_attr->max_wr and wq_attr->max_sge determine
+ * the requested size of the WQ, and are set to the actual values allocated
+ * on return.
+ * If ib_create_wq() succeeds, then max_wr and max_sge will always be
+ * at least as large as the requested values.
+ */
+struct ib_wq *ib_create_wq(struct ib_pd *pd,
+ struct ib_wq_init_attr *wq_attr)
+{
+ struct ib_wq *wq;
+
+ if (!pd->device->ops.create_wq)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ wq = pd->device->ops.create_wq(pd, wq_attr, NULL);
+ if (!IS_ERR(wq)) {
+ wq->event_handler = wq_attr->event_handler;
+ wq->wq_context = wq_attr->wq_context;
+ wq->wq_type = wq_attr->wq_type;
+ wq->cq = wq_attr->cq;
+ wq->device = pd->device;
+ wq->pd = pd;
+ wq->uobject = NULL;
atomic_inc(&pd->usecnt);
+ atomic_inc(&wq_attr->cq->usecnt);
+ atomic_set(&wq->usecnt, 0);
}
+ return wq;
+}
+EXPORT_SYMBOL(ib_create_wq);
+
+/**
+ * ib_destroy_wq_user - Destroys the specified user WQ.
+ * @wq: The WQ to destroy.
+ * @udata: Valid user data
+ */
+int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata)
+{
+ struct ib_cq *cq = wq->cq;
+ struct ib_pd *pd = wq->pd;
+ int ret;
+
+ if (atomic_read(&wq->usecnt))
+ return -EBUSY;
+
+ ret = wq->device->ops.destroy_wq(wq, udata);
+ if (ret)
+ return ret;
- return fmr;
+ atomic_dec(&pd->usecnt);
+ atomic_dec(&cq->usecnt);
+ return ret;
}
-EXPORT_SYMBOL(ib_alloc_fmr);
+EXPORT_SYMBOL(ib_destroy_wq_user);
-int ib_unmap_fmr(struct list_head *fmr_list)
+int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
+ struct ib_mr_status *mr_status)
{
- struct ib_fmr *fmr;
+ if (!mr->device->ops.check_mr_status)
+ return -EOPNOTSUPP;
- if (list_empty(fmr_list))
- return 0;
+ return mr->device->ops.check_mr_status(mr, check_mask, mr_status);
+}
+EXPORT_SYMBOL(ib_check_mr_status);
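
Illustrative usage, not taken from this patch: after an integrity (signature) operation completes, a consumer can ask the device whether a protection-information error was detected on the MR. The example_* name is hypothetical; the flag and structure fields come from <rdma/ib_verbs.h>:

static int example_check_sig_mr(struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret)
		return ret;		/* query failed or not supported */

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS)
		return -EIO;		/* protection information mismatch reported */

	return 0;
}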
- fmr = list_entry(fmr_list->next, struct ib_fmr, list);
- return fmr->device->unmap_fmr(fmr_list);
+int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
+ int state)
+{
+ if (!device->ops.set_vf_link_state)
+ return -EOPNOTSUPP;
+
+ return device->ops.set_vf_link_state(device, vf, port, state);
}
-EXPORT_SYMBOL(ib_unmap_fmr);
+EXPORT_SYMBOL(ib_set_vf_link_state);
-int ib_dealloc_fmr(struct ib_fmr *fmr)
+int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
+ struct ifla_vf_info *info)
{
- struct ib_pd *pd;
- int ret;
+ if (!device->ops.get_vf_config)
+ return -EOPNOTSUPP;
- pd = fmr->pd;
- ret = fmr->device->dealloc_fmr(fmr);
- if (!ret)
- atomic_dec(&pd->usecnt);
+ return device->ops.get_vf_config(device, vf, port, info);
+}
+EXPORT_SYMBOL(ib_get_vf_config);
- return ret;
+int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
+ struct ifla_vf_stats *stats)
+{
+ if (!device->ops.get_vf_stats)
+ return -EOPNOTSUPP;
+
+ return device->ops.get_vf_stats(device, vf, port, stats);
}
-EXPORT_SYMBOL(ib_dealloc_fmr);
+EXPORT_SYMBOL(ib_get_vf_stats);
-/* Multicast groups */
+int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
+ int type)
+{
+ if (!device->ops.set_vf_guid)
+ return -EOPNOTSUPP;
-int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
+ return device->ops.set_vf_guid(device, vf, port, guid, type);
+}
+EXPORT_SYMBOL(ib_set_vf_guid);
+
+int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid)
{
- int ret;
+ if (!device->ops.get_vf_guid)
+ return -EOPNOTSUPP;
- if (!qp->device->attach_mcast)
- return -ENOSYS;
- if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
+ return device->ops.get_vf_guid(device, vf, port, node_guid, port_guid);
+}
+EXPORT_SYMBOL(ib_get_vf_guid);
+/**
+ * ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection
+ * information) and set an appropriate memory region for registration.
+ * @mr: memory region
+ * @data_sg: dma mapped scatterlist for data
+ * @data_sg_nents: number of entries in data_sg
+ * @data_sg_offset: offset in bytes into data_sg
+ * @meta_sg: dma mapped scatterlist for metadata
+ * @meta_sg_nents: number of entries in meta_sg
+ * @meta_sg_offset: offset in bytes into meta_sg
+ * @page_size: page vector desired page size
+ *
+ * Constraints:
+ * - The MR must be allocated with type IB_MR_TYPE_INTEGRITY.
+ *
+ * Return: 0 on success.
+ *
+ * After this completes successfully, the memory region
+ * is ready for registration.
+ */
+int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
+ int data_sg_nents, unsigned int *data_sg_offset,
+ struct scatterlist *meta_sg, int meta_sg_nents,
+ unsigned int *meta_sg_offset, unsigned int page_size)
+{
+ if (unlikely(!mr->device->ops.map_mr_sg_pi ||
+ WARN_ON_ONCE(mr->type != IB_MR_TYPE_INTEGRITY)))
+ return -EOPNOTSUPP;
+
+ mr->page_size = page_size;
+
+ return mr->device->ops.map_mr_sg_pi(mr, data_sg, data_sg_nents,
+ data_sg_offset, meta_sg,
+ meta_sg_nents, meta_sg_offset);
+}
+EXPORT_SYMBOL(ib_map_mr_sg_pi);
+
+/**
+ * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
+ * and set it on the memory region.
+ * @mr: memory region
+ * @sg: dma mapped scatterlist
+ * @sg_nents: number of entries in sg
+ * @sg_offset: offset in bytes into sg
+ * @page_size: page vector desired page size
+ *
+ * Constraints:
+ *
+ * - The first sg element is allowed to have an offset.
+ * - Each sg element must either be aligned to page_size or virtually
+ * contiguous to the previous element. In case an sg element has a
+ * non-contiguous offset, the mapping prefix will not include it.
+ * - The last sg element is allowed to have length less than page_size.
+ * - If sg_nents total byte length exceeds the mr max_num_sg * page_size
+ * then only max_num_sg entries will be mapped.
+ * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
+ * constraints holds and the page_size argument is ignored.
+ *
+ * Returns the number of sg elements that were mapped to the memory region.
+ *
+ * After this completes successfully, the memory region
+ * is ready for registration.
+ */
+int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset, unsigned int page_size)
+{
+ if (unlikely(!mr->device->ops.map_mr_sg))
+ return -EOPNOTSUPP;
+
+ mr->page_size = page_size;
+
+ return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset);
+}
+EXPORT_SYMBOL(ib_map_mr_sg);
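
A minimal sketch of the fast-registration flow a kernel ULP typically builds around ib_map_mr_sg() (not part of this patch; example_* is hypothetical and error handling is abbreviated). The SG list is assumed to be already DMA-mapped (e.g. with ib_dma_map_sg()):

static int example_fast_reg(struct ib_qp *qp, struct ib_pd *pd,
			    struct scatterlist *sgl, int nents)
{
	struct ib_reg_wr reg_wr = {};
	struct ib_mr *mr;
	int n;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* Map the DMA-mapped SG list onto the MR's page vector. */
	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
	if (n < nents) {
		ib_dereg_mr(mr);
		return n < 0 ? n : -EINVAL;
	}

	/* Post the registration work request on the send queue. */
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	return ib_post_send(qp, &reg_wr.wr, NULL);
}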
+
+/**
+ * ib_sg_to_pages() - Convert the largest prefix of a sg list
+ * to a page vector
+ * @mr: memory region
+ * @sgl: dma mapped scatterlist
+ * @sg_nents: number of entries in sg
+ * @sg_offset_p: ==== =======================================================
+ * IN start offset in bytes into sg
+ * OUT offset in bytes for element n of the sg of the first
+ * byte that has not been processed where n is the return
+ * value of this function.
+ * ==== =======================================================
+ * @set_page: driver page assignment function pointer
+ *
+ * Core service helper for drivers to convert the largest
+ * prefix of the given sg list to a page vector. The sg list
+ * prefix converted is the prefix that meets the requirements
+ * of ib_map_mr_sg.
+ *
+ * Returns the number of sg elements that were assigned to
+ * a page vector.
+ */
+int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
+ unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
+{
+ struct scatterlist *sg;
+ u64 last_end_dma_addr = 0;
+ unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
+ unsigned int last_page_off = 0;
+ u64 page_mask = ~((u64)mr->page_size - 1);
+ int i, ret;
+
+ if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
return -EINVAL;
- ret = qp->device->attach_mcast(qp, gid, lid);
- if (!ret)
- atomic_inc(&qp->usecnt);
- return ret;
+ mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
+ mr->length = 0;
+
+ for_each_sg(sgl, sg, sg_nents, i) {
+ u64 dma_addr = sg_dma_address(sg) + sg_offset;
+ u64 prev_addr = dma_addr;
+ unsigned int dma_len = sg_dma_len(sg) - sg_offset;
+ u64 end_dma_addr = dma_addr + dma_len;
+ u64 page_addr = dma_addr & page_mask;
+
+ /*
+ * For the second and later elements, check whether either the
+ * end of element i-1 or the start of element i is not aligned
+ * on a page boundary.
+ */
+ if (i && (last_page_off != 0 || page_addr != dma_addr)) {
+ /* Stop mapping if there is a gap. */
+ if (last_end_dma_addr != dma_addr)
+ break;
+
+ /*
+ * Coalesce this element with the last. If it is small
+ * enough just update mr->length. Otherwise start
+ * mapping from the next page.
+ */
+ goto next_page;
+ }
+
+ do {
+ ret = set_page(mr, page_addr);
+ if (unlikely(ret < 0)) {
+ sg_offset = prev_addr - sg_dma_address(sg);
+ mr->length += prev_addr - dma_addr;
+ if (sg_offset_p)
+ *sg_offset_p = sg_offset;
+ return i || sg_offset ? i : ret;
+ }
+ prev_addr = page_addr;
+next_page:
+ page_addr += mr->page_size;
+ } while (page_addr < end_dma_addr);
+
+ mr->length += dma_len;
+ last_end_dma_addr = end_dma_addr;
+ last_page_off = end_dma_addr & ~page_mask;
+
+ sg_offset = 0;
+ }
+
+ if (sg_offset_p)
+ *sg_offset_p = 0;
+ return i;
}
-EXPORT_SYMBOL(ib_attach_mcast);
+EXPORT_SYMBOL(ib_sg_to_pages);
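
Illustrative driver-side usage (the foo_* names are hypothetical, modeled on how providers generally wrap this helper): a driver's .map_mr_sg handler calls ib_sg_to_pages() with a set_page callback that records each page address in its own page list.

struct foo_mr {
	struct ib_mr ibmr;
	u64 *pages;
	u32 npages;
	u32 max_pages;
};

static int foo_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct foo_mr *mr = container_of(ibmr, struct foo_mr, ibmr);

	if (mr->npages == mr->max_pages)
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;
	return 0;
}

static int foo_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct foo_mr *mr = container_of(ibmr, struct foo_mr, ibmr);

	mr->npages = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, foo_set_page);
}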
-int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
+struct ib_drain_cqe {
+ struct ib_cqe cqe;
+ struct completion done;
+};
+
+static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
+ cqe);
+
+ complete(&cqe->done);
+}
+
+/*
+ * Post a WR and block until its completion is reaped for the SQ.
+ */
+static void __ib_drain_sq(struct ib_qp *qp)
{
+ struct ib_cq *cq = qp->send_cq;
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct ib_drain_cqe sdrain;
+ struct ib_rdma_wr swr = {
+ .wr = {
+ .next = NULL,
+ { .wr_cqe = &sdrain.cqe, },
+ .opcode = IB_WR_RDMA_WRITE,
+ },
+ };
int ret;
- if (!qp->device->detach_mcast)
- return -ENOSYS;
- if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
- return -EINVAL;
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ return;
+ }
- ret = qp->device->detach_mcast(qp, gid, lid);
- if (!ret)
- atomic_dec(&qp->usecnt);
- return ret;
+ sdrain.cqe.done = ib_drain_qp_done;
+ init_completion(&sdrain.done);
+
+ ret = ib_post_send(qp, &swr.wr, NULL);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ return;
+ }
+
+ if (cq->poll_ctx == IB_POLL_DIRECT)
+ while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
+ ib_process_cq_direct(cq, -1);
+ else
+ wait_for_completion(&sdrain.done);
}
-EXPORT_SYMBOL(ib_detach_mcast);
-struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
+/*
+ * Post a WR and block until its completion is reaped for the RQ.
+ */
+static void __ib_drain_rq(struct ib_qp *qp)
{
- struct ib_xrcd *xrcd;
+ struct ib_cq *cq = qp->recv_cq;
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct ib_drain_cqe rdrain;
+ struct ib_recv_wr rwr = {};
+ int ret;
- if (!device->alloc_xrcd)
- return ERR_PTR(-ENOSYS);
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+ return;
+ }
+
+ rwr.wr_cqe = &rdrain.cqe;
+ rdrain.cqe.done = ib_drain_qp_done;
+ init_completion(&rdrain.done);
- xrcd = device->alloc_xrcd(device, NULL, NULL);
- if (!IS_ERR(xrcd)) {
- xrcd->device = device;
- xrcd->inode = NULL;
- atomic_set(&xrcd->usecnt, 0);
- mutex_init(&xrcd->tgt_qp_mutex);
- INIT_LIST_HEAD(&xrcd->tgt_qp_list);
+ ret = ib_post_recv(qp, &rwr, NULL);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+ return;
}
- return xrcd;
+ if (cq->poll_ctx == IB_POLL_DIRECT)
+ while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
+ ib_process_cq_direct(cq, -1);
+ else
+ wait_for_completion(&rdrain.done);
}
-EXPORT_SYMBOL(ib_alloc_xrcd);
-int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
+/*
+ * __ib_drain_srq() - Block until Last WQE Reached event arrives, or timeout
+ * expires.
+ * @qp: queue pair associated with SRQ to drain
+ *
+ * Quoting 10.3.1 Queue Pair and EE Context States:
+ *
+ * Note, for QPs that are associated with an SRQ, the Consumer should take the
+ * QP through the Error State before invoking a Destroy QP or a Modify QP to the
+ * Reset State. The Consumer may invoke the Destroy QP without first performing
+ * a Modify QP to the Error State and waiting for the Affiliated Asynchronous
+ * Last WQE Reached Event. However, if the Consumer does not wait for the
+ * Affiliated Asynchronous Last WQE Reached Event, then WQE and Data Segment
+ * leakage may occur. Therefore, it is good programming practice to tear down a
+ * QP that is associated with an SRQ by using the following process:
+ *
+ * - Put the QP in the Error State
+ * - Wait for the Affiliated Asynchronous Last WQE Reached Event;
+ * - either:
+ * drain the CQ by invoking the Poll CQ verb and either wait for CQ
+ * to be empty or the number of Poll CQ operations has exceeded
+ * CQ capacity size;
+ * - or
+ * post another WR that completes on the same CQ and wait for this
+ * WR to return as a WC;
+ * - and then invoke a Destroy QP or Reset QP.
+ *
+ * We use the first option.
+ */
+static void __ib_drain_srq(struct ib_qp *qp)
{
- struct ib_qp *qp;
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct ib_cq *cq;
+ int n, polled = 0;
int ret;
- if (atomic_read(&xrcd->usecnt))
- return -EBUSY;
+ if (!qp->srq) {
+ WARN_ONCE(1, "QP 0x%p is not associated with SRQ\n", qp);
+ return;
+ }
- while (!list_empty(&xrcd->tgt_qp_list)) {
- qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
- ret = ib_destroy_qp(qp);
- if (ret)
- return ret;
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain shared recv queue: %d\n", ret);
+ return;
+ }
+
+ if (ib_srq_has_cq(qp->srq->srq_type)) {
+ cq = qp->srq->ext.cq;
+ } else if (qp->recv_cq) {
+ cq = qp->recv_cq;
+ } else {
+ WARN_ONCE(1, "QP 0x%p has no CQ associated with SRQ\n", qp);
+ return;
}
- return xrcd->device->dealloc_xrcd(xrcd);
+ if (wait_for_completion_timeout(&qp->srq_completion, 60 * HZ) > 0) {
+ while (polled != cq->cqe) {
+ n = ib_process_cq_direct(cq, cq->cqe - polled);
+ if (!n)
+ return;
+ polled += n;
+ }
+ }
}
-EXPORT_SYMBOL(ib_dealloc_xrcd);
-struct ib_flow *ib_create_flow(struct ib_qp *qp,
- struct ib_flow_attr *flow_attr,
- int domain)
+/**
+ * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
+ * application.
+ * @qp: queue pair to drain
+ *
+ * If the device has a provider-specific drain function, then
+ * call that. Otherwise call the generic drain function
+ * __ib_drain_sq().
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ and SQ for the drain work request and
+ * completion.
+ *
+ * allocate the CQ using ib_alloc_cq().
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_sq(struct ib_qp *qp)
{
- struct ib_flow *flow_id;
- if (!qp->device->create_flow)
- return ERR_PTR(-ENOSYS);
+ if (qp->device->ops.drain_sq)
+ qp->device->ops.drain_sq(qp);
+ else
+ __ib_drain_sq(qp);
+ trace_cq_drain_complete(qp->send_cq);
+}
+EXPORT_SYMBOL(ib_drain_sq);
- flow_id = qp->device->create_flow(qp, flow_attr, domain);
- if (!IS_ERR(flow_id))
- atomic_inc(&qp->usecnt);
- return flow_id;
+/**
+ * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
+ * application.
+ * @qp: queue pair to drain
+ *
+ * If the device has a provider-specific drain function, then
+ * call that. Otherwise call the generic drain function
+ * __ib_drain_rq().
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ and RQ for the drain work request and
+ * completion.
+ *
+ * allocate the CQ using ib_alloc_cq().
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_rq(struct ib_qp *qp)
+{
+ if (qp->device->ops.drain_rq)
+ qp->device->ops.drain_rq(qp);
+ else
+ __ib_drain_rq(qp);
+ trace_cq_drain_complete(qp->recv_cq);
}
-EXPORT_SYMBOL(ib_create_flow);
+EXPORT_SYMBOL(ib_drain_rq);
-int ib_destroy_flow(struct ib_flow *flow_id)
+/**
+ * ib_drain_qp() - Block until all CQEs have been consumed by the
+ * application on both the RQ and SQ.
+ * @qp: queue pair to drain
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
+ * and completions.
+ *
+ * allocate the CQs using ib_alloc_cq().
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_qp(struct ib_qp *qp)
{
- int err;
- struct ib_qp *qp = flow_id->qp;
+ ib_drain_sq(qp);
+ if (!qp->srq)
+ ib_drain_rq(qp);
+ else
+ __ib_drain_srq(qp);
+}
+EXPORT_SYMBOL(ib_drain_qp);
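
A sketch of the common teardown sequence these drain helpers enable (not part of this patch; assumes the CQs were allocated with ib_alloc_cq() and no other context is still posting work requests):

static void example_teardown_qp(struct ib_qp *qp)
{
	/* Flush outstanding WRs and wait until their completions are reaped. */
	ib_drain_qp(qp);

	/* Now it is safe to release the QP. */
	ib_destroy_qp(qp);
}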
- err = qp->device->destroy_flow(flow_id);
- if (!err)
- atomic_dec(&qp->usecnt);
- return err;
+struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
+ enum rdma_netdev_t type, const char *name,
+ unsigned char name_assign_type,
+ void (*setup)(struct net_device *))
+{
+ struct rdma_netdev_alloc_params params;
+ struct net_device *netdev;
+ int rc;
+
+ if (!device->ops.rdma_netdev_get_params)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ rc = device->ops.rdma_netdev_get_params(device, port_num, type,
+ &params);
+ if (rc)
+ return ERR_PTR(rc);
+
+ netdev = alloc_netdev_mqs(params.sizeof_priv, name, name_assign_type,
+ setup, params.txqs, params.rxqs);
+ if (!netdev)
+ return ERR_PTR(-ENOMEM);
+
+ return netdev;
}
-EXPORT_SYMBOL(ib_destroy_flow);
+EXPORT_SYMBOL(rdma_alloc_netdev);
-int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
- struct ib_mr_status *mr_status)
+int rdma_init_netdev(struct ib_device *device, u32 port_num,
+ enum rdma_netdev_t type, const char *name,
+ unsigned char name_assign_type,
+ void (*setup)(struct net_device *),
+ struct net_device *netdev)
{
- return mr->device->check_mr_status ?
- mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
+ struct rdma_netdev_alloc_params params;
+ int rc;
+
+ if (!device->ops.rdma_netdev_get_params)
+ return -EOPNOTSUPP;
+
+ rc = device->ops.rdma_netdev_get_params(device, port_num, type,
+ &params);
+ if (rc)
+ return rc;
+
+ return params.initialize_rdma_netdev(device, port_num,
+ netdev, params.param);
}
-EXPORT_SYMBOL(ib_check_mr_status);
+EXPORT_SYMBOL(rdma_init_netdev);
+
+void __rdma_block_iter_start(struct ib_block_iter *biter,
+ struct scatterlist *sglist, unsigned int nents,
+ unsigned long pgsz)
+{
+ memset(biter, 0, sizeof(struct ib_block_iter));
+ biter->__sg = sglist;
+ biter->__sg_nents = nents;
+
+ /* Driver provides best block size to use */
+ biter->__pg_bit = __fls(pgsz);
+}
+EXPORT_SYMBOL(__rdma_block_iter_start);
+
+bool __rdma_block_iter_next(struct ib_block_iter *biter)
+{
+ unsigned int block_offset;
+ unsigned int delta;
+
+ if (!biter->__sg_nents || !biter->__sg)
+ return false;
+
+ biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
+ block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
+ delta = BIT_ULL(biter->__pg_bit) - block_offset;
+
+ while (biter->__sg_nents && biter->__sg &&
+ sg_dma_len(biter->__sg) - biter->__sg_advance <= delta) {
+ delta -= sg_dma_len(biter->__sg) - biter->__sg_advance;
+ biter->__sg_advance = 0;
+ biter->__sg = sg_next(biter->__sg);
+ biter->__sg_nents--;
+ }
+ biter->__sg_advance += delta;
+
+ return true;
+}
+EXPORT_SYMBOL(__rdma_block_iter_next);
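
Drivers normally consume this iterator through the rdma_for_each_block() macro from <rdma/ib_verbs.h> rather than calling the two helpers directly. A hypothetical sketch (example_* naming) that fills a page-address array with 64KB-aligned DMA blocks from a DMA-mapped SG list:

static unsigned int example_fill_blocks(struct scatterlist *sgl,
					unsigned int nents, u64 *pas,
					unsigned int max_pas)
{
	struct ib_block_iter biter;
	unsigned int n = 0;

	rdma_for_each_block(sgl, &biter, nents, SZ_64K) {
		if (n == max_pas)
			break;
		/* Block-aligned DMA address of the current 64KB block. */
		pas[n++] = rdma_block_iter_dma_address(&biter);
	}
	return n;
}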
+
+/**
+ * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
+ * for the drivers.
+ * @descs: array of static descriptors
+ * @num_counters: number of elements in array
+ * @lifespan: milliseconds between updates
+ */
+struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
+ const struct rdma_stat_desc *descs, int num_counters,
+ unsigned long lifespan)
+{
+ struct rdma_hw_stats *stats;
+
+ stats = kzalloc(struct_size(stats, value, num_counters), GFP_KERNEL);
+ if (!stats)
+ return NULL;
+
+ stats->is_disabled = kcalloc(BITS_TO_LONGS(num_counters),
+ sizeof(*stats->is_disabled), GFP_KERNEL);
+ if (!stats->is_disabled)
+ goto err;
+
+ stats->descs = descs;
+ stats->num_counters = num_counters;
+ stats->lifespan = msecs_to_jiffies(lifespan);
+ mutex_init(&stats->lock);
+
+ return stats;
+
+err:
+ kfree(stats);
+ return NULL;
+}
+EXPORT_SYMBOL(rdma_alloc_hw_stats_struct);
+
+/**
+ * rdma_free_hw_stats_struct - Helper function to release rdma_hw_stats
+ * @stats: statistics to release
+ */
+void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats)
+{
+ if (!stats)
+ return;
+
+ kfree(stats->is_disabled);
+ kfree(stats);
+}
+EXPORT_SYMBOL(rdma_free_hw_stats_struct);
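
Hypothetical provider-side usage of the allocation helper above (foo_* names are illustrative, not from this patch): the descriptor table is static and the device's .alloc_hw_port_stats handler simply wraps rdma_alloc_hw_stats_struct().

static const struct rdma_stat_desc foo_stat_descs[] = {
	[0].name = "rx_pkts",
	[1].name = "tx_pkts",
};

static struct rdma_hw_stats *foo_alloc_hw_port_stats(struct ib_device *ibdev,
						     u32 port_num)
{
	return rdma_alloc_hw_stats_struct(foo_stat_descs,
					  ARRAY_SIZE(foo_stat_descs),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}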
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index aded2a5cc2d5..b706dc0d0263 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -1,9 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INFINIBAND_MTHCA) += mthca/
-obj-$(CONFIG_INFINIBAND_QIB) += qib/
-obj-$(CONFIG_INFINIBAND_CXGB3) += cxgb3/
obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/
+obj-$(CONFIG_INFINIBAND_EFA) += efa/
+obj-$(CONFIG_INFINIBAND_IRDMA) += irdma/
+obj-$(CONFIG_MANA_INFINIBAND) += mana/
obj-$(CONFIG_MLX4_INFINIBAND) += mlx4/
obj-$(CONFIG_MLX5_INFINIBAND) += mlx5/
-obj-$(CONFIG_INFINIBAND_NES) += nes/
obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/
+obj-$(CONFIG_INFINIBAND_VMWARE_PVRDMA) += vmw_pvrdma/
obj-$(CONFIG_INFINIBAND_USNIC) += usnic/
+obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
+obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns/
+obj-$(CONFIG_INFINIBAND_QEDR) += qedr/
+obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re/
+obj-$(CONFIG_INFINIBAND_ERDMA) += erdma/
+obj-$(CONFIG_INFINIBAND_IONIC) += ionic/
diff --git a/drivers/infiniband/hw/bnxt_re/Kconfig b/drivers/infiniband/hw/bnxt_re/Kconfig
new file mode 100644
index 000000000000..6a17f5cdb020
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INFINIBAND_BNXT_RE
+ tristate "Broadcom NetXtreme HCA support"
+ depends on 64BIT
+ depends on INET && DCB && BNXT
+ help
+ This driver supports Broadcom NetXtreme-E 10/25/40/50 gigabit
+ RoCE HCAs. To compile this driver as a module, choose M here:
+ the module will be called bnxt_re.
diff --git a/drivers/infiniband/hw/bnxt_re/Makefile b/drivers/infiniband/hw/bnxt_re/Makefile
new file mode 100644
index 000000000000..f63417d2ccc6
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y := -I $(srctree)/drivers/net/ethernet/broadcom/bnxt
+obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re.o
+bnxt_re-y := main.o ib_verbs.o \
+ qplib_res.o qplib_rcfw.o \
+ qplib_sp.o qplib_fp.o hw_counters.o \
+ debugfs.o
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
new file mode 100644
index 000000000000..3485e495ac6a
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -0,0 +1,290 @@
+/*
+ * Broadcom NetXtreme-E RoCE driver.
+ *
+ * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: Slow Path Operators (header)
+ *
+ */
+
+#ifndef __BNXT_RE_H__
+#define __BNXT_RE_H__
+#include <rdma/uverbs_ioctl.h>
+#include "hw_counters.h"
+#include <linux/hashtable.h>
+#define ROCE_DRV_MODULE_NAME "bnxt_re"
+
+#define BNXT_RE_DESC "Broadcom NetXtreme-C/E RoCE Driver"
+
+#define BNXT_RE_PAGE_SHIFT_1G (30)
+#define BNXT_RE_PAGE_SIZE_SUPPORTED 0x7FFFF000 /* 4kb - 1G */
+
+#define BNXT_RE_MAX_MR_SIZE_LOW BIT_ULL(BNXT_RE_PAGE_SHIFT_1G)
+#define BNXT_RE_MAX_MR_SIZE_HIGH BIT_ULL(39)
+#define BNXT_RE_MAX_MR_SIZE BNXT_RE_MAX_MR_SIZE_HIGH
+
+
+/* Number of MRs to reserve for PF, leaving remainder for VFs */
+#define BNXT_RE_RESVD_MR_FOR_PF (32 * 1024)
+#define BNXT_RE_MAX_GID_PER_VF 128
+
+/*
+ * Percentage of resources of each type reserved for PF.
+ * Remaining resources are divided equally among VFs.
+ * [0, 100]
+ */
+#define BNXT_RE_PCT_RSVD_FOR_PF 50
+
+#define BNXT_RE_UD_QP_HW_STALL 0x400000
+
+#define BNXT_RE_RQ_WQE_THRESHOLD 32
+
+/*
+ * Setting the default ack delay value to 16, which means
+ * the default timeout is approx. 260 ms (4 usec * 2^(timeout))
+ */
+
+#define BNXT_RE_DEFAULT_ACK_DELAY 16
+
+struct bnxt_re_ring_attr {
+ dma_addr_t *dma_arr;
+ int pages;
+ int type;
+ u32 depth;
+ u32 lrid; /* Logical ring id */
+ u8 mode;
+};
+
+/*
+ * Data structure and defines to handle
+ * recovery
+ */
+#define BNXT_RE_PRE_RECOVERY_REMOVE 0x1
+#define BNXT_RE_COMPLETE_REMOVE 0x2
+#define BNXT_RE_POST_RECOVERY_INIT 0x4
+#define BNXT_RE_COMPLETE_INIT 0x8
+
+struct bnxt_re_sqp_entries {
+ struct bnxt_qplib_sge sge;
+ u64 wrid;
+ /* For storing the actual qp1 cqe */
+ struct bnxt_qplib_cqe cqe;
+ struct bnxt_re_qp *qp1_qp;
+};
+
+#define BNXT_RE_MAX_GSI_SQP_ENTRIES 1024
+struct bnxt_re_gsi_context {
+ struct bnxt_re_qp *gsi_qp;
+ struct bnxt_re_qp *gsi_sqp;
+ struct bnxt_re_ah *gsi_sah;
+ struct bnxt_re_sqp_entries *sqp_tbl;
+};
+
+struct bnxt_re_en_dev_info {
+ struct bnxt_en_dev *en_dev;
+ struct bnxt_re_dev *rdev;
+};
+
+#define BNXT_RE_AEQ_IDX 0
+#define BNXT_RE_NQ_IDX 1
+#define BNXT_RE_GEN_P5_MAX_VF 64
+
+struct bnxt_re_pacing {
+ u64 dbr_db_fifo_reg_off;
+ void *dbr_page;
+ u64 dbr_bar_addr;
+ u32 pacing_algo_th;
+ u32 do_pacing_save;
+ u32 dbq_pacing_time; /* ms */
+ u32 dbr_def_do_pacing;
+ bool dbr_pacing;
+ struct mutex dbq_lock; /* synchronize db pacing algo */
+};
+
+#define BNXT_RE_MAX_DBR_DO_PACING 0xFFFF
+#define BNXT_RE_DBR_PACING_TIME 5 /* ms */
+#define BNXT_RE_PACING_ALGO_THRESHOLD 250 /* Entries in DB FIFO */
+#define BNXT_RE_PACING_ALARM_TH_MULTIPLE 2 /* Multiple of pacing algo threshold */
+/* Default do_pacing value when there is no congestion */
+#define BNXT_RE_DBR_DO_PACING_NO_CONGESTION 0x7F /* 1 in 512 probability */
+
+#define BNXT_RE_MAX_FIFO_DEPTH_P5 0x2c00
+#define BNXT_RE_MAX_FIFO_DEPTH_P7 0x8000
+
+#define BNXT_RE_MAX_FIFO_DEPTH(ctx) \
+ (bnxt_qplib_is_chip_gen_p7((ctx)) ? \
+ BNXT_RE_MAX_FIFO_DEPTH_P7 :\
+ BNXT_RE_MAX_FIFO_DEPTH_P5)
+
+#define BNXT_RE_GRC_FIFO_REG_BASE 0x2000
+
+#define BNXT_RE_MIN_MSIX 2
+#define BNXT_RE_MAX_MSIX BNXT_MAX_ROCE_MSIX
+struct bnxt_re_nq_record {
+ struct bnxt_msix_entry msix_entries[BNXT_RE_MAX_MSIX];
+ struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX];
+ int num_msix;
+ /* serialize NQ access */
+ struct mutex load_lock;
+};
+
+#define MAX_CQ_HASH_BITS (16)
+#define MAX_SRQ_HASH_BITS (16)
+
+static inline bool bnxt_re_chip_gen_p7(u16 chip_num)
+{
+ return (chip_num == CHIP_NUM_58818 ||
+ chip_num == CHIP_NUM_57608);
+}
+
+struct bnxt_re_dev {
+ struct ib_device ibdev;
+ struct list_head list;
+ unsigned long flags;
+#define BNXT_RE_FLAG_NETDEV_REGISTERED 0
+#define BNXT_RE_FLAG_STATS_CTX3_ALLOC 1
+#define BNXT_RE_FLAG_HAVE_L2_REF 3
+#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
+#define BNXT_RE_FLAG_RESOURCES_ALLOCATED 7
+#define BNXT_RE_FLAG_RESOURCES_INITIALIZED 8
+#define BNXT_RE_FLAG_ERR_DEVICE_DETACHED 17
+#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
+ struct net_device *netdev;
+ struct auxiliary_device *adev;
+ unsigned int version, major, minor;
+ struct bnxt_qplib_chip_ctx *chip_ctx;
+ struct bnxt_en_dev *en_dev;
+
+ int id;
+
+ /* RCFW Channel */
+ struct bnxt_qplib_rcfw rcfw;
+
+ /* NQ record */
+ struct bnxt_re_nq_record *nqr;
+
+ /* Device Resources */
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_ctx qplib_ctx;
+ struct bnxt_qplib_res qplib_res;
+ struct bnxt_qplib_dpi dpi_privileged;
+ struct bnxt_qplib_cq_coal_param cq_coalescing;
+
+ struct mutex qp_lock; /* protect qp list */
+ struct list_head qp_list;
+
+ /* Max of 2 lossless traffic classes supported per port */
+ u16 cosq[2];
+
+ /* QP for handling QP1 packets */
+ struct bnxt_re_gsi_context gsi_ctx;
+ struct bnxt_re_stats stats;
+ atomic_t nq_alloc_cnt;
+ u32 is_virtfn;
+ u32 num_vfs;
+ struct bnxt_re_pacing pacing;
+ struct work_struct dbq_fifo_check_work;
+ struct delayed_work dbq_pacing_work;
+ DECLARE_HASHTABLE(cq_hash, MAX_CQ_HASH_BITS);
+ DECLARE_HASHTABLE(srq_hash, MAX_SRQ_HASH_BITS);
+ struct dentry *dbg_root;
+ struct dentry *qp_debugfs;
+ unsigned long event_bitmap;
+ struct bnxt_qplib_cc_param cc_param;
+ struct workqueue_struct *dcb_wq;
+ struct dentry *cc_config;
+ struct bnxt_re_dbg_cc_config_params *cc_config_params;
+#define BNXT_VPD_FLD_LEN 32
+ char board_partno[BNXT_VPD_FLD_LEN];
+ /* RoCE mirror */
+ u16 mirror_vnic_id;
+ union ib_gid ugid;
+ u32 ugid_index;
+ u8 sniffer_flow_created : 1;
+};
+
+#define to_bnxt_re_dev(ptr, member) \
+ container_of((ptr), struct bnxt_re_dev, member)
+
+#define BNXT_RE_ROCE_V1_PACKET 0
+#define BNXT_RE_ROCEV2_IPV4_PACKET 2
+#define BNXT_RE_ROCEV2_IPV6_PACKET 3
+
+#define BNXT_RE_CHECK_RC(x) ((x) && ((x) != -ETIMEDOUT))
+void bnxt_re_pacing_alert(struct bnxt_re_dev *rdev);
+
+int bnxt_re_assign_pma_port_counters(struct bnxt_re_dev *rdev, struct ib_mad *out_mad);
+int bnxt_re_assign_pma_port_ext_counters(struct bnxt_re_dev *rdev,
+ struct ib_mad *out_mad);
+
+void bnxt_re_hwrm_free_vnic(struct bnxt_re_dev *rdev);
+int bnxt_re_hwrm_alloc_vnic(struct bnxt_re_dev *rdev);
+int bnxt_re_hwrm_cfg_vnic(struct bnxt_re_dev *rdev, u32 qp_id);
+
+static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev)
+{
+ if (rdev)
+ return &rdev->ibdev.dev;
+ return NULL;
+}
+
+extern const struct uapi_definition bnxt_re_uapi_defs[];
+
+static inline void bnxt_re_set_pacing_dev_state(struct bnxt_re_dev *rdev)
+{
+ rdev->qplib_res.pacing_data->dev_err_state =
+ test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
+}
+
+static inline int bnxt_re_read_context_allowed(struct bnxt_re_dev *rdev)
+{
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ||
+ rdev->rcfw.res->cctx->hwrm_intf_ver < HWRM_VERSION_READ_CTX)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+#define BNXT_RE_CONTEXT_TYPE_QPC_SIZE_P5 1088
+#define BNXT_RE_CONTEXT_TYPE_CQ_SIZE_P5 128
+#define BNXT_RE_CONTEXT_TYPE_MRW_SIZE_P5 128
+#define BNXT_RE_CONTEXT_TYPE_SRQ_SIZE_P5 192
+
+#define BNXT_RE_CONTEXT_TYPE_QPC_SIZE_P7 1088
+#define BNXT_RE_CONTEXT_TYPE_CQ_SIZE_P7 192
+#define BNXT_RE_CONTEXT_TYPE_MRW_SIZE_P7 192
+#define BNXT_RE_CONTEXT_TYPE_SRQ_SIZE_P7 192
+
+#define BNXT_RE_HWRM_CMD_TIMEOUT(rdev) \
+ ((rdev)->chip_ctx->hwrm_cmd_max_timeout * 1000)
+
+#endif
diff --git a/drivers/infiniband/hw/bnxt_re/debugfs.c b/drivers/infiniband/hw/bnxt_re/debugfs.c
new file mode 100644
index 000000000000..be5e9b5ca2f0
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/debugfs.c
@@ -0,0 +1,396 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * Copyright (c) 2024, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Description: Debugfs component of the bnxt_re driver
+ */
+
+#include <linux/debugfs.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+#include <rdma/ib_addr.h>
+
+#include "bnxt_ulp.h"
+#include "roce_hsi.h"
+#include "qplib_res.h"
+#include "qplib_sp.h"
+#include "qplib_fp.h"
+#include "qplib_rcfw.h"
+#include "bnxt_re.h"
+#include "ib_verbs.h"
+#include "debugfs.h"
+
+static struct dentry *bnxt_re_debugfs_root;
+
+static const char * const bnxt_re_cc_gen0_name[] = {
+ "enable_cc",
+ "run_avg_weight_g",
+ "num_phase_per_state",
+ "init_cr",
+ "init_tr",
+ "tos_ecn",
+ "tos_dscp",
+ "alt_vlan_pcp",
+ "alt_vlan_dscp",
+ "rtt",
+ "cc_mode",
+ "tcp_cp",
+ "tx_queue",
+ "inactivity_cp",
+};
+
+static inline const char *bnxt_re_qp_state_str(u8 state)
+{
+ switch (state) {
+ case CMDQ_MODIFY_QP_NEW_STATE_RESET:
+ return "RST";
+ case CMDQ_MODIFY_QP_NEW_STATE_INIT:
+ return "INIT";
+ case CMDQ_MODIFY_QP_NEW_STATE_RTR:
+ return "RTR";
+ case CMDQ_MODIFY_QP_NEW_STATE_RTS:
+ return "RTS";
+ case CMDQ_MODIFY_QP_NEW_STATE_SQE:
+ return "SQER";
+ case CMDQ_MODIFY_QP_NEW_STATE_SQD:
+ return "SQD";
+ case CMDQ_MODIFY_QP_NEW_STATE_ERR:
+ return "ERR";
+ default:
+ return "Invalid QP state";
+ }
+}
+
+static inline const char *bnxt_re_qp_type_str(u8 type)
+{
+ switch (type) {
+ case CMDQ_CREATE_QP1_TYPE_GSI: return "QP1";
+ case CMDQ_CREATE_QP_TYPE_GSI: return "QP1";
+ case CMDQ_CREATE_QP_TYPE_RC: return "RC";
+ case CMDQ_CREATE_QP_TYPE_UD: return "UD";
+ case CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE: return "RAW_ETHERTYPE";
+ default: return "Invalid transport type";
+ }
+}
+
+static ssize_t qp_info_read(struct file *filep,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct bnxt_re_qp *qp = filep->private_data;
+ char *buf;
+ int len;
+
+ if (*ppos)
+ return 0;
+
+ buf = kasprintf(GFP_KERNEL,
+ "QPN\t\t: %d\n"
+ "transport\t: %s\n"
+ "state\t\t: %s\n"
+ "mtu\t\t: %d\n"
+ "timeout\t\t: %d\n"
+ "remote QPN\t: %d\n",
+ qp->qplib_qp.id,
+ bnxt_re_qp_type_str(qp->qplib_qp.type),
+ bnxt_re_qp_state_str(qp->qplib_qp.state),
+ qp->qplib_qp.mtu,
+ qp->qplib_qp.timeout,
+ qp->qplib_qp.dest_qpn);
+ if (!buf)
+ return -ENOMEM;
+ if (count < strlen(buf)) {
+ kfree(buf);
+ return -ENOSPC;
+ }
+ len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+ kfree(buf);
+ return len;
+}
+
+static const struct file_operations debugfs_qp_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = qp_info_read,
+};
+
+void bnxt_re_debug_add_qpinfo(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp)
+{
+ char resn[32];
+
+ sprintf(resn, "0x%x", qp->qplib_qp.id);
+ qp->dentry = debugfs_create_file(resn, 0400, rdev->qp_debugfs, qp, &debugfs_qp_fops);
+}
+
+void bnxt_re_debug_rem_qpinfo(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp)
+{
+ debugfs_remove(qp->dentry);
+}
+
+static int map_cc_config_offset_gen0_ext0(u32 offset, struct bnxt_qplib_cc_param *ccparam, u32 *val)
+{
+ u64 map_offset;
+
+ map_offset = BIT(offset);
+
+ switch (map_offset) {
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC:
+ *val = ccparam->enable;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_G:
+ *val = ccparam->g;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_NUMPHASEPERSTATE:
+ *val = ccparam->nph_per_state;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INIT_CR:
+ *val = ccparam->init_cr;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INIT_TR:
+ *val = ccparam->init_tr;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN:
+ *val = ccparam->tos_ecn;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP:
+ *val = ccparam->tos_dscp;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_VLAN_PCP:
+ *val = ccparam->alt_vlan_pcp;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_TOS_DSCP:
+ *val = ccparam->alt_tos_dscp;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_RTT:
+ *val = ccparam->rtt;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_CC_MODE:
+ *val = ccparam->cc_mode;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TCP_CP:
+ *val = ccparam->tcp_cp;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INACTIVITY_CP:
+ *val = ccparam->inact_th;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static ssize_t bnxt_re_cc_config_get(struct file *filp, char __user *buffer,
+ size_t usr_buf_len, loff_t *ppos)
+{
+ struct bnxt_re_cc_param *dbg_cc_param = filp->private_data;
+ struct bnxt_re_dev *rdev = dbg_cc_param->rdev;
+ struct bnxt_qplib_cc_param ccparam = {};
+ u32 offset = dbg_cc_param->offset;
+ char buf[16];
+ u32 val;
+ int rc;
+
+ rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, &ccparam);
+ if (rc)
+ return rc;
+
+ rc = map_cc_config_offset_gen0_ext0(offset, &ccparam, &val);
+ if (rc)
+ return rc;
+
+ rc = snprintf(buf, sizeof(buf), "%d\n", val);
+ if (rc < 0)
+ return rc;
+
+ return simple_read_from_buffer(buffer, usr_buf_len, ppos, (u8 *)(buf), rc);
+}
+
+static int bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offset, u32 val)
+{
+ u32 modify_mask;
+
+ modify_mask = BIT(offset);
+
+ switch (modify_mask) {
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC:
+ ccparam->enable = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_G:
+ ccparam->g = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_NUMPHASEPERSTATE:
+ ccparam->nph_per_state = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INIT_CR:
+ ccparam->init_cr = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INIT_TR:
+ ccparam->init_tr = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN:
+ ccparam->tos_ecn = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP:
+ ccparam->tos_dscp = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_VLAN_PCP:
+ ccparam->alt_vlan_pcp = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_TOS_DSCP:
+ ccparam->alt_tos_dscp = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_RTT:
+ ccparam->rtt = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_CC_MODE:
+ ccparam->cc_mode = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TCP_CP:
+ ccparam->tcp_cp = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TX_QUEUE:
+ return -EOPNOTSUPP;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INACTIVITY_CP:
+ ccparam->inact_th = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TIME_PER_PHASE:
+ ccparam->time_pph = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_PKTS_PER_PHASE:
+ ccparam->pkts_pph = val;
+ break;
+ }
+
+ ccparam->mask = modify_mask;
+ return 0;
+}
+
+static int bnxt_re_configure_cc(struct bnxt_re_dev *rdev, u32 gen_ext, u32 offset, u32 val)
+{
+ struct bnxt_qplib_cc_param ccparam = { };
+ int rc;
+
+ if (gen_ext != CC_CONFIG_GEN0_EXT0)
+ return -EOPNOTSUPP;
+
+ rc = bnxt_re_fill_gen0_ext0(&ccparam, offset, val);
+ if (rc)
+ return rc;
+
+ bnxt_qplib_modify_cc(&rdev->qplib_res, &ccparam);
+ return 0;
+}
+
+static ssize_t bnxt_re_cc_config_set(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct bnxt_re_cc_param *dbg_cc_param = filp->private_data;
+ struct bnxt_re_dev *rdev = dbg_cc_param->rdev;
+ u32 offset = dbg_cc_param->offset;
+ u8 cc_gen = dbg_cc_param->cc_gen;
+ char buf[16];
+ u32 val;
+ int rc;
+
+ if (count >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, buffer, count))
+ return -EFAULT;
+
+ buf[count] = '\0';
+ if (kstrtou32(buf, 0, &val))
+ return -EINVAL;
+
+ rc = bnxt_re_configure_cc(rdev, cc_gen, offset, val);
+ return rc ? rc : count;
+}
+
+static const struct file_operations bnxt_re_cc_config_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = bnxt_re_cc_config_get,
+ .write = bnxt_re_cc_config_set,
+};
+
+static int info_show(struct seq_file *m, void *unused)
+{
+ struct bnxt_re_dev *rdev = m->private;
+ struct bnxt_re_res_cntrs *res_s = &rdev->stats.res;
+
+ seq_puts(m, "Info:\n");
+ seq_printf(m, "Device Name\t\t: %s\n", dev_name(&rdev->ibdev.dev));
+ seq_printf(m, "PD Watermark\t\t: %llu\n", res_s->pd_watermark);
+ seq_printf(m, "AH Watermark\t\t: %llu\n", res_s->ah_watermark);
+ seq_printf(m, "QP Watermark\t\t: %llu\n", res_s->qp_watermark);
+ seq_printf(m, "RC QP Watermark\t\t: %llu\n", res_s->rc_qp_watermark);
+ seq_printf(m, "UD QP Watermark\t\t: %llu\n", res_s->ud_qp_watermark);
+ seq_printf(m, "SRQ Watermark\t\t: %llu\n", res_s->srq_watermark);
+ seq_printf(m, "CQ Watermark\t\t: %llu\n", res_s->cq_watermark);
+ seq_printf(m, "MR Watermark\t\t: %llu\n", res_s->mr_watermark);
+ seq_printf(m, "MW Watermark\t\t: %llu\n", res_s->mw_watermark);
+ seq_printf(m, "CQ Resize Count\t\t: %d\n", atomic_read(&res_s->resize_count));
+ if (rdev->pacing.dbr_pacing) {
+ seq_printf(m, "DB Pacing Reschedule\t: %llu\n", rdev->stats.pacing.resched);
+ seq_printf(m, "DB Pacing Complete\t: %llu\n", rdev->stats.pacing.complete);
+ seq_printf(m, "DB Pacing Alerts\t: %llu\n", rdev->stats.pacing.alerts);
+ seq_printf(m, "DB FIFO Register\t: 0x%x\n",
+ readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off));
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(info);
+
+static void bnxt_re_debugfs_add_info(struct bnxt_re_dev *rdev)
+{
+ debugfs_create_file("info", 0400, rdev->dbg_root, rdev, &info_fops);
+}
+
+void bnxt_re_debugfs_add_pdev(struct bnxt_re_dev *rdev)
+{
+ struct pci_dev *pdev = rdev->en_dev->pdev;
+ struct bnxt_re_dbg_cc_config_params *cc_params;
+ int i;
+
+ rdev->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), bnxt_re_debugfs_root);
+
+ rdev->qp_debugfs = debugfs_create_dir("QPs", rdev->dbg_root);
+ rdev->cc_config = debugfs_create_dir("cc_config", rdev->dbg_root);
+
+ bnxt_re_debugfs_add_info(rdev);
+
+ rdev->cc_config_params = kzalloc(sizeof(*cc_params), GFP_KERNEL);
+
+ for (i = 0; i < BNXT_RE_CC_PARAM_GEN0; i++) {
+ struct bnxt_re_cc_param *tmp_params = &rdev->cc_config_params->gen0_parms[i];
+
+ tmp_params->rdev = rdev;
+ tmp_params->offset = i;
+ tmp_params->cc_gen = CC_CONFIG_GEN0_EXT0;
+ tmp_params->dentry = debugfs_create_file(bnxt_re_cc_gen0_name[i], 0400,
+ rdev->cc_config, tmp_params,
+ &bnxt_re_cc_config_ops);
+ }
+}
+
+void bnxt_re_debugfs_rem_pdev(struct bnxt_re_dev *rdev)
+{
+ debugfs_remove_recursive(rdev->qp_debugfs);
+ debugfs_remove_recursive(rdev->cc_config);
+ kfree(rdev->cc_config_params);
+ debugfs_remove_recursive(rdev->dbg_root);
+ rdev->dbg_root = NULL;
+}
+
+void bnxt_re_register_debugfs(void)
+{
+ bnxt_re_debugfs_root = debugfs_create_dir("bnxt_re", NULL);
+}
+
+void bnxt_re_unregister_debugfs(void)
+{
+ debugfs_remove(bnxt_re_debugfs_root);
+}
diff --git a/drivers/infiniband/hw/bnxt_re/debugfs.h b/drivers/infiniband/hw/bnxt_re/debugfs.h
new file mode 100644
index 000000000000..8f101df4e838
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/debugfs.h
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * Copyright (c) 2024, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Description: Debugfs header
+ */
+
+#ifndef __BNXT_RE_DEBUGFS__
+#define __BNXT_RE_DEBUGFS__
+
+void bnxt_re_debug_add_qpinfo(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp);
+void bnxt_re_debug_rem_qpinfo(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp);
+
+void bnxt_re_debugfs_add_pdev(struct bnxt_re_dev *rdev);
+void bnxt_re_debugfs_rem_pdev(struct bnxt_re_dev *rdev);
+
+void bnxt_re_register_debugfs(void);
+void bnxt_re_unregister_debugfs(void);
+
+#define CC_CONFIG_GEN_EXT(x, y) (((x) << 16) | (y))
+#define CC_CONFIG_GEN0_EXT0 CC_CONFIG_GEN_EXT(0, 0)
+
+#define BNXT_RE_CC_PARAM_GEN0 14
+
+struct bnxt_re_cc_param {
+ struct bnxt_re_dev *rdev;
+ struct dentry *dentry;
+ u32 offset;
+ u8 cc_gen;
+};
+
+struct bnxt_re_dbg_cc_config_params {
+ struct bnxt_re_cc_param gen0_parms[BNXT_RE_CC_PARAM_GEN0];
+};
+#endif
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
new file mode 100644
index 000000000000..651cf9d0e0c7
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -0,0 +1,433 @@
+/*
+ * Broadcom NetXtreme-E RoCE driver.
+ *
+ * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: Statistics
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <rdma/ib_mad.h>
+#include <rdma/ib_pma.h>
+
+#include "roce_hsi.h"
+#include "qplib_res.h"
+#include "qplib_sp.h"
+#include "qplib_fp.h"
+#include "qplib_rcfw.h"
+#include "bnxt_re.h"
+#include "hw_counters.h"
+
+static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
+ [BNXT_RE_RX_PKTS].name = "rx_pkts",
+ [BNXT_RE_RX_BYTES].name = "rx_bytes",
+ [BNXT_RE_TX_PKTS].name = "tx_pkts",
+ [BNXT_RE_TX_BYTES].name = "tx_bytes",
+ [BNXT_RE_RECOVERABLE_ERRORS].name = "recoverable_errors",
+ [BNXT_RE_TX_ERRORS].name = "tx_roce_errors",
+ [BNXT_RE_TX_DISCARDS].name = "tx_roce_discards",
+ [BNXT_RE_RX_ERRORS].name = "rx_roce_errors",
+ [BNXT_RE_RX_DISCARDS].name = "rx_roce_discards",
+ [BNXT_RE_TO_RETRANSMITS].name = "local_ack_timeout_err",
+ [BNXT_RE_SEQ_ERR_NAKS_RCVD].name = "packet_seq_err",
+ [BNXT_RE_MAX_RETRY_EXCEEDED].name = "max_retry_exceeded",
+ [BNXT_RE_RNR_NAKS_RCVD].name = "rnr_nak_retry_err",
+ [BNXT_RE_MISSING_RESP].name = "implied_nak_seq_err",
+ [BNXT_RE_UNRECOVERABLE_ERR].name = "unrecoverable_err",
+ [BNXT_RE_BAD_RESP_ERR].name = "bad_resp_err",
+ [BNXT_RE_LOCAL_QP_OP_ERR].name = "local_qp_op_err",
+ [BNXT_RE_LOCAL_PROTECTION_ERR].name = "local_protection_err",
+ [BNXT_RE_MEM_MGMT_OP_ERR].name = "mem_mgmt_op_err",
+ [BNXT_RE_REMOTE_INVALID_REQ_ERR].name = "req_remote_invalid_request",
+ [BNXT_RE_REMOTE_ACCESS_ERR].name = "req_remote_access_errors",
+ [BNXT_RE_REMOTE_OP_ERR].name = "remote_op_err",
+ [BNXT_RE_DUP_REQ].name = "duplicate_request",
+ [BNXT_RE_RES_EXCEED_MAX].name = "res_exceed_max",
+ [BNXT_RE_RES_LENGTH_MISMATCH].name = "resp_local_length_error",
+ [BNXT_RE_RES_EXCEEDS_WQE].name = "res_exceeds_wqe",
+ [BNXT_RE_RES_OPCODE_ERR].name = "res_opcode_err",
+ [BNXT_RE_RES_RX_INVALID_RKEY].name = "res_rx_invalid_rkey",
+ [BNXT_RE_RES_RX_DOMAIN_ERR].name = "res_rx_domain_err",
+ [BNXT_RE_RES_RX_NO_PERM].name = "res_rx_no_perm",
+ [BNXT_RE_RES_RX_RANGE_ERR].name = "res_rx_range_err",
+ [BNXT_RE_RES_TX_INVALID_RKEY].name = "res_tx_invalid_rkey",
+ [BNXT_RE_RES_TX_DOMAIN_ERR].name = "res_tx_domain_err",
+ [BNXT_RE_RES_TX_NO_PERM].name = "res_tx_no_perm",
+ [BNXT_RE_RES_TX_RANGE_ERR].name = "res_tx_range_err",
+ [BNXT_RE_RES_IRRQ_OFLOW].name = "res_irrq_oflow",
+ [BNXT_RE_RES_UNSUP_OPCODE].name = "res_unsup_opcode",
+ [BNXT_RE_RES_UNALIGNED_ATOMIC].name = "res_unaligned_atomic",
+ [BNXT_RE_RES_REM_INV_ERR].name = "res_rem_inv_err",
+ [BNXT_RE_RES_MEM_ERROR].name = "res_mem_err",
+ [BNXT_RE_RES_SRQ_ERR].name = "res_srq_err",
+ [BNXT_RE_RES_CMP_ERR].name = "res_cmp_err",
+ [BNXT_RE_RES_INVALID_DUP_RKEY].name = "res_invalid_dup_rkey",
+ [BNXT_RE_RES_WQE_FORMAT_ERR].name = "res_wqe_format_err",
+ [BNXT_RE_RES_CQ_LOAD_ERR].name = "res_cq_load_err",
+ [BNXT_RE_RES_SRQ_LOAD_ERR].name = "res_srq_load_err",
+ [BNXT_RE_RES_TX_PCI_ERR].name = "res_tx_pci_err",
+ [BNXT_RE_RES_RX_PCI_ERR].name = "res_rx_pci_err",
+ [BNXT_RE_OUT_OF_SEQ_ERR].name = "out_of_sequence",
+ [BNXT_RE_TX_ATOMIC_REQ].name = "tx_atomic_req",
+ [BNXT_RE_TX_READ_REQ].name = "tx_read_req",
+ [BNXT_RE_TX_READ_RES].name = "tx_read_resp",
+ [BNXT_RE_TX_WRITE_REQ].name = "tx_write_req",
+ [BNXT_RE_TX_SEND_REQ].name = "tx_send_req",
+ [BNXT_RE_TX_ROCE_PKTS].name = "tx_roce_only_pkts",
+ [BNXT_RE_TX_ROCE_BYTES].name = "tx_roce_only_bytes",
+ [BNXT_RE_RX_ATOMIC_REQ].name = "rx_atomic_requests",
+ [BNXT_RE_RX_READ_REQ].name = "rx_read_requests",
+ [BNXT_RE_RX_READ_RESP].name = "rx_read_resp",
+ [BNXT_RE_RX_WRITE_REQ].name = "rx_write_requests",
+ [BNXT_RE_RX_SEND_REQ].name = "rx_send_req",
+ [BNXT_RE_RX_ROCE_PKTS].name = "rx_roce_only_pkts",
+ [BNXT_RE_RX_ROCE_BYTES].name = "rx_roce_only_bytes",
+ [BNXT_RE_RX_ROCE_GOOD_PKTS].name = "rx_roce_good_pkts",
+ [BNXT_RE_RX_ROCE_GOOD_BYTES].name = "rx_roce_good_bytes",
+ [BNXT_RE_OOB].name = "out_of_buffer",
+ [BNXT_RE_TX_CNP].name = "np_cnp_pkts",
+ [BNXT_RE_RX_CNP].name = "rp_cnp_handled",
+ [BNXT_RE_RX_ECN].name = "np_ecn_marked_roce_packets",
+ [BNXT_RE_REQ_CQE_ERROR].name = "req_cqe_error",
+ [BNXT_RE_RESP_CQE_ERROR].name = "resp_cqe_error",
+ [BNXT_RE_RESP_REMOTE_ACCESS_ERRS].name = "resp_remote_access_errors",
+};
+
+static void bnxt_re_copy_ext_stats(struct bnxt_re_dev *rdev,
+ struct rdma_hw_stats *stats,
+ struct bnxt_qplib_ext_stat *s)
+{
+ stats->value[BNXT_RE_TX_ATOMIC_REQ] = s->tx_atomic_req;
+ stats->value[BNXT_RE_TX_READ_REQ] = s->tx_read_req;
+ stats->value[BNXT_RE_TX_READ_RES] = s->tx_read_res;
+ stats->value[BNXT_RE_TX_WRITE_REQ] = s->tx_write_req;
+ stats->value[BNXT_RE_TX_SEND_REQ] = s->tx_send_req;
+ stats->value[BNXT_RE_TX_ROCE_PKTS] = s->tx_roce_pkts;
+ stats->value[BNXT_RE_TX_ROCE_BYTES] = s->tx_roce_bytes;
+ stats->value[BNXT_RE_RX_ATOMIC_REQ] = s->rx_atomic_req;
+ stats->value[BNXT_RE_RX_READ_REQ] = s->rx_read_req;
+ stats->value[BNXT_RE_RX_READ_RESP] = s->rx_read_res;
+ stats->value[BNXT_RE_RX_WRITE_REQ] = s->rx_write_req;
+ stats->value[BNXT_RE_RX_SEND_REQ] = s->rx_send_req;
+ stats->value[BNXT_RE_RX_ROCE_PKTS] = s->rx_roce_pkts;
+ stats->value[BNXT_RE_RX_ROCE_BYTES] = s->rx_roce_bytes;
+ stats->value[BNXT_RE_RX_ROCE_GOOD_PKTS] = s->rx_roce_good_pkts;
+ stats->value[BNXT_RE_RX_ROCE_GOOD_BYTES] = s->rx_roce_good_bytes;
+ stats->value[BNXT_RE_OOB] = s->rx_out_of_buffer;
+ stats->value[BNXT_RE_TX_CNP] = s->tx_cnp;
+ stats->value[BNXT_RE_RX_CNP] = s->rx_cnp;
+ stats->value[BNXT_RE_RX_ECN] = s->rx_ecn_marked;
+ stats->value[BNXT_RE_OUT_OF_SEQ_ERR] = s->rx_out_of_sequence;
+}
+
+static int bnxt_re_get_ext_stat(struct bnxt_re_dev *rdev,
+ struct rdma_hw_stats *stats)
+{
+ struct bnxt_qplib_ext_stat *estat = &rdev->stats.rstat.ext_stat;
+ u32 fid;
+ int rc;
+
+ fid = PCI_FUNC(rdev->en_dev->pdev->devfn);
+ rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, estat);
+ if (rc)
+ goto done;
+ bnxt_re_copy_ext_stats(rdev, stats, estat);
+
+done:
+ return rc;
+}
+
+static void bnxt_re_copy_err_stats(struct bnxt_re_dev *rdev,
+ struct rdma_hw_stats *stats,
+ struct bnxt_qplib_roce_stats *err_s)
+{
+ stats->value[BNXT_RE_TO_RETRANSMITS] =
+ err_s->to_retransmits;
+ stats->value[BNXT_RE_SEQ_ERR_NAKS_RCVD] =
+ err_s->seq_err_naks_rcvd;
+ stats->value[BNXT_RE_MAX_RETRY_EXCEEDED] =
+ err_s->max_retry_exceeded;
+ stats->value[BNXT_RE_RNR_NAKS_RCVD] =
+ err_s->rnr_naks_rcvd;
+ stats->value[BNXT_RE_MISSING_RESP] =
+ err_s->missing_resp;
+ stats->value[BNXT_RE_UNRECOVERABLE_ERR] =
+ err_s->unrecoverable_err;
+ stats->value[BNXT_RE_BAD_RESP_ERR] =
+ err_s->bad_resp_err;
+ stats->value[BNXT_RE_LOCAL_QP_OP_ERR] =
+ err_s->local_qp_op_err;
+ stats->value[BNXT_RE_LOCAL_PROTECTION_ERR] =
+ err_s->local_protection_err;
+ stats->value[BNXT_RE_MEM_MGMT_OP_ERR] =
+ err_s->mem_mgmt_op_err;
+ stats->value[BNXT_RE_REMOTE_INVALID_REQ_ERR] =
+ err_s->remote_invalid_req_err;
+ stats->value[BNXT_RE_REMOTE_ACCESS_ERR] =
+ err_s->remote_access_err;
+ stats->value[BNXT_RE_REMOTE_OP_ERR] =
+ err_s->remote_op_err;
+ stats->value[BNXT_RE_DUP_REQ] =
+ err_s->dup_req;
+ stats->value[BNXT_RE_RES_EXCEED_MAX] =
+ err_s->res_exceed_max;
+ stats->value[BNXT_RE_RES_LENGTH_MISMATCH] =
+ err_s->res_length_mismatch;
+ stats->value[BNXT_RE_RES_EXCEEDS_WQE] =
+ err_s->res_exceeds_wqe;
+ stats->value[BNXT_RE_RES_OPCODE_ERR] =
+ err_s->res_opcode_err;
+ stats->value[BNXT_RE_RES_RX_INVALID_RKEY] =
+ err_s->res_rx_invalid_rkey;
+ stats->value[BNXT_RE_RES_RX_DOMAIN_ERR] =
+ err_s->res_rx_domain_err;
+ stats->value[BNXT_RE_RES_RX_NO_PERM] =
+ err_s->res_rx_no_perm;
+ stats->value[BNXT_RE_RES_RX_RANGE_ERR] =
+ err_s->res_rx_range_err;
+ stats->value[BNXT_RE_RES_TX_INVALID_RKEY] =
+ err_s->res_tx_invalid_rkey;
+ stats->value[BNXT_RE_RES_TX_DOMAIN_ERR] =
+ err_s->res_tx_domain_err;
+ stats->value[BNXT_RE_RES_TX_NO_PERM] =
+ err_s->res_tx_no_perm;
+ stats->value[BNXT_RE_RES_TX_RANGE_ERR] =
+ err_s->res_tx_range_err;
+ stats->value[BNXT_RE_RES_IRRQ_OFLOW] =
+ err_s->res_irrq_oflow;
+ stats->value[BNXT_RE_RES_UNSUP_OPCODE] =
+ err_s->res_unsup_opcode;
+ stats->value[BNXT_RE_RES_UNALIGNED_ATOMIC] =
+ err_s->res_unaligned_atomic;
+ stats->value[BNXT_RE_RES_REM_INV_ERR] =
+ err_s->res_rem_inv_err;
+ stats->value[BNXT_RE_RES_MEM_ERROR] =
+ err_s->res_mem_error;
+ stats->value[BNXT_RE_RES_SRQ_ERR] =
+ err_s->res_srq_err;
+ stats->value[BNXT_RE_RES_CMP_ERR] =
+ err_s->res_cmp_err;
+ stats->value[BNXT_RE_RES_INVALID_DUP_RKEY] =
+ err_s->res_invalid_dup_rkey;
+ stats->value[BNXT_RE_RES_WQE_FORMAT_ERR] =
+ err_s->res_wqe_format_err;
+ stats->value[BNXT_RE_RES_CQ_LOAD_ERR] =
+ err_s->res_cq_load_err;
+ stats->value[BNXT_RE_RES_SRQ_LOAD_ERR] =
+ err_s->res_srq_load_err;
+ stats->value[BNXT_RE_RES_TX_PCI_ERR] =
+ err_s->res_tx_pci_err;
+ stats->value[BNXT_RE_RES_RX_PCI_ERR] =
+ err_s->res_rx_pci_err;
+ stats->value[BNXT_RE_OUT_OF_SEQ_ERR] =
+ err_s->res_oos_drop_count;
+ stats->value[BNXT_RE_REQ_CQE_ERROR] =
+ err_s->bad_resp_err +
+ err_s->local_qp_op_err +
+ err_s->local_protection_err +
+ err_s->mem_mgmt_op_err +
+ err_s->remote_invalid_req_err +
+ err_s->remote_access_err +
+ err_s->remote_op_err;
+ stats->value[BNXT_RE_RESP_CQE_ERROR] =
+ err_s->res_cmp_err +
+ err_s->res_cq_load_err;
+ stats->value[BNXT_RE_RESP_REMOTE_ACCESS_ERRS] =
+ err_s->res_rx_no_perm +
+ err_s->res_tx_no_perm;
+}
+
+int bnxt_re_assign_pma_port_ext_counters(struct bnxt_re_dev *rdev, struct ib_mad *out_mad)
+{
+ struct ib_pma_portcounters_ext *pma_cnt_ext;
+ struct bnxt_qplib_ext_stat *estat = &rdev->stats.rstat.ext_stat;
+ struct ctx_hw_stats *hw_stats = NULL;
+ int rc;
+
+ hw_stats = rdev->qplib_ctx.stats.dma;
+
+ pma_cnt_ext = (struct ib_pma_portcounters_ext *)(out_mad->data + 40);
+ if (_is_ext_stats_supported(rdev->dev_attr->dev_cap_flags)) {
+ u32 fid = PCI_FUNC(rdev->en_dev->pdev->devfn);
+
+ rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, estat);
+ if (rc)
+ return rc;
+ }
+
+ pma_cnt_ext = (struct ib_pma_portcounters_ext *)(out_mad->data + 40);
+ if ((bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) && rdev->is_virtfn) ||
+ !bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
+ pma_cnt_ext->port_xmit_data =
+ cpu_to_be64(le64_to_cpu(hw_stats->tx_ucast_bytes) / 4);
+ pma_cnt_ext->port_rcv_data =
+ cpu_to_be64(le64_to_cpu(hw_stats->rx_ucast_bytes) / 4);
+ pma_cnt_ext->port_xmit_packets =
+ cpu_to_be64(le64_to_cpu(hw_stats->tx_ucast_pkts));
+ pma_cnt_ext->port_rcv_packets =
+ cpu_to_be64(le64_to_cpu(hw_stats->rx_ucast_pkts));
+ pma_cnt_ext->port_unicast_rcv_packets =
+ cpu_to_be64(le64_to_cpu(hw_stats->rx_ucast_pkts));
+ pma_cnt_ext->port_unicast_xmit_packets =
+ cpu_to_be64(le64_to_cpu(hw_stats->tx_ucast_pkts));
+
+ } else {
+ pma_cnt_ext->port_rcv_packets = cpu_to_be64(estat->rx_roce_good_pkts);
+ pma_cnt_ext->port_rcv_data = cpu_to_be64(estat->rx_roce_good_bytes / 4);
+ pma_cnt_ext->port_xmit_packets = cpu_to_be64(estat->tx_roce_pkts);
+ pma_cnt_ext->port_xmit_data = cpu_to_be64(estat->tx_roce_bytes / 4);
+ pma_cnt_ext->port_unicast_rcv_packets = cpu_to_be64(estat->rx_roce_good_pkts);
+ pma_cnt_ext->port_unicast_xmit_packets = cpu_to_be64(estat->tx_roce_pkts);
+ }
+ return 0;
+}
+
+int bnxt_re_assign_pma_port_counters(struct bnxt_re_dev *rdev, struct ib_mad *out_mad)
+{
+ struct bnxt_qplib_ext_stat *estat = &rdev->stats.rstat.ext_stat;
+ struct ib_pma_portcounters *pma_cnt;
+ struct ctx_hw_stats *hw_stats = NULL;
+ int rc;
+
+ hw_stats = rdev->qplib_ctx.stats.dma;
+
+ pma_cnt = (struct ib_pma_portcounters *)(out_mad->data + 40);
+ if (_is_ext_stats_supported(rdev->dev_attr->dev_cap_flags)) {
+ u32 fid = PCI_FUNC(rdev->en_dev->pdev->devfn);
+
+ rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, estat);
+ if (rc)
+ return rc;
+ }
+ if ((bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) && rdev->is_virtfn) ||
+ !bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
+ pma_cnt->port_rcv_packets =
+ cpu_to_be32((u32)(le64_to_cpu(hw_stats->rx_ucast_pkts)) & 0xFFFFFFFF);
+ pma_cnt->port_rcv_data =
+ cpu_to_be32((u32)((le64_to_cpu(hw_stats->rx_ucast_bytes) &
+ 0xFFFFFFFF) / 4));
+ pma_cnt->port_xmit_packets =
+ cpu_to_be32((u32)(le64_to_cpu(hw_stats->tx_ucast_pkts)) & 0xFFFFFFFF);
+ pma_cnt->port_xmit_data =
+ cpu_to_be32((u32)((le64_to_cpu(hw_stats->tx_ucast_bytes)
+ & 0xFFFFFFFF) / 4));
+ } else {
+ pma_cnt->port_rcv_packets = cpu_to_be32(estat->rx_roce_good_pkts);
+ pma_cnt->port_rcv_data = cpu_to_be32((estat->rx_roce_good_bytes / 4));
+ pma_cnt->port_xmit_packets = cpu_to_be32(estat->tx_roce_pkts);
+ pma_cnt->port_xmit_data = cpu_to_be32((estat->tx_roce_bytes / 4));
+ }
+ pma_cnt->port_rcv_constraint_errors = (u8)(le64_to_cpu(hw_stats->rx_discard_pkts) & 0xFF);
+ pma_cnt->port_rcv_errors = cpu_to_be16((u16)(le64_to_cpu(hw_stats->rx_error_pkts)
+ & 0xFFFF));
+ pma_cnt->port_xmit_constraint_errors = (u8)(le64_to_cpu(hw_stats->tx_error_pkts) & 0xFF);
+ pma_cnt->port_xmit_discards = cpu_to_be16((u16)(le64_to_cpu(hw_stats->tx_discard_pkts)
+ & 0xFFFF));
+
+ return 0;
+}
+
+int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u32 port, int index)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ struct bnxt_qplib_roce_stats *err_s = NULL;
+ struct ctx_hw_stats *hw_stats = NULL;
+ int rc = 0;
+
+ hw_stats = rdev->qplib_ctx.stats.dma;
+ if (!port || !stats)
+ return -EINVAL;
+
+ if (hw_stats) {
+ stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
+ le64_to_cpu(hw_stats->tx_bcast_pkts);
+ stats->value[BNXT_RE_TX_DISCARDS] =
+ le64_to_cpu(hw_stats->tx_discard_pkts);
+ stats->value[BNXT_RE_TX_ERRORS] =
+ le64_to_cpu(hw_stats->tx_error_pkts);
+ stats->value[BNXT_RE_RX_ERRORS] =
+ le64_to_cpu(hw_stats->rx_error_pkts);
+ stats->value[BNXT_RE_RX_DISCARDS] =
+ le64_to_cpu(hw_stats->rx_discard_pkts);
+ stats->value[BNXT_RE_RX_PKTS] =
+ le64_to_cpu(hw_stats->rx_ucast_pkts);
+ stats->value[BNXT_RE_RX_BYTES] =
+ le64_to_cpu(hw_stats->rx_ucast_bytes);
+ stats->value[BNXT_RE_TX_PKTS] =
+ le64_to_cpu(hw_stats->tx_ucast_pkts);
+ stats->value[BNXT_RE_TX_BYTES] =
+ le64_to_cpu(hw_stats->tx_ucast_bytes);
+ }
+ err_s = &rdev->stats.rstat.errs;
+ if (test_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags)) {
+ rc = bnxt_qplib_get_roce_stats(&rdev->rcfw, err_s);
+ if (rc) {
+ clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
+ &rdev->flags);
+ goto done;
+ }
+ bnxt_re_copy_err_stats(rdev, stats, err_s);
+ if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags,
+ rdev->is_virtfn)) {
+ rc = bnxt_re_get_ext_stat(rdev, stats);
+ if (rc) {
+ clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
+ &rdev->flags);
+ goto done;
+ }
+ }
+ }
+
+done:
+ return bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
+ BNXT_RE_NUM_EXT_COUNTERS : BNXT_RE_NUM_STD_COUNTERS;
+}
+
+struct rdma_hw_stats *bnxt_re_ib_alloc_hw_port_stats(struct ib_device *ibdev,
+ u32 port_num)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ int num_counters = 0;
+
+ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ num_counters = BNXT_RE_NUM_EXT_COUNTERS;
+ else
+ num_counters = BNXT_RE_NUM_STD_COUNTERS;
+
+ return rdma_alloc_hw_stats_struct(bnxt_re_stat_descs, num_counters,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
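bnxt_re_ib_alloc_hw_port_stats() and bnxt_re_ib_get_hw_stats() are the two entry points the RDMA core calls for per-port counters. A minimal sketch of how a provider typically plugs them into its ib_device_ops; the actual wiring for bnxt_re lives outside this file and is not shown here:

#include <rdma/ib_verbs.h>

/* Sketch of the usual hookup; the real ops table is in the driver's main file. */
static const struct ib_device_ops bnxt_re_stats_ops_sketch = {
	.alloc_hw_port_stats = bnxt_re_ib_alloc_hw_port_stats,
	.get_hw_stats = bnxt_re_ib_get_hw_stats,
};

/* Registered once during device setup, e.g.:
 *	ib_set_device_ops(&rdev->ibdev, &bnxt_re_stats_ops_sketch);
 */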
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.h b/drivers/infiniband/hw/bnxt_re/hw_counters.h
new file mode 100644
index 000000000000..09d371d442aa
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.h
@@ -0,0 +1,165 @@
+/*
+ * Broadcom NetXtreme-E RoCE driver.
+ *
+ * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: Statistics (header)
+ *
+ */
+
+#ifndef __BNXT_RE_HW_STATS_H__
+#define __BNXT_RE_HW_STATS_H__
+
+enum bnxt_re_hw_stats {
+ BNXT_RE_RX_PKTS,
+ BNXT_RE_RX_BYTES,
+ BNXT_RE_TX_PKTS,
+ BNXT_RE_TX_BYTES,
+ BNXT_RE_RECOVERABLE_ERRORS,
+ BNXT_RE_TX_ERRORS,
+ BNXT_RE_TX_DISCARDS,
+ BNXT_RE_RX_ERRORS,
+ BNXT_RE_RX_DISCARDS,
+ BNXT_RE_TO_RETRANSMITS,
+ BNXT_RE_SEQ_ERR_NAKS_RCVD,
+ BNXT_RE_MAX_RETRY_EXCEEDED,
+ BNXT_RE_RNR_NAKS_RCVD,
+ BNXT_RE_MISSING_RESP,
+ BNXT_RE_UNRECOVERABLE_ERR,
+ BNXT_RE_BAD_RESP_ERR,
+ BNXT_RE_LOCAL_QP_OP_ERR,
+ BNXT_RE_LOCAL_PROTECTION_ERR,
+ BNXT_RE_MEM_MGMT_OP_ERR,
+ BNXT_RE_REMOTE_INVALID_REQ_ERR,
+ BNXT_RE_REMOTE_ACCESS_ERR,
+ BNXT_RE_REMOTE_OP_ERR,
+ BNXT_RE_DUP_REQ,
+ BNXT_RE_RES_EXCEED_MAX,
+ BNXT_RE_RES_LENGTH_MISMATCH,
+ BNXT_RE_RES_EXCEEDS_WQE,
+ BNXT_RE_RES_OPCODE_ERR,
+ BNXT_RE_RES_RX_INVALID_RKEY,
+ BNXT_RE_RES_RX_DOMAIN_ERR,
+ BNXT_RE_RES_RX_NO_PERM,
+ BNXT_RE_RES_RX_RANGE_ERR,
+ BNXT_RE_RES_TX_INVALID_RKEY,
+ BNXT_RE_RES_TX_DOMAIN_ERR,
+ BNXT_RE_RES_TX_NO_PERM,
+ BNXT_RE_RES_TX_RANGE_ERR,
+ BNXT_RE_RES_IRRQ_OFLOW,
+ BNXT_RE_RES_UNSUP_OPCODE,
+ BNXT_RE_RES_UNALIGNED_ATOMIC,
+ BNXT_RE_RES_REM_INV_ERR,
+ BNXT_RE_RES_MEM_ERROR,
+ BNXT_RE_RES_SRQ_ERR,
+ BNXT_RE_RES_CMP_ERR,
+ BNXT_RE_RES_INVALID_DUP_RKEY,
+ BNXT_RE_RES_WQE_FORMAT_ERR,
+ BNXT_RE_RES_CQ_LOAD_ERR,
+ BNXT_RE_RES_SRQ_LOAD_ERR,
+ BNXT_RE_RES_TX_PCI_ERR,
+ BNXT_RE_RES_RX_PCI_ERR,
+ BNXT_RE_OUT_OF_SEQ_ERR,
+ BNXT_RE_TX_ATOMIC_REQ,
+ BNXT_RE_TX_READ_REQ,
+ BNXT_RE_TX_READ_RES,
+ BNXT_RE_TX_WRITE_REQ,
+ BNXT_RE_TX_SEND_REQ,
+ BNXT_RE_TX_ROCE_PKTS,
+ BNXT_RE_TX_ROCE_BYTES,
+ BNXT_RE_RX_ATOMIC_REQ,
+ BNXT_RE_RX_READ_REQ,
+ BNXT_RE_RX_READ_RESP,
+ BNXT_RE_RX_WRITE_REQ,
+ BNXT_RE_RX_SEND_REQ,
+ BNXT_RE_RX_ROCE_PKTS,
+ BNXT_RE_RX_ROCE_BYTES,
+ BNXT_RE_RX_ROCE_GOOD_PKTS,
+ BNXT_RE_RX_ROCE_GOOD_BYTES,
+ BNXT_RE_OOB,
+ BNXT_RE_TX_CNP,
+ BNXT_RE_RX_CNP,
+ BNXT_RE_RX_ECN,
+ BNXT_RE_REQ_CQE_ERROR,
+ BNXT_RE_RESP_CQE_ERROR,
+ BNXT_RE_RESP_REMOTE_ACCESS_ERRS,
+ BNXT_RE_NUM_EXT_COUNTERS
+};
+
+#define BNXT_RE_NUM_STD_COUNTERS (BNXT_RE_OUT_OF_SEQ_ERR + 1)
+
+struct bnxt_re_db_pacing_stats {
+ u64 resched;
+ u64 complete;
+ u64 alerts;
+};
+
+struct bnxt_re_res_cntrs {
+ atomic_t qp_count;
+ atomic_t rc_qp_count;
+ atomic_t ud_qp_count;
+ atomic_t cq_count;
+ atomic_t srq_count;
+ atomic_t mr_count;
+ atomic_t mw_count;
+ atomic_t ah_count;
+ atomic_t pd_count;
+ atomic_t resize_count;
+ u64 qp_watermark;
+ u64 rc_qp_watermark;
+ u64 ud_qp_watermark;
+ u64 cq_watermark;
+ u64 srq_watermark;
+ u64 mr_watermark;
+ u64 mw_watermark;
+ u64 ah_watermark;
+ u64 pd_watermark;
+};
+
+struct bnxt_re_rstat {
+ struct bnxt_qplib_roce_stats errs;
+ struct bnxt_qplib_ext_stat ext_stat;
+};
+
+struct bnxt_re_stats {
+ struct bnxt_re_rstat rstat;
+ struct bnxt_re_res_cntrs res;
+ struct bnxt_re_db_pacing_stats pacing;
+};
+
+struct rdma_hw_stats *bnxt_re_ib_alloc_hw_port_stats(struct ib_device *ibdev,
+ u32 port_num);
+int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u32 port, int index);
+#endif /* __BNXT_RE_HW_STATS_H__ */
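The descriptor names registered against this enum surface to userspace under /sys/class/infiniband/<device>/ports/<port>/hw_counters/. A small stand-alone reader, assuming a device named bnxt_re0 on port 1 (both are assumptions for illustration):

#include <stdio.h>

int main(void)
{
	/* Device name "bnxt_re0" and port 1 are assumptions for illustration. */
	const char *path =
		"/sys/class/infiniband/bnxt_re0/ports/1/hw_counters/rx_roce_good_pkts";
	unsigned long long val;
	FILE *f = fopen(path, "r");

	if (!f)
		return 1;
	if (fscanf(f, "%llu", &val) == 1)
		printf("rx_roce_good_pkts = %llu\n", val);
	fclose(f);
	return 0;
}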
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
new file mode 100644
index 000000000000..4dab5ca7362b
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -0,0 +1,4941 @@
+/*
+ * Broadcom NetXtreme-E RoCE driver.
+ *
+ * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: IB Verbs interpreter
+ */
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/if_ether.h>
+#include <net/addrconf.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_mad.h>
+#include <rdma/ib_cache.h>
+#include <rdma/ib_pma.h>
+#include <rdma/uverbs_ioctl.h>
+#include <linux/hashtable.h>
+
+#include "roce_hsi.h"
+#include "qplib_res.h"
+#include "qplib_sp.h"
+#include "qplib_fp.h"
+#include "qplib_rcfw.h"
+
+#include "bnxt_re.h"
+#include "ib_verbs.h"
+#include "debugfs.h"
+
+#include <rdma/uverbs_types.h>
+#include <rdma/uverbs_std_types.h>
+
+#include <rdma/ib_user_ioctl_cmds.h>
+
+#define UVERBS_MODULE_NAME bnxt_re
+#include <rdma/uverbs_named_ioctl.h>
+
+#include <rdma/bnxt_re-abi.h>
+
+static int __from_ib_access_flags(int iflags)
+{
+ int qflags = 0;
+
+ if (iflags & IB_ACCESS_LOCAL_WRITE)
+ qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
+ if (iflags & IB_ACCESS_REMOTE_READ)
+ qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
+ if (iflags & IB_ACCESS_REMOTE_WRITE)
+ qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
+ if (iflags & IB_ACCESS_REMOTE_ATOMIC)
+ qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
+ if (iflags & IB_ACCESS_MW_BIND)
+ qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
+ if (iflags & IB_ZERO_BASED)
+ qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
+ if (iflags & IB_ACCESS_ON_DEMAND)
+ qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
+ return qflags;
+};
+
+static int __to_ib_access_flags(int qflags)
+{
+ int iflags = 0;
+
+ if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
+ iflags |= IB_ACCESS_LOCAL_WRITE;
+ if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
+ iflags |= IB_ACCESS_REMOTE_WRITE;
+ if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
+ iflags |= IB_ACCESS_REMOTE_READ;
+ if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
+ iflags |= IB_ACCESS_REMOTE_ATOMIC;
+ if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
+ iflags |= IB_ACCESS_MW_BIND;
+ if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
+ iflags |= IB_ZERO_BASED;
+ if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
+ iflags |= IB_ACCESS_ON_DEMAND;
+ return iflags;
+}
+
+static u8 __qp_access_flags_from_ib(struct bnxt_qplib_chip_ctx *cctx, int iflags)
+{
+ u8 qflags = 0;
+
+ if (!bnxt_qplib_is_chip_gen_p5_p7(cctx))
+ /* For Wh+ */
+ return (u8)__from_ib_access_flags(iflags);
+
+ /* For P5, P7 and later chips */
+ if (iflags & IB_ACCESS_LOCAL_WRITE)
+ qflags |= CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE;
+ if (iflags & IB_ACCESS_REMOTE_WRITE)
+ qflags |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
+ if (iflags & IB_ACCESS_REMOTE_READ)
+ qflags |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
+ if (iflags & IB_ACCESS_REMOTE_ATOMIC)
+ qflags |= CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC;
+
+ return qflags;
+}
+
+static int __qp_access_flags_to_ib(struct bnxt_qplib_chip_ctx *cctx, u8 qflags)
+{
+ int iflags = 0;
+
+ if (!bnxt_qplib_is_chip_gen_p5_p7(cctx))
+ /* For Wh+ */
+ return __to_ib_access_flags(qflags);
+
+ /* For P5, P7 and later chips */
+ if (qflags & CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE)
+ iflags |= IB_ACCESS_LOCAL_WRITE;
+ if (qflags & CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE)
+ iflags |= IB_ACCESS_REMOTE_WRITE;
+ if (qflags & CMDQ_MODIFY_QP_ACCESS_REMOTE_READ)
+ iflags |= IB_ACCESS_REMOTE_READ;
+ if (qflags & CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC)
+ iflags |= IB_ACCESS_REMOTE_ATOMIC;
+
+ return iflags;
+}
+
+static void bnxt_re_check_and_set_relaxed_ordering(struct bnxt_re_dev *rdev,
+ struct bnxt_qplib_mrw *qplib_mr)
+{
+ if (_is_relaxed_ordering_supported(rdev->dev_attr->dev_cap_flags2) &&
+ pcie_relaxed_ordering_enabled(rdev->en_dev->pdev))
+ qplib_mr->flags |= CMDQ_REGISTER_MR_FLAGS_ENABLE_RO;
+}
+
+static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
+ struct bnxt_qplib_sge *sg_list, int num)
+{
+ int i, total = 0;
+
+ for (i = 0; i < num; i++) {
+ sg_list[i].addr = ib_sg_list[i].addr;
+ sg_list[i].lkey = ib_sg_list[i].lkey;
+ sg_list[i].size = ib_sg_list[i].length;
+ total += sg_list[i].size;
+ }
+ return total;
+}
+
+/* Device */
+int bnxt_re_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *ib_attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
+
+ memset(ib_attr, 0, sizeof(*ib_attr));
+ memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
+ min(sizeof(dev_attr->fw_ver),
+ sizeof(ib_attr->fw_ver)));
+ addrconf_addr_eui48((u8 *)&ib_attr->sys_image_guid,
+ rdev->netdev->dev_addr);
+ ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
+ ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_SUPPORTED;
+
+ ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
+ ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
+ ib_attr->hw_ver = rdev->en_dev->pdev->revision;
+ ib_attr->max_qp = dev_attr->max_qp;
+ ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
+ ib_attr->device_cap_flags =
+ IB_DEVICE_CURR_QP_STATE_MOD
+ | IB_DEVICE_RC_RNR_NAK_GEN
+ | IB_DEVICE_SHUTDOWN_PORT
+ | IB_DEVICE_SYS_IMAGE_GUID
+ | IB_DEVICE_RESIZE_MAX_WR
+ | IB_DEVICE_PORT_ACTIVE_EVENT
+ | IB_DEVICE_N_NOTIFY_CQ
+ | IB_DEVICE_MEM_WINDOW
+ | IB_DEVICE_MEM_WINDOW_TYPE_2B
+ | IB_DEVICE_MEM_MGT_EXTENSIONS;
+ ib_attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
+ ib_attr->max_send_sge = dev_attr->max_qp_sges;
+ ib_attr->max_recv_sge = dev_attr->max_qp_sges;
+ ib_attr->max_sge_rd = dev_attr->max_qp_sges;
+ ib_attr->max_cq = dev_attr->max_cq;
+ ib_attr->max_cqe = dev_attr->max_cq_wqes;
+ ib_attr->max_mr = dev_attr->max_mr;
+ ib_attr->max_pd = dev_attr->max_pd;
+ ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
+ ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
+ ib_attr->atomic_cap = IB_ATOMIC_NONE;
+ ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
+ if (dev_attr->is_atomic) {
+ ib_attr->atomic_cap = IB_ATOMIC_GLOB;
+ ib_attr->masked_atomic_cap = IB_ATOMIC_GLOB;
+ }
+
+ ib_attr->max_ee_rd_atom = 0;
+ ib_attr->max_res_rd_atom = 0;
+ ib_attr->max_ee_init_rd_atom = 0;
+ ib_attr->max_ee = 0;
+ ib_attr->max_rdd = 0;
+ ib_attr->max_mw = dev_attr->max_mw;
+ ib_attr->max_raw_ipv6_qp = 0;
+ ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
+ ib_attr->max_mcast_grp = 0;
+ ib_attr->max_mcast_qp_attach = 0;
+ ib_attr->max_total_mcast_qp_attach = 0;
+ ib_attr->max_ah = dev_attr->max_ah;
+
+ ib_attr->max_srq = dev_attr->max_srq;
+ ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
+ ib_attr->max_srq_sge = dev_attr->max_srq_sges;
+
+ ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
+
+ ib_attr->max_pkeys = 1;
+ ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
+ return 0;
+}
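/*
 * The attributes bnxt_re_query_device() fills in are what userspace sees
 * through libibverbs.  A minimal stand-alone consumer, sketched under the
 * assumption that at least one RDMA device is present (not part of the
 * patch):
 */
#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **list = ibv_get_device_list(NULL);
	struct ibv_device_attr attr;
	struct ibv_context *ctx;
	int ret = 1;

	if (!list)
		return 1;
	ctx = list[0] ? ibv_open_device(list[0]) : NULL;
	if (ctx && !ibv_query_device(ctx, &attr)) {
		printf("max_qp=%d max_cqe=%d max_mr_size=%llu\n",
		       attr.max_qp, attr.max_cqe,
		       (unsigned long long)attr.max_mr_size);
		ret = 0;
	}
	if (ctx)
		ibv_close_device(ctx);
	ibv_free_device_list(list);
	return ret;
}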
+
+int bnxt_re_modify_device(struct ib_device *ibdev,
+ int device_modify_mask,
+ struct ib_device_modify *device_modify)
+{
+ ibdev_dbg(ibdev, "Modify device with mask 0x%x", device_modify_mask);
+
+ if (device_modify_mask & ~IB_DEVICE_MODIFY_NODE_DESC)
+ return -EOPNOTSUPP;
+
+ if (!(device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC))
+ return 0;
+
+ memcpy(ibdev->node_desc, device_modify->node_desc, IB_DEVICE_NODE_DESC_MAX);
+ return 0;
+}
+
+/* Port */
+int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attr *port_attr)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
+ int rc;
+
+ memset(port_attr, 0, sizeof(*port_attr));
+
+ if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
+ port_attr->state = IB_PORT_ACTIVE;
+ port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+ } else {
+ port_attr->state = IB_PORT_DOWN;
+ port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+ }
+ port_attr->max_mtu = IB_MTU_4096;
+ port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
+ /* One GID is reserved for RawEth QP. Report one less */
+ port_attr->gid_tbl_len = (rdev->rcfw.roce_mirror ? (dev_attr->max_sgid - 1) :
+ dev_attr->max_sgid);
+ port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
+ IB_PORT_DEVICE_MGMT_SUP |
+ IB_PORT_VENDOR_CLASS_SUP;
+ port_attr->ip_gids = true;
+
+ port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
+ port_attr->bad_pkey_cntr = 0;
+ port_attr->qkey_viol_cntr = 0;
+ port_attr->pkey_tbl_len = dev_attr->max_pkey;
+ port_attr->lid = 0;
+ port_attr->sm_lid = 0;
+ port_attr->lmc = 0;
+ port_attr->max_vl_num = 4;
+ port_attr->sm_sl = 0;
+ port_attr->subnet_timeout = 0;
+ port_attr->init_type_reply = 0;
+ rc = ib_get_eth_speed(&rdev->ibdev, port_num, &port_attr->active_speed,
+ &port_attr->active_width);
+
+ return rc;
+}
+
+int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr port_attr;
+
+ if (bnxt_re_query_port(ibdev, port_num, &port_attr))
+ return -EINVAL;
+
+ immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
+ immutable->gid_tbl_len = port_attr.gid_tbl_len;
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
+ immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ return 0;
+}
+
+void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
+ rdev->dev_attr->fw_ver[0], rdev->dev_attr->fw_ver[1],
+ rdev->dev_attr->fw_ver[2], rdev->dev_attr->fw_ver[3]);
+}
+
+int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
+ u16 index, u16 *pkey)
+{
+ if (index > 0)
+ return -EINVAL;
+
+ *pkey = IB_DEFAULT_PKEY_FULL;
+
+ return 0;
+}
+
+int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
+ int index, union ib_gid *gid)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ int rc;
+
+ /* Ignore port_num */
+ memset(gid, 0, sizeof(*gid));
+ rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
+ &rdev->qplib_res.sgid_tbl, index,
+ (struct bnxt_qplib_gid *)gid);
+ return rc;
+}
+
+int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
+{
+ int rc = 0;
+ struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
+ struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
+ struct bnxt_qplib_gid *gid_to_del;
+ u16 vlan_id = 0xFFFF;
+
+ /* Delete the entry from the hardware */
+ ctx = *context;
+ if (!ctx)
+ return -EINVAL;
+
+ if (sgid_tbl->active) {
+ if (ctx->idx >= sgid_tbl->max)
+ return -EINVAL;
+ gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
+ vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
+ /* DEL_GID is called in WQ context (netdevice_event_work_handler)
+ * or via the ib_unregister_device path. In the former case QP1
+ * may not be destroyed yet, in which case just return, as FW
+ * needs that entry to be present and will fail its deletion.
+ * We could get invoked again after QP1 is destroyed OR get an
+ * ADD_GID call with a different GID value for the same index,
+ * where we issue a MODIFY_GID cmd to update the GID entry -- TBD
+ */
+ if (ctx->idx == 0 &&
+ rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
+ ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
+ ibdev_dbg(&rdev->ibdev,
+ "Trying to delete GID0 while QP1 is alive\n");
+ return -EFAULT;
+ }
+ ctx->refcnt--;
+ if (!ctx->refcnt) {
+ rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
+ vlan_id, true);
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to remove GID: %#x", rc);
+ } else {
+ ctx_tbl = sgid_tbl->ctx;
+ ctx_tbl[ctx->idx] = NULL;
+ kfree(ctx);
+ }
+ }
+ } else {
+ return -EINVAL;
+ }
+ return rc;
+}
+
+int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
+{
+ int rc;
+ u32 tbl_idx = 0;
+ u16 vlan_id = 0xFFFF;
+ struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
+ struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
+
+ rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
+ if (rc)
+ return rc;
+
+ rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
+ rdev->qplib_res.netdev->dev_addr,
+ vlan_id, true, &tbl_idx, false, 0);
+ if (rc == -EALREADY) {
+ ctx_tbl = sgid_tbl->ctx;
+ ctx_tbl[tbl_idx]->refcnt++;
+ *context = ctx_tbl[tbl_idx];
+ return 0;
+ }
+
+ if (rc < 0) {
+ ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc);
+ return rc;
+ }
+
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ ctx_tbl = sgid_tbl->ctx;
+ ctx->idx = tbl_idx;
+ ctx->refcnt = 1;
+ ctx_tbl[tbl_idx] = ctx;
+ *context = ctx;
+
+ return rc;
+}
+
+enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
+ u32 port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+#define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
+
+static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
+{
+ struct bnxt_re_fence_data *fence = &pd->fence;
+ struct ib_mr *ib_mr = &fence->mr->ib_mr;
+ struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
+ struct bnxt_re_dev *rdev = pd->rdev;
+
+ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return;
+
+ memset(wqe, 0, sizeof(*wqe));
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
+ wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+ wqe->bind.zero_based = false;
+ wqe->bind.parent_l_key = ib_mr->lkey;
+ wqe->bind.va = (u64)(unsigned long)fence->va;
+ wqe->bind.length = fence->size;
+ wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
+ wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
+
+ /* Save the initial rkey in fence structure for now;
+ * wqe->bind.r_key will be set at (re)bind time.
+ */
+ fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
+}
+
+static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
+{
+ struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
+ qplib_qp);
+ struct ib_pd *ib_pd = qp->ib_qp.pd;
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_fence_data *fence = &pd->fence;
+ struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
+ struct bnxt_qplib_swqe wqe;
+ int rc;
+
+ memcpy(&wqe, fence_wqe, sizeof(wqe));
+ wqe.bind.r_key = fence->bind_rkey;
+ fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
+
+ ibdev_dbg(&qp->rdev->ibdev,
+ "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
+ wqe.bind.r_key, qp->qplib_qp.id, pd);
+ rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
+ if (rc) {
+ ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n");
+ return rc;
+ }
+ bnxt_qplib_post_send_db(&qp->qplib_qp);
+
+ return rc;
+}
+
+static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
+{
+ struct bnxt_re_fence_data *fence = &pd->fence;
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct device *dev = &rdev->en_dev->pdev->dev;
+ struct bnxt_re_mr *mr = fence->mr;
+
+ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return;
+
+ if (fence->mw) {
+ bnxt_re_dealloc_mw(fence->mw);
+ fence->mw = NULL;
+ }
+ if (mr) {
+ if (mr->ib_mr.rkey)
+ bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
+ true);
+ if (mr->ib_mr.lkey)
+ bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ kfree(mr);
+ fence->mr = NULL;
+ }
+ if (fence->dma_addr) {
+ dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
+ DMA_BIDIRECTIONAL);
+ fence->dma_addr = 0;
+ }
+}
+
+static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
+{
+ int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
+ struct bnxt_re_fence_data *fence = &pd->fence;
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct device *dev = &rdev->en_dev->pdev->dev;
+ struct bnxt_re_mr *mr = NULL;
+ dma_addr_t dma_addr = 0;
+ struct ib_mw *mw;
+ int rc;
+
+ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return 0;
+
+ dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
+ DMA_BIDIRECTIONAL);
+ rc = dma_mapping_error(dev, dma_addr);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n");
+ rc = -EIO;
+ fence->dma_addr = 0;
+ goto fail;
+ }
+ fence->dma_addr = dma_addr;
+
+ /* Allocate a MR */
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ fence->mr = mr;
+ mr->rdev = rdev;
+ mr->qplib_mr.pd = &pd->qplib_pd;
+ mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
+ mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
+ if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) {
+ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
+ goto fail;
+ }
+
+ /* Register MR */
+ mr->ib_mr.lkey = mr->qplib_mr.lkey;
+ } else {
+ mr->qplib_mr.flags = CMDQ_REGISTER_MR_FLAGS_ALLOC_MR;
+ }
+ mr->qplib_mr.va = (u64)(unsigned long)fence->va;
+ mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
+ BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
+ goto fail;
+ }
+ mr->ib_mr.rkey = mr->qplib_mr.rkey;
+
+ /* Create a fence MW only for kernel consumers */
+ mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
+ if (IS_ERR(mw)) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to create fence-MW for PD: %p\n", pd);
+ rc = PTR_ERR(mw);
+ goto fail;
+ }
+ fence->mw = mw;
+
+ bnxt_re_create_fence_wqe(pd);
+ return 0;
+
+fail:
+ bnxt_re_destroy_fence_mr(pd);
+ return rc;
+}
+
+static struct bnxt_re_user_mmap_entry*
+bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
+ enum bnxt_re_mmap_flag mmap_flag, u64 *offset)
+{
+ struct bnxt_re_user_mmap_entry *entry;
+ int ret;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return NULL;
+
+ entry->mem_offset = mem_offset;
+ entry->mmap_flag = mmap_flag;
+ entry->uctx = uctx;
+
+ switch (mmap_flag) {
+ case BNXT_RE_MMAP_SH_PAGE:
+ ret = rdma_user_mmap_entry_insert_exact(&uctx->ib_uctx,
+ &entry->rdma_entry, PAGE_SIZE, 0);
+ break;
+ case BNXT_RE_MMAP_UC_DB:
+ case BNXT_RE_MMAP_WC_DB:
+ case BNXT_RE_MMAP_DBR_BAR:
+ case BNXT_RE_MMAP_DBR_PAGE:
+ case BNXT_RE_MMAP_TOGGLE_PAGE:
+ ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
+ &entry->rdma_entry, PAGE_SIZE);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret) {
+ kfree(entry);
+ return NULL;
+ }
+ if (offset)
+ *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+ return entry;
+}
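/*
 * Entries created by bnxt_re_mmap_entry_insert() are reference counted by
 * the RDMA core.  The usual counterpart is rdma_user_mmap_entry_remove()
 * on the stored rdma_entry, plus an ib_device_ops.mmap_free hook that
 * releases the container once the last reference drops -- sketched below,
 * not taken from this patch:
 */
static void bnxt_re_mmap_free_sketch(struct rdma_user_mmap_entry *rdma_entry)
{
	struct bnxt_re_user_mmap_entry *entry =
		container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
			     rdma_entry);

	kfree(entry);
}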
+
+/* Protection Domains */
+int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
+{
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+
+ if (udata) {
+ rdma_user_mmap_entry_remove(pd->pd_db_mmap);
+ pd->pd_db_mmap = NULL;
+ }
+
+ bnxt_re_destroy_fence_mr(pd);
+
+ if (pd->qplib_pd.id) {
+ if (!bnxt_qplib_dealloc_pd(&rdev->qplib_res,
+ &rdev->qplib_res.pd_tbl,
+ &pd->qplib_pd))
+ atomic_dec(&rdev->stats.res.pd_count);
+ }
+ return 0;
+}
+
+int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct ib_device *ibdev = ibpd->device;
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
+ udata, struct bnxt_re_ucontext, ib_uctx);
+ struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_user_mmap_entry *entry = NULL;
+ u32 active_pds;
+ int rc = 0;
+
+ pd->rdev = rdev;
+ if (bnxt_qplib_alloc_pd(&rdev->qplib_res, &pd->qplib_pd)) {
+ ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ if (udata) {
+ struct bnxt_re_pd_resp resp = {};
+
+ if (!ucntx->dpi.dbr) {
+ /* Allocate the DPI in alloc_pd to avoid failures of
+ * ibv_devinfo and similar applications when DPIs
+ * are depleted.
+ */
+ if (bnxt_qplib_alloc_dpi(&rdev->qplib_res,
+ &ucntx->dpi, ucntx, BNXT_QPLIB_DPI_TYPE_UC)) {
+ rc = -ENOMEM;
+ goto dbfail;
+ }
+ }
+
+ resp.pdid = pd->qplib_pd.id;
+ /* Still allow mapping this DBR to the new user PD. */
+ resp.dpi = ucntx->dpi.dpi;
+
+ entry = bnxt_re_mmap_entry_insert(ucntx, (u64)ucntx->dpi.umdbr,
+ BNXT_RE_MMAP_UC_DB, &resp.dbr);
+
+ if (!entry) {
+ rc = -ENOMEM;
+ goto dbfail;
+ }
+
+ pd->pd_db_mmap = &entry->rdma_entry;
+
+ rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
+ if (rc) {
+ rdma_user_mmap_entry_remove(pd->pd_db_mmap);
+ rc = -EFAULT;
+ goto dbfail;
+ }
+ }
+
+ if (!udata)
+ if (bnxt_re_create_fence_mr(pd))
+ ibdev_warn(&rdev->ibdev,
+ "Failed to create Fence-MR\n");
+ active_pds = atomic_inc_return(&rdev->stats.res.pd_count);
+ if (active_pds > rdev->stats.res.pd_watermark)
+ rdev->stats.res.pd_watermark = active_pds;
+
+ return 0;
+dbfail:
+ bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
+ &pd->qplib_pd);
+fail:
+ return rc;
+}
+
+/* Address Handles */
+int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
+{
+ struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
+ struct bnxt_re_dev *rdev = ah->rdev;
+ bool block = true;
+ int rc;
+
+ block = !(flags & RDMA_DESTROY_AH_SLEEPABLE);
+ rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
+ if (BNXT_RE_CHECK_RC(rc)) {
+ if (rc == -ETIMEDOUT)
+ rc = 0;
+ else
+ goto fail;
+ }
+ atomic_dec(&rdev->stats.res.ah_count);
+fail:
+ return rc;
+}
+
+static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
+{
+ u8 nw_type;
+
+ switch (ntype) {
+ case RDMA_NETWORK_IPV4:
+ nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
+ break;
+ case RDMA_NETWORK_IPV6:
+ nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
+ break;
+ default:
+ nw_type = CMDQ_CREATE_AH_TYPE_V1;
+ break;
+ }
+ return nw_type;
+}
+
+int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct ib_pd *ib_pd = ib_ah->pd;
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ const struct ib_gid_attr *sgid_attr;
+ struct bnxt_re_gid_ctx *ctx;
+ struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
+ u32 active_ahs;
+ u8 nw_type;
+ int rc;
+
+ if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
+ ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set");
+ return -EINVAL;
+ }
+
+ ah->rdev = rdev;
+ ah->qplib_ah.pd = &pd->qplib_pd;
+
+ /* Supply the configuration for the HW */
+ memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
+ sizeof(union ib_gid));
+ sgid_attr = grh->sgid_attr;
+ /* Get the HW context of the GID. The reference
+ * to the GID table entry is already taken by the caller.
+ */
+ ctx = rdma_read_gid_hw_context(sgid_attr);
+ ah->qplib_ah.sgid_index = ctx->idx;
+ ah->qplib_ah.host_sgid_index = grh->sgid_index;
+ ah->qplib_ah.traffic_class = grh->traffic_class;
+ ah->qplib_ah.flow_label = grh->flow_label;
+ ah->qplib_ah.hop_limit = grh->hop_limit;
+ ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
+
+ /* Get network header type for this GID */
+ nw_type = rdma_gid_attr_network_type(sgid_attr);
+ ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);
+
+ memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
+ rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
+ !(init_attr->flags &
+ RDMA_CREATE_AH_SLEEPABLE));
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
+ return rc;
+ }
+
+ /* Write AVID to shared page. */
+ if (udata) {
+ struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct bnxt_re_ucontext, ib_uctx);
+ unsigned long flag;
+ u32 *wrptr;
+
+ spin_lock_irqsave(&uctx->sh_lock, flag);
+ wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
+ *wrptr = ah->qplib_ah.id;
+ wmb(); /* make sure cache is updated. */
+ spin_unlock_irqrestore(&uctx->sh_lock, flag);
+ }
+ active_ahs = atomic_inc_return(&rdev->stats.res.ah_count);
+ if (active_ahs > rdev->stats.res.ah_watermark)
+ rdev->stats.res.ah_watermark = active_ahs;
+
+ return 0;
+}
+
+int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
+{
+ struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
+
+ ah_attr->type = ib_ah->type;
+ rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
+ memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
+ rdma_ah_set_grh(ah_attr, NULL, 0,
+ ah->qplib_ah.host_sgid_index,
+ 0, ah->qplib_ah.traffic_class);
+ rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
+ rdma_ah_set_port_num(ah_attr, 1);
+ rdma_ah_set_static_rate(ah_attr, 0);
+ return 0;
+}
+
+unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
+ __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->scq->cq_lock, flags);
+ if (qp->rcq != qp->scq)
+ spin_lock(&qp->rcq->cq_lock);
+ else
+ __acquire(&qp->rcq->cq_lock);
+
+ return flags;
+}
+
+void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
+ unsigned long flags)
+ __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
+{
+ if (qp->rcq != qp->scq)
+ spin_unlock(&qp->rcq->cq_lock);
+ else
+ __release(&qp->rcq->cq_lock);
+ spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
+}
+
+static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
+{
+ struct bnxt_re_qp *gsi_sqp;
+ struct bnxt_re_ah *gsi_sah;
+ struct bnxt_re_dev *rdev;
+ int rc;
+
+ rdev = qp->rdev;
+ gsi_sqp = rdev->gsi_ctx.gsi_sqp;
+ gsi_sah = rdev->gsi_ctx.gsi_sah;
+
+ ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
+ bnxt_qplib_destroy_ah(&rdev->qplib_res,
+ &gsi_sah->qplib_ah,
+ true);
+ atomic_dec(&rdev->stats.res.ah_count);
+ bnxt_qplib_clean_qp(&qp->qplib_qp);
+
+ ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
+ rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
+ goto fail;
+ }
+ bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
+
+ /* remove from active qp list */
+ mutex_lock(&rdev->qp_lock);
+ list_del(&gsi_sqp->list);
+ mutex_unlock(&rdev->qp_lock);
+ atomic_dec(&rdev->stats.res.qp_count);
+
+ kfree(rdev->gsi_ctx.sqp_tbl);
+ kfree(gsi_sah);
+ kfree(gsi_sqp);
+ rdev->gsi_ctx.gsi_sqp = NULL;
+ rdev->gsi_ctx.gsi_sah = NULL;
+ rdev->gsi_ctx.sqp_tbl = NULL;
+
+ return 0;
+fail:
+ return rc;
+}
+
+static void bnxt_re_del_unique_gid(struct bnxt_re_dev *rdev)
+{
+ int rc;
+
+ if (!rdev->rcfw.roce_mirror)
+ return;
+
+ rc = bnxt_qplib_del_sgid(&rdev->qplib_res.sgid_tbl,
+ (struct bnxt_qplib_gid *)&rdev->ugid,
+ 0xFFFF, true);
+ if (rc)
+ dev_err(rdev_to_dev(rdev), "Failed to delete unique GID, rc: %d\n", rc);
+}
+
+/* Queue Pairs */
+int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
+{
+ struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+ struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
+ struct bnxt_re_dev *rdev = qp->rdev;
+ struct bnxt_qplib_nq *scq_nq = NULL;
+ struct bnxt_qplib_nq *rcq_nq = NULL;
+ unsigned int flags;
+ int rc;
+
+ bnxt_re_debug_rem_qpinfo(rdev, qp);
+
+ bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
+
+ rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
+ if (rc)
+ ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
+
+ if (rdma_is_kernel_res(&qp->ib_qp.res)) {
+ flags = bnxt_re_lock_cqs(qp);
+ bnxt_qplib_clean_qp(&qp->qplib_qp);
+ bnxt_re_unlock_cqs(qp, flags);
+ }
+
+ bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
+
+ if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
+ bnxt_re_destroy_gsi_sqp(qp);
+
+ mutex_lock(&rdev->qp_lock);
+ list_del(&qp->list);
+ mutex_unlock(&rdev->qp_lock);
+ atomic_dec(&rdev->stats.res.qp_count);
+ if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RC)
+ atomic_dec(&rdev->stats.res.rc_qp_count);
+ else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
+ atomic_dec(&rdev->stats.res.ud_qp_count);
+
+ if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE)
+ bnxt_re_del_unique_gid(rdev);
+
+ ib_umem_release(qp->rumem);
+ ib_umem_release(qp->sumem);
+
+ /* Flush all entries of the notification queue(s) associated with
+ * the given QP.
+ */
+ scq_nq = qplib_qp->scq->nq;
+ rcq_nq = qplib_qp->rcq->nq;
+ bnxt_re_synchronize_nq(scq_nq);
+ if (scq_nq != rcq_nq)
+ bnxt_re_synchronize_nq(rcq_nq);
+
+ return 0;
+}
+
+static u8 __from_ib_qp_type(enum ib_qp_type type)
+{
+ switch (type) {
+ case IB_QPT_GSI:
+ return CMDQ_CREATE_QP1_TYPE_GSI;
+ case IB_QPT_RC:
+ return CMDQ_CREATE_QP_TYPE_RC;
+ case IB_QPT_UD:
+ return CMDQ_CREATE_QP_TYPE_UD;
+ case IB_QPT_RAW_PACKET:
+ return CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE;
+ default:
+ return IB_QPT_MAX;
+ }
+}
+
+static u16 bnxt_re_setup_rwqe_size(struct bnxt_qplib_qp *qplqp,
+ int rsge, int max)
+{
+ if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
+ rsge = max;
+ return bnxt_re_get_rwqe_size(rsge);
+}
+
+static u16 bnxt_re_get_wqe_size(int ilsize, int nsge)
+{
+ u16 wqe_size, calc_ils;
+
+ wqe_size = bnxt_re_get_swqe_size(nsge);
+ if (ilsize) {
+ calc_ils = sizeof(struct sq_send_hdr) + ilsize;
+ wqe_size = max_t(u16, calc_ils, wqe_size);
+ wqe_size = ALIGN(wqe_size, sizeof(struct sq_send_hdr));
+ }
+ return wqe_size;
+}
+
+static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_q *sq;
+ int align, ilsize;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ sq = &qplqp->sq;
+ dev_attr = rdev->dev_attr;
+
+ align = sizeof(struct sq_send_hdr);
+ ilsize = ALIGN(init_attr->cap.max_inline_data, align);
+
+ /* For Gen P4 and Gen P5 fixed-WQE compatibility mode, the
+ * WQE size is fixed at 128 bytes, i.e. 6 SGEs.
+ */
+ if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) {
+ sq->wqe_size = bnxt_re_get_swqe_size(BNXT_STATIC_MAX_SGE);
+ sq->max_sge = BNXT_STATIC_MAX_SGE;
+ } else {
+ sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
+ if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
+ return -EINVAL;
+ }
+
+ if (init_attr->cap.max_inline_data) {
+ qplqp->max_inline_data = sq->wqe_size -
+ sizeof(struct sq_send_hdr);
+ init_attr->cap.max_inline_data = qplqp->max_inline_data;
+ }
+
+ return 0;
+}
+
+static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
+ struct bnxt_re_qp *qp, struct bnxt_re_ucontext *cntx,
+ struct bnxt_re_qp_req *ureq)
+{
+ struct bnxt_qplib_qp *qplib_qp;
+ int bytes = 0, psn_sz;
+ struct ib_umem *umem;
+ int psn_nume;
+
+ qplib_qp = &qp->qplib_qp;
+
+ bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
+ /* Consider mapping PSN search memory only for RC QPs. */
+ if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
+ psn_sz = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
+ sizeof(struct sq_psn_search_ext) :
+ sizeof(struct sq_psn_search);
+ if (cntx && bnxt_re_is_var_size_supported(rdev, cntx)) {
+ psn_nume = ureq->sq_slots;
+ } else {
+ psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
+ qplib_qp->sq.max_wqe : ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
+ sizeof(struct bnxt_qplib_sge));
+ }
+ if (_is_host_msn_table(rdev->qplib_res.dattr->dev_cap_flags2))
+ psn_nume = roundup_pow_of_two(psn_nume);
+ bytes += (psn_nume * psn_sz);
+ }
+
+ bytes = PAGE_ALIGN(bytes);
+ umem = ib_umem_get(&rdev->ibdev, ureq->qpsva, bytes,
+ IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(umem))
+ return PTR_ERR(umem);
+
+ qp->sumem = umem;
+ qplib_qp->sq.sg_info.umem = umem;
+ qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
+ qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
+ qplib_qp->qp_handle = ureq->qp_handle;
+
+ if (!qp->qplib_qp.srq) {
+ bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
+ bytes = PAGE_ALIGN(bytes);
+ umem = ib_umem_get(&rdev->ibdev, ureq->qprva, bytes,
+ IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(umem))
+ goto rqfail;
+ qp->rumem = umem;
+ qplib_qp->rq.sg_info.umem = umem;
+ qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
+ qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
+ }
+
+ qplib_qp->dpi = &cntx->dpi;
+ return 0;
+rqfail:
+ ib_umem_release(qp->sumem);
+ qp->sumem = NULL;
+ memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));
+
+ return PTR_ERR(umem);
+}
+
+static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
+ (struct bnxt_re_pd *pd,
+ struct bnxt_qplib_res *qp1_res,
+ struct bnxt_qplib_qp *qp1_qp)
+{
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct bnxt_re_ah *ah;
+ union ib_gid sgid;
+ int rc;
+
+ ah = kzalloc(sizeof(*ah), GFP_KERNEL);
+ if (!ah)
+ return NULL;
+
+ ah->rdev = rdev;
+ ah->qplib_ah.pd = &pd->qplib_pd;
+
+ rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
+ if (rc)
+ goto fail;
+
+ /* Supply the same data for the DGID as for the SGID */
+ memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
+ sizeof(union ib_gid));
+ ah->qplib_ah.sgid_index = 0;
+
+ ah->qplib_ah.traffic_class = 0;
+ ah->qplib_ah.flow_label = 0;
+ ah->qplib_ah.hop_limit = 1;
+ ah->qplib_ah.sl = 0;
+ /* Have DMAC same as SMAC */
+ ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
+
+ rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate HW AH for Shadow QP");
+ goto fail;
+ }
+ atomic_inc(&rdev->stats.res.ah_count);
+
+ return ah;
+
+fail:
+ kfree(ah);
+ return NULL;
+}
+
+static struct bnxt_re_qp *bnxt_re_create_shadow_qp
+ (struct bnxt_re_pd *pd,
+ struct bnxt_qplib_res *qp1_res,
+ struct bnxt_qplib_qp *qp1_qp)
+{
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct bnxt_re_qp *qp;
+ int rc;
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return NULL;
+
+ qp->rdev = rdev;
+
+ /* Initialize the shadow QP structure from the QP1 values */
+ ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
+
+ qp->qplib_qp.pd = &pd->qplib_pd;
+ qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
+ qp->qplib_qp.type = IB_QPT_UD;
+
+ qp->qplib_qp.max_inline_data = 0;
+ qp->qplib_qp.sig_type = true;
+
+ /* Shadow QP SQ depth should be the same as the QP1 RQ depth */
+ qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6);
+ qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
+ qp->qplib_qp.sq.max_sw_wqe = qp1_qp->rq.max_wqe;
+ qp->qplib_qp.sq.max_sge = 2;
+ /* Q full delta can be 1 since this is an internal QP */
+ qp->qplib_qp.sq.q_full_delta = 1;
+ qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE;
+ qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT;
+
+ qp->qplib_qp.scq = qp1_qp->scq;
+ qp->qplib_qp.rcq = qp1_qp->rcq;
+
+ qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
+ qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
+ qp->qplib_qp.rq.max_sw_wqe = qp1_qp->rq.max_wqe;
+ qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
+ /* Q full delta can be 1 since this is an internal QP */
+ qp->qplib_qp.rq.q_full_delta = 1;
+ qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;
+ qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT;
+
+ qp->qplib_qp.mtu = qp1_qp->mtu;
+
+ qp->qplib_qp.sq_hdr_buf_size = 0;
+ qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
+ qp->qplib_qp.dpi = &rdev->dpi_privileged;
+
+ rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
+ if (rc)
+ goto fail;
+
+ spin_lock_init(&qp->sq_lock);
+ INIT_LIST_HEAD(&qp->list);
+ mutex_lock(&rdev->qp_lock);
+ list_add_tail(&qp->list, &rdev->qp_list);
+ atomic_inc(&rdev->stats.res.qp_count);
+ mutex_unlock(&rdev->qp_lock);
+ return qp;
+fail:
+ kfree(qp);
+ return NULL;
+}
+
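+/*
+ * Initialize the RQ attributes. When an SRQ is attached the RQ depth is
+ * left at zero; otherwise the RQ is sized one entry deeper than requested
+ * (capped to the device limit) so that posting the maximum never looks
+ * like an empty queue.
+ */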
+static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
+ struct ib_qp_init_attr *init_attr,
+ struct bnxt_re_ucontext *uctx)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_q *rq;
+ int entries;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ rq = &qplqp->rq;
+ dev_attr = rdev->dev_attr;
+
+ if (init_attr->srq) {
+ struct bnxt_re_srq *srq;
+
+ srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);
+ qplqp->srq = &srq->qplib_srq;
+ rq->max_wqe = 0;
+ } else {
+ rq->max_sge = init_attr->cap.max_recv_sge;
+ if (rq->max_sge > dev_attr->max_qp_sges)
+ rq->max_sge = dev_attr->max_qp_sges;
+ init_attr->cap.max_recv_sge = rq->max_sge;
+ rq->wqe_size = bnxt_re_setup_rwqe_size(qplqp, rq->max_sge,
+ dev_attr->max_qp_sges);
+ /* Allocate 1 more than what's provided so posting max doesn't
+ * mean empty.
+ */
+ entries = bnxt_re_init_depth(init_attr->cap.max_recv_wr + 1, uctx);
+ rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
+ rq->max_sw_wqe = rq->max_wqe;
+ rq->q_full_delta = 0;
+ rq->sg_info.pgsize = PAGE_SIZE;
+ rq->sg_info.pgshft = PAGE_SHIFT;
+ }
+
+ return 0;
+}
+
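+/*
+ * For GSI QPs on pre-P5/P7 chips, force the RQ SGE count. Note that the
+ * clamp against max_qp_sges below is effectively a no-op: the hard-coded
+ * value of 6 assigned last always wins.
+ */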
+static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = rdev->dev_attr;
+
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
+ qplqp->rq.max_sge = dev_attr->max_qp_sges;
+ if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
+ qplqp->rq.max_sge = dev_attr->max_qp_sges;
+ qplqp->rq.max_sge = 6;
+ }
+}
+
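+/*
+ * Size the SQ. For variable-WQE user QPs the slot count comes directly
+ * from the userspace request; otherwise the requested depth is grown by
+ * "diff + 1" reserved entries (BNXT_QPLIB_RESERVED_QP_WRS in fixed-size
+ * WQE mode) and capped to the device limit.
+ */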
+static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
+ struct ib_qp_init_attr *init_attr,
+ struct bnxt_re_ucontext *uctx,
+ struct bnxt_re_qp_req *ureq)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_q *sq;
+ int diff = 0;
+ int entries;
+ int rc;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ sq = &qplqp->sq;
+ dev_attr = rdev->dev_attr;
+
+ sq->max_sge = init_attr->cap.max_send_sge;
+ entries = init_attr->cap.max_send_wr;
+ if (uctx && qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) {
+ sq->max_wqe = ureq->sq_slots;
+ sq->max_sw_wqe = ureq->sq_slots;
+ sq->wqe_size = sizeof(struct sq_sge);
+ } else {
+ if (sq->max_sge > dev_attr->max_qp_sges) {
+ sq->max_sge = dev_attr->max_qp_sges;
+ init_attr->cap.max_send_sge = sq->max_sge;
+ }
+
+ rc = bnxt_re_setup_swqe_size(qp, init_attr);
+ if (rc)
+ return rc;
+
+ /* Allocate 128 + 1 more than what's provided */
+ diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
+ 0 : BNXT_QPLIB_RESERVED_QP_WRS;
+ entries = bnxt_re_init_depth(entries + diff + 1, uctx);
+ sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
+ if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
+ sq->max_sw_wqe = bnxt_qplib_get_depth(sq, qplqp->wqe_mode, true);
+ else
+ sq->max_sw_wqe = sq->max_wqe;
+
+ }
+ sq->q_full_delta = diff + 1;
+ /*
+ * Reserve one slot for the phantom WQE. The application may post
+ * one extra entry in that case, but allowing it avoids an
+ * unexpected queue-full condition.
+ */
+ qplqp->sq.q_full_delta -= 1;
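+ /* Net effect: q_full_delta ends up equal to "diff", i.e.
+ * (diff + 1) - 1, which is BNXT_QPLIB_RESERVED_QP_WRS in the
+ * fixed-size WQE case.
+ */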
+ qplqp->sq.sg_info.pgsize = PAGE_SIZE;
+ qplqp->sq.sg_info.pgshft = PAGE_SHIFT;
+
+ return 0;
+}
+
+static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
+ struct ib_qp_init_attr *init_attr,
+ struct bnxt_re_ucontext *uctx)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ int entries;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = rdev->dev_attr;
+
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
+ entries = bnxt_re_init_depth(init_attr->cap.max_send_wr + 1, uctx);
+ qplqp->sq.max_wqe = min_t(u32, entries,
+ dev_attr->max_qp_wqes + 1);
+ qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
+ init_attr->cap.max_send_wr;
+ qplqp->sq.max_sge++; /* Need one extra sge to put UD header */
+ if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
+ qplqp->sq.max_sge = dev_attr->max_qp_sges;
+ }
+}
+
+static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct bnxt_qplib_chip_ctx *chip_ctx;
+ int qptype;
+
+ chip_ctx = rdev->chip_ctx;
+
+ qptype = __from_ib_qp_type(init_attr->qp_type);
+ if (qptype == IB_QPT_MAX) {
+ ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", qptype);
+ qptype = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (bnxt_qplib_is_chip_gen_p5_p7(chip_ctx) &&
+ init_attr->qp_type == IB_QPT_GSI)
+ qptype = CMDQ_CREATE_QP_TYPE_GSI;
+out:
+ return qptype;
+}
+
+static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct bnxt_re_ucontext *uctx,
+ struct bnxt_re_qp_req *ureq)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_cq *cq;
+ int rc = 0, qptype;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = rdev->dev_attr;
+
+ /* Setup misc params */
+ ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
+ qplqp->pd = &pd->qplib_pd;
+ qplqp->qp_handle = (u64)qplqp;
+ qplqp->max_inline_data = init_attr->cap.max_inline_data;
+ qplqp->sig_type = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
+ qptype = bnxt_re_init_qp_type(rdev, init_attr);
+ if (qptype < 0) {
+ rc = qptype;
+ goto out;
+ }
+ qplqp->type = (u8)qptype;
+ qplqp->wqe_mode = bnxt_re_is_var_size_supported(rdev, uctx);
+ if (init_attr->qp_type == IB_QPT_RC) {
+ qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
+ qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
+ }
+ qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
+ qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
+ if (init_attr->create_flags) {
+ ibdev_dbg(&rdev->ibdev,
+ "QP create flags 0x%x not supported",
+ init_attr->create_flags);
+ return -EOPNOTSUPP;
+ }
+
+ /* Setup CQs */
+ if (init_attr->send_cq) {
+ cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
+ qplqp->scq = &cq->qplib_cq;
+ qp->scq = cq;
+ }
+
+ if (init_attr->recv_cq) {
+ cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
+ qplqp->rcq = &cq->qplib_cq;
+ qp->rcq = cq;
+ }
+
+ /* Setup RQ/SRQ */
+ rc = bnxt_re_init_rq_attr(qp, init_attr, uctx);
+ if (rc)
+ goto out;
+ if (init_attr->qp_type == IB_QPT_GSI)
+ bnxt_re_adjust_gsi_rq_attr(qp);
+
+ /* Setup SQ */
+ rc = bnxt_re_init_sq_attr(qp, init_attr, uctx, ureq);
+ if (rc)
+ goto out;
+ if (init_attr->qp_type == IB_QPT_GSI)
+ bnxt_re_adjust_gsi_sq_attr(qp, init_attr, uctx);
+
+ if (uctx) /* This will update DPI and qp_handle */
+ rc = bnxt_re_init_user_qp(rdev, pd, qp, uctx, ureq);
+out:
+ return rc;
+}
+
+static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
+ struct bnxt_re_pd *pd)
+{
+ struct bnxt_re_sqp_entries *sqp_tbl;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_qp *sqp;
+ struct bnxt_re_ah *sah;
+ int rc = 0;
+
+ rdev = qp->rdev;
+ /* Create a shadow QP to handle the QP1 traffic */
+ sqp_tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*sqp_tbl),
+ GFP_KERNEL);
+ if (!sqp_tbl)
+ return -ENOMEM;
+ rdev->gsi_ctx.sqp_tbl = sqp_tbl;
+
+ sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
+ if (!sqp) {
+ rc = -ENODEV;
+ ibdev_err(&rdev->ibdev, "Failed to create Shadow QP for QP1");
+ goto out;
+ }
+ rdev->gsi_ctx.gsi_sqp = sqp;
+
+ sqp->rcq = qp->rcq;
+ sqp->scq = qp->scq;
+ sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
+ &qp->qplib_qp);
+ if (!sah) {
+ bnxt_qplib_destroy_qp(&rdev->qplib_res,
+ &sqp->qplib_qp);
+ rc = -ENODEV;
+ ibdev_err(&rdev->ibdev,
+ "Failed to create AH entry for ShadowQP");
+ goto out;
+ }
+ rdev->gsi_ctx.gsi_sah = sah;
+
+ return 0;
+out:
+ kfree(sqp_tbl);
+ return rc;
+}
+
+static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_qp *qplqp;
+ int rc;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+
+ qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
+ qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
+
+ rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "create HW QP1 failed!");
+ goto out;
+ }
+
+ rc = bnxt_re_create_shadow_gsi(qp, pd);
+out:
+ return rc;
+}
+
+static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
+ struct ib_qp_init_attr *init_attr,
+ struct bnxt_qplib_dev_attr *dev_attr)
+{
+ bool rc = true;
+
+ if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes ||
+ init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes ||
+ init_attr->cap.max_send_sge > dev_attr->max_qp_sges ||
+ init_attr->cap.max_recv_sge > dev_attr->max_qp_sges ||
+ init_attr->cap.max_inline_data > dev_attr->max_inline_data) {
+ ibdev_err(&rdev->ibdev,
+ "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x",
+ init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
+ init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
+ init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
+ init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
+ init_attr->cap.max_inline_data,
+ dev_attr->max_inline_data);
+ rc = false;
+ }
+ return rc;
+}
+
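+/*
+ * Add a per-function unique GID (subnet prefix 0xfe8000000000abcd plus
+ * the netdev's EUI-48 derived interface id) when the roce_mirror
+ * capability is set. RawEth QPs switch to this GID so that their traffic
+ * is counted against the right stats context (see bnxt_re_create_qp()).
+ */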
+static int bnxt_re_add_unique_gid(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+ struct bnxt_qplib_res *res = &rdev->qplib_res;
+ int rc;
+
+ if (!rdev->rcfw.roce_mirror)
+ return 0;
+
+ rdev->ugid.global.subnet_prefix = cpu_to_be64(0xfe8000000000abcdLL);
+ addrconf_ifid_eui48(&rdev->ugid.raw[8], rdev->netdev);
+
+ rc = bnxt_qplib_add_sgid(&res->sgid_tbl,
+ (struct bnxt_qplib_gid *)&rdev->ugid,
+ rdev->qplib_res.netdev->dev_addr,
+ 0xFFFF, true, &rdev->ugid_index, true,
+ hctx->stats3.fw_id);
+ if (rc)
+ dev_err(rdev_to_dev(rdev), "Failed to add unique GID. rc = %d\n", rc);
+
+ return rc;
+}
+
+int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_re_ucontext *uctx;
+ struct bnxt_re_qp_req ureq;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_pd *pd;
+ struct bnxt_re_qp *qp;
+ struct ib_pd *ib_pd;
+ u32 active_qps;
+ int rc;
+
+ ib_pd = ib_qp->pd;
+ pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ rdev = pd->rdev;
+ dev_attr = rdev->dev_attr;
+ qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+
+ uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
+ if (udata)
+ if (ib_copy_from_udata(&ureq, udata, min(udata->inlen, sizeof(ureq))))
+ return -EFAULT;
+
+ rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
+ if (!rc) {
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ qp->rdev = rdev;
+ rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, uctx, &ureq);
+ if (rc)
+ goto fail;
+
+ if (qp_init_attr->qp_type == IB_QPT_GSI &&
+ !(bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))) {
+ rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
+ if (rc == -ENODEV)
+ goto qp_destroy;
+ if (rc)
+ goto fail;
+ } else {
+ rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to create HW QP");
+ goto free_umem;
+ }
+ if (udata) {
+ struct bnxt_re_qp_resp resp;
+
+ resp.qpid = qp->qplib_qp.id;
+ resp.rsvd = 0;
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to copy QP udata");
+ goto qp_destroy;
+ }
+ }
+ }
+
+ /* RawEth QP support was added to capture TCP packet dumps, so a
+ * unique SGID is used to avoid mis-attributing statistics on the
+ * per-function stats_ctx.
+ */
+ if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE) {
+ rc = bnxt_re_add_unique_gid(rdev);
+ if (rc)
+ goto qp_destroy;
+ qp->qplib_qp.ugid_index = rdev->ugid_index;
+ }
+
+ qp->ib_qp.qp_num = qp->qplib_qp.id;
+ if (qp_init_attr->qp_type == IB_QPT_GSI)
+ rdev->gsi_ctx.gsi_qp = qp;
+ spin_lock_init(&qp->sq_lock);
+ spin_lock_init(&qp->rq_lock);
+ INIT_LIST_HEAD(&qp->list);
+ mutex_lock(&rdev->qp_lock);
+ list_add_tail(&qp->list, &rdev->qp_list);
+ mutex_unlock(&rdev->qp_lock);
+ active_qps = atomic_inc_return(&rdev->stats.res.qp_count);
+ if (active_qps > rdev->stats.res.qp_watermark)
+ rdev->stats.res.qp_watermark = active_qps;
+ if (qp_init_attr->qp_type == IB_QPT_RC) {
+ active_qps = atomic_inc_return(&rdev->stats.res.rc_qp_count);
+ if (active_qps > rdev->stats.res.rc_qp_watermark)
+ rdev->stats.res.rc_qp_watermark = active_qps;
+ } else if (qp_init_attr->qp_type == IB_QPT_UD) {
+ active_qps = atomic_inc_return(&rdev->stats.res.ud_qp_count);
+ if (active_qps > rdev->stats.res.ud_qp_watermark)
+ rdev->stats.res.ud_qp_watermark = active_qps;
+ }
+ bnxt_re_debug_add_qpinfo(rdev, qp);
+
+ return 0;
+qp_destroy:
+ bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
+free_umem:
+ ib_umem_release(qp->rumem);
+ ib_umem_release(qp->sumem);
+fail:
+ return rc;
+}
+
+static u8 __from_ib_qp_state(enum ib_qp_state state)
+{
+ switch (state) {
+ case IB_QPS_RESET:
+ return CMDQ_MODIFY_QP_NEW_STATE_RESET;
+ case IB_QPS_INIT:
+ return CMDQ_MODIFY_QP_NEW_STATE_INIT;
+ case IB_QPS_RTR:
+ return CMDQ_MODIFY_QP_NEW_STATE_RTR;
+ case IB_QPS_RTS:
+ return CMDQ_MODIFY_QP_NEW_STATE_RTS;
+ case IB_QPS_SQD:
+ return CMDQ_MODIFY_QP_NEW_STATE_SQD;
+ case IB_QPS_SQE:
+ return CMDQ_MODIFY_QP_NEW_STATE_SQE;
+ case IB_QPS_ERR:
+ default:
+ return CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ }
+}
+
+static enum ib_qp_state __to_ib_qp_state(u8 state)
+{
+ switch (state) {
+ case CMDQ_MODIFY_QP_NEW_STATE_RESET:
+ return IB_QPS_RESET;
+ case CMDQ_MODIFY_QP_NEW_STATE_INIT:
+ return IB_QPS_INIT;
+ case CMDQ_MODIFY_QP_NEW_STATE_RTR:
+ return IB_QPS_RTR;
+ case CMDQ_MODIFY_QP_NEW_STATE_RTS:
+ return IB_QPS_RTS;
+ case CMDQ_MODIFY_QP_NEW_STATE_SQD:
+ return IB_QPS_SQD;
+ case CMDQ_MODIFY_QP_NEW_STATE_SQE:
+ return IB_QPS_SQE;
+ case CMDQ_MODIFY_QP_NEW_STATE_ERR:
+ default:
+ return IB_QPS_ERR;
+ }
+}
+
+static u32 __from_ib_mtu(enum ib_mtu mtu)
+{
+ switch (mtu) {
+ case IB_MTU_256:
+ return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
+ case IB_MTU_512:
+ return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
+ case IB_MTU_1024:
+ return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
+ case IB_MTU_2048:
+ return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
+ case IB_MTU_4096:
+ return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
+ default:
+ return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
+ }
+}
+
+static enum ib_mtu __to_ib_mtu(u32 mtu)
+{
+ switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
+ case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
+ return IB_MTU_256;
+ case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
+ return IB_MTU_512;
+ case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
+ return IB_MTU_1024;
+ case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
+ return IB_MTU_2048;
+ case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
+ return IB_MTU_4096;
+ default:
+ return IB_MTU_2048;
+ }
+}
+
+/* Shared Receive Queues */
+int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
+{
+ struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+ ib_srq);
+ struct bnxt_re_dev *rdev = srq->rdev;
+ struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
+
+ if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) {
+ free_page((unsigned long)srq->uctx_srq_page);
+ hash_del(&srq->hash_entry);
+ }
+ bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
+ ib_umem_release(srq->umem);
+ atomic_dec(&rdev->stats.res.srq_count);
+ return 0;
+}
+
+static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
+ struct bnxt_re_pd *pd,
+ struct bnxt_re_srq *srq,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_srq_req ureq;
+ struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
+ struct ib_umem *umem;
+ int bytes = 0;
+ struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
+ udata, struct bnxt_re_ucontext, ib_uctx);
+
+ if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
+ return -EFAULT;
+
+ bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
+ bytes = PAGE_ALIGN(bytes);
+ umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
+ IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(umem))
+ return PTR_ERR(umem);
+
+ srq->umem = umem;
+ qplib_srq->sg_info.umem = umem;
+ qplib_srq->sg_info.pgsize = PAGE_SIZE;
+ qplib_srq->sg_info.pgshft = PAGE_SHIFT;
+ qplib_srq->srq_handle = ureq.srq_handle;
+ qplib_srq->dpi = &cntx->dpi;
+
+ return 0;
+}
+
+int bnxt_re_create_srq(struct ib_srq *ib_srq,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_re_ucontext *uctx;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_srq *srq;
+ struct bnxt_re_pd *pd;
+ struct ib_pd *ib_pd;
+ u32 active_srqs;
+ int rc, entries;
+
+ ib_pd = ib_srq->pd;
+ pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ rdev = pd->rdev;
+ dev_attr = rdev->dev_attr;
+ srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
+
+ if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
+ ibdev_err(&rdev->ibdev, "Create CQ failed - max exceeded");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
+ rc = -EOPNOTSUPP;
+ goto exit;
+ }
+
+ uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
+ srq->rdev = rdev;
+ srq->qplib_srq.pd = &pd->qplib_pd;
+ srq->qplib_srq.dpi = &rdev->dpi_privileged;
+ /* Allocate 1 more than what's provided so posting max doesn't
+ * mean empty
+ */
+ entries = bnxt_re_init_depth(srq_init_attr->attr.max_wr + 1, uctx);
+ if (entries > dev_attr->max_srq_wqes + 1)
+ entries = dev_attr->max_srq_wqes + 1;
+ srq->qplib_srq.max_wqe = entries;
+
+ srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
+ /* 128-byte WQE size for SRQ, so use the max SGEs */
+ srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges);
+ srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
+ srq->srq_limit = srq_init_attr->attr.srq_limit;
+ srq->qplib_srq.eventq_hw_ring_id = rdev->nqr->nq[0].ring_id;
+ srq->qplib_srq.sg_info.pgsize = PAGE_SIZE;
+ srq->qplib_srq.sg_info.pgshft = PAGE_SHIFT;
+
+ if (udata) {
+ rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
+ if (rc)
+ goto fail;
+ }
+
+ rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Create HW SRQ failed!");
+ goto fail;
+ }
+
+ if (udata) {
+ struct bnxt_re_srq_resp resp = {};
+
+ resp.srqid = srq->qplib_srq.id;
+ if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) {
+ hash_add(rdev->srq_hash, &srq->hash_entry, srq->qplib_srq.id);
+ srq->uctx_srq_page = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!srq->uctx_srq_page) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ resp.comp_mask |= BNXT_RE_SRQ_TOGGLE_PAGE_SUPPORT;
+ }
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!");
+ bnxt_qplib_destroy_srq(&rdev->qplib_res,
+ &srq->qplib_srq);
+ goto fail;
+ }
+ }
+ active_srqs = atomic_inc_return(&rdev->stats.res.srq_count);
+ if (active_srqs > rdev->stats.res.srq_watermark)
+ rdev->stats.res.srq_watermark = active_srqs;
+ spin_lock_init(&srq->lock);
+
+ return 0;
+
+fail:
+ ib_umem_release(srq->umem);
+exit:
+ return rc;
+}
+
+int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
+ enum ib_srq_attr_mask srq_attr_mask,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+ ib_srq);
+ struct bnxt_re_dev *rdev = srq->rdev;
+
+ switch (srq_attr_mask) {
+ case IB_SRQ_MAX_WR:
+ /* SRQ resize is not supported */
+ return -EINVAL;
+ case IB_SRQ_LIMIT:
+ /* Change the SRQ threshold */
+ if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
+ return -EINVAL;
+
+ srq->qplib_srq.threshold = srq_attr->srq_limit;
+ bnxt_qplib_srq_arm_db(&srq->qplib_srq.dbinfo, srq->qplib_srq.threshold);
+
+ /* On success, update the shadow */
+ srq->srq_limit = srq_attr->srq_limit;
+ /* No need to build and send a response back through udata */
+ return 0;
+ default:
+ ibdev_err(&rdev->ibdev,
+ "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
+ return -EINVAL;
+ }
+}
+
+int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
+{
+ struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+ ib_srq);
+ struct bnxt_re_srq tsrq;
+ struct bnxt_re_dev *rdev = srq->rdev;
+ int rc;
+
+ /* Get live SRQ attr */
+ tsrq.qplib_srq.id = srq->qplib_srq.id;
+ rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Query HW SRQ failed!");
+ return rc;
+ }
+ srq_attr->max_wr = srq->qplib_srq.max_wqe;
+ srq_attr->max_sge = srq->qplib_srq.max_sge;
+ srq_attr->srq_limit = tsrq.qplib_srq.threshold;
+
+ return 0;
+}
+
+int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+ ib_srq);
+ struct bnxt_qplib_swqe wqe;
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&srq->lock, flags);
+ while (wr) {
+ /* Transcribe each ib_recv_wr to qplib_swqe */
+ wqe.num_sge = wr->num_sge;
+ bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
+ wqe.wr_id = wr->wr_id;
+ wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
+
+ rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
+ if (rc) {
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+ spin_unlock_irqrestore(&srq->lock, flags);
+
+ return rc;
+}
+
+static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
+ struct bnxt_re_qp *qp1_qp,
+ int qp_attr_mask)
+{
+ struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
+ int rc;
+
+ if (qp_attr_mask & IB_QP_STATE) {
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
+ qp->qplib_qp.state = qp1_qp->qplib_qp.state;
+ }
+ if (qp_attr_mask & IB_QP_PKEY_INDEX) {
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
+ qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
+ }
+
+ if (qp_attr_mask & IB_QP_QKEY) {
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
+ /* Using a Random QKEY */
+ qp->qplib_qp.qkey = 0x81818181;
+ }
+ if (qp_attr_mask & IB_QP_SQ_PSN) {
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
+ qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
+ }
+
+ rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
+ if (rc)
+ ibdev_err(&rdev->ibdev, "Failed to modify Shadow QP for QP1");
+ return rc;
+}
+
+int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_udata *udata)
+{
+ struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+ struct bnxt_re_dev *rdev = qp->rdev;
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
+ enum ib_qp_state curr_qp_state, new_qp_state;
+ int rc, entries;
+ unsigned int flags;
+ u8 nw_type;
+
+ if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
+ qp->qplib_qp.modify_flags = 0;
+ if (qp_attr_mask & IB_QP_STATE) {
+ curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
+ new_qp_state = qp_attr->qp_state;
+ if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
+ ib_qp->qp_type, qp_attr_mask)) {
+ ibdev_err(&rdev->ibdev,
+ "Invalid attribute mask: %#x specified ",
+ qp_attr_mask);
+ ibdev_err(&rdev->ibdev,
+ "for qpn: %#x type: %#x",
+ ib_qp->qp_num, ib_qp->qp_type);
+ ibdev_err(&rdev->ibdev,
+ "curr_qp_state=0x%x, new_qp_state=0x%x\n",
+ curr_qp_state, new_qp_state);
+ return -EINVAL;
+ }
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
+ qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
+
+ if (!qp->sumem &&
+ qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+ ibdev_dbg(&rdev->ibdev,
+ "Move QP = %p to flush list\n", qp);
+ flags = bnxt_re_lock_cqs(qp);
+ bnxt_qplib_add_flush_qp(&qp->qplib_qp);
+ bnxt_re_unlock_cqs(qp, flags);
+ }
+ if (!qp->sumem &&
+ qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
+ ibdev_dbg(&rdev->ibdev,
+ "Move QP = %p out of flush list\n", qp);
+ flags = bnxt_re_lock_cqs(qp);
+ bnxt_qplib_clean_qp(&qp->qplib_qp);
+ bnxt_re_unlock_cqs(qp, flags);
+ }
+ }
+ if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
+ qp->qplib_qp.modify_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
+ qp->qplib_qp.en_sqd_async_notify = true;
+ }
+ if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
+ qp->qplib_qp.access =
+ __qp_access_flags_from_ib(qp->qplib_qp.cctx,
+ qp_attr->qp_access_flags);
+ /* LOCAL_WRITE access must be set to allow RC receive */
+ qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE;
+ }
+ if (qp_attr_mask & IB_QP_PKEY_INDEX) {
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
+ qp->qplib_qp.pkey_index = qp_attr->pkey_index;
+ }
+ if (qp_attr_mask & IB_QP_QKEY) {
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
+ qp->qplib_qp.qkey = qp_attr->qkey;
+ }
+ if (qp_attr_mask & IB_QP_AV) {
+ const struct ib_global_route *grh =
+ rdma_ah_read_grh(&qp_attr->ah_attr);
+ const struct ib_gid_attr *sgid_attr;
+ struct bnxt_re_gid_ctx *ctx;
+
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
+ CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
+ CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
+ CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
+ CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
+ CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
+ CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
+ memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
+ sizeof(qp->qplib_qp.ah.dgid.data));
+ qp->qplib_qp.ah.flow_label = grh->flow_label;
+ sgid_attr = grh->sgid_attr;
+ /* Get the HW context of the GID. The reference
+ * of GID table entry is already taken by the caller.
+ */
+ ctx = rdma_read_gid_hw_context(sgid_attr);
+ qp->qplib_qp.ah.sgid_index = ctx->idx;
+ qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
+ qp->qplib_qp.ah.hop_limit = grh->hop_limit;
+ qp->qplib_qp.ah.traffic_class = grh->traffic_class >> 2;
+ qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
+ ether_addr_copy(qp->qplib_qp.ah.dmac,
+ qp_attr->ah_attr.roce.dmac);
+
+ rc = rdma_read_gid_l2_fields(sgid_attr, NULL,
+ &qp->qplib_qp.smac[0]);
+ if (rc)
+ return rc;
+
+ nw_type = rdma_gid_attr_network_type(sgid_attr);
+ switch (nw_type) {
+ case RDMA_NETWORK_IPV4:
+ qp->qplib_qp.nw_type =
+ CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
+ break;
+ case RDMA_NETWORK_IPV6:
+ qp->qplib_qp.nw_type =
+ CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
+ break;
+ default:
+ qp->qplib_qp.nw_type =
+ CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
+ break;
+ }
+ }
+
+ if (qp_attr->qp_state == IB_QPS_RTR) {
+ enum ib_mtu qpmtu;
+
+ qpmtu = iboe_get_mtu(rdev->netdev->mtu);
+ if (qp_attr_mask & IB_QP_PATH_MTU) {
+ if (ib_mtu_enum_to_int(qp_attr->path_mtu) >
+ ib_mtu_enum_to_int(qpmtu))
+ return -EINVAL;
+ qpmtu = qp_attr->path_mtu;
+ }
+
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
+ qp->qplib_qp.path_mtu = __from_ib_mtu(qpmtu);
+ qp->qplib_qp.mtu = ib_mtu_enum_to_int(qpmtu);
+ }
+
+ if (qp_attr_mask & IB_QP_TIMEOUT) {
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
+ qp->qplib_qp.timeout = qp_attr->timeout;
+ }
+ if (qp_attr_mask & IB_QP_RETRY_CNT) {
+ qp->qplib_qp.modify_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
+ qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
+ }
+ if (qp_attr_mask & IB_QP_RNR_RETRY) {
+ qp->qplib_qp.modify_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
+ qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
+ }
+ if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
+ qp->qplib_qp.modify_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
+ qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
+ }
+ if (qp_attr_mask & IB_QP_RQ_PSN) {
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
+ qp->qplib_qp.rq.psn = qp_attr->rq_psn;
+ }
+ if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+ qp->qplib_qp.modify_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
+ /* Cap the max_rd_atomic to device max */
+ qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
+ dev_attr->max_qp_rd_atom);
+ }
+ if (qp_attr_mask & IB_QP_SQ_PSN) {
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
+ qp->qplib_qp.sq.psn = qp_attr->sq_psn;
+ }
+ if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+ if (qp_attr->max_dest_rd_atomic >
+ dev_attr->max_qp_init_rd_atom) {
+ ibdev_err(&rdev->ibdev,
+ "max_dest_rd_atomic requested%d is > dev_max%d",
+ qp_attr->max_dest_rd_atomic,
+ dev_attr->max_qp_init_rd_atom);
+ return -EINVAL;
+ }
+
+ qp->qplib_qp.modify_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
+ qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
+ }
+ if (qp_attr_mask & IB_QP_CAP) {
+ struct bnxt_re_ucontext *uctx =
+ rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
+
+ qp->qplib_qp.modify_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
+ CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
+ CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
+ CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
+ CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
+ if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
+ (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
+ (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
+ (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
+ (qp_attr->cap.max_inline_data >=
+ dev_attr->max_inline_data)) {
+ ibdev_err(&rdev->ibdev,
+ "Create QP failed - max exceeded");
+ return -EINVAL;
+ }
+ entries = bnxt_re_init_depth(qp_attr->cap.max_send_wr, uctx);
+ qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
+ dev_attr->max_qp_wqes + 1);
+ qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
+ qp_attr->cap.max_send_wr;
+ /*
+ * Reserve one slot for the phantom WQE. Some applications may
+ * post one extra entry in that case; allowing it avoids an
+ * unexpected queue-full condition.
+ */
+ qp->qplib_qp.sq.q_full_delta -= 1;
+ qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
+ if (qp->qplib_qp.rq.max_wqe) {
+ entries = bnxt_re_init_depth(qp_attr->cap.max_recv_wr, uctx);
+ qp->qplib_qp.rq.max_wqe =
+ min_t(u32, entries, dev_attr->max_qp_wqes + 1);
+ qp->qplib_qp.rq.max_sw_wqe = qp->qplib_qp.rq.max_wqe;
+ qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
+ qp_attr->cap.max_recv_wr;
+ qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
+ } else {
+ /* SRQ was used prior, just ignore the RQ caps */
+ }
+ }
+ if (qp_attr_mask & IB_QP_DEST_QPN) {
+ qp->qplib_qp.modify_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
+ qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
+ }
+ rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to modify HW QP");
+ return rc;
+ }
+ if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
+ rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
+ return rc;
+}
+
+int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
+{
+ struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+ struct bnxt_re_dev *rdev = qp->rdev;
+ struct bnxt_qplib_qp *qplib_qp;
+ int rc;
+
+ qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
+ if (!qplib_qp)
+ return -ENOMEM;
+
+ qplib_qp->id = qp->qplib_qp.id;
+ qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
+
+ rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to query HW QP");
+ goto out;
+ }
+ qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
+ qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
+ qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
+ qp_attr->qp_access_flags = __qp_access_flags_to_ib(qp->qplib_qp.cctx,
+ qplib_qp->access);
+ qp_attr->pkey_index = qplib_qp->pkey_index;
+ qp_attr->qkey = qplib_qp->qkey;
+ qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
+ rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->udp_sport,
+ qplib_qp->ah.host_sgid_index,
+ qplib_qp->ah.hop_limit,
+ qplib_qp->ah.traffic_class);
+ rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
+ rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
+ ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
+ qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
+ qp_attr->timeout = qplib_qp->timeout;
+ qp_attr->retry_cnt = qplib_qp->retry_cnt;
+ qp_attr->rnr_retry = qplib_qp->rnr_retry;
+ qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
+ qp_attr->port_num = __to_ib_port_num(qplib_qp->port_id);
+ qp_attr->rq_psn = qplib_qp->rq.psn;
+ qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
+ qp_attr->sq_psn = qplib_qp->sq.psn;
+ qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
+ qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
+ IB_SIGNAL_REQ_WR;
+ qp_attr->dest_qp_num = qplib_qp->dest_qpn;
+
+ qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
+ qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
+ qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
+ qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
+ qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
+ qp_init_attr->cap = qp_attr->cap;
+
+out:
+ kfree(qplib_qp);
+ return rc;
+}
+
+/* Routine for sending QP1 packets for RoCE V1 and V2 */
+static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
+ const struct ib_send_wr *wr,
+ struct bnxt_qplib_swqe *wqe,
+ int payload_size)
+{
+ struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
+ ib_ah);
+ struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
+ const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
+ struct bnxt_qplib_sge sge;
+ u8 nw_type;
+ u16 ether_type;
+ union ib_gid dgid;
+ bool is_eth = false;
+ bool is_vlan = false;
+ bool is_grh = false;
+ bool is_udp = false;
+ u8 ip_version = 0;
+ u16 vlan_id = 0xFFFF;
+ void *buf;
+ int i, rc;
+
+ memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
+
+ rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
+ if (rc)
+ return rc;
+
+ /* Get network header type for this GID */
+ nw_type = rdma_gid_attr_network_type(sgid_attr);
+ switch (nw_type) {
+ case RDMA_NETWORK_IPV4:
+ nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
+ break;
+ case RDMA_NETWORK_IPV6:
+ nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
+ break;
+ default:
+ nw_type = BNXT_RE_ROCE_V1_PACKET;
+ break;
+ }
+ memcpy(&dgid.raw, &qplib_ah->dgid, 16);
+ is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
+ if (is_udp) {
+ if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
+ ip_version = 4;
+ ether_type = ETH_P_IP;
+ } else {
+ ip_version = 6;
+ ether_type = ETH_P_IPV6;
+ }
+ is_grh = false;
+ } else {
+ ether_type = ETH_P_IBOE;
+ is_grh = true;
+ }
+
+ is_eth = true;
+ is_vlan = vlan_id && (vlan_id < 0x1000);
+
+ ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
+ ip_version, is_udp, 0, &qp->qp1_hdr);
+
+ /* ETH */
+ ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
+ ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
+
+ /* For vlan, check the sgid for vlan existence */
+
+ if (!is_vlan) {
+ qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
+ } else {
+ qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
+ qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
+ }
+
+ if (is_grh || (ip_version == 6)) {
+ memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
+ sizeof(sgid_attr->gid));
+ memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
+ sizeof(sgid_attr->gid));
+ qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
+ }
+
+ if (ip_version == 4) {
+ qp->qp1_hdr.ip4.tos = 0;
+ qp->qp1_hdr.ip4.id = 0;
+ qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
+ qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
+
+ memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
+ memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
+ qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
+ }
+
+ if (is_udp) {
+ qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
+ qp->qp1_hdr.udp.sport = htons(0x8CD1);
+ qp->qp1_hdr.udp.csum = 0;
+ }
+
+ /* BTH */
+ if (wr->opcode == IB_WR_SEND_WITH_IMM) {
+ qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
+ qp->qp1_hdr.immediate_present = 1;
+ } else {
+ qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
+ }
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ qp->qp1_hdr.bth.solicited_event = 1;
+ /* pad_count */
+ qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
+
+ /* P_key for QP1 is for all members */
+ qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
+ qp->qp1_hdr.bth.destination_qpn = IB_QP1;
+ qp->qp1_hdr.bth.ack_req = 0;
+ qp->send_psn++;
+ qp->send_psn &= BTH_PSN_MASK;
+ qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
+ /* DETH */
+ /* Use the privileged Q_Key for QP1 */
+ qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
+ qp->qp1_hdr.deth.source_qpn = IB_QP1;
+
+ /* Pack the QP1 to the transmit buffer */
+ buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
+ if (buf) {
+ ib_ud_header_pack(&qp->qp1_hdr, buf);
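+ /* Shift the caller's SGEs up by one slot; sg_list[0] will
+ * carry the packed QP1 header built above.
+ */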
+ for (i = wqe->num_sge; i; i--) {
+ wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
+ wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
+ wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
+ }
+
+ /*
+ * Max Header buf size for IPV6 RoCE V2 is 86,
+ * which is same as the QP1 SQ header buffer.
+ * Header buf size for IPV4 RoCE V2 can be 66.
+ * ETH(14) + VLAN(4)+ IP(20) + UDP (8) + BTH(20).
+ * Subtract 20 bytes from QP1 SQ header buf size
+ */
+ if (is_udp && ip_version == 4)
+ sge.size -= 20;
+ /*
+ * Max Header buf size for RoCE V1 is 78.
+ * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
+ * Subtract 8 bytes from QP1 SQ header buf size
+ */
+ if (!is_udp)
+ sge.size -= 8;
+
+ /* Subtract 4 bytes for non vlan packets */
+ if (!is_vlan)
+ sge.size -= 4;
+
+ wqe->sg_list[0].addr = sge.addr;
+ wqe->sg_list[0].lkey = sge.lkey;
+ wqe->sg_list[0].size = sge.size;
+ wqe->num_sge++;
+
+ } else {
+ ibdev_err(&qp->rdev->ibdev, "QP1 buffer is empty!");
+ rc = -ENOMEM;
+ }
+ return rc;
+}
+
+/* The MAD layer only provides a receive SGE sized for ib_grh plus the
+ * MAD datagram; there are no Ethernet headers, Ethertype, BTH, DETH,
+ * or RoCE iCRC. The Cu+ solution must provide a buffer for the entire
+ * receive packet (334 bytes with no VLAN) and then copy the GRH and the
+ * MAD datagram out to the provided SGE.
+ */
+static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
+ const struct ib_recv_wr *wr,
+ struct bnxt_qplib_swqe *wqe,
+ int payload_size)
+{
+ struct bnxt_re_sqp_entries *sqp_entry;
+ struct bnxt_qplib_sge ref, sge;
+ struct bnxt_re_dev *rdev;
+ u32 rq_prod_index;
+
+ rdev = qp->rdev;
+
+ rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
+
+ if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
+ return -ENOMEM;
+
+ /* Create 1 SGE to receive the entire
+ * ethernet packet
+ */
+ /* Save the reference from ULP */
+ ref.addr = wqe->sg_list[0].addr;
+ ref.lkey = wqe->sg_list[0].lkey;
+ ref.size = wqe->sg_list[0].size;
+
+ sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
+
+ /* SGE 1 */
+ wqe->sg_list[0].addr = sge.addr;
+ wqe->sg_list[0].lkey = sge.lkey;
+ wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
+ sge.size -= wqe->sg_list[0].size;
+
+ sqp_entry->sge.addr = ref.addr;
+ sqp_entry->sge.lkey = ref.lkey;
+ sqp_entry->sge.size = ref.size;
+ /* Store the wrid for reporting completion */
+ sqp_entry->wrid = wqe->wr_id;
+ /* Change wqe->wr_id to the table index */
+ wqe->wr_id = rq_prod_index;
+ return 0;
+}
+
+static int is_ud_qp(struct bnxt_re_qp *qp)
+{
+ return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
+ qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
+}
+
+static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
+ const struct ib_send_wr *wr,
+ struct bnxt_qplib_swqe *wqe)
+{
+ struct bnxt_re_ah *ah = NULL;
+
+ if (is_ud_qp(qp)) {
+ ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
+ wqe->send.q_key = ud_wr(wr)->remote_qkey;
+ wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
+ wqe->send.avid = ah->qplib_ah.id;
+ }
+ switch (wr->opcode) {
+ case IB_WR_SEND:
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
+ wqe->send.imm_data = be32_to_cpu(wr->ex.imm_data);
+ break;
+ case IB_WR_SEND_WITH_INV:
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
+ wqe->send.inv_key = wr->ex.invalidate_rkey;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (wr->send_flags & IB_SEND_SIGNALED)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
+ if (wr->send_flags & IB_SEND_FENCE)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
+ if (wr->send_flags & IB_SEND_INLINE)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
+
+ return 0;
+}
+
+static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
+ struct bnxt_qplib_swqe *wqe)
+{
+ switch (wr->opcode) {
+ case IB_WR_RDMA_WRITE:
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
+ break;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
+ wqe->rdma.imm_data = be32_to_cpu(wr->ex.imm_data);
+ break;
+ case IB_WR_RDMA_READ:
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
+ wqe->rdma.inv_key = wr->ex.invalidate_rkey;
+ break;
+ default:
+ return -EINVAL;
+ }
+ wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
+ wqe->rdma.r_key = rdma_wr(wr)->rkey;
+ if (wr->send_flags & IB_SEND_SIGNALED)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
+ if (wr->send_flags & IB_SEND_FENCE)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
+ if (wr->send_flags & IB_SEND_INLINE)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
+
+ return 0;
+}
+
+static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
+ struct bnxt_qplib_swqe *wqe)
+{
+ switch (wr->opcode) {
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
+ wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
+ wqe->atomic.swap_data = atomic_wr(wr)->swap;
+ break;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
+ wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
+ break;
+ default:
+ return -EINVAL;
+ }
+ wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
+ wqe->atomic.r_key = atomic_wr(wr)->rkey;
+ if (wr->send_flags & IB_SEND_SIGNALED)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
+ if (wr->send_flags & IB_SEND_FENCE)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
+ return 0;
+}
+
+static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
+ struct bnxt_qplib_swqe *wqe)
+{
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
+ wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
+
+ if (wr->send_flags & IB_SEND_SIGNALED)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
+
+ return 0;
+}
+
+static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
+ struct bnxt_qplib_swqe *wqe)
+{
+ struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
+ struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
+ int access = wr->access;
+
+ wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
+ wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
+ wqe->frmr.page_list = mr->pages;
+ wqe->frmr.page_list_len = mr->npages;
+ wqe->frmr.levels = qplib_frpl->hwq.level;
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
+
+ if (wr->wr.send_flags & IB_SEND_SIGNALED)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
+
+ if (access & IB_ACCESS_LOCAL_WRITE)
+ wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
+ if (access & IB_ACCESS_REMOTE_READ)
+ wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
+ if (access & IB_ACCESS_REMOTE_WRITE)
+ wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
+ if (access & IB_ACCESS_REMOTE_ATOMIC)
+ wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
+ if (access & IB_ACCESS_MW_BIND)
+ wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
+
+ wqe->frmr.l_key = wr->key;
+ wqe->frmr.length = wr->mr->length;
+ wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K);
+ wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K);
+ wqe->frmr.va = wr->mr->iova;
+ return 0;
+}
+
+static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
+ const struct ib_send_wr *wr,
+ struct bnxt_qplib_swqe *wqe)
+{
+ /* Copy the inline data to the data field */
+ u8 *in_data;
+ u32 i, sge_len;
+ void *sge_addr;
+
+ in_data = wqe->inline_data;
+ for (i = 0; i < wr->num_sge; i++) {
+ sge_addr = (void *)(unsigned long)
+ wr->sg_list[i].addr;
+ sge_len = wr->sg_list[i].length;
+
+ if ((sge_len + wqe->inline_len) >
+ BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
+ ibdev_err(&rdev->ibdev,
+ "Inline data size requested > supported value");
+ return -EINVAL;
+ }
+ sge_len = wr->sg_list[i].length;
+
+ memcpy(in_data, sge_addr, sge_len);
+ in_data += wr->sg_list[i].length;
+ wqe->inline_len += wr->sg_list[i].length;
+ }
+ return wqe->inline_len;
+}
+
+static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
+ const struct ib_send_wr *wr,
+ struct bnxt_qplib_swqe *wqe)
+{
+ int payload_sz = 0;
+
+ if (wr->send_flags & IB_SEND_INLINE)
+ payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
+ else
+ payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
+ wqe->num_sge);
+
+ return payload_sz;
+}
+
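+/*
+ * Workaround for a HW stall on older chips: once a UD/GSI/RawEth QP has
+ * posted BNXT_RE_UD_QP_HW_STALL WQEs, re-issue a modify-QP to RTS and
+ * reset the WQE counter.
+ */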
+static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
+{
+ if ((qp->ib_qp.qp_type == IB_QPT_UD ||
+ qp->ib_qp.qp_type == IB_QPT_GSI ||
+ qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
+ qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
+ int qp_attr_mask;
+ struct ib_qp_attr qp_attr;
+
+ qp_attr_mask = IB_QP_STATE;
+ qp_attr.qp_state = IB_QPS_RTS;
+ bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
+ qp->qplib_qp.wqe_cnt = 0;
+ }
+}
+
+static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
+ struct bnxt_re_qp *qp,
+ const struct ib_send_wr *wr)
+{
+ int rc = 0, payload_sz = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->sq_lock, flags);
+ while (wr) {
+ struct bnxt_qplib_swqe wqe = {};
+
+ /* Common */
+ wqe.num_sge = wr->num_sge;
+ if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
+ ibdev_err(&rdev->ibdev,
+ "Limit exceeded for Send SGEs");
+ rc = -EINVAL;
+ goto bad;
+ }
+
+ payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
+ if (payload_sz < 0) {
+ rc = -EINVAL;
+ goto bad;
+ }
+ wqe.wr_id = wr->wr_id;
+
+ wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
+
+ rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
+ if (!rc)
+ rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
+bad:
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Post send failed opcode = %#x rc = %d",
+ wr->opcode, rc);
+ break;
+ }
+ wr = wr->next;
+ }
+ bnxt_qplib_post_send_db(&qp->qplib_qp);
+ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
+ bnxt_ud_qp_hw_stall_workaround(qp);
+ spin_unlock_irqrestore(&qp->sq_lock, flags);
+ return rc;
+}
+
+static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe)
+{
+ /* Non-wire memory opcodes need an unconditional fence to work
+ * as expected.
+ */
+ if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV ||
+ wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR ||
+ wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR ||
+ wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+}
+
+int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+ struct bnxt_qplib_swqe wqe;
+ int rc = 0, payload_sz = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->sq_lock, flags);
+ while (wr) {
+ /* House keeping */
+ memset(&wqe, 0, sizeof(wqe));
+
+ /* Common */
+ wqe.num_sge = wr->num_sge;
+ if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
+ ibdev_err(&qp->rdev->ibdev,
+ "Limit exceeded for Send SGEs");
+ rc = -EINVAL;
+ goto bad;
+ }
+
+ payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
+ if (payload_sz < 0) {
+ rc = -EINVAL;
+ goto bad;
+ }
+ wqe.wr_id = wr->wr_id;
+
+ switch (wr->opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) {
+ rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
+ payload_sz);
+ if (rc)
+ goto bad;
+ wqe.rawqp1.lflags |=
+ SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
+ }
+ switch (wr->send_flags) {
+ case IB_SEND_IP_CSUM:
+ wqe.rawqp1.lflags |=
+ SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
+ break;
+ default:
+ break;
+ }
+ fallthrough;
+ case IB_WR_SEND_WITH_INV:
+ rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
+ break;
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ case IB_WR_RDMA_READ:
+ rc = bnxt_re_build_rdma_wqe(wr, &wqe);
+ break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ rc = bnxt_re_build_atomic_wqe(wr, &wqe);
+ break;
+ case IB_WR_RDMA_READ_WITH_INV:
+ ibdev_err(&qp->rdev->ibdev,
+ "RDMA Read with Invalidate is not supported");
+ rc = -EINVAL;
+ goto bad;
+ case IB_WR_LOCAL_INV:
+ rc = bnxt_re_build_inv_wqe(wr, &wqe);
+ break;
+ case IB_WR_REG_MR:
+ rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
+ break;
+ default:
+ /* Unsupported WRs */
+ ibdev_err(&qp->rdev->ibdev,
+ "WR (%#x) is not supported", wr->opcode);
+ rc = -EINVAL;
+ goto bad;
+ }
+ if (!rc) {
+ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
+ bnxt_re_legacy_set_uc_fence(&wqe);
+ rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
+ }
+bad:
+ if (rc) {
+ ibdev_err(&qp->rdev->ibdev,
+ "post_send failed op:%#x qps = %#x rc = %d\n",
+ wr->opcode, qp->qplib_qp.state, rc);
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+ bnxt_qplib_post_send_db(&qp->qplib_qp);
+ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
+ bnxt_ud_qp_hw_stall_workaround(qp);
+ spin_unlock_irqrestore(&qp->sq_lock, flags);
+
+ return rc;
+}
+
+static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
+ struct bnxt_re_qp *qp,
+ const struct ib_recv_wr *wr)
+{
+ struct bnxt_qplib_swqe wqe;
+ int rc = 0;
+
+ while (wr) {
+ /* House keeping */
+ memset(&wqe, 0, sizeof(wqe));
+
+ /* Common */
+ wqe.num_sge = wr->num_sge;
+ if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
+ ibdev_err(&rdev->ibdev,
+ "Limit exceeded for Receive SGEs");
+ rc = -EINVAL;
+ break;
+ }
+ bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
+ wqe.wr_id = wr->wr_id;
+ wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
+
+ rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
+ if (rc)
+ break;
+
+ wr = wr->next;
+ }
+ if (!rc)
+ bnxt_qplib_post_recv_db(&qp->qplib_qp);
+ return rc;
+}
+
+int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+ struct bnxt_qplib_swqe wqe;
+ int rc = 0, payload_sz = 0;
+ unsigned long flags;
+ u32 count = 0;
+
+ spin_lock_irqsave(&qp->rq_lock, flags);
+ while (wr) {
+ /* House keeping */
+ memset(&wqe, 0, sizeof(wqe));
+
+ /* Common */
+ wqe.num_sge = wr->num_sge;
+ if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
+ ibdev_err(&qp->rdev->ibdev,
+ "Limit exceeded for Receive SGEs");
+ rc = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+
+ payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
+ wr->num_sge);
+ wqe.wr_id = wr->wr_id;
+ wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
+
+ if (ib_qp->qp_type == IB_QPT_GSI &&
+ qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI)
+ rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
+ payload_sz);
+ if (!rc)
+ rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
+ if (rc) {
+ *bad_wr = wr;
+ break;
+ }
+
+ /* Ring DB if the RQEs posted reaches a threshold value */
+ if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
+ bnxt_qplib_post_recv_db(&qp->qplib_qp);
+ count = 0;
+ }
+
+ wr = wr->next;
+ }
+
+ if (count)
+ bnxt_qplib_post_recv_db(&qp->qplib_qp);
+
+ spin_unlock_irqrestore(&qp->rq_lock, flags);
+
+ return rc;
+}
+
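+/*
+ * Pick the least loaded notification queue (NQ) for a new CQ and bump
+ * its load count; bnxt_re_put_nq() drops the count when the CQ is
+ * destroyed.
+ */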
+static struct bnxt_qplib_nq *bnxt_re_get_nq(struct bnxt_re_dev *rdev)
+{
+ int min, indx;
+
+ mutex_lock(&rdev->nqr->load_lock);
+ for (indx = 0, min = 0; indx < (rdev->nqr->num_msix - 1); indx++) {
+ if (rdev->nqr->nq[min].load > rdev->nqr->nq[indx].load)
+ min = indx;
+ }
+ rdev->nqr->nq[min].load++;
+ mutex_unlock(&rdev->nqr->load_lock);
+
+ return &rdev->nqr->nq[min];
+}
+
+static void bnxt_re_put_nq(struct bnxt_re_dev *rdev, struct bnxt_qplib_nq *nq)
+{
+ mutex_lock(&rdev->nqr->load_lock);
+ nq->load--;
+ mutex_unlock(&rdev->nqr->load_lock);
+}
+
+/* Completion Queues */
+int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+{
+ struct bnxt_qplib_chip_ctx *cctx;
+ struct bnxt_qplib_nq *nq;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_cq *cq;
+
+ cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
+ rdev = cq->rdev;
+ nq = cq->qplib_cq.nq;
+ cctx = rdev->chip_ctx;
+
+ if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
+ free_page((unsigned long)cq->uctx_cq_page);
+ hash_del(&cq->hash_entry);
+ }
+ bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
+
+ bnxt_re_put_nq(rdev, nq);
+ ib_umem_release(cq->umem);
+
+ atomic_dec(&rdev->stats.res.cq_count);
+ kfree(cq->cql);
+ return 0;
+}
+
+int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
+ struct ib_udata *udata = &attrs->driver_udata;
+ struct bnxt_re_ucontext *uctx =
+ rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
+ struct bnxt_qplib_chip_ctx *cctx;
+ int cqe = attr->cqe;
+ int rc, entries;
+ u32 active_cqs;
+
+ if (attr->flags)
+ return -EOPNOTSUPP;
+
+ /* Validate CQ fields */
+ if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
+ ibdev_err(&rdev->ibdev, "Failed to create CQ -max exceeded");
+ return -EINVAL;
+ }
+
+ cq->rdev = rdev;
+ cctx = rdev->chip_ctx;
+ cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
+
+ entries = bnxt_re_init_depth(cqe + 1, uctx);
+ if (entries > dev_attr->max_cq_wqes + 1)
+ entries = dev_attr->max_cq_wqes + 1;
+
+ cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
+ cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
+ if (udata) {
+ struct bnxt_re_cq_req req;
+
+ if (ib_copy_from_udata(&req, udata, sizeof(req))) {
+ rc = -EFAULT;
+ goto fail;
+ }
+
+ cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
+ entries * sizeof(struct cq_base),
+ IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(cq->umem)) {
+ rc = PTR_ERR(cq->umem);
+ goto fail;
+ }
+ cq->qplib_cq.sg_info.umem = cq->umem;
+ cq->qplib_cq.dpi = &uctx->dpi;
+ } else {
+ cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
+ cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
+ GFP_KERNEL);
+ if (!cq->cql) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ cq->qplib_cq.dpi = &rdev->dpi_privileged;
+ }
+ cq->qplib_cq.max_wqe = entries;
+ cq->qplib_cq.coalescing = &rdev->cq_coalescing;
+ cq->qplib_cq.nq = bnxt_re_get_nq(rdev);
+ cq->qplib_cq.cnq_hw_ring_id = cq->qplib_cq.nq->ring_id;
+
+ rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to create HW CQ");
+ goto fail;
+ }
+
+ cq->ib_cq.cqe = entries;
+ cq->cq_period = cq->qplib_cq.period;
+
+ active_cqs = atomic_inc_return(&rdev->stats.res.cq_count);
+ if (active_cqs > rdev->stats.res.cq_watermark)
+ rdev->stats.res.cq_watermark = active_cqs;
+ spin_lock_init(&cq->cq_lock);
+
+ if (udata) {
+ struct bnxt_re_cq_resp resp = {};
+
+ if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
+ hash_add(rdev->cq_hash, &cq->hash_entry, cq->qplib_cq.id);
+ /* Allocate a page */
+ cq->uctx_cq_page = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!cq->uctx_cq_page) {
+ rc = -ENOMEM;
+ goto c2fail;
+ }
+ resp.comp_mask |= BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT;
+ }
+ resp.cqid = cq->qplib_cq.id;
+ resp.tail = cq->qplib_cq.hwq.cons;
+ resp.phase = cq->qplib_cq.period;
+ resp.rsvd = 0;
+ rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
+ bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
+ goto free_mem;
+ }
+ }
+
+ return 0;
+
+free_mem:
+ free_page((unsigned long)cq->uctx_cq_page);
+c2fail:
+ ib_umem_release(cq->umem);
+fail:
+ kfree(cq->cql);
+ return rc;
+}
+
+static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq)
+{
+ struct bnxt_re_dev *rdev = cq->rdev;
+
+ bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
+
+ cq->qplib_cq.max_wqe = cq->resize_cqe;
+ if (cq->resize_umem) {
+ ib_umem_release(cq->umem);
+ cq->umem = cq->resize_umem;
+ cq->resize_umem = NULL;
+ cq->resize_cqe = 0;
+ }
+}
+
+int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+{
+ struct bnxt_qplib_sg_info sg_info = {};
+ struct bnxt_qplib_dpi *orig_dpi = NULL;
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_re_ucontext *uctx = NULL;
+ struct bnxt_re_resize_cq_req req;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_cq *cq;
+ int rc, entries;
+
+ cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
+ rdev = cq->rdev;
+ dev_attr = rdev->dev_attr;
+ if (!ibcq->uobject) {
+ ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (cq->resize_umem) {
+ ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - Busy",
+ cq->qplib_cq.id);
+ return -EBUSY;
+ }
+
+ /* Check that the requested CQ depth is within the supported range */
+ if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
+ ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - out of range cqe %d",
+ cq->qplib_cq.id, cqe);
+ return -EINVAL;
+ }
+
+ uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
+ entries = bnxt_re_init_depth(cqe + 1, uctx);
+ if (entries > dev_attr->max_cq_wqes + 1)
+ entries = dev_attr->max_cq_wqes + 1;
+
+ /* uverbs consumer */
+ if (ib_copy_from_udata(&req, udata, sizeof(req))) {
+ rc = -EFAULT;
+ goto fail;
+ }
+
+ cq->resize_umem = ib_umem_get(&rdev->ibdev, req.cq_va,
+ entries * sizeof(struct cq_base),
+ IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(cq->resize_umem)) {
+ rc = PTR_ERR(cq->resize_umem);
+ ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %pe\n",
+ __func__, cq->resize_umem);
+ cq->resize_umem = NULL;
+ goto fail;
+ }
+ cq->resize_cqe = entries;
+ memcpy(&sg_info, &cq->qplib_cq.sg_info, sizeof(sg_info));
+ orig_dpi = cq->qplib_cq.dpi;
+
+ cq->qplib_cq.sg_info.umem = cq->resize_umem;
+ cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
+ cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
+ cq->qplib_cq.dpi = &uctx->dpi;
+
+ rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Resize HW CQ %#x failed!",
+ cq->qplib_cq.id);
+ goto fail;
+ }
+
+ cq->ib_cq.cqe = cq->resize_cqe;
+ atomic_inc(&rdev->stats.res.resize_count);
+
+ return 0;
+
+fail:
+ if (cq->resize_umem) {
+ ib_umem_release(cq->resize_umem);
+ cq->resize_umem = NULL;
+ cq->resize_cqe = 0;
+ memcpy(&cq->qplib_cq.sg_info, &sg_info, sizeof(sg_info));
+ cq->qplib_cq.dpi = orig_dpi;
+ }
+ return rc;
+}
+
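+/* Map requester (send-side) CQE status codes from the hardware to IB WC status */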
+static u8 __req_to_ib_wc_status(u8 qstatus)
+{
+ switch (qstatus) {
+ case CQ_REQ_STATUS_OK:
+ return IB_WC_SUCCESS;
+ case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
+ return IB_WC_BAD_RESP_ERR;
+ case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
+ return IB_WC_LOC_LEN_ERR;
+ case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
+ return IB_WC_LOC_QP_OP_ERR;
+ case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
+ return IB_WC_LOC_PROT_ERR;
+ case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
+ return IB_WC_GENERAL_ERR;
+ case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
+ return IB_WC_REM_INV_REQ_ERR;
+ case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
+ return IB_WC_REM_ACCESS_ERR;
+ case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
+ return IB_WC_REM_OP_ERR;
+ case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
+ return IB_WC_RNR_RETRY_EXC_ERR;
+ case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
+ return IB_WC_RETRY_EXC_ERR;
+ case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
+ return IB_WC_WR_FLUSH_ERR;
+ default:
+ return IB_WC_GENERAL_ERR;
+ }
+ return 0;
+}
+
+static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
+{
+ switch (qstatus) {
+ case CQ_RES_RAWETH_QP1_STATUS_OK:
+ return IB_WC_SUCCESS;
+ case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
+ return IB_WC_LOC_ACCESS_ERR;
+ case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
+ return IB_WC_LOC_LEN_ERR;
+ case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
+ return IB_WC_LOC_PROT_ERR;
+ case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
+ return IB_WC_LOC_QP_OP_ERR;
+ case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
+ return IB_WC_GENERAL_ERR;
+ case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
+ return IB_WC_WR_FLUSH_ERR;
+ case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
+ return IB_WC_WR_FLUSH_ERR;
+ default:
+ return IB_WC_GENERAL_ERR;
+ }
+}
+
+static u8 __rc_to_ib_wc_status(u8 qstatus)
+{
+ switch (qstatus) {
+ case CQ_RES_RC_STATUS_OK:
+ return IB_WC_SUCCESS;
+ case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
+ return IB_WC_LOC_ACCESS_ERR;
+ case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
+ return IB_WC_LOC_LEN_ERR;
+ case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
+ return IB_WC_LOC_PROT_ERR;
+ case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
+ return IB_WC_LOC_QP_OP_ERR;
+ case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
+ return IB_WC_GENERAL_ERR;
+ case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
+ return IB_WC_REM_INV_REQ_ERR;
+ case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
+ return IB_WC_WR_FLUSH_ERR;
+ case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
+ return IB_WC_WR_FLUSH_ERR;
+ default:
+ return IB_WC_GENERAL_ERR;
+ }
+}
+
+static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
+{
+ switch (cqe->type) {
+ case BNXT_QPLIB_SWQE_TYPE_SEND:
+ wc->opcode = IB_WC_SEND;
+ break;
+ case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
+ wc->opcode = IB_WC_SEND;
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ break;
+ case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
+ wc->opcode = IB_WC_SEND;
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ break;
+ case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
+ wc->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
+ wc->opcode = IB_WC_RDMA_WRITE;
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ break;
+ case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
+ wc->opcode = IB_WC_RDMA_READ;
+ break;
+ case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
+ wc->opcode = IB_WC_COMP_SWAP;
+ break;
+ case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
+ wc->opcode = IB_WC_FETCH_ADD;
+ break;
+ case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
+ wc->opcode = IB_WC_LOCAL_INV;
+ break;
+ case BNXT_QPLIB_SWQE_TYPE_REG_MR:
+ wc->opcode = IB_WC_REG_MR;
+ break;
+ default:
+ wc->opcode = IB_WC_SEND;
+ break;
+ }
+
+ wc->status = __req_to_ib_wc_status(cqe->status);
+}
+
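+/*
+ * Classify a raw QP1 receive completion as RoCE v1, RoCE v2/IPv4 or
+ * RoCE v2/IPv6 based on the raweth_qp1 flags; returns -1 for non-RoCE frames.
+ */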
+static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
+ u16 raweth_qp1_flags2)
+{
+ bool is_ipv6 = false, is_ipv4 = false;
+
+ /* Bits 9:6 of raweth_qp1_flags indicate the itype */
+ if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
+ != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
+ return -1;
+
+ if (raweth_qp1_flags2 &
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
+ raweth_qp1_flags2 &
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
+ /* Bit 8 of raweth_qp1_flags2 indicates ip_type: 0 - IPv4, 1 - IPv6 */
+ (raweth_qp1_flags2 &
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
+ (is_ipv6 = true) : (is_ipv4 = true);
+ return ((is_ipv6) ?
+ BNXT_RE_ROCEV2_IPV6_PACKET :
+ BNXT_RE_ROCEV2_IPV4_PACKET);
+ } else {
+ return BNXT_RE_ROCE_V1_PACKET;
+ }
+}
+
+static int bnxt_re_to_ib_nw_type(int nw_type)
+{
+ u8 nw_hdr_type = 0xFF;
+
+ switch (nw_type) {
+ case BNXT_RE_ROCE_V1_PACKET:
+ nw_hdr_type = RDMA_NETWORK_ROCE_V1;
+ break;
+ case BNXT_RE_ROCEV2_IPV4_PACKET:
+ nw_hdr_type = RDMA_NETWORK_IPV4;
+ break;
+ case BNXT_RE_ROCEV2_IPV6_PACKET:
+ nw_hdr_type = RDMA_NETWORK_IPV6;
+ break;
+ }
+ return nw_hdr_type;
+}
+
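+/*
+ * Inspect the QP1 receive header buffer to decide whether the frame is a
+ * locally looped-back RoCE packet (RoCE v1 ethertype, or UDP destined to
+ * the RoCE v2 port).
+ */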
+static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
+ void *rq_hdr_buf)
+{
+ u8 *tmp_buf = NULL;
+ struct ethhdr *eth_hdr;
+ u16 eth_type;
+ bool rc = false;
+
+ tmp_buf = (u8 *)rq_hdr_buf;
+ /*
+ * If the destination MAC is not the same as the interface MAC,
+ * this could be a loopback or a multicast address; check whether
+ * it is a loopback packet.
+ */
+ if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
+ tmp_buf += 4;
+ /* Check the ether type */
+ eth_hdr = (struct ethhdr *)tmp_buf;
+ eth_type = ntohs(eth_hdr->h_proto);
+ switch (eth_type) {
+ case ETH_P_IBOE:
+ rc = true;
+ break;
+ case ETH_P_IP:
+ case ETH_P_IPV6: {
+ u32 len;
+ struct udphdr *udp_hdr;
+
+ len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
+ sizeof(struct ipv6hdr));
+ tmp_buf += sizeof(struct ethhdr) + len;
+ udp_hdr = (struct udphdr *)tmp_buf;
+ if (ntohs(udp_hdr->dest) ==
+ ROCE_V2_UDP_DPORT)
+ rc = true;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ return rc;
+}
+
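+/*
+ * Relay a raw QP1 receive completion through the shadow GSI QP: save the CQE,
+ * build SGEs that skip the Ethernet (and loopback) headers, repost a receive
+ * buffer on the shadow QP and send the payload to it.
+ */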
+static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
+ struct bnxt_qplib_cqe *cqe)
+{
+ struct bnxt_re_dev *rdev = gsi_qp->rdev;
+ struct bnxt_re_sqp_entries *sqp_entry = NULL;
+ struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp;
+ dma_addr_t shrq_hdr_buf_map;
+ struct ib_sge s_sge[2] = {};
+ struct ib_sge r_sge[2] = {};
+ struct bnxt_re_ah *gsi_sah;
+ struct ib_recv_wr rwr = {};
+ dma_addr_t rq_hdr_buf_map;
+ struct ib_ud_wr udwr = {};
+ struct ib_send_wr *swr;
+ u32 skip_bytes = 0;
+ int pkt_type = 0;
+ void *rq_hdr_buf;
+ u32 offset = 0;
+ u32 tbl_idx;
+ int rc;
+
+ swr = &udwr.wr;
+ tbl_idx = cqe->wr_id;
+
+ rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf +
+ (tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size);
+ rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
+ tbl_idx);
+
+ /* Shadow QP header buffer */
+ shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
+ tbl_idx);
+ sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
+
+ /* Store this cqe */
+ memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
+ sqp_entry->qp1_qp = gsi_qp;
+
+ /* Find packet type from the cqe */
+
+ pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
+ cqe->raweth_qp1_flags2);
+ if (pkt_type < 0) {
+ ibdev_err(&rdev->ibdev, "Invalid packet\n");
+ return -EINVAL;
+ }
+
+ /* Adjust the offset for the user buffer and post in the rq */
+
+ if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
+ offset = 20;
+
+ /*
+ * QP1 loopback packet has 4 bytes of internal header before
+ * ether header. Skip these four bytes.
+ */
+ if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
+ skip_bytes = 4;
+
+ /* First send SGE. Skip the ether header */
+ s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
+ + skip_bytes;
+ s_sge[0].lkey = 0xFFFFFFFF;
+ s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
+ BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
+
+ /* Second Send SGE */
+ s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
+ BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
+ if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
+ s_sge[1].addr += 8;
+ s_sge[1].lkey = 0xFFFFFFFF;
+ s_sge[1].length = 256;
+
+ /* First recv SGE */
+
+ r_sge[0].addr = shrq_hdr_buf_map;
+ r_sge[0].lkey = 0xFFFFFFFF;
+ r_sge[0].length = 40;
+
+ r_sge[1].addr = sqp_entry->sge.addr + offset;
+ r_sge[1].lkey = sqp_entry->sge.lkey;
+ r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
+
+ /* Create receive work request */
+ rwr.num_sge = 2;
+ rwr.sg_list = r_sge;
+ rwr.wr_id = tbl_idx;
+ rwr.next = NULL;
+
+ rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to post Rx buffers to shadow QP");
+ return -ENOMEM;
+ }
+
+ swr->num_sge = 2;
+ swr->sg_list = s_sge;
+ swr->wr_id = tbl_idx;
+ swr->opcode = IB_WR_SEND;
+ swr->next = NULL;
+ gsi_sah = rdev->gsi_ctx.gsi_sah;
+ udwr.ah = &gsi_sah->ib_ah;
+ udwr.remote_qpn = gsi_sqp->qplib_qp.id;
+ udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
+
+ /* Post the received data on the shadow QP's send queue */
+ return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
+}
+
+static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
+ struct bnxt_qplib_cqe *cqe)
+{
+ wc->opcode = IB_WC_RECV;
+ wc->status = __rawqp1_to_ib_wc_status(cqe->status);
+ wc->wc_flags |= IB_WC_GRH;
+}
+
+static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
+ u16 vlan_id)
+{
+ /*
+ * Check if the VLAN is configured in the host. If not configured, it
+ * can be a transparent VLAN, so don't report the VLAN ID.
+ */
+ if (!__vlan_find_dev_deep_rcu(rdev->netdev,
+ htons(ETH_P_8021Q), vlan_id))
+ return false;
+ return true;
+}
+
+static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
+ u16 *vid, u8 *sl)
+{
+ bool ret = false;
+ u32 metadata;
+ u16 tpid;
+
+ metadata = orig_cqe->raweth_qp1_metadata;
+ if (orig_cqe->raweth_qp1_flags2 &
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
+ tpid = ((metadata &
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
+ if (tpid == ETH_P_8021Q) {
+ *vid = metadata &
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
+ *sl = (metadata &
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
+static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
+ struct bnxt_qplib_cqe *cqe)
+{
+ wc->opcode = IB_WC_RECV;
+ wc->status = __rc_to_ib_wc_status(cqe->status);
+
+ if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ if (cqe->flags & CQ_RES_RC_FLAGS_INV)
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
+ (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
+ wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+}
+
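+/*
+ * Build the user-visible work completion for a GSI receive from the original
+ * QP1 CQE saved in the shadow-QP table (wr_id, length, VLAN, network header).
+ */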
+static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
+ struct ib_wc *wc,
+ struct bnxt_qplib_cqe *cqe)
+{
+ struct bnxt_re_dev *rdev = gsi_sqp->rdev;
+ struct bnxt_re_qp *gsi_qp = NULL;
+ struct bnxt_qplib_cqe *orig_cqe = NULL;
+ struct bnxt_re_sqp_entries *sqp_entry = NULL;
+ int nw_type;
+ u32 tbl_idx;
+ u16 vlan_id;
+ u8 sl;
+
+ tbl_idx = cqe->wr_id;
+
+ sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
+ gsi_qp = sqp_entry->qp1_qp;
+ orig_cqe = &sqp_entry->cqe;
+
+ wc->wr_id = sqp_entry->wrid;
+ wc->byte_len = orig_cqe->length;
+ wc->qp = &gsi_qp->ib_qp;
+
+ wc->ex.imm_data = cpu_to_be32(orig_cqe->immdata);
+ wc->src_qp = orig_cqe->src_qp;
+ memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
+ if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
+ if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
+ wc->vlan_id = vlan_id;
+ wc->sl = sl;
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ }
+ }
+ wc->port_num = 1;
+ wc->vendor_err = orig_cqe->status;
+
+ wc->opcode = IB_WC_RECV;
+ wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
+ wc->wc_flags |= IB_WC_GRH;
+
+ nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
+ orig_cqe->raweth_qp1_flags2);
+ if (nw_type >= 0) {
+ wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
+ wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
+ }
+}
+
+static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
+ struct ib_wc *wc,
+ struct bnxt_qplib_cqe *cqe)
+{
+ struct bnxt_re_dev *rdev;
+ u16 vlan_id = 0;
+ u8 nw_type;
+
+ rdev = qp->rdev;
+ wc->opcode = IB_WC_RECV;
+ wc->status = __rc_to_ib_wc_status(cqe->status);
+
+ if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ /* report only on GSI QP for Thor */
+ if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) {
+ wc->wc_flags |= IB_WC_GRH;
+ memcpy(wc->smac, cqe->smac, ETH_ALEN);
+ wc->wc_flags |= IB_WC_WITH_SMAC;
+ if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
+ vlan_id = (cqe->cfa_meta & 0xFFF);
+ }
+ /* Mark only if vlan_id is non-zero */
+ if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
+ wc->vlan_id = vlan_id;
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ }
+ nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
+ CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
+ wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
+ wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
+ }
+
+}
+
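+/*
+ * Post a phantom WQE (fence MW bind) on the send queue; used by the CQ poll
+ * path when the qplib layer flags that the SQ needs one.
+ */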
+static int send_phantom_wqe(struct bnxt_re_qp *qp)
+{
+ struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&qp->sq_lock, flags);
+
+ rc = bnxt_re_bind_fence_mw(lib_qp);
+ if (!rc) {
+ lib_qp->sq.phantom_wqe_cnt++;
+ ibdev_dbg(&qp->rdev->ibdev,
+ "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
+ lib_qp->id, lib_qp->sq.hwq.prod,
+ HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
+ lib_qp->sq.phantom_wqe_cnt);
+ }
+
+ spin_unlock_irqrestore(&qp->sq_lock, flags);
+ return rc;
+}
+
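+/*
+ * Poll the CQ. For user CQs only a pending resize is completed; for kernel
+ * CQs the qplib CQ is drained into the CQL array and each CQE is transcribed
+ * into an ib_wc, with special handling for GSI/shadow-QP completions.
+ */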
+int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
+{
+ struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
+ struct bnxt_re_qp *qp, *sh_qp;
+ struct bnxt_qplib_cqe *cqe;
+ int i, ncqe, budget;
+ struct bnxt_qplib_q *sq;
+ struct bnxt_qplib_qp *lib_qp;
+ u32 tbl_idx;
+ struct bnxt_re_sqp_entries *sqp_entry = NULL;
+ unsigned long flags;
+
+ /* User CQ; the only processing we do is to
+ * complete any pending CQ resize operation.
+ */
+ if (cq->umem) {
+ if (cq->resize_umem)
+ bnxt_re_resize_cq_complete(cq);
+ return 0;
+ }
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ budget = min_t(u32, num_entries, cq->max_cql);
+ num_entries = budget;
+ if (!cq->cql) {
+ ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use");
+ goto exit;
+ }
+ cqe = &cq->cql[0];
+ while (budget) {
+ lib_qp = NULL;
+ ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
+ if (lib_qp) {
+ sq = &lib_qp->sq;
+ if (sq->send_phantom) {
+ qp = container_of(lib_qp,
+ struct bnxt_re_qp, qplib_qp);
+ if (send_phantom_wqe(qp) == -ENOMEM)
+ ibdev_err(&cq->rdev->ibdev,
+ "Phantom failed! Scheduled to send again\n");
+ else
+ sq->send_phantom = false;
+ }
+ }
+ if (ncqe < budget)
+ ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
+ cqe + ncqe,
+ budget - ncqe);
+
+ if (!ncqe)
+ break;
+
+ for (i = 0; i < ncqe; i++, cqe++) {
+ /* Transcribe each qplib_wqe back to ib_wc */
+ memset(wc, 0, sizeof(*wc));
+
+ wc->wr_id = cqe->wr_id;
+ wc->byte_len = cqe->length;
+ qp = container_of
+ ((struct bnxt_qplib_qp *)
+ (unsigned long)(cqe->qp_handle),
+ struct bnxt_re_qp, qplib_qp);
+ wc->qp = &qp->ib_qp;
+ if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
+ wc->ex.imm_data = cpu_to_be32(cqe->immdata);
+ else
+ wc->ex.invalidate_rkey = cqe->invrkey;
+ wc->src_qp = cqe->src_qp;
+ memcpy(wc->smac, cqe->smac, ETH_ALEN);
+ wc->port_num = 1;
+ wc->vendor_err = cqe->status;
+
+ switch (cqe->opcode) {
+ case CQ_BASE_CQE_TYPE_REQ:
+ sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
+ if (sh_qp &&
+ qp->qplib_qp.id == sh_qp->qplib_qp.id) {
+ /* Handle this completion with
+ * the stored completion
+ */
+ memset(wc, 0, sizeof(*wc));
+ continue;
+ }
+ bnxt_re_process_req_wc(wc, cqe);
+ break;
+ case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
+ if (!cqe->status) {
+ int rc = 0;
+
+ rc = bnxt_re_process_raw_qp_pkt_rx
+ (qp, cqe);
+ if (!rc) {
+ memset(wc, 0, sizeof(*wc));
+ continue;
+ }
+ cqe->status = -1;
+ }
+ /* Errors need not be looped back.
+ * But change the wr_id to the one
+ * stored in the table
+ */
+ tbl_idx = cqe->wr_id;
+ sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx];
+ wc->wr_id = sqp_entry->wrid;
+ bnxt_re_process_res_rawqp1_wc(wc, cqe);
+ break;
+ case CQ_BASE_CQE_TYPE_RES_RC:
+ bnxt_re_process_res_rc_wc(wc, cqe);
+ break;
+ case CQ_BASE_CQE_TYPE_RES_UD:
+ sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
+ if (sh_qp &&
+ qp->qplib_qp.id == sh_qp->qplib_qp.id) {
+ /* Handle this completion with
+ * the stored completion
+ */
+ if (cqe->status) {
+ continue;
+ } else {
+ bnxt_re_process_res_shadow_qp_wc
+ (qp, wc, cqe);
+ break;
+ }
+ }
+ bnxt_re_process_res_ud_wc(qp, wc, cqe);
+ break;
+ default:
+ ibdev_err(&cq->rdev->ibdev,
+ "POLL CQ : type 0x%x not handled",
+ cqe->opcode);
+ continue;
+ }
+ wc++;
+ budget--;
+ }
+ }
+exit:
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+ return num_entries - budget;
+}
+
+int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
+ enum ib_cq_notify_flags ib_cqn_flags)
+{
+ struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
+ int type = 0, rc = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ /* Trigger on the very next completion */
+ if (ib_cqn_flags & IB_CQ_NEXT_COMP)
+ type = DBC_DBC_TYPE_CQ_ARMALL;
+ /* Trigger on the next solicited completion */
+ else if (ib_cqn_flags & IB_CQ_SOLICITED)
+ type = DBC_DBC_TYPE_CQ_ARMSE;
+
+ /* Poll to see if there are missed events */
+ if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+ !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
+ rc = 1;
+ goto exit;
+ }
+ bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
+
+exit:
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+ return rc;
+}
+
+/* Memory Regions */
+struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
+{
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct bnxt_re_mr *mr;
+ u32 active_mrs;
+ int rc;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->rdev = rdev;
+ mr->qplib_mr.pd = &pd->qplib_pd;
+ mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
+ mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
+
+ if (mr_access_flags & IB_ACCESS_RELAXED_ORDERING)
+ bnxt_re_check_and_set_relaxed_ordering(rdev, &mr->qplib_mr);
+
+ /* Allocate and register 0 as the address */
+ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ if (rc)
+ goto fail;
+
+ mr->qplib_mr.hwq.level = PBL_LVL_MAX;
+ mr->qplib_mr.total_size = -1; /* Infinite length */
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, 0,
+ PAGE_SIZE);
+ if (rc)
+ goto fail_mr;
+
+ mr->ib_mr.lkey = mr->qplib_mr.lkey;
+ if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_ATOMIC))
+ mr->ib_mr.rkey = mr->ib_mr.lkey;
+ active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
+ if (active_mrs > rdev->stats.res.mr_watermark)
+ rdev->stats.res.mr_watermark = active_mrs;
+
+ return &mr->ib_mr;
+
+fail_mr:
+ bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+fail:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
+{
+ struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
+ struct bnxt_re_dev *rdev = mr->rdev;
+ int rc;
+
+ rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc);
+ return rc;
+ }
+
+ if (mr->pages) {
+ rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
+ &mr->qplib_frpl);
+ kfree(mr->pages);
+ mr->npages = 0;
+ mr->pages = NULL;
+ }
+ ib_umem_release(mr->ib_umem);
+
+ kfree(mr);
+ atomic_dec(&rdev->stats.res.mr_count);
+ return rc;
+}
+
+static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
+{
+ struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
+
+ if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
+ return -ENOMEM;
+
+ mr->pages[mr->npages++] = addr;
+ return 0;
+}
+
+int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
+{
+ struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
+
+ mr->npages = 0;
+ return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
+}
+
+struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
+ u32 max_num_sg)
+{
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct bnxt_re_mr *mr = NULL;
+ u32 active_mrs;
+ int rc;
+
+ if (type != IB_MR_TYPE_MEM_REG) {
+ ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type);
+ return ERR_PTR(-EINVAL);
+ }
+ if (max_num_sg > MAX_PBL_LVL_1_PGS)
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->rdev = rdev;
+ mr->qplib_mr.pd = &pd->qplib_pd;
+ mr->qplib_mr.access_flags = BNXT_QPLIB_FR_PMR;
+ mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
+
+ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ if (rc)
+ goto bail;
+
+ mr->ib_mr.lkey = mr->qplib_mr.lkey;
+ mr->ib_mr.rkey = mr->ib_mr.lkey;
+
+ mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
+ if (!mr->pages) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
+ &mr->qplib_frpl, max_num_sg);
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate HW FR page list");
+ goto fail_mr;
+ }
+
+ active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
+ if (active_mrs > rdev->stats.res.mr_watermark)
+ rdev->stats.res.mr_watermark = active_mrs;
+ return &mr->ib_mr;
+
+fail_mr:
+ kfree(mr->pages);
+fail:
+ bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+bail:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct bnxt_re_mw *mw;
+ u32 active_mws;
+ int rc;
+
+ mw = kzalloc(sizeof(*mw), GFP_KERNEL);
+ if (!mw)
+ return ERR_PTR(-ENOMEM);
+ mw->rdev = rdev;
+ mw->qplib_mw.pd = &pd->qplib_pd;
+
+ mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
+ CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
+ CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
+ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Allocate MW failed!");
+ goto fail;
+ }
+ mw->ib_mw.rkey = mw->qplib_mw.rkey;
+
+ active_mws = atomic_inc_return(&rdev->stats.res.mw_count);
+ if (active_mws > rdev->stats.res.mw_watermark)
+ rdev->stats.res.mw_watermark = active_mws;
+ return &mw->ib_mw;
+
+fail:
+ kfree(mw);
+ return ERR_PTR(rc);
+}
+
+int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
+{
+ struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
+ struct bnxt_re_dev *rdev = mw->rdev;
+ int rc;
+
+ rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc);
+ return rc;
+ }
+
+ kfree(mw);
+ atomic_dec(&rdev->stats.res.mw_count);
+ return rc;
+}
+
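+/*
+ * Common user MR registration helper: validate the length, pick the best
+ * supported page size for the umem and register the MR with the HW.
+ */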
+static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64 virt_addr,
+ int mr_access_flags, struct ib_umem *umem)
+{
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ unsigned long page_size;
+ struct bnxt_re_mr *mr;
+ int umem_pgs, rc;
+ u32 active_mrs;
+
+ if (length > BNXT_RE_MAX_MR_SIZE) {
+ ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported:%lld\n",
+ length, BNXT_RE_MAX_MR_SIZE);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ page_size = ib_umem_find_best_pgsz(umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
+ if (!page_size) {
+ ibdev_err(&rdev->ibdev, "umem page size unsupported!");
+ return ERR_PTR(-EINVAL);
+ }
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->rdev = rdev;
+ mr->qplib_mr.pd = &pd->qplib_pd;
+ mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
+ mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
+
+ if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) {
+ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
+ rc = -EIO;
+ goto free_mr;
+ }
+ /* The fixed portion of the rkey is the same as the lkey */
+ mr->ib_mr.rkey = mr->qplib_mr.rkey;
+ } else {
+ mr->qplib_mr.flags = CMDQ_REGISTER_MR_FLAGS_ALLOC_MR;
+ }
+ mr->ib_umem = umem;
+ mr->qplib_mr.va = virt_addr;
+ mr->qplib_mr.total_size = length;
+
+ if (mr_access_flags & IB_ACCESS_RELAXED_ORDERING)
+ bnxt_re_check_and_set_relaxed_ordering(rdev, &mr->qplib_mr);
+
+ umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
+ umem_pgs, page_size);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to register user MR - rc = %d\n", rc);
+ rc = -EIO;
+ goto free_mrw;
+ }
+
+ mr->ib_mr.lkey = mr->qplib_mr.lkey;
+ mr->ib_mr.rkey = mr->qplib_mr.lkey;
+ active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
+ if (active_mrs > rdev->stats.res.mr_watermark)
+ rdev->stats.res.mr_watermark = active_mrs;
+
+ return &mr->ib_mr;
+
+free_mrw:
+ bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+free_mr:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_dmah *dmah,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct ib_umem *umem;
+ struct ib_mr *ib_mr;
+
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
+ if (IS_ERR(umem))
+ return ERR_CAST(umem);
+
+ ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
+ if (IS_ERR(ib_mr))
+ ib_umem_release(umem);
+ return ib_mr;
+}
+
+struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
+ u64 length, u64 virt_addr, int fd,
+ int mr_access_flags,
+ struct ib_dmah *dmah,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct ib_umem_dmabuf *umem_dmabuf;
+ struct ib_umem *umem;
+ struct ib_mr *ib_mr;
+
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length,
+ fd, mr_access_flags);
+ if (IS_ERR(umem_dmabuf))
+ return ERR_CAST(umem_dmabuf);
+
+ umem = &umem_dmabuf->umem;
+
+ ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
+ if (IS_ERR(ib_mr))
+ ib_umem_release(umem);
+ return ib_mr;
+}
+
+int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
+{
+ struct ib_device *ibdev = ctx->device;
+ struct bnxt_re_ucontext *uctx =
+ container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
+ struct bnxt_re_user_mmap_entry *entry;
+ struct bnxt_re_uctx_resp resp = {};
+ struct bnxt_re_uctx_req ureq = {};
+ u32 chip_met_rev_num = 0;
+ int rc;
+
+ ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver);
+
+ if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
+ ibdev_dbg(ibdev, " is different from the device %d ",
+ BNXT_RE_ABI_VERSION);
+ return -EPERM;
+ }
+
+ uctx->rdev = rdev;
+
+ uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
+ if (!uctx->shpg) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ spin_lock_init(&uctx->sh_lock);
+
+ resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
+ chip_met_rev_num = rdev->chip_ctx->chip_num;
+ chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) <<
+ BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
+ chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) <<
+ BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
+ resp.chip_id0 = chip_met_rev_num;
+ /* Temp: use xa_alloc instead */
+ resp.dev_id = rdev->en_dev->pdev->devfn;
+ resp.max_qp = rdev->qplib_ctx.qpc_count;
+ resp.pg_size = PAGE_SIZE;
+ resp.cqe_sz = sizeof(struct cq_base);
+ resp.max_cqd = dev_attr->max_cq_wqes;
+
+ if (rdev->chip_ctx->modes.db_push)
+ resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED;
+
+ entry = bnxt_re_mmap_entry_insert(uctx, 0, BNXT_RE_MMAP_SH_PAGE, NULL);
+ if (!entry) {
+ rc = -ENOMEM;
+ goto cfail;
+ }
+ uctx->shpage_mmap = &entry->rdma_entry;
+ if (rdev->pacing.dbr_pacing)
+ resp.comp_mask |= BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED;
+
+ if (_is_host_msn_table(rdev->qplib_res.dattr->dev_cap_flags2))
+ resp.comp_mask |= BNXT_RE_UCNTX_CMASK_MSN_TABLE_ENABLED;
+
+ if (udata->inlen >= sizeof(ureq)) {
+ rc = ib_copy_from_udata(&ureq, udata, min(udata->inlen, sizeof(ureq)));
+ if (rc)
+ goto cfail;
+ if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT) {
+ resp.comp_mask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
+ uctx->cmask |= BNXT_RE_UCNTX_CAP_POW2_DISABLED;
+ }
+ if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_VAR_WQE_SUPPORT) {
+ resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
+ resp.mode = rdev->chip_ctx->modes.wqe_mode;
+ if (resp.mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
+ uctx->cmask |= BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED;
+ }
+ }
+
+ rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
+ if (rc) {
+ ibdev_err(ibdev, "Failed to copy user context");
+ rc = -EFAULT;
+ goto cfail;
+ }
+
+ return 0;
+cfail:
+ free_page((unsigned long)uctx->shpg);
+ uctx->shpg = NULL;
+fail:
+ return rc;
+}
+
+void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
+{
+ struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
+ struct bnxt_re_ucontext,
+ ib_uctx);
+
+ struct bnxt_re_dev *rdev = uctx->rdev;
+
+ rdma_user_mmap_entry_remove(uctx->shpage_mmap);
+ uctx->shpage_mmap = NULL;
+ if (uctx->shpg)
+ free_page((unsigned long)uctx->shpg);
+
+ if (uctx->dpi.dbr) {
+ /* Free DPI only if this is the first PD allocated by the
+ * application and mark the context dpi as NULL
+ */
+ bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->dpi);
+ uctx->dpi.dbr = NULL;
+ }
+}
+
+static int bnxt_re_setup_vnic(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp)
+{
+ int rc;
+
+ rc = bnxt_re_hwrm_alloc_vnic(rdev);
+ if (rc)
+ return rc;
+
+ rc = bnxt_re_hwrm_cfg_vnic(rdev, qp->qplib_qp.id);
+ if (rc)
+ goto out_free_vnic;
+
+ return 0;
+out_free_vnic:
+ bnxt_re_hwrm_free_vnic(rdev);
+ return rc;
+}
+
+struct ib_flow *bnxt_re_create_flow(struct ib_qp *ib_qp,
+ struct ib_flow_attr *attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+ struct bnxt_re_dev *rdev = qp->rdev;
+ struct bnxt_re_flow *flow;
+ int rc;
+
+ if (attr->type != IB_FLOW_ATTR_SNIFFER ||
+ !rdev->rcfw.roce_mirror)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mutex_lock(&rdev->qp_lock);
+ if (rdev->sniffer_flow_created) {
+ ibdev_err(&rdev->ibdev, "RoCE Mirroring is already Configured\n");
+ mutex_unlock(&rdev->qp_lock);
+ return ERR_PTR(-EBUSY);
+ }
+
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ if (!flow) {
+ mutex_unlock(&rdev->qp_lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ flow->rdev = rdev;
+
+ rc = bnxt_re_setup_vnic(rdev, qp);
+ if (rc)
+ goto out_free_flow;
+
+ rc = bnxt_qplib_create_flow(&rdev->qplib_res);
+ if (rc)
+ goto out_free_vnic;
+
+ rdev->sniffer_flow_created = 1;
+ mutex_unlock(&rdev->qp_lock);
+
+ return &flow->ib_flow;
+
+out_free_vnic:
+ bnxt_re_hwrm_free_vnic(rdev);
+out_free_flow:
+ mutex_unlock(&rdev->qp_lock);
+ kfree(flow);
+ return ERR_PTR(rc);
+}
+
+int bnxt_re_destroy_flow(struct ib_flow *flow_id)
+{
+ struct bnxt_re_flow *flow =
+ container_of(flow_id, struct bnxt_re_flow, ib_flow);
+ struct bnxt_re_dev *rdev = flow->rdev;
+ int rc;
+
+ mutex_lock(&rdev->qp_lock);
+ rc = bnxt_qplib_destroy_flow(&rdev->qplib_res);
+ if (rc)
+ ibdev_dbg(&rdev->ibdev, "failed to destroy_flow rc = %d\n", rc);
+ rdev->sniffer_flow_created = 0;
+
+ bnxt_re_hwrm_free_vnic(rdev);
+ mutex_unlock(&rdev->qp_lock);
+ kfree(flow);
+
+ return rc;
+}
+
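+/* Look up a CQ by hardware CQ id in the per-device hash (toggle-memory path) */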
+static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq_id)
+{
+ struct bnxt_re_cq *cq = NULL, *tmp_cq;
+
+ hash_for_each_possible(rdev->cq_hash, tmp_cq, hash_entry, cq_id) {
+ if (tmp_cq->qplib_cq.id == cq_id) {
+ cq = tmp_cq;
+ break;
+ }
+ }
+ return cq;
+}
+
+static struct bnxt_re_srq *bnxt_re_search_for_srq(struct bnxt_re_dev *rdev, u32 srq_id)
+{
+ struct bnxt_re_srq *srq = NULL, *tmp_srq;
+
+ hash_for_each_possible(rdev->srq_hash, tmp_srq, hash_entry, srq_id) {
+ if (tmp_srq->qplib_srq.id == srq_id) {
+ srq = tmp_srq;
+ break;
+ }
+ }
+ return srq;
+}
+
+/* Helper function to mmap the virtual memory from user app */
+int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
+{
+ struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
+ struct bnxt_re_ucontext,
+ ib_uctx);
+ struct bnxt_re_user_mmap_entry *bnxt_entry;
+ struct rdma_user_mmap_entry *rdma_entry;
+ int ret = 0;
+ u64 pfn;
+
+ rdma_entry = rdma_user_mmap_entry_get(&uctx->ib_uctx, vma);
+ if (!rdma_entry)
+ return -EINVAL;
+
+ bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
+ rdma_entry);
+
+ switch (bnxt_entry->mmap_flag) {
+ case BNXT_RE_MMAP_WC_DB:
+ pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
+ ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
+ pgprot_writecombine(vma->vm_page_prot),
+ rdma_entry);
+ break;
+ case BNXT_RE_MMAP_UC_DB:
+ pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
+ ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot),
+ rdma_entry);
+ break;
+ case BNXT_RE_MMAP_SH_PAGE:
+ ret = vm_insert_page(vma, vma->vm_start, virt_to_page(uctx->shpg));
+ break;
+ case BNXT_RE_MMAP_DBR_BAR:
+ pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
+ ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot),
+ rdma_entry);
+ break;
+ case BNXT_RE_MMAP_DBR_PAGE:
+ case BNXT_RE_MMAP_TOGGLE_PAGE:
+ /* Driver doesn't expect write access for user space */
+ if (vma->vm_flags & VM_WRITE)
+ ret = -EFAULT;
+ else
+ ret = vm_insert_page(vma, vma->vm_start,
+ virt_to_page((void *)bnxt_entry->mem_offset));
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ rdma_user_mmap_entry_put(rdma_entry);
+ return ret;
+}
+
+void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct bnxt_re_user_mmap_entry *bnxt_entry;
+
+ bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
+ rdma_entry);
+
+ kfree(bnxt_entry);
+}
+
+int bnxt_re_process_mad(struct ib_device *ibdev, int mad_flags,
+ u32 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh,
+ const struct ib_mad *in_mad, struct ib_mad *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ struct ib_class_port_info cpi = {};
+ int ret = IB_MAD_RESULT_SUCCESS;
+ int rc = 0;
+
+ if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
+ return ret;
+
+ switch (in_mad->mad_hdr.attr_id) {
+ case IB_PMA_CLASS_PORT_INFO:
+ cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+ memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
+ break;
+ case IB_PMA_PORT_COUNTERS_EXT:
+ rc = bnxt_re_assign_pma_port_ext_counters(rdev, out_mad);
+ break;
+ case IB_PMA_PORT_COUNTERS:
+ rc = bnxt_re_assign_pma_port_counters(rdev, out_mad);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ if (rc)
+ return IB_MAD_RESULT_FAILURE;
+ ret |= IB_MAD_RESULT_REPLY;
+ return ret;
+}
+
+static int UVERBS_HANDLER(BNXT_RE_METHOD_NOTIFY_DRV)(struct uverbs_attr_bundle *attrs)
+{
+ struct bnxt_re_ucontext *uctx;
+
+ uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
+ bnxt_re_pacing_alert(uctx->rdev);
+ return 0;
+}
+
+static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
+ enum bnxt_re_alloc_page_type alloc_type;
+ struct bnxt_re_user_mmap_entry *entry;
+ enum bnxt_re_mmap_flag mmap_flag;
+ struct bnxt_qplib_chip_ctx *cctx;
+ struct bnxt_re_ucontext *uctx;
+ struct bnxt_re_dev *rdev;
+ u64 mmap_offset;
+ u32 length;
+ u32 dpi;
+ u64 addr;
+ int err;
+
+ uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
+ if (IS_ERR(uctx))
+ return PTR_ERR(uctx);
+
+ err = uverbs_get_const(&alloc_type, attrs, BNXT_RE_ALLOC_PAGE_TYPE);
+ if (err)
+ return err;
+
+ rdev = uctx->rdev;
+ cctx = rdev->chip_ctx;
+
+ switch (alloc_type) {
+ case BNXT_RE_ALLOC_WC_PAGE:
+ if (cctx->modes.db_push) {
+ if (bnxt_qplib_alloc_dpi(&rdev->qplib_res, &uctx->wcdpi,
+ uctx, BNXT_QPLIB_DPI_TYPE_WC))
+ return -ENOMEM;
+ length = PAGE_SIZE;
+ dpi = uctx->wcdpi.dpi;
+ addr = (u64)uctx->wcdpi.umdbr;
+ mmap_flag = BNXT_RE_MMAP_WC_DB;
+ } else {
+ return -EINVAL;
+ }
+
+ break;
+ case BNXT_RE_ALLOC_DBR_BAR_PAGE:
+ length = PAGE_SIZE;
+ addr = (u64)rdev->pacing.dbr_bar_addr;
+ mmap_flag = BNXT_RE_MMAP_DBR_BAR;
+ break;
+
+ case BNXT_RE_ALLOC_DBR_PAGE:
+ length = PAGE_SIZE;
+ addr = (u64)rdev->pacing.dbr_page;
+ mmap_flag = BNXT_RE_MMAP_DBR_PAGE;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mmap_offset);
+ if (!entry)
+ return -ENOMEM;
+
+ uobj->object = entry;
+ uverbs_finalize_uobj_create(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
+ err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
+ &mmap_offset, sizeof(mmap_offset));
+ if (err)
+ return err;
+
+ err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
+ &length, sizeof(length));
+ if (err)
+ return err;
+
+ err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI,
+ &dpi, sizeof(dpi));
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int alloc_page_obj_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct bnxt_re_user_mmap_entry *entry = uobject->object;
+ struct bnxt_re_ucontext *uctx = entry->uctx;
+
+ switch (entry->mmap_flag) {
+ case BNXT_RE_MMAP_WC_DB:
+ if (uctx && uctx->wcdpi.dbr) {
+ struct bnxt_re_dev *rdev = uctx->rdev;
+
+ bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->wcdpi);
+ uctx->wcdpi.dbr = NULL;
+ }
+ break;
+ case BNXT_RE_MMAP_DBR_BAR:
+ case BNXT_RE_MMAP_DBR_PAGE:
+ break;
+ default:
+ goto exit;
+ }
+ rdma_user_mmap_entry_remove(&entry->rdma_entry);
+exit:
+ return 0;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_ALLOC_PAGE,
+ UVERBS_ATTR_IDR(BNXT_RE_ALLOC_PAGE_HANDLE,
+ BNXT_RE_OBJECT_ALLOC_PAGE,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(BNXT_RE_ALLOC_PAGE_TYPE,
+ enum bnxt_re_alloc_page_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_DPI,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_DESTROY_PAGE,
+ UVERBS_ATTR_IDR(BNXT_RE_DESTROY_PAGE_HANDLE,
+ BNXT_RE_OBJECT_ALLOC_PAGE,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_ALLOC_PAGE,
+ UVERBS_TYPE_ALLOC_IDR(alloc_page_obj_cleanup),
+ &UVERBS_METHOD(BNXT_RE_METHOD_ALLOC_PAGE),
+ &UVERBS_METHOD(BNXT_RE_METHOD_DESTROY_PAGE));
+
+DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_NOTIFY_DRV);
+
+DECLARE_UVERBS_GLOBAL_METHODS(BNXT_RE_OBJECT_NOTIFY_DRV,
+ &UVERBS_METHOD(BNXT_RE_METHOD_NOTIFY_DRV));
+
+/* Toggle MEM */
+static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_TOGGLE_MEM_HANDLE);
+ enum bnxt_re_mmap_flag mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE;
+ enum bnxt_re_get_toggle_mem_type res_type;
+ struct bnxt_re_user_mmap_entry *entry;
+ struct bnxt_re_ucontext *uctx;
+ struct ib_ucontext *ib_uctx;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_srq *srq;
+ u32 length = PAGE_SIZE;
+ struct bnxt_re_cq *cq;
+ u64 mem_offset;
+ u32 offset = 0;
+ u64 addr = 0;
+ u32 res_id;
+ int err;
+
+ ib_uctx = ib_uverbs_get_ucontext(attrs);
+ if (IS_ERR(ib_uctx))
+ return PTR_ERR(ib_uctx);
+
+ err = uverbs_get_const(&res_type, attrs, BNXT_RE_TOGGLE_MEM_TYPE);
+ if (err)
+ return err;
+
+ uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
+ rdev = uctx->rdev;
+ err = uverbs_copy_from(&res_id, attrs, BNXT_RE_TOGGLE_MEM_RES_ID);
+ if (err)
+ return err;
+
+ switch (res_type) {
+ case BNXT_RE_CQ_TOGGLE_MEM:
+ cq = bnxt_re_search_for_cq(rdev, res_id);
+ if (!cq)
+ return -EINVAL;
+
+ addr = (u64)cq->uctx_cq_page;
+ break;
+ case BNXT_RE_SRQ_TOGGLE_MEM:
+ srq = bnxt_re_search_for_srq(rdev, res_id);
+ if (!srq)
+ return -EINVAL;
+
+ addr = (u64)srq->uctx_srq_page;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mem_offset);
+ if (!entry)
+ return -ENOMEM;
+
+ uobj->object = entry;
+ uverbs_finalize_uobj_create(attrs, BNXT_RE_TOGGLE_MEM_HANDLE);
+ err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
+ &mem_offset, sizeof(mem_offset));
+ if (err)
+ return err;
+
+ err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
+ &length, sizeof(length));
+ if (err)
+ return err;
+
+ err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
+ &offset, sizeof(offset));
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int get_toggle_mem_obj_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct bnxt_re_user_mmap_entry *entry = uobject->object;
+
+ rdma_user_mmap_entry_remove(&entry->rdma_entry);
+ return 0;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_GET_TOGGLE_MEM,
+ UVERBS_ATTR_IDR(BNXT_RE_TOGGLE_MEM_HANDLE,
+ BNXT_RE_OBJECT_GET_TOGGLE_MEM,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(BNXT_RE_TOGGLE_MEM_TYPE,
+ enum bnxt_re_get_toggle_mem_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(BNXT_RE_TOGGLE_MEM_RES_ID,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_RELEASE_TOGGLE_MEM,
+ UVERBS_ATTR_IDR(BNXT_RE_RELEASE_TOGGLE_MEM_HANDLE,
+ BNXT_RE_OBJECT_GET_TOGGLE_MEM,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_GET_TOGGLE_MEM,
+ UVERBS_TYPE_ALLOC_IDR(get_toggle_mem_obj_cleanup),
+ &UVERBS_METHOD(BNXT_RE_METHOD_GET_TOGGLE_MEM),
+ &UVERBS_METHOD(BNXT_RE_METHOD_RELEASE_TOGGLE_MEM));
+
+const struct uapi_definition bnxt_re_uapi_defs[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_ALLOC_PAGE),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_NOTIFY_DRV),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_GET_TOGGLE_MEM),
+ {}
+};
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
new file mode 100644
index 000000000000..76ba9ab04d5c
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -0,0 +1,296 @@
+/*
+ * Broadcom NetXtreme-E RoCE driver.
+ *
+ * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: IB Verbs interpreter (header)
+ */
+
+#ifndef __BNXT_RE_IB_VERBS_H__
+#define __BNXT_RE_IB_VERBS_H__
+
+struct bnxt_re_gid_ctx {
+ u32 idx;
+ u32 refcnt;
+};
+
+#define BNXT_RE_FENCE_BYTES 64
+struct bnxt_re_fence_data {
+ u32 size;
+ u8 va[BNXT_RE_FENCE_BYTES];
+ dma_addr_t dma_addr;
+ struct bnxt_re_mr *mr;
+ struct ib_mw *mw;
+ struct bnxt_qplib_swqe bind_wqe;
+ u32 bind_rkey;
+};
+
+struct bnxt_re_pd {
+ struct ib_pd ib_pd;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_pd qplib_pd;
+ struct bnxt_re_fence_data fence;
+ struct rdma_user_mmap_entry *pd_db_mmap;
+ struct rdma_user_mmap_entry *pd_wcdb_mmap;
+};
+
+struct bnxt_re_ah {
+ struct ib_ah ib_ah;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_ah qplib_ah;
+};
+
+struct bnxt_re_srq {
+ struct ib_srq ib_srq;
+ struct bnxt_re_dev *rdev;
+ u32 srq_limit;
+ struct bnxt_qplib_srq qplib_srq;
+ struct ib_umem *umem;
+ spinlock_t lock; /* protect srq */
+ void *uctx_srq_page;
+ struct hlist_node hash_entry;
+};
+
+struct bnxt_re_qp {
+ struct ib_qp ib_qp;
+ struct list_head list;
+ struct bnxt_re_dev *rdev;
+ spinlock_t sq_lock; /* protect sq */
+ spinlock_t rq_lock; /* protect rq */
+ struct bnxt_qplib_qp qplib_qp;
+ struct ib_umem *sumem;
+ struct ib_umem *rumem;
+ /* QP1 */
+ u32 send_psn;
+ struct ib_ud_header qp1_hdr;
+ struct bnxt_re_cq *scq;
+ struct bnxt_re_cq *rcq;
+ struct dentry *dentry;
+};
+
+struct bnxt_re_cq {
+ struct ib_cq ib_cq;
+ struct bnxt_re_dev *rdev;
+ spinlock_t cq_lock; /* protect cq */
+ u16 cq_count;
+ u16 cq_period;
+ struct bnxt_qplib_cq qplib_cq;
+ struct bnxt_qplib_cqe *cql;
+#define MAX_CQL_PER_POLL 1024
+ u32 max_cql;
+ struct ib_umem *umem;
+ struct ib_umem *resize_umem;
+ int resize_cqe;
+ void *uctx_cq_page;
+ struct hlist_node hash_entry;
+};
+
+struct bnxt_re_mr {
+ struct bnxt_re_dev *rdev;
+ struct ib_mr ib_mr;
+ struct ib_umem *ib_umem;
+ struct bnxt_qplib_mrw qplib_mr;
+ u32 npages;
+ u64 *pages;
+ struct bnxt_qplib_frpl qplib_frpl;
+};
+
+struct bnxt_re_frpl {
+ struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_frpl qplib_frpl;
+ u64 *page_list;
+};
+
+struct bnxt_re_mw {
+ struct bnxt_re_dev *rdev;
+ struct ib_mw ib_mw;
+ struct bnxt_qplib_mrw qplib_mw;
+};
+
+struct bnxt_re_ucontext {
+ struct ib_ucontext ib_uctx;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_dpi dpi;
+ struct bnxt_qplib_dpi wcdpi;
+ void *shpg;
+ spinlock_t sh_lock; /* protect shpg */
+ struct rdma_user_mmap_entry *shpage_mmap;
+ u64 cmask;
+};
+
+enum bnxt_re_mmap_flag {
+ BNXT_RE_MMAP_SH_PAGE,
+ BNXT_RE_MMAP_UC_DB,
+ BNXT_RE_MMAP_WC_DB,
+ BNXT_RE_MMAP_DBR_PAGE,
+ BNXT_RE_MMAP_DBR_BAR,
+ BNXT_RE_MMAP_TOGGLE_PAGE,
+};
+
+struct bnxt_re_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ struct bnxt_re_ucontext *uctx;
+ u64 mem_offset;
+ u8 mmap_flag;
+};
+
+struct bnxt_re_flow {
+ struct ib_flow ib_flow;
+ struct bnxt_re_dev *rdev;
+};
+
+static inline u16 bnxt_re_get_swqe_size(int nsge)
+{
+ return sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
+}
+
+static inline u16 bnxt_re_get_rwqe_size(int nsge)
+{
+ return sizeof(struct rq_wqe_hdr) + (nsge * sizeof(struct sq_sge));
+}
+
+enum {
+ BNXT_RE_UCNTX_CAP_POW2_DISABLED = 0x1ULL,
+ BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED = 0x2ULL,
+};
+
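+/*
+ * Round a requested queue depth up to a power of two unless the user context
+ * asked for the power-of-two requirement to be disabled.
+ */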
+static inline u32 bnxt_re_init_depth(u32 ent, struct bnxt_re_ucontext *uctx)
+{
+ return uctx ? (uctx->cmask & BNXT_RE_UCNTX_CAP_POW2_DISABLED) ?
+ ent : roundup_pow_of_two(ent) : ent;
+}
+
+static inline bool bnxt_re_is_var_size_supported(struct bnxt_re_dev *rdev,
+ struct bnxt_re_ucontext *uctx)
+{
+ if (uctx)
+ return uctx->cmask & BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED;
+ else
+ return rdev->chip_ctx->modes.wqe_mode;
+}
+
+int bnxt_re_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *ib_attr,
+ struct ib_udata *udata);
+int bnxt_re_modify_device(struct ib_device *ibdev,
+ int device_modify_mask,
+ struct ib_device_modify *device_modify);
+int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attr *port_attr);
+int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable);
+void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str);
+int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
+ u16 index, u16 *pkey);
+int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context);
+int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context);
+int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
+ int index, union ib_gid *gid);
+enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
+ u32 port_num);
+int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+int bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
+int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
+int bnxt_re_destroy_ah(struct ib_ah *ah, u32 flags);
+int bnxt_re_create_srq(struct ib_srq *srq,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata);
+int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
+ enum ib_srq_attr_mask srq_attr_mask,
+ struct ib_udata *udata);
+int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
+int bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
+int bnxt_re_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr);
+int bnxt_re_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
+ struct ib_udata *udata);
+int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_udata *udata);
+int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
+int bnxt_re_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
+int bnxt_re_post_send(struct ib_qp *qp, const struct ib_send_wr *send_wr,
+ const struct ib_send_wr **bad_send_wr);
+int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr);
+int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
+int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
+int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
+int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
+struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
+
+int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
+struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type,
+ u32 max_num_sg);
+int bnxt_re_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
+struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
+ struct ib_udata *udata);
+int bnxt_re_dealloc_mw(struct ib_mw *mw);
+struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_dmah *dmah,
+ struct ib_udata *udata);
+struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
+ u64 length, u64 virt_addr,
+ int fd, int mr_access_flags,
+ struct ib_dmah *dmah,
+ struct uverbs_attr_bundle *attrs);
+int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
+void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
+struct ib_flow *bnxt_re_create_flow(struct ib_qp *ib_qp,
+ struct ib_flow_attr *attr,
+ struct ib_udata *udata);
+int bnxt_re_destroy_flow(struct ib_flow *flow_id);
+
+int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
+
+int bnxt_re_process_mad(struct ib_device *device, int process_mad_flags,
+ u32 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh,
+ const struct ib_mad *in_mad, struct ib_mad *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
+
+static inline u32 __to_ib_port_num(u16 port_id)
+{
+ return (u32)port_id + 1;
+}
+
+unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
+void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
+#endif /* __BNXT_RE_IB_VERBS_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
new file mode 100644
index 000000000000..b13810572c2e
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -0,0 +1,2613 @@
+/*
+ * Broadcom NetXtreme-E RoCE driver.
+ *
+ * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: Main component of the bnxt_re driver
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <net/dcbnl.h>
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#include <linux/if_ether.h>
+#include <linux/auxiliary_bus.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_addr.h>
+#include <linux/hashtable.h>
+
+#include "bnxt_ulp.h"
+#include "roce_hsi.h"
+#include "qplib_res.h"
+#include "qplib_sp.h"
+#include "qplib_fp.h"
+#include "qplib_rcfw.h"
+#include "bnxt_re.h"
+#include "ib_verbs.h"
+#include <rdma/bnxt_re-abi.h>
+#include "bnxt.h"
+#include "hw_counters.h"
+#include "debugfs.h"
+
+static char version[] =
+ BNXT_RE_DESC "\n";
+
+MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
+MODULE_DESCRIPTION(BNXT_RE_DESC);
+MODULE_LICENSE("Dual BSD/GPL");
+
+/* globals */
+static DEFINE_MUTEX(bnxt_re_mutex);
+
+static int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev);
+static int bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev);
+
+static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
+ u32 *offset);
+static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
+ u8 port_num, enum ib_event_type event);
+static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_chip_ctx *cctx;
+ struct bnxt_en_dev *en_dev;
+ struct bnxt_qplib_res *res;
+ u32 l2db_len = 0;
+ u32 offset = 0;
+ u32 barlen;
+ int rc;
+
+ res = &rdev->qplib_res;
+ en_dev = rdev->en_dev;
+ cctx = rdev->chip_ctx;
+
+ /* Issue qcfg */
+ rc = bnxt_re_hwrm_qcfg(rdev, &l2db_len, &offset);
+ if (rc)
+ dev_info(rdev_to_dev(rdev),
+ "Couldn't get DB bar size, Low latency framework is disabled\n");
+ /* set register offsets for both UC and WC */
+ if (bnxt_qplib_is_chip_gen_p7(cctx)) {
+ res->dpi_tbl.ucreg.offset = offset;
+ res->dpi_tbl.wcreg.offset = en_dev->l2_db_size;
+ } else {
+ res->dpi_tbl.ucreg.offset = res->is_vf ? BNXT_QPLIB_DBR_VF_DB_OFFSET :
+ BNXT_QPLIB_DBR_PF_DB_OFFSET;
+ res->dpi_tbl.wcreg.offset = res->dpi_tbl.ucreg.offset;
+ }
+
+ /* If WC mapping is disabled by the L2 driver then en_dev->l2_db_size
+ * is equal to the actual size of the DB BAR. This indicates that L2 is
+ * mapping the entire BAR as UC-. The RoCE driver can't enable WC
+ * mapping in such cases and DB push will be disabled.
+ */
+ barlen = pci_resource_len(res->pdev, RCFW_DBR_PCI_BAR_REGION);
+ if (cctx->modes.db_push && l2db_len && en_dev->l2_db_size != barlen) {
+ res->dpi_tbl.wcreg.offset = en_dev->l2_db_size;
+ dev_info(rdev_to_dev(rdev), "Low latency framework is enabled\n");
+ }
+}
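+
+/* Example of the resulting layout (sizes below are purely illustrative):
+ * if the DB BAR is 8 MB, the qcfg above returned a non-zero legacy L2 DB
+ * size, the L2 driver maps only the first 4 MB (en_dev->l2_db_size == 4 MB
+ * != barlen) and the chip supports DB push, then wcreg.offset is moved to
+ * the 4 MB mark so RoCE WC doorbell writes land in the portion of the BAR
+ * that L2 left unmapped, while ucreg.offset keeps the regular UC doorbell
+ * offset.
+ */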
+
+static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_chip_ctx *cctx;
+
+ cctx = rdev->chip_ctx;
+ cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ?
+ BNXT_QPLIB_WQE_MODE_VARIABLE : BNXT_QPLIB_WQE_MODE_STATIC;
+ if (bnxt_re_hwrm_qcaps(rdev))
+ dev_err(rdev_to_dev(rdev),
+ "Failed to query hwrm qcaps\n");
+ if (bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx)) {
+ cctx->modes.toggle_bits |= BNXT_QPLIB_CQ_TOGGLE_BIT;
+ cctx->modes.toggle_bits |= BNXT_QPLIB_SRQ_TOGGLE_BIT;
+ }
+}
+
+static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_chip_ctx *chip_ctx;
+
+ if (!rdev->chip_ctx)
+ return;
+
+ kfree(rdev->dev_attr);
+ rdev->dev_attr = NULL;
+
+ chip_ctx = rdev->chip_ctx;
+ rdev->chip_ctx = NULL;
+ rdev->rcfw.res = NULL;
+ rdev->qplib_res.cctx = NULL;
+ rdev->qplib_res.pdev = NULL;
+ rdev->qplib_res.netdev = NULL;
+ kfree(chip_ctx);
+}
+
+static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_chip_ctx *chip_ctx;
+ struct bnxt_en_dev *en_dev;
+ int rc = -ENOMEM;
+
+ en_dev = rdev->en_dev;
+
+ rdev->qplib_res.pdev = en_dev->pdev;
+ chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL);
+ if (!chip_ctx)
+ return -ENOMEM;
+ chip_ctx->chip_num = en_dev->chip_num;
+ chip_ctx->hw_stats_size = en_dev->hw_ring_stats_size;
+
+ rdev->chip_ctx = chip_ctx;
+ /* rest of the members to follow eventually */
+
+ rdev->qplib_res.cctx = rdev->chip_ctx;
+ rdev->rcfw.res = &rdev->qplib_res;
+ rdev->dev_attr = kzalloc(sizeof(*rdev->dev_attr), GFP_KERNEL);
+ if (!rdev->dev_attr)
+ goto free_chip_ctx;
+ rdev->qplib_res.dattr = rdev->dev_attr;
+ rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);
+ rdev->qplib_res.en_dev = en_dev;
+
+ rc = bnxt_re_query_hwrm_intf_version(rdev);
+ if (rc)
+ goto free_dev_attr;
+
+ bnxt_re_set_drv_mode(rdev);
+
+ bnxt_re_set_db_offset(rdev);
+ rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
+ if (rc)
+ goto free_dev_attr;
+
+ if (bnxt_qplib_determine_atomics(en_dev->pdev))
+ ibdev_info(&rdev->ibdev,
+ "platform doesn't support global atomics.");
+ return 0;
+free_dev_attr:
+ kfree(rdev->dev_attr);
+ rdev->dev_attr = NULL;
+free_chip_ctx:
+ kfree(rdev->chip_ctx);
+ rdev->chip_ctx = NULL;
+ return rc;
+}
+
+/* SR-IOV helper functions */
+
+static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
+{
+ if (BNXT_EN_VF(rdev->en_dev))
+ rdev->is_virtfn = 1;
+}
+
+/* Set the maximum number of each resource that the driver actually wants
+ * to allocate. This may be up to the maximum number the firmware has
+ * reserved for the function. The driver may choose to allocate fewer
+ * resources than the firmware maximum.
+ */
+static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_dev_attr *attr;
+ struct bnxt_qplib_ctx *ctx;
+ int i;
+
+ attr = rdev->dev_attr;
+ ctx = &rdev->qplib_ctx;
+
+ ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
+ attr->max_qp);
+ ctx->mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
+ /* Use max_mr from fw since max_mrw does not get set */
+ ctx->mrw_count = min_t(u32, ctx->mrw_count, attr->max_mr);
+ ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
+ attr->max_srq);
+ ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq);
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
+ rdev->qplib_ctx.tqm_ctx.qcount[i] =
+ rdev->dev_attr->tqm_alloc_reqs[i];
+}
+
+static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf)
+{
+ struct bnxt_qplib_vf_res *vf_res;
+ u32 mrws = 0;
+ u32 vf_pct;
+ u32 nvfs;
+
+ vf_res = &qplib_ctx->vf_res;
+ /*
+ * Reserve a set of resources for the PF. Divide the remaining
+ * resources among the VFs
+ */
+ vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
+ nvfs = num_vf;
+ num_vf = 100 * num_vf;
+ vf_res->max_qp_per_vf = (qplib_ctx->qpc_count * vf_pct) / num_vf;
+ vf_res->max_srq_per_vf = (qplib_ctx->srqc_count * vf_pct) / num_vf;
+ vf_res->max_cq_per_vf = (qplib_ctx->cq_count * vf_pct) / num_vf;
+ /*
+ * The driver allows many more MRs than other resources. If the
+ * firmware does also, then reserve a fixed amount for the PF and
+ * divide the rest among VFs. VFs may use many MRs for NFS
+ * mounts, ISER, NVME applications, etc. If the firmware severely
+ * restricts the number of MRs, then let PF have half and divide
+ * the rest among VFs, as for the other resource types.
+ */
+ if (qplib_ctx->mrw_count < BNXT_RE_MAX_MRW_COUNT_64K) {
+ mrws = qplib_ctx->mrw_count * vf_pct;
+ nvfs = num_vf;
+ } else {
+ mrws = qplib_ctx->mrw_count - BNXT_RE_RESVD_MR_FOR_PF;
+ }
+ vf_res->max_mrw_per_vf = (mrws / nvfs);
+ vf_res->max_gid_per_vf = BNXT_RE_MAX_GID_PER_VF;
+}
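+
+/* Worked example for the split above, using purely hypothetical numbers
+ * (BNXT_RE_PCT_RSVD_FOR_PF is assumed to be 65 here just for the
+ * arithmetic): with 8 VFs, vf_pct = 35 and num_vf = 800, a qpc_count of
+ * 65536 gives max_qp_per_vf = 65536 * 35 / 800 = 2867, i.e. 35% of the
+ * QPs are spread evenly across the VFs while the PF keeps the reserved
+ * share.
+ */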
+
+static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
+{
+ u32 num_vfs;
+
+ memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
+ bnxt_re_limit_pf_res(rdev);
+
+ num_vfs = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
+ BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs;
+ if (num_vfs)
+ bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
+}
+
+static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev)
+{
+ /*
+ * Use the total VF count since the actual VF count may not be
+ * available at this point.
+ */
+ rdev->num_vfs = pci_sriov_get_totalvfs(rdev->en_dev->pdev);
+ if (!rdev->num_vfs)
+ return;
+
+ bnxt_re_set_resource_limits(rdev);
+ bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
+ &rdev->qplib_ctx);
+}
+
+struct bnxt_re_dcb_work {
+ struct work_struct work;
+ struct bnxt_re_dev *rdev;
+ struct hwrm_async_event_cmpl cmpl;
+};
+
+static bool bnxt_re_is_qp1_qp(struct bnxt_re_qp *qp)
+{
+ return qp->ib_qp.qp_type == IB_QPT_GSI;
+}
+
+static struct bnxt_re_qp *bnxt_re_get_qp1_qp(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_qp *qp;
+
+ mutex_lock(&rdev->qp_lock);
+ list_for_each_entry(qp, &rdev->qp_list, list) {
+ if (bnxt_re_is_qp1_qp(qp)) {
+ mutex_unlock(&rdev->qp_lock);
+ return qp;
+ }
+ }
+ mutex_unlock(&rdev->qp_lock);
+ return NULL;
+}
+
+static int bnxt_re_update_qp1_tos_dscp(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_qp *qp;
+
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return 0;
+
+ qp = bnxt_re_get_qp1_qp(rdev);
+ if (!qp)
+ return 0;
+
+ qp->qplib_qp.modify_flags = CMDQ_MODIFY_QP_MODIFY_MASK_TOS_DSCP;
+ qp->qplib_qp.tos_dscp = rdev->cc_param.qp1_tos_dscp;
+
+ return bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
+}
+
+static void bnxt_re_init_dcb_wq(struct bnxt_re_dev *rdev)
+{
+ rdev->dcb_wq = create_singlethread_workqueue("bnxt_re_dcb_wq");
+}
+
+static void bnxt_re_uninit_dcb_wq(struct bnxt_re_dev *rdev)
+{
+ if (!rdev->dcb_wq)
+ return;
+ destroy_workqueue(rdev->dcb_wq);
+}
+
+static void bnxt_re_dcb_wq_task(struct work_struct *work)
+{
+ struct bnxt_re_dcb_work *dcb_work =
+ container_of(work, struct bnxt_re_dcb_work, work);
+ struct bnxt_re_dev *rdev = dcb_work->rdev;
+ struct bnxt_qplib_cc_param *cc_param;
+ int rc;
+
+ if (!rdev)
+ goto free_dcb;
+
+ cc_param = &rdev->cc_param;
+ rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, cc_param);
+ if (rc) {
+ ibdev_dbg(&rdev->ibdev, "Failed to query ccparam rc:%d", rc);
+ goto free_dcb;
+ }
+ if (cc_param->qp1_tos_dscp != cc_param->tos_dscp) {
+ cc_param->qp1_tos_dscp = cc_param->tos_dscp;
+ rc = bnxt_re_update_qp1_tos_dscp(rdev);
+ if (rc) {
+ ibdev_dbg(&rdev->ibdev, "%s: Failed to modify QP1 rc:%d",
+ __func__, rc);
+ goto free_dcb;
+ }
+ }
+
+free_dcb:
+ kfree(dcb_work);
+}
+
+static void bnxt_re_async_notifier(void *handle, struct hwrm_async_event_cmpl *cmpl)
+{
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle);
+ struct bnxt_re_dcb_work *dcb_work;
+ struct bnxt_re_dev *rdev;
+ u32 data1, data2;
+ u16 event_id;
+
+ rdev = en_info->rdev;
+ if (!rdev)
+ return;
+
+ event_id = le16_to_cpu(cmpl->event_id);
+ data1 = le32_to_cpu(cmpl->event_data1);
+ data2 = le32_to_cpu(cmpl->event_data2);
+
+ ibdev_dbg(&rdev->ibdev, "Async event_id = %d data1 = %d data2 = %d",
+ event_id, data1, data2);
+
+ switch (event_id) {
+ case ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE:
+ dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC);
+ if (!dcb_work)
+ break;
+
+ dcb_work->rdev = rdev;
+ memcpy(&dcb_work->cmpl, cmpl, sizeof(*cmpl));
+ INIT_WORK(&dcb_work->work, bnxt_re_dcb_wq_task);
+ queue_work(rdev->dcb_wq, &dcb_work->work);
+ break;
+ default:
+ break;
+ }
+}
+
+static void bnxt_re_stop_irq(void *handle, bool reset)
+{
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle);
+ struct bnxt_qplib_rcfw *rcfw;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_nq *nq;
+ int indx;
+
+ rdev = en_info->rdev;
+ if (!rdev)
+ return;
+ rcfw = &rdev->rcfw;
+
+ if (reset) {
+ set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
+ set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
+ wake_up_all(&rdev->rcfw.cmdq.waitq);
+ bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
+ IB_EVENT_DEVICE_FATAL);
+ }
+
+ for (indx = BNXT_RE_NQ_IDX; indx < rdev->nqr->num_msix; indx++) {
+ nq = &rdev->nqr->nq[indx - 1];
+ bnxt_qplib_nq_stop_irq(nq, false);
+ }
+
+ bnxt_qplib_rcfw_stop_irq(rcfw, false);
+}
+
+static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
+{
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle);
+ struct bnxt_msix_entry *msix_ent;
+ struct bnxt_qplib_rcfw *rcfw;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_nq *nq;
+ int indx, rc;
+
+ rdev = en_info->rdev;
+ if (!rdev)
+ return;
+ msix_ent = rdev->nqr->msix_entries;
+ rcfw = &rdev->rcfw;
+ if (!ent) {
+ /* Not setting the f/w timeout bit in rcfw.
+ * During driver unload the first command
+ * to f/w will time out and that will set the
+ * timeout bit.
+ */
+ ibdev_err(&rdev->ibdev, "Failed to re-start IRQs\n");
+ return;
+ }
+
+ /* Vectors may change after restart, so update the device
+ * structure with the new vectors.
+ */
+ for (indx = 0; indx < rdev->nqr->num_msix; indx++)
+ rdev->nqr->msix_entries[indx].vector = ent[indx].vector;
+
+ rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
+ false);
+ if (rc) {
+ ibdev_warn(&rdev->ibdev, "Failed to reinit CREQ\n");
+ return;
+ }
+ for (indx = BNXT_RE_NQ_IDX; indx < rdev->nqr->num_msix; indx++) {
+ nq = &rdev->nqr->nq[indx - 1];
+ rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
+ msix_ent[indx].vector, false);
+ if (rc) {
+ ibdev_warn(&rdev->ibdev, "Failed to reinit NQ index %d\n",
+ indx - 1);
+ return;
+ }
+ }
+}
+
+static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
+ .ulp_async_notifier = bnxt_re_async_notifier,
+ .ulp_irq_stop = bnxt_re_stop_irq,
+ .ulp_irq_restart = bnxt_re_start_irq
+};
+
+/* RoCE -> Net driver */
+
+static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_en_dev *en_dev;
+
+ en_dev = rdev->en_dev;
+ return bnxt_register_dev(en_dev, &bnxt_re_ulp_ops, rdev->adev);
+}
+
+static void bnxt_re_init_hwrm_hdr(struct input *hdr, u16 opcd)
+{
+ hdr->req_type = cpu_to_le16(opcd);
+ hdr->cmpl_ring = cpu_to_le16(-1);
+ hdr->target_id = cpu_to_le16(-1);
+}
+
+static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
+ int msg_len, void *resp, int resp_max_len,
+ int timeout)
+{
+ fw_msg->msg = msg;
+ fw_msg->msg_len = msg_len;
+ fw_msg->resp = resp;
+ fw_msg->resp_max_len = resp_max_len;
+ fw_msg->timeout = timeout;
+}
+
+void bnxt_re_hwrm_free_vnic(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_vnic_free_input req = {};
+ struct bnxt_fw_msg fw_msg = {};
+ int rc;
+
+ bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VNIC_FREE);
+
+ req.vnic_id = cpu_to_le32(rdev->mirror_vnic_id);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), NULL,
+ 0, BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+ rc = bnxt_send_msg(en_dev, &fw_msg);
+ if (rc)
+ ibdev_dbg(&rdev->ibdev,
+ "Failed to free vnic, rc = %d\n", rc);
+}
+
+int bnxt_re_hwrm_alloc_vnic(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_vnic_alloc_output resp = {};
+ struct hwrm_vnic_alloc_input req = {};
+ struct bnxt_fw_msg fw_msg = {};
+ int rc;
+
+ bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VNIC_ALLOC);
+
+ req.vnic_id = cpu_to_le16(rdev->mirror_vnic_id);
+ req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_VNIC_ID_VALID);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+ rc = bnxt_send_msg(en_dev, &fw_msg);
+ if (rc)
+ ibdev_dbg(&rdev->ibdev,
+ "Failed to alloc vnic, rc = %d\n", rc);
+
+ return rc;
+}
+
+int bnxt_re_hwrm_cfg_vnic(struct bnxt_re_dev *rdev, u32 qp_id)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_vnic_cfg_input req = {};
+ struct bnxt_fw_msg fw_msg = {};
+ int rc;
+
+ bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VNIC_CFG);
+
+ req.flags = cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE);
+ req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_RAW_QP_ID |
+ VNIC_CFG_REQ_ENABLES_MRU);
+ req.vnic_id = cpu_to_le16(rdev->mirror_vnic_id);
+ req.raw_qp_id = cpu_to_le32(qp_id);
+ req.mru = cpu_to_le16(rdev->netdev->mtu + VLAN_ETH_HLEN);
+
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), NULL,
+ 0, BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+ rc = bnxt_send_msg(en_dev, &fw_msg);
+ if (rc)
+ ibdev_dbg(&rdev->ibdev,
+ "Failed to cfg vnic, rc = %d\n", rc);
+
+ return rc;
+}
+
+/* Query device config using common hwrm */
+static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
+ u32 *offset)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_func_qcfg_output resp = {0};
+ struct hwrm_func_qcfg_input req = {0};
+ struct bnxt_fw_msg fw_msg = {};
+ int rc;
+
+ bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCFG);
+ req.fid = cpu_to_le16(0xffff);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+ rc = bnxt_send_msg(en_dev, &fw_msg);
+ if (!rc) {
+ *db_len = PAGE_ALIGN(le16_to_cpu(resp.l2_doorbell_bar_size_kb) * 1024);
+ *offset = PAGE_ALIGN(le16_to_cpu(resp.legacy_l2_db_size_kb) * 1024);
+ rdev->mirror_vnic_id = le16_to_cpu(resp.mirror_vnic_id);
+ }
+ return rc;
+}
+
+/* Query function capabilities using common hwrm */
+int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_func_qcaps_output resp = {};
+ struct hwrm_func_qcaps_input req = {};
+ struct bnxt_qplib_chip_ctx *cctx;
+ struct bnxt_fw_msg fw_msg = {};
+ u32 flags_ext2;
+ int rc;
+
+ cctx = rdev->chip_ctx;
+ bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCAPS);
+ req.fid = cpu_to_le16(0xffff);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+
+ rc = bnxt_send_msg(en_dev, &fw_msg);
+ if (rc)
+ return rc;
+ cctx->modes.db_push = le32_to_cpu(resp.flags) & FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE;
+
+ flags_ext2 = le32_to_cpu(resp.flags_ext2);
+ cctx->modes.dbr_pacing = flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED ||
+ flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED;
+ cctx->modes.roce_mirror = !!(le32_to_cpu(resp.flags_ext3) &
+ FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED);
+ return 0;
+}
+
+static int bnxt_re_hwrm_dbr_pacing_qcfg(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data;
+ struct hwrm_func_dbr_pacing_qcfg_output resp = {};
+ struct hwrm_func_dbr_pacing_qcfg_input req = {};
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct bnxt_qplib_chip_ctx *cctx;
+ struct bnxt_fw_msg fw_msg = {};
+ int rc;
+
+ cctx = rdev->chip_ctx;
+ bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_DBR_PACING_QCFG);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+ rc = bnxt_send_msg(en_dev, &fw_msg);
+ if (rc)
+ return rc;
+
+ if ((le32_to_cpu(resp.dbr_stat_db_fifo_reg) &
+ FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK) ==
+ FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC)
+ cctx->dbr_stat_db_fifo =
+ le32_to_cpu(resp.dbr_stat_db_fifo_reg) &
+ ~FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK;
+
+ pacing_data->fifo_max_depth = le32_to_cpu(resp.dbr_stat_db_max_fifo_depth);
+ if (!pacing_data->fifo_max_depth)
+ pacing_data->fifo_max_depth = BNXT_RE_MAX_FIFO_DEPTH(cctx);
+ pacing_data->fifo_room_mask = le32_to_cpu(resp.dbr_stat_db_fifo_reg_fifo_room_mask);
+ pacing_data->fifo_room_shift = resp.dbr_stat_db_fifo_reg_fifo_room_shift;
+
+ return 0;
+}
+
+/* Update the pacing tunable parameters to the default values */
+static void bnxt_re_set_default_pacing_data(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data;
+
+ pacing_data->do_pacing = rdev->pacing.dbr_def_do_pacing;
+ pacing_data->pacing_th = rdev->pacing.pacing_algo_th;
+ pacing_data->alarm_th =
+ pacing_data->pacing_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE;
+}
+
+static u32 __get_fifo_occupancy(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data;
+ u32 read_val, fifo_occup;
+
+ read_val = readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
+ fifo_occup = pacing_data->fifo_max_depth -
+ ((read_val & pacing_data->fifo_room_mask) >>
+ pacing_data->fifo_room_shift);
+ return fifo_occup;
+}
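+
+/* Worked example for the occupancy math above (register values are
+ * hypothetical): with fifo_max_depth = 0x2c00, fifo_room_mask =
+ * 0x1fff0000 and fifo_room_shift = 16, a read of 0x0a000000 yields
+ * room = (0x0a000000 & 0x1fff0000) >> 16 = 0x0a00, so
+ * fifo_occup = 0x2c00 - 0x0a00 = 0x2200 doorbells currently queued.
+ */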
+
+static bool is_dbr_fifo_full(struct bnxt_re_dev *rdev)
+{
+ u32 max_occup, fifo_occup;
+
+ fifo_occup = __get_fifo_occupancy(rdev);
+ max_occup = BNXT_RE_MAX_FIFO_DEPTH(rdev->chip_ctx) - 1;
+ if (fifo_occup == max_occup)
+ return true;
+
+ return false;
+}
+
+static void __wait_for_fifo_occupancy_below_th(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data;
+ u32 retry_fifo_check = 1000;
+ u32 fifo_occup;
+
+ /* The loop shouldn't run infinitely, as the occupancy usually goes
+ * below the pacing algo threshold as soon as pacing kicks in.
+ */
+ while (1) {
+ fifo_occup = __get_fifo_occupancy(rdev);
+ /* FIFO occupancy cannot be greater than the MAX FIFO depth */
+ if (fifo_occup > pacing_data->fifo_max_depth)
+ break;
+
+ if (fifo_occup < pacing_data->pacing_th)
+ break;
+ if (!retry_fifo_check--) {
+ dev_info_once(rdev_to_dev(rdev),
+ "%s: fifo_occup = 0x%xfifo_max_depth = 0x%x pacing_th = 0x%x\n",
+ __func__, fifo_occup, pacing_data->fifo_max_depth,
+ pacing_data->pacing_th);
+ break;
+ }
+ }
+}
+
+static void bnxt_re_db_fifo_check(struct work_struct *work)
+{
+ struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
+ dbq_fifo_check_work);
+ struct bnxt_qplib_db_pacing_data *pacing_data;
+ u32 pacing_save;
+
+ if (!mutex_trylock(&rdev->pacing.dbq_lock))
+ return;
+ pacing_data = rdev->qplib_res.pacing_data;
+ pacing_save = rdev->pacing.do_pacing_save;
+ __wait_for_fifo_occupancy_below_th(rdev);
+ cancel_delayed_work_sync(&rdev->dbq_pacing_work);
+ if (pacing_save > rdev->pacing.dbr_def_do_pacing) {
+ /* Double the do_pacing value during congestion */
+ pacing_save = pacing_save << 1;
+ } else {
+ /*
+ * When new congestion is detected, increase do_pacing by
+ * 8 times and pacing_th by 4 times. The reason to increase
+ * pacing_th is to give the queue more room to oscillate down
+ * without getting empty, but also more room to grow without
+ * raising another alarm.
+ */
+ pacing_save = pacing_save << 3;
+ pacing_data->pacing_th = rdev->pacing.pacing_algo_th * 4;
+ }
+
+ if (pacing_save > BNXT_RE_MAX_DBR_DO_PACING)
+ pacing_save = BNXT_RE_MAX_DBR_DO_PACING;
+
+ pacing_data->do_pacing = pacing_save;
+ rdev->pacing.do_pacing_save = pacing_data->do_pacing;
+ pacing_data->alarm_th =
+ pacing_data->pacing_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE;
+ schedule_delayed_work(&rdev->dbq_pacing_work,
+ msecs_to_jiffies(rdev->pacing.dbq_pacing_time));
+ rdev->stats.pacing.alerts++;
+ mutex_unlock(&rdev->pacing.dbq_lock);
+}
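+
+/* Rough sketch of the escalation above, with a hypothetical
+ * dbr_def_do_pacing of 0x10: the first congestion event takes the
+ * "new congestion" branch, giving do_pacing = 0x10 << 3 = 0x80 and a
+ * pacing_th of 4x the algo threshold; if congestion persists, later
+ * passes take the doubling branch (0x100, 0x200, ...) until the value
+ * is clamped at BNXT_RE_MAX_DBR_DO_PACING.
+ */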
+
+static void bnxt_re_pacing_timer_exp(struct work_struct *work)
+{
+ struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
+ dbq_pacing_work.work);
+ struct bnxt_qplib_db_pacing_data *pacing_data;
+ u32 fifo_occup;
+
+ if (!mutex_trylock(&rdev->pacing.dbq_lock))
+ return;
+
+ pacing_data = rdev->qplib_res.pacing_data;
+ fifo_occup = __get_fifo_occupancy(rdev);
+
+ if (fifo_occup > pacing_data->pacing_th)
+ goto restart_timer;
+
+ /*
+ * Instead of immediately going back to the default do_pacing,
+ * reduce it by 1/8 and restart the timer.
+ */
+ pacing_data->do_pacing = pacing_data->do_pacing - (pacing_data->do_pacing >> 3);
+ pacing_data->do_pacing = max_t(u32, rdev->pacing.dbr_def_do_pacing, pacing_data->do_pacing);
+ if (pacing_data->do_pacing <= rdev->pacing.dbr_def_do_pacing) {
+ bnxt_re_set_default_pacing_data(rdev);
+ rdev->stats.pacing.complete++;
+ goto dbq_unlock;
+ }
+
+restart_timer:
+ schedule_delayed_work(&rdev->dbq_pacing_work,
+ msecs_to_jiffies(rdev->pacing.dbq_pacing_time));
+ rdev->stats.pacing.resched++;
+dbq_unlock:
+ rdev->pacing.do_pacing_save = pacing_data->do_pacing;
+ mutex_unlock(&rdev->pacing.dbq_lock);
+}
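+
+/* The timer above decays do_pacing geometrically: every expiry subtracts
+ * 1/8 of the current value (i.e. multiplies it by 7/8). For example, a
+ * hypothetical do_pacing of 0x200 becomes 0x1c0, then 0x188, and so on,
+ * until it falls back to dbr_def_do_pacing and the defaults are restored.
+ */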
+
+void bnxt_re_pacing_alert(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_db_pacing_data *pacing_data;
+
+ if (!rdev->pacing.dbr_pacing)
+ return;
+ mutex_lock(&rdev->pacing.dbq_lock);
+ pacing_data = rdev->qplib_res.pacing_data;
+
+ /*
+ * Increase the alarm_th to max so that other user lib instances do not
+ * keep alerting the driver.
+ */
+ pacing_data->alarm_th = pacing_data->fifo_max_depth;
+ pacing_data->do_pacing = BNXT_RE_MAX_DBR_DO_PACING;
+ cancel_work_sync(&rdev->dbq_fifo_check_work);
+ schedule_work(&rdev->dbq_fifo_check_work);
+ mutex_unlock(&rdev->pacing.dbq_lock);
+}
+
+static int bnxt_re_initialize_dbr_pacing(struct bnxt_re_dev *rdev)
+{
+ /* Allocate a page for app use */
+ rdev->pacing.dbr_page = (void *)__get_free_page(GFP_KERNEL);
+ if (!rdev->pacing.dbr_page)
+ return -ENOMEM;
+
+ memset((u8 *)rdev->pacing.dbr_page, 0, PAGE_SIZE);
+ rdev->qplib_res.pacing_data = (struct bnxt_qplib_db_pacing_data *)rdev->pacing.dbr_page;
+
+ if (bnxt_re_hwrm_dbr_pacing_qcfg(rdev)) {
+ free_page((u64)rdev->pacing.dbr_page);
+ rdev->pacing.dbr_page = NULL;
+ return -EIO;
+ }
+
+ /* Map HW window 2 for reading the db fifo depth */
+ writel(rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_BASE_MASK,
+ rdev->en_dev->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
+ rdev->pacing.dbr_db_fifo_reg_off =
+ (rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_OFFSET_MASK) +
+ BNXT_RE_GRC_FIFO_REG_BASE;
+ rdev->pacing.dbr_bar_addr =
+ pci_resource_start(rdev->qplib_res.pdev, 0) + rdev->pacing.dbr_db_fifo_reg_off;
+
+ if (is_dbr_fifo_full(rdev)) {
+ free_page((u64)rdev->pacing.dbr_page);
+ rdev->pacing.dbr_page = NULL;
+ return -EIO;
+ }
+
+ rdev->pacing.pacing_algo_th = BNXT_RE_PACING_ALGO_THRESHOLD;
+ rdev->pacing.dbq_pacing_time = BNXT_RE_DBR_PACING_TIME;
+ rdev->pacing.dbr_def_do_pacing = BNXT_RE_DBR_DO_PACING_NO_CONGESTION;
+ rdev->pacing.do_pacing_save = rdev->pacing.dbr_def_do_pacing;
+ rdev->qplib_res.pacing_data->grc_reg_offset = rdev->pacing.dbr_db_fifo_reg_off;
+ bnxt_re_set_default_pacing_data(rdev);
+ /* Initialize worker for DBR Pacing */
+ INIT_WORK(&rdev->dbq_fifo_check_work, bnxt_re_db_fifo_check);
+ INIT_DELAYED_WORK(&rdev->dbq_pacing_work, bnxt_re_pacing_timer_exp);
+ return 0;
+}
+
+static void bnxt_re_deinitialize_dbr_pacing(struct bnxt_re_dev *rdev)
+{
+ cancel_work_sync(&rdev->dbq_fifo_check_work);
+ cancel_delayed_work_sync(&rdev->dbq_pacing_work);
+ if (rdev->pacing.dbr_page)
+ free_page((u64)rdev->pacing.dbr_page);
+
+ rdev->pacing.dbr_page = NULL;
+ rdev->pacing.dbr_pacing = false;
+}
+
+static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
+ u16 fw_ring_id, int type)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_ring_free_input req = {};
+ struct hwrm_ring_free_output resp;
+ struct bnxt_fw_msg fw_msg = {};
+ int rc = -EINVAL;
+
+ if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
+ return 0;
+
+ bnxt_re_init_hwrm_hdr((void *)&req, HWRM_RING_FREE);
+ req.ring_type = type;
+ req.ring_id = cpu_to_le16(fw_ring_id);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+ rc = bnxt_send_msg(en_dev, &fw_msg);
+ if (rc)
+ ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x",
+ req.ring_id, rc);
+ return rc;
+}
+
+static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
+ struct bnxt_re_ring_attr *ring_attr,
+ u16 *fw_ring_id)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_ring_alloc_input req = {};
+ struct hwrm_ring_alloc_output resp;
+ struct bnxt_fw_msg fw_msg = {};
+ int rc = -EINVAL;
+
+ bnxt_re_init_hwrm_hdr((void *)&req, HWRM_RING_ALLOC);
+ req.enables = 0;
+ req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]);
+ if (ring_attr->pages > 1) {
+ /* Page size is in log2 units */
+ req.page_size = BNXT_PAGE_SHIFT;
+ req.page_tbl_depth = 1;
+ }
+ req.fbo = 0;
+ /* Association of ring index with doorbell index and MSIX number */
+ req.logical_id = cpu_to_le16(ring_attr->lrid);
+ req.length = cpu_to_le32(ring_attr->depth + 1);
+ req.ring_type = ring_attr->type;
+ req.int_mode = ring_attr->mode;
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+ rc = bnxt_send_msg(en_dev, &fw_msg);
+ if (!rc)
+ *fw_ring_id = le16_to_cpu(resp.ring_id);
+
+ return rc;
+}
+
+static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
+ u32 fw_stats_ctx_id)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_stat_ctx_free_input req = {};
+ struct hwrm_stat_ctx_free_output resp = {};
+ struct bnxt_fw_msg fw_msg = {};
+ int rc = -EINVAL;
+
+ if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
+ return 0;
+
+ bnxt_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_FREE);
+ req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+ rc = bnxt_send_msg(en_dev, &fw_msg);
+ if (rc)
+ ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
+ rc);
+
+ return rc;
+}
+
+static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
+ struct bnxt_qplib_stats *stats)
+{
+ struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx;
+ struct hwrm_stat_ctx_alloc_output resp = {};
+ struct hwrm_stat_ctx_alloc_input req = {};
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct bnxt_fw_msg fw_msg = {};
+ int rc = -EINVAL;
+
+ stats->fw_id = INVALID_STATS_CTX_ID;
+
+ bnxt_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_ALLOC);
+ req.update_period_ms = cpu_to_le32(1000);
+ req.stats_dma_addr = cpu_to_le64(stats->dma_map);
+ req.stats_dma_length = cpu_to_le16(chip_ctx->hw_stats_size);
+ req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+ rc = bnxt_send_msg(en_dev, &fw_msg);
+ if (!rc)
+ stats->fw_id = le32_to_cpu(resp.stat_ctx_id);
+
+ return rc;
+}
+
+static void bnxt_re_disassociate_ucontext(struct ib_ucontext *ibcontext)
+{
+}
+
+/* Device */
+static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct bnxt_re_dev *rdev =
+ rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);
+
+ return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->revision);
+}
+static DEVICE_ATTR_RO(hw_rev);
+
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct bnxt_re_dev *rdev =
+ rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);
+
+ return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->device);
+}
+static DEVICE_ATTR_RO(hca_type);
+
+static ssize_t board_id_show(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct bnxt_re_dev *rdev = rdma_device_to_drv_device(device,
+ struct bnxt_re_dev, ibdev);
+ char buffer[BNXT_VPD_FLD_LEN] = {};
+
+ if (!rdev->is_virtfn)
+ memcpy(buffer, rdev->board_partno, BNXT_VPD_FLD_LEN - 1);
+ else
+ scnprintf(buffer, BNXT_VPD_FLD_LEN, "0x%x-VF",
+ rdev->en_dev->pdev->device);
+
+ return sysfs_emit(buf, "%s\n", buffer);
+}
+static DEVICE_ATTR_RO(board_id);
+
+static struct attribute *bnxt_re_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL
+};
+
+static const struct attribute_group bnxt_re_dev_attr_group = {
+ .attrs = bnxt_re_attributes,
+};
+
+static int bnxt_re_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr)
+{
+ struct bnxt_qplib_hwq *mr_hwq;
+ struct nlattr *table_attr;
+ struct bnxt_re_mr *mr;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
+ mr_hwq = &mr->qplib_mr.hwq;
+
+ if (rdma_nl_put_driver_u32(msg, "page_size",
+ mr_hwq->qe_ppg * mr_hwq->element_size))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "max_elements", mr_hwq->max_elements))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "element_size", mr_hwq->element_size))
+ goto err;
+ if (rdma_nl_put_driver_u64_hex(msg, "hwq", (unsigned long)mr_hwq))
+ goto err;
+ if (rdma_nl_put_driver_u64_hex(msg, "va", mr->qplib_mr.va))
+ goto err;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+ return -EMSGSIZE;
+}
+
+static int bnxt_re_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr)
+{
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_mr *mr;
+ int err, len;
+ void *data;
+
+ mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
+ rdev = mr->rdev;
+
+ err = bnxt_re_read_context_allowed(rdev);
+ if (err)
+ return err;
+
+ len = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ? BNXT_RE_CONTEXT_TYPE_MRW_SIZE_P7 :
+ BNXT_RE_CONTEXT_TYPE_MRW_SIZE_P5;
+ data = kzalloc(len, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ err = bnxt_qplib_read_context(&rdev->rcfw, CMDQ_READ_CONTEXT_TYPE_MRW,
+ mr->qplib_mr.lkey, len, data);
+ if (!err)
+ err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data);
+
+ kfree(data);
+ return err;
+}
+
+static int bnxt_re_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
+{
+ struct bnxt_qplib_hwq *cq_hwq;
+ struct nlattr *table_attr;
+ struct bnxt_re_cq *cq;
+
+ cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
+ cq_hwq = &cq->qplib_cq.hwq;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ if (rdma_nl_put_driver_u32(msg, "cq_depth", cq_hwq->depth))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "max_elements", cq_hwq->max_elements))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "element_size", cq_hwq->element_size))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "max_wqe", cq->qplib_cq.max_wqe))
+ goto err;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+ return -EMSGSIZE;
+}
+
+static int bnxt_re_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
+{
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_cq *cq;
+ int err, len;
+ void *data;
+
+ cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
+ rdev = cq->rdev;
+
+ err = bnxt_re_read_context_allowed(rdev);
+ if (err)
+ return err;
+
+ len = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ? BNXT_RE_CONTEXT_TYPE_CQ_SIZE_P7 :
+ BNXT_RE_CONTEXT_TYPE_CQ_SIZE_P5;
+ data = kzalloc(len, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ err = bnxt_qplib_read_context(&rdev->rcfw,
+ CMDQ_READ_CONTEXT_TYPE_CQ,
+ cq->qplib_cq.id, len, data);
+ if (!err)
+ err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data);
+
+ kfree(data);
+ return err;
+}
+
+static int bnxt_re_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp)
+{
+ struct bnxt_qplib_qp *qplib_qp;
+ struct nlattr *table_attr;
+ struct bnxt_re_qp *qp;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+ qplib_qp = &qp->qplib_qp;
+
+ if (rdma_nl_put_driver_u32(msg, "sq_max_wqe", qplib_qp->sq.max_wqe))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "sq_max_sge", qplib_qp->sq.max_sge))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "sq_wqe_size", qplib_qp->sq.wqe_size))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "sq_swq_start", qplib_qp->sq.swq_start))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "sq_swq_last", qplib_qp->sq.swq_last))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "rq_max_wqe", qplib_qp->rq.max_wqe))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "rq_max_sge", qplib_qp->rq.max_sge))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "rq_wqe_size", qplib_qp->rq.wqe_size))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "rq_swq_start", qplib_qp->rq.swq_start))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "rq_swq_last", qplib_qp->rq.swq_last))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "timeout", qplib_qp->timeout))
+ goto err;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+ return -EMSGSIZE;
+}
+
+static int bnxt_re_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ibqp)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibqp->device, ibdev);
+ int err, len;
+ void *data;
+
+ err = bnxt_re_read_context_allowed(rdev);
+ if (err)
+ return err;
+
+ len = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ? BNXT_RE_CONTEXT_TYPE_QPC_SIZE_P7 :
+ BNXT_RE_CONTEXT_TYPE_QPC_SIZE_P5;
+ data = kzalloc(len, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ err = bnxt_qplib_read_context(&rdev->rcfw, CMDQ_READ_CONTEXT_TYPE_QPC,
+ ibqp->qp_num, len, data);
+ if (!err)
+ err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data);
+
+ kfree(data);
+ return err;
+}
+
+static int bnxt_re_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq)
+{
+ struct nlattr *table_attr;
+ struct bnxt_re_srq *srq;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
+
+ if (rdma_nl_put_driver_u32_hex(msg, "wqe_size", srq->qplib_srq.wqe_size))
+ goto err;
+ if (rdma_nl_put_driver_u32_hex(msg, "max_wqe", srq->qplib_srq.max_wqe))
+ goto err;
+ if (rdma_nl_put_driver_u32_hex(msg, "max_sge", srq->qplib_srq.max_sge))
+ goto err;
+ if (rdma_nl_put_driver_u32_hex(msg, "srq_limit", srq->qplib_srq.threshold))
+ goto err;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+ return -EMSGSIZE;
+}
+
+static int bnxt_re_fill_res_srq_entry_raw(struct sk_buff *msg, struct ib_srq *ib_srq)
+{
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_srq *srq;
+ int err, len;
+ void *data;
+
+ srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
+ rdev = srq->rdev;
+
+ err = bnxt_re_read_context_allowed(rdev);
+ if (err)
+ return err;
+
+ len = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ? BNXT_RE_CONTEXT_TYPE_SRQ_SIZE_P7 :
+ BNXT_RE_CONTEXT_TYPE_SRQ_SIZE_P5;
+
+ data = kzalloc(len, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ err = bnxt_qplib_read_context(&rdev->rcfw, CMDQ_READ_CONTEXT_TYPE_SRQ,
+ srq->qplib_srq.id, len, data);
+ if (!err)
+ err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data);
+
+ kfree(data);
+ return err;
+}
+
+static const struct ib_device_ops bnxt_re_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_BNXT_RE,
+ .uverbs_abi_ver = BNXT_RE_ABI_VERSION,
+
+ .add_gid = bnxt_re_add_gid,
+ .alloc_hw_port_stats = bnxt_re_ib_alloc_hw_port_stats,
+ .alloc_mr = bnxt_re_alloc_mr,
+ .alloc_pd = bnxt_re_alloc_pd,
+ .alloc_ucontext = bnxt_re_alloc_ucontext,
+ .create_ah = bnxt_re_create_ah,
+ .create_cq = bnxt_re_create_cq,
+ .create_qp = bnxt_re_create_qp,
+ .create_srq = bnxt_re_create_srq,
+ .create_user_ah = bnxt_re_create_ah,
+ .dealloc_pd = bnxt_re_dealloc_pd,
+ .dealloc_ucontext = bnxt_re_dealloc_ucontext,
+ .del_gid = bnxt_re_del_gid,
+ .dereg_mr = bnxt_re_dereg_mr,
+ .destroy_ah = bnxt_re_destroy_ah,
+ .destroy_cq = bnxt_re_destroy_cq,
+ .destroy_qp = bnxt_re_destroy_qp,
+ .destroy_srq = bnxt_re_destroy_srq,
+ .device_group = &bnxt_re_dev_attr_group,
+ .disassociate_ucontext = bnxt_re_disassociate_ucontext,
+ .get_dev_fw_str = bnxt_re_query_fw_str,
+ .get_dma_mr = bnxt_re_get_dma_mr,
+ .get_hw_stats = bnxt_re_ib_get_hw_stats,
+ .get_link_layer = bnxt_re_get_link_layer,
+ .get_port_immutable = bnxt_re_get_port_immutable,
+ .map_mr_sg = bnxt_re_map_mr_sg,
+ .mmap = bnxt_re_mmap,
+ .mmap_free = bnxt_re_mmap_free,
+ .modify_qp = bnxt_re_modify_qp,
+ .modify_srq = bnxt_re_modify_srq,
+ .poll_cq = bnxt_re_poll_cq,
+ .post_recv = bnxt_re_post_recv,
+ .post_send = bnxt_re_post_send,
+ .post_srq_recv = bnxt_re_post_srq_recv,
+ .process_mad = bnxt_re_process_mad,
+ .query_ah = bnxt_re_query_ah,
+ .query_device = bnxt_re_query_device,
+ .modify_device = bnxt_re_modify_device,
+ .query_pkey = bnxt_re_query_pkey,
+ .query_port = bnxt_re_query_port,
+ .query_qp = bnxt_re_query_qp,
+ .query_srq = bnxt_re_query_srq,
+ .reg_user_mr = bnxt_re_reg_user_mr,
+ .reg_user_mr_dmabuf = bnxt_re_reg_user_mr_dmabuf,
+ .req_notify_cq = bnxt_re_req_notify_cq,
+ .resize_cq = bnxt_re_resize_cq,
+ .create_flow = bnxt_re_create_flow,
+ .destroy_flow = bnxt_re_destroy_flow,
+ INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
+ INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
+ INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, bnxt_re_qp, ib_qp),
+ INIT_RDMA_OBJ_SIZE(ib_srq, bnxt_re_srq, ib_srq),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
+};
+
+static const struct ib_device_ops restrack_ops = {
+ .fill_res_cq_entry = bnxt_re_fill_res_cq_entry,
+ .fill_res_cq_entry_raw = bnxt_re_fill_res_cq_entry_raw,
+ .fill_res_qp_entry = bnxt_re_fill_res_qp_entry,
+ .fill_res_qp_entry_raw = bnxt_re_fill_res_qp_entry_raw,
+ .fill_res_mr_entry = bnxt_re_fill_res_mr_entry,
+ .fill_res_mr_entry_raw = bnxt_re_fill_res_mr_entry_raw,
+ .fill_res_srq_entry = bnxt_re_fill_res_srq_entry,
+ .fill_res_srq_entry_raw = bnxt_re_fill_res_srq_entry_raw,
+};
+
+static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
+{
+ struct ib_device *ibdev = &rdev->ibdev;
+ int ret;
+
+ /* ib device init */
+ ibdev->node_type = RDMA_NODE_IB_CA;
+ strscpy(ibdev->node_desc, BNXT_RE_DESC " HCA");
+ ibdev->phys_port_cnt = 1;
+
+ addrconf_addr_eui48((u8 *)&ibdev->node_guid, rdev->netdev->dev_addr);
+
+ ibdev->num_comp_vectors = rdev->nqr->num_msix - 1;
+ ibdev->dev.parent = &rdev->en_dev->pdev->dev;
+ ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;
+
+ if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
+ ibdev->driver_def = bnxt_re_uapi_defs;
+
+ ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
+ ib_set_device_ops(ibdev, &restrack_ops);
+ ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
+ if (ret)
+ return ret;
+
+ dma_set_max_seg_size(&rdev->en_dev->pdev->dev, UINT_MAX);
+ ibdev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ);
+ return ib_register_device(ibdev, "bnxt_re%d", &rdev->en_dev->pdev->dev);
+}
+
+static struct bnxt_re_dev *bnxt_re_dev_add(struct auxiliary_device *adev,
+ struct bnxt_en_dev *en_dev)
+{
+ struct bnxt_re_dev *rdev;
+
+ /* Allocate bnxt_re_dev instance here */
+ rdev = ib_alloc_device(bnxt_re_dev, ibdev);
+ if (!rdev) {
+ ibdev_err(NULL, "%s: bnxt_re_dev allocation failure!",
+ ROCE_DRV_MODULE_NAME);
+ return NULL;
+ }
+ /* Default values */
+ rdev->netdev = en_dev->net;
+ rdev->en_dev = en_dev;
+ rdev->adev = adev;
+ rdev->id = rdev->en_dev->pdev->devfn;
+ INIT_LIST_HEAD(&rdev->qp_list);
+ mutex_init(&rdev->qp_lock);
+ mutex_init(&rdev->pacing.dbq_lock);
+ atomic_set(&rdev->stats.res.qp_count, 0);
+ atomic_set(&rdev->stats.res.cq_count, 0);
+ atomic_set(&rdev->stats.res.srq_count, 0);
+ atomic_set(&rdev->stats.res.mr_count, 0);
+ atomic_set(&rdev->stats.res.mw_count, 0);
+ atomic_set(&rdev->stats.res.ah_count, 0);
+ atomic_set(&rdev->stats.res.pd_count, 0);
+ rdev->cosq[0] = 0xFFFF;
+ rdev->cosq[1] = 0xFFFF;
+ rdev->cq_coalescing.buf_maxtime = BNXT_QPLIB_CQ_COAL_DEF_BUF_MAXTIME;
+ if (bnxt_re_chip_gen_p7(en_dev->chip_num)) {
+ rdev->cq_coalescing.normal_maxbuf = BNXT_QPLIB_CQ_COAL_DEF_NORMAL_MAXBUF_P7;
+ rdev->cq_coalescing.during_maxbuf = BNXT_QPLIB_CQ_COAL_DEF_DURING_MAXBUF_P7;
+ } else {
+ rdev->cq_coalescing.normal_maxbuf = BNXT_QPLIB_CQ_COAL_DEF_NORMAL_MAXBUF_P5;
+ rdev->cq_coalescing.during_maxbuf = BNXT_QPLIB_CQ_COAL_DEF_DURING_MAXBUF_P5;
+ }
+ rdev->cq_coalescing.en_ring_idle_mode = BNXT_QPLIB_CQ_COAL_DEF_EN_RING_IDLE_MODE;
+
+ return rdev;
+}
+
+static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
+ *unaffi_async)
+{
+ switch (unaffi_async->event) {
+ case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
+ break;
+ case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
+ break;
+ case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
+ break;
+ case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
+ break;
+ case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
+ break;
+ case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
+ break;
+ case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
+ break;
+ case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
+ break;
+ case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
+ break;
+ case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
+ break;
+ case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
+ struct bnxt_re_qp *qp)
+{
+ struct creq_qp_error_notification *err_event;
+ struct bnxt_re_srq *srq = NULL;
+ struct ib_event event = {};
+ unsigned int flags;
+
+ if (qp->qplib_qp.srq)
+ srq = container_of(qp->qplib_qp.srq, struct bnxt_re_srq,
+ qplib_srq);
+
+ if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
+ rdma_is_kernel_res(&qp->ib_qp.res)) {
+ flags = bnxt_re_lock_cqs(qp);
+ bnxt_qplib_add_flush_qp(&qp->qplib_qp);
+ bnxt_re_unlock_cqs(qp, flags);
+ }
+
+ event.device = &qp->rdev->ibdev;
+ event.element.qp = &qp->ib_qp;
+ event.event = IB_EVENT_QP_FATAL;
+
+ err_event = (struct creq_qp_error_notification *)qp_event;
+
+ switch (err_event->req_err_state_reason) {
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_OPCODE_ERROR:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TIMEOUT_RETRY_LIMIT:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RNR_TIMEOUT_RETRY_LIMIT:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_2:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_3:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_READ_RESP:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_BIND:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_FAST_REG:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_INVALIDATE:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETRAN_LOCAL_ERROR:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_AV_DOMAIN_ERROR:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PROD_WQE_MSMTCH_ERROR:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PSN_RANGE_CHECK_ERROR:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ break;
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_1:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_4:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_READ_RESP_LENGTH:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_WQE_FORMAT_ERROR:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ORRQ_FORMAT_ERROR:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_AVID_ERROR:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_SERV_TYPE_ERROR:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_OP_ERROR:
+ event.event = IB_EVENT_QP_REQ_ERR;
+ break;
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_MEMORY_ERROR:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_MEMORY_ERROR:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CMP_ERROR:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CQ_LOAD_ERROR:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_PCI_ERROR:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_PCI_ERROR:
+ case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETX_SETUP_ERROR:
+ event.event = IB_EVENT_QP_FATAL;
+ break;
+
+ default:
+ break;
+ }
+
+ switch (err_event->res_err_state_reason) {
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEED_MAX:
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PAYLOAD_LENGTH_MISMATCH:
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_SEQ_ERROR_RETRY_LIMIT:
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_INVALID_R_KEY:
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_DOMAIN_ERROR:
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_NO_PERMISSION:
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_RANGE_ERROR:
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_INVALID_R_KEY:
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_DOMAIN_ERROR:
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_NO_PERMISSION:
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_RANGE_ERROR:
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNALIGN_ATOMIC:
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_NOT_FOUND:
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_INVALID_DUP_RKEY:
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_FORMAT_ERROR:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ break;
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEEDS_WQE:
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_WQE_FORMAT_ERROR:
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNSUPPORTED_OPCODE:
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_REM_INVALIDATE:
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_OPCODE_ERROR:
+ event.event = IB_EVENT_QP_REQ_ERR;
+ break;
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_OFLOW:
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CMP_ERROR:
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CQ_LOAD_ERROR:
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_PCI_ERROR:
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_PCI_ERROR:
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_MEMORY_ERROR:
+ event.event = IB_EVENT_QP_FATAL;
+ break;
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_LOAD_ERROR:
+ case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_ERROR:
+ if (srq)
+ event.event = IB_EVENT_SRQ_ERR;
+ break;
+ default:
+ break;
+ }
+
+ if (err_event->res_err_state_reason || err_event->req_err_state_reason) {
+ ibdev_dbg(&qp->rdev->ibdev,
+ "%s %s qp_id: %d cons (%d %d) req (%d %d) res (%d %d)\n",
+ __func__, rdma_is_kernel_res(&qp->ib_qp.res) ? "kernel" : "user",
+ qp->qplib_qp.id,
+ err_event->sq_cons_idx,
+ err_event->rq_cons_idx,
+ err_event->req_slow_path_state,
+ err_event->req_err_state_reason,
+ err_event->res_slow_path_state,
+ err_event->res_err_state_reason);
+ } else {
+ if (srq)
+ event.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ }
+
+ if (event.event == IB_EVENT_SRQ_ERR && srq->ib_srq.event_handler) {
+ (*srq->ib_srq.event_handler)(&event,
+ srq->ib_srq.srq_context);
+ } else if (event.device && qp->ib_qp.event_handler) {
+ qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
+ }
+
+ return 0;
+}
+
+static int bnxt_re_handle_cq_async_error(void *event, struct bnxt_re_cq *cq)
+{
+ struct creq_cq_error_notification *cqerr;
+ struct ib_event ibevent = {};
+
+ cqerr = event;
+ switch (cqerr->cq_err_reason) {
+ case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_INVALID_ERROR:
+ case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_OVERFLOW_ERROR:
+ case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_LOAD_ERROR:
+ case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_INVALID_ERROR:
+ case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_OVERFLOW_ERROR:
+ case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_LOAD_ERROR:
+ ibevent.event = IB_EVENT_CQ_ERR;
+ break;
+ default:
+ break;
+ }
+
+ if (ibevent.event == IB_EVENT_CQ_ERR && cq->ib_cq.event_handler) {
+ ibevent.element.cq = &cq->ib_cq;
+ ibevent.device = &cq->rdev->ibdev;
+
+ ibdev_dbg(&cq->rdev->ibdev,
+ "%s err reason %d\n", __func__, cqerr->cq_err_reason);
+ cq->ib_cq.event_handler(&ibevent, cq->ib_cq.cq_context);
+ }
+
+ return 0;
+}
+
+static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async,
+ void *obj)
+{
+ struct bnxt_qplib_qp *lib_qp;
+ struct bnxt_qplib_cq *lib_cq;
+ struct bnxt_re_qp *qp;
+ struct bnxt_re_cq *cq;
+ int rc = 0;
+ u8 event;
+
+ if (!obj)
+ return rc; /* QP was already dead, still return success */
+
+ event = affi_async->event;
+ switch (event) {
+ case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
+ lib_qp = obj;
+ qp = container_of(lib_qp, struct bnxt_re_qp, qplib_qp);
+ rc = bnxt_re_handle_qp_async_event(affi_async, qp);
+ break;
+ case CREQ_QP_EVENT_EVENT_CQ_ERROR_NOTIFICATION:
+ lib_cq = obj;
+ cq = container_of(lib_cq, struct bnxt_re_cq, qplib_cq);
+ rc = bnxt_re_handle_cq_async_error(affi_async, cq);
+ break;
+ default:
+ rc = -EINVAL;
+ }
+ return rc;
+}
+
+static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
+ void *aeqe, void *obj)
+{
+ struct creq_qp_event *affi_async;
+ struct creq_func_event *unaffi_async;
+ u8 type;
+ int rc;
+
+ type = ((struct creq_base *)aeqe)->type;
+ if (type == CREQ_BASE_TYPE_FUNC_EVENT) {
+ unaffi_async = aeqe;
+ rc = bnxt_re_handle_unaffi_async_event(unaffi_async);
+ } else {
+ affi_async = aeqe;
+ rc = bnxt_re_handle_affi_async_event(affi_async, obj);
+ }
+
+ return rc;
+}
+
+static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_srq *handle, u8 event)
+{
+ struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq,
+ qplib_srq);
+ struct ib_event ib_event;
+
+ ib_event.device = &srq->rdev->ibdev;
+ ib_event.element.srq = &srq->ib_srq;
+
+ if (srq->ib_srq.event_handler) {
+ if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT)
+ ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ (*srq->ib_srq.event_handler)(&ib_event,
+ srq->ib_srq.srq_context);
+ }
+ return 0;
+}
+
+static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_cq *handle)
+{
+ struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
+ qplib_cq);
+
+ if (cq->ib_cq.comp_handler)
+ (*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);
+
+ return 0;
+}
+
+static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
+{
+ int i;
+
+ for (i = 1; i < rdev->nqr->num_msix; i++)
+ bnxt_qplib_disable_nq(&rdev->nqr->nq[i - 1]);
+
+ if (rdev->qplib_res.rcfw)
+ bnxt_qplib_cleanup_res(&rdev->qplib_res);
+}
+
+static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
+{
+ int num_vec_enabled = 0;
+ int rc = 0, i;
+ u32 db_offt;
+
+ bnxt_qplib_init_res(&rdev->qplib_res);
+
+ mutex_init(&rdev->nqr->load_lock);
+
+ for (i = 1; i < rdev->nqr->num_msix; i++) {
+ db_offt = rdev->nqr->msix_entries[i].db_offset;
+ rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nqr->nq[i - 1],
+ i - 1, rdev->nqr->msix_entries[i].vector,
+ db_offt, &bnxt_re_cqn_handler,
+ &bnxt_re_srqn_handler);
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to enable NQ with rc = 0x%x", rc);
+ goto fail;
+ }
+ num_vec_enabled++;
+ }
+ return 0;
+fail:
+ for (i = num_vec_enabled; i >= 0; i--)
+ bnxt_qplib_disable_nq(&rdev->nqr->nq[i]);
+ return rc;
+}
+
+static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_nq *nq;
+ u8 type;
+ int i;
+
+ for (i = 0; i < rdev->nqr->num_msix - 1; i++) {
+ type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
+ nq = &rdev->nqr->nq[i];
+ bnxt_re_net_ring_free(rdev, nq->ring_id, type);
+ bnxt_qplib_free_nq(nq);
+ nq->res = NULL;
+ }
+}
+
+static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
+{
+ bnxt_re_free_nq_res(rdev);
+
+ if (rdev->qplib_res.dpi_tbl.max) {
+ bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
+ &rdev->dpi_privileged);
+ }
+ if (rdev->qplib_res.rcfw) {
+ bnxt_qplib_free_res(&rdev->qplib_res);
+ rdev->qplib_res.rcfw = NULL;
+ }
+}
+
+static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_ring_attr rattr = {};
+ int num_vec_created = 0;
+ int rc, i;
+ u8 type;
+
+ /* Configure and allocate resources for qplib */
+ rdev->qplib_res.rcfw = &rdev->rcfw;
+ rc = bnxt_qplib_get_dev_attr(&rdev->rcfw);
+ if (rc)
+ goto fail;
+
+ rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->netdev);
+ if (rc)
+ goto fail;
+
+ rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res,
+ &rdev->dpi_privileged,
+ rdev, BNXT_QPLIB_DPI_TYPE_KERNEL);
+ if (rc)
+ goto dealloc_res;
+
+ for (i = 0; i < rdev->nqr->num_msix - 1; i++) {
+ struct bnxt_qplib_nq *nq;
+
+ nq = &rdev->nqr->nq[i];
+ nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
+ rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, nq);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x",
+ i, rc);
+ goto free_nq;
+ }
+ type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
+ rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr;
+ rattr.pages = nq->hwq.pbl[rdev->nqr->nq[i].hwq.level].pg_count;
+ rattr.type = type;
+ rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1;
+ rattr.lrid = rdev->nqr->msix_entries[i + 1].ring_idx;
+ rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate NQ fw id with rc = 0x%x",
+ rc);
+ bnxt_qplib_free_nq(nq);
+ goto free_nq;
+ }
+ num_vec_created++;
+ }
+ return 0;
+free_nq:
+ for (i = num_vec_created - 1; i >= 0; i--) {
+ type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
+ bnxt_re_net_ring_free(rdev, rdev->nqr->nq[i].ring_id, type);
+ bnxt_qplib_free_nq(&rdev->nqr->nq[i]);
+ }
+ bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
+ &rdev->dpi_privileged);
+dealloc_res:
+ bnxt_qplib_free_res(&rdev->qplib_res);
+
+fail:
+ rdev->qplib_res.rcfw = NULL;
+ return rc;
+}
+
+static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
+ u8 port_num, enum ib_event_type event)
+{
+ struct ib_event ib_event;
+
+ ib_event.device = ibdev;
+ if (qp) {
+ ib_event.element.qp = qp;
+ ib_event.event = event;
+ if (qp->event_handler)
+ qp->event_handler(&ib_event, qp->qp_context);
+
+ } else {
+ ib_event.element.port_num = port_num;
+ ib_event.event = event;
+ ib_dispatch_event(&ib_event);
+ }
+}
+
+static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
+ struct bnxt_re_qp *qp)
+{
+ return (qp->ib_qp.qp_type == IB_QPT_GSI) ||
+ (qp == rdev->gsi_ctx.gsi_sqp);
+}
+
+static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_qp *qp;
+
+ mutex_lock(&rdev->qp_lock);
+ list_for_each_entry(qp, &rdev->qp_list, list) {
+ /* Modify the state of all QPs except QP1/Shadow QP */
+ if (!bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) {
+ if (qp->qplib_qp.state !=
+ CMDQ_MODIFY_QP_NEW_STATE_RESET &&
+ qp->qplib_qp.state !=
+ CMDQ_MODIFY_QP_NEW_STATE_ERR)
+ bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
+ 1, IB_EVENT_QP_FATAL);
+ }
+ }
+ mutex_unlock(&rdev->qp_lock);
+}
+
+static void bnxt_re_net_unregister_async_event(struct bnxt_re_dev *rdev)
+{
+ if (rdev->is_virtfn)
+ return;
+
+ memset(&rdev->event_bitmap, 0, sizeof(rdev->event_bitmap));
+ bnxt_register_async_events(rdev->en_dev, &rdev->event_bitmap,
+ ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
+}
+
+static void bnxt_re_net_register_async_event(struct bnxt_re_dev *rdev)
+{
+ if (rdev->is_virtfn)
+ return;
+
+ rdev->event_bitmap |= (1 << ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
+ bnxt_register_async_events(rdev->en_dev, &rdev->event_bitmap,
+ ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
+}
+
+static void bnxt_re_read_vpd_info(struct bnxt_re_dev *rdev)
+{
+ struct pci_dev *pdev = rdev->en_dev->pdev;
+ unsigned int vpd_size, kw_len;
+ int pos, size;
+ u8 *vpd_data;
+
+ vpd_data = pci_vpd_alloc(pdev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_warn(pdev, "Unable to read VPD, err=%pe\n", vpd_data);
+ return;
+ }
+
+ pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
+ if (pos < 0)
+ goto free;
+
+ size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
+ memcpy(rdev->board_partno, &vpd_data[pos], size);
+free:
+ kfree(vpd_data);
+}
+
+static int bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_ver_get_output resp = {};
+ struct hwrm_ver_get_input req = {};
+ struct bnxt_qplib_chip_ctx *cctx;
+ struct bnxt_fw_msg fw_msg = {};
+ int rc;
+
+ bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VER_GET);
+ req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
+ req.hwrm_intf_min = HWRM_VERSION_MINOR;
+ req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ rc = bnxt_send_msg(en_dev, &fw_msg);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x",
+ rc);
+ return rc;
+ }
+
+ cctx = rdev->chip_ctx;
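+	/* Fold the 16-bit interface major/minor/build/patch fields into a single 64-bit version */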
+ cctx->hwrm_intf_ver =
+ (u64)le16_to_cpu(resp.hwrm_intf_major) << 48 |
+ (u64)le16_to_cpu(resp.hwrm_intf_minor) << 32 |
+ (u64)le16_to_cpu(resp.hwrm_intf_build) << 16 |
+ le16_to_cpu(resp.hwrm_intf_patch);
+
+ cctx->hwrm_cmd_max_timeout = le16_to_cpu(resp.max_req_timeout);
+
+ if (!cctx->hwrm_cmd_max_timeout)
+ cctx->hwrm_cmd_max_timeout = RCFW_FW_STALL_MAX_TIMEOUT;
+
+ return 0;
+}
+
+static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
+{
+ int rc;
+ u32 event;
+
+ /* Register ib dev */
+ rc = bnxt_re_register_ib(rdev);
+ if (rc) {
+ pr_err("Failed to register with IB: %#x\n", rc);
+ return rc;
+ }
+ dev_info(rdev_to_dev(rdev), "Device registered with IB successfully");
+ set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
+
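+	/* Report the initial port state based on the current netdev link status */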
+ event = netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev) ?
+ IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+
+ bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, event);
+
+ return rc;
+}
+
+static int bnxt_re_alloc_nqr_mem(struct bnxt_re_dev *rdev)
+{
+ rdev->nqr = kzalloc(sizeof(*rdev->nqr), GFP_KERNEL);
+ if (!rdev->nqr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void bnxt_re_free_nqr_mem(struct bnxt_re_dev *rdev)
+{
+ kfree(rdev->nqr);
+ rdev->nqr = NULL;
+}
+
+/* When DEL_GID fails, the driver does not free the GID ctx memory.
+ * To avoid a memory leak, free that memory during unload.
+ */
+static void bnxt_re_free_gid_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
+ struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
+ int i;
+
+ if (!sgid_tbl->active)
+ return;
+
+ ctx_tbl = sgid_tbl->ctx;
+ for (i = 0; i < sgid_tbl->max; i++) {
+ if (sgid_tbl->hw_id[i] == 0xFFFF)
+ continue;
+
+ ctx = ctx_tbl[i];
+ kfree(ctx);
+ }
+}
+
+static int bnxt_re_get_stats_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+ struct bnxt_qplib_res *res = &rdev->qplib_res;
+ int rc;
+
+ rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &hctx->stats);
+ if (rc)
+ return rc;
+
+ rc = bnxt_re_net_stats_ctx_alloc(rdev, &hctx->stats);
+ if (rc)
+ goto free_stat_mem;
+
+ return 0;
+free_stat_mem:
+ bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats);
+
+ return rc;
+}
+
+static int bnxt_re_get_stats3_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+ struct bnxt_qplib_res *res = &rdev->qplib_res;
+ int rc;
+
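+	/* The third stats context is only used when the RoCE mirror capability is enabled */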
+ if (!rdev->rcfw.roce_mirror)
+ return 0;
+
+ rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &hctx->stats3);
+ if (rc)
+ return rc;
+
+ rc = bnxt_re_net_stats_ctx_alloc(rdev, &hctx->stats3);
+ if (rc)
+ goto free_stat_mem;
+
+ return 0;
+free_stat_mem:
+ bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats3);
+
+ return rc;
+}
+
+static void bnxt_re_put_stats3_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+ struct bnxt_qplib_res *res = &rdev->qplib_res;
+
+ if (!rdev->rcfw.roce_mirror)
+ return;
+
+ bnxt_re_net_stats_ctx_free(rdev, hctx->stats3.fw_id);
+ bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats3);
+}
+
+static void bnxt_re_put_stats_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+ struct bnxt_qplib_res *res = &rdev->qplib_res;
+
+ bnxt_re_net_stats_ctx_free(rdev, hctx->stats.fw_id);
+ bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats);
+}
+
+static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
+{
+ u8 type;
+ int rc;
+
+ bnxt_re_debugfs_rem_pdev(rdev);
+
+ bnxt_re_net_unregister_async_event(rdev);
+ bnxt_re_uninit_dcb_wq(rdev);
+
+ bnxt_re_put_stats3_ctx(rdev);
+
+ bnxt_re_free_gid_ctx(rdev);
+ if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
+ &rdev->flags))
+ bnxt_re_cleanup_res(rdev);
+ if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags))
+ bnxt_re_free_res(rdev);
+
+ if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
+ rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
+ if (rc)
+ ibdev_warn(&rdev->ibdev,
+ "Failed to deinitialize RCFW: %#x", rc);
+ bnxt_re_put_stats_ctx(rdev);
+ bnxt_qplib_free_hwctx(&rdev->qplib_res, &rdev->qplib_ctx);
+ bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
+ type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
+ bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
+ bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
+ }
+
+ rdev->nqr->num_msix = 0;
+
+ if (rdev->pacing.dbr_pacing)
+ bnxt_re_deinitialize_dbr_pacing(rdev);
+
+ bnxt_re_free_nqr_mem(rdev);
+ bnxt_re_destroy_chip_ctx(rdev);
+ if (op_type == BNXT_RE_COMPLETE_REMOVE) {
+ if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags))
+ bnxt_unregister_dev(rdev->en_dev);
+ }
+}
+
+static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
+{
+ struct bnxt_re_ring_attr rattr = {};
+ struct bnxt_qplib_creq_ctx *creq;
+ u32 db_offt;
+ int vid;
+ u8 type;
+ int rc;
+
+ if (op_type == BNXT_RE_COMPLETE_INIT) {
+		/* Register a new RoCE device instance with the netdev */
+ rc = bnxt_re_register_netdev(rdev);
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to register with Ethernet driver, rc %d\n",
+ rc);
+ return rc;
+ }
+ }
+ set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
+
+ if (rdev->en_dev->ulp_tbl->msix_requested < BNXT_RE_MIN_MSIX) {
+ ibdev_err(&rdev->ibdev,
+ "RoCE requires minimum 2 MSI-X vectors, but only %d reserved\n",
+ rdev->en_dev->ulp_tbl->msix_requested);
+ bnxt_unregister_dev(rdev->en_dev);
+ clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
+ return -EINVAL;
+ }
+ ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n",
+ rdev->en_dev->ulp_tbl->msix_requested);
+
+ rc = bnxt_re_setup_chip_ctx(rdev);
+ if (rc) {
+ bnxt_unregister_dev(rdev->en_dev);
+ clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
+ ibdev_err(&rdev->ibdev, "Failed to get chip context\n");
+ return -EINVAL;
+ }
+
+ rc = bnxt_re_alloc_nqr_mem(rdev);
+ if (rc) {
+ bnxt_re_destroy_chip_ctx(rdev);
+ bnxt_unregister_dev(rdev->en_dev);
+ clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
+ return rc;
+ }
+ rdev->nqr->num_msix = rdev->en_dev->ulp_tbl->msix_requested;
+ memcpy(rdev->nqr->msix_entries, rdev->en_dev->msix_entries,
+ sizeof(struct bnxt_msix_entry) * rdev->nqr->num_msix);
+
+ /* Check whether VF or PF */
+ bnxt_re_get_sriov_func_type(rdev);
+
+ /* Establish RCFW Communication Channel to initialize the context
+ * memory for the function and all child VFs
+ */
+ rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res, &rdev->rcfw,
+ &rdev->qplib_ctx);
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate RCFW Channel: %#x\n", rc);
+ goto fail;
+ }
+
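+	/* Allocate a firmware ring for the CREQ, backed by its HWQ pages */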
+ type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
+ creq = &rdev->rcfw.creq;
+ rattr.dma_arr = creq->hwq.pbl[PBL_LVL_0].pg_map_arr;
+ rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count;
+ rattr.type = type;
+ rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1;
+ rattr.lrid = rdev->nqr->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
+ rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
+ goto free_rcfw;
+ }
+ db_offt = rdev->nqr->msix_entries[BNXT_RE_AEQ_IDX].db_offset;
+ vid = rdev->nqr->msix_entries[BNXT_RE_AEQ_IDX].vector;
+ rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
+ vid, db_offt,
+ &bnxt_re_aeq_handler);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to enable RCFW channel: %#x\n",
+ rc);
+ goto free_ring;
+ }
+
+ if (bnxt_qplib_dbr_pacing_en(rdev->chip_ctx)) {
+ rc = bnxt_re_initialize_dbr_pacing(rdev);
+ if (!rc) {
+ rdev->pacing.dbr_pacing = true;
+ } else {
+ ibdev_err(&rdev->ibdev,
+ "DBR pacing disabled with error : %d\n", rc);
+ rdev->pacing.dbr_pacing = false;
+ }
+ }
+ rc = bnxt_qplib_get_dev_attr(&rdev->rcfw);
+ if (rc)
+ goto disable_rcfw;
+
+ bnxt_qplib_query_version(&rdev->rcfw);
+ bnxt_re_set_resource_limits(rdev);
+
+ if (!rdev->is_virtfn &&
+ !bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
+ rc = bnxt_qplib_alloc_hwctx(&rdev->qplib_res, &rdev->qplib_ctx);
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate hw context: %#x\n", rc);
+ goto disable_rcfw;
+ }
+ }
+
+ rc = bnxt_re_get_stats_ctx(rdev);
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate stats context: %#x\n", rc);
+ goto free_ctx;
+ }
+
+ rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx,
+ rdev->is_virtfn);
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to initialize RCFW: %#x\n", rc);
+ goto free_sctx;
+ }
+ set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags);
+
+ /* Resources based on the 'new' device caps */
+ rc = bnxt_re_alloc_res(rdev);
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate resources: %#x\n", rc);
+ goto fail;
+ }
+ set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags);
+ rc = bnxt_re_init_res(rdev);
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to initialize resources: %#x\n", rc);
+ goto fail;
+ }
+
+ set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);
+
+ if (!rdev->is_virtfn) {
+ /* Query f/w defaults of CC params */
+ rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, &rdev->cc_param);
+ if (rc)
+ ibdev_warn(&rdev->ibdev, "Failed to query CC defaults\n");
+
+ if (!(rdev->qplib_res.en_dev->flags & BNXT_EN_FLAG_ROCE_VF_RES_MGMT))
+ bnxt_re_vf_res_config(rdev);
+ }
+ hash_init(rdev->cq_hash);
+ if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT)
+ hash_init(rdev->srq_hash);
+
+ bnxt_re_debugfs_add_pdev(rdev);
+
+ bnxt_re_init_dcb_wq(rdev);
+ bnxt_re_net_register_async_event(rdev);
+
+ if (!rdev->is_virtfn)
+ bnxt_re_read_vpd_info(rdev);
+
+ rc = bnxt_re_get_stats3_ctx(rdev);
+ if (rc)
+ goto fail;
+
+ return 0;
+free_sctx:
+ bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
+free_ctx:
+ bnxt_qplib_free_hwctx(&rdev->qplib_res, &rdev->qplib_ctx);
+disable_rcfw:
+ bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
+free_ring:
+ type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
+ bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
+free_rcfw:
+ bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
+fail:
+ bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
+
+ return rc;
+}
+
+static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable)
+{
+ struct bnxt_qplib_cc_param cc_param = {};
+
+ /* Do not enable congestion control on VFs */
+ if (rdev->is_virtfn)
+ return;
+
+ /* Currently enabling only for GenP5 adapters */
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return;
+
+ if (enable) {
+ cc_param.enable = 1;
+ cc_param.tos_ecn = 1;
+ }
+
+ cc_param.mask = (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC |
+ CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN);
+
+ if (bnxt_qplib_modify_cc(&rdev->qplib_res, &cc_param))
+ ibdev_err(&rdev->ibdev, "Failed to setup CC enable = %d\n", enable);
+}
+
+static void bnxt_re_update_en_info_rdev(struct bnxt_re_dev *rdev,
+ struct bnxt_re_en_dev_info *en_info,
+ struct auxiliary_device *adev)
+{
+	/* Before updating the rdev pointer in the bnxt_re_en_dev_info structure,
+	 * take the rtnl lock so that L2 ULP callbacks do not access an invalid
+	 * rdev pointer. This applies everywhere the rdev pointer in
+	 * bnxt_re_en_dev_info is updated.
+ */
+ rtnl_lock();
+ en_info->rdev = rdev;
+ rtnl_unlock();
+}
+
+static int bnxt_re_add_device(struct auxiliary_device *adev, u8 op_type)
+{
+ struct bnxt_aux_priv *aux_priv =
+ container_of(adev, struct bnxt_aux_priv, aux_dev);
+ struct bnxt_re_en_dev_info *en_info;
+ struct bnxt_en_dev *en_dev;
+ struct bnxt_re_dev *rdev;
+ int rc;
+
+ en_info = auxiliary_get_drvdata(adev);
+ en_dev = en_info->en_dev;
+
+ rdev = bnxt_re_dev_add(adev, en_dev);
+ if (!rdev || !rdev_to_dev(rdev)) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ bnxt_re_update_en_info_rdev(rdev, en_info, adev);
+
+ rc = bnxt_re_dev_init(rdev, op_type);
+ if (rc)
+ goto re_dev_dealloc;
+
+ rc = bnxt_re_ib_init(rdev);
+ if (rc) {
+ pr_err("Failed to register with IB: %s",
+ aux_priv->aux_dev.name);
+ goto re_dev_uninit;
+ }
+
+ bnxt_re_setup_cc(rdev, true);
+
+ return 0;
+
+re_dev_uninit:
+ bnxt_re_update_en_info_rdev(NULL, en_info, adev);
+ bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
+re_dev_dealloc:
+ ib_dealloc_device(&rdev->ibdev);
+exit:
+ return rc;
+}
+
+#define BNXT_ADEV_NAME "bnxt_en"
+
+static void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 op_type,
+ struct auxiliary_device *aux_dev)
+{
+ bnxt_re_setup_cc(rdev, false);
+ ib_unregister_device(&rdev->ibdev);
+ bnxt_re_dev_uninit(rdev, op_type);
+ ib_dealloc_device(&rdev->ibdev);
+}
+
+static void bnxt_re_remove(struct auxiliary_device *adev)
+{
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+ struct bnxt_re_dev *rdev;
+
+ mutex_lock(&bnxt_re_mutex);
+ rdev = en_info->rdev;
+
+ if (rdev)
+ bnxt_re_remove_device(rdev, BNXT_RE_COMPLETE_REMOVE, adev);
+ kfree(en_info);
+ mutex_unlock(&bnxt_re_mutex);
+}
+
+static int bnxt_re_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct bnxt_aux_priv *aux_priv =
+ container_of(adev, struct bnxt_aux_priv, aux_dev);
+ struct bnxt_re_en_dev_info *en_info;
+ struct bnxt_en_dev *en_dev;
+ int rc;
+
+ en_dev = aux_priv->edev;
+
+ mutex_lock(&bnxt_re_mutex);
+ en_info = kzalloc(sizeof(*en_info), GFP_KERNEL);
+ if (!en_info) {
+ mutex_unlock(&bnxt_re_mutex);
+ return -ENOMEM;
+ }
+ en_info->en_dev = en_dev;
+
+ auxiliary_set_drvdata(adev, en_info);
+
+ rc = bnxt_re_add_device(adev, BNXT_RE_COMPLETE_INIT);
+ if (rc)
+ kfree(en_info);
+
+ mutex_unlock(&bnxt_re_mutex);
+
+ return rc;
+}
+
+static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
+{
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+ struct bnxt_en_dev *en_dev;
+ struct bnxt_re_dev *rdev;
+
+ rdev = en_info->rdev;
+ en_dev = en_info->en_dev;
+ mutex_lock(&bnxt_re_mutex);
+
+ ibdev_info(&rdev->ibdev, "Handle device suspend call");
+ /* Check the current device state from bnxt_en_dev and move the
+ * device to detached state if FW_FATAL_COND is set.
+	 * This prevents further commands from being issued to the HW during
+	 * clean-up when the device is already in an error state.
+ */
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &rdev->en_dev->en_state)) {
+ set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
+ set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
+ wake_up_all(&rdev->rcfw.cmdq.waitq);
+ bnxt_re_dev_stop(rdev);
+ }
+
+ if (rdev->pacing.dbr_pacing)
+ bnxt_re_set_pacing_dev_state(rdev);
+
+ ibdev_info(&rdev->ibdev, "%s: L2 driver notified to stop en_state 0x%lx",
+ __func__, en_dev->en_state);
+ bnxt_re_remove_device(rdev, BNXT_RE_PRE_RECOVERY_REMOVE, adev);
+ bnxt_re_update_en_info_rdev(NULL, en_info, adev);
+ mutex_unlock(&bnxt_re_mutex);
+
+ return 0;
+}
+
+static int bnxt_re_resume(struct auxiliary_device *adev)
+{
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+ struct bnxt_re_dev *rdev;
+
+ mutex_lock(&bnxt_re_mutex);
+ bnxt_re_add_device(adev, BNXT_RE_POST_RECOVERY_INIT);
+ rdev = en_info->rdev;
+ ibdev_info(&rdev->ibdev, "Device resume completed");
+ mutex_unlock(&bnxt_re_mutex);
+
+ return 0;
+}
+
+static void bnxt_re_shutdown(struct auxiliary_device *adev)
+{
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+ struct bnxt_re_dev *rdev;
+
+ rdev = en_info->rdev;
+ ib_unregister_device(&rdev->ibdev);
+ bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
+}
+
+static const struct auxiliary_device_id bnxt_re_id_table[] = {
+ { .name = BNXT_ADEV_NAME ".rdma", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, bnxt_re_id_table);
+
+static struct auxiliary_driver bnxt_re_driver = {
+ .name = "rdma",
+ .probe = bnxt_re_probe,
+ .remove = bnxt_re_remove,
+ .shutdown = bnxt_re_shutdown,
+ .suspend = bnxt_re_suspend,
+ .resume = bnxt_re_resume,
+ .id_table = bnxt_re_id_table,
+};
+
+static int __init bnxt_re_mod_init(void)
+{
+ int rc;
+
+ pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version);
+ bnxt_re_register_debugfs();
+
+ rc = auxiliary_driver_register(&bnxt_re_driver);
+ if (rc) {
+ pr_err("%s: Failed to register auxiliary driver\n",
+ ROCE_DRV_MODULE_NAME);
+ goto err_debug;
+ }
+ return 0;
+err_debug:
+ bnxt_re_unregister_debugfs();
+ return rc;
+}
+
+static void __exit bnxt_re_mod_exit(void)
+{
+ auxiliary_driver_unregister(&bnxt_re_driver);
+ bnxt_re_unregister_debugfs();
+}
+
+module_init(bnxt_re_mod_init);
+module_exit(bnxt_re_mod_exit);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
new file mode 100644
index 000000000000..ce90d3d834d4
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -0,0 +1,3225 @@
+/*
+ * Broadcom NetXtreme-E RoCE driver.
+ *
+ * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: Fast Path Operators
+ */
+
+#define dev_fmt(fmt) "QPLIB: " fmt
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/prefetch.h>
+#include <linux/if_ether.h>
+#include <rdma/ib_mad.h>
+
+#include "roce_hsi.h"
+
+#include "qplib_res.h"
+#include "qplib_rcfw.h"
+#include "qplib_sp.h"
+#include "qplib_fp.h"
+#include <rdma/ib_addr.h>
+#include "bnxt_ulp.h"
+#include "bnxt_re.h"
+#include "ib_verbs.h"
+
+static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
+
+static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
+{
+ qp->sq.condition = false;
+ qp->sq.send_phantom = false;
+ qp->sq.single = false;
+}
+
+/* Flush list */
+static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_cq *scq, *rcq;
+
+ scq = qp->scq;
+ rcq = qp->rcq;
+
+ if (!qp->sq.flushed) {
+ dev_dbg(&scq->hwq.pdev->dev,
+ "FP: Adding to SQ Flush list = %p\n", qp);
+ bnxt_qplib_cancel_phantom_processing(qp);
+ list_add_tail(&qp->sq_flush, &scq->sqf_head);
+ qp->sq.flushed = true;
+ }
+ if (!qp->srq) {
+ if (!qp->rq.flushed) {
+ dev_dbg(&rcq->hwq.pdev->dev,
+ "FP: Adding to RQ Flush list = %p\n", qp);
+ list_add_tail(&qp->rq_flush, &rcq->rqf_head);
+ qp->rq.flushed = true;
+ }
+ }
+}
+
+static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
+ unsigned long *flags)
+ __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
+{
+ spin_lock_irqsave(&qp->scq->flush_lock, *flags);
+ if (qp->scq == qp->rcq)
+ __acquire(&qp->rcq->flush_lock);
+ else
+ spin_lock(&qp->rcq->flush_lock);
+}
+
+static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
+ unsigned long *flags)
+ __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
+{
+ if (qp->scq == qp->rcq)
+ __release(&qp->rcq->flush_lock);
+ else
+ spin_unlock(&qp->rcq->flush_lock);
+ spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
+}
+
+void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
+{
+ unsigned long flags;
+
+ bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_release_cq_flush_locks(qp, &flags);
+}
+
+static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
+{
+ if (qp->sq.flushed) {
+ qp->sq.flushed = false;
+ list_del(&qp->sq_flush);
+ }
+ if (!qp->srq) {
+ if (qp->rq.flushed) {
+ qp->rq.flushed = false;
+ list_del(&qp->rq_flush);
+ }
+ }
+}
+
+void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
+{
+ unsigned long flags;
+
+ bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
+ __clean_cq(qp->scq, (u64)(unsigned long)qp);
+ qp->sq.hwq.prod = 0;
+ qp->sq.hwq.cons = 0;
+ __clean_cq(qp->rcq, (u64)(unsigned long)qp);
+ qp->rq.hwq.prod = 0;
+ qp->rq.hwq.cons = 0;
+
+ __bnxt_qplib_del_flush_qp(qp);
+ bnxt_qplib_release_cq_flush_locks(qp, &flags);
+}
+
+static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
+{
+ struct bnxt_qplib_nq_work *nq_work =
+ container_of(work, struct bnxt_qplib_nq_work, work);
+
+ struct bnxt_qplib_cq *cq = nq_work->cq;
+ struct bnxt_qplib_nq *nq = nq_work->nq;
+
+ if (cq && nq) {
+ spin_lock_bh(&cq->compl_lock);
+ if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
+ dev_dbg(&nq->pdev->dev,
+ "%s:Trigger cq = %p event nq = %p\n",
+ __func__, cq, nq);
+ nq->cqn_handler(nq, cq);
+ }
+ spin_unlock_bh(&cq->compl_lock);
+ }
+ kfree(nq_work);
+}
+
+static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_q *rq = &qp->rq;
+ struct bnxt_qplib_q *sq = &qp->sq;
+
+ if (qp->rq_hdr_buf)
+ dma_free_coherent(&res->pdev->dev,
+ rq->max_wqe * qp->rq_hdr_buf_size,
+ qp->rq_hdr_buf, qp->rq_hdr_buf_map);
+ if (qp->sq_hdr_buf)
+ dma_free_coherent(&res->pdev->dev,
+ sq->max_wqe * qp->sq_hdr_buf_size,
+ qp->sq_hdr_buf, qp->sq_hdr_buf_map);
+ qp->rq_hdr_buf = NULL;
+ qp->sq_hdr_buf = NULL;
+ qp->rq_hdr_buf_map = 0;
+ qp->sq_hdr_buf_map = 0;
+ qp->sq_hdr_buf_size = 0;
+ qp->rq_hdr_buf_size = 0;
+}
+
+static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_q *rq = &qp->rq;
+ struct bnxt_qplib_q *sq = &qp->sq;
+ int rc = 0;
+
+ if (qp->sq_hdr_buf_size && sq->max_wqe) {
+ qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
+ sq->max_wqe * qp->sq_hdr_buf_size,
+ &qp->sq_hdr_buf_map, GFP_KERNEL);
+ if (!qp->sq_hdr_buf) {
+ rc = -ENOMEM;
+ dev_err(&res->pdev->dev,
+ "Failed to create sq_hdr_buf\n");
+ goto fail;
+ }
+ }
+
+ if (qp->rq_hdr_buf_size && rq->max_wqe) {
+ qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
+ rq->max_wqe *
+ qp->rq_hdr_buf_size,
+ &qp->rq_hdr_buf_map,
+ GFP_KERNEL);
+ if (!qp->rq_hdr_buf) {
+ rc = -ENOMEM;
+ dev_err(&res->pdev->dev,
+ "Failed to create rq_hdr_buf\n");
+ goto fail;
+ }
+ }
+ return 0;
+
+fail:
+ bnxt_qplib_free_qp_hdr_buf(res, qp);
+ return rc;
+}
+
+static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
+{
+ struct bnxt_qplib_hwq *hwq = &nq->hwq;
+ struct nq_base *nqe, **nq_ptr;
+ int budget = nq->budget;
+ uintptr_t q_handle;
+ u16 type;
+
+ spin_lock_bh(&hwq->lock);
+ /* Service the NQ until empty */
+ while (budget--) {
+ nq_ptr = (struct nq_base **)hwq->pbl_ptr;
+ nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)];
+ if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
+ break;
+
+ /*
+		 * The valid test of the entry must be done before reading
+		 * any further.
+ */
+ dma_rmb();
+
+ type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
+ switch (type) {
+ case NQ_BASE_TYPE_CQ_NOTIFICATION:
+ {
+ struct nq_cn *nqcne = (struct nq_cn *)nqe;
+
+ q_handle = le32_to_cpu(nqcne->cq_handle_low);
+ q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
+ << 32;
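+			/* Invalidate NQEs that refer to this CQ and account for them in cnq_events */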
+ if ((unsigned long)cq == q_handle) {
+ nqcne->cq_handle_low = 0;
+ nqcne->cq_handle_high = 0;
+ cq->cnq_events++;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
+ 1, &nq->nq_db.dbinfo.flags);
+ }
+ spin_unlock_bh(&hwq->lock);
+}
+
+/* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
+ * this CQ.
+ */
+static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
+{
+ u32 retry_cnt = 100;
+
+ while (retry_cnt--) {
+ if (cnq_events == cq->cnq_events)
+ return;
+ usleep_range(50, 100);
+ clean_nq(cq->nq, cq);
+ }
+}
+
+static void bnxt_qplib_service_nq(struct tasklet_struct *t)
+{
+ struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
+ struct bnxt_qplib_hwq *hwq = &nq->hwq;
+ struct bnxt_qplib_cq *cq;
+ int budget = nq->budget;
+ struct nq_base *nqe;
+ uintptr_t q_handle;
+ u32 hw_polled = 0;
+ u16 type;
+
+ spin_lock_bh(&hwq->lock);
+ /* Service the NQ until empty */
+ while (budget--) {
+ nqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
+ if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
+ break;
+
+ /*
+		 * The valid test of the entry must be done before reading
+		 * any further.
+ */
+ dma_rmb();
+
+ type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
+ switch (type) {
+ case NQ_BASE_TYPE_CQ_NOTIFICATION:
+ {
+ struct nq_cn *nqcne = (struct nq_cn *)nqe;
+ struct bnxt_re_cq *cq_p;
+
+ q_handle = le32_to_cpu(nqcne->cq_handle_low);
+ q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
+ << 32;
+ cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
+ if (!cq)
+ break;
+ cq->toggle = (le16_to_cpu(nqe->info10_type) &
+ NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT;
+ cq->dbinfo.toggle = cq->toggle;
+ cq_p = container_of(cq, struct bnxt_re_cq, qplib_cq);
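+			/* Propagate the new toggle value to the user-mapped CQ page, if one exists */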
+ if (cq_p->uctx_cq_page)
+ *((u32 *)cq_p->uctx_cq_page) = cq->toggle;
+
+ bnxt_qplib_armen_db(&cq->dbinfo,
+ DBC_DBC_TYPE_CQ_ARMENA);
+ spin_lock_bh(&cq->compl_lock);
+ atomic_set(&cq->arm_state, 0);
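+	/* Clamp the part number to the size of the destination field */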
+ if (nq->cqn_handler(nq, (cq)))
+ dev_warn(&nq->pdev->dev,
+ "cqn - type 0x%x not handled\n", type);
+ cq->cnq_events++;
+ spin_unlock_bh(&cq->compl_lock);
+ break;
+ }
+ case NQ_BASE_TYPE_SRQ_EVENT:
+ {
+ struct bnxt_qplib_srq *srq;
+ struct bnxt_re_srq *srq_p;
+ struct nq_srq_event *nqsrqe =
+ (struct nq_srq_event *)nqe;
+
+ q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
+ q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
+ << 32;
+ srq = (struct bnxt_qplib_srq *)q_handle;
+ srq->toggle = (le16_to_cpu(nqe->info10_type) & NQ_CN_TOGGLE_MASK)
+ >> NQ_CN_TOGGLE_SFT;
+ srq->dbinfo.toggle = srq->toggle;
+ srq_p = container_of(srq, struct bnxt_re_srq, qplib_srq);
+ if (srq_p->uctx_srq_page)
+ *((u32 *)srq_p->uctx_srq_page) = srq->toggle;
+ bnxt_qplib_armen_db(&srq->dbinfo,
+ DBC_DBC_TYPE_SRQ_ARMENA);
+ if (nq->srqn_handler(nq,
+ (struct bnxt_qplib_srq *)q_handle,
+ nqsrqe->event))
+ dev_warn(&nq->pdev->dev,
+ "SRQ event 0x%x not handled\n",
+ nqsrqe->event);
+ break;
+ }
+ case NQ_BASE_TYPE_DBQ_EVENT:
+ break;
+ default:
+ dev_warn(&nq->pdev->dev,
+ "nqe with type = 0x%x not handled\n", type);
+ break;
+ }
+ hw_polled++;
+ bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
+ 1, &nq->nq_db.dbinfo.flags);
+ }
+ if (hw_polled)
+ bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
+ spin_unlock_bh(&hwq->lock);
+}
+
+/* bnxt_re_synchronize_nq - self-poll a notification queue.
+ * @nq - notification queue pointer
+ *
+ * Poll and service all pending entries of the given notification queue.
+ * This is useful to synchronize notification processing while the
+ * associated resources are being torn down.
+ */
+
+void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
+{
+ int budget = nq->budget;
+
+ nq->budget = nq->hwq.max_elements;
+ bnxt_qplib_service_nq(&nq->nq_tasklet);
+ nq->budget = budget;
+}
+
+static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
+{
+ struct bnxt_qplib_nq *nq = dev_instance;
+ struct bnxt_qplib_hwq *hwq = &nq->hwq;
+ u32 sw_cons;
+
+ /* Prefetch the NQ element */
+ sw_cons = HWQ_CMP(hwq->cons, hwq);
+ prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));
+
+ /* Fan out to CPU affinitized kthreads? */
+ tasklet_schedule(&nq->nq_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
+{
+ if (!nq->requested)
+ return;
+
+ nq->requested = false;
+ /* Mask h/w interrupt */
+ bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
+ /* Sync with last running IRQ handler */
+ synchronize_irq(nq->msix_vec);
+ irq_set_affinity_hint(nq->msix_vec, NULL);
+ free_irq(nq->msix_vec, nq);
+ kfree(nq->name);
+ nq->name = NULL;
+
+ if (kill)
+ tasklet_kill(&nq->nq_tasklet);
+ tasklet_disable(&nq->nq_tasklet);
+}
+
+void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
+{
+ if (nq->cqn_wq) {
+ destroy_workqueue(nq->cqn_wq);
+ nq->cqn_wq = NULL;
+ }
+
+ /* Make sure the HW is stopped! */
+ bnxt_qplib_nq_stop_irq(nq, true);
+
+ if (nq->nq_db.reg.bar_reg) {
+ iounmap(nq->nq_db.reg.bar_reg);
+ nq->nq_db.reg.bar_reg = NULL;
+ }
+
+ nq->cqn_handler = NULL;
+ nq->srqn_handler = NULL;
+ nq->msix_vec = 0;
+}
+
+int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
+ int msix_vector, bool need_init)
+{
+ struct bnxt_qplib_res *res = nq->res;
+ int rc;
+
+ if (nq->requested)
+ return -EFAULT;
+
+ nq->msix_vec = msix_vector;
+ if (need_init)
+ tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
+ else
+ tasklet_enable(&nq->nq_tasklet);
+
+ nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
+ nq_indx, pci_name(res->pdev));
+ if (!nq->name)
+ return -ENOMEM;
+ rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
+ if (rc) {
+ kfree(nq->name);
+ nq->name = NULL;
+ tasklet_disable(&nq->nq_tasklet);
+ return rc;
+ }
+
+ cpumask_clear(&nq->mask);
+ cpumask_set_cpu(nq_indx, &nq->mask);
+ rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
+ if (rc) {
+ dev_warn(&nq->pdev->dev,
+ "set affinity failed; vector: %d nq_idx: %d\n",
+ nq->msix_vec, nq_indx);
+ }
+ nq->requested = true;
+ bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);
+
+ return rc;
+}
+
+static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
+{
+ resource_size_t reg_base;
+ struct bnxt_qplib_nq_db *nq_db;
+ struct pci_dev *pdev;
+
+ pdev = nq->pdev;
+ nq_db = &nq->nq_db;
+
+ nq_db->dbinfo.flags = 0;
+ nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
+ nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
+ if (!nq_db->reg.bar_base) {
+ dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
+ nq_db->reg.bar_id);
+ return -ENOMEM;
+ }
+
+ reg_base = nq_db->reg.bar_base + reg_offt;
+ /* Unconditionally map 8 bytes to support 57500 series */
+ nq_db->reg.len = 8;
+ nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
+ if (!nq_db->reg.bar_reg) {
+ dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
+ nq_db->reg.bar_id);
+ return -ENOMEM;
+ }
+
+ nq_db->dbinfo.db = nq_db->reg.bar_reg;
+ nq_db->dbinfo.hwq = &nq->hwq;
+ nq_db->dbinfo.xid = nq->ring_id;
+
+ return 0;
+}
+
+int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
+ int nq_idx, int msix_vector, int bar_reg_offset,
+ cqn_handler_t cqn_handler,
+ srqn_handler_t srqn_handler)
+{
+ int rc;
+
+ nq->pdev = pdev;
+ nq->cqn_handler = cqn_handler;
+ nq->srqn_handler = srqn_handler;
+ nq->load = 0;
+
+	/* Use a workqueue task to schedule CQ notifiers in the post-send case */
+ nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
+ if (!nq->cqn_wq)
+ return -ENOMEM;
+
+ rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
+ if (rc)
+ goto fail;
+
+ rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
+ if (rc) {
+ dev_err(&nq->pdev->dev,
+ "Failed to request irq for nq-idx %d\n", nq_idx);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ bnxt_qplib_disable_nq(nq);
+ return rc;
+}
+
+void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
+{
+ if (nq->hwq.max_elements) {
+ bnxt_qplib_free_hwq(nq->res, &nq->hwq);
+ nq->hwq.max_elements = 0;
+ }
+}
+
+int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
+{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_sg_info sginfo = {};
+
+ nq->pdev = res->pdev;
+ nq->res = res;
+ if (!nq->hwq.max_elements ||
+ nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
+ nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
+
+ sginfo.pgsize = PAGE_SIZE;
+ sginfo.pgshft = PAGE_SHIFT;
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &sginfo;
+ hwq_attr.depth = nq->hwq.max_elements;
+ hwq_attr.stride = sizeof(struct nq_base);
+ hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
+ if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
+ dev_err(&nq->pdev->dev, "FP NQ allocation failed");
+ return -ENOMEM;
+ }
+ nq->budget = 8;
+ return 0;
+}
+
+/* SRQ */
+void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_destroy_srq_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_destroy_srq req = {};
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_DESTROY_SRQ,
+ sizeof(req));
+
+ /* Configure the request */
+ req.srq_cid = cpu_to_le32(srq->id);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ kfree(srq->swq);
+ if (rc)
+ return;
+ bnxt_qplib_free_hwq(res, &srq->hwq);
+}
+
+int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct creq_create_srq_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_create_srq req = {};
+ struct bnxt_qplib_pbl *pbl;
+ u16 pg_sz_lvl;
+ int rc, idx;
+
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &srq->sg_info;
+ hwq_attr.depth = srq->max_wqe;
+ hwq_attr.stride = srq->wqe_size;
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
+ if (rc)
+ return rc;
+ srq->dbinfo.flags = 0;
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_CREATE_SRQ,
+ sizeof(req));
+
+ /* Configure the request */
+ req.dpi = cpu_to_le32(srq->dpi->dpi);
+ req.srq_handle = cpu_to_le64((uintptr_t)srq);
+
+ req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
+ pbl = &srq->hwq.pbl[PBL_LVL_0];
+ pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
+ CMDQ_CREATE_SRQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
+ CMDQ_CREATE_SRQ_LVL_SFT;
+ req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
+ req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
+ req.pd_id = cpu_to_le32(srq->pd->id);
+ req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ goto fail;
+
+ spin_lock_init(&srq->lock);
+ srq->start_idx = 0;
+ srq->last_idx = srq->hwq.max_elements - 1;
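+	/* For kernel consumers, build a linked free list of SWQ entries */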
+ if (!srq->hwq.is_user) {
+ srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
+ GFP_KERNEL);
+ if (!srq->swq) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ for (idx = 0; idx < srq->hwq.max_elements; idx++)
+ srq->swq[idx].next_idx = idx + 1;
+ srq->swq[srq->last_idx].next_idx = -1;
+ }
+
+ srq->id = le32_to_cpu(resp.xid);
+ srq->dbinfo.hwq = &srq->hwq;
+ srq->dbinfo.xid = srq->id;
+ srq->dbinfo.db = srq->dpi->dbr;
+ srq->dbinfo.max_slot = 1;
+ srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
+ bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
+
+ return 0;
+fail:
+ bnxt_qplib_free_hwq(res, &srq->hwq);
+ kfree(srq->swq);
+
+ return rc;
+}
+
+int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_query_srq_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct bnxt_qplib_rcfw_sbuf sbuf;
+ struct creq_query_srq_resp_sb *sb;
+ struct cmdq_query_srq req = {};
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_QUERY_SRQ,
+ sizeof(req));
+
+ /* Configure the request */
+ sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
+ return -ENOMEM;
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ req.srq_cid = cpu_to_le32(srq->id);
+ sb = sbuf.sb;
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (!rc)
+ srq->threshold = le16_to_cpu(sb->srq_limit);
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
+
+ return rc;
+}
+
+int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
+ struct bnxt_qplib_swqe *wqe)
+{
+ struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+ struct rq_wqe *srqe;
+ struct sq_sge *hw_sge;
+ int i, next;
+
+ spin_lock(&srq_hwq->lock);
+ if (srq->start_idx == srq->last_idx) {
+ dev_err(&srq_hwq->pdev->dev,
+ "FP: SRQ (0x%x) is full!\n", srq->id);
+ spin_unlock(&srq_hwq->lock);
+ return -EINVAL;
+ }
+ next = srq->start_idx;
+ srq->start_idx = srq->swq[next].next_idx;
+ spin_unlock(&srq_hwq->lock);
+
+ srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
+ memset(srqe, 0, srq->wqe_size);
+ /* Calculate wqe_size16 and data_len */
+ for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
+ i < wqe->num_sge; i++, hw_sge++) {
+ hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
+ hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
+ hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
+ }
+ srqe->wqe_type = wqe->type;
+ srqe->flags = wqe->flags;
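+	/* wqe_size is in 16-byte slots: the fixed header area plus one slot per SGE */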
+ srqe->wqe_size = wqe->num_sge +
+ ((offsetof(typeof(*srqe), data) + 15) >> 4);
+ srqe->wr_id[0] = cpu_to_le32((u32)next);
+ srq->swq[next].wr_id = wqe->wr_id;
+
+ bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);
+
+ /* Ring DB */
+ bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
+
+ return 0;
+}
+
+/* QP */
+
+static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
+{
+ int indx;
+
+ que->swq = kcalloc(que->max_sw_wqe, sizeof(*que->swq), GFP_KERNEL);
+ if (!que->swq)
+ return -ENOMEM;
+
+ que->swq_start = 0;
+ que->swq_last = que->max_sw_wqe - 1;
+ for (indx = 0; indx < que->max_sw_wqe; indx++)
+ que->swq[indx].next_idx = indx + 1;
+ que->swq[que->swq_last].next_idx = 0; /* Make it circular */
+ que->swq_last = 0;
+
+ return 0;
+}
+
+int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_create_qp1_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct bnxt_qplib_q *sq = &qp->sq;
+ struct bnxt_qplib_q *rq = &qp->rq;
+ struct cmdq_create_qp1 req = {};
+ struct bnxt_qplib_pbl *pbl;
+ u32 qp_flags = 0;
+ u8 pg_sz_lvl;
+ u32 tbl_indx;
+ int rc;
+
+ sq->dbinfo.flags = 0;
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_CREATE_QP1,
+ sizeof(req));
+ /* General */
+ req.type = qp->type;
+ req.dpi = cpu_to_le32(qp->dpi->dpi);
+ req.qp_handle = cpu_to_le64(qp->qp_handle);
+
+ /* SQ */
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &sq->sg_info;
+ hwq_attr.stride = sizeof(struct sq_sge);
+ hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, false);
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
+ if (rc)
+ return rc;
+
+ rc = bnxt_qplib_alloc_init_swq(sq);
+ if (rc)
+ goto fail_sq;
+
+ req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
+ pbl = &sq->hwq.pbl[PBL_LVL_0];
+ req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
+ CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
+ req.sq_pg_size_sq_lvl = pg_sz_lvl;
+ req.sq_fwo_sq_sge =
+ cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
+ CMDQ_CREATE_QP1_SQ_SGE_SFT);
+ req.scq_cid = cpu_to_le32(qp->scq->id);
+
+ /* RQ */
+ if (rq->max_wqe) {
+ rq->dbinfo.flags = 0;
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &rq->sg_info;
+ hwq_attr.stride = sizeof(struct sq_sge);
+ hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
+ if (rc)
+ goto sq_swq;
+ rc = bnxt_qplib_alloc_init_swq(rq);
+ if (rc)
+ goto fail_rq;
+ req.rq_size = cpu_to_le32(rq->max_wqe);
+ pbl = &rq->hwq.pbl[PBL_LVL_0];
+ req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
+ CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
+ req.rq_pg_size_rq_lvl = pg_sz_lvl;
+ req.rq_fwo_rq_sge =
+ cpu_to_le16((rq->max_sge &
+ CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
+ CMDQ_CREATE_QP1_RQ_SGE_SFT);
+ }
+ req.rcq_cid = cpu_to_le32(qp->rcq->id);
+	/* Header buffer - allow a hdr_buf to be passed in */
+ rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
+ if (rc) {
+ rc = -ENOMEM;
+ goto rq_rwq;
+ }
+ qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
+ req.qp_flags = cpu_to_le32(qp_flags);
+ req.pd_id = cpu_to_le32(qp->pd->id);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ goto fail;
+
+ qp->id = le32_to_cpu(resp.xid);
+ qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
+ qp->cctx = res->cctx;
+ sq->dbinfo.hwq = &sq->hwq;
+ sq->dbinfo.xid = qp->id;
+ sq->dbinfo.db = qp->dpi->dbr;
+ sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
+ if (rq->max_wqe) {
+ rq->dbinfo.hwq = &rq->hwq;
+ rq->dbinfo.xid = qp->id;
+ rq->dbinfo.db = qp->dpi->dbr;
+ rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
+ }
+ tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
+ rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
+ rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
+
+ return 0;
+
+fail:
+ bnxt_qplib_free_qp_hdr_buf(res, qp);
+rq_rwq:
+ kfree(rq->swq);
+fail_rq:
+ bnxt_qplib_free_hwq(res, &rq->hwq);
+sq_swq:
+ kfree(sq->swq);
+fail_sq:
+ bnxt_qplib_free_hwq(res, &sq->hwq);
+ return rc;
+}
+
+static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
+{
+ struct bnxt_qplib_hwq *hwq;
+ struct bnxt_qplib_q *sq;
+ u64 fpsne, psn_pg;
+ u16 indx_pad = 0;
+
+ sq = &qp->sq;
+ hwq = &sq->hwq;
+ /* First psn entry */
+ fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
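+	/* If the first PSN/MSN entry is not page aligned, remember its slot offset within the page */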
+ if (!IS_ALIGNED(fpsne, PAGE_SIZE))
+ indx_pad = (fpsne & ~PAGE_MASK) / size;
+ hwq->pad_pgofft = indx_pad;
+ hwq->pad_pg = (u64 *)psn_pg;
+ hwq->pad_stride = size;
+}
+
+int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_sg_info sginfo = {};
+ struct creq_create_qp_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct bnxt_qplib_q *sq = &qp->sq;
+ struct bnxt_qplib_q *rq = &qp->rq;
+ struct cmdq_create_qp req = {};
+ int rc, req_size, psn_sz = 0;
+ struct bnxt_qplib_hwq *xrrq;
+ struct bnxt_qplib_pbl *pbl;
+ u32 qp_flags = 0;
+ u8 pg_sz_lvl;
+ u32 tbl_indx;
+ u16 nsge;
+
+ qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
+ sq->dbinfo.flags = 0;
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_CREATE_QP,
+ sizeof(req));
+
+ /* General */
+ req.type = qp->type;
+ req.dpi = cpu_to_le32(qp->dpi->dpi);
+ req.qp_handle = cpu_to_le64(qp->qp_handle);
+
+ /* SQ */
+ if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
+ psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
+ sizeof(struct sq_psn_search_ext) :
+ sizeof(struct sq_psn_search);
+
+ if (qp->is_host_msn_tbl) {
+ psn_sz = sizeof(struct sq_msn_search);
+ qp->msn = 0;
+ }
+ }
+
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &sq->sg_info;
+ hwq_attr.stride = sizeof(struct sq_sge);
+ hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, true);
+ hwq_attr.aux_stride = psn_sz;
+ hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
+ : 0;
+ /* Update msn tbl size */
+ if (qp->is_host_msn_tbl && psn_sz) {
+ if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
+ hwq_attr.aux_depth =
+ roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
+ else
+ hwq_attr.aux_depth =
+ roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)) / 2;
+ qp->msn_tbl_sz = hwq_attr.aux_depth;
+ qp->msn = 0;
+ }
+
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
+ if (rc)
+ return rc;
+
+ if (!sq->hwq.is_user) {
+ rc = bnxt_qplib_alloc_init_swq(sq);
+ if (rc)
+ goto fail_sq;
+
+ if (psn_sz)
+ bnxt_qplib_init_psn_ptr(qp, psn_sz);
+ }
+ req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
+ pbl = &sq->hwq.pbl[PBL_LVL_0];
+ req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
+ CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
+ req.sq_pg_size_sq_lvl = pg_sz_lvl;
+ req.sq_fwo_sq_sge =
+ cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
+ CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
+ req.scq_cid = cpu_to_le32(qp->scq->id);
+
+ /* RQ */
+ if (!qp->srq) {
+ rq->dbinfo.flags = 0;
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &rq->sg_info;
+ hwq_attr.stride = sizeof(struct sq_sge);
+ hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
+ hwq_attr.aux_stride = 0;
+ hwq_attr.aux_depth = 0;
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
+ if (rc)
+ goto sq_swq;
+ if (!rq->hwq.is_user) {
+ rc = bnxt_qplib_alloc_init_swq(rq);
+ if (rc)
+ goto fail_rq;
+ }
+
+ req.rq_size = cpu_to_le32(rq->max_wqe);
+ pbl = &rq->hwq.pbl[PBL_LVL_0];
+ req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
+ CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
+ req.rq_pg_size_rq_lvl = pg_sz_lvl;
+ nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
+ 6 : rq->max_sge;
+ req.rq_fwo_rq_sge =
+ cpu_to_le16(((nsge &
+ CMDQ_CREATE_QP_RQ_SGE_MASK) <<
+ CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
+ } else {
+ /* SRQ */
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
+ req.srq_cid = cpu_to_le32(qp->srq->id);
+ }
+ req.rcq_cid = cpu_to_le32(qp->rcq->id);
+
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
+ if (qp->sig_type)
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
+ if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
+ if (bnxt_ext_stats_supported(res->cctx, res->dattr->dev_cap_flags, res->is_vf))
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;
+
+ req.qp_flags = cpu_to_le32(qp_flags);
+
+ /* ORRQ and IRRQ */
+ if (psn_sz) {
+ xrrq = &qp->orrq;
+ xrrq->max_elements =
+ ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
+ req_size = xrrq->max_elements *
+ BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
+ req_size &= ~(PAGE_SIZE - 1);
+ sginfo.pgsize = req_size;
+ sginfo.pgshft = PAGE_SHIFT;
+
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &sginfo;
+ hwq_attr.depth = xrrq->max_elements;
+ hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
+ hwq_attr.aux_stride = 0;
+ hwq_attr.aux_depth = 0;
+ hwq_attr.type = HWQ_TYPE_CTX;
+ rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
+ if (rc)
+ goto rq_swq;
+ pbl = &xrrq->pbl[PBL_LVL_0];
+ req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
+
+ xrrq = &qp->irrq;
+ xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
+ qp->max_dest_rd_atomic);
+ req_size = xrrq->max_elements *
+ BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
+ req_size &= ~(PAGE_SIZE - 1);
+ sginfo.pgsize = req_size;
+ hwq_attr.depth = xrrq->max_elements;
+ hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
+ rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
+ if (rc)
+ goto fail_orrq;
+
+ pbl = &xrrq->pbl[PBL_LVL_0];
+ req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
+ }
+ req.pd_id = cpu_to_le32(qp->pd->id);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ goto fail;
+
+ qp->id = le32_to_cpu(resp.xid);
+ qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
+ INIT_LIST_HEAD(&qp->sq_flush);
+ INIT_LIST_HEAD(&qp->rq_flush);
+ qp->cctx = res->cctx;
+ sq->dbinfo.hwq = &sq->hwq;
+ sq->dbinfo.xid = qp->id;
+ sq->dbinfo.db = qp->dpi->dbr;
+ sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
+ if (rq->max_wqe) {
+ rq->dbinfo.hwq = &rq->hwq;
+ rq->dbinfo.xid = qp->id;
+ rq->dbinfo.db = qp->dpi->dbr;
+ rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
+ }
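+	/* Record the QP in the RCFW table so firmware events can be mapped back to it */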
+ spin_lock_bh(&rcfw->tbl_lock);
+ tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
+ rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
+ rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
+ spin_unlock_bh(&rcfw->tbl_lock);
+
+ return 0;
+fail:
+ bnxt_qplib_free_hwq(res, &qp->irrq);
+fail_orrq:
+ bnxt_qplib_free_hwq(res, &qp->orrq);
+rq_swq:
+ kfree(rq->swq);
+fail_rq:
+ bnxt_qplib_free_hwq(res, &rq->hwq);
+sq_swq:
+ kfree(sq->swq);
+fail_sq:
+ bnxt_qplib_free_hwq(res, &sq->hwq);
+ return rc;
+}
+
+static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
+{
+ switch (qp->state) {
+ case CMDQ_MODIFY_QP_NEW_STATE_RTR:
+		/* INIT->RTR, configure the path_mtu to the default
+		 * 2048 if none was requested
+ */
+ if (!(qp->modify_flags &
+ CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
+ qp->modify_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
+ qp->path_mtu =
+ CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
+ }
+		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
+ if (qp->max_dest_rd_atomic < 1)
+ qp->max_dest_rd_atomic = 1;
+ qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
+ /* Bono FW 20.6.5 requires SGID_INDEX configuration */
+ if (!(qp->modify_flags &
+ CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
+ qp->modify_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
+ qp->ah.sgid_index = 0;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
+{
+ switch (qp->state) {
+ case CMDQ_MODIFY_QP_NEW_STATE_RTS:
+ /* Bono FW requires the max_rd_atomic to be >= 1 */
+ if (qp->max_rd_atomic < 1)
+ qp->max_rd_atomic = 1;
+ /* Bono FW does not allow PKEY_INDEX,
+ * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
+ * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
+ * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
+ * modification
+ */
+ qp->modify_flags &=
+ ~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
+ CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
+ CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
+ CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
+ CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
+ CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
+ CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
+ CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
+ CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
+ CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
+ CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
+ CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
+ break;
+ default:
+ break;
+ }
+}
+
+static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
+{
+ switch (qp->cur_qp_state) {
+ case CMDQ_MODIFY_QP_NEW_STATE_RESET:
+ break;
+ case CMDQ_MODIFY_QP_NEW_STATE_INIT:
+ __modify_flags_from_init_state(qp);
+ break;
+ case CMDQ_MODIFY_QP_NEW_STATE_RTR:
+ __modify_flags_from_rtr_state(qp);
+ break;
+ case CMDQ_MODIFY_QP_NEW_STATE_RTS:
+ break;
+ case CMDQ_MODIFY_QP_NEW_STATE_SQD:
+ break;
+ case CMDQ_MODIFY_QP_NEW_STATE_SQE:
+ break;
+ case CMDQ_MODIFY_QP_NEW_STATE_ERR:
+ break;
+ default:
+ break;
+ }
+}
+
+static void bnxt_set_mandatory_attributes(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_qp *qp,
+ struct cmdq_modify_qp *req)
+{
+ u32 mandatory_flags = 0;
+
+ if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC)
+ mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
+
+ if (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_INIT &&
+ qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTR) {
+ if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC && qp->srq)
+ req->flags = cpu_to_le16(CMDQ_MODIFY_QP_FLAGS_SRQ_USED);
+ mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
+ }
+
+ if (_is_min_rnr_in_rtr_rts_mandatory(res->dattr->dev_cap_flags2) &&
+ (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR &&
+ qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS)) {
+ if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC)
+ mandatory_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
+ }
+
+ if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_UD ||
+ qp->type == CMDQ_MODIFY_QP_QP_TYPE_GSI)
+ mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
+
+ qp->modify_flags |= mandatory_flags;
+ req->qp_type = qp->type;
+}
+
+static bool is_optimized_state_transition(struct bnxt_qplib_qp *qp)
+{
+ if ((qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_INIT &&
+ qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTR) ||
+ (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR &&
+ qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS))
+ return true;
+
+ return false;
+}
+
+int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_sgid_tbl *sgid_tbl = &res->sgid_tbl;
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_modify_qp_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_modify_qp req = {};
+ u16 vlan_pcp_vlan_dei_vlan_id;
+ u32 temp32[4];
+ u32 bmask;
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_MODIFY_QP,
+ sizeof(req));
+
+ /* Filter out the qp_attr_mask based on the state->new transition */
+ __filter_modify_flags(qp);
+ if (qp->modify_flags & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
+ /* Set mandatory attributes for INIT -> RTR and RTR -> RTS transition */
+ if (_is_optimize_modify_qp_supported(res->dattr->dev_cap_flags2) &&
+ is_optimized_state_transition(qp))
+ bnxt_set_mandatory_attributes(res, qp, &req);
+ }
+ bmask = qp->modify_flags;
+ req.modify_mask = cpu_to_le32(qp->modify_flags);
+ req.qp_cid = cpu_to_le32(qp->id);
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
+ req.network_type_en_sqd_async_notify_new_state =
+ (qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
+ (qp->en_sqd_async_notify ?
+ CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
+ }
+ req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;
+
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
+ req.access = qp->access;
+
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
+ req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);
+
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
+ req.qkey = cpu_to_le32(qp->qkey);
+
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
+ memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
+ req.dgid[0] = cpu_to_le32(temp32[0]);
+ req.dgid[1] = cpu_to_le32(temp32[1]);
+ req.dgid[2] = cpu_to_le32(temp32[2]);
+ req.dgid[3] = cpu_to_le32(temp32[3]);
+ }
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
+ req.flow_label = cpu_to_le32(qp->ah.flow_label);
+
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX) {
+ if (qp->type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE)
+ req.sgid_index =
+ cpu_to_le16(sgid_tbl->hw_id[qp->ugid_index]);
+ else
+ req.sgid_index =
+ cpu_to_le16(sgid_tbl->hw_id[qp->ah.sgid_index]);
+ }
+
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
+ req.hop_limit = qp->ah.hop_limit;
+
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
+ req.traffic_class = qp->ah.traffic_class;
+
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
+ memcpy(req.dest_mac, qp->ah.dmac, 6);
+
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
+ req.path_mtu_pingpong_push_enable |= qp->path_mtu;
+
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
+ req.timeout = qp->timeout;
+
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
+ req.retry_cnt = qp->retry_cnt;
+
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
+ req.rnr_retry = qp->rnr_retry;
+
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
+ req.min_rnr_timer = qp->min_rnr_timer;
+
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
+ req.rq_psn = cpu_to_le32(qp->rq.psn);
+
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
+ req.sq_psn = cpu_to_le32(qp->sq.psn);
+
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
+ req.max_rd_atomic =
+ ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
+
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
+ req.max_dest_rd_atomic =
+ IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);
+
+ req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
+ req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
+ req.sq_sge = cpu_to_le16(qp->sq.max_sge);
+ req.rq_sge = cpu_to_le16(qp->rq.max_sge);
+ req.max_inline_data = cpu_to_le32(qp->max_inline_data);
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
+ req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
+
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID) {
+ vlan_pcp_vlan_dei_vlan_id =
+ ((res->sgid_tbl.tbl[qp->ah.sgid_index].vlan_id <<
+ CMDQ_MODIFY_QP_VLAN_ID_SFT) &
+ CMDQ_MODIFY_QP_VLAN_ID_MASK);
+ vlan_pcp_vlan_dei_vlan_id |=
+ ((qp->ah.sl << CMDQ_MODIFY_QP_VLAN_PCP_SFT) &
+ CMDQ_MODIFY_QP_VLAN_PCP_MASK);
+ req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(vlan_pcp_vlan_dei_vlan_id);
+ }
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ return rc;
+ qp->cur_qp_state = qp->state;
+ return 0;
+}
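
A minimal caller-side sketch of driving the INIT -> RTR transition through the function above (illustrative only, not part of the patch; example_move_qp_to_rtr is a hypothetical helper, and the mask bits a real ULP sets depend on the attributes it was given -- __filter_modify_flags() drops whatever the firmware does not accept for the transition):

    static int example_move_qp_to_rtr(struct bnxt_qplib_res *res,
                                      struct bnxt_qplib_qp *qp,
                                      u32 dest_qpn, u32 rq_psn)
    {
            /* Request the state change plus the attributes this example sets */
            qp->modify_flags = CMDQ_MODIFY_QP_MODIFY_MASK_STATE |
                               CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID |
                               CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
            qp->state = CMDQ_MODIFY_QP_NEW_STATE_RTR;
            qp->dest_qpn = dest_qpn;
            qp->rq.psn = rq_psn;

            /* On success the library records cur_qp_state = RTR */
            return bnxt_qplib_modify_qp(res, qp);
    }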
+
+int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_query_qp_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct bnxt_qplib_rcfw_sbuf sbuf;
+ struct creq_query_qp_resp_sb *sb;
+ struct cmdq_query_qp req = {};
+ u32 temp32[4];
+ int i, rc;
+
+ sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
+ return -ENOMEM;
+ sb = sbuf.sb;
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_QUERY_QP,
+ sizeof(req));
+
+ req.qp_cid = cpu_to_le32(qp->id);
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ goto bail;
+ /* Extract the context from the side buffer */
+ qp->state = sb->en_sqd_async_notify_state &
+ CREQ_QUERY_QP_RESP_SB_STATE_MASK;
+ qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
+ CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
+ qp->access = sb->access;
+ qp->pkey_index = le16_to_cpu(sb->pkey);
+ qp->qkey = le32_to_cpu(sb->qkey);
+ qp->udp_sport = le16_to_cpu(sb->udp_src_port);
+
+ temp32[0] = le32_to_cpu(sb->dgid[0]);
+ temp32[1] = le32_to_cpu(sb->dgid[1]);
+ temp32[2] = le32_to_cpu(sb->dgid[2]);
+ temp32[3] = le32_to_cpu(sb->dgid[3]);
+ memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));
+
+ qp->ah.flow_label = le32_to_cpu(sb->flow_label);
+
+ qp->ah.sgid_index = 0;
+ for (i = 0; i < res->sgid_tbl.max; i++) {
+ if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
+ qp->ah.sgid_index = i;
+ break;
+ }
+ }
+ if (i == res->sgid_tbl.max)
+ dev_warn(&res->pdev->dev, "SGID not found??\n");
+
+ qp->ah.hop_limit = sb->hop_limit;
+ qp->ah.traffic_class = sb->traffic_class;
+ memcpy(qp->ah.dmac, sb->dest_mac, 6);
+ qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
+ CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
+ CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
+ qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
+ CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
+ CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
+ qp->timeout = sb->timeout;
+ qp->retry_cnt = sb->retry_cnt;
+ qp->rnr_retry = sb->rnr_retry;
+ qp->min_rnr_timer = sb->min_rnr_timer;
+ qp->rq.psn = le32_to_cpu(sb->rq_psn);
+ qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
+ qp->sq.psn = le32_to_cpu(sb->sq_psn);
+ qp->max_dest_rd_atomic =
+ IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
+ qp->sq.max_wqe = qp->sq.hwq.max_elements;
+ qp->rq.max_wqe = qp->rq.hwq.max_elements;
+ qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
+ qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
+ qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
+ qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
+ memcpy(qp->smac, sb->src_mac, 6);
+ qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
+ qp->port_id = le16_to_cpu(sb->port_id);
+bail:
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
+ return rc;
+}
+
+static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
+{
+ struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
+ u32 peek_flags, peek_cons;
+ struct cq_base *hw_cqe;
+ int i;
+
+ peek_flags = cq->dbinfo.flags;
+ peek_cons = cq_hwq->cons;
+ for (i = 0; i < cq_hwq->max_elements; i++) {
+ hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
+ if (!CQE_CMP_VALID(hw_cqe, peek_flags))
+ continue;
+ /*
+ * The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
+ switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
+ case CQ_BASE_CQE_TYPE_REQ:
+ case CQ_BASE_CQE_TYPE_TERMINAL:
+ {
+ struct cq_req *cqe = (struct cq_req *)hw_cqe;
+
+ if (qp == le64_to_cpu(cqe->qp_handle))
+ cqe->qp_handle = 0;
+ break;
+ }
+ case CQ_BASE_CQE_TYPE_RES_RC:
+ case CQ_BASE_CQE_TYPE_RES_UD:
+ case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
+ {
+ struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;
+
+ if (qp == le64_to_cpu(cqe->qp_handle))
+ cqe->qp_handle = 0;
+ break;
+ }
+ default:
+ break;
+ }
+ bnxt_qplib_hwq_incr_cons(cq_hwq->max_elements, &peek_cons,
+ 1, &peek_flags);
+ }
+}
+
+int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_destroy_qp_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_destroy_qp req = {};
+ u32 tbl_indx;
+ int rc;
+
+ spin_lock_bh(&rcfw->tbl_lock);
+ tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
+ rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
+ rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
+ spin_unlock_bh(&rcfw->tbl_lock);
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_DESTROY_QP,
+ sizeof(req));
+
+ req.qp_cid = cpu_to_le32(qp->id);
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc) {
+ spin_lock_bh(&rcfw->tbl_lock);
+ rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
+ rcfw->qp_tbl[tbl_indx].qp_handle = qp;
+ spin_unlock_bh(&rcfw->tbl_lock);
+ return rc;
+ }
+
+ return 0;
+}
+
+void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_qp *qp)
+{
+ bnxt_qplib_free_qp_hdr_buf(res, qp);
+ bnxt_qplib_free_hwq(res, &qp->sq.hwq);
+ kfree(qp->sq.swq);
+
+ bnxt_qplib_free_hwq(res, &qp->rq.hwq);
+ kfree(qp->rq.swq);
+
+ if (qp->irrq.max_elements)
+ bnxt_qplib_free_hwq(res, &qp->irrq);
+ if (qp->orrq.max_elements)
+ bnxt_qplib_free_hwq(res, &qp->orrq);
+}
+
+void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_sge *sge)
+{
+ struct bnxt_qplib_q *sq = &qp->sq;
+ u32 sw_prod;
+
+ memset(sge, 0, sizeof(*sge));
+
+ if (qp->sq_hdr_buf) {
+ sw_prod = sq->swq_start;
+ sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
+ sw_prod * qp->sq_hdr_buf_size);
+ sge->lkey = 0xFFFFFFFF;
+ sge->size = qp->sq_hdr_buf_size;
+ return qp->sq_hdr_buf + sw_prod * sge->size;
+ }
+ return NULL;
+}
+
+u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_q *rq = &qp->rq;
+
+ return rq->swq_start;
+}
+
+dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
+{
+ return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
+}
+
+void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_sge *sge)
+{
+ struct bnxt_qplib_q *rq = &qp->rq;
+ u32 sw_prod;
+
+ memset(sge, 0, sizeof(*sge));
+
+ if (qp->rq_hdr_buf) {
+ sw_prod = rq->swq_start;
+ sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
+ sw_prod * qp->rq_hdr_buf_size);
+ sge->lkey = 0xFFFFFFFF;
+ sge->size = qp->rq_hdr_buf_size;
+ return qp->rq_hdr_buf + sw_prod * sge->size;
+ }
+ return NULL;
+}
+
+/* Fill the MSN table into the next psn row */
+static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe,
+ struct bnxt_qplib_swq *swq)
+{
+ struct sq_msn_search *msns;
+ u32 start_psn, next_psn;
+ u16 start_idx;
+
+ msns = (struct sq_msn_search *)swq->psn_search;
+ msns->start_idx_next_psn_start_psn = 0;
+
+ start_psn = swq->start_psn;
+ next_psn = swq->next_psn;
+ start_idx = swq->slot_idx;
+ msns->start_idx_next_psn_start_psn |=
+ bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
+ qp->msn++;
+ qp->msn %= qp->msn_tbl_sz;
+}
+
+static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe,
+ struct bnxt_qplib_swq *swq)
+{
+ struct sq_psn_search_ext *psns_ext;
+ struct sq_psn_search *psns;
+ u32 flg_npsn;
+ u32 op_spsn;
+
+ if (!swq->psn_search)
+ return;
+ /* Handle MSN differently on cap flags */
+ if (qp->is_host_msn_tbl) {
+ bnxt_qplib_fill_msn_search(qp, wqe, swq);
+ return;
+ }
+ psns = swq->psn_search;
+ psns_ext = swq->psn_ext;
+
+ op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
+ SQ_PSN_SEARCH_START_PSN_MASK);
+ op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
+ SQ_PSN_SEARCH_OPCODE_MASK);
+ flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
+ SQ_PSN_SEARCH_NEXT_PSN_MASK);
+
+ if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
+ psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
+ psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
+ psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
+ } else {
+ psns->opcode_start_psn = cpu_to_le32(op_spsn);
+ psns->flags_next_psn = cpu_to_le32(flg_npsn);
+ }
+}
+
+static unsigned int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe,
+ u32 *idx)
+{
+ struct bnxt_qplib_hwq *hwq;
+ int len, t_len, offt;
+ bool pull_dst = true;
+ void *il_dst = NULL;
+ void *il_src = NULL;
+ int t_cplen, cplen;
+ int indx;
+
+ hwq = &qp->sq.hwq;
+ t_len = 0;
+ for (indx = 0; indx < wqe->num_sge; indx++) {
+ len = wqe->sg_list[indx].size;
+ il_src = (void *)wqe->sg_list[indx].addr;
+ t_len += len;
+ if (t_len > qp->max_inline_data)
+ return BNXT_RE_INVAL_MSG_SIZE;
+ while (len) {
+ if (pull_dst) {
+ pull_dst = false;
+ il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
+ (*idx)++;
+ t_cplen = 0;
+ offt = 0;
+ }
+ cplen = min_t(int, len, sizeof(struct sq_sge));
+ cplen = min_t(int, cplen,
+ (sizeof(struct sq_sge) - offt));
+ memcpy(il_dst, il_src, cplen);
+ t_cplen += cplen;
+ il_src += cplen;
+ il_dst += cplen;
+ offt += cplen;
+ len -= cplen;
+ if (t_cplen == sizeof(struct sq_sge))
+ pull_dst = true;
+ }
+ }
+
+ return t_len;
+}
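
The inline path above packs the payload into consecutive 16-byte sq_sge slots, carrying a partial-slot offset (offt) across SGEs. A simplified, self-contained sketch of that chunking (illustrative only; example_copy_in_slot_chunks is hypothetical and omits the cross-SGE offset handling the real code does):

    static void example_copy_in_slot_chunks(u8 *ring, u32 *idx,
                                            const u8 *src, int len)
    {
            /* Each ring slot is sizeof(struct sq_sge) == 16 bytes */
            const int slot_sz = 16;

            while (len) {
                    u8 *dst = ring + (*idx)++ * slot_sz;  /* next free slot */
                    int cplen = len < slot_sz ? len : slot_sz;

                    memcpy(dst, src, cplen);
                    src += cplen;
                    len -= cplen;
            }
    }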
+
+static unsigned int bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
+ struct bnxt_qplib_sge *ssge,
+ u32 nsge, u32 *idx)
+{
+ struct sq_sge *dsge;
+ int indx, len = 0;
+
+ for (indx = 0; indx < nsge; indx++, (*idx)++) {
+ dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
+ dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
+ dsge->l_key = cpu_to_le32(ssge[indx].lkey);
+ dsge->size = cpu_to_le32(ssge[indx].size);
+ len += ssge[indx].size;
+ }
+
+ return len;
+}
+
+static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe,
+ u16 *wqe_sz, u16 *qdf, u8 mode)
+{
+ u32 ilsize, bytes;
+ u16 nsge;
+ u16 slot;
+
+ nsge = wqe->num_sge;
+ /* Using sizeof(sq_send_hdr) is a slight misnomer here: the rq wqe header has the same size. */
+ bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
+ if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
+ ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
+ bytes = ALIGN(ilsize, sizeof(struct sq_sge));
+ bytes += sizeof(struct sq_send_hdr);
+ }
+
+ *qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
+ slot = bytes >> 4;
+ *wqe_sz = slot;
+ if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
+ slot = 8;
+ return slot;
+}
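
A stripped-down restatement of the non-inline slot math above (illustrative only; example_send_wqe_slots is hypothetical and ignores the qdf and static-mode adjustments handled by the real function):

    static inline u16 example_send_wqe_slots(u16 nsge)
    {
            /* e.g. nsge = 2: (16 + 2 * 16) >> 4 = 3 slots of 16 bytes each */
            return (sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge)) >> 4;
    }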
+
+static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
+ struct bnxt_qplib_swq *swq, bool hw_retx)
+{
+ struct bnxt_qplib_hwq *hwq;
+ u32 pg_num, pg_indx;
+ void *buff;
+ u32 tail;
+
+ hwq = &sq->hwq;
+ if (!hwq->pad_pg)
+ return;
+ tail = swq->slot_idx / sq->dbinfo.max_slot;
+ if (hw_retx) {
+ /* For HW retx use qp msn index */
+ tail = qp->msn;
+ tail %= qp->msn_tbl_sz;
+ }
+ pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
+ pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
+ buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
+ swq->psn_ext = buff;
+ swq->psn_search = buff;
+}
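
The pad-page lookup above maps a slot/MSN index to a page and offset inside the preallocated PSN/MSN search area. A sketch of that arithmetic with the parameters made explicit (illustrative only; example_pad_entry is hypothetical and the pointer types are simplified relative to bnxt_qplib_hwq):

    static void *example_pad_entry(void **pad_pg, u32 pad_pgofft,
                                   u32 pad_stride, u32 tail)
    {
            u32 per_page = PAGE_SIZE / pad_stride;  /* search entries per pad page */
            u32 pg_num   = (tail + pad_pgofft) / per_page;
            u32 pg_indx  = (tail + pad_pgofft) % per_page;

            return (u8 *)pad_pg[pg_num] + pg_indx * pad_stride;
    }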
+
+void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_q *sq = &qp->sq;
+
+ bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
+}
+
+int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe)
+{
+ struct bnxt_qplib_nq_work *nq_work = NULL;
+ int i, rc = 0, data_len = 0, pkt_num = 0;
+ struct bnxt_qplib_q *sq = &qp->sq;
+ struct bnxt_qplib_hwq *hwq;
+ struct bnxt_qplib_swq *swq;
+ bool sch_handler = false;
+ u32 wqe_idx, slots, idx;
+ u16 wqe_sz, qdf = 0;
+ bool msn_update;
+ void *base_hdr;
+ void *ext_hdr;
+ __le32 temp32;
+
+ hwq = &sq->hwq;
+ if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
+ qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+ dev_err(&hwq->pdev->dev,
+ "QPLIB: FP: QP (0x%x) is in the 0x%x state",
+ qp->id, qp->state);
+ rc = -EINVAL;
+ goto done;
+ }
+
+ slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
+ if (bnxt_qplib_queue_full(sq, slots + qdf)) {
+ dev_err(&hwq->pdev->dev,
+ "prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
+ hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
+ bnxt_qplib_pull_psn_buff(qp, sq, swq, qp->is_host_msn_tbl);
+
+ idx = 0;
+ swq->slot_idx = hwq->prod;
+ swq->slots = slots;
+ swq->wr_id = wqe->wr_id;
+ swq->type = wqe->type;
+ swq->flags = wqe->flags;
+ swq->start_psn = sq->psn & BTH_PSN_MASK;
+ if (qp->sig_type)
+ swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
+
+ if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+ sch_handler = true;
+ dev_dbg(&hwq->pdev->dev,
+ "%s Error QP. Scheduling for poll_cq\n", __func__);
+ goto queue_err;
+ }
+
+ base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
+ ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
+ memset(base_hdr, 0, sizeof(struct sq_sge));
+ memset(ext_hdr, 0, sizeof(struct sq_sge));
+
+ if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
+ /* Copy the inline data */
+ data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
+ else
+ data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
+ &idx);
+ if (data_len > BNXT_RE_MAX_MSG_SIZE) {
+ rc = -EINVAL;
+ goto done;
+ }
+ /* Make sure we update MSN table only for wired wqes */
+ msn_update = true;
+ /* Specifics */
+ switch (wqe->type) {
+ case BNXT_QPLIB_SWQE_TYPE_SEND:
+ if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
+ struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
+ struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
+ /* Assemble info for Raw Ethertype QPs */
+
+ sqe->wqe_type = wqe->type;
+ sqe->flags = wqe->flags;
+ sqe->wqe_size = wqe_sz;
+ sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
+ sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
+ sqe->length = cpu_to_le32(data_len);
+ ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
+ SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
+ SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
+
+ break;
+ }
+ fallthrough;
+ case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
+ case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
+ {
+ struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
+ struct sq_send_hdr *sqe = base_hdr;
+
+ sqe->wqe_type = wqe->type;
+ sqe->flags = wqe->flags;
+ sqe->wqe_size = wqe_sz;
+ sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
+ if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
+ qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
+ sqe->q_key = cpu_to_le32(wqe->send.q_key);
+ sqe->length = cpu_to_le32(data_len);
+ sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
+ ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
+ SQ_SEND_DST_QP_MASK);
+ ext_sqe->avid = cpu_to_le32(wqe->send.avid &
+ SQ_SEND_AVID_MASK);
+ msn_update = false;
+ } else {
+ sqe->length = cpu_to_le32(data_len);
+ if (qp->mtu)
+ pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
+ if (!pkt_num)
+ pkt_num = 1;
+ sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
+ }
+ break;
+ }
+ case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
+ case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
+ case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
+ {
+ struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
+ struct sq_rdma_hdr *sqe = base_hdr;
+
+ sqe->wqe_type = wqe->type;
+ sqe->flags = wqe->flags;
+ sqe->wqe_size = wqe_sz;
+ sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
+ sqe->length = cpu_to_le32((u32)data_len);
+ ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
+ ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
+ if (qp->mtu)
+ pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
+ if (!pkt_num)
+ pkt_num = 1;
+ sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
+ break;
+ }
+ case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
+ case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
+ {
+ struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
+ struct sq_atomic_hdr *sqe = base_hdr;
+
+ sqe->wqe_type = wqe->type;
+ sqe->flags = wqe->flags;
+ sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
+ sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
+ ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
+ ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
+ if (qp->mtu)
+ pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
+ if (!pkt_num)
+ pkt_num = 1;
+ sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
+ break;
+ }
+ case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
+ {
+ struct sq_localinvalidate *sqe = base_hdr;
+
+ sqe->wqe_type = wqe->type;
+ sqe->flags = wqe->flags;
+ sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
+ msn_update = false;
+ break;
+ }
+ case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
+ {
+ struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
+ struct sq_fr_pmr_hdr *sqe = base_hdr;
+
+ sqe->wqe_type = wqe->type;
+ sqe->flags = wqe->flags;
+ sqe->access_cntl = wqe->frmr.access_cntl |
+ SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
+ sqe->zero_based_page_size_log =
+ (wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
+ SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
+ (wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
+ sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
+ temp32 = cpu_to_le32(wqe->frmr.length);
+ memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
+ sqe->numlevels_pbl_page_size_log =
+ ((wqe->frmr.pbl_pg_sz_log <<
+ SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
+ SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
+ ((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
+ SQ_FR_PMR_NUMLEVELS_MASK);
+
+ for (i = 0; i < wqe->frmr.page_list_len; i++)
+ wqe->frmr.pbl_ptr[i] = cpu_to_le64(
+ wqe->frmr.page_list[i] |
+ PTU_PTE_VALID);
+ ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
+ ext_sqe->va = cpu_to_le64(wqe->frmr.va);
+ msn_update = false;
+
+ break;
+ }
+ case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
+ {
+ struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
+ struct sq_bind_hdr *sqe = base_hdr;
+
+ sqe->wqe_type = wqe->type;
+ sqe->flags = wqe->flags;
+ sqe->access_cntl = wqe->bind.access_cntl;
+ sqe->mw_type_zero_based = wqe->bind.mw_type |
+ (wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
+ sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
+ sqe->l_key = cpu_to_le32(wqe->bind.r_key);
+ ext_sqe->va = cpu_to_le64(wqe->bind.va);
+ ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
+ msn_update = false;
+ break;
+ }
+ default:
+ /* Bad wqe, return error */
+ rc = -EINVAL;
+ goto done;
+ }
+ if (!qp->is_host_msn_tbl || msn_update) {
+ swq->next_psn = sq->psn & BTH_PSN_MASK;
+ bnxt_qplib_fill_psn_search(qp, wqe, swq);
+ }
+queue_err:
+ bnxt_qplib_swq_mod_start(sq, wqe_idx);
+ bnxt_qplib_hwq_incr_prod(&sq->dbinfo, hwq, swq->slots);
+ qp->wqe_cnt++;
+done:
+ if (sch_handler) {
+ nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
+ if (nq_work) {
+ nq_work->cq = qp->scq;
+ nq_work->nq = qp->scq->nq;
+ INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
+ queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
+ } else {
+ dev_err(&hwq->pdev->dev,
+ "FP: Failed to allocate SQ nq_work!\n");
+ rc = -ENOMEM;
+ }
+ }
+ return rc;
+}
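
A hedged usage sketch (not part of the patch) of posting one signalled SEND with a single SGE and ringing the SQ doorbell; example_post_one_send is hypothetical, and addr/lkey/len are assumed to come from a memory region the caller already registered:

    static int example_post_one_send(struct bnxt_qplib_qp *qp,
                                     u64 wr_id, u64 addr, u32 lkey, u32 len)
    {
            struct bnxt_qplib_swqe wqe = {};
            int rc;

            wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
            wqe.flags = BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
            wqe.wr_id = wr_id;
            wqe.num_sge = 1;
            wqe.sg_list[0].addr = addr;
            wqe.sg_list[0].lkey = lkey;
            wqe.sg_list[0].size = len;

            rc = bnxt_qplib_post_send(qp, &wqe);
            if (!rc)
                    bnxt_qplib_post_send_db(qp);
            return rc;
    }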
+
+void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_q *rq = &qp->rq;
+
+ bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
+}
+
+int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe)
+{
+ struct bnxt_qplib_nq_work *nq_work = NULL;
+ struct bnxt_qplib_q *rq = &qp->rq;
+ struct rq_wqe_hdr *base_hdr;
+ struct rq_ext_hdr *ext_hdr;
+ struct bnxt_qplib_hwq *hwq;
+ struct bnxt_qplib_swq *swq;
+ bool sch_handler = false;
+ u32 wqe_idx, idx;
+ u16 wqe_sz;
+ int rc = 0;
+
+ hwq = &rq->hwq;
+ if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
+ dev_err(&hwq->pdev->dev,
+ "QPLIB: FP: QP (0x%x) is in the 0x%x state",
+ qp->id, qp->state);
+ rc = -EINVAL;
+ goto done;
+ }
+
+ if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
+ dev_err(&hwq->pdev->dev,
+ "FP: QP (0x%x) RQ is full!\n", qp->id);
+ rc = -EINVAL;
+ goto done;
+ }
+
+ swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
+ swq->wr_id = wqe->wr_id;
+ swq->slots = rq->dbinfo.max_slot;
+
+ if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+ sch_handler = true;
+ dev_dbg(&hwq->pdev->dev,
+ "%s: Error QP. Scheduling for poll_cq\n", __func__);
+ goto queue_err;
+ }
+
+ idx = 0;
+ base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
+ ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
+ memset(base_hdr, 0, sizeof(struct sq_sge));
+ memset(ext_hdr, 0, sizeof(struct sq_sge));
+ wqe_sz = (sizeof(struct rq_wqe_hdr) +
+ wqe->num_sge * sizeof(struct sq_sge)) >> 4;
+ bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
+ if (!wqe->num_sge) {
+ struct sq_sge *sge;
+
+ sge = bnxt_qplib_get_prod_qe(hwq, idx++);
+ sge->size = 0;
+ wqe_sz++;
+ }
+ base_hdr->wqe_type = wqe->type;
+ base_hdr->flags = wqe->flags;
+ base_hdr->wqe_size = wqe_sz;
+ base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
+queue_err:
+ bnxt_qplib_swq_mod_start(rq, wqe_idx);
+ bnxt_qplib_hwq_incr_prod(&rq->dbinfo, hwq, swq->slots);
+done:
+ if (sch_handler) {
+ nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
+ if (nq_work) {
+ nq_work->cq = qp->rcq;
+ nq_work->nq = qp->rcq->nq;
+ INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
+ queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
+ } else {
+ dev_err(&hwq->pdev->dev,
+ "FP: Failed to allocate RQ nq_work!\n");
+ rc = -ENOMEM;
+ }
+ }
+
+ return rc;
+}
+
+/* CQ */
+int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct creq_create_cq_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_create_cq req = {};
+ struct bnxt_qplib_pbl *pbl;
+ u32 coalescing = 0;
+ u32 pg_sz_lvl;
+ int rc;
+
+ if (!cq->dpi) {
+ dev_err(&rcfw->pdev->dev,
+ "FP: CREATE_CQ failed due to NULL DPI\n");
+ return -EINVAL;
+ }
+
+ cq->dbinfo.flags = 0;
+ hwq_attr.res = res;
+ hwq_attr.depth = cq->max_wqe;
+ hwq_attr.stride = sizeof(struct cq_base);
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ hwq_attr.sginfo = &cq->sg_info;
+ rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
+ if (rc)
+ return rc;
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_CREATE_CQ,
+ sizeof(req));
+
+ req.dpi = cpu_to_le32(cq->dpi->dpi);
+ req.cq_handle = cpu_to_le64(cq->cq_handle);
+ req.cq_size = cpu_to_le32(cq->max_wqe);
+
+ if (_is_cq_coalescing_supported(res->dattr->dev_cap_flags2)) {
+ req.flags |= cpu_to_le16(CMDQ_CREATE_CQ_FLAGS_COALESCING_VALID);
+ coalescing |= ((cq->coalescing->buf_maxtime <<
+ CMDQ_CREATE_CQ_BUF_MAXTIME_SFT) &
+ CMDQ_CREATE_CQ_BUF_MAXTIME_MASK);
+ coalescing |= ((cq->coalescing->normal_maxbuf <<
+ CMDQ_CREATE_CQ_NORMAL_MAXBUF_SFT) &
+ CMDQ_CREATE_CQ_NORMAL_MAXBUF_MASK);
+ coalescing |= ((cq->coalescing->during_maxbuf <<
+ CMDQ_CREATE_CQ_DURING_MAXBUF_SFT) &
+ CMDQ_CREATE_CQ_DURING_MAXBUF_MASK);
+ if (cq->coalescing->en_ring_idle_mode)
+ coalescing |= CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE;
+ else
+ coalescing &= ~CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE;
+ req.coalescing = cpu_to_le32(coalescing);
+ }
+
+ pbl = &cq->hwq.pbl[PBL_LVL_0];
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
+ CMDQ_CREATE_CQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
+ req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
+ req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
+ req.cq_fco_cnq_id = cpu_to_le32(
+ (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
+ CMDQ_CREATE_CQ_CNQ_ID_SFT);
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ goto fail;
+
+ cq->id = le32_to_cpu(resp.xid);
+ cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
+ init_waitqueue_head(&cq->waitq);
+ INIT_LIST_HEAD(&cq->sqf_head);
+ INIT_LIST_HEAD(&cq->rqf_head);
+ spin_lock_init(&cq->compl_lock);
+ spin_lock_init(&cq->flush_lock);
+
+ cq->dbinfo.hwq = &cq->hwq;
+ cq->dbinfo.xid = cq->id;
+ cq->dbinfo.db = cq->dpi->dbr;
+ cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
+ cq->dbinfo.flags = 0;
+ cq->dbinfo.toggle = 0;
+
+ bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
+
+ return 0;
+
+fail:
+ bnxt_qplib_free_hwq(res, &cq->hwq);
+ return rc;
+}
+
+void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_cq *cq)
+{
+ bnxt_qplib_free_hwq(res, &cq->hwq);
+ memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
+ /* Reset only the cons bit in the flags */
+ cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
+}
+
+int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
+ int new_cqes)
+{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_resize_cq_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_resize_cq req = {};
+ struct bnxt_qplib_pbl *pbl;
+ u32 pg_sz, lvl, new_sz;
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_RESIZE_CQ,
+ sizeof(req));
+ hwq_attr.sginfo = &cq->sg_info;
+ hwq_attr.res = res;
+ hwq_attr.depth = new_cqes;
+ hwq_attr.stride = sizeof(struct cq_base);
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
+ if (rc)
+ return rc;
+
+ req.cq_cid = cpu_to_le32(cq->id);
+ pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
+ pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
+ lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
+ CMDQ_RESIZE_CQ_LVL_MASK;
+ new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
+ CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
+ req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
+ req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ return rc;
+}
+
+int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_destroy_cq_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_destroy_cq req = {};
+ u16 total_cnq_events;
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_DESTROY_CQ,
+ sizeof(req));
+
+ req.cq_cid = cpu_to_le32(cq->id);
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ return rc;
+ total_cnq_events = le16_to_cpu(resp.total_cnq_events);
+ __wait_for_all_nqes(cq, total_cnq_events);
+ bnxt_qplib_free_hwq(res, &cq->hwq);
+ return 0;
+}
+
+static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_cqe **pcqe, int *budget)
+{
+ struct bnxt_qplib_cqe *cqe;
+ u32 start, last;
+ int rc = 0;
+
+ /* Now complete all outstanding SQEs with FLUSHED_ERR */
+ start = sq->swq_start;
+ cqe = *pcqe;
+ while (*budget) {
+ last = sq->swq_last;
+ if (start == last)
+ break;
+ /* Skip the FENCE WQE completions */
+ if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
+ bnxt_qplib_cancel_phantom_processing(qp);
+ goto skip_compl;
+ }
+ memset(cqe, 0, sizeof(*cqe));
+ cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
+ cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
+ cqe->qp_handle = (u64)(unsigned long)qp;
+ cqe->wr_id = sq->swq[last].wr_id;
+ cqe->src_qp = qp->id;
+ cqe->type = sq->swq[last].type;
+ cqe++;
+ (*budget)--;
+skip_compl:
+ bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
+ sq->swq[last].slots, &sq->dbinfo.flags);
+ sq->swq_last = sq->swq[last].next_idx;
+ }
+ *pcqe = cqe;
+ if (!(*budget) && sq->swq_last != start)
+ /* Out of budget */
+ rc = -EAGAIN;
+
+ return rc;
+}
+
+static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_cqe **pcqe, int *budget)
+{
+ struct bnxt_qplib_cqe *cqe;
+ u32 start, last;
+ int opcode = 0;
+ int rc = 0;
+
+ switch (qp->type) {
+ case CMDQ_CREATE_QP1_TYPE_GSI:
+ opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
+ break;
+ case CMDQ_CREATE_QP_TYPE_RC:
+ opcode = CQ_BASE_CQE_TYPE_RES_RC;
+ break;
+ case CMDQ_CREATE_QP_TYPE_UD:
+ case CMDQ_CREATE_QP_TYPE_GSI:
+ opcode = CQ_BASE_CQE_TYPE_RES_UD;
+ break;
+ }
+
+ /* Flush the rest of the RQ */
+ start = rq->swq_start;
+ cqe = *pcqe;
+ while (*budget) {
+ last = rq->swq_last;
+ if (last == start)
+ break;
+ memset(cqe, 0, sizeof(*cqe));
+ cqe->status =
+ CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
+ cqe->opcode = opcode;
+ cqe->qp_handle = (unsigned long)qp;
+ cqe->wr_id = rq->swq[last].wr_id;
+ cqe++;
+ (*budget)--;
+ bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
+ rq->swq[last].slots, &rq->dbinfo.flags);
+ rq->swq_last = rq->swq[last].next_idx;
+ }
+ *pcqe = cqe;
+ if (!*budget && rq->swq_last != start)
+ /* Out of budget */
+ rc = -EAGAIN;
+
+ return rc;
+}
+
+void bnxt_qplib_mark_qp_error(void *qp_handle)
+{
+ struct bnxt_qplib_qp *qp = qp_handle;
+
+ if (!qp)
+ return;
+
+ /* Must block new posting of SQ and RQ */
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ bnxt_qplib_cancel_phantom_processing(qp);
+}
+
+/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
+ * CQE is tracked from sw_cq_cons to max_element but valid only if VALID=1
+ */
+static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
+ u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
+{
+ u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
+ struct bnxt_qplib_q *sq = &qp->sq;
+ struct cq_req *peek_req_hwcqe;
+ struct bnxt_qplib_qp *peek_qp;
+ struct bnxt_qplib_q *peek_sq;
+ struct bnxt_qplib_swq *swq;
+ struct cq_base *peek_hwcqe;
+ int i, rc = 0;
+
+ /* Normal mode */
+ /* Check for the psn_search marking before completing */
+ swq = &sq->swq[swq_last];
+ if (swq->psn_search &&
+ le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
+ /* Unmark */
+ swq->psn_search->flags_next_psn = cpu_to_le32
+ (le32_to_cpu(swq->psn_search->flags_next_psn)
+ & ~0x80000000);
+ dev_dbg(&cq->hwq.pdev->dev,
+ "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
+ cq_cons, qp->id, swq_last, cqe_sq_cons);
+ sq->condition = true;
+ sq->send_phantom = true;
+
+ /* TODO: Only ARM if the previous SQE is ARMALL */
+ bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
+ rc = -EAGAIN;
+ goto out;
+ }
+ if (sq->condition) {
+ /* Peek at the completions */
+ peek_flags = cq->dbinfo.flags;
+ peek_sw_cq_cons = cq_cons;
+ i = cq->hwq.max_elements;
+ while (i--) {
+ peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
+ peek_sw_cq_cons, NULL);
+ /* If the next hwcqe is VALID */
+ if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
+ /*
+ * The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
+ /* If the next hwcqe is a REQ */
+ if ((peek_hwcqe->cqe_type_toggle &
+ CQ_BASE_CQE_TYPE_MASK) ==
+ CQ_BASE_CQE_TYPE_REQ) {
+ peek_req_hwcqe = (struct cq_req *)
+ peek_hwcqe;
+ peek_qp = (struct bnxt_qplib_qp *)
+ ((unsigned long)
+ le64_to_cpu
+ (peek_req_hwcqe->qp_handle));
+ peek_sq = &peek_qp->sq;
+ peek_sq_cons_idx =
+ ((le16_to_cpu(
+ peek_req_hwcqe->sq_cons_idx)
+ - 1) % sq->max_wqe);
+ /* If the hwcqe's sq's wr_id matches */
+ if (peek_sq == sq &&
+ sq->swq[peek_sq_cons_idx].wr_id ==
+ BNXT_QPLIB_FENCE_WRID) {
+ /*
+ * Unbreak only if the phantom
+ * comes back
+ */
+ dev_dbg(&cq->hwq.pdev->dev,
+ "FP: Got Phantom CQE\n");
+ sq->condition = false;
+ sq->single = true;
+ rc = 0;
+ goto out;
+ }
+ }
+ /* Valid but not the phantom, so keep looping */
+ } else {
+ /* Not valid yet, just exit and wait */
+ rc = -EINVAL;
+ goto out;
+ }
+ bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements,
+ &peek_sw_cq_cons,
+ 1, &peek_flags);
+ }
+ dev_err(&cq->hwq.pdev->dev,
+ "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
+ cq_cons, qp->id, swq_last, cqe_sq_cons);
+ rc = -EINVAL;
+ }
+out:
+ return rc;
+}
+
+static int bnxt_qplib_get_cqe_sq_cons(struct bnxt_qplib_q *sq, u32 cqe_slot)
+{
+ struct bnxt_qplib_hwq *sq_hwq;
+ struct bnxt_qplib_swq *swq;
+ int cqe_sq_cons = -1;
+ u32 start, last;
+
+ sq_hwq = &sq->hwq;
+
+ start = sq->swq_start;
+ last = sq->swq_last;
+
+ while (last != start) {
+ swq = &sq->swq[last];
+ if (swq->slot_idx == cqe_slot) {
+ cqe_sq_cons = swq->next_idx;
+ dev_err(&sq_hwq->pdev->dev, "%s: Found cons wqe = %d slot = %d\n",
+ __func__, cqe_sq_cons, cqe_slot);
+ break;
+ }
+
+ last = swq->next_idx;
+ }
+ return cqe_sq_cons;
+}
+
+static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
+ struct cq_req *hwcqe,
+ struct bnxt_qplib_cqe **pcqe, int *budget,
+ u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
+{
+ struct bnxt_qplib_swq *swq;
+ struct bnxt_qplib_cqe *cqe;
+ u32 cqe_sq_cons, slot_num;
+ struct bnxt_qplib_qp *qp;
+ struct bnxt_qplib_q *sq;
+ int cqe_cons;
+ int rc = 0;
+
+ qp = (struct bnxt_qplib_qp *)((unsigned long)
+ le64_to_cpu(hwcqe->qp_handle));
+ if (!qp) {
+ dev_err(&cq->hwq.pdev->dev,
+ "FP: Process Req qp is NULL\n");
+ return -EINVAL;
+ }
+ sq = &qp->sq;
+
+ cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_sw_wqe;
+ if (qp->sq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QP in Flush QP = %p\n", __func__, qp);
+ goto done;
+ }
+
+ if (__is_err_cqe_for_var_wqe(qp, hwcqe->status)) {
+ slot_num = le16_to_cpu(hwcqe->sq_cons_idx);
+ cqe_cons = bnxt_qplib_get_cqe_sq_cons(sq, slot_num);
+ if (cqe_cons < 0) {
+ dev_err(&cq->hwq.pdev->dev, "%s: Wrong SQ cons cqe_slot_indx = %d\n",
+ __func__, slot_num);
+ goto done;
+ }
+ cqe_sq_cons = cqe_cons;
+ dev_err(&cq->hwq.pdev->dev, "%s: cqe_sq_cons = %d swq_last = %d swq_start = %d\n",
+ __func__, cqe_sq_cons, sq->swq_last, sq->swq_start);
+ }
+
+ /* We need to walk the sq's swq to fabricate CQEs for all previously
+ * signaled SWQEs, because of CQE aggregation, from the current sq cons
+ * up to the cqe_sq_cons
+ */
+ cqe = *pcqe;
+ while (*budget) {
+ if (sq->swq_last == cqe_sq_cons)
+ /* Done */
+ break;
+
+ swq = &sq->swq[sq->swq_last];
+ memset(cqe, 0, sizeof(*cqe));
+ cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
+ cqe->qp_handle = (u64)(unsigned long)qp;
+ cqe->src_qp = qp->id;
+ cqe->wr_id = swq->wr_id;
+ if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
+ goto skip;
+ cqe->type = swq->type;
+
+ /* For the last CQE, check for status. For errors, regardless
+ * of the request being signaled or not, it must complete with
+ * the hwcqe error status
+ */
+ if (swq->next_idx == cqe_sq_cons &&
+ hwcqe->status != CQ_REQ_STATUS_OK) {
+ cqe->status = hwcqe->status;
+ dev_err(&cq->hwq.pdev->dev,
+ "FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
+ sq->swq_last, cqe->wr_id, cqe->status);
+ cqe++;
+ (*budget)--;
+ bnxt_qplib_mark_qp_error(qp);
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_add_flush_qp(qp);
+ } else {
+ /* Before we complete, do WA 9060 */
+ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
+ if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
+ cqe_sq_cons)) {
+ *lib_qp = qp;
+ goto out;
+ }
+ }
+ if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
+ cqe->status = CQ_REQ_STATUS_OK;
+ cqe++;
+ (*budget)--;
+ }
+ }
+skip:
+ bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
+ swq->slots, &sq->dbinfo.flags);
+ sq->swq_last = swq->next_idx;
+ if (sq->single)
+ break;
+ }
+out:
+ *pcqe = cqe;
+ if (sq->swq_last != cqe_sq_cons) {
+ /* Out of budget */
+ rc = -EAGAIN;
+ goto done;
+ }
+ /*
+ * Back to normal completion mode only after it has completed all of
+ * the WC for this CQE
+ */
+ sq->single = false;
+done:
+ return rc;
+}
+
+static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
+{
+ spin_lock(&srq->hwq.lock);
+ srq->swq[srq->last_idx].next_idx = (int)tag;
+ srq->last_idx = (int)tag;
+ srq->swq[srq->last_idx].next_idx = -1;
+ bnxt_qplib_hwq_incr_cons(srq->hwq.max_elements, &srq->hwq.cons,
+ srq->dbinfo.max_slot, &srq->dbinfo.flags);
+ spin_unlock(&srq->hwq.lock);
+}
+
+static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
+ struct cq_res_rc *hwcqe,
+ struct bnxt_qplib_cqe **pcqe,
+ int *budget)
+{
+ struct bnxt_qplib_srq *srq;
+ struct bnxt_qplib_cqe *cqe;
+ struct bnxt_qplib_qp *qp;
+ struct bnxt_qplib_q *rq;
+ u32 wr_id_idx;
+
+ qp = (struct bnxt_qplib_qp *)((unsigned long)
+ le64_to_cpu(hwcqe->qp_handle));
+ if (!qp) {
+ dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
+ return -EINVAL;
+ }
+ if (qp->rq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QP in Flush QP = %p\n", __func__, qp);
+ return 0;
+ }
+
+ cqe = *pcqe;
+ cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
+ cqe->length = le32_to_cpu(hwcqe->length);
+ cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
+ cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
+ cqe->flags = le16_to_cpu(hwcqe->flags);
+ cqe->status = hwcqe->status;
+ cqe->qp_handle = (u64)(unsigned long)qp;
+
+ wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
+ CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
+ if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
+ srq = qp->srq;
+ if (!srq)
+ return -EINVAL;
+ if (wr_id_idx >= srq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
+ wr_id_idx, srq->hwq.max_elements);
+ return -EINVAL;
+ }
+ cqe->wr_id = srq->swq[wr_id_idx].wr_id;
+ bnxt_qplib_release_srqe(srq, wr_id_idx);
+ cqe++;
+ (*budget)--;
+ *pcqe = cqe;
+ } else {
+ struct bnxt_qplib_swq *swq;
+
+ rq = &qp->rq;
+ if (wr_id_idx > (rq->max_wqe - 1)) {
+ dev_err(&cq->hwq.pdev->dev,
+ "FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
+ wr_id_idx, rq->max_wqe);
+ return -EINVAL;
+ }
+ if (wr_id_idx != rq->swq_last)
+ return -EINVAL;
+ swq = &rq->swq[rq->swq_last];
+ cqe->wr_id = swq->wr_id;
+ cqe++;
+ (*budget)--;
+ bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
+ swq->slots, &rq->dbinfo.flags);
+ rq->swq_last = swq->next_idx;
+ *pcqe = cqe;
+
+ if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_add_flush_qp(qp);
+ }
+ }
+
+ return 0;
+}
+
+static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
+ struct cq_res_ud *hwcqe,
+ struct bnxt_qplib_cqe **pcqe,
+ int *budget)
+{
+ struct bnxt_qplib_srq *srq;
+ struct bnxt_qplib_cqe *cqe;
+ struct bnxt_qplib_qp *qp;
+ struct bnxt_qplib_q *rq;
+ u32 wr_id_idx;
+
+ qp = (struct bnxt_qplib_qp *)((unsigned long)
+ le64_to_cpu(hwcqe->qp_handle));
+ if (!qp) {
+ dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
+ return -EINVAL;
+ }
+ if (qp->rq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QP in Flush QP = %p\n", __func__, qp);
+ return 0;
+ }
+ cqe = *pcqe;
+ cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
+ cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
+ cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
+ cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
+ cqe->flags = le16_to_cpu(hwcqe->flags);
+ cqe->status = hwcqe->status;
+ cqe->qp_handle = (u64)(unsigned long)qp;
+ /* FIXME: Endianness fix needed for smac */
+ memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
+ wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
+ & CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
+ cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
+ ((le32_to_cpu(
+ hwcqe->src_qp_high_srq_or_rq_wr_id) &
+ CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
+
+ if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
+ srq = qp->srq;
+ if (!srq)
+ return -EINVAL;
+
+ if (wr_id_idx >= srq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
+ wr_id_idx, srq->hwq.max_elements);
+ return -EINVAL;
+ }
+ cqe->wr_id = srq->swq[wr_id_idx].wr_id;
+ bnxt_qplib_release_srqe(srq, wr_id_idx);
+ cqe++;
+ (*budget)--;
+ *pcqe = cqe;
+ } else {
+ struct bnxt_qplib_swq *swq;
+
+ rq = &qp->rq;
+ if (wr_id_idx > (rq->max_wqe - 1)) {
+ dev_err(&cq->hwq.pdev->dev,
+ "FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
+ wr_id_idx, rq->max_wqe);
+ return -EINVAL;
+ }
+
+ if (rq->swq_last != wr_id_idx)
+ return -EINVAL;
+ swq = &rq->swq[rq->swq_last];
+ cqe->wr_id = swq->wr_id;
+ cqe++;
+ (*budget)--;
+ bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
+ swq->slots, &rq->dbinfo.flags);
+ rq->swq_last = swq->next_idx;
+ *pcqe = cqe;
+
+ if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_add_flush_qp(qp);
+ }
+ }
+
+ return 0;
+}
+
+bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
+{
+ struct cq_base *hw_cqe;
+ bool rc = true;
+
+ hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
+ /* Check for Valid bit. If the CQE is valid, return false */
+ rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);
+ return rc;
+}
+
+static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
+ struct cq_res_raweth_qp1 *hwcqe,
+ struct bnxt_qplib_cqe **pcqe,
+ int *budget)
+{
+ struct bnxt_qplib_qp *qp;
+ struct bnxt_qplib_q *rq;
+ struct bnxt_qplib_srq *srq;
+ struct bnxt_qplib_cqe *cqe;
+ u32 wr_id_idx;
+
+ qp = (struct bnxt_qplib_qp *)((unsigned long)
+ le64_to_cpu(hwcqe->qp_handle));
+ if (!qp) {
+ dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
+ return -EINVAL;
+ }
+ if (qp->rq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QP in Flush QP = %p\n", __func__, qp);
+ return 0;
+ }
+ cqe = *pcqe;
+ cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
+ cqe->flags = le16_to_cpu(hwcqe->flags);
+ cqe->qp_handle = (u64)(unsigned long)qp;
+
+ wr_id_idx =
+ le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
+ & CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
+ cqe->src_qp = qp->id;
+ if (qp->id == 1 && !cqe->length) {
+ /* Add workaround for the length misdetection */
+ cqe->length = 296;
+ } else {
+ cqe->length = le16_to_cpu(hwcqe->length);
+ }
+ cqe->pkey_index = qp->pkey_index;
+ memcpy(cqe->smac, qp->smac, 6);
+
+ cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
+ cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
+ cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
+
+ if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
+ srq = qp->srq;
+ if (!srq) {
+ dev_err(&cq->hwq.pdev->dev,
+ "FP: SRQ used but not defined??\n");
+ return -EINVAL;
+ }
+ if (wr_id_idx >= srq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
+ wr_id_idx, srq->hwq.max_elements);
+ return -EINVAL;
+ }
+ cqe->wr_id = srq->swq[wr_id_idx].wr_id;
+ bnxt_qplib_release_srqe(srq, wr_id_idx);
+ cqe++;
+ (*budget)--;
+ *pcqe = cqe;
+ } else {
+ struct bnxt_qplib_swq *swq;
+
+ rq = &qp->rq;
+ if (wr_id_idx > (rq->max_wqe - 1)) {
+ dev_err(&cq->hwq.pdev->dev,
+ "FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
+ wr_id_idx, rq->max_wqe);
+ return -EINVAL;
+ }
+ if (rq->swq_last != wr_id_idx)
+ return -EINVAL;
+ swq = &rq->swq[rq->swq_last];
+ cqe->wr_id = swq->wr_id;
+ cqe++;
+ (*budget)--;
+ bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
+ swq->slots, &rq->dbinfo.flags);
+ rq->swq_last = swq->next_idx;
+ *pcqe = cqe;
+
+ if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_add_flush_qp(qp);
+ }
+ }
+
+ return 0;
+}
+
+static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
+ struct cq_terminal *hwcqe,
+ struct bnxt_qplib_cqe **pcqe,
+ int *budget)
+{
+ struct bnxt_qplib_qp *qp;
+ struct bnxt_qplib_q *sq, *rq;
+ struct bnxt_qplib_cqe *cqe;
+ u32 swq_last = 0, cqe_cons;
+ int rc = 0;
+
+ /* Check the Status */
+ if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
+ dev_warn(&cq->hwq.pdev->dev,
+ "FP: CQ Process Terminal Error status = 0x%x\n",
+ hwcqe->status);
+
+ qp = (struct bnxt_qplib_qp *)((unsigned long)
+ le64_to_cpu(hwcqe->qp_handle));
+ if (!qp)
+ return -EINVAL;
+
+ /* Must block new posting of SQ and RQ */
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+
+ sq = &qp->sq;
+ rq = &qp->rq;
+
+ cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
+ if (cqe_cons == 0xFFFF)
+ goto do_rq;
+ cqe_cons %= sq->max_sw_wqe;
+
+ if (qp->sq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QP in Flush QP = %p\n", __func__, qp);
+ goto sq_done;
+ }
+
+ /* A terminal CQE can also aggregate earlier successful completions,
+ * so all CQEs from the current sq cons up to the reported cqe_cons
+ * must be completed with status OK
+ */
+ cqe = *pcqe;
+ while (*budget) {
+ swq_last = sq->swq_last;
+ if (swq_last == cqe_cons)
+ break;
+ if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
+ memset(cqe, 0, sizeof(*cqe));
+ cqe->status = CQ_REQ_STATUS_OK;
+ cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
+ cqe->qp_handle = (u64)(unsigned long)qp;
+ cqe->src_qp = qp->id;
+ cqe->wr_id = sq->swq[swq_last].wr_id;
+ cqe->type = sq->swq[swq_last].type;
+ cqe++;
+ (*budget)--;
+ }
+ bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
+ sq->swq[swq_last].slots, &sq->dbinfo.flags);
+ sq->swq_last = sq->swq[swq_last].next_idx;
+ }
+ *pcqe = cqe;
+ if (!(*budget) && swq_last != cqe_cons) {
+ /* Out of budget */
+ rc = -EAGAIN;
+ goto sq_done;
+ }
+sq_done:
+ if (rc)
+ return rc;
+do_rq:
+ cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
+ if (cqe_cons == 0xFFFF) {
+ goto done;
+ } else if (cqe_cons > rq->max_wqe - 1) {
+ dev_err(&cq->hwq.pdev->dev,
+ "FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
+ cqe_cons, rq->max_wqe);
+ rc = -EINVAL;
+ goto done;
+ }
+
+ if (qp->rq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QP in Flush QP = %p\n", __func__, qp);
+ rc = 0;
+ goto done;
+ }
+
+ /* A terminal CQE requires all posted RQEs to be completed with FLUSHED_ERR,
+ * from the current rq->cons to rq->prod, regardless of the rq_cons_idx
+ * the terminal CQE reports
+ */
+
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_add_flush_qp(qp);
+done:
+ return rc;
+}
+
+static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
+ struct cq_cutoff *hwcqe)
+{
+ /* Check the Status */
+ if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
+ dev_err(&cq->hwq.pdev->dev,
+ "FP: CQ Process Cutoff Error status = 0x%x\n",
+ hwcqe->status);
+ return -EINVAL;
+ }
+ clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
+ wake_up_interruptible(&cq->waitq);
+
+ return 0;
+}
+
+int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
+ struct bnxt_qplib_cqe *cqe,
+ int num_cqes)
+{
+ struct bnxt_qplib_qp *qp = NULL;
+ u32 budget = num_cqes;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cq->flush_lock, flags);
+ list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
+ dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
+ __flush_sq(&qp->sq, qp, &cqe, &budget);
+ }
+
+ list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
+ dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
+ __flush_rq(&qp->rq, qp, &cqe, &budget);
+ }
+ spin_unlock_irqrestore(&cq->flush_lock, flags);
+
+ return num_cqes - budget;
+}
+
+int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
+ int num_cqes, struct bnxt_qplib_qp **lib_qp)
+{
+ struct cq_base *hw_cqe;
+ int budget, rc = 0;
+ u32 hw_polled = 0;
+ u8 type;
+
+ budget = num_cqes;
+
+ while (budget) {
+ hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
+
+ /* Check for Valid bit */
+ if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
+ break;
+
+ /*
+ * The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
+ /* Convert from the device's CQE format to qplib_wc */
+ type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
+ switch (type) {
+ case CQ_BASE_CQE_TYPE_REQ:
+ rc = bnxt_qplib_cq_process_req(cq,
+ (struct cq_req *)hw_cqe,
+ &cqe, &budget,
+ cq->hwq.cons, lib_qp);
+ break;
+ case CQ_BASE_CQE_TYPE_RES_RC:
+ rc = bnxt_qplib_cq_process_res_rc(cq,
+ (struct cq_res_rc *)
+ hw_cqe, &cqe,
+ &budget);
+ break;
+ case CQ_BASE_CQE_TYPE_RES_UD:
+ rc = bnxt_qplib_cq_process_res_ud
+ (cq, (struct cq_res_ud *)hw_cqe, &cqe,
+ &budget);
+ break;
+ case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
+ rc = bnxt_qplib_cq_process_res_raweth_qp1
+ (cq, (struct cq_res_raweth_qp1 *)
+ hw_cqe, &cqe, &budget);
+ break;
+ case CQ_BASE_CQE_TYPE_TERMINAL:
+ rc = bnxt_qplib_cq_process_terminal
+ (cq, (struct cq_terminal *)hw_cqe,
+ &cqe, &budget);
+ break;
+ case CQ_BASE_CQE_TYPE_CUT_OFF:
+ bnxt_qplib_cq_process_cutoff
+ (cq, (struct cq_cutoff *)hw_cqe);
+ /* Done processing this CQ */
+ goto exit;
+ default:
+ dev_err(&cq->hwq.pdev->dev,
+ "process_cq unknown type 0x%lx\n",
+ hw_cqe->cqe_type_toggle &
+ CQ_BASE_CQE_TYPE_MASK);
+ rc = -EINVAL;
+ break;
+ }
+ if (rc < 0) {
+ if (rc == -EAGAIN)
+ break;
+ /* Error while processing the CQE, just skip to the
+ * next one
+ */
+ if (type != CQ_BASE_CQE_TYPE_TERMINAL)
+ dev_err(&cq->hwq.pdev->dev,
+ "process_cqe error rc = 0x%x\n", rc);
+ }
+ hw_polled++;
+ bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements, &cq->hwq.cons,
+ 1, &cq->dbinfo.flags);
+
+ }
+ if (hw_polled)
+ bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
+exit:
+ return num_cqes - budget;
+}
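
One possible way the two completion entry points above compose at the verbs layer (hedged sketch, not part of the patch; example_drain_cq is hypothetical and the lib_qp / WA 9060 phantom-WQE handling a real caller performs is ignored here):

    static int example_drain_cq(struct bnxt_qplib_cq *cq,
                                struct bnxt_qplib_cqe *cqe_buf, int nbuf)
    {
            struct bnxt_qplib_qp *lib_qp = NULL;
            int polled;

            polled = bnxt_qplib_poll_cq(cq, cqe_buf, nbuf, &lib_qp);
            /* lib_qp (WA 9060 phantom handling) is ignored in this sketch */

            /* Fabricate flush completions for errored QPs with the leftover room */
            if (polled < nbuf)
                    polled += bnxt_qplib_process_flush_list(cq, cqe_buf + polled,
                                                            nbuf - polled);
            return polled;
    }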
+
+void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
+{
+ cq->dbinfo.toggle = cq->toggle;
+ if (arm_type)
+ bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
+ /* Using cq->arm_state variable to track whether to issue cq handler */
+ atomic_set(&cq->arm_state, 1);
+}
+
+void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
+{
+ flush_workqueue(qp->scq->nq->cqn_wq);
+ if (qp->scq != qp->rcq)
+ flush_workqueue(qp->rcq->nq->cqn_wq);
+}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
new file mode 100644
index 000000000000..b990d0c0ce1a
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -0,0 +1,688 @@
+/*
+ * Broadcom NetXtreme-E RoCE driver.
+ *
+ * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: Fast Path Operators (header)
+ */
+
+#ifndef __BNXT_QPLIB_FP_H__
+#define __BNXT_QPLIB_FP_H__
+
+#include <rdma/bnxt_re-abi.h>
+
+/* A few helper structures are temporarily defined here;
+ * they should be removed once roce_hsi.h is updated
+ * in the original code base
+ */
+struct sq_ud_ext_hdr {
+ __le32 dst_qp;
+ __le32 avid;
+ __le64 rsvd;
+};
+
+struct sq_raw_ext_hdr {
+ __le32 cfa_meta;
+ __le32 rsvd0;
+ __le64 rsvd1;
+};
+
+struct sq_rdma_ext_hdr {
+ __le64 remote_va;
+ __le32 remote_key;
+ __le32 rsvd;
+};
+
+struct sq_atomic_ext_hdr {
+ __le64 swap_data;
+ __le64 cmp_data;
+};
+
+struct sq_fr_pmr_ext_hdr {
+ __le64 pblptr;
+ __le64 va;
+};
+
+struct sq_bind_ext_hdr {
+ __le64 va;
+ __le32 length_lo;
+ __le32 length_hi;
+};
+
+struct rq_ext_hdr {
+ __le64 rsvd1;
+ __le64 rsvd2;
+};
+
+/* Helper structures end */
+
+struct bnxt_qplib_srq {
+ struct bnxt_qplib_pd *pd;
+ struct bnxt_qplib_dpi *dpi;
+ struct bnxt_qplib_db_info dbinfo;
+ u64 srq_handle;
+ u32 id;
+ u16 wqe_size;
+ u32 max_wqe;
+ u32 max_sge;
+ u32 threshold;
+ bool arm_req;
+ struct bnxt_qplib_cq *cq;
+ struct bnxt_qplib_hwq hwq;
+ struct bnxt_qplib_swq *swq;
+ int start_idx;
+ int last_idx;
+ struct bnxt_qplib_sg_info sg_info;
+ u16 eventq_hw_ring_id;
+ spinlock_t lock; /* protect SRQE link list */
+ u8 toggle;
+};
+
+struct bnxt_qplib_sge {
+ u64 addr;
+ u32 lkey;
+ u32 size;
+};
+
+struct bnxt_qplib_swq {
+ u64 wr_id;
+ int next_idx;
+ u8 type;
+ u8 flags;
+ u32 start_psn;
+ u32 next_psn;
+ u32 slot_idx;
+ u8 slots;
+ struct sq_psn_search *psn_search;
+ struct sq_psn_search_ext *psn_ext;
+};
+
+struct bnxt_qplib_swqe {
+ /* General */
+#define BNXT_QPLIB_FENCE_WRID 0x46454E43 /* "FENC" */
+ u64 wr_id;
+ u8 reqs_type;
+ u8 type;
+#define BNXT_QPLIB_SWQE_TYPE_SEND 0
+#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM 1
+#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV 2
+#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE 4
+#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM 5
+#define BNXT_QPLIB_SWQE_TYPE_RDMA_READ 6
+#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP 8
+#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD 11
+#define BNXT_QPLIB_SWQE_TYPE_LOCAL_INV 12
+#define BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR 13
+#define BNXT_QPLIB_SWQE_TYPE_REG_MR 13
+#define BNXT_QPLIB_SWQE_TYPE_BIND_MW 14
+#define BNXT_QPLIB_SWQE_TYPE_RECV 128
+#define BNXT_QPLIB_SWQE_TYPE_RECV_RDMA_IMM 129
+ u8 flags;
+#define BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP BIT(0)
+#define BNXT_QPLIB_SWQE_FLAGS_RD_ATOMIC_FENCE BIT(1)
+#define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE BIT(2)
+#define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT BIT(3)
+#define BNXT_QPLIB_SWQE_FLAGS_INLINE BIT(4)
+ struct bnxt_qplib_sge sg_list[BNXT_VAR_MAX_SGE];
+ int num_sge;
+ /* Max inline data is 96 bytes */
+ u32 inline_len;
+#define BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH 96
+ u8 inline_data[BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH];
+
+ union {
+ /* Send, with imm, inval key */
+ struct {
+ union {
+ u32 imm_data;
+ u32 inv_key;
+ };
+ u32 q_key;
+ u32 dst_qp;
+ u32 avid;
+ } send;
+
+ /* Send Raw Ethernet and QP1 */
+ struct {
+ u16 lflags;
+ u16 cfa_action;
+ u32 cfa_meta;
+ } rawqp1;
+
+ /* RDMA write, with imm, read */
+ struct {
+ union {
+ u32 imm_data;
+ u32 inv_key;
+ };
+ u64 remote_va;
+ u32 r_key;
+ } rdma;
+
+ /* Atomic cmp/swap, fetch/add */
+ struct {
+ u64 remote_va;
+ u32 r_key;
+ u64 swap_data;
+ u64 cmp_data;
+ } atomic;
+
+ /* Local Invalidate */
+ struct {
+ u32 inv_l_key;
+ } local_inv;
+
+ /* FR-PMR */
+ struct {
+ u8 access_cntl;
+ u8 pg_sz_log;
+ bool zero_based;
+ u32 l_key;
+ u32 length;
+ u8 pbl_pg_sz_log;
+#define BNXT_QPLIB_SWQE_PAGE_SIZE_4K 0
+#define BNXT_QPLIB_SWQE_PAGE_SIZE_8K 1
+#define BNXT_QPLIB_SWQE_PAGE_SIZE_64K 4
+#define BNXT_QPLIB_SWQE_PAGE_SIZE_256K 6
+#define BNXT_QPLIB_SWQE_PAGE_SIZE_1M 8
+#define BNXT_QPLIB_SWQE_PAGE_SIZE_2M 9
+#define BNXT_QPLIB_SWQE_PAGE_SIZE_4M 10
+#define BNXT_QPLIB_SWQE_PAGE_SIZE_1G 18
+ u8 levels;
+#define PAGE_SHIFT_4K 12
+ __le64 *pbl_ptr;
+ dma_addr_t pbl_dma_ptr;
+ u64 *page_list;
+ u16 page_list_len;
+ u64 va;
+ } frmr;
+
+ /* Bind */
+ struct {
+ u8 access_cntl;
+#define BNXT_QPLIB_BIND_SWQE_ACCESS_LOCAL_WRITE BIT(0)
+#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_READ BIT(1)
+#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_WRITE BIT(2)
+#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_ATOMIC BIT(3)
+#define BNXT_QPLIB_BIND_SWQE_ACCESS_WINDOW_BIND BIT(4)
+ bool zero_based;
+ u8 mw_type;
+ u32 parent_l_key;
+ u32 r_key;
+ u64 va;
+ u32 length;
+ } bind;
+ };
+};
+
+struct bnxt_qplib_q {
+ struct bnxt_qplib_hwq hwq;
+ struct bnxt_qplib_swq *swq;
+ struct bnxt_qplib_db_info dbinfo;
+ struct bnxt_qplib_sg_info sg_info;
+ u32 max_wqe;
+ u32 max_sw_wqe;
+ u16 wqe_size;
+ u16 q_full_delta;
+ u16 max_sge;
+ u32 psn;
+ bool condition;
+ bool single;
+ bool send_phantom;
+ u32 phantom_wqe_cnt;
+ u32 phantom_cqe_cnt;
+ u32 next_cq_cons;
+ bool flushed;
+ u32 swq_start;
+ u32 swq_last;
+};
+
+struct bnxt_qplib_qp {
+ struct bnxt_qplib_pd *pd;
+ struct bnxt_qplib_dpi *dpi;
+ struct bnxt_qplib_chip_ctx *cctx;
+ u64 qp_handle;
+#define BNXT_QPLIB_QP_ID_INVALID 0xFFFFFFFF
+ u32 id;
+ u8 type;
+ u8 sig_type;
+ u8 wqe_mode;
+ u8 state;
+ u8 cur_qp_state;
+ u64 modify_flags;
+ u32 max_inline_data;
+ u32 mtu;
+ u8 path_mtu;
+ bool en_sqd_async_notify;
+ u16 pkey_index;
+ u32 qkey;
+ u32 dest_qp_id;
+ u8 access;
+ u8 timeout;
+ u8 retry_cnt;
+ u8 rnr_retry;
+ u64 wqe_cnt;
+ u32 min_rnr_timer;
+ u32 max_rd_atomic;
+ u32 max_dest_rd_atomic;
+ u32 dest_qpn;
+ u8 smac[6];
+ u16 vlan_id;
+ u16 port_id;
+ u16 udp_sport;
+ u8 nw_type;
+ struct bnxt_qplib_ah ah;
+
+#define BTH_PSN_MASK ((1 << 24) - 1)
+ /* SQ */
+ struct bnxt_qplib_q sq;
+ /* RQ */
+ struct bnxt_qplib_q rq;
+ /* SRQ */
+ struct bnxt_qplib_srq *srq;
+ /* CQ */
+ struct bnxt_qplib_cq *scq;
+ struct bnxt_qplib_cq *rcq;
+ /* IRRQ and ORRQ */
+ struct bnxt_qplib_hwq irrq;
+ struct bnxt_qplib_hwq orrq;
+ /* Header buffer for QP1 */
+ int sq_hdr_buf_size;
+ int rq_hdr_buf_size;
+/*
+ * Buffer space for ETH(14), IP or GRH(40), UDP header(8)
+ * and ib_bth + ib_deth (20).
+ * Max required is 82 when RoCE V2 is enabled
+ */
+#define BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2 86
+ /* Ethernet header = 14 */
+ /* ib_grh = 40 (provided by MAD) */
+ /* ib_bth + ib_deth = 20 */
+ /* MAD = 256 (provided by MAD) */
+ /* iCRC = 4 */
+#define BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE 14
+#define BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2 512
+#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 20
+#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 40
+#define BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE 20
+ void *sq_hdr_buf;
+ dma_addr_t sq_hdr_buf_map;
+ void *rq_hdr_buf;
+ dma_addr_t rq_hdr_buf_map;
+ struct list_head sq_flush;
+ struct list_head rq_flush;
+ u32 msn;
+ u32 msn_tbl_sz;
+ bool is_host_msn_tbl;
+ u8 tos_dscp;
+ u32 ugid_index;
+};
+
+#define BNXT_RE_MAX_MSG_SIZE 0x80000000
+#define BNXT_RE_INVAL_MSG_SIZE 0xFFFFFFFF
+
+#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)
+
+#define CQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_CQE_ENTRY_SIZE)
+#define CQE_MAX_IDX_PER_PG (CQE_CNT_PER_PG - 1)
+#define CQE_PG(x) (((x) & ~CQE_MAX_IDX_PER_PG) / CQE_CNT_PER_PG)
+#define CQE_IDX(x) ((x) & CQE_MAX_IDX_PER_PG)
+
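A standalone C sketch (not part of the patch) of the CQE_PG()/CQE_IDX() split above; the 64-byte entry size and 4 KiB page size are illustrative stand-ins for sizeof(struct cq_base) and PAGE_SIZE.

#include <stdio.h>

#define DEMO_PAGE_SIZE		4096
#define DEMO_CQE_SIZE		64	/* stand-in for sizeof(struct cq_base) */
#define DEMO_CNT_PER_PG		(DEMO_PAGE_SIZE / DEMO_CQE_SIZE)
#define DEMO_MAX_IDX_PER_PG	(DEMO_CNT_PER_PG - 1)
#define DEMO_PG(x)		(((x) & ~DEMO_MAX_IDX_PER_PG) / DEMO_CNT_PER_PG)
#define DEMO_IDX(x)		((x) & DEMO_MAX_IDX_PER_PG)

int main(void)
{
	unsigned int cqe = 130;

	/* 64 entries per 4 KiB page: entry 130 lives on page 2 at index 2 */
	printf("pg=%u idx=%u\n", DEMO_PG(cqe), DEMO_IDX(cqe));
	return 0;
}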
+#define ROCE_CQE_CMP_V 0
+#define CQE_CMP_VALID(hdr, pass) \
+ (!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \
+ !((pass) & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))
+
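A standalone C sketch (not part of the patch) of the boolean test CQE_CMP_VALID performs; the bit values are stand-ins, and the epoch bit in the consumer flags is assumed to flip on each ring wrap, as implied by BNXT_QPLIB_FLAG_EPOCH_CONS_MASK.

#include <stdbool.h>
#include <stdio.h>

#define DEMO_TOGGLE_BIT	0x2	/* stand-in for CQ_BASE_TOGGLE */
#define DEMO_EPOCH_MASK	0x1	/* stand-in for the epoch consumer flag */

static bool demo_cmp_valid(unsigned int type_toggle, unsigned int pass)
{
	/* valid when the toggle bit is the complement of the epoch bit */
	return !!(type_toggle & DEMO_TOGGLE_BIT) == !(pass & DEMO_EPOCH_MASK);
}

int main(void)
{
	/* first pass (epoch 0): an entry with the toggle bit set is valid */
	printf("%d\n", demo_cmp_valid(DEMO_TOGGLE_BIT, 0));	/* 1 */
	/* after a wrap (epoch 1): the same toggle value is treated as stale */
	printf("%d\n", demo_cmp_valid(DEMO_TOGGLE_BIT, 1));	/* 0 */
	return 0;
}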
+static inline u32 __bnxt_qplib_get_avail(struct bnxt_qplib_hwq *hwq)
+{
+ int cons, prod, avail;
+
+ cons = hwq->cons;
+ prod = hwq->prod;
+ avail = cons - prod;
+ if (cons <= prod)
+ avail += hwq->depth;
+ return avail;
+}
+
+static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *que,
+ u8 slots)
+{
+ struct bnxt_qplib_hwq *hwq;
+ int avail;
+
+ hwq = &que->hwq;
+ /* False full is possible, retrying post-send makes sense */
+ avail = hwq->cons - hwq->prod;
+ if (hwq->cons <= hwq->prod)
+ avail += hwq->depth;
+ return avail <= slots;
+}
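A standalone C sketch (not part of the patch) of the wrap-around free-slot arithmetic used by __bnxt_qplib_get_avail() and bnxt_qplib_queue_full(); the depth and producer/consumer values are made-up illustrations.

#include <stdio.h>

static int ring_avail(int cons, int prod, int depth)
{
	int avail = cons - prod;

	if (cons <= prod)
		avail += depth;	/* producer is at or ahead of the consumer */
	return avail;
}

int main(void)
{
	/* depth 16, producer at 10, consumer at 4: 10 slots free */
	printf("%d\n", ring_avail(4, 10, 16));
	/* producer wrapped: cons 12, prod 3: 9 slots free */
	printf("%d\n", ring_avail(12, 3, 16));
	return 0;
}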
+
+/* CQ coalescing parameters */
+struct bnxt_qplib_cq_coal_param {
+ u16 buf_maxtime;
+ u8 normal_maxbuf;
+ u8 during_maxbuf;
+ u8 en_ring_idle_mode;
+};
+
+#define BNXT_QPLIB_CQ_COAL_DEF_BUF_MAXTIME 0x1
+#define BNXT_QPLIB_CQ_COAL_DEF_NORMAL_MAXBUF_P7 0x8
+#define BNXT_QPLIB_CQ_COAL_DEF_DURING_MAXBUF_P7 0x8
+#define BNXT_QPLIB_CQ_COAL_DEF_NORMAL_MAXBUF_P5 0x1
+#define BNXT_QPLIB_CQ_COAL_DEF_DURING_MAXBUF_P5 0x1
+#define BNXT_QPLIB_CQ_COAL_DEF_EN_RING_IDLE_MODE 0x1
+#define BNXT_QPLIB_CQ_COAL_MAX_BUF_MAXTIME 0x1bf
+#define BNXT_QPLIB_CQ_COAL_MAX_NORMAL_MAXBUF 0x1f
+#define BNXT_QPLIB_CQ_COAL_MAX_DURING_MAXBUF 0x1f
+#define BNXT_QPLIB_CQ_COAL_MAX_EN_RING_IDLE_MODE 0x1
+
+struct bnxt_qplib_cqe {
+ u8 status;
+ u8 type;
+ u8 opcode;
+ u32 length;
+ u16 cfa_meta;
+ u64 wr_id;
+ union {
+ u32 immdata;
+ u32 invrkey;
+ };
+ u64 qp_handle;
+ u64 mr_handle;
+ u16 flags;
+ u8 smac[6];
+ u32 src_qp;
+ u16 raweth_qp1_flags;
+ u16 raweth_qp1_errors;
+ u16 raweth_qp1_cfa_code;
+ u32 raweth_qp1_flags2;
+ u32 raweth_qp1_metadata;
+ u8 raweth_qp1_payload_offset;
+ u16 pkey_index;
+};
+
+#define BNXT_QPLIB_QUEUE_START_PERIOD 0x01
+struct bnxt_qplib_cq {
+ struct bnxt_qplib_dpi *dpi;
+ struct bnxt_qplib_db_info dbinfo;
+ u32 max_wqe;
+ u32 id;
+ u16 count;
+ u16 period;
+ struct bnxt_qplib_hwq hwq;
+ struct bnxt_qplib_hwq resize_hwq;
+ u32 cnq_hw_ring_id;
+ struct bnxt_qplib_nq *nq;
+ bool resize_in_progress;
+ struct bnxt_qplib_sg_info sg_info;
+ u64 cq_handle;
+ u8 toggle;
+
+#define CQ_RESIZE_WAIT_TIME_MS 500
+ unsigned long flags;
+#define CQ_FLAGS_RESIZE_IN_PROG 1
+ wait_queue_head_t waitq;
+ struct list_head sqf_head, rqf_head;
+ atomic_t arm_state;
+ spinlock_t compl_lock; /* synch CQ handlers */
+/* Locking Notes:
+ * QP can move to error state from modify_qp, async error event or error
+ * CQE as part of poll_cq. When QP is moved to error state, it gets added
+ * to two flush lists, one each for SQ and RQ.
+ * Each flush list is protected by qplib_cq->flush_lock. Both scq and rcq
+ * flush_locks should be acquired when QP is moved to error. The control path
+ * operations(modify_qp and async error events) are synchronized with poll_cq
+ * using upper level CQ locks (bnxt_re_cq->cq_lock) of both SCQ and RCQ.
+ * The qplib_cq->flush_lock is required to synchronize two instances of poll_cq
+ * of the same QP while manipulating the flush list.
+ */
+ spinlock_t flush_lock; /* QP flush management */
+ u16 cnq_events;
+ struct bnxt_qplib_cq_coal_param *coalescing;
+};
+
+#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq)
+#define BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE sizeof(struct xrrq_orrq)
+#define IRD_LIMIT_TO_IRRQ_SLOTS(x) (2 * (x) + 2)
+#define IRRQ_SLOTS_TO_IRD_LIMIT(s) (((s) >> 1) - 1)
+#define ORD_LIMIT_TO_ORRQ_SLOTS(x) ((x) + 1)
+#define ORRQ_SLOTS_TO_ORD_LIMIT(s) ((s) - 1)
+
+#define BNXT_QPLIB_MAX_NQE_ENTRY_SIZE sizeof(struct nq_base)
+
+#define NQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_NQE_ENTRY_SIZE)
+#define NQE_MAX_IDX_PER_PG (NQE_CNT_PER_PG - 1)
+#define NQE_PG(x) (((x) & ~NQE_MAX_IDX_PER_PG) / NQE_CNT_PER_PG)
+#define NQE_IDX(x) ((x) & NQE_MAX_IDX_PER_PG)
+
+#define NQE_CMP_VALID(hdr, pass) \
+ (!!(le32_to_cpu((hdr)->info63_v[0]) & NQ_BASE_V) == \
+ !((pass) & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))
+
+#define BNXT_QPLIB_NQE_MAX_CNT (128 * 1024)
+
+#define NQ_CONS_PCI_BAR_REGION 2
+#define NQ_DB_KEY_CP (0x2 << CMPL_DOORBELL_KEY_SFT)
+#define NQ_DB_IDX_VALID CMPL_DOORBELL_IDX_VALID
+#define NQ_DB_IRQ_DIS CMPL_DOORBELL_MASK
+#define NQ_DB_CP_FLAGS_REARM (NQ_DB_KEY_CP | \
+ NQ_DB_IDX_VALID)
+#define NQ_DB_CP_FLAGS (NQ_DB_KEY_CP | \
+ NQ_DB_IDX_VALID | \
+ NQ_DB_IRQ_DIS)
+
+struct bnxt_qplib_nq_db {
+ struct bnxt_qplib_reg_desc reg;
+ struct bnxt_qplib_db_info dbinfo;
+};
+
+typedef int (*cqn_handler_t)(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_cq *cq);
+typedef int (*srqn_handler_t)(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_srq *srq, u8 event);
+
+struct bnxt_qplib_nq {
+ struct pci_dev *pdev;
+ struct bnxt_qplib_res *res;
+ char *name;
+ struct bnxt_qplib_hwq hwq;
+ struct bnxt_qplib_nq_db nq_db;
+ u16 ring_id;
+ int msix_vec;
+ cpumask_t mask;
+ struct tasklet_struct nq_tasklet;
+ bool requested;
+ int budget;
+ u32 load;
+
+ cqn_handler_t cqn_handler;
+ srqn_handler_t srqn_handler;
+ struct workqueue_struct *cqn_wq;
+};
+
+struct bnxt_qplib_nq_work {
+ struct work_struct work;
+ struct bnxt_qplib_nq *nq;
+ struct bnxt_qplib_cq *cq;
+};
+
+void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
+void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
+int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
+ int msix_vector, bool need_init);
+int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
+ int nq_idx, int msix_vector, int bar_reg_offset,
+ cqn_handler_t cqn_handler,
+ srqn_handler_t srq_handler);
+int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
+ struct bnxt_qplib_swqe *wqe);
+int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
+int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
+int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
+int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
+int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
+void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp);
+void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_qp *qp);
+void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_sge *sge);
+void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_sge *sge);
+u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp);
+dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp,
+ u32 index);
+void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp);
+int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe);
+void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp);
+int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe);
+int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
+int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
+ int new_cqes);
+void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_cq *cq);
+int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
+int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
+ int num, struct bnxt_qplib_qp **qp);
+bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
+void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
+void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
+int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq);
+void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
+void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
+ unsigned long *flags);
+void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
+ unsigned long *flags);
+int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
+ struct bnxt_qplib_cqe *cqe,
+ int num_cqes);
+void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp);
+void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq);
+
+static inline void *bnxt_qplib_get_swqe(struct bnxt_qplib_q *que, u32 *swq_idx)
+{
+ u32 idx;
+
+ idx = que->swq_start;
+ if (swq_idx)
+ *swq_idx = idx;
+ return &que->swq[idx];
+}
+
+static inline void bnxt_qplib_swq_mod_start(struct bnxt_qplib_q *que, u32 idx)
+{
+ que->swq_start = que->swq[idx].next_idx;
+}
+
+static inline u32 bnxt_qplib_get_depth(struct bnxt_qplib_q *que, u8 wqe_mode, bool is_sq)
+{
+ u32 slots;
+
+ /* Queue depth is the number of slots. */
+ slots = (que->wqe_size * que->max_wqe) / sizeof(struct sq_sge);
+ /* For variable WQE mode, need to align the slots to 256 */
+ if (wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE && is_sq)
+ slots = ALIGN(slots, BNXT_VAR_MAX_SLOT_ALIGN);
+ return slots;
+}
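A standalone C sketch (not part of the patch) of the slot-depth arithmetic in bnxt_qplib_get_depth(); the 16-byte SGE size and the 256-slot alignment are assumptions taken from the code and comment above, and DEMO_ALIGN mimics the kernel's ALIGN() macro.

#include <stdio.h>

#define DEMO_SGE_SIZE	16	/* stand-in for sizeof(struct sq_sge) */
#define DEMO_SLOT_ALIGN	256	/* stand-in for BNXT_VAR_MAX_SLOT_ALIGN */
#define DEMO_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int wqe_size = 128, max_wqe = 250;
	unsigned int slots = (wqe_size * max_wqe) / DEMO_SGE_SIZE;

	/* 250 WQEs of 128 bytes = 2000 slots; variable SQ mode rounds up to 2048 */
	printf("static=%u variable=%u\n", slots, DEMO_ALIGN(slots, DEMO_SLOT_ALIGN));
	return 0;
}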
+
+static inline u32 bnxt_qplib_set_sq_size(struct bnxt_qplib_q *que, u8 wqe_mode)
+{
+ return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
+ que->max_wqe : bnxt_qplib_get_depth(que, wqe_mode, true);
+}
+
+static inline u32 bnxt_qplib_set_sq_max_slot(u8 wqe_mode)
+{
+ return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
+ sizeof(struct sq_send) / sizeof(struct sq_sge) : 1;
+}
+
+static inline u32 bnxt_qplib_set_rq_max_slot(u32 wqe_size)
+{
+ return (wqe_size / sizeof(struct sq_sge));
+}
+
+static inline u16 __xlate_qfd(u16 delta, u16 wqe_bytes)
+{
+ /* For Cu/Wh delta = 128, stride = 16, wqe_bytes = 128
+ * For Gen-p5 B/C mode delta = 0, stride = 16, wqe_bytes = 128.
+ * For Gen-p5 delta = 0, stride = 16, 32 <= wqe_bytes <= 512.
+ * when 8916 is disabled.
+ */
+ return (delta * wqe_bytes) / sizeof(struct sq_sge);
+}
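A standalone C sketch (not part of the patch) of the __xlate_qfd() conversion from a WQE-unit queue-full delta to 16-byte slots, using the Cu/Wh numbers from the comment above.

#include <stdio.h>

#define DEMO_SGE_SIZE	16	/* stand-in for sizeof(struct sq_sge) */

static unsigned int xlate_qfd(unsigned int delta, unsigned int wqe_bytes)
{
	return (delta * wqe_bytes) / DEMO_SGE_SIZE;
}

int main(void)
{
	/* 128 WQEs * 128 bytes / 16-byte slots = 1024 slots of headroom */
	printf("%u\n", xlate_qfd(128, 128));
	return 0;
}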
+
+static inline u16 bnxt_qplib_calc_ilsize(struct bnxt_qplib_swqe *wqe, u16 max)
+{
+ u16 size = 0;
+ int indx;
+
+ for (indx = 0; indx < wqe->num_sge; indx++)
+ size += wqe->sg_list[indx].size;
+ if (size > max)
+ size = max;
+
+ return size;
+}
+
+/* MSN table update inline */
+static inline __le64 bnxt_re_update_msn_tbl(u32 st_idx, u32 npsn, u32 start_psn)
+{
+ return cpu_to_le64((((u64)(st_idx) << SQ_MSN_SEARCH_START_IDX_SFT) &
+ SQ_MSN_SEARCH_START_IDX_MASK) |
+ (((u64)(npsn) << SQ_MSN_SEARCH_NEXT_PSN_SFT) &
+ SQ_MSN_SEARCH_NEXT_PSN_MASK) |
+ (((start_psn) << SQ_MSN_SEARCH_START_PSN_SFT) &
+ SQ_MSN_SEARCH_START_PSN_MASK));
+}
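A standalone C sketch (not part of the patch) of the field packing bnxt_re_update_msn_tbl() performs; the shift and mask values below are placeholders for the SQ_MSN_SEARCH_* constants defined in roce_hsi.h and may not match the real layout.

#include <stdint.h>
#include <stdio.h>

#define DEMO_START_IDX_SFT	48
#define DEMO_START_IDX_MASK	(0xffffULL << DEMO_START_IDX_SFT)
#define DEMO_NEXT_PSN_SFT	24
#define DEMO_NEXT_PSN_MASK	(0xffffffULL << DEMO_NEXT_PSN_SFT)
#define DEMO_START_PSN_SFT	0
#define DEMO_START_PSN_MASK	0xffffffULL

static uint64_t pack_msn(uint32_t st_idx, uint32_t npsn, uint32_t start_psn)
{
	/* three fields packed into one 64-bit MSN search entry */
	return (((uint64_t)st_idx << DEMO_START_IDX_SFT) & DEMO_START_IDX_MASK) |
	       (((uint64_t)npsn << DEMO_NEXT_PSN_SFT) & DEMO_NEXT_PSN_MASK) |
	       (((uint64_t)start_psn << DEMO_START_PSN_SFT) & DEMO_START_PSN_MASK);
}

int main(void)
{
	/* slot 5, next PSN 0x101, start PSN 0x100 */
	printf("0x%016llx\n", (unsigned long long)pack_msn(5, 0x101, 0x100));
	return 0;
}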
+
+static inline bool __is_var_wqe(struct bnxt_qplib_qp *qp)
+{
+ return (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE);
+}
+
+static inline bool __is_err_cqe_for_var_wqe(struct bnxt_qplib_qp *qp, u8 status)
+{
+ return (status != CQ_REQ_STATUS_OK) && __is_var_wqe(qp);
+}
+#endif /* __BNXT_QPLIB_FP_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
new file mode 100644
index 000000000000..295a9610f3e6
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -0,0 +1,1202 @@
+/*
+ * Broadcom NetXtreme-E RoCE driver.
+ *
+ * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: RDMA Controller HW interface
+ */
+
+#define dev_fmt(fmt) "QPLIB: " fmt
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/prefetch.h>
+#include <linux/delay.h>
+
+#include "roce_hsi.h"
+#include "qplib_res.h"
+#include "qplib_rcfw.h"
+#include "qplib_sp.h"
+#include "qplib_fp.h"
+#include "qplib_tlv.h"
+
+static void bnxt_qplib_service_creq(struct tasklet_struct *t);
+
+/**
+ * bnxt_qplib_map_rc - map return type based on opcode
+ * @opcode: roce slow path opcode
+ *
+ * case #1
+ * Firmware-initiated error recovery is a safe state machine, and the
+ * driver can consider all the underlying rdma resources to be freed.
+ * In this state, it is safe to return success for opcodes related to
+ * destroying rdma resources (like destroy qp, destroy cq etc.).
+ *
+ * case #2
+ * If the driver detects a potential firmware stall, the state machine is
+ * not safe and the driver cannot consider all the underlying rdma
+ * resources to be freed.
+ * In this state, it is not safe to return success for opcodes related to
+ * destroying rdma resources (like destroy qp, destroy cq etc.).
+ *
+ * Scope of this helper function is only for case #1.
+ *
+ * Returns:
+ * 0 to communicate success to caller.
+ * Non-zero error code to communicate failure to caller.
+ */
+static int bnxt_qplib_map_rc(u8 opcode)
+{
+ switch (opcode) {
+ case CMDQ_BASE_OPCODE_DESTROY_QP:
+ case CMDQ_BASE_OPCODE_DESTROY_SRQ:
+ case CMDQ_BASE_OPCODE_DESTROY_CQ:
+ case CMDQ_BASE_OPCODE_DEALLOCATE_KEY:
+ case CMDQ_BASE_OPCODE_DEREGISTER_MR:
+ case CMDQ_BASE_OPCODE_DELETE_GID:
+ case CMDQ_BASE_OPCODE_DESTROY_QP1:
+ case CMDQ_BASE_OPCODE_DESTROY_AH:
+ case CMDQ_BASE_OPCODE_DEINITIALIZE_FW:
+ case CMDQ_BASE_OPCODE_MODIFY_ROCE_CC:
+ case CMDQ_BASE_OPCODE_SET_LINK_AGGR_MODE:
+ return 0;
+ default:
+ return -ETIMEDOUT;
+ }
+}
+
+/**
+ * bnxt_re_is_fw_stalled - Check firmware health
+ * @rcfw: rcfw channel instance of rdev
+ * @cookie: cookie to track the command
+ *
+ * If the firmware has not responded to any rcfw command within
+ * rcfw->max_timeout, consider the firmware stalled.
+ *
+ * Returns:
+ * 0 if firmware is responding
+ * -ENODEV if firmware is not responding
+ */
+static int bnxt_re_is_fw_stalled(struct bnxt_qplib_rcfw *rcfw,
+ u16 cookie)
+{
+ struct bnxt_qplib_cmdq_ctx *cmdq;
+ struct bnxt_qplib_crsqe *crsqe;
+
+ crsqe = &rcfw->crsqe_tbl[cookie];
+ cmdq = &rcfw->cmdq;
+
+ if (time_after(jiffies, cmdq->last_seen +
+ (rcfw->max_timeout * HZ))) {
+ dev_warn_ratelimited(&rcfw->pdev->dev,
+ "%s: FW STALL Detected. cmdq[%#x]=%#x waited (%d > %d) msec active %d ",
+ __func__, cookie, crsqe->opcode,
+ jiffies_to_msecs(jiffies - cmdq->last_seen),
+ rcfw->max_timeout * 1000,
+ crsqe->is_in_used);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+ * __wait_for_resp - wait for a response without holding the cpu
+ * @rcfw: rcfw channel instance of rdev
+ * @cookie: cookie to track the command
+ *
+ * Wait for command completion in sleepable context.
+ *
+ * Returns:
+ * 0 if command is completed by firmware.
+ * Non-zero error code otherwise.
+ */
+static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
+{
+ struct bnxt_qplib_cmdq_ctx *cmdq;
+ struct bnxt_qplib_crsqe *crsqe;
+ int ret;
+
+ cmdq = &rcfw->cmdq;
+ crsqe = &rcfw->crsqe_tbl[cookie];
+
+ do {
+ if (test_bit(ERR_DEVICE_DETACHED, &cmdq->flags))
+ return bnxt_qplib_map_rc(crsqe->opcode);
+ if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
+ return -ETIMEDOUT;
+
+ wait_event_timeout(cmdq->waitq,
+ !crsqe->is_in_used ||
+ test_bit(ERR_DEVICE_DETACHED, &cmdq->flags),
+ secs_to_jiffies(rcfw->max_timeout));
+
+ if (!crsqe->is_in_used)
+ return 0;
+
+ bnxt_qplib_service_creq(&rcfw->creq.creq_tasklet);
+
+ if (!crsqe->is_in_used)
+ return 0;
+
+ ret = bnxt_re_is_fw_stalled(rcfw, cookie);
+ if (ret)
+ return ret;
+
+ } while (true);
+};
+
+/**
+ * __block_for_resp - hold the cpu context and wait for response
+ * @rcfw: rcfw channel instance of rdev
+ * @cookie: cookie to track the command
+ *
+ * This function holds the cpu (non-sleepable context) and waits
+ * for command completion. The maximum holding interval is 8 seconds.
+ *
+ * Returns:
+ * -ETIMEDOUT if the command is not completed within that interval.
+ * 0 if command is completed by firmware.
+ */
+static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
+{
+ struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
+ struct bnxt_qplib_crsqe *crsqe;
+ unsigned long issue_time = 0;
+
+ issue_time = jiffies;
+ crsqe = &rcfw->crsqe_tbl[cookie];
+
+ do {
+ if (test_bit(ERR_DEVICE_DETACHED, &cmdq->flags))
+ return bnxt_qplib_map_rc(crsqe->opcode);
+ if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
+ return -ETIMEDOUT;
+
+ udelay(1);
+
+ bnxt_qplib_service_creq(&rcfw->creq.creq_tasklet);
+ if (!crsqe->is_in_used)
+ return 0;
+
+ } while (time_before(jiffies, issue_time + (8 * HZ)));
+
+ return -ETIMEDOUT;
+};
+
+/* __send_message_no_waiter - get cookie and post the message.
+ * @rcfw: rcfw channel instance of rdev
+ * @msg: qplib message internal
+ *
+ * This function just posts the command and does not wait for completion.
+ * The current design of this function is:
+ * - the caller must hold the completion queue hwq->lock.
+ * - the caller must have consumed the existing completion and freed the resources.
+ * - this function does not check the queue-full condition.
+ * - this function explicitly sets is_waiter_alive=false.
+ * The current use case is to send destroy_ah if create_ah completes
+ * after the waiter of create_ah is lost. It can be extended to other
+ * use cases as well.
+ *
+ * Returns: Nothing
+ *
+ */
+static void __send_message_no_waiter(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_cmdqmsg *msg)
+{
+ struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
+ struct bnxt_qplib_hwq *hwq = &cmdq->hwq;
+ struct bnxt_qplib_crsqe *crsqe;
+ struct bnxt_qplib_cmdqe *cmdqe;
+ u32 sw_prod, cmdq_prod;
+ u16 cookie;
+ u32 bsize;
+ u8 *preq;
+
+ cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE;
+ __set_cmdq_base_cookie(msg->req, msg->req_sz, cpu_to_le16(cookie));
+ crsqe = &rcfw->crsqe_tbl[cookie];
+
+ /* Set cmd_size in terms of 16B slots in req. */
+ bsize = bnxt_qplib_set_cmd_slots(msg->req);
+ /* GET_CMD_SIZE returns the number of slots for both tlv
+ * and non-tlv commands after the call to bnxt_qplib_set_cmd_slots()
+ */
+ crsqe->is_internal_cmd = true;
+ crsqe->is_waiter_alive = false;
+ crsqe->is_in_used = true;
+ crsqe->req_size = __get_cmdq_base_cmd_size(msg->req, msg->req_sz);
+
+ preq = (u8 *)msg->req;
+ do {
+ /* Locate the next cmdq slot */
+ sw_prod = HWQ_CMP(hwq->prod, hwq);
+ cmdqe = bnxt_qplib_get_qe(hwq, sw_prod, NULL);
+ /* Copy a segment of the req cmd to the cmdq */
+ memset(cmdqe, 0, sizeof(*cmdqe));
+ memcpy(cmdqe, preq, min_t(u32, bsize, sizeof(*cmdqe)));
+ preq += min_t(u32, bsize, sizeof(*cmdqe));
+ bsize -= min_t(u32, bsize, sizeof(*cmdqe));
+ hwq->prod++;
+ } while (bsize > 0);
+ cmdq->seq_num++;
+
+ cmdq_prod = hwq->prod;
+ atomic_inc(&rcfw->timeout_send);
+ /* ring CMDQ DB */
+ wmb();
+ writel(cmdq_prod, cmdq->cmdq_mbox.prod);
+ writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
+}
+
+static int __send_message(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_cmdqmsg *msg, u8 opcode)
+{
+ u32 bsize, free_slots, required_slots;
+ struct bnxt_qplib_cmdq_ctx *cmdq;
+ struct bnxt_qplib_crsqe *crsqe;
+ struct bnxt_qplib_cmdqe *cmdqe;
+ struct bnxt_qplib_hwq *hwq;
+ u32 sw_prod, cmdq_prod;
+ struct pci_dev *pdev;
+ u16 cookie;
+ u8 *preq;
+
+ cmdq = &rcfw->cmdq;
+ hwq = &cmdq->hwq;
+ pdev = rcfw->pdev;
+
+ /* Cmdq entries are 16 bytes each; a request can consume one or more
+ * cmdqe
+ */
+ spin_lock_bh(&hwq->lock);
+ required_slots = bnxt_qplib_get_cmd_slots(msg->req);
+ free_slots = HWQ_FREE_SLOTS(hwq);
+ cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE;
+ crsqe = &rcfw->crsqe_tbl[cookie];
+
+ if (required_slots >= free_slots) {
+ dev_info_ratelimited(&pdev->dev,
+ "CMDQ is full req/free %d/%d!",
+ required_slots, free_slots);
+ spin_unlock_bh(&hwq->lock);
+ return -EAGAIN;
+ }
+ if (msg->block)
+ cookie |= RCFW_CMD_IS_BLOCKING;
+ __set_cmdq_base_cookie(msg->req, msg->req_sz, cpu_to_le16(cookie));
+
+ bsize = bnxt_qplib_set_cmd_slots(msg->req);
+ crsqe->free_slots = free_slots;
+ crsqe->resp = (struct creq_qp_event *)msg->resp;
+ crsqe->resp->cookie = cpu_to_le16(cookie);
+ crsqe->is_internal_cmd = false;
+ crsqe->is_waiter_alive = true;
+ crsqe->is_in_used = true;
+ crsqe->opcode = opcode;
+
+ crsqe->req_size = __get_cmdq_base_cmd_size(msg->req, msg->req_sz);
+ if (__get_cmdq_base_resp_size(msg->req, msg->req_sz) && msg->sb) {
+ struct bnxt_qplib_rcfw_sbuf *sbuf = msg->sb;
+
+ __set_cmdq_base_resp_addr(msg->req, msg->req_sz,
+ cpu_to_le64(sbuf->dma_addr));
+ __set_cmdq_base_resp_size(msg->req, msg->req_sz,
+ ALIGN(sbuf->size,
+ BNXT_QPLIB_CMDQE_UNITS) /
+ BNXT_QPLIB_CMDQE_UNITS);
+ }
+
+ preq = (u8 *)msg->req;
+ do {
+ /* Locate the next cmdq slot */
+ sw_prod = HWQ_CMP(hwq->prod, hwq);
+ cmdqe = bnxt_qplib_get_qe(hwq, sw_prod, NULL);
+ /* Copy a segment of the req cmd to the cmdq */
+ memset(cmdqe, 0, sizeof(*cmdqe));
+ memcpy(cmdqe, preq, min_t(u32, bsize, sizeof(*cmdqe)));
+ preq += min_t(u32, bsize, sizeof(*cmdqe));
+ bsize -= min_t(u32, bsize, sizeof(*cmdqe));
+ hwq->prod++;
+ } while (bsize > 0);
+ cmdq->seq_num++;
+
+ cmdq_prod = hwq->prod & 0xFFFF;
+ if (test_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags)) {
+ /* The very first doorbell write
+ * is required to set this flag
+ * which prompts the FW to reset
+ * its internal pointers
+ */
+ cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
+ clear_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
+ }
+ /* ring CMDQ DB */
+ wmb();
+ writel(cmdq_prod, cmdq->cmdq_mbox.prod);
+ writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
+ print_hex_dump_bytes("req: ", DUMP_PREFIX_OFFSET, msg->req, msg->req_sz);
+ spin_unlock_bh(&hwq->lock);
+ /* Return the CREQ response pointer */
+ return 0;
+}
+
+/**
+ * __poll_for_resp - self poll completion for rcfw command
+ * @rcfw: rcfw channel instance of rdev
+ * @cookie: cookie to track the command
+ *
+ * It works the same as __wait_for_resp except that this function
+ * does self-polling at short intervals since the interrupt is disabled.
+ * This function cannot be called from non-sleepable context.
+ *
+ * Returns:
+ * -ETIMEDOUT if the command is not completed within the timeout interval.
+ * 0 if command is completed by firmware.
+ */
+static int __poll_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
+{
+ struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
+ struct bnxt_qplib_crsqe *crsqe;
+ unsigned long issue_time;
+ int ret;
+
+ issue_time = jiffies;
+ crsqe = &rcfw->crsqe_tbl[cookie];
+
+ do {
+ if (test_bit(ERR_DEVICE_DETACHED, &cmdq->flags))
+ return bnxt_qplib_map_rc(crsqe->opcode);
+ if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
+ return -ETIMEDOUT;
+
+ usleep_range(1000, 1001);
+
+ bnxt_qplib_service_creq(&rcfw->creq.creq_tasklet);
+ if (!crsqe->is_in_used)
+ return 0;
+ if (jiffies_to_msecs(jiffies - issue_time) >
+ (rcfw->max_timeout * 1000)) {
+ ret = bnxt_re_is_fw_stalled(rcfw, cookie);
+ if (ret)
+ return ret;
+ }
+ } while (true);
+};
+
+static int __send_message_basic_sanity(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_cmdqmsg *msg,
+ u8 opcode)
+{
+ struct bnxt_qplib_cmdq_ctx *cmdq;
+
+ cmdq = &rcfw->cmdq;
+
+ /* Prevent posting if f/w is not in a state to process */
+ if (test_bit(ERR_DEVICE_DETACHED, &rcfw->cmdq.flags))
+ return -ENXIO;
+
+ if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
+ return -ETIMEDOUT;
+
+ if (test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) &&
+ opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
+ dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
+ return -EINVAL;
+ }
+
+ if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) &&
+ (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
+ opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
+ opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
+ dev_err(&rcfw->pdev->dev,
+ "QPLIB: RCFW not initialized, reject opcode 0x%x",
+ opcode);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/* This function just posts the command and does not wait for completion */
+static void __destroy_timedout_ah(struct bnxt_qplib_rcfw *rcfw,
+ struct creq_create_ah_resp *create_ah_resp)
+{
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_destroy_ah req = {};
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_DESTROY_AH,
+ sizeof(req));
+ req.ah_cid = create_ah_resp->xid;
+ msg.req = (struct cmdq_base *)&req;
+ msg.req_sz = sizeof(req);
+ __send_message_no_waiter(rcfw, &msg);
+ dev_info_ratelimited(&rcfw->pdev->dev,
+ "From %s: ah_cid = %d timeout_send %d\n",
+ __func__, req.ah_cid,
+ atomic_read(&rcfw->timeout_send));
+}
+
+/**
+ * __bnxt_qplib_rcfw_send_message - qplib interface to send
+ * and complete rcfw command.
+ * @rcfw: rcfw channel instance of rdev
+ * @msg: qplib message internal
+ *
+ * This function does not account for the shadow queue depth. It sends
+ * every command unconditionally as long as the send queue is not full.
+ *
+ * Returns:
+ * 0 if command completed by firmware.
+ * Non-zero if the command is not completed by firmware.
+ */
+static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_cmdqmsg *msg)
+{
+ struct creq_qp_event *evnt = (struct creq_qp_event *)msg->resp;
+ struct bnxt_qplib_crsqe *crsqe;
+ u16 cookie;
+ int rc;
+ u8 opcode;
+
+ opcode = __get_cmdq_base_opcode(msg->req, msg->req_sz);
+
+ rc = __send_message_basic_sanity(rcfw, msg, opcode);
+ if (rc)
+ return rc == -ENXIO ? bnxt_qplib_map_rc(opcode) : rc;
+
+ rc = __send_message(rcfw, msg, opcode);
+ if (rc)
+ return rc;
+
+ cookie = le16_to_cpu(__get_cmdq_base_cookie(msg->req, msg->req_sz))
+ & RCFW_MAX_COOKIE_VALUE;
+
+ if (msg->block)
+ rc = __block_for_resp(rcfw, cookie);
+ else if (atomic_read(&rcfw->rcfw_intr_enabled))
+ rc = __wait_for_resp(rcfw, cookie);
+ else
+ rc = __poll_for_resp(rcfw, cookie);
+
+ if (rc) {
+ spin_lock_bh(&rcfw->cmdq.hwq.lock);
+ crsqe = &rcfw->crsqe_tbl[cookie];
+ crsqe->is_waiter_alive = false;
+ if (rc == -ENODEV)
+ set_bit(FIRMWARE_STALL_DETECTED, &rcfw->cmdq.flags);
+ spin_unlock_bh(&rcfw->cmdq.hwq.lock);
+ return -ETIMEDOUT;
+ }
+
+ if (evnt->status) {
+ /* failed with status */
+ dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n",
+ cookie, opcode, evnt->status);
+ rc = -EIO;
+ }
+
+ return rc;
+}
+
+/**
+ * bnxt_qplib_rcfw_send_message - qplib interface to send
+ * and complete rcfw command.
+ * @rcfw: rcfw channel instance of rdev
+ * @msg: qplib message internal
+ *
+ * The driver interacts with firmware through the rcfw channel/slow path in two ways:
+ * a. Blocking rcfw command send. In this path, the driver cannot hold
+ * the context for a long period since it holds the cpu until the
+ * command completes.
+ * b. Non-blocking rcfw command send. In this path, the driver can hold the
+ * context for a longer period. There may be many pending commands waiting
+ * for completion because of the non-blocking nature.
+ *
+ * The driver uses a shadow queue depth. The current queue depth of 8K
+ * (given the size of an rcfw message, there can actually be ~4K rcfw
+ * commands outstanding) is not optimal for rcfw command processing in firmware.
+ *
+ * Restrict to at most #RCFW_CMD_NON_BLOCKING_SHADOW_QD non-blocking rcfw commands.
+ * Allow all blocking commands as long as the queue is not full.
+ *
+ * Returns:
+ * 0 if command completed by firmware.
+ * Non-zero if the command is not completed by firmware.
+ */
+int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_cmdqmsg *msg)
+{
+ int ret;
+
+ if (!msg->block) {
+ down(&rcfw->rcfw_inflight);
+ ret = __bnxt_qplib_rcfw_send_message(rcfw, msg);
+ up(&rcfw->rcfw_inflight);
+ } else {
+ ret = __bnxt_qplib_rcfw_send_message(rcfw, msg);
+ }
+
+ return ret;
+}
+
+/* Completions */
+static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
+ struct creq_func_event *func_event)
+{
+ int rc;
+
+ switch (func_event->event) {
+ case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
+ break;
+ case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
+ break;
+ case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
+ break;
+ case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
+ break;
+ case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
+ break;
+ case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
+ break;
+ case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
+ break;
+ case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
+ /* SRQ ctx error, call srq_handler??
+ * But there's no SRQ handle!
+ */
+ break;
+ case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
+ break;
+ case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
+ break;
+ case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
+ break;
+ case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST:
+ break;
+ case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rc = rcfw->creq.aeq_handler(rcfw, (void *)func_event, NULL);
+ return rc;
+}
+
+static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
+ struct creq_qp_event *qp_event,
+ u32 *num_wait)
+{
+ struct creq_qp_error_notification *err_event;
+ struct bnxt_qplib_hwq *hwq = &rcfw->cmdq.hwq;
+ struct bnxt_qplib_crsqe *crsqe;
+ u32 qp_id, tbl_indx, req_size;
+ struct bnxt_qplib_qp *qp;
+ u16 cookie, blocked = 0;
+ bool is_waiter_alive;
+ struct pci_dev *pdev;
+ u32 wait_cmds = 0;
+ int rc = 0;
+
+ pdev = rcfw->pdev;
+ print_hex_dump_bytes("event: ", DUMP_PREFIX_OFFSET, qp_event, sizeof(*qp_event));
+ switch (qp_event->event) {
+ case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
+ err_event = (struct creq_qp_error_notification *)qp_event;
+ qp_id = le32_to_cpu(err_event->xid);
+ spin_lock(&rcfw->tbl_lock);
+ tbl_indx = map_qp_id_to_tbl_indx(qp_id, rcfw);
+ qp = rcfw->qp_tbl[tbl_indx].qp_handle;
+ if (!qp) {
+ spin_unlock(&rcfw->tbl_lock);
+ break;
+ }
+ bnxt_qplib_mark_qp_error(qp);
+ rc = rcfw->creq.aeq_handler(rcfw, qp_event, qp);
+ spin_unlock(&rcfw->tbl_lock);
+ dev_dbg(&pdev->dev, "Received QP error notification\n");
+ dev_dbg(&pdev->dev,
+ "qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
+ qp_id, err_event->req_err_state_reason,
+ err_event->res_err_state_reason);
+ break;
+ default:
+ /*
+ * Command Response
+ * cmdq->lock needs to be acquired to synchronize
+ * the command send and completion reaping. This function
+ * is always called with creq->lock held. Using
+ * the nested variant of spin_lock.
+ *
+ */
+
+ spin_lock_nested(&hwq->lock, SINGLE_DEPTH_NESTING);
+ cookie = le16_to_cpu(qp_event->cookie);
+ blocked = cookie & RCFW_CMD_IS_BLOCKING;
+ cookie &= RCFW_MAX_COOKIE_VALUE;
+ crsqe = &rcfw->crsqe_tbl[cookie];
+
+ if (WARN_ONCE(test_bit(FIRMWARE_STALL_DETECTED,
+ &rcfw->cmdq.flags),
+ "QPLIB: Unreponsive rcfw channel detected.!!")) {
+ dev_info(&pdev->dev,
+ "rcfw timedout: cookie = %#x, free_slots = %d",
+ cookie, crsqe->free_slots);
+ spin_unlock(&hwq->lock);
+ return rc;
+ }
+
+ if (crsqe->is_internal_cmd && !qp_event->status)
+ atomic_dec(&rcfw->timeout_send);
+
+ if (crsqe->is_waiter_alive) {
+ if (crsqe->resp) {
+ memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
+ /* Insert write memory barrier to ensure that
+ * response data is copied before clearing the
+ * flags
+ */
+ smp_wmb();
+ }
+ if (!blocked)
+ wait_cmds++;
+ }
+
+ req_size = crsqe->req_size;
+ is_waiter_alive = crsqe->is_waiter_alive;
+
+ crsqe->req_size = 0;
+ if (!is_waiter_alive)
+ crsqe->resp = NULL;
+
+ crsqe->is_in_used = false;
+
+ hwq->cons += req_size;
+
+ /* This handles the following scenario -
+ * Create AH completed successfully in firmware,
+ * but the completion took longer and the driver has already lost
+ * the create_ah context from the caller.
+ * We have already returned failure for the create_ah verb,
+ * so destroy the same address vector since it is
+ * no longer used in the stack. We don't care about completion
+ * in __send_message_no_waiter.
+ * If destroy_ah fails in firmware, there will be an AH
+ * resource leak; this is relatively non-critical and an unlikely
+ * scenario. The current design does not handle such a case.
+ */
+ if (!is_waiter_alive && !qp_event->status &&
+ qp_event->event == CREQ_QP_EVENT_EVENT_CREATE_AH)
+ __destroy_timedout_ah(rcfw,
+ (struct creq_create_ah_resp *)
+ qp_event);
+ spin_unlock(&hwq->lock);
+ }
+ *num_wait += wait_cmds;
+ return rc;
+}
+
+/* SP - CREQ Completion handlers */
+static void bnxt_qplib_service_creq(struct tasklet_struct *t)
+{
+ struct bnxt_qplib_rcfw *rcfw = from_tasklet(rcfw, t, creq.creq_tasklet);
+ struct bnxt_qplib_creq_ctx *creq = &rcfw->creq;
+ u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
+ struct bnxt_qplib_hwq *hwq = &creq->hwq;
+ struct creq_base *creqe;
+ u32 num_wakeup = 0;
+ u32 hw_polled = 0;
+
+ /* Service the CREQ until budget is over */
+ spin_lock_bh(&hwq->lock);
+ while (budget > 0) {
+ creqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
+ if (!CREQ_CMP_VALID(creqe, creq->creq_db.dbinfo.flags))
+ break;
+ /* The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
+ rcfw->cmdq.last_seen = jiffies;
+
+ type = creqe->type & CREQ_BASE_TYPE_MASK;
+ switch (type) {
+ case CREQ_BASE_TYPE_QP_EVENT:
+ bnxt_qplib_process_qp_event
+ (rcfw, (struct creq_qp_event *)creqe,
+ &num_wakeup);
+ creq->stats.creq_qp_event_processed++;
+ break;
+ case CREQ_BASE_TYPE_FUNC_EVENT:
+ if (!bnxt_qplib_process_func_event
+ (rcfw, (struct creq_func_event *)creqe))
+ creq->stats.creq_func_event_processed++;
+ else
+ dev_warn(&rcfw->pdev->dev,
+ "aeqe:%#x Not handled\n", type);
+ break;
+ default:
+ if (type != ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT)
+ dev_warn(&rcfw->pdev->dev,
+ "creqe with event 0x%x not handled\n",
+ type);
+ break;
+ }
+ budget--;
+ hw_polled++;
+ bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
+ 1, &creq->creq_db.dbinfo.flags);
+ }
+
+ if (hw_polled)
+ bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo,
+ rcfw->res->cctx, true);
+ spin_unlock_bh(&hwq->lock);
+ if (num_wakeup)
+ wake_up_nr(&rcfw->cmdq.waitq, num_wakeup);
+}
+
+static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
+{
+ struct bnxt_qplib_rcfw *rcfw = dev_instance;
+ struct bnxt_qplib_creq_ctx *creq;
+ struct bnxt_qplib_hwq *hwq;
+ u32 sw_cons;
+
+ creq = &rcfw->creq;
+ hwq = &creq->hwq;
+ /* Prefetch the CREQ element */
+ sw_cons = HWQ_CMP(hwq->cons, hwq);
+ prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));
+
+ tasklet_schedule(&creq->creq_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/* RCFW */
+int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
+{
+ struct creq_deinitialize_fw_resp resp = {};
+ struct cmdq_deinitialize_fw req = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_DEINITIALIZE_FW,
+ sizeof(req));
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL,
+ sizeof(req), sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ return rc;
+
+ clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
+ return 0;
+}
+
+int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_ctx *ctx, int is_virtfn)
+{
+ struct creq_initialize_fw_resp resp = {};
+ struct cmdq_initialize_fw req = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ u16 flags = 0;
+ u8 pgsz, lvl;
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_INITIALIZE_FW,
+ sizeof(req));
+ /* Supply (log-base-2-of-host-page-size - base-page-shift)
+ * to bono to adjust the doorbell page sizes.
+ */
+ req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
+ RCFW_DBR_BASE_PAGE_SHIFT);
+ /*
+ * Gen P5 devices don't require this allocation,
+ * as the L2 driver does the same for RoCE as well.
+ * Also, VFs need not set up the HW context area; the PF
+ * sets up this area for the VF. Skip the
+ * HW programming
+ */
+ if (is_virtfn || bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx))
+ goto skip_ctx_setup;
+
+ lvl = ctx->qpc_tbl.level;
+ pgsz = bnxt_qplib_base_pg_size(&ctx->qpc_tbl);
+ req.qpc_pg_size_qpc_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ lvl;
+ lvl = ctx->mrw_tbl.level;
+ pgsz = bnxt_qplib_base_pg_size(&ctx->mrw_tbl);
+ req.mrw_pg_size_mrw_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ lvl;
+ lvl = ctx->srqc_tbl.level;
+ pgsz = bnxt_qplib_base_pg_size(&ctx->srqc_tbl);
+ req.srq_pg_size_srq_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ lvl;
+ lvl = ctx->cq_tbl.level;
+ pgsz = bnxt_qplib_base_pg_size(&ctx->cq_tbl);
+ req.cq_pg_size_cq_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ lvl;
+ lvl = ctx->tim_tbl.level;
+ pgsz = bnxt_qplib_base_pg_size(&ctx->tim_tbl);
+ req.tim_pg_size_tim_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ lvl;
+ lvl = ctx->tqm_ctx.pde.level;
+ pgsz = bnxt_qplib_base_pg_size(&ctx->tqm_ctx.pde);
+ req.tqm_pg_size_tqm_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ lvl;
+ req.qpc_page_dir =
+ cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
+ req.mrw_page_dir =
+ cpu_to_le64(ctx->mrw_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
+ req.srq_page_dir =
+ cpu_to_le64(ctx->srqc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
+ req.cq_page_dir =
+ cpu_to_le64(ctx->cq_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
+ req.tim_page_dir =
+ cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
+ req.tqm_page_dir =
+ cpu_to_le64(ctx->tqm_ctx.pde.pbl[PBL_LVL_0].pg_map_arr[0]);
+
+ req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements);
+ req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements);
+ req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
+ req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);
+
+skip_ctx_setup:
+ if (BNXT_RE_HW_RETX(rcfw->res->dattr->dev_cap_flags))
+ flags |= CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED;
+ if (_is_optimize_modify_qp_supported(rcfw->res->dattr->dev_cap_flags2))
+ flags |= CMDQ_INITIALIZE_FW_FLAGS_OPTIMIZE_MODIFY_QP_SUPPORTED;
+ if (rcfw->res->en_dev->flags & BNXT_EN_FLAG_ROCE_VF_RES_MGMT)
+ flags |= CMDQ_INITIALIZE_FW_FLAGS_L2_VF_RESOURCE_MGMT;
+ if (bnxt_qplib_roce_mirror_supported(rcfw->res->cctx)) {
+ flags |= CMDQ_INITIALIZE_FW_FLAGS_MIRROR_ON_ROCE_SUPPORTED;
+ rcfw->roce_mirror = true;
+ }
+ req.flags |= cpu_to_le16(flags);
+ req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ return rc;
+ set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
+ return 0;
+}
+
+void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
+{
+ kfree(rcfw->crsqe_tbl);
+ bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq);
+ bnxt_qplib_free_hwq(rcfw->res, &rcfw->creq.hwq);
+ rcfw->pdev = NULL;
+}
+
+int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_ctx *ctx)
+{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_sg_info sginfo = {};
+ struct bnxt_qplib_cmdq_ctx *cmdq;
+ struct bnxt_qplib_creq_ctx *creq;
+
+ rcfw->pdev = res->pdev;
+ cmdq = &rcfw->cmdq;
+ creq = &rcfw->creq;
+ rcfw->res = res;
+
+ sginfo.pgsize = PAGE_SIZE;
+ sginfo.pgshft = PAGE_SHIFT;
+
+ hwq_attr.sginfo = &sginfo;
+ hwq_attr.res = rcfw->res;
+ hwq_attr.depth = BNXT_QPLIB_CREQE_MAX_CNT;
+ hwq_attr.stride = BNXT_QPLIB_CREQE_UNITS;
+ hwq_attr.type = bnxt_qplib_get_hwq_type(res);
+
+ if (bnxt_qplib_alloc_init_hwq(&creq->hwq, &hwq_attr)) {
+ dev_err(&rcfw->pdev->dev,
+ "HW channel CREQ allocation failed\n");
+ goto fail;
+ }
+
+ rcfw->cmdq_depth = BNXT_QPLIB_CMDQE_MAX_CNT;
+
+ sginfo.pgsize = bnxt_qplib_cmdqe_page_size(rcfw->cmdq_depth);
+ hwq_attr.depth = rcfw->cmdq_depth & 0x7FFFFFFF;
+ hwq_attr.stride = BNXT_QPLIB_CMDQE_UNITS;
+ hwq_attr.type = HWQ_TYPE_CTX;
+ if (bnxt_qplib_alloc_init_hwq(&cmdq->hwq, &hwq_attr)) {
+ dev_err(&rcfw->pdev->dev,
+ "HW channel CMDQ allocation failed\n");
+ goto fail;
+ }
+
+ rcfw->crsqe_tbl = kcalloc(cmdq->hwq.max_elements,
+ sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
+ if (!rcfw->crsqe_tbl)
+ goto fail;
+
+ spin_lock_init(&rcfw->tbl_lock);
+
+ rcfw->max_timeout = res->cctx->hwrm_cmd_max_timeout;
+
+ return 0;
+
+fail:
+ bnxt_qplib_free_rcfw_channel(rcfw);
+ return -ENOMEM;
+}
+
+void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
+{
+ struct bnxt_qplib_creq_ctx *creq;
+
+ creq = &rcfw->creq;
+
+ if (!creq->requested)
+ return;
+
+ creq->requested = false;
+ /* Mask h/w interrupts */
+ bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, false);
+ /* Sync with last running IRQ-handler */
+ synchronize_irq(creq->msix_vec);
+ free_irq(creq->msix_vec, rcfw);
+ kfree(creq->irq_name);
+ creq->irq_name = NULL;
+ atomic_set(&rcfw->rcfw_intr_enabled, 0);
+ if (kill)
+ tasklet_kill(&creq->creq_tasklet);
+ tasklet_disable(&creq->creq_tasklet);
+}
+
+void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
+{
+ struct bnxt_qplib_creq_ctx *creq;
+ struct bnxt_qplib_cmdq_ctx *cmdq;
+
+ creq = &rcfw->creq;
+ cmdq = &rcfw->cmdq;
+ /* Make sure the HW channel is stopped! */
+ bnxt_qplib_rcfw_stop_irq(rcfw, true);
+
+ iounmap(cmdq->cmdq_mbox.reg.bar_reg);
+ iounmap(creq->creq_db.reg.bar_reg);
+
+ cmdq->cmdq_mbox.reg.bar_reg = NULL;
+ creq->creq_db.reg.bar_reg = NULL;
+ creq->aeq_handler = NULL;
+ creq->msix_vec = 0;
+}
+
+int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
+ bool need_init)
+{
+ struct bnxt_qplib_creq_ctx *creq;
+ struct bnxt_qplib_res *res;
+ int rc;
+
+ creq = &rcfw->creq;
+ res = rcfw->res;
+
+ if (creq->requested)
+ return -EFAULT;
+
+ creq->msix_vec = msix_vector;
+ if (need_init)
+ tasklet_setup(&creq->creq_tasklet, bnxt_qplib_service_creq);
+ else
+ tasklet_enable(&creq->creq_tasklet);
+
+ creq->irq_name = kasprintf(GFP_KERNEL, "bnxt_re-creq@pci:%s",
+ pci_name(res->pdev));
+ if (!creq->irq_name)
+ return -ENOMEM;
+ rc = request_irq(creq->msix_vec, bnxt_qplib_creq_irq, 0,
+ creq->irq_name, rcfw);
+ if (rc) {
+ kfree(creq->irq_name);
+ creq->irq_name = NULL;
+ tasklet_disable(&creq->creq_tasklet);
+ return rc;
+ }
+ creq->requested = true;
+
+ bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, res->cctx, true);
+ atomic_inc(&rcfw->rcfw_intr_enabled);
+
+ return 0;
+}
+
+static int bnxt_qplib_map_cmdq_mbox(struct bnxt_qplib_rcfw *rcfw)
+{
+ struct bnxt_qplib_cmdq_mbox *mbox;
+ resource_size_t bar_reg;
+ struct pci_dev *pdev;
+
+ pdev = rcfw->pdev;
+ mbox = &rcfw->cmdq.cmdq_mbox;
+
+ mbox->reg.bar_id = RCFW_COMM_PCI_BAR_REGION;
+ mbox->reg.len = RCFW_COMM_SIZE;
+ mbox->reg.bar_base = pci_resource_start(pdev, mbox->reg.bar_id);
+ if (!mbox->reg.bar_base) {
+ dev_err(&pdev->dev,
+ "QPLIB: CMDQ BAR region %d resc start is 0!\n",
+ mbox->reg.bar_id);
+ return -ENOMEM;
+ }
+
+ bar_reg = mbox->reg.bar_base + RCFW_COMM_BASE_OFFSET;
+ mbox->reg.len = RCFW_COMM_SIZE;
+ mbox->reg.bar_reg = ioremap(bar_reg, mbox->reg.len);
+ if (!mbox->reg.bar_reg) {
+ dev_err(&pdev->dev,
+ "QPLIB: CMDQ BAR region %d mapping failed\n",
+ mbox->reg.bar_id);
+ return -ENOMEM;
+ }
+
+ mbox->prod = (void __iomem *)(mbox->reg.bar_reg +
+ RCFW_PF_VF_COMM_PROD_OFFSET);
+ mbox->db = (void __iomem *)(mbox->reg.bar_reg + RCFW_COMM_TRIG_OFFSET);
+ return 0;
+}
+
+static int bnxt_qplib_map_creq_db(struct bnxt_qplib_rcfw *rcfw, u32 reg_offt)
+{
+ struct bnxt_qplib_creq_db *creq_db;
+ resource_size_t bar_reg;
+ struct pci_dev *pdev;
+
+ pdev = rcfw->pdev;
+ creq_db = &rcfw->creq.creq_db;
+
+ creq_db->dbinfo.flags = 0;
+ creq_db->reg.bar_id = RCFW_COMM_CONS_PCI_BAR_REGION;
+ creq_db->reg.bar_base = pci_resource_start(pdev, creq_db->reg.bar_id);
+ if (!creq_db->reg.bar_id)
+ dev_err(&pdev->dev,
+ "QPLIB: CREQ BAR region %d resc start is 0!",
+ creq_db->reg.bar_id);
+
+ bar_reg = creq_db->reg.bar_base + reg_offt;
+ /* Unconditionally map 8 bytes to support 57500 series */
+ creq_db->reg.len = 8;
+ creq_db->reg.bar_reg = ioremap(bar_reg, creq_db->reg.len);
+ if (!creq_db->reg.bar_reg) {
+ dev_err(&pdev->dev,
+ "QPLIB: CREQ BAR region %d mapping failed",
+ creq_db->reg.bar_id);
+ return -ENOMEM;
+ }
+ creq_db->dbinfo.db = creq_db->reg.bar_reg;
+ creq_db->dbinfo.hwq = &rcfw->creq.hwq;
+ creq_db->dbinfo.xid = rcfw->creq.ring_id;
+ return 0;
+}
+
+static void bnxt_qplib_start_rcfw(struct bnxt_qplib_rcfw *rcfw)
+{
+ struct bnxt_qplib_cmdq_ctx *cmdq;
+ struct bnxt_qplib_creq_ctx *creq;
+ struct bnxt_qplib_cmdq_mbox *mbox;
+ struct cmdq_init init = {0};
+
+ cmdq = &rcfw->cmdq;
+ creq = &rcfw->creq;
+ mbox = &cmdq->cmdq_mbox;
+
+ init.cmdq_pbl = cpu_to_le64(cmdq->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
+ init.cmdq_size_cmdq_lvl =
+ cpu_to_le16(((rcfw->cmdq_depth <<
+ CMDQ_INIT_CMDQ_SIZE_SFT) &
+ CMDQ_INIT_CMDQ_SIZE_MASK) |
+ ((cmdq->hwq.level <<
+ CMDQ_INIT_CMDQ_LVL_SFT) &
+ CMDQ_INIT_CMDQ_LVL_MASK));
+ init.creq_ring_id = cpu_to_le16(creq->ring_id);
+ /* Write to the Bono mailbox register */
+ __iowrite32_copy(mbox->reg.bar_reg, &init, sizeof(init) / 4);
+}
+
+int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw,
+ int msix_vector,
+ int cp_bar_reg_off,
+ aeq_handler_t aeq_handler)
+{
+ struct bnxt_qplib_cmdq_ctx *cmdq;
+ struct bnxt_qplib_creq_ctx *creq;
+ int rc;
+
+ cmdq = &rcfw->cmdq;
+ creq = &rcfw->creq;
+
+ /* Clear to defaults */
+
+ cmdq->seq_num = 0;
+ set_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
+ init_waitqueue_head(&cmdq->waitq);
+
+ creq->stats.creq_qp_event_processed = 0;
+ creq->stats.creq_func_event_processed = 0;
+ creq->aeq_handler = aeq_handler;
+
+ rc = bnxt_qplib_map_cmdq_mbox(rcfw);
+ if (rc)
+ return rc;
+
+ rc = bnxt_qplib_map_creq_db(rcfw, cp_bar_reg_off);
+ if (rc)
+ return rc;
+
+ rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
+ if (rc) {
+ dev_err(&rcfw->pdev->dev,
+ "Failed to request IRQ for CREQ rc = 0x%x\n", rc);
+ bnxt_qplib_disable_rcfw_channel(rcfw);
+ return rc;
+ }
+
+ sema_init(&rcfw->rcfw_inflight, RCFW_CMD_NON_BLOCKING_SHADOW_QD);
+ bnxt_qplib_start_rcfw(rcfw);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
new file mode 100644
index 000000000000..988c89b4232e
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -0,0 +1,294 @@
+/*
+ * Broadcom NetXtreme-E RoCE driver.
+ *
+ * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: RDMA Controller HW interface (header)
+ */
+
+#ifndef __BNXT_QPLIB_RCFW_H__
+#define __BNXT_QPLIB_RCFW_H__
+
+#include "qplib_tlv.h"
+
+#define RCFW_CMDQ_TRIG_VAL 1
+#define RCFW_COMM_PCI_BAR_REGION 0
+#define RCFW_COMM_CONS_PCI_BAR_REGION 2
+#define RCFW_COMM_BASE_OFFSET 0x600
+#define RCFW_PF_VF_COMM_PROD_OFFSET 0xc
+#define RCFW_COMM_TRIG_OFFSET 0x100
+#define RCFW_COMM_SIZE 0x104
+
+#define RCFW_DBR_PCI_BAR_REGION 2
+#define RCFW_DBR_BASE_PAGE_SHIFT 12
+#define RCFW_FW_STALL_MAX_TIMEOUT 40
+
+/* Cmdq contains a fixed number of 16-byte slots */
+struct bnxt_qplib_cmdqe {
+ u8 data[16];
+};
+
+#define BNXT_QPLIB_CMDQE_UNITS sizeof(struct bnxt_qplib_cmdqe)
+
+static inline void bnxt_qplib_rcfw_cmd_prep(struct cmdq_base *req,
+ u8 opcode, u8 cmd_size)
+{
+ req->opcode = opcode;
+ req->cmd_size = cmd_size;
+}
+
+/* Shadow queue depth for non-blocking commands */
+#define RCFW_CMD_NON_BLOCKING_SHADOW_QD 64
+#define RCFW_CMD_WAIT_TIME_MS 20000 /* 20 Seconds timeout */
+
+/* CMDQ elements */
+#define BNXT_QPLIB_CMDQE_MAX_CNT 8192
+#define BNXT_QPLIB_CMDQE_BYTES(depth) ((depth) * BNXT_QPLIB_CMDQE_UNITS)
+
+static inline u32 bnxt_qplib_cmdqe_npages(u32 depth)
+{
+ u32 npages;
+
+ npages = BNXT_QPLIB_CMDQE_BYTES(depth) / PAGE_SIZE;
+ if (BNXT_QPLIB_CMDQE_BYTES(depth) % PAGE_SIZE)
+ npages++;
+ return npages;
+}
+
+static inline u32 bnxt_qplib_cmdqe_page_size(u32 depth)
+{
+ return (bnxt_qplib_cmdqe_npages(depth) * PAGE_SIZE);
+}
+
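A standalone C sketch (not part of the patch) of the page-count round-up in bnxt_qplib_cmdqe_npages(); the 4 KiB page size is an illustrative stand-in for PAGE_SIZE.

#include <stdio.h>

#define DEMO_PAGE_SIZE		4096
#define DEMO_CMDQE_UNITS	16
#define DEMO_BYTES(depth)	((depth) * DEMO_CMDQE_UNITS)

static unsigned int demo_npages(unsigned int depth)
{
	unsigned int npages = DEMO_BYTES(depth) / DEMO_PAGE_SIZE;

	if (DEMO_BYTES(depth) % DEMO_PAGE_SIZE)
		npages++;	/* round a partial page up to a full page */
	return npages;
}

int main(void)
{
	/* 8192 entries * 16 bytes = 128 KiB = 32 pages of 4 KiB */
	printf("%u\n", demo_npages(8192));
	/* 100 entries * 16 bytes = 1600 bytes, still needs one full page */
	printf("%u\n", demo_npages(100));
	return 0;
}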
+/* Get the number of command units required for the req. The
+ * function returns the correct value only if called before
+ * bnxt_qplib_set_cmd_slots(), which rewrites req->cmd_size
+ */
+static inline u32 bnxt_qplib_get_cmd_slots(struct cmdq_base *req)
+{
+ u32 cmd_units = 0;
+
+ if (HAS_TLV_HEADER(req)) {
+ struct roce_tlv *tlv_req = (struct roce_tlv *)req;
+
+ cmd_units = tlv_req->total_size;
+ } else {
+ cmd_units = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
+ BNXT_QPLIB_CMDQE_UNITS;
+ }
+
+ return cmd_units;
+}
+
+static inline u32 bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
+{
+ u32 cmd_byte = 0;
+
+ if (HAS_TLV_HEADER(req)) {
+ struct roce_tlv *tlv_req = (struct roce_tlv *)req;
+
+ cmd_byte = tlv_req->total_size * BNXT_QPLIB_CMDQE_UNITS;
+ } else {
+ cmd_byte = req->cmd_size;
+ req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
+ BNXT_QPLIB_CMDQE_UNITS;
+ }
+
+ return cmd_byte;
+}
+
+#define RCFW_MAX_COOKIE_VALUE (BNXT_QPLIB_CMDQE_MAX_CNT - 1)
+#define RCFW_CMD_IS_BLOCKING 0x8000
+
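A standalone C sketch (not part of the patch) of the cookie layout: the low bits index crsqe_tbl and bit 15 (RCFW_CMD_IS_BLOCKING) marks a blocking caller, mirroring how __send_message() encodes it and bnxt_qplib_process_qp_event() decodes it.

#include <stdint.h>
#include <stdio.h>

#define DEMO_CMDQE_MAX_CNT	8192	/* same depth as BNXT_QPLIB_CMDQE_MAX_CNT */
#define DEMO_MAX_COOKIE		(DEMO_CMDQE_MAX_CNT - 1)
#define DEMO_IS_BLOCKING	0x8000

int main(void)
{
	/* sequence number 9000 wraps into slot 808; mark the caller as blocking */
	uint16_t cookie = (9000 & DEMO_MAX_COOKIE) | DEMO_IS_BLOCKING;

	printf("blocking=%d slot=%d\n",
	       !!(cookie & DEMO_IS_BLOCKING), cookie & DEMO_MAX_COOKIE);
	return 0;
}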
+#define HWRM_VERSION_DEV_ATTR_MAX_DPI 0x1000A0000000DULL
+/* HWRM version 1.10.3.18 */
+#define HWRM_VERSION_READ_CTX 0x1000A00030012
+
+/* Crsq buffer is 1024 bytes */
+struct bnxt_qplib_crsbe {
+ u8 data[1024];
+};
+
+/* CREQ */
+/* Allocate 1 per QP for async error notification for now */
+#define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024)
+#define BNXT_QPLIB_CREQE_UNITS 16 /* 16-Bytes per prod unit */
+#define CREQ_CMP_VALID(hdr, pass) \
+ (!!((hdr)->v & CREQ_BASE_V) == \
+ !((pass) & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))
+#define CREQ_ENTRY_POLL_BUDGET 0x100
+
+/* HWQ */
+typedef int (*aeq_handler_t)(struct bnxt_qplib_rcfw *, void *, void *);
+
+struct bnxt_qplib_crsqe {
+ struct creq_qp_event *resp;
+ u32 req_size;
+ /* Free slots at the time of submission */
+ u32 free_slots;
+ u8 opcode;
+ bool is_waiter_alive;
+ bool is_internal_cmd;
+ bool is_in_used;
+};
+
+struct bnxt_qplib_rcfw_sbuf {
+ void *sb;
+ dma_addr_t dma_addr;
+ u32 size;
+};
+
+struct bnxt_qplib_qp_node {
+ u32 qp_id; /* QP id */
+ void *qp_handle; /* ptr to qplib_qp */
+};
+
+#define BNXT_QPLIB_OOS_COUNT_MASK 0xFFFFFFFF
+
+#define FIRMWARE_INITIALIZED_FLAG (0)
+#define FIRMWARE_FIRST_FLAG (31)
+#define FIRMWARE_STALL_DETECTED (3)
+#define ERR_DEVICE_DETACHED (4)
+
+struct bnxt_qplib_cmdq_mbox {
+ struct bnxt_qplib_reg_desc reg;
+ void __iomem *prod;
+ void __iomem *db;
+};
+
+struct bnxt_qplib_cmdq_ctx {
+ struct bnxt_qplib_hwq hwq;
+ struct bnxt_qplib_cmdq_mbox cmdq_mbox;
+ wait_queue_head_t waitq;
+ unsigned long flags;
+ unsigned long last_seen;
+ u32 seq_num;
+};
+
+struct bnxt_qplib_creq_db {
+ struct bnxt_qplib_reg_desc reg;
+ struct bnxt_qplib_db_info dbinfo;
+};
+
+struct bnxt_qplib_creq_stat {
+ u64 creq_qp_event_processed;
+ u64 creq_func_event_processed;
+};
+
+struct bnxt_qplib_creq_ctx {
+ struct bnxt_qplib_hwq hwq;
+ struct bnxt_qplib_creq_db creq_db;
+ struct bnxt_qplib_creq_stat stats;
+ struct tasklet_struct creq_tasklet;
+ aeq_handler_t aeq_handler;
+ u16 ring_id;
+ int msix_vec;
+ bool requested; /* irq handler installed */
+ char *irq_name;
+};
+
+/* RCFW Communication Channels */
+struct bnxt_qplib_rcfw {
+ struct pci_dev *pdev;
+ struct bnxt_qplib_res *res;
+ struct bnxt_qplib_cmdq_ctx cmdq;
+ struct bnxt_qplib_creq_ctx creq;
+ struct bnxt_qplib_crsqe *crsqe_tbl;
+ int qp_tbl_size;
+ struct bnxt_qplib_qp_node *qp_tbl;
+ /* To synchronize the qp-handle hash table */
+ spinlock_t tbl_lock;
+ u64 oos_prev;
+ u32 init_oos_stats;
+ u32 cmdq_depth;
+ atomic_t rcfw_intr_enabled;
+ struct semaphore rcfw_inflight;
+ atomic_t timeout_send;
+ /* cached from chip cctx for quick reference in slow path */
+ u16 max_timeout;
+ bool roce_mirror;
+};
+
+struct bnxt_qplib_cmdqmsg {
+ struct cmdq_base *req;
+ struct creq_base *resp;
+ void *sb;
+ u32 req_sz;
+ u32 res_sz;
+ u8 block;
+};
+
+static inline void bnxt_qplib_fill_cmdqmsg(struct bnxt_qplib_cmdqmsg *msg,
+ void *req, void *resp, void *sb,
+ u32 req_sz, u32 res_sz, u8 block)
+{
+ msg->req = req;
+ msg->resp = resp;
+ msg->sb = sb;
+ msg->req_sz = req_sz;
+ msg->res_sz = res_sz;
+ msg->block = block;
+}
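+/* Usage sketch: callers prepare a request with bnxt_qplib_rcfw_cmd_prep(),
+ * wrap it with bnxt_qplib_fill_cmdqmsg() and then submit it through
+ * bnxt_qplib_rcfw_send_message(); see bnxt_qplib_query_version() in
+ * qplib_sp.c for an example.
+ */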
+
+void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
+int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_ctx *ctx);
+void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill);
+void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
+int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
+ bool need_init);
+int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw,
+ int msix_vector,
+ int cp_bar_reg_off,
+ aeq_handler_t aeq_handler);
+
+struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
+ struct bnxt_qplib_rcfw *rcfw,
+ u32 size);
+void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_rcfw_sbuf *sbuf);
+int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_cmdqmsg *msg);
+
+int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
+int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_ctx *ctx, int is_virtfn);
+void bnxt_qplib_mark_qp_error(void *qp_handle);
+
+static inline u32 map_qp_id_to_tbl_indx(u32 qid, struct bnxt_qplib_rcfw *rcfw)
+{
+ /* Last index of the qp_tbl is for QP1, i.e. qp_tbl_size - 1 */
+ return (qid == 1) ? rcfw->qp_tbl_size - 1 : (qid % (rcfw->qp_tbl_size - 2));
+}
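+/* For example, with qp_tbl_size = 10, qid 1 (QP1) maps to index 9 and any
+ * other qid maps to qid % 8 (indices 0-7), keeping the last index reserved
+ * for QP1.
+ */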
+#endif /* __BNXT_QPLIB_RCFW_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
new file mode 100644
index 000000000000..875d7b52c06a
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -0,0 +1,955 @@
+/*
+ * Broadcom NetXtreme-E RoCE driver.
+ *
+ * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: QPLib resource manager
+ */
+
+#define dev_fmt(fmt) "QPLIB: " fmt
+
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/inetdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_umem.h>
+
+#include "roce_hsi.h"
+#include "qplib_res.h"
+#include "qplib_sp.h"
+#include "qplib_rcfw.h"
+
+/* PBL */
+static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
+ bool is_umem)
+{
+ struct pci_dev *pdev = res->pdev;
+ int i;
+
+ if (!is_umem) {
+ for (i = 0; i < pbl->pg_count; i++) {
+ if (pbl->pg_arr[i])
+ dma_free_coherent(&pdev->dev, pbl->pg_size,
+ (void *)((unsigned long)
+ pbl->pg_arr[i] &
+ PAGE_MASK),
+ pbl->pg_map_arr[i]);
+ else
+ dev_warn(&pdev->dev,
+ "PBL free pg_arr[%d] empty?!\n", i);
+ pbl->pg_arr[i] = NULL;
+ }
+ }
+ vfree(pbl->pg_arr);
+ pbl->pg_arr = NULL;
+ vfree(pbl->pg_map_arr);
+ pbl->pg_map_arr = NULL;
+ pbl->pg_count = 0;
+ pbl->pg_size = 0;
+}
+
+static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
+ struct bnxt_qplib_sg_info *sginfo)
+{
+ struct ib_block_iter biter;
+ int i = 0;
+
+ rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {
+ pbl->pg_map_arr[i] = rdma_block_iter_dma_address(&biter);
+ pbl->pg_arr[i] = NULL;
+ pbl->pg_count++;
+ i++;
+ }
+}
+
+static int __alloc_pbl(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_pbl *pbl,
+ struct bnxt_qplib_sg_info *sginfo)
+{
+ struct pci_dev *pdev = res->pdev;
+ bool is_umem = false;
+ u32 pages;
+ int i;
+
+ if (sginfo->nopte)
+ return 0;
+ if (sginfo->umem)
+ pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);
+ else
+ pages = sginfo->npages;
+ /* page ptr arrays */
+ pbl->pg_arr = vmalloc_array(pages, sizeof(void *));
+ if (!pbl->pg_arr)
+ return -ENOMEM;
+ memset(pbl->pg_arr, 0, pages * sizeof(void *));
+
+ pbl->pg_map_arr = vmalloc_array(pages, sizeof(dma_addr_t));
+ if (!pbl->pg_map_arr) {
+ vfree(pbl->pg_arr);
+ pbl->pg_arr = NULL;
+ return -ENOMEM;
+ }
+ memset(pbl->pg_map_arr, 0, pages * sizeof(dma_addr_t));
+ pbl->pg_count = 0;
+ pbl->pg_size = sginfo->pgsize;
+
+ if (!sginfo->umem) {
+ for (i = 0; i < pages; i++) {
+ pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
+ pbl->pg_size,
+ &pbl->pg_map_arr[i],
+ GFP_KERNEL);
+ if (!pbl->pg_arr[i])
+ goto fail;
+ pbl->pg_count++;
+ }
+ } else {
+ is_umem = true;
+ bnxt_qplib_fill_user_dma_pages(pbl, sginfo);
+ }
+
+ return 0;
+fail:
+ __free_pbl(res, pbl, is_umem);
+ return -ENOMEM;
+}
+
+/* HWQ */
+void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_hwq *hwq)
+{
+ int i;
+
+ if (!hwq->max_elements)
+ return;
+ if (hwq->level >= PBL_LVL_MAX)
+ return;
+
+ for (i = 0; i < hwq->level + 1; i++) {
+ if (i == hwq->level)
+ __free_pbl(res, &hwq->pbl[i], hwq->is_user);
+ else
+ __free_pbl(res, &hwq->pbl[i], false);
+ }
+
+ hwq->level = PBL_LVL_MAX;
+ hwq->max_elements = 0;
+ hwq->element_size = 0;
+ hwq->prod = 0;
+ hwq->cons = 0;
+ hwq->cp_bit = 0;
+}
+
+/* All HWQs are a power of 2 in size */
+
+int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
+ struct bnxt_qplib_hwq_attr *hwq_attr)
+{
+ u32 npages, aux_slots, pg_size, aux_pages = 0, aux_size = 0;
+ struct bnxt_qplib_sg_info sginfo = {};
+ u32 depth, stride, npbl, npde;
+ dma_addr_t *src_phys_ptr, **dst_virt_ptr;
+ struct bnxt_qplib_res *res;
+ struct pci_dev *pdev;
+ int i, rc, lvl;
+
+ res = hwq_attr->res;
+ pdev = res->pdev;
+ pg_size = hwq_attr->sginfo->pgsize;
+ hwq->level = PBL_LVL_MAX;
+
+ depth = roundup_pow_of_two(hwq_attr->depth);
+ stride = roundup_pow_of_two(hwq_attr->stride);
+ if (hwq_attr->aux_depth) {
+ aux_slots = hwq_attr->aux_depth;
+ aux_size = roundup_pow_of_two(hwq_attr->aux_stride);
+ aux_pages = (aux_slots * aux_size) / pg_size;
+ if ((aux_slots * aux_size) % pg_size)
+ aux_pages++;
+ }
+
+ if (!hwq_attr->sginfo->umem) {
+ hwq->is_user = false;
+ npages = (depth * stride) / pg_size + aux_pages;
+ if ((depth * stride) % pg_size)
+ npages++;
+ if (!npages)
+ return -EINVAL;
+ hwq_attr->sginfo->npages = npages;
+ } else {
+ npages = ib_umem_num_dma_blocks(hwq_attr->sginfo->umem,
+ hwq_attr->sginfo->pgsize);
+ hwq->is_user = true;
+ }
+
+ if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) {
+ /* This request is Level 0, map PTE */
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo);
+ if (rc)
+ goto fail;
+ hwq->level = PBL_LVL_0;
+ goto done;
+ }
+
+ if (npages >= MAX_PBL_LVL_0_PGS) {
+ if (npages > MAX_PBL_LVL_1_PGS) {
+ u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
+ 0 : PTU_PTE_VALID;
+ /* 2 levels of indirection */
+ npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
+ if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
+ npbl++;
+ npde = npbl >> MAX_PDL_LVL_SHIFT;
+ if (npbl % BIT(MAX_PDL_LVL_SHIFT))
+ npde++;
+ /* Alloc PDE pages */
+ sginfo.pgsize = npde * pg_size;
+ sginfo.npages = 1;
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
+ if (rc)
+ goto fail;
+
+ /* Alloc PBL pages */
+ sginfo.npages = npbl;
+ sginfo.pgsize = PAGE_SIZE;
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo);
+ if (rc)
+ goto fail;
+ /* Fill PDL with PBL page pointers */
+ dst_virt_ptr =
+ (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
+ src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
+ for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
+ dst_virt_ptr[0][i] = src_phys_ptr[i] | flag;
+
+ /* Alloc or init PTEs */
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
+ hwq_attr->sginfo);
+ if (rc)
+ goto fail;
+ hwq->level = PBL_LVL_2;
+ if (hwq_attr->sginfo->nopte)
+ goto done;
+ /* Fill PBLs with PTE pointers */
+ dst_virt_ptr =
+ (dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
+ src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
+ for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
+ dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
+ src_phys_ptr[i] | PTU_PTE_VALID;
+ }
+ if (hwq_attr->type == HWQ_TYPE_QUEUE) {
+ /* Mark the last (and next-to-last) page of the queue */
+ i = hwq->pbl[PBL_LVL_2].pg_count;
+ dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
+ PTU_PTE_LAST;
+ if (i > 1)
+ dst_virt_ptr[PTR_PG(i - 2)]
+ [PTR_IDX(i - 2)] |=
+ PTU_PTE_NEXT_TO_LAST;
+ }
+ } else { /* pages < 512 npbl = 1, npde = 0 */
+ u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
+ 0 : PTU_PTE_VALID;
+
+ /* 1 level of indirection */
+ npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
+ if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
+ npbl++;
+ sginfo.npages = npbl;
+ sginfo.pgsize = PAGE_SIZE;
+ /* Alloc PBL page */
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
+ if (rc)
+ goto fail;
+ /* Alloc or init PTEs */
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1],
+ hwq_attr->sginfo);
+ if (rc)
+ goto fail;
+ hwq->level = PBL_LVL_1;
+ if (hwq_attr->sginfo->nopte)
+ goto done;
+ /* Fill PBL with PTE pointers */
+ dst_virt_ptr =
+ (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
+ src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
+ for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
+ dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
+ src_phys_ptr[i] | flag;
+ if (hwq_attr->type == HWQ_TYPE_QUEUE) {
+ /* Mark the last (and next-to-last) page of the queue */
+ i = hwq->pbl[PBL_LVL_1].pg_count;
+ dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
+ PTU_PTE_LAST;
+ if (i > 1)
+ dst_virt_ptr[PTR_PG(i - 2)]
+ [PTR_IDX(i - 2)] |=
+ PTU_PTE_NEXT_TO_LAST;
+ }
+ }
+ }
+done:
+ hwq->prod = 0;
+ hwq->cons = 0;
+ hwq->pdev = pdev;
+ hwq->depth = hwq_attr->depth;
+ hwq->max_elements = hwq->depth;
+ hwq->element_size = stride;
+ hwq->qe_ppg = pg_size / stride;
+ /* For direct access to the elements */
+ lvl = hwq->level;
+ if (hwq_attr->sginfo->nopte && hwq->level)
+ lvl = hwq->level - 1;
+ hwq->pbl_ptr = hwq->pbl[lvl].pg_arr;
+ hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr;
+ spin_lock_init(&hwq->lock);
+
+ return 0;
+fail:
+ bnxt_qplib_free_hwq(res, hwq);
+ return -ENOMEM;
+}
+
+/* Context Tables */
+void bnxt_qplib_free_hwctx(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *ctx)
+{
+ int i;
+
+ bnxt_qplib_free_hwq(res, &ctx->qpc_tbl);
+ bnxt_qplib_free_hwq(res, &ctx->mrw_tbl);
+ bnxt_qplib_free_hwq(res, &ctx->srqc_tbl);
+ bnxt_qplib_free_hwq(res, &ctx->cq_tbl);
+ bnxt_qplib_free_hwq(res, &ctx->tim_tbl);
+ for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
+ bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.qtbl[i]);
+ /* restore original pde level before destroy */
+ ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level;
+ bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde);
+}
+
+static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *ctx)
+{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_sg_info sginfo = {};
+ struct bnxt_qplib_tqm_ctx *tqmctx;
+ int rc;
+ int i;
+
+ tqmctx = &ctx->tqm_ctx;
+
+ sginfo.pgsize = PAGE_SIZE;
+ sginfo.pgshft = PAGE_SHIFT;
+ hwq_attr.sginfo = &sginfo;
+ hwq_attr.res = res;
+ hwq_attr.type = HWQ_TYPE_CTX;
+ hwq_attr.depth = 512;
+ hwq_attr.stride = sizeof(u64);
+ /* Alloc pdl buffer */
+ rc = bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr);
+ if (rc)
+ goto out;
+ /* Save original pdl level */
+ tqmctx->pde_level = tqmctx->pde.level;
+
+ hwq_attr.stride = 1;
+ for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
+ if (!tqmctx->qcount[i])
+ continue;
+ hwq_attr.depth = ctx->qpc_count * tqmctx->qcount[i];
+ rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr);
+ if (rc)
+ goto out;
+ }
+out:
+ return rc;
+}
+
+static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx)
+{
+ struct bnxt_qplib_hwq *tbl;
+ dma_addr_t *dma_ptr;
+ __le64 **pbl_ptr, *ptr;
+ int i, j, k;
+ int fnz_idx = -1;
+ int pg_count;
+
+ pbl_ptr = (__le64 **)ctx->pde.pbl_ptr;
+
+ for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
+ i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
+ tbl = &ctx->qtbl[i];
+ if (!tbl->max_elements)
+ continue;
+ if (fnz_idx == -1)
+ fnz_idx = i; /* first non-zero index */
+ switch (tbl->level) {
+ case PBL_LVL_2:
+ pg_count = tbl->pbl[PBL_LVL_1].pg_count;
+ for (k = 0; k < pg_count; k++) {
+ ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)];
+ dma_ptr = &tbl->pbl[PBL_LVL_1].pg_map_arr[k];
+ *ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID);
+ }
+ break;
+ case PBL_LVL_1:
+ case PBL_LVL_0:
+ default:
+ ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)];
+ *ptr = cpu_to_le64(tbl->pbl[PBL_LVL_0].pg_map_arr[0] |
+ PTU_PTE_VALID);
+ break;
+ }
+ }
+ if (fnz_idx == -1)
+ fnz_idx = 0;
+ /* update pde level as per page table programming */
+ ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 :
+ ctx->qtbl[fnz_idx].level + 1;
+}
+
+static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *ctx)
+{
+ int rc;
+
+ rc = bnxt_qplib_alloc_tqm_rings(res, ctx);
+ if (rc)
+ goto fail;
+
+ bnxt_qplib_map_tqm_pgtbl(&ctx->tqm_ctx);
+fail:
+ return rc;
+}
+
+/*
+ * Routine: bnxt_qplib_alloc_hwctx
+ * Description:
+ * Context tables are memories which are used by the chip fw.
+ * The 6 tables defined are:
+ * QPC ctx - holds QP states
+ * MRW ctx - holds memory region and window
+ * SRQ ctx - holds shared RQ states
+ * CQ ctx - holds completion queue states
+ * TQM ctx - holds Tx Queue Manager context
+ * TIM ctx - holds timer context
+ * Depending on the size of the table requested, either a single-page
+ * Buffer List or a 1-to-2-stage indirection Page Directory List plus
+ * PBL is used.
+ * The table is mapped as follows:
+ * For 0 < ctx size <= 1 page, 0 levels of indirection are used
+ * For 1 page < ctx size <= 512 pages, 1 level of indirection is used
+ * For 512 pages < ctx size <= MAX, 2 levels of indirection are used
+ * Returns:
+ * 0 if success, else -ERRORS
+ */
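+/* Worked example, assuming 4 KiB pages: if qpc_count were 64K entries of
+ * BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE (448) bytes, the table would span 7168
+ * pages; since a single PBL page can only map 512 pages, two levels of
+ * indirection (PDE + PBL + PTE) would be used.
+ */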
+int bnxt_qplib_alloc_hwctx(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *ctx)
+{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_sg_info sginfo = {};
+ int rc;
+
+ /* QPC Tables */
+ sginfo.pgsize = PAGE_SIZE;
+ sginfo.pgshft = PAGE_SHIFT;
+ hwq_attr.sginfo = &sginfo;
+
+ hwq_attr.res = res;
+ hwq_attr.depth = ctx->qpc_count;
+ hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE;
+ hwq_attr.type = HWQ_TYPE_CTX;
+ rc = bnxt_qplib_alloc_init_hwq(&ctx->qpc_tbl, &hwq_attr);
+ if (rc)
+ goto fail;
+
+ /* MRW Tables */
+ hwq_attr.depth = ctx->mrw_count;
+ hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE;
+ rc = bnxt_qplib_alloc_init_hwq(&ctx->mrw_tbl, &hwq_attr);
+ if (rc)
+ goto fail;
+
+ /* SRQ Tables */
+ hwq_attr.depth = ctx->srqc_count;
+ hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE;
+ rc = bnxt_qplib_alloc_init_hwq(&ctx->srqc_tbl, &hwq_attr);
+ if (rc)
+ goto fail;
+
+ /* CQ Tables */
+ hwq_attr.depth = ctx->cq_count;
+ hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE;
+ rc = bnxt_qplib_alloc_init_hwq(&ctx->cq_tbl, &hwq_attr);
+ if (rc)
+ goto fail;
+
+ /* TQM Buffer */
+ rc = bnxt_qplib_setup_tqm_rings(res, ctx);
+ if (rc)
+ goto fail;
+ /* TIM Buffer */
+ ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
+ hwq_attr.depth = ctx->qpc_count * 16;
+ hwq_attr.stride = 1;
+ rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ bnxt_qplib_free_hwctx(res, ctx);
+ return rc;
+}
+
+static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_sgid_tbl *sgid_tbl)
+{
+ kfree(sgid_tbl->tbl);
+ kfree(sgid_tbl->hw_id);
+ kfree(sgid_tbl->ctx);
+ kfree(sgid_tbl->vlan);
+ sgid_tbl->tbl = NULL;
+ sgid_tbl->hw_id = NULL;
+ sgid_tbl->ctx = NULL;
+ sgid_tbl->vlan = NULL;
+ sgid_tbl->max = 0;
+ sgid_tbl->active = 0;
+}
+
+static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ u16 max)
+{
+ sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
+ if (!sgid_tbl->tbl)
+ return -ENOMEM;
+
+ sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
+ if (!sgid_tbl->hw_id)
+ goto out_free1;
+
+ sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
+ if (!sgid_tbl->ctx)
+ goto out_free2;
+
+ sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
+ if (!sgid_tbl->vlan)
+ goto out_free3;
+
+ sgid_tbl->max = max;
+ return 0;
+out_free3:
+ kfree(sgid_tbl->ctx);
+ sgid_tbl->ctx = NULL;
+out_free2:
+ kfree(sgid_tbl->hw_id);
+ sgid_tbl->hw_id = NULL;
+out_free1:
+ kfree(sgid_tbl->tbl);
+ sgid_tbl->tbl = NULL;
+ return -ENOMEM;
+};
+
+static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_sgid_tbl *sgid_tbl)
+{
+ int i;
+
+ for (i = 0; i < sgid_tbl->max; i++) {
+ if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
+ sizeof(bnxt_qplib_gid_zero)))
+ bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
+ sgid_tbl->tbl[i].vlan_id, true);
+ }
+ memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
+ memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
+ memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
+ sgid_tbl->active = 0;
+}
+
+static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ struct net_device *netdev)
+{
+ u32 i;
+
+ for (i = 0; i < sgid_tbl->max; i++)
+ sgid_tbl->tbl[i].vlan_id = 0xffff;
+
+ memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
+}
+
+/* PDs */
+int bnxt_qplib_alloc_pd(struct bnxt_qplib_res *res, struct bnxt_qplib_pd *pd)
+{
+ struct bnxt_qplib_pd_tbl *pdt = &res->pd_tbl;
+ u32 bit_num;
+ int rc = 0;
+
+ mutex_lock(&res->pd_tbl_lock);
+ bit_num = find_first_bit(pdt->tbl, pdt->max);
+ if (bit_num == pdt->max) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ /* Found unused PD */
+ clear_bit(bit_num, pdt->tbl);
+ pd->id = bit_num;
+exit:
+ mutex_unlock(&res->pd_tbl_lock);
+ return rc;
+}
+
+int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_pd_tbl *pdt,
+ struct bnxt_qplib_pd *pd)
+{
+ int rc = 0;
+
+ mutex_lock(&res->pd_tbl_lock);
+ if (test_and_set_bit(pd->id, pdt->tbl)) {
+ dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
+ pd->id);
+ rc = -EINVAL;
+ goto exit;
+ }
+ pd->id = 0;
+exit:
+ mutex_unlock(&res->pd_tbl_lock);
+ return rc;
+}
+
+static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
+{
+ kfree(pdt->tbl);
+ pdt->tbl = NULL;
+ pdt->max = 0;
+}
+
+static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_pd_tbl *pdt,
+ u32 max)
+{
+ u32 bytes;
+
+ bytes = max >> 3;
+ if (!bytes)
+ bytes = 1;
+ pdt->tbl = kmalloc(bytes, GFP_KERNEL);
+ if (!pdt->tbl)
+ return -ENOMEM;
+
+ pdt->max = max;
+ memset((u8 *)pdt->tbl, 0xFF, bytes);
+ mutex_init(&res->pd_tbl_lock);
+
+ return 0;
+}
+
+/* DPIs */
+int bnxt_qplib_alloc_dpi(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_dpi *dpi,
+ void *app, u8 type)
+{
+ struct bnxt_qplib_dpi_tbl *dpit = &res->dpi_tbl;
+ struct bnxt_qplib_reg_desc *reg;
+ u32 bit_num;
+ u64 umaddr;
+
+ reg = &dpit->wcreg;
+ mutex_lock(&res->dpi_tbl_lock);
+
+ bit_num = find_first_bit(dpit->tbl, dpit->max);
+ if (bit_num == dpit->max) {
+ mutex_unlock(&res->dpi_tbl_lock);
+ return -ENOMEM;
+ }
+
+ /* Found unused DPI */
+ clear_bit(bit_num, dpit->tbl);
+ dpit->app_tbl[bit_num] = app;
+
+ dpi->bit = bit_num;
+ dpi->dpi = bit_num + (reg->offset - dpit->ucreg.offset) / PAGE_SIZE;
+
+ umaddr = reg->bar_base + reg->offset + bit_num * PAGE_SIZE;
+ dpi->umdbr = umaddr;
+
+ switch (type) {
+ case BNXT_QPLIB_DPI_TYPE_KERNEL:
+ /* privileged dbr was already mapped, just initialize it */
+ dpi->umdbr = dpit->ucreg.bar_base +
+ dpit->ucreg.offset + bit_num * PAGE_SIZE;
+ dpi->dbr = dpit->priv_db;
+ dpi->dpi = dpi->bit;
+ break;
+ case BNXT_QPLIB_DPI_TYPE_WC:
+ dpi->dbr = ioremap_wc(umaddr, PAGE_SIZE);
+ break;
+ default:
+ dpi->dbr = ioremap(umaddr, PAGE_SIZE);
+ break;
+ }
+
+ dpi->type = type;
+ mutex_unlock(&res->dpi_tbl_lock);
+ return 0;
+
+}
+
+int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_dpi *dpi)
+{
+ struct bnxt_qplib_dpi_tbl *dpit = &res->dpi_tbl;
+
+ mutex_lock(&res->dpi_tbl_lock);
+ if (dpi->dpi && dpi->type != BNXT_QPLIB_DPI_TYPE_KERNEL)
+ pci_iounmap(res->pdev, dpi->dbr);
+
+ if (test_and_set_bit(dpi->bit, dpit->tbl)) {
+ dev_warn(&res->pdev->dev,
+ "Freeing an unused DPI? dpi = %d, bit = %d\n",
+ dpi->dpi, dpi->bit);
+ mutex_unlock(&res->dpi_tbl_lock);
+ return -EINVAL;
+ }
+ if (dpit->app_tbl)
+ dpit->app_tbl[dpi->bit] = NULL;
+ memset(dpi, 0, sizeof(*dpi));
+ mutex_unlock(&res->dpi_tbl_lock);
+ return 0;
+}
+
+static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_dpi_tbl *dpit)
+{
+ kfree(dpit->tbl);
+ kfree(dpit->app_tbl);
+ dpit->tbl = NULL;
+ dpit->app_tbl = NULL;
+ dpit->max = 0;
+}
+
+static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_dev_attr *dev_attr)
+{
+ struct bnxt_qplib_dpi_tbl *dpit;
+ struct bnxt_qplib_reg_desc *reg;
+ unsigned long bar_len;
+ u32 dbr_offset;
+ u32 bytes;
+
+ dpit = &res->dpi_tbl;
+ reg = &dpit->wcreg;
+
+ if (!bnxt_qplib_is_chip_gen_p5_p7(res->cctx)) {
+ /* Offset should come from L2 driver */
+ dbr_offset = dev_attr->l2_db_size;
+ dpit->ucreg.offset = dbr_offset;
+ dpit->wcreg.offset = dbr_offset;
+ }
+
+ bar_len = pci_resource_len(res->pdev, reg->bar_id);
+ dpit->max = (bar_len - reg->offset) / PAGE_SIZE;
+ if (dev_attr->max_dpi)
+ dpit->max = min_t(u32, dpit->max, dev_attr->max_dpi);
+
+ dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
+ if (!dpit->app_tbl)
+ return -ENOMEM;
+
+ bytes = dpit->max >> 3;
+ if (!bytes)
+ bytes = 1;
+
+ dpit->tbl = kmalloc(bytes, GFP_KERNEL);
+ if (!dpit->tbl) {
+ kfree(dpit->app_tbl);
+ dpit->app_tbl = NULL;
+ return -ENOMEM;
+ }
+
+ memset((u8 *)dpit->tbl, 0xFF, bytes);
+ mutex_init(&res->dpi_tbl_lock);
+ dpit->priv_db = dpit->ucreg.bar_reg + dpit->ucreg.offset;
+
+ return 0;
+
+}
+
+/* Stats */
+void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
+ struct bnxt_qplib_stats *stats)
+{
+ if (stats->dma) {
+ dma_free_coherent(&pdev->dev, stats->size,
+ stats->dma, stats->dma_map);
+ }
+ memset(stats, 0, sizeof(*stats));
+ stats->fw_id = -1;
+}
+
+int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+ struct bnxt_qplib_chip_ctx *cctx,
+ struct bnxt_qplib_stats *stats)
+{
+ memset(stats, 0, sizeof(*stats));
+ stats->fw_id = -1;
+ stats->size = cctx->hw_stats_size;
+ stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
+ &stats->dma_map, GFP_KERNEL);
+ if (!stats->dma) {
+ dev_err(&pdev->dev, "Stats DMA allocation failed\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
+{
+ bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
+}
+
+int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
+{
+ bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
+
+ return 0;
+}
+
+void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
+{
+ kfree(res->rcfw->qp_tbl);
+ bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
+ bnxt_qplib_free_pd_tbl(&res->pd_tbl);
+ bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
+}
+
+int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct bnxt_qplib_dev_attr *dev_attr;
+ int rc;
+
+ res->netdev = netdev;
+ dev_attr = res->dattr;
+
+ /* Allocate one extra entry to hold QP1 */
+ rcfw->qp_tbl_size = max_t(u32, BNXT_RE_MAX_QPC_COUNT + 1, dev_attr->max_qp);
+ rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node),
+ GFP_KERNEL);
+ if (!rcfw->qp_tbl)
+ return -ENOMEM;
+
+ rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
+ if (rc)
+ goto fail;
+
+ rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
+ if (rc)
+ goto fail;
+
+ rc = bnxt_qplib_alloc_dpi_tbl(res, dev_attr);
+ if (rc)
+ goto fail;
+
+ return 0;
+fail:
+ bnxt_qplib_free_res(res);
+ return rc;
+}
+
+void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res)
+{
+ struct bnxt_qplib_reg_desc *reg;
+
+ reg = &res->dpi_tbl.ucreg;
+ if (reg->bar_reg)
+ pci_iounmap(res->pdev, reg->bar_reg);
+ reg->bar_reg = NULL;
+ reg->bar_base = 0;
+ reg->len = 0;
+ reg->bar_id = 0;
+}
+
+int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res)
+{
+ struct bnxt_qplib_reg_desc *ucreg;
+ struct bnxt_qplib_reg_desc *wcreg;
+
+ wcreg = &res->dpi_tbl.wcreg;
+ wcreg->bar_id = RCFW_DBR_PCI_BAR_REGION;
+ wcreg->bar_base = pci_resource_start(res->pdev, wcreg->bar_id);
+
+ ucreg = &res->dpi_tbl.ucreg;
+ ucreg->bar_id = RCFW_DBR_PCI_BAR_REGION;
+ ucreg->bar_base = pci_resource_start(res->pdev, ucreg->bar_id);
+ ucreg->len = ucreg->offset + PAGE_SIZE;
+ if (!ucreg->len || ((ucreg->len & (PAGE_SIZE - 1)) != 0)) {
+ dev_err(&res->pdev->dev, "QPLIB: invalid dbr length %d",
+ (int)ucreg->len);
+ return -EINVAL;
+ }
+ ucreg->bar_reg = ioremap(ucreg->bar_base, ucreg->len);
+ if (!ucreg->bar_reg) {
+ dev_err(&res->pdev->dev, "privileged dpi map failed!");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int bnxt_qplib_determine_atomics(struct pci_dev *dev)
+{
+ int comp;
+ u16 ctl2;
+
+ comp = pci_enable_atomic_ops_to_root(dev,
+ PCI_EXP_DEVCAP2_ATOMIC_COMP32);
+ if (comp)
+ return -EOPNOTSUPP;
+ comp = pci_enable_atomic_ops_to_root(dev,
+ PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+ if (comp)
+ return -EOPNOTSUPP;
+ pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctl2);
+ return !(ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
+}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
new file mode 100644
index 000000000000..2ea3b7f232a3
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -0,0 +1,626 @@
+/*
+ * Broadcom NetXtreme-E RoCE driver.
+ *
+ * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: QPLib resource manager (header)
+ */
+
+#ifndef __BNXT_QPLIB_RES_H__
+#define __BNXT_QPLIB_RES_H__
+
+#include "bnxt_ulp.h"
+
+extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
+
+#define CHIP_NUM_57508 0x1750
+#define CHIP_NUM_57504 0x1751
+#define CHIP_NUM_57502 0x1752
+#define CHIP_NUM_58818 0xd818
+#define CHIP_NUM_57608 0x1760
+
+#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT (64 * 1024)
+#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
+#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT_64K (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT_256K (256 * 1024)
+
+#define BNXT_QPLIB_DBR_VALID (0x1UL << 26)
+#define BNXT_QPLIB_DBR_EPOCH_SHIFT 24
+#define BNXT_QPLIB_DBR_TOGGLE_SHIFT 25
+
+struct bnxt_qplib_drv_modes {
+ u8 wqe_mode;
+ bool db_push;
+ bool dbr_pacing;
+ u32 toggle_bits;
+ u8 roce_mirror;
+};
+
+enum bnxt_re_toggle_modes {
+ BNXT_QPLIB_CQ_TOGGLE_BIT = 0x1,
+ BNXT_QPLIB_SRQ_TOGGLE_BIT = 0x2,
+};
+
+struct bnxt_qplib_chip_ctx {
+ u16 chip_num;
+ u8 chip_rev;
+ u8 chip_metal;
+ u16 hw_stats_size;
+ u16 hwrm_cmd_max_timeout;
+ struct bnxt_qplib_drv_modes modes;
+ u64 hwrm_intf_ver;
+ u32 dbr_stat_db_fifo;
+};
+
+struct bnxt_qplib_db_pacing_data {
+ u32 do_pacing;
+ u32 pacing_th;
+ u32 alarm_th;
+ u32 fifo_max_depth;
+ u32 fifo_room_mask;
+ u32 fifo_room_shift;
+ u32 grc_reg_offset;
+ u32 dev_err_state;
+};
+
+#define BNXT_QPLIB_DBR_PF_DB_OFFSET 0x10000
+#define BNXT_QPLIB_DBR_VF_DB_OFFSET 0x4000
+
+#define PTR_CNT_PER_PG (PAGE_SIZE / sizeof(void *))
+#define PTR_MAX_IDX_PER_PG (PTR_CNT_PER_PG - 1)
+#define PTR_PG(x) (((x) & ~PTR_MAX_IDX_PER_PG) / PTR_CNT_PER_PG)
+#define PTR_IDX(x) ((x) & PTR_MAX_IDX_PER_PG)
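+/* Example, assuming 4 KiB pages and 8-byte pointers: PTR_CNT_PER_PG is 512,
+ * so pointer index 1000 resolves to PTR_PG(1000) = 1 and PTR_IDX(1000) = 488.
+ */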
+
+#define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1))
+
+#define HWQ_FREE_SLOTS(hwq) (hwq->max_elements - \
+ ((HWQ_CMP(hwq->prod, hwq)\
+ - HWQ_CMP(hwq->cons, hwq))\
+ & (hwq->max_elements - 1)))
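+/* HWQ_FREE_SLOTS() example: with max_elements = 8, prod = 10 and cons = 7,
+ * three slots are in use and the macro evaluates to 5.
+ */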
+enum bnxt_qplib_hwq_type {
+ HWQ_TYPE_CTX,
+ HWQ_TYPE_QUEUE,
+ HWQ_TYPE_L2_CMPL,
+ HWQ_TYPE_MR
+};
+
+#define MAX_PBL_LVL_0_PGS 1
+#define MAX_PBL_LVL_1_PGS 512
+#define MAX_PBL_LVL_1_PGS_SHIFT 9
+#define MAX_PBL_LVL_1_PGS_FOR_LVL_2 256
+#define MAX_PBL_LVL_2_PGS (256 * 512)
+#define MAX_PDL_LVL_SHIFT 9
+
+enum bnxt_qplib_pbl_lvl {
+ PBL_LVL_0,
+ PBL_LVL_1,
+ PBL_LVL_2,
+ PBL_LVL_MAX
+};
+
+#define ROCE_PG_SIZE_4K (4 * 1024)
+#define ROCE_PG_SIZE_8K (8 * 1024)
+#define ROCE_PG_SIZE_64K (64 * 1024)
+#define ROCE_PG_SIZE_2M (2 * 1024 * 1024)
+#define ROCE_PG_SIZE_8M (8 * 1024 * 1024)
+#define ROCE_PG_SIZE_1G (1024 * 1024 * 1024)
+
+enum bnxt_qplib_hwrm_pg_size {
+ BNXT_QPLIB_HWRM_PG_SIZE_4K = 0,
+ BNXT_QPLIB_HWRM_PG_SIZE_8K = 1,
+ BNXT_QPLIB_HWRM_PG_SIZE_64K = 2,
+ BNXT_QPLIB_HWRM_PG_SIZE_2M = 3,
+ BNXT_QPLIB_HWRM_PG_SIZE_8M = 4,
+ BNXT_QPLIB_HWRM_PG_SIZE_1G = 5,
+};
+
+struct bnxt_qplib_reg_desc {
+ u8 bar_id;
+ resource_size_t bar_base;
+ unsigned long offset;
+ void __iomem *bar_reg;
+ size_t len;
+};
+
+struct bnxt_qplib_pbl {
+ u32 pg_count;
+ u32 pg_size;
+ void **pg_arr;
+ dma_addr_t *pg_map_arr;
+};
+
+struct bnxt_qplib_sg_info {
+ struct ib_umem *umem;
+ u32 npages;
+ u32 pgshft;
+ u32 pgsize;
+ bool nopte;
+};
+
+struct bnxt_qplib_hwq_attr {
+ struct bnxt_qplib_res *res;
+ struct bnxt_qplib_sg_info *sginfo;
+ enum bnxt_qplib_hwq_type type;
+ u32 depth;
+ u32 stride;
+ u32 aux_stride;
+ u32 aux_depth;
+};
+
+struct bnxt_qplib_hwq {
+ struct pci_dev *pdev;
+ /* lock to protect qplib_hwq */
+ spinlock_t lock;
+ struct bnxt_qplib_pbl pbl[PBL_LVL_MAX + 1];
+ enum bnxt_qplib_pbl_lvl level; /* 0, 1, or 2 */
+ /* ptr for easy access to the PBL entries */
+ void **pbl_ptr;
+ /* ptr for easy access to the dma_addr */
+ dma_addr_t *pbl_dma_ptr;
+ u32 max_elements;
+ u32 depth;
+ u16 element_size; /* Size of each entry */
+ u16 qe_ppg; /* queue entry per page */
+
+ u32 prod; /* raw */
+ u32 cons; /* raw */
+ u8 cp_bit;
+ u8 is_user;
+ u64 *pad_pg;
+ u32 pad_stride;
+ u32 pad_pgofft;
+};
+
+struct bnxt_qplib_db_info {
+ void __iomem *db;
+ void __iomem *priv_db;
+ struct bnxt_qplib_hwq *hwq;
+ u32 xid;
+ u32 max_slot;
+ u32 flags;
+ u8 toggle;
+};
+
+enum bnxt_qplib_db_info_flags_mask {
+ BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT = 0x0UL,
+ BNXT_QPLIB_FLAG_EPOCH_PROD_SHIFT = 0x1UL,
+ BNXT_QPLIB_FLAG_EPOCH_CONS_MASK = 0x1UL,
+ BNXT_QPLIB_FLAG_EPOCH_PROD_MASK = 0x2UL,
+};
+
+enum bnxt_qplib_db_epoch_flag_shift {
+ BNXT_QPLIB_DB_EPOCH_CONS_SHIFT = BNXT_QPLIB_DBR_EPOCH_SHIFT,
+ BNXT_QPLIB_DB_EPOCH_PROD_SHIFT = (BNXT_QPLIB_DBR_EPOCH_SHIFT - 1),
+};
+
+/* Tables */
+struct bnxt_qplib_pd_tbl {
+ unsigned long *tbl;
+ u32 max;
+};
+
+struct bnxt_qplib_sgid_tbl {
+ struct bnxt_qplib_gid_info *tbl;
+ u16 *hw_id;
+ u16 max;
+ u16 active;
+ void *ctx;
+ u8 *vlan;
+};
+
+enum {
+ BNXT_QPLIB_DPI_TYPE_KERNEL = 0,
+ BNXT_QPLIB_DPI_TYPE_UC = 1,
+ BNXT_QPLIB_DPI_TYPE_WC = 2
+};
+
+struct bnxt_qplib_dpi {
+ u32 dpi;
+ u32 bit;
+ void __iomem *dbr;
+ u64 umdbr;
+ u8 type;
+};
+
+struct bnxt_qplib_dpi_tbl {
+ void **app_tbl;
+ unsigned long *tbl;
+ u16 max;
+ struct bnxt_qplib_reg_desc ucreg; /* Hold entire DB bar. */
+ struct bnxt_qplib_reg_desc wcreg;
+ void __iomem *priv_db;
+};
+
+struct bnxt_qplib_stats {
+ dma_addr_t dma_map;
+ void *dma;
+ u32 size;
+ u32 fw_id;
+};
+
+struct bnxt_qplib_vf_res {
+ u32 max_qp_per_vf;
+ u32 max_mrw_per_vf;
+ u32 max_srq_per_vf;
+ u32 max_cq_per_vf;
+ u32 max_gid_per_vf;
+};
+
+#define BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE 448
+#define BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE 64
+#define BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE 64
+#define BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE 128
+
+#define MAX_TQM_ALLOC_REQ 48
+#define MAX_TQM_ALLOC_BLK_SIZE 8
+struct bnxt_qplib_tqm_ctx {
+ struct bnxt_qplib_hwq pde;
+ u8 pde_level; /* Original level */
+ struct bnxt_qplib_hwq qtbl[MAX_TQM_ALLOC_REQ];
+ u8 qcount[MAX_TQM_ALLOC_REQ];
+};
+
+struct bnxt_qplib_ctx {
+ u32 qpc_count;
+ struct bnxt_qplib_hwq qpc_tbl;
+ u32 mrw_count;
+ struct bnxt_qplib_hwq mrw_tbl;
+ u32 srqc_count;
+ struct bnxt_qplib_hwq srqc_tbl;
+ u32 cq_count;
+ struct bnxt_qplib_hwq cq_tbl;
+ struct bnxt_qplib_hwq tim_tbl;
+ struct bnxt_qplib_tqm_ctx tqm_ctx;
+ struct bnxt_qplib_stats stats;
+ struct bnxt_qplib_stats stats3;
+ struct bnxt_qplib_vf_res vf_res;
+};
+
+struct bnxt_qplib_res {
+ struct pci_dev *pdev;
+ struct bnxt_qplib_chip_ctx *cctx;
+ struct bnxt_qplib_dev_attr *dattr;
+ struct net_device *netdev;
+ struct bnxt_en_dev *en_dev;
+ struct bnxt_qplib_rcfw *rcfw;
+ struct bnxt_qplib_pd_tbl pd_tbl;
+ /* To protect the pd table bit map */
+ struct mutex pd_tbl_lock;
+ struct bnxt_qplib_sgid_tbl sgid_tbl;
+ struct bnxt_qplib_dpi_tbl dpi_tbl;
+ /* To protect the dpi table bit map */
+ struct mutex dpi_tbl_lock;
+ bool prio;
+ bool is_vf;
+ struct bnxt_qplib_db_pacing_data *pacing_data;
+};
+
+static inline bool bnxt_qplib_is_chip_gen_p7(struct bnxt_qplib_chip_ctx *cctx)
+{
+ return (cctx->chip_num == CHIP_NUM_58818 ||
+ cctx->chip_num == CHIP_NUM_57608);
+}
+
+static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
+{
+ return (cctx->chip_num == CHIP_NUM_57508 ||
+ cctx->chip_num == CHIP_NUM_57504 ||
+ cctx->chip_num == CHIP_NUM_57502);
+}
+
+static inline bool bnxt_qplib_is_chip_gen_p5_p7(struct bnxt_qplib_chip_ctx *cctx)
+{
+ return bnxt_qplib_is_chip_gen_p5(cctx) || bnxt_qplib_is_chip_gen_p7(cctx);
+}
+
+static inline u8 bnxt_qplib_get_hwq_type(struct bnxt_qplib_res *res)
+{
+ return bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
+ HWQ_TYPE_QUEUE : HWQ_TYPE_L2_CMPL;
+}
+
+static inline u8 bnxt_qplib_get_ring_type(struct bnxt_qplib_chip_ctx *cctx)
+{
+ return bnxt_qplib_is_chip_gen_p5_p7(cctx) ?
+ RING_ALLOC_REQ_RING_TYPE_NQ :
+ RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL;
+}
+
+static inline u8 bnxt_qplib_base_pg_size(struct bnxt_qplib_hwq *hwq)
+{
+ u8 pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
+ struct bnxt_qplib_pbl *pbl;
+
+ pbl = &hwq->pbl[PBL_LVL_0];
+ switch (pbl->pg_size) {
+ case ROCE_PG_SIZE_4K:
+ pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
+ break;
+ case ROCE_PG_SIZE_8K:
+ pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8K;
+ break;
+ case ROCE_PG_SIZE_64K:
+ pg_size = BNXT_QPLIB_HWRM_PG_SIZE_64K;
+ break;
+ case ROCE_PG_SIZE_2M:
+ pg_size = BNXT_QPLIB_HWRM_PG_SIZE_2M;
+ break;
+ case ROCE_PG_SIZE_8M:
+ pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8M;
+ break;
+ case ROCE_PG_SIZE_1G:
+ pg_size = BNXT_QPLIB_HWRM_PG_SIZE_1G;
+ break;
+ default:
+ break;
+ }
+
+ return pg_size;
+}
+
+static inline void *bnxt_qplib_get_qe(struct bnxt_qplib_hwq *hwq,
+ u32 indx, u64 *pg)
+{
+ u32 pg_num, pg_idx;
+
+ pg_num = (indx / hwq->qe_ppg);
+ pg_idx = (indx % hwq->qe_ppg);
+ if (pg)
+ *pg = (u64)&hwq->pbl_ptr[pg_num];
+ return (void *)(hwq->pbl_ptr[pg_num] + hwq->element_size * pg_idx);
+}
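+/* For example, with 4 KiB pages and 16-byte elements, qe_ppg is 256, so
+ * bnxt_qplib_get_qe() resolves index 300 to page 1, offset 44 * 16 bytes.
+ */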
+
+static inline void *bnxt_qplib_get_prod_qe(struct bnxt_qplib_hwq *hwq, u32 idx)
+{
+ idx += hwq->prod;
+ if (idx >= hwq->depth)
+ idx -= hwq->depth;
+ return bnxt_qplib_get_qe(hwq, idx, NULL);
+}
+
+#define to_bnxt_qplib(ptr, type, member) \
+ container_of(ptr, type, member)
+
+struct bnxt_qplib_pd;
+struct bnxt_qplib_dev_attr;
+
+void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_hwq *hwq);
+int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
+ struct bnxt_qplib_hwq_attr *hwq_attr);
+int bnxt_qplib_alloc_pd(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_pd *pd);
+int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_pd_tbl *pd_tbl,
+ struct bnxt_qplib_pd *pd);
+int bnxt_qplib_alloc_dpi(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_dpi *dpi,
+ void *app, u8 type);
+int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_dpi *dpi);
+void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res);
+int bnxt_qplib_init_res(struct bnxt_qplib_res *res);
+void bnxt_qplib_free_res(struct bnxt_qplib_res *res);
+int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev);
+void bnxt_qplib_free_hwctx(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *ctx);
+int bnxt_qplib_alloc_hwctx(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *ctx);
+int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res);
+void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res);
+
+int bnxt_qplib_determine_atomics(struct pci_dev *dev);
+int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+ struct bnxt_qplib_chip_ctx *cctx,
+ struct bnxt_qplib_stats *stats);
+void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
+ struct bnxt_qplib_stats *stats);
+
+static inline void bnxt_qplib_hwq_incr_prod(struct bnxt_qplib_db_info *dbinfo,
+ struct bnxt_qplib_hwq *hwq, u32 cnt)
+{
+ /* move prod and update toggle/epoch if wrap around */
+ hwq->prod += cnt;
+ if (hwq->prod >= hwq->depth) {
+ hwq->prod %= hwq->depth;
+ dbinfo->flags ^= 1UL << BNXT_QPLIB_FLAG_EPOCH_PROD_SHIFT;
+ }
+}
+
+static inline void bnxt_qplib_hwq_incr_cons(u32 max_elements, u32 *cons, u32 cnt,
+ u32 *dbinfo_flags)
+{
+ /* move cons and update toggle/epoch if wrap around */
+ *cons += cnt;
+ if (*cons >= max_elements) {
+ *cons %= max_elements;
+ *dbinfo_flags ^= 1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT;
+ }
+}
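+/* For example, with a depth of 64, advancing prod from 62 by 4 wraps it to
+ * 2 and flips the producer epoch bit in dbinfo->flags; the consumer epoch
+ * is flipped the same way in bnxt_qplib_hwq_incr_cons().
+ */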
+
+static inline void bnxt_qplib_ring_db32(struct bnxt_qplib_db_info *info,
+ bool arm)
+{
+ u32 key = 0;
+
+ key |= info->hwq->cons | (CMPL_DOORBELL_IDX_VALID |
+ (CMPL_DOORBELL_KEY_CMPL & CMPL_DOORBELL_KEY_MASK));
+ if (!arm)
+ key |= CMPL_DOORBELL_MASK;
+ writel(key, info->db);
+}
+
+#define BNXT_QPLIB_INIT_DBHDR(xid, type, indx, toggle) \
+ (((u64)(((xid) & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | \
+ (type) | BNXT_QPLIB_DBR_VALID) << 32) | (indx) | \
+ (((u32)(toggle)) << (BNXT_QPLIB_DBR_TOGGLE_SHIFT)))
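+/* BNXT_QPLIB_INIT_DBHDR() packs the xid, path, type and valid bit into the
+ * upper 32 bits of the doorbell value; the lower 32 bits carry the ring
+ * index, with the toggle at BNXT_QPLIB_DBR_TOGGLE_SHIFT and the epoch bit
+ * folded into the index by the callers below.
+ */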
+
+static inline void bnxt_qplib_ring_db(struct bnxt_qplib_db_info *info,
+ u32 type)
+{
+ u64 key = 0;
+ u32 indx;
+ u8 toggle = 0;
+
+ if (type == DBC_DBC_TYPE_CQ_ARMALL ||
+ type == DBC_DBC_TYPE_CQ_ARMSE)
+ toggle = info->toggle;
+
+ indx = (info->hwq->cons & DBC_DBC_INDEX_MASK) |
+ ((info->flags & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK) <<
+ BNXT_QPLIB_DB_EPOCH_CONS_SHIFT);
+
+ key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, indx, toggle);
+ writeq(key, info->db);
+}
+
+static inline void bnxt_qplib_ring_prod_db(struct bnxt_qplib_db_info *info,
+ u32 type)
+{
+ u64 key = 0;
+ u32 indx;
+
+ indx = (((info->hwq->prod / info->max_slot) & DBC_DBC_INDEX_MASK) |
+ ((info->flags & BNXT_QPLIB_FLAG_EPOCH_PROD_MASK) <<
+ BNXT_QPLIB_DB_EPOCH_PROD_SHIFT));
+ key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, indx, 0);
+ writeq(key, info->db);
+}
+
+static inline void bnxt_qplib_armen_db(struct bnxt_qplib_db_info *info,
+ u32 type)
+{
+ u64 key = 0;
+ u8 toggle = 0;
+
+ if (type == DBC_DBC_TYPE_CQ_ARMENA || type == DBC_DBC_TYPE_SRQ_ARMENA)
+ toggle = info->toggle;
+ /* Index always at 0 */
+ key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, 0, toggle);
+ writeq(key, info->priv_db);
+}
+
+static inline void bnxt_qplib_srq_arm_db(struct bnxt_qplib_db_info *info,
+ u32 th)
+{
+ u64 key = 0;
+
+ key = BNXT_QPLIB_INIT_DBHDR(info->xid, DBC_DBC_TYPE_SRQ_ARM, th, info->toggle);
+ writeq(key, info->priv_db);
+}
+
+static inline void bnxt_qplib_ring_nq_db(struct bnxt_qplib_db_info *info,
+ struct bnxt_qplib_chip_ctx *cctx,
+ bool arm)
+{
+ u32 type;
+
+ type = arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
+ if (bnxt_qplib_is_chip_gen_p5_p7(cctx))
+ bnxt_qplib_ring_db(info, type);
+ else
+ bnxt_qplib_ring_db32(info, arm);
+}
+
+static inline bool _is_ext_stats_supported(u16 dev_cap_flags)
+{
+ return dev_cap_flags &
+ CREQ_QUERY_FUNC_RESP_SB_EXT_STATS;
+}
+
+static inline int bnxt_ext_stats_supported(struct bnxt_qplib_chip_ctx *ctx,
+ u16 flags, bool virtfn)
+{
+ /* ext stats supported if cap flag is set AND is a PF OR a Thor2 VF */
+ return (_is_ext_stats_supported(flags) &&
+ ((virtfn && bnxt_qplib_is_chip_gen_p7(ctx)) || (!virtfn)));
+}
+
+static inline bool _is_hw_retx_supported(u16 dev_cap_flags)
+{
+ return dev_cap_flags &
+ (CREQ_QUERY_FUNC_RESP_SB_HW_REQUESTER_RETX_ENABLED |
+ CREQ_QUERY_FUNC_RESP_SB_HW_RESPONDER_RETX_ENABLED);
+}
+
+#define BNXT_RE_HW_RETX(a) _is_hw_retx_supported((a))
+
+static inline bool _is_host_msn_table(u16 dev_cap_ext_flags2)
+{
+ return (dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_MASK) ==
+ CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_HOST_MSN_TABLE;
+}
+
+static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx)
+{
+ return cctx->modes.dbr_pacing;
+}
+
+static inline u8 bnxt_qplib_roce_mirror_supported(struct bnxt_qplib_chip_ctx *cctx)
+{
+ return cctx->modes.roce_mirror;
+}
+
+static inline bool _is_alloc_mr_unified(u16 dev_cap_flags)
+{
+ return dev_cap_flags & CREQ_QUERY_FUNC_RESP_SB_MR_REGISTER_ALLOC;
+}
+
+static inline bool _is_relaxed_ordering_supported(u16 dev_cap_ext_flags2)
+{
+ return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_MEMORY_REGION_RO_SUPPORTED;
+}
+
+static inline bool _is_optimize_modify_qp_supported(u16 dev_cap_ext_flags2)
+{
+ return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_OPTIMIZE_MODIFY_QP_SUPPORTED;
+}
+
+static inline bool _is_min_rnr_in_rtr_rts_mandatory(u16 dev_cap_ext_flags2)
+{
+ return !!(dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_MIN_RNR_RTR_RTS_OPT_SUPPORTED);
+}
+
+static inline bool _is_cq_coalescing_supported(u16 dev_cap_ext_flags2)
+{
+ return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_CQ_COALESCING_SUPPORTED;
+}
+
+static inline bool _is_max_srq_ext_supported(u16 dev_cap_ext_flags_2)
+{
+ return !!(dev_cap_ext_flags_2 & CREQ_QUERY_FUNC_RESP_SB_MAX_SRQ_EXTENDED);
+}
+
+#endif /* __BNXT_QPLIB_RES_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
new file mode 100644
index 000000000000..9ef581ed785c
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -0,0 +1,1145 @@
+/*
+ * Broadcom NetXtreme-E RoCE driver.
+ *
+ * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: Slow Path Operators
+ */
+
+#define dev_fmt(fmt) "QPLIB: " fmt
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+
+#include "roce_hsi.h"
+
+#include "qplib_res.h"
+#include "qplib_rcfw.h"
+#include "qplib_sp.h"
+#include "qplib_tlv.h"
+
+const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 } };
+
+/* Device */
+
+static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
+{
+ u16 pcie_ctl2 = 0;
+
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx))
+ return false;
+
+ pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2, &pcie_ctl2);
+ return (pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
+}
+
+void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw)
+{
+ struct creq_query_version_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_query_version req = {};
+ struct bnxt_qplib_dev_attr *attr;
+ int rc;
+
+ attr = rcfw->res->dattr;
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_QUERY_VERSION,
+ sizeof(req));
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ return;
+ attr->fw_ver[0] = resp.fw_maj;
+ attr->fw_ver[1] = resp.fw_minor;
+ attr->fw_ver[2] = resp.fw_bld;
+ attr->fw_ver[3] = resp.fw_rsvd;
+}
+
+int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
+{
+ struct bnxt_qplib_dev_attr *attr = rcfw->res->dattr;
+ struct creq_query_func_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct creq_query_func_resp_sb *sb;
+ struct bnxt_qplib_rcfw_sbuf sbuf;
+ struct bnxt_qplib_chip_ctx *cctx;
+ struct cmdq_query_func req = {};
+ u8 *tqm_alloc;
+ int i, rc;
+ u32 temp;
+
+ cctx = rcfw->res->cctx;
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_QUERY_FUNC,
+ sizeof(req));
+
+ sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
+ return -ENOMEM;
+ sb = sbuf.sb;
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ goto bail;
+
+ /* Extract the context from the side buffer */
+ attr->max_qp = le32_to_cpu(sb->max_qp);
+ /* max_qp value reported by FW doesn't include the QP1 */
+ attr->max_qp += 1;
+ attr->max_qp_rd_atom =
+ sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
+ BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
+ attr->max_qp_init_rd_atom =
+ sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
+ BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
+ attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr) - 1;
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx)) {
+ /*
+ * 128 WQEs need to be reserved for the HW (8916). Prevent
+ * reporting the max number on legacy devices
+ */
+ attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
+ }
+
+ /* Adjust max_qp_wqes for variable-size WQE mode */
+ if (cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
+ attr->max_qp_wqes = BNXT_VAR_MAX_WQE - 1;
+
+ attr->max_qp_sges = cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE ?
+ min_t(u32, sb->max_sge_var_wqe, BNXT_VAR_MAX_SGE) : 6;
+ attr->max_cq = le32_to_cpu(sb->max_cq);
+ attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
+ if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx))
+ attr->max_cq_wqes = min_t(u32, BNXT_QPLIB_MAX_CQ_WQES, attr->max_cq_wqes);
+ attr->max_cq_sges = attr->max_qp_sges;
+ attr->max_mr = le32_to_cpu(sb->max_mr);
+ attr->max_mw = le32_to_cpu(sb->max_mw);
+
+ attr->max_mr_size = le64_to_cpu(sb->max_mr_size);
+ attr->max_pd = 64 * 1024;
+ attr->max_raw_ethy_qp = le32_to_cpu(sb->max_raw_eth_qp);
+ attr->max_ah = le32_to_cpu(sb->max_ah);
+
+ attr->max_srq = le16_to_cpu(sb->max_srq);
+ attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
+ attr->max_srq_sges = sb->max_srq_sge;
+ attr->max_pkey = 1;
+ attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
+ if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx))
+ attr->l2_db_size = (sb->l2_db_space_size + 1) *
+ (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
+ /*
+ * Read the max gid supported by HW.
+ * For each GID entry in the HW table, we consume 2 GID entries in
+ * the kernel GID table. So the max_gid reported to the stack can be
+ * up to twice the value reported by the HW, capped at 256 GIDs.
+ */
+ attr->max_sgid = le32_to_cpu(sb->max_gid);
+ attr->max_sgid = min_t(u32, BNXT_QPLIB_NUM_GIDS_SUPPORTED, 2 * attr->max_sgid);
+ attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);
+ attr->dev_cap_flags2 = le16_to_cpu(sb->dev_cap_ext_flags_2);
+
+ if (_is_max_srq_ext_supported(attr->dev_cap_flags2))
+ attr->max_srq += le16_to_cpu(sb->max_srq_ext);
+
+ for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
+ temp = le32_to_cpu(sb->tqm_alloc_reqs[i]);
+ tqm_alloc = (u8 *)&temp;
+ attr->tqm_alloc_reqs[i * 4] = *tqm_alloc;
+ attr->tqm_alloc_reqs[i * 4 + 1] = *(++tqm_alloc);
+ attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc);
+ attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
+ }
+
+ if (rcfw->res->cctx->hwrm_intf_ver >= HWRM_VERSION_DEV_ATTR_MAX_DPI)
+ attr->max_dpi = le32_to_cpu(sb->max_dpi);
+
+ attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
+bail:
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
+ return rc;
+}
+
+int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_ctx *ctx)
+{
+ struct creq_set_func_resources_resp resp = {};
+ struct cmdq_set_func_resources req = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_SET_FUNC_RESOURCES,
+ sizeof(req));
+
+ req.number_of_qp = cpu_to_le32(ctx->qpc_count);
+ req.number_of_mrw = cpu_to_le32(ctx->mrw_count);
+ req.number_of_srq = cpu_to_le32(ctx->srqc_count);
+ req.number_of_cq = cpu_to_le32(ctx->cq_count);
+
+ req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
+ req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
+ req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
+ req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
+ req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc) {
+ dev_err(&res->pdev->dev, "Failed to set function resources\n");
+ }
+ return rc;
+}
+
+/* SGID */
+int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
+ struct bnxt_qplib_gid *gid)
+{
+ if (index >= sgid_tbl->max) {
+ dev_err(&res->pdev->dev,
+ "Index %d exceeded SGID table max (%d)\n",
+ index, sgid_tbl->max);
+ return -EINVAL;
+ }
+ memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid));
+ return 0;
+}
+
+int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ struct bnxt_qplib_gid *gid, u16 vlan_id, bool update)
+{
+ struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
+ struct bnxt_qplib_res,
+ sgid_tbl);
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ int index;
+
+ /* Do we need a sgid_lock here? */
+ if (!sgid_tbl->active) {
+ dev_err(&res->pdev->dev, "SGID table has no active entries\n");
+ return -ENOMEM;
+ }
+ for (index = 0; index < sgid_tbl->max; index++) {
+ if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) &&
+ vlan_id == sgid_tbl->tbl[index].vlan_id)
+ break;
+ }
+ if (index == sgid_tbl->max) {
+ dev_warn(&res->pdev->dev, "GID not found in the SGID table\n");
+ return 0;
+ }
+ /* Remove GID from the SGID table */
+ if (update) {
+ struct creq_delete_gid_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_delete_gid req = {};
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_DELETE_GID,
+ sizeof(req));
+ if (sgid_tbl->hw_id[index] == 0xFFFF) {
+ dev_err(&res->pdev->dev,
+ "GID entry contains an invalid HW id\n");
+ return -EINVAL;
+ }
+ req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ return rc;
+ }
+ memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero,
+ sizeof(bnxt_qplib_gid_zero));
+ sgid_tbl->tbl[index].vlan_id = 0xFFFF;
+ sgid_tbl->vlan[index] = 0;
+ sgid_tbl->active--;
+ dev_dbg(&res->pdev->dev,
+ "SGID deleted hw_id[0x%x] = 0x%x active = 0x%x\n",
+ index, sgid_tbl->hw_id[index], sgid_tbl->active);
+ sgid_tbl->hw_id[index] = (u16)-1;
+
+ /* unlock */
+ return 0;
+}
+
+int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ struct bnxt_qplib_gid *gid, const u8 *smac,
+ u16 vlan_id, bool update, u32 *index,
+ bool is_ugid, u32 stats_ctx_id)
+{
+ struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
+ struct bnxt_qplib_res,
+ sgid_tbl);
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ int i, free_idx;
+
+ /* Do we need a sgid_lock here? */
+ if (sgid_tbl->active == sgid_tbl->max) {
+ dev_err(&res->pdev->dev, "SGID table is full\n");
+ return -ENOMEM;
+ }
+ free_idx = sgid_tbl->max;
+ for (i = 0; i < sgid_tbl->max; i++) {
+ if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) &&
+ sgid_tbl->tbl[i].vlan_id == vlan_id) {
+ dev_dbg(&res->pdev->dev,
+ "SGID entry already exist in entry %d!\n", i);
+ *index = i;
+ return -EALREADY;
+ } else if (!memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
+ sizeof(bnxt_qplib_gid_zero)) &&
+ free_idx == sgid_tbl->max) {
+ free_idx = i;
+ }
+ }
+ if (free_idx == sgid_tbl->max) {
+ dev_err(&res->pdev->dev,
+ "SGID table is FULL but count is not MAX??\n");
+ return -ENOMEM;
+ }
+ if (update) {
+ struct creq_add_gid_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_add_gid req = {};
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_ADD_GID,
+ sizeof(req));
+
+ req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
+ req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
+ req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
+ req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
+ /*
+ * The driver must ensure that all RoCE traffic is VLAN tagged
+ * whenever it runs on a non-zero VLAN ID or with a non-zero
+ * priority.
+ */
+ if ((vlan_id != 0xFFFF) || res->prio) {
+ if (vlan_id != 0xFFFF)
+ req.vlan = cpu_to_le16(vlan_id &
+ CMDQ_ADD_GID_VLAN_VLAN_ID_MASK);
+ req.vlan |= cpu_to_le16(CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
+ CMDQ_ADD_GID_VLAN_VLAN_EN);
+ }
+
+ /* MAC in network format */
+ req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
+ req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
+ req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
+
+ req.stats_ctx = cpu_to_le16(CMDQ_ADD_GID_STATS_CTX_STATS_CTX_VALID |
+ (u16)stats_ctx_id);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ return rc;
+ sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid);
+ }
+ /* Add GID to the sgid_tbl */
+ memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
+ sgid_tbl->tbl[free_idx].vlan_id = vlan_id;
+ sgid_tbl->active++;
+ if (vlan_id != 0xFFFF)
+ sgid_tbl->vlan[free_idx] = 1;
+
+ dev_dbg(&res->pdev->dev,
+ "SGID added hw_id[0x%x] = 0x%x active = 0x%x\n",
+ free_idx, sgid_tbl->hw_id[free_idx], sgid_tbl->active);
+
+ *index = free_idx;
+ /* unlock */
+ return 0;
+}
+
+/* AH */
+int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
+ bool block)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_create_ah_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_create_ah req = {};
+ u32 temp32[4];
+ u16 temp16[3];
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_CREATE_AH,
+ sizeof(req));
+
+ memcpy(temp32, ah->dgid.data, sizeof(struct bnxt_qplib_gid));
+ req.dgid[0] = cpu_to_le32(temp32[0]);
+ req.dgid[1] = cpu_to_le32(temp32[1]);
+ req.dgid[2] = cpu_to_le32(temp32[2]);
+ req.dgid[3] = cpu_to_le32(temp32[3]);
+
+ req.type = ah->nw_type;
+ req.hop_limit = ah->hop_limit;
+ req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id[ah->sgid_index]);
+ req.dest_vlan_id_flow_label = cpu_to_le32((ah->flow_label &
+ CMDQ_CREATE_AH_FLOW_LABEL_MASK) |
+ CMDQ_CREATE_AH_DEST_VLAN_ID_MASK);
+ req.pd_id = cpu_to_le32(ah->pd->id);
+ req.traffic_class = ah->traffic_class;
+
+ /* MAC in network format */
+ memcpy(temp16, ah->dmac, 6);
+ req.dest_mac[0] = cpu_to_le16(temp16[0]);
+ req.dest_mac[1] = cpu_to_le16(temp16[1]);
+ req.dest_mac[2] = cpu_to_le16(temp16[2]);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), block);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ return rc;
+
+ ah->id = le32_to_cpu(resp.xid);
+ return 0;
+}
+
+int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
+ bool block)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_destroy_ah_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_destroy_ah req = {};
+ int rc;
+
+ /* Clean up the AH table in the device */
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_DESTROY_AH,
+ sizeof(req));
+
+ req.ah_cid = cpu_to_le32(ah->id);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), block);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ return rc;
+}
+
+/* MRW */
+int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
+{
+ struct creq_deallocate_key_resp resp = {};
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct cmdq_deallocate_key req = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ int rc;
+
+ if (mrw->lkey == 0xFFFFFFFF) {
+ dev_info(&res->pdev->dev, "SP: Free a reserved lkey MRW\n");
+ return 0;
+ }
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_DEALLOCATE_KEY,
+ sizeof(req));
+
+ req.mrw_flags = mrw->type;
+
+ if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) ||
+ (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
+ (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
+ req.key = cpu_to_le32(mrw->rkey);
+ else
+ req.key = cpu_to_le32(mrw->lkey);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ return rc;
+
+ /* Free the qplib's MRW memory */
+ if (mrw->hwq.max_elements)
+ bnxt_qplib_free_hwq(res, &mrw->hwq);
+
+ return 0;
+}
+
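+/* Allocate an MR or MW via ALLOCATE_MRW and record the key returned by
+ * firmware (rkey for memory windows, lkey otherwise).
+ */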
+int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_allocate_mrw_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_allocate_mrw req = {};
+ unsigned long tmp;
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_ALLOCATE_MRW,
+ sizeof(req));
+
+ req.pd_id = cpu_to_le32(mrw->pd->id);
+ req.mrw_flags = mrw->type;
+ if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR &&
+ mrw->access_flags & BNXT_QPLIB_FR_PMR) ||
+ mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A ||
+ mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)
+ req.access = CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY;
+ tmp = (unsigned long)mrw;
+ req.mrw_handle = cpu_to_le64(tmp);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ return rc;
+
+ if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) ||
+ (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
+ (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
+ mrw->rkey = le32_to_cpu(resp.xid);
+ else
+ mrw->lkey = le32_to_cpu(resp.xid);
+ return 0;
+}
+
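+/* Deregister an MR via DEREGISTER_MR and free its backing PBL HWQ. */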
+int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
+ bool block)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_deregister_mr_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_deregister_mr req = {};
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_DEREGISTER_MR,
+ sizeof(req));
+
+ req.lkey = cpu_to_le32(mrw->lkey);
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), block);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ return rc;
+
+ /* Free the qplib's MR memory */
+ if (mrw->hwq.max_elements) {
+ mrw->va = 0;
+ mrw->total_size = 0;
+ bnxt_qplib_free_hwq(res, &mrw->hwq);
+ }
+
+ return 0;
+}
+
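+/* Register (or re-register) an MR: rebuild the PBL HWQ for @num_pbls page
+ * pointers when provided, then issue REGISTER_MR describing the PBL level
+ * and page sizes.
+ */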
+int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
+ struct ib_umem *umem, int num_pbls, u32 buf_pg_size)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_sg_info sginfo = {};
+ struct creq_register_mr_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_register_mr req = {};
+ int pages, rc;
+ u32 pg_size;
+ u16 level;
+
+ if (num_pbls) {
+ pages = roundup_pow_of_two(num_pbls);
+ /* Allocate memory for the non-leaf pages to store buf ptrs.
+ * Non-leaf pages always use the system PAGE_SIZE.
+ */
+ /* Free the hwq if it already exists; this must be a re-reg */
+ if (mr->hwq.max_elements)
+ bnxt_qplib_free_hwq(res, &mr->hwq);
+ hwq_attr.res = res;
+ hwq_attr.depth = pages;
+ hwq_attr.stride = sizeof(dma_addr_t);
+ hwq_attr.type = HWQ_TYPE_MR;
+ hwq_attr.sginfo = &sginfo;
+ hwq_attr.sginfo->umem = umem;
+ hwq_attr.sginfo->npages = pages;
+ hwq_attr.sginfo->pgsize = buf_pg_size;
+ hwq_attr.sginfo->pgshft = ilog2(buf_pg_size);
+ rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr);
+ if (rc) {
+ dev_err(&res->pdev->dev,
+ "SP: Reg MR memory allocation failed\n");
+ return -ENOMEM;
+ }
+ }
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_REGISTER_MR,
+ sizeof(req));
+
+ /* Configure the request */
+ if (mr->hwq.level == PBL_LVL_MAX) {
+ /* No PBL provided, just use system PAGE_SIZE */
+ level = 0;
+ req.pbl = 0;
+ pg_size = PAGE_SIZE;
+ } else {
+ level = mr->hwq.level;
+ req.pbl = cpu_to_le64(mr->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
+ }
+ pg_size = buf_pg_size ? buf_pg_size : PAGE_SIZE;
+ req.log2_pg_size_lvl = (level << CMDQ_REGISTER_MR_LVL_SFT) |
+ ((ilog2(pg_size) <<
+ CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT) &
+ CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK);
+ req.log2_pbl_pg_size = cpu_to_le16(((ilog2(PAGE_SIZE) <<
+ CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT) &
+ CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK));
+ req.access = (mr->access_flags & BNXT_QPLIB_MR_ACCESS_MASK);
+ req.va = cpu_to_le64(mr->va);
+ req.key = cpu_to_le32(mr->lkey);
+ if (_is_alloc_mr_unified(res->dattr->dev_cap_flags))
+ req.key = cpu_to_le32(mr->pd->id);
+ req.flags = cpu_to_le16(mr->flags);
+ req.mr_size = cpu_to_le64(mr->total_size);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ goto fail;
+
+ if (_is_alloc_mr_unified(res->dattr->dev_cap_flags)) {
+ mr->lkey = le32_to_cpu(resp.xid);
+ mr->rkey = mr->lkey;
+ }
+
+ return 0;
+
+fail:
+ if (mr->hwq.max_elements)
+ bnxt_qplib_free_hwq(res, &mr->hwq);
+ return rc;
+}
+
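+/* Allocate the HWQ backing a fast-register page list large enough to hold
+ * @max_pg_ptrs page pointers.
+ */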
+int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_frpl *frpl,
+ int max_pg_ptrs)
+{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_sg_info sginfo = {};
+ int pg_ptrs, pages, rc;
+
+ /* Re-calculate the max to fit the HWQ allocation model */
+ pg_ptrs = roundup_pow_of_two(max_pg_ptrs);
+ pages = pg_ptrs >> MAX_PBL_LVL_1_PGS_SHIFT;
+ if (!pages)
+ pages++;
+
+ if (pages > MAX_PBL_LVL_1_PGS)
+ return -ENOMEM;
+
+ sginfo.pgsize = PAGE_SIZE;
+ sginfo.nopte = true;
+
+ hwq_attr.res = res;
+ hwq_attr.depth = pg_ptrs;
+ hwq_attr.stride = PAGE_SIZE;
+ hwq_attr.sginfo = &sginfo;
+ hwq_attr.type = HWQ_TYPE_CTX;
+ rc = bnxt_qplib_alloc_init_hwq(&frpl->hwq, &hwq_attr);
+ if (!rc)
+ frpl->max_pg_ptrs = pg_ptrs;
+
+ return rc;
+}
+
+int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_frpl *frpl)
+{
+ bnxt_qplib_free_hwq(res, &frpl->hwq);
+ return 0;
+}
+
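+/* Query the per-function RoCE counters via QUERY_ROCE_STATS and copy the
+ * side-buffer contents into @stats.
+ */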
+int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_roce_stats *stats)
+{
+ struct creq_query_roce_stats_resp resp = {};
+ struct creq_query_roce_stats_resp_sb *sb;
+ struct cmdq_query_roce_stats req = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct bnxt_qplib_rcfw_sbuf sbuf;
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_QUERY_ROCE_STATS,
+ sizeof(req));
+
+ sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
+ return -ENOMEM;
+ sb = sbuf.sb;
+
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ goto bail;
+ /* Extract the context from the side buffer */
+ stats->to_retransmits = le64_to_cpu(sb->to_retransmits);
+ stats->seq_err_naks_rcvd = le64_to_cpu(sb->seq_err_naks_rcvd);
+ stats->max_retry_exceeded = le64_to_cpu(sb->max_retry_exceeded);
+ stats->rnr_naks_rcvd = le64_to_cpu(sb->rnr_naks_rcvd);
+ stats->missing_resp = le64_to_cpu(sb->missing_resp);
+ stats->unrecoverable_err = le64_to_cpu(sb->unrecoverable_err);
+ stats->bad_resp_err = le64_to_cpu(sb->bad_resp_err);
+ stats->local_qp_op_err = le64_to_cpu(sb->local_qp_op_err);
+ stats->local_protection_err = le64_to_cpu(sb->local_protection_err);
+ stats->mem_mgmt_op_err = le64_to_cpu(sb->mem_mgmt_op_err);
+ stats->remote_invalid_req_err = le64_to_cpu(sb->remote_invalid_req_err);
+ stats->remote_access_err = le64_to_cpu(sb->remote_access_err);
+ stats->remote_op_err = le64_to_cpu(sb->remote_op_err);
+ stats->dup_req = le64_to_cpu(sb->dup_req);
+ stats->res_exceed_max = le64_to_cpu(sb->res_exceed_max);
+ stats->res_length_mismatch = le64_to_cpu(sb->res_length_mismatch);
+ stats->res_exceeds_wqe = le64_to_cpu(sb->res_exceeds_wqe);
+ stats->res_opcode_err = le64_to_cpu(sb->res_opcode_err);
+ stats->res_rx_invalid_rkey = le64_to_cpu(sb->res_rx_invalid_rkey);
+ stats->res_rx_domain_err = le64_to_cpu(sb->res_rx_domain_err);
+ stats->res_rx_no_perm = le64_to_cpu(sb->res_rx_no_perm);
+ stats->res_rx_range_err = le64_to_cpu(sb->res_rx_range_err);
+ stats->res_tx_invalid_rkey = le64_to_cpu(sb->res_tx_invalid_rkey);
+ stats->res_tx_domain_err = le64_to_cpu(sb->res_tx_domain_err);
+ stats->res_tx_no_perm = le64_to_cpu(sb->res_tx_no_perm);
+ stats->res_tx_range_err = le64_to_cpu(sb->res_tx_range_err);
+ stats->res_irrq_oflow = le64_to_cpu(sb->res_irrq_oflow);
+ stats->res_unsup_opcode = le64_to_cpu(sb->res_unsup_opcode);
+ stats->res_unaligned_atomic = le64_to_cpu(sb->res_unaligned_atomic);
+ stats->res_rem_inv_err = le64_to_cpu(sb->res_rem_inv_err);
+ stats->res_mem_error = le64_to_cpu(sb->res_mem_error);
+ stats->res_srq_err = le64_to_cpu(sb->res_srq_err);
+ stats->res_cmp_err = le64_to_cpu(sb->res_cmp_err);
+ stats->res_invalid_dup_rkey = le64_to_cpu(sb->res_invalid_dup_rkey);
+ stats->res_wqe_format_err = le64_to_cpu(sb->res_wqe_format_err);
+ stats->res_cq_load_err = le64_to_cpu(sb->res_cq_load_err);
+ stats->res_srq_load_err = le64_to_cpu(sb->res_srq_load_err);
+ stats->res_tx_pci_err = le64_to_cpu(sb->res_tx_pci_err);
+ stats->res_rx_pci_err = le64_to_cpu(sb->res_rx_pci_err);
+ if (!rcfw->init_oos_stats) {
+ rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count);
+ rcfw->init_oos_stats = 1;
+ } else {
+ stats->res_oos_drop_count +=
+ (le64_to_cpu(sb->res_oos_drop_count) -
+ rcfw->oos_prev) & BNXT_QPLIB_OOS_COUNT_MASK;
+ rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count);
+ }
+
+bail:
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
+ return rc;
+}
+
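+/* Query the extended RoCE statistics for function @fid and fill @estat
+ * from the response side buffer.
+ */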
+int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
+ struct bnxt_qplib_ext_stat *estat)
+{
+ struct creq_query_roce_stats_ext_resp resp = {};
+ struct creq_query_roce_stats_ext_resp_sb *sb;
+ struct cmdq_query_roce_stats_ext req = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct bnxt_qplib_rcfw_sbuf sbuf;
+ int rc;
+
+ sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
+ return -ENOMEM;
+
+ sb = sbuf.sb;
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS,
+ sizeof(req));
+
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ req.resp_addr = cpu_to_le64(sbuf.dma_addr);
+ if (bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx) && rcfw->res->is_vf)
+ req.function_id =
+ cpu_to_le32(CMDQ_QUERY_ROCE_STATS_EXT_VF_VALID |
+ (fid << CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_SFT));
+ else
+ req.function_id = cpu_to_le32(fid);
+ req.flags = cpu_to_le16(CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ goto bail;
+
+ estat->tx_atomic_req = le64_to_cpu(sb->tx_atomic_req_pkts);
+ estat->tx_read_req = le64_to_cpu(sb->tx_read_req_pkts);
+ estat->tx_read_res = le64_to_cpu(sb->tx_read_res_pkts);
+ estat->tx_write_req = le64_to_cpu(sb->tx_write_req_pkts);
+ estat->tx_send_req = le64_to_cpu(sb->tx_send_req_pkts);
+ estat->tx_roce_pkts = le64_to_cpu(sb->tx_roce_pkts);
+ estat->tx_roce_bytes = le64_to_cpu(sb->tx_roce_bytes);
+ estat->rx_atomic_req = le64_to_cpu(sb->rx_atomic_req_pkts);
+ estat->rx_read_req = le64_to_cpu(sb->rx_read_req_pkts);
+ estat->rx_read_res = le64_to_cpu(sb->rx_read_res_pkts);
+ estat->rx_write_req = le64_to_cpu(sb->rx_write_req_pkts);
+ estat->rx_send_req = le64_to_cpu(sb->rx_send_req_pkts);
+ estat->rx_roce_pkts = le64_to_cpu(sb->rx_roce_pkts);
+ estat->rx_roce_bytes = le64_to_cpu(sb->rx_roce_bytes);
+ estat->rx_roce_good_pkts = le64_to_cpu(sb->rx_roce_good_pkts);
+ estat->rx_roce_good_bytes = le64_to_cpu(sb->rx_roce_good_bytes);
+ estat->rx_out_of_buffer = le64_to_cpu(sb->rx_out_of_buffer_pkts);
+ estat->rx_out_of_sequence = le64_to_cpu(sb->rx_out_of_sequence_pkts);
+ estat->tx_cnp = le64_to_cpu(sb->tx_cnp_pkts);
+ estat->rx_cnp = le64_to_cpu(sb->rx_cnp_pkts);
+ estat->rx_ecn_marked = le64_to_cpu(sb->rx_ecn_marked_pkts);
+
+bail:
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
+ return rc;
+}
+
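+/* Copy the gen1 extended congestion-control parameters into the
+ * MODIFY_ROCE_CC extension TLV.
+ */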
+static void bnxt_qplib_fill_cc_gen1(struct cmdq_modify_roce_cc_gen1_tlv *ext_req,
+ struct bnxt_qplib_cc_param_ext *cc_ext)
+{
+ ext_req->modify_mask = cpu_to_le64(cc_ext->ext_mask);
+ cc_ext->ext_mask = 0;
+ ext_req->inactivity_th_hi = cpu_to_le16(cc_ext->inact_th_hi);
+ ext_req->min_time_between_cnps = cpu_to_le16(cc_ext->min_delta_cnp);
+ ext_req->init_cp = cpu_to_le16(cc_ext->init_cp);
+ ext_req->tr_update_mode = cc_ext->tr_update_mode;
+ ext_req->tr_update_cycles = cc_ext->tr_update_cyls;
+ ext_req->fr_num_rtts = cc_ext->fr_rtt;
+ ext_req->ai_rate_increase = cc_ext->ai_rate_incr;
+ ext_req->reduction_relax_rtts_th = cpu_to_le16(cc_ext->rr_rtt_th);
+ ext_req->additional_relax_cr_th = cpu_to_le16(cc_ext->ar_cr_th);
+ ext_req->cr_min_th = cpu_to_le16(cc_ext->cr_min_th);
+ ext_req->bw_avg_weight = cc_ext->bw_avg_weight;
+ ext_req->actual_cr_factor = cc_ext->cr_factor;
+ ext_req->max_cp_cr_th = cpu_to_le16(cc_ext->cr_th_max_cp);
+ ext_req->cp_bias_en = cc_ext->cp_bias_en;
+ ext_req->cp_bias = cc_ext->cp_bias;
+ ext_req->cnp_ecn = cc_ext->cnp_ecn;
+ ext_req->rtt_jitter_en = cc_ext->rtt_jitter_en;
+ ext_req->link_bytes_per_usec = cpu_to_le16(cc_ext->bytes_per_usec);
+ ext_req->reset_cc_cr_th = cpu_to_le16(cc_ext->cc_cr_reset_th);
+ ext_req->cr_width = cc_ext->cr_width;
+ ext_req->quota_period_min = cc_ext->min_quota;
+ ext_req->quota_period_max = cc_ext->max_quota;
+ ext_req->quota_period_abs_max = cc_ext->abs_max_quota;
+ ext_req->tr_lower_bound = cpu_to_le16(cc_ext->tr_lb);
+ ext_req->cr_prob_factor = cc_ext->cr_prob_fac;
+ ext_req->tr_prob_factor = cc_ext->tr_prob_fac;
+ ext_req->fairness_cr_th = cpu_to_le16(cc_ext->fair_cr_th);
+ ext_req->red_div = cc_ext->red_div;
+ ext_req->cnp_ratio_th = cc_ext->cnp_ratio_th;
+ ext_req->exp_ai_rtts = cpu_to_le16(cc_ext->ai_ext_rtt);
+ ext_req->exp_ai_cr_cp_ratio = cc_ext->exp_crcp_ratio;
+ ext_req->use_rate_table = cc_ext->low_rate_en;
+ ext_req->cp_exp_update_th = cpu_to_le16(cc_ext->cpcr_update_th);
+ ext_req->high_exp_ai_rtts_th1 = cpu_to_le16(cc_ext->ai_rtt_th1);
+ ext_req->high_exp_ai_rtts_th2 = cpu_to_le16(cc_ext->ai_rtt_th2);
+ ext_req->actual_cr_cong_free_rtts_th = cpu_to_le16(cc_ext->cf_rtt_th);
+ ext_req->severe_cong_cr_th1 = cpu_to_le16(cc_ext->sc_cr_th1);
+ ext_req->severe_cong_cr_th2 = cpu_to_le16(cc_ext->sc_cr_th2);
+ ext_req->link64B_per_rtt = cpu_to_le32(cc_ext->l64B_per_rtt);
+ ext_req->cc_ack_bytes = cc_ext->cc_ack_bytes;
+}
+
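+/* Program congestion-control parameters: send the base MODIFY_ROCE_CC
+ * command, extended with a gen1 TLV on P5/P7 chips.
+ */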
+int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_cc_param *cc_param)
+{
+ struct bnxt_qplib_tlv_modify_cc_req tlv_req = {};
+ struct creq_modify_roce_cc_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_modify_roce_cc *req;
+ int req_size;
+ void *cmd;
+ int rc;
+
+ /* Prepare the older base command */
+ req = &tlv_req.base_req;
+ cmd = req;
+ req_size = sizeof(*req);
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)req, CMDQ_BASE_OPCODE_MODIFY_ROCE_CC,
+ sizeof(*req));
+ req->modify_mask = cpu_to_le32(cc_param->mask);
+ req->enable_cc = cc_param->enable;
+ req->g = cc_param->g;
+ req->num_phases_per_state = cc_param->nph_per_state;
+ req->time_per_phase = cc_param->time_pph;
+ req->pkts_per_phase = cc_param->pkts_pph;
+ req->init_cr = cpu_to_le16(cc_param->init_cr);
+ req->init_tr = cpu_to_le16(cc_param->init_tr);
+ req->tos_dscp_tos_ecn = (cc_param->tos_dscp << CMDQ_MODIFY_ROCE_CC_TOS_DSCP_SFT) |
+ (cc_param->tos_ecn & CMDQ_MODIFY_ROCE_CC_TOS_ECN_MASK);
+ req->alt_vlan_pcp = cc_param->alt_vlan_pcp;
+ req->alt_tos_dscp = cpu_to_le16(cc_param->alt_tos_dscp);
+ req->rtt = cpu_to_le16(cc_param->rtt);
+ req->tcp_cp = cpu_to_le16(cc_param->tcp_cp);
+ req->cc_mode = cc_param->cc_mode;
+ req->inactivity_th = cpu_to_le16(cc_param->inact_th);
+
+ /* For chip gen P5 onwards, fill the extended command and TLV headers */
+ if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx)) {
+ struct roce_tlv *hdr;
+ u32 payload;
+ u32 chunks;
+
+ cmd = &tlv_req;
+ req_size = sizeof(tlv_req);
+ /* Prepare primary tlv header */
+ hdr = &tlv_req.tlv_hdr;
+ chunks = CHUNKS(sizeof(struct bnxt_qplib_tlv_modify_cc_req));
+ payload = sizeof(struct cmdq_modify_roce_cc);
+ __roce_1st_tlv_prep(hdr, chunks, payload, true);
+ /* Prepare secondary tlv header */
+ hdr = (struct roce_tlv *)&tlv_req.ext_req;
+ payload = sizeof(struct cmdq_modify_roce_cc_gen1_tlv) -
+ sizeof(struct roce_tlv);
+ __roce_ext_tlv_prep(hdr, TLV_TYPE_MODIFY_ROCE_CC_GEN1, payload, false, true);
+ bnxt_qplib_fill_cc_gen1(&tlv_req.ext_req, &cc_param->cc_ext);
+ }
+
+ bnxt_qplib_fill_cmdqmsg(&msg, cmd, &resp, NULL, req_size,
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(res->rcfw, &msg);
+ return rc;
+}
+
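+/* Read @resp_size bytes of the firmware context identified by @res_type
+ * and @xid into @resp_va via a DMA-coherent side buffer.
+ */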
+int bnxt_qplib_read_context(struct bnxt_qplib_rcfw *rcfw, u8 res_type,
+ u32 xid, u32 resp_size, void *resp_va)
+{
+ struct creq_read_context resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_read_context req = {};
+ struct bnxt_qplib_rcfw_sbuf sbuf;
+ int rc;
+
+ sbuf.size = resp_size;
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
+ return -ENOMEM;
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_READ_CONTEXT, sizeof(req));
+ req.resp_addr = cpu_to_le64(sbuf.dma_addr);
+ req.resp_size = resp_size / BNXT_QPLIB_CMDQE_UNITS;
+
+ req.xid = cpu_to_le32(xid);
+ req.type = res_type;
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ goto free_mem;
+
+ memcpy(resp_va, sbuf.sb, resp_size);
+free_mem:
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
+ return rc;
+}
+
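+/* Extract the gen1 extended congestion-control parameters from the
+ * QUERY_ROCE_CC response TLV.
+ */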
+static void bnxt_qplib_read_cc_gen1(struct bnxt_qplib_cc_param_ext *cc_ext,
+ struct creq_query_roce_cc_gen1_resp_sb_tlv *sb)
+{
+ cc_ext->inact_th_hi = le16_to_cpu(sb->inactivity_th_hi);
+ cc_ext->min_delta_cnp = le16_to_cpu(sb->min_time_between_cnps);
+ cc_ext->init_cp = le16_to_cpu(sb->init_cp);
+ cc_ext->tr_update_mode = sb->tr_update_mode;
+ cc_ext->tr_update_cyls = sb->tr_update_cycles;
+ cc_ext->fr_rtt = sb->fr_num_rtts;
+ cc_ext->ai_rate_incr = sb->ai_rate_increase;
+ cc_ext->rr_rtt_th = le16_to_cpu(sb->reduction_relax_rtts_th);
+ cc_ext->ar_cr_th = le16_to_cpu(sb->additional_relax_cr_th);
+ cc_ext->cr_min_th = le16_to_cpu(sb->cr_min_th);
+ cc_ext->bw_avg_weight = sb->bw_avg_weight;
+ cc_ext->cr_factor = sb->actual_cr_factor;
+ cc_ext->cr_th_max_cp = le16_to_cpu(sb->max_cp_cr_th);
+ cc_ext->cp_bias_en = sb->cp_bias_en;
+ cc_ext->cp_bias = sb->cp_bias;
+ cc_ext->cnp_ecn = sb->cnp_ecn;
+ cc_ext->rtt_jitter_en = sb->rtt_jitter_en;
+ cc_ext->bytes_per_usec = le16_to_cpu(sb->link_bytes_per_usec);
+ cc_ext->cc_cr_reset_th = le16_to_cpu(sb->reset_cc_cr_th);
+ cc_ext->cr_width = sb->cr_width;
+ cc_ext->min_quota = sb->quota_period_min;
+ cc_ext->max_quota = sb->quota_period_max;
+ cc_ext->abs_max_quota = sb->quota_period_abs_max;
+ cc_ext->tr_lb = le16_to_cpu(sb->tr_lower_bound);
+ cc_ext->cr_prob_fac = sb->cr_prob_factor;
+ cc_ext->tr_prob_fac = sb->tr_prob_factor;
+ cc_ext->fair_cr_th = le16_to_cpu(sb->fairness_cr_th);
+ cc_ext->red_div = sb->red_div;
+ cc_ext->cnp_ratio_th = sb->cnp_ratio_th;
+ cc_ext->ai_ext_rtt = le16_to_cpu(sb->exp_ai_rtts);
+ cc_ext->exp_crcp_ratio = sb->exp_ai_cr_cp_ratio;
+ cc_ext->low_rate_en = sb->use_rate_table;
+ cc_ext->cpcr_update_th = le16_to_cpu(sb->cp_exp_update_th);
+ cc_ext->ai_rtt_th1 = le16_to_cpu(sb->high_exp_ai_rtts_th1);
+ cc_ext->ai_rtt_th2 = le16_to_cpu(sb->high_exp_ai_rtts_th2);
+ cc_ext->cf_rtt_th = le16_to_cpu(sb->actual_cr_cong_free_rtts_th);
+ cc_ext->sc_cr_th1 = le16_to_cpu(sb->severe_cong_cr_th1);
+ cc_ext->sc_cr_th2 = le16_to_cpu(sb->severe_cong_cr_th2);
+ cc_ext->l64B_per_rtt = le32_to_cpu(sb->link64B_per_rtt);
+ cc_ext->cc_ack_bytes = sb->cc_ack_bytes;
+ cc_ext->reduce_cf_rtt_th = le16_to_cpu(sb->reduce_init_cong_free_rtts_th);
+}
+
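+/* Read back the current congestion-control settings via QUERY_ROCE_CC,
+ * including the gen1 extension on P5/P7 chips.
+ */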
+int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_cc_param *cc_param)
+{
+ struct bnxt_qplib_tlv_query_rcc_sb *ext_sb;
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_query_roce_cc_resp resp = {};
+ struct creq_query_roce_cc_resp_sb *sb;
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_query_roce_cc req = {};
+ struct bnxt_qplib_rcfw_sbuf sbuf;
+ size_t resp_size;
+ int rc;
+
+ /* Query the parameters from chip */
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, CMDQ_BASE_OPCODE_QUERY_ROCE_CC,
+ sizeof(req));
+ if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx))
+ resp_size = sizeof(*ext_sb);
+ else
+ resp_size = sizeof(*sb);
+
+ sbuf.size = ALIGN(resp_size, BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
+ return -ENOMEM;
+
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(res->rcfw, &msg);
+ if (rc)
+ goto out;
+
+ ext_sb = sbuf.sb;
+ sb = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ? &ext_sb->base_sb :
+ (struct creq_query_roce_cc_resp_sb *)ext_sb;
+
+ cc_param->enable = sb->enable_cc & CREQ_QUERY_ROCE_CC_RESP_SB_ENABLE_CC;
+ cc_param->tos_ecn = (sb->tos_dscp_tos_ecn &
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_MASK) >>
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_SFT;
+ cc_param->tos_dscp = (sb->tos_dscp_tos_ecn &
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_MASK) >>
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_SFT;
+ cc_param->alt_tos_dscp = sb->alt_tos_dscp;
+ cc_param->alt_vlan_pcp = sb->alt_vlan_pcp;
+
+ cc_param->g = sb->g;
+ cc_param->nph_per_state = sb->num_phases_per_state;
+ cc_param->init_cr = le16_to_cpu(sb->init_cr);
+ cc_param->init_tr = le16_to_cpu(sb->init_tr);
+ cc_param->cc_mode = sb->cc_mode;
+ cc_param->inact_th = le16_to_cpu(sb->inactivity_th);
+ cc_param->rtt = le16_to_cpu(sb->rtt);
+ cc_param->tcp_cp = le16_to_cpu(sb->tcp_cp);
+ cc_param->time_pph = sb->time_per_phase;
+ cc_param->pkts_pph = sb->pkts_per_phase;
+ if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx)) {
+ bnxt_qplib_read_cc_gen1(&cc_param->cc_ext, &ext_sb->gen1_sb);
+ cc_param->inact_th |= (cc_param->cc_ext.inact_th_hi & 0x3F) << 16;
+ }
+out:
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
+ return rc;
+}
+
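+/* Enable RoCE traffic mirroring via the ROCE_MIRROR_CFG command. */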
+int bnxt_qplib_create_flow(struct bnxt_qplib_res *res)
+{
+ struct creq_roce_mirror_cfg_resp resp = {};
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct cmdq_roce_mirror_cfg req = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG,
+ sizeof(req));
+
+ req.mirror_flags = (u8)CMDQ_ROCE_MIRROR_CFG_MIRROR_ENABLE;
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ return bnxt_qplib_rcfw_send_message(rcfw, &msg);
+}
+
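+/* Disable RoCE traffic mirroring by sending ROCE_MIRROR_CFG with the
+ * mirror-enable flag cleared.
+ */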
+int bnxt_qplib_destroy_flow(struct bnxt_qplib_res *res)
+{
+ struct creq_roce_mirror_cfg_resp resp = {};
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct cmdq_roce_mirror_cfg req = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG,
+ sizeof(req));
+
+ req.mirror_flags &= ~((u8)CMDQ_ROCE_MIRROR_CFG_MIRROR_ENABLE);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+
+ return bnxt_qplib_rcfw_send_message(rcfw, &msg);
+}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
new file mode 100644
index 000000000000..147b5d9c0313
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -0,0 +1,373 @@
+/*
+ * Broadcom NetXtreme-E RoCE driver.
+ *
+ * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: Slow Path Operators (header)
+ *
+ */
+
+#ifndef __BNXT_QPLIB_SP_H__
+#define __BNXT_QPLIB_SP_H__
+
+#include <rdma/bnxt_re-abi.h>
+#define BNXT_QPLIB_RESERVED_QP_WRS 128
+
+struct bnxt_qplib_dev_attr {
+#define FW_VER_ARR_LEN 4
+ u8 fw_ver[FW_VER_ARR_LEN];
+#define BNXT_QPLIB_NUM_GIDS_SUPPORTED 256
+ u16 max_sgid;
+ u16 max_mrw;
+ u32 max_qp;
+#define BNXT_QPLIB_MAX_OUT_RD_ATOM 126
+ u32 max_qp_rd_atom;
+ u32 max_qp_init_rd_atom;
+ u32 max_qp_wqes;
+ u32 max_qp_sges;
+ u32 max_cq;
+#define BNXT_QPLIB_MAX_CQ_WQES 0xfffff
+ u32 max_cq_wqes;
+ u32 max_cq_sges;
+ u32 max_mr;
+ u64 max_mr_size;
+ u32 max_pd;
+ u32 max_mw;
+ u32 max_raw_ethy_qp;
+ u32 max_ah;
+ u32 max_srq;
+ u32 max_srq_wqes;
+ u32 max_srq_sges;
+ u32 max_pkey;
+ u32 max_inline_data;
+ u32 l2_db_size;
+ u8 tqm_alloc_reqs[MAX_TQM_ALLOC_REQ];
+ bool is_atomic;
+ u16 dev_cap_flags;
+ u16 dev_cap_flags2;
+ u32 max_dpi;
+};
+
+struct bnxt_qplib_pd {
+ u32 id;
+};
+
+struct bnxt_qplib_gid {
+ u8 data[16];
+};
+
+struct bnxt_qplib_gid_info {
+ struct bnxt_qplib_gid gid;
+ u16 vlan_id;
+};
+
+struct bnxt_qplib_ah {
+ struct bnxt_qplib_gid dgid;
+ struct bnxt_qplib_pd *pd;
+ u32 id;
+ u8 sgid_index;
+ /* For Query AH if the HW table and SW table are different */
+ u8 host_sgid_index;
+ u8 traffic_class;
+ u32 flow_label;
+ u8 hop_limit;
+ u8 sl;
+ u8 dmac[6];
+ u16 vlan_id;
+ u8 nw_type;
+};
+
+struct bnxt_qplib_mrw {
+ struct bnxt_qplib_pd *pd;
+ int type;
+ u32 access_flags;
+#define BNXT_QPLIB_MR_ACCESS_MASK 0xFF
+#define BNXT_QPLIB_FR_PMR 0x80000000
+ u32 lkey;
+ u32 rkey;
+#define BNXT_QPLIB_RSVD_LKEY 0xFFFFFFFF
+ u64 va;
+ u64 total_size;
+ u32 npages;
+ u16 flags;
+ u64 mr_handle;
+ struct bnxt_qplib_hwq hwq;
+};
+
+struct bnxt_qplib_frpl {
+ int max_pg_ptrs;
+ struct bnxt_qplib_hwq hwq;
+};
+
+#define BNXT_QPLIB_ACCESS_LOCAL_WRITE BIT(0)
+#define BNXT_QPLIB_ACCESS_REMOTE_READ BIT(1)
+#define BNXT_QPLIB_ACCESS_REMOTE_WRITE BIT(2)
+#define BNXT_QPLIB_ACCESS_REMOTE_ATOMIC BIT(3)
+#define BNXT_QPLIB_ACCESS_MW_BIND BIT(4)
+#define BNXT_QPLIB_ACCESS_ZERO_BASED BIT(5)
+#define BNXT_QPLIB_ACCESS_ON_DEMAND BIT(6)
+
+struct bnxt_qplib_roce_stats {
+ u64 to_retransmits;
+ u64 seq_err_naks_rcvd;
+ /* seq_err_naks_rcvd is 64 b */
+ u64 max_retry_exceeded;
+ /* max_retry_exceeded is 64 b */
+ u64 rnr_naks_rcvd;
+ /* rnr_naks_rcvd is 64 b */
+ u64 missing_resp;
+ u64 unrecoverable_err;
+ /* unrecoverable_err is 64 b */
+ u64 bad_resp_err;
+ /* bad_resp_err is 64 b */
+ u64 local_qp_op_err;
+ /* local_qp_op_err is 64 b */
+ u64 local_protection_err;
+ /* local_protection_err is 64 b */
+ u64 mem_mgmt_op_err;
+ /* mem_mgmt_op_err is 64 b */
+ u64 remote_invalid_req_err;
+ /* remote_invalid_req_err is 64 b */
+ u64 remote_access_err;
+ /* remote_access_err is 64 b */
+ u64 remote_op_err;
+ /* remote_op_err is 64 b */
+ u64 dup_req;
+ /* dup_req is 64 b */
+ u64 res_exceed_max;
+ /* res_exceed_max is 64 b */
+ u64 res_length_mismatch;
+ /* res_length_mismatch is 64 b */
+ u64 res_exceeds_wqe;
+ /* res_exceeds_wqe is 64 b */
+ u64 res_opcode_err;
+ /* res_opcode_err is 64 b */
+ u64 res_rx_invalid_rkey;
+ /* res_rx_invalid_rkey is 64 b */
+ u64 res_rx_domain_err;
+ /* res_rx_domain_err is 64 b */
+ u64 res_rx_no_perm;
+ /* res_rx_no_perm is 64 b */
+ u64 res_rx_range_err;
+ /* res_rx_range_err is 64 b */
+ u64 res_tx_invalid_rkey;
+ /* res_tx_invalid_rkey is 64 b */
+ u64 res_tx_domain_err;
+ /* res_tx_domain_err is 64 b */
+ u64 res_tx_no_perm;
+ /* res_tx_no_perm is 64 b */
+ u64 res_tx_range_err;
+ /* res_tx_range_err is 64 b */
+ u64 res_irrq_oflow;
+ /* res_irrq_oflow is 64 b */
+ u64 res_unsup_opcode;
+ /* res_unsup_opcode is 64 b */
+ u64 res_unaligned_atomic;
+ /* res_unaligned_atomic is 64 b */
+ u64 res_rem_inv_err;
+ /* res_rem_inv_err is 64 b */
+ u64 res_mem_error;
+ /* res_mem_error is 64 b */
+ u64 res_srq_err;
+ /* res_srq_err is 64 b */
+ u64 res_cmp_err;
+ /* res_cmp_err is 64 b */
+ u64 res_invalid_dup_rkey;
+ /* res_invalid_dup_rkey is 64 b */
+ u64 res_wqe_format_err;
+ /* res_wqe_format_err is 64 b */
+ u64 res_cq_load_err;
+ /* res_cq_load_err is 64 b */
+ u64 res_srq_load_err;
+ /* res_srq_load_err is 64 b */
+ u64 res_tx_pci_err;
+ /* res_tx_pci_err is 64 b */
+ u64 res_rx_pci_err;
+ /* res_rx_pci_err is 64 b */
+ u64 res_oos_drop_count;
+ /* res_oos_drop_count */
+ u64 active_qp_count_p0;
+ /* port 0 active qps */
+ u64 active_qp_count_p1;
+ /* port 1 active qps */
+ u64 active_qp_count_p2;
+ /* port 2 active qps */
+ u64 active_qp_count_p3;
+ /* port 3 active qps */
+};
+
+struct bnxt_qplib_ext_stat {
+ u64 tx_atomic_req;
+ u64 tx_read_req;
+ u64 tx_read_res;
+ u64 tx_write_req;
+ u64 tx_send_req;
+ u64 tx_roce_pkts;
+ u64 tx_roce_bytes;
+ u64 rx_atomic_req;
+ u64 rx_read_req;
+ u64 rx_read_res;
+ u64 rx_write_req;
+ u64 rx_send_req;
+ u64 rx_roce_pkts;
+ u64 rx_roce_bytes;
+ u64 rx_roce_good_pkts;
+ u64 rx_roce_good_bytes;
+ u64 rx_out_of_buffer;
+ u64 rx_out_of_sequence;
+ u64 tx_cnp;
+ u64 rx_cnp;
+ u64 rx_ecn_marked;
+};
+
+struct bnxt_qplib_cc_param_ext {
+ u64 ext_mask;
+ u16 inact_th_hi;
+ u16 min_delta_cnp;
+ u16 init_cp;
+ u8 tr_update_mode;
+ u8 tr_update_cyls;
+ u8 fr_rtt;
+ u8 ai_rate_incr;
+ u16 rr_rtt_th;
+ u16 ar_cr_th;
+ u16 cr_min_th;
+ u8 bw_avg_weight;
+ u8 cr_factor;
+ u16 cr_th_max_cp;
+ u8 cp_bias_en;
+ u8 cp_bias;
+ u8 cnp_ecn;
+ u8 rtt_jitter_en;
+ u16 bytes_per_usec;
+ u16 cc_cr_reset_th;
+ u8 cr_width;
+ u8 min_quota;
+ u8 max_quota;
+ u8 abs_max_quota;
+ u16 tr_lb;
+ u8 cr_prob_fac;
+ u8 tr_prob_fac;
+ u16 fair_cr_th;
+ u8 red_div;
+ u8 cnp_ratio_th;
+ u16 ai_ext_rtt;
+ u8 exp_crcp_ratio;
+ u8 low_rate_en;
+ u16 cpcr_update_th;
+ u16 ai_rtt_th1;
+ u16 ai_rtt_th2;
+ u16 cf_rtt_th;
+ u16 sc_cr_th1; /* severe congestion cr threshold 1 */
+ u16 sc_cr_th2; /* severe congestion cr threshold 2 */
+ u32 l64B_per_rtt;
+ u8 cc_ack_bytes;
+ u16 reduce_cf_rtt_th;
+};
+
+struct bnxt_qplib_cc_param {
+ u8 alt_vlan_pcp;
+ u8 qp1_tos_dscp;
+ u16 alt_tos_dscp;
+ u8 cc_mode;
+ u8 enable;
+ u16 inact_th;
+ u16 init_cr;
+ u16 init_tr;
+ u16 rtt;
+ u8 g;
+ u8 nph_per_state;
+ u8 time_pph;
+ u8 pkts_pph;
+ u8 tos_ecn;
+ u8 tos_dscp;
+ u16 tcp_cp;
+ struct bnxt_qplib_cc_param_ext cc_ext;
+ u32 mask;
+};
+
+int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
+ struct bnxt_qplib_gid *gid);
+int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ struct bnxt_qplib_gid *gid, u16 vlan_id, bool update);
+int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ struct bnxt_qplib_gid *gid, const u8 *mac, u16 vlan_id,
+ bool update, u32 *index,
+ bool is_ugid, u32 stats_ctx_id);
+int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ struct bnxt_qplib_gid *gid, u16 gid_idx,
+ const u8 *smac);
+int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw);
+int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_ctx *ctx);
+int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
+ bool block);
+int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
+ bool block);
+int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_mrw *mrw);
+int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
+ bool block);
+int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
+ struct ib_umem *umem, int num_pbls, u32 buf_pg_size);
+int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr);
+int bnxt_qplib_alloc_fast_reg_mr(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_mrw *mr, int max);
+int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_frpl *frpl, int max);
+int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_frpl *frpl);
+int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_roce_stats *stats);
+int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
+ struct bnxt_qplib_ext_stat *estat);
+int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_cc_param *cc_param);
+int bnxt_qplib_read_context(struct bnxt_qplib_rcfw *rcfw, u8 type, u32 xid,
+ u32 resp_size, void *resp_va);
+int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_cc_param *cc_param);
+void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw);
+int bnxt_qplib_create_flow(struct bnxt_qplib_res *res);
+int bnxt_qplib_destroy_flow(struct bnxt_qplib_res *res);
+
+#define BNXT_VAR_MAX_WQE 4352
+#define BNXT_VAR_MAX_SLOT_ALIGN 256
+#define BNXT_VAR_MAX_SGE 13
+#define BNXT_RE_MAX_RQ_WQES 65536
+
+#define BNXT_STATIC_MAX_SGE 6
+
+#endif /* __BNXT_QPLIB_SP_H__*/
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_tlv.h b/drivers/infiniband/hw/bnxt_re/qplib_tlv.h
new file mode 100644
index 000000000000..ae96a75d7f31
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/qplib_tlv.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+
+#ifndef __QPLIB_TLV_H__
+#define __QPLIB_TLV_H__
+
+struct roce_tlv {
+ struct tlv tlv;
+ u8 total_size; // in units of 16 byte chunks
+ u8 unused[7]; // for 16 byte alignment
+};
+
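+/* CHUNKS(x): number of 16-byte chunks needed to hold x bytes, rounded up */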
+#define CHUNK_SIZE 16
+#define CHUNKS(x) (((x) + CHUNK_SIZE - 1) / CHUNK_SIZE)
+
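+/* Initialize the leading TLV of a TLV-encapsulated RoCE command:
+ * @tot_chunks is the total command size in 16-byte chunks, @content_bytes
+ * is the payload length, and @flags indicates that more TLVs follow.
+ */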
+static inline void __roce_1st_tlv_prep(struct roce_tlv *rtlv, u8 tot_chunks,
+ u16 content_bytes, u8 flags)
+{
+ rtlv->tlv.cmd_discr = cpu_to_le16(CMD_DISCR_TLV_ENCAP);
+ rtlv->tlv.tlv_type = cpu_to_le16(TLV_TYPE_ROCE_SP_COMMAND);
+ rtlv->tlv.length = cpu_to_le16(content_bytes);
+ rtlv->tlv.flags = TLV_FLAGS_REQUIRED;
+ rtlv->tlv.flags |= flags ? TLV_FLAGS_MORE : 0;
+ rtlv->total_size = (tot_chunks);
+}
+
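+/* Initialize an extension TLV of type @tlv_type: @more indicates that
+ * further TLVs follow and @flags marks the TLV as required.
+ */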
+static inline void __roce_ext_tlv_prep(struct roce_tlv *rtlv, u16 tlv_type,
+ u16 content_bytes, u8 more, u8 flags)
+{
+ rtlv->tlv.cmd_discr = cpu_to_le16(CMD_DISCR_TLV_ENCAP);
+ rtlv->tlv.tlv_type = cpu_to_le16(tlv_type);
+ rtlv->tlv.length = cpu_to_le16(content_bytes);
+ rtlv->tlv.flags |= more ? TLV_FLAGS_MORE : 0;
+ rtlv->tlv.flags |= flags ? TLV_FLAGS_REQUIRED : 0;
+}
+
+/*
+ * TLV size in units of 16 byte chunks
+ */
+#define TLV_SIZE ((sizeof(struct roce_tlv) + 15) / 16)
+/*
+ * TLV length in bytes
+ */
+#define TLV_BYTES (TLV_SIZE * 16)
+
+#define HAS_TLV_HEADER(msg) (le16_to_cpu(((struct tlv *)(msg))->cmd_discr) == CMD_DISCR_TLV_ENCAP)
+#define GET_TLV_DATA(tlv) ((void *)&((uint8_t *)(tlv))[TLV_BYTES])
+
+static inline u8 __get_cmdq_base_opcode(struct cmdq_base *req, u32 size)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ return ((struct cmdq_base *)GET_TLV_DATA(req))->opcode;
+ else
+ return req->opcode;
+}
+
+static inline void __set_cmdq_base_opcode(struct cmdq_base *req,
+ u32 size, u8 val)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ ((struct cmdq_base *)GET_TLV_DATA(req))->opcode = val;
+ else
+ req->opcode = val;
+}
+
+static inline __le16 __get_cmdq_base_cookie(struct cmdq_base *req, u32 size)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ return ((struct cmdq_base *)GET_TLV_DATA(req))->cookie;
+ else
+ return req->cookie;
+}
+
+static inline void __set_cmdq_base_cookie(struct cmdq_base *req,
+ u32 size, __le16 val)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ ((struct cmdq_base *)GET_TLV_DATA(req))->cookie = val;
+ else
+ req->cookie = val;
+}
+
+static inline __le64 __get_cmdq_base_resp_addr(struct cmdq_base *req, u32 size)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ return ((struct cmdq_base *)GET_TLV_DATA(req))->resp_addr;
+ else
+ return req->resp_addr;
+}
+
+static inline void __set_cmdq_base_resp_addr(struct cmdq_base *req,
+ u32 size, __le64 val)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ ((struct cmdq_base *)GET_TLV_DATA(req))->resp_addr = val;
+ else
+ req->resp_addr = val;
+}
+
+static inline u8 __get_cmdq_base_resp_size(struct cmdq_base *req, u32 size)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ return ((struct cmdq_base *)GET_TLV_DATA(req))->resp_size;
+ else
+ return req->resp_size;
+}
+
+static inline void __set_cmdq_base_resp_size(struct cmdq_base *req,
+ u32 size, u8 val)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ ((struct cmdq_base *)GET_TLV_DATA(req))->resp_size = val;
+ else
+ req->resp_size = val;
+}
+
+static inline u8 __get_cmdq_base_cmd_size(struct cmdq_base *req, u32 size)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ return ((struct roce_tlv *)(req))->total_size;
+ else
+ return req->cmd_size;
+}
+
+static inline void __set_cmdq_base_cmd_size(struct cmdq_base *req,
+ u32 size, u8 val)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ ((struct cmdq_base *)GET_TLV_DATA(req))->cmd_size = val;
+ else
+ req->cmd_size = val;
+}
+
+static inline __le16 __get_cmdq_base_flags(struct cmdq_base *req, u32 size)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ return ((struct cmdq_base *)GET_TLV_DATA(req))->flags;
+ else
+ return req->flags;
+}
+
+static inline void __set_cmdq_base_flags(struct cmdq_base *req,
+ u32 size, __le16 val)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ ((struct cmdq_base *)GET_TLV_DATA(req))->flags = val;
+ else
+ req->flags = val;
+}
+
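+/* TLV-encapsulated MODIFY_ROCE_CC request: primary TLV header, legacy base
+ * command, padding, and the gen1 extension TLV.
+ */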
+struct bnxt_qplib_tlv_modify_cc_req {
+ struct roce_tlv tlv_hdr;
+ struct cmdq_modify_roce_cc base_req;
+ __le64 tlvpad;
+ struct cmdq_modify_roce_cc_gen1_tlv ext_req;
+};
+
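+/* TLV-encapsulated QUERY_ROCE_CC response side buffer: base response
+ * followed by the gen1 extension.
+ */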
+struct bnxt_qplib_tlv_query_rcc_sb {
+ struct roce_tlv tlv_hdr;
+ struct creq_query_roce_cc_resp_sb base_sb;
+ struct creq_query_roce_cc_gen1_resp_sb_tlv gen1_sb;
+};
+#endif /* __QPLIB_TLV_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
new file mode 100644
index 000000000000..99ecd72e72e2
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -0,0 +1,4729 @@
+/*
+ * Broadcom NetXtreme-E RoCE driver.
+ *
+ * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: RoCE HSI File - Autogenerated
+ */
+
+#ifndef __BNXT_RE_HSI_H__
+#define __BNXT_RE_HSI_H__
+
+/* include linux/bnxt/hsi.h */
+#include <linux/bnxt/hsi.h>
+
+/* tx_doorbell (size:32b/4B) */
+struct tx_doorbell {
+ __le32 key_idx;
+ #define TX_DOORBELL_IDX_MASK 0xffffffUL
+ #define TX_DOORBELL_IDX_SFT 0
+ #define TX_DOORBELL_KEY_MASK 0xf0000000UL
+ #define TX_DOORBELL_KEY_SFT 28
+ #define TX_DOORBELL_KEY_TX (0x0UL << 28)
+ #define TX_DOORBELL_KEY_LAST TX_DOORBELL_KEY_TX
+};
+
+/* rx_doorbell (size:32b/4B) */
+struct rx_doorbell {
+ __le32 key_idx;
+ #define RX_DOORBELL_IDX_MASK 0xffffffUL
+ #define RX_DOORBELL_IDX_SFT 0
+ #define RX_DOORBELL_KEY_MASK 0xf0000000UL
+ #define RX_DOORBELL_KEY_SFT 28
+ #define RX_DOORBELL_KEY_RX (0x1UL << 28)
+ #define RX_DOORBELL_KEY_LAST RX_DOORBELL_KEY_RX
+};
+
+/* cmpl_doorbell (size:32b/4B) */
+struct cmpl_doorbell {
+ __le32 key_mask_valid_idx;
+ #define CMPL_DOORBELL_IDX_MASK 0xffffffUL
+ #define CMPL_DOORBELL_IDX_SFT 0
+ #define CMPL_DOORBELL_IDX_VALID 0x4000000UL
+ #define CMPL_DOORBELL_MASK 0x8000000UL
+ #define CMPL_DOORBELL_KEY_MASK 0xf0000000UL
+ #define CMPL_DOORBELL_KEY_SFT 28
+ #define CMPL_DOORBELL_KEY_CMPL (0x2UL << 28)
+ #define CMPL_DOORBELL_KEY_LAST CMPL_DOORBELL_KEY_CMPL
+};
+
+/* status_doorbell (size:32b/4B) */
+struct status_doorbell {
+ __le32 key_idx;
+ #define STATUS_DOORBELL_IDX_MASK 0xffffffUL
+ #define STATUS_DOORBELL_IDX_SFT 0
+ #define STATUS_DOORBELL_KEY_MASK 0xf0000000UL
+ #define STATUS_DOORBELL_KEY_SFT 28
+ #define STATUS_DOORBELL_KEY_STAT (0x3UL << 28)
+ #define STATUS_DOORBELL_KEY_LAST STATUS_DOORBELL_KEY_STAT
+};
+
+/* cmdq_init (size:128b/16B) */
+struct cmdq_init {
+ __le64 cmdq_pbl;
+ __le16 cmdq_size_cmdq_lvl;
+ #define CMDQ_INIT_CMDQ_LVL_MASK 0x3UL
+ #define CMDQ_INIT_CMDQ_LVL_SFT 0
+ #define CMDQ_INIT_CMDQ_SIZE_MASK 0xfffcUL
+ #define CMDQ_INIT_CMDQ_SIZE_SFT 2
+ __le16 creq_ring_id;
+ __le32 prod_idx;
+};
+
+/* cmdq_base (size:128b/16B) */
+struct cmdq_base {
+ u8 opcode;
+ #define CMDQ_BASE_OPCODE_CREATE_QP 0x1UL
+ #define CMDQ_BASE_OPCODE_DESTROY_QP 0x2UL
+ #define CMDQ_BASE_OPCODE_MODIFY_QP 0x3UL
+ #define CMDQ_BASE_OPCODE_QUERY_QP 0x4UL
+ #define CMDQ_BASE_OPCODE_CREATE_SRQ 0x5UL
+ #define CMDQ_BASE_OPCODE_DESTROY_SRQ 0x6UL
+ #define CMDQ_BASE_OPCODE_QUERY_SRQ 0x8UL
+ #define CMDQ_BASE_OPCODE_CREATE_CQ 0x9UL
+ #define CMDQ_BASE_OPCODE_DESTROY_CQ 0xaUL
+ #define CMDQ_BASE_OPCODE_RESIZE_CQ 0xcUL
+ #define CMDQ_BASE_OPCODE_ALLOCATE_MRW 0xdUL
+ #define CMDQ_BASE_OPCODE_DEALLOCATE_KEY 0xeUL
+ #define CMDQ_BASE_OPCODE_REGISTER_MR 0xfUL
+ #define CMDQ_BASE_OPCODE_DEREGISTER_MR 0x10UL
+ #define CMDQ_BASE_OPCODE_ADD_GID 0x11UL
+ #define CMDQ_BASE_OPCODE_DELETE_GID 0x12UL
+ #define CMDQ_BASE_OPCODE_MODIFY_GID 0x17UL
+ #define CMDQ_BASE_OPCODE_QUERY_GID 0x18UL
+ #define CMDQ_BASE_OPCODE_CREATE_QP1 0x13UL
+ #define CMDQ_BASE_OPCODE_DESTROY_QP1 0x14UL
+ #define CMDQ_BASE_OPCODE_CREATE_AH 0x15UL
+ #define CMDQ_BASE_OPCODE_DESTROY_AH 0x16UL
+ #define CMDQ_BASE_OPCODE_INITIALIZE_FW 0x80UL
+ #define CMDQ_BASE_OPCODE_DEINITIALIZE_FW 0x81UL
+ #define CMDQ_BASE_OPCODE_STOP_FUNC 0x82UL
+ #define CMDQ_BASE_OPCODE_QUERY_FUNC 0x83UL
+ #define CMDQ_BASE_OPCODE_SET_FUNC_RESOURCES 0x84UL
+ #define CMDQ_BASE_OPCODE_READ_CONTEXT 0x85UL
+ #define CMDQ_BASE_OPCODE_VF_BACKCHANNEL_REQUEST 0x86UL
+ #define CMDQ_BASE_OPCODE_READ_VF_MEMORY 0x87UL
+ #define CMDQ_BASE_OPCODE_COMPLETE_VF_REQUEST 0x88UL
+ #define CMDQ_BASE_OPCODE_EXTEND_CONTEXT_ARRRAY 0x89UL
+ #define CMDQ_BASE_OPCODE_MAP_TC_TO_COS 0x8aUL
+ #define CMDQ_BASE_OPCODE_QUERY_VERSION 0x8bUL
+ #define CMDQ_BASE_OPCODE_MODIFY_ROCE_CC 0x8cUL
+ #define CMDQ_BASE_OPCODE_QUERY_ROCE_CC 0x8dUL
+ #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS 0x8eUL
+ #define CMDQ_BASE_OPCODE_SET_LINK_AGGR_MODE 0x8fUL
+ #define CMDQ_BASE_OPCODE_MODIFY_CQ 0x90UL
+ #define CMDQ_BASE_OPCODE_QUERY_QP_EXTEND 0x91UL
+ #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT 0x92UL
+ #define CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG 0x99UL
+ #define CMDQ_BASE_OPCODE_LAST CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+};
+
+/* creq_base (size:128b/16B) */
+struct creq_base {
+ u8 type;
+ #define CREQ_BASE_TYPE_MASK 0x3fUL
+ #define CREQ_BASE_TYPE_SFT 0
+ #define CREQ_BASE_TYPE_QP_EVENT 0x38UL
+ #define CREQ_BASE_TYPE_FUNC_EVENT 0x3aUL
+ #define CREQ_BASE_TYPE_LAST CREQ_BASE_TYPE_FUNC_EVENT
+ u8 reserved56[7];
+ u8 v;
+ #define CREQ_BASE_V 0x1UL
+ u8 event;
+ u8 reserved48[6];
+};
+
+/* cmdq_query_version (size:128b/16B) */
+struct cmdq_query_version {
+ u8 opcode;
+ #define CMDQ_QUERY_VERSION_OPCODE_QUERY_VERSION 0x8bUL
+ #define CMDQ_QUERY_VERSION_OPCODE_LAST CMDQ_QUERY_VERSION_OPCODE_QUERY_VERSION
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+};
+
+/* creq_query_version_resp (size:128b/16B) */
+struct creq_query_version_resp {
+ u8 type;
+ #define CREQ_QUERY_VERSION_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_VERSION_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_VERSION_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_VERSION_RESP_TYPE_LAST CREQ_QUERY_VERSION_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ u8 fw_maj;
+ u8 fw_minor;
+ u8 fw_bld;
+ u8 fw_rsvd;
+ u8 v;
+ #define CREQ_QUERY_VERSION_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_VERSION_RESP_EVENT_QUERY_VERSION 0x8bUL
+ #define CREQ_QUERY_VERSION_RESP_EVENT_LAST \
+ CREQ_QUERY_VERSION_RESP_EVENT_QUERY_VERSION
+ __le16 reserved16;
+ u8 intf_maj;
+ u8 intf_minor;
+ u8 intf_bld;
+ u8 intf_rsvd;
+};
+
+/* cmdq_initialize_fw (size:896b/112B) */
+struct cmdq_initialize_fw {
+ u8 opcode;
+ #define CMDQ_INITIALIZE_FW_OPCODE_INITIALIZE_FW 0x80UL
+ #define CMDQ_INITIALIZE_FW_OPCODE_LAST CMDQ_INITIALIZE_FW_OPCODE_INITIALIZE_FW
+ u8 cmd_size;
+ __le16 flags;
+ #define CMDQ_INITIALIZE_FW_FLAGS_MRAV_RESERVATION_SPLIT 0x1UL
+ #define CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED 0x2UL
+ #define CMDQ_INITIALIZE_FW_FLAGS_OPTIMIZE_MODIFY_QP_SUPPORTED 0x8UL
+ #define CMDQ_INITIALIZE_FW_FLAGS_L2_VF_RESOURCE_MGMT 0x10UL
+ #define CMDQ_INITIALIZE_FW_FLAGS_MIRROR_ON_ROCE_SUPPORTED 0x80UL
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ u8 qpc_pg_size_qpc_lvl;
+ #define CMDQ_INITIALIZE_FW_QPC_LVL_MASK 0xfUL
+ #define CMDQ_INITIALIZE_FW_QPC_LVL_SFT 0
+ #define CMDQ_INITIALIZE_FW_QPC_LVL_LVL_0 0x0UL
+ #define CMDQ_INITIALIZE_FW_QPC_LVL_LVL_1 0x1UL
+ #define CMDQ_INITIALIZE_FW_QPC_LVL_LVL_2 0x2UL
+ #define CMDQ_INITIALIZE_FW_QPC_LVL_LAST CMDQ_INITIALIZE_FW_QPC_LVL_LVL_2
+ #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_MASK 0xf0UL
+ #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT 4
+ #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_LAST CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G
+ u8 mrw_pg_size_mrw_lvl;
+ #define CMDQ_INITIALIZE_FW_MRW_LVL_MASK 0xfUL
+ #define CMDQ_INITIALIZE_FW_MRW_LVL_SFT 0
+ #define CMDQ_INITIALIZE_FW_MRW_LVL_LVL_0 0x0UL
+ #define CMDQ_INITIALIZE_FW_MRW_LVL_LVL_1 0x1UL
+ #define CMDQ_INITIALIZE_FW_MRW_LVL_LVL_2 0x2UL
+ #define CMDQ_INITIALIZE_FW_MRW_LVL_LAST CMDQ_INITIALIZE_FW_MRW_LVL_LVL_2
+ #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_MASK 0xf0UL
+ #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_SFT 4
+ #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_4K (0x0UL << 4)
+ #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_8K (0x1UL << 4)
+ #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_64K (0x2UL << 4)
+ #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_2M (0x3UL << 4)
+ #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_8M (0x4UL << 4)
+ #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_1G (0x5UL << 4)
+ #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_LAST CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_1G
+ u8 srq_pg_size_srq_lvl;
+ #define CMDQ_INITIALIZE_FW_SRQ_LVL_MASK 0xfUL
+ #define CMDQ_INITIALIZE_FW_SRQ_LVL_SFT 0
+ #define CMDQ_INITIALIZE_FW_SRQ_LVL_LVL_0 0x0UL
+ #define CMDQ_INITIALIZE_FW_SRQ_LVL_LVL_1 0x1UL
+ #define CMDQ_INITIALIZE_FW_SRQ_LVL_LVL_2 0x2UL
+ #define CMDQ_INITIALIZE_FW_SRQ_LVL_LAST CMDQ_INITIALIZE_FW_SRQ_LVL_LVL_2
+ #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_MASK 0xf0UL
+ #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_SFT 4
+ #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_LAST CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_1G
+ u8 cq_pg_size_cq_lvl;
+ #define CMDQ_INITIALIZE_FW_CQ_LVL_MASK 0xfUL
+ #define CMDQ_INITIALIZE_FW_CQ_LVL_SFT 0
+ #define CMDQ_INITIALIZE_FW_CQ_LVL_LVL_0 0x0UL
+ #define CMDQ_INITIALIZE_FW_CQ_LVL_LVL_1 0x1UL
+ #define CMDQ_INITIALIZE_FW_CQ_LVL_LVL_2 0x2UL
+ #define CMDQ_INITIALIZE_FW_CQ_LVL_LAST CMDQ_INITIALIZE_FW_CQ_LVL_LVL_2
+ #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_MASK 0xf0UL
+ #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_SFT 4
+ #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_LAST CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_1G
+ u8 tqm_pg_size_tqm_lvl;
+ #define CMDQ_INITIALIZE_FW_TQM_LVL_MASK 0xfUL
+ #define CMDQ_INITIALIZE_FW_TQM_LVL_SFT 0
+ #define CMDQ_INITIALIZE_FW_TQM_LVL_LVL_0 0x0UL
+ #define CMDQ_INITIALIZE_FW_TQM_LVL_LVL_1 0x1UL
+ #define CMDQ_INITIALIZE_FW_TQM_LVL_LVL_2 0x2UL
+ #define CMDQ_INITIALIZE_FW_TQM_LVL_LAST CMDQ_INITIALIZE_FW_TQM_LVL_LVL_2
+ #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_MASK 0xf0UL
+ #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_SFT 4
+ #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_4K (0x0UL << 4)
+ #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_8K (0x1UL << 4)
+ #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_64K (0x2UL << 4)
+ #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_2M (0x3UL << 4)
+ #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_8M (0x4UL << 4)
+ #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_1G (0x5UL << 4)
+ #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_LAST CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_1G
+ u8 tim_pg_size_tim_lvl;
+ #define CMDQ_INITIALIZE_FW_TIM_LVL_MASK 0xfUL
+ #define CMDQ_INITIALIZE_FW_TIM_LVL_SFT 0
+ #define CMDQ_INITIALIZE_FW_TIM_LVL_LVL_0 0x0UL
+ #define CMDQ_INITIALIZE_FW_TIM_LVL_LVL_1 0x1UL
+ #define CMDQ_INITIALIZE_FW_TIM_LVL_LVL_2 0x2UL
+ #define CMDQ_INITIALIZE_FW_TIM_LVL_LAST CMDQ_INITIALIZE_FW_TIM_LVL_LVL_2
+ #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_MASK 0xf0UL
+ #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_SFT 4
+ #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_4K (0x0UL << 4)
+ #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8K (0x1UL << 4)
+ #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_64K (0x2UL << 4)
+ #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M (0x3UL << 4)
+ #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M (0x4UL << 4)
+ #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G (0x5UL << 4)
+ #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_LAST CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G
+ __le16 log2_dbr_pg_size;
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK 0xfUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT 0
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4K 0x0UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8K 0x1UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16K 0x2UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32K 0x3UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K 0x4UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128K 0x5UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_256K 0x6UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_512K 0x7UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_1M 0x8UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_2M 0x9UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4M 0xaUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8M 0xbUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16M 0xcUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32M 0xdUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64M 0xeUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M 0xfUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_LAST \
+ CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M
+ #define CMDQ_INITIALIZE_FW_RSVD_MASK 0xfff0UL
+ #define CMDQ_INITIALIZE_FW_RSVD_SFT 4
+ __le64 qpc_page_dir;
+ __le64 mrw_page_dir;
+ __le64 srq_page_dir;
+ __le64 cq_page_dir;
+ __le64 tqm_page_dir;
+ __le64 tim_page_dir;
+ __le32 number_of_qp;
+ __le32 number_of_mrw;
+ __le32 number_of_srq;
+ __le32 number_of_cq;
+ __le32 max_qp_per_vf;
+ __le32 max_mrw_per_vf;
+ __le32 max_srq_per_vf;
+ __le32 max_cq_per_vf;
+ __le32 max_gid_per_vf;
+ __le32 stat_ctx_id;
+};
+
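Each of the *_pg_size_*_lvl bytes in cmdq_initialize_fw packs a page-directory (PBL) indirection level into bits 3:0 and a page-size code into bits 7:4, with the *_PG_SIZE_* values above already pre-shifted. A minimal sketch of composing one such byte, using the SRQ field as an example (hypothetical helper name, illustrative only, not code from this patch):

    static void example_init_fw_srq_geometry(struct cmdq_initialize_fw *req)
    {
            /* Level-1 SRQ context PBL backed by 8 KiB pages: the level code
             * occupies bits 3:0, the pre-shifted page-size code bits 7:4,
             * so the two values simply OR together (result: 0x11). */
            req->srq_pg_size_srq_lvl = CMDQ_INITIALIZE_FW_SRQ_LVL_LVL_1 |
                                       CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_8K;
    }

The same pattern applies to the CQ, TQM and TIM fields that follow.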
+/* creq_initialize_fw_resp (size:128b/16B) */
+struct creq_initialize_fw_resp {
+ u8 type;
+ #define CREQ_INITIALIZE_FW_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_INITIALIZE_FW_RESP_TYPE_SFT 0
+ #define CREQ_INITIALIZE_FW_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_INITIALIZE_FW_RESP_TYPE_LAST CREQ_INITIALIZE_FW_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 reserved32;
+ u8 v;
+ #define CREQ_INITIALIZE_FW_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_INITIALIZE_FW_RESP_EVENT_INITIALIZE_FW 0x80UL
+ #define CREQ_INITIALIZE_FW_RESP_EVENT_LAST \
+ CREQ_INITIALIZE_FW_RESP_EVENT_INITIALIZE_FW
+ u8 reserved48[6];
+};
+
+/* cmdq_deinitialize_fw (size:128b/16B) */
+struct cmdq_deinitialize_fw {
+ u8 opcode;
+ #define CMDQ_DEINITIALIZE_FW_OPCODE_DEINITIALIZE_FW 0x81UL
+ #define CMDQ_DEINITIALIZE_FW_OPCODE_LAST \
+ CMDQ_DEINITIALIZE_FW_OPCODE_DEINITIALIZE_FW
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+};
+
+/* creq_deinitialize_fw_resp (size:128b/16B) */
+struct creq_deinitialize_fw_resp {
+ u8 type;
+ #define CREQ_DEINITIALIZE_FW_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_DEINITIALIZE_FW_RESP_TYPE_SFT 0
+ #define CREQ_DEINITIALIZE_FW_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_DEINITIALIZE_FW_RESP_TYPE_LAST CREQ_DEINITIALIZE_FW_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 reserved32;
+ u8 v;
+ #define CREQ_DEINITIALIZE_FW_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_DEINITIALIZE_FW_RESP_EVENT_DEINITIALIZE_FW 0x81UL
+ #define CREQ_DEINITIALIZE_FW_RESP_EVENT_LAST \
+ CREQ_DEINITIALIZE_FW_RESP_EVENT_DEINITIALIZE_FW
+ u8 reserved48[6];
+};
+
+/* cmdq_create_qp (size:832b/104B) */
+struct cmdq_create_qp {
+ u8 opcode;
+ #define CMDQ_CREATE_QP_OPCODE_CREATE_QP 0x1UL
+ #define CMDQ_CREATE_QP_OPCODE_LAST CMDQ_CREATE_QP_OPCODE_CREATE_QP
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le64 qp_handle;
+ __le32 qp_flags;
+ #define CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED 0x1UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION 0x2UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE 0x4UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED 0x8UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED 0x10UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_OPTIMIZED_TRANSMIT_ENABLED 0x20UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_RESPONDER_UD_CQE_WITH_CFA 0x40UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED 0x80UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_EXPRESS_MODE_ENABLED 0x100UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_STEERING_TAG_VALID 0x200UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_RDMA_READ_OR_ATOMICS_USED 0x400UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_LAST \
+ CMDQ_CREATE_QP_QP_FLAGS_RDMA_READ_OR_ATOMICS_USED
+ u8 type;
+ #define CMDQ_CREATE_QP_TYPE_RC 0x2UL
+ #define CMDQ_CREATE_QP_TYPE_UD 0x4UL
+ #define CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE 0x6UL
+ #define CMDQ_CREATE_QP_TYPE_GSI 0x7UL
+ #define CMDQ_CREATE_QP_TYPE_LAST CMDQ_CREATE_QP_TYPE_GSI
+ u8 sq_pg_size_sq_lvl;
+ #define CMDQ_CREATE_QP_SQ_LVL_MASK 0xfUL
+ #define CMDQ_CREATE_QP_SQ_LVL_SFT 0
+ #define CMDQ_CREATE_QP_SQ_LVL_LVL_0 0x0UL
+ #define CMDQ_CREATE_QP_SQ_LVL_LVL_1 0x1UL
+ #define CMDQ_CREATE_QP_SQ_LVL_LVL_2 0x2UL
+ #define CMDQ_CREATE_QP_SQ_LVL_LAST CMDQ_CREATE_QP_SQ_LVL_LVL_2
+ #define CMDQ_CREATE_QP_SQ_PG_SIZE_MASK 0xf0UL
+ #define CMDQ_CREATE_QP_SQ_PG_SIZE_SFT 4
+ #define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define CMDQ_CREATE_QP_SQ_PG_SIZE_LAST CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G
+ u8 rq_pg_size_rq_lvl;
+ #define CMDQ_CREATE_QP_RQ_LVL_MASK 0xfUL
+ #define CMDQ_CREATE_QP_RQ_LVL_SFT 0
+ #define CMDQ_CREATE_QP_RQ_LVL_LVL_0 0x0UL
+ #define CMDQ_CREATE_QP_RQ_LVL_LVL_1 0x1UL
+ #define CMDQ_CREATE_QP_RQ_LVL_LVL_2 0x2UL
+ #define CMDQ_CREATE_QP_RQ_LVL_LAST CMDQ_CREATE_QP_RQ_LVL_LVL_2
+ #define CMDQ_CREATE_QP_RQ_PG_SIZE_MASK 0xf0UL
+ #define CMDQ_CREATE_QP_RQ_PG_SIZE_SFT 4
+ #define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define CMDQ_CREATE_QP_RQ_PG_SIZE_LAST CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G
+ u8 unused_0;
+ __le32 dpi;
+ __le32 sq_size;
+ __le32 rq_size;
+ __le16 sq_fwo_sq_sge;
+ #define CMDQ_CREATE_QP_SQ_SGE_MASK 0xfUL
+ #define CMDQ_CREATE_QP_SQ_SGE_SFT 0
+ #define CMDQ_CREATE_QP_SQ_FWO_MASK 0xfff0UL
+ #define CMDQ_CREATE_QP_SQ_FWO_SFT 4
+ __le16 rq_fwo_rq_sge;
+ #define CMDQ_CREATE_QP_RQ_SGE_MASK 0xfUL
+ #define CMDQ_CREATE_QP_RQ_SGE_SFT 0
+ #define CMDQ_CREATE_QP_RQ_FWO_MASK 0xfff0UL
+ #define CMDQ_CREATE_QP_RQ_FWO_SFT 4
+ __le32 scq_cid;
+ __le32 rcq_cid;
+ __le32 srq_cid;
+ __le32 pd_id;
+ __le64 sq_pbl;
+ __le64 rq_pbl;
+ __le64 irrq_addr;
+ __le64 orrq_addr;
+ __le32 request_xid;
+ __le16 steering_tag;
+ __le16 reserved16;
+};
+
+/* creq_create_qp_resp (size:128b/16B) */
+struct creq_create_qp_resp {
+ u8 type;
+ #define CREQ_CREATE_QP_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_CREATE_QP_RESP_TYPE_SFT 0
+ #define CREQ_CREATE_QP_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_CREATE_QP_RESP_TYPE_LAST CREQ_CREATE_QP_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_CREATE_QP_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_CREATE_QP_RESP_EVENT_CREATE_QP 0x1UL
+ #define CREQ_CREATE_QP_RESP_EVENT_LAST CREQ_CREATE_QP_RESP_EVENT_CREATE_QP
+ u8 optimized_transmit_enabled;
+ u8 reserved48[5];
+};
+
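Every creq_*_resp completion in this file shares the same 16-byte shape: the low six bits of type carry the event class (0x38, a QP event), bit 0 of v is the CREQ valid bit, and event echoes the opcode of the command that completed. A minimal, illustrative sanity check for the create-QP completion (hypothetical helper, not part of this patch):

    static bool example_is_create_qp_completion(const struct creq_create_qp_resp *resp)
    {
            if ((resp->type & CREQ_CREATE_QP_RESP_TYPE_MASK) !=
                CREQ_CREATE_QP_RESP_TYPE_QP_EVENT)
                    return false;   /* not a QP-event completion */

            /* 'event' echoes the CREATE_QP opcode (0x1). */
            return resp->event == CREQ_CREATE_QP_RESP_EVENT_CREATE_QP;
    }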
+/* cmdq_destroy_qp (size:192b/24B) */
+struct cmdq_destroy_qp {
+ u8 opcode;
+ #define CMDQ_DESTROY_QP_OPCODE_DESTROY_QP 0x2UL
+ #define CMDQ_DESTROY_QP_OPCODE_LAST CMDQ_DESTROY_QP_OPCODE_DESTROY_QP
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 qp_cid;
+ __le32 unused_0;
+};
+
+/* creq_destroy_qp_resp (size:128b/16B) */
+struct creq_destroy_qp_resp {
+ u8 type;
+ #define CREQ_DESTROY_QP_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_DESTROY_QP_RESP_TYPE_SFT 0
+ #define CREQ_DESTROY_QP_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_DESTROY_QP_RESP_TYPE_LAST CREQ_DESTROY_QP_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_DESTROY_QP_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_DESTROY_QP_RESP_EVENT_DESTROY_QP 0x2UL
+ #define CREQ_DESTROY_QP_RESP_EVENT_LAST CREQ_DESTROY_QP_RESP_EVENT_DESTROY_QP
+ u8 reserved48[6];
+};
+
+/* cmdq_modify_qp (size:1024b/128B) */
+struct cmdq_modify_qp {
+ u8 opcode;
+ #define CMDQ_MODIFY_QP_OPCODE_MODIFY_QP 0x3UL
+ #define CMDQ_MODIFY_QP_OPCODE_LAST CMDQ_MODIFY_QP_OPCODE_MODIFY_QP
+ u8 cmd_size;
+ __le16 flags;
+ #define CMDQ_MODIFY_QP_FLAGS_SRQ_USED 0x1UL
+ __le16 cookie;
+ u8 resp_size;
+ u8 qp_type;
+ #define CMDQ_MODIFY_QP_QP_TYPE_RC 0x2UL
+ #define CMDQ_MODIFY_QP_QP_TYPE_UD 0x4UL
+ #define CMDQ_MODIFY_QP_QP_TYPE_RAW_ETHERTYPE 0x6UL
+ #define CMDQ_MODIFY_QP_QP_TYPE_GSI 0x7UL
+ #define CMDQ_MODIFY_QP_QP_TYPE_LAST CMDQ_MODIFY_QP_QP_TYPE_GSI
+ __le64 resp_addr;
+ __le32 modify_mask;
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_STATE 0x1UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY 0x2UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS 0x4UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_PKEY 0x8UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_QKEY 0x10UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_DGID 0x20UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL 0x40UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX 0x80UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT 0x100UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS 0x200UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC 0x400UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_PINGPONG_PUSH_MODE 0x800UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU 0x1000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT 0x2000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT 0x4000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY 0x8000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN 0x10000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC 0x20000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER 0x40000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN 0x80000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC 0x100000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE 0x200000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE 0x400000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE 0x800000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE 0x1000000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA 0x2000000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID 0x4000000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC 0x8000000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID 0x10000000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_ENABLE_CC 0x20000000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_TOS_ECN 0x40000000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_TOS_DSCP 0x80000000UL
+ __le32 qp_cid;
+ u8 network_type_en_sqd_async_notify_new_state;
+ #define CMDQ_MODIFY_QP_NEW_STATE_MASK 0xfUL
+ #define CMDQ_MODIFY_QP_NEW_STATE_SFT 0
+ #define CMDQ_MODIFY_QP_NEW_STATE_RESET 0x0UL
+ #define CMDQ_MODIFY_QP_NEW_STATE_INIT 0x1UL
+ #define CMDQ_MODIFY_QP_NEW_STATE_RTR 0x2UL
+ #define CMDQ_MODIFY_QP_NEW_STATE_RTS 0x3UL
+ #define CMDQ_MODIFY_QP_NEW_STATE_SQD 0x4UL
+ #define CMDQ_MODIFY_QP_NEW_STATE_SQE 0x5UL
+ #define CMDQ_MODIFY_QP_NEW_STATE_ERR 0x6UL
+ #define CMDQ_MODIFY_QP_NEW_STATE_LAST CMDQ_MODIFY_QP_NEW_STATE_ERR
+ #define CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY 0x10UL
+ #define CMDQ_MODIFY_QP_UNUSED1 0x20UL
+ #define CMDQ_MODIFY_QP_NETWORK_TYPE_MASK 0xc0UL
+ #define CMDQ_MODIFY_QP_NETWORK_TYPE_SFT 6
+ #define CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1 (0x0UL << 6)
+ #define CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4 (0x2UL << 6)
+ #define CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6 (0x3UL << 6)
+ #define CMDQ_MODIFY_QP_NETWORK_TYPE_LAST CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6
+ u8 access;
+ #define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC_REMOTE_READ_REMOTE_WRITE_LOCAL_WRITE_MASK 0xffUL
+ #define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC_REMOTE_READ_REMOTE_WRITE_LOCAL_WRITE_SFT 0
+ #define CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE 0x1UL
+ #define CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE 0x2UL
+ #define CMDQ_MODIFY_QP_ACCESS_REMOTE_READ 0x4UL
+ #define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC 0x8UL
+ __le16 pkey;
+ __le32 qkey;
+ __le32 dgid[4];
+ __le32 flow_label;
+ __le16 sgid_index;
+ u8 hop_limit;
+ u8 traffic_class;
+ __le16 dest_mac[3];
+ u8 tos_dscp_tos_ecn;
+ #define CMDQ_MODIFY_QP_TOS_ECN_MASK 0x3UL
+ #define CMDQ_MODIFY_QP_TOS_ECN_SFT 0
+ #define CMDQ_MODIFY_QP_TOS_DSCP_MASK 0xfcUL
+ #define CMDQ_MODIFY_QP_TOS_DSCP_SFT 2
+ u8 path_mtu_pingpong_push_enable;
+ #define CMDQ_MODIFY_QP_PINGPONG_PUSH_ENABLE 0x1UL
+ #define CMDQ_MODIFY_QP_UNUSED3_MASK 0xeUL
+ #define CMDQ_MODIFY_QP_UNUSED3_SFT 1
+ #define CMDQ_MODIFY_QP_PATH_MTU_MASK 0xf0UL
+ #define CMDQ_MODIFY_QP_PATH_MTU_SFT 4
+ #define CMDQ_MODIFY_QP_PATH_MTU_MTU_256 (0x0UL << 4)
+ #define CMDQ_MODIFY_QP_PATH_MTU_MTU_512 (0x1UL << 4)
+ #define CMDQ_MODIFY_QP_PATH_MTU_MTU_1024 (0x2UL << 4)
+ #define CMDQ_MODIFY_QP_PATH_MTU_MTU_2048 (0x3UL << 4)
+ #define CMDQ_MODIFY_QP_PATH_MTU_MTU_4096 (0x4UL << 4)
+ #define CMDQ_MODIFY_QP_PATH_MTU_MTU_8192 (0x5UL << 4)
+ #define CMDQ_MODIFY_QP_PATH_MTU_LAST CMDQ_MODIFY_QP_PATH_MTU_MTU_8192
+ u8 timeout;
+ u8 retry_cnt;
+ u8 rnr_retry;
+ u8 min_rnr_timer;
+ __le32 rq_psn;
+ __le32 sq_psn;
+ u8 max_rd_atomic;
+ u8 max_dest_rd_atomic;
+ __le16 enable_cc;
+ #define CMDQ_MODIFY_QP_ENABLE_CC 0x1UL
+ #define CMDQ_MODIFY_QP_UNUSED15_MASK 0xfffeUL
+ #define CMDQ_MODIFY_QP_UNUSED15_SFT 1
+ __le32 sq_size;
+ __le32 rq_size;
+ __le16 sq_sge;
+ __le16 rq_sge;
+ __le32 max_inline_data;
+ __le32 dest_qp_id;
+ __le32 pingpong_push_dpi;
+ __le16 src_mac[3];
+ __le16 vlan_pcp_vlan_dei_vlan_id;
+ #define CMDQ_MODIFY_QP_VLAN_ID_MASK 0xfffUL
+ #define CMDQ_MODIFY_QP_VLAN_ID_SFT 0
+ #define CMDQ_MODIFY_QP_VLAN_DEI 0x1000UL
+ #define CMDQ_MODIFY_QP_VLAN_PCP_MASK 0xe000UL
+ #define CMDQ_MODIFY_QP_VLAN_PCP_SFT 13
+ __le64 irrq_addr;
+ __le64 orrq_addr;
+ __le32 ext_modify_mask;
+ #define CMDQ_MODIFY_QP_EXT_MODIFY_MASK_EXT_STATS_CTX 0x1UL
+ #define CMDQ_MODIFY_QP_EXT_MODIFY_MASK_SCHQ_ID_VALID 0x2UL
+ __le32 ext_stats_ctx_id;
+ __le16 schq_id;
+ __le16 unused_0;
+ __le32 reserved32;
+};
+
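modify_mask selects which of the attribute fields that follow the firmware should actually apply, mirroring the usual RDMA QP-attribute mask idea. A minimal sketch of an INIT-to-RTR style request (hypothetical helper and example values; not the driver's actual code path):

    static void example_fill_modify_qp_rtr(struct cmdq_modify_qp *req)
    {
            /* Apply only state, path MTU and RQ PSN; other fields are ignored. */
            req->modify_mask = cpu_to_le32(CMDQ_MODIFY_QP_MODIFY_MASK_STATE |
                                           CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
                                           CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN);
            /* New state lives in bits 3:0 of the combined byte. */
            req->network_type_en_sqd_async_notify_new_state =
                    CMDQ_MODIFY_QP_NEW_STATE_RTR;
            /* Path MTU code sits in bits 7:4; the values are pre-shifted. */
            req->path_mtu_pingpong_push_enable = CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
            req->rq_psn = cpu_to_le32(0x1000);      /* arbitrary example PSN */
    }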
+/* creq_modify_qp_resp (size:128b/16B) */
+struct creq_modify_qp_resp {
+ u8 type;
+ #define CREQ_MODIFY_QP_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_MODIFY_QP_RESP_TYPE_SFT 0
+ #define CREQ_MODIFY_QP_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_MODIFY_QP_RESP_TYPE_LAST CREQ_MODIFY_QP_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_MODIFY_QP_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_MODIFY_QP_RESP_EVENT_MODIFY_QP 0x3UL
+ #define CREQ_MODIFY_QP_RESP_EVENT_LAST CREQ_MODIFY_QP_RESP_EVENT_MODIFY_QP
+ u8 pingpong_push_state_index_enabled;
+ #define CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_ENABLED 0x1UL
+ #define CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_INDEX_MASK 0xeUL
+ #define CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_INDEX_SFT 1
+ #define CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_STATE 0x10UL
+ u8 reserved8;
+ __le32 lag_src_mac;
+};
+
+/* cmdq_query_qp (size:192b/24B) */
+struct cmdq_query_qp {
+ u8 opcode;
+ #define CMDQ_QUERY_QP_OPCODE_QUERY_QP 0x4UL
+ #define CMDQ_QUERY_QP_OPCODE_LAST CMDQ_QUERY_QP_OPCODE_QUERY_QP
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 qp_cid;
+ __le32 unused_0;
+};
+
+/* creq_query_qp_resp (size:128b/16B) */
+struct creq_query_qp_resp {
+ u8 type;
+ #define CREQ_QUERY_QP_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_QP_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_QP_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_QP_RESP_TYPE_LAST CREQ_QUERY_QP_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 size;
+ u8 v;
+ #define CREQ_QUERY_QP_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_QP_RESP_EVENT_QUERY_QP 0x4UL
+ #define CREQ_QUERY_QP_RESP_EVENT_LAST CREQ_QUERY_QP_RESP_EVENT_QUERY_QP
+ u8 reserved48[6];
+};
+
+/* creq_query_qp_resp_sb (size:832b/104B) */
+struct creq_query_qp_resp_sb {
+ u8 opcode;
+ #define CREQ_QUERY_QP_RESP_SB_OPCODE_QUERY_QP 0x4UL
+ #define CREQ_QUERY_QP_RESP_SB_OPCODE_LAST CREQ_QUERY_QP_RESP_SB_OPCODE_QUERY_QP
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 reserved8;
+ __le32 xid;
+ u8 en_sqd_async_notify_state;
+ #define CREQ_QUERY_QP_RESP_SB_STATE_MASK 0xfUL
+ #define CREQ_QUERY_QP_RESP_SB_STATE_SFT 0
+ #define CREQ_QUERY_QP_RESP_SB_STATE_RESET 0x0UL
+ #define CREQ_QUERY_QP_RESP_SB_STATE_INIT 0x1UL
+ #define CREQ_QUERY_QP_RESP_SB_STATE_RTR 0x2UL
+ #define CREQ_QUERY_QP_RESP_SB_STATE_RTS 0x3UL
+ #define CREQ_QUERY_QP_RESP_SB_STATE_SQD 0x4UL
+ #define CREQ_QUERY_QP_RESP_SB_STATE_SQE 0x5UL
+ #define CREQ_QUERY_QP_RESP_SB_STATE_ERR 0x6UL
+ #define CREQ_QUERY_QP_RESP_SB_STATE_LAST CREQ_QUERY_QP_RESP_SB_STATE_ERR
+ #define CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY 0x10UL
+ #define CREQ_QUERY_QP_RESP_SB_UNUSED3_MASK 0xe0UL
+ #define CREQ_QUERY_QP_RESP_SB_UNUSED3_SFT 5
+ u8 access;
+ #define \
+ CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_ATOMIC_REMOTE_READ_REMOTE_WRITE_LOCAL_WRITE_MASK\
+ 0xffUL
+ #define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_ATOMIC_REMOTE_READ_REMOTE_WRITE_LOCAL_WRITE_SFT\
+ 0
+ #define CREQ_QUERY_QP_RESP_SB_ACCESS_LOCAL_WRITE 0x1UL
+ #define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_WRITE 0x2UL
+ #define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_READ 0x4UL
+ #define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_ATOMIC 0x8UL
+ __le16 pkey;
+ __le32 qkey;
+ __le16 udp_src_port;
+ __le16 reserved16;
+ __le32 dgid[4];
+ __le32 flow_label;
+ __le16 sgid_index;
+ u8 hop_limit;
+ u8 traffic_class;
+ __le16 dest_mac[3];
+ __le16 path_mtu_dest_vlan_id;
+ #define CREQ_QUERY_QP_RESP_SB_DEST_VLAN_ID_MASK 0xfffUL
+ #define CREQ_QUERY_QP_RESP_SB_DEST_VLAN_ID_SFT 0
+ #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK 0xf000UL
+ #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT 12
+ #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_256 (0x0UL << 12)
+ #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_512 (0x1UL << 12)
+ #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_1024 (0x2UL << 12)
+ #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_2048 (0x3UL << 12)
+ #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_4096 (0x4UL << 12)
+ #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_8192 (0x5UL << 12)
+ #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_LAST CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_8192
+ u8 timeout;
+ u8 retry_cnt;
+ u8 rnr_retry;
+ u8 min_rnr_timer;
+ __le32 rq_psn;
+ __le32 sq_psn;
+ u8 max_rd_atomic;
+ u8 max_dest_rd_atomic;
+ u8 tos_dscp_tos_ecn;
+ #define CREQ_QUERY_QP_RESP_SB_TOS_ECN_MASK 0x3UL
+ #define CREQ_QUERY_QP_RESP_SB_TOS_ECN_SFT 0
+ #define CREQ_QUERY_QP_RESP_SB_TOS_DSCP_MASK 0xfcUL
+ #define CREQ_QUERY_QP_RESP_SB_TOS_DSCP_SFT 2
+ u8 enable_cc;
+ #define CREQ_QUERY_QP_RESP_SB_ENABLE_CC 0x1UL
+ __le32 sq_size;
+ __le32 rq_size;
+ __le16 sq_sge;
+ __le16 rq_sge;
+ __le32 max_inline_data;
+ __le32 dest_qp_id;
+ __le16 port_id;
+ u8 unused_0;
+ u8 stat_collection_id;
+ __le16 src_mac[3];
+ __le16 vlan_pcp_vlan_dei_vlan_id;
+ #define CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK 0xfffUL
+ #define CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT 0
+ #define CREQ_QUERY_QP_RESP_SB_VLAN_DEI 0x1000UL
+ #define CREQ_QUERY_QP_RESP_SB_VLAN_PCP_MASK 0xe000UL
+ #define CREQ_QUERY_QP_RESP_SB_VLAN_PCP_SFT 13
+};
+
+/* cmdq_query_qp_extend (size:192b/24B) */
+struct cmdq_query_qp_extend {
+ u8 opcode;
+ #define CMDQ_QUERY_QP_EXTEND_OPCODE_QUERY_QP_EXTEND 0x91UL
+ #define CMDQ_QUERY_QP_EXTEND_OPCODE_LAST CMDQ_QUERY_QP_EXTEND_OPCODE_QUERY_QP_EXTEND
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 num_qps;
+ __le64 resp_addr;
+ __le32 function_id;
+ #define CMDQ_QUERY_QP_EXTEND_PF_NUM_MASK 0xffUL
+ #define CMDQ_QUERY_QP_EXTEND_PF_NUM_SFT 0
+ #define CMDQ_QUERY_QP_EXTEND_VF_NUM_MASK 0xffff00UL
+ #define CMDQ_QUERY_QP_EXTEND_VF_NUM_SFT 8
+ #define CMDQ_QUERY_QP_EXTEND_VF_VALID 0x1000000UL
+ __le32 current_index;
+};
+
+/* creq_query_qp_extend_resp (size:128b/16B) */
+struct creq_query_qp_extend_resp {
+ u8 type;
+ #define CREQ_QUERY_QP_EXTEND_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_QP_EXTEND_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_QP_EXTEND_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_TYPE_LAST CREQ_QUERY_QP_EXTEND_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 size;
+ u8 v;
+ #define CREQ_QUERY_QP_EXTEND_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_QP_EXTEND_RESP_EVENT_QUERY_QP_EXTEND 0x91UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_EVENT_LAST CREQ_QUERY_QP_EXTEND_RESP_EVENT_QUERY_QP_EXTEND
+ __le16 reserved16;
+ __le32 current_index;
+};
+
+/* creq_query_qp_extend_resp_sb (size:384b/48B) */
+struct creq_query_qp_extend_resp_sb {
+ u8 opcode;
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_OPCODE_QUERY_QP_EXTEND 0x91UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_OPCODE_LAST \
+ CREQ_QUERY_QP_EXTEND_RESP_SB_OPCODE_QUERY_QP_EXTEND
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 reserved8;
+ __le32 xid;
+ u8 state;
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_MASK 0xfUL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_SFT 0
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_RESET 0x0UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_INIT 0x1UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_RTR 0x2UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_RTS 0x3UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_SQD 0x4UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_SQE 0x5UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_ERR 0x6UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_LAST CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_ERR
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_UNUSED4_MASK 0xf0UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_UNUSED4_SFT 4
+ u8 reserved_8;
+ __le16 port_id;
+ __le32 qkey;
+ __le16 sgid_index;
+ u8 network_type;
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_NETWORK_TYPE_ROCEV1 0x0UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_NETWORK_TYPE_ROCEV2_IPV4 0x2UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_NETWORK_TYPE_ROCEV2_IPV6 0x3UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_NETWORK_TYPE_LAST \
+ CREQ_QUERY_QP_EXTEND_RESP_SB_NETWORK_TYPE_ROCEV2_IPV6
+ u8 unused_0;
+ __le32 dgid[4];
+ __le32 dest_qp_id;
+ u8 stat_collection_id;
+ u8 reservred_8;
+ __le16 reserved_16;
+};
+
+/* creq_query_qp_extend_resp_sb_tlv (size:512b/64B) */
+struct creq_query_qp_extend_resp_sb_tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 tlv_flags;
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_MORE 0x1UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_MORE_LAST 0x0UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_REQUIRED 0x2UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_REQUIRED_LAST \
+ CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+ u8 total_size;
+ u8 reserved56[7];
+ u8 opcode;
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_OPCODE_QUERY_QP_EXTEND 0x91UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_OPCODE_LAST \
+ CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_OPCODE_QUERY_QP_EXTEND
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 reserved8;
+ __le32 xid;
+ u8 state;
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_MASK 0xfUL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_SFT 0
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_RESET 0x0UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_INIT 0x1UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_RTR 0x2UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_RTS 0x3UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_SQD 0x4UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_SQE 0x5UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_ERR 0x6UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_LAST \
+ CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_ERR
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_UNUSED4_MASK 0xf0UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_UNUSED4_SFT 4
+ u8 reserved_8;
+ __le16 port_id;
+ __le32 qkey;
+ __le16 sgid_index;
+ u8 network_type;
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_NETWORK_TYPE_ROCEV1 0x0UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_NETWORK_TYPE_ROCEV2_IPV4 0x2UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_NETWORK_TYPE_ROCEV2_IPV6 0x3UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_NETWORK_TYPE_LAST \
+ CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_NETWORK_TYPE_ROCEV2_IPV6
+ u8 unused_0;
+ __le32 dgid[4];
+ __le32 dest_qp_id;
+ u8 stat_collection_id;
+ u8 reservred_8;
+ __le16 reserved_16;
+};
+
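The TLV variant of the query-QP-extend side buffer prepends an 8-byte TLV header to the same payload. Judging by the flag names, bit 0 of tlv_flags says whether another TLV follows and bit 1 whether the receiver is required to understand this one. A small illustrative accessor (hypothetical helper; the MORE semantics are inferred from the macro names):

    static bool example_tlv_has_more(const struct creq_query_qp_extend_resp_sb_tlv *sb)
    {
            return (sb->tlv_flags &
                    CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_MORE) ==
                   CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_MORE_NOT_LAST;
    }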
+/* cmdq_create_srq (size:448b/56B) */
+struct cmdq_create_srq {
+ u8 opcode;
+ #define CMDQ_CREATE_SRQ_OPCODE_CREATE_SRQ 0x5UL
+ #define CMDQ_CREATE_SRQ_OPCODE_LAST CMDQ_CREATE_SRQ_OPCODE_CREATE_SRQ
+ u8 cmd_size;
+ __le16 flags;
+ #define CMDQ_CREATE_SRQ_FLAGS_STEERING_TAG_VALID 0x1UL
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le64 srq_handle;
+ __le16 pg_size_lvl;
+ #define CMDQ_CREATE_SRQ_LVL_MASK 0x3UL
+ #define CMDQ_CREATE_SRQ_LVL_SFT 0
+ #define CMDQ_CREATE_SRQ_LVL_LVL_0 0x0UL
+ #define CMDQ_CREATE_SRQ_LVL_LVL_1 0x1UL
+ #define CMDQ_CREATE_SRQ_LVL_LVL_2 0x2UL
+ #define CMDQ_CREATE_SRQ_LVL_LAST CMDQ_CREATE_SRQ_LVL_LVL_2
+ #define CMDQ_CREATE_SRQ_PG_SIZE_MASK 0x1cUL
+ #define CMDQ_CREATE_SRQ_PG_SIZE_SFT 2
+ #define CMDQ_CREATE_SRQ_PG_SIZE_PG_4K (0x0UL << 2)
+ #define CMDQ_CREATE_SRQ_PG_SIZE_PG_8K (0x1UL << 2)
+ #define CMDQ_CREATE_SRQ_PG_SIZE_PG_64K (0x2UL << 2)
+ #define CMDQ_CREATE_SRQ_PG_SIZE_PG_2M (0x3UL << 2)
+ #define CMDQ_CREATE_SRQ_PG_SIZE_PG_8M (0x4UL << 2)
+ #define CMDQ_CREATE_SRQ_PG_SIZE_PG_1G (0x5UL << 2)
+ #define CMDQ_CREATE_SRQ_PG_SIZE_LAST CMDQ_CREATE_SRQ_PG_SIZE_PG_1G
+ #define CMDQ_CREATE_SRQ_UNUSED11_MASK 0xffe0UL
+ #define CMDQ_CREATE_SRQ_UNUSED11_SFT 5
+ __le16 eventq_id;
+ #define CMDQ_CREATE_SRQ_EVENTQ_ID_MASK 0xfffUL
+ #define CMDQ_CREATE_SRQ_EVENTQ_ID_SFT 0
+ #define CMDQ_CREATE_SRQ_UNUSED4_MASK 0xf000UL
+ #define CMDQ_CREATE_SRQ_UNUSED4_SFT 12
+ __le16 srq_size;
+ __le16 srq_fwo;
+ __le32 dpi;
+ __le32 pd_id;
+ __le64 pbl;
+ __le16 steering_tag;
+ u8 reserved48[6];
+};
+
+/* creq_create_srq_resp (size:128b/16B) */
+struct creq_create_srq_resp {
+ u8 type;
+ #define CREQ_CREATE_SRQ_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_CREATE_SRQ_RESP_TYPE_SFT 0
+ #define CREQ_CREATE_SRQ_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_CREATE_SRQ_RESP_TYPE_LAST CREQ_CREATE_SRQ_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_CREATE_SRQ_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_CREATE_SRQ_RESP_EVENT_CREATE_SRQ 0x5UL
+ #define CREQ_CREATE_SRQ_RESP_EVENT_LAST CREQ_CREATE_SRQ_RESP_EVENT_CREATE_SRQ
+ u8 reserved48[6];
+};
+
+/* cmdq_destroy_srq (size:192b/24B) */
+struct cmdq_destroy_srq {
+ u8 opcode;
+ #define CMDQ_DESTROY_SRQ_OPCODE_DESTROY_SRQ 0x6UL
+ #define CMDQ_DESTROY_SRQ_OPCODE_LAST CMDQ_DESTROY_SRQ_OPCODE_DESTROY_SRQ
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 srq_cid;
+ __le32 unused_0;
+};
+
+/* creq_destroy_srq_resp (size:128b/16B) */
+struct creq_destroy_srq_resp {
+ u8 type;
+ #define CREQ_DESTROY_SRQ_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_DESTROY_SRQ_RESP_TYPE_SFT 0
+ #define CREQ_DESTROY_SRQ_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_DESTROY_SRQ_RESP_TYPE_LAST CREQ_DESTROY_SRQ_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_DESTROY_SRQ_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_DESTROY_SRQ_RESP_EVENT_DESTROY_SRQ 0x6UL
+ #define CREQ_DESTROY_SRQ_RESP_EVENT_LAST CREQ_DESTROY_SRQ_RESP_EVENT_DESTROY_SRQ
+ __le16 enable_for_arm[3];
+ #define CREQ_DESTROY_SRQ_RESP_UNUSED0_MASK 0xffffUL
+ #define CREQ_DESTROY_SRQ_RESP_UNUSED0_SFT 0
+ #define CREQ_DESTROY_SRQ_RESP_ENABLE_FOR_ARM_MASK 0x30000UL
+ #define CREQ_DESTROY_SRQ_RESP_ENABLE_FOR_ARM_SFT 16
+};
+
+/* cmdq_query_srq (size:192b/24B) */
+struct cmdq_query_srq {
+ u8 opcode;
+ #define CMDQ_QUERY_SRQ_OPCODE_QUERY_SRQ 0x8UL
+ #define CMDQ_QUERY_SRQ_OPCODE_LAST CMDQ_QUERY_SRQ_OPCODE_QUERY_SRQ
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 srq_cid;
+ __le32 unused_0;
+};
+
+/* creq_query_srq_resp (size:128b/16B) */
+struct creq_query_srq_resp {
+ u8 type;
+ #define CREQ_QUERY_SRQ_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_SRQ_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_SRQ_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_SRQ_RESP_TYPE_LAST CREQ_QUERY_SRQ_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 size;
+ u8 v;
+ #define CREQ_QUERY_SRQ_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_SRQ_RESP_EVENT_QUERY_SRQ 0x8UL
+ #define CREQ_QUERY_SRQ_RESP_EVENT_LAST CREQ_QUERY_SRQ_RESP_EVENT_QUERY_SRQ
+ u8 reserved48[6];
+};
+
+/* creq_query_srq_resp_sb (size:256b/32B) */
+struct creq_query_srq_resp_sb {
+ u8 opcode;
+ #define CREQ_QUERY_SRQ_RESP_SB_OPCODE_QUERY_SRQ 0x8UL
+ #define CREQ_QUERY_SRQ_RESP_SB_OPCODE_LAST CREQ_QUERY_SRQ_RESP_SB_OPCODE_QUERY_SRQ
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 reserved8;
+ __le32 xid;
+ __le16 srq_limit;
+ __le16 reserved16;
+ __le32 data[4];
+};
+
+/* cmdq_create_cq (size:448b/56B) */
+struct cmdq_create_cq {
+ u8 opcode;
+ #define CMDQ_CREATE_CQ_OPCODE_CREATE_CQ 0x9UL
+ #define CMDQ_CREATE_CQ_OPCODE_LAST CMDQ_CREATE_CQ_OPCODE_CREATE_CQ
+ u8 cmd_size;
+ __le16 flags;
+ #define CMDQ_CREATE_CQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x1UL
+ #define CMDQ_CREATE_CQ_FLAGS_STEERING_TAG_VALID 0x2UL
+ #define CMDQ_CREATE_CQ_FLAGS_INFINITE_CQ_MODE 0x4UL
+ #define CMDQ_CREATE_CQ_FLAGS_COALESCING_VALID 0x8UL
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le64 cq_handle;
+ __le32 pg_size_lvl;
+ #define CMDQ_CREATE_CQ_LVL_MASK 0x3UL
+ #define CMDQ_CREATE_CQ_LVL_SFT 0
+ #define CMDQ_CREATE_CQ_LVL_LVL_0 0x0UL
+ #define CMDQ_CREATE_CQ_LVL_LVL_1 0x1UL
+ #define CMDQ_CREATE_CQ_LVL_LVL_2 0x2UL
+ #define CMDQ_CREATE_CQ_LVL_LAST CMDQ_CREATE_CQ_LVL_LVL_2
+ #define CMDQ_CREATE_CQ_PG_SIZE_MASK 0x1cUL
+ #define CMDQ_CREATE_CQ_PG_SIZE_SFT 2
+ #define CMDQ_CREATE_CQ_PG_SIZE_PG_4K (0x0UL << 2)
+ #define CMDQ_CREATE_CQ_PG_SIZE_PG_8K (0x1UL << 2)
+ #define CMDQ_CREATE_CQ_PG_SIZE_PG_64K (0x2UL << 2)
+ #define CMDQ_CREATE_CQ_PG_SIZE_PG_2M (0x3UL << 2)
+ #define CMDQ_CREATE_CQ_PG_SIZE_PG_8M (0x4UL << 2)
+ #define CMDQ_CREATE_CQ_PG_SIZE_PG_1G (0x5UL << 2)
+ #define CMDQ_CREATE_CQ_PG_SIZE_LAST CMDQ_CREATE_CQ_PG_SIZE_PG_1G
+ #define CMDQ_CREATE_CQ_UNUSED27_MASK 0xffffffe0UL
+ #define CMDQ_CREATE_CQ_UNUSED27_SFT 5
+ __le32 cq_fco_cnq_id;
+ #define CMDQ_CREATE_CQ_CNQ_ID_MASK 0xfffUL
+ #define CMDQ_CREATE_CQ_CNQ_ID_SFT 0
+ #define CMDQ_CREATE_CQ_CQ_FCO_MASK 0xfffff000UL
+ #define CMDQ_CREATE_CQ_CQ_FCO_SFT 12
+ __le32 dpi;
+ __le32 cq_size;
+ __le64 pbl;
+ __le16 steering_tag;
+ u8 reserved48[2];
+ __le32 coalescing;
+ #define CMDQ_CREATE_CQ_BUF_MAXTIME_MASK 0x1ffUL
+ #define CMDQ_CREATE_CQ_BUF_MAXTIME_SFT 0
+ #define CMDQ_CREATE_CQ_NORMAL_MAXBUF_MASK 0x3e00UL
+ #define CMDQ_CREATE_CQ_NORMAL_MAXBUF_SFT 9
+ #define CMDQ_CREATE_CQ_DURING_MAXBUF_MASK 0x7c000UL
+ #define CMDQ_CREATE_CQ_DURING_MAXBUF_SFT 14
+ #define CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE 0x80000UL
+ #define CMDQ_CREATE_CQ_UNUSED12_MASK 0xfff00000UL
+ #define CMDQ_CREATE_CQ_UNUSED12_SFT 20
+ __le64 reserved64;
+};
+
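The coalescing word uses plain mask/shift pairs rather than pre-shifted value macros, and is presumably only honoured when CMDQ_CREATE_CQ_FLAGS_COALESCING_VALID is set in flags. A sketch of packing it (hypothetical helper; the values 8 and 4 are arbitrary illustrations, and the units of buf_maxtime are not specified in this header):

    static void example_fill_cq_coalescing(struct cmdq_create_cq *req)
    {
            req->flags = cpu_to_le16(CMDQ_CREATE_CQ_FLAGS_COALESCING_VALID);
            req->coalescing =
                    cpu_to_le32((8 << CMDQ_CREATE_CQ_BUF_MAXTIME_SFT) |
                                (4 << CMDQ_CREATE_CQ_NORMAL_MAXBUF_SFT));
    }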
+/* creq_create_cq_resp (size:128b/16B) */
+struct creq_create_cq_resp {
+ u8 type;
+ #define CREQ_CREATE_CQ_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_CREATE_CQ_RESP_TYPE_SFT 0
+ #define CREQ_CREATE_CQ_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_CREATE_CQ_RESP_TYPE_LAST CREQ_CREATE_CQ_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_CREATE_CQ_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_CREATE_CQ_RESP_EVENT_CREATE_CQ 0x9UL
+ #define CREQ_CREATE_CQ_RESP_EVENT_LAST CREQ_CREATE_CQ_RESP_EVENT_CREATE_CQ
+ u8 reserved48[6];
+};
+
+/* cmdq_destroy_cq (size:192b/24B) */
+struct cmdq_destroy_cq {
+ u8 opcode;
+ #define CMDQ_DESTROY_CQ_OPCODE_DESTROY_CQ 0xaUL
+ #define CMDQ_DESTROY_CQ_OPCODE_LAST CMDQ_DESTROY_CQ_OPCODE_DESTROY_CQ
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 cq_cid;
+ __le32 unused_0;
+};
+
+/* creq_destroy_cq_resp (size:128b/16B) */
+struct creq_destroy_cq_resp {
+ u8 type;
+ #define CREQ_DESTROY_CQ_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_DESTROY_CQ_RESP_TYPE_SFT 0
+ #define CREQ_DESTROY_CQ_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_DESTROY_CQ_RESP_TYPE_LAST CREQ_DESTROY_CQ_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_DESTROY_CQ_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_DESTROY_CQ_RESP_EVENT_DESTROY_CQ 0xaUL
+ #define CREQ_DESTROY_CQ_RESP_EVENT_LAST CREQ_DESTROY_CQ_RESP_EVENT_DESTROY_CQ
+ __le16 cq_arm_lvl;
+ #define CREQ_DESTROY_CQ_RESP_CQ_ARM_LVL_MASK 0x3UL
+ #define CREQ_DESTROY_CQ_RESP_CQ_ARM_LVL_SFT 0
+ __le16 total_cnq_events;
+ __le16 reserved16;
+};
+
+/* cmdq_resize_cq (size:320b/40B) */
+struct cmdq_resize_cq {
+ u8 opcode;
+ #define CMDQ_RESIZE_CQ_OPCODE_RESIZE_CQ 0xcUL
+ #define CMDQ_RESIZE_CQ_OPCODE_LAST CMDQ_RESIZE_CQ_OPCODE_RESIZE_CQ
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 cq_cid;
+ __le32 new_cq_size_pg_size_lvl;
+ #define CMDQ_RESIZE_CQ_LVL_MASK 0x3UL
+ #define CMDQ_RESIZE_CQ_LVL_SFT 0
+ #define CMDQ_RESIZE_CQ_LVL_LVL_0 0x0UL
+ #define CMDQ_RESIZE_CQ_LVL_LVL_1 0x1UL
+ #define CMDQ_RESIZE_CQ_LVL_LVL_2 0x2UL
+ #define CMDQ_RESIZE_CQ_LVL_LAST CMDQ_RESIZE_CQ_LVL_LVL_2
+ #define CMDQ_RESIZE_CQ_PG_SIZE_MASK 0x1cUL
+ #define CMDQ_RESIZE_CQ_PG_SIZE_SFT 2
+ #define CMDQ_RESIZE_CQ_PG_SIZE_PG_4K (0x0UL << 2)
+ #define CMDQ_RESIZE_CQ_PG_SIZE_PG_8K (0x1UL << 2)
+ #define CMDQ_RESIZE_CQ_PG_SIZE_PG_64K (0x2UL << 2)
+ #define CMDQ_RESIZE_CQ_PG_SIZE_PG_2M (0x3UL << 2)
+ #define CMDQ_RESIZE_CQ_PG_SIZE_PG_8M (0x4UL << 2)
+ #define CMDQ_RESIZE_CQ_PG_SIZE_PG_1G (0x5UL << 2)
+ #define CMDQ_RESIZE_CQ_PG_SIZE_LAST CMDQ_RESIZE_CQ_PG_SIZE_PG_1G
+ #define CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK 0x1fffffe0UL
+ #define CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT 5
+ __le64 new_pbl;
+ __le32 new_cq_fco;
+ __le32 unused_0;
+};
+
+/* creq_resize_cq_resp (size:128b/16B) */
+struct creq_resize_cq_resp {
+ u8 type;
+ #define CREQ_RESIZE_CQ_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_RESIZE_CQ_RESP_TYPE_SFT 0
+ #define CREQ_RESIZE_CQ_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_RESIZE_CQ_RESP_TYPE_LAST CREQ_RESIZE_CQ_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_RESIZE_CQ_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_RESIZE_CQ_RESP_EVENT_RESIZE_CQ 0xcUL
+ #define CREQ_RESIZE_CQ_RESP_EVENT_LAST CREQ_RESIZE_CQ_RESP_EVENT_RESIZE_CQ
+ u8 reserved48[6];
+};
+
+/* cmdq_allocate_mrw (size:256b/32B) */
+struct cmdq_allocate_mrw {
+ u8 opcode;
+ #define CMDQ_ALLOCATE_MRW_OPCODE_ALLOCATE_MRW 0xdUL
+ #define CMDQ_ALLOCATE_MRW_OPCODE_LAST CMDQ_ALLOCATE_MRW_OPCODE_ALLOCATE_MRW
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le64 mrw_handle;
+ u8 mrw_flags;
+ #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MASK 0xfUL
+ #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_SFT 0
+ #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR 0x0UL
+ #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR 0x1UL
+ #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 0x2UL
+ #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A 0x3UL
+ #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B 0x4UL
+ #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_LAST CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B
+ #define CMDQ_ALLOCATE_MRW_STEERING_TAG_VALID 0x10UL
+ #define CMDQ_ALLOCATE_MRW_UNUSED4_MASK 0xe0UL
+ #define CMDQ_ALLOCATE_MRW_UNUSED4_SFT 5
+ u8 access;
+ #define CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY 0x20UL
+ __le16 steering_tag;
+ __le32 pd_id;
+};
+
+/* creq_allocate_mrw_resp (size:128b/16B) */
+struct creq_allocate_mrw_resp {
+ u8 type;
+ #define CREQ_ALLOCATE_MRW_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_ALLOCATE_MRW_RESP_TYPE_SFT 0
+ #define CREQ_ALLOCATE_MRW_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_ALLOCATE_MRW_RESP_TYPE_LAST CREQ_ALLOCATE_MRW_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_ALLOCATE_MRW_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_ALLOCATE_MRW_RESP_EVENT_ALLOCATE_MRW 0xdUL
+ #define CREQ_ALLOCATE_MRW_RESP_EVENT_LAST CREQ_ALLOCATE_MRW_RESP_EVENT_ALLOCATE_MRW
+ u8 reserved48[6];
+};
+
+/* cmdq_deallocate_key (size:192b/24B) */
+struct cmdq_deallocate_key {
+ u8 opcode;
+ #define CMDQ_DEALLOCATE_KEY_OPCODE_DEALLOCATE_KEY 0xeUL
+ #define CMDQ_DEALLOCATE_KEY_OPCODE_LAST CMDQ_DEALLOCATE_KEY_OPCODE_DEALLOCATE_KEY
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ u8 mrw_flags;
+ #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MASK 0xfUL
+ #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_SFT 0
+ #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MR 0x0UL
+ #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_PMR 0x1UL
+ #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MW_TYPE1 0x2UL
+ #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MW_TYPE2A 0x3UL
+ #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MW_TYPE2B 0x4UL
+ #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_LAST CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MW_TYPE2B
+ #define CMDQ_DEALLOCATE_KEY_UNUSED4_MASK 0xf0UL
+ #define CMDQ_DEALLOCATE_KEY_UNUSED4_SFT 4
+ u8 unused24[3];
+ __le32 key;
+};
+
+/* creq_deallocate_key_resp (size:128b/16B) */
+struct creq_deallocate_key_resp {
+ u8 type;
+ #define CREQ_DEALLOCATE_KEY_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_DEALLOCATE_KEY_RESP_TYPE_SFT 0
+ #define CREQ_DEALLOCATE_KEY_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_DEALLOCATE_KEY_RESP_TYPE_LAST CREQ_DEALLOCATE_KEY_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_DEALLOCATE_KEY_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_DEALLOCATE_KEY_RESP_EVENT_DEALLOCATE_KEY 0xeUL
+ #define CREQ_DEALLOCATE_KEY_RESP_EVENT_LAST CREQ_DEALLOCATE_KEY_RESP_EVENT_DEALLOCATE_KEY
+ __le16 reserved16;
+ __le32 bound_window_info;
+};
+
+/* cmdq_register_mr (size:448b/56B) */
+struct cmdq_register_mr {
+ u8 opcode;
+ #define CMDQ_REGISTER_MR_OPCODE_REGISTER_MR 0xfUL
+ #define CMDQ_REGISTER_MR_OPCODE_LAST CMDQ_REGISTER_MR_OPCODE_REGISTER_MR
+ u8 cmd_size;
+ __le16 flags;
+ #define CMDQ_REGISTER_MR_FLAGS_ALLOC_MR 0x1UL
+ #define CMDQ_REGISTER_MR_FLAGS_STEERING_TAG_VALID 0x2UL
+ #define CMDQ_REGISTER_MR_FLAGS_ENABLE_RO 0x4UL
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ u8 log2_pg_size_lvl;
+ #define CMDQ_REGISTER_MR_LVL_MASK 0x3UL
+ #define CMDQ_REGISTER_MR_LVL_SFT 0
+ #define CMDQ_REGISTER_MR_LVL_LVL_0 0x0UL
+ #define CMDQ_REGISTER_MR_LVL_LVL_1 0x1UL
+ #define CMDQ_REGISTER_MR_LVL_LVL_2 0x2UL
+ #define CMDQ_REGISTER_MR_LVL_LAST CMDQ_REGISTER_MR_LVL_LVL_2
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK 0x7cUL
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT 2
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_4K (0xcUL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_8K (0xdUL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_64K (0x10UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_256K (0x12UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1M (0x14UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_2M (0x15UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_4M (0x16UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1G (0x1eUL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_LAST CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1G
+ #define CMDQ_REGISTER_MR_UNUSED1 0x80UL
+ u8 access;
+ #define CMDQ_REGISTER_MR_ACCESS_LOCAL_WRITE 0x1UL
+ #define CMDQ_REGISTER_MR_ACCESS_REMOTE_READ 0x2UL
+ #define CMDQ_REGISTER_MR_ACCESS_REMOTE_WRITE 0x4UL
+ #define CMDQ_REGISTER_MR_ACCESS_REMOTE_ATOMIC 0x8UL
+ #define CMDQ_REGISTER_MR_ACCESS_MW_BIND 0x10UL
+ #define CMDQ_REGISTER_MR_ACCESS_ZERO_BASED 0x20UL
+ __le16 log2_pbl_pg_size;
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK 0x1fUL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT 0
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K 0xcUL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K 0xdUL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K 0x10UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K 0x12UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M 0x14UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M 0x15UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M 0x16UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G 0x1eUL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_LAST CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G
+ #define CMDQ_REGISTER_MR_UNUSED11_MASK 0xffe0UL
+ #define CMDQ_REGISTER_MR_UNUSED11_SFT 5
+ __le32 key;
+ __le64 pbl;
+ __le64 va;
+ __le64 mr_size;
+ __le16 steering_tag;
+ u8 reserved48[6];
+};
+
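Unlike the 3-bit page-size codes used elsewhere in this file, register-MR encodes page sizes as the literal log2 of the size (0xc = 2^12 = 4K up to 0x1e = 2^30 = 1G): shifted into bits 6:2 of log2_pg_size_lvl for the data pages, and unshifted in log2_pbl_pg_size for the PBL pages. A minimal illustrative fill (hypothetical helper, not code from this patch):

    static void example_fill_register_mr(struct cmdq_register_mr *req)
    {
            /* 2 MiB data pages (log2 = 21) behind a level-1 PBL of 4 KiB pages. */
            req->log2_pg_size_lvl = CMDQ_REGISTER_MR_LVL_LVL_1 |
                                    CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_2M;
            req->log2_pbl_pg_size =
                    cpu_to_le16(CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K);
            req->access = CMDQ_REGISTER_MR_ACCESS_LOCAL_WRITE |
                          CMDQ_REGISTER_MR_ACCESS_REMOTE_READ;
    }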
+/* creq_register_mr_resp (size:128b/16B) */
+struct creq_register_mr_resp {
+ u8 type;
+ #define CREQ_REGISTER_MR_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_REGISTER_MR_RESP_TYPE_SFT 0
+ #define CREQ_REGISTER_MR_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_REGISTER_MR_RESP_TYPE_LAST CREQ_REGISTER_MR_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_REGISTER_MR_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_REGISTER_MR_RESP_EVENT_REGISTER_MR 0xfUL
+ #define CREQ_REGISTER_MR_RESP_EVENT_LAST CREQ_REGISTER_MR_RESP_EVENT_REGISTER_MR
+ u8 reserved48[6];
+};
+
+/* cmdq_deregister_mr (size:192b/24B) */
+struct cmdq_deregister_mr {
+ u8 opcode;
+ #define CMDQ_DEREGISTER_MR_OPCODE_DEREGISTER_MR 0x10UL
+ #define CMDQ_DEREGISTER_MR_OPCODE_LAST CMDQ_DEREGISTER_MR_OPCODE_DEREGISTER_MR
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 lkey;
+ __le32 unused_0;
+};
+
+/* creq_deregister_mr_resp (size:128b/16B) */
+struct creq_deregister_mr_resp {
+ u8 type;
+ #define CREQ_DEREGISTER_MR_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_DEREGISTER_MR_RESP_TYPE_SFT 0
+ #define CREQ_DEREGISTER_MR_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_DEREGISTER_MR_RESP_TYPE_LAST CREQ_DEREGISTER_MR_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_DEREGISTER_MR_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_DEREGISTER_MR_RESP_EVENT_DEREGISTER_MR 0x10UL
+ #define CREQ_DEREGISTER_MR_RESP_EVENT_LAST CREQ_DEREGISTER_MR_RESP_EVENT_DEREGISTER_MR
+ __le16 reserved16;
+ __le32 bound_windows;
+};
+
+/* cmdq_add_gid (size:384b/48B) */
+struct cmdq_add_gid {
+ u8 opcode;
+ #define CMDQ_ADD_GID_OPCODE_ADD_GID 0x11UL
+ #define CMDQ_ADD_GID_OPCODE_LAST CMDQ_ADD_GID_OPCODE_ADD_GID
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __be32 gid[4];
+ __be16 src_mac[3];
+ __le16 vlan;
+ #define CMDQ_ADD_GID_VLAN_VLAN_EN_TPID_VLAN_ID_MASK 0xffffUL
+ #define CMDQ_ADD_GID_VLAN_VLAN_EN_TPID_VLAN_ID_SFT 0
+ #define CMDQ_ADD_GID_VLAN_VLAN_ID_MASK 0xfffUL
+ #define CMDQ_ADD_GID_VLAN_VLAN_ID_SFT 0
+ #define CMDQ_ADD_GID_VLAN_TPID_MASK 0x7000UL
+ #define CMDQ_ADD_GID_VLAN_TPID_SFT 12
+ #define CMDQ_ADD_GID_VLAN_TPID_TPID_88A8 (0x0UL << 12)
+ #define CMDQ_ADD_GID_VLAN_TPID_TPID_8100 (0x1UL << 12)
+ #define CMDQ_ADD_GID_VLAN_TPID_TPID_9100 (0x2UL << 12)
+ #define CMDQ_ADD_GID_VLAN_TPID_TPID_9200 (0x3UL << 12)
+ #define CMDQ_ADD_GID_VLAN_TPID_TPID_9300 (0x4UL << 12)
+ #define CMDQ_ADD_GID_VLAN_TPID_TPID_CFG1 (0x5UL << 12)
+ #define CMDQ_ADD_GID_VLAN_TPID_TPID_CFG2 (0x6UL << 12)
+ #define CMDQ_ADD_GID_VLAN_TPID_TPID_CFG3 (0x7UL << 12)
+ #define CMDQ_ADD_GID_VLAN_TPID_LAST CMDQ_ADD_GID_VLAN_TPID_TPID_CFG3
+ #define CMDQ_ADD_GID_VLAN_VLAN_EN 0x8000UL
+ __le16 ipid;
+ __le16 stats_ctx;
+ #define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_VALID_STATS_CTX_ID_MASK 0xffffUL
+ #define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_VALID_STATS_CTX_ID_SFT 0
+ #define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_ID_MASK 0x7fffUL
+ #define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_ID_SFT 0
+ #define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_VALID 0x8000UL
+ __le32 unused_0;
+};
+
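The vlan word of cmdq_add_gid folds three things together: the 12-bit VLAN ID, a 3-bit TPID selector and the VLAN_EN bit. A sketch of building a tagged entry for VLAN 100 with the standard 0x8100 TPID (hypothetical helper, not code from this patch):

    static void example_fill_add_gid_vlan(struct cmdq_add_gid *req)
    {
            /* VLAN ID in bits 11:0, TPID selector in bits 14:12, enable in bit 15. */
            req->vlan = cpu_to_le16(100 |
                                    CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
                                    CMDQ_ADD_GID_VLAN_VLAN_EN);
    }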
+/* creq_add_gid_resp (size:128b/16B) */
+struct creq_add_gid_resp {
+ u8 type;
+ #define CREQ_ADD_GID_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_ADD_GID_RESP_TYPE_SFT 0
+ #define CREQ_ADD_GID_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_ADD_GID_RESP_TYPE_LAST CREQ_ADD_GID_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_ADD_GID_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_ADD_GID_RESP_EVENT_ADD_GID 0x11UL
+ #define CREQ_ADD_GID_RESP_EVENT_LAST CREQ_ADD_GID_RESP_EVENT_ADD_GID
+ u8 reserved48[6];
+};
+
+/* cmdq_delete_gid (size:192b/24B) */
+struct cmdq_delete_gid {
+ u8 opcode;
+ #define CMDQ_DELETE_GID_OPCODE_DELETE_GID 0x12UL
+ #define CMDQ_DELETE_GID_OPCODE_LAST CMDQ_DELETE_GID_OPCODE_DELETE_GID
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le16 gid_index;
+ u8 unused_0[6];
+};
+
+/* creq_delete_gid_resp (size:128b/16B) */
+struct creq_delete_gid_resp {
+ u8 type;
+ #define CREQ_DELETE_GID_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_DELETE_GID_RESP_TYPE_SFT 0
+ #define CREQ_DELETE_GID_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_DELETE_GID_RESP_TYPE_LAST CREQ_DELETE_GID_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_DELETE_GID_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_DELETE_GID_RESP_EVENT_DELETE_GID 0x12UL
+ #define CREQ_DELETE_GID_RESP_EVENT_LAST CREQ_DELETE_GID_RESP_EVENT_DELETE_GID
+ u8 reserved48[6];
+};
+
+/* cmdq_modify_gid (size:384b/48B) */
+struct cmdq_modify_gid {
+ u8 opcode;
+ #define CMDQ_MODIFY_GID_OPCODE_MODIFY_GID 0x17UL
+ #define CMDQ_MODIFY_GID_OPCODE_LAST CMDQ_MODIFY_GID_OPCODE_MODIFY_GID
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __be32 gid[4];
+ __be16 src_mac[3];
+ __le16 vlan;
+ #define CMDQ_MODIFY_GID_VLAN_VLAN_ID_MASK 0xfffUL
+ #define CMDQ_MODIFY_GID_VLAN_VLAN_ID_SFT 0
+ #define CMDQ_MODIFY_GID_VLAN_TPID_MASK 0x7000UL
+ #define CMDQ_MODIFY_GID_VLAN_TPID_SFT 12
+ #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_88A8 (0x0UL << 12)
+ #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_8100 (0x1UL << 12)
+ #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_9100 (0x2UL << 12)
+ #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_9200 (0x3UL << 12)
+ #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_9300 (0x4UL << 12)
+ #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_CFG1 (0x5UL << 12)
+ #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_CFG2 (0x6UL << 12)
+ #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_CFG3 (0x7UL << 12)
+ #define CMDQ_MODIFY_GID_VLAN_TPID_LAST CMDQ_MODIFY_GID_VLAN_TPID_TPID_CFG3
+ #define CMDQ_MODIFY_GID_VLAN_VLAN_EN 0x8000UL
+ __le16 ipid;
+ __le16 gid_index;
+ __le16 stats_ctx;
+ #define CMDQ_MODIFY_GID_STATS_CTX_STATS_CTX_ID_MASK 0x7fffUL
+ #define CMDQ_MODIFY_GID_STATS_CTX_STATS_CTX_ID_SFT 0
+ #define CMDQ_MODIFY_GID_STATS_CTX_STATS_CTX_VALID 0x8000UL
+ __le16 unused_0;
+};
+
+/* creq_modify_gid_resp (size:128b/16B) */
+struct creq_modify_gid_resp {
+ u8 type;
+ #define CREQ_MODIFY_GID_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_MODIFY_GID_RESP_TYPE_SFT 0
+ #define CREQ_MODIFY_GID_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_MODIFY_GID_RESP_TYPE_LAST CREQ_MODIFY_GID_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_MODIFY_GID_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_MODIFY_GID_RESP_EVENT_ADD_GID 0x11UL
+ #define CREQ_MODIFY_GID_RESP_EVENT_LAST CREQ_MODIFY_GID_RESP_EVENT_ADD_GID
+ u8 reserved48[6];
+};
+
+/* cmdq_query_gid (size:192b/24B) */
+struct cmdq_query_gid {
+ u8 opcode;
+ #define CMDQ_QUERY_GID_OPCODE_QUERY_GID 0x18UL
+ #define CMDQ_QUERY_GID_OPCODE_LAST CMDQ_QUERY_GID_OPCODE_QUERY_GID
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le16 gid_index;
+ u8 unused16[6];
+};
+
+/* creq_query_gid_resp (size:128b/16B) */
+struct creq_query_gid_resp {
+ u8 type;
+ #define CREQ_QUERY_GID_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_GID_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_GID_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_GID_RESP_TYPE_LAST CREQ_QUERY_GID_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 size;
+ u8 v;
+ #define CREQ_QUERY_GID_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_GID_RESP_EVENT_QUERY_GID 0x18UL
+ #define CREQ_QUERY_GID_RESP_EVENT_LAST CREQ_QUERY_GID_RESP_EVENT_QUERY_GID
+ u8 reserved48[6];
+};
+
+/* creq_query_gid_resp_sb (size:320b/40B) */
+struct creq_query_gid_resp_sb {
+ u8 opcode;
+ #define CREQ_QUERY_GID_RESP_SB_OPCODE_QUERY_GID 0x18UL
+ #define CREQ_QUERY_GID_RESP_SB_OPCODE_LAST CREQ_QUERY_GID_RESP_SB_OPCODE_QUERY_GID
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 reserved8;
+ __le32 gid[4];
+ __le16 src_mac[3];
+ __le16 vlan;
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_VLAN_EN_TPID_VLAN_ID_MASK 0xffffUL
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_VLAN_EN_TPID_VLAN_ID_SFT 0
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_VLAN_ID_MASK 0xfffUL
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_VLAN_ID_SFT 0
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_MASK 0x7000UL
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_SFT 12
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_88A8 (0x0UL << 12)
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_8100 (0x1UL << 12)
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_9100 (0x2UL << 12)
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_9200 (0x3UL << 12)
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_9300 (0x4UL << 12)
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_CFG1 (0x5UL << 12)
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_CFG2 (0x6UL << 12)
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_CFG3 (0x7UL << 12)
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_LAST CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_CFG3
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_VLAN_EN 0x8000UL
+ __le16 ipid;
+ __le16 gid_index;
+ __le32 unused_0;
+};
+
+/* cmdq_create_qp1 (size:640b/80B) */
+struct cmdq_create_qp1 {
+ u8 opcode;
+ #define CMDQ_CREATE_QP1_OPCODE_CREATE_QP1 0x13UL
+ #define CMDQ_CREATE_QP1_OPCODE_LAST CMDQ_CREATE_QP1_OPCODE_CREATE_QP1
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le64 qp_handle;
+ __le32 qp_flags;
+ #define CMDQ_CREATE_QP1_QP_FLAGS_SRQ_USED 0x1UL
+ #define CMDQ_CREATE_QP1_QP_FLAGS_FORCE_COMPLETION 0x2UL
+ #define CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE 0x4UL
+ #define CMDQ_CREATE_QP1_QP_FLAGS_LAST CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE
+ u8 type;
+ #define CMDQ_CREATE_QP1_TYPE_GSI 0x1UL
+ #define CMDQ_CREATE_QP1_TYPE_LAST CMDQ_CREATE_QP1_TYPE_GSI
+ u8 sq_pg_size_sq_lvl;
+ #define CMDQ_CREATE_QP1_SQ_LVL_MASK 0xfUL
+ #define CMDQ_CREATE_QP1_SQ_LVL_SFT 0
+ #define CMDQ_CREATE_QP1_SQ_LVL_LVL_0 0x0UL
+ #define CMDQ_CREATE_QP1_SQ_LVL_LVL_1 0x1UL
+ #define CMDQ_CREATE_QP1_SQ_LVL_LVL_2 0x2UL
+ #define CMDQ_CREATE_QP1_SQ_LVL_LAST CMDQ_CREATE_QP1_SQ_LVL_LVL_2
+ #define CMDQ_CREATE_QP1_SQ_PG_SIZE_MASK 0xf0UL
+ #define CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT 4
+ #define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define CMDQ_CREATE_QP1_SQ_PG_SIZE_LAST CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G
+ u8 rq_pg_size_rq_lvl;
+ #define CMDQ_CREATE_QP1_RQ_LVL_MASK 0xfUL
+ #define CMDQ_CREATE_QP1_RQ_LVL_SFT 0
+ #define CMDQ_CREATE_QP1_RQ_LVL_LVL_0 0x0UL
+ #define CMDQ_CREATE_QP1_RQ_LVL_LVL_1 0x1UL
+ #define CMDQ_CREATE_QP1_RQ_LVL_LVL_2 0x2UL
+ #define CMDQ_CREATE_QP1_RQ_LVL_LAST CMDQ_CREATE_QP1_RQ_LVL_LVL_2
+ #define CMDQ_CREATE_QP1_RQ_PG_SIZE_MASK 0xf0UL
+ #define CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT 4
+ #define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define CMDQ_CREATE_QP1_RQ_PG_SIZE_LAST CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G
+ u8 unused_0;
+ __le32 dpi;
+ __le32 sq_size;
+ __le32 rq_size;
+ __le16 sq_fwo_sq_sge;
+ #define CMDQ_CREATE_QP1_SQ_SGE_MASK 0xfUL
+ #define CMDQ_CREATE_QP1_SQ_SGE_SFT 0
+ #define CMDQ_CREATE_QP1_SQ_FWO_MASK 0xfff0UL
+ #define CMDQ_CREATE_QP1_SQ_FWO_SFT 4
+ __le16 rq_fwo_rq_sge;
+ #define CMDQ_CREATE_QP1_RQ_SGE_MASK 0xfUL
+ #define CMDQ_CREATE_QP1_RQ_SGE_SFT 0
+ #define CMDQ_CREATE_QP1_RQ_FWO_MASK 0xfff0UL
+ #define CMDQ_CREATE_QP1_RQ_FWO_SFT 4
+ __le32 scq_cid;
+ __le32 rcq_cid;
+ __le32 srq_cid;
+ __le32 pd_id;
+ __le64 sq_pbl;
+ __le64 rq_pbl;
+};
+
+/* creq_create_qp1_resp (size:128b/16B) */
+struct creq_create_qp1_resp {
+ u8 type;
+ #define CREQ_CREATE_QP1_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_CREATE_QP1_RESP_TYPE_SFT 0
+ #define CREQ_CREATE_QP1_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_CREATE_QP1_RESP_TYPE_LAST CREQ_CREATE_QP1_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_CREATE_QP1_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_CREATE_QP1_RESP_EVENT_CREATE_QP1 0x13UL
+ #define CREQ_CREATE_QP1_RESP_EVENT_LAST CREQ_CREATE_QP1_RESP_EVENT_CREATE_QP1
+ u8 reserved48[6];
+};
+
+/* cmdq_destroy_qp1 (size:192b/24B) */
+struct cmdq_destroy_qp1 {
+ u8 opcode;
+ #define CMDQ_DESTROY_QP1_OPCODE_DESTROY_QP1 0x14UL
+ #define CMDQ_DESTROY_QP1_OPCODE_LAST CMDQ_DESTROY_QP1_OPCODE_DESTROY_QP1
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 qp1_cid;
+ __le32 unused_0;
+};
+
+/* creq_destroy_qp1_resp (size:128b/16B) */
+struct creq_destroy_qp1_resp {
+ u8 type;
+ #define CREQ_DESTROY_QP1_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_DESTROY_QP1_RESP_TYPE_SFT 0
+ #define CREQ_DESTROY_QP1_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_DESTROY_QP1_RESP_TYPE_LAST CREQ_DESTROY_QP1_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_DESTROY_QP1_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_DESTROY_QP1_RESP_EVENT_DESTROY_QP1 0x14UL
+ #define CREQ_DESTROY_QP1_RESP_EVENT_LAST CREQ_DESTROY_QP1_RESP_EVENT_DESTROY_QP1
+ u8 reserved48[6];
+};
+
+/* cmdq_create_ah (size:512b/64B) */
+struct cmdq_create_ah {
+ u8 opcode;
+ #define CMDQ_CREATE_AH_OPCODE_CREATE_AH 0x15UL
+ #define CMDQ_CREATE_AH_OPCODE_LAST CMDQ_CREATE_AH_OPCODE_CREATE_AH
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le64 ah_handle;
+ __le32 dgid[4];
+ u8 type;
+ #define CMDQ_CREATE_AH_TYPE_V1 0x0UL
+ #define CMDQ_CREATE_AH_TYPE_V2IPV4 0x2UL
+ #define CMDQ_CREATE_AH_TYPE_V2IPV6 0x3UL
+ #define CMDQ_CREATE_AH_TYPE_LAST CMDQ_CREATE_AH_TYPE_V2IPV6
+ u8 hop_limit;
+ __le16 sgid_index;
+ __le32 dest_vlan_id_flow_label;
+ #define CMDQ_CREATE_AH_FLOW_LABEL_MASK 0xfffffUL
+ #define CMDQ_CREATE_AH_FLOW_LABEL_SFT 0
+ #define CMDQ_CREATE_AH_DEST_VLAN_ID_MASK 0xfff00000UL
+ #define CMDQ_CREATE_AH_DEST_VLAN_ID_SFT 20
+ __le32 pd_id;
+ __le32 unused_0;
+ __le16 dest_mac[3];
+ u8 traffic_class;
+ u8 enable_cc;
+ #define CMDQ_CREATE_AH_ENABLE_CC 0x1UL
+};
+
+/* creq_create_ah_resp (size:128b/16B) */
+struct creq_create_ah_resp {
+ u8 type;
+ #define CREQ_CREATE_AH_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_CREATE_AH_RESP_TYPE_SFT 0
+ #define CREQ_CREATE_AH_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_CREATE_AH_RESP_TYPE_LAST CREQ_CREATE_AH_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_CREATE_AH_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_CREATE_AH_RESP_EVENT_CREATE_AH 0x15UL
+ #define CREQ_CREATE_AH_RESP_EVENT_LAST CREQ_CREATE_AH_RESP_EVENT_CREATE_AH
+ u8 reserved48[6];
+};
+
+/* cmdq_destroy_ah (size:192b/24B) */
+struct cmdq_destroy_ah {
+ u8 opcode;
+ #define CMDQ_DESTROY_AH_OPCODE_DESTROY_AH 0x16UL
+ #define CMDQ_DESTROY_AH_OPCODE_LAST CMDQ_DESTROY_AH_OPCODE_DESTROY_AH
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 ah_cid;
+ __le32 unused_0;
+};
+
+/* creq_destroy_ah_resp (size:128b/16B) */
+struct creq_destroy_ah_resp {
+ u8 type;
+ #define CREQ_DESTROY_AH_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_DESTROY_AH_RESP_TYPE_SFT 0
+ #define CREQ_DESTROY_AH_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_DESTROY_AH_RESP_TYPE_LAST CREQ_DESTROY_AH_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_DESTROY_AH_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_DESTROY_AH_RESP_EVENT_DESTROY_AH 0x16UL
+ #define CREQ_DESTROY_AH_RESP_EVENT_LAST CREQ_DESTROY_AH_RESP_EVENT_DESTROY_AH
+ u8 reserved48[6];
+};
+
+/* cmdq_query_roce_stats (size:192b/24B) */
+struct cmdq_query_roce_stats {
+ u8 opcode;
+ #define CMDQ_QUERY_ROCE_STATS_OPCODE_QUERY_ROCE_STATS 0x8eUL
+ #define CMDQ_QUERY_ROCE_STATS_OPCODE_LAST CMDQ_QUERY_ROCE_STATS_OPCODE_QUERY_ROCE_STATS
+ u8 cmd_size;
+ __le16 flags;
+ #define CMDQ_QUERY_ROCE_STATS_FLAGS_COLLECTION_ID 0x1UL
+ #define CMDQ_QUERY_ROCE_STATS_FLAGS_FUNCTION_ID 0x2UL
+ __le16 cookie;
+ u8 resp_size;
+ u8 collection_id;
+ __le64 resp_addr;
+ __le32 function_id;
+ #define CMDQ_QUERY_ROCE_STATS_PF_NUM_MASK 0xffUL
+ #define CMDQ_QUERY_ROCE_STATS_PF_NUM_SFT 0
+ #define CMDQ_QUERY_ROCE_STATS_VF_NUM_MASK 0xffff00UL
+ #define CMDQ_QUERY_ROCE_STATS_VF_NUM_SFT 8
+ #define CMDQ_QUERY_ROCE_STATS_VF_VALID 0x1000000UL
+ __le32 reserved32;
+};
+
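The function_id word of cmdq_query_roce_stats packs a PF number in bits 0..7, a VF number in bits 8..23 and a VF-valid flag in bit 24; presumably the CMDQ_QUERY_ROCE_STATS_FLAGS_FUNCTION_ID flag tells firmware to honour the field. A minimal sketch of composing that word, with a hypothetical helper name and __le32 treated as uint32_t on a little-endian host:

#include <stdint.h>

/* Values copied from the cmdq_query_roce_stats definition above. */
#define CMDQ_QUERY_ROCE_STATS_PF_NUM_MASK 0xffUL
#define CMDQ_QUERY_ROCE_STATS_PF_NUM_SFT  0
#define CMDQ_QUERY_ROCE_STATS_VF_NUM_MASK 0xffff00UL
#define CMDQ_QUERY_ROCE_STATS_VF_NUM_SFT  8
#define CMDQ_QUERY_ROCE_STATS_VF_VALID    0x1000000UL

/* Hypothetical helper: pack PF/VF identifiers into function_id. */
static uint32_t roce_stats_function_id(uint8_t pf_num, uint16_t vf_num, int vf_valid)
{
	uint32_t id;

	id  = ((uint32_t)pf_num << CMDQ_QUERY_ROCE_STATS_PF_NUM_SFT) &
	      CMDQ_QUERY_ROCE_STATS_PF_NUM_MASK;
	id |= ((uint32_t)vf_num << CMDQ_QUERY_ROCE_STATS_VF_NUM_SFT) &
	      CMDQ_QUERY_ROCE_STATS_VF_NUM_MASK;
	if (vf_valid)
		id |= CMDQ_QUERY_ROCE_STATS_VF_VALID;
	return id;
}
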
+/* creq_query_roce_stats_resp (size:128b/16B) */
+struct creq_query_roce_stats_resp {
+ u8 type;
+ #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_LAST CREQ_QUERY_ROCE_STATS_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 size;
+ u8 v;
+ #define CREQ_QUERY_ROCE_STATS_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_ROCE_STATS_RESP_EVENT_QUERY_ROCE_STATS 0x8eUL
+ #define CREQ_QUERY_ROCE_STATS_RESP_EVENT_LAST \
+ CREQ_QUERY_ROCE_STATS_RESP_EVENT_QUERY_ROCE_STATS
+ u8 reserved48[6];
+};
+
+/* creq_query_roce_stats_resp_sb (size:2944b/368B) */
+struct creq_query_roce_stats_resp_sb {
+ u8 opcode;
+ #define CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_QUERY_ROCE_STATS 0x8eUL
+ #define CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_LAST \
+ CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_QUERY_ROCE_STATS
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 rsvd;
+ __le32 num_counters;
+ __le32 rsvd1;
+ __le64 to_retransmits;
+ __le64 seq_err_naks_rcvd;
+ __le64 max_retry_exceeded;
+ __le64 rnr_naks_rcvd;
+ __le64 missing_resp;
+ __le64 unrecoverable_err;
+ __le64 bad_resp_err;
+ __le64 local_qp_op_err;
+ __le64 local_protection_err;
+ __le64 mem_mgmt_op_err;
+ __le64 remote_invalid_req_err;
+ __le64 remote_access_err;
+ __le64 remote_op_err;
+ __le64 dup_req;
+ __le64 res_exceed_max;
+ __le64 res_length_mismatch;
+ __le64 res_exceeds_wqe;
+ __le64 res_opcode_err;
+ __le64 res_rx_invalid_rkey;
+ __le64 res_rx_domain_err;
+ __le64 res_rx_no_perm;
+ __le64 res_rx_range_err;
+ __le64 res_tx_invalid_rkey;
+ __le64 res_tx_domain_err;
+ __le64 res_tx_no_perm;
+ __le64 res_tx_range_err;
+ __le64 res_irrq_oflow;
+ __le64 res_unsup_opcode;
+ __le64 res_unaligned_atomic;
+ __le64 res_rem_inv_err;
+ __le64 res_mem_error;
+ __le64 res_srq_err;
+ __le64 res_cmp_err;
+ __le64 res_invalid_dup_rkey;
+ __le64 res_wqe_format_err;
+ __le64 res_cq_load_err;
+ __le64 res_srq_load_err;
+ __le64 res_tx_pci_err;
+ __le64 res_rx_pci_err;
+ __le64 res_oos_drop_count;
+ __le64 active_qp_count_p0;
+ __le64 active_qp_count_p1;
+ __le64 active_qp_count_p2;
+ __le64 active_qp_count_p3;
+};
+
+/* cmdq_query_roce_stats_ext (size:192b/24B) */
+struct cmdq_query_roce_stats_ext {
+ u8 opcode;
+ #define CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS 0x92UL
+ #define CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_LAST \
+ CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS
+ u8 cmd_size;
+ __le16 flags;
+ #define CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_COLLECTION_ID 0x1UL
+ #define CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID 0x2UL
+ __le16 cookie;
+ u8 resp_size;
+ u8 collection_id;
+ __le64 resp_addr;
+ __le32 function_id;
+ #define CMDQ_QUERY_ROCE_STATS_EXT_PF_NUM_MASK 0xffUL
+ #define CMDQ_QUERY_ROCE_STATS_EXT_PF_NUM_SFT 0
+ #define CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_MASK 0xffff00UL
+ #define CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_SFT 8
+ #define CMDQ_QUERY_ROCE_STATS_EXT_VF_VALID 0x1000000UL
+ __le32 reserved32;
+};
+
+/* creq_query_roce_stats_ext_resp (size:128b/16B) */
+struct creq_query_roce_stats_ext_resp {
+ u8 type;
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_LAST \
+ CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 size;
+ u8 v;
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_EVENT_QUERY_ROCE_STATS_EXT 0x92UL
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_EVENT_LAST \
+ CREQ_QUERY_ROCE_STATS_EXT_RESP_EVENT_QUERY_ROCE_STATS_EXT
+ u8 reserved48[6];
+};
+
+/* creq_query_roce_stats_ext_resp_sb (size:1856b/232B) */
+struct creq_query_roce_stats_ext_resp_sb {
+ u8 opcode;
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_SB_OPCODE_QUERY_ROCE_STATS_EXT 0x92UL
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_SB_OPCODE_LAST \
+ CREQ_QUERY_ROCE_STATS_EXT_RESP_SB_OPCODE_QUERY_ROCE_STATS_EXT
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 rsvd;
+ __le64 tx_atomic_req_pkts;
+ __le64 tx_read_req_pkts;
+ __le64 tx_read_res_pkts;
+ __le64 tx_write_req_pkts;
+ __le64 tx_send_req_pkts;
+ __le64 tx_roce_pkts;
+ __le64 tx_roce_bytes;
+ __le64 rx_atomic_req_pkts;
+ __le64 rx_read_req_pkts;
+ __le64 rx_read_res_pkts;
+ __le64 rx_write_req_pkts;
+ __le64 rx_send_req_pkts;
+ __le64 rx_roce_pkts;
+ __le64 rx_roce_bytes;
+ __le64 rx_roce_good_pkts;
+ __le64 rx_roce_good_bytes;
+ __le64 rx_out_of_buffer_pkts;
+ __le64 rx_out_of_sequence_pkts;
+ __le64 tx_cnp_pkts;
+ __le64 rx_cnp_pkts;
+ __le64 rx_ecn_marked_pkts;
+ __le64 tx_cnp_bytes;
+ __le64 rx_cnp_bytes;
+ __le64 seq_err_naks_rcvd;
+ __le64 rnr_naks_rcvd;
+ __le64 missing_resp;
+ __le64 to_retransmit;
+ __le64 dup_req;
+};
+
+/* cmdq_roce_mirror_cfg (size:192b/24B) */
+struct cmdq_roce_mirror_cfg {
+ u8 opcode;
+ #define CMDQ_ROCE_MIRROR_CFG_OPCODE_ROCE_MIRROR_CFG 0x99UL
+ #define CMDQ_ROCE_MIRROR_CFG_OPCODE_LAST \
+ CMDQ_ROCE_MIRROR_CFG_OPCODE_ROCE_MIRROR_CFG
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ u8 mirror_flags;
+ #define CMDQ_ROCE_MIRROR_CFG_MIRROR_ENABLE 0x1UL
+ u8 rsvd[7];
+};
+
+/* creq_roce_mirror_cfg_resp (size:128b/16B) */
+struct creq_roce_mirror_cfg_resp {
+ u8 type;
+ #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_SFT 0
+ #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_LAST \
+ CREQ_ROCE_MIRROR_CFG_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 reserved32;
+ u8 v;
+ #define CREQ_ROCE_MIRROR_CFG_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_ROCE_MIRROR_CFG_RESP_EVENT_ROCE_MIRROR_CFG 0x99UL
+ #define CREQ_ROCE_MIRROR_CFG_RESP_EVENT_LAST \
+ CREQ_ROCE_MIRROR_CFG_RESP_EVENT_ROCE_MIRROR_CFG
+ u8 reserved48[6];
+};
+
+/* cmdq_query_func (size:128b/16B) */
+struct cmdq_query_func {
+ u8 opcode;
+ #define CMDQ_QUERY_FUNC_OPCODE_QUERY_FUNC 0x83UL
+ #define CMDQ_QUERY_FUNC_OPCODE_LAST CMDQ_QUERY_FUNC_OPCODE_QUERY_FUNC
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+};
+
+/* creq_query_func_resp (size:128b/16B) */
+struct creq_query_func_resp {
+ u8 type;
+ #define CREQ_QUERY_FUNC_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_FUNC_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_FUNC_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_FUNC_RESP_TYPE_LAST CREQ_QUERY_FUNC_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 size;
+ u8 v;
+ #define CREQ_QUERY_FUNC_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_FUNC_RESP_EVENT_QUERY_FUNC 0x83UL
+ #define CREQ_QUERY_FUNC_RESP_EVENT_LAST CREQ_QUERY_FUNC_RESP_EVENT_QUERY_FUNC
+ u8 reserved48[6];
+};
+
+/* creq_query_func_resp_sb (size:1088b/136B) */
+struct creq_query_func_resp_sb {
+ u8 opcode;
+ #define CREQ_QUERY_FUNC_RESP_SB_OPCODE_QUERY_FUNC 0x83UL
+ #define CREQ_QUERY_FUNC_RESP_SB_OPCODE_LAST CREQ_QUERY_FUNC_RESP_SB_OPCODE_QUERY_FUNC
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 max_mr_size;
+ __le32 max_qp;
+ __le16 max_qp_wr;
+ __le16 dev_cap_flags;
+ #define CREQ_QUERY_FUNC_RESP_SB_RESIZE_QP 0x1UL
+ #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_MASK 0xeUL
+ #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_SFT 1
+ #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN0 (0x0UL << 1)
+ #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN1 (0x1UL << 1)
+ #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN1_EXT (0x2UL << 1)
+ #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_LAST \
+ CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN1_EXT
+ #define CREQ_QUERY_FUNC_RESP_SB_EXT_STATS 0x10UL
+ #define CREQ_QUERY_FUNC_RESP_SB_MR_REGISTER_ALLOC 0x20UL
+ #define CREQ_QUERY_FUNC_RESP_SB_OPTIMIZED_TRANSMIT_ENABLED 0x40UL
+ #define CREQ_QUERY_FUNC_RESP_SB_CQE_V2 0x80UL
+ #define CREQ_QUERY_FUNC_RESP_SB_PINGPONG_PUSH_MODE 0x100UL
+ #define CREQ_QUERY_FUNC_RESP_SB_HW_REQUESTER_RETX_ENABLED 0x200UL
+ #define CREQ_QUERY_FUNC_RESP_SB_HW_RESPONDER_RETX_ENABLED 0x400UL
+ __le32 max_cq;
+ __le32 max_cqe;
+ __le32 max_pd;
+ u8 max_sge;
+ u8 max_srq_sge;
+ u8 max_qp_rd_atom;
+ u8 max_qp_init_rd_atom;
+ __le32 max_mr;
+ __le32 max_mw;
+ __le32 max_raw_eth_qp;
+ __le32 max_ah;
+ __le32 max_fmr;
+ __le32 max_srq_wr;
+ __le32 max_pkeys;
+ __le32 max_inline_data;
+ u8 max_map_per_fmr;
+ u8 l2_db_space_size;
+ __le16 max_srq;
+ __le32 max_gid;
+ __le32 tqm_alloc_reqs[12];
+ __le32 max_dpi;
+ u8 max_sge_var_wqe;
+ u8 dev_cap_ext_flags;
+ #define CREQ_QUERY_FUNC_RESP_SB_ATOMIC_OPS_NOT_SUPPORTED 0x1UL
+ #define CREQ_QUERY_FUNC_RESP_SB_DRV_VERSION_RGTR_SUPPORTED 0x2UL
+ #define CREQ_QUERY_FUNC_RESP_SB_CREATE_QP_BATCH_SUPPORTED 0x4UL
+ #define CREQ_QUERY_FUNC_RESP_SB_DESTROY_QP_BATCH_SUPPORTED 0x8UL
+ #define CREQ_QUERY_FUNC_RESP_SB_ROCE_STATS_EXT_CTX_SUPPORTED 0x10UL
+ #define CREQ_QUERY_FUNC_RESP_SB_CREATE_SRQ_SGE_SUPPORTED 0x20UL
+ #define CREQ_QUERY_FUNC_RESP_SB_FIXED_SIZE_WQE_DISABLED 0x40UL
+ #define CREQ_QUERY_FUNC_RESP_SB_DCN_SUPPORTED 0x80UL
+ __le16 max_inline_data_var_wqe;
+ __le32 start_qid;
+ u8 max_msn_table_size;
+ u8 reserved8_1;
+ __le16 dev_cap_ext_flags_2;
+ #define CREQ_QUERY_FUNC_RESP_SB_OPTIMIZE_MODIFY_QP_SUPPORTED 0x1UL
+ #define CREQ_QUERY_FUNC_RESP_SB_CHANGE_UDP_SRC_PORT_WQE_SUPPORTED 0x2UL
+ #define CREQ_QUERY_FUNC_RESP_SB_CQ_COALESCING_SUPPORTED 0x4UL
+ #define CREQ_QUERY_FUNC_RESP_SB_MEMORY_REGION_RO_SUPPORTED 0x8UL
+ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_MASK 0x30UL
+ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_SFT 4
+ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_HOST_PSN_TABLE (0x0UL << 4)
+ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_HOST_MSN_TABLE (0x1UL << 4)
+ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE (0x2UL << 4)
+ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_LAST \
+ CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE
+ #define CREQ_QUERY_FUNC_RESP_SB_MAX_SRQ_EXTENDED 0x40UL
+ #define CREQ_QUERY_FUNC_RESP_SB_MIN_RNR_RTR_RTS_OPT_SUPPORTED 0x1000UL
+ __le16 max_xp_qp_size;
+ __le16 create_qp_batch_size;
+ __le16 destroy_qp_batch_size;
+ __le16 max_srq_ext;
+ __le64 reserved64;
+};
+
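dev_cap_flags in the query-function side buffer mixes single-bit capabilities with the multi-bit CC_GENERATION field (bits 1..3). A small, illustrative decoder is sketched below; it is not driver code, assumes a little-endian host, and only covers a handful of the flags defined above.

#include <stdint.h>
#include <stdio.h>

/* Values copied from the creq_query_func_resp_sb definition above. */
#define CREQ_QUERY_FUNC_RESP_SB_RESIZE_QP          0x1UL
#define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_MASK 0xeUL
#define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_SFT  1
#define CREQ_QUERY_FUNC_RESP_SB_EXT_STATS          0x10UL
#define CREQ_QUERY_FUNC_RESP_SB_CQE_V2             0x80UL

/* Hypothetical helper: print a few capabilities from dev_cap_flags. */
static void dump_dev_caps(uint16_t dev_cap_flags)
{
	unsigned int cc_gen =
		(dev_cap_flags & CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_MASK) >>
		CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_SFT;

	printf("resize_qp:      %s\n",
	       (dev_cap_flags & CREQ_QUERY_FUNC_RESP_SB_RESIZE_QP) ? "yes" : "no");
	printf("cc generation:  %u\n", cc_gen);
	printf("extended stats: %s\n",
	       (dev_cap_flags & CREQ_QUERY_FUNC_RESP_SB_EXT_STATS) ? "yes" : "no");
	printf("cqe v2:         %s\n",
	       (dev_cap_flags & CREQ_QUERY_FUNC_RESP_SB_CQE_V2) ? "yes" : "no");
}
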
+/* cmdq_set_func_resources (size:448b/56B) */
+struct cmdq_set_func_resources {
+ u8 opcode;
+ #define CMDQ_SET_FUNC_RESOURCES_OPCODE_SET_FUNC_RESOURCES 0x84UL
+ #define CMDQ_SET_FUNC_RESOURCES_OPCODE_LAST\
+ CMDQ_SET_FUNC_RESOURCES_OPCODE_SET_FUNC_RESOURCES
+ u8 cmd_size;
+ __le16 flags;
+ #define CMDQ_SET_FUNC_RESOURCES_FLAGS_MRAV_RESERVATION_SPLIT 0x1UL
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 number_of_qp;
+ __le32 number_of_mrw;
+ __le32 number_of_srq;
+ __le32 number_of_cq;
+ __le32 max_qp_per_vf;
+ __le32 max_mrw_per_vf;
+ __le32 max_srq_per_vf;
+ __le32 max_cq_per_vf;
+ __le32 max_gid_per_vf;
+ __le32 stat_ctx_id;
+};
+
+/* creq_set_func_resources_resp (size:128b/16B) */
+struct creq_set_func_resources_resp {
+ u8 type;
+ #define CREQ_SET_FUNC_RESOURCES_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_SET_FUNC_RESOURCES_RESP_TYPE_SFT 0
+ #define CREQ_SET_FUNC_RESOURCES_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_SET_FUNC_RESOURCES_RESP_TYPE_LAST CREQ_SET_FUNC_RESOURCES_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 reserved32;
+ u8 v;
+ #define CREQ_SET_FUNC_RESOURCES_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_SET_FUNC_RESOURCES_RESP_EVENT_SET_FUNC_RESOURCES 0x84UL
+ #define CREQ_SET_FUNC_RESOURCES_RESP_EVENT_LAST \
+ CREQ_SET_FUNC_RESOURCES_RESP_EVENT_SET_FUNC_RESOURCES
+ u8 reserved48[6];
+};
+
+/* cmdq_read_context (size:192b/24B) */
+struct cmdq_read_context {
+ u8 opcode;
+ #define CMDQ_READ_CONTEXT_OPCODE_READ_CONTEXT 0x85UL
+ #define CMDQ_READ_CONTEXT_OPCODE_LAST CMDQ_READ_CONTEXT_OPCODE_READ_CONTEXT
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 xid;
+ u8 type;
+ #define CMDQ_READ_CONTEXT_TYPE_QPC 0x0UL
+ #define CMDQ_READ_CONTEXT_TYPE_CQ 0x1UL
+ #define CMDQ_READ_CONTEXT_TYPE_MRW 0x2UL
+ #define CMDQ_READ_CONTEXT_TYPE_SRQ 0x3UL
+ #define CMDQ_READ_CONTEXT_TYPE_LAST CMDQ_READ_CONTEXT_TYPE_SRQ
+ u8 unused_0[3];
+};
+
+/* creq_read_context (size:128b/16B) */
+struct creq_read_context {
+ u8 type;
+ #define CREQ_READ_CONTEXT_TYPE_MASK 0x3fUL
+ #define CREQ_READ_CONTEXT_TYPE_SFT 0
+ #define CREQ_READ_CONTEXT_TYPE_QP_EVENT 0x38UL
+ #define CREQ_READ_CONTEXT_TYPE_LAST CREQ_READ_CONTEXT_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 reserved32;
+ u8 v;
+ #define CREQ_READ_CONTEXT_V 0x1UL
+ u8 event;
+ #define CREQ_READ_CONTEXT_EVENT_READ_CONTEXT 0x85UL
+ #define CREQ_READ_CONTEXT_EVENT_LAST CREQ_READ_CONTEXT_EVENT_READ_CONTEXT
+ __le16 reserved16;
+ __le32 reserved_32;
+};
+
+/* cmdq_map_tc_to_cos (size:192b/24B) */
+struct cmdq_map_tc_to_cos {
+ u8 opcode;
+ #define CMDQ_MAP_TC_TO_COS_OPCODE_MAP_TC_TO_COS 0x8aUL
+ #define CMDQ_MAP_TC_TO_COS_OPCODE_LAST CMDQ_MAP_TC_TO_COS_OPCODE_MAP_TC_TO_COS
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le16 cos0;
+ #define CMDQ_MAP_TC_TO_COS_COS0_NO_CHANGE 0xffffUL
+ #define CMDQ_MAP_TC_TO_COS_COS0_LAST CMDQ_MAP_TC_TO_COS_COS0_NO_CHANGE
+ __le16 cos1;
+ #define CMDQ_MAP_TC_TO_COS_COS1_DISABLE 0x8000UL
+ #define CMDQ_MAP_TC_TO_COS_COS1_NO_CHANGE 0xffffUL
+ #define CMDQ_MAP_TC_TO_COS_COS1_LAST CMDQ_MAP_TC_TO_COS_COS1_NO_CHANGE
+ __le32 unused_0;
+};
+
+/* creq_map_tc_to_cos_resp (size:128b/16B) */
+struct creq_map_tc_to_cos_resp {
+ u8 type;
+ #define CREQ_MAP_TC_TO_COS_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_MAP_TC_TO_COS_RESP_TYPE_SFT 0
+ #define CREQ_MAP_TC_TO_COS_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_MAP_TC_TO_COS_RESP_TYPE_LAST CREQ_MAP_TC_TO_COS_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 reserved32;
+ u8 v;
+ #define CREQ_MAP_TC_TO_COS_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_MAP_TC_TO_COS_RESP_EVENT_MAP_TC_TO_COS 0x8aUL
+ #define CREQ_MAP_TC_TO_COS_RESP_EVENT_LAST CREQ_MAP_TC_TO_COS_RESP_EVENT_MAP_TC_TO_COS
+ u8 reserved48[6];
+};
+
+/* cmdq_query_roce_cc (size:128b/16B) */
+struct cmdq_query_roce_cc {
+ u8 opcode;
+ #define CMDQ_QUERY_ROCE_CC_OPCODE_QUERY_ROCE_CC 0x8dUL
+ #define CMDQ_QUERY_ROCE_CC_OPCODE_LAST CMDQ_QUERY_ROCE_CC_OPCODE_QUERY_ROCE_CC
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+};
+
+/* creq_query_roce_cc_resp (size:128b/16B) */
+struct creq_query_roce_cc_resp {
+ u8 type;
+ #define CREQ_QUERY_ROCE_CC_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_ROCE_CC_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_ROCE_CC_RESP_TYPE_LAST CREQ_QUERY_ROCE_CC_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 size;
+ u8 v;
+ #define CREQ_QUERY_ROCE_CC_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_ROCE_CC_RESP_EVENT_QUERY_ROCE_CC 0x8dUL
+ #define CREQ_QUERY_ROCE_CC_RESP_EVENT_LAST CREQ_QUERY_ROCE_CC_RESP_EVENT_QUERY_ROCE_CC
+ u8 reserved48[6];
+};
+
+/* creq_query_roce_cc_resp_sb (size:256b/32B) */
+struct creq_query_roce_cc_resp_sb {
+ u8 opcode;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_OPCODE_QUERY_ROCE_CC 0x8dUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_OPCODE_LAST \
+ CREQ_QUERY_ROCE_CC_RESP_SB_OPCODE_QUERY_ROCE_CC
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 reserved8;
+ u8 enable_cc;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_ENABLE_CC 0x1UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_UNUSED7_MASK 0xfeUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_UNUSED7_SFT 1
+ u8 tos_dscp_tos_ecn;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_MASK 0x3UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_MASK 0xfcUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_SFT 2
+ u8 g;
+ u8 num_phases_per_state;
+ __le16 init_cr;
+ __le16 init_tr;
+ u8 alt_vlan_pcp;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_ALT_VLAN_PCP_MASK 0x7UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_ALT_VLAN_PCP_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD1_MASK 0xf8UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD1_SFT 3
+ u8 alt_tos_dscp;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_ALT_TOS_DSCP_MASK 0x3fUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_ALT_TOS_DSCP_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD4_MASK 0xc0UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD4_SFT 6
+ u8 cc_mode;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_CC_MODE_DCTCP 0x0UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_CC_MODE_PROBABILISTIC 0x1UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_CC_MODE_LAST \
+ CREQ_QUERY_ROCE_CC_RESP_SB_CC_MODE_PROBABILISTIC
+ u8 tx_queue;
+ __le16 rtt;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RTT_MASK 0x3fffUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RTT_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD5_MASK 0xc000UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD5_SFT 14
+ __le16 tcp_cp;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TCP_CP_MASK 0x3ffUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TCP_CP_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD6_MASK 0xfc00UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD6_SFT 10
+ __le16 inactivity_th;
+ u8 pkts_per_phase;
+ u8 time_per_phase;
+ __le32 reserved32;
+};
+
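tos_dscp_tos_ecn above carries the ECN code point in bits 0..1 and the DSCP value in bits 2..7. A couple of hypothetical accessors, illustrative only:

#include <stdint.h>

/* Values copied from the creq_query_roce_cc_resp_sb definition above. */
#define CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_MASK  0x3UL
#define CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_SFT   0
#define CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_MASK 0xfcUL
#define CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_SFT  2

static unsigned int cc_tos_ecn(uint8_t tos_dscp_tos_ecn)
{
	return (tos_dscp_tos_ecn & CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_MASK) >>
	       CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_SFT;
}

static unsigned int cc_tos_dscp(uint8_t tos_dscp_tos_ecn)
{
	return (tos_dscp_tos_ecn & CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_MASK) >>
	       CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_SFT;
}
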
+/* creq_query_roce_cc_resp_sb_tlv (size:384b/48B) */
+struct creq_query_roce_cc_resp_sb_tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 tlv_flags;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_MORE 0x1UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_MORE_LAST 0x0UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_REQUIRED 0x2UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_REQUIRED_LAST \
+ CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+ u8 total_size;
+ u8 reserved56[7];
+ u8 opcode;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_OPCODE_QUERY_ROCE_CC 0x8dUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_OPCODE_LAST \
+ CREQ_QUERY_ROCE_CC_RESP_SB_TLV_OPCODE_QUERY_ROCE_CC
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 reserved8;
+ u8 enable_cc;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_ENABLE_CC 0x1UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_UNUSED7_MASK 0xfeUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_UNUSED7_SFT 1
+ u8 tos_dscp_tos_ecn;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TOS_ECN_MASK 0x3UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TOS_ECN_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TOS_DSCP_MASK 0xfcUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TOS_DSCP_SFT 2
+ u8 g;
+ u8 num_phases_per_state;
+ __le16 init_cr;
+ __le16 init_tr;
+ u8 alt_vlan_pcp;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_ALT_VLAN_PCP_MASK 0x7UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_ALT_VLAN_PCP_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD1_MASK 0xf8UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD1_SFT 3
+ u8 alt_tos_dscp;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_ALT_TOS_DSCP_MASK 0x3fUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_ALT_TOS_DSCP_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD4_MASK 0xc0UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD4_SFT 6
+ u8 cc_mode;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_CC_MODE_DCTCP 0x0UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_CC_MODE_PROBABILISTIC 0x1UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_CC_MODE_LAST\
+ CREQ_QUERY_ROCE_CC_RESP_SB_TLV_CC_MODE_PROBABILISTIC
+ u8 tx_queue;
+ __le16 rtt;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RTT_MASK 0x3fffUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RTT_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD5_MASK 0xc000UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD5_SFT 14
+ __le16 tcp_cp;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TCP_CP_MASK 0x3ffUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TCP_CP_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD6_MASK 0xfc00UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD6_SFT 10
+ __le16 inactivity_th;
+ u8 pkts_per_phase;
+ u8 time_per_phase;
+ __le32 reserved32;
+};
+
+/* creq_query_roce_cc_gen1_resp_sb_tlv (size:704b/88B) */
+struct creq_query_roce_cc_gen1_resp_sb_tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 tlv_flags;
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_MORE 0x1UL
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_MORE_LAST 0x0UL
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_REQUIRED 0x2UL
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_REQUIRED_LAST \
+ CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+ __le64 reserved64;
+ __le16 inactivity_th_hi;
+ __le16 min_time_between_cnps;
+ __le16 init_cp;
+ u8 tr_update_mode;
+ u8 tr_update_cycles;
+ u8 fr_num_rtts;
+ u8 ai_rate_increase;
+ __le16 reduction_relax_rtts_th;
+ __le16 additional_relax_cr_th;
+ __le16 cr_min_th;
+ u8 bw_avg_weight;
+ u8 actual_cr_factor;
+ __le16 max_cp_cr_th;
+ u8 cp_bias_en;
+ u8 cp_bias;
+ u8 cnp_ecn;
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_CNP_ECN_NOT_ECT 0x0UL
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_CNP_ECN_ECT_1 0x1UL
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_CNP_ECN_ECT_0 0x2UL
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_CNP_ECN_LAST \
+ CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_CNP_ECN_ECT_0
+ u8 rtt_jitter_en;
+ __le16 link_bytes_per_usec;
+ __le16 reset_cc_cr_th;
+ u8 cr_width;
+ u8 quota_period_min;
+ u8 quota_period_max;
+ u8 quota_period_abs_max;
+ __le16 tr_lower_bound;
+ u8 cr_prob_factor;
+ u8 tr_prob_factor;
+ __le16 fairness_cr_th;
+ u8 red_div;
+ u8 cnp_ratio_th;
+ __le16 exp_ai_rtts;
+ u8 exp_ai_cr_cp_ratio;
+ u8 use_rate_table;
+ __le16 cp_exp_update_th;
+ __le16 high_exp_ai_rtts_th1;
+ __le16 high_exp_ai_rtts_th2;
+ __le16 actual_cr_cong_free_rtts_th;
+ __le16 severe_cong_cr_th1;
+ __le16 severe_cong_cr_th2;
+ __le32 link64B_per_rtt;
+ u8 cc_ack_bytes;
+ u8 reduce_init_en;
+ __le16 reduce_init_cong_free_rtts_th;
+ u8 random_no_red_en;
+ u8 actual_cr_shift_correction_en;
+ u8 quota_period_adjust_en;
+ u8 reserved[5];
+};
+
+/* cmdq_modify_roce_cc (size:448b/56B) */
+struct cmdq_modify_roce_cc {
+ u8 opcode;
+ #define CMDQ_MODIFY_ROCE_CC_OPCODE_MODIFY_ROCE_CC 0x8cUL
+ #define CMDQ_MODIFY_ROCE_CC_OPCODE_LAST CMDQ_MODIFY_ROCE_CC_OPCODE_MODIFY_ROCE_CC
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 modify_mask;
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_G 0x2UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_NUMPHASEPERSTATE 0x4UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INIT_CR 0x8UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INIT_TR 0x10UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN 0x20UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP 0x40UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_VLAN_PCP 0x80UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_TOS_DSCP 0x100UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_RTT 0x200UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_CC_MODE 0x400UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TCP_CP 0x800UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TX_QUEUE 0x1000UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INACTIVITY_CP 0x2000UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TIME_PER_PHASE 0x4000UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_PKTS_PER_PHASE 0x8000UL
+ u8 enable_cc;
+ #define CMDQ_MODIFY_ROCE_CC_ENABLE_CC 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_RSVD1_MASK 0xfeUL
+ #define CMDQ_MODIFY_ROCE_CC_RSVD1_SFT 1
+ u8 g;
+ u8 num_phases_per_state;
+ u8 pkts_per_phase;
+ __le16 init_cr;
+ __le16 init_tr;
+ u8 tos_dscp_tos_ecn;
+ #define CMDQ_MODIFY_ROCE_CC_TOS_ECN_MASK 0x3UL
+ #define CMDQ_MODIFY_ROCE_CC_TOS_ECN_SFT 0
+ #define CMDQ_MODIFY_ROCE_CC_TOS_DSCP_MASK 0xfcUL
+ #define CMDQ_MODIFY_ROCE_CC_TOS_DSCP_SFT 2
+ u8 alt_vlan_pcp;
+ #define CMDQ_MODIFY_ROCE_CC_ALT_VLAN_PCP_MASK 0x7UL
+ #define CMDQ_MODIFY_ROCE_CC_ALT_VLAN_PCP_SFT 0
+ #define CMDQ_MODIFY_ROCE_CC_RSVD3_MASK 0xf8UL
+ #define CMDQ_MODIFY_ROCE_CC_RSVD3_SFT 3
+ __le16 alt_tos_dscp;
+ #define CMDQ_MODIFY_ROCE_CC_ALT_TOS_DSCP_MASK 0x3fUL
+ #define CMDQ_MODIFY_ROCE_CC_ALT_TOS_DSCP_SFT 0
+ #define CMDQ_MODIFY_ROCE_CC_RSVD4_MASK 0xffc0UL
+ #define CMDQ_MODIFY_ROCE_CC_RSVD4_SFT 6
+ __le16 rtt;
+ #define CMDQ_MODIFY_ROCE_CC_RTT_MASK 0x3fffUL
+ #define CMDQ_MODIFY_ROCE_CC_RTT_SFT 0
+ #define CMDQ_MODIFY_ROCE_CC_RSVD5_MASK 0xc000UL
+ #define CMDQ_MODIFY_ROCE_CC_RSVD5_SFT 14
+ __le16 tcp_cp;
+ #define CMDQ_MODIFY_ROCE_CC_TCP_CP_MASK 0x3ffUL
+ #define CMDQ_MODIFY_ROCE_CC_TCP_CP_SFT 0
+ #define CMDQ_MODIFY_ROCE_CC_RSVD6_MASK 0xfc00UL
+ #define CMDQ_MODIFY_ROCE_CC_RSVD6_SFT 10
+ u8 cc_mode;
+ #define CMDQ_MODIFY_ROCE_CC_CC_MODE_DCTCP_CC_MODE 0x0UL
+ #define CMDQ_MODIFY_ROCE_CC_CC_MODE_PROBABILISTIC_CC_MODE 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_CC_MODE_LAST CMDQ_MODIFY_ROCE_CC_CC_MODE_PROBABILISTIC_CC_MODE
+ u8 tx_queue;
+ __le16 inactivity_th;
+ u8 time_per_phase;
+ u8 reserved8_1;
+ __le16 reserved16;
+ __le32 reserved32;
+ __le64 reserved64;
+};
+
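cmdq_modify_roce_cc pairs each tunable with a bit in modify_mask, which presumably tells firmware which of the fields that follow are to be applied. The sketch below builds a request that enables CC and updates the ECN/DSCP marking; the stub struct and helper are hypothetical, with __le32 modeled as uint32_t on a little-endian host.

#include <stdint.h>

/* Values copied from the cmdq_modify_roce_cc definition above. */
#define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC 0x1UL
#define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN   0x20UL
#define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP  0x40UL
#define CMDQ_MODIFY_ROCE_CC_ENABLE_CC             0x1UL
#define CMDQ_MODIFY_ROCE_CC_TOS_ECN_MASK          0x3UL
#define CMDQ_MODIFY_ROCE_CC_TOS_ECN_SFT           0
#define CMDQ_MODIFY_ROCE_CC_TOS_DSCP_MASK         0xfcUL
#define CMDQ_MODIFY_ROCE_CC_TOS_DSCP_SFT          2

/* Hypothetical stand-in for the fields of cmdq_modify_roce_cc touched here. */
struct modify_cc_stub {
	uint32_t modify_mask;
	uint8_t  enable_cc;
	uint8_t  tos_dscp_tos_ecn;
};

/* Hypothetical helper: request CC enable plus a new ECN/DSCP marking. */
static void modify_cc_request(struct modify_cc_stub *req, uint8_t ecn, uint8_t dscp)
{
	req->modify_mask = CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC |
			   CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN |
			   CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP;
	req->enable_cc = CMDQ_MODIFY_ROCE_CC_ENABLE_CC;
	req->tos_dscp_tos_ecn =
		((ecn << CMDQ_MODIFY_ROCE_CC_TOS_ECN_SFT) &
		 CMDQ_MODIFY_ROCE_CC_TOS_ECN_MASK) |
		((dscp << CMDQ_MODIFY_ROCE_CC_TOS_DSCP_SFT) &
		 CMDQ_MODIFY_ROCE_CC_TOS_DSCP_MASK);
}
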
+/* cmdq_modify_roce_cc_tlv (size:640b/80B) */
+struct cmdq_modify_roce_cc_tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 tlv_flags;
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_MORE 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_MORE_LAST 0x0UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_REQUIRED 0x2UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_REQUIRED_LAST \
+ CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+ u8 total_size;
+ u8 reserved56[7];
+ u8 opcode;
+ #define CMDQ_MODIFY_ROCE_CC_TLV_OPCODE_MODIFY_ROCE_CC 0x8cUL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_OPCODE_LAST CMDQ_MODIFY_ROCE_CC_TLV_OPCODE_MODIFY_ROCE_CC
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 modify_mask;
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_ENABLE_CC 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_G 0x2UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_NUMPHASEPERSTATE 0x4UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_INIT_CR 0x8UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_INIT_TR 0x10UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_TOS_ECN 0x20UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_TOS_DSCP 0x40UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_ALT_VLAN_PCP 0x80UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_ALT_TOS_DSCP 0x100UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_RTT 0x200UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_CC_MODE 0x400UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_TCP_CP 0x800UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_TX_QUEUE 0x1000UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_INACTIVITY_CP 0x2000UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_TIME_PER_PHASE 0x4000UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_PKTS_PER_PHASE 0x8000UL
+ u8 enable_cc;
+ #define CMDQ_MODIFY_ROCE_CC_TLV_ENABLE_CC 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD1_MASK 0xfeUL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD1_SFT 1
+ u8 g;
+ u8 num_phases_per_state;
+ u8 pkts_per_phase;
+ __le16 init_cr;
+ __le16 init_tr;
+ u8 tos_dscp_tos_ecn;
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TOS_ECN_MASK 0x3UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TOS_ECN_SFT 0
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TOS_DSCP_MASK 0xfcUL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TOS_DSCP_SFT 2
+ u8 alt_vlan_pcp;
+ #define CMDQ_MODIFY_ROCE_CC_TLV_ALT_VLAN_PCP_MASK 0x7UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_ALT_VLAN_PCP_SFT 0
+ #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD3_MASK 0xf8UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD3_SFT 3
+ __le16 alt_tos_dscp;
+ #define CMDQ_MODIFY_ROCE_CC_TLV_ALT_TOS_DSCP_MASK 0x3fUL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_ALT_TOS_DSCP_SFT 0
+ #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD4_MASK 0xffc0UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD4_SFT 6
+ __le16 rtt;
+ #define CMDQ_MODIFY_ROCE_CC_TLV_RTT_MASK 0x3fffUL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_RTT_SFT 0
+ #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD5_MASK 0xc000UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD5_SFT 14
+ __le16 tcp_cp;
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TCP_CP_MASK 0x3ffUL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TCP_CP_SFT 0
+ #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD6_MASK 0xfc00UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD6_SFT 10
+ u8 cc_mode;
+ #define CMDQ_MODIFY_ROCE_CC_TLV_CC_MODE_DCTCP_CC_MODE 0x0UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_CC_MODE_PROBABILISTIC_CC_MODE 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_CC_MODE_LAST\
+ CMDQ_MODIFY_ROCE_CC_TLV_CC_MODE_PROBABILISTIC_CC_MODE
+ u8 tx_queue;
+ __le16 inactivity_th;
+ u8 time_per_phase;
+ u8 reserved8_1;
+ __le16 reserved16;
+ __le32 reserved32;
+ __le64 reserved64;
+ __le64 reservedtlvpad;
+};
+
+/* cmdq_modify_roce_cc_gen1_tlv (size:768b/96B) */
+struct cmdq_modify_roce_cc_gen1_tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 tlv_flags;
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_MORE 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_MORE_LAST 0x0UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_REQUIRED 0x2UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_REQUIRED_LAST\
+ CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+ __le64 reserved64;
+ __le64 modify_mask;
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_MIN_TIME_BETWEEN_CNPS 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_INIT_CP 0x2UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_TR_UPDATE_MODE 0x4UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_TR_UPDATE_CYCLES 0x8UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_FR_NUM_RTTS 0x10UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_AI_RATE_INCREASE 0x20UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_REDUCTION_RELAX_RTTS_TH 0x40UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_ADDITIONAL_RELAX_CR_TH 0x80UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CR_MIN_TH 0x100UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_BW_AVG_WEIGHT 0x200UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_ACTUAL_CR_FACTOR 0x400UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_MAX_CP_CR_TH 0x800UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CP_BIAS_EN 0x1000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CP_BIAS 0x2000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CNP_ECN 0x4000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_RTT_JITTER_EN 0x8000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_LINK_BYTES_PER_USEC 0x10000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_RESET_CC_CR_TH 0x20000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CR_WIDTH 0x40000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_QUOTA_PERIOD_MIN 0x80000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_QUOTA_PERIOD_MAX 0x100000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_QUOTA_PERIOD_ABS_MAX 0x200000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_TR_LOWER_BOUND 0x400000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CR_PROB_FACTOR 0x800000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_TR_PROB_FACTOR 0x1000000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_FAIRNESS_CR_TH 0x2000000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_RED_DIV 0x4000000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CNP_RATIO_TH 0x8000000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_EXP_AI_RTTS 0x10000000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_EXP_AI_CR_CP_RATIO 0x20000000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CP_EXP_UPDATE_TH 0x40000000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_HIGH_EXP_AI_RTTS_TH1 0x80000000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_HIGH_EXP_AI_RTTS_TH2 0x100000000ULL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_USE_RATE_TABLE 0x200000000ULL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_LINK64B_PER_RTT 0x400000000ULL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_ACTUAL_CR_CONG_FREE_RTTS_TH 0x800000000ULL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_SEVERE_CONG_CR_TH1 0x1000000000ULL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_SEVERE_CONG_CR_TH2 0x2000000000ULL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CC_ACK_BYTES 0x4000000000ULL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_REDUCE_INIT_EN 0x8000000000ULL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_REDUCE_INIT_CONG_FREE_RTTS_TH \
+ 0x10000000000ULL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_RANDOM_NO_RED_EN 0x20000000000ULL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_ACTUAL_CR_SHIFT_CORRECTION_EN \
+ 0x40000000000ULL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_QUOTA_PERIOD_ADJUST_EN 0x80000000000ULL
+ __le16 inactivity_th_hi;
+ __le16 min_time_between_cnps;
+ __le16 init_cp;
+ u8 tr_update_mode;
+ u8 tr_update_cycles;
+ u8 fr_num_rtts;
+ u8 ai_rate_increase;
+ __le16 reduction_relax_rtts_th;
+ __le16 additional_relax_cr_th;
+ __le16 cr_min_th;
+ u8 bw_avg_weight;
+ u8 actual_cr_factor;
+ __le16 max_cp_cr_th;
+ u8 cp_bias_en;
+ u8 cp_bias;
+ u8 cnp_ecn;
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_CNP_ECN_NOT_ECT 0x0UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_CNP_ECN_ECT_1 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_CNP_ECN_ECT_0 0x2UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_CNP_ECN_LAST CMDQ_MODIFY_ROCE_CC_GEN1_TLV_CNP_ECN_ECT_0
+ u8 rtt_jitter_en;
+ __le16 link_bytes_per_usec;
+ __le16 reset_cc_cr_th;
+ u8 cr_width;
+ u8 quota_period_min;
+ u8 quota_period_max;
+ u8 quota_period_abs_max;
+ __le16 tr_lower_bound;
+ u8 cr_prob_factor;
+ u8 tr_prob_factor;
+ __le16 fairness_cr_th;
+ u8 red_div;
+ u8 cnp_ratio_th;
+ __le16 exp_ai_rtts;
+ u8 exp_ai_cr_cp_ratio;
+ u8 use_rate_table;
+ __le16 cp_exp_update_th;
+ __le16 high_exp_ai_rtts_th1;
+ __le16 high_exp_ai_rtts_th2;
+ __le16 actual_cr_cong_free_rtts_th;
+ __le16 severe_cong_cr_th1;
+ __le16 severe_cong_cr_th2;
+ __le32 link64B_per_rtt;
+ u8 cc_ack_bytes;
+ u8 reduce_init_en;
+ __le16 reduce_init_cong_free_rtts_th;
+ u8 random_no_red_en;
+ u8 actual_cr_shift_correction_en;
+ u8 quota_period_adjust_en;
+ u8 reserved[5];
+};
+
+/* creq_modify_roce_cc_resp (size:128b/16B) */
+struct creq_modify_roce_cc_resp {
+ u8 type;
+ #define CREQ_MODIFY_ROCE_CC_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_MODIFY_ROCE_CC_RESP_TYPE_SFT 0
+ #define CREQ_MODIFY_ROCE_CC_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_MODIFY_ROCE_CC_RESP_TYPE_LAST CREQ_MODIFY_ROCE_CC_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 reserved32;
+ u8 v;
+ #define CREQ_MODIFY_ROCE_CC_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_MODIFY_ROCE_CC_RESP_EVENT_MODIFY_ROCE_CC 0x8cUL
+ #define CREQ_MODIFY_ROCE_CC_RESP_EVENT_LAST CREQ_MODIFY_ROCE_CC_RESP_EVENT_MODIFY_ROCE_CC
+ u8 reserved48[6];
+};
+
+/* cmdq_set_link_aggr_mode_cc (size:320b/40B) */
+struct cmdq_set_link_aggr_mode_cc {
+ u8 opcode;
+ #define CMDQ_SET_LINK_AGGR_MODE_OPCODE_SET_LINK_AGGR_MODE 0x8fUL
+ #define CMDQ_SET_LINK_AGGR_MODE_OPCODE_LAST \
+ CMDQ_SET_LINK_AGGR_MODE_OPCODE_SET_LINK_AGGR_MODE
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 modify_mask;
+ #define CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_AGGR_EN 0x1UL
+ #define CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_ACTIVE_PORT_MAP 0x2UL
+ #define CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_MEMBER_PORT_MAP 0x4UL
+ #define CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_AGGR_MODE 0x8UL
+ #define CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_STAT_CTX_ID 0x10UL
+ u8 aggr_enable;
+ #define CMDQ_SET_LINK_AGGR_MODE_AGGR_ENABLE 0x1UL
+ #define CMDQ_SET_LINK_AGGR_MODE_RSVD1_MASK 0xfeUL
+ #define CMDQ_SET_LINK_AGGR_MODE_RSVD1_SFT 1
+ u8 active_port_map;
+ #define CMDQ_SET_LINK_AGGR_MODE_ACTIVE_PORT_MAP_MASK 0xfUL
+ #define CMDQ_SET_LINK_AGGR_MODE_ACTIVE_PORT_MAP_SFT 0
+ #define CMDQ_SET_LINK_AGGR_MODE_RSVD2_MASK 0xf0UL
+ #define CMDQ_SET_LINK_AGGR_MODE_RSVD2_SFT 4
+ u8 member_port_map;
+ u8 link_aggr_mode;
+ #define CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_ACTIVE_ACTIVE 0x1UL
+ #define CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_ACTIVE_BACKUP 0x2UL
+ #define CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_BALANCE_XOR 0x3UL
+ #define CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_802_3_AD 0x4UL
+ #define CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_LAST CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_802_3_AD
+ __le16 stat_ctx_id[4];
+ __le64 rsvd1;
+};
+
+/* creq_set_link_aggr_mode_resources_resp (size:128b/16B) */
+struct creq_set_link_aggr_mode_resources_resp {
+ u8 type;
+ #define CREQ_SET_LINK_AGGR_MODE_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_SET_LINK_AGGR_MODE_RESP_TYPE_SFT 0
+ #define CREQ_SET_LINK_AGGR_MODE_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_SET_LINK_AGGR_MODE_RESP_TYPE_LAST CREQ_SET_LINK_AGGR_MODE_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 reserved32;
+ u8 v;
+ #define CREQ_SET_LINK_AGGR_MODE_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_SET_LINK_AGGR_MODE_RESP_EVENT_SET_LINK_AGGR_MODE 0x8fUL
+ #define CREQ_SET_LINK_AGGR_MODE_RESP_EVENT_LAST\
+ CREQ_SET_LINK_AGGR_MODE_RESP_EVENT_SET_LINK_AGGR_MODE
+ u8 reserved48[6];
+};
+
+/* creq_func_event (size:128b/16B) */
+struct creq_func_event {
+ u8 type;
+ #define CREQ_FUNC_EVENT_TYPE_MASK 0x3fUL
+ #define CREQ_FUNC_EVENT_TYPE_SFT 0
+ #define CREQ_FUNC_EVENT_TYPE_FUNC_EVENT 0x3aUL
+ #define CREQ_FUNC_EVENT_TYPE_LAST CREQ_FUNC_EVENT_TYPE_FUNC_EVENT
+ u8 reserved56[7];
+ u8 v;
+ #define CREQ_FUNC_EVENT_V 0x1UL
+ u8 event;
+ #define CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR 0x1UL
+ #define CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR 0x2UL
+ #define CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR 0x3UL
+ #define CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR 0x4UL
+ #define CREQ_FUNC_EVENT_EVENT_CQ_ERROR 0x5UL
+ #define CREQ_FUNC_EVENT_EVENT_TQM_ERROR 0x6UL
+ #define CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR 0x7UL
+ #define CREQ_FUNC_EVENT_EVENT_CFCS_ERROR 0x8UL
+ #define CREQ_FUNC_EVENT_EVENT_CFCC_ERROR 0x9UL
+ #define CREQ_FUNC_EVENT_EVENT_CFCM_ERROR 0xaUL
+ #define CREQ_FUNC_EVENT_EVENT_TIM_ERROR 0xbUL
+ #define CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST 0x80UL
+ #define CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED 0x81UL
+ #define CREQ_FUNC_EVENT_EVENT_LAST CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED
+ u8 reserved48[6];
+};
+
+/* creq_qp_event (size:128b/16B) */
+struct creq_qp_event {
+ u8 type;
+ #define CREQ_QP_EVENT_TYPE_MASK 0x3fUL
+ #define CREQ_QP_EVENT_TYPE_SFT 0
+ #define CREQ_QP_EVENT_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QP_EVENT_TYPE_LAST CREQ_QP_EVENT_TYPE_QP_EVENT
+ u8 status;
+ #define CREQ_QP_EVENT_STATUS_SUCCESS 0x0UL
+ #define CREQ_QP_EVENT_STATUS_FAIL 0x1UL
+ #define CREQ_QP_EVENT_STATUS_RESOURCES 0x2UL
+ #define CREQ_QP_EVENT_STATUS_INVALID_CMD 0x3UL
+ #define CREQ_QP_EVENT_STATUS_NOT_IMPLEMENTED 0x4UL
+ #define CREQ_QP_EVENT_STATUS_INVALID_PARAMETER 0x5UL
+ #define CREQ_QP_EVENT_STATUS_HARDWARE_ERROR 0x6UL
+ #define CREQ_QP_EVENT_STATUS_INTERNAL_ERROR 0x7UL
+ #define CREQ_QP_EVENT_STATUS_LAST CREQ_QP_EVENT_STATUS_INTERNAL_ERROR
+ __le16 cookie;
+ __le32 reserved32;
+ u8 v;
+ #define CREQ_QP_EVENT_V 0x1UL
+ u8 event;
+ #define CREQ_QP_EVENT_EVENT_CREATE_QP 0x1UL
+ #define CREQ_QP_EVENT_EVENT_DESTROY_QP 0x2UL
+ #define CREQ_QP_EVENT_EVENT_MODIFY_QP 0x3UL
+ #define CREQ_QP_EVENT_EVENT_QUERY_QP 0x4UL
+ #define CREQ_QP_EVENT_EVENT_CREATE_SRQ 0x5UL
+ #define CREQ_QP_EVENT_EVENT_DESTROY_SRQ 0x6UL
+ #define CREQ_QP_EVENT_EVENT_QUERY_SRQ 0x8UL
+ #define CREQ_QP_EVENT_EVENT_CREATE_CQ 0x9UL
+ #define CREQ_QP_EVENT_EVENT_DESTROY_CQ 0xaUL
+ #define CREQ_QP_EVENT_EVENT_RESIZE_CQ 0xcUL
+ #define CREQ_QP_EVENT_EVENT_ALLOCATE_MRW 0xdUL
+ #define CREQ_QP_EVENT_EVENT_DEALLOCATE_KEY 0xeUL
+ #define CREQ_QP_EVENT_EVENT_REGISTER_MR 0xfUL
+ #define CREQ_QP_EVENT_EVENT_DEREGISTER_MR 0x10UL
+ #define CREQ_QP_EVENT_EVENT_ADD_GID 0x11UL
+ #define CREQ_QP_EVENT_EVENT_DELETE_GID 0x12UL
+ #define CREQ_QP_EVENT_EVENT_MODIFY_GID 0x17UL
+ #define CREQ_QP_EVENT_EVENT_QUERY_GID 0x18UL
+ #define CREQ_QP_EVENT_EVENT_CREATE_QP1 0x13UL
+ #define CREQ_QP_EVENT_EVENT_DESTROY_QP1 0x14UL
+ #define CREQ_QP_EVENT_EVENT_CREATE_AH 0x15UL
+ #define CREQ_QP_EVENT_EVENT_DESTROY_AH 0x16UL
+ #define CREQ_QP_EVENT_EVENT_INITIALIZE_FW 0x80UL
+ #define CREQ_QP_EVENT_EVENT_DEINITIALIZE_FW 0x81UL
+ #define CREQ_QP_EVENT_EVENT_STOP_FUNC 0x82UL
+ #define CREQ_QP_EVENT_EVENT_QUERY_FUNC 0x83UL
+ #define CREQ_QP_EVENT_EVENT_SET_FUNC_RESOURCES 0x84UL
+ #define CREQ_QP_EVENT_EVENT_READ_CONTEXT 0x85UL
+ #define CREQ_QP_EVENT_EVENT_MAP_TC_TO_COS 0x8aUL
+ #define CREQ_QP_EVENT_EVENT_QUERY_VERSION 0x8bUL
+ #define CREQ_QP_EVENT_EVENT_MODIFY_CC 0x8cUL
+ #define CREQ_QP_EVENT_EVENT_QUERY_CC 0x8dUL
+ #define CREQ_QP_EVENT_EVENT_QUERY_ROCE_STATS 0x8eUL
+ #define CREQ_QP_EVENT_EVENT_SET_LINK_AGGR_MODE 0x8fUL
+ #define CREQ_QP_EVENT_EVENT_QUERY_QP_EXTEND 0x91UL
+ #define CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION 0xc0UL
+ #define CREQ_QP_EVENT_EVENT_CQ_ERROR_NOTIFICATION 0xc1UL
+ #define CREQ_QP_EVENT_EVENT_LAST CREQ_QP_EVENT_EVENT_CQ_ERROR_NOTIFICATION
+ u8 reserved48[6];
+};
+
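The per-command *_RESP_EVENT_* values above suggest that the event byte of a generic creq_qp_event echoes the opcode of the command it completes, so a consumer can match completions by event code and cookie. A hypothetical, illustrative dispatcher (not the bnxt_re implementation):

#include <stdint.h>
#include <stdio.h>

/* Values copied from the creq_qp_event definition above. */
#define CREQ_QP_EVENT_TYPE_MASK        0x3fUL
#define CREQ_QP_EVENT_TYPE_SFT         0
#define CREQ_QP_EVENT_TYPE_QP_EVENT    0x38UL
#define CREQ_QP_EVENT_STATUS_SUCCESS   0x0UL
#define CREQ_QP_EVENT_EVENT_CREATE_QP  0x1UL
#define CREQ_QP_EVENT_EVENT_DESTROY_QP 0x2UL

/* Hypothetical dispatcher: classify one 16-byte CREQ entry by type/event. */
static void handle_creq_entry(uint8_t type, uint8_t status, uint8_t event, uint16_t cookie)
{
	if (((type & CREQ_QP_EVENT_TYPE_MASK) >> CREQ_QP_EVENT_TYPE_SFT) !=
	    CREQ_QP_EVENT_TYPE_QP_EVENT)
		return; /* not a QP-event completion */

	if (status != CREQ_QP_EVENT_STATUS_SUCCESS)
		printf("cookie 0x%04x failed, status %u\n", cookie, status);

	switch (event) {
	case CREQ_QP_EVENT_EVENT_CREATE_QP:
		printf("CREATE_QP completed (cookie 0x%04x)\n", cookie);
		break;
	case CREQ_QP_EVENT_EVENT_DESTROY_QP:
		printf("DESTROY_QP completed (cookie 0x%04x)\n", cookie);
		break;
	default:
		printf("event 0x%02x completed (cookie 0x%04x)\n", event, cookie);
		break;
	}
}
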
+/* creq_qp_error_notification (size:128b/16B) */
+struct creq_qp_error_notification {
+ u8 type;
+ #define CREQ_QP_ERROR_NOTIFICATION_TYPE_MASK 0x3fUL
+ #define CREQ_QP_ERROR_NOTIFICATION_TYPE_SFT 0
+ #define CREQ_QP_ERROR_NOTIFICATION_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QP_ERROR_NOTIFICATION_TYPE_LAST CREQ_QP_ERROR_NOTIFICATION_TYPE_QP_EVENT
+ u8 status;
+ u8 req_slow_path_state;
+ u8 req_err_state_reason;
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_NO_ERROR 0x0UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_OPCODE_ERROR 0x1UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TIMEOUT_RETRY_LIMIT 0x2UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RNR_TIMEOUT_RETRY_LIMIT 0x3UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_1 0x4UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_2 0x5UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_3 0x6UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_4 0x7UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_MEMORY_ERROR 0x8UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_MEMORY_ERROR 0x9UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_READ_RESP_LENGTH 0xaUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_READ_RESP 0xbUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_BIND 0xcUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_FAST_REG 0xdUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_INVALIDATE 0xeUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CMP_ERROR 0xfUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETRAN_LOCAL_ERROR 0x10UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_WQE_FORMAT_ERROR 0x11UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ORRQ_FORMAT_ERROR 0x12UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_AVID_ERROR 0x13UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_AV_DOMAIN_ERROR 0x14UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CQ_LOAD_ERROR 0x15UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_SERV_TYPE_ERROR 0x16UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_OP_ERROR 0x17UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_PCI_ERROR 0x18UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_PCI_ERROR 0x19UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PROD_WQE_MSMTCH_ERROR 0x1aUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PSN_RANGE_CHECK_ERROR 0x1bUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETX_SETUP_ERROR 0x1cUL
+ __le32 xid;
+ u8 v;
+ #define CREQ_QP_ERROR_NOTIFICATION_V 0x1UL
+ u8 event;
+ #define CREQ_QP_ERROR_NOTIFICATION_EVENT_QP_ERROR_NOTIFICATION 0xc0UL
+ #define CREQ_QP_ERROR_NOTIFICATION_EVENT_LAST \
+ CREQ_QP_ERROR_NOTIFICATION_EVENT_QP_ERROR_NOTIFICATION
+ u8 res_slow_path_state;
+ u8 res_err_state_reason;
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_NO_ERROR 0x0UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEED_MAX 0x1UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PAYLOAD_LENGTH_MISMATCH 0x2UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEEDS_WQE 0x3UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_OPCODE_ERROR 0x4UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_SEQ_ERROR_RETRY_LIMIT 0x5UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_INVALID_R_KEY 0x6UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_DOMAIN_ERROR 0x7UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_NO_PERMISSION 0x8UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_RANGE_ERROR 0x9UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_INVALID_R_KEY 0xaUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_DOMAIN_ERROR 0xbUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_NO_PERMISSION 0xcUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_RANGE_ERROR 0xdUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_OFLOW 0xeUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNSUPPORTED_OPCODE 0xfUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNALIGN_ATOMIC 0x10UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_REM_INVALIDATE 0x11UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_MEMORY_ERROR 0x12UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_ERROR 0x13UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CMP_ERROR 0x14UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_INVALID_DUP_RKEY 0x15UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_WQE_FORMAT_ERROR 0x16UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_FORMAT_ERROR 0x17UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CQ_LOAD_ERROR 0x18UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_LOAD_ERROR 0x19UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_PCI_ERROR 0x1bUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_PCI_ERROR 0x1cUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_NOT_FOUND 0x1dUL
+ __le16 sq_cons_idx;
+ __le16 rq_cons_idx;
+};
+
+/* creq_cq_error_notification (size:128b/16B) */
+struct creq_cq_error_notification {
+ u8 type;
+ #define CREQ_CQ_ERROR_NOTIFICATION_TYPE_MASK 0x3fUL
+ #define CREQ_CQ_ERROR_NOTIFICATION_TYPE_SFT 0
+ #define CREQ_CQ_ERROR_NOTIFICATION_TYPE_CQ_EVENT 0x38UL
+ #define CREQ_CQ_ERROR_NOTIFICATION_TYPE_LAST CREQ_CQ_ERROR_NOTIFICATION_TYPE_CQ_EVENT
+ u8 status;
+ u8 cq_err_reason;
+ #define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_INVALID_ERROR 0x1UL
+ #define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_OVERFLOW_ERROR 0x2UL
+ #define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_LOAD_ERROR 0x3UL
+ #define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_INVALID_ERROR 0x4UL
+ #define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_OVERFLOW_ERROR 0x5UL
+ #define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_LOAD_ERROR 0x6UL
+ #define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_LAST \
+ CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_LOAD_ERROR
+ u8 reserved8;
+ __le32 xid;
+ u8 v;
+ #define CREQ_CQ_ERROR_NOTIFICATION_V 0x1UL
+ u8 event;
+ #define CREQ_CQ_ERROR_NOTIFICATION_EVENT_CQ_ERROR_NOTIFICATION 0xc1UL
+ #define CREQ_CQ_ERROR_NOTIFICATION_EVENT_LAST \
+ CREQ_CQ_ERROR_NOTIFICATION_EVENT_CQ_ERROR_NOTIFICATION
+ u8 reserved48[6];
+};
+
+/* sq_base (size:64b/8B) */
+struct sq_base {
+ u8 wqe_type;
+ #define SQ_BASE_WQE_TYPE_SEND 0x0UL
+ #define SQ_BASE_WQE_TYPE_SEND_W_IMMEAD 0x1UL
+ #define SQ_BASE_WQE_TYPE_SEND_W_INVALID 0x2UL
+ #define SQ_BASE_WQE_TYPE_WRITE_WQE 0x4UL
+ #define SQ_BASE_WQE_TYPE_WRITE_W_IMMEAD 0x5UL
+ #define SQ_BASE_WQE_TYPE_READ_WQE 0x6UL
+ #define SQ_BASE_WQE_TYPE_ATOMIC_CS 0x8UL
+ #define SQ_BASE_WQE_TYPE_ATOMIC_FA 0xbUL
+ #define SQ_BASE_WQE_TYPE_LOCAL_INVALID 0xcUL
+ #define SQ_BASE_WQE_TYPE_FR_PMR 0xdUL
+ #define SQ_BASE_WQE_TYPE_BIND 0xeUL
+ #define SQ_BASE_WQE_TYPE_FR_PPMR 0xfUL
+ #define SQ_BASE_WQE_TYPE_LAST SQ_BASE_WQE_TYPE_FR_PPMR
+ u8 unused_0[7];
+};
+
+/* sq_sge (size:128b/16B) */
+struct sq_sge {
+ __le64 va_or_pa;
+ __le32 l_key;
+ __le32 size;
+};
+
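Each sq_sge names a buffer by address, l_key and byte length, so the payload described by a WQE is just the sum of the SGE sizes. A trivial, illustrative helper, with the __le fields modeled as plain integers on a little-endian host:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical mirror of sq_sge above (no endianness handling). */
struct example_sge {
	uint64_t va_or_pa;
	uint32_t l_key;
	uint32_t size;
};

/* Hypothetical helper: total payload length described by an SGE list. */
static uint64_t sge_list_length(const struct example_sge *sgl, size_t num)
{
	uint64_t total = 0;
	size_t i;

	for (i = 0; i < num; i++)
		total += sgl[i].size;
	return total;
}
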
+/* sq_psn_search (size:64b/8B) */
+struct sq_psn_search {
+ __le32 opcode_start_psn;
+ #define SQ_PSN_SEARCH_START_PSN_MASK 0xffffffUL
+ #define SQ_PSN_SEARCH_START_PSN_SFT 0
+ #define SQ_PSN_SEARCH_OPCODE_MASK 0xff000000UL
+ #define SQ_PSN_SEARCH_OPCODE_SFT 24
+ __le32 flags_next_psn;
+ #define SQ_PSN_SEARCH_NEXT_PSN_MASK 0xffffffUL
+ #define SQ_PSN_SEARCH_NEXT_PSN_SFT 0
+ #define SQ_PSN_SEARCH_FLAGS_MASK 0xff000000UL
+ #define SQ_PSN_SEARCH_FLAGS_SFT 24
+};
+
+/* sq_psn_search_ext (size:128b/16B) */
+struct sq_psn_search_ext {
+ __le32 opcode_start_psn;
+ #define SQ_PSN_SEARCH_EXT_START_PSN_MASK 0xffffffUL
+ #define SQ_PSN_SEARCH_EXT_START_PSN_SFT 0
+ #define SQ_PSN_SEARCH_EXT_OPCODE_MASK 0xff000000UL
+ #define SQ_PSN_SEARCH_EXT_OPCODE_SFT 24
+ __le32 flags_next_psn;
+ #define SQ_PSN_SEARCH_EXT_NEXT_PSN_MASK 0xffffffUL
+ #define SQ_PSN_SEARCH_EXT_NEXT_PSN_SFT 0
+ #define SQ_PSN_SEARCH_EXT_FLAGS_MASK 0xff000000UL
+ #define SQ_PSN_SEARCH_EXT_FLAGS_SFT 24
+ __le16 start_slot_idx;
+ __le16 reserved16;
+ __le32 reserved32;
+};
+
+/* sq_msn_search (size:64b/8B) */
+struct sq_msn_search {
+ __le64 start_idx_next_psn_start_psn;
+ #define SQ_MSN_SEARCH_START_PSN_MASK 0xffffffUL
+ #define SQ_MSN_SEARCH_START_PSN_SFT 0
+ #define SQ_MSN_SEARCH_NEXT_PSN_MASK 0xffffff000000ULL
+ #define SQ_MSN_SEARCH_NEXT_PSN_SFT 24
+ #define SQ_MSN_SEARCH_START_IDX_MASK 0xffff000000000000ULL
+ #define SQ_MSN_SEARCH_START_IDX_SFT 48
+};
+
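sq_msn_search folds three values into one 64-bit word: start PSN in bits 0..23, next PSN in bits 24..47 and the start slot index in bits 48..63. A hypothetical packing helper, illustrative only:

#include <stdint.h>

/* Values copied from the sq_msn_search definition above. */
#define SQ_MSN_SEARCH_START_PSN_MASK 0xffffffUL
#define SQ_MSN_SEARCH_START_PSN_SFT  0
#define SQ_MSN_SEARCH_NEXT_PSN_MASK  0xffffff000000ULL
#define SQ_MSN_SEARCH_NEXT_PSN_SFT   24
#define SQ_MSN_SEARCH_START_IDX_MASK 0xffff000000000000ULL
#define SQ_MSN_SEARCH_START_IDX_SFT  48

/* Hypothetical helper: compose the single 64-bit MSN search entry. */
static uint64_t msn_search_entry(uint16_t start_idx, uint32_t next_psn, uint32_t start_psn)
{
	return (((uint64_t)start_psn << SQ_MSN_SEARCH_START_PSN_SFT) &
		SQ_MSN_SEARCH_START_PSN_MASK) |
	       (((uint64_t)next_psn << SQ_MSN_SEARCH_NEXT_PSN_SFT) &
		SQ_MSN_SEARCH_NEXT_PSN_MASK) |
	       (((uint64_t)start_idx << SQ_MSN_SEARCH_START_IDX_SFT) &
		SQ_MSN_SEARCH_START_IDX_MASK);
}
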
+/* sq_send (size:1024b/128B) */
+struct sq_send {
+ u8 wqe_type;
+ #define SQ_SEND_WQE_TYPE_SEND 0x0UL
+ #define SQ_SEND_WQE_TYPE_SEND_W_IMMEAD 0x1UL
+ #define SQ_SEND_WQE_TYPE_SEND_W_INVALID 0x2UL
+ #define SQ_SEND_WQE_TYPE_LAST SQ_SEND_WQE_TYPE_SEND_W_INVALID
+ u8 flags;
+ #define SQ_SEND_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+ #define SQ_SEND_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+ #define SQ_SEND_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_SEND_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_SEND_FLAGS_UC_FENCE 0x4UL
+ #define SQ_SEND_FLAGS_SE 0x8UL
+ #define SQ_SEND_FLAGS_INLINE 0x10UL
+ #define SQ_SEND_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_SEND_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ u8 reserved8_1;
+ __le32 inv_key_or_imm_data;
+ __le32 length;
+ __le32 q_key;
+ __le32 dst_qp;
+ #define SQ_SEND_DST_QP_MASK 0xffffffUL
+ #define SQ_SEND_DST_QP_SFT 0
+ __le32 avid;
+ #define SQ_SEND_AVID_MASK 0xfffffUL
+ #define SQ_SEND_AVID_SFT 0
+ __le32 reserved32;
+ __le32 timestamp;
+ #define SQ_SEND_TIMESTAMP_MASK 0xffffffUL
+ #define SQ_SEND_TIMESTAMP_SFT 0
+ __le32 data[24];
+};
+
+/* sq_send_hdr (size:256b/32B) */
+struct sq_send_hdr {
+ u8 wqe_type;
+ #define SQ_SEND_HDR_WQE_TYPE_SEND 0x0UL
+ #define SQ_SEND_HDR_WQE_TYPE_SEND_W_IMMEAD 0x1UL
+ #define SQ_SEND_HDR_WQE_TYPE_SEND_W_INVALID 0x2UL
+ #define SQ_SEND_HDR_WQE_TYPE_LAST SQ_SEND_HDR_WQE_TYPE_SEND_W_INVALID
+ u8 flags;
+ #define SQ_SEND_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+ #define SQ_SEND_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+ #define SQ_SEND_HDR_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_SEND_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_SEND_HDR_FLAGS_UC_FENCE 0x4UL
+ #define SQ_SEND_HDR_FLAGS_SE 0x8UL
+ #define SQ_SEND_HDR_FLAGS_INLINE 0x10UL
+ #define SQ_SEND_HDR_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_SEND_HDR_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ u8 reserved8_1;
+ __le32 inv_key_or_imm_data;
+ __le32 length;
+ __le32 q_key;
+ __le32 dst_qp;
+ #define SQ_SEND_HDR_DST_QP_MASK 0xffffffUL
+ #define SQ_SEND_HDR_DST_QP_SFT 0
+ __le32 avid;
+ #define SQ_SEND_HDR_AVID_MASK 0xfffffUL
+ #define SQ_SEND_HDR_AVID_SFT 0
+ __le32 reserved32;
+ __le32 timestamp;
+ #define SQ_SEND_HDR_TIMESTAMP_MASK 0xffffffUL
+ #define SQ_SEND_HDR_TIMESTAMP_SFT 0
+};
+
+/* sq_send_raweth_qp1 (size:1024b/128B) */
+struct sq_send_raweth_qp1 {
+ u8 wqe_type;
+ #define SQ_SEND_RAWETH_QP1_WQE_TYPE_SEND 0x0UL
+ #define SQ_SEND_RAWETH_QP1_WQE_TYPE_LAST SQ_SEND_RAWETH_QP1_WQE_TYPE_SEND
+ u8 flags;
+ #define SQ_SEND_RAWETH_QP1_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK \
+ 0xffUL
+ #define SQ_SEND_RAWETH_QP1_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT \
+ 0
+ #define SQ_SEND_RAWETH_QP1_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_SEND_RAWETH_QP1_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_SEND_RAWETH_QP1_FLAGS_UC_FENCE 0x4UL
+ #define SQ_SEND_RAWETH_QP1_FLAGS_SE 0x8UL
+ #define SQ_SEND_RAWETH_QP1_FLAGS_INLINE 0x10UL
+ #define SQ_SEND_RAWETH_QP1_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_SEND_RAWETH_QP1_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ u8 reserved8;
+ __le16 lflags;
+ #define SQ_SEND_RAWETH_QP1_LFLAGS_TCP_UDP_CHKSUM 0x1UL
+ #define SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM 0x2UL
+ #define SQ_SEND_RAWETH_QP1_LFLAGS_NOCRC 0x4UL
+ #define SQ_SEND_RAWETH_QP1_LFLAGS_STAMP 0x8UL
+ #define SQ_SEND_RAWETH_QP1_LFLAGS_T_IP_CHKSUM 0x10UL
+ #define SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC 0x100UL
+ #define SQ_SEND_RAWETH_QP1_LFLAGS_FCOE_CRC 0x200UL
+ __le16 cfa_action;
+ __le32 length;
+ __le32 reserved32_1;
+ __le32 cfa_meta;
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK 0xfffUL
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT 0
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_DE 0x1000UL
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_PRI_MASK 0xe000UL
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_PRI_SFT 13
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_MASK 0x70000UL
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_SFT 16
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID88A8 (0x0UL << 16)
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID8100 (0x1UL << 16)
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID9100 (0x2UL << 16)
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID9200 (0x3UL << 16)
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID9300 (0x4UL << 16)
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPIDCFG (0x5UL << 16)
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_LAST\
+ SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPIDCFG
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_RESERVED_MASK 0xff80000UL
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_RESERVED_SFT 19
+ #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_MASK 0xf0000000UL
+ #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_SFT 28
+ #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_NONE (0x0UL << 28)
+ #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_VLAN_TAG (0x1UL << 28)
+ #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_LAST SQ_SEND_RAWETH_QP1_CFA_META_KEY_VLAN_TAG
+ __le32 reserved32_2;
+ __le32 reserved32_3;
+ __le32 timestamp;
+ #define SQ_SEND_RAWETH_QP1_TIMESTAMP_MASK 0xffffffUL
+ #define SQ_SEND_RAWETH_QP1_TIMESTAMP_SFT 0
+ __le32 data[24];
+};
+
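cfa_meta on the raw-Ethernet/QP1 send WQE can describe an inserted VLAN tag: VID in bits 0..11, PCP in bits 13..15, a TPID selector at bits 16..18 and the KEY field at bits 28..31 set to VLAN_TAG. The sketch below assumes the TPID8100 selector corresponds to the standard 802.1Q TPID 0x8100 (inferred from the name only); the helper is hypothetical and not driver code.

#include <stdint.h>

/* Values copied from the sq_send_raweth_qp1 definition above. */
#define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK      0xfffUL
#define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT       0
#define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_PRI_MASK      0xe000UL
#define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_PRI_SFT       13
#define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID8100 (0x1UL << 16)
#define SQ_SEND_RAWETH_QP1_CFA_META_KEY_VLAN_TAG       (0x1UL << 28)

/* Hypothetical helper: describe an 802.1Q tag (TPID selector 8100) in cfa_meta. */
static uint32_t raweth_qp1_vlan_meta(uint16_t vid, uint8_t pri)
{
	uint32_t meta;

	meta  = ((uint32_t)vid << SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT) &
		SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK;
	meta |= ((uint32_t)pri << SQ_SEND_RAWETH_QP1_CFA_META_VLAN_PRI_SFT) &
		SQ_SEND_RAWETH_QP1_CFA_META_VLAN_PRI_MASK;
	meta |= SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID8100;
	meta |= SQ_SEND_RAWETH_QP1_CFA_META_KEY_VLAN_TAG;
	return meta;
}
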
+/* sq_send_raweth_qp1_hdr (size:256b/32B) */
+struct sq_send_raweth_qp1_hdr {
+ u8 wqe_type;
+ #define SQ_SEND_RAWETH_QP1_HDR_WQE_TYPE_SEND 0x0UL
+ #define SQ_SEND_RAWETH_QP1_HDR_WQE_TYPE_LAST SQ_SEND_RAWETH_QP1_HDR_WQE_TYPE_SEND
+ u8 flags;
+ #define \
+ SQ_SEND_RAWETH_QP1_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+ #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT\
+ 0
+ #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_UC_FENCE 0x4UL
+ #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_SE 0x8UL
+ #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_INLINE 0x10UL
+ #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ u8 reserved8;
+ __le16 lflags;
+ #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_TCP_UDP_CHKSUM 0x1UL
+ #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_IP_CHKSUM 0x2UL
+ #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_NOCRC 0x4UL
+ #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_STAMP 0x8UL
+ #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_T_IP_CHKSUM 0x10UL
+ #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_ROCE_CRC 0x100UL
+ #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_FCOE_CRC 0x200UL
+ __le16 cfa_action;
+ __le32 length;
+ __le32 reserved32_1;
+ __le32 cfa_meta;
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_VID_MASK 0xfffUL
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_VID_SFT 0
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_DE 0x1000UL
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_PRI_MASK 0xe000UL
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_PRI_SFT 13
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_MASK 0x70000UL
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_SFT 16
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPID88A8 (0x0UL << 16)
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPID8100 (0x1UL << 16)
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPID9100 (0x2UL << 16)
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPID9200 (0x3UL << 16)
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPID9300 (0x4UL << 16)
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPIDCFG (0x5UL << 16)
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_LAST\
+ SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPIDCFG
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_RESERVED_MASK 0xff80000UL
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_RESERVED_SFT 19
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_KEY_MASK 0xf0000000UL
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_KEY_SFT 28
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_KEY_NONE (0x0UL << 28)
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_KEY_VLAN_TAG (0x1UL << 28)
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_KEY_LAST\
+ SQ_SEND_RAWETH_QP1_HDR_CFA_META_KEY_VLAN_TAG
+ __le32 reserved32_2;
+ __le32 reserved32_3;
+ __le32 timestamp;
+ #define SQ_SEND_RAWETH_QP1_HDR_TIMESTAMP_MASK 0xffffffUL
+ #define SQ_SEND_RAWETH_QP1_HDR_TIMESTAMP_SFT 0
+};
+
+/* sq_rdma (size:1024b/128B) */
+struct sq_rdma {
+ u8 wqe_type;
+ #define SQ_RDMA_WQE_TYPE_WRITE_WQE 0x4UL
+ #define SQ_RDMA_WQE_TYPE_WRITE_W_IMMEAD 0x5UL
+ #define SQ_RDMA_WQE_TYPE_READ_WQE 0x6UL
+ #define SQ_RDMA_WQE_TYPE_LAST SQ_RDMA_WQE_TYPE_READ_WQE
+ u8 flags;
+ #define SQ_RDMA_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+ #define SQ_RDMA_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+ #define SQ_RDMA_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_RDMA_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_RDMA_FLAGS_UC_FENCE 0x4UL
+ #define SQ_RDMA_FLAGS_SE 0x8UL
+ #define SQ_RDMA_FLAGS_INLINE 0x10UL
+ #define SQ_RDMA_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_RDMA_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ u8 reserved8;
+ __le32 imm_data;
+ __le32 length;
+ __le32 reserved32_1;
+ __le64 remote_va;
+ __le32 remote_key;
+ __le32 timestamp;
+ #define SQ_RDMA_TIMESTAMP_MASK 0xffffffUL
+ #define SQ_RDMA_TIMESTAMP_SFT 0
+ __le32 data[24];
+};
+
+/* sq_rdma_hdr (size:256b/32B) */
+struct sq_rdma_hdr {
+ u8 wqe_type;
+ #define SQ_RDMA_HDR_WQE_TYPE_WRITE_WQE 0x4UL
+ #define SQ_RDMA_HDR_WQE_TYPE_WRITE_W_IMMEAD 0x5UL
+ #define SQ_RDMA_HDR_WQE_TYPE_READ_WQE 0x6UL
+ #define SQ_RDMA_HDR_WQE_TYPE_LAST SQ_RDMA_HDR_WQE_TYPE_READ_WQE
+ u8 flags;
+ #define SQ_RDMA_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+ #define SQ_RDMA_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+ #define SQ_RDMA_HDR_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_RDMA_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_RDMA_HDR_FLAGS_UC_FENCE 0x4UL
+ #define SQ_RDMA_HDR_FLAGS_SE 0x8UL
+ #define SQ_RDMA_HDR_FLAGS_INLINE 0x10UL
+ #define SQ_RDMA_HDR_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_RDMA_HDR_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ u8 reserved8;
+ __le32 imm_data;
+ __le32 length;
+ __le32 reserved32_1;
+ __le64 remote_va;
+ __le32 remote_key;
+ __le32 timestamp;
+ #define SQ_RDMA_HDR_TIMESTAMP_MASK 0xffffffUL
+ #define SQ_RDMA_HDR_TIMESTAMP_SFT 0
+};
+
+/* sq_atomic (size:1024b/128B) */
+struct sq_atomic {
+ u8 wqe_type;
+ #define SQ_ATOMIC_WQE_TYPE_ATOMIC_CS 0x8UL
+ #define SQ_ATOMIC_WQE_TYPE_ATOMIC_FA 0xbUL
+ #define SQ_ATOMIC_WQE_TYPE_LAST SQ_ATOMIC_WQE_TYPE_ATOMIC_FA
+ u8 flags;
+ #define SQ_ATOMIC_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+ #define SQ_ATOMIC_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+ #define SQ_ATOMIC_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_ATOMIC_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_ATOMIC_FLAGS_UC_FENCE 0x4UL
+ #define SQ_ATOMIC_FLAGS_SE 0x8UL
+ #define SQ_ATOMIC_FLAGS_INLINE 0x10UL
+ #define SQ_ATOMIC_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_ATOMIC_FLAGS_DEBUG_TRACE 0x40UL
+ __le16 reserved16;
+ __le32 remote_key;
+ __le64 remote_va;
+ __le64 swap_data;
+ __le64 cmp_data;
+ __le32 data[24];
+};
+
+/* sq_atomic_hdr (size:256b/32B) */
+struct sq_atomic_hdr {
+ u8 wqe_type;
+ #define SQ_ATOMIC_HDR_WQE_TYPE_ATOMIC_CS 0x8UL
+ #define SQ_ATOMIC_HDR_WQE_TYPE_ATOMIC_FA 0xbUL
+ #define SQ_ATOMIC_HDR_WQE_TYPE_LAST SQ_ATOMIC_HDR_WQE_TYPE_ATOMIC_FA
+ u8 flags;
+ #define SQ_ATOMIC_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+ #define SQ_ATOMIC_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+ #define SQ_ATOMIC_HDR_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_ATOMIC_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_ATOMIC_HDR_FLAGS_UC_FENCE 0x4UL
+ #define SQ_ATOMIC_HDR_FLAGS_SE 0x8UL
+ #define SQ_ATOMIC_HDR_FLAGS_INLINE 0x10UL
+ #define SQ_ATOMIC_HDR_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_ATOMIC_HDR_FLAGS_DEBUG_TRACE 0x40UL
+ __le16 reserved16;
+ __le32 remote_key;
+ __le64 remote_va;
+ __le64 swap_data;
+ __le64 cmp_data;
+};
+
+/* sq_localinvalidate (size:1024b/128B) */
+struct sq_localinvalidate {
+ u8 wqe_type;
+ #define SQ_LOCALINVALIDATE_WQE_TYPE_LOCAL_INVALID 0xcUL
+ #define SQ_LOCALINVALIDATE_WQE_TYPE_LAST SQ_LOCALINVALIDATE_WQE_TYPE_LOCAL_INVALID
+ u8 flags;
+ #define SQ_LOCALINVALIDATE_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK\
+ 0xffUL
+ #define SQ_LOCALINVALIDATE_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT\
+ 0
+ #define SQ_LOCALINVALIDATE_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_LOCALINVALIDATE_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_LOCALINVALIDATE_FLAGS_UC_FENCE 0x4UL
+ #define SQ_LOCALINVALIDATE_FLAGS_SE 0x8UL
+ #define SQ_LOCALINVALIDATE_FLAGS_INLINE 0x10UL
+ #define SQ_LOCALINVALIDATE_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_LOCALINVALIDATE_FLAGS_DEBUG_TRACE 0x40UL
+ __le16 reserved16;
+ __le32 inv_l_key;
+ __le64 reserved64;
+ u8 reserved128[16];
+ __le32 data[24];
+};
+
+/* sq_localinvalidate_hdr (size:256b/32B) */
+struct sq_localinvalidate_hdr {
+ u8 wqe_type;
+ #define SQ_LOCALINVALIDATE_HDR_WQE_TYPE_LOCAL_INVALID 0xcUL
+ #define SQ_LOCALINVALIDATE_HDR_WQE_TYPE_LAST SQ_LOCALINVALIDATE_HDR_WQE_TYPE_LOCAL_INVALID
+ u8 flags;
+ #define \
+ SQ_LOCALINVALIDATE_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+ #define SQ_LOCALINVALIDATE_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT\
+ 0
+ #define SQ_LOCALINVALIDATE_HDR_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_LOCALINVALIDATE_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_LOCALINVALIDATE_HDR_FLAGS_UC_FENCE 0x4UL
+ #define SQ_LOCALINVALIDATE_HDR_FLAGS_SE 0x8UL
+ #define SQ_LOCALINVALIDATE_HDR_FLAGS_INLINE 0x10UL
+ #define SQ_LOCALINVALIDATE_HDR_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_LOCALINVALIDATE_HDR_FLAGS_DEBUG_TRACE 0x40UL
+ __le16 reserved16;
+ __le32 inv_l_key;
+ __le64 reserved64;
+ u8 reserved128[16];
+};
+
+/* sq_fr_pmr (size:1024b/128B) */
+struct sq_fr_pmr {
+ u8 wqe_type;
+ #define SQ_FR_PMR_WQE_TYPE_FR_PMR 0xdUL
+ #define SQ_FR_PMR_WQE_TYPE_LAST SQ_FR_PMR_WQE_TYPE_FR_PMR
+ u8 flags;
+ #define SQ_FR_PMR_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_FR_PMR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_FR_PMR_FLAGS_UC_FENCE 0x4UL
+ #define SQ_FR_PMR_FLAGS_SE 0x8UL
+ #define SQ_FR_PMR_FLAGS_INLINE 0x10UL
+ #define SQ_FR_PMR_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_FR_PMR_FLAGS_DEBUG_TRACE 0x40UL
+ u8 access_cntl;
+ #define SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE 0x1UL
+ #define SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ 0x2UL
+ #define SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE 0x4UL
+ #define SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL
+ #define SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND 0x10UL
+ u8 zero_based_page_size_log;
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_MASK 0x1fUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_SFT 0
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_4K 0x0UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_8K 0x1UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_16K 0x2UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_32K 0x3UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_64K 0x4UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_128K 0x5UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_256K 0x6UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_512K 0x7UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_1M 0x8UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_2M 0x9UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_4M 0xaUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_8M 0xbUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_16M 0xcUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_32M 0xdUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_64M 0xeUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_128M 0xfUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_256M 0x10UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_512M 0x11UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_1G 0x12UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_2G 0x13UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_4G 0x14UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_8G 0x15UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_16G 0x16UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_32G 0x17UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_64G 0x18UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_128G 0x19UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_1T 0x1cUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_LAST SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_8T
+ #define SQ_FR_PMR_ZERO_BASED 0x20UL
+ __le32 l_key;
+ u8 length[5];
+ u8 reserved8_1;
+ u8 reserved8_2;
+ u8 numlevels_pbl_page_size_log;
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK 0x1fUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT 0
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_4K 0x0UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_8K 0x1UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_16K 0x2UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_32K 0x3UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_64K 0x4UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_128K 0x5UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_256K 0x6UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_512K 0x7UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_1M 0x8UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_2M 0x9UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_4M 0xaUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_8M 0xbUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_16M 0xcUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_32M 0xdUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_64M 0xeUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_128M 0xfUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_256M 0x10UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_512M 0x11UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_1G 0x12UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_2G 0x13UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_4G 0x14UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_8G 0x15UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_16G 0x16UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_32G 0x17UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_64G 0x18UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_128G 0x19UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_1T 0x1cUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_LAST SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_8T
+ #define SQ_FR_PMR_NUMLEVELS_MASK 0xc0UL
+ #define SQ_FR_PMR_NUMLEVELS_SFT 6
+ #define SQ_FR_PMR_NUMLEVELS_PHYSICAL (0x0UL << 6)
+ #define SQ_FR_PMR_NUMLEVELS_LAYER1 (0x1UL << 6)
+ #define SQ_FR_PMR_NUMLEVELS_LAYER2 (0x2UL << 6)
+ #define SQ_FR_PMR_NUMLEVELS_LAST SQ_FR_PMR_NUMLEVELS_LAYER2
+ __le64 pblptr;
+ __le64 va;
+ __le32 data[24];
+};
+
+/* sq_fr_pmr_hdr (size:256b/32B) */
+struct sq_fr_pmr_hdr {
+ u8 wqe_type;
+ #define SQ_FR_PMR_HDR_WQE_TYPE_FR_PMR 0xdUL
+ #define SQ_FR_PMR_HDR_WQE_TYPE_LAST SQ_FR_PMR_HDR_WQE_TYPE_FR_PMR
+ u8 flags;
+ #define SQ_FR_PMR_HDR_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_FR_PMR_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_FR_PMR_HDR_FLAGS_UC_FENCE 0x4UL
+ #define SQ_FR_PMR_HDR_FLAGS_SE 0x8UL
+ #define SQ_FR_PMR_HDR_FLAGS_INLINE 0x10UL
+ #define SQ_FR_PMR_HDR_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_FR_PMR_HDR_FLAGS_DEBUG_TRACE 0x40UL
+ u8 access_cntl;
+ #define SQ_FR_PMR_HDR_ACCESS_CNTL_LOCAL_WRITE 0x1UL
+ #define SQ_FR_PMR_HDR_ACCESS_CNTL_REMOTE_READ 0x2UL
+ #define SQ_FR_PMR_HDR_ACCESS_CNTL_REMOTE_WRITE 0x4UL
+ #define SQ_FR_PMR_HDR_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL
+ #define SQ_FR_PMR_HDR_ACCESS_CNTL_WINDOW_BIND 0x10UL
+ u8 zero_based_page_size_log;
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_MASK 0x1fUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_SFT 0
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_4K 0x0UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8K 0x1UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_16K 0x2UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_32K 0x3UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_64K 0x4UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_128K 0x5UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_256K 0x6UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_512K 0x7UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_1M 0x8UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_2M 0x9UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_4M 0xaUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8M 0xbUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_16M 0xcUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_32M 0xdUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_64M 0xeUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_128M 0xfUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_256M 0x10UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_512M 0x11UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_1G 0x12UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_2G 0x13UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_4G 0x14UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8G 0x15UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_16G 0x16UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_32G 0x17UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_64G 0x18UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_128G 0x19UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_1T 0x1cUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_LAST SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8T
+ #define SQ_FR_PMR_HDR_ZERO_BASED 0x20UL
+ __le32 l_key;
+ u8 length[5];
+ u8 reserved8_1;
+ u8 reserved8_2;
+ u8 numlevels_pbl_page_size_log;
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_MASK 0x1fUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_SFT 0
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_4K 0x0UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8K 0x1UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_16K 0x2UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_32K 0x3UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_64K 0x4UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_128K 0x5UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_256K 0x6UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_512K 0x7UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_1M 0x8UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_2M 0x9UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_4M 0xaUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8M 0xbUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_16M 0xcUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_32M 0xdUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_64M 0xeUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_128M 0xfUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_256M 0x10UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_512M 0x11UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_1G 0x12UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_2G 0x13UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_4G 0x14UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8G 0x15UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_16G 0x16UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_32G 0x17UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_64G 0x18UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_128G 0x19UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_1T 0x1cUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_LAST SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8T
+ #define SQ_FR_PMR_HDR_NUMLEVELS_MASK 0xc0UL
+ #define SQ_FR_PMR_HDR_NUMLEVELS_SFT 6
+ #define SQ_FR_PMR_HDR_NUMLEVELS_PHYSICAL (0x0UL << 6)
+ #define SQ_FR_PMR_HDR_NUMLEVELS_LAYER1 (0x1UL << 6)
+ #define SQ_FR_PMR_HDR_NUMLEVELS_LAYER2 (0x2UL << 6)
+ #define SQ_FR_PMR_HDR_NUMLEVELS_LAST SQ_FR_PMR_HDR_NUMLEVELS_LAYER2
+ __le64 pblptr;
+ __le64 va;
+};
+
+/* sq_bind (size:1024b/128B) */
+struct sq_bind {
+ u8 wqe_type;
+ #define SQ_BIND_WQE_TYPE_BIND 0xeUL
+ #define SQ_BIND_WQE_TYPE_LAST SQ_BIND_WQE_TYPE_BIND
+ u8 flags;
+ #define SQ_BIND_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+ #define SQ_BIND_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+ #define SQ_BIND_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_BIND_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_BIND_FLAGS_UC_FENCE 0x4UL
+ #define SQ_BIND_FLAGS_SE 0x8UL
+ #define SQ_BIND_FLAGS_INLINE 0x10UL
+ #define SQ_BIND_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_BIND_FLAGS_DEBUG_TRACE 0x40UL
+ u8 access_cntl;
+ #define \
+ SQ_BIND_ACCESS_CNTL_WINDOW_BIND_REMOTE_ATOMIC_REMOTE_WRITE_REMOTE_READ_LOCAL_WRITE_MASK\
+ 0xffUL
+ #define \
+ SQ_BIND_ACCESS_CNTL_WINDOW_BIND_REMOTE_ATOMIC_REMOTE_WRITE_REMOTE_READ_LOCAL_WRITE_SFT 0
+ #define SQ_BIND_ACCESS_CNTL_LOCAL_WRITE 0x1UL
+ #define SQ_BIND_ACCESS_CNTL_REMOTE_READ 0x2UL
+ #define SQ_BIND_ACCESS_CNTL_REMOTE_WRITE 0x4UL
+ #define SQ_BIND_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL
+ #define SQ_BIND_ACCESS_CNTL_WINDOW_BIND 0x10UL
+ u8 reserved8_1;
+ u8 mw_type_zero_based;
+ #define SQ_BIND_ZERO_BASED 0x1UL
+ #define SQ_BIND_MW_TYPE 0x2UL
+ #define SQ_BIND_MW_TYPE_TYPE1 (0x0UL << 1)
+ #define SQ_BIND_MW_TYPE_TYPE2 (0x1UL << 1)
+ #define SQ_BIND_MW_TYPE_LAST SQ_BIND_MW_TYPE_TYPE2
+ u8 reserved8_2;
+ __le16 reserved16;
+ __le32 parent_l_key;
+ __le32 l_key;
+ __le64 va;
+ u8 length[5];
+ u8 reserved24[3];
+ __le32 data[24];
+};
+
+/* sq_bind_hdr (size:256b/32B) */
+struct sq_bind_hdr {
+ u8 wqe_type;
+ #define SQ_BIND_HDR_WQE_TYPE_BIND 0xeUL
+ #define SQ_BIND_HDR_WQE_TYPE_LAST SQ_BIND_HDR_WQE_TYPE_BIND
+ u8 flags;
+ #define SQ_BIND_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+ #define SQ_BIND_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+ #define SQ_BIND_HDR_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_BIND_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_BIND_HDR_FLAGS_UC_FENCE 0x4UL
+ #define SQ_BIND_HDR_FLAGS_SE 0x8UL
+ #define SQ_BIND_HDR_FLAGS_INLINE 0x10UL
+ #define SQ_BIND_HDR_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_BIND_HDR_FLAGS_DEBUG_TRACE 0x40UL
+ u8 access_cntl;
+ #define \
+ SQ_BIND_HDR_ACCESS_CNTL_WINDOW_BIND_REMOTE_ATOMIC_REMOTE_WRITE_REMOTE_READ_LOCAL_WRITE_MASK\
+ 0xffUL
+ #define \
+ SQ_BIND_HDR_ACCESS_CNTL_WINDOW_BIND_REMOTE_ATOMIC_REMOTE_WRITE_REMOTE_READ_LOCAL_WRITE_SFT \
+ 0
+ #define SQ_BIND_HDR_ACCESS_CNTL_LOCAL_WRITE 0x1UL
+ #define SQ_BIND_HDR_ACCESS_CNTL_REMOTE_READ 0x2UL
+ #define SQ_BIND_HDR_ACCESS_CNTL_REMOTE_WRITE 0x4UL
+ #define SQ_BIND_HDR_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL
+ #define SQ_BIND_HDR_ACCESS_CNTL_WINDOW_BIND 0x10UL
+ u8 reserved8_1;
+ u8 mw_type_zero_based;
+ #define SQ_BIND_HDR_ZERO_BASED 0x1UL
+ #define SQ_BIND_HDR_MW_TYPE 0x2UL
+ #define SQ_BIND_HDR_MW_TYPE_TYPE1 (0x0UL << 1)
+ #define SQ_BIND_HDR_MW_TYPE_TYPE2 (0x1UL << 1)
+ #define SQ_BIND_HDR_MW_TYPE_LAST SQ_BIND_HDR_MW_TYPE_TYPE2
+ u8 reserved8_2;
+ __le16 reserved16;
+ __le32 parent_l_key;
+ __le32 l_key;
+ __le64 va;
+ u8 length[5];
+ u8 reserved24[3];
+};
+
+/* rq_wqe (size:1024b/128B) */
+struct rq_wqe {
+ u8 wqe_type;
+ #define RQ_WQE_WQE_TYPE_RCV 0x80UL
+ #define RQ_WQE_WQE_TYPE_LAST RQ_WQE_WQE_TYPE_RCV
+ u8 flags;
+ u8 wqe_size;
+ u8 reserved8;
+ __le32 reserved32;
+ __le32 wr_id[2];
+ #define RQ_WQE_WR_ID_MASK 0xfffffUL
+ #define RQ_WQE_WR_ID_SFT 0
+ u8 reserved128[16];
+ __le32 data[24];
+};
+
+/* rq_wqe_hdr (size:256b/32B) */
+struct rq_wqe_hdr {
+ u8 wqe_type;
+ #define RQ_WQE_HDR_WQE_TYPE_RCV 0x80UL
+ #define RQ_WQE_HDR_WQE_TYPE_LAST RQ_WQE_HDR_WQE_TYPE_RCV
+ u8 flags;
+ u8 wqe_size;
+ u8 reserved8;
+ __le32 reserved32;
+ __le32 wr_id[2];
+ #define RQ_WQE_HDR_WR_ID_MASK 0xfffffUL
+ #define RQ_WQE_HDR_WR_ID_SFT 0
+ u8 reserved128[16];
+};
+
+/* cq_base (size:256b/32B) */
+struct cq_base {
+ __le64 reserved64_1;
+ __le64 reserved64_2;
+ __le64 reserved64_3;
+ u8 cqe_type_toggle;
+ #define CQ_BASE_TOGGLE 0x1UL
+ #define CQ_BASE_CQE_TYPE_MASK 0x1eUL
+ #define CQ_BASE_CQE_TYPE_SFT 1
+ #define CQ_BASE_CQE_TYPE_REQ (0x0UL << 1)
+ #define CQ_BASE_CQE_TYPE_RES_RC (0x1UL << 1)
+ #define CQ_BASE_CQE_TYPE_RES_UD (0x2UL << 1)
+ #define CQ_BASE_CQE_TYPE_RES_RAWETH_QP1 (0x3UL << 1)
+ #define CQ_BASE_CQE_TYPE_RES_UD_CFA (0x4UL << 1)
+ #define CQ_BASE_CQE_TYPE_REQ_V3 (0x8UL << 1)
+ #define CQ_BASE_CQE_TYPE_RES_RC_V3 (0x9UL << 1)
+ #define CQ_BASE_CQE_TYPE_RES_UD_V3 (0xaUL << 1)
+ #define CQ_BASE_CQE_TYPE_RES_RAWETH_QP1_V3 (0xbUL << 1)
+ #define CQ_BASE_CQE_TYPE_RES_UD_CFA_V3 (0xcUL << 1)
+ #define CQ_BASE_CQE_TYPE_NO_OP (0xdUL << 1)
+ #define CQ_BASE_CQE_TYPE_TERMINAL (0xeUL << 1)
+ #define CQ_BASE_CQE_TYPE_CUT_OFF (0xfUL << 1)
+ #define CQ_BASE_CQE_TYPE_LAST CQ_BASE_CQE_TYPE_CUT_OFF
+ u8 status;
+ #define CQ_BASE_STATUS_OK 0x0UL
+ #define CQ_BASE_STATUS_BAD_RESPONSE_ERR 0x1UL
+ #define CQ_BASE_STATUS_LOCAL_LENGTH_ERR 0x2UL
+ #define CQ_BASE_STATUS_HW_LOCAL_LENGTH_ERR 0x3UL
+ #define CQ_BASE_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+ #define CQ_BASE_STATUS_LOCAL_PROTECTION_ERR 0x5UL
+ #define CQ_BASE_STATUS_LOCAL_ACCESS_ERROR 0x6UL
+ #define CQ_BASE_STATUS_MEMORY_MGT_OPERATION_ERR 0x7UL
+ #define CQ_BASE_STATUS_REMOTE_INVALID_REQUEST_ERR 0x8UL
+ #define CQ_BASE_STATUS_REMOTE_ACCESS_ERR 0x9UL
+ #define CQ_BASE_STATUS_REMOTE_OPERATION_ERR 0xaUL
+ #define CQ_BASE_STATUS_RNR_NAK_RETRY_CNT_ERR 0xbUL
+ #define CQ_BASE_STATUS_TRANSPORT_RETRY_CNT_ERR 0xcUL
+ #define CQ_BASE_STATUS_WORK_REQUEST_FLUSHED_ERR 0xdUL
+ #define CQ_BASE_STATUS_HW_FLUSH_ERR 0xeUL
+ #define CQ_BASE_STATUS_OVERFLOW_ERR 0xfUL
+ #define CQ_BASE_STATUS_LAST CQ_BASE_STATUS_OVERFLOW_ERR
+ __le16 reserved16;
+ __le32 opaque;
+};
+
+/* cq_req (size:256b/32B) */
+struct cq_req {
+ __le64 qp_handle;
+ __le16 sq_cons_idx;
+ __le16 reserved16_1;
+ __le32 reserved32_2;
+ __le64 reserved64;
+ u8 cqe_type_toggle;
+ #define CQ_REQ_TOGGLE 0x1UL
+ #define CQ_REQ_CQE_TYPE_MASK 0x1eUL
+ #define CQ_REQ_CQE_TYPE_SFT 1
+ #define CQ_REQ_CQE_TYPE_REQ (0x0UL << 1)
+ #define CQ_REQ_CQE_TYPE_LAST CQ_REQ_CQE_TYPE_REQ
+ #define CQ_REQ_PUSH 0x20UL
+ u8 status;
+ #define CQ_REQ_STATUS_OK 0x0UL
+ #define CQ_REQ_STATUS_BAD_RESPONSE_ERR 0x1UL
+ #define CQ_REQ_STATUS_LOCAL_LENGTH_ERR 0x2UL
+ #define CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR 0x3UL
+ #define CQ_REQ_STATUS_LOCAL_PROTECTION_ERR 0x4UL
+ #define CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL
+ #define CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR 0x6UL
+ #define CQ_REQ_STATUS_REMOTE_ACCESS_ERR 0x7UL
+ #define CQ_REQ_STATUS_REMOTE_OPERATION_ERR 0x8UL
+ #define CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR 0x9UL
+ #define CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR 0xaUL
+ #define CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR 0xbUL
+ #define CQ_REQ_STATUS_LAST CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR
+ __le16 reserved16_2;
+ __le32 reserved32_1;
+};
+
+/* cq_res_rc (size:256b/32B) */
+struct cq_res_rc {
+ __le32 length;
+ __le32 imm_data_or_inv_r_key;
+ __le64 qp_handle;
+ __le64 mr_handle;
+ u8 cqe_type_toggle;
+ #define CQ_RES_RC_TOGGLE 0x1UL
+ #define CQ_RES_RC_CQE_TYPE_MASK 0x1eUL
+ #define CQ_RES_RC_CQE_TYPE_SFT 1
+ #define CQ_RES_RC_CQE_TYPE_RES_RC (0x1UL << 1)
+ #define CQ_RES_RC_CQE_TYPE_LAST CQ_RES_RC_CQE_TYPE_RES_RC
+ u8 status;
+ #define CQ_RES_RC_STATUS_OK 0x0UL
+ #define CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR 0x1UL
+ #define CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR 0x2UL
+ #define CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR 0x3UL
+ #define CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+ #define CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL
+ #define CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR 0x6UL
+ #define CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL
+ #define CQ_RES_RC_STATUS_HW_FLUSH_ERR 0x8UL
+ #define CQ_RES_RC_STATUS_LAST CQ_RES_RC_STATUS_HW_FLUSH_ERR
+ __le16 flags;
+ #define CQ_RES_RC_FLAGS_SRQ 0x1UL
+ #define CQ_RES_RC_FLAGS_SRQ_RQ 0x0UL
+ #define CQ_RES_RC_FLAGS_SRQ_SRQ 0x1UL
+ #define CQ_RES_RC_FLAGS_SRQ_LAST CQ_RES_RC_FLAGS_SRQ_SRQ
+ #define CQ_RES_RC_FLAGS_IMM 0x2UL
+ #define CQ_RES_RC_FLAGS_INV 0x4UL
+ #define CQ_RES_RC_FLAGS_RDMA 0x8UL
+ #define CQ_RES_RC_FLAGS_RDMA_SEND (0x0UL << 3)
+ #define CQ_RES_RC_FLAGS_RDMA_RDMA_WRITE (0x1UL << 3)
+ #define CQ_RES_RC_FLAGS_RDMA_LAST CQ_RES_RC_FLAGS_RDMA_RDMA_WRITE
+ __le32 srq_or_rq_wr_id;
+ #define CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL
+ #define CQ_RES_RC_SRQ_OR_RQ_WR_ID_SFT 0
+};
+
+/* cq_res_ud (size:256b/32B) */
+struct cq_res_ud {
+ __le16 length;
+ #define CQ_RES_UD_LENGTH_MASK 0x3fffUL
+ #define CQ_RES_UD_LENGTH_SFT 0
+ __le16 cfa_metadata;
+ #define CQ_RES_UD_CFA_METADATA_VID_MASK 0xfffUL
+ #define CQ_RES_UD_CFA_METADATA_VID_SFT 0
+ #define CQ_RES_UD_CFA_METADATA_DE 0x1000UL
+ #define CQ_RES_UD_CFA_METADATA_PRI_MASK 0xe000UL
+ #define CQ_RES_UD_CFA_METADATA_PRI_SFT 13
+ __le32 imm_data;
+ __le64 qp_handle;
+ __le16 src_mac[3];
+ __le16 src_qp_low;
+ u8 cqe_type_toggle;
+ #define CQ_RES_UD_TOGGLE 0x1UL
+ #define CQ_RES_UD_CQE_TYPE_MASK 0x1eUL
+ #define CQ_RES_UD_CQE_TYPE_SFT 1
+ #define CQ_RES_UD_CQE_TYPE_RES_UD (0x2UL << 1)
+ #define CQ_RES_UD_CQE_TYPE_LAST CQ_RES_UD_CQE_TYPE_RES_UD
+ u8 status;
+ #define CQ_RES_UD_STATUS_OK 0x0UL
+ #define CQ_RES_UD_STATUS_LOCAL_ACCESS_ERROR 0x1UL
+ #define CQ_RES_UD_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL
+ #define CQ_RES_UD_STATUS_LOCAL_PROTECTION_ERR 0x3UL
+ #define CQ_RES_UD_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+ #define CQ_RES_UD_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL
+ #define CQ_RES_UD_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL
+ #define CQ_RES_UD_STATUS_HW_FLUSH_ERR 0x8UL
+ #define CQ_RES_UD_STATUS_LAST CQ_RES_UD_STATUS_HW_FLUSH_ERR
+ __le16 flags;
+ #define CQ_RES_UD_FLAGS_SRQ 0x1UL
+ #define CQ_RES_UD_FLAGS_SRQ_RQ 0x0UL
+ #define CQ_RES_UD_FLAGS_SRQ_SRQ 0x1UL
+ #define CQ_RES_UD_FLAGS_SRQ_LAST CQ_RES_UD_FLAGS_SRQ_SRQ
+ #define CQ_RES_UD_FLAGS_IMM 0x2UL
+ #define CQ_RES_UD_FLAGS_UNUSED_MASK 0xcUL
+ #define CQ_RES_UD_FLAGS_UNUSED_SFT 2
+ #define CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK 0x30UL
+ #define CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT 4
+ #define CQ_RES_UD_FLAGS_ROCE_IP_VER_V1 (0x0UL << 4)
+ #define CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV4 (0x2UL << 4)
+ #define CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV6 (0x3UL << 4)
+ #define CQ_RES_UD_FLAGS_ROCE_IP_VER_LAST CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV6
+ #define CQ_RES_UD_FLAGS_META_FORMAT_MASK 0x3c0UL
+ #define CQ_RES_UD_FLAGS_META_FORMAT_SFT 6
+ #define CQ_RES_UD_FLAGS_META_FORMAT_NONE (0x0UL << 6)
+ #define CQ_RES_UD_FLAGS_META_FORMAT_VLAN (0x1UL << 6)
+ #define CQ_RES_UD_FLAGS_META_FORMAT_TUNNEL_ID (0x2UL << 6)
+ #define CQ_RES_UD_FLAGS_META_FORMAT_CHDR_DATA (0x3UL << 6)
+ #define CQ_RES_UD_FLAGS_META_FORMAT_HDR_OFFSET (0x4UL << 6)
+ #define CQ_RES_UD_FLAGS_META_FORMAT_LAST CQ_RES_UD_FLAGS_META_FORMAT_HDR_OFFSET
+ #define CQ_RES_UD_FLAGS_EXT_META_FORMAT_MASK 0xc00UL
+ #define CQ_RES_UD_FLAGS_EXT_META_FORMAT_SFT 10
+ __le32 src_qp_high_srq_or_rq_wr_id;
+ #define CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL
+ #define CQ_RES_UD_SRQ_OR_RQ_WR_ID_SFT 0
+ #define CQ_RES_UD_SRC_QP_HIGH_MASK 0xff000000UL
+ #define CQ_RES_UD_SRC_QP_HIGH_SFT 24
+};
+
+/* cq_res_ud_v2 (size:256b/32B) */
+struct cq_res_ud_v2 {
+ __le16 length;
+ #define CQ_RES_UD_V2_LENGTH_MASK 0x3fffUL
+ #define CQ_RES_UD_V2_LENGTH_SFT 0
+ __le16 cfa_metadata0;
+ #define CQ_RES_UD_V2_CFA_METADATA0_VID_MASK 0xfffUL
+ #define CQ_RES_UD_V2_CFA_METADATA0_VID_SFT 0
+ #define CQ_RES_UD_V2_CFA_METADATA0_DE 0x1000UL
+ #define CQ_RES_UD_V2_CFA_METADATA0_PRI_MASK 0xe000UL
+ #define CQ_RES_UD_V2_CFA_METADATA0_PRI_SFT 13
+ __le32 imm_data;
+ __le64 qp_handle;
+ __le16 src_mac[3];
+ __le16 src_qp_low;
+ u8 cqe_type_toggle;
+ #define CQ_RES_UD_V2_TOGGLE 0x1UL
+ #define CQ_RES_UD_V2_CQE_TYPE_MASK 0x1eUL
+ #define CQ_RES_UD_V2_CQE_TYPE_SFT 1
+ #define CQ_RES_UD_V2_CQE_TYPE_RES_UD (0x2UL << 1)
+ #define CQ_RES_UD_V2_CQE_TYPE_LAST CQ_RES_UD_V2_CQE_TYPE_RES_UD
+ u8 status;
+ #define CQ_RES_UD_V2_STATUS_OK 0x0UL
+ #define CQ_RES_UD_V2_STATUS_LOCAL_ACCESS_ERROR 0x1UL
+ #define CQ_RES_UD_V2_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL
+ #define CQ_RES_UD_V2_STATUS_LOCAL_PROTECTION_ERR 0x3UL
+ #define CQ_RES_UD_V2_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+ #define CQ_RES_UD_V2_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL
+ #define CQ_RES_UD_V2_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL
+ #define CQ_RES_UD_V2_STATUS_HW_FLUSH_ERR 0x8UL
+ #define CQ_RES_UD_V2_STATUS_LAST CQ_RES_UD_V2_STATUS_HW_FLUSH_ERR
+ __le16 flags;
+ #define CQ_RES_UD_V2_FLAGS_SRQ 0x1UL
+ #define CQ_RES_UD_V2_FLAGS_SRQ_RQ 0x0UL
+ #define CQ_RES_UD_V2_FLAGS_SRQ_SRQ 0x1UL
+ #define CQ_RES_UD_V2_FLAGS_SRQ_LAST CQ_RES_UD_V2_FLAGS_SRQ_SRQ
+ #define CQ_RES_UD_V2_FLAGS_IMM 0x2UL
+ #define CQ_RES_UD_V2_FLAGS_UNUSED_MASK 0xcUL
+ #define CQ_RES_UD_V2_FLAGS_UNUSED_SFT 2
+ #define CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_MASK 0x30UL
+ #define CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_SFT 4
+ #define CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_V1 (0x0UL << 4)
+ #define CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_V2IPV4 (0x2UL << 4)
+ #define CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_V2IPV6 (0x3UL << 4)
+ #define CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_LAST CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_V2IPV6
+ #define CQ_RES_UD_V2_FLAGS_META_FORMAT_MASK 0x3c0UL
+ #define CQ_RES_UD_V2_FLAGS_META_FORMAT_SFT 6
+ #define CQ_RES_UD_V2_FLAGS_META_FORMAT_NONE (0x0UL << 6)
+ #define CQ_RES_UD_V2_FLAGS_META_FORMAT_ACT_REC_PTR (0x1UL << 6)
+ #define CQ_RES_UD_V2_FLAGS_META_FORMAT_TUNNEL_ID (0x2UL << 6)
+ #define CQ_RES_UD_V2_FLAGS_META_FORMAT_CHDR_DATA (0x3UL << 6)
+ #define CQ_RES_UD_V2_FLAGS_META_FORMAT_HDR_OFFSET (0x4UL << 6)
+ #define CQ_RES_UD_V2_FLAGS_META_FORMAT_LAST CQ_RES_UD_V2_FLAGS_META_FORMAT_HDR_OFFSET
+ __le32 src_qp_high_srq_or_rq_wr_id;
+ #define CQ_RES_UD_V2_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL
+ #define CQ_RES_UD_V2_SRQ_OR_RQ_WR_ID_SFT 0
+ #define CQ_RES_UD_V2_CFA_METADATA1_MASK 0xf00000UL
+ #define CQ_RES_UD_V2_CFA_METADATA1_SFT 20
+ #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_MASK 0x700000UL
+ #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_SFT 20
+ #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPID88A8 (0x0UL << 20)
+ #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPID8100 (0x1UL << 20)
+ #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPID9100 (0x2UL << 20)
+ #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPID9200 (0x3UL << 20)
+ #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPID9300 (0x4UL << 20)
+ #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPIDCFG (0x5UL << 20)
+ #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_LAST CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPIDCFG
+ #define CQ_RES_UD_V2_CFA_METADATA1_VALID 0x800000UL
+ #define CQ_RES_UD_V2_SRC_QP_HIGH_MASK 0xff000000UL
+ #define CQ_RES_UD_V2_SRC_QP_HIGH_SFT 24
+};
+
+/* cq_res_ud_cfa (size:256b/32B) */
+struct cq_res_ud_cfa {
+ __le16 length;
+ #define CQ_RES_UD_CFA_LENGTH_MASK 0x3fffUL
+ #define CQ_RES_UD_CFA_LENGTH_SFT 0
+ __le16 cfa_code;
+ __le32 imm_data;
+ __le32 qid;
+ #define CQ_RES_UD_CFA_QID_MASK 0xfffffUL
+ #define CQ_RES_UD_CFA_QID_SFT 0
+ __le32 cfa_metadata;
+ #define CQ_RES_UD_CFA_CFA_METADATA_VID_MASK 0xfffUL
+ #define CQ_RES_UD_CFA_CFA_METADATA_VID_SFT 0
+ #define CQ_RES_UD_CFA_CFA_METADATA_DE 0x1000UL
+ #define CQ_RES_UD_CFA_CFA_METADATA_PRI_MASK 0xe000UL
+ #define CQ_RES_UD_CFA_CFA_METADATA_PRI_SFT 13
+ #define CQ_RES_UD_CFA_CFA_METADATA_TPID_MASK 0xffff0000UL
+ #define CQ_RES_UD_CFA_CFA_METADATA_TPID_SFT 16
+ __le16 src_mac[3];
+ __le16 src_qp_low;
+ u8 cqe_type_toggle;
+ #define CQ_RES_UD_CFA_TOGGLE 0x1UL
+ #define CQ_RES_UD_CFA_CQE_TYPE_MASK 0x1eUL
+ #define CQ_RES_UD_CFA_CQE_TYPE_SFT 1
+ #define CQ_RES_UD_CFA_CQE_TYPE_RES_UD_CFA (0x4UL << 1)
+ #define CQ_RES_UD_CFA_CQE_TYPE_LAST CQ_RES_UD_CFA_CQE_TYPE_RES_UD_CFA
+ u8 status;
+ #define CQ_RES_UD_CFA_STATUS_OK 0x0UL
+ #define CQ_RES_UD_CFA_STATUS_LOCAL_ACCESS_ERROR 0x1UL
+ #define CQ_RES_UD_CFA_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL
+ #define CQ_RES_UD_CFA_STATUS_LOCAL_PROTECTION_ERR 0x3UL
+ #define CQ_RES_UD_CFA_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+ #define CQ_RES_UD_CFA_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL
+ #define CQ_RES_UD_CFA_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL
+ #define CQ_RES_UD_CFA_STATUS_HW_FLUSH_ERR 0x8UL
+ #define CQ_RES_UD_CFA_STATUS_LAST CQ_RES_UD_CFA_STATUS_HW_FLUSH_ERR
+ __le16 flags;
+ #define CQ_RES_UD_CFA_FLAGS_SRQ 0x1UL
+ #define CQ_RES_UD_CFA_FLAGS_SRQ_RQ 0x0UL
+ #define CQ_RES_UD_CFA_FLAGS_SRQ_SRQ 0x1UL
+ #define CQ_RES_UD_CFA_FLAGS_SRQ_LAST CQ_RES_UD_CFA_FLAGS_SRQ_SRQ
+ #define CQ_RES_UD_CFA_FLAGS_IMM 0x2UL
+ #define CQ_RES_UD_CFA_FLAGS_UNUSED_MASK 0xcUL
+ #define CQ_RES_UD_CFA_FLAGS_UNUSED_SFT 2
+ #define CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_MASK 0x30UL
+ #define CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_SFT 4
+ #define CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_V1 (0x0UL << 4)
+ #define CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_V2IPV4 (0x2UL << 4)
+ #define CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_V2IPV6 (0x3UL << 4)
+ #define CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_LAST CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_V2IPV6
+ #define CQ_RES_UD_CFA_FLAGS_META_FORMAT_MASK 0x3c0UL
+ #define CQ_RES_UD_CFA_FLAGS_META_FORMAT_SFT 6
+ #define CQ_RES_UD_CFA_FLAGS_META_FORMAT_NONE (0x0UL << 6)
+ #define CQ_RES_UD_CFA_FLAGS_META_FORMAT_VLAN (0x1UL << 6)
+ #define CQ_RES_UD_CFA_FLAGS_META_FORMAT_TUNNEL_ID (0x2UL << 6)
+ #define CQ_RES_UD_CFA_FLAGS_META_FORMAT_CHDR_DATA (0x3UL << 6)
+ #define CQ_RES_UD_CFA_FLAGS_META_FORMAT_HDR_OFFSET (0x4UL << 6)
+ #define CQ_RES_UD_CFA_FLAGS_META_FORMAT_LAST CQ_RES_UD_CFA_FLAGS_META_FORMAT_HDR_OFFSET
+ #define CQ_RES_UD_CFA_FLAGS_EXT_META_FORMAT_MASK 0xc00UL
+ #define CQ_RES_UD_CFA_FLAGS_EXT_META_FORMAT_SFT 10
+ __le32 src_qp_high_srq_or_rq_wr_id;
+ #define CQ_RES_UD_CFA_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL
+ #define CQ_RES_UD_CFA_SRQ_OR_RQ_WR_ID_SFT 0
+ #define CQ_RES_UD_CFA_SRC_QP_HIGH_MASK 0xff000000UL
+ #define CQ_RES_UD_CFA_SRC_QP_HIGH_SFT 24
+};
+
+/* cq_res_ud_cfa_v2 (size:256b/32B) */
+struct cq_res_ud_cfa_v2 {
+ __le16 length;
+ #define CQ_RES_UD_CFA_V2_LENGTH_MASK 0x3fffUL
+ #define CQ_RES_UD_CFA_V2_LENGTH_SFT 0
+ __le16 cfa_metadata0;
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA0_VID_MASK 0xfffUL
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA0_VID_SFT 0
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA0_DE 0x1000UL
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA0_PRI_MASK 0xe000UL
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA0_PRI_SFT 13
+ __le32 imm_data;
+ __le32 qid;
+ #define CQ_RES_UD_CFA_V2_QID_MASK 0xfffffUL
+ #define CQ_RES_UD_CFA_V2_QID_SFT 0
+ __le32 cfa_metadata2;
+ __le16 src_mac[3];
+ __le16 src_qp_low;
+ u8 cqe_type_toggle;
+ #define CQ_RES_UD_CFA_V2_TOGGLE 0x1UL
+ #define CQ_RES_UD_CFA_V2_CQE_TYPE_MASK 0x1eUL
+ #define CQ_RES_UD_CFA_V2_CQE_TYPE_SFT 1
+ #define CQ_RES_UD_CFA_V2_CQE_TYPE_RES_UD_CFA (0x4UL << 1)
+ #define CQ_RES_UD_CFA_V2_CQE_TYPE_LAST CQ_RES_UD_CFA_V2_CQE_TYPE_RES_UD_CFA
+ u8 status;
+ #define CQ_RES_UD_CFA_V2_STATUS_OK 0x0UL
+ #define CQ_RES_UD_CFA_V2_STATUS_LOCAL_ACCESS_ERROR 0x1UL
+ #define CQ_RES_UD_CFA_V2_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL
+ #define CQ_RES_UD_CFA_V2_STATUS_LOCAL_PROTECTION_ERR 0x3UL
+ #define CQ_RES_UD_CFA_V2_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+ #define CQ_RES_UD_CFA_V2_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL
+ #define CQ_RES_UD_CFA_V2_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL
+ #define CQ_RES_UD_CFA_V2_STATUS_HW_FLUSH_ERR 0x8UL
+ #define CQ_RES_UD_CFA_V2_STATUS_LAST CQ_RES_UD_CFA_V2_STATUS_HW_FLUSH_ERR
+ __le16 flags;
+ #define CQ_RES_UD_CFA_V2_FLAGS_SRQ 0x1UL
+ #define CQ_RES_UD_CFA_V2_FLAGS_SRQ_RQ 0x0UL
+ #define CQ_RES_UD_CFA_V2_FLAGS_SRQ_SRQ 0x1UL
+ #define CQ_RES_UD_CFA_V2_FLAGS_SRQ_LAST CQ_RES_UD_CFA_V2_FLAGS_SRQ_SRQ
+ #define CQ_RES_UD_CFA_V2_FLAGS_IMM 0x2UL
+ #define CQ_RES_UD_CFA_V2_FLAGS_UNUSED_MASK 0xcUL
+ #define CQ_RES_UD_CFA_V2_FLAGS_UNUSED_SFT 2
+ #define CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_MASK 0x30UL
+ #define CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_SFT 4
+ #define CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_V1 (0x0UL << 4)
+ #define CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_V2IPV4 (0x2UL << 4)
+ #define CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_V2IPV6 (0x3UL << 4)
+ #define CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_LAST CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_V2IPV6
+ #define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_MASK 0x3c0UL
+ #define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_SFT 6
+ #define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_NONE (0x0UL << 6)
+ #define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_ACT_REC_PTR (0x1UL << 6)
+ #define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_TUNNEL_ID (0x2UL << 6)
+ #define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_CHDR_DATA (0x3UL << 6)
+ #define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_HDR_OFFSET (0x4UL << 6)
+ #define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_LAST \
+ CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_HDR_OFFSET
+ __le32 src_qp_high_srq_or_rq_wr_id;
+ #define CQ_RES_UD_CFA_V2_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL
+ #define CQ_RES_UD_CFA_V2_SRQ_OR_RQ_WR_ID_SFT 0
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA1_MASK 0xf00000UL
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA1_SFT 20
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_MASK 0x700000UL
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_SFT 20
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPID88A8 (0x0UL << 20)
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPID8100 (0x1UL << 20)
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPID9100 (0x2UL << 20)
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPID9200 (0x3UL << 20)
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPID9300 (0x4UL << 20)
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPIDCFG (0x5UL << 20)
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_LAST \
+ CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPIDCFG
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA1_VALID 0x800000UL
+ #define CQ_RES_UD_CFA_V2_SRC_QP_HIGH_MASK 0xff000000UL
+ #define CQ_RES_UD_CFA_V2_SRC_QP_HIGH_SFT 24
+};
+
+/* cq_res_raweth_qp1 (size:256b/32B) */
+struct cq_res_raweth_qp1 {
+ __le16 length;
+ #define CQ_RES_RAWETH_QP1_LENGTH_MASK 0x3fffUL
+ #define CQ_RES_RAWETH_QP1_LENGTH_SFT 0
+ __le16 raweth_qp1_flags;
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_MASK 0x3ffUL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_SFT 0
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ERROR 0x1UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_MASK 0x3c0UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_SFT 6
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_NOT_KNOWN (0x0UL << 6)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_IP (0x1UL << 6)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_TCP (0x2UL << 6)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_UDP (0x3UL << 6)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_FCOE (0x4UL << 6)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE (0x5UL << 6)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ICMP (0x7UL << 6)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_PTP_WO_TIMESTAMP (0x8UL << 6)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_PTP_W_TIMESTAMP (0x9UL << 6)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_LAST \
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_PTP_W_TIMESTAMP
+ __le16 raweth_qp1_errors;
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_IP_CS_ERROR 0x10UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_L4_CS_ERROR 0x20UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_IP_CS_ERROR 0x40UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_L4_CS_ERROR 0x80UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_CRC_ERROR 0x100UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_MASK 0xe00UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_SFT 9
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_NO_ERROR (0x0UL << 9)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (0x1UL << 9)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (0x2UL << 9)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (0x3UL << 9)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (0x4UL << 9)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (0x5UL << 9)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (0x6UL << 9)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_LAST \
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_MASK 0xf000UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_SFT 12
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_NO_ERROR (0x0UL << 12)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_VERSION (0x1UL << 12)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (0x2UL << 12)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_TTL (0x3UL << 12)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (0x4UL << 12)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (0x5UL << 12)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (0x6UL << 12)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7UL << 12)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (0x8UL << 12)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_LAST \
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN
+ __le16 raweth_qp1_cfa_code;
+ __le64 qp_handle;
+ __le32 raweth_qp1_flags2;
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC 0x1UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC 0x2UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_T_IP_CS_CALC 0x4UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_T_L4_CS_CALC 0x8UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_MASK 0xf0UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_SFT 4
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_NONE (0x0UL << 4)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN (0x1UL << 4)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_TUNNEL_ID (0x2UL << 4)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_CHDR_DATA (0x3UL << 4)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_HDR_OFFSET (0x4UL << 4)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_LAST \
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_HDR_OFFSET
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE 0x100UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_CALC 0x200UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_EXT_META_FORMAT_MASK 0xc00UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_EXT_META_FORMAT_SFT 10
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_MASK 0xffff0000UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_SFT 16
+ __le32 raweth_qp1_metadata;
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_DE_VID_MASK 0xffffUL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_DE_VID_SFT 0
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK 0xfffUL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_SFT 0
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_DE 0x1000UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK 0xe000UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT 13
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK 0xffff0000UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT 16
+ u8 cqe_type_toggle;
+ #define CQ_RES_RAWETH_QP1_TOGGLE 0x1UL
+ #define CQ_RES_RAWETH_QP1_CQE_TYPE_MASK 0x1eUL
+ #define CQ_RES_RAWETH_QP1_CQE_TYPE_SFT 1
+ #define CQ_RES_RAWETH_QP1_CQE_TYPE_RES_RAWETH_QP1 (0x3UL << 1)
+ #define CQ_RES_RAWETH_QP1_CQE_TYPE_LAST CQ_RES_RAWETH_QP1_CQE_TYPE_RES_RAWETH_QP1
+ u8 status;
+ #define CQ_RES_RAWETH_QP1_STATUS_OK 0x0UL
+ #define CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR 0x1UL
+ #define CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL
+ #define CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR 0x3UL
+ #define CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+ #define CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL
+ #define CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL
+ #define CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR 0x8UL
+ #define CQ_RES_RAWETH_QP1_STATUS_LAST CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR
+ __le16 flags;
+ #define CQ_RES_RAWETH_QP1_FLAGS_SRQ 0x1UL
+ #define CQ_RES_RAWETH_QP1_FLAGS_SRQ_RQ 0x0UL
+ #define CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ 0x1UL
+ #define CQ_RES_RAWETH_QP1_FLAGS_SRQ_LAST CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ
+ __le32 raweth_qp1_payload_offset_srq_or_rq_wr_id;
+ #define CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL
+ #define CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_SFT 0
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_PAYLOAD_OFFSET_MASK 0xff000000UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_PAYLOAD_OFFSET_SFT 24
+};
+
+/* cq_res_raweth_qp1_v2 (size:256b/32B) */
+struct cq_res_raweth_qp1_v2 {
+ __le16 length;
+ #define CQ_RES_RAWETH_QP1_V2_LENGTH_MASK 0x3fffUL
+ #define CQ_RES_RAWETH_QP1_V2_LENGTH_SFT 0
+ __le16 raweth_qp1_flags;
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_MASK 0x3ffUL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_SFT 0
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ERROR 0x1UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_MASK 0x3c0UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_SFT 6
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_NOT_KNOWN (0x0UL << 6)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_IP (0x1UL << 6)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_TCP (0x2UL << 6)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_UDP (0x3UL << 6)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_FCOE (0x4UL << 6)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_ROCE (0x5UL << 6)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_ICMP (0x7UL << 6)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_PTP_WO_TIMESTAMP (0x8UL << 6)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_PTP_W_TIMESTAMP (0x9UL << 6)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_LAST \
+ CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_PTP_W_TIMESTAMP
+ __le16 raweth_qp1_errors;
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_IP_CS_ERROR 0x10UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_L4_CS_ERROR 0x20UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_IP_CS_ERROR 0x40UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_L4_CS_ERROR 0x80UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_CRC_ERROR 0x100UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_MASK 0xe00UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_SFT 9
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_NO_ERROR (0x0UL << 9)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (0x1UL << 9)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (0x2UL << 9)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (0x3UL << 9)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (0x4UL << 9)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (0x5UL << 9)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (0x6UL << 9)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_LAST \
+ CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_MASK 0xf000UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_SFT 12
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_NO_ERROR (0x0UL << 12)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_VERSION (0x1UL << 12)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (0x2UL << 12)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_TTL (0x3UL << 12)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (0x4UL << 12)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (0x5UL << 12)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (0x6UL << 12)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL \
+ (0x7UL << 12)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN \
+ (0x8UL << 12)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_LAST \
+ CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN
+ __le16 cfa_metadata0;
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA0_VID_MASK 0xfffUL
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA0_VID_SFT 0
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA0_DE 0x1000UL
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA0_PRI_MASK 0xe000UL
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA0_PRI_SFT 13
+ __le64 qp_handle;
+ __le32 raweth_qp1_flags2;
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_CS_ALL_OK_MODE 0x8UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_MASK 0xf0UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_SFT 4
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_NONE (0x0UL << 4)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_ACT_REC_PTR (0x1UL << 4)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_TUNNEL_ID (0x2UL << 4)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_CHDR_DATA (0x3UL << 4)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_HDR_OFFSET (0x4UL << 4)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_LAST \
+ CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_HDR_OFFSET
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_IP_TYPE 0x100UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_CALC 0x200UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_CS_OK_MASK 0xfc00UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_CS_OK_SFT 10
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_MASK 0xffff0000UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_SFT 16
+ __le32 cfa_metadata2;
+ u8 cqe_type_toggle;
+ #define CQ_RES_RAWETH_QP1_V2_TOGGLE 0x1UL
+ #define CQ_RES_RAWETH_QP1_V2_CQE_TYPE_MASK 0x1eUL
+ #define CQ_RES_RAWETH_QP1_V2_CQE_TYPE_SFT 1
+ #define CQ_RES_RAWETH_QP1_V2_CQE_TYPE_RES_RAWETH_QP1 (0x3UL << 1)
+ #define CQ_RES_RAWETH_QP1_V2_CQE_TYPE_LAST CQ_RES_RAWETH_QP1_V2_CQE_TYPE_RES_RAWETH_QP1
+ u8 status;
+ #define CQ_RES_RAWETH_QP1_V2_STATUS_OK 0x0UL
+ #define CQ_RES_RAWETH_QP1_V2_STATUS_LOCAL_ACCESS_ERROR 0x1UL
+ #define CQ_RES_RAWETH_QP1_V2_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL
+ #define CQ_RES_RAWETH_QP1_V2_STATUS_LOCAL_PROTECTION_ERR 0x3UL
+ #define CQ_RES_RAWETH_QP1_V2_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+ #define CQ_RES_RAWETH_QP1_V2_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL
+ #define CQ_RES_RAWETH_QP1_V2_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL
+ #define CQ_RES_RAWETH_QP1_V2_STATUS_HW_FLUSH_ERR 0x8UL
+ #define CQ_RES_RAWETH_QP1_V2_STATUS_LAST CQ_RES_RAWETH_QP1_V2_STATUS_HW_FLUSH_ERR
+ __le16 flags;
+ #define CQ_RES_RAWETH_QP1_V2_FLAGS_SRQ 0x1UL
+ #define CQ_RES_RAWETH_QP1_V2_FLAGS_SRQ_RQ 0x0UL
+ #define CQ_RES_RAWETH_QP1_V2_FLAGS_SRQ_SRQ 0x1UL
+ #define CQ_RES_RAWETH_QP1_V2_FLAGS_SRQ_LAST CQ_RES_RAWETH_QP1_V2_FLAGS_SRQ_SRQ
+ __le32 raweth_qp1_payload_offset_srq_or_rq_wr_id;
+ #define CQ_RES_RAWETH_QP1_V2_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL
+ #define CQ_RES_RAWETH_QP1_V2_SRQ_OR_RQ_WR_ID_SFT 0
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_MASK 0xf00000UL
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_SFT 20
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_MASK 0x700000UL
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_SFT 20
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPID88A8 (0x0UL << 20)
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPID8100 (0x1UL << 20)
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPID9100 (0x2UL << 20)
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPID9200 (0x3UL << 20)
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPID9300 (0x4UL << 20)
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPIDCFG (0x5UL << 20)
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_LAST \
+ CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPIDCFG
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_VALID 0x800000UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_PAYLOAD_OFFSET_MASK 0xff000000UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_PAYLOAD_OFFSET_SFT 24
+};
+
+/* cq_terminal (size:256b/32B) */
+struct cq_terminal {
+ __le64 qp_handle;
+ __le16 sq_cons_idx;
+ __le16 rq_cons_idx;
+ __le32 reserved32_1;
+ __le64 reserved64_3;
+ u8 cqe_type_toggle;
+ #define CQ_TERMINAL_TOGGLE 0x1UL
+ #define CQ_TERMINAL_CQE_TYPE_MASK 0x1eUL
+ #define CQ_TERMINAL_CQE_TYPE_SFT 1
+ #define CQ_TERMINAL_CQE_TYPE_TERMINAL (0xeUL << 1)
+ #define CQ_TERMINAL_CQE_TYPE_LAST CQ_TERMINAL_CQE_TYPE_TERMINAL
+ u8 status;
+ #define CQ_TERMINAL_STATUS_OK 0x0UL
+ #define CQ_TERMINAL_STATUS_LAST CQ_TERMINAL_STATUS_OK
+ __le16 reserved16;
+ __le32 reserved32_2;
+};
+
+/* cq_cutoff (size:256b/32B) */
+struct cq_cutoff {
+ __le64 reserved64_1;
+ __le64 reserved64_2;
+ __le64 reserved64_3;
+ u8 cqe_type_toggle;
+ #define CQ_CUTOFF_TOGGLE 0x1UL
+ #define CQ_CUTOFF_CQE_TYPE_MASK 0x1eUL
+ #define CQ_CUTOFF_CQE_TYPE_SFT 1
+ #define CQ_CUTOFF_CQE_TYPE_CUT_OFF (0xfUL << 1)
+ #define CQ_CUTOFF_CQE_TYPE_LAST CQ_CUTOFF_CQE_TYPE_CUT_OFF
+ #define CQ_CUTOFF_RESIZE_TOGGLE_MASK 0x60UL
+ #define CQ_CUTOFF_RESIZE_TOGGLE_SFT 5
+ u8 status;
+ #define CQ_CUTOFF_STATUS_OK 0x0UL
+ #define CQ_CUTOFF_STATUS_LAST CQ_CUTOFF_STATUS_OK
+ __le16 reserved16;
+ __le32 reserved32;
+};
+
+/* nq_base (size:128b/16B) */
+struct nq_base {
+ __le16 info10_type;
+ #define NQ_BASE_TYPE_MASK 0x3fUL
+ #define NQ_BASE_TYPE_SFT 0
+ #define NQ_BASE_TYPE_CQ_NOTIFICATION 0x30UL
+ #define NQ_BASE_TYPE_SRQ_EVENT 0x32UL
+ #define NQ_BASE_TYPE_DBQ_EVENT 0x34UL
+ #define NQ_BASE_TYPE_QP_EVENT 0x38UL
+ #define NQ_BASE_TYPE_FUNC_EVENT 0x3aUL
+ #define NQ_BASE_TYPE_LAST NQ_BASE_TYPE_FUNC_EVENT
+ #define NQ_BASE_INFO10_MASK 0xffc0UL
+ #define NQ_BASE_INFO10_SFT 6
+ __le16 info16;
+ __le32 info32;
+ __le32 info63_v[2];
+ #define NQ_BASE_V 0x1UL
+ #define NQ_BASE_INFO63_MASK 0xfffffffeUL
+ #define NQ_BASE_INFO63_SFT 1
+};
+
+/* nq_cn (size:128b/16B) */
+struct nq_cn {
+ __le16 type;
+ #define NQ_CN_TYPE_MASK 0x3fUL
+ #define NQ_CN_TYPE_SFT 0
+ #define NQ_CN_TYPE_CQ_NOTIFICATION 0x30UL
+ #define NQ_CN_TYPE_LAST NQ_CN_TYPE_CQ_NOTIFICATION
+ #define NQ_CN_TOGGLE_MASK 0xc0UL
+ #define NQ_CN_TOGGLE_SFT 6
+ __le16 reserved16;
+ __le32 cq_handle_low;
+ __le32 v;
+ #define NQ_CN_V 0x1UL
+ __le32 cq_handle_high;
+};
+
+/* nq_srq_event (size:128b/16B) */
+struct nq_srq_event {
+ u8 type;
+ #define NQ_SRQ_EVENT_TYPE_MASK 0x3fUL
+ #define NQ_SRQ_EVENT_TYPE_SFT 0
+ #define NQ_SRQ_EVENT_TYPE_SRQ_EVENT 0x32UL
+ #define NQ_SRQ_EVENT_TYPE_LAST NQ_SRQ_EVENT_TYPE_SRQ_EVENT
+ #define NQ_SRQ_EVENT_TOGGLE_MASK 0xc0UL
+ #define NQ_SRQ_EVENT_TOGGLE_SFT 6
+ u8 event;
+ #define NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT 0x1UL
+ #define NQ_SRQ_EVENT_EVENT_LAST NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT
+ __le16 reserved16;
+ __le32 srq_handle_low;
+ __le32 v;
+ #define NQ_SRQ_EVENT_V 0x1UL
+ __le32 srq_handle_high;
+};
+
+/* nq_dbq_event (size:128b/16B) */
+struct nq_dbq_event {
+ u8 type;
+ #define NQ_DBQ_EVENT_TYPE_MASK 0x3fUL
+ #define NQ_DBQ_EVENT_TYPE_SFT 0
+ #define NQ_DBQ_EVENT_TYPE_DBQ_EVENT 0x34UL
+ #define NQ_DBQ_EVENT_TYPE_LAST NQ_DBQ_EVENT_TYPE_DBQ_EVENT
+ u8 event;
+ #define NQ_DBQ_EVENT_EVENT_DBQ_THRESHOLD_EVENT 0x1UL
+ #define NQ_DBQ_EVENT_EVENT_LAST NQ_DBQ_EVENT_EVENT_DBQ_THRESHOLD_EVENT
+ __le16 db_pfid;
+ #define NQ_DBQ_EVENT_DB_PFID_MASK 0xfUL
+ #define NQ_DBQ_EVENT_DB_PFID_SFT 0
+ __le32 db_dpi;
+ #define NQ_DBQ_EVENT_DB_DPI_MASK 0xfffffUL
+ #define NQ_DBQ_EVENT_DB_DPI_SFT 0
+ __le32 v;
+ #define NQ_DBQ_EVENT_V 0x1UL
+ __le32 db_type_db_xid;
+ #define NQ_DBQ_EVENT_DB_XID_MASK 0xfffffUL
+ #define NQ_DBQ_EVENT_DB_XID_SFT 0
+ #define NQ_DBQ_EVENT_DB_TYPE_MASK 0xf0000000UL
+ #define NQ_DBQ_EVENT_DB_TYPE_SFT 28
+};
+
+/* xrrq_irrq (size:256b/32B) */
+struct xrrq_irrq {
+ __le16 credits_type;
+ #define XRRQ_IRRQ_TYPE 0x1UL
+ #define XRRQ_IRRQ_TYPE_READ_REQ 0x0UL
+ #define XRRQ_IRRQ_TYPE_ATOMIC_REQ 0x1UL
+ #define XRRQ_IRRQ_TYPE_LAST XRRQ_IRRQ_TYPE_ATOMIC_REQ
+ #define XRRQ_IRRQ_CREDITS_MASK 0xf800UL
+ #define XRRQ_IRRQ_CREDITS_SFT 11
+ __le16 reserved16;
+ __le32 reserved32;
+ __le32 psn;
+ #define XRRQ_IRRQ_PSN_MASK 0xffffffUL
+ #define XRRQ_IRRQ_PSN_SFT 0
+ __le32 msn;
+ #define XRRQ_IRRQ_MSN_MASK 0xffffffUL
+ #define XRRQ_IRRQ_MSN_SFT 0
+ __le64 va_or_atomic_result;
+ __le32 rdma_r_key;
+ __le32 length;
+};
+
+/* xrrq_orrq (size:256b/32B) */
+struct xrrq_orrq {
+ __le16 num_sges_type;
+ #define XRRQ_ORRQ_TYPE 0x1UL
+ #define XRRQ_ORRQ_TYPE_READ_REQ 0x0UL
+ #define XRRQ_ORRQ_TYPE_ATOMIC_REQ 0x1UL
+ #define XRRQ_ORRQ_TYPE_LAST XRRQ_ORRQ_TYPE_ATOMIC_REQ
+ #define XRRQ_ORRQ_NUM_SGES_MASK 0xf800UL
+ #define XRRQ_ORRQ_NUM_SGES_SFT 11
+ __le16 reserved16;
+ __le32 length;
+ __le32 psn;
+ #define XRRQ_ORRQ_PSN_MASK 0xffffffUL
+ #define XRRQ_ORRQ_PSN_SFT 0
+ __le32 end_psn;
+ #define XRRQ_ORRQ_END_PSN_MASK 0xffffffUL
+ #define XRRQ_ORRQ_END_PSN_SFT 0
+ __le64 first_sge_phy_or_sing_sge_va;
+ __le32 single_sge_l_key;
+ __le32 single_sge_size;
+};
+
+/* ptu_pte (size:64b/8B) */
+struct ptu_pte {
+ __le32 page_next_to_last_last_valid[2];
+ #define PTU_PTE_VALID 0x1UL
+ #define PTU_PTE_LAST 0x2UL
+ #define PTU_PTE_NEXT_TO_LAST 0x4UL
+ #define PTU_PTE_UNUSED_MASK 0xff8UL
+ #define PTU_PTE_UNUSED_SFT 3
+ #define PTU_PTE_PAGE_MASK 0xfffff000UL
+ #define PTU_PTE_PAGE_SFT 12
+};
+
+/* ptu_pde (size:64b/8B) */
+struct ptu_pde {
+ __le32 page_valid[2];
+ #define PTU_PDE_VALID 0x1UL
+ #define PTU_PDE_UNUSED_MASK 0xffeUL
+ #define PTU_PDE_UNUSED_SFT 1
+ #define PTU_PDE_PAGE_MASK 0xfffff000UL
+ #define PTU_PDE_PAGE_SFT 12
+};
+
+#endif /* ___BNXT_RE_HSI_H__ */
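
Throughout this header, every bitfield is described by a MASK/SFT macro pair over a little-endian hardware word; extracting a field is a mask-and-shift on the CPU-endian value. A minimal usage sketch (the helper name below is hypothetical and not part of the patch):

static inline u32 nq_base_get_type(const struct nq_base *nqe)
{
	/* Convert from little-endian, then mask and shift out the 6-bit type. */
	u16 info10_type = le16_to_cpu(nqe->info10_type);

	return (info10_type & NQ_BASE_TYPE_MASK) >> NQ_BASE_TYPE_SFT;
}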
diff --git a/drivers/infiniband/hw/cxgb3/Kconfig b/drivers/infiniband/hw/cxgb3/Kconfig
deleted file mode 100644
index 2b6352b85485..000000000000
--- a/drivers/infiniband/hw/cxgb3/Kconfig
+++ /dev/null
@@ -1,27 +0,0 @@
-config INFINIBAND_CXGB3
- tristate "Chelsio RDMA Driver"
- depends on CHELSIO_T3 && INET
- select GENERIC_ALLOCATOR
- ---help---
- This is an iWARP/RDMA driver for the Chelsio T3 1GbE and
- 10GbE adapters.
-
- For general information about Chelsio and our products, visit
- our website at <http://www.chelsio.com>.
-
- For customer support, please visit our customer support page at
- <http://www.chelsio.com/support.html>.
-
- Please send feedback to <linux-bugs@chelsio.com>.
-
- To compile this driver as a module, choose M here: the module
- will be called iw_cxgb3.
-
-config INFINIBAND_CXGB3_DEBUG
- bool "Verbose debugging output"
- depends on INFINIBAND_CXGB3
- default n
- ---help---
- This option causes the Chelsio RDMA driver to produce copious
- amounts of debug messages. Select this if you are developing
- the driver or trying to diagnose a problem.
diff --git a/drivers/infiniband/hw/cxgb3/Makefile b/drivers/infiniband/hw/cxgb3/Makefile
deleted file mode 100644
index 2761364185af..000000000000
--- a/drivers/infiniband/hw/cxgb3/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb3
-
-obj-$(CONFIG_INFINIBAND_CXGB3) += iw_cxgb3.o
-
-iw_cxgb3-y := iwch_cm.o iwch_ev.o iwch_cq.o iwch_qp.o iwch_mem.o \
- iwch_provider.o iwch.o cxio_hal.o cxio_resource.o
-
-ccflags-$(CONFIG_INFINIBAND_CXGB3_DEBUG) += -DDEBUG
diff --git a/drivers/infiniband/hw/cxgb3/cxio_dbg.c b/drivers/infiniband/hw/cxgb3/cxio_dbg.c
deleted file mode 100644
index 8bca6b4ec9af..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_dbg.c
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifdef DEBUG
-#include <linux/types.h>
-#include <linux/slab.h>
-#include "common.h"
-#include "cxgb3_ioctl.h"
-#include "cxio_hal.h"
-#include "cxio_wr.h"
-
-void cxio_dump_tpt(struct cxio_rdev *rdev, u32 stag)
-{
- struct ch_mem_range *m;
- u64 *data;
- int rc;
- int size = 32;
-
- m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
- if (!m) {
- PDBG("%s couldn't allocate memory.\n", __func__);
- return;
- }
- m->mem_id = MEM_PMRX;
- m->addr = (stag>>8) * 32 + rdev->rnic_info.tpt_base;
- m->len = size;
- PDBG("%s TPT addr 0x%x len %d\n", __func__, m->addr, m->len);
- rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
- if (rc) {
- PDBG("%s toectl returned error %d\n", __func__, rc);
- kfree(m);
- return;
- }
-
- data = (u64 *)m->buf;
- while (size > 0) {
- PDBG("TPT %08x: %016llx\n", m->addr, (unsigned long long) *data);
- size -= 8;
- data++;
- m->addr += 8;
- }
- kfree(m);
-}
-
-void cxio_dump_pbl(struct cxio_rdev *rdev, u32 pbl_addr, uint len, u8 shift)
-{
- struct ch_mem_range *m;
- u64 *data;
- int rc;
- int size, npages;
-
- shift += 12;
- npages = (len + (1ULL << shift) - 1) >> shift;
- size = npages * sizeof(u64);
-
- m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
- if (!m) {
- PDBG("%s couldn't allocate memory.\n", __func__);
- return;
- }
- m->mem_id = MEM_PMRX;
- m->addr = pbl_addr;
- m->len = size;
- PDBG("%s PBL addr 0x%x len %d depth %d\n",
- __func__, m->addr, m->len, npages);
- rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
- if (rc) {
- PDBG("%s toectl returned error %d\n", __func__, rc);
- kfree(m);
- return;
- }
-
- data = (u64 *)m->buf;
- while (size > 0) {
- PDBG("PBL %08x: %016llx\n", m->addr, (unsigned long long) *data);
- size -= 8;
- data++;
- m->addr += 8;
- }
- kfree(m);
-}
-
-void cxio_dump_wqe(union t3_wr *wqe)
-{
- __be64 *data = (__be64 *)wqe;
- uint size = (uint)(be64_to_cpu(*data) & 0xff);
-
- if (size == 0)
- size = 8;
- while (size > 0) {
- PDBG("WQE %p: %016llx\n", data,
- (unsigned long long) be64_to_cpu(*data));
- size--;
- data++;
- }
-}
-
-void cxio_dump_wce(struct t3_cqe *wce)
-{
- __be64 *data = (__be64 *)wce;
- int size = sizeof(*wce);
-
- while (size > 0) {
- PDBG("WCE %p: %016llx\n", data,
- (unsigned long long) be64_to_cpu(*data));
- size -= 8;
- data++;
- }
-}
-
-void cxio_dump_rqt(struct cxio_rdev *rdev, u32 hwtid, int nents)
-{
- struct ch_mem_range *m;
- int size = nents * 64;
- u64 *data;
- int rc;
-
- m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
- if (!m) {
- PDBG("%s couldn't allocate memory.\n", __func__);
- return;
- }
- m->mem_id = MEM_PMRX;
- m->addr = ((hwtid)<<10) + rdev->rnic_info.rqt_base;
- m->len = size;
- PDBG("%s RQT addr 0x%x len %d\n", __func__, m->addr, m->len);
- rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
- if (rc) {
- PDBG("%s toectl returned error %d\n", __func__, rc);
- kfree(m);
- return;
- }
-
- data = (u64 *)m->buf;
- while (size > 0) {
- PDBG("RQT %08x: %016llx\n", m->addr, (unsigned long long) *data);
- size -= 8;
- data++;
- m->addr += 8;
- }
- kfree(m);
-}
-
-void cxio_dump_tcb(struct cxio_rdev *rdev, u32 hwtid)
-{
- struct ch_mem_range *m;
- int size = TCB_SIZE;
- u32 *data;
- int rc;
-
- m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
- if (!m) {
- PDBG("%s couldn't allocate memory.\n", __func__);
- return;
- }
- m->mem_id = MEM_CM;
- m->addr = hwtid * size;
- m->len = size;
- PDBG("%s TCB %d len %d\n", __func__, m->addr, m->len);
- rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
- if (rc) {
- PDBG("%s toectl returned error %d\n", __func__, rc);
- kfree(m);
- return;
- }
-
- data = (u32 *)m->buf;
- while (size > 0) {
- printk("%2u: %08x %08x %08x %08x %08x %08x %08x %08x\n",
- m->addr,
- *(data+2), *(data+3), *(data),*(data+1),
- *(data+6), *(data+7), *(data+4), *(data+5));
- size -= 32;
- data += 8;
- m->addr += 32;
- }
- kfree(m);
-}
-#endif
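
Each dump helper deleted above follows the same pattern: allocate a struct ch_mem_range, fetch a region of adapter memory through the RDMA_GET_MEM control hook, then walk the returned buffer 64 bits at a time. A stand-alone sketch of that final walk, with hypothetical names and no Chelsio headers required:

/* Walk a fetched memory region 8 bytes per line, as cxio_dump_tpt() did. */
static void dump_u64_region(u32 addr, const u64 *data, int size)
{
	while (size > 0) {
		pr_debug("%08x: %016llx\n", addr, (unsigned long long)*data);
		size -= 8;
		data++;
		addr += 8;
	}
}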
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
deleted file mode 100644
index de1c61b417d6..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ /dev/null
@@ -1,1343 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <asm/delay.h>
-
-#include <linux/mutex.h>
-#include <linux/netdevice.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <net/net_namespace.h>
-
-#include "cxio_resource.h"
-#include "cxio_hal.h"
-#include "cxgb3_offload.h"
-#include "sge_defs.h"
-
-static LIST_HEAD(rdev_list);
-static cxio_hal_ev_callback_func_t cxio_ev_cb = NULL;
-
-static struct cxio_rdev *cxio_hal_find_rdev_by_name(char *dev_name)
-{
- struct cxio_rdev *rdev;
-
- list_for_each_entry(rdev, &rdev_list, entry)
- if (!strcmp(rdev->dev_name, dev_name))
- return rdev;
- return NULL;
-}
-
-static struct cxio_rdev *cxio_hal_find_rdev_by_t3cdev(struct t3cdev *tdev)
-{
- struct cxio_rdev *rdev;
-
- list_for_each_entry(rdev, &rdev_list, entry)
- if (rdev->t3cdev_p == tdev)
- return rdev;
- return NULL;
-}
-
-int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
- enum t3_cq_opcode op, u32 credit)
-{
- int ret;
- struct t3_cqe *cqe;
- u32 rptr;
-
- struct rdma_cq_op setup;
- setup.id = cq->cqid;
- setup.credits = (op == CQ_CREDIT_UPDATE) ? credit : 0;
- setup.op = op;
- ret = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_OP, &setup);
-
- if ((ret < 0) || (op == CQ_CREDIT_UPDATE))
- return ret;
-
- /*
- * If the rearm returned an index other than our current index,
- * then there might be CQE's in flight (being DMA'd). We must wait
- * here for them to complete or the consumer can miss a notification.
- */
- if (Q_PTR2IDX((cq->rptr), cq->size_log2) != ret) {
- int i=0;
-
- rptr = cq->rptr;
-
- /*
- * Keep the generation correct by bumping rptr until it
- * matches the index returned by the rearm - 1.
- */
- while (Q_PTR2IDX((rptr+1), cq->size_log2) != ret)
- rptr++;
-
- /*
- * Now rptr is the index for the (last) cqe that was
- * in-flight at the time the HW rearmed the CQ. We
- * spin until that CQE is valid.
- */
- cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2);
- while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
- udelay(1);
- if (i++ > 1000000) {
- printk(KERN_ERR "%s: stalled rnic\n",
- rdev_p->dev_name);
- return -EIO;
- }
- }
-
- return 1;
- }
-
- return 0;
-}
-
-static int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid)
-{
- struct rdma_cq_setup setup;
- setup.id = cqid;
- setup.base_addr = 0; /* NULL address */
- setup.size = 0; /* disable the CQ */
- setup.credits = 0;
- setup.credit_thres = 0;
- setup.ovfl_mode = 0;
- return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
-}
-
-static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
-{
- u64 sge_cmd;
- struct t3_modify_qp_wr *wqe;
- struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
- if (!skb) {
- PDBG("%s alloc_skb failed\n", __func__);
- return -ENOMEM;
- }
- wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
- memset(wqe, 0, sizeof(*wqe));
- build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD,
- T3_COMPLETION_FLAG | T3_NOTIFY_FLAG, 0, qpid, 7,
- T3_SOPEOP);
- wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
- sge_cmd = qpid << 8 | 3;
- wqe->sge_cmd = cpu_to_be64(sge_cmd);
- skb->priority = CPL_PRIORITY_CONTROL;
- return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
-}
-
-int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
-{
- struct rdma_cq_setup setup;
- int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);
-
- size += 1; /* one extra page for storing cq-in-err state */
- cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
- if (!cq->cqid)
- return -ENOMEM;
- if (kernel) {
- cq->sw_queue = kzalloc(size, GFP_KERNEL);
- if (!cq->sw_queue)
- return -ENOMEM;
- }
- cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
- &(cq->dma_addr), GFP_KERNEL);
- if (!cq->queue) {
- kfree(cq->sw_queue);
- return -ENOMEM;
- }
- dma_unmap_addr_set(cq, mapping, cq->dma_addr);
- memset(cq->queue, 0, size);
- setup.id = cq->cqid;
- setup.base_addr = (u64) (cq->dma_addr);
- setup.size = 1UL << cq->size_log2;
- setup.credits = 65535;
- setup.credit_thres = 1;
- if (rdev_p->t3cdev_p->type != T3A)
- setup.ovfl_mode = 0;
- else
- setup.ovfl_mode = 1;
- return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
-}
-
-#ifdef notyet
-int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
-{
- struct rdma_cq_setup setup;
- setup.id = cq->cqid;
- setup.base_addr = (u64) (cq->dma_addr);
- setup.size = 1UL << cq->size_log2;
- setup.credits = setup.size;
- setup.credit_thres = setup.size; /* TBD: overflow recovery */
- setup.ovfl_mode = 1;
- return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
-}
-#endif
-
-static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
-{
- struct cxio_qpid_list *entry;
- u32 qpid;
- int i;
-
- mutex_lock(&uctx->lock);
- if (!list_empty(&uctx->qpids)) {
- entry = list_entry(uctx->qpids.next, struct cxio_qpid_list,
- entry);
- list_del(&entry->entry);
- qpid = entry->qpid;
- kfree(entry);
- } else {
- qpid = cxio_hal_get_qpid(rdev_p->rscp);
- if (!qpid)
- goto out;
- for (i = qpid+1; i & rdev_p->qpmask; i++) {
- entry = kmalloc(sizeof *entry, GFP_KERNEL);
- if (!entry)
- break;
- entry->qpid = i;
- list_add_tail(&entry->entry, &uctx->qpids);
- }
- }
-out:
- mutex_unlock(&uctx->lock);
- PDBG("%s qpid 0x%x\n", __func__, qpid);
- return qpid;
-}
-
-static void put_qpid(struct cxio_rdev *rdev_p, u32 qpid,
- struct cxio_ucontext *uctx)
-{
- struct cxio_qpid_list *entry;
-
- entry = kmalloc(sizeof *entry, GFP_KERNEL);
- if (!entry)
- return;
- PDBG("%s qpid 0x%x\n", __func__, qpid);
- entry->qpid = qpid;
- mutex_lock(&uctx->lock);
- list_add_tail(&entry->entry, &uctx->qpids);
- mutex_unlock(&uctx->lock);
-}
-
-void cxio_release_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
-{
- struct list_head *pos, *nxt;
- struct cxio_qpid_list *entry;
-
- mutex_lock(&uctx->lock);
- list_for_each_safe(pos, nxt, &uctx->qpids) {
- entry = list_entry(pos, struct cxio_qpid_list, entry);
- list_del_init(&entry->entry);
- if (!(entry->qpid & rdev_p->qpmask))
- cxio_hal_put_qpid(rdev_p->rscp, entry->qpid);
- kfree(entry);
- }
- mutex_unlock(&uctx->lock);
-}
-
-void cxio_init_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
-{
- INIT_LIST_HEAD(&uctx->qpids);
- mutex_init(&uctx->lock);
-}
-
-int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
- struct t3_wq *wq, struct cxio_ucontext *uctx)
-{
- int depth = 1UL << wq->size_log2;
- int rqsize = 1UL << wq->rq_size_log2;
-
- wq->qpid = get_qpid(rdev_p, uctx);
- if (!wq->qpid)
- return -ENOMEM;
-
- wq->rq = kzalloc(depth * sizeof(struct t3_swrq), GFP_KERNEL);
- if (!wq->rq)
- goto err1;
-
- wq->rq_addr = cxio_hal_rqtpool_alloc(rdev_p, rqsize);
- if (!wq->rq_addr)
- goto err2;
-
- wq->sq = kzalloc(depth * sizeof(struct t3_swsq), GFP_KERNEL);
- if (!wq->sq)
- goto err3;
-
- wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
- depth * sizeof(union t3_wr),
- &(wq->dma_addr), GFP_KERNEL);
- if (!wq->queue)
- goto err4;
-
- memset(wq->queue, 0, depth * sizeof(union t3_wr));
- dma_unmap_addr_set(wq, mapping, wq->dma_addr);
- wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
- if (!kernel_domain)
- wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
- (wq->qpid << rdev_p->qpshift);
- wq->rdev = rdev_p;
- PDBG("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n", __func__,
- wq->qpid, wq->doorbell, (unsigned long long) wq->udb);
- return 0;
-err4:
- kfree(wq->sq);
-err3:
- cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, rqsize);
-err2:
- kfree(wq->rq);
-err1:
- put_qpid(rdev_p, wq->qpid, uctx);
- return -ENOMEM;
-}
-
-int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
-{
- int err;
- err = cxio_hal_clear_cq_ctx(rdev_p, cq->cqid);
- kfree(cq->sw_queue);
- dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
- (1UL << (cq->size_log2))
- * sizeof(struct t3_cqe), cq->queue,
- dma_unmap_addr(cq, mapping));
- cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
- return err;
-}
-
-int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
- struct cxio_ucontext *uctx)
-{
- dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
- (1UL << (wq->size_log2))
- * sizeof(union t3_wr), wq->queue,
- dma_unmap_addr(wq, mapping));
- kfree(wq->sq);
- cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
- kfree(wq->rq);
- put_qpid(rdev_p, wq->qpid, uctx);
- return 0;
-}
-
-static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
-{
- struct t3_cqe cqe;
-
- PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
- wq, cq, cq->sw_rptr, cq->sw_wptr);
- memset(&cqe, 0, sizeof(cqe));
- cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
- V_CQE_OPCODE(T3_SEND) |
- V_CQE_TYPE(0) |
- V_CQE_SWCQE(1) |
- V_CQE_QPID(wq->qpid) |
- V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
- cq->size_log2)));
- *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
- cq->sw_wptr++;
-}
-
-int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
-{
- u32 ptr;
- int flushed = 0;
-
- PDBG("%s wq %p cq %p\n", __func__, wq, cq);
-
- /* flush RQ */
- PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __func__,
- wq->rq_rptr, wq->rq_wptr, count);
- ptr = wq->rq_rptr + count;
- while (ptr++ != wq->rq_wptr) {
- insert_recv_cqe(wq, cq);
- flushed++;
- }
- return flushed;
-}
-
-static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
- struct t3_swsq *sqp)
-{
- struct t3_cqe cqe;
-
- PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
- wq, cq, cq->sw_rptr, cq->sw_wptr);
- memset(&cqe, 0, sizeof(cqe));
- cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
- V_CQE_OPCODE(sqp->opcode) |
- V_CQE_TYPE(1) |
- V_CQE_SWCQE(1) |
- V_CQE_QPID(wq->qpid) |
- V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
- cq->size_log2)));
- cqe.u.scqe.wrid_hi = sqp->sq_wptr;
-
- *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
- cq->sw_wptr++;
-}
-
-int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
-{
- __u32 ptr;
- int flushed = 0;
- struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2);
-
- ptr = wq->sq_rptr + count;
- sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
- while (ptr != wq->sq_wptr) {
- sqp->signaled = 0;
- insert_sq_cqe(wq, cq, sqp);
- ptr++;
- sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
- flushed++;
- }
- return flushed;
-}
-
-/*
- * Move all CQEs from the HWCQ into the SWCQ.
- */
-void cxio_flush_hw_cq(struct t3_cq *cq)
-{
- struct t3_cqe *cqe, *swcqe;
-
- PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
- cqe = cxio_next_hw_cqe(cq);
- while (cqe) {
- PDBG("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n",
- __func__, cq->rptr, cq->sw_wptr);
- swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2);
- *swcqe = *cqe;
- swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
- cq->sw_wptr++;
- cq->rptr++;
- cqe = cxio_next_hw_cqe(cq);
- }
-}
-
-static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
-{
- if (CQE_OPCODE(*cqe) == T3_TERMINATE)
- return 0;
-
- if ((CQE_OPCODE(*cqe) == T3_RDMA_WRITE) && RQ_TYPE(*cqe))
- return 0;
-
- if ((CQE_OPCODE(*cqe) == T3_READ_RESP) && SQ_TYPE(*cqe))
- return 0;
-
- if (CQE_SEND_OPCODE(*cqe) && RQ_TYPE(*cqe) &&
- Q_EMPTY(wq->rq_rptr, wq->rq_wptr))
- return 0;
-
- return 1;
-}
-
-void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
-{
- struct t3_cqe *cqe;
- u32 ptr;
-
- *count = 0;
- ptr = cq->sw_rptr;
- while (!Q_EMPTY(ptr, cq->sw_wptr)) {
- cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
- if ((SQ_TYPE(*cqe) ||
- ((CQE_OPCODE(*cqe) == T3_READ_RESP) && wq->oldest_read)) &&
- (CQE_QPID(*cqe) == wq->qpid))
- (*count)++;
- ptr++;
- }
- PDBG("%s cq %p count %d\n", __func__, cq, *count);
-}
-
-void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
-{
- struct t3_cqe *cqe;
- u32 ptr;
-
- *count = 0;
- PDBG("%s count zero %d\n", __func__, *count);
- ptr = cq->sw_rptr;
- while (!Q_EMPTY(ptr, cq->sw_wptr)) {
- cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
- if (RQ_TYPE(*cqe) && (CQE_OPCODE(*cqe) != T3_READ_RESP) &&
- (CQE_QPID(*cqe) == wq->qpid) && cqe_completes_wr(cqe, wq))
- (*count)++;
- ptr++;
- }
- PDBG("%s cq %p count %d\n", __func__, cq, *count);
-}
-
-static int cxio_hal_init_ctrl_cq(struct cxio_rdev *rdev_p)
-{
- struct rdma_cq_setup setup;
- setup.id = 0;
- setup.base_addr = 0; /* NULL address */
- setup.size = 1; /* enable the CQ */
- setup.credits = 0;
-
- /* force SGE to redirect to RspQ and interrupt */
- setup.credit_thres = 0;
- setup.ovfl_mode = 1;
- return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
-}
-
-static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
-{
- int err;
- u64 sge_cmd, ctx0, ctx1;
- u64 base_addr;
- struct t3_modify_qp_wr *wqe;
- struct sk_buff *skb;
-
- skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
- if (!skb) {
- PDBG("%s alloc_skb failed\n", __func__);
- return -ENOMEM;
- }
- err = cxio_hal_init_ctrl_cq(rdev_p);
- if (err) {
- PDBG("%s err %d initializing ctrl_cq\n", __func__, err);
- goto err;
- }
- rdev_p->ctrl_qp.workq = dma_alloc_coherent(
- &(rdev_p->rnic_info.pdev->dev),
- (1 << T3_CTRL_QP_SIZE_LOG2) *
- sizeof(union t3_wr),
- &(rdev_p->ctrl_qp.dma_addr),
- GFP_KERNEL);
- if (!rdev_p->ctrl_qp.workq) {
- PDBG("%s dma_alloc_coherent failed\n", __func__);
- err = -ENOMEM;
- goto err;
- }
- dma_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
- rdev_p->ctrl_qp.dma_addr);
- rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
- memset(rdev_p->ctrl_qp.workq, 0,
- (1 << T3_CTRL_QP_SIZE_LOG2) * sizeof(union t3_wr));
-
- mutex_init(&rdev_p->ctrl_qp.lock);
- init_waitqueue_head(&rdev_p->ctrl_qp.waitq);
-
- /* update HW Ctrl QP context */
- base_addr = rdev_p->ctrl_qp.dma_addr;
- base_addr >>= 12;
- ctx0 = (V_EC_SIZE((1 << T3_CTRL_QP_SIZE_LOG2)) |
- V_EC_BASE_LO((u32) base_addr & 0xffff));
- ctx0 <<= 32;
- ctx0 |= V_EC_CREDITS(FW_WR_NUM);
- base_addr >>= 16;
- ctx1 = (u32) base_addr;
- base_addr >>= 32;
- ctx1 |= ((u64) (V_EC_BASE_HI((u32) base_addr & 0xf) | V_EC_RESPQ(0) |
- V_EC_TYPE(0) | V_EC_GEN(1) |
- V_EC_UP_TOKEN(T3_CTL_QP_TID) | F_EC_VALID)) << 32;
- wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
- memset(wqe, 0, sizeof(*wqe));
- build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 0,
- T3_CTL_QP_TID, 7, T3_SOPEOP);
- wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
- sge_cmd = (3ULL << 56) | FW_RI_SGEEC_START << 8 | 3;
- wqe->sge_cmd = cpu_to_be64(sge_cmd);
- wqe->ctx1 = cpu_to_be64(ctx1);
- wqe->ctx0 = cpu_to_be64(ctx0);
- PDBG("CtrlQP dma_addr 0x%llx workq %p size %d\n",
- (unsigned long long) rdev_p->ctrl_qp.dma_addr,
- rdev_p->ctrl_qp.workq, 1 << T3_CTRL_QP_SIZE_LOG2);
- skb->priority = CPL_PRIORITY_CONTROL;
- return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
-err:
- kfree_skb(skb);
- return err;
-}
-
-static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
-{
- dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
- (1UL << T3_CTRL_QP_SIZE_LOG2)
- * sizeof(union t3_wr), rdev_p->ctrl_qp.workq,
- dma_unmap_addr(&rdev_p->ctrl_qp, mapping));
- return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID);
-}
-
-/* write len bytes of data into addr (32B aligned address)
- * If data is NULL, clear len bytes of memory to zero.
- * caller acquires the ctrl_qp lock before the call
- */
-static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
- u32 len, void *data)
-{
- u32 i, nr_wqe, copy_len;
- u8 *copy_data;
- u8 wr_len, utx_len; /* length in 8 byte flit */
- enum t3_wr_flags flag;
- __be64 *wqe;
- u64 utx_cmd;
- addr &= 0x7FFFFFF;
- nr_wqe = len % 96 ? len / 96 + 1 : len / 96; /* 96B max per WQE */
- PDBG("%s wptr 0x%x rptr 0x%x len %d, nr_wqe %d data %p addr 0x%0x\n",
- __func__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len,
- nr_wqe, data, addr);
- utx_len = 3; /* in 32B unit */
- for (i = 0; i < nr_wqe; i++) {
- if (Q_FULL(rdev_p->ctrl_qp.rptr, rdev_p->ctrl_qp.wptr,
- T3_CTRL_QP_SIZE_LOG2)) {
- PDBG("%s ctrl_qp full wtpr 0x%0x rptr 0x%0x, "
- "wait for more space i %d\n", __func__,
- rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, i);
- if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
- !Q_FULL(rdev_p->ctrl_qp.rptr,
- rdev_p->ctrl_qp.wptr,
- T3_CTRL_QP_SIZE_LOG2))) {
- PDBG("%s ctrl_qp workq interrupted\n",
- __func__);
- return -ERESTARTSYS;
- }
- PDBG("%s ctrl_qp wakeup, continue posting work request "
- "i %d\n", __func__, i);
- }
- wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
- (1 << T3_CTRL_QP_SIZE_LOG2)));
- flag = 0;
- if (i == (nr_wqe - 1)) {
- /* last WQE */
- flag = T3_COMPLETION_FLAG;
- if (len % 32)
- utx_len = len / 32 + 1;
- else
- utx_len = len / 32;
- }
-
- /*
- * Force a CQE to return the credit to the workq in case
- * we posted more than half the max QP size of WRs
- */
- if ((i != 0) &&
- (i % (((1 << T3_CTRL_QP_SIZE_LOG2)) >> 1) == 0)) {
- flag = T3_COMPLETION_FLAG;
- PDBG("%s force completion at i %d\n", __func__, i);
- }
-
- /* build the utx mem command */
- wqe += (sizeof(struct t3_bypass_wr) >> 3);
- utx_cmd = (T3_UTX_MEM_WRITE << 28) | (addr + i * 3);
- utx_cmd <<= 32;
- utx_cmd |= (utx_len << 28) | ((utx_len << 2) + 1);
- *wqe = cpu_to_be64(utx_cmd);
- wqe++;
- copy_data = (u8 *) data + i * 96;
- copy_len = len > 96 ? 96 : len;
-
- /* clear memory content if data is NULL */
- if (data)
- memcpy(wqe, copy_data, copy_len);
- else
- memset(wqe, 0, copy_len);
- if (copy_len % 32)
- memset(((u8 *) wqe) + copy_len, 0,
- 32 - (copy_len % 32));
- wr_len = ((sizeof(struct t3_bypass_wr)) >> 3) + 1 +
- (utx_len << 2);
- wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
- (1 << T3_CTRL_QP_SIZE_LOG2)));
-
- /* wptr in the WRID[31:0] */
- ((union t3_wrid *)(wqe+1))->id0.low = rdev_p->ctrl_qp.wptr;
-
- /*
- * This must be the last write with a memory barrier
- * for the genbit
- */
- build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_BP, flag,
- Q_GENBIT(rdev_p->ctrl_qp.wptr,
- T3_CTRL_QP_SIZE_LOG2), T3_CTRL_QP_ID,
- wr_len, T3_SOPEOP);
- if (flag == T3_COMPLETION_FLAG)
- ring_doorbell(rdev_p->ctrl_qp.doorbell, T3_CTRL_QP_ID);
- len -= 96;
- rdev_p->ctrl_qp.wptr++;
- }
- return 0;
-}
-
-/* IN: stag key, pdid, perm, zbva, to, len, page_size, pbl_size and pbl_addr
- * OUT: stag index
- * TBD: shared memory region support
- */
-static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
- u32 *stag, u8 stag_state, u32 pdid,
- enum tpt_mem_type type, enum tpt_mem_perm perm,
- u32 zbva, u64 to, u32 len, u8 page_size,
- u32 pbl_size, u32 pbl_addr)
-{
- int err;
- struct tpt_entry tpt;
- u32 stag_idx;
- u32 wptr;
-
- if (cxio_fatal_error(rdev_p))
- return -EIO;
-
- stag_state = stag_state > 0;
- stag_idx = (*stag) >> 8;
-
- if ((!reset_tpt_entry) && !(*stag != T3_STAG_UNSET)) {
- stag_idx = cxio_hal_get_stag(rdev_p->rscp);
- if (!stag_idx)
- return -ENOMEM;
- *stag = (stag_idx << 8) | ((*stag) & 0xFF);
- }
- PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
- __func__, stag_state, type, pdid, stag_idx);
-
- mutex_lock(&rdev_p->ctrl_qp.lock);
-
- /* write TPT entry */
- if (reset_tpt_entry)
- memset(&tpt, 0, sizeof(tpt));
- else {
- tpt.valid_stag_pdid = cpu_to_be32(F_TPT_VALID |
- V_TPT_STAG_KEY((*stag) & M_TPT_STAG_KEY) |
- V_TPT_STAG_STATE(stag_state) |
- V_TPT_STAG_TYPE(type) | V_TPT_PDID(pdid));
- BUG_ON(page_size >= 28);
- tpt.flags_pagesize_qpid = cpu_to_be32(V_TPT_PERM(perm) |
- ((perm & TPT_MW_BIND) ? F_TPT_MW_BIND_ENABLE : 0) |
- V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
- V_TPT_PAGE_SIZE(page_size));
- tpt.rsvd_pbl_addr = cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3));
- tpt.len = cpu_to_be32(len);
- tpt.va_hi = cpu_to_be32((u32) (to >> 32));
- tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL));
- tpt.rsvd_bind_cnt_or_pstag = 0;
- tpt.rsvd_pbl_size = cpu_to_be32(V_TPT_PBL_SIZE(pbl_size >> 2));
- }
- err = cxio_hal_ctrl_qp_write_mem(rdev_p,
- stag_idx +
- (rdev_p->rnic_info.tpt_base >> 5),
- sizeof(tpt), &tpt);
-
- /* release the stag index to free pool */
- if (reset_tpt_entry)
- cxio_hal_put_stag(rdev_p->rscp, stag_idx);
-
- wptr = rdev_p->ctrl_qp.wptr;
- mutex_unlock(&rdev_p->ctrl_qp.lock);
- if (!err)
- if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
- SEQ32_GE(rdev_p->ctrl_qp.rptr,
- wptr)))
- return -ERESTARTSYS;
- return err;
-}
-
-int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl,
- u32 pbl_addr, u32 pbl_size)
-{
- u32 wptr;
- int err;
-
- PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
- __func__, pbl_addr, rdev_p->rnic_info.pbl_base,
- pbl_size);
-
- mutex_lock(&rdev_p->ctrl_qp.lock);
- err = cxio_hal_ctrl_qp_write_mem(rdev_p, pbl_addr >> 5, pbl_size << 3,
- pbl);
- wptr = rdev_p->ctrl_qp.wptr;
- mutex_unlock(&rdev_p->ctrl_qp.lock);
- if (err)
- return err;
-
- if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
- SEQ32_GE(rdev_p->ctrl_qp.rptr,
- wptr)))
- return -ERESTARTSYS;
-
- return 0;
-}
-
-int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
- enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
- u8 page_size, u32 pbl_size, u32 pbl_addr)
-{
- *stag = T3_STAG_UNSET;
- return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
- zbva, to, len, page_size, pbl_size, pbl_addr);
-}
-
-int cxio_reregister_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
- enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
- u8 page_size, u32 pbl_size, u32 pbl_addr)
-{
- return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
- zbva, to, len, page_size, pbl_size, pbl_addr);
-}
-
-int cxio_dereg_mem(struct cxio_rdev *rdev_p, u32 stag, u32 pbl_size,
- u32 pbl_addr)
-{
- return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,
- pbl_size, pbl_addr);
-}
-
-int cxio_allocate_window(struct cxio_rdev *rdev_p, u32 * stag, u32 pdid)
-{
- *stag = T3_STAG_UNSET;
- return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_MW, 0, 0, 0ULL, 0, 0,
- 0, 0);
-}
-
-int cxio_deallocate_window(struct cxio_rdev *rdev_p, u32 stag)
-{
- return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,
- 0, 0);
-}
-
-int cxio_allocate_stag(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid, u32 pbl_size, u32 pbl_addr)
-{
- *stag = T3_STAG_UNSET;
- return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_NON_SHARED_MR,
- 0, 0, 0ULL, 0, 0, pbl_size, pbl_addr);
-}
-
-int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
-{
- struct t3_rdma_init_wr *wqe;
- struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_ATOMIC);
- if (!skb)
- return -ENOMEM;
- PDBG("%s rdev_p %p\n", __func__, rdev_p);
- wqe = (struct t3_rdma_init_wr *) __skb_put(skb, sizeof(*wqe));
- wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT));
- wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) |
- V_FW_RIWR_LEN(sizeof(*wqe) >> 3));
- wqe->wrid.id1 = 0;
- wqe->qpid = cpu_to_be32(attr->qpid);
- wqe->pdid = cpu_to_be32(attr->pdid);
- wqe->scqid = cpu_to_be32(attr->scqid);
- wqe->rcqid = cpu_to_be32(attr->rcqid);
- wqe->rq_addr = cpu_to_be32(attr->rq_addr - rdev_p->rnic_info.rqt_base);
- wqe->rq_size = cpu_to_be32(attr->rq_size);
- wqe->mpaattrs = attr->mpaattrs;
- wqe->qpcaps = attr->qpcaps;
- wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
- wqe->rqe_count = cpu_to_be16(attr->rqe_count);
- wqe->flags_rtr_type = cpu_to_be16(attr->flags |
- V_RTR_TYPE(attr->rtr_type) |
- V_CHAN(attr->chan));
- wqe->ord = cpu_to_be32(attr->ord);
- wqe->ird = cpu_to_be32(attr->ird);
- wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
- wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size);
- wqe->irs = cpu_to_be32(attr->irs);
- skb->priority = 0; /* 0=>ToeQ; 1=>CtrlQ */
- return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
-}
-
-void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
-{
- cxio_ev_cb = ev_cb;
-}
-
-void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
-{
- cxio_ev_cb = NULL;
-}
-
-static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)
-{
- static int cnt;
- struct cxio_rdev *rdev_p = NULL;
- struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
- PDBG("%d: %s cq_id 0x%x cq_ptr 0x%x genbit %0x overflow %0x an %0x"
- " se %0x notify %0x cqbranch %0x creditth %0x\n",
- cnt, __func__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg),
- RSPQ_GENBIT(rsp_msg), RSPQ_OVERFLOW(rsp_msg), RSPQ_AN(rsp_msg),
- RSPQ_SE(rsp_msg), RSPQ_NOTIFY(rsp_msg), RSPQ_CQBRANCH(rsp_msg),
- RSPQ_CREDIT_THRESH(rsp_msg));
- PDBG("CQE: QPID 0x%0x genbit %0x type 0x%0x status 0x%0x opcode %d "
- "len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
- CQE_QPID(rsp_msg->cqe), CQE_GENBIT(rsp_msg->cqe),
- CQE_TYPE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
- CQE_OPCODE(rsp_msg->cqe), CQE_LEN(rsp_msg->cqe),
- CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
- rdev_p = (struct cxio_rdev *)t3cdev_p->ulp;
- if (!rdev_p) {
- PDBG("%s called by t3cdev %p with null ulp\n", __func__,
- t3cdev_p);
- return 0;
- }
- if (CQE_QPID(rsp_msg->cqe) == T3_CTRL_QP_ID) {
- rdev_p->ctrl_qp.rptr = CQE_WRID_LOW(rsp_msg->cqe) + 1;
- wake_up_interruptible(&rdev_p->ctrl_qp.waitq);
- dev_kfree_skb_irq(skb);
- } else if (CQE_QPID(rsp_msg->cqe) == 0xfff8)
- dev_kfree_skb_irq(skb);
- else if (cxio_ev_cb)
- (*cxio_ev_cb) (rdev_p, skb);
- else
- dev_kfree_skb_irq(skb);
- cnt++;
- return 0;
-}
-
-/* Caller takes care of locking if needed */
-int cxio_rdev_open(struct cxio_rdev *rdev_p)
-{
- struct net_device *netdev_p = NULL;
- int err = 0;
- if (strlen(rdev_p->dev_name)) {
- if (cxio_hal_find_rdev_by_name(rdev_p->dev_name)) {
- return -EBUSY;
- }
- netdev_p = dev_get_by_name(&init_net, rdev_p->dev_name);
- if (!netdev_p) {
- return -EINVAL;
- }
- dev_put(netdev_p);
- } else if (rdev_p->t3cdev_p) {
- if (cxio_hal_find_rdev_by_t3cdev(rdev_p->t3cdev_p)) {
- return -EBUSY;
- }
- netdev_p = rdev_p->t3cdev_p->lldev;
- strncpy(rdev_p->dev_name, rdev_p->t3cdev_p->name,
- T3_MAX_DEV_NAME_LEN);
- } else {
- PDBG("%s t3cdev_p or dev_name must be set\n", __func__);
- return -EINVAL;
- }
-
- list_add_tail(&rdev_p->entry, &rdev_list);
-
- PDBG("%s opening rnic dev %s\n", __func__, rdev_p->dev_name);
- memset(&rdev_p->ctrl_qp, 0, sizeof(rdev_p->ctrl_qp));
- if (!rdev_p->t3cdev_p)
- rdev_p->t3cdev_p = dev2t3cdev(netdev_p);
- rdev_p->t3cdev_p->ulp = (void *) rdev_p;
-
- err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_EMBEDDED_INFO,
- &(rdev_p->fw_info));
- if (err) {
- printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
- __func__, rdev_p->t3cdev_p, err);
- goto err1;
- }
- if (G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers) != CXIO_FW_MAJ) {
- printk(KERN_ERR MOD "fatal firmware version mismatch: "
- "need version %u but adapter has version %u\n",
- CXIO_FW_MAJ,
- G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers));
- err = -EINVAL;
- goto err1;
- }
-
- err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_GET_PARAMS,
- &(rdev_p->rnic_info));
- if (err) {
- printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
- __func__, rdev_p->t3cdev_p, err);
- goto err1;
- }
- err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_PORTS,
- &(rdev_p->port_info));
- if (err) {
- printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
- __func__, rdev_p->t3cdev_p, err);
- goto err1;
- }
-
- /*
- * qpshift is the number of bits to shift the qpid left in order
- * to get the correct address of the doorbell for that qp.
- */
- cxio_init_ucontext(rdev_p, &rdev_p->uctx);
- rdev_p->qpshift = PAGE_SHIFT -
- ilog2(65536 >>
- ilog2(rdev_p->rnic_info.udbell_len >>
- PAGE_SHIFT));
- rdev_p->qpnr = rdev_p->rnic_info.udbell_len >> PAGE_SHIFT;
- rdev_p->qpmask = (65536 >> ilog2(rdev_p->qpnr)) - 1;
- PDBG("%s rnic %s info: tpt_base 0x%0x tpt_top 0x%0x num stags %d "
- "pbl_base 0x%0x pbl_top 0x%0x rqt_base 0x%0x, rqt_top 0x%0x\n",
- __func__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base,
- rdev_p->rnic_info.tpt_top, cxio_num_stags(rdev_p),
- rdev_p->rnic_info.pbl_base,
- rdev_p->rnic_info.pbl_top, rdev_p->rnic_info.rqt_base,
- rdev_p->rnic_info.rqt_top);
- PDBG("udbell_len 0x%0x udbell_physbase 0x%lx kdb_addr %p qpshift %lu "
- "qpnr %d qpmask 0x%x\n",
- rdev_p->rnic_info.udbell_len,
- rdev_p->rnic_info.udbell_physbase, rdev_p->rnic_info.kdb_addr,
- rdev_p->qpshift, rdev_p->qpnr, rdev_p->qpmask);
-
- err = cxio_hal_init_ctrl_qp(rdev_p);
- if (err) {
- printk(KERN_ERR "%s error %d initializing ctrl_qp.\n",
- __func__, err);
- goto err1;
- }
- err = cxio_hal_init_resource(rdev_p, cxio_num_stags(rdev_p), 0,
- 0, T3_MAX_NUM_QP, T3_MAX_NUM_CQ,
- T3_MAX_NUM_PD);
- if (err) {
- printk(KERN_ERR "%s error %d initializing hal resources.\n",
- __func__, err);
- goto err2;
- }
- err = cxio_hal_pblpool_create(rdev_p);
- if (err) {
- printk(KERN_ERR "%s error %d initializing pbl mem pool.\n",
- __func__, err);
- goto err3;
- }
- err = cxio_hal_rqtpool_create(rdev_p);
- if (err) {
- printk(KERN_ERR "%s error %d initializing rqt mem pool.\n",
- __func__, err);
- goto err4;
- }
- return 0;
-err4:
- cxio_hal_pblpool_destroy(rdev_p);
-err3:
- cxio_hal_destroy_resource(rdev_p->rscp);
-err2:
- cxio_hal_destroy_ctrl_qp(rdev_p);
-err1:
- rdev_p->t3cdev_p->ulp = NULL;
- list_del(&rdev_p->entry);
- return err;
-}
-
-void cxio_rdev_close(struct cxio_rdev *rdev_p)
-{
- if (rdev_p) {
- cxio_hal_pblpool_destroy(rdev_p);
- cxio_hal_rqtpool_destroy(rdev_p);
- list_del(&rdev_p->entry);
- cxio_hal_destroy_ctrl_qp(rdev_p);
- cxio_hal_destroy_resource(rdev_p->rscp);
- rdev_p->t3cdev_p->ulp = NULL;
- }
-}
-
-int __init cxio_hal_init(void)
-{
- if (cxio_hal_init_rhdl_resource(T3_MAX_NUM_RI))
- return -ENOMEM;
- t3_register_cpl_handler(CPL_ASYNC_NOTIF, cxio_hal_ev_handler);
- return 0;
-}
-
-void __exit cxio_hal_exit(void)
-{
- struct cxio_rdev *rdev, *tmp;
-
- t3_register_cpl_handler(CPL_ASYNC_NOTIF, NULL);
- list_for_each_entry_safe(rdev, tmp, &rdev_list, entry)
- cxio_rdev_close(rdev);
- cxio_hal_destroy_rhdl_resource();
-}
-
-static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
-{
- struct t3_swsq *sqp;
- __u32 ptr = wq->sq_rptr;
- int count = Q_COUNT(wq->sq_rptr, wq->sq_wptr);
-
- sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
- while (count--)
- if (!sqp->signaled) {
- ptr++;
- sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
- } else if (sqp->complete) {
-
- /*
- * Insert this completed cqe into the swcq.
- */
- PDBG("%s moving cqe into swcq sq idx %ld cq idx %ld\n",
- __func__, Q_PTR2IDX(ptr, wq->sq_size_log2),
- Q_PTR2IDX(cq->sw_wptr, cq->size_log2));
- sqp->cqe.header |= htonl(V_CQE_SWCQE(1));
- *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2))
- = sqp->cqe;
- cq->sw_wptr++;
- sqp->signaled = 0;
- break;
- } else
- break;
-}
-
-static void create_read_req_cqe(struct t3_wq *wq, struct t3_cqe *hw_cqe,
- struct t3_cqe *read_cqe)
-{
- read_cqe->u.scqe.wrid_hi = wq->oldest_read->sq_wptr;
- read_cqe->len = wq->oldest_read->read_len;
- read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(*hw_cqe)) |
- V_CQE_SWCQE(SW_CQE(*hw_cqe)) |
- V_CQE_OPCODE(T3_READ_REQ) |
- V_CQE_TYPE(1));
-}
-
-/*
- * Return a ptr to the next read wr in the SWSQ or NULL.
- */
-static void advance_oldest_read(struct t3_wq *wq)
-{
-
- u32 rptr = wq->oldest_read - wq->sq + 1;
- u32 wptr = Q_PTR2IDX(wq->sq_wptr, wq->sq_size_log2);
-
- while (Q_PTR2IDX(rptr, wq->sq_size_log2) != wptr) {
- wq->oldest_read = wq->sq + Q_PTR2IDX(rptr, wq->sq_size_log2);
-
- if (wq->oldest_read->opcode == T3_READ_REQ)
- return;
- rptr++;
- }
- wq->oldest_read = NULL;
-}
-
-/*
- * cxio_poll_cq
- *
- * Caller must:
- * check the validity of the first CQE,
- * supply the wq associated with the qpid.
- *
- * credit: cq credit to return to sge.
- * cqe_flushed: 1 iff the CQE is flushed.
- * cqe: copy of the polled CQE.
- *
- * return value:
- * 0 CQE returned,
- * -1 CQE skipped, try again.
- */
-int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
- u8 *cqe_flushed, u64 *cookie, u32 *credit)
-{
- int ret = 0;
- struct t3_cqe *hw_cqe, read_cqe;
-
- *cqe_flushed = 0;
- *credit = 0;
- hw_cqe = cxio_next_cqe(cq);
-
- PDBG("%s CQE OOO %d qpid 0x%0x genbit %d type %d status 0x%0x"
- " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
- __func__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe),
- CQE_GENBIT(*hw_cqe), CQE_TYPE(*hw_cqe), CQE_STATUS(*hw_cqe),
- CQE_OPCODE(*hw_cqe), CQE_LEN(*hw_cqe), CQE_WRID_HI(*hw_cqe),
- CQE_WRID_LOW(*hw_cqe));
-
- /*
- * skip cqe's not affiliated with a QP.
- */
- if (wq == NULL) {
- ret = -1;
- goto skip_cqe;
- }
-
- /*
- * Gotta tweak READ completions:
- * 1) the cqe doesn't contain the sq_wptr from the wr.
- * 2) opcode not reflected from the wr.
- * 3) read_len not reflected from the wr.
- * 4) cq_type is RQ_TYPE not SQ_TYPE.
- */
- if (RQ_TYPE(*hw_cqe) && (CQE_OPCODE(*hw_cqe) == T3_READ_RESP)) {
-
- /*
- * If this is an unsolicited read response, then the read
- * was generated by the kernel driver as part of peer-2-peer
- * connection setup. So ignore the completion.
- */
- if (!wq->oldest_read) {
- if (CQE_STATUS(*hw_cqe))
- wq->error = 1;
- ret = -1;
- goto skip_cqe;
- }
-
- /*
- * Don't write to the HWCQ, so create a new read req CQE
- * in local memory.
- */
- create_read_req_cqe(wq, hw_cqe, &read_cqe);
- hw_cqe = &read_cqe;
- advance_oldest_read(wq);
- }
-
- /*
- * T3A: Discard TERMINATE CQEs.
- */
- if (CQE_OPCODE(*hw_cqe) == T3_TERMINATE) {
- ret = -1;
- wq->error = 1;
- goto skip_cqe;
- }
-
- if (CQE_STATUS(*hw_cqe) || wq->error) {
- *cqe_flushed = wq->error;
- wq->error = 1;
-
- /*
- * T3A inserts errors into the CQE. We cannot return
- * these as work completions.
- */
- /* incoming write failures */
- if ((CQE_OPCODE(*hw_cqe) == T3_RDMA_WRITE)
- && RQ_TYPE(*hw_cqe)) {
- ret = -1;
- goto skip_cqe;
- }
- /* incoming read request failures */
- if ((CQE_OPCODE(*hw_cqe) == T3_READ_RESP) && SQ_TYPE(*hw_cqe)) {
- ret = -1;
- goto skip_cqe;
- }
-
- /* incoming SEND with no receive posted failures */
- if (CQE_SEND_OPCODE(*hw_cqe) && RQ_TYPE(*hw_cqe) &&
- Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
- ret = -1;
- goto skip_cqe;
- }
- BUG_ON((*cqe_flushed == 0) && !SW_CQE(*hw_cqe));
- goto proc_cqe;
- }
-
- /*
- * RECV completion.
- */
- if (RQ_TYPE(*hw_cqe)) {
-
- /*
- * HW only validates 4 bits of MSN. So we must validate that
- * the MSN in the SEND is the next expected MSN. If it's not,
- * then we complete this with TPT_ERR_MSN and mark the wq in
- * error.
- */
-
- if (Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
- wq->error = 1;
- ret = -1;
- goto skip_cqe;
- }
-
- if (unlikely((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1)))) {
- wq->error = 1;
- hw_cqe->header |= htonl(V_CQE_STATUS(TPT_ERR_MSN));
- goto proc_cqe;
- }
- goto proc_cqe;
- }
-
- /*
- * If we get here its a send completion.
- *
- * Handle out of order completion. These get stuffed
- * in the SW SQ. Then the SW SQ is walked to move any
- * now in-order completions into the SW CQ. This handles
- * 2 cases:
- * 1) reaping unsignaled WRs when the first subsequent
- * signaled WR is completed.
- * 2) out of order read completions.
- */
- if (!SW_CQE(*hw_cqe) && (CQE_WRID_SQ_WPTR(*hw_cqe) != wq->sq_rptr)) {
- struct t3_swsq *sqp;
-
- PDBG("%s out of order completion going in swsq at idx %ld\n",
- __func__,
- Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2));
- sqp = wq->sq +
- Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2);
- sqp->cqe = *hw_cqe;
- sqp->complete = 1;
- ret = -1;
- goto flush_wq;
- }
-
-proc_cqe:
- *cqe = *hw_cqe;
-
- /*
- * Reap the associated WR(s) that are freed up with this
- * completion.
- */
- if (SQ_TYPE(*hw_cqe)) {
- wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe);
- PDBG("%s completing sq idx %ld\n", __func__,
- Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2));
- *cookie = wq->sq[Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2)].wr_id;
- wq->sq_rptr++;
- } else {
- PDBG("%s completing rq idx %ld\n", __func__,
- Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
- *cookie = wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].wr_id;
- if (wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].pbl_addr)
- cxio_hal_pblpool_free(wq->rdev,
- wq->rq[Q_PTR2IDX(wq->rq_rptr,
- wq->rq_size_log2)].pbl_addr, T3_STAG0_PBL_SIZE);
- BUG_ON(Q_EMPTY(wq->rq_rptr, wq->rq_wptr));
- wq->rq_rptr++;
- }
-
-flush_wq:
- /*
- * Flush any completed cqes that are now in-order.
- */
- flush_completed_wrs(wq, cq);
-
-skip_cqe:
- if (SW_CQE(*hw_cqe)) {
- PDBG("%s cq %p cqid 0x%x skip sw cqe sw_rptr 0x%x\n",
- __func__, cq, cq->cqid, cq->sw_rptr);
- ++cq->sw_rptr;
- } else {
- PDBG("%s cq %p cqid 0x%x skip hw cqe rptr 0x%x\n",
- __func__, cq, cq->cqid, cq->rptr);
- ++cq->rptr;
-
- /*
- * T3A: compute credits.
- */
- if (((cq->rptr - cq->wptr) > (1 << (cq->size_log2 - 1)))
- || ((cq->rptr - cq->wptr) >= 128)) {
- *credit = cq->rptr - cq->wptr;
- cq->wptr = cq->rptr;
- }
- }
- return ret;
-}
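
The flush and poll paths above rely on free-running 32-bit queue pointers: the low size_log2 bits select the slot, the pointers are compared directly for empty/full tests, and a generation bit that flips on every wrap lets the consumer tell fresh entries from stale ones. The real helpers (Q_PTR2IDX, Q_GENBIT, Q_EMPTY, ...) live in cxio_wr.h, removed elsewhere in this series; the sketch below only restates the underlying arithmetic with hypothetical names:

/* Hypothetical restatement of the ring-pointer arithmetic; the exact macro
 * definitions (including the sense of the generation bit) are in cxio_wr.h.
 */
#define RING_IDX(ptr, size_log2)	((ptr) & ((1U << (size_log2)) - 1))
#define RING_WRAP_BIT(ptr, size_log2)	(((ptr) >> (size_log2)) & 1U)
#define RING_EMPTY(rptr, wptr)		((rptr) == (wptr))
#define RING_COUNT(rptr, wptr)		((wptr) - (rptr))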
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
deleted file mode 100644
index 78fbe9ffe7f0..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __CXIO_HAL_H__
-#define __CXIO_HAL_H__
-
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/kfifo.h>
-
-#include "t3_cpl.h"
-#include "t3cdev.h"
-#include "cxgb3_ctl_defs.h"
-#include "cxio_wr.h"
-
-#define T3_CTRL_QP_ID FW_RI_SGEEC_START
-#define T3_CTL_QP_TID FW_RI_TID_START
-#define T3_CTRL_QP_SIZE_LOG2 8
-#define T3_CTRL_CQ_ID 0
-
-#define T3_MAX_NUM_RI (1<<15)
-#define T3_MAX_NUM_QP (1<<15)
-#define T3_MAX_NUM_CQ (1<<15)
-#define T3_MAX_NUM_PD (1<<15)
-#define T3_MAX_PBL_SIZE 256
-#define T3_MAX_RQ_SIZE 1024
-#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
-#define T3_MAX_CQ_DEPTH 65536
-#define T3_MAX_NUM_STAG (1<<15)
-#define T3_MAX_MR_SIZE 0x100000000ULL
-#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
-
-#define T3_STAG_UNSET 0xffffffff
-
-#define T3_MAX_DEV_NAME_LEN 32
-
-#define CXIO_FW_MAJ 7
-
-struct cxio_hal_ctrl_qp {
- u32 wptr;
- u32 rptr;
- struct mutex lock; /* for the wptr, can sleep */
- wait_queue_head_t waitq;/* wait for RspQ/CQE msg */
- union t3_wr *workq; /* the work request queue */
- dma_addr_t dma_addr; /* pci bus address of the workq */
- DEFINE_DMA_UNMAP_ADDR(mapping);
- void __iomem *doorbell;
-};
-
-struct cxio_hal_resource {
- struct kfifo tpt_fifo;
- spinlock_t tpt_fifo_lock;
- struct kfifo qpid_fifo;
- spinlock_t qpid_fifo_lock;
- struct kfifo cqid_fifo;
- spinlock_t cqid_fifo_lock;
- struct kfifo pdid_fifo;
- spinlock_t pdid_fifo_lock;
-};
-
-struct cxio_qpid_list {
- struct list_head entry;
- u32 qpid;
-};
-
-struct cxio_ucontext {
- struct list_head qpids;
- struct mutex lock;
-};
-
-struct cxio_rdev {
- char dev_name[T3_MAX_DEV_NAME_LEN];
- struct t3cdev *t3cdev_p;
- struct rdma_info rnic_info;
- struct adap_ports port_info;
- struct cxio_hal_resource *rscp;
- struct cxio_hal_ctrl_qp ctrl_qp;
- void *ulp;
- unsigned long qpshift;
- u32 qpnr;
- u32 qpmask;
- struct cxio_ucontext uctx;
- struct gen_pool *pbl_pool;
- struct gen_pool *rqt_pool;
- struct list_head entry;
- struct ch_embedded_info fw_info;
- u32 flags;
-#define CXIO_ERROR_FATAL 1
-};
-
-static inline int cxio_fatal_error(struct cxio_rdev *rdev_p)
-{
- return rdev_p->flags & CXIO_ERROR_FATAL;
-}
-
-static inline int cxio_num_stags(struct cxio_rdev *rdev_p)
-{
- return min((int)T3_MAX_NUM_STAG, (int)((rdev_p->rnic_info.tpt_top - rdev_p->rnic_info.tpt_base) >> 5));
-}
-
-typedef void (*cxio_hal_ev_callback_func_t) (struct cxio_rdev * rdev_p,
- struct sk_buff * skb);
-
-#define RSPQ_CQID(rsp) (be32_to_cpu(rsp->cq_ptrid) & 0xffff)
-#define RSPQ_CQPTR(rsp) ((be32_to_cpu(rsp->cq_ptrid) >> 16) & 0xffff)
-#define RSPQ_GENBIT(rsp) ((be32_to_cpu(rsp->flags) >> 16) & 1)
-#define RSPQ_OVERFLOW(rsp) ((be32_to_cpu(rsp->flags) >> 17) & 1)
-#define RSPQ_AN(rsp) ((be32_to_cpu(rsp->flags) >> 18) & 1)
-#define RSPQ_SE(rsp) ((be32_to_cpu(rsp->flags) >> 19) & 1)
-#define RSPQ_NOTIFY(rsp) ((be32_to_cpu(rsp->flags) >> 20) & 1)
-#define RSPQ_CQBRANCH(rsp) ((be32_to_cpu(rsp->flags) >> 21) & 1)
-#define RSPQ_CREDIT_THRESH(rsp) ((be32_to_cpu(rsp->flags) >> 22) & 1)
-
-struct respQ_msg_t {
- __be32 flags; /* flit 0 */
- __be32 cq_ptrid;
- __be64 rsvd; /* flit 1 */
- struct t3_cqe cqe; /* flits 2-3 */
-};
-
-enum t3_cq_opcode {
- CQ_ARM_AN = 0x2,
- CQ_ARM_SE = 0x6,
- CQ_FORCE_AN = 0x3,
- CQ_CREDIT_UPDATE = 0x7
-};
-
-int cxio_rdev_open(struct cxio_rdev *rdev);
-void cxio_rdev_close(struct cxio_rdev *rdev);
-int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq,
- enum t3_cq_opcode op, u32 credit);
-int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq, int kernel);
-int cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
-int cxio_resize_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
-void cxio_release_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
-void cxio_init_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
-int cxio_create_qp(struct cxio_rdev *rdev, u32 kernel_domain, struct t3_wq *wq,
- struct cxio_ucontext *uctx);
-int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq,
- struct cxio_ucontext *uctx);
-int cxio_peek_cq(struct t3_wq *wr, struct t3_cq *cq, int opcode);
-int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl,
- u32 pbl_addr, u32 pbl_size);
-int cxio_register_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
- enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
- u8 page_size, u32 pbl_size, u32 pbl_addr);
-int cxio_reregister_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
- enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
- u8 page_size, u32 pbl_size, u32 pbl_addr);
-int cxio_dereg_mem(struct cxio_rdev *rdev, u32 stag, u32 pbl_size,
- u32 pbl_addr);
-int cxio_allocate_window(struct cxio_rdev *rdev, u32 * stag, u32 pdid);
-int cxio_allocate_stag(struct cxio_rdev *rdev, u32 *stag, u32 pdid, u32 pbl_size, u32 pbl_addr);
-int cxio_deallocate_window(struct cxio_rdev *rdev, u32 stag);
-int cxio_rdma_init(struct cxio_rdev *rdev, struct t3_rdma_init_attr *attr);
-void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb);
-void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb);
-u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp);
-void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid);
-int __init cxio_hal_init(void);
-void __exit cxio_hal_exit(void);
-int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count);
-int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count);
-void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
-void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
-void cxio_flush_hw_cq(struct t3_cq *cq);
-int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
- u8 *cqe_flushed, u64 *cookie, u32 *credit);
-int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb);
-
-#define MOD "iw_cxgb3: "
-#define PDBG(fmt, args...) pr_debug(MOD fmt, ## args)
-
-#ifdef DEBUG
-void cxio_dump_tpt(struct cxio_rdev *rev, u32 stag);
-void cxio_dump_pbl(struct cxio_rdev *rev, u32 pbl_addr, uint len, u8 shift);
-void cxio_dump_wqe(union t3_wr *wqe);
-void cxio_dump_wce(struct t3_cqe *wce);
-void cxio_dump_rqt(struct cxio_rdev *rdev, u32 hwtid, int nents);
-void cxio_dump_tcb(struct cxio_rdev *rdev, u32 hwtid);
-#endif
-
-#endif
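
The RSPQ_* accessors above unpack the big-endian flags and cq_ptrid words of a struct respQ_msg_t, and are only ever used together in the event handler's debug print. A minimal, hypothetical decode helper showing the intended combination:

static void print_respq_msg(const struct respQ_msg_t *rsp_msg)
{
	/* Each accessor does a be32_to_cpu() plus shift/mask internally. */
	pr_debug("cqid 0x%x cqptr 0x%x genbit %u overflow %u an %u se %u\n",
		 RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg),
		 RSPQ_GENBIT(rsp_msg), RSPQ_OVERFLOW(rsp_msg),
		 RSPQ_AN(rsp_msg), RSPQ_SE(rsp_msg));
}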
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c
deleted file mode 100644
index c40088ecf9f3..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.c
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/* Crude resource management */
-#include <linux/kernel.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-#include <linux/kfifo.h>
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include "cxio_resource.h"
-#include "cxio_hal.h"
-
-static struct kfifo rhdl_fifo;
-static spinlock_t rhdl_fifo_lock;
-
-#define RANDOM_SIZE 16
-
-static int __cxio_init_resource_fifo(struct kfifo *fifo,
- spinlock_t *fifo_lock,
- u32 nr, u32 skip_low,
- u32 skip_high,
- int random)
-{
- u32 i, j, entry = 0, idx;
- u32 random_bytes;
- u32 rarray[16];
- spin_lock_init(fifo_lock);
-
- if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
- return -ENOMEM;
-
- for (i = 0; i < skip_low + skip_high; i++)
- kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
- if (random) {
- j = 0;
- random_bytes = prandom_u32();
- for (i = 0; i < RANDOM_SIZE; i++)
- rarray[i] = i + skip_low;
- for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
- if (j >= RANDOM_SIZE) {
- j = 0;
- random_bytes = prandom_u32();
- }
- idx = (random_bytes >> (j * 2)) & 0xF;
- kfifo_in(fifo,
- (unsigned char *) &rarray[idx],
- sizeof(u32));
- rarray[idx] = i;
- j++;
- }
- for (i = 0; i < RANDOM_SIZE; i++)
- kfifo_in(fifo,
- (unsigned char *) &rarray[i],
- sizeof(u32));
- } else
- for (i = skip_low; i < nr - skip_high; i++)
- kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));
-
- for (i = 0; i < skip_low + skip_high; i++)
- if (kfifo_out_locked(fifo, (unsigned char *) &entry,
- sizeof(u32), fifo_lock) != sizeof(u32))
- break;
- return 0;
-}
-
-static int cxio_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock,
- u32 nr, u32 skip_low, u32 skip_high)
-{
- return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
- skip_high, 0));
-}
-
-static int cxio_init_resource_fifo_random(struct kfifo *fifo,
- spinlock_t * fifo_lock,
- u32 nr, u32 skip_low, u32 skip_high)
-{
-
- return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
- skip_high, 1));
-}
-
-static int cxio_init_qpid_fifo(struct cxio_rdev *rdev_p)
-{
- u32 i;
-
- spin_lock_init(&rdev_p->rscp->qpid_fifo_lock);
-
- if (kfifo_alloc(&rdev_p->rscp->qpid_fifo, T3_MAX_NUM_QP * sizeof(u32),
- GFP_KERNEL))
- return -ENOMEM;
-
- for (i = 16; i < T3_MAX_NUM_QP; i++)
- if (!(i & rdev_p->qpmask))
- kfifo_in(&rdev_p->rscp->qpid_fifo,
- (unsigned char *) &i, sizeof(u32));
- return 0;
-}
-
-int cxio_hal_init_rhdl_resource(u32 nr_rhdl)
-{
- return cxio_init_resource_fifo(&rhdl_fifo, &rhdl_fifo_lock, nr_rhdl, 1,
- 0);
-}
-
-void cxio_hal_destroy_rhdl_resource(void)
-{
- kfifo_free(&rhdl_fifo);
-}
-
-/* nr_* must be power of 2 */
-int cxio_hal_init_resource(struct cxio_rdev *rdev_p,
- u32 nr_tpt, u32 nr_pbl,
- u32 nr_rqt, u32 nr_qpid, u32 nr_cqid, u32 nr_pdid)
-{
- int err = 0;
- struct cxio_hal_resource *rscp;
-
- rscp = kmalloc(sizeof(*rscp), GFP_KERNEL);
- if (!rscp)
- return -ENOMEM;
- rdev_p->rscp = rscp;
- err = cxio_init_resource_fifo_random(&rscp->tpt_fifo,
- &rscp->tpt_fifo_lock,
- nr_tpt, 1, 0);
- if (err)
- goto tpt_err;
- err = cxio_init_qpid_fifo(rdev_p);
- if (err)
- goto qpid_err;
- err = cxio_init_resource_fifo(&rscp->cqid_fifo, &rscp->cqid_fifo_lock,
- nr_cqid, 1, 0);
- if (err)
- goto cqid_err;
- err = cxio_init_resource_fifo(&rscp->pdid_fifo, &rscp->pdid_fifo_lock,
- nr_pdid, 1, 0);
- if (err)
- goto pdid_err;
- return 0;
-pdid_err:
- kfifo_free(&rscp->cqid_fifo);
-cqid_err:
- kfifo_free(&rscp->qpid_fifo);
-qpid_err:
- kfifo_free(&rscp->tpt_fifo);
-tpt_err:
- return -ENOMEM;
-}
-
-/*
- * returns 0 if no resource available
- */
-static u32 cxio_hal_get_resource(struct kfifo *fifo, spinlock_t * lock)
-{
- u32 entry;
- if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
- return entry;
- else
- return 0; /* fifo empty */
-}
-
-static void cxio_hal_put_resource(struct kfifo *fifo, spinlock_t * lock,
- u32 entry)
-{
- BUG_ON(
- kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock)
- == 0);
-}
-
-u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp)
-{
- return cxio_hal_get_resource(&rscp->tpt_fifo, &rscp->tpt_fifo_lock);
-}
-
-void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag)
-{
- cxio_hal_put_resource(&rscp->tpt_fifo, &rscp->tpt_fifo_lock, stag);
-}
-
-u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp)
-{
- u32 qpid = cxio_hal_get_resource(&rscp->qpid_fifo,
- &rscp->qpid_fifo_lock);
- PDBG("%s qpid 0x%x\n", __func__, qpid);
- return qpid;
-}
-
-void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid)
-{
- PDBG("%s qpid 0x%x\n", __func__, qpid);
- cxio_hal_put_resource(&rscp->qpid_fifo, &rscp->qpid_fifo_lock, qpid);
-}
-
-u32 cxio_hal_get_cqid(struct cxio_hal_resource *rscp)
-{
- return cxio_hal_get_resource(&rscp->cqid_fifo, &rscp->cqid_fifo_lock);
-}
-
-void cxio_hal_put_cqid(struct cxio_hal_resource *rscp, u32 cqid)
-{
- cxio_hal_put_resource(&rscp->cqid_fifo, &rscp->cqid_fifo_lock, cqid);
-}
-
-u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp)
-{
- return cxio_hal_get_resource(&rscp->pdid_fifo, &rscp->pdid_fifo_lock);
-}
-
-void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid)
-{
- cxio_hal_put_resource(&rscp->pdid_fifo, &rscp->pdid_fifo_lock, pdid);
-}
-
-void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp)
-{
- kfifo_free(&rscp->tpt_fifo);
- kfifo_free(&rscp->cqid_fifo);
- kfifo_free(&rscp->qpid_fifo);
- kfifo_free(&rscp->pdid_fifo);
- kfree(rscp);
-}
-
-/*
- * PBL Memory Manager. Uses Linux generic allocator.
- */
-
-#define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */
-
-u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size)
-{
- unsigned long addr = gen_pool_alloc(rdev_p->pbl_pool, size);
- PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
- return (u32)addr;
-}
-
-void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
-{
- PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
- gen_pool_free(rdev_p->pbl_pool, (unsigned long)addr, size);
-}
-
-int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p)
-{
- unsigned pbl_start, pbl_chunk;
-
- rdev_p->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
- if (!rdev_p->pbl_pool)
- return -ENOMEM;
-
- pbl_start = rdev_p->rnic_info.pbl_base;
- pbl_chunk = rdev_p->rnic_info.pbl_top - pbl_start + 1;
-
- while (pbl_start < rdev_p->rnic_info.pbl_top) {
- pbl_chunk = min(rdev_p->rnic_info.pbl_top - pbl_start + 1,
- pbl_chunk);
- if (gen_pool_add(rdev_p->pbl_pool, pbl_start, pbl_chunk, -1)) {
- PDBG("%s failed to add PBL chunk (%x/%x)\n",
- __func__, pbl_start, pbl_chunk);
- if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
- printk(KERN_WARNING MOD "%s: Failed to add all PBL chunks (%x/%x)\n",
- __func__, pbl_start, rdev_p->rnic_info.pbl_top - pbl_start);
- return 0;
- }
- pbl_chunk >>= 1;
- } else {
- PDBG("%s added PBL chunk (%x/%x)\n",
- __func__, pbl_start, pbl_chunk);
- pbl_start += pbl_chunk;
- }
- }
-
- return 0;
-}
-
-void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p)
-{
- gen_pool_destroy(rdev_p->pbl_pool);
-}
-
-/*
- * RQT Memory Manager. Uses Linux generic allocator.
- */
-
-#define MIN_RQT_SHIFT 10 /* 1KB == min RQT size (16 entries) */
-#define RQT_CHUNK 2*1024*1024
-
-u32 cxio_hal_rqtpool_alloc(struct cxio_rdev *rdev_p, int size)
-{
- unsigned long addr = gen_pool_alloc(rdev_p->rqt_pool, size << 6);
- PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
- return (u32)addr;
-}
-
-void cxio_hal_rqtpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
-{
- PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
- gen_pool_free(rdev_p->rqt_pool, (unsigned long)addr, size << 6);
-}
-
-int cxio_hal_rqtpool_create(struct cxio_rdev *rdev_p)
-{
- unsigned long i;
- rdev_p->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
- if (rdev_p->rqt_pool)
- for (i = rdev_p->rnic_info.rqt_base;
- i <= rdev_p->rnic_info.rqt_top - RQT_CHUNK + 1;
- i += RQT_CHUNK)
- gen_pool_add(rdev_p->rqt_pool, i, RQT_CHUNK, -1);
- return rdev_p->rqt_pool ? 0 : -ENOMEM;
-}
-
-void cxio_hal_rqtpool_destroy(struct cxio_rdev *rdev_p)
-{
- gen_pool_destroy(rdev_p->rqt_pool);
-}
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.h b/drivers/infiniband/hw/cxgb3/cxio_resource.h
deleted file mode 100644
index a2703a3d882d..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __CXIO_RESOURCE_H__
-#define __CXIO_RESOURCE_H__
-
-#include <linux/kernel.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-#include <linux/kfifo.h>
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include <linux/genalloc.h>
-#include "cxio_hal.h"
-
-extern int cxio_hal_init_rhdl_resource(u32 nr_rhdl);
-extern void cxio_hal_destroy_rhdl_resource(void);
-extern int cxio_hal_init_resource(struct cxio_rdev *rdev_p,
- u32 nr_tpt, u32 nr_pbl,
- u32 nr_rqt, u32 nr_qpid, u32 nr_cqid,
- u32 nr_pdid);
-extern u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp);
-extern void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag);
-extern u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp);
-extern void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid);
-extern u32 cxio_hal_get_cqid(struct cxio_hal_resource *rscp);
-extern void cxio_hal_put_cqid(struct cxio_hal_resource *rscp, u32 cqid);
-extern void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp);
-
-#define PBL_OFF(rdev_p, a) ( (a) - (rdev_p)->rnic_info.pbl_base )
-extern int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p);
-extern void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p);
-extern u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size);
-extern void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size);
-
-#define RQT_OFF(rdev_p, a) ( (a) - (rdev_p)->rnic_info.rqt_base )
-extern int cxio_hal_rqtpool_create(struct cxio_rdev *rdev_p);
-extern void cxio_hal_rqtpool_destroy(struct cxio_rdev *rdev_p);
-extern u32 cxio_hal_rqtpool_alloc(struct cxio_rdev *rdev_p, int size);
-extern void cxio_hal_rqtpool_free(struct cxio_rdev *rdev_p, u32 addr, int size);
-#endif
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
deleted file mode 100644
index 83d2e19d31ae..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ /dev/null
@@ -1,802 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __CXIO_WR_H__
-#define __CXIO_WR_H__
-
-#include <asm/io.h>
-#include <linux/pci.h>
-#include <linux/timer.h>
-#include "firmware_exports.h"
-
-#define T3_MAX_SGE 4
-#define T3_MAX_INLINE 64
-#define T3_STAG0_PBL_SIZE (2 * T3_MAX_SGE << 3)
-#define T3_STAG0_MAX_PBE_LEN (128 * 1024 * 1024)
-#define T3_STAG0_PAGE_SHIFT 15
-
-#define Q_EMPTY(rptr,wptr) ((rptr)==(wptr))
-#define Q_FULL(rptr,wptr,size_log2) ( (((wptr)-(rptr))>>(size_log2)) && \
- ((rptr)!=(wptr)) )
-#define Q_GENBIT(ptr,size_log2) (!(((ptr)>>size_log2)&0x1))
-#define Q_FREECNT(rptr,wptr,size_log2) ((1UL<<size_log2)-((wptr)-(rptr)))
-#define Q_COUNT(rptr,wptr) ((wptr)-(rptr))
-#define Q_PTR2IDX(ptr,size_log2) (ptr & ((1UL<<size_log2)-1))
-
-static inline void ring_doorbell(void __iomem *doorbell, u32 qpid)
-{
- writel(((1<<31) | qpid), doorbell);
-}
-
-#define SEQ32_GE(x,y) (!( (((u32) (x)) - ((u32) (y))) & 0x80000000 ))
-
-enum t3_wr_flags {
- T3_COMPLETION_FLAG = 0x01,
- T3_NOTIFY_FLAG = 0x02,
- T3_SOLICITED_EVENT_FLAG = 0x04,
- T3_READ_FENCE_FLAG = 0x08,
- T3_LOCAL_FENCE_FLAG = 0x10
-} __attribute__ ((packed));
-
-enum t3_wr_opcode {
- T3_WR_BP = FW_WROPCODE_RI_BYPASS,
- T3_WR_SEND = FW_WROPCODE_RI_SEND,
- T3_WR_WRITE = FW_WROPCODE_RI_RDMA_WRITE,
- T3_WR_READ = FW_WROPCODE_RI_RDMA_READ,
- T3_WR_INV_STAG = FW_WROPCODE_RI_LOCAL_INV,
- T3_WR_BIND = FW_WROPCODE_RI_BIND_MW,
- T3_WR_RCV = FW_WROPCODE_RI_RECEIVE,
- T3_WR_INIT = FW_WROPCODE_RI_RDMA_INIT,
- T3_WR_QP_MOD = FW_WROPCODE_RI_MODIFY_QP,
- T3_WR_FASTREG = FW_WROPCODE_RI_FASTREGISTER_MR
-} __attribute__ ((packed));
-
-enum t3_rdma_opcode {
- T3_RDMA_WRITE, /* IETF RDMAP v1.0 ... */
- T3_READ_REQ,
- T3_READ_RESP,
- T3_SEND,
- T3_SEND_WITH_INV,
- T3_SEND_WITH_SE,
- T3_SEND_WITH_SE_INV,
- T3_TERMINATE,
- T3_RDMA_INIT, /* CHELSIO RI specific ... */
- T3_BIND_MW,
- T3_FAST_REGISTER,
- T3_LOCAL_INV,
- T3_QP_MOD,
- T3_BYPASS,
- T3_RDMA_READ_REQ_WITH_INV,
-} __attribute__ ((packed));
-
-static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop)
-{
- switch (wrop) {
- case T3_WR_BP: return T3_BYPASS;
- case T3_WR_SEND: return T3_SEND;
- case T3_WR_WRITE: return T3_RDMA_WRITE;
- case T3_WR_READ: return T3_READ_REQ;
- case T3_WR_INV_STAG: return T3_LOCAL_INV;
- case T3_WR_BIND: return T3_BIND_MW;
- case T3_WR_INIT: return T3_RDMA_INIT;
- case T3_WR_QP_MOD: return T3_QP_MOD;
- case T3_WR_FASTREG: return T3_FAST_REGISTER;
- default: break;
- }
- return -1;
-}
-
-
-/* Work request id */
-union t3_wrid {
- struct {
- u32 hi;
- u32 low;
- } id0;
- u64 id1;
-};
-
-#define WRID(wrid) (wrid.id1)
-#define WRID_GEN(wrid) (wrid.id0.wr_gen)
-#define WRID_IDX(wrid) (wrid.id0.wr_idx)
-#define WRID_LO(wrid) (wrid.id0.wr_lo)
-
-struct fw_riwrh {
- __be32 op_seop_flags;
- __be32 gen_tid_len;
-};
-
-#define S_FW_RIWR_OP 24
-#define M_FW_RIWR_OP 0xff
-#define V_FW_RIWR_OP(x) ((x) << S_FW_RIWR_OP)
-#define G_FW_RIWR_OP(x) ((((x) >> S_FW_RIWR_OP)) & M_FW_RIWR_OP)
-
-#define S_FW_RIWR_SOPEOP 22
-#define M_FW_RIWR_SOPEOP 0x3
-#define V_FW_RIWR_SOPEOP(x) ((x) << S_FW_RIWR_SOPEOP)
-
-#define S_FW_RIWR_FLAGS 8
-#define M_FW_RIWR_FLAGS 0x3fffff
-#define V_FW_RIWR_FLAGS(x) ((x) << S_FW_RIWR_FLAGS)
-#define G_FW_RIWR_FLAGS(x) ((((x) >> S_FW_RIWR_FLAGS)) & M_FW_RIWR_FLAGS)
-
-#define S_FW_RIWR_TID 8
-#define V_FW_RIWR_TID(x) ((x) << S_FW_RIWR_TID)
-
-#define S_FW_RIWR_LEN 0
-#define V_FW_RIWR_LEN(x) ((x) << S_FW_RIWR_LEN)
-
-#define S_FW_RIWR_GEN 31
-#define V_FW_RIWR_GEN(x) ((x) << S_FW_RIWR_GEN)
-
-struct t3_sge {
- __be32 stag;
- __be32 len;
- __be64 to;
-};
-
-/* If num_sgle is zero, flit 5+ contains immediate data.*/
-struct t3_send_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
-
- u8 rdmaop; /* 2 */
- u8 reserved[3];
- __be32 rem_stag;
- __be32 plen; /* 3 */
- __be32 num_sgle;
- struct t3_sge sgl[T3_MAX_SGE]; /* 4+ */
-};
-
-#define T3_MAX_FASTREG_DEPTH 10
-#define T3_MAX_FASTREG_FRAG 10
-
-struct t3_fastreg_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- __be32 stag; /* 2 */
- __be32 len;
- __be32 va_base_hi; /* 3 */
- __be32 va_base_lo_fbo;
- __be32 page_type_perms; /* 4 */
- __be32 reserved1;
- __be64 pbl_addrs[0]; /* 5+ */
-};
-
-/*
- * If a fastreg wr spans multiple wqes, then the 2nd fragment looks like this.
- */
-struct t3_pbl_frag {
- struct fw_riwrh wrh; /* 0 */
- __be64 pbl_addrs[14]; /* 1..14 */
-};
-
-#define S_FR_PAGE_COUNT 24
-#define M_FR_PAGE_COUNT 0xff
-#define V_FR_PAGE_COUNT(x) ((x) << S_FR_PAGE_COUNT)
-#define G_FR_PAGE_COUNT(x) ((((x) >> S_FR_PAGE_COUNT)) & M_FR_PAGE_COUNT)
-
-#define S_FR_PAGE_SIZE 16
-#define M_FR_PAGE_SIZE 0x1f
-#define V_FR_PAGE_SIZE(x) ((x) << S_FR_PAGE_SIZE)
-#define G_FR_PAGE_SIZE(x) ((((x) >> S_FR_PAGE_SIZE)) & M_FR_PAGE_SIZE)
-
-#define S_FR_TYPE 8
-#define M_FR_TYPE 0x1
-#define V_FR_TYPE(x) ((x) << S_FR_TYPE)
-#define G_FR_TYPE(x) ((((x) >> S_FR_TYPE)) & M_FR_TYPE)
-
-#define S_FR_PERMS 0
-#define M_FR_PERMS 0xff
-#define V_FR_PERMS(x) ((x) << S_FR_PERMS)
-#define G_FR_PERMS(x) ((((x) >> S_FR_PERMS)) & M_FR_PERMS)
-
-struct t3_local_inv_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- __be32 stag; /* 2 */
- __be32 reserved;
-};
-
-struct t3_rdma_write_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- u8 rdmaop; /* 2 */
- u8 reserved[3];
- __be32 stag_sink;
- __be64 to_sink; /* 3 */
- __be32 plen; /* 4 */
- __be32 num_sgle;
- struct t3_sge sgl[T3_MAX_SGE]; /* 5+ */
-};
-
-struct t3_rdma_read_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- u8 rdmaop; /* 2 */
- u8 local_inv;
- u8 reserved[2];
- __be32 rem_stag;
- __be64 rem_to; /* 3 */
- __be32 local_stag; /* 4 */
- __be32 local_len;
- __be64 local_to; /* 5 */
-};
-
-struct t3_bind_mw_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- u16 reserved; /* 2 */
- u8 type;
- u8 perms;
- __be32 mr_stag;
- __be32 mw_stag; /* 3 */
- __be32 mw_len;
- __be64 mw_va; /* 4 */
- __be32 mr_pbl_addr; /* 5 */
- u8 reserved2[3];
- u8 mr_pagesz;
-};
-
-struct t3_receive_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- u8 pagesz[T3_MAX_SGE];
- __be32 num_sgle; /* 2 */
- struct t3_sge sgl[T3_MAX_SGE]; /* 3+ */
- __be32 pbl_addr[T3_MAX_SGE];
-};
-
-struct t3_bypass_wr {
- struct fw_riwrh wrh;
- union t3_wrid wrid; /* 1 */
-};
-
-struct t3_modify_qp_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- __be32 flags; /* 2 */
- __be32 quiesce; /* 2 */
- __be32 max_ird; /* 3 */
- __be32 max_ord; /* 3 */
- __be64 sge_cmd; /* 4 */
- __be64 ctx1; /* 5 */
- __be64 ctx0; /* 6 */
-};
-
-enum t3_modify_qp_flags {
- MODQP_QUIESCE = 0x01,
- MODQP_MAX_IRD = 0x02,
- MODQP_MAX_ORD = 0x04,
- MODQP_WRITE_EC = 0x08,
- MODQP_READ_EC = 0x10,
-};
-
-
-enum t3_mpa_attrs {
- uP_RI_MPA_RX_MARKER_ENABLE = 0x1,
- uP_RI_MPA_TX_MARKER_ENABLE = 0x2,
- uP_RI_MPA_CRC_ENABLE = 0x4,
- uP_RI_MPA_IETF_ENABLE = 0x8
-} __attribute__ ((packed));
-
-enum t3_qp_caps {
- uP_RI_QP_RDMA_READ_ENABLE = 0x01,
- uP_RI_QP_RDMA_WRITE_ENABLE = 0x02,
- uP_RI_QP_BIND_ENABLE = 0x04,
- uP_RI_QP_FAST_REGISTER_ENABLE = 0x08,
- uP_RI_QP_STAG0_ENABLE = 0x10
-} __attribute__ ((packed));
-
-enum rdma_init_rtr_types {
- RTR_READ = 1,
- RTR_WRITE = 2,
- RTR_SEND = 3,
-};
-
-#define S_RTR_TYPE 2
-#define M_RTR_TYPE 0x3
-#define V_RTR_TYPE(x) ((x) << S_RTR_TYPE)
-#define G_RTR_TYPE(x) ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)
-
-#define S_CHAN 4
-#define M_CHAN 0x3
-#define V_CHAN(x) ((x) << S_CHAN)
-#define G_CHAN(x) ((((x) >> S_CHAN)) & M_CHAN)
-
-struct t3_rdma_init_attr {
- u32 tid;
- u32 qpid;
- u32 pdid;
- u32 scqid;
- u32 rcqid;
- u32 rq_addr;
- u32 rq_size;
- enum t3_mpa_attrs mpaattrs;
- enum t3_qp_caps qpcaps;
- u16 tcp_emss;
- u32 ord;
- u32 ird;
- u64 qp_dma_addr;
- u32 qp_dma_size;
- enum rdma_init_rtr_types rtr_type;
- u16 flags;
- u16 rqe_count;
- u32 irs;
- u32 chan;
-};
-
-struct t3_rdma_init_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- __be32 qpid; /* 2 */
- __be32 pdid;
- __be32 scqid; /* 3 */
- __be32 rcqid;
- __be32 rq_addr; /* 4 */
- __be32 rq_size;
- u8 mpaattrs; /* 5 */
- u8 qpcaps;
- __be16 ulpdu_size;
- __be16 flags_rtr_type;
- __be16 rqe_count;
- __be32 ord; /* 6 */
- __be32 ird;
- __be64 qp_dma_addr; /* 7 */
- __be32 qp_dma_size; /* 8 */
- __be32 irs;
-};
-
-struct t3_genbit {
- u64 flit[15];
- __be64 genbit;
-};
-
-struct t3_wq_in_err {
- u64 flit[13];
- u64 err;
-};
-
-enum rdma_init_wr_flags {
- MPA_INITIATOR = (1<<0),
- PRIV_QP = (1<<1),
-};
-
-union t3_wr {
- struct t3_send_wr send;
- struct t3_rdma_write_wr write;
- struct t3_rdma_read_wr read;
- struct t3_receive_wr recv;
- struct t3_fastreg_wr fastreg;
- struct t3_pbl_frag pbl_frag;
- struct t3_local_inv_wr local_inv;
- struct t3_bind_mw_wr bind;
- struct t3_bypass_wr bypass;
- struct t3_rdma_init_wr init;
- struct t3_modify_qp_wr qp_mod;
- struct t3_genbit genbit;
- struct t3_wq_in_err wq_in_err;
- __be64 flit[16];
-};
-
-#define T3_SQ_CQE_FLIT 13
-#define T3_SQ_COOKIE_FLIT 14
-
-#define T3_RQ_COOKIE_FLIT 13
-#define T3_RQ_CQE_FLIT 14
-
-static inline enum t3_wr_opcode fw_riwrh_opcode(struct fw_riwrh *wqe)
-{
- return G_FW_RIWR_OP(be32_to_cpu(wqe->op_seop_flags));
-}
-
-enum t3_wr_hdr_bits {
- T3_EOP = 1,
- T3_SOP = 2,
- T3_SOPEOP = T3_EOP|T3_SOP,
-};
-
-static inline void build_fw_riwrh(struct fw_riwrh *wqe, enum t3_wr_opcode op,
- enum t3_wr_flags flags, u8 genbit, u32 tid,
- u8 len, u8 sopeop)
-{
- wqe->op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(op) |
- V_FW_RIWR_SOPEOP(sopeop) |
- V_FW_RIWR_FLAGS(flags));
- wmb();
- wqe->gen_tid_len = cpu_to_be32(V_FW_RIWR_GEN(genbit) |
- V_FW_RIWR_TID(tid) |
- V_FW_RIWR_LEN(len));
- /* 2nd gen bit... */
- ((union t3_wr *)wqe)->genbit.genbit = cpu_to_be64(genbit);
-}
-
-/*
- * T3 ULP2_TX commands
- */
-enum t3_utx_mem_op {
- T3_UTX_MEM_READ = 2,
- T3_UTX_MEM_WRITE = 3
-};
-
-/* T3 MC7 RDMA TPT entry format */
-
-enum tpt_mem_type {
- TPT_NON_SHARED_MR = 0x0,
- TPT_SHARED_MR = 0x1,
- TPT_MW = 0x2,
- TPT_MW_RELAXED_PROTECTION = 0x3
-};
-
-enum tpt_addr_type {
- TPT_ZBTO = 0,
- TPT_VATO = 1
-};
-
-enum tpt_mem_perm {
- TPT_MW_BIND = 0x10,
- TPT_LOCAL_READ = 0x8,
- TPT_LOCAL_WRITE = 0x4,
- TPT_REMOTE_READ = 0x2,
- TPT_REMOTE_WRITE = 0x1
-};
-
-struct tpt_entry {
- __be32 valid_stag_pdid;
- __be32 flags_pagesize_qpid;
-
- __be32 rsvd_pbl_addr;
- __be32 len;
- __be32 va_hi;
- __be32 va_low_or_fbo;
-
- __be32 rsvd_bind_cnt_or_pstag;
- __be32 rsvd_pbl_size;
-};
-
-#define S_TPT_VALID 31
-#define V_TPT_VALID(x) ((x) << S_TPT_VALID)
-#define F_TPT_VALID V_TPT_VALID(1U)
-
-#define S_TPT_STAG_KEY 23
-#define M_TPT_STAG_KEY 0xFF
-#define V_TPT_STAG_KEY(x) ((x) << S_TPT_STAG_KEY)
-#define G_TPT_STAG_KEY(x) (((x) >> S_TPT_STAG_KEY) & M_TPT_STAG_KEY)
-
-#define S_TPT_STAG_STATE 22
-#define V_TPT_STAG_STATE(x) ((x) << S_TPT_STAG_STATE)
-#define F_TPT_STAG_STATE V_TPT_STAG_STATE(1U)
-
-#define S_TPT_STAG_TYPE 20
-#define M_TPT_STAG_TYPE 0x3
-#define V_TPT_STAG_TYPE(x) ((x) << S_TPT_STAG_TYPE)
-#define G_TPT_STAG_TYPE(x) (((x) >> S_TPT_STAG_TYPE) & M_TPT_STAG_TYPE)
-
-#define S_TPT_PDID 0
-#define M_TPT_PDID 0xFFFFF
-#define V_TPT_PDID(x) ((x) << S_TPT_PDID)
-#define G_TPT_PDID(x) (((x) >> S_TPT_PDID) & M_TPT_PDID)
-
-#define S_TPT_PERM 28
-#define M_TPT_PERM 0xF
-#define V_TPT_PERM(x) ((x) << S_TPT_PERM)
-#define G_TPT_PERM(x) (((x) >> S_TPT_PERM) & M_TPT_PERM)
-
-#define S_TPT_REM_INV_DIS 27
-#define V_TPT_REM_INV_DIS(x) ((x) << S_TPT_REM_INV_DIS)
-#define F_TPT_REM_INV_DIS V_TPT_REM_INV_DIS(1U)
-
-#define S_TPT_ADDR_TYPE 26
-#define V_TPT_ADDR_TYPE(x) ((x) << S_TPT_ADDR_TYPE)
-#define F_TPT_ADDR_TYPE V_TPT_ADDR_TYPE(1U)
-
-#define S_TPT_MW_BIND_ENABLE 25
-#define V_TPT_MW_BIND_ENABLE(x) ((x) << S_TPT_MW_BIND_ENABLE)
-#define F_TPT_MW_BIND_ENABLE V_TPT_MW_BIND_ENABLE(1U)
-
-#define S_TPT_PAGE_SIZE 20
-#define M_TPT_PAGE_SIZE 0x1F
-#define V_TPT_PAGE_SIZE(x) ((x) << S_TPT_PAGE_SIZE)
-#define G_TPT_PAGE_SIZE(x) (((x) >> S_TPT_PAGE_SIZE) & M_TPT_PAGE_SIZE)
-
-#define S_TPT_PBL_ADDR 0
-#define M_TPT_PBL_ADDR 0x1FFFFFFF
-#define V_TPT_PBL_ADDR(x) ((x) << S_TPT_PBL_ADDR)
-#define G_TPT_PBL_ADDR(x) (((x) >> S_TPT_PBL_ADDR) & M_TPT_PBL_ADDR)
-
-#define S_TPT_QPID 0
-#define M_TPT_QPID 0xFFFFF
-#define V_TPT_QPID(x) ((x) << S_TPT_QPID)
-#define G_TPT_QPID(x) (((x) >> S_TPT_QPID) & M_TPT_QPID)
-
-#define S_TPT_PSTAG 0
-#define M_TPT_PSTAG 0xFFFFFF
-#define V_TPT_PSTAG(x) ((x) << S_TPT_PSTAG)
-#define G_TPT_PSTAG(x) (((x) >> S_TPT_PSTAG) & M_TPT_PSTAG)
-
-#define S_TPT_PBL_SIZE 0
-#define M_TPT_PBL_SIZE 0xFFFFF
-#define V_TPT_PBL_SIZE(x) ((x) << S_TPT_PBL_SIZE)
-#define G_TPT_PBL_SIZE(x) (((x) >> S_TPT_PBL_SIZE) & M_TPT_PBL_SIZE)
-
-/*
- * CQE defs
- */
-struct t3_cqe {
- __be32 header;
- __be32 len;
- union {
- struct {
- __be32 stag;
- __be32 msn;
- } rcqe;
- struct {
- u32 wrid_hi;
- u32 wrid_low;
- } scqe;
- } u;
-};
-
-#define S_CQE_OOO 31
-#define M_CQE_OOO 0x1
-#define G_CQE_OOO(x) ((((x) >> S_CQE_OOO)) & M_CQE_OOO)
-#define V_CQE_OOO(x) ((x)<<S_CQE_OOO)
-
-#define S_CQE_QPID 12
-#define M_CQE_QPID 0x7FFFF
-#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
-#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
-
-#define S_CQE_SWCQE 11
-#define M_CQE_SWCQE 0x1
-#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
-#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
-
-#define S_CQE_GENBIT 10
-#define M_CQE_GENBIT 0x1
-#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
-#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
-
-#define S_CQE_STATUS 5
-#define M_CQE_STATUS 0x1F
-#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
-#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
-
-#define S_CQE_TYPE 4
-#define M_CQE_TYPE 0x1
-#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
-#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
-
-#define S_CQE_OPCODE 0
-#define M_CQE_OPCODE 0xF
-#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
-#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
-
-#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x).header)))
-#define CQE_OOO(x) (G_CQE_OOO(be32_to_cpu((x).header)))
-#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x).header)))
-#define CQE_GENBIT(x) (G_CQE_GENBIT(be32_to_cpu((x).header)))
-#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x).header)))
-#define SQ_TYPE(x) (CQE_TYPE((x)))
-#define RQ_TYPE(x) (!CQE_TYPE((x)))
-#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x).header)))
-#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x).header)))
-
-#define CQE_SEND_OPCODE(x)( \
- (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND) || \
- (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE) || \
- (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_INV) || \
- (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE_INV))
-
-#define CQE_LEN(x) (be32_to_cpu((x).len))
-
-/* used for RQ completion processing */
-#define CQE_WRID_STAG(x) (be32_to_cpu((x).u.rcqe.stag))
-#define CQE_WRID_MSN(x) (be32_to_cpu((x).u.rcqe.msn))
-
-/* used for SQ completion processing */
-#define CQE_WRID_SQ_WPTR(x) ((x).u.scqe.wrid_hi)
-#define CQE_WRID_WPTR(x) ((x).u.scqe.wrid_low)
-
-/* generic accessor macros */
-#define CQE_WRID_HI(x) ((x).u.scqe.wrid_hi)
-#define CQE_WRID_LOW(x) ((x).u.scqe.wrid_low)
-
-#define TPT_ERR_SUCCESS 0x0
-#define TPT_ERR_STAG 0x1 /* STAG invalid: either the */
- /* STAG is off limit, being 0, */
- /* or STAG_key mismatch */
-#define TPT_ERR_PDID 0x2 /* PDID mismatch */
-#define TPT_ERR_QPID 0x3 /* QPID mismatch */
-#define TPT_ERR_ACCESS 0x4 /* Invalid access right */
-#define TPT_ERR_WRAP 0x5 /* Wrap error */
-#define TPT_ERR_BOUND 0x6 /* base and bounds violation */
-#define TPT_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */
- /* shared memory region */
-#define TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */
- /* MR with MW bound */
-#define TPT_ERR_ECC 0x9 /* ECC error detected */
-#define TPT_ERR_ECC_PSTAG 0xA /* ECC error detected when */
- /* reading PSTAG for a MW */
- /* Invalidate */
-#define TPT_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */
- /* software error */
-#define TPT_ERR_SWFLUSH 0xC /* SW FLUSHED */
-#define TPT_ERR_CRC 0x10 /* CRC error */
-#define TPT_ERR_MARKER 0x11 /* Marker error */
-#define TPT_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */
-#define TPT_ERR_OUT_OF_RQE 0x13 /* out of RQE */
-#define TPT_ERR_DDP_VERSION 0x14 /* wrong DDP version */
-#define TPT_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */
-#define TPT_ERR_OPCODE 0x16 /* invalid rdma opcode */
-#define TPT_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */
-#define TPT_ERR_MSN 0x18 /* MSN error */
-#define TPT_ERR_TBIT 0x19 /* tag bit not set correctly */
-#define TPT_ERR_MO 0x1A /* MO not 0 for TERMINATE */
- /* or READ_REQ */
-#define TPT_ERR_MSN_GAP 0x1B
-#define TPT_ERR_MSN_RANGE 0x1C
-#define TPT_ERR_IRD_OVERFLOW 0x1D
-#define TPT_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */
- /* software error */
-#define TPT_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */
- /* mismatch) */
-
-struct t3_swsq {
- __u64 wr_id;
- struct t3_cqe cqe;
- __u32 sq_wptr;
- __be32 read_len;
- int opcode;
- int complete;
- int signaled;
-};
-
-struct t3_swrq {
- __u64 wr_id;
- __u32 pbl_addr;
-};
-
-/*
- * A T3 WQ implements both the SQ and RQ.
- */
-struct t3_wq {
- union t3_wr *queue; /* DMA accessible memory */
- dma_addr_t dma_addr; /* DMA address for HW */
- DEFINE_DMA_UNMAP_ADDR(mapping); /* unmap kruft */
- u32 error; /* 1 once we go to ERROR */
- u32 qpid;
- u32 wptr; /* idx to next available WR slot */
- u32 size_log2; /* total wq size */
- struct t3_swsq *sq; /* SW SQ */
- struct t3_swsq *oldest_read; /* tracks oldest pending read */
- u32 sq_wptr; /* sq_wptr - sq_rptr == count of */
- u32 sq_rptr; /* pending wrs */
- u32 sq_size_log2; /* sq size */
- struct t3_swrq *rq; /* SW RQ (holds consumer wr_ids */
- u32 rq_wptr; /* rq_wptr - rq_rptr == count of */
- u32 rq_rptr; /* pending wrs */
- struct t3_swrq *rq_oldest_wr; /* oldest wr on the SW RQ */
- u32 rq_size_log2; /* rq size */
- u32 rq_addr; /* rq adapter address */
- void __iomem *doorbell; /* kernel db */
- u64 udb; /* user db if any */
- struct cxio_rdev *rdev;
-};
-
-struct t3_cq {
- u32 cqid;
- u32 rptr;
- u32 wptr;
- u32 size_log2;
- dma_addr_t dma_addr;
- DEFINE_DMA_UNMAP_ADDR(mapping);
- struct t3_cqe *queue;
- struct t3_cqe *sw_queue;
- u32 sw_rptr;
- u32 sw_wptr;
-};
-
-#define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
- CQE_GENBIT(*cqe))
-
-struct t3_cq_status_page {
- u32 cq_err;
-};
-
-static inline int cxio_cq_in_error(struct t3_cq *cq)
-{
- return ((struct t3_cq_status_page *)
- &cq->queue[1 << cq->size_log2])->cq_err;
-}
-
-static inline void cxio_set_cq_in_error(struct t3_cq *cq)
-{
- ((struct t3_cq_status_page *)
- &cq->queue[1 << cq->size_log2])->cq_err = 1;
-}
-
-static inline void cxio_set_wq_in_error(struct t3_wq *wq)
-{
- wq->queue->wq_in_err.err |= 1;
-}
-
-static inline void cxio_disable_wq_db(struct t3_wq *wq)
-{
- wq->queue->wq_in_err.err |= 2;
-}
-
-static inline void cxio_enable_wq_db(struct t3_wq *wq)
-{
- wq->queue->wq_in_err.err &= ~2;
-}
-
-static inline int cxio_wq_db_enabled(struct t3_wq *wq)
-{
- return !(wq->queue->wq_in_err.err & 2);
-}
-
-static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
-{
- struct t3_cqe *cqe;
-
- cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
- if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
- return cqe;
- return NULL;
-}
-
-static inline struct t3_cqe *cxio_next_sw_cqe(struct t3_cq *cq)
-{
- struct t3_cqe *cqe;
-
- if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
- cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
- return cqe;
- }
- return NULL;
-}
-
-static inline struct t3_cqe *cxio_next_cqe(struct t3_cq *cq)
-{
- struct t3_cqe *cqe;
-
- if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
- cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
- return cqe;
- }
- cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
- if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
- return cqe;
- return NULL;
-}
-
-#endif
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
deleted file mode 100644
index 8e77dc543dd1..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ /dev/null
@@ -1,292 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-
-#include <rdma/ib_verbs.h>
-
-#include "cxgb3_offload.h"
-#include "iwch_provider.h"
-#include "iwch_user.h"
-#include "iwch.h"
-#include "iwch_cm.h"
-
-#define DRV_VERSION "1.1"
-
-MODULE_AUTHOR("Boyd Faulkner, Steve Wise");
-MODULE_DESCRIPTION("Chelsio T3 RDMA Driver");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
-
-static void open_rnic_dev(struct t3cdev *);
-static void close_rnic_dev(struct t3cdev *);
-static void iwch_event_handler(struct t3cdev *, u32, u32);
-
-struct cxgb3_client t3c_client = {
- .name = "iw_cxgb3",
- .add = open_rnic_dev,
- .remove = close_rnic_dev,
- .handlers = t3c_handlers,
- .redirect = iwch_ep_redirect,
- .event_handler = iwch_event_handler
-};
-
-static LIST_HEAD(dev_list);
-static DEFINE_MUTEX(dev_mutex);
-
-static int disable_qp_db(int id, void *p, void *data)
-{
- struct iwch_qp *qhp = p;
-
- cxio_disable_wq_db(&qhp->wq);
- return 0;
-}
-
-static int enable_qp_db(int id, void *p, void *data)
-{
- struct iwch_qp *qhp = p;
-
- if (data)
- ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell, qhp->wq.qpid);
- cxio_enable_wq_db(&qhp->wq);
- return 0;
-}
-
-static void disable_dbs(struct iwch_dev *rnicp)
-{
- spin_lock_irq(&rnicp->lock);
- idr_for_each(&rnicp->qpidr, disable_qp_db, NULL);
- spin_unlock_irq(&rnicp->lock);
-}
-
-static void enable_dbs(struct iwch_dev *rnicp, int ring_db)
-{
- spin_lock_irq(&rnicp->lock);
- idr_for_each(&rnicp->qpidr, enable_qp_db,
- (void *)(unsigned long)ring_db);
- spin_unlock_irq(&rnicp->lock);
-}
-
-static void iwch_db_drop_task(struct work_struct *work)
-{
- struct iwch_dev *rnicp = container_of(work, struct iwch_dev,
- db_drop_task.work);
- enable_dbs(rnicp, 1);
-}
-
-static void rnic_init(struct iwch_dev *rnicp)
-{
- PDBG("%s iwch_dev %p\n", __func__, rnicp);
- idr_init(&rnicp->cqidr);
- idr_init(&rnicp->qpidr);
- idr_init(&rnicp->mmidr);
- spin_lock_init(&rnicp->lock);
- INIT_DELAYED_WORK(&rnicp->db_drop_task, iwch_db_drop_task);
-
- rnicp->attr.max_qps = T3_MAX_NUM_QP - 32;
- rnicp->attr.max_wrs = T3_MAX_QP_DEPTH;
- rnicp->attr.max_sge_per_wr = T3_MAX_SGE;
- rnicp->attr.max_sge_per_rdma_write_wr = T3_MAX_SGE;
- rnicp->attr.max_cqs = T3_MAX_NUM_CQ - 1;
- rnicp->attr.max_cqes_per_cq = T3_MAX_CQ_DEPTH;
- rnicp->attr.max_mem_regs = cxio_num_stags(&rnicp->rdev);
- rnicp->attr.max_phys_buf_entries = T3_MAX_PBL_SIZE;
- rnicp->attr.max_pds = T3_MAX_NUM_PD - 1;
- rnicp->attr.mem_pgsizes_bitmask = T3_PAGESIZE_MASK;
- rnicp->attr.max_mr_size = T3_MAX_MR_SIZE;
- rnicp->attr.can_resize_wq = 0;
- rnicp->attr.max_rdma_reads_per_qp = 8;
- rnicp->attr.max_rdma_read_resources =
- rnicp->attr.max_rdma_reads_per_qp * rnicp->attr.max_qps;
- rnicp->attr.max_rdma_read_qp_depth = 8; /* IRD */
- rnicp->attr.max_rdma_read_depth =
- rnicp->attr.max_rdma_read_qp_depth * rnicp->attr.max_qps;
- rnicp->attr.rq_overflow_handled = 0;
- rnicp->attr.can_modify_ird = 0;
- rnicp->attr.can_modify_ord = 0;
- rnicp->attr.max_mem_windows = rnicp->attr.max_mem_regs - 1;
- rnicp->attr.stag0_value = 1;
- rnicp->attr.zbva_support = 1;
- rnicp->attr.local_invalidate_fence = 1;
- rnicp->attr.cq_overflow_detection = 1;
- return;
-}
-
-static void open_rnic_dev(struct t3cdev *tdev)
-{
- struct iwch_dev *rnicp;
-
- PDBG("%s t3cdev %p\n", __func__, tdev);
- printk_once(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
- DRV_VERSION);
- rnicp = (struct iwch_dev *)ib_alloc_device(sizeof(*rnicp));
- if (!rnicp) {
- printk(KERN_ERR MOD "Cannot allocate ib device\n");
- return;
- }
- rnicp->rdev.ulp = rnicp;
- rnicp->rdev.t3cdev_p = tdev;
-
- mutex_lock(&dev_mutex);
-
- if (cxio_rdev_open(&rnicp->rdev)) {
- mutex_unlock(&dev_mutex);
- printk(KERN_ERR MOD "Unable to open CXIO rdev\n");
- ib_dealloc_device(&rnicp->ibdev);
- return;
- }
-
- rnic_init(rnicp);
-
- list_add_tail(&rnicp->entry, &dev_list);
- mutex_unlock(&dev_mutex);
-
- if (iwch_register_device(rnicp)) {
- printk(KERN_ERR MOD "Unable to register device\n");
- close_rnic_dev(tdev);
- }
- printk(KERN_INFO MOD "Initialized device %s\n",
- pci_name(rnicp->rdev.rnic_info.pdev));
- return;
-}
-
-static void close_rnic_dev(struct t3cdev *tdev)
-{
- struct iwch_dev *dev, *tmp;
- PDBG("%s t3cdev %p\n", __func__, tdev);
- mutex_lock(&dev_mutex);
- list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
- if (dev->rdev.t3cdev_p == tdev) {
- dev->rdev.flags = CXIO_ERROR_FATAL;
- synchronize_net();
- cancel_delayed_work_sync(&dev->db_drop_task);
- list_del(&dev->entry);
- iwch_unregister_device(dev);
- cxio_rdev_close(&dev->rdev);
- idr_destroy(&dev->cqidr);
- idr_destroy(&dev->qpidr);
- idr_destroy(&dev->mmidr);
- ib_dealloc_device(&dev->ibdev);
- break;
- }
- }
- mutex_unlock(&dev_mutex);
-}
-
-static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
-{
- struct cxio_rdev *rdev = tdev->ulp;
- struct iwch_dev *rnicp;
- struct ib_event event;
- u32 portnum = port_id + 1;
- int dispatch = 0;
-
- if (!rdev)
- return;
- rnicp = rdev_to_iwch_dev(rdev);
- switch (evt) {
- case OFFLOAD_STATUS_DOWN: {
- rdev->flags = CXIO_ERROR_FATAL;
- synchronize_net();
- event.event = IB_EVENT_DEVICE_FATAL;
- dispatch = 1;
- break;
- }
- case OFFLOAD_PORT_DOWN: {
- event.event = IB_EVENT_PORT_ERR;
- dispatch = 1;
- break;
- }
- case OFFLOAD_PORT_UP: {
- event.event = IB_EVENT_PORT_ACTIVE;
- dispatch = 1;
- break;
- }
- case OFFLOAD_DB_FULL: {
- disable_dbs(rnicp);
- break;
- }
- case OFFLOAD_DB_EMPTY: {
- enable_dbs(rnicp, 1);
- break;
- }
- case OFFLOAD_DB_DROP: {
- unsigned long delay = 1000;
- unsigned short r;
-
- disable_dbs(rnicp);
- get_random_bytes(&r, 2);
- delay += r & 1023;
-
- /*
- * delay is between 1000-2023 usecs.
- */
- schedule_delayed_work(&rnicp->db_drop_task,
- usecs_to_jiffies(delay));
- break;
- }
- }
-
- if (dispatch) {
- event.device = &rnicp->ibdev;
- event.element.port_num = portnum;
- ib_dispatch_event(&event);
- }
-
- return;
-}
-
-static int __init iwch_init_module(void)
-{
- int err;
-
- err = cxio_hal_init();
- if (err)
- return err;
- err = iwch_cm_init();
- if (err)
- return err;
- cxio_register_ev_cb(iwch_ev_dispatch);
- cxgb3_register_client(&t3c_client);
- return 0;
-}
-
-static void __exit iwch_exit_module(void)
-{
- cxgb3_unregister_client(&t3c_client);
- cxio_unregister_ev_cb(iwch_ev_dispatch);
- iwch_cm_term();
- cxio_hal_exit();
-}
-
-module_init(iwch_init_module);
-module_exit(iwch_exit_module);
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
deleted file mode 100644
index 837862287a29..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __IWCH_H__
-#define __IWCH_H__
-
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/idr.h>
-#include <linux/workqueue.h>
-
-#include <rdma/ib_verbs.h>
-
-#include "cxio_hal.h"
-#include "cxgb3_offload.h"
-
-struct iwch_pd;
-struct iwch_cq;
-struct iwch_qp;
-struct iwch_mr;
-
-struct iwch_rnic_attributes {
- u32 max_qps;
- u32 max_wrs; /* Max for any SQ/RQ */
- u32 max_sge_per_wr;
- u32 max_sge_per_rdma_write_wr; /* for RDMA Write WR */
- u32 max_cqs;
- u32 max_cqes_per_cq;
- u32 max_mem_regs;
- u32 max_phys_buf_entries; /* for phys buf list */
- u32 max_pds;
-
- /*
- * The memory page sizes supported by this RNIC.
- * Bit position i in bitmap indicates page of
- * size (4k)^i. Phys block list mode unsupported.
- */
- u32 mem_pgsizes_bitmask;
- u64 max_mr_size;
- u8 can_resize_wq;
-
- /*
- * The maximum number of RDMA Reads that can be outstanding
- * per QP with this RNIC as the target.
- */
- u32 max_rdma_reads_per_qp;
-
- /*
- * The maximum number of resources used for RDMA Reads
- * by this RNIC with this RNIC as the target.
- */
- u32 max_rdma_read_resources;
-
- /*
- * The max depth per QP for initiation of RDMA Read
- * by this RNIC.
- */
- u32 max_rdma_read_qp_depth;
-
- /*
- * The maximum depth for initiation of RDMA Read
- * operations by this RNIC on all QPs
- */
- u32 max_rdma_read_depth;
- u8 rq_overflow_handled;
- u32 can_modify_ird;
- u32 can_modify_ord;
- u32 max_mem_windows;
- u32 stag0_value;
- u8 zbva_support;
- u8 local_invalidate_fence;
- u32 cq_overflow_detection;
-};
-
-struct iwch_dev {
- struct ib_device ibdev;
- struct cxio_rdev rdev;
- u32 device_cap_flags;
- struct iwch_rnic_attributes attr;
- struct idr cqidr;
- struct idr qpidr;
- struct idr mmidr;
- spinlock_t lock;
- struct list_head entry;
- struct delayed_work db_drop_task;
-};
-
-static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev)
-{
- return container_of(ibdev, struct iwch_dev, ibdev);
-}
-
-static inline struct iwch_dev *rdev_to_iwch_dev(struct cxio_rdev *rdev)
-{
- return container_of(rdev, struct iwch_dev, rdev);
-}
-
-static inline int t3b_device(const struct iwch_dev *rhp)
-{
- return rhp->rdev.t3cdev_p->type == T3B;
-}
-
-static inline int t3a_device(const struct iwch_dev *rhp)
-{
- return rhp->rdev.t3cdev_p->type == T3A;
-}
-
-static inline struct iwch_cq *get_chp(struct iwch_dev *rhp, u32 cqid)
-{
- return idr_find(&rhp->cqidr, cqid);
-}
-
-static inline struct iwch_qp *get_qhp(struct iwch_dev *rhp, u32 qpid)
-{
- return idr_find(&rhp->qpidr, qpid);
-}
-
-static inline struct iwch_mr *get_mhp(struct iwch_dev *rhp, u32 mmid)
-{
- return idr_find(&rhp->mmidr, mmid);
-}
-
-static inline int insert_handle(struct iwch_dev *rhp, struct idr *idr,
- void *handle, u32 id)
-{
- int ret;
-
- idr_preload(GFP_KERNEL);
- spin_lock_irq(&rhp->lock);
-
- ret = idr_alloc(idr, handle, id, id + 1, GFP_NOWAIT);
-
- spin_unlock_irq(&rhp->lock);
- idr_preload_end();
-
- BUG_ON(ret == -ENOSPC);
- return ret < 0 ? ret : 0;
-}
-
-static inline void remove_handle(struct iwch_dev *rhp, struct idr *idr, u32 id)
-{
- spin_lock_irq(&rhp->lock);
- idr_remove(idr, id);
- spin_unlock_irq(&rhp->lock);
-}
-
-extern struct cxgb3_client t3c_client;
-extern cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
-extern void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb);
-
-#endif
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
deleted file mode 100644
index cb78b1e9bcd9..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ /dev/null
@@ -1,2272 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/module.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-#include <linux/skbuff.h>
-#include <linux/timer.h>
-#include <linux/notifier.h>
-#include <linux/inetdevice.h>
-
-#include <net/neighbour.h>
-#include <net/netevent.h>
-#include <net/route.h>
-
-#include "tcb.h"
-#include "cxgb3_offload.h"
-#include "iwch.h"
-#include "iwch_provider.h"
-#include "iwch_cm.h"
-
-static char *states[] = {
- "idle",
- "listen",
- "connecting",
- "mpa_wait_req",
- "mpa_req_sent",
- "mpa_req_rcvd",
- "mpa_rep_sent",
- "fpdu_mode",
- "aborting",
- "closing",
- "moribund",
- "dead",
- NULL,
-};
-
-int peer2peer = 0;
-module_param(peer2peer, int, 0644);
-MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
-
-static int ep_timeout_secs = 60;
-module_param(ep_timeout_secs, int, 0644);
-MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
- "in seconds (default=60)");
-
-static int mpa_rev = 1;
-module_param(mpa_rev, int, 0644);
-MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
- "1 is spec compliant. (default=1)");
-
-static int markers_enabled = 0;
-module_param(markers_enabled, int, 0644);
-MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");
-
-static int crc_enabled = 1;
-module_param(crc_enabled, int, 0644);
-MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");
-
-static int rcv_win = 256 * 1024;
-module_param(rcv_win, int, 0644);
-MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
-
-static int snd_win = 32 * 1024;
-module_param(snd_win, int, 0644);
-MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
-
-static unsigned int nocong = 0;
-module_param(nocong, uint, 0644);
-MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");
-
-static unsigned int cong_flavor = 1;
-module_param(cong_flavor, uint, 0644);
-MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");
-
-static struct workqueue_struct *workq;
-
-static struct sk_buff_head rxq;
-
-static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
-static void ep_timeout(unsigned long arg);
-static void connect_reply_upcall(struct iwch_ep *ep, int status);
-
-static void start_ep_timer(struct iwch_ep *ep)
-{
- PDBG("%s ep %p\n", __func__, ep);
- if (timer_pending(&ep->timer)) {
- PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
- del_timer_sync(&ep->timer);
- } else
- get_ep(&ep->com);
- ep->timer.expires = jiffies + ep_timeout_secs * HZ;
- ep->timer.data = (unsigned long)ep;
- ep->timer.function = ep_timeout;
- add_timer(&ep->timer);
-}
-
-static void stop_ep_timer(struct iwch_ep *ep)
-{
- PDBG("%s ep %p\n", __func__, ep);
- if (!timer_pending(&ep->timer)) {
- WARN(1, "%s timer stopped when it's not running! ep %p state %u\n",
- __func__, ep, ep->com.state);
- return;
- }
- del_timer_sync(&ep->timer);
- put_ep(&ep->com);
-}
-
-static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
-{
- int error = 0;
- struct cxio_rdev *rdev;
-
- rdev = (struct cxio_rdev *)tdev->ulp;
- if (cxio_fatal_error(rdev)) {
- kfree_skb(skb);
- return -EIO;
- }
- error = l2t_send(tdev, skb, l2e);
- if (error < 0)
- kfree_skb(skb);
- return error;
-}
-
-int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
-{
- int error = 0;
- struct cxio_rdev *rdev;
-
- rdev = (struct cxio_rdev *)tdev->ulp;
- if (cxio_fatal_error(rdev)) {
- kfree_skb(skb);
- return -EIO;
- }
- error = cxgb3_ofld_send(tdev, skb);
- if (error < 0)
- kfree_skb(skb);
- return error;
-}
-
-static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
-{
- struct cpl_tid_release *req;
-
- skb = get_skb(skb, sizeof *req, GFP_KERNEL);
- if (!skb)
- return;
- req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
- skb->priority = CPL_PRIORITY_SETUP;
- iwch_cxgb3_ofld_send(tdev, skb);
- return;
-}
-
-int iwch_quiesce_tid(struct iwch_ep *ep)
-{
- struct cpl_set_tcb_field *req;
- struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
-
- if (!skb)
- return -ENOMEM;
- req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
- req->reply = 0;
- req->cpu_idx = 0;
- req->word = htons(W_TCB_RX_QUIESCE);
- req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
- req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);
-
- skb->priority = CPL_PRIORITY_DATA;
- return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
-}
-
-int iwch_resume_tid(struct iwch_ep *ep)
-{
- struct cpl_set_tcb_field *req;
- struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
-
- if (!skb)
- return -ENOMEM;
- req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
- req->reply = 0;
- req->cpu_idx = 0;
- req->word = htons(W_TCB_RX_QUIESCE);
- req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
- req->val = 0;
-
- skb->priority = CPL_PRIORITY_DATA;
- return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
-}
-
-static void set_emss(struct iwch_ep *ep, u16 opt)
-{
- PDBG("%s ep %p opt %u\n", __func__, ep, opt);
- ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
- if (G_TCPOPT_TSTAMP(opt))
- ep->emss -= 12;
- if (ep->emss < 128)
- ep->emss = 128;
- PDBG("emss=%d\n", ep->emss);
-}
-
-static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
-{
- unsigned long flags;
- enum iwch_ep_state state;
-
- spin_lock_irqsave(&epc->lock, flags);
- state = epc->state;
- spin_unlock_irqrestore(&epc->lock, flags);
- return state;
-}
-
-static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
-{
- epc->state = new;
-}
-
-static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&epc->lock, flags);
- PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
- __state_set(epc, new);
- spin_unlock_irqrestore(&epc->lock, flags);
- return;
-}
-
-static void *alloc_ep(int size, gfp_t gfp)
-{
- struct iwch_ep_common *epc;
-
- epc = kzalloc(size, gfp);
- if (epc) {
- kref_init(&epc->kref);
- spin_lock_init(&epc->lock);
- init_waitqueue_head(&epc->waitq);
- }
- PDBG("%s alloc ep %p\n", __func__, epc);
- return epc;
-}
-
-void __free_ep(struct kref *kref)
-{
- struct iwch_ep *ep;
- ep = container_of(container_of(kref, struct iwch_ep_common, kref),
- struct iwch_ep, com);
- PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
- if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
- cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
- dst_release(ep->dst);
- l2t_release(ep->com.tdev, ep->l2t);
- }
- kfree(ep);
-}
-
-static void release_ep_resources(struct iwch_ep *ep)
-{
- PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
- set_bit(RELEASE_RESOURCES, &ep->com.flags);
- put_ep(&ep->com);
-}
-
-static int status2errno(int status)
-{
- switch (status) {
- case CPL_ERR_NONE:
- return 0;
- case CPL_ERR_CONN_RESET:
- return -ECONNRESET;
- case CPL_ERR_ARP_MISS:
- return -EHOSTUNREACH;
- case CPL_ERR_CONN_TIMEDOUT:
- return -ETIMEDOUT;
- case CPL_ERR_TCAM_FULL:
- return -ENOMEM;
- case CPL_ERR_CONN_EXIST:
- return -EADDRINUSE;
- default:
- return -EIO;
- }
-}
-
-/*
- * Try and reuse skbs already allocated...
- */
-static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
-{
- if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
- skb_trim(skb, 0);
- skb_get(skb);
- } else {
- skb = alloc_skb(len, gfp);
- }
- return skb;
-}
-
-static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
- __be32 peer_ip, __be16 local_port,
- __be16 peer_port, u8 tos)
-{
- struct rtable *rt;
- struct flowi4 fl4;
-
- rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
- peer_port, local_port, IPPROTO_TCP,
- tos, 0);
- if (IS_ERR(rt))
- return NULL;
- return rt;
-}
-
-static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
-{
- int i = 0;
-
- while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
- ++i;
- return i;
-}
-
-static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
-{
- PDBG("%s t3cdev %p\n", __func__, dev);
- kfree_skb(skb);
-}
-
-/*
- * Handle an ARP failure for an active open.
- */
-static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
-{
- printk(KERN_ERR MOD "ARP failure duing connect\n");
- kfree_skb(skb);
-}
-
-/*
- * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
- * and send it along.
- */
-static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
-{
- struct cpl_abort_req *req = cplhdr(skb);
-
- PDBG("%s t3cdev %p\n", __func__, dev);
- req->cmd = CPL_ABORT_NO_RST;
- iwch_cxgb3_ofld_send(dev, skb);
-}
-
-static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
-{
- struct cpl_close_con_req *req;
- struct sk_buff *skb;
-
- PDBG("%s ep %p\n", __func__, ep);
- skb = get_skb(NULL, sizeof(*req), gfp);
- if (!skb) {
- printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
- return -ENOMEM;
- }
- skb->priority = CPL_PRIORITY_DATA;
- set_arp_failure_handler(skb, arp_failure_discard);
- req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
- req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
-
-static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
-{
- struct cpl_abort_req *req;
-
- PDBG("%s ep %p\n", __func__, ep);
- skb = get_skb(skb, sizeof(*req), gfp);
- if (!skb) {
- printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
- __func__);
- return -ENOMEM;
- }
- skb->priority = CPL_PRIORITY_DATA;
- set_arp_failure_handler(skb, abort_arp_failure);
- req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
- memset(req, 0, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
- req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
- req->cmd = CPL_ABORT_SEND_RST;
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
-
-static int send_connect(struct iwch_ep *ep)
-{
- struct cpl_act_open_req *req;
- struct sk_buff *skb;
- u32 opt0h, opt0l, opt2;
- unsigned int mtu_idx;
- int wscale;
-
- PDBG("%s ep %p\n", __func__, ep);
-
- skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
- if (!skb) {
- printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
- __func__);
- return -ENOMEM;
- }
- mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
- wscale = compute_wscale(rcv_win);
- opt0h = V_NAGLE(0) |
- V_NO_CONG(nocong) |
- V_KEEP_ALIVE(1) |
- F_TCAM_BYPASS |
- V_WND_SCALE(wscale) |
- V_MSS_IDX(mtu_idx) |
- V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
- opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
- opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
- V_CONG_CONTROL_FLAVOR(cong_flavor);
- skb->priority = CPL_PRIORITY_SETUP;
- set_arp_failure_handler(skb, act_open_req_arp_failure);
-
- req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
- req->local_port = ep->com.local_addr.sin_port;
- req->peer_port = ep->com.remote_addr.sin_port;
- req->local_ip = ep->com.local_addr.sin_addr.s_addr;
- req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
- req->opt0h = htonl(opt0h);
- req->opt0l = htonl(opt0l);
- req->params = 0;
- req->opt2 = htonl(opt2);
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
-
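-/*
- * Start MPA negotiation on the active side (called from act_establish()):
- * the MPA request (key, flags, revision and any private data) is sent as
- * streaming TCP payload wrapped in an offload TX_DATA work request, the
- * ep timer is started and the state moves to MPA_REQ_SENT.
- */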
-static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
-{
- int mpalen;
- struct tx_data_wr *req;
- struct mpa_message *mpa;
- int len;
-
- PDBG("%s ep %p pd_len %d\n", __func__, ep, ep->plen);
-
- BUG_ON(skb_cloned(skb));
-
- mpalen = sizeof(*mpa) + ep->plen;
- if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
- kfree_skb(skb);
- skb = alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
- if (!skb) {
- connect_reply_upcall(ep, -ENOMEM);
- return;
- }
- }
- skb_trim(skb, 0);
- skb_reserve(skb, sizeof(*req));
- skb_put(skb, mpalen);
- skb->priority = CPL_PRIORITY_DATA;
- mpa = (struct mpa_message *) skb->data;
- memset(mpa, 0, sizeof(*mpa));
- memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
- mpa->flags = (crc_enabled ? MPA_CRC : 0) |
- (markers_enabled ? MPA_MARKERS : 0);
- mpa->private_data_size = htons(ep->plen);
- mpa->revision = mpa_rev;
-
- if (ep->plen)
- memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);
-
- /*
- * Reference the mpa skb. This ensures the data area
- * will remain in memory until the hw acks the tx.
- * Function tx_ack() will deref it.
- */
- skb_get(skb);
- set_arp_failure_handler(skb, arp_failure_discard);
- skb_reset_transport_header(skb);
- len = skb->len;
- req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
- req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
- req->wr_lo = htonl(V_WR_TID(ep->hwtid));
- req->len = htonl(len);
- req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
- V_TX_SNDBUF(snd_win>>15));
- req->flags = htonl(F_TX_INIT);
- req->sndseq = htonl(ep->snd_seq);
- BUG_ON(ep->mpa_skb);
- ep->mpa_skb = skb;
- iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
- start_ep_timer(ep);
- state_set(&ep->com, MPA_REQ_SENT);
- return;
-}
-
-static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
-{
- int mpalen;
- struct tx_data_wr *req;
- struct mpa_message *mpa;
- struct sk_buff *skb;
-
- PDBG("%s ep %p plen %d\n", __func__, ep, plen);
-
- mpalen = sizeof(*mpa) + plen;
-
- skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
- if (!skb) {
- printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
- return -ENOMEM;
- }
- skb_reserve(skb, sizeof(*req));
- mpa = (struct mpa_message *) skb_put(skb, mpalen);
- memset(mpa, 0, sizeof(*mpa));
- memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
- mpa->flags = MPA_REJECT;
- mpa->revision = mpa_rev;
- mpa->private_data_size = htons(plen);
- if (plen)
- memcpy(mpa->private_data, pdata, plen);
-
- /*
- * Reference the mpa skb again. This ensures the data area
- * will remain in memory until the hw acks the tx.
- * Function tx_ack() will deref it.
- */
- skb_get(skb);
- skb->priority = CPL_PRIORITY_DATA;
- set_arp_failure_handler(skb, arp_failure_discard);
- skb_reset_transport_header(skb);
- req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
- req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
- req->wr_lo = htonl(V_WR_TID(ep->hwtid));
- req->len = htonl(mpalen);
- req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
- V_TX_SNDBUF(snd_win>>15));
- req->flags = htonl(F_TX_INIT);
- req->sndseq = htonl(ep->snd_seq);
- BUG_ON(ep->mpa_skb);
- ep->mpa_skb = skb;
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
-
-static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
-{
- int mpalen;
- struct tx_data_wr *req;
- struct mpa_message *mpa;
- int len;
- struct sk_buff *skb;
-
- PDBG("%s ep %p plen %d\n", __func__, ep, plen);
-
- mpalen = sizeof(*mpa) + plen;
-
- skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
- if (!skb) {
- printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
- return -ENOMEM;
- }
- skb->priority = CPL_PRIORITY_DATA;
- skb_reserve(skb, sizeof(*req));
- mpa = (struct mpa_message *) skb_put(skb, mpalen);
- memset(mpa, 0, sizeof(*mpa));
- memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
- mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
- (markers_enabled ? MPA_MARKERS : 0);
- mpa->revision = mpa_rev;
- mpa->private_data_size = htons(plen);
- if (plen)
- memcpy(mpa->private_data, pdata, plen);
-
- /*
- * Reference the mpa skb. This ensures the data area
- * will remain in memory until the hw acks the tx.
- * Function tx_ack() will deref it.
- */
- skb_get(skb);
- set_arp_failure_handler(skb, arp_failure_discard);
- skb_reset_transport_header(skb);
- len = skb->len;
- req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
- req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
- req->wr_lo = htonl(V_WR_TID(ep->hwtid));
- req->len = htonl(len);
- req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
- V_TX_SNDBUF(snd_win>>15));
- req->flags = htonl(F_TX_INIT);
- req->sndseq = htonl(ep->snd_seq);
- ep->mpa_skb = skb;
- state_set(&ep->com, MPA_REP_SENT);
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
-
-static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_act_establish *req = cplhdr(skb);
- unsigned int tid = GET_TID(req);
-
- PDBG("%s ep %p tid %d\n", __func__, ep, tid);
-
- dst_confirm(ep->dst);
-
- /* setup the hwtid for this connection */
- ep->hwtid = tid;
- cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);
-
- ep->snd_seq = ntohl(req->snd_isn);
- ep->rcv_seq = ntohl(req->rcv_isn);
-
- set_emss(ep, ntohs(req->tcp_opt));
-
- /* dealloc the atid */
- cxgb3_free_atid(ep->com.tdev, ep->atid);
-
- /* start MPA negotiation */
- send_mpa_req(ep, skb);
-
- return 0;
-}
-
-static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
-{
- PDBG("%s ep %p\n", __FILE__, ep);
- state_set(&ep->com, ABORTING);
- send_abort(ep, skb, gfp);
-}
-
-static void close_complete_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- PDBG("%s ep %p\n", __func__, ep);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_CLOSE;
- if (ep->com.cm_id) {
- PDBG("close complete delivered ep %p cm_id %p tid %d\n",
- ep, ep->com.cm_id, ep->hwtid);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- ep->com.cm_id->rem_ref(ep->com.cm_id);
- ep->com.cm_id = NULL;
- ep->com.qp = NULL;
- }
-}
-
-static void peer_close_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- PDBG("%s ep %p\n", __func__, ep);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_DISCONNECT;
- if (ep->com.cm_id) {
- PDBG("peer close delivered ep %p cm_id %p tid %d\n",
- ep, ep->com.cm_id, ep->hwtid);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- }
-}
-
-static void peer_abort_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- PDBG("%s ep %p\n", __func__, ep);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_CLOSE;
- event.status = -ECONNRESET;
- if (ep->com.cm_id) {
- PDBG("abort delivered ep %p cm_id %p tid %d\n", ep,
- ep->com.cm_id, ep->hwtid);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- ep->com.cm_id->rem_ref(ep->com.cm_id);
- ep->com.cm_id = NULL;
- ep->com.qp = NULL;
- }
-}
-
-static void connect_reply_upcall(struct iwch_ep *ep, int status)
-{
- struct iw_cm_event event;
-
- PDBG("%s ep %p status %d\n", __func__, ep, status);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_CONNECT_REPLY;
- event.status = status;
- memcpy(&event.local_addr, &ep->com.local_addr,
- sizeof(ep->com.local_addr));
- memcpy(&event.remote_addr, &ep->com.remote_addr,
- sizeof(ep->com.remote_addr));
-
- if ((status == 0) || (status == -ECONNREFUSED)) {
- event.private_data_len = ep->plen;
- event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
- }
- if (ep->com.cm_id) {
- PDBG("%s ep %p tid %d status %d\n", __func__, ep,
- ep->hwtid, status);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- }
- if (status < 0) {
- ep->com.cm_id->rem_ref(ep->com.cm_id);
- ep->com.cm_id = NULL;
- ep->com.qp = NULL;
- }
-}
-
-static void connect_request_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_CONNECT_REQUEST;
- memcpy(&event.local_addr, &ep->com.local_addr,
- sizeof(ep->com.local_addr));
- memcpy(&event.remote_addr, &ep->com.remote_addr,
- sizeof(ep->com.remote_addr));
- event.private_data_len = ep->plen;
- event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
- event.provider_data = ep;
- /*
- * Until ird/ord negotiation via MPAv2 support is added, send max
- * supported values
- */
- event.ird = event.ord = 8;
- if (state_read(&ep->parent_ep->com) != DEAD) {
- get_ep(&ep->com);
- ep->parent_ep->com.cm_id->event_handler(
- ep->parent_ep->com.cm_id,
- &event);
- }
- put_ep(&ep->parent_ep->com);
- ep->parent_ep = NULL;
-}
-
-static void established_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- PDBG("%s ep %p\n", __func__, ep);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_ESTABLISHED;
- /*
- * Until ird/ord negotiation via MPAv2 support is added, send max
- * supported values
- */
- event.ird = event.ord = 8;
- if (ep->com.cm_id) {
- PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- }
-}
-
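-/*
- * Return RX credits for consumed streaming-mode data.  The CPL_RX_DATA_ACK
- * tells the hardware how many bytes the host has absorbed and forces an
- * ACK so the peer sees the window update promptly.
- */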
-static int update_rx_credits(struct iwch_ep *ep, u32 credits)
-{
- struct cpl_rx_data_ack *req;
- struct sk_buff *skb;
-
- PDBG("%s ep %p credits %u\n", __func__, ep, credits);
- skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
- if (!skb) {
- printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
- return 0;
- }
-
- req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
- req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
- skb->priority = CPL_PRIORITY_ACK;
- iwch_cxgb3_ofld_send(ep->com.tdev, skb);
- return credits;
-}
-
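-/*
- * MPA messages can arrive split across several CPL_RX_DATA segments, so
- * process_mpa_reply() and process_mpa_request() accumulate the payload in
- * ep->mpa_pkt and only parse once the full header plus private data has
- * been received.
- */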
-static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
-{
- struct mpa_message *mpa;
- u16 plen;
- struct iwch_qp_attributes attrs;
- enum iwch_qp_attr_mask mask;
- int err;
-
- PDBG("%s ep %p\n", __func__, ep);
-
- /*
- * Stop mpa timer. If it expired, then the state has
- * changed and we bail since ep_timeout already aborted
- * the connection.
- */
- stop_ep_timer(ep);
- if (state_read(&ep->com) != MPA_REQ_SENT)
- return;
-
- /*
- * If we get more than the supported amount of private data
- * then we must fail this connection.
- */
- if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
- err = -EINVAL;
- goto err;
- }
-
- /*
- * copy the new data into our accumulation buffer.
- */
- skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
- skb->len);
- ep->mpa_pkt_len += skb->len;
-
- /*
- * if we don't even have the mpa message, then bail.
- */
- if (ep->mpa_pkt_len < sizeof(*mpa))
- return;
- mpa = (struct mpa_message *) ep->mpa_pkt;
-
- /* Validate MPA header. */
- if (mpa->revision != mpa_rev) {
- err = -EPROTO;
- goto err;
- }
- if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
- err = -EPROTO;
- goto err;
- }
-
- plen = ntohs(mpa->private_data_size);
-
- /*
- * Fail if there's too much private data.
- */
- if (plen > MPA_MAX_PRIVATE_DATA) {
- err = -EPROTO;
- goto err;
- }
-
- /*
- * Fail if plen does not account for the packet size.
- */
- if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
- err = -EPROTO;
- goto err;
- }
-
- ep->plen = (u8) plen;
-
- /*
- * If we don't have all the pdata yet, then bail.
- * We'll continue processing when more data arrives.
- */
- if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
- return;
-
- if (mpa->flags & MPA_REJECT) {
- err = -ECONNREFUSED;
- goto err;
- }
-
- /*
- * If we get here we have accumulated the entire mpa
- * start reply message including private data. And
- * the MPA header is valid.
- */
- state_set(&ep->com, FPDU_MODE);
- ep->mpa_attr.initiator = 1;
- ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
- ep->mpa_attr.recv_marker_enabled = markers_enabled;
- ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
- ep->mpa_attr.version = mpa_rev;
- PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
- "xmit_marker_enabled=%d, version=%d\n", __func__,
- ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
- ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
-
- attrs.mpa_attr = ep->mpa_attr;
- attrs.max_ird = ep->ird;
- attrs.max_ord = ep->ord;
- attrs.llp_stream_handle = ep;
- attrs.next_state = IWCH_QP_STATE_RTS;
-
- mask = IWCH_QP_ATTR_NEXT_STATE |
- IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |
- IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;
-
- /* bind QP and TID with INIT_WR */
- err = iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, mask, &attrs, 1);
- if (err)
- goto err;
-
- if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
- iwch_post_zb_read(ep);
- }
-
- goto out;
-err:
- abort_connection(ep, skb, GFP_KERNEL);
-out:
- connect_reply_upcall(ep, err);
- return;
-}
-
-static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
-{
- struct mpa_message *mpa;
- u16 plen;
-
- PDBG("%s ep %p\n", __func__, ep);
-
- /*
- * Stop mpa timer. If it expired, then the state has
- * changed and we bail since ep_timeout already aborted
- * the connection.
- */
- stop_ep_timer(ep);
- if (state_read(&ep->com) != MPA_REQ_WAIT)
- return;
-
- /*
- * If we get more than the supported amount of private data
- * then we must fail this connection.
- */
- if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
-
- PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
-
- /*
- * Copy the new data into our accumulation buffer.
- */
- skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
- skb->len);
- ep->mpa_pkt_len += skb->len;
-
- /*
- * If we don't even have the mpa message, then bail.
- * We'll continue processing when more data arrives.
- */
- if (ep->mpa_pkt_len < sizeof(*mpa))
- return;
- PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
- mpa = (struct mpa_message *) ep->mpa_pkt;
-
- /*
- * Validate MPA Header.
- */
- if (mpa->revision != mpa_rev) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
-
- if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
-
- plen = ntohs(mpa->private_data_size);
-
- /*
- * Fail if there's too much private data.
- */
- if (plen > MPA_MAX_PRIVATE_DATA) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
-
- /*
- * Fail if plen does not account for the packet size.
- */
- if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
- ep->plen = (u8) plen;
-
- /*
- * If we don't have all the pdata yet, then bail.
- */
- if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
- return;
-
- /*
- * If we get here we have accumulated the entire mpa
- * start request message including private data.
- */
- ep->mpa_attr.initiator = 0;
- ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
- ep->mpa_attr.recv_marker_enabled = markers_enabled;
- ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
- ep->mpa_attr.version = mpa_rev;
- PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
- "xmit_marker_enabled=%d, version=%d\n", __func__,
- ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
- ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
-
- state_set(&ep->com, MPA_REQ_RCVD);
-
- /* drive upcall */
- connect_request_upcall(ep);
- return;
-}
-
-static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_rx_data *hdr = cplhdr(skb);
- unsigned int dlen = ntohs(hdr->len);
-
- PDBG("%s ep %p dlen %u\n", __func__, ep, dlen);
-
- skb_pull(skb, sizeof(*hdr));
- skb_trim(skb, dlen);
-
- ep->rcv_seq += dlen;
- BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
-
- switch (state_read(&ep->com)) {
- case MPA_REQ_SENT:
- process_mpa_reply(ep, skb);
- break;
- case MPA_REQ_WAIT:
- process_mpa_request(ep, skb);
- break;
- case MPA_REP_SENT:
- break;
- default:
- printk(KERN_ERR MOD "%s Unexpected streaming data."
- " ep %p state %d tid %d\n",
- __func__, ep, state_read(&ep->com), ep->hwtid);
-
- /*
- * The ep will timeout and inform the ULP of the failure.
- * See ep_timeout().
- */
- break;
- }
-
- /* update RX credits */
- update_rx_credits(ep, dlen);
-
- return CPL_RET_BUF_DONE;
-}
-
-/*
- * Upcall from the adapter indicating data has been transmitted.
- * For us it's just the single MPA request or reply. We can now free
- * the skb holding the mpa message.
- */
-static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_wr_ack *hdr = cplhdr(skb);
- unsigned int credits = ntohs(hdr->credits);
- unsigned long flags;
- int post_zb = 0;
-
- PDBG("%s ep %p credits %u\n", __func__, ep, credits);
-
- if (credits == 0) {
- PDBG("%s 0 credit ack ep %p state %u\n",
- __func__, ep, state_read(&ep->com));
- return CPL_RET_BUF_DONE;
- }
-
- spin_lock_irqsave(&ep->com.lock, flags);
- BUG_ON(credits != 1);
- dst_confirm(ep->dst);
- if (!ep->mpa_skb) {
- PDBG("%s rdma_init wr_ack ep %p state %u\n",
- __func__, ep, ep->com.state);
- if (ep->mpa_attr.initiator) {
- PDBG("%s initiator ep %p state %u\n",
- __func__, ep, ep->com.state);
- if (peer2peer && ep->com.state == FPDU_MODE)
- post_zb = 1;
- } else {
- PDBG("%s responder ep %p state %u\n",
- __func__, ep, ep->com.state);
- if (ep->com.state == MPA_REQ_RCVD) {
- ep->com.rpl_done = 1;
- wake_up(&ep->com.waitq);
- }
- }
- } else {
- PDBG("%s lsm ack ep %p state %u freeing skb\n",
- __func__, ep, ep->com.state);
- kfree_skb(ep->mpa_skb);
- ep->mpa_skb = NULL;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (post_zb)
- iwch_post_zb_read(ep);
- return CPL_RET_BUF_DONE;
-}
-
-static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- unsigned long flags;
- int release = 0;
-
- PDBG("%s ep %p\n", __func__, ep);
- BUG_ON(!ep);
-
- /*
- * We get 2 abort replies from the HW. The first one must
- * be ignored except for scribbling that we need one more.
- */
- if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) {
- return CPL_RET_BUF_DONE;
- }
-
- spin_lock_irqsave(&ep->com.lock, flags);
- switch (ep->com.state) {
- case ABORTING:
- close_complete_upcall(ep);
- __state_set(&ep->com, DEAD);
- release = 1;
- break;
- default:
- printk(KERN_ERR "%s ep %p state %d\n",
- __func__, ep, ep->com.state);
- break;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
-
- if (release)
- release_ep_resources(ep);
- return CPL_RET_BUF_DONE;
-}
-
-/*
- * Return whether a failed active open has allocated a TID
- */
-static inline int act_open_has_tid(int status)
-{
- return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
- status != CPL_ERR_ARP_MISS;
-}
-
-static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_act_open_rpl *rpl = cplhdr(skb);
-
- PDBG("%s ep %p status %u errno %d\n", __func__, ep, rpl->status,
- status2errno(rpl->status));
- connect_reply_upcall(ep, status2errno(rpl->status));
- state_set(&ep->com, DEAD);
- if (ep->com.tdev->type != T3A && act_open_has_tid(rpl->status))
- release_tid(ep->com.tdev, GET_TID(rpl), NULL);
- cxgb3_free_atid(ep->com.tdev, ep->atid);
- dst_release(ep->dst);
- l2t_release(ep->com.tdev, ep->l2t);
- put_ep(&ep->com);
- return CPL_RET_BUF_DONE;
-}
-
-static int listen_start(struct iwch_listen_ep *ep)
-{
- struct sk_buff *skb;
- struct cpl_pass_open_req *req;
-
- PDBG("%s ep %p\n", __func__, ep);
- skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
- if (!skb) {
- printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");
- return -ENOMEM;
- }
-
- req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
- req->local_port = ep->com.local_addr.sin_port;
- req->local_ip = ep->com.local_addr.sin_addr.s_addr;
- req->peer_port = 0;
- req->peer_ip = 0;
- req->peer_netmask = 0;
- req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
- req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
- req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));
-
- skb->priority = 1;
- return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
-}
-
-static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_listen_ep *ep = ctx;
- struct cpl_pass_open_rpl *rpl = cplhdr(skb);
-
- PDBG("%s ep %p status %d error %d\n", __func__, ep,
- rpl->status, status2errno(rpl->status));
- ep->com.rpl_err = status2errno(rpl->status);
- ep->com.rpl_done = 1;
- wake_up(&ep->com.waitq);
-
- return CPL_RET_BUF_DONE;
-}
-
-static int listen_stop(struct iwch_listen_ep *ep)
-{
- struct sk_buff *skb;
- struct cpl_close_listserv_req *req;
-
- PDBG("%s ep %p\n", __func__, ep);
- skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
- if (!skb) {
- printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
- return -ENOMEM;
- }
- req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- req->cpu_idx = 0;
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
- skb->priority = 1;
- return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
-}
-
-static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
- void *ctx)
-{
- struct iwch_listen_ep *ep = ctx;
- struct cpl_close_listserv_rpl *rpl = cplhdr(skb);
-
- PDBG("%s ep %p\n", __func__, ep);
- ep->com.rpl_err = status2errno(rpl->status);
- ep->com.rpl_done = 1;
- wake_up(&ep->com.waitq);
- return CPL_RET_BUF_DONE;
-}
-
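-/*
- * Accept an incoming connection request.  The request skb is reused to
- * carry the CPL_PASS_ACCEPT_RPL, built with the same opt0/opt2 tuning
- * (window scale, MSS index, congestion control flavor) as an active open.
- */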
-static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
-{
- struct cpl_pass_accept_rpl *rpl;
- unsigned int mtu_idx;
- u32 opt0h, opt0l, opt2;
- int wscale;
-
- PDBG("%s ep %p\n", __func__, ep);
- BUG_ON(skb_cloned(skb));
- skb_trim(skb, sizeof(*rpl));
- skb_get(skb);
- mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
- wscale = compute_wscale(rcv_win);
- opt0h = V_NAGLE(0) |
- V_NO_CONG(nocong) |
- V_KEEP_ALIVE(1) |
- F_TCAM_BYPASS |
- V_WND_SCALE(wscale) |
- V_MSS_IDX(mtu_idx) |
- V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
- opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
- opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
- V_CONG_CONTROL_FLAVOR(cong_flavor);
-
- rpl = cplhdr(skb);
- rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
- rpl->peer_ip = peer_ip;
- rpl->opt0h = htonl(opt0h);
- rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
- rpl->opt2 = htonl(opt2);
- rpl->rsvd = rpl->opt2; /* workaround for HW bug */
- skb->priority = CPL_PRIORITY_SETUP;
- iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-
- return;
-}
-
-static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
- struct sk_buff *skb)
-{
- PDBG("%s t3cdev %p tid %u peer_ip %x\n", __func__, tdev, hwtid,
- peer_ip);
- BUG_ON(skb_cloned(skb));
- skb_trim(skb, sizeof(struct cpl_tid_release));
- skb_get(skb);
-
- if (tdev->type != T3A)
- release_tid(tdev, hwtid, skb);
- else {
- struct cpl_pass_accept_rpl *rpl;
-
- rpl = cplhdr(skb);
- skb->priority = CPL_PRIORITY_SETUP;
- rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
- hwtid));
- rpl->peer_ip = peer_ip;
- rpl->opt0h = htonl(F_TCAM_BYPASS);
- rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
- rpl->opt2 = 0;
- rpl->rsvd = rpl->opt2;
- iwch_cxgb3_ofld_send(tdev, skb);
- }
-}
-
-static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *child_ep, *parent_ep = ctx;
- struct cpl_pass_accept_req *req = cplhdr(skb);
- unsigned int hwtid = GET_TID(req);
- struct dst_entry *dst;
- struct l2t_entry *l2t;
- struct rtable *rt;
- struct iff_mac tim;
-
- PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
-
- if (state_read(&parent_ep->com) != LISTEN) {
- printk(KERN_ERR "%s - listening ep not in LISTEN\n",
- __func__);
- goto reject;
- }
-
- /*
- * Find the netdev for this connection request.
- */
- tim.mac_addr = req->dst_mac;
- tim.vlan_tag = ntohs(req->vlan_tag);
- if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
- printk(KERN_ERR "%s bad dst mac %pM\n",
- __func__, req->dst_mac);
- goto reject;
- }
-
- /* Find output route */
- rt = find_route(tdev,
- req->local_ip,
- req->peer_ip,
- req->local_port,
- req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
- if (!rt) {
- printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
- __func__);
- goto reject;
- }
- dst = &rt->dst;
- l2t = t3_l2t_get(tdev, dst, NULL, &req->peer_ip);
- if (!l2t) {
- printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
- __func__);
- dst_release(dst);
- goto reject;
- }
- child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
- if (!child_ep) {
- printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
- __func__);
- l2t_release(tdev, l2t);
- dst_release(dst);
- goto reject;
- }
- state_set(&child_ep->com, CONNECTING);
- child_ep->com.tdev = tdev;
- child_ep->com.cm_id = NULL;
- child_ep->com.local_addr.sin_family = PF_INET;
- child_ep->com.local_addr.sin_port = req->local_port;
- child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
- child_ep->com.remote_addr.sin_family = PF_INET;
- child_ep->com.remote_addr.sin_port = req->peer_port;
- child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
- get_ep(&parent_ep->com);
- child_ep->parent_ep = parent_ep;
- child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
- child_ep->l2t = l2t;
- child_ep->dst = dst;
- child_ep->hwtid = hwtid;
- init_timer(&child_ep->timer);
- cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
- accept_cr(child_ep, req->peer_ip, skb);
- goto out;
-reject:
- reject_cr(tdev, hwtid, req->peer_ip, skb);
-out:
- return CPL_RET_BUF_DONE;
-}
-
-static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_pass_establish *req = cplhdr(skb);
-
- PDBG("%s ep %p\n", __func__, ep);
- ep->snd_seq = ntohl(req->snd_isn);
- ep->rcv_seq = ntohl(req->rcv_isn);
-
- set_emss(ep, ntohs(req->tcp_opt));
-
- dst_confirm(ep->dst);
- state_set(&ep->com, MPA_REQ_WAIT);
- start_ep_timer(ep);
-
- return CPL_RET_BUF_DONE;
-}
-
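-/*
- * A graceful teardown walks CLOSING -> MORIBUND -> DEAD: whichever of the
- * peer's close and our CLOSE_CON_RPL arrives first moves the endpoint
- * from CLOSING to MORIBUND, and the second one delivers the close
- * complete upcall, marks it DEAD and releases its resources.
- */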
-static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct iwch_qp_attributes attrs;
- unsigned long flags;
- int disconnect = 1;
- int release = 0;
-
- PDBG("%s ep %p\n", __func__, ep);
- dst_confirm(ep->dst);
-
- spin_lock_irqsave(&ep->com.lock, flags);
- switch (ep->com.state) {
- case MPA_REQ_WAIT:
- __state_set(&ep->com, CLOSING);
- break;
- case MPA_REQ_SENT:
- __state_set(&ep->com, CLOSING);
- connect_reply_upcall(ep, -ECONNRESET);
- break;
- case MPA_REQ_RCVD:
-
- /*
- * We're gonna mark this puppy DEAD, but keep
- * the reference on it until the ULP accepts or
- * rejects the CR. Also wake up anyone waiting
- * in rdma connection migration (see iwch_accept_cr()).
- */
- __state_set(&ep->com, CLOSING);
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- PDBG("waking up ep %p\n", ep);
- wake_up(&ep->com.waitq);
- break;
- case MPA_REP_SENT:
- __state_set(&ep->com, CLOSING);
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- PDBG("waking up ep %p\n", ep);
- wake_up(&ep->com.waitq);
- break;
- case FPDU_MODE:
- start_ep_timer(ep);
- __state_set(&ep->com, CLOSING);
- attrs.next_state = IWCH_QP_STATE_CLOSING;
- iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
- IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
- peer_close_upcall(ep);
- break;
- case ABORTING:
- disconnect = 0;
- break;
- case CLOSING:
- __state_set(&ep->com, MORIBUND);
- disconnect = 0;
- break;
- case MORIBUND:
- stop_ep_timer(ep);
- if (ep->com.cm_id && ep->com.qp) {
- attrs.next_state = IWCH_QP_STATE_IDLE;
- iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
- IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
- }
- close_complete_upcall(ep);
- __state_set(&ep->com, DEAD);
- release = 1;
- disconnect = 0;
- break;
- case DEAD:
- disconnect = 0;
- break;
- default:
- BUG_ON(1);
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (disconnect)
- iwch_ep_disconnect(ep, 0, GFP_KERNEL);
- if (release)
- release_ep_resources(ep);
- return CPL_RET_BUF_DONE;
-}
-
-/*
- * Returns whether an ABORT_REQ_RSS message is a negative advice.
- */
-static int is_neg_adv_abort(unsigned int status)
-{
- return status == CPL_ERR_RTX_NEG_ADVICE ||
- status == CPL_ERR_PERSIST_NEG_ADVICE;
-}
-
-static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct cpl_abort_req_rss *req = cplhdr(skb);
- struct iwch_ep *ep = ctx;
- struct cpl_abort_rpl *rpl;
- struct sk_buff *rpl_skb;
- struct iwch_qp_attributes attrs;
- int ret;
- int release = 0;
- unsigned long flags;
-
- if (is_neg_adv_abort(req->status)) {
- PDBG("%s neg_adv_abort ep %p tid %d\n", __func__, ep,
- ep->hwtid);
- t3_l2t_send_event(ep->com.tdev, ep->l2t);
- return CPL_RET_BUF_DONE;
- }
-
- /*
- * We get 2 peer aborts from the HW. The first one must
- * be ignored except for scribbling that we need one more.
- */
- if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) {
- return CPL_RET_BUF_DONE;
- }
-
- spin_lock_irqsave(&ep->com.lock, flags);
- PDBG("%s ep %p state %u\n", __func__, ep, ep->com.state);
- switch (ep->com.state) {
- case CONNECTING:
- break;
- case MPA_REQ_WAIT:
- stop_ep_timer(ep);
- break;
- case MPA_REQ_SENT:
- stop_ep_timer(ep);
- connect_reply_upcall(ep, -ECONNRESET);
- break;
- case MPA_REP_SENT:
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- PDBG("waking up ep %p\n", ep);
- wake_up(&ep->com.waitq);
- break;
- case MPA_REQ_RCVD:
-
- /*
- * We're gonna mark this puppy DEAD, but keep
- * the reference on it until the ULP accepts or
- * rejects the CR. Also wake up anyone waiting
- * in rdma connection migration (see iwch_accept_cr()).
- */
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- PDBG("waking up ep %p\n", ep);
- wake_up(&ep->com.waitq);
- break;
- case MORIBUND:
- case CLOSING:
- stop_ep_timer(ep);
- /*FALLTHROUGH*/
- case FPDU_MODE:
- if (ep->com.cm_id && ep->com.qp) {
- attrs.next_state = IWCH_QP_STATE_ERROR;
- ret = iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- if (ret)
- printk(KERN_ERR MOD
- "%s - qp <- error failed!\n",
- __func__);
- }
- peer_abort_upcall(ep);
- break;
- case ABORTING:
- break;
- case DEAD:
- PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
- spin_unlock_irqrestore(&ep->com.lock, flags);
- return CPL_RET_BUF_DONE;
- default:
- BUG_ON(1);
- break;
- }
- dst_confirm(ep->dst);
- if (ep->com.state != ABORTING) {
- __state_set(&ep->com, DEAD);
- release = 1;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
-
- rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
- if (!rpl_skb) {
- printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
- __func__);
- release = 1;
- goto out;
- }
- rpl_skb->priority = CPL_PRIORITY_DATA;
- rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
- rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
- rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
- rpl->cmd = CPL_ABORT_NO_RST;
- iwch_cxgb3_ofld_send(ep->com.tdev, rpl_skb);
-out:
- if (release)
- release_ep_resources(ep);
- return CPL_RET_BUF_DONE;
-}
-
-static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct iwch_qp_attributes attrs;
- unsigned long flags;
- int release = 0;
-
- PDBG("%s ep %p\n", __func__, ep);
- BUG_ON(!ep);
-
- /* The cm_id may be null if we failed to connect */
- spin_lock_irqsave(&ep->com.lock, flags);
- switch (ep->com.state) {
- case CLOSING:
- __state_set(&ep->com, MORIBUND);
- break;
- case MORIBUND:
- stop_ep_timer(ep);
- if ((ep->com.cm_id) && (ep->com.qp)) {
- attrs.next_state = IWCH_QP_STATE_IDLE;
- iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp,
- IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- }
- close_complete_upcall(ep);
- __state_set(&ep->com, DEAD);
- release = 1;
- break;
- case ABORTING:
- case DEAD:
- break;
- default:
- BUG_ON(1);
- break;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (release)
- release_ep_resources(ep);
- return CPL_RET_BUF_DONE;
-}
-
-/*
- * T3A does 3 things when a TERM is received:
- * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
- * 2) generate an async event on the QP with the TERMINATE opcode
- * 3) post a TERMINATE opcode cqe into the associated CQ.
- *
- * For (1), we save the message in the qp for later consumer consumption.
- * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
- * For (3), we toss the CQE in cxio_poll_cq().
- *
- * terminate() handles case (1)...
- */
-static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
-
- if (state_read(&ep->com) != FPDU_MODE)
- return CPL_RET_BUF_DONE;
-
- PDBG("%s ep %p\n", __func__, ep);
- skb_pull(skb, sizeof(struct cpl_rdma_terminate));
- PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
- skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
- skb->len);
- ep->com.qp->attr.terminate_msg_len = skb->len;
- ep->com.qp->attr.is_terminate_local = 0;
- return CPL_RET_BUF_DONE;
-}
-
-static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct cpl_rdma_ec_status *rep = cplhdr(skb);
- struct iwch_ep *ep = ctx;
-
- PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid,
- rep->status);
- if (rep->status) {
- struct iwch_qp_attributes attrs;
-
- printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
- __func__, ep->hwtid);
- stop_ep_timer(ep);
- attrs.next_state = IWCH_QP_STATE_ERROR;
- iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- abort_connection(ep, NULL, GFP_KERNEL);
- }
- return CPL_RET_BUF_DONE;
-}
-
-static void ep_timeout(unsigned long arg)
-{
- struct iwch_ep *ep = (struct iwch_ep *)arg;
- struct iwch_qp_attributes attrs;
- unsigned long flags;
- int abort = 1;
-
- spin_lock_irqsave(&ep->com.lock, flags);
- PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
- ep->com.state);
- switch (ep->com.state) {
- case MPA_REQ_SENT:
- __state_set(&ep->com, ABORTING);
- connect_reply_upcall(ep, -ETIMEDOUT);
- break;
- case MPA_REQ_WAIT:
- __state_set(&ep->com, ABORTING);
- break;
- case CLOSING:
- case MORIBUND:
- if (ep->com.cm_id && ep->com.qp) {
- attrs.next_state = IWCH_QP_STATE_ERROR;
- iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- }
- __state_set(&ep->com, ABORTING);
- break;
- default:
- WARN(1, "%s unexpected state ep %p state %u\n",
- __func__, ep, ep->com.state);
- abort = 0;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (abort)
- abort_connection(ep, NULL, GFP_ATOMIC);
- put_ep(&ep->com);
-}
-
-int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
-{
- int err;
- struct iwch_ep *ep = to_ep(cm_id);
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-
- if (state_read(&ep->com) == DEAD) {
- put_ep(&ep->com);
- return -ECONNRESET;
- }
- BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
- if (mpa_rev == 0)
- abort_connection(ep, NULL, GFP_KERNEL);
- else {
- err = send_mpa_reject(ep, pdata, pdata_len);
- err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
- }
- put_ep(&ep->com);
- return 0;
-}
-
-int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
-{
- int err;
- struct iwch_qp_attributes attrs;
- enum iwch_qp_attr_mask mask;
- struct iwch_ep *ep = to_ep(cm_id);
- struct iwch_dev *h = to_iwch_dev(cm_id->device);
- struct iwch_qp *qp = get_qhp(h, conn_param->qpn);
-
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- if (state_read(&ep->com) == DEAD) {
- err = -ECONNRESET;
- goto err;
- }
-
- BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
- BUG_ON(!qp);
-
- if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
- (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
- abort_connection(ep, NULL, GFP_KERNEL);
- err = -EINVAL;
- goto err;
- }
-
- cm_id->add_ref(cm_id);
- ep->com.cm_id = cm_id;
- ep->com.qp = qp;
-
- ep->ird = conn_param->ird;
- ep->ord = conn_param->ord;
-
- if (peer2peer && ep->ird == 0)
- ep->ird = 1;
-
- PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
-
- /* bind QP to EP and move to RTS */
- attrs.mpa_attr = ep->mpa_attr;
- attrs.max_ird = ep->ird;
- attrs.max_ord = ep->ord;
- attrs.llp_stream_handle = ep;
- attrs.next_state = IWCH_QP_STATE_RTS;
-
- /* bind QP and TID with INIT_WR */
- mask = IWCH_QP_ATTR_NEXT_STATE |
- IWCH_QP_ATTR_LLP_STREAM_HANDLE |
- IWCH_QP_ATTR_MPA_ATTR |
- IWCH_QP_ATTR_MAX_IRD |
- IWCH_QP_ATTR_MAX_ORD;
-
- err = iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, mask, &attrs, 1);
- if (err)
- goto err1;
-
- /* if needed, wait for wr_ack */
- if (iwch_rqes_posted(qp)) {
- wait_event(ep->com.waitq, ep->com.rpl_done);
- err = ep->com.rpl_err;
- if (err)
- goto err1;
- }
-
- err = send_mpa_reply(ep, conn_param->private_data,
- conn_param->private_data_len);
- if (err)
- goto err1;
-
- state_set(&ep->com, FPDU_MODE);
- established_upcall(ep);
- put_ep(&ep->com);
- return 0;
-err1:
- ep->com.cm_id = NULL;
- ep->com.qp = NULL;
- cm_id->rem_ref(cm_id);
-err:
- put_ep(&ep->com);
- return err;
-}
-
-static int is_loopback_dst(struct iw_cm_id *cm_id)
-{
- struct net_device *dev;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
-
- dev = ip_dev_find(&init_net, raddr->sin_addr.s_addr);
- if (!dev)
- return 0;
- dev_put(dev);
- return 1;
-}
-
-int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
-{
- struct iwch_dev *h = to_iwch_dev(cm_id->device);
- struct iwch_ep *ep;
- struct rtable *rt;
- int err = 0;
- struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
-
- if (cm_id->remote_addr.ss_family != PF_INET) {
- err = -ENOSYS;
- goto out;
- }
-
- if (is_loopback_dst(cm_id)) {
- err = -ENOSYS;
- goto out;
- }
-
- ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
- if (!ep) {
- printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
- err = -ENOMEM;
- goto out;
- }
- init_timer(&ep->timer);
- ep->plen = conn_param->private_data_len;
- if (ep->plen)
- memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
- conn_param->private_data, ep->plen);
- ep->ird = conn_param->ird;
- ep->ord = conn_param->ord;
-
- if (peer2peer && ep->ord == 0)
- ep->ord = 1;
-
- ep->com.tdev = h->rdev.t3cdev_p;
-
- cm_id->add_ref(cm_id);
- ep->com.cm_id = cm_id;
- ep->com.qp = get_qhp(h, conn_param->qpn);
- BUG_ON(!ep->com.qp);
- PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
- ep->com.qp, cm_id);
-
- /*
- * Allocate an active TID to initiate a TCP connection.
- */
- ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
- if (ep->atid == -1) {
- printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
- err = -ENOMEM;
- goto fail2;
- }
-
- /* find a route */
- rt = find_route(h->rdev.t3cdev_p, laddr->sin_addr.s_addr,
- raddr->sin_addr.s_addr, laddr->sin_port,
- raddr->sin_port, IPTOS_LOWDELAY);
- if (!rt) {
- printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
- err = -EHOSTUNREACH;
- goto fail3;
- }
- ep->dst = &rt->dst;
- ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL,
- &raddr->sin_addr.s_addr);
- if (!ep->l2t) {
- printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
- err = -ENOMEM;
- goto fail4;
- }
-
- state_set(&ep->com, CONNECTING);
- ep->tos = IPTOS_LOWDELAY;
- memcpy(&ep->com.local_addr, &cm_id->local_addr,
- sizeof(ep->com.local_addr));
- memcpy(&ep->com.remote_addr, &cm_id->remote_addr,
- sizeof(ep->com.remote_addr));
-
- /* send connect request to rnic */
- err = send_connect(ep);
- if (!err)
- goto out;
-
- l2t_release(h->rdev.t3cdev_p, ep->l2t);
-fail4:
- dst_release(ep->dst);
-fail3:
- cxgb3_free_atid(ep->com.tdev, ep->atid);
-fail2:
- cm_id->rem_ref(cm_id);
- put_ep(&ep->com);
-out:
- return err;
-}
-
-int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
-{
- int err = 0;
- struct iwch_dev *h = to_iwch_dev(cm_id->device);
- struct iwch_listen_ep *ep;
-
- might_sleep();
-
- if (cm_id->local_addr.ss_family != PF_INET) {
- err = -ENOSYS;
- goto fail1;
- }
-
- ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
- if (!ep) {
- printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
- err = -ENOMEM;
- goto fail1;
- }
- PDBG("%s ep %p\n", __func__, ep);
- ep->com.tdev = h->rdev.t3cdev_p;
- cm_id->add_ref(cm_id);
- ep->com.cm_id = cm_id;
- ep->backlog = backlog;
- memcpy(&ep->com.local_addr, &cm_id->local_addr,
- sizeof(ep->com.local_addr));
-
- /*
- * Allocate a server TID.
- */
- ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
- if (ep->stid == -1) {
- printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
- err = -ENOMEM;
- goto fail2;
- }
-
- state_set(&ep->com, LISTEN);
- err = listen_start(ep);
- if (err)
- goto fail3;
-
- /* wait for pass_open_rpl */
- wait_event(ep->com.waitq, ep->com.rpl_done);
- err = ep->com.rpl_err;
- if (!err) {
- cm_id->provider_data = ep;
- goto out;
- }
-fail3:
- cxgb3_free_stid(ep->com.tdev, ep->stid);
-fail2:
- cm_id->rem_ref(cm_id);
- put_ep(&ep->com);
-fail1:
-out:
- return err;
-}
-
-int iwch_destroy_listen(struct iw_cm_id *cm_id)
-{
- int err;
- struct iwch_listen_ep *ep = to_listen_ep(cm_id);
-
- PDBG("%s ep %p\n", __func__, ep);
-
- might_sleep();
- state_set(&ep->com, DEAD);
- ep->com.rpl_done = 0;
- ep->com.rpl_err = 0;
- err = listen_stop(ep);
- if (err)
- goto done;
- wait_event(ep->com.waitq, ep->com.rpl_done);
- cxgb3_free_stid(ep->com.tdev, ep->stid);
-done:
- err = ep->com.rpl_err;
- cm_id->rem_ref(cm_id);
- put_ep(&ep->com);
- return err;
-}
-
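-/*
- * Tear down a connection.  An abrupt disconnect sends an abort (RST); a
- * graceful one sends a half-close and starts the ep timer.  If the rdev
- * has hit a fatal error nothing is sent: the endpoint goes straight to
- * DEAD with a close complete upcall and its resources are released.
- */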
-int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
-{
- int ret = 0;
- unsigned long flags;
- int close = 0;
- int fatal = 0;
- struct t3cdev *tdev;
- struct cxio_rdev *rdev;
-
- spin_lock_irqsave(&ep->com.lock, flags);
-
- PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
- states[ep->com.state], abrupt);
-
- tdev = (struct t3cdev *)ep->com.tdev;
- rdev = (struct cxio_rdev *)tdev->ulp;
- if (cxio_fatal_error(rdev)) {
- fatal = 1;
- close_complete_upcall(ep);
- ep->com.state = DEAD;
- }
- switch (ep->com.state) {
- case MPA_REQ_WAIT:
- case MPA_REQ_SENT:
- case MPA_REQ_RCVD:
- case MPA_REP_SENT:
- case FPDU_MODE:
- close = 1;
- if (abrupt)
- ep->com.state = ABORTING;
- else {
- ep->com.state = CLOSING;
- start_ep_timer(ep);
- }
- set_bit(CLOSE_SENT, &ep->com.flags);
- break;
- case CLOSING:
- if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
- close = 1;
- if (abrupt) {
- stop_ep_timer(ep);
- ep->com.state = ABORTING;
- } else
- ep->com.state = MORIBUND;
- }
- break;
- case MORIBUND:
- case ABORTING:
- case DEAD:
- PDBG("%s ignoring disconnect ep %p state %u\n",
- __func__, ep, ep->com.state);
- break;
- default:
- BUG();
- break;
- }
-
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (close) {
- if (abrupt)
- ret = send_abort(ep, NULL, gfp);
- else
- ret = send_halfclose(ep, gfp);
- if (ret)
- fatal = 1;
- }
- if (fatal)
- release_ep_resources(ep);
- return ret;
-}
-
-int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
- struct l2t_entry *l2t)
-{
- struct iwch_ep *ep = ctx;
-
- if (ep->dst != old)
- return 0;
-
- PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
- l2t);
- dst_hold(new);
- l2t_release(ep->com.tdev, ep->l2t);
- ep->l2t = l2t;
- dst_release(old);
- ep->dst = new;
- return 1;
-}
-
-/*
- * All the CM events are handled on a work queue to have a safe context.
- * These are the real handlers that are called from the work queue.
- */
-static const cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS] = {
- [CPL_ACT_ESTABLISH] = act_establish,
- [CPL_ACT_OPEN_RPL] = act_open_rpl,
- [CPL_RX_DATA] = rx_data,
- [CPL_TX_DMA_ACK] = tx_ack,
- [CPL_ABORT_RPL_RSS] = abort_rpl,
- [CPL_ABORT_RPL] = abort_rpl,
- [CPL_PASS_OPEN_RPL] = pass_open_rpl,
- [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
- [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
- [CPL_PASS_ESTABLISH] = pass_establish,
- [CPL_PEER_CLOSE] = peer_close,
- [CPL_ABORT_REQ_RSS] = peer_abort,
- [CPL_CLOSE_CON_RPL] = close_con_rpl,
- [CPL_RDMA_TERMINATE] = terminate,
- [CPL_RDMA_EC_STATUS] = ec_status,
-};
-
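-/*
- * process_work() drains rxq on the single-threaded workqueue: it recovers
- * the endpoint and t3cdev pointers that sched() saved in skb->cb, calls
- * the handler indexed by the CPL opcode stashed in skb->csum, and drops
- * the endpoint reference taken in sched().
- */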
-static void process_work(struct work_struct *work)
-{
- struct sk_buff *skb = NULL;
- void *ep;
- struct t3cdev *tdev;
- int ret;
-
- while ((skb = skb_dequeue(&rxq))) {
- ep = *((void **) (skb->cb));
- tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
- ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
- if (ret & CPL_RET_BUF_DONE)
- kfree_skb(skb);
-
- /*
- * ep was referenced in sched(), and is freed here.
- */
- put_ep((struct iwch_ep_common *)ep);
- }
-}
-
-static DECLARE_WORK(skb_work, process_work);
-
-static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep_common *epc = ctx;
-
- get_ep(epc);
-
- /*
- * Save ctx and tdev in the skb->cb area.
- */
- *((void **) skb->cb) = ctx;
- *((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;
-
- /*
- * Queue the skb and schedule the worker thread.
- */
- skb_queue_tail(&rxq, skb);
- queue_work(workq, &skb_work);
- return 0;
-}
-
-static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
-
- if (rpl->status != CPL_ERR_NONE) {
- printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
- "for tid %u\n", rpl->status, GET_TID(rpl));
- }
- return CPL_RET_BUF_DONE;
-}
-
-/*
- * All upcalls from the T3 Core go to sched() to schedule the
- * processing on a work queue.
- */
-cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS] = {
- [CPL_ACT_ESTABLISH] = sched,
- [CPL_ACT_OPEN_RPL] = sched,
- [CPL_RX_DATA] = sched,
- [CPL_TX_DMA_ACK] = sched,
- [CPL_ABORT_RPL_RSS] = sched,
- [CPL_ABORT_RPL] = sched,
- [CPL_PASS_OPEN_RPL] = sched,
- [CPL_CLOSE_LISTSRV_RPL] = sched,
- [CPL_PASS_ACCEPT_REQ] = sched,
- [CPL_PASS_ESTABLISH] = sched,
- [CPL_PEER_CLOSE] = sched,
- [CPL_CLOSE_CON_RPL] = sched,
- [CPL_ABORT_REQ_RSS] = sched,
- [CPL_RDMA_TERMINATE] = sched,
- [CPL_RDMA_EC_STATUS] = sched,
- [CPL_SET_TCB_RPL] = set_tcb_rpl,
-};
-
-int __init iwch_cm_init(void)
-{
- skb_queue_head_init(&rxq);
-
- workq = create_singlethread_workqueue("iw_cxgb3");
- if (!workq)
- return -ENOMEM;
-
- return 0;
-}
-
-void __exit iwch_cm_term(void)
-{
- flush_workqueue(workq);
- destroy_workqueue(workq);
-}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h
deleted file mode 100644
index b9efadfffb4f..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.h
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _IWCH_CM_H_
-#define _IWCH_CM_H_
-
-#include <linux/inet.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/kref.h>
-
-#include <rdma/ib_verbs.h>
-#include <rdma/iw_cm.h>
-
-#include "cxgb3_offload.h"
-#include "iwch_provider.h"
-
-#define MPA_KEY_REQ "MPA ID Req Frame"
-#define MPA_KEY_REP "MPA ID Rep Frame"
-
-#define MPA_MAX_PRIVATE_DATA 256
-#define MPA_REV 0 /* XXX - amso1100 uses rev 0 ! */
-#define MPA_REJECT 0x20
-#define MPA_CRC 0x40
-#define MPA_MARKERS 0x80
-#define MPA_FLAGS_MASK 0xE0
-
-#define put_ep(ep) { \
- PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \
- ep, atomic_read(&((ep)->kref.refcount))); \
- WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
- kref_put(&((ep)->kref), __free_ep); \
-}
-
-#define get_ep(ep) { \
- PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
- ep, atomic_read(&((ep)->kref.refcount))); \
- kref_get(&((ep)->kref)); \
-}
-
-struct mpa_message {
- u8 key[16];
- u8 flags;
- u8 revision;
- __be16 private_data_size;
- u8 private_data[0];
-};
-
-struct terminate_message {
- u8 layer_etype;
- u8 ecode;
- __be16 hdrct_rsvd;
- u8 len_hdrs[0];
-};
-
-#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
-
-enum iwch_layers_types {
- LAYER_RDMAP = 0x00,
- LAYER_DDP = 0x10,
- LAYER_MPA = 0x20,
- RDMAP_LOCAL_CATA = 0x00,
- RDMAP_REMOTE_PROT = 0x01,
- RDMAP_REMOTE_OP = 0x02,
- DDP_LOCAL_CATA = 0x00,
- DDP_TAGGED_ERR = 0x01,
- DDP_UNTAGGED_ERR = 0x02,
- DDP_LLP = 0x03
-};
-
-enum iwch_rdma_ecodes {
- RDMAP_INV_STAG = 0x00,
- RDMAP_BASE_BOUNDS = 0x01,
- RDMAP_ACC_VIOL = 0x02,
- RDMAP_STAG_NOT_ASSOC = 0x03,
- RDMAP_TO_WRAP = 0x04,
- RDMAP_INV_VERS = 0x05,
- RDMAP_INV_OPCODE = 0x06,
- RDMAP_STREAM_CATA = 0x07,
- RDMAP_GLOBAL_CATA = 0x08,
- RDMAP_CANT_INV_STAG = 0x09,
- RDMAP_UNSPECIFIED = 0xff
-};
-
-enum iwch_ddp_ecodes {
- DDPT_INV_STAG = 0x00,
- DDPT_BASE_BOUNDS = 0x01,
- DDPT_STAG_NOT_ASSOC = 0x02,
- DDPT_TO_WRAP = 0x03,
- DDPT_INV_VERS = 0x04,
- DDPU_INV_QN = 0x01,
- DDPU_INV_MSN_NOBUF = 0x02,
- DDPU_INV_MSN_RANGE = 0x03,
- DDPU_INV_MO = 0x04,
- DDPU_MSG_TOOBIG = 0x05,
- DDPU_INV_VERS = 0x06
-};
-
-enum iwch_mpa_ecodes {
- MPA_CRC_ERR = 0x02,
- MPA_MARKER_ERR = 0x03
-};
-
-enum iwch_ep_state {
- IDLE = 0,
- LISTEN,
- CONNECTING,
- MPA_REQ_WAIT,
- MPA_REQ_SENT,
- MPA_REQ_RCVD,
- MPA_REP_SENT,
- FPDU_MODE,
- ABORTING,
- CLOSING,
- MORIBUND,
- DEAD,
-};
-
-enum iwch_ep_flags {
- PEER_ABORT_IN_PROGRESS = 0,
- ABORT_REQ_IN_PROGRESS = 1,
- RELEASE_RESOURCES = 2,
- CLOSE_SENT = 3,
-};
-
-struct iwch_ep_common {
- struct iw_cm_id *cm_id;
- struct iwch_qp *qp;
- struct t3cdev *tdev;
- enum iwch_ep_state state;
- struct kref kref;
- spinlock_t lock;
- struct sockaddr_in local_addr;
- struct sockaddr_in remote_addr;
- wait_queue_head_t waitq;
- int rpl_done;
- int rpl_err;
- unsigned long flags;
-};
-
-struct iwch_listen_ep {
- struct iwch_ep_common com;
- unsigned int stid;
- int backlog;
-};
-
-struct iwch_ep {
- struct iwch_ep_common com;
- struct iwch_ep *parent_ep;
- struct timer_list timer;
- unsigned int atid;
- u32 hwtid;
- u32 snd_seq;
- u32 rcv_seq;
- struct l2t_entry *l2t;
- struct dst_entry *dst;
- struct sk_buff *mpa_skb;
- struct iwch_mpa_attributes mpa_attr;
- unsigned int mpa_pkt_len;
- u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
- u8 tos;
- u16 emss;
- u16 plen;
- u32 ird;
- u32 ord;
-};
-
-static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id)
-{
- return cm_id->provider_data;
-}
-
-static inline struct iwch_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
-{
- return cm_id->provider_data;
-}
-
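-/*
- * Compute the TCP window scale needed to advertise 'win' bytes through a
- * 16-bit window field: anything up to 65535 bytes needs no scaling
- * (wscale = 0), while e.g. a 200000-byte window yields wscale = 2, since
- * 65535 << 2 = 262140 >= 200000.
- */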
-static inline int compute_wscale(int win)
-{
- int wscale = 0;
-
- while (wscale < 14 && (65535<<wscale) < win)
- wscale++;
- return wscale;
-}
-
-/* CM prototypes */
-
-int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
-int iwch_create_listen(struct iw_cm_id *cm_id, int backlog);
-int iwch_destroy_listen(struct iw_cm_id *cm_id);
-int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
-int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
-int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp);
-int iwch_quiesce_tid(struct iwch_ep *ep);
-int iwch_resume_tid(struct iwch_ep *ep);
-void __free_ep(struct kref *kref);
-void iwch_rearp(struct iwch_ep *ep);
-int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, struct l2t_entry *l2t);
-
-int __init iwch_cm_init(void);
-void __exit iwch_cm_term(void);
-extern int peer2peer;
-
-#endif /* _IWCH_CM_H_ */
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cq.c b/drivers/infiniband/hw/cxgb3/iwch_cq.c
deleted file mode 100644
index cf5474ae68ff..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_cq.c
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "iwch_provider.h"
-#include "iwch.h"
-
-/*
- * Get one cq entry from cxio and map it to openib.
- *
- * Returns:
- * 0 EMPTY;
- * 1 cqe returned
- * -EAGAIN caller must try again
- * any other -errno fatal error
- */
-static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
- struct ib_wc *wc)
-{
- struct iwch_qp *qhp = NULL;
- struct t3_cqe cqe, *rd_cqe;
- struct t3_wq *wq;
- u32 credit = 0;
- u8 cqe_flushed;
- u64 cookie;
- int ret = 1;
-
- rd_cqe = cxio_next_cqe(&chp->cq);
-
- if (!rd_cqe)
- return 0;
-
- qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
- if (!qhp)
- wq = NULL;
- else {
- spin_lock(&qhp->lock);
- wq = &(qhp->wq);
- }
- ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
- &credit);
- if (t3a_device(chp->rhp) && credit) {
- PDBG("%s updating %d cq credits on id %d\n", __func__,
- credit, chp->cq.cqid);
- cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
- }
-
- if (ret) {
- ret = -EAGAIN;
- goto out;
- }
- ret = 1;
-
- wc->wr_id = cookie;
- wc->qp = &qhp->ibqp;
- wc->vendor_err = CQE_STATUS(cqe);
- wc->wc_flags = 0;
-
- PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
- "lo 0x%x cookie 0x%llx\n", __func__,
- CQE_QPID(cqe), CQE_TYPE(cqe),
- CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
- CQE_WRID_LOW(cqe), (unsigned long long) cookie);
-
- if (CQE_TYPE(cqe) == 0) {
- if (!CQE_STATUS(cqe))
- wc->byte_len = CQE_LEN(cqe);
- else
- wc->byte_len = 0;
- wc->opcode = IB_WC_RECV;
- if (CQE_OPCODE(cqe) == T3_SEND_WITH_INV ||
- CQE_OPCODE(cqe) == T3_SEND_WITH_SE_INV) {
- wc->ex.invalidate_rkey = CQE_WRID_STAG(cqe);
- wc->wc_flags |= IB_WC_WITH_INVALIDATE;
- }
- } else {
- switch (CQE_OPCODE(cqe)) {
- case T3_RDMA_WRITE:
- wc->opcode = IB_WC_RDMA_WRITE;
- break;
- case T3_READ_REQ:
- wc->opcode = IB_WC_RDMA_READ;
- wc->byte_len = CQE_LEN(cqe);
- break;
- case T3_SEND:
- case T3_SEND_WITH_SE:
- case T3_SEND_WITH_INV:
- case T3_SEND_WITH_SE_INV:
- wc->opcode = IB_WC_SEND;
- break;
- case T3_BIND_MW:
- wc->opcode = IB_WC_BIND_MW;
- break;
-
- case T3_LOCAL_INV:
- wc->opcode = IB_WC_LOCAL_INV;
- break;
- case T3_FAST_REGISTER:
- wc->opcode = IB_WC_FAST_REG_MR;
- break;
- default:
- printk(KERN_ERR MOD "Unexpected opcode %d "
- "in the CQE received for QPID=0x%0x\n",
- CQE_OPCODE(cqe), CQE_QPID(cqe));
- ret = -EINVAL;
- goto out;
- }
- }
-
- if (cqe_flushed)
- wc->status = IB_WC_WR_FLUSH_ERR;
- else {
-
- switch (CQE_STATUS(cqe)) {
- case TPT_ERR_SUCCESS:
- wc->status = IB_WC_SUCCESS;
- break;
- case TPT_ERR_STAG:
- wc->status = IB_WC_LOC_ACCESS_ERR;
- break;
- case TPT_ERR_PDID:
- wc->status = IB_WC_LOC_PROT_ERR;
- break;
- case TPT_ERR_QPID:
- case TPT_ERR_ACCESS:
- wc->status = IB_WC_LOC_ACCESS_ERR;
- break;
- case TPT_ERR_WRAP:
- wc->status = IB_WC_GENERAL_ERR;
- break;
- case TPT_ERR_BOUND:
- wc->status = IB_WC_LOC_LEN_ERR;
- break;
- case TPT_ERR_INVALIDATE_SHARED_MR:
- case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
- wc->status = IB_WC_MW_BIND_ERR;
- break;
- case TPT_ERR_CRC:
- case TPT_ERR_MARKER:
- case TPT_ERR_PDU_LEN_ERR:
- case TPT_ERR_OUT_OF_RQE:
- case TPT_ERR_DDP_VERSION:
- case TPT_ERR_RDMA_VERSION:
- case TPT_ERR_DDP_QUEUE_NUM:
- case TPT_ERR_MSN:
- case TPT_ERR_TBIT:
- case TPT_ERR_MO:
- case TPT_ERR_MSN_RANGE:
- case TPT_ERR_IRD_OVERFLOW:
- case TPT_ERR_OPCODE:
- wc->status = IB_WC_FATAL_ERR;
- break;
- case TPT_ERR_SWFLUSH:
- wc->status = IB_WC_WR_FLUSH_ERR;
- break;
- default:
- printk(KERN_ERR MOD "Unexpected cqe_status 0x%x for "
- "QPID=0x%0x\n", CQE_STATUS(cqe), CQE_QPID(cqe));
- ret = -EINVAL;
- }
- }
-out:
- if (wq)
- spin_unlock(&qhp->lock);
- return ret;
-}
-
-int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
-{
- struct iwch_dev *rhp;
- struct iwch_cq *chp;
- unsigned long flags;
- int npolled;
- int err = 0;
-
- chp = to_iwch_cq(ibcq);
- rhp = chp->rhp;
-
- spin_lock_irqsave(&chp->lock, flags);
- for (npolled = 0; npolled < num_entries; ++npolled) {
-#ifdef DEBUG
- int i=0;
-#endif
-
- /*
- * Because T3 can post CQEs that are _not_ associated
- * with a WR, we might have to poll again after removing
- * one of these.
- */
- do {
- err = iwch_poll_cq_one(rhp, chp, wc + npolled);
-#ifdef DEBUG
- BUG_ON(++i > 1000);
-#endif
- } while (err == -EAGAIN);
- if (err <= 0)
- break;
- }
- spin_unlock_irqrestore(&chp->lock, flags);
-
- if (err < 0)
- return err;
- else {
- return npolled;
- }
-}
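
iwch_poll_cq() above retries iwch_poll_cq_one() whenever it returns -EAGAIN, because T3 can post completion entries that carry no work request and must simply be consumed (0 means empty, 1 means a CQE was returned, any other negative errno is fatal). A minimal userspace sketch of that retry convention follows; poll_one_fake() and its counters are hypothetical stand-ins for the hardware poll, not driver code.

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for iwch_poll_cq_one(): returns 1 for a real
 * completion, -EAGAIN for a "phantom" CQE that must be consumed and
 * retried, and 0 once the queue is empty. */
static int poll_one_fake(int *remaining, int *phantoms)
{
	if (*phantoms > 0) {
		(*phantoms)--;
		return -EAGAIN;
	}
	if (*remaining > 0) {
		(*remaining)--;
		return 1;
	}
	return 0;
}

int main(void)
{
	int remaining = 3, phantoms = 2, npolled, err;

	/* Same shape as the removed iwch_poll_cq() loop: retry on -EAGAIN,
	 * stop on empty (0) or error (< 0), count successful polls. */
	for (npolled = 0; npolled < 8; npolled++) {
		do {
			err = poll_one_fake(&remaining, &phantoms);
		} while (err == -EAGAIN);
		if (err <= 0)
			break;
	}
	printf("polled %d completions (err=%d)\n", npolled, err);
	return err < 0 ? 1 : 0;
}
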
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
deleted file mode 100644
index abcc9e76962b..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/gfp.h>
-#include <linux/mman.h>
-#include <net/sock.h>
-#include "iwch_provider.h"
-#include "iwch.h"
-#include "iwch_cm.h"
-#include "cxio_hal.h"
-#include "cxio_wr.h"
-
-static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
- struct respQ_msg_t *rsp_msg,
- enum ib_event_type ib_event,
- int send_term)
-{
- struct ib_event event;
- struct iwch_qp_attributes attrs;
- struct iwch_qp *qhp;
- unsigned long flag;
-
- spin_lock(&rnicp->lock);
- qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
-
- if (!qhp) {
- printk(KERN_ERR "%s unaffiliated error 0x%x qpid 0x%x\n",
- __func__, CQE_STATUS(rsp_msg->cqe),
- CQE_QPID(rsp_msg->cqe));
- spin_unlock(&rnicp->lock);
- return;
- }
-
- if ((qhp->attr.state == IWCH_QP_STATE_ERROR) ||
- (qhp->attr.state == IWCH_QP_STATE_TERMINATE)) {
- PDBG("%s AE received after RTS - "
- "qp state %d qpid 0x%x status 0x%x\n", __func__,
- qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe));
- spin_unlock(&rnicp->lock);
- return;
- }
-
- printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x "
- "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __func__,
- CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
- CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
- CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
-
- atomic_inc(&qhp->refcnt);
- spin_unlock(&rnicp->lock);
-
- if (qhp->attr.state == IWCH_QP_STATE_RTS) {
- attrs.next_state = IWCH_QP_STATE_TERMINATE;
- iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- if (send_term)
- iwch_post_terminate(qhp, rsp_msg);
- }
-
- event.event = ib_event;
- event.device = chp->ibcq.device;
- if (ib_event == IB_EVENT_CQ_ERR)
- event.element.cq = &chp->ibcq;
- else
- event.element.qp = &qhp->ibqp;
-
- if (qhp->ibqp.event_handler)
- (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
-
- spin_lock_irqsave(&chp->comp_handler_lock, flag);
- (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
- spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
-
- if (atomic_dec_and_test(&qhp->refcnt))
- wake_up(&qhp->wait);
-}
-
-void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
-{
- struct iwch_dev *rnicp;
- struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
- struct iwch_cq *chp;
- struct iwch_qp *qhp;
- u32 cqid = RSPQ_CQID(rsp_msg);
- unsigned long flag;
-
- rnicp = (struct iwch_dev *) rdev_p->ulp;
- spin_lock(&rnicp->lock);
- chp = get_chp(rnicp, cqid);
- qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
- if (!chp || !qhp) {
- printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d "
- "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x \n",
- cqid, CQE_QPID(rsp_msg->cqe),
- CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
- CQE_TYPE(rsp_msg->cqe), CQE_WRID_HI(rsp_msg->cqe),
- CQE_WRID_LOW(rsp_msg->cqe));
- spin_unlock(&rnicp->lock);
- goto out;
- }
- iwch_qp_add_ref(&qhp->ibqp);
- atomic_inc(&chp->refcnt);
- spin_unlock(&rnicp->lock);
-
- /*
- * 1) completion of our sending a TERMINATE.
- * 2) incoming TERMINATE message.
- */
- if ((CQE_OPCODE(rsp_msg->cqe) == T3_TERMINATE) &&
- (CQE_STATUS(rsp_msg->cqe) == 0)) {
- if (SQ_TYPE(rsp_msg->cqe)) {
- PDBG("%s QPID 0x%x ep %p disconnecting\n",
- __func__, qhp->wq.qpid, qhp->ep);
- iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
- } else {
- PDBG("%s post REQ_ERR AE QPID 0x%x\n", __func__,
- qhp->wq.qpid);
- post_qp_event(rnicp, chp, rsp_msg,
- IB_EVENT_QP_REQ_ERR, 0);
- iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
- }
- goto done;
- }
-
- /* Bad incoming Read request */
- if (SQ_TYPE(rsp_msg->cqe) &&
- (CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP)) {
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
- goto done;
- }
-
- /* Bad incoming write */
- if (RQ_TYPE(rsp_msg->cqe) &&
- (CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE)) {
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
- goto done;
- }
-
- switch (CQE_STATUS(rsp_msg->cqe)) {
-
- /* Completion Events */
- case TPT_ERR_SUCCESS:
-
- /*
- * Confirm the destination entry if this is a RECV completion.
- */
- if (qhp->ep && SQ_TYPE(rsp_msg->cqe))
- dst_confirm(qhp->ep->dst);
- spin_lock_irqsave(&chp->comp_handler_lock, flag);
- (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
- spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
- break;
-
- case TPT_ERR_STAG:
- case TPT_ERR_PDID:
- case TPT_ERR_QPID:
- case TPT_ERR_ACCESS:
- case TPT_ERR_WRAP:
- case TPT_ERR_BOUND:
- case TPT_ERR_INVALIDATE_SHARED_MR:
- case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
- break;
-
- /* Device Fatal Errors */
- case TPT_ERR_ECC:
- case TPT_ERR_ECC_PSTAG:
- case TPT_ERR_INTERNAL_ERR:
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_DEVICE_FATAL, 1);
- break;
-
- /* QP Fatal Errors */
- case TPT_ERR_OUT_OF_RQE:
- case TPT_ERR_PBL_ADDR_BOUND:
- case TPT_ERR_CRC:
- case TPT_ERR_MARKER:
- case TPT_ERR_PDU_LEN_ERR:
- case TPT_ERR_DDP_VERSION:
- case TPT_ERR_RDMA_VERSION:
- case TPT_ERR_OPCODE:
- case TPT_ERR_DDP_QUEUE_NUM:
- case TPT_ERR_MSN:
- case TPT_ERR_TBIT:
- case TPT_ERR_MO:
- case TPT_ERR_MSN_GAP:
- case TPT_ERR_MSN_RANGE:
- case TPT_ERR_RQE_ADDR_BOUND:
- case TPT_ERR_IRD_OVERFLOW:
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
- break;
-
- default:
- printk(KERN_ERR MOD "Unknown T3 status 0x%x QPID 0x%x\n",
- CQE_STATUS(rsp_msg->cqe), qhp->wq.qpid);
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
- break;
- }
-done:
- if (atomic_dec_and_test(&chp->refcnt))
- wake_up(&chp->wait);
- iwch_qp_rem_ref(&qhp->ibqp);
-out:
- dev_kfree_skb_irq(skb);
-}
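
Both event helpers removed above bracket their work with the same lifetime idiom: take a reference under the lookup lock (atomic_inc() or iwch_qp_add_ref()), drop the lock, deliver the event, then atomic_dec_and_test() and wake whatever is blocked in the destroy path. The sketch below is a compact userspace illustration of that idiom using C11 atomics; the obj structure and the wake flag are hypothetical simplifications of the kernel's refcount-plus-waitqueue pairing.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int refcnt;	/* mirrors qhp->refcnt / chp->refcnt          */
	bool wake_pending;	/* stands in for wake_up(&...->wait)          */
};

/* Take a reference before releasing the lookup lock. */
static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refcnt, 1);
}

/* Drop the reference; whoever drops the last one wakes the destroy path. */
static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)
		o->wake_pending = true;
}

int main(void)
{
	struct obj o = { .refcnt = 1, .wake_pending = false };

	obj_get(&o);		/* event handler pins the object          */
	/* ... deliver the event with the lock dropped ... */
	obj_put(&o);		/* handler's reference                    */
	obj_put(&o);		/* destroy path's final reference         */

	printf("refcnt=%d wake_pending=%d\n",
	       atomic_load(&o.refcnt), (int)o.wake_pending);
	return 0;
}
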
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c
deleted file mode 100644
index 5c36ee2809ac..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_mem.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/slab.h>
-#include <asm/byteorder.h>
-
-#include <rdma/iw_cm.h>
-#include <rdma/ib_verbs.h>
-
-#include "cxio_hal.h"
-#include "cxio_resource.h"
-#include "iwch.h"
-#include "iwch_provider.h"
-
-static int iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
-{
- u32 mmid;
-
- mhp->attr.state = 1;
- mhp->attr.stag = stag;
- mmid = stag >> 8;
- mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
- return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
-}
-
-int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
- struct iwch_mr *mhp, int shift)
-{
- u32 stag;
- int ret;
-
- if (cxio_register_phys_mem(&rhp->rdev,
- &stag, mhp->attr.pdid,
- mhp->attr.perms,
- mhp->attr.zbva,
- mhp->attr.va_fbo,
- mhp->attr.len,
- shift - 12,
- mhp->attr.pbl_size, mhp->attr.pbl_addr))
- return -ENOMEM;
-
- ret = iwch_finish_mem_reg(mhp, stag);
- if (ret)
- cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
- return ret;
-}
-
-int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
- struct iwch_mr *mhp,
- int shift,
- int npages)
-{
- u32 stag;
- int ret;
-
- /* We could support this... */
- if (npages > mhp->attr.pbl_size)
- return -ENOMEM;
-
- stag = mhp->attr.stag;
- if (cxio_reregister_phys_mem(&rhp->rdev,
- &stag, mhp->attr.pdid,
- mhp->attr.perms,
- mhp->attr.zbva,
- mhp->attr.va_fbo,
- mhp->attr.len,
- shift - 12,
- mhp->attr.pbl_size, mhp->attr.pbl_addr))
- return -ENOMEM;
-
- ret = iwch_finish_mem_reg(mhp, stag);
- if (ret)
- cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
-
- return ret;
-}
-
-int iwch_alloc_pbl(struct iwch_mr *mhp, int npages)
-{
- mhp->attr.pbl_addr = cxio_hal_pblpool_alloc(&mhp->rhp->rdev,
- npages << 3);
-
- if (!mhp->attr.pbl_addr)
- return -ENOMEM;
-
- mhp->attr.pbl_size = npages;
-
- return 0;
-}
-
-void iwch_free_pbl(struct iwch_mr *mhp)
-{
- cxio_hal_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
- mhp->attr.pbl_size << 3);
-}
-
-int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset)
-{
- return cxio_write_pbl(&mhp->rhp->rdev, pages,
- mhp->attr.pbl_addr + (offset << 3), npages);
-}
-
-int build_phys_page_list(struct ib_phys_buf *buffer_list,
- int num_phys_buf,
- u64 *iova_start,
- u64 *total_size,
- int *npages,
- int *shift,
- __be64 **page_list)
-{
- u64 mask;
- int i, j, n;
-
- mask = 0;
- *total_size = 0;
- for (i = 0; i < num_phys_buf; ++i) {
- if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
- return -EINVAL;
- if (i != 0 && i != num_phys_buf - 1 &&
- (buffer_list[i].size & ~PAGE_MASK))
- return -EINVAL;
- *total_size += buffer_list[i].size;
- if (i > 0)
- mask |= buffer_list[i].addr;
- else
- mask |= buffer_list[i].addr & PAGE_MASK;
- if (i != num_phys_buf - 1)
- mask |= buffer_list[i].addr + buffer_list[i].size;
- else
- mask |= (buffer_list[i].addr + buffer_list[i].size +
- PAGE_SIZE - 1) & PAGE_MASK;
- }
-
- if (*total_size > 0xFFFFFFFFULL)
- return -ENOMEM;
-
- /* Find largest page shift we can use to cover buffers */
- for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
- if ((1ULL << *shift) & mask)
- break;
-
- buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
- buffer_list[0].addr &= ~0ull << *shift;
-
- *npages = 0;
- for (i = 0; i < num_phys_buf; ++i)
- *npages += (buffer_list[i].size +
- (1ULL << *shift) - 1) >> *shift;
-
- if (!*npages)
- return -EINVAL;
-
- *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
- if (!*page_list)
- return -ENOMEM;
-
- n = 0;
- for (i = 0; i < num_phys_buf; ++i)
- for (j = 0;
- j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
- ++j)
- (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
- ((u64) j << *shift));
-
- PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
- __func__, (unsigned long long) *iova_start,
- (unsigned long long) mask, *shift, (unsigned long long) *total_size,
- *npages);
-
- return 0;
-
-}
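
build_phys_page_list() above derives the largest usable page shift by OR-ing the buffer start and end addresses into a mask and then scanning for the lowest set bit at or above PAGE_SHIFT: any misaligned bit caps the page size. The following standalone illustration of that bit trick assumes a 4 KiB base page (shift 12) and uses made-up addresses; it is a sketch, not driver code.

#include <stdint.h>
#include <stdio.h>

#define FAKE_PAGE_SHIFT 12	/* assumption: 4 KiB base pages */

/* Lowest set bit of mask at or above FAKE_PAGE_SHIFT (capped at 27),
 * i.e. the largest page shift every buffer boundary is aligned to --
 * the same scan the removed build_phys_page_list() performs. */
static int largest_page_shift(uint64_t mask)
{
	int shift;

	for (shift = FAKE_PAGE_SHIFT; shift < 27; shift++)
		if ((UINT64_C(1) << shift) & mask)
			break;
	return shift;
}

int main(void)
{
	/* Illustrative buffer boundaries (start and end addresses). */
	uint64_t bounds[] = { 0x10000000, 0x10200000, 0x10210000 };
	uint64_t mask = 0;
	unsigned int i;

	for (i = 0; i < sizeof(bounds) / sizeof(bounds[0]); i++)
		mask |= bounds[i];

	printf("mask 0x%llx -> page shift %d (page size %u)\n",
	       (unsigned long long)mask, largest_page_shift(mask),
	       1u << largest_page_shift(mask));
	return 0;
}
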
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
deleted file mode 100644
index 93308c45f298..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ /dev/null
@@ -1,1508 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/device.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/list.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/rtnetlink.h>
-#include <linux/inetdevice.h>
-#include <linux/slab.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-
-#include <rdma/iw_cm.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_smi.h>
-#include <rdma/ib_umem.h>
-#include <rdma/ib_user_verbs.h>
-
-#include "cxio_hal.h"
-#include "iwch.h"
-#include "iwch_provider.h"
-#include "iwch_cm.h"
-#include "iwch_user.h"
-#include "common.h"
-
-static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
- struct ib_ah_attr *ah_attr)
-{
- return ERR_PTR(-ENOSYS);
-}
-
-static int iwch_ah_destroy(struct ib_ah *ah)
-{
- return -ENOSYS;
-}
-
-static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- return -ENOSYS;
-}
-
-static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- return -ENOSYS;
-}
-
-static int iwch_process_mad(struct ib_device *ibdev,
- int mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad,
- size_t in_mad_size,
- struct ib_mad_hdr *out_mad,
- size_t *out_mad_size,
- u16 *out_mad_pkey_index)
-{
- return -ENOSYS;
-}
-
-static int iwch_dealloc_ucontext(struct ib_ucontext *context)
-{
- struct iwch_dev *rhp = to_iwch_dev(context->device);
- struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
- struct iwch_mm_entry *mm, *tmp;
-
- PDBG("%s context %p\n", __func__, context);
- list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
- kfree(mm);
- cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
- kfree(ucontext);
- return 0;
-}
-
-static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
-{
- struct iwch_ucontext *context;
- struct iwch_dev *rhp = to_iwch_dev(ibdev);
-
- PDBG("%s ibdev %p\n", __func__, ibdev);
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return ERR_PTR(-ENOMEM);
- cxio_init_ucontext(&rhp->rdev, &context->uctx);
- INIT_LIST_HEAD(&context->mmaps);
- spin_lock_init(&context->mmap_lock);
- return &context->ibucontext;
-}
-
-static int iwch_destroy_cq(struct ib_cq *ib_cq)
-{
- struct iwch_cq *chp;
-
- PDBG("%s ib_cq %p\n", __func__, ib_cq);
- chp = to_iwch_cq(ib_cq);
-
- remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
- atomic_dec(&chp->refcnt);
- wait_event(chp->wait, !atomic_read(&chp->refcnt));
-
- cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
- kfree(chp);
- return 0;
-}
-
-static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *ib_context,
- struct ib_udata *udata)
-{
- int entries = attr->cqe;
- struct iwch_dev *rhp;
- struct iwch_cq *chp;
- struct iwch_create_cq_resp uresp;
- struct iwch_create_cq_req ureq;
- struct iwch_ucontext *ucontext = NULL;
- static int warned;
- size_t resplen;
-
- PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
- if (attr->flags)
- return ERR_PTR(-EINVAL);
-
- rhp = to_iwch_dev(ibdev);
- chp = kzalloc(sizeof(*chp), GFP_KERNEL);
- if (!chp)
- return ERR_PTR(-ENOMEM);
-
- if (ib_context) {
- ucontext = to_iwch_ucontext(ib_context);
- if (!t3a_device(rhp)) {
- if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
- kfree(chp);
- return ERR_PTR(-EFAULT);
- }
- chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
- }
- }
-
- if (t3a_device(rhp)) {
-
- /*
- * T3A: Add some fluff to handle extra CQEs inserted
- * for various errors.
- * Additional CQE possibilities:
- * TERMINATE,
- * incoming RDMA WRITE Failures
- * incoming RDMA READ REQUEST FAILUREs
- * NOTE: We cannot ensure the CQ won't overflow.
- */
- entries += 16;
- }
- entries = roundup_pow_of_two(entries);
- chp->cq.size_log2 = ilog2(entries);
-
- if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
- kfree(chp);
- return ERR_PTR(-ENOMEM);
- }
- chp->rhp = rhp;
- chp->ibcq.cqe = 1 << chp->cq.size_log2;
- spin_lock_init(&chp->lock);
- spin_lock_init(&chp->comp_handler_lock);
- atomic_set(&chp->refcnt, 1);
- init_waitqueue_head(&chp->wait);
- if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
- cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
- kfree(chp);
- return ERR_PTR(-ENOMEM);
- }
-
- if (ucontext) {
- struct iwch_mm_entry *mm;
-
- mm = kmalloc(sizeof *mm, GFP_KERNEL);
- if (!mm) {
- iwch_destroy_cq(&chp->ibcq);
- return ERR_PTR(-ENOMEM);
- }
- uresp.cqid = chp->cq.cqid;
- uresp.size_log2 = chp->cq.size_log2;
- spin_lock(&ucontext->mmap_lock);
- uresp.key = ucontext->key;
- ucontext->key += PAGE_SIZE;
- spin_unlock(&ucontext->mmap_lock);
- mm->key = uresp.key;
- mm->addr = virt_to_phys(chp->cq.queue);
- if (udata->outlen < sizeof uresp) {
- if (!warned++)
- printk(KERN_WARNING MOD "Warning - "
- "downlevel libcxgb3 (non-fatal).\n");
- mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
- sizeof(struct t3_cqe));
- resplen = sizeof(struct iwch_create_cq_resp_v0);
- } else {
- mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
- sizeof(struct t3_cqe));
- uresp.memsize = mm->len;
- uresp.reserved = 0;
- resplen = sizeof uresp;
- }
- if (ib_copy_to_udata(udata, &uresp, resplen)) {
- kfree(mm);
- iwch_destroy_cq(&chp->ibcq);
- return ERR_PTR(-EFAULT);
- }
- insert_mmap(ucontext, mm);
- }
- PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
- chp->cq.cqid, chp, (1 << chp->cq.size_log2),
- (unsigned long long) chp->cq.dma_addr);
- return &chp->ibcq;
-}
-
-static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
-{
-#ifdef notyet
- struct iwch_cq *chp = to_iwch_cq(cq);
- struct t3_cq oldcq, newcq;
- int ret;
-
- PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);
-
- /* We don't downsize... */
- if (cqe <= cq->cqe)
- return 0;
-
- /* create new t3_cq with new size */
- cqe = roundup_pow_of_two(cqe+1);
- newcq.size_log2 = ilog2(cqe);
-
- /* Dont allow resize to less than the current wce count */
- if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {
- return -ENOMEM;
- }
-
- /* Quiesce all QPs using this CQ */
- ret = iwch_quiesce_qps(chp);
- if (ret) {
- return ret;
- }
-
- ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
- if (ret) {
- return ret;
- }
-
- /* copy CQEs */
- memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
- sizeof(struct t3_cqe));
-
- /* old iwch_qp gets new t3_cq but keeps old cqid */
- oldcq = chp->cq;
- chp->cq = newcq;
- chp->cq.cqid = oldcq.cqid;
-
- /* resize new t3_cq to update the HW context */
- ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
- if (ret) {
- chp->cq = oldcq;
- return ret;
- }
- chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1;
-
- /* destroy old t3_cq */
- oldcq.cqid = newcq.cqid;
- ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
- if (ret) {
- printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
- __func__, ret);
- }
-
- /* add user hooks here */
-
- /* resume qps */
- ret = iwch_resume_qps(chp);
- return ret;
-#else
- return -ENOSYS;
-#endif
-}
-
-static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
-{
- struct iwch_dev *rhp;
- struct iwch_cq *chp;
- enum t3_cq_opcode cq_op;
- int err;
- unsigned long flag;
- u32 rptr;
-
- chp = to_iwch_cq(ibcq);
- rhp = chp->rhp;
- if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
- cq_op = CQ_ARM_SE;
- else
- cq_op = CQ_ARM_AN;
- if (chp->user_rptr_addr) {
- if (get_user(rptr, chp->user_rptr_addr))
- return -EFAULT;
- spin_lock_irqsave(&chp->lock, flag);
- chp->cq.rptr = rptr;
- } else
- spin_lock_irqsave(&chp->lock, flag);
- PDBG("%s rptr 0x%x\n", __func__, chp->cq.rptr);
- err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
- spin_unlock_irqrestore(&chp->lock, flag);
- if (err < 0)
- printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,
- chp->cq.cqid);
- if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
- err = 0;
- return err;
-}
-
-static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
-{
- int len = vma->vm_end - vma->vm_start;
- u32 key = vma->vm_pgoff << PAGE_SHIFT;
- struct cxio_rdev *rdev_p;
- int ret = 0;
- struct iwch_mm_entry *mm;
- struct iwch_ucontext *ucontext;
- u64 addr;
-
- PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
- key, len);
-
- if (vma->vm_start & (PAGE_SIZE-1)) {
- return -EINVAL;
- }
-
- rdev_p = &(to_iwch_dev(context->device)->rdev);
- ucontext = to_iwch_ucontext(context);
-
- mm = remove_mmap(ucontext, key, len);
- if (!mm)
- return -EINVAL;
- addr = mm->addr;
- kfree(mm);
-
- if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
- (addr < (rdev_p->rnic_info.udbell_physbase +
- rdev_p->rnic_info.udbell_len))) {
-
- /*
- * Map T3 DB register.
- */
- if (vma->vm_flags & VM_READ) {
- return -EPERM;
- }
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
- vma->vm_flags &= ~VM_MAYREAD;
- ret = io_remap_pfn_range(vma, vma->vm_start,
- addr >> PAGE_SHIFT,
- len, vma->vm_page_prot);
- } else {
-
- /*
- * Map WQ or CQ contig dma memory...
- */
- ret = remap_pfn_range(vma, vma->vm_start,
- addr >> PAGE_SHIFT,
- len, vma->vm_page_prot);
- }
-
- return ret;
-}
-
-static int iwch_deallocate_pd(struct ib_pd *pd)
-{
- struct iwch_dev *rhp;
- struct iwch_pd *php;
-
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
- cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
- kfree(php);
- return 0;
-}
-
-static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
-{
- struct iwch_pd *php;
- u32 pdid;
- struct iwch_dev *rhp;
-
- PDBG("%s ibdev %p\n", __func__, ibdev);
- rhp = (struct iwch_dev *) ibdev;
- pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
- if (!pdid)
- return ERR_PTR(-EINVAL);
- php = kzalloc(sizeof(*php), GFP_KERNEL);
- if (!php) {
- cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
- return ERR_PTR(-ENOMEM);
- }
- php->pdid = pdid;
- php->rhp = rhp;
- if (context) {
- if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) {
- iwch_deallocate_pd(&php->ibpd);
- return ERR_PTR(-EFAULT);
- }
- }
- PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
- return &php->ibpd;
-}
-
-static int iwch_dereg_mr(struct ib_mr *ib_mr)
-{
- struct iwch_dev *rhp;
- struct iwch_mr *mhp;
- u32 mmid;
-
- PDBG("%s ib_mr %p\n", __func__, ib_mr);
- /* There can be no memory windows */
- if (atomic_read(&ib_mr->usecnt))
- return -EINVAL;
-
- mhp = to_iwch_mr(ib_mr);
- rhp = mhp->rhp;
- mmid = mhp->attr.stag >> 8;
- cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
- iwch_free_pbl(mhp);
- remove_handle(rhp, &rhp->mmidr, mmid);
- if (mhp->kva)
- kfree((void *) (unsigned long) mhp->kva);
- if (mhp->umem)
- ib_umem_release(mhp->umem);
- PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
- kfree(mhp);
- return 0;
-}
-
-static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
- struct ib_phys_buf *buffer_list,
- int num_phys_buf,
- int acc,
- u64 *iova_start)
-{
- __be64 *page_list;
- int shift;
- u64 total_size;
- int npages;
- struct iwch_dev *rhp;
- struct iwch_pd *php;
- struct iwch_mr *mhp;
- int ret;
-
- PDBG("%s ib_pd %p\n", __func__, pd);
- php = to_iwch_pd(pd);
- rhp = php->rhp;
-
- mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
- return ERR_PTR(-ENOMEM);
-
- mhp->rhp = rhp;
-
- /* First check that we have enough alignment */
- if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
- ret = -EINVAL;
- goto err;
- }
-
- if (num_phys_buf > 1 &&
- ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
- ret = -EINVAL;
- goto err;
- }
-
- ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
- &total_size, &npages, &shift, &page_list);
- if (ret)
- goto err;
-
- ret = iwch_alloc_pbl(mhp, npages);
- if (ret) {
- kfree(page_list);
- goto err_pbl;
- }
-
- ret = iwch_write_pbl(mhp, page_list, npages, 0);
- kfree(page_list);
- if (ret)
- goto err_pbl;
-
- mhp->attr.pdid = php->pdid;
- mhp->attr.zbva = 0;
-
- mhp->attr.perms = iwch_ib_to_tpt_access(acc);
- mhp->attr.va_fbo = *iova_start;
- mhp->attr.page_size = shift - 12;
-
- mhp->attr.len = (u32) total_size;
- mhp->attr.pbl_size = npages;
- ret = iwch_register_mem(rhp, php, mhp, shift);
- if (ret)
- goto err_pbl;
-
- return &mhp->ibmr;
-
-err_pbl:
- iwch_free_pbl(mhp);
-
-err:
- kfree(mhp);
- return ERR_PTR(ret);
-
-}
-
-static int iwch_reregister_phys_mem(struct ib_mr *mr,
- int mr_rereg_mask,
- struct ib_pd *pd,
- struct ib_phys_buf *buffer_list,
- int num_phys_buf,
- int acc, u64 * iova_start)
-{
-
- struct iwch_mr mh, *mhp;
- struct iwch_pd *php;
- struct iwch_dev *rhp;
- __be64 *page_list = NULL;
- int shift = 0;
- u64 total_size;
- int npages = 0;
- int ret;
-
- PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);
-
- /* There can be no memory windows */
- if (atomic_read(&mr->usecnt))
- return -EINVAL;
-
- mhp = to_iwch_mr(mr);
- rhp = mhp->rhp;
- php = to_iwch_pd(mr->pd);
-
- /* make sure we are on the same adapter */
- if (rhp != php->rhp)
- return -EINVAL;
-
- memcpy(&mh, mhp, sizeof *mhp);
-
- if (mr_rereg_mask & IB_MR_REREG_PD)
- php = to_iwch_pd(pd);
- if (mr_rereg_mask & IB_MR_REREG_ACCESS)
- mh.attr.perms = iwch_ib_to_tpt_access(acc);
- if (mr_rereg_mask & IB_MR_REREG_TRANS) {
- ret = build_phys_page_list(buffer_list, num_phys_buf,
- iova_start,
- &total_size, &npages,
- &shift, &page_list);
- if (ret)
- return ret;
- }
-
- ret = iwch_reregister_mem(rhp, php, &mh, shift, npages);
- kfree(page_list);
- if (ret) {
- return ret;
- }
- if (mr_rereg_mask & IB_MR_REREG_PD)
- mhp->attr.pdid = php->pdid;
- if (mr_rereg_mask & IB_MR_REREG_ACCESS)
- mhp->attr.perms = iwch_ib_to_tpt_access(acc);
- if (mr_rereg_mask & IB_MR_REREG_TRANS) {
- mhp->attr.zbva = 0;
- mhp->attr.va_fbo = *iova_start;
- mhp->attr.page_size = shift - 12;
- mhp->attr.len = (u32) total_size;
- mhp->attr.pbl_size = npages;
- }
-
- return 0;
-}
-
-
-static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt, int acc, struct ib_udata *udata)
-{
- __be64 *pages;
- int shift, n, len;
- int i, k, entry;
- int err = 0;
- struct iwch_dev *rhp;
- struct iwch_pd *php;
- struct iwch_mr *mhp;
- struct iwch_reg_user_mr_resp uresp;
- struct scatterlist *sg;
- PDBG("%s ib_pd %p\n", __func__, pd);
-
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
- return ERR_PTR(-ENOMEM);
-
- mhp->rhp = rhp;
-
- mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
- if (IS_ERR(mhp->umem)) {
- err = PTR_ERR(mhp->umem);
- kfree(mhp);
- return ERR_PTR(err);
- }
-
- shift = ffs(mhp->umem->page_size) - 1;
-
- n = mhp->umem->nmap;
-
- err = iwch_alloc_pbl(mhp, n);
- if (err)
- goto err;
-
- pages = (__be64 *) __get_free_page(GFP_KERNEL);
- if (!pages) {
- err = -ENOMEM;
- goto err_pbl;
- }
-
- i = n = 0;
-
- for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
- len = sg_dma_len(sg) >> shift;
- for (k = 0; k < len; ++k) {
- pages[i++] = cpu_to_be64(sg_dma_address(sg) +
- mhp->umem->page_size * k);
- if (i == PAGE_SIZE / sizeof *pages) {
- err = iwch_write_pbl(mhp, pages, i, n);
- if (err)
- goto pbl_done;
- n += i;
- i = 0;
- }
- }
- }
-
- if (i)
- err = iwch_write_pbl(mhp, pages, i, n);
-
-pbl_done:
- free_page((unsigned long) pages);
- if (err)
- goto err_pbl;
-
- mhp->attr.pdid = php->pdid;
- mhp->attr.zbva = 0;
- mhp->attr.perms = iwch_ib_to_tpt_access(acc);
- mhp->attr.va_fbo = virt;
- mhp->attr.page_size = shift - 12;
- mhp->attr.len = (u32) length;
-
- err = iwch_register_mem(rhp, php, mhp, shift);
- if (err)
- goto err_pbl;
-
- if (udata && !t3a_device(rhp)) {
- uresp.pbl_addr = (mhp->attr.pbl_addr -
- rhp->rdev.rnic_info.pbl_base) >> 3;
- PDBG("%s user resp pbl_addr 0x%x\n", __func__,
- uresp.pbl_addr);
-
- if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
- iwch_dereg_mr(&mhp->ibmr);
- err = -EFAULT;
- goto err;
- }
- }
-
- return &mhp->ibmr;
-
-err_pbl:
- iwch_free_pbl(mhp);
-
-err:
- ib_umem_release(mhp->umem);
- kfree(mhp);
- return ERR_PTR(err);
-}
-
-static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
-{
- struct ib_phys_buf bl;
- u64 kva;
- struct ib_mr *ibmr;
-
- PDBG("%s ib_pd %p\n", __func__, pd);
-
- /*
- * T3 only supports 32 bits of size.
- */
- if (sizeof(phys_addr_t) > 4) {
- pr_warn_once(MOD "Cannot support dma_mrs on this platform.\n");
- return ERR_PTR(-ENOTSUPP);
- }
- bl.size = 0xffffffff;
- bl.addr = 0;
- kva = 0;
- ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
- return ibmr;
-}
-
-static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
-{
- struct iwch_dev *rhp;
- struct iwch_pd *php;
- struct iwch_mw *mhp;
- u32 mmid;
- u32 stag = 0;
- int ret;
-
- if (type != IB_MW_TYPE_1)
- return ERR_PTR(-EINVAL);
-
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
- return ERR_PTR(-ENOMEM);
- ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
- if (ret) {
- kfree(mhp);
- return ERR_PTR(ret);
- }
- mhp->rhp = rhp;
- mhp->attr.pdid = php->pdid;
- mhp->attr.type = TPT_MW;
- mhp->attr.stag = stag;
- mmid = (stag) >> 8;
- mhp->ibmw.rkey = stag;
- if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
- cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
- kfree(mhp);
- return ERR_PTR(-ENOMEM);
- }
- PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
- return &(mhp->ibmw);
-}
-
-static int iwch_dealloc_mw(struct ib_mw *mw)
-{
- struct iwch_dev *rhp;
- struct iwch_mw *mhp;
- u32 mmid;
-
- mhp = to_iwch_mw(mw);
- rhp = mhp->rhp;
- mmid = (mw->rkey) >> 8;
- cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
- remove_handle(rhp, &rhp->mmidr, mmid);
- PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
- kfree(mhp);
- return 0;
-}
-
-static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_num_sg)
-{
- struct iwch_dev *rhp;
- struct iwch_pd *php;
- struct iwch_mr *mhp;
- u32 mmid;
- u32 stag = 0;
- int ret = 0;
-
- if (mr_type != IB_MR_TYPE_MEM_REG ||
- max_num_sg > T3_MAX_FASTREG_DEPTH)
- return ERR_PTR(-EINVAL);
-
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
- goto err;
-
- mhp->rhp = rhp;
- ret = iwch_alloc_pbl(mhp, max_num_sg);
- if (ret)
- goto err1;
- mhp->attr.pbl_size = max_num_sg;
- ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
- mhp->attr.pbl_size, mhp->attr.pbl_addr);
- if (ret)
- goto err2;
- mhp->attr.pdid = php->pdid;
- mhp->attr.type = TPT_NON_SHARED_MR;
- mhp->attr.stag = stag;
- mhp->attr.state = 1;
- mmid = (stag) >> 8;
- mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
- goto err3;
-
- PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
- return &(mhp->ibmr);
-err3:
- cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
-err2:
- iwch_free_pbl(mhp);
-err1:
- kfree(mhp);
-err:
- return ERR_PTR(ret);
-}
-
-static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl(
- struct ib_device *device,
- int page_list_len)
-{
- struct ib_fast_reg_page_list *page_list;
-
- page_list = kmalloc(sizeof *page_list + page_list_len * sizeof(u64),
- GFP_KERNEL);
- if (!page_list)
- return ERR_PTR(-ENOMEM);
-
- page_list->page_list = (u64 *)(page_list + 1);
- page_list->max_page_list_len = page_list_len;
-
- return page_list;
-}
-
-static void iwch_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list)
-{
- kfree(page_list);
-}
-
-static int iwch_destroy_qp(struct ib_qp *ib_qp)
-{
- struct iwch_dev *rhp;
- struct iwch_qp *qhp;
- struct iwch_qp_attributes attrs;
- struct iwch_ucontext *ucontext;
-
- qhp = to_iwch_qp(ib_qp);
- rhp = qhp->rhp;
-
- attrs.next_state = IWCH_QP_STATE_ERROR;
- iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
- wait_event(qhp->wait, !qhp->ep);
-
- remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);
-
- atomic_dec(&qhp->refcnt);
- wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
-
- ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
- : NULL;
- cxio_destroy_qp(&rhp->rdev, &qhp->wq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
-
- PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
- ib_qp, qhp->wq.qpid, qhp);
- kfree(qhp);
- return 0;
-}
-
-static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *attrs,
- struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_qp *qhp;
- struct iwch_pd *php;
- struct iwch_cq *schp;
- struct iwch_cq *rchp;
- struct iwch_create_qp_resp uresp;
- int wqsize, sqsize, rqsize;
- struct iwch_ucontext *ucontext;
-
- PDBG("%s ib_pd %p\n", __func__, pd);
- if (attrs->qp_type != IB_QPT_RC)
- return ERR_PTR(-EINVAL);
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
- rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
- if (!schp || !rchp)
- return ERR_PTR(-EINVAL);
-
- /* The RQT size must be # of entries + 1 rounded up to a power of two */
- rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
- if (rqsize == attrs->cap.max_recv_wr)
- rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);
-
- /* T3 doesn't support RQT depth < 16 */
- if (rqsize < 16)
- rqsize = 16;
-
- if (rqsize > T3_MAX_RQ_SIZE)
- return ERR_PTR(-EINVAL);
-
- if (attrs->cap.max_inline_data > T3_MAX_INLINE)
- return ERR_PTR(-EINVAL);
-
- /*
- * NOTE: The SQ and total WQ sizes don't need to be
- * a power of two. However, all the code assumes
- * they are. EG: Q_FREECNT() and friends.
- */
- sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
- wqsize = roundup_pow_of_two(rqsize + sqsize);
-
- /*
- * Kernel users need more wq space for fastreg WRs which can take
- * 2 WR fragments.
- */
- ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
- if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
- wqsize = roundup_pow_of_two(rqsize +
- roundup_pow_of_two(attrs->cap.max_send_wr * 2));
- PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__,
- wqsize, sqsize, rqsize);
- qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
- if (!qhp)
- return ERR_PTR(-ENOMEM);
- qhp->wq.size_log2 = ilog2(wqsize);
- qhp->wq.rq_size_log2 = ilog2(rqsize);
- qhp->wq.sq_size_log2 = ilog2(sqsize);
- if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
- kfree(qhp);
- return ERR_PTR(-ENOMEM);
- }
-
- attrs->cap.max_recv_wr = rqsize - 1;
- attrs->cap.max_send_wr = sqsize;
- attrs->cap.max_inline_data = T3_MAX_INLINE;
-
- qhp->rhp = rhp;
- qhp->attr.pd = php->pdid;
- qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
- qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
- qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
- qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
- qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
- qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
- qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
- qhp->attr.state = IWCH_QP_STATE_IDLE;
- qhp->attr.next_state = IWCH_QP_STATE_IDLE;
-
- /*
- * XXX - These don't get passed in from the openib user
- * at create time. The CM sets them via a QP modify.
- * Need to fix... I think the CM should
- */
- qhp->attr.enable_rdma_read = 1;
- qhp->attr.enable_rdma_write = 1;
- qhp->attr.enable_bind = 1;
- qhp->attr.max_ord = 1;
- qhp->attr.max_ird = 1;
-
- spin_lock_init(&qhp->lock);
- init_waitqueue_head(&qhp->wait);
- atomic_set(&qhp->refcnt, 1);
-
- if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
- cxio_destroy_qp(&rhp->rdev, &qhp->wq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
- kfree(qhp);
- return ERR_PTR(-ENOMEM);
- }
-
- if (udata) {
-
- struct iwch_mm_entry *mm1, *mm2;
-
- mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
- if (!mm1) {
- iwch_destroy_qp(&qhp->ibqp);
- return ERR_PTR(-ENOMEM);
- }
-
- mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
- if (!mm2) {
- kfree(mm1);
- iwch_destroy_qp(&qhp->ibqp);
- return ERR_PTR(-ENOMEM);
- }
-
- uresp.qpid = qhp->wq.qpid;
- uresp.size_log2 = qhp->wq.size_log2;
- uresp.sq_size_log2 = qhp->wq.sq_size_log2;
- uresp.rq_size_log2 = qhp->wq.rq_size_log2;
- spin_lock(&ucontext->mmap_lock);
- uresp.key = ucontext->key;
- ucontext->key += PAGE_SIZE;
- uresp.db_key = ucontext->key;
- ucontext->key += PAGE_SIZE;
- spin_unlock(&ucontext->mmap_lock);
- if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
- kfree(mm1);
- kfree(mm2);
- iwch_destroy_qp(&qhp->ibqp);
- return ERR_PTR(-EFAULT);
- }
- mm1->key = uresp.key;
- mm1->addr = virt_to_phys(qhp->wq.queue);
- mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));
- insert_mmap(ucontext, mm1);
- mm2->key = uresp.db_key;
- mm2->addr = qhp->wq.udb & PAGE_MASK;
- mm2->len = PAGE_SIZE;
- insert_mmap(ucontext, mm2);
- }
- qhp->ibqp.qp_num = qhp->wq.qpid;
- init_timer(&(qhp->timer));
- PDBG("%s sq_num_entries %d, rq_num_entries %d "
- "qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
- __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
- qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
- 1 << qhp->wq.size_log2, qhp->wq.rq_addr);
- return &qhp->ibqp;
-}
-
-static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_qp *qhp;
- enum iwch_qp_attr_mask mask = 0;
- struct iwch_qp_attributes attrs;
-
- PDBG("%s ib_qp %p\n", __func__, ibqp);
-
- /* iwarp does not support the RTR state */
- if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
- attr_mask &= ~IB_QP_STATE;
-
- /* Make sure we still have something left to do */
- if (!attr_mask)
- return 0;
-
- memset(&attrs, 0, sizeof attrs);
- qhp = to_iwch_qp(ibqp);
- rhp = qhp->rhp;
-
- attrs.next_state = iwch_convert_state(attr->qp_state);
- attrs.enable_rdma_read = (attr->qp_access_flags &
- IB_ACCESS_REMOTE_READ) ? 1 : 0;
- attrs.enable_rdma_write = (attr->qp_access_flags &
- IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
- attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
-
-
- mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
- mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
- (IWCH_QP_ATTR_ENABLE_RDMA_READ |
- IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
- IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;
-
- return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
-}
-
-void iwch_qp_add_ref(struct ib_qp *qp)
-{
- PDBG("%s ib_qp %p\n", __func__, qp);
- atomic_inc(&(to_iwch_qp(qp)->refcnt));
-}
-
-void iwch_qp_rem_ref(struct ib_qp *qp)
-{
- PDBG("%s ib_qp %p\n", __func__, qp);
- if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
- wake_up(&(to_iwch_qp(qp)->wait));
-}
-
-static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
-{
- PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
- return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
-}
-
-
-static int iwch_query_pkey(struct ib_device *ibdev,
- u8 port, u16 index, u16 * pkey)
-{
- PDBG("%s ibdev %p\n", __func__, ibdev);
- *pkey = 0;
- return 0;
-}
-
-static int iwch_query_gid(struct ib_device *ibdev, u8 port,
- int index, union ib_gid *gid)
-{
- struct iwch_dev *dev;
-
- PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
- __func__, ibdev, port, index, gid);
- dev = to_iwch_dev(ibdev);
- BUG_ON(port == 0 || port > 2);
- memset(&(gid->raw[0]), 0, sizeof(gid->raw));
- memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
- return 0;
-}
-
-static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
-{
- struct ethtool_drvinfo info;
- struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
- char *cp, *next;
- unsigned fw_maj, fw_min, fw_mic;
-
- lldev->ethtool_ops->get_drvinfo(lldev, &info);
-
- next = info.fw_version + 1;
- cp = strsep(&next, ".");
- sscanf(cp, "%i", &fw_maj);
- cp = strsep(&next, ".");
- sscanf(cp, "%i", &fw_min);
- cp = strsep(&next, ".");
- sscanf(cp, "%i", &fw_mic);
-
- return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
- (fw_mic & 0xffff);
-}
-
-static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
- struct ib_udata *uhw)
-{
-
- struct iwch_dev *dev;
-
- PDBG("%s ibdev %p\n", __func__, ibdev);
-
- if (uhw->inlen || uhw->outlen)
- return -EINVAL;
-
- dev = to_iwch_dev(ibdev);
- memset(props, 0, sizeof *props);
- memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
- props->hw_ver = dev->rdev.t3cdev_p->type;
- props->fw_ver = fw_vers_string_to_u64(dev);
- props->device_cap_flags = dev->device_cap_flags;
- props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
- props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
- props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
- props->max_mr_size = dev->attr.max_mr_size;
- props->max_qp = dev->attr.max_qps;
- props->max_qp_wr = dev->attr.max_wrs;
- props->max_sge = dev->attr.max_sge_per_wr;
- props->max_sge_rd = 1;
- props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
- props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
- props->max_cq = dev->attr.max_cqs;
- props->max_cqe = dev->attr.max_cqes_per_cq;
- props->max_mr = dev->attr.max_mem_regs;
- props->max_pd = dev->attr.max_pds;
- props->local_ca_ack_delay = 0;
- props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;
-
- return 0;
-}
-
-static int iwch_query_port(struct ib_device *ibdev,
- u8 port, struct ib_port_attr *props)
-{
- struct iwch_dev *dev;
- struct net_device *netdev;
- struct in_device *inetdev;
-
- PDBG("%s ibdev %p\n", __func__, ibdev);
-
- dev = to_iwch_dev(ibdev);
- netdev = dev->rdev.port_info.lldevs[port-1];
-
- memset(props, 0, sizeof(struct ib_port_attr));
- props->max_mtu = IB_MTU_4096;
- if (netdev->mtu >= 4096)
- props->active_mtu = IB_MTU_4096;
- else if (netdev->mtu >= 2048)
- props->active_mtu = IB_MTU_2048;
- else if (netdev->mtu >= 1024)
- props->active_mtu = IB_MTU_1024;
- else if (netdev->mtu >= 512)
- props->active_mtu = IB_MTU_512;
- else
- props->active_mtu = IB_MTU_256;
-
- if (!netif_carrier_ok(netdev))
- props->state = IB_PORT_DOWN;
- else {
- inetdev = in_dev_get(netdev);
- if (inetdev) {
- if (inetdev->ifa_list)
- props->state = IB_PORT_ACTIVE;
- else
- props->state = IB_PORT_INIT;
- in_dev_put(inetdev);
- } else
- props->state = IB_PORT_INIT;
- }
-
- props->port_cap_flags =
- IB_PORT_CM_SUP |
- IB_PORT_SNMP_TUNNEL_SUP |
- IB_PORT_REINIT_SUP |
- IB_PORT_DEVICE_MGMT_SUP |
- IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
- props->gid_tbl_len = 1;
- props->pkey_tbl_len = 1;
- props->active_width = 2;
- props->active_speed = IB_SPEED_DDR;
- props->max_msg_sz = -1;
-
- return 0;
-}
-
-static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
- ibdev.dev);
- PDBG("%s dev 0x%p\n", __func__, dev);
- return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
-}
-
-static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
- ibdev.dev);
- struct ethtool_drvinfo info;
- struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
-
- PDBG("%s dev 0x%p\n", __func__, dev);
- lldev->ethtool_ops->get_drvinfo(lldev, &info);
- return sprintf(buf, "%s\n", info.fw_version);
-}
-
-static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
- ibdev.dev);
- struct ethtool_drvinfo info;
- struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
-
- PDBG("%s dev 0x%p\n", __func__, dev);
- lldev->ethtool_ops->get_drvinfo(lldev, &info);
- return sprintf(buf, "%s\n", info.driver);
-}
-
-static ssize_t show_board(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
- ibdev.dev);
- PDBG("%s dev 0x%p\n", __func__, dev);
- return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
- iwch_dev->rdev.rnic_info.pdev->device);
-}
-
-static int iwch_get_mib(struct ib_device *ibdev,
- union rdma_protocol_stats *stats)
-{
- struct iwch_dev *dev;
- struct tp_mib_stats m;
- int ret;
-
- PDBG("%s ibdev %p\n", __func__, ibdev);
- dev = to_iwch_dev(ibdev);
- ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
- if (ret)
- return -ENOSYS;
-
- memset(stats, 0, sizeof *stats);
- stats->iw.ipInReceives = ((u64) m.ipInReceive_hi << 32) +
- m.ipInReceive_lo;
- stats->iw.ipInHdrErrors = ((u64) m.ipInHdrErrors_hi << 32) +
- m.ipInHdrErrors_lo;
- stats->iw.ipInAddrErrors = ((u64) m.ipInAddrErrors_hi << 32) +
- m.ipInAddrErrors_lo;
- stats->iw.ipInUnknownProtos = ((u64) m.ipInUnknownProtos_hi << 32) +
- m.ipInUnknownProtos_lo;
- stats->iw.ipInDiscards = ((u64) m.ipInDiscards_hi << 32) +
- m.ipInDiscards_lo;
- stats->iw.ipInDelivers = ((u64) m.ipInDelivers_hi << 32) +
- m.ipInDelivers_lo;
- stats->iw.ipOutRequests = ((u64) m.ipOutRequests_hi << 32) +
- m.ipOutRequests_lo;
- stats->iw.ipOutDiscards = ((u64) m.ipOutDiscards_hi << 32) +
- m.ipOutDiscards_lo;
- stats->iw.ipOutNoRoutes = ((u64) m.ipOutNoRoutes_hi << 32) +
- m.ipOutNoRoutes_lo;
- stats->iw.ipReasmTimeout = (u64) m.ipReasmTimeout;
- stats->iw.ipReasmReqds = (u64) m.ipReasmReqds;
- stats->iw.ipReasmOKs = (u64) m.ipReasmOKs;
- stats->iw.ipReasmFails = (u64) m.ipReasmFails;
- stats->iw.tcpActiveOpens = (u64) m.tcpActiveOpens;
- stats->iw.tcpPassiveOpens = (u64) m.tcpPassiveOpens;
- stats->iw.tcpAttemptFails = (u64) m.tcpAttemptFails;
- stats->iw.tcpEstabResets = (u64) m.tcpEstabResets;
- stats->iw.tcpOutRsts = (u64) m.tcpOutRsts;
- stats->iw.tcpCurrEstab = (u64) m.tcpCurrEstab;
- stats->iw.tcpInSegs = ((u64) m.tcpInSegs_hi << 32) +
- m.tcpInSegs_lo;
- stats->iw.tcpOutSegs = ((u64) m.tcpOutSegs_hi << 32) +
- m.tcpOutSegs_lo;
- stats->iw.tcpRetransSegs = ((u64) m.tcpRetransSeg_hi << 32) +
- m.tcpRetransSeg_lo;
- stats->iw.tcpInErrs = ((u64) m.tcpInErrs_hi << 32) +
- m.tcpInErrs_lo;
- stats->iw.tcpRtoMin = (u64) m.tcpRtoMin;
- stats->iw.tcpRtoMax = (u64) m.tcpRtoMax;
- return 0;
-}
-
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
-
-static struct device_attribute *iwch_class_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_fw_ver,
- &dev_attr_hca_type,
- &dev_attr_board_id,
-};
-
-static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num,
- struct ib_port_immutable *immutable)
-{
- struct ib_port_attr attr;
- int err;
-
- err = iwch_query_port(ibdev, port_num, &attr);
- if (err)
- return err;
-
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
- immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
-
- return 0;
-}
-
-int iwch_register_device(struct iwch_dev *dev)
-{
- int ret;
- int i;
-
- PDBG("%s iwch_dev %p\n", __func__, dev);
- strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
- memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
- memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
- dev->ibdev.owner = THIS_MODULE;
- dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
- IB_DEVICE_MEM_WINDOW |
- IB_DEVICE_MEM_MGT_EXTENSIONS;
-
- /* cxgb3 supports STag 0. */
- dev->ibdev.local_dma_lkey = 0;
-
- dev->ibdev.uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_POST_SEND) |
- (1ull << IB_USER_VERBS_CMD_POST_RECV);
- dev->ibdev.node_type = RDMA_NODE_RNIC;
- memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
- dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
- dev->ibdev.num_comp_vectors = 1;
- dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
- dev->ibdev.query_device = iwch_query_device;
- dev->ibdev.query_port = iwch_query_port;
- dev->ibdev.query_pkey = iwch_query_pkey;
- dev->ibdev.query_gid = iwch_query_gid;
- dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
- dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
- dev->ibdev.mmap = iwch_mmap;
- dev->ibdev.alloc_pd = iwch_allocate_pd;
- dev->ibdev.dealloc_pd = iwch_deallocate_pd;
- dev->ibdev.create_ah = iwch_ah_create;
- dev->ibdev.destroy_ah = iwch_ah_destroy;
- dev->ibdev.create_qp = iwch_create_qp;
- dev->ibdev.modify_qp = iwch_ib_modify_qp;
- dev->ibdev.destroy_qp = iwch_destroy_qp;
- dev->ibdev.create_cq = iwch_create_cq;
- dev->ibdev.destroy_cq = iwch_destroy_cq;
- dev->ibdev.resize_cq = iwch_resize_cq;
- dev->ibdev.poll_cq = iwch_poll_cq;
- dev->ibdev.get_dma_mr = iwch_get_dma_mr;
- dev->ibdev.reg_phys_mr = iwch_register_phys_mem;
- dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;
- dev->ibdev.reg_user_mr = iwch_reg_user_mr;
- dev->ibdev.dereg_mr = iwch_dereg_mr;
- dev->ibdev.alloc_mw = iwch_alloc_mw;
- dev->ibdev.bind_mw = iwch_bind_mw;
- dev->ibdev.dealloc_mw = iwch_dealloc_mw;
- dev->ibdev.alloc_mr = iwch_alloc_mr;
- dev->ibdev.alloc_fast_reg_page_list = iwch_alloc_fastreg_pbl;
- dev->ibdev.free_fast_reg_page_list = iwch_free_fastreg_pbl;
- dev->ibdev.attach_mcast = iwch_multicast_attach;
- dev->ibdev.detach_mcast = iwch_multicast_detach;
- dev->ibdev.process_mad = iwch_process_mad;
- dev->ibdev.req_notify_cq = iwch_arm_cq;
- dev->ibdev.post_send = iwch_post_send;
- dev->ibdev.post_recv = iwch_post_receive;
- dev->ibdev.get_protocol_stats = iwch_get_mib;
- dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;
- dev->ibdev.get_port_immutable = iwch_port_immutable;
-
- dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
- if (!dev->ibdev.iwcm)
- return -ENOMEM;
-
- dev->ibdev.iwcm->connect = iwch_connect;
- dev->ibdev.iwcm->accept = iwch_accept_cr;
- dev->ibdev.iwcm->reject = iwch_reject_cr;
- dev->ibdev.iwcm->create_listen = iwch_create_listen;
- dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
- dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
- dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
- dev->ibdev.iwcm->get_qp = iwch_get_qp;
-
- ret = ib_register_device(&dev->ibdev, NULL);
- if (ret)
- goto bail1;
-
- for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
- ret = device_create_file(&dev->ibdev.dev,
- iwch_class_attributes[i]);
- if (ret) {
- goto bail2;
- }
- }
- return 0;
-bail2:
- ib_unregister_device(&dev->ibdev);
-bail1:
- kfree(dev->ibdev.iwcm);
- return ret;
-}
-
-void iwch_unregister_device(struct iwch_dev *dev)
-{
- int i;
-
- PDBG("%s iwch_dev %p\n", __func__, dev);
- for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
- device_remove_file(&dev->ibdev.dev,
- iwch_class_attributes[i]);
- ib_unregister_device(&dev->ibdev);
- kfree(dev->ibdev.iwcm);
- return;
-}
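
iwch_register_device() above uses the kernel's usual goto-based unwind: each failure label releases only what was set up before the failing step (bail2 drops the IB registration, bail1 the iwcm allocation). The following is a minimal standalone sketch of that pattern; the helpers here (alloc_table(), register_dev(), create_files(), unregister_dev()) are hypothetical placeholders, not functions from the removed driver.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helpers standing in for the allocation, registration and
 * attribute-file steps above; each setup step returns 0 on success. */
static void *alloc_table(void)       { return malloc(64); }
static int   register_dev(void *t)   { (void)t; return 0; }
static int   create_files(void *t)   { (void)t; return -1; /* force unwind */ }
static void  unregister_dev(void *t) { (void)t; puts("unregister_dev"); }

static int setup_device(void)
{
	void *table;
	int ret;

	table = alloc_table();
	if (!table)
		return -1;		/* nothing acquired yet, just return */

	ret = register_dev(table);
	if (ret)
		goto bail1;		/* only the allocation to undo */

	ret = create_files(table);
	if (ret)
		goto bail2;		/* allocation + registration to undo */

	return 0;

bail2:
	unregister_dev(table);		/* undo step 2 */
bail1:
	free(table);			/* undo step 1 */
	return ret;
}

int main(void)
{
	printf("setup_device() = %d\n", setup_device());
	return 0;
}

The point of the label ordering is that cleanup always runs in reverse order of acquisition, so a failure at any step releases exactly what was taken.
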
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
deleted file mode 100644
index 87c14b0c5ac0..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ /dev/null
@@ -1,360 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __IWCH_PROVIDER_H__
-#define __IWCH_PROVIDER_H__
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <rdma/ib_verbs.h>
-#include <asm/types.h>
-#include "t3cdev.h"
-#include "iwch.h"
-#include "cxio_wr.h"
-#include "cxio_hal.h"
-
-struct iwch_pd {
- struct ib_pd ibpd;
- u32 pdid;
- struct iwch_dev *rhp;
-};
-
-static inline struct iwch_pd *to_iwch_pd(struct ib_pd *ibpd)
-{
- return container_of(ibpd, struct iwch_pd, ibpd);
-}
-
-struct tpt_attributes {
- u32 stag;
- u32 state:1;
- u32 type:2;
- u32 rsvd:1;
- enum tpt_mem_perm perms;
- u32 remote_invaliate_disable:1;
- u32 zbva:1;
- u32 mw_bind_enable:1;
- u32 page_size:5;
-
- u32 pdid;
- u32 qpid;
- u32 pbl_addr;
- u32 len;
- u64 va_fbo;
- u32 pbl_size;
-};
-
-struct iwch_mr {
- struct ib_mr ibmr;
- struct ib_umem *umem;
- struct iwch_dev *rhp;
- u64 kva;
- struct tpt_attributes attr;
-};
-
-typedef struct iwch_mw iwch_mw_handle;
-
-static inline struct iwch_mr *to_iwch_mr(struct ib_mr *ibmr)
-{
- return container_of(ibmr, struct iwch_mr, ibmr);
-}
-
-struct iwch_mw {
- struct ib_mw ibmw;
- struct iwch_dev *rhp;
- u64 kva;
- struct tpt_attributes attr;
-};
-
-static inline struct iwch_mw *to_iwch_mw(struct ib_mw *ibmw)
-{
- return container_of(ibmw, struct iwch_mw, ibmw);
-}
-
-struct iwch_cq {
- struct ib_cq ibcq;
- struct iwch_dev *rhp;
- struct t3_cq cq;
- spinlock_t lock;
- spinlock_t comp_handler_lock;
- atomic_t refcnt;
- wait_queue_head_t wait;
- u32 __user *user_rptr_addr;
-};
-
-static inline struct iwch_cq *to_iwch_cq(struct ib_cq *ibcq)
-{
- return container_of(ibcq, struct iwch_cq, ibcq);
-}
-
-enum IWCH_QP_FLAGS {
- QP_QUIESCED = 0x01
-};
-
-struct iwch_mpa_attributes {
- u8 initiator;
- u8 recv_marker_enabled;
- u8 xmit_marker_enabled; /* iWARP: enable inbound Read Resp. */
- u8 crc_enabled;
- u8 version; /* 0 or 1 */
-};
-
-struct iwch_qp_attributes {
- u32 scq;
- u32 rcq;
- u32 sq_num_entries;
- u32 rq_num_entries;
- u32 sq_max_sges;
- u32 sq_max_sges_rdma_write;
- u32 rq_max_sges;
- u32 state;
- u8 enable_rdma_read;
- u8 enable_rdma_write; /* enable inbound Read Resp. */
- u8 enable_bind;
- u8 enable_mmid0_fastreg; /* Enable STAG0 + Fast-register */
- /*
- * Next QP state. If the current state is specified, only the
- * QP attributes will be modified.
- */
- u32 max_ord;
- u32 max_ird;
- u32 pd; /* IN */
- u32 next_state;
- char terminate_buffer[52];
- u32 terminate_msg_len;
- u8 is_terminate_local;
- struct iwch_mpa_attributes mpa_attr; /* IN-OUT */
- struct iwch_ep *llp_stream_handle;
- char *stream_msg_buf; /* Last stream msg. before Idle -> RTS */
- u32 stream_msg_buf_len; /* Only on Idle -> RTS */
-};
-
-struct iwch_qp {
- struct ib_qp ibqp;
- struct iwch_dev *rhp;
- struct iwch_ep *ep;
- struct iwch_qp_attributes attr;
- struct t3_wq wq;
- spinlock_t lock;
- atomic_t refcnt;
- wait_queue_head_t wait;
- enum IWCH_QP_FLAGS flags;
- struct timer_list timer;
-};
-
-static inline int qp_quiesced(struct iwch_qp *qhp)
-{
- return qhp->flags & QP_QUIESCED;
-}
-
-static inline struct iwch_qp *to_iwch_qp(struct ib_qp *ibqp)
-{
- return container_of(ibqp, struct iwch_qp, ibqp);
-}
-
-void iwch_qp_add_ref(struct ib_qp *qp);
-void iwch_qp_rem_ref(struct ib_qp *qp);
-
-struct iwch_ucontext {
- struct ib_ucontext ibucontext;
- struct cxio_ucontext uctx;
- u32 key;
- spinlock_t mmap_lock;
- struct list_head mmaps;
-};
-
-static inline struct iwch_ucontext *to_iwch_ucontext(struct ib_ucontext *c)
-{
- return container_of(c, struct iwch_ucontext, ibucontext);
-}
-
-struct iwch_mm_entry {
- struct list_head entry;
- u64 addr;
- u32 key;
- unsigned len;
-};
-
-static inline struct iwch_mm_entry *remove_mmap(struct iwch_ucontext *ucontext,
- u32 key, unsigned len)
-{
- struct list_head *pos, *nxt;
- struct iwch_mm_entry *mm;
-
- spin_lock(&ucontext->mmap_lock);
- list_for_each_safe(pos, nxt, &ucontext->mmaps) {
-
- mm = list_entry(pos, struct iwch_mm_entry, entry);
- if (mm->key == key && mm->len == len) {
- list_del_init(&mm->entry);
- spin_unlock(&ucontext->mmap_lock);
- PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
- key, (unsigned long long) mm->addr, mm->len);
- return mm;
- }
- }
- spin_unlock(&ucontext->mmap_lock);
- return NULL;
-}
-
-static inline void insert_mmap(struct iwch_ucontext *ucontext,
- struct iwch_mm_entry *mm)
-{
- spin_lock(&ucontext->mmap_lock);
- PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
- mm->key, (unsigned long long) mm->addr, mm->len);
- list_add_tail(&mm->entry, &ucontext->mmaps);
- spin_unlock(&ucontext->mmap_lock);
-}
-
-enum iwch_qp_attr_mask {
- IWCH_QP_ATTR_NEXT_STATE = 1 << 0,
- IWCH_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
- IWCH_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
- IWCH_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
- IWCH_QP_ATTR_MAX_ORD = 1 << 11,
- IWCH_QP_ATTR_MAX_IRD = 1 << 12,
- IWCH_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
- IWCH_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
- IWCH_QP_ATTR_MPA_ATTR = 1 << 24,
- IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
- IWCH_QP_ATTR_VALID_MODIFY = (IWCH_QP_ATTR_ENABLE_RDMA_READ |
- IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
- IWCH_QP_ATTR_MAX_ORD |
- IWCH_QP_ATTR_MAX_IRD |
- IWCH_QP_ATTR_LLP_STREAM_HANDLE |
- IWCH_QP_ATTR_STREAM_MSG_BUFFER |
- IWCH_QP_ATTR_MPA_ATTR |
- IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE)
-};
-
-int iwch_modify_qp(struct iwch_dev *rhp,
- struct iwch_qp *qhp,
- enum iwch_qp_attr_mask mask,
- struct iwch_qp_attributes *attrs,
- int internal);
-
-enum iwch_qp_state {
- IWCH_QP_STATE_IDLE,
- IWCH_QP_STATE_RTS,
- IWCH_QP_STATE_ERROR,
- IWCH_QP_STATE_TERMINATE,
- IWCH_QP_STATE_CLOSING,
- IWCH_QP_STATE_TOT
-};
-
-static inline int iwch_convert_state(enum ib_qp_state ib_state)
-{
- switch (ib_state) {
- case IB_QPS_RESET:
- case IB_QPS_INIT:
- return IWCH_QP_STATE_IDLE;
- case IB_QPS_RTS:
- return IWCH_QP_STATE_RTS;
- case IB_QPS_SQD:
- return IWCH_QP_STATE_CLOSING;
- case IB_QPS_SQE:
- return IWCH_QP_STATE_TERMINATE;
- case IB_QPS_ERR:
- return IWCH_QP_STATE_ERROR;
- default:
- return -1;
- }
-}
-
-static inline u32 iwch_ib_to_tpt_access(int acc)
-{
- return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
- (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0) |
- (acc & IB_ACCESS_LOCAL_WRITE ? TPT_LOCAL_WRITE : 0) |
- (acc & IB_ACCESS_MW_BIND ? TPT_MW_BIND : 0) |
- TPT_LOCAL_READ;
-}
-
-static inline u32 iwch_ib_to_tpt_bind_access(int acc)
-{
- return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
- (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0);
-}
-
-enum iwch_mmid_state {
- IWCH_STAG_STATE_VALID,
- IWCH_STAG_STATE_INVALID
-};
-
-enum iwch_qp_query_flags {
- IWCH_QP_QUERY_CONTEXT_NONE = 0x0, /* No ctx; Only attrs */
- IWCH_QP_QUERY_CONTEXT_GET = 0x1, /* Get ctx + attrs */
- IWCH_QP_QUERY_CONTEXT_SUSPEND = 0x2, /* Not Supported */
-
- /*
- * Quiesce QP context; Consumer
- * will NOT replay outstanding WR
- */
- IWCH_QP_QUERY_CONTEXT_QUIESCE = 0x4,
- IWCH_QP_QUERY_CONTEXT_REMOVE = 0x8,
- IWCH_QP_QUERY_TEST_USERWRITE = 0x32 /* Test special */
-};
-
-u16 iwch_rqes_posted(struct iwch_qp *qhp);
-int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
-int iwch_bind_mw(struct ib_qp *qp,
- struct ib_mw *mw,
- struct ib_mw_bind *mw_bind);
-int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
-int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
-int iwch_post_zb_read(struct iwch_ep *ep);
-int iwch_register_device(struct iwch_dev *dev);
-void iwch_unregister_device(struct iwch_dev *dev);
-void stop_read_rep_timer(struct iwch_qp *qhp);
-int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
- struct iwch_mr *mhp, int shift);
-int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
- struct iwch_mr *mhp,
- int shift,
- int npages);
-int iwch_alloc_pbl(struct iwch_mr *mhp, int npages);
-void iwch_free_pbl(struct iwch_mr *mhp);
-int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset);
-int build_phys_page_list(struct ib_phys_buf *buffer_list,
- int num_phys_buf,
- u64 *iova_start,
- u64 *total_size,
- int *npages,
- int *shift,
- __be64 **page_list);
-
-
-#define IWCH_NODE_DESC "cxgb3 Chelsio Communications"
-
-#endif
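
The inline helpers near the end of this header (iwch_ib_to_tpt_access() and iwch_ib_to_tpt_bind_access()) translate generic IB access flags into the adapter's TPT permission bits with a chain of conditional ORs, always granting local read. The sketch below shows the same translation pattern as a standalone program; the enum values are made up for illustration and are not the real IB_ACCESS_* or TPT_* constants.

#include <stdio.h>

/* Illustrative flag values only; the real IB_ACCESS_* and TPT_* constants
 * are defined in the RDMA core and cxgb3 headers, not here. */
enum ib_acc  { ACC_LOCAL_WRITE = 1 << 0, ACC_REMOTE_WRITE = 1 << 1,
	       ACC_REMOTE_READ = 1 << 2, ACC_MW_BIND = 1 << 3 };
enum tpt_perm { PERM_LOCAL_READ = 1 << 0, PERM_LOCAL_WRITE = 1 << 1,
		PERM_REMOTE_READ = 1 << 2, PERM_REMOTE_WRITE = 1 << 3,
		PERM_MW_BIND = 1 << 4 };

/* Same shape as iwch_ib_to_tpt_access(): map each generic flag to a
 * hardware permission bit and always grant local read. */
static unsigned ib_to_tpt_access(int acc)
{
	return (acc & ACC_REMOTE_WRITE ? PERM_REMOTE_WRITE : 0) |
	       (acc & ACC_REMOTE_READ  ? PERM_REMOTE_READ  : 0) |
	       (acc & ACC_LOCAL_WRITE  ? PERM_LOCAL_WRITE  : 0) |
	       (acc & ACC_MW_BIND      ? PERM_MW_BIND      : 0) |
	       PERM_LOCAL_READ;
}

int main(void)
{
	/* local write + remote read -> 0x7 with the values above */
	printf("0x%x\n", ib_to_tpt_access(ACC_LOCAL_WRITE | ACC_REMOTE_READ));
	return 0;
}
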
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
deleted file mode 100644
index b57c0befd962..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ /dev/null
@@ -1,1163 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/sched.h>
-#include <linux/gfp.h>
-#include "iwch_provider.h"
-#include "iwch.h"
-#include "iwch_cm.h"
-#include "cxio_hal.h"
-#include "cxio_resource.h"
-
-#define NO_SUPPORT -1
-
-static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
- u8 * flit_cnt)
-{
- int i;
- u32 plen;
-
- switch (wr->opcode) {
- case IB_WR_SEND:
- if (wr->send_flags & IB_SEND_SOLICITED)
- wqe->send.rdmaop = T3_SEND_WITH_SE;
- else
- wqe->send.rdmaop = T3_SEND;
- wqe->send.rem_stag = 0;
- break;
- case IB_WR_SEND_WITH_INV:
- if (wr->send_flags & IB_SEND_SOLICITED)
- wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
- else
- wqe->send.rdmaop = T3_SEND_WITH_INV;
- wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey);
- break;
- default:
- return -EINVAL;
- }
- if (wr->num_sge > T3_MAX_SGE)
- return -EINVAL;
- wqe->send.reserved[0] = 0;
- wqe->send.reserved[1] = 0;
- wqe->send.reserved[2] = 0;
- plen = 0;
- for (i = 0; i < wr->num_sge; i++) {
- if ((plen + wr->sg_list[i].length) < plen)
- return -EMSGSIZE;
-
- plen += wr->sg_list[i].length;
- wqe->send.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
- wqe->send.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
- wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
- }
- wqe->send.num_sgle = cpu_to_be32(wr->num_sge);
- *flit_cnt = 4 + ((wr->num_sge) << 1);
- wqe->send.plen = cpu_to_be32(plen);
- return 0;
-}
-
-static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
- u8 *flit_cnt)
-{
- int i;
- u32 plen;
- if (wr->num_sge > T3_MAX_SGE)
- return -EINVAL;
- wqe->write.rdmaop = T3_RDMA_WRITE;
- wqe->write.reserved[0] = 0;
- wqe->write.reserved[1] = 0;
- wqe->write.reserved[2] = 0;
- wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
- wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
-
- if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
- plen = 4;
- wqe->write.sgl[0].stag = wr->ex.imm_data;
- wqe->write.sgl[0].len = cpu_to_be32(0);
- wqe->write.num_sgle = cpu_to_be32(0);
- *flit_cnt = 6;
- } else {
- plen = 0;
- for (i = 0; i < wr->num_sge; i++) {
- if ((plen + wr->sg_list[i].length) < plen) {
- return -EMSGSIZE;
- }
- plen += wr->sg_list[i].length;
- wqe->write.sgl[i].stag =
- cpu_to_be32(wr->sg_list[i].lkey);
- wqe->write.sgl[i].len =
- cpu_to_be32(wr->sg_list[i].length);
- wqe->write.sgl[i].to =
- cpu_to_be64(wr->sg_list[i].addr);
- }
- wqe->write.num_sgle = cpu_to_be32(wr->num_sge);
- *flit_cnt = 5 + ((wr->num_sge) << 1);
- }
- wqe->write.plen = cpu_to_be32(plen);
- return 0;
-}
-
-static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
- u8 *flit_cnt)
-{
- if (wr->num_sge > 1)
- return -EINVAL;
- wqe->read.rdmaop = T3_READ_REQ;
- if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
- wqe->read.local_inv = 1;
- else
- wqe->read.local_inv = 0;
- wqe->read.reserved[0] = 0;
- wqe->read.reserved[1] = 0;
- wqe->read.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
- wqe->read.rem_to = cpu_to_be64(wr->wr.rdma.remote_addr);
- wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey);
- wqe->read.local_len = cpu_to_be32(wr->sg_list[0].length);
- wqe->read.local_to = cpu_to_be64(wr->sg_list[0].addr);
- *flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
- return 0;
-}
-
-static int build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr,
- u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
-{
- int i;
- __be64 *p;
-
- if (wr->wr.fast_reg.page_list_len > T3_MAX_FASTREG_DEPTH)
- return -EINVAL;
- *wr_cnt = 1;
- wqe->fastreg.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
- wqe->fastreg.len = cpu_to_be32(wr->wr.fast_reg.length);
- wqe->fastreg.va_base_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
- wqe->fastreg.va_base_lo_fbo =
- cpu_to_be32(wr->wr.fast_reg.iova_start & 0xffffffff);
- wqe->fastreg.page_type_perms = cpu_to_be32(
- V_FR_PAGE_COUNT(wr->wr.fast_reg.page_list_len) |
- V_FR_PAGE_SIZE(wr->wr.fast_reg.page_shift-12) |
- V_FR_TYPE(TPT_VATO) |
- V_FR_PERMS(iwch_ib_to_tpt_access(wr->wr.fast_reg.access_flags)));
- p = &wqe->fastreg.pbl_addrs[0];
- for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++) {
-
- /* If we need a 2nd WR, then set it up */
- if (i == T3_MAX_FASTREG_FRAG) {
- *wr_cnt = 2;
- wqe = (union t3_wr *)(wq->queue +
- Q_PTR2IDX((wq->wptr+1), wq->size_log2));
- build_fw_riwrh((void *)wqe, T3_WR_FASTREG, 0,
- Q_GENBIT(wq->wptr + 1, wq->size_log2),
- 0, 1 + wr->wr.fast_reg.page_list_len - T3_MAX_FASTREG_FRAG,
- T3_EOP);
-
- p = &wqe->pbl_frag.pbl_addrs[0];
- }
- *p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
- }
- *flit_cnt = 5 + wr->wr.fast_reg.page_list_len;
- if (*flit_cnt > 15)
- *flit_cnt = 15;
- return 0;
-}
-
-static int build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr,
- u8 *flit_cnt)
-{
- wqe->local_inv.stag = cpu_to_be32(wr->ex.invalidate_rkey);
- wqe->local_inv.reserved = 0;
- *flit_cnt = sizeof(struct t3_local_inv_wr) >> 3;
- return 0;
-}
-
-static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
- u32 num_sgle, u32 * pbl_addr, u8 * page_size)
-{
- int i;
- struct iwch_mr *mhp;
- u64 offset;
- for (i = 0; i < num_sgle; i++) {
-
- mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
- if (!mhp) {
- PDBG("%s %d\n", __func__, __LINE__);
- return -EIO;
- }
- if (!mhp->attr.state) {
- PDBG("%s %d\n", __func__, __LINE__);
- return -EIO;
- }
- if (mhp->attr.zbva) {
- PDBG("%s %d\n", __func__, __LINE__);
- return -EIO;
- }
-
- if (sg_list[i].addr < mhp->attr.va_fbo) {
- PDBG("%s %d\n", __func__, __LINE__);
- return -EINVAL;
- }
- if (sg_list[i].addr + ((u64) sg_list[i].length) <
- sg_list[i].addr) {
- PDBG("%s %d\n", __func__, __LINE__);
- return -EINVAL;
- }
- if (sg_list[i].addr + ((u64) sg_list[i].length) >
- mhp->attr.va_fbo + ((u64) mhp->attr.len)) {
- PDBG("%s %d\n", __func__, __LINE__);
- return -EINVAL;
- }
- offset = sg_list[i].addr - mhp->attr.va_fbo;
- offset += mhp->attr.va_fbo &
- ((1UL << (12 + mhp->attr.page_size)) - 1);
- pbl_addr[i] = ((mhp->attr.pbl_addr -
- rhp->rdev.rnic_info.pbl_base) >> 3) +
- (offset >> (12 + mhp->attr.page_size));
- page_size[i] = mhp->attr.page_size;
- }
- return 0;
-}
-
-static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
- struct ib_recv_wr *wr)
-{
- int i, err = 0;
- u32 pbl_addr[T3_MAX_SGE];
- u8 page_size[T3_MAX_SGE];
-
- err = iwch_sgl2pbl_map(qhp->rhp, wr->sg_list, wr->num_sge, pbl_addr,
- page_size);
- if (err)
- return err;
- wqe->recv.pagesz[0] = page_size[0];
- wqe->recv.pagesz[1] = page_size[1];
- wqe->recv.pagesz[2] = page_size[2];
- wqe->recv.pagesz[3] = page_size[3];
- wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
- for (i = 0; i < wr->num_sge; i++) {
- wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
- wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
-
- /* to in the WQE == the offset into the page */
- wqe->recv.sgl[i].to = cpu_to_be64(((u32)wr->sg_list[i].addr) &
- ((1UL << (12 + page_size[i])) - 1));
-
- /* pbl_addr is the adapter's address in the PBL */
- wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
- }
- for (; i < T3_MAX_SGE; i++) {
- wqe->recv.sgl[i].stag = 0;
- wqe->recv.sgl[i].len = 0;
- wqe->recv.sgl[i].to = 0;
- wqe->recv.pbl_addr[i] = 0;
- }
- qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
- qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2)].pbl_addr = 0;
- return 0;
-}
-
-static int build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe,
- struct ib_recv_wr *wr)
-{
- int i;
- u32 pbl_addr;
- u32 pbl_offset;
-
-
- /*
- * The T3 HW requires the PBL in the HW recv descriptor to reference
- * a PBL entry. So we allocate the max needed PBL memory here and pass
- * it to the uP in the recv WR. The uP will build the PBL and set up
- * the HW recv descriptor.
- */
- pbl_addr = cxio_hal_pblpool_alloc(&qhp->rhp->rdev, T3_STAG0_PBL_SIZE);
- if (!pbl_addr)
- return -ENOMEM;
-
- /*
- * Compute the 8B aligned offset.
- */
- pbl_offset = (pbl_addr - qhp->rhp->rdev.rnic_info.pbl_base) >> 3;
-
- wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
-
- for (i = 0; i < wr->num_sge; i++) {
-
- /*
- * Use a 128MB page size. This and an imposed 128MB
- * sge length limit allow us to require only a 2-entry HW
- * PBL for each SGE. This restriction is acceptable since
- * it is not possible to allocate 128MB of contiguous
- * DMA coherent memory!
- */
- if (wr->sg_list[i].length > T3_STAG0_MAX_PBE_LEN)
- return -EINVAL;
- wqe->recv.pagesz[i] = T3_STAG0_PAGE_SHIFT;
-
- /*
- * T3 restricts a recv to all zero-stag or all non-zero-stag.
- */
- if (wr->sg_list[i].lkey != 0)
- return -EINVAL;
- wqe->recv.sgl[i].stag = 0;
- wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
- wqe->recv.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
- wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_offset);
- pbl_offset += 2;
- }
- for (; i < T3_MAX_SGE; i++) {
- wqe->recv.pagesz[i] = 0;
- wqe->recv.sgl[i].stag = 0;
- wqe->recv.sgl[i].len = 0;
- wqe->recv.sgl[i].to = 0;
- wqe->recv.pbl_addr[i] = 0;
- }
- qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
- qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2)].pbl_addr = pbl_addr;
- return 0;
-}
-
-int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
-{
- int err = 0;
- u8 uninitialized_var(t3_wr_flit_cnt);
- enum t3_wr_opcode t3_wr_opcode = 0;
- enum t3_wr_flags t3_wr_flags;
- struct iwch_qp *qhp;
- u32 idx;
- union t3_wr *wqe;
- u32 num_wrs;
- unsigned long flag;
- struct t3_swsq *sqp;
- int wr_cnt = 1;
-
- qhp = to_iwch_qp(ibqp);
- spin_lock_irqsave(&qhp->lock, flag);
- if (qhp->attr.state > IWCH_QP_STATE_RTS) {
- spin_unlock_irqrestore(&qhp->lock, flag);
- err = -EINVAL;
- goto out;
- }
- num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
- qhp->wq.sq_size_log2);
- if (num_wrs == 0) {
- spin_unlock_irqrestore(&qhp->lock, flag);
- err = -ENOMEM;
- goto out;
- }
- while (wr) {
- if (num_wrs == 0) {
- err = -ENOMEM;
- break;
- }
- idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
- wqe = (union t3_wr *) (qhp->wq.queue + idx);
- t3_wr_flags = 0;
- if (wr->send_flags & IB_SEND_SOLICITED)
- t3_wr_flags |= T3_SOLICITED_EVENT_FLAG;
- if (wr->send_flags & IB_SEND_SIGNALED)
- t3_wr_flags |= T3_COMPLETION_FLAG;
- sqp = qhp->wq.sq +
- Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
- switch (wr->opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_INV:
- if (wr->send_flags & IB_SEND_FENCE)
- t3_wr_flags |= T3_READ_FENCE_FLAG;
- t3_wr_opcode = T3_WR_SEND;
- err = build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
- break;
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- t3_wr_opcode = T3_WR_WRITE;
- err = build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
- break;
- case IB_WR_RDMA_READ:
- case IB_WR_RDMA_READ_WITH_INV:
- t3_wr_opcode = T3_WR_READ;
- t3_wr_flags = 0; /* T3 reads are always signaled */
- err = build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
- if (err)
- break;
- sqp->read_len = wqe->read.local_len;
- if (!qhp->wq.oldest_read)
- qhp->wq.oldest_read = sqp;
- break;
- case IB_WR_FAST_REG_MR:
- t3_wr_opcode = T3_WR_FASTREG;
- err = build_fastreg(wqe, wr, &t3_wr_flit_cnt,
- &wr_cnt, &qhp->wq);
- break;
- case IB_WR_LOCAL_INV:
- if (wr->send_flags & IB_SEND_FENCE)
- t3_wr_flags |= T3_LOCAL_FENCE_FLAG;
- t3_wr_opcode = T3_WR_INV_STAG;
- err = build_inv_stag(wqe, wr, &t3_wr_flit_cnt);
- break;
- default:
- PDBG("%s post of type=%d TBD!\n", __func__,
- wr->opcode);
- err = -EINVAL;
- }
- if (err)
- break;
- wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
- sqp->wr_id = wr->wr_id;
- sqp->opcode = wr2opcode(t3_wr_opcode);
- sqp->sq_wptr = qhp->wq.sq_wptr;
- sqp->complete = 0;
- sqp->signaled = (wr->send_flags & IB_SEND_SIGNALED);
-
- build_fw_riwrh((void *) wqe, t3_wr_opcode, t3_wr_flags,
- Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
- 0, t3_wr_flit_cnt,
- (wr_cnt == 1) ? T3_SOPEOP : T3_SOP);
- PDBG("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
- __func__, (unsigned long long) wr->wr_id, idx,
- Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
- sqp->opcode);
- wr = wr->next;
- num_wrs--;
- qhp->wq.wptr += wr_cnt;
- ++(qhp->wq.sq_wptr);
- }
- spin_unlock_irqrestore(&qhp->lock, flag);
- if (cxio_wq_db_enabled(&qhp->wq))
- ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
-
-out:
- if (err)
- *bad_wr = wr;
- return err;
-}
-
-int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
-{
- int err = 0;
- struct iwch_qp *qhp;
- u32 idx;
- union t3_wr *wqe;
- u32 num_wrs;
- unsigned long flag;
-
- qhp = to_iwch_qp(ibqp);
- spin_lock_irqsave(&qhp->lock, flag);
- if (qhp->attr.state > IWCH_QP_STATE_RTS) {
- spin_unlock_irqrestore(&qhp->lock, flag);
- err = -EINVAL;
- goto out;
- }
- num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2) - 1;
- if (!wr) {
- spin_unlock_irqrestore(&qhp->lock, flag);
- err = -ENOMEM;
- goto out;
- }
- while (wr) {
- if (wr->num_sge > T3_MAX_SGE) {
- err = -EINVAL;
- break;
- }
- idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
- wqe = (union t3_wr *) (qhp->wq.queue + idx);
- if (num_wrs)
- if (wr->sg_list[0].lkey)
- err = build_rdma_recv(qhp, wqe, wr);
- else
- err = build_zero_stag_recv(qhp, wqe, wr);
- else
- err = -ENOMEM;
-
- if (err)
- break;
-
- build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
- Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
- 0, sizeof(struct t3_receive_wr) >> 3, T3_SOPEOP);
- PDBG("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rw_rptr 0x%x "
- "wqe %p \n", __func__, (unsigned long long) wr->wr_id,
- idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
- ++(qhp->wq.rq_wptr);
- ++(qhp->wq.wptr);
- wr = wr->next;
- num_wrs--;
- }
- spin_unlock_irqrestore(&qhp->lock, flag);
- if (cxio_wq_db_enabled(&qhp->wq))
- ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
-
-out:
- if (err)
- *bad_wr = wr;
- return err;
-}
-
-int iwch_bind_mw(struct ib_qp *qp,
- struct ib_mw *mw,
- struct ib_mw_bind *mw_bind)
-{
- struct iwch_dev *rhp;
- struct iwch_mw *mhp;
- struct iwch_qp *qhp;
- union t3_wr *wqe;
- u32 pbl_addr;
- u8 page_size;
- u32 num_wrs;
- unsigned long flag;
- struct ib_sge sgl;
- int err=0;
- enum t3_wr_flags t3_wr_flags;
- u32 idx;
- struct t3_swsq *sqp;
-
- qhp = to_iwch_qp(qp);
- mhp = to_iwch_mw(mw);
- rhp = qhp->rhp;
-
- spin_lock_irqsave(&qhp->lock, flag);
- if (qhp->attr.state > IWCH_QP_STATE_RTS) {
- spin_unlock_irqrestore(&qhp->lock, flag);
- return -EINVAL;
- }
- num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
- qhp->wq.sq_size_log2);
- if (num_wrs == 0) {
- spin_unlock_irqrestore(&qhp->lock, flag);
- return -ENOMEM;
- }
- idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
- PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __func__, idx,
- mw, mw_bind);
- wqe = (union t3_wr *) (qhp->wq.queue + idx);
-
- t3_wr_flags = 0;
- if (mw_bind->send_flags & IB_SEND_SIGNALED)
- t3_wr_flags = T3_COMPLETION_FLAG;
-
- sgl.addr = mw_bind->bind_info.addr;
- sgl.lkey = mw_bind->bind_info.mr->lkey;
- sgl.length = mw_bind->bind_info.length;
- wqe->bind.reserved = 0;
- wqe->bind.type = TPT_VATO;
-
- /* TBD: check perms */
- wqe->bind.perms = iwch_ib_to_tpt_bind_access(
- mw_bind->bind_info.mw_access_flags);
- wqe->bind.mr_stag = cpu_to_be32(mw_bind->bind_info.mr->lkey);
- wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
- wqe->bind.mw_len = cpu_to_be32(mw_bind->bind_info.length);
- wqe->bind.mw_va = cpu_to_be64(mw_bind->bind_info.addr);
- err = iwch_sgl2pbl_map(rhp, &sgl, 1, &pbl_addr, &page_size);
- if (err) {
- spin_unlock_irqrestore(&qhp->lock, flag);
- return err;
- }
- wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
- sqp = qhp->wq.sq + Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
- sqp->wr_id = mw_bind->wr_id;
- sqp->opcode = T3_BIND_MW;
- sqp->sq_wptr = qhp->wq.sq_wptr;
- sqp->complete = 0;
- sqp->signaled = (mw_bind->send_flags & IB_SEND_SIGNALED);
- wqe->bind.mr_pbl_addr = cpu_to_be32(pbl_addr);
- wqe->bind.mr_pagesz = page_size;
- build_fw_riwrh((void *)wqe, T3_WR_BIND, t3_wr_flags,
- Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), 0,
- sizeof(struct t3_bind_mw_wr) >> 3, T3_SOPEOP);
- ++(qhp->wq.wptr);
- ++(qhp->wq.sq_wptr);
- spin_unlock_irqrestore(&qhp->lock, flag);
-
- if (cxio_wq_db_enabled(&qhp->wq))
- ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
-
- return err;
-}
-
-static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
- u8 *layer_type, u8 *ecode)
-{
- int status = TPT_ERR_INTERNAL_ERR;
- int tagged = 0;
- int opcode = -1;
- int rqtype = 0;
- int send_inv = 0;
-
- if (rsp_msg) {
- status = CQE_STATUS(rsp_msg->cqe);
- opcode = CQE_OPCODE(rsp_msg->cqe);
- rqtype = RQ_TYPE(rsp_msg->cqe);
- send_inv = (opcode == T3_SEND_WITH_INV) ||
- (opcode == T3_SEND_WITH_SE_INV);
- tagged = (opcode == T3_RDMA_WRITE) ||
- (rqtype && (opcode == T3_READ_RESP));
- }
-
- switch (status) {
- case TPT_ERR_STAG:
- if (send_inv) {
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
- *ecode = RDMAP_CANT_INV_STAG;
- } else {
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_INV_STAG;
- }
- break;
- case TPT_ERR_PDID:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- if ((opcode == T3_SEND_WITH_INV) ||
- (opcode == T3_SEND_WITH_SE_INV))
- *ecode = RDMAP_CANT_INV_STAG;
- else
- *ecode = RDMAP_STAG_NOT_ASSOC;
- break;
- case TPT_ERR_QPID:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_STAG_NOT_ASSOC;
- break;
- case TPT_ERR_ACCESS:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_ACC_VIOL;
- break;
- case TPT_ERR_WRAP:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_TO_WRAP;
- break;
- case TPT_ERR_BOUND:
- if (tagged) {
- *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
- *ecode = DDPT_BASE_BOUNDS;
- } else {
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_BASE_BOUNDS;
- }
- break;
- case TPT_ERR_INVALIDATE_SHARED_MR:
- case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
- *ecode = RDMAP_CANT_INV_STAG;
- break;
- case TPT_ERR_ECC:
- case TPT_ERR_ECC_PSTAG:
- case TPT_ERR_INTERNAL_ERR:
- *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
- *ecode = 0;
- break;
- case TPT_ERR_OUT_OF_RQE:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_MSN_NOBUF;
- break;
- case TPT_ERR_PBL_ADDR_BOUND:
- *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
- *ecode = DDPT_BASE_BOUNDS;
- break;
- case TPT_ERR_CRC:
- *layer_type = LAYER_MPA|DDP_LLP;
- *ecode = MPA_CRC_ERR;
- break;
- case TPT_ERR_MARKER:
- *layer_type = LAYER_MPA|DDP_LLP;
- *ecode = MPA_MARKER_ERR;
- break;
- case TPT_ERR_PDU_LEN_ERR:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_MSG_TOOBIG;
- break;
- case TPT_ERR_DDP_VERSION:
- if (tagged) {
- *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
- *ecode = DDPT_INV_VERS;
- } else {
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_VERS;
- }
- break;
- case TPT_ERR_RDMA_VERSION:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
- *ecode = RDMAP_INV_VERS;
- break;
- case TPT_ERR_OPCODE:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
- *ecode = RDMAP_INV_OPCODE;
- break;
- case TPT_ERR_DDP_QUEUE_NUM:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_QN;
- break;
- case TPT_ERR_MSN:
- case TPT_ERR_MSN_GAP:
- case TPT_ERR_MSN_RANGE:
- case TPT_ERR_IRD_OVERFLOW:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_MSN_RANGE;
- break;
- case TPT_ERR_TBIT:
- *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
- *ecode = 0;
- break;
- case TPT_ERR_MO:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_MO;
- break;
- default:
- *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
- *ecode = 0;
- break;
- }
-}
-
-int iwch_post_zb_read(struct iwch_ep *ep)
-{
- union t3_wr *wqe;
- struct sk_buff *skb;
- u8 flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
-
- PDBG("%s enter\n", __func__);
- skb = alloc_skb(40, GFP_KERNEL);
- if (!skb) {
- printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
- return -ENOMEM;
- }
- wqe = (union t3_wr *)skb_put(skb, sizeof(struct t3_rdma_read_wr));
- memset(wqe, 0, sizeof(struct t3_rdma_read_wr));
- wqe->read.rdmaop = T3_READ_REQ;
- wqe->read.reserved[0] = 0;
- wqe->read.reserved[1] = 0;
- wqe->read.rem_stag = cpu_to_be32(1);
- wqe->read.rem_to = cpu_to_be64(1);
- wqe->read.local_stag = cpu_to_be32(1);
- wqe->read.local_len = cpu_to_be32(0);
- wqe->read.local_to = cpu_to_be64(1);
- wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ));
- wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(ep->hwtid)|
- V_FW_RIWR_LEN(flit_cnt));
- skb->priority = CPL_PRIORITY_DATA;
- return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb);
-}
-
-/*
- * This posts a TERMINATE with layer=RDMA, type=catastrophic.
- */
-int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
-{
- union t3_wr *wqe;
- struct terminate_message *term;
- struct sk_buff *skb;
-
- PDBG("%s %d\n", __func__, __LINE__);
- skb = alloc_skb(40, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_ERR "%s cannot send TERMINATE!\n", __func__);
- return -ENOMEM;
- }
- wqe = (union t3_wr *)skb_put(skb, 40);
- memset(wqe, 0, 40);
- wqe->send.rdmaop = T3_TERMINATE;
-
- /* immediate data length */
- wqe->send.plen = htonl(4);
-
- /* immediate data starts here. */
- term = (struct terminate_message *)wqe->send.sgl;
- build_term_codes(rsp_msg, &term->layer_etype, &term->ecode);
- wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_SEND) |
- V_FW_RIWR_FLAGS(T3_COMPLETION_FLAG | T3_NOTIFY_FLAG));
- wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid));
- skb->priority = CPL_PRIORITY_DATA;
- return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
-}
-
-/*
- * Assumes qhp lock is held.
- */
-static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
- struct iwch_cq *schp)
-{
- int count;
- int flushed;
-
-
- PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
- /* take a ref on the qhp since we must release the lock */
- atomic_inc(&qhp->refcnt);
- spin_unlock(&qhp->lock);
-
- /* locking hierarchy: cq lock first, then qp lock. */
- spin_lock(&rchp->lock);
- spin_lock(&qhp->lock);
- cxio_flush_hw_cq(&rchp->cq);
- cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
- flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
- spin_unlock(&qhp->lock);
- spin_unlock(&rchp->lock);
- if (flushed) {
- spin_lock(&rchp->comp_handler_lock);
- (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
- spin_unlock(&rchp->comp_handler_lock);
- }
-
- /* locking hierarchy: cq lock first, then qp lock. */
- spin_lock(&schp->lock);
- spin_lock(&qhp->lock);
- cxio_flush_hw_cq(&schp->cq);
- cxio_count_scqes(&schp->cq, &qhp->wq, &count);
- flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
- spin_unlock(&qhp->lock);
- spin_unlock(&schp->lock);
- if (flushed) {
- spin_lock(&schp->comp_handler_lock);
- (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
- spin_unlock(&schp->comp_handler_lock);
- }
-
- /* deref */
- if (atomic_dec_and_test(&qhp->refcnt))
- wake_up(&qhp->wait);
-
- spin_lock(&qhp->lock);
-}
-
-static void flush_qp(struct iwch_qp *qhp)
-{
- struct iwch_cq *rchp, *schp;
-
- rchp = get_chp(qhp->rhp, qhp->attr.rcq);
- schp = get_chp(qhp->rhp, qhp->attr.scq);
-
- if (qhp->ibqp.uobject) {
- cxio_set_wq_in_error(&qhp->wq);
- cxio_set_cq_in_error(&rchp->cq);
- spin_lock(&rchp->comp_handler_lock);
- (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
- spin_unlock(&rchp->comp_handler_lock);
- if (schp != rchp) {
- cxio_set_cq_in_error(&schp->cq);
- spin_lock(&schp->comp_handler_lock);
- (*schp->ibcq.comp_handler)(&schp->ibcq,
- schp->ibcq.cq_context);
- spin_unlock(&schp->comp_handler_lock);
- }
- return;
- }
- __flush_qp(qhp, rchp, schp);
-}
-
-
-/*
- * Return count of RECV WRs posted
- */
-u16 iwch_rqes_posted(struct iwch_qp *qhp)
-{
- union t3_wr *wqe = qhp->wq.queue;
- u16 count = 0;
-
- while (count < USHRT_MAX && fw_riwrh_opcode((struct fw_riwrh *)wqe) == T3_WR_RCV) {
- count++;
- wqe++;
- }
- PDBG("%s qhp %p count %u\n", __func__, qhp, count);
- return count;
-}
-
-static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
- enum iwch_qp_attr_mask mask,
- struct iwch_qp_attributes *attrs)
-{
- struct t3_rdma_init_attr init_attr;
- int ret;
-
- init_attr.tid = qhp->ep->hwtid;
- init_attr.qpid = qhp->wq.qpid;
- init_attr.pdid = qhp->attr.pd;
- init_attr.scqid = qhp->attr.scq;
- init_attr.rcqid = qhp->attr.rcq;
- init_attr.rq_addr = qhp->wq.rq_addr;
- init_attr.rq_size = 1 << qhp->wq.rq_size_log2;
- init_attr.mpaattrs = uP_RI_MPA_IETF_ENABLE |
- qhp->attr.mpa_attr.recv_marker_enabled |
- (qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
- (qhp->attr.mpa_attr.crc_enabled << 2);
-
- init_attr.qpcaps = uP_RI_QP_RDMA_READ_ENABLE |
- uP_RI_QP_RDMA_WRITE_ENABLE |
- uP_RI_QP_BIND_ENABLE;
- if (!qhp->ibqp.uobject)
- init_attr.qpcaps |= uP_RI_QP_STAG0_ENABLE |
- uP_RI_QP_FAST_REGISTER_ENABLE;
-
- init_attr.tcp_emss = qhp->ep->emss;
- init_attr.ord = qhp->attr.max_ord;
- init_attr.ird = qhp->attr.max_ird;
- init_attr.qp_dma_addr = qhp->wq.dma_addr;
- init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
- init_attr.rqe_count = iwch_rqes_posted(qhp);
- init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
- init_attr.chan = qhp->ep->l2t->smt_idx;
- if (peer2peer) {
- init_attr.rtr_type = RTR_READ;
- if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
- init_attr.ord = 1;
- if (init_attr.ird == 0 && !qhp->attr.mpa_attr.initiator)
- init_attr.ird = 1;
- } else
- init_attr.rtr_type = 0;
- init_attr.irs = qhp->ep->rcv_seq;
- PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
- "flags 0x%x qpcaps 0x%x\n", __func__,
- init_attr.rq_addr, init_attr.rq_size,
- init_attr.flags, init_attr.qpcaps);
- ret = cxio_rdma_init(&rhp->rdev, &init_attr);
- PDBG("%s ret %d\n", __func__, ret);
- return ret;
-}
-
-int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
- enum iwch_qp_attr_mask mask,
- struct iwch_qp_attributes *attrs,
- int internal)
-{
- int ret = 0;
- struct iwch_qp_attributes newattr = qhp->attr;
- unsigned long flag;
- int disconnect = 0;
- int terminate = 0;
- int abort = 0;
- int free = 0;
- struct iwch_ep *ep = NULL;
-
- PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __func__,
- qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
- (mask & IWCH_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
-
- spin_lock_irqsave(&qhp->lock, flag);
-
- /* Process attr changes if in IDLE */
- if (mask & IWCH_QP_ATTR_VALID_MODIFY) {
- if (qhp->attr.state != IWCH_QP_STATE_IDLE) {
- ret = -EIO;
- goto out;
- }
- if (mask & IWCH_QP_ATTR_ENABLE_RDMA_READ)
- newattr.enable_rdma_read = attrs->enable_rdma_read;
- if (mask & IWCH_QP_ATTR_ENABLE_RDMA_WRITE)
- newattr.enable_rdma_write = attrs->enable_rdma_write;
- if (mask & IWCH_QP_ATTR_ENABLE_RDMA_BIND)
- newattr.enable_bind = attrs->enable_bind;
- if (mask & IWCH_QP_ATTR_MAX_ORD) {
- if (attrs->max_ord >
- rhp->attr.max_rdma_read_qp_depth) {
- ret = -EINVAL;
- goto out;
- }
- newattr.max_ord = attrs->max_ord;
- }
- if (mask & IWCH_QP_ATTR_MAX_IRD) {
- if (attrs->max_ird >
- rhp->attr.max_rdma_reads_per_qp) {
- ret = -EINVAL;
- goto out;
- }
- newattr.max_ird = attrs->max_ird;
- }
- qhp->attr = newattr;
- }
-
- if (!(mask & IWCH_QP_ATTR_NEXT_STATE))
- goto out;
- if (qhp->attr.state == attrs->next_state)
- goto out;
-
- switch (qhp->attr.state) {
- case IWCH_QP_STATE_IDLE:
- switch (attrs->next_state) {
- case IWCH_QP_STATE_RTS:
- if (!(mask & IWCH_QP_ATTR_LLP_STREAM_HANDLE)) {
- ret = -EINVAL;
- goto out;
- }
- if (!(mask & IWCH_QP_ATTR_MPA_ATTR)) {
- ret = -EINVAL;
- goto out;
- }
- qhp->attr.mpa_attr = attrs->mpa_attr;
- qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
- qhp->ep = qhp->attr.llp_stream_handle;
- qhp->attr.state = IWCH_QP_STATE_RTS;
-
- /*
- * Ref the endpoint here and deref when we
- * disassociate the endpoint from the QP. This
- * happens in CLOSING->IDLE transition or *->ERROR
- * transition.
- */
- get_ep(&qhp->ep->com);
- spin_unlock_irqrestore(&qhp->lock, flag);
- ret = rdma_init(rhp, qhp, mask, attrs);
- spin_lock_irqsave(&qhp->lock, flag);
- if (ret)
- goto err;
- break;
- case IWCH_QP_STATE_ERROR:
- qhp->attr.state = IWCH_QP_STATE_ERROR;
- flush_qp(qhp);
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
- break;
- case IWCH_QP_STATE_RTS:
- switch (attrs->next_state) {
- case IWCH_QP_STATE_CLOSING:
- BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
- qhp->attr.state = IWCH_QP_STATE_CLOSING;
- if (!internal) {
- abort=0;
- disconnect = 1;
- ep = qhp->ep;
- get_ep(&ep->com);
- }
- break;
- case IWCH_QP_STATE_TERMINATE:
- qhp->attr.state = IWCH_QP_STATE_TERMINATE;
- if (qhp->ibqp.uobject)
- cxio_set_wq_in_error(&qhp->wq);
- if (!internal)
- terminate = 1;
- break;
- case IWCH_QP_STATE_ERROR:
- qhp->attr.state = IWCH_QP_STATE_ERROR;
- if (!internal) {
- abort=1;
- disconnect = 1;
- ep = qhp->ep;
- get_ep(&ep->com);
- }
- goto err;
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
- break;
- case IWCH_QP_STATE_CLOSING:
- if (!internal) {
- ret = -EINVAL;
- goto out;
- }
- switch (attrs->next_state) {
- case IWCH_QP_STATE_IDLE:
- flush_qp(qhp);
- qhp->attr.state = IWCH_QP_STATE_IDLE;
- qhp->attr.llp_stream_handle = NULL;
- put_ep(&qhp->ep->com);
- qhp->ep = NULL;
- wake_up(&qhp->wait);
- break;
- case IWCH_QP_STATE_ERROR:
- goto err;
- default:
- ret = -EINVAL;
- goto err;
- }
- break;
- case IWCH_QP_STATE_ERROR:
- if (attrs->next_state != IWCH_QP_STATE_IDLE) {
- ret = -EINVAL;
- goto out;
- }
-
- if (!Q_EMPTY(qhp->wq.sq_rptr, qhp->wq.sq_wptr) ||
- !Q_EMPTY(qhp->wq.rq_rptr, qhp->wq.rq_wptr)) {
- ret = -EINVAL;
- goto out;
- }
- qhp->attr.state = IWCH_QP_STATE_IDLE;
- break;
- case IWCH_QP_STATE_TERMINATE:
- if (!internal) {
- ret = -EINVAL;
- goto out;
- }
- goto err;
- break;
- default:
- printk(KERN_ERR "%s in a bad state %d\n",
- __func__, qhp->attr.state);
- ret = -EINVAL;
- goto err;
- break;
- }
- goto out;
-err:
- PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
- qhp->wq.qpid);
-
- /* disassociate the LLP connection */
- qhp->attr.llp_stream_handle = NULL;
- ep = qhp->ep;
- qhp->ep = NULL;
- qhp->attr.state = IWCH_QP_STATE_ERROR;
- free=1;
- wake_up(&qhp->wait);
- BUG_ON(!ep);
- flush_qp(qhp);
-out:
- spin_unlock_irqrestore(&qhp->lock, flag);
-
- if (terminate)
- iwch_post_terminate(qhp, NULL);
-
- /*
- * If disconnect is 1, then we need to initiate a disconnect
- * on the EP. This can be a normal close (RTS->CLOSING) or
- * an abnormal close (RTS/CLOSING->ERROR).
- */
- if (disconnect) {
- iwch_ep_disconnect(ep, abort, GFP_KERNEL);
- put_ep(&ep->com);
- }
-
- /*
- * If free is 1, then we've disassociated the EP from the QP
- * and we need to dereference the EP.
- */
- if (free)
- put_ep(&ep->com);
-
- PDBG("%s exit state %d\n", __func__, qhp->attr.state);
- return ret;
-}
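
iwch_post_send() and iwch_post_receive() above drive the work queue with free-running producer/consumer pointers and derive ring indices, free counts and a generation bit from a log2 ring size (the Q_PTR2IDX/Q_FREECNT/Q_GENBIT macros come from elsewhere in the driver, not this hunk). Below is a minimal sketch of that arithmetic, using simplified locally defined macros rather than the driver's originals.

#include <stdio.h>

/* Simplified stand-ins for the driver's ring helpers: the producer and
 * consumer pointers run freely; only the low size_log2 bits index the
 * ring, and the next bit up gives the generation. */
#define RING_PTR2IDX(ptr, size_log2)  ((ptr) & ((1u << (size_log2)) - 1))
#define RING_GENBIT(ptr, size_log2)   (((ptr) >> (size_log2)) & 1u)
#define RING_FREECNT(rptr, wptr, size_log2) \
	((1u << (size_log2)) - ((wptr) - (rptr)))

int main(void)
{
	const unsigned size_log2 = 3;	/* 8-entry ring */
	unsigned rptr = 5, wptr = 11;	/* consumer and producer pointers */

	printf("entries queued  : %u\n", wptr - rptr);
	printf("free slots      : %u\n", RING_FREECNT(rptr, wptr, size_log2));
	printf("next write index: %u\n", RING_PTR2IDX(wptr, size_log2));
	printf("generation bit  : %u\n", RING_GENBIT(wptr, size_log2));
	return 0;
}

With an 8-entry ring, rptr 5 and wptr 11, the sketch reports 2 free slots, write index 3 and generation bit 1; the generation bit flips each time the pointer wraps the ring, which is the usual way a consumer tells fresh entries from stale ones.
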
diff --git a/drivers/infiniband/hw/cxgb3/tcb.h b/drivers/infiniband/hw/cxgb3/tcb.h
deleted file mode 100644
index c702dc199e18..000000000000
--- a/drivers/infiniband/hw/cxgb3/tcb.h
+++ /dev/null
@@ -1,632 +0,0 @@
-/*
- * Copyright (c) 2007 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _TCB_DEFS_H
-#define _TCB_DEFS_H
-
-#define W_TCB_T_STATE 0
-#define S_TCB_T_STATE 0
-#define M_TCB_T_STATE 0xfULL
-#define V_TCB_T_STATE(x) ((x) << S_TCB_T_STATE)
-
-#define W_TCB_TIMER 0
-#define S_TCB_TIMER 4
-#define M_TCB_TIMER 0x1ULL
-#define V_TCB_TIMER(x) ((x) << S_TCB_TIMER)
-
-#define W_TCB_DACK_TIMER 0
-#define S_TCB_DACK_TIMER 5
-#define M_TCB_DACK_TIMER 0x1ULL
-#define V_TCB_DACK_TIMER(x) ((x) << S_TCB_DACK_TIMER)
-
-#define W_TCB_DEL_FLAG 0
-#define S_TCB_DEL_FLAG 6
-#define M_TCB_DEL_FLAG 0x1ULL
-#define V_TCB_DEL_FLAG(x) ((x) << S_TCB_DEL_FLAG)
-
-#define W_TCB_L2T_IX 0
-#define S_TCB_L2T_IX 7
-#define M_TCB_L2T_IX 0x7ffULL
-#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
-
-#define W_TCB_SMAC_SEL 0
-#define S_TCB_SMAC_SEL 18
-#define M_TCB_SMAC_SEL 0x3ULL
-#define V_TCB_SMAC_SEL(x) ((x) << S_TCB_SMAC_SEL)
-
-#define W_TCB_TOS 0
-#define S_TCB_TOS 20
-#define M_TCB_TOS 0x3fULL
-#define V_TCB_TOS(x) ((x) << S_TCB_TOS)
-
-#define W_TCB_MAX_RT 0
-#define S_TCB_MAX_RT 26
-#define M_TCB_MAX_RT 0xfULL
-#define V_TCB_MAX_RT(x) ((x) << S_TCB_MAX_RT)
-
-#define W_TCB_T_RXTSHIFT 0
-#define S_TCB_T_RXTSHIFT 30
-#define M_TCB_T_RXTSHIFT 0xfULL
-#define V_TCB_T_RXTSHIFT(x) ((x) << S_TCB_T_RXTSHIFT)
-
-#define W_TCB_T_DUPACKS 1
-#define S_TCB_T_DUPACKS 2
-#define M_TCB_T_DUPACKS 0xfULL
-#define V_TCB_T_DUPACKS(x) ((x) << S_TCB_T_DUPACKS)
-
-#define W_TCB_T_MAXSEG 1
-#define S_TCB_T_MAXSEG 6
-#define M_TCB_T_MAXSEG 0xfULL
-#define V_TCB_T_MAXSEG(x) ((x) << S_TCB_T_MAXSEG)
-
-#define W_TCB_T_FLAGS1 1
-#define S_TCB_T_FLAGS1 10
-#define M_TCB_T_FLAGS1 0xffffffffULL
-#define V_TCB_T_FLAGS1(x) ((x) << S_TCB_T_FLAGS1)
-
-#define W_TCB_T_MIGRATION 1
-#define S_TCB_T_MIGRATION 20
-#define M_TCB_T_MIGRATION 0x1ULL
-#define V_TCB_T_MIGRATION(x) ((x) << S_TCB_T_MIGRATION)
-
-#define W_TCB_T_FLAGS2 2
-#define S_TCB_T_FLAGS2 10
-#define M_TCB_T_FLAGS2 0x7fULL
-#define V_TCB_T_FLAGS2(x) ((x) << S_TCB_T_FLAGS2)
-
-#define W_TCB_SND_SCALE 2
-#define S_TCB_SND_SCALE 17
-#define M_TCB_SND_SCALE 0xfULL
-#define V_TCB_SND_SCALE(x) ((x) << S_TCB_SND_SCALE)
-
-#define W_TCB_RCV_SCALE 2
-#define S_TCB_RCV_SCALE 21
-#define M_TCB_RCV_SCALE 0xfULL
-#define V_TCB_RCV_SCALE(x) ((x) << S_TCB_RCV_SCALE)
-
-#define W_TCB_SND_UNA_RAW 2
-#define S_TCB_SND_UNA_RAW 25
-#define M_TCB_SND_UNA_RAW 0x7ffffffULL
-#define V_TCB_SND_UNA_RAW(x) ((x) << S_TCB_SND_UNA_RAW)
-
-#define W_TCB_SND_NXT_RAW 3
-#define S_TCB_SND_NXT_RAW 20
-#define M_TCB_SND_NXT_RAW 0x7ffffffULL
-#define V_TCB_SND_NXT_RAW(x) ((x) << S_TCB_SND_NXT_RAW)
-
-#define W_TCB_RCV_NXT 4
-#define S_TCB_RCV_NXT 15
-#define M_TCB_RCV_NXT 0xffffffffULL
-#define V_TCB_RCV_NXT(x) ((x) << S_TCB_RCV_NXT)
-
-#define W_TCB_RCV_ADV 5
-#define S_TCB_RCV_ADV 15
-#define M_TCB_RCV_ADV 0xffffULL
-#define V_TCB_RCV_ADV(x) ((x) << S_TCB_RCV_ADV)
-
-#define W_TCB_SND_MAX_RAW 5
-#define S_TCB_SND_MAX_RAW 31
-#define M_TCB_SND_MAX_RAW 0x7ffffffULL
-#define V_TCB_SND_MAX_RAW(x) ((x) << S_TCB_SND_MAX_RAW)
-
-#define W_TCB_SND_CWND 6
-#define S_TCB_SND_CWND 26
-#define M_TCB_SND_CWND 0x7ffffffULL
-#define V_TCB_SND_CWND(x) ((x) << S_TCB_SND_CWND)
-
-#define W_TCB_SND_SSTHRESH 7
-#define S_TCB_SND_SSTHRESH 21
-#define M_TCB_SND_SSTHRESH 0x7ffffffULL
-#define V_TCB_SND_SSTHRESH(x) ((x) << S_TCB_SND_SSTHRESH)
-
-#define W_TCB_T_RTT_TS_RECENT_AGE 8
-#define S_TCB_T_RTT_TS_RECENT_AGE 16
-#define M_TCB_T_RTT_TS_RECENT_AGE 0xffffffffULL
-#define V_TCB_T_RTT_TS_RECENT_AGE(x) ((x) << S_TCB_T_RTT_TS_RECENT_AGE)
-
-#define W_TCB_T_RTSEQ_RECENT 9
-#define S_TCB_T_RTSEQ_RECENT 16
-#define M_TCB_T_RTSEQ_RECENT 0xffffffffULL
-#define V_TCB_T_RTSEQ_RECENT(x) ((x) << S_TCB_T_RTSEQ_RECENT)
-
-#define W_TCB_T_SRTT 10
-#define S_TCB_T_SRTT 16
-#define M_TCB_T_SRTT 0xffffULL
-#define V_TCB_T_SRTT(x) ((x) << S_TCB_T_SRTT)
-
-#define W_TCB_T_RTTVAR 11
-#define S_TCB_T_RTTVAR 0
-#define M_TCB_T_RTTVAR 0xffffULL
-#define V_TCB_T_RTTVAR(x) ((x) << S_TCB_T_RTTVAR)
-
-#define W_TCB_TS_LAST_ACK_SENT_RAW 11
-#define S_TCB_TS_LAST_ACK_SENT_RAW 16
-#define M_TCB_TS_LAST_ACK_SENT_RAW 0x7ffffffULL
-#define V_TCB_TS_LAST_ACK_SENT_RAW(x) ((x) << S_TCB_TS_LAST_ACK_SENT_RAW)
-
-#define W_TCB_DIP 12
-#define S_TCB_DIP 11
-#define M_TCB_DIP 0xffffffffULL
-#define V_TCB_DIP(x) ((x) << S_TCB_DIP)
-
-#define W_TCB_SIP 13
-#define S_TCB_SIP 11
-#define M_TCB_SIP 0xffffffffULL
-#define V_TCB_SIP(x) ((x) << S_TCB_SIP)
-
-#define W_TCB_DP 14
-#define S_TCB_DP 11
-#define M_TCB_DP 0xffffULL
-#define V_TCB_DP(x) ((x) << S_TCB_DP)
-
-#define W_TCB_SP 14
-#define S_TCB_SP 27
-#define M_TCB_SP 0xffffULL
-#define V_TCB_SP(x) ((x) << S_TCB_SP)
-
-#define W_TCB_TIMESTAMP 15
-#define S_TCB_TIMESTAMP 11
-#define M_TCB_TIMESTAMP 0xffffffffULL
-#define V_TCB_TIMESTAMP(x) ((x) << S_TCB_TIMESTAMP)
-
-#define W_TCB_TIMESTAMP_OFFSET 16
-#define S_TCB_TIMESTAMP_OFFSET 11
-#define M_TCB_TIMESTAMP_OFFSET 0xfULL
-#define V_TCB_TIMESTAMP_OFFSET(x) ((x) << S_TCB_TIMESTAMP_OFFSET)
-
-#define W_TCB_TX_MAX 16
-#define S_TCB_TX_MAX 15
-#define M_TCB_TX_MAX 0xffffffffULL
-#define V_TCB_TX_MAX(x) ((x) << S_TCB_TX_MAX)
-
-#define W_TCB_TX_HDR_PTR_RAW 17
-#define S_TCB_TX_HDR_PTR_RAW 15
-#define M_TCB_TX_HDR_PTR_RAW 0x1ffffULL
-#define V_TCB_TX_HDR_PTR_RAW(x) ((x) << S_TCB_TX_HDR_PTR_RAW)
-
-#define W_TCB_TX_LAST_PTR_RAW 18
-#define S_TCB_TX_LAST_PTR_RAW 0
-#define M_TCB_TX_LAST_PTR_RAW 0x1ffffULL
-#define V_TCB_TX_LAST_PTR_RAW(x) ((x) << S_TCB_TX_LAST_PTR_RAW)
-
-#define W_TCB_TX_COMPACT 18
-#define S_TCB_TX_COMPACT 17
-#define M_TCB_TX_COMPACT 0x1ULL
-#define V_TCB_TX_COMPACT(x) ((x) << S_TCB_TX_COMPACT)
-
-#define W_TCB_RX_COMPACT 18
-#define S_TCB_RX_COMPACT 18
-#define M_TCB_RX_COMPACT 0x1ULL
-#define V_TCB_RX_COMPACT(x) ((x) << S_TCB_RX_COMPACT)
-
-#define W_TCB_RCV_WND 18
-#define S_TCB_RCV_WND 19
-#define M_TCB_RCV_WND 0x7ffffffULL
-#define V_TCB_RCV_WND(x) ((x) << S_TCB_RCV_WND)
-
-#define W_TCB_RX_HDR_OFFSET 19
-#define S_TCB_RX_HDR_OFFSET 14
-#define M_TCB_RX_HDR_OFFSET 0x7ffffffULL
-#define V_TCB_RX_HDR_OFFSET(x) ((x) << S_TCB_RX_HDR_OFFSET)
-
-#define W_TCB_RX_FRAG0_START_IDX_RAW 20
-#define S_TCB_RX_FRAG0_START_IDX_RAW 9
-#define M_TCB_RX_FRAG0_START_IDX_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG0_START_IDX_RAW(x) ((x) << S_TCB_RX_FRAG0_START_IDX_RAW)
-
-#define W_TCB_RX_FRAG1_START_IDX_OFFSET 21
-#define S_TCB_RX_FRAG1_START_IDX_OFFSET 4
-#define M_TCB_RX_FRAG1_START_IDX_OFFSET 0x7ffffffULL
-#define V_TCB_RX_FRAG1_START_IDX_OFFSET(x) ((x) << S_TCB_RX_FRAG1_START_IDX_OFFSET)
-
-#define W_TCB_RX_FRAG0_LEN 21
-#define S_TCB_RX_FRAG0_LEN 31
-#define M_TCB_RX_FRAG0_LEN 0x7ffffffULL
-#define V_TCB_RX_FRAG0_LEN(x) ((x) << S_TCB_RX_FRAG0_LEN)
-
-#define W_TCB_RX_FRAG1_LEN 22
-#define S_TCB_RX_FRAG1_LEN 26
-#define M_TCB_RX_FRAG1_LEN 0x7ffffffULL
-#define V_TCB_RX_FRAG1_LEN(x) ((x) << S_TCB_RX_FRAG1_LEN)
-
-#define W_TCB_NEWRENO_RECOVER 23
-#define S_TCB_NEWRENO_RECOVER 21
-#define M_TCB_NEWRENO_RECOVER 0x7ffffffULL
-#define V_TCB_NEWRENO_RECOVER(x) ((x) << S_TCB_NEWRENO_RECOVER)
-
-#define W_TCB_PDU_HAVE_LEN 24
-#define S_TCB_PDU_HAVE_LEN 16
-#define M_TCB_PDU_HAVE_LEN 0x1ULL
-#define V_TCB_PDU_HAVE_LEN(x) ((x) << S_TCB_PDU_HAVE_LEN)
-
-#define W_TCB_PDU_LEN 24
-#define S_TCB_PDU_LEN 17
-#define M_TCB_PDU_LEN 0xffffULL
-#define V_TCB_PDU_LEN(x) ((x) << S_TCB_PDU_LEN)
-
-#define W_TCB_RX_QUIESCE 25
-#define S_TCB_RX_QUIESCE 1
-#define M_TCB_RX_QUIESCE 0x1ULL
-#define V_TCB_RX_QUIESCE(x) ((x) << S_TCB_RX_QUIESCE)
-
-#define W_TCB_RX_PTR_RAW 25
-#define S_TCB_RX_PTR_RAW 2
-#define M_TCB_RX_PTR_RAW 0x1ffffULL
-#define V_TCB_RX_PTR_RAW(x) ((x) << S_TCB_RX_PTR_RAW)
-
-#define W_TCB_CPU_NO 25
-#define S_TCB_CPU_NO 19
-#define M_TCB_CPU_NO 0x7fULL
-#define V_TCB_CPU_NO(x) ((x) << S_TCB_CPU_NO)
-
-#define W_TCB_ULP_TYPE 25
-#define S_TCB_ULP_TYPE 26
-#define M_TCB_ULP_TYPE 0xfULL
-#define V_TCB_ULP_TYPE(x) ((x) << S_TCB_ULP_TYPE)
-
-#define W_TCB_RX_FRAG1_PTR_RAW 25
-#define S_TCB_RX_FRAG1_PTR_RAW 30
-#define M_TCB_RX_FRAG1_PTR_RAW 0x1ffffULL
-#define V_TCB_RX_FRAG1_PTR_RAW(x) ((x) << S_TCB_RX_FRAG1_PTR_RAW)
-
-#define W_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 26
-#define S_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 15
-#define M_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG2_START_IDX_OFFSET_RAW(x) ((x) << S_TCB_RX_FRAG2_START_IDX_OFFSET_RAW)
-
-#define W_TCB_RX_FRAG2_PTR_RAW 27
-#define S_TCB_RX_FRAG2_PTR_RAW 10
-#define M_TCB_RX_FRAG2_PTR_RAW 0x1ffffULL
-#define V_TCB_RX_FRAG2_PTR_RAW(x) ((x) << S_TCB_RX_FRAG2_PTR_RAW)
-
-#define W_TCB_RX_FRAG2_LEN_RAW 27
-#define S_TCB_RX_FRAG2_LEN_RAW 27
-#define M_TCB_RX_FRAG2_LEN_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG2_LEN_RAW(x) ((x) << S_TCB_RX_FRAG2_LEN_RAW)
-
-#define W_TCB_RX_FRAG3_PTR_RAW 28
-#define S_TCB_RX_FRAG3_PTR_RAW 22
-#define M_TCB_RX_FRAG3_PTR_RAW 0x1ffffULL
-#define V_TCB_RX_FRAG3_PTR_RAW(x) ((x) << S_TCB_RX_FRAG3_PTR_RAW)
-
-#define W_TCB_RX_FRAG3_LEN_RAW 29
-#define S_TCB_RX_FRAG3_LEN_RAW 7
-#define M_TCB_RX_FRAG3_LEN_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG3_LEN_RAW(x) ((x) << S_TCB_RX_FRAG3_LEN_RAW)
-
-#define W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 30
-#define S_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 2
-#define M_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG3_START_IDX_OFFSET_RAW(x) ((x) << S_TCB_RX_FRAG3_START_IDX_OFFSET_RAW)
-
-#define W_TCB_PDU_HDR_LEN 30
-#define S_TCB_PDU_HDR_LEN 29
-#define M_TCB_PDU_HDR_LEN 0xffULL
-#define V_TCB_PDU_HDR_LEN(x) ((x) << S_TCB_PDU_HDR_LEN)
-
-#define W_TCB_SLUSH1 31
-#define S_TCB_SLUSH1 5
-#define M_TCB_SLUSH1 0x7ffffULL
-#define V_TCB_SLUSH1(x) ((x) << S_TCB_SLUSH1)
-
-#define W_TCB_ULP_RAW 31
-#define S_TCB_ULP_RAW 24
-#define M_TCB_ULP_RAW 0xffULL
-#define V_TCB_ULP_RAW(x) ((x) << S_TCB_ULP_RAW)
-
-#define W_TCB_DDP_RDMAP_VERSION 25
-#define S_TCB_DDP_RDMAP_VERSION 30
-#define M_TCB_DDP_RDMAP_VERSION 0x1ULL
-#define V_TCB_DDP_RDMAP_VERSION(x) ((x) << S_TCB_DDP_RDMAP_VERSION)
-
-#define W_TCB_MARKER_ENABLE_RX 25
-#define S_TCB_MARKER_ENABLE_RX 31
-#define M_TCB_MARKER_ENABLE_RX 0x1ULL
-#define V_TCB_MARKER_ENABLE_RX(x) ((x) << S_TCB_MARKER_ENABLE_RX)
-
-#define W_TCB_MARKER_ENABLE_TX 26
-#define S_TCB_MARKER_ENABLE_TX 0
-#define M_TCB_MARKER_ENABLE_TX 0x1ULL
-#define V_TCB_MARKER_ENABLE_TX(x) ((x) << S_TCB_MARKER_ENABLE_TX)
-
-#define W_TCB_CRC_ENABLE 26
-#define S_TCB_CRC_ENABLE 1
-#define M_TCB_CRC_ENABLE 0x1ULL
-#define V_TCB_CRC_ENABLE(x) ((x) << S_TCB_CRC_ENABLE)
-
-#define W_TCB_IRS_ULP 26
-#define S_TCB_IRS_ULP 2
-#define M_TCB_IRS_ULP 0x1ffULL
-#define V_TCB_IRS_ULP(x) ((x) << S_TCB_IRS_ULP)
-
-#define W_TCB_ISS_ULP 26
-#define S_TCB_ISS_ULP 11
-#define M_TCB_ISS_ULP 0x1ffULL
-#define V_TCB_ISS_ULP(x) ((x) << S_TCB_ISS_ULP)
-
-#define W_TCB_TX_PDU_LEN 26
-#define S_TCB_TX_PDU_LEN 20
-#define M_TCB_TX_PDU_LEN 0x3fffULL
-#define V_TCB_TX_PDU_LEN(x) ((x) << S_TCB_TX_PDU_LEN)
-
-#define W_TCB_TX_PDU_OUT 27
-#define S_TCB_TX_PDU_OUT 2
-#define M_TCB_TX_PDU_OUT 0x1ULL
-#define V_TCB_TX_PDU_OUT(x) ((x) << S_TCB_TX_PDU_OUT)
-
-#define W_TCB_CQ_IDX_SQ 27
-#define S_TCB_CQ_IDX_SQ 3
-#define M_TCB_CQ_IDX_SQ 0xffffULL
-#define V_TCB_CQ_IDX_SQ(x) ((x) << S_TCB_CQ_IDX_SQ)
-
-#define W_TCB_CQ_IDX_RQ 27
-#define S_TCB_CQ_IDX_RQ 19
-#define M_TCB_CQ_IDX_RQ 0xffffULL
-#define V_TCB_CQ_IDX_RQ(x) ((x) << S_TCB_CQ_IDX_RQ)
-
-#define W_TCB_QP_ID 28
-#define S_TCB_QP_ID 3
-#define M_TCB_QP_ID 0xffffULL
-#define V_TCB_QP_ID(x) ((x) << S_TCB_QP_ID)
-
-#define W_TCB_PD_ID 28
-#define S_TCB_PD_ID 19
-#define M_TCB_PD_ID 0xffffULL
-#define V_TCB_PD_ID(x) ((x) << S_TCB_PD_ID)
-
-#define W_TCB_STAG 29
-#define S_TCB_STAG 3
-#define M_TCB_STAG 0xffffffffULL
-#define V_TCB_STAG(x) ((x) << S_TCB_STAG)
-
-#define W_TCB_RQ_START 30
-#define S_TCB_RQ_START 3
-#define M_TCB_RQ_START 0x3ffffffULL
-#define V_TCB_RQ_START(x) ((x) << S_TCB_RQ_START)
-
-#define W_TCB_RQ_MSN 30
-#define S_TCB_RQ_MSN 29
-#define M_TCB_RQ_MSN 0x3ffULL
-#define V_TCB_RQ_MSN(x) ((x) << S_TCB_RQ_MSN)
-
-#define W_TCB_RQ_MAX_OFFSET 31
-#define S_TCB_RQ_MAX_OFFSET 7
-#define M_TCB_RQ_MAX_OFFSET 0xfULL
-#define V_TCB_RQ_MAX_OFFSET(x) ((x) << S_TCB_RQ_MAX_OFFSET)
-
-#define W_TCB_RQ_WRITE_PTR 31
-#define S_TCB_RQ_WRITE_PTR 11
-#define M_TCB_RQ_WRITE_PTR 0x3ffULL
-#define V_TCB_RQ_WRITE_PTR(x) ((x) << S_TCB_RQ_WRITE_PTR)
-
-#define W_TCB_INB_WRITE_PERM 31
-#define S_TCB_INB_WRITE_PERM 21
-#define M_TCB_INB_WRITE_PERM 0x1ULL
-#define V_TCB_INB_WRITE_PERM(x) ((x) << S_TCB_INB_WRITE_PERM)
-
-#define W_TCB_INB_READ_PERM 31
-#define S_TCB_INB_READ_PERM 22
-#define M_TCB_INB_READ_PERM 0x1ULL
-#define V_TCB_INB_READ_PERM(x) ((x) << S_TCB_INB_READ_PERM)
-
-#define W_TCB_ORD_L_BIT_VLD 31
-#define S_TCB_ORD_L_BIT_VLD 23
-#define M_TCB_ORD_L_BIT_VLD 0x1ULL
-#define V_TCB_ORD_L_BIT_VLD(x) ((x) << S_TCB_ORD_L_BIT_VLD)
-
-#define W_TCB_RDMAP_OPCODE 31
-#define S_TCB_RDMAP_OPCODE 24
-#define M_TCB_RDMAP_OPCODE 0xfULL
-#define V_TCB_RDMAP_OPCODE(x) ((x) << S_TCB_RDMAP_OPCODE)
-
-#define W_TCB_TX_FLUSH 31
-#define S_TCB_TX_FLUSH 28
-#define M_TCB_TX_FLUSH 0x1ULL
-#define V_TCB_TX_FLUSH(x) ((x) << S_TCB_TX_FLUSH)
-
-#define W_TCB_TX_OOS_RXMT 31
-#define S_TCB_TX_OOS_RXMT 29
-#define M_TCB_TX_OOS_RXMT 0x1ULL
-#define V_TCB_TX_OOS_RXMT(x) ((x) << S_TCB_TX_OOS_RXMT)
-
-#define W_TCB_TX_OOS_TXMT 31
-#define S_TCB_TX_OOS_TXMT 30
-#define M_TCB_TX_OOS_TXMT 0x1ULL
-#define V_TCB_TX_OOS_TXMT(x) ((x) << S_TCB_TX_OOS_TXMT)
-
-#define W_TCB_SLUSH_AUX2 31
-#define S_TCB_SLUSH_AUX2 31
-#define M_TCB_SLUSH_AUX2 0x1ULL
-#define V_TCB_SLUSH_AUX2(x) ((x) << S_TCB_SLUSH_AUX2)
-
-#define W_TCB_RX_FRAG1_PTR_RAW2 25
-#define S_TCB_RX_FRAG1_PTR_RAW2 30
-#define M_TCB_RX_FRAG1_PTR_RAW2 0x1ffffULL
-#define V_TCB_RX_FRAG1_PTR_RAW2(x) ((x) << S_TCB_RX_FRAG1_PTR_RAW2)
-
-#define W_TCB_RX_DDP_FLAGS 26
-#define S_TCB_RX_DDP_FLAGS 15
-#define M_TCB_RX_DDP_FLAGS 0x3ffULL
-#define V_TCB_RX_DDP_FLAGS(x) ((x) << S_TCB_RX_DDP_FLAGS)
-
-#define W_TCB_SLUSH_AUX3 26
-#define S_TCB_SLUSH_AUX3 31
-#define M_TCB_SLUSH_AUX3 0x1ffULL
-#define V_TCB_SLUSH_AUX3(x) ((x) << S_TCB_SLUSH_AUX3)
-
-#define W_TCB_RX_DDP_BUF0_OFFSET 27
-#define S_TCB_RX_DDP_BUF0_OFFSET 8
-#define M_TCB_RX_DDP_BUF0_OFFSET 0x3fffffULL
-#define V_TCB_RX_DDP_BUF0_OFFSET(x) ((x) << S_TCB_RX_DDP_BUF0_OFFSET)
-
-#define W_TCB_RX_DDP_BUF0_LEN 27
-#define S_TCB_RX_DDP_BUF0_LEN 30
-#define M_TCB_RX_DDP_BUF0_LEN 0x3fffffULL
-#define V_TCB_RX_DDP_BUF0_LEN(x) ((x) << S_TCB_RX_DDP_BUF0_LEN)
-
-#define W_TCB_RX_DDP_BUF1_OFFSET 28
-#define S_TCB_RX_DDP_BUF1_OFFSET 20
-#define M_TCB_RX_DDP_BUF1_OFFSET 0x3fffffULL
-#define V_TCB_RX_DDP_BUF1_OFFSET(x) ((x) << S_TCB_RX_DDP_BUF1_OFFSET)
-
-#define W_TCB_RX_DDP_BUF1_LEN 29
-#define S_TCB_RX_DDP_BUF1_LEN 10
-#define M_TCB_RX_DDP_BUF1_LEN 0x3fffffULL
-#define V_TCB_RX_DDP_BUF1_LEN(x) ((x) << S_TCB_RX_DDP_BUF1_LEN)
-
-#define W_TCB_RX_DDP_BUF0_TAG 30
-#define S_TCB_RX_DDP_BUF0_TAG 0
-#define M_TCB_RX_DDP_BUF0_TAG 0xffffffffULL
-#define V_TCB_RX_DDP_BUF0_TAG(x) ((x) << S_TCB_RX_DDP_BUF0_TAG)
-
-#define W_TCB_RX_DDP_BUF1_TAG 31
-#define S_TCB_RX_DDP_BUF1_TAG 0
-#define M_TCB_RX_DDP_BUF1_TAG 0xffffffffULL
-#define V_TCB_RX_DDP_BUF1_TAG(x) ((x) << S_TCB_RX_DDP_BUF1_TAG)
-
-#define S_TF_DACK 10
-#define V_TF_DACK(x) ((x) << S_TF_DACK)
-
-#define S_TF_NAGLE 11
-#define V_TF_NAGLE(x) ((x) << S_TF_NAGLE)
-
-#define S_TF_RECV_SCALE 12
-#define V_TF_RECV_SCALE(x) ((x) << S_TF_RECV_SCALE)
-
-#define S_TF_RECV_TSTMP 13
-#define V_TF_RECV_TSTMP(x) ((x) << S_TF_RECV_TSTMP)
-
-#define S_TF_RECV_SACK 14
-#define V_TF_RECV_SACK(x) ((x) << S_TF_RECV_SACK)
-
-#define S_TF_TURBO 15
-#define V_TF_TURBO(x) ((x) << S_TF_TURBO)
-
-#define S_TF_KEEPALIVE 16
-#define V_TF_KEEPALIVE(x) ((x) << S_TF_KEEPALIVE)
-
-#define S_TF_TCAM_BYPASS 17
-#define V_TF_TCAM_BYPASS(x) ((x) << S_TF_TCAM_BYPASS)
-
-#define S_TF_CORE_FIN 18
-#define V_TF_CORE_FIN(x) ((x) << S_TF_CORE_FIN)
-
-#define S_TF_CORE_MORE 19
-#define V_TF_CORE_MORE(x) ((x) << S_TF_CORE_MORE)
-
-#define S_TF_MIGRATING 20
-#define V_TF_MIGRATING(x) ((x) << S_TF_MIGRATING)
-
-#define S_TF_ACTIVE_OPEN 21
-#define V_TF_ACTIVE_OPEN(x) ((x) << S_TF_ACTIVE_OPEN)
-
-#define S_TF_ASK_MODE 22
-#define V_TF_ASK_MODE(x) ((x) << S_TF_ASK_MODE)
-
-#define S_TF_NON_OFFLOAD 23
-#define V_TF_NON_OFFLOAD(x) ((x) << S_TF_NON_OFFLOAD)
-
-#define S_TF_MOD_SCHD 24
-#define V_TF_MOD_SCHD(x) ((x) << S_TF_MOD_SCHD)
-
-#define S_TF_MOD_SCHD_REASON0 25
-#define V_TF_MOD_SCHD_REASON0(x) ((x) << S_TF_MOD_SCHD_REASON0)
-
-#define S_TF_MOD_SCHD_REASON1 26
-#define V_TF_MOD_SCHD_REASON1(x) ((x) << S_TF_MOD_SCHD_REASON1)
-
-#define S_TF_MOD_SCHD_RX 27
-#define V_TF_MOD_SCHD_RX(x) ((x) << S_TF_MOD_SCHD_RX)
-
-#define S_TF_CORE_PUSH 28
-#define V_TF_CORE_PUSH(x) ((x) << S_TF_CORE_PUSH)
-
-#define S_TF_RCV_COALESCE_ENABLE 29
-#define V_TF_RCV_COALESCE_ENABLE(x) ((x) << S_TF_RCV_COALESCE_ENABLE)
-
-#define S_TF_RCV_COALESCE_PUSH 30
-#define V_TF_RCV_COALESCE_PUSH(x) ((x) << S_TF_RCV_COALESCE_PUSH)
-
-#define S_TF_RCV_COALESCE_LAST_PSH 31
-#define V_TF_RCV_COALESCE_LAST_PSH(x) ((x) << S_TF_RCV_COALESCE_LAST_PSH)
-
-#define S_TF_RCV_COALESCE_HEARTBEAT 32
-#define V_TF_RCV_COALESCE_HEARTBEAT(x) ((x) << S_TF_RCV_COALESCE_HEARTBEAT)
-
-#define S_TF_HALF_CLOSE 33
-#define V_TF_HALF_CLOSE(x) ((x) << S_TF_HALF_CLOSE)
-
-#define S_TF_DACK_MSS 34
-#define V_TF_DACK_MSS(x) ((x) << S_TF_DACK_MSS)
-
-#define S_TF_CCTRL_SEL0 35
-#define V_TF_CCTRL_SEL0(x) ((x) << S_TF_CCTRL_SEL0)
-
-#define S_TF_CCTRL_SEL1 36
-#define V_TF_CCTRL_SEL1(x) ((x) << S_TF_CCTRL_SEL1)
-
-#define S_TF_TCP_NEWRENO_FAST_RECOVERY 37
-#define V_TF_TCP_NEWRENO_FAST_RECOVERY(x) ((x) << S_TF_TCP_NEWRENO_FAST_RECOVERY)
-
-#define S_TF_TX_PACE_AUTO 38
-#define V_TF_TX_PACE_AUTO(x) ((x) << S_TF_TX_PACE_AUTO)
-
-#define S_TF_PEER_FIN_HELD 39
-#define V_TF_PEER_FIN_HELD(x) ((x) << S_TF_PEER_FIN_HELD)
-
-#define S_TF_CORE_URG 40
-#define V_TF_CORE_URG(x) ((x) << S_TF_CORE_URG)
-
-#define S_TF_RDMA_ERROR 41
-#define V_TF_RDMA_ERROR(x) ((x) << S_TF_RDMA_ERROR)
-
-#define S_TF_SSWS_DISABLED 42
-#define V_TF_SSWS_DISABLED(x) ((x) << S_TF_SSWS_DISABLED)
-
-#define S_TF_DUPACK_COUNT_ODD 43
-#define V_TF_DUPACK_COUNT_ODD(x) ((x) << S_TF_DUPACK_COUNT_ODD)
-
-#define S_TF_TX_CHANNEL 44
-#define V_TF_TX_CHANNEL(x) ((x) << S_TF_TX_CHANNEL)
-
-#define S_TF_RX_CHANNEL 45
-#define V_TF_RX_CHANNEL(x) ((x) << S_TF_RX_CHANNEL)
-
-#define S_TF_TX_PACE_FIXED 46
-#define V_TF_TX_PACE_FIXED(x) ((x) << S_TF_TX_PACE_FIXED)
-
-#define S_TF_RDMA_FLM_ERROR 47
-#define V_TF_RDMA_FLM_ERROR(x) ((x) << S_TF_RDMA_FLM_ERROR)
-
-#define S_TF_RX_FLOW_CONTROL_DISABLE 48
-#define V_TF_RX_FLOW_CONTROL_DISABLE(x) ((x) << S_TF_RX_FLOW_CONTROL_DISABLE)
-
-#endif /* _TCB_DEFS_H */
diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig
index 23f38cf2c5cd..9e2b2c348afd 100644
--- a/drivers/infiniband/hw/cxgb4/Kconfig
+++ b/drivers/infiniband/hw/cxgb4/Kconfig
@@ -1,8 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_CXGB4
tristate "Chelsio T4/T5 RDMA Driver"
- depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
+ depends on CHELSIO_T4 && INET
+ depends on INFINIBAND_ADDR_TRANS
+ select CHELSIO_LIB
select GENERIC_ALLOCATOR
- ---help---
+ help
This is an iWARP/RDMA driver for the Chelsio T4 and T5
1GbE, 10GbE adapters and T5 40GbE adapter.
diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile
index e11cf7299945..291d259d2319 100644
--- a/drivers/infiniband/hw/cxgb4/Makefile
+++ b/drivers/infiniband/hw/cxgb4/Makefile
@@ -1,5 +1,8 @@
-ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4
+ccflags-y += -I $(srctree)/drivers/net/ethernet/chelsio/libcxgb
obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
-iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o id_table.o
+iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o id_table.o \
+ restrack.o
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index debc39d2cbc2..b3b45c49077d 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -49,6 +49,7 @@
#include <rdma/ib_addr.h>
+#include <libcxgb_cm.h>
#include "iw_cxgb4.h"
#include "clip_tbl.h"
@@ -76,9 +77,9 @@ static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");
-static int dack_mode = 1;
+static int dack_mode;
module_param(dack_mode, int, 0644);
-MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
+MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)");
uint c4iw_max_read_depth = 32;
module_param(c4iw_max_read_depth, int, 0644);
@@ -98,10 +99,6 @@ module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
"Enable tcp window scaling (default=1)");
-int c4iw_debug;
-module_param(c4iw_debug, int, 0644);
-MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");
-
static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");
@@ -119,7 +116,7 @@ MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
static int mpa_rev = 2;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
- "1 is RFC0544 spec compliant, 2 is IETF MPA Peer Connect Draft"
+ "1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
" compliant (default=2)");
static int markers_enabled;
@@ -143,27 +140,43 @@ static struct workqueue_struct *workq;
static struct sk_buff_head rxq;
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
-static void ep_timeout(unsigned long arg);
+static void ep_timeout(struct timer_list *t);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
+static int sched(struct c4iw_dev *dev, struct sk_buff *skb);
static LIST_HEAD(timeout_list);
-static spinlock_t timeout_lock;
+static DEFINE_SPINLOCK(timeout_lock);
+
+static void deref_cm_id(struct c4iw_ep_common *epc)
+{
+ epc->cm_id->rem_ref(epc->cm_id);
+ epc->cm_id = NULL;
+ set_bit(CM_ID_DEREFED, &epc->history);
+}
+
+static void ref_cm_id(struct c4iw_ep_common *epc)
+{
+ set_bit(CM_ID_REFED, &epc->history);
+ epc->cm_id->add_ref(epc->cm_id);
+}
static void deref_qp(struct c4iw_ep *ep)
{
c4iw_qp_rem_ref(&ep->com.qp->ibqp);
clear_bit(QP_REFERENCED, &ep->com.flags);
+ set_bit(QP_DEREFED, &ep->com.history);
}
static void ref_qp(struct c4iw_ep *ep)
{
set_bit(QP_REFERENCED, &ep->com.flags);
+ set_bit(QP_REFED, &ep->com.history);
c4iw_qp_add_ref(&ep->com.qp->ibqp);
}
static void start_ep_timer(struct c4iw_ep *ep)
{
- PDBG("%s ep %p\n", __func__, ep);
+ pr_debug("ep %p\n", ep);
if (timer_pending(&ep->timer)) {
pr_err("%s timer already started! ep %p\n",
__func__, ep);
@@ -172,15 +185,13 @@ static void start_ep_timer(struct c4iw_ep *ep)
clear_bit(TIMEOUT, &ep->com.flags);
c4iw_get_ep(&ep->com);
ep->timer.expires = jiffies + ep_timeout_secs * HZ;
- ep->timer.data = (unsigned long)ep;
- ep->timer.function = ep_timeout;
add_timer(&ep->timer);
}
static int stop_ep_timer(struct c4iw_ep *ep)
{
- PDBG("%s ep %p stopping\n", __func__, ep);
- del_timer_sync(&ep->timer);
+ pr_debug("ep %p stopping\n", ep);
+ timer_delete_sync(&ep->timer);
if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
c4iw_put_ep(&ep->com);
return 0;
@@ -195,12 +206,14 @@ static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
if (c4iw_fatal_error(rdev)) {
kfree_skb(skb);
- PDBG("%s - device in error state - dropping\n", __func__);
+ pr_err("%s - device in error state - dropping\n", __func__);
return -EIO;
}
error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
if (error < 0)
kfree_skb(skb);
+ else if (error == NET_XMIT_DROP)
+ return -ENOMEM;
return error < 0 ? error : 0;
}
@@ -210,7 +223,7 @@ int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
if (c4iw_fatal_error(rdev)) {
kfree_skb(skb);
- PDBG("%s - device in error state - dropping\n", __func__);
+ pr_err("%s - device in error state - dropping\n", __func__);
return -EIO;
}
error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
@@ -221,15 +234,13 @@ int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
- struct cpl_tid_release *req;
+ u32 len = roundup(sizeof(struct cpl_tid_release), 16);
- skb = get_skb(skb, sizeof *req, GFP_KERNEL);
+ skb = get_skb(skb, len, GFP_KERNEL);
if (!skb)
return;
- req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
- INIT_TP_WR(req, hwtid);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
- set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
+
+ cxgb_mk_tid_release(skb, len, hwtid, 0);
c4iw_ofld_send(rdev, skb);
return;
}
@@ -246,10 +257,10 @@ static void set_emss(struct c4iw_ep *ep, u16 opt)
if (ep->emss < 128)
ep->emss = 128;
if (ep->emss & 7)
- PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
- TCPOPT_MSS_G(opt), ep->mss, ep->emss);
- PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
- ep->mss, ep->emss);
+ pr_debug("Warning: misaligned mtu idx %u mss %u emss=%u\n",
+ TCPOPT_MSS_G(opt), ep->mss, ep->emss);
+ pr_debug("mss_idx %u mss %u emss=%u\n", TCPOPT_MSS_G(opt), ep->mss,
+ ep->emss);
}
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
@@ -270,62 +281,151 @@ static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
mutex_lock(&epc->mutex);
- PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
+ pr_debug("%s -> %s\n", states[epc->state], states[new]);
__state_set(epc, new);
mutex_unlock(&epc->mutex);
return;
}
+static int alloc_ep_skb_list(struct sk_buff_head *ep_skb_list, int size)
+{
+ struct sk_buff *skb;
+ unsigned int i;
+ size_t len;
+
+ len = roundup(sizeof(union cpl_wr_size), 16);
+ for (i = 0; i < size; i++) {
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ goto fail;
+ skb_queue_tail(ep_skb_list, skb);
+ }
+ return 0;
+fail:
+ skb_queue_purge(ep_skb_list);
+ return -ENOMEM;
+}
+
static void *alloc_ep(int size, gfp_t gfp)
{
struct c4iw_ep_common *epc;
epc = kzalloc(size, gfp);
if (epc) {
+ epc->wr_waitp = c4iw_alloc_wr_wait(gfp);
+ if (!epc->wr_waitp) {
+ kfree(epc);
+ epc = NULL;
+ goto out;
+ }
kref_init(&epc->kref);
mutex_init(&epc->mutex);
- c4iw_init_wr_wait(&epc->wr_wait);
+ c4iw_init_wr_wait(epc->wr_waitp);
}
- PDBG("%s alloc ep %p\n", __func__, epc);
+ pr_debug("alloc ep %p\n", epc);
+out:
return epc;
}
+static void remove_ep_tid(struct c4iw_ep *ep)
+{
+ unsigned long flags;
+
+ xa_lock_irqsave(&ep->com.dev->hwtids, flags);
+ __xa_erase(&ep->com.dev->hwtids, ep->hwtid);
+ if (xa_empty(&ep->com.dev->hwtids))
+ wake_up(&ep->com.dev->wait);
+ xa_unlock_irqrestore(&ep->com.dev->hwtids, flags);
+}
+
+static int insert_ep_tid(struct c4iw_ep *ep)
+{
+ unsigned long flags;
+ int err;
+
+ xa_lock_irqsave(&ep->com.dev->hwtids, flags);
+ err = __xa_insert(&ep->com.dev->hwtids, ep->hwtid, ep, GFP_KERNEL);
+ xa_unlock_irqrestore(&ep->com.dev->hwtids, flags);
+
+ return err;
+}
+
+/*
+ * Atomically lookup the ep ptr given the tid and grab a reference on the ep.
+ */
+static struct c4iw_ep *get_ep_from_tid(struct c4iw_dev *dev, unsigned int tid)
+{
+ struct c4iw_ep *ep;
+ unsigned long flags;
+
+ xa_lock_irqsave(&dev->hwtids, flags);
+ ep = xa_load(&dev->hwtids, tid);
+ if (ep)
+ c4iw_get_ep(&ep->com);
+ xa_unlock_irqrestore(&dev->hwtids, flags);
+ return ep;
+}
+
+/*
+ * Atomically lookup the ep ptr given the stid and grab a reference on the ep.
+ */
+static struct c4iw_listen_ep *get_ep_from_stid(struct c4iw_dev *dev,
+ unsigned int stid)
+{
+ struct c4iw_listen_ep *ep;
+ unsigned long flags;
+
+ xa_lock_irqsave(&dev->stids, flags);
+ ep = xa_load(&dev->stids, stid);
+ if (ep)
+ c4iw_get_ep(&ep->com);
+ xa_unlock_irqrestore(&dev->stids, flags);
+ return ep;
+}
+
void _c4iw_free_ep(struct kref *kref)
{
struct c4iw_ep *ep;
ep = container_of(kref, struct c4iw_ep, com.kref);
- PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
+ pr_debug("ep %p state %s\n", ep, states[ep->com.state]);
if (test_bit(QP_REFERENCED, &ep->com.flags))
deref_qp(ep);
if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
if (ep->com.remote_addr.ss_family == AF_INET6) {
struct sockaddr_in6 *sin6 =
(struct sockaddr_in6 *)
- &ep->com.mapped_local_addr;
+ &ep->com.local_addr;
cxgb4_clip_release(
ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr,
1);
}
- remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
- cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
+ cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid,
+ ep->com.local_addr.ss_family);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
+ kfree_skb(ep->mpa_skb);
}
- if (test_bit(RELEASE_MAPINFO, &ep->com.flags)) {
- print_addr(&ep->com, __func__, "remove_mapinfo/mapping");
- iwpm_remove_mapinfo(&ep->com.local_addr,
- &ep->com.mapped_local_addr);
- iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);
- }
+ if (!skb_queue_empty(&ep->com.ep_skb_list))
+ skb_queue_purge(&ep->com.ep_skb_list);
+ c4iw_put_wr_wait(ep->com.wr_waitp);
kfree(ep);
}
static void release_ep_resources(struct c4iw_ep *ep)
{
set_bit(RELEASE_RESOURCES, &ep->com.flags);
+
+ /*
+ * If we have a hwtid, then remove it from the idr table
+ * so lookups will no longer find this endpoint. Otherwise
+ * we have a race where one thread finds the ep ptr just
+ * before the other thread is freeing the ep memory.
+ */
+ if (ep->hwtid != -1)
+ remove_ep_tid(ep);
c4iw_put_ep(&ep->com);
}
@@ -360,6 +460,8 @@ static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
skb_reset_transport_header(skb);
} else {
skb = alloc_skb(len, gfp);
+ if (!skb)
+ return NULL;
}
t4_set_arp_err_handler(skb, NULL, NULL);
return skb;
@@ -370,76 +472,74 @@ static struct net_device *get_real_dev(struct net_device *egress_dev)
return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
}
-static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev)
+static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
- int i;
+ pr_err("ARP failure\n");
+ kfree_skb(skb);
+}
- egress_dev = get_real_dev(egress_dev);
- for (i = 0; i < dev->rdev.lldi.nports; i++)
- if (dev->rdev.lldi.ports[i] == egress_dev)
- return 1;
+static void mpa_start_arp_failure(void *handle, struct sk_buff *skb)
+{
+ pr_err("ARP failure during MPA Negotiation - Closing Connection\n");
+}
+
+enum {
+ NUM_FAKE_CPLS = 2,
+ FAKE_CPL_PUT_EP_SAFE = NUM_CPL_CMDS + 0,
+ FAKE_CPL_PASS_PUT_EP_SAFE = NUM_CPL_CMDS + 1,
+};
+
+static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+
+ ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
+ release_ep_resources(ep);
return 0;
}
-static struct dst_entry *find_route6(struct c4iw_dev *dev, __u8 *local_ip,
- __u8 *peer_ip, __be16 local_port,
- __be16 peer_port, u8 tos,
- __u32 sin6_scope_id)
+static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
- struct dst_entry *dst = NULL;
-
- if (IS_ENABLED(CONFIG_IPV6)) {
- struct flowi6 fl6;
-
- memset(&fl6, 0, sizeof(fl6));
- memcpy(&fl6.daddr, peer_ip, 16);
- memcpy(&fl6.saddr, local_ip, 16);
- if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
- fl6.flowi6_oif = sin6_scope_id;
- dst = ip6_route_output(&init_net, NULL, &fl6);
- if (!dst)
- goto out;
- if (!our_interface(dev, ip6_dst_idev(dst)->dev) &&
- !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
- dst_release(dst);
- dst = NULL;
- }
- }
+ struct c4iw_ep *ep;
-out:
- return dst;
+ ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
+ c4iw_put_ep(&ep->parent_ep->com);
+ release_ep_resources(ep);
+ return 0;
}
-static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
- __be32 peer_ip, __be16 local_port,
- __be16 peer_port, u8 tos)
+/*
+ * Fake up a special CPL opcode and call sched() so process_work() will call
+ * _put_ep_safe() in a safe context to free the ep resources. This is needed
+ * because ARP error handlers are called in an ATOMIC context, and
+ * _c4iw_free_ep() needs to block.
+ */
+static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb,
+ int cpl)
{
- struct rtable *rt;
- struct flowi4 fl4;
- struct neighbour *n;
+ struct cpl_act_establish *rpl = cplhdr(skb);
- rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
- peer_port, local_port, IPPROTO_TCP,
- tos, 0);
- if (IS_ERR(rt))
- return NULL;
- n = dst_neigh_lookup(&rt->dst, &peer_ip);
- if (!n)
- return NULL;
- if (!our_interface(dev, n->dev) &&
- !(n->dev->flags & IFF_LOOPBACK)) {
- neigh_release(n);
- dst_release(&rt->dst);
- return NULL;
- }
- neigh_release(n);
- return &rt->dst;
+ /* Set our special ARP_FAILURE opcode */
+ rpl->ot.opcode = cpl;
+
+ /*
+ * Save ep in the skb->cb area, after where sched() will save the dev
+ * ptr.
+ */
+ *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep;
+ sched(ep->com.dev, skb);
}
-static void arp_failure_discard(void *handle, struct sk_buff *skb)
+/* Handle an ARP failure for an accept */
+static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb)
{
- PDBG("%s c4iw_dev %p\n", __func__, handle);
- kfree_skb(skb);
+ struct c4iw_ep *ep = handle;
+
+ pr_err("ARP failure during accept - tid %u - dropping connection\n",
+ ep->hwtid);
+
+ __state_set(&ep->com, DEAD);
+ queue_arp_failure_cpl(ep, skb, FAKE_CPL_PASS_PUT_EP_SAFE);
}
/*
@@ -449,21 +549,18 @@ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
struct c4iw_ep *ep = handle;
- printk(KERN_ERR MOD "ARP failure duing connect\n");
- kfree_skb(skb);
+ pr_err("ARP failure during connect\n");
connect_reply_upcall(ep, -EHOSTUNREACH);
- state_set(&ep->com, DEAD);
+ __state_set(&ep->com, DEAD);
if (ep->com.remote_addr.ss_family == AF_INET6) {
struct sockaddr_in6 *sin6 =
- (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
+ (struct sockaddr_in6 *)&ep->com.local_addr;
cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
- remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
+ xa_erase_irq(&ep->com.dev->atids, ep->atid);
cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
- dst_release(ep->dst);
- cxgb4_l2t_release(ep->l2t);
- c4iw_put_ep(&ep->com);
+ queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
}
/*
@@ -472,27 +569,49 @@ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
*/
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
- struct c4iw_rdev *rdev = handle;
+ int ret;
+ struct c4iw_ep *ep = handle;
+ struct c4iw_rdev *rdev = &ep->com.dev->rdev;
struct cpl_abort_req *req = cplhdr(skb);
- PDBG("%s rdev %p\n", __func__, rdev);
+ pr_debug("rdev %p\n", rdev);
req->cmd = CPL_ABORT_NO_RST;
- c4iw_ofld_send(rdev, skb);
+ skb_get(skb);
+ ret = c4iw_ofld_send(rdev, skb);
+ if (ret) {
+ __state_set(&ep->com, DEAD);
+ queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
+ } else
+ kfree_skb(skb);
}
-static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
+static int send_flowc(struct c4iw_ep *ep)
{
- unsigned int flowclen = 80;
struct fw_flowc_wr *flowc;
- int i;
+ struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
+ u16 vlan = ep->l2t->vlan;
+ int nparams;
+ int flowclen, flowclen16;
- skb = get_skb(skb, flowclen, GFP_KERNEL);
- flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
+ if (WARN_ON(!skb))
+ return -ENOMEM;
+
+ if (vlan == CPL_L2T_VLAN_NONE)
+ nparams = 9;
+ else
+ nparams = 10;
+
+ flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
+ flowclen16 = DIV_ROUND_UP(flowclen, 16);
+ flowclen = flowclen16 * 16;
+
+ flowc = __skb_put(skb, flowclen);
+ memset(flowc, 0, flowclen);
flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
- FW_FLOWC_WR_NPARAMS_V(8));
- flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen,
- 16)) | FW_WR_FLOWID_V(ep->hwtid));
+ FW_FLOWC_WR_NPARAMS_V(nparams));
+ flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
+ FW_WR_FLOWID_V(ep->hwtid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
@@ -511,172 +630,152 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
flowc->mnemval[7].val = cpu_to_be32(ep->emss);
- /* Pad WR to 16 byte boundary */
- flowc->mnemval[8].mnemonic = 0;
- flowc->mnemval[8].val = 0;
- for (i = 0; i < 9; i++) {
- flowc->mnemval[i].r4[0] = 0;
- flowc->mnemval[i].r4[1] = 0;
- flowc->mnemval[i].r4[2] = 0;
+ flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
+ flowc->mnemval[8].val = cpu_to_be32(ep->snd_wscale);
+ if (nparams == 10) {
+ u16 pri;
+ pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
+ flowc->mnemval[9].val = cpu_to_be32(pri);
}
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- c4iw_ofld_send(&ep->com.dev->rdev, skb);
+ return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}
-static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
+static int send_halfclose(struct c4iw_ep *ep)
{
- struct cpl_close_con_req *req;
- struct sk_buff *skb;
- int wrlen = roundup(sizeof *req, 16);
+ struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
+ u32 wrlen = roundup(sizeof(struct cpl_close_con_req), 16);
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- skb = get_skb(NULL, wrlen, gfp);
- if (!skb) {
- printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
+ if (WARN_ON(!skb))
return -ENOMEM;
- }
- set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
- req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
- memset(req, 0, wrlen);
- INIT_TP_WR(req, ep->hwtid);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
- ep->hwtid));
- return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
-}
-static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
-{
- struct cpl_abort_req *req;
- int wrlen = roundup(sizeof *req, 16);
+ cxgb_mk_close_con_req(skb, wrlen, ep->hwtid, ep->txq_idx,
+ NULL, arp_failure_discard);
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- skb = get_skb(skb, wrlen, gfp);
- if (!skb) {
- printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
- __func__);
- return -ENOMEM;
- }
- set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
- req = (struct cpl_abort_req *) skb_put(skb, wrlen);
- memset(req, 0, wrlen);
- INIT_TP_WR(req, ep->hwtid);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
- req->cmd = CPL_ABORT_SEND_RST;
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
-/*
- * c4iw_form_pm_msg - Form a port mapper message with mapping info
- */
-static void c4iw_form_pm_msg(struct c4iw_ep *ep,
- struct iwpm_sa_data *pm_msg)
+static void read_tcb(struct c4iw_ep *ep)
{
- memcpy(&pm_msg->loc_addr, &ep->com.local_addr,
- sizeof(ep->com.local_addr));
- memcpy(&pm_msg->rem_addr, &ep->com.remote_addr,
- sizeof(ep->com.remote_addr));
-}
+ struct sk_buff *skb;
+ struct cpl_get_tcb *req;
+ int wrlen = roundup(sizeof(*req), 16);
-/*
- * c4iw_form_reg_msg - Form a port mapper message with dev info
- */
-static void c4iw_form_reg_msg(struct c4iw_dev *dev,
- struct iwpm_dev_data *pm_msg)
-{
- memcpy(pm_msg->dev_name, dev->ibdev.name, IWPM_DEVNAME_SIZE);
- memcpy(pm_msg->if_name, dev->rdev.lldi.ports[0]->name,
- IWPM_IFNAME_SIZE);
-}
+ skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
+ if (WARN_ON(!skb))
+ return;
-static void c4iw_record_pm_msg(struct c4iw_ep *ep,
- struct iwpm_sa_data *pm_msg)
-{
- memcpy(&ep->com.mapped_local_addr, &pm_msg->mapped_loc_addr,
- sizeof(ep->com.mapped_local_addr));
- memcpy(&ep->com.mapped_remote_addr, &pm_msg->mapped_rem_addr,
- sizeof(ep->com.mapped_remote_addr));
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
+ req = (struct cpl_get_tcb *) skb_put(skb, wrlen);
+ memset(req, 0, wrlen);
+ INIT_TP_WR(req, ep->hwtid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_GET_TCB, ep->hwtid));
+ req->reply_ctrl = htons(REPLY_CHAN_V(0) | QUEUENO_V(ep->rss_qid));
+
+ /*
+ * keep a ref on the ep so the tcb is not unlocked before this
+ * cpl completes. The ref is released in read_tcb_rpl().
+ */
+ c4iw_get_ep(&ep->com);
+ if (WARN_ON(c4iw_ofld_send(&ep->com.dev->rdev, skb)))
+ c4iw_put_ep(&ep->com);
}
-static int get_remote_addr(struct c4iw_ep *parent_ep, struct c4iw_ep *child_ep)
+static int send_abort_req(struct c4iw_ep *ep)
{
- int ret;
+ u32 wrlen = roundup(sizeof(struct cpl_abort_req), 16);
+ struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);
- print_addr(&parent_ep->com, __func__, "get_remote_addr parent_ep ");
- print_addr(&child_ep->com, __func__, "get_remote_addr child_ep ");
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
+ if (WARN_ON(!req_skb))
+ return -ENOMEM;
- ret = iwpm_get_remote_info(&parent_ep->com.mapped_local_addr,
- &child_ep->com.mapped_remote_addr,
- &child_ep->com.remote_addr, RDMA_NL_C4IW);
- if (ret)
- PDBG("Unable to find remote peer addr info - err %d\n", ret);
+ cxgb_mk_abort_req(req_skb, wrlen, ep->hwtid, ep->txq_idx,
+ ep, abort_arp_failure);
- return ret;
+ return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
}
-static void best_mtu(const unsigned short *mtus, unsigned short mtu,
- unsigned int *idx, int use_ts, int ipv6)
+static int send_abort(struct c4iw_ep *ep)
{
- unsigned short hdr_size = (ipv6 ?
- sizeof(struct ipv6hdr) :
- sizeof(struct iphdr)) +
- sizeof(struct tcphdr) +
- (use_ts ?
- round_up(TCPOLEN_TIMESTAMP, 4) : 0);
- unsigned short data_size = mtu - hdr_size;
-
- cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
+ if (!ep->com.qp || !ep->com.qp->srq) {
+ send_abort_req(ep);
+ return 0;
+ }
+ set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags);
+ read_tcb(ep);
+ return 0;
}
static int send_connect(struct c4iw_ep *ep)
{
- struct cpl_act_open_req *req;
- struct cpl_t5_act_open_req *t5_req;
- struct cpl_act_open_req6 *req6;
- struct cpl_t5_act_open_req6 *t5_req6;
+ struct cpl_act_open_req *req = NULL;
+ struct cpl_t5_act_open_req *t5req = NULL;
+ struct cpl_t6_act_open_req *t6req = NULL;
+ struct cpl_act_open_req6 *req6 = NULL;
+ struct cpl_t5_act_open_req6 *t5req6 = NULL;
+ struct cpl_t6_act_open_req6 *t6req6 = NULL;
struct sk_buff *skb;
u64 opt0;
u32 opt2;
unsigned int mtu_idx;
- int wscale;
- int wrlen;
- int sizev4 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
- sizeof(struct cpl_act_open_req) :
- sizeof(struct cpl_t5_act_open_req);
- int sizev6 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
- sizeof(struct cpl_act_open_req6) :
- sizeof(struct cpl_t5_act_open_req6);
+ u32 wscale;
+ int win, sizev4, sizev6, wrlen;
struct sockaddr_in *la = (struct sockaddr_in *)
- &ep->com.mapped_local_addr;
+ &ep->com.local_addr;
struct sockaddr_in *ra = (struct sockaddr_in *)
- &ep->com.mapped_remote_addr;
+ &ep->com.remote_addr;
struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
- &ep->com.mapped_local_addr;
+ &ep->com.local_addr;
struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
- &ep->com.mapped_remote_addr;
- int win;
+ &ep->com.remote_addr;
int ret;
+ enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
+ u32 isn = (get_random_u32() & ~7UL) - 1;
+ struct net_device *netdev;
+ u64 params;
+
+ netdev = ep->com.dev->rdev.lldi.ports[0];
+
+ switch (CHELSIO_CHIP_VERSION(adapter_type)) {
+ case CHELSIO_T4:
+ sizev4 = sizeof(struct cpl_act_open_req);
+ sizev6 = sizeof(struct cpl_act_open_req6);
+ break;
+ case CHELSIO_T5:
+ sizev4 = sizeof(struct cpl_t5_act_open_req);
+ sizev6 = sizeof(struct cpl_t5_act_open_req6);
+ break;
+ case CHELSIO_T6:
+ sizev4 = sizeof(struct cpl_t6_act_open_req);
+ sizev6 = sizeof(struct cpl_t6_act_open_req6);
+ break;
+ default:
+ pr_err("T%d Chip is not supported\n",
+ CHELSIO_CHIP_VERSION(adapter_type));
+ return -EINVAL;
+ }
wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
roundup(sizev4, 16) :
roundup(sizev6, 16);
- PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
+ pr_debug("ep %p atid %u\n", ep, ep->atid);
skb = get_skb(NULL, wrlen, GFP_KERNEL);
if (!skb) {
- printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
- __func__);
+ pr_err("%s - failed to alloc skb\n", __func__);
return -ENOMEM;
}
set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
- best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
- enable_tcp_timestamps,
- (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
- wscale = compute_wscale(rcv_win);
+ cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+ enable_tcp_timestamps,
+ (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
+ wscale = cxgb_compute_wscale(rcv_win);
/*
* Specify the largest window that will fit in opt0. The
@@ -694,7 +793,7 @@ static int send_connect(struct c4iw_ep *ep)
L2T_IDX_V(ep->l2t->idx) |
TX_CHAN_V(ep->tx_chan) |
SMAC_SEL_V(ep->smac_idx) |
- DSCP_V(ep->tos) |
+ DSCP_V(ep->tos >> 2) |
ULP_MODE_V(ULP_MODE_TCPDDP) |
RCV_BUFSIZ_V(win);
opt2 = RX_CHANNEL_V(0) |
@@ -706,145 +805,161 @@ static int send_connect(struct c4iw_ep *ep)
opt2 |= SACK_EN_F;
if (wscale && enable_tcp_window_scaling)
opt2 |= WND_SCALE_EN_F;
- if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+ if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
+ if (peer2peer)
+ isn += 4;
+
opt2 |= T5_OPT_2_VALID_F;
opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
opt2 |= T5_ISS_F;
}
+ params = cxgb4_select_ntuple(netdev, ep->l2t);
+
if (ep->com.remote_addr.ss_family == AF_INET6)
cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&la6->sin6_addr.s6_addr, 1);
t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
- if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
- if (ep->com.remote_addr.ss_family == AF_INET) {
- req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
+ if (ep->com.remote_addr.ss_family == AF_INET) {
+ switch (CHELSIO_CHIP_VERSION(adapter_type)) {
+ case CHELSIO_T4:
+ req = skb_put(skb, wrlen);
INIT_TP_WR(req, 0);
- OPCODE_TID(req) = cpu_to_be32(
- MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
- ((ep->rss_qid << 14) | ep->atid)));
- req->local_port = la->sin_port;
- req->peer_port = ra->sin_port;
- req->local_ip = la->sin_addr.s_addr;
- req->peer_ip = ra->sin_addr.s_addr;
- req->opt0 = cpu_to_be64(opt0);
- req->params = cpu_to_be32(cxgb4_select_ntuple(
- ep->com.dev->rdev.lldi.ports[0],
- ep->l2t));
+ break;
+ case CHELSIO_T5:
+ t5req = skb_put(skb, wrlen);
+ INIT_TP_WR(t5req, 0);
+ req = (struct cpl_act_open_req *)t5req;
+ break;
+ case CHELSIO_T6:
+ t6req = skb_put(skb, wrlen);
+ INIT_TP_WR(t6req, 0);
+ req = (struct cpl_act_open_req *)t6req;
+ t5req = (struct cpl_t5_act_open_req *)t6req;
+ break;
+ default:
+ pr_err("T%d Chip is not supported\n",
+ CHELSIO_CHIP_VERSION(adapter_type));
+ ret = -EINVAL;
+ goto clip_release;
+ }
+
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+ ((ep->rss_qid<<14) | ep->atid)));
+ req->local_port = la->sin_port;
+ req->peer_port = ra->sin_port;
+ req->local_ip = la->sin_addr.s_addr;
+ req->peer_ip = ra->sin_addr.s_addr;
+ req->opt0 = cpu_to_be64(opt0);
+
+ if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
+ req->params = cpu_to_be32(params);
req->opt2 = cpu_to_be32(opt2);
} else {
- req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
-
- INIT_TP_WR(req6, 0);
- OPCODE_TID(req6) = cpu_to_be32(
- MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
- ((ep->rss_qid<<14)|ep->atid)));
- req6->local_port = la6->sin6_port;
- req6->peer_port = ra6->sin6_port;
- req6->local_ip_hi = *((__be64 *)
- (la6->sin6_addr.s6_addr));
- req6->local_ip_lo = *((__be64 *)
- (la6->sin6_addr.s6_addr + 8));
- req6->peer_ip_hi = *((__be64 *)
- (ra6->sin6_addr.s6_addr));
- req6->peer_ip_lo = *((__be64 *)
- (ra6->sin6_addr.s6_addr + 8));
- req6->opt0 = cpu_to_be64(opt0);
- req6->params = cpu_to_be32(cxgb4_select_ntuple(
- ep->com.dev->rdev.lldi.ports[0],
- ep->l2t));
- req6->opt2 = cpu_to_be32(opt2);
+ if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+ t5req->params =
+ cpu_to_be64(FILTER_TUPLE_V(params));
+ t5req->rsvd = cpu_to_be32(isn);
+ pr_debug("snd_isn %u\n", t5req->rsvd);
+ t5req->opt2 = cpu_to_be32(opt2);
+ } else {
+ t6req->params =
+ cpu_to_be64(FILTER_TUPLE_V(params));
+ t6req->rsvd = cpu_to_be32(isn);
+ pr_debug("snd_isn %u\n", t6req->rsvd);
+ t6req->opt2 = cpu_to_be32(opt2);
+ }
}
} else {
- u32 isn = (prandom_u32() & ~7UL) - 1;
-
- if (peer2peer)
- isn += 4;
+ switch (CHELSIO_CHIP_VERSION(adapter_type)) {
+ case CHELSIO_T4:
+ req6 = skb_put(skb, wrlen);
+ INIT_TP_WR(req6, 0);
+ break;
+ case CHELSIO_T5:
+ t5req6 = skb_put(skb, wrlen);
+ INIT_TP_WR(t5req6, 0);
+ req6 = (struct cpl_act_open_req6 *)t5req6;
+ break;
+ case CHELSIO_T6:
+ t6req6 = skb_put(skb, wrlen);
+ INIT_TP_WR(t6req6, 0);
+ req6 = (struct cpl_act_open_req6 *)t6req6;
+ t5req6 = (struct cpl_t5_act_open_req6 *)t6req6;
+ break;
+ default:
+ pr_err("T%d Chip is not supported\n",
+ CHELSIO_CHIP_VERSION(adapter_type));
+ ret = -EINVAL;
+ goto clip_release;
+ }
- if (ep->com.remote_addr.ss_family == AF_INET) {
- t5_req = (struct cpl_t5_act_open_req *)
- skb_put(skb, wrlen);
- INIT_TP_WR(t5_req, 0);
- OPCODE_TID(t5_req) = cpu_to_be32(
- MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
- ((ep->rss_qid << 14) | ep->atid)));
- t5_req->local_port = la->sin_port;
- t5_req->peer_port = ra->sin_port;
- t5_req->local_ip = la->sin_addr.s_addr;
- t5_req->peer_ip = ra->sin_addr.s_addr;
- t5_req->opt0 = cpu_to_be64(opt0);
- t5_req->params = cpu_to_be64(FILTER_TUPLE_V(
- cxgb4_select_ntuple(
- ep->com.dev->rdev.lldi.ports[0],
- ep->l2t)));
- t5_req->rsvd = cpu_to_be32(isn);
- PDBG("%s snd_isn %u\n", __func__,
- be32_to_cpu(t5_req->rsvd));
- t5_req->opt2 = cpu_to_be32(opt2);
+ OPCODE_TID(req6) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+ ((ep->rss_qid<<14)|ep->atid)));
+ req6->local_port = la6->sin6_port;
+ req6->peer_port = ra6->sin6_port;
+ req6->local_ip_hi = *((__be64 *)(la6->sin6_addr.s6_addr));
+ req6->local_ip_lo = *((__be64 *)(la6->sin6_addr.s6_addr + 8));
+ req6->peer_ip_hi = *((__be64 *)(ra6->sin6_addr.s6_addr));
+ req6->peer_ip_lo = *((__be64 *)(ra6->sin6_addr.s6_addr + 8));
+ req6->opt0 = cpu_to_be64(opt0);
+
+ if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
+ req6->params = cpu_to_be32(cxgb4_select_ntuple(netdev,
+ ep->l2t));
+ req6->opt2 = cpu_to_be32(opt2);
} else {
- t5_req6 = (struct cpl_t5_act_open_req6 *)
- skb_put(skb, wrlen);
- INIT_TP_WR(t5_req6, 0);
- OPCODE_TID(t5_req6) = cpu_to_be32(
- MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
- ((ep->rss_qid<<14)|ep->atid)));
- t5_req6->local_port = la6->sin6_port;
- t5_req6->peer_port = ra6->sin6_port;
- t5_req6->local_ip_hi = *((__be64 *)
- (la6->sin6_addr.s6_addr));
- t5_req6->local_ip_lo = *((__be64 *)
- (la6->sin6_addr.s6_addr + 8));
- t5_req6->peer_ip_hi = *((__be64 *)
- (ra6->sin6_addr.s6_addr));
- t5_req6->peer_ip_lo = *((__be64 *)
- (ra6->sin6_addr.s6_addr + 8));
- t5_req6->opt0 = cpu_to_be64(opt0);
- t5_req6->params = cpu_to_be64(FILTER_TUPLE_V(
- cxgb4_select_ntuple(
- ep->com.dev->rdev.lldi.ports[0],
- ep->l2t)));
- t5_req6->rsvd = cpu_to_be32(isn);
- PDBG("%s snd_isn %u\n", __func__,
- be32_to_cpu(t5_req6->rsvd));
- t5_req6->opt2 = cpu_to_be32(opt2);
+ if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+ t5req6->params =
+ cpu_to_be64(FILTER_TUPLE_V(params));
+ t5req6->rsvd = cpu_to_be32(isn);
+ pr_debug("snd_isn %u\n", t5req6->rsvd);
+ t5req6->opt2 = cpu_to_be32(opt2);
+ } else {
+ t6req6->params =
+ cpu_to_be64(FILTER_TUPLE_V(params));
+ t6req6->rsvd = cpu_to_be32(isn);
+ pr_debug("snd_isn %u\n", t6req6->rsvd);
+ t6req6->opt2 = cpu_to_be32(opt2);
+ }
+
}
}
set_bit(ACT_OPEN_REQ, &ep->com.history);
ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+clip_release:
if (ret && ep->com.remote_addr.ss_family == AF_INET6)
cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&la6->sin6_addr.s6_addr, 1);
return ret;
}
-static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
- u8 mpa_rev_to_use)
+static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
+ u8 mpa_rev_to_use)
{
- int mpalen, wrlen;
+ int mpalen, wrlen, ret;
struct fw_ofld_tx_data_wr *req;
struct mpa_message *mpa;
struct mpa_v2_conn_params mpa_v2_params;
- PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
-
- BUG_ON(skb_cloned(skb));
+ pr_debug("ep %p tid %u pd_len %d\n",
+ ep, ep->hwtid, ep->plen);
mpalen = sizeof(*mpa) + ep->plen;
if (mpa_rev_to_use == 2)
mpalen += sizeof(struct mpa_v2_conn_params);
- wrlen = roundup(mpalen + sizeof *req, 16);
+ wrlen = roundup(mpalen + sizeof(*req), 16);
skb = get_skb(skb, wrlen, GFP_KERNEL);
if (!skb) {
connect_reply_upcall(ep, -ENOMEM);
- return;
+ return -ENOMEM;
}
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
- memset(req, 0, wrlen);
+ req = skb_put_zero(skb, wrlen);
req->op_to_immdlen = cpu_to_be32(
FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
FW_WR_COMPL_F |
@@ -859,9 +974,19 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
mpa = (struct mpa_message *)(req + 1);
memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
- mpa->flags = (crc_enabled ? MPA_CRC : 0) |
- (markers_enabled ? MPA_MARKERS : 0) |
- (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
+
+ mpa->flags = 0;
+ if (crc_enabled)
+ mpa->flags |= MPA_CRC;
+ if (markers_enabled) {
+ mpa->flags |= MPA_MARKERS;
+ ep->mpa_attr.recv_marker_enabled = 1;
+ } else {
+ ep->mpa_attr.recv_marker_enabled = 0;
+ }
+ if (mpa_rev_to_use == 2)
+ mpa->flags |= MPA_ENHANCED_RDMA_CONN;
+
mpa->private_data_size = htons(ep->plen);
mpa->revision = mpa_rev_to_use;
if (mpa_rev_to_use == 1) {
@@ -870,10 +995,11 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
}
if (mpa_rev_to_use == 2) {
- mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
- sizeof (struct mpa_v2_conn_params));
- PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
- ep->ord);
+ mpa->private_data_size =
+ htons(ntohs(mpa->private_data_size) +
+ sizeof(struct mpa_v2_conn_params));
+ pr_debug("initiator ird %u ord %u\n", ep->ird,
+ ep->ord);
mpa_v2_params.ird = htons((u16)ep->ird);
mpa_v2_params.ord = htons((u16)ep->ord);
@@ -905,14 +1031,15 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
*/
skb_get(skb);
t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
- BUG_ON(ep->mpa_skb);
ep->mpa_skb = skb;
- c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+ ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+ if (ret)
+ return ret;
start_ep_timer(ep);
__state_set(&ep->com, MPA_REQ_SENT);
ep->mpa_attr.initiator = 1;
ep->snd_seq += mpalen;
- return;
+ return ret;
}
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
@@ -923,22 +1050,22 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
struct sk_buff *skb;
struct mpa_v2_conn_params mpa_v2_params;
- PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
+ pr_debug("ep %p tid %u pd_len %d\n",
+ ep, ep->hwtid, ep->plen);
mpalen = sizeof(*mpa) + plen;
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
mpalen += sizeof(struct mpa_v2_conn_params);
- wrlen = roundup(mpalen + sizeof *req, 16);
+ wrlen = roundup(mpalen + sizeof(*req), 16);
skb = get_skb(NULL, wrlen, GFP_KERNEL);
if (!skb) {
- printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
+ pr_err("%s - cannot alloc skb!\n", __func__);
return -ENOMEM;
}
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
- memset(req, 0, wrlen);
+ req = skb_put_zero(skb, wrlen);
req->op_to_immdlen = cpu_to_be32(
FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
FW_WR_COMPL_F |
@@ -960,8 +1087,9 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
mpa->flags |= MPA_ENHANCED_RDMA_CONN;
- mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
- sizeof (struct mpa_v2_conn_params));
+ mpa->private_data_size =
+ htons(ntohs(mpa->private_data_size) +
+ sizeof(struct mpa_v2_conn_params));
mpa_v2_params.ird = htons(((u16)ep->ird) |
(peer2peer ? MPA_V2_PEER2PEER_MODEL :
0));
@@ -988,8 +1116,7 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
*/
skb_get(skb);
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
- BUG_ON(ep->mpa_skb);
+ t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
ep->mpa_skb = skb;
ep->snd_seq += mpalen;
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
@@ -1003,22 +1130,22 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
struct sk_buff *skb;
struct mpa_v2_conn_params mpa_v2_params;
- PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
+ pr_debug("ep %p tid %u pd_len %d\n",
+ ep, ep->hwtid, ep->plen);
mpalen = sizeof(*mpa) + plen;
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
mpalen += sizeof(struct mpa_v2_conn_params);
- wrlen = roundup(mpalen + sizeof *req, 16);
+ wrlen = roundup(mpalen + sizeof(*req), 16);
skb = get_skb(NULL, wrlen, GFP_KERNEL);
if (!skb) {
- printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
+ pr_err("%s - cannot alloc skb!\n", __func__);
return -ENOMEM;
}
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
- memset(req, 0, wrlen);
+ req = skb_put_zero(skb, wrlen);
req->op_to_immdlen = cpu_to_be32(
FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
FW_WR_COMPL_F |
@@ -1034,15 +1161,19 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
mpa = (struct mpa_message *)(req + 1);
memset(mpa, 0, sizeof(*mpa));
memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
- mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
- (markers_enabled ? MPA_MARKERS : 0);
+ mpa->flags = 0;
+ if (ep->mpa_attr.crc_enabled)
+ mpa->flags |= MPA_CRC;
+ if (ep->mpa_attr.recv_marker_enabled)
+ mpa->flags |= MPA_MARKERS;
mpa->revision = ep->mpa_attr.version;
mpa->private_data_size = htons(plen);
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
mpa->flags |= MPA_ENHANCED_RDMA_CONN;
- mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
- sizeof (struct mpa_v2_conn_params));
+ mpa->private_data_size =
+ htons(ntohs(mpa->private_data_size) +
+ sizeof(struct mpa_v2_conn_params));
mpa_v2_params.ird = htons((u16)ep->ird);
mpa_v2_params.ord = htons((u16)ep->ord);
if (peer2peer && (ep->mpa_attr.p2p_type !=
@@ -1073,7 +1204,7 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
* Function fw4_ack() will deref it.
*/
skb_get(skb);
- t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
+ t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
ep->mpa_skb = skb;
__state_set(&ep->com, MPA_REP_SENT);
ep->snd_seq += mpalen;
@@ -1084,40 +1215,54 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *ep;
struct cpl_act_establish *req = cplhdr(skb);
+ unsigned short tcp_opt = ntohs(req->tcp_opt);
unsigned int tid = GET_TID(req);
unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
struct tid_info *t = dev->rdev.lldi.tids;
+ int ret;
ep = lookup_atid(t, atid);
+ if (!ep)
+ return -EINVAL;
- PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
- be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
+ pr_debug("ep %p tid %u snd_isn %u rcv_isn %u\n", ep, tid,
+ be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
mutex_lock(&ep->com.mutex);
dst_confirm(ep->dst);
/* setup the hwtid for this connection */
ep->hwtid = tid;
- cxgb4_insert_tid(t, ep, tid);
- insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);
+ cxgb4_insert_tid(t, ep, tid, ep->com.local_addr.ss_family);
+ insert_ep_tid(ep);
ep->snd_seq = be32_to_cpu(req->snd_isn);
ep->rcv_seq = be32_to_cpu(req->rcv_isn);
+ ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
- set_emss(ep, ntohs(req->tcp_opt));
+ set_emss(ep, tcp_opt);
/* dealloc the atid */
- remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
+ xa_erase_irq(&ep->com.dev->atids, atid);
cxgb4_free_atid(t, atid);
set_bit(ACT_ESTAB, &ep->com.history);
/* start MPA negotiation */
- send_flowc(ep, NULL);
+ ret = send_flowc(ep);
+ if (ret)
+ goto err;
if (ep->retry_with_mpa_v1)
- send_mpa_req(ep, skb, 1);
+ ret = send_mpa_req(ep, skb, 1);
else
- send_mpa_req(ep, skb, mpa_rev);
+ ret = send_mpa_req(ep, skb, mpa_rev);
+ if (ret)
+ goto err;
+ mutex_unlock(&ep->com.mutex);
+ return 0;
+err:
mutex_unlock(&ep->com.mutex);
+ connect_reply_upcall(ep, -ENOMEM);
+ c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
return 0;
}
@@ -1125,38 +1270,29 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
struct iw_cm_event event;
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CLOSE;
event.status = status;
if (ep->com.cm_id) {
- PDBG("close complete delivered ep %p cm_id %p tid %u\n",
- ep, ep->com.cm_id, ep->hwtid);
+ pr_debug("close complete delivered ep %p cm_id %p tid %u\n",
+ ep, ep->com.cm_id, ep->hwtid);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- ep->com.cm_id->rem_ref(ep->com.cm_id);
- ep->com.cm_id = NULL;
+ deref_cm_id(&ep->com);
set_bit(CLOSE_UPCALL, &ep->com.history);
}
}
-static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
-{
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- __state_set(&ep->com, ABORTING);
- set_bit(ABORT_CONN, &ep->com.history);
- return send_abort(ep, skb, gfp);
-}
-
static void peer_close_upcall(struct c4iw_ep *ep)
{
struct iw_cm_event event;
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_DISCONNECT;
if (ep->com.cm_id) {
- PDBG("peer close delivered ep %p cm_id %p tid %u\n",
- ep, ep->com.cm_id, ep->hwtid);
+ pr_debug("peer close delivered ep %p cm_id %p tid %u\n",
+ ep, ep->com.cm_id, ep->hwtid);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
set_bit(DISCONN_UPCALL, &ep->com.history);
}
@@ -1166,16 +1302,15 @@ static void peer_abort_upcall(struct c4iw_ep *ep)
{
struct iw_cm_event event;
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CLOSE;
event.status = -ECONNRESET;
if (ep->com.cm_id) {
- PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
- ep->com.cm_id, ep->hwtid);
+ pr_debug("abort delivered ep %p cm_id %p tid %u\n", ep,
+ ep->com.cm_id, ep->hwtid);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- ep->com.cm_id->rem_ref(ep->com.cm_id);
- ep->com.cm_id = NULL;
+ deref_cm_id(&ep->com);
set_bit(ABORT_UPCALL, &ep->com.history);
}
}
@@ -1184,7 +1319,8 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
struct iw_cm_event event;
- PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
+ pr_debug("ep %p tid %u status %d\n",
+ ep, ep->hwtid, status);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CONNECT_REPLY;
event.status = status;
@@ -1196,6 +1332,8 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
if ((status == 0) || (status == -ECONNREFUSED)) {
if (!ep->tried_with_mpa_v1) {
/* this means MPA_v2 is used */
+ event.ord = ep->ird;
+ event.ird = ep->ord;
event.private_data_len = ep->plen -
sizeof(struct mpa_v2_conn_params);
event.private_data = ep->mpa_pkt +
@@ -1203,21 +1341,21 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
sizeof(struct mpa_v2_conn_params);
} else {
/* this means MPA_v1 is used */
+ event.ord = cur_max_read_depth(ep->com.dev);
+ event.ird = cur_max_read_depth(ep->com.dev);
event.private_data_len = ep->plen;
event.private_data = ep->mpa_pkt +
sizeof(struct mpa_message);
}
}
- PDBG("%s ep %p tid %u status %d\n", __func__, ep,
- ep->hwtid, status);
+ pr_debug("ep %p tid %u status %d\n", ep,
+ ep->hwtid, status);
set_bit(CONN_RPL_UPCALL, &ep->com.history);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- if (status < 0) {
- ep->com.cm_id->rem_ref(ep->com.cm_id);
- ep->com.cm_id = NULL;
- }
+ if (status < 0)
+ deref_cm_id(&ep->com);
}
static int connect_request_upcall(struct c4iw_ep *ep)
@@ -1225,7 +1363,7 @@ static int connect_request_upcall(struct c4iw_ep *ep)
struct iw_cm_event event;
int ret;
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CONNECT_REQUEST;
memcpy(&event.local_addr, &ep->com.local_addr,
@@ -1262,13 +1400,13 @@ static void established_upcall(struct c4iw_ep *ep)
{
struct iw_cm_event event;
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_ESTABLISHED;
- event.ird = ep->ird;
- event.ord = ep->ord;
+ event.ird = ep->ord;
+ event.ord = ep->ird;
if (ep->com.cm_id) {
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
set_bit(ESTAB_UPCALL, &ep->com.history);
}
@@ -1276,14 +1414,15 @@ static void established_upcall(struct c4iw_ep *ep)
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
- struct cpl_rx_data_ack *req;
struct sk_buff *skb;
- int wrlen = roundup(sizeof *req, 16);
+ u32 wrlen = roundup(sizeof(struct cpl_rx_data_ack), 16);
+ u32 credit_dack;
- PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
+ pr_debug("ep %p tid %u credits %u\n",
+ ep, ep->hwtid, credits);
skb = get_skb(NULL, wrlen, GFP_KERNEL);
if (!skb) {
- printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
+ pr_err("update_rx_credits - cannot alloc skb!\n");
return 0;
}
@@ -1295,21 +1434,30 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;
- req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
- memset(req, 0, wrlen);
- INIT_TP_WR(req, ep->hwtid);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
- ep->hwtid));
- req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F |
- RX_DACK_CHANGE_F |
- RX_DACK_MODE_V(dack_mode));
- set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
+ credit_dack = credits | RX_FORCE_ACK_F | RX_DACK_CHANGE_F |
+ RX_DACK_MODE_V(dack_mode);
+
+ cxgb_mk_rx_data_ack(skb, wrlen, ep->hwtid, ep->ctrlq_idx,
+ credit_dack);
+
c4iw_ofld_send(&ep->com.dev->rdev, skb);
return credits;
}
#define RELAXED_IRD_NEGOTIATION 1
+/*
+ * process_mpa_reply - process streaming mode MPA reply
+ *
+ * Returns:
+ *
+ * 0 upon success indicating a connect request was delivered to the ULP
+ * or the mpa request is incomplete but valid so far.
+ *
+ * 1 if a failure requires the caller to close the connection.
+ *
+ * 2 if a failure requires the caller to abort the connection.
+ */
static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
struct mpa_message *mpa;
@@ -1322,15 +1470,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
int err;
int disconnect = 0;
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-
- /*
- * Stop mpa timer. If it expired, then
- * we ignore the MPA reply. process_timeout()
- * will abort the connection.
- */
- if (stop_ep_timer(ep))
- return 0;
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
/*
* If we get more than the supported amount of private data
@@ -1338,7 +1478,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
*/
if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
err = -EINVAL;
- goto err;
+ goto err_stop_timer;
}
/*
@@ -1357,14 +1497,14 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
/* Validate MPA header. */
if (mpa->revision > mpa_rev) {
- printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
- " Received = %d\n", __func__, mpa_rev, mpa->revision);
+ pr_err("%s MPA version mismatch. Local = %d, Received = %d\n",
+ __func__, mpa_rev, mpa->revision);
err = -EPROTO;
- goto err;
+ goto err_stop_timer;
}
if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
err = -EPROTO;
- goto err;
+ goto err_stop_timer;
}
plen = ntohs(mpa->private_data_size);
@@ -1374,7 +1514,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
*/
if (plen > MPA_MAX_PRIVATE_DATA) {
err = -EPROTO;
- goto err;
+ goto err_stop_timer;
}
/*
@@ -1382,7 +1522,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
*/
if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
err = -EPROTO;
- goto err;
+ goto err_stop_timer;
}
ep->plen = (u8) plen;
@@ -1396,17 +1536,24 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
if (mpa->flags & MPA_REJECT) {
err = -ECONNREFUSED;
- goto err;
+ goto err_stop_timer;
}
/*
+ * Stop mpa timer. If it expired, then
+ * we ignore the MPA reply. process_timeout()
+ * will abort the connection.
+ */
+ if (stop_ep_timer(ep))
+ return 0;
+
+ /*
* If we get here we have accumulated the entire mpa
* start reply message including private data. And
* the MPA header is valid.
*/
__state_set(&ep->com, FPDU_MODE);
ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
- ep->mpa_attr.recv_marker_enabled = markers_enabled;
ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
ep->mpa_attr.version = mpa->revision;
ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
@@ -1421,8 +1568,8 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
MPA_V2_IRD_ORD_MASK;
resp_ord = ntohs(mpa_v2_params->ord) &
MPA_V2_IRD_ORD_MASK;
- PDBG("%s responder ird %u ord %u ep ird %u ord %u\n",
- __func__, resp_ird, resp_ord, ep->ird, ep->ord);
+ pr_debug("responder ird %u ord %u ep ird %u ord %u\n",
+ resp_ird, resp_ord, ep->ird, ep->ord);
/*
* This is a double-check. Ideally, below checks are
@@ -1466,12 +1613,11 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
if (peer2peer)
ep->mpa_attr.p2p_type = p2p_type;
- PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
- "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
- "%d\n", __func__, ep->mpa_attr.crc_enabled,
- ep->mpa_attr.recv_marker_enabled,
- ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
- ep->mpa_attr.p2p_type, p2p_type);
+ pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = %d\n",
+ ep->mpa_attr.crc_enabled,
+ ep->mpa_attr.recv_marker_enabled,
+ ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
+ ep->mpa_attr.p2p_type, p2p_type);
/*
* If responder's RTR does not match with that of initiator, assign
@@ -1506,7 +1652,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
* supports, generate TERM message
*/
if (rtr_mismatch) {
- printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
+ pr_err("%s: RTR mismatch, sending TERM\n", __func__);
attrs.layer_etype = LAYER_MPA | DDP_LLP;
attrs.ecode = MPA_NOMATCH_RTR;
attrs.next_state = C4IW_QP_STATE_TERMINATE;
@@ -1525,8 +1671,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
* initiator ORD.
*/
if (insuff_ird) {
- printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
- __func__);
+ pr_err("%s: Insufficient IRD, sending TERM\n", __func__);
attrs.layer_etype = LAYER_MPA | DDP_LLP;
attrs.ecode = MPA_INSUFF_IRD;
attrs.next_state = C4IW_QP_STATE_TERMINATE;
@@ -1538,33 +1683,43 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
goto out;
}
goto out;
+err_stop_timer:
+ stop_ep_timer(ep);
err:
- __state_set(&ep->com, ABORTING);
- send_abort(ep, skb, GFP_KERNEL);
+ disconnect = 2;
out:
connect_reply_upcall(ep, err);
return disconnect;
}
-static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
+/*
+ * process_mpa_request - process streaming mode MPA request
+ *
+ * Returns:
+ *
+ * 0 upon success indicating a connect request was delivered to the ULP
+ * or the mpa request is incomplete but valid so far.
+ *
+ * 1 if a failure requires the caller to close the connection.
+ *
+ * 2 if a failure requires the caller to abort the connection.
+ */
+static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
struct mpa_message *mpa;
struct mpa_v2_conn_params *mpa_v2_params;
u16 plen;
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
/*
* If we get more than the supported amount of private data
* then we must fail this connection.
*/
- if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
- (void)stop_ep_timer(ep);
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
+ if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
+ goto err_stop_timer;
- PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
+ pr_debug("enter (%s line %u)\n", __FILE__, __LINE__);
/*
* Copy the new data into our accumulation buffer.
@@ -1578,54 +1733,43 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
* We'll continue processing when more data arrives.
*/
if (ep->mpa_pkt_len < sizeof(*mpa))
- return;
+ return 0;
- PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
+ pr_debug("enter (%s line %u)\n", __FILE__, __LINE__);
mpa = (struct mpa_message *) ep->mpa_pkt;
/*
* Validate MPA Header.
*/
if (mpa->revision > mpa_rev) {
- printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
- " Received = %d\n", __func__, mpa_rev, mpa->revision);
- (void)stop_ep_timer(ep);
- abort_connection(ep, skb, GFP_KERNEL);
- return;
+ pr_err("%s MPA version mismatch. Local = %d, Received = %d\n",
+ __func__, mpa_rev, mpa->revision);
+ goto err_stop_timer;
}
- if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
- (void)stop_ep_timer(ep);
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
+ if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
+ goto err_stop_timer;
plen = ntohs(mpa->private_data_size);
/*
* Fail if there's too much private data.
*/
- if (plen > MPA_MAX_PRIVATE_DATA) {
- (void)stop_ep_timer(ep);
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
+ if (plen > MPA_MAX_PRIVATE_DATA)
+ goto err_stop_timer;
/*
* If plen does not account for pkt size
*/
- if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
- (void)stop_ep_timer(ep);
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
+ if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
+ goto err_stop_timer;
ep->plen = (u8) plen;
/*
* If we don't have all the pdata yet, then bail.
*/
if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
- return;
+ return 0;
/*
* If we get here we have accumulated the entire mpa
@@ -1648,10 +1792,14 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
(ep->mpa_pkt + sizeof(*mpa));
ep->ird = ntohs(mpa_v2_params->ird) &
MPA_V2_IRD_ORD_MASK;
+ ep->ird = min_t(u32, ep->ird,
+ cur_max_read_depth(ep->com.dev));
ep->ord = ntohs(mpa_v2_params->ord) &
MPA_V2_IRD_ORD_MASK;
- PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
- ep->ord);
+ ep->ord = min_t(u32, ep->ord,
+ cur_max_read_depth(ep->com.dev));
+ pr_debug("initiator ird %u ord %u\n",
+ ep->ird, ep->ord);
if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
if (peer2peer) {
if (ntohs(mpa_v2_params->ord) &
@@ -1668,32 +1816,31 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
if (peer2peer)
ep->mpa_attr.p2p_type = p2p_type;
- PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
- "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
- ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
- ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
- ep->mpa_attr.p2p_type);
+ pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d\n",
+ ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
+ ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
+ ep->mpa_attr.p2p_type);
- /*
- * If the endpoint timer already expired, then we ignore
- * the start request. process_timeout() will abort
- * the connection.
- */
- if (!stop_ep_timer(ep)) {
- __state_set(&ep->com, MPA_REQ_RCVD);
-
- /* drive upcall */
- mutex_lock_nested(&ep->parent_ep->com.mutex,
- SINGLE_DEPTH_NESTING);
- if (ep->parent_ep->com.state != DEAD) {
- if (connect_request_upcall(ep))
- abort_connection(ep, skb, GFP_KERNEL);
- } else {
- abort_connection(ep, skb, GFP_KERNEL);
- }
- mutex_unlock(&ep->parent_ep->com.mutex);
+ __state_set(&ep->com, MPA_REQ_RCVD);
+
+ /* drive upcall */
+ mutex_lock_nested(&ep->parent_ep->com.mutex, SINGLE_DEPTH_NESTING);
+ if (ep->parent_ep->com.state != DEAD) {
+ if (connect_request_upcall(ep))
+ goto err_unlock_parent;
+ } else {
+ goto err_unlock_parent;
}
- return;
+ mutex_unlock(&ep->parent_ep->com.mutex);
+ return 0;
+
+err_unlock_parent:
+ mutex_unlock(&ep->parent_ep->com.mutex);
+ goto err_out;
+err_stop_timer:
+ (void)stop_ep_timer(ep);
+err_out:
+ return 2;
}
static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
@@ -1702,33 +1849,32 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
struct cpl_rx_data *hdr = cplhdr(skb);
unsigned int dlen = ntohs(hdr->len);
unsigned int tid = GET_TID(hdr);
- struct tid_info *t = dev->rdev.lldi.tids;
__u8 status = hdr->status;
int disconnect = 0;
- ep = lookup_tid(t, tid);
+ ep = get_ep_from_tid(dev, tid);
if (!ep)
return 0;
- PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
+ pr_debug("ep %p tid %u dlen %u\n", ep, ep->hwtid, dlen);
skb_pull(skb, sizeof(*hdr));
skb_trim(skb, dlen);
mutex_lock(&ep->com.mutex);
- /* update RX credits */
- update_rx_credits(ep, dlen);
-
switch (ep->com.state) {
case MPA_REQ_SENT:
+ update_rx_credits(ep, dlen);
ep->rcv_seq += dlen;
disconnect = process_mpa_reply(ep, skb);
break;
case MPA_REQ_WAIT:
+ update_rx_credits(ep, dlen);
ep->rcv_seq += dlen;
- process_mpa_request(ep, skb);
+ disconnect = process_mpa_request(ep, skb);
break;
case FPDU_MODE: {
struct c4iw_qp_attributes attrs;
- BUG_ON(!ep->com.qp);
+
+ update_rx_credits(ep, dlen);
if (status)
pr_err("%s Unexpected streaming data." \
" qpid %u ep %p state %d tid %u status %d\n",
@@ -1745,64 +1891,95 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
}
mutex_unlock(&ep->com.mutex);
if (disconnect)
- c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
+ c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
+ c4iw_put_ep(&ep->com);
return 0;
}
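/*
 * A minimal, self-contained model of the 0/1/2 return convention documented
 * above process_mpa_reply()/process_mpa_request(), and of how rx_data() turns
 * that value into a close (1) or an abort (2) via c4iw_ep_disconnect().  All
 * names below are illustrative stand-ins, not the driver's own helpers.
 */
#include <stdio.h>

enum mpa_rc {
	MPA_RC_OK    = 0,	/* delivered to ULP, or still accumulating */
	MPA_RC_CLOSE = 1,	/* caller should close the connection */
	MPA_RC_ABORT = 2,	/* caller should abort the connection */
};

/* Pretend parser: too much private data forces an abort, as in the driver. */
static int process_mpa(unsigned int pkt_len, unsigned int max_len)
{
	if (pkt_len > max_len)
		return MPA_RC_ABORT;
	return MPA_RC_OK;
}

static void ep_disconnect(int abort)
{
	printf("disconnect: %s\n", abort ? "abort" : "normal close");
}

int main(void)
{
	int disconnect = process_mpa(600, 512);

	/* Mirrors "c4iw_ep_disconnect(ep, disconnect == 2, ...)" above. */
	if (disconnect)
		ep_disconnect(disconnect == MPA_RC_ABORT);
	return 0;
}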
+static void complete_cached_srq_buffers(struct c4iw_ep *ep, u32 srqidx)
+{
+ enum chip_type adapter_type;
+
+ adapter_type = ep->com.dev->rdev.lldi.adapter_type;
+
+ /*
+ * If this TCB had a srq buffer cached, then we must complete
+ * it. For user mode, that means saving the srqidx in the
+ * user/kernel status page for this qp. For kernel mode, just
+ * synthesize the CQE now.
+ */
+ if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T5 && srqidx) {
+ if (ep->com.qp->ibqp.uobject)
+ t4_set_wq_in_error(&ep->com.qp->wq, srqidx);
+ else
+ c4iw_flush_srqidx(ep->com.qp, srqidx);
+ }
+}
+
static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
+ u32 srqidx;
struct c4iw_ep *ep;
- struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
+ struct cpl_abort_rpl_rss6 *rpl = cplhdr(skb);
int release = 0;
unsigned int tid = GET_TID(rpl);
- struct tid_info *t = dev->rdev.lldi.tids;
- ep = lookup_tid(t, tid);
+ ep = get_ep_from_tid(dev, tid);
if (!ep) {
- printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
+ pr_warn("Abort rpl to freed endpoint\n");
return 0;
}
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+
+ if (ep->com.qp && ep->com.qp->srq) {
+ srqidx = ABORT_RSS_SRQIDX_G(be32_to_cpu(rpl->srqidx_status));
+ complete_cached_srq_buffers(ep, srqidx ? srqidx : ep->srqe_idx);
+ }
+
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case ABORTING:
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
__state_set(&ep->com, DEAD);
release = 1;
break;
default:
- printk(KERN_ERR "%s ep %p state %d\n",
- __func__, ep, ep->com.state);
+ pr_err("%s ep %p state %d\n", __func__, ep, ep->com.state);
break;
}
mutex_unlock(&ep->com.mutex);
- if (release)
+ if (release) {
+ close_complete_upcall(ep, -ECONNRESET);
release_ep_resources(ep);
+ }
+ c4iw_put_ep(&ep->com);
return 0;
}
-static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
+static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
{
struct sk_buff *skb;
struct fw_ofld_connection_wr *req;
unsigned int mtu_idx;
- int wscale;
+ u32 wscale;
struct sockaddr_in *sin;
int win;
skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
- req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
- memset(req, 0, sizeof(*req));
+ if (!skb)
+ return -ENOMEM;
+
+ req = __skb_put_zero(skb, sizeof(*req));
req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
ep->l2t));
- sin = (struct sockaddr_in *)&ep->com.mapped_local_addr;
+ sin = (struct sockaddr_in *)&ep->com.local_addr;
req->le.lport = sin->sin_port;
req->le.u.ipv4.lip = sin->sin_addr.s_addr;
- sin = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
+ sin = (struct sockaddr_in *)&ep->com.remote_addr;
req->le.pport = sin->sin_port;
req->le.u.ipv4.pip = sin->sin_addr.s_addr;
req->tcb.t_state_to_astid =
@@ -1812,10 +1989,10 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F);
req->tcb.tx_max = (__force __be32) jiffies;
req->tcb.rcv_adv = htons(1);
- best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
- enable_tcp_timestamps,
- (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
- wscale = compute_wscale(rcv_win);
+ cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+ enable_tcp_timestamps,
+ (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
+ wscale = cxgb_compute_wscale(rcv_win);
/*
* Specify the largest window that will fit in opt0. The
@@ -1834,7 +2011,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
L2T_IDX_V(ep->l2t->idx) |
TX_CHAN_V(ep->tx_chan) |
SMAC_SEL_V(ep->smac_idx) |
- DSCP_V(ep->tos) |
+ DSCP_V(ep->tos >> 2) |
ULP_MODE_V(ULP_MODE_TCPDDP) |
RCV_BUFSIZ_V(win));
req->tcb.opt2 = (__force __be32) (PACE_V(1) |
@@ -1852,25 +2029,21 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
set_bit(ACT_OFLD_CONN, &ep->com.history);
- c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+ return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
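/*
 * Two small calculations used just above, sketched in isolation.  The window
 * scale loop is the usual TCP computation (smallest shift, capped at 14, such
 * that 65535 << wscale covers the receive window) and is assumed to be what
 * cxgb_compute_wscale() does; DSCP is the top six bits of the IPv4 TOS byte,
 * which is why the DSCP_V() argument became "ep->tos >> 2".
 */
#include <stdio.h>

static unsigned int compute_wscale(unsigned int win)
{
	unsigned int wscale = 0;

	while (wscale < 14 && (65535U << wscale) < win)
		wscale++;
	return wscale;
}

int main(void)
{
	unsigned int rcv_win = 256 * 1024;	/* example receive window */
	unsigned char tos = 0x68;		/* DSCP AF31 (26) with ECN bits clear */

	printf("wscale(%u) = %u\n", rcv_win, compute_wscale(rcv_win));	/* 3 */
	printf("dscp = %u\n", tos >> 2);				/* 26 */
	return 0;
}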
/*
- * Return whether a failed active open has allocated a TID
+ * Some of the error codes above implicitly indicate that there is no TID
+ * allocated with the result of an ACT_OPEN. We use this predicate to make
+ * that explicit.
*/
static inline int act_open_has_tid(int status)
{
- return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
- status != CPL_ERR_ARP_MISS;
-}
-
-/* Returns whether a CPL status conveys negative advice.
- */
-static int is_neg_adv(unsigned int status)
-{
- return status == CPL_ERR_RTX_NEG_ADVICE ||
- status == CPL_ERR_PERSIST_NEG_ADVICE ||
- status == CPL_ERR_KEEPALV_NEG_ADVICE;
+ return (status != CPL_ERR_TCAM_PARITY &&
+ status != CPL_ERR_TCAM_MISS &&
+ status != CPL_ERR_TCAM_FULL &&
+ status != CPL_ERR_CONN_EXIST_SYNRECV &&
+ status != CPL_ERR_CONN_EXIST);
}
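/*
 * The rewritten act_open_has_tid() above is a plain "none of these statuses"
 * predicate.  A self-contained sketch of the same shape, with made-up status
 * values standing in for the CPL_ERR_* codes, and the call pattern used
 * further down in this patch of releasing the TID only when one was actually
 * allocated.
 */
#include <stdio.h>
#include <stdbool.h>

enum { ERR_NONE = 0, ERR_TCAM_FULL = 3, ERR_CONN_EXIST = 6 };	/* illustrative values */

static bool open_has_tid(int status)
{
	return status != ERR_TCAM_FULL && status != ERR_CONN_EXIST;
}

static void remove_tid(unsigned int tid)
{
	printf("releasing tid %u\n", tid);
}

int main(void)
{
	int status = ERR_TCAM_FULL;
	unsigned int tid = 42;

	/* Mirrors "if (status && act_open_has_tid(status)) cxgb4_remove_tid(...)". */
	if (status && open_has_tid(status))
		remove_tid(tid);
	else
		printf("status %d: no tid was allocated\n", status);
	return 0;
}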
static char *neg_adv_str(unsigned int status)
@@ -1891,14 +2064,15 @@ static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
{
ep->snd_win = snd_win;
ep->rcv_win = rcv_win;
- PDBG("%s snd_win %d rcv_win %d\n", __func__, ep->snd_win, ep->rcv_win);
+ pr_debug("snd_win %d rcv_win %d\n",
+ ep->snd_win, ep->rcv_win);
}
#define ACT_OPEN_RETRY_COUNT 2
static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
struct dst_entry *dst, struct c4iw_dev *cdev,
- bool clear_mpa_v1)
+ bool clear_mpa_v1, enum chip_type adapter_type, u8 tos)
{
struct neighbour *n;
int err, step;
@@ -1912,7 +2086,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
err = -ENOMEM;
if (n->dev->flags & IFF_LOOPBACK) {
if (iptype == 4)
- pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
+ pdev = __ip_dev_find(&init_net, *(__be32 *)peer_ip, false);
else if (IS_ENABLED(CONFIG_IPV6))
for_each_netdev(&init_net, pdev) {
if (ipv6_chk_addr(&init_net,
@@ -1927,13 +2101,15 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
err = -ENODEV;
goto out;
}
+ if (is_vlan_dev(pdev))
+ pdev = vlan_dev_real_dev(pdev);
ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
- n, pdev, 0);
+ n, pdev, rt_tos2priority(tos));
if (!ep->l2t)
goto out;
ep->mtu = pdev->mtu;
ep->tx_chan = cxgb4_port_chan(pdev);
- ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
+ ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
step = cdev->rdev.lldi.ntxq /
cdev->rdev.lldi.nchan;
ep->txq_idx = cxgb4_port_idx(pdev) * step;
@@ -1943,16 +2119,15 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
ep->rss_qid = cdev->rdev.lldi.rxq_ids[
cxgb4_port_idx(pdev) * step];
set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
- dev_put(pdev);
} else {
pdev = get_real_dev(n->dev);
ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
- n, pdev, 0);
+ n, pdev, rt_tos2priority(tos));
if (!ep->l2t)
goto out;
ep->mtu = dst_mtu(dst);
ep->tx_chan = cxgb4_port_chan(pdev);
- ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
+ ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
step = cdev->rdev.lldi.ntxq /
cdev->rdev.lldi.nchan;
ep->txq_idx = cxgb4_port_idx(pdev) * step;
@@ -1980,63 +2155,89 @@ out:
static int c4iw_reconnect(struct c4iw_ep *ep)
{
int err = 0;
+ int size = 0;
struct sockaddr_in *laddr = (struct sockaddr_in *)
- &ep->com.cm_id->local_addr;
+ &ep->com.cm_id->m_local_addr;
struct sockaddr_in *raddr = (struct sockaddr_in *)
- &ep->com.cm_id->remote_addr;
+ &ep->com.cm_id->m_remote_addr;
struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
- &ep->com.cm_id->local_addr;
+ &ep->com.cm_id->m_local_addr;
struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
- &ep->com.cm_id->remote_addr;
+ &ep->com.cm_id->m_remote_addr;
int iptype;
__u8 *ra;
- PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
- init_timer(&ep->timer);
+ pr_debug("qp %p cm_id %p\n", ep->com.qp, ep->com.cm_id);
+ c4iw_init_wr_wait(ep->com.wr_waitp);
+
+ /* When the MPA revision differs between the two nodes, the node with
+ * MPA_rev=2 retries the connection with MPA_rev 1 through c4iw_reconnect(),
+ * reusing the same EP with a newly assigned tid. Because the same EP
+ * pointer is reused, some skbs were already consumed by the previous
+ * c4iw_connect(), leaving the EP with too few skbs for the reconnect and
+ * causing a crash on an empty skb list in peer_abort(). Re-allocate the
+ * skbs that have already been consumed.
+ */
+ size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list));
+ if (alloc_ep_skb_list(&ep->com.ep_skb_list, size)) {
+ err = -ENOMEM;
+ goto fail1;
+ }
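/*
 * A stand-alone sketch of the top-up pattern used just above: the endpoint
 * keeps a small reserve of pre-allocated buffers (CN_MAX_CON_BUF deep in the
 * driver), earlier connection attempts consume some of them, and a reconnect
 * refills the reserve to its fixed depth so later abort handling can never
 * find the list empty.  The queue and allocator here are illustrative, not
 * the kernel's sk_buff helpers.
 */
#include <stdio.h>
#include <stdlib.h>

#define RESERVE_DEPTH 4		/* stands in for CN_MAX_CON_BUF */

struct buf_list { void *bufs[RESERVE_DEPTH]; unsigned int len; };

static int top_up(struct buf_list *q)
{
	while (q->len < RESERVE_DEPTH) {
		void *b = malloc(128);

		if (!b)
			return -1;		/* -ENOMEM in the driver */
		q->bufs[q->len++] = b;
	}
	return 0;
}

int main(void)
{
	struct buf_list q = { .len = 1 };	/* three of four buffers already used */

	if (top_up(&q))
		return 1;
	printf("reserve depth now %u\n", q.len);
	for (unsigned int i = 0; i < q.len; i++)
		free(q.bufs[i]);
	return 0;
}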
/*
* Allocate an active TID to initiate a TCP connection.
*/
ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
if (ep->atid == -1) {
- pr_err("%s - cannot alloc atid.\n", __func__);
+ pr_err("%s - cannot alloc atid\n", __func__);
err = -ENOMEM;
goto fail2;
}
- insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);
+ err = xa_insert_irq(&ep->com.dev->atids, ep->atid, ep, GFP_KERNEL);
+ if (err)
+ goto fail2a;
/* find a route */
- if (ep->com.cm_id->local_addr.ss_family == AF_INET) {
- ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr,
- raddr->sin_addr.s_addr, laddr->sin_port,
- raddr->sin_port, 0);
+ if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) {
+ ep->dst = cxgb_find_route(&ep->com.dev->rdev.lldi, get_real_dev,
+ laddr->sin_addr.s_addr,
+ raddr->sin_addr.s_addr,
+ laddr->sin_port,
+ raddr->sin_port, ep->com.cm_id->tos);
iptype = 4;
ra = (__u8 *)&raddr->sin_addr;
} else {
- ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr,
- raddr6->sin6_addr.s6_addr,
- laddr6->sin6_port, raddr6->sin6_port, 0,
- raddr6->sin6_scope_id);
+ ep->dst = cxgb_find_route6(&ep->com.dev->rdev.lldi,
+ get_real_dev,
+ laddr6->sin6_addr.s6_addr,
+ raddr6->sin6_addr.s6_addr,
+ laddr6->sin6_port,
+ raddr6->sin6_port,
+ ep->com.cm_id->tos,
+ raddr6->sin6_scope_id);
iptype = 6;
ra = (__u8 *)&raddr6->sin6_addr;
}
if (!ep->dst) {
- pr_err("%s - cannot find route.\n", __func__);
+ pr_err("%s - cannot find route\n", __func__);
err = -EHOSTUNREACH;
goto fail3;
}
- err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false);
+ err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false,
+ ep->com.dev->rdev.lldi.adapter_type,
+ ep->com.cm_id->tos);
if (err) {
- pr_err("%s - cannot alloc l2e.\n", __func__);
+ pr_err("%s - cannot alloc l2e\n", __func__);
goto fail4;
}
- PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
- __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
- ep->l2t->idx);
+ pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
+ ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
+ ep->l2t->idx);
state_set(&ep->com, CONNECTING);
- ep->tos = 0;
+ ep->tos = ep->com.cm_id->tos;
/* send connect request to rnic */
err = send_connect(ep);
@@ -2047,7 +2248,8 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
fail4:
dst_release(ep->dst);
fail3:
- remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
+ xa_erase_irq(&ep->com.dev->atids, ep->atid);
+fail2a:
cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
/*
@@ -2057,6 +2259,7 @@ fail2:
* response of 1st connect request.
*/
connect_reply_upcall(ep, -ECONNRESET);
+fail1:
c4iw_put_ep(&ep->com);
out:
return err;
@@ -2074,19 +2277,23 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct sockaddr_in *ra;
struct sockaddr_in6 *la6;
struct sockaddr_in6 *ra6;
+ int ret = 0;
ep = lookup_atid(t, atid);
- la = (struct sockaddr_in *)&ep->com.mapped_local_addr;
- ra = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
- la6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
- ra6 = (struct sockaddr_in6 *)&ep->com.mapped_remote_addr;
+ if (!ep)
+ return -EINVAL;
+
+ la = (struct sockaddr_in *)&ep->com.local_addr;
+ ra = (struct sockaddr_in *)&ep->com.remote_addr;
+ la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
+ ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
- PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
- status, status2errno(status));
+ pr_debug("ep %p atid %u status %u errno %d\n", ep, atid,
+ status, status2errno(status));
- if (is_neg_adv(status)) {
- PDBG("%s Connection problems for atid %u status %u (%s)\n",
- __func__, atid, status, neg_adv_str(status));
+ if (cxgb_is_neg_adv(status)) {
+ pr_debug("Connection problems for atid %u status %u (%s)\n",
+ atid, status, neg_adv_str(status));
ep->stats.connect_neg_adv++;
mutex_lock(&dev->rdev.stats.lock);
dev->rdev.stats.neg_adv++;
@@ -2109,9 +2316,10 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
mutex_unlock(&dev->rdev.stats.lock);
if (ep->com.local_addr.ss_family == AF_INET &&
dev->rdev.lldi.enable_fw_ofld_conn) {
- send_fw_act_open_req(ep,
- TID_TID_G(AOPEN_ATID_G(
- ntohl(rpl->atid_status))));
+ ret = send_fw_act_open_req(ep, TID_TID_G(AOPEN_ATID_G(
+ ntohl(rpl->atid_status))));
+ if (ret)
+ goto fail;
return 0;
}
break;
@@ -2121,14 +2329,13 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
if (ep->com.remote_addr.ss_family == AF_INET6) {
struct sockaddr_in6 *sin6 =
(struct sockaddr_in6 *)
- &ep->com.mapped_local_addr;
+ &ep->com.local_addr;
cxgb4_clip_release(
ep->com.dev->rdev.lldi.ports[0],
(const u32 *)
&sin6->sin6_addr.s6_addr, 1);
}
- remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
- atid);
+ xa_erase_irq(&ep->com.dev->atids, atid);
cxgb4_free_atid(t, atid);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
@@ -2151,19 +2358,21 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
break;
}
+fail:
connect_reply_upcall(ep, status2errno(status));
state_set(&ep->com, DEAD);
if (ep->com.remote_addr.ss_family == AF_INET6) {
struct sockaddr_in6 *sin6 =
- (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
+ (struct sockaddr_in6 *)&ep->com.local_addr;
cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
if (status && act_open_has_tid(status))
- cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
+ cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl),
+ ep->com.local_addr.ss_family);
- remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
+ xa_erase_irq(&ep->com.dev->atids, atid);
cxgb4_free_atid(t, atid);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
@@ -2175,18 +2384,17 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_pass_open_rpl *rpl = cplhdr(skb);
- struct tid_info *t = dev->rdev.lldi.tids;
unsigned int stid = GET_TID(rpl);
- struct c4iw_listen_ep *ep = lookup_stid(t, stid);
+ struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
if (!ep) {
- PDBG("%s stid %d lookup failure!\n", __func__, stid);
+ pr_warn("%s stid %d lookup failure!\n", __func__, stid);
goto out;
}
- PDBG("%s ep %p status %d error %d\n", __func__, ep,
- rpl->status, status2errno(rpl->status));
- c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
-
+ pr_debug("ep %p status %d error %d\n", ep,
+ rpl->status, status2errno(rpl->status));
+ c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status));
+ c4iw_put_ep(&ep->com);
out:
return 0;
}
@@ -2194,46 +2402,37 @@ out:
static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
- struct tid_info *t = dev->rdev.lldi.tids;
unsigned int stid = GET_TID(rpl);
- struct c4iw_listen_ep *ep = lookup_stid(t, stid);
+ struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
- PDBG("%s ep %p\n", __func__, ep);
- c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
+ if (!ep) {
+ pr_warn("%s stid %d lookup failure!\n", __func__, stid);
+ goto out;
+ }
+ pr_debug("ep %p\n", ep);
+ c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status));
+ c4iw_put_ep(&ep->com);
+out:
return 0;
}
-static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
- struct cpl_pass_accept_req *req)
+static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
+ struct cpl_pass_accept_req *req)
{
struct cpl_pass_accept_rpl *rpl;
unsigned int mtu_idx;
u64 opt0;
u32 opt2;
- int wscale;
+ u32 wscale;
struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
int win;
+ enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- BUG_ON(skb_cloned(skb));
-
- skb_get(skb);
- rpl = cplhdr(skb);
- if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
- skb_trim(skb, roundup(sizeof(*rpl5), 16));
- rpl5 = (void *)rpl;
- INIT_TP_WR(rpl5, ep->hwtid);
- } else {
- skb_trim(skb, sizeof(*rpl));
- INIT_TP_WR(rpl, ep->hwtid);
- }
- OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
- ep->hwtid));
-
- best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
- enable_tcp_timestamps && req->tcpopt.tstamp,
- (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
- wscale = compute_wscale(rcv_win);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
+ cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+ enable_tcp_timestamps && req->tcpopt.tstamp,
+ (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
+ wscale = cxgb_compute_wscale(rcv_win);
/*
* Specify the largest window that will fit in opt0. The
@@ -2266,74 +2465,52 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
const struct tcphdr *tcph;
u32 hlen = ntohl(req->hdr_len);
- tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
- IP_HDR_LEN_G(hlen);
+ if (CHELSIO_CHIP_VERSION(adapter_type) <= CHELSIO_T5)
+ tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
+ IP_HDR_LEN_G(hlen);
+ else
+ tcph = (const void *)(req + 1) +
+ T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen);
if (tcph->ece && tcph->cwr)
opt2 |= CCTRL_ECN_V(1);
}
- if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
- u32 isn = (prandom_u32() & ~7UL) - 1;
+
+ if (!is_t4(adapter_type)) {
+ u32 isn = (get_random_u32() & ~7UL) - 1;
+
+ skb = get_skb(skb, roundup(sizeof(*rpl5), 16), GFP_KERNEL);
+ rpl5 = __skb_put_zero(skb, roundup(sizeof(*rpl5), 16));
+ rpl = (void *)rpl5;
+ INIT_TP_WR_CPL(rpl5, CPL_PASS_ACCEPT_RPL, ep->hwtid);
opt2 |= T5_OPT_2_VALID_F;
opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
opt2 |= T5_ISS_F;
- rpl5 = (void *)rpl;
- memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
if (peer2peer)
isn += 4;
rpl5->iss = cpu_to_be32(isn);
- PDBG("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
+ pr_debug("iss %u\n", be32_to_cpu(rpl5->iss));
+ } else {
+ skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
+ rpl = __skb_put_zero(skb, sizeof(*rpl));
+ INIT_TP_WR_CPL(rpl, CPL_PASS_ACCEPT_RPL, ep->hwtid);
}
rpl->opt0 = cpu_to_be64(opt0);
rpl->opt2 = cpu_to_be32(opt2);
set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
- t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
- c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+ t4_set_arp_err_handler(skb, ep, pass_accept_rpl_arp_failure);
- return;
+ return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
{
- PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
- BUG_ON(skb_cloned(skb));
+ pr_debug("c4iw_dev %p tid %u\n", dev, hwtid);
skb_trim(skb, sizeof(struct cpl_tid_release));
release_tid(&dev->rdev, hwtid, skb);
return;
}
-static void get_4tuple(struct cpl_pass_accept_req *req, int *iptype,
- __u8 *local_ip, __u8 *peer_ip,
- __be16 *local_port, __be16 *peer_port)
-{
- int eth_len = ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
- int ip_len = IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
- struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
- struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
- struct tcphdr *tcp = (struct tcphdr *)
- ((u8 *)(req + 1) + eth_len + ip_len);
-
- if (ip->version == 4) {
- PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
- ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
- ntohs(tcp->dest));
- *iptype = 4;
- memcpy(peer_ip, &ip->saddr, 4);
- memcpy(local_ip, &ip->daddr, 4);
- } else {
- PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__,
- ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source),
- ntohs(tcp->dest));
- *iptype = 6;
- memcpy(peer_ip, ip6->saddr.s6_addr, 16);
- memcpy(local_ip, ip6->daddr.s6_addr, 16);
- }
- *peer_port = tcp->source;
- *local_port = tcp->dest;
-
- return;
-}
-
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *child_ep = NULL, *parent_ep;
@@ -2349,101 +2526,103 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
u16 peer_mss = ntohs(req->tcpopt.mss);
int iptype;
unsigned short hdrs;
+ u8 tos;
- parent_ep = lookup_stid(t, stid);
+ parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
if (!parent_ep) {
- PDBG("%s connect request on invalid stid %d\n", __func__, stid);
+ pr_err("%s connect request on invalid stid %d\n",
+ __func__, stid);
goto reject;
}
if (state_read(&parent_ep->com) != LISTEN) {
- printk(KERN_ERR "%s - listening ep not in LISTEN\n",
- __func__);
+ pr_err("%s - listening ep not in LISTEN\n", __func__);
goto reject;
}
- get_4tuple(req, &iptype, local_ip, peer_ip, &local_port, &peer_port);
+ if (parent_ep->com.cm_id->tos_set)
+ tos = parent_ep->com.cm_id->tos;
+ else
+ tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
+
+ cxgb_get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type,
+ &iptype, local_ip, peer_ip, &local_port, &peer_port);
/* Find output route */
if (iptype == 4) {
- PDBG("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
- , __func__, parent_ep, hwtid,
- local_ip, peer_ip, ntohs(local_port),
- ntohs(peer_port), peer_mss);
- dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
- local_port, peer_port,
- PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
+ pr_debug("parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
+ , parent_ep, hwtid,
+ local_ip, peer_ip, ntohs(local_port),
+ ntohs(peer_port), peer_mss);
+ dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
+ *(__be32 *)local_ip, *(__be32 *)peer_ip,
+ local_port, peer_port, tos);
} else {
- PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
- , __func__, parent_ep, hwtid,
- local_ip, peer_ip, ntohs(local_port),
- ntohs(peer_port), peer_mss);
- dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
- PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
- ((struct sockaddr_in6 *)
- &parent_ep->com.local_addr)->sin6_scope_id);
+ pr_debug("parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
+ , parent_ep, hwtid,
+ local_ip, peer_ip, ntohs(local_port),
+ ntohs(peer_port), peer_mss);
+ dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
+ local_ip, peer_ip, local_port, peer_port,
+ tos,
+ ((struct sockaddr_in6 *)
+ &parent_ep->com.local_addr)->sin6_scope_id);
}
if (!dst) {
- printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
- __func__);
+ pr_err("%s - failed to find dst entry!\n", __func__);
goto reject;
}
child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
if (!child_ep) {
- printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
- __func__);
+ pr_err("%s - failed to allocate ep entry!\n", __func__);
dst_release(dst);
goto reject;
}
- err = import_ep(child_ep, iptype, peer_ip, dst, dev, false);
+ err = import_ep(child_ep, iptype, peer_ip, dst, dev, false,
+ parent_ep->com.dev->rdev.lldi.adapter_type, tos);
if (err) {
- printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
- __func__);
+ pr_err("%s - failed to allocate l2t entry!\n", __func__);
dst_release(dst);
kfree(child_ep);
goto reject;
}
- hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) +
+ hdrs = ((iptype == 4) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
+ sizeof(struct tcphdr) +
((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
child_ep->mtu = peer_mss + hdrs;
+ skb_queue_head_init(&child_ep->com.ep_skb_list);
+ if (alloc_ep_skb_list(&child_ep->com.ep_skb_list, CN_MAX_CON_BUF))
+ goto fail;
+
state_set(&child_ep->com, CONNECTING);
child_ep->com.dev = dev;
child_ep->com.cm_id = NULL;
- /*
- * The mapped_local and mapped_remote addresses get setup with
- * the actual 4-tuple. The local address will be based on the
- * actual local address of the connection, but on the port number
- * of the parent listening endpoint. The remote address is
- * setup based on a query to the IWPM since we don't know what it
- * originally was before mapping. If no mapping was done, then
- * mapped_remote == remote, and mapped_local == local.
- */
if (iptype == 4) {
struct sockaddr_in *sin = (struct sockaddr_in *)
- &child_ep->com.mapped_local_addr;
+ &child_ep->com.local_addr;
- sin->sin_family = PF_INET;
+ sin->sin_family = AF_INET;
sin->sin_port = local_port;
sin->sin_addr.s_addr = *(__be32 *)local_ip;
sin = (struct sockaddr_in *)&child_ep->com.local_addr;
- sin->sin_family = PF_INET;
+ sin->sin_family = AF_INET;
sin->sin_port = ((struct sockaddr_in *)
&parent_ep->com.local_addr)->sin_port;
sin->sin_addr.s_addr = *(__be32 *)local_ip;
- sin = (struct sockaddr_in *)&child_ep->com.mapped_remote_addr;
- sin->sin_family = PF_INET;
+ sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
+ sin->sin_family = AF_INET;
sin->sin_port = peer_port;
sin->sin_addr.s_addr = *(__be32 *)peer_ip;
} else {
- sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_local_addr;
+ sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
sin6->sin6_family = PF_INET6;
sin6->sin6_port = local_port;
memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
@@ -2454,38 +2633,44 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
&parent_ep->com.local_addr)->sin6_port;
memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
- sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_remote_addr;
+ sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
sin6->sin6_family = PF_INET6;
sin6->sin6_port = peer_port;
memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
}
- memcpy(&child_ep->com.remote_addr, &child_ep->com.mapped_remote_addr,
- sizeof(child_ep->com.remote_addr));
- get_remote_addr(parent_ep, child_ep);
c4iw_get_ep(&parent_ep->com);
child_ep->parent_ep = parent_ep;
- child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
+ child_ep->tos = tos;
child_ep->dst = dst;
child_ep->hwtid = hwtid;
- PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
- child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
+ pr_debug("tx_chan %u smac_idx %u rss_qid %u\n",
+ child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
- init_timer(&child_ep->timer);
- cxgb4_insert_tid(t, child_ep, hwtid);
- insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
- accept_cr(child_ep, skb, req);
- set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
+ timer_setup(&child_ep->timer, ep_timeout, 0);
+ cxgb4_insert_tid(t, child_ep, hwtid,
+ child_ep->com.local_addr.ss_family);
+ insert_ep_tid(child_ep);
+ if (accept_cr(child_ep, skb, req)) {
+ c4iw_put_ep(&parent_ep->com);
+ release_ep_resources(child_ep);
+ } else {
+ set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
+ }
if (iptype == 6) {
- sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_local_addr;
+ sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
goto out;
+fail:
+ c4iw_put_ep(&child_ep->com);
reject:
reject_cr(dev, hwtid, skb);
out:
+ if (parent_ep)
+ c4iw_put_ep(&parent_ep->com);
return 0;
}
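/*
 * A worked, self-contained version of the MTU clamp computed above: the MTU
 * recorded for the child endpoint is limited to the peer's MSS plus fixed
 * header overhead, which now accounts for IPv6 (40-byte header) as well as
 * IPv4 (20 bytes), plus a 20-byte TCP header and 12 bytes of TCP timestamp
 * option when timestamps are negotiated (as the code above counts it).
 */
#include <stdio.h>

static unsigned int clamp_mtu(unsigned int mtu, unsigned int peer_mss,
			      int ipv6, int tstamps)
{
	unsigned int hdrs = (ipv6 ? 40 : 20) + 20 + (tstamps ? 12 : 0);

	if (peer_mss && mtu > peer_mss + hdrs)
		mtu = peer_mss + hdrs;
	return mtu;
}

int main(void)
{
	/* IPv4, timestamps off: 1400 + 40 = 1440, so the 1500-byte MTU is clamped. */
	printf("%u\n", clamp_mtu(1500, 1400, 0, 0));	/* 1440 */
	/* IPv6, timestamps on: 1440 + 72 = 1512 > 1500, so the MTU stands. */
	printf("%u\n", clamp_mtu(1500, 1440, 1, 1));	/* 1500 */
	return 0;
}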
@@ -2493,24 +2678,33 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *ep;
struct cpl_pass_establish *req = cplhdr(skb);
- struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(req);
+ int ret;
+ u16 tcp_opt = ntohs(req->tcp_opt);
- ep = lookup_tid(t, tid);
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ ep = get_ep_from_tid(dev, tid);
+ if (!ep)
+ return 0;
+
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
ep->snd_seq = be32_to_cpu(req->snd_isn);
ep->rcv_seq = be32_to_cpu(req->rcv_isn);
+ ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
- PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
- ntohs(req->tcp_opt));
+ pr_debug("ep %p hwtid %u tcp_opt 0x%02x\n", ep, tid, tcp_opt);
- set_emss(ep, ntohs(req->tcp_opt));
+ set_emss(ep, tcp_opt);
dst_confirm(ep->dst);
- state_set(&ep->com, MPA_REQ_WAIT);
+ mutex_lock(&ep->com.mutex);
+ ep->com.state = MPA_REQ_WAIT;
start_ep_timer(ep);
- send_flowc(ep, skb);
set_bit(PASS_ESTAB, &ep->com.history);
+ ret = send_flowc(ep);
+ mutex_unlock(&ep->com.mutex);
+ if (ret)
+ c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
+ c4iw_put_ep(&ep->com);
return 0;
}
@@ -2522,12 +2716,14 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_qp_attributes attrs;
int disconnect = 1;
int release = 0;
- struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(hdr);
int ret;
- ep = lookup_tid(t, tid);
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ ep = get_ep_from_tid(dev, tid);
+ if (!ep)
+ return 0;
+
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
dst_confirm(ep->dst);
set_bit(PEER_CLOSE, &ep->com.history);
@@ -2549,13 +2745,13 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
* in rdma connection migration (see c4iw_accept_cr()).
*/
__state_set(&ep->com, CLOSING);
- PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
+ c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
break;
case MPA_REP_SENT:
__state_set(&ep->com, CLOSING);
- PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
+ c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
break;
case FPDU_MODE:
start_ep_timer(ep);
@@ -2591,41 +2787,64 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
disconnect = 0;
break;
default:
- BUG_ON(1);
+ WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
}
mutex_unlock(&ep->com.mutex);
if (disconnect)
c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
if (release)
release_ep_resources(ep);
+ c4iw_put_ep(&ep->com);
return 0;
}
+static void finish_peer_abort(struct c4iw_dev *dev, struct c4iw_ep *ep)
+{
+ complete_cached_srq_buffers(ep, ep->srqe_idx);
+ if (ep->com.cm_id && ep->com.qp) {
+ struct c4iw_qp_attributes attrs;
+
+ attrs.next_state = C4IW_QP_STATE_ERROR;
+ c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ }
+ peer_abort_upcall(ep);
+ release_ep_resources(ep);
+ c4iw_put_ep(&ep->com);
+}
+
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
- struct cpl_abort_req_rss *req = cplhdr(skb);
+ struct cpl_abort_req_rss6 *req = cplhdr(skb);
struct c4iw_ep *ep;
- struct cpl_abort_rpl *rpl;
struct sk_buff *rpl_skb;
struct c4iw_qp_attributes attrs;
int ret;
int release = 0;
- struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(req);
+ u8 status;
+ u32 srqidx;
- ep = lookup_tid(t, tid);
- if (is_neg_adv(req->status)) {
- PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
- __func__, ep->hwtid, req->status,
- neg_adv_str(req->status));
+ u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
+
+ ep = get_ep_from_tid(dev, tid);
+ if (!ep)
+ return 0;
+
+ status = ABORT_RSS_STATUS_G(be32_to_cpu(req->srqidx_status));
+
+ if (cxgb_is_neg_adv(status)) {
+ pr_debug("Negative advice on abort- tid %u status %d (%s)\n",
+ ep->hwtid, status, neg_adv_str(status));
ep->stats.abort_neg_adv++;
mutex_lock(&dev->rdev.stats.lock);
dev->rdev.stats.neg_adv++;
mutex_unlock(&dev->rdev.stats.lock);
- return 0;
+ goto deref_ep;
}
- PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
- ep->com.state);
+
+ pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid,
+ ep->com.state);
set_bit(PEER_ABORT, &ep->com.history);
/*
@@ -2634,18 +2853,20 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
* MPA_REQ_SENT
*/
if (ep->com.state != MPA_REQ_SENT)
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case CONNECTING:
+ c4iw_put_ep(&ep->parent_ep->com);
break;
case MPA_REQ_WAIT:
(void)stop_ep_timer(ep);
break;
case MPA_REQ_SENT:
(void)stop_ep_timer(ep);
- if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
+ if (status != CPL_ERR_CONN_RESET || mpa_rev == 1 ||
+ (mpa_rev == 2 && ep->tried_with_mpa_v1))
connect_reply_upcall(ep, -ECONNRESET);
else {
/*
@@ -2656,8 +2877,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
* do some housekeeping so as to re-initiate the
* connection
*/
- PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
- mpa_rev);
+ pr_info("%s: mpa_rev=%d. Retrying with mpav1\n",
+ __func__, mpa_rev);
ep->retry_with_mpa_v1 = 1;
}
break;
@@ -2668,28 +2889,42 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
case MORIBUND:
case CLOSING:
stop_ep_timer(ep);
- /*FALLTHROUGH*/
+ fallthrough;
case FPDU_MODE:
+ if (ep->com.qp && ep->com.qp->srq) {
+ srqidx = ABORT_RSS_SRQIDX_G(
+ be32_to_cpu(req->srqidx_status));
+ if (srqidx) {
+ complete_cached_srq_buffers(ep, srqidx);
+ } else {
+ /* Hold ep ref until finish_peer_abort() */
+ c4iw_get_ep(&ep->com);
+ __state_set(&ep->com, ABORTING);
+ set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags);
+ read_tcb(ep);
+ break;
+
+ }
+ }
+
if (ep->com.cm_id && ep->com.qp) {
attrs.next_state = C4IW_QP_STATE_ERROR;
ret = c4iw_modify_qp(ep->com.qp->rhp,
ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
&attrs, 1);
if (ret)
- printk(KERN_ERR MOD
- "%s - qp <- error failed!\n",
- __func__);
+ pr_err("%s - qp <- error failed!\n", __func__);
}
peer_abort_upcall(ep);
break;
case ABORTING:
break;
case DEAD:
- PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
+ pr_warn("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
mutex_unlock(&ep->com.mutex);
- return 0;
+ goto deref_ep;
default:
- BUG_ON(1);
+ WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
break;
}
dst_confirm(ep->dst);
@@ -2701,18 +2936,14 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
}
mutex_unlock(&ep->com.mutex);
- rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
- if (!rpl_skb) {
- printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
- __func__);
+ rpl_skb = skb_dequeue(&ep->com.ep_skb_list);
+ if (WARN_ON(!rpl_skb)) {
release = 1;
goto out;
}
- set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
- INIT_TP_WR(rpl, ep->hwtid);
- OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
- rpl->cmd = CPL_ABORT_NO_RST;
+
+ cxgb_mk_abort_rpl(rpl_skb, len, ep->hwtid, ep->txq_idx);
+
c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
if (release)
@@ -2721,19 +2952,24 @@ out:
if (ep->com.remote_addr.ss_family == AF_INET6) {
struct sockaddr_in6 *sin6 =
(struct sockaddr_in6 *)
- &ep->com.mapped_local_addr;
+ &ep->com.local_addr;
cxgb4_clip_release(
ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr,
1);
}
- remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
- cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
+ xa_erase_irq(&ep->com.dev->hwtids, ep->hwtid);
+ cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid,
+ ep->com.local_addr.ss_family);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
c4iw_reconnect(ep);
}
+deref_ep:
+ c4iw_put_ep(&ep->com);
+ /* Dereferencing ep, referenced in peer_abort_intr() */
+ c4iw_put_ep(&ep->com);
return 0;
}
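/*
 * The FPDU_MODE branch above defers part of the abort when the CPL does not
 * carry a usable SRQ index: it takes an extra reference, sets
 * PEER_ABORT_IN_PROGRESS, kicks off read_tcb(), and lets finish_peer_abort()
 * complete the work when the TCB read returns.  A tiny stand-alone model of
 * that "complete now or defer to the async reply" split, with illustrative
 * names throughout.
 */
#include <stdio.h>
#include <stdbool.h>

struct ep_model {
	bool abort_in_progress;
	unsigned int srqidx;
};

static void finish_abort(struct ep_model *ep)
{
	printf("finishing abort, srqidx %u\n", ep->srqidx);
	ep->abort_in_progress = false;
}

/* Called when the asynchronous TCB read completes. */
static void tcb_read_done(struct ep_model *ep, unsigned int srqidx)
{
	ep->srqidx = srqidx;
	if (ep->abort_in_progress)
		finish_abort(ep);
}

static void handle_abort(struct ep_model *ep, unsigned int srqidx_from_cpl)
{
	if (srqidx_from_cpl) {
		ep->srqidx = srqidx_from_cpl;
		finish_abort(ep);		/* complete immediately */
	} else {
		ep->abort_in_progress = true;	/* defer to tcb_read_done() */
		printf("srqidx unknown, issuing TCB read\n");
	}
}

int main(void)
{
	struct ep_model ep = { 0 };

	handle_abort(&ep, 0);	/* deferred path */
	tcb_read_done(&ep, 7);	/* async reply finishes the abort */
	return 0;
}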
@@ -2743,16 +2979,17 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_qp_attributes attrs;
struct cpl_close_con_rpl *rpl = cplhdr(skb);
int release = 0;
- struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(rpl);
- ep = lookup_tid(t, tid);
+ ep = get_ep_from_tid(dev, tid);
+ if (!ep)
+ return 0;
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- BUG_ON(!ep);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
/* The cm_id may be null if we failed to connect */
mutex_lock(&ep->com.mutex);
+ set_bit(CLOSE_CON_RPL, &ep->com.history);
switch (ep->com.state) {
case CLOSING:
__state_set(&ep->com, MORIBUND);
@@ -2774,34 +3011,41 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
case DEAD:
break;
default:
- BUG_ON(1);
+ WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
break;
}
mutex_unlock(&ep->com.mutex);
if (release)
release_ep_resources(ep);
+ c4iw_put_ep(&ep->com);
return 0;
}
static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_rdma_terminate *rpl = cplhdr(skb);
- struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(rpl);
struct c4iw_ep *ep;
struct c4iw_qp_attributes attrs;
- ep = lookup_tid(t, tid);
- BUG_ON(!ep);
+ ep = get_ep_from_tid(dev, tid);
- if (ep && ep->com.qp) {
- printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
- ep->com.qp->wq.sq.qid);
- attrs.next_state = C4IW_QP_STATE_TERMINATE;
- c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
- C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ if (ep) {
+ if (ep->com.qp) {
+ pr_warn("TERM received tid %u qpid %u\n", tid,
+ ep->com.qp->wq.sq.qid);
+ attrs.next_state = C4IW_QP_STATE_TERMINATE;
+ c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ }
+
+ /* As per draft-hilland-iwarp-verbs-v1.0, sec 6.2.3,
+ * when entering the TERM state the RNIC MUST initiate a CLOSE.
+ */
+ c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
+ c4iw_put_ep(&ep->com);
} else
- printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
+ pr_warn("TERM received tid %u no ep/qp\n", tid);
return 0;
}
@@ -2817,52 +3061,58 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
struct cpl_fw4_ack *hdr = cplhdr(skb);
u8 credits = hdr->credits;
unsigned int tid = GET_TID(hdr);
- struct tid_info *t = dev->rdev.lldi.tids;
- ep = lookup_tid(t, tid);
- PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
- if (credits == 0) {
- PDBG("%s 0 credit ack ep %p tid %u state %u\n",
- __func__, ep, ep->hwtid, state_read(&ep->com));
+ ep = get_ep_from_tid(dev, tid);
+ if (!ep)
return 0;
+ pr_debug("ep %p tid %u credits %u\n",
+ ep, ep->hwtid, credits);
+ if (credits == 0) {
+ pr_debug("0 credit ack ep %p tid %u state %u\n",
+ ep, ep->hwtid, state_read(&ep->com));
+ goto out;
}
dst_confirm(ep->dst);
if (ep->mpa_skb) {
- PDBG("%s last streaming msg ack ep %p tid %u state %u "
- "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
- state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
+ pr_debug("last streaming msg ack ep %p tid %u state %u initiator %u freeing skb\n",
+ ep, ep->hwtid, state_read(&ep->com),
+ ep->mpa_attr.initiator ? 1 : 0);
+ mutex_lock(&ep->com.mutex);
kfree_skb(ep->mpa_skb);
ep->mpa_skb = NULL;
+ if (test_bit(STOP_MPA_TIMER, &ep->com.flags))
+ stop_ep_timer(ep);
+ mutex_unlock(&ep->com.mutex);
}
+out:
+ c4iw_put_ep(&ep->com);
return 0;
}
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
- int err = 0;
- int disconnect = 0;
+ int abort;
struct c4iw_ep *ep = to_ep(cm_id);
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
mutex_lock(&ep->com.mutex);
- if (ep->com.state == DEAD) {
+ if (ep->com.state != MPA_REQ_RCVD) {
mutex_unlock(&ep->com.mutex);
c4iw_put_ep(&ep->com);
return -ECONNRESET;
}
set_bit(ULP_REJECT, &ep->com.history);
- BUG_ON(ep->com.state != MPA_REQ_RCVD);
if (mpa_rev == 0)
- abort_connection(ep, NULL, GFP_KERNEL);
- else {
- err = send_mpa_reject(ep, pdata, pdata_len);
- disconnect = 1;
- }
+ abort = 1;
+ else
+ abort = send_mpa_reject(ep, pdata, pdata_len);
mutex_unlock(&ep->com.mutex);
- if (disconnect)
- err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
+
+ stop_ep_timer(ep);
+ c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
c4iw_put_ep(&ep->com);
return 0;
}
@@ -2875,38 +3125,39 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct c4iw_ep *ep = to_ep(cm_id);
struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
+ int abort = 0;
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
mutex_lock(&ep->com.mutex);
- if (ep->com.state == DEAD) {
+ if (ep->com.state != MPA_REQ_RCVD) {
err = -ECONNRESET;
- goto err;
+ goto err_out;
}
- BUG_ON(ep->com.state != MPA_REQ_RCVD);
- BUG_ON(!qp);
+ if (!qp) {
+ err = -EINVAL;
+ goto err_out;
+ }
set_bit(ULP_ACCEPT, &ep->com.history);
if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
(conn_param->ird > cur_max_read_depth(ep->com.dev))) {
- abort_connection(ep, NULL, GFP_KERNEL);
err = -EINVAL;
- goto err;
+ goto err_abort;
}
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
if (conn_param->ord > ep->ird) {
if (RELAXED_IRD_NEGOTIATION) {
- ep->ord = ep->ird;
+ conn_param->ord = ep->ird;
} else {
ep->ird = conn_param->ird;
ep->ord = conn_param->ord;
send_mpa_reject(ep, conn_param->private_data,
conn_param->private_data_len);
- abort_connection(ep, NULL, GFP_KERNEL);
err = -ENOMEM;
- goto err;
+ goto err_abort;
}
}
if (conn_param->ird < ep->ord) {
@@ -2914,9 +3165,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->ord <= h->rdev.lldi.max_ordird_qp) {
conn_param->ird = ep->ord;
} else {
- abort_connection(ep, NULL, GFP_KERNEL);
err = -ENOMEM;
- goto err;
+ goto err_abort;
}
}
}
@@ -2929,14 +3179,14 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
} else {
if (peer2peer &&
(ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
- (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ord == 0)
+ (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
ep->ird = 1;
}
- PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
+ pr_debug("ird %d ord %d\n", ep->ird, ep->ord);
- cm_id->add_ref(cm_id);
ep->com.cm_id = cm_id;
+ ref_cm_id(&ep->com);
ep->com.qp = qp;
ref_qp(ep);
@@ -2957,23 +3207,27 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
err = c4iw_modify_qp(ep->com.qp->rhp,
ep->com.qp, mask, &attrs, 1);
if (err)
- goto err1;
+ goto err_deref_cm_id;
+
+ set_bit(STOP_MPA_TIMER, &ep->com.flags);
err = send_mpa_reply(ep, conn_param->private_data,
conn_param->private_data_len);
if (err)
- goto err1;
+ goto err_deref_cm_id;
__state_set(&ep->com, FPDU_MODE);
established_upcall(ep);
mutex_unlock(&ep->com.mutex);
c4iw_put_ep(&ep->com);
return 0;
-err1:
- ep->com.cm_id = NULL;
- abort_connection(ep, NULL, GFP_KERNEL);
- cm_id->rem_ref(cm_id);
-err:
+err_deref_cm_id:
+ deref_cm_id(&ep->com);
+err_abort:
+ abort = 1;
+err_out:
mutex_unlock(&ep->com.mutex);
+ if (abort)
+ c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
c4iw_put_ep(&ep->com);
return err;
}
@@ -2982,19 +3236,24 @@ static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
struct in_device *ind;
int found = 0;
- struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
+ const struct in_ifaddr *ifa;
ind = in_dev_get(dev->rdev.lldi.ports[0]);
if (!ind)
return -EADDRNOTAVAIL;
- for_primary_ifa(ind) {
+ rcu_read_lock();
+ in_dev_for_each_ifa_rcu(ifa, ind) {
+ if (ifa->ifa_flags & IFA_F_SECONDARY)
+ continue;
laddr->sin_addr.s_addr = ifa->ifa_address;
raddr->sin_addr.s_addr = ifa->ifa_address;
found = 1;
break;
}
- endfor_ifa(ind);
+ rcu_read_unlock();
+
in_dev_put(ind);
return found ? 0 : -EADDRNOTAVAIL;
}
@@ -3027,9 +3286,9 @@ static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
- struct in6_addr uninitialized_var(addr);
- struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->local_addr;
- struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->remote_addr;
+ struct in6_addr addr;
+ struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+ struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
memcpy(la6->sin6_addr.s6_addr, &addr, 16);
@@ -3048,11 +3307,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct sockaddr_in *raddr;
struct sockaddr_in6 *laddr6;
struct sockaddr_in6 *raddr6;
- struct iwpm_dev_data pm_reg_msg;
- struct iwpm_sa_data pm_msg;
__u8 *ra;
int iptype;
- int iwpm_err = 0;
if ((conn_param->ord > cur_max_read_depth(dev)) ||
(conn_param->ird > cur_max_read_depth(dev))) {
@@ -3061,11 +3317,18 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
if (!ep) {
- printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
+ pr_err("%s - cannot alloc ep\n", __func__);
err = -ENOMEM;
goto out;
}
- init_timer(&ep->timer);
+
+ skb_queue_head_init(&ep->com.ep_skb_list);
+ if (alloc_ep_skb_list(&ep->com.ep_skb_list, CN_MAX_CON_BUF)) {
+ err = -ENOMEM;
+ goto fail1;
+ }
+
+ timer_setup(&ep->timer, ep_timeout, 0);
ep->plen = conn_param->private_data_len;
if (ep->plen)
memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
@@ -3076,90 +3339,65 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (peer2peer && ep->ord == 0)
ep->ord = 1;
- cm_id->add_ref(cm_id);
- ep->com.dev = dev;
ep->com.cm_id = cm_id;
+ ref_cm_id(&ep->com);
+ cm_id->provider_data = ep;
+ ep->com.dev = dev;
ep->com.qp = get_qhp(dev, conn_param->qpn);
if (!ep->com.qp) {
- PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
+ pr_warn("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
err = -EINVAL;
- goto fail1;
+ goto fail2;
}
ref_qp(ep);
- PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
- ep->com.qp, cm_id);
+ pr_debug("qpn 0x%x qp %p cm_id %p\n", conn_param->qpn,
+ ep->com.qp, cm_id);
/*
* Allocate an active TID to initiate a TCP connection.
*/
ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
if (ep->atid == -1) {
- printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
+ pr_err("%s - cannot alloc atid\n", __func__);
err = -ENOMEM;
- goto fail1;
+ goto fail2;
}
- insert_handle(dev, &dev->atid_idr, ep, ep->atid);
+ err = xa_insert_irq(&dev->atids, ep->atid, ep, GFP_KERNEL);
+ if (err)
+ goto fail5;
- memcpy(&ep->com.local_addr, &cm_id->local_addr,
+ memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
sizeof(ep->com.local_addr));
- memcpy(&ep->com.remote_addr, &cm_id->remote_addr,
+ memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
sizeof(ep->com.remote_addr));
- /* No port mapper available, go with the specified peer information */
- memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr,
- sizeof(ep->com.mapped_local_addr));
- memcpy(&ep->com.mapped_remote_addr, &cm_id->remote_addr,
- sizeof(ep->com.mapped_remote_addr));
-
- c4iw_form_reg_msg(dev, &pm_reg_msg);
- iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW);
- if (iwpm_err) {
- PDBG("%s: Port Mapper reg pid fail (err = %d).\n",
- __func__, iwpm_err);
- }
- if (iwpm_valid_pid() && !iwpm_err) {
- c4iw_form_pm_msg(ep, &pm_msg);
- iwpm_err = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_C4IW);
- if (iwpm_err)
- PDBG("%s: Port Mapper query fail (err = %d).\n",
- __func__, iwpm_err);
- else
- c4iw_record_pm_msg(ep, &pm_msg);
- }
- if (iwpm_create_mapinfo(&ep->com.local_addr,
- &ep->com.mapped_local_addr, RDMA_NL_C4IW)) {
- iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);
- err = -ENOMEM;
- goto fail1;
- }
- print_addr(&ep->com, __func__, "add_query/create_mapinfo");
- set_bit(RELEASE_MAPINFO, &ep->com.flags);
-
- laddr = (struct sockaddr_in *)&ep->com.mapped_local_addr;
- raddr = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
- laddr6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
- raddr6 = (struct sockaddr_in6 *) &ep->com.mapped_remote_addr;
+ laddr = (struct sockaddr_in *)&ep->com.local_addr;
+ raddr = (struct sockaddr_in *)&ep->com.remote_addr;
+ laddr6 = (struct sockaddr_in6 *)&ep->com.local_addr;
+ raddr6 = (struct sockaddr_in6 *) &ep->com.remote_addr;
- if (cm_id->remote_addr.ss_family == AF_INET) {
+ if (cm_id->m_remote_addr.ss_family == AF_INET) {
iptype = 4;
ra = (__u8 *)&raddr->sin_addr;
/*
* Handle loopback requests to INADDR_ANY.
*/
- if ((__force int)raddr->sin_addr.s_addr == INADDR_ANY) {
+ if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
err = pick_local_ipaddrs(dev, cm_id);
if (err)
- goto fail1;
+ goto fail3;
}
/* find a route */
- PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
- __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
- ra, ntohs(raddr->sin_port));
- ep->dst = find_route(dev, laddr->sin_addr.s_addr,
- raddr->sin_addr.s_addr, laddr->sin_port,
- raddr->sin_port, 0);
+ pr_debug("saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
+ &laddr->sin_addr, ntohs(laddr->sin_port),
+ ra, ntohs(raddr->sin_port));
+ ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
+ laddr->sin_addr.s_addr,
+ raddr->sin_addr.s_addr,
+ laddr->sin_port,
+ raddr->sin_port, cm_id->tos);
} else {
iptype = 6;
ra = (__u8 *)&raddr6->sin6_addr;
@@ -3170,37 +3408,40 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
err = pick_local_ip6addrs(dev, cm_id);
if (err)
- goto fail1;
+ goto fail3;
}
/* find a route */
- PDBG("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
- __func__, laddr6->sin6_addr.s6_addr,
- ntohs(laddr6->sin6_port),
- raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
- ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr,
- raddr6->sin6_addr.s6_addr,
- laddr6->sin6_port, raddr6->sin6_port, 0,
- raddr6->sin6_scope_id);
+ pr_debug("saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
+ laddr6->sin6_addr.s6_addr,
+ ntohs(laddr6->sin6_port),
+ raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
+ ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
+ laddr6->sin6_addr.s6_addr,
+ raddr6->sin6_addr.s6_addr,
+ laddr6->sin6_port,
+ raddr6->sin6_port, cm_id->tos,
+ raddr6->sin6_scope_id);
}
if (!ep->dst) {
- printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
+ pr_err("%s - cannot find route\n", __func__);
err = -EHOSTUNREACH;
- goto fail2;
+ goto fail3;
}
- err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true);
+ err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
+ ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
if (err) {
- printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
- goto fail3;
+ pr_err("%s - cannot alloc l2e\n", __func__);
+ goto fail4;
}
- PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
- __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
- ep->l2t->idx);
+ pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
+ ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
+ ep->l2t->idx);
state_set(&ep->com, CONNECTING);
- ep->tos = 0;
+ ep->tos = cm_id->tos;
/* send connect request to rnic */
err = send_connect(ep);
@@ -3208,13 +3449,16 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto out;
cxgb4_l2t_release(ep->l2t);
-fail3:
+fail4:
dst_release(ep->dst);
-fail2:
- remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
+fail3:
+ xa_erase_irq(&ep->com.dev->atids, ep->atid);
+fail5:
cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
+fail2:
+ skb_queue_purge(&ep->com.ep_skb_list);
+ deref_cm_id(&ep->com);
fail1:
- cm_id->rem_ref(cm_id);
c4iw_put_ep(&ep->com);
out:
return err;
@@ -3224,26 +3468,32 @@ static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
int err;
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
- &ep->com.mapped_local_addr;
+ &ep->com.local_addr;
- c4iw_init_wr_wait(&ep->com.wr_wait);
+ if (ipv6_addr_type(&sin6->sin6_addr) != IPV6_ADDR_ANY) {
+ err = cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
+ (const u32 *)&sin6->sin6_addr.s6_addr, 1);
+ if (err)
+ return err;
+ }
+ c4iw_init_wr_wait(ep->com.wr_waitp);
err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
ep->stid, &sin6->sin6_addr,
sin6->sin6_port,
ep->com.dev->rdev.lldi.rxq_ids[0]);
if (!err)
err = c4iw_wait_for_reply(&ep->com.dev->rdev,
- &ep->com.wr_wait,
+ ep->com.wr_waitp,
0, 0, __func__);
else if (err > 0)
err = net_xmit_errno(err);
- if (err)
+ if (err) {
+ cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
+ (const u32 *)&sin6->sin6_addr.s6_addr, 1);
pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
err, ep->stid,
sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port));
- else
- cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
- (const u32 *)&sin6->sin6_addr.s6_addr, 1);
+ }
return err;
}
@@ -3251,7 +3501,7 @@ static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
int err;
struct sockaddr_in *sin = (struct sockaddr_in *)
- &ep->com.mapped_local_addr;
+ &ep->com.local_addr;
if (dev->rdev.lldi.enable_fw_ofld_conn) {
do {
@@ -3260,18 +3510,22 @@ static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
sin->sin_addr.s_addr, sin->sin_port, 0,
ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0);
if (err == -EBUSY) {
+ if (c4iw_fatal_error(&ep->com.dev->rdev)) {
+ err = -EIO;
+ break;
+ }
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(usecs_to_jiffies(100));
}
} while (err == -EBUSY);
} else {
- c4iw_init_wr_wait(&ep->com.wr_wait);
+ c4iw_init_wr_wait(ep->com.wr_waitp);
err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
ep->stid, sin->sin_addr.s_addr, sin->sin_port,
0, ep->com.dev->rdev.lldi.rxq_ids[0]);
if (!err)
err = c4iw_wait_for_reply(&ep->com.dev->rdev,
- &ep->com.wr_wait,
+ ep->com.wr_waitp,
0, 0, __func__);
else if (err > 0)
err = net_xmit_errno(err);
@@ -3288,24 +3542,22 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
int err = 0;
struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
struct c4iw_listen_ep *ep;
- struct iwpm_dev_data pm_reg_msg;
- struct iwpm_sa_data pm_msg;
- int iwpm_err = 0;
might_sleep();
ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
if (!ep) {
- printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
+ pr_err("%s - cannot alloc ep\n", __func__);
err = -ENOMEM;
goto fail1;
}
- PDBG("%s ep %p\n", __func__, ep);
- cm_id->add_ref(cm_id);
+ skb_queue_head_init(&ep->com.ep_skb_list);
+ pr_debug("ep %p\n", ep);
ep->com.cm_id = cm_id;
+ ref_cm_id(&ep->com);
ep->com.dev = dev;
ep->backlog = backlog;
- memcpy(&ep->com.local_addr, &cm_id->local_addr,
+ memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
sizeof(ep->com.local_addr));
/*
@@ -3314,48 +3566,20 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
if (dev->rdev.lldi.enable_fw_ofld_conn &&
ep->com.local_addr.ss_family == AF_INET)
ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
- cm_id->local_addr.ss_family, ep);
+ cm_id->m_local_addr.ss_family, ep);
else
ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
- cm_id->local_addr.ss_family, ep);
+ cm_id->m_local_addr.ss_family, ep);
if (ep->stid == -1) {
- printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
+ pr_err("%s - cannot alloc stid\n", __func__);
err = -ENOMEM;
goto fail2;
}
- insert_handle(dev, &dev->stid_idr, ep, ep->stid);
-
- /* No port mapper available, go with the specified info */
- memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr,
- sizeof(ep->com.mapped_local_addr));
-
- c4iw_form_reg_msg(dev, &pm_reg_msg);
- iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW);
- if (iwpm_err) {
- PDBG("%s: Port Mapper reg pid fail (err = %d).\n",
- __func__, iwpm_err);
- }
- if (iwpm_valid_pid() && !iwpm_err) {
- memcpy(&pm_msg.loc_addr, &ep->com.local_addr,
- sizeof(ep->com.local_addr));
- iwpm_err = iwpm_add_mapping(&pm_msg, RDMA_NL_C4IW);
- if (iwpm_err)
- PDBG("%s: Port Mapper query fail (err = %d).\n",
- __func__, iwpm_err);
- else
- memcpy(&ep->com.mapped_local_addr,
- &pm_msg.mapped_loc_addr,
- sizeof(ep->com.mapped_local_addr));
- }
- if (iwpm_create_mapinfo(&ep->com.local_addr,
- &ep->com.mapped_local_addr, RDMA_NL_C4IW)) {
- err = -ENOMEM;
+ err = xa_insert_irq(&dev->stids, ep->stid, ep, GFP_KERNEL);
+ if (err)
goto fail3;
- }
- print_addr(&ep->com, __func__, "add_mapping/create_mapinfo");
- set_bit(RELEASE_MAPINFO, &ep->com.flags);
state_set(&ep->com, LISTEN);
if (ep->com.local_addr.ss_family == AF_INET)
err = create_server4(dev, ep);
@@ -3365,12 +3589,12 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_id->provider_data = ep;
goto out;
}
-
+ xa_erase_irq(&ep->com.dev->stids, ep->stid);
fail3:
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
ep->com.local_addr.ss_family);
fail2:
- cm_id->rem_ref(cm_id);
+ deref_cm_id(&ep->com);
c4iw_put_ep(&ep->com);
fail1:
out:
@@ -3382,7 +3606,7 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
int err;
struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
- PDBG("%s ep %p\n", __func__, ep);
+ pr_debug("ep %p\n", ep);
might_sleep();
state_set(&ep->com, DEAD);
@@ -3390,26 +3614,27 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
ep->com.local_addr.ss_family == AF_INET) {
err = cxgb4_remove_server_filter(
ep->com.dev->rdev.lldi.ports[0], ep->stid,
- ep->com.dev->rdev.lldi.rxq_ids[0], 0);
+ ep->com.dev->rdev.lldi.rxq_ids[0], false);
} else {
struct sockaddr_in6 *sin6;
- c4iw_init_wr_wait(&ep->com.wr_wait);
+ c4iw_init_wr_wait(ep->com.wr_waitp);
err = cxgb4_remove_server(
ep->com.dev->rdev.lldi.ports[0], ep->stid,
- ep->com.dev->rdev.lldi.rxq_ids[0], 0);
+ ep->com.dev->rdev.lldi.rxq_ids[0],
+ ep->com.local_addr.ss_family == AF_INET6);
if (err)
goto done;
- err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
+ err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
0, 0, __func__);
- sin6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
+ sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
- remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
+ xa_erase_irq(&ep->com.dev->stids, ep->stid);
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
ep->com.local_addr.ss_family);
done:
- cm_id->rem_ref(cm_id);
+ deref_cm_id(&ep->com);
c4iw_put_ep(&ep->com);
return err;
}
@@ -3423,8 +3648,14 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
mutex_lock(&ep->com.mutex);
- PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
- states[ep->com.state], abrupt);
+ pr_debug("ep %p state %s, abrupt %d\n", ep,
+ states[ep->com.state], abrupt);
+
+ /*
+ * Ref the ep here in case we have fatal errors causing the
+ * ep to be released and freed.
+ */
+ c4iw_get_ep(&ep->com);
rdev = &ep->com.dev->rdev;
if (c4iw_fatal_error(rdev)) {
@@ -3438,11 +3669,22 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
case MPA_REQ_RCVD:
case MPA_REP_SENT:
case FPDU_MODE:
+ case CONNECTING:
close = 1;
if (abrupt)
ep->com.state = ABORTING;
else {
ep->com.state = CLOSING;
+
+ /*
+ * if we close before we see the fw4_ack() then we fix
+ * up the timer state since we're reusing it.
+ */
+ if (ep->mpa_skb &&
+ test_bit(STOP_MPA_TIMER, &ep->com.flags)) {
+ clear_bit(STOP_MPA_TIMER, &ep->com.flags);
+ stop_ep_timer(ep);
+ }
start_ep_timer(ep);
}
set_bit(CLOSE_SENT, &ep->com.flags);
@@ -3460,27 +3702,45 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
case MORIBUND:
case ABORTING:
case DEAD:
- PDBG("%s ignoring disconnect ep %p state %u\n",
- __func__, ep, ep->com.state);
+ pr_debug("ignoring disconnect ep %p state %u\n",
+ ep, ep->com.state);
break;
default:
- BUG();
+ WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
break;
}
if (close) {
if (abrupt) {
set_bit(EP_DISC_ABORT, &ep->com.history);
- close_complete_upcall(ep, -ECONNRESET);
- ret = send_abort(ep, NULL, gfp);
+ ret = send_abort(ep);
} else {
set_bit(EP_DISC_CLOSE, &ep->com.history);
- ret = send_halfclose(ep, gfp);
+ ret = send_halfclose(ep);
}
- if (ret)
+ if (ret) {
+ set_bit(EP_DISC_FAIL, &ep->com.history);
+ if (!abrupt) {
+ stop_ep_timer(ep);
+ close_complete_upcall(ep, -EIO);
+ }
+ if (ep->com.qp) {
+ struct c4iw_qp_attributes attrs;
+
+ attrs.next_state = C4IW_QP_STATE_ERROR;
+ ret = c4iw_modify_qp(ep->com.qp->rhp,
+ ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE,
+ &attrs, 1);
+ if (ret)
+ pr_err("%s - qp <- error failed!\n",
+ __func__);
+ }
fatal = 1;
+ }
}
mutex_unlock(&ep->com.mutex);
+ c4iw_put_ep(&ep->com);
if (fatal)
release_ep_resources(ep);
return ret;
@@ -3504,6 +3764,7 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
send_fw_act_open_req(ep, atid);
return;
}
+ fallthrough;
case FW_EADDRINUSE:
set_bit(ACT_RETRY_INUSE, &ep->com.history);
if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
@@ -3525,11 +3786,11 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
state_set(&ep->com, DEAD);
if (ep->com.remote_addr.ss_family == AF_INET6) {
struct sockaddr_in6 *sin6 =
- (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
+ (struct sockaddr_in6 *)&ep->com.local_addr;
cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
- remove_handle(dev, &dev->atid_idr, atid);
+ xa_erase_irq(&dev->atids, atid);
cxgb4_free_atid(dev->rdev.lldi.tids, atid);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
@@ -3544,9 +3805,8 @@ static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
int ret;
rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
- BUG_ON(!rpl_skb);
if (req->retval) {
- PDBG("%s passive open failure %d\n", __func__, req->retval);
+ pr_err("%s passive open failure %d\n", __func__, req->retval);
mutex_lock(&dev->rdev.stats.lock);
dev->rdev.stats.pas_ofld_conn_fails++;
mutex_unlock(&dev->rdev.stats.lock);
@@ -3563,6 +3823,80 @@ static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
return;
}
+static inline u64 t4_tcb_get_field64(__be64 *tcb, u16 word)
+{
+ u64 tlo = be64_to_cpu(tcb[((31 - word) / 2)]);
+ u64 thi = be64_to_cpu(tcb[((31 - word) / 2) - 1]);
+ u64 t;
+ u32 shift = 32;
+
+ t = (thi << shift) | (tlo >> shift);
+
+ return t;
+}
+
+static inline u32 t4_tcb_get_field32(__be64 *tcb, u16 word, u32 mask, u32 shift)
+{
+ u32 v;
+ u64 t = be64_to_cpu(tcb[(31 - word) / 2]);
+
+ if (word & 0x1)
+ shift += 32;
+ v = (t >> shift) & mask;
+ return v;
+}
+
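The two helpers above treat the TCB returned by CPL_GET_TCB_RPL as sixteen big-endian 64-bit quantities holding the 32 TCB words in reverse order, which is why the index is (31 - word) / 2 and odd-numbered words need the extra 32-bit shift. A minimal user-space sketch of the same extraction, assuming the usual constants purely for illustration (TCB_T_FLAGS_W == 1 as in t4_tcb.h, TF_RX_PDU_OUT_S == 49 per the "bit 49" comment below):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define TCB_T_FLAGS_W	1	/* assumed value, mirrors t4_tcb.h */
#define TF_RX_PDU_OUT_S	49	/* "bit 49 of the t_flags", see below */

/* same arithmetic as t4_tcb_get_field64(): the low 32 bits of the result
 * are TCB word 'word', the high 32 bits are TCB word 'word + 1' */
static uint64_t get_field64(const uint64_t *tcb, uint16_t word)
{
	uint64_t tlo = be64toh(tcb[(31 - word) / 2]);
	uint64_t thi = be64toh(tcb[(31 - word) / 2 - 1]);

	return (thi << 32) | (tlo >> 32);
}

int main(void)
{
	uint64_t tcb[16] = { 0 };

	/* bit 49 of t_flags is bit 17 of TCB word 2, i.e. the low half of
	 * tcb[(31 - 2) / 2] == tcb[14] */
	tcb[14] = htobe64(1ULL << 17);

	uint64_t t_flags = get_field64(tcb, TCB_T_FLAGS_W);

	printf("TF_RX_PDU_OUT = %llu\n",
	       (unsigned long long)((t_flags >> TF_RX_PDU_OUT_S) & 1));
	return 0;
}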
+static int read_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_get_tcb_rpl *rpl = cplhdr(skb);
+ __be64 *tcb = (__be64 *)(rpl + 1);
+ unsigned int tid = GET_TID(rpl);
+ struct c4iw_ep *ep;
+ u64 t_flags_64;
+ u32 rx_pdu_out;
+
+ ep = get_ep_from_tid(dev, tid);
+ if (!ep)
+ return 0;
+ /* Examine the TF_RX_PDU_OUT (bit 49 of the t_flags) in order to
+ * determine if there's a rx PDU feedback event pending.
+ *
+ * If that bit is set, it means we'll need to re-read the TCB's
+ * rq_start value. The final value is the one present in a TCB
+ * with the TF_RX_PDU_OUT bit cleared.
+ */
+
+ t_flags_64 = t4_tcb_get_field64(tcb, TCB_T_FLAGS_W);
+ rx_pdu_out = (t_flags_64 & TF_RX_PDU_OUT_V(1)) >> TF_RX_PDU_OUT_S;
+
+ c4iw_put_ep(&ep->com); /* from get_ep_from_tid() */
+ c4iw_put_ep(&ep->com); /* from read_tcb() */
+
+ /* If TF_RX_PDU_OUT bit is set, re-read the TCB */
+ if (rx_pdu_out) {
+ if (++ep->rx_pdu_out_cnt >= 2) {
+ WARN_ONCE(1, "tcb re-read() reached the guard limit, finishing the cleanup\n");
+ goto cleanup;
+ }
+ read_tcb(ep);
+ return 0;
+ }
+
+ ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_M,
+ TCB_RQ_START_S);
+cleanup:
+ pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx);
+
+ if (test_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags))
+ finish_peer_abort(dev, ep);
+ else if (test_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags))
+ send_abort_req(ep);
+ else
+ WARN_ONCE(1, "unexpected state!");
+
+ return 0;
+}
+
static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_fw6_msg *rpl = cplhdr(skb);
@@ -3593,20 +3927,23 @@ static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
{
- u32 l2info;
- u16 vlantag, len, hdr_len, eth_hdr_len;
+ __be32 l2info;
+ __be16 hdr_len, vlantag, len;
+ u16 eth_hdr_len;
+ int tcp_hdr_len, ip_hdr_len;
u8 intf;
struct cpl_rx_pkt *cpl = cplhdr(skb);
struct cpl_pass_accept_req *req;
struct tcp_options_received tmp_opt;
struct c4iw_dev *dev;
+ enum chip_type type;
dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
/* Store values from cpl_rx_pkt in temporary location. */
- vlantag = (__force u16) cpl->vlan;
- len = (__force u16) cpl->len;
- l2info = (__force u32) cpl->l2info;
- hdr_len = (__force u16) cpl->hdr_len;
+ vlantag = cpl->vlan;
+ len = cpl->len;
+ l2info = cpl->l2info;
+ hdr_len = cpl->hdr_len;
intf = cpl->iff;
__skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
@@ -3617,26 +3954,34 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
*/
memset(&tmp_opt, 0, sizeof(tmp_opt));
tcp_clear_options(&tmp_opt);
- tcp_parse_options(skb, &tmp_opt, 0, NULL);
+ tcp_parse_options(&init_net, skb, &tmp_opt, 0, NULL);
- req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
+ req = __skb_push(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
SYN_MAC_IDX_V(RX_MACIDX_G(
- (__force int) htonl(l2info))) |
+ be32_to_cpu(l2info))) |
SYN_XACT_MATCH_F);
- eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
- RX_ETHHDR_LEN_G((__force int)htonl(l2info)) :
- RX_T5_ETHHDR_LEN_G((__force int)htonl(l2info));
- req->hdr_len = cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(
- (__force int) htonl(l2info))) |
- TCP_HDR_LEN_V(RX_TCPHDR_LEN_G(
- (__force int) htons(hdr_len))) |
- IP_HDR_LEN_V(RX_IPHDR_LEN_G(
- (__force int) htons(hdr_len))) |
- ETH_HDR_LEN_V(RX_ETHHDR_LEN_G(eth_hdr_len)));
- req->vlan = (__force __be16) vlantag;
- req->len = (__force __be16) len;
+ type = dev->rdev.lldi.adapter_type;
+ tcp_hdr_len = RX_TCPHDR_LEN_G(be16_to_cpu(hdr_len));
+ ip_hdr_len = RX_IPHDR_LEN_G(be16_to_cpu(hdr_len));
+ req->hdr_len =
+ cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(be32_to_cpu(l2info))));
+ if (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) {
+ eth_hdr_len = is_t4(type) ?
+ RX_ETHHDR_LEN_G(be32_to_cpu(l2info)) :
+ RX_T5_ETHHDR_LEN_G(be32_to_cpu(l2info));
+ req->hdr_len |= cpu_to_be32(TCP_HDR_LEN_V(tcp_hdr_len) |
+ IP_HDR_LEN_V(ip_hdr_len) |
+ ETH_HDR_LEN_V(eth_hdr_len));
+ } else { /* T6 and later */
+ eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(l2info));
+ req->hdr_len |= cpu_to_be32(T6_TCP_HDR_LEN_V(tcp_hdr_len) |
+ T6_IP_HDR_LEN_V(ip_hdr_len) |
+ T6_ETH_HDR_LEN_V(eth_hdr_len));
+ }
+ req->vlan = vlantag;
+ req->len = len;
req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
PASS_OPEN_TOS_V(tos));
req->tcpopt.mss = htons(tmp_opt.mss_clamp);
@@ -3661,8 +4006,9 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
int ret;
req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
- req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
- memset(req, 0, sizeof(*req));
+ if (!req_skb)
+ return;
+ req = __skb_put_zero(req_skb, sizeof(*req));
req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
@@ -3724,13 +4070,12 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
struct cpl_pass_accept_req *req = (void *)(rss + 1);
struct l2t_entry *e;
struct dst_entry *dst;
- struct c4iw_ep *lep;
+ struct c4iw_ep *lep = NULL;
u16 window;
struct port_info *pi;
struct net_device *pdev;
u16 rss_qid, eth_hdr_len;
int step;
- u32 tx_chan;
struct neighbour *neigh;
/* Drop all non-SYN packets */
@@ -3749,22 +4094,36 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
*/
stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
- lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
+ lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
if (!lep) {
- PDBG("%s connect request on invalid stid %d\n", __func__, stid);
+ pr_warn("%s connect request on invalid stid %d\n",
+ __func__, stid);
+ goto reject;
+ }
+
+ switch (CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type)) {
+ case CHELSIO_T4:
+ eth_hdr_len = RX_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
+ break;
+ case CHELSIO_T5:
+ eth_hdr_len = RX_T5_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
+ break;
+ case CHELSIO_T6:
+ eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
+ break;
+ default:
+ pr_err("T%d Chip is not supported\n",
+ CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type));
goto reject;
}
- eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
- RX_ETHHDR_LEN_G(htonl(cpl->l2info)) :
- RX_T5_ETHHDR_LEN_G(htonl(cpl->l2info));
if (eth_hdr_len == ETH_HLEN) {
eh = (struct ethhdr *)(req + 1);
iph = (struct iphdr *)(eh + 1);
} else {
vlan_eh = (struct vlan_ethhdr *)(req + 1);
iph = (struct iphdr *)(vlan_eh + 1);
- skb->vlan_tci = ntohs(cpl->vlan);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
}
if (iph->version != 0x4)
@@ -3775,38 +4134,39 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
skb_set_transport_header(skb, (void *)tcph - (void *)rss);
skb_get(skb);
- PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
- ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
- ntohs(tcph->source), iph->tos);
+ pr_debug("lip 0x%x lport %u pip 0x%x pport %u tos %d\n",
+ ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
+ ntohs(tcph->source), iph->tos);
- dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
- iph->tos);
+ dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
+ iph->daddr, iph->saddr, tcph->dest,
+ tcph->source, iph->tos);
if (!dst) {
- pr_err("%s - failed to find dst entry!\n",
- __func__);
+ pr_err("%s - failed to find dst entry!\n", __func__);
goto reject;
}
neigh = dst_neigh_lookup_skb(dst, skb);
if (!neigh) {
- pr_err("%s - failed to allocate neigh!\n",
- __func__);
+ pr_err("%s - failed to allocate neigh!\n", __func__);
goto free_dst;
}
if (neigh->dev->flags & IFF_LOOPBACK) {
pdev = ip_dev_find(&init_net, iph->daddr);
+ if (!pdev) {
+ pr_err("%s - failed to find device!\n", __func__);
+ goto free_dst;
+ }
e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
pdev, 0);
pi = (struct port_info *)netdev_priv(pdev);
- tx_chan = cxgb4_port_chan(pdev);
dev_put(pdev);
} else {
pdev = get_real_dev(neigh->dev);
e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
pdev, 0);
pi = (struct port_info *)netdev_priv(pdev);
- tx_chan = cxgb4_port_chan(pdev);
}
neigh_release(neigh);
if (!e) {
@@ -3837,6 +4197,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
free_dst:
dst_release(dst);
reject:
+ if (lep)
+ c4iw_put_ep(&lep->com);
return 0;
}
@@ -3844,7 +4206,7 @@ reject:
* These are the real handlers that are called from a
* work queue.
*/
-static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
+static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = {
[CPL_ACT_ESTABLISH] = act_establish,
[CPL_ACT_OPEN_RPL] = act_open_rpl,
[CPL_RX_DATA] = rx_data,
@@ -3859,8 +4221,11 @@ static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
[CPL_CLOSE_CON_RPL] = close_con_rpl,
[CPL_RDMA_TERMINATE] = terminate,
[CPL_FW4_ACK] = fw4_ack,
+ [CPL_GET_TCB_RPL] = read_tcb_rpl,
[CPL_FW6_MSG] = deferred_fw6_msg,
- [CPL_RX_PKT] = rx_pkt
+ [CPL_RX_PKT] = rx_pkt,
+ [FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe,
+ [FAKE_CPL_PASS_PUT_EP_SAFE] = _put_pass_ep_safe
};
static void process_timeout(struct c4iw_ep *ep)
@@ -3869,16 +4234,16 @@ static void process_timeout(struct c4iw_ep *ep)
int abort = 1;
mutex_lock(&ep->com.mutex);
- PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
- ep->com.state);
+ pr_debug("ep %p tid %u state %d\n", ep, ep->hwtid, ep->com.state);
set_bit(TIMEDOUT, &ep->com.history);
switch (ep->com.state) {
case MPA_REQ_SENT:
- __state_set(&ep->com, ABORTING);
connect_reply_upcall(ep, -ETIMEDOUT);
break;
case MPA_REQ_WAIT:
- __state_set(&ep->com, ABORTING);
+ case MPA_REQ_RCVD:
+ case MPA_REP_SENT:
+ case FPDU_MODE:
break;
case CLOSING:
case MORIBUND:
@@ -3888,7 +4253,6 @@ static void process_timeout(struct c4iw_ep *ep)
ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
&attrs, 1);
}
- __state_set(&ep->com, ABORTING);
close_complete_upcall(ep, -ETIMEDOUT);
break;
case ABORTING:
@@ -3906,9 +4270,9 @@ static void process_timeout(struct c4iw_ep *ep)
__func__, ep, ep->hwtid, ep->com.state);
abort = 0;
}
- if (abort)
- abort_connection(ep, NULL, GFP_KERNEL);
mutex_unlock(&ep->com.mutex);
+ if (abort)
+ c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
c4iw_put_ep(&ep->com);
}
@@ -3946,19 +4310,24 @@ static void process_work(struct work_struct *work)
dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
opcode = rpl->ot.opcode;
- BUG_ON(!work_handlers[opcode]);
- ret = work_handlers[opcode](dev, skb);
- if (!ret)
+ if (opcode >= ARRAY_SIZE(work_handlers) ||
+ !work_handlers[opcode]) {
+ pr_err("No handler for opcode 0x%x.\n", opcode);
kfree_skb(skb);
+ } else {
+ ret = work_handlers[opcode](dev, skb);
+ if (!ret)
+ kfree_skb(skb);
+ }
process_timedout_eps();
}
}
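Because work_handlers[] is now sized NUM_CPL_CMDS + NUM_FAKE_CPLS and indexed directly by the opcode taken off the wire, the dispatch above bounds-checks both the opcode and the slot before calling through it. A stand-alone sketch of the same guard, with hypothetical handler names:

#include <stdio.h>

typedef int (*handler_fn)(int arg);

static int handle_zero(int arg) { return arg; }	/* hypothetical handler */

static handler_fn handlers[4] = {
	[0] = handle_zero,
	/* remaining slots intentionally left NULL */
};

static int dispatch(unsigned int opcode, int arg)
{
	if (opcode >= sizeof(handlers) / sizeof(handlers[0]) ||
	    !handlers[opcode]) {
		fprintf(stderr, "No handler for opcode 0x%x.\n", opcode);
		return -1;
	}
	return handlers[opcode](arg);
}

int main(void)
{
	printf("%d\n", dispatch(0, 42));	/* handled: prints 42 */
	printf("%d\n", dispatch(9, 42));	/* out of range: prints -1 */
	return 0;
}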
static DECLARE_WORK(skb_work, process_work);
-static void ep_timeout(unsigned long arg)
+static void ep_timeout(struct timer_list *t)
{
- struct c4iw_ep *ep = (struct c4iw_ep *)arg;
+ struct c4iw_ep *ep = timer_container_of(ep, t, timer);
int kickit = 0;
spin_lock(&timeout_lock);
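The timer conversion in this file (timer_setup() at allocation time, timer_container_of() in the callback) replaces the old init_timer()/unsigned-long-argument style shown on the removed lines. The pattern, sketched with hypothetical names and not taken from this patch:

struct my_ep {
	struct timer_list timer;
	/* ... */
};

static void my_timeout(struct timer_list *t)
{
	struct my_ep *ep = timer_container_of(ep, t, timer);

	/* handle the expired timer for 'ep' */
}

static void my_ep_init(struct my_ep *ep)
{
	timer_setup(&ep->timer, my_timeout, 0);
}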
@@ -4000,8 +4369,8 @@ static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
if (rpl->status != CPL_ERR_NONE) {
- printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
- "for tid %u\n", rpl->status, GET_TID(rpl));
+ pr_err("Unexpected SET_TCB_RPL status %u for tid %u\n",
+ rpl->status, GET_TID(rpl));
}
kfree_skb(skb);
return 0;
@@ -4013,15 +4382,15 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_wr_wait *wr_waitp;
int ret;
- PDBG("%s type %u\n", __func__, rpl->type);
+ pr_debug("type %u\n", rpl->type);
switch (rpl->type) {
case FW6_TYPE_WR_RPL:
ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
- PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
+ pr_debug("wr_waitp %p ret %u\n", wr_waitp, ret);
if (wr_waitp)
- c4iw_wake_up(wr_waitp, ret ? -ret : 0);
+ c4iw_wake_up_deref(wr_waitp, ret ? -ret : 0);
kfree_skb(skb);
break;
case FW6_TYPE_CQE:
@@ -4029,8 +4398,8 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
sched(dev, skb);
break;
default:
- printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
- rpl->type);
+ pr_err("%s unexpected fw6 msg type %u\n",
+ __func__, rpl->type);
kfree_skb(skb);
break;
}
@@ -4041,38 +4410,25 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_abort_req_rss *req = cplhdr(skb);
struct c4iw_ep *ep;
- struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(req);
- ep = lookup_tid(t, tid);
+ ep = get_ep_from_tid(dev, tid);
+ /* This EP will be dereferenced in peer_abort() */
if (!ep) {
- printk(KERN_WARNING MOD
- "Abort on non-existent endpoint, tid %d\n", tid);
+ pr_warn("Abort on non-existent endpoint, tid %d\n", tid);
kfree_skb(skb);
return 0;
}
- if (is_neg_adv(req->status)) {
- PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
- __func__, ep->hwtid, req->status,
- neg_adv_str(req->status));
- ep->stats.abort_neg_adv++;
- dev->rdev.stats.neg_adv++;
- kfree_skb(skb);
- return 0;
+ if (cxgb_is_neg_adv(req->status)) {
+ pr_debug("Negative advice on abort- tid %u status %d (%s)\n",
+ ep->hwtid, req->status,
+ neg_adv_str(req->status));
+ goto out;
}
- PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
- ep->com.state);
+ pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid, ep->com.state);
- /*
- * Wake up any threads in rdma_init() or rdma_fini().
- * However, if we are on MPAv2 and want to retry with MPAv1
- * then, don't wake up yet.
- */
- if (mpa_rev == 2 && !ep->tried_with_mpa_v1) {
- if (ep->com.state != MPA_REQ_SENT)
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
- } else
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
+out:
sched(dev, skb);
return 0;
}
@@ -4097,16 +4453,16 @@ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
[CPL_RDMA_TERMINATE] = sched,
[CPL_FW4_ACK] = sched,
[CPL_SET_TCB_RPL] = set_tcb_rpl,
+ [CPL_GET_TCB_RPL] = sched,
[CPL_FW6_MSG] = fw6_msg,
[CPL_RX_PKT] = sched
};
int __init c4iw_cm_init(void)
{
- spin_lock_init(&timeout_lock);
skb_queue_head_init(&rxq);
- workq = create_singlethread_workqueue("iw_cxgb4");
+ workq = alloc_ordered_workqueue("iw_cxgb4", WQ_MEM_RECLAIM);
if (!workq)
return -ENOMEM;
@@ -4116,6 +4472,5 @@ int __init c4iw_cm_init(void)
void c4iw_cm_term(void)
{
WARN_ON(!list_empty(&timeout_list));
- flush_workqueue(workq);
destroy_workqueue(workq);
}
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 92d518382a9f..14ced7b667fa 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -30,61 +30,57 @@
* SOFTWARE.
*/
+#include <rdma/uverbs_ioctl.h>
+
#include "iw_cxgb4.h"
-static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
- struct c4iw_dev_ucontext *uctx)
+static void destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
+ struct c4iw_dev_ucontext *uctx, struct sk_buff *skb,
+ struct c4iw_wr_wait *wr_waitp)
{
struct fw_ri_res_wr *res_wr;
struct fw_ri_res *res;
int wr_len;
- struct c4iw_wr_wait wr_wait;
- struct sk_buff *skb;
- int ret;
- wr_len = sizeof *res_wr + sizeof *res;
- skb = alloc_skb(wr_len, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
+ wr_len = sizeof(*res_wr) + sizeof(*res);
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
- res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
- memset(res_wr, 0, wr_len);
+ res_wr = __skb_put_zero(skb, wr_len);
res_wr->op_nres = cpu_to_be32(
FW_WR_OP_V(FW_RI_RES_WR) |
FW_RI_RES_WR_NRES_V(1) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
- res_wr->cookie = (uintptr_t)&wr_wait;
+ res_wr->cookie = (uintptr_t)wr_waitp;
res = res_wr->res;
res->u.cq.restype = FW_RI_RES_TYPE_CQ;
res->u.cq.op = FW_RI_RES_OP_RESET;
res->u.cq.iqid = cpu_to_be32(cq->cqid);
- c4iw_init_wr_wait(&wr_wait);
- ret = c4iw_ofld_send(rdev, skb);
- if (!ret) {
- ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
- }
+ c4iw_init_wr_wait(wr_waitp);
+ c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
kfree(cq->sw_queue);
dma_free_coherent(&(rdev->lldi.pdev->dev),
cq->memsize, cq->queue,
dma_unmap_addr(cq, mapping));
c4iw_put_cqid(rdev, cq->cqid, uctx);
- return ret;
}
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
- struct c4iw_dev_ucontext *uctx)
+ struct c4iw_dev_ucontext *uctx,
+ struct c4iw_wr_wait *wr_waitp)
{
struct fw_ri_res_wr *res_wr;
struct fw_ri_res *res;
int wr_len;
int user = (uctx != &rdev->uctx);
- struct c4iw_wr_wait wr_wait;
int ret;
struct sk_buff *skb;
+ struct c4iw_ucontext *ucontext = NULL;
+
+ if (user)
+ ucontext = container_of(uctx, struct c4iw_ucontext, uctx);
cq->cqid = c4iw_get_cqid(rdev, uctx);
if (!cq->cqid) {
@@ -106,10 +102,19 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
goto err3;
}
dma_unmap_addr_set(cq, mapping, cq->dma_addr);
- memset(cq->queue, 0, cq->memsize);
+
+ if (user && ucontext->is_32b_cqe) {
+ cq->qp_errp = &((struct t4_status_page *)
+ ((u8 *)cq->queue + (cq->size - 1) *
+ (sizeof(*cq->queue) / 2)))->qp_err;
+ } else {
+ cq->qp_errp = &((struct t4_status_page *)
+ ((u8 *)cq->queue + (cq->size - 1) *
+ sizeof(*cq->queue)))->qp_err;
+ }
/* build fw_ri_res_wr */
- wr_len = sizeof *res_wr + sizeof *res;
+ wr_len = sizeof(*res_wr) + sizeof(*res);
skb = alloc_skb(wr_len, GFP_KERNEL);
if (!skb) {
@@ -118,14 +123,13 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
}
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
- res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
- memset(res_wr, 0, wr_len);
+ res_wr = __skb_put_zero(skb, wr_len);
res_wr->op_nres = cpu_to_be32(
FW_WR_OP_V(FW_RI_RES_WR) |
FW_RI_RES_WR_NRES_V(1) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
- res_wr->cookie = (uintptr_t)&wr_wait;
+ res_wr->cookie = (uintptr_t)wr_waitp;
res = res_wr->res;
res->u.cq.restype = FW_RI_RES_TYPE_CQ;
res->u.cq.op = FW_RI_RES_OP_WRITE;
@@ -141,17 +145,14 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
FW_RI_RES_WR_IQPCIECH_V(2) |
FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
FW_RI_RES_WR_IQO_F |
- FW_RI_RES_WR_IQESIZE_V(1));
+ ((user && ucontext->is_32b_cqe) ?
+ FW_RI_RES_WR_IQESIZE_V(1) :
+ FW_RI_RES_WR_IQESIZE_V(2)));
res->u.cq.iqsize = cpu_to_be16(cq->size);
res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
- c4iw_init_wr_wait(&wr_wait);
-
- ret = c4iw_ofld_send(rdev, skb);
- if (ret)
- goto err4;
- PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
- ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
+ c4iw_init_wr_wait(wr_waitp);
+ ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
if (ret)
goto err4;
@@ -159,11 +160,11 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
cq->gts = rdev->lldi.gts_reg;
cq->rdev = rdev;
- cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
+ cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, CXGB4_BAR2_QTYPE_INGRESS,
&cq->bar2_qid,
user ? &cq->bar2_pa : NULL);
- if (user && !cq->bar2_va) {
- pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
+ if (user && !cq->bar2_pa) {
+ pr_warn("%s: cqid %u not in BAR2 range\n",
pci_name(rdev->lldi.pdev), cq->cqid);
ret = -EINVAL;
goto err4;
@@ -180,12 +181,12 @@ err1:
return ret;
}
-static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
+static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq, u32 srqidx)
{
struct t4_cqe cqe;
- PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
- wq, cq, cq->sw_cidx, cq->sw_pidx);
+ pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
+ wq, cq, cq->sw_cidx, cq->sw_pidx);
memset(&cqe, 0, sizeof(cqe));
cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
CQE_OPCODE_V(FW_RI_SEND) |
@@ -193,6 +194,8 @@ static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
CQE_SWCQE_V(1) |
CQE_QPID_V(wq->sq.qid));
cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+ if (srqidx)
+ cqe.u.srcqe.abs_rqe_idx = cpu_to_be32(srqidx);
cq->sw_queue[cq->sw_pidx] = cqe;
t4_swcq_produce(cq);
}
@@ -202,11 +205,10 @@ int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
int flushed = 0;
int in_use = wq->rq.in_use - count;
- BUG_ON(in_use < 0);
- PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
- wq, cq, wq->rq.in_use, count);
+ pr_debug("wq %p cq %p rq.in_use %u skip count %u\n",
+ wq, cq, wq->rq.in_use, count);
while (in_use--) {
- insert_recv_cqe(wq, cq);
+ insert_recv_cqe(wq, cq, 0);
flushed++;
}
return flushed;
@@ -217,8 +219,8 @@ static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
{
struct t4_cqe cqe;
- PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
- wq, cq, cq->sw_cidx, cq->sw_pidx);
+ pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
+ wq, cq, cq->sw_cidx, cq->sw_pidx);
memset(&cqe, 0, sizeof(cqe));
cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
CQE_OPCODE_V(swcqe->opcode) |
@@ -245,14 +247,11 @@ int c4iw_flush_sq(struct c4iw_qp *qhp)
if (wq->sq.flush_cidx == -1)
wq->sq.flush_cidx = wq->sq.cidx;
idx = wq->sq.flush_cidx;
- BUG_ON(idx >= wq->sq.size);
while (idx != wq->sq.pidx) {
swsqe = &wq->sq.sw_sq[idx];
- BUG_ON(swsqe->flushed);
swsqe->flushed = 1;
insert_sq_cqe(wq, cq, swsqe);
if (wq->sq.oldest_read == swsqe) {
- BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
advance_oldest_read(wq);
}
flushed++;
@@ -273,7 +272,6 @@ static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
if (wq->sq.flush_cidx == -1)
wq->sq.flush_cidx = wq->sq.cidx;
cidx = wq->sq.flush_cidx;
- BUG_ON(cidx > wq->sq.size);
while (cidx != wq->sq.pidx) {
swsqe = &wq->sq.sw_sq[cidx];
@@ -282,13 +280,11 @@ static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
cidx = 0;
} else if (swsqe->complete) {
- BUG_ON(swsqe->flushed);
-
/*
* Insert this completed cqe into the swcq.
*/
- PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
- __func__, cidx, cq->sw_pidx);
+ pr_debug("moving cqe into swcq sq idx %u cq idx %u\n",
+ cidx, cq->sw_pidx);
swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
t4_swcq_produce(cq);
@@ -336,14 +332,14 @@ static void advance_oldest_read(struct t4_wq *wq)
* Deal with out-of-order and/or completions that complete
* prior unsignalled WRs.
*/
-void c4iw_flush_hw_cq(struct c4iw_cq *chp)
+void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp)
{
struct t4_cqe *hw_cqe, *swcqe, read_cqe;
struct c4iw_qp *qhp;
struct t4_swsqe *swsqe;
int ret;
- PDBG("%s cqid 0x%x\n", __func__, chp->cq.cqid);
+ pr_debug("cqid 0x%x\n", chp->cq.cqid);
ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
/*
@@ -360,6 +356,13 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
if (qhp == NULL)
goto next_cqe;
+ if (flush_qhp != qhp) {
+ spin_lock(&qhp->lock);
+
+ if (qhp->wq.flushed == 1)
+ goto next_cqe;
+ }
+
if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
goto next_cqe;
@@ -411,11 +414,18 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
next_cqe:
t4_hwcq_consume(&chp->cq);
ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
+ if (qhp && flush_qhp != qhp)
+ spin_unlock(&qhp->lock);
}
}
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
+ if (DRAIN_CQE(cqe)) {
+ WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
+ return 0;
+ }
+
if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
return 0;
@@ -436,7 +446,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
u32 ptr;
*count = 0;
- PDBG("%s count zero %d\n", __func__, *count);
+ pr_debug("count zero %d\n", *count);
ptr = cq->sw_cidx;
while (ptr != cq->sw_pidx) {
cqe = &cq->sw_queue[ptr];
@@ -446,7 +456,73 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
if (++ptr == cq->size)
ptr = 0;
}
- PDBG("%s cq %p count %d\n", __func__, cq, *count);
+ pr_debug("cq %p count %d\n", cq, *count);
+}
+
+static void post_pending_srq_wrs(struct t4_srq *srq)
+{
+ struct t4_srq_pending_wr *pwr;
+ u16 idx = 0;
+
+ while (srq->pending_in_use) {
+ pwr = &srq->pending_wrs[srq->pending_cidx];
+ srq->sw_rq[srq->pidx].wr_id = pwr->wr_id;
+ srq->sw_rq[srq->pidx].valid = 1;
+
+ pr_debug("%s posting pending cidx %u pidx %u wq_pidx %u in_use %u rq_size %u wr_id %llx\n",
+ __func__,
+ srq->cidx, srq->pidx, srq->wq_pidx,
+ srq->in_use, srq->size,
+ (unsigned long long)pwr->wr_id);
+
+ c4iw_copy_wr_to_srq(srq, &pwr->wqe, pwr->len16);
+ t4_srq_consume_pending_wr(srq);
+ t4_srq_produce(srq, pwr->len16);
+ idx += DIV_ROUND_UP(pwr->len16 * 16, T4_EQ_ENTRY_SIZE);
+ }
+
+ if (idx) {
+ t4_ring_srq_db(srq, idx, pwr->len16, &pwr->wqe);
+ srq->queue[srq->size].status.host_wq_pidx =
+ srq->wq_pidx;
+ }
+}
+
+static u64 reap_srq_cqe(struct t4_cqe *hw_cqe, struct t4_srq *srq)
+{
+ int rel_idx = CQE_ABS_RQE_IDX(hw_cqe) - srq->rqt_abs_idx;
+ u64 wr_id;
+
+ srq->sw_rq[rel_idx].valid = 0;
+ wr_id = srq->sw_rq[rel_idx].wr_id;
+
+ if (rel_idx == srq->cidx) {
+ pr_debug("%s in order cqe rel_idx %u cidx %u pidx %u wq_pidx %u in_use %u rq_size %u wr_id %llx\n",
+ __func__, rel_idx, srq->cidx, srq->pidx,
+ srq->wq_pidx, srq->in_use, srq->size,
+ (unsigned long long)srq->sw_rq[rel_idx].wr_id);
+ t4_srq_consume(srq);
+ while (srq->ooo_count && !srq->sw_rq[srq->cidx].valid) {
+ pr_debug("%s eat ooo cidx %u pidx %u wq_pidx %u in_use %u rq_size %u ooo_count %u wr_id %llx\n",
+ __func__, srq->cidx, srq->pidx,
+ srq->wq_pidx, srq->in_use,
+ srq->size, srq->ooo_count,
+ (unsigned long long)
+ srq->sw_rq[srq->cidx].wr_id);
+ t4_srq_consume_ooo(srq);
+ }
+ if (srq->ooo_count == 0 && srq->pending_in_use)
+ post_pending_srq_wrs(srq);
+ } else {
+ pr_debug("%s ooo cqe rel_idx %u cidx %u pidx %u wq_pidx %u in_use %u rq_size %u ooo_count %u wr_id %llx\n",
+ __func__, rel_idx, srq->cidx,
+ srq->pidx, srq->wq_pidx,
+ srq->in_use, srq->size,
+ srq->ooo_count,
+ (unsigned long long)srq->sw_rq[rel_idx].wr_id);
+ t4_srq_produce_ooo(srq);
+ }
+ return wr_id;
}
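reap_srq_cqe() above retires an in-order completion immediately and then sweeps up any earlier out-of-order completions that have become contiguous with cidx; a completion that arrives out of order is only marked invalid and counted. A simplified user-space model of that bookkeeping (hypothetical names, wrap handling reduced to a modulo, the in_use/pending counters omitted):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SRQ_SIZE 8

struct model_srq {
	struct { bool valid; uint64_t wr_id; } sw_rq[SRQ_SIZE];
	unsigned int cidx;	/* oldest outstanding entry */
	unsigned int ooo_count;	/* completions seen out of order */
};

static uint64_t model_reap(struct model_srq *srq, unsigned int rel_idx)
{
	uint64_t wr_id = srq->sw_rq[rel_idx].wr_id;

	srq->sw_rq[rel_idx].valid = false;
	if (rel_idx == srq->cidx) {
		/* in order: retire it, then retire any parked out-of-order
		 * entries that are now contiguous with cidx */
		srq->cidx = (srq->cidx + 1) % SRQ_SIZE;
		while (srq->ooo_count && !srq->sw_rq[srq->cidx].valid) {
			srq->cidx = (srq->cidx + 1) % SRQ_SIZE;
			srq->ooo_count--;
		}
	} else {
		/* out of order: just park it */
		srq->ooo_count++;
	}
	return wr_id;
}

int main(void)
{
	struct model_srq srq = { .cidx = 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		srq.sw_rq[i].valid = true;
		srq.sw_rq[i].wr_id = 100 + i;
	}
	/* entry 1 completes before entry 0: parked, cidx stays put */
	printf("reaped %llu, cidx %u ooo %u\n",
	       (unsigned long long)model_reap(&srq, 1), srq.cidx, srq.ooo_count);
	/* entry 0 completes: it retires and the parked entry 1 is swept up */
	printf("reaped %llu, cidx %u ooo %u\n",
	       (unsigned long long)model_reap(&srq, 0), srq.cidx, srq.ooo_count);
	return 0;
}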
/*
@@ -466,7 +542,8 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
* -EOVERFLOW CQ overflow detected.
*/
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
- u8 *cqe_flushed, u64 *cookie, u32 *credit)
+ u8 *cqe_flushed, u64 *cookie, u32 *credit,
+ struct t4_srq *srq)
{
int ret = 0;
struct t4_cqe *hw_cqe, read_cqe;
@@ -477,12 +554,11 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
if (ret)
return ret;
- PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
- " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
- __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
- CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
- CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
- CQE_WRID_LOW(hw_cqe));
+ pr_debug("CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
+ CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
+ CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
+ CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
+ CQE_WRID_LOW(hw_cqe));
/*
* skip cqe's not affiliated with a QP.
@@ -509,6 +585,15 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
}
/*
+ * Special cqe for drain WR completions...
+ */
+ if (DRAIN_CQE(hw_cqe)) {
+ *cookie = CQE_DRAIN_COOKIE(hw_cqe);
+ *cqe = *hw_cqe;
+ goto skip_cqe;
+ }
+
+ /*
* Gotta tweak READ completions:
* 1) the cqe doesn't contain the sq_wptr from the wr.
* 2) opcode not reflected from the wr.
@@ -523,7 +608,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
*/
if (CQE_TYPE(hw_cqe) == 1) {
if (CQE_STATUS(hw_cqe))
- t4_set_wq_in_error(wq);
+ t4_set_wq_in_error(wq, 0);
ret = -EAGAIN;
goto skip_cqe;
}
@@ -534,7 +619,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
*/
if (CQE_WRID_STAG(hw_cqe) == 1) {
if (CQE_STATUS(hw_cqe))
- t4_set_wq_in_error(wq);
+ t4_set_wq_in_error(wq, 0);
ret = -EAGAIN;
goto skip_cqe;
}
@@ -559,7 +644,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
*cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
- t4_set_wq_in_error(wq);
+ t4_set_wq_in_error(wq, 0);
}
/*
@@ -573,16 +658,10 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
* then we complete this with T4_ERR_MSN and mark the wq in
* error.
*/
-
- if (t4_rq_empty(wq)) {
- t4_set_wq_in_error(wq);
- ret = -EAGAIN;
- goto skip_cqe;
- }
- if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
- t4_set_wq_in_error(wq);
- hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
- goto proc_cqe;
+ if (unlikely(!CQE_STATUS(hw_cqe) &&
+ CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
+ t4_set_wq_in_error(wq, 0);
+ hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN));
}
goto proc_cqe;
}
@@ -601,8 +680,8 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
struct t4_swsqe *swsqe;
- PDBG("%s out of order completion going in sw_sq at idx %u\n",
- __func__, CQE_WRID_SQ_IDX(hw_cqe));
+ pr_debug("out of order completion going in sw_sq at idx %u\n",
+ CQE_WRID_SQ_IDX(hw_cqe));
swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
swsqe->cqe = *hw_cqe;
swsqe->complete = 1;
@@ -619,7 +698,6 @@ proc_cqe:
*/
if (SQ_TYPE(hw_cqe)) {
int idx = CQE_WRID_SQ_IDX(hw_cqe);
- BUG_ON(idx >= wq->sq.size);
/*
* Account for any unsignaled completions completed by
@@ -633,21 +711,24 @@ proc_cqe:
wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
else
wq->sq.in_use -= idx - wq->sq.cidx;
- BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);
wq->sq.cidx = (uint16_t)idx;
- PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
+ pr_debug("completing sq idx %u\n", wq->sq.cidx);
*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
if (c4iw_wr_log)
c4iw_log_wr_stats(wq, hw_cqe);
t4_sq_consume(wq);
} else {
- PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
- *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
- BUG_ON(t4_rq_empty(wq));
- if (c4iw_wr_log)
- c4iw_log_wr_stats(wq, hw_cqe);
- t4_rq_consume(wq);
+ if (!srq) {
+ pr_debug("completing rq idx %u\n", wq->rq.cidx);
+ *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
+ if (c4iw_wr_log)
+ c4iw_log_wr_stats(wq, hw_cqe);
+ t4_rq_consume(wq);
+ } else {
+ *cookie = reap_srq_cqe(hw_cqe, srq);
+ }
+ wq->rq.msn++;
goto skip_cqe;
}
@@ -659,49 +740,29 @@ flush_wq:
skip_cqe:
if (SW_CQE(hw_cqe)) {
- PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
- __func__, cq, cq->cqid, cq->sw_cidx);
+ pr_debug("cq %p cqid 0x%x skip sw cqe cidx %u\n",
+ cq, cq->cqid, cq->sw_cidx);
t4_swcq_consume(cq);
} else {
- PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
- __func__, cq, cq->cqid, cq->cidx);
+ pr_debug("cq %p cqid 0x%x skip hw cqe cidx %u\n",
+ cq, cq->cqid, cq->cidx);
t4_hwcq_consume(cq);
}
return ret;
}
-/*
- * Get one cq entry from c4iw and map it to openib.
- *
- * Returns:
- * 0 cqe returned
- * -ENODATA EMPTY;
- * -EAGAIN caller must try again
- * any other -errno fatal error
- */
-static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
+static int __c4iw_poll_cq_one(struct c4iw_cq *chp, struct c4iw_qp *qhp,
+ struct ib_wc *wc, struct c4iw_srq *srq)
{
- struct c4iw_qp *qhp = NULL;
- struct t4_cqe uninitialized_var(cqe), *rd_cqe;
- struct t4_wq *wq;
+ struct t4_cqe cqe;
+ struct t4_wq *wq = qhp ? &qhp->wq : NULL;
u32 credit = 0;
u8 cqe_flushed;
u64 cookie = 0;
int ret;
- ret = t4_next_cqe(&chp->cq, &rd_cqe);
-
- if (ret)
- return ret;
-
- qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
- if (!qhp)
- wq = NULL;
- else {
- spin_lock(&qhp->lock);
- wq = &(qhp->wq);
- }
- ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
+ ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit,
+ srq ? &srq->wq : NULL);
if (ret)
goto out;
@@ -710,24 +771,51 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
wc->vendor_err = CQE_STATUS(&cqe);
wc->wc_flags = 0;
- PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
- "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
- CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
- CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);
+ /*
+ * Simulate a SRQ_LIMIT_REACHED HW notification if required.
+ */
+ if (srq && !(srq->flags & T4_SRQ_LIMIT_SUPPORT) && srq->armed &&
+ srq->wq.in_use < srq->srq_limit)
+ c4iw_dispatch_srq_limit_reached_event(srq);
+
+ pr_debug("qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
+ CQE_QPID(&cqe),
+ CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
+ CQE_STATUS(&cqe), CQE_LEN(&cqe),
+ CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
+ (unsigned long long)cookie);
if (CQE_TYPE(&cqe) == 0) {
if (!CQE_STATUS(&cqe))
wc->byte_len = CQE_LEN(&cqe);
else
wc->byte_len = 0;
- wc->opcode = IB_WC_RECV;
- if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
- CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
+
+ switch (CQE_OPCODE(&cqe)) {
+ case FW_RI_SEND:
+ wc->opcode = IB_WC_RECV;
+ break;
+ case FW_RI_SEND_WITH_INV:
+ case FW_RI_SEND_WITH_SE_INV:
+ wc->opcode = IB_WC_RECV;
wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
+ break;
+ case FW_RI_WRITE_IMMEDIATE:
+ wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ wc->ex.imm_data = CQE_IMM_DATA(&cqe);
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ break;
+ default:
+ pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
+ CQE_OPCODE(&cqe), CQE_QPID(&cqe));
+ ret = -EINVAL;
+ goto out;
}
} else {
switch (CQE_OPCODE(&cqe)) {
+ case FW_RI_WRITE_IMMEDIATE:
case FW_RI_RDMA_WRITE:
wc->opcode = IB_WC_RDMA_WRITE;
break;
@@ -744,19 +832,20 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
case FW_RI_SEND_WITH_SE:
wc->opcode = IB_WC_SEND;
break;
- case FW_RI_BIND_MW:
- wc->opcode = IB_WC_BIND_MW;
- break;
case FW_RI_LOCAL_INV:
wc->opcode = IB_WC_LOCAL_INV;
break;
case FW_RI_FAST_REGISTER:
- wc->opcode = IB_WC_FAST_REG_MR;
+ wc->opcode = IB_WC_REG_MR;
+
+ /* Invalidate the MR if the fastreg failed */
+ if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS)
+ c4iw_invalidate_mr(qhp->rhp,
+ CQE_WRID_FR_STAG(&cqe));
break;
default:
- printk(KERN_ERR MOD "Unexpected opcode %d "
- "in the CQE received for QPID=0x%0x\n",
+ pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
CQE_OPCODE(&cqe), CQE_QPID(&cqe));
ret = -EINVAL;
goto out;
@@ -811,15 +900,49 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
wc->status = IB_WC_WR_FLUSH_ERR;
break;
default:
- printk(KERN_ERR MOD
- "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
+ pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
CQE_STATUS(&cqe), CQE_QPID(&cqe));
wc->status = IB_WC_FATAL_ERR;
}
}
out:
- if (wq)
+ return ret;
+}
+
+/*
+ * Get one cq entry from c4iw and map it to openib.
+ *
+ * Returns:
+ * 0 cqe returned
+ * -ENODATA EMPTY;
+ * -EAGAIN caller must try again
+ * any other -errno fatal error
+ */
+static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
+{
+ struct c4iw_srq *srq = NULL;
+ struct c4iw_qp *qhp = NULL;
+ struct t4_cqe *rd_cqe;
+ int ret;
+
+ ret = t4_next_cqe(&chp->cq, &rd_cqe);
+
+ if (ret)
+ return ret;
+
+ qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
+ if (qhp) {
+ spin_lock(&qhp->lock);
+ srq = qhp->srq;
+ if (srq)
+ spin_lock(&srq->lock);
+ ret = __c4iw_poll_cq_one(chp, qhp, wc, srq);
spin_unlock(&qhp->lock);
+ if (srq)
+ spin_unlock(&srq->lock);
+ } else {
+ ret = __c4iw_poll_cq_one(chp, NULL, wc, NULL);
+ }
return ret;
}
@@ -844,56 +967,78 @@ int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
return !err || err == -ENODATA ? npolled : err;
}
-int c4iw_destroy_cq(struct ib_cq *ib_cq)
+void c4iw_cq_rem_ref(struct c4iw_cq *chp)
+{
+ if (refcount_dec_and_test(&chp->refcnt))
+ complete(&chp->cq_rel_comp);
+}
+
+int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
struct c4iw_cq *chp;
struct c4iw_ucontext *ucontext;
- PDBG("%s ib_cq %p\n", __func__, ib_cq);
+ pr_debug("ib_cq %p\n", ib_cq);
chp = to_c4iw_cq(ib_cq);
- remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
- atomic_dec(&chp->refcnt);
- wait_event(chp->wait, !atomic_read(&chp->refcnt));
+ xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
+ c4iw_cq_rem_ref(chp);
+ wait_for_completion(&chp->cq_rel_comp);
- ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
- : NULL;
+ ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
+ ibucontext);
destroy_cq(&chp->rhp->rdev, &chp->cq,
- ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
- kfree(chp);
+ ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
+ chp->destroy_skb, chp->wr_waitp);
+ c4iw_put_wr_wait(chp->wr_waitp);
return 0;
}
-struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *ib_context,
- struct ib_udata *udata)
+int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
{
+ struct ib_udata *udata = &attrs->driver_udata;
+ struct ib_device *ibdev = ibcq->device;
int entries = attr->cqe;
int vector = attr->comp_vector;
- struct c4iw_dev *rhp;
- struct c4iw_cq *chp;
+ struct c4iw_dev *rhp = to_c4iw_dev(ibcq->device);
+ struct c4iw_cq *chp = to_c4iw_cq(ibcq);
+ struct c4iw_create_cq ucmd;
struct c4iw_create_cq_resp uresp;
- struct c4iw_ucontext *ucontext = NULL;
- int ret;
+ int ret, wr_len;
size_t memsize, hwentries;
struct c4iw_mm_entry *mm, *mm2;
+ struct c4iw_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct c4iw_ucontext, ibucontext);
- PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
+ pr_debug("ib_dev %p entries %d\n", ibdev, entries);
if (attr->flags)
- return ERR_PTR(-EINVAL);
+ return -EOPNOTSUPP;
- rhp = to_c4iw_dev(ibdev);
+ if (entries < 1 || entries > ibdev->attrs.max_cqe)
+ return -EINVAL;
if (vector >= rhp->rdev.lldi.nciq)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
+
+ if (udata) {
+ if (udata->inlen < sizeof(ucmd))
+ ucontext->is_32b_cqe = 1;
+ }
- chp = kzalloc(sizeof(*chp), GFP_KERNEL);
- if (!chp)
- return ERR_PTR(-ENOMEM);
+ chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+ if (!chp->wr_waitp) {
+ ret = -ENOMEM;
+ goto err_free_chp;
+ }
+ c4iw_init_wr_wait(chp->wr_waitp);
- if (ib_context)
- ucontext = to_c4iw_ucontext(ib_context);
+ wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
+ chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
+ if (!chp->destroy_skb) {
+ ret = -ENOMEM;
+ goto err_free_wr_wait;
+ }
/* account for the status page. */
entries++;
@@ -918,41 +1063,46 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
if (hwentries < 64)
hwentries = 64;
- memsize = hwentries * sizeof *chp->cq.queue;
+ memsize = hwentries * ((ucontext && ucontext->is_32b_cqe) ?
+ (sizeof(*chp->cq.queue) / 2) : sizeof(*chp->cq.queue));
/*
* memsize must be a multiple of the page size if its a user cq.
*/
- if (ucontext)
+ if (udata)
memsize = roundup(memsize, PAGE_SIZE);
+
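/* Illustrative sizing note (not part of the patch): with the 64-byte CQE
 * assumed by this series, the 64-entry minimum gives memsize = 64 * 64 =
 * 4096 bytes, exactly one 4 KiB page; a 32b-CQE user context needs only
 * 64 * 32 = 2048 but is still rounded up to PAGE_SIZE so the queue can be
 * mmap()ed to userspace. */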
chp->cq.size = hwentries;
chp->cq.memsize = memsize;
chp->cq.vector = vector;
ret = create_cq(&rhp->rdev, &chp->cq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
+ chp->wr_waitp);
if (ret)
- goto err1;
+ goto err_free_skb;
chp->rhp = rhp;
chp->cq.size--; /* status page */
chp->ibcq.cqe = entries - 2;
spin_lock_init(&chp->lock);
spin_lock_init(&chp->comp_handler_lock);
- atomic_set(&chp->refcnt, 1);
- init_waitqueue_head(&chp->wait);
- ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
+ refcount_set(&chp->refcnt, 1);
+ init_completion(&chp->cq_rel_comp);
+ ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
if (ret)
- goto err2;
+ goto err_destroy_cq;
if (ucontext) {
- mm = kmalloc(sizeof *mm, GFP_KERNEL);
+ ret = -ENOMEM;
+ mm = kmalloc(sizeof(*mm), GFP_KERNEL);
if (!mm)
- goto err3;
- mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
+ goto err_remove_handle;
+ mm2 = kmalloc(sizeof(*mm2), GFP_KERNEL);
if (!mm2)
- goto err4;
+ goto err_free_mm;
+ memset(&uresp, 0, sizeof(uresp));
uresp.qid_mask = rhp->rdev.cqmask;
uresp.cqid = chp->cq.cqid;
uresp.size = chp->cq.size;
@@ -962,57 +1112,86 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
ucontext->key += PAGE_SIZE;
uresp.gts_key = ucontext->key;
ucontext->key += PAGE_SIZE;
+ /* communicate to the userspace that
+ * kernel driver supports 64B CQE
+ */
+ uresp.flags |= C4IW_64B_CQE;
+
spin_unlock(&ucontext->mmap_lock);
ret = ib_copy_to_udata(udata, &uresp,
- sizeof(uresp) - sizeof(uresp.reserved));
+ ucontext->is_32b_cqe ?
+ sizeof(uresp) - sizeof(uresp.flags) :
+ sizeof(uresp));
if (ret)
- goto err5;
+ goto err_free_mm2;
mm->key = uresp.key;
- mm->addr = virt_to_phys(chp->cq.queue);
+ mm->addr = 0;
+ mm->vaddr = chp->cq.queue;
+ mm->dma_addr = chp->cq.dma_addr;
mm->len = chp->cq.memsize;
+ insert_flag_to_mmap(&rhp->rdev, mm, mm->addr);
insert_mmap(ucontext, mm);
mm2->key = uresp.gts_key;
mm2->addr = chp->cq.bar2_pa;
mm2->len = PAGE_SIZE;
+ mm2->vaddr = NULL;
+ mm2->dma_addr = 0;
+ insert_flag_to_mmap(&rhp->rdev, mm2, mm2->addr);
insert_mmap(ucontext, mm2);
}
- PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
- __func__, chp->cq.cqid, chp, chp->cq.size,
- chp->cq.memsize, (unsigned long long) chp->cq.dma_addr);
- return &chp->ibcq;
-err5:
+
+ pr_debug("cqid 0x%0x chp %p size %u memsize %zu, dma_addr %pad\n",
+ chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize,
+ &chp->cq.dma_addr);
+ return 0;
+err_free_mm2:
kfree(mm2);
-err4:
+err_free_mm:
kfree(mm);
-err3:
- remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
-err2:
+err_remove_handle:
+ xa_erase_irq(&rhp->cqs, chp->cq.cqid);
+err_destroy_cq:
destroy_cq(&chp->rhp->rdev, &chp->cq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
-err1:
- kfree(chp);
- return ERR_PTR(ret);
-}
-
-int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
-{
- return -ENOSYS;
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
+ chp->destroy_skb, chp->wr_waitp);
+err_free_skb:
+ kfree_skb(chp->destroy_skb);
+err_free_wr_wait:
+ c4iw_put_wr_wait(chp->wr_waitp);
+err_free_chp:
+ return ret;
}
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct c4iw_cq *chp;
- int ret;
+ int ret = 0;
unsigned long flag;
chp = to_c4iw_cq(ibcq);
spin_lock_irqsave(&chp->lock, flag);
- ret = t4_arm_cq(&chp->cq,
- (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+ t4_arm_cq(&chp->cq,
+ (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+ if (flags & IB_CQ_REPORT_MISSED_EVENTS)
+ ret = t4_cq_notempty(&chp->cq);
spin_unlock_irqrestore(&chp->lock, flag);
- if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
- ret = 0;
return ret;
}
+
+void c4iw_flush_srqidx(struct c4iw_qp *qhp, u32 srqidx)
+{
+ struct c4iw_cq *rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
+ unsigned long flag;
+
+ /* locking hierarchy: cq lock first, then qp lock. */
+ spin_lock_irqsave(&rchp->lock, flag);
+ spin_lock(&qhp->lock);
+
+ /* create a SRQ RECV CQE for srqidx */
+ insert_recv_cqe(&qhp->wq, &rchp->cq, srqidx);
+
+ spin_unlock(&qhp->lock);
+ spin_unlock_irqrestore(&rchp->lock, flag);
+}
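
The c4iw_create_cq() hunks above swap the numbered error labels (err1..err5) for descriptive ones (err_free_mm2 through err_free_chp), each label undoing exactly one completed setup step in reverse order. A minimal user-space sketch of that unwind idiom follows; the names are hypothetical and it is not code from the driver:

#include <stdlib.h>

struct thing {
	void *wait;	/* stands in for chp->wr_waitp */
	void *skb;	/* stands in for chp->destroy_skb */
};

static int thing_create(struct thing *t)
{
	int ret;

	t->wait = malloc(32);
	if (!t->wait)
		return -1;		/* nothing to undo yet */

	t->skb = malloc(128);
	if (!t->skb) {
		ret = -1;
		goto err_free_wait;	/* undo only the step that succeeded */
	}
	return 0;

err_free_wait:
	free(t->wait);
	return ret;
}

int main(void)
{
	struct thing t;

	return thing_create(&t) ? 1 : 0;
}
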
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 1a297391b54c..d892f55febe2 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -44,7 +44,6 @@
MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
static int allow_db_fc_on_t5;
module_param(allow_db_fc_on_t5, int, 0644);
@@ -65,14 +64,9 @@ module_param(c4iw_wr_log_size_order, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log_size_order,
"Number of entries (log2) in the work request timing log.");
-struct uld_ctx {
- struct list_head entry;
- struct cxgb4_lld_info lldi;
- struct c4iw_dev *dev;
-};
-
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
+static struct workqueue_struct *reg_workq;
#define DB_FC_RESUME_SIZE 64
#define DB_FC_RESUME_DELAY 1
@@ -87,25 +81,6 @@ struct c4iw_debugfs_data {
int pos;
};
-/* registered cxgb4 netlink callbacks */
-static struct ibnl_client_cbs c4iw_nl_cb_table[] = {
- [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
- [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
- [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
- [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
- [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
- [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
- [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
-};
-
-static int count_idrs(int id, void *p, void *data)
-{
- int *countp = data;
-
- *countp = *countp + 1;
- return 0;
-}
-
static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
@@ -125,19 +100,19 @@ void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
(wq->rdev->wr_log_size - 1);
le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
- getnstimeofday(&le.poll_host_ts);
+ le.poll_host_time = ktime_get();
le.valid = 1;
le.cqe_sge_ts = CQE_TS(cqe);
if (SQ_TYPE(cqe)) {
le.qid = wq->sq.qid;
le.opcode = CQE_OPCODE(cqe);
- le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts;
+ le.post_host_time = wq->sq.sw_sq[wq->sq.cidx].host_time;
le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
le.wr_id = CQE_WRID_SQ_IDX(cqe);
} else {
le.qid = wq->rq.qid;
le.opcode = FW_RI_RECEIVE;
- le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts;
+ le.post_host_time = wq->rq.sw_rq[wq->rq.cidx].host_time;
le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
le.wr_id = CQE_WRID_MSN(cqe);
}
@@ -147,9 +122,9 @@ void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
static int wr_log_show(struct seq_file *seq, void *v)
{
struct c4iw_dev *dev = seq->private;
- struct timespec prev_ts = {0, 0};
+ ktime_t prev_time;
struct wr_log_entry *lep;
- int prev_ts_set = 0;
+ int prev_time_set = 0;
int idx, end;
#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)
@@ -162,33 +137,29 @@ static int wr_log_show(struct seq_file *seq, void *v)
lep = &dev->rdev.wr_log[idx];
while (idx != end) {
if (lep->valid) {
- if (!prev_ts_set) {
- prev_ts_set = 1;
- prev_ts = lep->poll_host_ts;
+ if (!prev_time_set) {
+ prev_time_set = 1;
+ prev_time = lep->poll_host_time;
}
- seq_printf(seq, "%04u: sec %lu nsec %lu qid %u opcode "
- "%u %s 0x%x host_wr_delta sec %lu nsec %lu "
+ seq_printf(seq, "%04u: nsec %llu qid %u opcode "
+ "%u %s 0x%x host_wr_delta nsec %llu "
"post_sge_ts 0x%llx cqe_sge_ts 0x%llx "
"poll_sge_ts 0x%llx post_poll_delta_ns %llu "
"cqe_poll_delta_ns %llu\n",
idx,
- timespec_sub(lep->poll_host_ts,
- prev_ts).tv_sec,
- timespec_sub(lep->poll_host_ts,
- prev_ts).tv_nsec,
+ ktime_to_ns(ktime_sub(lep->poll_host_time,
+ prev_time)),
lep->qid, lep->opcode,
lep->opcode == FW_RI_RECEIVE ?
"msn" : "wrid",
lep->wr_id,
- timespec_sub(lep->poll_host_ts,
- lep->post_host_ts).tv_sec,
- timespec_sub(lep->poll_host_ts,
- lep->post_host_ts).tv_nsec,
+ ktime_to_ns(ktime_sub(lep->poll_host_time,
+ lep->post_host_time)),
lep->post_sge_ts, lep->cqe_sge_ts,
lep->poll_sge_ts,
ts2ns(lep->poll_sge_ts - lep->post_sge_ts),
ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts));
- prev_ts = lep->poll_host_ts;
+ prev_time = lep->poll_host_time;
}
idx++;
if (idx > (dev->rdev.wr_log_size - 1))
@@ -225,13 +196,57 @@ static const struct file_operations wr_log_debugfs_fops = {
.write = wr_log_clear,
};
-static int dump_qp(int id, void *p, void *data)
+static struct sockaddr_in zero_sin = {
+ .sin_family = AF_INET,
+};
+
+static struct sockaddr_in6 zero_sin6 = {
+ .sin6_family = AF_INET6,
+};
+
+static void set_ep_sin_addrs(struct c4iw_ep *ep,
+ struct sockaddr_in **lsin,
+ struct sockaddr_in **rsin,
+ struct sockaddr_in **m_lsin,
+ struct sockaddr_in **m_rsin)
+{
+ struct iw_cm_id *id = ep->com.cm_id;
+
+ *m_lsin = (struct sockaddr_in *)&ep->com.local_addr;
+ *m_rsin = (struct sockaddr_in *)&ep->com.remote_addr;
+ if (id) {
+ *lsin = (struct sockaddr_in *)&id->local_addr;
+ *rsin = (struct sockaddr_in *)&id->remote_addr;
+ } else {
+ *lsin = &zero_sin;
+ *rsin = &zero_sin;
+ }
+}
+
+static void set_ep_sin6_addrs(struct c4iw_ep *ep,
+ struct sockaddr_in6 **lsin6,
+ struct sockaddr_in6 **rsin6,
+ struct sockaddr_in6 **m_lsin6,
+ struct sockaddr_in6 **m_rsin6)
+{
+ struct iw_cm_id *id = ep->com.cm_id;
+
+ *m_lsin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
+ *m_rsin6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
+ if (id) {
+ *lsin6 = (struct sockaddr_in6 *)&id->local_addr;
+ *rsin6 = (struct sockaddr_in6 *)&id->remote_addr;
+ } else {
+ *lsin6 = &zero_sin6;
+ *rsin6 = &zero_sin6;
+ }
+}
+
+static int dump_qp(unsigned long id, struct c4iw_qp *qp,
+ struct c4iw_debugfs_data *qpd)
{
- struct c4iw_qp *qp = p;
- struct c4iw_debugfs_data *qpd = data;
int space;
int cc;
-
if (id != qp->wq.sq.qid)
return 0;
@@ -240,40 +255,36 @@ static int dump_qp(int id, void *p, void *data)
return 1;
if (qp->ep) {
- if (qp->ep->com.local_addr.ss_family == AF_INET) {
- struct sockaddr_in *lsin = (struct sockaddr_in *)
- &qp->ep->com.local_addr;
- struct sockaddr_in *rsin = (struct sockaddr_in *)
- &qp->ep->com.remote_addr;
- struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
- &qp->ep->com.mapped_local_addr;
- struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
- &qp->ep->com.mapped_remote_addr;
+ struct c4iw_ep *ep = qp->ep;
+
+ if (ep->com.local_addr.ss_family == AF_INET) {
+ struct sockaddr_in *lsin;
+ struct sockaddr_in *rsin;
+ struct sockaddr_in *m_lsin;
+ struct sockaddr_in *m_rsin;
+ set_ep_sin_addrs(ep, &lsin, &rsin, &m_lsin, &m_rsin);
cc = snprintf(qpd->buf + qpd->pos, space,
- "rc qp sq id %u rq id %u state %u "
+ "rc qp sq id %u %s id %u state %u "
"onchip %u ep tid %u state %u "
"%pI4:%u/%u->%pI4:%u/%u\n",
- qp->wq.sq.qid, qp->wq.rq.qid,
+ qp->wq.sq.qid, qp->srq ? "srq" : "rq",
+ qp->srq ? qp->srq->idx : qp->wq.rq.qid,
(int)qp->attr.state,
qp->wq.sq.flags & T4_SQ_ONCHIP,
- qp->ep->hwtid, (int)qp->ep->com.state,
+ ep->hwtid, (int)ep->com.state,
&lsin->sin_addr, ntohs(lsin->sin_port),
- ntohs(mapped_lsin->sin_port),
+ ntohs(m_lsin->sin_port),
&rsin->sin_addr, ntohs(rsin->sin_port),
- ntohs(mapped_rsin->sin_port));
+ ntohs(m_rsin->sin_port));
} else {
- struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
- &qp->ep->com.local_addr;
- struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
- &qp->ep->com.remote_addr;
- struct sockaddr_in6 *mapped_lsin6 =
- (struct sockaddr_in6 *)
- &qp->ep->com.mapped_local_addr;
- struct sockaddr_in6 *mapped_rsin6 =
- (struct sockaddr_in6 *)
- &qp->ep->com.mapped_remote_addr;
+ struct sockaddr_in6 *lsin6;
+ struct sockaddr_in6 *rsin6;
+ struct sockaddr_in6 *m_lsin6;
+ struct sockaddr_in6 *m_rsin6;
+ set_ep_sin6_addrs(ep, &lsin6, &rsin6, &m_lsin6,
+ &m_rsin6);
cc = snprintf(qpd->buf + qpd->pos, space,
"rc qp sq id %u rq id %u state %u "
"onchip %u ep tid %u state %u "
@@ -281,13 +292,13 @@ static int dump_qp(int id, void *p, void *data)
qp->wq.sq.qid, qp->wq.rq.qid,
(int)qp->attr.state,
qp->wq.sq.flags & T4_SQ_ONCHIP,
- qp->ep->hwtid, (int)qp->ep->com.state,
+ ep->hwtid, (int)ep->com.state,
&lsin6->sin6_addr,
ntohs(lsin6->sin6_port),
- ntohs(mapped_lsin6->sin6_port),
+ ntohs(m_lsin6->sin6_port),
&rsin6->sin6_addr,
ntohs(rsin6->sin6_port),
- ntohs(mapped_rsin6->sin6_port));
+ ntohs(m_rsin6->sin6_port));
}
} else
cc = snprintf(qpd->buf + qpd->pos, space,
@@ -304,7 +315,7 @@ static int qp_release(struct inode *inode, struct file *file)
{
struct c4iw_debugfs_data *qpd = file->private_data;
if (!qpd) {
- printk(KERN_INFO "%s null qpd?\n", __func__);
+ pr_info("%s null qpd?\n", __func__);
return 0;
}
vfree(qpd->buf);
@@ -314,40 +325,40 @@ static int qp_release(struct inode *inode, struct file *file)
static int qp_open(struct inode *inode, struct file *file)
{
+ struct c4iw_qp *qp;
struct c4iw_debugfs_data *qpd;
- int ret = 0;
+ unsigned long index;
int count = 1;
- qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
- if (!qpd) {
- ret = -ENOMEM;
- goto out;
- }
+ qpd = kmalloc(sizeof(*qpd), GFP_KERNEL);
+ if (!qpd)
+ return -ENOMEM;
+
qpd->devp = inode->i_private;
qpd->pos = 0;
- spin_lock_irq(&qpd->devp->lock);
- idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
- spin_unlock_irq(&qpd->devp->lock);
+ /*
+ * No need to lock; we drop the lock to call vmalloc so it's racy
+ * anyway. Someone who cares should switch this over to seq_file
+ */
+ xa_for_each(&qpd->devp->qps, index, qp)
+ count++;
- qpd->bufsize = count * 128;
+ qpd->bufsize = count * 180;
qpd->buf = vmalloc(qpd->bufsize);
if (!qpd->buf) {
- ret = -ENOMEM;
- goto err1;
+ kfree(qpd);
+ return -ENOMEM;
}
- spin_lock_irq(&qpd->devp->lock);
- idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
- spin_unlock_irq(&qpd->devp->lock);
+ xa_lock_irq(&qpd->devp->qps);
+ xa_for_each(&qpd->devp->qps, index, qp)
+ dump_qp(index, qp, qpd);
+ xa_unlock_irq(&qpd->devp->qps);
qpd->buf[qpd->pos++] = 0;
file->private_data = qpd;
- goto out;
-err1:
- kfree(qpd);
-out:
- return ret;
+ return 0;
}
static const struct file_operations qp_debugfs_fops = {
@@ -358,9 +369,8 @@ static const struct file_operations qp_debugfs_fops = {
.llseek = default_llseek,
};
-static int dump_stag(int id, void *p, void *data)
+static int dump_stag(unsigned long id, struct c4iw_debugfs_data *stagd)
{
- struct c4iw_debugfs_data *stagd = data;
int space;
int cc;
struct fw_ri_tpte tpte;
@@ -398,7 +408,7 @@ static int stag_release(struct inode *inode, struct file *file)
{
struct c4iw_debugfs_data *stagd = file->private_data;
if (!stagd) {
- printk(KERN_INFO "%s null stagd?\n", __func__);
+ pr_info("%s null stagd?\n", __func__);
return 0;
}
vfree(stagd->buf);
@@ -409,10 +419,12 @@ static int stag_release(struct inode *inode, struct file *file)
static int stag_open(struct inode *inode, struct file *file)
{
struct c4iw_debugfs_data *stagd;
+ void *p;
+ unsigned long index;
int ret = 0;
int count = 1;
- stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
+ stagd = kmalloc(sizeof(*stagd), GFP_KERNEL);
if (!stagd) {
ret = -ENOMEM;
goto out;
@@ -420,9 +432,8 @@ static int stag_open(struct inode *inode, struct file *file)
stagd->devp = inode->i_private;
stagd->pos = 0;
- spin_lock_irq(&stagd->devp->lock);
- idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
- spin_unlock_irq(&stagd->devp->lock);
+ xa_for_each(&stagd->devp->mrs, index, p)
+ count++;
stagd->bufsize = count * 256;
stagd->buf = vmalloc(stagd->bufsize);
@@ -431,9 +442,10 @@ static int stag_open(struct inode *inode, struct file *file)
goto err1;
}
- spin_lock_irq(&stagd->devp->lock);
- idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
- spin_unlock_irq(&stagd->devp->lock);
+ xa_lock_irq(&stagd->devp->mrs);
+ xa_for_each(&stagd->devp->mrs, index, p)
+ dump_stag(index, stagd);
+ xa_unlock_irq(&stagd->devp->mrs);
stagd->buf[stagd->pos++] = 0;
file->private_data = stagd;
@@ -466,6 +478,9 @@ static int stats_show(struct seq_file *seq, void *v)
seq_printf(seq, " QID: %10llu %10llu %10llu %10llu\n",
dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
+ seq_printf(seq, " SRQS: %10llu %10llu %10llu %10llu\n",
+ dev->rdev.stats.srqt.total, dev->rdev.stats.srqt.cur,
+ dev->rdev.stats.srqt.max, dev->rdev.stats.srqt.fail);
seq_printf(seq, " TPTMEM: %10llu %10llu %10llu %10llu\n",
dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
@@ -516,6 +531,8 @@ static ssize_t stats_clear(struct file *file, const char __user *buf,
dev->rdev.stats.pbl.fail = 0;
dev->rdev.stats.rqt.max = 0;
dev->rdev.stats.rqt.fail = 0;
+ dev->rdev.stats.srqt.max = 0;
+ dev->rdev.stats.srqt.fail = 0;
dev->rdev.stats.ocqp.max = 0;
dev->rdev.stats.ocqp.fail = 0;
dev->rdev.stats.db_full = 0;
@@ -538,10 +555,8 @@ static const struct file_operations stats_debugfs_fops = {
.write = stats_clear,
};
-static int dump_ep(int id, void *p, void *data)
+static int dump_ep(struct c4iw_ep *ep, struct c4iw_debugfs_data *epd)
{
- struct c4iw_ep *ep = p;
- struct c4iw_debugfs_data *epd = data;
int space;
int cc;
@@ -550,15 +565,12 @@ static int dump_ep(int id, void *p, void *data)
return 1;
if (ep->com.local_addr.ss_family == AF_INET) {
- struct sockaddr_in *lsin = (struct sockaddr_in *)
- &ep->com.local_addr;
- struct sockaddr_in *rsin = (struct sockaddr_in *)
- &ep->com.remote_addr;
- struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
- &ep->com.mapped_local_addr;
- struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
- &ep->com.mapped_remote_addr;
+ struct sockaddr_in *lsin;
+ struct sockaddr_in *rsin;
+ struct sockaddr_in *m_lsin;
+ struct sockaddr_in *m_rsin;
+ set_ep_sin_addrs(ep, &lsin, &rsin, &m_lsin, &m_rsin);
cc = snprintf(epd->buf + epd->pos, space,
"ep %p cm_id %p qp %p state %d flags 0x%lx "
"history 0x%lx hwtid %d atid %d "
@@ -570,19 +582,16 @@ static int dump_ep(int id, void *p, void *data)
ep->stats.connect_neg_adv,
ep->stats.abort_neg_adv,
&lsin->sin_addr, ntohs(lsin->sin_port),
- ntohs(mapped_lsin->sin_port),
+ ntohs(m_lsin->sin_port),
&rsin->sin_addr, ntohs(rsin->sin_port),
- ntohs(mapped_rsin->sin_port));
+ ntohs(m_rsin->sin_port));
} else {
- struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
- &ep->com.local_addr;
- struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
- &ep->com.remote_addr;
- struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
- &ep->com.mapped_local_addr;
- struct sockaddr_in6 *mapped_rsin6 = (struct sockaddr_in6 *)
- &ep->com.mapped_remote_addr;
+ struct sockaddr_in6 *lsin6;
+ struct sockaddr_in6 *rsin6;
+ struct sockaddr_in6 *m_lsin6;
+ struct sockaddr_in6 *m_rsin6;
+ set_ep_sin6_addrs(ep, &lsin6, &rsin6, &m_lsin6, &m_rsin6);
cc = snprintf(epd->buf + epd->pos, space,
"ep %p cm_id %p qp %p state %d flags 0x%lx "
"history 0x%lx hwtid %d atid %d "
@@ -594,19 +603,18 @@ static int dump_ep(int id, void *p, void *data)
ep->stats.connect_neg_adv,
ep->stats.abort_neg_adv,
&lsin6->sin6_addr, ntohs(lsin6->sin6_port),
- ntohs(mapped_lsin6->sin6_port),
+ ntohs(m_lsin6->sin6_port),
&rsin6->sin6_addr, ntohs(rsin6->sin6_port),
- ntohs(mapped_rsin6->sin6_port));
+ ntohs(m_rsin6->sin6_port));
}
if (cc < space)
epd->pos += cc;
return 0;
}
-static int dump_listen_ep(int id, void *p, void *data)
+static
+int dump_listen_ep(struct c4iw_listen_ep *ep, struct c4iw_debugfs_data *epd)
{
- struct c4iw_listen_ep *ep = p;
- struct c4iw_debugfs_data *epd = data;
int space;
int cc;
@@ -616,9 +624,9 @@ static int dump_listen_ep(int id, void *p, void *data)
if (ep->com.local_addr.ss_family == AF_INET) {
struct sockaddr_in *lsin = (struct sockaddr_in *)
- &ep->com.local_addr;
- struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
- &ep->com.mapped_local_addr;
+ &ep->com.cm_id->local_addr;
+ struct sockaddr_in *m_lsin = (struct sockaddr_in *)
+ &ep->com.cm_id->m_local_addr;
cc = snprintf(epd->buf + epd->pos, space,
"ep %p cm_id %p state %d flags 0x%lx stid %d "
@@ -626,12 +634,12 @@ static int dump_listen_ep(int id, void *p, void *data)
ep, ep->com.cm_id, (int)ep->com.state,
ep->com.flags, ep->stid, ep->backlog,
&lsin->sin_addr, ntohs(lsin->sin_port),
- ntohs(mapped_lsin->sin_port));
+ ntohs(m_lsin->sin_port));
} else {
struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
- &ep->com.local_addr;
- struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
- &ep->com.mapped_local_addr;
+ &ep->com.cm_id->local_addr;
+ struct sockaddr_in6 *m_lsin6 = (struct sockaddr_in6 *)
+ &ep->com.cm_id->m_local_addr;
cc = snprintf(epd->buf + epd->pos, space,
"ep %p cm_id %p state %d flags 0x%lx stid %d "
@@ -639,7 +647,7 @@ static int dump_listen_ep(int id, void *p, void *data)
ep, ep->com.cm_id, (int)ep->com.state,
ep->com.flags, ep->stid, ep->backlog,
&lsin6->sin6_addr, ntohs(lsin6->sin6_port),
- ntohs(mapped_lsin6->sin6_port));
+ ntohs(m_lsin6->sin6_port));
}
if (cc < space)
epd->pos += cc;
@@ -660,6 +668,9 @@ static int ep_release(struct inode *inode, struct file *file)
static int ep_open(struct inode *inode, struct file *file)
{
+ struct c4iw_ep *ep;
+ struct c4iw_listen_ep *lep;
+ unsigned long index;
struct c4iw_debugfs_data *epd;
int ret = 0;
int count = 1;
@@ -672,11 +683,12 @@ static int ep_open(struct inode *inode, struct file *file)
epd->devp = inode->i_private;
epd->pos = 0;
- spin_lock_irq(&epd->devp->lock);
- idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
- idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
- idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
- spin_unlock_irq(&epd->devp->lock);
+ xa_for_each(&epd->devp->hwtids, index, ep)
+ count++;
+ xa_for_each(&epd->devp->atids, index, ep)
+ count++;
+ xa_for_each(&epd->devp->stids, index, lep)
+ count++;
epd->bufsize = count * 240;
epd->buf = vmalloc(epd->bufsize);
@@ -685,11 +697,18 @@ static int ep_open(struct inode *inode, struct file *file)
goto err1;
}
- spin_lock_irq(&epd->devp->lock);
- idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
- idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
- idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
- spin_unlock_irq(&epd->devp->lock);
+ xa_lock_irq(&epd->devp->hwtids);
+ xa_for_each(&epd->devp->hwtids, index, ep)
+ dump_ep(ep, epd);
+ xa_unlock_irq(&epd->devp->hwtids);
+ xa_lock_irq(&epd->devp->atids);
+ xa_for_each(&epd->devp->atids, index, ep)
+ dump_ep(ep, epd);
+ xa_unlock_irq(&epd->devp->atids);
+ xa_lock_irq(&epd->devp->stids);
+ xa_for_each(&epd->devp->stids, index, lep)
+ dump_listen_ep(lep, epd);
+ xa_unlock_irq(&epd->devp->stids);
file->private_data = epd;
goto out;
@@ -706,11 +725,8 @@ static const struct file_operations ep_debugfs_fops = {
.read = debugfs_read,
};
-static int setup_debugfs(struct c4iw_dev *devp)
+static void setup_debugfs(struct c4iw_dev *devp)
{
- if (!devp->debugfs_root)
- return -1;
-
debugfs_create_file_size("qps", S_IWUSR, devp->debugfs_root,
(void *)devp, &qp_debugfs_fops, 4096);
@@ -726,7 +742,6 @@ static int setup_debugfs(struct c4iw_dev *devp)
if (c4iw_wr_log)
debugfs_create_file_size("wr_log", S_IWUSR, devp->debugfs_root,
(void *)devp, &wr_log_debugfs_fops, 4096);
- return 0;
}
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
@@ -749,7 +764,7 @@ void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
kfree(entry);
}
- list_for_each_safe(pos, nxt, &uctx->qpids) {
+ list_for_each_safe(pos, nxt, &uctx->cqids) {
entry = list_entry(pos, struct c4iw_qid_list, entry);
list_del_init(&entry->entry);
kfree(entry);
@@ -769,6 +784,7 @@ void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
int err;
+ unsigned int factor;
c4iw_init_dev_ucontext(rdev, &rdev->uctx);
@@ -778,125 +794,153 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
* cqid and qpid range must match for now.
*/
if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {
- pr_err(MOD "%s: unsupported udb/ucq densities %u/%u\n",
+ pr_err("%s: unsupported udb/ucq densities %u/%u\n",
pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,
rdev->lldi.ucq_density);
- err = -EINVAL;
- goto err1;
+ return -EINVAL;
}
if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
- pr_err(MOD "%s: unsupported qp and cq id ranges "
- "qp start %u size %u cq start %u size %u\n",
+ pr_err("%s: unsupported qp and cq id ranges qp start %u size %u cq start %u size %u\n",
pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
 rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start,
rdev->lldi.vr->cq.size);
- err = -EINVAL;
- goto err1;
+ return -EINVAL;
}
- rdev->qpmask = rdev->lldi.udb_density - 1;
- rdev->cqmask = rdev->lldi.ucq_density - 1;
- PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
- "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
- "qp qid start %u size %u cq qid start %u size %u\n",
- __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
- rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
- rdev->lldi.vr->pbl.start,
- rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
- rdev->lldi.vr->rq.size,
- rdev->lldi.vr->qp.start,
- rdev->lldi.vr->qp.size,
- rdev->lldi.vr->cq.start,
- rdev->lldi.vr->cq.size);
- PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p "
- "qpmask 0x%x cqmask 0x%x\n",
- (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
- (void *)pci_resource_start(rdev->lldi.pdev, 2),
- rdev->lldi.db_reg, rdev->lldi.gts_reg,
- rdev->qpmask, rdev->cqmask);
-
- if (c4iw_num_stags(rdev) == 0) {
- err = -EINVAL;
- goto err1;
+ /* This implementation requires a sge_host_page_size <= PAGE_SIZE. */
+ if (rdev->lldi.sge_host_page_size > PAGE_SIZE) {
+ pr_err("%s: unsupported sge host page size %u\n",
+ pci_name(rdev->lldi.pdev),
+ rdev->lldi.sge_host_page_size);
+ return -EINVAL;
}
+ factor = PAGE_SIZE / rdev->lldi.sge_host_page_size;
+ rdev->qpmask = (rdev->lldi.udb_density * factor) - 1;
+ rdev->cqmask = (rdev->lldi.ucq_density * factor) - 1;
+
+ pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u srq size %u\n",
+ pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
+ rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
+ rdev->lldi.vr->pbl.start,
+ rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
+ rdev->lldi.vr->rq.size,
+ rdev->lldi.vr->qp.start,
+ rdev->lldi.vr->qp.size,
+ rdev->lldi.vr->cq.start,
+ rdev->lldi.vr->cq.size,
+ rdev->lldi.vr->srq.size);
+ pr_debug("udb %pR db_reg %p gts_reg %p qpmask 0x%x cqmask 0x%x\n",
+ &rdev->lldi.pdev->resource[2],
+ rdev->lldi.db_reg, rdev->lldi.gts_reg,
+ rdev->qpmask, rdev->cqmask);
+
+ if (c4iw_num_stags(rdev) == 0)
+ return -EINVAL;
+
rdev->stats.pd.total = T4_MAX_NUM_PD;
rdev->stats.stag.total = rdev->lldi.vr->stag.size;
rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
+ rdev->stats.srqt.total = rdev->lldi.vr->srq.size;
rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
rdev->stats.qid.total = rdev->lldi.vr->qp.size;
- err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
+ err = c4iw_init_resource(rdev, c4iw_num_stags(rdev),
+ T4_MAX_NUM_PD, rdev->lldi.vr->srq.size);
if (err) {
- printk(KERN_ERR MOD "error %d initializing resources\n", err);
- goto err1;
+ pr_err("error %d initializing resources\n", err);
+ return err;
}
err = c4iw_pblpool_create(rdev);
if (err) {
- printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
- goto err2;
+ pr_err("error %d initializing pbl pool\n", err);
+ goto destroy_resource;
}
err = c4iw_rqtpool_create(rdev);
if (err) {
- printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
- goto err3;
+ pr_err("error %d initializing rqt pool\n", err);
+ goto destroy_pblpool;
}
err = c4iw_ocqp_pool_create(rdev);
if (err) {
- printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
- goto err4;
+ pr_err("error %d initializing ocqp pool\n", err);
+ goto destroy_rqtpool;
}
rdev->status_page = (struct t4_dev_status_page *)
__get_free_page(GFP_KERNEL);
if (!rdev->status_page) {
- pr_err(MOD "error allocating status page\n");
- goto err4;
+ err = -ENOMEM;
+ goto destroy_ocqp_pool;
}
+ rdev->status_page->qp_start = rdev->lldi.vr->qp.start;
+ rdev->status_page->qp_size = rdev->lldi.vr->qp.size;
+ rdev->status_page->cq_start = rdev->lldi.vr->cq.start;
+ rdev->status_page->cq_size = rdev->lldi.vr->cq.size;
+ rdev->status_page->write_cmpl_supported = rdev->lldi.write_cmpl_support;
if (c4iw_wr_log) {
- rdev->wr_log = kzalloc((1 << c4iw_wr_log_size_order) *
- sizeof(*rdev->wr_log), GFP_KERNEL);
+ rdev->wr_log = kcalloc(1 << c4iw_wr_log_size_order,
+ sizeof(*rdev->wr_log),
+ GFP_KERNEL);
if (rdev->wr_log) {
rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
atomic_set(&rdev->wr_log_idx, 0);
- } else {
- pr_err(MOD "error allocating wr_log. Logging disabled\n");
}
}
+ rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
+ if (!rdev->free_workq) {
+ err = -ENOMEM;
+ goto err_free_status_page_and_wr_log;
+ }
+
rdev->status_page->db_off = 0;
+ init_completion(&rdev->rqt_compl);
+ init_completion(&rdev->pbl_compl);
+ kref_init(&rdev->rqt_kref);
+ kref_init(&rdev->pbl_kref);
+
return 0;
-err4:
+err_free_status_page_and_wr_log:
+ kfree(rdev->wr_log);
+ free_page((unsigned long)rdev->status_page);
+destroy_ocqp_pool:
+ c4iw_ocqp_pool_destroy(rdev);
+destroy_rqtpool:
c4iw_rqtpool_destroy(rdev);
-err3:
+destroy_pblpool:
c4iw_pblpool_destroy(rdev);
-err2:
+destroy_resource:
c4iw_destroy_resource(&rdev->resource);
-err1:
return err;
}
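
The new sge_host_page_size handling above scales the user doorbell masks by factor = PAGE_SIZE / sge_host_page_size. As an illustration with hypothetical numbers: udb_density = 1, a 4 KiB SGE host page and a 64 KiB kernel PAGE_SIZE give factor = 16 and qpmask = (1 * 16) - 1 = 15, i.e. sixteen queue IDs share each kernel-sized BAR2 doorbell page. This is also why the check rejects sge_host_page_size > PAGE_SIZE, which would make factor zero.
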
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
kfree(rdev->wr_log);
+ c4iw_release_dev_ucontext(rdev, &rdev->uctx);
free_page((unsigned long)rdev->status_page);
c4iw_pblpool_destroy(rdev);
c4iw_rqtpool_destroy(rdev);
+ wait_for_completion(&rdev->pbl_compl);
+ wait_for_completion(&rdev->rqt_compl);
+ c4iw_ocqp_pool_destroy(rdev);
+ destroy_workqueue(rdev->free_workq);
c4iw_destroy_resource(&rdev->resource);
}
-static void c4iw_dealloc(struct uld_ctx *ctx)
+void c4iw_dealloc(struct uld_ctx *ctx)
{
c4iw_rdev_close(&ctx->dev->rdev);
- idr_destroy(&ctx->dev->cqidr);
- idr_destroy(&ctx->dev->qpidr);
- idr_destroy(&ctx->dev->mmidr);
- idr_destroy(&ctx->dev->hwtid_idr);
- idr_destroy(&ctx->dev->stid_idr);
- idr_destroy(&ctx->dev->atid_idr);
+ WARN_ON(!xa_empty(&ctx->dev->cqs));
+ WARN_ON(!xa_empty(&ctx->dev->qps));
+ WARN_ON(!xa_empty(&ctx->dev->mrs));
+ wait_event(ctx->dev->wait, xa_empty(&ctx->dev->hwtids));
+ WARN_ON(!xa_empty(&ctx->dev->stids));
+ WARN_ON(!xa_empty(&ctx->dev->atids));
if (ctx->dev->rdev.bar2_kva)
iounmap(ctx->dev->rdev.bar2_kva);
if (ctx->dev->rdev.oc_mw_kva)
@@ -907,7 +951,8 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
static void c4iw_remove(struct uld_ctx *ctx)
{
- PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
+ pr_debug("c4iw_dev %p\n", ctx->dev);
+ debugfs_remove_recursive(ctx->dev->debugfs_root);
c4iw_unregister_device(ctx->dev);
c4iw_dealloc(ctx);
}
@@ -925,28 +970,28 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
int ret;
if (!rdma_supported(infop)) {
- printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
- pci_name(infop->pdev));
+ pr_info("%s: RDMA not supported on this device\n",
+ pci_name(infop->pdev));
return ERR_PTR(-ENOSYS);
}
if (!ocqp_supported(infop))
- pr_info("%s: On-Chip Queues not supported on this device.\n",
+ pr_info("%s: On-Chip Queues not supported on this device\n",
pci_name(infop->pdev));
- devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
+ devp = ib_alloc_device(c4iw_dev, ibdev);
if (!devp) {
- printk(KERN_ERR MOD "Cannot allocate ib device\n");
+ pr_err("Cannot allocate ib device\n");
return ERR_PTR(-ENOMEM);
}
devp->rdev.lldi = *infop;
/* init various hw-queue params based on lld info */
- PDBG("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
- __func__, devp->rdev.lldi.sge_ingpadboundary,
- devp->rdev.lldi.sge_egrstatuspagesize);
+ pr_debug("Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
+ devp->rdev.lldi.sge_ingpadboundary,
+ devp->rdev.lldi.sge_egrstatuspagesize);
devp->rdev.hw_queue.t4_eq_status_entries =
- devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1;
+ devp->rdev.lldi.sge_egrstatuspagesize / 64;
devp->rdev.hw_queue.t4_max_eq_size = 65520;
devp->rdev.hw_queue.t4_max_iq_size = 65520;
devp->rdev.hw_queue.t4_max_rq_size = 8192 -
@@ -962,16 +1007,16 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
devp->rdev.lldi.sge_egrstatuspagesize;
/*
- * For T5 devices, we map all of BAR2 with WC.
+ * For T5/T6 devices, we map all of BAR2 with WC.
* For T4 devices with onchip qp mem, we map only that part
* of BAR2 with WC.
*/
devp->rdev.bar2_pa = pci_resource_start(devp->rdev.lldi.pdev, 2);
- if (is_t5(devp->rdev.lldi.adapter_type)) {
+ if (!is_t4(devp->rdev.lldi.adapter_type)) {
devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
pci_resource_len(devp->rdev.lldi.pdev, 2));
if (!devp->rdev.bar2_kva) {
- pr_err(MOD "Unable to ioremap BAR2\n");
+ pr_err("Unable to ioremap BAR2\n");
ib_dealloc_device(&devp->ibdev);
return ERR_PTR(-EINVAL);
}
@@ -983,34 +1028,33 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
devp->rdev.lldi.vr->ocq.size);
if (!devp->rdev.oc_mw_kva) {
- pr_err(MOD "Unable to ioremap onchip mem\n");
+ pr_err("Unable to ioremap onchip mem\n");
ib_dealloc_device(&devp->ibdev);
return ERR_PTR(-EINVAL);
}
}
- PDBG(KERN_INFO MOD "ocq memory: "
- "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
- devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
- devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);
+ pr_debug("ocq memory: hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
+ devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
+ devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);
ret = c4iw_rdev_open(&devp->rdev);
if (ret) {
- printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
+ pr_err("Unable to open CXIO rdev err %d\n", ret);
ib_dealloc_device(&devp->ibdev);
return ERR_PTR(ret);
}
- idr_init(&devp->cqidr);
- idr_init(&devp->qpidr);
- idr_init(&devp->mmidr);
- idr_init(&devp->hwtid_idr);
- idr_init(&devp->stid_idr);
- idr_init(&devp->atid_idr);
- spin_lock_init(&devp->lock);
+ xa_init_flags(&devp->cqs, XA_FLAGS_LOCK_IRQ);
+ xa_init_flags(&devp->qps, XA_FLAGS_LOCK_IRQ);
+ xa_init_flags(&devp->mrs, XA_FLAGS_LOCK_IRQ);
+ xa_init_flags(&devp->hwtids, XA_FLAGS_LOCK_IRQ);
+ xa_init_flags(&devp->atids, XA_FLAGS_LOCK_IRQ);
+ xa_init_flags(&devp->stids, XA_FLAGS_LOCK_IRQ);
mutex_init(&devp->rdev.stats.lock);
mutex_init(&devp->db_mutex);
INIT_LIST_HEAD(&devp->db_fc_list);
+ init_waitqueue_head(&devp->wait);
devp->avail_ird = devp->rdev.lldi.max_ird_adapter;
if (c4iw_debugfs_root) {
@@ -1034,24 +1078,24 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
DRV_VERSION);
- ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
ctx = ERR_PTR(-ENOMEM);
goto out;
}
ctx->lldi = *infop;
- PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
- __func__, pci_name(ctx->lldi.pdev),
- ctx->lldi.nchan, ctx->lldi.nrxq,
- ctx->lldi.ntxq, ctx->lldi.nports);
+ pr_debug("found device %s nchan %u nrxq %u ntxq %u nports %u\n",
+ pci_name(ctx->lldi.pdev),
+ ctx->lldi.nchan, ctx->lldi.nrxq,
+ ctx->lldi.ntxq, ctx->lldi.nports);
mutex_lock(&dev_mutex);
list_add_tail(&ctx->entry, &uld_ctx_list);
mutex_unlock(&dev_mutex);
for (i = 0; i < ctx->lldi.nrxq; i++)
- PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
+ pr_debug("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
return ctx;
}
@@ -1069,13 +1113,15 @@ static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
* The math here assumes sizeof cpl_pass_accept_req >= sizeof
* cpl_rx_pkt.
*/
- skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
- sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
+ skb = alloc_skb(size_add(gl->tot_len,
+ sizeof(struct cpl_pass_accept_req) +
+ sizeof(struct rss_header)) - pktshift,
+ GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
- __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
- sizeof(struct rss_header) - pktshift);
+ __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
+ sizeof(struct rss_header) - pktshift);
/*
* This skb will contain:
@@ -1108,8 +1154,7 @@ static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
goto out;
if (c4iw_handlers[opcode] == NULL) {
- pr_info("%s no handler opcode 0x%x...\n", __func__,
- opcode);
+ pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
kfree_skb(skb);
goto out;
}
@@ -1146,13 +1191,11 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
if (recv_rx_pkt(dev, gl, rsp))
return 0;
- pr_info("%s: unexpected FL contents at %p, " \
- "RSS %#llx, FL %#llx, len %u\n",
- pci_name(ctx->lldi.pdev), gl->va,
- (unsigned long long)be64_to_cpu(*rsp),
- (unsigned long long)be64_to_cpu(
- *(__force __be64 *)gl->va),
- gl->tot_len);
+ pr_info("%s: unexpected FL contents at %p, RSS %#llx, FL %#llx, len %u\n",
+ pci_name(ctx->lldi.pdev), gl->va,
+ be64_to_cpu(*rsp),
+ be64_to_cpu(*(__force __be64 *)gl->va),
+ gl->tot_len);
return 0;
} else {
@@ -1165,8 +1208,7 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
if (c4iw_handlers[opcode]) {
c4iw_handlers[opcode](dev, skb);
} else {
- pr_info("%s no handler opcode 0x%x...\n", __func__,
- opcode);
+ pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
kfree_skb(skb);
}
@@ -1179,45 +1221,35 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
struct uld_ctx *ctx = handle;
- PDBG("%s new_state %u\n", __func__, new_state);
+ pr_debug("new_state %u\n", new_state);
switch (new_state) {
case CXGB4_STATE_UP:
- printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
+ pr_info("%s: Up\n", pci_name(ctx->lldi.pdev));
if (!ctx->dev) {
- int ret;
-
ctx->dev = c4iw_alloc(&ctx->lldi);
if (IS_ERR(ctx->dev)) {
- printk(KERN_ERR MOD
- "%s: initialization failed: %ld\n",
- pci_name(ctx->lldi.pdev),
- PTR_ERR(ctx->dev));
+ pr_err("%s: initialization failed: %pe\n",
+ pci_name(ctx->lldi.pdev), ctx->dev);
ctx->dev = NULL;
break;
}
- ret = c4iw_register_device(ctx->dev);
- if (ret) {
- printk(KERN_ERR MOD
- "%s: RDMA registration failed: %d\n",
- pci_name(ctx->lldi.pdev), ret);
- c4iw_dealloc(ctx);
- }
+
+ INIT_WORK(&ctx->reg_work, c4iw_register_device);
+ queue_work(reg_workq, &ctx->reg_work);
}
break;
case CXGB4_STATE_DOWN:
- printk(KERN_INFO MOD "%s: Down\n",
- pci_name(ctx->lldi.pdev));
+ pr_info("%s: Down\n", pci_name(ctx->lldi.pdev));
if (ctx->dev)
c4iw_remove(ctx);
break;
+ case CXGB4_STATE_FATAL_ERROR:
case CXGB4_STATE_START_RECOVERY:
- printk(KERN_INFO MOD "%s: Fatal Error\n",
- pci_name(ctx->lldi.pdev));
+ pr_info("%s: Fatal Error\n", pci_name(ctx->lldi.pdev));
if (ctx->dev) {
- struct ib_event event;
+ struct ib_event event = {};
ctx->dev->rdev.flags |= T4_FATAL_ERROR;
- memset(&event, 0, sizeof event);
event.event = IB_EVENT_DEVICE_FATAL;
event.device = &ctx->dev->ibdev;
ib_dispatch_event(&event);
@@ -1225,8 +1257,7 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
}
break;
case CXGB4_STATE_DETACH:
- printk(KERN_INFO MOD "%s: Detach\n",
- pci_name(ctx->lldi.pdev));
+ pr_info("%s: Detach\n", pci_name(ctx->lldi.pdev));
if (ctx->dev)
c4iw_remove(ctx);
break;
@@ -1234,44 +1265,29 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
return 0;
}
-static int disable_qp_db(int id, void *p, void *data)
-{
- struct c4iw_qp *qp = p;
-
- t4_disable_wq_db(&qp->wq);
- return 0;
-}
-
static void stop_queues(struct uld_ctx *ctx)
{
- unsigned long flags;
+ struct c4iw_qp *qp;
+ unsigned long index, flags;
- spin_lock_irqsave(&ctx->dev->lock, flags);
+ xa_lock_irqsave(&ctx->dev->qps, flags);
ctx->dev->rdev.stats.db_state_transitions++;
ctx->dev->db_state = STOPPED;
- if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
- idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
- else
+ if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
+ xa_for_each(&ctx->dev->qps, index, qp)
+ t4_disable_wq_db(&qp->wq);
+ } else {
ctx->dev->rdev.status_page->db_off = 1;
- spin_unlock_irqrestore(&ctx->dev->lock, flags);
-}
-
-static int enable_qp_db(int id, void *p, void *data)
-{
- struct c4iw_qp *qp = p;
-
- t4_enable_wq_db(&qp->wq);
- return 0;
+ }
+ xa_unlock_irqrestore(&ctx->dev->qps, flags);
}
static void resume_rc_qp(struct c4iw_qp *qp)
{
spin_lock(&qp->lock);
- t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc,
- is_t5(qp->rhp->rdev.lldi.adapter_type), NULL);
+ t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc, NULL);
qp->wq.sq.wq_pidx_inc = 0;
- t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc,
- is_t5(qp->rhp->rdev.lldi.adapter_type), NULL);
+ t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc, NULL);
qp->wq.rq.wq_pidx_inc = 0;
spin_unlock(&qp->lock);
}
@@ -1293,18 +1309,21 @@ static void resume_a_chunk(struct uld_ctx *ctx)
static void resume_queues(struct uld_ctx *ctx)
{
- spin_lock_irq(&ctx->dev->lock);
+ xa_lock_irq(&ctx->dev->qps);
if (ctx->dev->db_state != STOPPED)
goto out;
ctx->dev->db_state = FLOW_CONTROL;
while (1) {
if (list_empty(&ctx->dev->db_fc_list)) {
+ struct c4iw_qp *qp;
+ unsigned long index;
+
WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
ctx->dev->db_state = NORMAL;
ctx->dev->rdev.stats.db_state_transitions++;
if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
- idr_for_each(&ctx->dev->qpidr, enable_qp_db,
- NULL);
+ xa_for_each(&ctx->dev->qps, index, qp)
+ t4_enable_wq_db(&qp->wq);
} else {
ctx->dev->rdev.status_page->db_off = 0;
}
@@ -1316,12 +1335,12 @@ static void resume_queues(struct uld_ctx *ctx)
resume_a_chunk(ctx);
}
if (!list_empty(&ctx->dev->db_fc_list)) {
- spin_unlock_irq(&ctx->dev->lock);
+ xa_unlock_irq(&ctx->dev->qps);
if (DB_FC_RESUME_DELAY) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(DB_FC_RESUME_DELAY);
}
- spin_lock_irq(&ctx->dev->lock);
+ xa_lock_irq(&ctx->dev->qps);
if (ctx->dev->db_state != FLOW_CONTROL)
break;
}
@@ -1330,7 +1349,7 @@ static void resume_queues(struct uld_ctx *ctx)
out:
if (ctx->dev->db_state != NORMAL)
ctx->dev->rdev.stats.db_fc_interruptions++;
- spin_unlock_irq(&ctx->dev->lock);
+ xa_unlock_irq(&ctx->dev->qps);
}
struct qp_list {
@@ -1338,23 +1357,6 @@ struct qp_list {
struct c4iw_qp **qps;
};
-static int add_and_ref_qp(int id, void *p, void *data)
-{
- struct qp_list *qp_listp = data;
- struct c4iw_qp *qp = p;
-
- c4iw_qp_add_ref(&qp->ibqp);
- qp_listp->qps[qp_listp->idx++] = qp;
- return 0;
-}
-
-static int count_qps(int id, void *p, void *data)
-{
- unsigned *countp = data;
- (*countp)++;
- return 0;
-}
-
static void deref_qps(struct qp_list *qp_list)
{
int idx;
@@ -1371,19 +1373,17 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
for (idx = 0; idx < qp_list->idx; idx++) {
struct c4iw_qp *qp = qp_list->qps[idx];
- spin_lock_irq(&qp->rhp->lock);
+ xa_lock_irq(&qp->rhp->qps);
spin_lock(&qp->lock);
ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
qp->wq.sq.qid,
t4_sq_host_wq_pidx(&qp->wq),
t4_sq_wq_size(&qp->wq));
if (ret) {
- pr_err(MOD "%s: Fatal error - "
- "DB overflow recovery failed - "
- "error syncing SQ qid %u\n",
+ pr_err("%s: Fatal error - DB overflow recovery failed - error syncing SQ qid %u\n",
pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
spin_unlock(&qp->lock);
- spin_unlock_irq(&qp->rhp->lock);
+ xa_unlock_irq(&qp->rhp->qps);
return;
}
qp->wq.sq.wq_pidx_inc = 0;
@@ -1394,17 +1394,15 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
t4_rq_wq_size(&qp->wq));
if (ret) {
- pr_err(MOD "%s: Fatal error - "
- "DB overflow recovery failed - "
- "error syncing RQ qid %u\n",
+ pr_err("%s: Fatal error - DB overflow recovery failed - error syncing RQ qid %u\n",
pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
spin_unlock(&qp->lock);
- spin_unlock_irq(&qp->rhp->lock);
+ xa_unlock_irq(&qp->rhp->qps);
return;
}
qp->wq.rq.wq_pidx_inc = 0;
spin_unlock(&qp->lock);
- spin_unlock_irq(&qp->rhp->lock);
+ xa_unlock_irq(&qp->rhp->qps);
/* Wait for the dbfifo to drain */
while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
@@ -1416,6 +1414,8 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
static void recover_queues(struct uld_ctx *ctx)
{
+ struct c4iw_qp *qp;
+ unsigned long index;
int count = 0;
struct qp_list qp_list;
int ret;
@@ -1427,30 +1427,32 @@ static void recover_queues(struct uld_ctx *ctx)
/* flush the SGE contexts */
ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
if (ret) {
- printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
+ pr_err("%s: Fatal error - DB overflow recovery failed\n",
pci_name(ctx->lldi.pdev));
return;
}
/* Count active queues so we can build a list of queues to recover */
- spin_lock_irq(&ctx->dev->lock);
+ xa_lock_irq(&ctx->dev->qps);
WARN_ON(ctx->dev->db_state != STOPPED);
ctx->dev->db_state = RECOVERY;
- idr_for_each(&ctx->dev->qpidr, count_qps, &count);
+ xa_for_each(&ctx->dev->qps, index, qp)
+ count++;
- qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
+ qp_list.qps = kcalloc(count, sizeof(*qp_list.qps), GFP_ATOMIC);
if (!qp_list.qps) {
- printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
- pci_name(ctx->lldi.pdev));
- spin_unlock_irq(&ctx->dev->lock);
+ xa_unlock_irq(&ctx->dev->qps);
return;
}
qp_list.idx = 0;
/* add and ref each qp so it doesn't get freed */
- idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);
+ xa_for_each(&ctx->dev->qps, index, qp) {
+ c4iw_qp_add_ref(&qp->ibqp);
+ qp_list.qps[qp_list.idx++] = qp;
+ }
- spin_unlock_irq(&ctx->dev->lock);
+ xa_unlock_irq(&ctx->dev->qps);
/* now traverse the list in a safe context to recover the db state*/
recover_lost_dbs(ctx, &qp_list);
@@ -1459,10 +1461,10 @@ static void recover_queues(struct uld_ctx *ctx)
deref_qps(&qp_list);
kfree(qp_list.qps);
- spin_lock_irq(&ctx->dev->lock);
+ xa_lock_irq(&ctx->dev->qps);
WARN_ON(ctx->dev->db_state != RECOVERY);
ctx->dev->db_state = STOPPED;
- spin_unlock_irq(&ctx->dev->lock);
+ xa_unlock_irq(&ctx->dev->qps);
}
static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
@@ -1487,8 +1489,8 @@ static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
mutex_unlock(&ctx->dev->rdev.stats.lock);
break;
default:
- printk(KERN_WARNING MOD "%s: unknown control cmd %u\n",
- pci_name(ctx->lldi.pdev), control);
+ pr_warn("%s: unknown control cmd %u\n",
+ pci_name(ctx->lldi.pdev), control);
break;
}
return 0;
@@ -1496,12 +1498,38 @@ static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
static struct cxgb4_uld_info c4iw_uld_info = {
.name = DRV_NAME,
+ .nrxq = MAX_ULD_QSETS,
+ .ntxq = MAX_ULD_QSETS,
+ .rxq_size = 511,
+ .ciq = true,
+ .lro = false,
.add = c4iw_uld_add,
.rx_handler = c4iw_uld_rx_handler,
.state_change = c4iw_uld_state_change,
.control = c4iw_uld_control,
};
+void _c4iw_free_wr_wait(struct kref *kref)
+{
+ struct c4iw_wr_wait *wr_waitp;
+
+ wr_waitp = container_of(kref, struct c4iw_wr_wait, kref);
+ pr_debug("Free wr_wait %p\n", wr_waitp);
+ kfree(wr_waitp);
+}
+
+struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp)
+{
+ struct c4iw_wr_wait *wr_waitp;
+
+ wr_waitp = kzalloc(sizeof(*wr_waitp), gfp);
+ if (wr_waitp) {
+ kref_init(&wr_waitp->kref);
+ pr_debug("wr_wait %p\n", wr_waitp);
+ }
+ return wr_waitp;
+}
+
static int __init c4iw_init_module(void)
{
int err;
@@ -1511,22 +1539,11 @@ static int __init c4iw_init_module(void)
return err;
c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
- if (!c4iw_debugfs_root)
- printk(KERN_WARNING MOD
- "could not create debugfs entry, continuing\n");
- if (ibnl_add_client(RDMA_NL_C4IW, RDMA_NL_IWPM_NUM_OPS,
- c4iw_nl_cb_table))
- pr_err("%s[%u]: Failed to add netlink callback\n"
- , __func__, __LINE__);
-
- err = iwpm_init(RDMA_NL_C4IW);
- if (err) {
- pr_err("port mapper initialization failed with %d\n", err);
- ibnl_remove_client(RDMA_NL_C4IW);
- c4iw_cm_term();
- debugfs_remove_recursive(c4iw_debugfs_root);
- return err;
+ reg_workq = create_singlethread_workqueue("Register_iWARP_device");
+ if (!reg_workq) {
+ pr_err("Failed creating workqueue to register iwarp device\n");
+ return -ENOMEM;
}
cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
@@ -1545,9 +1562,8 @@ static void __exit c4iw_exit_module(void)
kfree(ctx);
}
mutex_unlock(&dev_mutex);
+ destroy_workqueue(reg_workq);
cxgb4_unregister_uld(CXGB4_ULD_RDMA);
- iwpm_exit(RDMA_NL_C4IW);
- ibnl_remove_client(RDMA_NL_C4IW);
c4iw_cm_term();
debugfs_remove_recursive(c4iw_debugfs_root);
}
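
Throughout device.c the per-device idr tables guarded by devp->lock become xarrays with internal IRQ-safe locking (XA_FLAGS_LOCK_IRQ), and the idr_for_each() callbacks become open-coded xa_for_each() loops. A kernel-style sketch of that pattern, with hypothetical names, only meant to mirror the calls used in the hunks above (not buildable outside a kernel tree):

#include <linux/types.h>
#include <linux/printk.h>
#include <linux/xarray.h>

struct obj {
	u32 id;
};

static DEFINE_XARRAY_FLAGS(objs, XA_FLAGS_LOCK_IRQ);

static int obj_track(struct obj *o)
{
	/* replaces idr_alloc() under a driver spinlock */
	return xa_insert_irq(&objs, o->id, o, GFP_KERNEL);
}

static void obj_walk(void)
{
	struct obj *o;
	unsigned long index;

	/* replaces idr_for_each() plus a callback */
	xa_lock_irq(&objs);
	xa_for_each(&objs, index, o)
		pr_debug("obj id %lu\n", index);
	xa_unlock_irq(&objs);
}

static void obj_untrack(struct obj *o)
{
	/* replaces remove_handle()/idr_remove() */
	xa_erase_irq(&objs, o->id);
}
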
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index bdfac2ccb704..34211a533d5c 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -47,17 +47,16 @@ static void print_tpte(struct c4iw_dev *dev, u32 stag)
"%s cxgb4_read_tpte err %d\n", __func__, ret);
return;
}
- PDBG("stag idx 0x%x valid %d key 0x%x state %d pdid %d "
- "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
- stag & 0xffffff00,
- FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
- FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
- FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
- FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
- FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
- FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
- ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
- ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
+ pr_debug("stag idx 0x%x valid %d key 0x%x state %d pdid %d perm 0x%x ps %d len 0x%llx va 0x%llx\n",
+ stag & 0xffffff00,
+ FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
+ FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
+ FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
+ FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
+ FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
+ FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
+ ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
+ ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
}
static void dump_err_cqe(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
@@ -71,9 +70,10 @@ static void dump_err_cqe(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), ntohl(err_cqe->len),
CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
- PDBG("%016llx %016llx %016llx %016llx\n",
- be64_to_cpu(p[0]), be64_to_cpu(p[1]), be64_to_cpu(p[2]),
- be64_to_cpu(p[3]));
+ pr_debug("%016llx %016llx %016llx %016llx - %016llx %016llx %016llx %016llx\n",
+ be64_to_cpu(p[0]), be64_to_cpu(p[1]), be64_to_cpu(p[2]),
+ be64_to_cpu(p[3]), be64_to_cpu(p[4]), be64_to_cpu(p[5]),
+ be64_to_cpu(p[6]), be64_to_cpu(p[7]));
/*
* Ingress WRITE and READ_RESP errors provide
@@ -110,9 +110,11 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
if (qhp->ibqp.event_handler)
(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
- spin_lock_irqsave(&chp->comp_handler_lock, flag);
- (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
- spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
+ if (t4_clear_cq_armed(&chp->cq)) {
+ spin_lock_irqsave(&chp->comp_handler_lock, flag);
+ (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+ spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
+ }
}
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
@@ -121,16 +123,15 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
struct c4iw_qp *qhp;
u32 cqid;
- spin_lock_irq(&dev->lock);
- qhp = get_qhp(dev, CQE_QPID(err_cqe));
+ xa_lock_irq(&dev->qps);
+ qhp = xa_load(&dev->qps, CQE_QPID(err_cqe));
if (!qhp) {
- printk(KERN_ERR MOD "BAD AE qpid 0x%x opcode %d "
- "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+ pr_err("BAD AE qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
CQE_QPID(err_cqe),
CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
CQE_WRID_LOW(err_cqe));
- spin_unlock_irq(&dev->lock);
+ xa_unlock_irq(&dev->qps);
goto out;
}
@@ -140,19 +141,18 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
cqid = qhp->attr.rcq;
chp = get_chp(dev, cqid);
if (!chp) {
- printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d "
- "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+ pr_err("BAD AE cqid 0x%x qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
cqid, CQE_QPID(err_cqe),
CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
CQE_WRID_LOW(err_cqe));
- spin_unlock_irq(&dev->lock);
+ xa_unlock_irq(&dev->qps);
goto out;
}
c4iw_qp_add_ref(&qhp->ibqp);
- atomic_inc(&chp->refcnt);
- spin_unlock_irq(&dev->lock);
+ refcount_inc(&chp->refcnt);
+ xa_unlock_irq(&dev->qps);
/* Bad incoming write */
if (RQ_TYPE(err_cqe) &&
@@ -165,7 +165,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
/* Completion Events */
case T4_ERR_SUCCESS:
- printk(KERN_ERR MOD "AE with status 0!\n");
+ pr_err("AE with status 0!\n");
break;
case T4_ERR_STAG:
@@ -207,14 +207,13 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
break;
default:
- printk(KERN_ERR MOD "Unknown T4 status 0x%x QPID 0x%x\n",
+ pr_err("Unknown T4 status 0x%x QPID 0x%x\n",
CQE_STATUS(err_cqe), qhp->wq.sq.qid);
post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
break;
}
done:
- if (atomic_dec_and_test(&chp->refcnt))
- wake_up(&chp->wait);
+ c4iw_cq_rem_ref(chp);
c4iw_qp_rem_ref(&qhp->ibqp);
out:
return;
@@ -225,20 +224,19 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
struct c4iw_cq *chp;
unsigned long flag;
- spin_lock_irqsave(&dev->lock, flag);
- chp = get_chp(dev, qid);
+ xa_lock_irqsave(&dev->cqs, flag);
+ chp = xa_load(&dev->cqs, qid);
if (chp) {
- atomic_inc(&chp->refcnt);
- spin_unlock_irqrestore(&dev->lock, flag);
+ refcount_inc(&chp->refcnt);
+ xa_unlock_irqrestore(&dev->cqs, flag);
t4_clear_cq_armed(&chp->cq);
spin_lock_irqsave(&chp->comp_handler_lock, flag);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
- if (atomic_dec_and_test(&chp->refcnt))
- wake_up(&chp->wait);
+ c4iw_cq_rem_ref(chp);
} else {
- PDBG("%s unknown cqid 0x%x\n", __func__, qid);
- spin_unlock_irqrestore(&dev->lock, flag);
+ pr_debug("unknown cqid 0x%x\n", qid);
+ xa_unlock_irqrestore(&dev->cqs, flag);
}
return 0;
}
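
In ev.c the CQ lookup paths now take a reference with refcount_inc() and drop it through c4iw_cq_rem_ref() instead of atomic_dec_and_test() plus wake_up(); together with the init_completion(&chp->cq_rel_comp) added in c4iw_create_cq(), the final put can signal a completion that the destroy path waits on. The helper body is not part of this hunk, so the following is only an assumed sketch of its shape, and the teardown caller name is hypothetical:

/* assumed shape of the release helper used above */
void c4iw_cq_rem_ref(struct c4iw_cq *chp)
{
	if (refcount_dec_and_test(&chp->refcnt))
		complete(&chp->cq_rel_comp);
}

/* hypothetical destroy-side caller */
static void cq_wait_for_users(struct c4iw_cq *chp)
{
	c4iw_cq_rem_ref(chp);			/* drop the initial reference */
	wait_for_completion(&chp->cq_rel_comp);	/* wait out the event paths */
}
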
diff --git a/drivers/infiniband/hw/cxgb4/id_table.c b/drivers/infiniband/hw/cxgb4/id_table.c
index 0161ae6ad629..e2188b335e76 100644
--- a/drivers/infiniband/hw/cxgb4/id_table.c
+++ b/drivers/infiniband/hw/cxgb4/id_table.c
@@ -54,12 +54,12 @@ u32 c4iw_id_alloc(struct c4iw_id_table *alloc)
if (obj < alloc->max) {
if (alloc->flags & C4IW_ID_TABLE_F_RANDOM)
- alloc->last += prandom_u32() % RANDOM_SKIP;
+ alloc->last += get_random_u32_below(RANDOM_SKIP);
else
alloc->last = obj + 1;
if (alloc->last >= alloc->max)
alloc->last = 0;
- set_bit(obj, alloc->table);
+ __set_bit(obj, alloc->table);
obj += alloc->start;
} else
obj = -1;
@@ -73,40 +73,34 @@ void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj)
unsigned long flags;
obj -= alloc->start;
- BUG_ON((int)obj < 0);
spin_lock_irqsave(&alloc->lock, flags);
- clear_bit(obj, alloc->table);
+ __clear_bit(obj, alloc->table);
spin_unlock_irqrestore(&alloc->lock, flags);
}
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
u32 reserved, u32 flags)
{
- int i;
-
alloc->start = start;
alloc->flags = flags;
if (flags & C4IW_ID_TABLE_F_RANDOM)
- alloc->last = prandom_u32() % RANDOM_SKIP;
+ alloc->last = get_random_u32_below(RANDOM_SKIP);
else
alloc->last = 0;
- alloc->max = num;
+ alloc->max = num;
spin_lock_init(&alloc->lock);
- alloc->table = kmalloc(BITS_TO_LONGS(num) * sizeof(long),
- GFP_KERNEL);
+ alloc->table = bitmap_zalloc(num, GFP_KERNEL);
if (!alloc->table)
return -ENOMEM;
- bitmap_zero(alloc->table, num);
if (!(alloc->flags & C4IW_ID_TABLE_F_EMPTY))
- for (i = 0; i < reserved; ++i)
- set_bit(i, alloc->table);
+ bitmap_set(alloc->table, 0, reserved);
return 0;
}
void c4iw_id_table_free(struct c4iw_id_table *alloc)
{
- kfree(alloc->table);
+ bitmap_free(alloc->table);
}
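
The id_table changes above move the bitmap to bitmap_zalloc()/bitmap_free() and switch to the non-atomic __set_bit()/__clear_bit(), which is safe because every bit update already runs under alloc->lock. A runnable user-space analogue of the round-robin allocator, simplified to one byte per ID and with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct id_table {
	unsigned int max, last;
	unsigned char *bits;		/* like bitmap_zalloc(num, ...) */
};

static int id_table_init(struct id_table *t, unsigned int max)
{
	t->bits = calloc(max, 1);
	if (!t->bits)
		return -1;
	t->max = max;
	t->last = 0;
	return 0;
}

static int id_alloc(struct id_table *t)
{
	/* scan from 'last' with wrap, like find_next_zero_bit() */
	for (unsigned int i = 0; i < t->max; i++) {
		unsigned int id = (t->last + i) % t->max;

		if (!t->bits[id]) {
			t->bits[id] = 1;	/* like __set_bit() */
			t->last = (id + 1) % t->max;
			return (int)id;
		}
	}
	return -1;
}

static void id_free(struct id_table *t, unsigned int id)
{
	t->bits[id] = 0;			/* like __clear_bit() */
}

int main(void)
{
	struct id_table t;

	if (id_table_init(&t, 8))
		return 1;
	printf("%d %d\n", id_alloc(&t), id_alloc(&t));	/* prints: 0 1 */
	id_free(&t, 0);
	printf("%d\n", id_alloc(&t));			/* prints: 2 */
	free(t.bits);
	return 0;
}
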
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index c7bb38c931a5..e17c1252536b 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -34,10 +34,10 @@
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
#include <linux/completion.h>
#include <linux/netdevice.h>
-#include <linux/sched.h>
+#include <linux/sched/mm.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/inet.h>
@@ -45,6 +45,7 @@
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>
+#include <linux/workqueue.h>
#include <asm/byteorder.h>
@@ -54,21 +55,21 @@
#include <rdma/iw_cm.h>
#include <rdma/rdma_netlink.h>
#include <rdma/iw_portmap.h>
+#include <rdma/restrack.h>
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "l2t.h"
-#include "user.h"
+#include <rdma/cxgb4-abi.h>
#define DRV_NAME "iw_cxgb4"
#define MOD DRV_NAME ":"
-extern int c4iw_debug;
-#define PDBG(fmt, args...) \
-do { \
- if (c4iw_debug) \
- printk(MOD fmt, ## args); \
-} while (0)
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "t4.h"
@@ -96,6 +97,7 @@ struct c4iw_resource {
struct c4iw_id_table tpt_table;
struct c4iw_id_table qid_table;
struct c4iw_id_table pdid_table;
+ struct c4iw_id_table srq_table;
};
struct c4iw_qid_list {
@@ -107,6 +109,7 @@ struct c4iw_dev_ucontext {
struct list_head qpids;
struct list_head cqids;
struct mutex lock;
+ struct kref kref;
};
enum c4iw_rdev_flags {
@@ -128,6 +131,8 @@ struct c4iw_stats {
struct c4iw_stat stag;
struct c4iw_stat pbl;
struct c4iw_stat rqt;
+ struct c4iw_stat srqt;
+ struct c4iw_stat srq;
struct c4iw_stat ocqp;
u64 db_full;
u64 db_empty;
@@ -152,8 +157,8 @@ struct c4iw_hw_queue {
};
struct wr_log_entry {
- struct timespec post_host_ts;
- struct timespec poll_host_ts;
+ ktime_t post_host_time;
+ ktime_t poll_host_time;
u64 post_sge_ts;
u64 cqe_sge_ts;
u64 poll_sge_ts;
@@ -183,6 +188,11 @@ struct c4iw_rdev {
atomic_t wr_log_idx;
struct wr_log_entry *wr_log;
int wr_log_size;
+ struct workqueue_struct *free_workq;
+ struct completion rqt_compl;
+ struct completion pbl_compl;
+ struct kref rqt_kref;
+ struct kref pbl_kref;
};
static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -200,18 +210,50 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
struct c4iw_wr_wait {
struct completion completion;
int ret;
+ struct kref kref;
};
+void _c4iw_free_wr_wait(struct kref *kref);
+
+static inline void c4iw_put_wr_wait(struct c4iw_wr_wait *wr_waitp)
+{
+ pr_debug("wr_wait %p ref before put %u\n", wr_waitp,
+ kref_read(&wr_waitp->kref));
+ WARN_ON(kref_read(&wr_waitp->kref) == 0);
+ kref_put(&wr_waitp->kref, _c4iw_free_wr_wait);
+}
+
+static inline void c4iw_get_wr_wait(struct c4iw_wr_wait *wr_waitp)
+{
+ pr_debug("wr_wait %p ref before get %u\n", wr_waitp,
+ kref_read(&wr_waitp->kref));
+ WARN_ON(kref_read(&wr_waitp->kref) == 0);
+ kref_get(&wr_waitp->kref);
+}
+
static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
wr_waitp->ret = 0;
init_completion(&wr_waitp->completion);
}
-static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
+static inline void _c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret,
+ bool deref)
{
wr_waitp->ret = ret;
complete(&wr_waitp->completion);
+ if (deref)
+ c4iw_put_wr_wait(wr_waitp);
+}
+
+static inline void c4iw_wake_up_noref(struct c4iw_wr_wait *wr_waitp, int ret)
+{
+ _c4iw_wake_up(wr_waitp, ret, false);
+}
+
+static inline void c4iw_wake_up_deref(struct c4iw_wr_wait *wr_waitp, int ret)
+{
+ _c4iw_wake_up(wr_waitp, ret, true);
}
static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
@@ -228,18 +270,40 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
if (!ret) {
- PDBG("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
- func, pci_name(rdev->lldi.pdev), hwtid, qpid);
+ pr_err("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
+ func, pci_name(rdev->lldi.pdev), hwtid, qpid);
rdev->flags |= T4_FATAL_ERROR;
wr_waitp->ret = -EIO;
+ goto out;
}
-out:
if (wr_waitp->ret)
- PDBG("%s: FW reply %d tid %u qpid %u\n",
- pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
+ pr_debug("%s: FW reply %d tid %u qpid %u\n",
+ pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
+out:
return wr_waitp->ret;
}
+int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
+
+static inline int c4iw_ref_send_wait(struct c4iw_rdev *rdev,
+ struct sk_buff *skb,
+ struct c4iw_wr_wait *wr_waitp,
+ u32 hwtid, u32 qpid,
+ const char *func)
+{
+ int ret;
+
+ pr_debug("%s wr_wait %p hwtid %u qpid %u\n", func, wr_waitp, hwtid,
+ qpid);
+ c4iw_get_wr_wait(wr_waitp);
+ ret = c4iw_ofld_send(rdev, skb);
+ if (ret) {
+ c4iw_put_wr_wait(wr_waitp);
+ return ret;
+ }
+ return c4iw_wait_for_reply(rdev, wr_waitp, hwtid, qpid, func);
+}
+
enum db_state {
NORMAL = 0,
FLOW_CONTROL = 1,
@@ -250,19 +314,25 @@ enum db_state {
struct c4iw_dev {
struct ib_device ibdev;
struct c4iw_rdev rdev;
- u32 device_cap_flags;
- struct idr cqidr;
- struct idr qpidr;
- struct idr mmidr;
- spinlock_t lock;
+ struct xarray cqs;
+ struct xarray qps;
+ struct xarray mrs;
struct mutex db_mutex;
struct dentry *debugfs_root;
enum db_state db_state;
- struct idr hwtid_idr;
- struct idr atid_idr;
- struct idr stid_idr;
+ struct xarray hwtids;
+ struct xarray atids;
+ struct xarray stids;
struct list_head db_fc_list;
u32 avail_ird;
+ wait_queue_head_t wait;
+};
+
+struct uld_ctx {
+ struct list_head entry;
+ struct cxgb4_lld_info lldi;
+ struct c4iw_dev *dev;
+ struct work_struct reg_work;
};
static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
@@ -270,78 +340,14 @@ static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
return container_of(ibdev, struct c4iw_dev, ibdev);
}
-static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
-{
- return container_of(rdev, struct c4iw_dev, rdev);
-}
-
static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
{
- return idr_find(&rhp->cqidr, cqid);
+ return xa_load(&rhp->cqs, cqid);
}
static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
{
- return idr_find(&rhp->qpidr, qpid);
-}
-
-static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
-{
- return idr_find(&rhp->mmidr, mmid);
-}
-
-static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
- void *handle, u32 id, int lock)
-{
- int ret;
-
- if (lock) {
- idr_preload(GFP_KERNEL);
- spin_lock_irq(&rhp->lock);
- }
-
- ret = idr_alloc(idr, handle, id, id + 1, GFP_ATOMIC);
-
- if (lock) {
- spin_unlock_irq(&rhp->lock);
- idr_preload_end();
- }
-
- BUG_ON(ret == -ENOSPC);
- return ret < 0 ? ret : 0;
-}
-
-static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
- void *handle, u32 id)
-{
- return _insert_handle(rhp, idr, handle, id, 1);
-}
-
-static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
- void *handle, u32 id)
-{
- return _insert_handle(rhp, idr, handle, id, 0);
-}
-
-static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
- u32 id, int lock)
-{
- if (lock)
- spin_lock_irq(&rhp->lock);
- idr_remove(idr, id);
- if (lock)
- spin_unlock_irq(&rhp->lock);
-}
-
-static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
-{
- _remove_handle(rhp, idr, id, 1);
-}
-
-static inline void remove_handle_nolock(struct c4iw_dev *rhp,
- struct idr *idr, u32 id)
-{
- _remove_handle(rhp, idr, id, 0);
+ return xa_load(&rhp->qps, qpid);
}
extern uint c4iw_max_read_depth;
@@ -384,8 +390,14 @@ struct c4iw_mr {
struct ib_mr ibmr;
struct ib_umem *umem;
struct c4iw_dev *rhp;
+ struct sk_buff *dereg_skb;
u64 kva;
struct tpt_attributes attr;
+ u64 *mpl;
+ dma_addr_t mpl_addr;
+ u32 max_mpl_len;
+ u32 mpl_len;
+ struct c4iw_wr_wait *wr_waitp;
};
static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
@@ -396,8 +408,10 @@ static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
struct c4iw_mw {
struct ib_mw ibmw;
struct c4iw_dev *rhp;
+ struct sk_buff *dereg_skb;
u64 kva;
struct tpt_attributes attr;
+ struct c4iw_wr_wait *wr_waitp;
};
static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
@@ -405,28 +419,16 @@ static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
return container_of(ibmw, struct c4iw_mw, ibmw);
}
-struct c4iw_fr_page_list {
- struct ib_fast_reg_page_list ibpl;
- DEFINE_DMA_UNMAP_ADDR(mapping);
- dma_addr_t dma_addr;
- struct c4iw_dev *dev;
- int pll_len;
-};
-
-static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
- struct ib_fast_reg_page_list *ibpl)
-{
- return container_of(ibpl, struct c4iw_fr_page_list, ibpl);
-}
-
struct c4iw_cq {
struct ib_cq ibcq;
struct c4iw_dev *rhp;
+ struct sk_buff *destroy_skb;
struct t4_cq cq;
spinlock_t lock;
spinlock_t comp_handler_lock;
- atomic_t refcnt;
- wait_queue_head_t wait;
+ refcount_t refcnt;
+ struct completion cq_rel_comp;
+ struct c4iw_wr_wait *wr_waitp;
};
static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
@@ -482,10 +484,13 @@ struct c4iw_qp {
struct t4_wq wq;
spinlock_t lock;
struct mutex mutex;
- atomic_t refcnt;
wait_queue_head_t wait;
- struct timer_list timer;
int sq_sig_all;
+ struct c4iw_srq *srq;
+ struct c4iw_ucontext *ucontext;
+ struct c4iw_wr_wait *wr_waitp;
+ struct completion qp_rel_comp;
+ refcount_t qp_refcnt;
};
static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
@@ -493,12 +498,33 @@ static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
return container_of(ibqp, struct c4iw_qp, ibqp);
}
+struct c4iw_srq {
+ struct ib_srq ibsrq;
+ struct list_head db_fc_entry;
+ struct c4iw_dev *rhp;
+ struct t4_srq wq;
+ struct sk_buff *destroy_skb;
+ u32 srq_limit;
+ u32 pdid;
+ int idx;
+ u32 flags;
+ spinlock_t lock; /* protects srq */
+ struct c4iw_wr_wait *wr_waitp;
+ bool armed;
+};
+
+static inline struct c4iw_srq *to_c4iw_srq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct c4iw_srq, ibsrq);
+}
+
struct c4iw_ucontext {
struct ib_ucontext ibucontext;
struct c4iw_dev_ucontext uctx;
u32 key;
spinlock_t mmap_lock;
struct list_head mmaps;
+ bool is_32b_cqe;
};
static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
@@ -506,11 +532,21 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
return container_of(c, struct c4iw_ucontext, ibucontext);
}
+enum {
+ CXGB4_MMAP_BAR,
+ CXGB4_MMAP_BAR_WC,
+ CXGB4_MMAP_CONTIG,
+ CXGB4_MMAP_NON_CONTIG,
+};
+
struct c4iw_mm_entry {
struct list_head entry;
u64 addr;
u32 key;
+ void *vaddr;
+ dma_addr_t dma_addr;
unsigned len;
+ u8 mmap_flag;
};
static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
@@ -526,8 +562,8 @@ static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
if (mm->key == key && mm->len == len) {
list_del_init(&mm->entry);
spin_unlock(&ucontext->mmap_lock);
- PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
- key, (unsigned long long) mm->addr, mm->len);
+ pr_debug("key 0x%x addr 0x%llx len %d\n", key,
+ (unsigned long long)mm->addr, mm->len);
return mm;
}
}
@@ -535,12 +571,38 @@ static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
return NULL;
}
+static inline void insert_flag_to_mmap(struct c4iw_rdev *rdev,
+ struct c4iw_mm_entry *mm, u64 addr)
+{
+ if (addr >= pci_resource_start(rdev->lldi.pdev, 0) &&
+ (addr < (pci_resource_start(rdev->lldi.pdev, 0) +
+ pci_resource_len(rdev->lldi.pdev, 0))))
+ mm->mmap_flag = CXGB4_MMAP_BAR;
+ else if (addr >= pci_resource_start(rdev->lldi.pdev, 2) &&
+ (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
+ pci_resource_len(rdev->lldi.pdev, 2)))) {
+ if (addr >= rdev->oc_mw_pa) {
+ mm->mmap_flag = CXGB4_MMAP_BAR_WC;
+ } else {
+ if (is_t4(rdev->lldi.adapter_type))
+ mm->mmap_flag = CXGB4_MMAP_BAR;
+ else
+ mm->mmap_flag = CXGB4_MMAP_BAR_WC;
+ }
+ } else {
+ if (addr)
+ mm->mmap_flag = CXGB4_MMAP_CONTIG;
+ else
+ mm->mmap_flag = CXGB4_MMAP_NON_CONTIG;
+ }
+}
+
static inline void insert_mmap(struct c4iw_ucontext *ucontext,
struct c4iw_mm_entry *mm)
{
spin_lock(&ucontext->mmap_lock);
- PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
- mm->key, (unsigned long long) mm->addr, mm->len);
+ pr_debug("key 0x%x addr 0x%llx len %d\n",
+ mm->key, (unsigned long long)mm->addr, mm->len);
list_add_tail(&mm->entry, &ucontext->mmaps);
spin_unlock(&ucontext->mmap_lock);
}
@@ -627,12 +689,6 @@ static inline u32 c4iw_ib_to_tpt_access(int a)
FW_RI_MEM_ACCESS_LOCAL_READ;
}
-static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
-{
- return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
- (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
-}
-
enum c4iw_mmid_state {
C4IW_STAG_STATE_VALID,
C4IW_STAG_STATE_INVALID
@@ -656,17 +712,17 @@ enum c4iw_mmid_state {
#define MPA_V2_RDMA_READ_RTR 0x4000
#define MPA_V2_IRD_ORD_MASK 0x3FFF
-#define c4iw_put_ep(ep) { \
- PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \
- ep, atomic_read(&((ep)->kref.refcount))); \
- WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
- kref_put(&((ep)->kref), _c4iw_free_ep); \
+#define c4iw_put_ep(ep) { \
+ pr_debug("put_ep ep %p refcnt %d\n", \
+ ep, kref_read(&((ep)->kref))); \
+ WARN_ON(kref_read(&((ep)->kref)) < 1); \
+ kref_put(&((ep)->kref), _c4iw_free_ep); \
}
-#define c4iw_get_ep(ep) { \
- PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
- ep, atomic_read(&((ep)->kref.refcount))); \
- kref_get(&((ep)->kref)); \
+#define c4iw_get_ep(ep) { \
+ pr_debug("get_ep ep %p, refcnt %d\n", \
+ ep, kref_read(&((ep)->kref))); \
+ kref_get(&((ep)->kref)); \
}
void _c4iw_free_ep(struct kref *kref);
@@ -675,7 +731,7 @@ struct mpa_message {
u8 flags;
u8 revision;
__be16 private_data_size;
- u8 private_data[0];
+ u8 private_data[];
};
struct mpa_v2_conn_params {
@@ -687,7 +743,7 @@ struct terminate_message {
u8 layer_etype;
u8 ecode;
__be16 hdrct_rsvd;
- u8 len_hdrs[0];
+ u8 len_hdrs[];
};
#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
@@ -763,7 +819,7 @@ enum c4iw_ep_flags {
CLOSE_SENT = 3,
TIMEOUT = 4,
QP_REFERENCED = 5,
- RELEASE_MAPINFO = 6,
+ STOP_MPA_TIMER = 7,
};
enum c4iw_ep_history {
@@ -788,21 +844,47 @@ enum c4iw_ep_history {
EP_DISC_ABORT = 18,
CONN_RPL_UPCALL = 19,
ACT_RETRY_NOMEM = 20,
- ACT_RETRY_INUSE = 21
+ ACT_RETRY_INUSE = 21,
+ CLOSE_CON_RPL = 22,
+ EP_DISC_FAIL = 24,
+ QP_REFED = 25,
+ QP_DEREFED = 26,
+ CM_ID_REFED = 27,
+ CM_ID_DEREFED = 28,
+};
+
+enum conn_pre_alloc_buffers {
+ CN_ABORT_REQ_BUF,
+ CN_ABORT_RPL_BUF,
+ CN_CLOSE_CON_REQ_BUF,
+ CN_DESTROY_BUF,
+ CN_FLOWC_BUF,
+ CN_MAX_CON_BUF
+};
+
+enum {
+ FLOWC_LEN = offsetof(struct fw_flowc_wr, mnemval[FW_FLOWC_MNEM_MAX])
+};
+
+union cpl_wr_size {
+ struct cpl_abort_req abrt_req;
+ struct cpl_abort_rpl abrt_rpl;
+ struct fw_ri_wr ri_req;
+ struct cpl_close_con_req close_req;
+ char flowc_buf[FLOWC_LEN];
};
struct c4iw_ep_common {
struct iw_cm_id *cm_id;
struct c4iw_qp *qp;
struct c4iw_dev *dev;
+ struct sk_buff_head ep_skb_list;
enum c4iw_ep_state state;
struct kref kref;
struct mutex mutex;
struct sockaddr_storage local_addr;
struct sockaddr_storage remote_addr;
- struct sockaddr_storage mapped_local_addr;
- struct sockaddr_storage mapped_remote_addr;
- struct c4iw_wr_wait wr_wait;
+ struct c4iw_wr_wait *wr_waitp;
unsigned long flags;
unsigned long history;
};
@@ -850,48 +932,13 @@ struct c4iw_ep {
unsigned int retry_count;
int snd_win;
int rcv_win;
+ u32 snd_wscale;
struct c4iw_ep_stats stats;
+ u32 srqe_idx;
+ u32 rx_pdu_out_cnt;
+ struct sk_buff *peer_abort_skb;
};
-static inline void print_addr(struct c4iw_ep_common *epc, const char *func,
- const char *msg)
-{
-
-#define SINA(a) (&(((struct sockaddr_in *)(a))->sin_addr.s_addr))
-#define SINP(a) ntohs(((struct sockaddr_in *)(a))->sin_port)
-#define SIN6A(a) (&(((struct sockaddr_in6 *)(a))->sin6_addr))
-#define SIN6P(a) ntohs(((struct sockaddr_in6 *)(a))->sin6_port)
-
- if (c4iw_debug) {
- switch (epc->local_addr.ss_family) {
- case AF_INET:
- PDBG("%s %s %pI4:%u/%u <-> %pI4:%u/%u\n",
- func, msg, SINA(&epc->local_addr),
- SINP(&epc->local_addr),
- SINP(&epc->mapped_local_addr),
- SINA(&epc->remote_addr),
- SINP(&epc->remote_addr),
- SINP(&epc->mapped_remote_addr));
- break;
- case AF_INET6:
- PDBG("%s %s %pI6:%u/%u <-> %pI6:%u/%u\n",
- func, msg, SIN6A(&epc->local_addr),
- SIN6P(&epc->local_addr),
- SIN6P(&epc->mapped_local_addr),
- SIN6A(&epc->remote_addr),
- SIN6P(&epc->remote_addr),
- SIN6P(&epc->mapped_remote_addr));
- break;
- default:
- break;
- }
- }
-#undef SINA
-#undef SINP
-#undef SIN6A
-#undef SIN6P
-}
-
static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
{
return cm_id->provider_data;
@@ -902,15 +949,6 @@ static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
return cm_id->provider_data;
}
-static inline int compute_wscale(int win)
-{
- int wscale = 0;
-
- while (wscale < 14 && (65535<<wscale) < win)
- wscale++;
- return wscale;
-}
-
static inline int ocqp_supported(const struct cxgb4_lld_info *infop)
{
#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
@@ -928,14 +966,12 @@ void c4iw_id_table_free(struct c4iw_id_table *alloc);
typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);
-int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
- struct l2t_entry *l2t);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
-int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
-int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
+int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt,
+ u32 nr_pdid, u32 nr_srqt);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev);
@@ -943,8 +979,7 @@ void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
-int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
-int c4iw_register_device(struct c4iw_dev *dev);
+void c4iw_register_device(struct work_struct *work);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
void c4iw_cm_term(void);
@@ -953,12 +988,10 @@ void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
struct c4iw_dev_ucontext *uctx);
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
-int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
-int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
- struct ib_mw_bind *mw_bind);
+int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
int c4iw_destroy_listen(struct iw_cm_id *cm_id);
@@ -966,42 +999,31 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
void c4iw_qp_add_ref(struct ib_qp *qp);
void c4iw_qp_rem_ref(struct ib_qp *qp);
-void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list);
-struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
- struct ib_device *device,
- int page_list_len);
-struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
+struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg);
-int c4iw_dealloc_mw(struct ib_mw *mw);
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
+int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
+void c4iw_dealloc(struct uld_ctx *ctx);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
u64 length, u64 virt, int acc,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
-struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
- struct ib_phys_buf *buffer_list,
- int num_phys_buf,
- int acc,
- u64 *iova_start);
-int c4iw_reregister_phys_mem(struct ib_mr *mr,
- int mr_rereg_mask,
- struct ib_pd *pd,
- struct ib_phys_buf *buffer_list,
- int num_phys_buf,
- int acc, u64 *iova_start);
-int c4iw_dereg_mr(struct ib_mr *ib_mr);
-int c4iw_destroy_cq(struct ib_cq *ib_cq);
-struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *ib_context,
- struct ib_udata *udata);
-int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
+int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
+int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+void c4iw_cq_rem_ref(struct c4iw_cq *chp);
+int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
-int c4iw_destroy_qp(struct ib_qp *ib_qp);
-struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *attrs,
- struct ib_udata *udata);
+int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask srq_attr_mask,
+ struct ib_udata *udata);
+int c4iw_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata);
+int c4iw_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *attrs,
+ struct ib_udata *udata);
+int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata);
+int c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata);
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
@@ -1013,15 +1035,12 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
-int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
-void c4iw_flush_hw_cq(struct c4iw_cq *chp);
+void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_flush_sq(struct c4iw_qp *qhp);
int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
-u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
-int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
struct c4iw_dev_ucontext *uctx);
@@ -1035,11 +1054,24 @@ extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
enum cxgb4_bar2_qtype qtype,
unsigned int *pbar2_qid, u64 *pbar2_pa);
+int c4iw_alloc_srq_idx(struct c4iw_rdev *rdev);
+void c4iw_free_srq_idx(struct c4iw_rdev *rdev, int idx);
extern void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe);
extern int c4iw_wr_log;
extern int db_fc_threshold;
extern int db_coalescing_threshold;
extern int use_dsgl;
-
+void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
+void c4iw_dispatch_srq_limit_reached_event(struct c4iw_srq *srq);
+void c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16);
+void c4iw_flush_srqidx(struct c4iw_qp *qhp, u32 srqidx);
+int c4iw_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp);
+
+int c4iw_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr);
+int c4iw_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ibcq);
+int c4iw_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ibqp);
+int c4iw_fill_res_cm_id_entry(struct sk_buff *msg, struct rdma_cm_id *cm_id);
#endif
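
The header changes above replace the driver's open-coded on-stack wait objects with a reference-counted c4iw_wr_wait and a c4iw_ref_send_wait() helper that pairs the firmware send with the completion wait. What follows is a minimal sketch of the intended call pattern only; send_fw_work() and its arguments are hypothetical placeholders, and the assumption that c4iw_alloc_wr_wait() returns an object holding one reference is not shown in this hunk.

/* Sketch only: exercises the refcounted wr_wait helpers declared above.
 * send_fw_work() is a hypothetical caller, not part of the patch.
 */
static int send_fw_work(struct c4iw_rdev *rdev, struct sk_buff *skb,
			u32 hwtid, u32 qpid)
{
	struct c4iw_wr_wait *wr_waitp;
	int ret;

	/* Assumed to kzalloc the object and take the initial kref. */
	wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!wr_waitp)
		return -ENOMEM;
	c4iw_init_wr_wait(wr_waitp);

	/* Takes an extra reference, posts the skb, then waits for the
	 * firmware reply; the reply path is expected to drop that extra
	 * reference via c4iw_wake_up_deref().
	 */
	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, hwtid, qpid, __func__);

	/* Drop the allocation reference once this one-shot use is done. */
	c4iw_put_wr_wait(wr_waitp);
	return ret;
}
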
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 026b91ebd5e2..dcdfe250bdbe 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -34,12 +34,13 @@
#include <linux/moduleparam.h>
#include <rdma/ib_umem.h>
#include <linux/atomic.h>
+#include <rdma/ib_user_verbs.h>
#include "iw_cxgb4.h"
-int use_dsgl = 0;
+int use_dsgl = 1;
module_param(use_dsgl, int, 0644);
-MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=0)");
+MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=1) (DEPRECATED)");
#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96
@@ -58,35 +59,37 @@ static int mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
}
static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
- u32 len, dma_addr_t data, int wait)
+ u32 len, dma_addr_t data,
+ struct sk_buff *skb,
+ struct c4iw_wr_wait *wr_waitp)
{
- struct sk_buff *skb;
struct ulp_mem_io *req;
struct ulptx_sgl *sgl;
u8 wr_len;
int ret = 0;
- struct c4iw_wr_wait wr_wait;
addr &= 0x7FFFFFF;
- if (wait)
- c4iw_init_wr_wait(&wr_wait);
+ if (wr_waitp)
+ c4iw_init_wr_wait(wr_waitp);
wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);
- skb = alloc_skb(wr_len, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
+ if (!skb) {
+ skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return -ENOMEM;
+ }
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
- req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
- memset(req, 0, wr_len);
+ req = __skb_put_zero(skb, wr_len);
INIT_ULPTX_WR(req, wr_len, 0, 0);
req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
- (wait ? FW_WR_COMPL_F : 0));
- req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
+ (wr_waitp ? FW_WR_COMPL_F : 0));
+ req->wr.wr_lo = wr_waitp ? (__force __be64)(unsigned long)wr_waitp : 0L;
req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
- req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
- req->cmd |= cpu_to_be32(T5_ULP_MEMIO_ORDER_V(1));
+ req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
+ T5_ULP_MEMIO_ORDER_V(1) |
+ T5_ULP_MEMIO_FID_V(rdev->lldi.rxq_ids[0]));
req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5));
req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));
@@ -97,23 +100,21 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
sgl->len0 = cpu_to_be32(len);
sgl->addr0 = cpu_to_be64(data);
- ret = c4iw_ofld_send(rdev, skb);
- if (ret)
- return ret;
- if (wait)
- ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
+ if (wr_waitp)
+ ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
+ else
+ ret = c4iw_ofld_send(rdev, skb);
return ret;
}
static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
- void *data)
+ void *data, struct sk_buff *skb,
+ struct c4iw_wr_wait *wr_waitp)
{
- struct sk_buff *skb;
struct ulp_mem_io *req;
struct ulptx_idata *sc;
u8 wr_len, *to_dp, *from_dp;
int copy_len, num_wqe, i, ret = 0;
- struct c4iw_wr_wait wr_wait;
__be32 cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
if (is_t4(rdev->lldi.adapter_type))
@@ -122,29 +123,31 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
cmd |= cpu_to_be32(T5_ULP_MEMIO_IMM_F);
addr &= 0x7FFFFFF;
- PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
+ pr_debug("addr 0x%x len %u\n", addr, len);
num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
- c4iw_init_wr_wait(&wr_wait);
+ c4iw_init_wr_wait(wr_waitp);
for (i = 0; i < num_wqe; i++) {
copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
len;
- wr_len = roundup(sizeof *req + sizeof *sc +
- roundup(copy_len, T4_ULPTX_MIN_IO), 16);
-
- skb = alloc_skb(wr_len, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
+ wr_len = roundup(sizeof(*req) + sizeof(*sc) +
+ roundup(copy_len, T4_ULPTX_MIN_IO),
+ 16);
+
+ if (!skb) {
+ skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return -ENOMEM;
+ }
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
- req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
- memset(req, 0, wr_len);
+ req = __skb_put_zero(skb, wr_len);
INIT_ULPTX_WR(req, wr_len, 0, 0);
if (i == (num_wqe-1)) {
req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
FW_WR_COMPL_F);
- req->wr.wr_lo = (__force __be64)&wr_wait;
+ req->wr.wr_lo = (__force __be64)(unsigned long)wr_waitp;
} else
req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
req->wr.wr_mid = cpu_to_be32(
@@ -170,17 +173,23 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
if (copy_len % T4_ULPTX_MIN_IO)
memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
(copy_len % T4_ULPTX_MIN_IO));
- ret = c4iw_ofld_send(rdev, skb);
+ if (i == (num_wqe-1))
+ ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0,
+ __func__);
+ else
+ ret = c4iw_ofld_send(rdev, skb);
if (ret)
- return ret;
+ break;
+ skb = NULL;
len -= C4IW_MAX_INLINE_SIZE;
}
- ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
return ret;
}
-static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
+static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
+ void *data, struct sk_buff *skb,
+ struct c4iw_wr_wait *wr_waitp)
{
u32 remain = len;
u32 dmalen;
@@ -203,7 +212,7 @@ static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *
dmalen = T4_ULPTX_MAX_DMA;
remain -= dmalen;
ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
- !remain);
+ skb, remain ? NULL : wr_waitp);
if (ret)
goto out;
addr += dmalen >> 5;
@@ -211,7 +220,8 @@ static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *
daddr += dmalen;
}
if (remain)
- ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
+ ret = _c4iw_write_mem_inline(rdev, addr, remain, data, skb,
+ wr_waitp);
out:
dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
return ret;
@@ -222,23 +232,33 @@ out:
* If data is NULL, clear len byte of memory to zero.
*/
static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
- void *data)
+ void *data, struct sk_buff *skb,
+ struct c4iw_wr_wait *wr_waitp)
{
- if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
- if (len > inline_threshold) {
- if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
- printk_ratelimited(KERN_WARNING
- "%s: dma map"
- " failure (non fatal)\n",
- pci_name(rdev->lldi.pdev));
- return _c4iw_write_mem_inline(rdev, addr, len,
- data);
- } else
- return 0;
- } else
- return _c4iw_write_mem_inline(rdev, addr, len, data);
- } else
- return _c4iw_write_mem_inline(rdev, addr, len, data);
+ int ret;
+
+ if (!rdev->lldi.ulptx_memwrite_dsgl || !use_dsgl) {
+ ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
+ wr_waitp);
+ goto out;
+ }
+
+ if (len <= inline_threshold) {
+ ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
+ wr_waitp);
+ goto out;
+ }
+
+ ret = _c4iw_write_mem_dma(rdev, addr, len, data, skb, wr_waitp);
+ if (ret) {
+ pr_warn_ratelimited("%s: dma map failure (non fatal)\n",
+ pci_name(rdev->lldi.pdev));
+ ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
+ wr_waitp);
+ }
+out:
+ return ret;
+
}
/*
@@ -251,16 +271,21 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
u32 *stag, u8 stag_state, u32 pdid,
enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
int bind_enabled, u32 zbva, u64 to,
- u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
+ u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr,
+ struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp)
{
int err;
- struct fw_ri_tpte tpt;
+ struct fw_ri_tpte *tpt;
u32 stag_idx;
static atomic_t key;
if (c4iw_fatal_error(rdev))
return -EIO;
+ tpt = kmalloc(sizeof(*tpt), GFP_KERNEL);
+ if (!tpt)
+ return -ENOMEM;
+
stag_state = stag_state > 0;
stag_idx = (*stag) >> 8;
@@ -270,6 +295,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
mutex_lock(&rdev->stats.lock);
rdev->stats.stag.fail++;
mutex_unlock(&rdev->stats.lock);
+ kfree(tpt);
return -ENOMEM;
}
mutex_lock(&rdev->stats.lock);
@@ -279,33 +305,33 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
mutex_unlock(&rdev->stats.lock);
*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
}
- PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
- __func__, stag_state, type, pdid, stag_idx);
+ pr_debug("stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
+ stag_state, type, pdid, stag_idx);
/* write TPT entry */
if (reset_tpt_entry)
- memset(&tpt, 0, sizeof(tpt));
+ memset(tpt, 0, sizeof(*tpt));
else {
- tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
+ tpt->valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
FW_RI_TPTE_STAGSTATE_V(stag_state) |
FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
- tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
+ tpt->locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
(bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
FW_RI_VA_BASED_TO))|
FW_RI_TPTE_PS_V(page_size));
- tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
+ tpt->nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3));
- tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
- tpt.va_hi = cpu_to_be32((u32)(to >> 32));
- tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
- tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
- tpt.len_hi = cpu_to_be32((u32)(len >> 32));
+ tpt->len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
+ tpt->va_hi = cpu_to_be32((u32)(to >> 32));
+ tpt->va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
+ tpt->dca_mwbcnt_pstag = cpu_to_be32(0);
+ tpt->len_hi = cpu_to_be32((u32)(len >> 32));
}
err = write_adapter_mem(rdev, stag_idx +
(rdev->lldi.vr->stag.start >> 5),
- sizeof(tpt), &tpt);
+ sizeof(*tpt), tpt, skb, wr_waitp);
if (reset_tpt_entry) {
c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
@@ -313,48 +339,39 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
rdev->stats.stag.cur -= 32;
mutex_unlock(&rdev->stats.lock);
}
+ kfree(tpt);
return err;
}
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
- u32 pbl_addr, u32 pbl_size)
+ u32 pbl_addr, u32 pbl_size, struct c4iw_wr_wait *wr_waitp)
{
int err;
- PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
- __func__, pbl_addr, rdev->lldi.vr->pbl.start,
- pbl_size);
+ pr_debug("*pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
+ pbl_addr, rdev->lldi.vr->pbl.start,
+ pbl_size);
- err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
+ err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL,
+ wr_waitp);
return err;
}
static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
- u32 pbl_addr)
+ u32 pbl_addr, struct sk_buff *skb,
+ struct c4iw_wr_wait *wr_waitp)
{
return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
- pbl_size, pbl_addr);
-}
-
-static int allocate_window(struct c4iw_rdev *rdev, u32 * stag, u32 pdid)
-{
- *stag = T4_STAG_UNSET;
- return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
- 0UL, 0, 0, 0, 0);
-}
-
-static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
-{
- return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
- 0);
+ pbl_size, pbl_addr, skb, wr_waitp);
}
static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
- u32 pbl_size, u32 pbl_addr)
+ u32 pbl_size, u32 pbl_addr,
+ struct c4iw_wr_wait *wr_waitp)
{
*stag = T4_STAG_UNSET;
return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
- 0UL, 0, 0, pbl_size, pbl_addr);
+ 0UL, 0, 0, pbl_size, pbl_addr, NULL, wr_waitp);
}
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
@@ -365,8 +382,10 @@ static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
mhp->attr.stag = stag;
mmid = stag >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
- return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
+ mhp->ibmr.length = mhp->attr.len;
+ mhp->ibmr.page_size = 1U << (mhp->attr.page_size + 12);
+ pr_debug("mmid 0x%x mhp %p\n", mmid, mhp);
+ return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL);
}
static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
@@ -381,40 +400,17 @@ static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
mhp->attr.mw_bind_enable, mhp->attr.zbva,
mhp->attr.va_fbo, mhp->attr.len ?
mhp->attr.len : -1, shift - 12,
- mhp->attr.pbl_size, mhp->attr.pbl_addr);
- if (ret)
- return ret;
-
- ret = finish_mem_reg(mhp, stag);
- if (ret)
- dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
- return ret;
-}
-
-static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
- struct c4iw_mr *mhp, int shift, int npages)
-{
- u32 stag;
- int ret;
-
- if (npages > mhp->attr.pbl_size)
- return -ENOMEM;
-
- stag = mhp->attr.stag;
- ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
- FW_RI_STAG_NSMR, mhp->attr.perms,
- mhp->attr.mw_bind_enable, mhp->attr.zbva,
- mhp->attr.va_fbo, mhp->attr.len, shift - 12,
- mhp->attr.pbl_size, mhp->attr.pbl_addr);
+ mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL,
+ mhp->wr_waitp);
if (ret)
return ret;
ret = finish_mem_reg(mhp, stag);
- if (ret)
+ if (ret) {
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
-
+ mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
+ mhp->dereg_skb = NULL;
+ }
return ret;
}
@@ -431,244 +427,34 @@ static int alloc_pbl(struct c4iw_mr *mhp, int npages)
return 0;
}
-static int build_phys_page_list(struct ib_phys_buf *buffer_list,
- int num_phys_buf, u64 *iova_start,
- u64 *total_size, int *npages,
- int *shift, __be64 **page_list)
-{
- u64 mask;
- int i, j, n;
-
- mask = 0;
- *total_size = 0;
- for (i = 0; i < num_phys_buf; ++i) {
- if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
- return -EINVAL;
- if (i != 0 && i != num_phys_buf - 1 &&
- (buffer_list[i].size & ~PAGE_MASK))
- return -EINVAL;
- *total_size += buffer_list[i].size;
- if (i > 0)
- mask |= buffer_list[i].addr;
- else
- mask |= buffer_list[i].addr & PAGE_MASK;
- if (i != num_phys_buf - 1)
- mask |= buffer_list[i].addr + buffer_list[i].size;
- else
- mask |= (buffer_list[i].addr + buffer_list[i].size +
- PAGE_SIZE - 1) & PAGE_MASK;
- }
-
- if (*total_size > 0xFFFFFFFFULL)
- return -ENOMEM;
-
- /* Find largest page shift we can use to cover buffers */
- for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
- if ((1ULL << *shift) & mask)
- break;
-
- buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
- buffer_list[0].addr &= ~0ull << *shift;
-
- *npages = 0;
- for (i = 0; i < num_phys_buf; ++i)
- *npages += (buffer_list[i].size +
- (1ULL << *shift) - 1) >> *shift;
-
- if (!*npages)
- return -EINVAL;
-
- *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
- if (!*page_list)
- return -ENOMEM;
-
- n = 0;
- for (i = 0; i < num_phys_buf; ++i)
- for (j = 0;
- j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
- ++j)
- (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
- ((u64) j << *shift));
-
- PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
- __func__, (unsigned long long)*iova_start,
- (unsigned long long)mask, *shift, (unsigned long long)*total_size,
- *npages);
-
- return 0;
-
-}
-
-int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
- struct ib_pd *pd, struct ib_phys_buf *buffer_list,
- int num_phys_buf, int acc, u64 *iova_start)
-{
-
- struct c4iw_mr mh, *mhp;
- struct c4iw_pd *php;
- struct c4iw_dev *rhp;
- __be64 *page_list = NULL;
- int shift = 0;
- u64 total_size;
- int npages;
- int ret;
-
- PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);
-
- /* There can be no memory windows */
- if (atomic_read(&mr->usecnt))
- return -EINVAL;
-
- mhp = to_c4iw_mr(mr);
- rhp = mhp->rhp;
- php = to_c4iw_pd(mr->pd);
-
- /* make sure we are on the same adapter */
- if (rhp != php->rhp)
- return -EINVAL;
-
- memcpy(&mh, mhp, sizeof *mhp);
-
- if (mr_rereg_mask & IB_MR_REREG_PD)
- php = to_c4iw_pd(pd);
- if (mr_rereg_mask & IB_MR_REREG_ACCESS) {
- mh.attr.perms = c4iw_ib_to_tpt_access(acc);
- mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
- IB_ACCESS_MW_BIND;
- }
- if (mr_rereg_mask & IB_MR_REREG_TRANS) {
- ret = build_phys_page_list(buffer_list, num_phys_buf,
- iova_start,
- &total_size, &npages,
- &shift, &page_list);
- if (ret)
- return ret;
- }
-
- if (mr_exceeds_hw_limits(rhp, total_size)) {
- kfree(page_list);
- return -EINVAL;
- }
-
- ret = reregister_mem(rhp, php, &mh, shift, npages);
- kfree(page_list);
- if (ret)
- return ret;
- if (mr_rereg_mask & IB_MR_REREG_PD)
- mhp->attr.pdid = php->pdid;
- if (mr_rereg_mask & IB_MR_REREG_ACCESS)
- mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
- if (mr_rereg_mask & IB_MR_REREG_TRANS) {
- mhp->attr.zbva = 0;
- mhp->attr.va_fbo = *iova_start;
- mhp->attr.page_size = shift - 12;
- mhp->attr.len = (u32) total_size;
- mhp->attr.pbl_size = npages;
- }
-
- return 0;
-}
-
-struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
- struct ib_phys_buf *buffer_list,
- int num_phys_buf, int acc, u64 *iova_start)
+struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
- __be64 *page_list;
- int shift;
- u64 total_size;
- int npages;
struct c4iw_dev *rhp;
struct c4iw_pd *php;
struct c4iw_mr *mhp;
int ret;
+ u32 stag = T4_STAG_UNSET;
- PDBG("%s ib_pd %p\n", __func__, pd);
+ pr_debug("ib_pd %p\n", pd);
php = to_c4iw_pd(pd);
rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp)
return ERR_PTR(-ENOMEM);
-
- mhp->rhp = rhp;
-
- /* First check that we have enough alignment */
- if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
- ret = -EINVAL;
- goto err;
- }
-
- if (num_phys_buf > 1 &&
- ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
- ret = -EINVAL;
- goto err;
- }
-
- ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
- &total_size, &npages, &shift,
- &page_list);
- if (ret)
- goto err;
-
- if (mr_exceeds_hw_limits(rhp, total_size)) {
- kfree(page_list);
- ret = -EINVAL;
- goto err;
+ mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+ if (!mhp->wr_waitp) {
+ ret = -ENOMEM;
+ goto err_free_mhp;
}
+ c4iw_init_wr_wait(mhp->wr_waitp);
- ret = alloc_pbl(mhp, npages);
- if (ret) {
- kfree(page_list);
- goto err;
+ mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
+ if (!mhp->dereg_skb) {
+ ret = -ENOMEM;
+ goto err_free_wr_wait;
}
- ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
- npages);
- kfree(page_list);
- if (ret)
- goto err_pbl;
-
- mhp->attr.pdid = php->pdid;
- mhp->attr.zbva = 0;
-
- mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
- mhp->attr.va_fbo = *iova_start;
- mhp->attr.page_size = shift - 12;
-
- mhp->attr.len = (u32) total_size;
- mhp->attr.pbl_size = npages;
- ret = register_mem(rhp, php, mhp, shift);
- if (ret)
- goto err_pbl;
-
- return &mhp->ibmr;
-
-err_pbl:
- c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
- mhp->attr.pbl_size << 3);
-
-err:
- kfree(mhp);
- return ERR_PTR(ret);
-
-}
-
-struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
-{
- struct c4iw_dev *rhp;
- struct c4iw_pd *php;
- struct c4iw_mr *mhp;
- int ret;
- u32 stag = T4_STAG_UNSET;
-
- PDBG("%s ib_pd %p\n", __func__, pd);
- php = to_c4iw_pd(pd);
- rhp = php->rhp;
-
- mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
- return ERR_PTR(-ENOMEM);
-
mhp->rhp = rhp;
mhp->attr.pdid = php->pdid;
mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
@@ -681,35 +467,43 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
FW_RI_STAG_NSMR, mhp->attr.perms,
- mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0);
+ mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0,
+ NULL, mhp->wr_waitp);
if (ret)
- goto err1;
+ goto err_free_skb;
ret = finish_mem_reg(mhp, stag);
if (ret)
- goto err2;
+ goto err_dereg_mem;
return &mhp->ibmr;
-err2:
+err_dereg_mem:
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
-err1:
+ mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
+err_free_skb:
+ kfree_skb(mhp->dereg_skb);
+err_free_wr_wait:
+ c4iw_put_wr_wait(mhp->wr_waitp);
+err_free_mhp:
kfree(mhp);
return ERR_PTR(ret);
}
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt, int acc, struct ib_udata *udata)
+ u64 virt, int acc, struct ib_dmah *dmah,
+ struct ib_udata *udata)
{
__be64 *pages;
- int shift, n, len;
- int i, k, entry;
- int err = 0;
- struct scatterlist *sg;
+ int shift, n, i;
+ int err = -ENOMEM;
+ struct ib_block_iter biter;
struct c4iw_dev *rhp;
struct c4iw_pd *php;
struct c4iw_mr *mhp;
- PDBG("%s ib_pd %p\n", __func__, pd);
+ pr_debug("ib_pd %p\n", pd);
+
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
if (length == ~0ULL)
return ERR_PTR(-EINVAL);
@@ -726,56 +520,57 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp)
return ERR_PTR(-ENOMEM);
+ mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+ if (!mhp->wr_waitp)
+ goto err_free_mhp;
+
+ mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
+ if (!mhp->dereg_skb)
+ goto err_free_wr_wait;
mhp->rhp = rhp;
- mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
- if (IS_ERR(mhp->umem)) {
- err = PTR_ERR(mhp->umem);
- kfree(mhp);
- return ERR_PTR(err);
- }
+ mhp->umem = ib_umem_get(pd->device, start, length, acc);
+ if (IS_ERR(mhp->umem))
+ goto err_free_skb;
- shift = ffs(mhp->umem->page_size) - 1;
+ shift = PAGE_SHIFT;
- n = mhp->umem->nmap;
+ n = ib_umem_num_dma_blocks(mhp->umem, 1 << shift);
err = alloc_pbl(mhp, n);
if (err)
- goto err;
+ goto err_umem_release;
pages = (__be64 *) __get_free_page(GFP_KERNEL);
if (!pages) {
err = -ENOMEM;
- goto err_pbl;
+ goto err_pbl_free;
}
i = n = 0;
- for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
- len = sg_dma_len(sg) >> shift;
- for (k = 0; k < len; ++k) {
- pages[i++] = cpu_to_be64(sg_dma_address(sg) +
- mhp->umem->page_size * k);
- if (i == PAGE_SIZE / sizeof *pages) {
- err = write_pbl(&mhp->rhp->rdev,
- pages,
- mhp->attr.pbl_addr + (n << 3), i);
- if (err)
- goto pbl_done;
- n += i;
- i = 0;
- }
+ rdma_umem_for_each_dma_block(mhp->umem, &biter, 1 << shift) {
+ pages[i++] = cpu_to_be64(rdma_block_iter_dma_address(&biter));
+ if (i == PAGE_SIZE / sizeof(*pages)) {
+ err = write_pbl(&mhp->rhp->rdev, pages,
+ mhp->attr.pbl_addr + (n << 3), i,
+ mhp->wr_waitp);
+ if (err)
+ goto pbl_done;
+ n += i;
+ i = 0;
}
}
if (i)
err = write_pbl(&mhp->rhp->rdev, pages,
- mhp->attr.pbl_addr + (n << 3), i);
+ mhp->attr.pbl_addr + (n << 3), i,
+ mhp->wr_waitp);
pbl_done:
free_page((unsigned long) pages);
if (err)
- goto err_pbl;
+ goto err_pbl_free;
mhp->attr.pdid = php->pdid;
mhp->attr.zbva = 0;
@@ -786,75 +581,25 @@ pbl_done:
err = register_mem(rhp, php, mhp, shift);
if (err)
- goto err_pbl;
+ goto err_pbl_free;
return &mhp->ibmr;
-err_pbl:
+err_pbl_free:
c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
mhp->attr.pbl_size << 3);
-
-err:
+err_umem_release:
ib_umem_release(mhp->umem);
+err_free_skb:
+ kfree_skb(mhp->dereg_skb);
+err_free_wr_wait:
+ c4iw_put_wr_wait(mhp->wr_waitp);
+err_free_mhp:
kfree(mhp);
return ERR_PTR(err);
}
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
-{
- struct c4iw_dev *rhp;
- struct c4iw_pd *php;
- struct c4iw_mw *mhp;
- u32 mmid;
- u32 stag = 0;
- int ret;
-
- if (type != IB_MW_TYPE_1)
- return ERR_PTR(-EINVAL);
-
- php = to_c4iw_pd(pd);
- rhp = php->rhp;
- mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
- return ERR_PTR(-ENOMEM);
- ret = allocate_window(&rhp->rdev, &stag, php->pdid);
- if (ret) {
- kfree(mhp);
- return ERR_PTR(ret);
- }
- mhp->rhp = rhp;
- mhp->attr.pdid = php->pdid;
- mhp->attr.type = FW_RI_STAG_MW;
- mhp->attr.stag = stag;
- mmid = (stag) >> 8;
- mhp->ibmw.rkey = stag;
- if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
- deallocate_window(&rhp->rdev, mhp->attr.stag);
- kfree(mhp);
- return ERR_PTR(-ENOMEM);
- }
- PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
- return &(mhp->ibmw);
-}
-
-int c4iw_dealloc_mw(struct ib_mw *mw)
-{
- struct c4iw_dev *rhp;
- struct c4iw_mw *mhp;
- u32 mmid;
-
- mhp = to_c4iw_mw(mw);
- rhp = mhp->rhp;
- mmid = (mw->rkey) >> 8;
- remove_handle(rhp, &rhp->mmidr, mmid);
- deallocate_window(&rhp->rdev, mhp->attr.stag);
- kfree(mhp);
- PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
- return 0;
-}
-
-struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
+struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg)
{
struct c4iw_dev *rhp;
@@ -863,123 +608,136 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
u32 mmid;
u32 stag = 0;
int ret = 0;
+ int length = roundup(max_num_sg * sizeof(u64), 32);
+
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
if (mr_type != IB_MR_TYPE_MEM_REG ||
- max_num_sg > t4_max_fr_depth(use_dsgl))
+ max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl &&
+ use_dsgl))
return ERR_PTR(-EINVAL);
- php = to_c4iw_pd(pd);
- rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp) {
ret = -ENOMEM;
goto err;
}
+ mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+ if (!mhp->wr_waitp) {
+ ret = -ENOMEM;
+ goto err_free_mhp;
+ }
+ c4iw_init_wr_wait(mhp->wr_waitp);
+
+ mhp->mpl = dma_alloc_coherent(&rhp->rdev.lldi.pdev->dev,
+ length, &mhp->mpl_addr, GFP_KERNEL);
+ if (!mhp->mpl) {
+ ret = -ENOMEM;
+ goto err_free_wr_wait;
+ }
+ mhp->max_mpl_len = length;
+
mhp->rhp = rhp;
ret = alloc_pbl(mhp, max_num_sg);
if (ret)
- goto err1;
+ goto err_free_dma;
mhp->attr.pbl_size = max_num_sg;
ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
- mhp->attr.pbl_size, mhp->attr.pbl_addr);
+ mhp->attr.pbl_size, mhp->attr.pbl_addr,
+ mhp->wr_waitp);
if (ret)
- goto err2;
+ goto err_free_pbl;
mhp->attr.pdid = php->pdid;
mhp->attr.type = FW_RI_STAG_NSMR;
mhp->attr.stag = stag;
- mhp->attr.state = 1;
+ mhp->attr.state = 0;
mmid = (stag) >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
+ if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
ret = -ENOMEM;
- goto err3;
+ goto err_dereg;
}
- PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
+ pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
return &(mhp->ibmr);
-err3:
+err_dereg:
dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
-err2:
+ mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
+err_free_pbl:
c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
mhp->attr.pbl_size << 3);
-err1:
+err_free_dma:
+ dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
+ mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
+err_free_wr_wait:
+ c4iw_put_wr_wait(mhp->wr_waitp);
+err_free_mhp:
kfree(mhp);
err:
return ERR_PTR(ret);
}
-struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
- int page_list_len)
+static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
{
- struct c4iw_fr_page_list *c4pl;
- struct c4iw_dev *dev = to_c4iw_dev(device);
- dma_addr_t dma_addr;
- int pll_len = roundup(page_list_len * sizeof(u64), 32);
-
- c4pl = kmalloc(sizeof(*c4pl), GFP_KERNEL);
- if (!c4pl)
- return ERR_PTR(-ENOMEM);
+ struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
- c4pl->ibpl.page_list = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev,
- pll_len, &dma_addr,
- GFP_KERNEL);
- if (!c4pl->ibpl.page_list) {
- kfree(c4pl);
- return ERR_PTR(-ENOMEM);
- }
- dma_unmap_addr_set(c4pl, mapping, dma_addr);
- c4pl->dma_addr = dma_addr;
- c4pl->dev = dev;
- c4pl->pll_len = pll_len;
+ if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
+ return -ENOMEM;
- PDBG("%s c4pl %p pll_len %u page_list %p dma_addr %pad\n",
- __func__, c4pl, c4pl->pll_len, c4pl->ibpl.page_list,
- &c4pl->dma_addr);
+ mhp->mpl[mhp->mpl_len++] = addr;
- return &c4pl->ibpl;
+ return 0;
}
-void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
+int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
{
- struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
+ struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
- PDBG("%s c4pl %p pll_len %u page_list %p dma_addr %pad\n",
- __func__, c4pl, c4pl->pll_len, c4pl->ibpl.page_list,
- &c4pl->dma_addr);
+ mhp->mpl_len = 0;
- dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev,
- c4pl->pll_len,
- c4pl->ibpl.page_list, dma_unmap_addr(c4pl, mapping));
- kfree(c4pl);
+ return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page);
}
-int c4iw_dereg_mr(struct ib_mr *ib_mr)
+int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
struct c4iw_dev *rhp;
struct c4iw_mr *mhp;
u32 mmid;
- PDBG("%s ib_mr %p\n", __func__, ib_mr);
- /* There can be no memory windows */
- if (atomic_read(&ib_mr->usecnt))
- return -EINVAL;
+ pr_debug("ib_mr %p\n", ib_mr);
mhp = to_c4iw_mr(ib_mr);
rhp = mhp->rhp;
mmid = mhp->attr.stag >> 8;
- remove_handle(rhp, &rhp->mmidr, mmid);
+ xa_erase_irq(&rhp->mrs, mmid);
+ if (mhp->mpl)
+ dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
+ mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
+ mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
if (mhp->attr.pbl_size)
c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
mhp->attr.pbl_size << 3);
if (mhp->kva)
kfree((void *) (unsigned long) mhp->kva);
- if (mhp->umem)
- ib_umem_release(mhp->umem);
- PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
+ ib_umem_release(mhp->umem);
+ pr_debug("mmid 0x%x ptr %p\n", mmid, mhp);
+ c4iw_put_wr_wait(mhp->wr_waitp);
kfree(mhp);
return 0;
}
+
+void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
+{
+ struct c4iw_mr *mhp;
+ unsigned long flags;
+
+ xa_lock_irqsave(&rhp->mrs, flags);
+ mhp = xa_load(&rhp->mrs, rkey >> 8);
+ if (mhp)
+ mhp->attr.state = 0;
+ xa_unlock_irqrestore(&rhp->mrs, flags);
+}
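
With the hunks above, mem.c keys memory regions by mmid (stag >> 8) in the rhp->mrs xarray instead of the old mmidr idr guarded by rhp->lock: xa_insert_irq() in finish_mem_reg(), xa_erase_irq() in c4iw_dereg_mr(), and a locked xa_load() in c4iw_invalidate_mr(). The following is a minimal sketch of the lookup side under those assumptions; lookup_mr_by_rkey() is an illustrative helper, not part of the patch.

/* Sketch only: rkey -> MR lookup against the new rhp->mrs xarray.
 * lookup_mr_by_rkey() is a hypothetical wrapper for illustration.
 */
static struct c4iw_mr *lookup_mr_by_rkey(struct c4iw_dev *rhp, u32 rkey)
{
	struct c4iw_mr *mhp;
	unsigned long flags;

	/* Entries were inserted with xa_insert_irq() keyed by mmid, so
	 * mirror c4iw_invalidate_mr() above and take the xarray lock with
	 * interrupts disabled while dereferencing the entry.
	 */
	xa_lock_irqsave(&rhp->mrs, flags);
	mhp = xa_load(&rhp->mrs, rkey >> 8);
	xa_unlock_irqrestore(&rhp->mrs, flags);

	/* Note: like get_chp()/get_qhp(), this returns the pointer without
	 * taking a reference; the caller must guarantee the MR's lifetime.
	 */
	return mhp;
}
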
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 7746113552e7..e059f92d90fd 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -41,6 +41,7 @@
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
+#include <net/addrconf.h>
#include <linux/io.h>
#include <asm/irq.h>
@@ -58,83 +59,43 @@ static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
-static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
- struct ib_ah_attr *ah_attr)
+static void c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
- return ERR_PTR(-ENOSYS);
-}
-
-static int c4iw_ah_destroy(struct ib_ah *ah)
-{
- return -ENOSYS;
-}
-
-static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- return -ENOSYS;
-}
-
-static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- return -ENOSYS;
-}
-
-static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
- u8 port_num, const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad,
- size_t in_mad_size,
- struct ib_mad_hdr *out_mad,
- size_t *out_mad_size,
- u16 *out_mad_pkey_index)
-{
- return -ENOSYS;
-}
-
-static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
-{
- struct c4iw_dev *rhp = to_c4iw_dev(context->device);
struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+ struct c4iw_dev *rhp;
struct c4iw_mm_entry *mm, *tmp;
- PDBG("%s context %p\n", __func__, context);
+ pr_debug("context %p\n", context);
+ rhp = to_c4iw_dev(ucontext->ibucontext.device);
+
list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
kfree(mm);
c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
- kfree(ucontext);
- return 0;
}
-static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
+static int c4iw_alloc_ucontext(struct ib_ucontext *ucontext,
+ struct ib_udata *udata)
{
- struct c4iw_ucontext *context;
+ struct ib_device *ibdev = ucontext->device;
+ struct c4iw_ucontext *context = to_c4iw_ucontext(ucontext);
struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
- static int warned;
struct c4iw_alloc_ucontext_resp uresp;
int ret = 0;
struct c4iw_mm_entry *mm = NULL;
- PDBG("%s ibdev %p\n", __func__, ibdev);
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context) {
- ret = -ENOMEM;
- goto err;
- }
-
+ pr_debug("ibdev %p\n", ibdev);
c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
INIT_LIST_HEAD(&context->mmaps);
spin_lock_init(&context->mmap_lock);
if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
- if (!warned++)
- pr_err(MOD "Warning - downlevel libcxgb4 (non-fatal), device status page disabled.");
+ pr_err_once("Warning - downlevel libcxgb4 (non-fatal), device status page disabled\n");
rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
} else {
mm = kmalloc(sizeof(*mm), GFP_KERNEL);
if (!mm) {
ret = -ENOMEM;
- goto err_free;
+ goto err;
}
uresp.status_page_size = PAGE_SIZE;
@@ -152,15 +113,16 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
mm->key = uresp.status_page_key;
mm->addr = virt_to_phys(rhp->rdev.status_page);
mm->len = PAGE_SIZE;
+ mm->vaddr = NULL;
+ mm->dma_addr = 0;
+ insert_flag_to_mmap(&rhp->rdev, mm, mm->addr);
insert_mmap(context, mm);
}
- return &context->ibucontext;
+ return 0;
err_mm:
kfree(mm);
-err_free:
- kfree(context);
err:
- return ERR_PTR(ret);
+ return ret;
}
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
@@ -172,9 +134,14 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
struct c4iw_mm_entry *mm;
struct c4iw_ucontext *ucontext;
u64 addr;
+ u8 mmap_flag;
+ size_t size;
+ void *vaddr;
+ unsigned long vm_pgoff;
+ dma_addr_t dma_addr;
- PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
- key, len);
+ pr_debug("pgoff 0x%lx key 0x%x len %d\n", vma->vm_pgoff,
+ key, len);
if (vma->vm_start & (PAGE_SIZE-1))
return -EINVAL;
@@ -186,92 +153,79 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
if (!mm)
return -EINVAL;
addr = mm->addr;
+ vaddr = mm->vaddr;
+ dma_addr = mm->dma_addr;
+ size = mm->len;
+ mmap_flag = mm->mmap_flag;
kfree(mm);
- if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) &&
- (addr < (pci_resource_start(rdev->lldi.pdev, 0) +
- pci_resource_len(rdev->lldi.pdev, 0)))) {
-
- /*
- * MA_SYNC register...
- */
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ switch (mmap_flag) {
+ case CXGB4_MMAP_BAR:
+ ret = io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
+ len,
+ pgprot_noncached(vma->vm_page_prot));
+ break;
+ case CXGB4_MMAP_BAR_WC:
ret = io_remap_pfn_range(vma, vma->vm_start,
addr >> PAGE_SHIFT,
- len, vma->vm_page_prot);
- } else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
- (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
- pci_resource_len(rdev->lldi.pdev, 2)))) {
-
- /*
- * Map user DB or OCQP memory...
- */
- if (addr >= rdev->oc_mw_pa)
- vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
- else {
- if (is_t5(rdev->lldi.adapter_type))
- vma->vm_page_prot =
- t4_pgprot_wc(vma->vm_page_prot);
- else
- vma->vm_page_prot =
- pgprot_noncached(vma->vm_page_prot);
- }
+ len, t4_pgprot_wc(vma->vm_page_prot));
+ break;
+ case CXGB4_MMAP_CONTIG:
ret = io_remap_pfn_range(vma, vma->vm_start,
addr >> PAGE_SHIFT,
len, vma->vm_page_prot);
- } else {
-
- /*
- * Map WQ or CQ contig dma memory...
- */
- ret = remap_pfn_range(vma, vma->vm_start,
- addr >> PAGE_SHIFT,
- len, vma->vm_page_prot);
+ break;
+ case CXGB4_MMAP_NON_CONTIG:
+ vm_pgoff = vma->vm_pgoff;
+ vma->vm_pgoff = 0;
+ ret = dma_mmap_coherent(&rdev->lldi.pdev->dev, vma,
+ vaddr, dma_addr, size);
+ vma->vm_pgoff = vm_pgoff;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
return ret;
}
-static int c4iw_deallocate_pd(struct ib_pd *pd)
+static int c4iw_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata)
{
struct c4iw_dev *rhp;
struct c4iw_pd *php;
php = to_c4iw_pd(pd);
rhp = php->rhp;
- PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
+ pr_debug("ibpd %p pdid 0x%x\n", pd, php->pdid);
c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
mutex_lock(&rhp->rdev.stats.lock);
rhp->rdev.stats.pd.cur--;
mutex_unlock(&rhp->rdev.stats.lock);
- kfree(php);
return 0;
}
-static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_udata *udata)
{
- struct c4iw_pd *php;
+ struct c4iw_pd *php = to_c4iw_pd(pd);
+ struct ib_device *ibdev = pd->device;
u32 pdid;
struct c4iw_dev *rhp;
- PDBG("%s ibdev %p\n", __func__, ibdev);
+ pr_debug("ibdev %p\n", ibdev);
rhp = (struct c4iw_dev *) ibdev;
pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
if (!pdid)
- return ERR_PTR(-EINVAL);
- php = kzalloc(sizeof(*php), GFP_KERNEL);
- if (!php) {
- c4iw_put_resource(&rhp->rdev.resource.pdid_table, pdid);
- return ERR_PTR(-ENOMEM);
- }
+ return -EINVAL;
+
php->pdid = pdid;
php->rhp = rhp;
- if (context) {
- if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) {
- c4iw_deallocate_pd(&php->ibpd);
- return ERR_PTR(-EFAULT);
+ if (udata) {
+ struct c4iw_alloc_pd_resp uresp = {.pdid = php->pdid};
+
+ if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
+ c4iw_deallocate_pd(&php->ibpd, udata);
+ return -EFAULT;
}
}
mutex_lock(&rhp->rdev.stats.lock);
@@ -279,27 +233,20 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
mutex_unlock(&rhp->rdev.stats.lock);
- PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
- return &php->ibpd;
-}
-
-static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
- u16 *pkey)
-{
- PDBG("%s ibdev %p\n", __func__, ibdev);
- *pkey = 0;
+ pr_debug("pdid 0x%0x ptr 0x%p\n", pdid, php);
return 0;
}
-static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
+static int c4iw_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid)
{
struct c4iw_dev *dev;
- PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
- __func__, ibdev, port, index, gid);
+ pr_debug("ibdev %p, port %u, index %d, gid %p\n",
+ ibdev, port, index, gid);
+ if (!port)
+ return -EINVAL;
dev = to_c4iw_dev(ibdev);
- BUG_ON(port == 0);
memset(&(gid->raw[0]), 0, sizeof(gid->raw));
memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6);
return 0;
@@ -311,24 +258,31 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
struct c4iw_dev *dev;
- PDBG("%s ibdev %p\n", __func__, ibdev);
+ pr_debug("ibdev %p\n", ibdev);
if (uhw->inlen || uhw->outlen)
return -EINVAL;
dev = to_c4iw_dev(ibdev);
- memset(props, 0, sizeof *props);
- memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
+ addrconf_addr_eui48((u8 *)&props->sys_image_guid,
+ dev->rdev.lldi.ports[0]->dev_addr);
props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
props->fw_ver = dev->rdev.lldi.fw_vers;
- props->device_cap_flags = dev->device_cap_flags;
+ props->device_cap_flags = IB_DEVICE_MEM_WINDOW;
+ props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
+ if (fastreg_support)
+ props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
props->page_size_cap = T4_PAGESIZE_MASK;
props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
props->max_mr_size = T4_MAX_MR_SIZE;
props->max_qp = dev->rdev.lldi.vr->qp.size / 2;
+ props->max_srq = dev->rdev.lldi.vr->srq.size;
props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth;
- props->max_sge = T4_MAX_RECV_SGE;
+ props->max_srq_wr = dev->rdev.hw_queue.t4_max_qp_depth;
+ props->max_send_sge = min(T4_MAX_SEND_SGE, T4_MAX_WRITE_SGE);
+ props->max_recv_sge = T4_MAX_RECV_SGE;
+ props->max_srq_sge = T4_MAX_RECV_SGE;
props->max_sge_rd = 1;
props->max_res_rd_atom = dev->rdev.lldi.max_ird_adapter;
props->max_qp_rd_atom = min(dev->rdev.lldi.max_ordird_qp,
@@ -339,49 +293,19 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
props->max_mr = c4iw_num_stags(&dev->rdev);
props->max_pd = T4_MAX_NUM_PD;
props->local_ca_ack_delay = 0;
- props->max_fast_reg_page_list_len = t4_max_fr_depth(use_dsgl);
+ props->max_fast_reg_page_list_len =
+ t4_max_fr_depth(dev->rdev.lldi.ulptx_memwrite_dsgl && use_dsgl);
return 0;
}
-static int c4iw_query_port(struct ib_device *ibdev, u8 port,
+static int c4iw_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
- struct c4iw_dev *dev;
- struct net_device *netdev;
- struct in_device *inetdev;
-
- PDBG("%s ibdev %p\n", __func__, ibdev);
-
- dev = to_c4iw_dev(ibdev);
- netdev = dev->rdev.lldi.ports[port-1];
-
- memset(props, 0, sizeof(struct ib_port_attr));
- props->max_mtu = IB_MTU_4096;
- if (netdev->mtu >= 4096)
- props->active_mtu = IB_MTU_4096;
- else if (netdev->mtu >= 2048)
- props->active_mtu = IB_MTU_2048;
- else if (netdev->mtu >= 1024)
- props->active_mtu = IB_MTU_1024;
- else if (netdev->mtu >= 512)
- props->active_mtu = IB_MTU_512;
- else
- props->active_mtu = IB_MTU_256;
-
- if (!netif_carrier_ok(netdev))
- props->state = IB_PORT_DOWN;
- else {
- inetdev = in_dev_get(netdev);
- if (inetdev) {
- if (inetdev->ifa_list)
- props->state = IB_PORT_ACTIVE;
- else
- props->state = IB_PORT_INIT;
- in_dev_put(inetdev);
- } else
- props->state = IB_PORT_INIT;
- }
+ int ret = 0;
+ pr_debug("ibdev %p\n", ibdev);
+ ret = ib_get_eth_speed(ibdev, port, &props->active_speed,
+ &props->active_width);
props->port_cap_flags =
IB_PORT_CM_SUP |
@@ -390,225 +314,256 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
IB_PORT_DEVICE_MGMT_SUP |
IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
props->gid_tbl_len = 1;
- props->pkey_tbl_len = 1;
- props->active_width = 2;
- props->active_speed = IB_SPEED_DDR;
props->max_msg_sz = -1;
- return 0;
+ return ret;
}
-static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
- ibdev.dev);
- PDBG("%s dev 0x%p\n", __func__, dev);
- return sprintf(buf, "%d\n",
- CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
-}
+ struct c4iw_dev *c4iw_dev =
+ rdma_device_to_drv_device(dev, struct c4iw_dev, ibdev);
-static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
- ibdev.dev);
- PDBG("%s dev 0x%p\n", __func__, dev);
-
- return sprintf(buf, "%u.%u.%u.%u\n",
- FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
- FW_HDR_FW_VER_MINOR_G(c4iw_dev->rdev.lldi.fw_vers),
- FW_HDR_FW_VER_MICRO_G(c4iw_dev->rdev.lldi.fw_vers),
- FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers));
+ pr_debug("dev 0x%p\n", dev);
+ return sysfs_emit(
+ buf, "%d\n",
+ CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
- ibdev.dev);
+ struct c4iw_dev *c4iw_dev =
+ rdma_device_to_drv_device(dev, struct c4iw_dev, ibdev);
struct ethtool_drvinfo info;
struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];
- PDBG("%s dev 0x%p\n", __func__, dev);
+ pr_debug("dev 0x%p\n", dev);
lldev->ethtool_ops->get_drvinfo(lldev, &info);
- return sprintf(buf, "%s\n", info.driver);
+ return sysfs_emit(buf, "%s\n", info.driver);
}
+static DEVICE_ATTR_RO(hca_type);
-static ssize_t show_board(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
- ibdev.dev);
- PDBG("%s dev 0x%p\n", __func__, dev);
- return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
- c4iw_dev->rdev.lldi.pdev->device);
+ struct c4iw_dev *c4iw_dev =
+ rdma_device_to_drv_device(dev, struct c4iw_dev, ibdev);
+
+ pr_debug("dev 0x%p\n", dev);
+ return sysfs_emit(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
+ c4iw_dev->rdev.lldi.pdev->device);
+}
+static DEVICE_ATTR_RO(board_id);
+
+enum counters {
+ IP4INSEGS,
+ IP4OUTSEGS,
+ IP4RETRANSSEGS,
+ IP4OUTRSTS,
+ IP6INSEGS,
+ IP6OUTSEGS,
+ IP6RETRANSSEGS,
+ IP6OUTRSTS,
+ NR_COUNTERS
+};
+
+static const struct rdma_stat_desc cxgb4_descs[] = {
+ [IP4INSEGS].name = "ip4InSegs",
+ [IP4OUTSEGS].name = "ip4OutSegs",
+ [IP4RETRANSSEGS].name = "ip4RetransSegs",
+ [IP4OUTRSTS].name = "ip4OutRsts",
+ [IP6INSEGS].name = "ip6InSegs",
+ [IP6OUTSEGS].name = "ip6OutSegs",
+ [IP6RETRANSSEGS].name = "ip6RetransSegs",
+ [IP6OUTRSTS].name = "ip6OutRsts"
+};
+
+static struct rdma_hw_stats *c4iw_alloc_device_stats(struct ib_device *ibdev)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(cxgb4_descs) != NR_COUNTERS);
+
+ /* FIXME: these look like port stats */
+ return rdma_alloc_hw_stats_struct(cxgb4_descs, NR_COUNTERS,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
static int c4iw_get_mib(struct ib_device *ibdev,
- union rdma_protocol_stats *stats)
+ struct rdma_hw_stats *stats,
+ u32 port, int index)
{
struct tp_tcp_stats v4, v6;
struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);
cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
- memset(stats, 0, sizeof *stats);
- stats->iw.tcpInSegs = v4.tcp_in_segs + v6.tcp_in_segs;
- stats->iw.tcpOutSegs = v4.tcp_out_segs + v6.tcp_out_segs;
- stats->iw.tcpRetransSegs = v4.tcp_retrans_segs + v6.tcp_retrans_segs;
- stats->iw.tcpOutRsts = v4.tcp_out_rsts + v6.tcp_out_rsts;
-
- return 0;
+ stats->value[IP4INSEGS] = v4.tcp_in_segs;
+ stats->value[IP4OUTSEGS] = v4.tcp_out_segs;
+ stats->value[IP4RETRANSSEGS] = v4.tcp_retrans_segs;
+ stats->value[IP4OUTRSTS] = v4.tcp_out_rsts;
+ stats->value[IP6INSEGS] = v6.tcp_in_segs;
+ stats->value[IP6OUTSEGS] = v6.tcp_out_segs;
+ stats->value[IP6RETRANSSEGS] = v6.tcp_retrans_segs;
+ stats->value[IP6OUTRSTS] = v6.tcp_out_rsts;
+
+ return stats->num_counters;
}
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+static struct attribute *c4iw_class_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL
+};
-static struct device_attribute *c4iw_class_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_fw_ver,
- &dev_attr_hca_type,
- &dev_attr_board_id,
+static const struct attribute_group c4iw_attr_group = {
+ .attrs = c4iw_class_attributes,
};
-static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int c4iw_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
int err;
- err = c4iw_query_port(ibdev, port_num, &attr);
+ immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+
+ err = ib_query_port(ibdev, port_num, &attr);
if (err)
return err;
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
return 0;
}
-int c4iw_register_device(struct c4iw_dev *dev)
+static void get_dev_fw_str(struct ib_device *dev, char *str)
+{
+ struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
+ ibdev);
+ pr_debug("dev 0x%p\n", dev);
+
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u.%u",
+ FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
+ FW_HDR_FW_VER_MINOR_G(c4iw_dev->rdev.lldi.fw_vers),
+ FW_HDR_FW_VER_MICRO_G(c4iw_dev->rdev.lldi.fw_vers),
+ FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers));
+}
+
+static const struct ib_device_ops c4iw_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_CXGB4,
+ .uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION,
+
+ .alloc_hw_device_stats = c4iw_alloc_device_stats,
+ .alloc_mr = c4iw_alloc_mr,
+ .alloc_pd = c4iw_allocate_pd,
+ .alloc_ucontext = c4iw_alloc_ucontext,
+ .create_cq = c4iw_create_cq,
+ .create_qp = c4iw_create_qp,
+ .create_srq = c4iw_create_srq,
+ .dealloc_pd = c4iw_deallocate_pd,
+ .dealloc_ucontext = c4iw_dealloc_ucontext,
+ .dereg_mr = c4iw_dereg_mr,
+ .destroy_cq = c4iw_destroy_cq,
+ .destroy_qp = c4iw_destroy_qp,
+ .destroy_srq = c4iw_destroy_srq,
+ .device_group = &c4iw_attr_group,
+ .fill_res_cq_entry = c4iw_fill_res_cq_entry,
+ .fill_res_cm_id_entry = c4iw_fill_res_cm_id_entry,
+ .fill_res_mr_entry = c4iw_fill_res_mr_entry,
+ .fill_res_qp_entry = c4iw_fill_res_qp_entry,
+ .get_dev_fw_str = get_dev_fw_str,
+ .get_dma_mr = c4iw_get_dma_mr,
+ .get_hw_stats = c4iw_get_mib,
+ .get_port_immutable = c4iw_port_immutable,
+ .iw_accept = c4iw_accept_cr,
+ .iw_add_ref = c4iw_qp_add_ref,
+ .iw_connect = c4iw_connect,
+ .iw_create_listen = c4iw_create_listen,
+ .iw_destroy_listen = c4iw_destroy_listen,
+ .iw_get_qp = c4iw_get_qp,
+ .iw_reject = c4iw_reject_cr,
+ .iw_rem_ref = c4iw_qp_rem_ref,
+ .map_mr_sg = c4iw_map_mr_sg,
+ .mmap = c4iw_mmap,
+ .modify_qp = c4iw_ib_modify_qp,
+ .modify_srq = c4iw_modify_srq,
+ .poll_cq = c4iw_poll_cq,
+ .post_recv = c4iw_post_receive,
+ .post_send = c4iw_post_send,
+ .post_srq_recv = c4iw_post_srq_recv,
+ .query_device = c4iw_query_device,
+ .query_gid = c4iw_query_gid,
+ .query_port = c4iw_query_port,
+ .query_qp = c4iw_ib_query_qp,
+ .reg_user_mr = c4iw_reg_user_mr,
+ .req_notify_cq = c4iw_arm_cq,
+
+ INIT_RDMA_OBJ_SIZE(ib_cq, c4iw_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_mw, c4iw_mw, ibmw),
+ INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, c4iw_qp, ibqp),
+ INIT_RDMA_OBJ_SIZE(ib_srq, c4iw_srq, ibsrq),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, c4iw_ucontext, ibucontext),
+};
+
+static int set_netdevs(struct ib_device *ib_dev, struct c4iw_rdev *rdev)
{
int ret;
int i;
- PDBG("%s c4iw_dev %p\n", __func__, dev);
- BUG_ON(!dev->rdev.lldi.ports[0]);
- strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
- memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
- memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
- dev->ibdev.owner = THIS_MODULE;
- dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
- if (fastreg_support)
- dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
+ for (i = 0; i < rdev->lldi.nports; i++) {
+ ret = ib_device_set_netdev(ib_dev, rdev->lldi.ports[i],
+ i + 1);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+void c4iw_register_device(struct work_struct *work)
+{
+ int ret;
+ struct uld_ctx *ctx = container_of(work, struct uld_ctx, reg_work);
+ struct c4iw_dev *dev = ctx->dev;
+
+ pr_debug("c4iw_dev %p\n", dev);
+ addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid,
+ dev->rdev.lldi.ports[0]->dev_addr);
dev->ibdev.local_dma_lkey = 0;
- dev->ibdev.uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
- (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_POST_SEND) |
- (1ull << IB_USER_VERBS_CMD_POST_RECV);
dev->ibdev.node_type = RDMA_NODE_RNIC;
+ BUILD_BUG_ON(sizeof(C4IW_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
dev->ibdev.num_comp_vectors = dev->rdev.lldi.nciq;
- dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
- dev->ibdev.query_device = c4iw_query_device;
- dev->ibdev.query_port = c4iw_query_port;
- dev->ibdev.query_pkey = c4iw_query_pkey;
- dev->ibdev.query_gid = c4iw_query_gid;
- dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext;
- dev->ibdev.dealloc_ucontext = c4iw_dealloc_ucontext;
- dev->ibdev.mmap = c4iw_mmap;
- dev->ibdev.alloc_pd = c4iw_allocate_pd;
- dev->ibdev.dealloc_pd = c4iw_deallocate_pd;
- dev->ibdev.create_ah = c4iw_ah_create;
- dev->ibdev.destroy_ah = c4iw_ah_destroy;
- dev->ibdev.create_qp = c4iw_create_qp;
- dev->ibdev.modify_qp = c4iw_ib_modify_qp;
- dev->ibdev.query_qp = c4iw_ib_query_qp;
- dev->ibdev.destroy_qp = c4iw_destroy_qp;
- dev->ibdev.create_cq = c4iw_create_cq;
- dev->ibdev.destroy_cq = c4iw_destroy_cq;
- dev->ibdev.resize_cq = c4iw_resize_cq;
- dev->ibdev.poll_cq = c4iw_poll_cq;
- dev->ibdev.get_dma_mr = c4iw_get_dma_mr;
- dev->ibdev.reg_phys_mr = c4iw_register_phys_mem;
- dev->ibdev.rereg_phys_mr = c4iw_reregister_phys_mem;
- dev->ibdev.reg_user_mr = c4iw_reg_user_mr;
- dev->ibdev.dereg_mr = c4iw_dereg_mr;
- dev->ibdev.alloc_mw = c4iw_alloc_mw;
- dev->ibdev.bind_mw = c4iw_bind_mw;
- dev->ibdev.dealloc_mw = c4iw_dealloc_mw;
- dev->ibdev.alloc_mr = c4iw_alloc_mr;
- dev->ibdev.alloc_fast_reg_page_list = c4iw_alloc_fastreg_pbl;
- dev->ibdev.free_fast_reg_page_list = c4iw_free_fastreg_pbl;
- dev->ibdev.attach_mcast = c4iw_multicast_attach;
- dev->ibdev.detach_mcast = c4iw_multicast_detach;
- dev->ibdev.process_mad = c4iw_process_mad;
- dev->ibdev.req_notify_cq = c4iw_arm_cq;
- dev->ibdev.post_send = c4iw_post_send;
- dev->ibdev.post_recv = c4iw_post_receive;
- dev->ibdev.get_protocol_stats = c4iw_get_mib;
- dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
- dev->ibdev.get_port_immutable = c4iw_port_immutable;
-
- dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
- if (!dev->ibdev.iwcm)
- return -ENOMEM;
-
- dev->ibdev.iwcm->connect = c4iw_connect;
- dev->ibdev.iwcm->accept = c4iw_accept_cr;
- dev->ibdev.iwcm->reject = c4iw_reject_cr;
- dev->ibdev.iwcm->create_listen = c4iw_create_listen;
- dev->ibdev.iwcm->destroy_listen = c4iw_destroy_listen;
- dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
- dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
- dev->ibdev.iwcm->get_qp = c4iw_get_qp;
-
- ret = ib_register_device(&dev->ibdev, NULL);
+ dev->ibdev.dev.parent = &dev->rdev.lldi.pdev->dev;
+
+ memcpy(dev->ibdev.iw_ifname, dev->rdev.lldi.ports[0]->name,
+ sizeof(dev->ibdev.iw_ifname));
+
+ ib_set_device_ops(&dev->ibdev, &c4iw_dev_ops);
+ ret = set_netdevs(&dev->ibdev, &dev->rdev);
+ if (ret)
+ goto err_dealloc_ctx;
+ dma_set_max_seg_size(&dev->rdev.lldi.pdev->dev, UINT_MAX);
+ ret = ib_register_device(&dev->ibdev, "cxgb4_%d",
+ &dev->rdev.lldi.pdev->dev);
if (ret)
- goto bail1;
+ goto err_dealloc_ctx;
+ return;
- for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) {
- ret = device_create_file(&dev->ibdev.dev,
- c4iw_class_attributes[i]);
- if (ret)
- goto bail2;
- }
- return 0;
-bail2:
- ib_unregister_device(&dev->ibdev);
-bail1:
- kfree(dev->ibdev.iwcm);
- return ret;
+err_dealloc_ctx:
+ pr_err("%s - Failed registering iwarp device: %d\n",
+ pci_name(ctx->lldi.pdev), ret);
+ c4iw_dealloc(ctx);
+ return;
}
void c4iw_unregister_device(struct c4iw_dev *dev)
{
- int i;
-
- PDBG("%s c4iw_dev %p\n", __func__, dev);
- for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
- device_remove_file(&dev->ibdev.dev,
- c4iw_class_attributes[i]);
+ pr_debug("c4iw_dev %p\n", dev);
ib_unregister_device(&dev->ibdev);
- kfree(dev->ibdev.iwcm);
return;
}
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 6517e1208ccb..955f061a55e9 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -31,6 +31,7 @@
*/
#include <linux/module.h>
+#include <rdma/uverbs_ioctl.h>
#include "iw_cxgb4.h"
@@ -56,18 +57,18 @@ MODULE_PARM_DESC(db_coalescing_threshold,
static int max_fr_immd = T4_MAX_FR_IMMD;
module_param(max_fr_immd, int, 0644);
-MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immedate");
+MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
static int alloc_ird(struct c4iw_dev *dev, u32 ird)
{
int ret = 0;
- spin_lock_irq(&dev->lock);
+ xa_lock_irq(&dev->qps);
if (ird <= dev->avail_ird)
dev->avail_ird -= ird;
else
ret = -ENOMEM;
- spin_unlock_irq(&dev->lock);
+ xa_unlock_irq(&dev->qps);
if (ret)
dev_warn(&dev->rdev.lldi.pdev->dev,
@@ -78,9 +79,9 @@ static int alloc_ird(struct c4iw_dev *dev, u32 ird)
static void free_ird(struct c4iw_dev *dev, int ird)
{
- spin_lock_irq(&dev->lock);
+ xa_lock_irq(&dev->qps);
dev->avail_ird += ird;
- spin_unlock_irq(&dev->lock);
+ xa_unlock_irq(&dev->qps);
}
static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
@@ -99,7 +100,7 @@ static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
- pci_unmap_addr(sq, mapping));
+ dma_unmap_addr(sq, mapping));
}
static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
@@ -132,7 +133,7 @@ static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
if (!sq->queue)
return -ENOMEM;
sq->phys_addr = virt_to_phys(sq->queue);
- pci_unmap_addr_set(sq, mapping, sq->dma_addr);
+ dma_unmap_addr_set(sq, mapping, sq->dma_addr);
return 0;
}
@@ -147,21 +148,24 @@ static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
}
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
- struct c4iw_dev_ucontext *uctx)
+ struct c4iw_dev_ucontext *uctx, int has_rq)
{
/*
* uP clears EQ contexts when the connection exits rdma mode,
* so no need to post a RESET WR for these EQs.
*/
- dma_free_coherent(&(rdev->lldi.pdev->dev),
- wq->rq.memsize, wq->rq.queue,
- dma_unmap_addr(&wq->rq, mapping));
dealloc_sq(rdev, &wq->sq);
- c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
- kfree(wq->rq.sw_rq);
kfree(wq->sq.sw_sq);
- c4iw_put_qpid(rdev, wq->rq.qid, uctx);
c4iw_put_qpid(rdev, wq->sq.qid, uctx);
+
+ if (has_rq) {
+ dma_free_coherent(&rdev->lldi.pdev->dev,
+ wq->rq.memsize, wq->rq.queue,
+ dma_unmap_addr(&wq->rq, mapping));
+ c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
+ kfree(wq->rq.sw_rq);
+ c4iw_put_qpid(rdev, wq->rq.qid, uctx);
+ }
return 0;
}
@@ -185,18 +189,23 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
if (pbar2_pa)
*pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
+
+ if (is_t4(rdev->lldi.adapter_type))
+ return NULL;
+
return rdev->bar2_kva + bar2_qoffset;
}
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
struct t4_cq *rcq, struct t4_cq *scq,
- struct c4iw_dev_ucontext *uctx)
+ struct c4iw_dev_ucontext *uctx,
+ struct c4iw_wr_wait *wr_waitp,
+ int need_rq)
{
int user = (uctx != &rdev->uctx);
struct fw_ri_res_wr *res_wr;
struct fw_ri_res *res;
int wr_len;
- struct c4iw_wr_wait wr_wait;
struct sk_buff *skb;
int ret = 0;
int eqsize;
@@ -205,36 +214,44 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
if (!wq->sq.qid)
return -ENOMEM;
- wq->rq.qid = c4iw_get_qpid(rdev, uctx);
- if (!wq->rq.qid) {
- ret = -ENOMEM;
- goto free_sq_qid;
+ if (need_rq) {
+ wq->rq.qid = c4iw_get_qpid(rdev, uctx);
+ if (!wq->rq.qid) {
+ ret = -ENOMEM;
+ goto free_sq_qid;
+ }
}
if (!user) {
- wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
- GFP_KERNEL);
+ wq->sq.sw_sq = kcalloc(wq->sq.size, sizeof(*wq->sq.sw_sq),
+ GFP_KERNEL);
if (!wq->sq.sw_sq) {
ret = -ENOMEM;
- goto free_rq_qid;
+ goto free_rq_qid;//FIXME
}
- wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
- GFP_KERNEL);
- if (!wq->rq.sw_rq) {
- ret = -ENOMEM;
- goto free_sw_sq;
+ if (need_rq) {
+ wq->rq.sw_rq = kcalloc(wq->rq.size,
+ sizeof(*wq->rq.sw_rq),
+ GFP_KERNEL);
+ if (!wq->rq.sw_rq) {
+ ret = -ENOMEM;
+ goto free_sw_sq;
+ }
}
}
- /*
- * RQT must be a power of 2 and at least 16 deep.
- */
- wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
- wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
- if (!wq->rq.rqt_hwaddr) {
- ret = -ENOMEM;
- goto free_sw_rq;
+ if (need_rq) {
+ /*
+ * RQT must be a power of 2 and at least 16 deep.
+ */
+ wq->rq.rqt_size =
+ roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
+ wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
+ if (!wq->rq.rqt_hwaddr) {
+ ret = -ENOMEM;
+ goto free_sw_rq;
+ }
}
ret = alloc_sq(rdev, &wq->sq, user);
@@ -243,36 +260,42 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
memset(wq->sq.queue, 0, wq->sq.memsize);
dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
- wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
- wq->rq.memsize, &(wq->rq.dma_addr),
- GFP_KERNEL);
- if (!wq->rq.queue) {
- ret = -ENOMEM;
- goto free_sq;
+ if (need_rq) {
+ wq->rq.queue = dma_alloc_coherent(&rdev->lldi.pdev->dev,
+ wq->rq.memsize,
+ &wq->rq.dma_addr,
+ GFP_KERNEL);
+ if (!wq->rq.queue) {
+ ret = -ENOMEM;
+ goto free_sq;
+ }
+ pr_debug("sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
+ wq->sq.queue,
+ (unsigned long long)virt_to_phys(wq->sq.queue),
+ wq->rq.queue,
+ (unsigned long long)virt_to_phys(wq->rq.queue));
+ dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
}
- PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
- __func__, wq->sq.queue,
- (unsigned long long)virt_to_phys(wq->sq.queue),
- wq->rq.queue,
- (unsigned long long)virt_to_phys(wq->rq.queue));
- memset(wq->rq.queue, 0, wq->rq.memsize);
- dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
wq->db = rdev->lldi.db_reg;
- wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid, T4_BAR2_QTYPE_EGRESS,
+ wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid,
+ CXGB4_BAR2_QTYPE_EGRESS,
&wq->sq.bar2_qid,
user ? &wq->sq.bar2_pa : NULL);
- wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid, T4_BAR2_QTYPE_EGRESS,
- &wq->rq.bar2_qid,
- user ? &wq->rq.bar2_pa : NULL);
+ if (need_rq)
+ wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid,
+ CXGB4_BAR2_QTYPE_EGRESS,
+ &wq->rq.bar2_qid,
+ user ? &wq->rq.bar2_pa : NULL);
/*
* User mode must have bar2 access.
*/
- if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) {
- pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
+ if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) {
+ pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n",
pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
+ ret = -EINVAL;
goto free_dma;
}
@@ -280,8 +303,9 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
wq->rq.msn = 1;
/* build fw_ri_res_wr */
- wr_len = sizeof *res_wr + 2 * sizeof *res;
-
+ wr_len = sizeof(*res_wr) + 2 * sizeof(*res);
+ if (need_rq)
+ wr_len += sizeof(*res);
skb = alloc_skb(wr_len, GFP_KERNEL);
if (!skb) {
ret = -ENOMEM;
@@ -289,14 +313,13 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
}
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
- res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
- memset(res_wr, 0, wr_len);
+ res_wr = __skb_put_zero(skb, wr_len);
res_wr->op_nres = cpu_to_be32(
FW_WR_OP_V(FW_RI_RES_WR) |
- FW_RI_RES_WR_NRES_V(2) |
+ FW_RI_RES_WR_NRES_V(need_rq ? 2 : 1) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
- res_wr->cookie = (uintptr_t)&wr_wait;
+ res_wr->cookie = (uintptr_t)wr_waitp;
res = res_wr->res;
res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
res->u.sqrq.op = FW_RI_RES_OP_WRITE;
@@ -317,72 +340,79 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
FW_RI_RES_WR_DCAEN_V(0) |
FW_RI_RES_WR_DCACPU_V(0) |
FW_RI_RES_WR_FBMIN_V(2) |
- FW_RI_RES_WR_FBMAX_V(2) |
+ (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
+ FW_RI_RES_WR_FBMAX_V(3)) |
FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
FW_RI_RES_WR_CIDXFTHRESH_V(0) |
FW_RI_RES_WR_EQSIZE_V(eqsize));
res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
- res++;
- res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
- res->u.sqrq.op = FW_RI_RES_OP_WRITE;
-
- /*
- * eqsize is the number of 64B entries plus the status page size.
- */
- eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
- rdev->hw_queue.t4_eq_status_entries;
- res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
- FW_RI_RES_WR_HOSTFCMODE_V(0) | /* no host cidx updates */
- FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */
- FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */
- FW_RI_RES_WR_IQID_V(rcq->cqid));
- res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
- FW_RI_RES_WR_DCAEN_V(0) |
- FW_RI_RES_WR_DCACPU_V(0) |
- FW_RI_RES_WR_FBMIN_V(2) |
- FW_RI_RES_WR_FBMAX_V(2) |
- FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
- FW_RI_RES_WR_CIDXFTHRESH_V(0) |
- FW_RI_RES_WR_EQSIZE_V(eqsize));
- res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
- res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
- c4iw_init_wr_wait(&wr_wait);
+ if (need_rq) {
+ res++;
+ res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
+ res->u.sqrq.op = FW_RI_RES_OP_WRITE;
+
+ /*
+ * eqsize is the number of 64B entries plus the status page size
+ */
+ eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
+ rdev->hw_queue.t4_eq_status_entries;
+ res->u.sqrq.fetchszm_to_iqid =
+ /* no host cidx updates */
+ cpu_to_be32(FW_RI_RES_WR_HOSTFCMODE_V(0) |
+ /* don't keep in chip cache */
+ FW_RI_RES_WR_CPRIO_V(0) |
+ /* set by uP at ri_init time */
+ FW_RI_RES_WR_PCIECHN_V(0) |
+ FW_RI_RES_WR_IQID_V(rcq->cqid));
+ res->u.sqrq.dcaen_to_eqsize =
+ cpu_to_be32(FW_RI_RES_WR_DCAEN_V(0) |
+ FW_RI_RES_WR_DCACPU_V(0) |
+ FW_RI_RES_WR_FBMIN_V(2) |
+ FW_RI_RES_WR_FBMAX_V(3) |
+ FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
+ FW_RI_RES_WR_CIDXFTHRESH_V(0) |
+ FW_RI_RES_WR_EQSIZE_V(eqsize));
+ res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
+ res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
+ }
- ret = c4iw_ofld_send(rdev, skb);
- if (ret)
- goto free_dma;
- ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
+ c4iw_init_wr_wait(wr_waitp);
+ ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->sq.qid, __func__);
if (ret)
goto free_dma;
- PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
- __func__, wq->sq.qid, wq->rq.qid, wq->db,
- wq->sq.bar2_va, wq->rq.bar2_va);
+ pr_debug("sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
+ wq->sq.qid, wq->rq.qid, wq->db,
+ wq->sq.bar2_va, wq->rq.bar2_va);
return 0;
free_dma:
- dma_free_coherent(&(rdev->lldi.pdev->dev),
- wq->rq.memsize, wq->rq.queue,
- dma_unmap_addr(&wq->rq, mapping));
+ if (need_rq)
+ dma_free_coherent(&rdev->lldi.pdev->dev,
+ wq->rq.memsize, wq->rq.queue,
+ dma_unmap_addr(&wq->rq, mapping));
free_sq:
dealloc_sq(rdev, &wq->sq);
free_hwaddr:
- c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
+ if (need_rq)
+ c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
- kfree(wq->rq.sw_rq);
+ if (need_rq)
+ kfree(wq->rq.sw_rq);
free_sw_sq:
kfree(wq->sq.sw_sq);
free_rq_qid:
- c4iw_put_qpid(rdev, wq->rq.qid, uctx);
+ if (need_rq)
+ c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
c4iw_put_qpid(rdev, wq->sq.qid, uctx);
return ret;
}
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
- struct ib_send_wr *wr, int max, u32 *plenp)
+ const struct ib_send_wr *wr, int max, u32 *plenp)
{
u8 *dstp, *srcp;
u32 plen = 0;
@@ -409,7 +439,7 @@ static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
rem -= len;
}
}
- len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
+ len = roundup(plen + sizeof(*immdp), 16) - (plen + sizeof(*immdp));
if (len)
memset(dstp, 0, len);
immdp->op = FW_RI_DATA_IMMD;
@@ -427,7 +457,12 @@ static int build_isgl(__be64 *queue_start, __be64 *queue_end,
{
int i;
u32 plen = 0;
- __be64 *flitp = (__be64 *)isglp->sge;
+ __be64 *flitp;
+
+ if ((__be64 *)isglp == queue_end)
+ isglp = (struct fw_ri_isgl *)queue_start;
+
+ flitp = (__be64 *)isglp->sge;
for (i = 0; i < num_sge; i++) {
if ((plen + sg_list[i].length) < plen)
@@ -452,7 +487,7 @@ static int build_isgl(__be64 *queue_start, __be64 *queue_end,
}
static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
- struct ib_send_wr *wr, u8 *len16)
+ const struct ib_send_wr *wr, u8 *len16)
{
u32 plen;
int size;
@@ -493,7 +528,7 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
T4_MAX_SEND_INLINE, &plen);
if (ret)
return ret;
- size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
+ size = sizeof(wqe->send) + sizeof(struct fw_ri_immd) +
plen;
} else {
ret = build_isgl((__be64 *)sq->queue,
@@ -502,7 +537,7 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
wr->sg_list, wr->num_sge, &plen);
if (ret)
return ret;
- size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
+ size = sizeof(wqe->send) + sizeof(struct fw_ri_isgl) +
wr->num_sge * sizeof(struct fw_ri_sge);
}
} else {
@@ -510,7 +545,7 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
wqe->send.u.immd_src[0].r1 = 0;
wqe->send.u.immd_src[0].r2 = 0;
wqe->send.u.immd_src[0].immdlen = 0;
- size = sizeof wqe->send + sizeof(struct fw_ri_immd);
+ size = sizeof(wqe->send) + sizeof(struct fw_ri_immd);
plen = 0;
}
*len16 = DIV_ROUND_UP(size, 16);
@@ -519,7 +554,7 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
}
static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
- struct ib_send_wr *wr, u8 *len16)
+ const struct ib_send_wr *wr, u8 *len16)
{
u32 plen;
int size;
@@ -527,16 +562,24 @@ static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
if (wr->num_sge > T4_MAX_SEND_SGE)
return -EINVAL;
- wqe->write.r2 = 0;
- wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
- wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
+
+ /*
+ * iWARP protocol supports 64 bit immediate data but rdma api
+ * limits it to 32bit.
+ */
+ if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
+ wqe->write.iw_imm_data.ib_imm_data.imm_data32 = wr->ex.imm_data;
+ else
+ wqe->write.iw_imm_data.ib_imm_data.imm_data32 = 0;
+ wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
+ wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
if (wr->num_sge) {
if (wr->send_flags & IB_SEND_INLINE) {
ret = build_immd(sq, wqe->write.u.immd_src, wr,
T4_MAX_WRITE_INLINE, &plen);
if (ret)
return ret;
- size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
+ size = sizeof(wqe->write) + sizeof(struct fw_ri_immd) +
plen;
} else {
ret = build_isgl((__be64 *)sq->queue,
@@ -545,7 +588,7 @@ static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
wr->sg_list, wr->num_sge, &plen);
if (ret)
return ret;
- size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
+ size = sizeof(wqe->write) + sizeof(struct fw_ri_isgl) +
wr->num_sge * sizeof(struct fw_ri_sge);
}
} else {
@@ -553,7 +596,7 @@ static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
wqe->write.u.immd_src[0].r1 = 0;
wqe->write.u.immd_src[0].r2 = 0;
wqe->write.u.immd_src[0].immdlen = 0;
- size = sizeof wqe->write + sizeof(struct fw_ri_immd);
+ size = sizeof(wqe->write) + sizeof(struct fw_ri_immd);
plen = 0;
}
*len16 = DIV_ROUND_UP(size, 16);
@@ -561,15 +604,69 @@ static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
return 0;
}
-static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+static void build_immd_cmpl(struct t4_sq *sq, struct fw_ri_immd_cmpl *immdp,
+ struct ib_send_wr *wr)
+{
+ memcpy((u8 *)immdp->data, (u8 *)(uintptr_t)wr->sg_list->addr, 16);
+ memset(immdp->r1, 0, 6);
+ immdp->op = FW_RI_DATA_IMMD;
+ immdp->immdlen = 16;
+}
+
+static void build_rdma_write_cmpl(struct t4_sq *sq,
+ struct fw_ri_rdma_write_cmpl_wr *wcwr,
+ const struct ib_send_wr *wr, u8 *len16)
+{
+ u32 plen;
+ int size;
+
+ /*
+ * This code assumes the struct fields preceding the write isgl
+ * fit in one 64B WR slot. This is because the WQE is built
+ * directly in the dma queue, and wrapping is only handled
+ * by the code building sgls, i.e. the "fixed part" of the wr
+ * structs must all fit in 64B. The WQE build code should probably be
+ * redesigned to avoid this restriction, but for now just add
+ * the BUILD_BUG_ON() to catch if this WQE struct gets too big.
+ */
+ BUILD_BUG_ON(offsetof(struct fw_ri_rdma_write_cmpl_wr, u) > 64);
+
+ wcwr->stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
+ wcwr->to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
+ if (wr->next->opcode == IB_WR_SEND)
+ wcwr->stag_inv = 0;
+ else
+ wcwr->stag_inv = cpu_to_be32(wr->next->ex.invalidate_rkey);
+ wcwr->r2 = 0;
+ wcwr->r3 = 0;
+
+ /* SEND_INV SGL */
+ if (wr->next->send_flags & IB_SEND_INLINE)
+ build_immd_cmpl(sq, &wcwr->u_cmpl.immd_src, wr->next);
+ else
+ build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],
+ &wcwr->u_cmpl.isgl_src, wr->next->sg_list, 1, NULL);
+
+ /* WRITE SGL */
+ build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],
+ wcwr->u.isgl_src, wr->sg_list, wr->num_sge, &plen);
+
+ size = sizeof(*wcwr) + sizeof(struct fw_ri_isgl) +
+ wr->num_sge * sizeof(struct fw_ri_sge);
+ wcwr->plen = cpu_to_be32(plen);
+ *len16 = DIV_ROUND_UP(size, 16);
+}
+
+static int build_rdma_read(union t4_wr *wqe, const struct ib_send_wr *wr,
+ u8 *len16)
{
if (wr->num_sge > 1)
return -EINVAL;
- if (wr->num_sge) {
- wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
- wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
+ if (wr->num_sge && wr->sg_list[0].length) {
+ wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
+ wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
>> 32));
- wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
+ wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr);
wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
@@ -586,12 +683,81 @@ static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
}
wqe->read.r2 = 0;
wqe->read.r5 = 0;
- *len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
+ *len16 = DIV_ROUND_UP(sizeof(wqe->read), 16);
return 0;
}
+static void post_write_cmpl(struct c4iw_qp *qhp, const struct ib_send_wr *wr)
+{
+ bool send_signaled = (wr->next->send_flags & IB_SEND_SIGNALED) ||
+ qhp->sq_sig_all;
+ bool write_signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
+ qhp->sq_sig_all;
+ struct t4_swsqe *swsqe;
+ union t4_wr *wqe;
+ u16 write_wrid;
+ u8 len16;
+ u16 idx;
+
+ /*
+ * The sw_sq entries still look like a WRITE and a SEND and consume
+ * 2 slots. The FW WR, however, will be a single uber-WR.
+ */
+ wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
+ qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
+ build_rdma_write_cmpl(&qhp->wq.sq, &wqe->write_cmpl, wr, &len16);
+
+ /* WRITE swsqe */
+ swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
+ swsqe->opcode = FW_RI_RDMA_WRITE;
+ swsqe->idx = qhp->wq.sq.pidx;
+ swsqe->complete = 0;
+ swsqe->signaled = write_signaled;
+ swsqe->flushed = 0;
+ swsqe->wr_id = wr->wr_id;
+ if (c4iw_wr_log) {
+ swsqe->sge_ts =
+ cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
+ swsqe->host_time = ktime_get();
+ }
+
+ write_wrid = qhp->wq.sq.pidx;
+
+ /* just bump the sw_sq */
+ qhp->wq.sq.in_use++;
+ if (++qhp->wq.sq.pidx == qhp->wq.sq.size)
+ qhp->wq.sq.pidx = 0;
+
+ /* SEND_WITH_INV swsqe */
+ swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
+ if (wr->next->opcode == IB_WR_SEND)
+ swsqe->opcode = FW_RI_SEND;
+ else
+ swsqe->opcode = FW_RI_SEND_WITH_INV;
+ swsqe->idx = qhp->wq.sq.pidx;
+ swsqe->complete = 0;
+ swsqe->signaled = send_signaled;
+ swsqe->flushed = 0;
+ swsqe->wr_id = wr->next->wr_id;
+ if (c4iw_wr_log) {
+ swsqe->sge_ts =
+ cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
+ swsqe->host_time = ktime_get();
+ }
+
+ wqe->write_cmpl.flags_send = send_signaled ? FW_RI_COMPLETION_FLAG : 0;
+ wqe->write_cmpl.wrid_send = qhp->wq.sq.pidx;
+
+ init_wr_hdr(wqe, write_wrid, FW_RI_RDMA_WRITE_CMPL_WR,
+ write_signaled ? FW_RI_COMPLETION_FLAG : 0, len16);
+ t4_sq_produce(&qhp->wq, len16);
+ idx = DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
+
+ t4_ring_sq_db(&qhp->wq, idx, wqe);
+}
+
static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
- struct ib_recv_wr *wr, u8 *len16)
+ const struct ib_recv_wr *wr, u8 *len16)
{
int ret;
@@ -600,52 +766,92 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
&wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
if (ret)
return ret;
- *len16 = DIV_ROUND_UP(sizeof wqe->recv +
+ *len16 = DIV_ROUND_UP(
+ sizeof(wqe->recv) + wr->num_sge * sizeof(struct fw_ri_sge), 16);
+ return 0;
+}
+
+static int build_srq_recv(union t4_recv_wr *wqe, const struct ib_recv_wr *wr,
+ u8 *len16)
+{
+ int ret;
+
+ ret = build_isgl((__be64 *)wqe, (__be64 *)(wqe + 1),
+ &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
+ if (ret)
+ return ret;
+ *len16 = DIV_ROUND_UP(sizeof(wqe->recv) +
wr->num_sge * sizeof(struct fw_ri_sge), 16);
return 0;
}
-static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
- struct ib_send_wr *wr, u8 *len16, u8 t5dev)
+static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
+ const struct ib_reg_wr *wr, struct c4iw_mr *mhp,
+ u8 *len16)
{
+ __be64 *p = (__be64 *)fr->pbl;
+
+ fr->r2 = cpu_to_be32(0);
+ fr->stag = cpu_to_be32(mhp->ibmr.rkey);
+
+ fr->tpte.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
+ FW_RI_TPTE_STAGKEY_V((mhp->ibmr.rkey & FW_RI_TPTE_STAGKEY_M)) |
+ FW_RI_TPTE_STAGSTATE_V(1) |
+ FW_RI_TPTE_STAGTYPE_V(FW_RI_STAG_NSMR) |
+ FW_RI_TPTE_PDID_V(mhp->attr.pdid));
+ fr->tpte.locread_to_qpid = cpu_to_be32(
+ FW_RI_TPTE_PERM_V(c4iw_ib_to_tpt_access(wr->access)) |
+ FW_RI_TPTE_ADDRTYPE_V(FW_RI_VA_BASED_TO) |
+ FW_RI_TPTE_PS_V(ilog2(wr->mr->page_size) - 12));
+ fr->tpte.nosnoop_pbladdr = cpu_to_be32(FW_RI_TPTE_PBLADDR_V(
+ PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr)>>3));
+ fr->tpte.dca_mwbcnt_pstag = cpu_to_be32(0);
+ fr->tpte.len_hi = cpu_to_be32(0);
+ fr->tpte.len_lo = cpu_to_be32(mhp->ibmr.length);
+ fr->tpte.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
+ fr->tpte.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff);
+
+ p[0] = cpu_to_be64((u64)mhp->mpl[0]);
+ p[1] = cpu_to_be64((u64)mhp->mpl[1]);
+
+ *len16 = DIV_ROUND_UP(sizeof(*fr), 16);
+}
+static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
+ const struct ib_reg_wr *wr, struct c4iw_mr *mhp,
+ u8 *len16, bool dsgl_supported)
+{
struct fw_ri_immd *imdp;
__be64 *p;
int i;
- int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
+ int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32);
int rem;
- if (wr->wr.fast_reg.page_list_len >
- t4_max_fr_depth(use_dsgl))
+ if (mhp->mpl_len > t4_max_fr_depth(dsgl_supported && use_dsgl))
return -EINVAL;
wqe->fr.qpbinde_to_dcacpu = 0;
- wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
+ wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12;
wqe->fr.addr_type = FW_RI_VA_BASED_TO;
- wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
+ wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access);
wqe->fr.len_hi = 0;
- wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
- wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
- wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
- wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
+ wqe->fr.len_lo = cpu_to_be32(mhp->ibmr.length);
+ wqe->fr.stag = cpu_to_be32(wr->key);
+ wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
+ wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova &
0xffffffff);
- if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
- struct c4iw_fr_page_list *c4pl =
- to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
+ if (dsgl_supported && use_dsgl && (pbllen > max_fr_immd)) {
struct fw_ri_dsgl *sglp;
- for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
- wr->wr.fast_reg.page_list->page_list[i] = (__force u64)
- cpu_to_be64((u64)
- wr->wr.fast_reg.page_list->page_list[i]);
- }
+ for (i = 0; i < mhp->mpl_len; i++)
+ mhp->mpl[i] = (__force u64)cpu_to_be64((u64)mhp->mpl[i]);
sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
sglp->op = FW_RI_DATA_DSGL;
sglp->r1 = 0;
sglp->nsge = cpu_to_be16(1);
- sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
+ sglp->addr0 = cpu_to_be64(mhp->mpl_addr);
sglp->len0 = cpu_to_be32(pbllen);
*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
@@ -657,14 +863,12 @@ static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
imdp->immdlen = cpu_to_be32(pbllen);
p = (__be64 *)(imdp + 1);
rem = pbllen;
- for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
- *p = cpu_to_be64(
- (u64)wr->wr.fast_reg.page_list->page_list[i]);
+ for (i = 0; i < mhp->mpl_len; i++) {
+ *p = cpu_to_be64((u64)mhp->mpl[i]);
rem -= sizeof(*p);
if (++p == (__be64 *)&sq->queue[sq->size])
p = (__be64 *)sq->queue;
}
- BUG_ON(rem < 0);
while (rem) {
*p = 0;
rem -= sizeof(*p);
@@ -677,26 +881,26 @@ static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
return 0;
}
-static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
+static int build_inv_stag(union t4_wr *wqe, const struct ib_send_wr *wr,
u8 *len16)
{
wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
wqe->inv.r2 = 0;
- *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
+ *len16 = DIV_ROUND_UP(sizeof(wqe->inv), 16);
return 0;
}
void c4iw_qp_add_ref(struct ib_qp *qp)
{
- PDBG("%s ib_qp %p\n", __func__, qp);
- atomic_inc(&(to_c4iw_qp(qp)->refcnt));
+ pr_debug("ib_qp %p\n", qp);
+ refcount_inc(&to_c4iw_qp(qp)->qp_refcnt);
}
void c4iw_qp_rem_ref(struct ib_qp *qp)
{
- PDBG("%s ib_qp %p\n", __func__, qp);
- if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
- wake_up(&(to_c4iw_qp(qp)->wait));
+ pr_debug("ib_qp %p\n", qp);
+ if (refcount_dec_and_test(&to_c4iw_qp(qp)->qp_refcnt))
+ complete(&to_c4iw_qp(qp)->qp_rel_comp);
}
static void add_to_fc_list(struct list_head *head, struct list_head *entry)
@@ -709,17 +913,16 @@ static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
{
unsigned long flags;
- spin_lock_irqsave(&qhp->rhp->lock, flags);
+ xa_lock_irqsave(&qhp->rhp->qps, flags);
spin_lock(&qhp->lock);
if (qhp->rhp->db_state == NORMAL)
- t4_ring_sq_db(&qhp->wq, inc,
- is_t5(qhp->rhp->rdev.lldi.adapter_type), NULL);
+ t4_ring_sq_db(&qhp->wq, inc, NULL);
else {
add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
qhp->wq.sq.wq_pidx_inc += inc;
}
spin_unlock(&qhp->lock);
- spin_unlock_irqrestore(&qhp->rhp->lock, flags);
+ xa_unlock_irqrestore(&qhp->rhp->qps, flags);
return 0;
}
@@ -727,28 +930,159 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
{
unsigned long flags;
- spin_lock_irqsave(&qhp->rhp->lock, flags);
+ xa_lock_irqsave(&qhp->rhp->qps, flags);
spin_lock(&qhp->lock);
if (qhp->rhp->db_state == NORMAL)
- t4_ring_rq_db(&qhp->wq, inc,
- is_t5(qhp->rhp->rdev.lldi.adapter_type), NULL);
+ t4_ring_rq_db(&qhp->wq, inc, NULL);
else {
add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
qhp->wq.rq.wq_pidx_inc += inc;
}
spin_unlock(&qhp->lock);
- spin_unlock_irqrestore(&qhp->rhp->lock, flags);
+ xa_unlock_irqrestore(&qhp->rhp->qps, flags);
return 0;
}
-int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+static int ib_to_fw_opcode(int ib_opcode)
+{
+ int opcode;
+
+ switch (ib_opcode) {
+ case IB_WR_SEND_WITH_INV:
+ opcode = FW_RI_SEND_WITH_INV;
+ break;
+ case IB_WR_SEND:
+ opcode = FW_RI_SEND;
+ break;
+ case IB_WR_RDMA_WRITE:
+ opcode = FW_RI_RDMA_WRITE;
+ break;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ opcode = FW_RI_WRITE_IMMEDIATE;
+ break;
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_READ_WITH_INV:
+ opcode = FW_RI_READ_REQ;
+ break;
+ case IB_WR_REG_MR:
+ opcode = FW_RI_FAST_REGISTER;
+ break;
+ case IB_WR_LOCAL_INV:
+ opcode = FW_RI_LOCAL_INV;
+ break;
+ default:
+ opcode = -EINVAL;
+ }
+ return opcode;
+}
+
+static int complete_sq_drain_wr(struct c4iw_qp *qhp,
+ const struct ib_send_wr *wr)
+{
+ struct t4_cqe cqe = {};
+ struct c4iw_cq *schp;
+ unsigned long flag;
+ struct t4_cq *cq;
+ int opcode;
+
+ schp = to_c4iw_cq(qhp->ibqp.send_cq);
+ cq = &schp->cq;
+
+ opcode = ib_to_fw_opcode(wr->opcode);
+ if (opcode < 0)
+ return opcode;
+
+ cqe.u.drain_cookie = wr->wr_id;
+ cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+ CQE_OPCODE_V(opcode) |
+ CQE_TYPE_V(1) |
+ CQE_SWCQE_V(1) |
+ CQE_DRAIN_V(1) |
+ CQE_QPID_V(qhp->wq.sq.qid));
+
+ spin_lock_irqsave(&schp->lock, flag);
+ cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+ cq->sw_queue[cq->sw_pidx] = cqe;
+ t4_swcq_produce(cq);
+ spin_unlock_irqrestore(&schp->lock, flag);
+
+ if (t4_clear_cq_armed(&schp->cq)) {
+ spin_lock_irqsave(&schp->comp_handler_lock, flag);
+ (*schp->ibcq.comp_handler)(&schp->ibcq,
+ schp->ibcq.cq_context);
+ spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+ }
+ return 0;
+}
+
+static int complete_sq_drain_wrs(struct c4iw_qp *qhp,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ int ret = 0;
+
+ while (wr) {
+ ret = complete_sq_drain_wr(qhp, wr);
+ if (ret) {
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+ return ret;
+}
+
+static void complete_rq_drain_wr(struct c4iw_qp *qhp,
+ const struct ib_recv_wr *wr)
+{
+ struct t4_cqe cqe = {};
+ struct c4iw_cq *rchp;
+ unsigned long flag;
+ struct t4_cq *cq;
+
+ rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
+ cq = &rchp->cq;
+
+ cqe.u.drain_cookie = wr->wr_id;
+ cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+ CQE_OPCODE_V(FW_RI_SEND) |
+ CQE_TYPE_V(0) |
+ CQE_SWCQE_V(1) |
+ CQE_DRAIN_V(1) |
+ CQE_QPID_V(qhp->wq.sq.qid));
+
+ spin_lock_irqsave(&rchp->lock, flag);
+ cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+ cq->sw_queue[cq->sw_pidx] = cqe;
+ t4_swcq_produce(cq);
+ spin_unlock_irqrestore(&rchp->lock, flag);
+
+ if (t4_clear_cq_armed(&rchp->cq)) {
+ spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+ (*rchp->ibcq.comp_handler)(&rchp->ibcq,
+ rchp->ibcq.cq_context);
+ spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+ }
+}
+
+static void complete_rq_drain_wrs(struct c4iw_qp *qhp,
+ const struct ib_recv_wr *wr)
+{
+ while (wr) {
+ complete_rq_drain_wr(qhp, wr);
+ wr = wr->next;
+ }
+}
+
+int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
int err = 0;
u8 len16 = 0;
enum fw_wr_opcodes fw_opcode = 0;
enum fw_ri_wr_flags fw_flags;
struct c4iw_qp *qhp;
+ struct c4iw_dev *rhp;
union t4_wr *wqe = NULL;
u32 num_wrs;
struct t4_swsqe *swsqe;
@@ -756,16 +1090,49 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
u16 idx = 0;
qhp = to_c4iw_qp(ibqp);
+ rhp = qhp->rhp;
spin_lock_irqsave(&qhp->lock, flag);
- if (t4_wq_in_error(&qhp->wq)) {
+
+ /*
+ * If the qp has been flushed, then just insert a special
+ * drain cqe.
+ */
+ if (qhp->wq.flushed) {
spin_unlock_irqrestore(&qhp->lock, flag);
- return -EINVAL;
+ err = complete_sq_drain_wrs(qhp, wr, bad_wr);
+ return err;
}
num_wrs = t4_sq_avail(&qhp->wq);
if (num_wrs == 0) {
spin_unlock_irqrestore(&qhp->lock, flag);
+ *bad_wr = wr;
return -ENOMEM;
}
+
+ /*
+ * Fastpath for NVMe-oF target WRITE + SEND_WITH_INV wr chain which is
+ * the response for small NVMe-oF READ requests. If the chain is
+ * exactly a WRITE->SEND_WITH_INV or a WRITE->SEND and the sgl depths
+ * and lengths meet the requirements of the fw_ri_write_cmpl_wr work
+ * request, then build and post the write_cmpl WR. If any of the tests
+ * below are not true, then we continue on with the traditional WRITE
+ * and SEND WRs.
+ */
+ if (qhp->rhp->rdev.lldi.write_cmpl_support &&
+ CHELSIO_CHIP_VERSION(qhp->rhp->rdev.lldi.adapter_type) >=
+ CHELSIO_T5 &&
+ wr && wr->next && !wr->next->next &&
+ wr->opcode == IB_WR_RDMA_WRITE &&
+ wr->sg_list[0].length && wr->num_sge <= T4_WRITE_CMPL_MAX_SGL &&
+ (wr->next->opcode == IB_WR_SEND ||
+ wr->next->opcode == IB_WR_SEND_WITH_INV) &&
+ wr->next->sg_list[0].length == T4_WRITE_CMPL_MAX_CQE &&
+ wr->next->num_sge == 1 && num_wrs >= 2) {
+ post_write_cmpl(qhp, wr);
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ return 0;
+ }
+
while (wr) {
if (num_wrs == 0) {
err = -ENOMEM;
@@ -793,6 +1160,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
swsqe->opcode = FW_RI_SEND_WITH_INV;
err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
break;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (unlikely(!rhp->rdev.lldi.write_w_imm_support)) {
+ err = -EINVAL;
+ break;
+ }
+ fw_flags |= FW_RI_RDMA_WRITE_WITH_IMMEDIATE;
+ fallthrough;
case IB_WR_RDMA_WRITE:
fw_opcode = FW_RI_RDMA_WRITE_WR;
swsqe->opcode = FW_RI_RDMA_WRITE;
@@ -802,10 +1176,12 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_RDMA_READ_WITH_INV:
fw_opcode = FW_RI_RDMA_READ_WR;
swsqe->opcode = FW_RI_READ_REQ;
- if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
+ if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) {
+ c4iw_invalidate_mr(rhp, wr->sg_list[0].lkey);
fw_flags = FW_RI_RDMA_READ_INVALIDATE;
- else
+ } else {
fw_flags = 0;
+ }
err = build_rdma_read(wqe, wr, &len16);
if (err)
break;
@@ -813,24 +1189,37 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
if (!qhp->wq.sq.oldest_read)
qhp->wq.sq.oldest_read = swsqe;
break;
- case IB_WR_FAST_REG_MR:
- fw_opcode = FW_RI_FR_NSMR_WR;
+ case IB_WR_REG_MR: {
+ struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr);
+
swsqe->opcode = FW_RI_FAST_REGISTER;
- err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16,
- is_t5(
- qhp->rhp->rdev.lldi.adapter_type) ?
- 1 : 0);
+ if (rhp->rdev.lldi.fr_nsmr_tpte_wr_support &&
+ !mhp->attr.state && mhp->mpl_len <= 2) {
+ fw_opcode = FW_RI_FR_NSMR_TPTE_WR;
+ build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr),
+ mhp, &len16);
+ } else {
+ fw_opcode = FW_RI_FR_NSMR_WR;
+ err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr),
+ mhp, &len16,
+ rhp->rdev.lldi.ulptx_memwrite_dsgl);
+ if (err)
+ break;
+ }
+ mhp->attr.state = 1;
break;
+ }
case IB_WR_LOCAL_INV:
if (wr->send_flags & IB_SEND_FENCE)
fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
fw_opcode = FW_RI_INV_LSTAG_WR;
swsqe->opcode = FW_RI_LOCAL_INV;
err = build_inv_stag(wqe, wr, &len16);
+ c4iw_invalidate_mr(rhp, wr->ex.invalidate_rkey);
break;
default:
- PDBG("%s post of type=%d TBD!\n", __func__,
- wr->opcode);
+ pr_warn("%s post of type=%d TBD!\n", __func__,
+ wr->opcode);
err = -EINVAL;
}
if (err) {
@@ -845,23 +1234,22 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
swsqe->wr_id = wr->wr_id;
if (c4iw_wr_log) {
swsqe->sge_ts = cxgb4_read_sge_timestamp(
- qhp->rhp->rdev.lldi.ports[0]);
- getnstimeofday(&swsqe->host_ts);
+ rhp->rdev.lldi.ports[0]);
+ swsqe->host_time = ktime_get();
}
init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
- PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
- __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
- swsqe->opcode, swsqe->read_len);
+ pr_debug("cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
+ (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
+ swsqe->opcode, swsqe->read_len);
wr = wr->next;
num_wrs--;
t4_sq_produce(&qhp->wq, len16);
idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
}
- if (!qhp->rhp->rdev.status_page->db_off) {
- t4_ring_sq_db(&qhp->wq, idx,
- is_t5(qhp->rhp->rdev.lldi.adapter_type), wqe);
+ if (!rhp->rdev.status_page->db_off) {
+ t4_ring_sq_db(&qhp->wq, idx, wqe);
spin_unlock_irqrestore(&qhp->lock, flag);
} else {
spin_unlock_irqrestore(&qhp->lock, flag);
@@ -870,8 +1258,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
return err;
}
-int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
int err = 0;
struct c4iw_qp *qhp;
@@ -883,13 +1271,20 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
qhp = to_c4iw_qp(ibqp);
spin_lock_irqsave(&qhp->lock, flag);
- if (t4_wq_in_error(&qhp->wq)) {
+
+ /*
+ * If the qp has been flushed, then just insert a special
+ * drain cqe.
+ */
+ if (qhp->wq.flushed) {
spin_unlock_irqrestore(&qhp->lock, flag);
- return -EINVAL;
+ complete_rq_drain_wrs(qhp, wr);
+ return err;
}
num_wrs = t4_rq_avail(&qhp->wq);
if (num_wrs == 0) {
spin_unlock_irqrestore(&qhp->lock, flag);
+ *bad_wr = wr;
return -ENOMEM;
}
while (wr) {
@@ -915,8 +1310,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
cxgb4_read_sge_timestamp(
qhp->rhp->rdev.lldi.ports[0]);
- getnstimeofday(
- &qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_ts);
+ qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_time =
+ ktime_get();
}
wqe->recv.opcode = FW_RI_RECV_WR;
@@ -926,16 +1321,15 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
wqe->recv.r2[1] = 0;
wqe->recv.r2[2] = 0;
wqe->recv.len16 = len16;
- PDBG("%s cookie 0x%llx pidx %u\n", __func__,
- (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
+ pr_debug("cookie 0x%llx pidx %u\n",
+ (unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
t4_rq_produce(&qhp->wq, len16);
idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
wr = wr->next;
num_wrs--;
}
if (!qhp->rhp->rdev.status_page->db_off) {
- t4_ring_rq_db(&qhp->wq, idx,
- is_t5(qhp->rhp->rdev.lldi.adapter_type), wqe);
+ t4_ring_rq_db(&qhp->wq, idx, wqe);
spin_unlock_irqrestore(&qhp->lock, flag);
} else {
spin_unlock_irqrestore(&qhp->lock, flag);
@@ -944,9 +1338,87 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
return err;
}
-int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
+static void defer_srq_wr(struct t4_srq *srq, union t4_recv_wr *wqe,
+ u64 wr_id, u8 len16)
{
- return -ENOSYS;
+ struct t4_srq_pending_wr *pwr = &srq->pending_wrs[srq->pending_pidx];
+
+ pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u ooo_count %u wr_id 0x%llx pending_cidx %u pending_pidx %u pending_in_use %u\n",
+ __func__, srq->cidx, srq->pidx, srq->wq_pidx,
+ srq->in_use, srq->ooo_count,
+ (unsigned long long)wr_id, srq->pending_cidx,
+ srq->pending_pidx, srq->pending_in_use);
+ pwr->wr_id = wr_id;
+ pwr->len16 = len16;
+ memcpy(&pwr->wqe, wqe, len16 * 16);
+ t4_srq_produce_pending_wr(srq);
+}
+
+int c4iw_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ union t4_recv_wr *wqe, lwqe;
+ struct c4iw_srq *srq;
+ unsigned long flag;
+ u8 len16 = 0;
+ u16 idx = 0;
+ int err = 0;
+ u32 num_wrs;
+
+ srq = to_c4iw_srq(ibsrq);
+ spin_lock_irqsave(&srq->lock, flag);
+ num_wrs = t4_srq_avail(&srq->wq);
+ if (num_wrs == 0) {
+ spin_unlock_irqrestore(&srq->lock, flag);
+ return -ENOMEM;
+ }
+ while (wr) {
+ if (wr->num_sge > T4_MAX_RECV_SGE) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+ wqe = &lwqe;
+ if (num_wrs)
+ err = build_srq_recv(wqe, wr, &len16);
+ else
+ err = -ENOMEM;
+ if (err) {
+ *bad_wr = wr;
+ break;
+ }
+
+ wqe->recv.opcode = FW_RI_RECV_WR;
+ wqe->recv.r1 = 0;
+ wqe->recv.wrid = srq->wq.pidx;
+ wqe->recv.r2[0] = 0;
+ wqe->recv.r2[1] = 0;
+ wqe->recv.r2[2] = 0;
+ wqe->recv.len16 = len16;
+
+ if (srq->wq.ooo_count ||
+ srq->wq.pending_in_use ||
+ srq->wq.sw_rq[srq->wq.pidx].valid) {
+ defer_srq_wr(&srq->wq, wqe, wr->wr_id, len16);
+ } else {
+ srq->wq.sw_rq[srq->wq.pidx].wr_id = wr->wr_id;
+ srq->wq.sw_rq[srq->wq.pidx].valid = 1;
+ c4iw_copy_wr_to_srq(&srq->wq, wqe, len16);
+ pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u wr_id 0x%llx\n",
+ __func__, srq->wq.cidx,
+ srq->wq.pidx, srq->wq.wq_pidx,
+ srq->wq.in_use,
+ (unsigned long long)wr->wr_id);
+ t4_srq_produce(&srq->wq, len16);
+ idx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
+ }
+ wr = wr->next;
+ num_wrs--;
+ }
+ if (idx)
+ t4_ring_srq_db(&srq->wq, idx, len16, wqe);
+ spin_unlock_irqrestore(&srq->lock, flag);
+ return err;
}
static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
@@ -1092,23 +1564,23 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
struct sk_buff *skb;
struct terminate_message *term;
- PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
- qhp->ep->hwtid);
+ pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid,
+ qhp->ep->hwtid);
- skb = alloc_skb(sizeof *wqe, gfp);
- if (!skb)
+ skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
+ if (WARN_ON(!skb))
return;
+
set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
- wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
- memset(wqe, 0, sizeof *wqe);
+ wqe = __skb_put_zero(skb, sizeof(*wqe));
wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
wqe->flowid_len16 = cpu_to_be32(
FW_WR_FLOWID_V(qhp->ep->hwtid) |
FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
- wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
+ wqe->u.terminate.immdlen = cpu_to_be32(sizeof(*term));
term = (struct terminate_message *)wqe->u.terminate.termmsg;
if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
term->layer_etype = qhp->attr.layer_etype;
@@ -1125,53 +1597,66 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
struct c4iw_cq *schp)
{
int count;
- int rq_flushed, sq_flushed;
+ int rq_flushed = 0, sq_flushed;
unsigned long flag;
+ struct ib_event ev;
- PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
+ pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
- /* locking hierarchy: cq lock first, then qp lock. */
+ /* locking hierarchy: cqs lock first, then qp lock. */
spin_lock_irqsave(&rchp->lock, flag);
+ if (schp != rchp)
+ spin_lock(&schp->lock);
spin_lock(&qhp->lock);
+ if (qhp->srq && qhp->attr.state == C4IW_QP_STATE_ERROR &&
+ qhp->ibqp.event_handler) {
+ ev.device = qhp->ibqp.device;
+ ev.element.qp = &qhp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ qhp->ibqp.event_handler(&ev, qhp->ibqp.qp_context);
+ }
if (qhp->wq.flushed) {
spin_unlock(&qhp->lock);
+ if (schp != rchp)
+ spin_unlock(&schp->lock);
spin_unlock_irqrestore(&rchp->lock, flag);
return;
}
qhp->wq.flushed = 1;
+ t4_set_wq_in_error(&qhp->wq, 0);
- c4iw_flush_hw_cq(rchp);
- c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
- rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
- spin_unlock(&qhp->lock);
- spin_unlock_irqrestore(&rchp->lock, flag);
+ c4iw_flush_hw_cq(rchp, qhp);
+ if (!qhp->srq) {
+ c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
+ rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
+ }
- /* locking hierarchy: cq lock first, then qp lock. */
- spin_lock_irqsave(&schp->lock, flag);
- spin_lock(&qhp->lock);
if (schp != rchp)
- c4iw_flush_hw_cq(schp);
+ c4iw_flush_hw_cq(schp, qhp);
sq_flushed = c4iw_flush_sq(qhp);
+
spin_unlock(&qhp->lock);
- spin_unlock_irqrestore(&schp->lock, flag);
+ if (schp != rchp)
+ spin_unlock(&schp->lock);
+ spin_unlock_irqrestore(&rchp->lock, flag);
if (schp == rchp) {
- if (t4_clear_cq_armed(&rchp->cq) &&
- (rq_flushed || sq_flushed)) {
+ if ((rq_flushed || sq_flushed) &&
+ t4_clear_cq_armed(&rchp->cq)) {
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq,
rchp->ibcq.cq_context);
spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
}
} else {
- if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
+ if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) {
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq,
rchp->ibcq.cq_context);
spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
}
- if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
+ if (sq_flushed && t4_clear_cq_armed(&schp->cq)) {
spin_lock_irqsave(&schp->comp_handler_lock, flag);
(*schp->ibcq.comp_handler)(&schp->ibcq,
schp->ibcq.cq_context);
@@ -1188,8 +1673,14 @@ static void flush_qp(struct c4iw_qp *qhp)
rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
schp = to_c4iw_cq(qhp->ibqp.send_cq);
- t4_set_wq_in_error(&qhp->wq);
if (qhp->ibqp.uobject) {
+
+ /* for user qps, qhp->wq.flushed is protected by qhp->mutex */
+ if (qhp->wq.flushed)
+ return;
+
+ qhp->wq.flushed = 1;
+ t4_set_wq_in_error(&qhp->wq, 0);
t4_set_cq_in_error(&rchp->cq);
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
@@ -1213,49 +1704,44 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
int ret;
struct sk_buff *skb;
- PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
- ep->hwtid);
+ pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, ep->hwtid);
- skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
- if (!skb)
+ skb = skb_dequeue(&ep->com.ep_skb_list);
+ if (WARN_ON(!skb))
return -ENOMEM;
+
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
- memset(wqe, 0, sizeof *wqe);
+ wqe = __skb_put_zero(skb, sizeof(*wqe));
wqe->op_compl = cpu_to_be32(
FW_WR_OP_V(FW_RI_INIT_WR) |
FW_WR_COMPL_F);
wqe->flowid_len16 = cpu_to_be32(
FW_WR_FLOWID_V(ep->hwtid) |
FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
- wqe->cookie = (uintptr_t)&ep->com.wr_wait;
+ wqe->cookie = (uintptr_t)ep->com.wr_waitp;
wqe->u.fini.type = FW_RI_TYPE_FINI;
- ret = c4iw_ofld_send(&rhp->rdev, skb);
- if (ret)
- goto out;
- ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
- qhp->wq.sq.qid, __func__);
-out:
- PDBG("%s ret %d\n", __func__, ret);
+ ret = c4iw_ref_send_wait(&rhp->rdev, skb, ep->com.wr_waitp,
+ qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
+
+ pr_debug("ret %d\n", ret);
return ret;
}
static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
- PDBG("%s p2p_type = %d\n", __func__, p2p_type);
- memset(&init->u, 0, sizeof init->u);
+ pr_debug("p2p_type = %d\n", p2p_type);
+ memset(&init->u, 0, sizeof(init->u));
switch (p2p_type) {
case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
init->u.write.stag_sink = cpu_to_be32(1);
init->u.write.to_sink = cpu_to_be64(1);
init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
- init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
- sizeof(struct fw_ri_immd),
- 16);
+ init->u.write.len16 = DIV_ROUND_UP(
+ sizeof(init->u.write) + sizeof(struct fw_ri_immd), 16);
break;
case FW_RI_INIT_P2PTYPE_READ_REQ:
init->u.write.opcode = FW_RI_RDMA_READ_WR;
@@ -1263,7 +1749,7 @@ static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
init->u.read.to_src_lo = cpu_to_be32(1);
init->u.read.stag_sink = cpu_to_be32(1);
init->u.read.to_sink_lo = cpu_to_be32(1);
- init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
+ init->u.read.len16 = DIV_ROUND_UP(sizeof(init->u.read), 16);
break;
}
}
@@ -1274,10 +1760,10 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
int ret;
struct sk_buff *skb;
- PDBG("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp,
- qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
+ pr_debug("qhp %p qid 0x%x tid %u ird %u ord %u\n", qhp,
+ qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
- skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
+ skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
if (!skb) {
ret = -ENOMEM;
goto out;
@@ -1290,8 +1776,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
}
set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
- wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
- memset(wqe, 0, sizeof *wqe);
+ wqe = __skb_put_zero(skb, sizeof(*wqe));
wqe->op_compl = cpu_to_be32(
FW_WR_OP_V(FW_RI_INIT_WR) |
FW_WR_COMPL_F);
@@ -1299,7 +1784,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
FW_WR_FLOWID_V(qhp->ep->hwtid) |
FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
- wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait;
+ wqe->cookie = (uintptr_t)qhp->ep->com.wr_waitp;
wqe->u.init.type = FW_RI_TYPE_INIT;
wqe->u.init.mpareqbit_p2ptype =
@@ -1323,31 +1808,32 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
- wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
+ if (qhp->srq) {
+ wqe->u.init.rq_eqid = cpu_to_be32(FW_RI_INIT_RQEQID_SRQ |
+ qhp->srq->idx);
+ } else {
+ wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
+ wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
+ wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
+ rhp->rdev.lldi.vr->rq.start);
+ }
wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
- wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
- wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
- rhp->rdev.lldi.vr->rq.start);
if (qhp->attr.mpa_attr.initiator)
build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
- ret = c4iw_ofld_send(&rhp->rdev, skb);
- if (ret)
- goto err1;
-
- ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
- qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
+ ret = c4iw_ref_send_wait(&rhp->rdev, skb, qhp->ep->com.wr_waitp,
+ qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
if (!ret)
goto out;
-err1:
+
free_ird(rhp, qhp->attr.max_ird);
out:
- PDBG("%s ret %d\n", __func__, ret);
+ pr_debug("ret %d\n", ret);
return ret;
}
@@ -1364,9 +1850,9 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
int free = 0;
struct c4iw_ep *ep = NULL;
- PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
- qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
- (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
+ pr_debug("qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
+ qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
+ (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
mutex_lock(&qhp->mutex);
@@ -1453,8 +1939,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
case C4IW_QP_STATE_RTS:
switch (attrs->next_state) {
case C4IW_QP_STATE_CLOSING:
- BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
- t4_set_wq_in_error(&qhp->wq);
+ t4_set_wq_in_error(&qhp->wq, 0);
set_state(qhp, C4IW_QP_STATE_CLOSING);
ep = qhp->ep;
if (!internal) {
@@ -1467,13 +1952,13 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
goto err;
break;
case C4IW_QP_STATE_TERMINATE:
- t4_set_wq_in_error(&qhp->wq);
+ t4_set_wq_in_error(&qhp->wq, 0);
set_state(qhp, C4IW_QP_STATE_TERMINATE);
qhp->attr.layer_etype = attrs->layer_etype;
qhp->attr.ecode = attrs->ecode;
ep = qhp->ep;
if (!internal) {
- c4iw_get_ep(&qhp->ep->com);
+ c4iw_get_ep(&ep->com);
terminate = 1;
disconnect = 1;
} else {
@@ -1484,10 +1969,9 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
}
break;
case C4IW_QP_STATE_ERROR:
- t4_set_wq_in_error(&qhp->wq);
+ t4_set_wq_in_error(&qhp->wq, 0);
set_state(qhp, C4IW_QP_STATE_ERROR);
if (!internal) {
- abort = 1;
disconnect = 1;
ep = qhp->ep;
c4iw_get_ep(&qhp->ep->com);
@@ -1500,7 +1984,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
}
break;
case C4IW_QP_STATE_CLOSING:
- if (!internal) {
+
+ /*
+ * Allow kernel users to move to ERROR for qp draining.
+ */
+ if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
+ C4IW_QP_STATE_ERROR)) {
ret = -EINVAL;
goto out;
}
@@ -1539,16 +2028,15 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
goto err;
break;
default:
- printk(KERN_ERR "%s in a bad state %d\n",
- __func__, qhp->attr.state);
+ pr_err("%s in a bad state %d\n", __func__, qhp->attr.state);
ret = -EINVAL;
goto err;
break;
}
goto out;
err:
- PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
- qhp->wq.sq.qid);
+ pr_debug("disassociating ep %p qpid 0x%x\n", qhp->ep,
+ qhp->wq.sq.qid);
/* disassociate the LLP connection */
qhp->attr.llp_stream_handle = NULL;
@@ -1558,7 +2046,6 @@ err:
set_state(qhp, C4IW_QP_STATE_ERROR);
free = 1;
abort = 1;
- BUG_ON(!ep);
flush_qp(qhp);
wake_up(&qhp->wait);
out:
@@ -1584,19 +2071,20 @@ out:
*/
if (free)
c4iw_put_ep(&ep->com);
- PDBG("%s exit state %d\n", __func__, qhp->attr.state);
+ pr_debug("exit state %d\n", qhp->attr.state);
return ret;
}
-int c4iw_destroy_qp(struct ib_qp *ib_qp)
+int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
{
struct c4iw_dev *rhp;
struct c4iw_qp *qhp;
- struct c4iw_qp_attributes attrs;
struct c4iw_ucontext *ucontext;
+ struct c4iw_qp_attributes attrs;
qhp = to_c4iw_qp(ib_qp);
rhp = qhp->rhp;
+ ucontext = qhp->ucontext;
attrs.next_state = C4IW_QP_STATE_ERROR;
if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
@@ -1605,91 +2093,99 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
wait_event(qhp->wait, !qhp->ep);
- remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
- atomic_dec(&qhp->refcnt);
- wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
-
- spin_lock_irq(&rhp->lock);
+ xa_lock_irq(&rhp->qps);
+ __xa_erase(&rhp->qps, qhp->wq.sq.qid);
if (!list_empty(&qhp->db_fc_entry))
list_del_init(&qhp->db_fc_entry);
- spin_unlock_irq(&rhp->lock);
+ xa_unlock_irq(&rhp->qps);
free_ird(rhp, qhp->attr.max_ird);
- ucontext = ib_qp->uobject ?
- to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
+ c4iw_qp_rem_ref(ib_qp);
+
+ wait_for_completion(&qhp->qp_rel_comp);
+
+ pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid);
+ pr_debug("qhp %p ucontext %p\n", qhp, ucontext);
+
destroy_qp(&rhp->rdev, &qhp->wq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
- PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
- kfree(qhp);
+ c4iw_put_wr_wait(qhp->wr_waitp);
return 0;
}
-struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
- struct ib_udata *udata)
+int c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata)
{
+ struct ib_pd *pd = qp->pd;
struct c4iw_dev *rhp;
- struct c4iw_qp *qhp;
+ struct c4iw_qp *qhp = to_c4iw_qp(qp);
struct c4iw_pd *php;
struct c4iw_cq *schp;
struct c4iw_cq *rchp;
struct c4iw_create_qp_resp uresp;
- unsigned int sqsize, rqsize;
- struct c4iw_ucontext *ucontext;
+ unsigned int sqsize, rqsize = 0;
+ struct c4iw_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct c4iw_ucontext, ibucontext);
int ret;
- struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL;
+ struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
+ struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
- PDBG("%s ib_pd %p\n", __func__, pd);
-
- if (attrs->qp_type != IB_QPT_RC)
- return ERR_PTR(-EINVAL);
+ if (attrs->qp_type != IB_QPT_RC || attrs->create_flags)
+ return -EOPNOTSUPP;
php = to_c4iw_pd(pd);
rhp = php->rhp;
schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
if (!schp || !rchp)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
- if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
- return ERR_PTR(-E2BIG);
- rqsize = attrs->cap.max_recv_wr + 1;
- if (rqsize < 8)
- rqsize = 8;
+ if (!attrs->srq) {
+ if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
+ return -E2BIG;
+ rqsize = attrs->cap.max_recv_wr + 1;
+ if (rqsize < 8)
+ rqsize = 8;
+ }
if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
- return ERR_PTR(-E2BIG);
+ return -E2BIG;
sqsize = attrs->cap.max_send_wr + 1;
if (sqsize < 8)
sqsize = 8;
- ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
+ qhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+ if (!qhp->wr_waitp)
+ return -ENOMEM;
- qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
- if (!qhp)
- return ERR_PTR(-ENOMEM);
qhp->wq.sq.size = sqsize;
qhp->wq.sq.memsize =
(sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
qhp->wq.sq.flush_cidx = -1;
- qhp->wq.rq.size = rqsize;
- qhp->wq.rq.memsize =
- (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
- sizeof(*qhp->wq.rq.queue);
+ if (!attrs->srq) {
+ qhp->wq.rq.size = rqsize;
+ qhp->wq.rq.memsize =
+ (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
+ sizeof(*qhp->wq.rq.queue);
+ }
if (ucontext) {
qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
- qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
+ if (!attrs->srq)
+ qhp->wq.rq.memsize =
+ roundup(qhp->wq.rq.memsize, PAGE_SIZE);
}
ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
+ qhp->wr_waitp, !attrs->srq);
if (ret)
- goto err1;
+ goto err_free_wr_wait;
attrs->cap.max_recv_wr = rqsize - 1;
attrs->cap.max_send_wr = sqsize - 1;
@@ -1700,10 +2196,12 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
- qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
- qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
+ if (!attrs->srq) {
+ qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
+ qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
+ }
qhp->attr.state = C4IW_QP_STATE_IDLE;
qhp->attr.next_state = C4IW_QP_STATE_IDLE;
qhp->attr.enable_rdma_read = 1;
@@ -1715,119 +2213,171 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
spin_lock_init(&qhp->lock);
mutex_init(&qhp->mutex);
init_waitqueue_head(&qhp->wait);
- atomic_set(&qhp->refcnt, 1);
+ init_completion(&qhp->qp_rel_comp);
+ refcount_set(&qhp->qp_refcnt, 1);
- ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
+ ret = xa_insert_irq(&rhp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL);
if (ret)
- goto err2;
+ goto err_destroy_qp;
- if (udata) {
- mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
- if (!mm1) {
+ if (udata && ucontext) {
+ sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL);
+ if (!sq_key_mm) {
ret = -ENOMEM;
- goto err3;
+ goto err_remove_handle;
}
- mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
- if (!mm2) {
- ret = -ENOMEM;
- goto err4;
+ if (!attrs->srq) {
+ rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
+ if (!rq_key_mm) {
+ ret = -ENOMEM;
+ goto err_free_sq_key;
+ }
}
- mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
- if (!mm3) {
+ sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL);
+ if (!sq_db_key_mm) {
ret = -ENOMEM;
- goto err5;
+ goto err_free_rq_key;
}
- mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
- if (!mm4) {
- ret = -ENOMEM;
- goto err6;
+ if (!attrs->srq) {
+ rq_db_key_mm =
+ kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
+ if (!rq_db_key_mm) {
+ ret = -ENOMEM;
+ goto err_free_sq_db_key;
+ }
}
+ memset(&uresp, 0, sizeof(uresp));
if (t4_sq_onchip(&qhp->wq.sq)) {
- mm5 = kmalloc(sizeof *mm5, GFP_KERNEL);
- if (!mm5) {
+ ma_sync_key_mm = kmalloc(sizeof(*ma_sync_key_mm),
+ GFP_KERNEL);
+ if (!ma_sync_key_mm) {
ret = -ENOMEM;
- goto err7;
+ goto err_free_rq_db_key;
}
uresp.flags = C4IW_QPF_ONCHIP;
- } else
- uresp.flags = 0;
+ }
+ if (rhp->rdev.lldi.write_w_imm_support)
+ uresp.flags |= C4IW_QPF_WRITE_W_IMM;
uresp.qid_mask = rhp->rdev.qpmask;
uresp.sqid = qhp->wq.sq.qid;
uresp.sq_size = qhp->wq.sq.size;
uresp.sq_memsize = qhp->wq.sq.memsize;
- uresp.rqid = qhp->wq.rq.qid;
- uresp.rq_size = qhp->wq.rq.size;
- uresp.rq_memsize = qhp->wq.rq.memsize;
+ if (!attrs->srq) {
+ uresp.rqid = qhp->wq.rq.qid;
+ uresp.rq_size = qhp->wq.rq.size;
+ uresp.rq_memsize = qhp->wq.rq.memsize;
+ }
spin_lock(&ucontext->mmap_lock);
- if (mm5) {
+ if (ma_sync_key_mm) {
uresp.ma_sync_key = ucontext->key;
ucontext->key += PAGE_SIZE;
- } else {
- uresp.ma_sync_key = 0;
}
uresp.sq_key = ucontext->key;
ucontext->key += PAGE_SIZE;
- uresp.rq_key = ucontext->key;
- ucontext->key += PAGE_SIZE;
+ if (!attrs->srq) {
+ uresp.rq_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ }
uresp.sq_db_gts_key = ucontext->key;
ucontext->key += PAGE_SIZE;
- uresp.rq_db_gts_key = ucontext->key;
- ucontext->key += PAGE_SIZE;
+ if (!attrs->srq) {
+ uresp.rq_db_gts_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ }
spin_unlock(&ucontext->mmap_lock);
- ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
+ ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (ret)
- goto err8;
- mm1->key = uresp.sq_key;
- mm1->addr = qhp->wq.sq.phys_addr;
- mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
- insert_mmap(ucontext, mm1);
- mm2->key = uresp.rq_key;
- mm2->addr = virt_to_phys(qhp->wq.rq.queue);
- mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
- insert_mmap(ucontext, mm2);
- mm3->key = uresp.sq_db_gts_key;
- mm3->addr = (__force unsigned long)qhp->wq.sq.bar2_pa;
- mm3->len = PAGE_SIZE;
- insert_mmap(ucontext, mm3);
- mm4->key = uresp.rq_db_gts_key;
- mm4->addr = (__force unsigned long)qhp->wq.rq.bar2_pa;
- mm4->len = PAGE_SIZE;
- insert_mmap(ucontext, mm4);
- if (mm5) {
- mm5->key = uresp.ma_sync_key;
- mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
- + PCIE_MA_SYNC_A) & PAGE_MASK;
- mm5->len = PAGE_SIZE;
- insert_mmap(ucontext, mm5);
+ goto err_free_ma_sync_key;
+ sq_key_mm->key = uresp.sq_key;
+ sq_key_mm->addr = 0;
+ sq_key_mm->vaddr = qhp->wq.sq.queue;
+ sq_key_mm->dma_addr = qhp->wq.sq.dma_addr;
+ sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
+ insert_flag_to_mmap(&rhp->rdev, sq_key_mm, sq_key_mm->addr);
+ insert_mmap(ucontext, sq_key_mm);
+ if (!attrs->srq) {
+ rq_key_mm->key = uresp.rq_key;
+ rq_key_mm->addr = 0;
+ rq_key_mm->vaddr = qhp->wq.rq.queue;
+ rq_key_mm->dma_addr = qhp->wq.rq.dma_addr;
+ rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
+ insert_flag_to_mmap(&rhp->rdev, rq_key_mm,
+ rq_key_mm->addr);
+ insert_mmap(ucontext, rq_key_mm);
+ }
+ sq_db_key_mm->key = uresp.sq_db_gts_key;
+ sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa;
+ sq_db_key_mm->vaddr = NULL;
+ sq_db_key_mm->dma_addr = 0;
+ sq_db_key_mm->len = PAGE_SIZE;
+ insert_flag_to_mmap(&rhp->rdev, sq_db_key_mm,
+ sq_db_key_mm->addr);
+ insert_mmap(ucontext, sq_db_key_mm);
+ if (!attrs->srq) {
+ rq_db_key_mm->key = uresp.rq_db_gts_key;
+ rq_db_key_mm->addr =
+ (u64)(unsigned long)qhp->wq.rq.bar2_pa;
+ rq_db_key_mm->len = PAGE_SIZE;
+ rq_db_key_mm->vaddr = NULL;
+ rq_db_key_mm->dma_addr = 0;
+ insert_flag_to_mmap(&rhp->rdev, rq_db_key_mm,
+ rq_db_key_mm->addr);
+ insert_mmap(ucontext, rq_db_key_mm);
}
+ if (ma_sync_key_mm) {
+ ma_sync_key_mm->key = uresp.ma_sync_key;
+ ma_sync_key_mm->addr =
+ (pci_resource_start(rhp->rdev.lldi.pdev, 0) +
+ PCIE_MA_SYNC_A) & PAGE_MASK;
+ ma_sync_key_mm->len = PAGE_SIZE;
+ ma_sync_key_mm->vaddr = NULL;
+ ma_sync_key_mm->dma_addr = 0;
+ insert_flag_to_mmap(&rhp->rdev, ma_sync_key_mm,
+ ma_sync_key_mm->addr);
+ insert_mmap(ucontext, ma_sync_key_mm);
+ }
+
+ qhp->ucontext = ucontext;
+ }
+ if (!attrs->srq) {
+ qhp->wq.qp_errp =
+ &qhp->wq.rq.queue[qhp->wq.rq.size].status.qp_err;
+ } else {
+ qhp->wq.qp_errp =
+ &qhp->wq.sq.queue[qhp->wq.sq.size].status.qp_err;
+ qhp->wq.srqidxp =
+ &qhp->wq.sq.queue[qhp->wq.sq.size].status.srqidx;
}
+
qhp->ibqp.qp_num = qhp->wq.sq.qid;
- init_timer(&(qhp->timer));
+ if (attrs->srq)
+ qhp->srq = to_c4iw_srq(attrs->srq);
INIT_LIST_HEAD(&qhp->db_fc_entry);
- PDBG("%s sq id %u size %u memsize %zu num_entries %u "
- "rq id %u size %u memsize %zu num_entries %u\n", __func__,
- qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
- attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
- qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
- return &qhp->ibqp;
-err8:
- kfree(mm5);
-err7:
- kfree(mm4);
-err6:
- kfree(mm3);
-err5:
- kfree(mm2);
-err4:
- kfree(mm1);
-err3:
- remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
-err2:
+ pr_debug("sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
+ qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
+ attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
+ qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
+ return 0;
+err_free_ma_sync_key:
+ kfree(ma_sync_key_mm);
+err_free_rq_db_key:
+ if (!attrs->srq)
+ kfree(rq_db_key_mm);
+err_free_sq_db_key:
+ kfree(sq_db_key_mm);
+err_free_rq_key:
+ if (!attrs->srq)
+ kfree(rq_key_mm);
+err_free_sq_key:
+ kfree(sq_key_mm);
+err_remove_handle:
+ xa_erase_irq(&rhp->qps, qhp->wq.sq.qid);
+err_destroy_qp:
destroy_qp(&rhp->rdev, &qhp->wq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
-err1:
- kfree(qhp);
- return ERR_PTR(ret);
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq);
+err_free_wr_wait:
+ c4iw_put_wr_wait(qhp->wr_waitp);
+ return ret;
}
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
@@ -1836,9 +2386,12 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct c4iw_dev *rhp;
struct c4iw_qp *qhp;
enum c4iw_qp_attr_mask mask = 0;
- struct c4iw_qp_attributes attrs;
+ struct c4iw_qp_attributes attrs = {};
+
+ pr_debug("ib_qp %p\n", ibqp);
- PDBG("%s ib_qp %p\n", __func__, ibqp);
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
/* iwarp does not support the RTR state */
if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
@@ -1848,7 +2401,6 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (!attr_mask)
return 0;
- memset(&attrs, 0, sizeof attrs);
qhp = to_c4iw_qp(ibqp);
rhp = qhp->rhp;
@@ -1875,7 +2427,7 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
attrs.rq_db_inc = attr->rq_psn;
mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
- if (is_t5(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
+ if (!is_t4(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
(mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
return -EINVAL;
@@ -1884,23 +2436,412 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
- PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
+ pr_debug("ib_dev %p qpn 0x%x\n", dev, qpn);
return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}
+void c4iw_dispatch_srq_limit_reached_event(struct c4iw_srq *srq)
+{
+ struct ib_event event = {};
+
+ event.device = &srq->rhp->ibdev;
+ event.element.srq = &srq->ibsrq;
+ event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ ib_dispatch_event(&event);
+}
+
+int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask srq_attr_mask,
+ struct ib_udata *udata)
+{
+ struct c4iw_srq *srq = to_c4iw_srq(ib_srq);
+ int ret = 0;
+
+ /*
+ * XXX 0 mask == a SW interrupt for srq_limit reached...
+ */
+ if (udata && !srq_attr_mask) {
+ c4iw_dispatch_srq_limit_reached_event(srq);
+ goto out;
+ }
+
+ /* no support for this yet */
+ if (srq_attr_mask & IB_SRQ_MAX_WR) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!udata && (srq_attr_mask & IB_SRQ_LIMIT)) {
+ srq->armed = true;
+ srq->srq_limit = attr->srq_limit;
+ }
+out:
+ return ret;
+}
+
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_qp_init_attr *init_attr)
{
struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
- memset(attr, 0, sizeof *attr);
- memset(init_attr, 0, sizeof *init_attr);
+ memset(attr, 0, sizeof(*attr));
+ memset(init_attr, 0, sizeof(*init_attr));
attr->qp_state = to_ib_qp_state(qhp->attr.state);
+ attr->cur_qp_state = to_ib_qp_state(qhp->attr.state);
init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
- init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
+ init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
- init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
+ init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
+ return 0;
+}
+
+static void free_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
+ struct c4iw_wr_wait *wr_waitp)
+{
+ struct c4iw_rdev *rdev = &srq->rhp->rdev;
+ struct sk_buff *skb = srq->destroy_skb;
+ struct t4_srq *wq = &srq->wq;
+ struct fw_ri_res_wr *res_wr;
+ struct fw_ri_res *res;
+ int wr_len;
+
+ wr_len = sizeof(*res_wr) + sizeof(*res);
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+
+ res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
+ memset(res_wr, 0, wr_len);
+ res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) |
+ FW_RI_RES_WR_NRES_V(1) |
+ FW_WR_COMPL_F);
+ res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
+ res_wr->cookie = (uintptr_t)wr_waitp;
+ res = res_wr->res;
+ res->u.srq.restype = FW_RI_RES_TYPE_SRQ;
+ res->u.srq.op = FW_RI_RES_OP_RESET;
+ res->u.srq.srqid = cpu_to_be32(srq->idx);
+ res->u.srq.eqid = cpu_to_be32(wq->qid);
+
+ c4iw_init_wr_wait(wr_waitp);
+ c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
+
+ dma_free_coherent(&rdev->lldi.pdev->dev,
+ wq->memsize, wq->queue,
+ dma_unmap_addr(wq, mapping));
+ c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
+ kfree(wq->sw_rq);
+ c4iw_put_qpid(rdev, wq->qid, uctx);
+}
+
+static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
+ struct c4iw_wr_wait *wr_waitp)
+{
+ struct c4iw_rdev *rdev = &srq->rhp->rdev;
+ int user = (uctx != &rdev->uctx);
+ struct t4_srq *wq = &srq->wq;
+ struct fw_ri_res_wr *res_wr;
+ struct fw_ri_res *res;
+ struct sk_buff *skb;
+ int wr_len;
+ int eqsize;
+ int ret = -ENOMEM;
+
+ wq->qid = c4iw_get_qpid(rdev, uctx);
+ if (!wq->qid)
+ goto err;
+
+ if (!user) {
+ wq->sw_rq = kcalloc(wq->size, sizeof(*wq->sw_rq),
+ GFP_KERNEL);
+ if (!wq->sw_rq)
+ goto err_put_qpid;
+ wq->pending_wrs = kcalloc(srq->wq.size,
+ sizeof(*srq->wq.pending_wrs),
+ GFP_KERNEL);
+ if (!wq->pending_wrs)
+ goto err_free_sw_rq;
+ }
+
+ wq->rqt_size = wq->size;
+ wq->rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rqt_size);
+ if (!wq->rqt_hwaddr)
+ goto err_free_pending_wrs;
+ wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
+ T4_RQT_ENTRY_SHIFT;
+
+ wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize,
+ &wq->dma_addr, GFP_KERNEL);
+ if (!wq->queue)
+ goto err_free_rqtpool;
+
+ dma_unmap_addr_set(wq, mapping, wq->dma_addr);
+
+ wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, CXGB4_BAR2_QTYPE_EGRESS,
+ &wq->bar2_qid,
+ user ? &wq->bar2_pa : NULL);
+
+ /*
+ * User mode must have bar2 access.
+ */
+
+ if (user && !wq->bar2_va) {
+ pr_warn(MOD "%s: srqid %u not in BAR2 range.\n",
+ pci_name(rdev->lldi.pdev), wq->qid);
+ ret = -EINVAL;
+ goto err_free_queue;
+ }
+
+ /* build fw_ri_res_wr */
+ wr_len = sizeof(*res_wr) + sizeof(*res);
+
+ skb = alloc_skb(wr_len, GFP_KERNEL);
+ if (!skb)
+ goto err_free_queue;
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+
+ res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
+ memset(res_wr, 0, wr_len);
+ res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) |
+ FW_RI_RES_WR_NRES_V(1) |
+ FW_WR_COMPL_F);
+ res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
+ res_wr->cookie = (uintptr_t)wr_waitp;
+ res = res_wr->res;
+ res->u.srq.restype = FW_RI_RES_TYPE_SRQ;
+ res->u.srq.op = FW_RI_RES_OP_WRITE;
+
+ /*
+ * eqsize is the number of 64B entries plus the status page size.
+ */
+ eqsize = wq->size * T4_RQ_NUM_SLOTS +
+ rdev->hw_queue.t4_eq_status_entries;
+ res->u.srq.eqid = cpu_to_be32(wq->qid);
+ res->u.srq.fetchszm_to_iqid =
+ /* no host cidx updates */
+ cpu_to_be32(FW_RI_RES_WR_HOSTFCMODE_V(0) |
+ FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */
+ FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */
+ FW_RI_RES_WR_FETCHRO_V(0)); /* relaxed_ordering */
+ res->u.srq.dcaen_to_eqsize =
+ cpu_to_be32(FW_RI_RES_WR_DCAEN_V(0) |
+ FW_RI_RES_WR_DCACPU_V(0) |
+ FW_RI_RES_WR_FBMIN_V(2) |
+ FW_RI_RES_WR_FBMAX_V(3) |
+ FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
+ FW_RI_RES_WR_CIDXFTHRESH_V(0) |
+ FW_RI_RES_WR_EQSIZE_V(eqsize));
+ res->u.srq.eqaddr = cpu_to_be64(wq->dma_addr);
+ res->u.srq.srqid = cpu_to_be32(srq->idx);
+ res->u.srq.pdid = cpu_to_be32(srq->pdid);
+ res->u.srq.hwsrqsize = cpu_to_be32(wq->rqt_size);
+ res->u.srq.hwsrqaddr = cpu_to_be32(wq->rqt_hwaddr -
+ rdev->lldi.vr->rq.start);
+
+ c4iw_init_wr_wait(wr_waitp);
+
+ ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->qid, __func__);
+ if (ret)
+ goto err_free_queue;
+
+ pr_debug("%s srq %u eqid %u pdid %u queue va %p pa 0x%llx\n"
+ " bar2_addr %p rqt addr 0x%x size %d\n",
+ __func__, srq->idx, wq->qid, srq->pdid, wq->queue,
+ (u64)virt_to_phys(wq->queue), wq->bar2_va,
+ wq->rqt_hwaddr, wq->rqt_size);
+
+ return 0;
+err_free_queue:
+ dma_free_coherent(&rdev->lldi.pdev->dev,
+ wq->memsize, wq->queue,
+ dma_unmap_addr(wq, mapping));
+err_free_rqtpool:
+ c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
+err_free_pending_wrs:
+ if (!user)
+ kfree(wq->pending_wrs);
+err_free_sw_rq:
+ if (!user)
+ kfree(wq->sw_rq);
+err_put_qpid:
+ c4iw_put_qpid(rdev, wq->qid, uctx);
+err:
+ return ret;
+}
+
+void c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16)
+{
+ u64 *src, *dst;
+
+ src = (u64 *)wqe;
+ dst = (u64 *)((u8 *)srq->queue + srq->wq_pidx * T4_EQ_ENTRY_SIZE);
+ while (len16) {
+ *dst++ = *src++;
+ if (dst >= (u64 *)&srq->queue[srq->size])
+ dst = (u64 *)srq->queue;
+ *dst++ = *src++;
+ if (dst >= (u64 *)&srq->queue[srq->size])
+ dst = (u64 *)srq->queue;
+ len16--;
+ }
+}
+
+int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs,
+ struct ib_udata *udata)
+{
+ struct ib_pd *pd = ib_srq->pd;
+ struct c4iw_dev *rhp;
+ struct c4iw_srq *srq = to_c4iw_srq(ib_srq);
+ struct c4iw_pd *php;
+ struct c4iw_create_srq_resp uresp;
+ struct c4iw_ucontext *ucontext;
+ struct c4iw_mm_entry *srq_key_mm, *srq_db_key_mm;
+ int rqsize;
+ int ret;
+ int wr_len;
+
+ if (attrs->srq_type != IB_SRQT_BASIC)
+ return -EOPNOTSUPP;
+
+ pr_debug("%s ib_pd %p\n", __func__, pd);
+
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+
+ if (!rhp->rdev.lldi.vr->srq.size)
+ return -EINVAL;
+ if (attrs->attr.max_wr > rhp->rdev.hw_queue.t4_max_rq_size)
+ return -E2BIG;
+ if (attrs->attr.max_sge > T4_MAX_RECV_SGE)
+ return -E2BIG;
+
+ /*
+ * SRQ RQT and RQ must be a power of 2 and at least 16 deep.
+ */
+ rqsize = attrs->attr.max_wr + 1;
+ rqsize = roundup_pow_of_two(max_t(u16, rqsize, 16));
+
+ ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
+ ibucontext);
+
+ srq->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+ if (!srq->wr_waitp)
+ return -ENOMEM;
+
+ srq->idx = c4iw_alloc_srq_idx(&rhp->rdev);
+ if (srq->idx < 0) {
+ ret = -ENOMEM;
+ goto err_free_wr_wait;
+ }
+
+ wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
+ srq->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
+ if (!srq->destroy_skb) {
+ ret = -ENOMEM;
+ goto err_free_srq_idx;
+ }
+
+ srq->rhp = rhp;
+ srq->pdid = php->pdid;
+
+ srq->wq.size = rqsize;
+ srq->wq.memsize =
+ (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
+ sizeof(*srq->wq.queue);
+ if (ucontext)
+ srq->wq.memsize = roundup(srq->wq.memsize, PAGE_SIZE);
+
+ ret = alloc_srq_queue(srq, ucontext ? &ucontext->uctx :
+ &rhp->rdev.uctx, srq->wr_waitp);
+ if (ret)
+ goto err_free_skb;
+ attrs->attr.max_wr = rqsize - 1;
+
+ if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6)
+ srq->flags = T4_SRQ_LIMIT_SUPPORT;
+
+ if (udata) {
+ srq_key_mm = kmalloc(sizeof(*srq_key_mm), GFP_KERNEL);
+ if (!srq_key_mm) {
+ ret = -ENOMEM;
+ goto err_free_queue;
+ }
+ srq_db_key_mm = kmalloc(sizeof(*srq_db_key_mm), GFP_KERNEL);
+ if (!srq_db_key_mm) {
+ ret = -ENOMEM;
+ goto err_free_srq_key_mm;
+ }
+ memset(&uresp, 0, sizeof(uresp));
+ uresp.flags = srq->flags;
+ uresp.qid_mask = rhp->rdev.qpmask;
+ uresp.srqid = srq->wq.qid;
+ uresp.srq_size = srq->wq.size;
+ uresp.srq_memsize = srq->wq.memsize;
+ uresp.rqt_abs_idx = srq->wq.rqt_abs_idx;
+ spin_lock(&ucontext->mmap_lock);
+ uresp.srq_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ uresp.srq_db_gts_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ spin_unlock(&ucontext->mmap_lock);
+ ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (ret)
+ goto err_free_srq_db_key_mm;
+ srq_key_mm->key = uresp.srq_key;
+ srq_key_mm->addr = 0;
+ srq_key_mm->len = PAGE_ALIGN(srq->wq.memsize);
+ srq_key_mm->vaddr = srq->wq.queue;
+ srq_key_mm->dma_addr = srq->wq.dma_addr;
+ insert_flag_to_mmap(&rhp->rdev, srq_key_mm, srq_key_mm->addr);
+ insert_mmap(ucontext, srq_key_mm);
+ srq_db_key_mm->key = uresp.srq_db_gts_key;
+ srq_db_key_mm->addr = (u64)(unsigned long)srq->wq.bar2_pa;
+ srq_db_key_mm->len = PAGE_SIZE;
+ srq_db_key_mm->vaddr = NULL;
+ srq_db_key_mm->dma_addr = 0;
+ insert_flag_to_mmap(&rhp->rdev, srq_db_key_mm,
+ srq_db_key_mm->addr);
+ insert_mmap(ucontext, srq_db_key_mm);
+ }
+
+ pr_debug("%s srq qid %u idx %u size %u memsize %lu num_entries %u\n",
+ __func__, srq->wq.qid, srq->idx, srq->wq.size,
+ (unsigned long)srq->wq.memsize, attrs->attr.max_wr);
+
+ spin_lock_init(&srq->lock);
+ return 0;
+
+err_free_srq_db_key_mm:
+ kfree(srq_db_key_mm);
+err_free_srq_key_mm:
+ kfree(srq_key_mm);
+err_free_queue:
+ free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
+ srq->wr_waitp);
+err_free_skb:
+ kfree_skb(srq->destroy_skb);
+err_free_srq_idx:
+ c4iw_free_srq_idx(&rhp->rdev, srq->idx);
+err_free_wr_wait:
+ c4iw_put_wr_wait(srq->wr_waitp);
+ return ret;
+}
+
+int c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_srq *srq;
+ struct c4iw_ucontext *ucontext;
+
+ srq = to_c4iw_srq(ibsrq);
+ rhp = srq->rhp;
+
+ pr_debug("%s id %d\n", __func__, srq->wq.qid);
+ ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
+ ibucontext);
+ free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
+ srq->wr_waitp);
+ c4iw_free_srq_idx(&rhp->rdev, srq->idx);
+ c4iw_put_wr_wait(srq->wr_waitp);
return 0;
}
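For reference, the SRQ receive path in the qp.c hunks above copies each work request into the software ring with c4iw_copy_wr_to_srq(), advancing in 16-byte units and wrapping at the end of the queue. Below is a minimal user-space sketch of that circular copy; every name in it (ring_t, ring_copy, SLOT_BYTES, RING_SLOTS) is invented for illustration, it is not driver code, and it deliberately ignores the status-page entries the real queue reserves past srq->size.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SLOT_BYTES 64                       /* one ring slot, like T4_EQ_ENTRY_SIZE */
#define RING_SLOTS 8                        /* ring size, in slots */
#define WORDS_PER_SLOT (SLOT_BYTES / 8)     /* 64-bit words per slot */

typedef struct {
	uint64_t buf[RING_SLOTS * WORDS_PER_SLOT];
	unsigned int pidx;                  /* producer index, in slots */
} ring_t;

/* Copy len16 16-byte chunks of src into the ring at the producer index,
 * wrapping back to the start of the buffer when the end is reached. */
static void ring_copy(ring_t *r, const uint64_t *src, unsigned int len16)
{
	uint64_t *dst = r->buf + r->pidx * WORDS_PER_SLOT;
	uint64_t *end = r->buf + RING_SLOTS * WORDS_PER_SLOT;

	while (len16--) {
		*dst++ = *src++;            /* first half of the 16-byte chunk */
		if (dst >= end)
			dst = r->buf;
		*dst++ = *src++;            /* second half */
		if (dst >= end)
			dst = r->buf;
	}
}

int main(void)
{
	ring_t r = { .pidx = RING_SLOTS - 1 }; /* start in the last slot to force a wrap */
	uint64_t wr[2 * WORDS_PER_SLOT];       /* a two-slot "work request" */

	memset(wr, 0xab, sizeof(wr));
	ring_copy(&r, wr, sizeof(wr) / 16);
	printf("first word after wrap: 0x%llx\n",
	       (unsigned long long)r.buf[0]);  /* 0xabababababababab */
	return 0;
}

The point of copying in 16-byte halves with a wrap check after each half is that a work request may straddle the end of the ring; the driver version does the same thing against srq->queue rather than a flat array.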
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
index 67df71a7012e..e800e8e8bed5 100644
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -53,7 +53,8 @@ static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
}
/* nr_* must be power of 2 */
-int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
+int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt,
+ u32 nr_pdid, u32 nr_srqt)
{
int err = 0;
err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
@@ -67,7 +68,17 @@ int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
nr_pdid, 1, 0);
if (err)
goto pdid_err;
+ if (!nr_srqt)
+ err = c4iw_id_table_alloc(&rdev->resource.srq_table, 0,
+ 1, 1, 0);
+ else
+ err = c4iw_id_table_alloc(&rdev->resource.srq_table, 0,
+ nr_srqt, 0, 0);
+ if (err)
+ goto srq_err;
return 0;
+ srq_err:
+ c4iw_id_table_free(&rdev->resource.pdid_table);
pdid_err:
c4iw_id_table_free(&rdev->resource.qid_table);
qid_err:
@@ -90,7 +101,7 @@ u32 c4iw_get_resource(struct c4iw_id_table *id_table)
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
{
- PDBG("%s entry 0x%x\n", __func__, entry);
+ pr_debug("entry 0x%x\n", entry);
c4iw_id_free(id_table, entry);
}
@@ -115,7 +126,7 @@ u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
rdev->stats.qid.cur += rdev->qpmask + 1;
mutex_unlock(&rdev->stats.lock);
for (i = qid+1; i & rdev->qpmask; i++) {
- entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
goto out;
entry->qid = i;
@@ -126,13 +137,13 @@ u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
* now put the same ids on the qp list since they all
* map to the same db/gts page.
*/
- entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
goto out;
entry->qid = qid;
list_add_tail(&entry->entry, &uctx->qpids);
for (i = qid+1; i & rdev->qpmask; i++) {
- entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
goto out;
entry->qid = i;
@@ -141,7 +152,7 @@ u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
}
out:
mutex_unlock(&uctx->lock);
- PDBG("%s qid 0x%x\n", __func__, qid);
+ pr_debug("qid 0x%x\n", qid);
mutex_lock(&rdev->stats.lock);
if (rdev->stats.qid.cur > rdev->stats.qid.max)
rdev->stats.qid.max = rdev->stats.qid.cur;
@@ -154,10 +165,10 @@ void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
{
struct c4iw_qid_list *entry;
- entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return;
- PDBG("%s qid 0x%x\n", __func__, qid);
+ pr_debug("qid 0x%x\n", qid);
entry->qid = qid;
mutex_lock(&uctx->lock);
list_add_tail(&entry->entry, &uctx->cqids);
@@ -189,7 +200,7 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
rdev->stats.qid.cur += rdev->qpmask + 1;
mutex_unlock(&rdev->stats.lock);
for (i = qid+1; i & rdev->qpmask; i++) {
- entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
goto out;
entry->qid = i;
@@ -200,13 +211,13 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
* now put the same ids on the cq list since they all
* map to the same db/gts page.
*/
- entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
goto out;
entry->qid = qid;
list_add_tail(&entry->entry, &uctx->cqids);
- for (i = qid; i & rdev->qpmask; i++) {
- entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ for (i = qid + 1; i & rdev->qpmask; i++) {
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
goto out;
entry->qid = i;
@@ -215,7 +226,7 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
}
out:
mutex_unlock(&uctx->lock);
- PDBG("%s qid 0x%x\n", __func__, qid);
+ pr_debug("qid 0x%x\n", qid);
mutex_lock(&rdev->stats.lock);
if (rdev->stats.qid.cur > rdev->stats.qid.max)
rdev->stats.qid.max = rdev->stats.qid.cur;
@@ -228,10 +239,10 @@ void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
{
struct c4iw_qid_list *entry;
- entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return;
- PDBG("%s qid 0x%x\n", __func__, qid);
+ pr_debug("qid 0x%x\n", qid);
entry->qid = qid;
mutex_lock(&uctx->lock);
list_add_tail(&entry->entry, &uctx->qpids);
@@ -254,25 +265,36 @@ void c4iw_destroy_resource(struct c4iw_resource *rscp)
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
- PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
+ pr_debug("addr 0x%x size %d\n", (u32)addr, size);
mutex_lock(&rdev->stats.lock);
if (addr) {
rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
rdev->stats.pbl.max = rdev->stats.pbl.cur;
+ kref_get(&rdev->pbl_kref);
} else
rdev->stats.pbl.fail++;
mutex_unlock(&rdev->stats.lock);
return (u32)addr;
}
+static void destroy_pblpool(struct kref *kref)
+{
+ struct c4iw_rdev *rdev;
+
+ rdev = container_of(kref, struct c4iw_rdev, pbl_kref);
+ gen_pool_destroy(rdev->pbl_pool);
+ complete(&rdev->pbl_compl);
+}
+
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
- PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
+ pr_debug("addr 0x%x size %d\n", addr, size);
mutex_lock(&rdev->stats.lock);
rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
mutex_unlock(&rdev->stats.lock);
gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
+ kref_put(&rdev->pbl_kref, destroy_pblpool);
}
int c4iw_pblpool_create(struct c4iw_rdev *rdev)
@@ -290,19 +312,17 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
while (pbl_start < pbl_top) {
pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
- PDBG("%s failed to add PBL chunk (%x/%x)\n",
- __func__, pbl_start, pbl_chunk);
+ pr_debug("failed to add PBL chunk (%x/%x)\n",
+ pbl_start, pbl_chunk);
if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
- printk(KERN_WARNING MOD
- "Failed to add all PBL chunks (%x/%x)\n",
- pbl_start,
- pbl_top - pbl_start);
+ pr_warn("Failed to add all PBL chunks (%x/%x)\n",
+ pbl_start, pbl_top - pbl_start);
return 0;
}
pbl_chunk >>= 1;
} else {
- PDBG("%s added PBL chunk (%x/%x)\n",
- __func__, pbl_start, pbl_chunk);
+ pr_debug("added PBL chunk (%x/%x)\n",
+ pbl_start, pbl_chunk);
pbl_start += pbl_chunk;
}
}
@@ -312,7 +332,7 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
{
- gen_pool_destroy(rdev->pbl_pool);
+ kref_put(&rdev->pbl_kref, destroy_pblpool);
}
/*
@@ -324,57 +344,75 @@ void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
- PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
+ pr_debug("addr 0x%x size %d\n", (u32)addr, size << 6);
if (!addr)
- pr_warn_ratelimited(MOD "%s: Out of RQT memory\n",
+ pr_warn_ratelimited("%s: Out of RQT memory\n",
pci_name(rdev->lldi.pdev));
mutex_lock(&rdev->stats.lock);
if (addr) {
rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
rdev->stats.rqt.max = rdev->stats.rqt.cur;
+ kref_get(&rdev->rqt_kref);
} else
rdev->stats.rqt.fail++;
mutex_unlock(&rdev->stats.lock);
return (u32)addr;
}
+static void destroy_rqtpool(struct kref *kref)
+{
+ struct c4iw_rdev *rdev;
+
+ rdev = container_of(kref, struct c4iw_rdev, rqt_kref);
+ gen_pool_destroy(rdev->rqt_pool);
+ complete(&rdev->rqt_compl);
+}
+
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
- PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
+ pr_debug("addr 0x%x size %d\n", addr, size << 6);
mutex_lock(&rdev->stats.lock);
rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
mutex_unlock(&rdev->stats.lock);
gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
+ kref_put(&rdev->rqt_kref, destroy_rqtpool);
}
int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
{
unsigned rqt_start, rqt_chunk, rqt_top;
+ int skip = 0;
rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
if (!rdev->rqt_pool)
return -ENOMEM;
- rqt_start = rdev->lldi.vr->rq.start;
- rqt_chunk = rdev->lldi.vr->rq.size;
+ /*
+ * If SRQs are supported, then never use the first RQE from
+ * the RQT region. This is because HW uses RQT index 0 as NULL.
+ */
+ if (rdev->lldi.vr->srq.size)
+ skip = T4_RQT_ENTRY_SIZE;
+
+ rqt_start = rdev->lldi.vr->rq.start + skip;
+ rqt_chunk = rdev->lldi.vr->rq.size - skip;
rqt_top = rqt_start + rqt_chunk;
while (rqt_start < rqt_top) {
rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
- PDBG("%s failed to add RQT chunk (%x/%x)\n",
- __func__, rqt_start, rqt_chunk);
+ pr_debug("failed to add RQT chunk (%x/%x)\n",
+ rqt_start, rqt_chunk);
if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
- printk(KERN_WARNING MOD
- "Failed to add all RQT chunks (%x/%x)\n",
- rqt_start, rqt_top - rqt_start);
+ pr_warn("Failed to add all RQT chunks (%x/%x)\n",
+ rqt_start, rqt_top - rqt_start);
return 0;
}
rqt_chunk >>= 1;
} else {
- PDBG("%s added RQT chunk (%x/%x)\n",
- __func__, rqt_start, rqt_chunk);
+ pr_debug("added RQT chunk (%x/%x)\n",
+ rqt_start, rqt_chunk);
rqt_start += rqt_chunk;
}
}
@@ -383,7 +421,33 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
- gen_pool_destroy(rdev->rqt_pool);
+ kref_put(&rdev->rqt_kref, destroy_rqtpool);
+}
+
+int c4iw_alloc_srq_idx(struct c4iw_rdev *rdev)
+{
+ int idx;
+
+ idx = c4iw_id_alloc(&rdev->resource.srq_table);
+ mutex_lock(&rdev->stats.lock);
+ if (idx == -1) {
+ rdev->stats.srqt.fail++;
+ mutex_unlock(&rdev->stats.lock);
+ return -ENOMEM;
+ }
+ rdev->stats.srqt.cur++;
+ if (rdev->stats.srqt.cur > rdev->stats.srqt.max)
+ rdev->stats.srqt.max = rdev->stats.srqt.cur;
+ mutex_unlock(&rdev->stats.lock);
+ return idx;
+}
+
+void c4iw_free_srq_idx(struct c4iw_rdev *rdev, int idx)
+{
+ c4iw_id_free(&rdev->resource.srq_table, idx);
+ mutex_lock(&rdev->stats.lock);
+ rdev->stats.srqt.cur--;
+ mutex_unlock(&rdev->stats.lock);
}
/*
@@ -394,7 +458,7 @@ void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
{
unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);
- PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
+ pr_debug("addr 0x%x size %d\n", (u32)addr, size);
if (addr) {
mutex_lock(&rdev->stats.lock);
rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT);
@@ -407,7 +471,7 @@ u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
- PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
+ pr_debug("addr 0x%x size %d\n", addr, size);
mutex_lock(&rdev->stats.lock);
rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT);
mutex_unlock(&rdev->stats.lock);
@@ -429,18 +493,17 @@ int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
while (start < top) {
chunk = min(top - start + 1, chunk);
if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
- PDBG("%s failed to add OCQP chunk (%x/%x)\n",
- __func__, start, chunk);
+ pr_debug("failed to add OCQP chunk (%x/%x)\n",
+ start, chunk);
if (chunk <= 1024 << MIN_OCQP_SHIFT) {
- printk(KERN_WARNING MOD
- "Failed to add all OCQP chunks (%x/%x)\n",
- start, top - start);
+ pr_warn("Failed to add all OCQP chunks (%x/%x)\n",
+ start, top - start);
return 0;
}
chunk >>= 1;
} else {
- PDBG("%s added OCQP chunk (%x/%x)\n",
- __func__, start, chunk);
+ pr_debug("added OCQP chunk (%x/%x)\n",
+ start, chunk);
start += chunk;
}
}
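The resource.c hunks above wrap the PBL and RQT gen_pools in a kref so that gen_pool_destroy() is deferred until the last outstanding allocation has been returned: the pool owner holds one reference, each *_alloc() takes another, and the release function runs on the final put and signals a completion for the teardown path to wait on. A stand-alone sketch of that pattern follows; all names here (xpool, xpool_get, xpool_put) are invented for illustration and it is not driver code.

#include <stdio.h>

struct xpool {
	int refcount;          /* stands in for the kernel's struct kref */
	int destroyed;
};

static void xpool_release(struct xpool *p)
{
	/* mirrors destroy_pblpool()/destroy_rqtpool(): free the pool, then
	 * signal whoever is waiting for teardown to finish */
	p->destroyed = 1;
	printf("pool destroyed\n");
}

static void xpool_get(struct xpool *p) { p->refcount++; }

static void xpool_put(struct xpool *p)
{
	if (--p->refcount == 0)
		xpool_release(p);
}

int main(void)
{
	struct xpool pool = { .refcount = 1, .destroyed = 0 }; /* owner reference */

	xpool_get(&pool);    /* an allocation takes a reference, like c4iw_pblpool_alloc() */
	xpool_put(&pool);    /* owner calls *_pool_destroy(): pool survives */
	printf("after owner put: destroyed=%d\n", pool.destroyed);  /* still 0 */
	xpool_put(&pool);    /* last allocation freed: teardown runs now */
	return 0;
}

The design choice is the same in both cases: destroying the pool while an allocation is still outstanding would be a use-after-free, so teardown is tied to the reference count rather than to the order in which destroy and free happen to be called.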
diff --git a/drivers/infiniband/hw/cxgb4/restrack.c b/drivers/infiniband/hw/cxgb4/restrack.c
new file mode 100644
index 000000000000..fd22c85d35f4
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/restrack.c
@@ -0,0 +1,487 @@
+/*
+ * Copyright (c) 2018 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/rdma_cm.h>
+
+#include "iw_cxgb4.h"
+#include <rdma/restrack.h>
+#include <uapi/rdma/rdma_netlink.h>
+
+static int fill_sq(struct sk_buff *msg, struct t4_wq *wq)
+{
+ /* WQ+SQ */
+ if (rdma_nl_put_driver_u32(msg, "sqid", wq->sq.qid))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "flushed", wq->flushed))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "memsize", wq->sq.memsize))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "cidx", wq->sq.cidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "pidx", wq->sq.pidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->sq.wq_pidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "flush_cidx", wq->sq.flush_cidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "in_use", wq->sq.in_use))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "size", wq->sq.size))
+ goto err;
+ if (rdma_nl_put_driver_u32_hex(msg, "flags", wq->sq.flags))
+ goto err;
+ return 0;
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_rq(struct sk_buff *msg, struct t4_wq *wq)
+{
+ /* RQ */
+ if (rdma_nl_put_driver_u32(msg, "rqid", wq->rq.qid))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "memsize", wq->rq.memsize))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "cidx", wq->rq.cidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "pidx", wq->rq.pidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->rq.wq_pidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "msn", wq->rq.msn))
+ goto err;
+ if (rdma_nl_put_driver_u32_hex(msg, "rqt_hwaddr", wq->rq.rqt_hwaddr))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "rqt_size", wq->rq.rqt_size))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "in_use", wq->rq.in_use))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "size", wq->rq.size))
+ goto err;
+ return 0;
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_swsqe(struct sk_buff *msg, struct t4_sq *sq, u16 idx,
+ struct t4_swsqe *sqe)
+{
+ if (rdma_nl_put_driver_u32(msg, "idx", idx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "opcode", sqe->opcode))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "complete", sqe->complete))
+ goto err;
+ if (sqe->complete &&
+ rdma_nl_put_driver_u32(msg, "cqe_status", CQE_STATUS(&sqe->cqe)))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "signaled", sqe->signaled))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "flushed", sqe->flushed))
+ goto err;
+ return 0;
+err:
+ return -EMSGSIZE;
+}
+
+/*
+ * Dump the first and last pending sqes.
+ */
+static int fill_swsqes(struct sk_buff *msg, struct t4_sq *sq,
+ u16 first_idx, struct t4_swsqe *first_sqe,
+ u16 last_idx, struct t4_swsqe *last_sqe)
+{
+ if (!first_sqe)
+ goto out;
+ if (fill_swsqe(msg, sq, first_idx, first_sqe))
+ goto err;
+ if (!last_sqe)
+ goto out;
+ if (fill_swsqe(msg, sq, last_idx, last_sqe))
+ goto err;
+out:
+ return 0;
+err:
+ return -EMSGSIZE;
+}
+
+int c4iw_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ibqp)
+{
+ struct t4_swsqe *fsp = NULL, *lsp = NULL;
+ struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
+ u16 first_sq_idx = 0, last_sq_idx = 0;
+ struct t4_swsqe first_sqe, last_sqe;
+ struct nlattr *table_attr;
+ struct t4_wq wq;
+
+ /* User qp state is not available, so don't dump user qps */
+ if (qhp->ucontext)
+ return 0;
+
+ table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ goto err;
+
+ /* Get a consistent snapshot */
+ spin_lock_irq(&qhp->lock);
+ wq = qhp->wq;
+
+ /* If there are any pending sqes, copy the first and last */
+ if (wq.sq.cidx != wq.sq.pidx) {
+ first_sq_idx = wq.sq.cidx;
+ first_sqe = qhp->wq.sq.sw_sq[first_sq_idx];
+ fsp = &first_sqe;
+ last_sq_idx = wq.sq.pidx;
+ if (last_sq_idx-- == 0)
+ last_sq_idx = wq.sq.size - 1;
+ if (last_sq_idx != first_sq_idx) {
+ last_sqe = qhp->wq.sq.sw_sq[last_sq_idx];
+ lsp = &last_sqe;
+ }
+ }
+ spin_unlock_irq(&qhp->lock);
+
+ if (fill_sq(msg, &wq))
+ goto err_cancel_table;
+
+ if (fill_swsqes(msg, &wq.sq, first_sq_idx, fsp, last_sq_idx, lsp))
+ goto err_cancel_table;
+
+ if (fill_rq(msg, &wq))
+ goto err_cancel_table;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err_cancel_table:
+ nla_nest_cancel(msg, table_attr);
+err:
+ return -EMSGSIZE;
+}
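The restrack callbacks added above all share one skeleton: open an RDMA_NLDEV_ATTR_DRIVER nest, emit named driver attributes, then either close the nest or cancel it so a half-filled message is never returned to userspace. A minimal sketch of that skeleton, using only the in-kernel helpers already used above; the foo object and fill_res_foo_entry() name are hypothetical:

#include <linux/types.h>
#include <net/netlink.h>
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>

/* Hypothetical example object; stands in for a c4iw_* structure. */
struct foo { u32 id; u32 state; };

static int fill_res_foo_entry(struct sk_buff *msg, struct foo *f)
{
	struct nlattr *table_attr;

	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32(msg, "id", f->id))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "state", f->state))
		goto err;

	nla_nest_end(msg, table_attr);		/* commit the driver table */
	return 0;
err:
	nla_nest_cancel(msg, table_attr);	/* roll back the partial nest */
	return -EMSGSIZE;
}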
+
+union union_ep {
+ struct c4iw_listen_ep lep;
+ struct c4iw_ep ep;
+};
+
+int c4iw_fill_res_cm_id_entry(struct sk_buff *msg,
+ struct rdma_cm_id *cm_id)
+{
+ struct nlattr *table_attr;
+ struct c4iw_ep_common *epcp;
+ struct c4iw_listen_ep *listen_ep = NULL;
+ struct c4iw_ep *ep = NULL;
+ struct iw_cm_id *iw_cm_id;
+ union union_ep *uep;
+
+ iw_cm_id = rdma_iw_cm_id(cm_id);
+ if (!iw_cm_id)
+ return 0;
+ epcp = (struct c4iw_ep_common *)iw_cm_id->provider_data;
+ if (!epcp)
+ return 0;
+ uep = kzalloc(sizeof(*uep), GFP_KERNEL);
+ if (!uep)
+ return 0;
+
+ table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ goto err_free_uep;
+
+ /* Get a consistent snapshot */
+ mutex_lock(&epcp->mutex);
+ if (epcp->state == LISTEN) {
+ uep->lep = *(struct c4iw_listen_ep *)epcp;
+ mutex_unlock(&epcp->mutex);
+ listen_ep = &uep->lep;
+ epcp = &listen_ep->com;
+ } else {
+ uep->ep = *(struct c4iw_ep *)epcp;
+ mutex_unlock(&epcp->mutex);
+ ep = &uep->ep;
+ epcp = &ep->com;
+ }
+
+ if (rdma_nl_put_driver_u32(msg, "state", epcp->state))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u64_hex(msg, "flags", epcp->flags))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u64_hex(msg, "history", epcp->history))
+ goto err_cancel_table;
+
+ if (listen_ep) {
+ if (rdma_nl_put_driver_u32(msg, "stid", listen_ep->stid))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32(msg, "backlog", listen_ep->backlog))
+ goto err_cancel_table;
+ } else {
+ if (rdma_nl_put_driver_u32(msg, "hwtid", ep->hwtid))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32(msg, "ord", ep->ord))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32(msg, "ird", ep->ird))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32(msg, "emss", ep->emss))
+ goto err_cancel_table;
+
+ if (!ep->parent_ep && rdma_nl_put_driver_u32(msg, "atid",
+ ep->atid))
+ goto err_cancel_table;
+ }
+ nla_nest_end(msg, table_attr);
+ kfree(uep);
+ return 0;
+
+err_cancel_table:
+ nla_nest_cancel(msg, table_attr);
+err_free_uep:
+ kfree(uep);
+ return -EMSGSIZE;
+}
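Note how both fill callbacks above split the work into two phases: hold the object's lock only long enough to copy its state into private storage (a stack copy or a kzalloc'd buffer), then emit the netlink attributes from that copy with no lock held. A stripped-down sketch of the same shape; the qp_obj/hw_q types and dump_qp_obj() are made-up names, not driver code:

#include <linux/spinlock.h>
#include <rdma/restrack.h>

struct hw_q { u32 cidx, pidx, in_use; };
struct qp_obj { spinlock_t lock; struct hw_q q; };

static int dump_qp_obj(struct sk_buff *msg, struct qp_obj *qp)
{
	struct hw_q snap;

	spin_lock_irq(&qp->lock);	/* phase 1: take a consistent copy */
	snap = qp->q;
	spin_unlock_irq(&qp->lock);

	/* phase 2: serialize from the copy, no lock held */
	return rdma_nl_put_driver_u32(msg, "in_use", snap.in_use) ?
	       -EMSGSIZE : 0;
}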
+
+static int fill_cq(struct sk_buff *msg, struct t4_cq *cq)
+{
+ if (rdma_nl_put_driver_u32(msg, "cqid", cq->cqid))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "memsize", cq->memsize))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "size", cq->size))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "cidx", cq->cidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "cidx_inc", cq->cidx_inc))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "sw_cidx", cq->sw_cidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "sw_pidx", cq->sw_pidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "sw_in_use", cq->sw_in_use))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "vector", cq->vector))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "gen", cq->gen))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "error", cq->error))
+ goto err;
+ if (rdma_nl_put_driver_u64_hex(msg, "bits_type_ts",
+ be64_to_cpu(cq->bits_type_ts)))
+ goto err;
+ if (rdma_nl_put_driver_u64_hex(msg, "flags", cq->flags))
+ goto err;
+
+ return 0;
+
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_cqe(struct sk_buff *msg, struct t4_cqe *cqe, u16 idx,
+ const char *qstr)
+{
+ if (rdma_nl_put_driver_u32(msg, qstr, idx))
+ goto err;
+ if (rdma_nl_put_driver_u32_hex(msg, "header",
+ be32_to_cpu(cqe->header)))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "len", be32_to_cpu(cqe->len)))
+ goto err;
+ if (rdma_nl_put_driver_u32_hex(msg, "wrid_hi",
+ be32_to_cpu(cqe->u.gen.wrid_hi)))
+ goto err;
+ if (rdma_nl_put_driver_u32_hex(msg, "wrid_low",
+ be32_to_cpu(cqe->u.gen.wrid_low)))
+ goto err;
+ if (rdma_nl_put_driver_u64_hex(msg, "bits_type_ts",
+ be64_to_cpu(cqe->bits_type_ts)))
+ goto err;
+
+ return 0;
+
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_hwcqes(struct sk_buff *msg, struct t4_cq *cq,
+ struct t4_cqe *cqes)
+{
+ u16 idx;
+
+ idx = (cq->cidx > 0) ? cq->cidx - 1 : cq->size - 1;
+ if (fill_cqe(msg, cqes, idx, "hwcq_idx"))
+ goto err;
+ idx = cq->cidx;
+ if (fill_cqe(msg, cqes + 1, idx, "hwcq_idx"))
+ goto err;
+
+ return 0;
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_swcqes(struct sk_buff *msg, struct t4_cq *cq,
+ struct t4_cqe *cqes)
+{
+ u16 idx;
+
+ if (!cq->sw_in_use)
+ return 0;
+
+ idx = cq->sw_cidx;
+ if (fill_cqe(msg, cqes, idx, "swcq_idx"))
+ goto err;
+ if (cq->sw_in_use == 1)
+ goto out;
+ idx = (cq->sw_pidx > 0) ? cq->sw_pidx - 1 : cq->size - 1;
+ if (fill_cqe(msg, cqes + 1, idx, "swcq_idx"))
+ goto err;
+out:
+ return 0;
+err:
+ return -EMSGSIZE;
+}
+
+int c4iw_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ibcq)
+{
+ struct c4iw_cq *chp = to_c4iw_cq(ibcq);
+ struct nlattr *table_attr;
+ struct t4_cqe hwcqes[2];
+ struct t4_cqe swcqes[2];
+ struct t4_cq cq;
+ u16 idx;
+
+ /* User cq state is not available, so don't dump user cqs */
+ if (ibcq->uobject)
+ return 0;
+
+ table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ goto err;
+
+ /* Get a consistent snapshot */
+ spin_lock_irq(&chp->lock);
+
+ /* t4_cq struct */
+ cq = chp->cq;
+
+ /* get 2 hw cqes: cidx-1, and cidx */
+ idx = (cq.cidx > 0) ? cq.cidx - 1 : cq.size - 1;
+ hwcqes[0] = chp->cq.queue[idx];
+
+ idx = cq.cidx;
+ hwcqes[1] = chp->cq.queue[idx];
+
+ /* get first and last sw cqes */
+ if (cq.sw_in_use) {
+ swcqes[0] = chp->cq.sw_queue[cq.sw_cidx];
+ if (cq.sw_in_use > 1) {
+ idx = (cq.sw_pidx > 0) ? cq.sw_pidx - 1 : cq.size - 1;
+ swcqes[1] = chp->cq.sw_queue[idx];
+ }
+ }
+
+ spin_unlock_irq(&chp->lock);
+
+ if (fill_cq(msg, &cq))
+ goto err_cancel_table;
+
+ if (fill_swcqes(msg, &cq, swcqes))
+ goto err_cancel_table;
+
+ if (fill_hwcqes(msg, &cq, hwcqes))
+ goto err_cancel_table;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err_cancel_table:
+ nla_nest_cancel(msg, table_attr);
+err:
+ return -EMSGSIZE;
+}
+
+int c4iw_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
+{
+ struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
+ struct c4iw_dev *dev = mhp->rhp;
+ u32 stag = mhp->attr.stag;
+ struct nlattr *table_attr;
+ struct fw_ri_tpte tpte;
+ int ret;
+
+ if (!stag)
+ return 0;
+
+ table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ goto err;
+
+ ret = cxgb4_read_tpte(dev->rdev.lldi.ports[0], stag, (__be32 *)&tpte);
+ if (ret) {
+ dev_err(&dev->rdev.lldi.pdev->dev,
+ "%s cxgb4_read_tpte err %d\n", __func__, ret);
+ return 0;
+ }
+
+ if (rdma_nl_put_driver_u32_hex(msg, "idx", stag >> 8))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32(msg, "valid",
+ FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid))))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32_hex(msg, "key", stag & 0xff))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32(msg, "state",
+ FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid))))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32(msg, "pdid",
+ FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid))))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32_hex(msg, "perm",
+ FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid))))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32(msg, "ps",
+ FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid))))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u64(msg, "len",
+ ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo)))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32_hex(msg, "pbl_addr",
+ FW_RI_TPTE_PBLADDR_G(ntohl(tpte.nosnoop_pbladdr))))
+ goto err_cancel_table;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err_cancel_table:
+ nla_nest_cancel(msg, table_attr);
+err:
+ return -EMSGSIZE;
+}
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 274a7ab13bef..c3b0e2896475 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -35,6 +35,7 @@
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
+#include "t4_tcb.h"
#include "t4fw_ri_api.h"
#define T4_MAX_NUM_PD 65536
@@ -52,12 +53,16 @@ struct t4_status_page {
__be16 pidx;
u8 qp_err; /* flit 1 - sw owns */
u8 db_off;
- u8 pad;
+ u8 pad[2];
u16 host_wq_pidx;
u16 host_cidx;
u16 host_pidx;
+ u16 pad2;
+ u32 srqidx;
};
+#define T4_RQT_ENTRY_SHIFT 6
+#define T4_RQT_ENTRY_SIZE BIT(T4_RQT_ENTRY_SHIFT)
#define T4_EQ_ENTRY_SIZE 64
#define T4_SQ_NUM_SLOTS 5
@@ -87,6 +92,9 @@ static inline int t4_max_fr_depth(int use_dsgl)
#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
#define T4_MAX_RECV_SGE 4
+#define T4_WRITE_CMPL_MAX_SGL 4
+#define T4_WRITE_CMPL_MAX_CQE 16
+
union t4_wr {
struct fw_ri_res_wr res;
struct fw_ri_wr ri;
@@ -95,7 +103,9 @@ union t4_wr {
struct fw_ri_rdma_read_wr read;
struct fw_ri_bind_mw_wr bind;
struct fw_ri_fr_nsmr_wr fr;
+ struct fw_ri_fr_nsmr_tpte_wr fr_tpte;
struct fw_ri_inv_lstag_wr inv;
+ struct fw_ri_rdma_write_cmpl_wr write_cmpl;
struct t4_status_page status;
__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
};
@@ -170,7 +180,7 @@ struct t4_cqe {
__be32 msn;
} rcqe;
struct {
- u32 nada1;
+ __be32 stag;
u16 nada2;
u16 cidx;
} scqe;
@@ -178,8 +188,32 @@ struct t4_cqe {
__be32 wrid_hi;
__be32 wrid_low;
} gen;
+ struct {
+ __be32 stag;
+ __be32 msn;
+ __be32 reserved;
+ __be32 abs_rqe_idx;
+ } srcqe;
+ struct {
+ __be32 mo;
+ __be32 msn;
+ /*
+ * Use union for immediate data to be consistent with
+ * stack's 32 bit data and iWARP spec's 64 bit data.
+ */
+ union {
+ struct {
+ __be32 imm_data32;
+ u32 reserved;
+ } ib_imm_data;
+ __be64 imm_data64;
+ } iw_imm_data;
+ } imm_data_rcqe;
+
+ u64 drain_cookie;
+ __be64 flits[3];
} u;
- __be64 reserved;
+ __be64 reserved[3];
__be64 bits_type_ts;
};
@@ -195,6 +229,11 @@ struct t4_cqe {
#define CQE_SWCQE_G(x) ((((x) >> CQE_SWCQE_S)) & CQE_SWCQE_M)
#define CQE_SWCQE_V(x) ((x)<<CQE_SWCQE_S)
+#define CQE_DRAIN_S 10
+#define CQE_DRAIN_M 0x1
+#define CQE_DRAIN_G(x) ((((x) >> CQE_DRAIN_S)) & CQE_DRAIN_M)
+#define CQE_DRAIN_V(x) ((x)<<CQE_DRAIN_S)
+
#define CQE_STATUS_S 5
#define CQE_STATUS_M 0x1F
#define CQE_STATUS_G(x) ((((x) >> CQE_STATUS_S)) & CQE_STATUS_M)
@@ -211,6 +250,7 @@ struct t4_cqe {
#define CQE_OPCODE_V(x) ((x)<<CQE_OPCODE_S)
#define SW_CQE(x) (CQE_SWCQE_G(be32_to_cpu((x)->header)))
+#define DRAIN_CQE(x) (CQE_DRAIN_G(be32_to_cpu((x)->header)))
#define CQE_QPID(x) (CQE_QPID_G(be32_to_cpu((x)->header)))
#define CQE_TYPE(x) (CQE_TYPE_G(be32_to_cpu((x)->header)))
#define SQ_TYPE(x) (CQE_TYPE((x)))
@@ -229,13 +269,18 @@ struct t4_cqe {
/* used for RQ completion processing */
#define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag))
#define CQE_WRID_MSN(x) (be32_to_cpu((x)->u.rcqe.msn))
+#define CQE_ABS_RQE_IDX(x) (be32_to_cpu((x)->u.srcqe.abs_rqe_idx))
+#define CQE_IMM_DATA(x) ( \
+ (x)->u.imm_data_rcqe.iw_imm_data.ib_imm_data.imm_data32)
/* used for SQ completion processing */
#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)
+#define CQE_WRID_FR_STAG(x) (be32_to_cpu((x)->u.scqe.stag))
/* generic accessor macros */
#define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi))
#define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low))
+#define CQE_DRAIN_COOKIE(x) ((x)->u.drain_cookie)
/* macros for flit 3 of the cqe */
#define CQE_GENBIT_S 63
@@ -267,7 +312,7 @@ struct t4_swsqe {
int signaled;
u16 idx;
int flushed;
- struct timespec host_ts;
+ ktime_t host_time;
u64 sge_ts;
};
@@ -308,8 +353,9 @@ struct t4_sq {
struct t4_swrqe {
u64 wr_id;
- struct timespec host_ts;
+ ktime_t host_time;
u64 sge_ts;
+ int valid;
};
struct t4_rq {
@@ -339,8 +385,98 @@ struct t4_wq {
void __iomem *db;
struct c4iw_rdev *rdev;
int flushed;
+ u8 *qp_errp;
+ u32 *srqidxp;
+};
+
+struct t4_srq_pending_wr {
+ u64 wr_id;
+ union t4_recv_wr wqe;
+ u8 len16;
+};
+
+struct t4_srq {
+ union t4_recv_wr *queue;
+ dma_addr_t dma_addr;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ struct t4_swrqe *sw_rq;
+ void __iomem *bar2_va;
+ u64 bar2_pa;
+ size_t memsize;
+ u32 bar2_qid;
+ u32 qid;
+ u32 msn;
+ u32 rqt_hwaddr;
+ u32 rqt_abs_idx;
+ u16 rqt_size;
+ u16 size;
+ u16 cidx;
+ u16 pidx;
+ u16 wq_pidx;
+ u16 wq_pidx_inc;
+ u16 in_use;
+ struct t4_srq_pending_wr *pending_wrs;
+ u16 pending_cidx;
+ u16 pending_pidx;
+ u16 pending_in_use;
+ u16 ooo_count;
};
+static inline u32 t4_srq_avail(struct t4_srq *srq)
+{
+ return srq->size - 1 - srq->in_use;
+}
+
+static inline void t4_srq_produce(struct t4_srq *srq, u8 len16)
+{
+ srq->in_use++;
+ if (++srq->pidx == srq->size)
+ srq->pidx = 0;
+ srq->wq_pidx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
+ if (srq->wq_pidx >= srq->size * T4_RQ_NUM_SLOTS)
+ srq->wq_pidx %= srq->size * T4_RQ_NUM_SLOTS;
+ srq->queue[srq->size].status.host_pidx = srq->pidx;
+}
+
+static inline void t4_srq_produce_pending_wr(struct t4_srq *srq)
+{
+ srq->pending_in_use++;
+ srq->in_use++;
+ if (++srq->pending_pidx == srq->size)
+ srq->pending_pidx = 0;
+}
+
+static inline void t4_srq_consume_pending_wr(struct t4_srq *srq)
+{
+ srq->pending_in_use--;
+ srq->in_use--;
+ if (++srq->pending_cidx == srq->size)
+ srq->pending_cidx = 0;
+}
+
+static inline void t4_srq_produce_ooo(struct t4_srq *srq)
+{
+ srq->in_use--;
+ srq->ooo_count++;
+}
+
+static inline void t4_srq_consume_ooo(struct t4_srq *srq)
+{
+ srq->cidx++;
+ if (srq->cidx == srq->size)
+ srq->cidx = 0;
+ srq->queue[srq->size].status.host_cidx = srq->cidx;
+ srq->ooo_count--;
+}
+
+static inline void t4_srq_consume(struct t4_srq *srq)
+{
+ srq->in_use--;
+ if (++srq->cidx == srq->size)
+ srq->cidx = 0;
+ srq->queue[srq->size].status.host_cidx = srq->cidx;
+}
+
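The t4_srq_* helpers above are a conventional ring with one slot held back: available entries are size - 1 - in_use, and cidx/pidx wrap by resetting to zero once an increment reaches size. A tiny standalone illustration of the same index arithmetic (plain C, hypothetical ring type, not driver code):

#include <assert.h>
#include <stdint.h>

struct ring { uint16_t size, cidx, pidx, in_use; };

static uint16_t ring_avail(const struct ring *r)
{
	return r->size - 1 - r->in_use;		/* one slot kept unused */
}

static void ring_produce(struct ring *r)
{
	r->in_use++;
	if (++r->pidx == r->size)		/* wrap, as in t4_srq_produce() */
		r->pidx = 0;
}

static void ring_consume(struct ring *r)
{
	r->in_use--;
	if (++r->cidx == r->size)
		r->cidx = 0;
}

int main(void)
{
	struct ring r = { .size = 4 };

	assert(ring_avail(&r) == 3);		/* size 4 => 3 usable entries */
	ring_produce(&r);
	ring_produce(&r);
	ring_consume(&r);
	assert(r.cidx == 1 && r.pidx == 2 && ring_avail(&r) == 2);
	return 0;
}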
static inline int t4_rqes_posted(struct t4_wq *wq)
{
return wq->rq.in_use;
@@ -351,11 +487,6 @@ static inline int t4_rq_empty(struct t4_wq *wq)
return wq->rq.in_use == 0;
}
-static inline int t4_rq_full(struct t4_wq *wq)
-{
- return wq->rq.in_use == (wq->rq.size - 1);
-}
-
static inline u32 t4_rq_avail(struct t4_wq *wq)
{
return wq->rq.size - 1 - wq->rq.in_use;
@@ -374,7 +505,6 @@ static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
static inline void t4_rq_consume(struct t4_wq *wq)
{
wq->rq.in_use--;
- wq->rq.msn++;
if (++wq->rq.cidx == wq->rq.size)
wq->rq.cidx = 0;
}
@@ -399,11 +529,6 @@ static inline int t4_sq_empty(struct t4_wq *wq)
return wq->sq.in_use == 0;
}
-static inline int t4_sq_full(struct t4_wq *wq)
-{
- return wq->sq.in_use == (wq->sq.size - 1);
-}
-
static inline u32 t4_sq_avail(struct t4_wq *wq)
{
return wq->sq.size - 1 - wq->sq.in_use;
@@ -421,7 +546,6 @@ static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
static inline void t4_sq_consume(struct t4_wq *wq)
{
- BUG_ON(wq->sq.in_use < 1);
if (wq->sq.cidx == wq->sq.flush_cidx)
wq->sq.flush_cidx = -1;
wq->sq.in_use--;
@@ -455,22 +579,38 @@ static inline void pio_copy(u64 __iomem *dst, u64 *src)
}
}
-static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5,
- union t4_wr *wqe)
+static inline void t4_ring_srq_db(struct t4_srq *srq, u16 inc, u8 len16,
+ union t4_recv_wr *wqe)
+{
+ /* Flush host queue memory writes. */
+ wmb();
+ if (inc == 1 && srq->bar2_qid == 0 && wqe) {
+ pr_debug("%s : WC srq->pidx = %d; len16=%d\n",
+ __func__, srq->pidx, len16);
+ pio_copy(srq->bar2_va + SGE_UDB_WCDOORBELL, (u64 *)wqe);
+ } else {
+ pr_debug("%s: DB srq->pidx = %d; len16=%d\n",
+ __func__, srq->pidx, len16);
+ writel(PIDX_T5_V(inc) | QID_V(srq->bar2_qid),
+ srq->bar2_va + SGE_UDB_KDOORBELL);
+ }
+ /* Flush user doorbell area writes. */
+ wmb();
+}
+
+static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe)
{
/* Flush host queue memory writes. */
wmb();
if (wq->sq.bar2_va) {
if (inc == 1 && wq->sq.bar2_qid == 0 && wqe) {
- PDBG("%s: WC wq->sq.pidx = %d\n",
- __func__, wq->sq.pidx);
+ pr_debug("WC wq->sq.pidx = %d\n", wq->sq.pidx);
pio_copy((u64 __iomem *)
(wq->sq.bar2_va + SGE_UDB_WCDOORBELL),
(u64 *)wqe);
} else {
- PDBG("%s: DB wq->sq.pidx = %d\n",
- __func__, wq->sq.pidx);
+ pr_debug("DB wq->sq.pidx = %d\n", wq->sq.pidx);
writel(PIDX_T5_V(inc) | QID_V(wq->sq.bar2_qid),
wq->sq.bar2_va + SGE_UDB_KDOORBELL);
}
@@ -482,7 +622,7 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5,
writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db);
}
-static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
+static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc,
union t4_recv_wr *wqe)
{
@@ -490,14 +630,12 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
wmb();
if (wq->rq.bar2_va) {
if (inc == 1 && wq->rq.bar2_qid == 0 && wqe) {
- PDBG("%s: WC wq->rq.pidx = %d\n",
- __func__, wq->rq.pidx);
+ pr_debug("WC wq->rq.pidx = %d\n", wq->rq.pidx);
pio_copy((u64 __iomem *)
(wq->rq.bar2_va + SGE_UDB_WCDOORBELL),
(void *)wqe);
} else {
- PDBG("%s: DB wq->rq.pidx = %d\n",
- __func__, wq->rq.pidx);
+ pr_debug("DB wq->rq.pidx = %d\n", wq->rq.pidx);
writel(PIDX_T5_V(inc) | QID_V(wq->rq.bar2_qid),
wq->rq.bar2_va + SGE_UDB_KDOORBELL);
}
@@ -511,12 +649,14 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
static inline int t4_wq_in_error(struct t4_wq *wq)
{
- return wq->rq.queue[wq->rq.size].status.qp_err;
+ return *wq->qp_errp;
}
-static inline void t4_set_wq_in_error(struct t4_wq *wq)
+static inline void t4_set_wq_in_error(struct t4_wq *wq, u32 srqidx)
{
- wq->rq.queue[wq->rq.size].status.qp_err = 1;
+ if (srqidx)
+ *wq->srqidxp = srqidx;
+ *wq->qp_errp = 1;
}
static inline void t4_disable_wq_db(struct t4_wq *wq)
@@ -529,11 +669,6 @@ static inline void t4_enable_wq_db(struct t4_wq *wq)
wq->rq.queue[wq->rq.size].status.db_off = 0;
}
-static inline int t4_wq_db_enabled(struct t4_wq *wq)
-{
- return !wq->rq.queue[wq->rq.size].status.db_off;
-}
-
enum t4_cq_flags {
CQ_ARMED = 1,
};
@@ -561,6 +696,7 @@ struct t4_cq {
u16 cidx_inc;
u8 gen;
u8 error;
+ u8 *qp_errp;
unsigned long flags;
};
@@ -598,9 +734,11 @@ static inline void t4_swcq_produce(struct t4_cq *cq)
{
cq->sw_in_use++;
if (cq->sw_in_use == cq->size) {
- PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
+ pr_warn("%s cxgb4 sw cq overflow cqid %u\n",
+ __func__, cq->cqid);
cq->error = 1;
- BUG_ON(1);
+ cq->sw_in_use--;
+ return;
}
if (++cq->sw_pidx == cq->size)
cq->sw_pidx = 0;
@@ -608,7 +746,6 @@ static inline void t4_swcq_produce(struct t4_cq *cq)
static inline void t4_swcq_consume(struct t4_cq *cq)
{
- BUG_ON(cq->sw_in_use < 1);
cq->sw_in_use--;
if (++cq->sw_cidx == cq->size)
cq->sw_cidx = 0;
@@ -635,6 +772,11 @@ static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
return (CQE_GENBIT(cqe) == cq->gen);
}
+static inline int t4_cq_notempty(struct t4_cq *cq)
+{
+ return cq->sw_in_use || t4_valid_cqe(cq, &cq->queue[cq->cidx]);
+}
+
static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
int ret;
@@ -648,8 +790,7 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
ret = -EOVERFLOW;
cq->error = 1;
- printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
- BUG_ON(1);
+ pr_err("cq overflow cqid %u\n", cq->cqid);
} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
/* Ensure CQE is flushed to memory */
@@ -661,19 +802,6 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
return ret;
}
-static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
-{
- if (cq->sw_in_use == cq->size) {
- PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
- cq->error = 1;
- BUG_ON(1);
- return NULL;
- }
- if (cq->sw_in_use)
- return &cq->sw_queue[cq->sw_cidx];
- return NULL;
-}
-
static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
int ret = 0;
@@ -687,17 +815,19 @@ static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
return ret;
}
-static inline int t4_cq_in_error(struct t4_cq *cq)
-{
- return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
-}
-
static inline void t4_set_cq_in_error(struct t4_cq *cq)
{
- ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
+ *cq->qp_errp = 1;
}
#endif
struct t4_dev_status_page {
u8 db_off;
+ u8 write_cmpl_supported;
+ u16 pad2;
+ u32 pad3;
+ u64 qp_start;
+ u64 qp_size;
+ u64 cq_start;
+ u64 cq_size;
};
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 343e8daf2270..1f79537fc8d1 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -50,7 +50,8 @@ enum fw_ri_wr_opcode {
FW_RI_BYPASS = 0xd,
FW_RI_RECEIVE = 0xe,
- FW_RI_SGE_EC_CR_RETURN = 0xf
+ FW_RI_SGE_EC_CR_RETURN = 0xf,
+ FW_RI_WRITE_IMMEDIATE = FW_RI_RDMA_INIT
};
enum fw_ri_wr_flags {
@@ -59,7 +60,8 @@ enum fw_ri_wr_flags {
FW_RI_SOLICITED_EVENT_FLAG = 0x04,
FW_RI_READ_FENCE_FLAG = 0x08,
FW_RI_LOCAL_FENCE_FLAG = 0x10,
- FW_RI_RDMA_READ_INVALIDATE = 0x20
+ FW_RI_RDMA_READ_INVALIDATE = 0x20,
+ FW_RI_RDMA_WRITE_WITH_IMMEDIATE = 0x40
};
enum fw_ri_mpa_attrs {
@@ -120,9 +122,7 @@ struct fw_ri_dsgl {
__be16 nsge;
__be32 len0;
__be64 addr0;
-#ifndef C99_NOT_SUPPORTED
- struct fw_ri_dsge_pair sge[0];
-#endif
+ struct fw_ri_dsge_pair sge[];
};
struct fw_ri_sge {
@@ -136,9 +136,7 @@ struct fw_ri_isgl {
__u8 r1;
__be16 nsge;
__be32 r2;
-#ifndef C99_NOT_SUPPORTED
- struct fw_ri_sge sge[0];
-#endif
+ struct fw_ri_sge sge[];
};
struct fw_ri_immd {
@@ -146,9 +144,7 @@ struct fw_ri_immd {
__u8 r1;
__be16 r2;
__be32 immdlen;
-#ifndef C99_NOT_SUPPORTED
- __u8 data[0];
-#endif
+ __u8 data[];
};
struct fw_ri_tpte {
@@ -263,6 +259,7 @@ enum fw_ri_res_type {
FW_RI_RES_TYPE_SQ,
FW_RI_RES_TYPE_RQ,
FW_RI_RES_TYPE_CQ,
+ FW_RI_RES_TYPE_SRQ,
};
enum fw_ri_res_op {
@@ -296,6 +293,20 @@ struct fw_ri_res {
__be32 r6_lo;
__be64 r7;
} cq;
+ struct fw_ri_res_srq {
+ __u8 restype;
+ __u8 op;
+ __be16 r3;
+ __be32 eqid;
+ __be32 r4[2];
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+ __be32 srqid;
+ __be32 pdid;
+ __be32 hwsrqsize;
+ __be32 hwsrqaddr;
+ } srq;
} u;
};
@@ -303,9 +314,7 @@ struct fw_ri_res_wr {
__be32 op_nres;
__be32 len16_pkd;
__u64 cookie;
-#ifndef C99_NOT_SUPPORTED
- struct fw_ri_res res[0];
-#endif
+ struct fw_ri_res res[];
};
#define FW_RI_RES_WR_NRES_S 0
@@ -531,16 +540,24 @@ struct fw_ri_rdma_write_wr {
__u16 wrid;
__u8 r1[3];
__u8 len16;
- __be64 r2;
+ /*
+ * Use union for immediate data to be consistent with stack's 32 bit
+ * data and iWARP spec's 64 bit data.
+ */
+ union {
+ struct {
+ __be32 imm_data32;
+ u32 reserved;
+ } ib_imm_data;
+ __be64 imm_data64;
+ } iw_imm_data;
__be32 plen;
__be32 stag_sink;
__be64 to_sink;
-#ifndef C99_NOT_SUPPORTED
union {
- struct fw_ri_immd immd_src[0];
- struct fw_ri_isgl isgl_src[0];
+ DECLARE_FLEX_ARRAY(struct fw_ri_immd, immd_src);
+ DECLARE_FLEX_ARRAY(struct fw_ri_isgl, isgl_src);
} u;
-#endif
};
struct fw_ri_send_wr {
@@ -554,12 +571,10 @@ struct fw_ri_send_wr {
__be32 plen;
__be32 r3;
__be64 r4;
-#ifndef C99_NOT_SUPPORTED
union {
- struct fw_ri_immd immd_src[0];
- struct fw_ri_isgl isgl_src[0];
+ DECLARE_FLEX_ARRAY(struct fw_ri_immd, immd_src);
+ DECLARE_FLEX_ARRAY(struct fw_ri_isgl, isgl_src);
} u;
-#endif
};
#define FW_RI_SEND_WR_SENDOP_S 0
@@ -568,6 +583,35 @@ struct fw_ri_send_wr {
#define FW_RI_SEND_WR_SENDOP_G(x) \
(((x) >> FW_RI_SEND_WR_SENDOP_S) & FW_RI_SEND_WR_SENDOP_M)
+struct fw_ri_rdma_write_cmpl_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __u8 r2;
+ __u8 flags_send;
+ __u16 wrid_send;
+ __be32 stag_inv;
+ __be32 plen;
+ __be32 stag_sink;
+ __be64 to_sink;
+ union fw_ri_cmpl {
+ struct fw_ri_immd_cmpl {
+ __u8 op;
+ __u8 r1[6];
+ __u8 immdlen;
+ __u8 data[16];
+ } immd_src;
+ struct fw_ri_isgl isgl_src;
+ } u_cmpl;
+ __be64 r3;
+ union fw_ri_write {
+ DECLARE_FLEX_ARRAY(struct fw_ri_immd, immd_src);
+ DECLARE_FLEX_ARRAY(struct fw_ri_isgl, isgl_src);
+ } u;
+};
+
struct fw_ri_rdma_read_wr {
__u8 opcode;
__u8 flags;
@@ -669,6 +713,18 @@ struct fw_ri_fr_nsmr_wr {
#define FW_RI_FR_NSMR_WR_DCACPU_G(x) \
(((x) >> FW_RI_FR_NSMR_WR_DCACPU_S) & FW_RI_FR_NSMR_WR_DCACPU_M)
+struct fw_ri_fr_nsmr_tpte_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2;
+ __be32 stag;
+ struct fw_ri_tpte tpte;
+ __u64 pbl[2];
+};
+
struct fw_ri_inv_lstag_wr {
__u8 opcode;
__u8 flags;
@@ -695,6 +751,10 @@ enum fw_ri_init_p2ptype {
FW_RI_INIT_P2PTYPE_DISABLED = 0xf,
};
+enum fw_ri_init_rqeqid_srq {
+ FW_RI_INIT_RQEQID_SRQ = 1 << 31,
+};
+
struct fw_ri_wr {
__be32 op_compl;
__be32 flowid_len16;
@@ -753,103 +813,4 @@ struct fw_ri_wr {
#define FW_RI_WR_P2PTYPE_G(x) \
(((x) >> FW_RI_WR_P2PTYPE_S) & FW_RI_WR_P2PTYPE_M)
-struct tcp_options {
- __be16 mss;
- __u8 wsf;
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u8:4;
- __u8 unknown:1;
- __u8:1;
- __u8 sack:1;
- __u8 tstamp:1;
-#else
- __u8 tstamp:1;
- __u8 sack:1;
- __u8:1;
- __u8 unknown:1;
- __u8:4;
-#endif
-};
-
-struct cpl_pass_accept_req {
- union opcode_tid ot;
- __be16 rsvd;
- __be16 len;
- __be32 hdr_len;
- __be16 vlan;
- __be16 l2info;
- __be32 tos_stid;
- struct tcp_options tcpopt;
-};
-
-/* cpl_pass_accept_req.hdr_len fields */
-#define SYN_RX_CHAN_S 0
-#define SYN_RX_CHAN_M 0xF
-#define SYN_RX_CHAN_V(x) ((x) << SYN_RX_CHAN_S)
-#define SYN_RX_CHAN_G(x) (((x) >> SYN_RX_CHAN_S) & SYN_RX_CHAN_M)
-
-#define TCP_HDR_LEN_S 10
-#define TCP_HDR_LEN_M 0x3F
-#define TCP_HDR_LEN_V(x) ((x) << TCP_HDR_LEN_S)
-#define TCP_HDR_LEN_G(x) (((x) >> TCP_HDR_LEN_S) & TCP_HDR_LEN_M)
-
-#define IP_HDR_LEN_S 16
-#define IP_HDR_LEN_M 0x3FF
-#define IP_HDR_LEN_V(x) ((x) << IP_HDR_LEN_S)
-#define IP_HDR_LEN_G(x) (((x) >> IP_HDR_LEN_S) & IP_HDR_LEN_M)
-
-#define ETH_HDR_LEN_S 26
-#define ETH_HDR_LEN_M 0x1F
-#define ETH_HDR_LEN_V(x) ((x) << ETH_HDR_LEN_S)
-#define ETH_HDR_LEN_G(x) (((x) >> ETH_HDR_LEN_S) & ETH_HDR_LEN_M)
-
-/* cpl_pass_accept_req.l2info fields */
-#define SYN_MAC_IDX_S 0
-#define SYN_MAC_IDX_M 0x1FF
-#define SYN_MAC_IDX_V(x) ((x) << SYN_MAC_IDX_S)
-#define SYN_MAC_IDX_G(x) (((x) >> SYN_MAC_IDX_S) & SYN_MAC_IDX_M)
-
-#define SYN_XACT_MATCH_S 9
-#define SYN_XACT_MATCH_V(x) ((x) << SYN_XACT_MATCH_S)
-#define SYN_XACT_MATCH_F SYN_XACT_MATCH_V(1U)
-
-#define SYN_INTF_S 12
-#define SYN_INTF_M 0xF
-#define SYN_INTF_V(x) ((x) << SYN_INTF_S)
-#define SYN_INTF_G(x) (((x) >> SYN_INTF_S) & SYN_INTF_M)
-
-struct ulptx_idata {
- __be32 cmd_more;
- __be32 len;
-};
-
-#define ULPTX_NSGE_S 0
-#define ULPTX_NSGE_M 0xFFFF
-#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
-
-#define RX_DACK_MODE_S 29
-#define RX_DACK_MODE_M 0x3
-#define RX_DACK_MODE_V(x) ((x) << RX_DACK_MODE_S)
-#define RX_DACK_MODE_G(x) (((x) >> RX_DACK_MODE_S) & RX_DACK_MODE_M)
-
-#define RX_DACK_CHANGE_S 31
-#define RX_DACK_CHANGE_V(x) ((x) << RX_DACK_CHANGE_S)
-#define RX_DACK_CHANGE_F RX_DACK_CHANGE_V(1U)
-
-enum { /* TCP congestion control algorithms */
- CONG_ALG_RENO,
- CONG_ALG_TAHOE,
- CONG_ALG_NEWRENO,
- CONG_ALG_HIGHSPEED
-};
-
-#define CONG_CNTRL_S 14
-#define CONG_CNTRL_M 0x3
-#define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S)
-#define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M)
-
-#define T5_ISS_S 18
-#define T5_ISS_V(x) ((x) << T5_ISS_S)
-#define T5_ISS_F T5_ISS_V(1U)
-
#endif /* _T4FW_RI_API_H_ */
diff --git a/drivers/infiniband/hw/cxgb4/user.h b/drivers/infiniband/hw/cxgb4/user.h
deleted file mode 100644
index cbd0ce170728..000000000000
--- a/drivers/infiniband/hw/cxgb4/user.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __C4IW_USER_H__
-#define __C4IW_USER_H__
-
-#define C4IW_UVERBS_ABI_VERSION 2
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
- */
-struct c4iw_create_cq_resp {
- __u64 key;
- __u64 gts_key;
- __u64 memsize;
- __u32 cqid;
- __u32 size;
- __u32 qid_mask;
- __u32 reserved; /* explicit padding (optional for i386) */
-};
-
-
-enum {
- C4IW_QPF_ONCHIP = (1<<0)
-};
-
-struct c4iw_create_qp_resp {
- __u64 ma_sync_key;
- __u64 sq_key;
- __u64 rq_key;
- __u64 sq_db_gts_key;
- __u64 rq_db_gts_key;
- __u64 sq_memsize;
- __u64 rq_memsize;
- __u32 sqid;
- __u32 rqid;
- __u32 sq_size;
- __u32 rq_size;
- __u32 qid_mask;
- __u32 flags;
-};
-
-struct c4iw_alloc_ucontext_resp {
- __u64 status_page_key;
- __u32 status_page_size;
- __u32 reserved; /* explicit padding (optional for i386) */
-};
-#endif
diff --git a/drivers/infiniband/hw/efa/Kconfig b/drivers/infiniband/hw/efa/Kconfig
new file mode 100644
index 000000000000..457e18ba1d57
--- /dev/null
+++ b/drivers/infiniband/hw/efa/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+# Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+#
+# Amazon fabric device configuration
+#
+
+config INFINIBAND_EFA
+ tristate "Amazon Elastic Fabric Adapter (EFA) support"
+ depends on PCI_MSI && 64BIT && !CPU_BIG_ENDIAN
+ depends on INFINIBAND_USER_ACCESS
+ help
+ This driver supports Amazon Elastic Fabric Adapter (EFA).
+
+ To compile this driver as a module, choose M here.
+ The module will be called efa.
diff --git a/drivers/infiniband/hw/efa/Makefile b/drivers/infiniband/hw/efa/Makefile
new file mode 100644
index 000000000000..6e83083af0bc
--- /dev/null
+++ b/drivers/infiniband/hw/efa/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+# Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+#
+# Makefile for Amazon Elastic Fabric Adapter (EFA) device driver.
+#
+
+obj-$(CONFIG_INFINIBAND_EFA) += efa.o
+
+efa-y := efa_com_cmd.o efa_com.o efa_main.o efa_verbs.o
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
new file mode 100644
index 000000000000..96f9c3bc98b2
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef _EFA_H_
+#define _EFA_H_
+
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+
+#include <rdma/efa-abi.h>
+#include <rdma/ib_verbs.h>
+
+#include "efa_com_cmd.h"
+
+#define DRV_MODULE_NAME "efa"
+#define DEVICE_NAME "Elastic Fabric Adapter (EFA)"
+
+#define EFA_IRQNAME_SIZE 40
+
+#define EFA_MGMNT_MSIX_VEC_IDX 0
+#define EFA_COMP_EQS_VEC_BASE 1
+
+struct efa_irq {
+ irq_handler_t handler;
+ void *data;
+ u32 irqn;
+ u32 vector;
+ cpumask_t affinity_hint_mask;
+ char name[EFA_IRQNAME_SIZE];
+};
+
+/* Don't use anything other than atomic64 */
+struct efa_stats {
+ atomic64_t alloc_pd_err;
+ atomic64_t create_qp_err;
+ atomic64_t create_cq_err;
+ atomic64_t reg_mr_err;
+ atomic64_t alloc_ucontext_err;
+ atomic64_t create_ah_err;
+ atomic64_t mmap_err;
+ atomic64_t keep_alive_rcvd;
+};
+
+struct efa_dev {
+ struct ib_device ibdev;
+ struct efa_com_dev edev;
+ struct pci_dev *pdev;
+ struct efa_com_get_device_attr_result dev_attr;
+
+ u64 reg_bar_addr;
+ u64 reg_bar_len;
+ u64 mem_bar_addr;
+ u64 mem_bar_len;
+ u64 db_bar_addr;
+ u64 db_bar_len;
+
+ u32 num_irq_vectors;
+ u32 admin_msix_vector_idx;
+ struct efa_irq admin_irq;
+
+ struct efa_stats stats;
+
+ /* Array of completion EQs */
+ struct efa_eq *eqs;
+ u32 neqs;
+
+ /* Only stores CQs with interrupts enabled */
+ struct xarray cqs_xa;
+};
+
+struct efa_ucontext {
+ struct ib_ucontext ibucontext;
+ u16 uarn;
+};
+
+struct efa_pd {
+ struct ib_pd ibpd;
+ u16 pdn;
+};
+
+struct efa_mr_interconnect_info {
+ u16 recv_ic_id;
+ u16 rdma_read_ic_id;
+ u16 rdma_recv_ic_id;
+ u8 recv_ic_id_valid : 1;
+ u8 rdma_read_ic_id_valid : 1;
+ u8 rdma_recv_ic_id_valid : 1;
+};
+
+struct efa_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+ struct efa_mr_interconnect_info ic_info;
+};
+
+struct efa_cq {
+ struct ib_cq ibcq;
+ struct efa_ucontext *ucontext;
+ dma_addr_t dma_addr;
+ void *cpu_addr;
+ struct rdma_user_mmap_entry *mmap_entry;
+ struct rdma_user_mmap_entry *db_mmap_entry;
+ size_t size;
+ u16 cq_idx;
+ /* NULL when no interrupts requested */
+ struct efa_eq *eq;
+ struct ib_umem *umem;
+};
+
+struct efa_qp {
+ struct ib_qp ibqp;
+ dma_addr_t rq_dma_addr;
+ void *rq_cpu_addr;
+ size_t rq_size;
+ enum ib_qp_state state;
+
+ /* Used for saving mmap_xa entries */
+ struct rdma_user_mmap_entry *sq_db_mmap_entry;
+ struct rdma_user_mmap_entry *llq_desc_mmap_entry;
+ struct rdma_user_mmap_entry *rq_db_mmap_entry;
+ struct rdma_user_mmap_entry *rq_mmap_entry;
+
+ u32 qp_handle;
+ u32 max_send_wr;
+ u32 max_recv_wr;
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 max_inline_data;
+};
+
+struct efa_ah {
+ struct ib_ah ibah;
+ u16 ah;
+ /* dest_addr */
+ u8 id[EFA_GID_SIZE];
+};
+
+struct efa_eq {
+ struct efa_com_eq eeq;
+ struct efa_irq irq;
+};
+
+int efa_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props,
+ struct ib_udata *udata);
+int efa_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props);
+int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr);
+int efa_query_gid(struct ib_device *ibdev, u32 port, int index,
+ union ib_gid *gid);
+int efa_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
+ u16 *pkey);
+int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
+int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct ib_umem *umem, struct uverbs_attr_bundle *attrs);
+struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
+ struct ib_udata *udata);
+struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
+ u64 length, u64 virt_addr,
+ int fd, int access_flags,
+ struct ib_dmah *dmah,
+ struct uverbs_attr_bundle *attrs);
+int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
+int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable);
+int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata);
+void efa_dealloc_ucontext(struct ib_ucontext *ibucontext);
+int efa_mmap(struct ib_ucontext *ibucontext,
+ struct vm_area_struct *vma);
+void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
+int efa_create_ah(struct ib_ah *ibah,
+ struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
+int efa_destroy_ah(struct ib_ah *ibah, u32 flags);
+int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_udata *udata);
+enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
+ u32 port_num);
+struct rdma_hw_stats *efa_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num);
+struct rdma_hw_stats *efa_alloc_hw_device_stats(struct ib_device *ibdev);
+int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u32 port_num, int index);
+
+#endif /* _EFA_H_ */
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
new file mode 100644
index 000000000000..57178dad5eb7
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -0,0 +1,1140 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef _EFA_ADMIN_CMDS_H_
+#define _EFA_ADMIN_CMDS_H_
+
+#define EFA_ADMIN_API_VERSION_MAJOR 0
+#define EFA_ADMIN_API_VERSION_MINOR 1
+
+/* EFA admin queue opcodes */
+enum efa_admin_aq_opcode {
+ EFA_ADMIN_CREATE_QP = 1,
+ EFA_ADMIN_MODIFY_QP = 2,
+ EFA_ADMIN_QUERY_QP = 3,
+ EFA_ADMIN_DESTROY_QP = 4,
+ EFA_ADMIN_CREATE_AH = 5,
+ EFA_ADMIN_DESTROY_AH = 6,
+ EFA_ADMIN_REG_MR = 7,
+ EFA_ADMIN_DEREG_MR = 8,
+ EFA_ADMIN_CREATE_CQ = 9,
+ EFA_ADMIN_DESTROY_CQ = 10,
+ EFA_ADMIN_GET_FEATURE = 11,
+ EFA_ADMIN_SET_FEATURE = 12,
+ EFA_ADMIN_GET_STATS = 13,
+ EFA_ADMIN_ALLOC_PD = 14,
+ EFA_ADMIN_DEALLOC_PD = 15,
+ EFA_ADMIN_ALLOC_UAR = 16,
+ EFA_ADMIN_DEALLOC_UAR = 17,
+ EFA_ADMIN_CREATE_EQ = 18,
+ EFA_ADMIN_DESTROY_EQ = 19,
+ EFA_ADMIN_ALLOC_MR = 20,
+ EFA_ADMIN_MAX_OPCODE = 20,
+};
+
+enum efa_admin_aq_feature_id {
+ EFA_ADMIN_DEVICE_ATTR = 1,
+ EFA_ADMIN_AENQ_CONFIG = 2,
+ EFA_ADMIN_NETWORK_ATTR = 3,
+ EFA_ADMIN_QUEUE_ATTR = 4,
+ EFA_ADMIN_HW_HINTS = 5,
+ EFA_ADMIN_HOST_INFO = 6,
+ EFA_ADMIN_EVENT_QUEUE_ATTR = 7,
+};
+
+/* QP transport type */
+enum efa_admin_qp_type {
+ /* Unreliable Datagram */
+ EFA_ADMIN_QP_TYPE_UD = 1,
+ /* Scalable Reliable Datagram */
+ EFA_ADMIN_QP_TYPE_SRD = 2,
+};
+
+/* QP state */
+enum efa_admin_qp_state {
+ EFA_ADMIN_QP_STATE_RESET = 0,
+ EFA_ADMIN_QP_STATE_INIT = 1,
+ EFA_ADMIN_QP_STATE_RTR = 2,
+ EFA_ADMIN_QP_STATE_RTS = 3,
+ EFA_ADMIN_QP_STATE_SQD = 4,
+ EFA_ADMIN_QP_STATE_SQE = 5,
+ EFA_ADMIN_QP_STATE_ERR = 6,
+};
+
+enum efa_admin_get_stats_type {
+ EFA_ADMIN_GET_STATS_TYPE_BASIC = 0,
+ EFA_ADMIN_GET_STATS_TYPE_MESSAGES = 1,
+ EFA_ADMIN_GET_STATS_TYPE_RDMA_READ = 2,
+ EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE = 3,
+ EFA_ADMIN_GET_STATS_TYPE_NETWORK = 4,
+};
+
+enum efa_admin_get_stats_scope {
+ EFA_ADMIN_GET_STATS_SCOPE_ALL = 0,
+ EFA_ADMIN_GET_STATS_SCOPE_QUEUE = 1,
+};
+
+/*
+ * QP allocation sizes, converted by fabric QueuePair (QP) create command
+ * from QP capabilities.
+ */
+struct efa_admin_qp_alloc_size {
+ /* Send descriptor ring size in bytes */
+ u32 send_queue_ring_size;
+
+ /* Max number of WQEs that can be outstanding on send queue. */
+ u32 send_queue_depth;
+
+ /*
+ * Recv descriptor ring size in bytes, sufficient for user-provided
+ * number of WQEs
+ */
+ u32 recv_queue_ring_size;
+
+ /* Max number of WQEs that can be outstanding on recv queue */
+ u32 recv_queue_depth;
+};
+
+struct efa_admin_create_qp_cmd {
+ /* Common Admin Queue descriptor */
+ struct efa_admin_aq_common_desc aq_common_desc;
+
+ /* Protection Domain associated with this QP */
+ u16 pd;
+
+ /* QP type */
+ u8 qp_type;
+
+ /*
+ * 0 : sq_virt - If set, SQ ring base address is
+ * virtual (IOVA returned by MR registration)
+ * 1 : rq_virt - If set, RQ ring base address is
+ * virtual (IOVA returned by MR registration)
+ * 2 : unsolicited_write_recv - If set, work requests
+ * will not be consumed for incoming RDMA write with
+ * immediate
+ * 7:3 : reserved - MBZ
+ */
+ u8 flags;
+
+ /*
+ * Send queue (SQ) ring base physical address. This field is not
+ * used if this is a Low Latency Queue (LLQ).
+ */
+ u64 sq_base_addr;
+
+ /* Receive queue (RQ) ring base address. */
+ u64 rq_base_addr;
+
+ /* Index of CQ to be associated with Send Queue completions */
+ u32 send_cq_idx;
+
+ /* Index of CQ to be associated with Recv Queue completions */
+ u32 recv_cq_idx;
+
+ /*
+ * Memory registration key for the SQ ring, used only when not in
+ * LLQ mode and base address is virtual
+ */
+ u32 sq_l_key;
+
+ /*
+ * Memory registration key for the RQ ring, used only when base
+ * address is virtual
+ */
+ u32 rq_l_key;
+
+ /* Requested QP allocation sizes */
+ struct efa_admin_qp_alloc_size qp_alloc_size;
+
+ /* UAR number */
+ u16 uar;
+
+ /* Requested service level for the QP, 0 is the default SL */
+ u8 sl;
+
+ /* MBZ */
+ u8 reserved;
+
+ /* MBZ */
+ u32 reserved2;
+};
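The flags byte of efa_admin_create_qp_cmd packs three single-bit options (sq_virt at bit 0, rq_virt at bit 1, unsolicited_write_recv at bit 2; bits 7:3 must be zero). Purely to illustrate the documented layout, the packing could look like the sketch below; the mask macros and function name are invented here, not the driver's own accessors:

#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical mask names matching the bit layout documented above. */
#define QP_CREATE_SQ_VIRT		BIT(0)
#define QP_CREATE_RQ_VIRT		BIT(1)
#define QP_CREATE_UNSOL_WRITE_RECV	BIT(2)

static u8 pack_create_qp_flags(bool sq_virt, bool rq_virt, bool unsol_recv)
{
	u8 flags = 0;

	if (sq_virt)
		flags |= QP_CREATE_SQ_VIRT;	/* SQ base address is an IOVA */
	if (rq_virt)
		flags |= QP_CREATE_RQ_VIRT;	/* RQ base address is an IOVA */
	if (unsol_recv)
		flags |= QP_CREATE_UNSOL_WRITE_RECV;
	return flags;				/* bits 7:3 stay zero (MBZ) */
}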
+
+struct efa_admin_create_qp_resp {
+ /* Common Admin Queue completion descriptor */
+ struct efa_admin_acq_common_desc acq_common_desc;
+
+ /*
+ * Opaque handle to be used for subsequent admin operations on the
+ * QP
+ */
+ u32 qp_handle;
+
+ /*
+ * QP number in the given EFA virtual device. Least-significant bits (as
+ * needed according to max_qp) carry unique QP ID
+ */
+ u16 qp_num;
+
+ /* MBZ */
+ u16 reserved;
+
+ /* Index of sub-CQ for Send Queue completions */
+ u16 send_sub_cq_idx;
+
+ /* Index of sub-CQ for Receive Queue completions */
+ u16 recv_sub_cq_idx;
+
+ /* SQ doorbell address, as offset to PCIe DB BAR */
+ u32 sq_db_offset;
+
+ /* RQ doorbell address, as offset to PCIe DB BAR */
+ u32 rq_db_offset;
+
+ /*
+ * low latency send queue ring base address as an offset to PCIe
+ * MMIO LLQ_MEM BAR
+ */
+ u32 llq_descriptors_offset;
+};
+
+struct efa_admin_modify_qp_cmd {
+ /* Common Admin Queue descriptor */
+ struct efa_admin_aq_common_desc aq_common_desc;
+
+ /*
+ * Mask indicating which fields should be updated
+ * 0 : qp_state
+ * 1 : cur_qp_state
+ * 2 : qkey
+ * 3 : sq_psn
+ * 4 : sq_drained_async_notify
+ * 5 : rnr_retry
+ * 31:6 : reserved
+ */
+ u32 modify_mask;
+
+ /* QP handle returned by create_qp command */
+ u32 qp_handle;
+
+ /* QP state */
+ u32 qp_state;
+
+ /* Override current QP state (before applying the transition) */
+ u32 cur_qp_state;
+
+ /* QKey */
+ u32 qkey;
+
+ /* SQ PSN */
+ u32 sq_psn;
+
+ /* Enable async notification when SQ is drained */
+ u8 sq_drained_async_notify;
+
+ /* Number of RNR retries (valid only for SRD QPs) */
+ u8 rnr_retry;
+
+ /* MBZ */
+ u16 reserved2;
+};
+
+struct efa_admin_modify_qp_resp {
+ /* Common Admin Queue completion descriptor */
+ struct efa_admin_acq_common_desc acq_common_desc;
+};
+
+struct efa_admin_query_qp_cmd {
+ /* Common Admin Queue descriptor */
+ struct efa_admin_aq_common_desc aq_common_desc;
+
+ /* QP handle returned by create_qp command */
+ u32 qp_handle;
+};
+
+struct efa_admin_query_qp_resp {
+ /* Common Admin Queue completion descriptor */
+ struct efa_admin_acq_common_desc acq_common_desc;
+
+ /* QP state */
+ u32 qp_state;
+
+ /* QKey */
+ u32 qkey;
+
+ /* SQ PSN */
+ u32 sq_psn;
+
+ /* Indicates that draining is in progress */
+ u8 sq_draining;
+
+ /* Number of RNR retries (valid only for SRD QPs) */
+ u8 rnr_retry;
+
+ /* MBZ */
+ u16 reserved2;
+};
+
+struct efa_admin_destroy_qp_cmd {
+ /* Common Admin Queue descriptor */
+ struct efa_admin_aq_common_desc aq_common_desc;
+
+ /* QP handle returned by create_qp command */
+ u32 qp_handle;
+};
+
+struct efa_admin_destroy_qp_resp {
+ /* Common Admin Queue completion descriptor */
+ struct efa_admin_acq_common_desc acq_common_desc;
+};
+
+/*
+ * Create Address Handle command parameters. Must not be called more than
+ * once for the same destination
+ */
+struct efa_admin_create_ah_cmd {
+ /* Common Admin Queue descriptor */
+ struct efa_admin_aq_common_desc aq_common_desc;
+
+ /* Destination address in network byte order */
+ u8 dest_addr[16];
+
+ /* PD number */
+ u16 pd;
+
+ /* MBZ */
+ u16 reserved;
+};
+
+struct efa_admin_create_ah_resp {
+ /* Common Admin Queue completion descriptor */
+ struct efa_admin_acq_common_desc acq_common_desc;
+
+ /* Target interface address handle (opaque) */
+ u16 ah;
+
+ /* MBZ */
+ u16 reserved;
+};
+
+struct efa_admin_destroy_ah_cmd {
+ /* Common Admin Queue descriptor */
+ struct efa_admin_aq_common_desc aq_common_desc;
+
+ /* Target interface address handle (opaque) */
+ u16 ah;
+
+ /* PD number */
+ u16 pd;
+};
+
+struct efa_admin_destroy_ah_resp {
+ /* Common Admin Queue completion descriptor */
+ struct efa_admin_acq_common_desc acq_common_desc;
+};
+
+/*
+ * Registration of MemoryRegion, required for QP working with Virtual
+ * Addresses. In standard verbs semantics, region length is limited to 2GB
+ * space, but EFA offers support for larger MRs, to ease use for applications
+ * working with very large datasets (e.g. full GPU memory mapping).
+ */
+struct efa_admin_reg_mr_cmd {
+ /* Common Admin Queue descriptor */
+ struct efa_admin_aq_common_desc aq_common_desc;
+
+ /* Protection Domain */
+ u16 pd;
+
+ /* MBZ */
+ u16 reserved16_w1;
+
+ /* Physical Buffer List, each element is page-aligned. */
+ union {
+ /*
+ * Inline array of guest-physical page addresses of user
+ * memory pages (optimization for short region
+ * registrations)
+ */
+ u64 inline_pbl_array[4];
+
+ /* points to PBL (direct or indirect, chained if needed) */
+ struct efa_admin_ctrl_buff_info pbl;
+ } pbl;
+
+ /* Memory region length, in bytes. */
+ u64 mr_length;
+
+ /*
+ * flags and page size
+ * 4:0 : phys_page_size_shift - page size is (1 <<
+ * phys_page_size_shift). Page size is used for
+ * building the Virtual to Physical address mapping
+ * 6:5 : reserved - MBZ
+ * 7 : mem_addr_phy_mode_en - Enable bit for physical
+ * memory registration (no translation), can be used
+ * only by privileged clients. If set, PBL must
+ * contain a single entry.
+ */
+ u8 flags;
+
+ /*
+ * permissions
+ * 0 : local_write_enable - Local write permissions:
+ * must be set for RQ buffers and buffers posted for
+ * RDMA Read requests
+ * 1 : remote_write_enable - Remote write
+ * permissions: must be set to enable RDMA write to
+ * the region
+ * 2 : remote_read_enable - Remote read permissions:
+ * must be set to enable RDMA read from the region
+ * 7:3 : reserved2 - MBZ
+ */
+ u8 permissions;
+
+ /* MBZ */
+ u16 reserved16_w5;
+
+ /* number of pages in PBL (redundant, could be calculated) */
+ u32 page_num;
+
+ /*
+ * IO Virtual Address associated with this MR. If
+ * mem_addr_phy_mode_en is set, contains the physical address of
+ * the region.
+ */
+ u64 iova;
+};
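phys_page_size_shift above encodes the mapping page size as a power of two (page size = 1 << phys_page_size_shift), so the PBL entry count is simply the page-aligned span of the region divided by that page size. A small sketch of the arithmetic, with a made-up helper name:

#include <stdint.h>

/* Number of PBL entries needed to cover [iova, iova + mr_length). */
static uint32_t pbl_page_count(uint64_t iova, uint64_t mr_length,
			       uint8_t phys_page_size_shift)
{
	uint64_t page_size = 1ULL << phys_page_size_shift;
	uint64_t first = iova & ~(page_size - 1);		/* round down */
	uint64_t last = (iova + mr_length + page_size - 1) &
			~(page_size - 1);			/* round up */

	return (uint32_t)((last - first) >> phys_page_size_shift);
}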
+
+struct efa_admin_reg_mr_resp {
+ /* Common Admin Queue completion descriptor */
+ struct efa_admin_acq_common_desc acq_common_desc;
+
+ /*
+ * L_Key, to be used in conjunction with local buffer references in
+ * SQ and RQ WQE, or with virtual RQ/CQ rings
+ */
+ u32 l_key;
+
+ /*
+ * R_Key, to be used in RDMA messages to refer to remotely accessed
+ * memory region
+ */
+ u32 r_key;
+
+ /*
+ * Mask indicating which fields have valid values
+ * 0 : recv_ic_id
+ * 1 : rdma_read_ic_id
+ * 2 : rdma_recv_ic_id
+ */
+ u8 validity;
+
+ /*
+ * Physical interconnect used by the device to reach the MR for receive
+ * operation
+ */
+ u8 recv_ic_id;
+
+ /*
+ * Physical interconnect used by the device to reach the MR for RDMA
+ * read operation
+ */
+ u8 rdma_read_ic_id;
+
+ /*
+ * Physical interconnect used by the device to reach the MR for RDMA
+ * write receive
+ */
+ u8 rdma_recv_ic_id;
+};
+
+struct efa_admin_dereg_mr_cmd {
+ /* Common Admin Queue descriptor */
+ struct efa_admin_aq_common_desc aq_common_desc;
+
+ /* L_Key, memory region's l_key */
+ u32 l_key;
+};
+
+struct efa_admin_dereg_mr_resp {
+ /* Common Admin Queue completion descriptor */
+ struct efa_admin_acq_common_desc acq_common_desc;
+};
+
+/*
+ * Allocation of MemoryRegion, required for QP working with Virtual
+ * Addresses in kernel verbs semantics, ready for fast registration use.
+ */
+struct efa_admin_alloc_mr_cmd {
+ /* Common Admin Queue descriptor */
+ struct efa_admin_aq_common_desc aq_common_desc;
+
+ /* Protection Domain */
+ u16 pd;
+
+ /* MBZ */
+ u16 reserved1;
+
+ /* Maximum number of pages this MR supports. */
+ u32 max_pages;
+};
+
+struct efa_admin_alloc_mr_resp {
+ /* Common Admin Queue completion descriptor */
+ struct efa_admin_acq_common_desc acq_common_desc;
+
+ /*
+ * L_Key, to be used in conjunction with local buffer references in
+ * SQ and RQ WQE, or with virtual RQ/CQ rings
+ */
+ u32 l_key;
+
+ /*
+ * R_Key, to be used in RDMA messages to refer to remotely accessed
+ * memory region
+ */
+ u32 r_key;
+};
+
+struct efa_admin_create_cq_cmd {
+ struct efa_admin_aq_common_desc aq_common_desc;
+
+ /*
+ * 4:0 : reserved5 - MBZ
+ * 5 : interrupt_mode_enabled - if set, cq operates
+ * in interrupt mode (i.e. CQ events and EQ elements
+ * are generated), otherwise - polling
+ * 6 : virt - If set, ring base address is virtual
+ * (IOVA returned by MR registration)
+ * 7 : reserved6 - MBZ
+ */
+ u8 cq_caps_1;
+
+ /*
+ * 4:0 : cq_entry_size_words - size of CQ entry in
+ * 32-bit words, valid values: 4, 8.
+ * 5 : set_src_addr - If set, source address will be
+ * filled on RX completions from unknown senders.
+ * Requires 8 words CQ entry size.
+ * 7:6 : reserved7 - MBZ
+ */
+ u8 cq_caps_2;
+
+ /* Sub completion queue depth in # of entries; must be a power of 2 */
+ u16 sub_cq_depth;
+
+ /* EQ number assigned to this cq */
+ u16 eqn;
+
+ /* MBZ */
+ u16 reserved;
+
+ /*
+ * CQ ring base address, virtual or physical depending on 'virt'
+ * flag
+ */
+ struct efa_common_mem_addr cq_ba;
+
+ /*
+ * Memory registration key for the ring, used only when base
+ * address is virtual
+ */
+ u32 l_key;
+
+ /*
+ * number of sub cqs - must be equal to sub_cqs_per_cq of queue
+ * attributes.
+ */
+ u16 num_sub_cqs;
+
+ /* UAR number */
+ u16 uar;
+};
+
+struct efa_admin_create_cq_resp {
+ struct efa_admin_acq_common_desc acq_common_desc;
+
+ u16 cq_idx;
+
+ /* actual sub cq depth in number of entries */
+ u16 sub_cq_actual_depth;
+
+ /* CQ doorbell address, as offset to PCIe DB BAR */
+ u32 db_offset;
+
+ /*
+ * 0 : db_valid - If set, doorbell offset is valid.
+ * Always set when interrupts are requested.
+ */
+ u32 flags;
+};
+
+struct efa_admin_destroy_cq_cmd {
+ struct efa_admin_aq_common_desc aq_common_desc;
+
+ u16 cq_idx;
+
+ /* MBZ */
+ u16 reserved1;
+};
+
+struct efa_admin_destroy_cq_resp {
+ struct efa_admin_acq_common_desc acq_common_desc;
+};
+
+/*
+ * EFA AQ Get Statistics command. Extended statistics are placed in control
+ * buffer pointed by AQ entry
+ */
+struct efa_admin_aq_get_stats_cmd {
+ struct efa_admin_aq_common_desc aq_common_descriptor;
+
+ union {
+ /* command specific inline data */
+ u32 inline_data_w1[3];
+
+ struct efa_admin_ctrl_buff_info control_buffer;
+ } u;
+
+ /* stats type as defined in enum efa_admin_get_stats_type */
+ u8 type;
+
+ /* stats scope defined in enum efa_admin_get_stats_scope */
+ u8 scope;
+
+ u16 scope_modifier;
+};
+
+struct efa_admin_basic_stats {
+ u64 tx_bytes;
+
+ u64 tx_pkts;
+
+ u64 rx_bytes;
+
+ u64 rx_pkts;
+
+ u64 rx_drops;
+
+ u64 qkey_viol;
+};
+
+struct efa_admin_messages_stats {
+ u64 send_bytes;
+
+ u64 send_wrs;
+
+ u64 recv_bytes;
+
+ u64 recv_wrs;
+};
+
+struct efa_admin_rdma_read_stats {
+ u64 read_wrs;
+
+ u64 read_bytes;
+
+ u64 read_wr_err;
+
+ u64 read_resp_bytes;
+};
+
+struct efa_admin_rdma_write_stats {
+ u64 write_wrs;
+
+ u64 write_bytes;
+
+ u64 write_wr_err;
+
+ u64 write_recv_bytes;
+};
+
+struct efa_admin_network_stats {
+ u64 retrans_bytes;
+
+ u64 retrans_pkts;
+
+ u64 retrans_timeout_events;
+
+ u64 unresponsive_remote_events;
+
+ u64 impaired_remote_conn_events;
+};
+
+struct efa_admin_acq_get_stats_resp {
+ struct efa_admin_acq_common_desc acq_common_desc;
+
+ union {
+ struct efa_admin_basic_stats basic_stats;
+
+ struct efa_admin_messages_stats messages_stats;
+
+ struct efa_admin_rdma_read_stats rdma_read_stats;
+
+ struct efa_admin_rdma_write_stats rdma_write_stats;
+
+ struct efa_admin_network_stats network_stats;
+ } u;
+};
+
+struct efa_admin_get_set_feature_common_desc {
+ /* MBZ */
+ u8 reserved0;
+
+ /* as appears in efa_admin_aq_feature_id */
+ u8 feature_id;
+
+ /* MBZ */
+ u16 reserved16;
+};
+
+struct efa_admin_feature_device_attr_desc {
+ /* Bitmap of efa_admin_aq_feature_id */
+ u64 supported_features;
+
+ /* Bitmap of supported page sizes in MR registrations */
+ u64 page_size_cap;
+
+ u32 fw_version;
+
+ u32 admin_api_version;
+
+ u32 device_version;
+
+ /* Bar used for SQ and RQ doorbells */
+ u16 db_bar;
+
+ /* Indicates how many bits are used on physical address access */
+ u8 phys_addr_width;
+
+ /* Indicates how many bits are used on virtual address access */
+ u8 virt_addr_width;
+
+ /*
+ * 0 : rdma_read - If set, RDMA Read is supported on
+ * TX queues
+ * 1 : rnr_retry - If set, RNR retry is supported on
+ * modify QP command
+ * 2 : data_polling_128 - If set, 128 bytes data
+ * polling is supported
+ * 3 : rdma_write - If set, RDMA Write is supported
+ * on TX queues
+ * 4 : unsolicited_write_recv - If set, unsolicited
+ * write with imm. receive is supported
+ * 31:5 : reserved - MBZ
+ */
+ u32 device_caps;
+
+ /* Max RDMA transfer size in bytes */
+ u32 max_rdma_size;
+
+ /* Unique global ID for an EFA device */
+ u64 guid;
+
+ /* The device maximum link speed in Gbit/sec */
+ u16 max_link_speed_gbps;
+
+ /* MBZ */
+ u16 reserved0;
+
+ /* MBZ */
+ u32 reserved1;
+};
+
+struct efa_admin_feature_queue_attr_desc {
+ /* The maximum number of queue pairs supported */
+ u32 max_qp;
+
+ /* Maximum number of WQEs per Send Queue */
+ u32 max_sq_depth;
+
+ /* Maximum size of data that can be sent inline in a Send WQE */
+ u32 inline_buf_size;
+
+ /* Maximum number of buffer descriptors per Recv Queue */
+ u32 max_rq_depth;
+
+ /* The maximum number of completion queues supported per VF */
+ u32 max_cq;
+
+ /* Maximum number of CQEs per Completion Queue */
+ u32 max_cq_depth;
+
+ /* Number of sub-CQs to be created for each CQ */
+ u16 sub_cqs_per_cq;
+
+ /* Minimum number of WQEs per SQ */
+ u16 min_sq_depth;
+
+ /* Maximum number of SGEs (buffers) allowed for a single send WQE */
+ u16 max_wr_send_sges;
+
+ /* Maximum number of SGEs allowed for a single recv WQE */
+ u16 max_wr_recv_sges;
+
+ /* The maximum number of memory regions supported */
+ u32 max_mr;
+
+ /* The maximum number of pages that can be registered */
+ u32 max_mr_pages;
+
+ /* The maximum number of protection domains supported */
+ u32 max_pd;
+
+ /* The maximum number of address handles supported */
+ u32 max_ah;
+
+ /* The maximum size of LLQ in bytes */
+ u32 max_llq_size;
+
+ /* Maximum number of SGEs for a single RDMA read/write WQE */
+ u16 max_wr_rdma_sges;
+
+ /*
+ * Maximum number of bytes that can be written to SQ between two
+ * consecutive doorbells (in units of 64B). Driver must ensure that only
+ * complete WQEs are written to queue before issuing a doorbell.
+ * Examples: max_tx_batch=16 and WQE size = 64B, means up to 16 WQEs can
+ * be written to SQ between two consecutive doorbells. max_tx_batch=11
+ * and WQE size = 128B, means up to 5 WQEs can be written to SQ between
+ * two consecutive doorbells. Zero means unlimited.
+ */
+ u16 max_tx_batch;
+};
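For the max_tx_batch limit documented above, the usable batch is the 64B byte budget divided by the WQE size, counting whole WQEs only; the two worked examples in the comment (16 and 5) fall out of the same formula. A standalone sketch, with a hypothetical helper name and zero meaning unlimited as stated above:

#include <stdint.h>

/* Whole WQEs that may be written between two consecutive doorbells. */
static uint32_t wqes_per_doorbell(uint16_t max_tx_batch, uint32_t wqe_size)
{
	if (!max_tx_batch)
		return UINT32_MAX;			/* 0 => unlimited */
	return ((uint32_t)max_tx_batch * 64u) / wqe_size;
}
/* wqes_per_doorbell(16, 64) == 16, wqes_per_doorbell(11, 128) == 5 */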
+
+struct efa_admin_event_queue_attr_desc {
+ /* The maximum number of event queues supported */
+ u32 max_eq;
+
+ /* Maximum number of EQEs per Event Queue */
+ u32 max_eq_depth;
+
+ /* Supported events bitmask */
+ u32 event_bitmask;
+};
+
+struct efa_admin_feature_aenq_desc {
+ /* bitmask for AENQ groups the device can report */
+ u32 supported_groups;
+
+ /* bitmask for AENQ groups to report */
+ u32 enabled_groups;
+};
+
+struct efa_admin_feature_network_attr_desc {
+ /* Raw address data in network byte order */
+ u8 addr[16];
+
+ /* max packet payload size in bytes */
+ u32 mtu;
+};
+
+/*
+ * When hint value is 0, hints capabilities are not supported or driver
+ * should use its own predefined value
+ */
+struct efa_admin_hw_hints {
+ /* value in ms */
+ u16 mmio_read_timeout;
+
+ /* value in ms */
+ u16 driver_watchdog_timeout;
+
+ /* value in ms */
+ u16 admin_completion_timeout;
+
+ /* poll interval in ms */
+ u16 poll_interval;
+};
+
+struct efa_admin_get_feature_cmd {
+ struct efa_admin_aq_common_desc aq_common_descriptor;
+
+ struct efa_admin_ctrl_buff_info control_buffer;
+
+ struct efa_admin_get_set_feature_common_desc feature_common;
+
+ u32 raw[11];
+};
+
+struct efa_admin_get_feature_resp {
+ struct efa_admin_acq_common_desc acq_common_desc;
+
+ union {
+ u32 raw[14];
+
+ struct efa_admin_feature_device_attr_desc device_attr;
+
+ struct efa_admin_feature_aenq_desc aenq;
+
+ struct efa_admin_feature_network_attr_desc network_attr;
+
+ struct efa_admin_feature_queue_attr_desc queue_attr;
+
+ struct efa_admin_event_queue_attr_desc event_queue_attr;
+
+ struct efa_admin_hw_hints hw_hints;
+ } u;
+};
+
+struct efa_admin_set_feature_cmd {
+ struct efa_admin_aq_common_desc aq_common_descriptor;
+
+ struct efa_admin_ctrl_buff_info control_buffer;
+
+ struct efa_admin_get_set_feature_common_desc feature_common;
+
+ union {
+ u32 raw[11];
+
+ /* AENQ configuration */
+ struct efa_admin_feature_aenq_desc aenq;
+ } u;
+};
+
+struct efa_admin_set_feature_resp {
+ struct efa_admin_acq_common_desc acq_common_desc;
+
+ union {
+ u32 raw[14];
+ } u;
+};
+
+struct efa_admin_alloc_pd_cmd {
+ struct efa_admin_aq_common_desc aq_common_descriptor;
+};
+
+struct efa_admin_alloc_pd_resp {
+ struct efa_admin_acq_common_desc acq_common_desc;
+
+ /* PD number */
+ u16 pd;
+
+ /* MBZ */
+ u16 reserved;
+};
+
+struct efa_admin_dealloc_pd_cmd {
+ struct efa_admin_aq_common_desc aq_common_descriptor;
+
+ /* PD number */
+ u16 pd;
+
+ /* MBZ */
+ u16 reserved;
+};
+
+struct efa_admin_dealloc_pd_resp {
+ struct efa_admin_acq_common_desc acq_common_desc;
+};
+
+struct efa_admin_alloc_uar_cmd {
+ struct efa_admin_aq_common_desc aq_common_descriptor;
+};
+
+struct efa_admin_alloc_uar_resp {
+ struct efa_admin_acq_common_desc acq_common_desc;
+
+ /* UAR number */
+ u16 uar;
+
+ /* MBZ */
+ u16 reserved;
+};
+
+struct efa_admin_dealloc_uar_cmd {
+ struct efa_admin_aq_common_desc aq_common_descriptor;
+
+ /* UAR number */
+ u16 uar;
+
+ /* MBZ */
+ u16 reserved;
+};
+
+struct efa_admin_dealloc_uar_resp {
+ struct efa_admin_acq_common_desc acq_common_desc;
+};
+
+struct efa_admin_create_eq_cmd {
+ struct efa_admin_aq_common_desc aq_common_descriptor;
+
+ /* Size of the EQ in entries, must be power of 2 */
+ u16 depth;
+
+ /* MSI-X table entry index */
+ u8 msix_vec;
+
+ /*
+ * 4:0 : entry_size_words - size of EQ entry in
+ * 32-bit words
+ * 7:5 : reserved - MBZ
+ */
+ u8 caps;
+
+ /* EQ ring base address */
+ struct efa_common_mem_addr ba;
+
+ /*
+ * Enabled events on this EQ
+ * 0 : completion_events - Enable completion events
+ * 31:1 : reserved - MBZ
+ */
+ u32 event_bitmask;
+
+ /* MBZ */
+ u32 reserved;
+};
+
+struct efa_admin_create_eq_resp {
+ struct efa_admin_acq_common_desc acq_common_desc;
+
+ /* EQ number */
+ u16 eqn;
+
+ /* MBZ */
+ u16 reserved;
+};
+
+struct efa_admin_destroy_eq_cmd {
+ struct efa_admin_aq_common_desc aq_common_descriptor;
+
+ /* EQ number */
+ u16 eqn;
+
+ /* MBZ */
+ u16 reserved;
+};
+
+struct efa_admin_destroy_eq_resp {
+ struct efa_admin_acq_common_desc acq_common_desc;
+};
+
+/* asynchronous event notification groups */
+enum efa_admin_aenq_group {
+ EFA_ADMIN_FATAL_ERROR = 1,
+ EFA_ADMIN_WARNING = 2,
+ EFA_ADMIN_NOTIFICATION = 3,
+ EFA_ADMIN_KEEP_ALIVE = 4,
+ EFA_ADMIN_AENQ_GROUPS_NUM = 5,
+};
+
+struct efa_admin_mmio_req_read_less_resp {
+ u16 req_id;
+
+ u16 reg_off;
+
+ /* value is valid when poll is cleared */
+ u32 reg_val;
+};
+
+enum efa_admin_os_type {
+ EFA_ADMIN_OS_LINUX = 0,
+};
+
+struct efa_admin_host_info {
+ /* OS distribution string format */
+ u8 os_dist_str[128];
+
+ /* Defined in enum efa_admin_os_type */
+ u32 os_type;
+
+ /* Kernel version string format */
+ u8 kernel_ver_str[32];
+
+ /* Kernel version numeric format */
+ u32 kernel_ver;
+
+ /*
+ * 7:0 : driver_module_type
+ * 15:8 : driver_sub_minor
+ * 23:16 : driver_minor
+ * 31:24 : driver_major
+ */
+ u32 driver_ver;
+
+ /*
+ * Device's Bus, Device and Function
+ * 2:0 : function
+ * 7:3 : device
+ * 15:8 : bus
+ */
+ u16 bdf;
+
+ /*
+ * Spec version
+ * 7:0 : spec_minor
+ * 15:8 : spec_major
+ */
+ u16 spec_ver;
+
+ /*
+ * 0 : intree - Intree driver
+ * 1 : gdr - GPUDirect RDMA supported
+ * 31:2 : reserved2
+ */
+ u32 flags;
+};
+
+/* create_qp_cmd */
+#define EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK BIT(0)
+#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK BIT(1)
+#define EFA_ADMIN_CREATE_QP_CMD_UNSOLICITED_WRITE_RECV_MASK BIT(2)
+
+/* modify_qp_cmd */
+#define EFA_ADMIN_MODIFY_QP_CMD_QP_STATE_MASK BIT(0)
+#define EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE_MASK BIT(1)
+#define EFA_ADMIN_MODIFY_QP_CMD_QKEY_MASK BIT(2)
+#define EFA_ADMIN_MODIFY_QP_CMD_SQ_PSN_MASK BIT(3)
+#define EFA_ADMIN_MODIFY_QP_CMD_SQ_DRAINED_ASYNC_NOTIFY_MASK BIT(4)
+#define EFA_ADMIN_MODIFY_QP_CMD_RNR_RETRY_MASK BIT(5)
+
+/* reg_mr_cmd */
+#define EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK GENMASK(4, 0)
+#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK BIT(7)
+#define EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK BIT(0)
+#define EFA_ADMIN_REG_MR_CMD_REMOTE_WRITE_ENABLE_MASK BIT(1)
+#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK BIT(2)
+
+/* reg_mr_resp */
+#define EFA_ADMIN_REG_MR_RESP_RECV_IC_ID_MASK BIT(0)
+#define EFA_ADMIN_REG_MR_RESP_RDMA_READ_IC_ID_MASK BIT(1)
+#define EFA_ADMIN_REG_MR_RESP_RDMA_RECV_IC_ID_MASK BIT(2)
+
+/* create_cq_cmd */
+#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
+#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK BIT(6)
+#define EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
+#define EFA_ADMIN_CREATE_CQ_CMD_SET_SRC_ADDR_MASK BIT(5)
+
+/* create_cq_resp */
+#define EFA_ADMIN_CREATE_CQ_RESP_DB_VALID_MASK BIT(0)
+
+/* feature_device_attr_desc */
+#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK BIT(0)
+#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RNR_RETRY_MASK BIT(1)
+#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_DATA_POLLING_128_MASK BIT(2)
+#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_WRITE_MASK BIT(3)
+#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_UNSOLICITED_WRITE_RECV_MASK BIT(4)
+
+/* create_eq_cmd */
+#define EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
+#define EFA_ADMIN_CREATE_EQ_CMD_COMPLETION_EVENTS_MASK BIT(0)
+
+/* host_info */
+#define EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE_MASK GENMASK(7, 0)
+#define EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR_MASK GENMASK(15, 8)
+#define EFA_ADMIN_HOST_INFO_DRIVER_MINOR_MASK GENMASK(23, 16)
+#define EFA_ADMIN_HOST_INFO_DRIVER_MAJOR_MASK GENMASK(31, 24)
+#define EFA_ADMIN_HOST_INFO_FUNCTION_MASK GENMASK(2, 0)
+#define EFA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3)
+#define EFA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8)
+#define EFA_ADMIN_HOST_INFO_SPEC_MINOR_MASK GENMASK(7, 0)
+#define EFA_ADMIN_HOST_INFO_SPEC_MAJOR_MASK GENMASK(15, 8)
+#define EFA_ADMIN_HOST_INFO_INTREE_MASK BIT(0)
+#define EFA_ADMIN_HOST_INFO_GDR_MASK BIT(1)
+
+#endif /* _EFA_ADMIN_CMDS_H_ */
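For illustration, the packed version and BDF fields of struct efa_admin_host_info above would be filled with the EFA_SET() helper (used throughout efa_com.c in this series) and the EFA_ADMIN_HOST_INFO_* masks. A minimal sketch, with placeholder version numbers and a made-up function name:

/* Illustrative only: pack struct efa_admin_host_info bit-fields with the
 * EFA_SET() helper and the masks defined above.
 */
static void efa_fill_host_info_ids(struct efa_admin_host_info *hinf,
				   u8 bus, u8 dev, u8 func)
{
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MAJOR, 2);
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MINOR, 1);
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR, 0);

	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_BUS, bus);
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_DEVICE, dev);
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_FUNCTION, func);

	EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MAJOR, 2);
	EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MINOR, 0);
}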
diff --git a/drivers/infiniband/hw/efa/efa_admin_defs.h b/drivers/infiniband/hw/efa/efa_admin_defs.h
new file mode 100644
index 000000000000..35700c93e639
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa_admin_defs.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef _EFA_ADMIN_H_
+#define _EFA_ADMIN_H_
+
+enum efa_admin_aq_completion_status {
+ EFA_ADMIN_SUCCESS = 0,
+ EFA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1,
+ EFA_ADMIN_BAD_OPCODE = 2,
+ EFA_ADMIN_UNSUPPORTED_OPCODE = 3,
+ EFA_ADMIN_MALFORMED_REQUEST = 4,
+ /* Additional status is provided in ACQ entry extended_status */
+ EFA_ADMIN_ILLEGAL_PARAMETER = 5,
+ EFA_ADMIN_UNKNOWN_ERROR = 6,
+ EFA_ADMIN_RESOURCE_BUSY = 7,
+};
+
+struct efa_admin_aq_common_desc {
+ /*
+ * 11:0 : command_id
+ * 15:12 : reserved12
+ */
+ u16 command_id;
+
+ /* as appears in efa_admin_aq_opcode */
+ u8 opcode;
+
+ /*
+ * 0 : phase
+ * 1 : ctrl_data - control buffer address valid
+ * 2 : ctrl_data_indirect - control buffer address
+ * points to list of pages with addresses of control
+ * buffers
+ * 7:3 : reserved3
+ */
+ u8 flags;
+};
+
+/*
+ * used in efa_admin_aq_entry. Can point directly to control data, or to a
+ * page list chunk. Used also at the end of indirect mode page list chunks,
+ * for chaining.
+ */
+struct efa_admin_ctrl_buff_info {
+ u32 length;
+
+ struct efa_common_mem_addr address;
+};
+
+struct efa_admin_aq_entry {
+ struct efa_admin_aq_common_desc aq_common_descriptor;
+
+ union {
+ u32 inline_data_w1[3];
+
+ struct efa_admin_ctrl_buff_info control_buffer;
+ } u;
+
+ u32 inline_data_w4[12];
+};
+
+struct efa_admin_acq_common_desc {
+ /*
+ * command identifier to associate it with the aq descriptor
+ * 11:0 : command_id
+ * 15:12 : reserved12
+ */
+ u16 command;
+
+ u8 status;
+
+ /*
+ * 0 : phase
+ * 7:1 : reserved1
+ */
+ u8 flags;
+
+ u16 extended_status;
+
+ /*
+ * indicates to the driver which AQ entry has been consumed by the
+ * device and could be reused
+ */
+ u16 sq_head_indx;
+};
+
+struct efa_admin_acq_entry {
+ struct efa_admin_acq_common_desc acq_common_descriptor;
+
+ u32 response_specific_data[14];
+};
+
+struct efa_admin_aenq_common_desc {
+ u16 group;
+
+ u16 syndrome;
+
+ /*
+ * 0 : phase
+ * 7:1 : reserved - MBZ
+ */
+ u8 flags;
+
+ u8 reserved1[3];
+
+ u32 timestamp_low;
+
+ u32 timestamp_high;
+};
+
+struct efa_admin_aenq_entry {
+ struct efa_admin_aenq_common_desc aenq_common_desc;
+
+ /* command specific inline data */
+ u32 inline_data_w4[12];
+};
+
+enum efa_admin_eqe_event_type {
+ EFA_ADMIN_EQE_EVENT_TYPE_COMPLETION = 0,
+};
+
+/* Completion event */
+struct efa_admin_comp_event {
+ /* CQ number */
+ u16 cqn;
+
+ /* MBZ */
+ u16 reserved;
+
+ /* MBZ */
+ u32 reserved2;
+};
+
+/* Event Queue Element */
+struct efa_admin_eqe {
+ /*
+ * 0 : phase
+ * 8:1 : event_type - Event type
+ * 31:9 : reserved - MBZ
+ */
+ u32 common;
+
+ /* MBZ */
+ u32 reserved;
+
+ union {
+ /* Event data */
+ u32 event_data[2];
+
+ /* Completion Event */
+ struct efa_admin_comp_event comp_event;
+ } u;
+};
+
+/* aq_common_desc */
+#define EFA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define EFA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
+#define EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1)
+#define EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2)
+
+/* acq_common_desc */
+#define EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define EFA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0)
+
+/* aenq_common_desc */
+#define EFA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
+
+/* eqe */
+#define EFA_ADMIN_EQE_PHASE_MASK BIT(0)
+#define EFA_ADMIN_EQE_EVENT_TYPE_MASK GENMASK(8, 1)
+
+#endif /* _EFA_ADMIN_H_ */
diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
new file mode 100644
index 000000000000..0e979ca10d24
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa_com.c
@@ -0,0 +1,1255 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#include "efa_com.h"
+#include "efa_regs_defs.h"
+
+#define ADMIN_CMD_TIMEOUT_US 30000000 /* usecs */
+
+#define EFA_REG_READ_TIMEOUT_US 50000 /* usecs */
+#define EFA_MMIO_READ_INVALID 0xffffffff
+
+#define EFA_POLL_INTERVAL_MS 100 /* msecs */
+
+#define EFA_ASYNC_QUEUE_DEPTH 16
+#define EFA_ADMIN_QUEUE_DEPTH 32
+
+#define EFA_CTRL_MAJOR 0
+#define EFA_CTRL_MINOR 0
+#define EFA_CTRL_SUB_MINOR 1
+
+enum efa_cmd_status {
+ EFA_CMD_SUBMITTED,
+ EFA_CMD_COMPLETED,
+};
+
+struct efa_comp_ctx {
+ struct completion wait_event;
+ struct efa_admin_acq_entry *user_cqe;
+ u32 comp_size;
+ enum efa_cmd_status status;
+ u16 cmd_id;
+ u8 cmd_opcode;
+ u8 occupied;
+};
+
+static const char *efa_com_cmd_str(u8 cmd)
+{
+#define EFA_CMD_STR_CASE(_cmd) case EFA_ADMIN_##_cmd: return #_cmd
+
+ switch (cmd) {
+ EFA_CMD_STR_CASE(CREATE_QP);
+ EFA_CMD_STR_CASE(MODIFY_QP);
+ EFA_CMD_STR_CASE(QUERY_QP);
+ EFA_CMD_STR_CASE(DESTROY_QP);
+ EFA_CMD_STR_CASE(CREATE_AH);
+ EFA_CMD_STR_CASE(DESTROY_AH);
+ EFA_CMD_STR_CASE(REG_MR);
+ EFA_CMD_STR_CASE(DEREG_MR);
+ EFA_CMD_STR_CASE(CREATE_CQ);
+ EFA_CMD_STR_CASE(DESTROY_CQ);
+ EFA_CMD_STR_CASE(GET_FEATURE);
+ EFA_CMD_STR_CASE(SET_FEATURE);
+ EFA_CMD_STR_CASE(GET_STATS);
+ EFA_CMD_STR_CASE(ALLOC_PD);
+ EFA_CMD_STR_CASE(DEALLOC_PD);
+ EFA_CMD_STR_CASE(ALLOC_UAR);
+ EFA_CMD_STR_CASE(DEALLOC_UAR);
+ EFA_CMD_STR_CASE(CREATE_EQ);
+ EFA_CMD_STR_CASE(DESTROY_EQ);
+ default: return "unknown command opcode";
+ }
+#undef EFA_CMD_STR_CASE
+}
+
+void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low)
+{
+ *addr_low = lower_32_bits(addr);
+ *addr_high = upper_32_bits(addr);
+}
+
+static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset)
+{
+ struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
+ struct efa_admin_mmio_req_read_less_resp *read_resp;
+ unsigned long exp_time;
+ u32 mmio_read_reg = 0;
+ u32 err;
+
+ read_resp = mmio_read->read_resp;
+
+ spin_lock(&mmio_read->lock);
+ mmio_read->seq_num++;
+
+ /* write a junk value to req_id so we can tell when the device has responded */
+ read_resp->req_id = mmio_read->seq_num + 0x9aL;
+ EFA_SET(&mmio_read_reg, EFA_REGS_MMIO_REG_READ_REG_OFF, offset);
+ EFA_SET(&mmio_read_reg, EFA_REGS_MMIO_REG_READ_REQ_ID,
+ mmio_read->seq_num);
+
+ writel(mmio_read_reg, edev->reg_bar + EFA_REGS_MMIO_REG_READ_OFF);
+
+ exp_time = jiffies + usecs_to_jiffies(mmio_read->mmio_read_timeout);
+ do {
+ if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
+ break;
+ udelay(1);
+ } while (time_is_after_jiffies(exp_time));
+
+ if (read_resp->req_id != mmio_read->seq_num) {
+ ibdev_err_ratelimited(
+ edev->efa_dev,
+ "Reading register timed out. expected: req id[%u] offset[%#x] actual: req id[%u] offset[%#x]\n",
+ mmio_read->seq_num, offset, read_resp->req_id,
+ read_resp->reg_off);
+ err = EFA_MMIO_READ_INVALID;
+ goto out;
+ }
+
+ if (read_resp->reg_off != offset) {
+ ibdev_err_ratelimited(
+ edev->efa_dev,
+ "Reading register failed: wrong offset provided\n");
+ err = EFA_MMIO_READ_INVALID;
+ goto out;
+ }
+
+ err = read_resp->reg_val;
+out:
+ spin_unlock(&mmio_read->lock);
+ return err;
+}
+
+static int efa_com_admin_init_sq(struct efa_com_dev *edev)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_com_admin_sq *sq = &aq->sq;
+ u16 size = aq->depth * sizeof(*sq->entries);
+ u32 aq_caps = 0;
+ u32 addr_high;
+ u32 addr_low;
+
+ sq->entries =
+ dma_alloc_coherent(aq->dmadev, size, &sq->dma_addr, GFP_KERNEL);
+ if (!sq->entries)
+ return -ENOMEM;
+
+ spin_lock_init(&sq->lock);
+
+ sq->cc = 0;
+ sq->pc = 0;
+ sq->phase = 1;
+
+ sq->db_addr = (u32 __iomem *)(edev->reg_bar + EFA_REGS_AQ_PROD_DB_OFF);
+
+ addr_high = upper_32_bits(sq->dma_addr);
+ addr_low = lower_32_bits(sq->dma_addr);
+
+ writel(addr_low, edev->reg_bar + EFA_REGS_AQ_BASE_LO_OFF);
+ writel(addr_high, edev->reg_bar + EFA_REGS_AQ_BASE_HI_OFF);
+
+ EFA_SET(&aq_caps, EFA_REGS_AQ_CAPS_AQ_DEPTH, aq->depth);
+ EFA_SET(&aq_caps, EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE,
+ sizeof(struct efa_admin_aq_entry));
+
+ writel(aq_caps, edev->reg_bar + EFA_REGS_AQ_CAPS_OFF);
+
+ return 0;
+}
+
+static int efa_com_admin_init_cq(struct efa_com_dev *edev)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_com_admin_cq *cq = &aq->cq;
+ u16 size = aq->depth * sizeof(*cq->entries);
+ u32 acq_caps = 0;
+ u32 addr_high;
+ u32 addr_low;
+
+ cq->entries =
+ dma_alloc_coherent(aq->dmadev, size, &cq->dma_addr, GFP_KERNEL);
+ if (!cq->entries)
+ return -ENOMEM;
+
+ spin_lock_init(&cq->lock);
+
+ cq->cc = 0;
+ cq->phase = 1;
+
+ addr_high = upper_32_bits(cq->dma_addr);
+ addr_low = lower_32_bits(cq->dma_addr);
+
+ writel(addr_low, edev->reg_bar + EFA_REGS_ACQ_BASE_LO_OFF);
+ writel(addr_high, edev->reg_bar + EFA_REGS_ACQ_BASE_HI_OFF);
+
+ EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_DEPTH, aq->depth);
+ EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE,
+ sizeof(struct efa_admin_acq_entry));
+ EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR,
+ aq->msix_vector_idx);
+
+ writel(acq_caps, edev->reg_bar + EFA_REGS_ACQ_CAPS_OFF);
+
+ return 0;
+}
+
+static int efa_com_admin_init_aenq(struct efa_com_dev *edev,
+ struct efa_aenq_handlers *aenq_handlers)
+{
+ struct efa_com_aenq *aenq = &edev->aenq;
+ u32 addr_low, addr_high;
+ u32 aenq_caps = 0;
+ u16 size;
+
+ if (!aenq_handlers) {
+ ibdev_err(edev->efa_dev, "aenq handlers pointer is NULL\n");
+ return -EINVAL;
+ }
+
+ size = EFA_ASYNC_QUEUE_DEPTH * sizeof(*aenq->entries);
+ aenq->entries = dma_alloc_coherent(edev->dmadev, size, &aenq->dma_addr,
+ GFP_KERNEL);
+ if (!aenq->entries)
+ return -ENOMEM;
+
+ aenq->aenq_handlers = aenq_handlers;
+ aenq->depth = EFA_ASYNC_QUEUE_DEPTH;
+ aenq->cc = 0;
+ aenq->phase = 1;
+
+ addr_low = lower_32_bits(aenq->dma_addr);
+ addr_high = upper_32_bits(aenq->dma_addr);
+
+ writel(addr_low, edev->reg_bar + EFA_REGS_AENQ_BASE_LO_OFF);
+ writel(addr_high, edev->reg_bar + EFA_REGS_AENQ_BASE_HI_OFF);
+
+ EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_DEPTH, aenq->depth);
+ EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE,
+ sizeof(struct efa_admin_aenq_entry));
+ EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR,
+ aenq->msix_vector_idx);
+ writel(aenq_caps, edev->reg_bar + EFA_REGS_AENQ_CAPS_OFF);
+
+ /*
+ * Init cons_db to mark that all entries in the queue
+ * are initially available
+ */
+ writel(edev->aenq.cc, edev->reg_bar + EFA_REGS_AENQ_CONS_DB_OFF);
+
+ return 0;
+}
+
+/* ID to be used with efa_com_get_comp_ctx */
+static u16 efa_com_alloc_ctx_id(struct efa_com_admin_queue *aq)
+{
+ u16 ctx_id;
+
+ spin_lock(&aq->comp_ctx_lock);
+ ctx_id = aq->comp_ctx_pool[aq->comp_ctx_pool_next];
+ aq->comp_ctx_pool_next++;
+ spin_unlock(&aq->comp_ctx_lock);
+
+ return ctx_id;
+}
+
+static void efa_com_dealloc_ctx_id(struct efa_com_admin_queue *aq,
+ u16 ctx_id)
+{
+ spin_lock(&aq->comp_ctx_lock);
+ aq->comp_ctx_pool_next--;
+ aq->comp_ctx_pool[aq->comp_ctx_pool_next] = ctx_id;
+ spin_unlock(&aq->comp_ctx_lock);
+}
+
+static inline void efa_com_put_comp_ctx(struct efa_com_admin_queue *aq,
+ struct efa_comp_ctx *comp_ctx)
+{
+ u16 cmd_id = EFA_GET(&comp_ctx->user_cqe->acq_common_descriptor.command,
+ EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID);
+ u16 ctx_id = cmd_id & (aq->depth - 1);
+
+ ibdev_dbg(aq->efa_dev, "Put completion command_id %#x\n", cmd_id);
+ comp_ctx->occupied = 0;
+ efa_com_dealloc_ctx_id(aq, ctx_id);
+}
+
+static struct efa_comp_ctx *efa_com_get_comp_ctx(struct efa_com_admin_queue *aq,
+ u16 cmd_id, bool capture)
+{
+ u16 ctx_id = cmd_id & (aq->depth - 1);
+
+ if (aq->comp_ctx[ctx_id].occupied && capture) {
+ ibdev_err_ratelimited(
+ aq->efa_dev,
+ "Completion context for command_id %#x is occupied\n",
+ cmd_id);
+ return NULL;
+ }
+
+ if (capture) {
+ aq->comp_ctx[ctx_id].occupied = 1;
+ ibdev_dbg(aq->efa_dev,
+ "Take completion ctxt for command_id %#x\n", cmd_id);
+ }
+
+ return &aq->comp_ctx[ctx_id];
+}
+
+static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queue *aq,
+ struct efa_admin_aq_entry *cmd,
+ size_t cmd_size_in_bytes,
+ struct efa_admin_acq_entry *comp,
+ size_t comp_size_in_bytes)
+{
+ struct efa_admin_aq_entry *aqe;
+ struct efa_comp_ctx *comp_ctx;
+ u16 queue_size_mask;
+ u16 cmd_id;
+ u16 ctx_id;
+ u16 pi;
+
+ queue_size_mask = aq->depth - 1;
+ pi = aq->sq.pc & queue_size_mask;
+
+ ctx_id = efa_com_alloc_ctx_id(aq);
+
+ /* cmd_id LSBs are the ctx_id and MSBs are entropy bits from pc */
+ cmd_id = ctx_id & queue_size_mask;
+ cmd_id |= aq->sq.pc & ~queue_size_mask;
+ cmd_id &= EFA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
+
+ cmd->aq_common_descriptor.command_id = cmd_id;
+ EFA_SET(&cmd->aq_common_descriptor.flags,
+ EFA_ADMIN_AQ_COMMON_DESC_PHASE, aq->sq.phase);
+
+ comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, true);
+ if (!comp_ctx) {
+ efa_com_dealloc_ctx_id(aq, ctx_id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ comp_ctx->status = EFA_CMD_SUBMITTED;
+ comp_ctx->comp_size = comp_size_in_bytes;
+ comp_ctx->user_cqe = comp;
+ comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
+ comp_ctx->cmd_id = cmd_id;
+
+ reinit_completion(&comp_ctx->wait_event);
+
+ aqe = &aq->sq.entries[pi];
+ memset(aqe, 0, sizeof(*aqe));
+ memcpy(aqe, cmd, cmd_size_in_bytes);
+
+ aq->sq.pc++;
+ atomic64_inc(&aq->stats.submitted_cmd);
+
+ if ((aq->sq.pc & queue_size_mask) == 0)
+ aq->sq.phase = !aq->sq.phase;
+
+ /* no explicit barrier needed, writel() provides the required ordering */
+ writel(aq->sq.pc, aq->sq.db_addr);
+
+ return comp_ctx;
+}
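To make the cmd_id layout above concrete, here is a standalone sketch of the same composition for the default queue depth of 32 (EFA_ADMIN_QUEUE_DEPTH); the function is illustrative only and not part of the patch:

/* Illustrative only: how a command id is composed for a queue of depth 32.
 * The low log2(depth) bits carry the completion-context index; the upper
 * bits of the 12-bit command_id field come from the producer counter.
 */
static u16 example_cmd_id(u16 ctx_id, u16 pc)
{
	u16 queue_size_mask = 32 - 1;	/* EFA_ADMIN_QUEUE_DEPTH - 1 */
	u16 cmd_id;

	cmd_id = ctx_id & queue_size_mask;
	cmd_id |= pc & ~queue_size_mask;

	return cmd_id & EFA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
}

/* e.g. example_cmd_id(3, 70) == 0x43; the completion path recovers the
 * context index with cmd_id & (depth - 1), as efa_com_get_comp_ctx() does.
 */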
+
+static inline int efa_com_init_comp_ctxt(struct efa_com_admin_queue *aq)
+{
+ size_t pool_size = aq->depth * sizeof(*aq->comp_ctx_pool);
+ size_t size = aq->depth * sizeof(struct efa_comp_ctx);
+ struct efa_comp_ctx *comp_ctx;
+ u16 i;
+
+ aq->comp_ctx = devm_kzalloc(aq->dmadev, size, GFP_KERNEL);
+ aq->comp_ctx_pool = devm_kzalloc(aq->dmadev, pool_size, GFP_KERNEL);
+ if (!aq->comp_ctx || !aq->comp_ctx_pool) {
+ devm_kfree(aq->dmadev, aq->comp_ctx_pool);
+ devm_kfree(aq->dmadev, aq->comp_ctx);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < aq->depth; i++) {
+ comp_ctx = efa_com_get_comp_ctx(aq, i, false);
+ if (comp_ctx)
+ init_completion(&comp_ctx->wait_event);
+
+ aq->comp_ctx_pool[i] = i;
+ }
+
+ spin_lock_init(&aq->comp_ctx_lock);
+
+ aq->comp_ctx_pool_next = 0;
+
+ return 0;
+}
+
+static struct efa_comp_ctx *efa_com_submit_admin_cmd(struct efa_com_admin_queue *aq,
+ struct efa_admin_aq_entry *cmd,
+ size_t cmd_size_in_bytes,
+ struct efa_admin_acq_entry *comp,
+ size_t comp_size_in_bytes)
+{
+ struct efa_comp_ctx *comp_ctx;
+
+ spin_lock(&aq->sq.lock);
+ if (!test_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state)) {
+ ibdev_err_ratelimited(aq->efa_dev, "Admin queue is closed\n");
+ spin_unlock(&aq->sq.lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ comp_ctx = __efa_com_submit_admin_cmd(aq, cmd, cmd_size_in_bytes, comp,
+ comp_size_in_bytes);
+ spin_unlock(&aq->sq.lock);
+ if (IS_ERR(comp_ctx))
+ clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
+
+ return comp_ctx;
+}
+
+static int efa_com_handle_single_admin_completion(struct efa_com_admin_queue *aq,
+ struct efa_admin_acq_entry *cqe)
+{
+ struct efa_comp_ctx *comp_ctx;
+ u16 cmd_id;
+
+ cmd_id = EFA_GET(&cqe->acq_common_descriptor.command,
+ EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID);
+
+ comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, false);
+ if (comp_ctx->status != EFA_CMD_SUBMITTED) {
+ ibdev_err(aq->efa_dev,
+ "Received completion with unexpected command id[%d], sq producer: %d, sq consumer: %d, cq consumer: %d\n",
+ cmd_id, aq->sq.pc, aq->sq.cc, aq->cq.cc);
+ return -EINVAL;
+ }
+
+ comp_ctx->status = EFA_CMD_COMPLETED;
+ memcpy(comp_ctx->user_cqe, cqe, comp_ctx->comp_size);
+
+ if (!test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state))
+ complete(&comp_ctx->wait_event);
+
+ return 0;
+}
+
+static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq)
+{
+ struct efa_admin_acq_entry *cqe;
+ u16 queue_size_mask;
+ u16 comp_cmds = 0;
+ u8 phase;
+ int err;
+ u16 ci;
+
+ queue_size_mask = aq->depth - 1;
+
+ ci = aq->cq.cc & queue_size_mask;
+ phase = aq->cq.phase;
+
+ cqe = &aq->cq.entries[ci];
+
+ /* Go over all the completions */
+ while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
+ EFA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
+ /*
+ * Do not read the rest of the completion entry before the
+ * phase bit was validated
+ */
+ dma_rmb();
+ err = efa_com_handle_single_admin_completion(aq, cqe);
+ if (!err)
+ comp_cmds++;
+
+ aq->cq.cc++;
+ ci++;
+ if (ci == aq->depth) {
+ ci = 0;
+ phase = !phase;
+ }
+
+ cqe = &aq->cq.entries[ci];
+ }
+
+ aq->cq.phase = phase;
+ aq->sq.cc += comp_cmds;
+ atomic64_add(comp_cmds, &aq->stats.completed_cmd);
+}
+
+static int efa_com_comp_status_to_errno(u8 comp_status)
+{
+ switch (comp_status) {
+ case EFA_ADMIN_SUCCESS:
+ return 0;
+ case EFA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
+ return -ENOMEM;
+ case EFA_ADMIN_UNSUPPORTED_OPCODE:
+ return -EOPNOTSUPP;
+ case EFA_ADMIN_BAD_OPCODE:
+ case EFA_ADMIN_MALFORMED_REQUEST:
+ case EFA_ADMIN_ILLEGAL_PARAMETER:
+ case EFA_ADMIN_UNKNOWN_ERROR:
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int efa_com_wait_and_process_admin_cq_polling(struct efa_comp_ctx *comp_ctx,
+ struct efa_com_admin_queue *aq)
+{
+ unsigned long timeout;
+ unsigned long flags;
+ int err;
+
+ timeout = jiffies + usecs_to_jiffies(aq->completion_timeout);
+
+ while (1) {
+ spin_lock_irqsave(&aq->cq.lock, flags);
+ efa_com_handle_admin_completion(aq);
+ spin_unlock_irqrestore(&aq->cq.lock, flags);
+
+ if (comp_ctx->status != EFA_CMD_SUBMITTED)
+ break;
+
+ if (time_is_before_jiffies(timeout)) {
+ ibdev_err_ratelimited(
+ aq->efa_dev,
+ "Wait for completion (polling) timeout\n");
+ /* EFA didn't have any completion */
+ atomic64_inc(&aq->stats.no_completion);
+
+ clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
+ err = -ETIME;
+ goto out;
+ }
+
+ msleep(aq->poll_interval);
+ }
+
+ err = efa_com_comp_status_to_errno(comp_ctx->user_cqe->acq_common_descriptor.status);
+out:
+ efa_com_put_comp_ctx(aq, comp_ctx);
+ return err;
+}
+
+static int efa_com_wait_and_process_admin_cq_interrupts(struct efa_comp_ctx *comp_ctx,
+ struct efa_com_admin_queue *aq)
+{
+ unsigned long flags;
+ int err;
+
+ wait_for_completion_timeout(&comp_ctx->wait_event,
+ usecs_to_jiffies(aq->completion_timeout));
+
+ /*
+ * If the command was not completed, find out the root cause.
+ * There are two possible failures:
+ * 1) No completion was written (timeout reached).
+ * 2) A completion was written but the driver did not receive the
+ *    MSI-X interrupt for it.
+ */
+ if (comp_ctx->status == EFA_CMD_SUBMITTED) {
+ spin_lock_irqsave(&aq->cq.lock, flags);
+ efa_com_handle_admin_completion(aq);
+ spin_unlock_irqrestore(&aq->cq.lock, flags);
+
+ atomic64_inc(&aq->stats.no_completion);
+
+ if (comp_ctx->status == EFA_CMD_COMPLETED)
+ ibdev_err_ratelimited(
+ aq->efa_dev,
+ "The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (id: %d, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
+ efa_com_cmd_str(comp_ctx->cmd_opcode),
+ comp_ctx->cmd_opcode, comp_ctx->status,
+ comp_ctx->cmd_id, aq->sq.pc, aq->sq.cc,
+ aq->cq.cc);
+ else
+ ibdev_err_ratelimited(
+ aq->efa_dev,
+ "The device didn't send any completion for admin cmd %s(%d) status %d (id: %d, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
+ efa_com_cmd_str(comp_ctx->cmd_opcode),
+ comp_ctx->cmd_opcode, comp_ctx->status,
+ comp_ctx->cmd_id, aq->sq.pc, aq->sq.cc,
+ aq->cq.cc);
+
+ clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
+ err = -ETIME;
+ goto out;
+ }
+
+ err = efa_com_comp_status_to_errno(comp_ctx->user_cqe->acq_common_descriptor.status);
+out:
+ efa_com_put_comp_ctx(aq, comp_ctx);
+ return err;
+}
+
+/*
+ * There are two ways to wait for a completion.
+ * Polling mode - poll the completion queue until the completion is
+ * available or the timeout expires.
+ * Interrupt mode - sleep on the wait event until the completion is ready
+ * or the timeout expires. In this mode the IRQ handler is expected to have
+ * called efa_com_handle_admin_completion() to mark the completions.
+ */
+static int efa_com_wait_and_process_admin_cq(struct efa_comp_ctx *comp_ctx,
+ struct efa_com_admin_queue *aq)
+{
+ if (test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state))
+ return efa_com_wait_and_process_admin_cq_polling(comp_ctx, aq);
+
+ return efa_com_wait_and_process_admin_cq_interrupts(comp_ctx, aq);
+}
+
+/**
+ * efa_com_cmd_exec - Execute admin command
+ * @aq: admin queue.
+ * @cmd: the admin command to execute.
+ * @cmd_size: the command size.
+ * @comp: command completion return entry.
+ * @comp_size: command completion size.
+ * Submit an admin command and wait until the device returns a
+ * completion. The completion is copied into @comp.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
+ struct efa_admin_aq_entry *cmd,
+ size_t cmd_size,
+ struct efa_admin_acq_entry *comp,
+ size_t comp_size)
+{
+ struct efa_comp_ctx *comp_ctx;
+ int err;
+
+ might_sleep();
+
+ /* Block until a command slot is available; the queue may be full */
+ down(&aq->avail_cmds);
+
+ ibdev_dbg(aq->efa_dev, "%s (opcode %d)\n",
+ efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
+ cmd->aq_common_descriptor.opcode);
+ comp_ctx = efa_com_submit_admin_cmd(aq, cmd, cmd_size, comp, comp_size);
+ if (IS_ERR(comp_ctx)) {
+ ibdev_err_ratelimited(
+ aq->efa_dev,
+ "Failed to submit command %s (opcode %u) err %pe\n",
+ efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
+ cmd->aq_common_descriptor.opcode, comp_ctx);
+
+ up(&aq->avail_cmds);
+ atomic64_inc(&aq->stats.cmd_err);
+ return PTR_ERR(comp_ctx);
+ }
+
+ err = efa_com_wait_and_process_admin_cq(comp_ctx, aq);
+ if (err) {
+ ibdev_err_ratelimited(
+ aq->efa_dev,
+ "Failed to process command %s (opcode %u) comp_status %d err %d\n",
+ efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
+ cmd->aq_common_descriptor.opcode,
+ comp_ctx->user_cqe->acq_common_descriptor.status, err);
+ atomic64_inc(&aq->stats.cmd_err);
+ }
+
+ up(&aq->avail_cmds);
+
+ return err;
+}
+
+/**
+ * efa_com_admin_destroy - Destroy the admin and the async events queues.
+ * @edev: EFA communication layer struct
+ */
+void efa_com_admin_destroy(struct efa_com_dev *edev)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_com_aenq *aenq = &edev->aenq;
+ struct efa_com_admin_cq *cq = &aq->cq;
+ struct efa_com_admin_sq *sq = &aq->sq;
+ u16 size;
+
+ clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
+
+ devm_kfree(edev->dmadev, aq->comp_ctx_pool);
+ devm_kfree(edev->dmadev, aq->comp_ctx);
+
+ size = aq->depth * sizeof(*sq->entries);
+ dma_free_coherent(edev->dmadev, size, sq->entries, sq->dma_addr);
+
+ size = aq->depth * sizeof(*cq->entries);
+ dma_free_coherent(edev->dmadev, size, cq->entries, cq->dma_addr);
+
+ size = aenq->depth * sizeof(*aenq->entries);
+ dma_free_coherent(edev->dmadev, size, aenq->entries, aenq->dma_addr);
+}
+
+/**
+ * efa_com_set_admin_polling_mode - Set the admin completion queue polling mode
+ * @edev: EFA communication layer struct
+ * @polling: Enable/Disable polling mode
+ *
+ * Set the admin completion mode.
+ */
+void efa_com_set_admin_polling_mode(struct efa_com_dev *edev, bool polling)
+{
+ u32 mask_value = 0;
+
+ if (polling)
+ EFA_SET(&mask_value, EFA_REGS_INTR_MASK_EN, 1);
+
+ writel(mask_value, edev->reg_bar + EFA_REGS_INTR_MASK_OFF);
+ if (polling)
+ set_bit(EFA_AQ_STATE_POLLING_BIT, &edev->aq.state);
+ else
+ clear_bit(EFA_AQ_STATE_POLLING_BIT, &edev->aq.state);
+}
+
+static void efa_com_stats_init(struct efa_com_dev *edev)
+{
+ atomic64_t *s = (atomic64_t *)&edev->aq.stats;
+ int i;
+
+ for (i = 0; i < sizeof(edev->aq.stats) / sizeof(*s); i++, s++)
+ atomic64_set(s, 0);
+}
+
+/**
+ * efa_com_admin_init - Init the admin and the async queues
+ * @edev: EFA communication layer struct
+ * @aenq_handlers: Handlers to be called upon AENQ events.
+ *
+ * Initialize the admin submission and completion queues.
+ * Initialize the asynchronous events notification queues.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int efa_com_admin_init(struct efa_com_dev *edev,
+ struct efa_aenq_handlers *aenq_handlers)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ u32 timeout;
+ u32 dev_sts;
+ u32 cap;
+ int err;
+
+ dev_sts = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
+ if (!EFA_GET(&dev_sts, EFA_REGS_DEV_STS_READY)) {
+ ibdev_err(edev->efa_dev,
+ "Device isn't ready, abort com init %#x\n", dev_sts);
+ return -ENODEV;
+ }
+
+ aq->depth = EFA_ADMIN_QUEUE_DEPTH;
+
+ aq->dmadev = edev->dmadev;
+ aq->efa_dev = edev->efa_dev;
+ set_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state);
+
+ sema_init(&aq->avail_cmds, aq->depth);
+
+ efa_com_stats_init(edev);
+
+ err = efa_com_init_comp_ctxt(aq);
+ if (err)
+ return err;
+
+ err = efa_com_admin_init_sq(edev);
+ if (err)
+ goto err_destroy_comp_ctxt;
+
+ err = efa_com_admin_init_cq(edev);
+ if (err)
+ goto err_destroy_sq;
+
+ efa_com_set_admin_polling_mode(edev, false);
+
+ err = efa_com_admin_init_aenq(edev, aenq_handlers);
+ if (err)
+ goto err_destroy_cq;
+
+ cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
+ timeout = EFA_GET(&cap, EFA_REGS_CAPS_ADMIN_CMD_TO);
+ if (timeout)
+ /* the resolution of timeout reg is 100ms */
+ aq->completion_timeout = timeout * 100000;
+ else
+ aq->completion_timeout = ADMIN_CMD_TIMEOUT_US;
+
+ aq->poll_interval = EFA_POLL_INTERVAL_MS;
+
+ set_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
+
+ return 0;
+
+err_destroy_cq:
+ dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->cq.entries),
+ aq->cq.entries, aq->cq.dma_addr);
+err_destroy_sq:
+ dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->sq.entries),
+ aq->sq.entries, aq->sq.dma_addr);
+err_destroy_comp_ctxt:
+ devm_kfree(edev->dmadev, aq->comp_ctx);
+
+ return err;
+}
+
+/**
+ * efa_com_admin_q_comp_intr_handler - admin queue interrupt handler
+ * @edev: EFA communication layer struct
+ *
+ * Goes over the admin completion queue and wakes up all the threads
+ * waiting on the corresponding command completion events.
+ *
+ * Note: Should be called after MSI-X interrupt.
+ */
+void efa_com_admin_q_comp_intr_handler(struct efa_com_dev *edev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&edev->aq.cq.lock, flags);
+ efa_com_handle_admin_completion(&edev->aq);
+ spin_unlock_irqrestore(&edev->aq.cq.lock, flags);
+}
+
+/*
+ * efa_com_get_specific_aenq_cb:
+ * Return the handler registered for the given event group, or the
+ * unimplemented_handler fallback if none is registered.
+ */
+static efa_aenq_handler efa_com_get_specific_aenq_cb(struct efa_com_dev *edev,
+ u16 group)
+{
+ struct efa_aenq_handlers *aenq_handlers = edev->aenq.aenq_handlers;
+
+ if (group < EFA_MAX_HANDLERS && aenq_handlers->handlers[group])
+ return aenq_handlers->handlers[group];
+
+ return aenq_handlers->unimplemented_handler;
+}
+
+/**
+ * efa_com_aenq_intr_handler - AENQ interrupt handler
+ * @edev: EFA communication layer struct
+ * @data: Opaque context passed through to the AENQ handler callbacks.
+ *
+ * Go over the async event notification queue and call the proper aenq handler.
+ */
+void efa_com_aenq_intr_handler(struct efa_com_dev *edev, void *data)
+{
+ struct efa_admin_aenq_common_desc *aenq_common;
+ struct efa_com_aenq *aenq = &edev->aenq;
+ struct efa_admin_aenq_entry *aenq_e;
+ efa_aenq_handler handler_cb;
+ u32 processed = 0;
+ u8 phase;
+ u32 ci;
+
+ ci = aenq->cc & (aenq->depth - 1);
+ phase = aenq->phase;
+ aenq_e = &aenq->entries[ci]; /* Get first entry */
+ aenq_common = &aenq_e->aenq_common_desc;
+
+ /* Go over all the events */
+ while ((READ_ONCE(aenq_common->flags) &
+ EFA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
+ /*
+ * Do not read the rest of the completion entry before the
+ * phase bit was validated
+ */
+ dma_rmb();
+
+ /* Handle specific event */
+ handler_cb = efa_com_get_specific_aenq_cb(edev,
+ aenq_common->group);
+ handler_cb(data, aenq_e); /* call the actual event handler */
+
+ /* Get next event entry */
+ ci++;
+ processed++;
+
+ if (ci == aenq->depth) {
+ ci = 0;
+ phase = !phase;
+ }
+ aenq_e = &aenq->entries[ci];
+ aenq_common = &aenq_e->aenq_common_desc;
+ }
+
+ aenq->cc += processed;
+ aenq->phase = phase;
+
+ /* Don't update aenq doorbell if there weren't any processed events */
+ if (!processed)
+ return;
+
+ /* no explicit barrier needed, writel() provides the required ordering */
+ writel(aenq->cc, edev->reg_bar + EFA_REGS_AENQ_CONS_DB_OFF);
+}
+
+static void efa_com_mmio_reg_read_resp_addr_init(struct efa_com_dev *edev)
+{
+ struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
+ u32 addr_high;
+ u32 addr_low;
+
+ /* dma_addr_bits is unknown at this point */
+ addr_high = (mmio_read->read_resp_dma_addr >> 32) & GENMASK(31, 0);
+ addr_low = mmio_read->read_resp_dma_addr & GENMASK(31, 0);
+
+ writel(addr_high, edev->reg_bar + EFA_REGS_MMIO_RESP_HI_OFF);
+ writel(addr_low, edev->reg_bar + EFA_REGS_MMIO_RESP_LO_OFF);
+}
+
+int efa_com_mmio_reg_read_init(struct efa_com_dev *edev)
+{
+ struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
+
+ spin_lock_init(&mmio_read->lock);
+ mmio_read->read_resp =
+ dma_alloc_coherent(edev->dmadev, sizeof(*mmio_read->read_resp),
+ &mmio_read->read_resp_dma_addr, GFP_KERNEL);
+ if (!mmio_read->read_resp)
+ return -ENOMEM;
+
+ efa_com_mmio_reg_read_resp_addr_init(edev);
+
+ mmio_read->read_resp->req_id = 0;
+ mmio_read->seq_num = 0;
+ mmio_read->mmio_read_timeout = EFA_REG_READ_TIMEOUT_US;
+
+ return 0;
+}
+
+void efa_com_mmio_reg_read_destroy(struct efa_com_dev *edev)
+{
+ struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
+
+ dma_free_coherent(edev->dmadev, sizeof(*mmio_read->read_resp),
+ mmio_read->read_resp, mmio_read->read_resp_dma_addr);
+}
+
+int efa_com_validate_version(struct efa_com_dev *edev)
+{
+ u32 min_ctrl_ver = 0;
+ u32 ctrl_ver_masked;
+ u32 min_ver = 0;
+ u32 ctrl_ver;
+ u32 ver;
+
+ /*
+ * Make sure the EFA version and the controller version are at least
+ * as high as what the driver expects.
+ */
+ ver = efa_com_reg_read32(edev, EFA_REGS_VERSION_OFF);
+ ctrl_ver = efa_com_reg_read32(edev,
+ EFA_REGS_CONTROLLER_VERSION_OFF);
+
+ ibdev_dbg(edev->efa_dev, "efa device version: %d.%d\n",
+ EFA_GET(&ver, EFA_REGS_VERSION_MAJOR_VERSION),
+ EFA_GET(&ver, EFA_REGS_VERSION_MINOR_VERSION));
+
+ EFA_SET(&min_ver, EFA_REGS_VERSION_MAJOR_VERSION,
+ EFA_ADMIN_API_VERSION_MAJOR);
+ EFA_SET(&min_ver, EFA_REGS_VERSION_MINOR_VERSION,
+ EFA_ADMIN_API_VERSION_MINOR);
+ if (ver < min_ver) {
+ ibdev_err(edev->efa_dev,
+ "EFA version is lower than the minimal version the driver supports\n");
+ return -EOPNOTSUPP;
+ }
+
+ ibdev_dbg(
+ edev->efa_dev,
+ "efa controller version: %d.%d.%d implementation version %d\n",
+ EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION),
+ EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION),
+ EFA_GET(&ctrl_ver,
+ EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION),
+ EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_IMPL_ID));
+
+ ctrl_ver_masked =
+ EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION) |
+ EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION) |
+ EFA_GET(&ctrl_ver,
+ EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION);
+
+ EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION,
+ EFA_CTRL_MAJOR);
+ EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION,
+ EFA_CTRL_MINOR);
+ EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION,
+ EFA_CTRL_SUB_MINOR);
+ /* Validate the ctrl version without the implementation ID */
+ if (ctrl_ver_masked < min_ctrl_ver) {
+ ibdev_err(edev->efa_dev,
+ "EFA ctrl version is lower than the minimal ctrl version the driver supports\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * efa_com_get_dma_width - Retrieve the physical DMA address width the
+ * device supports.
+ * @edev: EFA communication layer struct
+ *
+ * Retrieve the maximum physical address bits the device can handle.
+ *
+ * @return: the DMA address width (> 0) on success, negative value otherwise.
+ */
+int efa_com_get_dma_width(struct efa_com_dev *edev)
+{
+ u32 caps = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
+ int width;
+
+ width = EFA_GET(&caps, EFA_REGS_CAPS_DMA_ADDR_WIDTH);
+
+ ibdev_dbg(edev->efa_dev, "DMA width: %d\n", width);
+
+ if (width < 32 || width > 64) {
+ ibdev_err(edev->efa_dev, "DMA width illegal value: %d\n", width);
+ return -EINVAL;
+ }
+
+ edev->dma_addr_bits = width;
+
+ return width;
+}
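A hedged sketch of a typical caller (not part of this patch): the reported width would normally be fed into the PCI device's DMA mask; the wrapper name below is made up.

/* Illustrative only: propagate the reported DMA width to the DMA core. */
static int efa_set_dma_mask(struct efa_com_dev *edev, struct pci_dev *pdev)
{
	int width = efa_com_get_dma_width(edev);

	if (width < 0)
		return width;

	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(width));
}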
+
+static int wait_for_reset_state(struct efa_com_dev *edev, u32 timeout, int on)
+{
+ u32 val, i;
+
+ for (i = 0; i < timeout; i++) {
+ val = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
+
+ if (EFA_GET(&val, EFA_REGS_DEV_STS_RESET_IN_PROGRESS) == on)
+ return 0;
+
+ ibdev_dbg(edev->efa_dev, "Reset indication val %d\n", val);
+ msleep(EFA_POLL_INTERVAL_MS);
+ }
+
+ return -ETIME;
+}
+
+/**
+ * efa_com_dev_reset - Perform a function-level reset (FLR) of the device.
+ * @edev: EFA communication layer struct
+ * @reset_reason: The reason that triggered the reset.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int efa_com_dev_reset(struct efa_com_dev *edev,
+ enum efa_regs_reset_reason_types reset_reason)
+{
+ u32 stat, timeout, cap;
+ u32 reset_val = 0;
+ int err;
+
+ stat = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
+ cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
+
+ if (!EFA_GET(&stat, EFA_REGS_DEV_STS_READY)) {
+ ibdev_err(edev->efa_dev,
+ "Device isn't ready, can't reset device\n");
+ return -EINVAL;
+ }
+
+ timeout = EFA_GET(&cap, EFA_REGS_CAPS_RESET_TIMEOUT);
+ if (!timeout) {
+ ibdev_err(edev->efa_dev, "Invalid timeout value\n");
+ return -EINVAL;
+ }
+
+ /* start reset */
+ EFA_SET(&reset_val, EFA_REGS_DEV_CTL_DEV_RESET, 1);
+ EFA_SET(&reset_val, EFA_REGS_DEV_CTL_RESET_REASON, reset_reason);
+ writel(reset_val, edev->reg_bar + EFA_REGS_DEV_CTL_OFF);
+
+ /* reset clears the mmio readless address, restore it */
+ efa_com_mmio_reg_read_resp_addr_init(edev);
+
+ err = wait_for_reset_state(edev, timeout, 1);
+ if (err) {
+ ibdev_err(edev->efa_dev, "Reset indication didn't turn on\n");
+ return err;
+ }
+
+ /* reset done */
+ writel(0, edev->reg_bar + EFA_REGS_DEV_CTL_OFF);
+ err = wait_for_reset_state(edev, timeout, 0);
+ if (err) {
+ ibdev_err(edev->efa_dev, "Reset indication didn't turn off\n");
+ return err;
+ }
+
+ timeout = EFA_GET(&cap, EFA_REGS_CAPS_ADMIN_CMD_TO);
+ if (timeout)
+ /* the resolution of timeout reg is 100ms */
+ edev->aq.completion_timeout = timeout * 100000;
+ else
+ edev->aq.completion_timeout = ADMIN_CMD_TIMEOUT_US;
+
+ return 0;
+}
+
+static int efa_com_create_eq(struct efa_com_dev *edev,
+ struct efa_com_create_eq_params *params,
+ struct efa_com_create_eq_result *result)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_create_eq_resp resp = {};
+ struct efa_admin_create_eq_cmd cmd = {};
+ int err;
+
+ cmd.aq_common_descriptor.opcode = EFA_ADMIN_CREATE_EQ;
+ EFA_SET(&cmd.caps, EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS,
+ params->entry_size_in_bytes / 4);
+ cmd.depth = params->depth;
+ cmd.event_bitmask = params->event_bitmask;
+ cmd.msix_vec = params->msix_vec;
+
+ efa_com_set_dma_addr(params->dma_addr, &cmd.ba.mem_addr_high,
+ &cmd.ba.mem_addr_low);
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct efa_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to create eq[%d]\n", err);
+ return err;
+ }
+
+ result->eqn = resp.eqn;
+
+ return 0;
+}
+
+static void efa_com_destroy_eq(struct efa_com_dev *edev,
+ struct efa_com_destroy_eq_params *params)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_destroy_eq_resp resp = {};
+ struct efa_admin_destroy_eq_cmd cmd = {};
+ int err;
+
+ cmd.aq_common_descriptor.opcode = EFA_ADMIN_DESTROY_EQ;
+ cmd.eqn = params->eqn;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct efa_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (err)
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to destroy EQ-%u [%d]\n", cmd.eqn,
+ err);
+}
+
+static void efa_com_arm_eq(struct efa_com_dev *edev, struct efa_com_eq *eeq)
+{
+ u32 val = 0;
+
+ EFA_SET(&val, EFA_REGS_EQ_DB_EQN, eeq->eqn);
+ EFA_SET(&val, EFA_REGS_EQ_DB_ARM, 1);
+
+ writel(val, edev->reg_bar + EFA_REGS_EQ_DB_OFF);
+}
+
+void efa_com_eq_comp_intr_handler(struct efa_com_dev *edev,
+ struct efa_com_eq *eeq)
+{
+ struct efa_admin_eqe *eqe;
+ u32 processed = 0;
+ u8 phase;
+ u32 ci;
+
+ ci = eeq->cc & (eeq->depth - 1);
+ phase = eeq->phase;
+ eqe = &eeq->eqes[ci];
+
+ /* Go over all the events */
+ while ((READ_ONCE(eqe->common) & EFA_ADMIN_EQE_PHASE_MASK) == phase) {
+ /*
+ * Do not read the rest of the completion entry before the
+ * phase bit was validated
+ */
+ dma_rmb();
+
+ eeq->cb(eeq, eqe);
+
+ /* Get next event entry */
+ ci++;
+ processed++;
+
+ if (ci == eeq->depth) {
+ ci = 0;
+ phase = !phase;
+ }
+
+ eqe = &eeq->eqes[ci];
+ }
+
+ eeq->cc += processed;
+ eeq->phase = phase;
+ efa_com_arm_eq(eeq->edev, eeq);
+}
+
+void efa_com_eq_destroy(struct efa_com_dev *edev, struct efa_com_eq *eeq)
+{
+ struct efa_com_destroy_eq_params params = {
+ .eqn = eeq->eqn,
+ };
+
+ efa_com_destroy_eq(edev, &params);
+ dma_free_coherent(edev->dmadev, eeq->depth * sizeof(*eeq->eqes),
+ eeq->eqes, eeq->dma_addr);
+}
+
+int efa_com_eq_init(struct efa_com_dev *edev, struct efa_com_eq *eeq,
+ efa_eqe_handler cb, u16 depth, u8 msix_vec)
+{
+ struct efa_com_create_eq_params params = {};
+ struct efa_com_create_eq_result result = {};
+ int err;
+
+ params.depth = depth;
+ params.entry_size_in_bytes = sizeof(*eeq->eqes);
+ EFA_SET(&params.event_bitmask,
+ EFA_ADMIN_CREATE_EQ_CMD_COMPLETION_EVENTS, 1);
+ params.msix_vec = msix_vec;
+
+ eeq->eqes = dma_alloc_coherent(edev->dmadev,
+ params.depth * sizeof(*eeq->eqes),
+ &params.dma_addr, GFP_KERNEL);
+ if (!eeq->eqes)
+ return -ENOMEM;
+
+ err = efa_com_create_eq(edev, &params, &result);
+ if (err)
+ goto err_free_coherent;
+
+ eeq->eqn = result.eqn;
+ eeq->edev = edev;
+ eeq->dma_addr = params.dma_addr;
+ eeq->phase = 1;
+ eeq->depth = params.depth;
+ eeq->cb = cb;
+ efa_com_arm_eq(edev, eeq);
+
+ return 0;
+
+err_free_coherent:
+ dma_free_coherent(edev->dmadev, params.depth * sizeof(*eeq->eqes),
+ eeq->eqes, params.dma_addr);
+ return err;
+}
diff --git a/drivers/infiniband/hw/efa/efa_com.h b/drivers/infiniband/hw/efa/efa_com.h
new file mode 100644
index 000000000000..4d9ca97e4296
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa_com.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef _EFA_COM_H_
+#define _EFA_COM_H_
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/semaphore.h>
+#include <linux/sched.h>
+
+#include <rdma/ib_verbs.h>
+
+#include "efa_common_defs.h"
+#include "efa_admin_defs.h"
+#include "efa_admin_cmds_defs.h"
+#include "efa_regs_defs.h"
+
+#define EFA_MAX_HANDLERS 256
+
+struct efa_com_admin_cq {
+ struct efa_admin_acq_entry *entries;
+ dma_addr_t dma_addr;
+ spinlock_t lock; /* Protects ACQ */
+
+ u16 cc; /* consumer counter */
+ u8 phase;
+};
+
+struct efa_com_admin_sq {
+ struct efa_admin_aq_entry *entries;
+ dma_addr_t dma_addr;
+ spinlock_t lock; /* Protects ASQ */
+
+ u32 __iomem *db_addr;
+
+ u16 cc; /* consumer counter */
+ u16 pc; /* producer counter */
+ u8 phase;
+
+};
+
+/* Must contain only atomic64_t members; iterated as an array of atomic64_t */
+struct efa_com_stats_admin {
+ atomic64_t submitted_cmd;
+ atomic64_t completed_cmd;
+ atomic64_t cmd_err;
+ atomic64_t no_completion;
+};
+
+enum {
+ EFA_AQ_STATE_RUNNING_BIT = 0,
+ EFA_AQ_STATE_POLLING_BIT = 1,
+};
+
+struct efa_com_admin_queue {
+ void *dmadev;
+ void *efa_dev;
+ struct efa_comp_ctx *comp_ctx;
+ u32 completion_timeout; /* usecs */
+ u16 poll_interval; /* msecs */
+ u16 depth;
+ struct efa_com_admin_cq cq;
+ struct efa_com_admin_sq sq;
+ u32 msix_vector_idx;
+
+ unsigned long state;
+
+ /* Count the number of available admin commands */
+ struct semaphore avail_cmds;
+
+ struct efa_com_stats_admin stats;
+
+ spinlock_t comp_ctx_lock; /* Protects completion context pool */
+ u32 *comp_ctx_pool;
+ u16 comp_ctx_pool_next;
+};
+
+struct efa_aenq_handlers;
+struct efa_com_eq;
+typedef void (*efa_eqe_handler)(struct efa_com_eq *eeq,
+ struct efa_admin_eqe *eqe);
+
+struct efa_com_aenq {
+ struct efa_admin_aenq_entry *entries;
+ struct efa_aenq_handlers *aenq_handlers;
+ dma_addr_t dma_addr;
+ u32 cc; /* consumer counter */
+ u32 msix_vector_idx;
+ u16 depth;
+ u8 phase;
+};
+
+struct efa_com_mmio_read {
+ struct efa_admin_mmio_req_read_less_resp *read_resp;
+ dma_addr_t read_resp_dma_addr;
+ u16 seq_num;
+ u16 mmio_read_timeout; /* usecs */
+ /* serializes mmio reads */
+ spinlock_t lock;
+};
+
+struct efa_com_dev {
+ struct efa_com_admin_queue aq;
+ struct efa_com_aenq aenq;
+ u8 __iomem *reg_bar;
+ void *dmadev;
+ void *efa_dev;
+ u32 supported_features;
+ u32 dma_addr_bits;
+
+ struct efa_com_mmio_read mmio_read;
+};
+
+struct efa_com_eq {
+ struct efa_com_dev *edev;
+ struct efa_admin_eqe *eqes;
+ dma_addr_t dma_addr;
+ u32 cc; /* Consumer counter */
+ u16 eqn;
+ u16 depth;
+ u8 phase;
+ efa_eqe_handler cb;
+};
+
+struct efa_com_create_eq_params {
+ dma_addr_t dma_addr;
+ u32 event_bitmask;
+ u16 depth;
+ u8 entry_size_in_bytes;
+ u8 msix_vec;
+};
+
+struct efa_com_create_eq_result {
+ u16 eqn;
+};
+
+struct efa_com_destroy_eq_params {
+ u16 eqn;
+};
+
+typedef void (*efa_aenq_handler)(void *data,
+ struct efa_admin_aenq_entry *aenq_e);
+
+/* Holds aenq handlers. Indexed by AENQ event group */
+struct efa_aenq_handlers {
+ efa_aenq_handler handlers[EFA_MAX_HANDLERS];
+ efa_aenq_handler unimplemented_handler;
+};
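A hedged sketch of how a caller might populate this table before efa_com_admin_init(); the handler bodies and names are placeholders, only the indexing by enum efa_admin_aenq_group is taken from this series:

/* Illustrative only: AENQ handler table indexed by event group. */
static void example_keep_alive_handler(void *data,
				       struct efa_admin_aenq_entry *aenq_e)
{
	/* e.g. note that the device is alive */
}

static void example_unimplemented_handler(void *data,
					  struct efa_admin_aenq_entry *aenq_e)
{
	/* events with no registered handler end up here */
}

static struct efa_aenq_handlers example_aenq_handlers = {
	.handlers = {
		[EFA_ADMIN_KEEP_ALIVE] = example_keep_alive_handler,
	},
	.unimplemented_handler = example_unimplemented_handler,
};

/* passed as the aenq_handlers argument of efa_com_admin_init() */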
+
+void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low);
+int efa_com_admin_init(struct efa_com_dev *edev,
+ struct efa_aenq_handlers *aenq_handlers);
+void efa_com_admin_destroy(struct efa_com_dev *edev);
+int efa_com_eq_init(struct efa_com_dev *edev, struct efa_com_eq *eeq,
+ efa_eqe_handler cb, u16 depth, u8 msix_vec);
+void efa_com_eq_destroy(struct efa_com_dev *edev, struct efa_com_eq *eeq);
+int efa_com_dev_reset(struct efa_com_dev *edev,
+ enum efa_regs_reset_reason_types reset_reason);
+void efa_com_set_admin_polling_mode(struct efa_com_dev *edev, bool polling);
+void efa_com_admin_q_comp_intr_handler(struct efa_com_dev *edev);
+int efa_com_mmio_reg_read_init(struct efa_com_dev *edev);
+void efa_com_mmio_reg_read_destroy(struct efa_com_dev *edev);
+
+int efa_com_validate_version(struct efa_com_dev *edev);
+int efa_com_get_dma_width(struct efa_com_dev *edev);
+
+int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
+ struct efa_admin_aq_entry *cmd,
+ size_t cmd_size,
+ struct efa_admin_acq_entry *comp,
+ size_t comp_size);
+void efa_com_aenq_intr_handler(struct efa_com_dev *edev, void *data);
+void efa_com_eq_comp_intr_handler(struct efa_com_dev *edev,
+ struct efa_com_eq *eeq);
+
+#endif /* _EFA_COM_H_ */
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
new file mode 100644
index 000000000000..9ead02800ac7
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -0,0 +1,838 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#include "efa_com.h"
+#include "efa_com_cmd.h"
+
+int efa_com_create_qp(struct efa_com_dev *edev,
+ struct efa_com_create_qp_params *params,
+ struct efa_com_create_qp_result *res)
+{
+ struct efa_admin_create_qp_cmd create_qp_cmd = {};
+ struct efa_admin_create_qp_resp cmd_completion;
+ struct efa_com_admin_queue *aq = &edev->aq;
+ int err;
+
+ create_qp_cmd.aq_common_desc.opcode = EFA_ADMIN_CREATE_QP;
+
+ create_qp_cmd.pd = params->pd;
+ create_qp_cmd.qp_type = params->qp_type;
+ create_qp_cmd.rq_base_addr = params->rq_base_addr;
+ create_qp_cmd.send_cq_idx = params->send_cq_idx;
+ create_qp_cmd.recv_cq_idx = params->recv_cq_idx;
+ create_qp_cmd.qp_alloc_size.send_queue_ring_size =
+ params->sq_ring_size_in_bytes;
+ create_qp_cmd.qp_alloc_size.send_queue_depth =
+ params->sq_depth;
+ create_qp_cmd.qp_alloc_size.recv_queue_ring_size =
+ params->rq_ring_size_in_bytes;
+ create_qp_cmd.qp_alloc_size.recv_queue_depth =
+ params->rq_depth;
+ create_qp_cmd.uar = params->uarn;
+ create_qp_cmd.sl = params->sl;
+
+ if (params->unsolicited_write_recv)
+ EFA_SET(&create_qp_cmd.flags, EFA_ADMIN_CREATE_QP_CMD_UNSOLICITED_WRITE_RECV, 1);
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&create_qp_cmd,
+ sizeof(create_qp_cmd),
+ (struct efa_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to create qp [%d]\n", err);
+ return err;
+ }
+
+ res->qp_handle = cmd_completion.qp_handle;
+ res->qp_num = cmd_completion.qp_num;
+ res->sq_db_offset = cmd_completion.sq_db_offset;
+ res->rq_db_offset = cmd_completion.rq_db_offset;
+ res->llq_descriptors_offset = cmd_completion.llq_descriptors_offset;
+ res->send_sub_cq_idx = cmd_completion.send_sub_cq_idx;
+ res->recv_sub_cq_idx = cmd_completion.recv_sub_cq_idx;
+
+ return 0;
+}
+
+int efa_com_modify_qp(struct efa_com_dev *edev,
+ struct efa_com_modify_qp_params *params)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_modify_qp_cmd cmd = {};
+ struct efa_admin_modify_qp_resp resp;
+ int err;
+
+ cmd.aq_common_desc.opcode = EFA_ADMIN_MODIFY_QP;
+ cmd.modify_mask = params->modify_mask;
+ cmd.qp_handle = params->qp_handle;
+ cmd.qp_state = params->qp_state;
+ cmd.cur_qp_state = params->cur_qp_state;
+ cmd.qkey = params->qkey;
+ cmd.sq_psn = params->sq_psn;
+ cmd.sq_drained_async_notify = params->sq_drained_async_notify;
+ cmd.rnr_retry = params->rnr_retry;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct efa_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (err) {
+ ibdev_err_ratelimited(
+ edev->efa_dev,
+ "Failed to modify qp-%u modify_mask[%#x] [%d]\n",
+ cmd.qp_handle, cmd.modify_mask, err);
+ return err;
+ }
+
+ return 0;
+}
+
+int efa_com_query_qp(struct efa_com_dev *edev,
+ struct efa_com_query_qp_params *params,
+ struct efa_com_query_qp_result *result)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_query_qp_cmd cmd = {};
+ struct efa_admin_query_qp_resp resp;
+ int err;
+
+ cmd.aq_common_desc.opcode = EFA_ADMIN_QUERY_QP;
+ cmd.qp_handle = params->qp_handle;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct efa_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to query qp-%u [%d]\n",
+ cmd.qp_handle, err);
+ return err;
+ }
+
+ result->qp_state = resp.qp_state;
+ result->qkey = resp.qkey;
+ result->sq_draining = resp.sq_draining;
+ result->sq_psn = resp.sq_psn;
+ result->rnr_retry = resp.rnr_retry;
+
+ return 0;
+}
+
+int efa_com_destroy_qp(struct efa_com_dev *edev,
+ struct efa_com_destroy_qp_params *params)
+{
+ struct efa_admin_destroy_qp_resp cmd_completion;
+ struct efa_admin_destroy_qp_cmd qp_cmd = {};
+ struct efa_com_admin_queue *aq = &edev->aq;
+ int err;
+
+ qp_cmd.aq_common_desc.opcode = EFA_ADMIN_DESTROY_QP;
+ qp_cmd.qp_handle = params->qp_handle;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&qp_cmd,
+ sizeof(qp_cmd),
+ (struct efa_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to destroy qp-%u [%d]\n",
+ qp_cmd.qp_handle, err);
+ return err;
+ }
+
+ return 0;
+}
+
+int efa_com_create_cq(struct efa_com_dev *edev,
+ struct efa_com_create_cq_params *params,
+ struct efa_com_create_cq_result *result)
+{
+ struct efa_admin_create_cq_resp cmd_completion = {};
+ struct efa_admin_create_cq_cmd create_cmd = {};
+ struct efa_com_admin_queue *aq = &edev->aq;
+ int err;
+
+ create_cmd.aq_common_desc.opcode = EFA_ADMIN_CREATE_CQ;
+ EFA_SET(&create_cmd.cq_caps_2,
+ EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS,
+ params->entry_size_in_bytes / 4);
+ create_cmd.sub_cq_depth = params->sub_cq_depth;
+ create_cmd.num_sub_cqs = params->num_sub_cqs;
+ create_cmd.uar = params->uarn;
+ if (params->interrupt_mode_enabled) {
+ EFA_SET(&create_cmd.cq_caps_1,
+ EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED, 1);
+ create_cmd.eqn = params->eqn;
+ }
+ if (params->set_src_addr) {
+ EFA_SET(&create_cmd.cq_caps_2,
+ EFA_ADMIN_CREATE_CQ_CMD_SET_SRC_ADDR, 1);
+ }
+ efa_com_set_dma_addr(params->dma_addr,
+ &create_cmd.cq_ba.mem_addr_high,
+ &create_cmd.cq_ba.mem_addr_low);
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&create_cmd,
+ sizeof(create_cmd),
+ (struct efa_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to create cq[%d]\n", err);
+ return err;
+ }
+
+ result->cq_idx = cmd_completion.cq_idx;
+ result->actual_depth = params->sub_cq_depth;
+ result->db_off = cmd_completion.db_offset;
+ result->db_valid = EFA_GET(&cmd_completion.flags,
+ EFA_ADMIN_CREATE_CQ_RESP_DB_VALID);
+
+ return 0;
+}
+
+int efa_com_destroy_cq(struct efa_com_dev *edev,
+ struct efa_com_destroy_cq_params *params)
+{
+ struct efa_admin_destroy_cq_cmd destroy_cmd = {};
+ struct efa_admin_destroy_cq_resp destroy_resp;
+ struct efa_com_admin_queue *aq = &edev->aq;
+ int err;
+
+ destroy_cmd.cq_idx = params->cq_idx;
+ destroy_cmd.aq_common_desc.opcode = EFA_ADMIN_DESTROY_CQ;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&destroy_cmd,
+ sizeof(destroy_cmd),
+ (struct efa_admin_acq_entry *)&destroy_resp,
+ sizeof(destroy_resp));
+
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to destroy CQ-%u [%d]\n",
+ params->cq_idx, err);
+ return err;
+ }
+
+ return 0;
+}
+
+int efa_com_register_mr(struct efa_com_dev *edev,
+ struct efa_com_reg_mr_params *params,
+ struct efa_com_reg_mr_result *result)
+{
+ struct efa_admin_reg_mr_resp cmd_completion;
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_reg_mr_cmd mr_cmd = {};
+ int err;
+
+ mr_cmd.aq_common_desc.opcode = EFA_ADMIN_REG_MR;
+ mr_cmd.pd = params->pd;
+ mr_cmd.mr_length = params->mr_length_in_bytes;
+ EFA_SET(&mr_cmd.flags, EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT,
+ params->page_shift);
+ mr_cmd.iova = params->iova;
+ mr_cmd.permissions = params->permissions;
+
+ if (params->inline_pbl) {
+ memcpy(mr_cmd.pbl.inline_pbl_array,
+ params->pbl.inline_pbl_array,
+ sizeof(mr_cmd.pbl.inline_pbl_array));
+ } else {
+ mr_cmd.pbl.pbl.length = params->pbl.pbl.length;
+ mr_cmd.pbl.pbl.address.mem_addr_low =
+ params->pbl.pbl.address.mem_addr_low;
+ mr_cmd.pbl.pbl.address.mem_addr_high =
+ params->pbl.pbl.address.mem_addr_high;
+ EFA_SET(&mr_cmd.aq_common_desc.flags,
+ EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA, 1);
+ if (params->indirect)
+ EFA_SET(&mr_cmd.aq_common_desc.flags,
+ EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT, 1);
+ }
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&mr_cmd,
+ sizeof(mr_cmd),
+ (struct efa_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to register mr [%d]\n", err);
+ return err;
+ }
+
+ result->l_key = cmd_completion.l_key;
+ result->r_key = cmd_completion.r_key;
+ result->ic_info.recv_ic_id = cmd_completion.recv_ic_id;
+ result->ic_info.rdma_read_ic_id = cmd_completion.rdma_read_ic_id;
+ result->ic_info.rdma_recv_ic_id = cmd_completion.rdma_recv_ic_id;
+ result->ic_info.recv_ic_id_valid = EFA_GET(&cmd_completion.validity,
+ EFA_ADMIN_REG_MR_RESP_RECV_IC_ID);
+ result->ic_info.rdma_read_ic_id_valid = EFA_GET(&cmd_completion.validity,
+ EFA_ADMIN_REG_MR_RESP_RDMA_READ_IC_ID);
+ result->ic_info.rdma_recv_ic_id_valid = EFA_GET(&cmd_completion.validity,
+ EFA_ADMIN_REG_MR_RESP_RDMA_RECV_IC_ID);
+
+ return 0;
+}
+
+int efa_com_dereg_mr(struct efa_com_dev *edev,
+ struct efa_com_dereg_mr_params *params)
+{
+ struct efa_admin_dereg_mr_resp cmd_completion;
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_dereg_mr_cmd mr_cmd = {};
+ int err;
+
+ mr_cmd.aq_common_desc.opcode = EFA_ADMIN_DEREG_MR;
+ mr_cmd.l_key = params->l_key;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&mr_cmd,
+ sizeof(mr_cmd),
+ (struct efa_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to de-register mr(lkey-%u) [%d]\n",
+ mr_cmd.l_key, err);
+ return err;
+ }
+
+ return 0;
+}
+
+int efa_com_create_ah(struct efa_com_dev *edev,
+ struct efa_com_create_ah_params *params,
+ struct efa_com_create_ah_result *result)
+{
+ struct efa_admin_create_ah_resp cmd_completion;
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_create_ah_cmd ah_cmd = {};
+ int err;
+
+ ah_cmd.aq_common_desc.opcode = EFA_ADMIN_CREATE_AH;
+
+ memcpy(ah_cmd.dest_addr, params->dest_addr, sizeof(ah_cmd.dest_addr));
+ ah_cmd.pd = params->pdn;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&ah_cmd,
+ sizeof(ah_cmd),
+ (struct efa_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to create ah for %pI6 [%d]\n",
+ ah_cmd.dest_addr, err);
+ return err;
+ }
+
+ result->ah = cmd_completion.ah;
+
+ return 0;
+}
+
+int efa_com_destroy_ah(struct efa_com_dev *edev,
+ struct efa_com_destroy_ah_params *params)
+{
+ struct efa_admin_destroy_ah_resp cmd_completion;
+ struct efa_admin_destroy_ah_cmd ah_cmd = {};
+ struct efa_com_admin_queue *aq = &edev->aq;
+ int err;
+
+ ah_cmd.aq_common_desc.opcode = EFA_ADMIN_DESTROY_AH;
+ ah_cmd.ah = params->ah;
+ ah_cmd.pd = params->pdn;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&ah_cmd,
+ sizeof(ah_cmd),
+ (struct efa_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to destroy ah-%d pd-%d [%d]\n",
+ ah_cmd.ah, ah_cmd.pd, err);
+ return err;
+ }
+
+ return 0;
+}
+
+bool
+efa_com_check_supported_feature_id(struct efa_com_dev *edev,
+ enum efa_admin_aq_feature_id feature_id)
+{
+ u32 feature_mask = 1 << feature_id;
+
+ /* Device attributes are always supported */
+ if (feature_id != EFA_ADMIN_DEVICE_ATTR &&
+ !(edev->supported_features & feature_mask))
+ return false;
+
+ return true;
+}
+
+static int efa_com_get_feature_ex(struct efa_com_dev *edev,
+ struct efa_admin_get_feature_resp *get_resp,
+ enum efa_admin_aq_feature_id feature_id,
+ dma_addr_t control_buf_dma_addr,
+ u32 control_buff_size)
+{
+ struct efa_admin_get_feature_cmd get_cmd = {};
+ struct efa_com_admin_queue *aq;
+ int err;
+
+ if (!efa_com_check_supported_feature_id(edev, feature_id)) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Feature %d isn't supported\n",
+ feature_id);
+ return -EOPNOTSUPP;
+ }
+
+ aq = &edev->aq;
+
+ get_cmd.aq_common_descriptor.opcode = EFA_ADMIN_GET_FEATURE;
+
+ if (control_buff_size)
+ EFA_SET(&get_cmd.aq_common_descriptor.flags,
+ EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA, 1);
+
+ efa_com_set_dma_addr(control_buf_dma_addr,
+ &get_cmd.control_buffer.address.mem_addr_high,
+ &get_cmd.control_buffer.address.mem_addr_low);
+
+ get_cmd.control_buffer.length = control_buff_size;
+ get_cmd.feature_common.feature_id = feature_id;
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)
+ &get_cmd,
+ sizeof(get_cmd),
+ (struct efa_admin_acq_entry *)
+ get_resp,
+ sizeof(*get_resp));
+
+ if (err) {
+ ibdev_err_ratelimited(
+ edev->efa_dev,
+ "Failed to submit get_feature command %d [%d]\n",
+ feature_id, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int efa_com_get_feature(struct efa_com_dev *edev,
+ struct efa_admin_get_feature_resp *get_resp,
+ enum efa_admin_aq_feature_id feature_id)
+{
+ return efa_com_get_feature_ex(edev, get_resp, feature_id, 0, 0);
+}
+
+int efa_com_get_device_attr(struct efa_com_dev *edev,
+ struct efa_com_get_device_attr_result *result)
+{
+ struct efa_admin_get_feature_resp resp;
+ int err;
+
+ err = efa_com_get_feature(edev, &resp, EFA_ADMIN_DEVICE_ATTR);
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to get device attributes %d\n",
+ err);
+ return err;
+ }
+
+ result->page_size_cap = resp.u.device_attr.page_size_cap;
+ result->fw_version = resp.u.device_attr.fw_version;
+ result->admin_api_version = resp.u.device_attr.admin_api_version;
+ result->device_version = resp.u.device_attr.device_version;
+ result->supported_features = resp.u.device_attr.supported_features;
+ result->phys_addr_width = resp.u.device_attr.phys_addr_width;
+ result->virt_addr_width = resp.u.device_attr.virt_addr_width;
+ result->db_bar = resp.u.device_attr.db_bar;
+ result->max_rdma_size = resp.u.device_attr.max_rdma_size;
+ result->device_caps = resp.u.device_attr.device_caps;
+ result->guid = resp.u.device_attr.guid;
+ result->max_link_speed_gbps = resp.u.device_attr.max_link_speed_gbps;
+
+ if (result->admin_api_version < 1) {
+ ibdev_err_ratelimited(
+ edev->efa_dev,
+ "Failed to get device attr api version [%u < 1]\n",
+ result->admin_api_version);
+ return -EINVAL;
+ }
+
+ edev->supported_features = resp.u.device_attr.supported_features;
+ err = efa_com_get_feature(edev, &resp,
+ EFA_ADMIN_QUEUE_ATTR);
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to get queue attributes %d\n",
+ err);
+ return err;
+ }
+
+ result->max_qp = resp.u.queue_attr.max_qp;
+ result->max_sq_depth = resp.u.queue_attr.max_sq_depth;
+ result->max_rq_depth = resp.u.queue_attr.max_rq_depth;
+ result->max_cq = resp.u.queue_attr.max_cq;
+ result->max_cq_depth = resp.u.queue_attr.max_cq_depth;
+ result->inline_buf_size = resp.u.queue_attr.inline_buf_size;
+ result->max_sq_sge = resp.u.queue_attr.max_wr_send_sges;
+ result->max_rq_sge = resp.u.queue_attr.max_wr_recv_sges;
+ result->max_mr = resp.u.queue_attr.max_mr;
+ result->max_mr_pages = resp.u.queue_attr.max_mr_pages;
+ result->max_pd = resp.u.queue_attr.max_pd;
+ result->max_ah = resp.u.queue_attr.max_ah;
+ result->max_llq_size = resp.u.queue_attr.max_llq_size;
+ result->sub_cqs_per_cq = resp.u.queue_attr.sub_cqs_per_cq;
+ result->max_wr_rdma_sge = resp.u.queue_attr.max_wr_rdma_sges;
+ result->max_tx_batch = resp.u.queue_attr.max_tx_batch;
+ result->min_sq_depth = resp.u.queue_attr.min_sq_depth;
+
+ err = efa_com_get_feature(edev, &resp, EFA_ADMIN_NETWORK_ATTR);
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to get network attributes %d\n",
+ err);
+ return err;
+ }
+
+ memcpy(result->addr, resp.u.network_attr.addr,
+ sizeof(resp.u.network_attr.addr));
+ result->mtu = resp.u.network_attr.mtu;
+
+ if (efa_com_check_supported_feature_id(edev,
+ EFA_ADMIN_EVENT_QUEUE_ATTR)) {
+ err = efa_com_get_feature(edev, &resp,
+ EFA_ADMIN_EVENT_QUEUE_ATTR);
+ if (err) {
+ ibdev_err_ratelimited(
+ edev->efa_dev,
+ "Failed to get event queue attributes %d\n",
+ err);
+ return err;
+ }
+
+ result->max_eq = resp.u.event_queue_attr.max_eq;
+ result->max_eq_depth = resp.u.event_queue_attr.max_eq_depth;
+ result->event_bitmask = resp.u.event_queue_attr.event_bitmask;
+ }
+
+ return 0;
+}
+
+int efa_com_get_hw_hints(struct efa_com_dev *edev,
+ struct efa_com_get_hw_hints_result *result)
+{
+ struct efa_admin_get_feature_resp resp;
+ int err;
+
+ err = efa_com_get_feature(edev, &resp, EFA_ADMIN_HW_HINTS);
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to get hw hints %d\n", err);
+ return err;
+ }
+
+ result->admin_completion_timeout = resp.u.hw_hints.admin_completion_timeout;
+ result->driver_watchdog_timeout = resp.u.hw_hints.driver_watchdog_timeout;
+ result->mmio_read_timeout = resp.u.hw_hints.mmio_read_timeout;
+ result->poll_interval = resp.u.hw_hints.poll_interval;
+
+ return 0;
+}
+
+int efa_com_set_feature_ex(struct efa_com_dev *edev,
+ struct efa_admin_set_feature_resp *set_resp,
+ struct efa_admin_set_feature_cmd *set_cmd,
+ enum efa_admin_aq_feature_id feature_id,
+ dma_addr_t control_buf_dma_addr,
+ u32 control_buff_size)
+{
+ struct efa_com_admin_queue *aq;
+ int err;
+
+ if (!efa_com_check_supported_feature_id(edev, feature_id)) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Feature %d isn't supported\n",
+ feature_id);
+ return -EOPNOTSUPP;
+ }
+
+ aq = &edev->aq;
+
+ set_cmd->aq_common_descriptor.opcode = EFA_ADMIN_SET_FEATURE;
+ if (control_buff_size) {
+ set_cmd->aq_common_descriptor.flags = 0;
+ EFA_SET(&set_cmd->aq_common_descriptor.flags,
+ EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA, 1);
+ efa_com_set_dma_addr(control_buf_dma_addr,
+ &set_cmd->control_buffer.address.mem_addr_high,
+ &set_cmd->control_buffer.address.mem_addr_low);
+ }
+
+ set_cmd->control_buffer.length = control_buff_size;
+ set_cmd->feature_common.feature_id = feature_id;
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)set_cmd,
+ sizeof(*set_cmd),
+ (struct efa_admin_acq_entry *)set_resp,
+ sizeof(*set_resp));
+
+ if (err) {
+ ibdev_err_ratelimited(
+ edev->efa_dev,
+ "Failed to submit set_feature command %d error: %d\n",
+ feature_id, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int efa_com_set_feature(struct efa_com_dev *edev,
+ struct efa_admin_set_feature_resp *set_resp,
+ struct efa_admin_set_feature_cmd *set_cmd,
+ enum efa_admin_aq_feature_id feature_id)
+{
+ return efa_com_set_feature_ex(edev, set_resp, set_cmd, feature_id,
+ 0, 0);
+}
+
+int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups)
+{
+ struct efa_admin_get_feature_resp get_resp;
+ struct efa_admin_set_feature_resp set_resp;
+ struct efa_admin_set_feature_cmd cmd = {};
+ int err;
+
+ ibdev_dbg(edev->efa_dev, "Configuring aenq with groups[%#x]\n", groups);
+
+ err = efa_com_get_feature(edev, &get_resp, EFA_ADMIN_AENQ_CONFIG);
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to get aenq attributes: %d\n",
+ err);
+ return err;
+ }
+
+ ibdev_dbg(edev->efa_dev,
+ "Get aenq groups: supported[%#x] enabled[%#x]\n",
+ get_resp.u.aenq.supported_groups,
+ get_resp.u.aenq.enabled_groups);
+
+ if ((get_resp.u.aenq.supported_groups & groups) != groups) {
+ ibdev_err_ratelimited(
+ edev->efa_dev,
+ "Trying to set unsupported aenq groups[%#x] supported[%#x]\n",
+ groups, get_resp.u.aenq.supported_groups);
+ return -EOPNOTSUPP;
+ }
+
+ cmd.u.aenq.enabled_groups = groups;
+ err = efa_com_set_feature(edev, &set_resp, &cmd,
+ EFA_ADMIN_AENQ_CONFIG);
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to set aenq attributes: %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+int efa_com_alloc_pd(struct efa_com_dev *edev,
+ struct efa_com_alloc_pd_result *result)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_alloc_pd_cmd cmd = {};
+ struct efa_admin_alloc_pd_resp resp;
+ int err;
+
+ cmd.aq_common_descriptor.opcode = EFA_ADMIN_ALLOC_PD;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct efa_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to allocate pd[%d]\n", err);
+ return err;
+ }
+
+ result->pdn = resp.pd;
+
+ return 0;
+}
+
+int efa_com_dealloc_pd(struct efa_com_dev *edev,
+ struct efa_com_dealloc_pd_params *params)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_dealloc_pd_cmd cmd = {};
+ struct efa_admin_dealloc_pd_resp resp;
+ int err;
+
+ cmd.aq_common_descriptor.opcode = EFA_ADMIN_DEALLOC_PD;
+ cmd.pd = params->pdn;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct efa_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to deallocate pd-%u [%d]\n",
+ cmd.pd, err);
+ return err;
+ }
+
+ return 0;
+}
+
+int efa_com_alloc_uar(struct efa_com_dev *edev,
+ struct efa_com_alloc_uar_result *result)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_alloc_uar_cmd cmd = {};
+ struct efa_admin_alloc_uar_resp resp;
+ int err;
+
+ cmd.aq_common_descriptor.opcode = EFA_ADMIN_ALLOC_UAR;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct efa_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to allocate uar[%d]\n", err);
+ return err;
+ }
+
+ result->uarn = resp.uar;
+
+ return 0;
+}
+
+int efa_com_dealloc_uar(struct efa_com_dev *edev,
+ struct efa_com_dealloc_uar_params *params)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_dealloc_uar_cmd cmd = {};
+ struct efa_admin_dealloc_uar_resp resp;
+ int err;
+
+ cmd.aq_common_descriptor.opcode = EFA_ADMIN_DEALLOC_UAR;
+ cmd.uar = params->uarn;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct efa_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to deallocate uar-%u [%d]\n",
+ cmd.uar, err);
+ return err;
+ }
+
+ return 0;
+}
+
+int efa_com_get_stats(struct efa_com_dev *edev,
+ struct efa_com_get_stats_params *params,
+ union efa_com_get_stats_result *result)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_aq_get_stats_cmd cmd = {};
+ struct efa_admin_acq_get_stats_resp resp;
+ struct efa_admin_rdma_write_stats *rws;
+ struct efa_admin_rdma_read_stats *rrs;
+ struct efa_admin_messages_stats *ms;
+ struct efa_admin_network_stats *ns;
+ struct efa_admin_basic_stats *bs;
+ int err;
+
+ cmd.aq_common_descriptor.opcode = EFA_ADMIN_GET_STATS;
+ cmd.type = params->type;
+ cmd.scope = params->scope;
+ cmd.scope_modifier = params->scope_modifier;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct efa_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (err) {
+ ibdev_err_ratelimited(
+ edev->efa_dev,
+ "Failed to get stats type-%u scope-%u.%u [%d]\n",
+ cmd.type, cmd.scope, cmd.scope_modifier, err);
+ return err;
+ }
+
+ switch (cmd.type) {
+ case EFA_ADMIN_GET_STATS_TYPE_BASIC:
+ bs = &resp.u.basic_stats;
+ result->basic_stats.tx_bytes = bs->tx_bytes;
+ result->basic_stats.tx_pkts = bs->tx_pkts;
+ result->basic_stats.rx_bytes = bs->rx_bytes;
+ result->basic_stats.rx_pkts = bs->rx_pkts;
+ result->basic_stats.rx_drops = bs->rx_drops;
+ break;
+ case EFA_ADMIN_GET_STATS_TYPE_MESSAGES:
+ ms = &resp.u.messages_stats;
+ result->messages_stats.send_bytes = ms->send_bytes;
+ result->messages_stats.send_wrs = ms->send_wrs;
+ result->messages_stats.recv_bytes = ms->recv_bytes;
+ result->messages_stats.recv_wrs = ms->recv_wrs;
+ break;
+ case EFA_ADMIN_GET_STATS_TYPE_RDMA_READ:
+ rrs = &resp.u.rdma_read_stats;
+ result->rdma_read_stats.read_wrs = rrs->read_wrs;
+ result->rdma_read_stats.read_bytes = rrs->read_bytes;
+ result->rdma_read_stats.read_wr_err = rrs->read_wr_err;
+ result->rdma_read_stats.read_resp_bytes = rrs->read_resp_bytes;
+ break;
+ case EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE:
+ rws = &resp.u.rdma_write_stats;
+ result->rdma_write_stats.write_wrs = rws->write_wrs;
+ result->rdma_write_stats.write_bytes = rws->write_bytes;
+ result->rdma_write_stats.write_wr_err = rws->write_wr_err;
+ result->rdma_write_stats.write_recv_bytes = rws->write_recv_bytes;
+ break;
+ case EFA_ADMIN_GET_STATS_TYPE_NETWORK:
+ ns = &resp.u.network_stats;
+ result->network_stats.retrans_bytes = ns->retrans_bytes;
+ result->network_stats.retrans_pkts = ns->retrans_pkts;
+ result->network_stats.retrans_timeout_events = ns->retrans_timeout_events;
+ result->network_stats.unresponsive_remote_events = ns->unresponsive_remote_events;
+ result->network_stats.impaired_remote_conn_events = ns->impaired_remote_conn_events;
+ break;
+ }
+
+ return 0;
+}
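efa_com_set_dma_addr(), defined earlier in efa_com_cmd.c and used above for the CQ base and for the get/set feature control buffers, splits a 64-bit DMA address into the 32-bit mem_addr_high/mem_addr_low words of the admin descriptors. A minimal standalone sketch of that split, illustrative only and not part of this patch (the helper name and sample address below are made up):

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch of the high/low split performed by
 * efa_com_set_dma_addr(); names and values are examples only.
 */
static void set_dma_addr(uint64_t addr, uint32_t *addr_high, uint32_t *addr_low)
{
	*addr_low = (uint32_t)(addr & 0xffffffffu);
	*addr_high = (uint32_t)(addr >> 32);
}

int main(void)
{
	uint32_t hi, lo;

	set_dma_addr(0x0000123456789abcULL, &hi, &lo);
	printf("high=0x%x low=0x%x\n", hi, lo); /* high=0x1234 low=0x56789abc */
	return 0;
}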
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h
new file mode 100644
index 000000000000..3ac2686abba1
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -0,0 +1,353 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef _EFA_COM_CMD_H_
+#define _EFA_COM_CMD_H_
+
+#include "efa_com.h"
+
+#define EFA_GID_SIZE 16
+
+struct efa_com_create_qp_params {
+ u64 rq_base_addr;
+ u32 send_cq_idx;
+ u32 recv_cq_idx;
+ /*
+ * Send descriptor ring size in bytes,
+ * sufficient for user-provided number of WQEs and SGL size
+ */
+ u32 sq_ring_size_in_bytes;
+ /* Max number of WQEs that will be posted on send queue */
+ u32 sq_depth;
+ /* Recv descriptor ring size in bytes */
+ u32 rq_ring_size_in_bytes;
+ u32 rq_depth;
+ u16 pd;
+ u16 uarn;
+ u8 qp_type;
+ u8 sl;
+ u8 unsolicited_write_recv : 1;
+};
+
+struct efa_com_create_qp_result {
+ u32 qp_handle;
+ u32 qp_num;
+ u32 sq_db_offset;
+ u32 rq_db_offset;
+ u32 llq_descriptors_offset;
+ u16 send_sub_cq_idx;
+ u16 recv_sub_cq_idx;
+};
+
+struct efa_com_modify_qp_params {
+ u32 modify_mask;
+ u32 qp_handle;
+ u32 qp_state;
+ u32 cur_qp_state;
+ u32 qkey;
+ u32 sq_psn;
+ u8 sq_drained_async_notify;
+ u8 rnr_retry;
+};
+
+struct efa_com_query_qp_params {
+ u32 qp_handle;
+};
+
+struct efa_com_query_qp_result {
+ u32 qp_state;
+ u32 qkey;
+ u32 sq_draining;
+ u32 sq_psn;
+ u8 rnr_retry;
+};
+
+struct efa_com_destroy_qp_params {
+ u32 qp_handle;
+};
+
+struct efa_com_create_cq_params {
+ /* cq physical base address in OS memory */
+ dma_addr_t dma_addr;
+ /* completion queue depth in # of entries */
+ u16 sub_cq_depth;
+ u16 num_sub_cqs;
+ u16 uarn;
+ u16 eqn;
+ u8 entry_size_in_bytes;
+ u8 interrupt_mode_enabled : 1;
+ u8 set_src_addr : 1;
+};
+
+struct efa_com_create_cq_result {
+ /* cq identifier */
+ u16 cq_idx;
+ /* actual cq depth in # of entries */
+ u16 actual_depth;
+ u32 db_off;
+ bool db_valid;
+};
+
+struct efa_com_destroy_cq_params {
+ u16 cq_idx;
+};
+
+struct efa_com_create_ah_params {
+ u16 pdn;
+ /* Destination address in network byte order */
+ u8 dest_addr[EFA_GID_SIZE];
+};
+
+struct efa_com_create_ah_result {
+ u16 ah;
+};
+
+struct efa_com_destroy_ah_params {
+ u16 ah;
+ u16 pdn;
+};
+
+struct efa_com_get_device_attr_result {
+ u8 addr[EFA_GID_SIZE];
+ u64 page_size_cap;
+ u64 max_mr_pages;
+ u64 guid;
+ u32 mtu;
+ u32 fw_version;
+ u32 admin_api_version;
+ u32 device_version;
+ u32 supported_features;
+ u32 phys_addr_width;
+ u32 virt_addr_width;
+ u32 max_qp;
+ u32 max_sq_depth; /* wqes */
+ u32 max_rq_depth; /* wqes */
+ u32 max_cq;
+ u32 max_cq_depth; /* cqes */
+ u32 inline_buf_size;
+ u32 max_mr;
+ u32 max_pd;
+ u32 max_ah;
+ u32 max_llq_size;
+ u32 max_rdma_size;
+ u32 device_caps;
+ u32 max_eq;
+ u32 max_eq_depth;
+ u32 event_bitmask; /* EQ events bitmask */
+ u16 sub_cqs_per_cq;
+ u16 max_sq_sge;
+ u16 max_rq_sge;
+ u16 max_wr_rdma_sge;
+ u16 max_tx_batch;
+ u16 min_sq_depth;
+ u16 max_link_speed_gbps;
+ u8 db_bar;
+};
+
+struct efa_com_get_hw_hints_result {
+ u16 mmio_read_timeout;
+ u16 driver_watchdog_timeout;
+ u16 admin_completion_timeout;
+ u16 poll_interval;
+ u32 reserved[4];
+};
+
+struct efa_com_mem_addr {
+ u32 mem_addr_low;
+ u32 mem_addr_high;
+};
+
+/* Used in indirect mode for chaining page list chunks */
+struct efa_com_ctrl_buff_info {
+ /* indicates length of the buffer pointed to by control_buffer_address. */
+ u32 length;
+ /* points to control buffer (direct or indirect) */
+ struct efa_com_mem_addr address;
+};
+
+struct efa_com_reg_mr_params {
+ /* Memory region length, in bytes. */
+ u64 mr_length_in_bytes;
+ /* IO Virtual Address associated with this MR. */
+ u64 iova;
+ /* words 8:15: Physical Buffer List, each element is page-aligned. */
+ union {
+ /*
+ * Inline array of physical addresses of app pages
+ * (optimization for short region reservations)
+ */
+ u64 inline_pbl_array[4];
+ /*
+ * Describes the next physically contiguous chunk of indirect
+ * page list. A page list contains physical addresses of command
+ * data pages. Data pages are 4KB; page list chunks are
+ * variable-sized.
+ */
+ struct efa_com_ctrl_buff_info pbl;
+ } pbl;
+ /* number of pages in PBL (redundant, could be calculated) */
+ u32 page_num;
+ /* Protection Domain */
+ u16 pd;
+ /*
+ * phys_page_size_shift - page size is (1 << phys_page_size_shift)
+ * Page size is used for building the Virtual to Physical
+ * address mapping
+ */
+ u8 page_shift;
+ /* see permissions field of struct efa_admin_reg_mr_cmd */
+ u8 permissions;
+ u8 inline_pbl;
+ u8 indirect;
+};
+
+struct efa_com_mr_interconnect_info {
+ u16 recv_ic_id;
+ u16 rdma_read_ic_id;
+ u16 rdma_recv_ic_id;
+ u8 recv_ic_id_valid : 1;
+ u8 rdma_read_ic_id_valid : 1;
+ u8 rdma_recv_ic_id_valid : 1;
+};
+
+struct efa_com_reg_mr_result {
+ /*
+ * To be used in conjunction with local buffer references in SQ and
+ * RQ WQEs
+ */
+ u32 l_key;
+ /*
+ * To be used in incoming RDMA semantics messages to refer to the
+ * remotely accessed memory region
+ */
+ u32 r_key;
+ struct efa_com_mr_interconnect_info ic_info;
+};
+
+struct efa_com_dereg_mr_params {
+ u32 l_key;
+};
+
+struct efa_com_alloc_pd_result {
+ u16 pdn;
+};
+
+struct efa_com_dealloc_pd_params {
+ u16 pdn;
+};
+
+struct efa_com_alloc_uar_result {
+ u16 uarn;
+};
+
+struct efa_com_dealloc_uar_params {
+ u16 uarn;
+};
+
+struct efa_com_get_stats_params {
+ /* see enum efa_admin_get_stats_type */
+ u8 type;
+ /* see enum efa_admin_get_stats_scope */
+ u8 scope;
+ u16 scope_modifier;
+};
+
+struct efa_com_basic_stats {
+ u64 tx_bytes;
+ u64 tx_pkts;
+ u64 rx_bytes;
+ u64 rx_pkts;
+ u64 rx_drops;
+};
+
+struct efa_com_messages_stats {
+ u64 send_bytes;
+ u64 send_wrs;
+ u64 recv_bytes;
+ u64 recv_wrs;
+};
+
+struct efa_com_rdma_read_stats {
+ u64 read_wrs;
+ u64 read_bytes;
+ u64 read_wr_err;
+ u64 read_resp_bytes;
+};
+
+struct efa_com_rdma_write_stats {
+ u64 write_wrs;
+ u64 write_bytes;
+ u64 write_wr_err;
+ u64 write_recv_bytes;
+};
+
+struct efa_com_network_stats {
+ u64 retrans_bytes;
+ u64 retrans_pkts;
+ u64 retrans_timeout_events;
+ u64 unresponsive_remote_events;
+ u64 impaired_remote_conn_events;
+};
+
+union efa_com_get_stats_result {
+ struct efa_com_basic_stats basic_stats;
+ struct efa_com_messages_stats messages_stats;
+ struct efa_com_rdma_read_stats rdma_read_stats;
+ struct efa_com_rdma_write_stats rdma_write_stats;
+ struct efa_com_network_stats network_stats;
+};
+
+int efa_com_create_qp(struct efa_com_dev *edev,
+ struct efa_com_create_qp_params *params,
+ struct efa_com_create_qp_result *res);
+int efa_com_modify_qp(struct efa_com_dev *edev,
+ struct efa_com_modify_qp_params *params);
+int efa_com_query_qp(struct efa_com_dev *edev,
+ struct efa_com_query_qp_params *params,
+ struct efa_com_query_qp_result *result);
+int efa_com_destroy_qp(struct efa_com_dev *edev,
+ struct efa_com_destroy_qp_params *params);
+int efa_com_create_cq(struct efa_com_dev *edev,
+ struct efa_com_create_cq_params *params,
+ struct efa_com_create_cq_result *result);
+int efa_com_destroy_cq(struct efa_com_dev *edev,
+ struct efa_com_destroy_cq_params *params);
+int efa_com_register_mr(struct efa_com_dev *edev,
+ struct efa_com_reg_mr_params *params,
+ struct efa_com_reg_mr_result *result);
+int efa_com_dereg_mr(struct efa_com_dev *edev,
+ struct efa_com_dereg_mr_params *params);
+int efa_com_create_ah(struct efa_com_dev *edev,
+ struct efa_com_create_ah_params *params,
+ struct efa_com_create_ah_result *result);
+int efa_com_destroy_ah(struct efa_com_dev *edev,
+ struct efa_com_destroy_ah_params *params);
+int efa_com_get_device_attr(struct efa_com_dev *edev,
+ struct efa_com_get_device_attr_result *result);
+int efa_com_get_hw_hints(struct efa_com_dev *edev,
+ struct efa_com_get_hw_hints_result *result);
+bool
+efa_com_check_supported_feature_id(struct efa_com_dev *edev,
+ enum efa_admin_aq_feature_id feature_id);
+int efa_com_set_feature_ex(struct efa_com_dev *edev,
+ struct efa_admin_set_feature_resp *set_resp,
+ struct efa_admin_set_feature_cmd *set_cmd,
+ enum efa_admin_aq_feature_id feature_id,
+ dma_addr_t control_buf_dma_addr,
+ u32 control_buff_size);
+int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups);
+int efa_com_alloc_pd(struct efa_com_dev *edev,
+ struct efa_com_alloc_pd_result *result);
+int efa_com_dealloc_pd(struct efa_com_dev *edev,
+ struct efa_com_dealloc_pd_params *params);
+int efa_com_alloc_uar(struct efa_com_dev *edev,
+ struct efa_com_alloc_uar_result *result);
+int efa_com_dealloc_uar(struct efa_com_dev *edev,
+ struct efa_com_dealloc_uar_params *params);
+int efa_com_get_stats(struct efa_com_dev *edev,
+ struct efa_com_get_stats_params *params,
+ union efa_com_get_stats_result *result);
+
+#endif /* _EFA_COM_CMD_H_ */
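efa_com_check_supported_feature_id(), declared above and defined in efa_com_cmd.c, treats edev->supported_features as a bitmask with one bit per enum efa_admin_aq_feature_id value, with EFA_ADMIN_DEVICE_ATTR special-cased as always supported. A standalone sketch of the bitmask check, illustrative only (helper name and feature ids below are made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch: one bit per feature id, as consulted by
 * efa_com_check_supported_feature_id(). Feature ids are examples.
 */
static bool feature_supported(uint32_t supported_features, unsigned int feature_id)
{
	return supported_features & (1u << feature_id);
}

int main(void)
{
	uint32_t supported = (1u << 1) | (1u << 5);

	printf("%d %d\n", feature_supported(supported, 5),
	       feature_supported(supported, 3)); /* prints: 1 0 */
	return 0;
}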
diff --git a/drivers/infiniband/hw/efa/efa_common_defs.h b/drivers/infiniband/hw/efa/efa_common_defs.h
new file mode 100644
index 000000000000..90af1c82c9c6
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa_common_defs.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef _EFA_COMMON_H_
+#define _EFA_COMMON_H_
+
+#include <linux/bitfield.h>
+
+#define EFA_COMMON_SPEC_VERSION_MAJOR 2
+#define EFA_COMMON_SPEC_VERSION_MINOR 0
+
+#define EFA_GET(ptr, mask) FIELD_GET(mask##_MASK, *(ptr))
+
+#define EFA_SET(ptr, mask, value) \
+ ({ \
+ typeof(ptr) _ptr = ptr; \
+ *_ptr = (*_ptr & ~(mask##_MASK)) | \
+ FIELD_PREP(mask##_MASK, value); \
+ })
+
+struct efa_common_mem_addr {
+ u32 mem_addr_low;
+
+ u32 mem_addr_high;
+};
+
+#endif /* _EFA_COMMON_H_ */
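EFA_GET()/EFA_SET() above wrap the kernel's FIELD_GET()/FIELD_PREP() helpers: the *_MASK macro selects the field and the value is shifted into or out of place within the containing word. An open-coded, userspace-compilable equivalent for a made-up 4-bit field at bits 7:4 (illustrative only; EXAMPLE_MASK and the helpers are not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Illustrative open-coded equivalent of EFA_GET()/EFA_SET() for a
 * hypothetical field occupying bits 7:4 of a 32-bit word.
 */
#define EXAMPLE_MASK 0x000000f0u

static uint32_t example_get(uint32_t word)
{
	return (word & EXAMPLE_MASK) >> 4;
}

static void example_set(uint32_t *word, uint32_t value)
{
	*word = (*word & ~EXAMPLE_MASK) | ((value << 4) & EXAMPLE_MASK);
}

int main(void)
{
	uint32_t word = 0;

	example_set(&word, 0xa);
	printf("word=0x%x field=0x%x\n", word, example_get(word)); /* 0xa0, 0xa */
	return 0;
}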
diff --git a/drivers/infiniband/hw/efa/efa_io_defs.h b/drivers/infiniband/hw/efa/efa_io_defs.h
new file mode 100644
index 000000000000..a4c9fd33da38
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa_io_defs.h
@@ -0,0 +1,391 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef _EFA_IO_H_
+#define _EFA_IO_H_
+
+#define EFA_IO_TX_DESC_NUM_BUFS 2
+#define EFA_IO_TX_DESC_NUM_RDMA_BUFS 1
+#define EFA_IO_TX_DESC_INLINE_MAX_SIZE 32
+#define EFA_IO_TX_DESC_IMM_DATA_SIZE 4
+#define EFA_IO_TX_DESC_INLINE_PBL_SIZE 1
+
+enum efa_io_queue_type {
+ /* send queue (of a QP) */
+ EFA_IO_SEND_QUEUE = 1,
+ /* recv queue (of a QP) */
+ EFA_IO_RECV_QUEUE = 2,
+};
+
+enum efa_io_send_op_type {
+ /* send message */
+ EFA_IO_SEND = 0,
+ /* RDMA read */
+ EFA_IO_RDMA_READ = 1,
+ /* RDMA write */
+ EFA_IO_RDMA_WRITE = 2,
+ /* Fast MR registration */
+ EFA_IO_FAST_REG = 3,
+ /* Fast MR invalidation */
+ EFA_IO_FAST_INV = 4,
+};
+
+enum efa_io_comp_status {
+ /* Successful completion */
+ EFA_IO_COMP_STATUS_OK = 0,
+ /* Flushed during QP destroy */
+ EFA_IO_COMP_STATUS_FLUSHED = 1,
+ /* Internal QP error */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_QP_INTERNAL_ERROR = 2,
+ /* Unsupported operation */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_UNSUPPORTED_OP = 3,
+ /* Bad AH */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_INVALID_AH = 4,
+ /* LKEY not registered or does not match IOVA */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_INVALID_LKEY = 5,
+ /* Message too long */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_BAD_LENGTH = 6,
+ /* RKEY not registered or does not match remote IOVA */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_BAD_ADDRESS = 7,
+ /* Connection was reset by remote side */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_ABORT = 8,
+ /* Bad dest QP number (QP does not exist or is in error state) */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_BAD_DEST_QPN = 9,
+ /* Destination resource not ready (no WQEs posted on RQ) */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_RNR = 10,
+ /* Receiver SGL too short */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_BAD_LENGTH = 11,
+ /* Unexpected status returned by responder */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_BAD_STATUS = 12,
+ /* Unresponsive remote - was previously responsive */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_UNRESP_REMOTE = 13,
+ /* No valid AH at remote side (required for RDMA operations) */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_UNKNOWN_PEER = 14,
+ /* Unreachable remote - never received a response */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_UNREACH_REMOTE = 15,
+};
+
+enum efa_io_frwr_pbl_mode {
+ EFA_IO_FRWR_INLINE_PBL = 0,
+ EFA_IO_FRWR_DIRECT_PBL = 1,
+};
+
+struct efa_io_tx_meta_desc {
+ /* Verbs-generated Request ID */
+ u16 req_id;
+
+ /*
+ * control flags
+ * 3:0 : op_type - enum efa_io_send_op_type
+ * 4 : has_imm - immediate_data field carries valid
+ * data.
+ * 5 : inline_msg - inline mode - inline message data
+ * follows this descriptor (no buffer descriptors).
+ * Note that it is different from immediate data
+ * 6 : meta_extension - Extended metadata. MBZ
+ * 7 : meta_desc - Indicates metadata descriptor.
+ * Must be set.
+ */
+ u8 ctrl1;
+
+ /*
+ * control flags
+ * 0 : phase
+ * 1 : reserved25 - MBZ
+ * 2 : first - Indicates first descriptor in
+ * transaction. Must be set.
+ * 3 : last - Indicates last descriptor in
+ * transaction. Must be set.
+ * 4 : comp_req - Indicates whether a completion should
+ * be posted after the packet is transmitted. Valid only
+ * for the first descriptor
+ * 7:5 : reserved29 - MBZ
+ */
+ u8 ctrl2;
+
+ u16 dest_qp_num;
+
+ /*
+ * If inline_msg bit is set, length of inline message in bytes,
+ * otherwise length of SGL (number of buffers).
+ */
+ u16 length;
+
+ /*
+ * immediate data: if has_imm is set, then this field is included within
+ * Tx message and reported in remote Rx completion.
+ */
+ u32 immediate_data;
+
+ u16 ah;
+
+ u16 reserved;
+
+ /* Queue key */
+ u32 qkey;
+
+ u8 reserved2[12];
+};
+
+/*
+ * Tx queue buffer descriptor, for any transport type. Preceded by metadata
+ * descriptor.
+ */
+struct efa_io_tx_buf_desc {
+ /* length in bytes */
+ u32 length;
+
+ /*
+ * 23:0 : lkey - local memory translation key
+ * 31:24 : reserved - MBZ
+ */
+ u32 lkey;
+
+ /* Buffer address bits[31:0] */
+ u32 buf_addr_lo;
+
+ /* Buffer address bits[63:32] */
+ u32 buf_addr_hi;
+};
+
+struct efa_io_remote_mem_addr {
+ /* length in bytes */
+ u32 length;
+
+ /* remote memory translation key */
+ u32 rkey;
+
+ /* Buffer address bits[31:0] */
+ u32 buf_addr_lo;
+
+ /* Buffer address bits[63:32] */
+ u32 buf_addr_hi;
+};
+
+struct efa_io_rdma_req {
+ /* Remote memory address */
+ struct efa_io_remote_mem_addr remote_mem;
+
+ /* Local memory address */
+ struct efa_io_tx_buf_desc local_mem[1];
+};
+
+struct efa_io_fast_mr_reg_req {
+ /* Updated local key of the MR after lkey/rkey increment */
+ u32 lkey;
+
+ /*
+ * permissions
+ * 0 : local_write_enable - Local write permissions:
+ * must be set for RQ buffers and buffers posted for
+ * RDMA Read requests
+ * 1 : remote_write_enable - Remote write
+ * permissions: must be set to enable RDMA write to
+ * the region
+ * 2 : remote_read_enable - Remote read permissions:
+ * must be set to enable RDMA read from the region
+ * 7:3 : reserved2 - MBZ
+ */
+ u8 permissions;
+
+ /*
+ * control flags
+ * 4:0 : phys_page_size_shift - page size is (1 <<
+ * phys_page_size_shift)
+ * 6:5 : pbl_mode - enum efa_io_frwr_pbl_mode
+ * 7 : reserved - MBZ
+ */
+ u8 flags;
+
+ /* MBZ */
+ u8 reserved[2];
+
+ /* IO Virtual Address associated with this MR */
+ u64 iova;
+
+ /* Memory region length, in bytes */
+ u64 mr_length;
+
+ /* Physical Buffer List, each element is page-aligned. */
+ union {
+ /*
+ * Inline array of physical page addresses (optimization
+ * for short region activation).
+ */
+ u64 inline_array[1];
+
+ /* points to the PBL (currently only direct) */
+ u64 dma_addr;
+ } pbl;
+};
+
+struct efa_io_fast_mr_inv_req {
+ /* Local key of the MR to invalidate */
+ u32 lkey;
+
+ /* MBZ */
+ u8 reserved[28];
+};
+
+/*
+ * Tx WQE, composed of tx meta descriptors followed by either tx buffer
+ * descriptors or inline data
+ */
+struct efa_io_tx_wqe {
+ /* TX meta */
+ struct efa_io_tx_meta_desc meta;
+
+ union {
+ /* Send buffer descriptors */
+ struct efa_io_tx_buf_desc sgl[2];
+
+ u8 inline_data[32];
+
+ /* RDMA local and remote memory addresses */
+ struct efa_io_rdma_req rdma_req;
+
+ /* Fast registration */
+ struct efa_io_fast_mr_reg_req reg_mr_req;
+
+ /* Fast invalidation */
+ struct efa_io_fast_mr_inv_req inv_mr_req;
+ } data;
+};
+
+/*
+ * Rx buffer descriptor; RX WQE is composed of one or more RX buffer
+ * descriptors.
+ */
+struct efa_io_rx_desc {
+ /* Buffer address bits[31:0] */
+ u32 buf_addr_lo;
+
+ /* Buffer Pointer[63:32] */
+ u32 buf_addr_hi;
+
+ /* Verbs-generated request id. */
+ u16 req_id;
+
+ /* Length in bytes. */
+ u16 length;
+
+ /*
+ * LKey and control flags
+ * 23:0 : lkey
+ * 29:24 : reserved - MBZ
+ * 30 : first - Indicates first descriptor in WQE
+ * 31 : last - Indicates last descriptor in WQE
+ */
+ u32 lkey_ctrl;
+};
+
+/* Common IO completion descriptor */
+struct efa_io_cdesc_common {
+ /*
+ * verbs-generated request ID, as provided in the completed tx or rx
+ * descriptor.
+ */
+ u16 req_id;
+
+ u8 status;
+
+ /*
+ * flags
+ * 0 : phase - Phase bit
+ * 2:1 : q_type - enum efa_io_queue_type: send/recv
+ * 3 : has_imm - indicates that immediate data is
+ * present - for RX completions only
+ * 6:4 : op_type - enum efa_io_send_op_type
+ * 7 : unsolicited - indicates that there is no
+ * matching request - for RDMA with imm. RX only
+ */
+ u8 flags;
+
+ /* local QP number */
+ u16 qp_num;
+};
+
+/* Tx completion descriptor */
+struct efa_io_tx_cdesc {
+ /* Common completion info */
+ struct efa_io_cdesc_common common;
+
+ /* MBZ */
+ u16 reserved16;
+};
+
+/* Rx Completion Descriptor */
+struct efa_io_rx_cdesc {
+ /* Common completion info */
+ struct efa_io_cdesc_common common;
+
+ /* Transferred length bits[15:0] */
+ u16 length;
+
+ /* Remote Address Handle FW index; 0xFFFF indicates an invalid AH */
+ u16 ah;
+
+ u16 src_qp_num;
+
+ /* Immediate data */
+ u32 imm;
+};
+
+/* Rx Completion Descriptor RDMA write info */
+struct efa_io_rx_cdesc_rdma_write {
+ /* Transferred length bits[31:16] */
+ u16 length_hi;
+};
+
+/* Extended Rx Completion Descriptor */
+struct efa_io_rx_cdesc_ex {
+ /* Base RX completion info */
+ struct efa_io_rx_cdesc base;
+
+ union {
+ struct efa_io_rx_cdesc_rdma_write rdma_write;
+
+ /*
+ * Valid only when the AH is unknown (0xFFFF) and the CQ's
+ * set_src_addr is enabled.
+ */
+ u8 src_addr[16];
+ } u;
+};
+
+/* tx_meta_desc */
+#define EFA_IO_TX_META_DESC_OP_TYPE_MASK GENMASK(3, 0)
+#define EFA_IO_TX_META_DESC_HAS_IMM_MASK BIT(4)
+#define EFA_IO_TX_META_DESC_INLINE_MSG_MASK BIT(5)
+#define EFA_IO_TX_META_DESC_META_EXTENSION_MASK BIT(6)
+#define EFA_IO_TX_META_DESC_META_DESC_MASK BIT(7)
+#define EFA_IO_TX_META_DESC_PHASE_MASK BIT(0)
+#define EFA_IO_TX_META_DESC_FIRST_MASK BIT(2)
+#define EFA_IO_TX_META_DESC_LAST_MASK BIT(3)
+#define EFA_IO_TX_META_DESC_COMP_REQ_MASK BIT(4)
+
+/* tx_buf_desc */
+#define EFA_IO_TX_BUF_DESC_LKEY_MASK GENMASK(23, 0)
+
+/* fast_mr_reg_req */
+#define EFA_IO_FAST_MR_REG_REQ_LOCAL_WRITE_ENABLE_MASK BIT(0)
+#define EFA_IO_FAST_MR_REG_REQ_REMOTE_WRITE_ENABLE_MASK BIT(1)
+#define EFA_IO_FAST_MR_REG_REQ_REMOTE_READ_ENABLE_MASK BIT(2)
+#define EFA_IO_FAST_MR_REG_REQ_PHYS_PAGE_SIZE_SHIFT_MASK GENMASK(4, 0)
+#define EFA_IO_FAST_MR_REG_REQ_PBL_MODE_MASK GENMASK(6, 5)
+
+/* rx_desc */
+#define EFA_IO_RX_DESC_LKEY_MASK GENMASK(23, 0)
+#define EFA_IO_RX_DESC_FIRST_MASK BIT(30)
+#define EFA_IO_RX_DESC_LAST_MASK BIT(31)
+
+/* cdesc_common */
+#define EFA_IO_CDESC_COMMON_PHASE_MASK BIT(0)
+#define EFA_IO_CDESC_COMMON_Q_TYPE_MASK GENMASK(2, 1)
+#define EFA_IO_CDESC_COMMON_HAS_IMM_MASK BIT(3)
+#define EFA_IO_CDESC_COMMON_OP_TYPE_MASK GENMASK(6, 4)
+#define EFA_IO_CDESC_COMMON_UNSOLICITED_MASK BIT(7)
+
+#endif /* _EFA_IO_H_ */
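The ctrl1/ctrl2 layouts documented in struct efa_io_tx_meta_desc map directly onto the EFA_IO_TX_META_DESC_* masks at the end of this header. A standalone sketch packing ctrl1 for an RDMA write carrying immediate data, illustrative only (the local macro names below simply mirror the masks above so the snippet compiles on its own):

#include <stdint.h>
#include <stdio.h>

/* Illustrative re-declarations of the masks above, for a self-contained
 * example of packing the ctrl1 byte of efa_io_tx_meta_desc.
 */
#define OP_TYPE_MASK   0x0fu /* EFA_IO_TX_META_DESC_OP_TYPE_MASK */
#define HAS_IMM_MASK   0x10u /* EFA_IO_TX_META_DESC_HAS_IMM_MASK */
#define META_DESC_MASK 0x80u /* EFA_IO_TX_META_DESC_META_DESC_MASK */

int main(void)
{
	uint8_t ctrl1 = 0;

	ctrl1 |= 2 & OP_TYPE_MASK; /* EFA_IO_RDMA_WRITE */
	ctrl1 |= HAS_IMM_MASK;     /* immediate data present */
	ctrl1 |= META_DESC_MASK;   /* must be set on metadata descriptors */
	printf("ctrl1=0x%02x\n", ctrl1); /* ctrl1=0x92 */
	return 0;
}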
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
new file mode 100644
index 000000000000..6c415b9adb5f
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -0,0 +1,706 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/utsname.h>
+#include <linux/version.h>
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/uverbs_ioctl.h>
+
+#include "efa.h"
+
+#define PCI_DEV_ID_EFA0_VF 0xefa0
+#define PCI_DEV_ID_EFA1_VF 0xefa1
+#define PCI_DEV_ID_EFA2_VF 0xefa2
+#define PCI_DEV_ID_EFA3_VF 0xefa3
+
+static const struct pci_device_id efa_pci_tbl[] = {
+ { PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA0_VF) },
+ { PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA1_VF) },
+ { PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA2_VF) },
+ { PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA3_VF) },
+ { }
+};
+
+MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION(DEVICE_NAME);
+MODULE_DEVICE_TABLE(pci, efa_pci_tbl);
+
+#define EFA_REG_BAR 0
+#define EFA_MEM_BAR 2
+#define EFA_BASE_BAR_MASK (BIT(EFA_REG_BAR) | BIT(EFA_MEM_BAR))
+
+#define EFA_AENQ_ENABLED_GROUPS \
+ (BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
+ BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))
+
+extern const struct uapi_definition efa_uapi_defs[];
+
+/* This handler will be called for an unknown event group or unimplemented handlers */
+static void unimplemented_aenq_handler(void *data,
+ struct efa_admin_aenq_entry *aenq_e)
+{
+ struct efa_dev *dev = (struct efa_dev *)data;
+
+ ibdev_err(&dev->ibdev,
+ "Unknown event was received or event with unimplemented handler\n");
+}
+
+static void efa_keep_alive(void *data, struct efa_admin_aenq_entry *aenq_e)
+{
+ struct efa_dev *dev = (struct efa_dev *)data;
+
+ atomic64_inc(&dev->stats.keep_alive_rcvd);
+}
+
+static struct efa_aenq_handlers aenq_handlers = {
+ .handlers = {
+ [EFA_ADMIN_KEEP_ALIVE] = efa_keep_alive,
+ },
+ .unimplemented_handler = unimplemented_aenq_handler
+};
+
+static void efa_release_bars(struct efa_dev *dev, int bars_mask)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int release_bars;
+
+ release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & bars_mask;
+ pci_release_selected_regions(pdev, release_bars);
+}
+
+static void efa_process_comp_eqe(struct efa_dev *dev, struct efa_admin_eqe *eqe)
+{
+ u16 cqn = eqe->u.comp_event.cqn;
+ struct efa_cq *cq;
+
+ /* Safe to load as we're in irq and removal calls synchronize_irq() */
+ cq = xa_load(&dev->cqs_xa, cqn);
+ if (unlikely(!cq)) {
+ ibdev_err_ratelimited(&dev->ibdev,
+ "Completion event on non-existent CQ[%u]",
+ cqn);
+ return;
+ }
+
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+}
+
+static void efa_process_eqe(struct efa_com_eq *eeq, struct efa_admin_eqe *eqe)
+{
+ struct efa_dev *dev = container_of(eeq->edev, struct efa_dev, edev);
+
+ if (likely(EFA_GET(&eqe->common, EFA_ADMIN_EQE_EVENT_TYPE) ==
+ EFA_ADMIN_EQE_EVENT_TYPE_COMPLETION))
+ efa_process_comp_eqe(dev, eqe);
+ else
+ ibdev_err_ratelimited(&dev->ibdev,
+ "Unknown event type received %lu",
+ EFA_GET(&eqe->common,
+ EFA_ADMIN_EQE_EVENT_TYPE));
+}
+
+static irqreturn_t efa_intr_msix_comp(int irq, void *data)
+{
+ struct efa_eq *eq = data;
+ struct efa_com_dev *edev = eq->eeq.edev;
+
+ efa_com_eq_comp_intr_handler(edev, &eq->eeq);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t efa_intr_msix_mgmnt(int irq, void *data)
+{
+ struct efa_dev *dev = data;
+
+ efa_com_admin_q_comp_intr_handler(&dev->edev);
+ efa_com_aenq_intr_handler(&dev->edev, data);
+
+ return IRQ_HANDLED;
+}
+
+static int efa_request_irq(struct efa_dev *dev, struct efa_irq *irq)
+{
+ int err;
+
+ err = request_irq(irq->irqn, irq->handler, 0, irq->name, irq->data);
+ if (err) {
+ dev_err(&dev->pdev->dev, "Failed to request irq %s (%d)\n",
+ irq->name, err);
+ return err;
+ }
+
+ irq_set_affinity_hint(irq->irqn, &irq->affinity_hint_mask);
+
+ return 0;
+}
+
+static void efa_setup_comp_irq(struct efa_dev *dev, struct efa_eq *eq, u32 vector)
+{
+ u32 cpu;
+
+ cpu = vector - EFA_COMP_EQS_VEC_BASE;
+ snprintf(eq->irq.name, EFA_IRQNAME_SIZE, "efa-comp%d@pci:%s", cpu,
+ pci_name(dev->pdev));
+ eq->irq.handler = efa_intr_msix_comp;
+ eq->irq.data = eq;
+ eq->irq.vector = vector;
+ eq->irq.irqn = pci_irq_vector(dev->pdev, vector);
+ cpumask_set_cpu(cpu, &eq->irq.affinity_hint_mask);
+}
+
+static void efa_free_irq(struct efa_dev *dev, struct efa_irq *irq)
+{
+ irq_set_affinity_hint(irq->irqn, NULL);
+ free_irq(irq->irqn, irq->data);
+}
+
+static void efa_setup_mgmnt_irq(struct efa_dev *dev)
+{
+ u32 cpu;
+
+ snprintf(dev->admin_irq.name, EFA_IRQNAME_SIZE,
+ "efa-mgmnt@pci:%s", pci_name(dev->pdev));
+ dev->admin_irq.handler = efa_intr_msix_mgmnt;
+ dev->admin_irq.data = dev;
+ dev->admin_irq.vector = dev->admin_msix_vector_idx;
+ dev->admin_irq.irqn = pci_irq_vector(dev->pdev,
+ dev->admin_msix_vector_idx);
+ cpu = cpumask_first(cpu_online_mask);
+ cpumask_set_cpu(cpu,
+ &dev->admin_irq.affinity_hint_mask);
+ dev_info(&dev->pdev->dev, "Setup irq:%d name:%s\n",
+ dev->admin_irq.irqn,
+ dev->admin_irq.name);
+}
+
+static int efa_set_mgmnt_irq(struct efa_dev *dev)
+{
+ efa_setup_mgmnt_irq(dev);
+
+ return efa_request_irq(dev, &dev->admin_irq);
+}
+
+static int efa_request_doorbell_bar(struct efa_dev *dev)
+{
+ u8 db_bar_idx = dev->dev_attr.db_bar;
+ struct pci_dev *pdev = dev->pdev;
+ int pci_mem_bars;
+ int db_bar;
+ int err;
+
+ db_bar = BIT(db_bar_idx);
+ if (!(db_bar & EFA_BASE_BAR_MASK)) {
+ pci_mem_bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ if (db_bar & ~pci_mem_bars) {
+ dev_err(&pdev->dev,
+ "Doorbells BAR unavailable. Requested %#x, available %#x\n",
+ db_bar, pci_mem_bars);
+ return -ENODEV;
+ }
+
+ err = pci_request_selected_regions(pdev, db_bar, DRV_MODULE_NAME);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions for bar %d failed %d\n",
+ db_bar_idx, err);
+ return err;
+ }
+ }
+
+ dev->db_bar_addr = pci_resource_start(dev->pdev, db_bar_idx);
+ dev->db_bar_len = pci_resource_len(dev->pdev, db_bar_idx);
+
+ return 0;
+}
+
+static void efa_release_doorbell_bar(struct efa_dev *dev)
+{
+ if (!(BIT(dev->dev_attr.db_bar) & EFA_BASE_BAR_MASK))
+ efa_release_bars(dev, BIT(dev->dev_attr.db_bar));
+}
+
+static void efa_update_hw_hints(struct efa_dev *dev,
+ struct efa_com_get_hw_hints_result *hw_hints)
+{
+ struct efa_com_dev *edev = &dev->edev;
+
+ if (hw_hints->mmio_read_timeout)
+ edev->mmio_read.mmio_read_timeout =
+ hw_hints->mmio_read_timeout * 1000;
+
+ if (hw_hints->poll_interval)
+ edev->aq.poll_interval = hw_hints->poll_interval;
+
+ if (hw_hints->admin_completion_timeout)
+ edev->aq.completion_timeout =
+ hw_hints->admin_completion_timeout;
+}
+
+static void efa_stats_init(struct efa_dev *dev)
+{
+ atomic64_t *s = (atomic64_t *)&dev->stats;
+ int i;
+
+ for (i = 0; i < sizeof(dev->stats) / sizeof(*s); i++, s++)
+ atomic64_set(s, 0);
+}
+
+static void efa_set_host_info(struct efa_dev *dev)
+{
+ struct efa_admin_set_feature_resp resp = {};
+ struct efa_admin_set_feature_cmd cmd = {};
+ struct efa_admin_host_info *hinf;
+ u32 bufsz = sizeof(*hinf);
+ dma_addr_t hinf_dma;
+
+ if (!efa_com_check_supported_feature_id(&dev->edev,
+ EFA_ADMIN_HOST_INFO))
+ return;
+
+ /* Failures in setting host info shall not disturb probe */
+ hinf = dma_alloc_coherent(&dev->pdev->dev, bufsz, &hinf_dma,
+ GFP_KERNEL);
+ if (!hinf)
+ return;
+
+ strscpy(hinf->os_dist_str, utsname()->release,
+ sizeof(hinf->os_dist_str));
+ hinf->os_type = EFA_ADMIN_OS_LINUX;
+ strscpy(hinf->kernel_ver_str, utsname()->version,
+ sizeof(hinf->kernel_ver_str));
+ hinf->kernel_ver = LINUX_VERSION_CODE;
+ EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MAJOR, 0);
+ EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MINOR, 0);
+ EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR, 0);
+ EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE, 0);
+ EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_BUS, dev->pdev->bus->number);
+ EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_DEVICE,
+ PCI_SLOT(dev->pdev->devfn));
+ EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_FUNCTION,
+ PCI_FUNC(dev->pdev->devfn));
+ EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MAJOR,
+ EFA_COMMON_SPEC_VERSION_MAJOR);
+ EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MINOR,
+ EFA_COMMON_SPEC_VERSION_MINOR);
+ EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_INTREE, 1);
+ EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_GDR, 0);
+
+ efa_com_set_feature_ex(&dev->edev, &resp, &cmd, EFA_ADMIN_HOST_INFO,
+ hinf_dma, bufsz);
+
+ dma_free_coherent(&dev->pdev->dev, bufsz, hinf, hinf_dma);
+}
+
+static void efa_destroy_eq(struct efa_dev *dev, struct efa_eq *eq)
+{
+ efa_com_eq_destroy(&dev->edev, &eq->eeq);
+ efa_free_irq(dev, &eq->irq);
+}
+
+static int efa_create_eq(struct efa_dev *dev, struct efa_eq *eq, u32 msix_vec)
+{
+ int err;
+
+ efa_setup_comp_irq(dev, eq, msix_vec);
+ err = efa_request_irq(dev, &eq->irq);
+ if (err)
+ return err;
+
+ err = efa_com_eq_init(&dev->edev, &eq->eeq, efa_process_eqe,
+ dev->dev_attr.max_eq_depth, msix_vec);
+ if (err)
+ goto err_free_comp_irq;
+
+ return 0;
+
+err_free_comp_irq:
+ efa_free_irq(dev, &eq->irq);
+ return err;
+}
+
+static int efa_create_eqs(struct efa_dev *dev)
+{
+ u32 neqs = dev->dev_attr.max_eq;
+ int err, i;
+
+ neqs = min_t(u32, neqs, dev->num_irq_vectors - EFA_COMP_EQS_VEC_BASE);
+ dev->neqs = neqs;
+ dev->eqs = kcalloc(neqs, sizeof(*dev->eqs), GFP_KERNEL);
+ if (!dev->eqs)
+ return -ENOMEM;
+
+ for (i = 0; i < neqs; i++) {
+ err = efa_create_eq(dev, &dev->eqs[i], i + EFA_COMP_EQS_VEC_BASE);
+ if (err)
+ goto err_destroy_eqs;
+ }
+
+ return 0;
+
+err_destroy_eqs:
+ for (i--; i >= 0; i--)
+ efa_destroy_eq(dev, &dev->eqs[i]);
+ kfree(dev->eqs);
+
+ return err;
+}
+
+static void efa_destroy_eqs(struct efa_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->neqs; i++)
+ efa_destroy_eq(dev, &dev->eqs[i]);
+
+ kfree(dev->eqs);
+}
+
+static const struct ib_device_ops efa_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_EFA,
+ .uverbs_abi_ver = EFA_UVERBS_ABI_VERSION,
+
+ .alloc_hw_port_stats = efa_alloc_hw_port_stats,
+ .alloc_hw_device_stats = efa_alloc_hw_device_stats,
+ .alloc_pd = efa_alloc_pd,
+ .alloc_ucontext = efa_alloc_ucontext,
+ .create_cq = efa_create_cq,
+ .create_cq_umem = efa_create_cq_umem,
+ .create_qp = efa_create_qp,
+ .create_user_ah = efa_create_ah,
+ .dealloc_pd = efa_dealloc_pd,
+ .dealloc_ucontext = efa_dealloc_ucontext,
+ .dereg_mr = efa_dereg_mr,
+ .destroy_ah = efa_destroy_ah,
+ .destroy_cq = efa_destroy_cq,
+ .destroy_qp = efa_destroy_qp,
+ .get_hw_stats = efa_get_hw_stats,
+ .get_link_layer = efa_port_link_layer,
+ .get_port_immutable = efa_get_port_immutable,
+ .mmap = efa_mmap,
+ .mmap_free = efa_mmap_free,
+ .modify_qp = efa_modify_qp,
+ .query_device = efa_query_device,
+ .query_gid = efa_query_gid,
+ .query_pkey = efa_query_pkey,
+ .query_port = efa_query_port,
+ .query_qp = efa_query_qp,
+ .reg_user_mr = efa_reg_mr,
+ .reg_user_mr_dmabuf = efa_reg_user_mr_dmabuf,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, efa_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_cq, efa_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_pd, efa_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, efa_qp, ibqp),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, efa_ucontext, ibucontext),
+};
+
+static int efa_ib_device_add(struct efa_dev *dev)
+{
+ struct efa_com_get_hw_hints_result hw_hints;
+ struct pci_dev *pdev = dev->pdev;
+ int err;
+
+ efa_stats_init(dev);
+
+ err = efa_com_get_device_attr(&dev->edev, &dev->dev_attr);
+ if (err)
+ return err;
+
+ dev_dbg(&dev->pdev->dev, "Doorbells bar (%d)\n", dev->dev_attr.db_bar);
+ err = efa_request_doorbell_bar(dev);
+ if (err)
+ return err;
+
+ err = efa_com_get_hw_hints(&dev->edev, &hw_hints);
+ if (err)
+ goto err_release_doorbell_bar;
+
+ efa_update_hw_hints(dev, &hw_hints);
+
+ /* Try to enable all the available aenq groups */
+ err = efa_com_set_aenq_config(&dev->edev, EFA_AENQ_ENABLED_GROUPS);
+ if (err)
+ goto err_release_doorbell_bar;
+
+ err = efa_create_eqs(dev);
+ if (err)
+ goto err_release_doorbell_bar;
+
+ efa_set_host_info(dev);
+
+ dev->ibdev.node_type = RDMA_NODE_UNSPECIFIED;
+ dev->ibdev.node_guid = dev->dev_attr.guid;
+ dev->ibdev.phys_port_cnt = 1;
+ dev->ibdev.num_comp_vectors = dev->neqs ?: 1;
+ dev->ibdev.dev.parent = &pdev->dev;
+
+ ib_set_device_ops(&dev->ibdev, &efa_dev_ops);
+
+ dev->ibdev.driver_def = efa_uapi_defs;
+
+ err = ib_register_device(&dev->ibdev, "efa_%d", &pdev->dev);
+ if (err)
+ goto err_destroy_eqs;
+
+ ibdev_info(&dev->ibdev, "IB device registered\n");
+
+ return 0;
+
+err_destroy_eqs:
+ efa_destroy_eqs(dev);
+err_release_doorbell_bar:
+ efa_release_doorbell_bar(dev);
+ return err;
+}
+
+static void efa_ib_device_remove(struct efa_dev *dev)
+{
+ ibdev_info(&dev->ibdev, "Unregister ib device\n");
+ ib_unregister_device(&dev->ibdev);
+ efa_destroy_eqs(dev);
+ efa_release_doorbell_bar(dev);
+}
+
+static void efa_disable_msix(struct efa_dev *dev)
+{
+ pci_free_irq_vectors(dev->pdev);
+}
+
+static int efa_enable_msix(struct efa_dev *dev)
+{
+ int max_vecs, num_vecs;
+
+ /*
+ * Reserve the max MSI-X vectors we might need; one vector is
+ * reserved for admin.
+ */
+ max_vecs = min_t(int, pci_msix_vec_count(dev->pdev),
+ num_online_cpus() + 1);
+ dev_dbg(&dev->pdev->dev, "Trying to enable MSI-X, vectors %d\n",
+ max_vecs);
+
+ dev->admin_msix_vector_idx = EFA_MGMNT_MSIX_VEC_IDX;
+ num_vecs = pci_alloc_irq_vectors(dev->pdev, 1,
+ max_vecs, PCI_IRQ_MSIX);
+
+ if (num_vecs < 0) {
+ dev_err(&dev->pdev->dev, "Failed to enable MSI-X. error %d\n",
+ num_vecs);
+ return -ENOSPC;
+ }
+
+ dev_dbg(&dev->pdev->dev, "Allocated %d MSI-X vectors\n", num_vecs);
+
+ dev->num_irq_vectors = num_vecs;
+
+ return 0;
+}
+
+static int efa_device_init(struct efa_com_dev *edev, struct pci_dev *pdev)
+{
+ int dma_width;
+ int err;
+
+ err = efa_com_dev_reset(edev, EFA_REGS_RESET_NORMAL);
+ if (err)
+ return err;
+
+ err = efa_com_validate_version(edev);
+ if (err)
+ return err;
+
+ dma_width = efa_com_get_dma_width(edev);
+ if (dma_width < 0) {
+ err = dma_width;
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_width));
+ if (err) {
+ dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", err);
+ return err;
+ }
+
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+ return 0;
+}
+
+static struct efa_dev *efa_probe_device(struct pci_dev *pdev)
+{
+ struct efa_com_dev *edev;
+ struct efa_dev *dev;
+ int pci_mem_bars;
+ int err;
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
+ return ERR_PTR(err);
+ }
+
+ pci_set_master(pdev);
+
+ dev = ib_alloc_device(efa_dev, ibdev);
+ if (!dev) {
+ dev_err(&pdev->dev, "Device alloc failed\n");
+ err = -ENOMEM;
+ goto err_disable_device;
+ }
+
+ pci_set_drvdata(pdev, dev);
+ edev = &dev->edev;
+ edev->efa_dev = dev;
+ edev->dmadev = &pdev->dev;
+ dev->pdev = pdev;
+ xa_init(&dev->cqs_xa);
+
+ pci_mem_bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ if (EFA_BASE_BAR_MASK & ~pci_mem_bars) {
+ dev_err(&pdev->dev, "BARs unavailable. Requested %#x, available %#x\n",
+ (int)EFA_BASE_BAR_MASK, pci_mem_bars);
+ err = -ENODEV;
+ goto err_ibdev_destroy;
+ }
+ err = pci_request_selected_regions(pdev, EFA_BASE_BAR_MASK, DRV_MODULE_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
+ err);
+ goto err_ibdev_destroy;
+ }
+
+ dev->reg_bar_addr = pci_resource_start(pdev, EFA_REG_BAR);
+ dev->reg_bar_len = pci_resource_len(pdev, EFA_REG_BAR);
+ dev->mem_bar_addr = pci_resource_start(pdev, EFA_MEM_BAR);
+ dev->mem_bar_len = pci_resource_len(pdev, EFA_MEM_BAR);
+
+ edev->reg_bar = devm_ioremap(&pdev->dev,
+ dev->reg_bar_addr,
+ dev->reg_bar_len);
+ if (!edev->reg_bar) {
+ dev_err(&pdev->dev, "Failed to remap register bar\n");
+ err = -EFAULT;
+ goto err_release_bars;
+ }
+
+ err = efa_com_mmio_reg_read_init(edev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init readless MMIO\n");
+ goto err_iounmap;
+ }
+
+ err = efa_device_init(edev, pdev);
+ if (err) {
+ dev_err(&pdev->dev, "EFA device init failed\n");
+ if (err == -ETIME)
+ err = -EPROBE_DEFER;
+ goto err_reg_read_destroy;
+ }
+
+ err = efa_enable_msix(dev);
+ if (err)
+ goto err_reg_read_destroy;
+
+ edev->aq.msix_vector_idx = dev->admin_msix_vector_idx;
+ edev->aenq.msix_vector_idx = dev->admin_msix_vector_idx;
+
+ err = efa_set_mgmnt_irq(dev);
+ if (err)
+ goto err_disable_msix;
+
+ err = efa_com_admin_init(edev, &aenq_handlers);
+ if (err)
+ goto err_free_mgmnt_irq;
+
+ return dev;
+
+err_free_mgmnt_irq:
+ efa_free_irq(dev, &dev->admin_irq);
+err_disable_msix:
+ efa_disable_msix(dev);
+err_reg_read_destroy:
+ efa_com_mmio_reg_read_destroy(edev);
+err_iounmap:
+ devm_iounmap(&pdev->dev, edev->reg_bar);
+err_release_bars:
+ efa_release_bars(dev, EFA_BASE_BAR_MASK);
+err_ibdev_destroy:
+ ib_dealloc_device(&dev->ibdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ return ERR_PTR(err);
+}
+
+static void efa_remove_device(struct pci_dev *pdev,
+ enum efa_regs_reset_reason_types reset_reason)
+{
+ struct efa_dev *dev = pci_get_drvdata(pdev);
+ struct efa_com_dev *edev;
+
+ edev = &dev->edev;
+ efa_com_dev_reset(edev, reset_reason);
+ efa_com_admin_destroy(edev);
+ efa_free_irq(dev, &dev->admin_irq);
+ efa_disable_msix(dev);
+ efa_com_mmio_reg_read_destroy(edev);
+ devm_iounmap(&pdev->dev, edev->reg_bar);
+ efa_release_bars(dev, EFA_BASE_BAR_MASK);
+ xa_destroy(&dev->cqs_xa);
+ ib_dealloc_device(&dev->ibdev);
+ pci_disable_device(pdev);
+}
+
+static int efa_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct efa_dev *dev;
+ int err;
+
+ dev = efa_probe_device(pdev);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ err = efa_ib_device_add(dev);
+ if (err)
+ goto err_remove_device;
+
+ return 0;
+
+err_remove_device:
+ efa_remove_device(pdev, EFA_REGS_RESET_INIT_ERR);
+ return err;
+}
+
+static void efa_remove(struct pci_dev *pdev)
+{
+ struct efa_dev *dev = pci_get_drvdata(pdev);
+
+ efa_ib_device_remove(dev);
+ efa_remove_device(pdev, EFA_REGS_RESET_NORMAL);
+}
+
+static void efa_shutdown(struct pci_dev *pdev)
+{
+ struct efa_dev *dev = pci_get_drvdata(pdev);
+
+ efa_destroy_eqs(dev);
+ efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_SHUTDOWN);
+ efa_free_irq(dev, &dev->admin_irq);
+ efa_disable_msix(dev);
+}
+
+static struct pci_driver efa_pci_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = efa_pci_tbl,
+ .probe = efa_probe,
+ .remove = efa_remove,
+ .shutdown = efa_shutdown,
+};
+
+module_pci_driver(efa_pci_driver);
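efa_create_eqs() above caps the number of completion EQs by both the device limit (dev_attr.max_eq) and the MSI-X vectors left after the management vector. A standalone sketch of that calculation, illustrative only (the value of EFA_COMP_EQS_VEC_BASE is assumed to be 1 here, i.e. the admin vector comes first; the constant is defined elsewhere in the driver and not shown in this patch):

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch of the completion-EQ count derivation in
 * efa_create_eqs(); COMP_EQS_VEC_BASE = 1 is an assumption.
 */
#define COMP_EQS_VEC_BASE 1u

static uint32_t num_comp_eqs(uint32_t max_eq, uint32_t num_irq_vectors)
{
	uint32_t avail = num_irq_vectors - COMP_EQS_VEC_BASE;

	return max_eq < avail ? max_eq : avail;
}

int main(void)
{
	printf("%u\n", num_comp_eqs(32, 9)); /* 8 completion EQs */
	return 0;
}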
diff --git a/drivers/infiniband/hw/efa/efa_regs_defs.h b/drivers/infiniband/hw/efa/efa_regs_defs.h
new file mode 100644
index 000000000000..714ae6258800
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa_regs_defs.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef _EFA_REGS_H_
+#define _EFA_REGS_H_
+
+enum efa_regs_reset_reason_types {
+ EFA_REGS_RESET_NORMAL = 0,
+ /* Keep alive timeout */
+ EFA_REGS_RESET_KEEP_ALIVE_TO = 1,
+ EFA_REGS_RESET_ADMIN_TO = 2,
+ EFA_REGS_RESET_INIT_ERR = 3,
+ EFA_REGS_RESET_DRIVER_INVALID_STATE = 4,
+ EFA_REGS_RESET_OS_TRIGGER = 5,
+ EFA_REGS_RESET_SHUTDOWN = 6,
+ EFA_REGS_RESET_USER_TRIGGER = 7,
+ EFA_REGS_RESET_GENERIC = 8,
+};
+
+/* efa_registers offsets */
+
+/* 0 base */
+#define EFA_REGS_VERSION_OFF 0x0
+#define EFA_REGS_CONTROLLER_VERSION_OFF 0x4
+#define EFA_REGS_CAPS_OFF 0x8
+#define EFA_REGS_AQ_BASE_LO_OFF 0x10
+#define EFA_REGS_AQ_BASE_HI_OFF 0x14
+#define EFA_REGS_AQ_CAPS_OFF 0x18
+#define EFA_REGS_ACQ_BASE_LO_OFF 0x20
+#define EFA_REGS_ACQ_BASE_HI_OFF 0x24
+#define EFA_REGS_ACQ_CAPS_OFF 0x28
+#define EFA_REGS_AQ_PROD_DB_OFF 0x2c
+#define EFA_REGS_AENQ_CAPS_OFF 0x34
+#define EFA_REGS_AENQ_BASE_LO_OFF 0x38
+#define EFA_REGS_AENQ_BASE_HI_OFF 0x3c
+#define EFA_REGS_AENQ_CONS_DB_OFF 0x40
+#define EFA_REGS_INTR_MASK_OFF 0x4c
+#define EFA_REGS_DEV_CTL_OFF 0x54
+#define EFA_REGS_DEV_STS_OFF 0x58
+#define EFA_REGS_MMIO_REG_READ_OFF 0x5c
+#define EFA_REGS_MMIO_RESP_LO_OFF 0x60
+#define EFA_REGS_MMIO_RESP_HI_OFF 0x64
+#define EFA_REGS_EQ_DB_OFF 0x68
+
+/* version register */
+#define EFA_REGS_VERSION_MINOR_VERSION_MASK 0xff
+#define EFA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00
+
+/* controller_version register */
+#define EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff
+#define EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00
+#define EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000
+#define EFA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000
+
+/* caps register */
+#define EFA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1
+#define EFA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e
+#define EFA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00
+#define EFA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000
+
+/* aq_caps register */
+#define EFA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff
+#define EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000
+
+/* acq_caps register */
+#define EFA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff
+#define EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xff0000
+#define EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR_MASK 0xff000000
+
+/* aenq_caps register */
+#define EFA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff
+#define EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xff0000
+#define EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR_MASK 0xff000000
+
+/* intr_mask register */
+#define EFA_REGS_INTR_MASK_EN_MASK 0x1
+
+/* dev_ctl register */
+#define EFA_REGS_DEV_CTL_DEV_RESET_MASK 0x1
+#define EFA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2
+#define EFA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000
+
+/* dev_sts register */
+#define EFA_REGS_DEV_STS_READY_MASK 0x1
+#define EFA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2
+#define EFA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4
+#define EFA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8
+#define EFA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10
+#define EFA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20
+
+/* mmio_reg_read register */
+#define EFA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff
+#define EFA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000
+
+/* eq_db register */
+#define EFA_REGS_EQ_DB_EQN_MASK 0xffff
+#define EFA_REGS_EQ_DB_ARM_MASK 0x80000000
+
+#endif /* _EFA_REGS_H_ */
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
new file mode 100644
index 000000000000..d9a12681f843
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -0,0 +1,2350 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/dma-resv.h>
+#include <linux/vmalloc.h>
+#include <linux/log2.h>
+
+#include <rdma/ib_addr.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/uverbs_ioctl.h>
+#define UVERBS_MODULE_NAME efa_ib
+#include <rdma/uverbs_named_ioctl.h>
+#include <rdma/ib_user_ioctl_cmds.h>
+
+#include "efa.h"
+#include "efa_io_defs.h"
+
+enum {
+ EFA_MMAP_DMA_PAGE = 0,
+ EFA_MMAP_IO_WC,
+ EFA_MMAP_IO_NC,
+};
+
+struct efa_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ u64 address;
+ u8 mmap_flag;
+};
+
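+/*
+ * Device and port stats are kept as X-macro lists so that the enum values
+ * (EFA_STATS_ENUM) and the rdma_stat_desc name strings (EFA_STATS_STR) are
+ * always generated from the same table.
+ */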
+#define EFA_DEFINE_DEVICE_STATS(op) \
+ op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
+ op(EFA_COMPLETED_CMDS, "completed_cmds") \
+ op(EFA_CMDS_ERR, "cmds_err") \
+ op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
+ op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
+ op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
+ op(EFA_CREATE_QP_ERR, "create_qp_err") \
+ op(EFA_CREATE_CQ_ERR, "create_cq_err") \
+ op(EFA_REG_MR_ERR, "reg_mr_err") \
+ op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
+ op(EFA_CREATE_AH_ERR, "create_ah_err") \
+ op(EFA_MMAP_ERR, "mmap_err")
+
+#define EFA_DEFINE_PORT_STATS(op) \
+ op(EFA_TX_BYTES, "tx_bytes") \
+ op(EFA_TX_PKTS, "tx_pkts") \
+ op(EFA_RX_BYTES, "rx_bytes") \
+ op(EFA_RX_PKTS, "rx_pkts") \
+ op(EFA_RX_DROPS, "rx_drops") \
+ op(EFA_SEND_BYTES, "send_bytes") \
+ op(EFA_SEND_WRS, "send_wrs") \
+ op(EFA_RECV_BYTES, "recv_bytes") \
+ op(EFA_RECV_WRS, "recv_wrs") \
+ op(EFA_RDMA_READ_WRS, "rdma_read_wrs") \
+ op(EFA_RDMA_READ_BYTES, "rdma_read_bytes") \
+ op(EFA_RDMA_READ_WR_ERR, "rdma_read_wr_err") \
+ op(EFA_RDMA_READ_RESP_BYTES, "rdma_read_resp_bytes") \
+ op(EFA_RDMA_WRITE_WRS, "rdma_write_wrs") \
+ op(EFA_RDMA_WRITE_BYTES, "rdma_write_bytes") \
+ op(EFA_RDMA_WRITE_WR_ERR, "rdma_write_wr_err") \
+ op(EFA_RDMA_WRITE_RECV_BYTES, "rdma_write_recv_bytes") \
+ op(EFA_RETRANS_BYTES, "retrans_bytes") \
+ op(EFA_RETRANS_PKTS, "retrans_pkts") \
+ op(EFA_RETRANS_TIMEOUT_EVENS, "retrans_timeout_events") \
+ op(EFA_UNRESPONSIVE_REMOTE_EVENTS, "unresponsive_remote_events") \
+ op(EFA_IMPAIRED_REMOTE_CONN_EVENTS, "impaired_remote_conn_events") \
+
+#define EFA_STATS_ENUM(ename, name) ename,
+#define EFA_STATS_STR(ename, nam) \
+ [ename].name = nam,
+
+enum efa_hw_device_stats {
+ EFA_DEFINE_DEVICE_STATS(EFA_STATS_ENUM)
+};
+
+static const struct rdma_stat_desc efa_device_stats_descs[] = {
+ EFA_DEFINE_DEVICE_STATS(EFA_STATS_STR)
+};
+
+enum efa_hw_port_stats {
+ EFA_DEFINE_PORT_STATS(EFA_STATS_ENUM)
+};
+
+static const struct rdma_stat_desc efa_port_stats_descs[] = {
+ EFA_DEFINE_PORT_STATS(EFA_STATS_STR)
+};
+
+#define EFA_DEFAULT_LINK_SPEED_GBPS 100
+
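+/*
+ * Indirect PBLs are described by a list of 4KB chunks: each chunk carries
+ * EFA_PTRS_PER_CHUNK 8-byte page addresses followed by an
+ * efa_com_ctrl_buff_info that points to the next chunk in the list.
+ */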
+#define EFA_CHUNK_PAYLOAD_SHIFT 12
+#define EFA_CHUNK_PAYLOAD_SIZE BIT(EFA_CHUNK_PAYLOAD_SHIFT)
+#define EFA_CHUNK_PAYLOAD_PTR_SIZE 8
+
+#define EFA_CHUNK_SHIFT 12
+#define EFA_CHUNK_SIZE BIT(EFA_CHUNK_SHIFT)
+#define EFA_CHUNK_PTR_SIZE sizeof(struct efa_com_ctrl_buff_info)
+
+#define EFA_PTRS_PER_CHUNK \
+ ((EFA_CHUNK_SIZE - EFA_CHUNK_PTR_SIZE) / EFA_CHUNK_PAYLOAD_PTR_SIZE)
+
+#define EFA_CHUNK_USED_SIZE \
+ ((EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE)
+
+struct pbl_chunk {
+ dma_addr_t dma_addr;
+ u64 *buf;
+ u32 length;
+};
+
+struct pbl_chunk_list {
+ struct pbl_chunk *chunks;
+ unsigned int size;
+};
+
+struct pbl_context {
+ union {
+ struct {
+ dma_addr_t dma_addr;
+ } continuous;
+ struct {
+ u32 pbl_buf_size_in_pages;
+ struct scatterlist *sgl;
+ int sg_dma_cnt;
+ struct pbl_chunk_list chunk_list;
+ } indirect;
+ } phys;
+ u64 *pbl_buf;
+ u32 pbl_buf_size_in_bytes;
+ u8 physically_continuous;
+};
+
+static inline struct efa_dev *to_edev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct efa_dev, ibdev);
+}
+
+static inline struct efa_ucontext *to_eucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct efa_ucontext, ibucontext);
+}
+
+static inline struct efa_pd *to_epd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct efa_pd, ibpd);
+}
+
+static inline struct efa_mr *to_emr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct efa_mr, ibmr);
+}
+
+static inline struct efa_qp *to_eqp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct efa_qp, ibqp);
+}
+
+static inline struct efa_cq *to_ecq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct efa_cq, ibcq);
+}
+
+static inline struct efa_ah *to_eah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct efa_ah, ibah);
+}
+
+static inline struct efa_user_mmap_entry *
+to_emmap(struct rdma_user_mmap_entry *rdma_entry)
+{
+ return container_of(rdma_entry, struct efa_user_mmap_entry, rdma_entry);
+}
+
+#define EFA_DEV_CAP(dev, cap) \
+ ((dev)->dev_attr.device_caps & \
+ EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_##cap##_MASK)
+
+#define is_reserved_cleared(reserved) \
+ !memchr_inv(reserved, 0, sizeof(reserved))
+
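+/*
+ * Allocate a zeroed, page-exact buffer and DMA-map it for the device;
+ * efa_free_mapped() is the matching teardown helper.
+ */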
+static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
+ size_t size, enum dma_data_direction dir)
+{
+ void *addr;
+
+ addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
+ if (!addr)
+ return NULL;
+
+ *dma_addr = dma_map_single(&dev->pdev->dev, addr, size, dir);
+ if (dma_mapping_error(&dev->pdev->dev, *dma_addr)) {
+ ibdev_err(&dev->ibdev, "Failed to map DMA address\n");
+ free_pages_exact(addr, size);
+ return NULL;
+ }
+
+ return addr;
+}
+
+static void efa_free_mapped(struct efa_dev *dev, void *cpu_addr,
+ dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir)
+{
+ dma_unmap_single(&dev->pdev->dev, dma_addr, size, dir);
+ free_pages_exact(cpu_addr, size);
+}
+
+int efa_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props,
+ struct ib_udata *udata)
+{
+ struct efa_com_get_device_attr_result *dev_attr;
+ struct efa_ibv_ex_query_device_resp resp = {};
+ struct efa_dev *dev = to_edev(ibdev);
+ int err;
+
+ if (udata && udata->inlen &&
+ !ib_is_udata_cleared(udata, 0, udata->inlen)) {
+ ibdev_dbg(ibdev,
+ "Incompatible ABI params, udata not cleared\n");
+ return -EINVAL;
+ }
+
+ dev_attr = &dev->dev_attr;
+
+ memset(props, 0, sizeof(*props));
+ props->max_mr_size = dev_attr->max_mr_pages * PAGE_SIZE;
+ props->page_size_cap = dev_attr->page_size_cap;
+ props->vendor_id = dev->pdev->vendor;
+ props->vendor_part_id = dev->pdev->device;
+ props->hw_ver = dev->pdev->subsystem_device;
+ props->max_qp = dev_attr->max_qp;
+ props->max_cq = dev_attr->max_cq;
+ props->max_pd = dev_attr->max_pd;
+ props->max_mr = dev_attr->max_mr;
+ props->max_ah = dev_attr->max_ah;
+ props->max_cqe = dev_attr->max_cq_depth;
+ props->max_qp_wr = min_t(u32, dev_attr->max_sq_depth,
+ dev_attr->max_rq_depth);
+ props->max_send_sge = dev_attr->max_sq_sge;
+ props->max_recv_sge = dev_attr->max_rq_sge;
+ props->max_sge_rd = dev_attr->max_wr_rdma_sge;
+ props->max_pkeys = 1;
+
+ if (udata && udata->outlen) {
+ resp.max_sq_sge = dev_attr->max_sq_sge;
+ resp.max_rq_sge = dev_attr->max_rq_sge;
+ resp.max_sq_wr = dev_attr->max_sq_depth;
+ resp.max_rq_wr = dev_attr->max_rq_depth;
+ resp.max_rdma_size = dev_attr->max_rdma_size;
+
+ resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_WITH_SGID;
+ resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_WITH_EXT_MEM;
+ if (EFA_DEV_CAP(dev, RDMA_READ))
+ resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;
+
+ if (EFA_DEV_CAP(dev, RNR_RETRY))
+ resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RNR_RETRY;
+
+ if (EFA_DEV_CAP(dev, DATA_POLLING_128))
+ resp.device_caps |= EFA_QUERY_DEVICE_CAPS_DATA_POLLING_128;
+
+ if (EFA_DEV_CAP(dev, RDMA_WRITE))
+ resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_WRITE;
+
+ if (EFA_DEV_CAP(dev, UNSOLICITED_WRITE_RECV))
+ resp.device_caps |= EFA_QUERY_DEVICE_CAPS_UNSOLICITED_WRITE_RECV;
+
+ if (dev->neqs)
+ resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_NOTIFICATIONS;
+
+ err = ib_copy_to_udata(udata, &resp,
+ min(sizeof(resp), udata->outlen));
+ if (err) {
+ ibdev_dbg(ibdev,
+ "Failed to copy udata for query_device\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
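+/*
+ * Translate the aggregate link speed reported by the device (in Gbps) into
+ * the closest IB speed/width pair advertised through query_port.
+ */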
+static void efa_link_gbps_to_speed_and_width(u16 gbps,
+ enum ib_port_speed *speed,
+ enum ib_port_width *width)
+{
+ if (gbps >= 400) {
+ *width = IB_WIDTH_8X;
+ *speed = IB_SPEED_HDR;
+ } else if (gbps >= 200) {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_HDR;
+ } else if (gbps >= 120) {
+ *width = IB_WIDTH_12X;
+ *speed = IB_SPEED_FDR10;
+ } else if (gbps >= 100) {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_EDR;
+ } else if (gbps >= 60) {
+ *width = IB_WIDTH_12X;
+ *speed = IB_SPEED_DDR;
+ } else if (gbps >= 50) {
+ *width = IB_WIDTH_1X;
+ *speed = IB_SPEED_HDR;
+ } else if (gbps >= 40) {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_FDR10;
+ } else if (gbps >= 30) {
+ *width = IB_WIDTH_12X;
+ *speed = IB_SPEED_SDR;
+ } else {
+ *width = IB_WIDTH_1X;
+ *speed = IB_SPEED_EDR;
+ }
+}
+
+int efa_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props)
+{
+ struct efa_dev *dev = to_edev(ibdev);
+ enum ib_port_speed link_speed;
+ enum ib_port_width link_width;
+ u16 link_gbps;
+
+ props->lmc = 1;
+
+ props->state = IB_PORT_ACTIVE;
+ props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+ props->gid_tbl_len = 1;
+ props->pkey_tbl_len = 1;
+ link_gbps = dev->dev_attr.max_link_speed_gbps ?: EFA_DEFAULT_LINK_SPEED_GBPS;
+ efa_link_gbps_to_speed_and_width(link_gbps, &link_speed, &link_width);
+ props->active_speed = link_speed;
+ props->active_width = link_width;
+ props->max_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
+ props->active_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
+ props->max_msg_sz = dev->dev_attr.mtu;
+ props->max_vl_num = 1;
+
+ return 0;
+}
+
+int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ struct efa_dev *dev = to_edev(ibqp->device);
+ struct efa_com_query_qp_params params = {};
+ struct efa_com_query_qp_result result;
+ struct efa_qp *qp = to_eqp(ibqp);
+ int err;
+
+#define EFA_QUERY_QP_SUPP_MASK \
+ (IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | \
+ IB_QP_QKEY | IB_QP_SQ_PSN | IB_QP_CAP | IB_QP_RNR_RETRY)
+
+ if (qp_attr_mask & ~EFA_QUERY_QP_SUPP_MASK) {
+ ibdev_dbg(&dev->ibdev,
+ "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
+ qp_attr_mask, EFA_QUERY_QP_SUPP_MASK);
+ return -EOPNOTSUPP;
+ }
+
+ memset(qp_attr, 0, sizeof(*qp_attr));
+ memset(qp_init_attr, 0, sizeof(*qp_init_attr));
+
+ params.qp_handle = qp->qp_handle;
+ err = efa_com_query_qp(&dev->edev, &params, &result);
+ if (err)
+ return err;
+
+ qp_attr->qp_state = result.qp_state;
+ qp_attr->qkey = result.qkey;
+ qp_attr->sq_psn = result.sq_psn;
+ qp_attr->sq_draining = result.sq_draining;
+ qp_attr->port_num = 1;
+ qp_attr->rnr_retry = result.rnr_retry;
+
+ qp_attr->cap.max_send_wr = qp->max_send_wr;
+ qp_attr->cap.max_recv_wr = qp->max_recv_wr;
+ qp_attr->cap.max_send_sge = qp->max_send_sge;
+ qp_attr->cap.max_recv_sge = qp->max_recv_sge;
+ qp_attr->cap.max_inline_data = qp->max_inline_data;
+
+ qp_init_attr->qp_type = ibqp->qp_type;
+ qp_init_attr->recv_cq = ibqp->recv_cq;
+ qp_init_attr->send_cq = ibqp->send_cq;
+ qp_init_attr->qp_context = ibqp->qp_context;
+ qp_init_attr->cap = qp_attr->cap;
+
+ return 0;
+}
+
+int efa_query_gid(struct ib_device *ibdev, u32 port, int index,
+ union ib_gid *gid)
+{
+ struct efa_dev *dev = to_edev(ibdev);
+
+ memcpy(gid->raw, dev->dev_attr.addr, sizeof(dev->dev_attr.addr));
+
+ return 0;
+}
+
+int efa_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
+ u16 *pkey)
+{
+ if (index > 0)
+ return -EINVAL;
+
+ *pkey = 0xffff;
+ return 0;
+}
+
+static int efa_pd_dealloc(struct efa_dev *dev, u16 pdn)
+{
+ struct efa_com_dealloc_pd_params params = {
+ .pdn = pdn,
+ };
+
+ return efa_com_dealloc_pd(&dev->edev, &params);
+}
+
+int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct efa_dev *dev = to_edev(ibpd->device);
+ struct efa_ibv_alloc_pd_resp resp = {};
+ struct efa_com_alloc_pd_result result;
+ struct efa_pd *pd = to_epd(ibpd);
+ int err;
+
+ if (udata->inlen &&
+ !ib_is_udata_cleared(udata, 0, udata->inlen)) {
+ ibdev_dbg(&dev->ibdev,
+ "Incompatible ABI params, udata not cleared\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ err = efa_com_alloc_pd(&dev->edev, &result);
+ if (err)
+ goto err_out;
+
+ pd->pdn = result.pdn;
+ resp.pdn = result.pdn;
+
+ if (udata->outlen) {
+ err = ib_copy_to_udata(udata, &resp,
+ min(sizeof(resp), udata->outlen));
+ if (err) {
+ ibdev_dbg(&dev->ibdev,
+ "Failed to copy udata for alloc_pd\n");
+ goto err_dealloc_pd;
+ }
+ }
+
+ ibdev_dbg(&dev->ibdev, "Allocated pd[%d]\n", pd->pdn);
+
+ return 0;
+
+err_dealloc_pd:
+ efa_pd_dealloc(dev, result.pdn);
+err_out:
+ atomic64_inc(&dev->stats.alloc_pd_err);
+ return err;
+}
+
+int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct efa_dev *dev = to_edev(ibpd->device);
+ struct efa_pd *pd = to_epd(ibpd);
+
+ ibdev_dbg(&dev->ibdev, "Dealloc pd[%d]\n", pd->pdn);
+ efa_pd_dealloc(dev, pd->pdn);
+ return 0;
+}
+
+static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
+{
+ struct efa_com_destroy_qp_params params = { .qp_handle = qp_handle };
+
+ return efa_com_destroy_qp(&dev->edev, &params);
+}
+
+static void efa_qp_user_mmap_entries_remove(struct efa_qp *qp)
+{
+ rdma_user_mmap_entry_remove(qp->rq_mmap_entry);
+ rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry);
+ rdma_user_mmap_entry_remove(qp->llq_desc_mmap_entry);
+ rdma_user_mmap_entry_remove(qp->sq_db_mmap_entry);
+}
+
+int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+{
+ struct efa_dev *dev = to_edev(ibqp->pd->device);
+ struct efa_qp *qp = to_eqp(ibqp);
+ int err;
+
+ ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);
+
+ err = efa_destroy_qp_handle(dev, qp->qp_handle);
+ if (err)
+ return err;
+
+ efa_qp_user_mmap_entries_remove(qp);
+
+ if (qp->rq_cpu_addr) {
+ ibdev_dbg(&dev->ibdev,
+ "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
+ qp->rq_cpu_addr, qp->rq_size,
+ &qp->rq_dma_addr);
+ efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
+ qp->rq_size, DMA_TO_DEVICE);
+ }
+
+ return 0;
+}
+
+static struct rdma_user_mmap_entry*
+efa_user_mmap_entry_insert(struct ib_ucontext *ucontext,
+ u64 address, size_t length,
+ u8 mmap_flag, u64 *offset)
+{
+ struct efa_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ int err;
+
+ if (!entry)
+ return NULL;
+
+ entry->address = address;
+ entry->mmap_flag = mmap_flag;
+
+ err = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
+ length);
+ if (err) {
+ kfree(entry);
+ return NULL;
+ }
+ *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+ return &entry->rdma_entry;
+}
+
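+/*
+ * Expose the SQ doorbell, the LLQ descriptor ring and, for QPs with a
+ * receive queue, the RQ doorbell and RQ buffer to user space through
+ * rdma_user_mmap entries; the mmap keys and in-page offsets are returned
+ * in the create_qp response.
+ */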
+static int qp_mmap_entries_setup(struct efa_qp *qp,
+ struct efa_dev *dev,
+ struct efa_ucontext *ucontext,
+ struct efa_com_create_qp_params *params,
+ struct efa_ibv_create_qp_resp *resp)
+{
+ size_t length;
+ u64 address;
+
+ address = dev->db_bar_addr + resp->sq_db_offset;
+ qp->sq_db_mmap_entry =
+ efa_user_mmap_entry_insert(&ucontext->ibucontext,
+ address,
+ PAGE_SIZE, EFA_MMAP_IO_NC,
+ &resp->sq_db_mmap_key);
+ if (!qp->sq_db_mmap_entry)
+ return -ENOMEM;
+
+ resp->sq_db_offset &= ~PAGE_MASK;
+
+ address = dev->mem_bar_addr + resp->llq_desc_offset;
+ length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
+ offset_in_page(resp->llq_desc_offset));
+
+ qp->llq_desc_mmap_entry =
+ efa_user_mmap_entry_insert(&ucontext->ibucontext,
+ address, length,
+ EFA_MMAP_IO_WC,
+ &resp->llq_desc_mmap_key);
+ if (!qp->llq_desc_mmap_entry)
+ goto err_remove_mmap;
+
+ resp->llq_desc_offset &= ~PAGE_MASK;
+
+ if (qp->rq_size) {
+ address = dev->db_bar_addr + resp->rq_db_offset;
+
+ qp->rq_db_mmap_entry =
+ efa_user_mmap_entry_insert(&ucontext->ibucontext,
+ address, PAGE_SIZE,
+ EFA_MMAP_IO_NC,
+ &resp->rq_db_mmap_key);
+ if (!qp->rq_db_mmap_entry)
+ goto err_remove_mmap;
+
+ resp->rq_db_offset &= ~PAGE_MASK;
+
+ address = virt_to_phys(qp->rq_cpu_addr);
+ qp->rq_mmap_entry =
+ efa_user_mmap_entry_insert(&ucontext->ibucontext,
+ address, qp->rq_size,
+ EFA_MMAP_DMA_PAGE,
+ &resp->rq_mmap_key);
+ if (!qp->rq_mmap_entry)
+ goto err_remove_mmap;
+
+ resp->rq_mmap_size = qp->rq_size;
+ }
+
+ return 0;
+
+err_remove_mmap:
+ efa_qp_user_mmap_entries_remove(qp);
+
+ return -ENOMEM;
+}
+
+static int efa_qp_validate_cap(struct efa_dev *dev,
+ struct ib_qp_init_attr *init_attr)
+{
+ if (init_attr->cap.max_send_wr > dev->dev_attr.max_sq_depth) {
+ ibdev_dbg(&dev->ibdev,
+ "qp: requested send wr[%u] exceeds the max[%u]\n",
+ init_attr->cap.max_send_wr,
+ dev->dev_attr.max_sq_depth);
+ return -EINVAL;
+ }
+ if (init_attr->cap.max_recv_wr > dev->dev_attr.max_rq_depth) {
+ ibdev_dbg(&dev->ibdev,
+ "qp: requested receive wr[%u] exceeds the max[%u]\n",
+ init_attr->cap.max_recv_wr,
+ dev->dev_attr.max_rq_depth);
+ return -EINVAL;
+ }
+ if (init_attr->cap.max_send_sge > dev->dev_attr.max_sq_sge) {
+ ibdev_dbg(&dev->ibdev,
+ "qp: requested sge send[%u] exceeds the max[%u]\n",
+ init_attr->cap.max_send_sge, dev->dev_attr.max_sq_sge);
+ return -EINVAL;
+ }
+ if (init_attr->cap.max_recv_sge > dev->dev_attr.max_rq_sge) {
+ ibdev_dbg(&dev->ibdev,
+ "qp: requested sge recv[%u] exceeds the max[%u]\n",
+ init_attr->cap.max_recv_sge, dev->dev_attr.max_rq_sge);
+ return -EINVAL;
+ }
+ if (init_attr->cap.max_inline_data > dev->dev_attr.inline_buf_size) {
+ ibdev_dbg(&dev->ibdev,
+ "qp: requested inline data[%u] exceeds the max[%u]\n",
+ init_attr->cap.max_inline_data,
+ dev->dev_attr.inline_buf_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int efa_qp_validate_attr(struct efa_dev *dev,
+ struct ib_qp_init_attr *init_attr)
+{
+ if (init_attr->qp_type != IB_QPT_DRIVER &&
+ init_attr->qp_type != IB_QPT_UD) {
+ ibdev_dbg(&dev->ibdev,
+ "Unsupported qp type %d\n", init_attr->qp_type);
+ return -EOPNOTSUPP;
+ }
+
+ if (init_attr->srq) {
+ ibdev_dbg(&dev->ibdev, "SRQ is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (init_attr->create_flags) {
+ ibdev_dbg(&dev->ibdev, "Unsupported create flags\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct efa_com_create_qp_params create_qp_params = {};
+ struct efa_com_create_qp_result create_qp_resp;
+ struct efa_dev *dev = to_edev(ibqp->device);
+ struct efa_ibv_create_qp_resp resp = {};
+ struct efa_ibv_create_qp cmd = {};
+ struct efa_qp *qp = to_eqp(ibqp);
+ struct efa_ucontext *ucontext;
+ u16 supported_efa_flags = 0;
+ int err;
+
+ ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
+ ibucontext);
+
+ err = efa_qp_validate_cap(dev, init_attr);
+ if (err)
+ goto err_out;
+
+ err = efa_qp_validate_attr(dev, init_attr);
+ if (err)
+ goto err_out;
+
+ if (offsetofend(typeof(cmd), driver_qp_type) > udata->inlen) {
+ ibdev_dbg(&dev->ibdev,
+ "Incompatible ABI params, no input udata\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ if (udata->inlen > sizeof(cmd) &&
+ !ib_is_udata_cleared(udata, sizeof(cmd),
+ udata->inlen - sizeof(cmd))) {
+ ibdev_dbg(&dev->ibdev,
+ "Incompatible ABI params, unknown fields in udata\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ err = ib_copy_from_udata(&cmd, udata,
+ min(sizeof(cmd), udata->inlen));
+ if (err) {
+ ibdev_dbg(&dev->ibdev,
+ "Cannot copy udata for create_qp\n");
+ goto err_out;
+ }
+
+ if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_98)) {
+ ibdev_dbg(&dev->ibdev,
+ "Incompatible ABI params, unknown fields in udata\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ if (EFA_DEV_CAP(dev, UNSOLICITED_WRITE_RECV))
+ supported_efa_flags |= EFA_CREATE_QP_WITH_UNSOLICITED_WRITE_RECV;
+
+ if (cmd.flags & ~supported_efa_flags) {
+ ibdev_dbg(&dev->ibdev, "Unsupported EFA QP create flags[%#x], supported[%#x]\n",
+ cmd.flags, supported_efa_flags);
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+
+ create_qp_params.uarn = ucontext->uarn;
+ create_qp_params.pd = to_epd(ibqp->pd)->pdn;
+
+ if (init_attr->qp_type == IB_QPT_UD) {
+ create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD;
+ } else if (cmd.driver_qp_type == EFA_QP_DRIVER_TYPE_SRD) {
+ create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_SRD;
+ } else {
+ ibdev_dbg(&dev->ibdev,
+ "Unsupported qp type %d driver qp type %d\n",
+ init_attr->qp_type, cmd.driver_qp_type);
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+
+ ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n",
+ init_attr->qp_type, cmd.driver_qp_type);
+ create_qp_params.send_cq_idx = to_ecq(init_attr->send_cq)->cq_idx;
+ create_qp_params.recv_cq_idx = to_ecq(init_attr->recv_cq)->cq_idx;
+ create_qp_params.sq_depth = init_attr->cap.max_send_wr;
+ create_qp_params.sq_ring_size_in_bytes = cmd.sq_ring_size;
+
+ create_qp_params.rq_depth = init_attr->cap.max_recv_wr;
+ create_qp_params.rq_ring_size_in_bytes = cmd.rq_ring_size;
+ qp->rq_size = PAGE_ALIGN(create_qp_params.rq_ring_size_in_bytes);
+ if (qp->rq_size) {
+ qp->rq_cpu_addr = efa_zalloc_mapped(dev, &qp->rq_dma_addr,
+ qp->rq_size, DMA_TO_DEVICE);
+ if (!qp->rq_cpu_addr) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ ibdev_dbg(&dev->ibdev,
+ "qp->cpu_addr[0x%p] allocated: size[%lu], dma[%pad]\n",
+ qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr);
+ create_qp_params.rq_base_addr = qp->rq_dma_addr;
+ }
+
+ create_qp_params.sl = cmd.sl;
+
+ if (cmd.flags & EFA_CREATE_QP_WITH_UNSOLICITED_WRITE_RECV)
+ create_qp_params.unsolicited_write_recv = true;
+
+ err = efa_com_create_qp(&dev->edev, &create_qp_params,
+ &create_qp_resp);
+ if (err)
+ goto err_free_mapped;
+
+ resp.sq_db_offset = create_qp_resp.sq_db_offset;
+ resp.rq_db_offset = create_qp_resp.rq_db_offset;
+ resp.llq_desc_offset = create_qp_resp.llq_descriptors_offset;
+ resp.send_sub_cq_idx = create_qp_resp.send_sub_cq_idx;
+ resp.recv_sub_cq_idx = create_qp_resp.recv_sub_cq_idx;
+
+ err = qp_mmap_entries_setup(qp, dev, ucontext, &create_qp_params,
+ &resp);
+ if (err)
+ goto err_destroy_qp;
+
+ qp->qp_handle = create_qp_resp.qp_handle;
+ qp->ibqp.qp_num = create_qp_resp.qp_num;
+ qp->max_send_wr = init_attr->cap.max_send_wr;
+ qp->max_recv_wr = init_attr->cap.max_recv_wr;
+ qp->max_send_sge = init_attr->cap.max_send_sge;
+ qp->max_recv_sge = init_attr->cap.max_recv_sge;
+ qp->max_inline_data = init_attr->cap.max_inline_data;
+
+ if (udata->outlen) {
+ err = ib_copy_to_udata(udata, &resp,
+ min(sizeof(resp), udata->outlen));
+ if (err) {
+ ibdev_dbg(&dev->ibdev,
+ "Failed to copy udata for qp[%u]\n",
+ create_qp_resp.qp_num);
+ goto err_remove_mmap_entries;
+ }
+ }
+
+ ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num);
+
+ return 0;
+
+err_remove_mmap_entries:
+ efa_qp_user_mmap_entries_remove(qp);
+err_destroy_qp:
+ efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
+err_free_mapped:
+ if (qp->rq_size)
+ efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
+ qp->rq_size, DMA_TO_DEVICE);
+err_out:
+ atomic64_inc(&dev->stats.create_qp_err);
+ return err;
+}
+
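+/*
+ * Allowed state transitions for SRD (IB_QPT_DRIVER) QPs together with the
+ * required and optional attribute masks for each transition; this mirrors
+ * the role ib_modify_qp_is_ok() plays for UD QPs.
+ */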
+static const struct {
+ int valid;
+ enum ib_qp_attr_mask req_param;
+ enum ib_qp_attr_mask opt_param;
+} srd_qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
+ [IB_QPS_RESET] = {
+ [IB_QPS_RESET] = { .valid = 1 },
+ [IB_QPS_INIT] = {
+ .valid = 1,
+ .req_param = IB_QP_PKEY_INDEX |
+ IB_QP_PORT |
+ IB_QP_QKEY,
+ },
+ },
+ [IB_QPS_INIT] = {
+ [IB_QPS_RESET] = { .valid = 1 },
+ [IB_QPS_ERR] = { .valid = 1 },
+ [IB_QPS_INIT] = {
+ .valid = 1,
+ .opt_param = IB_QP_PKEY_INDEX |
+ IB_QP_PORT |
+ IB_QP_QKEY,
+ },
+ [IB_QPS_RTR] = {
+ .valid = 1,
+ .opt_param = IB_QP_PKEY_INDEX |
+ IB_QP_QKEY,
+ },
+ },
+ [IB_QPS_RTR] = {
+ [IB_QPS_RESET] = { .valid = 1 },
+ [IB_QPS_ERR] = { .valid = 1 },
+ [IB_QPS_RTS] = {
+ .valid = 1,
+ .req_param = IB_QP_SQ_PSN,
+ .opt_param = IB_QP_CUR_STATE |
+ IB_QP_QKEY |
+ IB_QP_RNR_RETRY,
+
+ }
+ },
+ [IB_QPS_RTS] = {
+ [IB_QPS_RESET] = { .valid = 1 },
+ [IB_QPS_ERR] = { .valid = 1 },
+ [IB_QPS_RTS] = {
+ .valid = 1,
+ .opt_param = IB_QP_CUR_STATE |
+ IB_QP_QKEY,
+ },
+ [IB_QPS_SQD] = {
+ .valid = 1,
+ .opt_param = IB_QP_EN_SQD_ASYNC_NOTIFY,
+ },
+ },
+ [IB_QPS_SQD] = {
+ [IB_QPS_RESET] = { .valid = 1 },
+ [IB_QPS_ERR] = { .valid = 1 },
+ [IB_QPS_RTS] = {
+ .valid = 1,
+ .opt_param = IB_QP_CUR_STATE |
+ IB_QP_QKEY,
+ },
+ [IB_QPS_SQD] = {
+ .valid = 1,
+ .opt_param = IB_QP_PKEY_INDEX |
+ IB_QP_QKEY,
+ }
+ },
+ [IB_QPS_SQE] = {
+ [IB_QPS_RESET] = { .valid = 1 },
+ [IB_QPS_ERR] = { .valid = 1 },
+ [IB_QPS_RTS] = {
+ .valid = 1,
+ .opt_param = IB_QP_CUR_STATE |
+ IB_QP_QKEY,
+ }
+ },
+ [IB_QPS_ERR] = {
+ [IB_QPS_RESET] = { .valid = 1 },
+ [IB_QPS_ERR] = { .valid = 1 },
+ }
+};
+
+static bool efa_modify_srd_qp_is_ok(enum ib_qp_state cur_state,
+ enum ib_qp_state next_state,
+ enum ib_qp_attr_mask mask)
+{
+ enum ib_qp_attr_mask req_param, opt_param;
+
+ if (mask & IB_QP_CUR_STATE &&
+ cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
+ cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
+ return false;
+
+ if (!srd_qp_state_table[cur_state][next_state].valid)
+ return false;
+
+ req_param = srd_qp_state_table[cur_state][next_state].req_param;
+ opt_param = srd_qp_state_table[cur_state][next_state].opt_param;
+
+ if ((mask & req_param) != req_param)
+ return false;
+
+ if (mask & ~(req_param | opt_param | IB_QP_STATE))
+ return false;
+
+ return true;
+}
+
+static int efa_modify_qp_validate(struct efa_dev *dev, struct efa_qp *qp,
+ struct ib_qp_attr *qp_attr, int qp_attr_mask,
+ enum ib_qp_state cur_state,
+ enum ib_qp_state new_state)
+{
+ int err;
+
+#define EFA_MODIFY_QP_SUPP_MASK \
+ (IB_QP_STATE | IB_QP_CUR_STATE | IB_QP_EN_SQD_ASYNC_NOTIFY | \
+ IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY | IB_QP_SQ_PSN | \
+ IB_QP_RNR_RETRY)
+
+ if (qp_attr_mask & ~EFA_MODIFY_QP_SUPP_MASK) {
+ ibdev_dbg(&dev->ibdev,
+ "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
+ qp_attr_mask, EFA_MODIFY_QP_SUPP_MASK);
+ return -EOPNOTSUPP;
+ }
+
+ if (qp->ibqp.qp_type == IB_QPT_DRIVER)
+ err = !efa_modify_srd_qp_is_ok(cur_state, new_state,
+ qp_attr_mask);
+ else
+ err = !ib_modify_qp_is_ok(cur_state, new_state, IB_QPT_UD,
+ qp_attr_mask);
+
+ if (err) {
+ ibdev_dbg(&dev->ibdev, "Invalid modify QP parameters\n");
+ return -EINVAL;
+ }
+
+ if ((qp_attr_mask & IB_QP_PORT) && qp_attr->port_num != 1) {
+ ibdev_dbg(&dev->ibdev, "Can't change port num\n");
+ return -EOPNOTSUPP;
+ }
+
+ if ((qp_attr_mask & IB_QP_PKEY_INDEX) && qp_attr->pkey_index) {
+ ibdev_dbg(&dev->ibdev, "Can't change pkey index\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_udata *udata)
+{
+ struct efa_dev *dev = to_edev(ibqp->device);
+ struct efa_com_modify_qp_params params = {};
+ struct efa_qp *qp = to_eqp(ibqp);
+ enum ib_qp_state cur_state;
+ enum ib_qp_state new_state;
+ int err;
+
+ if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
+ if (udata->inlen &&
+ !ib_is_udata_cleared(udata, 0, udata->inlen)) {
+ ibdev_dbg(&dev->ibdev,
+ "Incompatible ABI params, udata not cleared\n");
+ return -EINVAL;
+ }
+
+ cur_state = qp_attr_mask & IB_QP_CUR_STATE ? qp_attr->cur_qp_state :
+ qp->state;
+ new_state = qp_attr_mask & IB_QP_STATE ? qp_attr->qp_state : cur_state;
+
+ err = efa_modify_qp_validate(dev, qp, qp_attr, qp_attr_mask, cur_state,
+ new_state);
+ if (err)
+ return err;
+
+ params.qp_handle = qp->qp_handle;
+
+ if (qp_attr_mask & IB_QP_STATE) {
+ EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QP_STATE,
+ 1);
+ EFA_SET(&params.modify_mask,
+ EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE, 1);
+ params.cur_qp_state = cur_state;
+ params.qp_state = new_state;
+ }
+
+ if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
+ EFA_SET(&params.modify_mask,
+ EFA_ADMIN_MODIFY_QP_CMD_SQ_DRAINED_ASYNC_NOTIFY, 1);
+ params.sq_drained_async_notify = qp_attr->en_sqd_async_notify;
+ }
+
+ if (qp_attr_mask & IB_QP_QKEY) {
+ EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QKEY, 1);
+ params.qkey = qp_attr->qkey;
+ }
+
+ if (qp_attr_mask & IB_QP_SQ_PSN) {
+ EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_SQ_PSN, 1);
+ params.sq_psn = qp_attr->sq_psn;
+ }
+
+ if (qp_attr_mask & IB_QP_RNR_RETRY) {
+ EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_RNR_RETRY,
+ 1);
+ params.rnr_retry = qp_attr->rnr_retry;
+ }
+
+ err = efa_com_modify_qp(&dev->edev, &params);
+ if (err)
+ return err;
+
+ qp->state = new_state;
+
+ return 0;
+}
+
+static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
+{
+ struct efa_com_destroy_cq_params params = { .cq_idx = cq_idx };
+
+ return efa_com_destroy_cq(&dev->edev, &params);
+}
+
+static void efa_cq_user_mmap_entries_remove(struct efa_cq *cq)
+{
+ rdma_user_mmap_entry_remove(cq->db_mmap_entry);
+ rdma_user_mmap_entry_remove(cq->mmap_entry);
+}
+
+int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+{
+ struct efa_dev *dev = to_edev(ibcq->device);
+ struct efa_cq *cq = to_ecq(ibcq);
+
+ ibdev_dbg(&dev->ibdev,
+ "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
+ cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);
+
+ efa_destroy_cq_idx(dev, cq->cq_idx);
+ efa_cq_user_mmap_entries_remove(cq);
+ if (cq->eq) {
+ xa_erase(&dev->cqs_xa, cq->cq_idx);
+ synchronize_irq(cq->eq->irq.irqn);
+ }
+
+ if (cq->umem)
+ ib_umem_release(cq->umem);
+ else
+ efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, DMA_FROM_DEVICE);
+ return 0;
+}
+
+static struct efa_eq *efa_vec2eq(struct efa_dev *dev, int vec)
+{
+ return &dev->eqs[vec];
+}
+
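+/*
+ * Expose the CQ buffer and, when the device reports a valid doorbell
+ * offset, the CQ doorbell page to user space through rdma_user_mmap
+ * entries.
+ */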
+static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
+ struct efa_ibv_create_cq_resp *resp,
+ bool db_valid)
+{
+ resp->q_mmap_size = cq->size;
+ cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
+ virt_to_phys(cq->cpu_addr),
+ cq->size, EFA_MMAP_DMA_PAGE,
+ &resp->q_mmap_key);
+ if (!cq->mmap_entry)
+ return -ENOMEM;
+
+ if (db_valid) {
+ cq->db_mmap_entry =
+ efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
+ dev->db_bar_addr + resp->db_off,
+ PAGE_SIZE, EFA_MMAP_IO_NC,
+ &resp->db_mmap_key);
+ if (!cq->db_mmap_entry) {
+ rdma_user_mmap_entry_remove(cq->mmap_entry);
+ return -ENOMEM;
+ }
+
+ resp->db_off &= ~PAGE_MASK;
+ resp->comp_mask |= EFA_CREATE_CQ_RESP_DB_OFF;
+ }
+
+ return 0;
+}
+
+int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct ib_umem *umem, struct uverbs_attr_bundle *attrs)
+{
+ struct ib_udata *udata = &attrs->driver_udata;
+ struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct efa_ucontext, ibucontext);
+ struct efa_com_create_cq_params params = {};
+ struct efa_ibv_create_cq_resp resp = {};
+ struct efa_com_create_cq_result result;
+ struct ib_device *ibdev = ibcq->device;
+ struct efa_dev *dev = to_edev(ibdev);
+ struct efa_ibv_create_cq cmd = {};
+ struct efa_cq *cq = to_ecq(ibcq);
+ int entries = attr->cqe;
+ bool set_src_addr;
+ int err;
+
+ ibdev_dbg(ibdev, "create_cq entries %d\n", entries);
+
+ if (attr->flags)
+ return -EOPNOTSUPP;
+
+ if (entries < 1 || entries > dev->dev_attr.max_cq_depth) {
+ ibdev_dbg(ibdev,
+ "cq: requested entries[%u] non-positive or greater than max[%u]\n",
+ entries, dev->dev_attr.max_cq_depth);
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ if (offsetofend(typeof(cmd), num_sub_cqs) > udata->inlen) {
+ ibdev_dbg(ibdev,
+ "Incompatible ABI params, no input udata\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ if (udata->inlen > sizeof(cmd) &&
+ !ib_is_udata_cleared(udata, sizeof(cmd),
+ udata->inlen - sizeof(cmd))) {
+ ibdev_dbg(ibdev,
+ "Incompatible ABI params, unknown fields in udata\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ err = ib_copy_from_udata(&cmd, udata,
+ min(sizeof(cmd), udata->inlen));
+ if (err) {
+ ibdev_dbg(ibdev, "Cannot copy udata for create_cq\n");
+ goto err_out;
+ }
+
+ if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_58)) {
+ ibdev_dbg(ibdev,
+ "Incompatible ABI params, unknown fields in udata\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ set_src_addr = !!(cmd.flags & EFA_CREATE_CQ_WITH_SGID);
+ if ((cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc_ex)) &&
+ (set_src_addr ||
+ cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc))) {
+ ibdev_dbg(ibdev,
+ "Invalid entry size [%u]\n", cmd.cq_entry_size);
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ if (cmd.num_sub_cqs != dev->dev_attr.sub_cqs_per_cq) {
+ ibdev_dbg(ibdev,
+ "Invalid number of sub cqs[%u] expected[%u]\n",
+ cmd.num_sub_cqs, dev->dev_attr.sub_cqs_per_cq);
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ cq->ucontext = ucontext;
+ cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);
+
+ if (umem) {
+ if (umem->length < cq->size) {
+ ibdev_dbg(&dev->ibdev, "External memory too small\n");
+ err = -EINVAL;
+ goto err_free_mem;
+ }
+
+ if (!ib_umem_is_contiguous(umem)) {
+ ibdev_dbg(&dev->ibdev, "Non contiguous CQ unsupported\n");
+ err = -EINVAL;
+ goto err_free_mem;
+ }
+
+ cq->cpu_addr = NULL;
+ cq->dma_addr = ib_umem_start_dma_addr(umem);
+ cq->umem = umem;
+ } else {
+ cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
+ DMA_FROM_DEVICE);
+ if (!cq->cpu_addr) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ }
+
+ params.uarn = cq->ucontext->uarn;
+ params.sub_cq_depth = entries;
+ params.dma_addr = cq->dma_addr;
+ params.entry_size_in_bytes = cmd.cq_entry_size;
+ params.num_sub_cqs = cmd.num_sub_cqs;
+ params.set_src_addr = set_src_addr;
+ if (cmd.flags & EFA_CREATE_CQ_WITH_COMPLETION_CHANNEL) {
+ cq->eq = efa_vec2eq(dev, attr->comp_vector);
+ params.eqn = cq->eq->eeq.eqn;
+ params.interrupt_mode_enabled = true;
+ }
+
+ err = efa_com_create_cq(&dev->edev, &params, &result);
+ if (err)
+ goto err_free_mem;
+
+ resp.db_off = result.db_off;
+ resp.cq_idx = result.cq_idx;
+ cq->cq_idx = result.cq_idx;
+ cq->ibcq.cqe = result.actual_depth;
+ WARN_ON_ONCE(entries != result.actual_depth);
+
+ if (!umem)
+ err = cq_mmap_entries_setup(dev, cq, &resp, result.db_valid);
+
+ if (err) {
+ ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n",
+ cq->cq_idx);
+ goto err_destroy_cq;
+ }
+
+ if (cq->eq) {
+ err = xa_err(xa_store(&dev->cqs_xa, cq->cq_idx, cq, GFP_KERNEL));
+ if (err) {
+ ibdev_dbg(ibdev, "Failed to store cq[%u] in xarray\n",
+ cq->cq_idx);
+ goto err_remove_mmap;
+ }
+ }
+
+ if (udata->outlen) {
+ err = ib_copy_to_udata(udata, &resp,
+ min(sizeof(resp), udata->outlen));
+ if (err) {
+ ibdev_dbg(ibdev,
+ "Failed to copy udata for create_cq\n");
+ goto err_xa_erase;
+ }
+ }
+
+ ibdev_dbg(ibdev, "Created cq[%d], cq depth[%u]. dma[%pad] virt[0x%p]\n",
+ cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr);
+
+ return 0;
+
+err_xa_erase:
+ if (cq->eq)
+ xa_erase(&dev->cqs_xa, cq->cq_idx);
+err_remove_mmap:
+ efa_cq_user_mmap_entries_remove(cq);
+err_destroy_cq:
+ efa_destroy_cq_idx(dev, cq->cq_idx);
+err_free_mem:
+ if (umem)
+ ib_umem_release(umem);
+ else
+ efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, DMA_FROM_DEVICE);
+
+err_out:
+ atomic64_inc(&dev->stats.create_cq_err);
+ return err;
+}
+
+int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ return efa_create_cq_umem(ibcq, attr, NULL, attrs);
+}
+
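+/*
+ * Walk the umem in hp_shift-sized blocks and record each block's DMA
+ * address in the caller-supplied page list.
+ */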
+static int umem_to_page_list(struct efa_dev *dev,
+ struct ib_umem *umem,
+ u64 *page_list,
+ u32 hp_cnt,
+ u8 hp_shift)
+{
+ u32 pages_in_hp = BIT(hp_shift - PAGE_SHIFT);
+ struct ib_block_iter biter;
+ unsigned int hp_idx = 0;
+
+ ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
+ hp_cnt, pages_in_hp);
+
+ rdma_umem_for_each_dma_block(umem, &biter, BIT(hp_shift))
+ page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);
+
+ return 0;
+}
+
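+/*
+ * Build a scatterlist over the physical pages backing a vmalloc'ed buffer
+ * so that an indirect PBL can be DMA-mapped page by page.
+ */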
+static struct scatterlist *efa_vmalloc_buf_to_sg(u64 *buf, int page_cnt)
+{
+ struct scatterlist *sglist;
+ struct page *pg;
+ int i;
+
+ sglist = kmalloc_array(page_cnt, sizeof(*sglist), GFP_KERNEL);
+ if (!sglist)
+ return NULL;
+ sg_init_table(sglist, page_cnt);
+ for (i = 0; i < page_cnt; i++) {
+ pg = vmalloc_to_page(buf);
+ if (!pg)
+ goto err;
+ sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
+ buf += PAGE_SIZE / sizeof(*buf);
+ }
+ return sglist;
+
+err:
+ kfree(sglist);
+ return NULL;
+}
+
+/*
+ * create a chunk list of the dma addresses of the physical pages from the
+ * supplied scatter gather list
+ */
+static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl)
+{
+ struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
+ int page_cnt = pbl->phys.indirect.pbl_buf_size_in_pages;
+ struct scatterlist *pages_sgl = pbl->phys.indirect.sgl;
+ unsigned int chunk_list_size, chunk_idx, payload_idx;
+ int sg_dma_cnt = pbl->phys.indirect.sg_dma_cnt;
+ struct efa_com_ctrl_buff_info *ctrl_buf;
+ u64 *cur_chunk_buf, *prev_chunk_buf;
+ struct ib_block_iter biter;
+ dma_addr_t dma_addr;
+ int i;
+
+ /* allocate a chunk list that consists of 4KB chunks */
+ chunk_list_size = DIV_ROUND_UP(page_cnt, EFA_PTRS_PER_CHUNK);
+
+ chunk_list->size = chunk_list_size;
+ chunk_list->chunks = kcalloc(chunk_list_size,
+ sizeof(*chunk_list->chunks),
+ GFP_KERNEL);
+ if (!chunk_list->chunks)
+ return -ENOMEM;
+
+ ibdev_dbg(&dev->ibdev,
+ "chunk_list_size[%u] - pages[%u]\n", chunk_list_size,
+ page_cnt);
+
+ /* allocate chunk buffers: */
+ for (i = 0; i < chunk_list_size; i++) {
+ chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL);
+ if (!chunk_list->chunks[i].buf)
+ goto chunk_list_dealloc;
+
+ chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE;
+ }
+ chunk_list->chunks[chunk_list_size - 1].length =
+ ((page_cnt % EFA_PTRS_PER_CHUNK) * EFA_CHUNK_PAYLOAD_PTR_SIZE) +
+ EFA_CHUNK_PTR_SIZE;
+
+ /* copy the dma addresses of the sg list pages into the chunks: */
+ chunk_idx = 0;
+ payload_idx = 0;
+ cur_chunk_buf = chunk_list->chunks[0].buf;
+ rdma_for_each_block(pages_sgl, &biter, sg_dma_cnt,
+ EFA_CHUNK_PAYLOAD_SIZE) {
+ cur_chunk_buf[payload_idx++] =
+ rdma_block_iter_dma_address(&biter);
+
+ if (payload_idx == EFA_PTRS_PER_CHUNK) {
+ chunk_idx++;
+ cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;
+ payload_idx = 0;
+ }
+ }
+
+ /* dma-map the chunks and fill in each chunk's next-chunk pointer */
+ for (i = chunk_list_size - 1; i >= 0; i--) {
+ dma_addr = dma_map_single(&dev->pdev->dev,
+ chunk_list->chunks[i].buf,
+ chunk_list->chunks[i].length,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
+ ibdev_err(&dev->ibdev,
+ "chunk[%u] dma_map_failed\n", i);
+ goto chunk_list_unmap;
+ }
+
+ chunk_list->chunks[i].dma_addr = dma_addr;
+ ibdev_dbg(&dev->ibdev,
+ "chunk[%u] mapped at [%pad]\n", i, &dma_addr);
+
+ if (!i)
+ break;
+
+ prev_chunk_buf = chunk_list->chunks[i - 1].buf;
+
+ ctrl_buf = (struct efa_com_ctrl_buff_info *)
+ &prev_chunk_buf[EFA_PTRS_PER_CHUNK];
+ ctrl_buf->length = chunk_list->chunks[i].length;
+
+ efa_com_set_dma_addr(dma_addr,
+ &ctrl_buf->address.mem_addr_high,
+ &ctrl_buf->address.mem_addr_low);
+ }
+
+ return 0;
+
+chunk_list_unmap:
+ for (; i < chunk_list_size; i++) {
+ dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
+ chunk_list->chunks[i].length, DMA_TO_DEVICE);
+ }
+chunk_list_dealloc:
+ for (i = 0; i < chunk_list_size; i++)
+ kfree(chunk_list->chunks[i].buf);
+
+ kfree(chunk_list->chunks);
+ return -ENOMEM;
+}
+
+static void pbl_chunk_list_destroy(struct efa_dev *dev, struct pbl_context *pbl)
+{
+ struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
+ int i;
+
+ for (i = 0; i < chunk_list->size; i++) {
+ dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
+ chunk_list->chunks[i].length, DMA_TO_DEVICE);
+ kfree(chunk_list->chunks[i].buf);
+ }
+
+ kfree(chunk_list->chunks);
+}
+
+/* initialize pbl continuous mode: map pbl buffer to a dma address. */
+static int pbl_continuous_initialize(struct efa_dev *dev,
+ struct pbl_context *pbl)
+{
+ dma_addr_t dma_addr;
+
+ dma_addr = dma_map_single(&dev->pdev->dev, pbl->pbl_buf,
+ pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
+ ibdev_err(&dev->ibdev, "Unable to map pbl to DMA address\n");
+ return -ENOMEM;
+ }
+
+ pbl->phys.continuous.dma_addr = dma_addr;
+ ibdev_dbg(&dev->ibdev,
+ "pbl continuous - dma_addr = %pad, size[%u]\n",
+ &dma_addr, pbl->pbl_buf_size_in_bytes);
+
+ return 0;
+}
+
+/*
+ * initialize pbl indirect mode:
+ * create a chunk list out of the dma addresses of the physical pages of
+ * the pbl buffer.
+ */
+static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
+{
+ u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, EFA_CHUNK_PAYLOAD_SIZE);
+ struct scatterlist *sgl;
+ int sg_dma_cnt, err;
+
+ BUILD_BUG_ON(EFA_CHUNK_PAYLOAD_SIZE > PAGE_SIZE);
+ sgl = efa_vmalloc_buf_to_sg(pbl->pbl_buf, size_in_pages);
+ if (!sgl)
+ return -ENOMEM;
+
+ sg_dma_cnt = dma_map_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
+ if (!sg_dma_cnt) {
+ err = -EINVAL;
+ goto err_map;
+ }
+
+ pbl->phys.indirect.pbl_buf_size_in_pages = size_in_pages;
+ pbl->phys.indirect.sgl = sgl;
+ pbl->phys.indirect.sg_dma_cnt = sg_dma_cnt;
+ err = pbl_chunk_list_create(dev, pbl);
+ if (err) {
+ ibdev_dbg(&dev->ibdev,
+ "chunk_list creation failed[%d]\n", err);
+ goto err_chunk;
+ }
+
+ ibdev_dbg(&dev->ibdev,
+ "pbl indirect - size[%u], chunks[%u]\n",
+ pbl->pbl_buf_size_in_bytes,
+ pbl->phys.indirect.chunk_list.size);
+
+ return 0;
+
+err_chunk:
+ dma_unmap_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
+err_map:
+ kfree(sgl);
+ return err;
+}
+
+static void pbl_indirect_terminate(struct efa_dev *dev, struct pbl_context *pbl)
+{
+ pbl_chunk_list_destroy(dev, pbl);
+ dma_unmap_sg(&dev->pdev->dev, pbl->phys.indirect.sgl,
+ pbl->phys.indirect.pbl_buf_size_in_pages, DMA_TO_DEVICE);
+ kfree(pbl->phys.indirect.sgl);
+}
+
+/* create a page buffer list from a mapped user memory region */
+static int pbl_create(struct efa_dev *dev,
+ struct pbl_context *pbl,
+ struct ib_umem *umem,
+ int hp_cnt,
+ u8 hp_shift)
+{
+ int err;
+
+ pbl->pbl_buf_size_in_bytes = hp_cnt * EFA_CHUNK_PAYLOAD_PTR_SIZE;
+ pbl->pbl_buf = kvzalloc(pbl->pbl_buf_size_in_bytes, GFP_KERNEL);
+ if (!pbl->pbl_buf)
+ return -ENOMEM;
+
+ if (is_vmalloc_addr(pbl->pbl_buf)) {
+ pbl->physically_continuous = 0;
+ err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
+ hp_shift);
+ if (err)
+ goto err_free;
+
+ err = pbl_indirect_initialize(dev, pbl);
+ if (err)
+ goto err_free;
+ } else {
+ pbl->physically_continuous = 1;
+ err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
+ hp_shift);
+ if (err)
+ goto err_free;
+
+ err = pbl_continuous_initialize(dev, pbl);
+ if (err)
+ goto err_free;
+ }
+
+ ibdev_dbg(&dev->ibdev,
+ "user_pbl_created: user_pages[%u], continuous[%u]\n",
+ hp_cnt, pbl->physically_continuous);
+
+ return 0;
+
+err_free:
+ kvfree(pbl->pbl_buf);
+ return err;
+}
+
+static void pbl_destroy(struct efa_dev *dev, struct pbl_context *pbl)
+{
+ if (pbl->physically_continuous)
+ dma_unmap_single(&dev->pdev->dev, pbl->phys.continuous.dma_addr,
+ pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
+ else
+ pbl_indirect_terminate(dev, pbl);
+
+ kvfree(pbl->pbl_buf);
+}
+
+static int efa_create_inline_pbl(struct efa_dev *dev, struct efa_mr *mr,
+ struct efa_com_reg_mr_params *params)
+{
+ int err;
+
+ params->inline_pbl = 1;
+ err = umem_to_page_list(dev, mr->umem, params->pbl.inline_pbl_array,
+ params->page_num, params->page_shift);
+ if (err)
+ return err;
+
+ ibdev_dbg(&dev->ibdev,
+ "inline_pbl_array - pages[%u]\n", params->page_num);
+
+ return 0;
+}
+
+static int efa_create_pbl(struct efa_dev *dev,
+ struct pbl_context *pbl,
+ struct efa_mr *mr,
+ struct efa_com_reg_mr_params *params)
+{
+ int err;
+
+ err = pbl_create(dev, pbl, mr->umem, params->page_num,
+ params->page_shift);
+ if (err) {
+ ibdev_dbg(&dev->ibdev, "Failed to create pbl[%d]\n", err);
+ return err;
+ }
+
+ params->inline_pbl = 0;
+ params->indirect = !pbl->physically_continuous;
+ if (pbl->physically_continuous) {
+ params->pbl.pbl.length = pbl->pbl_buf_size_in_bytes;
+
+ efa_com_set_dma_addr(pbl->phys.continuous.dma_addr,
+ &params->pbl.pbl.address.mem_addr_high,
+ &params->pbl.pbl.address.mem_addr_low);
+ } else {
+ params->pbl.pbl.length =
+ pbl->phys.indirect.chunk_list.chunks[0].length;
+
+ efa_com_set_dma_addr(pbl->phys.indirect.chunk_list.chunks[0].dma_addr,
+ &params->pbl.pbl.address.mem_addr_high,
+ &params->pbl.pbl.address.mem_addr_low);
+ }
+
+ return 0;
+}
+
+static struct efa_mr *efa_alloc_mr(struct ib_pd *ibpd, int access_flags,
+ struct ib_udata *udata)
+{
+ struct efa_dev *dev = to_edev(ibpd->device);
+ int supp_access_flags;
+ struct efa_mr *mr;
+
+ if (udata && udata->inlen &&
+ !ib_is_udata_cleared(udata, 0, sizeof(udata->inlen))) {
+ ibdev_dbg(&dev->ibdev,
+ "Incompatible ABI params, udata not cleared\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ supp_access_flags =
+ IB_ACCESS_LOCAL_WRITE |
+ (EFA_DEV_CAP(dev, RDMA_READ) ? IB_ACCESS_REMOTE_READ : 0) |
+ (EFA_DEV_CAP(dev, RDMA_WRITE) ? IB_ACCESS_REMOTE_WRITE : 0);
+
+ access_flags &= ~IB_ACCESS_OPTIONAL;
+ if (access_flags & ~supp_access_flags) {
+ ibdev_dbg(&dev->ibdev,
+ "Unsupported access flags[%#x], supported[%#x]\n",
+ access_flags, supp_access_flags);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ return mr;
+}
+
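+/*
+ * Pick the best supported page size for the umem, describe it with either
+ * an inline PBL (small regions) or a chunked PBL, and issue the admin
+ * register-MR command.
+ */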
+static int efa_register_mr(struct ib_pd *ibpd, struct efa_mr *mr, u64 start,
+ u64 length, u64 virt_addr, int access_flags)
+{
+ struct efa_dev *dev = to_edev(ibpd->device);
+ struct efa_com_reg_mr_params params = {};
+ struct efa_com_reg_mr_result result = {};
+ struct pbl_context pbl;
+ unsigned int pg_sz;
+ int inline_size;
+ int err;
+
+ params.pd = to_epd(ibpd)->pdn;
+ params.iova = virt_addr;
+ params.mr_length_in_bytes = length;
+ params.permissions = access_flags;
+
+ pg_sz = ib_umem_find_best_pgsz(mr->umem,
+ dev->dev_attr.page_size_cap,
+ virt_addr);
+ if (!pg_sz) {
+ ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
+ dev->dev_attr.page_size_cap);
+ return -EOPNOTSUPP;
+ }
+
+ params.page_shift = order_base_2(pg_sz);
+ params.page_num = ib_umem_num_dma_blocks(mr->umem, pg_sz);
+
+ ibdev_dbg(&dev->ibdev,
+ "start %#llx length %#llx params.page_shift %u params.page_num %u\n",
+ start, length, params.page_shift, params.page_num);
+
+ inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array);
+ if (params.page_num <= inline_size) {
+ err = efa_create_inline_pbl(dev, mr, &params);
+ if (err)
+ return err;
+
+ err = efa_com_register_mr(&dev->edev, &params, &result);
+ if (err)
+ return err;
+ } else {
+ err = efa_create_pbl(dev, &pbl, mr, &params);
+ if (err)
+ return err;
+
+ err = efa_com_register_mr(&dev->edev, &params, &result);
+ pbl_destroy(dev, &pbl);
+
+ if (err)
+ return err;
+ }
+
+ mr->ibmr.lkey = result.l_key;
+ mr->ibmr.rkey = result.r_key;
+ mr->ibmr.length = length;
+ mr->ic_info.recv_ic_id = result.ic_info.recv_ic_id;
+ mr->ic_info.rdma_read_ic_id = result.ic_info.rdma_read_ic_id;
+ mr->ic_info.rdma_recv_ic_id = result.ic_info.rdma_recv_ic_id;
+ mr->ic_info.recv_ic_id_valid = result.ic_info.recv_ic_id_valid;
+ mr->ic_info.rdma_read_ic_id_valid = result.ic_info.rdma_read_ic_id_valid;
+ mr->ic_info.rdma_recv_ic_id_valid = result.ic_info.rdma_recv_ic_id_valid;
+ ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey);
+
+ return 0;
+}
+
+struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
+ u64 length, u64 virt_addr,
+ int fd, int access_flags,
+ struct ib_dmah *dmah,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct efa_dev *dev = to_edev(ibpd->device);
+ struct ib_umem_dmabuf *umem_dmabuf;
+ struct efa_mr *mr;
+ int err;
+
+ if (dmah) {
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+
+ mr = efa_alloc_mr(ibpd, access_flags, &attrs->driver_udata);
+ if (IS_ERR(mr)) {
+ err = PTR_ERR(mr);
+ goto err_out;
+ }
+
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(ibpd->device, start, length, fd,
+ access_flags);
+ if (IS_ERR(umem_dmabuf)) {
+ err = PTR_ERR(umem_dmabuf);
+ ibdev_dbg(&dev->ibdev, "Failed to get dmabuf umem[%pe]\n",
+ umem_dmabuf);
+ goto err_free;
+ }
+
+ mr->umem = &umem_dmabuf->umem;
+ err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags);
+ if (err)
+ goto err_release;
+
+ return &mr->ibmr;
+
+err_release:
+ ib_umem_release(mr->umem);
+err_free:
+ kfree(mr);
+err_out:
+ atomic64_inc(&dev->stats.reg_mr_err);
+ return ERR_PTR(err);
+}
+
+struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
+ struct ib_udata *udata)
+{
+ struct efa_dev *dev = to_edev(ibpd->device);
+ struct efa_mr *mr;
+ int err;
+
+ if (dmah) {
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+
+ mr = efa_alloc_mr(ibpd, access_flags, udata);
+ if (IS_ERR(mr)) {
+ err = PTR_ERR(mr);
+ goto err_out;
+ }
+
+ mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
+ if (IS_ERR(mr->umem)) {
+ err = PTR_ERR(mr->umem);
+ ibdev_dbg(&dev->ibdev,
+ "Failed to pin and map user space memory[%pe]\n",
+ mr->umem);
+ goto err_free;
+ }
+
+ err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags);
+ if (err)
+ goto err_release;
+
+ return &mr->ibmr;
+
+err_release:
+ ib_umem_release(mr->umem);
+err_free:
+ kfree(mr);
+err_out:
+ atomic64_inc(&dev->stats.reg_mr_err);
+ return ERR_PTR(err);
+}
+
+static int UVERBS_HANDLER(EFA_IB_METHOD_MR_QUERY)(struct uverbs_attr_bundle *attrs)
+{
+ struct ib_mr *ibmr = uverbs_attr_get_obj(attrs, EFA_IB_ATTR_QUERY_MR_HANDLE);
+ struct efa_mr *mr = to_emr(ibmr);
+ u16 ic_id_validity = 0;
+ int ret;
+
+ ret = uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_RECV_IC_ID,
+ &mr->ic_info.recv_ic_id, sizeof(mr->ic_info.recv_ic_id));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_RDMA_READ_IC_ID,
+ &mr->ic_info.rdma_read_ic_id, sizeof(mr->ic_info.rdma_read_ic_id));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_RDMA_RECV_IC_ID,
+ &mr->ic_info.rdma_recv_ic_id, sizeof(mr->ic_info.rdma_recv_ic_id));
+ if (ret)
+ return ret;
+
+ if (mr->ic_info.recv_ic_id_valid)
+ ic_id_validity |= EFA_QUERY_MR_VALIDITY_RECV_IC_ID;
+ if (mr->ic_info.rdma_read_ic_id_valid)
+ ic_id_validity |= EFA_QUERY_MR_VALIDITY_RDMA_READ_IC_ID;
+ if (mr->ic_info.rdma_recv_ic_id_valid)
+ ic_id_validity |= EFA_QUERY_MR_VALIDITY_RDMA_RECV_IC_ID;
+
+ return uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_IC_ID_VALIDITY,
+ &ic_id_validity, sizeof(ic_id_validity));
+}
+
+int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+{
+ struct efa_dev *dev = to_edev(ibmr->device);
+ struct efa_com_dereg_mr_params params;
+ struct efa_mr *mr = to_emr(ibmr);
+ int err;
+
+ ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey);
+
+ params.l_key = mr->ibmr.lkey;
+ err = efa_com_dereg_mr(&dev->edev, &params);
+ if (err)
+ return err;
+
+ ib_umem_release(mr->umem);
+ kfree(mr);
+
+ return 0;
+}
+
+int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err) {
+ ibdev_dbg(ibdev, "Couldn't query port err[%d]\n", err);
+ return err;
+ }
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+
+ return 0;
+}
+
+static int efa_dealloc_uar(struct efa_dev *dev, u16 uarn)
+{
+ struct efa_com_dealloc_uar_params params = {
+ .uarn = uarn,
+ };
+
+ return efa_com_dealloc_uar(&dev->edev, &params);
+}
+
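+/*
+ * Sets _attr_str to NULL (handshake ok) when the device lacks the
+ * attribute or userspace acknowledged it in comp_mask; otherwise sets it
+ * to the attribute name so the caller can report the failed handshake.
+ */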
+#define EFA_CHECK_USER_COMP(_dev, _comp_mask, _attr, _mask, _attr_str) \
+ (_attr_str = (!(_dev)->dev_attr._attr || ((_comp_mask) & (_mask))) ? \
+ NULL : #_attr)
+
+static int efa_user_comp_handshake(const struct ib_ucontext *ibucontext,
+ const struct efa_ibv_alloc_ucontext_cmd *cmd)
+{
+ struct efa_dev *dev = to_edev(ibucontext->device);
+ char *attr_str;
+
+ if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, max_tx_batch,
+ EFA_ALLOC_UCONTEXT_CMD_COMP_TX_BATCH, attr_str))
+ goto err;
+
+ if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, min_sq_depth,
+ EFA_ALLOC_UCONTEXT_CMD_COMP_MIN_SQ_WR,
+ attr_str))
+ goto err;
+
+ return 0;
+
+err:
+ ibdev_dbg(&dev->ibdev, "Userspace handshake failed for %s attribute\n",
+ attr_str);
+ return -EOPNOTSUPP;
+}
+
+int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
+{
+ struct efa_ucontext *ucontext = to_eucontext(ibucontext);
+ struct efa_dev *dev = to_edev(ibucontext->device);
+ struct efa_ibv_alloc_ucontext_resp resp = {};
+ struct efa_ibv_alloc_ucontext_cmd cmd = {};
+ struct efa_com_alloc_uar_result result;
+ int err;
+
+ /*
+ * it's fine if the driver does not know all request fields;
+ * we ack the input fields we do know in our response.
+ */
+
+ err = ib_copy_from_udata(&cmd, udata,
+ min(sizeof(cmd), udata->inlen));
+ if (err) {
+ ibdev_dbg(&dev->ibdev,
+ "Cannot copy udata for alloc_ucontext\n");
+ goto err_out;
+ }
+
+ err = efa_user_comp_handshake(ibucontext, &cmd);
+ if (err)
+ goto err_out;
+
+ err = efa_com_alloc_uar(&dev->edev, &result);
+ if (err)
+ goto err_out;
+
+ ucontext->uarn = result.uarn;
+
+ resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE;
+ resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH;
+ resp.sub_cqs_per_cq = dev->dev_attr.sub_cqs_per_cq;
+ resp.inline_buf_size = dev->dev_attr.inline_buf_size;
+ resp.max_llq_size = dev->dev_attr.max_llq_size;
+ resp.max_tx_batch = dev->dev_attr.max_tx_batch;
+ resp.min_sq_wr = dev->dev_attr.min_sq_depth;
+
+ err = ib_copy_to_udata(udata, &resp,
+ min(sizeof(resp), udata->outlen));
+ if (err)
+ goto err_dealloc_uar;
+
+ return 0;
+
+err_dealloc_uar:
+ efa_dealloc_uar(dev, result.uarn);
+err_out:
+ atomic64_inc(&dev->stats.alloc_ucontext_err);
+ return err;
+}
+
+void efa_dealloc_ucontext(struct ib_ucontext *ibucontext)
+{
+ struct efa_ucontext *ucontext = to_eucontext(ibucontext);
+ struct efa_dev *dev = to_edev(ibucontext->device);
+
+ efa_dealloc_uar(dev, ucontext->uarn);
+}
+
+void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);
+
+ kfree(entry);
+}
+
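+/*
+ * Resolve the vma's pgoff to an rdma_user_mmap entry and map it according
+ * to its type: non-cached or write-combined I/O for BAR pages, or plain
+ * kernel pages for DMA-able queue buffers.
+ */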
+static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
+ struct vm_area_struct *vma)
+{
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct efa_user_mmap_entry *entry;
+ unsigned long va;
+ int err = 0;
+ u64 pfn;
+
+ rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
+ if (!rdma_entry) {
+ ibdev_dbg(&dev->ibdev,
+ "pgoff[%#lx] does not have valid entry\n",
+ vma->vm_pgoff);
+ atomic64_inc(&dev->stats.mmap_err);
+ return -EINVAL;
+ }
+ entry = to_emmap(rdma_entry);
+
+ ibdev_dbg(&dev->ibdev,
+ "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
+ entry->address, rdma_entry->npages * PAGE_SIZE,
+ entry->mmap_flag);
+
+ pfn = entry->address >> PAGE_SHIFT;
+ switch (entry->mmap_flag) {
+ case EFA_MMAP_IO_NC:
+ err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
+ entry->rdma_entry.npages * PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot),
+ rdma_entry);
+ break;
+ case EFA_MMAP_IO_WC:
+ err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
+ entry->rdma_entry.npages * PAGE_SIZE,
+ pgprot_writecombine(vma->vm_page_prot),
+ rdma_entry);
+ break;
+ case EFA_MMAP_DMA_PAGE:
+ for (va = vma->vm_start; va < vma->vm_end;
+ va += PAGE_SIZE, pfn++) {
+ err = vm_insert_page(vma, va, pfn_to_page(pfn));
+ if (err)
+ break;
+ }
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ if (err) {
+ ibdev_dbg(
+ &dev->ibdev,
+ "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
+ entry->address, rdma_entry->npages * PAGE_SIZE,
+ entry->mmap_flag, err);
+ atomic64_inc(&dev->stats.mmap_err);
+ }
+
+ rdma_user_mmap_entry_put(rdma_entry);
+ return err;
+}
+
+int efa_mmap(struct ib_ucontext *ibucontext,
+ struct vm_area_struct *vma)
+{
+ struct efa_ucontext *ucontext = to_eucontext(ibucontext);
+ struct efa_dev *dev = to_edev(ibucontext->device);
+ size_t length = vma->vm_end - vma->vm_start;
+
+ ibdev_dbg(&dev->ibdev,
+ "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
+ vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
+
+ return __efa_mmap(dev, ucontext, vma);
+}
+
+static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
+{
+ struct efa_com_destroy_ah_params params = {
+ .ah = ah->ah,
+ .pdn = to_epd(ah->ibah.pd)->pdn,
+ };
+
+ return efa_com_destroy_ah(&dev->edev, &params);
+}
+
+int efa_create_ah(struct ib_ah *ibah,
+ struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
+ struct efa_dev *dev = to_edev(ibah->device);
+ struct efa_com_create_ah_params params = {};
+ struct efa_ibv_create_ah_resp resp = {};
+ struct efa_com_create_ah_result result;
+ struct efa_ah *ah = to_eah(ibah);
+ int err;
+
+ if (!(init_attr->flags & RDMA_CREATE_AH_SLEEPABLE)) {
+ ibdev_dbg(&dev->ibdev,
+ "Create address handle is not supported in atomic context\n");
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+
+ if (udata->inlen &&
+ !ib_is_udata_cleared(udata, 0, udata->inlen)) {
+ ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ memcpy(params.dest_addr, ah_attr->grh.dgid.raw,
+ sizeof(params.dest_addr));
+ params.pdn = to_epd(ibah->pd)->pdn;
+ err = efa_com_create_ah(&dev->edev, &params, &result);
+ if (err)
+ goto err_out;
+
+ memcpy(ah->id, ah_attr->grh.dgid.raw, sizeof(ah->id));
+ ah->ah = result.ah;
+
+ resp.efa_address_handle = result.ah;
+
+ if (udata->outlen) {
+ err = ib_copy_to_udata(udata, &resp,
+ min(sizeof(resp), udata->outlen));
+ if (err) {
+ ibdev_dbg(&dev->ibdev,
+ "Failed to copy udata for create_ah response\n");
+ goto err_destroy_ah;
+ }
+ }
+ ibdev_dbg(&dev->ibdev, "Created ah[%d]\n", ah->ah);
+
+ return 0;
+
+err_destroy_ah:
+ efa_ah_destroy(dev, ah);
+err_out:
+ atomic64_inc(&dev->stats.create_ah_err);
+ return err;
+}
+
+int efa_destroy_ah(struct ib_ah *ibah, u32 flags)
+{
+ struct efa_dev *dev = to_edev(ibah->pd->device);
+ struct efa_ah *ah = to_eah(ibah);
+
+ ibdev_dbg(&dev->ibdev, "Destroy ah[%d]\n", ah->ah);
+
+ if (!(flags & RDMA_DESTROY_AH_SLEEPABLE)) {
+ ibdev_dbg(&dev->ibdev,
+ "Destroy address handle is not supported in atomic context\n");
+ return -EOPNOTSUPP;
+ }
+
+ efa_ah_destroy(dev, ah);
+ return 0;
+}
+
+struct rdma_hw_stats *efa_alloc_hw_port_stats(struct ib_device *ibdev,
+ u32 port_num)
+{
+ return rdma_alloc_hw_stats_struct(efa_port_stats_descs,
+ ARRAY_SIZE(efa_port_stats_descs),
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+struct rdma_hw_stats *efa_alloc_hw_device_stats(struct ib_device *ibdev)
+{
+ return rdma_alloc_hw_stats_struct(efa_device_stats_descs,
+ ARRAY_SIZE(efa_device_stats_descs),
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static int efa_fill_device_stats(struct efa_dev *dev,
+ struct rdma_hw_stats *stats)
+{
+ struct efa_com_stats_admin *as = &dev->edev.aq.stats;
+ struct efa_stats *s = &dev->stats;
+
+ stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
+ stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
+ stats->value[EFA_CMDS_ERR] = atomic64_read(&as->cmd_err);
+ stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);
+
+ stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
+ stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->alloc_pd_err);
+ stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->create_qp_err);
+ stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->create_cq_err);
+ stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->reg_mr_err);
+ stats->value[EFA_ALLOC_UCONTEXT_ERR] =
+ atomic64_read(&s->alloc_ucontext_err);
+ stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->create_ah_err);
+ stats->value[EFA_MMAP_ERR] = atomic64_read(&s->mmap_err);
+
+ return ARRAY_SIZE(efa_device_stats_descs);
+}
+
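+/*
+ * Port stats are fetched from the device one category at a time (basic,
+ * messages, RDMA read, optional RDMA write, network) via admin commands.
+ */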
+static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats,
+ u32 port_num)
+{
+ struct efa_com_get_stats_params params = {};
+ union efa_com_get_stats_result result;
+ struct efa_com_rdma_write_stats *rws;
+ struct efa_com_rdma_read_stats *rrs;
+ struct efa_com_messages_stats *ms;
+ struct efa_com_network_stats *ns;
+ struct efa_com_basic_stats *bs;
+ int err;
+
+ params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;
+ params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC;
+
+ err = efa_com_get_stats(&dev->edev, &params, &result);
+ if (err)
+ return err;
+
+ bs = &result.basic_stats;
+ stats->value[EFA_TX_BYTES] = bs->tx_bytes;
+ stats->value[EFA_TX_PKTS] = bs->tx_pkts;
+ stats->value[EFA_RX_BYTES] = bs->rx_bytes;
+ stats->value[EFA_RX_PKTS] = bs->rx_pkts;
+ stats->value[EFA_RX_DROPS] = bs->rx_drops;
+
+ params.type = EFA_ADMIN_GET_STATS_TYPE_MESSAGES;
+ err = efa_com_get_stats(&dev->edev, &params, &result);
+ if (err)
+ return err;
+
+ ms = &result.messages_stats;
+ stats->value[EFA_SEND_BYTES] = ms->send_bytes;
+ stats->value[EFA_SEND_WRS] = ms->send_wrs;
+ stats->value[EFA_RECV_BYTES] = ms->recv_bytes;
+ stats->value[EFA_RECV_WRS] = ms->recv_wrs;
+
+ params.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_READ;
+ err = efa_com_get_stats(&dev->edev, &params, &result);
+ if (err)
+ return err;
+
+ rrs = &result.rdma_read_stats;
+ stats->value[EFA_RDMA_READ_WRS] = rrs->read_wrs;
+ stats->value[EFA_RDMA_READ_BYTES] = rrs->read_bytes;
+ stats->value[EFA_RDMA_READ_WR_ERR] = rrs->read_wr_err;
+ stats->value[EFA_RDMA_READ_RESP_BYTES] = rrs->read_resp_bytes;
+
+ if (EFA_DEV_CAP(dev, RDMA_WRITE)) {
+ params.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE;
+ err = efa_com_get_stats(&dev->edev, &params, &result);
+ if (err)
+ return err;
+
+ rws = &result.rdma_write_stats;
+ stats->value[EFA_RDMA_WRITE_WRS] = rws->write_wrs;
+ stats->value[EFA_RDMA_WRITE_BYTES] = rws->write_bytes;
+ stats->value[EFA_RDMA_WRITE_WR_ERR] = rws->write_wr_err;
+ stats->value[EFA_RDMA_WRITE_RECV_BYTES] = rws->write_recv_bytes;
+ }
+
+ params.type = EFA_ADMIN_GET_STATS_TYPE_NETWORK;
+ err = efa_com_get_stats(&dev->edev, &params, &result);
+ if (err)
+ return err;
+
+ ns = &result.network_stats;
+ stats->value[EFA_RETRANS_BYTES] = ns->retrans_bytes;
+ stats->value[EFA_RETRANS_PKTS] = ns->retrans_pkts;
+ stats->value[EFA_RETRANS_TIMEOUT_EVENS] = ns->retrans_timeout_events;
+ stats->value[EFA_UNRESPONSIVE_REMOTE_EVENTS] = ns->unresponsive_remote_events;
+ stats->value[EFA_IMPAIRED_REMOTE_CONN_EVENTS] = ns->impaired_remote_conn_events;
+
+ return ARRAY_SIZE(efa_port_stats_descs);
+}
+
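+/* A port_num of 0 requests device-wide counters; otherwise per-port stats. */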
+int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u32 port_num, int index)
+{
+ if (port_num)
+ return efa_fill_port_stats(to_edev(ibdev), stats, port_num);
+ else
+ return efa_fill_device_stats(to_edev(ibdev), stats);
+}
+
+enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
+ u32 port_num)
+{
+ return IB_LINK_LAYER_UNSPECIFIED;
+}
+
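+/*
+ * Vendor-specific MR query method exposing the interconnect id (IC id)
+ * mappings associated with a memory region.
+ */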
+DECLARE_UVERBS_NAMED_METHOD(EFA_IB_METHOD_MR_QUERY,
+ UVERBS_ATTR_IDR(EFA_IB_ATTR_QUERY_MR_HANDLE,
+ UVERBS_OBJECT_MR,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_IC_ID_VALIDITY,
+ UVERBS_ATTR_TYPE(u16),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_RECV_IC_ID,
+ UVERBS_ATTR_TYPE(u16),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_RDMA_READ_IC_ID,
+ UVERBS_ATTR_TYPE(u16),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_RDMA_RECV_IC_ID,
+ UVERBS_ATTR_TYPE(u16),
+ UA_MANDATORY));
+
+ADD_UVERBS_METHODS(efa_mr,
+ UVERBS_OBJECT_MR,
+ &UVERBS_METHOD(EFA_IB_METHOD_MR_QUERY));
+
+const struct uapi_definition efa_uapi_defs[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_MR,
+ &efa_mr),
+ {},
+};
diff --git a/drivers/infiniband/hw/erdma/Kconfig b/drivers/infiniband/hw/erdma/Kconfig
new file mode 100644
index 000000000000..267fc1f3c42a
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INFINIBAND_ERDMA
+ tristate "Alibaba Elastic RDMA Adapter (ERDMA) support"
+ depends on PCI_MSI && 64BIT
+ depends on INFINIBAND_ADDR_TRANS
+ depends on INFINIBAND_USER_ACCESS
+ help
+ This is an RDMA driver for the Alibaba Elastic RDMA Adapter (ERDMA),
+ which supports RDMA features in the Alibaba cloud environment.
+
+ To compile this driver as a module, choose M here. The module will
+ be called erdma.
diff --git a/drivers/infiniband/hw/erdma/Makefile b/drivers/infiniband/hw/erdma/Makefile
new file mode 100644
index 000000000000..51d2ef91905a
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_INFINIBAND_ERDMA) := erdma.o
+
+erdma-y := erdma_cm.o erdma_main.o erdma_cmdq.o erdma_cq.o erdma_verbs.o erdma_qp.o erdma_eq.o
diff --git a/drivers/infiniband/hw/erdma/erdma.h b/drivers/infiniband/hw/erdma/erdma.h
new file mode 100644
index 000000000000..2a023b99f992
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma.h
@@ -0,0 +1,283 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#ifndef __ERDMA_H__
+#define __ERDMA_H__
+
+#include <linux/bitfield.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/xarray.h>
+#include <rdma/ib_verbs.h>
+
+#include "erdma_hw.h"
+
+#define DRV_MODULE_NAME "erdma"
+#define ERDMA_NODE_DESC "Elastic RDMA Adapter stack"
+
+struct erdma_eq {
+ void *qbuf;
+ dma_addr_t qbuf_dma_addr;
+
+ spinlock_t lock;
+
+ u32 depth;
+
+ u16 ci;
+ u16 rsvd;
+
+ atomic64_t event_num;
+ atomic64_t notify_num;
+
+ void __iomem *db;
+ u64 *dbrec;
+ dma_addr_t dbrec_dma;
+};
+
+struct erdma_cmdq_sq {
+ void *qbuf;
+ dma_addr_t qbuf_dma_addr;
+
+ spinlock_t lock;
+
+ u32 depth;
+ u16 ci;
+ u16 pi;
+
+ u16 wqebb_cnt;
+
+ u64 *dbrec;
+ dma_addr_t dbrec_dma;
+};
+
+struct erdma_cmdq_cq {
+ void *qbuf;
+ dma_addr_t qbuf_dma_addr;
+
+ spinlock_t lock;
+
+ u32 depth;
+ u32 ci;
+ u32 cmdsn;
+
+ u64 *dbrec;
+ dma_addr_t dbrec_dma;
+
+ atomic64_t armed_num;
+};
+
+enum {
+ ERDMA_CMD_STATUS_INIT,
+ ERDMA_CMD_STATUS_ISSUED,
+ ERDMA_CMD_STATUS_FINISHED,
+ ERDMA_CMD_STATUS_TIMEOUT
+};
+
+struct erdma_comp_wait {
+ struct completion wait_event;
+ u32 cmd_status;
+ u32 ctx_id;
+ u16 sq_pi;
+ u8 comp_status;
+ u8 rsvd;
+ u32 comp_data[4];
+};
+
+enum {
+ ERDMA_CMDQ_STATE_OK_BIT = 0,
+ ERDMA_CMDQ_STATE_TIMEOUT_BIT = 1,
+ ERDMA_CMDQ_STATE_CTX_ERR_BIT = 2,
+};
+
+#define ERDMA_CMDQ_TIMEOUT_MS 15000
+#define ERDMA_REG_ACCESS_WAIT_MS 20
+#define ERDMA_WAIT_DEV_DONE_CNT 500
+
+struct erdma_cmdq {
+ unsigned long *comp_wait_bitmap;
+ struct erdma_comp_wait *wait_pool;
+ spinlock_t lock;
+
+ struct erdma_cmdq_sq sq;
+ struct erdma_cmdq_cq cq;
+ struct erdma_eq eq;
+
+ unsigned long state;
+
+ struct semaphore credits;
+ u16 max_outstandings;
+};
+
+#define COMPROMISE_CC ERDMA_CC_CUBIC
+enum erdma_cc_alg {
+ ERDMA_CC_NEWRENO = 0,
+ ERDMA_CC_CUBIC,
+ ERDMA_CC_HPCC_RTT,
+ ERDMA_CC_HPCC_ECN,
+ ERDMA_CC_HPCC_INT,
+ ERDMA_CC_METHODS_NUM
+};
+
+struct erdma_devattr {
+ u32 fw_version;
+
+ unsigned char peer_addr[ETH_ALEN];
+ unsigned long cap_flags;
+
+ int numa_node;
+ enum erdma_cc_alg cc;
+ u32 irq_num;
+
+ u32 max_qp;
+ u32 max_send_wr;
+ u32 max_recv_wr;
+ u32 max_ord;
+ u32 max_ird;
+
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 max_sge_rd;
+ u32 max_cq;
+ u32 max_cqe;
+ u64 max_mr_size;
+ u32 max_mr;
+ u32 max_pd;
+ u32 max_mw;
+ u32 max_gid;
+ u32 max_ah;
+ u32 local_dma_key;
+};
+
+#define ERDMA_IRQNAME_SIZE 50
+
+struct erdma_irq {
+ char name[ERDMA_IRQNAME_SIZE];
+ u32 msix_vector;
+ cpumask_t affinity_hint_mask;
+};
+
+struct erdma_eq_cb {
+ bool ready;
+ void *dev; /* All EQs use this fields to get erdma_dev struct */
+ struct erdma_irq irq;
+ struct erdma_eq eq;
+ struct tasklet_struct tasklet;
+};
+
+struct erdma_resource_cb {
+ unsigned long *bitmap;
+ spinlock_t lock;
+ u32 next_alloc_idx;
+ u32 max_cap;
+};
+
+enum {
+ ERDMA_RES_TYPE_PD = 0,
+ ERDMA_RES_TYPE_STAG_IDX = 1,
+ ERDMA_RES_TYPE_AH = 2,
+ ERDMA_RES_CNT = 3,
+};
+
+struct erdma_dev {
+ struct ib_device ibdev;
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct notifier_block netdev_nb;
+ struct workqueue_struct *reflush_wq;
+
+ resource_size_t func_bar_addr;
+ resource_size_t func_bar_len;
+ u8 __iomem *func_bar;
+
+ struct erdma_devattr attrs;
+ u32 mtu;
+
+ /* cmdq and aeq use the same msix vector */
+ struct erdma_irq comm_irq;
+ struct erdma_cmdq cmdq;
+ struct erdma_eq aeq;
+ struct erdma_eq_cb ceqs[ERDMA_NUM_MSIX_VEC - 1];
+
+ spinlock_t lock;
+ struct erdma_resource_cb res_cb[ERDMA_RES_CNT];
+ struct xarray qp_xa;
+ struct xarray cq_xa;
+
+ u32 next_alloc_qpn;
+ u32 next_alloc_cqn;
+
+ atomic_t num_ctx;
+ struct list_head cep_list;
+
+ struct dma_pool *db_pool;
+ struct dma_pool *resp_pool;
+ enum erdma_proto_type proto;
+};
+
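+/* Queue depths are powers of two, so index wrapping reduces to a mask. */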
+static inline void *get_queue_entry(void *qbuf, u32 idx, u32 depth, u32 shift)
+{
+ idx &= (depth - 1);
+
+ return qbuf + (idx << shift);
+}
+
+static inline struct erdma_dev *to_edev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct erdma_dev, ibdev);
+}
+
+static inline u32 erdma_reg_read32(struct erdma_dev *dev, u32 reg)
+{
+ return readl(dev->func_bar + reg);
+}
+
+static inline u64 erdma_reg_read64(struct erdma_dev *dev, u32 reg)
+{
+ return readq(dev->func_bar + reg);
+}
+
+static inline void erdma_reg_write32(struct erdma_dev *dev, u32 reg, u32 value)
+{
+ writel(value, dev->func_bar + reg);
+}
+
+static inline void erdma_reg_write64(struct erdma_dev *dev, u32 reg, u64 value)
+{
+ writeq(value, dev->func_bar + reg);
+}
+
+static inline u32 erdma_reg_read32_filed(struct erdma_dev *dev, u32 reg,
+ u32 filed_mask)
+{
+ u32 val = erdma_reg_read32(dev, reg);
+
+ return FIELD_GET(filed_mask, val);
+}
+
+#define ERDMA_GET(val, name) FIELD_GET(ERDMA_CMD_##name##_MASK, val)
+
+int erdma_cmdq_init(struct erdma_dev *dev);
+void erdma_finish_cmdq_init(struct erdma_dev *dev);
+void erdma_cmdq_destroy(struct erdma_dev *dev);
+
+void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op);
+int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
+ u64 *resp0, u64 *resp1, bool sleepable);
+void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq);
+
+int erdma_ceqs_init(struct erdma_dev *dev);
+void erdma_ceqs_uninit(struct erdma_dev *dev);
+void notify_eq(struct erdma_eq *eq);
+void *get_next_valid_eqe(struct erdma_eq *eq);
+
+int erdma_aeq_init(struct erdma_dev *dev);
+int erdma_eq_common_init(struct erdma_dev *dev, struct erdma_eq *eq, u32 depth);
+void erdma_eq_destroy(struct erdma_dev *dev, struct erdma_eq *eq);
+
+void erdma_aeq_event_handler(struct erdma_dev *dev);
+void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb);
+
+#endif
diff --git a/drivers/infiniband/hw/erdma/erdma_cm.c b/drivers/infiniband/hw/erdma/erdma_cm.c
new file mode 100644
index 000000000000..e0acc185e719
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_cm.c
@@ -0,0 +1,1431 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Fredy Neeser */
+/* Greg Joyce <greg@opengridcomputing.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+/* Copyright (c) 2017, Open Grid Computing, Inc. */
+
+#include <linux/workqueue.h>
+#include <trace/events/sock.h>
+
+#include "erdma.h"
+#include "erdma_cm.h"
+#include "erdma_verbs.h"
+
+static struct workqueue_struct *erdma_cm_wq;
+
+static void erdma_cm_llp_state_change(struct sock *sk);
+static void erdma_cm_llp_data_ready(struct sock *sk);
+static void erdma_cm_llp_error_report(struct sock *sk);
+
+static void erdma_sk_assign_cm_upcalls(struct sock *sk)
+{
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_state_change = erdma_cm_llp_state_change;
+ sk->sk_data_ready = erdma_cm_llp_data_ready;
+ sk->sk_error_report = erdma_cm_llp_error_report;
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void erdma_sk_save_upcalls(struct sock *sk)
+{
+ struct erdma_cep *cep = sk_to_cep(sk);
+
+ write_lock_bh(&sk->sk_callback_lock);
+ cep->sk_state_change = sk->sk_state_change;
+ cep->sk_data_ready = sk->sk_data_ready;
+ cep->sk_error_report = sk->sk_error_report;
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void erdma_sk_restore_upcalls(struct sock *sk, struct erdma_cep *cep)
+{
+ sk->sk_state_change = cep->sk_state_change;
+ sk->sk_data_ready = cep->sk_data_ready;
+ sk->sk_error_report = cep->sk_error_report;
+ sk->sk_user_data = NULL;
+}
+
+static void erdma_socket_disassoc(struct socket *s)
+{
+ struct sock *sk = s->sk;
+ struct erdma_cep *cep;
+
+ if (sk) {
+ write_lock_bh(&sk->sk_callback_lock);
+ cep = sk_to_cep(sk);
+ if (cep) {
+ erdma_sk_restore_upcalls(sk, cep);
+ erdma_cep_put(cep);
+ } else {
+ WARN_ON_ONCE(1);
+ }
+ write_unlock_bh(&sk->sk_callback_lock);
+ } else {
+ WARN_ON_ONCE(1);
+ }
+}
+
+static void erdma_cep_socket_assoc(struct erdma_cep *cep, struct socket *s)
+{
+ cep->sock = s;
+ erdma_cep_get(cep);
+ s->sk->sk_user_data = cep;
+
+ erdma_sk_save_upcalls(s->sk);
+ erdma_sk_assign_cm_upcalls(s->sk);
+}
+
+static void erdma_disassoc_listen_cep(struct erdma_cep *cep)
+{
+ if (cep->listen_cep) {
+ erdma_cep_put(cep->listen_cep);
+ cep->listen_cep = NULL;
+ }
+}
+
+static struct erdma_cep *erdma_cep_alloc(struct erdma_dev *dev)
+{
+ struct erdma_cep *cep = kzalloc(sizeof(*cep), GFP_KERNEL);
+ unsigned long flags;
+
+ if (!cep)
+ return NULL;
+
+ INIT_LIST_HEAD(&cep->listenq);
+ INIT_LIST_HEAD(&cep->devq);
+ INIT_LIST_HEAD(&cep->work_freelist);
+
+ kref_init(&cep->ref);
+ cep->state = ERDMA_EPSTATE_IDLE;
+ init_waitqueue_head(&cep->waitq);
+ spin_lock_init(&cep->lock);
+ cep->dev = dev;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&cep->devq, &dev->cep_list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return cep;
+}
+
+static void erdma_cm_free_work(struct erdma_cep *cep)
+{
+ struct list_head *w, *tmp;
+ struct erdma_cm_work *work;
+
+ list_for_each_safe(w, tmp, &cep->work_freelist) {
+ work = list_entry(w, struct erdma_cm_work, list);
+ list_del(&work->list);
+ kfree(work);
+ }
+}
+
+static void erdma_cancel_mpatimer(struct erdma_cep *cep)
+{
+ spin_lock_bh(&cep->lock);
+ if (cep->mpa_timer) {
+ if (cancel_delayed_work(&cep->mpa_timer->work)) {
+ erdma_cep_put(cep);
+ kfree(cep->mpa_timer);
+ }
+ cep->mpa_timer = NULL;
+ }
+ spin_unlock_bh(&cep->lock);
+}
+
+static void erdma_put_work(struct erdma_cm_work *work)
+{
+ INIT_LIST_HEAD(&work->list);
+ spin_lock_bh(&work->cep->lock);
+ list_add(&work->list, &work->cep->work_freelist);
+ spin_unlock_bh(&work->cep->lock);
+}
+
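+/*
+ * Serialize access to a CEP: sleep until the CEP is no longer marked in use,
+ * then claim it. Released by erdma_cep_set_free().
+ */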
+static void erdma_cep_set_inuse(struct erdma_cep *cep)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cep->lock, flags);
+ while (cep->in_use) {
+ spin_unlock_irqrestore(&cep->lock, flags);
+ wait_event_interruptible(cep->waitq, !cep->in_use);
+ if (signal_pending(current))
+ flush_signals(current);
+
+ spin_lock_irqsave(&cep->lock, flags);
+ }
+
+ cep->in_use = 1;
+ spin_unlock_irqrestore(&cep->lock, flags);
+}
+
+static void erdma_cep_set_free(struct erdma_cep *cep)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cep->lock, flags);
+ cep->in_use = 0;
+ spin_unlock_irqrestore(&cep->lock, flags);
+
+ wake_up(&cep->waitq);
+}
+
+static void __erdma_cep_dealloc(struct kref *ref)
+{
+ struct erdma_cep *cep = container_of(ref, struct erdma_cep, ref);
+ struct erdma_dev *dev = cep->dev;
+ unsigned long flags;
+
+ WARN_ON(cep->listen_cep);
+
+ kfree(cep->private_data);
+ kfree(cep->mpa.pdata);
+ spin_lock_bh(&cep->lock);
+ if (!list_empty(&cep->work_freelist))
+ erdma_cm_free_work(cep);
+ spin_unlock_bh(&cep->lock);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_del(&cep->devq);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ kfree(cep);
+}
+
+static struct erdma_cm_work *erdma_get_work(struct erdma_cep *cep)
+{
+ struct erdma_cm_work *work = NULL;
+
+ spin_lock_bh(&cep->lock);
+ if (!list_empty(&cep->work_freelist)) {
+ work = list_entry(cep->work_freelist.next, struct erdma_cm_work,
+ list);
+ list_del_init(&work->list);
+ }
+
+ spin_unlock_bh(&cep->lock);
+ return work;
+}
+
+static int erdma_cm_alloc_work(struct erdma_cep *cep, int num)
+{
+ struct erdma_cm_work *work;
+
+ while (num--) {
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work) {
+ if (!(list_empty(&cep->work_freelist)))
+ erdma_cm_free_work(cep);
+ return -ENOMEM;
+ }
+ work->cep = cep;
+ INIT_LIST_HEAD(&work->list);
+ list_add(&work->list, &cep->work_freelist);
+ }
+
+ return 0;
+}
+
+static int erdma_cm_upcall(struct erdma_cep *cep, enum iw_cm_event_type reason,
+ int status)
+{
+ struct iw_cm_event event;
+ struct iw_cm_id *cm_id;
+
+ memset(&event, 0, sizeof(event));
+ event.status = status;
+ event.event = reason;
+
+ if (reason == IW_CM_EVENT_CONNECT_REQUEST) {
+ event.provider_data = cep;
+ cm_id = cep->listen_cep->cm_id;
+
+ event.ird = cep->dev->attrs.max_ird;
+ event.ord = cep->dev->attrs.max_ord;
+ } else {
+ cm_id = cep->cm_id;
+ }
+
+ if (reason == IW_CM_EVENT_CONNECT_REQUEST ||
+ reason == IW_CM_EVENT_CONNECT_REPLY) {
+ u16 pd_len = be16_to_cpu(cep->mpa.hdr.params.pd_len);
+
+ if (pd_len && cep->mpa.pdata) {
+ event.private_data_len = pd_len;
+ event.private_data = cep->mpa.pdata;
+ }
+
+ getname_local(cep->sock, &event.local_addr);
+ getname_peer(cep->sock, &event.remote_addr);
+ }
+
+ return cm_id->event_handler(cm_id, &event);
+}
+
+void erdma_qp_cm_drop(struct erdma_qp *qp)
+{
+ struct erdma_cep *cep = qp->cep;
+
+ if (!qp->cep)
+ return;
+
+ erdma_cep_set_inuse(cep);
+
+ /* already closed. */
+ if (cep->state == ERDMA_EPSTATE_CLOSED)
+ goto out;
+
+ if (cep->cm_id) {
+ switch (cep->state) {
+ case ERDMA_EPSTATE_AWAIT_MPAREP:
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
+ -EINVAL);
+ break;
+ case ERDMA_EPSTATE_RDMA_MODE:
+ erdma_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);
+ break;
+ case ERDMA_EPSTATE_IDLE:
+ case ERDMA_EPSTATE_LISTENING:
+ case ERDMA_EPSTATE_CONNECTING:
+ case ERDMA_EPSTATE_AWAIT_MPAREQ:
+ case ERDMA_EPSTATE_RECVD_MPAREQ:
+ case ERDMA_EPSTATE_CLOSED:
+ default:
+ break;
+ }
+ cep->cm_id->rem_ref(cep->cm_id);
+ cep->cm_id = NULL;
+ erdma_cep_put(cep);
+ }
+ cep->state = ERDMA_EPSTATE_CLOSED;
+
+ if (cep->sock) {
+ erdma_socket_disassoc(cep->sock);
+ sock_release(cep->sock);
+ cep->sock = NULL;
+ }
+
+ if (cep->qp) {
+ cep->qp = NULL;
+ erdma_qp_put(qp);
+ }
+out:
+ erdma_cep_set_free(cep);
+}
+
+void erdma_cep_put(struct erdma_cep *cep)
+{
+ WARN_ON(kref_read(&cep->ref) < 1);
+ kref_put(&cep->ref, __erdma_cep_dealloc);
+}
+
+void erdma_cep_get(struct erdma_cep *cep)
+{
+ kref_get(&cep->ref);
+}
+
+static int erdma_send_mpareqrep(struct erdma_cep *cep, const void *pdata,
+ u8 pd_len)
+{
+ struct socket *s = cep->sock;
+ struct mpa_rr *rr = &cep->mpa.hdr;
+ struct kvec iov[3];
+ struct msghdr msg;
+ int iovec_num = 0;
+ int ret;
+ int mpa_len;
+
+ memset(&msg, 0, sizeof(msg));
+
+ rr->params.pd_len = cpu_to_be16(pd_len);
+
+ iov[iovec_num].iov_base = rr;
+ iov[iovec_num].iov_len = sizeof(*rr);
+ iovec_num++;
+ mpa_len = sizeof(*rr);
+
+ iov[iovec_num].iov_base = &cep->mpa.ext_data;
+ iov[iovec_num].iov_len = sizeof(cep->mpa.ext_data);
+ iovec_num++;
+ mpa_len += sizeof(cep->mpa.ext_data);
+
+ if (pd_len) {
+ iov[iovec_num].iov_base = (char *)pdata;
+ iov[iovec_num].iov_len = pd_len;
+ mpa_len += pd_len;
+ iovec_num++;
+ }
+
+ ret = kernel_sendmsg(s, &msg, iov, iovec_num, mpa_len);
+
+ return ret < 0 ? ret : 0;
+}
+
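+/* Receive up to @size bytes from the connection socket into a kernel buffer. */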
+static inline int ksock_recv(struct socket *sock, char *buf, size_t size,
+ int flags)
+{
+ struct kvec iov = { buf, size };
+ struct msghdr msg = { .msg_name = NULL, .msg_flags = flags };
+
+ return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
+}
+
+static int __recv_mpa_hdr(struct erdma_cep *cep, int hdr_rcvd, char *hdr,
+ int hdr_size, int *rcvd_out)
+{
+ struct socket *s = cep->sock;
+ int rcvd;
+
+ *rcvd_out = 0;
+ if (hdr_rcvd < hdr_size) {
+ rcvd = ksock_recv(s, hdr + hdr_rcvd, hdr_size - hdr_rcvd,
+ MSG_DONTWAIT);
+ if (rcvd == -EAGAIN)
+ return -EAGAIN;
+
+ if (rcvd <= 0)
+ return -ECONNABORTED;
+
+ hdr_rcvd += rcvd;
+ *rcvd_out = rcvd;
+
+ if (hdr_rcvd < hdr_size)
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void __mpa_rr_set_revision(__be16 *bits, u8 rev)
+{
+ *bits = (*bits & ~MPA_RR_MASK_REVISION) |
+ (cpu_to_be16(rev) & MPA_RR_MASK_REVISION);
+}
+
+static u8 __mpa_rr_revision(__be16 mpa_rr_bits)
+{
+ __be16 rev = mpa_rr_bits & MPA_RR_MASK_REVISION;
+
+ return (u8)be16_to_cpu(rev);
+}
+
+static void __mpa_ext_set_cc(__be32 *bits, u32 cc)
+{
+ *bits = (*bits & ~MPA_EXT_FLAG_CC) |
+ (cpu_to_be32(cc) & MPA_EXT_FLAG_CC);
+}
+
+static u8 __mpa_ext_cc(__be32 mpa_ext_bits)
+{
+ __be32 cc = mpa_ext_bits & MPA_EXT_FLAG_CC;
+
+ return (u8)be32_to_cpu(cc);
+}
+
+/*
+ * Receive MPA Request/Reply header.
+ *
+ * Returns 0 if the complete MPA Request/Reply header, including
+ * any private data, was received. Returns -EAGAIN if the header
+ * was only partially received, or a negative error code otherwise.
+ *
+ * Context: May be called in process context only
+ */
+static int erdma_recv_mpa_rr(struct erdma_cep *cep)
+{
+ struct mpa_rr *hdr = &cep->mpa.hdr;
+ struct socket *s = cep->sock;
+ u16 pd_len;
+ int rcvd, to_rcv, ret, pd_rcvd;
+
+ if (cep->mpa.bytes_rcvd < sizeof(struct mpa_rr)) {
+ ret = __recv_mpa_hdr(cep, cep->mpa.bytes_rcvd,
+ (char *)&cep->mpa.hdr,
+ sizeof(struct mpa_rr), &rcvd);
+ cep->mpa.bytes_rcvd += rcvd;
+ if (ret)
+ return ret;
+ }
+
+ if (be16_to_cpu(hdr->params.pd_len) > MPA_MAX_PRIVDATA ||
+ __mpa_rr_revision(hdr->params.bits) != MPA_REVISION_EXT_1)
+ return -EPROTO;
+
+ if (cep->mpa.bytes_rcvd - sizeof(struct mpa_rr) <
+ sizeof(struct erdma_mpa_ext)) {
+ ret = __recv_mpa_hdr(
+ cep, cep->mpa.bytes_rcvd - sizeof(struct mpa_rr),
+ (char *)&cep->mpa.ext_data,
+ sizeof(struct erdma_mpa_ext), &rcvd);
+ cep->mpa.bytes_rcvd += rcvd;
+ if (ret)
+ return ret;
+ }
+
+ pd_len = be16_to_cpu(hdr->params.pd_len);
+ pd_rcvd = cep->mpa.bytes_rcvd - sizeof(struct mpa_rr) -
+ sizeof(struct erdma_mpa_ext);
+ to_rcv = pd_len - pd_rcvd;
+
+ if (!to_rcv) {
+ /*
+ * The whole MPA Request/Reply message has been received. Any
+ * additional data from the peer is a protocol violation.
+ */
+ u32 word;
+
+ ret = __recv_mpa_hdr(cep, 0, (char *)&word, sizeof(word),
+ &rcvd);
+ if (ret == -EAGAIN && rcvd == 0)
+ return 0;
+
+ if (ret)
+ return ret;
+
+ return -EPROTO;
+ }
+
+ /*
+ * At this point the MPA header has been fully received and
+ * pd_len != 0, so begin receiving the private data.
+ */
+ if (!cep->mpa.pdata) {
+ cep->mpa.pdata = kmalloc(pd_len + 4, GFP_KERNEL);
+ if (!cep->mpa.pdata)
+ return -ENOMEM;
+ }
+
+ rcvd = ksock_recv(s, cep->mpa.pdata + pd_rcvd, to_rcv + 4,
+ MSG_DONTWAIT);
+ if (rcvd < 0)
+ return rcvd;
+
+ if (rcvd > to_rcv)
+ return -EPROTO;
+
+ cep->mpa.bytes_rcvd += rcvd;
+
+ if (to_rcv == rcvd)
+ return 0;
+
+ return -EAGAIN;
+}
+
+/*
+ * erdma_proc_mpareq()
+ *
+ * Read MPA Request from socket and signal new connection to IWCM
+ * if success. Caller must hold lock on corresponding listening CEP.
+ */
+static int erdma_proc_mpareq(struct erdma_cep *cep)
+{
+ struct mpa_rr *req;
+ int ret;
+
+ ret = erdma_recv_mpa_rr(cep);
+ if (ret)
+ return ret;
+
+ req = &cep->mpa.hdr;
+
+ if (memcmp(req->key, MPA_KEY_REQ, MPA_KEY_SIZE))
+ return -EPROTO;
+
+ memcpy(req->key, MPA_KEY_REP, MPA_KEY_SIZE);
+
+ /* Markers and CRC are currently not supported. */
+ if (req->params.bits & MPA_RR_FLAG_MARKERS ||
+ req->params.bits & MPA_RR_FLAG_CRC)
+ goto reject_conn;
+
+ cep->state = ERDMA_EPSTATE_RECVD_MPAREQ;
+
+ /* Keep reference until IWCM accepts/rejects */
+ erdma_cep_get(cep);
+ ret = erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REQUEST, 0);
+ if (ret)
+ erdma_cep_put(cep);
+
+ return ret;
+
+reject_conn:
+ req->params.bits &= ~MPA_RR_FLAG_MARKERS;
+ req->params.bits |= MPA_RR_FLAG_REJECT;
+ req->params.bits &= ~MPA_RR_FLAG_CRC;
+
+ kfree(cep->mpa.pdata);
+ cep->mpa.pdata = NULL;
+ erdma_send_mpareqrep(cep, NULL, 0);
+
+ return -EOPNOTSUPP;
+}
+
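+/*
+ * Read the MPA reply from the socket; on success move the QP to RTS and
+ * signal IW_CM_EVENT_CONNECT_REPLY to the IWCM.
+ */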
+static int erdma_proc_mpareply(struct erdma_cep *cep)
+{
+ enum erdma_qpa_mask_iwarp to_modify_attrs = 0;
+ struct erdma_mod_qp_params_iwarp params;
+ struct erdma_qp *qp = cep->qp;
+ struct mpa_rr *rep;
+ int ret;
+
+ ret = erdma_recv_mpa_rr(cep);
+ if (ret)
+ goto out_err;
+
+ erdma_cancel_mpatimer(cep);
+
+ rep = &cep->mpa.hdr;
+
+ if (memcmp(rep->key, MPA_KEY_REP, MPA_KEY_SIZE)) {
+ ret = -EPROTO;
+ goto out_err;
+ }
+
+ if (rep->params.bits & MPA_RR_FLAG_REJECT) {
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -ECONNRESET);
+ return -ECONNRESET;
+ }
+
+ /* Markers and CRC are currently not supported. */
+ if ((rep->params.bits & MPA_RR_FLAG_MARKERS) ||
+ (rep->params.bits & MPA_RR_FLAG_CRC)) {
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -ECONNREFUSED);
+ return -EINVAL;
+ }
+
+ memset(&params, 0, sizeof(params));
+ params.state = ERDMA_QPS_IWARP_RTS;
+ params.irq_size = cep->ird;
+ params.orq_size = cep->ord;
+
+ down_write(&qp->state_lock);
+ if (qp->attrs.iwarp.state > ERDMA_QPS_IWARP_RTR) {
+ ret = -EINVAL;
+ up_write(&qp->state_lock);
+ goto out_err;
+ }
+
+ to_modify_attrs = ERDMA_QPA_IWARP_STATE | ERDMA_QPA_IWARP_LLP_HANDLE |
+ ERDMA_QPA_IWARP_MPA | ERDMA_QPA_IWARP_IRD |
+ ERDMA_QPA_IWARP_ORD;
+
+ params.qp_type = ERDMA_QP_ACTIVE;
+ if (__mpa_ext_cc(cep->mpa.ext_data.bits) != qp->attrs.cc) {
+ to_modify_attrs |= ERDMA_QPA_IWARP_CC;
+ params.cc = COMPROMISE_CC;
+ }
+
+ ret = erdma_modify_qp_state_iwarp(qp, &params, to_modify_attrs);
+
+ up_write(&qp->state_lock);
+
+ if (!ret) {
+ ret = erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, 0);
+ if (!ret)
+ cep->state = ERDMA_EPSTATE_RDMA_MODE;
+
+ return 0;
+ }
+
+out_err:
+ if (ret != -EAGAIN)
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL);
+
+ return ret;
+}
+
+static void erdma_accept_newconn(struct erdma_cep *cep)
+{
+ struct socket *s = cep->sock;
+ struct socket *new_s = NULL;
+ struct erdma_cep *new_cep = NULL;
+ int ret = 0;
+
+ if (cep->state != ERDMA_EPSTATE_LISTENING)
+ goto error;
+
+ new_cep = erdma_cep_alloc(cep->dev);
+ if (!new_cep)
+ goto error;
+
+ /*
+ * Allocate 4 work elements: enough for concurrent handling of local
+ * and peer close events, MPA header processing, and the MPA timeout.
+ */
+ if (erdma_cm_alloc_work(new_cep, 4) != 0)
+ goto error;
+
+ /*
+ * Copy the saved socket callbacks from the listening CEP
+ * and associate the new socket with the new CEP.
+ */
+ new_cep->sk_state_change = cep->sk_state_change;
+ new_cep->sk_data_ready = cep->sk_data_ready;
+ new_cep->sk_error_report = cep->sk_error_report;
+
+ ret = kernel_accept(s, &new_s, O_NONBLOCK);
+ if (ret != 0)
+ goto error;
+
+ new_cep->sock = new_s;
+ erdma_cep_get(new_cep);
+ new_s->sk->sk_user_data = new_cep;
+
+ tcp_sock_set_nodelay(new_s->sk);
+ new_cep->state = ERDMA_EPSTATE_AWAIT_MPAREQ;
+
+ ret = erdma_cm_queue_work(new_cep, ERDMA_CM_WORK_MPATIMEOUT);
+ if (ret)
+ goto error;
+
+ new_cep->listen_cep = cep;
+ erdma_cep_get(cep);
+
+ if (atomic_read(&new_s->sk->sk_rmem_alloc)) {
+ /* MPA REQ already queued */
+ erdma_cep_set_inuse(new_cep);
+ ret = erdma_proc_mpareq(new_cep);
+ if (ret != -EAGAIN) {
+ erdma_cep_put(cep);
+ new_cep->listen_cep = NULL;
+ if (ret) {
+ erdma_cep_set_free(new_cep);
+ goto error;
+ }
+ }
+ erdma_cep_set_free(new_cep);
+ }
+ return;
+
+error:
+ if (new_cep) {
+ new_cep->state = ERDMA_EPSTATE_CLOSED;
+ erdma_cancel_mpatimer(new_cep);
+
+ erdma_cep_put(new_cep);
+ }
+
+ if (new_s) {
+ erdma_socket_disassoc(new_s);
+ sock_release(new_s);
+ }
+}
+
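+/*
+ * Active side: the TCP connect has completed, so build and send the MPA
+ * request and start the MPA reply timer.
+ */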
+static int erdma_newconn_connected(struct erdma_cep *cep)
+{
+ int ret = 0;
+
+ cep->mpa.hdr.params.bits = 0;
+ __mpa_rr_set_revision(&cep->mpa.hdr.params.bits, MPA_REVISION_EXT_1);
+
+ memcpy(cep->mpa.hdr.key, MPA_KEY_REQ, MPA_KEY_SIZE);
+ cep->mpa.ext_data.cookie = cpu_to_be32(cep->qp->attrs.iwarp.cookie);
+ __mpa_ext_set_cc(&cep->mpa.ext_data.bits, cep->qp->attrs.cc);
+
+ ret = erdma_send_mpareqrep(cep, cep->private_data, cep->pd_len);
+ cep->state = ERDMA_EPSTATE_AWAIT_MPAREP;
+ cep->mpa.hdr.params.pd_len = 0;
+
+ if (ret >= 0)
+ ret = erdma_cm_queue_work(cep, ERDMA_CM_WORK_MPATIMEOUT);
+
+ return ret;
+}
+
+static void erdma_cm_work_handler(struct work_struct *w)
+{
+ struct erdma_cm_work *work;
+ struct erdma_cep *cep;
+ int release_cep = 0, ret = 0;
+
+ work = container_of(w, struct erdma_cm_work, work.work);
+ cep = work->cep;
+
+ erdma_cep_set_inuse(cep);
+
+ switch (work->type) {
+ case ERDMA_CM_WORK_CONNECTED:
+ erdma_cancel_mpatimer(cep);
+ if (cep->state == ERDMA_EPSTATE_CONNECTING) {
+ ret = erdma_newconn_connected(cep);
+ if (ret) {
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
+ -EIO);
+ release_cep = 1;
+ }
+ }
+ break;
+ case ERDMA_CM_WORK_CONNECTTIMEOUT:
+ if (cep->state == ERDMA_EPSTATE_CONNECTING) {
+ cep->mpa_timer = NULL;
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
+ -ETIMEDOUT);
+ release_cep = 1;
+ }
+ break;
+ case ERDMA_CM_WORK_ACCEPT:
+ erdma_accept_newconn(cep);
+ break;
+ case ERDMA_CM_WORK_READ_MPAHDR:
+ if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREQ) {
+ if (cep->listen_cep) {
+ erdma_cep_set_inuse(cep->listen_cep);
+
+ if (cep->listen_cep->state ==
+ ERDMA_EPSTATE_LISTENING)
+ ret = erdma_proc_mpareq(cep);
+ else
+ ret = -EFAULT;
+
+ erdma_cep_set_free(cep->listen_cep);
+
+ if (ret != -EAGAIN) {
+ erdma_cep_put(cep->listen_cep);
+ cep->listen_cep = NULL;
+ if (ret)
+ erdma_cep_put(cep);
+ }
+ }
+ } else if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREP) {
+ ret = erdma_proc_mpareply(cep);
+ }
+
+ if (ret && ret != -EAGAIN)
+ release_cep = 1;
+ break;
+ case ERDMA_CM_WORK_CLOSE_LLP:
+ if (cep->cm_id)
+ erdma_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);
+ release_cep = 1;
+ break;
+ case ERDMA_CM_WORK_PEER_CLOSE:
+ if (cep->cm_id) {
+ if (cep->state == ERDMA_EPSTATE_CONNECTING ||
+ cep->state == ERDMA_EPSTATE_AWAIT_MPAREP) {
+ /*
+ * MPA reply not received, but the connection was dropped.
+ */
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
+ -ECONNRESET);
+ } else if (cep->state == ERDMA_EPSTATE_RDMA_MODE) {
+ /*
+ * NOTE: IW_CM_EVENT_DISCONNECT is given just
+ * to transition IWCM into CLOSING.
+ */
+ erdma_cm_upcall(cep, IW_CM_EVENT_DISCONNECT, 0);
+ erdma_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);
+ }
+ } else if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREQ) {
+ /* Socket close before MPA request received. */
+ erdma_disassoc_listen_cep(cep);
+ erdma_cep_put(cep);
+ }
+ release_cep = 1;
+ break;
+ case ERDMA_CM_WORK_MPATIMEOUT:
+ cep->mpa_timer = NULL;
+ if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREP) {
+ /*
+ * MPA request timed out:
+ * Hide any partially received private data and signal
+ * timeout
+ */
+ cep->mpa.hdr.params.pd_len = 0;
+
+ if (cep->cm_id)
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
+ -ETIMEDOUT);
+ release_cep = 1;
+ } else if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREQ) {
+ /* No MPA req received after peer TCP stream setup. */
+ erdma_disassoc_listen_cep(cep);
+
+ erdma_cep_put(cep);
+ release_cep = 1;
+ }
+ break;
+ default:
+ WARN(1, "Undefined CM work type: %d\n", work->type);
+ }
+
+ if (release_cep) {
+ erdma_cancel_mpatimer(cep);
+ cep->state = ERDMA_EPSTATE_CLOSED;
+ if (cep->qp) {
+ struct erdma_qp *qp = cep->qp;
+ /*
+ * Serialize a potential race with application
+ * closing the QP and calling erdma_qp_cm_drop()
+ */
+ erdma_qp_get(qp);
+ erdma_cep_set_free(cep);
+
+ erdma_qp_llp_close(qp);
+ erdma_qp_put(qp);
+
+ erdma_cep_set_inuse(cep);
+ cep->qp = NULL;
+ erdma_qp_put(qp);
+ void *dev; /* All EQs use this field to get the erdma_dev struct */
+
+ if (cep->sock) {
+ erdma_socket_disassoc(cep->sock);
+ sock_release(cep->sock);
+ cep->sock = NULL;
+ }
+
+ if (cep->cm_id) {
+ cep->cm_id->rem_ref(cep->cm_id);
+ cep->cm_id = NULL;
+ if (cep->state != ERDMA_EPSTATE_LISTENING)
+ erdma_cep_put(cep);
+ }
+ }
+ erdma_cep_set_free(cep);
+ erdma_put_work(work);
+ erdma_cep_put(cep);
+}
+
+int erdma_cm_queue_work(struct erdma_cep *cep, enum erdma_work_type type)
+{
+ struct erdma_cm_work *work = erdma_get_work(cep);
+ unsigned long delay = 0;
+
+ if (!work)
+ return -ENOMEM;
+
+ work->type = type;
+ work->cep = cep;
+
+ erdma_cep_get(cep);
+
+ INIT_DELAYED_WORK(&work->work, erdma_cm_work_handler);
+
+ if (type == ERDMA_CM_WORK_MPATIMEOUT) {
+ cep->mpa_timer = work;
+
+ if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREP)
+ delay = MPAREP_TIMEOUT;
+ else
+ delay = MPAREQ_TIMEOUT;
+ } else if (type == ERDMA_CM_WORK_CONNECTTIMEOUT) {
+ cep->mpa_timer = work;
+
+ delay = CONNECT_TIMEOUT;
+ }
+
+ queue_delayed_work(erdma_cm_wq, &work->work, delay);
+
+ return 0;
+}
+
+static void erdma_cm_llp_data_ready(struct sock *sk)
+{
+ struct erdma_cep *cep;
+
+ trace_sk_data_ready(sk);
+
+ read_lock(&sk->sk_callback_lock);
+
+ cep = sk_to_cep(sk);
+ if (!cep)
+ goto out;
+
+ if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREQ ||
+ cep->state == ERDMA_EPSTATE_AWAIT_MPAREP)
+ erdma_cm_queue_work(cep, ERDMA_CM_WORK_READ_MPAHDR);
+
+out:
+ read_unlock(&sk->sk_callback_lock);
+}
+
+static void erdma_cm_llp_error_report(struct sock *sk)
+{
+ struct erdma_cep *cep = sk_to_cep(sk);
+
+ if (cep)
+ cep->sk_error_report(sk);
+}
+
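+/*
+ * Map TCP socket state changes onto CM work: ESTABLISHED either completes an
+ * active connect or accepts a new passive connection; CLOSE/CLOSE_WAIT is
+ * treated as a peer close.
+ */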
+static void erdma_cm_llp_state_change(struct sock *sk)
+{
+ struct erdma_cep *cep;
+ void (*orig_state_change)(struct sock *sk);
+
+ read_lock(&sk->sk_callback_lock);
+
+ cep = sk_to_cep(sk);
+ if (!cep) {
+ read_unlock(&sk->sk_callback_lock);
+ return;
+ }
+ orig_state_change = cep->sk_state_change;
+
+ switch (sk->sk_state) {
+ case TCP_ESTABLISHED:
+ if (cep->state == ERDMA_EPSTATE_CONNECTING)
+ erdma_cm_queue_work(cep, ERDMA_CM_WORK_CONNECTED);
+ else
+ erdma_cm_queue_work(cep, ERDMA_CM_WORK_ACCEPT);
+ break;
+ case TCP_CLOSE:
+ case TCP_CLOSE_WAIT:
+ if (cep->state != ERDMA_EPSTATE_LISTENING)
+ erdma_cm_queue_work(cep, ERDMA_CM_WORK_PEER_CLOSE);
+ break;
+ default:
+ break;
+ }
+ read_unlock(&sk->sk_callback_lock);
+ orig_state_change(sk);
+}
+
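+/*
+ * Bind the socket to the local address (with address reuse enabled) and
+ * start a TCP connect to the peer; the caller may pass O_NONBLOCK in flags.
+ */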
+static int kernel_bindconnect(struct socket *s, struct sockaddr *laddr,
+ int laddrlen, struct sockaddr *raddr,
+ int raddrlen, int flags)
+{
+ int ret;
+
+ sock_set_reuseaddr(s->sk);
+ ret = s->ops->bind(s, laddr, laddrlen);
+ if (ret)
+ return ret;
+ ret = s->ops->connect(s, raddr, raddrlen, flags);
+ return ret < 0 ? ret : 0;
+}
+
+int erdma_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
+{
+ struct erdma_dev *dev = to_edev(id->device);
+ struct erdma_qp *qp;
+ struct erdma_cep *cep = NULL;
+ struct socket *s = NULL;
+ struct sockaddr *laddr = (struct sockaddr *)&id->m_local_addr;
+ struct sockaddr *raddr = (struct sockaddr *)&id->m_remote_addr;
+ u16 pd_len = params->private_data_len;
+ int ret;
+
+ if (pd_len > MPA_MAX_PRIVDATA)
+ return -EINVAL;
+
+ if (params->ird > dev->attrs.max_ird ||
+ params->ord > dev->attrs.max_ord)
+ return -EINVAL;
+
+ if (laddr->sa_family != AF_INET || raddr->sa_family != AF_INET)
+ return -EAFNOSUPPORT;
+
+ qp = find_qp_by_qpn(dev, params->qpn);
+ if (!qp)
+ return -ENOENT;
+ erdma_qp_get(qp);
+
+ ret = sock_create(AF_INET, SOCK_STREAM, IPPROTO_TCP, &s);
+ if (ret < 0)
+ goto error_put_qp;
+
+ cep = erdma_cep_alloc(dev);
+ if (!cep) {
+ ret = -ENOMEM;
+ goto error_release_sock;
+ }
+
+ erdma_cep_set_inuse(cep);
+
+ /* Associate QP with CEP */
+ erdma_cep_get(cep);
+ qp->cep = cep;
+ cep->qp = qp;
+
+ /* Associate cm_id with CEP */
+ id->add_ref(id);
+ cep->cm_id = id;
+
+ /*
+ * Allocate 6 work elements: enough for concurrent handling of local
+ * and peer close events, MPA header processing, the MPA timeout, the
+ * connected event, and the connect timeout.
+ */
+ ret = erdma_cm_alloc_work(cep, 6);
+ if (ret != 0) {
+ ret = -ENOMEM;
+ goto error_release_cep;
+ }
+
+ cep->ird = params->ird;
+ cep->ord = params->ord;
+ cep->state = ERDMA_EPSTATE_CONNECTING;
+
+ erdma_cep_socket_assoc(cep, s);
+
+ if (pd_len) {
+ cep->pd_len = pd_len;
+ cep->private_data = kmalloc(pd_len, GFP_KERNEL);
+ if (!cep->private_data) {
+ ret = -ENOMEM;
+ goto error_disassoc;
+ }
+
+ memcpy(cep->private_data, params->private_data,
+ params->private_data_len);
+ }
+
+ ret = kernel_bindconnect(s, laddr, sizeof(*laddr), raddr,
+ sizeof(*raddr), O_NONBLOCK);
+ if (ret != -EINPROGRESS && ret != 0) {
+ goto error_disassoc;
+ } else if (ret == 0) {
+ ret = erdma_cm_queue_work(cep, ERDMA_CM_WORK_CONNECTED);
+ if (ret)
+ goto error_disassoc;
+ } else {
+ ret = erdma_cm_queue_work(cep, ERDMA_CM_WORK_CONNECTTIMEOUT);
+ if (ret)
+ goto error_disassoc;
+ }
+
+ erdma_cep_set_free(cep);
+ return 0;
+
+error_disassoc:
+ kfree(cep->private_data);
+ cep->private_data = NULL;
+ cep->pd_len = 0;
+
+ erdma_socket_disassoc(s);
+
+error_release_cep:
+ /* disassoc with cm_id */
+ cep->cm_id = NULL;
+ id->rem_ref(id);
+
+ /* disassoc with qp */
+ qp->cep = NULL;
+ erdma_cep_put(cep);
+ cep->qp = NULL;
+
+ cep->state = ERDMA_EPSTATE_CLOSED;
+
+ erdma_cep_set_free(cep);
+
+ /* release the cep. */
+ erdma_cep_put(cep);
+
+error_release_sock:
+ if (s)
+ sock_release(s);
+error_put_qp:
+ erdma_qp_put(qp);
+
+ return ret;
+}
+
+int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
+{
+ struct erdma_cep *cep = (struct erdma_cep *)id->provider_data;
+ struct erdma_mod_qp_params_iwarp mod_qp_params;
+ enum erdma_qpa_mask_iwarp to_modify_attrs = 0;
+ struct erdma_dev *dev = to_edev(id->device);
+ struct erdma_qp *qp;
+ int ret;
+
+ erdma_cep_set_inuse(cep);
+ erdma_cep_put(cep);
+
+ /* Free lingering inbound private data */
+ if (cep->mpa.hdr.params.pd_len) {
+ cep->mpa.hdr.params.pd_len = 0;
+ kfree(cep->mpa.pdata);
+ cep->mpa.pdata = NULL;
+ }
+ erdma_cancel_mpatimer(cep);
+
+ if (cep->state != ERDMA_EPSTATE_RECVD_MPAREQ) {
+ erdma_cep_set_free(cep);
+ erdma_cep_put(cep);
+
+ return -ECONNRESET;
+ }
+
+ qp = find_qp_by_qpn(dev, params->qpn);
+ if (!qp)
+ return -ENOENT;
+ erdma_qp_get(qp);
+
+ down_write(&qp->state_lock);
+ if (qp->attrs.iwarp.state > ERDMA_QPS_IWARP_RTR) {
+ ret = -EINVAL;
+ up_write(&qp->state_lock);
+ goto error;
+ }
+
+ if (params->ord > dev->attrs.max_ord ||
+ params->ird > dev->attrs.max_ord) {
+ ret = -EINVAL;
+ up_write(&qp->state_lock);
+ goto error;
+ }
+
+ if (params->private_data_len > MPA_MAX_PRIVDATA) {
+ ret = -EINVAL;
+ up_write(&qp->state_lock);
+ goto error;
+ }
+
+ cep->ird = params->ird;
+ cep->ord = params->ord;
+
+ cep->cm_id = id;
+ id->add_ref(id);
+
+ memset(&mod_qp_params, 0, sizeof(mod_qp_params));
+
+ mod_qp_params.irq_size = params->ird;
+ mod_qp_params.orq_size = params->ord;
+ mod_qp_params.state = ERDMA_QPS_IWARP_RTS;
+
+ /* Associate QP with CEP */
+ erdma_cep_get(cep);
+ qp->cep = cep;
+ cep->qp = qp;
+
+ cep->state = ERDMA_EPSTATE_RDMA_MODE;
+
+ mod_qp_params.qp_type = ERDMA_QP_PASSIVE;
+ mod_qp_params.pd_len = params->private_data_len;
+
+ to_modify_attrs = ERDMA_QPA_IWARP_STATE | ERDMA_QPA_IWARP_ORD |
+ ERDMA_QPA_IWARP_LLP_HANDLE | ERDMA_QPA_IWARP_IRD |
+ ERDMA_QPA_IWARP_MPA;
+
+ if (qp->attrs.cc != __mpa_ext_cc(cep->mpa.ext_data.bits)) {
+ to_modify_attrs |= ERDMA_QPA_IWARP_CC;
+ mod_qp_params.cc = COMPROMISE_CC;
+ }
+
+ /* Move the QP to RTS. */
+ ret = erdma_modify_qp_state_iwarp(qp, &mod_qp_params, to_modify_attrs);
+
+ up_write(&qp->state_lock);
+
+ if (ret)
+ goto error;
+
+ cep->mpa.ext_data.bits = 0;
+ __mpa_ext_set_cc(&cep->mpa.ext_data.bits, qp->attrs.cc);
+ cep->mpa.ext_data.cookie = cpu_to_be32(cep->qp->attrs.iwarp.cookie);
+
+ ret = erdma_send_mpareqrep(cep, params->private_data,
+ params->private_data_len);
+ if (!ret) {
+ ret = erdma_cm_upcall(cep, IW_CM_EVENT_ESTABLISHED, 0);
+ if (ret)
+ goto error;
+
+ erdma_cep_set_free(cep);
+
+ return 0;
+ }
+
+error:
+ erdma_socket_disassoc(cep->sock);
+ sock_release(cep->sock);
+ cep->sock = NULL;
+
+ cep->state = ERDMA_EPSTATE_CLOSED;
+
+ if (cep->cm_id) {
+ cep->cm_id->rem_ref(id);
+ cep->cm_id = NULL;
+ }
+
+ if (qp->cep) {
+ erdma_cep_put(cep);
+ qp->cep = NULL;
+ }
+
+ cep->qp = NULL;
+ erdma_qp_put(qp);
+
+ erdma_cep_set_free(cep);
+ erdma_cep_put(cep);
+
+ return ret;
+}
+
+int erdma_reject(struct iw_cm_id *id, const void *pdata, u8 plen)
+{
+ struct erdma_cep *cep = (struct erdma_cep *)id->provider_data;
+
+ erdma_cep_set_inuse(cep);
+ erdma_cep_put(cep);
+
+ erdma_cancel_mpatimer(cep);
+
+ if (cep->state != ERDMA_EPSTATE_RECVD_MPAREQ) {
+ erdma_cep_set_free(cep);
+ erdma_cep_put(cep);
+
+ return -ECONNRESET;
+ }
+
+ if (__mpa_rr_revision(cep->mpa.hdr.params.bits) == MPA_REVISION_EXT_1) {
+ cep->mpa.hdr.params.bits |= MPA_RR_FLAG_REJECT; /* reject */
+ erdma_send_mpareqrep(cep, pdata, plen);
+ }
+
+ erdma_socket_disassoc(cep->sock);
+ sock_release(cep->sock);
+ cep->sock = NULL;
+
+ cep->state = ERDMA_EPSTATE_CLOSED;
+
+ erdma_cep_set_free(cep);
+ erdma_cep_put(cep);
+
+ return 0;
+}
+
+int erdma_create_listen(struct iw_cm_id *id, int backlog)
+{
+ struct socket *s;
+ struct erdma_cep *cep = NULL;
+ int ret = 0;
+ struct erdma_dev *dev = to_edev(id->device);
+ int addr_family = id->local_addr.ss_family;
+ struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);
+
+ if (addr_family != AF_INET)
+ return -EAFNOSUPPORT;
+
+ ret = sock_create(addr_family, SOCK_STREAM, IPPROTO_TCP, &s);
+ if (ret < 0)
+ return ret;
+
+ sock_set_reuseaddr(s->sk);
+
+ /* For wildcard addr, limit binding to current device only */
+ if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
+ s->sk->sk_bound_dev_if = dev->netdev->ifindex;
+
+ ret = s->ops->bind(s, (struct sockaddr *)laddr,
+ sizeof(struct sockaddr_in));
+ if (ret)
+ goto error;
+
+ cep = erdma_cep_alloc(dev);
+ if (!cep) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ erdma_cep_socket_assoc(cep, s);
+
+ ret = erdma_cm_alloc_work(cep, backlog);
+ if (ret)
+ goto error;
+
+ ret = s->ops->listen(s, backlog);
+ if (ret)
+ goto error;
+
+ cep->cm_id = id;
+ id->add_ref(id);
+
+ if (!id->provider_data) {
+ id->provider_data =
+ kmalloc(sizeof(struct list_head), GFP_KERNEL);
+ if (!id->provider_data) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ INIT_LIST_HEAD((struct list_head *)id->provider_data);
+ }
+
+ list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
+ cep->state = ERDMA_EPSTATE_LISTENING;
+
+ return 0;
+
+error:
+ if (cep) {
+ erdma_cep_set_inuse(cep);
+
+ if (cep->cm_id) {
+ cep->cm_id->rem_ref(cep->cm_id);
+ cep->cm_id = NULL;
+ }
+ cep->sock = NULL;
+ erdma_socket_disassoc(s);
+ cep->state = ERDMA_EPSTATE_CLOSED;
+
+ erdma_cep_set_free(cep);
+ erdma_cep_put(cep);
+ }
+ sock_release(s);
+
+ return ret;
+}
+
+static void erdma_drop_listeners(struct iw_cm_id *id)
+{
+ struct list_head *p, *tmp;
+ /*
+ * In case of a wildcard rdma_listen on a multi-homed device,
+ * a listener's IWCM id is associated with more than one listening CEP.
+ */
+ list_for_each_safe(p, tmp, (struct list_head *)id->provider_data) {
+ struct erdma_cep *cep =
+ list_entry(p, struct erdma_cep, listenq);
+
+ list_del(p);
+
+ erdma_cep_set_inuse(cep);
+
+ if (cep->cm_id) {
+ cep->cm_id->rem_ref(cep->cm_id);
+ cep->cm_id = NULL;
+ }
+ if (cep->sock) {
+ erdma_socket_disassoc(cep->sock);
+ sock_release(cep->sock);
+ cep->sock = NULL;
+ }
+ cep->state = ERDMA_EPSTATE_CLOSED;
+ erdma_cep_set_free(cep);
+ erdma_cep_put(cep);
+ }
+}
+
+int erdma_destroy_listen(struct iw_cm_id *id)
+{
+ if (!id->provider_data)
+ return 0;
+
+ erdma_drop_listeners(id);
+ kfree(id->provider_data);
+ id->provider_data = NULL;
+
+ return 0;
+}
+
+int erdma_cm_init(void)
+{
+ erdma_cm_wq = create_singlethread_workqueue("erdma_cm_wq");
+ if (!erdma_cm_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void erdma_cm_exit(void)
+{
+ if (erdma_cm_wq)
+ destroy_workqueue(erdma_cm_wq);
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_cm.h b/drivers/infiniband/hw/erdma/erdma_cm.h
new file mode 100644
index 000000000000..a26d80770188
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_cm.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Greg Joyce <greg@opengridcomputing.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+/* Copyright (c) 2017, Open Grid Computing, Inc. */
+
+#ifndef __ERDMA_CM_H__
+#define __ERDMA_CM_H__
+
+#include <linux/tcp.h>
+#include <net/sock.h>
+#include <rdma/iw_cm.h>
+
+/* iWarp MPA protocol defs */
+#define MPA_REVISION_EXT_1 129
+#define MPA_MAX_PRIVDATA RDMA_MAX_PRIVATE_DATA
+#define MPA_KEY_REQ "MPA ID Req Frame"
+#define MPA_KEY_REP "MPA ID Rep Frame"
+#define MPA_KEY_SIZE 16
+#define MPA_DEFAULT_HDR_LEN 28
+
+struct mpa_rr_params {
+ __be16 bits;
+ __be16 pd_len;
+};
+
+/*
+ * MPA request/response Hdr bits & fields
+ */
+enum {
+ MPA_RR_FLAG_MARKERS = cpu_to_be16(0x8000),
+ MPA_RR_FLAG_CRC = cpu_to_be16(0x4000),
+ MPA_RR_FLAG_REJECT = cpu_to_be16(0x2000),
+ MPA_RR_RESERVED = cpu_to_be16(0x1f00),
+ MPA_RR_MASK_REVISION = cpu_to_be16(0x00ff)
+};
+
+/*
+ * MPA request/reply header
+ */
+struct mpa_rr {
+ u8 key[16];
+ struct mpa_rr_params params;
+};
+
+struct erdma_mpa_ext {
+ __be32 cookie;
+ __be32 bits;
+};
+
+enum {
+ MPA_EXT_FLAG_CC = cpu_to_be32(0x0000000f),
+};
+
+struct erdma_mpa_info {
+ struct mpa_rr hdr; /* peer mpa hdr in host byte order */
+ struct erdma_mpa_ext ext_data;
+ char *pdata;
+ int bytes_rcvd;
+};
+
+struct erdma_sk_upcalls {
+ void (*sk_state_change)(struct sock *sk);
+ void (*sk_data_ready)(struct sock *sk, int bytes);
+ void (*sk_error_report)(struct sock *sk);
+};
+
+struct erdma_dev;
+
+enum erdma_cep_state {
+ ERDMA_EPSTATE_IDLE = 1,
+ ERDMA_EPSTATE_LISTENING,
+ ERDMA_EPSTATE_CONNECTING,
+ ERDMA_EPSTATE_AWAIT_MPAREQ,
+ ERDMA_EPSTATE_RECVD_MPAREQ,
+ ERDMA_EPSTATE_AWAIT_MPAREP,
+ ERDMA_EPSTATE_RDMA_MODE,
+ ERDMA_EPSTATE_CLOSED
+};
+
+struct erdma_cep {
+ struct iw_cm_id *cm_id;
+ struct erdma_dev *dev;
+ struct list_head devq;
+ spinlock_t lock;
+ struct kref ref;
+ int in_use;
+ wait_queue_head_t waitq;
+ enum erdma_cep_state state;
+
+ struct list_head listenq;
+ struct erdma_cep *listen_cep;
+
+ struct erdma_qp *qp;
+ struct socket *sock;
+
+ struct erdma_cm_work *mpa_timer;
+ struct list_head work_freelist;
+
+ struct erdma_mpa_info mpa;
+ int ord;
+ int ird;
+
+ int pd_len;
+ /* hold user's private data. */
+ void *private_data;
+
+ /* Saved upcalls of socket llp.sock */
+ void (*sk_state_change)(struct sock *sk);
+ void (*sk_data_ready)(struct sock *sk);
+ void (*sk_error_report)(struct sock *sk);
+};
+
+#define MPAREQ_TIMEOUT (HZ * 20)
+#define MPAREP_TIMEOUT (HZ * 10)
+#define CONNECT_TIMEOUT (HZ * 10)
+
+enum erdma_work_type {
+ ERDMA_CM_WORK_ACCEPT = 1,
+ ERDMA_CM_WORK_READ_MPAHDR,
+ ERDMA_CM_WORK_CLOSE_LLP, /* close socket */
+ ERDMA_CM_WORK_PEER_CLOSE, /* socket indicated peer close */
+ ERDMA_CM_WORK_MPATIMEOUT,
+ ERDMA_CM_WORK_CONNECTED,
+ ERDMA_CM_WORK_CONNECTTIMEOUT
+};
+
+struct erdma_cm_work {
+ struct delayed_work work;
+ struct list_head list;
+ enum erdma_work_type type;
+ struct erdma_cep *cep;
+};
+
+#define to_sockaddr_in(a) (*(struct sockaddr_in *)(&(a)))
+
+static inline int getname_peer(struct socket *s, struct sockaddr_storage *a)
+{
+ return s->ops->getname(s, (struct sockaddr *)a, 1);
+}
+
+static inline int getname_local(struct socket *s, struct sockaddr_storage *a)
+{
+ return s->ops->getname(s, (struct sockaddr *)a, 0);
+}
+
+int erdma_connect(struct iw_cm_id *id, struct iw_cm_conn_param *param);
+int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *param);
+int erdma_reject(struct iw_cm_id *id, const void *pdata, u8 plen);
+int erdma_create_listen(struct iw_cm_id *id, int backlog);
+int erdma_destroy_listen(struct iw_cm_id *id);
+
+void erdma_cep_get(struct erdma_cep *ceq);
+void erdma_cep_put(struct erdma_cep *ceq);
+int erdma_cm_queue_work(struct erdma_cep *ceq, enum erdma_work_type type);
+
+int erdma_cm_init(void);
+void erdma_cm_exit(void);
+
+#define sk_to_cep(sk) ((struct erdma_cep *)((sk)->sk_user_data))
+
+#endif
diff --git a/drivers/infiniband/hw/erdma/erdma_cmdq.c b/drivers/infiniband/hw/erdma/erdma_cmdq.c
new file mode 100644
index 000000000000..b867aefe83b2
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_cmdq.c
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#include "erdma.h"
+
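+/*
+ * Ring the cmdq CQ doorbell with the ARM flag set so the device reports new
+ * command completions through the cmdq EQ; the doorbell record mirrors the
+ * last doorbell value written.
+ */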
+static void arm_cmdq_cq(struct erdma_cmdq *cmdq)
+{
+ struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq);
+ u64 db_data = FIELD_PREP(ERDMA_CQDB_CI_MASK, cmdq->cq.ci) |
+ FIELD_PREP(ERDMA_CQDB_ARM_MASK, 1) |
+ FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cmdq->cq.cmdsn) |
+ FIELD_PREP(ERDMA_CQDB_IDX_MASK, cmdq->cq.cmdsn);
+
+ *cmdq->cq.dbrec = db_data;
+ writeq(db_data, dev->func_bar + ERDMA_CMDQ_CQDB_REG);
+
+ atomic64_inc(&cmdq->cq.armed_num);
+}
+
+static void kick_cmdq_db(struct erdma_cmdq *cmdq)
+{
+ struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq);
+ u64 db_data = FIELD_PREP(ERDMA_CMD_HDR_WQEBB_INDEX_MASK, cmdq->sq.pi);
+
+ *cmdq->sq.dbrec = db_data;
+ writeq(db_data, dev->func_bar + ERDMA_CMDQ_SQDB_REG);
+}
+
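+/* Reserve a free completion-wait slot (one per outstanding command). */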
+static struct erdma_comp_wait *get_comp_wait(struct erdma_cmdq *cmdq)
+{
+ int comp_idx;
+
+ spin_lock(&cmdq->lock);
+ comp_idx = find_first_zero_bit(cmdq->comp_wait_bitmap,
+ cmdq->max_outstandings);
+ if (comp_idx == cmdq->max_outstandings) {
+ spin_unlock(&cmdq->lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ __set_bit(comp_idx, cmdq->comp_wait_bitmap);
+ spin_unlock(&cmdq->lock);
+
+ return &cmdq->wait_pool[comp_idx];
+}
+
+static void put_comp_wait(struct erdma_cmdq *cmdq,
+ struct erdma_comp_wait *comp_wait)
+{
+ int used;
+
+ cmdq->wait_pool[comp_wait->ctx_id].cmd_status = ERDMA_CMD_STATUS_INIT;
+ spin_lock(&cmdq->lock);
+ used = __test_and_clear_bit(comp_wait->ctx_id, cmdq->comp_wait_bitmap);
+ spin_unlock(&cmdq->lock);
+
+ WARN_ON(!used);
+}
+
+static int erdma_cmdq_wait_res_init(struct erdma_dev *dev,
+ struct erdma_cmdq *cmdq)
+{
+ int i;
+
+ cmdq->wait_pool =
+ devm_kcalloc(&dev->pdev->dev, cmdq->max_outstandings,
+ sizeof(struct erdma_comp_wait), GFP_KERNEL);
+ if (!cmdq->wait_pool)
+ return -ENOMEM;
+
+ spin_lock_init(&cmdq->lock);
+ cmdq->comp_wait_bitmap = devm_bitmap_zalloc(
+ &dev->pdev->dev, cmdq->max_outstandings, GFP_KERNEL);
+ if (!cmdq->comp_wait_bitmap)
+ return -ENOMEM;
+
+ for (i = 0; i < cmdq->max_outstandings; i++) {
+ init_completion(&cmdq->wait_pool[i].wait_event);
+ cmdq->wait_pool[i].ctx_id = i;
+ }
+
+ return 0;
+}
+
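+/*
+ * Allocate the cmdq SQ ring and its doorbell record, and program their DMA
+ * addresses and the queue depth into the device registers.
+ */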
+static int erdma_cmdq_sq_init(struct erdma_dev *dev)
+{
+ struct erdma_cmdq *cmdq = &dev->cmdq;
+ struct erdma_cmdq_sq *sq = &cmdq->sq;
+
+ sq->wqebb_cnt = SQEBB_COUNT(ERDMA_CMDQ_SQE_SIZE);
+ sq->depth = cmdq->max_outstandings * sq->wqebb_cnt;
+
+ sq->qbuf = dma_alloc_coherent(&dev->pdev->dev, sq->depth << SQEBB_SHIFT,
+ &sq->qbuf_dma_addr, GFP_KERNEL);
+ if (!sq->qbuf)
+ return -ENOMEM;
+
+ sq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &sq->dbrec_dma);
+ if (!sq->dbrec)
+ goto err_out;
+
+ spin_lock_init(&sq->lock);
+
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_SQ_ADDR_H_REG,
+ upper_32_bits(sq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_SQ_ADDR_L_REG,
+ lower_32_bits(sq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_DEPTH_REG, sq->depth);
+ erdma_reg_write64(dev, ERDMA_CMDQ_SQ_DB_HOST_ADDR_REG, sq->dbrec_dma);
+
+ return 0;
+
+err_out:
+ dma_free_coherent(&dev->pdev->dev, sq->depth << SQEBB_SHIFT,
+ sq->qbuf, sq->qbuf_dma_addr);
+
+ return -ENOMEM;
+}
+
+static int erdma_cmdq_cq_init(struct erdma_dev *dev)
+{
+ struct erdma_cmdq *cmdq = &dev->cmdq;
+ struct erdma_cmdq_cq *cq = &cmdq->cq;
+
+ cq->depth = cmdq->sq.depth;
+ cq->qbuf = dma_alloc_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT,
+ &cq->qbuf_dma_addr, GFP_KERNEL);
+ if (!cq->qbuf)
+ return -ENOMEM;
+
+ spin_lock_init(&cq->lock);
+
+ cq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &cq->dbrec_dma);
+ if (!cq->dbrec)
+ goto err_out;
+
+ atomic64_set(&cq->armed_num, 0);
+
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_CQ_ADDR_H_REG,
+ upper_32_bits(cq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_CQ_ADDR_L_REG,
+ lower_32_bits(cq->qbuf_dma_addr));
+ erdma_reg_write64(dev, ERDMA_CMDQ_CQ_DB_HOST_ADDR_REG, cq->dbrec_dma);
+
+ return 0;
+
+err_out:
+ dma_free_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT, cq->qbuf,
+ cq->qbuf_dma_addr);
+
+ return -ENOMEM;
+}
+
+static int erdma_cmdq_eq_init(struct erdma_dev *dev)
+{
+ struct erdma_cmdq *cmdq = &dev->cmdq;
+ struct erdma_eq *eq = &cmdq->eq;
+ int ret;
+
+ ret = erdma_eq_common_init(dev, eq, cmdq->max_outstandings);
+ if (ret)
+ return ret;
+
+ eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG;
+
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_H_REG,
+ upper_32_bits(eq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_L_REG,
+ lower_32_bits(eq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_DEPTH_REG, eq->depth);
+ erdma_reg_write64(dev, ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG, eq->dbrec_dma);
+
+ return 0;
+}
+
+int erdma_cmdq_init(struct erdma_dev *dev)
+{
+ struct erdma_cmdq *cmdq = &dev->cmdq;
+ int err;
+
+ cmdq->max_outstandings = ERDMA_CMDQ_MAX_OUTSTANDING;
+
+ sema_init(&cmdq->credits, cmdq->max_outstandings);
+
+ err = erdma_cmdq_wait_res_init(dev, cmdq);
+ if (err)
+ return err;
+
+ err = erdma_cmdq_sq_init(dev);
+ if (err)
+ return err;
+
+ err = erdma_cmdq_cq_init(dev);
+ if (err)
+ goto err_destroy_sq;
+
+ err = erdma_cmdq_eq_init(dev);
+ if (err)
+ goto err_destroy_cq;
+
+ set_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
+
+ return 0;
+
+err_destroy_cq:
+ dma_free_coherent(&dev->pdev->dev, cmdq->cq.depth << CQE_SHIFT,
+ cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);
+
+ dma_pool_free(dev->db_pool, cmdq->cq.dbrec, cmdq->cq.dbrec_dma);
+
+err_destroy_sq:
+ dma_free_coherent(&dev->pdev->dev, cmdq->sq.depth << SQEBB_SHIFT,
+ cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
+
+ dma_pool_free(dev->db_pool, cmdq->sq.dbrec, cmdq->sq.dbrec_dma);
+
+ return err;
+}
+
+void erdma_finish_cmdq_init(struct erdma_dev *dev)
+{
+ arm_cmdq_cq(&dev->cmdq);
+}
+
+void erdma_cmdq_destroy(struct erdma_dev *dev)
+{
+ struct erdma_cmdq *cmdq = &dev->cmdq;
+
+ clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
+
+ erdma_eq_destroy(dev, &cmdq->eq);
+
+ dma_free_coherent(&dev->pdev->dev, cmdq->sq.depth << SQEBB_SHIFT,
+ cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
+
+ dma_pool_free(dev->db_pool, cmdq->sq.dbrec, cmdq->sq.dbrec_dma);
+
+ dma_free_coherent(&dev->pdev->dev, cmdq->cq.depth << CQE_SHIFT,
+ cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);
+
+ dma_pool_free(dev->db_pool, cmdq->cq.dbrec, cmdq->cq.dbrec_dma);
+}
+
+static void *get_next_valid_cmdq_cqe(struct erdma_cmdq *cmdq)
+{
+ __be32 *cqe = get_queue_entry(cmdq->cq.qbuf, cmdq->cq.ci,
+ cmdq->cq.depth, CQE_SHIFT);
+ u32 owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
+ be32_to_cpu(READ_ONCE(*cqe)));
+
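+ /*
+ * A CQE is valid when its owner bit differs from the phase bit,
+ * which toggles each time the consumer index wraps (ci & depth).
+ */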
+ return owner ^ !!(cmdq->cq.ci & cmdq->cq.depth) ? cqe : NULL;
+}
+
+static void push_cmdq_sqe(struct erdma_cmdq *cmdq, u64 *req, size_t req_len,
+ struct erdma_comp_wait *comp_wait)
+{
+ __le64 *wqe;
+ u64 hdr = *req;
+
+ comp_wait->cmd_status = ERDMA_CMD_STATUS_ISSUED;
+ reinit_completion(&comp_wait->wait_event);
+ comp_wait->sq_pi = cmdq->sq.pi;
+
+ wqe = get_queue_entry(cmdq->sq.qbuf, cmdq->sq.pi, cmdq->sq.depth,
+ SQEBB_SHIFT);
+ memcpy(wqe, req, req_len);
+
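+ /*
+ * Advance the producer index, then rebuild the SQE header with
+ * the new index, the completion context cookie and the WQEBB
+ * count of this command.
+ */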
+ cmdq->sq.pi += cmdq->sq.wqebb_cnt;
+ hdr |= FIELD_PREP(ERDMA_CMD_HDR_WQEBB_INDEX_MASK, cmdq->sq.pi) |
+ FIELD_PREP(ERDMA_CMD_HDR_CONTEXT_COOKIE_MASK,
+ comp_wait->ctx_id) |
+ FIELD_PREP(ERDMA_CMD_HDR_WQEBB_CNT_MASK, cmdq->sq.wqebb_cnt - 1);
+ *wqe = cpu_to_le64(hdr);
+
+ kick_cmdq_db(cmdq);
+}
+
+static int erdma_poll_single_cmd_completion(struct erdma_cmdq *cmdq)
+{
+ struct erdma_comp_wait *comp_wait;
+ u32 hdr0, sqe_idx;
+ __be32 *cqe;
+ u16 ctx_id;
+ u64 *sqe;
+
+ cqe = get_next_valid_cmdq_cqe(cmdq);
+ if (!cqe)
+ return -EAGAIN;
+
+ cmdq->cq.ci++;
+
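+ /* Make sure the CQE payload is read only after the owner bit check. */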
+ dma_rmb();
+ hdr0 = be32_to_cpu(*cqe);
+ sqe_idx = be32_to_cpu(*(cqe + 1));
+
+ sqe = get_queue_entry(cmdq->sq.qbuf, sqe_idx, cmdq->sq.depth,
+ SQEBB_SHIFT);
+ ctx_id = FIELD_GET(ERDMA_CMD_HDR_CONTEXT_COOKIE_MASK, *sqe);
+ comp_wait = &cmdq->wait_pool[ctx_id];
+ if (comp_wait->cmd_status != ERDMA_CMD_STATUS_ISSUED)
+ return -EIO;
+
+ comp_wait->cmd_status = ERDMA_CMD_STATUS_FINISHED;
+ comp_wait->comp_status = FIELD_GET(ERDMA_CQE_HDR_SYNDROME_MASK, hdr0);
+ cmdq->sq.ci += cmdq->sq.wqebb_cnt;
+ /* Copy the 16 bytes of completion data following the CQE header to the waiter. */
+ be32_to_cpu_array(comp_wait->comp_data, cqe + 2, 4);
+
+ complete(&comp_wait->wait_event);
+
+ return 0;
+}
+
+static void erdma_polling_cmd_completions(struct erdma_cmdq *cmdq)
+{
+ unsigned long flags;
+ u16 comp_num;
+
+ spin_lock_irqsave(&cmdq->cq.lock, flags);
+
+ /*
+ * At most max_outstandings commands can have completions
+ * pending at one time, so this loop is bounded.
+ */
+ for (comp_num = 0; comp_num < cmdq->max_outstandings; comp_num++)
+ if (erdma_poll_single_cmd_completion(cmdq))
+ break;
+
+ spin_unlock_irqrestore(&cmdq->cq.lock, flags);
+}
+
+void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq)
+{
+ int got_event = 0;
+
+ if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state))
+ return;
+
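+ /*
+ * Drain all pending EQEs; each one indicates that new CQEs may
+ * be available on the command CQ.
+ */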
+ while (get_next_valid_eqe(&cmdq->eq)) {
+ cmdq->eq.ci++;
+ got_event++;
+ }
+
+ if (got_event) {
+ cmdq->cq.cmdsn++;
+ erdma_polling_cmd_completions(cmdq);
+ arm_cmdq_cq(cmdq);
+ }
+
+ notify_eq(&cmdq->eq);
+}
+
+static int erdma_poll_cmd_completion(struct erdma_comp_wait *comp_ctx,
+ struct erdma_cmdq *cmdq, u32 timeout)
+{
+ unsigned long comp_timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (1) {
+ erdma_polling_cmd_completions(cmdq);
+ if (comp_ctx->cmd_status != ERDMA_CMD_STATUS_ISSUED)
+ break;
+
+ if (time_is_before_jiffies(comp_timeout))
+ return -ETIME;
+
+ udelay(20);
+ }
+
+ return 0;
+}
+
+static int erdma_wait_cmd_completion(struct erdma_comp_wait *comp_ctx,
+ struct erdma_cmdq *cmdq, u32 timeout)
+{
+ unsigned long flags = 0;
+
+ wait_for_completion_timeout(&comp_ctx->wait_event,
+ msecs_to_jiffies(timeout));
+
+ if (unlikely(comp_ctx->cmd_status != ERDMA_CMD_STATUS_FINISHED)) {
+ spin_lock_irqsave(&cmdq->cq.lock, flags);
+ comp_ctx->cmd_status = ERDMA_CMD_STATUS_TIMEOUT;
+ spin_unlock_irqrestore(&cmdq->cq.lock, flags);
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op)
+{
+ *hdr = FIELD_PREP(ERDMA_CMD_HDR_SUB_MOD_MASK, mod) |
+ FIELD_PREP(ERDMA_CMD_HDR_OPCODE_MASK, op);
+}
+
+int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
+ u64 *resp0, u64 *resp1, bool sleepable)
+{
+ struct erdma_comp_wait *comp_wait;
+ int ret;
+
+ if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state))
+ return -ENODEV;
+
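+ /*
+ * Reserve a command credit: non-sleepable callers spin on the
+ * semaphore, sleepable callers may block until one is free.
+ */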
+ if (!sleepable) {
+ while (down_trylock(&cmdq->credits))
+ ;
+ } else {
+ down(&cmdq->credits);
+ }
+
+ comp_wait = get_comp_wait(cmdq);
+ if (IS_ERR(comp_wait)) {
+ clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
+ set_bit(ERDMA_CMDQ_STATE_CTX_ERR_BIT, &cmdq->state);
+ up(&cmdq->credits);
+ return PTR_ERR(comp_wait);
+ }
+
+ spin_lock(&cmdq->sq.lock);
+ push_cmdq_sqe(cmdq, req, req_size, comp_wait);
+ spin_unlock(&cmdq->sq.lock);
+
+ if (sleepable)
+ ret = erdma_wait_cmd_completion(comp_wait, cmdq,
+ ERDMA_CMDQ_TIMEOUT_MS);
+ else
+ ret = erdma_poll_cmd_completion(comp_wait, cmdq,
+ ERDMA_CMDQ_TIMEOUT_MS);
+
+ if (ret) {
+ set_bit(ERDMA_CMDQ_STATE_TIMEOUT_BIT, &cmdq->state);
+ clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
+ goto out;
+ }
+
+ if (comp_wait->comp_status)
+ ret = -EIO;
+
+ if (resp0 && resp1) {
+ *resp0 = *((u64 *)&comp_wait->comp_data[0]);
+ *resp1 = *((u64 *)&comp_wait->comp_data[2]);
+ }
+ put_comp_wait(cmdq, comp_wait);
+
+out:
+ up(&cmdq->credits);
+
+ return ret;
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_cq.c b/drivers/infiniband/hw/erdma/erdma_cq.c
new file mode 100644
index 000000000000..1f456327e63c
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_cq.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#include "erdma_verbs.h"
+
+static void *get_next_valid_cqe(struct erdma_cq *cq)
+{
+ __be32 *cqe = get_queue_entry(cq->kern_cq.qbuf, cq->kern_cq.ci,
+ cq->depth, CQE_SHIFT);
+ u32 owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
+ be32_to_cpu(READ_ONCE(*cqe)));
+
+ return owner ^ !!(cq->kern_cq.ci & cq->depth) ? cqe : NULL;
+}
+
+static void notify_cq(struct erdma_cq *cq, u8 solicited)
+{
+ u64 db_data =
+ FIELD_PREP(ERDMA_CQDB_IDX_MASK, (cq->kern_cq.notify_cnt)) |
+ FIELD_PREP(ERDMA_CQDB_CQN_MASK, cq->cqn) |
+ FIELD_PREP(ERDMA_CQDB_ARM_MASK, 1) |
+ FIELD_PREP(ERDMA_CQDB_SOL_MASK, solicited) |
+ FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cq->kern_cq.cmdsn) |
+ FIELD_PREP(ERDMA_CQDB_CI_MASK, cq->kern_cq.ci);
+
+ *cq->kern_cq.dbrec = db_data;
+ writeq(db_data, cq->kern_cq.db);
+}
+
+int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct erdma_cq *cq = to_ecq(ibcq);
+ unsigned long irq_flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&cq->kern_cq.lock, irq_flags);
+
+ notify_cq(cq, (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+
+ if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && get_next_valid_cqe(cq))
+ ret = 1;
+
+ cq->kern_cq.notify_cnt++;
+
+ spin_unlock_irqrestore(&cq->kern_cq.lock, irq_flags);
+
+ return ret;
+}
+
+static const enum ib_wc_opcode wc_mapping_table[ERDMA_NUM_OPCODES] = {
+ [ERDMA_OP_WRITE] = IB_WC_RDMA_WRITE,
+ [ERDMA_OP_READ] = IB_WC_RDMA_READ,
+ [ERDMA_OP_SEND] = IB_WC_SEND,
+ [ERDMA_OP_SEND_WITH_IMM] = IB_WC_SEND,
+ [ERDMA_OP_RECEIVE] = IB_WC_RECV,
+ [ERDMA_OP_RECV_IMM] = IB_WC_RECV_RDMA_WITH_IMM,
+ [ERDMA_OP_RECV_INV] = IB_WC_RECV,
+ [ERDMA_OP_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
+ [ERDMA_OP_RSP_SEND_IMM] = IB_WC_RECV,
+ [ERDMA_OP_SEND_WITH_INV] = IB_WC_SEND,
+ [ERDMA_OP_REG_MR] = IB_WC_REG_MR,
+ [ERDMA_OP_LOCAL_INV] = IB_WC_LOCAL_INV,
+ [ERDMA_OP_READ_WITH_INV] = IB_WC_RDMA_READ,
+ [ERDMA_OP_ATOMIC_CAS] = IB_WC_COMP_SWAP,
+ [ERDMA_OP_ATOMIC_FAA] = IB_WC_FETCH_ADD,
+};
+
+static const struct {
+ enum erdma_wc_status erdma;
+ enum ib_wc_status base;
+ enum erdma_vendor_err vendor;
+} map_cqe_status[ERDMA_NUM_WC_STATUS] = {
+ { ERDMA_WC_SUCCESS, IB_WC_SUCCESS, ERDMA_WC_VENDOR_NO_ERR },
+ { ERDMA_WC_GENERAL_ERR, IB_WC_GENERAL_ERR, ERDMA_WC_VENDOR_NO_ERR },
+ { ERDMA_WC_RECV_WQE_FORMAT_ERR, IB_WC_GENERAL_ERR,
+ ERDMA_WC_VENDOR_INVALID_RQE },
+ { ERDMA_WC_RECV_STAG_INVALID_ERR, IB_WC_REM_ACCESS_ERR,
+ ERDMA_WC_VENDOR_RQE_INVALID_STAG },
+ { ERDMA_WC_RECV_ADDR_VIOLATION_ERR, IB_WC_REM_ACCESS_ERR,
+ ERDMA_WC_VENDOR_RQE_ADDR_VIOLATION },
+ { ERDMA_WC_RECV_RIGHT_VIOLATION_ERR, IB_WC_REM_ACCESS_ERR,
+ ERDMA_WC_VENDOR_RQE_ACCESS_RIGHT_ERR },
+ { ERDMA_WC_RECV_PDID_ERR, IB_WC_REM_ACCESS_ERR,
+ ERDMA_WC_VENDOR_RQE_INVALID_PD },
+ { ERDMA_WC_RECV_WARRPING_ERR, IB_WC_REM_ACCESS_ERR,
+ ERDMA_WC_VENDOR_RQE_WRAP_ERR },
+ { ERDMA_WC_SEND_WQE_FORMAT_ERR, IB_WC_LOC_QP_OP_ERR,
+ ERDMA_WC_VENDOR_INVALID_SQE },
+ { ERDMA_WC_SEND_WQE_ORD_EXCEED, IB_WC_GENERAL_ERR,
+ ERDMA_WC_VENDOR_ZERO_ORD },
+ { ERDMA_WC_SEND_STAG_INVALID_ERR, IB_WC_LOC_ACCESS_ERR,
+ ERDMA_WC_VENDOR_SQE_INVALID_STAG },
+ { ERDMA_WC_SEND_ADDR_VIOLATION_ERR, IB_WC_LOC_ACCESS_ERR,
+ ERDMA_WC_VENDOR_SQE_ADDR_VIOLATION },
+ { ERDMA_WC_SEND_RIGHT_VIOLATION_ERR, IB_WC_LOC_ACCESS_ERR,
+ ERDMA_WC_VENDOR_SQE_ACCESS_ERR },
+ { ERDMA_WC_SEND_PDID_ERR, IB_WC_LOC_ACCESS_ERR,
+ ERDMA_WC_VENDOR_SQE_INVALID_PD },
+ { ERDMA_WC_SEND_WARRPING_ERR, IB_WC_LOC_ACCESS_ERR,
+ ERDMA_WC_VENDOR_SQE_WARP_ERR },
+ { ERDMA_WC_FLUSH_ERR, IB_WC_WR_FLUSH_ERR, ERDMA_WC_VENDOR_NO_ERR },
+ { ERDMA_WC_RETRY_EXC_ERR, IB_WC_RETRY_EXC_ERR, ERDMA_WC_VENDOR_NO_ERR },
+};
+
+static void erdma_process_ud_cqe(struct erdma_cqe *cqe, struct ib_wc *wc)
+{
+ u32 ud_info;
+
+ wc->wc_flags |= (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE);
+ ud_info = be32_to_cpu(cqe->ud.info);
+ wc->network_hdr_type = FIELD_GET(ERDMA_CQE_NTYPE_MASK, ud_info);
+ if (wc->network_hdr_type == ERDMA_NETWORK_TYPE_IPV4)
+ wc->network_hdr_type = RDMA_NETWORK_IPV4;
+ else
+ wc->network_hdr_type = RDMA_NETWORK_IPV6;
+ wc->src_qp = FIELD_GET(ERDMA_CQE_SQPN_MASK, ud_info);
+ wc->sl = FIELD_GET(ERDMA_CQE_SL_MASK, ud_info);
+ wc->pkey_index = 0;
+}
+
+#define ERDMA_POLLCQ_NO_QP 1
+
+static int erdma_poll_one_cqe(struct erdma_cq *cq, struct ib_wc *wc)
+{
+ struct erdma_dev *dev = to_edev(cq->ibcq.device);
+ u8 opcode, syndrome, qtype;
+ struct erdma_kqp *kern_qp;
+ struct erdma_cqe *cqe;
+ struct erdma_qp *qp;
+ u16 wqe_idx, depth;
+ u32 qpn, cqe_hdr;
+ u64 *id_table;
+ u64 *wqe_hdr;
+
+ cqe = get_next_valid_cqe(cq);
+ if (!cqe)
+ return -EAGAIN;
+
+ cq->kern_cq.ci++;
+
+ /* Ensure the CQE contents are read only after the validity check. */
+ dma_rmb();
+
+ qpn = be32_to_cpu(cqe->qpn);
+ wqe_idx = be32_to_cpu(cqe->qe_idx);
+ cqe_hdr = be32_to_cpu(cqe->hdr);
+
+ qp = find_qp_by_qpn(dev, qpn);
+ if (!qp)
+ return ERDMA_POLLCQ_NO_QP;
+
+ kern_qp = &qp->kern_qp;
+
+ qtype = FIELD_GET(ERDMA_CQE_HDR_QTYPE_MASK, cqe_hdr);
+ syndrome = FIELD_GET(ERDMA_CQE_HDR_SYNDROME_MASK, cqe_hdr);
+ opcode = FIELD_GET(ERDMA_CQE_HDR_OPCODE_MASK, cqe_hdr);
+
+ if (qtype == ERDMA_CQE_QTYPE_SQ) {
+ id_table = kern_qp->swr_tbl;
+ depth = qp->attrs.sq_size;
+ wqe_hdr = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
+ qp->attrs.sq_size, SQEBB_SHIFT);
+ kern_qp->sq_ci =
+ FIELD_GET(ERDMA_SQE_HDR_WQEBB_CNT_MASK, *wqe_hdr) +
+ wqe_idx + 1;
+ } else {
+ id_table = kern_qp->rwr_tbl;
+ depth = qp->attrs.rq_size;
+ }
+ wc->wr_id = id_table[wqe_idx & (depth - 1)];
+ wc->byte_len = be32_to_cpu(cqe->size);
+
+ wc->wc_flags = 0;
+
+ wc->opcode = wc_mapping_table[opcode];
+ if (opcode == ERDMA_OP_RECV_IMM || opcode == ERDMA_OP_RSP_SEND_IMM) {
+ wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->imm_data));
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ } else if (opcode == ERDMA_OP_RECV_INV) {
+ wc->ex.invalidate_rkey = be32_to_cpu(cqe->inv_rkey);
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ }
+
+ if (erdma_device_rocev2(dev) &&
+ (qp->ibqp.qp_type == IB_QPT_UD || qp->ibqp.qp_type == IB_QPT_GSI))
+ erdma_process_ud_cqe(cqe, wc);
+
+ if (syndrome >= ERDMA_NUM_WC_STATUS)
+ syndrome = ERDMA_WC_GENERAL_ERR;
+
+ wc->status = map_cqe_status[syndrome].base;
+ wc->vendor_err = map_cqe_status[syndrome].vendor;
+ wc->qp = &qp->ibqp;
+
+ return 0;
+}
+
+int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct erdma_cq *cq = to_ecq(ibcq);
+ unsigned long flags;
+ int npolled, ret;
+
+ spin_lock_irqsave(&cq->kern_cq.lock, flags);
+
+ for (npolled = 0; npolled < num_entries;) {
+ ret = erdma_poll_one_cqe(cq, wc + npolled);
+
+ if (ret == -EAGAIN) /* no new CQEs available. */
+ break;
+ else if (ret) /* skip CQEs whose QP no longer exists. */
+ continue;
+
+ npolled++;
+ }
+
+ spin_unlock_irqrestore(&cq->kern_cq.lock, flags);
+
+ return npolled;
+}
+
+void erdma_remove_cqes_of_qp(struct ib_cq *ibcq, u32 qpn)
+{
+ struct erdma_cq *cq = to_ecq(ibcq);
+ struct erdma_cqe *cqe, *dst_cqe;
+ u32 prev_cq_ci, cur_cq_ci;
+ u32 ncqe = 0, nqp_cqe = 0;
+ unsigned long flags;
+ u8 owner;
+
+ spin_lock_irqsave(&cq->kern_cq.lock, flags);
+
+ prev_cq_ci = cq->kern_cq.ci;
+
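+ /*
+ * First consume every currently valid CQE, then walk them from
+ * newest to oldest, shifting CQEs of other QPs over those that
+ * belong to the destroyed QP and fixing up the owner bit of
+ * each moved CQE.
+ */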
+ while (ncqe < cq->depth && (cqe = get_next_valid_cqe(cq)) != NULL) {
+ ++cq->kern_cq.ci;
+ ++ncqe;
+ }
+
+ while (ncqe > 0) {
+ cur_cq_ci = prev_cq_ci + ncqe - 1;
+ cqe = get_queue_entry(cq->kern_cq.qbuf, cur_cq_ci, cq->depth,
+ CQE_SHIFT);
+
+ if (be32_to_cpu(cqe->qpn) == qpn) {
+ ++nqp_cqe;
+ } else if (nqp_cqe) {
+ dst_cqe = get_queue_entry(cq->kern_cq.qbuf,
+ cur_cq_ci + nqp_cqe,
+ cq->depth, CQE_SHIFT);
+ owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
+ be32_to_cpu(dst_cqe->hdr));
+ cqe->hdr = cpu_to_be32(
+ (be32_to_cpu(cqe->hdr) &
+ ~ERDMA_CQE_HDR_OWNER_MASK) |
+ FIELD_PREP(ERDMA_CQE_HDR_OWNER_MASK, owner));
+ memcpy(dst_cqe, cqe, sizeof(*cqe));
+ }
+
+ --ncqe;
+ }
+
+ cq->kern_cq.ci = prev_cq_ci + nqp_cqe;
+ spin_unlock_irqrestore(&cq->kern_cq.lock, flags);
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_eq.c b/drivers/infiniband/hw/erdma/erdma_eq.c
new file mode 100644
index 000000000000..6486234a2360
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_eq.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#include "erdma_verbs.h"
+
+#define MAX_POLL_CHUNK_SIZE 16
+
+void notify_eq(struct erdma_eq *eq)
+{
+ u64 db_data = FIELD_PREP(ERDMA_EQDB_CI_MASK, eq->ci) |
+ FIELD_PREP(ERDMA_EQDB_ARM_MASK, 1);
+
+ *eq->dbrec = db_data;
+ writeq(db_data, eq->db);
+
+ atomic64_inc(&eq->notify_num);
+}
+
+void *get_next_valid_eqe(struct erdma_eq *eq)
+{
+ u64 *eqe = get_queue_entry(eq->qbuf, eq->ci, eq->depth, EQE_SHIFT);
+ u32 owner = FIELD_GET(ERDMA_CEQE_HDR_O_MASK, READ_ONCE(*eqe));
+
+ return owner ^ !!(eq->ci & eq->depth) ? eqe : NULL;
+}
+
+void erdma_aeq_event_handler(struct erdma_dev *dev)
+{
+ struct erdma_aeqe *aeqe;
+ u32 cqn, qpn;
+ struct erdma_qp *qp;
+ struct erdma_cq *cq;
+ struct ib_event event;
+ u32 poll_cnt = 0;
+
+ memset(&event, 0, sizeof(event));
+
+ while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
+ aeqe = get_next_valid_eqe(&dev->aeq);
+ if (!aeqe)
+ break;
+
+ dma_rmb();
+
+ dev->aeq.ci++;
+ atomic64_inc(&dev->aeq.event_num);
+ poll_cnt++;
+
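+ /*
+ * CQ errors are dispatched to the CQ's event handler; all
+ * other async events are reported as fatal QP events.
+ */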
+ if (FIELD_GET(ERDMA_AEQE_HDR_TYPE_MASK,
+ le32_to_cpu(aeqe->hdr)) == ERDMA_AE_TYPE_CQ_ERR) {
+ cqn = le32_to_cpu(aeqe->event_data0);
+ cq = find_cq_by_cqn(dev, cqn);
+ if (!cq)
+ continue;
+
+ event.device = cq->ibcq.device;
+ event.element.cq = &cq->ibcq;
+ event.event = IB_EVENT_CQ_ERR;
+ if (cq->ibcq.event_handler)
+ cq->ibcq.event_handler(&event,
+ cq->ibcq.cq_context);
+ } else {
+ qpn = le32_to_cpu(aeqe->event_data0);
+ qp = find_qp_by_qpn(dev, qpn);
+ if (!qp)
+ continue;
+
+ event.device = qp->ibqp.device;
+ event.element.qp = &qp->ibqp;
+ event.event = IB_EVENT_QP_FATAL;
+ if (qp->ibqp.event_handler)
+ qp->ibqp.event_handler(&event,
+ qp->ibqp.qp_context);
+ }
+ }
+
+ notify_eq(&dev->aeq);
+}
+
+int erdma_eq_common_init(struct erdma_dev *dev, struct erdma_eq *eq, u32 depth)
+{
+ u32 buf_size = depth << EQE_SHIFT;
+
+ eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, buf_size,
+ &eq->qbuf_dma_addr, GFP_KERNEL);
+ if (!eq->qbuf)
+ return -ENOMEM;
+
+ eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
+ if (!eq->dbrec)
+ goto err_free_qbuf;
+
+ spin_lock_init(&eq->lock);
+ atomic64_set(&eq->event_num, 0);
+ atomic64_set(&eq->notify_num, 0);
+ eq->ci = 0;
+ eq->depth = depth;
+
+ return 0;
+
+err_free_qbuf:
+ dma_free_coherent(&dev->pdev->dev, buf_size, eq->qbuf,
+ eq->qbuf_dma_addr);
+
+ return -ENOMEM;
+}
+
+void erdma_eq_destroy(struct erdma_dev *dev, struct erdma_eq *eq)
+{
+ dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
+ dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
+ eq->qbuf_dma_addr);
+}
+
+int erdma_aeq_init(struct erdma_dev *dev)
+{
+ struct erdma_eq *eq = &dev->aeq;
+ int ret;
+
+ ret = erdma_eq_common_init(dev, &dev->aeq, ERDMA_DEFAULT_EQ_DEPTH);
+ if (ret)
+ return ret;
+
+ eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG;
+
+ erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
+ upper_32_bits(eq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG,
+ lower_32_bits(eq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth);
+ erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG, eq->dbrec_dma);
+
+ return 0;
+}
+
+void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb)
+{
+ struct erdma_dev *dev = ceq_cb->dev;
+ struct erdma_cq *cq;
+ u32 poll_cnt = 0;
+ u64 *ceqe;
+ int cqn;
+
+ if (!ceq_cb->ready)
+ return;
+
+ while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
+ ceqe = get_next_valid_eqe(&ceq_cb->eq);
+ if (!ceqe)
+ break;
+
+ dma_rmb();
+ ceq_cb->eq.ci++;
+ poll_cnt++;
+ cqn = FIELD_GET(ERDMA_CEQE_HDR_CQN_MASK, READ_ONCE(*ceqe));
+
+ cq = find_cq_by_cqn(dev, cqn);
+ if (!cq)
+ continue;
+
+ if (rdma_is_kernel_res(&cq->ibcq.res))
+ cq->kern_cq.cmdsn++;
+
+ if (cq->ibcq.comp_handler)
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+ }
+
+ notify_eq(&ceq_cb->eq);
+}
+
+static irqreturn_t erdma_intr_ceq_handler(int irq, void *data)
+{
+ struct erdma_eq_cb *ceq_cb = data;
+
+ tasklet_schedule(&ceq_cb->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static void erdma_intr_ceq_task(unsigned long data)
+{
+ erdma_ceq_completion_handler((struct erdma_eq_cb *)data);
+}
+
+static int erdma_set_ceq_irq(struct erdma_dev *dev, u16 ceqn)
+{
+ struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];
+ int err;
+
+ snprintf(eqc->irq.name, ERDMA_IRQNAME_SIZE, "erdma-ceq%u@pci:%s", ceqn,
+ pci_name(dev->pdev));
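+ /*
+ * MSI-X vector 0 serves the common (CMDQ/AEQ) interrupt, so
+ * CEQ n uses vector n + 1.
+ */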
+ eqc->irq.msix_vector = pci_irq_vector(dev->pdev, ceqn + 1);
+
+ tasklet_init(&dev->ceqs[ceqn].tasklet, erdma_intr_ceq_task,
+ (unsigned long)&dev->ceqs[ceqn]);
+
+ cpumask_set_cpu(cpumask_local_spread(ceqn + 1, dev->attrs.numa_node),
+ &eqc->irq.affinity_hint_mask);
+
+ err = request_irq(eqc->irq.msix_vector, erdma_intr_ceq_handler, 0,
+ eqc->irq.name, eqc);
+ if (err) {
+ dev_err(&dev->pdev->dev, "failed to request_irq(%d)\n", err);
+ return err;
+ }
+
+ irq_set_affinity_hint(eqc->irq.msix_vector,
+ &eqc->irq.affinity_hint_mask);
+
+ return 0;
+}
+
+static void erdma_free_ceq_irq(struct erdma_dev *dev, u16 ceqn)
+{
+ struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];
+
+ irq_set_affinity_hint(eqc->irq.msix_vector, NULL);
+ free_irq(eqc->irq.msix_vector, eqc);
+}
+
+static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
+{
+ struct erdma_cmdq_create_eq_req req;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
+ CMDQ_OPCODE_CREATE_EQ);
+ req.eqn = eqn;
+ req.depth = ilog2(eq->depth);
+ req.qbuf_addr = eq->qbuf_dma_addr;
+ req.qtype = ERDMA_EQ_TYPE_CEQ;
+ /* Vector index is the same as EQN. */
+ req.vector_idx = eqn;
+ req.db_dma_addr_l = lower_32_bits(eq->dbrec_dma);
+ req.db_dma_addr_h = upper_32_bits(eq->dbrec_dma);
+
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ false);
+}
+
+static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
+{
+ struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
+ int ret;
+
+ ret = erdma_eq_common_init(dev, eq, ERDMA_DEFAULT_EQ_DEPTH);
+ if (ret)
+ return ret;
+
+ eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
+ (ceqn + 1) * ERDMA_DB_SIZE;
+ dev->ceqs[ceqn].dev = dev;
+ dev->ceqs[ceqn].ready = true;
+
+ /* CEQs are indexed from 1; EQN 0 is reserved for the CMDQ EQ. */
+ ret = create_eq_cmd(dev, ceqn + 1, eq);
+ if (ret) {
+ erdma_eq_destroy(dev, eq);
+ dev->ceqs[ceqn].ready = false;
+ }
+
+ return ret;
+}
+
+static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
+{
+ struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
+ struct erdma_cmdq_destroy_eq_req req;
+ int err;
+
+ dev->ceqs[ceqn].ready = false;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
+ CMDQ_OPCODE_DESTROY_EQ);
+ /* CEQs are indexed from 1; EQN 0 is reserved for the CMDQ EQ. */
+ req.eqn = ceqn + 1;
+ req.qtype = ERDMA_EQ_TYPE_CEQ;
+ req.vector_idx = ceqn + 1;
+
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ false);
+ if (err)
+ return;
+
+ erdma_eq_destroy(dev, eq);
+}
+
+int erdma_ceqs_init(struct erdma_dev *dev)
+{
+ u32 i, j;
+ int err;
+
+ for (i = 0; i < dev->attrs.irq_num - 1; i++) {
+ err = erdma_ceq_init_one(dev, i);
+ if (err)
+ goto out_err;
+
+ err = erdma_set_ceq_irq(dev, i);
+ if (err) {
+ erdma_ceq_uninit_one(dev, i);
+ goto out_err;
+ }
+ }
+
+ return 0;
+
+out_err:
+ for (j = 0; j < i; j++) {
+ erdma_free_ceq_irq(dev, j);
+ erdma_ceq_uninit_one(dev, j);
+ }
+
+ return err;
+}
+
+void erdma_ceqs_uninit(struct erdma_dev *dev)
+{
+ u32 i;
+
+ for (i = 0; i < dev->attrs.irq_num - 1; i++) {
+ erdma_free_ceq_irq(dev, i);
+ erdma_ceq_uninit_one(dev, i);
+ }
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
new file mode 100644
index 000000000000..ea4db53901a4
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_hw.h
@@ -0,0 +1,753 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#ifndef __ERDMA_HW_H__
+#define __ERDMA_HW_H__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+/* PCIe device related definition. */
+#define ERDMA_PCI_WIDTH 64
+#define ERDMA_FUNC_BAR 0
+#define ERDMA_MISX_BAR 2
+
+#define ERDMA_BAR_MASK (BIT(ERDMA_FUNC_BAR) | BIT(ERDMA_MISX_BAR))
+
+/* MSI-X related. */
+#define ERDMA_NUM_MSIX_VEC 32U
+#define ERDMA_MSIX_VECTOR_CMDQ 0
+
+/* RoCEv2 related */
+#define ERDMA_ROCEV2_GID_SIZE 16
+#define ERDMA_MAX_PKEYS 1
+#define ERDMA_DEFAULT_PKEY 0xFFFF
+
+/* erdma device protocol type */
+enum erdma_proto_type {
+ ERDMA_PROTO_IWARP = 0,
+ ERDMA_PROTO_ROCEV2 = 1,
+ ERDMA_PROTO_COUNT = 2,
+};
+
+/* PCIe Bar0 Registers. */
+#define ERDMA_REGS_VERSION_REG 0x0
+#define ERDMA_REGS_DEV_PROTO_REG 0xC
+#define ERDMA_REGS_DEV_CTRL_REG 0x10
+#define ERDMA_REGS_DEV_ST_REG 0x14
+#define ERDMA_REGS_NETDEV_MAC_L_REG 0x18
+#define ERDMA_REGS_NETDEV_MAC_H_REG 0x1C
+#define ERDMA_REGS_CMDQ_SQ_ADDR_L_REG 0x20
+#define ERDMA_REGS_CMDQ_SQ_ADDR_H_REG 0x24
+#define ERDMA_REGS_CMDQ_CQ_ADDR_L_REG 0x28
+#define ERDMA_REGS_CMDQ_CQ_ADDR_H_REG 0x2C
+#define ERDMA_REGS_CMDQ_DEPTH_REG 0x30
+#define ERDMA_REGS_CMDQ_EQ_DEPTH_REG 0x34
+#define ERDMA_REGS_CMDQ_EQ_ADDR_L_REG 0x38
+#define ERDMA_REGS_CMDQ_EQ_ADDR_H_REG 0x3C
+#define ERDMA_REGS_AEQ_ADDR_L_REG 0x40
+#define ERDMA_REGS_AEQ_ADDR_H_REG 0x44
+#define ERDMA_REGS_AEQ_DEPTH_REG 0x48
+#define ERDMA_REGS_GRP_NUM_REG 0x4c
+#define ERDMA_REGS_AEQ_DB_REG 0x50
+#define ERDMA_CMDQ_SQ_DB_HOST_ADDR_REG 0x60
+#define ERDMA_CMDQ_CQ_DB_HOST_ADDR_REG 0x68
+#define ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG 0x70
+#define ERDMA_AEQ_DB_HOST_ADDR_REG 0x78
+#define ERDMA_REGS_STATS_TSO_IN_PKTS_REG 0x80
+#define ERDMA_REGS_STATS_TSO_OUT_PKTS_REG 0x88
+#define ERDMA_REGS_STATS_TSO_OUT_BYTES_REG 0x90
+#define ERDMA_REGS_STATS_TX_DROP_PKTS_REG 0x98
+#define ERDMA_REGS_STATS_TX_BPS_METER_DROP_PKTS_REG 0xa0
+#define ERDMA_REGS_STATS_TX_PPS_METER_DROP_PKTS_REG 0xa8
+#define ERDMA_REGS_STATS_RX_PKTS_REG 0xc0
+#define ERDMA_REGS_STATS_RX_BYTES_REG 0xc8
+#define ERDMA_REGS_STATS_RX_DROP_PKTS_REG 0xd0
+#define ERDMA_REGS_STATS_RX_BPS_METER_DROP_PKTS_REG 0xd8
+#define ERDMA_REGS_STATS_RX_PPS_METER_DROP_PKTS_REG 0xe0
+#define ERDMA_REGS_CEQ_DB_BASE_REG 0x100
+#define ERDMA_CMDQ_SQDB_REG 0x200
+#define ERDMA_CMDQ_CQDB_REG 0x300
+
+/* DEV_CTRL_REG details. */
+#define ERDMA_REG_DEV_CTRL_RESET_MASK 0x00000001
+#define ERDMA_REG_DEV_CTRL_INIT_MASK 0x00000002
+
+/* DEV_ST_REG details. */
+#define ERDMA_REG_DEV_ST_RESET_DONE_MASK 0x00000001U
+#define ERDMA_REG_DEV_ST_INIT_DONE_MASK 0x00000002U
+
+/* eRDMA PCIe DBs definition. */
+#define ERDMA_BAR_DB_SPACE_BASE 4096
+
+#define ERDMA_BAR_SQDB_SPACE_OFFSET ERDMA_BAR_DB_SPACE_BASE
+#define ERDMA_BAR_SQDB_SPACE_SIZE (384 * 1024)
+
+#define ERDMA_BAR_RQDB_SPACE_OFFSET \
+ (ERDMA_BAR_SQDB_SPACE_OFFSET + ERDMA_BAR_SQDB_SPACE_SIZE)
+#define ERDMA_BAR_RQDB_SPACE_SIZE (96 * 1024)
+
+#define ERDMA_BAR_CQDB_SPACE_OFFSET \
+ (ERDMA_BAR_RQDB_SPACE_OFFSET + ERDMA_BAR_RQDB_SPACE_SIZE)
+
+#define ERDMA_SDB_SHARED_PAGE_INDEX 95
+
+/* Doorbell related. */
+#define ERDMA_DB_SIZE 8
+
+#define ERDMA_CQDB_IDX_MASK GENMASK_ULL(63, 56)
+#define ERDMA_CQDB_CQN_MASK GENMASK_ULL(55, 32)
+#define ERDMA_CQDB_ARM_MASK BIT_ULL(31)
+#define ERDMA_CQDB_SOL_MASK BIT_ULL(30)
+#define ERDMA_CQDB_CMDSN_MASK GENMASK_ULL(29, 28)
+#define ERDMA_CQDB_CI_MASK GENMASK_ULL(23, 0)
+
+#define ERDMA_EQDB_ARM_MASK BIT(31)
+#define ERDMA_EQDB_CI_MASK GENMASK_ULL(23, 0)
+
+#define ERDMA_PAGE_SIZE_SUPPORT 0x7FFFF000
+
+/* Hardware page size definition */
+#define ERDMA_HW_PAGE_SHIFT 12
+#define ERDMA_HW_PAGE_SIZE 4096
+
+/* WQE related. */
+#define EQE_SIZE 16
+#define EQE_SHIFT 4
+#define RQE_SIZE 32
+#define RQE_SHIFT 5
+#define CQE_SIZE 32
+#define CQE_SHIFT 5
+#define SQEBB_SIZE 32
+#define SQEBB_SHIFT 5
+#define SQEBB_MASK (~(SQEBB_SIZE - 1))
+#define SQEBB_ALIGN(size) (((size) + SQEBB_SIZE - 1) & SQEBB_MASK)
+#define SQEBB_COUNT(size) (SQEBB_ALIGN(size) >> SQEBB_SHIFT)
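+/* e.g. SQEBB_COUNT(100) == 4: a 100-byte SQE is padded to four 32-byte WQEBBs. */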
+
+#define ERDMA_MAX_SQE_SIZE 128
+#define ERDMA_MAX_WQEBB_PER_SQE 4
+
+/* CMDQ related. */
+#define ERDMA_CMDQ_MAX_OUTSTANDING 128
+#define ERDMA_CMDQ_SQE_SIZE 128
+
+/* cmdq sub module definition. */
+enum CMDQ_WQE_SUB_MOD {
+ CMDQ_SUBMOD_RDMA = 0,
+ CMDQ_SUBMOD_COMMON = 1
+};
+
+enum CMDQ_RDMA_OPCODE {
+ CMDQ_OPCODE_QUERY_DEVICE = 0,
+ CMDQ_OPCODE_CREATE_QP = 1,
+ CMDQ_OPCODE_DESTROY_QP = 2,
+ CMDQ_OPCODE_MODIFY_QP = 3,
+ CMDQ_OPCODE_CREATE_CQ = 4,
+ CMDQ_OPCODE_DESTROY_CQ = 5,
+ CMDQ_OPCODE_REFLUSH = 6,
+ CMDQ_OPCODE_REG_MR = 8,
+ CMDQ_OPCODE_DEREG_MR = 9,
+ CMDQ_OPCODE_SET_GID = 14,
+ CMDQ_OPCODE_CREATE_AH = 15,
+ CMDQ_OPCODE_DESTROY_AH = 16,
+ CMDQ_OPCODE_QUERY_QP = 17,
+};
+
+enum CMDQ_COMMON_OPCODE {
+ CMDQ_OPCODE_CREATE_EQ = 0,
+ CMDQ_OPCODE_DESTROY_EQ = 1,
+ CMDQ_OPCODE_QUERY_FW_INFO = 2,
+ CMDQ_OPCODE_CONF_MTU = 3,
+ CMDQ_OPCODE_GET_STATS = 4,
+ CMDQ_OPCODE_CONF_DEVICE = 5,
+ CMDQ_OPCODE_ALLOC_DB = 8,
+ CMDQ_OPCODE_FREE_DB = 9,
+};
+
+/* cmdq-SQE HDR */
+#define ERDMA_CMD_HDR_WQEBB_CNT_MASK GENMASK_ULL(54, 52)
+#define ERDMA_CMD_HDR_CONTEXT_COOKIE_MASK GENMASK_ULL(47, 32)
+#define ERDMA_CMD_HDR_SUB_MOD_MASK GENMASK_ULL(25, 24)
+#define ERDMA_CMD_HDR_OPCODE_MASK GENMASK_ULL(23, 16)
+#define ERDMA_CMD_HDR_WQEBB_INDEX_MASK GENMASK_ULL(15, 0)
+
+struct erdma_cmdq_destroy_cq_req {
+ u64 hdr;
+ u32 cqn;
+};
+
+#define ERDMA_EQ_TYPE_AEQ 0
+#define ERDMA_EQ_TYPE_CEQ 1
+
+struct erdma_cmdq_create_eq_req {
+ u64 hdr;
+ u64 qbuf_addr;
+ u8 vector_idx;
+ u8 eqn;
+ u8 depth;
+ u8 qtype;
+ u32 db_dma_addr_l;
+ u32 db_dma_addr_h;
+};
+
+struct erdma_cmdq_destroy_eq_req {
+ u64 hdr;
+ u64 rsvd0;
+ u8 vector_idx;
+ u8 eqn;
+ u8 rsvd1;
+ u8 qtype;
+};
+
+/* config device cfg */
+#define ERDMA_CMD_CONFIG_DEVICE_PS_EN_MASK BIT(31)
+#define ERDMA_CMD_CONFIG_DEVICE_PGSHIFT_MASK GENMASK(4, 0)
+
+struct erdma_cmdq_config_device_req {
+ u64 hdr;
+ u32 cfg;
+ u32 rsvd[5];
+};
+
+struct erdma_cmdq_config_mtu_req {
+ u64 hdr;
+ u32 mtu;
+};
+
+/* ext db requests(alloc and free) cfg */
+#define ERDMA_CMD_EXT_DB_CQ_EN_MASK BIT(2)
+#define ERDMA_CMD_EXT_DB_RQ_EN_MASK BIT(1)
+#define ERDMA_CMD_EXT_DB_SQ_EN_MASK BIT(0)
+
+struct erdma_cmdq_ext_db_req {
+ u64 hdr;
+ u32 cfg;
+ u16 rdb_off;
+ u16 sdb_off;
+ u16 rsvd0;
+ u16 cdb_off;
+ u32 rsvd1[3];
+};
+
+/* alloc db response qword 0 definition */
+#define ERDMA_CMD_ALLOC_DB_RESP_RDB_MASK GENMASK_ULL(63, 48)
+#define ERDMA_CMD_ALLOC_DB_RESP_CDB_MASK GENMASK_ULL(47, 32)
+#define ERDMA_CMD_ALLOC_DB_RESP_SDB_MASK GENMASK_ULL(15, 0)
+
+/* create_cq cfg0 */
+#define ERDMA_CMD_CREATE_CQ_DEPTH_MASK GENMASK(31, 24)
+#define ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK GENMASK(23, 20)
+#define ERDMA_CMD_CREATE_CQ_CQN_MASK GENMASK(19, 0)
+
+/* create_cq cfg1 */
+#define ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK GENMASK(31, 16)
+#define ERDMA_CMD_CREATE_CQ_MTT_LEVEL_MASK BIT(15)
+#define ERDMA_CMD_CREATE_CQ_MTT_DB_CFG_MASK BIT(11)
+#define ERDMA_CMD_CREATE_CQ_EQN_MASK GENMASK(9, 0)
+
+/* create_cq cfg2 */
+#define ERDMA_CMD_CREATE_CQ_DB_CFG_MASK GENMASK(15, 0)
+
+struct erdma_cmdq_create_cq_req {
+ u64 hdr;
+ u32 cfg0;
+ u32 qbuf_addr_l;
+ u32 qbuf_addr_h;
+ u32 cfg1;
+ u64 cq_dbrec_dma;
+ u32 first_page_offset;
+ u32 cfg2;
+};
+
+/* regmr/deregmr cfg0 */
+#define ERDMA_CMD_MR_VALID_MASK BIT(31)
+#define ERDMA_CMD_MR_VERSION_MASK GENMASK(30, 28)
+#define ERDMA_CMD_MR_KEY_MASK GENMASK(27, 20)
+#define ERDMA_CMD_MR_MPT_IDX_MASK GENMASK(19, 0)
+
+/* regmr cfg1 */
+#define ERDMA_CMD_REGMR_PD_MASK GENMASK(31, 12)
+#define ERDMA_CMD_REGMR_TYPE_MASK GENMASK(7, 6)
+#define ERDMA_CMD_REGMR_RIGHT_MASK GENMASK(5, 1)
+
+/* regmr cfg2 */
+#define ERDMA_CMD_REGMR_PAGESIZE_MASK GENMASK(31, 27)
+#define ERDMA_CMD_REGMR_MTT_PAGESIZE_MASK GENMASK(26, 24)
+#define ERDMA_CMD_REGMR_MTT_LEVEL_MASK GENMASK(21, 20)
+#define ERDMA_CMD_REGMR_MTT_CNT_MASK GENMASK(19, 0)
+
+struct erdma_cmdq_reg_mr_req {
+ u64 hdr;
+ u32 cfg0;
+ u32 cfg1;
+ u64 start_va;
+ u32 size;
+ u32 cfg2;
+ union {
+ u64 phy_addr[4];
+ struct {
+ u64 rsvd;
+ u32 size_h;
+ u32 mtt_cnt_h;
+ };
+ };
+};
+
+struct erdma_cmdq_dereg_mr_req {
+ u64 hdr;
+ u32 cfg;
+};
+
+/* create_av cfg0 */
+#define ERDMA_CMD_CREATE_AV_FL_MASK GENMASK(19, 0)
+#define ERDMA_CMD_CREATE_AV_NTYPE_MASK BIT(20)
+
+struct erdma_av_cfg {
+ u32 cfg0;
+ u8 traffic_class;
+ u8 hop_limit;
+ u8 sl;
+ u8 rsvd;
+ u16 udp_sport;
+ u16 sgid_index;
+ u8 dmac[ETH_ALEN];
+ u8 padding[2];
+ u8 dgid[ERDMA_ROCEV2_GID_SIZE];
+};
+
+struct erdma_cmdq_create_ah_req {
+ u64 hdr;
+ u32 pdn;
+ u32 ahn;
+ struct erdma_av_cfg av_cfg;
+};
+
+struct erdma_cmdq_destroy_ah_req {
+ u64 hdr;
+ u32 pdn;
+ u32 ahn;
+};
+
+/* modify qp cfg */
+#define ERDMA_CMD_MODIFY_QP_STATE_MASK GENMASK(31, 24)
+#define ERDMA_CMD_MODIFY_QP_CC_MASK GENMASK(23, 20)
+#define ERDMA_CMD_MODIFY_QP_QPN_MASK GENMASK(19, 0)
+
+struct erdma_cmdq_modify_qp_req {
+ u64 hdr;
+ u32 cfg;
+ u32 cookie;
+ __be32 dip;
+ __be32 sip;
+ __be16 sport;
+ __be16 dport;
+ u32 send_nxt;
+ u32 recv_nxt;
+};
+
+/* modify qp cfg1 for roce device */
+#define ERDMA_CMD_MODIFY_QP_DQPN_MASK GENMASK(19, 0)
+
+struct erdma_cmdq_mod_qp_req_rocev2 {
+ u64 hdr;
+ u32 cfg0;
+ u32 cfg1;
+ u32 attr_mask;
+ u32 qkey;
+ u32 rq_psn;
+ u32 sq_psn;
+ struct erdma_av_cfg av_cfg;
+};
+
+/* query qp response mask */
+#define ERDMA_CMD_QUERY_QP_RESP_SQ_PSN_MASK GENMASK_ULL(23, 0)
+#define ERDMA_CMD_QUERY_QP_RESP_RQ_PSN_MASK GENMASK_ULL(47, 24)
+#define ERDMA_CMD_QUERY_QP_RESP_QP_STATE_MASK GENMASK_ULL(55, 48)
+#define ERDMA_CMD_QUERY_QP_RESP_SQ_DRAINING_MASK GENMASK_ULL(56, 56)
+
+struct erdma_cmdq_query_qp_req_rocev2 {
+ u64 hdr;
+ u32 qpn;
+};
+
+enum erdma_qp_type {
+ ERDMA_QPT_RC = 0,
+ ERDMA_QPT_UD = 1,
+};
+
+/* create qp cfg0 */
+#define ERDMA_CMD_CREATE_QP_SQ_DEPTH_MASK GENMASK(31, 20)
+#define ERDMA_CMD_CREATE_QP_QPN_MASK GENMASK(19, 0)
+
+/* create qp cfg1 */
+#define ERDMA_CMD_CREATE_QP_RQ_DEPTH_MASK GENMASK(31, 20)
+#define ERDMA_CMD_CREATE_QP_PD_MASK GENMASK(19, 0)
+
+/* create qp cfg2 */
+#define ERDMA_CMD_CREATE_QP_TYPE_MASK GENMASK(3, 0)
+
+/* create qp cqn_mtt_cfg */
+#define ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK GENMASK(31, 28)
+#define ERDMA_CMD_CREATE_QP_DB_CFG_MASK BIT(25)
+#define ERDMA_CMD_CREATE_QP_CQN_MASK GENMASK(23, 0)
+
+/* create qp mtt_cfg */
+#define ERDMA_CMD_CREATE_QP_PAGE_OFFSET_MASK GENMASK(31, 12)
+#define ERDMA_CMD_CREATE_QP_MTT_CNT_MASK GENMASK(11, 1)
+#define ERDMA_CMD_CREATE_QP_MTT_LEVEL_MASK BIT(0)
+
+/* create qp db cfg */
+#define ERDMA_CMD_CREATE_QP_SQDB_CFG_MASK GENMASK(31, 16)
+#define ERDMA_CMD_CREATE_QP_RQDB_CFG_MASK GENMASK(15, 0)
+
+#define ERDMA_CMDQ_CREATE_QP_RESP_COOKIE_MASK GENMASK_ULL(31, 0)
+
+struct erdma_cmdq_create_qp_req {
+ u64 hdr;
+ u32 cfg0;
+ u32 cfg1;
+ u32 sq_cqn_mtt_cfg;
+ u32 rq_cqn_mtt_cfg;
+ u64 sq_buf_addr;
+ u64 rq_buf_addr;
+ u32 sq_mtt_cfg;
+ u32 rq_mtt_cfg;
+ u64 sq_dbrec_dma;
+ u64 rq_dbrec_dma;
+
+ u64 sq_mtt_entry[3];
+ u64 rq_mtt_entry[3];
+
+ u32 db_cfg;
+ u32 cfg2;
+};
+
+struct erdma_cmdq_destroy_qp_req {
+ u64 hdr;
+ u32 qpn;
+};
+
+struct erdma_cmdq_reflush_req {
+ u64 hdr;
+ u32 qpn;
+ u32 sq_pi;
+ u32 rq_pi;
+};
+
+#define ERDMA_HW_RESP_SIZE 256
+
+struct erdma_cmdq_query_req {
+ u64 hdr;
+ u32 rsvd;
+ u32 index;
+
+ u64 target_addr;
+ u32 target_length;
+};
+
+#define ERDMA_HW_RESP_MAGIC 0x5566
+
+struct erdma_cmdq_query_resp_hdr {
+ u16 magic;
+ u8 ver;
+ u8 length;
+
+ u32 index;
+ u32 rsvd[2];
+};
+
+struct erdma_cmdq_query_stats_resp {
+ struct erdma_cmdq_query_resp_hdr hdr;
+
+ u64 tx_req_cnt;
+ u64 tx_packets_cnt;
+ u64 tx_bytes_cnt;
+ u64 tx_drop_packets_cnt;
+ u64 tx_bps_meter_drop_packets_cnt;
+ u64 tx_pps_meter_drop_packets_cnt;
+ u64 rx_packets_cnt;
+ u64 rx_bytes_cnt;
+ u64 rx_drop_packets_cnt;
+ u64 rx_bps_meter_drop_packets_cnt;
+ u64 rx_pps_meter_drop_packets_cnt;
+};
+
+enum erdma_network_type {
+ ERDMA_NETWORK_TYPE_IPV4 = 0,
+ ERDMA_NETWORK_TYPE_IPV6 = 1,
+};
+
+enum erdma_set_gid_op {
+ ERDMA_SET_GID_OP_ADD = 0,
+ ERDMA_SET_GID_OP_DEL = 1,
+};
+
+/* set gid cfg */
+#define ERDMA_CMD_SET_GID_SGID_IDX_MASK GENMASK(15, 0)
+#define ERDMA_CMD_SET_GID_NTYPE_MASK BIT(16)
+#define ERDMA_CMD_SET_GID_OP_MASK BIT(31)
+
+struct erdma_cmdq_set_gid_req {
+ u64 hdr;
+ u32 cfg;
+ u8 gid[ERDMA_ROCEV2_GID_SIZE];
+};
+
+/* cap qword 0 definition */
+#define ERDMA_CMD_DEV_CAP_MAX_GID_MASK GENMASK_ULL(51, 48)
+#define ERDMA_CMD_DEV_CAP_MAX_CQE_MASK GENMASK_ULL(47, 40)
+#define ERDMA_CMD_DEV_CAP_FLAGS_MASK GENMASK_ULL(31, 24)
+#define ERDMA_CMD_DEV_CAP_MAX_RECV_WR_MASK GENMASK_ULL(23, 16)
+#define ERDMA_CMD_DEV_CAP_MAX_AH_MASK GENMASK_ULL(15, 8)
+#define ERDMA_CMD_DEV_CAP_MAX_MR_SIZE_MASK GENMASK_ULL(7, 0)
+
+/* cap qword 1 definition */
+#define ERDMA_CMD_DEV_CAP_DMA_LOCAL_KEY_MASK GENMASK_ULL(63, 32)
+#define ERDMA_CMD_DEV_CAP_DEFAULT_CC_MASK GENMASK_ULL(31, 28)
+#define ERDMA_CMD_DEV_CAP_QBLOCK_MASK GENMASK_ULL(27, 16)
+#define ERDMA_CMD_DEV_CAP_MAX_MW_MASK GENMASK_ULL(7, 0)
+
+#define ERDMA_NQP_PER_QBLOCK 1024
+
+enum {
+ ERDMA_DEV_CAP_FLAGS_ATOMIC = 1 << 7,
+ ERDMA_DEV_CAP_FLAGS_MTT_VA = 1 << 5,
+ ERDMA_DEV_CAP_FLAGS_EXTEND_DB = 1 << 3,
+};
+
+#define ERDMA_CMD_INFO0_FW_VER_MASK GENMASK_ULL(31, 0)
+
+/* CQE hdr */
+#define ERDMA_CQE_HDR_OWNER_MASK BIT(31)
+#define ERDMA_CQE_HDR_OPCODE_MASK GENMASK(23, 16)
+#define ERDMA_CQE_HDR_QTYPE_MASK GENMASK(15, 8)
+#define ERDMA_CQE_HDR_SYNDROME_MASK GENMASK(7, 0)
+
+#define ERDMA_CQE_QTYPE_SQ 0
+#define ERDMA_CQE_QTYPE_RQ 1
+#define ERDMA_CQE_QTYPE_CMDQ 2
+
+#define ERDMA_CQE_NTYPE_MASK BIT(31)
+#define ERDMA_CQE_SL_MASK GENMASK(27, 20)
+#define ERDMA_CQE_SQPN_MASK GENMASK(19, 0)
+
+struct erdma_cqe {
+ __be32 hdr;
+ __be32 qe_idx;
+ __be32 qpn;
+ union {
+ __le32 imm_data;
+ __be32 inv_rkey;
+ };
+ __be32 size;
+ union {
+ struct {
+ __be32 rsvd[3];
+ } rc;
+
+ struct {
+ __be32 rsvd[2];
+ __be32 info;
+ } ud;
+ };
+};
+
+struct erdma_sge {
+ __aligned_le64 addr;
+ __le32 length;
+ __le32 key;
+};
+
+/* Receive Queue Element */
+struct erdma_rqe {
+ __le16 qe_idx;
+ __le16 rsvd0;
+ __le32 qpn;
+ __le32 rsvd1;
+ __le32 rsvd2;
+ __le64 to;
+ __le32 length;
+ __le32 stag;
+};
+
+/* SQE */
+#define ERDMA_SQE_HDR_SGL_LEN_MASK GENMASK_ULL(63, 56)
+#define ERDMA_SQE_HDR_WQEBB_CNT_MASK GENMASK_ULL(54, 52)
+#define ERDMA_SQE_HDR_QPN_MASK GENMASK_ULL(51, 32)
+#define ERDMA_SQE_HDR_OPCODE_MASK GENMASK_ULL(31, 27)
+#define ERDMA_SQE_HDR_DWQE_MASK BIT_ULL(26)
+#define ERDMA_SQE_HDR_INLINE_MASK BIT_ULL(25)
+#define ERDMA_SQE_HDR_FENCE_MASK BIT_ULL(24)
+#define ERDMA_SQE_HDR_SE_MASK BIT_ULL(23)
+#define ERDMA_SQE_HDR_CE_MASK BIT_ULL(22)
+#define ERDMA_SQE_HDR_WQEBB_INDEX_MASK GENMASK_ULL(15, 0)
+
+/* REG MR attrs */
+#define ERDMA_SQE_MR_ACCESS_MASK GENMASK(5, 1)
+#define ERDMA_SQE_MR_MTT_TYPE_MASK GENMASK(7, 6)
+#define ERDMA_SQE_MR_MTT_CNT_MASK GENMASK(31, 12)
+
+struct erdma_write_sqe {
+ __le64 hdr;
+ __be32 imm_data;
+ __le32 length;
+
+ __le32 sink_stag;
+ __le32 sink_to_l;
+ __le32 sink_to_h;
+
+ __le32 rsvd;
+
+ struct erdma_sge sgl[];
+};
+
+struct erdma_send_sqe_rc {
+ __le64 hdr;
+ union {
+ __be32 imm_data;
+ __le32 invalid_stag;
+ };
+
+ __le32 length;
+ struct erdma_sge sgl[];
+};
+
+struct erdma_send_sqe_ud {
+ __le64 hdr;
+ __be32 imm_data;
+ __le32 length;
+ __le32 qkey;
+ __le32 dst_qpn;
+ __le32 ahn;
+ __le32 rsvd;
+ struct erdma_sge sgl[];
+};
+
+struct erdma_readreq_sqe {
+ __le64 hdr;
+ __le32 invalid_stag;
+ __le32 length;
+ __le32 sink_stag;
+ __le32 sink_to_l;
+ __le32 sink_to_h;
+ __le32 rsvd;
+};
+
+struct erdma_atomic_sqe {
+ __le64 hdr;
+ __le64 rsvd;
+ __le64 fetchadd_swap_data;
+ __le64 cmp_data;
+
+ struct erdma_sge remote;
+ struct erdma_sge sgl;
+};
+
+struct erdma_reg_mr_sqe {
+ __le64 hdr;
+ __le64 addr;
+ __le32 length;
+ __le32 stag;
+ __le32 attrs;
+ __le32 rsvd;
+};
+
+/* EQ related. */
+#define ERDMA_DEFAULT_EQ_DEPTH 4096
+
+/* ceqe */
+#define ERDMA_CEQE_HDR_DB_MASK BIT_ULL(63)
+#define ERDMA_CEQE_HDR_PI_MASK GENMASK_ULL(55, 32)
+#define ERDMA_CEQE_HDR_O_MASK BIT_ULL(31)
+#define ERDMA_CEQE_HDR_CQN_MASK GENMASK_ULL(19, 0)
+
+/* aeqe */
+#define ERDMA_AEQE_HDR_O_MASK BIT(31)
+#define ERDMA_AEQE_HDR_TYPE_MASK GENMASK(23, 16)
+#define ERDMA_AEQE_HDR_SUBTYPE_MASK GENMASK(7, 0)
+
+#define ERDMA_AE_TYPE_QP_FATAL_EVENT 0
+#define ERDMA_AE_TYPE_QP_ERQ_ERR_EVENT 1
+#define ERDMA_AE_TYPE_ACC_ERR_EVENT 2
+#define ERDMA_AE_TYPE_CQ_ERR 3
+#define ERDMA_AE_TYPE_OTHER_ERROR 4
+
+struct erdma_aeqe {
+ __le32 hdr;
+ __le32 event_data0;
+ __le32 event_data1;
+ __le32 rsvd;
+};
+
+enum erdma_opcode {
+ ERDMA_OP_WRITE = 0,
+ ERDMA_OP_READ = 1,
+ ERDMA_OP_SEND = 2,
+ ERDMA_OP_SEND_WITH_IMM = 3,
+
+ ERDMA_OP_RECEIVE = 4,
+ ERDMA_OP_RECV_IMM = 5,
+ ERDMA_OP_RECV_INV = 6,
+
+ ERDMA_OP_RSVD0 = 7,
+ ERDMA_OP_RSVD1 = 8,
+ ERDMA_OP_WRITE_WITH_IMM = 9,
+
+ ERDMA_OP_RSVD2 = 10,
+ ERDMA_OP_RSVD3 = 11,
+
+ ERDMA_OP_RSP_SEND_IMM = 12,
+ ERDMA_OP_SEND_WITH_INV = 13,
+
+ ERDMA_OP_REG_MR = 14,
+ ERDMA_OP_LOCAL_INV = 15,
+ ERDMA_OP_READ_WITH_INV = 16,
+ ERDMA_OP_ATOMIC_CAS = 17,
+ ERDMA_OP_ATOMIC_FAA = 18,
+ ERDMA_NUM_OPCODES = 19,
+ ERDMA_OP_INVALID = ERDMA_NUM_OPCODES + 1
+};
+
+enum erdma_wc_status {
+ ERDMA_WC_SUCCESS = 0,
+ ERDMA_WC_GENERAL_ERR = 1,
+ ERDMA_WC_RECV_WQE_FORMAT_ERR = 2,
+ ERDMA_WC_RECV_STAG_INVALID_ERR = 3,
+ ERDMA_WC_RECV_ADDR_VIOLATION_ERR = 4,
+ ERDMA_WC_RECV_RIGHT_VIOLATION_ERR = 5,
+ ERDMA_WC_RECV_PDID_ERR = 6,
+ ERDMA_WC_RECV_WARRPING_ERR = 7,
+ ERDMA_WC_SEND_WQE_FORMAT_ERR = 8,
+ ERDMA_WC_SEND_WQE_ORD_EXCEED = 9,
+ ERDMA_WC_SEND_STAG_INVALID_ERR = 10,
+ ERDMA_WC_SEND_ADDR_VIOLATION_ERR = 11,
+ ERDMA_WC_SEND_RIGHT_VIOLATION_ERR = 12,
+ ERDMA_WC_SEND_PDID_ERR = 13,
+ ERDMA_WC_SEND_WARRPING_ERR = 14,
+ ERDMA_WC_FLUSH_ERR = 15,
+ ERDMA_WC_RETRY_EXC_ERR = 16,
+ ERDMA_NUM_WC_STATUS
+};
+
+enum erdma_vendor_err {
+ ERDMA_WC_VENDOR_NO_ERR = 0,
+ ERDMA_WC_VENDOR_INVALID_RQE = 1,
+ ERDMA_WC_VENDOR_RQE_INVALID_STAG = 2,
+ ERDMA_WC_VENDOR_RQE_ADDR_VIOLATION = 3,
+ ERDMA_WC_VENDOR_RQE_ACCESS_RIGHT_ERR = 4,
+ ERDMA_WC_VENDOR_RQE_INVALID_PD = 5,
+ ERDMA_WC_VENDOR_RQE_WRAP_ERR = 6,
+ ERDMA_WC_VENDOR_INVALID_SQE = 0x20,
+ ERDMA_WC_VENDOR_ZERO_ORD = 0x21,
+ ERDMA_WC_VENDOR_SQE_INVALID_STAG = 0x30,
+ ERDMA_WC_VENDOR_SQE_ADDR_VIOLATION = 0x31,
+ ERDMA_WC_VENDOR_SQE_ACCESS_ERR = 0x32,
+ ERDMA_WC_VENDOR_SQE_INVALID_PD = 0x33,
+ ERDMA_WC_VENDOR_SQE_WARP_ERR = 0x34
+};
+
+#endif
diff --git a/drivers/infiniband/hw/erdma/erdma_main.c b/drivers/infiniband/hw/erdma/erdma_main.c
new file mode 100644
index 000000000000..f35b30235018
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_main.c
@@ -0,0 +1,684 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#include <linux/module.h>
+#include <net/addrconf.h>
+#include <rdma/erdma-abi.h>
+
+#include "erdma.h"
+#include "erdma_cm.h"
+#include "erdma_verbs.h"
+
+MODULE_AUTHOR("Cheng Xu <chengyou@linux.alibaba.com>");
+MODULE_DESCRIPTION("Alibaba elasticRDMA adapter driver");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static int erdma_netdev_event(struct notifier_block *nb, unsigned long event,
+ void *arg)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(arg);
+ struct erdma_dev *dev = container_of(nb, struct erdma_dev, netdev_nb);
+
+ if (dev->netdev == NULL || dev->netdev != netdev)
+ goto done;
+
+ switch (event) {
+ case NETDEV_CHANGEMTU:
+ if (dev->mtu != netdev->mtu) {
+ erdma_set_mtu(dev, netdev->mtu);
+ dev->mtu = netdev->mtu;
+ }
+ break;
+ case NETDEV_REGISTER:
+ case NETDEV_UNREGISTER:
+ case NETDEV_CHANGEADDR:
+ case NETDEV_GOING_DOWN:
+ case NETDEV_CHANGE:
+ default:
+ break;
+ }
+
+done:
+ return NOTIFY_OK;
+}
+
+static int erdma_enum_and_get_netdev(struct erdma_dev *dev)
+{
+ struct net_device *netdev;
+ int ret = -EPROBE_DEFER;
+
+ /* Already bound to a net_device, so we skip. */
+ if (dev->netdev)
+ return 0;
+
+ rtnl_lock();
+ for_each_netdev(&init_net, netdev) {
+ /*
+ * In erdma, the paired netdev and ibdev should have the same
+ * MAC address. erdma can get the value from its PCIe BAR
+ * registers. Since erdma cannot get a reference to the paired
+ * netdev directly, traverse the netdev list here to find it.
+ */
+ if (ether_addr_equal_unaligned(netdev->perm_addr,
+ dev->attrs.peer_addr)) {
+ ret = ib_device_set_netdev(&dev->ibdev, netdev, 1);
+ if (ret) {
+ rtnl_unlock();
+ ibdev_warn(&dev->ibdev,
+ "failed (%d) to link netdev", ret);
+ return ret;
+ }
+
+ dev->netdev = netdev;
+ break;
+ }
+ }
+
+ rtnl_unlock();
+
+ return ret;
+}
+
+static int erdma_device_register(struct erdma_dev *dev)
+{
+ struct ib_device *ibdev = &dev->ibdev;
+ int ret;
+
+ ret = erdma_enum_and_get_netdev(dev);
+ if (ret)
+ return ret;
+
+ dev->mtu = dev->netdev->mtu;
+ addrconf_addr_eui48((u8 *)&ibdev->node_guid, dev->netdev->dev_addr);
+
+ ret = ib_register_device(ibdev, "erdma_%d", &dev->pdev->dev);
+ if (ret) {
+ dev_err(&dev->pdev->dev,
+ "ib_register_device failed: ret = %d\n", ret);
+ return ret;
+ }
+
+ dev->netdev_nb.notifier_call = erdma_netdev_event;
+ ret = register_netdevice_notifier(&dev->netdev_nb);
+ if (ret) {
+ ibdev_err(&dev->ibdev, "failed to register notifier.\n");
+ ib_unregister_device(ibdev);
+ }
+
+ return ret;
+}
+
+static irqreturn_t erdma_comm_irq_handler(int irq, void *data)
+{
+ struct erdma_dev *dev = data;
+
+ erdma_cmdq_completion_handler(&dev->cmdq);
+ erdma_aeq_event_handler(dev);
+
+ return IRQ_HANDLED;
+}
+
+static int erdma_request_vectors(struct erdma_dev *dev)
+{
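+ /*
+ * One vector for the common (CMDQ/AEQ) interrupt plus one CEQ
+ * vector per possible CPU, capped at ERDMA_NUM_MSIX_VEC.
+ */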
+ int expect_irq_num = min(num_possible_cpus() + 1, ERDMA_NUM_MSIX_VEC);
+ int ret;
+
+ ret = pci_alloc_irq_vectors(dev->pdev, 1, expect_irq_num, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(&dev->pdev->dev, "request irq vectors failed(%d)\n",
+ ret);
+ return ret;
+ }
+ dev->attrs.irq_num = ret;
+
+ return 0;
+}
+
+static int erdma_comm_irq_init(struct erdma_dev *dev)
+{
+ snprintf(dev->comm_irq.name, ERDMA_IRQNAME_SIZE, "erdma-common@pci:%s",
+ pci_name(dev->pdev));
+ dev->comm_irq.msix_vector =
+ pci_irq_vector(dev->pdev, ERDMA_MSIX_VECTOR_CMDQ);
+
+ cpumask_set_cpu(cpumask_first(cpumask_of_pcibus(dev->pdev->bus)),
+ &dev->comm_irq.affinity_hint_mask);
+ irq_set_affinity_hint(dev->comm_irq.msix_vector,
+ &dev->comm_irq.affinity_hint_mask);
+
+ return request_irq(dev->comm_irq.msix_vector, erdma_comm_irq_handler, 0,
+ dev->comm_irq.name, dev);
+}
+
+static void erdma_comm_irq_uninit(struct erdma_dev *dev)
+{
+ irq_set_affinity_hint(dev->comm_irq.msix_vector, NULL);
+ free_irq(dev->comm_irq.msix_vector, dev);
+}
+
+static int erdma_device_init(struct erdma_dev *dev, struct pci_dev *pdev)
+{
+ int ret;
+
+ dev->proto = erdma_reg_read32(dev, ERDMA_REGS_DEV_PROTO_REG);
+
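+ /* DMA pools for hardware response buffers and 8-byte doorbell records. */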
+ dev->resp_pool = dma_pool_create("erdma_resp_pool", &pdev->dev,
+ ERDMA_HW_RESP_SIZE, ERDMA_HW_RESP_SIZE,
+ 0);
+ if (!dev->resp_pool)
+ return -ENOMEM;
+
+ dev->db_pool = dma_pool_create("erdma_db_pool", &pdev->dev,
+ ERDMA_DB_SIZE, ERDMA_DB_SIZE, 0);
+ if (!dev->db_pool) {
+ ret = -ENOMEM;
+ goto destroy_resp_pool;
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(ERDMA_PCI_WIDTH));
+ if (ret)
+ goto destroy_db_pool;
+
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+
+ return 0;
+
+destroy_db_pool:
+ dma_pool_destroy(dev->db_pool);
+
+destroy_resp_pool:
+ dma_pool_destroy(dev->resp_pool);
+
+ return ret;
+}
+
+static void erdma_device_uninit(struct erdma_dev *dev)
+{
+ dma_pool_destroy(dev->db_pool);
+ dma_pool_destroy(dev->resp_pool);
+}
+
+static void erdma_hw_reset(struct erdma_dev *dev)
+{
+ u32 ctrl = FIELD_PREP(ERDMA_REG_DEV_CTRL_RESET_MASK, 1);
+
+ erdma_reg_write32(dev, ERDMA_REGS_DEV_CTRL_REG, ctrl);
+}
+
+static int erdma_wait_hw_init_done(struct erdma_dev *dev)
+{
+ int i;
+
+ erdma_reg_write32(dev, ERDMA_REGS_DEV_CTRL_REG,
+ FIELD_PREP(ERDMA_REG_DEV_CTRL_INIT_MASK, 1));
+
+ for (i = 0; i < ERDMA_WAIT_DEV_DONE_CNT; i++) {
+ if (erdma_reg_read32_filed(dev, ERDMA_REGS_DEV_ST_REG,
+ ERDMA_REG_DEV_ST_INIT_DONE_MASK))
+ break;
+
+ msleep(ERDMA_REG_ACCESS_WAIT_MS);
+ }
+
+ if (i == ERDMA_WAIT_DEV_DONE_CNT) {
+ dev_err(&dev->pdev->dev, "wait init done failed.\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static const struct pci_device_id erdma_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_ALIBABA, 0x107f) },
+ {}
+};
+
+static int erdma_probe_dev(struct pci_dev *pdev)
+{
+ struct erdma_dev *dev;
+ int bars, err;
+ u32 version;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_device failed(%d)\n", err);
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+ dev = ib_alloc_device(erdma_dev, ibdev);
+ if (!dev) {
+ dev_err(&pdev->dev, "ib_alloc_device failed\n");
+ err = -ENOMEM;
+ goto err_disable_device;
+ }
+
+ pci_set_drvdata(pdev, dev);
+ dev->pdev = pdev;
+ dev->attrs.numa_node = dev_to_node(&pdev->dev);
+
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
+ if (bars != ERDMA_BAR_MASK || err) {
+ err = err ? err : -EINVAL;
+ goto err_ib_device_release;
+ }
+
+ dev->func_bar_addr = pci_resource_start(pdev, ERDMA_FUNC_BAR);
+ dev->func_bar_len = pci_resource_len(pdev, ERDMA_FUNC_BAR);
+
+ dev->func_bar =
+ devm_ioremap(&pdev->dev, dev->func_bar_addr, dev->func_bar_len);
+ if (!dev->func_bar) {
+ dev_err(&pdev->dev, "devm_ioremap failed.\n");
+ err = -EFAULT;
+ goto err_release_bars;
+ }
+
+ version = erdma_reg_read32(dev, ERDMA_REGS_VERSION_REG);
+ if (version == 0) {
+ /* A zero version register indicates a non-functional function. */
+ err = -ENODEV;
+ goto err_iounmap_func_bar;
+ }
+
+ err = erdma_device_init(dev, pdev);
+ if (err)
+ goto err_iounmap_func_bar;
+
+ err = erdma_request_vectors(dev);
+ if (err)
+ goto err_uninit_device;
+
+ err = erdma_comm_irq_init(dev);
+ if (err)
+ goto err_free_vectors;
+
+ err = erdma_aeq_init(dev);
+ if (err)
+ goto err_uninit_comm_irq;
+
+ err = erdma_cmdq_init(dev);
+ if (err)
+ goto err_uninit_aeq;
+
+ err = erdma_wait_hw_init_done(dev);
+ if (err)
+ goto err_uninit_cmdq;
+
+ err = erdma_ceqs_init(dev);
+ if (err)
+ goto err_reset_hw;
+
+ erdma_finish_cmdq_init(dev);
+
+ return 0;
+
+err_reset_hw:
+ erdma_hw_reset(dev);
+
+err_uninit_cmdq:
+ erdma_cmdq_destroy(dev);
+
+err_uninit_aeq:
+ erdma_eq_destroy(dev, &dev->aeq);
+
+err_uninit_comm_irq:
+ erdma_comm_irq_uninit(dev);
+
+err_free_vectors:
+ pci_free_irq_vectors(dev->pdev);
+
+err_uninit_device:
+ erdma_device_uninit(dev);
+
+err_iounmap_func_bar:
+ devm_iounmap(&pdev->dev, dev->func_bar);
+
+err_release_bars:
+ pci_release_selected_regions(pdev, bars);
+
+err_ib_device_release:
+ ib_dealloc_device(&dev->ibdev);
+
+err_disable_device:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void erdma_remove_dev(struct pci_dev *pdev)
+{
+ struct erdma_dev *dev = pci_get_drvdata(pdev);
+
+ erdma_ceqs_uninit(dev);
+ erdma_hw_reset(dev);
+ erdma_cmdq_destroy(dev);
+ erdma_eq_destroy(dev, &dev->aeq);
+ erdma_comm_irq_uninit(dev);
+ pci_free_irq_vectors(dev->pdev);
+ erdma_device_uninit(dev);
+
+ devm_iounmap(&pdev->dev, dev->func_bar);
+ pci_release_selected_regions(pdev, ERDMA_BAR_MASK);
+
+ ib_dealloc_device(&dev->ibdev);
+
+ pci_disable_device(pdev);
+}
+
+#define ERDMA_GET_CAP(name, cap) FIELD_GET(ERDMA_CMD_DEV_CAP_##name##_MASK, cap)
+
+static int erdma_dev_attrs_init(struct erdma_dev *dev)
+{
+ int err;
+ u64 req_hdr, cap0, cap1;
+
+ erdma_cmdq_build_reqhdr(&req_hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_QUERY_DEVICE);
+
+ err = erdma_post_cmd_wait(&dev->cmdq, &req_hdr, sizeof(req_hdr), &cap0,
+ &cap1, true);
+ if (err)
+ return err;
+
+ dev->attrs.max_cqe = 1 << ERDMA_GET_CAP(MAX_CQE, cap0);
+ dev->attrs.max_mr_size = 1ULL << ERDMA_GET_CAP(MAX_MR_SIZE, cap0);
+ dev->attrs.max_mw = 1 << ERDMA_GET_CAP(MAX_MW, cap1);
+ dev->attrs.max_recv_wr = 1 << ERDMA_GET_CAP(MAX_RECV_WR, cap0);
+ dev->attrs.max_gid = 1 << ERDMA_GET_CAP(MAX_GID, cap0);
+ dev->attrs.max_ah = 1 << ERDMA_GET_CAP(MAX_AH, cap0);
+ dev->attrs.local_dma_key = ERDMA_GET_CAP(DMA_LOCAL_KEY, cap1);
+ dev->attrs.cc = ERDMA_GET_CAP(DEFAULT_CC, cap1);
+ dev->attrs.max_qp = ERDMA_NQP_PER_QBLOCK * ERDMA_GET_CAP(QBLOCK, cap1);
+ dev->attrs.max_mr = dev->attrs.max_qp << 1;
+ dev->attrs.max_cq = dev->attrs.max_qp << 1;
+ dev->attrs.cap_flags = ERDMA_GET_CAP(FLAGS, cap0);
+
+ dev->attrs.max_send_wr = ERDMA_MAX_SEND_WR;
+ dev->attrs.max_ord = ERDMA_MAX_ORD;
+ dev->attrs.max_ird = ERDMA_MAX_IRD;
+ dev->attrs.max_send_sge = ERDMA_MAX_SEND_SGE;
+ dev->attrs.max_recv_sge = ERDMA_MAX_RECV_SGE;
+ dev->attrs.max_sge_rd = ERDMA_MAX_SGE_RD;
+ dev->attrs.max_pd = ERDMA_MAX_PD;
+
+ dev->res_cb[ERDMA_RES_TYPE_PD].max_cap = ERDMA_MAX_PD;
+ dev->res_cb[ERDMA_RES_TYPE_STAG_IDX].max_cap = dev->attrs.max_mr;
+ dev->res_cb[ERDMA_RES_TYPE_AH].max_cap = dev->attrs.max_ah;
+
+ erdma_cmdq_build_reqhdr(&req_hdr, CMDQ_SUBMOD_COMMON,
+ CMDQ_OPCODE_QUERY_FW_INFO);
+
+ err = erdma_post_cmd_wait(&dev->cmdq, &req_hdr, sizeof(req_hdr), &cap0,
+ &cap1, true);
+ if (!err)
+ dev->attrs.fw_version =
+ FIELD_GET(ERDMA_CMD_INFO0_FW_VER_MASK, cap0);
+
+ return err;
+}
+
+static int erdma_device_config(struct erdma_dev *dev)
+{
+ struct erdma_cmdq_config_device_req req = {};
+
+ if (!(dev->attrs.cap_flags & ERDMA_DEV_CAP_FLAGS_EXTEND_DB))
+ return 0;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
+ CMDQ_OPCODE_CONF_DEVICE);
+
+ req.cfg = FIELD_PREP(ERDMA_CMD_CONFIG_DEVICE_PGSHIFT_MASK, PAGE_SHIFT) |
+ FIELD_PREP(ERDMA_CMD_CONFIG_DEVICE_PS_EN_MASK, 1);
+
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
+}
+
+static int erdma_res_cb_init(struct erdma_dev *dev)
+{
+ int i, j;
+
+ for (i = 0; i < ERDMA_RES_CNT; i++) {
+ dev->res_cb[i].next_alloc_idx = 1;
+ spin_lock_init(&dev->res_cb[i].lock);
+ dev->res_cb[i].bitmap =
+ bitmap_zalloc(dev->res_cb[i].max_cap, GFP_KERNEL);
+ if (!dev->res_cb[i].bitmap)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (j = 0; j < i; j++)
+ bitmap_free(dev->res_cb[j].bitmap);
+
+ return -ENOMEM;
+}
+
+static void erdma_res_cb_free(struct erdma_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ERDMA_RES_CNT; i++)
+ bitmap_free(dev->res_cb[i].bitmap);
+}
+
+static const struct ib_device_ops erdma_device_ops_rocev2 = {
+ .get_link_layer = erdma_get_link_layer,
+ .add_gid = erdma_add_gid,
+ .del_gid = erdma_del_gid,
+ .query_pkey = erdma_query_pkey,
+ .create_ah = erdma_create_ah,
+ .destroy_ah = erdma_destroy_ah,
+ .query_ah = erdma_query_ah,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, erdma_ah, ibah),
+};
+
+static const struct ib_device_ops erdma_device_ops_iwarp = {
+ .iw_accept = erdma_accept,
+ .iw_add_ref = erdma_qp_get_ref,
+ .iw_connect = erdma_connect,
+ .iw_create_listen = erdma_create_listen,
+ .iw_destroy_listen = erdma_destroy_listen,
+ .iw_get_qp = erdma_get_ibqp,
+ .iw_reject = erdma_reject,
+ .iw_rem_ref = erdma_qp_put_ref,
+};
+
+static const struct ib_device_ops erdma_device_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_ERDMA,
+ .uverbs_abi_ver = ERDMA_ABI_VERSION,
+
+ .alloc_hw_port_stats = erdma_alloc_hw_port_stats,
+ .alloc_mr = erdma_ib_alloc_mr,
+ .alloc_pd = erdma_alloc_pd,
+ .alloc_ucontext = erdma_alloc_ucontext,
+ .create_cq = erdma_create_cq,
+ .create_qp = erdma_create_qp,
+ .dealloc_pd = erdma_dealloc_pd,
+ .dealloc_ucontext = erdma_dealloc_ucontext,
+ .dereg_mr = erdma_dereg_mr,
+ .destroy_cq = erdma_destroy_cq,
+ .destroy_qp = erdma_destroy_qp,
+ .disassociate_ucontext = erdma_disassociate_ucontext,
+ .get_dma_mr = erdma_get_dma_mr,
+ .get_hw_stats = erdma_get_hw_stats,
+ .get_port_immutable = erdma_get_port_immutable,
+ .map_mr_sg = erdma_map_mr_sg,
+ .mmap = erdma_mmap,
+ .mmap_free = erdma_mmap_free,
+ .post_recv = erdma_post_recv,
+ .post_send = erdma_post_send,
+ .poll_cq = erdma_poll_cq,
+ .query_device = erdma_query_device,
+ .query_gid = erdma_query_gid,
+ .query_port = erdma_query_port,
+ .query_qp = erdma_query_qp,
+ .req_notify_cq = erdma_req_notify_cq,
+ .reg_user_mr = erdma_reg_user_mr,
+ .modify_qp = erdma_modify_qp,
+
+ INIT_RDMA_OBJ_SIZE(ib_cq, erdma_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_pd, erdma_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, erdma_ucontext, ibucontext),
+ INIT_RDMA_OBJ_SIZE(ib_qp, erdma_qp, ibqp),
+};
+
+static int erdma_ib_device_add(struct pci_dev *pdev)
+{
+ struct erdma_dev *dev = pci_get_drvdata(pdev);
+ struct ib_device *ibdev = &dev->ibdev;
+ u64 mac;
+ int ret;
+
+ ret = erdma_dev_attrs_init(dev);
+ if (ret)
+ return ret;
+
+ ret = erdma_device_config(dev);
+ if (ret)
+ return ret;
+
+ if (erdma_device_iwarp(dev)) {
+ ibdev->node_type = RDMA_NODE_RNIC;
+ ib_set_device_ops(ibdev, &erdma_device_ops_iwarp);
+ } else {
+ ibdev->node_type = RDMA_NODE_IB_CA;
+ ib_set_device_ops(ibdev, &erdma_device_ops_rocev2);
+ }
+
+ memcpy(ibdev->node_desc, ERDMA_NODE_DESC, sizeof(ERDMA_NODE_DESC));
+
+ /*
+ * Current model (one-to-one device association):
+ * One ERDMA device per net_device or, equivalently,
+ * per physical port.
+ */
+ ibdev->phys_port_cnt = 1;
+ ibdev->num_comp_vectors = dev->attrs.irq_num - 1;
+
+ ib_set_device_ops(ibdev, &erdma_device_ops);
+
+ INIT_LIST_HEAD(&dev->cep_list);
+
+ spin_lock_init(&dev->lock);
+ xa_init_flags(&dev->qp_xa, XA_FLAGS_ALLOC1);
+ xa_init_flags(&dev->cq_xa, XA_FLAGS_ALLOC1);
+ dev->next_alloc_cqn = 1;
+ dev->next_alloc_qpn = 1;
+
+ ret = erdma_res_cb_init(dev);
+ if (ret)
+ return ret;
+
+ atomic_set(&dev->num_ctx, 0);
+
+ mac = erdma_reg_read32(dev, ERDMA_REGS_NETDEV_MAC_L_REG);
+ mac |= (u64)erdma_reg_read32(dev, ERDMA_REGS_NETDEV_MAC_H_REG) << 32;
+
+ u64_to_ether_addr(mac, dev->attrs.peer_addr);
+
+ dev->reflush_wq = alloc_workqueue("erdma-reflush-wq", WQ_UNBOUND,
+ WQ_UNBOUND_MAX_ACTIVE);
+ if (!dev->reflush_wq) {
+ ret = -ENOMEM;
+ goto err_alloc_workqueue;
+ }
+
+ ret = erdma_device_register(dev);
+ if (ret)
+ goto err_register;
+
+ return 0;
+
+err_register:
+ destroy_workqueue(dev->reflush_wq);
+err_alloc_workqueue:
+ xa_destroy(&dev->qp_xa);
+ xa_destroy(&dev->cq_xa);
+
+ erdma_res_cb_free(dev);
+
+ return ret;
+}
+
+static void erdma_ib_device_remove(struct pci_dev *pdev)
+{
+ struct erdma_dev *dev = pci_get_drvdata(pdev);
+
+ unregister_netdevice_notifier(&dev->netdev_nb);
+ ib_unregister_device(&dev->ibdev);
+
+ destroy_workqueue(dev->reflush_wq);
+ erdma_res_cb_free(dev);
+ xa_destroy(&dev->qp_xa);
+ xa_destroy(&dev->cq_xa);
+}
+
+static int erdma_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int ret;
+
+ ret = erdma_probe_dev(pdev);
+ if (ret)
+ return ret;
+
+ ret = erdma_ib_device_add(pdev);
+ if (ret) {
+ erdma_remove_dev(pdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void erdma_remove(struct pci_dev *pdev)
+{
+ erdma_ib_device_remove(pdev);
+ erdma_remove_dev(pdev);
+}
+
+static struct pci_driver erdma_pci_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = erdma_pci_tbl,
+ .probe = erdma_probe,
+ .remove = erdma_remove
+};
+
+MODULE_DEVICE_TABLE(pci, erdma_pci_tbl);
+
+static __init int erdma_init_module(void)
+{
+ int ret;
+
+ ret = erdma_cm_init();
+ if (ret)
+ return ret;
+
+ ret = pci_register_driver(&erdma_pci_driver);
+ if (ret)
+ erdma_cm_exit();
+
+ return ret;
+}
+
+static void __exit erdma_exit_module(void)
+{
+ pci_unregister_driver(&erdma_pci_driver);
+
+ erdma_cm_exit();
+}
+
+module_init(erdma_init_module);
+module_exit(erdma_exit_module);
diff --git a/drivers/infiniband/hw/erdma/erdma_qp.c b/drivers/infiniband/hw/erdma/erdma_qp.c
new file mode 100644
index 000000000000..25f6c49aec77
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_qp.c
@@ -0,0 +1,757 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2021, Alibaba Group */
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+
+#include "erdma_cm.h"
+#include "erdma_verbs.h"
+
+void erdma_qp_llp_close(struct erdma_qp *qp)
+{
+ struct erdma_mod_qp_params_iwarp params;
+
+ down_write(&qp->state_lock);
+
+ switch (qp->attrs.iwarp.state) {
+ case ERDMA_QPS_IWARP_RTS:
+ case ERDMA_QPS_IWARP_RTR:
+ case ERDMA_QPS_IWARP_IDLE:
+ case ERDMA_QPS_IWARP_TERMINATE:
+ params.state = ERDMA_QPS_IWARP_CLOSING;
+ erdma_modify_qp_state_iwarp(qp, &params, ERDMA_QPA_IWARP_STATE);
+ break;
+ case ERDMA_QPS_IWARP_CLOSING:
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_IDLE;
+ break;
+ default:
+ break;
+ }
+
+ if (qp->cep) {
+ erdma_cep_put(qp->cep);
+ qp->cep = NULL;
+ }
+
+ up_write(&qp->state_lock);
+}
+
+struct ib_qp *erdma_get_ibqp(struct ib_device *ibdev, int id)
+{
+ struct erdma_qp *qp = find_qp_by_qpn(to_edev(ibdev), id);
+
+ if (qp)
+ return &qp->ibqp;
+
+ return NULL;
+}
+
+static int
+erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_iwarp *params,
+ enum erdma_qpa_mask_iwarp mask)
+{
+ int ret;
+ struct erdma_dev *dev = qp->dev;
+ struct erdma_cmdq_modify_qp_req req;
+ struct tcp_sock *tp;
+ struct erdma_cep *cep = qp->cep;
+ struct sockaddr_storage local_addr, remote_addr;
+
+ if (!(mask & ERDMA_QPA_IWARP_LLP_HANDLE))
+ return -EINVAL;
+
+ if (!(mask & ERDMA_QPA_IWARP_MPA))
+ return -EINVAL;
+
+ if (!(mask & ERDMA_QPA_IWARP_CC))
+ params->cc = qp->attrs.cc;
+
+ ret = getname_local(cep->sock, &local_addr);
+ if (ret < 0)
+ return ret;
+
+ ret = getname_peer(cep->sock, &remote_addr);
+ if (ret < 0)
+ return ret;
+
+ tp = tcp_sk(qp->cep->sock->sk);
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_MODIFY_QP);
+
+ req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, params->state) |
+ FIELD_PREP(ERDMA_CMD_MODIFY_QP_CC_MASK, params->cc) |
+ FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
+
+ req.cookie = be32_to_cpu(cep->mpa.ext_data.cookie);
+ req.dip = to_sockaddr_in(remote_addr).sin_addr.s_addr;
+ req.sip = to_sockaddr_in(local_addr).sin_addr.s_addr;
+ req.dport = to_sockaddr_in(remote_addr).sin_port;
+ req.sport = to_sockaddr_in(local_addr).sin_port;
+
+ req.send_nxt = tp->snd_nxt;
+ /* Reserve TCP sequence space for the MPA response sent by the passive (server) side. */
+ if (params->qp_type == ERDMA_QP_PASSIVE)
+ req.send_nxt += MPA_DEFAULT_HDR_LEN + params->pd_len;
+ req.recv_nxt = tp->rcv_nxt;
+
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
+ if (ret)
+ return ret;
+
+ if (mask & ERDMA_QPA_IWARP_IRD)
+ qp->attrs.irq_size = params->irq_size;
+
+ if (mask & ERDMA_QPA_IWARP_ORD)
+ qp->attrs.orq_size = params->orq_size;
+
+ if (mask & ERDMA_QPA_IWARP_CC)
+ qp->attrs.cc = params->cc;
+
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_RTS;
+
+ return 0;
+}
+
+static int
+erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_iwarp *params,
+ enum erdma_qpa_mask_iwarp mask)
+{
+ struct erdma_dev *dev = qp->dev;
+ struct erdma_cmdq_modify_qp_req req;
+ int ret;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_MODIFY_QP);
+
+ req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, params->state) |
+ FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
+
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
+ if (ret)
+ return ret;
+
+ qp->attrs.iwarp.state = params->state;
+
+ return 0;
+}
+
+int erdma_modify_qp_state_iwarp(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_iwarp *params,
+ int mask)
+{
+ bool need_reflush = false;
+ int drop_conn, ret = 0;
+
+ if (!mask)
+ return 0;
+
+ if (!(mask & ERDMA_QPA_IWARP_STATE))
+ return 0;
+
+ switch (qp->attrs.iwarp.state) {
+ case ERDMA_QPS_IWARP_IDLE:
+ case ERDMA_QPS_IWARP_RTR:
+ if (params->state == ERDMA_QPS_IWARP_RTS) {
+ ret = erdma_modify_qp_state_to_rts(qp, params, mask);
+ } else if (params->state == ERDMA_QPS_IWARP_ERROR) {
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_ERROR;
+ need_reflush = true;
+ if (qp->cep) {
+ erdma_cep_put(qp->cep);
+ qp->cep = NULL;
+ }
+ ret = erdma_modify_qp_state_to_stop(qp, params, mask);
+ }
+ break;
+ case ERDMA_QPS_IWARP_RTS:
+ drop_conn = 0;
+
+ if (params->state == ERDMA_QPS_IWARP_CLOSING ||
+ params->state == ERDMA_QPS_IWARP_TERMINATE ||
+ params->state == ERDMA_QPS_IWARP_ERROR) {
+ ret = erdma_modify_qp_state_to_stop(qp, params, mask);
+ drop_conn = 1;
+ need_reflush = true;
+ }
+
+ if (drop_conn)
+ erdma_qp_cm_drop(qp);
+
+ break;
+ case ERDMA_QPS_IWARP_TERMINATE:
+ if (params->state == ERDMA_QPS_IWARP_ERROR)
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_ERROR;
+ break;
+ case ERDMA_QPS_IWARP_CLOSING:
+ if (params->state == ERDMA_QPS_IWARP_IDLE) {
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_IDLE;
+ } else if (params->state == ERDMA_QPS_IWARP_ERROR) {
+ ret = erdma_modify_qp_state_to_stop(qp, params, mask);
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_ERROR;
+ } else if (params->state != ERDMA_QPS_IWARP_CLOSING) {
+ return -ECONNABORTED;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (need_reflush && !ret && rdma_is_kernel_res(&qp->ibqp.res)) {
+ qp->flags |= ERDMA_QP_IN_FLUSHING;
+ mod_delayed_work(qp->dev->reflush_wq, &qp->reflush_dwork,
+ usecs_to_jiffies(100));
+ }
+
+ return ret;
+}
+
+static int modify_qp_cmd_rocev2(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_rocev2 *params,
+ enum erdma_qpa_mask_rocev2 attr_mask)
+{
+ struct erdma_cmdq_mod_qp_req_rocev2 req;
+
+ memset(&req, 0, sizeof(req));
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_MODIFY_QP);
+
+ req.cfg0 = FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_STATE)
+ req.cfg0 |= FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK,
+ params->state);
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_DST_QPN)
+ req.cfg1 = FIELD_PREP(ERDMA_CMD_MODIFY_QP_DQPN_MASK,
+ params->dst_qpn);
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_QKEY)
+ req.qkey = params->qkey;
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_AV)
+ erdma_set_av_cfg(&req.av_cfg, &params->av);
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_SQ_PSN)
+ req.sq_psn = params->sq_psn;
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_RQ_PSN)
+ req.rq_psn = params->rq_psn;
+
+ req.attr_mask = attr_mask;
+
+ return erdma_post_cmd_wait(&qp->dev->cmdq, &req, sizeof(req), NULL,
+ NULL, true);
+}
+
+static void erdma_reset_qp(struct erdma_qp *qp)
+{
+ qp->kern_qp.sq_pi = 0;
+ qp->kern_qp.sq_ci = 0;
+ qp->kern_qp.rq_pi = 0;
+ qp->kern_qp.rq_ci = 0;
+ memset(qp->kern_qp.swr_tbl, 0, qp->attrs.sq_size * sizeof(u64));
+ memset(qp->kern_qp.rwr_tbl, 0, qp->attrs.rq_size * sizeof(u64));
+ memset(qp->kern_qp.sq_buf, 0, qp->attrs.sq_size << SQEBB_SHIFT);
+ memset(qp->kern_qp.rq_buf, 0, qp->attrs.rq_size << RQE_SHIFT);
+ erdma_remove_cqes_of_qp(&qp->scq->ibcq, QP_ID(qp));
+ if (qp->rcq != qp->scq)
+ erdma_remove_cqes_of_qp(&qp->rcq->ibcq, QP_ID(qp));
+}
+
+int erdma_modify_qp_state_rocev2(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_rocev2 *params,
+ int attr_mask)
+{
+ struct erdma_dev *dev = to_edev(qp->ibqp.device);
+ int ret;
+
+ ret = modify_qp_cmd_rocev2(qp, params, attr_mask);
+ if (ret)
+ return ret;
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_STATE)
+ qp->attrs.rocev2.state = params->state;
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_QKEY)
+ qp->attrs.rocev2.qkey = params->qkey;
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_DST_QPN)
+ qp->attrs.rocev2.dst_qpn = params->dst_qpn;
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_AV)
+ memcpy(&qp->attrs.rocev2.av, &params->av,
+ sizeof(struct erdma_av));
+
+ if (rdma_is_kernel_res(&qp->ibqp.res) &&
+ params->state == ERDMA_QPS_ROCEV2_RESET)
+ erdma_reset_qp(qp);
+
+ if (rdma_is_kernel_res(&qp->ibqp.res) &&
+ params->state == ERDMA_QPS_ROCEV2_ERROR) {
+ qp->flags |= ERDMA_QP_IN_FLUSHING;
+ mod_delayed_work(dev->reflush_wq, &qp->reflush_dwork,
+ usecs_to_jiffies(100));
+ }
+
+ return 0;
+}
+
+static void erdma_qp_safe_free(struct kref *ref)
+{
+ struct erdma_qp *qp = container_of(ref, struct erdma_qp, ref);
+
+ complete(&qp->safe_free);
+}
+
+void erdma_qp_put(struct erdma_qp *qp)
+{
+ WARN_ON(kref_read(&qp->ref) < 1);
+ kref_put(&qp->ref, erdma_qp_safe_free);
+}
+
+void erdma_qp_get(struct erdma_qp *qp)
+{
+ kref_get(&qp->ref);
+}
+
+static int fill_inline_data(struct erdma_qp *qp,
+ const struct ib_send_wr *send_wr, u16 wqe_idx,
+ u32 sgl_offset, __le32 *length_field)
+{
+ u32 remain_size, copy_size, data_off, bytes = 0;
+ char *data;
+ int i = 0;
+
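+ /* Inline data may span several SQEBBs: wqe_idx tracks the current block and sgl_offset the offset within it, both wrapping as bytes are copied. */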
+ wqe_idx += (sgl_offset >> SQEBB_SHIFT);
+ sgl_offset &= (SQEBB_SIZE - 1);
+ data = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx, qp->attrs.sq_size,
+ SQEBB_SHIFT);
+
+ while (i < send_wr->num_sge) {
+ bytes += send_wr->sg_list[i].length;
+ if (bytes > (int)ERDMA_MAX_INLINE)
+ return -EINVAL;
+
+ remain_size = send_wr->sg_list[i].length;
+ data_off = 0;
+
+ while (1) {
+ copy_size = min(remain_size, SQEBB_SIZE - sgl_offset);
+
+ memcpy(data + sgl_offset,
+ (void *)(uintptr_t)send_wr->sg_list[i].addr +
+ data_off,
+ copy_size);
+ remain_size -= copy_size;
+ data_off += copy_size;
+ sgl_offset += copy_size;
+ wqe_idx += (sgl_offset >> SQEBB_SHIFT);
+ sgl_offset &= (SQEBB_SIZE - 1);
+
+ data = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
+ qp->attrs.sq_size, SQEBB_SHIFT);
+ if (!remain_size)
+ break;
+ }
+
+ i++;
+ }
+ *length_field = cpu_to_le32(bytes);
+
+ return bytes;
+}
+
+static int fill_sgl(struct erdma_qp *qp, const struct ib_send_wr *send_wr,
+ u16 wqe_idx, u32 sgl_offset, __le32 *length_field)
+{
+ int i = 0;
+ u32 bytes = 0;
+ char *sgl;
+
+ if (send_wr->num_sge > qp->dev->attrs.max_send_sge)
+ return -EINVAL;
+
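+ /* Each SGE occupies 16 bytes in the SQE, so the starting offset must be 16-byte aligned. */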
+ if (sgl_offset & 0xF)
+ return -EINVAL;
+
+ while (i < send_wr->num_sge) {
+ wqe_idx += (sgl_offset >> SQEBB_SHIFT);
+ sgl_offset &= (SQEBB_SIZE - 1);
+ sgl = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
+ qp->attrs.sq_size, SQEBB_SHIFT);
+
+ bytes += send_wr->sg_list[i].length;
+ memcpy(sgl + sgl_offset, &send_wr->sg_list[i],
+ sizeof(struct ib_sge));
+
+ sgl_offset += sizeof(struct ib_sge);
+ i++;
+ }
+
+ *length_field = cpu_to_le32(bytes);
+ return 0;
+}
+
+static void init_send_sqe_rc(struct erdma_qp *qp, struct erdma_send_sqe_rc *sqe,
+ const struct ib_send_wr *wr, u32 *hw_op)
+{
+ u32 op = ERDMA_OP_SEND;
+
+ if (wr->opcode == IB_WR_SEND_WITH_IMM) {
+ op = ERDMA_OP_SEND_WITH_IMM;
+ sqe->imm_data = wr->ex.imm_data;
+ } else if (wr->opcode == IB_WR_SEND_WITH_INV) {
+ op = ERDMA_OP_SEND_WITH_INV;
+ sqe->invalid_stag = cpu_to_le32(wr->ex.invalidate_rkey);
+ }
+
+ *hw_op = op;
+}
+
+static void init_send_sqe_ud(struct erdma_qp *qp, struct erdma_send_sqe_ud *sqe,
+ const struct ib_send_wr *wr, u32 *hw_op)
+{
+ const struct ib_ud_wr *uwr = ud_wr(wr);
+ struct erdma_ah *ah = to_eah(uwr->ah);
+ u32 op = ERDMA_OP_SEND;
+
+ if (wr->opcode == IB_WR_SEND_WITH_IMM) {
+ op = ERDMA_OP_SEND_WITH_IMM;
+ sqe->imm_data = wr->ex.imm_data;
+ }
+
+ *hw_op = op;
+
+ sqe->ahn = cpu_to_le32(ah->ahn);
+ sqe->dst_qpn = cpu_to_le32(uwr->remote_qpn);
+ /* A qkey with the MSB set is a privileged (control) qkey: use the QP's own qkey instead. */
+ if (uwr->remote_qkey & 0x80000000)
+ sqe->qkey = cpu_to_le32(qp->attrs.rocev2.qkey);
+ else
+ sqe->qkey = cpu_to_le32(uwr->remote_qkey);
+}
+
+static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
+ const struct ib_send_wr *send_wr)
+{
+ u32 wqe_size, wqebb_cnt, hw_op, flags, sgl_offset;
+ u32 idx = *pi & (qp->attrs.sq_size - 1);
+ enum ib_wr_opcode op = send_wr->opcode;
+ struct erdma_send_sqe_rc *rc_send_sqe;
+ struct erdma_send_sqe_ud *ud_send_sqe;
+ struct erdma_atomic_sqe *atomic_sqe;
+ struct erdma_readreq_sqe *read_sqe;
+ struct erdma_reg_mr_sqe *regmr_sge;
+ struct erdma_write_sqe *write_sqe;
+ struct ib_rdma_wr *rdma_wr;
+ struct erdma_sge *sge;
+ __le32 *length_field;
+ struct erdma_mr *mr;
+ u64 wqe_hdr, *entry;
+ u32 attrs;
+ int ret;
+
+ if (qp->ibqp.qp_type != IB_QPT_RC && send_wr->opcode != IB_WR_SEND &&
+ send_wr->opcode != IB_WR_SEND_WITH_IMM)
+ return -EINVAL;
+
+ entry = get_queue_entry(qp->kern_qp.sq_buf, idx, qp->attrs.sq_size,
+ SQEBB_SHIFT);
+
+ /* Clear the SQE header section. */
+ *entry = 0;
+
+ qp->kern_qp.swr_tbl[idx] = send_wr->wr_id;
+ flags = send_wr->send_flags;
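+ /* Translate the IB send flags into the SQE header: CE (completion), SE (solicited event), FENCE and INLINE bits. */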
+ wqe_hdr = FIELD_PREP(
+ ERDMA_SQE_HDR_CE_MASK,
+ ((flags & IB_SEND_SIGNALED) || qp->kern_qp.sig_all) ? 1 : 0);
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_SE_MASK,
+ flags & IB_SEND_SOLICITED ? 1 : 0);
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_FENCE_MASK,
+ flags & IB_SEND_FENCE ? 1 : 0);
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_INLINE_MASK,
+ flags & IB_SEND_INLINE ? 1 : 0);
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp));
+
+ switch (op) {
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ hw_op = ERDMA_OP_WRITE;
+ if (op == IB_WR_RDMA_WRITE_WITH_IMM)
+ hw_op = ERDMA_OP_WRITE_WITH_IMM;
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
+ rdma_wr = container_of(send_wr, struct ib_rdma_wr, wr);
+ write_sqe = (struct erdma_write_sqe *)entry;
+
+ write_sqe->imm_data = send_wr->ex.imm_data;
+ write_sqe->sink_stag = cpu_to_le32(rdma_wr->rkey);
+ write_sqe->sink_to_h =
+ cpu_to_le32(upper_32_bits(rdma_wr->remote_addr));
+ write_sqe->sink_to_l =
+ cpu_to_le32(lower_32_bits(rdma_wr->remote_addr));
+
+ length_field = &write_sqe->length;
+ wqe_size = sizeof(struct erdma_write_sqe);
+ sgl_offset = wqe_size;
+ break;
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_READ_WITH_INV:
+ read_sqe = (struct erdma_readreq_sqe *)entry;
+ if (unlikely(send_wr->num_sge != 1))
+ return -EINVAL;
+ hw_op = ERDMA_OP_READ;
+ if (op == IB_WR_RDMA_READ_WITH_INV) {
+ hw_op = ERDMA_OP_READ_WITH_INV;
+ read_sqe->invalid_stag =
+ cpu_to_le32(send_wr->ex.invalidate_rkey);
+ }
+
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
+ rdma_wr = container_of(send_wr, struct ib_rdma_wr, wr);
+ read_sqe->length = cpu_to_le32(send_wr->sg_list[0].length);
+ read_sqe->sink_stag = cpu_to_le32(send_wr->sg_list[0].lkey);
+ read_sqe->sink_to_l =
+ cpu_to_le32(lower_32_bits(send_wr->sg_list[0].addr));
+ read_sqe->sink_to_h =
+ cpu_to_le32(upper_32_bits(send_wr->sg_list[0].addr));
+
+ sge = get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
+ qp->attrs.sq_size, SQEBB_SHIFT);
+ sge->addr = cpu_to_le64(rdma_wr->remote_addr);
+ sge->key = cpu_to_le32(rdma_wr->rkey);
+ sge->length = cpu_to_le32(send_wr->sg_list[0].length);
+ wqe_size = sizeof(struct erdma_readreq_sqe) +
+ send_wr->num_sge * sizeof(struct ib_sge);
+
+ goto out;
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_SEND_WITH_INV:
+ if (qp->ibqp.qp_type == IB_QPT_RC) {
+ rc_send_sqe = (struct erdma_send_sqe_rc *)entry;
+ init_send_sqe_rc(qp, rc_send_sqe, send_wr, &hw_op);
+ length_field = &rc_send_sqe->length;
+ wqe_size = sizeof(struct erdma_send_sqe_rc);
+ } else {
+ ud_send_sqe = (struct erdma_send_sqe_ud *)entry;
+ init_send_sqe_ud(qp, ud_send_sqe, send_wr, &hw_op);
+ length_field = &ud_send_sqe->length;
+ wqe_size = sizeof(struct erdma_send_sqe_ud);
+ }
+
+ sgl_offset = wqe_size;
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
+ break;
+ case IB_WR_REG_MR:
+ wqe_hdr |=
+ FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, ERDMA_OP_REG_MR);
+ regmr_sge = (struct erdma_reg_mr_sqe *)entry;
+ mr = to_emr(reg_wr(send_wr)->mr);
+
+ mr->access = ERDMA_MR_ACC_LR |
+ to_erdma_access_flags(reg_wr(send_wr)->access);
+ regmr_sge->addr = cpu_to_le64(mr->ibmr.iova);
+ regmr_sge->length = cpu_to_le32(mr->ibmr.length);
+ regmr_sge->stag = cpu_to_le32(reg_wr(send_wr)->key);
+ attrs = FIELD_PREP(ERDMA_SQE_MR_ACCESS_MASK, mr->access) |
+ FIELD_PREP(ERDMA_SQE_MR_MTT_CNT_MASK,
+ mr->mem.mtt_nents);
+
+ if (mr->mem.mtt_nents <= ERDMA_MAX_INLINE_MTT_ENTRIES) {
+ attrs |= FIELD_PREP(ERDMA_SQE_MR_MTT_TYPE_MASK, 0);
+ /* Copy the MTT entries inline into the SQE to speed up registration. */
+ memcpy(get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
+ qp->attrs.sq_size, SQEBB_SHIFT),
+ mr->mem.mtt->buf, MTT_SIZE(mr->mem.mtt_nents));
+ wqe_size = sizeof(struct erdma_reg_mr_sqe) +
+ MTT_SIZE(mr->mem.mtt_nents);
+ } else {
+ attrs |= FIELD_PREP(ERDMA_SQE_MR_MTT_TYPE_MASK, 1);
+ wqe_size = sizeof(struct erdma_reg_mr_sqe);
+ }
+
+ regmr_sge->attrs = cpu_to_le32(attrs);
+ goto out;
+ case IB_WR_LOCAL_INV:
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK,
+ ERDMA_OP_LOCAL_INV);
+ regmr_sge = (struct erdma_reg_mr_sqe *)entry;
+ regmr_sge->stag = cpu_to_le32(send_wr->ex.invalidate_rkey);
+ wqe_size = sizeof(struct erdma_reg_mr_sqe);
+ goto out;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ atomic_sqe = (struct erdma_atomic_sqe *)entry;
+ if (op == IB_WR_ATOMIC_CMP_AND_SWP) {
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK,
+ ERDMA_OP_ATOMIC_CAS);
+ atomic_sqe->fetchadd_swap_data =
+ cpu_to_le64(atomic_wr(send_wr)->swap);
+ atomic_sqe->cmp_data =
+ cpu_to_le64(atomic_wr(send_wr)->compare_add);
+ } else {
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK,
+ ERDMA_OP_ATOMIC_FAA);
+ atomic_sqe->fetchadd_swap_data =
+ cpu_to_le64(atomic_wr(send_wr)->compare_add);
+ }
+
+ sge = get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
+ qp->attrs.sq_size, SQEBB_SHIFT);
+ sge->addr = cpu_to_le64(atomic_wr(send_wr)->remote_addr);
+ sge->key = cpu_to_le32(atomic_wr(send_wr)->rkey);
+ sge++;
+
+ sge->addr = cpu_to_le64(send_wr->sg_list[0].addr);
+ sge->key = cpu_to_le32(send_wr->sg_list[0].lkey);
+ sge->length = cpu_to_le32(send_wr->sg_list[0].length);
+
+ wqe_size = sizeof(*atomic_sqe);
+ goto out;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (flags & IB_SEND_INLINE) {
+ ret = fill_inline_data(qp, send_wr, idx, sgl_offset,
+ length_field);
+ if (ret < 0)
+ return -EINVAL;
+ wqe_size += ret;
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_SGL_LEN_MASK, ret);
+ } else {
+ ret = fill_sgl(qp, send_wr, idx, sgl_offset, length_field);
+ if (ret)
+ return -EINVAL;
+ wqe_size += send_wr->num_sge * sizeof(struct ib_sge);
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_SGL_LEN_MASK,
+ send_wr->num_sge);
+ }
+
+out:
+ wqebb_cnt = SQEBB_COUNT(wqe_size);
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_WQEBB_CNT_MASK, wqebb_cnt - 1);
+ *pi += wqebb_cnt;
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_WQEBB_INDEX_MASK, *pi);
+
+ *entry = wqe_hdr;
+
+ return 0;
+}
+
+static void kick_sq_db(struct erdma_qp *qp, u16 pi)
+{
+ u64 db_data = FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp)) |
+ FIELD_PREP(ERDMA_SQE_HDR_WQEBB_INDEX_MASK, pi);
+
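+ /* Update the doorbell record in host memory, then ring the hardware SQ doorbell. */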
+ *(u64 *)qp->kern_qp.sq_dbrec = db_data;
+ writeq(db_data, qp->kern_qp.hw_sq_db);
+}
+
+int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
+ const struct ib_send_wr **bad_send_wr)
+{
+ struct erdma_qp *qp = to_eqp(ibqp);
+ int ret = 0;
+ const struct ib_send_wr *wr = send_wr;
+ unsigned long flags;
+ u16 sq_pi;
+
+ if (!send_wr)
+ return -EINVAL;
+
+ spin_lock_irqsave(&qp->lock, flags);
+ sq_pi = qp->kern_qp.sq_pi;
+
+ while (wr) {
+ if ((u16)(sq_pi - qp->kern_qp.sq_ci) >= qp->attrs.sq_size) {
+ ret = -ENOMEM;
+ *bad_send_wr = send_wr;
+ break;
+ }
+
+ ret = erdma_push_one_sqe(qp, &sq_pi, wr);
+ if (ret) {
+ *bad_send_wr = wr;
+ break;
+ }
+ qp->kern_qp.sq_pi = sq_pi;
+ kick_sq_db(qp, sq_pi);
+
+ wr = wr->next;
+ }
+ spin_unlock_irqrestore(&qp->lock, flags);
+
+ if (unlikely(qp->flags & ERDMA_QP_IN_FLUSHING))
+ mod_delayed_work(qp->dev->reflush_wq, &qp->reflush_dwork,
+ usecs_to_jiffies(100));
+
+ return ret;
+}
+
+static int erdma_post_recv_one(struct erdma_qp *qp,
+ const struct ib_recv_wr *recv_wr)
+{
+ struct erdma_rqe *rqe =
+ get_queue_entry(qp->kern_qp.rq_buf, qp->kern_qp.rq_pi,
+ qp->attrs.rq_size, RQE_SHIFT);
+
+ rqe->qe_idx = cpu_to_le16(qp->kern_qp.rq_pi + 1);
+ rqe->qpn = cpu_to_le32(QP_ID(qp));
+
+ if (recv_wr->num_sge == 0) {
+ rqe->length = 0;
+ } else if (recv_wr->num_sge == 1) {
+ rqe->stag = cpu_to_le32(recv_wr->sg_list[0].lkey);
+ rqe->to = cpu_to_le64(recv_wr->sg_list[0].addr);
+ rqe->length = cpu_to_le32(recv_wr->sg_list[0].length);
+ } else {
+ return -EINVAL;
+ }
+
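+ /* Mirror the RQE into the doorbell record, then ring the hardware RQ doorbell. */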
+ *(u64 *)qp->kern_qp.rq_dbrec = *(u64 *)rqe;
+ writeq(*(u64 *)rqe, qp->kern_qp.hw_rq_db);
+
+ qp->kern_qp.rwr_tbl[qp->kern_qp.rq_pi & (qp->attrs.rq_size - 1)] =
+ recv_wr->wr_id;
+ qp->kern_qp.rq_pi++;
+
+ return 0;
+}
+
+int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr)
+{
+ const struct ib_recv_wr *wr = recv_wr;
+ struct erdma_qp *qp = to_eqp(ibqp);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&qp->lock, flags);
+
+ while (wr) {
+ ret = erdma_post_recv_one(qp, wr);
+ if (ret) {
+ *bad_recv_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+
+ spin_unlock_irqrestore(&qp->lock, flags);
+
+ if (unlikely(qp->flags & ERDMA_QP_IN_FLUSHING))
+ mod_delayed_work(qp->dev->reflush_wq, &qp->reflush_dwork,
+ usecs_to_jiffies(100));
+
+ return ret;
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
new file mode 100644
index 000000000000..109a3f3de911
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -0,0 +1,2300 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+
+/* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. */
+
+#include <linux/vmalloc.h>
+#include <net/addrconf.h>
+#include <rdma/erdma-abi.h>
+#include <rdma/ib_umem.h>
+#include <rdma/uverbs_ioctl.h>
+
+#include "erdma.h"
+#include "erdma_cm.h"
+#include "erdma_verbs.h"
+
+static void assemble_qbuf_mtt_for_cmd(struct erdma_mem *mem, u32 *cfg,
+ u64 *addr0, u64 *addr1)
+{
+ struct erdma_mtt *mtt = mem->mtt;
+
+ if (mem->mtt_nents > ERDMA_MAX_INLINE_MTT_ENTRIES) {
+ *addr0 = mtt->buf_dma;
+ *cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_LEVEL_MASK,
+ ERDMA_MR_MTT_1LEVEL);
+ } else {
+ *addr0 = mtt->buf[0];
+ memcpy(addr1, mtt->buf + 1, MTT_SIZE(mem->mtt_nents - 1));
+ *cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_LEVEL_MASK,
+ ERDMA_MR_MTT_0LEVEL);
+ }
+}
+
+static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
+{
+ struct erdma_dev *dev = to_edev(qp->ibqp.device);
+ struct erdma_pd *pd = to_epd(qp->ibqp.pd);
+ struct erdma_cmdq_create_qp_req req;
+ struct erdma_uqp *user_qp;
+ u64 resp0, resp1;
+ int err;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_CREATE_QP);
+
+ req.cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_QP_SQ_DEPTH_MASK,
+ ilog2(qp->attrs.sq_size)) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_QPN_MASK, QP_ID(qp));
+ req.cfg1 = FIELD_PREP(ERDMA_CMD_CREATE_QP_RQ_DEPTH_MASK,
+ ilog2(qp->attrs.rq_size)) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_PD_MASK, pd->pdn);
+
+ if (qp->ibqp.qp_type == IB_QPT_RC)
+ req.cfg2 = FIELD_PREP(ERDMA_CMD_CREATE_QP_TYPE_MASK,
+ ERDMA_QPT_RC);
+ else
+ req.cfg2 = FIELD_PREP(ERDMA_CMD_CREATE_QP_TYPE_MASK,
+ ERDMA_QPT_UD);
+
+ if (rdma_is_kernel_res(&qp->ibqp.res)) {
+ u32 pgsz_range = ilog2(SZ_1M) - ERDMA_HW_PAGE_SHIFT;
+
+ req.sq_cqn_mtt_cfg =
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
+ pgsz_range) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);
+ req.rq_cqn_mtt_cfg =
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
+ pgsz_range) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);
+
+ req.sq_mtt_cfg =
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_OFFSET_MASK, 0) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK, 1) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_LEVEL_MASK,
+ ERDMA_MR_MTT_0LEVEL);
+ req.rq_mtt_cfg = req.sq_mtt_cfg;
+
+ req.rq_buf_addr = qp->kern_qp.rq_buf_dma_addr;
+ req.sq_buf_addr = qp->kern_qp.sq_buf_dma_addr;
+ req.sq_dbrec_dma = qp->kern_qp.sq_dbrec_dma;
+ req.rq_dbrec_dma = qp->kern_qp.rq_dbrec_dma;
+ } else {
+ user_qp = &qp->user_qp;
+ req.sq_cqn_mtt_cfg = FIELD_PREP(
+ ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
+ ilog2(user_qp->sq_mem.page_size) - ERDMA_HW_PAGE_SHIFT);
+ req.sq_cqn_mtt_cfg |=
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);
+
+ req.rq_cqn_mtt_cfg = FIELD_PREP(
+ ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
+ ilog2(user_qp->rq_mem.page_size) - ERDMA_HW_PAGE_SHIFT);
+ req.rq_cqn_mtt_cfg |=
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);
+
+ req.sq_mtt_cfg = user_qp->sq_mem.page_offset;
+ req.sq_mtt_cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK,
+ user_qp->sq_mem.mtt_nents);
+
+ req.rq_mtt_cfg = user_qp->rq_mem.page_offset;
+ req.rq_mtt_cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK,
+ user_qp->rq_mem.mtt_nents);
+
+ assemble_qbuf_mtt_for_cmd(&user_qp->sq_mem, &req.sq_mtt_cfg,
+ &req.sq_buf_addr, req.sq_mtt_entry);
+ assemble_qbuf_mtt_for_cmd(&user_qp->rq_mem, &req.rq_mtt_cfg,
+ &req.rq_buf_addr, req.rq_mtt_entry);
+
+ req.sq_dbrec_dma = user_qp->sq_dbrec_dma;
+ req.rq_dbrec_dma = user_qp->rq_dbrec_dma;
+
+ if (uctx->ext_db.enable) {
+ req.sq_cqn_mtt_cfg |=
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_DB_CFG_MASK, 1);
+ req.db_cfg =
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_SQDB_CFG_MASK,
+ uctx->ext_db.sdb_off) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_RQDB_CFG_MASK,
+ uctx->ext_db.rdb_off);
+ }
+ }
+
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0, &resp1,
+ true);
+ if (!err && erdma_device_iwarp(dev))
+ qp->attrs.iwarp.cookie =
+ FIELD_GET(ERDMA_CMDQ_CREATE_QP_RESP_COOKIE_MASK, resp0);
+
+ return err;
+}
+
+static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
+{
+ struct erdma_pd *pd = to_epd(mr->ibmr.pd);
+ u32 mtt_level = ERDMA_MR_MTT_0LEVEL;
+ struct erdma_cmdq_reg_mr_req req;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, CMDQ_OPCODE_REG_MR);
+
+ if (mr->type == ERDMA_MR_TYPE_FRMR ||
+ mr->mem.page_cnt > ERDMA_MAX_INLINE_MTT_ENTRIES) {
+ if (mr->mem.mtt->continuous) {
+ req.phy_addr[0] = mr->mem.mtt->buf_dma;
+ mtt_level = ERDMA_MR_MTT_1LEVEL;
+ } else {
+ req.phy_addr[0] = mr->mem.mtt->dma_addrs[0];
+ mtt_level = mr->mem.mtt->level;
+ }
+ } else if (mr->type != ERDMA_MR_TYPE_DMA) {
+ memcpy(req.phy_addr, mr->mem.mtt->buf,
+ MTT_SIZE(mr->mem.page_cnt));
+ }
+
+ req.cfg0 = FIELD_PREP(ERDMA_CMD_MR_VALID_MASK, mr->valid) |
+ FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, mr->ibmr.lkey & 0xFF) |
+ FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, mr->ibmr.lkey >> 8);
+ req.cfg1 = FIELD_PREP(ERDMA_CMD_REGMR_PD_MASK, pd->pdn) |
+ FIELD_PREP(ERDMA_CMD_REGMR_TYPE_MASK, mr->type) |
+ FIELD_PREP(ERDMA_CMD_REGMR_RIGHT_MASK, mr->access);
+ req.cfg2 = FIELD_PREP(ERDMA_CMD_REGMR_PAGESIZE_MASK,
+ ilog2(mr->mem.page_size)) |
+ FIELD_PREP(ERDMA_CMD_REGMR_MTT_LEVEL_MASK, mtt_level) |
+ FIELD_PREP(ERDMA_CMD_REGMR_MTT_CNT_MASK, mr->mem.page_cnt);
+
+ if (mr->type == ERDMA_MR_TYPE_DMA)
+ goto post_cmd;
+
+ if (mr->type == ERDMA_MR_TYPE_NORMAL) {
+ req.start_va = mr->mem.va;
+ req.size = mr->mem.len;
+ }
+
+ if (!mr->mem.mtt->continuous && mr->mem.mtt->level > 1) {
+ req.cfg0 |= FIELD_PREP(ERDMA_CMD_MR_VERSION_MASK, 1);
+ req.cfg2 |= FIELD_PREP(ERDMA_CMD_REGMR_MTT_PAGESIZE_MASK,
+ PAGE_SHIFT - ERDMA_HW_PAGE_SHIFT);
+ req.size_h = upper_32_bits(mr->mem.len);
+ req.mtt_cnt_h = mr->mem.page_cnt >> 20;
+ }
+
+post_cmd:
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
+}
+
+static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
+{
+ struct erdma_dev *dev = to_edev(cq->ibcq.device);
+ struct erdma_cmdq_create_cq_req req;
+ struct erdma_mem *mem;
+ u32 page_size;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_CREATE_CQ);
+
+ req.cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_CQN_MASK, cq->cqn) |
+ FIELD_PREP(ERDMA_CMD_CREATE_CQ_DEPTH_MASK, ilog2(cq->depth));
+ req.cfg1 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_EQN_MASK, cq->assoc_eqn);
+
+ if (rdma_is_kernel_res(&cq->ibcq.res)) {
+ page_size = SZ_32M;
+ req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
+ ilog2(page_size) - ERDMA_HW_PAGE_SHIFT);
+ req.qbuf_addr_l = lower_32_bits(cq->kern_cq.qbuf_dma_addr);
+ req.qbuf_addr_h = upper_32_bits(cq->kern_cq.qbuf_dma_addr);
+
+ req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK, 1) |
+ FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_LEVEL_MASK,
+ ERDMA_MR_MTT_0LEVEL);
+
+ req.first_page_offset = 0;
+ req.cq_dbrec_dma = cq->kern_cq.dbrec_dma;
+ } else {
+ mem = &cq->user_cq.qbuf_mem;
+ req.cfg0 |=
+ FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
+ ilog2(mem->page_size) - ERDMA_HW_PAGE_SHIFT);
+ if (mem->mtt_nents == 1) {
+ req.qbuf_addr_l = lower_32_bits(mem->mtt->buf[0]);
+ req.qbuf_addr_h = upper_32_bits(mem->mtt->buf[0]);
+ req.cfg1 |=
+ FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_LEVEL_MASK,
+ ERDMA_MR_MTT_0LEVEL);
+ } else {
+ req.qbuf_addr_l = lower_32_bits(mem->mtt->buf_dma);
+ req.qbuf_addr_h = upper_32_bits(mem->mtt->buf_dma);
+ req.cfg1 |=
+ FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_LEVEL_MASK,
+ ERDMA_MR_MTT_1LEVEL);
+ }
+ req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK,
+ mem->mtt_nents);
+
+ req.first_page_offset = mem->page_offset;
+ req.cq_dbrec_dma = cq->user_cq.dbrec_dma;
+
+ if (uctx->ext_db.enable) {
+ req.cfg1 |= FIELD_PREP(
+ ERDMA_CMD_CREATE_CQ_MTT_DB_CFG_MASK, 1);
+ req.cfg2 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_DB_CFG_MASK,
+ uctx->ext_db.cdb_off);
+ }
+ }
+
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
+}
+
+static int erdma_alloc_idx(struct erdma_resource_cb *res_cb)
+{
+ int idx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&res_cb->lock, flags);
+ idx = find_next_zero_bit(res_cb->bitmap, res_cb->max_cap,
+ res_cb->next_alloc_idx);
+ if (idx == res_cb->max_cap) {
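+ /* Reached the end of the bitmap: retry the search from the beginning. */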
+ idx = find_first_zero_bit(res_cb->bitmap, res_cb->max_cap);
+ if (idx == res_cb->max_cap) {
+ res_cb->next_alloc_idx = 1;
+ spin_unlock_irqrestore(&res_cb->lock, flags);
+ return -ENOSPC;
+ }
+ }
+
+ set_bit(idx, res_cb->bitmap);
+ res_cb->next_alloc_idx = idx + 1;
+ spin_unlock_irqrestore(&res_cb->lock, flags);
+
+ return idx;
+}
+
+static inline void erdma_free_idx(struct erdma_resource_cb *res_cb, u32 idx)
+{
+ unsigned long flags;
+ u32 used;
+
+ spin_lock_irqsave(&res_cb->lock, flags);
+ used = __test_and_clear_bit(idx, res_cb->bitmap);
+ spin_unlock_irqrestore(&res_cb->lock, flags);
+ WARN_ON(!used);
+}
+
+static struct rdma_user_mmap_entry *
+erdma_user_mmap_entry_insert(struct erdma_ucontext *uctx, void *address,
+ u32 size, u8 mmap_flag, u64 *mmap_offset)
+{
+ struct erdma_user_mmap_entry *entry =
+ kzalloc(sizeof(*entry), GFP_KERNEL);
+ int ret;
+
+ if (!entry)
+ return NULL;
+
+ entry->address = (u64)address;
+ entry->mmap_flag = mmap_flag;
+
+ size = PAGE_ALIGN(size);
+
+ ret = rdma_user_mmap_entry_insert(&uctx->ibucontext, &entry->rdma_entry,
+ size);
+ if (ret) {
+ kfree(entry);
+ return NULL;
+ }
+
+ *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+ return &entry->rdma_entry;
+}
+
+int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
+ struct ib_udata *unused)
+{
+ struct erdma_dev *dev = to_edev(ibdev);
+
+ memset(attr, 0, sizeof(*attr));
+
+ attr->max_mr_size = dev->attrs.max_mr_size;
+ attr->vendor_id = PCI_VENDOR_ID_ALIBABA;
+ attr->vendor_part_id = dev->pdev->device;
+ attr->hw_ver = dev->pdev->revision;
+ attr->max_qp = dev->attrs.max_qp - 1;
+ attr->max_qp_wr = min(dev->attrs.max_send_wr, dev->attrs.max_recv_wr);
+ attr->max_qp_rd_atom = dev->attrs.max_ord;
+ attr->max_qp_init_rd_atom = dev->attrs.max_ird;
+ attr->max_res_rd_atom = dev->attrs.max_qp * dev->attrs.max_ird;
+ attr->device_cap_flags = IB_DEVICE_MEM_MGT_EXTENSIONS;
+ attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
+ ibdev->local_dma_lkey = dev->attrs.local_dma_key;
+ attr->max_send_sge = dev->attrs.max_send_sge;
+ attr->max_recv_sge = dev->attrs.max_recv_sge;
+ attr->max_sge_rd = dev->attrs.max_sge_rd;
+ attr->max_cq = dev->attrs.max_cq - 1;
+ attr->max_cqe = dev->attrs.max_cqe;
+ attr->max_mr = dev->attrs.max_mr;
+ attr->max_pd = dev->attrs.max_pd;
+ attr->max_mw = dev->attrs.max_mw;
+ attr->max_fast_reg_page_list_len = ERDMA_MAX_FRMR_PA;
+ attr->page_size_cap = ERDMA_PAGE_SIZE_SUPPORT;
+
+ if (erdma_device_rocev2(dev)) {
+ attr->max_pkeys = ERDMA_MAX_PKEYS;
+ attr->max_ah = dev->attrs.max_ah;
+ }
+
+ if (dev->attrs.cap_flags & ERDMA_DEV_CAP_FLAGS_ATOMIC)
+ attr->atomic_cap = IB_ATOMIC_GLOB;
+
+ attr->fw_ver = dev->attrs.fw_version;
+
+ if (dev->netdev)
+ addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
+ dev->netdev->dev_addr);
+
+ return 0;
+}
+
+int erdma_query_gid(struct ib_device *ibdev, u32 port, int idx,
+ union ib_gid *gid)
+{
+ struct erdma_dev *dev = to_edev(ibdev);
+
+ memset(gid, 0, sizeof(*gid));
+ ether_addr_copy(gid->raw, dev->attrs.peer_addr);
+
+ return 0;
+}
+
+int erdma_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *attr)
+{
+ struct erdma_dev *dev = to_edev(ibdev);
+ struct net_device *ndev = dev->netdev;
+
+ memset(attr, 0, sizeof(*attr));
+
+ if (erdma_device_iwarp(dev)) {
+ attr->gid_tbl_len = 1;
+ } else {
+ attr->gid_tbl_len = dev->attrs.max_gid;
+ attr->ip_gids = true;
+ attr->pkey_tbl_len = ERDMA_MAX_PKEYS;
+ }
+
+ attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
+ attr->max_msg_sz = -1;
+
+ if (!ndev)
+ goto out;
+
+ ib_get_eth_speed(ibdev, port, &attr->active_speed, &attr->active_width);
+ attr->max_mtu = ib_mtu_int_to_enum(ndev->mtu);
+ attr->active_mtu = ib_mtu_int_to_enum(ndev->mtu);
+ attr->state = ib_get_curr_port_state(ndev);
+
+out:
+ if (attr->state == IB_PORT_ACTIVE)
+ attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+ else
+ attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+
+ return 0;
+}
+
+int erdma_get_port_immutable(struct ib_device *ibdev, u32 port,
+ struct ib_port_immutable *port_immutable)
+{
+ struct erdma_dev *dev = to_edev(ibdev);
+
+ if (erdma_device_iwarp(dev)) {
+ port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+ port_immutable->gid_tbl_len = 1;
+ } else {
+ port_immutable->core_cap_flags =
+ RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ port_immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ port_immutable->gid_tbl_len = dev->attrs.max_gid;
+ port_immutable->pkey_tbl_len = ERDMA_MAX_PKEYS;
+ }
+
+ return 0;
+}
+
+int erdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct erdma_pd *pd = to_epd(ibpd);
+ struct erdma_dev *dev = to_edev(ibpd->device);
+ int pdn;
+
+ pdn = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_PD]);
+ if (pdn < 0)
+ return pdn;
+
+ pd->pdn = pdn;
+
+ return 0;
+}
+
+int erdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct erdma_pd *pd = to_epd(ibpd);
+ struct erdma_dev *dev = to_edev(ibpd->device);
+
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_PD], pd->pdn);
+
+ return 0;
+}
+
+static void erdma_flush_worker(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct erdma_qp *qp =
+ container_of(dwork, struct erdma_qp, reflush_dwork);
+ struct erdma_cmdq_reflush_req req;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_REFLUSH);
+ req.qpn = QP_ID(qp);
+ req.sq_pi = qp->kern_qp.sq_pi;
+ req.rq_pi = qp->kern_qp.rq_pi;
+ erdma_post_cmd_wait(&qp->dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
+}
+
+static int erdma_qp_validate_cap(struct erdma_dev *dev,
+ struct ib_qp_init_attr *attrs)
+{
+ if ((attrs->cap.max_send_wr > dev->attrs.max_send_wr) ||
+ (attrs->cap.max_recv_wr > dev->attrs.max_recv_wr) ||
+ (attrs->cap.max_send_sge > dev->attrs.max_send_sge) ||
+ (attrs->cap.max_recv_sge > dev->attrs.max_recv_sge) ||
+ (attrs->cap.max_inline_data > ERDMA_MAX_INLINE) ||
+ !attrs->cap.max_send_wr || !attrs->cap.max_recv_wr) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int erdma_qp_validate_attr(struct erdma_dev *dev,
+ struct ib_qp_init_attr *attrs)
+{
+ if (erdma_device_iwarp(dev) && attrs->qp_type != IB_QPT_RC)
+ return -EOPNOTSUPP;
+
+ if (erdma_device_rocev2(dev) && attrs->qp_type != IB_QPT_RC &&
+ attrs->qp_type != IB_QPT_UD && attrs->qp_type != IB_QPT_GSI)
+ return -EOPNOTSUPP;
+
+ if (attrs->srq)
+ return -EOPNOTSUPP;
+
+ if (!attrs->send_cq || !attrs->recv_cq)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static void free_kernel_qp(struct erdma_qp *qp)
+{
+ struct erdma_dev *dev = qp->dev;
+
+ vfree(qp->kern_qp.swr_tbl);
+ vfree(qp->kern_qp.rwr_tbl);
+
+ if (qp->kern_qp.sq_buf)
+ dma_free_coherent(&dev->pdev->dev,
+ qp->attrs.sq_size << SQEBB_SHIFT,
+ qp->kern_qp.sq_buf,
+ qp->kern_qp.sq_buf_dma_addr);
+
+ if (qp->kern_qp.sq_dbrec)
+ dma_pool_free(dev->db_pool, qp->kern_qp.sq_dbrec,
+ qp->kern_qp.sq_dbrec_dma);
+
+ if (qp->kern_qp.rq_buf)
+ dma_free_coherent(&dev->pdev->dev,
+ qp->attrs.rq_size << RQE_SHIFT,
+ qp->kern_qp.rq_buf,
+ qp->kern_qp.rq_buf_dma_addr);
+
+ if (qp->kern_qp.rq_dbrec)
+ dma_pool_free(dev->db_pool, qp->kern_qp.rq_dbrec,
+ qp->kern_qp.rq_dbrec_dma);
+}
+
+static int init_kernel_qp(struct erdma_dev *dev, struct erdma_qp *qp,
+ struct ib_qp_init_attr *attrs)
+{
+ struct erdma_kqp *kqp = &qp->kern_qp;
+ int size;
+
+ if (attrs->sq_sig_type == IB_SIGNAL_ALL_WR)
+ kqp->sig_all = 1;
+
+ kqp->sq_pi = 0;
+ kqp->sq_ci = 0;
+ kqp->rq_pi = 0;
+ kqp->rq_ci = 0;
+ kqp->hw_sq_db =
+ dev->func_bar + (ERDMA_SDB_SHARED_PAGE_INDEX << PAGE_SHIFT);
+ kqp->hw_rq_db = dev->func_bar + ERDMA_BAR_RQDB_SPACE_OFFSET;
+
+ kqp->swr_tbl = vmalloc_array(qp->attrs.sq_size, sizeof(u64));
+ kqp->rwr_tbl = vmalloc_array(qp->attrs.rq_size, sizeof(u64));
+ if (!kqp->swr_tbl || !kqp->rwr_tbl)
+ goto err_out;
+
+ size = qp->attrs.sq_size << SQEBB_SHIFT;
+ kqp->sq_buf = dma_alloc_coherent(&dev->pdev->dev, size,
+ &kqp->sq_buf_dma_addr, GFP_KERNEL);
+ if (!kqp->sq_buf)
+ goto err_out;
+
+ kqp->sq_dbrec =
+ dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &kqp->sq_dbrec_dma);
+ if (!kqp->sq_dbrec)
+ goto err_out;
+
+ size = qp->attrs.rq_size << RQE_SHIFT;
+ kqp->rq_buf = dma_alloc_coherent(&dev->pdev->dev, size,
+ &kqp->rq_buf_dma_addr, GFP_KERNEL);
+ if (!kqp->rq_buf)
+ goto err_out;
+
+ kqp->rq_dbrec =
+ dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &kqp->rq_dbrec_dma);
+ if (!kqp->rq_dbrec)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ free_kernel_qp(qp);
+ return -ENOMEM;
+}
+
+static void erdma_fill_bottom_mtt(struct erdma_dev *dev, struct erdma_mem *mem)
+{
+ struct erdma_mtt *mtt = mem->mtt;
+ struct ib_block_iter biter;
+ u32 idx = 0;
+
+ while (mtt->low_level)
+ mtt = mtt->low_level;
+
+ rdma_umem_for_each_dma_block(mem->umem, &biter, mem->page_size)
+ mtt->buf[idx++] = rdma_block_iter_dma_address(&biter);
+}
+
+static struct erdma_mtt *erdma_create_cont_mtt(struct erdma_dev *dev,
+ size_t size)
+{
+ struct erdma_mtt *mtt;
+
+ mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
+ if (!mtt)
+ return ERR_PTR(-ENOMEM);
+
+ mtt->size = size;
+ mtt->buf = kzalloc(mtt->size, GFP_KERNEL);
+ if (!mtt->buf)
+ goto err_free_mtt;
+
+ mtt->continuous = true;
+ mtt->buf_dma = dma_map_single(&dev->pdev->dev, mtt->buf, mtt->size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pdev->dev, mtt->buf_dma))
+ goto err_free_mtt_buf;
+
+ return mtt;
+
+err_free_mtt_buf:
+ kfree(mtt->buf);
+
+err_free_mtt:
+ kfree(mtt);
+
+ return ERR_PTR(-ENOMEM);
+}
+
+static void erdma_unmap_page_list(struct erdma_dev *dev, dma_addr_t *pg_dma,
+ u32 npages)
+{
+ u32 i;
+
+ for (i = 0; i < npages; i++)
+ dma_unmap_page(&dev->pdev->dev, pg_dma[i], PAGE_SIZE,
+ DMA_TO_DEVICE);
+}
+
+static void erdma_destroy_mtt_buf_dma_addrs(struct erdma_dev *dev,
+ struct erdma_mtt *mtt)
+{
+ erdma_unmap_page_list(dev, mtt->dma_addrs, mtt->npages);
+ vfree(mtt->dma_addrs);
+}
+
+static void erdma_destroy_scatter_mtt(struct erdma_dev *dev,
+ struct erdma_mtt *mtt)
+{
+ erdma_destroy_mtt_buf_dma_addrs(dev, mtt);
+ vfree(mtt->buf);
+ kfree(mtt);
+}
+
+static void erdma_init_middle_mtt(struct erdma_mtt *mtt,
+ struct erdma_mtt *low_mtt)
+{
+ dma_addr_t *pg_addr = mtt->buf;
+ u32 i;
+
+ for (i = 0; i < low_mtt->npages; i++)
+ pg_addr[i] = low_mtt->dma_addrs[i];
+}
+
+static u32 vmalloc_to_dma_addrs(struct erdma_dev *dev, dma_addr_t **dma_addrs,
+ void *buf, u64 len)
+{
+ dma_addr_t *pg_dma;
+ struct page *pg;
+ u32 npages, i;
+ void *addr;
+
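+ /* Map every page backing the vmalloc()'ed buffer for DMA; returns the number of pages mapped, or 0 on failure. */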
+ npages = (PAGE_ALIGN((u64)buf + len) - PAGE_ALIGN_DOWN((u64)buf)) >>
+ PAGE_SHIFT;
+ pg_dma = vcalloc(npages, sizeof(*pg_dma));
+ if (!pg_dma)
+ return 0;
+
+ addr = buf;
+ for (i = 0; i < npages; i++) {
+ pg = vmalloc_to_page(addr);
+ if (!pg)
+ goto err;
+
+ pg_dma[i] = dma_map_page(&dev->pdev->dev, pg, 0, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pdev->dev, pg_dma[i]))
+ goto err;
+
+ addr += PAGE_SIZE;
+ }
+
+ *dma_addrs = pg_dma;
+
+ return npages;
+err:
+ erdma_unmap_page_list(dev, pg_dma, i);
+ vfree(pg_dma);
+
+ return 0;
+}
+
+static int erdma_create_mtt_buf_dma_addrs(struct erdma_dev *dev,
+ struct erdma_mtt *mtt)
+{
+ dma_addr_t *addrs;
+ u32 npages;
+
+ /* Fail if the buffer is not page aligned. */
+ if ((uintptr_t)mtt->buf & ~PAGE_MASK)
+ return -EINVAL;
+
+ npages = vmalloc_to_dma_addrs(dev, &addrs, mtt->buf, mtt->size);
+ if (!npages)
+ return -ENOMEM;
+
+ mtt->dma_addrs = addrs;
+ mtt->npages = npages;
+
+ return 0;
+}
+
+static struct erdma_mtt *erdma_create_scatter_mtt(struct erdma_dev *dev,
+ size_t size)
+{
+ struct erdma_mtt *mtt;
+ int ret = -ENOMEM;
+
+ mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
+ if (!mtt)
+ return ERR_PTR(-ENOMEM);
+
+ mtt->size = ALIGN(size, PAGE_SIZE);
+ mtt->buf = vzalloc(mtt->size);
+ mtt->continuous = false;
+ if (!mtt->buf)
+ goto err_free_mtt;
+
+ ret = erdma_create_mtt_buf_dma_addrs(dev, mtt);
+ if (ret)
+ goto err_free_mtt_buf;
+
+ ibdev_dbg(&dev->ibdev, "create scatter mtt, size:%lu, npages:%u\n",
+ mtt->size, mtt->npages);
+
+ return mtt;
+
+err_free_mtt_buf:
+ vfree(mtt->buf);
+
+err_free_mtt:
+ kfree(mtt);
+
+ return ERR_PTR(ret);
+}
+
+static struct erdma_mtt *erdma_create_mtt(struct erdma_dev *dev, size_t size,
+ bool force_continuous)
+{
+ struct erdma_mtt *mtt, *tmp_mtt;
+ int ret, level = 0;
+
+ ibdev_dbg(&dev->ibdev, "create_mtt, size:%lu, force cont:%d\n", size,
+ force_continuous);
+
+ if (!(dev->attrs.cap_flags & ERDMA_DEV_CAP_FLAGS_MTT_VA))
+ force_continuous = true;
+
+ if (force_continuous)
+ return erdma_create_cont_mtt(dev, size);
+
+ mtt = erdma_create_scatter_mtt(dev, size);
+ if (IS_ERR(mtt))
+ return mtt;
+ level = 1;
+
+ /* Converge the mtt table: build upper levels until the top level fits in a single page (at most three levels). */
+ while (mtt->npages != 1 && level <= 3) {
+ tmp_mtt = erdma_create_scatter_mtt(dev, MTT_SIZE(mtt->npages));
+ if (IS_ERR(tmp_mtt)) {
+ ret = PTR_ERR(tmp_mtt);
+ goto err_free_mtt;
+ }
+ erdma_init_middle_mtt(tmp_mtt, mtt);
+ tmp_mtt->low_level = mtt;
+ mtt = tmp_mtt;
+ level++;
+ }
+
+ if (level > 3) {
+ ret = -ENOMEM;
+ goto err_free_mtt;
+ }
+
+ mtt->level = level;
+ ibdev_dbg(&dev->ibdev, "top mtt: level:%d, dma_addr 0x%llx\n",
+ mtt->level, mtt->dma_addrs[0]);
+
+ return mtt;
+err_free_mtt:
+ while (mtt) {
+ tmp_mtt = mtt->low_level;
+ erdma_destroy_scatter_mtt(dev, mtt);
+ mtt = tmp_mtt;
+ }
+
+ return ERR_PTR(ret);
+}
+
+static void erdma_destroy_mtt(struct erdma_dev *dev, struct erdma_mtt *mtt)
+{
+ struct erdma_mtt *tmp_mtt;
+
+ if (mtt->continuous) {
+ dma_unmap_single(&dev->pdev->dev, mtt->buf_dma, mtt->size,
+ DMA_TO_DEVICE);
+ kfree(mtt->buf);
+ kfree(mtt);
+ } else {
+ while (mtt) {
+ tmp_mtt = mtt->low_level;
+ erdma_destroy_scatter_mtt(dev, mtt);
+ mtt = tmp_mtt;
+ }
+ }
+}
+
+static int get_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem,
+ u64 start, u64 len, int access, u64 virt,
+ unsigned long req_page_size, bool force_continuous)
+{
+ int ret = 0;
+
+ mem->umem = ib_umem_get(&dev->ibdev, start, len, access);
+ if (IS_ERR(mem->umem)) {
+ ret = PTR_ERR(mem->umem);
+ mem->umem = NULL;
+ return ret;
+ }
+
+ mem->va = virt;
+ mem->len = len;
+ mem->page_size = ib_umem_find_best_pgsz(mem->umem, req_page_size, virt);
+ mem->page_offset = start & (mem->page_size - 1);
+ mem->mtt_nents = ib_umem_num_dma_blocks(mem->umem, mem->page_size);
+ mem->page_cnt = mem->mtt_nents;
+ mem->mtt = erdma_create_mtt(dev, MTT_SIZE(mem->page_cnt),
+ force_continuous);
+ if (IS_ERR(mem->mtt)) {
+ ret = PTR_ERR(mem->mtt);
+ goto error_ret;
+ }
+
+ erdma_fill_bottom_mtt(dev, mem);
+
+ return 0;
+
+error_ret:
+ if (mem->umem) {
+ ib_umem_release(mem->umem);
+ mem->umem = NULL;
+ }
+
+ return ret;
+}
+
+static void put_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem)
+{
+ if (mem->mtt)
+ erdma_destroy_mtt(dev, mem->mtt);
+
+ if (mem->umem) {
+ ib_umem_release(mem->umem);
+ mem->umem = NULL;
+ }
+}
+
+static int erdma_map_user_dbrecords(struct erdma_ucontext *ctx,
+ u64 dbrecords_va,
+ struct erdma_user_dbrecords_page **dbr_page,
+ dma_addr_t *dma_addr)
+{
+ struct erdma_user_dbrecords_page *page = NULL;
+ int rv = 0;
+
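+ /* Doorbell records that share a user page reuse one pinned umem, tracked by refcnt. */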
+ mutex_lock(&ctx->dbrecords_page_mutex);
+
+ list_for_each_entry(page, &ctx->dbrecords_page_list, list)
+ if (page->va == (dbrecords_va & PAGE_MASK))
+ goto found;
+
+ page = kmalloc(sizeof(*page), GFP_KERNEL);
+ if (!page) {
+ rv = -ENOMEM;
+ goto out;
+ }
+
+ page->va = (dbrecords_va & PAGE_MASK);
+ page->refcnt = 0;
+
+ page->umem = ib_umem_get(ctx->ibucontext.device,
+ dbrecords_va & PAGE_MASK, PAGE_SIZE, 0);
+ if (IS_ERR(page->umem)) {
+ rv = PTR_ERR(page->umem);
+ kfree(page);
+ goto out;
+ }
+
+ list_add(&page->list, &ctx->dbrecords_page_list);
+
+found:
+ *dma_addr = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
+ (dbrecords_va & ~PAGE_MASK);
+ *dbr_page = page;
+ page->refcnt++;
+
+out:
+ mutex_unlock(&ctx->dbrecords_page_mutex);
+ return rv;
+}
+
+static void
+erdma_unmap_user_dbrecords(struct erdma_ucontext *ctx,
+ struct erdma_user_dbrecords_page **dbr_page)
+{
+ if (!ctx || !(*dbr_page))
+ return;
+
+ mutex_lock(&ctx->dbrecords_page_mutex);
+ if (--(*dbr_page)->refcnt == 0) {
+ list_del(&(*dbr_page)->list);
+ ib_umem_release((*dbr_page)->umem);
+ kfree(*dbr_page);
+ }
+
+ *dbr_page = NULL;
+ mutex_unlock(&ctx->dbrecords_page_mutex);
+}
+
+static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
+ u64 va, u32 len, u64 dbrec_va)
+{
+ dma_addr_t dbrec_dma;
+ u32 rq_offset;
+ int ret;
+
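+ /* The user queue buffer must hold the HW-page-aligned SQ followed by the RQ. */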
+ if (len < (ALIGN(qp->attrs.sq_size * SQEBB_SIZE, ERDMA_HW_PAGE_SIZE) +
+ qp->attrs.rq_size * RQE_SIZE))
+ return -EINVAL;
+
+ ret = get_mtt_entries(qp->dev, &qp->user_qp.sq_mem, va,
+ qp->attrs.sq_size << SQEBB_SHIFT, 0, va,
+ (SZ_1M - SZ_4K), true);
+ if (ret)
+ return ret;
+
+ rq_offset = ALIGN(qp->attrs.sq_size << SQEBB_SHIFT, ERDMA_HW_PAGE_SIZE);
+ qp->user_qp.rq_offset = rq_offset;
+
+ ret = get_mtt_entries(qp->dev, &qp->user_qp.rq_mem, va + rq_offset,
+ qp->attrs.rq_size << RQE_SHIFT, 0, va + rq_offset,
+ (SZ_1M - SZ_4K), true);
+ if (ret)
+ goto put_sq_mtt;
+
+ ret = erdma_map_user_dbrecords(uctx, dbrec_va,
+ &qp->user_qp.user_dbr_page,
+ &dbrec_dma);
+ if (ret)
+ goto put_rq_mtt;
+
+ qp->user_qp.sq_dbrec_dma = dbrec_dma;
+ qp->user_qp.rq_dbrec_dma = dbrec_dma + ERDMA_DB_SIZE;
+
+ return 0;
+
+put_rq_mtt:
+ put_mtt_entries(qp->dev, &qp->user_qp.rq_mem);
+
+put_sq_mtt:
+ put_mtt_entries(qp->dev, &qp->user_qp.sq_mem);
+
+ return ret;
+}
+
+static void free_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx)
+{
+ put_mtt_entries(qp->dev, &qp->user_qp.sq_mem);
+ put_mtt_entries(qp->dev, &qp->user_qp.rq_mem);
+ erdma_unmap_user_dbrecords(uctx, &qp->user_qp.user_dbr_page);
+}
+
+int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata)
+{
+ struct erdma_qp *qp = to_eqp(ibqp);
+ struct erdma_dev *dev = to_edev(ibqp->device);
+ struct erdma_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct erdma_ucontext, ibucontext);
+ struct erdma_ureq_create_qp ureq;
+ struct erdma_uresp_create_qp uresp;
+ void *old_entry;
+ int ret = 0;
+
+ ret = erdma_qp_validate_cap(dev, attrs);
+ if (ret)
+ goto err_out;
+
+ ret = erdma_qp_validate_attr(dev, attrs);
+ if (ret)
+ goto err_out;
+
+ qp->scq = to_ecq(attrs->send_cq);
+ qp->rcq = to_ecq(attrs->recv_cq);
+ qp->dev = dev;
+ qp->attrs.cc = dev->attrs.cc;
+
+ init_rwsem(&qp->state_lock);
+ kref_init(&qp->ref);
+ init_completion(&qp->safe_free);
+
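+ /* The GSI QP is fixed at QPN 1; other QPs get a QPN allocated cyclically from the XArray. */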
+ if (qp->ibqp.qp_type == IB_QPT_GSI) {
+ old_entry = xa_store(&dev->qp_xa, 1, qp, GFP_KERNEL);
+ if (xa_is_err(old_entry))
+ ret = xa_err(old_entry);
+ else
+ qp->ibqp.qp_num = 1;
+ } else {
+ ret = xa_alloc_cyclic(&dev->qp_xa, &qp->ibqp.qp_num, qp,
+ XA_LIMIT(1, dev->attrs.max_qp - 1),
+ &dev->next_alloc_qpn, GFP_KERNEL);
+ }
+
+ if (ret < 0) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ qp->attrs.sq_size = roundup_pow_of_two(attrs->cap.max_send_wr *
+ ERDMA_MAX_WQEBB_PER_SQE);
+ qp->attrs.rq_size = roundup_pow_of_two(attrs->cap.max_recv_wr);
+
+ if (uctx) {
+ ret = ib_copy_from_udata(&ureq, udata,
+ min(sizeof(ureq), udata->inlen));
+ if (ret)
+ goto err_out_xa;
+
+ ret = init_user_qp(qp, uctx, ureq.qbuf_va, ureq.qbuf_len,
+ ureq.db_record_va);
+ if (ret)
+ goto err_out_xa;
+
+ memset(&uresp, 0, sizeof(uresp));
+
+ uresp.num_sqe = qp->attrs.sq_size;
+ uresp.num_rqe = qp->attrs.rq_size;
+ uresp.qp_id = QP_ID(qp);
+ uresp.rq_offset = qp->user_qp.rq_offset;
+
+ ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (ret)
+ goto err_out_cmd;
+ } else {
+ ret = init_kernel_qp(dev, qp, attrs);
+ if (ret)
+ goto err_out_xa;
+ }
+
+ qp->attrs.max_send_sge = attrs->cap.max_send_sge;
+ qp->attrs.max_recv_sge = attrs->cap.max_recv_sge;
+
+ if (erdma_device_iwarp(qp->dev))
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_IDLE;
+ else
+ qp->attrs.rocev2.state = ERDMA_QPS_ROCEV2_RESET;
+
+ INIT_DELAYED_WORK(&qp->reflush_dwork, erdma_flush_worker);
+
+ ret = create_qp_cmd(uctx, qp);
+ if (ret)
+ goto err_out_cmd;
+
+ spin_lock_init(&qp->lock);
+
+ return 0;
+
+err_out_cmd:
+ if (uctx)
+ free_user_qp(qp, uctx);
+ else
+ free_kernel_qp(qp);
+err_out_xa:
+ xa_erase(&dev->qp_xa, QP_ID(qp));
+err_out:
+ return ret;
+}
+
+static int erdma_create_stag(struct erdma_dev *dev, u32 *stag)
+{
+ int stag_idx;
+
+ stag_idx = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX]);
+ if (stag_idx < 0)
+ return stag_idx;
+
+ /* For now, we always let key field be zero. */
+ *stag = (stag_idx << 8);
+
+ return 0;
+}
+
+struct ib_mr *erdma_get_dma_mr(struct ib_pd *ibpd, int acc)
+{
+ struct erdma_dev *dev = to_edev(ibpd->device);
+ struct erdma_mr *mr;
+ u32 stag;
+ int ret;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ ret = erdma_create_stag(dev, &stag);
+ if (ret)
+ goto out_free;
+
+ mr->type = ERDMA_MR_TYPE_DMA;
+
+ mr->ibmr.lkey = stag;
+ mr->ibmr.rkey = stag;
+ mr->ibmr.pd = ibpd;
+ mr->access = ERDMA_MR_ACC_LR | to_erdma_access_flags(acc);
+ ret = regmr_cmd(dev, mr);
+ if (ret)
+ goto out_remove_stag;
+
+ return &mr->ibmr;
+
+out_remove_stag:
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
+ mr->ibmr.lkey >> 8);
+
+out_free:
+ kfree(mr);
+
+ return ERR_PTR(ret);
+}
+
+struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ struct erdma_mr *mr;
+ struct erdma_dev *dev = to_edev(ibpd->device);
+ int ret;
+ u32 stag;
+
+ if (mr_type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (max_num_sg > ERDMA_MR_MAX_MTT_CNT)
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ ret = erdma_create_stag(dev, &stag);
+ if (ret)
+ goto out_free;
+
+ mr->type = ERDMA_MR_TYPE_FRMR;
+
+ mr->ibmr.lkey = stag;
+ mr->ibmr.rkey = stag;
+ mr->ibmr.pd = ibpd;
+ /* Overwritten with the requested rights when the FRMR is posted (IB_WR_REG_MR). */
+ mr->access = ERDMA_MR_ACC_LR | ERDMA_MR_ACC_LW | ERDMA_MR_ACC_RR |
+ ERDMA_MR_ACC_RW;
+
+ mr->mem.page_size = PAGE_SIZE; /* update it later. */
+ mr->mem.page_cnt = max_num_sg;
+ mr->mem.mtt = erdma_create_mtt(dev, MTT_SIZE(max_num_sg), true);
+ if (IS_ERR(mr->mem.mtt)) {
+ ret = PTR_ERR(mr->mem.mtt);
+ goto out_remove_stag;
+ }
+
+ ret = regmr_cmd(dev, mr);
+ if (ret)
+ goto out_destroy_mtt;
+
+ return &mr->ibmr;
+
+out_destroy_mtt:
+ erdma_destroy_mtt(dev, mr->mem.mtt);
+
+out_remove_stag:
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
+ mr->ibmr.lkey >> 8);
+
+out_free:
+ kfree(mr);
+
+ return ERR_PTR(ret);
+}
+
+static int erdma_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct erdma_mr *mr = to_emr(ibmr);
+
+ if (mr->mem.mtt_nents >= mr->mem.page_cnt)
+ return -1;
+
+ mr->mem.mtt->buf[mr->mem.mtt_nents] = addr;
+ mr->mem.mtt_nents++;
+
+ return 0;
+}
+
+int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
+{
+ struct erdma_mr *mr = to_emr(ibmr);
+ int num;
+
+ mr->mem.mtt_nents = 0;
+
+ num = ib_sg_to_pages(&mr->ibmr, sg, sg_nents, sg_offset,
+ erdma_set_page);
+
+ return num;
+}
+
+struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
+ u64 virt, int access, struct ib_dmah *dmah,
+ struct ib_udata *udata)
+{
+ struct erdma_mr *mr = NULL;
+ struct erdma_dev *dev = to_edev(ibpd->device);
+ u32 stag;
+ int ret;
+
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (!len || len > dev->attrs.max_mr_size)
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ ret = get_mtt_entries(dev, &mr->mem, start, len, access, virt,
+ SZ_2G - SZ_4K, false);
+ if (ret)
+ goto err_out_free;
+
+ ret = erdma_create_stag(dev, &stag);
+ if (ret)
+ goto err_out_put_mtt;
+
+ mr->ibmr.lkey = mr->ibmr.rkey = stag;
+ mr->ibmr.pd = ibpd;
+ mr->mem.va = virt;
+ mr->mem.len = len;
+ mr->access = ERDMA_MR_ACC_LR | to_erdma_access_flags(access);
+ mr->valid = 1;
+ mr->type = ERDMA_MR_TYPE_NORMAL;
+
+ ret = regmr_cmd(dev, mr);
+ if (ret)
+ goto err_out_mr;
+
+ return &mr->ibmr;
+
+err_out_mr:
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
+ mr->ibmr.lkey >> 8);
+
+err_out_put_mtt:
+ put_mtt_entries(dev, &mr->mem);
+
+err_out_free:
+ kfree(mr);
+
+ return ERR_PTR(ret);
+}
+
+int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+{
+ struct erdma_mr *mr;
+ struct erdma_dev *dev = to_edev(ibmr->device);
+ struct erdma_cmdq_dereg_mr_req req;
+ int ret;
+
+ mr = to_emr(ibmr);
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_DEREG_MR);
+
+ req.cfg = FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, ibmr->lkey >> 8) |
+ FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, ibmr->lkey & 0xFF);
+
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
+ if (ret)
+ return ret;
+
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX], ibmr->lkey >> 8);
+
+ put_mtt_entries(dev, &mr->mem);
+
+ kfree(mr);
+ return 0;
+}
+
+int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+{
+ struct erdma_cq *cq = to_ecq(ibcq);
+ struct erdma_dev *dev = to_edev(ibcq->device);
+ struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
+ udata, struct erdma_ucontext, ibucontext);
+ int err;
+ struct erdma_cmdq_destroy_cq_req req;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_DESTROY_CQ);
+ req.cqn = cq->cqn;
+
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
+ if (err)
+ return err;
+
+ if (rdma_is_kernel_res(&cq->ibcq.res)) {
+ dma_free_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT,
+ cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
+ dma_pool_free(dev->db_pool, cq->kern_cq.dbrec,
+ cq->kern_cq.dbrec_dma);
+ } else {
+ erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
+ put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
+ }
+
+ xa_erase(&dev->cq_xa, cq->cqn);
+
+ return 0;
+}
+
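+/*
+ * Destroy a QP: move the QP to the error state, flush pending work,
+ * destroy it in hardware, wait for the last reference to drop, and then
+ * free the kernel or user queue resources.
+ */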
+int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+{
+ struct erdma_qp *qp = to_eqp(ibqp);
+ struct erdma_dev *dev = to_edev(ibqp->device);
+ struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
+ udata, struct erdma_ucontext, ibucontext);
+ struct erdma_cmdq_destroy_qp_req req;
+ union erdma_mod_qp_params params;
+ int err;
+
+ down_write(&qp->state_lock);
+ if (erdma_device_iwarp(dev)) {
+ params.iwarp.state = ERDMA_QPS_IWARP_ERROR;
+ erdma_modify_qp_state_iwarp(qp, &params.iwarp,
+ ERDMA_QPA_IWARP_STATE);
+ } else {
+ params.rocev2.state = ERDMA_QPS_ROCEV2_ERROR;
+ erdma_modify_qp_state_rocev2(qp, &params.rocev2,
+ ERDMA_QPA_ROCEV2_STATE);
+ }
+ up_write(&qp->state_lock);
+
+ cancel_delayed_work_sync(&qp->reflush_dwork);
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_DESTROY_QP);
+ req.qpn = QP_ID(qp);
+
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
+ if (err)
+ return err;
+
+ erdma_qp_put(qp);
+ wait_for_completion(&qp->safe_free);
+
+ if (rdma_is_kernel_res(&qp->ibqp.res)) {
+ free_kernel_qp(qp);
+ } else {
+ put_mtt_entries(dev, &qp->user_qp.sq_mem);
+ put_mtt_entries(dev, &qp->user_qp.rq_mem);
+ erdma_unmap_user_dbrecords(ctx, &qp->user_qp.user_dbr_page);
+ }
+
+ if (qp->cep)
+ erdma_cep_put(qp->cep);
+ xa_erase(&dev->qp_xa, QP_ID(qp));
+
+ return 0;
+}
+
+void erdma_qp_get_ref(struct ib_qp *ibqp)
+{
+ erdma_qp_get(to_eqp(ibqp));
+}
+
+void erdma_qp_put_ref(struct ib_qp *ibqp)
+{
+ erdma_qp_put(to_eqp(ibqp));
+}
+
+int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
+{
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct erdma_user_mmap_entry *entry;
+ pgprot_t prot;
+ int err;
+
+ rdma_entry = rdma_user_mmap_entry_get(ctx, vma);
+ if (!rdma_entry)
+ return -EINVAL;
+
+ entry = to_emmap(rdma_entry);
+
+ switch (entry->mmap_flag) {
+ case ERDMA_MMAP_IO_NC:
+ /* map doorbell. */
+ prot = pgprot_device(vma->vm_page_prot);
+ break;
+ default:
+ err = -EINVAL;
+ goto put_entry;
+ }
+
+ err = rdma_user_mmap_io(ctx, vma, PFN_DOWN(entry->address), PAGE_SIZE,
+ prot, rdma_entry);
+
+put_entry:
+ rdma_user_mmap_entry_put(rdma_entry);
+ return err;
+}
+
+void erdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct erdma_user_mmap_entry *entry = to_emmap(rdma_entry);
+
+ kfree(entry);
+}
+
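+/*
+ * Set up doorbell space for a user context: either hand out the shared
+ * BAR doorbell regions (which requires CAP_SYS_RAWIO) or ask the device
+ * for dedicated extended doorbell pages.
+ */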
+static int alloc_db_resources(struct erdma_dev *dev, struct erdma_ucontext *ctx,
+ bool ext_db_en)
+{
+ struct erdma_cmdq_ext_db_req req = {};
+ u64 val0, val1;
+ int ret;
+
+ /*
+ * CAP_SYS_RAWIO is required if the hardware does not support the
+ * extended doorbell mechanism.
+ */
+ if (!ext_db_en && !capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ if (!ext_db_en) {
+ ctx->sdb = dev->func_bar_addr + ERDMA_BAR_SQDB_SPACE_OFFSET;
+ ctx->rdb = dev->func_bar_addr + ERDMA_BAR_RQDB_SPACE_OFFSET;
+ ctx->cdb = dev->func_bar_addr + ERDMA_BAR_CQDB_SPACE_OFFSET;
+ return 0;
+ }
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
+ CMDQ_OPCODE_ALLOC_DB);
+
+ req.cfg = FIELD_PREP(ERDMA_CMD_EXT_DB_CQ_EN_MASK, 1) |
+ FIELD_PREP(ERDMA_CMD_EXT_DB_RQ_EN_MASK, 1) |
+ FIELD_PREP(ERDMA_CMD_EXT_DB_SQ_EN_MASK, 1);
+
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &val0, &val1,
+ true);
+ if (ret)
+ return ret;
+
+ ctx->ext_db.enable = true;
+ ctx->ext_db.sdb_off = ERDMA_GET(val0, ALLOC_DB_RESP_SDB);
+ ctx->ext_db.rdb_off = ERDMA_GET(val0, ALLOC_DB_RESP_RDB);
+ ctx->ext_db.cdb_off = ERDMA_GET(val0, ALLOC_DB_RESP_CDB);
+
+ ctx->sdb = dev->func_bar_addr + (ctx->ext_db.sdb_off << PAGE_SHIFT);
+ ctx->rdb = dev->func_bar_addr + (ctx->ext_db.rdb_off << PAGE_SHIFT);
+ ctx->cdb = dev->func_bar_addr + (ctx->ext_db.cdb_off << PAGE_SHIFT);
+
+ return 0;
+}
+
+static void free_db_resources(struct erdma_dev *dev, struct erdma_ucontext *ctx)
+{
+ struct erdma_cmdq_ext_db_req req = {};
+ int ret;
+
+ if (!ctx->ext_db.enable)
+ return;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
+ CMDQ_OPCODE_FREE_DB);
+
+ req.cfg = FIELD_PREP(ERDMA_CMD_EXT_DB_CQ_EN_MASK, 1) |
+ FIELD_PREP(ERDMA_CMD_EXT_DB_RQ_EN_MASK, 1) |
+ FIELD_PREP(ERDMA_CMD_EXT_DB_SQ_EN_MASK, 1);
+
+ req.sdb_off = ctx->ext_db.sdb_off;
+ req.rdb_off = ctx->ext_db.rdb_off;
+ req.cdb_off = ctx->ext_db.cdb_off;
+
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
+ if (ret)
+ ibdev_err_ratelimited(&dev->ibdev,
+ "free db resources failed %d", ret);
+}
+
+static void erdma_uctx_user_mmap_entries_remove(struct erdma_ucontext *uctx)
+{
+ rdma_user_mmap_entry_remove(uctx->sq_db_mmap_entry);
+ rdma_user_mmap_entry_remove(uctx->rq_db_mmap_entry);
+ rdma_user_mmap_entry_remove(uctx->cq_db_mmap_entry);
+}
+
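+/*
+ * Allocate a user context: reserve doorbell resources and expose the
+ * SQ/RQ/CQ doorbell pages to userspace via mmap entries.
+ */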
+int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata)
+{
+ struct erdma_ucontext *ctx = to_ectx(ibctx);
+ struct erdma_dev *dev = to_edev(ibctx->device);
+ int ret;
+ struct erdma_uresp_alloc_ctx uresp = {};
+
+ if (atomic_inc_return(&dev->num_ctx) > ERDMA_MAX_CONTEXT) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ if (udata->outlen < sizeof(uresp)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ INIT_LIST_HEAD(&ctx->dbrecords_page_list);
+ mutex_init(&ctx->dbrecords_page_mutex);
+
+ ret = alloc_db_resources(dev, ctx,
+ !!(dev->attrs.cap_flags &
+ ERDMA_DEV_CAP_FLAGS_EXTEND_DB));
+ if (ret)
+ goto err_out;
+
+ ctx->sq_db_mmap_entry = erdma_user_mmap_entry_insert(
+ ctx, (void *)ctx->sdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.sdb);
+ if (!ctx->sq_db_mmap_entry) {
+ ret = -ENOMEM;
+ goto err_free_ext_db;
+ }
+
+ ctx->rq_db_mmap_entry = erdma_user_mmap_entry_insert(
+ ctx, (void *)ctx->rdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.rdb);
+ if (!ctx->rq_db_mmap_entry) {
+ ret = -EINVAL;
+ goto err_put_mmap_entries;
+ }
+
+ ctx->cq_db_mmap_entry = erdma_user_mmap_entry_insert(
+ ctx, (void *)ctx->cdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.cdb);
+ if (!ctx->cq_db_mmap_entry) {
+ ret = -EINVAL;
+ goto err_put_mmap_entries;
+ }
+
+ uresp.dev_id = dev->pdev->device;
+
+ ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (ret)
+ goto err_put_mmap_entries;
+
+ return 0;
+
+err_put_mmap_entries:
+ erdma_uctx_user_mmap_entries_remove(ctx);
+
+err_free_ext_db:
+ free_db_resources(dev, ctx);
+
+err_out:
+ atomic_dec(&dev->num_ctx);
+ return ret;
+}
+
+void erdma_dealloc_ucontext(struct ib_ucontext *ibctx)
+{
+ struct erdma_dev *dev = to_edev(ibctx->device);
+ struct erdma_ucontext *ctx = to_ectx(ibctx);
+
+ erdma_uctx_user_mmap_entries_remove(ctx);
+ free_db_resources(dev, ctx);
+ atomic_dec(&dev->num_ctx);
+}
+
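+/* Translate an rdma_ah_attr (GRH, SL, DMAC) into the device address vector. */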
+static void erdma_attr_to_av(const struct rdma_ah_attr *ah_attr,
+ struct erdma_av *av, u16 sport)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+
+ av->port = rdma_ah_get_port_num(ah_attr);
+ av->sgid_index = grh->sgid_index;
+ av->hop_limit = grh->hop_limit;
+ av->traffic_class = grh->traffic_class;
+ av->sl = rdma_ah_get_sl(ah_attr);
+
+ av->flow_label = grh->flow_label;
+ av->udp_sport = sport;
+
+ ether_addr_copy(av->dmac, ah_attr->roce.dmac);
+ memcpy(av->dgid, grh->dgid.raw, ERDMA_ROCEV2_GID_SIZE);
+
+ if (ipv6_addr_v4mapped((struct in6_addr *)&grh->dgid))
+ av->ntype = ERDMA_NETWORK_TYPE_IPV4;
+ else
+ av->ntype = ERDMA_NETWORK_TYPE_IPV6;
+}
+
+static void erdma_av_to_attr(struct erdma_av *av, struct rdma_ah_attr *ah_attr)
+{
+ ah_attr->type = RDMA_AH_ATTR_TYPE_ROCE;
+
+ rdma_ah_set_sl(ah_attr, av->sl);
+ rdma_ah_set_port_num(ah_attr, av->port);
+ rdma_ah_set_ah_flags(ah_attr, IB_AH_GRH);
+
+ rdma_ah_set_grh(ah_attr, NULL, av->flow_label, av->sgid_index,
+ av->hop_limit, av->traffic_class);
+ rdma_ah_set_dgid_raw(ah_attr, av->dgid);
+}
+
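+/*
+ * IB verbs QP states mapped to the device's iWarp and RoCEv2 QP states,
+ * e.g. ib_to_rocev2_qps(IB_QPS_RTR) yields ERDMA_QPS_ROCEV2_RTR.
+ */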
+static int ib_qps_to_erdma_qps[ERDMA_PROTO_COUNT][IB_QPS_ERR + 1] = {
+ [ERDMA_PROTO_IWARP] = {
+ [IB_QPS_RESET] = ERDMA_QPS_IWARP_IDLE,
+ [IB_QPS_INIT] = ERDMA_QPS_IWARP_IDLE,
+ [IB_QPS_RTR] = ERDMA_QPS_IWARP_RTR,
+ [IB_QPS_RTS] = ERDMA_QPS_IWARP_RTS,
+ [IB_QPS_SQD] = ERDMA_QPS_IWARP_CLOSING,
+ [IB_QPS_SQE] = ERDMA_QPS_IWARP_TERMINATE,
+ [IB_QPS_ERR] = ERDMA_QPS_IWARP_ERROR,
+ },
+ [ERDMA_PROTO_ROCEV2] = {
+ [IB_QPS_RESET] = ERDMA_QPS_ROCEV2_RESET,
+ [IB_QPS_INIT] = ERDMA_QPS_ROCEV2_INIT,
+ [IB_QPS_RTR] = ERDMA_QPS_ROCEV2_RTR,
+ [IB_QPS_RTS] = ERDMA_QPS_ROCEV2_RTS,
+ [IB_QPS_SQD] = ERDMA_QPS_ROCEV2_SQD,
+ [IB_QPS_SQE] = ERDMA_QPS_ROCEV2_SQE,
+ [IB_QPS_ERR] = ERDMA_QPS_ROCEV2_ERROR,
+ },
+};
+
+static int erdma_qps_to_ib_qps[ERDMA_PROTO_COUNT][ERDMA_QPS_ROCEV2_COUNT] = {
+ [ERDMA_PROTO_IWARP] = {
+ [ERDMA_QPS_IWARP_IDLE] = IB_QPS_INIT,
+ [ERDMA_QPS_IWARP_RTR] = IB_QPS_RTR,
+ [ERDMA_QPS_IWARP_RTS] = IB_QPS_RTS,
+ [ERDMA_QPS_IWARP_CLOSING] = IB_QPS_ERR,
+ [ERDMA_QPS_IWARP_TERMINATE] = IB_QPS_ERR,
+ [ERDMA_QPS_IWARP_ERROR] = IB_QPS_ERR,
+ },
+ [ERDMA_PROTO_ROCEV2] = {
+ [ERDMA_QPS_ROCEV2_RESET] = IB_QPS_RESET,
+ [ERDMA_QPS_ROCEV2_INIT] = IB_QPS_INIT,
+ [ERDMA_QPS_ROCEV2_RTR] = IB_QPS_RTR,
+ [ERDMA_QPS_ROCEV2_RTS] = IB_QPS_RTS,
+ [ERDMA_QPS_ROCEV2_SQD] = IB_QPS_SQD,
+ [ERDMA_QPS_ROCEV2_SQE] = IB_QPS_SQE,
+ [ERDMA_QPS_ROCEV2_ERROR] = IB_QPS_ERR,
+ },
+};
+
+static inline enum erdma_qps_iwarp ib_to_iwarp_qps(enum ib_qp_state state)
+{
+ return ib_qps_to_erdma_qps[ERDMA_PROTO_IWARP][state];
+}
+
+static inline enum erdma_qps_rocev2 ib_to_rocev2_qps(enum ib_qp_state state)
+{
+ return ib_qps_to_erdma_qps[ERDMA_PROTO_ROCEV2][state];
+}
+
+static inline enum ib_qp_state iwarp_to_ib_qps(enum erdma_qps_iwarp state)
+{
+ return erdma_qps_to_ib_qps[ERDMA_PROTO_IWARP][state];
+}
+
+static inline enum ib_qp_state rocev2_to_ib_qps(enum erdma_qps_rocev2 state)
+{
+ return erdma_qps_to_ib_qps[ERDMA_PROTO_ROCEV2][state];
+}
+
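+/*
+ * Validate modify_qp attributes (port, state transition, AV, pkey index)
+ * before applying them to the device.
+ */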
+static int erdma_check_qp_attrs(struct erdma_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask)
+{
+ enum ib_qp_state cur_state, nxt_state;
+ struct erdma_dev *dev = qp->dev;
+ int ret = -EINVAL;
+
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if ((attr_mask & IB_QP_PORT) &&
+ !rdma_is_port_valid(&dev->ibdev, attr->port_num))
+ goto out;
+
+ if (erdma_device_rocev2(dev)) {
+ cur_state = (attr_mask & IB_QP_CUR_STATE) ?
+ attr->cur_qp_state :
+ rocev2_to_ib_qps(qp->attrs.rocev2.state);
+
+ nxt_state = (attr_mask & IB_QP_STATE) ? attr->qp_state :
+ cur_state;
+
+ if (!ib_modify_qp_is_ok(cur_state, nxt_state, qp->ibqp.qp_type,
+ attr_mask))
+ goto out;
+
+ if ((attr_mask & IB_QP_AV) &&
+ erdma_check_gid_attr(
+ rdma_ah_read_grh(&attr->ah_attr)->sgid_attr))
+ goto out;
+
+ if ((attr_mask & IB_QP_PKEY_INDEX) &&
+ attr->pkey_index >= ERDMA_MAX_PKEYS)
+ goto out;
+ }
+
+ return 0;
+
+out:
+ return ret;
+}
+
+static void erdma_init_mod_qp_params_rocev2(
+ struct erdma_qp *qp, struct erdma_mod_qp_params_rocev2 *params,
+ int *erdma_attr_mask, struct ib_qp_attr *attr, int ib_attr_mask)
+{
+ enum erdma_qpa_mask_rocev2 to_modify_attrs = 0;
+ enum erdma_qps_rocev2 cur_state, nxt_state;
+ u16 udp_sport;
+
+ if (ib_attr_mask & IB_QP_CUR_STATE)
+ cur_state = ib_to_rocev2_qps(attr->cur_qp_state);
+ else
+ cur_state = qp->attrs.rocev2.state;
+
+ if (ib_attr_mask & IB_QP_STATE)
+ nxt_state = ib_to_rocev2_qps(attr->qp_state);
+ else
+ nxt_state = cur_state;
+
+ to_modify_attrs |= ERDMA_QPA_ROCEV2_STATE;
+ params->state = nxt_state;
+
+ if (ib_attr_mask & IB_QP_QKEY) {
+ to_modify_attrs |= ERDMA_QPA_ROCEV2_QKEY;
+ params->qkey = attr->qkey;
+ }
+
+ if (ib_attr_mask & IB_QP_SQ_PSN) {
+ to_modify_attrs |= ERDMA_QPA_ROCEV2_SQ_PSN;
+ params->sq_psn = attr->sq_psn;
+ }
+
+ if (ib_attr_mask & IB_QP_RQ_PSN) {
+ to_modify_attrs |= ERDMA_QPA_ROCEV2_RQ_PSN;
+ params->rq_psn = attr->rq_psn;
+ }
+
+ if (ib_attr_mask & IB_QP_DEST_QPN) {
+ to_modify_attrs |= ERDMA_QPA_ROCEV2_DST_QPN;
+ params->dst_qpn = attr->dest_qp_num;
+ }
+
+ if (ib_attr_mask & IB_QP_AV) {
+ to_modify_attrs |= ERDMA_QPA_ROCEV2_AV;
+ udp_sport = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
+ QP_ID(qp), params->dst_qpn);
+ erdma_attr_to_av(&attr->ah_attr, &params->av, udp_sport);
+ }
+
+ *erdma_attr_mask = to_modify_attrs;
+}
+
+int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+ struct ib_udata *udata)
+{
+ struct erdma_qp *qp = to_eqp(ibqp);
+ union erdma_mod_qp_params params;
+ int ret = 0, erdma_attr_mask = 0;
+
+ down_write(&qp->state_lock);
+
+ ret = erdma_check_qp_attrs(qp, attr, attr_mask);
+ if (ret)
+ goto out;
+
+ if (erdma_device_iwarp(qp->dev)) {
+ if (attr_mask & IB_QP_STATE) {
+ erdma_attr_mask |= ERDMA_QPA_IWARP_STATE;
+ params.iwarp.state = ib_to_iwarp_qps(attr->qp_state);
+ }
+
+ ret = erdma_modify_qp_state_iwarp(qp, &params.iwarp,
+ erdma_attr_mask);
+ } else {
+ erdma_init_mod_qp_params_rocev2(
+ qp, &params.rocev2, &erdma_attr_mask, attr, attr_mask);
+
+ ret = erdma_modify_qp_state_rocev2(qp, &params.rocev2,
+ erdma_attr_mask);
+ }
+
+out:
+ up_write(&qp->state_lock);
+ return ret;
+}
+
+static enum ib_qp_state query_qp_state(struct erdma_qp *qp)
+{
+ if (erdma_device_iwarp(qp->dev))
+ return iwarp_to_ib_qps(qp->attrs.iwarp.state);
+ else
+ return rocev2_to_ib_qps(qp->attrs.rocev2.state);
+}
+
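+/*
+ * Query QP attributes. RoCEv2 devices are asked for the live PSNs and
+ * state via the QUERY_QP command; iWarp devices report the cached state.
+ */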
+int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
+{
+ struct erdma_cmdq_query_qp_req_rocev2 req;
+ struct erdma_dev *dev;
+ struct erdma_qp *qp;
+ u64 resp0, resp1;
+ int ret;
+
+ if (ibqp && qp_attr && qp_init_attr) {
+ qp = to_eqp(ibqp);
+ dev = to_edev(ibqp->device);
+ } else {
+ return -EINVAL;
+ }
+
+ qp_attr->cap.max_inline_data = ERDMA_MAX_INLINE;
+ qp_init_attr->cap.max_inline_data = ERDMA_MAX_INLINE;
+
+ qp_attr->cap.max_send_wr = qp->attrs.sq_size;
+ qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
+ qp_attr->cap.max_send_sge = qp->attrs.max_send_sge;
+ qp_attr->cap.max_recv_sge = qp->attrs.max_recv_sge;
+
+ qp_attr->path_mtu = ib_mtu_int_to_enum(dev->netdev->mtu);
+ qp_attr->max_rd_atomic = qp->attrs.irq_size;
+ qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;
+
+ qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ;
+
+ qp_init_attr->cap = qp_attr->cap;
+
+ if (erdma_device_rocev2(dev)) {
+ /* Query hardware to get some attributes */
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_QUERY_QP);
+ req.qpn = QP_ID(qp);
+
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0,
+ &resp1, true);
+ if (ret)
+ return ret;
+
+ qp_attr->sq_psn =
+ FIELD_GET(ERDMA_CMD_QUERY_QP_RESP_SQ_PSN_MASK, resp0);
+ qp_attr->rq_psn =
+ FIELD_GET(ERDMA_CMD_QUERY_QP_RESP_RQ_PSN_MASK, resp0);
+ qp_attr->qp_state = rocev2_to_ib_qps(FIELD_GET(
+ ERDMA_CMD_QUERY_QP_RESP_QP_STATE_MASK, resp0));
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+ qp_attr->sq_draining = FIELD_GET(
+ ERDMA_CMD_QUERY_QP_RESP_SQ_DRAINING_MASK, resp0);
+
+ qp_attr->pkey_index = 0;
+ qp_attr->dest_qp_num = qp->attrs.rocev2.dst_qpn;
+
+ if (qp->ibqp.qp_type == IB_QPT_RC)
+ erdma_av_to_attr(&qp->attrs.rocev2.av,
+ &qp_attr->ah_attr);
+ } else {
+ qp_attr->qp_state = query_qp_state(qp);
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+ }
+
+ return 0;
+}
+
+static int erdma_init_user_cq(struct erdma_ucontext *ctx, struct erdma_cq *cq,
+ struct erdma_ureq_create_cq *ureq)
+{
+ int ret;
+ struct erdma_dev *dev = to_edev(cq->ibcq.device);
+
+ ret = get_mtt_entries(dev, &cq->user_cq.qbuf_mem, ureq->qbuf_va,
+ ureq->qbuf_len, 0, ureq->qbuf_va, SZ_64M - SZ_4K,
+ true);
+ if (ret)
+ return ret;
+
+ ret = erdma_map_user_dbrecords(ctx, ureq->db_record_va,
+ &cq->user_cq.user_dbr_page,
+ &cq->user_cq.dbrec_dma);
+ if (ret)
+ put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
+
+ return ret;
+}
+
+static int erdma_init_kernel_cq(struct erdma_cq *cq)
+{
+ struct erdma_dev *dev = to_edev(cq->ibcq.device);
+
+ cq->kern_cq.qbuf =
+ dma_alloc_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT,
+ &cq->kern_cq.qbuf_dma_addr, GFP_KERNEL);
+ if (!cq->kern_cq.qbuf)
+ return -ENOMEM;
+
+ cq->kern_cq.dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL,
+ &cq->kern_cq.dbrec_dma);
+ if (!cq->kern_cq.dbrec)
+ goto err_out;
+
+ spin_lock_init(&cq->kern_cq.lock);
+ /* use default cqdb addr */
+ cq->kern_cq.db = dev->func_bar + ERDMA_BAR_CQDB_SPACE_OFFSET;
+
+ return 0;
+
+err_out:
+ dma_free_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT,
+ cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
+
+ return -ENOMEM;
+}
+
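+/*
+ * Create a CQ: allocate a CQN, set up the queue buffer and doorbell
+ * record (kernel allocated or user provided), then create the CQ in
+ * hardware.
+ */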
+int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_udata *udata = &attrs->driver_udata;
+ struct erdma_cq *cq = to_ecq(ibcq);
+ struct erdma_dev *dev = to_edev(ibcq->device);
+ unsigned int depth = attr->cqe;
+ int ret;
+ struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
+ udata, struct erdma_ucontext, ibucontext);
+
+ if (depth > dev->attrs.max_cqe)
+ return -EINVAL;
+
+ depth = roundup_pow_of_two(depth);
+ cq->ibcq.cqe = depth;
+ cq->depth = depth;
+ cq->assoc_eqn = attr->comp_vector + 1;
+
+ ret = xa_alloc_cyclic(&dev->cq_xa, &cq->cqn, cq,
+ XA_LIMIT(1, dev->attrs.max_cq - 1),
+ &dev->next_alloc_cqn, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ if (!rdma_is_kernel_res(&ibcq->res)) {
+ struct erdma_ureq_create_cq ureq;
+ struct erdma_uresp_create_cq uresp;
+
+ ret = ib_copy_from_udata(&ureq, udata,
+ min(udata->inlen, sizeof(ureq)));
+ if (ret)
+ goto err_out_xa;
+
+ ret = erdma_init_user_cq(ctx, cq, &ureq);
+ if (ret)
+ goto err_out_xa;
+
+ uresp.cq_id = cq->cqn;
+ uresp.num_cqe = depth;
+
+ ret = ib_copy_to_udata(udata, &uresp,
+ min(sizeof(uresp), udata->outlen));
+ if (ret)
+ goto err_free_res;
+ } else {
+ ret = erdma_init_kernel_cq(cq);
+ if (ret)
+ goto err_out_xa;
+ }
+
+ ret = create_cq_cmd(ctx, cq);
+ if (ret)
+ goto err_free_res;
+
+ return 0;
+
+err_free_res:
+ if (!rdma_is_kernel_res(&ibcq->res)) {
+ erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
+ put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
+ } else {
+ dma_free_coherent(&dev->pdev->dev, depth << CQE_SHIFT,
+ cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
+ dma_pool_free(dev->db_pool, cq->kern_cq.dbrec,
+ cq->kern_cq.dbrec_dma);
+ }
+
+err_out_xa:
+ xa_erase(&dev->cq_xa, cq->cqn);
+
+ return ret;
+}
+
+void erdma_disassociate_ucontext(struct ib_ucontext *ibcontext)
+{
+}
+
+void erdma_set_mtu(struct erdma_dev *dev, u32 mtu)
+{
+ struct erdma_cmdq_config_mtu_req req;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
+ CMDQ_OPCODE_CONF_MTU);
+ req.mtu = mtu;
+
+ erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL, true);
+}
+
+void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason)
+{
+ struct ib_event event;
+
+ event.device = &dev->ibdev;
+ event.element.port_num = 1;
+ event.event = reason;
+
+ ib_dispatch_event(&event);
+}
+
+enum counters {
+ ERDMA_STATS_TX_REQS_CNT,
+ ERDMA_STATS_TX_PACKETS_CNT,
+ ERDMA_STATS_TX_BYTES_CNT,
+ ERDMA_STATS_TX_DISABLE_DROP_CNT,
+ ERDMA_STATS_TX_BPS_METER_DROP_CNT,
+ ERDMA_STATS_TX_PPS_METER_DROP_CNT,
+
+ ERDMA_STATS_RX_PACKETS_CNT,
+ ERDMA_STATS_RX_BYTES_CNT,
+ ERDMA_STATS_RX_DISABLE_DROP_CNT,
+ ERDMA_STATS_RX_BPS_METER_DROP_CNT,
+ ERDMA_STATS_RX_PPS_METER_DROP_CNT,
+
+ ERDMA_STATS_MAX
+};
+
+static const struct rdma_stat_desc erdma_descs[] = {
+ [ERDMA_STATS_TX_REQS_CNT].name = "tx_reqs_cnt",
+ [ERDMA_STATS_TX_PACKETS_CNT].name = "tx_packets_cnt",
+ [ERDMA_STATS_TX_BYTES_CNT].name = "tx_bytes_cnt",
+ [ERDMA_STATS_TX_DISABLE_DROP_CNT].name = "tx_disable_drop_cnt",
+ [ERDMA_STATS_TX_BPS_METER_DROP_CNT].name = "tx_bps_limit_drop_cnt",
+ [ERDMA_STATS_TX_PPS_METER_DROP_CNT].name = "tx_pps_limit_drop_cnt",
+ [ERDMA_STATS_RX_PACKETS_CNT].name = "rx_packets_cnt",
+ [ERDMA_STATS_RX_BYTES_CNT].name = "rx_bytes_cnt",
+ [ERDMA_STATS_RX_DISABLE_DROP_CNT].name = "rx_disable_drop_cnt",
+ [ERDMA_STATS_RX_BPS_METER_DROP_CNT].name = "rx_bps_limit_drop_cnt",
+ [ERDMA_STATS_RX_PPS_METER_DROP_CNT].name = "rx_pps_limit_drop_cnt",
+};
+
+struct rdma_hw_stats *erdma_alloc_hw_port_stats(struct ib_device *device,
+ u32 port_num)
+{
+ return rdma_alloc_hw_stats_struct(erdma_descs, ERDMA_STATS_MAX,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
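+/*
+ * Fetch hardware counters: the device writes the statistics block into
+ * the supplied DMA buffer, which is validated by its magic value and
+ * copied into the rdma_hw_stats counters.
+ */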
+static int erdma_query_hw_stats(struct erdma_dev *dev,
+ struct rdma_hw_stats *stats)
+{
+ struct erdma_cmdq_query_stats_resp *resp;
+ struct erdma_cmdq_query_req req;
+ dma_addr_t dma_addr;
+ int err;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
+ CMDQ_OPCODE_GET_STATS);
+
+ resp = dma_pool_zalloc(dev->resp_pool, GFP_KERNEL, &dma_addr);
+ if (!resp)
+ return -ENOMEM;
+
+ req.target_addr = dma_addr;
+ req.target_length = ERDMA_HW_RESP_SIZE;
+
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
+ if (err)
+ goto out;
+
+ if (resp->hdr.magic != ERDMA_HW_RESP_MAGIC) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&stats->value[0], &resp->tx_req_cnt,
+ sizeof(u64) * stats->num_counters);
+
+out:
+ dma_pool_free(dev->resp_pool, resp, dma_addr);
+
+ return err;
+}
+
+int erdma_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u32 port, int index)
+{
+ struct erdma_dev *dev = to_edev(ibdev);
+ int ret;
+
+ if (port == 0)
+ return 0;
+
+ ret = erdma_query_hw_stats(dev, stats);
+ if (ret)
+ return ret;
+
+ return stats->num_counters;
+}
+
+enum rdma_link_layer erdma_get_link_layer(struct ib_device *ibdev, u32 port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
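+/* Program (or clear) a GID table entry in hardware via SET_GID. */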
+static int erdma_set_gid(struct erdma_dev *dev, u8 op, u32 idx,
+ const union ib_gid *gid)
+{
+ struct erdma_cmdq_set_gid_req req;
+ u8 ntype;
+
+ req.cfg = FIELD_PREP(ERDMA_CMD_SET_GID_SGID_IDX_MASK, idx) |
+ FIELD_PREP(ERDMA_CMD_SET_GID_OP_MASK, op);
+
+ if (op == ERDMA_SET_GID_OP_ADD) {
+ if (ipv6_addr_v4mapped((struct in6_addr *)gid))
+ ntype = ERDMA_NETWORK_TYPE_IPV4;
+ else
+ ntype = ERDMA_NETWORK_TYPE_IPV6;
+
+ req.cfg |= FIELD_PREP(ERDMA_CMD_SET_GID_NTYPE_MASK, ntype);
+
+ memcpy(&req.gid, gid, ERDMA_ROCEV2_GID_SIZE);
+ }
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_SET_GID);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
+}
+
+int erdma_add_gid(const struct ib_gid_attr *attr, void **context)
+{
+ struct erdma_dev *dev = to_edev(attr->device);
+ int ret;
+
+ ret = erdma_check_gid_attr(attr);
+ if (ret)
+ return ret;
+
+ return erdma_set_gid(dev, ERDMA_SET_GID_OP_ADD, attr->index,
+ &attr->gid);
+}
+
+int erdma_del_gid(const struct ib_gid_attr *attr, void **context)
+{
+ return erdma_set_gid(to_edev(attr->device), ERDMA_SET_GID_OP_DEL,
+ attr->index, NULL);
+}
+
+int erdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
+{
+ if (index >= ERDMA_MAX_PKEYS)
+ return -EINVAL;
+
+ *pkey = ERDMA_DEFAULT_PKEY;
+ return 0;
+}
+
+void erdma_set_av_cfg(struct erdma_av_cfg *av_cfg, struct erdma_av *av)
+{
+ av_cfg->cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_AV_FL_MASK, av->flow_label) |
+ FIELD_PREP(ERDMA_CMD_CREATE_AV_NTYPE_MASK, av->ntype);
+
+ av_cfg->traffic_class = av->traffic_class;
+ av_cfg->hop_limit = av->hop_limit;
+ av_cfg->sl = av->sl;
+
+ av_cfg->udp_sport = av->udp_sport;
+ av_cfg->sgid_index = av->sgid_index;
+
+ ether_addr_copy(av_cfg->dmac, av->dmac);
+ memcpy(av_cfg->dgid, av->dgid, ERDMA_ROCEV2_GID_SIZE);
+}
+
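+/*
+ * Create an AH: allocate an AH index, derive the UDP source port from
+ * the flow label (or from the AH index when no flow label is given),
+ * and install the address vector in hardware.
+ */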
+int erdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ const struct ib_global_route *grh =
+ rdma_ah_read_grh(init_attr->ah_attr);
+ struct erdma_dev *dev = to_edev(ibah->device);
+ struct erdma_pd *pd = to_epd(ibah->pd);
+ struct erdma_ah *ah = to_eah(ibah);
+ struct erdma_cmdq_create_ah_req req;
+ u32 udp_sport;
+ int ret;
+
+ ret = erdma_check_gid_attr(grh->sgid_attr);
+ if (ret)
+ return ret;
+
+ ret = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_AH]);
+ if (ret < 0)
+ return ret;
+
+ ah->ahn = ret;
+
+ if (grh->flow_label)
+ udp_sport = rdma_flow_label_to_udp_sport(grh->flow_label);
+ else
+ udp_sport =
+ IB_ROCE_UDP_ENCAP_VALID_PORT_MIN + (ah->ahn & 0x3FFF);
+
+ erdma_attr_to_av(init_attr->ah_attr, &ah->av, udp_sport);
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_CREATE_AH);
+
+ req.pdn = pd->pdn;
+ req.ahn = ah->ahn;
+ erdma_set_av_cfg(&req.av_cfg, &ah->av);
+
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
+ if (ret) {
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_AH], ah->ahn);
+ return ret;
+ }
+
+ return 0;
+}
+
+int erdma_destroy_ah(struct ib_ah *ibah, u32 flags)
+{
+ struct erdma_dev *dev = to_edev(ibah->device);
+ struct erdma_pd *pd = to_epd(ibah->pd);
+ struct erdma_ah *ah = to_eah(ibah);
+ struct erdma_cmdq_destroy_ah_req req;
+ int ret;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_DESTROY_AH);
+
+ req.pdn = pd->pdn;
+ req.ahn = ah->ahn;
+
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ flags & RDMA_DESTROY_AH_SLEEPABLE);
+ if (ret)
+ return ret;
+
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_AH], ah->ahn);
+
+ return 0;
+}
+
+int erdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
+{
+ struct erdma_ah *ah = to_eah(ibah);
+
+ memset(ah_attr, 0, sizeof(*ah_attr));
+ erdma_av_to_attr(&ah->av, ah_attr);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
new file mode 100644
index 000000000000..7d8d3fe501d5
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -0,0 +1,491 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#ifndef __ERDMA_VERBS_H__
+#define __ERDMA_VERBS_H__
+
+#include "erdma.h"
+
+/* RDMA Capability. */
+#define ERDMA_MAX_PD (128 * 1024)
+#define ERDMA_MAX_SEND_WR 8192
+#define ERDMA_MAX_ORD 128
+#define ERDMA_MAX_IRD 128
+#define ERDMA_MAX_SGE_RD 1
+#define ERDMA_MAX_CONTEXT (128 * 1024)
+#define ERDMA_MAX_SEND_SGE 6
+#define ERDMA_MAX_RECV_SGE 1
+#define ERDMA_MAX_INLINE (sizeof(struct erdma_sge) * (ERDMA_MAX_SEND_SGE))
+#define ERDMA_MAX_FRMR_PA 512
+
+enum {
+ ERDMA_MMAP_IO_NC = 0, /* no cache */
+};
+
+struct erdma_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ u64 address;
+ u8 mmap_flag;
+};
+
+struct erdma_ext_db_info {
+ bool enable;
+ u16 sdb_off;
+ u16 rdb_off;
+ u16 cdb_off;
+};
+
+struct erdma_ucontext {
+ struct ib_ucontext ibucontext;
+
+ struct erdma_ext_db_info ext_db;
+
+ u64 sdb;
+ u64 rdb;
+ u64 cdb;
+
+ struct rdma_user_mmap_entry *sq_db_mmap_entry;
+ struct rdma_user_mmap_entry *rq_db_mmap_entry;
+ struct rdma_user_mmap_entry *cq_db_mmap_entry;
+
+ /* doorbell records */
+ struct list_head dbrecords_page_list;
+ struct mutex dbrecords_page_mutex;
+};
+
+struct erdma_pd {
+ struct ib_pd ibpd;
+ u32 pdn;
+};
+
+/*
+ * MemoryRegion definition.
+ */
+#define ERDMA_MAX_INLINE_MTT_ENTRIES 4
+#define MTT_SIZE(mtt_cnt) ((mtt_cnt) << 3) /* each MTT entry takes 8 bytes. */
+#define ERDMA_MR_MAX_MTT_CNT 524288
+#define ERDMA_MTT_ENTRY_SIZE 8
+
+#define ERDMA_MR_TYPE_NORMAL 0
+#define ERDMA_MR_TYPE_FRMR 1
+#define ERDMA_MR_TYPE_DMA 2
+
+#define ERDMA_MR_MTT_0LEVEL 0
+#define ERDMA_MR_MTT_1LEVEL 1
+
+#define ERDMA_MR_ACC_RA BIT(0)
+#define ERDMA_MR_ACC_LR BIT(1)
+#define ERDMA_MR_ACC_LW BIT(2)
+#define ERDMA_MR_ACC_RR BIT(3)
+#define ERDMA_MR_ACC_RW BIT(4)
+
+static inline u8 to_erdma_access_flags(int access)
+{
+ return (access & IB_ACCESS_REMOTE_READ ? ERDMA_MR_ACC_RR : 0) |
+ (access & IB_ACCESS_LOCAL_WRITE ? ERDMA_MR_ACC_LW : 0) |
+ (access & IB_ACCESS_REMOTE_WRITE ? ERDMA_MR_ACC_RW : 0) |
+ (access & IB_ACCESS_REMOTE_ATOMIC ? ERDMA_MR_ACC_RA : 0);
+}
+
+/* Hierarchical storage structure for MTT entries */
+struct erdma_mtt {
+ u64 *buf;
+ size_t size;
+
+ bool continuous;
+ union {
+ dma_addr_t buf_dma;
+ struct {
+ dma_addr_t *dma_addrs;
+ u32 npages;
+ u32 level;
+ };
+ };
+
+ struct erdma_mtt *low_level;
+};
+
+struct erdma_mem {
+ struct ib_umem *umem;
+ struct erdma_mtt *mtt;
+
+ u32 page_size;
+ u32 page_offset;
+ u32 page_cnt;
+ u32 mtt_nents;
+
+ u64 va;
+ u64 len;
+};
+
+struct erdma_mr {
+ struct ib_mr ibmr;
+ struct erdma_mem mem;
+ u8 type;
+ u8 access;
+ u8 valid;
+};
+
+struct erdma_user_dbrecords_page {
+ struct list_head list;
+ struct ib_umem *umem;
+ u64 va;
+ int refcnt;
+};
+
+struct erdma_av {
+ u8 port;
+ u8 hop_limit;
+ u8 traffic_class;
+ u8 sl;
+ u8 sgid_index;
+ u16 udp_sport;
+ u32 flow_label;
+ u8 dmac[ETH_ALEN];
+ u8 dgid[ERDMA_ROCEV2_GID_SIZE];
+ enum erdma_network_type ntype;
+};
+
+struct erdma_ah {
+ struct ib_ah ibah;
+ struct erdma_av av;
+ u32 ahn;
+};
+
+struct erdma_uqp {
+ struct erdma_mem sq_mem;
+ struct erdma_mem rq_mem;
+
+ dma_addr_t sq_dbrec_dma;
+ dma_addr_t rq_dbrec_dma;
+
+ struct erdma_user_dbrecords_page *user_dbr_page;
+
+ u32 rq_offset;
+};
+
+struct erdma_kqp {
+ u16 sq_pi;
+ u16 sq_ci;
+
+ u16 rq_pi;
+ u16 rq_ci;
+
+ u64 *swr_tbl;
+ u64 *rwr_tbl;
+
+ void __iomem *hw_sq_db;
+ void __iomem *hw_rq_db;
+
+ void *sq_buf;
+ dma_addr_t sq_buf_dma_addr;
+
+ void *rq_buf;
+ dma_addr_t rq_buf_dma_addr;
+
+ void *sq_dbrec;
+ void *rq_dbrec;
+
+ dma_addr_t sq_dbrec_dma;
+ dma_addr_t rq_dbrec_dma;
+
+ u8 sig_all;
+};
+
+enum erdma_qps_iwarp {
+ ERDMA_QPS_IWARP_IDLE = 0,
+ ERDMA_QPS_IWARP_RTR = 1,
+ ERDMA_QPS_IWARP_RTS = 2,
+ ERDMA_QPS_IWARP_CLOSING = 3,
+ ERDMA_QPS_IWARP_TERMINATE = 4,
+ ERDMA_QPS_IWARP_ERROR = 5,
+ ERDMA_QPS_IWARP_UNDEF = 6,
+ ERDMA_QPS_IWARP_COUNT = 7,
+};
+
+enum erdma_qpa_mask_iwarp {
+ ERDMA_QPA_IWARP_STATE = (1 << 0),
+ ERDMA_QPA_IWARP_LLP_HANDLE = (1 << 2),
+ ERDMA_QPA_IWARP_ORD = (1 << 3),
+ ERDMA_QPA_IWARP_IRD = (1 << 4),
+ ERDMA_QPA_IWARP_SQ_SIZE = (1 << 5),
+ ERDMA_QPA_IWARP_RQ_SIZE = (1 << 6),
+ ERDMA_QPA_IWARP_MPA = (1 << 7),
+ ERDMA_QPA_IWARP_CC = (1 << 8),
+};
+
+enum erdma_qps_rocev2 {
+ ERDMA_QPS_ROCEV2_RESET = 0,
+ ERDMA_QPS_ROCEV2_INIT = 1,
+ ERDMA_QPS_ROCEV2_RTR = 2,
+ ERDMA_QPS_ROCEV2_RTS = 3,
+ ERDMA_QPS_ROCEV2_SQD = 4,
+ ERDMA_QPS_ROCEV2_SQE = 5,
+ ERDMA_QPS_ROCEV2_ERROR = 6,
+ ERDMA_QPS_ROCEV2_COUNT = 7,
+};
+
+enum erdma_qpa_mask_rocev2 {
+ ERDMA_QPA_ROCEV2_STATE = (1 << 0),
+ ERDMA_QPA_ROCEV2_QKEY = (1 << 1),
+ ERDMA_QPA_ROCEV2_AV = (1 << 2),
+ ERDMA_QPA_ROCEV2_SQ_PSN = (1 << 3),
+ ERDMA_QPA_ROCEV2_RQ_PSN = (1 << 4),
+ ERDMA_QPA_ROCEV2_DST_QPN = (1 << 5),
+};
+
+enum erdma_qp_flags {
+ ERDMA_QP_IN_FLUSHING = (1 << 0),
+};
+
+#define ERDMA_QP_ACTIVE 0
+#define ERDMA_QP_PASSIVE 1
+
+struct erdma_mod_qp_params_iwarp {
+ enum erdma_qps_iwarp state;
+ enum erdma_cc_alg cc;
+ u8 qp_type;
+ u8 pd_len;
+ u32 irq_size;
+ u32 orq_size;
+};
+
+struct erdma_qp_attrs_iwarp {
+ enum erdma_qps_iwarp state;
+ u32 cookie;
+};
+
+struct erdma_mod_qp_params_rocev2 {
+ enum erdma_qps_rocev2 state;
+ u32 qkey;
+ u32 sq_psn;
+ u32 rq_psn;
+ u32 dst_qpn;
+ struct erdma_av av;
+};
+
+union erdma_mod_qp_params {
+ struct erdma_mod_qp_params_iwarp iwarp;
+ struct erdma_mod_qp_params_rocev2 rocev2;
+};
+
+struct erdma_qp_attrs_rocev2 {
+ enum erdma_qps_rocev2 state;
+ u32 qkey;
+ u32 dst_qpn;
+ struct erdma_av av;
+};
+
+struct erdma_qp_attrs {
+ enum erdma_cc_alg cc; /* Congestion control algorithm */
+ u32 sq_size;
+ u32 rq_size;
+ u32 orq_size;
+ u32 irq_size;
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ union {
+ struct erdma_qp_attrs_iwarp iwarp;
+ struct erdma_qp_attrs_rocev2 rocev2;
+ };
+};
+
+struct erdma_qp {
+ struct ib_qp ibqp;
+ struct kref ref;
+ struct completion safe_free;
+ struct erdma_dev *dev;
+ struct erdma_cep *cep;
+ struct rw_semaphore state_lock;
+
+ unsigned long flags;
+ struct delayed_work reflush_dwork;
+
+ union {
+ struct erdma_kqp kern_qp;
+ struct erdma_uqp user_qp;
+ };
+
+ struct erdma_cq *scq;
+ struct erdma_cq *rcq;
+
+ struct erdma_qp_attrs attrs;
+ spinlock_t lock;
+};
+
+struct erdma_kcq_info {
+ void *qbuf;
+ dma_addr_t qbuf_dma_addr;
+ u32 ci;
+ u32 cmdsn;
+ u32 notify_cnt;
+
+ spinlock_t lock;
+ u8 __iomem *db;
+ u64 *dbrec;
+ dma_addr_t dbrec_dma;
+};
+
+struct erdma_ucq_info {
+ struct erdma_mem qbuf_mem;
+ struct erdma_user_dbrecords_page *user_dbr_page;
+ dma_addr_t dbrec_dma;
+};
+
+struct erdma_cq {
+ struct ib_cq ibcq;
+ u32 cqn;
+
+ u32 depth;
+ u32 assoc_eqn;
+
+ union {
+ struct erdma_kcq_info kern_cq;
+ struct erdma_ucq_info user_cq;
+ };
+};
+
+#define QP_ID(qp) ((qp)->ibqp.qp_num)
+
+static inline struct erdma_qp *find_qp_by_qpn(struct erdma_dev *dev, int id)
+{
+ return (struct erdma_qp *)xa_load(&dev->qp_xa, id);
+}
+
+static inline struct erdma_cq *find_cq_by_cqn(struct erdma_dev *dev, int id)
+{
+ return (struct erdma_cq *)xa_load(&dev->cq_xa, id);
+}
+
+void erdma_qp_get(struct erdma_qp *qp);
+void erdma_qp_put(struct erdma_qp *qp);
+int erdma_modify_qp_state_iwarp(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_iwarp *params,
+ int mask);
+int erdma_modify_qp_state_rocev2(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_rocev2 *params,
+ int attr_mask);
+void erdma_qp_llp_close(struct erdma_qp *qp);
+void erdma_qp_cm_drop(struct erdma_qp *qp);
+
+static inline bool erdma_device_iwarp(struct erdma_dev *dev)
+{
+ return dev->proto == ERDMA_PROTO_IWARP;
+}
+
+static inline bool erdma_device_rocev2(struct erdma_dev *dev)
+{
+ return dev->proto == ERDMA_PROTO_ROCEV2;
+}
+
+static inline struct erdma_ucontext *to_ectx(struct ib_ucontext *ibctx)
+{
+ return container_of(ibctx, struct erdma_ucontext, ibucontext);
+}
+
+static inline struct erdma_pd *to_epd(struct ib_pd *pd)
+{
+ return container_of(pd, struct erdma_pd, ibpd);
+}
+
+static inline struct erdma_mr *to_emr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct erdma_mr, ibmr);
+}
+
+static inline struct erdma_qp *to_eqp(struct ib_qp *qp)
+{
+ return container_of(qp, struct erdma_qp, ibqp);
+}
+
+static inline struct erdma_cq *to_ecq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct erdma_cq, ibcq);
+}
+
+static inline struct erdma_ah *to_eah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct erdma_ah, ibah);
+}
+
+static inline int erdma_check_gid_attr(const struct ib_gid_attr *attr)
+{
+ u8 ntype = rdma_gid_attr_network_type(attr);
+
+ if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6)
+ return -EINVAL;
+
+ return 0;
+}
+
+static inline struct erdma_user_mmap_entry *
+to_emmap(struct rdma_user_mmap_entry *ibmmap)
+{
+ return container_of(ibmmap, struct erdma_user_mmap_entry, rdma_entry);
+}
+
+int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *data);
+void erdma_dealloc_ucontext(struct ib_ucontext *ibctx);
+int erdma_query_device(struct ib_device *dev, struct ib_device_attr *attr,
+ struct ib_udata *data);
+int erdma_get_port_immutable(struct ib_device *dev, u32 port,
+ struct ib_port_immutable *ib_port_immutable);
+int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+int erdma_query_port(struct ib_device *dev, u32 port,
+ struct ib_port_attr *attr);
+int erdma_query_gid(struct ib_device *dev, u32 port, int idx,
+ union ib_gid *gid);
+int erdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *data);
+int erdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
+ struct ib_udata *data);
+int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
+ struct ib_qp_init_attr *init_attr);
+int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
+ struct ib_udata *data);
+int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
+int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+void erdma_disassociate_ucontext(struct ib_ucontext *ibcontext);
+int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
+ u64 virt, int access, struct ib_dmah *dmah,
+ struct ib_udata *udata);
+struct ib_mr *erdma_get_dma_mr(struct ib_pd *ibpd, int rights);
+int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *data);
+int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma);
+void erdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
+void erdma_qp_get_ref(struct ib_qp *ibqp);
+void erdma_qp_put_ref(struct ib_qp *ibqp);
+struct ib_qp *erdma_get_ibqp(struct ib_device *dev, int id);
+int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
+ const struct ib_send_wr **bad_send_wr);
+int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr);
+int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+void erdma_remove_cqes_of_qp(struct ib_cq *ibcq, u32 qpn);
+struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+ u32 max_num_sg);
+int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
+void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason);
+void erdma_set_mtu(struct erdma_dev *dev, u32 mtu);
+struct rdma_hw_stats *erdma_alloc_hw_port_stats(struct ib_device *device,
+ u32 port_num);
+int erdma_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u32 port, int index);
+enum rdma_link_layer erdma_get_link_layer(struct ib_device *ibdev,
+ u32 port_num);
+int erdma_add_gid(const struct ib_gid_attr *attr, void **context);
+int erdma_del_gid(const struct ib_gid_attr *attr, void **context);
+int erdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey);
+void erdma_set_av_cfg(struct erdma_av_cfg *av_cfg, struct erdma_av *av);
+int erdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
+int erdma_destroy_ah(struct ib_ah *ibah, u32 flags);
+int erdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
+
+#endif
diff --git a/drivers/infiniband/hw/hfi1/Kconfig b/drivers/infiniband/hw/hfi1/Kconfig
new file mode 100644
index 000000000000..14b92e12bf29
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INFINIBAND_HFI1
+ tristate "Cornelis OPX Gen1 support"
+ depends on X86_64 && INFINIBAND_RDMAVT && I2C && !UML
+ select MMU_NOTIFIER
+ select CRC32
+ select I2C_ALGOBIT
+ help
+ This is a low-level driver for the Cornelis OPX Gen1 adapter.
+config HFI1_DEBUG_SDMA_ORDER
+ bool "HFI1 SDMA Order debug"
+ depends on INFINIBAND_HFI1
+ default n
+ help
+ This is a debug flag to test for out-of-order
+ SDMA completions for unit testing.
+config SDMA_VERBOSITY
+ bool "Config SDMA Verbosity"
+ depends on INFINIBAND_HFI1
+ default n
+ help
+ This is a configuration flag to enable verbose
+ SDMA debug
diff --git a/drivers/infiniband/hw/hfi1/Makefile b/drivers/infiniband/hw/hfi1/Makefile
new file mode 100644
index 000000000000..5d977f363684
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/Makefile
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# HFI driver
+#
+#
+#
+# Called from the kernel module build system.
+#
+obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o
+
+hfi1-y := \
+ affinity.o \
+ aspm.o \
+ chip.o \
+ device.o \
+ driver.o \
+ efivar.o \
+ eprom.o \
+ exp_rcv.o \
+ file_ops.o \
+ firmware.o \
+ init.o \
+ intr.o \
+ iowait.o \
+ ipoib_main.o \
+ ipoib_rx.o \
+ ipoib_tx.o \
+ mad.o \
+ mmu_rb.o \
+ msix.o \
+ netdev_rx.o \
+ opfn.o \
+ pcie.o \
+ pin_system.o \
+ pio.o \
+ pio_copy.o \
+ platform.o \
+ qp.o \
+ qsfp.o \
+ rc.o \
+ ruc.o \
+ sdma.o \
+ sysfs.o \
+ tid_rdma.o \
+ trace.o \
+ uc.o \
+ ud.o \
+ user_exp_rcv.o \
+ user_pages.o \
+ user_sdma.o \
+ verbs.o \
+ verbs_txreq.o \
+ vnic_main.o \
+ vnic_sdma.o
+
+ifdef CONFIG_DEBUG_FS
+hfi1-y += debugfs.o
+ifdef CONFIG_FAULT_INJECTION
+ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+hfi1-y += fault.o
+endif
+endif
+endif
+
+CFLAGS_trace.o = -I$(src)
+ifdef MVERSION
+CFLAGS_driver.o = -DHFI_DRIVER_VERSION_BASE=\"$(MVERSION)\"
+endif
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
new file mode 100644
index 000000000000..ee7fedc67b86
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -0,0 +1,1170 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015 - 2020 Intel Corporation.
+ */
+
+#include <linux/topology.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/numa.h>
+
+#include "hfi.h"
+#include "affinity.h"
+#include "sdma.h"
+#include "trace.h"
+
+struct hfi1_affinity_node_list node_affinity = {
+ .list = LIST_HEAD_INIT(node_affinity.list),
+ .lock = __MUTEX_INITIALIZER(node_affinity.lock)
+};
+
+/* Name of IRQ types, indexed by enum irq_type */
+static const char * const irq_type_names[] = {
+ "SDMA",
+ "RCVCTXT",
+ "NETDEVCTXT",
+ "GENERAL",
+ "OTHER",
+};
+
+/* Per NUMA node count of HFI devices */
+static unsigned int *hfi1_per_node_cntr;
+
+static inline void init_cpu_mask_set(struct cpu_mask_set *set)
+{
+ cpumask_clear(&set->mask);
+ cpumask_clear(&set->used);
+ set->gen = 0;
+}
+
+/* Increment generation of CPU set if needed */
+static void _cpu_mask_set_gen_inc(struct cpu_mask_set *set)
+{
+ if (cpumask_equal(&set->mask, &set->used)) {
+ /*
+ * We've used up all the CPUs, bump up the generation
+ * and reset the 'used' map
+ */
+ set->gen++;
+ cpumask_clear(&set->used);
+ }
+}
+
+static void _cpu_mask_set_gen_dec(struct cpu_mask_set *set)
+{
+ if (cpumask_empty(&set->used) && set->gen) {
+ set->gen--;
+ cpumask_copy(&set->used, &set->mask);
+ }
+}
+
+/* Get the first CPU from the list of unused CPUs in a CPU set data structure */
+static int cpu_mask_set_get_first(struct cpu_mask_set *set, cpumask_var_t diff)
+{
+ int cpu;
+
+ if (!diff || !set)
+ return -EINVAL;
+
+ _cpu_mask_set_gen_inc(set);
+
+ /* Find out CPUs left in CPU mask */
+ cpumask_andnot(diff, &set->mask, &set->used);
+
+ cpu = cpumask_first(diff);
+ if (cpu >= nr_cpu_ids) /* empty */
+ cpu = -EINVAL;
+ else
+ cpumask_set_cpu(cpu, &set->used);
+
+ return cpu;
+}
+
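+/* Return a CPU to the set's unused pool; roll the generation back when
+ * nothing in the set remains in use.
+ */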
+static void cpu_mask_set_put(struct cpu_mask_set *set, int cpu)
+{
+ if (!set)
+ return;
+
+ cpumask_clear_cpu(cpu, &set->used);
+ _cpu_mask_set_gen_dec(set);
+}
+
+/* Initialize non-HT cpu cores mask */
+void init_real_cpu_mask(void)
+{
+ int possible, curr_cpu, ht;
+
+ /* Start with cpu online mask as the real cpu mask */
+ cpumask_copy(&node_affinity.real_cpu_mask, cpu_online_mask);
+
+ /*
+ * Remove HT cores from the real cpu mask. Do this in two steps below.
+ */
+ possible = cpumask_weight(&node_affinity.real_cpu_mask);
+ ht = cpumask_weight(topology_sibling_cpumask(
+ cpumask_first(&node_affinity.real_cpu_mask)));
+ /*
+ * Step 1. Skip over the first N HT siblings and use them as the
+ * "real" cores. Assumes that HT cores are not enumerated in
+ * succession (except in the single core case).
+ */
+ curr_cpu = cpumask_nth(possible / ht, &node_affinity.real_cpu_mask) + 1;
+
+ /* Step 2. Remove the remaining HT siblings. */
+ cpumask_clear_cpus(&node_affinity.real_cpu_mask, curr_cpu, nr_cpu_ids - curr_cpu);
+}
+
+int node_affinity_init(void)
+{
+ int node;
+ struct pci_dev *dev = NULL;
+ const struct pci_device_id *ids = hfi1_pci_tbl;
+
+ cpumask_clear(&node_affinity.proc.used);
+ cpumask_copy(&node_affinity.proc.mask, cpu_online_mask);
+
+ node_affinity.proc.gen = 0;
+ node_affinity.num_core_siblings =
+ cpumask_weight(topology_sibling_cpumask(
+ cpumask_first(&node_affinity.proc.mask)
+ ));
+ node_affinity.num_possible_nodes = num_possible_nodes();
+ node_affinity.num_online_nodes = num_online_nodes();
+ node_affinity.num_online_cpus = num_online_cpus();
+
+ /*
+ * The real cpu mask is part of the affinity struct but it has to be
+ * initialized early. It is needed to calculate the number of user
+ * contexts in set_up_context_variables().
+ */
+ init_real_cpu_mask();
+
+ hfi1_per_node_cntr = kcalloc(node_affinity.num_possible_nodes,
+ sizeof(*hfi1_per_node_cntr), GFP_KERNEL);
+ if (!hfi1_per_node_cntr)
+ return -ENOMEM;
+
+ while (ids->vendor) {
+ dev = NULL;
+ while ((dev = pci_get_device(ids->vendor, ids->device, dev))) {
+ node = pcibus_to_node(dev->bus);
+ if (node < 0)
+ goto out;
+
+ hfi1_per_node_cntr[node]++;
+ }
+ ids++;
+ }
+
+ return 0;
+
+out:
+ /*
+ * Invalid PCI NUMA node information found, note it, and populate
+ * our database 1:1.
+ */
+ pr_err("HFI: Invalid PCI NUMA node. Performance may be affected\n");
+ pr_err("HFI: System BIOS may need to be upgraded\n");
+ for (node = 0; node < node_affinity.num_possible_nodes; node++)
+ hfi1_per_node_cntr[node] = 1;
+
+ pci_dev_put(dev);
+
+ return 0;
+}
+
+static void node_affinity_destroy(struct hfi1_affinity_node *entry)
+{
+ free_percpu(entry->comp_vect_affinity);
+ kfree(entry);
+}
+
+void node_affinity_destroy_all(void)
+{
+ struct list_head *pos, *q;
+ struct hfi1_affinity_node *entry;
+
+ mutex_lock(&node_affinity.lock);
+ list_for_each_safe(pos, q, &node_affinity.list) {
+ entry = list_entry(pos, struct hfi1_affinity_node,
+ list);
+ list_del(pos);
+ node_affinity_destroy(entry);
+ }
+ mutex_unlock(&node_affinity.lock);
+ kfree(hfi1_per_node_cntr);
+}
+
+static struct hfi1_affinity_node *node_affinity_allocate(int node)
+{
+ struct hfi1_affinity_node *entry;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return NULL;
+ entry->node = node;
+ entry->comp_vect_affinity = alloc_percpu(u16);
+ INIT_LIST_HEAD(&entry->list);
+
+ return entry;
+}
+
+/*
+ * It appends an entry to the list.
+ * It *must* be called with node_affinity.lock held.
+ */
+static void node_affinity_add_tail(struct hfi1_affinity_node *entry)
+{
+ list_add_tail(&entry->list, &node_affinity.list);
+}
+
+/* It must be called with node_affinity.lock held */
+static struct hfi1_affinity_node *node_affinity_lookup(int node)
+{
+ struct hfi1_affinity_node *entry;
+
+ list_for_each_entry(entry, &node_affinity.list, list) {
+ if (entry->node == node)
+ return entry;
+ }
+
+ return NULL;
+}
+
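+/*
+ * Pick the CPU in the mask with the fewest completion vectors already
+ * assigned to it and bump its per-CPU counter.
+ */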
+static int per_cpu_affinity_get(cpumask_var_t possible_cpumask,
+ u16 __percpu *comp_vect_affinity)
+{
+ int curr_cpu;
+ u16 cntr;
+ u16 prev_cntr;
+ int ret_cpu;
+
+ if (!possible_cpumask) {
+ ret_cpu = -EINVAL;
+ goto fail;
+ }
+
+ if (!comp_vect_affinity) {
+ ret_cpu = -EINVAL;
+ goto fail;
+ }
+
+ ret_cpu = cpumask_first(possible_cpumask);
+ if (ret_cpu >= nr_cpu_ids) {
+ ret_cpu = -EINVAL;
+ goto fail;
+ }
+
+ prev_cntr = *per_cpu_ptr(comp_vect_affinity, ret_cpu);
+ for_each_cpu(curr_cpu, possible_cpumask) {
+ cntr = *per_cpu_ptr(comp_vect_affinity, curr_cpu);
+
+ if (cntr < prev_cntr) {
+ ret_cpu = curr_cpu;
+ prev_cntr = cntr;
+ }
+ }
+
+ *per_cpu_ptr(comp_vect_affinity, ret_cpu) += 1;
+
+fail:
+ return ret_cpu;
+}
+
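+/* Find the CPU in the mask with the highest usage counter and decrement it. */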
+static int per_cpu_affinity_put_max(cpumask_var_t possible_cpumask,
+ u16 __percpu *comp_vect_affinity)
+{
+ int curr_cpu;
+ int max_cpu;
+ u16 cntr;
+ u16 prev_cntr;
+
+ if (!possible_cpumask)
+ return -EINVAL;
+
+ if (!comp_vect_affinity)
+ return -EINVAL;
+
+ max_cpu = cpumask_first(possible_cpumask);
+ if (max_cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ prev_cntr = *per_cpu_ptr(comp_vect_affinity, max_cpu);
+ for_each_cpu(curr_cpu, possible_cpumask) {
+ cntr = *per_cpu_ptr(comp_vect_affinity, curr_cpu);
+
+ if (cntr > prev_cntr) {
+ max_cpu = curr_cpu;
+ prev_cntr = cntr;
+ }
+ }
+
+ *per_cpu_ptr(comp_vect_affinity, max_cpu) -= 1;
+
+ return max_cpu;
+}
+
+/*
+ * Non-interrupt CPUs are used first, then interrupt CPUs.
+ * Two already allocated cpu masks must be passed.
+ */
+static int _dev_comp_vect_cpu_get(struct hfi1_devdata *dd,
+ struct hfi1_affinity_node *entry,
+ cpumask_var_t non_intr_cpus,
+ cpumask_var_t available_cpus)
+ __must_hold(&node_affinity.lock)
+{
+ int cpu;
+ struct cpu_mask_set *set = dd->comp_vect;
+
+ lockdep_assert_held(&node_affinity.lock);
+ if (!non_intr_cpus) {
+ cpu = -1;
+ goto fail;
+ }
+
+ if (!available_cpus) {
+ cpu = -1;
+ goto fail;
+ }
+
+ /* Available CPUs for pinning completion vectors */
+ _cpu_mask_set_gen_inc(set);
+ cpumask_andnot(available_cpus, &set->mask, &set->used);
+
+ /* Available CPUs without SDMA engine interrupts */
+ cpumask_andnot(non_intr_cpus, available_cpus,
+ &entry->def_intr.used);
+
+ /* If there are non-interrupt CPUs available, use them first */
+ cpu = cpumask_first(non_intr_cpus);
+
+ /* Otherwise, use interrupt CPUs */
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_first(available_cpus);
+
+ if (cpu >= nr_cpu_ids) { /* empty */
+ cpu = -1;
+ goto fail;
+ }
+ cpumask_set_cpu(cpu, &set->used);
+
+fail:
+ return cpu;
+}
+
+static void _dev_comp_vect_cpu_put(struct hfi1_devdata *dd, int cpu)
+{
+ struct cpu_mask_set *set = dd->comp_vect;
+
+ if (cpu < 0)
+ return;
+
+ cpu_mask_set_put(set, cpu);
+}
+
+/* _dev_comp_vect_mappings_destroy() is reentrant */
+static void _dev_comp_vect_mappings_destroy(struct hfi1_devdata *dd)
+{
+ int i, cpu;
+
+ if (!dd->comp_vect_mappings)
+ return;
+
+ for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
+ cpu = dd->comp_vect_mappings[i];
+ _dev_comp_vect_cpu_put(dd, cpu);
+ dd->comp_vect_mappings[i] = -1;
+ hfi1_cdbg(AFFINITY,
+ "[%s] Release CPU %d from completion vector %d",
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), cpu, i);
+ }
+
+ kfree(dd->comp_vect_mappings);
+ dd->comp_vect_mappings = NULL;
+}
+
+/*
+ * This function creates the table for looking up CPUs for completion vectors.
+ * num_comp_vectors needs to have been initialized before calling this function.
+ */
+static int _dev_comp_vect_mappings_create(struct hfi1_devdata *dd,
+ struct hfi1_affinity_node *entry)
+ __must_hold(&node_affinity.lock)
+{
+ int i, cpu, ret;
+ cpumask_var_t non_intr_cpus;
+ cpumask_var_t available_cpus;
+
+ lockdep_assert_held(&node_affinity.lock);
+
+ if (!zalloc_cpumask_var(&non_intr_cpus, GFP_KERNEL))
+ return -ENOMEM;
+
+ if (!zalloc_cpumask_var(&available_cpus, GFP_KERNEL)) {
+ free_cpumask_var(non_intr_cpus);
+ return -ENOMEM;
+ }
+
+ dd->comp_vect_mappings = kcalloc(dd->comp_vect_possible_cpus,
+ sizeof(*dd->comp_vect_mappings),
+ GFP_KERNEL);
+ if (!dd->comp_vect_mappings) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ for (i = 0; i < dd->comp_vect_possible_cpus; i++)
+ dd->comp_vect_mappings[i] = -1;
+
+ for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
+ cpu = _dev_comp_vect_cpu_get(dd, entry, non_intr_cpus,
+ available_cpus);
+ if (cpu < 0) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ dd->comp_vect_mappings[i] = cpu;
+ hfi1_cdbg(AFFINITY,
+ "[%s] Completion Vector %d -> CPU %d",
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu);
+ }
+
+ free_cpumask_var(available_cpus);
+ free_cpumask_var(non_intr_cpus);
+ return 0;
+
+fail:
+ free_cpumask_var(available_cpus);
+ free_cpumask_var(non_intr_cpus);
+ _dev_comp_vect_mappings_destroy(dd);
+
+ return ret;
+}
+
+int hfi1_comp_vectors_set_up(struct hfi1_devdata *dd)
+{
+ int ret;
+ struct hfi1_affinity_node *entry;
+
+ mutex_lock(&node_affinity.lock);
+ entry = node_affinity_lookup(dd->node);
+ if (!entry) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ ret = _dev_comp_vect_mappings_create(dd, entry);
+unlock:
+ mutex_unlock(&node_affinity.lock);
+
+ return ret;
+}
+
+void hfi1_comp_vectors_clean_up(struct hfi1_devdata *dd)
+{
+ _dev_comp_vect_mappings_destroy(dd);
+}
+
+int hfi1_comp_vect_mappings_lookup(struct rvt_dev_info *rdi, int comp_vect)
+{
+ struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
+ struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
+
+ if (!dd->comp_vect_mappings)
+ return -EINVAL;
+ if (comp_vect >= dd->comp_vect_possible_cpus)
+ return -EINVAL;
+
+ return dd->comp_vect_mappings[comp_vect];
+}
+
+/*
+ * It assumes dd->comp_vect_possible_cpus is available.
+ */
+static int _dev_comp_vect_cpu_mask_init(struct hfi1_devdata *dd,
+ struct hfi1_affinity_node *entry,
+ bool first_dev_init)
+ __must_hold(&node_affinity.lock)
+{
+ int i, j, curr_cpu;
+ int possible_cpus_comp_vect = 0;
+ struct cpumask *dev_comp_vect_mask = &dd->comp_vect->mask;
+
+ lockdep_assert_held(&node_affinity.lock);
+ /*
+ * If there's only one CPU available for completion vectors, then
+ * there will only be one completion vector available. Otherwise,
+ * the number of completion vectors available will be the number of
+ * available CPUs divided by the number of devices in the
+ * local NUMA node.
+ */
+ if (cpumask_weight(&entry->comp_vect_mask) == 1) {
+ possible_cpus_comp_vect = 1;
+ dd_dev_warn(dd,
+ "Number of kernel receive queues is too large for completion vector affinity to be effective\n");
+ } else {
+ possible_cpus_comp_vect +=
+ cpumask_weight(&entry->comp_vect_mask) /
+ hfi1_per_node_cntr[dd->node];
+
+ /*
+ * If the available completion vector CPUs don't divide
+ * evenly among devices, then the first device to be
+ * initialized gets an extra CPU.
+ */
+ if (first_dev_init &&
+ cpumask_weight(&entry->comp_vect_mask) %
+ hfi1_per_node_cntr[dd->node] != 0)
+ possible_cpus_comp_vect++;
+ }
+
+ dd->comp_vect_possible_cpus = possible_cpus_comp_vect;
+
+ /* Reserving CPUs for device completion vector */
+ for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
+ curr_cpu = per_cpu_affinity_get(&entry->comp_vect_mask,
+ entry->comp_vect_affinity);
+ if (curr_cpu < 0)
+ goto fail;
+
+ cpumask_set_cpu(curr_cpu, dev_comp_vect_mask);
+ }
+
+ hfi1_cdbg(AFFINITY,
+ "[%s] Completion vector affinity CPU set(s) %*pbl",
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi),
+ cpumask_pr_args(dev_comp_vect_mask));
+
+ return 0;
+
+fail:
+ for (j = 0; j < i; j++)
+ per_cpu_affinity_put_max(&entry->comp_vect_mask,
+ entry->comp_vect_affinity);
+
+ return curr_cpu;
+}
+
+/*
+ * It assumes dd->comp_vect_possible_cpus is available.
+ */
+static void _dev_comp_vect_cpu_mask_clean_up(struct hfi1_devdata *dd,
+ struct hfi1_affinity_node *entry)
+ __must_hold(&node_affinity.lock)
+{
+ int i, cpu;
+
+ lockdep_assert_held(&node_affinity.lock);
+ if (!dd->comp_vect_possible_cpus)
+ return;
+
+ for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
+ cpu = per_cpu_affinity_put_max(&dd->comp_vect->mask,
+ entry->comp_vect_affinity);
+ /* Clearing CPU in device completion vector cpu mask */
+ if (cpu >= 0)
+ cpumask_clear_cpu(cpu, &dd->comp_vect->mask);
+ }
+
+ dd->comp_vect_possible_cpus = 0;
+}
+
+/*
+ * Interrupt affinity.
+ *
+ * non-rcv avail gets a default mask that
+ * starts as possible cpus with threads reset
+ * and each rcv avail reset.
+ *
+ * rcv avail gets node relative 1 wrapping back
+ * to the node relative 1 as necessary.
+ *
+ */
+int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
+{
+ struct hfi1_affinity_node *entry;
+ const struct cpumask *local_mask;
+ int curr_cpu, possible, i, ret;
+ bool new_entry = false;
+
+ local_mask = cpumask_of_node(dd->node);
+ if (cpumask_first(local_mask) >= nr_cpu_ids)
+ local_mask = topology_core_cpumask(0);
+
+ mutex_lock(&node_affinity.lock);
+ entry = node_affinity_lookup(dd->node);
+
+ /*
+ * If this is the first time this NUMA node's affinity is used,
+ * create an entry in the global affinity structure and initialize it.
+ */
+ if (!entry) {
+ entry = node_affinity_allocate(dd->node);
+ if (!entry) {
+ dd_dev_err(dd,
+ "Unable to allocate global affinity node\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+ new_entry = true;
+
+ init_cpu_mask_set(&entry->def_intr);
+ init_cpu_mask_set(&entry->rcv_intr);
+ cpumask_clear(&entry->comp_vect_mask);
+ cpumask_clear(&entry->general_intr_mask);
+ /* Use the "real" cpu mask of this node as the default */
+ cpumask_and(&entry->def_intr.mask, &node_affinity.real_cpu_mask,
+ local_mask);
+
+ /* fill in the receive list */
+ possible = cpumask_weight(&entry->def_intr.mask);
+ curr_cpu = cpumask_first(&entry->def_intr.mask);
+
+ if (possible == 1) {
+ /* only one CPU, everyone will use it */
+ cpumask_set_cpu(curr_cpu, &entry->rcv_intr.mask);
+ cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
+ } else {
+ /*
+ * The general/control context will be the first CPU in
+ * the default list, so it is removed from the default
+ * list and added to the general interrupt list.
+ */
+ cpumask_clear_cpu(curr_cpu, &entry->def_intr.mask);
+ cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
+ curr_cpu = cpumask_next(curr_cpu,
+ &entry->def_intr.mask);
+
+ /*
+ * Remove the remaining kernel receive queues from
+ * the default list and add them to the receive list.
+ */
+ for (i = 0;
+ i < (dd->n_krcv_queues - 1) *
+ hfi1_per_node_cntr[dd->node];
+ i++) {
+ cpumask_clear_cpu(curr_cpu,
+ &entry->def_intr.mask);
+ cpumask_set_cpu(curr_cpu,
+ &entry->rcv_intr.mask);
+ curr_cpu = cpumask_next(curr_cpu,
+ &entry->def_intr.mask);
+ if (curr_cpu >= nr_cpu_ids)
+ break;
+ }
+
+ /*
+ * If there ends up being 0 CPU cores leftover for SDMA
+ * engines, use the same CPU cores as general/control
+ * context.
+ */
+ if (cpumask_empty(&entry->def_intr.mask))
+ cpumask_copy(&entry->def_intr.mask,
+ &entry->general_intr_mask);
+ }
+
+ /* Determine completion vector CPUs for the entire node */
+ cpumask_and(&entry->comp_vect_mask,
+ &node_affinity.real_cpu_mask, local_mask);
+ cpumask_andnot(&entry->comp_vect_mask,
+ &entry->comp_vect_mask,
+ &entry->rcv_intr.mask);
+ cpumask_andnot(&entry->comp_vect_mask,
+ &entry->comp_vect_mask,
+ &entry->general_intr_mask);
+
+ /*
+ * If there ends up being 0 CPU cores leftover for completion
+ * vectors, use the same CPU core as the general/control
+ * context.
+ */
+ if (cpumask_empty(&entry->comp_vect_mask))
+ cpumask_copy(&entry->comp_vect_mask,
+ &entry->general_intr_mask);
+ }
+
+ ret = _dev_comp_vect_cpu_mask_init(dd, entry, new_entry);
+ if (ret < 0)
+ goto fail;
+
+ if (new_entry)
+ node_affinity_add_tail(entry);
+
+ dd->affinity_entry = entry;
+ mutex_unlock(&node_affinity.lock);
+
+ return 0;
+
+fail:
+ if (new_entry)
+ node_affinity_destroy(entry);
+ mutex_unlock(&node_affinity.lock);
+ return ret;
+}
+
+void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
+{
+ struct hfi1_affinity_node *entry;
+
+ mutex_lock(&node_affinity.lock);
+ if (!dd->affinity_entry)
+ goto unlock;
+ entry = node_affinity_lookup(dd->node);
+ if (!entry)
+ goto unlock;
+
+ /*
+ * Free device completion vector CPUs to be used by future
+ * completion vectors
+ */
+ _dev_comp_vect_cpu_mask_clean_up(dd, entry);
+unlock:
+ dd->affinity_entry = NULL;
+ mutex_unlock(&node_affinity.lock);
+}
+
+/*
+ * Update the irq affinity hint for an SDMA msix vector after the user has
+ * changed it through the /proc/irq interface. Only a single CPU in the
+ * mask is accepted.
+ */
+static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu)
+{
+ struct sdma_engine *sde = msix->arg;
+ struct hfi1_devdata *dd = sde->dd;
+ struct hfi1_affinity_node *entry;
+ struct cpu_mask_set *set;
+ int i, old_cpu;
+
+ if (cpu > num_online_cpus() || cpu == sde->cpu)
+ return;
+
+ mutex_lock(&node_affinity.lock);
+ entry = node_affinity_lookup(dd->node);
+ if (!entry)
+ goto unlock;
+
+ old_cpu = sde->cpu;
+ sde->cpu = cpu;
+ cpumask_clear(&msix->mask);
+ cpumask_set_cpu(cpu, &msix->mask);
+ dd_dev_dbg(dd, "IRQ: %u, type %s engine %u -> cpu: %d\n",
+ msix->irq, irq_type_names[msix->type],
+ sde->this_idx, cpu);
+ irq_set_affinity_hint(msix->irq, &msix->mask);
+
+ /*
+ * Set the new cpu in the hfi1_affinity_node and clean
+ * the old cpu if it is not used by any other IRQ
+ */
+ set = &entry->def_intr;
+ cpumask_set_cpu(cpu, &set->mask);
+ cpumask_set_cpu(cpu, &set->used);
+ for (i = 0; i < dd->msix_info.max_requested; i++) {
+ struct hfi1_msix_entry *other_msix;
+
+ other_msix = &dd->msix_info.msix_entries[i];
+ if (other_msix->type != IRQ_SDMA || other_msix == msix)
+ continue;
+
+ if (cpumask_test_cpu(old_cpu, &other_msix->mask))
+ goto unlock;
+ }
+ cpumask_clear_cpu(old_cpu, &set->mask);
+ cpumask_clear_cpu(old_cpu, &set->used);
+unlock:
+ mutex_unlock(&node_affinity.lock);
+}
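As an aside, the notifier path above is driven by affinity changes made from user space. A minimal sketch of such a change, assuming a hypothetical IRQ number and CPU (the IRQ 100 and CPU 3 below are placeholders, not values from this patch; /proc/irq/<N>/smp_affinity_list takes a CPU list):

/* Illustrative only: pin a hypothetical IRQ to a single CPU from user space. */
#include <stdio.h>
#include <stdlib.h>

static int pin_irq_to_cpu(unsigned int irq, unsigned int cpu)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/irq/%u/smp_affinity_list", irq);
	f = fopen(path, "w");
	if (!f)
		return -1;
	/* A single CPU number; the driver's notifier then sees a one-CPU mask. */
	fprintf(f, "%u\n", cpu);
	return fclose(f) ? -1 : 0;
}

int main(void)
{
	return pin_irq_to_cpu(100, 3) ? EXIT_FAILURE : EXIT_SUCCESS;
}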
+
+static void hfi1_irq_notifier_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ int cpu = cpumask_first(mask);
+ struct hfi1_msix_entry *msix = container_of(notify,
+ struct hfi1_msix_entry,
+ notify);
+
+ /* Only one CPU configuration supported currently */
+ hfi1_update_sdma_affinity(msix, cpu);
+}
+
+static void hfi1_irq_notifier_release(struct kref *ref)
+{
+ /*
+ * This is required by affinity notifier. We don't have anything to
+ * free here.
+ */
+}
+
+static void hfi1_setup_sdma_notifier(struct hfi1_msix_entry *msix)
+{
+ struct irq_affinity_notify *notify = &msix->notify;
+
+ notify->irq = msix->irq;
+ notify->notify = hfi1_irq_notifier_notify;
+ notify->release = hfi1_irq_notifier_release;
+
+ if (irq_set_affinity_notifier(notify->irq, notify))
+ pr_err("Failed to register sdma irq affinity notifier for irq %d\n",
+ notify->irq);
+}
+
+static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix)
+{
+ struct irq_affinity_notify *notify = &msix->notify;
+
+ if (irq_set_affinity_notifier(notify->irq, NULL))
+ pr_err("Failed to cleanup sdma irq affinity notifier for irq %d\n",
+ notify->irq);
+}
+
+/*
+ * Function sets the irq affinity for msix.
+ * It *must* be called with node_affinity.lock held.
+ */
+static int get_irq_affinity(struct hfi1_devdata *dd,
+ struct hfi1_msix_entry *msix)
+{
+ cpumask_var_t diff;
+ struct hfi1_affinity_node *entry;
+ struct cpu_mask_set *set = NULL;
+ struct sdma_engine *sde = NULL;
+ struct hfi1_ctxtdata *rcd = NULL;
+ char extra[64];
+ int cpu = -1;
+
+ extra[0] = '\0';
+ cpumask_clear(&msix->mask);
+
+ entry = node_affinity_lookup(dd->node);
+
+ switch (msix->type) {
+ case IRQ_SDMA:
+ sde = (struct sdma_engine *)msix->arg;
+ scnprintf(extra, 64, "engine %u", sde->this_idx);
+ set = &entry->def_intr;
+ break;
+ case IRQ_GENERAL:
+ cpu = cpumask_first(&entry->general_intr_mask);
+ break;
+ case IRQ_RCVCTXT:
+ rcd = (struct hfi1_ctxtdata *)msix->arg;
+ if (rcd->ctxt == HFI1_CTRL_CTXT)
+ cpu = cpumask_first(&entry->general_intr_mask);
+ else
+ set = &entry->rcv_intr;
+ scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
+ break;
+ case IRQ_NETDEVCTXT:
+ rcd = (struct hfi1_ctxtdata *)msix->arg;
+ set = &entry->def_intr;
+ scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
+ break;
+ default:
+ dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type);
+ return -EINVAL;
+ }
+
+ /*
+ * The general and control contexts are placed on a particular
+ * CPU, which is set above. Skip accounting for it. Everything else
+ * finds its CPU here.
+ */
+ if (cpu == -1 && set) {
+ if (!zalloc_cpumask_var(&diff, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpu = cpu_mask_set_get_first(set, diff);
+ if (cpu < 0) {
+ free_cpumask_var(diff);
+ dd_dev_err(dd, "Failure to obtain CPU for IRQ\n");
+ return cpu;
+ }
+
+ free_cpumask_var(diff);
+ }
+
+ cpumask_set_cpu(cpu, &msix->mask);
+ dd_dev_info(dd, "IRQ: %u, type %s %s -> cpu: %d\n",
+ msix->irq, irq_type_names[msix->type],
+ extra, cpu);
+ irq_set_affinity_hint(msix->irq, &msix->mask);
+
+ if (msix->type == IRQ_SDMA) {
+ sde->cpu = cpu;
+ hfi1_setup_sdma_notifier(msix);
+ }
+
+ return 0;
+}
+
+int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
+{
+ int ret;
+
+ mutex_lock(&node_affinity.lock);
+ ret = get_irq_affinity(dd, msix);
+ mutex_unlock(&node_affinity.lock);
+ return ret;
+}
+
+void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
+ struct hfi1_msix_entry *msix)
+{
+ struct cpu_mask_set *set = NULL;
+ struct hfi1_affinity_node *entry;
+
+ mutex_lock(&node_affinity.lock);
+ entry = node_affinity_lookup(dd->node);
+
+ switch (msix->type) {
+ case IRQ_SDMA:
+ set = &entry->def_intr;
+ hfi1_cleanup_sdma_notifier(msix);
+ break;
+ case IRQ_GENERAL:
+ /* Don't do accounting for general contexts */
+ break;
+ case IRQ_RCVCTXT: {
+ struct hfi1_ctxtdata *rcd = msix->arg;
+
+ /* Don't do accounting for control contexts */
+ if (rcd->ctxt != HFI1_CTRL_CTXT)
+ set = &entry->rcv_intr;
+ break;
+ }
+ case IRQ_NETDEVCTXT:
+ set = &entry->def_intr;
+ break;
+ default:
+ mutex_unlock(&node_affinity.lock);
+ return;
+ }
+
+ if (set) {
+ cpumask_andnot(&set->used, &set->used, &msix->mask);
+ _cpu_mask_set_gen_dec(set);
+ }
+
+ irq_set_affinity_hint(msix->irq, NULL);
+ cpumask_clear(&msix->mask);
+ mutex_unlock(&node_affinity.lock);
+}
+
+/* This should be called with node_affinity.lock held */
+static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask,
+ struct hfi1_affinity_node_list *affinity)
+{
+ int curr_cpu;
+ uint num_cores;
+
+ cpumask_copy(hw_thread_mask, &affinity->proc.mask);
+
+ if (affinity->num_core_siblings == 0)
+ return;
+
+ num_cores = rounddown(node_affinity.num_online_cpus / affinity->num_core_siblings,
+ node_affinity.num_online_nodes);
+
+ /* Removing other siblings not needed for now */
+ curr_cpu = cpumask_nth(num_cores * node_affinity.num_online_nodes, hw_thread_mask) + 1;
+ cpumask_clear_cpus(hw_thread_mask, curr_cpu, nr_cpu_ids - curr_cpu);
+
+ /* Identifying correct HW threads within physical cores */
+ cpumask_shift_left(hw_thread_mask, hw_thread_mask, num_cores * hw_thread_no);
+}
+
+int hfi1_get_proc_affinity(int node)
+{
+ int cpu = -1, ret, i;
+ struct hfi1_affinity_node *entry;
+ cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
+ const struct cpumask *node_mask,
+ *proc_mask = current->cpus_ptr;
+ struct hfi1_affinity_node_list *affinity = &node_affinity;
+ struct cpu_mask_set *set = &affinity->proc;
+
+ /*
+ * check whether process/context affinity has already
+ * been set
+ */
+ if (current->nr_cpus_allowed == 1) {
+ hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
+ current->pid, current->comm,
+ cpumask_pr_args(proc_mask));
+ /*
+ * Mark the pre-set CPU as used. This is atomic so we don't
+ * need the lock
+ */
+ cpu = cpumask_first(proc_mask);
+ cpumask_set_cpu(cpu, &set->used);
+ goto done;
+ } else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) {
+ hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
+ current->pid, current->comm,
+ cpumask_pr_args(proc_mask));
+ goto done;
+ }
+
+ /*
+ * The process does not have a preset CPU affinity so find one to
+ * recommend using the following algorithm:
+ *
+ * For each user process that is opening a context on HFI Y:
+ * a) If all cores are filled, reinitialize the bitmask
+ * b) Fill real cores first, then HT cores (First set of HT
+ * cores on all physical cores, then the second set of HT cores,
+ * and so on) in the following order:
+ *
+ * 1. Same NUMA node as HFI Y and not running an IRQ
+ * handler
+ * 2. Same NUMA node as HFI Y and running an IRQ handler
+ * 3. Different NUMA node to HFI Y and not running an IRQ
+ * handler
+ * 4. Different NUMA node to HFI Y and running an IRQ
+ * handler
+ * c) Mark core as filled in the bitmask. As user processes are
+ * done, clear cores from the bitmask.
+ */
+
+ ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
+ if (!ret)
+ goto done;
+ ret = zalloc_cpumask_var(&hw_thread_mask, GFP_KERNEL);
+ if (!ret)
+ goto free_diff;
+ ret = zalloc_cpumask_var(&available_mask, GFP_KERNEL);
+ if (!ret)
+ goto free_hw_thread_mask;
+ ret = zalloc_cpumask_var(&intrs_mask, GFP_KERNEL);
+ if (!ret)
+ goto free_available_mask;
+
+ mutex_lock(&affinity->lock);
+ /*
+ * If we've used all available HW threads, clear the mask and start
+ * overloading.
+ */
+ _cpu_mask_set_gen_inc(set);
+
+ /*
+ * If the NUMA node has CPUs used by interrupt handlers, include them
+ * in the interrupt handler mask.
+ */
+ entry = node_affinity_lookup(node);
+ if (entry) {
+ cpumask_copy(intrs_mask, (entry->def_intr.gen ?
+ &entry->def_intr.mask :
+ &entry->def_intr.used));
+ cpumask_or(intrs_mask, intrs_mask, (entry->rcv_intr.gen ?
+ &entry->rcv_intr.mask :
+ &entry->rcv_intr.used));
+ cpumask_or(intrs_mask, intrs_mask, &entry->general_intr_mask);
+ }
+ hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
+ cpumask_pr_args(intrs_mask));
+
+ cpumask_copy(hw_thread_mask, &set->mask);
+
+ /*
+ * If HT cores are enabled, identify which HW threads within the
+ * physical cores should be used.
+ */
+ for (i = 0; i < affinity->num_core_siblings; i++) {
+ find_hw_thread_mask(i, hw_thread_mask, affinity);
+
+ /*
+ * If there's at least one available core for this HW
+ * thread number, stop looking for a core.
+ *
+ * diff will be non-empty at least once in this loop
+ * because the used mask is reset when
+ * (set->mask == set->used) before this loop.
+ */
+ if (cpumask_andnot(diff, hw_thread_mask, &set->used))
+ break;
+ }
+ hfi1_cdbg(PROC, "Same available HW thread on all physical CPUs: %*pbl",
+ cpumask_pr_args(hw_thread_mask));
+
+ node_mask = cpumask_of_node(node);
+ hfi1_cdbg(PROC, "Device on NUMA %u, CPUs %*pbl", node,
+ cpumask_pr_args(node_mask));
+
+ /* Get cpumask of available CPUs on preferred NUMA */
+ cpumask_and(available_mask, hw_thread_mask, node_mask);
+ cpumask_andnot(available_mask, available_mask, &set->used);
+ hfi1_cdbg(PROC, "Available CPUs on NUMA %u: %*pbl", node,
+ cpumask_pr_args(available_mask));
+
+ /*
+ * At first, we don't want to place processes on the same
+ * CPUs as interrupt handlers. Then, CPUs running interrupt
+ * handlers are used.
+ *
+ * 1) If diff is not empty, then there are CPUs not running
+ * interrupt handlers available, so diff gets copied
+ * over to available_mask.
+ * 2) If diff is empty, then all CPUs not running interrupt
+ * handlers are taken, so available_mask contains all
+ * available CPUs running interrupt handlers.
+ * 3) If available_mask is empty, then all CPUs on the
+ * preferred NUMA node are taken, so other NUMA nodes are
+ * used for process assignments using the same method as
+ * the preferred NUMA node.
+ */
+ if (cpumask_andnot(diff, available_mask, intrs_mask))
+ cpumask_copy(available_mask, diff);
+
+ /* If we don't have CPUs on the preferred node, use other NUMA nodes */
+ if (cpumask_empty(available_mask)) {
+ cpumask_andnot(available_mask, hw_thread_mask, &set->used);
+ /* Excluding preferred NUMA cores */
+ cpumask_andnot(available_mask, available_mask, node_mask);
+ hfi1_cdbg(PROC,
+ "Preferred NUMA node cores are taken, cores available in other NUMA nodes: %*pbl",
+ cpumask_pr_args(available_mask));
+
+ /*
+ * At first, we don't want to place processes on the same
+ * CPUs as interrupt handlers.
+ */
+ if (cpumask_andnot(diff, available_mask, intrs_mask))
+ cpumask_copy(available_mask, diff);
+ }
+ hfi1_cdbg(PROC, "Possible CPUs for process: %*pbl",
+ cpumask_pr_args(available_mask));
+
+ cpu = cpumask_first(available_mask);
+ if (cpu >= nr_cpu_ids) /* empty */
+ cpu = -1;
+ else
+ cpumask_set_cpu(cpu, &set->used);
+
+ mutex_unlock(&affinity->lock);
+ hfi1_cdbg(PROC, "Process assigned to CPU %d", cpu);
+
+ free_cpumask_var(intrs_mask);
+free_available_mask:
+ free_cpumask_var(available_mask);
+free_hw_thread_mask:
+ free_cpumask_var(hw_thread_mask);
+free_diff:
+ free_cpumask_var(diff);
+done:
+ return cpu;
+}
+
+void hfi1_put_proc_affinity(int cpu)
+{
+ struct hfi1_affinity_node_list *affinity = &node_affinity;
+ struct cpu_mask_set *set = &affinity->proc;
+
+ if (cpu < 0)
+ return;
+
+ mutex_lock(&affinity->lock);
+ cpu_mask_set_put(set, cpu);
+ hfi1_cdbg(PROC, "Returning CPU %d for future process assignment", cpu);
+ mutex_unlock(&affinity->lock);
+}
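The proc-affinity path above leans on the cpu_mask_set pattern: hand CPUs out of `mask`, track them in `used`, and once every CPU has been handed out, bump a generation counter and start reusing (overloading) CPUs. A simplified, self-contained sketch of that bookkeeping, using a plain 64-bit bitmask instead of the kernel's cpumask API (names here are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

struct simple_cpu_set {
	uint64_t mask;		/* CPUs this set may hand out (up to 64 here) */
	uint64_t used;		/* CPUs currently handed out */
	unsigned int gen;	/* bumped each time the set wraps around */
};

/* Return the lowest free CPU; wrap (and bump gen) when all are used. */
static int simple_cpu_get(struct simple_cpu_set *set)
{
	int cpu;

	if ((set->used & set->mask) == set->mask) {
		/* Everything is in use: start overloading CPUs. */
		set->used = 0;
		set->gen++;
	}
	for (cpu = 0; cpu < 64; cpu++) {
		uint64_t bit = 1ULL << cpu;

		if ((set->mask & bit) && !(set->used & bit)) {
			set->used |= bit;
			return cpu;
		}
	}
	return -1;	/* empty mask */
}

int main(void)
{
	struct simple_cpu_set set = { .mask = 0xf };	/* CPUs 0-3 */
	int i;

	for (i = 0; i < 6; i++) {
		int cpu = simple_cpu_get(&set);

		printf("assigned CPU %d (gen %u)\n", cpu, set.gen);
	}
	return 0;
}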
diff --git a/drivers/infiniband/hw/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h
new file mode 100644
index 000000000000..ffdd0d571c7a
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/affinity.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2015 - 2020 Intel Corporation.
+ */
+
+#ifndef _HFI1_AFFINITY_H
+#define _HFI1_AFFINITY_H
+
+#include "hfi.h"
+
+enum irq_type {
+ IRQ_SDMA,
+ IRQ_RCVCTXT,
+ IRQ_NETDEVCTXT,
+ IRQ_GENERAL,
+ IRQ_OTHER
+};
+
+/* Can be used for both memory and cpu */
+enum affinity_flags {
+ AFF_AUTO,
+ AFF_NUMA_LOCAL,
+ AFF_DEV_LOCAL,
+ AFF_IRQ_LOCAL
+};
+
+struct cpu_mask_set {
+ struct cpumask mask;
+ struct cpumask used;
+ uint gen;
+};
+
+struct hfi1_msix_entry;
+
+/* Initialize non-HT cpu cores mask */
+void init_real_cpu_mask(void);
+/* Initialize driver affinity data */
+int hfi1_dev_affinity_init(struct hfi1_devdata *dd);
+/*
+ * Set IRQ affinity to a CPU. The function will determine the
+ * CPU and set the affinity to it.
+ */
+int hfi1_get_irq_affinity(struct hfi1_devdata *dd,
+ struct hfi1_msix_entry *msix);
+/*
+ * Remove the IRQ's CPU affinity. This function also updates
+ * any internal CPU tracking data
+ */
+void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
+ struct hfi1_msix_entry *msix);
+/*
+ * Determine a CPU affinity for a user process, if the process does not
+ * have an affinity set yet.
+ */
+int hfi1_get_proc_affinity(int node);
+/* Release a CPU used by a user process. */
+void hfi1_put_proc_affinity(int cpu);
+
+struct hfi1_affinity_node {
+ int node;
+ u16 __percpu *comp_vect_affinity;
+ struct cpu_mask_set def_intr;
+ struct cpu_mask_set rcv_intr;
+ struct cpumask general_intr_mask;
+ struct cpumask comp_vect_mask;
+ struct list_head list;
+};
+
+struct hfi1_affinity_node_list {
+ struct list_head list;
+ struct cpumask real_cpu_mask;
+ struct cpu_mask_set proc;
+ int num_core_siblings;
+ int num_possible_nodes;
+ int num_online_nodes;
+ int num_online_cpus;
+ struct mutex lock; /* protects affinity nodes */
+};
+
+int node_affinity_init(void);
+void node_affinity_destroy_all(void);
+extern struct hfi1_affinity_node_list node_affinity;
+void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd);
+int hfi1_comp_vect_mappings_lookup(struct rvt_dev_info *rdi, int comp_vect);
+int hfi1_comp_vectors_set_up(struct hfi1_devdata *dd);
+void hfi1_comp_vectors_clean_up(struct hfi1_devdata *dd);
+
+#endif /* _HFI1_AFFINITY_H */
diff --git a/drivers/infiniband/hw/hfi1/aspm.c b/drivers/infiniband/hw/hfi1/aspm.c
new file mode 100644
index 000000000000..79990d09522b
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/aspm.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2019 Intel Corporation.
+ *
+ */
+
+#include "aspm.h"
+
+/* Time after which the timer interrupt will re-enable ASPM */
+#define ASPM_TIMER_MS 1000
+/* Time for which interrupts are ignored after a timer has been scheduled */
+#define ASPM_RESCHED_TIMER_MS (ASPM_TIMER_MS / 2)
+/* Two interrupts within this time trigger ASPM disable */
+#define ASPM_TRIGGER_MS 1
+#define ASPM_TRIGGER_NS (ASPM_TRIGGER_MS * 1000 * 1000ull)
+#define ASPM_L1_SUPPORTED(reg) \
+ ((((reg) & PCI_EXP_LNKCAP_ASPMS) >> 10) & 0x2)
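For context: PCI_EXP_LNKCAP_ASPMS (0x00000c00) covers bits 11:10 of the PCIe Link Capabilities register, so shifting right by 10 leaves a two-bit field in which bit 1 (value 0x2) means L1 is supported. A quick sanity check of that bit math, with the mask value written out explicitly as an illustration:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Link Capabilities with ASPM support field = 0b11 (L0s and L1). */
	uint32_t lnkcap = 0x3u << 10;

	assert((((lnkcap & 0x00000c00) >> 10) & 0x2) != 0);	/* L1 supported */
	lnkcap = 0x1u << 10;					/* L0s only */
	assert((((lnkcap & 0x00000c00) >> 10) & 0x2) == 0);	/* L1 not supported */
	return 0;
}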
+
+uint aspm_mode = ASPM_MODE_DISABLED;
+module_param_named(aspm, aspm_mode, uint, 0444);
+MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic");
+
+static bool aspm_hw_l1_supported(struct hfi1_devdata *dd)
+{
+ struct pci_dev *parent = dd->pcidev->bus->self;
+ u32 up, dn;
+
+ /*
+ * If the driver does not have access to the upstream component,
+ * it cannot support ASPM L1 at all.
+ */
+ if (!parent)
+ return false;
+
+ pcie_capability_read_dword(dd->pcidev, PCI_EXP_LNKCAP, &dn);
+ dn = ASPM_L1_SUPPORTED(dn);
+
+ pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &up);
+ up = ASPM_L1_SUPPORTED(up);
+
+ /* ASPM works on A-step but is reported as not supported */
+ return (!!dn || is_ax(dd)) && !!up;
+}
+
+/* Set L1 entrance latency for slower entry to L1 */
+static void aspm_hw_set_l1_ent_latency(struct hfi1_devdata *dd)
+{
+ u32 l1_ent_lat = 0x4u;
+ u32 reg32;
+
+ pci_read_config_dword(dd->pcidev, PCIE_CFG_REG_PL3, &reg32);
+ reg32 &= ~PCIE_CFG_REG_PL3_L1_ENT_LATENCY_SMASK;
+ reg32 |= l1_ent_lat << PCIE_CFG_REG_PL3_L1_ENT_LATENCY_SHIFT;
+ pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL3, reg32);
+}
+
+static void aspm_hw_enable_l1(struct hfi1_devdata *dd)
+{
+ struct pci_dev *parent = dd->pcidev->bus->self;
+
+ /*
+ * If the driver does not have access to the upstream component,
+ * it cannot support ASPM L1 at all.
+ */
+ if (!parent)
+ return;
+
+ /* Enable ASPM L1 first in upstream component and then downstream */
+ pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC,
+ PCI_EXP_LNKCTL_ASPM_L1);
+ pcie_capability_clear_and_set_word(dd->pcidev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC,
+ PCI_EXP_LNKCTL_ASPM_L1);
+}
+
+void aspm_hw_disable_l1(struct hfi1_devdata *dd)
+{
+ struct pci_dev *parent = dd->pcidev->bus->self;
+
+ /* Disable ASPM L1 first in downstream component and then upstream */
+ pcie_capability_clear_and_set_word(dd->pcidev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC, 0x0);
+ if (parent)
+ pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC, 0x0);
+}
+
+static void aspm_enable(struct hfi1_devdata *dd)
+{
+ if (dd->aspm_enabled || aspm_mode == ASPM_MODE_DISABLED ||
+ !dd->aspm_supported)
+ return;
+
+ aspm_hw_enable_l1(dd);
+ dd->aspm_enabled = true;
+}
+
+static void aspm_disable(struct hfi1_devdata *dd)
+{
+ if (!dd->aspm_enabled || aspm_mode == ASPM_MODE_ENABLED)
+ return;
+
+ aspm_hw_disable_l1(dd);
+ dd->aspm_enabled = false;
+}
+
+static void aspm_disable_inc(struct hfi1_devdata *dd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->aspm_lock, flags);
+ aspm_disable(dd);
+ atomic_inc(&dd->aspm_disabled_cnt);
+ spin_unlock_irqrestore(&dd->aspm_lock, flags);
+}
+
+static void aspm_enable_dec(struct hfi1_devdata *dd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->aspm_lock, flags);
+ if (atomic_dec_and_test(&dd->aspm_disabled_cnt))
+ aspm_enable(dd);
+ spin_unlock_irqrestore(&dd->aspm_lock, flags);
+}
+
+/* ASPM processing for each receive context interrupt */
+void __aspm_ctx_disable(struct hfi1_ctxtdata *rcd)
+{
+ bool restart_timer;
+ bool close_interrupts;
+ unsigned long flags;
+ ktime_t now, prev;
+
+ spin_lock_irqsave(&rcd->aspm_lock, flags);
+ /* PSM contexts are open */
+ if (!rcd->aspm_intr_enable)
+ goto unlock;
+
+ prev = rcd->aspm_ts_last_intr;
+ now = ktime_get();
+ rcd->aspm_ts_last_intr = now;
+
+ /* An interrupt pair close together in time */
+ close_interrupts = ktime_to_ns(ktime_sub(now, prev)) < ASPM_TRIGGER_NS;
+
+ /* Don't push out our timer till this much time has elapsed */
+ restart_timer = ktime_to_ns(ktime_sub(now, rcd->aspm_ts_timer_sched)) >
+ ASPM_RESCHED_TIMER_MS * NSEC_PER_MSEC;
+ restart_timer = restart_timer && close_interrupts;
+
+ /* Disable ASPM and schedule timer */
+ if (rcd->aspm_enabled && close_interrupts) {
+ aspm_disable_inc(rcd->dd);
+ rcd->aspm_enabled = false;
+ restart_timer = true;
+ }
+
+ if (restart_timer) {
+ mod_timer(&rcd->aspm_timer,
+ jiffies + msecs_to_jiffies(ASPM_TIMER_MS));
+ rcd->aspm_ts_timer_sched = now;
+ }
+unlock:
+ spin_unlock_irqrestore(&rcd->aspm_lock, flags);
+}
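The heuristic above can be read as: two receive interrupts closer together than ASPM_TRIGGER_NS disable ASPM, a timer re-enables it after ASPM_TIMER_MS of quiet, and the timer is only pushed out again once ASPM_RESCHED_TIMER_MS has elapsed since it was last armed. A small user-space sketch of the same decision logic, with nanosecond timestamps supplied by the caller (constants mirror the ones above; everything else is illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TRIGGER_NS	(1ULL * 1000 * 1000)		/* two IRQs this close disable ASPM */
#define TIMER_NS	(1000ULL * 1000 * 1000)		/* quiet period before re-enable */
#define RESCHED_NS	(TIMER_NS / 2)			/* min gap between timer pushes */

struct aspm_sim {
	uint64_t last_intr_ns;		/* timestamp of previous interrupt */
	uint64_t timer_sched_ns;	/* when the re-enable timer was last armed */
	bool aspm_enabled;
};

/* Mirror of the decision above; returns true if the re-enable timer should be (re)armed. */
static bool on_interrupt(struct aspm_sim *s, uint64_t now_ns)
{
	bool close = (now_ns - s->last_intr_ns) < TRIGGER_NS;
	bool restart = close && (now_ns - s->timer_sched_ns) > RESCHED_NS;

	s->last_intr_ns = now_ns;
	if (s->aspm_enabled && close) {
		s->aspm_enabled = false;	/* interrupt burst: leave L1 */
		restart = true;
	}
	if (restart)
		s->timer_sched_ns = now_ns;
	return restart;
}

int main(void)
{
	struct aspm_sim s = { .aspm_enabled = true };

	on_interrupt(&s, 10 * TRIGGER_NS);			/* lone IRQ: ASPM stays on */
	printf("after lone IRQ: enabled=%d\n", s.aspm_enabled);
	on_interrupt(&s, 10 * TRIGGER_NS + TRIGGER_NS / 2);	/* burst: ASPM goes off */
	printf("after burst: enabled=%d\n", s.aspm_enabled);
	return 0;
}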
+
+/* Timer function for re-enabling ASPM in the absence of interrupt activity */
+static void aspm_ctx_timer_function(struct timer_list *t)
+{
+ struct hfi1_ctxtdata *rcd = timer_container_of(rcd, t, aspm_timer);
+ unsigned long flags;
+
+ spin_lock_irqsave(&rcd->aspm_lock, flags);
+ aspm_enable_dec(rcd->dd);
+ rcd->aspm_enabled = true;
+ spin_unlock_irqrestore(&rcd->aspm_lock, flags);
+}
+
+/*
+ * Disable interrupt processing for verbs contexts when PSM or VNIC contexts
+ * are open.
+ */
+void aspm_disable_all(struct hfi1_devdata *dd)
+{
+ struct hfi1_ctxtdata *rcd;
+ unsigned long flags;
+ u16 i;
+
+ for (i = 0; i < dd->first_dyn_alloc_ctxt; i++) {
+ rcd = hfi1_rcd_get_by_index(dd, i);
+ if (rcd) {
+ timer_delete_sync(&rcd->aspm_timer);
+ spin_lock_irqsave(&rcd->aspm_lock, flags);
+ rcd->aspm_intr_enable = false;
+ spin_unlock_irqrestore(&rcd->aspm_lock, flags);
+ hfi1_rcd_put(rcd);
+ }
+ }
+
+ aspm_disable(dd);
+ atomic_set(&dd->aspm_disabled_cnt, 0);
+}
+
+/* Re-enable interrupt processing for verbs contexts */
+void aspm_enable_all(struct hfi1_devdata *dd)
+{
+ struct hfi1_ctxtdata *rcd;
+ unsigned long flags;
+ u16 i;
+
+ aspm_enable(dd);
+
+ if (aspm_mode != ASPM_MODE_DYNAMIC)
+ return;
+
+ for (i = 0; i < dd->first_dyn_alloc_ctxt; i++) {
+ rcd = hfi1_rcd_get_by_index(dd, i);
+ if (rcd) {
+ spin_lock_irqsave(&rcd->aspm_lock, flags);
+ rcd->aspm_intr_enable = true;
+ rcd->aspm_enabled = true;
+ spin_unlock_irqrestore(&rcd->aspm_lock, flags);
+ hfi1_rcd_put(rcd);
+ }
+ }
+}
+
+static void aspm_ctx_init(struct hfi1_ctxtdata *rcd)
+{
+ spin_lock_init(&rcd->aspm_lock);
+ timer_setup(&rcd->aspm_timer, aspm_ctx_timer_function, 0);
+ rcd->aspm_intr_supported = rcd->dd->aspm_supported &&
+ aspm_mode == ASPM_MODE_DYNAMIC &&
+ rcd->ctxt < rcd->dd->first_dyn_alloc_ctxt;
+}
+
+void aspm_init(struct hfi1_devdata *dd)
+{
+ struct hfi1_ctxtdata *rcd;
+ u16 i;
+
+ spin_lock_init(&dd->aspm_lock);
+ dd->aspm_supported = aspm_hw_l1_supported(dd);
+
+ for (i = 0; i < dd->first_dyn_alloc_ctxt; i++) {
+ rcd = hfi1_rcd_get_by_index(dd, i);
+ if (rcd)
+ aspm_ctx_init(rcd);
+ hfi1_rcd_put(rcd);
+ }
+
+ /* Start with ASPM disabled */
+ aspm_hw_set_l1_ent_latency(dd);
+ dd->aspm_enabled = false;
+ aspm_hw_disable_l1(dd);
+
+ /* Now turn on ASPM if configured */
+ aspm_enable_all(dd);
+}
+
+void aspm_exit(struct hfi1_devdata *dd)
+{
+ aspm_disable_all(dd);
+
+ /* Turn on ASPM on exit to conserve power */
+ aspm_enable(dd);
+}
+
diff --git a/drivers/infiniband/hw/hfi1/aspm.h b/drivers/infiniband/hw/hfi1/aspm.h
new file mode 100644
index 000000000000..c8d92dc13daa
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/aspm.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2015-2017 Intel Corporation.
+ */
+
+#ifndef _ASPM_H
+#define _ASPM_H
+
+#include "hfi.h"
+
+extern uint aspm_mode;
+
+enum aspm_mode {
+ ASPM_MODE_DISABLED = 0, /* ASPM always disabled, performance mode */
+ ASPM_MODE_ENABLED = 1, /* ASPM always enabled, power saving mode */
+ ASPM_MODE_DYNAMIC = 2, /* ASPM enabled/disabled dynamically */
+};
+
+void aspm_init(struct hfi1_devdata *dd);
+void aspm_exit(struct hfi1_devdata *dd);
+void aspm_hw_disable_l1(struct hfi1_devdata *dd);
+void __aspm_ctx_disable(struct hfi1_ctxtdata *rcd);
+void aspm_disable_all(struct hfi1_devdata *dd);
+void aspm_enable_all(struct hfi1_devdata *dd);
+
+static inline void aspm_ctx_disable(struct hfi1_ctxtdata *rcd)
+{
+ /* Quickest exit for minimum impact */
+ if (likely(!rcd->aspm_intr_supported))
+ return;
+
+ __aspm_ctx_disable(rcd);
+}
+
+#endif /* _ASPM_H */
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
new file mode 100644
index 000000000000..0781ab756d44
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -0,0 +1,15484 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015 - 2020 Intel Corporation.
+ * Copyright(c) 2021 Cornelis Networks.
+ */
+
+/*
+ * This file contains all of the code that is specific to the HFI chip
+ */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+
+#include "hfi.h"
+#include "trace.h"
+#include "mad.h"
+#include "pio.h"
+#include "sdma.h"
+#include "eprom.h"
+#include "efivar.h"
+#include "platform.h"
+#include "aspm.h"
+#include "affinity.h"
+#include "debugfs.h"
+#include "fault.h"
+#include "netdev.h"
+
+uint num_vls = HFI1_MAX_VLS_SUPPORTED;
+module_param(num_vls, uint, S_IRUGO);
+MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
+
+/*
+ * Default time to aggregate two 10K packets from the idle state
+ * (timer not running). The timer starts at the end of the first packet,
+ * so only the time for one 10K packet and header plus a bit extra is needed.
+ * 10 * 1024 + 64 header bytes = 10304 bytes
+ * 10304 bytes / 12.5 GB/s = 824.32 ns
+ */
+uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
+module_param(rcv_intr_timeout, uint, S_IRUGO);
+MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
+
+uint rcv_intr_count = 16; /* same as qib */
+module_param(rcv_intr_count, uint, S_IRUGO);
+MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
+
+ushort link_crc_mask = SUPPORTED_CRCS;
+module_param(link_crc_mask, ushort, S_IRUGO);
+MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
+
+uint loopback;
+module_param_named(loopback, loopback, uint, S_IRUGO);
+MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
+
+/* Other driver tunables */
+uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation*/
+static ushort crc_14b_sideband = 1;
+static uint use_flr = 1;
+uint quick_linkup; /* skip LNI */
+
+struct flag_table {
+ u64 flag; /* the flag */
+ char *str; /* description string */
+ u16 extra; /* extra information */
+ u16 unused0;
+ u32 unused1;
+};
+
+/* str must be a string constant */
+#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
+#define FLAG_ENTRY0(str, flag) {flag, str, 0}
+
+/* Send Error Consequences */
+#define SEC_WRITE_DROPPED 0x1
+#define SEC_PACKET_DROPPED 0x2
+#define SEC_SC_HALTED 0x4 /* per-context only */
+#define SEC_SPC_FREEZE 0x8 /* per-HFI only */
+
+#define DEFAULT_KRCVQS 2
+#define MIN_KERNEL_KCTXTS 2
+#define FIRST_KERNEL_KCTXT 1
+
+/*
+ * RSM instance allocation
+ * 0 - User Fecn Handling
+ * 1 - Vnic
+ * 2 - AIP
+ * 3 - Verbs
+ */
+#define RSM_INS_FECN 0
+#define RSM_INS_VNIC 1
+#define RSM_INS_AIP 2
+#define RSM_INS_VERBS 3
+
+/* Bit offset into the GUID which carries HFI id information */
+#define GUID_HFI_INDEX_SHIFT 39
+
+/* extract the emulation revision */
+#define emulator_rev(dd) ((dd)->irev >> 8)
+/* parallel and serial emulation versions are 3 and 4 respectively */
+#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
+#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
+
+/* RSM fields for Verbs */
+/* packet type */
+#define IB_PACKET_TYPE 2ull
+#define QW_SHIFT 6ull
+/* QPN[7..1] */
+#define QPN_WIDTH 7ull
+
+/* LRH.BTH: QW 0, OFFSET 48 - for match */
+#define LRH_BTH_QW 0ull
+#define LRH_BTH_BIT_OFFSET 48ull
+#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
+#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
+#define LRH_BTH_SELECT
+#define LRH_BTH_MASK 3ull
+#define LRH_BTH_VALUE 2ull
+
+/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
+#define LRH_SC_QW 0ull
+#define LRH_SC_BIT_OFFSET 56ull
+#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
+#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
+#define LRH_SC_MASK 128ull
+#define LRH_SC_VALUE 0ull
+
+/* SC[n..0] QW 0, OFFSET 60 - for select */
+#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))
+
+/* QPN[m+n:1] QW 1, OFFSET 1 */
+#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
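The RSM offsets above pack a quadword index and a bit offset into a single value: offset = (QW << QW_SHIFT) | bit. A couple of worked values from the defines shown, as an illustration:

#include <stdio.h>

int main(void)
{
	unsigned int qw_shift = 6;
	unsigned int lrh_bth_match = (0u << qw_shift) | 48;	/* LRH_BTH_MATCH_OFFSET = 48 */
	unsigned int qpn_select    = (1u << qw_shift) | 1;	/* QPN_SELECT_OFFSET    = 65 */

	printf("LRH_BTH_MATCH_OFFSET = %u, QPN_SELECT_OFFSET = %u\n",
	       lrh_bth_match, qpn_select);
	return 0;
}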
+
+/* RSM fields for AIP */
+/* LRH.BTH above is reused for this rule */
+
+/* BTH.DESTQP: QW 1, OFFSET 16 for match */
+#define BTH_DESTQP_QW 1ull
+#define BTH_DESTQP_BIT_OFFSET 16ull
+#define BTH_DESTQP_OFFSET(off) ((BTH_DESTQP_QW << QW_SHIFT) | (off))
+#define BTH_DESTQP_MATCH_OFFSET BTH_DESTQP_OFFSET(BTH_DESTQP_BIT_OFFSET)
+#define BTH_DESTQP_MASK 0xFFull
+#define BTH_DESTQP_VALUE 0x81ull
+
+/* DETH.SQPN: QW 1 Offset 56 for select */
+/* We use the 8 most significant Source QPN bits as entropy for AIP */
+#define DETH_AIP_SQPN_QW 3ull
+#define DETH_AIP_SQPN_BIT_OFFSET 56ull
+#define DETH_AIP_SQPN_OFFSET(off) ((DETH_AIP_SQPN_QW << QW_SHIFT) | (off))
+#define DETH_AIP_SQPN_SELECT_OFFSET \
+ DETH_AIP_SQPN_OFFSET(DETH_AIP_SQPN_BIT_OFFSET)
+
+/* RSM fields for Vnic */
+/* L2_TYPE: QW 0, OFFSET 61 - for match */
+#define L2_TYPE_QW 0ull
+#define L2_TYPE_BIT_OFFSET 61ull
+#define L2_TYPE_OFFSET(off) ((L2_TYPE_QW << QW_SHIFT) | (off))
+#define L2_TYPE_MATCH_OFFSET L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
+#define L2_TYPE_MASK 3ull
+#define L2_16B_VALUE 2ull
+
+/* L4_TYPE QW 1, OFFSET 0 - for match */
+#define L4_TYPE_QW 1ull
+#define L4_TYPE_BIT_OFFSET 0ull
+#define L4_TYPE_OFFSET(off) ((L4_TYPE_QW << QW_SHIFT) | (off))
+#define L4_TYPE_MATCH_OFFSET L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
+#define L4_16B_TYPE_MASK 0xFFull
+#define L4_16B_ETH_VALUE 0x78ull
+
+/* 16B VESWID - for select */
+#define L4_16B_HDR_VESWID_OFFSET ((2 << QW_SHIFT) | (16ull))
+/* 16B ENTROPY - for select */
+#define L2_16B_ENTROPY_OFFSET ((1 << QW_SHIFT) | (32ull))
+
+/* defines to build power on SC2VL table */
+#define SC2VL_VAL( \
+ num, \
+ sc0, sc0val, \
+ sc1, sc1val, \
+ sc2, sc2val, \
+ sc3, sc3val, \
+ sc4, sc4val, \
+ sc5, sc5val, \
+ sc6, sc6val, \
+ sc7, sc7val) \
+( \
+ ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
+ ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
+ ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
+ ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
+ ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
+ ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
+ ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
+ ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
+)
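SC2VL_VAL builds a 64-bit register image by shifting each service class's VL value into that SC's field of the SendSC2VLT register. A simplified, self-contained analog of that packing (the 8-bits-per-SC layout and the SC-to-VL assignment below are assumptions for illustration, not the chip's register definition):

#include <stdint.h>
#include <stdio.h>

/* Simplified analog of SC2VL_VAL: pack 8 SC->VL entries into a u64. */
static uint64_t pack_sc2vl(const unsigned int vl[8])
{
	uint64_t reg = 0;
	int sc;

	/* Hypothetical layout: 8 bits per SC, lowest SC in the low byte. */
	for (sc = 0; sc < 8; sc++)
		reg |= (uint64_t)vl[sc] << (sc * 8);
	return reg;
}

int main(void)
{
	/* Made-up mapping: SC0->VL0, SC1->VL1, ..., for illustration only. */
	unsigned int vl[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

	printf("SC2VL register image: 0x%016llx\n",
	       (unsigned long long)pack_sc2vl(vl));
	return 0;
}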
+
+#define DC_SC_VL_VAL( \
+ range, \
+ e0, e0val, \
+ e1, e1val, \
+ e2, e2val, \
+ e3, e3val, \
+ e4, e4val, \
+ e5, e5val, \
+ e6, e6val, \
+ e7, e7val, \
+ e8, e8val, \
+ e9, e9val, \
+ e10, e10val, \
+ e11, e11val, \
+ e12, e12val, \
+ e13, e13val, \
+ e14, e14val, \
+ e15, e15val) \
+( \
+ ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
+ ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
+ ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
+ ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
+ ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
+ ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
+ ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
+ ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
+ ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
+ ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
+ ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
+ ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
+ ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
+ ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
+ ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
+ ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
+)
+
+/* all CceStatus sub-block freeze bits */
+#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
+ | CCE_STATUS_RXE_FROZE_SMASK \
+ | CCE_STATUS_TXE_FROZE_SMASK \
+ | CCE_STATUS_TXE_PIO_FROZE_SMASK)
+/* all CceStatus sub-block TXE pause bits */
+#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
+ | CCE_STATUS_TXE_PAUSED_SMASK \
+ | CCE_STATUS_SDMA_PAUSED_SMASK)
+/* all CceStatus sub-block RXE pause bits */
+#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
+
+#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
+#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
+
+/*
+ * CCE Error flags.
+ */
+static const struct flag_table cce_err_status_flags[] = {
+/* 0*/ FLAG_ENTRY0("CceCsrParityErr",
+ CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
+/* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
+ CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
+/* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
+ CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
+/* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
+ CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
+/* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
+ CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
+/* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
+ CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
+/* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
+ CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
+/* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
+ CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
+/* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
+ CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
+/* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
+ CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
+/*10*/ FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
+ CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
+/*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
+ CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
+/*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
+ CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
+/*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
+ CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
+/*14*/ FLAG_ENTRY0("PcicRetrySotMemCorErr",
+ CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
+/*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
+ CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
+/*16*/ FLAG_ENTRY0("PcicPostDatQCorErr",
+ CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
+/*17*/ FLAG_ENTRY0("PcicCplHdQCorErr",
+ CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
+/*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
+ CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
+/*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
+ CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
+/*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
+ CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
+/*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
+ CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
+/*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
+ CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
+/*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
+ CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
+/*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
+ CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
+/*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
+ CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
+/*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
+ CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
+/*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
+ CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
+/*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
+ CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
+/*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
+ CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
+/*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
+ CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
+/*31*/ FLAG_ENTRY0("LATriggered",
+ CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
+/*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
+ CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
+/*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
+ CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
+/*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
+ CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
+/*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
+ CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
+/*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
+ CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
+/*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
+ CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
+/*38*/ FLAG_ENTRY0("CceIntMapCorErr",
+ CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
+/*39*/ FLAG_ENTRY0("CceIntMapUncErr",
+ CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
+/*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
+ CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
+/*41-63 reserved*/
+};
+
+/*
+ * Misc Error flags
+ */
+#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
+static const struct flag_table misc_err_status_flags[] = {
+/* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
+/* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
+/* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
+/* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
+/* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
+/* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
+/* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
+/* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
+/* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
+/* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
+/*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
+/*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
+/*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
+};
+
+/*
+ * TXE PIO Error flags and consequences
+ */
+static const struct flag_table pio_err_status_flags[] = {
+/* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
+ SEC_WRITE_DROPPED,
+ SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
+/* 1*/ FLAG_ENTRY("PioWriteAddrParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
+/* 2*/ FLAG_ENTRY("PioCsrParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
+/* 3*/ FLAG_ENTRY("PioSbMemFifo0",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
+/* 4*/ FLAG_ENTRY("PioSbMemFifo1",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
+/* 5*/ FLAG_ENTRY("PioPccFifoParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
+/* 6*/ FLAG_ENTRY("PioPecFifoParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
+/* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
+/* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
+/* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
+/*10*/ FLAG_ENTRY("PioSmPktResetParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
+/*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
+/*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
+/*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
+ 0,
+ SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
+/*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
+ 0,
+ SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
+/*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
+/*16*/ FLAG_ENTRY("PioPpmcPblFifo",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
+/*17*/ FLAG_ENTRY("PioInitSmIn",
+ 0,
+ SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
+/*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
+/*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
+/*20*/ FLAG_ENTRY("PioHostAddrMemCor",
+ 0,
+ SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
+/*21*/ FLAG_ENTRY("PioWriteDataParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
+/*22*/ FLAG_ENTRY("PioStateMachine",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
+/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
+ SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
+/*24*/ FLAG_ENTRY("PioBlockQwCountParity",
+ SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
+/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
+/*26*/ FLAG_ENTRY("PioVlfSopParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
+/*27*/ FLAG_ENTRY("PioVlFifoParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
+/*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
+/*29*/ FLAG_ENTRY("PioPpmcSopLen",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
+/*30-31 reserved*/
+/*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
+/*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
+/*34*/ FLAG_ENTRY("PioPccSopHeadParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
+/*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
+/*36-63 reserved*/
+};
+
+/* TXE PIO errors that cause an SPC freeze */
+#define ALL_PIO_FREEZE_ERR \
+ (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
+
+/*
+ * TXE SDMA Error flags
+ */
+static const struct flag_table sdma_err_status_flags[] = {
+/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
+ SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
+/* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
+ SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
+/* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
+ SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
+/* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
+ SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
+/*04-63 reserved*/
+};
+
+/* TXE SDMA errors that cause an SPC freeze */
+#define ALL_SDMA_FREEZE_ERR \
+ (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
+ | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
+ | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
+
+/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
+#define PORT_DISCARD_EGRESS_ERRS \
+ (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
+ | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
+ | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
+
+/*
+ * TXE Egress Error flags
+ */
+#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
+static const struct flag_table egress_err_status_flags[] = {
+/* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
+/* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
+/* 2 reserved */
+/* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
+ SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
+/* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
+/* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
+/* 6 reserved */
+/* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
+ SEES(TX_PIO_LAUNCH_INTF_PARITY)),
+/* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
+ SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
+/* 9-10 reserved */
+/*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
+ SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
+/*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
+/*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
+/*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
+/*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
+/*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
+ SEES(TX_SDMA0_DISALLOWED_PACKET)),
+/*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
+ SEES(TX_SDMA1_DISALLOWED_PACKET)),
+/*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
+ SEES(TX_SDMA2_DISALLOWED_PACKET)),
+/*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
+ SEES(TX_SDMA3_DISALLOWED_PACKET)),
+/*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
+ SEES(TX_SDMA4_DISALLOWED_PACKET)),
+/*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
+ SEES(TX_SDMA5_DISALLOWED_PACKET)),
+/*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
+ SEES(TX_SDMA6_DISALLOWED_PACKET)),
+/*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
+ SEES(TX_SDMA7_DISALLOWED_PACKET)),
+/*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
+ SEES(TX_SDMA8_DISALLOWED_PACKET)),
+/*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
+ SEES(TX_SDMA9_DISALLOWED_PACKET)),
+/*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
+ SEES(TX_SDMA10_DISALLOWED_PACKET)),
+/*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
+ SEES(TX_SDMA11_DISALLOWED_PACKET)),
+/*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
+ SEES(TX_SDMA12_DISALLOWED_PACKET)),
+/*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
+ SEES(TX_SDMA13_DISALLOWED_PACKET)),
+/*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
+ SEES(TX_SDMA14_DISALLOWED_PACKET)),
+/*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
+ SEES(TX_SDMA15_DISALLOWED_PACKET)),
+/*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
+ SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
+/*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
+ SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
+/*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
+ SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
+/*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
+ SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
+/*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
+ SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
+/*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
+ SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
+/*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
+ SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
+/*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
+ SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
+/*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
+ SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
+/*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
+/*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
+/*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
+/*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
+/*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
+/*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
+/*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
+/*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
+/*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
+/*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
+/*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
+/*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
+/*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
+/*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
+/*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
+/*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
+/*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
+/*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
+/*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
+/*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
+/*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
+/*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
+ SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
+/*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
+ SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
+};
+
+/*
+ * TXE Egress Error Info flags
+ */
+#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
+static const struct flag_table egress_err_info_flags[] = {
+/* 0*/ FLAG_ENTRY0("Reserved", 0ull),
+/* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
+/* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
+/* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
+/* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
+/* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
+/* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
+/* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
+/* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
+/* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
+/*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
+/*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
+/*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
+/*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
+/*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
+/*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
+/*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
+/*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
+/*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
+/*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
+/*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
+/*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
+};
+
+/* TXE Egress errors that cause an SPC freeze */
+#define ALL_TXE_EGRESS_FREEZE_ERR \
+ (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
+ | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
+ | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
+ | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
+ | SEES(TX_LAUNCH_CSR_PARITY) \
+ | SEES(TX_SBRD_CTL_CSR_PARITY) \
+ | SEES(TX_CONFIG_PARITY) \
+ | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
+ | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
+ | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
+ | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
+ | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
+ | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
+ | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
+ | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
+ | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
+ | SEES(TX_CREDIT_RETURN_PARITY))
+
+/*
+ * TXE Send error flags
+ */
+#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
+static const struct flag_table send_err_status_flags[] = {
+/* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
+/* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
+/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
+};
+
+/*
+ * TXE Send Context Error flags and consequences
+ */
+static const struct flag_table sc_err_status_flags[] = {
+/* 0*/ FLAG_ENTRY("InconsistentSop",
+ SEC_PACKET_DROPPED | SEC_SC_HALTED,
+ SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
+/* 1*/ FLAG_ENTRY("DisallowedPacket",
+ SEC_PACKET_DROPPED | SEC_SC_HALTED,
+ SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
+/* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
+ SEC_WRITE_DROPPED | SEC_SC_HALTED,
+ SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
+/* 3*/ FLAG_ENTRY("WriteOverflow",
+ SEC_WRITE_DROPPED | SEC_SC_HALTED,
+ SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
+/* 4*/ FLAG_ENTRY("WriteOutOfBounds",
+ SEC_WRITE_DROPPED | SEC_SC_HALTED,
+ SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
+/* 5-63 reserved*/
+};
+
+/*
+ * RXE Receive Error flags
+ */
+#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
+static const struct flag_table rxe_err_status_flags[] = {
+/* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
+/* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
+/* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
+/* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
+/* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
+/* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
+/* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
+/* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
+/* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
+/* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
+/*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
+/*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
+/*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
+/*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
+/*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
+/*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
+/*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
+ RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
+/*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
+/*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
+/*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
+ RXES(RBUF_BLOCK_LIST_READ_UNC)),
+/*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
+ RXES(RBUF_BLOCK_LIST_READ_COR)),
+/*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
+ RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
+/*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
+ RXES(RBUF_CSR_QENT_CNT_PARITY)),
+/*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
+ RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
+/*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
+ RXES(RBUF_CSR_QVLD_BIT_PARITY)),
+/*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
+/*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
+/*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
+ RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
+/*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
+/*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
+/*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
+/*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
+/*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
+/*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
+/*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
+/*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
+ RXES(RBUF_FL_INITDONE_PARITY)),
+/*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
+ RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
+/*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
+/*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
+/*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
+/*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
+ RXES(LOOKUP_DES_PART1_UNC_COR)),
+/*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
+ RXES(LOOKUP_DES_PART2_PARITY)),
+/*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
+/*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
+/*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
+/*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
+/*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
+/*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
+/*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
+/*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
+/*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
+/*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
+/*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
+/*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
+/*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
+/*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
+/*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
+/*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
+/*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
+/*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
+/*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
+/*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
+/*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
+/*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
+};
+
+/* RXE errors that will trigger an SPC freeze */
+#define ALL_RXE_FREEZE_ERR \
+ (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
+
+#define RXE_FREEZE_ABORT_MASK \
+ (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
+ RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
+ RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
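+
+/*
+ * Illustrative sketch only, not part of the driver: how ALL_RXE_FREEZE_ERR
+ * is typically consulted once a RcvErrStatus value is in hand.  The helper
+ * name example_rxe_freeze_check() is hypothetical; only the mask usage is
+ * taken from this file.
+ */
+static inline bool example_rxe_freeze_check(u64 err_status)
+{
+ /* any freeze-class RXE error requires an SPC freeze */
+ return (err_status & ALL_RXE_FREEZE_ERR) != 0;
+}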
+
+/*
+ * DCC Error Flags
+ */
+#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
+static const struct flag_table dcc_err_flags[] = {
+ FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
+ FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
+ FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
+ FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
+ FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
+ FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
+ FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
+ FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
+ FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
+ FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
+ FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
+ FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
+ FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
+ FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
+ FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
+ FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
+ FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
+ FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
+ FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
+ FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
+ FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
+ FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
+ FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
+ FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
+ FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
+ FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
+ FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
+ FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
+ FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
+ FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
+ FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
+ FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
+ FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
+ FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
+ FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
+ FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
+ FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
+ FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
+ FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
+ FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
+ FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
+ FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
+ FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
+ FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
+ FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
+ FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
+};
+
+/*
+ * LCB error flags
+ */
+#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
+static const struct flag_table lcb_err_flags[] = {
+/* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
+/* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
+/* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
+/* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
+ LCBE(ALL_LNS_FAILED_REINIT_TEST)),
+/* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
+/* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
+/* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
+/* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
+/* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
+/* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
+/*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
+/*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
+/*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
+/*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
+ LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
+/*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
+/*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
+/*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
+/*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
+/*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
+/*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
+ LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
+/*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
+/*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
+/*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
+/*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
+/*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
+/*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
+/*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
+ LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
+/*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
+/*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
+ LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
+/*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
+ LCBE(REDUNDANT_FLIT_PARITY_ERR))
+};
+
+/*
+ * DC8051 Error Flags
+ */
+#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
+static const struct flag_table dc8051_err_flags[] = {
+ FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
+ FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
+ FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
+ FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
+ FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
+ FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
+ FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
+ FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
+ FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
+ D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
+ FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
+};
+
+/*
+ * DC8051 Information Error flags
+ *
+ * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
+ */
+static const struct flag_table dc8051_info_err_flags[] = {
+ FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
+ FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
+ FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
+ FLAG_ENTRY0("Serdes internal loopback failure",
+ FAILED_SERDES_INTERNAL_LOOPBACK),
+ FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
+ FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
+ FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
+ FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
+ FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
+ FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
+ FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
+ FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
+ FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT),
+ FLAG_ENTRY0("External Device Request Timeout",
+ EXTERNAL_DEVICE_REQ_TIMEOUT),
+};
+
+/*
+ * DC8051 Information Host Message flags
+ *
+ * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
+ */
+static const struct flag_table dc8051_info_host_msg_flags[] = {
+ FLAG_ENTRY0("Host request done", 0x0001),
+ FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
+ FLAG_ENTRY0("BC SMA message", 0x0004),
+ FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
+ FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
+ FLAG_ENTRY0("External device config request", 0x0020),
+ FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
+ FLAG_ENTRY0("LinkUp achieved", 0x0080),
+ FLAG_ENTRY0("Link going down", 0x0100),
+ FLAG_ENTRY0("Link width downgraded", 0x0200),
+};
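+
+/*
+ * Worked example (illustration only): a HOST_MSG value of 0x0081 decodes
+ * against the table above as
+ * 0x0001 "Host request done" | 0x0080 "LinkUp achieved".
+ */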
+
+static u32 encoded_size(u32 size);
+static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
+static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
+static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
+ u8 *continuous);
+static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
+ u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
+static void read_vc_remote_link_width(struct hfi1_devdata *dd,
+ u8 *remote_tx_rate, u16 *link_widths);
+static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
+ u8 *flag_bits, u16 *link_widths);
+static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
+ u8 *device_rev);
+static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
+static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
+ u8 *tx_polarity_inversion,
+ u8 *rx_polarity_inversion, u8 *max_rate);
+static void handle_sdma_eng_err(struct hfi1_devdata *dd,
+ unsigned int context, u64 err_status);
+static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
+static void handle_dcc_err(struct hfi1_devdata *dd,
+ unsigned int context, u64 err_status);
+static void handle_lcb_err(struct hfi1_devdata *dd,
+ unsigned int context, u64 err_status);
+static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
+static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
+static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
+static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
+static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
+static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
+static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
+static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
+static void set_partition_keys(struct hfi1_pportdata *ppd);
+static const char *link_state_name(u32 state);
+static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
+ u32 state);
+static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
+ u64 *out_data);
+static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
+static int thermal_init(struct hfi1_devdata *dd);
+
+static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
+static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
+ int msecs);
+static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
+ int msecs);
+static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
+static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
+static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
+ int msecs);
+static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
+ int msecs);
+static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
+static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
+static void handle_temp_err(struct hfi1_devdata *dd);
+static void dc_shutdown(struct hfi1_devdata *dd);
+static void dc_start(struct hfi1_devdata *dd);
+static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp,
+ unsigned int *np);
+static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
+static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
+static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
+static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width);
+
+/*
+ * Error interrupt table entry. This is used as input to the interrupt
+ * "clear down" routine used for all second tier error interrupt register.
+ * Second tier interrupt registers have a single bit representing them
+ * in the top-level CceIntStatus.
+ */
+struct err_reg_info {
+ u32 status; /* status CSR offset */
+ u32 clear; /* clear CSR offset */
+ u32 mask; /* mask CSR offset */
+ void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
+ const char *desc;
+};
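+
+/*
+ * A minimal sketch of the "clear down" flow described above, under a
+ * hypothetical helper name; it is shown only to make the tables below
+ * easier to read and is not the driver's actual routine.
+ */
+static inline void example_err_clear_down(struct hfi1_devdata *dd,
+ const struct err_reg_info *eri,
+ u32 source)
+{
+ u64 reg = read_csr(dd, eri->status); /* second-tier status bits */
+
+ write_csr(dd, eri->clear, reg); /* acknowledge what was read */
+ if (reg && eri->handler)
+ eri->handler(dd, source, reg); /* decode and report the bits */
+}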
+
+#define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START)
+#define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START)
+#define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START)
+
+/*
+ * Helpers for building HFI and DC error interrupt table entries. Different
+ * helpers are needed because of inconsistent register names.
+ */
+#define EE(reg, handler, desc) \
+ { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
+ handler, desc }
+#define DC_EE1(reg, handler, desc) \
+ { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
+#define DC_EE2(reg, handler, desc) \
+ { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
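+
+/*
+ * For reference, the first entries of the tables below expand to:
+ * EE(CCE_ERR, handle_cce_err, "CceErr")
+ *   -> { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK,
+ *        handle_cce_err, "CceErr" }
+ * DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err")
+ *   -> { DCC_ERR_FLG, DCC_ERR_FLG_CLR, DCC_ERR_FLG_EN,
+ *        handle_dcc_err, "DCC Err" }
+ */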
+
+/*
+ * Table of the "misc" grouping of error interrupts. Each entry refers to
+ * another register containing more information.
+ */
+static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
+/* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
+/* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
+/* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
+/* 3*/ { 0, 0, 0, NULL }, /* reserved */
+/* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
+/* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
+/* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
+/* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
+ /* the rest are reserved */
+};
+
+/*
+ * Index into the Various section of the interrupt sources
+ * corresponding to the Critical Temperature interrupt.
+ */
+#define TCRIT_INT_SOURCE 4
+
+/*
+ * SDMA error interrupt entry - refers to another register containing more
+ * information.
+ */
+static const struct err_reg_info sdma_eng_err =
+ EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
+
+static const struct err_reg_info various_err[NUM_VARIOUS] = {
+/* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
+/* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
+/* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
+/* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
+/* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
+ /* rest are reserved */
+};
+
+/*
+ * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
+ * register can not be derived from the MTU value because 10K is not
+ * a power of 2. Therefore, we need a constant. Everything else can
+ * be calculated.
+ */
+#define DCC_CFG_PORT_MTU_CAP_10240 7
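+
+/*
+ * Illustrative sketch only (encoded_mtu_cap() is a hypothetical helper,
+ * not the driver's code): when programming the mtu_cap field, the 10K
+ * case uses the constant above and every other, power-of-2, MTU is
+ * assumed to be computed directly from the MTU value, per the comment
+ * above:
+ *
+ * cap = (mtu == 10240) ? DCC_CFG_PORT_MTU_CAP_10240 : encoded_mtu_cap(mtu);
+ */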
+
+/*
+ * Table of the DC grouping of error interrupts. Each entry refers to
+ * another register containing more information.
+ */
+static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
+/* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
+/* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
+/* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
+/* 3*/ /* dc_lbm_int - special, see is_dc_int() */
+ /* the rest are reserved */
+};
+
+struct cntr_entry {
+ /*
+ * counter name
+ */
+ char *name;
+
+ /*
+ * csr to read for name (if applicable)
+ */
+ u64 csr;
+
+ /*
+ * offset into dd or ppd to store the counter's value
+ */
+ int offset;
+
+ /*
+ * flags
+ */
+ u8 flags;
+
+ /*
+ * accessor for the stat element; context is either dd or ppd
+ */
+ u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
+ int mode, u64 data);
+};
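+
+/*
+ * Illustrative call only: a counter element (built with the CNTR_ELEM
+ * helpers below) is read by invoking its accessor with the matching
+ * context, e.g.
+ *
+ * val = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
+ *
+ * Per-VL counters pass a real VL number instead of CNTR_INVALID_VL.
+ */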
+
+#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
+#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
+
+#define CNTR_ELEM(name, csr, offset, flags, accessor) \
+{ \
+ name, \
+ csr, \
+ offset, \
+ flags, \
+ accessor \
+}
+
+/* 32bit RXE */
+#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
+CNTR_ELEM(#name, \
+ (counter * 8 + RCV_COUNTER_ARRAY32), \
+ 0, flags | CNTR_32BIT, \
+ port_access_u32_csr)
+
+#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
+CNTR_ELEM(#name, \
+ (counter * 8 + RCV_COUNTER_ARRAY32), \
+ 0, flags | CNTR_32BIT, \
+ dev_access_u32_csr)
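+
+/*
+ * Expansion example (hypothetical counter name and index):
+ * RXE32_DEV_CNTR_ELEM(RcvExample, 5, CNTR_NORMAL)
+ *   -> { "RcvExample", (5 * 8 + RCV_COUNTER_ARRAY32), 0,
+ *        CNTR_NORMAL | CNTR_32BIT, dev_access_u32_csr }
+ */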
+
+/* 64bit RXE */
+#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
+CNTR_ELEM(#name, \
+ (counter * 8 + RCV_COUNTER_ARRAY64), \
+ 0, flags, \
+ port_access_u64_csr)
+
+#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
+CNTR_ELEM(#name, \
+ (counter * 8 + RCV_COUNTER_ARRAY64), \
+ 0, flags, \
+ dev_access_u64_csr)
+
+#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
+#define OVR_ELM(ctx) \
+CNTR_ELEM("RcvHdrOvr" #ctx, \
+ (RCV_HDR_OVFL_CNT + ctx * 0x100), \
+ 0, CNTR_NORMAL, port_access_u64_csr)
+
+/* 32bit TXE */
+#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
+CNTR_ELEM(#name, \
+ (counter * 8 + SEND_COUNTER_ARRAY32), \
+ 0, flags | CNTR_32BIT, \
+ port_access_u32_csr)
+
+/* 64bit TXE */
+#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
+CNTR_ELEM(#name, \
+ (counter * 8 + SEND_COUNTER_ARRAY64), \
+ 0, flags, \
+ port_access_u64_csr)
+
+#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
+CNTR_ELEM(#name, \
+ (counter * 8 + SEND_COUNTER_ARRAY64), \
+ 0, flags, \
+ dev_access_u64_csr)
+
+/* CCE */
+#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
+CNTR_ELEM(#name, \
+ (counter * 8 + CCE_COUNTER_ARRAY32), \
+ 0, flags | CNTR_32BIT, \
+ dev_access_u32_csr)
+
+#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
+CNTR_ELEM(#name, \
+ (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
+ 0, flags | CNTR_32BIT, \
+ dev_access_u32_csr)
+
+/* DC */
+#define DC_PERF_CNTR(name, counter, flags) \
+CNTR_ELEM(#name, \
+ counter, \
+ 0, \
+ flags, \
+ dev_access_u64_csr)
+
+#define DC_PERF_CNTR_LCB(name, counter, flags) \
+CNTR_ELEM(#name, \
+ counter, \
+ 0, \
+ flags, \
+ dc_access_lcb_cntr)
+
+/* ibp counters */
+#define SW_IBP_CNTR(name, cntr) \
+CNTR_ELEM(#name, \
+ 0, \
+ 0, \
+ CNTR_SYNTH, \
+ access_ibp_##cntr)
+
+/**
+ * hfi1_addr_from_offset - return addr for readq/writeq
+ * @dd: the dd device
+ * @offset: the offset of the CSR within bar0
+ *
+ * This routine selects the appropriate base address
+ * based on the indicated offset.
+ */
+static inline void __iomem *hfi1_addr_from_offset(
+ const struct hfi1_devdata *dd,
+ u32 offset)
+{
+ if (offset >= dd->base2_start)
+ return dd->kregbase2 + (offset - dd->base2_start);
+ return dd->kregbase1 + offset;
+}
+
+/**
+ * read_csr - read CSR at the indicated offset
+ * @dd: the dd device
+ * @offset: the offset of the CSR within bar0
+ *
+ * Return: the value read or all FF's if there
+ * is no mapping
+ */
+u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
+{
+ if (dd->flags & HFI1_PRESENT)
+ return readq(hfi1_addr_from_offset(dd, offset));
+ return -1;
+}
+
+/**
+ * write_csr - write CSR at the indicated offset
+ * @dd: the dd device
+ * @offset: the offset of the CSR within bar0
+ * @value: value to write
+ */
+void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
+{
+ if (dd->flags & HFI1_PRESENT) {
+ void __iomem *base = hfi1_addr_from_offset(dd, offset);
+
+ /* avoid write to RcvArray */
+ if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
+ return;
+ writeq(value, base);
+ }
+}
+
+/**
+ * get_csr_addr - return the iomem address for offset
+ * @dd: the dd device
+ * @offset: the offset of the CSR within bar0
+ *
+ * Return: The iomem address to use in subsequent
+ * writeq/readq operations.
+ */
+void __iomem *get_csr_addr(
+ const struct hfi1_devdata *dd,
+ u32 offset)
+{
+ if (dd->flags & HFI1_PRESENT)
+ return hfi1_addr_from_offset(dd, offset);
+ return NULL;
+}
+
+static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
+ int mode, u64 value)
+{
+ u64 ret;
+
+ if (mode == CNTR_MODE_R) {
+ ret = read_csr(dd, csr);
+ } else if (mode == CNTR_MODE_W) {
+ write_csr(dd, csr, value);
+ ret = value;
+ } else {
+ dd_dev_err(dd, "Invalid cntr register access mode");
+ return 0;
+ }
+
+ hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
+ return ret;
+}
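+
+/*
+ * Illustrative usage only: the CSR-backed accessors below all funnel
+ * through read_write_csr(), so a read and a write look like
+ *
+ * val = read_write_csr(dd, csr, CNTR_MODE_R, 0);
+ * val = read_write_csr(dd, csr, CNTR_MODE_W, new_value);
+ */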
+
+/* Dev Access */
+static u64 dev_access_u32_csr(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = context;
+ u64 csr = entry->csr;
+
+ if (entry->flags & CNTR_SDMA) {
+ if (vl == CNTR_INVALID_VL)
+ return 0;
+ csr += 0x100 * vl;
+ } else {
+ if (vl != CNTR_INVALID_VL)
+ return 0;
+ }
+ return read_write_csr(dd, csr, mode, data);
+}
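+
+/*
+ * Worked example: a CNTR_SDMA counter read against engine (vl) 3 accesses
+ * entry->csr + 0x300; non-SDMA counters only accept CNTR_INVALID_VL.
+ */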
+
+static u64 access_sde_err_cnt(const struct cntr_entry *entry,
+ void *context, int idx, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ if (dd->per_sdma && idx < dd->num_sdma)
+ return dd->per_sdma[idx].err_cnt;
+ return 0;
+}
+
+static u64 access_sde_int_cnt(const struct cntr_entry *entry,
+ void *context, int idx, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ if (dd->per_sdma && idx < dd->num_sdma)
+ return dd->per_sdma[idx].sdma_int_cnt;
+ return 0;
+}
+
+static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
+ void *context, int idx, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ if (dd->per_sdma && idx < dd->num_sdma)
+ return dd->per_sdma[idx].idle_int_cnt;
+ return 0;
+}
+
+static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
+ void *context, int idx, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ if (dd->per_sdma && idx < dd->num_sdma)
+ return dd->per_sdma[idx].progress_int_cnt;
+ return 0;
+}
+
+static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
+ int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = context;
+
+ u64 val = 0;
+ u64 csr = entry->csr;
+
+ if (entry->flags & CNTR_VL) {
+ if (vl == CNTR_INVALID_VL)
+ return 0;
+ csr += 8 * vl;
+ } else {
+ if (vl != CNTR_INVALID_VL)
+ return 0;
+ }
+
+ val = read_write_csr(dd, csr, mode, data);
+ return val;
+}
+
+static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
+ int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = context;
+ u32 csr = entry->csr;
+ int ret = 0;
+
+ if (vl != CNTR_INVALID_VL)
+ return 0;
+ if (mode == CNTR_MODE_R)
+ ret = read_lcb_csr(dd, csr, &data);
+ else if (mode == CNTR_MODE_W)
+ ret = write_lcb_csr(dd, csr, data);
+
+ if (ret) {
+ if (!(dd->flags & HFI1_SHUTDOWN))
+ dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
+ return 0;
+ }
+
+ hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
+ return data;
+}
+
+/* Port Access */
+static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
+ int vl, int mode, u64 data)
+{
+ struct hfi1_pportdata *ppd = context;
+
+ if (vl != CNTR_INVALID_VL)
+ return 0;
+ return read_write_csr(ppd->dd, entry->csr, mode, data);
+}
+
+static u64 port_access_u64_csr(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_pportdata *ppd = context;
+ u64 val;
+ u64 csr = entry->csr;
+
+ if (entry->flags & CNTR_VL) {
+ if (vl == CNTR_INVALID_VL)
+ return 0;
+ csr += 8 * vl;
+ } else {
+ if (vl != CNTR_INVALID_VL)
+ return 0;
+ }
+ val = read_write_csr(ppd->dd, csr, mode, data);
+ return val;
+}
+
+/* Software defined */
+static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
+ u64 data)
+{
+ u64 ret;
+
+ if (mode == CNTR_MODE_R) {
+ ret = *cntr;
+ } else if (mode == CNTR_MODE_W) {
+ *cntr = data;
+ ret = data;
+ } else {
+ dd_dev_err(dd, "Invalid cntr sw access mode");
+ return 0;
+ }
+
+ hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
+
+ return ret;
+}
+
+static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
+ int vl, int mode, u64 data)
+{
+ struct hfi1_pportdata *ppd = context;
+
+ if (vl != CNTR_INVALID_VL)
+ return 0;
+ return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
+}
+
+static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
+ int vl, int mode, u64 data)
+{
+ struct hfi1_pportdata *ppd = context;
+
+ if (vl != CNTR_INVALID_VL)
+ return 0;
+ return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
+}
+
+static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
+
+ if (vl != CNTR_INVALID_VL)
+ return 0;
+ return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
+}
+
+static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
+ u64 zero = 0;
+ u64 *counter;
+
+ if (vl == CNTR_INVALID_VL)
+ counter = &ppd->port_xmit_discards;
+ else if (vl >= 0 && vl < C_VL_COUNT)
+ counter = &ppd->port_xmit_discards_vl[vl];
+ else
+ counter = &zero;
+
+ return read_write_sw(ppd->dd, counter, mode, data);
+}
+
+static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_pportdata *ppd = context;
+
+ if (vl != CNTR_INVALID_VL)
+ return 0;
+
+ return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
+ mode, data);
+}
+
+static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_pportdata *ppd = context;
+
+ if (vl != CNTR_INVALID_VL)
+ return 0;
+
+ return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
+ mode, data);
+}
+
+u64 get_all_cpu_total(u64 __percpu *cntr)
+{
+ int cpu;
+ u64 counter = 0;
+
+ for_each_possible_cpu(cpu)
+ counter += *per_cpu_ptr(cntr, cpu);
+ return counter;
+}
+
+static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
+ u64 __percpu *cntr,
+ int vl, int mode, u64 data)
+{
+ u64 ret = 0;
+
+ if (vl != CNTR_INVALID_VL)
+ return 0;
+
+ if (mode == CNTR_MODE_R) {
+ ret = get_all_cpu_total(cntr) - *z_val;
+ } else if (mode == CNTR_MODE_W) {
+ /* A write can only zero the counter */
+ if (data == 0)
+ *z_val = get_all_cpu_total(cntr);
+ else
+ dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
+ } else {
+ dd_dev_err(dd, "Invalid cntr sw cpu access mode");
+ return 0;
+ }
+
+ return ret;
+}
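+
+/*
+ * Worked example of the zero-baseline above: if the per-CPU totals sum to
+ * 1000 and *z_val is 400, a read reports 600; writing 0 snapshots
+ * *z_val = 1000, so the next read reports 0.
+ */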
+
+static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = context;
+
+ return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
+ mode, data);
+}
+
+static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = context;
+
+ return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
+ mode, data);
+}
+
+static u64 access_sw_pio_wait(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = context;
+
+ return dd->verbs_dev.n_piowait;
+}
+
+static u64 access_sw_pio_drain(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->verbs_dev.n_piodrain;
+}
+
+static u64 access_sw_ctx0_seq_drop(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = context;
+
+ return dd->ctx0_seq_drop;
+}
+
+static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = context;
+
+ return dd->verbs_dev.n_txwait;
+}
+
+static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = context;
+
+ return dd->verbs_dev.n_kmem_wait;
+}
+
+static u64 access_sw_send_schedule(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
+ mode, data);
+}
+
+/* Software counters for the error status bits within MISC_ERR_STATUS */
+static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[12];
+}
+
+static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[11];
+}
+
+static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[10];
+}
+
+static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[9];
+}
+
+static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[8];
+}
+
+static u64 access_misc_efuse_read_bad_addr_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[7];
+}
+
+static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[6];
+}
+
+static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[5];
+}
+
+static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[4];
+}
+
+static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[3];
+}
+
+static u64 access_misc_csr_write_bad_addr_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[2];
+}
+
+static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[1];
+}
+
+static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[0];
+}
+
+/*
+ * Software counter for the aggregate of
+ * individual CceErrStatus counters
+ */
+static u64 access_sw_cce_err_status_aggregated_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_cce_err_status_aggregate;
+}
+
+/*
+ * Software counters corresponding to each of the
+ * error status bits within CceErrStatus
+ */
+static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[40];
+}
+
+static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[39];
+}
+
+static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[38];
+}
+
+static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[37];
+}
+
+static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[36];
+}
+
+static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[35];
+}
+
+static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[34];
+}
+
+static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[33];
+}
+
+static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[32];
+}
+
+static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[31];
+}
+
+static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[30];
+}
+
+static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[29];
+}
+
+static u64 access_pcic_transmit_back_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[28];
+}
+
+static u64 access_pcic_transmit_front_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[27];
+}
+
+static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[26];
+}
+
+static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[25];
+}
+
+static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[24];
+}
+
+static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[23];
+}
+
+static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[22];
+}
+
+static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[21];
+}
+
+static u64 access_pcic_n_post_dat_q_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[20];
+}
+
+static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[19];
+}
+
+static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[18];
+}
+
+static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[17];
+}
+
+static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[16];
+}
+
+static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[15];
+}
+
+static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[14];
+}
+
+static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[13];
+}
+
+static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[12];
+}
+
+static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[11];
+}
+
+static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[10];
+}
+
+static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[9];
+}
+
+static u64 access_cce_cli2_async_fifo_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[8];
+}
+
+static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[7];
+}
+
+static u64 access_cce_cli0_async_fifo_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[6];
+}
+
+static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[5];
+}
+
+static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[4];
+}
+
+static u64 access_cce_trgt_async_fifo_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[3];
+}
+
+static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[2];
+}
+
+static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[1];
+}
+
+static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[0];
+}
+
+/*
+ * Software counters corresponding to each of the
+ * error status bits within RcvErrStatus
+ */
+static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[63];
+}
+
+static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[62];
+}
+
+static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[61];
+}
+
+static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[60];
+}
+
+static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[59];
+}
+
+static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[58];
+}
+
+static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[57];
+}
+
+static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[56];
+}
+
+static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[55];
+}
+
+static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[54];
+}
+
+static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[53];
+}
+
+static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[52];
+}
+
+static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[51];
+}
+
+static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[50];
+}
+
+static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[49];
+}
+
+static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[48];
+}
+
+static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[47];
+}
+
+static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[46];
+}
+
+static u64 access_rx_hq_intr_csr_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[45];
+}
+
+static u64 access_rx_lookup_csr_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[44];
+}
+
+static u64 access_rx_lookup_rcv_array_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[43];
+}
+
+static u64 access_rx_lookup_rcv_array_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[42];
+}
+
+static u64 access_rx_lookup_des_part2_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[41];
+}
+
+static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[40];
+}
+
+static u64 access_rx_lookup_des_part1_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[39];
+}
+
+static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[38];
+}
+
+static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[37];
+}
+
+static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[36];
+}
+
+static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[35];
+}
+
+static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[34];
+}
+
+static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[33];
+}
+
+static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[32];
+}
+
+static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[31];
+}
+
+static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[30];
+}
+
+static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[29];
+}
+
+static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[28];
+}
+
+static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[27];
+}
+
+static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[26];
+}
+
+static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[25];
+}
+
+static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[24];
+}
+
+static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[23];
+}
+
+static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[22];
+}
+
+static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[21];
+}
+
+static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[20];
+}
+
+static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[19];
+}
+
+static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[18];
+}
+
+static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[17];
+}
+
+static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[16];
+}
+
+static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[15];
+}
+
+static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[14];
+}
+
+static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[13];
+}
+
+static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[12];
+}
+
+static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[11];
+}
+
+static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[10];
+}
+
+static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[9];
+}
+
+static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[8];
+}
+
+static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[7];
+}
+
+static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[6];
+}
+
+static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[5];
+}
+
+static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[4];
+}
+
+static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[3];
+}
+
+static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[2];
+}
+
+static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[1];
+}
+
+static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[0];
+}
+
+/*
+ * Software counters corresponding to each of the
+ * error status bits within SendPioErrStatus
+ */
+static u64 access_pio_pec_sop_head_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[35];
+}
+
+static u64 access_pio_pcc_sop_head_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[34];
+}
+
+static u64 access_pio_last_returned_cnt_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[33];
+}
+
+static u64 access_pio_current_free_cnt_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[32];
+}
+
+static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[31];
+}
+
+static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[30];
+}
+
+static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[29];
+}
+
+static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[28];
+}
+
+static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[27];
+}
+
+static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[26];
+}
+
+static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[25];
+}
+
+static u64 access_pio_block_qw_count_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[24];
+}
+
+static u64 access_pio_write_qw_valid_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[23];
+}
+
+static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[22];
+}
+
+static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[21];
+}
+
+static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[20];
+}
+
+static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[19];
+}
+
+static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[18];
+}
+
+static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[17];
+}
+
+static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[16];
+}
+
+static u64 access_pio_credit_ret_fifo_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[15];
+}
+
+static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[14];
+}
+
+static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[13];
+}
+
+static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[12];
+}
+
+static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[11];
+}
+
+static u64 access_pio_sm_pkt_reset_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[10];
+}
+
+static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[9];
+}
+
+static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[8];
+}
+
+static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[7];
+}
+
+static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[6];
+}
+
+static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[5];
+}
+
+static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[4];
+}
+
+static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[3];
+}
+
+static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[2];
+}
+
+static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[1];
+}
+
+static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[0];
+}
+
+/*
+ * Software counters corresponding to each of the
+ * error status bits within SendDmaErrStatus
+ */
+static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_dma_err_status_cnt[3];
+}
+
+static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_dma_err_status_cnt[2];
+}
+
+static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_dma_err_status_cnt[1];
+}
+
+static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_dma_err_status_cnt[0];
+}
+
+/*
+ * Software counters corresponding to each of the
+ * error status bits within SendEgressErrStatus
+ */
+static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[63];
+}
+
+static u64 access_tx_read_sdma_memory_csr_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[62];
+}
+
+static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[61];
+}
+
+static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[60];
+}
+
+static u64 access_tx_read_sdma_memory_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[59];
+}
+
+static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[58];
+}
+
+static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[57];
+}
+
+static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[56];
+}
+
+static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[55];
+}
+
+static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[54];
+}
+
+static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[53];
+}
+
+static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[52];
+}
+
+static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[51];
+}
+
+static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[50];
+}
+
+static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[49];
+}
+
+static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[48];
+}
+
+static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[47];
+}
+
+static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[46];
+}
+
+static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[45];
+}
+
+static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[44];
+}
+
+static u64 access_tx_read_sdma_memory_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[43];
+}
+
+static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[42];
+}
+
+static u64 access_tx_credit_return_partiy_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[41];
+}
+
+static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[40];
+}
+
+static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[39];
+}
+
+static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[38];
+}
+
+static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[37];
+}
+
+static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[36];
+}
+
+static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[35];
+}
+
+static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[34];
+}
+
+static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[33];
+}
+
+static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[32];
+}
+
+static u64 access_tx_sdma15_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[31];
+}
+
+static u64 access_tx_sdma14_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[30];
+}
+
+static u64 access_tx_sdma13_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[29];
+}
+
+static u64 access_tx_sdma12_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[28];
+}
+
+static u64 access_tx_sdma11_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[27];
+}
+
+static u64 access_tx_sdma10_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[26];
+}
+
+static u64 access_tx_sdma9_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[25];
+}
+
+static u64 access_tx_sdma8_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[24];
+}
+
+static u64 access_tx_sdma7_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[23];
+}
+
+static u64 access_tx_sdma6_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[22];
+}
+
+static u64 access_tx_sdma5_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[21];
+}
+
+static u64 access_tx_sdma4_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[20];
+}
+
+static u64 access_tx_sdma3_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[19];
+}
+
+static u64 access_tx_sdma2_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[18];
+}
+
+static u64 access_tx_sdma1_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[17];
+}
+
+static u64 access_tx_sdma0_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[16];
+}
+
+static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[15];
+}
+
+static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[14];
+}
+
+static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[13];
+}
+
+static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[12];
+}
+
+static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[11];
+}
+
+static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[10];
+}
+
+static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[9];
+}
+
+static u64 access_tx_sdma_launch_intf_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[8];
+}
+
+static u64 access_tx_pio_launch_intf_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[7];
+}
+
+static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[6];
+}
+
+static u64 access_tx_incorrect_link_state_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[5];
+}
+
+static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[4];
+}
+
+static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[3];
+}
+
+static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[2];
+}
+
+static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[1];
+}
+
+static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[0];
+}
+
+/*
+ * Software counters corresponding to each of the
+ * error status bits within SendErrStatus
+ */
+static u64 access_send_csr_write_bad_addr_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_err_status_cnt[2];
+}
+
+static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_err_status_cnt[1];
+}
+
+static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_err_status_cnt[0];
+}
+
+/*
+ * Software counters corresponding to each of the
+ * error status bits within SendCtxtErrStatus
+ */
+static u64 access_pio_write_out_of_bounds_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_ctxt_err_status_cnt[4];
+}
+
+static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_ctxt_err_status_cnt[3];
+}
+
+static u64 access_pio_write_crosses_boundary_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_ctxt_err_status_cnt[2];
+}
+
+static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_ctxt_err_status_cnt[1];
+}
+
+static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_ctxt_err_status_cnt[0];
+}
+
+/*
+ * Software counters corresponding to each of the
+ * error status bits within SendDmaEngErrStatus
+ */
+static u64 access_sdma_header_request_fifo_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[23];
+}
+
+static u64 access_sdma_header_storage_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[22];
+}
+
+static u64 access_sdma_packet_tracking_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[21];
+}
+
+static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[20];
+}
+
+static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[19];
+}
+
+static u64 access_sdma_header_request_fifo_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[18];
+}
+
+static u64 access_sdma_header_storage_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[17];
+}
+
+static u64 access_sdma_packet_tracking_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[16];
+}
+
+static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[15];
+}
+
+static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[14];
+}
+
+static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[13];
+}
+
+static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[12];
+}
+
+static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[11];
+}
+
+static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[10];
+}
+
+static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[9];
+}
+
+static u64 access_sdma_packet_desc_overflow_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[8];
+}
+
+static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[7];
+}
+
+static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[6];
+}
+
+static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[5];
+}
+
+static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[4];
+}
+
+static u64 access_sdma_tail_out_of_bounds_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[3];
+}
+
+static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[2];
+}
+
+static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[1];
+}
+
+static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[0];
+}
+
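+/*
+ * DCC portrcv error counter. A read returns the hardware CSR value
+ * plus the software-counted bypass packet errors, saturating at
+ * CNTR_MAX; a write clears the software count (the CSR side is
+ * handled by read_write_csr()).
+ */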
+static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ u64 val = 0;
+ u64 csr = entry->csr;
+
+ val = read_write_csr(dd, csr, mode, data);
+ if (mode == CNTR_MODE_R) {
+ val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
+ CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
+ } else if (mode == CNTR_MODE_W) {
+ dd->sw_rcv_bypass_packet_errors = 0;
+ } else {
+ dd_dev_err(dd, "Invalid cntr register access mode");
+ return 0;
+ }
+ return val;
+}
+
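+/*
+ * Generate per-port accessors for the software per-CPU counters kept
+ * in ibport_data.rvp. For example, def_access_sw_cpu(rc_acks) defines
+ * access_sw_cpu_rc_acks(), which hands the rc_acks counter and its
+ * z_rc_acks baseline to read_write_cpu().
+ */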
+#define def_access_sw_cpu(cntr) \
+static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
+ void *context, int vl, int mode, u64 data) \
+{ \
+ struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
+ return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
+ ppd->ibport_data.rvp.cntr, vl, \
+ mode, data); \
+}
+
+def_access_sw_cpu(rc_acks);
+def_access_sw_cpu(rc_qacks);
+def_access_sw_cpu(rc_delayed_comp);
+
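+/*
+ * Generate accessors for the software IB port counters in
+ * ibport_data.rvp. These counters are not kept per VL, so a per-VL
+ * query (vl != CNTR_INVALID_VL) reads back as 0; otherwise the
+ * n_<cntr> field is read or written through read_write_sw().
+ */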
+#define def_access_ibp_counter(cntr) \
+static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
+ void *context, int vl, int mode, u64 data) \
+{ \
+ struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
+ \
+ if (vl != CNTR_INVALID_VL) \
+ return 0; \
+ \
+ return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
+ mode, data); \
+}
+
+def_access_ibp_counter(loop_pkts);
+def_access_ibp_counter(rc_resends);
+def_access_ibp_counter(rnr_naks);
+def_access_ibp_counter(other_naks);
+def_access_ibp_counter(rc_timeouts);
+def_access_ibp_counter(pkt_drops);
+def_access_ibp_counter(dmawait);
+def_access_ibp_counter(rc_seqnak);
+def_access_ibp_counter(rc_dupreq);
+def_access_ibp_counter(rdma_seq);
+def_access_ibp_counter(unaligned);
+def_access_ibp_counter(seq_naks);
+def_access_ibp_counter(rc_crwaits);
+
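+/*
+ * Device-wide counter table, indexed by the C_* enum via designated
+ * initializers. Each element carries the counter name, its flags
+ * (CNTR_NORMAL, CNTR_SYNTH, CNTR_VL, ...), and either a hardware CSR
+ * or an access_* callback such as the error-status accessors above.
+ */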
+static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
+[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
+[C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH),
+[C_RX_SHORT_ERR] = RXE32_DEV_CNTR_ELEM(RxShrErr, RCV_SHORT_ERR_CNT, CNTR_SYNTH),
+[C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH),
+[C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH),
+[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
+ CNTR_NORMAL),
+[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
+ CNTR_NORMAL),
+[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
+ RCV_TID_FLOW_GEN_MISMATCH_CNT,
+ CNTR_NORMAL),
+[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
+ CNTR_NORMAL),
+[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
+ RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
+[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
+ CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
+[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
+ CNTR_NORMAL),
+[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
+ CNTR_NORMAL),
+[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
+ CNTR_NORMAL),
+[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
+ CNTR_NORMAL),
+[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
+ CNTR_NORMAL),
+[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
+ CNTR_NORMAL),
+[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
+ CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
+[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
+ CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
+[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
+ CNTR_SYNTH),
+[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
+ access_dc_rcv_err_cnt),
+[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
+ CNTR_SYNTH),
+[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
+ CNTR_SYNTH),
+[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
+ CNTR_SYNTH),
+[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
+ DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
+[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
+ DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
+ CNTR_SYNTH),
+[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
+ DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
+[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
+ CNTR_SYNTH),
+[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
+ CNTR_SYNTH),
+[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
+ CNTR_SYNTH),
+[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
+ CNTR_SYNTH),
+[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
+ CNTR_SYNTH),
+[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
+ CNTR_SYNTH),
+[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
+ CNTR_SYNTH),
+[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
+ CNTR_SYNTH | CNTR_VL),
+[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
+ CNTR_SYNTH | CNTR_VL),
+[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
+[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
+ CNTR_SYNTH | CNTR_VL),
+[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
+[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
+ CNTR_SYNTH | CNTR_VL),
+[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
+ CNTR_SYNTH),
+[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
+ CNTR_SYNTH | CNTR_VL),
+[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
+ CNTR_SYNTH),
+[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
+ CNTR_SYNTH | CNTR_VL),
+[C_DC_TOTAL_CRC] =
+ DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
+ CNTR_SYNTH),
+[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
+ CNTR_SYNTH),
+[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
+ CNTR_SYNTH),
+[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
+ CNTR_SYNTH),
+[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
+ CNTR_SYNTH),
+[C_DC_CRC_MULT_LN] =
+ DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
+ CNTR_SYNTH),
+[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
+ CNTR_SYNTH),
+[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
+ CNTR_SYNTH),
+[C_DC_SEQ_CRC_CNT] =
+ DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
+ CNTR_SYNTH),
+[C_DC_ESC0_ONLY_CNT] =
+ DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
+ CNTR_SYNTH),
+[C_DC_ESC0_PLUS1_CNT] =
+ DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
+ CNTR_SYNTH),
+[C_DC_ESC0_PLUS2_CNT] =
+ DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
+ CNTR_SYNTH),
+[C_DC_REINIT_FROM_PEER_CNT] =
+ DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
+ CNTR_SYNTH),
+[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
+ CNTR_SYNTH),
+[C_DC_MISC_FLG_CNT] =
+ DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
+ CNTR_SYNTH),
+[C_DC_PRF_GOOD_LTP_CNT] =
+ DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
+[C_DC_PRF_ACCEPTED_LTP_CNT] =
+ DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
+ CNTR_SYNTH),
+[C_DC_PRF_RX_FLIT_CNT] =
+ DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
+[C_DC_PRF_TX_FLIT_CNT] =
+ DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
+[C_DC_PRF_CLK_CNTR] =
+ DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
+[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
+ DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
+[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
+ DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
+ CNTR_SYNTH),
+[C_DC_PG_STS_TX_SBE_CNT] =
+ DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
+[C_DC_PG_STS_TX_MBE_CNT] =
+ DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
+ CNTR_SYNTH),
+[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
+ access_sw_cpu_intr),
+[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
+ access_sw_cpu_rcv_limit),
+[C_SW_CTX0_SEQ_DROP] = CNTR_ELEM("SeqDrop0", 0, 0, CNTR_NORMAL,
+ access_sw_ctx0_seq_drop),
+[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
+ access_sw_vtx_wait),
+[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
+ access_sw_pio_wait),
+[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
+ access_sw_pio_drain),
+[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
+ access_sw_kmem_wait),
+[C_SW_TID_WAIT] = CNTR_ELEM("TidWait", 0, 0, CNTR_NORMAL,
+ hfi1_access_sw_tid_wait),
+[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
+ access_sw_send_schedule),
+[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
+ SEND_DMA_DESC_FETCHED_CNT, 0,
+ CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
+ dev_access_u32_csr),
+[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
+ CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
+ access_sde_int_cnt),
+[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
+ CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
+ access_sde_err_cnt),
+[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
+ CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
+ access_sde_idle_int_cnt),
+[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
+ CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
+ access_sde_progress_int_cnt),
+/* MISC_ERR_STATUS */
+[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
+ CNTR_NORMAL,
+ access_misc_pll_lock_fail_err_cnt),
+[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
+ CNTR_NORMAL,
+ access_misc_mbist_fail_err_cnt),
+[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
+ CNTR_NORMAL,
+ access_misc_invalid_eep_cmd_err_cnt),
+[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
+ CNTR_NORMAL,
+ access_misc_efuse_done_parity_err_cnt),
+[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
+ CNTR_NORMAL,
+ access_misc_efuse_write_err_cnt),
+[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
+ 0, CNTR_NORMAL,
+ access_misc_efuse_read_bad_addr_err_cnt),
+[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
+ CNTR_NORMAL,
+ access_misc_efuse_csr_parity_err_cnt),
+[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
+ CNTR_NORMAL,
+ access_misc_fw_auth_failed_err_cnt),
+[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
+ CNTR_NORMAL,
+ access_misc_key_mismatch_err_cnt),
+[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
+ CNTR_NORMAL,
+ access_misc_sbus_write_failed_err_cnt),
+[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
+ CNTR_NORMAL,
+ access_misc_csr_write_bad_addr_err_cnt),
+[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
+ CNTR_NORMAL,
+ access_misc_csr_read_bad_addr_err_cnt),
+[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
+ CNTR_NORMAL,
+ access_misc_csr_parity_err_cnt),
+/* CceErrStatus */
+[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
+ CNTR_NORMAL,
+ access_sw_cce_err_status_aggregated_cnt),
+[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_msix_csr_parity_err_cnt),
+[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_int_map_unc_err_cnt),
+[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_int_map_cor_err_cnt),
+[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_msix_table_unc_err_cnt),
+[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_msix_table_cor_err_cnt),
+[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_cce_rxdma_conv_fifo_parity_err_cnt),
+[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_cce_rcpl_async_fifo_parity_err_cnt),
+[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_seg_write_bad_addr_err_cnt),
+[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_seg_read_bad_addr_err_cnt),
+[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
+ CNTR_NORMAL,
+ access_la_triggered_cnt),
+[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_trgt_cpl_timeout_err_cnt),
+[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_receive_parity_err_cnt),
+[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_transmit_back_parity_err_cnt),
+[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_pcic_transmit_front_parity_err_cnt),
+[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_cpl_dat_q_unc_err_cnt),
+[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_cpl_hd_q_unc_err_cnt),
+[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_post_dat_q_unc_err_cnt),
+[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_post_hd_q_unc_err_cnt),
+[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_retry_sot_mem_unc_err_cnt),
+[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_retry_mem_unc_err),
+[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_n_post_dat_q_parity_err_cnt),
+[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_n_post_h_q_parity_err_cnt),
+[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_cpl_dat_q_cor_err_cnt),
+[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_cpl_hd_q_cor_err_cnt),
+[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_post_dat_q_cor_err_cnt),
+[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_post_hd_q_cor_err_cnt),
+[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_retry_sot_mem_cor_err_cnt),
+[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_retry_mem_cor_err_cnt),
+[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
+ "CceCli1AsyncFifoDbgParityError", 0, 0,
+ CNTR_NORMAL,
+ access_cce_cli1_async_fifo_dbg_parity_err_cnt),
+[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
+ "CceCli1AsyncFifoRxdmaParityError", 0, 0,
+ CNTR_NORMAL,
+ access_cce_cli1_async_fifo_rxdma_parity_err_cnt
+ ),
+[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
+ "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
+[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
+ "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
+[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_cce_cli2_async_fifo_parity_err_cnt),
+[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_csr_cfg_bus_parity_err_cnt),
+[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_cce_cli0_async_fifo_parity_err_cnt),
+[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_rspd_data_parity_err_cnt),
+[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_trgt_access_err_cnt),
+[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_cce_trgt_async_fifo_parity_err_cnt),
+[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_csr_write_bad_addr_err_cnt),
+[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_csr_read_bad_addr_err_cnt),
+[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_ccs_csr_parity_err_cnt),
+
+/* RcvErrStatus */
+[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_csr_parity_err_cnt),
+[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_csr_write_bad_addr_err_cnt),
+[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_csr_read_bad_addr_err_cnt),
+[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dma_csr_unc_err_cnt),
+[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dma_dq_fsm_encoding_err_cnt),
+[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dma_eq_fsm_encoding_err_cnt),
+[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dma_csr_parity_err_cnt),
+[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_data_cor_err_cnt),
+[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_data_unc_err_cnt),
+[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dma_data_fifo_rd_cor_err_cnt),
+[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dma_data_fifo_rd_unc_err_cnt),
+[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dma_hdr_fifo_rd_cor_err_cnt),
+[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dma_hdr_fifo_rd_unc_err_cnt),
+[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_desc_part2_cor_err_cnt),
+[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_desc_part2_unc_err_cnt),
+[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_desc_part1_cor_err_cnt),
+[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_desc_part1_unc_err_cnt),
+[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_hq_intr_fsm_err_cnt),
+[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_hq_intr_csr_parity_err_cnt),
+[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_lookup_csr_parity_err_cnt),
+[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_lookup_rcv_array_cor_err_cnt),
+[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_lookup_rcv_array_unc_err_cnt),
+[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_rx_lookup_des_part2_parity_err_cnt),
+[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
+ 0, CNTR_NORMAL,
+ access_rx_lookup_des_part1_unc_cor_err_cnt),
+[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_lookup_des_part1_unc_err_cnt),
+[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_next_free_buf_cor_err_cnt),
+[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_next_free_buf_unc_err_cnt),
+[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
+ "RxRbufFlInitWrAddrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rbuf_fl_init_wr_addr_parity_err_cnt),
+[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_rx_rbuf_fl_initdone_parity_err_cnt),
+[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_rx_rbuf_fl_write_addr_parity_err_cnt),
+[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_fl_rd_addr_parity_err_cnt),
+[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_empty_err_cnt),
+[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_full_err_cnt),
+[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
+ CNTR_NORMAL,
+ access_rbuf_bad_lookup_err_cnt),
+[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rbuf_ctx_id_parity_err_cnt),
+[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rbuf_csr_qeopdw_parity_err_cnt),
+[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
+ "RxRbufCsrQNumOfPktParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
+[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
+ "RxRbufCsrQTlPtrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
+[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
+[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
+[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
+ 0, 0, CNTR_NORMAL,
+ access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
+[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
+[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
+ "RxRbufCsrQHeadBufNumParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
+[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
+ 0, CNTR_NORMAL,
+ access_rx_rbuf_block_list_read_cor_err_cnt),
+[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
+ 0, CNTR_NORMAL,
+ access_rx_rbuf_block_list_read_unc_err_cnt),
+[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_lookup_des_cor_err_cnt),
+[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_lookup_des_unc_err_cnt),
+[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
+ "RxRbufLookupDesRegUncCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
+[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_lookup_des_reg_unc_err_cnt),
+[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_free_list_cor_err_cnt),
+[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_free_list_unc_err_cnt),
+[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rcv_fsm_encoding_err_cnt),
+[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dma_flag_cor_err_cnt),
+[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dma_flag_unc_err_cnt),
+[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dc_sop_eop_parity_err_cnt),
+[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rcv_csr_parity_err_cnt),
+[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rcv_qp_map_table_cor_err_cnt),
+[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rcv_qp_map_table_unc_err_cnt),
+[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rcv_data_cor_err_cnt),
+[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rcv_data_unc_err_cnt),
+[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rcv_hdr_cor_err_cnt),
+[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rcv_hdr_unc_err_cnt),
+[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dc_intf_parity_err_cnt),
+[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dma_csr_cor_err_cnt),
+/* SendPioErrStatus */
+[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_pec_sop_head_parity_err_cnt),
+[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_pcc_sop_head_parity_err_cnt),
+[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
+ 0, 0, CNTR_NORMAL,
+ access_pio_last_returned_cnt_parity_err_cnt),
+[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_pio_current_free_cnt_parity_err_cnt),
+[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
+ CNTR_NORMAL,
+ access_pio_reserved_31_err_cnt),
+[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
+ CNTR_NORMAL,
+ access_pio_reserved_30_err_cnt),
+[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_ppmc_sop_len_err_cnt),
+[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_ppmc_bqc_mem_parity_err_cnt),
+[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_vl_fifo_parity_err_cnt),
+[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_vlf_sop_parity_err_cnt),
+[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_vlf_v1_len_parity_err_cnt),
+[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_block_qw_count_parity_err_cnt),
+[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_write_qw_valid_parity_err_cnt),
+[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_state_machine_err_cnt),
+[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_write_data_parity_err_cnt),
+[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_host_addr_mem_cor_err_cnt),
+[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_host_addr_mem_unc_err_cnt),
+[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
+[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_init_sm_in_err_cnt),
+[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_ppmc_pbl_fifo_err_cnt),
+[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_pio_credit_ret_fifo_parity_err_cnt),
+[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_v1_len_mem_bank1_cor_err_cnt),
+[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_v1_len_mem_bank0_cor_err_cnt),
+[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_v1_len_mem_bank1_unc_err_cnt),
+[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_v1_len_mem_bank0_unc_err_cnt),
+[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_sm_pkt_reset_parity_err_cnt),
+[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_pkt_evict_fifo_parity_err_cnt),
+[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
+ "PioSbrdctrlCrrelFifoParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
+[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_sbrdctl_crrel_parity_err_cnt),
+[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_pec_fifo_parity_err_cnt),
+[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_pcc_fifo_parity_err_cnt),
+[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
+ CNTR_NORMAL,
+ access_pio_sb_mem_fifo1_err_cnt),
+[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
+ CNTR_NORMAL,
+ access_pio_sb_mem_fifo0_err_cnt),
+[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_csr_parity_err_cnt),
+[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_write_addr_parity_err_cnt),
+[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_write_bad_ctxt_err_cnt),
+/* SendDmaErrStatus */
+[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
+ 0, CNTR_NORMAL,
+ access_sdma_pcie_req_tracking_cor_err_cnt),
+[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
+ 0, CNTR_NORMAL,
+ access_sdma_pcie_req_tracking_unc_err_cnt),
+[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_csr_parity_err_cnt),
+[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_rpy_tag_err_cnt),
+/* SendEgressErrStatus */
+[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_read_pio_memory_csr_unc_err_cnt),
+[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
+ 0, CNTR_NORMAL,
+ access_tx_read_sdma_memory_csr_err_cnt),
+[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_egress_fifo_cor_err_cnt),
+[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_read_pio_memory_cor_err_cnt),
+[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_read_sdma_memory_cor_err_cnt),
+[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_sb_hdr_cor_err_cnt),
+[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_credit_overrun_err_cnt),
+[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_launch_fifo8_cor_err_cnt),
+[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_launch_fifo7_cor_err_cnt),
+[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_launch_fifo6_cor_err_cnt),
+[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_launch_fifo5_cor_err_cnt),
+[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_launch_fifo4_cor_err_cnt),
+[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_launch_fifo3_cor_err_cnt),
+[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_launch_fifo2_cor_err_cnt),
+[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_launch_fifo1_cor_err_cnt),
+[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_launch_fifo0_cor_err_cnt),
+[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_credit_return_vl_err_cnt),
+[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_hcrc_insertion_err_cnt),
+[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_egress_fifo_unc_err_cnt),
+[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_read_pio_memory_unc_err_cnt),
+[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_read_sdma_memory_unc_err_cnt),
+[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_sb_hdr_unc_err_cnt),
+[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_credit_return_partiy_err_cnt),
+[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_launch_fifo8_unc_or_parity_err_cnt),
+[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_launch_fifo7_unc_or_parity_err_cnt),
+[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_launch_fifo6_unc_or_parity_err_cnt),
+[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_launch_fifo5_unc_or_parity_err_cnt),
+[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_launch_fifo4_unc_or_parity_err_cnt),
+[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_launch_fifo3_unc_or_parity_err_cnt),
+[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_launch_fifo2_unc_or_parity_err_cnt),
+[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_launch_fifo1_unc_or_parity_err_cnt),
+[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_launch_fifo0_unc_or_parity_err_cnt),
+[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma15_disallowed_packet_err_cnt),
+[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma14_disallowed_packet_err_cnt),
+[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma13_disallowed_packet_err_cnt),
+[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma12_disallowed_packet_err_cnt),
+[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma11_disallowed_packet_err_cnt),
+[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma10_disallowed_packet_err_cnt),
+[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma9_disallowed_packet_err_cnt),
+[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma8_disallowed_packet_err_cnt),
+[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma7_disallowed_packet_err_cnt),
+[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma6_disallowed_packet_err_cnt),
+[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma5_disallowed_packet_err_cnt),
+[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma4_disallowed_packet_err_cnt),
+[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma3_disallowed_packet_err_cnt),
+[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma2_disallowed_packet_err_cnt),
+[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma1_disallowed_packet_err_cnt),
+[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma0_disallowed_packet_err_cnt),
+[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_config_parity_err_cnt),
+[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_sbrd_ctl_csr_parity_err_cnt),
+[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_launch_csr_parity_err_cnt),
+[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_illegal_vl_err_cnt),
+[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
+ "TxSbrdCtlStateMachineParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_sbrd_ctl_state_machine_parity_err_cnt),
+[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
+ CNTR_NORMAL,
+ access_egress_reserved_10_err_cnt),
+[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
+ CNTR_NORMAL,
+ access_egress_reserved_9_err_cnt),
+[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma_launch_intf_parity_err_cnt),
+[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_pio_launch_intf_parity_err_cnt),
+[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
+ CNTR_NORMAL,
+ access_egress_reserved_6_err_cnt),
+[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_incorrect_link_state_err_cnt),
+[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_linkdown_err_cnt),
+[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
+ "EgressFifoUnderrunOrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_egress_fifi_underrun_or_parity_err_cnt),
+[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
+ CNTR_NORMAL,
+ access_egress_reserved_2_err_cnt),
+[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_pkt_integrity_mem_unc_err_cnt),
+[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_pkt_integrity_mem_cor_err_cnt),
+/* SendErrStatus */
+[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
+ CNTR_NORMAL,
+ access_send_csr_write_bad_addr_err_cnt),
+[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
+ CNTR_NORMAL,
+ access_send_csr_read_bad_addr_err_cnt),
+[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_send_csr_parity_cnt),
+/* SendCtxtErrStatus */
+[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_write_out_of_bounds_err_cnt),
+[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_write_overflow_err_cnt),
+[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
+ 0, 0, CNTR_NORMAL,
+ access_pio_write_crosses_boundary_err_cnt),
+[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_disallowed_packet_err_cnt),
+[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_inconsistent_sop_err_cnt),
+/* SendDmaEngErrStatus */
+[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
+ 0, 0, CNTR_NORMAL,
+ access_sdma_header_request_fifo_cor_err_cnt),
+[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_header_storage_cor_err_cnt),
+[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_packet_tracking_cor_err_cnt),
+[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_assembly_cor_err_cnt),
+[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_desc_table_cor_err_cnt),
+[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
+ 0, 0, CNTR_NORMAL,
+ access_sdma_header_request_fifo_unc_err_cnt),
+[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_header_storage_unc_err_cnt),
+[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_packet_tracking_unc_err_cnt),
+[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_assembly_unc_err_cnt),
+[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_desc_table_unc_err_cnt),
+[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_timeout_err_cnt),
+[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_header_length_err_cnt),
+[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_header_address_err_cnt),
+[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_header_select_err_cnt),
+[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_reserved_9_err_cnt),
+[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_packet_desc_overflow_err_cnt),
+[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_length_mismatch_err_cnt),
+[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_halt_err_cnt),
+[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_mem_read_err_cnt),
+[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_first_desc_err_cnt),
+[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_tail_out_of_bounds_err_cnt),
+[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_too_long_err_cnt),
+[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_gen_mismatch_err_cnt),
+[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_wrong_dw_err_cnt),
+};
+
+static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
+[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
+ CNTR_NORMAL),
+[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
+ CNTR_NORMAL),
+[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
+ CNTR_NORMAL),
+[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
+ CNTR_NORMAL),
+[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
+ CNTR_NORMAL),
+[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
+ CNTR_NORMAL),
+[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
+ CNTR_NORMAL),
+[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
+[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
+[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
+[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
+ CNTR_SYNTH | CNTR_VL),
+[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
+ CNTR_SYNTH | CNTR_VL),
+[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
+ CNTR_SYNTH | CNTR_VL),
+[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
+[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
+[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
+ access_sw_link_dn_cnt),
+[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
+ access_sw_link_up_cnt),
+[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
+ access_sw_unknown_frame_cnt),
+[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
+ access_sw_xmit_discards),
+[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
+ CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
+ access_sw_xmit_discards),
+[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
+ access_xmit_constraint_errs),
+[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
+ access_rcv_constraint_errs),
+[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
+[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
+[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
+[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
+[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
+[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
+[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
+[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
+[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
+[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
+[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
+[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
+[C_SW_IBP_RC_CRWAITS] = SW_IBP_CNTR(RcCrWait, rc_crwaits),
+[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
+ access_sw_cpu_rc_acks),
+[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
+ access_sw_cpu_rc_qacks),
+[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
+ access_sw_cpu_rc_delayed_comp),
+[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
+[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
+[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
+[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
+[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
+[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
+[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
+[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
+[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
+[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
+[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
+[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
+[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
+[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
+[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
+[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
+[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
+[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
+[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
+[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
+[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
+[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
+[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
+[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
+[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
+[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
+[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
+[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
+[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
+[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
+[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
+[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
+[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
+[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
+[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
+[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
+[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
+[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
+[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
+[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
+[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
+[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
+[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
+[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
+[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
+[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
+[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
+[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
+[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
+[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
+[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
+[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
+[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
+[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
+[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
+[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
+[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
+[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
+[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
+[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
+[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
+[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
+[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
+[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
+[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
+[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
+[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
+[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
+[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
+[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
+[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
+[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
+[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
+[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
+[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
+[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
+[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
+[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
+[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
+[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
+};
+
+/* ======================================================================== */
+
+/* return true if this is chip revision A */
+int is_ax(struct hfi1_devdata *dd)
+{
+ u8 chip_rev_minor =
+ dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
+ & CCE_REVISION_CHIP_REV_MINOR_MASK;
+ return (chip_rev_minor & 0xf0) == 0;
+}
+
+/* return true if this is chip revision B */
+int is_bx(struct hfi1_devdata *dd)
+{
+ u8 chip_rev_minor =
+ dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
+ & CCE_REVISION_CHIP_REV_MINOR_MASK;
+ return (chip_rev_minor & 0xF0) == 0x10;
+}
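+
+/*
+ * Note: both checks above look only at the upper nibble of the minor chip
+ * revision -- 0x0 is treated as an A-step part and 0x1 as a B-step part;
+ * the lower nibble is ignored.
+ */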
+
+/* return true if the kernel urgent interrupt is masked (disabled) for rcd */
+bool is_urg_masked(struct hfi1_ctxtdata *rcd)
+{
+ u64 mask;
+ u32 is = IS_RCVURGENT_START + rcd->ctxt;
+ u8 bit = is % 64;
+
+ mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64)));
+ return !(mask & BIT_ULL(bit));
+}
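+
+/*
+ * Note: the urgent interrupt source for a context is IS_RCVURGENT_START +
+ * ctxt; each 8-byte CCE_INT_MASK CSR covers 64 sources, hence the is / 64
+ * CSR index and is % 64 bit position used above.
+ */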
+
+/*
+ * Append string s to buffer buf, prefixing a comma separator when the buffer
+ * already has content. Arguments curp and lenp are the current position and
+ * remaining length, respectively.
+ *
+ * return 0 on success, 1 on out of room
+ */
+static int append_str(char *buf, char **curp, int *lenp, const char *s)
+{
+ char *p = *curp;
+ int len = *lenp;
+ int result = 0; /* success */
+ char c;
+
+ /* add a comma if this is not the first entry in the buffer */
+ if (p != buf) {
+ if (len == 0) {
+ result = 1; /* out of room */
+ goto done;
+ }
+ *p++ = ',';
+ len--;
+ }
+
+ /* copy the string */
+ while ((c = *s++) != 0) {
+ if (len == 0) {
+ result = 1; /* out of room */
+ goto done;
+ }
+ *p++ = c;
+ len--;
+ }
+
+done:
+ /* write return values */
+ *curp = p;
+ *lenp = len;
+
+ return result;
+}
+
+/*
+ * Using the given flag table, print a comma separated string into
+ * the buffer. End in '*' if the buffer is too short.
+ */
+static char *flag_string(char *buf, int buf_len, u64 flags,
+ const struct flag_table *table, int table_size)
+{
+ char extra[32];
+ char *p = buf;
+ int len = buf_len;
+ int no_room = 0;
+ int i;
+
+ /* make sure there are at least 2 bytes so we can form "*" */
+ if (len < 2)
+ return "";
+
+ len--; /* leave room for a nul */
+ for (i = 0; i < table_size; i++) {
+ if (flags & table[i].flag) {
+ no_room = append_str(buf, &p, &len, table[i].str);
+ if (no_room)
+ break;
+ flags &= ~table[i].flag;
+ }
+ }
+
+ /* any undocumented bits left? */
+ if (!no_room && flags) {
+ snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
+ no_room = append_str(buf, &p, &len, extra);
+ }
+
+ /* add * if ran out of room */
+ if (no_room) {
+ /* may need to back up to add space for a '*' */
+ if (len == 0)
+ --p;
+ *p++ = '*';
+ }
+
+ /* add final nul - space already allocated above */
+ *p = 0;
+ return buf;
+}
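+
+/*
+ * Illustrative example (names are hypothetical): given a table that maps
+ * bit 0 to "ErrA" and bit 2 to "ErrC", flag_string(buf, len, 0x5, ...)
+ * yields "ErrA,ErrC".  Bits without a table entry are reported as
+ * "bits 0x<value>", and a result that did not fit ends in '*'.
+ */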
+
+/* first 8 CCE error interrupt source names */
+static const char * const cce_misc_names[] = {
+ "CceErrInt", /* 0 */
+ "RxeErrInt", /* 1 */
+ "MiscErrInt", /* 2 */
+ "Reserved3", /* 3 */
+ "PioErrInt", /* 4 */
+ "SDmaErrInt", /* 5 */
+ "EgressErrInt", /* 6 */
+ "TxeErrInt" /* 7 */
+};
+
+/*
+ * Return the miscellaneous error interrupt name.
+ */
+static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
+{
+ if (source < ARRAY_SIZE(cce_misc_names))
+ strscpy_pad(buf, cce_misc_names[source], bsize);
+ else
+ snprintf(buf, bsize, "Reserved%u",
+ source + IS_GENERAL_ERR_START);
+
+ return buf;
+}
+
+/*
+ * Return the SDMA engine error interrupt name.
+ */
+static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
+{
+ snprintf(buf, bsize, "SDmaEngErrInt%u", source);
+ return buf;
+}
+
+/*
+ * Return the send context error interrupt name.
+ */
+static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
+{
+ snprintf(buf, bsize, "SendCtxtErrInt%u", source);
+ return buf;
+}
+
+static const char * const various_names[] = {
+ "PbcInt",
+ "GpioAssertInt",
+ "Qsfp1Int",
+ "Qsfp2Int",
+ "TCritInt"
+};
+
+/*
+ * Return the "various" interrupt name.
+ */
+static char *is_various_name(char *buf, size_t bsize, unsigned int source)
+{
+ if (source < ARRAY_SIZE(various_names))
+ strscpy_pad(buf, various_names[source], bsize);
+ else
+ snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
+ return buf;
+}
+
+/*
+ * Return the DC interrupt name.
+ */
+static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
+{
+ static const char * const dc_int_names[] = {
+ "common",
+ "lcb",
+ "8051",
+ "lbm" /* local block merge */
+ };
+
+ if (source < ARRAY_SIZE(dc_int_names))
+ snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
+ else
+ snprintf(buf, bsize, "DCInt%u", source);
+ return buf;
+}
+
+static const char * const sdma_int_names[] = {
+ "SDmaInt",
+ "SdmaIdleInt",
+ "SdmaProgressInt",
+};
+
+/*
+ * Return the SDMA engine interrupt name.
+ */
+static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
+{
+ /* what interrupt */
+ unsigned int what = source / TXE_NUM_SDMA_ENGINES;
+ /* which engine */
+ unsigned int which = source % TXE_NUM_SDMA_ENGINES;
+
+ if (likely(what < 3))
+ snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
+ else
+ snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
+ return buf;
+}
+
+/*
+ * Return the receive available interrupt name.
+ */
+static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
+{
+ snprintf(buf, bsize, "RcvAvailInt%u", source);
+ return buf;
+}
+
+/*
+ * Return the receive urgent interrupt name.
+ */
+static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
+{
+ snprintf(buf, bsize, "RcvUrgentInt%u", source);
+ return buf;
+}
+
+/*
+ * Return the send credit interrupt name.
+ */
+static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
+{
+ snprintf(buf, bsize, "SendCreditInt%u", source);
+ return buf;
+}
+
+/*
+ * Return the reserved interrupt name.
+ */
+static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
+{
+ snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
+ return buf;
+}
+
+static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags,
+ cce_err_status_flags,
+ ARRAY_SIZE(cce_err_status_flags));
+}
+
+static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags,
+ rxe_err_status_flags,
+ ARRAY_SIZE(rxe_err_status_flags));
+}
+
+static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags, misc_err_status_flags,
+ ARRAY_SIZE(misc_err_status_flags));
+}
+
+static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags,
+ pio_err_status_flags,
+ ARRAY_SIZE(pio_err_status_flags));
+}
+
+static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags,
+ sdma_err_status_flags,
+ ARRAY_SIZE(sdma_err_status_flags));
+}
+
+static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags,
+ egress_err_status_flags,
+ ARRAY_SIZE(egress_err_status_flags));
+}
+
+static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags,
+ egress_err_info_flags,
+ ARRAY_SIZE(egress_err_info_flags));
+}
+
+static char *send_err_status_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags,
+ send_err_status_flags,
+ ARRAY_SIZE(send_err_status_flags));
+}
+
+static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
+{
+ char buf[96];
+ int i = 0;
+
+ /*
+ * For most of these errors, there is nothing that can be done except
+ * to report or record them.
+ */
+ dd_dev_info(dd, "CCE Error: %s\n",
+ cce_err_status_string(buf, sizeof(buf), reg));
+
+ if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
+ is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
+ /*
+ * This error requires a manual drop into SPC freeze mode and
+ * then a fix up.
+ */
+ start_freeze_handling(dd->pport, FREEZE_SELF);
+ }
+
+ for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
+ if (reg & (1ull << i)) {
+ incr_cntr64(&dd->cce_err_status_cnt[i]);
+ /* maintain a counter over all cce_err_status errors */
+ incr_cntr64(&dd->sw_cce_err_status_aggregate);
+ }
+ }
+}
+
+/*
+ * Check counters for receive errors that do not have an interrupt
+ * associated with them.
+ */
+#define RCVERR_CHECK_TIME 10
+static void update_rcverr_timer(struct timer_list *t)
+{
+ struct hfi1_devdata *dd = timer_container_of(dd, t, rcverr_timer);
+ struct hfi1_pportdata *ppd = dd->pport;
+ u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
+
+ if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
+ ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
+ dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
+ set_link_down_reason(
+ ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
+ OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
+ queue_work(ppd->link_wq, &ppd->link_bounce_work);
+ }
+ dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
+
+ mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
+}
+
+static int init_rcverr(struct hfi1_devdata *dd)
+{
+ timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0);
+ /* Assume the hardware counter has been reset */
+ dd->rcv_ovfl_cnt = 0;
+ return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
+}
+
+static void free_rcverr(struct hfi1_devdata *dd)
+{
+ if (dd->rcverr_timer.function)
+ timer_delete_sync(&dd->rcverr_timer);
+}
+
+static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
+{
+ char buf[96];
+ int i = 0;
+
+ dd_dev_info(dd, "Receive Error: %s\n",
+ rxe_err_status_string(buf, sizeof(buf), reg));
+
+ if (reg & ALL_RXE_FREEZE_ERR) {
+ int flags = 0;
+
+ /*
+ * Freeze mode recovery is disabled for the errors
+ * in RXE_FREEZE_ABORT_MASK
+ */
+ if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
+ flags = FREEZE_ABORT;
+
+ start_freeze_handling(dd->pport, flags);
+ }
+
+ for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
+ if (reg & (1ull << i))
+ incr_cntr64(&dd->rcv_err_status_cnt[i]);
+ }
+}
+
+static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
+{
+ char buf[96];
+ int i = 0;
+
+ dd_dev_info(dd, "Misc Error: %s",
+ misc_err_status_string(buf, sizeof(buf), reg));
+ for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
+ if (reg & (1ull << i))
+ incr_cntr64(&dd->misc_err_status_cnt[i]);
+ }
+}
+
+static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
+{
+ char buf[96];
+ int i = 0;
+
+ dd_dev_info(dd, "PIO Error: %s\n",
+ pio_err_status_string(buf, sizeof(buf), reg));
+
+ if (reg & ALL_PIO_FREEZE_ERR)
+ start_freeze_handling(dd->pport, 0);
+
+ for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
+ if (reg & (1ull << i))
+ incr_cntr64(&dd->send_pio_err_status_cnt[i]);
+ }
+}
+
+static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
+{
+ char buf[96];
+ int i = 0;
+
+ dd_dev_info(dd, "SDMA Error: %s\n",
+ sdma_err_status_string(buf, sizeof(buf), reg));
+
+ if (reg & ALL_SDMA_FREEZE_ERR)
+ start_freeze_handling(dd->pport, 0);
+
+ for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
+ if (reg & (1ull << i))
+ incr_cntr64(&dd->send_dma_err_status_cnt[i]);
+ }
+}
+
+static inline void __count_port_discards(struct hfi1_pportdata *ppd)
+{
+ incr_cntr64(&ppd->port_xmit_discards);
+}
+
+static void count_port_inactive(struct hfi1_devdata *dd)
+{
+ __count_port_discards(dd->pport);
+}
+
+/*
+ * We have had a "disallowed packet" error during egress. Determine the
+ * integrity check which failed, and update the relevant error counter, etc.
+ *
+ * Note that the SEND_EGRESS_ERR_INFO register has only a single
+ * bit of state per integrity check, and so we can miss the reason for an
+ * egress error if more than one packet fails the same integrity check
+ * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
+ */
+static void handle_send_egress_err_info(struct hfi1_devdata *dd,
+ int vl)
+{
+ struct hfi1_pportdata *ppd = dd->pport;
+ u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
+ u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
+ char buf[96];
+
+ /* clear down all observed info as quickly as possible after read */
+ write_csr(dd, SEND_EGRESS_ERR_INFO, info);
+
+ dd_dev_info(dd,
+ "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
+ info, egress_err_info_string(buf, sizeof(buf), info), src);
+
+ /* Eventually add other counters for each bit */
+ if (info & PORT_DISCARD_EGRESS_ERRS) {
+ int weight, i;
+
+ /*
+ * Count all applicable bits as individual errors and
+ * attribute them to the packet that triggered this handler.
+ * This may not be completely accurate due to limitations
+ * on the available hardware error information. There is
+ * a single information register and any number of error
+ * packets may have occurred and contributed to it before
+ * this routine is called. This means that:
+ * a) If multiple packets with the same error occur before
+ * this routine is called, earlier packets are missed.
+ * There is only a single bit for each error type.
+ * b) Errors may not be attributed to the correct VL.
+ * The driver is attributing all bits in the info register
+ * to the packet that triggered this call, but bits
+ * could be an accumulation of different packets with
+ * different VLs.
+ * c) A single error packet may have multiple counts attached
+ * to it. There is no way for the driver to know if
+ * multiple bits set in the info register are due to a
+ * single packet or multiple packets. The driver assumes
+ * multiple packets.
+ */
+ weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
+ for (i = 0; i < weight; i++) {
+ __count_port_discards(ppd);
+ if (vl >= 0 && vl < TXE_NUM_DATA_VL)
+ incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
+ else if (vl == 15)
+ incr_cntr64(&ppd->port_xmit_discards_vl[C_VL_15]);
+ }
+ }
+}
+
+/*
+ * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
+ * register. Does it represent a 'port inactive' error?
+ */
+static inline int port_inactive_err(u64 posn)
+{
+ return (posn >= SEES(TX_LINKDOWN) &&
+ posn <= SEES(TX_INCORRECT_LINK_STATE));
+}
+
+/*
+ * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
+ * register. Does it represent a 'disallowed packet' error?
+ */
+static inline int disallowed_pkt_err(int posn)
+{
+ return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
+ posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
+}
+
+/*
+ * Input value is a bit position of one of the SDMA engine disallowed
+ * packet errors. Return which engine. Use of this must be guarded by
+ * disallowed_pkt_err().
+ */
+static inline int disallowed_pkt_engine(int posn)
+{
+ return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
+}
+
+/*
+ * Translate an SDMA engine to a VL. Return -1 if the translation cannot
+ * be done.
+ */
+static int engine_to_vl(struct hfi1_devdata *dd, int engine)
+{
+ struct sdma_vl_map *m;
+ int vl;
+
+ /* range check */
+ if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
+ return -1;
+
+ rcu_read_lock();
+ m = rcu_dereference(dd->sdma_map);
+ vl = m->engine_to_vl[engine];
+ rcu_read_unlock();
+
+ return vl;
+}
+
+/*
+ * Translate the send context (software index) into a VL. Return -1 if the
+ * translation cannot be done.
+ */
+static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
+{
+ struct send_context_info *sci;
+ struct send_context *sc;
+ int i;
+
+ sci = &dd->send_contexts[sw_index];
+
+ /* there is no information for user (PSM) and ack contexts */
+ if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
+ return -1;
+
+ sc = sci->sc;
+ if (!sc)
+ return -1;
+ if (dd->vld[15].sc == sc)
+ return 15;
+ for (i = 0; i < num_vls; i++)
+ if (dd->vld[i].sc == sc)
+ return i;
+
+ return -1;
+}
+
+static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
+{
+ u64 reg_copy = reg, handled = 0;
+ char buf[96];
+ int i = 0;
+
+ if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
+ start_freeze_handling(dd->pport, 0);
+ else if (is_ax(dd) &&
+ (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
+ (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
+ start_freeze_handling(dd->pport, 0);
+
+ while (reg_copy) {
+ int posn = fls64(reg_copy);
+ /* fls64() returns a 1-based offset; we want it zero-based */
+ int shift = posn - 1;
+ u64 mask = 1ULL << shift;
+
+ if (port_inactive_err(shift)) {
+ count_port_inactive(dd);
+ handled |= mask;
+ } else if (disallowed_pkt_err(shift)) {
+ int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
+
+ handle_send_egress_err_info(dd, vl);
+ handled |= mask;
+ }
+ reg_copy &= ~mask;
+ }
+
+ reg &= ~handled;
+
+ if (reg)
+ dd_dev_info(dd, "Egress Error: %s\n",
+ egress_err_status_string(buf, sizeof(buf), reg));
+
+ for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
+ if (reg & (1ull << i))
+ incr_cntr64(&dd->send_egress_err_status_cnt[i]);
+ }
+}
+
+static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
+{
+ char buf[96];
+ int i = 0;
+
+ dd_dev_info(dd, "Send Error: %s\n",
+ send_err_status_string(buf, sizeof(buf), reg));
+
+ for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
+ if (reg & (1ull << i))
+ incr_cntr64(&dd->send_err_status_cnt[i]);
+ }
+}
+
+/*
+ * The maximum number of times the error clear down will loop before
+ * blocking a repeating error. This value is arbitrary.
+ */
+#define MAX_CLEAR_COUNT 20
+
+/*
+ * Clear and handle an error register. All error interrupts are funneled
+ * through here to have a central location to correctly handle single-
+ * or multi-shot errors.
+ *
+ * For non per-context registers, call this routine with a context value
+ * of 0 so the per-context offset is zero.
+ *
+ * If the handler loops too many times, assume that something is wrong
+ * and can't be fixed, so mask the error bits.
+ */
+static void interrupt_clear_down(struct hfi1_devdata *dd,
+ u32 context,
+ const struct err_reg_info *eri)
+{
+ u64 reg;
+ u32 count;
+
+ /* read in a loop until no more errors are seen */
+ count = 0;
+ while (1) {
+ reg = read_kctxt_csr(dd, context, eri->status);
+ if (reg == 0)
+ break;
+ write_kctxt_csr(dd, context, eri->clear, reg);
+ if (likely(eri->handler))
+ eri->handler(dd, context, reg);
+ count++;
+ if (count > MAX_CLEAR_COUNT) {
+ u64 mask;
+
+ dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
+ eri->desc, reg);
+ /*
+ * Read-modify-write so any other masked bits
+ * remain masked.
+ */
+ mask = read_kctxt_csr(dd, context, eri->mask);
+ mask &= ~reg;
+ write_kctxt_csr(dd, context, eri->mask, mask);
+ break;
+ }
+ }
+}
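+
+/*
+ * The second-tier error handlers below (for example is_misc_err_int() and
+ * is_sdma_eng_err_int()) funnel through interrupt_clear_down(), passing the
+ * status/clear/mask CSR offsets from their err_reg_info entries.
+ */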
+
+/*
+ * CCE block "misc" interrupt. Source is < 16.
+ */
+static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
+{
+ const struct err_reg_info *eri = &misc_errs[source];
+
+ if (eri->handler) {
+ interrupt_clear_down(dd, 0, eri);
+ } else {
+ dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
+ source);
+ }
+}
+
+static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags,
+ sc_err_status_flags,
+ ARRAY_SIZE(sc_err_status_flags));
+}
+
+/*
+ * Send context error interrupt. Source (hw_context) is < 160.
+ *
+ * All send context errors cause the send context to halt. The normal
+ * clear-down mechanism cannot be used because we cannot clear the
+ * error bits until several other long-running items are done first.
+ * This is OK because with the context halted, nothing else is going
+ * to happen on it anyway.
+ */
+static void is_sendctxt_err_int(struct hfi1_devdata *dd,
+ unsigned int hw_context)
+{
+ struct send_context_info *sci;
+ struct send_context *sc;
+ char flags[96];
+ u64 status;
+ u32 sw_index;
+ int i = 0;
+ unsigned long irq_flags;
+
+ sw_index = dd->hw_to_sw[hw_context];
+ if (sw_index >= dd->num_send_contexts) {
+ dd_dev_err(dd,
+ "out of range sw index %u for send context %u\n",
+ sw_index, hw_context);
+ return;
+ }
+ sci = &dd->send_contexts[sw_index];
+ spin_lock_irqsave(&dd->sc_lock, irq_flags);
+ sc = sci->sc;
+ if (!sc) {
+ dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
+ sw_index, hw_context);
+ spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
+ return;
+ }
+
+ /* tell the software that a halt has begun */
+ sc_stop(sc, SCF_HALTED);
+
+ status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
+
+ dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
+ send_context_err_status_string(flags, sizeof(flags),
+ status));
+
+ if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
+ handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
+
+ /*
+ * Automatically restart halted kernel contexts out of interrupt
+ * context. User contexts must ask the driver to restart the context.
+ */
+ if (sc->type != SC_USER)
+ queue_work(dd->pport->hfi1_wq, &sc->halt_work);
+ spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
+
+ /*
+ * Update the counters for the corresponding status bits.
+ * Note that these particular counters are aggregated over all
+ * 160 contexts.
+ */
+ for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
+ if (status & (1ull << i))
+ incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
+ }
+}
+
+static void handle_sdma_eng_err(struct hfi1_devdata *dd,
+ unsigned int source, u64 status)
+{
+ struct sdma_engine *sde;
+ int i = 0;
+
+ sde = &dd->per_sdma[source];
+#ifdef CONFIG_SDMA_VERBOSITY
+ dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
+ slashstrip(__FILE__), __LINE__, __func__);
+ dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
+ sde->this_idx, source, (unsigned long long)status);
+#endif
+ sde->err_cnt++;
+ sdma_engine_error(sde, status);
+
+ /*
+ * Update the counters for the corresponding status bits.
+ * Note that these particular counters are aggregated over
+ * all 16 DMA engines.
+ */
+ for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
+ if (status & (1ull << i))
+ incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
+ }
+}
+
+/*
+ * CCE block SDMA error interrupt. Source is < 16.
+ */
+static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
+{
+#ifdef CONFIG_SDMA_VERBOSITY
+ struct sdma_engine *sde = &dd->per_sdma[source];
+
+ dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
+ slashstrip(__FILE__), __LINE__, __func__);
+ dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
+ source);
+ sdma_dumpstate(sde);
+#endif
+ interrupt_clear_down(dd, source, &sdma_eng_err);
+}
+
+/*
+ * CCE block "various" interrupt. Source is < 8.
+ */
+static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
+{
+ const struct err_reg_info *eri = &various_err[source];
+
+ /*
+ * TCritInt cannot go through interrupt_clear_down()
+ * because it is not a second tier interrupt. The handler
+ * should be called directly.
+ */
+ if (source == TCRIT_INT_SOURCE)
+ handle_temp_err(dd);
+ else if (eri->handler)
+ interrupt_clear_down(dd, 0, eri);
+ else
+ dd_dev_info(dd,
+ "%s: Unimplemented/reserved interrupt %d\n",
+ __func__, source);
+}
+
+static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
+{
+ /* src_ctx is always zero */
+ struct hfi1_pportdata *ppd = dd->pport;
+ unsigned long flags;
+ u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
+
+ if (reg & QSFP_HFI0_MODPRST_N) {
+ if (!qsfp_mod_present(ppd)) {
+ dd_dev_info(dd, "%s: QSFP module removed\n",
+ __func__);
+
+ ppd->driver_link_ready = 0;
+ /*
+ * Cable removed; reset all our information about the
+ * cache and cable capabilities
+ */
+
+ spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
+ /*
+ * We don't set cache_refresh_required here as we expect
+ * an interrupt when a cable is inserted
+ */
+ ppd->qsfp_info.cache_valid = 0;
+ ppd->qsfp_info.reset_needed = 0;
+ ppd->qsfp_info.limiting_active = 0;
+ spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
+ flags);
+ /* Invert the ModPresent pin now to detect plug-in */
+ write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
+ ASIC_QSFP1_INVERT, qsfp_int_mgmt);
+
+ if ((ppd->offline_disabled_reason >
+ HFI1_ODR_MASK(
+ OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
+ (ppd->offline_disabled_reason ==
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(
+ OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
+
+ if (ppd->host_link_state == HLS_DN_POLL) {
+ /*
+ * The link is still in POLL. This means
+ * that the normal link down processing
+ * will not happen. We have to do it here
+ * before turning the DC off.
+ */
+ queue_work(ppd->link_wq, &ppd->link_down_work);
+ }
+ } else {
+ dd_dev_info(dd, "%s: QSFP module inserted\n",
+ __func__);
+
+ spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
+ ppd->qsfp_info.cache_valid = 0;
+ ppd->qsfp_info.cache_refresh_required = 1;
+ spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
+ flags);
+
+ /*
+ * Stop inversion of ModPresent pin to detect
+ * removal of the cable
+ */
+ qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
+ write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
+ ASIC_QSFP1_INVERT, qsfp_int_mgmt);
+
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
+ }
+ }
+
+ if (reg & QSFP_HFI0_INT_N) {
+ dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
+ __func__);
+ spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
+ ppd->qsfp_info.check_interrupt_flags = 1;
+ spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
+ }
+
+ /* Schedule the QSFP work only if there is a cable attached. */
+ if (qsfp_mod_present(ppd))
+ queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
+}
+
+static int request_host_lcb_access(struct hfi1_devdata *dd)
+{
+ int ret;
+
+ ret = do_8051_command(dd, HCMD_MISC,
+ (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
+ LOAD_DATA_FIELD_ID_SHIFT, NULL);
+ if (ret != HCMD_SUCCESS && !(dd->flags & HFI1_SHUTDOWN)) {
+ dd_dev_err(dd, "%s: command failed with error %d\n",
+ __func__, ret);
+ }
+ return ret == HCMD_SUCCESS ? 0 : -EBUSY;
+}
+
+static int request_8051_lcb_access(struct hfi1_devdata *dd)
+{
+ int ret;
+
+ ret = do_8051_command(dd, HCMD_MISC,
+ (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
+ LOAD_DATA_FIELD_ID_SHIFT, NULL);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd, "%s: command failed with error %d\n",
+ __func__, ret);
+ }
+ return ret == HCMD_SUCCESS ? 0 : -EBUSY;
+}
+
+/*
+ * Set the LCB selector - allow host access. The DCC selector always
+ * points to the host.
+ */
+static inline void set_host_lcb_access(struct hfi1_devdata *dd)
+{
+ write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
+ DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
+ DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
+}
+
+/*
+ * Clear the LCB selector - allow 8051 access. The DCC selector always
+ * points to the host.
+ */
+static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
+{
+ write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
+ DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
+}
+
+/*
+ * Acquire LCB access from the 8051. If the host already has access,
+ * just increment a counter. Otherwise, inform the 8051 that the
+ * host is taking access.
+ *
+ * Returns:
+ * 0 on success
+ * -EBUSY if the 8051 has control and cannot be disturbed
+ * -errno if unable to acquire access from the 8051
+ */
+int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
+{
+ struct hfi1_pportdata *ppd = dd->pport;
+ int ret = 0;
+
+ /*
+ * Use the host link state lock so the operation of this routine
+ * { link state check, selector change, count increment } can occur
+ * as a unit against a link state change. Otherwise there is a
+ * race between the state change and the count increment.
+ */
+ if (sleep_ok) {
+ mutex_lock(&ppd->hls_lock);
+ } else {
+ while (!mutex_trylock(&ppd->hls_lock))
+ udelay(1);
+ }
+
+ /* this access is valid only when the link is up */
+ if (ppd->host_link_state & HLS_DOWN) {
+ dd_dev_info(dd, "%s: link state %s not up\n",
+ __func__, link_state_name(ppd->host_link_state));
+ ret = -EBUSY;
+ goto done;
+ }
+
+ if (dd->lcb_access_count == 0) {
+ ret = request_host_lcb_access(dd);
+ if (ret) {
+ if (!(dd->flags & HFI1_SHUTDOWN))
+ dd_dev_err(dd,
+ "%s: unable to acquire LCB access, err %d\n",
+ __func__, ret);
+ goto done;
+ }
+ set_host_lcb_access(dd);
+ }
+ dd->lcb_access_count++;
+done:
+ mutex_unlock(&ppd->hls_lock);
+ return ret;
+}
+
+/*
+ * Release LCB access by decrementing the use count. If the count is moving
+ * from 1 to 0, inform 8051 that it has control back.
+ *
+ * Returns:
+ * 0 on success
+ * -errno if unable to release access to the 8051
+ */
+int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
+{
+ int ret = 0;
+
+ /*
+ * Use the host link state lock because the acquire needed it.
+ * Here, we only need to keep { selector change, count decrement }
+ * as a unit.
+ */
+ if (sleep_ok) {
+ mutex_lock(&dd->pport->hls_lock);
+ } else {
+ while (!mutex_trylock(&dd->pport->hls_lock))
+ udelay(1);
+ }
+
+ if (dd->lcb_access_count == 0) {
+ dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
+ __func__);
+ goto done;
+ }
+
+ if (dd->lcb_access_count == 1) {
+ set_8051_lcb_access(dd);
+ ret = request_8051_lcb_access(dd);
+ if (ret) {
+ dd_dev_err(dd,
+ "%s: unable to release LCB access, err %d\n",
+ __func__, ret);
+ /* restore host access if the grant didn't work */
+ set_host_lcb_access(dd);
+ goto done;
+ }
+ }
+ dd->lcb_access_count--;
+done:
+ mutex_unlock(&dd->pport->hls_lock);
+ return ret;
+}
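+
+/*
+ * Illustrative use of the access pair above (example only; sleep_ok = 1
+ * assumes process context):
+ *
+ *	if (acquire_lcb_access(dd, 1) == 0) {
+ *		... read/write LCB CSRs ...
+ *		release_lcb_access(dd, 1);
+ *	}
+ */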
+
+/*
+ * Initialize LCB access variables and state. Called during driver load,
+ * after most of the initialization is finished.
+ *
+ * The DC default is LCB access on for the host. The driver defaults to
+ * leaving access to the 8051. Assign access now - this constrains the call
+ * to this routine to be after all LCB set-up is done. In particular, after
+ * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
+ */
+static void init_lcb_access(struct hfi1_devdata *dd)
+{
+ dd->lcb_access_count = 0;
+}
+
+/*
+ * Write a response back to an 8051 request.
+ */
+static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
+{
+ write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
+ DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
+ (u64)return_code <<
+ DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
+ (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
+}
+
+/*
+ * Handle host requests from the 8051.
+ */
+static void handle_8051_request(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u64 reg;
+ u16 data = 0;
+ u8 type;
+
+ reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
+ if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
+ return; /* no request */
+
+ /* zero out COMPLETED so the response is seen */
+ write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
+
+ /* extract request details */
+ type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
+ & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
+ data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
+ & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
+
+ switch (type) {
+ case HREQ_LOAD_CONFIG:
+ case HREQ_SAVE_CONFIG:
+ case HREQ_READ_CONFIG:
+ case HREQ_SET_TX_EQ_ABS:
+ case HREQ_SET_TX_EQ_REL:
+ case HREQ_ENABLE:
+ dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
+ type);
+ hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
+ break;
+ case HREQ_LCB_RESET:
+ /* Put the LCB, RX FPE and TX FPE into reset */
+ write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET);
+ /* Make sure the write completed */
+ (void)read_csr(dd, DCC_CFG_RESET);
+ /* Hold the reset long enough to take effect */
+ udelay(1);
+ /* Take the LCB, RX FPE and TX FPE out of reset */
+ write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
+ hreq_response(dd, HREQ_SUCCESS, 0);
+
+ break;
+ case HREQ_CONFIG_DONE:
+ hreq_response(dd, HREQ_SUCCESS, 0);
+ break;
+
+ case HREQ_INTERFACE_TEST:
+ hreq_response(dd, HREQ_SUCCESS, data);
+ break;
+ default:
+ dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
+ hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
+ break;
+ }
+}
+
+/*
+ * Set up the allocation unit value.
+ */
+void set_up_vau(struct hfi1_devdata *dd, u8 vau)
+{
+ u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
+
+ /* do not modify other values in the register */
+ reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
+ reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
+ write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
+}
+
+/*
+ * Set up initial VL15 credits of the remote. Assumes the rest of
+ * the CM credit registers are zero from a previous global or credit reset.
+ * Shared limit for VL15 will always be 0.
+ */
+void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
+{
+ u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
+
+ /* set initial values for total and shared credit limit */
+ reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
+ SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
+
+ /*
+ * Set total limit to be equal to VL15 credits.
+ * Leave shared limit at 0.
+ */
+ reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
+ write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
+
+ write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
+ << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
+}
+
+/*
+ * Zero all credit details from the previous connection and
+ * reset the CM manager's internal counters.
+ */
+void reset_link_credits(struct hfi1_devdata *dd)
+{
+ int i;
+
+ /* remove all previous VL credit limits */
+ for (i = 0; i < TXE_NUM_DATA_VL; i++)
+ write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
+ write_csr(dd, SEND_CM_CREDIT_VL15, 0);
+ write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
+ /* reset the CM block */
+ pio_send_control(dd, PSC_CM_RESET);
+ /* reset cached value */
+ dd->vl15buf_cached = 0;
+}
+
+/* convert a vCU to a CU */
+static u32 vcu_to_cu(u8 vcu)
+{
+ return 1 << vcu;
+}
+
+/* convert a CU to a vCU */
+static u8 cu_to_vcu(u32 cu)
+{
+ return ilog2(cu);
+}
+
+/* convert a vAU to an AU */
+static u32 vau_to_au(u8 vau)
+{
+ return 8 * (1 << vau);
+}
+
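+/* Set the SM trap QP and SA QP defaults used after link up. */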
+static void set_linkup_defaults(struct hfi1_pportdata *ppd)
+{
+ ppd->sm_trap_qp = 0x0;
+ ppd->sa_qp = 0x1;
+}
+
+/*
+ * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
+ */
+static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
+{
+ u64 reg;
+
+ /* clear lcb run: LCB_CFG_RUN.EN = 0 */
+ write_csr(dd, DC_LCB_CFG_RUN, 0);
+ /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
+ write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
+ 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
+ /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
+ dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
+ reg = read_csr(dd, DCC_CFG_RESET);
+ write_csr(dd, DCC_CFG_RESET, reg |
+ DCC_CFG_RESET_RESET_LCB | DCC_CFG_RESET_RESET_RX_FPE);
+ (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
+ if (!abort) {
+ udelay(1); /* must hold for the longer of 16cclks or 20ns */
+ write_csr(dd, DCC_CFG_RESET, reg);
+ write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
+ }
+}
+
+/*
+ * This routine should be called after the link has been transitioned to
+ * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
+ * reset).
+ *
+ * The expectation is that the caller of this routine would have taken
+ * care of properly transitioning the link into the correct state.
+ * NOTE: the caller needs to acquire the dd->dc8051_lock lock
+ * before calling this function.
+ */
+static void _dc_shutdown(struct hfi1_devdata *dd)
+{
+ lockdep_assert_held(&dd->dc8051_lock);
+
+ if (dd->dc_shutdown)
+ return;
+
+ dd->dc_shutdown = 1;
+ /* Shutdown the LCB */
+ lcb_shutdown(dd, 1);
+ /*
+ * Going to OFFLINE would have caused the 8051 to put the
+ * SerDes into reset already. Just need to shut down the
+ * 8051 itself.
+ */
+ write_csr(dd, DC_DC8051_CFG_RST, 0x1);
+}
+
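+/* Shut down the DC, taking the dc8051 lock. */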
+static void dc_shutdown(struct hfi1_devdata *dd)
+{
+ mutex_lock(&dd->dc8051_lock);
+ _dc_shutdown(dd);
+ mutex_unlock(&dd->dc8051_lock);
+}
+
+/*
+ * Calling this after the DC has been brought out of reset should not
+ * do any damage.
+ * NOTE: the caller needs to acquire the dd->dc8051_lock lock
+ * before calling this function.
+ */
+static void _dc_start(struct hfi1_devdata *dd)
+{
+ lockdep_assert_held(&dd->dc8051_lock);
+
+ if (!dd->dc_shutdown)
+ return;
+
+ /* Take the 8051 out of reset */
+ write_csr(dd, DC_DC8051_CFG_RST, 0ull);
+ /* Wait until 8051 is ready */
+ if (wait_fm_ready(dd, TIMEOUT_8051_START))
+ dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
+ __func__);
+
+ /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
+ write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
+ /* lcb_shutdown() with abort=1 does not restore these */
+ write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
+ dd->dc_shutdown = 0;
+}
+
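+/* Restart the DC, taking the dc8051 lock. */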
+static void dc_start(struct hfi1_devdata *dd)
+{
+ mutex_lock(&dd->dc8051_lock);
+ _dc_start(dd);
+ mutex_unlock(&dd->dc8051_lock);
+}
+
+/*
+ * These LCB adjustments are for the Aurora SerDes core in the FPGA.
+ */
+static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
+{
+ u64 rx_radr, tx_radr;
+ u32 version;
+
+ if (dd->icode != ICODE_FPGA_EMULATION)
+ return;
+
+ /*
+ * These LCB defaults on emulator _s are good, nothing to do here:
+ * LCB_CFG_TX_FIFOS_RADR
+ * LCB_CFG_RX_FIFOS_RADR
+ * LCB_CFG_LN_DCLK
+ * LCB_CFG_IGNORE_LOST_RCLK
+ */
+ if (is_emulator_s(dd))
+ return;
+ /* else this is _p */
+
+ version = emulator_rev(dd);
+ if (!is_ax(dd))
+ version = 0x2d; /* all B0 use 0x2d or higher settings */
+
+ if (version <= 0x12) {
+ /* release 0x12 and below */
+
+ /*
+ * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
+ * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
+ * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
+ */
+ rx_radr =
+ 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
+ | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
+ | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
+ /*
+ * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
+ * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
+ */
+ tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
+ } else if (version <= 0x18) {
+ /* release 0x13 up to 0x18 */
+ /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
+ rx_radr =
+ 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
+ | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
+ | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
+ tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
+ } else if (version == 0x19) {
+ /* release 0x19 */
+ /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
+ rx_radr =
+ 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
+ | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
+ | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
+ tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
+ } else if (version == 0x1a) {
+ /* release 0x1a */
+ /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
+ rx_radr =
+ 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
+ | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
+ | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
+ tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
+ write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
+ } else {
+ /* release 0x1b and higher */
+ /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
+ rx_radr =
+ 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
+ | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
+ | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
+ tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
+ }
+
+ write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
+ /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
+ write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
+ DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
+ write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
+}
+
+/*
+ * Handle an SMA idle message
+ *
+ * This is a work-queue function outside of the interrupt.
+ */
+void handle_sma_message(struct work_struct *work)
+{
+ struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+ sma_message_work);
+ struct hfi1_devdata *dd = ppd->dd;
+ u64 msg;
+ int ret;
+
+ /*
+ * msg is bytes 1-4 of the 40-bit idle message - the command code
+ * is stripped off
+ */
+ ret = read_idle_sma(dd, &msg);
+ if (ret)
+ return;
+ dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
+ /*
+ * React to the SMA message. Byte[1] (0 for us) is the command.
+ */
+ switch (msg & 0xff) {
+ case SMA_IDLE_ARM:
+ /*
+ * See OPAv1 table 9-14 - HFI and External Switch Ports Key
+ * State Transitions
+ *
+ * Only expected in INIT or ARMED, discard otherwise.
+ */
+ if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
+ ppd->neighbor_normal = 1;
+ break;
+ case SMA_IDLE_ACTIVE:
+ /*
+ * See OPAv1 table 9-14 - HFI and External Switch Ports Key
+ * State Transitions
+ *
+ * Can activate the node. Discard otherwise.
+ */
+ if (ppd->host_link_state == HLS_UP_ARMED &&
+ ppd->is_active_optimize_enabled) {
+ ppd->neighbor_normal = 1;
+ ret = set_link_state(ppd, HLS_UP_ACTIVE);
+ if (ret)
+ dd_dev_err(
+ dd,
+ "%s: received Active SMA idle message, couldn't set link to Active\n",
+ __func__);
+ }
+ break;
+ default:
+ dd_dev_err(dd,
+ "%s: received unexpected SMA idle message 0x%llx\n",
+ __func__, msg);
+ break;
+ }
+}
+
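+/* Read-modify-write RCV_CTRL under the rcvctrl lock. */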
+static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
+{
+ u64 rcvctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->rcvctrl_lock, flags);
+ rcvctrl = read_csr(dd, RCV_CTRL);
+ rcvctrl |= add;
+ rcvctrl &= ~clear;
+ write_csr(dd, RCV_CTRL, rcvctrl);
+ spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
+}
+
+static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
+{
+ adjust_rcvctrl(dd, add, 0);
+}
+
+static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
+{
+ adjust_rcvctrl(dd, 0, clear);
+}
+
+/*
+ * Called from all interrupt handlers to start handling an SPC freeze.
+ */
+void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ struct send_context *sc;
+ int i;
+ int sc_flags;
+
+ if (flags & FREEZE_SELF)
+ write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
+
+ /* enter frozen mode */
+ dd->flags |= HFI1_FROZEN;
+
+ /* notify all SDMA engines that they are going into a freeze */
+ sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
+
+ sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
+ SCF_LINK_DOWN : 0);
+ /* do halt pre-handling on all enabled send contexts */
+ for (i = 0; i < dd->num_send_contexts; i++) {
+ sc = dd->send_contexts[i].sc;
+ if (sc && (sc->flags & SCF_ENABLED))
+ sc_stop(sc, sc_flags);
+ }
+
+ /* Send contexts are frozen. Notify user space. */
+ hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
+
+ if (flags & FREEZE_ABORT) {
+ dd_dev_err(dd,
+ "Aborted freeze recovery. Please REBOOT system\n");
+ return;
+ }
+ /* queue non-interrupt handler */
+ queue_work(ppd->hfi1_wq, &ppd->freeze_work);
+}
+
+/*
+ * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
+ * depending on the "freeze" parameter.
+ *
+ * No need to return an error if it times out, our only option
+ * is to proceed anyway.
+ */
+static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
+{
+ unsigned long timeout;
+ u64 reg;
+
+ timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
+ while (1) {
+ reg = read_csr(dd, CCE_STATUS);
+ if (freeze) {
+ /* waiting until all indicators are set */
+ if ((reg & ALL_FROZE) == ALL_FROZE)
+ return; /* all done */
+ } else {
+ /* waiting until all indicators are clear */
+ if ((reg & ALL_FROZE) == 0)
+ return; /* all done */
+ }
+
+ if (time_after(jiffies, timeout)) {
+ dd_dev_err(dd,
+ "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
+ freeze ? "" : "un", reg & ALL_FROZE,
+ freeze ? ALL_FROZE : 0ull);
+ return;
+ }
+ usleep_range(80, 120);
+ }
+}
+
+/*
+ * Do all freeze handling for the RXE block.
+ */
+static void rxe_freeze(struct hfi1_devdata *dd)
+{
+ int i;
+ struct hfi1_ctxtdata *rcd;
+
+ /* disable port */
+ clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
+
+ /* disable all receive contexts */
+ for (i = 0; i < dd->num_rcv_contexts; i++) {
+ rcd = hfi1_rcd_get_by_index(dd, i);
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
+ hfi1_rcd_put(rcd);
+ }
+}
+
+/*
+ * Unfreeze handling for the RXE block - kernel contexts only.
+ * This will also enable the port. User contexts will do unfreeze
+ * handling on a per-context basis as they call into the driver.
+ */
+static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
+{
+ u32 rcvmask;
+ u16 i;
+ struct hfi1_ctxtdata *rcd;
+
+ /* enable all kernel contexts */
+ for (i = 0; i < dd->num_rcv_contexts; i++) {
+ rcd = hfi1_rcd_get_by_index(dd, i);
+
+ /* Ensure all non-user contexts(including vnic) are enabled */
+ if (!rcd ||
+ (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) {
+ hfi1_rcd_put(rcd);
+ continue;
+ }
+ rcvmask = HFI1_RCVCTRL_CTXT_ENB;
+ /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
+ rcvmask |= hfi1_rcvhdrtail_kvaddr(rcd) ?
+ HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
+ hfi1_rcvctrl(dd, rcvmask, rcd);
+ hfi1_rcd_put(rcd);
+ }
+
+ /* enable port */
+ add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
+}
+
+/*
+ * Non-interrupt SPC freeze handling.
+ *
+ * This is a work-queue function outside of the triggering interrupt.
+ */
+void handle_freeze(struct work_struct *work)
+{
+ struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+ freeze_work);
+ struct hfi1_devdata *dd = ppd->dd;
+
+ /* wait for freeze indicators on all affected blocks */
+ wait_for_freeze_status(dd, 1);
+
+ /* SPC is now frozen */
+
+ /* do send PIO freeze steps */
+ pio_freeze(dd);
+
+ /* do send DMA freeze steps */
+ sdma_freeze(dd);
+
+ /* do send egress freeze steps - nothing to do */
+
+ /* do receive freeze steps */
+ rxe_freeze(dd);
+
+ /*
+ * Unfreeze the hardware - clear the freeze, wait for each
+ * block's frozen bit to clear, then clear the frozen flag.
+ */
+ write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
+ wait_for_freeze_status(dd, 0);
+
+ if (is_ax(dd)) {
+ write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
+ wait_for_freeze_status(dd, 1);
+ write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
+ wait_for_freeze_status(dd, 0);
+ }
+
+ /* do send PIO unfreeze steps for kernel contexts */
+ pio_kernel_unfreeze(dd);
+
+ /* do send DMA unfreeze steps */
+ sdma_unfreeze(dd);
+
+ /* do send egress unfreeze steps - nothing to do */
+
+ /* do receive unfreeze steps for kernel contexts */
+ rxe_kernel_unfreeze(dd);
+
+ /*
+ * The unfreeze procedure touches global device registers when
+ * it disables and re-enables RXE. Mark the device unfrozen
+ * after all that is done so other parts of the driver waiting
+ * for the device to unfreeze don't do things out of order.
+ *
+ * The above implies that the meaning of the HFI1_FROZEN flag is
+ * "Device has gone into freeze mode and freeze mode handling
+ * is still in progress."
+ *
+ * The flag will be removed when freeze mode processing has
+ * completed.
+ */
+ dd->flags &= ~HFI1_FROZEN;
+ wake_up(&dd->event_queue);
+
+ /* no longer frozen */
+}
+
+/**
+ * update_xmit_counters - update PortXmitWait/PortVlXmitWait
+ * counters.
+ * @ppd: info of physical Hfi port
+ * @link_width: new link width after link up or downgrade
+ *
+ * Update the PortXmitWait and PortVlXmitWait counters after
+ * a link up or downgrade event to reflect a link width change.
+ */
+static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width)
+{
+ int i;
+ u16 tx_width;
+ u16 link_speed;
+
+ tx_width = tx_link_width(link_width);
+ link_speed = get_link_speed(ppd->link_speed_active);
+
+ /*
+ * There are C_VL_COUNT number of PortVLXmitWait counters.
+ * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
+ */
+ for (i = 0; i < C_VL_COUNT + 1; i++)
+ get_xmit_wait_counters(ppd, tx_width, link_speed, i);
+}
+
+/*
+ * Handle a link up interrupt from the 8051.
+ *
+ * This is a work-queue function outside of the interrupt.
+ */
+void handle_link_up(struct work_struct *work)
+{
+ struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+ link_up_work);
+ struct hfi1_devdata *dd = ppd->dd;
+
+ set_link_state(ppd, HLS_UP_INIT);
+
+ /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
+ read_ltp_rtt(dd);
+ /*
+ * OPA specifies that certain counters are cleared on a transition
+ * to link up, so do that.
+ */
+ clear_linkup_counters(dd);
+ /*
+ * And (re)set link up default values.
+ */
+ set_linkup_defaults(ppd);
+
+ /*
+ * Set VL15 credits. Use cached value from verify cap interrupt.
+ * In case of quick linkup or simulator, vl15 value will be set by
+ * handle_linkup_change. VerifyCap interrupt handler will not be
+ * called in those scenarios.
+ */
+ if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
+ set_up_vl15(dd, dd->vl15buf_cached);
+
+ /* enforce link speed enabled */
+ if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
+ /* oops - current speed is not enabled, bounce */
+ dd_dev_err(dd,
+ "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
+ ppd->link_speed_active, ppd->link_speed_enabled);
+ set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
+ OPA_LINKDOWN_REASON_SPEED_POLICY);
+ set_link_state(ppd, HLS_DN_OFFLINE);
+ start_link(ppd);
+ }
+}
+
+/*
+ * Several pieces of LNI information were cached for SMA in ppd.
+ * Reset these on link down.
+ */
+static void reset_neighbor_info(struct hfi1_pportdata *ppd)
+{
+ ppd->neighbor_guid = 0;
+ ppd->neighbor_port_number = 0;
+ ppd->neighbor_type = 0;
+ ppd->neighbor_fm_security = 0;
+}
+
+static const char * const link_down_reason_strs[] = {
+ [OPA_LINKDOWN_REASON_NONE] = "None",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
+ [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
+ [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
+ [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
+ [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
+ [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
+ [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
+ [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
+ [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
+ [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
+ [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
+ [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
+ [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
+ [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
+ [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
+ [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
+ [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
+ [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
+ [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
+ [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
+ [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
+ "Excessive buffer overrun",
+ [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
+ [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
+ [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
+ [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
+ [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
+ [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
+ [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
+ [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
+ "Local media not installed",
+ [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
+ [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
+ [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
+ "End to end not installed",
+ [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
+ [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
+ [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
+ [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
+ [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
+ [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
+};
+
+/* return the neighbor link down reason string */
+static const char *link_down_reason_str(u8 reason)
+{
+ const char *str = NULL;
+
+ if (reason < ARRAY_SIZE(link_down_reason_strs))
+ str = link_down_reason_strs[reason];
+ if (!str)
+ str = "(invalid)";
+
+ return str;
+}
+
+/*
+ * Handle a link down interrupt from the 8051.
+ *
+ * This is a work-queue function outside of the interrupt.
+ */
+void handle_link_down(struct work_struct *work)
+{
+ u8 lcl_reason, neigh_reason = 0;
+ u8 link_down_reason;
+ struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+ link_down_work);
+ int was_up;
+ static const char ldr_str[] = "Link down reason: ";
+
+ if ((ppd->host_link_state &
+ (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
+ ppd->port_type == PORT_TYPE_FIXED)
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
+
+ /* Go offline first, then deal with reading/writing through 8051 */
+ was_up = !!(ppd->host_link_state & HLS_UP);
+ set_link_state(ppd, HLS_DN_OFFLINE);
+ xchg(&ppd->is_link_down_queued, 0);
+
+ if (was_up) {
+ lcl_reason = 0;
+ /* link down reason is only valid if the link was up */
+ read_link_down_reason(ppd->dd, &link_down_reason);
+ switch (link_down_reason) {
+ case LDR_LINK_TRANSFER_ACTIVE_LOW:
+ /* the link went down, no idle message reason */
+ dd_dev_info(ppd->dd, "%sUnexpected link down\n",
+ ldr_str);
+ break;
+ case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
+ /*
+ * The neighbor reason is only valid if an idle message
+ * was received for it.
+ */
+ read_planned_down_reason_code(ppd->dd, &neigh_reason);
+ dd_dev_info(ppd->dd,
+ "%sNeighbor link down message %d, %s\n",
+ ldr_str, neigh_reason,
+ link_down_reason_str(neigh_reason));
+ break;
+ case LDR_RECEIVED_HOST_OFFLINE_REQ:
+ dd_dev_info(ppd->dd,
+ "%sHost requested link to go offline\n",
+ ldr_str);
+ break;
+ default:
+ dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
+ ldr_str, link_down_reason);
+ break;
+ }
+
+ /*
+ * If no reason, assume peer-initiated but missed
+ * LinkGoingDown idle flits.
+ */
+ if (neigh_reason == 0)
+ lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
+ } else {
+ /* went down while polling or going up */
+ lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
+ }
+
+ set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
+
+ /* inform the SMA when the link transitions from up to down */
+ if (was_up && ppd->local_link_down_reason.sma == 0 &&
+ ppd->neigh_link_down_reason.sma == 0) {
+ ppd->local_link_down_reason.sma =
+ ppd->local_link_down_reason.latest;
+ ppd->neigh_link_down_reason.sma =
+ ppd->neigh_link_down_reason.latest;
+ }
+
+ reset_neighbor_info(ppd);
+
+ /* disable the port */
+ clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
+
+ /*
+ * If there is no cable attached, turn the DC off. Otherwise,
+ * start the link bring up.
+ */
+ if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
+ dc_shutdown(ppd->dd);
+ else
+ start_link(ppd);
+}
+
+void handle_link_bounce(struct work_struct *work)
+{
+ struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+ link_bounce_work);
+
+ /*
+ * Only do something if the link is currently up.
+ */
+ if (ppd->host_link_state & HLS_UP) {
+ set_link_state(ppd, HLS_DN_OFFLINE);
+ start_link(ppd);
+ } else {
+ dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
+ __func__, link_state_name(ppd->host_link_state));
+ }
+}
+
+/*
+ * Mask conversion: Capability exchange to Port LTP. The capability
+ * exchange has an implicit 16b CRC that is mandatory.
+ */
+static int cap_to_port_ltp(int cap)
+{
+ int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
+
+ if (cap & CAP_CRC_14B)
+ port_ltp |= PORT_LTP_CRC_MODE_14;
+ if (cap & CAP_CRC_48B)
+ port_ltp |= PORT_LTP_CRC_MODE_48;
+ if (cap & CAP_CRC_12B_16B_PER_LANE)
+ port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
+
+ return port_ltp;
+}
+
+/*
+ * Convert an OPA Port LTP mask to capability mask
+ */
+int port_ltp_to_cap(int port_ltp)
+{
+ int cap_mask = 0;
+
+ if (port_ltp & PORT_LTP_CRC_MODE_14)
+ cap_mask |= CAP_CRC_14B;
+ if (port_ltp & PORT_LTP_CRC_MODE_48)
+ cap_mask |= CAP_CRC_48B;
+ if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
+ cap_mask |= CAP_CRC_12B_16B_PER_LANE;
+
+ return cap_mask;
+}
+
+/*
+ * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
+ */
+static int lcb_to_port_ltp(int lcb_crc)
+{
+ int port_ltp = 0;
+
+ if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
+ port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
+ else if (lcb_crc == LCB_CRC_48B)
+ port_ltp = PORT_LTP_CRC_MODE_48;
+ else if (lcb_crc == LCB_CRC_14B)
+ port_ltp = PORT_LTP_CRC_MODE_14;
+ else
+ port_ltp = PORT_LTP_CRC_MODE_16;
+
+ return port_ltp;
+}
+
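+/*
+ * Clear the full-management P_Key (index 2), if set, and report the
+ * P_Key change.
+ */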
+static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
+{
+ if (ppd->pkeys[2] != 0) {
+ ppd->pkeys[2] = 0;
+ (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+ hfi1_event_pkey_change(ppd->dd, ppd->port);
+ }
+}
+
+/*
+ * Convert the given link width to the OPA link width bitmask.
+ */
+static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
+{
+ switch (width) {
+ case 0:
+ /*
+ * Simulator and quick linkup do not set the width.
+ * Just set it to 4x without complaint.
+ */
+ if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
+ return OPA_LINK_WIDTH_4X;
+ return 0; /* no lanes up */
+ case 1: return OPA_LINK_WIDTH_1X;
+ case 2: return OPA_LINK_WIDTH_2X;
+ case 3: return OPA_LINK_WIDTH_3X;
+ case 4: return OPA_LINK_WIDTH_4X;
+ default:
+ dd_dev_info(dd, "%s: invalid width %d, using 4\n",
+ __func__, width);
+ return OPA_LINK_WIDTH_4X;
+ }
+}
+
+/*
+ * Do a population count on the bottom nibble.
+ */
+static const u8 bit_counts[16] = {
+ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
+};
+
+static inline u8 nibble_to_count(u8 nibble)
+{
+ return bit_counts[nibble & 0xf];
+}
+
+/*
+ * Read the active lane information from the 8051 registers and return
+ * their widths.
+ *
+ * Active lane information is found in these 8051 registers:
+ * enable_lane_tx
+ * enable_lane_rx
+ */
+static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
+ u16 *rx_width)
+{
+ u16 tx, rx;
+ u8 enable_lane_rx;
+ u8 enable_lane_tx;
+ u8 tx_polarity_inversion;
+ u8 rx_polarity_inversion;
+ u8 max_rate;
+
+ /* read the active lanes */
+ read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
+ &rx_polarity_inversion, &max_rate);
+ read_local_lni(dd, &enable_lane_rx);
+
+ /* convert to counts */
+ tx = nibble_to_count(enable_lane_tx);
+ rx = nibble_to_count(enable_lane_rx);
+
+ /*
+ * Set link_speed_active here, overriding what was set in
+ * handle_verify_cap(). The ASIC 8051 firmware does not correctly
+ * set the max_rate field in handle_verify_cap until v0.19.
+ */
+ if ((dd->icode == ICODE_RTL_SILICON) &&
+ (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
+ /* max_rate: 0 = 12.5G, 1 = 25G */
+ switch (max_rate) {
+ case 0:
+ dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
+ break;
+ case 1:
+ dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
+ break;
+ default:
+ dd_dev_err(dd,
+ "%s: unexpected max rate %d, using 25Gb\n",
+ __func__, (int)max_rate);
+ dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
+ break;
+ }
+ }
+
+ dd_dev_info(dd,
+ "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
+ enable_lane_tx, tx, enable_lane_rx, rx);
+ *tx_width = link_width_to_bits(dd, tx);
+ *rx_width = link_width_to_bits(dd, rx);
+}
+
+/*
+ * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
+ * Valid after the end of VerifyCap and during LinkUp. Does not change
+ * after link up. I.e. look elsewhere for downgrade information.
+ *
+ * Bits are:
+ * + bits [7:4] contain the number of active transmitters
+ * + bits [3:0] contain the number of active receivers
+ * These are numbers 1 through 4 and can be different values if the
+ * link is asymmetric.
+ *
+ * verify_cap_local_fm_link_width[0] retains its original value.
+ */
+static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
+ u16 *rx_width)
+{
+ u16 widths, tx, rx;
+ u8 misc_bits, local_flags;
+ u16 active_tx, active_rx;
+
+ read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths);
+ tx = widths >> 12;
+ rx = (widths >> 8) & 0xf;
+
+ *tx_width = link_width_to_bits(dd, tx);
+ *rx_width = link_width_to_bits(dd, rx);
+
+ /* print the active widths */
+ get_link_widths(dd, &active_tx, &active_rx);
+}
+
+/*
+ * Set ppd->link_width_active and ppd->link_width_downgrade_active using
+ * hardware information when the link first comes up.
+ *
+ * The link width is not available until after VerifyCap.AllFramesReceived
+ * (the trigger for handle_verify_cap), so this is outside that routine
+ * and should be called when the 8051 signals linkup.
+ */
+void get_linkup_link_widths(struct hfi1_pportdata *ppd)
+{
+ u16 tx_width, rx_width;
+
+ /* get end-of-LNI link widths */
+ get_linkup_widths(ppd->dd, &tx_width, &rx_width);
+
+ /* use tx_width as the link is supposed to be symmetric on link up */
+ ppd->link_width_active = tx_width;
+ /* link width downgrade active (LWD.A) starts out matching LW.A */
+ ppd->link_width_downgrade_tx_active = ppd->link_width_active;
+ ppd->link_width_downgrade_rx_active = ppd->link_width_active;
+ /* per OPA spec, on link up LWD.E resets to LWD.S */
+ ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
+ /* cache the active egress rate (units of 10^6 bits/sec) */
+ ppd->current_egress_rate = active_egress_rate(ppd);
+}
+
+/*
+ * Handle a verify capabilities interrupt from the 8051.
+ *
+ * This is a work-queue function outside of the interrupt.
+ */
+void handle_verify_cap(struct work_struct *work)
+{
+ struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+ link_vc_work);
+ struct hfi1_devdata *dd = ppd->dd;
+ u64 reg;
+ u8 power_management;
+ u8 continuous;
+ u8 vcu;
+ u8 vau;
+ u8 z;
+ u16 vl15buf;
+ u16 link_widths;
+ u16 crc_mask;
+ u16 crc_val;
+ u16 device_id;
+ u16 active_tx, active_rx;
+ u8 partner_supported_crc;
+ u8 remote_tx_rate;
+ u8 device_rev;
+
+ set_link_state(ppd, HLS_VERIFY_CAP);
+
+ lcb_shutdown(dd, 0);
+ adjust_lcb_for_fpga_serdes(dd);
+
+ read_vc_remote_phy(dd, &power_management, &continuous);
+ read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
+ &partner_supported_crc);
+ read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
+ read_remote_device_id(dd, &device_id, &device_rev);
+
+ /* print the active widths */
+ get_link_widths(dd, &active_tx, &active_rx);
+ dd_dev_info(dd,
+ "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
+ (int)power_management, (int)continuous);
+ dd_dev_info(dd,
+ "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
+ (int)vau, (int)z, (int)vcu, (int)vl15buf,
+ (int)partner_supported_crc);
+ dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
+ (u32)remote_tx_rate, (u32)link_widths);
+ dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
+ (u32)device_id, (u32)device_rev);
+ /*
+ * The peer vAU value just read is the peer receiver value. HFI does
+ * not support a transmit vAU of 0 (AU == 8). We advertised that
+ * with Z=1 in the fabric capabilities sent to the peer. The peer
+ * will see our Z=1, and, if it advertised a vAU of 0, will move its
+ * receive to vAU of 1 (AU == 16). Do the same here. We do not care
+ * about the peer Z value - our sent vAU is 3 (hardwired) and is not
+ * subject to the Z value exception.
+ */
+ if (vau == 0)
+ vau = 1;
+ set_up_vau(dd, vau);
+
+ /*
+ * Set VL15 credits to 0 in the global credit register. Cache the
+ * remote VL15 credit value and wait for the link-up interrupt to set it.
+ */
+ set_up_vl15(dd, 0);
+ dd->vl15buf_cached = vl15buf;
+
+ /* set up the LCB CRC mode */
+ crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
+
+ /* order is important: use the lowest bit in common */
+ if (crc_mask & CAP_CRC_14B)
+ crc_val = LCB_CRC_14B;
+ else if (crc_mask & CAP_CRC_48B)
+ crc_val = LCB_CRC_48B;
+ else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
+ crc_val = LCB_CRC_12B_16B_PER_LANE;
+ else
+ crc_val = LCB_CRC_16B;
+
+ dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
+ write_csr(dd, DC_LCB_CFG_CRC_MODE,
+ (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
+
+ /* set (14b only) or clear sideband credit */
+ reg = read_csr(dd, SEND_CM_CTRL);
+ if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
+ write_csr(dd, SEND_CM_CTRL,
+ reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
+ } else {
+ write_csr(dd, SEND_CM_CTRL,
+ reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
+ }
+
+ ppd->link_speed_active = 0; /* invalid value */
+ if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
+ /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
+ switch (remote_tx_rate) {
+ case 0:
+ ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
+ break;
+ case 1:
+ ppd->link_speed_active = OPA_LINK_SPEED_25G;
+ break;
+ }
+ } else {
+ /* actual rate is highest bit of the ANDed rates */
+ u8 rate = remote_tx_rate & ppd->local_tx_rate;
+
+ if (rate & 2)
+ ppd->link_speed_active = OPA_LINK_SPEED_25G;
+ else if (rate & 1)
+ ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
+ }
+ if (ppd->link_speed_active == 0) {
+ dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
+ __func__, (int)remote_tx_rate);
+ ppd->link_speed_active = OPA_LINK_SPEED_25G;
+ }
+
+ /*
+ * Cache the values of the supported, enabled, and active
+ * LTP CRC modes to return in 'portinfo' queries. But the bit
+ * flags that are returned in the portinfo query differ from
+ * what's in the link_crc_mask, crc_sizes, and crc_val
+ * variables. Convert these here.
+ */
+ ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
+ /* supported crc modes */
+ ppd->port_ltp_crc_mode |=
+ cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
+ /* enabled crc modes */
+ ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
+ /* active crc mode */
+
+ /* set up the remote credit return table */
+ assign_remote_cm_au_table(dd, vcu);
+
+ /*
+ * The LCB is reset on entry to handle_verify_cap(), so this must
+ * be applied on every link up.
+ *
+ * Adjust LCB error kill enable to kill the link if
+ * these RBUF errors are seen:
+ * REPLAY_BUF_MBE_SMASK
+ * FLIT_INPUT_BUF_MBE_SMASK
+ */
+ if (is_ax(dd)) { /* fixed in B0 */
+ reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
+ reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
+ | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
+ write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
+ }
+
+ /* pull LCB fifos out of reset - all fifo clocks must be stable */
+ write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
+
+ /* give 8051 access to the LCB CSRs */
+ write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
+ set_8051_lcb_access(dd);
+
+ /* tell the 8051 to go to LinkUp */
+ set_link_state(ppd, HLS_GOING_UP);
+}
+
+/**
+ * apply_link_downgrade_policy - Apply the link width downgrade enabled
+ * policy against the current active link widths.
+ * @ppd: info of physical Hfi port
+ * @refresh_widths: True indicates link downgrade event
+ * @return: True indicates a successful link downgrade. False indicates
+ * link downgrade event failed and the link will bounce back to
+ * default link width.
+ *
+ * Called when the enabled policy changes or the active link widths
+ * change.
+ * Refresh_widths indicates that a link downgrade occurred. The
+ * link_downgraded variable is set by refresh_widths and
+ * determines the success/failure of the policy application.
+ */
+bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd,
+ bool refresh_widths)
+{
+ int do_bounce = 0;
+ int tries;
+ u16 lwde;
+ u16 tx, rx;
+ bool link_downgraded = refresh_widths;
+
+ /* use the hls lock to avoid a race with actual link up */
+ tries = 0;
+retry:
+ mutex_lock(&ppd->hls_lock);
+ /* only apply if the link is up */
+ if (ppd->host_link_state & HLS_DOWN) {
+ /* still going up... wait and retry */
+ if (ppd->host_link_state & HLS_GOING_UP) {
+ if (++tries < 1000) {
+ mutex_unlock(&ppd->hls_lock);
+ usleep_range(100, 120); /* arbitrary */
+ goto retry;
+ }
+ dd_dev_err(ppd->dd,
+ "%s: giving up waiting for link state change\n",
+ __func__);
+ }
+ goto done;
+ }
+
+ lwde = ppd->link_width_downgrade_enabled;
+
+ if (refresh_widths) {
+ get_link_widths(ppd->dd, &tx, &rx);
+ ppd->link_width_downgrade_tx_active = tx;
+ ppd->link_width_downgrade_rx_active = rx;
+ }
+
+ if (ppd->link_width_downgrade_tx_active == 0 ||
+ ppd->link_width_downgrade_rx_active == 0) {
+ /* the 8051 reported a dead link as a downgrade */
+ dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
+ link_downgraded = false;
+ } else if (lwde == 0) {
+ /* downgrade is disabled */
+
+ /* bounce if not at starting active width */
+ if ((ppd->link_width_active !=
+ ppd->link_width_downgrade_tx_active) ||
+ (ppd->link_width_active !=
+ ppd->link_width_downgrade_rx_active)) {
+ dd_dev_err(ppd->dd,
+ "Link downgrade is disabled and link has downgraded, downing link\n");
+ dd_dev_err(ppd->dd,
+ " original 0x%x, tx active 0x%x, rx active 0x%x\n",
+ ppd->link_width_active,
+ ppd->link_width_downgrade_tx_active,
+ ppd->link_width_downgrade_rx_active);
+ do_bounce = 1;
+ link_downgraded = false;
+ }
+ } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
+ (lwde & ppd->link_width_downgrade_rx_active) == 0) {
+ /* Tx or Rx is outside the enabled policy */
+ dd_dev_err(ppd->dd,
+ "Link is outside of downgrade allowed, downing link\n");
+ dd_dev_err(ppd->dd,
+ " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
+ lwde, ppd->link_width_downgrade_tx_active,
+ ppd->link_width_downgrade_rx_active);
+ do_bounce = 1;
+ link_downgraded = false;
+ }
+
+done:
+ mutex_unlock(&ppd->hls_lock);
+
+ if (do_bounce) {
+ set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
+ OPA_LINKDOWN_REASON_WIDTH_POLICY);
+ set_link_state(ppd, HLS_DN_OFFLINE);
+ start_link(ppd);
+ }
+
+ return link_downgraded;
+}
+
+/*
+ * Handle a link downgrade interrupt from the 8051.
+ *
+ * This is a work-queue function outside of the interrupt.
+ */
+void handle_link_downgrade(struct work_struct *work)
+{
+ struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+ link_downgrade_work);
+
+ dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
+ if (apply_link_downgrade_policy(ppd, true))
+ update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active);
+}
+
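+/* Error-flag to string helpers used when reporting DC interrupts. */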
+static char *dcc_err_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags, dcc_err_flags,
+ ARRAY_SIZE(dcc_err_flags));
+}
+
+static char *lcb_err_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags, lcb_err_flags,
+ ARRAY_SIZE(lcb_err_flags));
+}
+
+static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags, dc8051_err_flags,
+ ARRAY_SIZE(dc8051_err_flags));
+}
+
+static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
+ ARRAY_SIZE(dc8051_info_err_flags));
+}
+
+static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
+ ARRAY_SIZE(dc8051_info_host_msg_flags));
+}
+
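+/*
+ * Handle an 8051 error interrupt: decode the error and host message
+ * flags set by the firmware, act on each, and queue link work as needed.
+ */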
+static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
+{
+ struct hfi1_pportdata *ppd = dd->pport;
+ u64 info, err, host_msg;
+ int queue_link_down = 0;
+ char buf[96];
+
+ /* look at the flags */
+ if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
+ /* 8051 information set by firmware */
+ /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
+ info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
+ err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
+ & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
+ host_msg = (info >>
+ DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
+ & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
+
+ /*
+ * Handle error flags.
+ */
+ if (err & FAILED_LNI) {
+ /*
+ * LNI error indications are cleared by the 8051
+ * only when starting polling. Only pay attention
+ * to them when in the states that occur during
+ * LNI.
+ */
+ if (ppd->host_link_state
+ & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
+ queue_link_down = 1;
+ dd_dev_info(dd, "Link error: %s\n",
+ dc8051_info_err_string(buf,
+ sizeof(buf),
+ err &
+ FAILED_LNI));
+ }
+ err &= ~(u64)FAILED_LNI;
+ }
+ /* unknown frames can happen during LNI, just count them */
+ if (err & UNKNOWN_FRAME) {
+ ppd->unknown_frame_count++;
+ err &= ~(u64)UNKNOWN_FRAME;
+ }
+ if (err) {
+ /* report remaining errors, but do not do anything */
+ dd_dev_err(dd, "8051 info error: %s\n",
+ dc8051_info_err_string(buf, sizeof(buf),
+ err));
+ }
+
+ /*
+ * Handle host message flags.
+ */
+ if (host_msg & HOST_REQ_DONE) {
+ /*
+ * Presently, the driver does a busy wait for
+ * host requests to complete. This is only an
+ * informational message.
+ * NOTE: The 8051 clears the host message
+ * information *on the next 8051 command*.
+ * Therefore, when linkup is achieved,
+ * this flag will still be set.
+ */
+ host_msg &= ~(u64)HOST_REQ_DONE;
+ }
+ if (host_msg & BC_SMA_MSG) {
+ queue_work(ppd->link_wq, &ppd->sma_message_work);
+ host_msg &= ~(u64)BC_SMA_MSG;
+ }
+ if (host_msg & LINKUP_ACHIEVED) {
+ dd_dev_info(dd, "8051: Link up\n");
+ queue_work(ppd->link_wq, &ppd->link_up_work);
+ host_msg &= ~(u64)LINKUP_ACHIEVED;
+ }
+ if (host_msg & EXT_DEVICE_CFG_REQ) {
+ handle_8051_request(ppd);
+ host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
+ }
+ if (host_msg & VERIFY_CAP_FRAME) {
+ queue_work(ppd->link_wq, &ppd->link_vc_work);
+ host_msg &= ~(u64)VERIFY_CAP_FRAME;
+ }
+ if (host_msg & LINK_GOING_DOWN) {
+ const char *extra = "";
+ /* no downgrade action needed if going down */
+ if (host_msg & LINK_WIDTH_DOWNGRADED) {
+ host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
+ extra = " (ignoring downgrade)";
+ }
+ dd_dev_info(dd, "8051: Link down%s\n", extra);
+ queue_link_down = 1;
+ host_msg &= ~(u64)LINK_GOING_DOWN;
+ }
+ if (host_msg & LINK_WIDTH_DOWNGRADED) {
+ queue_work(ppd->link_wq, &ppd->link_downgrade_work);
+ host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
+ }
+ if (host_msg) {
+ /* report remaining messages, but do not do anything */
+ dd_dev_info(dd, "8051 info host message: %s\n",
+ dc8051_info_host_msg_string(buf,
+ sizeof(buf),
+ host_msg));
+ }
+
+ reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
+ }
+ if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
+ /*
+ * Lost the 8051 heartbeat. If this happens, we
+ * receive constant interrupts about it. Disable
+ * the interrupt after the first.
+ */
+ dd_dev_err(dd, "Lost 8051 heartbeat\n");
+ write_csr(dd, DC_DC8051_ERR_EN,
+ read_csr(dd, DC_DC8051_ERR_EN) &
+ ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
+
+ reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
+ }
+ if (reg) {
+ /* report the error, but do not do anything */
+ dd_dev_err(dd, "8051 error: %s\n",
+ dc8051_err_string(buf, sizeof(buf), reg));
+ }
+
+ if (queue_link_down) {
+ /*
+ * if the link is already going down or disabled, do not
+ * queue another. If there's a link down entry already
+ * queued, don't queue another one.
+ */
+ if ((ppd->host_link_state &
+ (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
+ ppd->link_enabled == 0) {
+ dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
+ __func__, ppd->host_link_state,
+ ppd->link_enabled);
+ } else {
+ if (xchg(&ppd->is_link_down_queued, 1) == 1)
+ dd_dev_info(dd,
+ "%s: link down request already queued\n",
+ __func__);
+ else
+ queue_work(ppd->link_wq, &ppd->link_down_work);
+ }
+ }
+}
+
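+/* FMConfig error code descriptions, indexed by DCC_ERR_INFO_FMCONFIG */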
+static const char * const fm_config_txt[] = {
+[0] =
+ "BadHeadDist: Distance violation between two head flits",
+[1] =
+ "BadTailDist: Distance violation between two tail flits",
+[2] =
+ "BadCtrlDist: Distance violation between two credit control flits",
+[3] =
+ "BadCrdAck: Credits return for unsupported VL",
+[4] =
+ "UnsupportedVLMarker: Received VL Marker",
+[5] =
+ "BadPreempt: Exceeded the preemption nesting level",
+[6] =
+ "BadControlFlit: Received unsupported control flit",
+/* no 7 */
+[8] =
+ "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
+};
+
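+/* PortRcv error code descriptions, indexed by DCC_ERR_INFO_PORTRCV */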
+static const char * const port_rcv_txt[] = {
+[1] =
+ "BadPktLen: Illegal PktLen",
+[2] =
+ "PktLenTooLong: Packet longer than PktLen",
+[3] =
+ "PktLenTooShort: Packet shorter than PktLen",
+[4] =
+ "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
+[5] =
+ "BadDLID: Illegal DLID (0, doesn't match HFI)",
+[6] =
+ "BadL2: Illegal L2 opcode",
+[7] =
+ "BadSC: Unsupported SC",
+[9] =
+ "BadRC: Illegal RC",
+[11] =
+ "PreemptError: Preempting with same VL",
+[12] =
+ "PreemptVL15: Preempting a VL15 packet",
+};
+
+#define OPA_LDR_FMCONFIG_OFFSET 16
+#define OPA_LDR_PORTRCV_OFFSET 0
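+/*
+ * Handle a DCC error interrupt: latch the error details, report the
+ * error, and bounce the link if the port error action policy requires it.
+ */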
+static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
+{
+ u64 info, hdr0, hdr1;
+ const char *extra;
+ char buf[96];
+ struct hfi1_pportdata *ppd = dd->pport;
+ u8 lcl_reason = 0;
+ int do_bounce = 0;
+
+ if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
+ if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
+ info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
+ dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
+ /* set status bit */
+ dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
+ }
+ reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
+ }
+
+ if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
+ struct hfi1_pportdata *ppd = dd->pport;
+ /* this counter saturates at (2^32) - 1 */
+ if (ppd->link_downed < (u32)UINT_MAX)
+ ppd->link_downed++;
+ reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
+ }
+
+ if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
+ u8 reason_valid = 1;
+
+ info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
+ if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
+ dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
+ /* set status bit */
+ dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
+ }
+ switch (info) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ extra = fm_config_txt[info];
+ break;
+ case 8:
+ extra = fm_config_txt[info];
+ if (ppd->port_error_action &
+ OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
+ do_bounce = 1;
+ /*
+ * lcl_reason cannot be derived from info
+ * for this error
+ */
+ lcl_reason =
+ OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
+ }
+ break;
+ default:
+ reason_valid = 0;
+ snprintf(buf, sizeof(buf), "reserved%lld", info);
+ extra = buf;
+ break;
+ }
+
+ if (reason_valid && !do_bounce) {
+ do_bounce = ppd->port_error_action &
+ (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
+ lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
+ }
+
+ /* just report this */
+ dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
+ extra);
+ reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
+ }
+
+ if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
+ u8 reason_valid = 1;
+
+ info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
+ hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
+ hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
+ if (!(dd->err_info_rcvport.status_and_code &
+ OPA_EI_STATUS_SMASK)) {
+ dd->err_info_rcvport.status_and_code =
+ info & OPA_EI_CODE_SMASK;
+ /* set status bit */
+ dd->err_info_rcvport.status_and_code |=
+ OPA_EI_STATUS_SMASK;
+ /*
+ * save first 2 flits in the packet that caused
+ * the error
+ */
+ dd->err_info_rcvport.packet_flit1 = hdr0;
+ dd->err_info_rcvport.packet_flit2 = hdr1;
+ }
+ switch (info) {
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ case 9:
+ case 11:
+ case 12:
+ extra = port_rcv_txt[info];
+ break;
+ default:
+ reason_valid = 0;
+ snprintf(buf, sizeof(buf), "reserved%lld", info);
+ extra = buf;
+ break;
+ }
+
+ if (reason_valid && !do_bounce) {
+ do_bounce = ppd->port_error_action &
+ (1 << (OPA_LDR_PORTRCV_OFFSET + info));
+ lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
+ }
+
+ /* just report this */
+ dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
+ " hdr0 0x%llx, hdr1 0x%llx\n",
+ extra, hdr0, hdr1);
+
+ reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
+ }
+
+ if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
+ /* informative only */
+ dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
+ reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
+ }
+ if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
+ /* informative only */
+ dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
+ reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
+ }
+
+ if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
+ reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
+
+ /* report any remaining errors */
+ if (reg)
+ dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
+ dcc_err_string(buf, sizeof(buf), reg));
+
+ if (lcl_reason == 0)
+ lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
+
+ if (do_bounce) {
+ dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
+ __func__);
+ set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
+ queue_work(ppd->link_wq, &ppd->link_bounce_work);
+ }
+}
+
+static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
+{
+ char buf[96];
+
+ dd_dev_info(dd, "LCB Error: %s\n",
+ lcb_err_string(buf, sizeof(buf), reg));
+}
+
+/*
+ * CCE block DC interrupt. Source is < 8.
+ */
+static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
+{
+ const struct err_reg_info *eri = &dc_errs[source];
+
+ if (eri->handler) {
+ interrupt_clear_down(dd, 0, eri);
+ } else if (source == 3 /* dc_lbm_int */) {
+ /*
+ * This indicates that a parity error has occurred on the
+ * address/control lines presented to the LBM. The error
+ * is a single pulse, there is no associated error flag,
+ * and it is non-maskable. This is because if a parity
+ * error occurs on the request the request is dropped.
+ * This should never occur, but it is nice to know if it
+ * ever does.
+ */
+ dd_dev_err(dd, "Parity error in DC LBM block\n");
+ } else {
+ dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
+ }
+}
+
+/*
+ * TX block send credit interrupt. Source is < 160.
+ */
+static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
+{
+ sc_group_release_update(dd, source);
+}
+
+/*
+ * TX block SDMA interrupt. Source is < 48.
+ *
+ * SDMA interrupts are grouped by type:
+ *
+ * 0 - N-1 = SDma
+ * N - 2N-1 = SDmaProgress
+ * 2N - 3N-1 = SDmaIdle
+ */
+static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
+{
+ /* what interrupt */
+ unsigned int what = source / TXE_NUM_SDMA_ENGINES;
+ /* which engine */
+ unsigned int which = source % TXE_NUM_SDMA_ENGINES;
+
+#ifdef CONFIG_SDMA_VERBOSITY
+ dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
+ slashstrip(__FILE__), __LINE__, __func__);
+ sdma_dumpstate(&dd->per_sdma[which]);
+#endif
+
+ if (likely(what < 3 && which < dd->num_sdma)) {
+ sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
+ } else {
+ /* should not happen */
+ dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
+ }
+}
+
+/**
+ * is_rcv_avail_int() - User receive context available IRQ handler
+ * @dd: valid dd
+ * @source: logical IRQ source (offset from IS_RCVAVAIL_START)
+ *
+ * RX block receive available interrupt. Source is < 160.
+ *
+ * This is the general interrupt handler for user (PSM) receive contexts,
+ * and can only be used for non-threaded IRQs.
+ */
+static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
+{
+ struct hfi1_ctxtdata *rcd;
+ char *err_detail;
+
+ if (likely(source < dd->num_rcv_contexts)) {
+ rcd = hfi1_rcd_get_by_index(dd, source);
+ if (rcd) {
+ handle_user_interrupt(rcd);
+ hfi1_rcd_put(rcd);
+ return; /* OK */
+ }
+ /* received an interrupt, but no rcd */
+ err_detail = "dataless";
+ } else {
+ /* received an interrupt, but are not using that context */
+ err_detail = "out of range";
+ }
+ dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
+ err_detail, source);
+}
+
+/**
+ * is_rcv_urgent_int() - User receive context urgent IRQ handler
+ * @dd: valid dd
+ * @source: logical IRQ source (offset from IS_RCVURGENT_START)
+ *
+ * RX block receive urgent interrupt. Source is < 160.
+ *
+ * NOTE: kernel receive contexts specifically do NOT enable this IRQ.
+ */
+static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
+{
+ struct hfi1_ctxtdata *rcd;
+ char *err_detail;
+
+ if (likely(source < dd->num_rcv_contexts)) {
+ rcd = hfi1_rcd_get_by_index(dd, source);
+ if (rcd) {
+ handle_user_interrupt(rcd);
+ hfi1_rcd_put(rcd);
+ return; /* OK */
+ }
+ /* received an interrupt, but no rcd */
+ err_detail = "dataless";
+ } else {
+ /* received an interrupt, but are not using that context */
+ err_detail = "out of range";
+ }
+ dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
+ err_detail, source);
+}
+
+/*
+ * Reserved range interrupt. Should not be called in normal operation.
+ */
+static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
+{
+ char name[64];
+
+ dd_dev_err(dd, "unexpected %s interrupt\n",
+ is_reserved_name(name, sizeof(name), source));
+}
+
+static const struct is_table is_table[] = {
+/*
+ * start end
+ * name func interrupt func
+ */
+{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
+ is_misc_err_name, is_misc_err_int },
+{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
+ is_sdma_eng_err_name, is_sdma_eng_err_int },
+{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
+ is_sendctxt_err_name, is_sendctxt_err_int },
+{ IS_SDMA_START, IS_SDMA_IDLE_END,
+ is_sdma_eng_name, is_sdma_eng_int },
+{ IS_VARIOUS_START, IS_VARIOUS_END,
+ is_various_name, is_various_int },
+{ IS_DC_START, IS_DC_END,
+ is_dc_name, is_dc_int },
+{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
+ is_rcv_avail_name, is_rcv_avail_int },
+{ IS_RCVURGENT_START, IS_RCVURGENT_END,
+ is_rcv_urgent_name, is_rcv_urgent_int },
+{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
+ is_send_credit_name, is_send_credit_int},
+{ IS_RESERVED_START, IS_RESERVED_END,
+ is_reserved_name, is_reserved_int},
+};
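+/* e.g., a source of IS_SDMA_START + 5 is dispatched to is_sdma_eng_int(dd, 5) */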
+
+/*
+ * Interrupt source interrupt - called when the given source has an interrupt.
+ * Source is a bit index into an array of 64-bit integers.
+ */
+static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
+{
+ const struct is_table *entry;
+
+ /* avoids a double compare by walking the table in-order */
+ for (entry = &is_table[0]; entry->is_name; entry++) {
+ if (source <= entry->end) {
+ trace_hfi1_interrupt(dd, entry, source);
+ entry->is_int(dd, source - entry->start);
+ return;
+ }
+ }
+ /* fell off the end */
+ dd_dev_err(dd, "invalid interrupt source %u\n", source);
+}
+
+/**
+ * general_interrupt - General interrupt handler
+ * @irq: MSIx IRQ vector
+ * @data: hfi1 devdata
+ *
+ * This is able to correctly handle all non-threaded interrupts. Receive
+ * context DATA IRQs are threaded and are not supported by this handler.
+ *
+ */
+irqreturn_t general_interrupt(int irq, void *data)
+{
+ struct hfi1_devdata *dd = data;
+ u64 regs[CCE_NUM_INT_CSRS];
+ u32 bit;
+ int i;
+ irqreturn_t handled = IRQ_NONE;
+
+ this_cpu_inc(*dd->int_counter);
+
+ /* phase 1: scan and clear all handled interrupts */
+ for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
+ if (dd->gi_mask[i] == 0) {
+ regs[i] = 0; /* used later */
+ continue;
+ }
+ regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
+ dd->gi_mask[i];
+ /* only clear if anything is set */
+ if (regs[i])
+ write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
+ }
+
+ /* phase 2: call the appropriate handler */
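+ /* regs[] is scanned as one contiguous CCE_NUM_INT_CSRS x 64-bit bitmap */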
+ for_each_set_bit(bit, (unsigned long *)&regs[0],
+ CCE_NUM_INT_CSRS * 64) {
+ is_interrupt(dd, bit);
+ handled = IRQ_HANDLED;
+ }
+
+ return handled;
+}
+
+irqreturn_t sdma_interrupt(int irq, void *data)
+{
+ struct sdma_engine *sde = data;
+ struct hfi1_devdata *dd = sde->dd;
+ u64 status;
+
+#ifdef CONFIG_SDMA_VERBOSITY
+ dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
+ slashstrip(__FILE__), __LINE__, __func__);
+ sdma_dumpstate(sde);
+#endif
+
+ this_cpu_inc(*dd->int_counter);
+
+ /* This read_csr is really bad in the hot path */
+ status = read_csr(dd,
+ CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
+ & sde->imask;
+ if (likely(status)) {
+ /* clear the interrupt(s) */
+ write_csr(dd,
+ CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
+ status);
+
+ /* handle the interrupt(s) */
+ sdma_engine_interrupt(sde, status);
+ } else {
+ dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
+ sde->this_idx);
+ }
+ return IRQ_HANDLED;
+}
+
+/*
+ * Clear the receive interrupt. Use a read of the interrupt clear CSR
+ * to ensure that the write completed. This does NOT guarantee that
+ * queued DMA writes to memory from the chip are pushed.
+ */
+static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
+{
+ struct hfi1_devdata *dd = rcd->dd;
+ u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
+
+ write_csr(dd, addr, rcd->imask);
+ /* force the above write on the chip and get a value back */
+ (void)read_csr(dd, addr);
+}
+
+/* force the receive interrupt */
+void force_recv_intr(struct hfi1_ctxtdata *rcd)
+{
+ write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
+}
+
+/*
+ * Return non-zero if a packet is present.
+ *
+ * This routine is called when rechecking for packets after the RcvAvail
+ * interrupt has been cleared down. First, do a quick check of memory for
+ * a packet present. If not found, use an expensive CSR read of the context
+ * tail to determine the actual tail. The CSR read is necessary because there
+ * is no method to push pending DMAs to memory other than an interrupt and we
+ * are trying to determine if we need to force an interrupt.
+ */
+static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
+{
+ u32 tail;
+
+ if (hfi1_packet_present(rcd))
+ return 1;
+
+ /* fall back to a CSR read, correct independent of DMA_RTAIL */
+ tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
+ return hfi1_rcd_head(rcd) != tail;
+}
+
+/*
+ * Common code for receive contexts interrupt handlers.
+ * Update traces, increment kernel IRQ counter and
+ * setup ASPM when needed.
+ */
+static void receive_interrupt_common(struct hfi1_ctxtdata *rcd)
+{
+ struct hfi1_devdata *dd = rcd->dd;
+
+ trace_hfi1_receive_interrupt(dd, rcd);
+ this_cpu_inc(*dd->int_counter);
+ aspm_ctx_disable(rcd);
+}
+
+/*
+ * __hfi1_rcd_eoi_intr() - Make HW issue receive interrupt
+ * when there are packets present in the queue. When calling
+ * with interrupts enabled please use hfi1_rcd_eoi_intr.
+ *
+ * @rcd: valid receive context
+ */
+static void __hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
+{
+ if (!rcd->rcvhdrq)
+ return;
+ clear_recv_intr(rcd);
+ if (check_packet_present(rcd))
+ force_recv_intr(rcd);
+}
+
+/**
+ * hfi1_rcd_eoi_intr() - End of Interrupt processing action
+ *
+ * @rcd: Ptr to hfi1_ctxtdata of receive context
+ *
+ * Hold IRQs so we can safely clear the interrupt and
+ * recheck for a packet that may have arrived after the previous
+ * check and the interrupt clear. If a packet arrived, force another
+ * interrupt. This routine can be called at the end of receive packet
+ * processing in interrupt service routines, the interrupt service
+ * thread, and softirqs.
+ */
+static void hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __hfi1_rcd_eoi_intr(rcd);
+ local_irq_restore(flags);
+}
+
+/**
+ * hfi1_netdev_rx_napi - napi poll function to move eoi inline
+ * @napi: pointer to napi object
+ * @budget: netdev budget
+ */
+int hfi1_netdev_rx_napi(struct napi_struct *napi, int budget)
+{
+ struct hfi1_netdev_rxq *rxq = container_of(napi,
+ struct hfi1_netdev_rxq, napi);
+ struct hfi1_ctxtdata *rcd = rxq->rcd;
+ int work_done = 0;
+
+ work_done = rcd->do_interrupt(rcd, budget);
+
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ hfi1_rcd_eoi_intr(rcd);
+ }
+
+ return work_done;
+}
+
+/* Receive packet napi handler for netdevs VNIC and AIP */
+irqreturn_t receive_context_interrupt_napi(int irq, void *data)
+{
+ struct hfi1_ctxtdata *rcd = data;
+
+ receive_interrupt_common(rcd);
+
+ if (likely(rcd->napi)) {
+ if (likely(napi_schedule_prep(rcd->napi)))
+ __napi_schedule_irqoff(rcd->napi);
+ else
+ __hfi1_rcd_eoi_intr(rcd);
+ } else {
+ WARN_ONCE(1, "Napi IRQ handler without napi set up ctxt=%d\n",
+ rcd->ctxt);
+ __hfi1_rcd_eoi_intr(rcd);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Receive packet IRQ handler. This routine expects to be on its own IRQ.
+ * This routine will try to handle packets immediately (latency), but if
+ * it finds too many, it will invoke the thread handler (bandwidth). The
+ * chip receive interrupt is *not* cleared down until this or the thread (if
+ * invoked) is finished. The intent is to avoid extra interrupts while we
+ * are processing packets anyway.
+ */
+irqreturn_t receive_context_interrupt(int irq, void *data)
+{
+ struct hfi1_ctxtdata *rcd = data;
+ int disposition;
+
+ receive_interrupt_common(rcd);
+
+ /* receive interrupt remains blocked while processing packets */
+ disposition = rcd->do_interrupt(rcd, 0);
+
+ /*
+ * Too many packets were seen while processing packets in this
+ * IRQ handler. Invoke the handler thread. The receive interrupt
+ * remains blocked.
+ */
+ if (disposition == RCV_PKT_LIMIT)
+ return IRQ_WAKE_THREAD;
+
+ __hfi1_rcd_eoi_intr(rcd);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Receive packet thread handler. This expects to be invoked with the
+ * receive interrupt still blocked.
+ */
+irqreturn_t receive_context_thread(int irq, void *data)
+{
+ struct hfi1_ctxtdata *rcd = data;
+
+ /* receive interrupt is still blocked from the IRQ handler */
+ (void)rcd->do_interrupt(rcd, 1);
+
+ hfi1_rcd_eoi_intr(rcd);
+
+ return IRQ_HANDLED;
+}
+
+/* ========================================================================= */
+
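+/*
+ * Return the current physical link state reported by the 8051
+ * (the PORT field of DC_DC8051_STS_CUR_STATE).
+ */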
+u32 read_physical_state(struct hfi1_devdata *dd)
+{
+ u64 reg;
+
+ reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
+ return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
+ & DC_DC8051_STS_CUR_STATE_PORT_MASK;
+}
+
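+/* Return the current logical link state from DCC_CFG_PORT_CONFIG.LINK_STATE */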
+u32 read_logical_state(struct hfi1_devdata *dd)
+{
+ u64 reg;
+
+ reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
+ return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
+ & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
+}
+
+static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
+{
+ u64 reg;
+
+ reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
+ /* clear current state, set new state */
+ reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
+ reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
+ write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
+}
+
+/*
+ * Use the 8051 to read an LCB CSR.
+ */
+static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
+{
+ u32 regno;
+ int ret;
+
+ if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
+ if (acquire_lcb_access(dd, 0) == 0) {
+ *data = read_csr(dd, addr);
+ release_lcb_access(dd, 0);
+ return 0;
+ }
+ return -EBUSY;
+ }
+
+ /* register is an index of LCB registers: (offset - base) / 8 */
+ regno = (addr - DC_LCB_CFG_RUN) >> 3;
+ ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
+ if (ret != HCMD_SUCCESS)
+ return -EBUSY;
+ return 0;
+}
+
+/*
+ * Provide a cache for some of the LCB registers in case the LCB is
+ * unavailable.
+ * (The LCB is unavailable in certain link states, for example.)
+ */
+struct lcb_datum {
+ u32 off;
+ u64 val;
+};
+
+static struct lcb_datum lcb_cache[] = {
+ { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
+ { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
+ { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
+};
+
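+/*
+ * Refresh the cached LCB counter values. An entry is only updated when the
+ * read succeeds, so the last good value is kept if the LCB is unavailable.
+ */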
+static void update_lcb_cache(struct hfi1_devdata *dd)
+{
+ int i;
+ int ret;
+ u64 val;
+
+ for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
+ ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
+
+ /* Update if we get good data */
+ if (likely(ret != -EBUSY))
+ lcb_cache[i].val = val;
+ }
+}
+
+static int read_lcb_cache(u32 off, u64 *val)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
+ if (lcb_cache[i].off == off) {
+ *val = lcb_cache[i].val;
+ return 0;
+ }
+ }
+
+ pr_warn("%s bad offset 0x%x\n", __func__, off);
+ return -1;
+}
+
+/*
+ * Read an LCB CSR. Access may not be in host control, so check.
+ * Return 0 on success, -EBUSY on failure.
+ */
+int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
+{
+ struct hfi1_pportdata *ppd = dd->pport;
+
+ /* if up, go through the 8051 for the value */
+ if (ppd->host_link_state & HLS_UP)
+ return read_lcb_via_8051(dd, addr, data);
+ /* if going up or down, check the cache, otherwise, no access */
+ if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
+ if (read_lcb_cache(addr, data))
+ return -EBUSY;
+ return 0;
+ }
+
+ /* otherwise, host has access */
+ *data = read_csr(dd, addr);
+ return 0;
+}
+
+/*
+ * Use the 8051 to write an LCB CSR.
+ */
+static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
+{
+ u32 regno;
+ int ret;
+
+ if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
+ (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
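+ /* simulator or pre-0.20.0 firmware: write directly via host LCB access */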
+ if (acquire_lcb_access(dd, 0) == 0) {
+ write_csr(dd, addr, data);
+ release_lcb_access(dd, 0);
+ return 0;
+ }
+ return -EBUSY;
+ }
+
+ /* register is an index of LCB registers: (offset - base) / 8 */
+ regno = (addr - DC_LCB_CFG_RUN) >> 3;
+ ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
+ if (ret != HCMD_SUCCESS)
+ return -EBUSY;
+ return 0;
+}
+
+/*
+ * Write an LCB CSR. Access may not be in host control, so check.
+ * Return 0 on success, -EBUSY on failure.
+ */
+int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
+{
+ struct hfi1_pportdata *ppd = dd->pport;
+
+ /* if up, go through the 8051 for the value */
+ if (ppd->host_link_state & HLS_UP)
+ return write_lcb_via_8051(dd, addr, data);
+ /* if going up or down, no access */
+ if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
+ return -EBUSY;
+ /* otherwise, host has access */
+ write_csr(dd, addr, data);
+ return 0;
+}
+
+/*
+ * Returns:
+ * < 0 = Linux error, not able to get access
+ * > 0 = 8051 command RETURN_CODE
+ */
+static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
+ u64 *out_data)
+{
+ u64 reg, completed;
+ int return_code;
+ unsigned long timeout;
+
+ hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
+
+ mutex_lock(&dd->dc8051_lock);
+
+ /* We can't send any commands to the 8051 if it's in reset */
+ if (dd->dc_shutdown) {
+ return_code = -ENODEV;
+ goto fail;
+ }
+
+ /*
+ * If an 8051 host command timed out previously, then the 8051 is
+ * stuck.
+ *
+ * On first timeout, attempt to reset and restart the entire DC
+ * block (including 8051). (Is this too big of a hammer?)
+ *
+ * If the 8051 times out a second time, the reset did not bring it
+ * back to healthy life. In that case, fail any subsequent commands.
+ */
+ if (dd->dc8051_timed_out) {
+ if (dd->dc8051_timed_out > 1) {
+ dd_dev_err(dd,
+ "Previous 8051 host command timed out, skipping command %u\n",
+ type);
+ return_code = -ENXIO;
+ goto fail;
+ }
+ _dc_shutdown(dd);
+ _dc_start(dd);
+ }
+
+ /*
+ * If there is no timeout, then the 8051 command interface is
+ * waiting for a command.
+ */
+
+ /*
+ * When writing an LCB CSR, out_data contains the full value to
+ * be written, while in_data contains the relative LCB
+ * address in 7:0. Do the work here, rather than in the caller,
+ * of distributing the write data to where it needs to go:
+ *
+ * Write data
+ * 39:00 -> in_data[47:8]
+ * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
+ * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
+ */
+ if (type == HCMD_WRITE_LCB_CSR) {
+ in_data |= ((*out_data) & 0xffffffffffull) << 8;
+ /* must preserve COMPLETED - it is tied to hardware */
+ reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
+ reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
+ reg |= ((((*out_data) >> 40) & 0xff) <<
+ DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
+ | ((((*out_data) >> 48) & 0xffff) <<
+ DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
+ write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
+ }
+
+ /*
+ * Do two writes: the first to stabilize the type and req_data, the
+ * second to activate.
+ */
+ reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
+ << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
+ | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
+ << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
+ write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
+ reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
+ write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
+
+ /* wait for completion, alternate: interrupt */
+ timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
+ while (1) {
+ reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
+ completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
+ if (completed)
+ break;
+ if (time_after(jiffies, timeout)) {
+ dd->dc8051_timed_out++;
+ dd_dev_err(dd, "8051 host command %u timeout\n", type);
+ if (out_data)
+ *out_data = 0;
+ return_code = -ETIMEDOUT;
+ goto fail;
+ }
+ udelay(2);
+ }
+
+ if (out_data) {
+ *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
+ & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
+ if (type == HCMD_READ_LCB_CSR) {
+ /* top 16 bits are in a different register */
+ *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
+ & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
+ << (48
+ - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
+ }
+ }
+ return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
+ & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
+ dd->dc8051_timed_out = 0;
+ /*
+ * Clear command for next user.
+ */
+ write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
+
+fail:
+ mutex_unlock(&dd->dc8051_lock);
+ return return_code;
+}
+
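+/* Ask the 8051 to move the physical link to the given state */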
+static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
+{
+ return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
+}
+
+int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
+ u8 lane_id, u32 config_data)
+{
+ u64 data;
+ int ret;
+
+ data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
+ | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
+ | (u64)config_data << LOAD_DATA_DATA_SHIFT;
+ ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "load 8051 config: field id %d, lane %d, err %d\n",
+ (int)field_id, (int)lane_id, ret);
+ }
+ return ret;
+}
+
+/*
+ * Read the 8051 firmware "registers". Use the RAM directly. Always
+ * set the result, even on error.
+ * Return 0 on success, -errno on failure
+ */
+int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
+ u32 *result)
+{
+ u64 big_data;
+ u32 addr;
+ int ret;
+
+ /* address start depends on the lane_id */
+ if (lane_id < 4)
+ addr = (4 * NUM_GENERAL_FIELDS)
+ + (lane_id * 4 * NUM_LANE_FIELDS);
+ else
+ addr = 0;
+ addr += field_id * 4;
+
+ /* read is in 8-byte chunks, hardware will truncate the address down */
+ ret = read_8051_data(dd, addr, 8, &big_data);
+
+ if (ret == 0) {
+ /* extract the 4 bytes we want */
+ if (addr & 0x4)
+ *result = (u32)(big_data >> 32);
+ else
+ *result = (u32)big_data;
+ } else {
+ *result = 0;
+ dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
+ __func__, lane_id, field_id);
+ }
+
+ return ret;
+}
+
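+/*
+ * Pack the local PHY verify capability fields (power management, continuous
+ * remote update support) into a GENERAL_CONFIG frame for the 8051.
+ */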
+static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
+ u8 continuous)
+{
+ u32 frame;
+
+ frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
+ | power_management << POWER_MANAGEMENT_SHIFT;
+ return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
+ GENERAL_CONFIG, frame);
+}
+
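+/*
+ * Pack the local fabric verify capability fields (vau, z, vcu, vl15buf,
+ * crc_sizes) into a GENERAL_CONFIG frame for the 8051.
+ */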
+static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
+ u16 vl15buf, u8 crc_sizes)
+{
+ u32 frame;
+
+ frame = (u32)vau << VAU_SHIFT
+ | (u32)z << Z_SHIFT
+ | (u32)vcu << VCU_SHIFT
+ | (u32)vl15buf << VL15BUF_SHIFT
+ | (u32)crc_sizes << CRC_SIZES_SHIFT;
+ return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
+ GENERAL_CONFIG, frame);
+}
+
+static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
+ u8 *flag_bits, u16 *link_widths)
+{
+ u32 frame;
+
+ read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
+ &frame);
+ *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
+ *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
+ *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
+}
+
+static int write_vc_local_link_mode(struct hfi1_devdata *dd,
+ u8 misc_bits,
+ u8 flag_bits,
+ u16 link_widths)
+{
+ u32 frame;
+
+ frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
+ | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
+ | (u32)link_widths << LINK_WIDTH_SHIFT;
+ return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
+ frame);
+}
+
+static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
+ u8 device_rev)
+{
+ u32 frame;
+
+ frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
+ | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
+ return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
+}
+
+static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
+ u8 *device_rev)
+{
+ u32 frame;
+
+ read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
+ *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
+ *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
+ & REMOTE_DEVICE_REV_MASK;
+}
+
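+/*
+ * Read-modify-write the host interface version field within the 8051's
+ * RESERVED_REGISTERS frame.
+ */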
+int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
+{
+ u32 frame;
+ u32 mask;
+
+ mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
+ read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
+ /* Clear, then set field */
+ frame &= ~mask;
+ frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
+ return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
+ frame);
+}
+
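+/*
+ * Read the 8051 firmware version: major/minor from the MISC_STATUS frame,
+ * patch from the VERSION_PATCH frame.
+ */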
+void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
+ u8 *ver_patch)
+{
+ u32 frame;
+
+ read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
+ *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
+ STS_FM_VERSION_MAJOR_MASK;
+ *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
+ STS_FM_VERSION_MINOR_MASK;
+
+ read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
+ *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
+ STS_FM_VERSION_PATCH_MASK;
+}
+
+static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
+ u8 *continuous)
+{
+ u32 frame;
+
+ read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
+ *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
+ & POWER_MANAGEMENT_MASK;
+ *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
+ & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
+}
+
+static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
+ u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
+{
+ u32 frame;
+
+ read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
+ *vau = (frame >> VAU_SHIFT) & VAU_MASK;
+ *z = (frame >> Z_SHIFT) & Z_MASK;
+ *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
+ *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
+ *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
+}
+
+static void read_vc_remote_link_width(struct hfi1_devdata *dd,
+ u8 *remote_tx_rate,
+ u16 *link_widths)
+{
+ u32 frame;
+
+ read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
+ &frame);
+ *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
+ & REMOTE_TX_RATE_MASK;
+ *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
+}
+
+static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
+{
+ u32 frame;
+
+ read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
+ *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
+}
+
+static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
+{
+ read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
+}
+
+static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
+{
+ read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
+}
+
+void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
+{
+ u32 frame;
+ int ret;
+
+ *link_quality = 0;
+ if (dd->pport->host_link_state & HLS_UP) {
+ ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
+ &frame);
+ if (ret == 0)
+ *link_quality = (frame >> LINK_QUALITY_SHIFT)
+ & LINK_QUALITY_MASK;
+ }
+}
+
+static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
+{
+ u32 frame;
+
+ read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
+ *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
+}
+
+static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
+{
+ u32 frame;
+
+ read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
+ *ldr = (frame & 0xff);
+}
+
+static int read_tx_settings(struct hfi1_devdata *dd,
+ u8 *enable_lane_tx,
+ u8 *tx_polarity_inversion,
+ u8 *rx_polarity_inversion,
+ u8 *max_rate)
+{
+ u32 frame;
+ int ret;
+
+ ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
+ *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
+ & ENABLE_LANE_TX_MASK;
+ *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
+ & TX_POLARITY_INVERSION_MASK;
+ *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
+ & RX_POLARITY_INVERSION_MASK;
+ *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
+ return ret;
+}
+
+static int write_tx_settings(struct hfi1_devdata *dd,
+ u8 enable_lane_tx,
+ u8 tx_polarity_inversion,
+ u8 rx_polarity_inversion,
+ u8 max_rate)
+{
+ u32 frame;
+
+ /* no need to mask, all variable sizes match field widths */
+ frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
+ | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
+ | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
+ | max_rate << MAX_RATE_SHIFT;
+ return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
+}
+
+/*
+ * Read an idle LCB message.
+ *
+ * Returns 0 on success, -EINVAL on error
+ */
+static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
+{
+ int ret;
+
+ ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd, "read idle message: type %d, err %d\n",
+ (u32)type, ret);
+ return -EINVAL;
+ }
+ dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
+ /* return only the payload as we already know the type */
+ *data_out >>= IDLE_PAYLOAD_SHIFT;
+ return 0;
+}
+
+/*
+ * Read an idle SMA message. To be done in response to a notification from
+ * the 8051.
+ *
+ * Returns 0 on success, -EINVAL on error
+ */
+static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
+{
+ return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
+ data);
+}
+
+/*
+ * Send an idle LCB message.
+ *
+ * Returns 0 on success, -EINVAL on error
+ */
+static int send_idle_message(struct hfi1_devdata *dd, u64 data)
+{
+ int ret;
+
+ dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
+ ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
+ data, ret);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Send an idle SMA message.
+ *
+ * Returns 0 on success, -EINVAL on error
+ */
+int send_idle_sma(struct hfi1_devdata *dd, u64 message)
+{
+ u64 data;
+
+ data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
+ ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
+ return send_idle_message(dd, data);
+}
+
+/*
+ * Initialize the LCB then do a quick link up. This may or may not be
+ * in loopback.
+ *
+ * return 0 on success, -errno on error
+ */
+static int do_quick_linkup(struct hfi1_devdata *dd)
+{
+ int ret;
+
+ lcb_shutdown(dd, 0);
+
+ if (loopback) {
+ /* LCB_CFG_LOOPBACK.VAL = 2 */
+ /* LCB_CFG_LANE_WIDTH.VAL = 0 */
+ write_csr(dd, DC_LCB_CFG_LOOPBACK,
+ IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
+ write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
+ }
+
+ /* start the LCBs */
+ /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
+ write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
+
+ /* simulator only loopback steps */
+ if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
+ /* LCB_CFG_RUN.EN = 1 */
+ write_csr(dd, DC_LCB_CFG_RUN,
+ 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
+
+ ret = wait_link_transfer_active(dd, 10);
+ if (ret)
+ return ret;
+
+ write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
+ 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
+ }
+
+ if (!loopback) {
+ /*
+ * When doing quick linkup and not in loopback, both
+ * sides must be done with LCB set-up before either
+ * starts the quick linkup. Put a delay here so that
+ * both sides can be started and have a chance to be
+ * done with LCB set up before resuming.
+ */
+ dd_dev_err(dd,
+ "Pausing for peer to be finished with LCB set up\n");
+ msleep(5000);
+ dd_dev_err(dd, "Continuing with quick linkup\n");
+ }
+
+ write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
+ set_8051_lcb_access(dd);
+
+ /*
+ * State "quick" LinkUp request sets the physical link state to
+ * LinkUp without a verify capability sequence.
+ * This state is in simulator v37 and later.
+ */
+ ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "%s: set physical link state to quick LinkUp failed with return %d\n",
+ __func__, ret);
+
+ set_host_lcb_access(dd);
+ write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
+
+ if (ret >= 0)
+ ret = -EINVAL;
+ return ret;
+ }
+
+ return 0; /* success */
+}
+
+/*
+ * Do all special steps to set up loopback.
+ */
+static int init_loopback(struct hfi1_devdata *dd)
+{
+ dd_dev_info(dd, "Entering loopback mode\n");
+
+ /* all loopbacks should disable self GUID check */
+ write_csr(dd, DC_DC8051_CFG_MODE,
+ (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
+
+ /*
+ * The simulator has only one loopback option - LCB. Switch
+ * to that option, which includes quick link up.
+ *
+ * Accept all valid loopback values.
+ */
+ if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
+ (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
+ loopback == LOOPBACK_CABLE)) {
+ loopback = LOOPBACK_LCB;
+ quick_linkup = 1;
+ return 0;
+ }
+
+ /*
+ * SerDes loopback init sequence is handled in set_local_link_attributes
+ */
+ if (loopback == LOOPBACK_SERDES)
+ return 0;
+
+ /* LCB loopback - handled at poll time */
+ if (loopback == LOOPBACK_LCB) {
+ quick_linkup = 1; /* LCB is always quick linkup */
+
+ /* not supported in emulation due to emulation RTL changes */
+ if (dd->icode == ICODE_FPGA_EMULATION) {
+ dd_dev_err(dd,
+ "LCB loopback not supported in emulation\n");
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ /* external cable loopback requires no extra steps */
+ if (loopback == LOOPBACK_CABLE)
+ return 0;
+
+ dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
+ return -EINVAL;
+}
+
+/*
+ * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
+ * used in the Verify Capability link width attribute.
+ */
+static u16 opa_to_vc_link_widths(u16 opa_widths)
+{
+ int i;
+ u16 result = 0;
+
+ static const struct link_bits {
+ u16 from;
+ u16 to;
+ } opa_link_xlate[] = {
+ { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
+ { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
+ { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
+ { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
+ };
+
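+ /* e.g., OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X -> 0x1 | 0x8 = 0x9 */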
+ for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
+ if (opa_widths & opa_link_xlate[i].from)
+ result |= opa_link_xlate[i].to;
+ }
+ return result;
+}
+
+/*
+ * Set link attributes before moving to polling.
+ */
+static int set_local_link_attributes(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u8 enable_lane_tx;
+ u8 tx_polarity_inversion;
+ u8 rx_polarity_inversion;
+ int ret;
+ u32 misc_bits = 0;
+ /* reset our fabric serdes to clear any lingering problems */
+ fabric_serdes_reset(dd);
+
+ /* set the local tx rate - need to read-modify-write */
+ ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
+ &rx_polarity_inversion, &ppd->local_tx_rate);
+ if (ret)
+ goto set_local_link_attributes_fail;
+
+ if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
+ /* set the tx rate to the fastest enabled */
+ if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
+ ppd->local_tx_rate = 1;
+ else
+ ppd->local_tx_rate = 0;
+ } else {
+ /* set the tx rate to all enabled */
+ ppd->local_tx_rate = 0;
+ if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
+ ppd->local_tx_rate |= 2;
+ if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
+ ppd->local_tx_rate |= 1;
+ }
+
+ enable_lane_tx = 0xF; /* enable all four lanes */
+ ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
+ rx_polarity_inversion, ppd->local_tx_rate);
+ if (ret != HCMD_SUCCESS)
+ goto set_local_link_attributes_fail;
+
+ ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "Failed to set host interface version, return 0x%x\n",
+ ret);
+ goto set_local_link_attributes_fail;
+ }
+
+ /*
+ * DC supports continuous updates.
+ */
+ ret = write_vc_local_phy(dd,
+ 0 /* no power management */,
+ 1 /* continuous updates */);
+ if (ret != HCMD_SUCCESS)
+ goto set_local_link_attributes_fail;
+
+ /* z=1 in the next call: AU of 0 is not supported by the hardware */
+ ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
+ ppd->port_crc_mode_enabled);
+ if (ret != HCMD_SUCCESS)
+ goto set_local_link_attributes_fail;
+
+ /*
+ * SerDes loopback init sequence requires
+ * setting bit 0 of MISC_CONFIG_BITS
+ */
+ if (loopback == LOOPBACK_SERDES)
+ misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;
+
+ /*
+ * An external device configuration request is used to reset the LCB
+ * so that it can retry obtaining operational lanes when the first
+ * attempt is unsuccessful.
+ */
+ if (dd->dc8051_ver >= dc8051_ver(1, 25, 0))
+ misc_bits |= 1 << EXT_CFG_LCB_RESET_SUPPORTED_SHIFT;
+
+ ret = write_vc_local_link_mode(dd, misc_bits, 0,
+ opa_to_vc_link_widths(
+ ppd->link_width_enabled));
+ if (ret != HCMD_SUCCESS)
+ goto set_local_link_attributes_fail;
+
+ /* let peer know who we are */
+ ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
+ if (ret == HCMD_SUCCESS)
+ return 0;
+
+set_local_link_attributes_fail:
+ dd_dev_err(dd,
+ "Failed to set local link attributes, return 0x%x\n",
+ ret);
+ return ret;
+}
+
+/*
+ * Call this to start the link.
+ * Do not do anything if the link is disabled.
+ * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
+ */
+int start_link(struct hfi1_pportdata *ppd)
+{
+ /*
+ * Tune the SerDes to a ballpark setting for optimal signal and bit
+ * error rate. Needs to be done before starting the link.
+ */
+ tune_serdes(ppd);
+
+ if (!ppd->driver_link_ready) {
+ dd_dev_info(ppd->dd,
+ "%s: stopping link start because driver is not ready\n",
+ __func__);
+ return 0;
+ }
+
+ /*
+ * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
+ * pkey table can be configured properly if the HFI unit is connected
+ * to switch port with MgmtAllowed=NO
+ */
+ clear_full_mgmt_pkey(ppd);
+
+ return set_link_state(ppd, HLS_DN_POLL);
+}
+
+static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u64 mask;
+ unsigned long timeout;
+
+ /*
+ * Some QSFP cables have a quirk that asserts the IntN line as a side
+ * effect of power up on plug-in. We ignore this false positive
+ * interrupt until the module has finished powering up by waiting for
+ * a minimum timeout of the module inrush initialization time of
+ * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
+ * module have stabilized.
+ */
+ msleep(500);
+
+ /*
+ * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
+ */
+ timeout = jiffies + msecs_to_jiffies(2000);
+ while (1) {
+ mask = read_csr(dd, dd->hfi1_id ?
+ ASIC_QSFP2_IN : ASIC_QSFP1_IN);
+ if (!(mask & QSFP_HFI0_INT_N))
+ break;
+ if (time_after(jiffies, timeout)) {
+ dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
+ __func__);
+ break;
+ }
+ udelay(2);
+ }
+}
+
+static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u64 mask;
+
+ mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
+ if (enable) {
+ /*
+ * Clear the status register to avoid an immediate interrupt
+ * when we re-enable the IntN pin
+ */
+ write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
+ QSFP_HFI0_INT_N);
+ mask |= (u64)QSFP_HFI0_INT_N;
+ } else {
+ mask &= ~(u64)QSFP_HFI0_INT_N;
+ }
+ write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
+}
+
+int reset_qsfp(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u64 mask, qsfp_mask;
+
+ /* Disable INT_N from triggering QSFP interrupts */
+ set_qsfp_int_n(ppd, 0);
+
+ /* Reset the QSFP */
+ mask = (u64)QSFP_HFI0_RESET_N;
+
+ qsfp_mask = read_csr(dd,
+ dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
+ qsfp_mask &= ~mask;
+ write_csr(dd,
+ dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
+
+ udelay(10);
+
+ qsfp_mask |= mask;
+ write_csr(dd,
+ dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
+
+ wait_for_qsfp_init(ppd);
+
+ /*
+ * Allow INT_N to trigger the QSFP interrupt to watch
+ * for alarms and warnings
+ */
+ set_qsfp_int_n(ppd, 1);
+
+ /*
+ * After the reset, AOC transmitters are enabled by default. They need
+ * to be turned off to complete the QSFP setup before they can be
+ * enabled again.
+ */
+ return set_qsfp_tx(ppd, 0);
+}
+
+static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
+ u8 *qsfp_interrupt_status)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+
+ if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
+ (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
+ dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
+ (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
+ dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
+ __func__);
+
+ /*
+ * The remaining alarms/warnings don't matter if the link is down.
+ */
+ if (ppd->host_link_state & HLS_DOWN)
+ return 0;
+
+ if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
+ (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
+ dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
+ (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
+ dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
+ __func__);
+
+ /* Byte 2 is vendor specific */
+
+ if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
+ (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
+ dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
+ (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
+ dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
+ (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
+ dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
+ (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
+ dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
+ (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
+ dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
+ (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
+ dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
+ (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
+ dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
+ (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
+ dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
+ (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
+ dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
+ (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
+ dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
+ (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
+ dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
+ (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
+ dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
+ __func__);
+
+ /* Bytes 9-10 and 11-12 are reserved */
+ /* Bytes 13-15 are vendor specific */
+
+ return 0;
+}
+
+/* This routine is only scheduled if the QSFP module present signal is asserted */
+void qsfp_event(struct work_struct *work)
+{
+ struct qsfp_data *qd;
+ struct hfi1_pportdata *ppd;
+ struct hfi1_devdata *dd;
+
+ qd = container_of(work, struct qsfp_data, qsfp_work);
+ ppd = qd->ppd;
+ dd = ppd->dd;
+
+ /* Sanity check */
+ if (!qsfp_mod_present(ppd))
+ return;
+
+ if (ppd->host_link_state == HLS_DN_DISABLE) {
+ dd_dev_info(ppd->dd,
+ "%s: stopping link start because link is disabled\n",
+ __func__);
+ return;
+ }
+
+ /*
+ * Turn DC back on after cable has been re-inserted. Up until
+ * now, the DC has been in reset to save power.
+ */
+ dc_start(dd);
+
+ if (qd->cache_refresh_required) {
+ set_qsfp_int_n(ppd, 0);
+
+ wait_for_qsfp_init(ppd);
+
+ /*
+ * Allow INT_N to trigger the QSFP interrupt to watch
+ * for alarms and warnings
+ */
+ set_qsfp_int_n(ppd, 1);
+
+ start_link(ppd);
+ }
+
+ if (qd->check_interrupt_flags) {
+ u8 qsfp_interrupt_status[16] = {0,};
+
+ if (one_qsfp_read(ppd, dd->hfi1_id, 6,
+ &qsfp_interrupt_status[0], 16) != 16) {
+ dd_dev_info(dd,
+ "%s: Failed to read status of QSFP module\n",
+ __func__);
+ } else {
+ unsigned long flags;
+
+ handle_qsfp_error_conditions(
+ ppd, qsfp_interrupt_status);
+ spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
+ ppd->qsfp_info.check_interrupt_flags = 0;
+ spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
+ flags);
+ }
+ }
+}
+
+void init_qsfp_int(struct hfi1_devdata *dd)
+{
+ struct hfi1_pportdata *ppd = dd->pport;
+ u64 qsfp_mask;
+
+ qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
+ /* Clear current status to avoid spurious interrupts */
+ write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
+ qsfp_mask);
+ write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
+ qsfp_mask);
+
+ set_qsfp_int_n(ppd, 0);
+
+ /* Handle active low nature of INT_N and MODPRST_N pins */
+ if (qsfp_mod_present(ppd))
+ qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
+ write_csr(dd,
+ dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
+ qsfp_mask);
+
+ /* Enable the appropriate QSFP IRQ source */
+ if (!dd->hfi1_id)
+ set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true);
+ else
+ set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true);
+}
+
+/*
+ * Do a one-time initialize of the LCB block.
+ */
+static void init_lcb(struct hfi1_devdata *dd)
+{
+ /* simulator does not correctly handle LCB cclk loopback, skip */
+ if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
+ return;
+
+ /* the DC has been reset earlier in the driver load */
+
+ /* set LCB for cclk loopback on the port */
+ write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
+ write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
+ write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
+ write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
+ write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
+ write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
+ write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
+}
+
+/*
+ * Perform a test read on the QSFP. Return 0 on success, -ERRNO
+ * on error.
+ */
+static int test_qsfp_read(struct hfi1_pportdata *ppd)
+{
+ int ret;
+ u8 status;
+
+ /*
+ * Report success if not a QSFP or, if it is a QSFP, but the cable is
+ * not present
+ */
+ if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
+ return 0;
+
+ /* read byte 2, the status byte */
+ ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
+ if (ret < 0)
+ return ret;
+ if (ret != 1)
+ return -EIO;
+
+ return 0; /* success */
+}
+
+/*
+ * Values for QSFP retry.
+ *
+ * Give up after 10s (20 x 500ms). The overall timeout was empirically
+ * arrived at from experience on a large cluster.
+ */
+#define MAX_QSFP_RETRIES 20
+#define QSFP_RETRY_WAIT 500 /* msec */
+
+/*
+ * Try a QSFP read. If it fails, schedule a retry for later.
+ * Called on first link activation after driver load.
+ */
+static void try_start_link(struct hfi1_pportdata *ppd)
+{
+ if (test_qsfp_read(ppd)) {
+ /* read failed */
+ if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
+ dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
+ return;
+ }
+ dd_dev_info(ppd->dd,
+ "QSFP not responding, waiting and retrying %d\n",
+ (int)ppd->qsfp_retry_count);
+ ppd->qsfp_retry_count++;
+ queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
+ msecs_to_jiffies(QSFP_RETRY_WAIT));
+ return;
+ }
+ ppd->qsfp_retry_count = 0;
+
+ start_link(ppd);
+}
+
+/*
+ * Workqueue function to start the link after a delay.
+ */
+void handle_start_link(struct work_struct *work)
+{
+ struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+ start_link_work.work);
+ try_start_link(ppd);
+}
+
+int bringup_serdes(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u64 guid;
+ int ret;
+
+ if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
+ add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
+
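+ /* use the programmed port GUID, else derive one from the device base GUID */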
+ guid = ppd->guids[HFI1_PORT_GUID_INDEX];
+ if (!guid) {
+ if (dd->base_guid)
+ guid = dd->base_guid + ppd->port - 1;
+ ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
+ }
+
+ /* Set linkinit_reason on power up per OPA spec */
+ ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
+
+ /* one-time init of the LCB */
+ init_lcb(dd);
+
+ if (loopback) {
+ ret = init_loopback(dd);
+ if (ret < 0)
+ return ret;
+ }
+
+ get_port_type(ppd);
+ if (ppd->port_type == PORT_TYPE_QSFP) {
+ set_qsfp_int_n(ppd, 0);
+ wait_for_qsfp_init(ppd);
+ set_qsfp_int_n(ppd, 1);
+ }
+
+ try_start_link(ppd);
+ return 0;
+}
+
+void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+
+ /*
+ * Shut down the link and keep it down. First turn off the flag that
+ * says the driver wants to allow the link to be up (driver_link_ready).
+ * Then make sure the link is not automatically restarted
+ * (link_enabled). Cancel any pending restart. And finally
+ * go offline.
+ */
+ ppd->driver_link_ready = 0;
+ ppd->link_enabled = 0;
+
+ ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
+ flush_delayed_work(&ppd->start_link_work);
+ cancel_delayed_work_sync(&ppd->start_link_work);
+
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT);
+ set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0,
+ OPA_LINKDOWN_REASON_REBOOT);
+ set_link_state(ppd, HLS_DN_OFFLINE);
+
+ /* disable the port */
+ clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
+ cancel_work_sync(&ppd->freeze_work);
+}
+
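+/*
+ * Allocate the per-CPU RC ACK, queued ACK, and delayed completion counters
+ * for each port's ibport data.
+ */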
+static inline int init_cpu_counters(struct hfi1_devdata *dd)
+{
+ struct hfi1_pportdata *ppd;
+ int i;
+
+ ppd = (struct hfi1_pportdata *)(dd + 1);
+ for (i = 0; i < dd->num_pports; i++, ppd++) {
+ ppd->ibport_data.rvp.rc_acks = NULL;
+ ppd->ibport_data.rvp.rc_qacks = NULL;
+ ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
+ ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
+ ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
+ if (!ppd->ibport_data.rvp.rc_acks ||
+ !ppd->ibport_data.rvp.rc_delayed_comp ||
+ !ppd->ibport_data.rvp.rc_qacks)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * index is the index into the receive array
+ */
+void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
+ u32 type, unsigned long pa, u16 order)
+{
+ u64 reg;
+
+ if (!(dd->flags & HFI1_PRESENT))
+ goto done;
+
+ if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
+ pa = 0;
+ order = 0;
+ } else if (type > PT_INVALID) {
+ dd_dev_err(dd,
+ "unexpected receive array type %u for index %u, not handled\n",
+ type, index);
+ goto done;
+ }
+ trace_hfi1_put_tid(dd, index, type, pa, order);
+
+#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
+ reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
+ | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
+ | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
+ << RCV_ARRAY_RT_ADDR_SHIFT;
+ trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
+ writeq(reg, dd->rcvarray_wc + (index * 8));
+
+ if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
+ /*
+ * Eager entries are written and flushed
+ *
+ * Expected entries are flushed every 4 writes
+ */
+ flush_wc();
+done:
+ return;
+}
+
+void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
+{
+ struct hfi1_devdata *dd = rcd->dd;
+ u32 i;
+
+ /* this could be optimized */
+ for (i = rcd->eager_base; i < rcd->eager_base +
+ rcd->egrbufs.alloced; i++)
+ hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
+
+ for (i = rcd->expected_base;
+ i < rcd->expected_base + rcd->expected_count; i++)
+ hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
+}
+
+static const char * const ib_cfg_name_strings[] = {
+ "HFI1_IB_CFG_LIDLMC",
+ "HFI1_IB_CFG_LWID_DG_ENB",
+ "HFI1_IB_CFG_LWID_ENB",
+ "HFI1_IB_CFG_LWID",
+ "HFI1_IB_CFG_SPD_ENB",
+ "HFI1_IB_CFG_SPD",
+ "HFI1_IB_CFG_RXPOL_ENB",
+ "HFI1_IB_CFG_LREV_ENB",
+ "HFI1_IB_CFG_LINKLATENCY",
+ "HFI1_IB_CFG_HRTBT",
+ "HFI1_IB_CFG_OP_VLS",
+ "HFI1_IB_CFG_VL_HIGH_CAP",
+ "HFI1_IB_CFG_VL_LOW_CAP",
+ "HFI1_IB_CFG_OVERRUN_THRESH",
+ "HFI1_IB_CFG_PHYERR_THRESH",
+ "HFI1_IB_CFG_LINKDEFAULT",
+ "HFI1_IB_CFG_PKEYS",
+ "HFI1_IB_CFG_MTU",
+ "HFI1_IB_CFG_LSTATE",
+ "HFI1_IB_CFG_VL_HIGH_LIMIT",
+ "HFI1_IB_CFG_PMA_TICKS",
+ "HFI1_IB_CFG_PORT"
+};
+
+static const char *ib_cfg_name(int which)
+{
+ if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
+ return "invalid";
+ return ib_cfg_name_strings[which];
+}
+
+int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ int val = 0;
+
+ switch (which) {
+ case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
+ val = ppd->link_width_enabled;
+ break;
+ case HFI1_IB_CFG_LWID: /* currently active Link-width */
+ val = ppd->link_width_active;
+ break;
+ case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
+ val = ppd->link_speed_enabled;
+ break;
+ case HFI1_IB_CFG_SPD: /* current Link speed */
+ val = ppd->link_speed_active;
+ break;
+
+ case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
+ case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
+ case HFI1_IB_CFG_LINKLATENCY:
+ goto unimplemented;
+
+ case HFI1_IB_CFG_OP_VLS:
+ val = ppd->actual_vls_operational;
+ break;
+ case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
+ val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
+ break;
+ case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
+ val = VL_ARB_LOW_PRIO_TABLE_SIZE;
+ break;
+ case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+ val = ppd->overrun_threshold;
+ break;
+ case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+ val = ppd->phy_error_threshold;
+ break;
+ case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+ val = HLS_DEFAULT;
+ break;
+
+ case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
+ case HFI1_IB_CFG_PMA_TICKS:
+ default:
+unimplemented:
+ if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
+ dd_dev_info(
+ dd,
+ "%s: which %s: not implemented\n",
+ __func__,
+ ib_cfg_name(which));
+ break;
+ }
+
+ return val;
+}
+
+/*
+ * The largest MAD packet size.
+ */
+#define MAX_MAD_PACKET 2048
+
+/*
+ * Return the maximum header bytes that can go on the _wire_
+ * for this device. This count includes the ICRC which is
+ * not part of the packet held in memory but it is appended
+ * by the HW.
+ * This is dependent on the device's receive header entry size.
+ * HFI allows this to be set per-receive context, but the
+ * driver presently enforces a global value.
+ */
+u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
+{
+ /*
+ * The maximum non-payload (MTU) bytes in LRH.PktLen are
+ * the Receive Header Entry Size minus the PBC (or RHF) size
+ * plus one DW for the ICRC appended by HW.
+ *
+ * dd->rcd[0].rcvhdrqentsize is in DW.
+ * We use rcd[0] as all contexts will have the same value. Also,
+ * the first kernel context would have been allocated by now so
+ * we are guaranteed a valid value.
+ */
+ return (get_hdrqentsize(dd->rcd[0]) - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
+}
+
+/*
+ * Set Send Length
+ * @ppd: per port data
+ *
+ * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
+ * registers compare against LRH.PktLen, so use the max bytes included
+ * in the LRH.
+ *
+ * This routine changes all VL values except VL15, which it maintains at
+ * the same value.
+ */
+static void set_send_length(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
+ u32 maxvlmtu = dd->vld[15].mtu;
+ u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
+ & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
+ SEND_LEN_CHECK1_LEN_VL15_SHIFT;
+ int i, j;
+ u32 thres;
+
+ for (i = 0; i < ppd->vls_supported; i++) {
+ if (dd->vld[i].mtu > maxvlmtu)
+ maxvlmtu = dd->vld[i].mtu;
+ if (i <= 3)
+ len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
+ & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
+ ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
+ else
+ len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
+ & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
+ ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
+ }
+ write_csr(dd, SEND_LEN_CHECK0, len1);
+ write_csr(dd, SEND_LEN_CHECK1, len2);
+ /* adjust kernel credit return thresholds based on new MTUs */
+ /* all kernel receive contexts have the same hdrqentsize */
+ for (i = 0; i < ppd->vls_supported; i++) {
+ thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
+ sc_mtu_to_threshold(dd->vld[i].sc,
+ dd->vld[i].mtu,
+ get_hdrqentsize(dd->rcd[0])));
+ for (j = 0; j < INIT_SC_PER_VL; j++)
+ sc_set_cr_threshold(
+ pio_select_send_context_vl(dd, j, i),
+ thres);
+ }
+ thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
+ sc_mtu_to_threshold(dd->vld[15].sc,
+ dd->vld[15].mtu,
+ dd->rcd[0]->rcvhdrqentsize));
+ sc_set_cr_threshold(dd->vld[15].sc, thres);
+
+ /* Adjust maximum MTU for the port in DC */
+ dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
+ (ilog2(maxvlmtu >> 8) + 1);
+ len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
+ len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
+ len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
+ DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
+ write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
+}
+
+static void set_lidlmc(struct hfi1_pportdata *ppd)
+{
+ int i;
+ u64 sreg = 0;
+ struct hfi1_devdata *dd = ppd->dd;
+ u32 mask = ~((1U << ppd->lmc) - 1);
+ u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
+ u32 lid;
+
+ /*
+ * Program 0 into the CSR if the port LID is extended. This prevents
+ * 9B packets from being sent out for large LIDs.
+ */
+ lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
+ c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
+ | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
+ c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
+ << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
+ ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
+ << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
+ write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
+
+ /*
+ * Iterate over all the send contexts and set their SLID check
+ */
+ sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
+ SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
+ (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
+ SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
+
+ for (i = 0; i < chip_send_contexts(dd); i++) {
+ hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
+ i, (u32)sreg);
+ write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
+ }
+
+ /* Now we have to do the same thing for the sdma engines */
+ sdma_update_lmc(dd, mask, lid);
+}
+
+static const char *state_completed_string(u32 completed)
+{
+ static const char * const state_completed[] = {
+ "EstablishComm",
+ "OptimizeEQ",
+ "VerifyCap"
+ };
+
+ if (completed < ARRAY_SIZE(state_completed))
+ return state_completed[completed];
+
+ return "unknown";
+}
+
+static const char all_lanes_dead_timeout_expired[] =
+ "All lanes were inactive – was the interconnect media removed?";
+static const char tx_out_of_policy[] =
+ "Passing lanes on local port do not meet the local link width policy";
+static const char no_state_complete[] =
+ "State timeout occurred before link partner completed the state";
+static const char * const state_complete_reasons[] = {
+ [0x00] = "Reason unknown",
+ [0x01] = "Link was halted by driver, refer to LinkDownReason",
+ [0x02] = "Link partner reported failure",
+ [0x10] = "Unable to achieve frame sync on any lane",
+ [0x11] =
+ "Unable to find a common bit rate with the link partner",
+ [0x12] =
+ "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
+ [0x13] =
+ "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
+ [0x14] = no_state_complete,
+ [0x15] =
+ "State timeout occurred before link partner identified equalization presets",
+ [0x16] =
+ "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
+ [0x17] = tx_out_of_policy,
+ [0x20] = all_lanes_dead_timeout_expired,
+ [0x21] =
+ "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
+ [0x22] = no_state_complete,
+ [0x23] =
+ "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
+ [0x24] = tx_out_of_policy,
+ [0x30] = all_lanes_dead_timeout_expired,
+ [0x31] =
+ "State timeout occurred waiting for host to process received frames",
+ [0x32] = no_state_complete,
+ [0x33] =
+ "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
+ [0x34] = tx_out_of_policy,
+ [0x35] = "Negotiated link width is mutually exclusive",
+ [0x36] =
+ "Timed out before receiving verifycap frames in VerifyCap.Exchange",
+ [0x37] = "Unable to resolve secure data exchange",
+};
+
+static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
+ u32 code)
+{
+ const char *str = NULL;
+
+ if (code < ARRAY_SIZE(state_complete_reasons))
+ str = state_complete_reasons[code];
+
+ if (str)
+ return str;
+ return "Reserved";
+}
+
+/* describe the given last state complete frame */
+static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
+ const char *prefix)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u32 success;
+ u32 state;
+ u32 reason;
+ u32 lanes;
+
+ /*
+ * Decode frame:
+ * [ 0: 0] - success
+ * [ 3: 1] - state
+ * [ 7: 4] - next state timeout
+ * [15: 8] - reason code
+ * [31:16] - lanes
+ */
+ success = frame & 0x1;
+ state = (frame >> 1) & 0x7;
+ reason = (frame >> 8) & 0xff;
+ lanes = (frame >> 16) & 0xffff;
+
+ dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
+ prefix, frame);
+ dd_dev_err(dd, " last reported state state: %s (0x%x)\n",
+ state_completed_string(state), state);
+ dd_dev_err(dd, " state successfully completed: %s\n",
+ success ? "yes" : "no");
+ dd_dev_err(dd, " fail reason 0x%x: %s\n",
+ reason, state_complete_reason_code_string(ppd, reason));
+ dd_dev_err(dd, " passing lane mask: 0x%x", lanes);
+}
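Aside, not part of this patch: the frame layout documented in decode_state_complete() can be exercised on its own. The sketch below is a stand-alone decoder for that bit layout; the sample frame value is made up.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t frame = 0x00ff1205;		/* hypothetical sample value */
	uint32_t success = frame & 0x1;
	uint32_t state   = (frame >> 1) & 0x7;
	uint32_t timeout = (frame >> 4) & 0xf;	/* "next state timeout" nibble */
	uint32_t reason  = (frame >> 8) & 0xff;
	uint32_t lanes   = (frame >> 16) & 0xffff;

	printf("success %u state %u timeout %u reason 0x%02x lanes 0x%04x\n",
	       success, state, timeout, reason, lanes);
	return 0;
}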
+
+/*
+ * Read the last state complete frames and explain them. This routine
+ * expects to be called if the link went down during link negotiation
+ * and initialization (LNI). That is, anywhere between polling and link up.
+ */
+static void check_lni_states(struct hfi1_pportdata *ppd)
+{
+ u32 last_local_state;
+ u32 last_remote_state;
+
+ read_last_local_state(ppd->dd, &last_local_state);
+ read_last_remote_state(ppd->dd, &last_remote_state);
+
+ /*
+ * Don't report anything if there is nothing to report. A value of
+ * 0 means the link was taken down while polling and there was no
+ * training in progress.
+ */
+ if (last_local_state == 0 && last_remote_state == 0)
+ return;
+
+ decode_state_complete(ppd, last_local_state, "transmitted");
+ decode_state_complete(ppd, last_remote_state, "received");
+}
+
+/* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
+static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
+{
+ u64 reg;
+ unsigned long timeout;
+
+ /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
+ timeout = jiffies + msecs_to_jiffies(wait_ms);
+ while (1) {
+ reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
+ if (reg)
+ break;
+ if (time_after(jiffies, timeout)) {
+ dd_dev_err(dd,
+ "timeout waiting for LINK_TRANSFER_ACTIVE\n");
+ return -ETIMEDOUT;
+ }
+ udelay(2);
+ }
+ return 0;
+}
+
+/* called when the logical link state is not down as it should be */
+static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+
+ /*
+ * Bring link up in LCB loopback
+ */
+ write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
+ write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
+ DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
+
+ write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
+ write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
+ write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
+ write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
+
+ write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
+ (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
+ udelay(3);
+ write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
+ write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
+
+ wait_link_transfer_active(dd, 100);
+
+ /*
+ * Bring the link down again.
+ */
+ write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
+ write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
+ write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
+
+ dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
+}
+
+/*
+ * Helper for set_link_state(). Do not call except from that routine.
+ * Expects ppd->hls_mutex to be held.
+ *
+ * @rem_reason value to be sent to the neighbor
+ *
+ * LinkDownReasons only set if transition succeeds.
+ */
+static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u32 previous_state;
+ int offline_state_ret;
+ int ret;
+
+ update_lcb_cache(dd);
+
+ previous_state = ppd->host_link_state;
+ ppd->host_link_state = HLS_GOING_OFFLINE;
+
+ /* start offline transition */
+ ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
+
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "Failed to transition to Offline link state, return %d\n",
+ ret);
+ return -EINVAL;
+ }
+ if (ppd->offline_disabled_reason ==
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
+
+ offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
+ if (offline_state_ret < 0)
+ return offline_state_ret;
+
+ /* Disabling AOC transmitters */
+ if (ppd->port_type == PORT_TYPE_QSFP &&
+ ppd->qsfp_info.limiting_active &&
+ qsfp_mod_present(ppd)) {
+ int ret;
+
+ ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
+ if (ret == 0) {
+ set_qsfp_tx(ppd, 0);
+ release_chip_resource(dd, qsfp_resource(dd));
+ } else {
+ /* not fatal, but should warn */
+ dd_dev_err(dd,
+ "Unable to acquire lock to turn off QSFP TX\n");
+ }
+ }
+
+ /*
+ * Wait for the offline.Quiet transition if it hasn't happened yet. It
+ * can take a while for the link to go down.
+ */
+ if (offline_state_ret != PLS_OFFLINE_QUIET) {
+ ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * Now in charge of LCB - must be after the physical state is
+ * offline.quiet and before host_link_state is changed.
+ */
+ set_host_lcb_access(dd);
+ write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
+
+ /* make sure the logical state is also down */
+ ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
+ if (ret)
+ force_logical_link_state_down(ppd);
+
+ ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
+ update_statusp(ppd, IB_PORT_DOWN);
+
+ /*
+ * The LNI has a mandatory wait time after the physical state
+ * moves to Offline.Quiet. The wait time may be different
+ * depending on how the link went down. The 8051 firmware
+ * will observe the needed wait time and only move to ready
+ * when that is completed. The largest of the quiet timeouts
+ * is 6s, so wait that long and then at least 0.5s more for
+ * other transitions, and another 0.5s for a buffer.
+ */
+ ret = wait_fm_ready(dd, 7000);
+ if (ret) {
+ dd_dev_err(dd,
+ "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
+ /* state is really offline, so make it so */
+ ppd->host_link_state = HLS_DN_OFFLINE;
+ return ret;
+ }
+
+ /*
+ * The state is now offline and the 8051 is ready to accept host
+ * requests.
+ * - change our state
+ * - notify others if we were previously in a linkup state
+ */
+ ppd->host_link_state = HLS_DN_OFFLINE;
+ if (previous_state & HLS_UP) {
+ /* went down while link was up */
+ handle_linkup_change(dd, 0);
+ } else if (previous_state
+ & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
+ /* went down while attempting link up */
+ check_lni_states(ppd);
+
+ /* The QSFP doesn't need to be reset on LNI failure */
+ ppd->qsfp_info.reset_needed = 0;
+ }
+
+ /* the active link width (downgrade) is 0 on link down */
+ ppd->link_width_active = 0;
+ ppd->link_width_downgrade_tx_active = 0;
+ ppd->link_width_downgrade_rx_active = 0;
+ ppd->current_egress_rate = 0;
+ return 0;
+}
+
+/* return the link state name */
+static const char *link_state_name(u32 state)
+{
+ const char *name;
+ int n = ilog2(state);
+ static const char * const names[] = {
+ [__HLS_UP_INIT_BP] = "INIT",
+ [__HLS_UP_ARMED_BP] = "ARMED",
+ [__HLS_UP_ACTIVE_BP] = "ACTIVE",
+ [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
+ [__HLS_DN_POLL_BP] = "POLL",
+ [__HLS_DN_DISABLE_BP] = "DISABLE",
+ [__HLS_DN_OFFLINE_BP] = "OFFLINE",
+ [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
+ [__HLS_GOING_UP_BP] = "GOING_UP",
+ [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
+ [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
+ };
+
+ name = n < ARRAY_SIZE(names) ? names[n] : NULL;
+ return name ? name : "unknown";
+}
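Aside, not part of this patch: link_state_name() depends on the HLS_* values being one-hot, so ilog2() of the state indexes the name table directly. The stand-alone sketch below shows the same pattern with hypothetical bit positions (not the driver's __HLS_*_BP values) and uses __builtin_ctz() in place of ilog2():

#include <stdio.h>

enum { UP_INIT_BP, UP_ARMED_BP, UP_ACTIVE_BP, DN_OFFLINE_BP, NUM_BP };	/* hypothetical */

static const char * const names[NUM_BP] = {
	[UP_INIT_BP]    = "INIT",
	[UP_ARMED_BP]   = "ARMED",
	[UP_ACTIVE_BP]  = "ACTIVE",
	[DN_OFFLINE_BP] = "OFFLINE",
};

static const char *name_of(unsigned int state)
{
	int n;

	if (!state)
		return "unknown";
	n = __builtin_ctz(state);	/* ilog2() stand-in for a one-hot value */
	return (n < NUM_BP && names[n]) ? names[n] : "unknown";
}

int main(void)
{
	printf("%s %s\n", name_of(1u << UP_ACTIVE_BP), name_of(1u << 30));
	return 0;
}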
+
+/* return the link state reason name */
+static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
+{
+ if (state == HLS_UP_INIT) {
+ switch (ppd->linkinit_reason) {
+ case OPA_LINKINIT_REASON_LINKUP:
+ return "(LINKUP)";
+ case OPA_LINKINIT_REASON_FLAPPING:
+ return "(FLAPPING)";
+ case OPA_LINKINIT_OUTSIDE_POLICY:
+ return "(OUTSIDE_POLICY)";
+ case OPA_LINKINIT_QUARANTINED:
+ return "(QUARANTINED)";
+ case OPA_LINKINIT_INSUFIC_CAPABILITY:
+ return "(INSUFIC_CAPABILITY)";
+ default:
+ break;
+ }
+ }
+ return "";
+}
+
+/*
+ * driver_pstate - convert the driver's notion of a port's
+ * state (an HLS_*) into a physical state (an {IB,OPA}_PORTPHYSSTATE_*).
+ * Return -1 (converted to a u32) to indicate error.
+ */
+u32 driver_pstate(struct hfi1_pportdata *ppd)
+{
+ switch (ppd->host_link_state) {
+ case HLS_UP_INIT:
+ case HLS_UP_ARMED:
+ case HLS_UP_ACTIVE:
+ return IB_PORTPHYSSTATE_LINKUP;
+ case HLS_DN_POLL:
+ return IB_PORTPHYSSTATE_POLLING;
+ case HLS_DN_DISABLE:
+ return IB_PORTPHYSSTATE_DISABLED;
+ case HLS_DN_OFFLINE:
+ return OPA_PORTPHYSSTATE_OFFLINE;
+ case HLS_VERIFY_CAP:
+ return IB_PORTPHYSSTATE_TRAINING;
+ case HLS_GOING_UP:
+ return IB_PORTPHYSSTATE_TRAINING;
+ case HLS_GOING_OFFLINE:
+ return OPA_PORTPHYSSTATE_OFFLINE;
+ case HLS_LINK_COOLDOWN:
+ return OPA_PORTPHYSSTATE_OFFLINE;
+ case HLS_DN_DOWNDEF:
+ default:
+ dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
+ ppd->host_link_state);
+ return -1;
+ }
+}
+
+/*
+ * driver_lstate - convert the driver's notion of a port's
+ * state (an HLS_*) into a logical state (an IB_PORT_*). Return -1
+ * (converted to a u32) to indicate error.
+ */
+u32 driver_lstate(struct hfi1_pportdata *ppd)
+{
+ if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
+ return IB_PORT_DOWN;
+
+ switch (ppd->host_link_state & HLS_UP) {
+ case HLS_UP_INIT:
+ return IB_PORT_INIT;
+ case HLS_UP_ARMED:
+ return IB_PORT_ARMED;
+ case HLS_UP_ACTIVE:
+ return IB_PORT_ACTIVE;
+ default:
+ dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
+ ppd->host_link_state);
+ return -1;
+ }
+}
+
+void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
+ u8 neigh_reason, u8 rem_reason)
+{
+ if (ppd->local_link_down_reason.latest == 0 &&
+ ppd->neigh_link_down_reason.latest == 0) {
+ ppd->local_link_down_reason.latest = lcl_reason;
+ ppd->neigh_link_down_reason.latest = neigh_reason;
+ ppd->remote_link_down_reason = rem_reason;
+ }
+}
+
+/**
+ * data_vls_operational() - Verify if data VL BCT credits and MTU
+ * are both set.
+ * @ppd: pointer to hfi1_pportdata structure
+ *
+ * Return: true - OK, false - otherwise.
+ */
+static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
+{
+ int i;
+ u64 reg;
+
+ if (!ppd->actual_vls_operational)
+ return false;
+
+ for (i = 0; i < ppd->vls_supported; i++) {
+ reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
+ if ((reg && !ppd->dd->vld[i].mtu) ||
+ (!reg && ppd->dd->vld[i].mtu))
+ return false;
+ }
+
+ return true;
+}
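Aside, not part of this patch: the check above requires that, for every supported VL, the BCT credit CSR and the cached MTU are either both set or both clear. A stand-alone sketch of that consistency rule, using made-up arrays instead of CSR reads:

#include <stdbool.h>
#include <stdio.h>

static bool vls_consistent(const unsigned long long *credits,
			   const unsigned int *mtu, int nvls)
{
	for (int i = 0; i < nvls; i++)
		if (!!credits[i] != !!mtu[i])	/* one set without the other */
			return false;
	return true;
}

int main(void)
{
	unsigned long long credits[] = { 100, 0, 50 };
	unsigned int mtu[] = { 4096, 0, 0 };	/* VL2 has credits but no MTU */

	printf("%s\n", vls_consistent(credits, mtu, 3) ? "ok" : "inconsistent");
	return 0;
}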
+
+/*
+ * Change the physical and/or logical link state.
+ *
+ * Do not call this routine while inside an interrupt. It contains
+ * calls to routines that can take multiple seconds to finish.
+ *
+ * Returns 0 on success, -errno on failure.
+ */
+int set_link_state(struct hfi1_pportdata *ppd, u32 state)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ struct ib_event event = {.device = NULL};
+ int ret1, ret = 0;
+ int orig_new_state, poll_bounce;
+
+ mutex_lock(&ppd->hls_lock);
+
+ orig_new_state = state;
+ if (state == HLS_DN_DOWNDEF)
+ state = HLS_DEFAULT;
+
+ /* interpret poll -> poll as a link bounce */
+ poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
+ state == HLS_DN_POLL;
+
+ dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
+ link_state_name(ppd->host_link_state),
+ link_state_name(orig_new_state),
+ poll_bounce ? "(bounce) " : "",
+ link_state_reason_name(ppd, state));
+
+ /*
+ * If we're going to a (HLS_*) link state that implies the logical
+ * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
+ * reset is_sm_config_started to 0.
+ */
+ if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
+ ppd->is_sm_config_started = 0;
+
+ /*
+ * Do nothing if the states match. Let a poll to poll link bounce
+ * go through.
+ */
+ if (ppd->host_link_state == state && !poll_bounce)
+ goto done;
+
+ switch (state) {
+ case HLS_UP_INIT:
+ if (ppd->host_link_state == HLS_DN_POLL &&
+ (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
+ /*
+ * Quick link up jumps from polling to here.
+ *
+ * Whether in normal or loopback mode, the
+ * simulator jumps from polling to link up.
+ * Accept that here.
+ */
+ /* OK */
+ } else if (ppd->host_link_state != HLS_GOING_UP) {
+ goto unexpected;
+ }
+
+ /*
+ * Wait for Link_Up physical state.
+ * Physical and Logical states should already be
+ * transitioned to LinkUp and LinkInit respectively.
+ */
+ ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
+ if (ret) {
+ dd_dev_err(dd,
+ "%s: physical state did not change to LINK-UP\n",
+ __func__);
+ break;
+ }
+
+ ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
+ if (ret) {
+ dd_dev_err(dd,
+ "%s: logical state did not change to INIT\n",
+ __func__);
+ break;
+ }
+
+ /* clear old transient LINKINIT_REASON code */
+ if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
+ ppd->linkinit_reason =
+ OPA_LINKINIT_REASON_LINKUP;
+
+ /* enable the port */
+ add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
+
+ handle_linkup_change(dd, 1);
+ pio_kernel_linkup(dd);
+
+ /*
+ * After link up, a new link width will have been set.
+ * Update the xmit counters with regards to the new
+ * link width.
+ */
+ update_xmit_counters(ppd, ppd->link_width_active);
+
+ ppd->host_link_state = HLS_UP_INIT;
+ update_statusp(ppd, IB_PORT_INIT);
+ break;
+ case HLS_UP_ARMED:
+ if (ppd->host_link_state != HLS_UP_INIT)
+ goto unexpected;
+
+ if (!data_vls_operational(ppd)) {
+ dd_dev_err(dd,
+ "%s: Invalid data VL credits or mtu\n",
+ __func__);
+ ret = -EINVAL;
+ break;
+ }
+
+ set_logical_state(dd, LSTATE_ARMED);
+ ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
+ if (ret) {
+ dd_dev_err(dd,
+ "%s: logical state did not change to ARMED\n",
+ __func__);
+ break;
+ }
+ ppd->host_link_state = HLS_UP_ARMED;
+ update_statusp(ppd, IB_PORT_ARMED);
+ /*
+ * The simulator does not currently implement SMA messages,
+ * so neighbor_normal is not set. Set it here when we first
+ * move to Armed.
+ */
+ if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
+ ppd->neighbor_normal = 1;
+ break;
+ case HLS_UP_ACTIVE:
+ if (ppd->host_link_state != HLS_UP_ARMED)
+ goto unexpected;
+
+ set_logical_state(dd, LSTATE_ACTIVE);
+ ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
+ if (ret) {
+ dd_dev_err(dd,
+ "%s: logical state did not change to ACTIVE\n",
+ __func__);
+ } else {
+ /* tell all engines to go running */
+ sdma_all_running(dd);
+ ppd->host_link_state = HLS_UP_ACTIVE;
+ update_statusp(ppd, IB_PORT_ACTIVE);
+
+ /* Signal the IB layer that the port has gone active */
+ event.device = &dd->verbs_dev.rdi.ibdev;
+ event.element.port_num = ppd->port;
+ event.event = IB_EVENT_PORT_ACTIVE;
+ }
+ break;
+ case HLS_DN_POLL:
+ if ((ppd->host_link_state == HLS_DN_DISABLE ||
+ ppd->host_link_state == HLS_DN_OFFLINE) &&
+ dd->dc_shutdown)
+ dc_start(dd);
+ /* Hand LED control to the DC */
+ write_csr(dd, DCC_CFG_LED_CNTRL, 0);
+
+ if (ppd->host_link_state != HLS_DN_OFFLINE) {
+ u8 tmp = ppd->link_enabled;
+
+ ret = goto_offline(ppd, ppd->remote_link_down_reason);
+ if (ret) {
+ ppd->link_enabled = tmp;
+ break;
+ }
+ ppd->remote_link_down_reason = 0;
+
+ if (ppd->driver_link_ready)
+ ppd->link_enabled = 1;
+ }
+
+ set_all_slowpath(ppd->dd);
+ ret = set_local_link_attributes(ppd);
+ if (ret)
+ break;
+
+ ppd->port_error_action = 0;
+
+ if (quick_linkup) {
+ /* quick linkup does not go into polling */
+ ret = do_quick_linkup(dd);
+ } else {
+ ret1 = set_physical_link_state(dd, PLS_POLLING);
+ if (!ret1)
+ ret1 = wait_phys_link_out_of_offline(ppd,
+ 3000);
+ if (ret1 != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "Failed to transition to Polling link state, return 0x%x\n",
+ ret1);
+ ret = -EINVAL;
+ }
+ }
+
+ /*
+ * Change the host link state after requesting DC8051 to
+ * change its physical state so that we can ignore any
+ * interrupt with stale LNI(XX) error, which will not be
+ * cleared until DC8051 transitions to Polling state.
+ */
+ ppd->host_link_state = HLS_DN_POLL;
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
+ /*
+ * If an error occurred above, go back to offline. The
+ * caller may reschedule another attempt.
+ */
+ if (ret)
+ goto_offline(ppd, 0);
+ else
+ log_physical_state(ppd, PLS_POLLING);
+ break;
+ case HLS_DN_DISABLE:
+ /* link is disabled */
+ ppd->link_enabled = 0;
+
+ /* allow any state to transition to disabled */
+
+ /* must transition to offline first */
+ if (ppd->host_link_state != HLS_DN_OFFLINE) {
+ ret = goto_offline(ppd, ppd->remote_link_down_reason);
+ if (ret)
+ break;
+ ppd->remote_link_down_reason = 0;
+ }
+
+ if (!dd->dc_shutdown) {
+ ret1 = set_physical_link_state(dd, PLS_DISABLED);
+ if (ret1 != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "Failed to transition to Disabled link state, return 0x%x\n",
+ ret1);
+ ret = -EINVAL;
+ break;
+ }
+ ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
+ if (ret) {
+ dd_dev_err(dd,
+ "%s: physical state did not change to DISABLED\n",
+ __func__);
+ break;
+ }
+ dc_shutdown(dd);
+ }
+ ppd->host_link_state = HLS_DN_DISABLE;
+ break;
+ case HLS_DN_OFFLINE:
+ if (ppd->host_link_state == HLS_DN_DISABLE)
+ dc_start(dd);
+
+ /* allow any state to transition to offline */
+ ret = goto_offline(ppd, ppd->remote_link_down_reason);
+ if (!ret)
+ ppd->remote_link_down_reason = 0;
+ break;
+ case HLS_VERIFY_CAP:
+ if (ppd->host_link_state != HLS_DN_POLL)
+ goto unexpected;
+ ppd->host_link_state = HLS_VERIFY_CAP;
+ log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
+ break;
+ case HLS_GOING_UP:
+ if (ppd->host_link_state != HLS_VERIFY_CAP)
+ goto unexpected;
+
+ ret1 = set_physical_link_state(dd, PLS_LINKUP);
+ if (ret1 != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "Failed to transition to link up state, return 0x%x\n",
+ ret1);
+ ret = -EINVAL;
+ break;
+ }
+ ppd->host_link_state = HLS_GOING_UP;
+ break;
+
+ case HLS_GOING_OFFLINE: /* transient within goto_offline() */
+ case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
+ default:
+ dd_dev_info(dd, "%s: state 0x%x: not supported\n",
+ __func__, state);
+ ret = -EINVAL;
+ break;
+ }
+
+ goto done;
+
+unexpected:
+ dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
+ __func__, link_state_name(ppd->host_link_state),
+ link_state_name(state));
+ ret = -EINVAL;
+
+done:
+ mutex_unlock(&ppd->hls_lock);
+
+ if (event.device)
+ ib_dispatch_event(&event);
+
+ return ret;
+}
+
+int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
+{
+ u64 reg;
+ int ret = 0;
+
+ switch (which) {
+ case HFI1_IB_CFG_LIDLMC:
+ set_lidlmc(ppd);
+ break;
+ case HFI1_IB_CFG_VL_HIGH_LIMIT:
+ /*
+ * The VL Arbitrator high limit is sent in units of 4k
+ * bytes, while HFI stores it in units of 64 bytes.
+ */
+ val *= 4096 / 64;
+ reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
+ << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
+ write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
+ break;
+ case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+ /* HFI only supports POLL as the default link down state */
+ if (val != HLS_DN_POLL)
+ ret = -EINVAL;
+ break;
+ case HFI1_IB_CFG_OP_VLS:
+ if (ppd->vls_operational != val) {
+ ppd->vls_operational = val;
+ if (!ppd->port)
+ ret = -EINVAL;
+ }
+ break;
+ /*
+ * For link width, link width downgrade, and speed enable, always AND
+ * the setting with what is actually supported. This has two benefits.
+ * First, enabled can't have unsupported values, no matter what the
+ * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
+ * "fill in with your supported value" have all the bits in the
+ * field set, so simply ANDing with supported has the desired result.
+ */
+ case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
+ ppd->link_width_enabled = val & ppd->link_width_supported;
+ break;
+ case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
+ ppd->link_width_downgrade_enabled =
+ val & ppd->link_width_downgrade_supported;
+ break;
+ case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
+ ppd->link_speed_enabled = val & ppd->link_speed_supported;
+ break;
+ case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+ /*
+ * HFI does not follow IB specs; save this value
+ * so we can report it if asked.
+ */
+ ppd->overrun_threshold = val;
+ break;
+ case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+ /*
+ * HFI does not follow IB specs; save this value
+ * so we can report it if asked.
+ */
+ ppd->phy_error_threshold = val;
+ break;
+
+ case HFI1_IB_CFG_MTU:
+ set_send_length(ppd);
+ break;
+
+ case HFI1_IB_CFG_PKEYS:
+ if (HFI1_CAP_IS_KSET(PKEY_CHECK))
+ set_partition_keys(ppd);
+ break;
+
+ default:
+ if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
+ dd_dev_info(ppd->dd,
+ "%s: which %s, val 0x%x: not implemented\n",
+ __func__, ib_cfg_name(which), val);
+ break;
+ }
+ return ret;
+}
+
+/* begin functions related to vl arbitration table caching */
+static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
+{
+ int i;
+
+ BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
+ VL_ARB_LOW_PRIO_TABLE_SIZE);
+ BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
+ VL_ARB_HIGH_PRIO_TABLE_SIZE);
+
+ /*
+ * Note that we always return values directly from the
+ * 'vl_arb_cache' (and do no CSR reads) in response to a
+ * 'Get(VLArbTable)'. This is obviously correct after a
+ * 'Set(VLArbTable)', since the cache will then be up to
+ * date. But it's also correct prior to any 'Set(VLArbTable)'
+ * since then both the cache, and the relevant h/w registers
+ * will be zeroed.
+ */
+
+ for (i = 0; i < MAX_PRIO_TABLE; i++)
+ spin_lock_init(&ppd->vl_arb_cache[i].lock);
+}
+
+/*
+ * vl_arb_lock_cache
+ *
+ * All other vl_arb_* functions should be called only after locking
+ * the cache.
+ */
+static inline struct vl_arb_cache *
+vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
+{
+ if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
+ return NULL;
+ spin_lock(&ppd->vl_arb_cache[idx].lock);
+ return &ppd->vl_arb_cache[idx];
+}
+
+static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
+{
+ spin_unlock(&ppd->vl_arb_cache[idx].lock);
+}
+
+static void vl_arb_get_cache(struct vl_arb_cache *cache,
+ struct ib_vl_weight_elem *vl)
+{
+ memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
+}
+
+static void vl_arb_set_cache(struct vl_arb_cache *cache,
+ struct ib_vl_weight_elem *vl)
+{
+ memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
+}
+
+static int vl_arb_match_cache(struct vl_arb_cache *cache,
+ struct ib_vl_weight_elem *vl)
+{
+ return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
+}
+
+/* end functions related to vl arbitration table caching */
+
+static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
+ u32 size, struct ib_vl_weight_elem *vl)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u64 reg;
+ unsigned int i, is_up = 0;
+ int drain, ret = 0;
+
+ mutex_lock(&ppd->hls_lock);
+
+ if (ppd->host_link_state & HLS_UP)
+ is_up = 1;
+
+ drain = !is_ax(dd) && is_up;
+
+ if (drain)
+ /*
+ * Before adjusting VL arbitration weights, empty per-VL
+ * FIFOs, otherwise a packet whose VL weight is being
+ * set to 0 could get stuck in a FIFO with no chance to
+ * egress.
+ */
+ ret = stop_drain_data_vls(dd);
+
+ if (ret) {
+ dd_dev_err(
+ dd,
+ "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
+ __func__);
+ goto err;
+ }
+
+ for (i = 0; i < size; i++, vl++) {
+ /*
+ * NOTE: The low priority shift and mask are used here, but
+ * they are the same for both the low and high registers.
+ */
+ reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
+ << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
+ | (((u64)vl->weight
+ & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
+ << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
+ write_csr(dd, target + (i * 8), reg);
+ }
+ pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
+
+ if (drain)
+ open_fill_data_vls(dd); /* reopen all VLs */
+
+err:
+ mutex_unlock(&ppd->hls_lock);
+
+ return ret;
+}
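Aside, not part of this patch: each arbitration table entry written above packs a {VL, weight} pair into one 64-bit CSR word. The stand-alone sketch below shows the packing with placeholder shift/mask values; the real SEND_LOW_PRIORITY_LIST_* constants come from the chip register headers and are not reproduced here:

#include <stdio.h>
#include <stdint.h>

#define EX_VL_MASK	0xfULL		/* placeholder, not the real mask */
#define EX_VL_SHIFT	0		/* placeholder */
#define EX_WEIGHT_MASK	0xffULL		/* placeholder */
#define EX_WEIGHT_SHIFT	8		/* placeholder */

int main(void)
{
	uint8_t vl = 3, weight = 64;
	uint64_t reg = (((uint64_t)vl & EX_VL_MASK) << EX_VL_SHIFT) |
		       (((uint64_t)weight & EX_WEIGHT_MASK) << EX_WEIGHT_SHIFT);

	printf("entry: 0x%016llx\n", (unsigned long long)reg);
	return 0;
}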
+
+/*
+ * Read one credit merge VL register.
+ */
+static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
+ struct vl_limit *vll)
+{
+ u64 reg = read_csr(dd, csr);
+
+ vll->dedicated = cpu_to_be16(
+ (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
+ & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
+ vll->shared = cpu_to_be16(
+ (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
+ & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
+}
+
+/*
+ * Read the current credit merge limits.
+ */
+static int get_buffer_control(struct hfi1_devdata *dd,
+ struct buffer_control *bc, u16 *overall_limit)
+{
+ u64 reg;
+ int i;
+
+ /* not all entries are filled in */
+ memset(bc, 0, sizeof(*bc));
+
+ /* OPA and HFI have a 1-1 mapping */
+ for (i = 0; i < TXE_NUM_DATA_VL; i++)
+ read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
+
+ /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
+ read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
+
+ reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
+ bc->overall_shared_limit = cpu_to_be16(
+ (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
+ & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
+ if (overall_limit)
+ *overall_limit = (reg
+ >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
+ & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
+ return sizeof(struct buffer_control);
+}
+
+static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
+{
+ u64 reg;
+ int i;
+
+ /* each register contains 16 SC->VLnt mappings, 4 bits each */
+ reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
+ for (i = 0; i < sizeof(u64); i++) {
+ u8 byte = *(((u8 *)&reg) + i);
+
+ dp->vlnt[2 * i] = byte & 0xf;
+ dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
+ }
+
+ reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
+ for (i = 0; i < sizeof(u64); i++) {
+ u8 byte = *(((u8 *)&reg) + i);
+
+ dp->vlnt[16 + (2 * i)] = byte & 0xf;
+ dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
+ }
+ return sizeof(struct sc2vlnt);
+}
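Aside, not part of this patch: get_sc2vlnt() unpacks sixteen 4-bit SC-to-VLnt entries from each 64-bit register. A stand-alone sketch of the same unpacking, using shifts instead of the driver's byte-pointer walk (the register image below is invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t reg = 0xfedcba9876543210ULL;	/* hypothetical register image */
	uint8_t vlnt[16];

	for (int i = 0; i < 8; i++) {
		uint8_t byte = (reg >> (8 * i)) & 0xff;

		vlnt[2 * i]     = byte & 0xf;	/* low nibble first, as above */
		vlnt[2 * i + 1] = byte >> 4;
	}
	for (int i = 0; i < 16; i++)
		printf("SC%-2d -> VLnt %u\n", i, (unsigned)vlnt[i]);
	return 0;
}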
+
+static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
+ struct ib_vl_weight_elem *vl)
+{
+ unsigned int i;
+
+ for (i = 0; i < nelems; i++, vl++) {
+ vl->vl = 0xf;
+ vl->weight = 0;
+ }
+}
+
+static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
+{
+ write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
+ DC_SC_VL_VAL(15_0,
+ 0, dp->vlnt[0] & 0xf,
+ 1, dp->vlnt[1] & 0xf,
+ 2, dp->vlnt[2] & 0xf,
+ 3, dp->vlnt[3] & 0xf,
+ 4, dp->vlnt[4] & 0xf,
+ 5, dp->vlnt[5] & 0xf,
+ 6, dp->vlnt[6] & 0xf,
+ 7, dp->vlnt[7] & 0xf,
+ 8, dp->vlnt[8] & 0xf,
+ 9, dp->vlnt[9] & 0xf,
+ 10, dp->vlnt[10] & 0xf,
+ 11, dp->vlnt[11] & 0xf,
+ 12, dp->vlnt[12] & 0xf,
+ 13, dp->vlnt[13] & 0xf,
+ 14, dp->vlnt[14] & 0xf,
+ 15, dp->vlnt[15] & 0xf));
+ write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
+ DC_SC_VL_VAL(31_16,
+ 16, dp->vlnt[16] & 0xf,
+ 17, dp->vlnt[17] & 0xf,
+ 18, dp->vlnt[18] & 0xf,
+ 19, dp->vlnt[19] & 0xf,
+ 20, dp->vlnt[20] & 0xf,
+ 21, dp->vlnt[21] & 0xf,
+ 22, dp->vlnt[22] & 0xf,
+ 23, dp->vlnt[23] & 0xf,
+ 24, dp->vlnt[24] & 0xf,
+ 25, dp->vlnt[25] & 0xf,
+ 26, dp->vlnt[26] & 0xf,
+ 27, dp->vlnt[27] & 0xf,
+ 28, dp->vlnt[28] & 0xf,
+ 29, dp->vlnt[29] & 0xf,
+ 30, dp->vlnt[30] & 0xf,
+ 31, dp->vlnt[31] & 0xf));
+}
+
+static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
+ u16 limit)
+{
+ if (limit != 0)
+ dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
+ what, (int)limit, idx);
+}
+
+/* change only the shared limit portion of SendCmGlobalCredit */
+static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
+{
+ u64 reg;
+
+ reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
+ reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
+ reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
+ write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
+}
+
+/* change only the total credit limit portion of SendCmGlobalCredit */
+static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
+{
+ u64 reg;
+
+ reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
+ reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
+ reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
+ write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
+}
+
+/* set the given per-VL shared limit */
+static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
+{
+ u64 reg;
+ u32 addr;
+
+ if (vl < TXE_NUM_DATA_VL)
+ addr = SEND_CM_CREDIT_VL + (8 * vl);
+ else
+ addr = SEND_CM_CREDIT_VL15;
+
+ reg = read_csr(dd, addr);
+ reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
+ reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
+ write_csr(dd, addr, reg);
+}
+
+/* set the given per-VL dedicated limit */
+static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
+{
+ u64 reg;
+ u32 addr;
+
+ if (vl < TXE_NUM_DATA_VL)
+ addr = SEND_CM_CREDIT_VL + (8 * vl);
+ else
+ addr = SEND_CM_CREDIT_VL15;
+
+ reg = read_csr(dd, addr);
+ reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
+ reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
+ write_csr(dd, addr, reg);
+}
+
+/* spin until the given per-VL status mask bits clear */
+static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
+ const char *which)
+{
+ unsigned long timeout;
+ u64 reg;
+
+ timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
+ while (1) {
+ reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
+
+ if (reg == 0)
+ return; /* success */
+ if (time_after(jiffies, timeout))
+ break; /* timed out */
+ udelay(1);
+ }
+
+ dd_dev_err(dd,
+ "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
+ which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
+ /*
+ * If this occurs, it is likely there was a credit loss on the link.
+ * The only recovery from that is a link bounce.
+ */
+ dd_dev_err(dd,
+ "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
+}
+
+/*
+ * The number of credits on the VLs may be changed while everything
+ * is "live", but the following algorithm must be followed due to
+ * how the hardware is actually implemented. In particular,
+ * Return_Credit_Status[] is the only correct status check.
+ *
+ * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
+ * set Global_Shared_Credit_Limit = 0
+ * use_all_vl = 1
+ * mask0 = all VLs that are changing either dedicated or shared limits
+ * set Shared_Limit[mask0] = 0
+ * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
+ * if (changing any dedicated limit)
+ * mask1 = all VLs that are lowering dedicated limits
+ * lower Dedicated_Limit[mask1]
+ * spin until Return_Credit_Status[mask1] == 0
+ * raise Dedicated_Limits
+ * raise Shared_Limits
+ * raise Global_Shared_Credit_Limit
+ *
+ * lower = if the new limit is lower, set the limit to the new value
+ * raise = if the new limit is higher than the current value (may be changed
+ * earlier in the algorithm), set the limit to the new value
+ */
+int set_buffer_control(struct hfi1_pportdata *ppd,
+ struct buffer_control *new_bc)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u64 changing_mask, ld_mask, stat_mask;
+ int change_count;
+ int i, use_all_mask;
+ int this_shared_changing;
+ int vl_count = 0, ret;
+ /*
+ * A0: add the variable any_shared_limit_changing below and in the
+ * algorithm above. If removing A0 support, it can be removed.
+ */
+ int any_shared_limit_changing;
+ struct buffer_control cur_bc;
+ u8 changing[OPA_MAX_VLS];
+ u8 lowering_dedicated[OPA_MAX_VLS];
+ u16 cur_total;
+ u32 new_total = 0;
+ const u64 all_mask =
+ SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
+ | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
+ | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
+ | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
+ | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
+ | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
+ | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
+ | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
+ | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
+
+#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
+#define NUM_USABLE_VLS 16 /* look at VL15 and less */
+
+ /* find the new total credits, do sanity check on unused VLs */
+ for (i = 0; i < OPA_MAX_VLS; i++) {
+ if (valid_vl(i)) {
+ new_total += be16_to_cpu(new_bc->vl[i].dedicated);
+ continue;
+ }
+ nonzero_msg(dd, i, "dedicated",
+ be16_to_cpu(new_bc->vl[i].dedicated));
+ nonzero_msg(dd, i, "shared",
+ be16_to_cpu(new_bc->vl[i].shared));
+ new_bc->vl[i].dedicated = 0;
+ new_bc->vl[i].shared = 0;
+ }
+ new_total += be16_to_cpu(new_bc->overall_shared_limit);
+
+ /* fetch the current values */
+ get_buffer_control(dd, &cur_bc, &cur_total);
+
+ /*
+ * Create the masks we will use.
+ */
+ memset(changing, 0, sizeof(changing));
+ memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
+ /*
+ * NOTE: Assumes that the individual VL bits are adjacent and in
+ * increasing order
+ */
+ stat_mask =
+ SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
+ changing_mask = 0;
+ ld_mask = 0;
+ change_count = 0;
+ any_shared_limit_changing = 0;
+ for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
+ if (!valid_vl(i))
+ continue;
+ this_shared_changing = new_bc->vl[i].shared
+ != cur_bc.vl[i].shared;
+ if (this_shared_changing)
+ any_shared_limit_changing = 1;
+ if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
+ this_shared_changing) {
+ changing[i] = 1;
+ changing_mask |= stat_mask;
+ change_count++;
+ }
+ if (be16_to_cpu(new_bc->vl[i].dedicated) <
+ be16_to_cpu(cur_bc.vl[i].dedicated)) {
+ lowering_dedicated[i] = 1;
+ ld_mask |= stat_mask;
+ }
+ }
+
+ /* bracket the credit change with a total adjustment */
+ if (new_total > cur_total)
+ set_global_limit(dd, new_total);
+
+ /*
+ * Start the credit change algorithm.
+ */
+ use_all_mask = 0;
+ if ((be16_to_cpu(new_bc->overall_shared_limit) <
+ be16_to_cpu(cur_bc.overall_shared_limit)) ||
+ (is_ax(dd) && any_shared_limit_changing)) {
+ set_global_shared(dd, 0);
+ cur_bc.overall_shared_limit = 0;
+ use_all_mask = 1;
+ }
+
+ for (i = 0; i < NUM_USABLE_VLS; i++) {
+ if (!valid_vl(i))
+ continue;
+
+ if (changing[i]) {
+ set_vl_shared(dd, i, 0);
+ cur_bc.vl[i].shared = 0;
+ }
+ }
+
+ wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
+ "shared");
+
+ if (change_count > 0) {
+ for (i = 0; i < NUM_USABLE_VLS; i++) {
+ if (!valid_vl(i))
+ continue;
+
+ if (lowering_dedicated[i]) {
+ set_vl_dedicated(dd, i,
+ be16_to_cpu(new_bc->
+ vl[i].dedicated));
+ cur_bc.vl[i].dedicated =
+ new_bc->vl[i].dedicated;
+ }
+ }
+
+ wait_for_vl_status_clear(dd, ld_mask, "dedicated");
+
+ /* now raise all dedicated that are going up */
+ for (i = 0; i < NUM_USABLE_VLS; i++) {
+ if (!valid_vl(i))
+ continue;
+
+ if (be16_to_cpu(new_bc->vl[i].dedicated) >
+ be16_to_cpu(cur_bc.vl[i].dedicated))
+ set_vl_dedicated(dd, i,
+ be16_to_cpu(new_bc->
+ vl[i].dedicated));
+ }
+ }
+
+ /* next raise all shared that are going up */
+ for (i = 0; i < NUM_USABLE_VLS; i++) {
+ if (!valid_vl(i))
+ continue;
+
+ if (be16_to_cpu(new_bc->vl[i].shared) >
+ be16_to_cpu(cur_bc.vl[i].shared))
+ set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
+ }
+
+ /* finally raise the global shared */
+ if (be16_to_cpu(new_bc->overall_shared_limit) >
+ be16_to_cpu(cur_bc.overall_shared_limit))
+ set_global_shared(dd,
+ be16_to_cpu(new_bc->overall_shared_limit));
+
+ /* bracket the credit change with a total adjustment */
+ if (new_total < cur_total)
+ set_global_limit(dd, new_total);
+
+ /*
+ * Determine the actual number of operational VLS using the number of
+ * dedicated and shared credits for each VL.
+ */
+ if (change_count > 0) {
+ for (i = 0; i < TXE_NUM_DATA_VL; i++)
+ if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
+ be16_to_cpu(new_bc->vl[i].shared) > 0)
+ vl_count++;
+ ppd->actual_vls_operational = vl_count;
+ ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
+ ppd->actual_vls_operational :
+ ppd->vls_operational,
+ NULL);
+ if (ret == 0)
+ ret = pio_map_init(dd, ppd->port - 1, vl_count ?
+ ppd->actual_vls_operational :
+ ppd->vls_operational, NULL);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
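Aside, not part of this patch: the ordering set_buffer_control() follows is lower-then-wait-then-raise, so credits are never oversubscribed while the hardware still holds them. A stand-alone sketch of that two-phase ordering with made-up limits and a stubbed-out wait step:

#include <stdio.h>

#define NVL 4

static void wait_for_credits_returned(void)
{
	/* stand-in for wait_for_vl_status_clear(); nothing to poll here */
	puts("  (wait for returned credits)");
}

int main(void)
{
	int cur[NVL] = { 100, 50, 0, 25 };
	int new[NVL] = {  80, 60, 0, 25 };	/* VL0 shrinks, VL1 grows */

	for (int i = 0; i < NVL; i++)		/* phase 1: apply all decreases */
		if (new[i] < cur[i]) {
			printf("lower VL%d: %d -> %d\n", i, cur[i], new[i]);
			cur[i] = new[i];
		}
	wait_for_credits_returned();
	for (int i = 0; i < NVL; i++)		/* phase 2: apply all increases */
		if (new[i] > cur[i]) {
			printf("raise VL%d: %d -> %d\n", i, cur[i], new[i]);
			cur[i] = new[i];
		}
	return 0;
}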
+
+/*
+ * Read the given fabric manager table. Return the size of the
+ * table (in bytes) on success, and a negative error code on
+ * failure.
+ */
+int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
+
+{
+ int size;
+ struct vl_arb_cache *vlc;
+
+ switch (which) {
+ case FM_TBL_VL_HIGH_ARB:
+ size = 256;
+ /*
+ * OPA specifies 128 elements (of 2 bytes each), though
+ * HFI supports only 16 elements in h/w.
+ */
+ vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
+ vl_arb_get_cache(vlc, t);
+ vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
+ break;
+ case FM_TBL_VL_LOW_ARB:
+ size = 256;
+ /*
+ * OPA specifies 128 elements (of 2 bytes each), though
+ * HFI supports only 16 elements in h/w.
+ */
+ vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
+ vl_arb_get_cache(vlc, t);
+ vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
+ break;
+ case FM_TBL_BUFFER_CONTROL:
+ size = get_buffer_control(ppd->dd, t, NULL);
+ break;
+ case FM_TBL_SC2VLNT:
+ size = get_sc2vlnt(ppd->dd, t);
+ break;
+ case FM_TBL_VL_PREEMPT_ELEMS:
+ size = 256;
+ /* OPA specifies 128 elements, of 2 bytes each */
+ get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
+ break;
+ case FM_TBL_VL_PREEMPT_MATRIX:
+ size = 256;
+ /*
+ * OPA specifies that this is the same size as the VL
+ * arbitration tables (i.e., 256 bytes).
+ */
+ break;
+ default:
+ return -EINVAL;
+ }
+ return size;
+}
+
+/*
+ * Write the given fabric manager table.
+ */
+int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
+{
+ int ret = 0;
+ struct vl_arb_cache *vlc;
+
+ switch (which) {
+ case FM_TBL_VL_HIGH_ARB:
+ vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
+ if (vl_arb_match_cache(vlc, t)) {
+ vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
+ break;
+ }
+ vl_arb_set_cache(vlc, t);
+ vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
+ ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
+ VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
+ break;
+ case FM_TBL_VL_LOW_ARB:
+ vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
+ if (vl_arb_match_cache(vlc, t)) {
+ vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
+ break;
+ }
+ vl_arb_set_cache(vlc, t);
+ vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
+ ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
+ VL_ARB_LOW_PRIO_TABLE_SIZE, t);
+ break;
+ case FM_TBL_BUFFER_CONTROL:
+ ret = set_buffer_control(ppd, t);
+ break;
+ case FM_TBL_SC2VLNT:
+ set_sc2vlnt(ppd->dd, t);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/*
+ * Disable all data VLs.
+ *
+ * Return 0 if disabled, non-zero if the VLs cannot be disabled.
+ */
+static int disable_data_vls(struct hfi1_devdata *dd)
+{
+ if (is_ax(dd))
+ return 1;
+
+ pio_send_control(dd, PSC_DATA_VL_DISABLE);
+
+ return 0;
+}
+
+/*
+ * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
+ * Just re-enables all data VLs (the "fill" part happens
+ * automatically - the name was chosen for symmetry with
+ * stop_drain_data_vls()).
+ *
+ * Return 0 if successful, non-zero if the VLs cannot be enabled.
+ */
+int open_fill_data_vls(struct hfi1_devdata *dd)
+{
+ if (is_ax(dd))
+ return 1;
+
+ pio_send_control(dd, PSC_DATA_VL_ENABLE);
+
+ return 0;
+}
+
+/*
+ * drain_data_vls() - assumes that disable_data_vls() has been called;
+ * waits for the occupancy of the per-VL FIFOs (for all contexts) and the
+ * SDMA engines to drop to 0.
+ */
+static void drain_data_vls(struct hfi1_devdata *dd)
+{
+ sc_wait(dd);
+ sdma_wait(dd);
+ pause_for_credit_return(dd);
+}
+
+/*
+ * stop_drain_data_vls() - disable, then drain all per-VL fifos.
+ *
+ * Use open_fill_data_vls() to resume using data VLs. This pair is
+ * meant to be used like this:
+ *
+ * stop_drain_data_vls(dd);
+ * // do things with per-VL resources
+ * open_fill_data_vls(dd);
+ */
+int stop_drain_data_vls(struct hfi1_devdata *dd)
+{
+ int ret;
+
+ ret = disable_data_vls(dd);
+ if (ret == 0)
+ drain_data_vls(dd);
+
+ return ret;
+}
+
+/*
+ * Convert a nanosecond time to a cclock count. No matter how slow
+ * the cclock, a non-zero ns will always have a non-zero result.
+ */
+u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
+{
+ u32 cclocks;
+
+ if (dd->icode == ICODE_FPGA_EMULATION)
+ cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
+ else /* simulation pretends to be ASIC */
+ cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
+ if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
+ cclocks = 1;
+ return cclocks;
+}
+
+/*
+ * Convert a cclock count to nanoseconds. No matter how slow
+ * the cclock, a non-zero cclocks will always have a non-zero result.
+ */
+u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
+{
+ u32 ns;
+
+ if (dd->icode == ICODE_FPGA_EMULATION)
+ ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
+ else /* simulation pretends to be ASIC */
+ ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
+ if (cclocks && !ns)
+ ns = 1;
+ return ns;
+}
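Aside, not part of this patch: the two conversions above are simple picosecond arithmetic plus a never-round-to-zero rule. A stand-alone sketch with a placeholder cclock period (the real FPGA_CCLOCK_PS/ASIC_CCLOCK_PS values are defined elsewhere in the driver):

#include <stdio.h>
#include <stdint.h>

#define EX_CCLOCK_PS 1242u	/* placeholder picoseconds per cclock */

static uint32_t ex_ns_to_cclock(uint32_t ns)
{
	uint32_t cclocks = (ns * 1000u) / EX_CCLOCK_PS;

	if (ns && !cclocks)	/* non-zero input always yields at least 1 */
		cclocks = 1;
	return cclocks;
}

static uint32_t ex_cclock_to_ns(uint32_t cclocks)
{
	uint32_t ns = (cclocks * EX_CCLOCK_PS) / 1000u;

	if (cclocks && !ns)
		ns = 1;
	return ns;
}

int main(void)
{
	printf("1 ns -> %u cclocks, 1 cclock -> %u ns\n",
	       ex_ns_to_cclock(1), ex_cclock_to_ns(1));
	return 0;
}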
+
+/*
+ * Dynamically adjust the receive interrupt timeout for a context based on
+ * incoming packet rate.
+ *
+ * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
+ */
+static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
+{
+ struct hfi1_devdata *dd = rcd->dd;
+ u32 timeout = rcd->rcvavail_timeout;
+
+ /*
+ * This algorithm doubles or halves the timeout depending on whether
+ * the number of packets received in this interrupt was less than or
+ * greater than or equal to the interrupt count.
+ *
+ * The calculations below do not allow a steady state to be achieved.
+ * Only at the endpoints is it possible to have an unchanging
+ * timeout.
+ */
+ if (npkts < rcv_intr_count) {
+ /*
+ * Not enough packets arrived before the timeout, adjust
+ * timeout downward.
+ */
+ if (timeout < 2) /* already at minimum? */
+ return;
+ timeout >>= 1;
+ } else {
+ /*
+ * More than enough packets arrived before the timeout, adjust
+ * timeout upward.
+ */
+ if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
+ return;
+ timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
+ }
+
+ rcd->rcvavail_timeout = timeout;
+ /*
+ * timeout cannot be larger than rcv_intr_timeout_csr which has already
+ * been verified to be in range
+ */
+ write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
+ (u64)timeout <<
+ RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
+}
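Aside, not part of this patch: the adaptation above is just double-or-halve per interrupt, clamped at both ends. A stand-alone sketch of the same update rule with hypothetical values for rcv_intr_count and the CSR maximum:

#include <stdio.h>
#include <stdint.h>

#define EX_INTR_COUNT	8u	/* hypothetical rcv_intr_count */
#define EX_TIMEOUT_MAX	840u	/* hypothetical CSR maximum */

static uint32_t adjust(uint32_t timeout, uint32_t npkts)
{
	if (npkts < EX_INTR_COUNT) {
		if (timeout < 2)		/* already at minimum */
			return timeout;
		return timeout >> 1;		/* too few packets: halve */
	}
	if (timeout >= EX_TIMEOUT_MAX)		/* already at maximum */
		return timeout;
	timeout <<= 1;				/* enough packets: double */
	return timeout < EX_TIMEOUT_MAX ? timeout : EX_TIMEOUT_MAX;
}

int main(void)
{
	uint32_t t = 64;

	t = adjust(t, 2);	/* few packets arrived: halve */
	t = adjust(t, 100);	/* many packets arrived: double */
	printf("timeout now %u\n", t);
	return 0;
}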
+
+void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
+ u32 intr_adjust, u32 npkts)
+{
+ struct hfi1_devdata *dd = rcd->dd;
+ u64 reg;
+ u32 ctxt = rcd->ctxt;
+
+ /*
+ * Need to write timeout register before updating RcvHdrHead to ensure
+ * that a new value is used when the HW decides to restart counting.
+ */
+ if (intr_adjust)
+ adjust_rcv_timeout(rcd, npkts);
+ if (updegr) {
+ reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
+ << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
+ write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
+ }
+ reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
+ (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
+ << RCV_HDR_HEAD_HEAD_SHIFT);
+ write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
+}
+
+u32 hdrqempty(struct hfi1_ctxtdata *rcd)
+{
+ u32 head, tail;
+
+ head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
+ & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
+
+ if (hfi1_rcvhdrtail_kvaddr(rcd))
+ tail = get_rcvhdrtail(rcd);
+ else
+ tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
+
+ return head == tail;
+}
+
+/*
+ * Context Control and Receive Array encoding for buffer size:
+ * 0x0 invalid
+ * 0x1 4 KB
+ * 0x2 8 KB
+ * 0x3 16 KB
+ * 0x4 32 KB
+ * 0x5 64 KB
+ * 0x6 128 KB
+ * 0x7 256 KB
+ * 0x8 512 KB (Receive Array only)
+ * 0x9 1 MB (Receive Array only)
+ * 0xa 2 MB (Receive Array only)
+ *
+ * 0xB-0xF - reserved (Receive Array only)
+ *
+ *
+ * This routine assumes that the value has already been sanity checked.
+ */
+static u32 encoded_size(u32 size)
+{
+ switch (size) {
+ case 4 * 1024: return 0x1;
+ case 8 * 1024: return 0x2;
+ case 16 * 1024: return 0x3;
+ case 32 * 1024: return 0x4;
+ case 64 * 1024: return 0x5;
+ case 128 * 1024: return 0x6;
+ case 256 * 1024: return 0x7;
+ case 512 * 1024: return 0x8;
+ case 1 * 1024 * 1024: return 0x9;
+ case 2 * 1024 * 1024: return 0xa;
+ }
+ return 0x1; /* if invalid, go with the minimum size */
+}
+
+/**
+ * encode_rcv_header_entry_size - return chip specific encoding for size
+ * @size: size in dwords
+ *
+ * Convert a receive header entry size to the encoding used in the CSR.
+ *
+ * Return a zero if the given size is invalid, otherwise the encoding.
+ */
+u8 encode_rcv_header_entry_size(u8 size)
+{
+ /* there are only 3 valid receive header entry sizes */
+ if (size == 2)
+ return 1;
+ if (size == 16)
+ return 2;
+ if (size == 32)
+ return 4;
+ return 0; /* invalid */
+}
+
+/**
+ * hfi1_validate_rcvhdrcnt - validate hdrcnt
+ * @dd: the device data
+ * @thecnt: the header count
+ */
+int hfi1_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt)
+{
+ if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
+ dd_dev_err(dd, "Receive header queue count too small\n");
+ return -EINVAL;
+ }
+
+ if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
+ dd_dev_err(dd,
+ "Receive header queue count cannot be greater than %u\n",
+ HFI1_MAX_HDRQ_EGRBUF_CNT);
+ return -EINVAL;
+ }
+
+ if (thecnt % HDRQ_INCREMENT) {
+ dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n",
+ thecnt, HDRQ_INCREMENT);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * set_hdrq_regs - set header queue registers for context
+ * @dd: the device data
+ * @ctxt: the context
+ * @entsize: the dword entry size
+ * @hdrcnt: the number of header entries
+ */
+void set_hdrq_regs(struct hfi1_devdata *dd, u8 ctxt, u8 entsize, u16 hdrcnt)
+{
+ u64 reg;
+
+ reg = (((u64)hdrcnt >> HDRQ_SIZE_SHIFT) & RCV_HDR_CNT_CNT_MASK) <<
+ RCV_HDR_CNT_CNT_SHIFT;
+ write_kctxt_csr(dd, ctxt, RCV_HDR_CNT, reg);
+ reg = ((u64)encode_rcv_header_entry_size(entsize) &
+ RCV_HDR_ENT_SIZE_ENT_SIZE_MASK) <<
+ RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
+ write_kctxt_csr(dd, ctxt, RCV_HDR_ENT_SIZE, reg);
+ reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK) <<
+ RCV_HDR_SIZE_HDR_SIZE_SHIFT;
+ write_kctxt_csr(dd, ctxt, RCV_HDR_SIZE, reg);
+
+ /*
+ * Program dummy tail address for every receive context
+ * before enabling any receive context
+ */
+ write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
+ dd->rcvhdrtail_dummy_dma);
+}
+
+void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
+ struct hfi1_ctxtdata *rcd)
+{
+ u64 rcvctrl, reg;
+ int did_enable = 0;
+ u16 ctxt;
+
+ if (!rcd)
+ return;
+
+ ctxt = rcd->ctxt;
+
+ hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
+
+ rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
+ /* if the context already enabled, don't do the extra steps */
+ if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
+ !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
+ /* reset the tail and hdr addresses, and sequence count */
+ write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
+ rcd->rcvhdrq_dma);
+ if (hfi1_rcvhdrtail_kvaddr(rcd))
+ write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
+ rcd->rcvhdrqtailaddr_dma);
+ hfi1_set_seq_cnt(rcd, 1);
+
+ /* reset the cached receive header queue head value */
+ hfi1_set_rcd_head(rcd, 0);
+
+ /*
+ * Zero the receive header queue so we don't get false
+ * positives when checking the sequence number. The
+ * sequence numbers could land exactly on the same spot.
+ * E.g. an rcd restart before the receive header wrapped.
+ */
+ memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd));
+
+ /* starting timeout */
+ rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
+
+ /* enable the context */
+ rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
+
+ /* clean the egr buffer size first */
+ rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
+ rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
+ & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
+ << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
+
+ /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
+ write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
+ did_enable = 1;
+
+ /* zero RcvEgrIndexHead */
+ write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
+
+ /* set eager count and base index */
+ reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
+ & RCV_EGR_CTRL_EGR_CNT_MASK)
+ << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
+ (((rcd->eager_base >> RCV_SHIFT)
+ & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
+ << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
+ write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
+
+ /*
+ * Set TID (expected) count and base index.
+ * rcd->expected_count is set to individual RcvArray entries,
+ * not pairs, and the CSR takes a pair-count in groups of
+ * four, so divide by 8.
+ */
+ reg = (((rcd->expected_count >> RCV_SHIFT)
+ & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
+ << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
+ (((rcd->expected_base >> RCV_SHIFT)
+ & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
+ << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
+ write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
+ if (ctxt == HFI1_CTRL_CTXT)
+ write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
+ }
+ if (op & HFI1_RCVCTRL_CTXT_DIS) {
+ write_csr(dd, RCV_VL15, 0);
+ /*
+ * When receive context is being disabled turn on tail
+ * update with a dummy tail address and then disable
+ * receive context.
+ */
+ if (dd->rcvhdrtail_dummy_dma) {
+ write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
+ dd->rcvhdrtail_dummy_dma);
+ /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
+ rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
+ }
+
+ rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
+ }
+ if (op & HFI1_RCVCTRL_INTRAVAIL_ENB) {
+ set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
+ IS_RCVAVAIL_START + rcd->ctxt, true);
+ rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
+ }
+ if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) {
+ set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
+ IS_RCVAVAIL_START + rcd->ctxt, false);
+ rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
+ }
+ if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && hfi1_rcvhdrtail_kvaddr(rcd))
+ rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
+ if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
+ /* See comment on RcvCtxtCtrl.TailUpd above */
+ if (!(op & HFI1_RCVCTRL_CTXT_DIS))
+ rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
+ }
+ if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
+ rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
+ if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
+ rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
+ if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
+ /*
+ * In one-packet-per-eager mode, the size comes from
+ * the RcvArray entry.
+ */
+ rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
+ rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
+ }
+ if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
+ rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
+ if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
+ rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
+ if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
+ rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
+ if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
+ rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
+ if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
+ rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
+ if (op & HFI1_RCVCTRL_URGENT_ENB)
+ set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
+ IS_RCVURGENT_START + rcd->ctxt, true);
+ if (op & HFI1_RCVCTRL_URGENT_DIS)
+ set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
+ IS_RCVURGENT_START + rcd->ctxt, false);
+
+ hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx", ctxt, rcvctrl);
+ write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);
+
+ /* work around sticky RcvCtxtStatus.BlockedRHQFull */
+ if (did_enable &&
+ (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
+ reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
+ if (reg != 0) {
+ dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
+ ctxt, reg);
+ read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
+ write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
+ write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
+ read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
+ reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
+ dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
+ ctxt, reg, reg == 0 ? "not" : "still");
+ }
+ }
+
+ if (did_enable) {
+ /*
+ * The interrupt timeout and count must be set after
+ * the context is enabled to take effect.
+ */
+ /* set interrupt timeout */
+ write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
+ (u64)rcd->rcvavail_timeout <<
+ RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
+
+ /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
+ reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
+ write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
+ }
+
+ if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
+ /*
+ * If the context has been disabled and the Tail Update has
+ * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
+ * so it doesn't contain an address that is invalid.
+ */
+ write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
+ dd->rcvhdrtail_dummy_dma);
+}
+
+u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
+{
+ int ret;
+ u64 val = 0;
+
+ if (namep) {
+ ret = dd->cntrnameslen;
+ *namep = dd->cntrnames;
+ } else {
+ const struct cntr_entry *entry;
+ int i, j;
+
+ ret = (dd->ndevcntrs) * sizeof(u64);
+
+ /* Get the start of the block of counters */
+ *cntrp = dd->cntrs;
+
+ /*
+ * Now go and fill in each counter in the block.
+ */
+ for (i = 0; i < DEV_CNTR_LAST; i++) {
+ entry = &dev_cntrs[i];
+ hfi1_cdbg(CNTR, "reading %s", entry->name);
+ if (entry->flags & CNTR_DISABLED) {
+ /* Nothing */
+ hfi1_cdbg(CNTR, "\tDisabled");
+ } else {
+ if (entry->flags & CNTR_VL) {
+ hfi1_cdbg(CNTR, "\tPer VL");
+ for (j = 0; j < C_VL_COUNT; j++) {
+ val = entry->rw_cntr(entry,
+ dd, j,
+ CNTR_MODE_R,
+ 0);
+ hfi1_cdbg(
+ CNTR,
+ "\t\tRead 0x%llx for %d",
+ val, j);
+ dd->cntrs[entry->offset + j] =
+ val;
+ }
+ } else if (entry->flags & CNTR_SDMA) {
+ hfi1_cdbg(CNTR,
+ "\t Per SDMA Engine");
+ for (j = 0; j < chip_sdma_engines(dd);
+ j++) {
+ val =
+ entry->rw_cntr(entry, dd, j,
+ CNTR_MODE_R, 0);
+ hfi1_cdbg(CNTR,
+ "\t\tRead 0x%llx for %d",
+ val, j);
+ dd->cntrs[entry->offset + j] =
+ val;
+ }
+ } else {
+ val = entry->rw_cntr(entry, dd,
+ CNTR_INVALID_VL,
+ CNTR_MODE_R, 0);
+ dd->cntrs[entry->offset] = val;
+ hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
+ }
+ }
+ }
+ }
+ return ret;
+}
+
+/*
+ * Used by sysfs to create files for hfi stats to read
+ */
+u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
+{
+ int ret;
+ u64 val = 0;
+
+ if (namep) {
+ ret = ppd->dd->portcntrnameslen;
+ *namep = ppd->dd->portcntrnames;
+ } else {
+ const struct cntr_entry *entry;
+ int i, j;
+
+ ret = ppd->dd->nportcntrs * sizeof(u64);
+ *cntrp = ppd->cntrs;
+
+ for (i = 0; i < PORT_CNTR_LAST; i++) {
+ entry = &port_cntrs[i];
+ hfi1_cdbg(CNTR, "reading %s", entry->name);
+ if (entry->flags & CNTR_DISABLED) {
+ /* Nothing */
+ hfi1_cdbg(CNTR, "\tDisabled");
+ continue;
+ }
+
+ if (entry->flags & CNTR_VL) {
+ hfi1_cdbg(CNTR, "\tPer VL");
+ for (j = 0; j < C_VL_COUNT; j++) {
+ val = entry->rw_cntr(entry, ppd, j,
+ CNTR_MODE_R,
+ 0);
+ hfi1_cdbg(
+ CNTR,
+ "\t\tRead 0x%llx for %d",
+ val, j);
+ ppd->cntrs[entry->offset + j] = val;
+ }
+ } else {
+ val = entry->rw_cntr(entry, ppd,
+ CNTR_INVALID_VL,
+ CNTR_MODE_R,
+ 0);
+ ppd->cntrs[entry->offset] = val;
+ hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
+ }
+ }
+ }
+ return ret;
+}
+
+static void free_cntrs(struct hfi1_devdata *dd)
+{
+ struct hfi1_pportdata *ppd;
+ int i;
+
+ if (dd->synth_stats_timer.function)
+ timer_delete_sync(&dd->synth_stats_timer);
+ cancel_work_sync(&dd->update_cntr_work);
+ ppd = (struct hfi1_pportdata *)(dd + 1);
+ for (i = 0; i < dd->num_pports; i++, ppd++) {
+ kfree(ppd->cntrs);
+ kfree(ppd->scntrs);
+ free_percpu(ppd->ibport_data.rvp.rc_acks);
+ free_percpu(ppd->ibport_data.rvp.rc_qacks);
+ free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
+ ppd->cntrs = NULL;
+ ppd->scntrs = NULL;
+ ppd->ibport_data.rvp.rc_acks = NULL;
+ ppd->ibport_data.rvp.rc_qacks = NULL;
+ ppd->ibport_data.rvp.rc_delayed_comp = NULL;
+ }
+ kfree(dd->portcntrnames);
+ dd->portcntrnames = NULL;
+ kfree(dd->cntrs);
+ dd->cntrs = NULL;
+ kfree(dd->scntrs);
+ dd->scntrs = NULL;
+ kfree(dd->cntrnames);
+ dd->cntrnames = NULL;
+ if (dd->update_cntr_wq) {
+ destroy_workqueue(dd->update_cntr_wq);
+ dd->update_cntr_wq = NULL;
+ }
+}
+
+static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
+ u64 *psval, void *context, int vl)
+{
+ u64 val;
+ u64 sval = *psval;
+
+ if (entry->flags & CNTR_DISABLED) {
+ dd_dev_err(dd, "Counter %s not enabled", entry->name);
+ return 0;
+ }
+
+ hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
+
+ val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
+
+ /* If it's a synthetic counter there is more work we need to do */
+ if (entry->flags & CNTR_SYNTH) {
+ if (sval == CNTR_MAX) {
+ /* No need to read already saturated */
+ return CNTR_MAX;
+ }
+
+ if (entry->flags & CNTR_32BIT) {
+ /* 32bit counters can wrap multiple times */
+ u64 upper = sval >> 32;
+ u64 lower = (sval << 32) >> 32;
+
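+ /*
+ * The saved value keeps the accumulated wrap count in its
+ * upper 32 bits; if the hardware value dropped below the
+ * saved lower half, the counter wrapped at least once
+ * since the last read.
+ */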
+ if (lower > val) { /* hw wrapped */
+ if (upper == CNTR_32BIT_MAX)
+ val = CNTR_MAX;
+ else
+ upper++;
+ }
+
+ if (val != CNTR_MAX)
+ val = (upper << 32) | val;
+
+ } else {
+ /* If we rolled we are saturated */
+ if ((val < sval) || (val > CNTR_MAX))
+ val = CNTR_MAX;
+ }
+ }
+
+ *psval = val;
+
+ hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
+
+ return val;
+}
+
+static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
+ struct cntr_entry *entry,
+ u64 *psval, void *context, int vl, u64 data)
+{
+ u64 val;
+
+ if (entry->flags & CNTR_DISABLED) {
+ dd_dev_err(dd, "Counter %s not enabled", entry->name);
+ return 0;
+ }
+
+ hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
+
+ if (entry->flags & CNTR_SYNTH) {
+ *psval = data;
+ if (entry->flags & CNTR_32BIT) {
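+ /* the hardware counter is only 32 bits wide; write just the low half */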
+ val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
+ (data << 32) >> 32);
+ val = data; /* return the full 64bit value */
+ } else {
+ val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
+ data);
+ }
+ } else {
+ val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
+ }
+
+ *psval = val;
+
+ hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
+
+ return val;
+}
+
+u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
+{
+ struct cntr_entry *entry;
+ u64 *sval;
+
+ entry = &dev_cntrs[index];
+ sval = dd->scntrs + entry->offset;
+
+ if (vl != CNTR_INVALID_VL)
+ sval += vl;
+
+ return read_dev_port_cntr(dd, entry, sval, dd, vl);
+}
+
+u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
+{
+ struct cntr_entry *entry;
+ u64 *sval;
+
+ entry = &dev_cntrs[index];
+ sval = dd->scntrs + entry->offset;
+
+ if (vl != CNTR_INVALID_VL)
+ sval += vl;
+
+ return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
+}
+
+u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
+{
+ struct cntr_entry *entry;
+ u64 *sval;
+
+ entry = &port_cntrs[index];
+ sval = ppd->scntrs + entry->offset;
+
+ if (vl != CNTR_INVALID_VL)
+ sval += vl;
+
+ if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
+ (index <= C_RCV_HDR_OVF_LAST)) {
+ /* We do not want to bother for disabled contexts */
+ return 0;
+ }
+
+ return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
+}
+
+u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
+{
+ struct cntr_entry *entry;
+ u64 *sval;
+
+ entry = &port_cntrs[index];
+ sval = ppd->scntrs + entry->offset;
+
+ if (vl != CNTR_INVALID_VL)
+ sval += vl;
+
+ if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
+ (index <= C_RCV_HDR_OVF_LAST)) {
+ /* We do not want to bother for disabled contexts */
+ return 0;
+ }
+
+ return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
+}
+
+static void do_update_synth_timer(struct work_struct *work)
+{
+ u64 cur_tx;
+ u64 cur_rx;
+ u64 total_flits;
+ u8 update = 0;
+ int i, j, vl;
+ struct hfi1_pportdata *ppd;
+ struct cntr_entry *entry;
+ struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
+ update_cntr_work);
+
+ /*
+ * Rather than keep beating on the CSRs, pick a minimal set that we can
+ * check to watch for potential rollover. We can do this by looking at
+ * the number of flits sent/received. If the total flits exceed the
+ * 32-bit limit then we have to iterate over all the counters and update.
+ */
+ entry = &dev_cntrs[C_DC_RCV_FLITS];
+ cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
+
+ entry = &dev_cntrs[C_DC_XMIT_FLITS];
+ cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
+
+ hfi1_cdbg(
+ CNTR,
+ "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx",
+ dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
+
+ if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
+ /*
+ * May not be strictly necessary to update but it won't hurt and
+ * simplifies the logic here.
+ */
+ update = 1;
+ hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
+ dd->unit);
+ } else {
+ total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
+ hfi1_cdbg(CNTR,
+ "[%d] total flits 0x%llx limit 0x%llx", dd->unit,
+ total_flits, (u64)CNTR_32BIT_MAX);
+ if (total_flits >= CNTR_32BIT_MAX) {
+ hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
+ dd->unit);
+ update = 1;
+ }
+ }
+
+ if (update) {
+ hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
+ for (i = 0; i < DEV_CNTR_LAST; i++) {
+ entry = &dev_cntrs[i];
+ if (entry->flags & CNTR_VL) {
+ for (vl = 0; vl < C_VL_COUNT; vl++)
+ read_dev_cntr(dd, i, vl);
+ } else {
+ read_dev_cntr(dd, i, CNTR_INVALID_VL);
+ }
+ }
+ ppd = (struct hfi1_pportdata *)(dd + 1);
+ for (i = 0; i < dd->num_pports; i++, ppd++) {
+ for (j = 0; j < PORT_CNTR_LAST; j++) {
+ entry = &port_cntrs[j];
+ if (entry->flags & CNTR_VL) {
+ for (vl = 0; vl < C_VL_COUNT; vl++)
+ read_port_cntr(ppd, j, vl);
+ } else {
+ read_port_cntr(ppd, j, CNTR_INVALID_VL);
+ }
+ }
+ }
+
+ /*
+ * We want the value in the register. The goal is to keep track
+ * of the number of "ticks" not the counter value. In other
+ * words if the register rolls we want to notice it and go ahead
+ * and force an update.
+ */
+ entry = &dev_cntrs[C_DC_XMIT_FLITS];
+ dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
+ CNTR_MODE_R, 0);
+
+ entry = &dev_cntrs[C_DC_RCV_FLITS];
+ dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
+ CNTR_MODE_R, 0);
+
+ hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
+ dd->unit, dd->last_tx, dd->last_rx);
+
+ } else {
+ hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
+ }
+}
+
+static void update_synth_timer(struct timer_list *t)
+{
+ struct hfi1_devdata *dd = timer_container_of(dd, t, synth_stats_timer);
+
+ queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
+ mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
+}
+
+#define C_MAX_NAME 16 /* 15 chars + one for \0 */
+static int init_cntrs(struct hfi1_devdata *dd)
+{
+ int i, rcv_ctxts, j;
+ size_t sz;
+ char *p;
+ char name[C_MAX_NAME];
+ struct hfi1_pportdata *ppd;
+ const char *bit_type_32 = ",32";
+ const int bit_type_32_sz = strlen(bit_type_32);
+ u32 sdma_engines = chip_sdma_engines(dd);
+
+ /* set up the stats timer; the add_timer is done at the end */
+ timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);
+
+ /***********************/
+ /* per device counters */
+ /***********************/
+
+ /* size names and determine how many we have */
+ dd->ndevcntrs = 0;
+ sz = 0;
+
+ for (i = 0; i < DEV_CNTR_LAST; i++) {
+ if (dev_cntrs[i].flags & CNTR_DISABLED) {
+ hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
+ continue;
+ }
+
+ if (dev_cntrs[i].flags & CNTR_VL) {
+ dev_cntrs[i].offset = dd->ndevcntrs;
+ for (j = 0; j < C_VL_COUNT; j++) {
+ snprintf(name, C_MAX_NAME, "%s%d",
+ dev_cntrs[i].name, vl_from_idx(j));
+ sz += strlen(name);
+ /* Add ",32" for 32-bit counters */
+ if (dev_cntrs[i].flags & CNTR_32BIT)
+ sz += bit_type_32_sz;
+ sz++;
+ dd->ndevcntrs++;
+ }
+ } else if (dev_cntrs[i].flags & CNTR_SDMA) {
+ dev_cntrs[i].offset = dd->ndevcntrs;
+ for (j = 0; j < sdma_engines; j++) {
+ snprintf(name, C_MAX_NAME, "%s%d",
+ dev_cntrs[i].name, j);
+ sz += strlen(name);
+ /* Add ",32" for 32-bit counters */
+ if (dev_cntrs[i].flags & CNTR_32BIT)
+ sz += bit_type_32_sz;
+ sz++;
+ dd->ndevcntrs++;
+ }
+ } else {
+ /* +1 for newline. */
+ sz += strlen(dev_cntrs[i].name) + 1;
+ /* Add ",32" for 32-bit counters */
+ if (dev_cntrs[i].flags & CNTR_32BIT)
+ sz += bit_type_32_sz;
+ dev_cntrs[i].offset = dd->ndevcntrs;
+ dd->ndevcntrs++;
+ }
+ }
+
+ /* allocate space for the counter values */
+ dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
+ GFP_KERNEL);
+ if (!dd->cntrs)
+ goto bail;
+
+ dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
+ if (!dd->scntrs)
+ goto bail;
+
+ /* allocate space for the counter names */
+ dd->cntrnameslen = sz;
+ dd->cntrnames = kmalloc(sz, GFP_KERNEL);
+ if (!dd->cntrnames)
+ goto bail;
+
+ /* fill in the names */
+ for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
+ if (dev_cntrs[i].flags & CNTR_DISABLED) {
+ /* Nothing */
+ } else if (dev_cntrs[i].flags & CNTR_VL) {
+ for (j = 0; j < C_VL_COUNT; j++) {
+ snprintf(name, C_MAX_NAME, "%s%d",
+ dev_cntrs[i].name,
+ vl_from_idx(j));
+ memcpy(p, name, strlen(name));
+ p += strlen(name);
+
+ /* Counter is 32 bits */
+ if (dev_cntrs[i].flags & CNTR_32BIT) {
+ memcpy(p, bit_type_32, bit_type_32_sz);
+ p += bit_type_32_sz;
+ }
+
+ *p++ = '\n';
+ }
+ } else if (dev_cntrs[i].flags & CNTR_SDMA) {
+ for (j = 0; j < sdma_engines; j++) {
+ snprintf(name, C_MAX_NAME, "%s%d",
+ dev_cntrs[i].name, j);
+ memcpy(p, name, strlen(name));
+ p += strlen(name);
+
+ /* Counter is 32 bits */
+ if (dev_cntrs[i].flags & CNTR_32BIT) {
+ memcpy(p, bit_type_32, bit_type_32_sz);
+ p += bit_type_32_sz;
+ }
+
+ *p++ = '\n';
+ }
+ } else {
+ memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
+ p += strlen(dev_cntrs[i].name);
+
+ /* Counter is 32 bits */
+ if (dev_cntrs[i].flags & CNTR_32BIT) {
+ memcpy(p, bit_type_32, bit_type_32_sz);
+ p += bit_type_32_sz;
+ }
+
+ *p++ = '\n';
+ }
+ }
+
+ /*********************/
+ /* per port counters */
+ /*********************/
+
+ /*
+ * Go through the counters for the overflows and disable the ones we
+ * don't need. This varies based on platform so we need to do it
+ * dynamically here.
+ */
+ rcv_ctxts = dd->num_rcv_contexts;
+ for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
+ i <= C_RCV_HDR_OVF_LAST; i++) {
+ port_cntrs[i].flags |= CNTR_DISABLED;
+ }
+
+ /* size port counter names and determine how many we have */
+ sz = 0;
+ dd->nportcntrs = 0;
+ for (i = 0; i < PORT_CNTR_LAST; i++) {
+ if (port_cntrs[i].flags & CNTR_DISABLED) {
+ hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
+ continue;
+ }
+
+ if (port_cntrs[i].flags & CNTR_VL) {
+ port_cntrs[i].offset = dd->nportcntrs;
+ for (j = 0; j < C_VL_COUNT; j++) {
+ snprintf(name, C_MAX_NAME, "%s%d",
+ port_cntrs[i].name, vl_from_idx(j));
+ sz += strlen(name);
+ /* Add ",32" for 32-bit counters */
+ if (port_cntrs[i].flags & CNTR_32BIT)
+ sz += bit_type_32_sz;
+ sz++;
+ dd->nportcntrs++;
+ }
+ } else {
+ /* +1 for newline */
+ sz += strlen(port_cntrs[i].name) + 1;
+ /* Add ",32" for 32-bit counters */
+ if (port_cntrs[i].flags & CNTR_32BIT)
+ sz += bit_type_32_sz;
+ port_cntrs[i].offset = dd->nportcntrs;
+ dd->nportcntrs++;
+ }
+ }
+
+ /* allocate space for the counter names */
+ dd->portcntrnameslen = sz;
+ dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
+ if (!dd->portcntrnames)
+ goto bail;
+
+ /* fill in port cntr names */
+ for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
+ if (port_cntrs[i].flags & CNTR_DISABLED)
+ continue;
+
+ if (port_cntrs[i].flags & CNTR_VL) {
+ for (j = 0; j < C_VL_COUNT; j++) {
+ snprintf(name, C_MAX_NAME, "%s%d",
+ port_cntrs[i].name, vl_from_idx(j));
+ memcpy(p, name, strlen(name));
+ p += strlen(name);
+
+ /* Counter is 32 bits */
+ if (port_cntrs[i].flags & CNTR_32BIT) {
+ memcpy(p, bit_type_32, bit_type_32_sz);
+ p += bit_type_32_sz;
+ }
+
+ *p++ = '\n';
+ }
+ } else {
+ memcpy(p, port_cntrs[i].name,
+ strlen(port_cntrs[i].name));
+ p += strlen(port_cntrs[i].name);
+
+ /* Counter is 32 bits */
+ if (port_cntrs[i].flags & CNTR_32BIT) {
+ memcpy(p, bit_type_32, bit_type_32_sz);
+ p += bit_type_32_sz;
+ }
+
+ *p++ = '\n';
+ }
+ }
+
+ /* allocate per port storage for counter values */
+ ppd = (struct hfi1_pportdata *)(dd + 1);
+ for (i = 0; i < dd->num_pports; i++, ppd++) {
+ ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
+ if (!ppd->cntrs)
+ goto bail;
+
+ ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
+ if (!ppd->scntrs)
+ goto bail;
+ }
+
+ /* CPU counters need to be allocated and zeroed */
+ if (init_cpu_counters(dd))
+ goto bail;
+
+ dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
+ WQ_MEM_RECLAIM, dd->unit);
+ if (!dd->update_cntr_wq)
+ goto bail;
+
+ INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
+
+ mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
+ return 0;
+bail:
+ free_cntrs(dd);
+ return -ENOMEM;
+}
+
+static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
+{
+ switch (chip_lstate) {
+ case LSTATE_DOWN:
+ return IB_PORT_DOWN;
+ case LSTATE_INIT:
+ return IB_PORT_INIT;
+ case LSTATE_ARMED:
+ return IB_PORT_ARMED;
+ case LSTATE_ACTIVE:
+ return IB_PORT_ACTIVE;
+ default:
+ dd_dev_err(dd,
+ "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
+ chip_lstate);
+ return IB_PORT_DOWN;
+ }
+}
+
+u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
+{
+ /* look at the HFI meta-states only */
+ switch (chip_pstate & 0xf0) {
+ case PLS_DISABLED:
+ return IB_PORTPHYSSTATE_DISABLED;
+ case PLS_OFFLINE:
+ return OPA_PORTPHYSSTATE_OFFLINE;
+ case PLS_POLLING:
+ return IB_PORTPHYSSTATE_POLLING;
+ case PLS_CONFIGPHY:
+ return IB_PORTPHYSSTATE_TRAINING;
+ case PLS_LINKUP:
+ return IB_PORTPHYSSTATE_LINKUP;
+ case PLS_PHYTEST:
+ return IB_PORTPHYSSTATE_PHY_TEST;
+ default:
+ dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
+ chip_pstate);
+ return IB_PORTPHYSSTATE_DISABLED;
+ }
+}
+
+/* return the OPA port physical state name */
+const char *opa_pstate_name(u32 pstate)
+{
+ static const char * const port_physical_names[] = {
+ "PHYS_NOP",
+ "reserved1",
+ "PHYS_POLL",
+ "PHYS_DISABLED",
+ "PHYS_TRAINING",
+ "PHYS_LINKUP",
+ "PHYS_LINK_ERR_RECOVER",
+ "PHYS_PHY_TEST",
+ "reserved8",
+ "PHYS_OFFLINE",
+ "PHYS_GANGED",
+ "PHYS_TEST",
+ };
+ if (pstate < ARRAY_SIZE(port_physical_names))
+ return port_physical_names[pstate];
+ return "unknown";
+}
+
+/**
+ * update_statusp - Update userspace status flag
+ * @ppd: Port data structure
+ * @state: port state information
+ *
+ * Actual port status is determined by the host_link_state value
+ * in the ppd.
+ *
+ * host_link_state MUST be updated before updating the user space
+ * statusp.
+ */
+static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
+{
+ /*
+ * Set port status flags in the page mapped into userspace
+ * memory. Do it here to ensure a reliable state - this is
+ * the only function called by all state handling code.
+ * Always set the flags because the cached value might have
+ * been changed explicitly outside of this function.
+ */
+ if (ppd->statusp) {
+ switch (state) {
+ case IB_PORT_DOWN:
+ case IB_PORT_INIT:
+ *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
+ HFI1_STATUS_IB_READY);
+ break;
+ case IB_PORT_ARMED:
+ *ppd->statusp |= HFI1_STATUS_IB_CONF;
+ break;
+ case IB_PORT_ACTIVE:
+ *ppd->statusp |= HFI1_STATUS_IB_READY;
+ break;
+ }
+ }
+}
+
+/**
+ * wait_logical_linkstate - wait for an IB link state change to occur
+ * @ppd: port device
+ * @state: the state to wait for
+ * @msecs: the number of milliseconds to wait
+ *
+ * Wait up to msecs milliseconds for IB link state change to occur.
+ * For now, take the easy polling route.
+ * Returns 0 if state reached, otherwise -ETIMEDOUT.
+ */
+static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
+ int msecs)
+{
+ unsigned long timeout;
+ u32 new_state;
+
+ timeout = jiffies + msecs_to_jiffies(msecs);
+ while (1) {
+ new_state = chip_to_opa_lstate(ppd->dd,
+ read_logical_state(ppd->dd));
+ if (new_state == state)
+ break;
+ if (time_after(jiffies, timeout)) {
+ dd_dev_err(ppd->dd,
+ "timeout waiting for link state 0x%x\n",
+ state);
+ return -ETIMEDOUT;
+ }
+ msleep(20);
+ }
+
+ return 0;
+}
+
+static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
+{
+ u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);
+
+ dd_dev_info(ppd->dd,
+ "physical state changed to %s (0x%x), phy 0x%x\n",
+ opa_pstate_name(ib_pstate), ib_pstate, state);
+}
+
+/*
+ * Read the physical hardware link state and check if it matches host
+ * drivers anticipated state.
+ */
+static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
+{
+ u32 read_state = read_physical_state(ppd->dd);
+
+ if (read_state == state) {
+ log_state_transition(ppd, state);
+ } else {
+ dd_dev_err(ppd->dd,
+ "anticipated phy link state 0x%x, read 0x%x\n",
+ state, read_state);
+ }
+}
+
+/*
+ * wait_physical_linkstate - wait for a physical link state change to occur
+ * @ppd: port device
+ * @state: the state to wait for
+ * @msecs: the number of milliseconds to wait
+ *
+ * Wait up to msecs milliseconds for physical link state change to occur.
+ * Returns 0 if state reached, otherwise -ETIMEDOUT.
+ */
+static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
+ int msecs)
+{
+ u32 read_state;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(msecs);
+ while (1) {
+ read_state = read_physical_state(ppd->dd);
+ if (read_state == state)
+ break;
+ if (time_after(jiffies, timeout)) {
+ dd_dev_err(ppd->dd,
+ "timeout waiting for phy link state 0x%x\n",
+ state);
+ return -ETIMEDOUT;
+ }
+ usleep_range(1950, 2050); /* sleep 2ms-ish */
+ }
+
+ log_state_transition(ppd, state);
+ return 0;
+}
+
+/*
+ * wait_phys_link_offline_substates - wait for any offline substate
+ * @ppd: port device
+ * @msecs: the number of milliseconds to wait
+ *
+ * Wait up to msecs milliseconds for any offline physical link
+ * state change to occur.
+ * Returns the physical state read once an offline substate is reached,
+ * otherwise -ETIMEDOUT.
+ */
+static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
+ int msecs)
+{
+ u32 read_state;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(msecs);
+ while (1) {
+ read_state = read_physical_state(ppd->dd);
+ if ((read_state & 0xF0) == PLS_OFFLINE)
+ break;
+ if (time_after(jiffies, timeout)) {
+ dd_dev_err(ppd->dd,
+ "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
+ read_state, msecs);
+ return -ETIMEDOUT;
+ }
+ usleep_range(1950, 2050); /* sleep 2ms-ish */
+ }
+
+ log_state_transition(ppd, read_state);
+ return read_state;
+}
+
+/*
+ * wait_phys_link_out_of_offline - wait for any out of offline state
+ * @ppd: port device
+ * @msecs: the number of milliseconds to wait
+ *
+ * Wait up to msecs milliseconds for any out of offline physical link
+ * state change to occur.
+ * Returns the physical state read once the link is out of offline,
+ * otherwise -ETIMEDOUT.
+ */
+static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
+ int msecs)
+{
+ u32 read_state;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(msecs);
+ while (1) {
+ read_state = read_physical_state(ppd->dd);
+ if ((read_state & 0xF0) != PLS_OFFLINE)
+ break;
+ if (time_after(jiffies, timeout)) {
+ dd_dev_err(ppd->dd,
+ "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n",
+ read_state, msecs);
+ return -ETIMEDOUT;
+ }
+ usleep_range(1950, 2050); /* sleep 2ms-ish */
+ }
+
+ log_state_transition(ppd, read_state);
+ return read_state;
+}
+
+#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
+(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
+
+#define SET_STATIC_RATE_CONTROL_SMASK(r) \
+(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
+
+void hfi1_init_ctxt(struct send_context *sc)
+{
+ if (sc) {
+ struct hfi1_devdata *dd = sc->dd;
+ u64 reg;
+ u8 set = (sc->type == SC_USER ?
+ HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
+ HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
+ reg = read_kctxt_csr(dd, sc->hw_context,
+ SEND_CTXT_CHECK_ENABLE);
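+ /*
+ * The check-enable bit disallows PBC static rate control, so
+ * clear it when the capability is enabled and set it otherwise.
+ */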
+ if (set)
+ CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
+ else
+ SET_STATIC_RATE_CONTROL_SMASK(reg);
+ write_kctxt_csr(dd, sc->hw_context,
+ SEND_CTXT_CHECK_ENABLE, reg);
+ }
+}
+
+int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
+{
+ int ret = 0;
+ u64 reg;
+
+ if (dd->icode != ICODE_RTL_SILICON) {
+ if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
+ dd_dev_info(dd, "%s: tempsense not supported by HW\n",
+ __func__);
+ return -EINVAL;
+ }
+ reg = read_csr(dd, ASIC_STS_THERM);
+ temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
+ ASIC_STS_THERM_CURR_TEMP_MASK);
+ temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
+ ASIC_STS_THERM_LO_TEMP_MASK);
+ temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
+ ASIC_STS_THERM_HI_TEMP_MASK);
+ temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
+ ASIC_STS_THERM_CRIT_TEMP_MASK);
+ /* triggers is a 3-bit value - 1 bit per trigger. */
+ temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
+
+ return ret;
+}
+
+/* ========================================================================= */
+
+/**
+ * read_mod_write() - Calculate the IRQ register index and set/clear the bits
+ * @dd: valid devdata
+ * @src: IRQ source to determine register index from
+ * @bits: the bits to set or clear
+ * @set: true == set the bits, false == clear the bits
+ *
+ */
+static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits,
+ bool set)
+{
+ u64 reg;
+ u16 idx = src / BITS_PER_REGISTER;
+ unsigned long flags;
+
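+ /* each 64-bit CCE_INT_MASK CSR covers BITS_PER_REGISTER interrupt sources */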
+ spin_lock_irqsave(&dd->irq_src_lock, flags);
+ reg = read_csr(dd, CCE_INT_MASK + (8 * idx));
+ if (set)
+ reg |= bits;
+ else
+ reg &= ~bits;
+ write_csr(dd, CCE_INT_MASK + (8 * idx), reg);
+ spin_unlock_irqrestore(&dd->irq_src_lock, flags);
+}
+
+/**
+ * set_intr_bits() - Enable/disable a range of (one or more) IRQ sources
+ * @dd: valid devdata
+ * @first: first IRQ source to set/clear
+ * @last: last IRQ source (inclusive) to set/clear
+ * @set: true == set the bits, false == clear the bits
+ *
+ * If first == last, set the exact source.
+ */
+int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set)
+{
+ u64 bits = 0;
+ u64 bit;
+ u16 src;
+
+ if (first > NUM_INTERRUPT_SOURCES || last > NUM_INTERRUPT_SOURCES)
+ return -EINVAL;
+
+ if (last < first)
+ return -ERANGE;
+
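+ /*
+ * Accumulate bits for one 64-bit mask register at a time and flush
+ * them whenever the source index crosses a register boundary; the
+ * final read_mod_write() below covers the last, possibly partial,
+ * register.
+ */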
+ for (src = first; src <= last; src++) {
+ bit = src % BITS_PER_REGISTER;
+ /* wrapped to next register? */
+ if (!bit && bits) {
+ read_mod_write(dd, src - 1, bits, set);
+ bits = 0;
+ }
+ bits |= BIT_ULL(bit);
+ }
+ read_mod_write(dd, last, bits, set);
+
+ return 0;
+}
+
+/*
+ * Clear all interrupt sources on the chip.
+ */
+static void clear_all_interrupts(struct hfi1_devdata *dd)
+{
+ int i;
+
+ for (i = 0; i < CCE_NUM_INT_CSRS; i++)
+ write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
+
+ write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
+ write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
+ write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
+ write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
+ write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
+ write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
+ write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
+ for (i = 0; i < chip_send_contexts(dd); i++)
+ write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
+ for (i = 0; i < chip_sdma_engines(dd); i++)
+ write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
+
+ write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
+ write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
+ write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
+}
+
+/*
+ * Remap the interrupt source from the general handler to the given MSI-X
+ * interrupt.
+ */
+void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
+{
+ u64 reg;
+ int m, n;
+
+ /* clear from the handled mask of the general interrupt */
+ m = isrc / 64;
+ n = isrc % 64;
+ if (likely(m < CCE_NUM_INT_CSRS)) {
+ dd->gi_mask[m] &= ~((u64)1 << n);
+ } else {
+ dd_dev_err(dd, "remap interrupt err\n");
+ return;
+ }
+
+ /* direct the chip source to the given MSI-X interrupt */
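+ /* each CCE_INT_MAP CSR packs eight 8-bit MSI-X vector entries */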
+ m = isrc / 8;
+ n = isrc % 8;
+ reg = read_csr(dd, CCE_INT_MAP + (8 * m));
+ reg &= ~((u64)0xff << (8 * n));
+ reg |= ((u64)msix_intr & 0xff) << (8 * n);
+ write_csr(dd, CCE_INT_MAP + (8 * m), reg);
+}
+
+void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr)
+{
+ /*
+ * SDMA engine interrupt sources grouped by type, rather than
+ * engine. Per-engine interrupts are as follows:
+ * SDMA
+ * SDMAProgress
+ * SDMAIdle
+ */
+ remap_intr(dd, IS_SDMA_START + engine, msix_intr);
+ remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr);
+ remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr);
+}
+
+/*
+ * Set the general handler to accept all interrupts, remap all
+ * chip interrupts back to MSI-X 0.
+ */
+void reset_interrupts(struct hfi1_devdata *dd)
+{
+ int i;
+
+ /* all interrupts handled by the general handler */
+ for (i = 0; i < CCE_NUM_INT_CSRS; i++)
+ dd->gi_mask[i] = ~(u64)0;
+
+ /* all chip interrupts map to MSI-X 0 */
+ for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
+ write_csr(dd, CCE_INT_MAP + (8 * i), 0);
+}
+
+/**
+ * set_up_interrupts() - Initialize the IRQ resources and state
+ * @dd: valid devdata
+ *
+ */
+static int set_up_interrupts(struct hfi1_devdata *dd)
+{
+ int ret;
+
+ /* mask all interrupts */
+ set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
+
+ /* clear all pending interrupts */
+ clear_all_interrupts(dd);
+
+ /* reset general handler mask, chip MSI-X mappings */
+ reset_interrupts(dd);
+
+ /* ask for MSI-X interrupts */
+ ret = msix_initialize(dd);
+ if (ret)
+ return ret;
+
+ ret = msix_request_irqs(dd);
+ if (ret)
+ msix_clean_up_interrupts(dd);
+
+ return ret;
+}
+
+/*
+ * Set up context values in dd. Sets:
+ *
+ * num_rcv_contexts - number of contexts being used
+ * n_krcv_queues - number of kernel contexts
+ * first_dyn_alloc_ctxt - first dynamically allocated context
+ * in array of contexts
+ * freectxts - number of free user contexts
+ * num_send_contexts - number of PIO send contexts being used
+ * num_netdev_contexts - number of contexts reserved for netdev
+ */
+static int set_up_context_variables(struct hfi1_devdata *dd)
+{
+ unsigned long num_kernel_contexts;
+ u16 num_netdev_contexts;
+ int ret;
+ unsigned ngroups;
+ int rmt_count;
+ u32 n_usr_ctxts;
+ u32 send_contexts = chip_send_contexts(dd);
+ u32 rcv_contexts = chip_rcv_contexts(dd);
+
+ /*
+ * Kernel receive contexts:
+ * - Context 0 - control context (VL15/multicast/error)
+ * - Context 1 - first kernel context
+ * - Context 2 - second kernel context
+ * ...
+ */
+ if (n_krcvqs)
+ /*
+ * n_krcvqs is the sum of module parameter kernel receive
+ * contexts, krcvqs[]. It does not include the control
+ * context, so add that.
+ */
+ num_kernel_contexts = n_krcvqs + 1;
+ else
+ num_kernel_contexts = DEFAULT_KRCVQS + 1;
+ /*
+ * Every kernel receive context needs an ACK send context.
+ * One send context is allocated for each VL{0-7} and VL15.
+ */
+ if (num_kernel_contexts > (send_contexts - num_vls - 1)) {
+ dd_dev_err(dd,
+ "Reducing # kernel rcv contexts to: %d, from %lu\n",
+ send_contexts - num_vls - 1,
+ num_kernel_contexts);
+ num_kernel_contexts = send_contexts - num_vls - 1;
+ }
+
+ /*
+ * User contexts:
+ * - default to 1 user context per real (non-HT) CPU core if
+ * num_user_contexts is negative
+ */
+ if (num_user_contexts < 0)
+ n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask);
+ else
+ n_usr_ctxts = num_user_contexts;
+ /*
+ * Adjust the counts given a global max.
+ */
+ if (num_kernel_contexts + n_usr_ctxts > rcv_contexts) {
+ dd_dev_err(dd,
+ "Reducing # user receive contexts to: %u, from %u\n",
+ (u32)(rcv_contexts - num_kernel_contexts),
+ n_usr_ctxts);
+ /* recalculate */
+ n_usr_ctxts = rcv_contexts - num_kernel_contexts;
+ }
+
+ num_netdev_contexts =
+ hfi1_num_netdev_contexts(dd, rcv_contexts -
+ (num_kernel_contexts + n_usr_ctxts),
+ &node_affinity.real_cpu_mask);
+ /*
+ * RMT entries are allocated as follows:
+ * 1. QOS (0 to 128 entries)
+ * 2. FECN (num_kernel_context - 1 [a] + num_user_contexts +
+ * num_netdev_contexts [b])
+ * 3. netdev (NUM_NETDEV_MAP_ENTRIES)
+ *
+ * Notes:
+ * [a] Kernel contexts (except control) are included in FECN if kernel
+ * TID_RDMA is active.
+ * [b] Netdev and user contexts are randomly allocated from the same
+ * context pool, so FECN must cover all contexts in the pool.
+ */
+ rmt_count = qos_rmt_entries(num_kernel_contexts - 1, NULL, NULL)
+ + (HFI1_CAP_IS_KSET(TID_RDMA) ? num_kernel_contexts - 1
+ : 0)
+ + n_usr_ctxts
+ + num_netdev_contexts
+ + NUM_NETDEV_MAP_ENTRIES;
+ if (rmt_count > NUM_MAP_ENTRIES) {
+ int over = rmt_count - NUM_MAP_ENTRIES;
+ /* try to squish user contexts, minimum of 1 */
+ if (over >= n_usr_ctxts) {
+ dd_dev_err(dd, "RMT overflow: reduce the requested number of contexts\n");
+ return -EINVAL;
+ }
+ dd_dev_err(dd, "RMT overflow: reducing # user contexts from %u to %u\n",
+ n_usr_ctxts, n_usr_ctxts - over);
+ n_usr_ctxts -= over;
+ }
+
+ /* the first N are kernel contexts, the rest are user/netdev contexts */
+ dd->num_rcv_contexts =
+ num_kernel_contexts + n_usr_ctxts + num_netdev_contexts;
+ dd->n_krcv_queues = num_kernel_contexts;
+ dd->first_dyn_alloc_ctxt = num_kernel_contexts;
+ dd->num_netdev_contexts = num_netdev_contexts;
+ dd->num_user_contexts = n_usr_ctxts;
+ dd->freectxts = n_usr_ctxts;
+ dd_dev_info(dd,
+ "rcv contexts: chip %d, used %d (kernel %d, netdev %u, user %u)\n",
+ rcv_contexts,
+ (int)dd->num_rcv_contexts,
+ (int)dd->n_krcv_queues,
+ dd->num_netdev_contexts,
+ dd->num_user_contexts);
+
+ /*
+ * Receive array allocation:
+ * All RcvArray entries are divided into groups of 8. This
+ * is required by the hardware and will speed up writes to
+ * consecutive entries by using write-combining of the entire
+ * cacheline.
+ *
+ * The groups are evenly divided among all contexts; any leftover
+ * groups are given to the first N user contexts.
+ */
+ dd->rcv_entries.group_size = RCV_INCREMENT;
+ ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size;
+ dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
+ dd->rcv_entries.nctxt_extra = ngroups -
+ (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
+ dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
+ dd->rcv_entries.ngroups,
+ dd->rcv_entries.nctxt_extra);
+ if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
+ MAX_EAGER_ENTRIES * 2) {
+ dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
+ dd->rcv_entries.group_size;
+ dd_dev_info(dd,
+ "RcvArray group count too high, change to %u\n",
+ dd->rcv_entries.ngroups);
+ dd->rcv_entries.nctxt_extra = 0;
+ }
+ /*
+ * PIO send contexts
+ */
+ ret = init_sc_pools_and_sizes(dd);
+ if (ret >= 0) { /* success */
+ dd->num_send_contexts = ret;
+ dd_dev_info(
+ dd,
+ "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
+ send_contexts,
+ dd->num_send_contexts,
+ dd->sc_sizes[SC_KERNEL].count,
+ dd->sc_sizes[SC_ACK].count,
+ dd->sc_sizes[SC_USER].count,
+ dd->sc_sizes[SC_VL15].count);
+ ret = 0; /* success */
+ }
+
+ return ret;
+}
+
+/*
+ * Set the device/port partition key table. The MAD code
+ * will ensure that, at least, the partial management
+ * partition key is present in the table.
+ */
+static void set_partition_keys(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u64 reg = 0;
+ int i;
+
+ dd_dev_info(dd, "Setting partition keys\n");
+ for (i = 0; i < hfi1_get_npkeys(dd); i++) {
+ reg |= (ppd->pkeys[i] &
+ RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
+ ((i % 4) *
+ RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
+ /* Each register holds 4 PKey values. */
+ if ((i % 4) == 3) {
+ write_csr(dd, RCV_PARTITION_KEY +
+ ((i - 3) * 2), reg);
+ reg = 0;
+ }
+ }
+
+ /* Always enable HW pkeys check when pkeys table is set */
+ add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
+}
+
+/*
+ * These CSRs and memories are uninitialized on reset and must be
+ * written before reading to set the ECC/parity bits.
+ *
+ * NOTE: All user context CSRs that are not mmapped write-only
+ * (e.g. the TID flows) must be initialized even if the driver never
+ * reads them.
+ */
+static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
+{
+ int i, j;
+
+ /* CceIntMap */
+ for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
+ write_csr(dd, CCE_INT_MAP + (8 * i), 0);
+
+ /* SendCtxtCreditReturnAddr */
+ for (i = 0; i < chip_send_contexts(dd); i++)
+ write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
+
+ /* PIO Send buffers */
+ /* SDMA Send buffers */
+ /*
+ * These are not normally read, and (presently) have no method
+ * to be read, so are not pre-initialized
+ */
+
+ /* RcvHdrAddr */
+ /* RcvHdrTailAddr */
+ /* RcvTidFlowTable */
+ for (i = 0; i < chip_rcv_contexts(dd); i++) {
+ write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
+ write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
+ for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
+ write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
+ }
+
+ /* RcvArray */
+ for (i = 0; i < chip_rcv_array_count(dd); i++)
+ hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
+
+ /* RcvQPMapTable */
+ for (i = 0; i < 32; i++)
+ write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
+}
+
+/*
+ * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
+ */
+static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
+ u64 ctrl_bits)
+{
+ unsigned long timeout;
+ u64 reg;
+
+ /* is the condition present? */
+ reg = read_csr(dd, CCE_STATUS);
+ if ((reg & status_bits) == 0)
+ return;
+
+ /* clear the condition */
+ write_csr(dd, CCE_CTRL, ctrl_bits);
+
+ /* wait for the condition to clear */
+ timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
+ while (1) {
+ reg = read_csr(dd, CCE_STATUS);
+ if ((reg & status_bits) == 0)
+ return;
+ if (time_after(jiffies, timeout)) {
+ dd_dev_err(dd,
+ "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
+ status_bits, reg & status_bits);
+ return;
+ }
+ udelay(1);
+ }
+}
+
+/* set CCE CSRs to chip reset defaults */
+static void reset_cce_csrs(struct hfi1_devdata *dd)
+{
+ int i;
+
+ /* CCE_REVISION read-only */
+ /* CCE_REVISION2 read-only */
+ /* CCE_CTRL - bits clear automatically */
+ /* CCE_STATUS read-only, use CceCtrl to clear */
+ clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
+ clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
+ clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
+ for (i = 0; i < CCE_NUM_SCRATCH; i++)
+ write_csr(dd, CCE_SCRATCH + (8 * i), 0);
+ /* CCE_ERR_STATUS read-only */
+ write_csr(dd, CCE_ERR_MASK, 0);
+ write_csr(dd, CCE_ERR_CLEAR, ~0ull);
+ /* CCE_ERR_FORCE leave alone */
+ for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
+ write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
+ write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
+ /* CCE_PCIE_CTRL leave alone */
+ for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
+ write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
+ write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
+ CCE_MSIX_TABLE_UPPER_RESETCSR);
+ }
+ for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
+ /* CCE_MSIX_PBA read-only */
+ write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
+ write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
+ }
+ for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
+ write_csr(dd, CCE_INT_MAP, 0);
+ for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
+ /* CCE_INT_STATUS read-only */
+ write_csr(dd, CCE_INT_MASK + (8 * i), 0);
+ write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
+ /* CCE_INT_FORCE leave alone */
+ /* CCE_INT_BLOCKED read-only */
+ }
+ for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
+ write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
+}
+
+/* set MISC CSRs to chip reset defaults */
+static void reset_misc_csrs(struct hfi1_devdata *dd)
+{
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
+ write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
+ write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
+ }
+ /*
+ * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
+ * only be written in 128-byte chunks
+ */
+ /* init RSA engine to clear lingering errors */
+ write_csr(dd, MISC_CFG_RSA_CMD, 1);
+ write_csr(dd, MISC_CFG_RSA_MU, 0);
+ write_csr(dd, MISC_CFG_FW_CTRL, 0);
+ /* MISC_STS_8051_DIGEST read-only */
+ /* MISC_STS_SBM_DIGEST read-only */
+ /* MISC_STS_PCIE_DIGEST read-only */
+ /* MISC_STS_FAB_DIGEST read-only */
+ /* MISC_ERR_STATUS read-only */
+ write_csr(dd, MISC_ERR_MASK, 0);
+ write_csr(dd, MISC_ERR_CLEAR, ~0ull);
+ /* MISC_ERR_FORCE leave alone */
+}
+
+/* set TXE CSRs to chip reset defaults */
+static void reset_txe_csrs(struct hfi1_devdata *dd)
+{
+ int i;
+
+ /*
+ * TXE Kernel CSRs
+ */
+ write_csr(dd, SEND_CTRL, 0);
+ __cm_reset(dd, 0); /* reset CM internal state */
+ /* SEND_CONTEXTS read-only */
+ /* SEND_DMA_ENGINES read-only */
+ /* SEND_PIO_MEM_SIZE read-only */
+ /* SEND_DMA_MEM_SIZE read-only */
+ write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
+ pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
+ /* SEND_PIO_ERR_STATUS read-only */
+ write_csr(dd, SEND_PIO_ERR_MASK, 0);
+ write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
+ /* SEND_PIO_ERR_FORCE leave alone */
+ /* SEND_DMA_ERR_STATUS read-only */
+ write_csr(dd, SEND_DMA_ERR_MASK, 0);
+ write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
+ /* SEND_DMA_ERR_FORCE leave alone */
+ /* SEND_EGRESS_ERR_STATUS read-only */
+ write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
+ write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
+ /* SEND_EGRESS_ERR_FORCE leave alone */
+ write_csr(dd, SEND_BTH_QP, 0);
+ write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
+ write_csr(dd, SEND_SC2VLT0, 0);
+ write_csr(dd, SEND_SC2VLT1, 0);
+ write_csr(dd, SEND_SC2VLT2, 0);
+ write_csr(dd, SEND_SC2VLT3, 0);
+ write_csr(dd, SEND_LEN_CHECK0, 0);
+ write_csr(dd, SEND_LEN_CHECK1, 0);
+ /* SEND_ERR_STATUS read-only */
+ write_csr(dd, SEND_ERR_MASK, 0);
+ write_csr(dd, SEND_ERR_CLEAR, ~0ull);
+ /* SEND_ERR_FORCE read-only */
+ for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
+ write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
+ for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
+ write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
+ for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++)
+ write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
+ for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
+ write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
+ for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
+ write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
+ write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
+ write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
+ /* SEND_CM_CREDIT_USED_STATUS read-only */
+ write_csr(dd, SEND_CM_TIMER_CTRL, 0);
+ write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
+ write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
+ write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
+ write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
+ for (i = 0; i < TXE_NUM_DATA_VL; i++)
+ write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
+ write_csr(dd, SEND_CM_CREDIT_VL15, 0);
+ /* SEND_CM_CREDIT_USED_VL read-only */
+ /* SEND_CM_CREDIT_USED_VL15 read-only */
+ /* SEND_EGRESS_CTXT_STATUS read-only */
+ /* SEND_EGRESS_SEND_DMA_STATUS read-only */
+ write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
+ /* SEND_EGRESS_ERR_INFO read-only */
+ /* SEND_EGRESS_ERR_SOURCE read-only */
+
+ /*
+ * TXE Per-Context CSRs
+ */
+ for (i = 0; i < chip_send_contexts(dd); i++) {
+ write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
+ write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
+ write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
+ write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
+ write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
+ write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
+ write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
+ write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
+ write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
+ write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
+ write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
+ write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
+ }
+
+ /*
+ * TXE Per-SDMA CSRs
+ */
+ for (i = 0; i < chip_sdma_engines(dd); i++) {
+ write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
+ /* SEND_DMA_STATUS read-only */
+ write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
+ write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
+ write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
+ /* SEND_DMA_HEAD read-only */
+ write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
+ write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
+ /* SEND_DMA_IDLE_CNT read-only */
+ write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
+ write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
+ /* SEND_DMA_DESC_FETCHED_CNT read-only */
+ /* SEND_DMA_ENG_ERR_STATUS read-only */
+ write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
+ write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
+ /* SEND_DMA_ENG_ERR_FORCE leave alone */
+ write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
+ write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
+ write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
+ write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
+ write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
+ write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
+ write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
+ }
+}
+
+/*
+ * Expect on entry:
+ * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
+ */
+static void init_rbufs(struct hfi1_devdata *dd)
+{
+ u64 reg;
+ int count;
+
+ /*
+ * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
+ * clear.
+ */
+ count = 0;
+ while (1) {
+ reg = read_csr(dd, RCV_STATUS);
+ if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
+ | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
+ break;
+ /*
+ * Give up after 1ms - maximum wait time.
+ *
+ * RBuf size is 136 KiB. Slowest possible is PCIe Gen1 x1 at
+ * 250 MB/s bandwidth. Lower the rate to 66% for overhead to get:
+ * 136 KiB / (66% * 250 MB/s) = 844us
+ */
+ if (count++ > 500) {
+ dd_dev_err(dd,
+ "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
+ __func__, reg);
+ break;
+ }
+ udelay(2); /* do not busy-wait the CSR */
+ }
+
+ /* start the init - expect RcvCtrl to be 0 */
+ write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
+
+ /*
+ * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
+ * period after the write before RcvStatus.RxRbufInitDone is valid.
+ * The delay in the first run through the loop below is sufficient and
+ * required before the first read of RcvStatus.RxRbufInitDone.
+ */
+ read_csr(dd, RCV_CTRL);
+
+ /* wait for the init to finish */
+ count = 0;
+ while (1) {
+ /* delay is required first time through - see above */
+ udelay(2); /* do not busy-wait the CSR */
+ reg = read_csr(dd, RCV_STATUS);
+ if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
+ break;
+
+ /* give up after 100us - slowest possible at 33MHz is 73us */
+ if (count++ > 50) {
+ dd_dev_err(dd,
+ "%s: RcvStatus.RxRbufInit not set, continuing\n",
+ __func__);
+ break;
+ }
+ }
+}
+
+/* set RXE CSRs to chip reset defaults */
+static void reset_rxe_csrs(struct hfi1_devdata *dd)
+{
+ int i, j;
+
+ /*
+ * RXE Kernel CSRs
+ */
+ write_csr(dd, RCV_CTRL, 0);
+ init_rbufs(dd);
+ /* RCV_STATUS read-only */
+ /* RCV_CONTEXTS read-only */
+ /* RCV_ARRAY_CNT read-only */
+ /* RCV_BUF_SIZE read-only */
+ write_csr(dd, RCV_BTH_QP, 0);
+ write_csr(dd, RCV_MULTICAST, 0);
+ write_csr(dd, RCV_BYPASS, 0);
+ write_csr(dd, RCV_VL15, 0);
+ /* this is a clear-down */
+ write_csr(dd, RCV_ERR_INFO,
+ RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
+ /* RCV_ERR_STATUS read-only */
+ write_csr(dd, RCV_ERR_MASK, 0);
+ write_csr(dd, RCV_ERR_CLEAR, ~0ull);
+ /* RCV_ERR_FORCE leave alone */
+ for (i = 0; i < 32; i++)
+ write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
+ for (i = 0; i < 4; i++)
+ write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
+ for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
+ write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
+ for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
+ write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
+ for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
+ clear_rsm_rule(dd, i);
+ for (i = 0; i < 32; i++)
+ write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
+
+ /*
+ * RXE Kernel and User Per-Context CSRs
+ */
+ for (i = 0; i < chip_rcv_contexts(dd); i++) {
+ /* kernel */
+ write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
+ /* RCV_CTXT_STATUS read-only */
+ write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
+ write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
+ write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
+ write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
+ write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
+ write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
+ write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
+ write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
+ write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
+ write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
+
+ /* user */
+ /* RCV_HDR_TAIL read-only */
+ write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
+ /* RCV_EGR_INDEX_TAIL read-only */
+ write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
+ /* RCV_EGR_OFFSET_TAIL read-only */
+ for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
+ write_uctxt_csr(dd, i,
+ RCV_TID_FLOW_TABLE + (8 * j), 0);
+ }
+ }
+}
+
+/*
+ * Set sc2vl tables.
+ *
+ * They power on to zeros, so to avoid send context errors
+ * they need to be set:
+ *
+ * SC 0-7 -> VL 0-7 (respectively)
+ * SC 15 -> VL 15
+ * otherwise
+ * -> VL 0
+ */
+static void init_sc2vl_tables(struct hfi1_devdata *dd)
+{
+ int i;
+ /* init per architecture spec, constrained by hardware capability */
+
+ /* HFI maps sent packets */
+ write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
+ 0,
+ 0, 0, 1, 1,
+ 2, 2, 3, 3,
+ 4, 4, 5, 5,
+ 6, 6, 7, 7));
+ write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
+ 1,
+ 8, 0, 9, 0,
+ 10, 0, 11, 0,
+ 12, 0, 13, 0,
+ 14, 0, 15, 15));
+ write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
+ 2,
+ 16, 0, 17, 0,
+ 18, 0, 19, 0,
+ 20, 0, 21, 0,
+ 22, 0, 23, 0));
+ write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
+ 3,
+ 24, 0, 25, 0,
+ 26, 0, 27, 0,
+ 28, 0, 29, 0,
+ 30, 0, 31, 0));
+
+ /* DC maps received packets */
+ write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
+ 15_0,
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
+ write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
+ 31_16,
+ 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
+ 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
+
+ /* initialize the cached sc2vl values consistently with h/w */
+ for (i = 0; i < 32; i++) {
+ if (i < 8 || i == 15)
+ *((u8 *)(dd->sc2vl) + i) = (u8)i;
+ else
+ *((u8 *)(dd->sc2vl) + i) = 0;
+ }
+}
+
+/*
+ * Read chip sizes and then reset parts to sane, disabled, values. We cannot
+ * depend on the chip going through a power-on reset - a driver may be loaded
+ * and unloaded many times.
+ *
+ * Do not write any CSR values to the chip in this routine - there may be
+ * a reset following the (possible) FLR in this routine.
+ *
+ */
+static int init_chip(struct hfi1_devdata *dd)
+{
+ int i;
+ int ret = 0;
+
+ /*
+ * Put the HFI CSRs in a known state.
+ * Combine this with a DC reset.
+ *
+ * Stop the device from doing anything while we do a
+ * reset. We know there are no other active users of
+ * the device since we are now in charge. Turn off
+ * all outbound and inbound traffic and make sure
+ * the device does not generate any interrupts.
+ */
+
+ /* disable send contexts and SDMA engines */
+ write_csr(dd, SEND_CTRL, 0);
+ for (i = 0; i < chip_send_contexts(dd); i++)
+ write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
+ for (i = 0; i < chip_sdma_engines(dd); i++)
+ write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
+ /* disable port (turn off RXE inbound traffic) and contexts */
+ write_csr(dd, RCV_CTRL, 0);
+ for (i = 0; i < chip_rcv_contexts(dd); i++)
+ write_csr(dd, RCV_CTXT_CTRL, 0);
+ /* mask all interrupt sources */
+ for (i = 0; i < CCE_NUM_INT_CSRS; i++)
+ write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
+
+ /*
+ * DC Reset: do a full DC reset before the register clear.
+ * A recommended length of time to hold is one CSR read,
+ * so reread the CceDcCtrl. Then, hold the DC in reset
+ * across the clear.
+ */
+ write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
+ (void)read_csr(dd, CCE_DC_CTRL);
+
+ if (use_flr) {
+ /*
+ * A FLR will reset the SPC core and part of the PCIe.
+ * The parts that need to be restored have already been
+ * saved.
+ */
+ dd_dev_info(dd, "Resetting CSRs with FLR\n");
+
+ /* do the FLR, the DC reset will remain */
+ pcie_flr(dd->pcidev);
+
+ /* restore command and BARs */
+ ret = restore_pci_variables(dd);
+ if (ret) {
+ dd_dev_err(dd, "%s: Could not restore PCI variables\n",
+ __func__);
+ return ret;
+ }
+
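+ /* A-step parts (is_ax) get a second FLR and PCI variable restore */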
+ if (is_ax(dd)) {
+ dd_dev_info(dd, "Resetting CSRs with FLR\n");
+ pcie_flr(dd->pcidev);
+ ret = restore_pci_variables(dd);
+ if (ret) {
+ dd_dev_err(dd, "%s: Could not restore PCI variables\n",
+ __func__);
+ return ret;
+ }
+ }
+ } else {
+ dd_dev_info(dd, "Resetting CSRs with writes\n");
+ reset_cce_csrs(dd);
+ reset_txe_csrs(dd);
+ reset_rxe_csrs(dd);
+ reset_misc_csrs(dd);
+ }
+ /* clear the DC reset */
+ write_csr(dd, CCE_DC_CTRL, 0);
+
+ /* Set the LED off */
+ setextled(dd, 0);
+
+ /*
+ * Clear the QSFP reset.
+ * An FLR enforces a 0 on all out pins. The driver does not touch
+ * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low, holding
+ * anything plugged in constantly in reset if it pays attention
+ * to RESET_N.
+ * Prime examples of this are optical cables. Set all pins high.
+ * I2CCLK and I2CDAT will change per direction, and INT_N and
+ * MODPRS_N are input only and their value is ignored.
+ */
+ write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
+ write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
+ init_chip_resources(dd);
+ return ret;
+}
+
+static void init_early_variables(struct hfi1_devdata *dd)
+{
+ int i;
+
+ /* assign link credit variables */
+ dd->vau = CM_VAU;
+ dd->link_credits = CM_GLOBAL_CREDITS;
+ if (is_ax(dd))
+ dd->link_credits--;
+ dd->vcu = cu_to_vcu(hfi1_cu);
+ /* enough room for 8 MAD packets plus header - 17K */
+ dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
+ if (dd->vl15_init > dd->link_credits)
+ dd->vl15_init = dd->link_credits;
+
+ write_uninitialized_csrs_and_memories(dd);
+
+ if (HFI1_CAP_IS_KSET(PKEY_CHECK))
+ for (i = 0; i < dd->num_pports; i++) {
+ struct hfi1_pportdata *ppd = &dd->pport[i];
+
+ set_partition_keys(ppd);
+ }
+ init_sc2vl_tables(dd);
+}
+
+static void init_kdeth_qp(struct hfi1_devdata *dd)
+{
+ write_csr(dd, SEND_BTH_QP,
+ (RVT_KDETH_QP_PREFIX & SEND_BTH_QP_KDETH_QP_MASK) <<
+ SEND_BTH_QP_KDETH_QP_SHIFT);
+
+ write_csr(dd, RCV_BTH_QP,
+ (RVT_KDETH_QP_PREFIX & RCV_BTH_QP_KDETH_QP_MASK) <<
+ RCV_BTH_QP_KDETH_QP_SHIFT);
+}
+
+/**
+ * hfi1_get_qp_map - get qp map
+ * @dd: device data
+ * @idx: index to read
+ */
+u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx)
+{
+ u64 reg = read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8);
+
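+ /* each RcvQpMapTable CSR packs eight 8-bit context entries */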
+ reg >>= (idx % 8) * 8;
+ return reg;
+}
+
+/**
+ * init_qpmap_table - init qp map
+ * @dd: device data
+ * @first_ctxt: first context
+ * @last_ctxt: last context
+ *
+ * This routine sets the qpn mapping table that
+ * is indexed by qpn[8:1].
+ *
+ * The routine will round robin the 256 settings
+ * from first_ctxt to last_ctxt.
+ *
+ * The first/last looks ahead to having specialized
+ * receive contexts for mgmt and bypass. Normal
+ * verbs traffic is assumed to be on a range
+ * of receive contexts.
+ */
+static void init_qpmap_table(struct hfi1_devdata *dd,
+ u32 first_ctxt,
+ u32 last_ctxt)
+{
+ u64 reg = 0;
+ u64 regno = RCV_QP_MAP_TABLE;
+ int i;
+ u64 ctxt = first_ctxt;
+
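+ /*
+ * Each RcvQpMapTable CSR holds eight 8-bit context entries; a CSR is
+ * written out every eighth iteration.
+ */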
+ for (i = 0; i < 256; i++) {
+ reg |= ctxt << (8 * (i % 8));
+ ctxt++;
+ if (ctxt > last_ctxt)
+ ctxt = first_ctxt;
+ if (i % 8 == 7) {
+ write_csr(dd, regno, reg);
+ reg = 0;
+ regno += 8;
+ }
+ }
+
+ add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
+ | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
+}
+
+struct rsm_map_table {
+ u64 map[NUM_MAP_REGS];
+ unsigned int used;
+};
+
+struct rsm_rule_data {
+ u8 offset;
+ u8 pkt_type;
+ u32 field1_off;
+ u32 field2_off;
+ u32 index1_off;
+ u32 index1_width;
+ u32 index2_off;
+ u32 index2_width;
+ u32 mask1;
+ u32 value1;
+ u32 mask2;
+ u32 value2;
+};
+
+/*
+ * Return an initialized RMT map table for users to fill in. OK if it
+ * returns NULL, indicating no table.
+ */
+static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
+{
+ struct rsm_map_table *rmt;
+ u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is the default on A-step silicon */
+
+ rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
+ if (rmt) {
+ memset(rmt->map, rxcontext, sizeof(rmt->map));
+ rmt->used = 0;
+ }
+
+ return rmt;
+}
+
+/*
+ * Write the final RMT map table to the chip. OK if
+ * the table is NULL.
+ */
+static void complete_rsm_map_table(struct hfi1_devdata *dd,
+ struct rsm_map_table *rmt)
+{
+ int i;
+
+ if (rmt) {
+ /* write table to chip */
+ for (i = 0; i < NUM_MAP_REGS; i++)
+ write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
+
+ /* enable RSM */
+ add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
+ }
+}
+
+/* Return true if a receive side mapping rule is configured at rule_index */
+static bool has_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
+{
+ return read_csr(dd, RCV_RSM_CFG + (8 * rule_index)) != 0;
+}
+
+/*
+ * Add a receive side mapping rule.
+ */
+static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
+ struct rsm_rule_data *rrd)
+{
+ write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
+ (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
+ 1ull << rule_index | /* enable bit */
+ (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
+ write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
+ (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
+ (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
+ (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
+ (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
+ (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
+ (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
+ write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
+ (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
+ (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
+ (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
+ (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
+}
+
+/*
+ * Clear a receive side mapping rule.
+ */
+static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
+{
+ write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
+ write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
+ write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
+}
+
+/* return the number of RSM map table entries that will be used for QOS */
+static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp,
+ unsigned int *np)
+{
+ int i;
+ unsigned int m, n;
+ uint max_by_vl = 0;
+
+ /* is QOS active at all? */
+ if (n_krcv_queues < MIN_KERNEL_KCTXTS ||
+ num_vls == 1 ||
+ krcvqsset <= 1)
+ goto no_qos;
+
+ /* determine bits for qpn */
+ for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
+ if (krcvqs[i] > max_by_vl)
+ max_by_vl = krcvqs[i];
+ if (max_by_vl > 32)
+ goto no_qos;
+ m = ilog2(__roundup_pow_of_two(max_by_vl));
+
+ /* determine bits for vl */
+ n = ilog2(__roundup_pow_of_two(num_vls));
+
+ /* reject if too much is used */
+ if ((m + n) > 7)
+ goto no_qos;
+
+ if (mp)
+ *mp = m;
+ if (np)
+ *np = n;
+
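+ /* the QOS rule consumes 2^(m + n) RSM map table entries */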
+ return 1 << (m + n);
+
+no_qos:
+ if (mp)
+ *mp = 0;
+ if (np)
+ *np = 0;
+ return 0;
+}
+
+/**
+ * init_qos - init RX qos
+ * @dd: device data
+ * @rmt: RSM map table
+ *
+ * This routine initializes Rule 0 and the RSM map table to implement
+ * quality of service (qos).
+ *
+ * If all of the limit tests succeed, qos is applied based on the array
+ * interpretation of krcvqs where entry 0 is VL0.
+ *
+ * The number of vl bits (n) and the number of qpn bits (m) are computed to
+ * feed both the RSM map table and the single rule.
+ */
+static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
+{
+ struct rsm_rule_data rrd;
+ unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
+ unsigned int rmt_entries;
+ u64 reg;
+
+ if (!rmt)
+ goto bail;
+ rmt_entries = qos_rmt_entries(dd->n_krcv_queues - 1, &m, &n);
+ if (rmt_entries == 0)
+ goto bail;
+ qpns_per_vl = 1 << m;
+
+ /* enough room in the map table? */
+ rmt_entries = 1 << (m + n);
+ if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
+ goto bail;
+
+ /* add qos entries to the RSM map table */
+ for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
+ unsigned tctxt;
+
+ for (qpn = 0, tctxt = ctxt;
+ krcvqs[i] && qpn < qpns_per_vl; qpn++) {
+ unsigned idx, regoff, regidx;
+
+ /* generate the index the hardware will produce */
+ idx = rmt->used + ((qpn << n) ^ i);
+ regoff = (idx % 8) * 8;
+ regidx = idx / 8;
+ /* replace default with context number */
+ reg = rmt->map[regidx];
+ reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
+ << regoff);
+ reg |= (u64)(tctxt++) << regoff;
+ rmt->map[regidx] = reg;
+ if (tctxt == ctxt + krcvqs[i])
+ tctxt = ctxt;
+ }
+ ctxt += krcvqs[i];
+ }
+
+ rrd.offset = rmt->used;
+ rrd.pkt_type = 2;
+ rrd.field1_off = LRH_BTH_MATCH_OFFSET;
+ rrd.field2_off = LRH_SC_MATCH_OFFSET;
+ rrd.index1_off = LRH_SC_SELECT_OFFSET;
+ rrd.index1_width = n;
+ rrd.index2_off = QPN_SELECT_OFFSET;
+ rrd.index2_width = m + n;
+ rrd.mask1 = LRH_BTH_MASK;
+ rrd.value1 = LRH_BTH_VALUE;
+ rrd.mask2 = LRH_SC_MASK;
+ rrd.value2 = LRH_SC_VALUE;
+
+ /* add rule 0 */
+ add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
+
+ /* mark RSM map entries as used */
+ rmt->used += rmt_entries;
+ /* map everything else to the mcast/err/vl15 context */
+ init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
+ dd->qos_shift = n + 1;
+ return;
+bail:
+ dd->qos_shift = 1;
+ init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
+}
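+/*
+ * Illustrative arithmetic for the map index used above (assumed values):
+ * with m = 2, n = 2, rmt->used = 0, VL i = 1 and qpn = 3,
+ * idx = 0 + ((3 << 2) ^ 1) = 13, which lands in map register
+ * regidx = 13 / 8 = 1 at bit offset regoff = (13 % 8) * 8 = 40.
+ */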
+
+static void init_fecn_handling(struct hfi1_devdata *dd,
+ struct rsm_map_table *rmt)
+{
+ struct rsm_rule_data rrd;
+ u64 reg;
+ int i, idx, regoff, regidx, start;
+ u8 offset;
+ u32 total_cnt;
+
+ if (HFI1_CAP_IS_KSET(TID_RDMA))
+ /* Exclude context 0 */
+ start = 1;
+ else
+ start = dd->first_dyn_alloc_ctxt;
+
+ total_cnt = dd->num_rcv_contexts - start;
+
+ /* there needs to be enough room in the map table */
+ if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
+ dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n");
+ return;
+ }
+
+ /*
+ * RSM will extract the destination context as an index into the
+ * map table. The destination contexts are a sequential block
+ * in the range start...num_rcv_contexts-1 (inclusive).
+ * Map entries are accessed as offset + extracted value. Adjust
+ * the added offset so this sequence can be placed anywhere in
+ * the table - as long as the entries themselves do not wrap.
+ * There are only enough bits in offset for the table size, so
+ * start with that to allow for a "negative" offset.
+ */
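+ /*
+ * Illustrative arithmetic (assumed values, with a 256-entry map
+ * table): if rmt->used == 80 and start == 8, then
+ * offset = (u8)(256 + 80 - 8) = 72 and context 8 is looked up at
+ * map entry 72 + 8 = 80, i.e. the first entry after those already
+ * consumed by QOS. If instead rmt->used == 0 and start == 8, the
+ * "negative" offset (u8)248 places context 8 at entry 0, assuming
+ * the hardware addition wraps at the table size.
+ */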
+ offset = (u8)(NUM_MAP_ENTRIES + rmt->used - start);
+
+ for (i = start, idx = rmt->used; i < dd->num_rcv_contexts;
+ i++, idx++) {
+ /* replace with identity mapping */
+ regoff = (idx % 8) * 8;
+ regidx = idx / 8;
+ reg = rmt->map[regidx];
+ reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
+ reg |= (u64)i << regoff;
+ rmt->map[regidx] = reg;
+ }
+
+ /*
+ * For RSM intercept of Expected FECN packets:
+ * o packet type 0 - expected
+ * o match on F (bit 95), using select/match 1, and
+ * o match on SH (bit 133), using select/match 2.
+ *
+ * Use index 1 to extract the 8-bit receive context from DestQP
+ * (start at bit 64). Use that as the RSM map table index.
+ */
+ rrd.offset = offset;
+ rrd.pkt_type = 0;
+ rrd.field1_off = 95;
+ rrd.field2_off = 133;
+ rrd.index1_off = 64;
+ rrd.index1_width = 8;
+ rrd.index2_off = 0;
+ rrd.index2_width = 0;
+ rrd.mask1 = 1;
+ rrd.value1 = 1;
+ rrd.mask2 = 1;
+ rrd.value2 = 1;
+
+ /* add rule 1 */
+ add_rsm_rule(dd, RSM_INS_FECN, &rrd);
+
+ rmt->used += total_cnt;
+}
+
+static inline bool hfi1_is_rmt_full(int start, int spare)
+{
+ return (start + spare) > NUM_MAP_ENTRIES;
+}
+
+static bool hfi1_netdev_update_rmt(struct hfi1_devdata *dd)
+{
+ u8 i, j;
+ u8 ctx_id = 0;
+ u64 reg;
+ u32 regoff;
+ int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
+ int ctxt_count = hfi1_netdev_ctxt_count(dd);
+
+ /* We already have contexts mapped in RMT */
+ if (has_rsm_rule(dd, RSM_INS_VNIC) || has_rsm_rule(dd, RSM_INS_AIP)) {
+ dd_dev_info(dd, "Contexts are already mapped in RMT\n");
+ return true;
+ }
+
+ if (hfi1_is_rmt_full(rmt_start, NUM_NETDEV_MAP_ENTRIES)) {
+ dd_dev_err(dd, "Not enough RMT entries used = %d\n",
+ rmt_start);
+ return false;
+ }
+
+ dev_dbg(&(dd)->pcidev->dev, "RMT start = %d, end %d\n",
+ rmt_start,
+ rmt_start + NUM_NETDEV_MAP_ENTRIES);
+
+ /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
+ regoff = RCV_RSM_MAP_TABLE + (rmt_start / 8) * 8;
+ reg = read_csr(dd, regoff);
+ for (i = 0; i < NUM_NETDEV_MAP_ENTRIES; i++) {
+ /* Update map register with netdev context */
+ j = (rmt_start + i) % 8;
+ reg &= ~(0xffllu << (j * 8));
+ reg |= (u64)hfi1_netdev_get_ctxt(dd, ctx_id++)->ctxt << (j * 8);
+ /* Wrap up netdev ctx index */
+ ctx_id %= ctxt_count;
+ /* Write back map register */
+ if (j == 7 || ((i + 1) == NUM_NETDEV_MAP_ENTRIES)) {
+ dev_dbg(&(dd)->pcidev->dev,
+ "RMT[%d] =0x%llx\n",
+ regoff - RCV_RSM_MAP_TABLE, reg);
+
+ write_csr(dd, regoff, reg);
+ regoff += 8;
+ if (i < (NUM_NETDEV_MAP_ENTRIES - 1))
+ reg = read_csr(dd, regoff);
+ }
+ }
+
+ return true;
+}
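+/*
+ * Packing sketch for the loop above (illustrative values): each 64-bit map
+ * register holds 8 one-byte context entries, so RMT entry idx lives in
+ * register idx / 8 at byte idx % 8. E.g. with rmt_start == 20, the first
+ * netdev context is written to byte 4 of RCV_RSM_MAP_TABLE register 2.
+ */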
+
+static void hfi1_enable_rsm_rule(struct hfi1_devdata *dd,
+ int rule, struct rsm_rule_data *rrd)
+{
+ if (!hfi1_netdev_update_rmt(dd)) {
+ dd_dev_err(dd, "Failed to update RMT for RSM%d rule\n", rule);
+ return;
+ }
+
+ add_rsm_rule(dd, rule, rrd);
+ add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
+}
+
+void hfi1_init_aip_rsm(struct hfi1_devdata *dd)
+{
+ /*
+ * only the first user initialises the rule; later callers simply take
+ * another reference
+ */
+ if (atomic_fetch_inc(&dd->ipoib_rsm_usr_num) == 0) {
+ int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
+ struct rsm_rule_data rrd = {
+ .offset = rmt_start,
+ .pkt_type = IB_PACKET_TYPE,
+ .field1_off = LRH_BTH_MATCH_OFFSET,
+ .mask1 = LRH_BTH_MASK,
+ .value1 = LRH_BTH_VALUE,
+ .field2_off = BTH_DESTQP_MATCH_OFFSET,
+ .mask2 = BTH_DESTQP_MASK,
+ .value2 = BTH_DESTQP_VALUE,
+ .index1_off = DETH_AIP_SQPN_SELECT_OFFSET +
+ ilog2(NUM_NETDEV_MAP_ENTRIES),
+ .index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES),
+ .index2_off = DETH_AIP_SQPN_SELECT_OFFSET,
+ .index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES)
+ };
+
+ hfi1_enable_rsm_rule(dd, RSM_INS_AIP, &rrd);
+ }
+}
+
+/* Initialize RSM for VNIC */
+void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
+{
+ int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
+ struct rsm_rule_data rrd = {
+ /* Add rule for vnic */
+ .offset = rmt_start,
+ .pkt_type = 4,
+ /* Match 16B packets */
+ .field1_off = L2_TYPE_MATCH_OFFSET,
+ .mask1 = L2_TYPE_MASK,
+ .value1 = L2_16B_VALUE,
+ /* Match ETH L4 packets */
+ .field2_off = L4_TYPE_MATCH_OFFSET,
+ .mask2 = L4_16B_TYPE_MASK,
+ .value2 = L4_16B_ETH_VALUE,
+ /* Calc context from veswid and entropy */
+ .index1_off = L4_16B_HDR_VESWID_OFFSET,
+ .index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES),
+ .index2_off = L2_16B_ENTROPY_OFFSET,
+ .index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES)
+ };
+
+ hfi1_enable_rsm_rule(dd, RSM_INS_VNIC, &rrd);
+}
+
+void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
+{
+ clear_rsm_rule(dd, RSM_INS_VNIC);
+}
+
+void hfi1_deinit_aip_rsm(struct hfi1_devdata *dd)
+{
+ /* only actually clear the rule if it's the last user asking to do so */
+ if (atomic_fetch_add_unless(&dd->ipoib_rsm_usr_num, -1, 0) == 1)
+ clear_rsm_rule(dd, RSM_INS_AIP);
+}
+
+static int init_rxe(struct hfi1_devdata *dd)
+{
+ struct rsm_map_table *rmt;
+ u64 val;
+
+ /* enable all receive errors */
+ write_csr(dd, RCV_ERR_MASK, ~0ull);
+
+ rmt = alloc_rsm_map_table(dd);
+ if (!rmt)
+ return -ENOMEM;
+
+ /* set up QOS, including the QPN map table */
+ init_qos(dd, rmt);
+ init_fecn_handling(dd, rmt);
+ complete_rsm_map_table(dd, rmt);
+ /* record number of used rsm map entries for netdev */
+ hfi1_netdev_set_free_rmt_idx(dd, rmt->used);
+ kfree(rmt);
+
+ /*
+ * make sure RcvCtrl.RcvWcb <= PCIe Device Control
+ * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
+ * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
+ * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
+ * Max_Payload_Size set to its minimum of 128.
+ *
+ * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
+ * (64 bytes). Max_Payload_Size is possibly modified upward in
+ * tune_pcie_caps() which is called after this routine.
+ */
+
+ /* Have 16 bytes (4DW) of bypass header available in header queue */
+ val = read_csr(dd, RCV_BYPASS);
+ val &= ~RCV_BYPASS_HDR_SIZE_SMASK;
+ val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
+ RCV_BYPASS_HDR_SIZE_SHIFT);
+ write_csr(dd, RCV_BYPASS, val);
+ return 0;
+}
+
+static void init_other(struct hfi1_devdata *dd)
+{
+ /* enable all CCE errors */
+ write_csr(dd, CCE_ERR_MASK, ~0ull);
+ /* enable *some* Misc errors */
+ write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
+ /* enable all DC errors, except LCB */
+ write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
+ write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
+}
+
+/*
+ * Fill out the given AU table using the given CU. A CU is defined in terms
+ * of AUs. The table is an encoding: given the index, how many AUs does that
+ * represent?
+ *
+ * NOTE: Assumes that the register layout is the same for the
+ * local and remote tables.
+ */
+static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
+ u32 csr0to3, u32 csr4to7)
+{
+ write_csr(dd, csr0to3,
+ 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
+ 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
+ 2ull * cu <<
+ SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
+ 4ull * cu <<
+ SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
+ write_csr(dd, csr4to7,
+ 8ull * cu <<
+ SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
+ 16ull * cu <<
+ SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
+ 32ull * cu <<
+ SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
+ 64ull * cu <<
+ SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
+}
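+/*
+ * The encoding written above, spelled out (illustrative): the eight table
+ * entries hold 0, 1, 2*cu, 4*cu, 8*cu, 16*cu, 32*cu and 64*cu AUs, so with
+ * cu == 1 the table is {0, 1, 2, 4, 8, 16, 32, 64}.
+ */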
+
+static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
+{
+ assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
+ SEND_CM_LOCAL_AU_TABLE4_TO7);
+}
+
+void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
+{
+ assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
+ SEND_CM_REMOTE_AU_TABLE4_TO7);
+}
+
+static void init_txe(struct hfi1_devdata *dd)
+{
+ int i;
+
+ /* enable all PIO, SDMA, general, and Egress errors */
+ write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
+ write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
+ write_csr(dd, SEND_ERR_MASK, ~0ull);
+ write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
+
+ /* enable all per-context and per-SDMA engine errors */
+ for (i = 0; i < chip_send_contexts(dd); i++)
+ write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
+ for (i = 0; i < chip_sdma_engines(dd); i++)
+ write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
+
+ /* set the local CU to AU mapping */
+ assign_local_cm_au_table(dd, dd->vcu);
+
+ /*
+ * Set reasonable default for Credit Return Timer
+ * Don't set on Simulator - causes it to choke.
+ */
+ if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
+ write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
+}
+
+int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
+ u16 jkey)
+{
+ u8 hw_ctxt;
+ u64 reg;
+
+ if (!rcd || !rcd->sc)
+ return -EINVAL;
+
+ hw_ctxt = rcd->sc->hw_context;
+ reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
+ ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
+ SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
+ /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
+ if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
+ reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
+ write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
+ /*
+ * Enable send-side J_KEY integrity check, unless this is A0 h/w
+ */
+ if (!is_ax(dd)) {
+ reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
+ reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
+ write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
+ }
+
+ /* Enable J_KEY check on receive context. */
+ reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
+ ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
+ RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
+ write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
+
+ return 0;
+}
+
+int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
+{
+ u8 hw_ctxt;
+ u64 reg;
+
+ if (!rcd || !rcd->sc)
+ return -EINVAL;
+
+ hw_ctxt = rcd->sc->hw_context;
+ write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
+ /*
+ * Disable send-side J_KEY integrity check, unless this is A0 h/w.
+ * This check would not have been enabled for A0 h/w, see
+ * set_ctxt_jkey().
+ */
+ if (!is_ax(dd)) {
+ reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
+ reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
+ write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
+ }
+ /* Turn off the J_KEY on the receive side */
+ write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
+
+ return 0;
+}
+
+int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
+ u16 pkey)
+{
+ u8 hw_ctxt;
+ u64 reg;
+
+ if (!rcd || !rcd->sc)
+ return -EINVAL;
+
+ hw_ctxt = rcd->sc->hw_context;
+ reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
+ SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
+ write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
+ reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
+ reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
+ reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
+ write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
+
+ return 0;
+}
+
+int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
+{
+ u8 hw_ctxt;
+ u64 reg;
+
+ if (!ctxt || !ctxt->sc)
+ return -EINVAL;
+
+ hw_ctxt = ctxt->sc->hw_context;
+ reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
+ reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
+ write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
+ write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
+
+ return 0;
+}
+
+/*
+ * Start cleaning up the chip. Our clean up happens in multiple
+ * stages and this is just the first.
+ */
+void hfi1_start_cleanup(struct hfi1_devdata *dd)
+{
+ aspm_exit(dd);
+ free_cntrs(dd);
+ free_rcverr(dd);
+ finish_chip_resources(dd);
+}
+
+#define HFI_BASE_GUID(dev) \
+ ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
+
+/*
+ * Information can be shared between the two HFIs on the same ASIC
+ * in the same OS. This function finds the peer device and sets
+ * up a shared structure.
+ */
+static int init_asic_data(struct hfi1_devdata *dd)
+{
+ unsigned long index;
+ struct hfi1_devdata *peer;
+ struct hfi1_asic_data *asic_data;
+ int ret = 0;
+
+ /* pre-allocate the asic structure in case we are the first device */
+ asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
+ if (!asic_data)
+ return -ENOMEM;
+
+ xa_lock_irq(&hfi1_dev_table);
+ /* Find our peer device */
+ xa_for_each(&hfi1_dev_table, index, peer) {
+ if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(peer)) &&
+ dd->unit != peer->unit)
+ break;
+ }
+
+ if (peer) {
+ /* use already allocated structure */
+ dd->asic_data = peer->asic_data;
+ kfree(asic_data);
+ } else {
+ dd->asic_data = asic_data;
+ mutex_init(&dd->asic_data->asic_resource_mutex);
+ }
+ dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
+ xa_unlock_irq(&hfi1_dev_table);
+
+ /* first one through - set up i2c devices */
+ if (!peer)
+ ret = set_up_i2c(dd, dd->asic_data);
+
+ return ret;
+}
+
+/*
+ * Set dd->boardname. Use a generic name if a name is not returned from
+ * EFI variable space.
+ *
+ * Return 0 on success, -ENOMEM if space could not be allocated.
+ */
+static int obtain_boardname(struct hfi1_devdata *dd)
+{
+ /* generic board description */
+ const char generic[] =
+ "Cornelis Omni-Path Host Fabric Interface Adapter 100 Series";
+ unsigned long size;
+ int ret;
+
+ ret = read_hfi1_efi_var(dd, "description", &size,
+ (void **)&dd->boardname);
+ if (ret) {
+ dd_dev_info(dd, "Board description not found\n");
+ /* use generic description */
+ dd->boardname = kstrdup(generic, GFP_KERNEL);
+ if (!dd->boardname)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/*
+ * Check the interrupt registers to make sure that they are mapped correctly.
+ * It is intended to help the user identify any mismapping by the VMM when
+ * the driver is running in a VM. This function should only be called
+ * before interrupts are set up.
+ *
+ * Return 0 on success, -EINVAL on failure.
+ */
+static int check_int_registers(struct hfi1_devdata *dd)
+{
+ u64 reg;
+ u64 all_bits = ~(u64)0;
+ u64 mask;
+
+ /* Clear CceIntMask[0] to avoid raising any interrupts */
+ mask = read_csr(dd, CCE_INT_MASK);
+ write_csr(dd, CCE_INT_MASK, 0ull);
+ reg = read_csr(dd, CCE_INT_MASK);
+ if (reg)
+ goto err_exit;
+
+ /* Clear all interrupt status bits */
+ write_csr(dd, CCE_INT_CLEAR, all_bits);
+ reg = read_csr(dd, CCE_INT_STATUS);
+ if (reg)
+ goto err_exit;
+
+ /* Set all interrupt status bits */
+ write_csr(dd, CCE_INT_FORCE, all_bits);
+ reg = read_csr(dd, CCE_INT_STATUS);
+ if (reg != all_bits)
+ goto err_exit;
+
+ /* Restore the interrupt mask */
+ write_csr(dd, CCE_INT_CLEAR, all_bits);
+ write_csr(dd, CCE_INT_MASK, mask);
+
+ return 0;
+err_exit:
+ write_csr(dd, CCE_INT_MASK, mask);
+ dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
+ return -EINVAL;
+}
+
+/**
+ * hfi1_init_dd() - Initialize most of the dd structure.
+ * @dd: the dd device
+ *
+ * This is global, and is called directly at init to set up the
+ * chip-specific function pointers for later use.
+ */
+int hfi1_init_dd(struct hfi1_devdata *dd)
+{
+ struct pci_dev *pdev = dd->pcidev;
+ struct hfi1_pportdata *ppd;
+ u64 reg;
+ int i, ret;
+ static const char * const inames[] = { /* implementation names */
+ "RTL silicon",
+ "RTL VCS simulation",
+ "RTL FPGA emulation",
+ "Functional simulator"
+ };
+ struct pci_dev *parent = pdev->bus->self;
+ u32 sdma_engines = chip_sdma_engines(dd);
+
+ ppd = dd->pport;
+ for (i = 0; i < dd->num_pports; i++, ppd++) {
+ int vl;
+ /* init common fields */
+ hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
+ /* DC supports 4 link widths */
+ ppd->link_width_supported =
+ OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
+ OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
+ ppd->link_width_downgrade_supported =
+ ppd->link_width_supported;
+ /* start out enabling only 4X */
+ ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
+ ppd->link_width_downgrade_enabled =
+ ppd->link_width_downgrade_supported;
+ /* link width active is 0 when link is down */
+ /* link width downgrade active is 0 when link is down */
+
+ if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
+ num_vls > HFI1_MAX_VLS_SUPPORTED) {
+ dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
+ num_vls, HFI1_MAX_VLS_SUPPORTED);
+ num_vls = HFI1_MAX_VLS_SUPPORTED;
+ }
+ ppd->vls_supported = num_vls;
+ ppd->vls_operational = ppd->vls_supported;
+ /* Set the default MTU. */
+ for (vl = 0; vl < num_vls; vl++)
+ dd->vld[vl].mtu = hfi1_max_mtu;
+ dd->vld[15].mtu = MAX_MAD_PACKET;
+ /*
+ * Set the initial values to reasonable default, will be set
+ * for real when link is up.
+ */
+ ppd->overrun_threshold = 0x4;
+ ppd->phy_error_threshold = 0xf;
+ ppd->port_crc_mode_enabled = link_crc_mask;
+ /* initialize supported LTP CRC mode */
+ ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
+ /* initialize enabled LTP CRC mode */
+ ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
+ /* start in offline */
+ ppd->host_link_state = HLS_DN_OFFLINE;
+ init_vl_arb_caches(ppd);
+ }
+
+ /*
+ * Do remaining PCIe setup and save PCIe values in dd.
+ * Any error printing is already done by the init code.
+ * On return, we have the chip mapped.
+ */
+ ret = hfi1_pcie_ddinit(dd, pdev);
+ if (ret < 0)
+ goto bail_free;
+
+ /* Save PCI space registers to rewrite after device reset */
+ ret = save_pci_variables(dd);
+ if (ret < 0)
+ goto bail_cleanup;
+
+ dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
+ & CCE_REVISION_CHIP_REV_MAJOR_MASK;
+ dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
+ & CCE_REVISION_CHIP_REV_MINOR_MASK;
+
+ /*
+ * Check interrupt registers mapping if the driver has no access to
+ * the upstream component. In this case, it is likely that the driver
+ * is running in a VM.
+ */
+ if (!parent) {
+ ret = check_int_registers(dd);
+ if (ret)
+ goto bail_cleanup;
+ }
+
+ /*
+ * obtain the hardware ID - NOT related to unit, which is a
+ * software enumeration
+ */
+ reg = read_csr(dd, CCE_REVISION2);
+ dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
+ & CCE_REVISION2_HFI_ID_MASK;
+ /* the variable size will remove unwanted bits */
+ dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
+ dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
+ dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
+ dd->icode < ARRAY_SIZE(inames) ?
+ inames[dd->icode] : "unknown", (int)dd->irev);
+
+ /* speeds the hardware can support */
+ dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
+ /* speeds allowed to run at */
+ dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
+ /* give a reasonable active value, will be set on link up */
+ dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
+
+ /* fix up link widths for emulation _p */
+ ppd = dd->pport;
+ if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
+ ppd->link_width_supported =
+ ppd->link_width_enabled =
+ ppd->link_width_downgrade_supported =
+ ppd->link_width_downgrade_enabled =
+ OPA_LINK_WIDTH_1X;
+ }
+ /* ensure num_vls isn't larger than the number of sdma engines */
+ if (HFI1_CAP_IS_KSET(SDMA) && num_vls > sdma_engines) {
+ dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
+ num_vls, sdma_engines);
+ num_vls = sdma_engines;
+ ppd->vls_supported = sdma_engines;
+ ppd->vls_operational = ppd->vls_supported;
+ }
+
+ /*
+ * Convert the ns parameter to the 64 * cclocks used in the CSR.
+ * Limit the max if larger than the field holds. If timeout is
+ * non-zero, then the calculated field will be at least 1.
+ *
+ * Must be after icode is set up - the cclock rate depends
+ * on knowing the hardware being used.
+ */
+ dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
+ if (dd->rcv_intr_timeout_csr >
+ RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
+ dd->rcv_intr_timeout_csr =
+ RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
+ else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
+ dd->rcv_intr_timeout_csr = 1;
+
+ /* needs to be done before we look for the peer device */
+ read_guid(dd);
+
+ /* set up shared ASIC data with peer device */
+ ret = init_asic_data(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ /* obtain chip sizes, reset chip CSRs */
+ ret = init_chip(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ /* read in the PCIe link speed information */
+ ret = pcie_speeds(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ /* call before get_platform_config(), after init_chip_resources() */
+ ret = eprom_init(dd);
+ if (ret)
+ goto bail_free_rcverr;
+
+ /* Needs to be called before hfi1_firmware_init */
+ get_platform_config(dd);
+
+ /* read in firmware */
+ ret = hfi1_firmware_init(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ /*
+ * In general, the PCIe Gen3 transition must occur after the
+ * chip has been idled (so it won't initiate any PCIe transactions
+ * e.g. an interrupt) and before the driver changes any registers
+ * (the transition will reset the registers).
+ *
+ * In particular, place this call after:
+ * - init_chip() - the chip will not initiate any PCIe transactions
+ * - pcie_speeds() - reads the current link speed
+ * - hfi1_firmware_init() - the needed firmware is ready to be
+ * downloaded
+ */
+ ret = do_pcie_gen3_transition(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ /*
+ * This should probably occur in hfi1_pcie_init(), but historically
+ * occurs after the do_pcie_gen3_transition() code.
+ */
+ tune_pcie_caps(dd);
+
+ /* start setting dd values and adjusting CSRs */
+ init_early_variables(dd);
+
+ parse_platform_config(dd);
+
+ ret = obtain_boardname(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ snprintf(dd->boardversion, BOARD_VERS_MAX,
+ "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
+ HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
+ (u32)dd->majrev,
+ (u32)dd->minrev,
+ (dd->revision >> CCE_REVISION_SW_SHIFT)
+ & CCE_REVISION_SW_MASK);
+
+ /* alloc VNIC/AIP rx data */
+ ret = hfi1_alloc_rx(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ ret = set_up_context_variables(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ /* set initial RXE CSRs */
+ ret = init_rxe(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ /* set initial TXE CSRs */
+ init_txe(dd);
+ /* set initial non-RXE, non-TXE CSRs */
+ init_other(dd);
+ /* set up KDETH QP prefix in both RX and TX CSRs */
+ init_kdeth_qp(dd);
+
+ ret = hfi1_dev_affinity_init(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ /* send contexts must be set up before receive contexts */
+ ret = init_send_contexts(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ ret = hfi1_create_kctxts(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ /*
+ * Initialize aspm, to be done after gen3 transition and setting up
+ * contexts and before enabling interrupts
+ */
+ aspm_init(dd);
+
+ ret = init_pervl_scs(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ /* sdma init */
+ for (i = 0; i < dd->num_pports; ++i) {
+ ret = sdma_init(dd, i);
+ if (ret)
+ goto bail_cleanup;
+ }
+
+ /* use contexts created by hfi1_create_kctxts */
+ ret = set_up_interrupts(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ ret = hfi1_comp_vectors_set_up(dd);
+ if (ret)
+ goto bail_clear_intr;
+
+ /* set up LCB access - must be after set_up_interrupts() */
+ init_lcb_access(dd);
+
+ /*
+ * Serial number is created from the base guid:
+ * [27:24] = base guid [38:35]
+ * [23: 0] = base guid [23: 0]
+ */
+ snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
+ (dd->base_guid & 0xFFFFFF) |
+ ((dd->base_guid >> 11) & 0xF000000));
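+ /*
+ * Worked example with a hypothetical base_guid of 0x0000001812345678:
+ * guid[23:0] = 0x345678 and guid[38:35] = 0x3, giving the serial
+ * string "0x03345678".
+ */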
+
+ dd->oui1 = dd->base_guid >> 56 & 0xFF;
+ dd->oui2 = dd->base_guid >> 48 & 0xFF;
+ dd->oui3 = dd->base_guid >> 40 & 0xFF;
+
+ ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
+ if (ret)
+ goto bail_clear_intr;
+
+ thermal_init(dd);
+
+ ret = init_cntrs(dd);
+ if (ret)
+ goto bail_clear_intr;
+
+ ret = init_rcverr(dd);
+ if (ret)
+ goto bail_free_cntrs;
+
+ init_completion(&dd->user_comp);
+
+ /* The user refcount starts with one to indicate an active device */
+ refcount_set(&dd->user_refcount, 1);
+
+ goto bail;
+
+bail_free_rcverr:
+ free_rcverr(dd);
+bail_free_cntrs:
+ free_cntrs(dd);
+bail_clear_intr:
+ hfi1_comp_vectors_clean_up(dd);
+ msix_clean_up_interrupts(dd);
+bail_cleanup:
+ hfi1_free_rx(dd);
+ hfi1_pcie_ddcleanup(dd);
+bail_free:
+ hfi1_free_devdata(dd);
+bail:
+ return ret;
+}
+
+static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
+ u32 dw_len)
+{
+ u32 delta_cycles;
+ u32 current_egress_rate = ppd->current_egress_rate;
+ /* rates here are in units of 10^6 bits/sec */
+
+ if (desired_egress_rate == -1)
+ return 0; /* shouldn't happen */
+
+ if (desired_egress_rate >= current_egress_rate)
+ return 0; /* we can't help go faster, only slower */
+
+ delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
+ egress_cycles(dw_len * 4, current_egress_rate);
+
+ return (u16)delta_cycles;
+}
+
+/**
+ * create_pbc - build a pbc for transmission
+ * @ppd: info of the physical HFI port
+ * @flags: special case flags OR'd into the built PBC
+ * @srate_mbs: static rate
+ * @vl: vl
+ * @dw_len: dword length (header words + data words + pbc words)
+ *
+ * Create a PBC with the given flags, rate, VL, and length.
+ *
+ * NOTE: The PBC created will not insert any HCRC - all callers but one are
+ * for verbs, which does not use this PSM feature. The lone other caller
+ * is for the diagnostic interface which calls this if the user does not
+ * supply their own PBC.
+ */
+u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
+ u32 dw_len)
+{
+ u64 pbc, delay = 0;
+
+ if (unlikely(srate_mbs))
+ delay = delay_cycles(ppd, srate_mbs, dw_len);
+
+ pbc = flags
+ | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
+ | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
+ | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
+ | (dw_len & PBC_LENGTH_DWS_MASK)
+ << PBC_LENGTH_DWS_SHIFT;
+
+ return pbc;
+}
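+/*
+ * Usage sketch (hypothetical values, not a caller in this patch): a sender
+ * that does not want static rate control could build a PBC for VL 0 and a
+ * 10-dword packet with
+ *
+ * pbc = create_pbc(ppd, 0, 0, 0, 10);
+ *
+ * With srate_mbs == 0 no delay cycles are added, and PBC_IHCRC_NONE keeps
+ * HCRC insertion disabled as described above.
+ */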
+
+#define SBUS_THERMAL 0x4f
+#define SBUS_THERM_MONITOR_MODE 0x1
+
+#define THERM_FAILURE(dev, ret, reason) \
+ dd_dev_err((dd), \
+ "Thermal sensor initialization failed: %s (%d)\n", \
+ (reason), (ret))
+
+/*
+ * Initialize the thermal sensor.
+ *
+ * After initialization, enable polling of the thermal sensor through
+ * the SBus interface. For this to work, the SBus Master firmware has
+ * to be loaded, because the HW polling logic uses SBus interrupts,
+ * which are not supported by the default firmware. Otherwise, no data
+ * will be returned through the ASIC_STS_THERM CSR.
+ */
+static int thermal_init(struct hfi1_devdata *dd)
+{
+ int ret = 0;
+
+ if (dd->icode != ICODE_RTL_SILICON ||
+ check_chip_resource(dd, CR_THERM_INIT, NULL))
+ return ret;
+
+ ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
+ if (ret) {
+ THERM_FAILURE(dd, ret, "Acquire SBus");
+ return ret;
+ }
+
+ dd_dev_info(dd, "Initializing thermal sensor\n");
+ /* Disable polling of thermal readings */
+ write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
+ msleep(100);
+ /* Thermal Sensor Initialization */
+ /* Step 1: Reset the Thermal SBus Receiver */
+ ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
+ RESET_SBUS_RECEIVER, 0);
+ if (ret) {
+ THERM_FAILURE(dd, ret, "Bus Reset");
+ goto done;
+ }
+ /* Step 2: Set Reset bit in Thermal block */
+ ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
+ WRITE_SBUS_RECEIVER, 0x1);
+ if (ret) {
+ THERM_FAILURE(dd, ret, "Therm Block Reset");
+ goto done;
+ }
+ /* Step 3: Write clock divider value (100MHz -> 2MHz) */
+ ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
+ WRITE_SBUS_RECEIVER, 0x32);
+ if (ret) {
+ THERM_FAILURE(dd, ret, "Write Clock Div");
+ goto done;
+ }
+ /* Step 4: Select temperature mode */
+ ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
+ WRITE_SBUS_RECEIVER,
+ SBUS_THERM_MONITOR_MODE);
+ if (ret) {
+ THERM_FAILURE(dd, ret, "Write Mode Sel");
+ goto done;
+ }
+ /* Step 5: De-assert block reset and start conversion */
+ ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
+ WRITE_SBUS_RECEIVER, 0x2);
+ if (ret) {
+ THERM_FAILURE(dd, ret, "Write Reset Deassert");
+ goto done;
+ }
+ /* Step 5.1: Wait for first conversion (21.5ms per spec) */
+ msleep(22);
+
+ /* Enable polling of thermal readings */
+ write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
+
+ /* Set initialized flag */
+ ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
+ if (ret)
+ THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
+
+done:
+ release_chip_resource(dd, CR_SBUS);
+ return ret;
+}
+
+static void handle_temp_err(struct hfi1_devdata *dd)
+{
+ struct hfi1_pportdata *ppd = &dd->pport[0];
+ /*
+ * Thermal Critical Interrupt
+ * Put the device into forced freeze mode, take link down to
+ * offline, and put DC into reset.
+ */
+ dd_dev_emerg(dd,
+ "Critical temperature reached! Forcing device into freeze mode!\n");
+ dd->flags |= HFI1_FORCED_FREEZE;
+ start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
+ /*
+ * Shut DC down as much and as quickly as possible.
+ *
+ * Step 1: Take the link down to OFFLINE. This will cause the
+ * 8051 to put the Serdes in reset. However, we don't want to
+ * go through the entire link state machine since we want to
+ * shutdown ASAP. Furthermore, this is not a graceful shutdown
+ * but rather an attempt to save the chip.
+ * Code below is almost the same as quiet_serdes() but avoids
+ * all the extra work and the sleeps.
+ */
+ ppd->driver_link_ready = 0;
+ ppd->link_enabled = 0;
+ set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
+ PLS_OFFLINE);
+ /*
+ * Step 2: Shutdown LCB and 8051
+ * After shutdown, do not restore DC_CFG_RESET value.
+ */
+ dc_shutdown(dd);
+}
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
new file mode 100644
index 000000000000..6992f6d40255
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -0,0 +1,1428 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2015 - 2020 Intel Corporation.
+ */
+
+#ifndef _CHIP_H
+#define _CHIP_H
+/*
+ * This file contains all of the defines that are specific to the HFI chip
+ */
+
+/* sizes */
+#define BITS_PER_REGISTER (BITS_PER_BYTE * sizeof(u64))
+#define NUM_INTERRUPT_SOURCES 768
+#define RXE_NUM_CONTEXTS 160
+#define RXE_PER_CONTEXT_SIZE 0x1000 /* 4k */
+#define RXE_NUM_TID_FLOWS 32
+#define RXE_NUM_DATA_VL 8
+#define TXE_NUM_CONTEXTS 160
+#define TXE_NUM_SDMA_ENGINES 16
+#define NUM_CONTEXTS_PER_SET 8
+#define VL_ARB_HIGH_PRIO_TABLE_SIZE 16
+#define VL_ARB_LOW_PRIO_TABLE_SIZE 16
+#define VL_ARB_TABLE_SIZE 16
+#define TXE_NUM_32_BIT_COUNTER 7
+#define TXE_NUM_64_BIT_COUNTER 30
+#define TXE_NUM_DATA_VL 8
+#define TXE_PIO_SIZE (32 * 0x100000) /* 32 MB */
+#define PIO_BLOCK_SIZE 64 /* bytes */
+#define SDMA_BLOCK_SIZE 64 /* bytes */
+#define RCV_BUF_BLOCK_SIZE 64 /* bytes */
+#define PIO_CMASK 0x7ff /* counter mask for free and fill counters */
+#define MAX_EAGER_ENTRIES 2048 /* max receive eager entries */
+#define MAX_TID_PAIR_ENTRIES 1024 /* max receive expected pairs */
+/*
+ * Virtual Allocation Unit (vAU), defined as AU = 8 * 2^vAU bytes. The AU is
+ * fixed at 64 bytes for all generation one devices.
+ */
+#define CM_VAU 3
+/* HFI link credit count, AKA receive buffer depth (RBUF_DEPTH) */
+#define CM_GLOBAL_CREDITS 0x880
+/* Number of PKey entries in the HW */
+#define MAX_PKEY_VALUES 16
+
+#include "chip_registers.h"
+
+#define RXE_PER_CONTEXT_USER (RXE + RXE_PER_CONTEXT_OFFSET)
+#define TXE_PIO_SEND (TXE + TXE_PIO_SEND_OFFSET)
+
+/* PBC flags */
+#define PBC_INTR BIT_ULL(31)
+#define PBC_DC_INFO_SHIFT (30)
+#define PBC_DC_INFO BIT_ULL(PBC_DC_INFO_SHIFT)
+#define PBC_TEST_EBP BIT_ULL(29)
+#define PBC_PACKET_BYPASS BIT_ULL(28)
+#define PBC_CREDIT_RETURN BIT_ULL(25)
+#define PBC_INSERT_BYPASS_ICRC BIT_ULL(24)
+#define PBC_TEST_BAD_ICRC BIT_ULL(23)
+#define PBC_FECN BIT_ULL(22)
+
+/* PbcInsertHcrc field settings */
+#define PBC_IHCRC_LKDETH 0x0 /* insert @ local KDETH offset */
+#define PBC_IHCRC_GKDETH 0x1 /* insert @ global KDETH offset */
+#define PBC_IHCRC_NONE 0x2 /* no HCRC inserted */
+
+/* PBC fields */
+#define PBC_STATIC_RATE_CONTROL_COUNT_SHIFT 32
+#define PBC_STATIC_RATE_CONTROL_COUNT_MASK 0xffffull
+#define PBC_STATIC_RATE_CONTROL_COUNT_SMASK \
+ (PBC_STATIC_RATE_CONTROL_COUNT_MASK << \
+ PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
+
+#define PBC_INSERT_HCRC_SHIFT 26
+#define PBC_INSERT_HCRC_MASK 0x3ull
+#define PBC_INSERT_HCRC_SMASK \
+ (PBC_INSERT_HCRC_MASK << PBC_INSERT_HCRC_SHIFT)
+
+#define PBC_VL_SHIFT 12
+#define PBC_VL_MASK 0xfull
+#define PBC_VL_SMASK (PBC_VL_MASK << PBC_VL_SHIFT)
+
+#define PBC_LENGTH_DWS_SHIFT 0
+#define PBC_LENGTH_DWS_MASK 0xfffull
+#define PBC_LENGTH_DWS_SMASK \
+ (PBC_LENGTH_DWS_MASK << PBC_LENGTH_DWS_SHIFT)
+
+/* Credit Return Fields */
+#define CR_COUNTER_SHIFT 0
+#define CR_COUNTER_MASK 0x7ffull
+#define CR_COUNTER_SMASK (CR_COUNTER_MASK << CR_COUNTER_SHIFT)
+
+#define CR_STATUS_SHIFT 11
+#define CR_STATUS_MASK 0x1ull
+#define CR_STATUS_SMASK (CR_STATUS_MASK << CR_STATUS_SHIFT)
+
+#define CR_CREDIT_RETURN_DUE_TO_PBC_SHIFT 12
+#define CR_CREDIT_RETURN_DUE_TO_PBC_MASK 0x1ull
+#define CR_CREDIT_RETURN_DUE_TO_PBC_SMASK \
+ (CR_CREDIT_RETURN_DUE_TO_PBC_MASK << \
+ CR_CREDIT_RETURN_DUE_TO_PBC_SHIFT)
+
+#define CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SHIFT 13
+#define CR_CREDIT_RETURN_DUE_TO_THRESHOLD_MASK 0x1ull
+#define CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SMASK \
+ (CR_CREDIT_RETURN_DUE_TO_THRESHOLD_MASK << \
+ CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SHIFT)
+
+#define CR_CREDIT_RETURN_DUE_TO_ERR_SHIFT 14
+#define CR_CREDIT_RETURN_DUE_TO_ERR_MASK 0x1ull
+#define CR_CREDIT_RETURN_DUE_TO_ERR_SMASK \
+ (CR_CREDIT_RETURN_DUE_TO_ERR_MASK << \
+ CR_CREDIT_RETURN_DUE_TO_ERR_SHIFT)
+
+#define CR_CREDIT_RETURN_DUE_TO_FORCE_SHIFT 15
+#define CR_CREDIT_RETURN_DUE_TO_FORCE_MASK 0x1ull
+#define CR_CREDIT_RETURN_DUE_TO_FORCE_SMASK \
+ (CR_CREDIT_RETURN_DUE_TO_FORCE_MASK << \
+ CR_CREDIT_RETURN_DUE_TO_FORCE_SHIFT)
+
+/* Specific IRQ sources */
+#define CCE_ERR_INT 0
+#define RXE_ERR_INT 1
+#define MISC_ERR_INT 2
+#define PIO_ERR_INT 4
+#define SDMA_ERR_INT 5
+#define EGRESS_ERR_INT 6
+#define TXE_ERR_INT 7
+#define PBC_INT 240
+#define GPIO_ASSERT_INT 241
+#define QSFP1_INT 242
+#define QSFP2_INT 243
+#define TCRIT_INT 244
+
+/* interrupt source ranges */
+#define IS_FIRST_SOURCE CCE_ERR_INT
+#define IS_GENERAL_ERR_START 0
+#define IS_SDMAENG_ERR_START 16
+#define IS_SENDCTXT_ERR_START 32
+#define IS_SDMA_START 192
+#define IS_SDMA_PROGRESS_START 208
+#define IS_SDMA_IDLE_START 224
+#define IS_VARIOUS_START 240
+#define IS_DC_START 248
+#define IS_RCVAVAIL_START 256
+#define IS_RCVURGENT_START 416
+#define IS_SENDCREDIT_START 576
+#define IS_RESERVED_START 736
+#define IS_LAST_SOURCE 767
+
+/* derived interrupt source values */
+#define IS_GENERAL_ERR_END 7
+#define IS_SDMAENG_ERR_END 31
+#define IS_SENDCTXT_ERR_END 191
+#define IS_SDMA_END 207
+#define IS_SDMA_PROGRESS_END 223
+#define IS_SDMA_IDLE_END 239
+#define IS_VARIOUS_END 244
+#define IS_DC_END 255
+#define IS_RCVAVAIL_END 415
+#define IS_RCVURGENT_END 575
+#define IS_SENDCREDIT_END 735
+#define IS_RESERVED_END IS_LAST_SOURCE
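+/*
+ * Reading the ranges above (illustrative): interrupt source 16 + N is the
+ * error source for SDMA engine N, and source 256 + N is the RcvAvail source
+ * for receive context N; source 244 (TCRIT_INT) sits in the "various" range.
+ */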
+
+/* DCC_CFG_PORT_CONFIG logical link states */
+#define LSTATE_DOWN 0x1
+#define LSTATE_INIT 0x2
+#define LSTATE_ARMED 0x3
+#define LSTATE_ACTIVE 0x4
+
+/* DCC_CFG_RESET reset states */
+#define LCB_RX_FPE_TX_FPE_INTO_RESET (DCC_CFG_RESET_RESET_LCB | \
+ DCC_CFG_RESET_RESET_TX_FPE | \
+ DCC_CFG_RESET_RESET_RX_FPE | \
+ DCC_CFG_RESET_ENABLE_CCLK_BCC)
+ /* 0x17 */
+
+#define LCB_RX_FPE_TX_FPE_OUT_OF_RESET DCC_CFG_RESET_ENABLE_CCLK_BCC /* 0x10 */
+
+/* DC8051_STS_CUR_STATE port values (physical link states) */
+#define PLS_DISABLED 0x30
+#define PLS_OFFLINE 0x90
+#define PLS_OFFLINE_QUIET 0x90
+#define PLS_OFFLINE_PLANNED_DOWN_INFORM 0x91
+#define PLS_OFFLINE_READY_TO_QUIET_LT 0x92
+#define PLS_OFFLINE_REPORT_FAILURE 0x93
+#define PLS_OFFLINE_READY_TO_QUIET_BCC 0x94
+#define PLS_OFFLINE_QUIET_DURATION 0x95
+#define PLS_POLLING 0x20
+#define PLS_POLLING_QUIET 0x20
+#define PLS_POLLING_ACTIVE 0x21
+#define PLS_CONFIGPHY 0x40
+#define PLS_CONFIGPHY_DEBOUCE 0x40
+#define PLS_CONFIGPHY_ESTCOMM 0x41
+#define PLS_CONFIGPHY_ESTCOMM_TXRX_HUNT 0x42
+#define PLS_CONFIGPHY_ESTCOMM_LOCAL_COMPLETE 0x43
+#define PLS_CONFIGPHY_OPTEQ 0x44
+#define PLS_CONFIGPHY_OPTEQ_OPTIMIZING 0x44
+#define PLS_CONFIGPHY_OPTEQ_LOCAL_COMPLETE 0x45
+#define PLS_CONFIGPHY_VERIFYCAP 0x46
+#define PLS_CONFIGPHY_VERIFYCAP_EXCHANGE 0x46
+#define PLS_CONFIGPHY_VERIFYCAP_LOCAL_COMPLETE 0x47
+#define PLS_CONFIGLT 0x48
+#define PLS_CONFIGLT_CONFIGURE 0x48
+#define PLS_CONFIGLT_LINK_TRANSFER_ACTIVE 0x49
+#define PLS_LINKUP 0x50
+#define PLS_PHYTEST 0xB0
+#define PLS_INTERNAL_SERDES_LOOPBACK 0xe1
+#define PLS_QUICK_LINKUP 0xe2
+
+/* DC_DC8051_CFG_HOST_CMD_0.REQ_TYPE - 8051 host commands */
+#define HCMD_LOAD_CONFIG_DATA 0x01
+#define HCMD_READ_CONFIG_DATA 0x02
+#define HCMD_CHANGE_PHY_STATE 0x03
+#define HCMD_SEND_LCB_IDLE_MSG 0x04
+#define HCMD_MISC 0x05
+#define HCMD_READ_LCB_IDLE_MSG 0x06
+#define HCMD_READ_LCB_CSR 0x07
+#define HCMD_WRITE_LCB_CSR 0x08
+#define HCMD_INTERFACE_TEST 0xff
+
+/* DC_DC8051_CFG_HOST_CMD_1.RETURN_CODE - 8051 host command return */
+#define HCMD_SUCCESS 2
+
+/* DC_DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR - error flags */
+#define SPICO_ROM_FAILED BIT(0)
+#define UNKNOWN_FRAME BIT(1)
+#define TARGET_BER_NOT_MET BIT(2)
+#define FAILED_SERDES_INTERNAL_LOOPBACK BIT(3)
+#define FAILED_SERDES_INIT BIT(4)
+#define FAILED_LNI_POLLING BIT(5)
+#define FAILED_LNI_DEBOUNCE BIT(6)
+#define FAILED_LNI_ESTBCOMM BIT(7)
+#define FAILED_LNI_OPTEQ BIT(8)
+#define FAILED_LNI_VERIFY_CAP1 BIT(9)
+#define FAILED_LNI_VERIFY_CAP2 BIT(10)
+#define FAILED_LNI_CONFIGLT BIT(11)
+#define HOST_HANDSHAKE_TIMEOUT BIT(12)
+#define EXTERNAL_DEVICE_REQ_TIMEOUT BIT(13)
+
+#define FAILED_LNI (FAILED_LNI_POLLING | FAILED_LNI_DEBOUNCE \
+ | FAILED_LNI_ESTBCOMM | FAILED_LNI_OPTEQ \
+ | FAILED_LNI_VERIFY_CAP1 \
+ | FAILED_LNI_VERIFY_CAP2 \
+ | FAILED_LNI_CONFIGLT | HOST_HANDSHAKE_TIMEOUT \
+ | EXTERNAL_DEVICE_REQ_TIMEOUT)
+
+/* DC_DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG - host message flags */
+#define HOST_REQ_DONE BIT(0)
+#define BC_PWR_MGM_MSG BIT(1)
+#define BC_SMA_MSG BIT(2)
+#define BC_BCC_UNKNOWN_MSG BIT(3)
+#define BC_IDLE_UNKNOWN_MSG BIT(4)
+#define EXT_DEVICE_CFG_REQ BIT(5)
+#define VERIFY_CAP_FRAME BIT(6)
+#define LINKUP_ACHIEVED BIT(7)
+#define LINK_GOING_DOWN BIT(8)
+#define LINK_WIDTH_DOWNGRADED BIT(9)
+
+/* DC_DC8051_CFG_EXT_DEV_1.REQ_TYPE - 8051 host requests */
+#define HREQ_LOAD_CONFIG 0x01
+#define HREQ_SAVE_CONFIG 0x02
+#define HREQ_READ_CONFIG 0x03
+#define HREQ_SET_TX_EQ_ABS 0x04
+#define HREQ_SET_TX_EQ_REL 0x05
+#define HREQ_ENABLE 0x06
+#define HREQ_LCB_RESET 0x07
+#define HREQ_CONFIG_DONE 0xfe
+#define HREQ_INTERFACE_TEST 0xff
+
+/* DC_DC8051_CFG_EXT_DEV_0.RETURN_CODE - 8051 host request return codes */
+#define HREQ_INVALID 0x01
+#define HREQ_SUCCESS 0x02
+#define HREQ_NOT_SUPPORTED 0x03
+#define HREQ_FEATURE_NOT_SUPPORTED 0x04 /* request specific feature */
+#define HREQ_REQUEST_REJECTED 0xfe
+#define HREQ_EXECUTION_ONGOING 0xff
+
+/* MISC host command functions */
+#define HCMD_MISC_REQUEST_LCB_ACCESS 0x1
+#define HCMD_MISC_GRANT_LCB_ACCESS 0x2
+
+/* idle flit message types */
+#define IDLE_PHYSICAL_LINK_MGMT 0x1
+#define IDLE_CRU 0x2
+#define IDLE_SMA 0x3
+#define IDLE_POWER_MGMT 0x4
+
+/* idle flit message send fields (both send and read) */
+#define IDLE_PAYLOAD_MASK 0xffffffffffull /* 40 bits */
+#define IDLE_PAYLOAD_SHIFT 8
+#define IDLE_MSG_TYPE_MASK 0xf
+#define IDLE_MSG_TYPE_SHIFT 0
+
+/* idle flit message read fields */
+#define READ_IDLE_MSG_TYPE_MASK 0xf
+#define READ_IDLE_MSG_TYPE_SHIFT 0
+
+/* SMA idle flit payload commands */
+#define SMA_IDLE_ARM 1
+#define SMA_IDLE_ACTIVE 2
+
+/* DC_DC8051_CFG_MODE.GENERAL bits */
+#define DISABLE_SELF_GUID_CHECK 0x2
+
+/* Bad L2 frame error code */
+#define BAD_L2_ERR 0x6
+
+/*
+ * Eager buffer minimum and maximum sizes supported by the hardware.
+ * All power-of-two sizes in between are supported as well.
+ * MAX_EAGER_BUFFER_TOTAL is the maximum amount of memory
+ * allocatable for the eager buffer of a single context. All others
+ * are limits for the RcvArray entries.
+ */
+#define MIN_EAGER_BUFFER (4 * 1024)
+#define MAX_EAGER_BUFFER (256 * 1024)
+#define MAX_EAGER_BUFFER_TOTAL (64 * (1 << 20)) /* max per ctxt 64MB */
+#define MAX_EXPECTED_BUFFER (2048 * 1024)
+#define HFI1_MIN_HDRQ_EGRBUF_CNT 32
+#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
+
+/*
+ * Receive expected base and count and eager base and count increment -
+ * the CSR fields hold multiples of this value.
+ */
+#define RCV_SHIFT 3
+#define RCV_INCREMENT BIT(RCV_SHIFT)
+
+/*
+ * Receive header queue entry increment - the CSR holds multiples of
+ * this value.
+ */
+#define HDRQ_SIZE_SHIFT 5
+#define HDRQ_INCREMENT BIT(HDRQ_SIZE_SHIFT)
+
+/*
+ * Freeze handling flags
+ */
+#define FREEZE_ABORT 0x01 /* do not do recovery */
+#define FREEZE_SELF 0x02 /* initiate the freeze */
+#define FREEZE_LINK_DOWN 0x04 /* link is down */
+
+/*
+ * Chip implementation codes.
+ */
+#define ICODE_RTL_SILICON 0x00
+#define ICODE_RTL_VCS_SIMULATION 0x01
+#define ICODE_FPGA_EMULATION 0x02
+#define ICODE_FUNCTIONAL_SIMULATOR 0x03
+
+/*
+ * 8051 data memory size.
+ */
+#define DC8051_DATA_MEM_SIZE 0x1000
+
+/*
+ * 8051 firmware registers
+ */
+#define NUM_GENERAL_FIELDS 0x17
+#define NUM_LANE_FIELDS 0x8
+
+/* 8051 general register Field IDs */
+#define LINK_OPTIMIZATION_SETTINGS 0x00
+#define LINK_TUNING_PARAMETERS 0x02
+#define DC_HOST_COMM_SETTINGS 0x03
+#define TX_SETTINGS 0x06
+#define VERIFY_CAP_LOCAL_PHY 0x07
+#define VERIFY_CAP_LOCAL_FABRIC 0x08
+#define VERIFY_CAP_LOCAL_LINK_MODE 0x09
+#define LOCAL_DEVICE_ID 0x0a
+#define RESERVED_REGISTERS 0x0b
+#define LOCAL_LNI_INFO 0x0c
+#define REMOTE_LNI_INFO 0x0d
+#define MISC_STATUS 0x0e
+#define VERIFY_CAP_REMOTE_PHY 0x0f
+#define VERIFY_CAP_REMOTE_FABRIC 0x10
+#define VERIFY_CAP_REMOTE_LINK_WIDTH 0x11
+#define LAST_LOCAL_STATE_COMPLETE 0x12
+#define LAST_REMOTE_STATE_COMPLETE 0x13
+#define LINK_QUALITY_INFO 0x14
+#define REMOTE_DEVICE_ID 0x15
+#define LINK_DOWN_REASON 0x16 /* first byte of offset 0x16 */
+#define VERSION_PATCH 0x16 /* last byte of offset 0x16 */
+
+/* 8051 lane specific register field IDs */
+#define TX_EQ_SETTINGS 0x00
+#define CHANNEL_LOSS_SETTINGS 0x05
+
+/* Lane ID for general configuration registers */
+#define GENERAL_CONFIG 4
+
+/* LINK_TUNING_PARAMETERS fields */
+#define TUNING_METHOD_SHIFT 24
+
+/* LINK_OPTIMIZATION_SETTINGS fields */
+#define ENABLE_EXT_DEV_CONFIG_SHIFT 24
+
+/* LOAD_DATA 8051 command shifts and fields */
+#define LOAD_DATA_FIELD_ID_SHIFT 40
+#define LOAD_DATA_FIELD_ID_MASK 0xfull
+#define LOAD_DATA_LANE_ID_SHIFT 32
+#define LOAD_DATA_LANE_ID_MASK 0xfull
+#define LOAD_DATA_DATA_SHIFT 0x0
+#define LOAD_DATA_DATA_MASK 0xffffffffull
+
+/* READ_DATA 8051 command shifts and fields */
+#define READ_DATA_FIELD_ID_SHIFT 40
+#define READ_DATA_FIELD_ID_MASK 0xffull
+#define READ_DATA_LANE_ID_SHIFT 32
+#define READ_DATA_LANE_ID_MASK 0xffull
+#define READ_DATA_DATA_SHIFT 0x0
+#define READ_DATA_DATA_MASK 0xffffffffull
+
+/* TX settings fields */
+#define ENABLE_LANE_TX_SHIFT 0
+#define ENABLE_LANE_TX_MASK 0xff
+#define TX_POLARITY_INVERSION_SHIFT 8
+#define TX_POLARITY_INVERSION_MASK 0xff
+#define RX_POLARITY_INVERSION_SHIFT 16
+#define RX_POLARITY_INVERSION_MASK 0xff
+#define MAX_RATE_SHIFT 24
+#define MAX_RATE_MASK 0xff
+
+/* verify capability PHY fields */
+#define CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT 0x4
+#define CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK 0x1
+#define POWER_MANAGEMENT_SHIFT 0x0
+#define POWER_MANAGEMENT_MASK 0xf
+
+/* 8051 lane register Field IDs */
+#define SPICO_FW_VERSION 0x7 /* SPICO firmware version */
+
+/* SPICO firmware version fields */
+#define SPICO_ROM_VERSION_SHIFT 0
+#define SPICO_ROM_VERSION_MASK 0xffff
+#define SPICO_ROM_PROD_ID_SHIFT 16
+#define SPICO_ROM_PROD_ID_MASK 0xffff
+
+/* verify capability fabric fields */
+#define VAU_SHIFT 0
+#define VAU_MASK 0x0007
+#define Z_SHIFT 3
+#define Z_MASK 0x0001
+#define VCU_SHIFT 4
+#define VCU_MASK 0x0007
+#define VL15BUF_SHIFT 8
+#define VL15BUF_MASK 0x0fff
+#define CRC_SIZES_SHIFT 20
+#define CRC_SIZES_MASK 0x7
+
+/* verify capability local link width fields */
+#define LINK_WIDTH_SHIFT 0 /* also for remote link width */
+#define LINK_WIDTH_MASK 0xffff /* also for remote link width */
+#define LOCAL_FLAG_BITS_SHIFT 16
+#define LOCAL_FLAG_BITS_MASK 0xff
+#define MISC_CONFIG_BITS_SHIFT 24
+#define MISC_CONFIG_BITS_MASK 0xff
+
+/* verify capability remote link width fields */
+#define REMOTE_TX_RATE_SHIFT 16
+#define REMOTE_TX_RATE_MASK 0xff
+
+/* LOCAL_DEVICE_ID fields */
+#define LOCAL_DEVICE_REV_SHIFT 0
+#define LOCAL_DEVICE_REV_MASK 0xff
+#define LOCAL_DEVICE_ID_SHIFT 8
+#define LOCAL_DEVICE_ID_MASK 0xffff
+
+/* REMOTE_DEVICE_ID fields */
+#define REMOTE_DEVICE_REV_SHIFT 0
+#define REMOTE_DEVICE_REV_MASK 0xff
+#define REMOTE_DEVICE_ID_SHIFT 8
+#define REMOTE_DEVICE_ID_MASK 0xffff
+
+/* local LNI link width fields */
+#define ENABLE_LANE_RX_SHIFT 16
+#define ENABLE_LANE_RX_MASK 0xff
+
+/* mask, shift for reading 'mgmt_enabled' value from REMOTE_LNI_INFO field */
+#define MGMT_ALLOWED_SHIFT 23
+#define MGMT_ALLOWED_MASK 0x1
+
+/* mask, shift for 'link_quality' within LINK_QUALITY_INFO field */
+#define LINK_QUALITY_SHIFT 24
+#define LINK_QUALITY_MASK 0x7
+
+/*
+ * mask, shift for reading 'planned_down_remote_reason_code'
+ * from LINK_QUALITY_INFO field
+ */
+#define DOWN_REMOTE_REASON_SHIFT 16
+#define DOWN_REMOTE_REASON_MASK 0xff
+
+#define HOST_INTERFACE_VERSION 1
+#define HOST_INTERFACE_VERSION_SHIFT 16
+#define HOST_INTERFACE_VERSION_MASK 0xff
+
+/* verify capability PHY power management bits */
+#define PWRM_BER_CONTROL 0x1
+#define PWRM_BANDWIDTH_CONTROL 0x2
+
+/* 8051 link down reasons */
+#define LDR_LINK_TRANSFER_ACTIVE_LOW 0xa
+#define LDR_RECEIVED_LINKDOWN_IDLE_MSG 0xb
+#define LDR_RECEIVED_HOST_OFFLINE_REQ 0xc
+
+/* verify capability fabric CRC size bits */
+enum {
+ CAP_CRC_14B = (1 << 0), /* 14b CRC */
+ CAP_CRC_48B = (1 << 1), /* 48b CRC */
+ CAP_CRC_12B_16B_PER_LANE = (1 << 2) /* 12b-16b per lane CRC */
+};
+
+#define SUPPORTED_CRCS (CAP_CRC_14B | CAP_CRC_48B)
+
+/* misc status version fields */
+#define STS_FM_VERSION_MINOR_SHIFT 16
+#define STS_FM_VERSION_MINOR_MASK 0xff
+#define STS_FM_VERSION_MAJOR_SHIFT 24
+#define STS_FM_VERSION_MAJOR_MASK 0xff
+#define STS_FM_VERSION_PATCH_SHIFT 24
+#define STS_FM_VERSION_PATCH_MASK 0xff
+
+/* LCB_CFG_CRC_MODE TX_VAL and RX_VAL CRC mode values */
+#define LCB_CRC_16B 0x0 /* 16b CRC */
+#define LCB_CRC_14B 0x1 /* 14b CRC */
+#define LCB_CRC_48B 0x2 /* 48b CRC */
+#define LCB_CRC_12B_16B_PER_LANE 0x3 /* 12b-16b per lane CRC */
+
+/*
+ * the following enum is (almost) a copy/paste of the definition
+ * in the OPA spec, section 20.2.2.6.8 (PortInfo)
+ */
+enum {
+ PORT_LTP_CRC_MODE_NONE = 0,
+ PORT_LTP_CRC_MODE_14 = 1, /* 14-bit LTP CRC mode (optional) */
+ PORT_LTP_CRC_MODE_16 = 2, /* 16-bit LTP CRC mode */
+ PORT_LTP_CRC_MODE_48 = 4,
+ /* 48-bit overlapping LTP CRC mode (optional) */
+ PORT_LTP_CRC_MODE_PER_LANE = 8
+ /* 12 to 16 bit per lane LTP CRC mode (optional) */
+};
+
+/* timeouts */
+#define LINK_RESTART_DELAY 1000 /* link restart delay, in ms */
+#define TIMEOUT_8051_START 5000 /* 8051 start timeout, in ms */
+#define DC8051_COMMAND_TIMEOUT 1000 /* DC8051 command timeout, in ms */
+#define FREEZE_STATUS_TIMEOUT 20 /* wait for freeze indicators, in ms */
+#define VL_STATUS_CLEAR_TIMEOUT 5000 /* per-VL status clear, in ms */
+#define CCE_STATUS_TIMEOUT 10 /* time to clear CCE Status, in ms */
+
+/* cclock tick time, in picoseconds per tick: 1/speed * 10^12 */
+#define ASIC_CCLOCK_PS 1242 /* 805 MHz */
+#define FPGA_CCLOCK_PS 30300 /* 33 MHz */
+
+/*
+ * Mask of enabled MISC errors. Do not enable the two RSA engine errors -
+ * see firmware.c:run_rsa() for details.
+ */
+#define DRIVER_MISC_MASK \
+ (~(MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK \
+ | MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK))
+
+/* valid values for the loopback module parameter */
+#define LOOPBACK_NONE 0 /* no loopback - default */
+#define LOOPBACK_SERDES 1
+#define LOOPBACK_LCB 2
+#define LOOPBACK_CABLE 3 /* external cable */
+
+/* set up bits in MISC_CONFIG_BITS */
+#define LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT 0
+#define EXT_CFG_LCB_RESET_SUPPORTED_SHIFT 3
+
+/* read and write hardware registers */
+u64 read_csr(const struct hfi1_devdata *dd, u32 offset);
+void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value);
+
+/*
+ * The *_kctxt_* flavor of the CSR read/write functions is for
+ * per-context or per-SDMA CSRs that are not mappable to user-space.
+ * Their spacing is not a PAGE_SIZE multiple.
+ */
+static inline u64 read_kctxt_csr(const struct hfi1_devdata *dd, int ctxt,
+ u32 offset0)
+{
+ /* kernel per-context CSRs are separated by 0x100 */
+ return read_csr(dd, offset0 + (0x100 * ctxt));
+}
+
+static inline void write_kctxt_csr(struct hfi1_devdata *dd, int ctxt,
+ u32 offset0, u64 value)
+{
+ /* kernel per-context CSRs are separated by 0x100 */
+ write_csr(dd, offset0 + (0x100 * ctxt), value);
+}
+
+int read_lcb_csr(struct hfi1_devdata *dd, u32 offset, u64 *data);
+int write_lcb_csr(struct hfi1_devdata *dd, u32 offset, u64 data);
+
+void __iomem *get_csr_addr(
+ const struct hfi1_devdata *dd,
+ u32 offset);
+
+static inline void __iomem *get_kctxt_csr_addr(
+ const struct hfi1_devdata *dd,
+ int ctxt,
+ u32 offset0)
+{
+ return get_csr_addr(dd, offset0 + (0x100 * ctxt));
+}
+
+/*
+ * The *_uctxt_* flavor of the CSR read/write functions are for
+ * per-context CSRs that are mappable to user space. All these CSRs
+ * are spaced by a PAGE_SIZE multiple in order to be mappable to
+ * different processes without exposing other contexts' CSRs
+ */
+static inline u64 read_uctxt_csr(const struct hfi1_devdata *dd, int ctxt,
+ u32 offset0)
+{
+ /* user per-context CSRs are separated by 0x1000 */
+ return read_csr(dd, offset0 + (0x1000 * ctxt));
+}
+
+static inline void write_uctxt_csr(struct hfi1_devdata *dd, int ctxt,
+ u32 offset0, u64 value)
+{
+ /* user per-context CSRs are separated by 0x1000 */
+ write_csr(dd, offset0 + (0x1000 * ctxt), value);
+}
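+/*
+ * Addressing sketch derived from the helpers above (illustrative): for
+ * context 3, read_kctxt_csr(dd, 3, offset0) accesses offset0 + 0x300, while
+ * read_uctxt_csr(dd, 3, offset0) accesses offset0 + 0x3000, reflecting the
+ * 0x100 kernel-context vs 0x1000 (PAGE_SIZE) user-context stride.
+ */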
+
+static inline u32 chip_rcv_contexts(struct hfi1_devdata *dd)
+{
+ return read_csr(dd, RCV_CONTEXTS);
+}
+
+static inline u32 chip_send_contexts(struct hfi1_devdata *dd)
+{
+ return read_csr(dd, SEND_CONTEXTS);
+}
+
+static inline u32 chip_sdma_engines(struct hfi1_devdata *dd)
+{
+ return read_csr(dd, SEND_DMA_ENGINES);
+}
+
+static inline u32 chip_pio_mem_size(struct hfi1_devdata *dd)
+{
+ return read_csr(dd, SEND_PIO_MEM_SIZE);
+}
+
+static inline u32 chip_sdma_mem_size(struct hfi1_devdata *dd)
+{
+ return read_csr(dd, SEND_DMA_MEM_SIZE);
+}
+
+static inline u32 chip_rcv_array_count(struct hfi1_devdata *dd)
+{
+ return read_csr(dd, RCV_ARRAY_CNT);
+}
+
+u8 encode_rcv_header_entry_size(u8 size);
+int hfi1_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt);
+void set_hdrq_regs(struct hfi1_devdata *dd, u8 ctxt, u8 entsize, u16 hdrcnt);
+
+u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
+ u32 dw_len);
+
+/* firmware.c */
+#define SBUS_MASTER_BROADCAST 0xfd
+#define NUM_PCIE_SERDES 16 /* number of PCIe serdes on the SBus */
+extern const u8 pcie_serdes_broadcast[];
+extern const u8 pcie_pcs_addrs[2][NUM_PCIE_SERDES];
+
+/* SBus commands */
+#define RESET_SBUS_RECEIVER 0x20
+#define WRITE_SBUS_RECEIVER 0x21
+#define READ_SBUS_RECEIVER 0x22
+void sbus_request(struct hfi1_devdata *dd,
+ u8 receiver_addr, u8 data_addr, u8 command, u32 data_in);
+int sbus_request_slow(struct hfi1_devdata *dd,
+ u8 receiver_addr, u8 data_addr, u8 command, u32 data_in);
+void set_sbus_fast_mode(struct hfi1_devdata *dd);
+void clear_sbus_fast_mode(struct hfi1_devdata *dd);
+int hfi1_firmware_init(struct hfi1_devdata *dd);
+int load_pcie_firmware(struct hfi1_devdata *dd);
+int load_firmware(struct hfi1_devdata *dd);
+void dispose_firmware(void);
+int acquire_hw_mutex(struct hfi1_devdata *dd);
+void release_hw_mutex(struct hfi1_devdata *dd);
+
+/*
+ * Bitmask of dynamic access for ASIC block chip resources. Each HFI has its
+ * own range of bits for the resource so it can clear its own bits on
+ * starting and exiting. If either HFI has the resource bit set, the
+ * resource is in use. The separate bit ranges are:
+ * HFI0 bits 7:0
+ * HFI1 bits 15:8
+ */
+#define CR_SBUS 0x01 /* SBUS, THERM, and PCIE registers */
+#define CR_EPROM 0x02 /* EEP, GPIO registers */
+#define CR_I2C1 0x04 /* QSFP1_OE register */
+#define CR_I2C2 0x08 /* QSFP2_OE register */
+#define CR_DYN_SHIFT 8 /* dynamic flag shift */
+#define CR_DYN_MASK ((1ull << CR_DYN_SHIFT) - 1)
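+/*
+ * Example of the per-HFI split (illustrative): HFI0 holding the SBus
+ * resource sets CR_SBUS (0x01), while HFI1 holding it would set
+ * CR_SBUS << CR_DYN_SHIFT (0x0100), so either side can test and clear its
+ * own bits independently.
+ */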
+
+/*
+ * Bitmask of static ASIC states; these are outside of the dynamic ASIC
+ * block chip resources above. These are to be set once and never cleared.
+ * Must be holding the SBus dynamic flag when setting.
+ */
+#define CR_THERM_INIT 0x010000
+
+int acquire_chip_resource(struct hfi1_devdata *dd, u32 resource, u32 mswait);
+void release_chip_resource(struct hfi1_devdata *dd, u32 resource);
+bool check_chip_resource(struct hfi1_devdata *dd, u32 resource,
+ const char *func);
+void init_chip_resources(struct hfi1_devdata *dd);
+void finish_chip_resources(struct hfi1_devdata *dd);
+
+/* ms wait time for access to an SBus resource */
+#define SBUS_TIMEOUT 4000 /* long enough for a FW download and SBR */
+
+/* ms wait time for a qsfp (i2c) chain to become available */
+#define QSFP_WAIT 20000 /* long enough for FW update to the F4 uc */
+
+void fabric_serdes_reset(struct hfi1_devdata *dd);
+int read_8051_data(struct hfi1_devdata *dd, u32 addr, u32 len, u64 *result);
+
+/* chip.c */
+void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
+ u8 *ver_patch);
+int write_host_interface_version(struct hfi1_devdata *dd, u8 version);
+void read_guid(struct hfi1_devdata *dd);
+int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout);
+void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
+ u8 neigh_reason, u8 rem_reason);
+int set_link_state(struct hfi1_pportdata *, u32 state);
+int port_ltp_to_cap(int port_ltp);
+void handle_verify_cap(struct work_struct *work);
+void handle_freeze(struct work_struct *work);
+void handle_link_up(struct work_struct *work);
+void handle_link_down(struct work_struct *work);
+void handle_link_downgrade(struct work_struct *work);
+void handle_link_bounce(struct work_struct *work);
+void handle_start_link(struct work_struct *work);
+void handle_sma_message(struct work_struct *work);
+int reset_qsfp(struct hfi1_pportdata *ppd);
+void qsfp_event(struct work_struct *work);
+void start_freeze_handling(struct hfi1_pportdata *ppd, int flags);
+int send_idle_sma(struct hfi1_devdata *dd, u64 message);
+int load_8051_config(struct hfi1_devdata *, u8, u8, u32);
+int read_8051_config(struct hfi1_devdata *, u8, u8, u32 *);
+int start_link(struct hfi1_pportdata *ppd);
+int bringup_serdes(struct hfi1_pportdata *ppd);
+void set_intr_state(struct hfi1_devdata *dd, u32 enable);
+bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd,
+ bool refresh_widths);
+void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
+ u32 intr_adjust, u32 npkts);
+int stop_drain_data_vls(struct hfi1_devdata *dd);
+int open_fill_data_vls(struct hfi1_devdata *dd);
+u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns);
+u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclock);
+void get_linkup_link_widths(struct hfi1_pportdata *ppd);
+void read_ltp_rtt(struct hfi1_devdata *dd);
+void clear_linkup_counters(struct hfi1_devdata *dd);
+u32 hdrqempty(struct hfi1_ctxtdata *rcd);
+int is_ax(struct hfi1_devdata *dd);
+int is_bx(struct hfi1_devdata *dd);
+bool is_urg_masked(struct hfi1_ctxtdata *rcd);
+u32 read_physical_state(struct hfi1_devdata *dd);
+u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate);
+const char *opa_pstate_name(u32 pstate);
+u32 driver_pstate(struct hfi1_pportdata *ppd);
+u32 driver_lstate(struct hfi1_pportdata *ppd);
+
+int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok);
+int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok);
+#define LCB_START DC_LCB_CSRS
+#define LCB_END DC_8051_CSRS /* next block is 8051 */
+extern uint num_vls;
+
+extern uint disable_integrity;
+u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl);
+u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data);
+u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl);
+u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data);
+u32 read_logical_state(struct hfi1_devdata *dd);
+void force_recv_intr(struct hfi1_ctxtdata *rcd);
+
+/* Per VL indexes */
+enum {
+ C_VL_0 = 0,
+ C_VL_1,
+ C_VL_2,
+ C_VL_3,
+ C_VL_4,
+ C_VL_5,
+ C_VL_6,
+ C_VL_7,
+ C_VL_15,
+ C_VL_COUNT
+};
+
+static inline int vl_from_idx(int idx)
+{
+ return (idx == C_VL_15 ? 15 : idx);
+}
+
+static inline int idx_from_vl(int vl)
+{
+ return (vl == 15 ? C_VL_15 : vl);
+}
+
+/* Per device counter indexes */
+enum {
+ C_RCV_OVF = 0,
+ C_RX_LEN_ERR,
+ C_RX_SHORT_ERR,
+ C_RX_ICRC_ERR,
+ C_RX_EBP,
+ C_RX_TID_FULL,
+ C_RX_TID_INVALID,
+ C_RX_TID_FLGMS,
+ C_RX_CTX_EGRS,
+ C_RCV_TID_FLSMS,
+ C_CCE_PCI_CR_ST,
+ C_CCE_PCI_TR_ST,
+ C_CCE_PIO_WR_ST,
+ C_CCE_ERR_INT,
+ C_CCE_SDMA_INT,
+ C_CCE_MISC_INT,
+ C_CCE_RCV_AV_INT,
+ C_CCE_RCV_URG_INT,
+ C_CCE_SEND_CR_INT,
+ C_DC_UNC_ERR,
+ C_DC_RCV_ERR,
+ C_DC_FM_CFG_ERR,
+ C_DC_RMT_PHY_ERR,
+ C_DC_DROPPED_PKT,
+ C_DC_MC_XMIT_PKTS,
+ C_DC_MC_RCV_PKTS,
+ C_DC_XMIT_CERR,
+ C_DC_RCV_CERR,
+ C_DC_RCV_FCC,
+ C_DC_XMIT_FCC,
+ C_DC_XMIT_FLITS,
+ C_DC_RCV_FLITS,
+ C_DC_XMIT_PKTS,
+ C_DC_RCV_PKTS,
+ C_DC_RX_FLIT_VL,
+ C_DC_RX_PKT_VL,
+ C_DC_RCV_FCN,
+ C_DC_RCV_FCN_VL,
+ C_DC_RCV_BCN,
+ C_DC_RCV_BCN_VL,
+ C_DC_RCV_BBL,
+ C_DC_RCV_BBL_VL,
+ C_DC_MARK_FECN,
+ C_DC_MARK_FECN_VL,
+ C_DC_TOTAL_CRC,
+ C_DC_CRC_LN0,
+ C_DC_CRC_LN1,
+ C_DC_CRC_LN2,
+ C_DC_CRC_LN3,
+ C_DC_CRC_MULT_LN,
+ C_DC_TX_REPLAY,
+ C_DC_RX_REPLAY,
+ C_DC_SEQ_CRC_CNT,
+ C_DC_ESC0_ONLY_CNT,
+ C_DC_ESC0_PLUS1_CNT,
+ C_DC_ESC0_PLUS2_CNT,
+ C_DC_REINIT_FROM_PEER_CNT,
+ C_DC_SBE_CNT,
+ C_DC_MISC_FLG_CNT,
+ C_DC_PRF_GOOD_LTP_CNT,
+ C_DC_PRF_ACCEPTED_LTP_CNT,
+ C_DC_PRF_RX_FLIT_CNT,
+ C_DC_PRF_TX_FLIT_CNT,
+ C_DC_PRF_CLK_CNTR,
+ C_DC_PG_DBG_FLIT_CRDTS_CNT,
+ C_DC_PG_STS_PAUSE_COMPLETE_CNT,
+ C_DC_PG_STS_TX_SBE_CNT,
+ C_DC_PG_STS_TX_MBE_CNT,
+ C_SW_CPU_INTR,
+ C_SW_CPU_RCV_LIM,
+ C_SW_CTX0_SEQ_DROP,
+ C_SW_VTX_WAIT,
+ C_SW_PIO_WAIT,
+ C_SW_PIO_DRAIN,
+ C_SW_KMEM_WAIT,
+ C_SW_TID_WAIT,
+ C_SW_SEND_SCHED,
+ C_SDMA_DESC_FETCHED_CNT,
+ C_SDMA_INT_CNT,
+ C_SDMA_ERR_CNT,
+ C_SDMA_IDLE_INT_CNT,
+ C_SDMA_PROGRESS_INT_CNT,
+/* MISC_ERR_STATUS */
+ C_MISC_PLL_LOCK_FAIL_ERR,
+ C_MISC_MBIST_FAIL_ERR,
+ C_MISC_INVALID_EEP_CMD_ERR,
+ C_MISC_EFUSE_DONE_PARITY_ERR,
+ C_MISC_EFUSE_WRITE_ERR,
+ C_MISC_EFUSE_READ_BAD_ADDR_ERR,
+ C_MISC_EFUSE_CSR_PARITY_ERR,
+ C_MISC_FW_AUTH_FAILED_ERR,
+ C_MISC_KEY_MISMATCH_ERR,
+ C_MISC_SBUS_WRITE_FAILED_ERR,
+ C_MISC_CSR_WRITE_BAD_ADDR_ERR,
+ C_MISC_CSR_READ_BAD_ADDR_ERR,
+ C_MISC_CSR_PARITY_ERR,
+/* CceErrStatus */
+ /*
+ * A special counter that is the aggregate count
+ * of all the cce_err_status errors. The remainder
+ * are actual bits in the CceErrStatus register.
+ */
+ C_CCE_ERR_STATUS_AGGREGATED_CNT,
+ C_CCE_MSIX_CSR_PARITY_ERR,
+ C_CCE_INT_MAP_UNC_ERR,
+ C_CCE_INT_MAP_COR_ERR,
+ C_CCE_MSIX_TABLE_UNC_ERR,
+ C_CCE_MSIX_TABLE_COR_ERR,
+ C_CCE_RXDMA_CONV_FIFO_PARITY_ERR,
+ C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR,
+ C_CCE_SEG_WRITE_BAD_ADDR_ERR,
+ C_CCE_SEG_READ_BAD_ADDR_ERR,
+ C_LA_TRIGGERED,
+ C_CCE_TRGT_CPL_TIMEOUT_ERR,
+ C_PCIC_RECEIVE_PARITY_ERR,
+ C_PCIC_TRANSMIT_BACK_PARITY_ERR,
+ C_PCIC_TRANSMIT_FRONT_PARITY_ERR,
+ C_PCIC_CPL_DAT_Q_UNC_ERR,
+ C_PCIC_CPL_HD_Q_UNC_ERR,
+ C_PCIC_POST_DAT_Q_UNC_ERR,
+ C_PCIC_POST_HD_Q_UNC_ERR,
+ C_PCIC_RETRY_SOT_MEM_UNC_ERR,
+ C_PCIC_RETRY_MEM_UNC_ERR,
+ C_PCIC_N_POST_DAT_Q_PARITY_ERR,
+ C_PCIC_N_POST_H_Q_PARITY_ERR,
+ C_PCIC_CPL_DAT_Q_COR_ERR,
+ C_PCIC_CPL_HD_Q_COR_ERR,
+ C_PCIC_POST_DAT_Q_COR_ERR,
+ C_PCIC_POST_HD_Q_COR_ERR,
+ C_PCIC_RETRY_SOT_MEM_COR_ERR,
+ C_PCIC_RETRY_MEM_COR_ERR,
+ C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR,
+ C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR,
+ C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR,
+ C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR,
+ C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR,
+ C_CCE_CSR_CFG_BUS_PARITY_ERR,
+ C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR,
+ C_CCE_RSPD_DATA_PARITY_ERR,
+ C_CCE_TRGT_ACCESS_ERR,
+ C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR,
+ C_CCE_CSR_WRITE_BAD_ADDR_ERR,
+ C_CCE_CSR_READ_BAD_ADDR_ERR,
+ C_CCE_CSR_PARITY_ERR,
+/* RcvErrStatus */
+ C_RX_CSR_PARITY_ERR,
+ C_RX_CSR_WRITE_BAD_ADDR_ERR,
+ C_RX_CSR_READ_BAD_ADDR_ERR,
+ C_RX_DMA_CSR_UNC_ERR,
+ C_RX_DMA_DQ_FSM_ENCODING_ERR,
+ C_RX_DMA_EQ_FSM_ENCODING_ERR,
+ C_RX_DMA_CSR_PARITY_ERR,
+ C_RX_RBUF_DATA_COR_ERR,
+ C_RX_RBUF_DATA_UNC_ERR,
+ C_RX_DMA_DATA_FIFO_RD_COR_ERR,
+ C_RX_DMA_DATA_FIFO_RD_UNC_ERR,
+ C_RX_DMA_HDR_FIFO_RD_COR_ERR,
+ C_RX_DMA_HDR_FIFO_RD_UNC_ERR,
+ C_RX_RBUF_DESC_PART2_COR_ERR,
+ C_RX_RBUF_DESC_PART2_UNC_ERR,
+ C_RX_RBUF_DESC_PART1_COR_ERR,
+ C_RX_RBUF_DESC_PART1_UNC_ERR,
+ C_RX_HQ_INTR_FSM_ERR,
+ C_RX_HQ_INTR_CSR_PARITY_ERR,
+ C_RX_LOOKUP_CSR_PARITY_ERR,
+ C_RX_LOOKUP_RCV_ARRAY_COR_ERR,
+ C_RX_LOOKUP_RCV_ARRAY_UNC_ERR,
+ C_RX_LOOKUP_DES_PART2_PARITY_ERR,
+ C_RX_LOOKUP_DES_PART1_UNC_COR_ERR,
+ C_RX_LOOKUP_DES_PART1_UNC_ERR,
+ C_RX_RBUF_NEXT_FREE_BUF_COR_ERR,
+ C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR,
+ C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR,
+ C_RX_RBUF_FL_INITDONE_PARITY_ERR,
+ C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR,
+ C_RX_RBUF_FL_RD_ADDR_PARITY_ERR,
+ C_RX_RBUF_EMPTY_ERR,
+ C_RX_RBUF_FULL_ERR,
+ C_RX_RBUF_BAD_LOOKUP_ERR,
+ C_RX_RBUF_CTX_ID_PARITY_ERR,
+ C_RX_RBUF_CSR_QEOPDW_PARITY_ERR,
+ C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR,
+ C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR,
+ C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR,
+ C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR,
+ C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR,
+ C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR,
+ C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR,
+ C_RX_RBUF_BLOCK_LIST_READ_COR_ERR,
+ C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR,
+ C_RX_RBUF_LOOKUP_DES_COR_ERR,
+ C_RX_RBUF_LOOKUP_DES_UNC_ERR,
+ C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR,
+ C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR,
+ C_RX_RBUF_FREE_LIST_COR_ERR,
+ C_RX_RBUF_FREE_LIST_UNC_ERR,
+ C_RX_RCV_FSM_ENCODING_ERR,
+ C_RX_DMA_FLAG_COR_ERR,
+ C_RX_DMA_FLAG_UNC_ERR,
+ C_RX_DC_SOP_EOP_PARITY_ERR,
+ C_RX_RCV_CSR_PARITY_ERR,
+ C_RX_RCV_QP_MAP_TABLE_COR_ERR,
+ C_RX_RCV_QP_MAP_TABLE_UNC_ERR,
+ C_RX_RCV_DATA_COR_ERR,
+ C_RX_RCV_DATA_UNC_ERR,
+ C_RX_RCV_HDR_COR_ERR,
+ C_RX_RCV_HDR_UNC_ERR,
+ C_RX_DC_INTF_PARITY_ERR,
+ C_RX_DMA_CSR_COR_ERR,
+/* SendPioErrStatus */
+ C_PIO_PEC_SOP_HEAD_PARITY_ERR,
+ C_PIO_PCC_SOP_HEAD_PARITY_ERR,
+ C_PIO_LAST_RETURNED_CNT_PARITY_ERR,
+ C_PIO_CURRENT_FREE_CNT_PARITY_ERR,
+ C_PIO_RSVD_31_ERR,
+ C_PIO_RSVD_30_ERR,
+ C_PIO_PPMC_SOP_LEN_ERR,
+ C_PIO_PPMC_BQC_MEM_PARITY_ERR,
+ C_PIO_VL_FIFO_PARITY_ERR,
+ C_PIO_VLF_SOP_PARITY_ERR,
+ C_PIO_VLF_V1_LEN_PARITY_ERR,
+ C_PIO_BLOCK_QW_COUNT_PARITY_ERR,
+ C_PIO_WRITE_QW_VALID_PARITY_ERR,
+ C_PIO_STATE_MACHINE_ERR,
+ C_PIO_WRITE_DATA_PARITY_ERR,
+ C_PIO_HOST_ADDR_MEM_COR_ERR,
+ C_PIO_HOST_ADDR_MEM_UNC_ERR,
+ C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR,
+ C_PIO_INIT_SM_IN_ERR,
+ C_PIO_PPMC_PBL_FIFO_ERR,
+ C_PIO_CREDIT_RET_FIFO_PARITY_ERR,
+ C_PIO_V1_LEN_MEM_BANK1_COR_ERR,
+ C_PIO_V1_LEN_MEM_BANK0_COR_ERR,
+ C_PIO_V1_LEN_MEM_BANK1_UNC_ERR,
+ C_PIO_V1_LEN_MEM_BANK0_UNC_ERR,
+ C_PIO_SM_PKT_RESET_PARITY_ERR,
+ C_PIO_PKT_EVICT_FIFO_PARITY_ERR,
+ C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR,
+ C_PIO_SBRDCTL_CRREL_PARITY_ERR,
+ C_PIO_PEC_FIFO_PARITY_ERR,
+ C_PIO_PCC_FIFO_PARITY_ERR,
+ C_PIO_SB_MEM_FIFO1_ERR,
+ C_PIO_SB_MEM_FIFO0_ERR,
+ C_PIO_CSR_PARITY_ERR,
+ C_PIO_WRITE_ADDR_PARITY_ERR,
+ C_PIO_WRITE_BAD_CTXT_ERR,
+/* SendDmaErrStatus */
+ C_SDMA_PCIE_REQ_TRACKING_COR_ERR,
+ C_SDMA_PCIE_REQ_TRACKING_UNC_ERR,
+ C_SDMA_CSR_PARITY_ERR,
+ C_SDMA_RPY_TAG_ERR,
+/* SendEgressErrStatus */
+ C_TX_READ_PIO_MEMORY_CSR_UNC_ERR,
+ C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR,
+ C_TX_EGRESS_FIFO_COR_ERR,
+ C_TX_READ_PIO_MEMORY_COR_ERR,
+ C_TX_READ_SDMA_MEMORY_COR_ERR,
+ C_TX_SB_HDR_COR_ERR,
+ C_TX_CREDIT_OVERRUN_ERR,
+ C_TX_LAUNCH_FIFO8_COR_ERR,
+ C_TX_LAUNCH_FIFO7_COR_ERR,
+ C_TX_LAUNCH_FIFO6_COR_ERR,
+ C_TX_LAUNCH_FIFO5_COR_ERR,
+ C_TX_LAUNCH_FIFO4_COR_ERR,
+ C_TX_LAUNCH_FIFO3_COR_ERR,
+ C_TX_LAUNCH_FIFO2_COR_ERR,
+ C_TX_LAUNCH_FIFO1_COR_ERR,
+ C_TX_LAUNCH_FIFO0_COR_ERR,
+ C_TX_CREDIT_RETURN_VL_ERR,
+ C_TX_HCRC_INSERTION_ERR,
+ C_TX_EGRESS_FIFI_UNC_ERR,
+ C_TX_READ_PIO_MEMORY_UNC_ERR,
+ C_TX_READ_SDMA_MEMORY_UNC_ERR,
+ C_TX_SB_HDR_UNC_ERR,
+ C_TX_CREDIT_RETURN_PARITY_ERR,
+ C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR,
+ C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR,
+ C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR,
+ C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR,
+ C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR,
+ C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR,
+ C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR,
+ C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR,
+ C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR,
+ C_TX_SDMA15_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA14_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA13_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA12_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA11_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA10_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA9_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA8_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA7_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA6_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA5_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA4_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA3_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA2_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA1_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA0_DISALLOWED_PACKET_ERR,
+ C_TX_CONFIG_PARITY_ERR,
+ C_TX_SBRD_CTL_CSR_PARITY_ERR,
+ C_TX_LAUNCH_CSR_PARITY_ERR,
+ C_TX_ILLEGAL_CL_ERR,
+ C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR,
+ C_TX_RESERVED_10,
+ C_TX_RESERVED_9,
+ C_TX_SDMA_LAUNCH_INTF_PARITY_ERR,
+ C_TX_PIO_LAUNCH_INTF_PARITY_ERR,
+ C_TX_RESERVED_6,
+ C_TX_INCORRECT_LINK_STATE_ERR,
+ C_TX_LINK_DOWN_ERR,
+ C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR,
+ C_TX_RESERVED_2,
+ C_TX_PKT_INTEGRITY_MEM_UNC_ERR,
+ C_TX_PKT_INTEGRITY_MEM_COR_ERR,
+/* SendErrStatus */
+ C_SEND_CSR_WRITE_BAD_ADDR_ERR,
+ C_SEND_CSR_READ_BAD_ADD_ERR,
+ C_SEND_CSR_PARITY_ERR,
+/* SendCtxtErrStatus */
+ C_PIO_WRITE_OUT_OF_BOUNDS_ERR,
+ C_PIO_WRITE_OVERFLOW_ERR,
+ C_PIO_WRITE_CROSSES_BOUNDARY_ERR,
+ C_PIO_DISALLOWED_PACKET_ERR,
+ C_PIO_INCONSISTENT_SOP_ERR,
+/* SendDmaEngErrStatus */
+ C_SDMA_HEADER_REQUEST_FIFO_COR_ERR,
+ C_SDMA_HEADER_STORAGE_COR_ERR,
+ C_SDMA_PACKET_TRACKING_COR_ERR,
+ C_SDMA_ASSEMBLY_COR_ERR,
+ C_SDMA_DESC_TABLE_COR_ERR,
+ C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR,
+ C_SDMA_HEADER_STORAGE_UNC_ERR,
+ C_SDMA_PACKET_TRACKING_UNC_ERR,
+ C_SDMA_ASSEMBLY_UNC_ERR,
+ C_SDMA_DESC_TABLE_UNC_ERR,
+ C_SDMA_TIMEOUT_ERR,
+ C_SDMA_HEADER_LENGTH_ERR,
+ C_SDMA_HEADER_ADDRESS_ERR,
+ C_SDMA_HEADER_SELECT_ERR,
+ C_SMDA_RESERVED_9,
+ C_SDMA_PACKET_DESC_OVERFLOW_ERR,
+ C_SDMA_LENGTH_MISMATCH_ERR,
+ C_SDMA_HALT_ERR,
+ C_SDMA_MEM_READ_ERR,
+ C_SDMA_FIRST_DESC_ERR,
+ C_SDMA_TAIL_OUT_OF_BOUNDS_ERR,
+ C_SDMA_TOO_LONG_ERR,
+ C_SDMA_GEN_MISMATCH_ERR,
+ C_SDMA_WRONG_DW_ERR,
+ DEV_CNTR_LAST /* Must be kept last */
+};
+
+/* Per port counter indexes */
+enum {
+ C_TX_UNSUP_VL = 0,
+ C_TX_INVAL_LEN,
+ C_TX_MM_LEN_ERR,
+ C_TX_UNDERRUN,
+ C_TX_FLOW_STALL,
+ C_TX_DROPPED,
+ C_TX_HDR_ERR,
+ C_TX_PKT,
+ C_TX_WORDS,
+ C_TX_WAIT,
+ C_TX_FLIT_VL,
+ C_TX_PKT_VL,
+ C_TX_WAIT_VL,
+ C_RX_PKT,
+ C_RX_WORDS,
+ C_SW_LINK_DOWN,
+ C_SW_LINK_UP,
+ C_SW_UNKNOWN_FRAME,
+ C_SW_XMIT_DSCD,
+ C_SW_XMIT_DSCD_VL,
+ C_SW_XMIT_CSTR_ERR,
+ C_SW_RCV_CSTR_ERR,
+ C_SW_IBP_LOOP_PKTS,
+ C_SW_IBP_RC_RESENDS,
+ C_SW_IBP_RNR_NAKS,
+ C_SW_IBP_OTHER_NAKS,
+ C_SW_IBP_RC_TIMEOUTS,
+ C_SW_IBP_PKT_DROPS,
+ C_SW_IBP_DMA_WAIT,
+ C_SW_IBP_RC_SEQNAK,
+ C_SW_IBP_RC_DUPREQ,
+ C_SW_IBP_RDMA_SEQ,
+ C_SW_IBP_UNALIGNED,
+ C_SW_IBP_SEQ_NAK,
+ C_SW_IBP_RC_CRWAITS,
+ C_SW_CPU_RC_ACKS,
+ C_SW_CPU_RC_QACKS,
+ C_SW_CPU_RC_DELAYED_COMP,
+ C_RCV_HDR_OVF_0,
+ C_RCV_HDR_OVF_1,
+ C_RCV_HDR_OVF_2,
+ C_RCV_HDR_OVF_3,
+ C_RCV_HDR_OVF_4,
+ C_RCV_HDR_OVF_5,
+ C_RCV_HDR_OVF_6,
+ C_RCV_HDR_OVF_7,
+ C_RCV_HDR_OVF_8,
+ C_RCV_HDR_OVF_9,
+ C_RCV_HDR_OVF_10,
+ C_RCV_HDR_OVF_11,
+ C_RCV_HDR_OVF_12,
+ C_RCV_HDR_OVF_13,
+ C_RCV_HDR_OVF_14,
+ C_RCV_HDR_OVF_15,
+ C_RCV_HDR_OVF_16,
+ C_RCV_HDR_OVF_17,
+ C_RCV_HDR_OVF_18,
+ C_RCV_HDR_OVF_19,
+ C_RCV_HDR_OVF_20,
+ C_RCV_HDR_OVF_21,
+ C_RCV_HDR_OVF_22,
+ C_RCV_HDR_OVF_23,
+ C_RCV_HDR_OVF_24,
+ C_RCV_HDR_OVF_25,
+ C_RCV_HDR_OVF_26,
+ C_RCV_HDR_OVF_27,
+ C_RCV_HDR_OVF_28,
+ C_RCV_HDR_OVF_29,
+ C_RCV_HDR_OVF_30,
+ C_RCV_HDR_OVF_31,
+ C_RCV_HDR_OVF_32,
+ C_RCV_HDR_OVF_33,
+ C_RCV_HDR_OVF_34,
+ C_RCV_HDR_OVF_35,
+ C_RCV_HDR_OVF_36,
+ C_RCV_HDR_OVF_37,
+ C_RCV_HDR_OVF_38,
+ C_RCV_HDR_OVF_39,
+ C_RCV_HDR_OVF_40,
+ C_RCV_HDR_OVF_41,
+ C_RCV_HDR_OVF_42,
+ C_RCV_HDR_OVF_43,
+ C_RCV_HDR_OVF_44,
+ C_RCV_HDR_OVF_45,
+ C_RCV_HDR_OVF_46,
+ C_RCV_HDR_OVF_47,
+ C_RCV_HDR_OVF_48,
+ C_RCV_HDR_OVF_49,
+ C_RCV_HDR_OVF_50,
+ C_RCV_HDR_OVF_51,
+ C_RCV_HDR_OVF_52,
+ C_RCV_HDR_OVF_53,
+ C_RCV_HDR_OVF_54,
+ C_RCV_HDR_OVF_55,
+ C_RCV_HDR_OVF_56,
+ C_RCV_HDR_OVF_57,
+ C_RCV_HDR_OVF_58,
+ C_RCV_HDR_OVF_59,
+ C_RCV_HDR_OVF_60,
+ C_RCV_HDR_OVF_61,
+ C_RCV_HDR_OVF_62,
+ C_RCV_HDR_OVF_63,
+ C_RCV_HDR_OVF_64,
+ C_RCV_HDR_OVF_65,
+ C_RCV_HDR_OVF_66,
+ C_RCV_HDR_OVF_67,
+ C_RCV_HDR_OVF_68,
+ C_RCV_HDR_OVF_69,
+ C_RCV_HDR_OVF_70,
+ C_RCV_HDR_OVF_71,
+ C_RCV_HDR_OVF_72,
+ C_RCV_HDR_OVF_73,
+ C_RCV_HDR_OVF_74,
+ C_RCV_HDR_OVF_75,
+ C_RCV_HDR_OVF_76,
+ C_RCV_HDR_OVF_77,
+ C_RCV_HDR_OVF_78,
+ C_RCV_HDR_OVF_79,
+ C_RCV_HDR_OVF_80,
+ C_RCV_HDR_OVF_81,
+ C_RCV_HDR_OVF_82,
+ C_RCV_HDR_OVF_83,
+ C_RCV_HDR_OVF_84,
+ C_RCV_HDR_OVF_85,
+ C_RCV_HDR_OVF_86,
+ C_RCV_HDR_OVF_87,
+ C_RCV_HDR_OVF_88,
+ C_RCV_HDR_OVF_89,
+ C_RCV_HDR_OVF_90,
+ C_RCV_HDR_OVF_91,
+ C_RCV_HDR_OVF_92,
+ C_RCV_HDR_OVF_93,
+ C_RCV_HDR_OVF_94,
+ C_RCV_HDR_OVF_95,
+ C_RCV_HDR_OVF_96,
+ C_RCV_HDR_OVF_97,
+ C_RCV_HDR_OVF_98,
+ C_RCV_HDR_OVF_99,
+ C_RCV_HDR_OVF_100,
+ C_RCV_HDR_OVF_101,
+ C_RCV_HDR_OVF_102,
+ C_RCV_HDR_OVF_103,
+ C_RCV_HDR_OVF_104,
+ C_RCV_HDR_OVF_105,
+ C_RCV_HDR_OVF_106,
+ C_RCV_HDR_OVF_107,
+ C_RCV_HDR_OVF_108,
+ C_RCV_HDR_OVF_109,
+ C_RCV_HDR_OVF_110,
+ C_RCV_HDR_OVF_111,
+ C_RCV_HDR_OVF_112,
+ C_RCV_HDR_OVF_113,
+ C_RCV_HDR_OVF_114,
+ C_RCV_HDR_OVF_115,
+ C_RCV_HDR_OVF_116,
+ C_RCV_HDR_OVF_117,
+ C_RCV_HDR_OVF_118,
+ C_RCV_HDR_OVF_119,
+ C_RCV_HDR_OVF_120,
+ C_RCV_HDR_OVF_121,
+ C_RCV_HDR_OVF_122,
+ C_RCV_HDR_OVF_123,
+ C_RCV_HDR_OVF_124,
+ C_RCV_HDR_OVF_125,
+ C_RCV_HDR_OVF_126,
+ C_RCV_HDR_OVF_127,
+ C_RCV_HDR_OVF_128,
+ C_RCV_HDR_OVF_129,
+ C_RCV_HDR_OVF_130,
+ C_RCV_HDR_OVF_131,
+ C_RCV_HDR_OVF_132,
+ C_RCV_HDR_OVF_133,
+ C_RCV_HDR_OVF_134,
+ C_RCV_HDR_OVF_135,
+ C_RCV_HDR_OVF_136,
+ C_RCV_HDR_OVF_137,
+ C_RCV_HDR_OVF_138,
+ C_RCV_HDR_OVF_139,
+ C_RCV_HDR_OVF_140,
+ C_RCV_HDR_OVF_141,
+ C_RCV_HDR_OVF_142,
+ C_RCV_HDR_OVF_143,
+ C_RCV_HDR_OVF_144,
+ C_RCV_HDR_OVF_145,
+ C_RCV_HDR_OVF_146,
+ C_RCV_HDR_OVF_147,
+ C_RCV_HDR_OVF_148,
+ C_RCV_HDR_OVF_149,
+ C_RCV_HDR_OVF_150,
+ C_RCV_HDR_OVF_151,
+ C_RCV_HDR_OVF_152,
+ C_RCV_HDR_OVF_153,
+ C_RCV_HDR_OVF_154,
+ C_RCV_HDR_OVF_155,
+ C_RCV_HDR_OVF_156,
+ C_RCV_HDR_OVF_157,
+ C_RCV_HDR_OVF_158,
+ C_RCV_HDR_OVF_159,
+ PORT_CNTR_LAST /* Must be kept last */
+};
+
+u64 get_all_cpu_total(u64 __percpu *cntr);
+void hfi1_start_cleanup(struct hfi1_devdata *dd);
+void hfi1_clear_tids(struct hfi1_ctxtdata *rcd);
+void hfi1_init_ctxt(struct send_context *sc);
+void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
+ u32 type, unsigned long pa, u16 order);
+void hfi1_quiet_serdes(struct hfi1_pportdata *ppd);
+void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
+ struct hfi1_ctxtdata *rcd);
+u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp);
+u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp);
+int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which);
+int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val);
+int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
+ u16 jkey);
+int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt);
+int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt,
+ u16 pkey);
+int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt);
+void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality);
+void hfi1_init_vnic_rsm(struct hfi1_devdata *dd);
+void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd);
+
+irqreturn_t general_interrupt(int irq, void *data);
+irqreturn_t sdma_interrupt(int irq, void *data);
+irqreturn_t receive_context_interrupt(int irq, void *data);
+irqreturn_t receive_context_thread(int irq, void *data);
+irqreturn_t receive_context_interrupt_napi(int irq, void *data);
+
+int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set);
+void init_qsfp_int(struct hfi1_devdata *dd);
+void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr);
+void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr);
+void reset_interrupts(struct hfi1_devdata *dd);
+u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx);
+void hfi1_init_aip_rsm(struct hfi1_devdata *dd);
+void hfi1_deinit_aip_rsm(struct hfi1_devdata *dd);
+
+/*
+ * Interrupt source table.
+ *
+ * Each entry is an interrupt source "type". Entries are ordered by
+ * increasing source number.
+ */
+struct is_table {
+ int start; /* interrupt source type start */
+ int end; /* interrupt source type end */
+ /* routine that returns the name of the interrupt source */
+ char *(*is_name)(char *name, size_t size, unsigned int source);
+ /* routine to call when receiving an interrupt */
+ void (*is_int)(struct hfi1_devdata *dd, unsigned int source);
+};
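/*
 * Illustrative sketch only -- not part of this patch. It shows what one
 * entry of the interrupt source table above might look like; the real
 * table and its handlers live in chip.c. The names
 * example_sdma_int_name(), example_sdma_interrupt() and the 0..15 source
 * range are hypothetical.
 */
static char *example_sdma_int_name(char *name, size_t size,
				   unsigned int source)
{
	snprintf(name, size, "SDmaInt%u", source);
	return name;
}

static void example_sdma_interrupt(struct hfi1_devdata *dd,
				   unsigned int source)
{
	/* dispatch to the SDMA engine identified by "source" */
}

static const struct is_table example_entry = {
	.start	 = 0,			/* first source type handled */
	.end	 = 15,			/* last source type handled */
	.is_name = example_sdma_int_name,
	.is_int	 = example_sdma_interrupt,
};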
+
+#endif /* _CHIP_H */
diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
new file mode 100644
index 000000000000..d79e25d20fb8
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/chip_registers.h
@@ -0,0 +1,1295 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ */
+
+#ifndef DEF_CHIP_REG
+#define DEF_CHIP_REG
+
+#define CORE 0x000000000000
+#define CCE (CORE + 0x000000000000)
+#define ASIC (CORE + 0x000000400000)
+#define MISC (CORE + 0x000000500000)
+#define DC_TOP_CSRS (CORE + 0x000000600000)
+#define CHIP_DEBUG (CORE + 0x000000700000)
+#define RXE (CORE + 0x000001000000)
+#define TXE (CORE + 0x000001800000)
+#define DCC_CSRS (DC_TOP_CSRS + 0x000000000000)
+#define DC_LCB_CSRS (DC_TOP_CSRS + 0x000000001000)
+#define DC_8051_CSRS (DC_TOP_CSRS + 0x000000002000)
+#define PCIE 0
+
+#define ASIC_NUM_SCRATCH 4
+#define CCE_ERR_INT_CNT 0
+#define CCE_MISC_INT_CNT 2
+#define CCE_NUM_32_BIT_COUNTERS 3
+#define CCE_NUM_32_BIT_INT_COUNTERS 6
+#define CCE_NUM_INT_CSRS 12
+#define CCE_NUM_INT_MAP_CSRS 96
+#define CCE_NUM_MSIX_PBAS 4
+#define CCE_NUM_MSIX_VECTORS 256
+#define CCE_NUM_SCRATCH 4
+#define CCE_PCIE_POSTED_CRDT_STALL_CNT 2
+#define CCE_PCIE_TRGT_STALL_CNT 0
+#define CCE_PIO_WR_STALL_CNT 1
+#define CCE_RCV_AVAIL_INT_CNT 3
+#define CCE_RCV_URGENT_INT_CNT 4
+#define CCE_SDMA_INT_CNT 1
+#define CCE_SEND_CREDIT_INT_CNT 5
+#define DCC_CFG_LED_CNTRL (DCC_CSRS + 0x000000000040)
+#define DCC_CFG_LED_CNTRL_LED_CNTRL_SMASK 0x10ull
+#define DCC_CFG_LED_CNTRL_LED_SW_BLINK_RATE_SHIFT 0
+#define DCC_CFG_LED_CNTRL_LED_SW_BLINK_RATE_SMASK 0xFull
+#define DCC_CFG_PORT_CONFIG (DCC_CSRS + 0x000000000008)
+#define DCC_CFG_PORT_CONFIG1 (DCC_CSRS + 0x000000000010)
+#define DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK 0xFFFFull
+#define DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT 16
+#define DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK 0xFFFF0000ull
+#define DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK 0xFFFFull
+#define DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT 0
+#define DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK 0xFFFFull
+#define DCC_CFG_PORT_CONFIG_LINK_STATE_MASK 0x7ull
+#define DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT 48
+#define DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK 0x7000000000000ull
+#define DCC_CFG_PORT_CONFIG_MTU_CAP_MASK 0x7ull
+#define DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT 32
+#define DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK 0x700000000ull
+#define DCC_CFG_RESET (DCC_CSRS + 0x000000000000)
+#define DCC_CFG_RESET_RESET_LCB BIT_ULL(0)
+#define DCC_CFG_RESET_RESET_TX_FPE BIT_ULL(1)
+#define DCC_CFG_RESET_RESET_RX_FPE BIT_ULL(2)
+#define DCC_CFG_RESET_RESET_8051 BIT_ULL(3)
+#define DCC_CFG_RESET_ENABLE_CCLK_BCC BIT_ULL(4)
+#define DCC_CFG_SC_VL_TABLE_15_0 (DCC_CSRS + 0x000000000028)
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY0_SHIFT 0
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY10_SHIFT 40
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY11_SHIFT 44
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY12_SHIFT 48
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY13_SHIFT 52
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY14_SHIFT 56
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY15_SHIFT 60
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY1_SHIFT 4
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY2_SHIFT 8
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY3_SHIFT 12
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY4_SHIFT 16
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY5_SHIFT 20
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY6_SHIFT 24
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY7_SHIFT 28
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY8_SHIFT 32
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY9_SHIFT 36
+#define DCC_CFG_SC_VL_TABLE_31_16 (DCC_CSRS + 0x000000000030)
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY16_SHIFT 0
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY17_SHIFT 4
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY18_SHIFT 8
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY19_SHIFT 12
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY20_SHIFT 16
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY21_SHIFT 20
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY22_SHIFT 24
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY23_SHIFT 28
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY24_SHIFT 32
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY25_SHIFT 36
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY26_SHIFT 40
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY27_SHIFT 44
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY28_SHIFT 48
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY29_SHIFT 52
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY30_SHIFT 56
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY31_SHIFT 60
+#define DCC_ERR_DROPPED_PKT_CNT (DCC_CSRS + 0x000000000120)
+#define DCC_ERR_FLG (DCC_CSRS + 0x000000000050)
+#define DCC_ERR_FLG_BAD_CRDT_ACK_ERR_SMASK 0x4000ull
+#define DCC_ERR_FLG_BAD_CTRL_DIST_ERR_SMASK 0x200000ull
+#define DCC_ERR_FLG_BAD_CTRL_FLIT_ERR_SMASK 0x10000ull
+#define DCC_ERR_FLG_BAD_DLID_TARGET_ERR_SMASK 0x200ull
+#define DCC_ERR_FLG_BAD_HEAD_DIST_ERR_SMASK 0x800000ull
+#define DCC_ERR_FLG_BAD_L2_ERR_SMASK 0x2ull
+#define DCC_ERR_FLG_BAD_LVER_ERR_SMASK 0x400ull
+#define DCC_ERR_FLG_BAD_MID_TAIL_ERR_SMASK 0x8ull
+#define DCC_ERR_FLG_BAD_PKT_LENGTH_ERR_SMASK 0x4000000ull
+#define DCC_ERR_FLG_BAD_PREEMPTION_ERR_SMASK 0x10ull
+#define DCC_ERR_FLG_BAD_SC_ERR_SMASK 0x4ull
+#define DCC_ERR_FLG_BAD_TAIL_DIST_ERR_SMASK 0x400000ull
+#define DCC_ERR_FLG_BAD_VL_MARKER_ERR_SMASK 0x80ull
+#define DCC_ERR_FLG_CLR (DCC_CSRS + 0x000000000060)
+#define DCC_ERR_FLG_CSR_ACCESS_BLOCKED_HOST_SMASK 0x8000000000ull
+#define DCC_ERR_FLG_CSR_ACCESS_BLOCKED_UC_SMASK 0x10000000000ull
+#define DCC_ERR_FLG_CSR_INVAL_ADDR_SMASK 0x400000000000ull
+#define DCC_ERR_FLG_CSR_PARITY_ERR_SMASK 0x200000000000ull
+#define DCC_ERR_FLG_DLID_ZERO_ERR_SMASK 0x40000000ull
+#define DCC_ERR_FLG_EN (DCC_CSRS + 0x000000000058)
+#define DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK 0x8000000000ull
+#define DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK 0x10000000000ull
+#define DCC_ERR_FLG_EVENT_CNTR_PARITY_ERR_SMASK 0x20000ull
+#define DCC_ERR_FLG_EVENT_CNTR_ROLLOVER_ERR_SMASK 0x40000ull
+#define DCC_ERR_FLG_FMCONFIG_ERR_SMASK 0x40000000000000ull
+#define DCC_ERR_FLG_FPE_TX_FIFO_OVFLW_ERR_SMASK 0x2000000000ull
+#define DCC_ERR_FLG_FPE_TX_FIFO_UNFLW_ERR_SMASK 0x4000000000ull
+#define DCC_ERR_FLG_LATE_EBP_ERR_SMASK 0x1000000000ull
+#define DCC_ERR_FLG_LATE_LONG_ERR_SMASK 0x800000000ull
+#define DCC_ERR_FLG_LATE_SHORT_ERR_SMASK 0x400000000ull
+#define DCC_ERR_FLG_LENGTH_MTU_ERR_SMASK 0x80000000ull
+#define DCC_ERR_FLG_LINK_ERR_SMASK 0x80000ull
+#define DCC_ERR_FLG_MISC_CNTR_ROLLOVER_ERR_SMASK 0x100000ull
+#define DCC_ERR_FLG_NONVL15_STATE_ERR_SMASK 0x1000000ull
+#define DCC_ERR_FLG_PERM_NVL15_ERR_SMASK 0x10000000ull
+#define DCC_ERR_FLG_PREEMPTION_ERR_SMASK 0x20ull
+#define DCC_ERR_FLG_PREEMPTIONVL15_ERR_SMASK 0x40ull
+#define DCC_ERR_FLG_RCVPORT_ERR_SMASK 0x80000000000000ull
+#define DCC_ERR_FLG_RX_BYTE_SHFT_PARITY_ERR_SMASK 0x1000000000000ull
+#define DCC_ERR_FLG_RX_CTRL_PARITY_MBE_ERR_SMASK 0x100000000000ull
+#define DCC_ERR_FLG_RX_EARLY_DROP_ERR_SMASK 0x200000000ull
+#define DCC_ERR_FLG_SLID_ZERO_ERR_SMASK 0x20000000ull
+#define DCC_ERR_FLG_TX_BYTE_SHFT_PARITY_ERR_SMASK 0x800000000000ull
+#define DCC_ERR_FLG_TX_CTRL_PARITY_ERR_SMASK 0x20000000000ull
+#define DCC_ERR_FLG_TX_CTRL_PARITY_MBE_ERR_SMASK 0x40000000000ull
+#define DCC_ERR_FLG_TX_SC_PARITY_ERR_SMASK 0x80000000000ull
+#define DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK 0x2000ull
+#define DCC_ERR_FLG_UNSUP_PKT_TYPE_SMASK 0x8000ull
+#define DCC_ERR_FLG_UNSUP_VL_ERR_SMASK 0x8000000ull
+#define DCC_ERR_FLG_VL15_MULTI_ERR_SMASK 0x2000000ull
+#define DCC_ERR_FMCONFIG_ERR_CNT (DCC_CSRS + 0x000000000110)
+#define DCC_ERR_INFO_FMCONFIG (DCC_CSRS + 0x000000000090)
+#define DCC_ERR_INFO_PORTRCV (DCC_CSRS + 0x000000000078)
+#define DCC_ERR_INFO_PORTRCV_HDR0 (DCC_CSRS + 0x000000000080)
+#define DCC_ERR_INFO_PORTRCV_HDR1 (DCC_CSRS + 0x000000000088)
+#define DCC_ERR_INFO_UNCORRECTABLE (DCC_CSRS + 0x000000000098)
+#define DCC_ERR_PORTRCV_ERR_CNT (DCC_CSRS + 0x000000000108)
+#define DCC_ERR_RCVREMOTE_PHY_ERR_CNT (DCC_CSRS + 0x000000000118)
+#define DCC_ERR_UNCORRECTABLE_CNT (DCC_CSRS + 0x000000000100)
+#define DCC_PRF_PORT_MARK_FECN_CNT (DCC_CSRS + 0x000000000330)
+#define DCC_PRF_PORT_RCV_BECN_CNT (DCC_CSRS + 0x000000000290)
+#define DCC_PRF_PORT_RCV_BUBBLE_CNT (DCC_CSRS + 0x0000000002E0)
+#define DCC_PRF_PORT_RCV_CORRECTABLE_CNT (DCC_CSRS + 0x000000000140)
+#define DCC_PRF_PORT_RCV_DATA_CNT (DCC_CSRS + 0x000000000198)
+#define DCC_PRF_PORT_RCV_FECN_CNT (DCC_CSRS + 0x000000000240)
+#define DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT (DCC_CSRS + 0x000000000130)
+#define DCC_PRF_PORT_RCV_PKTS_CNT (DCC_CSRS + 0x0000000001A8)
+#define DCC_PRF_PORT_VL_MARK_FECN_CNT (DCC_CSRS + 0x000000000338)
+#define DCC_PRF_PORT_VL_RCV_BECN_CNT (DCC_CSRS + 0x000000000298)
+#define DCC_PRF_PORT_VL_RCV_BUBBLE_CNT (DCC_CSRS + 0x0000000002E8)
+#define DCC_PRF_PORT_VL_RCV_DATA_CNT (DCC_CSRS + 0x0000000001B0)
+#define DCC_PRF_PORT_VL_RCV_FECN_CNT (DCC_CSRS + 0x000000000248)
+#define DCC_PRF_PORT_VL_RCV_PKTS_CNT (DCC_CSRS + 0x0000000001F8)
+#define DCC_PRF_PORT_XMIT_CORRECTABLE_CNT (DCC_CSRS + 0x000000000138)
+#define DCC_PRF_PORT_XMIT_DATA_CNT (DCC_CSRS + 0x000000000190)
+#define DCC_PRF_PORT_XMIT_MULTICAST_CNT (DCC_CSRS + 0x000000000128)
+#define DCC_PRF_PORT_XMIT_PKTS_CNT (DCC_CSRS + 0x0000000001A0)
+#define DCC_PRF_RX_FLOW_CRTL_CNT (DCC_CSRS + 0x000000000180)
+#define DCC_PRF_TX_FLOW_CRTL_CNT (DCC_CSRS + 0x000000000188)
+#define DC_DC8051_CFG_CSR_ACCESS_SEL (DC_8051_CSRS + 0x000000000110)
+#define DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK 0x2ull
+#define DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK 0x1ull
+#define DC_DC8051_CFG_EXT_DEV_0 (DC_8051_CSRS + 0x000000000118)
+#define DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK 0x1ull
+#define DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT 8
+#define DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT 16
+#define DC_DC8051_CFG_EXT_DEV_1 (DC_8051_CSRS + 0x000000000120)
+#define DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK 0xFFFFull
+#define DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT 16
+#define DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK 0xFFFF0000ull
+#define DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK 0x1ull
+#define DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK 0xFFull
+#define DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT 8
+#define DC_DC8051_CFG_HOST_CMD_0 (DC_8051_CSRS + 0x000000000028)
+#define DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK 0xFFFFFFFFFFFFull
+#define DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT 16
+#define DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK 0x1ull
+#define DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK 0xFFull
+#define DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT 8
+#define DC_DC8051_CFG_HOST_CMD_1 (DC_8051_CSRS + 0x000000000030)
+#define DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK 0x1ull
+#define DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK 0xFFull
+#define DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT 8
+#define DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK 0xFFFFFFFFFFFFull
+#define DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT 16
+#define DC_DC8051_CFG_LOCAL_GUID (DC_8051_CSRS + 0x000000000038)
+#define DC_DC8051_CFG_MODE (DC_8051_CSRS + 0x000000000070)
+#define DC_DC8051_CFG_RAM_ACCESS_CTRL (DC_8051_CSRS + 0x000000000008)
+#define DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK 0x7FFFull
+#define DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT 0
+#define DC_DC8051_CFG_RAM_ACCESS_CTRL_WRITE_ENA_SMASK 0x1000000ull
+#define DC_DC8051_CFG_RAM_ACCESS_CTRL_READ_ENA_SMASK 0x10000ull
+#define DC_DC8051_CFG_RAM_ACCESS_SETUP (DC_8051_CSRS + 0x000000000000)
+#define DC_DC8051_CFG_RAM_ACCESS_SETUP_AUTO_INCR_ADDR_SMASK 0x100ull
+#define DC_DC8051_CFG_RAM_ACCESS_SETUP_RAM_SEL_SMASK 0x1ull
+#define DC_DC8051_CFG_RAM_ACCESS_STATUS (DC_8051_CSRS + 0x000000000018)
+#define DC_DC8051_CFG_RAM_ACCESS_STATUS_ACCESS_COMPLETED_SMASK 0x10000ull
+#define DC_DC8051_CFG_RAM_ACCESS_WR_DATA (DC_8051_CSRS + 0x000000000010)
+#define DC_DC8051_CFG_RAM_ACCESS_RD_DATA (DC_8051_CSRS + 0x000000000020)
+#define DC_DC8051_CFG_RST (DC_8051_CSRS + 0x000000000068)
+#define DC_DC8051_CFG_RST_CRAM_SMASK 0x2ull
+#define DC_DC8051_CFG_RST_DRAM_SMASK 0x4ull
+#define DC_DC8051_CFG_RST_IRAM_SMASK 0x8ull
+#define DC_DC8051_CFG_RST_M8051W_SMASK 0x1ull
+#define DC_DC8051_CFG_RST_SFR_SMASK 0x10ull
+#define DC_DC8051_DBG_ERR_INFO_SET_BY_8051 (DC_8051_CSRS + 0x0000000000D8)
+#define DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK 0xFFFFFFFFull
+#define DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT 16
+#define DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK 0xFFFFull
+#define DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT 0
+#define DC_DC8051_ERR_CLR (DC_8051_CSRS + 0x0000000000E8)
+#define DC_DC8051_ERR_EN (DC_8051_CSRS + 0x0000000000F0)
+#define DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK 0x2ull
+#define DC_DC8051_ERR_FLG (DC_8051_CSRS + 0x0000000000E0)
+#define DC_DC8051_ERR_FLG_CRAM_MBE_SMASK 0x4ull
+#define DC_DC8051_ERR_FLG_CRAM_SBE_SMASK 0x8ull
+#define DC_DC8051_ERR_FLG_DRAM_MBE_SMASK 0x10ull
+#define DC_DC8051_ERR_FLG_DRAM_SBE_SMASK 0x20ull
+#define DC_DC8051_ERR_FLG_INVALID_CSR_ADDR_SMASK 0x400ull
+#define DC_DC8051_ERR_FLG_IRAM_MBE_SMASK 0x40ull
+#define DC_DC8051_ERR_FLG_IRAM_SBE_SMASK 0x80ull
+#define DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK 0x2ull
+#define DC_DC8051_ERR_FLG_SET_BY_8051_SMASK 0x1ull
+#define DC_DC8051_ERR_FLG_UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES_SMASK 0x100ull
+#define DC_DC8051_STS_CUR_STATE (DC_8051_CSRS + 0x000000000060)
+#define DC_DC8051_STS_CUR_STATE_FIRMWARE_MASK 0xFFull
+#define DC_DC8051_STS_CUR_STATE_FIRMWARE_SHIFT 16
+#define DC_DC8051_STS_CUR_STATE_PORT_MASK 0xFFull
+#define DC_DC8051_STS_CUR_STATE_PORT_SHIFT 0
+#define DC_DC8051_STS_LOCAL_FM_SECURITY (DC_8051_CSRS + 0x000000000050)
+#define DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK 0x1ull
+#define DC_DC8051_STS_REMOTE_FM_SECURITY (DC_8051_CSRS + 0x000000000058)
+#define DC_DC8051_STS_REMOTE_GUID (DC_8051_CSRS + 0x000000000040)
+#define DC_DC8051_STS_REMOTE_NODE_TYPE (DC_8051_CSRS + 0x000000000048)
+#define DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK 0x3ull
+#define DC_DC8051_STS_REMOTE_PORT_NO (DC_8051_CSRS + 0x000000000130)
+#define DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK 0xFFull
+#define DC_LCB_CFG_ALLOW_LINK_UP (DC_LCB_CSRS + 0x000000000128)
+#define DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT 0
+#define DC_LCB_CFG_CRC_MODE (DC_LCB_CSRS + 0x000000000058)
+#define DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT 0
+#define DC_LCB_CFG_IGNORE_LOST_RCLK (DC_LCB_CSRS + 0x000000000020)
+#define DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK 0x1ull
+#define DC_LCB_CFG_LANE_WIDTH (DC_LCB_CSRS + 0x000000000100)
+#define DC_LCB_CFG_LINK_KILL_EN (DC_LCB_CSRS + 0x000000000120)
+#define DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK 0x100000ull
+#define DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK 0x400000ull
+#define DC_LCB_CFG_LN_DCLK (DC_LCB_CSRS + 0x000000000060)
+#define DC_LCB_CFG_LOOPBACK (DC_LCB_CSRS + 0x0000000000F8)
+#define DC_LCB_CFG_LOOPBACK_VAL_SHIFT 0
+#define DC_LCB_CFG_RUN (DC_LCB_CSRS + 0x000000000000)
+#define DC_LCB_CFG_RUN_EN_SHIFT 0
+#define DC_LCB_CFG_RX_FIFOS_RADR (DC_LCB_CSRS + 0x000000000018)
+#define DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT 8
+#define DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT 4
+#define DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT 0
+#define DC_LCB_CFG_TX_FIFOS_RADR (DC_LCB_CSRS + 0x000000000010)
+#define DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT 0
+#define DC_LCB_CFG_TX_FIFOS_RESET (DC_LCB_CSRS + 0x000000000008)
+#define DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT 0
+#define DC_LCB_CFG_REINIT_AS_SLAVE (DC_LCB_CSRS + 0x000000000030)
+#define DC_LCB_CFG_CNT_FOR_SKIP_STALL (DC_LCB_CSRS + 0x000000000040)
+#define DC_LCB_CFG_CLK_CNTR (DC_LCB_CSRS + 0x000000000110)
+#define DC_LCB_ERR_CLR (DC_LCB_CSRS + 0x000000000308)
+#define DC_LCB_ERR_EN (DC_LCB_CSRS + 0x000000000310)
+#define DC_LCB_ERR_FLG (DC_LCB_CSRS + 0x000000000300)
+#define DC_LCB_ERR_FLG_REDUNDANT_FLIT_PARITY_ERR_SMASK 0x20000000ull
+#define DC_LCB_ERR_FLG_NEG_EDGE_LINK_TRANSFER_ACTIVE_SMASK 0x10000000ull
+#define DC_LCB_ERR_FLG_HOLD_REINIT_SMASK 0x8000000ull
+#define DC_LCB_ERR_FLG_RST_FOR_INCOMPLT_RND_TRIP_SMASK 0x4000000ull
+#define DC_LCB_ERR_FLG_RST_FOR_LINK_TIMEOUT_SMASK 0x2000000ull
+#define DC_LCB_ERR_FLG_CREDIT_RETURN_FLIT_MBE_SMASK 0x1000000ull
+#define DC_LCB_ERR_FLG_REPLAY_BUF_SBE_SMASK 0x800000ull
+#define DC_LCB_ERR_FLG_REPLAY_BUF_MBE_SMASK 0x400000ull
+#define DC_LCB_ERR_FLG_FLIT_INPUT_BUF_SBE_SMASK 0x200000ull
+#define DC_LCB_ERR_FLG_FLIT_INPUT_BUF_MBE_SMASK 0x100000ull
+#define DC_LCB_ERR_FLG_VL_ACK_INPUT_WRONG_CRC_MODE_SMASK 0x80000ull
+#define DC_LCB_ERR_FLG_VL_ACK_INPUT_PARITY_ERR_SMASK 0x40000ull
+#define DC_LCB_ERR_FLG_VL_ACK_INPUT_BUF_OFLW_SMASK 0x20000ull
+#define DC_LCB_ERR_FLG_FLIT_INPUT_BUF_OFLW_SMASK 0x10000ull
+#define DC_LCB_ERR_FLG_ILLEGAL_FLIT_ENCODING_SMASK 0x8000ull
+#define DC_LCB_ERR_FLG_ILLEGAL_NULL_LTP_SMASK 0x4000ull
+#define DC_LCB_ERR_FLG_UNEXPECTED_ROUND_TRIP_MARKER_SMASK 0x2000ull
+#define DC_LCB_ERR_FLG_UNEXPECTED_REPLAY_MARKER_SMASK 0x1000ull
+#define DC_LCB_ERR_FLG_RCLK_STOPPED_SMASK 0x800ull
+#define DC_LCB_ERR_FLG_CRC_ERR_CNT_HIT_LIMIT_SMASK 0x400ull
+#define DC_LCB_ERR_FLG_REINIT_FOR_LN_DEGRADE_SMASK 0x200ull
+#define DC_LCB_ERR_FLG_REINIT_FROM_PEER_SMASK 0x100ull
+#define DC_LCB_ERR_FLG_SEQ_CRC_ERR_SMASK 0x80ull
+#define DC_LCB_ERR_FLG_RX_LESS_THAN_FOUR_LNS_SMASK 0x40ull
+#define DC_LCB_ERR_FLG_TX_LESS_THAN_FOUR_LNS_SMASK 0x20ull
+#define DC_LCB_ERR_FLG_LOST_REINIT_STALL_OR_TOS_SMASK 0x10ull
+#define DC_LCB_ERR_FLG_ALL_LNS_FAILED_REINIT_TEST_SMASK 0x8ull
+#define DC_LCB_ERR_FLG_RST_FOR_FAILED_DESKEW_SMASK 0x4ull
+#define DC_LCB_ERR_FLG_INVALID_CSR_ADDR_SMASK 0x2ull
+#define DC_LCB_ERR_FLG_CSR_PARITY_ERR_SMASK 0x1ull
+#define DC_LCB_ERR_INFO_CRC_ERR_LN0 (DC_LCB_CSRS + 0x000000000328)
+#define DC_LCB_ERR_INFO_CRC_ERR_LN1 (DC_LCB_CSRS + 0x000000000330)
+#define DC_LCB_ERR_INFO_CRC_ERR_LN2 (DC_LCB_CSRS + 0x000000000338)
+#define DC_LCB_ERR_INFO_CRC_ERR_LN3 (DC_LCB_CSRS + 0x000000000340)
+#define DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN (DC_LCB_CSRS + 0x000000000348)
+#define DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT (DC_LCB_CSRS + 0x000000000368)
+#define DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT (DC_LCB_CSRS + 0x000000000370)
+#define DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT (DC_LCB_CSRS + 0x000000000378)
+#define DC_LCB_ERR_INFO_MISC_FLG_CNT (DC_LCB_CSRS + 0x000000000390)
+#define DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT (DC_LCB_CSRS + 0x000000000380)
+#define DC_LCB_ERR_INFO_RX_REPLAY_CNT (DC_LCB_CSRS + 0x000000000358)
+#define DC_LCB_ERR_INFO_SBE_CNT (DC_LCB_CSRS + 0x000000000388)
+#define DC_LCB_ERR_INFO_SEQ_CRC_CNT (DC_LCB_CSRS + 0x000000000360)
+#define DC_LCB_ERR_INFO_TOTAL_CRC_ERR (DC_LCB_CSRS + 0x000000000320)
+#define DC_LCB_ERR_INFO_TX_REPLAY_CNT (DC_LCB_CSRS + 0x000000000350)
+#define DC_LCB_PG_DBG_FLIT_CRDTS_CNT (DC_LCB_CSRS + 0x000000000580)
+#define DC_LCB_PG_STS_PAUSE_COMPLETE_CNT (DC_LCB_CSRS + 0x0000000005F8)
+#define DC_LCB_PG_STS_TX_MBE_CNT (DC_LCB_CSRS + 0x000000000608)
+#define DC_LCB_PG_STS_TX_SBE_CNT (DC_LCB_CSRS + 0x000000000600)
+#define DC_LCB_PRF_ACCEPTED_LTP_CNT (DC_LCB_CSRS + 0x000000000408)
+#define DC_LCB_PRF_CLK_CNTR (DC_LCB_CSRS + 0x000000000420)
+#define DC_LCB_PRF_GOOD_LTP_CNT (DC_LCB_CSRS + 0x000000000400)
+#define DC_LCB_PRF_RX_FLIT_CNT (DC_LCB_CSRS + 0x000000000410)
+#define DC_LCB_PRF_TX_FLIT_CNT (DC_LCB_CSRS + 0x000000000418)
+#define DC_LCB_STS_LINK_TRANSFER_ACTIVE (DC_LCB_CSRS + 0x000000000468)
+#define DC_LCB_STS_ROUND_TRIP_LTP_CNT (DC_LCB_CSRS + 0x0000000004B0)
+#define RCV_LENGTH_ERR_CNT 0
+#define RCV_SHORT_ERR_CNT 2
+#define RCV_ICRC_ERR_CNT 6
+#define RCV_EBP_CNT 9
+#define RCV_BUF_OVFL_CNT 10
+#define RCV_CONTEXT_EGR_STALL 22
+#define RCV_DATA_PKT_CNT 0
+#define RCV_DWORD_CNT 1
+#define RCV_TID_FLOW_GEN_MISMATCH_CNT 20
+#define RCV_TID_FLOW_SEQ_MISMATCH_CNT 23
+#define RCV_TID_FULL_ERR_CNT 18
+#define RCV_TID_VALID_ERR_CNT 19
+#define RXE_NUM_32_BIT_COUNTERS 24
+#define RXE_NUM_64_BIT_COUNTERS 2
+#define RXE_NUM_RSM_INSTANCES 4
+#define RXE_NUM_TID_FLOWS 32
+#define RXE_PER_CONTEXT_OFFSET 0x0300000
+#define SEND_DATA_PKT_CNT 0
+#define SEND_DATA_PKT_VL0_CNT 12
+#define SEND_DATA_VL0_CNT 3
+#define SEND_DROPPED_PKT_CNT 5
+#define SEND_DWORD_CNT 1
+#define SEND_FLOW_STALL_CNT 4
+#define SEND_HEADERS_ERR_CNT 6
+#define SEND_LEN_ERR_CNT 1
+#define SEND_MAX_MIN_LEN_ERR_CNT 2
+#define SEND_UNDERRUN_CNT 3
+#define SEND_UNSUP_VL_ERR_CNT 0
+#define SEND_WAIT_CNT 2
+#define SEND_WAIT_VL0_CNT 21
+#define TXE_PIO_SEND_OFFSET 0x0800000
+#define ASIC_CFG_DRV_STR (ASIC + 0x000000000048)
+#define ASIC_CFG_MUTEX (ASIC + 0x000000000040)
+#define ASIC_CFG_SBUS_EXECUTE (ASIC + 0x000000000008)
+#define ASIC_CFG_SBUS_EXECUTE_EXECUTE_SMASK 0x1ull
+#define ASIC_CFG_SBUS_EXECUTE_FAST_MODE_SMASK 0x2ull
+#define ASIC_CFG_SBUS_REQUEST (ASIC + 0x000000000000)
+#define ASIC_CFG_SBUS_REQUEST_COMMAND_SHIFT 16
+#define ASIC_CFG_SBUS_REQUEST_DATA_ADDR_SHIFT 8
+#define ASIC_CFG_SBUS_REQUEST_DATA_IN_SHIFT 32
+#define ASIC_CFG_SBUS_REQUEST_RECEIVER_ADDR_SHIFT 0
+#define ASIC_CFG_SCRATCH (ASIC + 0x000000000020)
+#define ASIC_CFG_SCRATCH_1 (ASIC_CFG_SCRATCH + 0x08)
+#define ASIC_CFG_SCRATCH_2 (ASIC_CFG_SCRATCH + 0x10)
+#define ASIC_CFG_SCRATCH_3 (ASIC_CFG_SCRATCH + 0x18)
+#define ASIC_CFG_THERM_POLL_EN (ASIC + 0x000000000050)
+#define ASIC_EEP_ADDR_CMD (ASIC + 0x000000000308)
+#define ASIC_EEP_ADDR_CMD_EP_ADDR_MASK 0xFFFFFFull
+#define ASIC_EEP_CTL_STAT (ASIC + 0x000000000300)
+#define ASIC_EEP_CTL_STAT_EP_RESET_SMASK 0x4ull
+#define ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT 8
+#define ASIC_EEP_CTL_STAT_RESETCSR 0x0000000083818000ull
+#define ASIC_EEP_DATA (ASIC + 0x000000000310)
+#define ASIC_GPIO_CLEAR (ASIC + 0x000000000230)
+#define ASIC_GPIO_FORCE (ASIC + 0x000000000238)
+#define ASIC_GPIO_IN (ASIC + 0x000000000200)
+#define ASIC_GPIO_INVERT (ASIC + 0x000000000210)
+#define ASIC_GPIO_MASK (ASIC + 0x000000000220)
+#define ASIC_GPIO_OE (ASIC + 0x000000000208)
+#define ASIC_GPIO_OUT (ASIC + 0x000000000218)
+#define ASIC_PCIE_SD_HOST_CMD (ASIC + 0x000000000100)
+#define ASIC_PCIE_SD_HOST_CMD_INTRPT_CMD_SHIFT 0
+#define ASIC_PCIE_SD_HOST_CMD_SBR_MODE_SMASK 0x400ull
+#define ASIC_PCIE_SD_HOST_CMD_SBUS_RCVR_ADDR_SHIFT 2
+#define ASIC_PCIE_SD_HOST_CMD_TIMER_MASK 0xFFFFFull
+#define ASIC_PCIE_SD_HOST_CMD_TIMER_SHIFT 12
+#define ASIC_PCIE_SD_HOST_STATUS (ASIC + 0x000000000108)
+#define ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_ERR_MASK 0x7ull
+#define ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_ERR_SHIFT 2
+#define ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_STS_MASK 0x3ull
+#define ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_STS_SHIFT 0
+#define ASIC_PCIE_SD_INTRPT_DATA_CODE (ASIC + 0x000000000110)
+#define ASIC_PCIE_SD_INTRPT_ENABLE (ASIC + 0x000000000118)
+#define ASIC_PCIE_SD_INTRPT_LIST (ASIC + 0x000000000180)
+#define ASIC_PCIE_SD_INTRPT_LIST_INTRPT_CODE_SHIFT 16
+#define ASIC_PCIE_SD_INTRPT_LIST_INTRPT_DATA_SHIFT 0
+#define ASIC_PCIE_SD_INTRPT_STATUS (ASIC + 0x000000000128)
+#define ASIC_QSFP1_CLEAR (ASIC + 0x000000000270)
+#define ASIC_QSFP1_FORCE (ASIC + 0x000000000278)
+#define ASIC_QSFP1_IN (ASIC + 0x000000000240)
+#define ASIC_QSFP1_INVERT (ASIC + 0x000000000250)
+#define ASIC_QSFP1_MASK (ASIC + 0x000000000260)
+#define ASIC_QSFP1_OE (ASIC + 0x000000000248)
+#define ASIC_QSFP1_OUT (ASIC + 0x000000000258)
+#define ASIC_QSFP1_STATUS (ASIC + 0x000000000268)
+#define ASIC_QSFP2_CLEAR (ASIC + 0x0000000002B0)
+#define ASIC_QSFP2_FORCE (ASIC + 0x0000000002B8)
+#define ASIC_QSFP2_IN (ASIC + 0x000000000280)
+#define ASIC_QSFP2_INVERT (ASIC + 0x000000000290)
+#define ASIC_QSFP2_MASK (ASIC + 0x0000000002A0)
+#define ASIC_QSFP2_OE (ASIC + 0x000000000288)
+#define ASIC_QSFP2_OUT (ASIC + 0x000000000298)
+#define ASIC_QSFP2_STATUS (ASIC + 0x0000000002A8)
+#define ASIC_STS_SBUS_COUNTERS (ASIC + 0x000000000018)
+#define ASIC_STS_SBUS_COUNTERS_EXECUTE_CNT_MASK 0xFFFFull
+#define ASIC_STS_SBUS_COUNTERS_EXECUTE_CNT_SHIFT 0
+#define ASIC_STS_SBUS_COUNTERS_RCV_DATA_VALID_CNT_MASK 0xFFFFull
+#define ASIC_STS_SBUS_COUNTERS_RCV_DATA_VALID_CNT_SHIFT 16
+#define ASIC_STS_SBUS_RESULT (ASIC + 0x000000000010)
+#define ASIC_STS_SBUS_RESULT_DONE_SMASK 0x1ull
+#define ASIC_STS_SBUS_RESULT_RCV_DATA_VALID_SMASK 0x2ull
+#define ASIC_STS_SBUS_RESULT_RESULT_CODE_SHIFT 2
+#define ASIC_STS_SBUS_RESULT_RESULT_CODE_MASK 0x7ull
+#define ASIC_STS_SBUS_RESULT_DATA_OUT_SHIFT 32
+#define ASIC_STS_SBUS_RESULT_DATA_OUT_MASK 0xFFFFFFFFull
+#define ASIC_STS_THERM (ASIC + 0x000000000058)
+#define ASIC_STS_THERM_CRIT_TEMP_MASK 0x7FFull
+#define ASIC_STS_THERM_CRIT_TEMP_SHIFT 18
+#define ASIC_STS_THERM_CURR_TEMP_MASK 0x7FFull
+#define ASIC_STS_THERM_CURR_TEMP_SHIFT 2
+#define ASIC_STS_THERM_HI_TEMP_MASK 0x7FFull
+#define ASIC_STS_THERM_HI_TEMP_SHIFT 50
+#define ASIC_STS_THERM_LO_TEMP_MASK 0x7FFull
+#define ASIC_STS_THERM_LO_TEMP_SHIFT 34
+#define ASIC_STS_THERM_LOW_SHIFT 13
+#define CCE_COUNTER_ARRAY32 (CCE + 0x000000000060)
+#define CCE_CTRL (CCE + 0x000000000010)
+#define CCE_CTRL_RXE_RESUME_SMASK 0x800ull
+#define CCE_CTRL_SPC_FREEZE_SMASK 0x100ull
+#define CCE_CTRL_SPC_UNFREEZE_SMASK 0x200ull
+#define CCE_CTRL_TXE_RESUME_SMASK 0x2000ull
+#define CCE_DC_CTRL (CCE + 0x0000000000B8)
+#define CCE_DC_CTRL_DC_RESET_SMASK 0x1ull
+#define CCE_DC_CTRL_RESETCSR 0x0000000000000001ull
+#define CCE_ERR_CLEAR (CCE + 0x000000000050)
+#define CCE_ERR_MASK (CCE + 0x000000000048)
+#define CCE_ERR_STATUS (CCE + 0x000000000040)
+#define CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK 0x40ull
+#define CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK 0x1000ull
+#define CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK \
+ 0x200ull
+#define CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK \
+ 0x800ull
+#define CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK \
+ 0x400ull
+#define CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK 0x100ull
+#define CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK 0x80ull
+#define CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK 0x1ull
+#define CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK 0x2ull
+#define CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK 0x4ull
+#define CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK 0x4000000000ull
+#define CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK 0x8000000000ull
+#define CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK 0x10000000000ull
+#define CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK 0x1000000000ull
+#define CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK 0x2000000000ull
+#define CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK 0x400000000ull
+#define CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK 0x20ull
+#define CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK 0x800000000ull
+#define CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK 0x100000000ull
+#define CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK 0x200000000ull
+#define CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK 0x10ull
+#define CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK 0x8ull
+#define CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK 0x40000000ull
+#define CCE_ERR_STATUS_LA_TRIGGERED_SMASK 0x80000000ull
+#define CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK 0x40000ull
+#define CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK 0x4000000ull
+#define CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK 0x20000ull
+#define CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK 0x2000000ull
+#define CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK 0x100000ull
+#define CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK 0x80000ull
+#define CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK 0x10000ull
+#define CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK 0x1000000ull
+#define CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK 0x8000ull
+#define CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK 0x800000ull
+#define CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK 0x20000000ull
+#define CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK 0x2000ull
+#define CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK 0x200000ull
+#define CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK 0x4000ull
+#define CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK 0x400000ull
+#define CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK 0x10000000ull
+#define CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK 0x8000000ull
+#define CCE_INT_CLEAR (CCE + 0x000000110A00)
+#define CCE_INT_COUNTER_ARRAY32 (CCE + 0x000000110D00)
+#define CCE_INT_FORCE (CCE + 0x000000110B00)
+#define CCE_INT_MAP (CCE + 0x000000110500)
+#define CCE_INT_MASK (CCE + 0x000000110900)
+#define CCE_INT_STATUS (CCE + 0x000000110800)
+#define CCE_MSIX_INT_GRANTED (CCE + 0x000000110200)
+#define CCE_MSIX_TABLE_LOWER (CCE + 0x000000100000)
+#define CCE_MSIX_TABLE_UPPER (CCE + 0x000000100008)
+#define CCE_MSIX_TABLE_UPPER_RESETCSR 0x0000000100000000ull
+#define CCE_MSIX_VEC_CLR_WITHOUT_INT (CCE + 0x000000110400)
+#define CCE_PCIE_CTRL (CCE + 0x0000000000C0)
+#define CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_MASK 0x3ull
+#define CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_SHIFT 0
+#define CCE_PCIE_CTRL_PCIE_LANE_DELAY_MASK 0xFull
+#define CCE_PCIE_CTRL_PCIE_LANE_DELAY_SHIFT 2
+#define CCE_PCIE_CTRL_XMT_MARGIN_OVERWRITE_ENABLE_SHIFT 8
+#define CCE_PCIE_CTRL_XMT_MARGIN_SHIFT 9
+#define CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_MASK 0x1ull
+#define CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_SHIFT 12
+#define CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_MASK 0x7ull
+#define CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_SHIFT 13
+#define CCE_REVISION (CCE + 0x000000000000)
+#define CCE_REVISION2 (CCE + 0x000000000008)
+#define CCE_REVISION2_HFI_ID_MASK 0x1ull
+#define CCE_REVISION2_HFI_ID_SHIFT 0
+#define CCE_REVISION2_IMPL_CODE_SHIFT 8
+#define CCE_REVISION2_IMPL_REVISION_SHIFT 16
+#define CCE_REVISION_BOARD_ID_LOWER_NIBBLE_MASK 0xFull
+#define CCE_REVISION_BOARD_ID_LOWER_NIBBLE_SHIFT 32
+#define CCE_REVISION_CHIP_REV_MAJOR_MASK 0xFFull
+#define CCE_REVISION_CHIP_REV_MAJOR_SHIFT 8
+#define CCE_REVISION_CHIP_REV_MINOR_MASK 0xFFull
+#define CCE_REVISION_CHIP_REV_MINOR_SHIFT 0
+#define CCE_REVISION_SW_MASK 0xFFull
+#define CCE_REVISION_SW_SHIFT 24
+#define CCE_SCRATCH (CCE + 0x000000000020)
+#define CCE_STATUS (CCE + 0x000000000018)
+#define CCE_STATUS_RXE_FROZE_SMASK 0x2ull
+#define CCE_STATUS_RXE_PAUSED_SMASK 0x20ull
+#define CCE_STATUS_SDMA_FROZE_SMASK 0x1ull
+#define CCE_STATUS_SDMA_PAUSED_SMASK 0x10ull
+#define CCE_STATUS_TXE_FROZE_SMASK 0x4ull
+#define CCE_STATUS_TXE_PAUSED_SMASK 0x40ull
+#define CCE_STATUS_TXE_PIO_FROZE_SMASK 0x8ull
+#define CCE_STATUS_TXE_PIO_PAUSED_SMASK 0x80ull
+#define MISC_CFG_FW_CTRL (MISC + 0x000000001000)
+#define MISC_CFG_FW_CTRL_FW_8051_LOADED_SMASK 0x2ull
+#define MISC_CFG_FW_CTRL_RSA_STATUS_SHIFT 2
+#define MISC_CFG_FW_CTRL_RSA_STATUS_SMASK 0xCull
+#define MISC_CFG_RSA_CMD (MISC + 0x000000000A08)
+#define MISC_CFG_RSA_MODULUS (MISC + 0x000000000400)
+#define MISC_CFG_RSA_MU (MISC + 0x000000000A10)
+#define MISC_CFG_RSA_R2 (MISC + 0x000000000000)
+#define MISC_CFG_RSA_SIGNATURE (MISC + 0x000000000200)
+#define MISC_CFG_SHA_PRELOAD (MISC + 0x000000000A00)
+#define MISC_ERR_CLEAR (MISC + 0x000000002010)
+#define MISC_ERR_MASK (MISC + 0x000000002008)
+#define MISC_ERR_STATUS (MISC + 0x000000002000)
+#define MISC_ERR_STATUS_MISC_PLL_LOCK_FAIL_ERR_SMASK 0x1000ull
+#define MISC_ERR_STATUS_MISC_MBIST_FAIL_ERR_SMASK 0x800ull
+#define MISC_ERR_STATUS_MISC_INVALID_EEP_CMD_ERR_SMASK 0x400ull
+#define MISC_ERR_STATUS_MISC_EFUSE_DONE_PARITY_ERR_SMASK 0x200ull
+#define MISC_ERR_STATUS_MISC_EFUSE_WRITE_ERR_SMASK 0x100ull
+#define MISC_ERR_STATUS_MISC_EFUSE_READ_BAD_ADDR_ERR_SMASK 0x80ull
+#define MISC_ERR_STATUS_MISC_EFUSE_CSR_PARITY_ERR_SMASK 0x40ull
+#define MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK 0x20ull
+#define MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK 0x10ull
+#define MISC_ERR_STATUS_MISC_SBUS_WRITE_FAILED_ERR_SMASK 0x8ull
+#define MISC_ERR_STATUS_MISC_CSR_WRITE_BAD_ADDR_ERR_SMASK 0x4ull
+#define MISC_ERR_STATUS_MISC_CSR_READ_BAD_ADDR_ERR_SMASK 0x2ull
+#define MISC_ERR_STATUS_MISC_CSR_PARITY_ERR_SMASK 0x1ull
+#define PCI_CFG_MSIX0 (PCIE + 0x0000000000B0)
+#define PCI_CFG_REG1 (PCIE + 0x000000000004)
+#define PCI_CFG_REG11 (PCIE + 0x00000000002C)
+#define PCIE_CFG_SPCIE1 (PCIE + 0x00000000014C)
+#define PCIE_CFG_SPCIE2 (PCIE + 0x000000000150)
+#define PCIE_CFG_TPH2 (PCIE + 0x000000000180)
+#define RCV_ARRAY (RXE + 0x000000200000)
+#define RCV_ARRAY_CNT (RXE + 0x000000000018)
+#define RCV_ARRAY_RT_ADDR_MASK 0xFFFFFFFFFull
+#define RCV_ARRAY_RT_ADDR_SHIFT 0
+#define RCV_ARRAY_RT_BUF_SIZE_SHIFT 36
+#define RCV_ARRAY_RT_WRITE_ENABLE_SMASK 0x8000000000000000ull
+#define RCV_AVAIL_TIME_OUT (RXE + 0x000000100050)
+#define RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK 0xFFull
+#define RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT 0
+#define RCV_BTH_QP (RXE + 0x000000000028)
+#define RCV_BTH_QP_KDETH_QP_MASK 0xFFull
+#define RCV_BTH_QP_KDETH_QP_SHIFT 16
+#define RCV_BYPASS (RXE + 0x000000000038)
+#define RCV_BYPASS_HDR_SIZE_SHIFT 16
+#define RCV_BYPASS_HDR_SIZE_MASK 0x1Full
+#define RCV_BYPASS_HDR_SIZE_SMASK 0x1F0000ull
+#define RCV_BYPASS_BYPASS_CONTEXT_SHIFT 0
+#define RCV_BYPASS_BYPASS_CONTEXT_MASK 0xFFull
+#define RCV_BYPASS_BYPASS_CONTEXT_SMASK 0xFFull
+#define RCV_CONTEXTS (RXE + 0x000000000010)
+#define RCV_COUNTER_ARRAY32 (RXE + 0x000000000400)
+#define RCV_COUNTER_ARRAY64 (RXE + 0x000000000500)
+#define RCV_CTRL (RXE + 0x000000000000)
+#define RCV_CTRL_RCV_BYPASS_ENABLE_SMASK 0x10ull
+#define RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK 0x40ull
+#define RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK 0x4ull
+#define RCV_CTRL_RCV_PORT_ENABLE_SMASK 0x1ull
+#define RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK 0x2ull
+#define RCV_CTRL_RCV_RSM_ENABLE_SMASK 0x20ull
+#define RCV_CTRL_RX_RBUF_INIT_SMASK 0x200ull
+#define RCV_CTXT_CTRL (RXE + 0x000000100000)
+#define RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK 0x4ull
+#define RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK 0x8ull
+#define RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK 0x7ull
+#define RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT 8
+#define RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK 0x700ull
+#define RCV_CTXT_CTRL_ENABLE_SMASK 0x1ull
+#define RCV_CTXT_CTRL_INTR_AVAIL_SMASK 0x20ull
+#define RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK 0x2ull
+#define RCV_CTXT_CTRL_TAIL_UPD_SMASK 0x40ull
+#define RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK 0x10ull
+#define RCV_CTXT_STATUS (RXE + 0x000000100008)
+#define RCV_EGR_CTRL (RXE + 0x000000100010)
+#define RCV_EGR_CTRL_EGR_BASE_INDEX_MASK 0x1FFFull
+#define RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT 0
+#define RCV_EGR_CTRL_EGR_CNT_MASK 0x1FFull
+#define RCV_EGR_CTRL_EGR_CNT_SHIFT 32
+#define RCV_EGR_INDEX_HEAD (RXE + 0x000000300018)
+#define RCV_EGR_INDEX_HEAD_HEAD_MASK 0x7FFull
+#define RCV_EGR_INDEX_HEAD_HEAD_SHIFT 0
+#define RCV_ERR_CLEAR (RXE + 0x000000000070)
+#define RCV_ERR_INFO (RXE + 0x000000000050)
+#define RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SC_SMASK 0x1Full
+#define RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK 0x20ull
+#define RCV_ERR_MASK (RXE + 0x000000000068)
+#define RCV_ERR_STATUS (RXE + 0x000000000060)
+#define RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK 0x8000000000000000ull
+#define RCV_ERR_STATUS_RX_CSR_READ_BAD_ADDR_ERR_SMASK 0x2000000000000000ull
+#define RCV_ERR_STATUS_RX_CSR_WRITE_BAD_ADDR_ERR_SMASK \
+ 0x4000000000000000ull
+#define RCV_ERR_STATUS_RX_DC_INTF_PARITY_ERR_SMASK 0x2ull
+#define RCV_ERR_STATUS_RX_DC_SOP_EOP_PARITY_ERR_SMASK 0x200ull
+#define RCV_ERR_STATUS_RX_DMA_CSR_COR_ERR_SMASK 0x1ull
+#define RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK 0x200000000000000ull
+#define RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK 0x1000000000000000ull
+#define RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_COR_ERR_SMASK \
+ 0x40000000000000ull
+#define RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
+ 0x20000000000000ull
+#define RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
+ 0x800000000000000ull
+#define RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
+ 0x400000000000000ull
+#define RCV_ERR_STATUS_RX_DMA_FLAG_COR_ERR_SMASK 0x800ull
+#define RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK 0x400ull
+#define RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_COR_ERR_SMASK 0x10000000000000ull
+#define RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK 0x8000000000000ull
+#define RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK 0x200000000000ull
+#define RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK 0x400000000000ull
+#define RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK 0x100000000000ull
+#define RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
+ 0x10000000000ull
+#define RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK 0x8000000000ull
+#define RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
+ 0x20000000000ull
+#define RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_COR_ERR_SMASK 0x80000000000ull
+#define RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK 0x40000000000ull
+#define RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK 0x40000000ull
+#define RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_COR_ERR_SMASK 0x100000ull
+#define RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK 0x80000ull
+#define RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK 0x400000ull
+#define RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK 0x10000000ull
+#define RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK 0x2000000ull
+#define RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
+ 0x200000ull
+#define RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK 0x800000ull
+#define RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
+ 0x8000000ull
+#define RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK 0x4000000ull
+#define RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK 0x1000000ull
+#define RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK 0x20000000ull
+#define RCV_ERR_STATUS_RX_RBUF_DATA_COR_ERR_SMASK 0x100000000000000ull
+#define RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK 0x80000000000000ull
+#define RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK 0x1000000000000ull
+#define RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK 0x800000000000ull
+#define RCV_ERR_STATUS_RX_RBUF_DESC_PART2_COR_ERR_SMASK 0x4000000000000ull
+#define RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK 0x2000000000000ull
+#define RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK 0x100000000ull
+#define RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK 0x800000000ull
+#define RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
+ 0x1000000000ull
+#define RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK 0x200000000ull
+#define RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK 0x400000000ull
+#define RCV_ERR_STATUS_RX_RBUF_FREE_LIST_COR_ERR_SMASK 0x4000ull
+#define RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK 0x2000ull
+#define RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK 0x80000000ull
+#define RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_COR_ERR_SMASK 0x40000ull
+#define RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK 0x10000ull
+#define RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK 0x8000ull
+#define RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK 0x20000ull
+#define RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_COR_ERR_SMASK 0x4000000000ull
+#define RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK 0x2000000000ull
+#define RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK 0x100ull
+#define RCV_ERR_STATUS_RX_RCV_DATA_COR_ERR_SMASK 0x20ull
+#define RCV_ERR_STATUS_RX_RCV_DATA_UNC_ERR_SMASK 0x10ull
+#define RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK 0x1000ull
+#define RCV_ERR_STATUS_RX_RCV_HDR_COR_ERR_SMASK 0x8ull
+#define RCV_ERR_STATUS_RX_RCV_HDR_UNC_ERR_SMASK 0x4ull
+#define RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_COR_ERR_SMASK 0x80ull
+#define RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK 0x40ull
+#define RCV_HDR_ADDR (RXE + 0x000000100028)
+#define RCV_HDR_CNT (RXE + 0x000000100030)
+#define RCV_HDR_CNT_CNT_MASK 0x1FFull
+#define RCV_HDR_CNT_CNT_SHIFT 0
+#define RCV_HDR_ENT_SIZE (RXE + 0x000000100038)
+#define RCV_HDR_ENT_SIZE_ENT_SIZE_MASK 0x7ull
+#define RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT 0
+#define RCV_HDR_HEAD (RXE + 0x000000300008)
+#define RCV_HDR_HEAD_COUNTER_MASK 0xFFull
+#define RCV_HDR_HEAD_COUNTER_SHIFT 32
+#define RCV_HDR_HEAD_HEAD_MASK 0x7FFFFull
+#define RCV_HDR_HEAD_HEAD_SHIFT 0
+#define RCV_HDR_HEAD_HEAD_SMASK 0x7FFFFull
+#define RCV_HDR_OVFL_CNT (RXE + 0x000000100058)
+#define RCV_HDR_SIZE (RXE + 0x000000100040)
+#define RCV_HDR_SIZE_HDR_SIZE_MASK 0x1Full
+#define RCV_HDR_SIZE_HDR_SIZE_SHIFT 0
+#define RCV_HDR_TAIL (RXE + 0x000000300000)
+#define RCV_HDR_TAIL_ADDR (RXE + 0x000000100048)
+#define RCV_KEY_CTRL (RXE + 0x000000100020)
+#define RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK 0x200000000ull
+#define RCV_KEY_CTRL_JOB_KEY_VALUE_MASK 0xFFFFull
+#define RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT 0
+#define RCV_MULTICAST (RXE + 0x000000000030)
+#define RCV_PARTITION_KEY (RXE + 0x000000000200)
+#define RCV_PARTITION_KEY_PARTITION_KEY_A_MASK 0xFFFFull
+#define RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT 16
+#define RCV_QP_MAP_TABLE (RXE + 0x000000000100)
+#define RCV_RSM_CFG (RXE + 0x000000000600)
+#define RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK 0x1ull
+#define RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT 0
+#define RCV_RSM_CFG_PACKET_TYPE_SHIFT 60
+#define RCV_RSM_CFG_OFFSET_SHIFT 32
+#define RCV_RSM_MAP_TABLE (RXE + 0x000000000900)
+#define RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK 0xFFull
+#define RCV_RSM_MATCH (RXE + 0x000000000800)
+#define RCV_RSM_MATCH_MASK1_SHIFT 0
+#define RCV_RSM_MATCH_MASK2_SHIFT 16
+#define RCV_RSM_MATCH_VALUE1_SHIFT 8
+#define RCV_RSM_MATCH_VALUE2_SHIFT 24
+#define RCV_RSM_SELECT (RXE + 0x000000000700)
+#define RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT 0
+#define RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT 16
+#define RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT 32
+#define RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT 44
+#define RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT 48
+#define RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT 60
+#define RCV_STATUS (RXE + 0x000000000008)
+#define RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK 0x1ull
+#define RCV_STATUS_RX_RBUF_INIT_DONE_SMASK 0x200ull
+#define RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK 0x40ull
+#define RCV_TID_CTRL (RXE + 0x000000100018)
+#define RCV_TID_CTRL_TID_BASE_INDEX_MASK 0x1FFFull
+#define RCV_TID_CTRL_TID_BASE_INDEX_SHIFT 0
+#define RCV_TID_CTRL_TID_PAIR_CNT_MASK 0x1FFull
+#define RCV_TID_CTRL_TID_PAIR_CNT_SHIFT 32
+#define RCV_TID_FLOW_TABLE (RXE + 0x000000300800)
+#define RCV_VL15 (RXE + 0x000000000048)
+#define SEND_BTH_QP (TXE + 0x0000000000A0)
+#define SEND_BTH_QP_KDETH_QP_MASK 0xFFull
+#define SEND_BTH_QP_KDETH_QP_SHIFT 16
+#define SEND_CM_CREDIT_USED_STATUS (TXE + 0x000000000510)
+#define SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK \
+ 0x1000000000000ull
+#define SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK \
+ 0x8000000000000000ull
+#define SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK \
+ 0x2000000000000ull
+#define SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK \
+ 0x4000000000000ull
+#define SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK \
+ 0x8000000000000ull
+#define SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK \
+ 0x10000000000000ull
+#define SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK \
+ 0x20000000000000ull
+#define SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK \
+ 0x40000000000000ull
+#define SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK \
+ 0x80000000000000ull
+#define SEND_CM_CREDIT_VL (TXE + 0x000000000600)
+#define SEND_CM_CREDIT_VL15 (TXE + 0x000000000678)
+#define SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT 0
+#define SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK 0xFFFFull
+#define SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT 0
+#define SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK 0xFFFFull
+#define SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK 0xFFFFull
+#define SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT 16
+#define SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK 0xFFFF0000ull
+#define SEND_CM_CTRL (TXE + 0x000000000500)
+#define SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK 0x8ull
+#define SEND_CM_CTRL_RESETCSR 0x0000000000000020ull
+#define SEND_CM_GLOBAL_CREDIT (TXE + 0x000000000508)
+#define SEND_CM_GLOBAL_CREDIT_AU_MASK 0x7ull
+#define SEND_CM_GLOBAL_CREDIT_AU_SHIFT 16
+#define SEND_CM_GLOBAL_CREDIT_AU_SMASK 0x70000ull
+#define SEND_CM_GLOBAL_CREDIT_RESETCSR 0x0000094000030000ull
+#define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK 0xFFFFull
+#define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT 0
+#define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK 0xFFFFull
+#define SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK 0xFFFFull
+#define SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT 32
+#define SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK 0xFFFF00000000ull
+#define SEND_CM_LOCAL_AU_TABLE0_TO3 (TXE + 0x000000000520)
+#define SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT 0
+#define SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT 16
+#define SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT 32
+#define SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT 48
+#define SEND_CM_LOCAL_AU_TABLE4_TO7 (TXE + 0x000000000528)
+#define SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT 0
+#define SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT 16
+#define SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT 32
+#define SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT 48
+#define SEND_CM_REMOTE_AU_TABLE0_TO3 (TXE + 0x000000000530)
+#define SEND_CM_REMOTE_AU_TABLE4_TO7 (TXE + 0x000000000538)
+#define SEND_CM_TIMER_CTRL (TXE + 0x000000000518)
+#define SEND_CONTEXTS (TXE + 0x000000000010)
+#define SEND_CONTEXT_SET_CTRL (TXE + 0x000000000200)
+#define SEND_COUNTER_ARRAY32 (TXE + 0x000000000300)
+#define SEND_COUNTER_ARRAY64 (TXE + 0x000000000400)
+#define SEND_CTRL (TXE + 0x000000000000)
+#define SEND_CTRL_CM_RESET_SMASK 0x4ull
+#define SEND_CTRL_SEND_ENABLE_SMASK 0x1ull
+#define SEND_CTRL_UNSUPPORTED_VL_SHIFT 3
+#define SEND_CTRL_UNSUPPORTED_VL_MASK 0xFFull
+#define SEND_CTRL_UNSUPPORTED_VL_SMASK (SEND_CTRL_UNSUPPORTED_VL_MASK \
+ << SEND_CTRL_UNSUPPORTED_VL_SHIFT)
+#define SEND_CTRL_VL_ARBITER_ENABLE_SMASK 0x2ull
+#define SEND_CTXT_CHECK_ENABLE (TXE + 0x000000100080)
+#define SEND_CTXT_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK 0x80ull
+#define SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK 0x1ull
+#define SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK 0x4ull
+#define SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK 0x20ull
+#define SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK 0x8ull
+#define SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK 0x10ull
+#define SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK 0x40ull
+#define SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK 0x2ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK 0x20000ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK \
+ 0x200000ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_SMASK 0x800ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_GRH_SMASK 0x400ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK 0x1000ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_NON_KDETH_PACKETS_SMASK 0x2000ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK \
+ 0x100000ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_TEST_SMASK 0x10000ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK 0x200ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_SMASK 0x100ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK \
+ 0x80000ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK \
+ 0x40000ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK \
+ 0x8000ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK \
+ 0x4000ull
+#define SEND_CTXT_CHECK_JOB_KEY (TXE + 0x000000100090)
+#define SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK 0x100000000ull
+#define SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK 0xFFFF0000ull
+#define SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK 0xFFFFull
+#define SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT 0
+#define SEND_CTXT_CHECK_OPCODE (TXE + 0x0000001000A8)
+#define SEND_CTXT_CHECK_OPCODE_MASK_SHIFT 8
+#define SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT 0
+#define SEND_CTXT_CHECK_PARTITION_KEY (TXE + 0x000000100098)
+#define SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK 0xFFFFull
+#define SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT 0
+#define SEND_CTXT_CHECK_SLID (TXE + 0x0000001000A0)
+#define SEND_CTXT_CHECK_SLID_MASK_MASK 0xFFFFull
+#define SEND_CTXT_CHECK_SLID_MASK_SHIFT 16
+#define SEND_CTXT_CHECK_SLID_VALUE_MASK 0xFFFFull
+#define SEND_CTXT_CHECK_SLID_VALUE_SHIFT 0
+#define SEND_CTXT_CHECK_VL (TXE + 0x000000100088)
+#define SEND_CTXT_CREDIT_CTRL (TXE + 0x000000100010)
+#define SEND_CTXT_CREDIT_CTRL_CREDIT_INTR_SMASK 0x20000ull
+#define SEND_CTXT_CREDIT_CTRL_EARLY_RETURN_SMASK 0x10000ull
+#define SEND_CTXT_CREDIT_CTRL_THRESHOLD_MASK 0x7FFull
+#define SEND_CTXT_CREDIT_CTRL_THRESHOLD_SHIFT 0
+#define SEND_CTXT_CREDIT_CTRL_THRESHOLD_SMASK 0x7FFull
+#define SEND_CTXT_CREDIT_STATUS (TXE + 0x000000100018)
+#define SEND_CTXT_CREDIT_STATUS_CURRENT_FREE_COUNTER_MASK 0x7FFull
+#define SEND_CTXT_CREDIT_STATUS_CURRENT_FREE_COUNTER_SHIFT 32
+#define SEND_CTXT_CREDIT_STATUS_LAST_RETURNED_COUNTER_SMASK 0x7FFull
+#define SEND_CTXT_CREDIT_FORCE (TXE + 0x000000100028)
+#define SEND_CTXT_CREDIT_FORCE_FORCE_RETURN_SMASK 0x1ull
+#define SEND_CTXT_CREDIT_RETURN_ADDR (TXE + 0x000000100020)
+#define SEND_CTXT_CREDIT_RETURN_ADDR_ADDRESS_SMASK 0xFFFFFFFFFFC0ull
+#define SEND_CTXT_CTRL (TXE + 0x000000100000)
+#define SEND_CTXT_CTRL_CTXT_BASE_MASK 0x3FFFull
+#define SEND_CTXT_CTRL_CTXT_BASE_SHIFT 32
+#define SEND_CTXT_CTRL_CTXT_DEPTH_MASK 0x7FFull
+#define SEND_CTXT_CTRL_CTXT_DEPTH_SHIFT 48
+#define SEND_CTXT_CTRL_CTXT_ENABLE_SMASK 0x1ull
+#define SEND_CTXT_ERR_CLEAR (TXE + 0x000000100050)
+#define SEND_CTXT_ERR_MASK (TXE + 0x000000100048)
+#define SEND_CTXT_ERR_STATUS (TXE + 0x000000100040)
+#define SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK 0x2ull
+#define SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK 0x1ull
+#define SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK 0x4ull
+#define SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK 0x10ull
+#define SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK 0x8ull
+#define SEND_CTXT_STATUS (TXE + 0x000000100008)
+#define SEND_CTXT_STATUS_CTXT_HALTED_SMASK 0x1ull
+#define SEND_DMA_BASE_ADDR (TXE + 0x000000200010)
+#define SEND_DMA_CHECK_ENABLE (TXE + 0x000000200080)
+#define SEND_DMA_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK 0x80ull
+#define SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK 0x1ull
+#define SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK 0x4ull
+#define SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK 0x20ull
+#define SEND_DMA_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK 0x8ull
+#define SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK 0x10ull
+#define SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK 0x40ull
+#define SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK 0x2ull
+#define SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK 0x20000ull
+#define SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK 0x200000ull
+#define SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK \
+ 0x100000ull
+#define SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK 0x200ull
+#define SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_SMASK 0x100ull
+#define SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK \
+ 0x80000ull
+#define SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK 0x40000ull
+#define SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK \
+ 0x8000ull
+#define SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK 0x4000ull
+#define SEND_DMA_CHECK_JOB_KEY (TXE + 0x000000200090)
+#define SEND_DMA_CHECK_OPCODE (TXE + 0x0000002000A8)
+#define SEND_DMA_CHECK_PARTITION_KEY (TXE + 0x000000200098)
+#define SEND_DMA_CHECK_SLID (TXE + 0x0000002000A0)
+#define SEND_DMA_CHECK_SLID_MASK_MASK 0xFFFFull
+#define SEND_DMA_CHECK_SLID_MASK_SHIFT 16
+#define SEND_DMA_CHECK_SLID_VALUE_MASK 0xFFFFull
+#define SEND_DMA_CHECK_SLID_VALUE_SHIFT 0
+#define SEND_DMA_CHECK_VL (TXE + 0x000000200088)
+#define SEND_DMA_CTRL (TXE + 0x000000200000)
+#define SEND_DMA_CTRL_SDMA_CLEANUP_SMASK 0x4ull
+#define SEND_DMA_CTRL_SDMA_ENABLE_SMASK 0x1ull
+#define SEND_DMA_CTRL_SDMA_HALT_SMASK 0x2ull
+#define SEND_DMA_CTRL_SDMA_INT_ENABLE_SMASK 0x8ull
+#define SEND_DMA_DESC_CNT (TXE + 0x000000200050)
+#define SEND_DMA_DESC_CNT_CNT_MASK 0xFFFFull
+#define SEND_DMA_DESC_CNT_CNT_SHIFT 0
+#define SEND_DMA_ENG_ERR_CLEAR (TXE + 0x000000200070)
+#define SEND_DMA_ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK 0x1ull
+#define SEND_DMA_ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT 18
+#define SEND_DMA_ENG_ERR_MASK (TXE + 0x000000200068)
+#define SEND_DMA_ENG_ERR_STATUS (TXE + 0x000000200060)
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK 0x8000ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK 0x4000ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK 0x10ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK 0x2ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK 0x40ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK 0x800ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK 0x1000ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK \
+ 0x40000ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK 0x400ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK \
+ 0x20000ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK 0x80ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK 0x20ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK \
+ 0x100ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK \
+ 0x10000ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK 0x8ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK 0x2000ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK 0x4ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK 0x1ull
+#define SEND_DMA_ENGINES (TXE + 0x000000000018)
+#define SEND_DMA_ERR_CLEAR (TXE + 0x000000000070)
+#define SEND_DMA_ERR_MASK (TXE + 0x000000000068)
+#define SEND_DMA_ERR_STATUS (TXE + 0x000000000060)
+#define SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK 0x2ull
+#define SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK 0x8ull
+#define SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK 0x4ull
+#define SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK 0x1ull
+#define SEND_DMA_HEAD (TXE + 0x000000200028)
+#define SEND_DMA_HEAD_ADDR (TXE + 0x000000200030)
+#define SEND_DMA_LEN_GEN (TXE + 0x000000200018)
+#define SEND_DMA_LEN_GEN_GENERATION_SHIFT 16
+#define SEND_DMA_LEN_GEN_LENGTH_SHIFT 6
+#define SEND_DMA_MEMORY (TXE + 0x0000002000B0)
+#define SEND_DMA_MEMORY_SDMA_MEMORY_CNT_SHIFT 16
+#define SEND_DMA_MEMORY_SDMA_MEMORY_INDEX_SHIFT 0
+#define SEND_DMA_MEM_SIZE (TXE + 0x000000000028)
+#define SEND_DMA_PRIORITY_THLD (TXE + 0x000000200038)
+#define SEND_DMA_RELOAD_CNT (TXE + 0x000000200048)
+#define SEND_DMA_STATUS (TXE + 0x000000200008)
+#define SEND_DMA_STATUS_ENG_CLEANED_UP_SMASK 0x200000000000000ull
+#define SEND_DMA_STATUS_ENG_HALTED_SMASK 0x100000000000000ull
+#define SEND_DMA_TAIL (TXE + 0x000000200020)
+#define SEND_EGRESS_CTXT_STATUS (TXE + 0x000000000800)
+#define SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK 0x10000ull
+#define SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT 0
+#define SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK \
+ 0x3FFFull
+#define SEND_EGRESS_ERR_CLEAR (TXE + 0x000000000090)
+#define SEND_EGRESS_ERR_INFO (TXE + 0x000000000F00)
+#define SEND_EGRESS_ERR_INFO_BAD_PKT_LEN_ERR_SMASK 0x20000ull
+#define SEND_EGRESS_ERR_INFO_BYPASS_ERR_SMASK 0x800ull
+#define SEND_EGRESS_ERR_INFO_GRH_ERR_SMASK 0x400ull
+#define SEND_EGRESS_ERR_INFO_JOB_KEY_ERR_SMASK 0x4ull
+#define SEND_EGRESS_ERR_INFO_KDETH_PACKETS_ERR_SMASK 0x1000ull
+#define SEND_EGRESS_ERR_INFO_NON_KDETH_PACKETS_ERR_SMASK 0x2000ull
+#define SEND_EGRESS_ERR_INFO_OPCODE_ERR_SMASK 0x20ull
+#define SEND_EGRESS_ERR_INFO_PARTITION_KEY_ERR_SMASK 0x8ull
+#define SEND_EGRESS_ERR_INFO_PBC_STATIC_RATE_CONTROL_ERR_SMASK 0x100000ull
+#define SEND_EGRESS_ERR_INFO_PBC_TEST_ERR_SMASK 0x10000ull
+#define SEND_EGRESS_ERR_INFO_RAW_ERR_SMASK 0x100ull
+#define SEND_EGRESS_ERR_INFO_RAW_IPV6_ERR_SMASK 0x200ull
+#define SEND_EGRESS_ERR_INFO_SLID_ERR_SMASK 0x10ull
+#define SEND_EGRESS_ERR_INFO_TOO_LONG_BYPASS_PACKETS_ERR_SMASK 0x80000ull
+#define SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK 0x40000ull
+#define SEND_EGRESS_ERR_INFO_TOO_SMALL_BYPASS_PACKETS_ERR_SMASK 0x8000ull
+#define SEND_EGRESS_ERR_INFO_TOO_SMALL_IB_PACKETS_ERR_SMASK 0x4000ull
+#define SEND_EGRESS_ERR_INFO_VL_ERR_SMASK 0x2ull
+#define SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK 0x40ull
+#define SEND_EGRESS_ERR_MASK (TXE + 0x000000000088)
+#define SEND_EGRESS_ERR_SOURCE (TXE + 0x000000000F08)
+#define SEND_EGRESS_ERR_STATUS (TXE + 0x000000000080)
+#define SEND_EGRESS_ERR_STATUS_TX_CONFIG_PARITY_ERR_SMASK 0x8000ull
+#define SEND_EGRESS_ERR_STATUS_TX_CREDIT_OVERRUN_ERR_SMASK \
+ 0x200000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_PARITY_ERR_SMASK \
+ 0x20000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK \
+ 0x800000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_EGRESS_FIFO_COR_ERR_SMASK \
+ 0x2000000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_EGRESS_FIFO_UNC_ERR_SMASK \
+ 0x200000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR_SMASK \
+ 0x8ull
+#define SEND_EGRESS_ERR_STATUS_TX_HCRC_INSERTION_ERR_SMASK \
+ 0x400000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_ILLEGAL_VL_ERR_SMASK 0x1000ull
+#define SEND_EGRESS_ERR_STATUS_TX_INCORRECT_LINK_STATE_ERR_SMASK 0x20ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_CSR_PARITY_ERR_SMASK 0x2000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO0_COR_ERR_SMASK \
+ 0x1000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR_SMASK \
+ 0x100000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO1_COR_ERR_SMASK \
+ 0x2000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR_SMASK \
+ 0x200000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO2_COR_ERR_SMASK \
+ 0x4000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR_SMASK \
+ 0x400000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO3_COR_ERR_SMASK \
+ 0x8000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR_SMASK \
+ 0x800000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO4_COR_ERR_SMASK \
+ 0x10000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR_SMASK \
+ 0x1000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO5_COR_ERR_SMASK \
+ 0x20000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR_SMASK \
+ 0x2000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO6_COR_ERR_SMASK \
+ 0x40000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR_SMASK \
+ 0x4000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO7_COR_ERR_SMASK \
+ 0x80000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR_SMASK \
+ 0x8000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO8_COR_ERR_SMASK \
+ 0x100000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR_SMASK \
+ 0x10000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LINKDOWN_ERR_SMASK 0x10ull
+#define SEND_EGRESS_ERR_STATUS_TX_PIO_LAUNCH_INTF_PARITY_ERR_SMASK 0x80ull
+#define SEND_EGRESS_ERR_STATUS_TX_PKT_INTEGRITY_MEM_COR_ERR_SMASK 0x1ull
+#define SEND_EGRESS_ERR_STATUS_TX_PKT_INTEGRITY_MEM_UNC_ERR_SMASK 0x2ull
+#define SEND_EGRESS_ERR_STATUS_TX_READ_PIO_MEMORY_COR_ERR_SMASK \
+ 0x1000000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_READ_PIO_MEMORY_CSR_UNC_ERR_SMASK \
+ 0x8000000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_READ_PIO_MEMORY_UNC_ERR_SMASK \
+ 0x100000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_READ_SDMA_MEMORY_COR_ERR_SMASK \
+ 0x800000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_READ_SDMA_MEMORY_CSR_UNC_ERR_SMASK \
+ 0x4000000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_READ_SDMA_MEMORY_UNC_ERR_SMASK \
+ 0x80000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SB_HDR_COR_ERR_SMASK 0x400000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SB_HDR_UNC_ERR_SMASK 0x40000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SBRD_CTL_CSR_PARITY_ERR_SMASK 0x4000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR_SMASK \
+ 0x800ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA0_DISALLOWED_PACKET_ERR_SMASK \
+ 0x10000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA10_DISALLOWED_PACKET_ERR_SMASK \
+ 0x4000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA11_DISALLOWED_PACKET_ERR_SMASK \
+ 0x8000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA12_DISALLOWED_PACKET_ERR_SMASK \
+ 0x10000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA13_DISALLOWED_PACKET_ERR_SMASK \
+ 0x20000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA14_DISALLOWED_PACKET_ERR_SMASK \
+ 0x40000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA15_DISALLOWED_PACKET_ERR_SMASK \
+ 0x80000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA1_DISALLOWED_PACKET_ERR_SMASK \
+ 0x20000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA2_DISALLOWED_PACKET_ERR_SMASK \
+ 0x40000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA3_DISALLOWED_PACKET_ERR_SMASK \
+ 0x80000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA4_DISALLOWED_PACKET_ERR_SMASK \
+ 0x100000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA5_DISALLOWED_PACKET_ERR_SMASK \
+ 0x200000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA6_DISALLOWED_PACKET_ERR_SMASK \
+ 0x400000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA7_DISALLOWED_PACKET_ERR_SMASK \
+ 0x800000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA8_DISALLOWED_PACKET_ERR_SMASK \
+ 0x1000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA9_DISALLOWED_PACKET_ERR_SMASK \
+ 0x2000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA_LAUNCH_INTF_PARITY_ERR_SMASK \
+ 0x100ull
+#define SEND_EGRESS_SEND_DMA_STATUS (TXE + 0x000000000E00)
+#define SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT 0
+#define SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
+ 0x3FFFull
+#define SEND_ERR_CLEAR (TXE + 0x0000000000F0)
+#define SEND_ERR_MASK (TXE + 0x0000000000E8)
+#define SEND_ERR_STATUS (TXE + 0x0000000000E0)
+#define SEND_ERR_STATUS_SEND_CSR_PARITY_ERR_SMASK 0x1ull
+#define SEND_ERR_STATUS_SEND_CSR_READ_BAD_ADDR_ERR_SMASK 0x2ull
+#define SEND_ERR_STATUS_SEND_CSR_WRITE_BAD_ADDR_ERR_SMASK 0x4ull
+#define SEND_HIGH_PRIORITY_LIMIT (TXE + 0x000000000030)
+#define SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK 0x3FFFull
+#define SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT 0
+#define SEND_HIGH_PRIORITY_LIST (TXE + 0x000000000180)
+#define SEND_LEN_CHECK0 (TXE + 0x0000000000D0)
+#define SEND_LEN_CHECK0_LEN_VL0_MASK 0xFFFull
+#define SEND_LEN_CHECK0_LEN_VL1_SHIFT 12
+#define SEND_LEN_CHECK1 (TXE + 0x0000000000D8)
+#define SEND_LEN_CHECK1_LEN_VL15_MASK 0xFFFull
+#define SEND_LEN_CHECK1_LEN_VL15_SHIFT 48
+#define SEND_LEN_CHECK1_LEN_VL4_MASK 0xFFFull
+#define SEND_LEN_CHECK1_LEN_VL5_SHIFT 12
+#define SEND_LOW_PRIORITY_LIST (TXE + 0x000000000100)
+#define SEND_LOW_PRIORITY_LIST_VL_MASK 0x7ull
+#define SEND_LOW_PRIORITY_LIST_VL_SHIFT 16
+#define SEND_LOW_PRIORITY_LIST_WEIGHT_MASK 0xFFull
+#define SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT 0
+#define SEND_PIO_ERR_CLEAR (TXE + 0x000000000050)
+#define SEND_PIO_ERR_CLEAR_PIO_INIT_SM_IN_ERR_SMASK 0x20000ull
+#define SEND_PIO_ERR_MASK (TXE + 0x000000000048)
+#define SEND_PIO_ERR_STATUS (TXE + 0x000000000040)
+#define SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
+ 0x1000000ull
+#define SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK 0x8000ull
+#define SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK 0x4ull
+#define SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
+ 0x100000000ull
+#define SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK 0x100000ull
+#define SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK 0x80000ull
+#define SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK 0x20000ull
+#define SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
+ 0x200000000ull
+#define SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK 0x20ull
+#define SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
+ 0x400000000ull
+#define SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK 0x40ull
+#define SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK \
+ 0x800000000ull
+#define SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK 0x200ull
+#define SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK 0x40000ull
+#define SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK 0x10000000ull
+#define SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK 0x10000ull
+#define SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK 0x20000000ull
+#define SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK 0x8ull
+#define SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK 0x10ull
+#define SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK 0x80ull
+#define SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
+ 0x100ull
+#define SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK 0x400ull
+#define SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK 0x400000ull
+#define SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK 0x8000000ull
+#define SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK 0x4000000ull
+#define SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK 0x2000000ull
+#define SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK 0x2000ull
+#define SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK 0x800ull
+#define SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK 0x4000ull
+#define SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK 0x1000ull
+#define SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK 0x2ull
+#define SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK 0x1ull
+#define SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK 0x200000ull
+#define SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK 0x800000ull
+#define SEND_PIO_INIT_CTXT (TXE + 0x000000000038)
+#define SEND_PIO_INIT_CTXT_PIO_ALL_CTXT_INIT_SMASK 0x1ull
+#define SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_MASK 0xFFull
+#define SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_SHIFT 8
+#define SEND_PIO_INIT_CTXT_PIO_INIT_ERR_SMASK 0x8ull
+#define SEND_PIO_INIT_CTXT_PIO_INIT_IN_PROGRESS_SMASK 0x4ull
+#define SEND_PIO_INIT_CTXT_PIO_SINGLE_CTXT_INIT_SMASK 0x2ull
+#define SEND_PIO_MEM_SIZE (TXE + 0x000000000020)
+#define SEND_SC2VLT0 (TXE + 0x0000000000B0)
+#define SEND_SC2VLT0_SC0_SHIFT 0
+#define SEND_SC2VLT0_SC1_SHIFT 8
+#define SEND_SC2VLT0_SC2_SHIFT 16
+#define SEND_SC2VLT0_SC3_SHIFT 24
+#define SEND_SC2VLT0_SC4_SHIFT 32
+#define SEND_SC2VLT0_SC5_SHIFT 40
+#define SEND_SC2VLT0_SC6_SHIFT 48
+#define SEND_SC2VLT0_SC7_SHIFT 56
+#define SEND_SC2VLT1 (TXE + 0x0000000000B8)
+#define SEND_SC2VLT1_SC10_SHIFT 16
+#define SEND_SC2VLT1_SC11_SHIFT 24
+#define SEND_SC2VLT1_SC12_SHIFT 32
+#define SEND_SC2VLT1_SC13_SHIFT 40
+#define SEND_SC2VLT1_SC14_SHIFT 48
+#define SEND_SC2VLT1_SC15_SHIFT 56
+#define SEND_SC2VLT1_SC8_SHIFT 0
+#define SEND_SC2VLT1_SC9_SHIFT 8
+#define SEND_SC2VLT2 (TXE + 0x0000000000C0)
+#define SEND_SC2VLT2_SC16_SHIFT 0
+#define SEND_SC2VLT2_SC17_SHIFT 8
+#define SEND_SC2VLT2_SC18_SHIFT 16
+#define SEND_SC2VLT2_SC19_SHIFT 24
+#define SEND_SC2VLT2_SC20_SHIFT 32
+#define SEND_SC2VLT2_SC21_SHIFT 40
+#define SEND_SC2VLT2_SC22_SHIFT 48
+#define SEND_SC2VLT2_SC23_SHIFT 56
+#define SEND_SC2VLT3 (TXE + 0x0000000000C8)
+#define SEND_SC2VLT3_SC24_SHIFT 0
+#define SEND_SC2VLT3_SC25_SHIFT 8
+#define SEND_SC2VLT3_SC26_SHIFT 16
+#define SEND_SC2VLT3_SC27_SHIFT 24
+#define SEND_SC2VLT3_SC28_SHIFT 32
+#define SEND_SC2VLT3_SC29_SHIFT 40
+#define SEND_SC2VLT3_SC30_SHIFT 48
+#define SEND_SC2VLT3_SC31_SHIFT 56
+#define SEND_STATIC_RATE_CONTROL (TXE + 0x0000000000A8)
+#define SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT 0
+#define SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK 0xFFFFull
+#define PCIE_CFG_REG_PL2 (PCIE + 0x000000000708)
+#define PCIE_CFG_REG_PL3 (PCIE + 0x00000000070C)
+#define PCIE_CFG_REG_PL3_L1_ENT_LATENCY_SHIFT 27
+#define PCIE_CFG_REG_PL3_L1_ENT_LATENCY_SMASK 0x38000000
+#define PCIE_CFG_REG_PL102 (PCIE + 0x000000000898)
+#define PCIE_CFG_REG_PL102_GEN3_EQ_POST_CURSOR_PSET_SHIFT 12
+#define PCIE_CFG_REG_PL102_GEN3_EQ_CURSOR_PSET_SHIFT 6
+#define PCIE_CFG_REG_PL102_GEN3_EQ_PRE_CURSOR_PSET_SHIFT 0
+#define PCIE_CFG_REG_PL103 (PCIE + 0x00000000089C)
+#define PCIE_CFG_REG_PL105 (PCIE + 0x0000000008A4)
+#define PCIE_CFG_REG_PL105_GEN3_EQ_VIOLATE_COEF_RULES_SMASK 0x1ull
+#define PCIE_CFG_REG_PL2_LOW_PWR_ENT_CNT_SHIFT 24
+#define PCIE_CFG_REG_PL100 (PCIE + 0x000000000890)
+#define PCIE_CFG_REG_PL100_EQ_EIEOS_CNT_SMASK 0x400ull
+#define PCIE_CFG_REG_PL101 (PCIE + 0x000000000894)
+#define PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_FS_SHIFT 6
+#define PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_LF_SHIFT 0
+#define PCIE_CFG_REG_PL106 (PCIE + 0x0000000008A8)
+#define PCIE_CFG_REG_PL106_GEN3_EQ_PSET_REQ_VEC_SHIFT 8
+#define PCIE_CFG_REG_PL106_GEN3_EQ_EVAL2MS_DISABLE_SMASK 0x20ull
+#define PCIE_CFG_REG_PL106_GEN3_EQ_PHASE23_EXIT_MODE_SMASK 0x10ull
+#define CCE_INT_BLOCKED (CCE + 0x000000110C00)
+#define SEND_DMA_IDLE_CNT (TXE + 0x000000200040)
+#define SEND_DMA_DESC_FETCHED_CNT (TXE + 0x000000200058)
+#define CCE_MSIX_PBA_OFFSET 0x0110000
+
+#endif /* DEF_CHIP_REG */
diff --git a/drivers/infiniband/hw/hfi1/common.h b/drivers/infiniband/hw/hfi1/common.h
new file mode 100644
index 000000000000..8abc902b96f3
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/common.h
@@ -0,0 +1,304 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2015 - 2020 Intel Corporation.
+ */
+
+#ifndef _COMMON_H
+#define _COMMON_H
+
+#include <rdma/hfi/hfi1_user.h>
+
+/*
+ * This file contains defines, structures, etc. that are used
+ * to communicate between kernel and user code.
+ */
+
+/* version of protocol header (known to chip also). In the long run,
+ * we should be able to generate and accept a range of version numbers;
+ * for now we only accept one, and it's compiled in.
+ */
+#define IPS_PROTO_VERSION 2
+
+/*
+ * These are compile-time constants that you may want to enable or disable
+ * while debugging code or performance problems.
+ * HFI1_VERBOSE_TRACING: define as 1 for additional tracing in fast path
+ * code.
+ * HFI1_TRACE_REGWRITES: define as 1 to trace register writes in fast path
+ * code.
+ * _HFI1_TRACING: define as 0 to remove all tracing from a compilation
+ * unit.
+ */
+
+/* driver/hw feature set bitmask */
+#define HFI1_CAP_USER_SHIFT 24
+#define HFI1_CAP_MASK ((1UL << HFI1_CAP_USER_SHIFT) - 1)
+/* locked flag - if set, only HFI1_CAP_WRITABLE_MASK bits can be set */
+#define HFI1_CAP_LOCKED_SHIFT 63
+#define HFI1_CAP_LOCKED_MASK 0x1ULL
+#define HFI1_CAP_LOCKED_SMASK (HFI1_CAP_LOCKED_MASK << HFI1_CAP_LOCKED_SHIFT)
+/* extra bits used between kernel and user processes */
+#define HFI1_CAP_MISC_SHIFT (HFI1_CAP_USER_SHIFT * 2)
+#define HFI1_CAP_MISC_MASK ((1ULL << (HFI1_CAP_LOCKED_SHIFT - \
+ HFI1_CAP_MISC_SHIFT)) - 1)
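+/*
+ * Editor's note (illustrative sketch, not part of the original patch):
+ * with HFI1_CAP_USER_SHIFT = 24, HFI1_CAP_MISC_SHIFT = 48 and
+ * HFI1_CAP_LOCKED_SHIFT = 63, the 64-bit hfi1_cap_mask is partitioned as
+ *   bits  0..23  kernel capability bits
+ *   bits 24..47  user capability bits (kernel bits << HFI1_CAP_USER_SHIFT)
+ *   bits 48..62  misc bits shared between kernel and user
+ *   bit      63  locked flag
+ * For example, HFI1_CAP_USET(SDMA) below ORs (HFI1_CAP_SDMA << 24) into
+ * hfi1_cap_mask and so touches only the user half.
+ */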
+
+#define HFI1_CAP_KSET(cap) ({ hfi1_cap_mask |= HFI1_CAP_##cap; hfi1_cap_mask; })
+#define HFI1_CAP_KCLEAR(cap) \
+ ({ \
+ hfi1_cap_mask &= ~HFI1_CAP_##cap; \
+ hfi1_cap_mask; \
+ })
+#define HFI1_CAP_USET(cap) \
+ ({ \
+ hfi1_cap_mask |= (HFI1_CAP_##cap << HFI1_CAP_USER_SHIFT); \
+ hfi1_cap_mask; \
+ })
+#define HFI1_CAP_UCLEAR(cap) \
+ ({ \
+ hfi1_cap_mask &= ~(HFI1_CAP_##cap << HFI1_CAP_USER_SHIFT); \
+ hfi1_cap_mask; \
+ })
+#define HFI1_CAP_SET(cap) \
+ ({ \
+ hfi1_cap_mask |= (HFI1_CAP_##cap | (HFI1_CAP_##cap << \
+ HFI1_CAP_USER_SHIFT)); \
+ hfi1_cap_mask; \
+ })
+#define HFI1_CAP_CLEAR(cap) \
+ ({ \
+ hfi1_cap_mask &= ~(HFI1_CAP_##cap | \
+ (HFI1_CAP_##cap << HFI1_CAP_USER_SHIFT)); \
+ hfi1_cap_mask; \
+ })
+#define HFI1_CAP_LOCK() \
+ ({ hfi1_cap_mask |= HFI1_CAP_LOCKED_SMASK; hfi1_cap_mask; })
+#define HFI1_CAP_LOCKED() (!!(hfi1_cap_mask & HFI1_CAP_LOCKED_SMASK))
+/*
+ * The set of capability bits that can be changed after initial load.
+ * This set is the same for kernel and user contexts. However, for
+ * user contexts, the set can be further filtered by using the
+ * HFI1_CAP_RESERVED_MASK bits.
+ */
+#define HFI1_CAP_WRITABLE_MASK (HFI1_CAP_SDMA_AHG | \
+ HFI1_CAP_HDRSUPP | \
+ HFI1_CAP_MULTI_PKT_EGR | \
+ HFI1_CAP_NODROP_RHQ_FULL | \
+ HFI1_CAP_NODROP_EGR_FULL | \
+ HFI1_CAP_ALLOW_PERM_JKEY | \
+ HFI1_CAP_STATIC_RATE_CTRL | \
+ HFI1_CAP_PRINT_UNIMPL | \
+ HFI1_CAP_TID_UNMAP | \
+ HFI1_CAP_OPFN)
+/*
+ * A set of capability bits that are "global" and are not allowed to be
+ * set in the user bitmask.
+ */
+#define HFI1_CAP_RESERVED_MASK ((HFI1_CAP_SDMA | \
+ HFI1_CAP_USE_SDMA_HEAD | \
+ HFI1_CAP_EXTENDED_PSN | \
+ HFI1_CAP_PRINT_UNIMPL | \
+ HFI1_CAP_NO_INTEGRITY | \
+ HFI1_CAP_PKEY_CHECK | \
+ HFI1_CAP_TID_RDMA | \
+ HFI1_CAP_OPFN | \
+ HFI1_CAP_AIP) << \
+ HFI1_CAP_USER_SHIFT)
+/*
+ * Set of capabilities that must be enabled for the kernel context in
+ * order to also be allowed for user contexts.
+ */
+#define HFI1_CAP_MUST_HAVE_KERN (HFI1_CAP_STATIC_RATE_CTRL)
+/* Default enabled capabilities (both kernel and user) */
+#define HFI1_CAP_MASK_DEFAULT (HFI1_CAP_HDRSUPP | \
+ HFI1_CAP_NODROP_RHQ_FULL | \
+ HFI1_CAP_NODROP_EGR_FULL | \
+ HFI1_CAP_SDMA | \
+ HFI1_CAP_PRINT_UNIMPL | \
+ HFI1_CAP_STATIC_RATE_CTRL | \
+ HFI1_CAP_PKEY_CHECK | \
+ HFI1_CAP_MULTI_PKT_EGR | \
+ HFI1_CAP_EXTENDED_PSN | \
+ HFI1_CAP_AIP | \
+ ((HFI1_CAP_HDRSUPP | \
+ HFI1_CAP_MULTI_PKT_EGR | \
+ HFI1_CAP_STATIC_RATE_CTRL | \
+ HFI1_CAP_PKEY_CHECK | \
+ HFI1_CAP_EARLY_CREDIT_RETURN) << \
+ HFI1_CAP_USER_SHIFT))
+/*
+ * A bitmask of kernel/global capabilities that should be communicated
+ * to user level processes.
+ */
+#define HFI1_CAP_K2U (HFI1_CAP_SDMA | \
+ HFI1_CAP_EXTENDED_PSN | \
+ HFI1_CAP_PKEY_CHECK | \
+ HFI1_CAP_NO_INTEGRITY)
+
+#define HFI1_USER_SWVERSION ((HFI1_USER_SWMAJOR << HFI1_SWMAJOR_SHIFT) | \
+ HFI1_USER_SWMINOR)
+
+/*
+ * The next set of defines is for packet headers, and for chip register
+ * and memory bits that are visible to and/or used by user-mode software.
+ */
+
+/*
+ * Receive Header Flags
+ */
+#define RHF_PKT_LEN_SHIFT 0
+#define RHF_PKT_LEN_MASK 0xfffull
+#define RHF_PKT_LEN_SMASK (RHF_PKT_LEN_MASK << RHF_PKT_LEN_SHIFT)
+
+#define RHF_RCV_TYPE_SHIFT 12
+#define RHF_RCV_TYPE_MASK 0x7ull
+#define RHF_RCV_TYPE_SMASK (RHF_RCV_TYPE_MASK << RHF_RCV_TYPE_SHIFT)
+
+#define RHF_USE_EGR_BFR_SHIFT 15
+#define RHF_USE_EGR_BFR_MASK 0x1ull
+#define RHF_USE_EGR_BFR_SMASK (RHF_USE_EGR_BFR_MASK << RHF_USE_EGR_BFR_SHIFT)
+
+#define RHF_EGR_INDEX_SHIFT 16
+#define RHF_EGR_INDEX_MASK 0x7ffull
+#define RHF_EGR_INDEX_SMASK (RHF_EGR_INDEX_MASK << RHF_EGR_INDEX_SHIFT)
+
+#define RHF_DC_INFO_SHIFT 27
+#define RHF_DC_INFO_MASK 0x1ull
+#define RHF_DC_INFO_SMASK (RHF_DC_INFO_MASK << RHF_DC_INFO_SHIFT)
+
+#define RHF_RCV_SEQ_SHIFT 28
+#define RHF_RCV_SEQ_MASK 0xfull
+#define RHF_RCV_SEQ_SMASK (RHF_RCV_SEQ_MASK << RHF_RCV_SEQ_SHIFT)
+
+#define RHF_EGR_OFFSET_SHIFT 32
+#define RHF_EGR_OFFSET_MASK 0xfffull
+#define RHF_EGR_OFFSET_SMASK (RHF_EGR_OFFSET_MASK << RHF_EGR_OFFSET_SHIFT)
+#define RHF_HDRQ_OFFSET_SHIFT 44
+#define RHF_HDRQ_OFFSET_MASK 0x1ffull
+#define RHF_HDRQ_OFFSET_SMASK (RHF_HDRQ_OFFSET_MASK << RHF_HDRQ_OFFSET_SHIFT)
+#define RHF_K_HDR_LEN_ERR (0x1ull << 53)
+#define RHF_DC_UNC_ERR (0x1ull << 54)
+#define RHF_DC_ERR (0x1ull << 55)
+#define RHF_RCV_TYPE_ERR_SHIFT 56
+#define RHF_RCV_TYPE_ERR_MASK 0x7ul
+#define RHF_RCV_TYPE_ERR_SMASK (RHF_RCV_TYPE_ERR_MASK << RHF_RCV_TYPE_ERR_SHIFT)
+#define RHF_TID_ERR (0x1ull << 59)
+#define RHF_LEN_ERR (0x1ull << 60)
+#define RHF_ECC_ERR (0x1ull << 61)
+#define RHF_RESERVED (0x1ull << 62)
+#define RHF_ICRC_ERR (0x1ull << 63)
+
+#define RHF_ERROR_SMASK 0xffe0000000000000ull /* bits 63:53 */
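+/*
+ * Editor's note (worked example, not part of the original patch): each RHF
+ * field above follows the same convention, value = (rhf >> _SHIFT) & _MASK
+ * with _SMASK = _MASK << _SHIFT, mirrored by the rhf_*() helpers at the end
+ * of this file.  RHF_ERROR_SMASK = 0xffe0000000000000 covers bits 63:53,
+ * i.e. RHF_K_HDR_LEN_ERR (bit 53) through RHF_ICRC_ERR (bit 63), including
+ * the 3-bit RHF_RCV_TYPE_ERR field and the reserved bit 62.
+ */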
+
+/* RHF receive types */
+#define RHF_RCV_TYPE_EXPECTED 0
+#define RHF_RCV_TYPE_EAGER 1
+#define RHF_RCV_TYPE_IB 2 /* normal IB, IB Raw, or IPv6 */
+#define RHF_RCV_TYPE_ERROR 3
+#define RHF_RCV_TYPE_BYPASS 4
+#define RHF_RCV_TYPE_INVALID5 5
+#define RHF_RCV_TYPE_INVALID6 6
+#define RHF_RCV_TYPE_INVALID7 7
+
+/* RHF receive type error - expected packet errors */
+#define RHF_RTE_EXPECTED_FLOW_SEQ_ERR 0x2
+#define RHF_RTE_EXPECTED_FLOW_GEN_ERR 0x4
+
+/* RHF receive type error - eager packet errors */
+#define RHF_RTE_EAGER_NO_ERR 0x0
+
+/* RHF receive type error - IB packet errors */
+#define RHF_RTE_IB_NO_ERR 0x0
+
+/* RHF receive type error - error packet errors */
+#define RHF_RTE_ERROR_NO_ERR 0x0
+#define RHF_RTE_ERROR_OP_CODE_ERR 0x1
+#define RHF_RTE_ERROR_KHDR_MIN_LEN_ERR 0x2
+#define RHF_RTE_ERROR_KHDR_HCRC_ERR 0x3
+#define RHF_RTE_ERROR_KHDR_KVER_ERR 0x4
+#define RHF_RTE_ERROR_CONTEXT_ERR 0x5
+#define RHF_RTE_ERROR_KHDR_TID_ERR 0x6
+
+/* RHF receive type error - bypass packet errors */
+#define RHF_RTE_BYPASS_NO_ERR 0x0
+
+/* MAX RcvSEQ */
+#define RHF_MAX_SEQ 13
+
+/* IB - LRH header constants */
+#define HFI1_LRH_GRH 0x0003 /* 1st word of IB LRH - next header: GRH */
+#define HFI1_LRH_BTH 0x0002 /* 1st word of IB LRH - next header: BTH */
+
+/* misc. */
+#define SC15_PACKET 0xF
+#define SIZE_OF_CRC 1
+#define SIZE_OF_LT 1
+#define MAX_16B_PADDING 12 /* CRC = 4, LT = 1, Pad = 0 to 7 bytes */
+
+#define LIM_MGMT_P_KEY 0x7FFF
+#define FULL_MGMT_P_KEY 0xFFFF
+
+#define DEFAULT_P_KEY LIM_MGMT_P_KEY
+
+#define HFI1_PSM_IOC_BASE_SEQ 0x0
+
+/* Number of BTH.PSN bits used for sequence number in expected rcvs */
+#define HFI1_KDETH_BTH_SEQ_SHIFT 11
+#define HFI1_KDETH_BTH_SEQ_MASK (BIT(HFI1_KDETH_BTH_SEQ_SHIFT) - 1)
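+/*
+ * Editor's note: with HFI1_KDETH_BTH_SEQ_SHIFT = 11 the mask above works
+ * out to BIT(11) - 1 = 0x7FF, i.e. the low 11 bits of BTH.PSN carry the
+ * expected-receive sequence number.
+ */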
+
+static inline __u64 rhf_to_cpu(const __le32 *rbuf)
+{
+ return __le64_to_cpu(*((__le64 *)rbuf));
+}
+
+static inline u64 rhf_err_flags(u64 rhf)
+{
+ return rhf & RHF_ERROR_SMASK;
+}
+
+static inline u32 rhf_rcv_type(u64 rhf)
+{
+ return (rhf >> RHF_RCV_TYPE_SHIFT) & RHF_RCV_TYPE_MASK;
+}
+
+static inline u32 rhf_rcv_type_err(u64 rhf)
+{
+ return (rhf >> RHF_RCV_TYPE_ERR_SHIFT) & RHF_RCV_TYPE_ERR_MASK;
+}
+
+/* return size is in bytes, not DWORDs */
+static inline u32 rhf_pkt_len(u64 rhf)
+{
+ return ((rhf & RHF_PKT_LEN_SMASK) >> RHF_PKT_LEN_SHIFT) << 2;
+}
+
+static inline u32 rhf_egr_index(u64 rhf)
+{
+ return (rhf >> RHF_EGR_INDEX_SHIFT) & RHF_EGR_INDEX_MASK;
+}
+
+static inline u32 rhf_rcv_seq(u64 rhf)
+{
+ return (rhf >> RHF_RCV_SEQ_SHIFT) & RHF_RCV_SEQ_MASK;
+}
+
+/* returned offset is in DWORDS */
+static inline u32 rhf_hdrq_offset(u64 rhf)
+{
+ return (rhf >> RHF_HDRQ_OFFSET_SHIFT) & RHF_HDRQ_OFFSET_MASK;
+}
+
+static inline u64 rhf_use_egr_bfr(u64 rhf)
+{
+ return rhf & RHF_USE_EGR_BFR_SMASK;
+}
+
+static inline u64 rhf_dc_info(u64 rhf)
+{
+ return rhf & RHF_DC_INFO_SMASK;
+}
+
+static inline u32 rhf_egr_buf_offset(u64 rhf)
+{
+ return (rhf >> RHF_EGR_OFFSET_SHIFT) & RHF_EGR_OFFSET_MASK;
+}
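+/*
+ * Editor's note (illustrative sketch, not part of the original patch): the
+ * accessors above compose to decode a single 64-bit RHF word, e.g.
+ *
+ *	u64 rhf = rhf_to_cpu(rhf_addr);
+ *
+ *	if (rhf_err_flags(rhf))
+ *		handle_rhf_errors(rhf);
+ *	else if (rhf_use_egr_bfr(rhf))
+ *		consume_eager(rhf_egr_index(rhf), rhf_egr_buf_offset(rhf),
+ *			      rhf_pkt_len(rhf));
+ *
+ * where rhf_addr points at the RHF in the receive header queue and
+ * handle_rhf_errors()/consume_eager() are hypothetical consumers.
+ */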
+#endif /* _COMMON_H */
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
new file mode 100644
index 000000000000..ac37ab7f8995
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -0,0 +1,1335 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015-2018 Intel Corporation.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ratelimit.h>
+#include <linux/fault-inject.h>
+
+#include "hfi.h"
+#include "trace.h"
+#include "debugfs.h"
+#include "device.h"
+#include "qp.h"
+#include "sdma.h"
+#include "fault.h"
+
+static struct dentry *hfi1_dbg_root;
+
+#define private2dd(file) (file_inode(file)->i_private)
+#define private2ppd(file) (file_inode(file)->i_private)
+
+static void *_opcode_stats_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct hfi1_opcode_stats_perctx *opstats;
+
+ if (*pos >= ARRAY_SIZE(opstats->stats))
+ return NULL;
+ return pos;
+}
+
+static void *_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct hfi1_opcode_stats_perctx *opstats;
+
+ ++*pos;
+ if (*pos >= ARRAY_SIZE(opstats->stats))
+ return NULL;
+ return pos;
+}
+
+static void _opcode_stats_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static int opcode_stats_show(struct seq_file *s, u8 i, u64 packets, u64 bytes)
+{
+ if (!packets && !bytes)
+ return SEQ_SKIP;
+ seq_printf(s, "%02x %llu/%llu\n", i,
+ (unsigned long long)packets,
+ (unsigned long long)bytes);
+
+ return 0;
+}
+
+static int _opcode_stats_seq_show(struct seq_file *s, void *v)
+{
+ loff_t *spos = v;
+ loff_t i = *spos, j;
+ u64 n_packets = 0, n_bytes = 0;
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+ struct hfi1_ctxtdata *rcd;
+
+ for (j = 0; j < dd->first_dyn_alloc_ctxt; j++) {
+ rcd = hfi1_rcd_get_by_index(dd, j);
+ if (rcd) {
+ n_packets += rcd->opstats->stats[i].n_packets;
+ n_bytes += rcd->opstats->stats[i].n_bytes;
+ }
+ hfi1_rcd_put(rcd);
+ }
+ return opcode_stats_show(s, i, n_packets, n_bytes);
+}
+
+DEBUGFS_SEQ_FILE_OPS(opcode_stats);
+DEBUGFS_SEQ_FILE_OPEN(opcode_stats)
+DEBUGFS_FILE_OPS(opcode_stats);
+
+static void *_tx_opcode_stats_seq_start(struct seq_file *s, loff_t *pos)
+{
+ return _opcode_stats_seq_start(s, pos);
+}
+
+static void *_tx_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ return _opcode_stats_seq_next(s, v, pos);
+}
+
+static void _tx_opcode_stats_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static int _tx_opcode_stats_seq_show(struct seq_file *s, void *v)
+{
+ loff_t *spos = v;
+ loff_t i = *spos;
+ int j;
+ u64 n_packets = 0, n_bytes = 0;
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+
+ for_each_possible_cpu(j) {
+ struct hfi1_opcode_stats_perctx *s =
+ per_cpu_ptr(dd->tx_opstats, j);
+ n_packets += s->stats[i].n_packets;
+ n_bytes += s->stats[i].n_bytes;
+ }
+ return opcode_stats_show(s, i, n_packets, n_bytes);
+}
+
+DEBUGFS_SEQ_FILE_OPS(tx_opcode_stats);
+DEBUGFS_SEQ_FILE_OPEN(tx_opcode_stats)
+DEBUGFS_FILE_OPS(tx_opcode_stats);
+
+static void *_ctx_stats_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+
+ if (!*pos)
+ return SEQ_START_TOKEN;
+ if (*pos >= dd->first_dyn_alloc_ctxt)
+ return NULL;
+ return pos;
+}
+
+static void *_ctx_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+
+ if (v == SEQ_START_TOKEN)
+ return pos;
+
+ ++*pos;
+ if (*pos >= dd->first_dyn_alloc_ctxt)
+ return NULL;
+ return pos;
+}
+
+static void _ctx_stats_seq_stop(struct seq_file *s, void *v)
+{
+ /* nothing allocated */
+}
+
+static int _ctx_stats_seq_show(struct seq_file *s, void *v)
+{
+ loff_t *spos;
+ loff_t i, j;
+ u64 n_packets = 0;
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+ struct hfi1_ctxtdata *rcd;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(s, "Ctx:npkts\n");
+ return 0;
+ }
+
+ spos = v;
+ i = *spos;
+
+ rcd = hfi1_rcd_get_by_index_safe(dd, i);
+ if (!rcd)
+ return SEQ_SKIP;
+
+ for (j = 0; j < ARRAY_SIZE(rcd->opstats->stats); j++)
+ n_packets += rcd->opstats->stats[j].n_packets;
+
+ hfi1_rcd_put(rcd);
+
+ if (!n_packets)
+ return SEQ_SKIP;
+
+ seq_printf(s, " %llu:%llu\n", i, n_packets);
+ return 0;
+}
+
+DEBUGFS_SEQ_FILE_OPS(ctx_stats);
+DEBUGFS_SEQ_FILE_OPEN(ctx_stats)
+DEBUGFS_FILE_OPS(ctx_stats);
+
+static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
+ __acquires(RCU)
+{
+ struct rvt_qp_iter *iter;
+ loff_t n = *pos;
+
+ iter = rvt_qp_iter_init(s->private, 0, NULL);
+
+ /* stop calls rcu_read_unlock */
+ rcu_read_lock();
+
+ if (!iter)
+ return NULL;
+
+ do {
+ if (rvt_qp_iter_next(iter)) {
+ kfree(iter);
+ return NULL;
+ }
+ } while (n--);
+
+ return iter;
+}
+
+static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
+ loff_t *pos)
+ __must_hold(RCU)
+{
+ struct rvt_qp_iter *iter = iter_ptr;
+
+ (*pos)++;
+
+ if (rvt_qp_iter_next(iter)) {
+ kfree(iter);
+ return NULL;
+ }
+
+ return iter;
+}
+
+static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
+ __releases(RCU)
+{
+ rcu_read_unlock();
+}
+
+static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr)
+{
+ struct rvt_qp_iter *iter = iter_ptr;
+
+ if (!iter)
+ return 0;
+
+ qp_iter_print(s, iter);
+
+ return 0;
+}
+
+DEBUGFS_SEQ_FILE_OPS(qp_stats);
+DEBUGFS_SEQ_FILE_OPEN(qp_stats)
+DEBUGFS_FILE_OPS(qp_stats);
+
+static void *_sdes_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct hfi1_ibdev *ibd;
+ struct hfi1_devdata *dd;
+
+ ibd = (struct hfi1_ibdev *)s->private;
+ dd = dd_from_dev(ibd);
+ if (!dd->per_sdma || *pos >= dd->num_sdma)
+ return NULL;
+ return pos;
+}
+
+static void *_sdes_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+
+ ++*pos;
+ if (!dd->per_sdma || *pos >= dd->num_sdma)
+ return NULL;
+ return pos;
+}
+
+static void _sdes_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static int _sdes_seq_show(struct seq_file *s, void *v)
+{
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+ loff_t *spos = v;
+ loff_t i = *spos;
+
+ sdma_seqfile_dump_sde(s, &dd->per_sdma[i]);
+ return 0;
+}
+
+DEBUGFS_SEQ_FILE_OPS(sdes);
+DEBUGFS_SEQ_FILE_OPEN(sdes)
+DEBUGFS_FILE_OPS(sdes);
+
+static void *_rcds_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct hfi1_ibdev *ibd;
+ struct hfi1_devdata *dd;
+
+ ibd = (struct hfi1_ibdev *)s->private;
+ dd = dd_from_dev(ibd);
+ if (!dd->rcd || *pos >= dd->n_krcv_queues)
+ return NULL;
+ return pos;
+}
+
+static void *_rcds_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+
+ ++*pos;
+ if (!dd->rcd || *pos >= dd->num_rcv_contexts)
+ return NULL;
+ return pos;
+}
+
+static void _rcds_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static int _rcds_seq_show(struct seq_file *s, void *v)
+{
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+ struct hfi1_ctxtdata *rcd;
+ loff_t *spos = v;
+ loff_t i = *spos;
+
+ rcd = hfi1_rcd_get_by_index_safe(dd, i);
+ if (rcd)
+ seqfile_dump_rcd(s, rcd);
+ hfi1_rcd_put(rcd);
+ return 0;
+}
+
+DEBUGFS_SEQ_FILE_OPS(rcds);
+DEBUGFS_SEQ_FILE_OPEN(rcds)
+DEBUGFS_FILE_OPS(rcds);
+
+static void *_pios_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct hfi1_ibdev *ibd;
+ struct hfi1_devdata *dd;
+
+ ibd = (struct hfi1_ibdev *)s->private;
+ dd = dd_from_dev(ibd);
+ if (!dd->send_contexts || *pos >= dd->num_send_contexts)
+ return NULL;
+ return pos;
+}
+
+static void *_pios_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+
+ ++*pos;
+ if (!dd->send_contexts || *pos >= dd->num_send_contexts)
+ return NULL;
+ return pos;
+}
+
+static void _pios_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static int _pios_seq_show(struct seq_file *s, void *v)
+{
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+ struct send_context_info *sci;
+ loff_t *spos = v;
+ loff_t i = *spos;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->sc_lock, flags);
+ sci = &dd->send_contexts[i];
+ if (sci && sci->type != SC_USER && sci->allocated && sci->sc)
+ seqfile_dump_sci(s, i, sci);
+ spin_unlock_irqrestore(&dd->sc_lock, flags);
+ return 0;
+}
+
+DEBUGFS_SEQ_FILE_OPS(pios);
+DEBUGFS_SEQ_FILE_OPEN(pios)
+DEBUGFS_FILE_OPS(pios);
+
+/* read the per-device counters */
+static ssize_t dev_counters_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u64 *counters;
+ size_t avail;
+ struct hfi1_devdata *dd;
+ ssize_t rval;
+
+ dd = private2dd(file);
+ avail = hfi1_read_cntrs(dd, NULL, &counters);
+ rval = simple_read_from_buffer(buf, count, ppos, counters, avail);
+ return rval;
+}
+
+/* read the per-device counter names */
+static ssize_t dev_names_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *names;
+ size_t avail;
+ struct hfi1_devdata *dd;
+ ssize_t rval;
+
+ dd = private2dd(file);
+ avail = hfi1_read_cntrs(dd, &names, NULL);
+ rval = simple_read_from_buffer(buf, count, ppos, names, avail);
+ return rval;
+}
+
+struct counter_info {
+ char *name;
+ const struct file_operations ops;
+};
+
+/*
+ * Could use file_inode(file)->i_ino to figure out which file,
+ * instead of separate routine for each, but for now, this works...
+ */
+
+/* read the per-port names (same for each port) */
+static ssize_t portnames_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *names;
+ size_t avail;
+ struct hfi1_devdata *dd;
+ ssize_t rval;
+
+ dd = private2dd(file);
+ avail = hfi1_read_portcntrs(dd->pport, &names, NULL);
+ rval = simple_read_from_buffer(buf, count, ppos, names, avail);
+ return rval;
+}
+
+/* read the per-port counters */
+static ssize_t portcntrs_debugfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u64 *counters;
+ size_t avail;
+ struct hfi1_pportdata *ppd;
+ ssize_t rval;
+
+ ppd = private2ppd(file);
+ avail = hfi1_read_portcntrs(ppd, NULL, &counters);
+ rval = simple_read_from_buffer(buf, count, ppos, counters, avail);
+ return rval;
+}
+
+static void check_dyn_flag(u64 scratch0, char *p, int size, int *used,
+ int this_hfi, int hfi, u32 flag, const char *what)
+{
+ u32 mask;
+
+ mask = flag << (hfi ? CR_DYN_SHIFT : 0);
+ if (scratch0 & mask) {
+ *used += scnprintf(p + *used, size - *used,
+ " 0x%08x - HFI%d %s in use, %s device\n",
+ mask, hfi, what,
+ this_hfi == hfi ? "this" : "other");
+ }
+}
+
+static ssize_t asic_flags_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hfi1_pportdata *ppd;
+ struct hfi1_devdata *dd;
+ u64 scratch0;
+ char *tmp;
+ int ret = 0;
+ int size;
+ int used;
+ int i;
+
+ ppd = private2ppd(file);
+ dd = ppd->dd;
+ size = PAGE_SIZE;
+ used = 0;
+ tmp = kmalloc(size, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
+ used += scnprintf(tmp + used, size - used,
+ "Resource flags: 0x%016llx\n", scratch0);
+
+ /* check permanent flag */
+ if (scratch0 & CR_THERM_INIT) {
+ used += scnprintf(tmp + used, size - used,
+ " 0x%08x - thermal monitoring initialized\n",
+ (u32)CR_THERM_INIT);
+ }
+
+ /* check each dynamic flag on each HFI */
+ for (i = 0; i < 2; i++) {
+ check_dyn_flag(scratch0, tmp, size, &used, dd->hfi1_id, i,
+ CR_SBUS, "SBus");
+ check_dyn_flag(scratch0, tmp, size, &used, dd->hfi1_id, i,
+ CR_EPROM, "EPROM");
+ check_dyn_flag(scratch0, tmp, size, &used, dd->hfi1_id, i,
+ CR_I2C1, "i2c chain 1");
+ check_dyn_flag(scratch0, tmp, size, &used, dd->hfi1_id, i,
+ CR_I2C2, "i2c chain 2");
+ }
+ used += scnprintf(tmp + used, size - used, "Write bits to clear\n");
+
+ ret = simple_read_from_buffer(buf, count, ppos, tmp, used);
+ kfree(tmp);
+ return ret;
+}
+
+static ssize_t asic_flags_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hfi1_pportdata *ppd;
+ struct hfi1_devdata *dd;
+ char *buff;
+ int ret;
+ unsigned long long value;
+ u64 scratch0;
+ u64 clear;
+
+ ppd = private2ppd(file);
+ dd = ppd->dd;
+
+ /* zero terminate and read the expected integer */
+ buff = memdup_user_nul(buf, count);
+ if (IS_ERR(buff))
+ return PTR_ERR(buff);
+
+ ret = kstrtoull(buff, 0, &value);
+ if (ret)
+ goto do_free;
+ clear = value;
+
+ /* obtain exclusive access */
+ mutex_lock(&dd->asic_data->asic_resource_mutex);
+ acquire_hw_mutex(dd);
+
+ scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
+ scratch0 &= ~clear;
+ write_csr(dd, ASIC_CFG_SCRATCH, scratch0);
+ /* force write to be visible to other HFI on another OS */
+ (void)read_csr(dd, ASIC_CFG_SCRATCH);
+
+ release_hw_mutex(dd);
+ mutex_unlock(&dd->asic_data->asic_resource_mutex);
+
+ /* return the number of bytes written */
+ ret = count;
+
+ do_free:
+ kfree(buff);
+ return ret;
+}
+
+/* read the dc8051 memory */
+static ssize_t dc8051_memory_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hfi1_pportdata *ppd = private2ppd(file);
+ ssize_t rval;
+ void *tmp;
+ loff_t start, end;
+
+ /* the checks below expect the position to be positive */
+ if (*ppos < 0)
+ return -EINVAL;
+
+ tmp = kzalloc(DC8051_DATA_MEM_SIZE, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ /*
+ * Fill in the requested portion of the temporary buffer from the
+ * 8051 memory. The 8051 memory is read in units of 8 bytes.
+ * Adjust start and end to fit. Skip reading anything if out of
+ * range.
+ */
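+ /*
+ * Editor's note, worked example: *ppos = 3 and count = 9 round to
+ * start = 0 and end = 16, i.e. two full 8-byte reads.
+ */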
+ start = *ppos & ~0x7; /* round down */
+ if (start < DC8051_DATA_MEM_SIZE) {
+ end = (*ppos + count + 7) & ~0x7; /* round up */
+ if (end > DC8051_DATA_MEM_SIZE)
+ end = DC8051_DATA_MEM_SIZE;
+ rval = read_8051_data(ppd->dd, start, end - start,
+ (u64 *)(tmp + start));
+ if (rval)
+ goto done;
+ }
+
+ rval = simple_read_from_buffer(buf, count, ppos, tmp,
+ DC8051_DATA_MEM_SIZE);
+done:
+ kfree(tmp);
+ return rval;
+}
+
+static ssize_t debugfs_lcb_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hfi1_pportdata *ppd = private2ppd(file);
+ struct hfi1_devdata *dd = ppd->dd;
+ unsigned long total, csr_off;
+ u64 data;
+
+ if (*ppos < 0)
+ return -EINVAL;
+ /* only read 8 byte quantities */
+ if ((count % 8) != 0)
+ return -EINVAL;
+ /* offset must be 8-byte aligned */
+ if ((*ppos % 8) != 0)
+ return -EINVAL;
+ /* do nothing if out of range or zero count */
+ if (*ppos >= (LCB_END - LCB_START) || !count)
+ return 0;
+ /* reduce count if needed */
+ if (*ppos + count > LCB_END - LCB_START)
+ count = (LCB_END - LCB_START) - *ppos;
+
+ csr_off = LCB_START + *ppos;
+ for (total = 0; total < count; total += 8, csr_off += 8) {
+ if (read_lcb_csr(dd, csr_off, (u64 *)&data))
+ break; /* failed */
+ if (put_user(data, (unsigned long __user *)(buf + total)))
+ break;
+ }
+ *ppos += total;
+ return total;
+}
+
+static ssize_t debugfs_lcb_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hfi1_pportdata *ppd = private2ppd(file);
+ struct hfi1_devdata *dd = ppd->dd;
+ unsigned long total, csr_off, data;
+
+ if (*ppos < 0)
+ return -EINVAL;
+ /* only write 8 byte quantities */
+ if ((count % 8) != 0)
+ return -EINVAL;
+ /* offset must be 8-byte aligned */
+ if ((*ppos % 8) != 0)
+ return -EINVAL;
+ /* do nothing if out of range or zero count */
+ if (*ppos >= (LCB_END - LCB_START) || !count)
+ return 0;
+ /* reduce count if needed */
+ if (*ppos + count > LCB_END - LCB_START)
+ count = (LCB_END - LCB_START) - *ppos;
+
+ csr_off = LCB_START + *ppos;
+ for (total = 0; total < count; total += 8, csr_off += 8) {
+ if (get_user(data, (unsigned long __user *)(buf + total)))
+ break;
+ if (write_lcb_csr(dd, csr_off, data))
+ break; /* failed */
+ }
+ *ppos += total;
+ return total;
+}
+
+/*
+ * read the per-port QSFP data for ppd
+ */
+static ssize_t qsfp_debugfs_dump(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hfi1_pportdata *ppd;
+ char *tmp;
+ int ret;
+
+ ppd = private2ppd(file);
+ tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ret = qsfp_dump(ppd, tmp, PAGE_SIZE);
+ if (ret > 0)
+ ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
+ kfree(tmp);
+ return ret;
+}
+
+/* Do an i2c write operation on the chain for the given HFI. */
+static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos, u32 target)
+{
+ struct hfi1_pportdata *ppd;
+ char *buff;
+ int ret;
+ int i2c_addr;
+ int offset;
+ int total_written;
+
+ ppd = private2ppd(file);
+
+ /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */
+ i2c_addr = (*ppos >> 16) & 0xffff;
+ offset = *ppos & 0xffff;
+
+ /* explicitly reject invalid address 0 to catch cp and cat */
+ if (i2c_addr == 0)
+ return -EINVAL;
+
+ buff = memdup_user(buf, count);
+ if (IS_ERR(buff))
+ return PTR_ERR(buff);
+
+ total_written = i2c_write(ppd, target, i2c_addr, offset, buff, count);
+ if (total_written < 0) {
+ ret = total_written;
+ goto _free;
+ }
+
+ *ppos += total_written;
+
+ ret = total_written;
+
+ _free:
+ kfree(buff);
+ return ret;
+}
+
+/* Do an i2c write operation on chain for HFI 0. */
+static ssize_t i2c1_debugfs_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return __i2c_debugfs_write(file, buf, count, ppos, 0);
+}
+
+/* Do an i2c write operation on chain for HFI 1. */
+static ssize_t i2c2_debugfs_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return __i2c_debugfs_write(file, buf, count, ppos, 1);
+}
+
+/* Do an i2c read operation on the chain for the given HFI. */
+static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos, u32 target)
+{
+ struct hfi1_pportdata *ppd;
+ char *buff;
+ int ret;
+ int i2c_addr;
+ int offset;
+ int total_read;
+
+ ppd = private2ppd(file);
+
+ /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */
+ i2c_addr = (*ppos >> 16) & 0xffff;
+ offset = *ppos & 0xffff;
+
+ /* explicitly reject invalid address 0 to catch cp and cat */
+ if (i2c_addr == 0)
+ return -EINVAL;
+
+ buff = kmalloc(count, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ total_read = i2c_read(ppd, target, i2c_addr, offset, buff, count);
+ if (total_read < 0) {
+ ret = total_read;
+ goto _free;
+ }
+
+ *ppos += total_read;
+
+ ret = copy_to_user(buf, buff, total_read);
+ if (ret > 0) {
+ ret = -EFAULT;
+ goto _free;
+ }
+
+ ret = total_read;
+
+ _free:
+ kfree(buff);
+ return ret;
+}
+
+/* Do an i2c read operation on chain for HFI 0. */
+static ssize_t i2c1_debugfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return __i2c_debugfs_read(file, buf, count, ppos, 0);
+}
+
+/* Do an i2c read operation on chain for HFI 1. */
+static ssize_t i2c2_debugfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return __i2c_debugfs_read(file, buf, count, ppos, 1);
+}
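
As the comment above notes, the i2c1/i2c2 files encode the target in the file position: bits 31:16 of *ppos carry the i2c address and bits 15:0 the offset within that device, with address 0 rejected. A hedged user-space sketch, with a hypothetical path and device address, reading a few bytes:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Hypothetical debugfs path and i2c address, for illustration only. */
        const char *path = "/sys/kernel/debug/hfi1/hfi1_0/i2c1";
        int i2c_addr = 0x50;                    /* address 0 is rejected */
        int dev_offset = 0x00;
        off_t pos = ((off_t)i2c_addr << 16) | dev_offset;
        unsigned char data[8];
        ssize_t n;
        int fd = open(path, O_RDONLY);

        if (fd < 0)
                return 1;
        n = pread(fd, data, sizeof(data), pos);
        if (n > 0)
                printf("read %zd bytes from i2c addr 0x%x, offset 0x%x\n",
                       n, i2c_addr, dev_offset);
        close(fd);
        return 0;
}
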
+
+/* Do a QSFP write operation on the i2c chain for the given HFI. */
+static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos, u32 target)
+{
+ struct hfi1_pportdata *ppd;
+ char *buff;
+ int ret;
+ int total_written;
+
+ if (*ppos + count > QSFP_PAGESIZE * 4) /* base page + page00-page03 */
+ return -EINVAL;
+
+ ppd = private2ppd(file);
+
+ buff = memdup_user(buf, count);
+ if (IS_ERR(buff))
+ return PTR_ERR(buff);
+
+ total_written = qsfp_write(ppd, target, *ppos, buff, count);
+ if (total_written < 0) {
+ ret = total_written;
+ goto _free;
+ }
+
+ *ppos += total_written;
+
+ ret = total_written;
+
+ _free:
+ kfree(buff);
+ return ret;
+}
+
+/* Do a QSFP write operation on i2c chain for HFI 0. */
+static ssize_t qsfp1_debugfs_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return __qsfp_debugfs_write(file, buf, count, ppos, 0);
+}
+
+/* Do a QSFP write operation on i2c chain for HFI 1. */
+static ssize_t qsfp2_debugfs_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return __qsfp_debugfs_write(file, buf, count, ppos, 1);
+}
+
+/* Do a QSFP read operation on the i2c chain for the given HFI. */
+static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos, u32 target)
+{
+ struct hfi1_pportdata *ppd;
+ char *buff;
+ int ret;
+ int total_read;
+
+ if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */
+ ret = -EINVAL;
+ goto _return;
+ }
+
+ ppd = private2ppd(file);
+
+ buff = kmalloc(count, GFP_KERNEL);
+ if (!buff) {
+ ret = -ENOMEM;
+ goto _return;
+ }
+
+ total_read = qsfp_read(ppd, target, *ppos, buff, count);
+ if (total_read < 0) {
+ ret = total_read;
+ goto _free;
+ }
+
+ *ppos += total_read;
+
+ ret = copy_to_user(buf, buff, total_read);
+ if (ret > 0) {
+ ret = -EFAULT;
+ goto _free;
+ }
+
+ ret = total_read;
+
+ _free:
+ kfree(buff);
+ _return:
+ return ret;
+}
+
+/* Do a QSFP read operation on i2c chain for HFI 0. */
+static ssize_t qsfp1_debugfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return __qsfp_debugfs_read(file, buf, count, ppos, 0);
+}
+
+/* Do a QSFP read operation on i2c chain for HFI 1. */
+static ssize_t qsfp2_debugfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return __qsfp_debugfs_read(file, buf, count, ppos, 1);
+}
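
Unlike the i2c files, the qsfp1/qsfp2 files treat *ppos as a flat byte offset into the cable memory, bounded above by QSFP_PAGESIZE * 4. A user-space sketch (hypothetical path; byte 0 of the QSFP memory map is the identifier field in SFF-8636) reading that first byte:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Hypothetical debugfs path, for illustration only. */
        const char *path = "/sys/kernel/debug/hfi1/hfi1_0/qsfp1";
        unsigned char id;
        int fd = open(path, O_RDONLY);

        if (fd < 0)
                return 1;
        if (pread(fd, &id, 1, 0) == 1)
                printf("QSFP byte 0 (identifier) = 0x%02x\n", id);
        close(fd);
        return 0;
}
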
+
+static int __i2c_debugfs_open(struct inode *in, struct file *fp, u32 target)
+{
+ struct hfi1_pportdata *ppd;
+
+ ppd = private2ppd(fp);
+
+ return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
+}
+
+static int i2c1_debugfs_open(struct inode *in, struct file *fp)
+{
+ return __i2c_debugfs_open(in, fp, 0);
+}
+
+static int i2c2_debugfs_open(struct inode *in, struct file *fp)
+{
+ return __i2c_debugfs_open(in, fp, 1);
+}
+
+static int __i2c_debugfs_release(struct inode *in, struct file *fp, u32 target)
+{
+ struct hfi1_pportdata *ppd;
+
+ ppd = private2ppd(fp);
+
+ release_chip_resource(ppd->dd, i2c_target(target));
+
+ return 0;
+}
+
+static int i2c1_debugfs_release(struct inode *in, struct file *fp)
+{
+ return __i2c_debugfs_release(in, fp, 0);
+}
+
+static int i2c2_debugfs_release(struct inode *in, struct file *fp)
+{
+ return __i2c_debugfs_release(in, fp, 1);
+}
+
+static int __qsfp_debugfs_open(struct inode *in, struct file *fp, u32 target)
+{
+ struct hfi1_pportdata *ppd;
+
+ ppd = private2ppd(fp);
+
+ return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
+}
+
+static int qsfp1_debugfs_open(struct inode *in, struct file *fp)
+{
+ return __qsfp_debugfs_open(in, fp, 0);
+}
+
+static int qsfp2_debugfs_open(struct inode *in, struct file *fp)
+{
+ return __qsfp_debugfs_open(in, fp, 1);
+}
+
+static int __qsfp_debugfs_release(struct inode *in, struct file *fp, u32 target)
+{
+ struct hfi1_pportdata *ppd;
+
+ ppd = private2ppd(fp);
+
+ release_chip_resource(ppd->dd, i2c_target(target));
+
+ return 0;
+}
+
+static int qsfp1_debugfs_release(struct inode *in, struct file *fp)
+{
+ return __qsfp_debugfs_release(in, fp, 0);
+}
+
+static int qsfp2_debugfs_release(struct inode *in, struct file *fp)
+{
+ return __qsfp_debugfs_release(in, fp, 1);
+}
+
+#define EXPROM_WRITE_ENABLE BIT_ULL(14)
+
+static bool exprom_wp_disabled;
+
+static int exprom_wp_set(struct hfi1_devdata *dd, bool disable)
+{
+ u64 gpio_val = 0;
+
+ if (disable) {
+ gpio_val = EXPROM_WRITE_ENABLE;
+ exprom_wp_disabled = true;
+ dd_dev_info(dd, "Disable Expansion ROM Write Protection\n");
+ } else {
+ exprom_wp_disabled = false;
+ dd_dev_info(dd, "Enable Expansion ROM Write Protection\n");
+ }
+
+ write_csr(dd, ASIC_GPIO_OUT, gpio_val);
+ write_csr(dd, ASIC_GPIO_OE, gpio_val);
+
+ return 0;
+}
+
+static ssize_t exprom_wp_debugfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+
+static ssize_t exprom_wp_debugfs_write(struct file *file,
+ const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct hfi1_pportdata *ppd = private2ppd(file);
+ char cdata;
+
+ if (count != 1)
+ return -EINVAL;
+ if (get_user(cdata, buf))
+ return -EFAULT;
+ if (cdata == '0')
+ exprom_wp_set(ppd->dd, false);
+ else if (cdata == '1')
+ exprom_wp_set(ppd->dd, true);
+ else
+ return -EINVAL;
+
+ return 1;
+}
+
+static unsigned long exprom_in_use;
+
+static int exprom_wp_debugfs_open(struct inode *in, struct file *fp)
+{
+ if (test_and_set_bit(0, &exprom_in_use))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int exprom_wp_debugfs_release(struct inode *in, struct file *fp)
+{
+ struct hfi1_pportdata *ppd = private2ppd(fp);
+
+ if (exprom_wp_disabled)
+ exprom_wp_set(ppd->dd, false);
+ clear_bit(0, &exprom_in_use);
+
+ return 0;
+}
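
The exprom_wp file accepts a single character: writing '1' disables expansion ROM write protection, '0' re-enables it, and the release handler restores protection if it was left disabled. A minimal user-space sketch, assuming a hypothetical debugfs path:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        /* Hypothetical debugfs path; adjust for the actual unit/port. */
        int fd = open("/sys/kernel/debug/hfi1/hfi1_0/exprom_wp", O_WRONLY);

        if (fd < 0)
                return 1;                       /* may be -EBUSY if already open */
        write(fd, "1", 1);                      /* disable write protection */
        /* ... update the expansion ROM here ... */
        write(fd, "0", 1);                      /* re-enable write protection */
        close(fd);
        return 0;
}
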
+
+#define DEBUGFS_OPS(nm, readroutine, writeroutine) \
+{ \
+ .name = nm, \
+ .ops = { \
+ .owner = THIS_MODULE, \
+ .read = readroutine, \
+ .write = writeroutine, \
+ .llseek = generic_file_llseek, \
+ }, \
+}
+
+#define DEBUGFS_XOPS(nm, readf, writef, openf, releasef) \
+{ \
+ .name = nm, \
+ .ops = { \
+ .owner = THIS_MODULE, \
+ .read = readf, \
+ .write = writef, \
+ .llseek = generic_file_llseek, \
+ .open = openf, \
+ .release = releasef \
+ }, \
+}
+
+static const struct counter_info cntr_ops[] = {
+ DEBUGFS_OPS("counter_names", dev_names_read, NULL),
+ DEBUGFS_OPS("counters", dev_counters_read, NULL),
+ DEBUGFS_OPS("portcounter_names", portnames_read, NULL),
+};
+
+static const struct counter_info port_cntr_ops[] = {
+ DEBUGFS_OPS("port%dcounters", portcntrs_debugfs_read, NULL),
+ DEBUGFS_XOPS("i2c1", i2c1_debugfs_read, i2c1_debugfs_write,
+ i2c1_debugfs_open, i2c1_debugfs_release),
+ DEBUGFS_XOPS("i2c2", i2c2_debugfs_read, i2c2_debugfs_write,
+ i2c2_debugfs_open, i2c2_debugfs_release),
+ DEBUGFS_OPS("qsfp_dump%d", qsfp_debugfs_dump, NULL),
+ DEBUGFS_XOPS("qsfp1", qsfp1_debugfs_read, qsfp1_debugfs_write,
+ qsfp1_debugfs_open, qsfp1_debugfs_release),
+ DEBUGFS_XOPS("qsfp2", qsfp2_debugfs_read, qsfp2_debugfs_write,
+ qsfp2_debugfs_open, qsfp2_debugfs_release),
+ DEBUGFS_XOPS("exprom_wp", exprom_wp_debugfs_read,
+ exprom_wp_debugfs_write, exprom_wp_debugfs_open,
+ exprom_wp_debugfs_release),
+ DEBUGFS_OPS("asic_flags", asic_flags_read, asic_flags_write),
+ DEBUGFS_OPS("dc8051_memory", dc8051_memory_read, NULL),
+ DEBUGFS_OPS("lcb", debugfs_lcb_read, debugfs_lcb_write),
+};
+
+static void *_sdma_cpu_list_seq_start(struct seq_file *s, loff_t *pos)
+{
+ if (*pos >= num_online_cpus())
+ return NULL;
+
+ return pos;
+}
+
+static void *_sdma_cpu_list_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ ++*pos;
+ if (*pos >= num_online_cpus())
+ return NULL;
+
+ return pos;
+}
+
+static void _sdma_cpu_list_seq_stop(struct seq_file *s, void *v)
+{
+ /* nothing allocated */
+}
+
+static int _sdma_cpu_list_seq_show(struct seq_file *s, void *v)
+{
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+ loff_t *spos = v;
+ loff_t i = *spos;
+
+ sdma_seqfile_dump_cpu_list(s, dd, (unsigned long)i);
+ return 0;
+}
+
+DEBUGFS_SEQ_FILE_OPS(sdma_cpu_list);
+DEBUGFS_SEQ_FILE_OPEN(sdma_cpu_list)
+DEBUGFS_FILE_OPS(sdma_cpu_list);
+
+void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
+{
+ char name[sizeof("port0counters") + 1];
+ char link[10];
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+ struct hfi1_pportdata *ppd;
+ struct dentry *root;
+ int unit = dd->unit;
+ int i, j;
+
+ if (!hfi1_dbg_root)
+ return;
+ snprintf(name, sizeof(name), "%s_%d", class_name(), unit);
+ snprintf(link, sizeof(link), "%d", unit);
+ root = debugfs_create_dir(name, hfi1_dbg_root);
+ ibd->hfi1_ibdev_dbg = root;
+
+ ibd->hfi1_ibdev_link =
+ debugfs_create_symlink(link, hfi1_dbg_root, name);
+
+ debugfs_create_file("opcode_stats", 0444, root, ibd,
+ &_opcode_stats_file_ops);
+ debugfs_create_file("tx_opcode_stats", 0444, root, ibd,
+ &_tx_opcode_stats_file_ops);
+ debugfs_create_file("ctx_stats", 0444, root, ibd, &_ctx_stats_file_ops);
+ debugfs_create_file("qp_stats", 0444, root, ibd, &_qp_stats_file_ops);
+ debugfs_create_file("sdes", 0444, root, ibd, &_sdes_file_ops);
+ debugfs_create_file("rcds", 0444, root, ibd, &_rcds_file_ops);
+ debugfs_create_file("pios", 0444, root, ibd, &_pios_file_ops);
+ debugfs_create_file("sdma_cpu_list", 0444, root, ibd,
+ &_sdma_cpu_list_file_ops);
+
+ /* dev counter files */
+ for (i = 0; i < ARRAY_SIZE(cntr_ops); i++)
+ debugfs_create_file(cntr_ops[i].name, 0444, root, dd,
+ &cntr_ops[i].ops);
+
+ /* per port files */
+ for (ppd = dd->pport, j = 0; j < dd->num_pports; j++, ppd++)
+ for (i = 0; i < ARRAY_SIZE(port_cntr_ops); i++) {
+ snprintf(name,
+ sizeof(name),
+ port_cntr_ops[i].name,
+ j + 1);
+ debugfs_create_file(name,
+ !port_cntr_ops[i].ops.write ?
+ S_IRUGO :
+ S_IRUGO | S_IWUSR,
+ root, ppd, &port_cntr_ops[i].ops);
+ }
+
+ hfi1_fault_init_debugfs(ibd);
+}
+
+void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd)
+{
+ if (!hfi1_dbg_root)
+ goto out;
+ hfi1_fault_exit_debugfs(ibd);
+ debugfs_remove(ibd->hfi1_ibdev_link);
+ debugfs_remove_recursive(ibd->hfi1_ibdev_dbg);
+out:
+ ibd->hfi1_ibdev_dbg = NULL;
+}
+
+/*
+ * driver stats field names, one line per stat, single string. Used by
+ * programs like hfistats to print the stats in a way which works for
+ * different versions of drivers, without changing program source.
+ * If hfi1_ib_stats changes, this needs to change. Names need to be
+ * 12 chars or fewer (w/o newline) for proper display by the hfistats utility.
+ */
+static const char * const hfi1_statnames[] = {
+ /* must be element 0 */
+ "KernIntr",
+ "ErrorIntr",
+ "Tx_Errs",
+ "Rcv_Errs",
+ "H/W_Errs",
+ "NoPIOBufs",
+ "CtxtsOpen",
+ "RcvLen_Errs",
+ "EgrBufFull",
+ "EgrHdrFull"
+};
+
+static void *_driver_stats_names_seq_start(struct seq_file *s, loff_t *pos)
+{
+ if (*pos >= ARRAY_SIZE(hfi1_statnames))
+ return NULL;
+ return pos;
+}
+
+static void *_driver_stats_names_seq_next(
+ struct seq_file *s,
+ void *v,
+ loff_t *pos)
+{
+ ++*pos;
+ if (*pos >= ARRAY_SIZE(hfi1_statnames))
+ return NULL;
+ return pos;
+}
+
+static void _driver_stats_names_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static int _driver_stats_names_seq_show(struct seq_file *s, void *v)
+{
+ loff_t *spos = v;
+
+ seq_printf(s, "%s\n", hfi1_statnames[*spos]);
+ return 0;
+}
+
+DEBUGFS_SEQ_FILE_OPS(driver_stats_names);
+DEBUGFS_SEQ_FILE_OPEN(driver_stats_names)
+DEBUGFS_FILE_OPS(driver_stats_names);
+
+static void *_driver_stats_seq_start(struct seq_file *s, loff_t *pos)
+{
+ if (*pos >= ARRAY_SIZE(hfi1_statnames))
+ return NULL;
+ return pos;
+}
+
+static void *_driver_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ ++*pos;
+ if (*pos >= ARRAY_SIZE(hfi1_statnames))
+ return NULL;
+ return pos;
+}
+
+static void _driver_stats_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static void hfi1_sps_show_ints(struct seq_file *s)
+{
+ unsigned long index, flags;
+ struct hfi1_devdata *dd;
+ u64 sps_ints = 0;
+
+ xa_lock_irqsave(&hfi1_dev_table, flags);
+ xa_for_each(&hfi1_dev_table, index, dd) {
+ sps_ints += get_all_cpu_total(dd->int_counter);
+ }
+ xa_unlock_irqrestore(&hfi1_dev_table, flags);
+ seq_write(s, &sps_ints, sizeof(u64));
+}
+
+static int _driver_stats_seq_show(struct seq_file *s, void *v)
+{
+ loff_t *spos = v;
+ u64 *stats = (u64 *)&hfi1_stats;
+
+ /* special case for interrupts */
+ if (*spos == 0)
+ hfi1_sps_show_ints(s);
+ else
+ seq_write(s, stats + *spos, sizeof(u64));
+ return 0;
+}
+
+DEBUGFS_SEQ_FILE_OPS(driver_stats);
+DEBUGFS_SEQ_FILE_OPEN(driver_stats)
+DEBUGFS_FILE_OPS(driver_stats);
+
+void hfi1_dbg_init(void)
+{
+ hfi1_dbg_root = debugfs_create_dir(DRIVER_NAME, NULL);
+ debugfs_create_file("driver_stats_names", 0444, hfi1_dbg_root, NULL,
+ &_driver_stats_names_file_ops);
+ debugfs_create_file("driver_stats", 0444, hfi1_dbg_root, NULL,
+ &_driver_stats_file_ops);
+}
+
+void hfi1_dbg_exit(void)
+{
+ debugfs_remove_recursive(hfi1_dbg_root);
+ hfi1_dbg_root = NULL;
+}
diff --git a/drivers/infiniband/hw/hfi1/debugfs.h b/drivers/infiniband/hw/hfi1/debugfs.h
new file mode 100644
index 000000000000..65b48839abc6
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/debugfs.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2015, 2016, 2018 Intel Corporation.
+ */
+
+#ifndef _HFI1_DEBUGFS_H
+#define _HFI1_DEBUGFS_H
+
+struct hfi1_ibdev;
+
+#define DEBUGFS_SEQ_FILE_OPS(name) \
+static const struct seq_operations _##name##_seq_ops = { \
+ .start = _##name##_seq_start, \
+ .next = _##name##_seq_next, \
+ .stop = _##name##_seq_stop, \
+ .show = _##name##_seq_show \
+}
+
+#define DEBUGFS_SEQ_FILE_OPEN(name) \
+static int _##name##_open(struct inode *inode, struct file *s) \
+{ \
+ struct seq_file *seq; \
+ int ret; \
+ ret = seq_open(s, &_##name##_seq_ops); \
+ if (ret) \
+ return ret; \
+ seq = s->private_data; \
+ seq->private = inode->i_private; \
+ return 0; \
+}
+
+#define DEBUGFS_FILE_OPS(name) \
+static const struct file_operations _##name##_file_ops = { \
+ .owner = THIS_MODULE, \
+ .open = _##name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = seq_release \
+}
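
These three macros are meant to be used as a set: for a given name the caller defines _name_seq_start/_next/_stop/_show, then instantiates the seq_operations, the open routine, and the file_operations, exactly as debugfs.c does for driver_stats and sdma_cpu_list. A hedged sketch of the pattern for a hypothetical "example" node:

/* Hypothetical "example" node, following the pattern used by driver_stats
 * and sdma_cpu_list in debugfs.c; assumes <linux/seq_file.h> and the
 * DEBUGFS_* macros from this header are in scope. */
static void *_example_seq_start(struct seq_file *s, loff_t *pos)
{
        return (*pos < 1) ? pos : NULL;         /* a single record */
}

static void *_example_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void _example_seq_stop(struct seq_file *s, void *v)
{
}

static int _example_seq_show(struct seq_file *s, void *v)
{
        seq_puts(s, "example\n");
        return 0;
}

DEBUGFS_SEQ_FILE_OPS(example);
DEBUGFS_SEQ_FILE_OPEN(example)
DEBUGFS_FILE_OPS(example);

/* Later, during initialization:
 *      debugfs_create_file("example", 0444, parent, private_data,
 *                          &_example_file_ops);
 */
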
+
+#ifdef CONFIG_DEBUG_FS
+void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd);
+void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd);
+void hfi1_dbg_init(void);
+void hfi1_dbg_exit(void);
+
+#else
+static inline void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
+{
+}
+
+static inline void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd)
+{
+}
+
+static inline void hfi1_dbg_init(void)
+{
+}
+
+static inline void hfi1_dbg_exit(void)
+{
+}
+#endif
+
+#endif /* _HFI1_DEBUGFS_H */
diff --git a/drivers/infiniband/hw/hfi1/device.c b/drivers/infiniband/hw/hfi1/device.c
new file mode 100644
index 000000000000..a98a4175e53b
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/device.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+
+#include "hfi.h"
+#include "device.h"
+
+static char *hfi1_devnode(const struct device *dev, umode_t *mode)
+{
+ if (mode)
+ *mode = 0600;
+ return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
+}
+
+static const struct class class = {
+ .name = "hfi1",
+ .devnode = hfi1_devnode,
+};
+
+static char *hfi1_user_devnode(const struct device *dev, umode_t *mode)
+{
+ if (mode)
+ *mode = 0666;
+ return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
+}
+
+static const struct class user_class = {
+ .name = "hfi1_user",
+ .devnode = hfi1_user_devnode,
+};
+static dev_t hfi1_dev;
+
+int hfi1_cdev_init(int minor, const char *name,
+ const struct file_operations *fops,
+ struct cdev *cdev, struct device **devp,
+ bool user_accessible,
+ struct kobject *parent)
+{
+ const dev_t dev = MKDEV(MAJOR(hfi1_dev), minor);
+ struct device *device = NULL;
+ int ret;
+
+ cdev_init(cdev, fops);
+ cdev->owner = THIS_MODULE;
+ cdev_set_parent(cdev, parent);
+ kobject_set_name(&cdev->kobj, name);
+
+ ret = cdev_add(cdev, dev, 1);
+ if (ret < 0) {
+ pr_err("Could not add cdev for minor %d, %s (err %d)\n",
+ minor, name, -ret);
+ goto done;
+ }
+
+ if (user_accessible)
+ device = device_create(&user_class, NULL, dev, NULL, "%s", name);
+ else
+ device = device_create(&class, NULL, dev, NULL, "%s", name);
+
+ if (IS_ERR(device)) {
+ ret = PTR_ERR(device);
+ pr_err("Could not create device for minor %d, %s (err %pe)\n",
+ minor, name, device);
+ device = NULL;
+ cdev_del(cdev);
+ }
+done:
+ *devp = device;
+ return ret;
+}
+
+void hfi1_cdev_cleanup(struct cdev *cdev, struct device **devp)
+{
+ struct device *device = *devp;
+
+ if (device) {
+ device_unregister(device);
+ *devp = NULL;
+
+ cdev_del(cdev);
+ }
+}
+
+static const char *hfi1_class_name = "hfi1";
+
+const char *class_name(void)
+{
+ return hfi1_class_name;
+}
+
+int __init dev_init(void)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&hfi1_dev, 0, HFI1_NMINORS, DRIVER_NAME);
+ if (ret < 0) {
+ pr_err("Could not allocate chrdev region (err %d)\n", -ret);
+ goto done;
+ }
+
+ ret = class_register(&class);
+ if (ret) {
+ pr_err("Could not create device class (err %d)\n", -ret);
+ unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
+ goto done;
+ }
+
+ ret = class_register(&user_class);
+ if (ret) {
+ pr_err("Could not create device class for user accessible files (err %d)\n",
+ -ret);
+ class_unregister(&class);
+ unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
+ goto done;
+ }
+
+done:
+ return ret;
+}
+
+void dev_cleanup(void)
+{
+ class_unregister(&class);
+ class_unregister(&user_class);
+
+ unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
+}
diff --git a/drivers/infiniband/hw/hfi1/device.h b/drivers/infiniband/hw/hfi1/device.h
new file mode 100644
index 000000000000..a91bea426ba5
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/device.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ */
+
+#ifndef _HFI1_DEVICE_H
+#define _HFI1_DEVICE_H
+
+int hfi1_cdev_init(int minor, const char *name,
+ const struct file_operations *fops,
+ struct cdev *cdev, struct device **devp,
+ bool user_accessible,
+ struct kobject *parent);
+void hfi1_cdev_cleanup(struct cdev *cdev, struct device **devp);
+const char *class_name(void);
+int __init dev_init(void);
+void dev_cleanup(void);
+
+#endif /* _HFI1_DEVICE_H */
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
new file mode 100644
index 000000000000..06487e20f723
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -0,0 +1,1906 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015-2020 Intel Corporation.
+ * Copyright(c) 2021 Cornelis Networks.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/prefetch.h>
+#include <rdma/ib_verbs.h>
+#include <linux/etherdevice.h>
+
+#include "hfi.h"
+#include "trace.h"
+#include "qp.h"
+#include "sdma.h"
+#include "debugfs.h"
+#include "vnic.h"
+#include "fault.h"
+
+#include "ipoib.h"
+#include "netdev.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+
+DEFINE_MUTEX(hfi1_mutex); /* general driver use */
+
+unsigned int hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
+module_param_named(max_mtu, hfi1_max_mtu, uint, S_IRUGO);
+MODULE_PARM_DESC(max_mtu, "Set max MTU bytes, default is " __stringify(
+ HFI1_DEFAULT_MAX_MTU));
+
+unsigned int hfi1_cu = 1;
+module_param_named(cu, hfi1_cu, uint, S_IRUGO);
+MODULE_PARM_DESC(cu, "Credit return units");
+
+unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT;
+static int hfi1_caps_set(const char *val, const struct kernel_param *kp);
+static int hfi1_caps_get(char *buffer, const struct kernel_param *kp);
+static const struct kernel_param_ops cap_ops = {
+ .set = hfi1_caps_set,
+ .get = hfi1_caps_get
+};
+module_param_cb(cap_mask, &cap_ops, &hfi1_cap_mask, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Cornelis Omni-Path Express driver");
+
+/*
+ * MAX_PKT_RECV is the max # of packets processed per receive interrupt.
+ */
+#define MAX_PKT_RECV 64
+/*
+ * MAX_PKT_RECV_THREAD is the max # of packets processed before
+ * the qp_wait_list queue is flushed.
+ */
+#define MAX_PKT_RECV_THREAD (MAX_PKT_RECV * 4)
+#define EGR_HEAD_UPDATE_THRESHOLD 16
+
+struct hfi1_ib_stats hfi1_stats;
+
+static int hfi1_caps_set(const char *val, const struct kernel_param *kp)
+{
+ int ret = 0;
+ unsigned long *cap_mask_ptr = (unsigned long *)kp->arg,
+ cap_mask = *cap_mask_ptr, value, diff,
+ write_mask = ((HFI1_CAP_WRITABLE_MASK << HFI1_CAP_USER_SHIFT) |
+ HFI1_CAP_WRITABLE_MASK);
+
+ ret = kstrtoul(val, 0, &value);
+ if (ret) {
+ pr_warn("Invalid module parameter value for 'cap_mask'\n");
+ goto done;
+ }
+ /* Get the changed bits (except the locked bit) */
+ diff = value ^ (cap_mask & ~HFI1_CAP_LOCKED_SMASK);
+
+ /* Remove any bits that are not allowed to change after driver load */
+ if (HFI1_CAP_LOCKED() && (diff & ~write_mask)) {
+ pr_warn("Ignoring non-writable capability bits %#lx\n",
+ diff & ~write_mask);
+ diff &= write_mask;
+ }
+
+ /* Mask off any reserved bits */
+ diff &= ~HFI1_CAP_RESERVED_MASK;
+ /* Clear any previously set and changing bits */
+ cap_mask &= ~diff;
+ /* Update the bits with the new capability */
+ cap_mask |= (value & diff);
+ /* Check for any kernel/user restrictions */
+ diff = (cap_mask & (HFI1_CAP_MUST_HAVE_KERN << HFI1_CAP_USER_SHIFT)) ^
+ ((cap_mask & HFI1_CAP_MUST_HAVE_KERN) << HFI1_CAP_USER_SHIFT);
+ cap_mask &= ~diff;
+ /* Set the bitmask to the final set */
+ *cap_mask_ptr = cap_mask;
+done:
+ return ret;
+}
+
+static int hfi1_caps_get(char *buffer, const struct kernel_param *kp)
+{
+ unsigned long cap_mask = *(unsigned long *)kp->arg;
+
+ cap_mask &= ~HFI1_CAP_LOCKED_SMASK;
+ cap_mask |= ((cap_mask & HFI1_CAP_K2U) << HFI1_CAP_USER_SHIFT);
+
+ return sysfs_emit(buffer, "0x%lx\n", cap_mask);
+}
+
+struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi)
+{
+ struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
+ struct hfi1_devdata *dd = container_of(ibdev,
+ struct hfi1_devdata, verbs_dev);
+ return dd->pcidev;
+}
+
+/*
+ * Return count of units with at least one port ACTIVE.
+ */
+int hfi1_count_active_units(void)
+{
+ struct hfi1_devdata *dd;
+ struct hfi1_pportdata *ppd;
+ unsigned long index, flags;
+ int pidx, nunits_active = 0;
+
+ xa_lock_irqsave(&hfi1_dev_table, flags);
+ xa_for_each(&hfi1_dev_table, index, dd) {
+ if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase1)
+ continue;
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (ppd->lid && ppd->linkup) {
+ nunits_active++;
+ break;
+ }
+ }
+ }
+ xa_unlock_irqrestore(&hfi1_dev_table, flags);
+ return nunits_active;
+}
+
+/*
+ * Get address of eager buffer from its index (allocated in chunks, not
+ * contiguous).
+ */
+static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
+ u8 *update)
+{
+ u32 idx = rhf_egr_index(rhf), offset = rhf_egr_buf_offset(rhf);
+
+ *update |= !(idx & (rcd->egrbufs.threshold - 1)) && !offset;
+ return (void *)(((u64)(rcd->egrbufs.rcvtids[idx].addr)) +
+ (offset * RCV_BUF_BLOCK_SIZE));
+}
+
+static inline void *hfi1_get_header(struct hfi1_ctxtdata *rcd,
+ __le32 *rhf_addr)
+{
+ u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
+
+ return (void *)(rhf_addr - rcd->rhf_offset + offset);
+}
+
+static inline struct ib_header *hfi1_get_msgheader(struct hfi1_ctxtdata *rcd,
+ __le32 *rhf_addr)
+{
+ return (struct ib_header *)hfi1_get_header(rcd, rhf_addr);
+}
+
+static inline struct hfi1_16b_header
+ *hfi1_get_16B_header(struct hfi1_ctxtdata *rcd,
+ __le32 *rhf_addr)
+{
+ return (struct hfi1_16b_header *)hfi1_get_header(rcd, rhf_addr);
+}
+
+/*
+ * Validate and encode a given RcvArray Buffer size.
+ * The function will check whether the given size falls within
+ * allowed size ranges for the respective type and, optionally,
+ * return the proper encoding.
+ */
+int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded)
+{
+ if (unlikely(!PAGE_ALIGNED(size)))
+ return 0;
+ if (unlikely(size < MIN_EAGER_BUFFER))
+ return 0;
+ if (size >
+ (type == PT_EAGER ? MAX_EAGER_BUFFER : MAX_EXPECTED_BUFFER))
+ return 0;
+ if (encoded)
+ *encoded = ilog2(size / PAGE_SIZE) + 1;
+ return 1;
+}
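
The optional encoding is ilog2(size / PAGE_SIZE) + 1, so with 4 KiB pages a 64 KiB eager buffer encodes as 5. A small stand-alone sketch of the same arithmetic (illustration only; the page size is an assumption here):

#include <stdio.h>

static unsigned int ilog2_u32(unsigned int v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        unsigned int page_size = 4096;          /* assumed PAGE_SIZE */
        unsigned int size = 64 * 1024;          /* 64 KiB eager buffer */
        unsigned int encoded = ilog2_u32(size / page_size) + 1;

        printf("size %u -> encoding %u\n", size, encoded);      /* prints 5 */
        return 0;
}
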
+
+static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
+ struct hfi1_packet *packet)
+{
+ struct ib_header *rhdr = packet->hdr;
+ u32 rte = rhf_rcv_type_err(packet->rhf);
+ u32 mlid_base;
+ struct hfi1_ibport *ibp = rcd_to_iport(rcd);
+ struct hfi1_devdata *dd = ppd->dd;
+ struct hfi1_ibdev *verbs_dev = &dd->verbs_dev;
+ struct rvt_dev_info *rdi = &verbs_dev->rdi;
+
+ if ((packet->rhf & RHF_DC_ERR) &&
+ hfi1_dbg_fault_suppress_err(verbs_dev))
+ return;
+
+ if (packet->rhf & RHF_ICRC_ERR)
+ return;
+
+ if (packet->etype == RHF_RCV_TYPE_BYPASS) {
+ goto drop;
+ } else {
+ u8 lnh = ib_get_lnh(rhdr);
+
+ mlid_base = be16_to_cpu(IB_MULTICAST_LID_BASE);
+ if (lnh == HFI1_LRH_BTH) {
+ packet->ohdr = &rhdr->u.oth;
+ } else if (lnh == HFI1_LRH_GRH) {
+ packet->ohdr = &rhdr->u.l.oth;
+ packet->grh = &rhdr->u.l.grh;
+ } else {
+ goto drop;
+ }
+ }
+
+ if (packet->rhf & RHF_TID_ERR) {
+ /* For TIDERR and RC QPs preemptively schedule a NAK */
+ u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
+ u32 dlid = ib_get_dlid(rhdr);
+ u32 qp_num;
+
+ /* Sanity check packet */
+ if (tlen < 24)
+ goto drop;
+
+ /* Check for GRH */
+ if (packet->grh) {
+ u32 vtf;
+ struct ib_grh *grh = packet->grh;
+
+ if (grh->next_hdr != IB_GRH_NEXT_HDR)
+ goto drop;
+ vtf = be32_to_cpu(grh->version_tclass_flow);
+ if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
+ goto drop;
+ }
+
+ /* Get the destination QP number. */
+ qp_num = ib_bth_get_qpn(packet->ohdr);
+ if (dlid < mlid_base) {
+ struct rvt_qp *qp;
+ unsigned long flags;
+
+ rcu_read_lock();
+ qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
+ if (!qp) {
+ rcu_read_unlock();
+ goto drop;
+ }
+
+ /*
+ * Handle only RC QPs - for other QP types drop error
+ * packet.
+ */
+ spin_lock_irqsave(&qp->r_lock, flags);
+
+ /* Check for valid receive state. */
+ if (!(ib_rvt_state_ops[qp->state] &
+ RVT_PROCESS_RECV_OK)) {
+ ibp->rvp.n_pkt_drops++;
+ }
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_RC:
+ hfi1_rc_hdrerr(rcd, packet, qp);
+ break;
+ default:
+ /* For now don't handle any other QP types */
+ break;
+ }
+
+ spin_unlock_irqrestore(&qp->r_lock, flags);
+ rcu_read_unlock();
+ } /* Unicast QP */
+ } /* Valid packet with TIDErr */
+
+ /* handle "RcvTypeErr" flags */
+ switch (rte) {
+ case RHF_RTE_ERROR_OP_CODE_ERR:
+ {
+ void *ebuf = NULL;
+ u8 opcode;
+
+ if (rhf_use_egr_bfr(packet->rhf))
+ ebuf = packet->ebuf;
+
+ if (!ebuf)
+ goto drop; /* this should never happen */
+
+ opcode = ib_bth_get_opcode(packet->ohdr);
+ if (opcode == IB_OPCODE_CNP) {
+ /*
+ * Only in pre-B0 h/w is the CNP_OPCODE handled
+ * via this code path.
+ */
+ struct rvt_qp *qp = NULL;
+ u32 lqpn, rqpn;
+ u16 rlid;
+ u8 svc_type, sl, sc5;
+
+ sc5 = hfi1_9B_get_sc5(rhdr, packet->rhf);
+ sl = ibp->sc_to_sl[sc5];
+
+ lqpn = ib_bth_get_qpn(packet->ohdr);
+ rcu_read_lock();
+ qp = rvt_lookup_qpn(rdi, &ibp->rvp, lqpn);
+ if (!qp) {
+ rcu_read_unlock();
+ goto drop;
+ }
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_UD:
+ rlid = 0;
+ rqpn = 0;
+ svc_type = IB_CC_SVCTYPE_UD;
+ break;
+ case IB_QPT_UC:
+ rlid = ib_get_slid(rhdr);
+ rqpn = qp->remote_qpn;
+ svc_type = IB_CC_SVCTYPE_UC;
+ break;
+ default:
+ rcu_read_unlock();
+ goto drop;
+ }
+
+ process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
+ rcu_read_unlock();
+ }
+
+ packet->rhf &= ~RHF_RCV_TYPE_ERR_SMASK;
+ break;
+ }
+ default:
+ break;
+ }
+
+drop:
+ return;
+}
+
+static inline void init_packet(struct hfi1_ctxtdata *rcd,
+ struct hfi1_packet *packet)
+{
+ packet->rsize = get_hdrqentsize(rcd); /* words */
+ packet->maxcnt = get_hdrq_cnt(rcd) * packet->rsize; /* words */
+ packet->rcd = rcd;
+ packet->updegr = 0;
+ packet->etail = -1;
+ packet->rhf_addr = get_rhf_addr(rcd);
+ packet->rhf = rhf_to_cpu(packet->rhf_addr);
+ packet->rhqoff = hfi1_rcd_head(rcd);
+ packet->numpkt = 0;
+}
+
+/* We support only two types - 9B and 16B for now */
+static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] = {
+ [HFI1_PKT_TYPE_9B] = &return_cnp,
+ [HFI1_PKT_TYPE_16B] = &return_cnp_16B
+};
+
+/**
+ * hfi1_process_ecn_slowpath - Process FECN or BECN bits
+ * @qp: The packet's destination QP
+ * @pkt: The packet itself.
+ * @prescan: Is the caller the RXQ prescan
+ *
+ * Process the packet's FECN or BECN bits. By now, it has already been
+ * determined whether those bits should be processed.
+ * The significance of the @prescan argument is that if the caller
+ * is the RXQ prescan, a CNP will be sent out instead of waiting for the
+ * normal packet processing to send an ACK with BECN set (or a CNP).
+ */
+bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
+ bool prescan)
+{
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct ib_other_headers *ohdr = pkt->ohdr;
+ struct ib_grh *grh = pkt->grh;
+ u32 rqpn = 0;
+ u16 pkey;
+ u32 rlid, slid, dlid = 0;
+ u8 hdr_type, sc, svc_type, opcode;
+ bool is_mcast = false, ignore_fecn = false, do_cnp = false,
+ fecn, becn;
+
+ /* can be called from prescan */
+ if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
+ pkey = hfi1_16B_get_pkey(pkt->hdr);
+ sc = hfi1_16B_get_sc(pkt->hdr);
+ dlid = hfi1_16B_get_dlid(pkt->hdr);
+ slid = hfi1_16B_get_slid(pkt->hdr);
+ is_mcast = hfi1_is_16B_mcast(dlid);
+ opcode = ib_bth_get_opcode(ohdr);
+ hdr_type = HFI1_PKT_TYPE_16B;
+ fecn = hfi1_16B_get_fecn(pkt->hdr);
+ becn = hfi1_16B_get_becn(pkt->hdr);
+ } else {
+ pkey = ib_bth_get_pkey(ohdr);
+ sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf);
+ dlid = qp->ibqp.qp_type != IB_QPT_UD ? ib_get_dlid(pkt->hdr) :
+ ppd->lid;
+ slid = ib_get_slid(pkt->hdr);
+ is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
+ (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
+ opcode = ib_bth_get_opcode(ohdr);
+ hdr_type = HFI1_PKT_TYPE_9B;
+ fecn = ib_bth_get_fecn(ohdr);
+ becn = ib_bth_get_becn(ohdr);
+ }
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_UD:
+ rlid = slid;
+ rqpn = ib_get_sqpn(pkt->ohdr);
+ svc_type = IB_CC_SVCTYPE_UD;
+ break;
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ rlid = slid;
+ rqpn = ib_get_sqpn(pkt->ohdr);
+ svc_type = IB_CC_SVCTYPE_UD;
+ break;
+ case IB_QPT_UC:
+ rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
+ rqpn = qp->remote_qpn;
+ svc_type = IB_CC_SVCTYPE_UC;
+ break;
+ case IB_QPT_RC:
+ rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
+ rqpn = qp->remote_qpn;
+ svc_type = IB_CC_SVCTYPE_RC;
+ break;
+ default:
+ return false;
+ }
+
+ ignore_fecn = is_mcast || (opcode == IB_OPCODE_CNP) ||
+ (opcode == IB_OPCODE_RC_ACKNOWLEDGE);
+ /*
+ * ACKNOWLEDGE packets do not get a CNP but this will be
+ * guarded by ignore_fecn above.
+ */
+ do_cnp = prescan ||
+ (opcode >= IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST &&
+ opcode <= IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE) ||
+ opcode == TID_OP(READ_RESP) ||
+ opcode == TID_OP(ACK);
+
+ /* Call appropriate CNP handler */
+ if (!ignore_fecn && do_cnp && fecn)
+ hfi1_handle_cnp_tbl[hdr_type](ibp, qp, rqpn, pkey,
+ dlid, rlid, sc, grh);
+
+ if (becn) {
+ u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
+ u8 sl = ibp->sc_to_sl[sc];
+
+ process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
+ }
+ return !ignore_fecn && fecn;
+}
+
+struct ps_mdata {
+ struct hfi1_ctxtdata *rcd;
+ u32 rsize;
+ u32 maxcnt;
+ u32 ps_head;
+ u32 ps_tail;
+ u32 ps_seq;
+};
+
+static inline void init_ps_mdata(struct ps_mdata *mdata,
+ struct hfi1_packet *packet)
+{
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+
+ mdata->rcd = rcd;
+ mdata->rsize = packet->rsize;
+ mdata->maxcnt = packet->maxcnt;
+ mdata->ps_head = packet->rhqoff;
+
+ if (get_dma_rtail_setting(rcd)) {
+ mdata->ps_tail = get_rcvhdrtail(rcd);
+ if (rcd->ctxt == HFI1_CTRL_CTXT)
+ mdata->ps_seq = hfi1_seq_cnt(rcd);
+ else
+ mdata->ps_seq = 0; /* not used with DMA_RTAIL */
+ } else {
+ mdata->ps_tail = 0; /* used only with DMA_RTAIL */
+ mdata->ps_seq = hfi1_seq_cnt(rcd);
+ }
+}
+
+static inline int ps_done(struct ps_mdata *mdata, u64 rhf,
+ struct hfi1_ctxtdata *rcd)
+{
+ if (get_dma_rtail_setting(rcd))
+ return mdata->ps_head == mdata->ps_tail;
+ return mdata->ps_seq != rhf_rcv_seq(rhf);
+}
+
+static inline int ps_skip(struct ps_mdata *mdata, u64 rhf,
+ struct hfi1_ctxtdata *rcd)
+{
+ /*
+ * Control context can potentially receive an invalid rhf.
+ * Drop such packets.
+ */
+ if ((rcd->ctxt == HFI1_CTRL_CTXT) && (mdata->ps_head != mdata->ps_tail))
+ return mdata->ps_seq != rhf_rcv_seq(rhf);
+
+ return 0;
+}
+
+static inline void update_ps_mdata(struct ps_mdata *mdata,
+ struct hfi1_ctxtdata *rcd)
+{
+ mdata->ps_head += mdata->rsize;
+ if (mdata->ps_head >= mdata->maxcnt)
+ mdata->ps_head = 0;
+
+ /* Control context must do seq counting */
+ if (!get_dma_rtail_setting(rcd) ||
+ rcd->ctxt == HFI1_CTRL_CTXT)
+ mdata->ps_seq = hfi1_seq_incr_wrap(mdata->ps_seq);
+}
+
+/*
+ * prescan_rxq - search through the receive queue looking for packets
+ * containing Explicit Congestion Notifications (FECNs or BECNs).
+ * When an ECN is found, process the Congestion Notification, and toggle
+ * it off.
+ * This is declared as a macro to allow quick checking of the port to avoid
+ * the overhead of a function call if not enabled.
+ */
+#define prescan_rxq(rcd, packet) \
+ do { \
+ if (rcd->ppd->cc_prescan) \
+ __prescan_rxq(packet); \
+ } while (0)
+static void __prescan_rxq(struct hfi1_packet *packet)
+{
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct ps_mdata mdata;
+
+ init_ps_mdata(&mdata, packet);
+
+ while (1) {
+ struct hfi1_ibport *ibp = rcd_to_iport(rcd);
+ __le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
+ packet->rcd->rhf_offset;
+ struct rvt_qp *qp;
+ struct ib_header *hdr;
+ struct rvt_dev_info *rdi = &rcd->dd->verbs_dev.rdi;
+ u64 rhf = rhf_to_cpu(rhf_addr);
+ u32 etype = rhf_rcv_type(rhf), qpn, bth1;
+ u8 lnh;
+
+ if (ps_done(&mdata, rhf, rcd))
+ break;
+
+ if (ps_skip(&mdata, rhf, rcd))
+ goto next;
+
+ if (etype != RHF_RCV_TYPE_IB)
+ goto next;
+
+ packet->hdr = hfi1_get_msgheader(packet->rcd, rhf_addr);
+ hdr = packet->hdr;
+ lnh = ib_get_lnh(hdr);
+
+ if (lnh == HFI1_LRH_BTH) {
+ packet->ohdr = &hdr->u.oth;
+ packet->grh = NULL;
+ } else if (lnh == HFI1_LRH_GRH) {
+ packet->ohdr = &hdr->u.l.oth;
+ packet->grh = &hdr->u.l.grh;
+ } else {
+ goto next; /* just in case */
+ }
+
+ if (!hfi1_may_ecn(packet))
+ goto next;
+
+ bth1 = be32_to_cpu(packet->ohdr->bth[1]);
+ qpn = bth1 & RVT_QPN_MASK;
+ rcu_read_lock();
+ qp = rvt_lookup_qpn(rdi, &ibp->rvp, qpn);
+
+ if (!qp) {
+ rcu_read_unlock();
+ goto next;
+ }
+
+ hfi1_process_ecn_slowpath(qp, packet, true);
+ rcu_read_unlock();
+
+ /* turn off BECN, FECN */
+ bth1 &= ~(IB_FECN_SMASK | IB_BECN_SMASK);
+ packet->ohdr->bth[1] = cpu_to_be32(bth1);
+next:
+ update_ps_mdata(&mdata, rcd);
+ }
+}
+
+static void process_rcv_qp_work(struct hfi1_packet *packet)
+{
+ struct rvt_qp *qp, *nqp;
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+
+ /*
+ * Iterate over all QPs waiting to respond.
+ * The list won't change since the IRQ is only run on one CPU.
+ */
+ list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
+ list_del_init(&qp->rspwait);
+ if (qp->r_flags & RVT_R_RSP_NAK) {
+ qp->r_flags &= ~RVT_R_RSP_NAK;
+ packet->qp = qp;
+ hfi1_send_rc_ack(packet, 0);
+ }
+ if (qp->r_flags & RVT_R_RSP_SEND) {
+ unsigned long flags;
+
+ qp->r_flags &= ~RVT_R_RSP_SEND;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (ib_rvt_state_ops[qp->state] &
+ RVT_PROCESS_OR_FLUSH_SEND)
+ hfi1_schedule_send(qp);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ }
+ rvt_put_qp(qp);
+ }
+}
+
+static noinline int max_packet_exceeded(struct hfi1_packet *packet, int thread)
+{
+ if (thread) {
+ if ((packet->numpkt & (MAX_PKT_RECV_THREAD - 1)) == 0)
+ /* allow deferred processing */
+ process_rcv_qp_work(packet);
+ cond_resched();
+ return RCV_PKT_OK;
+ } else {
+ this_cpu_inc(*packet->rcd->dd->rcv_limit);
+ return RCV_PKT_LIMIT;
+ }
+}
+
+static inline int check_max_packet(struct hfi1_packet *packet, int thread)
+{
+ int ret = RCV_PKT_OK;
+
+ if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0))
+ ret = max_packet_exceeded(packet, thread);
+ return ret;
+}
+
+static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
+{
+ int ret;
+
+ packet->rcd->dd->ctx0_seq_drop++;
+ /* Set up for the next packet */
+ packet->rhqoff += packet->rsize;
+ if (packet->rhqoff >= packet->maxcnt)
+ packet->rhqoff = 0;
+
+ packet->numpkt++;
+ ret = check_max_packet(packet, thread);
+
+ packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
+ packet->rcd->rhf_offset;
+ packet->rhf = rhf_to_cpu(packet->rhf_addr);
+
+ return ret;
+}
+
+static void process_rcv_packet_napi(struct hfi1_packet *packet)
+{
+ packet->etype = rhf_rcv_type(packet->rhf);
+
+ /* total length */
+ packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
+ /* retrieve eager buffer details */
+ packet->etail = rhf_egr_index(packet->rhf);
+ packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
+ &packet->updegr);
+ /*
+ * Prefetch the contents of the eager buffer. It is
+ * OK to send a negative length to prefetch_range().
+ * The +2 is the size of the RHF.
+ */
+ prefetch_range(packet->ebuf,
+ packet->tlen - ((packet->rcd->rcvhdrqentsize -
+ (rhf_hdrq_offset(packet->rhf)
+ + 2)) * 4));
+
+ packet->rcd->rhf_rcv_function_map[packet->etype](packet);
+ packet->numpkt++;
+
+ /* Set up for the next packet */
+ packet->rhqoff += packet->rsize;
+ if (packet->rhqoff >= packet->maxcnt)
+ packet->rhqoff = 0;
+
+ packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
+ packet->rcd->rhf_offset;
+ packet->rhf = rhf_to_cpu(packet->rhf_addr);
+}
+
+static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
+{
+ int ret;
+
+ packet->etype = rhf_rcv_type(packet->rhf);
+
+ /* total length */
+ packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
+ /* retrieve eager buffer details */
+ packet->ebuf = NULL;
+ if (rhf_use_egr_bfr(packet->rhf)) {
+ packet->etail = rhf_egr_index(packet->rhf);
+ packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
+ &packet->updegr);
+ /*
+ * Prefetch the contents of the eager buffer. It is
+ * OK to send a negative length to prefetch_range().
+ * The +2 is the size of the RHF.
+ */
+ prefetch_range(packet->ebuf,
+ packet->tlen - ((get_hdrqentsize(packet->rcd) -
+ (rhf_hdrq_offset(packet->rhf)
+ + 2)) * 4));
+ }
+
+ /*
+ * Call a type specific handler for the packet. We
+ * should be able to trust that etype won't be beyond
+ * the range of valid indexes. If it is, something is really
+ * wrong and we can probably just let things come
+ * crashing down. There is no need to eat another
+ * comparison in this performance critical code.
+ */
+ packet->rcd->rhf_rcv_function_map[packet->etype](packet);
+ packet->numpkt++;
+
+ /* Set up for the next packet */
+ packet->rhqoff += packet->rsize;
+ if (packet->rhqoff >= packet->maxcnt)
+ packet->rhqoff = 0;
+
+ ret = check_max_packet(packet, thread);
+
+ packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
+ packet->rcd->rhf_offset;
+ packet->rhf = rhf_to_cpu(packet->rhf_addr);
+
+ return ret;
+}
+
+static inline void process_rcv_update(int last, struct hfi1_packet *packet)
+{
+ /*
+ * Update head regs etc. every 16 packets, if not the last pkt,
+ * to help prevent rcvhdrq overflows when many packets
+ * are processed and the queue is nearly full.
+ * Don't request an interrupt for intermediate updates.
+ */
+ if (!last && !(packet->numpkt & 0xf)) {
+ update_usrhead(packet->rcd, packet->rhqoff, packet->updegr,
+ packet->etail, 0, 0);
+ packet->updegr = 0;
+ }
+ packet->grh = NULL;
+}
+
+static inline void finish_packet(struct hfi1_packet *packet)
+{
+ /*
+ * Nothing we need to free for the packet.
+ *
+ * The only thing we need to do is a final update and call for an
+ * interrupt
+ */
+ update_usrhead(packet->rcd, hfi1_rcd_head(packet->rcd), packet->updegr,
+ packet->etail, rcv_intr_dynamic, packet->numpkt);
+}
+
+/*
+ * handle_receive_interrupt_napi_fp - receive a packet
+ * @rcd: the context
+ * @budget: polling budget
+ *
+ * Called from interrupt handler for receive interrupt.
+ * This is the fast path interrupt handler
+ * when executing in a NAPI softirq environment.
+ */
+int handle_receive_interrupt_napi_fp(struct hfi1_ctxtdata *rcd, int budget)
+{
+ struct hfi1_packet packet;
+
+ init_packet(rcd, &packet);
+ if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
+ goto bail;
+
+ while (packet.numpkt < budget) {
+ process_rcv_packet_napi(&packet);
+ if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
+ break;
+
+ process_rcv_update(0, &packet);
+ }
+ hfi1_set_rcd_head(rcd, packet.rhqoff);
+bail:
+ finish_packet(&packet);
+ return packet.numpkt;
+}
+
+/*
+ * Handle receive interrupts when using the no dma rtail option.
+ */
+int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
+{
+ int last = RCV_PKT_OK;
+ struct hfi1_packet packet;
+
+ init_packet(rcd, &packet);
+ if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) {
+ last = RCV_PKT_DONE;
+ goto bail;
+ }
+
+ prescan_rxq(rcd, &packet);
+
+ while (last == RCV_PKT_OK) {
+ last = process_rcv_packet(&packet, thread);
+ if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
+ last = RCV_PKT_DONE;
+ process_rcv_update(last, &packet);
+ }
+ process_rcv_qp_work(&packet);
+ hfi1_set_rcd_head(rcd, packet.rhqoff);
+bail:
+ finish_packet(&packet);
+ return last;
+}
+
+int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
+{
+ u32 hdrqtail;
+ int last = RCV_PKT_OK;
+ struct hfi1_packet packet;
+
+ init_packet(rcd, &packet);
+ hdrqtail = get_rcvhdrtail(rcd);
+ if (packet.rhqoff == hdrqtail) {
+ last = RCV_PKT_DONE;
+ goto bail;
+ }
+ smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
+
+ prescan_rxq(rcd, &packet);
+
+ while (last == RCV_PKT_OK) {
+ last = process_rcv_packet(&packet, thread);
+ if (packet.rhqoff == hdrqtail)
+ last = RCV_PKT_DONE;
+ process_rcv_update(last, &packet);
+ }
+ process_rcv_qp_work(&packet);
+ hfi1_set_rcd_head(rcd, packet.rhqoff);
+bail:
+ finish_packet(&packet);
+ return last;
+}
+
+static void set_all_fastpath(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
+{
+ u16 i;
+
+ /*
+ * For dynamically allocated kernel contexts (like vnic) switch
+ * interrupt handler only for that context. Otherwise, switch
+ * interrupt handler for all statically allocated kernel contexts.
+ */
+ if (rcd->ctxt >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic) {
+ hfi1_rcd_get(rcd);
+ hfi1_set_fast(rcd);
+ hfi1_rcd_put(rcd);
+ return;
+ }
+
+ for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
+ rcd = hfi1_rcd_get_by_index(dd, i);
+ if (rcd && (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic))
+ hfi1_set_fast(rcd);
+ hfi1_rcd_put(rcd);
+ }
+}
+
+void set_all_slowpath(struct hfi1_devdata *dd)
+{
+ struct hfi1_ctxtdata *rcd;
+ u16 i;
+
+ /* HFI1_CTRL_CTXT must always use the slow path interrupt handler */
+ for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
+ rcd = hfi1_rcd_get_by_index(dd, i);
+ if (!rcd)
+ continue;
+ if (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
+ rcd->do_interrupt = rcd->slow_handler;
+
+ hfi1_rcd_put(rcd);
+ }
+}
+
+static bool __set_armed_to_active(struct hfi1_packet *packet)
+{
+ u8 etype = rhf_rcv_type(packet->rhf);
+ u8 sc = SC15_PACKET;
+
+ if (etype == RHF_RCV_TYPE_IB) {
+ struct ib_header *hdr = hfi1_get_msgheader(packet->rcd,
+ packet->rhf_addr);
+ sc = hfi1_9B_get_sc5(hdr, packet->rhf);
+ } else if (etype == RHF_RCV_TYPE_BYPASS) {
+ struct hfi1_16b_header *hdr = hfi1_get_16B_header(
+ packet->rcd,
+ packet->rhf_addr);
+ sc = hfi1_16B_get_sc(hdr);
+ }
+ if (sc != SC15_PACKET) {
+ int hwstate = driver_lstate(packet->rcd->ppd);
+ struct work_struct *lsaw =
+ &packet->rcd->ppd->linkstate_active_work;
+
+ if (hwstate != IB_PORT_ACTIVE) {
+ dd_dev_info(packet->rcd->dd,
+ "Unexpected link state %s\n",
+ ib_port_state_to_str(hwstate));
+ return false;
+ }
+
+ queue_work(packet->rcd->ppd->link_wq, lsaw);
+ return true;
+ }
+ return false;
+}
+
+/**
+ * set_armed_to_active - the fast path for armed to active
+ * @packet: the packet structure
+ *
+ * Return true if packet processing needs to bail.
+ */
+static bool set_armed_to_active(struct hfi1_packet *packet)
+{
+ if (likely(packet->rcd->ppd->host_link_state != HLS_UP_ARMED))
+ return false;
+ return __set_armed_to_active(packet);
+}
+
+/*
+ * handle_receive_interrupt - receive a packet
+ * @rcd: the context
+ *
+ * Called from interrupt handler for errors or receive interrupt.
+ * This is the slow path interrupt handler.
+ */
+int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
+{
+ struct hfi1_devdata *dd = rcd->dd;
+ u32 hdrqtail;
+ int needset, last = RCV_PKT_OK;
+ struct hfi1_packet packet;
+ int skip_pkt = 0;
+
+ if (!rcd->rcvhdrq)
+ return RCV_PKT_OK;
+ /* Control context will always use the slow path interrupt handler */
+ needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1;
+
+ init_packet(rcd, &packet);
+
+ if (!get_dma_rtail_setting(rcd)) {
+ if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) {
+ last = RCV_PKT_DONE;
+ goto bail;
+ }
+ hdrqtail = 0;
+ } else {
+ hdrqtail = get_rcvhdrtail(rcd);
+ if (packet.rhqoff == hdrqtail) {
+ last = RCV_PKT_DONE;
+ goto bail;
+ }
+ smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
+
+ /*
+ * Control context can potentially receive an invalid
+ * rhf. Drop such packets.
+ */
+ if (rcd->ctxt == HFI1_CTRL_CTXT)
+ if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
+ skip_pkt = 1;
+ }
+
+ prescan_rxq(rcd, &packet);
+
+ while (last == RCV_PKT_OK) {
+ if (hfi1_need_drop(dd)) {
+ /* On to the next packet */
+ packet.rhqoff += packet.rsize;
+ packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
+ packet.rhqoff +
+ rcd->rhf_offset;
+ packet.rhf = rhf_to_cpu(packet.rhf_addr);
+
+ } else if (skip_pkt) {
+ last = skip_rcv_packet(&packet, thread);
+ skip_pkt = 0;
+ } else {
+ if (set_armed_to_active(&packet))
+ goto bail;
+ last = process_rcv_packet(&packet, thread);
+ }
+
+ if (!get_dma_rtail_setting(rcd)) {
+ if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
+ last = RCV_PKT_DONE;
+ } else {
+ if (packet.rhqoff == hdrqtail)
+ last = RCV_PKT_DONE;
+ /*
+ * Control context can potentially receive an invalid
+ * rhf. Drop such packets.
+ */
+ if (rcd->ctxt == HFI1_CTRL_CTXT) {
+ bool lseq;
+
+ lseq = hfi1_seq_incr(rcd,
+ rhf_rcv_seq(packet.rhf));
+ if (!last && lseq)
+ skip_pkt = 1;
+ }
+ }
+
+ if (needset) {
+ needset = false;
+ set_all_fastpath(dd, rcd);
+ }
+ process_rcv_update(last, &packet);
+ }
+
+ process_rcv_qp_work(&packet);
+ hfi1_set_rcd_head(rcd, packet.rhqoff);
+
+bail:
+ /*
+ * Always write head at end, and setup rcv interrupt, even
+ * if no packets were processed.
+ */
+ finish_packet(&packet);
+ return last;
+}
+
+/*
+ * handle_receive_interrupt_napi_sp - receive a packet
+ * @rcd: the context
+ * @budget: polling budget
+ *
+ * Called from interrupt handler for errors or receive interrupt.
+ * This is the slow path interrupt handler
+ * when executing in a NAPI softirq environment.
+ */
+int handle_receive_interrupt_napi_sp(struct hfi1_ctxtdata *rcd, int budget)
+{
+ struct hfi1_devdata *dd = rcd->dd;
+ int last = RCV_PKT_OK;
+ bool needset = true;
+ struct hfi1_packet packet;
+
+ init_packet(rcd, &packet);
+ if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
+ goto bail;
+
+ while (last != RCV_PKT_DONE && packet.numpkt < budget) {
+ if (hfi1_need_drop(dd)) {
+ /* On to the next packet */
+ packet.rhqoff += packet.rsize;
+ packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
+ packet.rhqoff +
+ rcd->rhf_offset;
+ packet.rhf = rhf_to_cpu(packet.rhf_addr);
+
+ } else {
+ if (set_armed_to_active(&packet))
+ goto bail;
+ process_rcv_packet_napi(&packet);
+ }
+
+ if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
+ last = RCV_PKT_DONE;
+
+ if (needset) {
+ needset = false;
+ set_all_fastpath(dd, rcd);
+ }
+
+ process_rcv_update(last, &packet);
+ }
+
+ hfi1_set_rcd_head(rcd, packet.rhqoff);
+
+bail:
+ /*
+ * Always write head at end, and setup rcv interrupt, even
+ * if no packets were processed.
+ */
+ finish_packet(&packet);
+ return packet.numpkt;
+}
+
+/*
+ * We may discover in the interrupt that the hardware link state has
+ * changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet),
+ * and we need to update the driver's notion of the link state. We cannot
+ * run set_link_state from interrupt context, so we queue this function on
+ * a workqueue.
+ *
+ * We delay the regular interrupt processing until after the state changes
+ * so that the link will be in the correct state by the time any application
+ * we wake up attempts to send a reply to any message it received.
+ * (Subsequent receive interrupts may possibly force the wakeup before we
+ * update the link state.)
+ *
+ * The rcd is freed in hfi1_free_ctxtdata after hfi1_postinit_cleanup invokes
+ * dd->f_cleanup(dd) to disable the interrupt handler and flush workqueues,
+ * so we're safe from use-after-free of the rcd.
+ */
+void receive_interrupt_work(struct work_struct *work)
+{
+ struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+ linkstate_active_work);
+ struct hfi1_devdata *dd = ppd->dd;
+ struct hfi1_ctxtdata *rcd;
+ u16 i;
+
+ /* Received non-SC15 packet implies neighbor_normal */
+ ppd->neighbor_normal = 1;
+ set_link_state(ppd, HLS_UP_ACTIVE);
+
+ /*
+ * Interrupt all statically allocated kernel contexts that could
+ * have had an interrupt during auto activation.
+ */
+ for (i = HFI1_CTRL_CTXT; i < dd->first_dyn_alloc_ctxt; i++) {
+ rcd = hfi1_rcd_get_by_index(dd, i);
+ if (rcd)
+ force_recv_intr(rcd);
+ hfi1_rcd_put(rcd);
+ }
+}
+
+/*
+ * Convert a given MTU size to the on-wire MAD packet enumeration.
+ * Return default_if_bad if the size is invalid.
+ */
+int mtu_to_enum(u32 mtu, int default_if_bad)
+{
+ switch (mtu) {
+ case 0: return OPA_MTU_0;
+ case 256: return OPA_MTU_256;
+ case 512: return OPA_MTU_512;
+ case 1024: return OPA_MTU_1024;
+ case 2048: return OPA_MTU_2048;
+ case 4096: return OPA_MTU_4096;
+ case 8192: return OPA_MTU_8192;
+ case 10240: return OPA_MTU_10240;
+ }
+ return default_if_bad;
+}
+
+u16 enum_to_mtu(int mtu)
+{
+ switch (mtu) {
+ case OPA_MTU_0: return 0;
+ case OPA_MTU_256: return 256;
+ case OPA_MTU_512: return 512;
+ case OPA_MTU_1024: return 1024;
+ case OPA_MTU_2048: return 2048;
+ case OPA_MTU_4096: return 4096;
+ case OPA_MTU_8192: return 8192;
+ case OPA_MTU_10240: return 10240;
+ default: return 0xffff;
+ }
+}
+
+/*
+ * set_mtu - set the MTU
+ * @ppd: the per port data
+ *
+ * We can handle "any" incoming size, the issue here is whether we
+ * need to restrict our outgoing size. We do not deal with what happens
+ * to programs that are already running when the size changes.
+ */
+int set_mtu(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ int i, drain, ret = 0, is_up = 0;
+
+ ppd->ibmtu = 0;
+ for (i = 0; i < ppd->vls_supported; i++)
+ if (ppd->ibmtu < dd->vld[i].mtu)
+ ppd->ibmtu = dd->vld[i].mtu;
+ ppd->ibmaxlen = ppd->ibmtu + lrh_max_header_bytes(ppd->dd);
+
+ mutex_lock(&ppd->hls_lock);
+ if (ppd->host_link_state == HLS_UP_INIT ||
+ ppd->host_link_state == HLS_UP_ARMED ||
+ ppd->host_link_state == HLS_UP_ACTIVE)
+ is_up = 1;
+
+ drain = !is_ax(dd) && is_up;
+
+ if (drain)
+ /*
+ * MTU is specified per-VL. To ensure that no packet gets
+ * stuck (due, e.g., to the MTU for the packet's VL being
+ * reduced), empty the per-VL FIFOs before adjusting MTU.
+ */
+ ret = stop_drain_data_vls(dd);
+
+ if (ret) {
+ dd_dev_err(dd, "%s: cannot stop/drain VLs - refusing to change per-VL MTUs\n",
+ __func__);
+ goto err;
+ }
+
+ hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_MTU, 0);
+
+ if (drain)
+ open_fill_data_vls(dd); /* reopen all VLs */
+
+err:
+ mutex_unlock(&ppd->hls_lock);
+
+ return ret;
+}
+
+int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+
+ ppd->lid = lid;
+ ppd->lmc = lmc;
+ hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0);
+
+ dd_dev_info(dd, "port %u: got a lid: 0x%x\n", ppd->port, lid);
+
+ return 0;
+}
+
+void shutdown_led_override(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+
+ /*
+ * This pairs with the memory barrier in hfi1_start_led_override to
+ * ensure that we read the correct state of LED beaconing represented
+ * by led_override_timer_active
+ */
+ smp_rmb();
+ if (atomic_read(&ppd->led_override_timer_active)) {
+ timer_delete_sync(&ppd->led_override_timer);
+ atomic_set(&ppd->led_override_timer_active, 0);
+ /* Ensure the atomic_set is visible to all CPUs */
+ smp_wmb();
+ }
+
+ /* Hand control of the LED to the DC for normal operation */
+ write_csr(dd, DCC_CFG_LED_CNTRL, 0);
+}
+
+static void run_led_override(struct timer_list *t)
+{
+ struct hfi1_pportdata *ppd = timer_container_of(ppd, t,
+ led_override_timer);
+ struct hfi1_devdata *dd = ppd->dd;
+ unsigned long timeout;
+ int phase_idx;
+
+ if (!(dd->flags & HFI1_INITTED))
+ return;
+
+ phase_idx = ppd->led_override_phase & 1;
+
+ setextled(dd, phase_idx);
+
+ timeout = ppd->led_override_vals[phase_idx];
+
+ /* Set up for next phase */
+ ppd->led_override_phase = !ppd->led_override_phase;
+
+ mod_timer(&ppd->led_override_timer, jiffies + timeout);
+}
+
+/*
+ * To have the LED blink in a particular pattern, provide timeon and timeoff
+ * in milliseconds.
+ * To turn off custom blinking and return to normal operation, use
+ * shutdown_led_override()
+ */
+void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
+ unsigned int timeoff)
+{
+ if (!(ppd->dd->flags & HFI1_INITTED))
+ return;
+
+ /* Convert to jiffies for direct use in timer */
+ ppd->led_override_vals[0] = msecs_to_jiffies(timeoff);
+ ppd->led_override_vals[1] = msecs_to_jiffies(timeon);
+
+ /* Arbitrarily start from LED on phase */
+ ppd->led_override_phase = 1;
+
+ /*
+ * If the timer has not already been started, do so. Use a "quick"
+ * timeout so the handler will be called soon to look at our request.
+ */
+ if (!timer_pending(&ppd->led_override_timer)) {
+ timer_setup(&ppd->led_override_timer, run_led_override, 0);
+ ppd->led_override_timer.expires = jiffies + 1;
+ add_timer(&ppd->led_override_timer);
+ atomic_set(&ppd->led_override_timer_active, 1);
+ /* Ensure the atomic_set is visible to all CPUs */
+ smp_wmb();
+ }
+}
+
+/**
+ * hfi1_reset_device - reset the chip if possible
+ * @unit: the device to reset
+ *
+ * Whether or not reset is successful, we attempt to re-initialize the chip
+ * (that is, much like a driver unload/reload). We clear the INITTED flag
+ * so that the various entry points will fail until we reinitialize. For
+ * now, we only allow this if no user contexts are open that use chip resources.
+ */
+int hfi1_reset_device(int unit)
+{
+ int ret;
+ struct hfi1_devdata *dd = hfi1_lookup(unit);
+ struct hfi1_pportdata *ppd;
+ int pidx;
+
+ if (!dd) {
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ dd_dev_info(dd, "Reset on unit %u requested\n", unit);
+
+ if (!dd->kregbase1 || !(dd->flags & HFI1_PRESENT)) {
+ dd_dev_info(dd,
+ "Invalid unit number %u or not initialized or not present\n",
+ unit);
+ ret = -ENXIO;
+ goto bail;
+ }
+
+ /* If there are any user/vnic contexts, we cannot reset */
+ mutex_lock(&hfi1_mutex);
+ if (dd->rcd && hfi1_stats.sps_ctxts) {
+ mutex_unlock(&hfi1_mutex);
+ ret = -EBUSY;
+ goto bail;
+ }
+ mutex_unlock(&hfi1_mutex);
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+
+ shutdown_led_override(ppd);
+ }
+ if (dd->flags & HFI1_HAS_SEND_DMA)
+ sdma_exit(dd);
+
+ hfi1_reset_cpu_counters(dd);
+
+ ret = hfi1_init(dd, 1);
+
+ if (ret)
+ dd_dev_err(dd,
+ "Reinitialize unit %u after reset failed with %d\n",
+ unit, ret);
+ else
+ dd_dev_info(dd, "Reinitialized unit %u after resetting\n",
+ unit);
+
+bail:
+ return ret;
+}
+
+static inline void hfi1_setup_ib_header(struct hfi1_packet *packet)
+{
+ packet->hdr = (struct hfi1_ib_message_header *)
+ hfi1_get_msgheader(packet->rcd,
+ packet->rhf_addr);
+ packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
+}
+
+static int hfi1_bypass_ingress_pkt_check(struct hfi1_packet *packet)
+{
+ struct hfi1_pportdata *ppd = packet->rcd->ppd;
+
+ /* slid and dlid cannot be 0 */
+ if ((!packet->slid) || (!packet->dlid))
+ return -EINVAL;
+
+ /* Compare port lid with incoming packet dlid */
+ if ((!(hfi1_is_16B_mcast(packet->dlid))) &&
+ (packet->dlid !=
+ opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B))) {
+ if ((packet->dlid & ~((1 << ppd->lmc) - 1)) != ppd->lid)
+ return -EINVAL;
+ }
+
+ /* No multicast packets with SC15 */
+ if ((hfi1_is_16B_mcast(packet->dlid)) && (packet->sc == 0xF))
+ return -EINVAL;
+
+ /* Packets with permissive DLID always on SC15 */
+ if ((packet->dlid == opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE),
+ 16B)) &&
+ (packet->sc != 0xF))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hfi1_setup_9B_packet(struct hfi1_packet *packet)
+{
+ struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
+ struct ib_header *hdr;
+ u8 lnh;
+
+ hfi1_setup_ib_header(packet);
+ hdr = packet->hdr;
+
+ lnh = ib_get_lnh(hdr);
+ if (lnh == HFI1_LRH_BTH) {
+ packet->ohdr = &hdr->u.oth;
+ packet->grh = NULL;
+ } else if (lnh == HFI1_LRH_GRH) {
+ u32 vtf;
+
+ packet->ohdr = &hdr->u.l.oth;
+ packet->grh = &hdr->u.l.grh;
+ if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
+ goto drop;
+ vtf = be32_to_cpu(packet->grh->version_tclass_flow);
+ if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
+ goto drop;
+ } else {
+ goto drop;
+ }
+
+ /* Query commonly used fields from packet header */
+ packet->payload = packet->ebuf;
+ packet->opcode = ib_bth_get_opcode(packet->ohdr);
+ packet->slid = ib_get_slid(hdr);
+ packet->dlid = ib_get_dlid(hdr);
+ if (unlikely((packet->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
+ (packet->dlid != be16_to_cpu(IB_LID_PERMISSIVE))))
+ packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
+ be16_to_cpu(IB_MULTICAST_LID_BASE);
+ packet->sl = ib_get_sl(hdr);
+ packet->sc = hfi1_9B_get_sc5(hdr, packet->rhf);
+ packet->pad = ib_bth_get_pad(packet->ohdr);
+ packet->extra_byte = 0;
+ packet->pkey = ib_bth_get_pkey(packet->ohdr);
+ packet->migrated = ib_bth_is_migration(packet->ohdr);
+
+ return 0;
+drop:
+ ibp->rvp.n_pkt_drops++;
+ return -EINVAL;
+}
+
+static int hfi1_setup_bypass_packet(struct hfi1_packet *packet)
+{
+ /*
+ * Bypass packets have a different header/payload split
+ * compared to an IB packet.
+ * The current split is set such that 16 bytes of the actual
+ * header are in the header buffer and the remainder is in
+ * the eager buffer. We chose 16 since the hfi1 driver only
+ * supports 16B bypass packets and we will be able to
+ * receive the entire LRH with such a split.
+ */
+
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct hfi1_pportdata *ppd = rcd->ppd;
+ struct hfi1_ibport *ibp = &ppd->ibport_data;
+ u8 l4;
+
+ packet->hdr = (struct hfi1_16b_header *)
+ hfi1_get_16B_header(packet->rcd,
+ packet->rhf_addr);
+ l4 = hfi1_16B_get_l4(packet->hdr);
+ if (l4 == OPA_16B_L4_IB_LOCAL) {
+ packet->ohdr = packet->ebuf;
+ packet->grh = NULL;
+ packet->opcode = ib_bth_get_opcode(packet->ohdr);
+ packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
+ /* hdr_len_by_opcode already has an IB LRH factored in */
+ packet->hlen = hdr_len_by_opcode[packet->opcode] +
+ (LRH_16B_BYTES - LRH_9B_BYTES);
+ packet->migrated = opa_bth_is_migration(packet->ohdr);
+ } else if (l4 == OPA_16B_L4_IB_GLOBAL) {
+ u32 vtf;
+ u8 grh_len = sizeof(struct ib_grh);
+
+ packet->ohdr = packet->ebuf + grh_len;
+ packet->grh = packet->ebuf;
+ packet->opcode = ib_bth_get_opcode(packet->ohdr);
+ packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
+ /* hdr_len_by_opcode already has an IB LRH factored in */
+ packet->hlen = hdr_len_by_opcode[packet->opcode] +
+ (LRH_16B_BYTES - LRH_9B_BYTES) + grh_len;
+ packet->migrated = opa_bth_is_migration(packet->ohdr);
+
+ if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
+ goto drop;
+ vtf = be32_to_cpu(packet->grh->version_tclass_flow);
+ if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
+ goto drop;
+ } else if (l4 == OPA_16B_L4_FM) {
+ packet->mgmt = packet->ebuf;
+ packet->ohdr = NULL;
+ packet->grh = NULL;
+ packet->opcode = IB_OPCODE_UD_SEND_ONLY;
+ packet->pad = OPA_16B_L4_FM_PAD;
+ packet->hlen = OPA_16B_L4_FM_HLEN;
+ packet->migrated = false;
+ } else {
+ goto drop;
+ }
+
+ /* Query commonly used fields from packet header */
+ packet->payload = packet->ebuf + packet->hlen - LRH_16B_BYTES;
+ packet->slid = hfi1_16B_get_slid(packet->hdr);
+ packet->dlid = hfi1_16B_get_dlid(packet->hdr);
+ if (unlikely(hfi1_is_16B_mcast(packet->dlid)))
+ packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
+ opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR),
+ 16B);
+ packet->sc = hfi1_16B_get_sc(packet->hdr);
+ packet->sl = ibp->sc_to_sl[packet->sc];
+ packet->extra_byte = SIZE_OF_LT;
+ packet->pkey = hfi1_16B_get_pkey(packet->hdr);
+
+ if (hfi1_bypass_ingress_pkt_check(packet))
+ goto drop;
+
+ return 0;
+drop:
+ hfi1_cdbg(PKT, "%s: packet dropped", __func__);
+ ibp->rvp.n_pkt_drops++;
+ return -EINVAL;
+}
+
+static void show_eflags_errs(struct hfi1_packet *packet)
+{
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ u32 rte = rhf_rcv_type_err(packet->rhf);
+
+ dd_dev_err(rcd->dd,
+ "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s] rte 0x%x\n",
+ rcd->ctxt, packet->rhf,
+ packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
+ packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
+ packet->rhf & RHF_DC_ERR ? "dc " : "",
+ packet->rhf & RHF_TID_ERR ? "tid " : "",
+ packet->rhf & RHF_LEN_ERR ? "len " : "",
+ packet->rhf & RHF_ECC_ERR ? "ecc " : "",
+ packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
+ rte);
+}
+
+void handle_eflags(struct hfi1_packet *packet)
+{
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+
+ rcv_hdrerr(rcd, rcd->ppd, packet);
+ if (rhf_err_flags(packet->rhf))
+ show_eflags_errs(packet);
+}
+
+static void hfi1_ipoib_ib_rcv(struct hfi1_packet *packet)
+{
+ struct hfi1_ibport *ibp;
+ struct net_device *netdev;
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct napi_struct *napi = rcd->napi;
+ struct sk_buff *skb;
+ struct hfi1_netdev_rxq *rxq = container_of(napi,
+ struct hfi1_netdev_rxq, napi);
+ u32 extra_bytes;
+ u32 tlen, qpnum;
+ bool do_work, do_cnp;
+
+ trace_hfi1_rcvhdr(packet);
+
+ hfi1_setup_ib_header(packet);
+
+ packet->ohdr = &((struct ib_header *)packet->hdr)->u.oth;
+ packet->grh = NULL;
+
+ if (unlikely(rhf_err_flags(packet->rhf))) {
+ handle_eflags(packet);
+ return;
+ }
+
+ qpnum = ib_bth_get_qpn(packet->ohdr);
+ netdev = hfi1_netdev_get_data(rcd->dd, qpnum);
+ if (!netdev)
+ goto drop_no_nd;
+
+ trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
+ trace_ctxt_rsm_hist(rcd->ctxt);
+
+ /* handle congestion notifications */
+ do_work = hfi1_may_ecn(packet);
+ if (unlikely(do_work)) {
+ do_cnp = (packet->opcode != IB_OPCODE_CNP);
+ (void)hfi1_process_ecn_slowpath(hfi1_ipoib_priv(netdev)->qp,
+ packet, do_cnp);
+ }
+
+ /*
+ * The split point is after the last byte of the DETH,
+ * so strip the padding, CRC and ICRC.
+ * tlen is the whole packet length, so we need to
+ * subtract the header size as well.
+ */
+ tlen = packet->tlen;
+ extra_bytes = ib_bth_get_pad(packet->ohdr) + (SIZE_OF_CRC << 2) +
+ packet->hlen;
+ if (unlikely(tlen < extra_bytes))
+ goto drop;
+
+ tlen -= extra_bytes;
+
+ skb = hfi1_ipoib_prepare_skb(rxq, tlen, packet->ebuf);
+ if (unlikely(!skb))
+ goto drop;
+
+ dev_sw_netstats_rx_add(netdev, skb->len);
+
+ skb->dev = netdev;
+ skb->pkt_type = PACKET_HOST;
+ netif_receive_skb(skb);
+
+ return;
+
+drop:
+ ++netdev->stats.rx_dropped;
+drop_no_nd:
+ ibp = rcd_to_iport(packet->rcd);
+ ++ibp->rvp.n_pkt_drops;
+}
+
+/*
+ * The following functions are called by the interrupt handler. They are type
+ * specific handlers for each packet type.
+ */
+static void process_receive_ib(struct hfi1_packet *packet)
+{
+ if (hfi1_setup_9B_packet(packet))
+ return;
+
+ if (unlikely(hfi1_dbg_should_fault_rx(packet)))
+ return;
+
+ trace_hfi1_rcvhdr(packet);
+
+ if (unlikely(rhf_err_flags(packet->rhf))) {
+ handle_eflags(packet);
+ return;
+ }
+
+ hfi1_ib_rcv(packet);
+}
+
+static void process_receive_bypass(struct hfi1_packet *packet)
+{
+ struct hfi1_devdata *dd = packet->rcd->dd;
+
+ if (hfi1_setup_bypass_packet(packet))
+ return;
+
+ trace_hfi1_rcvhdr(packet);
+
+ if (unlikely(rhf_err_flags(packet->rhf))) {
+ handle_eflags(packet);
+ return;
+ }
+
+ if (hfi1_16B_get_l2(packet->hdr) == 0x2) {
+ hfi1_16B_rcv(packet);
+ } else {
+ dd_dev_err(dd,
+ "Bypass packets other than 16B are not supported in normal operation. Dropping\n");
+ incr_cntr64(&dd->sw_rcv_bypass_packet_errors);
+ if (!(dd->err_info_rcvport.status_and_code &
+ OPA_EI_STATUS_SMASK)) {
+ u64 *flits = packet->ebuf;
+
+ if (flits && !(packet->rhf & RHF_LEN_ERR)) {
+ dd->err_info_rcvport.packet_flit1 = flits[0];
+ dd->err_info_rcvport.packet_flit2 =
+ packet->tlen > sizeof(flits[0]) ?
+ flits[1] : 0;
+ }
+ dd->err_info_rcvport.status_and_code |=
+ (OPA_EI_STATUS_SMASK | BAD_L2_ERR);
+ }
+ }
+}
+
+static void process_receive_error(struct hfi1_packet *packet)
+{
+ /* KHdrHCRCErr -- KDETH packet with a bad HCRC */
+ if (unlikely(
+ hfi1_dbg_fault_suppress_err(&packet->rcd->dd->verbs_dev) &&
+ (rhf_rcv_type_err(packet->rhf) == RHF_RCV_TYPE_ERROR ||
+ packet->rhf & RHF_DC_ERR)))
+ return;
+
+ hfi1_setup_ib_header(packet);
+ handle_eflags(packet);
+
+ if (unlikely(rhf_err_flags(packet->rhf)))
+ dd_dev_err(packet->rcd->dd,
+ "Unhandled error packet received. Dropping.\n");
+}
+
+static void kdeth_process_expected(struct hfi1_packet *packet)
+{
+ hfi1_setup_9B_packet(packet);
+ if (unlikely(hfi1_dbg_should_fault_rx(packet)))
+ return;
+
+ if (unlikely(rhf_err_flags(packet->rhf))) {
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+
+ if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
+ return;
+ }
+
+ hfi1_kdeth_expected_rcv(packet);
+}
+
+static void kdeth_process_eager(struct hfi1_packet *packet)
+{
+ hfi1_setup_9B_packet(packet);
+ if (unlikely(hfi1_dbg_should_fault_rx(packet)))
+ return;
+
+ trace_hfi1_rcvhdr(packet);
+ if (unlikely(rhf_err_flags(packet->rhf))) {
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+
+ show_eflags_errs(packet);
+ if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
+ return;
+ }
+
+ hfi1_kdeth_eager_rcv(packet);
+}
+
+static void process_receive_invalid(struct hfi1_packet *packet)
+{
+ dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
+ rhf_rcv_type(packet->rhf));
+}
+
+#define HFI1_RCVHDR_DUMP_MAX 5
+
+void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd)
+{
+ struct hfi1_packet packet;
+ struct ps_mdata mdata;
+ int i;
+
+ seq_printf(s, "Rcd %u: RcvHdr cnt %u entsize %u %s ctrl 0x%08llx status 0x%08llx, head %llu tail %llu sw head %u\n",
+ rcd->ctxt, get_hdrq_cnt(rcd), get_hdrqentsize(rcd),
+ get_dma_rtail_setting(rcd) ?
+ "dma_rtail" : "nodma_rtail",
+ read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_CTRL),
+ read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_STATUS),
+ read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) &
+ RCV_HDR_HEAD_HEAD_MASK,
+ read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL),
+ rcd->head);
+
+ init_packet(rcd, &packet);
+ init_ps_mdata(&mdata, &packet);
+
+ for (i = 0; i < HFI1_RCVHDR_DUMP_MAX; i++) {
+ __le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
+ rcd->rhf_offset;
+ struct ib_header *hdr;
+ u64 rhf = rhf_to_cpu(rhf_addr);
+ u32 etype = rhf_rcv_type(rhf), qpn;
+ u8 opcode;
+ u32 psn;
+ u8 lnh;
+
+ if (ps_done(&mdata, rhf, rcd))
+ break;
+
+ if (ps_skip(&mdata, rhf, rcd))
+ goto next;
+
+ if (etype > RHF_RCV_TYPE_IB)
+ goto next;
+
+ packet.hdr = hfi1_get_msgheader(rcd, rhf_addr);
+ hdr = packet.hdr;
+
+ lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+
+ if (lnh == HFI1_LRH_BTH)
+ packet.ohdr = &hdr->u.oth;
+ else if (lnh == HFI1_LRH_GRH)
+ packet.ohdr = &hdr->u.l.oth;
+ else
+ goto next; /* just in case */
+
+ opcode = (be32_to_cpu(packet.ohdr->bth[0]) >> 24);
+ qpn = be32_to_cpu(packet.ohdr->bth[1]) & RVT_QPN_MASK;
+ psn = mask_psn(be32_to_cpu(packet.ohdr->bth[2]));
+
+ seq_printf(s, "\tEnt %u: opcode 0x%x, qpn 0x%x, psn 0x%x\n",
+ mdata.ps_head, opcode, qpn, psn);
+next:
+ update_ps_mdata(&mdata, rcd);
+ }
+}
+
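+/*
+ * Per-receive-type dispatch tables, indexed by rhf_rcv_type(). The
+ * netdev table routes IB traffic to IPoIB and bypass traffic to VNIC
+ * instead of the normal verbs/kdeth handlers.
+ */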
+const rhf_rcv_function_ptr normal_rhf_rcv_functions[] = {
+ [RHF_RCV_TYPE_EXPECTED] = kdeth_process_expected,
+ [RHF_RCV_TYPE_EAGER] = kdeth_process_eager,
+ [RHF_RCV_TYPE_IB] = process_receive_ib,
+ [RHF_RCV_TYPE_ERROR] = process_receive_error,
+ [RHF_RCV_TYPE_BYPASS] = process_receive_bypass,
+ [RHF_RCV_TYPE_INVALID5] = process_receive_invalid,
+ [RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
+ [RHF_RCV_TYPE_INVALID7] = process_receive_invalid,
+};
+
+const rhf_rcv_function_ptr netdev_rhf_rcv_functions[] = {
+ [RHF_RCV_TYPE_EXPECTED] = process_receive_invalid,
+ [RHF_RCV_TYPE_EAGER] = process_receive_invalid,
+ [RHF_RCV_TYPE_IB] = hfi1_ipoib_ib_rcv,
+ [RHF_RCV_TYPE_ERROR] = process_receive_error,
+ [RHF_RCV_TYPE_BYPASS] = hfi1_vnic_bypass_rcv,
+ [RHF_RCV_TYPE_INVALID5] = process_receive_invalid,
+ [RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
+ [RHF_RCV_TYPE_INVALID7] = process_receive_invalid,
+};
diff --git a/drivers/infiniband/hw/hfi1/efivar.c b/drivers/infiniband/hw/hfi1/efivar.c
new file mode 100644
index 000000000000..9ed05e10020e
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/efivar.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ */
+
+#include <linux/string.h>
+#include <linux/string_helpers.h>
+
+#include "efivar.h"
+
+/* GUID for HFI1 variables in EFI */
+#define HFI1_EFIVAR_GUID EFI_GUID(0xc50a953e, 0xa8b2, 0x42a6, \
+ 0xbf, 0x89, 0xd3, 0x33, 0xa6, 0xe9, 0xe6, 0xd4)
+/* largest EFI data size we expect */
+#define EFI_DATA_SIZE 4096
+
+/*
+ * Read the named EFI variable. Return the size of the actual data in *size
+ * and a kmalloc'ed buffer in *return_data. The caller must free the
+ * data. It is guaranteed that *return_data will be NULL and *size = 0
+ * if this routine fails.
+ *
+ * Return 0 on success, -errno on failure.
+ */
+static int read_efi_var(const char *name, unsigned long *size,
+ void **return_data)
+{
+ efi_status_t status;
+ efi_char16_t *uni_name;
+ efi_guid_t guid;
+ unsigned long temp_size;
+ void *temp_buffer;
+ void *data;
+ int i;
+ int ret;
+
+ /* set failure return values */
+ *size = 0;
+ *return_data = NULL;
+
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
+ return -EOPNOTSUPP;
+
+ uni_name = kcalloc(strlen(name) + 1, sizeof(efi_char16_t), GFP_KERNEL);
+ temp_buffer = kzalloc(EFI_DATA_SIZE, GFP_KERNEL);
+
+ if (!uni_name || !temp_buffer) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /* input: the size of the buffer */
+ temp_size = EFI_DATA_SIZE;
+
+ /* convert ASCII to unicode - it is a 1:1 mapping */
+ for (i = 0; name[i]; i++)
+ uni_name[i] = name[i];
+
+ /* need a variable for our GUID */
+ guid = HFI1_EFIVAR_GUID;
+
+ /* call into EFI runtime services */
+ status = efi.get_variable(
+ uni_name,
+ &guid,
+ NULL,
+ &temp_size,
+ temp_buffer);
+
+ /*
+ * It would be nice to call efi_status_to_err() here, but that
+ * is in the EFIVAR_FS code and may not be compiled in.
+ * However, even that is insufficient since it does not cover
+ * EFI_BUFFER_TOO_SMALL which could be an important return.
+ * For now, just split out success or not found.
+ */
+ ret = status == EFI_SUCCESS ? 0 :
+ status == EFI_NOT_FOUND ? -ENOENT :
+ -EINVAL;
+ if (ret)
+ goto fail;
+
+ /*
+ * We have successfully read the EFI variable into our
+ * temporary buffer. Now allocate a correctly sized
+ * buffer.
+ */
+ data = kmemdup(temp_buffer, temp_size, GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ *size = temp_size;
+ *return_data = data;
+
+fail:
+ kfree(uni_name);
+ kfree(temp_buffer);
+
+ return ret;
+}
+
+/*
+ * Read an HFI1 EFI variable of the form:
+ * <PCIe address>-<kind>
+ * Return a kmalloc'ed array and the size of the data.
+ *
+ * Returns 0 on success, -errno on failure.
+ */
+int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind,
+ unsigned long *size, void **return_data)
+{
+ char prefix_name[64];
+ char name[128];
+ int result;
+
+ /* create a common prefix */
+ snprintf(prefix_name, sizeof(prefix_name), "%04x:%02x:%02x.%x",
+ pci_domain_nr(dd->pcidev->bus),
+ dd->pcidev->bus->number,
+ PCI_SLOT(dd->pcidev->devfn),
+ PCI_FUNC(dd->pcidev->devfn));
+ snprintf(name, sizeof(name), "%s-%s", prefix_name, kind);
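+ /* name is now, e.g., "0000:05:00.0-<kind>" (illustrative PCIe address) */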
+ result = read_efi_var(name, size, return_data);
+
+ /*
+ * If reading the lowercase EFI variable fails, fall back to the
+ * uppercase variable.
+ */
+ if (result) {
+ string_upper(prefix_name, prefix_name);
+ snprintf(name, sizeof(name), "%s-%s", prefix_name, kind);
+ result = read_efi_var(name, size, return_data);
+ }
+
+ return result;
+}
diff --git a/drivers/infiniband/hw/hfi1/efivar.h b/drivers/infiniband/hw/hfi1/efivar.h
new file mode 100644
index 000000000000..882240929a4b
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/efivar.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ */
+
+#ifndef _HFI1_EFIVAR_H
+#define _HFI1_EFIVAR_H
+
+#include <linux/efi.h>
+
+#include "hfi.h"
+
+int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind,
+ unsigned long *size, void **return_data);
+
+#endif /* _HFI1_EFIVAR_H */
diff --git a/drivers/infiniband/hw/hfi1/eprom.c b/drivers/infiniband/hw/hfi1/eprom.c
new file mode 100644
index 000000000000..f93a160d8d05
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/eprom.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ */
+
+#include <linux/delay.h>
+#include "hfi.h"
+#include "common.h"
+#include "eprom.h"
+
+/*
+ * The EPROM is logically divided into three partitions:
+ * partition 0: the first 128K, visible from PCI ROM BAR
+ * partition 1: 4K config file (sector size)
+ * partition 2: the rest
+ */
+#define P0_SIZE (128 * 1024)
+#define P1_SIZE (4 * 1024)
+#define P1_START P0_SIZE
+#define P2_START (P0_SIZE + P1_SIZE)
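+/*
+ * Resulting layout: partition 0 covers [0, 128 KiB), partition 1 covers
+ * [128 KiB, 132 KiB), and partition 2 starts at 132 KiB.
+ */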
+
+/* controller page size, in bytes */
+#define EP_PAGE_SIZE 256
+#define EP_PAGE_MASK (EP_PAGE_SIZE - 1)
+#define EP_PAGE_DWORDS (EP_PAGE_SIZE / sizeof(u32))
+
+/* controller commands */
+#define CMD_SHIFT 24
+#define CMD_NOP (0)
+#define CMD_READ_DATA(addr) ((0x03 << CMD_SHIFT) | addr)
+#define CMD_RELEASE_POWERDOWN_NOID ((0xab << CMD_SHIFT))
+
+/* controller interface speeds */
+#define EP_SPEED_FULL 0x2 /* full speed */
+
+/*
+ * How long to wait for the EPROM to become available, in ms.
+ * The specified 32 Mb EPROM takes around 40 s to erase and then write.
+ * Double it for safety.
+ */
+#define EPROM_TIMEOUT 80000 /* ms */
+
+/*
+ * Read a 256 byte (64 dword) EPROM page.
+ * All callers have verified the offset is at a page boundary.
+ */
+static void read_page(struct hfi1_devdata *dd, u32 offset, u32 *result)
+{
+ int i;
+
+ write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_DATA(offset));
+ for (i = 0; i < EP_PAGE_DWORDS; i++)
+ result[i] = (u32)read_csr(dd, ASIC_EEP_DATA);
+ write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_NOP); /* close open page */
+}
+
+/*
+ * Read length bytes starting at offset from the start of the EPROM.
+ */
+static int read_length(struct hfi1_devdata *dd, u32 start, u32 len, void *dest)
+{
+ u32 buffer[EP_PAGE_DWORDS];
+ u32 end;
+ u32 start_offset;
+ u32 read_start;
+ u32 bytes;
+
+ if (len == 0)
+ return 0;
+
+ end = start + len;
+
+ /*
+ * Make sure the read range is not outside of the controller read
+ * command address range. Note that '>' is correct below - the end
+ * of the range is OK if it stops at the limit, but no higher.
+ */
+ if (end > (1 << CMD_SHIFT))
+ return -EINVAL;
+
+ /* read the first partial page */
+ start_offset = start & EP_PAGE_MASK;
+ if (start_offset) {
+ /* partial starting page */
+
+ /* align and read the page that contains the start */
+ read_start = start & ~EP_PAGE_MASK;
+ read_page(dd, read_start, buffer);
+
+ /* the rest of the page is available data */
+ bytes = EP_PAGE_SIZE - start_offset;
+
+ if (len <= bytes) {
+ /* end is within this page */
+ memcpy(dest, (u8 *)buffer + start_offset, len);
+ return 0;
+ }
+
+ memcpy(dest, (u8 *)buffer + start_offset, bytes);
+
+ start += bytes;
+ len -= bytes;
+ dest += bytes;
+ }
+ /* start is now page aligned */
+
+ /* read whole pages */
+ while (len >= EP_PAGE_SIZE) {
+ read_page(dd, start, buffer);
+ memcpy(dest, buffer, EP_PAGE_SIZE);
+
+ start += EP_PAGE_SIZE;
+ len -= EP_PAGE_SIZE;
+ dest += EP_PAGE_SIZE;
+ }
+
+ /* read the last partial page */
+ if (len) {
+ read_page(dd, start, buffer);
+ memcpy(dest, buffer, len);
+ }
+
+ return 0;
+}
+
+/*
+ * Initialize the EPROM handler.
+ */
+int eprom_init(struct hfi1_devdata *dd)
+{
+ int ret = 0;
+
+ /* only the discrete chip has an EPROM */
+ if (dd->pcidev->device != PCI_DEVICE_ID_INTEL0)
+ return 0;
+
+ /*
+ * It is OK if both HFIs reset the EPROM as long as they don't
+ * do it at the same time.
+ */
+ ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
+ if (ret) {
+ dd_dev_err(dd,
+ "%s: unable to acquire EPROM resource, no EPROM support\n",
+ __func__);
+ goto done_asic;
+ }
+
+ /* reset EPROM to be sure it is in a good state */
+
+ /* set reset */
+ write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_EP_RESET_SMASK);
+ /* clear reset, set speed */
+ write_csr(dd, ASIC_EEP_CTL_STAT,
+ EP_SPEED_FULL << ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT);
+
+ /* wake the device with command "release powerdown NoID" */
+ write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_RELEASE_POWERDOWN_NOID);
+
+ dd->eprom_available = true;
+ release_chip_resource(dd, CR_EPROM);
+done_asic:
+ return ret;
+}
+
+/* magic character sequence that begins an image */
+#define IMAGE_START_MAGIC "APO="
+
+/* magic character sequence that might trail an image */
+#define IMAGE_TRAIL_MAGIC "egamiAPO"
+
+/* EPROM file types */
+#define HFI1_EFT_PLATFORM_CONFIG 2
+
+/* segment size - 128 KiB */
+#define SEG_SIZE (128 * 1024)
+
+struct hfi1_eprom_footer {
+ u32 oprom_size; /* size of the oprom, in bytes */
+ u16 num_table_entries;
+ u16 version; /* version of this footer */
+ u32 magic; /* must be last */
+};
+
+struct hfi1_eprom_table_entry {
+ u32 type; /* file type */
+ u32 offset; /* file offset from start of EPROM */
+ u32 size; /* file size, in bytes */
+};
+
+/*
+ * Calculate the max number of table entries that will fit within a directory
+ * buffer of size 'dir_size'.
+ */
+#define MAX_TABLE_ENTRIES(dir_size) \
+ (((dir_size) - sizeof(struct hfi1_eprom_footer)) / \
+ sizeof(struct hfi1_eprom_table_entry))
+
+#define DIRECTORY_SIZE(n) (sizeof(struct hfi1_eprom_footer) + \
+ (sizeof(struct hfi1_eprom_table_entry) * (n)))
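+/*
+ * With the natural 12-byte footer and 12-byte table entries above, a
+ * single EP_PAGE_SIZE (256 byte) directory page holds at most
+ * (256 - 12) / 12 = 20 table entries.
+ */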
+
+#define MAGIC4(a, b, c, d) ((d) << 24 | (c) << 16 | (b) << 8 | (a))
+#define FOOTER_MAGIC MAGIC4('e', 'p', 'r', 'm')
+#define FOOTER_VERSION 1
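+/* The first character lands in the low byte, so FOOTER_MAGIC == 0x6d727065. */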
+
+/*
+ * Read all of partition 1. The actual file is at the front. Adjust
+ * the returned size if a trailing image magic is found.
+ */
+static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
+ u32 *size)
+{
+ void *buffer;
+ void *p;
+ u32 length;
+ int ret;
+
+ buffer = kmalloc(P1_SIZE, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ ret = read_length(dd, P1_START, P1_SIZE, buffer);
+ if (ret) {
+ kfree(buffer);
+ return ret;
+ }
+
+ /* config partition is valid only if it starts with IMAGE_START_MAGIC */
+ if (memcmp(buffer, IMAGE_START_MAGIC, strlen(IMAGE_START_MAGIC))) {
+ kfree(buffer);
+ return -ENOENT;
+ }
+
+ /* scan for image magic that may trail the actual data */
+ p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE);
+ if (p)
+ length = p - buffer;
+ else
+ length = P1_SIZE;
+
+ *data = buffer;
+ *size = length;
+ return 0;
+}
+
+/*
+ * The segment magic has been checked. There is a footer and table of
+ * contents present.
+ *
+ * directory is a u32 aligned buffer of size EP_PAGE_SIZE.
+ */
+static int read_segment_platform_config(struct hfi1_devdata *dd,
+ void *directory, void **data, u32 *size)
+{
+ struct hfi1_eprom_footer *footer;
+ struct hfi1_eprom_table_entry *table;
+ struct hfi1_eprom_table_entry *entry;
+ void *buffer = NULL;
+ void *table_buffer = NULL;
+ int ret, i;
+ u32 directory_size;
+ u32 seg_base, seg_offset;
+ u32 bytes_available, ncopied, to_copy;
+
+ /* the footer is at the end of the directory */
+ footer = (struct hfi1_eprom_footer *)
+ (directory + EP_PAGE_SIZE - sizeof(*footer));
+
+ /* make sure the structure version is supported */
+ if (footer->version != FOOTER_VERSION)
+ return -EINVAL;
+
+ /* oprom size cannot be larger than a segment */
+ if (footer->oprom_size >= SEG_SIZE)
+ return -EINVAL;
+
+ /* the file table must fit in a segment with the oprom */
+ if (footer->num_table_entries >
+ MAX_TABLE_ENTRIES(SEG_SIZE - footer->oprom_size))
+ return -EINVAL;
+
+ /* find the file table start, which precedes the footer */
+ directory_size = DIRECTORY_SIZE(footer->num_table_entries);
+ if (directory_size <= EP_PAGE_SIZE) {
+ /* the file table fits into the directory buffer handed in */
+ table = (struct hfi1_eprom_table_entry *)
+ (directory + EP_PAGE_SIZE - directory_size);
+ } else {
+ /* need to allocate and read more */
+ table_buffer = kmalloc(directory_size, GFP_KERNEL);
+ if (!table_buffer)
+ return -ENOMEM;
+ ret = read_length(dd, SEG_SIZE - directory_size,
+ directory_size, table_buffer);
+ if (ret)
+ goto done;
+ table = table_buffer;
+ }
+
+ /* look for the platform configuration file in the table */
+ for (entry = NULL, i = 0; i < footer->num_table_entries; i++) {
+ if (table[i].type == HFI1_EFT_PLATFORM_CONFIG) {
+ entry = &table[i];
+ break;
+ }
+ }
+ if (!entry) {
+ ret = -ENOENT;
+ goto done;
+ }
+
+ /*
+ * Sanity check on the configuration file size - it should never
+ * be larger than 4 KiB.
+ */
+ if (entry->size > (4 * 1024)) {
+ dd_dev_err(dd, "Bad configuration file size 0x%x\n",
+ entry->size);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* check for bogus offset and size that wrap when added together */
+ if (entry->offset + entry->size < entry->offset) {
+ dd_dev_err(dd,
+ "Bad configuration file start + size 0x%x+0x%x\n",
+ entry->offset, entry->size);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* allocate the buffer to return */
+ buffer = kmalloc(entry->size, GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /*
+ * Extract the file by looping over segments until it is fully read.
+ */
+ seg_offset = entry->offset % SEG_SIZE;
+ seg_base = entry->offset - seg_offset;
+ ncopied = 0;
+ while (ncopied < entry->size) {
+ /* calculate data bytes available in this segment */
+
+ /* start with the bytes from the current offset to the end */
+ bytes_available = SEG_SIZE - seg_offset;
+ /* subtract off footer and table from segment 0 */
+ if (seg_base == 0) {
+ /*
+ * Sanity check: should not have a starting point
+ * at or within the directory.
+ */
+ if (bytes_available <= directory_size) {
+ dd_dev_err(dd,
+ "Bad configuration file - offset 0x%x within footer+table\n",
+ entry->offset);
+ ret = -EINVAL;
+ goto done;
+ }
+ bytes_available -= directory_size;
+ }
+
+ /* calculate bytes wanted */
+ to_copy = entry->size - ncopied;
+
+ /* max out at the available bytes in this segment */
+ if (to_copy > bytes_available)
+ to_copy = bytes_available;
+
+ /*
+ * Read from the EPROM.
+ *
+ * The sanity check for entry->offset is done in read_length().
+ * The EPROM offset is validated against what the hardware
+ * addressing supports. In addition, if the offset is larger
+ * than the actual EPROM, it silently wraps. It will work
+ * fine, though the reader may not get what they expected
+ * from the EPROM.
+ */
+ ret = read_length(dd, seg_base + seg_offset, to_copy,
+ buffer + ncopied);
+ if (ret)
+ goto done;
+
+ ncopied += to_copy;
+
+ /* set up for next segment */
+ seg_offset = footer->oprom_size;
+ seg_base += SEG_SIZE;
+ }
+
+ /* success */
+ ret = 0;
+ *data = buffer;
+ *size = entry->size;
+
+done:
+ kfree(table_buffer);
+ if (ret)
+ kfree(buffer);
+ return ret;
+}
+
+/*
+ * Read the platform configuration file from the EPROM.
+ *
+ * On success, an allocated buffer containing the data and its size are
+ * returned. It is up to the caller to free this buffer.
+ *
+ * Return value:
+ * 0 - success
+ * -ENXIO - no EPROM is available
+ * -EBUSY - not able to acquire access to the EPROM
+ * -ENOENT - no recognizable file written
+ * -ENOMEM - buffer could not be allocated
+ * -EINVAL - invalid EPROM contents found
+ */
+int eprom_read_platform_config(struct hfi1_devdata *dd, void **data, u32 *size)
+{
+ u32 directory[EP_PAGE_DWORDS]; /* aligned buffer */
+ int ret;
+
+ if (!dd->eprom_available)
+ return -ENXIO;
+
+ ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
+ if (ret)
+ return -EBUSY;
+
+ /* read the last page of the segment for the EPROM format magic */
+ ret = read_length(dd, SEG_SIZE - EP_PAGE_SIZE, EP_PAGE_SIZE, directory);
+ if (ret)
+ goto done;
+
+ /* last dword of the segment contains a magic value */
+ if (directory[EP_PAGE_DWORDS - 1] == FOOTER_MAGIC) {
+ /* segment format */
+ ret = read_segment_platform_config(dd, directory, data, size);
+ } else {
+ /* partition format */
+ ret = read_partition_platform_config(dd, data, size);
+ }
+
+done:
+ release_chip_resource(dd, CR_EPROM);
+ return ret;
+}
diff --git a/drivers/infiniband/hw/hfi1/eprom.h b/drivers/infiniband/hw/hfi1/eprom.h
new file mode 100644
index 000000000000..51648d1afcf1
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/eprom.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ */
+
+struct hfi1_devdata;
+
+int eprom_init(struct hfi1_devdata *dd);
+int eprom_read_platform_config(struct hfi1_devdata *dd, void **buf_ret,
+ u32 *size_ret);
diff --git a/drivers/infiniband/hw/hfi1/exp_rcv.c b/drivers/infiniband/hw/hfi1/exp_rcv.c
new file mode 100644
index 000000000000..879a66edbded
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/exp_rcv.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2017 Intel Corporation.
+ */
+
+#include "exp_rcv.h"
+#include "trace.h"
+
+/**
+ * hfi1_exp_tid_set_init - initialize exp_tid_set
+ * @set: the set
+ */
+static void hfi1_exp_tid_set_init(struct exp_tid_set *set)
+{
+ INIT_LIST_HEAD(&set->list);
+ set->count = 0;
+}
+
+/**
+ * hfi1_exp_tid_group_init - initialize rcd expected receive
+ * @rcd: the rcd
+ */
+void hfi1_exp_tid_group_init(struct hfi1_ctxtdata *rcd)
+{
+ hfi1_exp_tid_set_init(&rcd->tid_group_list);
+ hfi1_exp_tid_set_init(&rcd->tid_used_list);
+ hfi1_exp_tid_set_init(&rcd->tid_full_list);
+}
+
+/**
+ * hfi1_alloc_ctxt_rcv_groups - initialize expected receive groups
+ * @rcd: the context to add the groupings to
+ */
+int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
+{
+ struct hfi1_devdata *dd = rcd->dd;
+ u32 tidbase;
+ struct tid_group *grp;
+ int i;
+ u32 ngroups;
+
+ ngroups = rcd->expected_count / dd->rcv_entries.group_size;
+ rcd->groups =
+ kcalloc_node(ngroups, sizeof(*rcd->groups),
+ GFP_KERNEL, rcd->numa_id);
+ if (!rcd->groups)
+ return -ENOMEM;
+ tidbase = rcd->expected_base;
+ for (i = 0; i < ngroups; i++) {
+ grp = &rcd->groups[i];
+ grp->size = dd->rcv_entries.group_size;
+ grp->base = tidbase;
+ tid_group_add_tail(grp, &rcd->tid_group_list);
+ tidbase += dd->rcv_entries.group_size;
+ }
+
+ return 0;
+}
+
+/**
+ * hfi1_free_ctxt_rcv_groups - free expected receive groups
+ * @rcd: the context to free
+ *
+ * The routine dismantles the expected receive linked
+ * list and clears any tids associated with the receive
+ * context.
+ *
+ * This should only be called for kernel contexts and
+ * a base user context.
+ */
+void hfi1_free_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
+{
+ kfree(rcd->groups);
+ rcd->groups = NULL;
+ hfi1_exp_tid_group_init(rcd);
+
+ hfi1_clear_tids(rcd);
+}
diff --git a/drivers/infiniband/hw/hfi1/exp_rcv.h b/drivers/infiniband/hw/hfi1/exp_rcv.h
new file mode 100644
index 000000000000..141413d9fbc7
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/exp_rcv.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2017 Intel Corporation.
+ */
+
+#ifndef _HFI1_EXP_RCV_H
+#define _HFI1_EXP_RCV_H
+#include "hfi.h"
+
+#define EXP_TID_SET_EMPTY(set) (set.count == 0 && list_empty(&set.list))
+
+#define EXP_TID_TIDLEN_MASK 0x7FFULL
+#define EXP_TID_TIDLEN_SHIFT 0
+#define EXP_TID_TIDCTRL_MASK 0x3ULL
+#define EXP_TID_TIDCTRL_SHIFT 20
+#define EXP_TID_TIDIDX_MASK 0x3FFULL
+#define EXP_TID_TIDIDX_SHIFT 22
+#define EXP_TID_GET(tid, field) \
+ (((tid) >> EXP_TID_TID##field##_SHIFT) & EXP_TID_TID##field##_MASK)
+
+#define EXP_TID_SET(field, value) \
+ (((value) & EXP_TID_TID##field##_MASK) << \
+ EXP_TID_TID##field##_SHIFT)
+#define EXP_TID_CLEAR(tid, field) ({ \
+ (tid) &= ~(EXP_TID_TID##field##_MASK << \
+ EXP_TID_TID##field##_SHIFT); \
+ })
+#define EXP_TID_RESET(tid, field, value) do { \
+ EXP_TID_CLEAR(tid, field); \
+ (tid) |= EXP_TID_SET(field, (value)); \
+ } while (0)
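+/*
+ * Example: EXP_TID_GET(tid, LEN) extracts bits [10:0] (the page count,
+ * per create_tid() below) and EXP_TID_RESET(tid, LEN, n) rewrites only
+ * that field.
+ */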
+
+/*
+ * Define fields in the KDETH header so we can update the header
+ * template.
+ */
+#define KDETH_OFFSET_SHIFT 0
+#define KDETH_OFFSET_MASK 0x7fff
+#define KDETH_OM_SHIFT 15
+#define KDETH_OM_MASK 0x1
+#define KDETH_TID_SHIFT 16
+#define KDETH_TID_MASK 0x3ff
+#define KDETH_TIDCTRL_SHIFT 26
+#define KDETH_TIDCTRL_MASK 0x3
+#define KDETH_INTR_SHIFT 28
+#define KDETH_INTR_MASK 0x1
+#define KDETH_SH_SHIFT 29
+#define KDETH_SH_MASK 0x1
+#define KDETH_KVER_SHIFT 30
+#define KDETH_KVER_MASK 0x3
+#define KDETH_JKEY_SHIFT 0x0
+#define KDETH_JKEY_MASK 0xff
+#define KDETH_HCRC_UPPER_SHIFT 16
+#define KDETH_HCRC_UPPER_MASK 0xff
+#define KDETH_HCRC_LOWER_SHIFT 24
+#define KDETH_HCRC_LOWER_MASK 0xff
+
+#define KDETH_GET(val, field) \
+ (((le32_to_cpu((val))) >> KDETH_##field##_SHIFT) & KDETH_##field##_MASK)
+#define KDETH_SET(dw, field, val) do { \
+ u32 dwval = le32_to_cpu(dw); \
+ dwval &= ~(KDETH_##field##_MASK << KDETH_##field##_SHIFT); \
+ dwval |= (((val) & KDETH_##field##_MASK) << \
+ KDETH_##field##_SHIFT); \
+ dw = cpu_to_le32(dwval); \
+ } while (0)
+
+#define KDETH_RESET(dw, field, val) ({ dw = 0; KDETH_SET(dw, field, val); })
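+/*
+ * Example: KDETH_GET(dword, TID) pulls bits [25:16] out of a
+ * little-endian KDETH dword; KDETH_SET() is its read-modify-write peer.
+ */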
+
+/* KDETH OM multipliers and switch over point */
+#define KDETH_OM_SMALL 4
+#define KDETH_OM_SMALL_SHIFT 2
+#define KDETH_OM_LARGE 64
+#define KDETH_OM_LARGE_SHIFT 6
+#define KDETH_OM_MAX_SIZE (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1))
+
+struct tid_group {
+ struct list_head list;
+ u32 base;
+ u8 size;
+ u8 used;
+ u8 map;
+};
+
+/*
+ * Write an "empty" RcvArray entry.
+ * This function exists so the TID registration code can use it
+ * to write to unused/unneeded entries and still take advantage
+ * of the WC performance improvements. The HFI will ignore this
+ * write to the RcvArray entry.
+ */
+static inline void rcv_array_wc_fill(struct hfi1_devdata *dd, u32 index)
+{
+ /*
+ * Doing the WC fill writes only makes sense if the device is
+ * present and the RcvArray has been mapped as WC memory.
+ */
+ if ((dd->flags & HFI1_PRESENT) && dd->rcvarray_wc) {
+ writeq(0, dd->rcvarray_wc + (index * 8));
+ if ((index & 3) == 3)
+ flush_wc();
+ }
+}
+
+static inline void tid_group_add_tail(struct tid_group *grp,
+ struct exp_tid_set *set)
+{
+ list_add_tail(&grp->list, &set->list);
+ set->count++;
+}
+
+static inline void tid_group_remove(struct tid_group *grp,
+ struct exp_tid_set *set)
+{
+ list_del_init(&grp->list);
+ set->count--;
+}
+
+static inline void tid_group_move(struct tid_group *group,
+ struct exp_tid_set *s1,
+ struct exp_tid_set *s2)
+{
+ tid_group_remove(group, s1);
+ tid_group_add_tail(group, s2);
+}
+
+static inline struct tid_group *tid_group_pop(struct exp_tid_set *set)
+{
+ struct tid_group *grp =
+ list_first_entry(&set->list, struct tid_group, list);
+ list_del_init(&grp->list);
+ set->count--;
+ return grp;
+}
+
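+/*
+ * Pack an RcvArray entry and page count into the EXP_TID format above:
+ * IDX holds the even/odd entry pair, CTRL selects which entry of the
+ * pair (1 = even, 2 = odd), and LEN holds the page count.
+ */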
+static inline u32 create_tid(u32 rcventry, u32 npages)
+{
+ u32 pair = rcventry & ~0x1;
+
+ return EXP_TID_SET(IDX, pair >> 1) |
+ EXP_TID_SET(CTRL, 1 << (rcventry - pair)) |
+ EXP_TID_SET(LEN, npages);
+}
+
+/**
+ * hfi1_tid_group_to_idx - convert a group to an index
+ * @rcd: the receive context
+ * @grp: the group pointer
+ */
+static inline u16
+hfi1_tid_group_to_idx(struct hfi1_ctxtdata *rcd, struct tid_group *grp)
+{
+ return grp - &rcd->groups[0];
+}
+
+/**
+ * hfi1_idx_to_tid_group - convert an index to a group
+ * @rcd: the receive context
+ * @idx: the index
+ */
+static inline struct tid_group *
+hfi1_idx_to_tid_group(struct hfi1_ctxtdata *rcd, u16 idx)
+{
+ return &rcd->groups[idx];
+}
+
+int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd);
+void hfi1_free_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd);
+void hfi1_exp_tid_group_init(struct hfi1_ctxtdata *rcd);
+
+#endif /* _HFI1_EXP_RCV_H */
diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
new file mode 100644
index 000000000000..a45cbffd52c7
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/fault.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bitmap.h>
+
+#include "debugfs.h"
+#include "fault.h"
+#include "trace.h"
+
+#define HFI1_FAULT_DIR_TX BIT(0)
+#define HFI1_FAULT_DIR_RX BIT(1)
+#define HFI1_FAULT_DIR_TXRX (HFI1_FAULT_DIR_TX | HFI1_FAULT_DIR_RX)
+
+static void *_fault_stats_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct hfi1_opcode_stats_perctx *opstats;
+
+ if (*pos >= ARRAY_SIZE(opstats->stats))
+ return NULL;
+ return pos;
+}
+
+static void *_fault_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct hfi1_opcode_stats_perctx *opstats;
+
+ ++*pos;
+ if (*pos >= ARRAY_SIZE(opstats->stats))
+ return NULL;
+ return pos;
+}
+
+static void _fault_stats_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static int _fault_stats_seq_show(struct seq_file *s, void *v)
+{
+ loff_t *spos = v;
+ loff_t i = *spos, j;
+ u64 n_packets = 0, n_bytes = 0;
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+ struct hfi1_ctxtdata *rcd;
+
+ for (j = 0; j < dd->first_dyn_alloc_ctxt; j++) {
+ rcd = hfi1_rcd_get_by_index(dd, j);
+ if (rcd) {
+ n_packets += rcd->opstats->stats[i].n_packets;
+ n_bytes += rcd->opstats->stats[i].n_bytes;
+ }
+ hfi1_rcd_put(rcd);
+ }
+ for_each_possible_cpu(j) {
+ struct hfi1_opcode_stats_perctx *sp =
+ per_cpu_ptr(dd->tx_opstats, j);
+
+ n_packets += sp->stats[i].n_packets;
+ n_bytes += sp->stats[i].n_bytes;
+ }
+ if (!n_packets && !n_bytes)
+ return SEQ_SKIP;
+ if (!ibd->fault->n_rxfaults[i] && !ibd->fault->n_txfaults[i])
+ return SEQ_SKIP;
+ seq_printf(s, "%02llx %llu/%llu (faults rx:%llu faults: tx:%llu)\n", i,
+ (unsigned long long)n_packets,
+ (unsigned long long)n_bytes,
+ (unsigned long long)ibd->fault->n_rxfaults[i],
+ (unsigned long long)ibd->fault->n_txfaults[i]);
+ return 0;
+}
+
+DEBUGFS_SEQ_FILE_OPS(fault_stats);
+DEBUGFS_SEQ_FILE_OPEN(fault_stats);
+DEBUGFS_FILE_OPS(fault_stats);
+
+static int fault_opcodes_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return nonseekable_open(inode, file);
+}
+
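+/*
+ * "opcodes" accepts a comma-separated list of opcodes or ranges, e.g.
+ * "0x00-0x7f" arms those opcodes and "-0x64" disarms opcode 0x64.
+ */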
+static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *pos)
+{
+ ssize_t ret = 0;
+ /* 1280 = 256 opcodes * 4 chars/opcode + 255 commas + NULL */
+ size_t copy, datalen = 1280;
+ char *data, *token, *ptr, *end;
+ struct fault *fault = file->private_data;
+
+ data = kcalloc(datalen, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ copy = min(len, datalen - 1);
+ if (copy_from_user(data, buf, copy)) {
+ ret = -EFAULT;
+ goto free_data;
+ }
+
+ ptr = data;
+ token = ptr;
+ for (ptr = data; *ptr; ptr = end + 1, token = ptr) {
+ char *dash;
+ unsigned long range_start, range_end, i;
+ bool remove = false;
+ unsigned long bound = 1U << BITS_PER_BYTE;
+
+ end = strchr(ptr, ',');
+ if (end)
+ *end = '\0';
+ if (token[0] == '-') {
+ remove = true;
+ token++;
+ }
+ dash = strchr(token, '-');
+ if (dash)
+ *dash = '\0';
+ if (kstrtoul(token, 0, &range_start))
+ break;
+ if (dash) {
+ token = dash + 1;
+ if (kstrtoul(token, 0, &range_end))
+ break;
+ } else {
+ range_end = range_start;
+ }
+ if (range_start == range_end && range_start == -1UL) {
+ bitmap_zero(fault->opcodes, sizeof(fault->opcodes) *
+ BITS_PER_BYTE);
+ break;
+ }
+ /* Check the inputs */
+ if (range_start >= bound || range_end >= bound)
+ break;
+
+ for (i = range_start; i <= range_end; i++) {
+ if (remove)
+ clear_bit(i, fault->opcodes);
+ else
+ set_bit(i, fault->opcodes);
+ }
+ if (!end)
+ break;
+ }
+ ret = len;
+
+free_data:
+ kfree(data);
+ return ret;
+}
+
+static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
+ size_t len, loff_t *pos)
+{
+ ssize_t ret = 0;
+ char *data;
+ size_t datalen = 1280, size = 0; /* see fault_opcodes_write() */
+ unsigned long bit = 0, zero = 0;
+ struct fault *fault = file->private_data;
+ size_t bitsize = sizeof(fault->opcodes) * BITS_PER_BYTE;
+
+ data = kcalloc(datalen, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ bit = find_first_bit(fault->opcodes, bitsize);
+ while (bit < bitsize) {
+ zero = find_next_zero_bit(fault->opcodes, bitsize, bit);
+ if (zero - 1 != bit)
+ size += scnprintf(data + size,
+ datalen - size - 1,
+ "0x%lx-0x%lx,", bit, zero - 1);
+ else
+ size += scnprintf(data + size,
+ datalen - size - 1, "0x%lx,",
+ bit);
+ bit = find_next_bit(fault->opcodes, bitsize, zero);
+ }
+ if (size)
+ data[size - 1] = '\n';
+ data[size] = '\0';
+ ret = simple_read_from_buffer(buf, len, pos, data, size);
+ kfree(data);
+ return ret;
+}
+
+static const struct file_operations __fault_opcodes_fops = {
+ .owner = THIS_MODULE,
+ .open = fault_opcodes_open,
+ .read = fault_opcodes_read,
+ .write = fault_opcodes_write,
+};
+
+void hfi1_fault_exit_debugfs(struct hfi1_ibdev *ibd)
+{
+ if (ibd->fault)
+ debugfs_remove_recursive(ibd->fault->dir);
+ kfree(ibd->fault);
+ ibd->fault = NULL;
+}
+
+int hfi1_fault_init_debugfs(struct hfi1_ibdev *ibd)
+{
+ struct dentry *parent = ibd->hfi1_ibdev_dbg;
+ struct dentry *fault_dir;
+
+ ibd->fault = kzalloc(sizeof(*ibd->fault), GFP_KERNEL);
+ if (!ibd->fault)
+ return -ENOMEM;
+
+ ibd->fault->attr.interval = 1;
+ ibd->fault->attr.require_end = ULONG_MAX;
+ ibd->fault->attr.stacktrace_depth = 32;
+ ibd->fault->attr.dname = NULL;
+ ibd->fault->attr.verbose = 0;
+ ibd->fault->enable = false;
+ ibd->fault->opcode = false;
+ ibd->fault->fault_skip = 0;
+ ibd->fault->skip = 0;
+ ibd->fault->direction = HFI1_FAULT_DIR_TXRX;
+ ibd->fault->suppress_err = false;
+ bitmap_zero(ibd->fault->opcodes,
+ sizeof(ibd->fault->opcodes) * BITS_PER_BYTE);
+
+ fault_dir =
+ fault_create_debugfs_attr("fault", parent, &ibd->fault->attr);
+ if (IS_ERR(fault_dir)) {
+ kfree(ibd->fault);
+ ibd->fault = NULL;
+ return -ENOENT;
+ }
+ ibd->fault->dir = fault_dir;
+
+ debugfs_create_file("fault_stats", 0444, fault_dir, ibd,
+ &_fault_stats_file_ops);
+ debugfs_create_bool("enable", 0600, fault_dir, &ibd->fault->enable);
+ debugfs_create_bool("suppress_err", 0600, fault_dir,
+ &ibd->fault->suppress_err);
+ debugfs_create_bool("opcode_mode", 0600, fault_dir,
+ &ibd->fault->opcode);
+ debugfs_create_file("opcodes", 0600, fault_dir, ibd->fault,
+ &__fault_opcodes_fops);
+ debugfs_create_u64("skip_pkts", 0600, fault_dir,
+ &ibd->fault->fault_skip);
+ debugfs_create_u64("skip_usec", 0600, fault_dir,
+ &ibd->fault->fault_skip_usec);
+ debugfs_create_u8("direction", 0600, fault_dir, &ibd->fault->direction);
+
+ return 0;
+}
+
+bool hfi1_dbg_fault_suppress_err(struct hfi1_ibdev *ibd)
+{
+ if (ibd->fault)
+ return ibd->fault->suppress_err;
+ return false;
+}
+
+static bool __hfi1_should_fault(struct hfi1_ibdev *ibd, u32 opcode,
+ u8 direction)
+{
+ bool ret = false;
+
+ if (!ibd->fault || !ibd->fault->enable)
+ return false;
+ if (!(ibd->fault->direction & direction))
+ return false;
+ if (ibd->fault->opcode) {
+ if (bitmap_empty(ibd->fault->opcodes,
+ (sizeof(ibd->fault->opcodes) *
+ BITS_PER_BYTE)))
+ return false;
+ if (!(test_bit(opcode, ibd->fault->opcodes)))
+ return false;
+ }
+ if (ibd->fault->fault_skip_usec &&
+ time_before(jiffies, ibd->fault->skip_usec))
+ return false;
+ if (ibd->fault->fault_skip && ibd->fault->skip) {
+ ibd->fault->skip--;
+ return false;
+ }
+ ret = should_fail(&ibd->fault->attr, 1);
+ if (ret) {
+ ibd->fault->skip = ibd->fault->fault_skip;
+ ibd->fault->skip_usec = jiffies +
+ usecs_to_jiffies(ibd->fault->fault_skip_usec);
+ }
+ return ret;
+}
+
+bool hfi1_dbg_should_fault_tx(struct rvt_qp *qp, u32 opcode)
+{
+ struct hfi1_ibdev *ibd = to_idev(qp->ibqp.device);
+
+ if (__hfi1_should_fault(ibd, opcode, HFI1_FAULT_DIR_TX)) {
+ trace_hfi1_fault_opcode(qp, opcode);
+ ibd->fault->n_txfaults[opcode]++;
+ return true;
+ }
+ return false;
+}
+
+bool hfi1_dbg_should_fault_rx(struct hfi1_packet *packet)
+{
+ struct hfi1_ibdev *ibd = &packet->rcd->dd->verbs_dev;
+
+ if (__hfi1_should_fault(ibd, packet->opcode, HFI1_FAULT_DIR_RX)) {
+ trace_hfi1_fault_packet(packet);
+ ibd->fault->n_rxfaults[packet->opcode]++;
+ return true;
+ }
+ return false;
+}
diff --git a/drivers/infiniband/hw/hfi1/fault.h b/drivers/infiniband/hw/hfi1/fault.h
new file mode 100644
index 000000000000..51adafe240d7
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/fault.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ */
+
+#ifndef _HFI1_FAULT_H
+#define _HFI1_FAULT_H
+
+#include <linux/fault-inject.h>
+#include <linux/dcache.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <rdma/rdma_vt.h>
+
+#include "hfi.h"
+
+struct hfi1_ibdev;
+
+#if defined(CONFIG_FAULT_INJECTION) && defined(CONFIG_FAULT_INJECTION_DEBUG_FS)
+struct fault {
+ struct fault_attr attr;
+ struct dentry *dir;
+ u64 n_rxfaults[(1U << BITS_PER_BYTE)];
+ u64 n_txfaults[(1U << BITS_PER_BYTE)];
+ u64 fault_skip;
+ u64 skip;
+ u64 fault_skip_usec;
+ unsigned long skip_usec;
+ unsigned long opcodes[(1U << BITS_PER_BYTE) / BITS_PER_LONG];
+ bool enable;
+ bool suppress_err;
+ bool opcode;
+ u8 direction;
+};
+
+int hfi1_fault_init_debugfs(struct hfi1_ibdev *ibd);
+bool hfi1_dbg_should_fault_tx(struct rvt_qp *qp, u32 opcode);
+bool hfi1_dbg_should_fault_rx(struct hfi1_packet *packet);
+bool hfi1_dbg_fault_suppress_err(struct hfi1_ibdev *ibd);
+void hfi1_fault_exit_debugfs(struct hfi1_ibdev *ibd);
+
+#else
+
+static inline int hfi1_fault_init_debugfs(struct hfi1_ibdev *ibd)
+{
+ return 0;
+}
+
+static inline bool hfi1_dbg_should_fault_rx(struct hfi1_packet *packet)
+{
+ return false;
+}
+
+static inline bool hfi1_dbg_should_fault_tx(struct rvt_qp *qp,
+ u32 opcode)
+{
+ return false;
+}
+
+static inline bool hfi1_dbg_fault_suppress_err(struct hfi1_ibdev *ibd)
+{
+ return false;
+}
+
+static inline void hfi1_fault_exit_debugfs(struct hfi1_ibdev *ibd)
+{
+}
+#endif
+#endif /* _HFI1_FAULT_H */
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
new file mode 100644
index 000000000000..503abec709c9
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -0,0 +1,1714 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
+ * Copyright(c) 2015-2020 Intel Corporation.
+ */
+
+#include <linux/poll.h>
+#include <linux/cdev.h>
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+#include <linux/sched/mm.h>
+#include <linux/bitmap.h>
+
+#include <rdma/ib.h>
+
+#include "hfi.h"
+#include "pio.h"
+#include "device.h"
+#include "common.h"
+#include "trace.h"
+#include "mmu_rb.h"
+#include "user_sdma.h"
+#include "user_exp_rcv.h"
+#include "aspm.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+
+#define SEND_CTXT_HALT_TIMEOUT 1000 /* msecs */
+
+/*
+ * File operation functions
+ */
+static int hfi1_file_open(struct inode *inode, struct file *fp);
+static int hfi1_file_close(struct inode *inode, struct file *fp);
+static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from);
+static __poll_t hfi1_poll(struct file *fp, struct poll_table_struct *pt);
+static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma);
+
+static u64 kvirt_to_phys(void *addr);
+static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len);
+static void init_subctxts(struct hfi1_ctxtdata *uctxt,
+ const struct hfi1_user_info *uinfo);
+static int init_user_ctxt(struct hfi1_filedata *fd,
+ struct hfi1_ctxtdata *uctxt);
+static void user_init(struct hfi1_ctxtdata *uctxt);
+static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len);
+static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len);
+static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
+ u32 len);
+static int user_exp_rcv_clear(struct hfi1_filedata *fd, unsigned long arg,
+ u32 len);
+static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg,
+ u32 len);
+static int setup_base_ctxt(struct hfi1_filedata *fd,
+ struct hfi1_ctxtdata *uctxt);
+static int setup_subctxt(struct hfi1_ctxtdata *uctxt);
+
+static int find_sub_ctxt(struct hfi1_filedata *fd,
+ const struct hfi1_user_info *uinfo);
+static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
+ struct hfi1_user_info *uinfo,
+ struct hfi1_ctxtdata **cd);
+static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt);
+static __poll_t poll_urgent(struct file *fp, struct poll_table_struct *pt);
+static __poll_t poll_next(struct file *fp, struct poll_table_struct *pt);
+static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
+ unsigned long arg);
+static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg);
+static int ctxt_reset(struct hfi1_ctxtdata *uctxt);
+static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
+ unsigned long arg);
+static vm_fault_t vma_fault(struct vm_fault *vmf);
+static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
+ unsigned long arg);
+
+static const struct file_operations hfi1_file_ops = {
+ .owner = THIS_MODULE,
+ .write_iter = hfi1_write_iter,
+ .open = hfi1_file_open,
+ .release = hfi1_file_close,
+ .unlocked_ioctl = hfi1_file_ioctl,
+ .poll = hfi1_poll,
+ .mmap = hfi1_file_mmap,
+ .llseek = noop_llseek,
+};
+
+static const struct vm_operations_struct vm_ops = {
+ .fault = vma_fault,
+};
+
+/*
+ * Types of memory mapped into a user process's address space
+ */
+enum mmap_types {
+ PIO_BUFS = 1,
+ PIO_BUFS_SOP,
+ PIO_CRED,
+ RCV_HDRQ,
+ RCV_EGRBUF,
+ UREGS,
+ EVENTS,
+ STATUS,
+ RTAIL,
+ SUBCTXT_UREGS,
+ SUBCTXT_RCV_HDRQ,
+ SUBCTXT_EGRBUF,
+ SDMA_COMP
+};
+
+/*
+ * Masks and offsets defining the mmap tokens
+ */
+#define HFI1_MMAP_OFFSET_MASK 0xfffULL
+#define HFI1_MMAP_OFFSET_SHIFT 0
+#define HFI1_MMAP_SUBCTXT_MASK 0xfULL
+#define HFI1_MMAP_SUBCTXT_SHIFT 12
+#define HFI1_MMAP_CTXT_MASK 0xffULL
+#define HFI1_MMAP_CTXT_SHIFT 16
+#define HFI1_MMAP_TYPE_MASK 0xfULL
+#define HFI1_MMAP_TYPE_SHIFT 24
+#define HFI1_MMAP_MAGIC_MASK 0xffffffffULL
+#define HFI1_MMAP_MAGIC_SHIFT 32
+
+#define HFI1_MMAP_MAGIC 0xdabbad00
+
+#define HFI1_MMAP_TOKEN_SET(field, val) \
+ (((val) & HFI1_MMAP_##field##_MASK) << HFI1_MMAP_##field##_SHIFT)
+#define HFI1_MMAP_TOKEN_GET(field, token) \
+ (((token) >> HFI1_MMAP_##field##_SHIFT) & HFI1_MMAP_##field##_MASK)
+#define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr) \
+ (HFI1_MMAP_TOKEN_SET(MAGIC, HFI1_MMAP_MAGIC) | \
+ HFI1_MMAP_TOKEN_SET(TYPE, type) | \
+ HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
+ HFI1_MMAP_TOKEN_SET(SUBCTXT, subctxt) | \
+ HFI1_MMAP_TOKEN_SET(OFFSET, (offset_in_page(addr))))
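+/*
+ * Token layout, low bits first: 12-bit page offset, 4-bit subctxt,
+ * 8-bit ctxt, 4-bit type, then the 32-bit magic starting at bit 32.
+ */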
+
+#define dbg(fmt, ...) \
+ pr_info(fmt, ##__VA_ARGS__)
+
+static inline int is_valid_mmap(u64 token)
+{
+ return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC);
+}
+
+static int hfi1_file_open(struct inode *inode, struct file *fp)
+{
+ struct hfi1_filedata *fd;
+ struct hfi1_devdata *dd = container_of(inode->i_cdev,
+ struct hfi1_devdata,
+ user_cdev);
+
+ if (!((dd->flags & HFI1_PRESENT) && dd->kregbase1))
+ return -EINVAL;
+
+ if (!refcount_inc_not_zero(&dd->user_refcount))
+ return -ENXIO;
+
+ /* The real work is performed later in assign_ctxt() */
+
+ fd = kzalloc(sizeof(*fd), GFP_KERNEL);
+
+ if (!fd || init_srcu_struct(&fd->pq_srcu))
+ goto nomem;
+ spin_lock_init(&fd->pq_rcu_lock);
+ spin_lock_init(&fd->tid_lock);
+ spin_lock_init(&fd->invalid_lock);
+ fd->rec_cpu_num = -1; /* no cpu affinity by default */
+ fd->dd = dd;
+ fp->private_data = fd;
+ return 0;
+nomem:
+ kfree(fd);
+ fp->private_data = NULL;
+ if (refcount_dec_and_test(&dd->user_refcount))
+ complete(&dd->user_comp);
+ return -ENOMEM;
+}
+
+static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct hfi1_filedata *fd = fp->private_data;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ int ret = 0;
+ int uval = 0;
+
+ hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd);
+ if (cmd != HFI1_IOCTL_ASSIGN_CTXT &&
+ cmd != HFI1_IOCTL_GET_VERS &&
+ !uctxt)
+ return -EINVAL;
+
+ switch (cmd) {
+ case HFI1_IOCTL_ASSIGN_CTXT:
+ ret = assign_ctxt(fd, arg, _IOC_SIZE(cmd));
+ break;
+
+ case HFI1_IOCTL_CTXT_INFO:
+ ret = get_ctxt_info(fd, arg, _IOC_SIZE(cmd));
+ break;
+
+ case HFI1_IOCTL_USER_INFO:
+ ret = get_base_info(fd, arg, _IOC_SIZE(cmd));
+ break;
+
+ case HFI1_IOCTL_CREDIT_UPD:
+ if (uctxt)
+ sc_return_credits(uctxt->sc);
+ break;
+
+ case HFI1_IOCTL_TID_UPDATE:
+ ret = user_exp_rcv_setup(fd, arg, _IOC_SIZE(cmd));
+ break;
+
+ case HFI1_IOCTL_TID_FREE:
+ ret = user_exp_rcv_clear(fd, arg, _IOC_SIZE(cmd));
+ break;
+
+ case HFI1_IOCTL_TID_INVAL_READ:
+ ret = user_exp_rcv_invalid(fd, arg, _IOC_SIZE(cmd));
+ break;
+
+ case HFI1_IOCTL_RECV_CTRL:
+ ret = manage_rcvq(uctxt, fd->subctxt, arg);
+ break;
+
+ case HFI1_IOCTL_POLL_TYPE:
+ if (get_user(uval, (int __user *)arg))
+ return -EFAULT;
+ uctxt->poll_type = (typeof(uctxt->poll_type))uval;
+ break;
+
+ case HFI1_IOCTL_ACK_EVENT:
+ ret = user_event_ack(uctxt, fd->subctxt, arg);
+ break;
+
+ case HFI1_IOCTL_SET_PKEY:
+ ret = set_ctxt_pkey(uctxt, arg);
+ break;
+
+ case HFI1_IOCTL_CTXT_RESET:
+ ret = ctxt_reset(uctxt);
+ break;
+
+ case HFI1_IOCTL_GET_VERS:
+ uval = HFI1_USER_SWVERSION;
+ if (put_user(uval, (int __user *)arg))
+ return -EFAULT;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
+{
+ struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
+ struct hfi1_user_sdma_pkt_q *pq;
+ struct hfi1_user_sdma_comp_q *cq = fd->cq;
+ int done = 0, reqs = 0;
+ unsigned long dim = from->nr_segs;
+ int idx;
+
+ if (!HFI1_CAP_IS_KSET(SDMA))
+ return -EINVAL;
+ if (!user_backed_iter(from))
+ return -EINVAL;
+ idx = srcu_read_lock(&fd->pq_srcu);
+ pq = srcu_dereference(fd->pq, &fd->pq_srcu);
+ if (!cq || !pq) {
+ srcu_read_unlock(&fd->pq_srcu, idx);
+ return -EIO;
+ }
+
+ trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);
+
+ if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
+ srcu_read_unlock(&fd->pq_srcu, idx);
+ return -ENOSPC;
+ }
+
+ while (dim) {
+ const struct iovec *iov = iter_iov(from);
+ int ret;
+ unsigned long count = 0;
+
+ ret = hfi1_user_sdma_process_request(
+ fd, (struct iovec *)(iov + done),
+ dim, &count);
+ if (ret) {
+ reqs = ret;
+ break;
+ }
+ dim -= count;
+ done += count;
+ reqs++;
+ }
+
+ srcu_read_unlock(&fd->pq_srcu, idx);
+ return reqs;
+}
+
+static inline void mmap_cdbg(u16 ctxt, u8 subctxt, u8 type, u8 mapio, u8 vmf,
+ u64 memaddr, void *memvirt, dma_addr_t memdma,
+ ssize_t memlen, struct vm_area_struct *vma)
+{
+ hfi1_cdbg(PROC,
+ "%u:%u type:%u io/vf/dma:%d/%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx",
+ ctxt, subctxt, type, mapio, vmf, !!memdma,
+ memaddr ?: (u64)memvirt, memlen,
+ vma->vm_end - vma->vm_start, vma->vm_flags);
+}
+
+static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
+{
+ struct hfi1_filedata *fd = fp->private_data;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ struct hfi1_devdata *dd;
+ unsigned long flags;
+ u64 token = vma->vm_pgoff << PAGE_SHIFT,
+ memaddr = 0;
+ void *memvirt = NULL;
+ dma_addr_t memdma = 0;
+ u8 subctxt, mapio = 0, vmf = 0, type;
+ ssize_t memlen = 0;
+ int ret = 0;
+ u16 ctxt;
+
+ if (!is_valid_mmap(token) || !uctxt ||
+ !(vma->vm_flags & VM_SHARED)) {
+ ret = -EINVAL;
+ goto done;
+ }
+ dd = uctxt->dd;
+ ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);
+ subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token);
+ type = HFI1_MMAP_TOKEN_GET(TYPE, token);
+ if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /*
+ * vm_pgoff is used as a buffer selector cookie. Always mmap from
+ * the beginning.
+ */
+ vma->vm_pgoff = 0;
+ flags = vma->vm_flags;
+
+ switch (type) {
+ case PIO_BUFS:
+ case PIO_BUFS_SOP:
+ memaddr = ((dd->physaddr + TXE_PIO_SEND) +
+ /* chip pio base */
+ (uctxt->sc->hw_context * BIT(16))) +
+ /* 64K PIO space / ctxt */
+ (type == PIO_BUFS_SOP ?
+ (TXE_PIO_SIZE / 2) : 0); /* sop? */
+ /*
+ * Map only the amount allocated to the context, not the
+ * context's entire available PIO space.
+ */
+ memlen = PAGE_ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE);
+ flags &= ~VM_MAYREAD;
+ flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ mapio = 1;
+ break;
+ case PIO_CRED: {
+ u64 cr_page_offset;
+ if (flags & VM_WRITE) {
+ ret = -EPERM;
+ goto done;
+ }
+ /*
+ * The credit return location for this context could be on the
+ * second or third page allocated for credit returns (if number
+ * of enabled contexts > 64 and 128 respectively).
+ */
+ cr_page_offset = ((u64)uctxt->sc->hw_free -
+ (u64)dd->cr_base[uctxt->numa_id].va) &
+ PAGE_MASK;
+ memvirt = dd->cr_base[uctxt->numa_id].va + cr_page_offset;
+ memdma = dd->cr_base[uctxt->numa_id].dma + cr_page_offset;
+ memlen = PAGE_SIZE;
+ flags &= ~VM_MAYWRITE;
+ flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ /*
+ * The driver has already allocated memory for credit
+ * returns and programmed it into the chip. Has that
+ * memory been flagged as non-cached?
+ */
+ /* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
+ break;
+ }
+ case RCV_HDRQ:
+ memlen = rcvhdrq_size(uctxt);
+ memvirt = uctxt->rcvhdrq;
+ memdma = uctxt->rcvhdrq_dma;
+ break;
+ case RCV_EGRBUF: {
+ unsigned long vm_start_save;
+ unsigned long vm_end_save;
+ int i;
+ /*
+ * The RcvEgr buffers need to be handled differently,
+ * as multiple non-contiguous pages need to be mapped
+ * into the user process.
+ */
+ memlen = uctxt->egrbufs.size;
+ if ((vma->vm_end - vma->vm_start) != memlen) {
+ dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n",
+ (vma->vm_end - vma->vm_start), memlen);
+ ret = -EINVAL;
+ goto done;
+ }
+ if (vma->vm_flags & VM_WRITE) {
+ ret = -EPERM;
+ goto done;
+ }
+ vm_flags_clear(vma, VM_MAYWRITE);
+ /*
+ * Mmap multiple separate allocations into a single vma. From
+ * here, dma_mmap_coherent() calls dma_direct_mmap(), which
+ * requires the mmap to exactly fill the vma starting at
+ * vma_start. Adjust the vma start and end for each eager
+ * buffer segment mapped. Restore the originals when done.
+ */
+ vm_start_save = vma->vm_start;
+ vm_end_save = vma->vm_end;
+ vma->vm_end = vma->vm_start;
+ for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
+ memlen = uctxt->egrbufs.buffers[i].len;
+ memvirt = uctxt->egrbufs.buffers[i].addr;
+ memdma = uctxt->egrbufs.buffers[i].dma;
+ vma->vm_end += memlen;
+ mmap_cdbg(ctxt, subctxt, type, mapio, vmf, memaddr,
+ memvirt, memdma, memlen, vma);
+ ret = dma_mmap_coherent(&dd->pcidev->dev, vma,
+ memvirt, memdma, memlen);
+ if (ret < 0) {
+ vma->vm_start = vm_start_save;
+ vma->vm_end = vm_end_save;
+ goto done;
+ }
+ vma->vm_start += memlen;
+ }
+ vma->vm_start = vm_start_save;
+ vma->vm_end = vm_end_save;
+ ret = 0;
+ goto done;
+ }
+ case UREGS:
+ /*
+ * Map only the page that contains this context's user
+ * registers.
+ */
+ memaddr = (unsigned long)
+ (dd->physaddr + RXE_PER_CONTEXT_USER)
+ + (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);
+ /*
+ * TidFlow table is on the same page as the rest of the
+ * user registers.
+ */
+ memlen = PAGE_SIZE;
+ flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ mapio = 1;
+ break;
+ case EVENTS:
+ /*
+ * Use the page where this context's flags are. User level
+ * knows where its own bitmap is within the page.
+ */
+ memaddr = (unsigned long)
+ (dd->events + uctxt_offset(uctxt)) & PAGE_MASK;
+ memlen = PAGE_SIZE;
+ /*
+ * v3.7 removes VM_RESERVED but the effect is kept by
+ * using VM_IO.
+ */
+ flags |= VM_IO | VM_DONTEXPAND;
+ vmf = 1;
+ break;
+ case STATUS:
+ if (flags & VM_WRITE) {
+ ret = -EPERM;
+ goto done;
+ }
+ memaddr = kvirt_to_phys((void *)dd->status);
+ memlen = PAGE_SIZE;
+ flags |= VM_IO | VM_DONTEXPAND;
+ break;
+ case RTAIL:
+ if (!HFI1_CAP_IS_USET(DMA_RTAIL)) {
+ /*
+ * If the memory allocation failed, the context alloc
+ * also would have failed, so we would never get here
+ */
+ ret = -EINVAL;
+ goto done;
+ }
+ if ((flags & VM_WRITE) || !hfi1_rcvhdrtail_kvaddr(uctxt)) {
+ ret = -EPERM;
+ goto done;
+ }
+ memlen = PAGE_SIZE;
+ memvirt = (void *)hfi1_rcvhdrtail_kvaddr(uctxt);
+ memdma = uctxt->rcvhdrqtailaddr_dma;
+ flags &= ~VM_MAYWRITE;
+ break;
+ case SUBCTXT_UREGS:
+ memaddr = (u64)uctxt->subctxt_uregbase;
+ memlen = PAGE_SIZE;
+ flags |= VM_IO | VM_DONTEXPAND;
+ vmf = 1;
+ break;
+ case SUBCTXT_RCV_HDRQ:
+ memaddr = (u64)uctxt->subctxt_rcvhdr_base;
+ memlen = rcvhdrq_size(uctxt) * uctxt->subctxt_cnt;
+ flags |= VM_IO | VM_DONTEXPAND;
+ vmf = 1;
+ break;
+ case SUBCTXT_EGRBUF:
+ memaddr = (u64)uctxt->subctxt_rcvegrbuf;
+ memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt;
+ flags |= VM_IO | VM_DONTEXPAND;
+ flags &= ~VM_MAYWRITE;
+ vmf = 1;
+ break;
+ case SDMA_COMP: {
+ struct hfi1_user_sdma_comp_q *cq = fd->cq;
+
+ if (!cq) {
+ ret = -EFAULT;
+ goto done;
+ }
+ memaddr = (u64)cq->comps;
+ memlen = PAGE_ALIGN(sizeof(*cq->comps) * cq->nentries);
+ flags |= VM_IO | VM_DONTEXPAND;
+ vmf = 1;
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if ((vma->vm_end - vma->vm_start) != memlen) {
+ hfi1_cdbg(PROC, "%u:%u Memory size mismatch %lu:%lu",
+ uctxt->ctxt, fd->subctxt,
+ (vma->vm_end - vma->vm_start), memlen);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ vm_flags_reset(vma, flags);
+ mmap_cdbg(ctxt, subctxt, type, mapio, vmf, memaddr, memvirt, memdma,
+ memlen, vma);
+ if (vmf) {
+ vma->vm_pgoff = PFN_DOWN(memaddr);
+ vma->vm_ops = &vm_ops;
+ ret = 0;
+ } else if (memdma) {
+ ret = dma_mmap_coherent(&dd->pcidev->dev, vma,
+ memvirt, memdma, memlen);
+ } else if (mapio) {
+ ret = io_remap_pfn_range(vma, vma->vm_start,
+ PFN_DOWN(memaddr),
+ memlen,
+ vma->vm_page_prot);
+ } else if (memvirt) {
+ ret = remap_pfn_range(vma, vma->vm_start,
+ PFN_DOWN(__pa(memvirt)),
+ memlen,
+ vma->vm_page_prot);
+ } else {
+ ret = remap_pfn_range(vma, vma->vm_start,
+ PFN_DOWN(memaddr),
+ memlen,
+ vma->vm_page_prot);
+ }
+done:
+ return ret;
+}
+
+/*
+ * Local (non-chip) user memory is not mapped right away but only on
+ * demand, as it is accessed by the user-level code.
+ */
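+/*
+ * For these "vmf" mappings, hfi1_file_mmap() stored the vmalloc'ed
+ * kernel address (shifted down by PAGE_SHIFT) in vm_pgoff, so shifting
+ * it back up and passing it to vmalloc_to_page() recovers the backing
+ * page.
+ */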
+static vm_fault_t vma_fault(struct vm_fault *vmf)
+{
+ struct page *page;
+
+ page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
+ if (!page)
+ return VM_FAULT_SIGBUS;
+
+ get_page(page);
+ vmf->page = page;
+
+ return 0;
+}
+
+static __poll_t hfi1_poll(struct file *fp, struct poll_table_struct *pt)
+{
+ struct hfi1_ctxtdata *uctxt;
+ __poll_t pollflag;
+
+ uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
+ if (!uctxt)
+ pollflag = EPOLLERR;
+ else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
+ pollflag = poll_urgent(fp, pt);
+ else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV)
+ pollflag = poll_next(fp, pt);
+ else /* invalid */
+ pollflag = EPOLLERR;
+
+ return pollflag;
+}
+
+static int hfi1_file_close(struct inode *inode, struct file *fp)
+{
+ struct hfi1_filedata *fdata = fp->private_data;
+ struct hfi1_ctxtdata *uctxt = fdata->uctxt;
+ struct hfi1_devdata *dd = container_of(inode->i_cdev,
+ struct hfi1_devdata,
+ user_cdev);
+ unsigned long flags, *ev;
+
+ fp->private_data = NULL;
+
+ if (!uctxt)
+ goto done;
+
+ hfi1_cdbg(PROC, "closing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
+
+ flush_wc();
+ /* drain user sdma queue */
+ hfi1_user_sdma_free_queues(fdata, uctxt);
+
+ /* release the cpu */
+ hfi1_put_proc_affinity(fdata->rec_cpu_num);
+
+ /* clean up rcv side */
+ hfi1_user_exp_rcv_free(fdata);
+
+ /*
+ * fdata->uctxt is used by the cleanup above, so it cannot be
+ * cleared until here.
+ */
+ fdata->uctxt = NULL;
+ hfi1_rcd_put(uctxt);
+
+ /*
+ * Clear any left over, unhandled events so the next process that
+ * gets this context doesn't get confused.
+ */
+ ev = dd->events + uctxt_offset(uctxt) + fdata->subctxt;
+ *ev = 0;
+
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ __clear_bit(fdata->subctxt, uctxt->in_use_ctxts);
+ if (!bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ goto done;
+ }
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+
+ /*
+ * Disable the receive context and the interrupt-available
+ * notification; reset all RcvCtxtCtrl bits to default values.
+ */
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
+ HFI1_RCVCTRL_TIDFLOW_DIS |
+ HFI1_RCVCTRL_INTRAVAIL_DIS |
+ HFI1_RCVCTRL_TAILUPD_DIS |
+ HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
+ HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
+ HFI1_RCVCTRL_NO_EGR_DROP_DIS |
+ HFI1_RCVCTRL_URGENT_DIS, uctxt);
+ /* Clear the context's J_KEY */
+ hfi1_clear_ctxt_jkey(dd, uctxt);
+ /*
+ * If a send context is allocated, reset context integrity
+ * checks to default and disable the send context.
+ */
+ if (uctxt->sc) {
+ sc_disable(uctxt->sc);
+ set_pio_integrity(uctxt->sc);
+ }
+
+ hfi1_free_ctxt_rcv_groups(uctxt);
+ hfi1_clear_ctxt_pkey(dd, uctxt);
+
+ uctxt->event_flags = 0;
+
+ deallocate_ctxt(uctxt);
+done:
+
+ if (refcount_dec_and_test(&dd->user_refcount))
+ complete(&dd->user_comp);
+
+ cleanup_srcu_struct(&fdata->pq_srcu);
+ kfree(fdata);
+ return 0;
+}
+
+/*
+ * Convert kernel *virtual* addresses to physical addresses.
+ * This is used for vmalloc'ed addresses, such as the STATUS page mapped above.
+ */
+static u64 kvirt_to_phys(void *addr)
+{
+ struct page *page;
+ u64 paddr = 0;
+
+ page = vmalloc_to_page(addr);
+ if (page)
+ paddr = page_to_pfn(page) << PAGE_SHIFT;
+
+ return paddr;
+}
+
+/**
+ * complete_subctxt - complete sub-context info
+ * @fd: valid filedata pointer
+ *
+ * Sub-context info can only be set up after the base context
+ * has been completed. This is indicated by the clearing of the
+ * HFI1_CTXT_BASE_UNINIT bit.
+ *
+ * Wait for the bit to be cleared, and then complete the subcontext
+ * initialization.
+ *
+ */
+static int complete_subctxt(struct hfi1_filedata *fd)
+{
+ int ret;
+ unsigned long flags;
+
+ /*
+ * sub-context info can only be set up after the base context
+ * has been completed.
+ */
+ ret = wait_event_interruptible(
+ fd->uctxt->wait,
+ !test_bit(HFI1_CTXT_BASE_UNINIT, &fd->uctxt->event_flags));
+
+ if (test_bit(HFI1_CTXT_BASE_FAILED, &fd->uctxt->event_flags))
+ ret = -ENOMEM;
+
+ /* Finish the sub-context init */
+ if (!ret) {
+ fd->rec_cpu_num = hfi1_get_proc_affinity(fd->uctxt->numa_id);
+ ret = init_user_ctxt(fd, fd->uctxt);
+ }
+
+ if (ret) {
+ spin_lock_irqsave(&fd->dd->uctxt_lock, flags);
+ __clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
+ spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags);
+ hfi1_rcd_put(fd->uctxt);
+ fd->uctxt = NULL;
+ }
+
+ return ret;
+}
+
+static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len)
+{
+ int ret;
+ unsigned int swmajor;
+ struct hfi1_ctxtdata *uctxt = NULL;
+ struct hfi1_user_info uinfo;
+
+ if (fd->uctxt)
+ return -EINVAL;
+
+ if (sizeof(uinfo) != len)
+ return -EINVAL;
+
+ if (copy_from_user(&uinfo, (void __user *)arg, sizeof(uinfo)))
+ return -EFAULT;
+
+ swmajor = uinfo.userversion >> 16;
+ if (swmajor != HFI1_USER_SWMAJOR)
+ return -ENODEV;
+
+ if (uinfo.subctxt_cnt > HFI1_MAX_SHARED_CTXTS)
+ return -EINVAL;
+
+ /*
+ * Acquire the mutex to protect against multiple creations of what
+ * could be a shared base context.
+ */
+ mutex_lock(&hfi1_mutex);
+ /*
+ * Get a sub context if available (fd->uctxt will be set).
+ * ret < 0 error, 0 no context, 1 sub-context found
+ */
+ ret = find_sub_ctxt(fd, &uinfo);
+
+ /*
+ * Allocate a base context if context sharing is not required or a
+ * sub context wasn't found.
+ */
+ if (!ret)
+ ret = allocate_ctxt(fd, fd->dd, &uinfo, &uctxt);
+
+ mutex_unlock(&hfi1_mutex);
+
+ /* Depending on the context type, finish the appropriate init */
+ switch (ret) {
+ case 0:
+ ret = setup_base_ctxt(fd, uctxt);
+ if (ret)
+ deallocate_ctxt(uctxt);
+ break;
+ case 1:
+ ret = complete_subctxt(fd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * match_ctxt - match context
+ * @fd: valid filedata pointer
+ * @uinfo: user info to compare base context with
+ * @uctxt: context to compare uinfo to.
+ *
+ * Compare the given context with the given information to see if it
+ * can be used for a sub context.
+ */
+static int match_ctxt(struct hfi1_filedata *fd,
+ const struct hfi1_user_info *uinfo,
+ struct hfi1_ctxtdata *uctxt)
+{
+ struct hfi1_devdata *dd = fd->dd;
+ unsigned long flags;
+ u16 subctxt;
+
+ /* Skip dynamically allocated kernel contexts */
+ if (uctxt->sc && (uctxt->sc->type == SC_KERNEL))
+ return 0;
+
+ /* Skip ctxt if it doesn't match the requested one */
+ if (memcmp(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid)) ||
+ uctxt->jkey != generate_jkey(current_uid()) ||
+ uctxt->subctxt_id != uinfo->subctxt_id ||
+ uctxt->subctxt_cnt != uinfo->subctxt_cnt)
+ return 0;
+
+ /* Verify the sharing process matches the base */
+ if (uctxt->userversion != uinfo->userversion)
+ return -EINVAL;
+
+ /* Find an unused sub context */
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ if (bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
+ /* context is being closed, do not use */
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ return 0;
+ }
+
+ subctxt = find_first_zero_bit(uctxt->in_use_ctxts,
+ HFI1_MAX_SHARED_CTXTS);
+ if (subctxt >= uctxt->subctxt_cnt) {
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ return -EBUSY;
+ }
+
+ fd->subctxt = subctxt;
+ __set_bit(fd->subctxt, uctxt->in_use_ctxts);
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+
+ fd->uctxt = uctxt;
+ hfi1_rcd_get(uctxt);
+
+ return 1;
+}
+
+/**
+ * find_sub_ctxt - find sub-context
+ * @fd: valid filedata pointer
+ * @uinfo: matching info to use to find a possible context to share.
+ *
+ * The hfi1_mutex must be held when this function is called. It is
+ * necessary to ensure serialized creation of shared contexts.
+ *
+ * Return:
+ * 0 No sub-context found
+ * 1 Subcontext found and allocated
+ * errno EINVAL (incorrect parameters)
+ * EBUSY (all sub contexts in use)
+ */
+static int find_sub_ctxt(struct hfi1_filedata *fd,
+ const struct hfi1_user_info *uinfo)
+{
+ struct hfi1_ctxtdata *uctxt;
+ struct hfi1_devdata *dd = fd->dd;
+ u16 i;
+ int ret;
+
+ if (!uinfo->subctxt_cnt)
+ return 0;
+
+ for (i = dd->first_dyn_alloc_ctxt; i < dd->num_rcv_contexts; i++) {
+ uctxt = hfi1_rcd_get_by_index(dd, i);
+ if (uctxt) {
+ ret = match_ctxt(fd, uinfo, uctxt);
+ hfi1_rcd_put(uctxt);
+ /* a non-zero value (match or error) ends the search */
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
+ struct hfi1_user_info *uinfo,
+ struct hfi1_ctxtdata **rcd)
+{
+ struct hfi1_ctxtdata *uctxt;
+ int ret, numa;
+
+ if (dd->flags & HFI1_FROZEN) {
+ /*
+ * Pick an error that is distinct from all other errors
+ * that are returned so the user process knows that
+ * it tried to allocate while the SPC was frozen. It
+ * should be able to retry successfully in a short
+ * while.
+ */
+ return -EIO;
+ }
+
+ if (!dd->freectxts)
+ return -EBUSY;
+
+ /*
+ * If we don't have a NUMA node requested, preference is towards
+ * device NUMA node.
+ */
+ fd->rec_cpu_num = hfi1_get_proc_affinity(dd->node);
+ if (fd->rec_cpu_num != -1)
+ numa = cpu_to_node(fd->rec_cpu_num);
+ else
+ numa = numa_node_id();
+ ret = hfi1_create_ctxtdata(dd->pport, numa, &uctxt);
+ if (ret < 0) {
+ dd_dev_err(dd, "user ctxtdata allocation failed\n");
+ return ret;
+ }
+ hfi1_cdbg(PROC, "[%u:%u] pid %u assigned to CPU %d (NUMA %u)",
+ uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num,
+ uctxt->numa_id);
+
+ /*
+ * Allocate and enable a PIO send context.
+ */
+ uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize, dd->node);
+ if (!uctxt->sc) {
+ ret = -ENOMEM;
+ goto ctxdata_free;
+ }
+ hfi1_cdbg(PROC, "allocated send context %u(%u)", uctxt->sc->sw_index,
+ uctxt->sc->hw_context);
+ ret = sc_enable(uctxt->sc);
+ if (ret)
+ goto ctxdata_free;
+
+ /*
+ * Setup sub context information if the user-level has requested
+ * sub contexts.
+ * This has to be done here so the rest of the sub-contexts find the
+ * proper base context.
+ * NOTE: _set_bit() can be used here because the context creation is
+ * protected by the mutex (rather than the spin_lock), and will be the
+ * very first instance of this context.
+ */
+ __set_bit(0, uctxt->in_use_ctxts);
+ if (uinfo->subctxt_cnt)
+ init_subctxts(uctxt, uinfo);
+ uctxt->userversion = uinfo->userversion;
+ uctxt->flags = hfi1_cap_mask; /* save current flag state */
+ init_waitqueue_head(&uctxt->wait);
+ strscpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
+ memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
+ uctxt->jkey = generate_jkey(current_uid());
+ hfi1_stats.sps_ctxts++;
+ /*
+ * Disable ASPM when there are open user/PSM contexts to avoid
+ * issues with ASPM L1 exit latency
+ */
+ if (dd->freectxts-- == dd->num_user_contexts)
+ aspm_disable_all(dd);
+
+ *rcd = uctxt;
+
+ return 0;
+
+ctxdata_free:
+ hfi1_free_ctxt(uctxt);
+ return ret;
+}
+
+static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt)
+{
+ mutex_lock(&hfi1_mutex);
+ hfi1_stats.sps_ctxts--;
+ if (++uctxt->dd->freectxts == uctxt->dd->num_user_contexts)
+ aspm_enable_all(uctxt->dd);
+ mutex_unlock(&hfi1_mutex);
+
+ hfi1_free_ctxt(uctxt);
+}
+
+static void init_subctxts(struct hfi1_ctxtdata *uctxt,
+ const struct hfi1_user_info *uinfo)
+{
+ uctxt->subctxt_cnt = uinfo->subctxt_cnt;
+ uctxt->subctxt_id = uinfo->subctxt_id;
+ set_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
+}
+
+static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
+{
+ int ret = 0;
+ u16 num_subctxts = uctxt->subctxt_cnt;
+
+ uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
+ if (!uctxt->subctxt_uregbase)
+ return -ENOMEM;
+
+ /* We can take the size of the RcvHdr Queue from the master */
+ uctxt->subctxt_rcvhdr_base = vmalloc_user(rcvhdrq_size(uctxt) *
+ num_subctxts);
+ if (!uctxt->subctxt_rcvhdr_base) {
+ ret = -ENOMEM;
+ goto bail_ureg;
+ }
+
+ uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size *
+ num_subctxts);
+ if (!uctxt->subctxt_rcvegrbuf) {
+ ret = -ENOMEM;
+ goto bail_rhdr;
+ }
+
+ return 0;
+
+bail_rhdr:
+ vfree(uctxt->subctxt_rcvhdr_base);
+ uctxt->subctxt_rcvhdr_base = NULL;
+bail_ureg:
+ vfree(uctxt->subctxt_uregbase);
+ uctxt->subctxt_uregbase = NULL;
+
+ return ret;
+}
+
+static void user_init(struct hfi1_ctxtdata *uctxt)
+{
+ unsigned int rcvctrl_ops = 0;
+
+ /* initialize poll variables... */
+ uctxt->urgent = 0;
+ uctxt->urgent_poll = 0;
+
+ /*
+ * Now enable the ctxt for receive.
+ * For chips that are set to DMA the tail register to memory
+ * when it changes (and when the update bit transitions from
+ * 0 to 1), we turn it off and then back on.
+ * This will (very briefly) affect any other open ctxts, but the
+ * duration is very short, and therefore isn't an issue. We
+ * explicitly set the in-memory tail copy to 0 beforehand, so we
+ * don't have to wait to be sure the DMA update has happened
+ * (chip resets head/tail to 0 on transition to enable).
+ */
+ if (hfi1_rcvhdrtail_kvaddr(uctxt))
+ clear_rcvhdrtail(uctxt);
+
+ /* Setup J_KEY before enabling the context */
+ hfi1_set_ctxt_jkey(uctxt->dd, uctxt, uctxt->jkey);
+
+ rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
+ rcvctrl_ops |= HFI1_RCVCTRL_URGENT_ENB;
+ if (HFI1_CAP_UGET_MASK(uctxt->flags, HDRSUPP))
+ rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
+ /*
+ * Ignore the bit in the flags for now until proper
+ * support for multiple packets per rcv array entry is
+ * added.
+ */
+ if (!HFI1_CAP_UGET_MASK(uctxt->flags, MULTI_PKT_EGR))
+ rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
+ if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_EGR_FULL))
+ rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
+ if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
+ rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
+ /*
+ * The RcvCtxtCtrl.TailUpd bit has to be explicitly written.
+ * We can't rely on the correct value to be set from prior
+ * uses of the chip or ctxt. Therefore, add the rcvctrl op
+ * for both cases.
+ */
+ if (HFI1_CAP_UGET_MASK(uctxt->flags, DMA_RTAIL))
+ rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
+ else
+ rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS;
+ hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
+}
+
+static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
+{
+ struct hfi1_ctxt_info cinfo;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+
+ if (sizeof(cinfo) != len)
+ return -EINVAL;
+
+ memset(&cinfo, 0, sizeof(cinfo));
+ cinfo.runtime_flags = (((uctxt->flags >> HFI1_CAP_MISC_SHIFT) &
+ HFI1_CAP_MISC_MASK) << HFI1_CAP_USER_SHIFT) |
+ HFI1_CAP_UGET_MASK(uctxt->flags, MASK) |
+ HFI1_CAP_KGET_MASK(uctxt->flags, K2U);
+ /* adjust flag if this fd is not able to cache */
+ if (!fd->use_mn)
+ cinfo.runtime_flags |= HFI1_CAP_TID_UNMAP; /* no caching */
+
+ cinfo.num_active = hfi1_count_active_units();
+ cinfo.unit = uctxt->dd->unit;
+ cinfo.ctxt = uctxt->ctxt;
+ cinfo.subctxt = fd->subctxt;
+ cinfo.rcvtids = roundup(uctxt->egrbufs.alloced,
+ uctxt->dd->rcv_entries.group_size) +
+ uctxt->expected_count;
+ cinfo.credits = uctxt->sc->credits;
+ cinfo.numa_node = uctxt->numa_id;
+ cinfo.rec_cpu = fd->rec_cpu_num;
+ cinfo.send_ctxt = uctxt->sc->hw_context;
+
+ cinfo.egrtids = uctxt->egrbufs.alloced;
+ cinfo.rcvhdrq_cnt = get_hdrq_cnt(uctxt);
+ cinfo.rcvhdrq_entsize = get_hdrqentsize(uctxt) << 2;
+ cinfo.sdma_ring_size = fd->cq->nentries;
+ cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;
+
+ trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, &cinfo);
+ if (copy_to_user((void __user *)arg, &cinfo, len))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int init_user_ctxt(struct hfi1_filedata *fd,
+ struct hfi1_ctxtdata *uctxt)
+{
+ int ret;
+
+ ret = hfi1_user_sdma_alloc_queues(uctxt, fd);
+ if (ret)
+ return ret;
+
+ ret = hfi1_user_exp_rcv_init(fd, uctxt);
+ if (ret)
+ hfi1_user_sdma_free_queues(fd, uctxt);
+
+ return ret;
+}
+
+static int setup_base_ctxt(struct hfi1_filedata *fd,
+ struct hfi1_ctxtdata *uctxt)
+{
+ struct hfi1_devdata *dd = uctxt->dd;
+ int ret = 0;
+
+ hfi1_init_ctxt(uctxt->sc);
+
+ /* Now allocate the RcvHdr queue and eager buffers. */
+ ret = hfi1_create_rcvhdrq(dd, uctxt);
+ if (ret)
+ goto done;
+
+ ret = hfi1_setup_eagerbufs(uctxt);
+ if (ret)
+ goto done;
+
+ /* If sub-contexts are enabled, do the appropriate setup */
+ if (uctxt->subctxt_cnt)
+ ret = setup_subctxt(uctxt);
+ if (ret)
+ goto done;
+
+ ret = hfi1_alloc_ctxt_rcv_groups(uctxt);
+ if (ret)
+ goto done;
+
+ ret = init_user_ctxt(fd, uctxt);
+ if (ret) {
+ hfi1_free_ctxt_rcv_groups(uctxt);
+ goto done;
+ }
+
+ user_init(uctxt);
+
+ /* Now that the context is set up, the fd can get a reference. */
+ fd->uctxt = uctxt;
+ hfi1_rcd_get(uctxt);
+
+done:
+ if (uctxt->subctxt_cnt) {
+ /*
+ * On error, set the failed bit so sub-contexts will clean up
+ * correctly.
+ */
+ if (ret)
+ set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);
+
+ /*
+ * Base context is done (successfully or not), notify anybody
+ * using a sub-context that is waiting for this completion.
+ */
+ clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
+ wake_up(&uctxt->wait);
+ }
+
+ return ret;
+}
+
+static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
+{
+ struct hfi1_base_info binfo;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ struct hfi1_devdata *dd = uctxt->dd;
+ unsigned offset;
+
+ trace_hfi1_uctxtdata(uctxt->dd, uctxt, fd->subctxt);
+
+ if (sizeof(binfo) != len)
+ return -EINVAL;
+
+ memset(&binfo, 0, sizeof(binfo));
+ binfo.hw_version = dd->revision;
+ binfo.sw_version = HFI1_USER_SWVERSION;
+ binfo.bthqp = RVT_KDETH_QP_PREFIX;
+ binfo.jkey = uctxt->jkey;
+ /*
+ * If more than 64 contexts are enabled, the allocated credit
+ * return will span two or three contiguous pages. Since we only
+ * map the page containing the context's credit return address,
+ * we need to calculate the offset in the proper page.
+ */
+ offset = ((u64)uctxt->sc->hw_free -
+ (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
+ binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt,
+ fd->subctxt, offset);
+ binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt,
+ fd->subctxt,
+ uctxt->sc->base_addr);
+ binfo.pio_bufbase_sop = HFI1_MMAP_TOKEN(PIO_BUFS_SOP,
+ uctxt->ctxt,
+ fd->subctxt,
+ uctxt->sc->base_addr);
+ binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt,
+ fd->subctxt,
+ uctxt->rcvhdrq);
+ binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
+ fd->subctxt,
+ uctxt->egrbufs.rcvtids[0].dma);
+ binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
+ fd->subctxt, 0);
+ /*
+ * user regs are at
+ * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
+ */
+ binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
+ fd->subctxt, 0);
+ offset = offset_in_page((uctxt_offset(uctxt) + fd->subctxt) *
+ sizeof(*dd->events));
+ binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
+ fd->subctxt,
+ offset);
+ binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
+ fd->subctxt,
+ dd->status);
+ if (HFI1_CAP_IS_USET(DMA_RTAIL))
+ binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
+ fd->subctxt, 0);
+ if (uctxt->subctxt_cnt) {
+ binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
+ uctxt->ctxt,
+ fd->subctxt, 0);
+ binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
+ uctxt->ctxt,
+ fd->subctxt, 0);
+ binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
+ uctxt->ctxt,
+ fd->subctxt, 0);
+ }
+
+ if (copy_to_user((void __user *)arg, &binfo, len))
+ return -EFAULT;
+
+ return 0;
+}
+
+/**
+ * user_exp_rcv_setup - Set up the given tid rcv list
+ * @fd: file data of the current driver instance
+ * @arg: ioctl argument for user space information
+ * @len: length of data structure associated with ioctl command
+ *
+ * Wrapper to validate ioctl information before doing _rcv_setup.
+ *
+ */
+static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
+ u32 len)
+{
+ int ret;
+ unsigned long addr;
+ struct hfi1_tid_info tinfo;
+
+ if (sizeof(tinfo) != len)
+ return -EINVAL;
+
+ if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
+ return -EFAULT;
+
+ ret = hfi1_user_exp_rcv_setup(fd, &tinfo);
+ if (!ret) {
+ /*
+ * Copy the number of tidlist entries we used
+ * and the length of the buffer we registered.
+ */
+ addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
+ if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
+ sizeof(tinfo.tidcnt)))
+ ret = -EFAULT;
+
+ addr = arg + offsetof(struct hfi1_tid_info, length);
+ if (!ret && copy_to_user((void __user *)addr, &tinfo.length,
+ sizeof(tinfo.length)))
+ ret = -EFAULT;
+
+ if (ret)
+ hfi1_user_exp_rcv_invalid(fd, &tinfo);
+ }
+
+ return ret;
+}
+
+/**
+ * user_exp_rcv_clear - Clear the given tid rcv list
+ * @fd: file data of the current driver instance
+ * @arg: ioctl argument for user space information
+ * @len: length of data structure associated with ioctl command
+ *
+ * hfi1_user_exp_rcv_clear() can be called from the error path. Because
+ * of this, we need to use this wrapper to copy the user space information
+ * before doing the clear.
+ */
+static int user_exp_rcv_clear(struct hfi1_filedata *fd, unsigned long arg,
+ u32 len)
+{
+ int ret;
+ unsigned long addr;
+ struct hfi1_tid_info tinfo;
+
+ if (sizeof(tinfo) != len)
+ return -EINVAL;
+
+ if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
+ return -EFAULT;
+
+ ret = hfi1_user_exp_rcv_clear(fd, &tinfo);
+ if (!ret) {
+ addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
+ if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
+ sizeof(tinfo.tidcnt)))
+ return -EFAULT;
+ }
+
+ return ret;
+}
+
+/**
+ * user_exp_rcv_invalid - Invalidate the given tid rcv list
+ * @fd: file data of the current driver instance
+ * @arg: ioctl argument for user space information
+ * @len: length of data structure associated with ioctl command
+ *
+ * Wrapper to validate ioctl information before doing _rcv_invalid.
+ *
+ */
+static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg,
+ u32 len)
+{
+ int ret;
+ unsigned long addr;
+ struct hfi1_tid_info tinfo;
+
+ if (sizeof(tinfo) != len)
+ return -EINVAL;
+
+ if (!fd->invalid_tids)
+ return -EINVAL;
+
+ if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
+ return -EFAULT;
+
+ ret = hfi1_user_exp_rcv_invalid(fd, &tinfo);
+ if (ret)
+ return ret;
+
+ addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
+ if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
+ sizeof(tinfo.tidcnt)))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+static __poll_t poll_urgent(struct file *fp,
+ struct poll_table_struct *pt)
+{
+ struct hfi1_filedata *fd = fp->private_data;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ struct hfi1_devdata *dd = uctxt->dd;
+ __poll_t pollflag;
+
+ poll_wait(fp, &uctxt->wait, pt);
+
+ spin_lock_irq(&dd->uctxt_lock);
+ if (uctxt->urgent != uctxt->urgent_poll) {
+ pollflag = EPOLLIN | EPOLLRDNORM;
+ uctxt->urgent_poll = uctxt->urgent;
+ } else {
+ pollflag = 0;
+ set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags);
+ }
+ spin_unlock_irq(&dd->uctxt_lock);
+
+ return pollflag;
+}
+
+static __poll_t poll_next(struct file *fp,
+ struct poll_table_struct *pt)
+{
+ struct hfi1_filedata *fd = fp->private_data;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ struct hfi1_devdata *dd = uctxt->dd;
+ __poll_t pollflag;
+
+ poll_wait(fp, &uctxt->wait, pt);
+
+ spin_lock_irq(&dd->uctxt_lock);
+ if (hdrqempty(uctxt)) {
+ set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt);
+ pollflag = 0;
+ } else {
+ pollflag = EPOLLIN | EPOLLRDNORM;
+ }
+ spin_unlock_irq(&dd->uctxt_lock);
+
+ return pollflag;
+}
+
+/*
+ * Find all user contexts in use, and set the specified bit in their
+ * event mask.
+ * See also find_ctxt() for a similar use that is specific to send buffers.
+ */
+int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
+{
+ struct hfi1_ctxtdata *uctxt;
+ struct hfi1_devdata *dd = ppd->dd;
+ u16 ctxt;
+
+ if (!dd->events)
+ return -EINVAL;
+
+ for (ctxt = dd->first_dyn_alloc_ctxt; ctxt < dd->num_rcv_contexts;
+ ctxt++) {
+ uctxt = hfi1_rcd_get_by_index(dd, ctxt);
+ if (uctxt) {
+ unsigned long *evs;
+ int i;
+ /*
+ * subctxt_cnt is 0 if the context is not shared, so set the
+ * base context's bit first, then any remaining sub-contexts
+ */
+ evs = dd->events + uctxt_offset(uctxt);
+ set_bit(evtbit, evs);
+ for (i = 1; i < uctxt->subctxt_cnt; i++)
+ set_bit(evtbit, evs + i);
+ hfi1_rcd_put(uctxt);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * manage_rcvq - manage a context's receive queue
+ * @uctxt: the context
+ * @subctxt: the sub-context
+ * @arg: start/stop action to carry out
+ *
+ * start_stop == 0 disables receive on the context, for use in queue
+ * overflow conditions. start_stop == 1 re-enables it and is used to
+ * re-initialize the software copy of the head register.
+ */
+static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
+ unsigned long arg)
+{
+ struct hfi1_devdata *dd = uctxt->dd;
+ unsigned int rcvctrl_op;
+ int start_stop;
+
+ if (subctxt)
+ return 0;
+
+ if (get_user(start_stop, (int __user *)arg))
+ return -EFAULT;
+
+ /* enable or disable receive on the context */
+ if (start_stop) {
+ /*
+ * On enable, force in-memory copy of the tail register to
+ * 0, so that protocol code doesn't have to worry about
+ * whether or not the chip has yet updated the in-memory
+ * copy on return from the system call. The chip
+ * always resets its tail register back to 0 on a
+ * transition from disabled to enabled.
+ */
+ if (hfi1_rcvhdrtail_kvaddr(uctxt))
+ clear_rcvhdrtail(uctxt);
+ rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB;
+ } else {
+ rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS;
+ }
+ hfi1_rcvctrl(dd, rcvctrl_op, uctxt);
+ /* always; new head should be equal to new tail; see above */
+
+ return 0;
+}
+
+/*
+ * Clear the event notifier events for this context.
+ * The user process then performs actions appropriate to the bits having
+ * been set, if desired, and checks again in the future.
+ */
+static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
+ unsigned long arg)
+{
+ int i;
+ struct hfi1_devdata *dd = uctxt->dd;
+ unsigned long *evs;
+ unsigned long events;
+
+ if (!dd->events)
+ return 0;
+
+ if (get_user(events, (unsigned long __user *)arg))
+ return -EFAULT;
+
+ evs = dd->events + uctxt_offset(uctxt) + subctxt;
+
+ for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
+ if (!test_bit(i, &events))
+ continue;
+ clear_bit(i, evs);
+ }
+ return 0;
+}
+
+static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg)
+{
+ int i;
+ struct hfi1_pportdata *ppd = uctxt->ppd;
+ struct hfi1_devdata *dd = uctxt->dd;
+ u16 pkey;
+
+ if (!HFI1_CAP_IS_USET(PKEY_CHECK))
+ return -EPERM;
+
+ if (get_user(pkey, (u16 __user *)arg))
+ return -EFAULT;
+
+ if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
+ if (pkey == ppd->pkeys[i])
+ return hfi1_set_ctxt_pkey(dd, uctxt, pkey);
+
+ return -ENOENT;
+}
+
+/**
+ * ctxt_reset - Reset the user context
+ * @uctxt: valid user context
+ */
+static int ctxt_reset(struct hfi1_ctxtdata *uctxt)
+{
+ struct send_context *sc;
+ struct hfi1_devdata *dd;
+ int ret = 0;
+
+ if (!uctxt || !uctxt->dd || !uctxt->sc)
+ return -EINVAL;
+
+ /*
+ * There is no protection here. User level has to guarantee that
+ * no one will be writing to the send context while it is being
+ * re-initialized. If user level breaks that guarantee, it will
+ * break its own context and no one else's.
+ */
+ dd = uctxt->dd;
+ sc = uctxt->sc;
+
+ /*
+ * Wait until the interrupt handler has marked the context as
+ * halted or frozen. Report error if we time out.
+ */
+ wait_event_interruptible_timeout(
+ sc->halt_wait, (sc->flags & SCF_HALTED),
+ msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
+ if (!(sc->flags & SCF_HALTED))
+ return -ENOLCK;
+
+ /*
+ * If the send context was halted due to a Freeze, wait until the
+ * device has been "unfrozen" before resetting the context.
+ */
+ if (sc->flags & SCF_FROZEN) {
+ wait_event_interruptible_timeout(
+ dd->event_queue,
+ !(READ_ONCE(dd->flags) & HFI1_FROZEN),
+ msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
+ if (dd->flags & HFI1_FROZEN)
+ return -ENOLCK;
+
+ if (dd->flags & HFI1_FORCED_FREEZE)
+ /*
+ * Don't allow context reset if we are in a
+ * forced freeze
+ */
+ return -ENODEV;
+
+ sc_disable(sc);
+ ret = sc_enable(sc);
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt);
+ } else {
+ ret = sc_restart(sc);
+ }
+ if (!ret)
+ sc_return_credits(sc);
+
+ return ret;
+}
+
+static void user_remove(struct hfi1_devdata *dd)
+{
+
+ hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
+}
+
+static int user_add(struct hfi1_devdata *dd)
+{
+ char name[10];
+ int ret;
+
+ snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
+ ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops,
+ &dd->user_cdev, &dd->user_device,
+ true, &dd->verbs_dev.rdi.ibdev.dev.kobj);
+ if (ret)
+ user_remove(dd);
+
+ return ret;
+}
+
+/*
+ * Create per-unit files in /dev
+ */
+int hfi1_device_create(struct hfi1_devdata *dd)
+{
+ return user_add(dd);
+}
+
+/*
+ * Remove per-unit files in /dev
+ * void return; the core kernel returns no errors for this
+ */
+void hfi1_device_remove(struct hfi1_devdata *dd)
+{
+ user_remove(dd);
+}
diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
new file mode 100644
index 000000000000..3c228aeaaf81
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/firmware.c
@@ -0,0 +1,2253 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015 - 2017 Intel Corporation.
+ */
+
+#include <linux/firmware.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+
+#include "hfi.h"
+#include "trace.h"
+
+/*
+ * Make it easy to toggle the firmware file names and whether they get loaded by
+ * editing the following. This may be something we do while in development
+ * but not necessarily something a user would ever need to use.
+ */
+#define DEFAULT_FW_8051_NAME_FPGA "hfi_dc8051.bin"
+#define DEFAULT_FW_8051_NAME_ASIC "hfi1_dc8051.fw"
+#define DEFAULT_FW_FABRIC_NAME "hfi1_fabric.fw"
+#define DEFAULT_FW_SBUS_NAME "hfi1_sbus.fw"
+#define DEFAULT_FW_PCIE_NAME "hfi1_pcie.fw"
+#define ALT_FW_8051_NAME_ASIC "hfi1_dc8051_d.fw"
+#define ALT_FW_FABRIC_NAME "hfi1_fabric_d.fw"
+#define ALT_FW_SBUS_NAME "hfi1_sbus_d.fw"
+#define ALT_FW_PCIE_NAME "hfi1_pcie_d.fw"
+
+MODULE_FIRMWARE(DEFAULT_FW_8051_NAME_ASIC);
+MODULE_FIRMWARE(DEFAULT_FW_FABRIC_NAME);
+MODULE_FIRMWARE(DEFAULT_FW_SBUS_NAME);
+MODULE_FIRMWARE(DEFAULT_FW_PCIE_NAME);
+
+static uint fw_8051_load = 1;
+static uint fw_fabric_serdes_load = 1;
+static uint fw_pcie_serdes_load = 1;
+static uint fw_sbus_load = 1;
+
+/* Firmware file names get set in hfi1_firmware_init() based on the above */
+static char *fw_8051_name;
+static char *fw_fabric_serdes_name;
+static char *fw_sbus_name;
+static char *fw_pcie_serdes_name;
+
+#define SBUS_MAX_POLL_COUNT 100
+#define SBUS_COUNTER(reg, name) \
+ (((reg) >> ASIC_STS_SBUS_COUNTERS_##name##_CNT_SHIFT) & \
+ ASIC_STS_SBUS_COUNTERS_##name##_CNT_MASK)
+
+/*
+ * Firmware security header.
+ */
+struct css_header {
+ u32 module_type;
+ u32 header_len;
+ u32 header_version;
+ u32 module_id;
+ u32 module_vendor;
+ u32 date; /* BCD yyyymmdd */
+ u32 size; /* in DWORDs */
+ u32 key_size; /* in DWORDs */
+ u32 modulus_size; /* in DWORDs */
+ u32 exponent_size; /* in DWORDs */
+ u32 reserved[22];
+};
+
+/* expected field values */
+#define CSS_MODULE_TYPE 0x00000006
+#define CSS_HEADER_LEN 0x000000a1
+#define CSS_HEADER_VERSION 0x00010000
+#define CSS_MODULE_VENDOR 0x00008086
+
+#define KEY_SIZE 256
+#define MU_SIZE 8
+#define EXPONENT_SIZE 4
+
+/* size of platform configuration partition */
+#define MAX_PLATFORM_CONFIG_FILE_SIZE 4096
+
+/* size of a platform configuration file encoded in format version 4 */
+#define PLATFORM_CONFIG_FORMAT_4_FILE_SIZE 528
+
+/* the file itself */
+struct firmware_file {
+ struct css_header css_header;
+ u8 modulus[KEY_SIZE];
+ u8 exponent[EXPONENT_SIZE];
+ u8 signature[KEY_SIZE];
+ u8 firmware[];
+};
+
+struct augmented_firmware_file {
+ struct css_header css_header;
+ u8 modulus[KEY_SIZE];
+ u8 exponent[EXPONENT_SIZE];
+ u8 signature[KEY_SIZE];
+ u8 r2[KEY_SIZE];
+ u8 mu[MU_SIZE];
+ u8 firmware[];
+};
+
+/* augmented file size difference */
+#define AUGMENT_SIZE (sizeof(struct augmented_firmware_file) - \
+ sizeof(struct firmware_file))
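+
+/*
+ * obtain_one_firmware() uses this delta to tell the two layouts apart:
+ * a plain file is exactly css->size * 4 bytes, an augmented file is
+ * css->size * 4 + AUGMENT_SIZE bytes.
+ */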
+
+struct firmware_details {
+ /* Linux core piece */
+ const struct firmware *fw;
+
+ struct css_header *css_header;
+ u8 *firmware_ptr; /* pointer to binary data */
+ u32 firmware_len; /* length in bytes */
+ u8 *modulus; /* pointer to the modulus */
+ u8 *exponent; /* pointer to the exponent */
+ u8 *signature; /* pointer to the signature */
+ u8 *r2; /* pointer to r2 */
+ u8 *mu; /* pointer to mu */
+ struct augmented_firmware_file dummy_header;
+};
+
+/*
+ * The mutex protects fw_state, fw_err, and all of the firmware_details
+ * variables.
+ */
+static DEFINE_MUTEX(fw_mutex);
+enum fw_state {
+ FW_EMPTY,
+ FW_TRY,
+ FW_FINAL,
+ FW_ERR
+};
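+
+/*
+ * Typical fw_state transitions (see __obtain_firmware(), obtain_firmware()
+ * and retry_firmware()):
+ *   FW_EMPTY -> FW_TRY    firmware fetched but a download has not yet
+ *                         proven it works (or alternates are being tried)
+ *   FW_TRY   -> FW_FINAL  a download succeeded, or the alternate set was
+ *                         obtained; no further changes
+ *   any      -> FW_ERR    no usable firmware could be obtained
+ */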
+
+static enum fw_state fw_state = FW_EMPTY;
+static int fw_err;
+static struct firmware_details fw_8051;
+static struct firmware_details fw_fabric;
+static struct firmware_details fw_pcie;
+static struct firmware_details fw_sbus;
+
+/* flags for turn_off_spicos() */
+#define SPICO_SBUS 0x1
+#define SPICO_FABRIC 0x2
+#define ENABLE_SPICO_SMASK 0x1
+
+/* security block commands */
+#define RSA_CMD_INIT 0x1
+#define RSA_CMD_START 0x2
+
+/* security block status */
+#define RSA_STATUS_IDLE 0x0
+#define RSA_STATUS_ACTIVE 0x1
+#define RSA_STATUS_DONE 0x2
+#define RSA_STATUS_FAILED 0x3
+
+/* RSA engine timeout, in ms */
+#define RSA_ENGINE_TIMEOUT 100 /* ms */
+
+/* hardware mutex timeout, in ms */
+#define HM_TIMEOUT 10 /* ms */
+
+/* 8051 memory access timeout, in us */
+#define DC8051_ACCESS_TIMEOUT 100 /* us */
+
+/* the number of fabric SerDes on the SBus */
+#define NUM_FABRIC_SERDES 4
+
+/* ASIC_STS_SBUS_RESULT.RESULT_CODE value */
+#define SBUS_READ_COMPLETE 0x4
+
+/* SBus fabric SerDes addresses, one set per HFI */
+static const u8 fabric_serdes_addrs[2][NUM_FABRIC_SERDES] = {
+ { 0x01, 0x02, 0x03, 0x04 },
+ { 0x28, 0x29, 0x2a, 0x2b }
+};
+
+/* SBus PCIe SerDes addresses, one set per HFI */
+static const u8 pcie_serdes_addrs[2][NUM_PCIE_SERDES] = {
+ { 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16,
+ 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26 },
+ { 0x2f, 0x31, 0x33, 0x35, 0x37, 0x39, 0x3b, 0x3d,
+ 0x3f, 0x41, 0x43, 0x45, 0x47, 0x49, 0x4b, 0x4d }
+};
+
+/* SBus PCIe PCS addresses, one set per HFI */
+const u8 pcie_pcs_addrs[2][NUM_PCIE_SERDES] = {
+ { 0x09, 0x0b, 0x0d, 0x0f, 0x11, 0x13, 0x15, 0x17,
+ 0x19, 0x1b, 0x1d, 0x1f, 0x21, 0x23, 0x25, 0x27 },
+ { 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
+ 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e }
+};
+
+/* SBus fabric SerDes broadcast addresses, one per HFI */
+static const u8 fabric_serdes_broadcast[2] = { 0xe4, 0xe5 };
+static const u8 all_fabric_serdes_broadcast = 0xe1;
+
+/* SBus PCIe SerDes broadcast addresses, one per HFI */
+const u8 pcie_serdes_broadcast[2] = { 0xe2, 0xe3 };
+static const u8 all_pcie_serdes_broadcast = 0xe0;
+
+static const u32 platform_config_table_limits[PLATFORM_CONFIG_TABLE_MAX] = {
+ 0,
+ SYSTEM_TABLE_MAX,
+ PORT_TABLE_MAX,
+ RX_PRESET_TABLE_MAX,
+ TX_PRESET_TABLE_MAX,
+ QSFP_ATTEN_TABLE_MAX,
+ VARIABLE_SETTINGS_TABLE_MAX
+};
+
+/* forwards */
+static void dispose_one_firmware(struct firmware_details *fdet);
+static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
+ struct firmware_details *fdet);
+static void dump_fw_version(struct hfi1_devdata *dd);
+
+/*
+ * Read a single 64-bit value from 8051 data memory.
+ *
+ * Expects:
+ * o caller to have already set up data read, no auto increment
+ * o caller to turn off read enable when finished
+ *
+ * The address argument is a byte offset. Bits 0:2 in the address are
+ * ignored - i.e. the hardware will always do aligned 8-byte reads as if
+ * the lower bits are zero.
+ *
+ * Return 0 on success, -ENXIO on a read error (timeout).
+ */
+static int __read_8051_data(struct hfi1_devdata *dd, u32 addr, u64 *result)
+{
+ u64 reg;
+ int count;
+
+ /* step 1: set the address, clear enable */
+ reg = (addr & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK)
+ << DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT;
+ write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, reg);
+ /* step 2: enable */
+ write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL,
+ reg | DC_DC8051_CFG_RAM_ACCESS_CTRL_READ_ENA_SMASK);
+
+ /* wait until ACCESS_COMPLETED is set */
+ count = 0;
+ while ((read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_STATUS)
+ & DC_DC8051_CFG_RAM_ACCESS_STATUS_ACCESS_COMPLETED_SMASK)
+ == 0) {
+ count++;
+ if (count > DC8051_ACCESS_TIMEOUT) {
+ dd_dev_err(dd, "timeout reading 8051 data\n");
+ return -ENXIO;
+ }
+ ndelay(10);
+ }
+
+ /* gather the data */
+ *result = read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_RD_DATA);
+
+ return 0;
+}
+
+/*
+ * Read 8051 data starting at addr, for len bytes. Will read in 8-byte chunks.
+ * Return 0 on success, -errno on error.
+ */
+int read_8051_data(struct hfi1_devdata *dd, u32 addr, u32 len, u64 *result)
+{
+ unsigned long flags;
+ u32 done;
+ int ret = 0;
+
+ spin_lock_irqsave(&dd->dc8051_memlock, flags);
+
+ /* data read set-up, no auto-increment */
+ write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, 0);
+
+ for (done = 0; done < len; addr += 8, done += 8, result++) {
+ ret = __read_8051_data(dd, addr, result);
+ if (ret)
+ break;
+ }
+
+ /* turn off read enable */
+ write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, 0);
+
+ spin_unlock_irqrestore(&dd->dc8051_memlock, flags);
+
+ return ret;
+}
+
+/*
+ * Write data or code to the 8051 code or data RAM.
+ */
+static int write_8051(struct hfi1_devdata *dd, int code, u32 start,
+ const u8 *data, u32 len)
+{
+ u64 reg;
+ u32 offset;
+ int aligned, count;
+
+ /* check alignment */
+ aligned = ((unsigned long)data & 0x7) == 0;
+
+ /* write set-up */
+ reg = (code ? DC_DC8051_CFG_RAM_ACCESS_SETUP_RAM_SEL_SMASK : 0ull)
+ | DC_DC8051_CFG_RAM_ACCESS_SETUP_AUTO_INCR_ADDR_SMASK;
+ write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, reg);
+
+ reg = ((start & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK)
+ << DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT)
+ | DC_DC8051_CFG_RAM_ACCESS_CTRL_WRITE_ENA_SMASK;
+ write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, reg);
+
+ /* write */
+ for (offset = 0; offset < len; offset += 8) {
+ int bytes = len - offset;
+
+ if (bytes < 8) {
+ reg = 0;
+ memcpy(&reg, &data[offset], bytes);
+ } else if (aligned) {
+ reg = *(u64 *)&data[offset];
+ } else {
+ memcpy(&reg, &data[offset], 8);
+ }
+ write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_WR_DATA, reg);
+
+ /* wait until ACCESS_COMPLETED is set */
+ count = 0;
+ while ((read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_STATUS)
+ & DC_DC8051_CFG_RAM_ACCESS_STATUS_ACCESS_COMPLETED_SMASK)
+ == 0) {
+ count++;
+ if (count > DC8051_ACCESS_TIMEOUT) {
+ dd_dev_err(dd, "timeout writing 8051 data\n");
+ return -ENXIO;
+ }
+ udelay(1);
+ }
+ }
+
+ /* turn off write access, auto increment (also sets to data access) */
+ write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, 0);
+ write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, 0);
+
+ return 0;
+}
+
+/* return 0 if the values match; otherwise complain and return non-zero */
+static int invalid_header(struct hfi1_devdata *dd, const char *what,
+ u32 actual, u32 expected)
+{
+ if (actual == expected)
+ return 0;
+
+ dd_dev_err(dd,
+ "invalid firmware header field %s: expected 0x%x, actual 0x%x\n",
+ what, expected, actual);
+ return 1;
+}
+
+/*
+ * Verify that the static fields in the CSS header match.
+ */
+static int verify_css_header(struct hfi1_devdata *dd, struct css_header *css)
+{
+ /* verify CSS header fields (most sizes are in DW, so add /4) */
+ if (invalid_header(dd, "module_type", css->module_type,
+ CSS_MODULE_TYPE) ||
+ invalid_header(dd, "header_len", css->header_len,
+ (sizeof(struct firmware_file) / 4)) ||
+ invalid_header(dd, "header_version", css->header_version,
+ CSS_HEADER_VERSION) ||
+ invalid_header(dd, "module_vendor", css->module_vendor,
+ CSS_MODULE_VENDOR) ||
+ invalid_header(dd, "key_size", css->key_size, KEY_SIZE / 4) ||
+ invalid_header(dd, "modulus_size", css->modulus_size,
+ KEY_SIZE / 4) ||
+ invalid_header(dd, "exponent_size", css->exponent_size,
+ EXPONENT_SIZE / 4)) {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Make sure there are at least some bytes after the prefix.
+ */
+static int payload_check(struct hfi1_devdata *dd, const char *name,
+ long file_size, long prefix_size)
+{
+ /* make sure we have some payload */
+ if (prefix_size >= file_size) {
+ dd_dev_err(dd,
+ "firmware \"%s\", size %ld, must be larger than %ld bytes\n",
+ name, file_size, prefix_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Request the firmware from the system. Extract the pieces and fill in
+ * fdet. If successful, the caller will need to call dispose_one_firmware().
+ * Returns 0 on success, -ERRNO on error.
+ */
+static int obtain_one_firmware(struct hfi1_devdata *dd, const char *name,
+ struct firmware_details *fdet)
+{
+ struct css_header *css;
+ int ret;
+
+ memset(fdet, 0, sizeof(*fdet));
+
+ ret = request_firmware(&fdet->fw, name, &dd->pcidev->dev);
+ if (ret) {
+ dd_dev_warn(dd, "cannot find firmware \"%s\", err %d\n",
+ name, ret);
+ return ret;
+ }
+
+ /* verify the firmware */
+ if (fdet->fw->size < sizeof(struct css_header)) {
+ dd_dev_err(dd, "firmware \"%s\" is too small\n", name);
+ ret = -EINVAL;
+ goto done;
+ }
+ css = (struct css_header *)fdet->fw->data;
+
+ hfi1_cdbg(FIRMWARE, "Firmware %s details:", name);
+ hfi1_cdbg(FIRMWARE, "file size: 0x%lx bytes", fdet->fw->size);
+ hfi1_cdbg(FIRMWARE, "CSS structure:");
+ hfi1_cdbg(FIRMWARE, " module_type 0x%x", css->module_type);
+ hfi1_cdbg(FIRMWARE, " header_len 0x%03x (0x%03x bytes)",
+ css->header_len, 4 * css->header_len);
+ hfi1_cdbg(FIRMWARE, " header_version 0x%x", css->header_version);
+ hfi1_cdbg(FIRMWARE, " module_id 0x%x", css->module_id);
+ hfi1_cdbg(FIRMWARE, " module_vendor 0x%x", css->module_vendor);
+ hfi1_cdbg(FIRMWARE, " date 0x%x", css->date);
+ hfi1_cdbg(FIRMWARE, " size 0x%03x (0x%03x bytes)",
+ css->size, 4 * css->size);
+ hfi1_cdbg(FIRMWARE, " key_size 0x%03x (0x%03x bytes)",
+ css->key_size, 4 * css->key_size);
+ hfi1_cdbg(FIRMWARE, " modulus_size 0x%03x (0x%03x bytes)",
+ css->modulus_size, 4 * css->modulus_size);
+ hfi1_cdbg(FIRMWARE, " exponent_size 0x%03x (0x%03x bytes)",
+ css->exponent_size, 4 * css->exponent_size);
+ hfi1_cdbg(FIRMWARE, "firmware size: 0x%lx bytes",
+ fdet->fw->size - sizeof(struct firmware_file));
+
+ /*
+ * If the file does not have a valid CSS header, fail.
+ * Otherwise, check the CSS size field for an expected size.
+ * The augmented file has r2 and mu inserted after the header
+ * was generated, so there will be a known difference between
+ * the CSS header size and the actual file size. Use this
+ * difference to identify an augmented file.
+ *
+ * Note: css->size is in DWORDs, multiply by 4 to get bytes.
+ */
+ ret = verify_css_header(dd, css);
+ if (ret) {
+ dd_dev_info(dd, "Invalid CSS header for \"%s\"\n", name);
+ } else if ((css->size * 4) == fdet->fw->size) {
+ /* non-augmented firmware file */
+ struct firmware_file *ff = (struct firmware_file *)
+ fdet->fw->data;
+
+ /* make sure there are bytes in the payload */
+ ret = payload_check(dd, name, fdet->fw->size,
+ sizeof(struct firmware_file));
+ if (ret == 0) {
+ fdet->css_header = css;
+ fdet->modulus = ff->modulus;
+ fdet->exponent = ff->exponent;
+ fdet->signature = ff->signature;
+ fdet->r2 = fdet->dummy_header.r2; /* use dummy space */
+ fdet->mu = fdet->dummy_header.mu; /* use dummy space */
+ fdet->firmware_ptr = ff->firmware;
+ fdet->firmware_len = fdet->fw->size -
+ sizeof(struct firmware_file);
+ /*
+ * Header does not include r2 and mu - generate here.
+ * For now, fail.
+ */
+ dd_dev_err(dd, "driver is unable to validate firmware without r2 and mu (not in firmware file)\n");
+ ret = -EINVAL;
+ }
+ } else if ((css->size * 4) + AUGMENT_SIZE == fdet->fw->size) {
+ /* augmented firmware file */
+ struct augmented_firmware_file *aff =
+ (struct augmented_firmware_file *)fdet->fw->data;
+
+ /* make sure there are bytes in the payload */
+ ret = payload_check(dd, name, fdet->fw->size,
+ sizeof(struct augmented_firmware_file));
+ if (ret == 0) {
+ fdet->css_header = css;
+ fdet->modulus = aff->modulus;
+ fdet->exponent = aff->exponent;
+ fdet->signature = aff->signature;
+ fdet->r2 = aff->r2;
+ fdet->mu = aff->mu;
+ fdet->firmware_ptr = aff->firmware;
+ fdet->firmware_len = fdet->fw->size -
+ sizeof(struct augmented_firmware_file);
+ }
+ } else {
+ /* css->size check failed */
+ dd_dev_err(dd,
+ "invalid firmware header field size: expected 0x%lx or 0x%lx, actual 0x%x\n",
+ fdet->fw->size / 4,
+ (fdet->fw->size - AUGMENT_SIZE) / 4,
+ css->size);
+
+ ret = -EINVAL;
+ }
+
+done:
+ /* if returning an error, clean up after ourselves */
+ if (ret)
+ dispose_one_firmware(fdet);
+ return ret;
+}
+
+static void dispose_one_firmware(struct firmware_details *fdet)
+{
+ release_firmware(fdet->fw);
+ /* erase all previous information */
+ memset(fdet, 0, sizeof(*fdet));
+}
+
+/*
+ * Obtain the 4 firmwares from the OS. All must be obtained at once or not
+ * at all. If called with the firmware state in FW_TRY, use alternate names.
+ * On exit, this routine will have set the firmware state to one of FW_TRY,
+ * FW_FINAL, or FW_ERR.
+ *
+ * Must be holding fw_mutex.
+ */
+static void __obtain_firmware(struct hfi1_devdata *dd)
+{
+ int err = 0;
+
+ if (fw_state == FW_FINAL) /* nothing more to obtain */
+ return;
+ if (fw_state == FW_ERR) /* already in error */
+ return;
+
+ /* fw_state is FW_EMPTY or FW_TRY */
+retry:
+ if (fw_state == FW_TRY) {
+ /*
+ * We tried the original and it failed. Move to the
+ * alternate.
+ */
+ dd_dev_warn(dd, "using alternate firmware names\n");
+ /*
+ * Let others run. Some systems, when missing firmware, do
+ * something that stalls for 30 seconds. If we do that twice
+ * in a row, it triggers a task-blocked warning.
+ */
+ cond_resched();
+ if (fw_8051_load)
+ dispose_one_firmware(&fw_8051);
+ if (fw_fabric_serdes_load)
+ dispose_one_firmware(&fw_fabric);
+ if (fw_sbus_load)
+ dispose_one_firmware(&fw_sbus);
+ if (fw_pcie_serdes_load)
+ dispose_one_firmware(&fw_pcie);
+ fw_8051_name = ALT_FW_8051_NAME_ASIC;
+ fw_fabric_serdes_name = ALT_FW_FABRIC_NAME;
+ fw_sbus_name = ALT_FW_SBUS_NAME;
+ fw_pcie_serdes_name = ALT_FW_PCIE_NAME;
+
+ /*
+ * Add a delay before obtaining and loading debug firmware.
+ * Authorization will fail if the delay between firmware
+ * authorization events is shorter than 50us. Use a 100us
+ * delay to keep a safe margin.
+ */
+ usleep_range(100, 120);
+ }
+
+ if (fw_sbus_load) {
+ err = obtain_one_firmware(dd, fw_sbus_name, &fw_sbus);
+ if (err)
+ goto done;
+ }
+
+ if (fw_pcie_serdes_load) {
+ err = obtain_one_firmware(dd, fw_pcie_serdes_name, &fw_pcie);
+ if (err)
+ goto done;
+ }
+
+ if (fw_fabric_serdes_load) {
+ err = obtain_one_firmware(dd, fw_fabric_serdes_name,
+ &fw_fabric);
+ if (err)
+ goto done;
+ }
+
+ if (fw_8051_load) {
+ err = obtain_one_firmware(dd, fw_8051_name, &fw_8051);
+ if (err)
+ goto done;
+ }
+
+done:
+ if (err) {
+ /* oops, had problems obtaining a firmware */
+ if (fw_state == FW_EMPTY && dd->icode == ICODE_RTL_SILICON) {
+ /* retry with alternate (RTL only) */
+ fw_state = FW_TRY;
+ goto retry;
+ }
+ dd_dev_err(dd, "unable to obtain working firmware\n");
+ fw_state = FW_ERR;
+ fw_err = -ENOENT;
+ } else {
+ /* success */
+ if (fw_state == FW_EMPTY &&
+ dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
+ fw_state = FW_TRY; /* may retry later */
+ else
+ fw_state = FW_FINAL; /* cannot try again */
+ }
+}
+
+/*
+ * Called by all HFIs when loading their firmware - i.e. device probe time.
+ * The first one will do the actual firmware load. Use a mutex to resolve
+ * any possible race condition.
+ *
+ * The call to this routine cannot be moved to driver load because the kernel
+ * call request_firmware() requires a device which is only available after
+ * the first device probe.
+ */
+static int obtain_firmware(struct hfi1_devdata *dd)
+{
+ unsigned long timeout;
+
+ mutex_lock(&fw_mutex);
+
+ /* 40s timeout due to the long stall on missing firmware on some systems */
+ timeout = jiffies + msecs_to_jiffies(40000);
+ while (fw_state == FW_TRY) {
+ /*
+ * Another device is trying the firmware. Wait until it
+ * decides what works (or not).
+ */
+ if (time_after(jiffies, timeout)) {
+ /* waited too long */
+ dd_dev_err(dd, "Timeout waiting for firmware try");
+ fw_state = FW_ERR;
+ fw_err = -ETIMEDOUT;
+ break;
+ }
+ mutex_unlock(&fw_mutex);
+ msleep(20); /* arbitrary delay */
+ mutex_lock(&fw_mutex);
+ }
+ /* not in FW_TRY state */
+
+ /* set fw_state to FW_TRY, FW_FINAL, or FW_ERR, and fw_err */
+ if (fw_state == FW_EMPTY)
+ __obtain_firmware(dd);
+
+ mutex_unlock(&fw_mutex);
+ return fw_err;
+}
+
+/*
+ * Called when the driver unloads. The timing is asymmetric with its
+ * counterpart, obtain_firmware(). If called at device remove time,
+ * then it is conceivable that another device could probe while the
+ * firmware is being disposed. The mutexes can be moved to do that
+ * safely, but then the firmware would be requested from the OS multiple
+ * times.
+ *
+ * No mutex is needed as the driver is unloading and there cannot be any
+ * other callers.
+ */
+void dispose_firmware(void)
+{
+ dispose_one_firmware(&fw_8051);
+ dispose_one_firmware(&fw_fabric);
+ dispose_one_firmware(&fw_pcie);
+ dispose_one_firmware(&fw_sbus);
+
+ /* retain the error state, otherwise revert to empty */
+ if (fw_state != FW_ERR)
+ fw_state = FW_EMPTY;
+}
+
+/*
+ * Called with the result of a firmware download.
+ *
+ * Return 1 to retry loading the firmware, 0 to stop.
+ */
+static int retry_firmware(struct hfi1_devdata *dd, int load_result)
+{
+ int retry;
+
+ mutex_lock(&fw_mutex);
+
+ if (load_result == 0) {
+ /*
+ * The load succeeded, so expect all others to do the same.
+ * Do not retry again.
+ */
+ if (fw_state == FW_TRY)
+ fw_state = FW_FINAL;
+ retry = 0; /* do NOT retry */
+ } else if (fw_state == FW_TRY) {
+ /* load failed, obtain alternate firmware */
+ __obtain_firmware(dd);
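+ /*
+ * __obtain_firmware() leaves fw_state at FW_FINAL if the
+ * alternate images were obtained (worth retrying the load),
+ * or at FW_ERR if they were not (give up).
+ */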
+ retry = (fw_state == FW_FINAL);
+ } else {
+ /* else in FW_FINAL or FW_ERR, no retry in either case */
+ retry = 0;
+ }
+
+ mutex_unlock(&fw_mutex);
+ return retry;
+}
+
+/*
+ * Write a block of data to a given array CSR. All calls will be in
+ * multiples of 8 bytes.
+ */
+static void write_rsa_data(struct hfi1_devdata *dd, int what,
+ const u8 *data, int nbytes)
+{
+ int qw_size = nbytes / 8;
+ int i;
+
+ if (((unsigned long)data & 0x7) == 0) {
+ /* aligned */
+ u64 *ptr = (u64 *)data;
+
+ for (i = 0; i < qw_size; i++, ptr++)
+ write_csr(dd, what + (8 * i), *ptr);
+ } else {
+ /* not aligned */
+ for (i = 0; i < qw_size; i++, data += 8) {
+ u64 value;
+
+ memcpy(&value, data, 8);
+ write_csr(dd, what + (8 * i), value);
+ }
+ }
+}
+
+/*
+ * Write a block of data to a given CSR as a stream of writes. All calls will
+ * be in multiples of 8 bytes.
+ */
+static void write_streamed_rsa_data(struct hfi1_devdata *dd, int what,
+ const u8 *data, int nbytes)
+{
+ u64 *ptr = (u64 *)data;
+ int qw_size = nbytes / 8;
+
+ for (; qw_size > 0; qw_size--, ptr++)
+ write_csr(dd, what, *ptr);
+}
+
+/*
+ * Download the signature and start the RSA mechanism. Wait for
+ * RSA_ENGINE_TIMEOUT before giving up.
+ */
+static int run_rsa(struct hfi1_devdata *dd, const char *who,
+ const u8 *signature)
+{
+ unsigned long timeout;
+ u64 reg;
+ u32 status;
+ int ret = 0;
+
+ /* write the signature */
+ write_rsa_data(dd, MISC_CFG_RSA_SIGNATURE, signature, KEY_SIZE);
+
+ /* initialize RSA */
+ write_csr(dd, MISC_CFG_RSA_CMD, RSA_CMD_INIT);
+
+ /*
+ * Make sure the engine is idle and insert a delay between the two
+ * writes to MISC_CFG_RSA_CMD.
+ */
+ status = (read_csr(dd, MISC_CFG_FW_CTRL)
+ & MISC_CFG_FW_CTRL_RSA_STATUS_SMASK)
+ >> MISC_CFG_FW_CTRL_RSA_STATUS_SHIFT;
+ if (status != RSA_STATUS_IDLE) {
+ dd_dev_err(dd, "%s security engine not idle - giving up\n",
+ who);
+ return -EBUSY;
+ }
+
+ /* start RSA */
+ write_csr(dd, MISC_CFG_RSA_CMD, RSA_CMD_START);
+
+ /*
+ * Look for the result.
+ *
+ * The RSA engine is hooked up to two MISC errors. The driver
+ * masks these errors as they do not respond to the standard
+ * error "clear down" mechanism. Look for these errors here and
+ * clear them when possible. This routine will exit with the
+ * errors of the current run still set.
+ *
+ * MISC_FW_AUTH_FAILED_ERR
+ * Firmware authorization failed. This can be cleared by
+ * re-initializing the RSA engine, then clearing the status bit.
+ * Do not re-init the RSA engine immediately after a successful
+ * run - this will reset the current authorization.
+ *
+ * MISC_KEY_MISMATCH_ERR
+ * Key does not match. The only way to clear this is to load
+ * a matching key then clear the status bit. If this error
+ * is raised, it will persist outside of this routine until a
+ * matching key is loaded.
+ */
+ timeout = msecs_to_jiffies(RSA_ENGINE_TIMEOUT) + jiffies;
+ while (1) {
+ status = (read_csr(dd, MISC_CFG_FW_CTRL)
+ & MISC_CFG_FW_CTRL_RSA_STATUS_SMASK)
+ >> MISC_CFG_FW_CTRL_RSA_STATUS_SHIFT;
+
+ if (status == RSA_STATUS_IDLE) {
+ /* should not happen */
+ dd_dev_err(dd, "%s firmware security bad idle state\n",
+ who);
+ ret = -EINVAL;
+ break;
+ } else if (status == RSA_STATUS_DONE) {
+ /* finished successfully */
+ break;
+ } else if (status == RSA_STATUS_FAILED) {
+ /* finished unsuccessfully */
+ ret = -EINVAL;
+ break;
+ }
+ /* else still active */
+
+ if (time_after(jiffies, timeout)) {
+ /*
+ * Timed out while active. We can't reset the engine
+ * if it is stuck active, but run through the
+ * error code to see what error bits are set.
+ */
+ dd_dev_err(dd, "%s firmware security time out\n", who);
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ msleep(20);
+ }
+
+ /*
+ * Arrive here on success or failure. Clear all RSA engine
+ * errors. All current errors will stick - the RSA logic is keeping
+ * the error high. All previous errors will clear - the RSA logic
+ * is not keeping the error high.
+ */
+ write_csr(dd, MISC_ERR_CLEAR,
+ MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK |
+ MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK);
+ /*
+ * All that is left are the current errors. Print warnings on
+ * authorization failure details, if any. Firmware authorization
+ * can be retried, so these are only warnings.
+ */
+ reg = read_csr(dd, MISC_ERR_STATUS);
+ if (ret) {
+ if (reg & MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK)
+ dd_dev_warn(dd, "%s firmware authorization failed\n",
+ who);
+ if (reg & MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK)
+ dd_dev_warn(dd, "%s firmware key mismatch\n", who);
+ }
+
+ return ret;
+}
+
+static void load_security_variables(struct hfi1_devdata *dd,
+ struct firmware_details *fdet)
+{
+ /* Security variables a. Write the modulus */
+ write_rsa_data(dd, MISC_CFG_RSA_MODULUS, fdet->modulus, KEY_SIZE);
+ /* Security variables b. Write the r2 */
+ write_rsa_data(dd, MISC_CFG_RSA_R2, fdet->r2, KEY_SIZE);
+ /* Security variables c. Write the mu */
+ write_rsa_data(dd, MISC_CFG_RSA_MU, fdet->mu, MU_SIZE);
+ /* Security variables d. Write the header */
+ write_streamed_rsa_data(dd, MISC_CFG_SHA_PRELOAD,
+ (u8 *)fdet->css_header,
+ sizeof(struct css_header));
+}
+
+/* return the 8051 firmware state */
+static inline u32 get_firmware_state(struct hfi1_devdata *dd)
+{
+ u64 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
+
+ return (reg >> DC_DC8051_STS_CUR_STATE_FIRMWARE_SHIFT)
+ & DC_DC8051_STS_CUR_STATE_FIRMWARE_MASK;
+}
+
+/*
+ * Wait until the firmware is up and ready to take host requests.
+ * Return 0 on success, -ETIMEDOUT on timeout.
+ */
+int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout)
+{
+ unsigned long timeout;
+
+ /* in the simulator, the fake 8051 is always ready */
+ if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
+ return 0;
+
+ timeout = msecs_to_jiffies(mstimeout) + jiffies;
+ while (1) {
+ if (get_firmware_state(dd) == 0xa0) /* ready */
+ return 0;
+ if (time_after(jiffies, timeout)) /* timed out */
+ return -ETIMEDOUT;
+ usleep_range(1950, 2050); /* sleep 2ms-ish */
+ }
+}
+
+/*
+ * Load the 8051 firmware.
+ */
+static int load_8051_firmware(struct hfi1_devdata *dd,
+ struct firmware_details *fdet)
+{
+ u64 reg;
+ int ret;
+ u8 ver_major;
+ u8 ver_minor;
+ u8 ver_patch;
+
+ /*
+ * DC Reset sequence
+ * Load DC 8051 firmware
+ */
+ /*
+ * DC reset step 1: Reset DC8051
+ */
+ reg = DC_DC8051_CFG_RST_M8051W_SMASK
+ | DC_DC8051_CFG_RST_CRAM_SMASK
+ | DC_DC8051_CFG_RST_DRAM_SMASK
+ | DC_DC8051_CFG_RST_IRAM_SMASK
+ | DC_DC8051_CFG_RST_SFR_SMASK;
+ write_csr(dd, DC_DC8051_CFG_RST, reg);
+
+ /*
+ * DC reset step 2 (optional): Load 8051 data memory with link
+ * configuration
+ */
+
+ /*
+ * DC reset step 3: Load DC8051 firmware
+ */
+ /* release all but the core reset */
+ reg = DC_DC8051_CFG_RST_M8051W_SMASK;
+ write_csr(dd, DC_DC8051_CFG_RST, reg);
+
+ /* Firmware load step 1 */
+ load_security_variables(dd, fdet);
+
+ /*
+ * Firmware load step 2. Clear MISC_CFG_FW_CTRL.FW_8051_LOADED
+ */
+ write_csr(dd, MISC_CFG_FW_CTRL, 0);
+
+ /* Firmware load steps 3-5 */
+ ret = write_8051(dd, 1/*code*/, 0, fdet->firmware_ptr,
+ fdet->firmware_len);
+ if (ret)
+ return ret;
+
+ /*
+ * DC reset step 4. Host starts the DC8051 firmware
+ */
+ /*
+ * Firmware load step 6. Set MISC_CFG_FW_CTRL.FW_8051_LOADED
+ */
+ write_csr(dd, MISC_CFG_FW_CTRL, MISC_CFG_FW_CTRL_FW_8051_LOADED_SMASK);
+
+ /* Firmware load steps 7-10 */
+ ret = run_rsa(dd, "8051", fdet->signature);
+ if (ret)
+ return ret;
+
+ /* clear all reset bits, releasing the 8051 */
+ write_csr(dd, DC_DC8051_CFG_RST, 0ull);
+
+ /*
+ * DC reset step 5. Wait for firmware to be ready to accept host
+ * requests.
+ */
+ ret = wait_fm_ready(dd, TIMEOUT_8051_START);
+ if (ret) { /* timed out */
+ dd_dev_err(dd, "8051 start timeout, current state 0x%x\n",
+ get_firmware_state(dd));
+ return -ETIMEDOUT;
+ }
+
+ read_misc_status(dd, &ver_major, &ver_minor, &ver_patch);
+ dd_dev_info(dd, "8051 firmware version %d.%d.%d\n",
+ (int)ver_major, (int)ver_minor, (int)ver_patch);
+ dd->dc8051_ver = dc8051_ver(ver_major, ver_minor, ver_patch);
+ ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "Failed to set host interface version, return 0x%x\n",
+ ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * Write the SBus request register
+ *
+ * No need for masking - the arguments are sized exactly.
+ */
+void sbus_request(struct hfi1_devdata *dd,
+ u8 receiver_addr, u8 data_addr, u8 command, u32 data_in)
+{
+ write_csr(dd, ASIC_CFG_SBUS_REQUEST,
+ ((u64)data_in << ASIC_CFG_SBUS_REQUEST_DATA_IN_SHIFT) |
+ ((u64)command << ASIC_CFG_SBUS_REQUEST_COMMAND_SHIFT) |
+ ((u64)data_addr << ASIC_CFG_SBUS_REQUEST_DATA_ADDR_SHIFT) |
+ ((u64)receiver_addr <<
+ ASIC_CFG_SBUS_REQUEST_RECEIVER_ADDR_SHIFT));
+}
+
+/*
+ * Read a value from the SBus.
+ *
+ * Requires the caller to be in fast mode.
+ */
+static u32 sbus_read(struct hfi1_devdata *dd, u8 receiver_addr, u8 data_addr,
+ u32 data_in)
+{
+ u64 reg;
+ int retries;
+ int success = 0;
+ u32 result = 0;
+ u32 result_code = 0;
+
+ sbus_request(dd, receiver_addr, data_addr, READ_SBUS_RECEIVER, data_in);
+
+ for (retries = 0; retries < 100; retries++) {
+ usleep_range(1000, 1200); /* arbitrary */
+ reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
+ result_code = (reg >> ASIC_STS_SBUS_RESULT_RESULT_CODE_SHIFT)
+ & ASIC_STS_SBUS_RESULT_RESULT_CODE_MASK;
+ if (result_code != SBUS_READ_COMPLETE)
+ continue;
+
+ success = 1;
+ result = (reg >> ASIC_STS_SBUS_RESULT_DATA_OUT_SHIFT)
+ & ASIC_STS_SBUS_RESULT_DATA_OUT_MASK;
+ break;
+ }
+
+ if (!success) {
+ dd_dev_err(dd, "%s: read failed, result code 0x%x\n", __func__,
+ result_code);
+ }
+
+ return result;
+}
+
+/*
+ * Turn off the SBus and fabric serdes spicos.
+ *
+ * + Must be called with SBus fast mode turned on.
+ * + Must be called after fabric serdes broadcast is set up.
+ * + Must be called before the 8051 is loaded - assumes 8051 is not loaded
+ * when using MISC_CFG_FW_CTRL.
+ */
+static void turn_off_spicos(struct hfi1_devdata *dd, int flags)
+{
+ /* only needed on A0 */
+ if (!is_ax(dd))
+ return;
+
+ dd_dev_info(dd, "Turning off spicos:%s%s\n",
+ flags & SPICO_SBUS ? " SBus" : "",
+ flags & SPICO_FABRIC ? " fabric" : "");
+
+ write_csr(dd, MISC_CFG_FW_CTRL, ENABLE_SPICO_SMASK);
+ /* disable SBus spico */
+ if (flags & SPICO_SBUS)
+ sbus_request(dd, SBUS_MASTER_BROADCAST, 0x01,
+ WRITE_SBUS_RECEIVER, 0x00000040);
+
+ /* disable the fabric serdes spicos */
+ if (flags & SPICO_FABRIC)
+ sbus_request(dd, fabric_serdes_broadcast[dd->hfi1_id],
+ 0x07, WRITE_SBUS_RECEIVER, 0x00000000);
+ write_csr(dd, MISC_CFG_FW_CTRL, 0);
+}
+
+/*
+ * Reset all of the fabric serdes for this HFI in preparation to take the
+ * link to Polling.
+ *
+ * To do a reset, we need to write to the serdes registers. Unfortunately,
+ * the fabric serdes download to the other HFI on the ASIC will have turned
+ * off the firmware validation on this HFI. This means we can't write to the
+ * registers to reset the serdes. Work around this by performing a complete
+ * re-download and validation of the fabric serdes firmware. This, as a
+ * by-product, will reset the serdes. NOTE: the re-download requires that
+ * the 8051 be in the Offline state. I.e. not actively trying to use the
+ * serdes. This routine is called at the point where the link is Offline and
+ * is getting ready to go to Polling.
+ */
+void fabric_serdes_reset(struct hfi1_devdata *dd)
+{
+ int ret;
+
+ if (!fw_fabric_serdes_load)
+ return;
+
+ ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
+ if (ret) {
+ dd_dev_err(dd,
+ "Cannot acquire SBus resource to reset fabric SerDes - perhaps you should reboot\n");
+ return;
+ }
+ set_sbus_fast_mode(dd);
+
+ if (is_ax(dd)) {
+ /* A0 serdes do not work with a re-download */
+ u8 ra = fabric_serdes_broadcast[dd->hfi1_id];
+
+ /* place SerDes in reset and disable SPICO */
+ sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011);
+ /* wait 100 refclk cycles @ 156.25MHz => 640ns */
+ udelay(1);
+ /* remove SerDes reset */
+ sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010);
+ /* turn SPICO enable on */
+ sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002);
+ } else {
+ turn_off_spicos(dd, SPICO_FABRIC);
+ /*
+ * No need for firmware retry - what to download has already
+ * been decided.
+ * No need to pay attention to the load return - the only
+ * failure is a validation failure, which has already been
+ * checked by the initial download.
+ */
+ (void)load_fabric_serdes_firmware(dd, &fw_fabric);
+ }
+
+ clear_sbus_fast_mode(dd);
+ release_chip_resource(dd, CR_SBUS);
+}
+
+/* Access to the SBus in this routine should probably be serialized */
+int sbus_request_slow(struct hfi1_devdata *dd,
+ u8 receiver_addr, u8 data_addr, u8 command, u32 data_in)
+{
+ u64 reg, count = 0;
+
+ /* make sure fast mode is clear */
+ clear_sbus_fast_mode(dd);
+
+ sbus_request(dd, receiver_addr, data_addr, command, data_in);
+ write_csr(dd, ASIC_CFG_SBUS_EXECUTE,
+ ASIC_CFG_SBUS_EXECUTE_EXECUTE_SMASK);
+ /* Wait for both DONE and RCV_DATA_VALID to go high */
+ reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
+ while (!((reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) &&
+ (reg & ASIC_STS_SBUS_RESULT_RCV_DATA_VALID_SMASK))) {
+ if (count++ >= SBUS_MAX_POLL_COUNT) {
+ u64 counts = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
+ /*
+ * If the loop has timed out, we are OK if the DONE bit
+ * is set and the RCV_DATA_VALID and EXECUTE counters
+ * are the same. If not, we cannot proceed.
+ */
+ if ((reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) &&
+ (SBUS_COUNTER(counts, RCV_DATA_VALID) ==
+ SBUS_COUNTER(counts, EXECUTE)))
+ break;
+ return -ETIMEDOUT;
+ }
+ udelay(1);
+ reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
+ }
+ count = 0;
+ write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
+ /* Wait for DONE to clear after EXECUTE is cleared */
+ reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
+ while (reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) {
+ if (count++ >= SBUS_MAX_POLL_COUNT)
+ return -ETIME;
+ udelay(1);
+ reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
+ }
+ return 0;
+}
+
+static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
+ struct firmware_details *fdet)
+{
+ int i, err;
+ const u8 ra = fabric_serdes_broadcast[dd->hfi1_id]; /* receiver addr */
+
+ dd_dev_info(dd, "Downloading fabric firmware\n");
+
+ /* step 1: load security variables */
+ load_security_variables(dd, fdet);
+ /* step 2: place SerDes in reset and disable SPICO */
+ sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011);
+ /* wait 100 refclk cycles @ 156.25MHz => 640ns */
+ udelay(1);
+ /* step 3: remove SerDes reset */
+ sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010);
+ /* step 4: assert IMEM override */
+ sbus_request(dd, ra, 0x00, WRITE_SBUS_RECEIVER, 0x40000000);
+ /* step 5: download SerDes machine code */
+ for (i = 0; i < fdet->firmware_len; i += 4) {
+ sbus_request(dd, ra, 0x0a, WRITE_SBUS_RECEIVER,
+ *(u32 *)&fdet->firmware_ptr[i]);
+ }
+ /* step 6: IMEM override off */
+ sbus_request(dd, ra, 0x00, WRITE_SBUS_RECEIVER, 0x00000000);
+ /* step 7: turn ECC on */
+ sbus_request(dd, ra, 0x0b, WRITE_SBUS_RECEIVER, 0x000c0000);
+
+ /* steps 8-11: run the RSA engine */
+ err = run_rsa(dd, "fabric serdes", fdet->signature);
+ if (err)
+ return err;
+
+ /* step 12: turn SPICO enable on */
+ sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002);
+ /* step 13: enable core hardware interrupts */
+ sbus_request(dd, ra, 0x08, WRITE_SBUS_RECEIVER, 0x00000000);
+
+ return 0;
+}
+
+static int load_sbus_firmware(struct hfi1_devdata *dd,
+ struct firmware_details *fdet)
+{
+ int i, err;
+ const u8 ra = SBUS_MASTER_BROADCAST; /* receiver address */
+
+ dd_dev_info(dd, "Downloading SBus firmware\n");
+
+ /* step 1: load security variables */
+ load_security_variables(dd, fdet);
+ /* step 2: place SPICO into reset and enable off */
+ sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x000000c0);
+ /* step 3: remove reset, enable off, IMEM_CNTRL_EN on */
+ sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000240);
+ /* step 4: set starting IMEM address for burst download */
+ sbus_request(dd, ra, 0x03, WRITE_SBUS_RECEIVER, 0x80000000);
+ /* step 5: download the SBus Master machine code */
+ for (i = 0; i < fdet->firmware_len; i += 4) {
+ sbus_request(dd, ra, 0x14, WRITE_SBUS_RECEIVER,
+ *(u32 *)&fdet->firmware_ptr[i]);
+ }
+ /* step 6: set IMEM_CNTL_EN off */
+ sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000040);
+ /* step 7: turn ECC on */
+ sbus_request(dd, ra, 0x16, WRITE_SBUS_RECEIVER, 0x000c0000);
+
+ /* steps 8-11: run the RSA engine */
+ err = run_rsa(dd, "SBus", fdet->signature);
+ if (err)
+ return err;
+
+ /* step 12: set SPICO_ENABLE on */
+ sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000140);
+
+ return 0;
+}
+
+static int load_pcie_serdes_firmware(struct hfi1_devdata *dd,
+ struct firmware_details *fdet)
+{
+ int i;
+ const u8 ra = SBUS_MASTER_BROADCAST; /* receiver address */
+
+ dd_dev_info(dd, "Downloading PCIe firmware\n");
+
+ /* step 1: load security variables */
+ load_security_variables(dd, fdet);
+ /* step 2: assert single step (halts the SBus Master spico) */
+ sbus_request(dd, ra, 0x05, WRITE_SBUS_RECEIVER, 0x00000001);
+ /* step 3: enable XDMEM access */
+ sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000d40);
+ /* step 4: load firmware into SBus Master XDMEM */
+ /*
+ * NOTE: the dmem address, write_en, and wdata are all pre-packed;
+ * we only need to pick up the bytes and write them.
+ */
+ for (i = 0; i < fdet->firmware_len; i += 4) {
+ sbus_request(dd, ra, 0x04, WRITE_SBUS_RECEIVER,
+ *(u32 *)&fdet->firmware_ptr[i]);
+ }
+ /* step 5: disable XDMEM access */
+ sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000140);
+ /* step 6: allow SBus Spico to run */
+ sbus_request(dd, ra, 0x05, WRITE_SBUS_RECEIVER, 0x00000000);
+
+ /*
+ * steps 7-11: run RSA, if it succeeds, firmware is available to
+ * be swapped
+ */
+ return run_rsa(dd, "PCIe serdes", fdet->signature);
+}
+
+/*
+ * Set the given broadcast values on the given list of devices.
+ */
+static void set_serdes_broadcast(struct hfi1_devdata *dd, u8 bg1, u8 bg2,
+ const u8 *addrs, int count)
+{
+ while (--count >= 0) {
+ /*
+ * Set BROADCAST_GROUP_1 and BROADCAST_GROUP_2, leave
+ * defaults for everything else. Do not read-modify-write,
+ * per instruction from the manufacturer.
+ *
+ * Register 0xfd:
+ * bits what
+ * ----- ---------------------------------
+ * 0 IGNORE_BROADCAST (default 0)
+ * 11:4 BROADCAST_GROUP_1 (default 0xff)
+ * 23:16 BROADCAST_GROUP_2 (default 0xff)
+ */
+ sbus_request(dd, addrs[count], 0xfd, WRITE_SBUS_RECEIVER,
+ (u32)bg1 << 4 | (u32)bg2 << 16);
+ }
+}
+
+int acquire_hw_mutex(struct hfi1_devdata *dd)
+{
+ unsigned long timeout;
+ int try = 0;
+ u8 mask = 1 << dd->hfi1_id;
+ u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
+
+ if (user == mask) {
+ dd_dev_info(dd,
+ "Hardware mutex already acquired, mutex mask %u\n",
+ (u32)mask);
+ return 0;
+ }
+
+retry:
+ timeout = msecs_to_jiffies(HM_TIMEOUT) + jiffies;
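+ /*
+ * Claim the mutex by writing our mask, then read it back; the
+ * mutex is owned only if the readback matches our mask.
+ */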
+ while (1) {
+ write_csr(dd, ASIC_CFG_MUTEX, mask);
+ user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
+ if (user == mask)
+ return 0; /* success */
+ if (time_after(jiffies, timeout))
+ break; /* timed out */
+ msleep(20);
+ }
+
+ /* timed out */
+ dd_dev_err(dd,
+ "Unable to acquire hardware mutex, mutex mask %u, my mask %u (%s)\n",
+ (u32)user, (u32)mask, (try == 0) ? "retrying" : "giving up");
+
+ if (try == 0) {
+ /* break mutex and retry */
+ write_csr(dd, ASIC_CFG_MUTEX, 0);
+ try++;
+ goto retry;
+ }
+
+ return -EBUSY;
+}
+
+void release_hw_mutex(struct hfi1_devdata *dd)
+{
+ u8 mask = 1 << dd->hfi1_id;
+ u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
+
+ if (user != mask)
+ dd_dev_warn(dd,
+ "Unable to release hardware mutex, mutex mask %u, my mask %u\n",
+ (u32)user, (u32)mask);
+ else
+ write_csr(dd, ASIC_CFG_MUTEX, 0);
+}
+
+/* return the given resource bit(s) as a mask for the given HFI */
+static inline u64 resource_mask(u32 hfi1_id, u32 resource)
+{
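+ /*
+ * HFI 0 uses the resource bits as-is; HFI 1 uses the same bits
+ * shifted up by CR_DYN_SHIFT.
+ */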
+ return ((u64)resource) << (hfi1_id ? CR_DYN_SHIFT : 0);
+}
+
+static void fail_mutex_acquire_message(struct hfi1_devdata *dd,
+ const char *func)
+{
+ dd_dev_err(dd,
+ "%s: hardware mutex stuck - suggest rebooting the machine\n",
+ func);
+}
+
+/*
+ * Acquire access to a chip resource.
+ *
+ * Return 0 on success, -EBUSY if resource busy, -EIO if mutex acquire failed.
+ */
+static int __acquire_chip_resource(struct hfi1_devdata *dd, u32 resource)
+{
+ u64 scratch0, all_bits, my_bit;
+ int ret;
+
+ if (resource & CR_DYN_MASK) {
+ /* a dynamic resource is in use if either HFI has set the bit */
+ if (dd->pcidev->device == PCI_DEVICE_ID_INTEL0 &&
+ (resource & (CR_I2C1 | CR_I2C2))) {
+ /* discrete devices must serialize across both chains */
+ all_bits = resource_mask(0, CR_I2C1 | CR_I2C2) |
+ resource_mask(1, CR_I2C1 | CR_I2C2);
+ } else {
+ all_bits = resource_mask(0, resource) |
+ resource_mask(1, resource);
+ }
+ my_bit = resource_mask(dd->hfi1_id, resource);
+ } else {
+ /* non-dynamic resources are not split between HFIs */
+ all_bits = resource;
+ my_bit = resource;
+ }
+
+ /* lock against other callers within the driver wanting a resource */
+ mutex_lock(&dd->asic_data->asic_resource_mutex);
+
+ ret = acquire_hw_mutex(dd);
+ if (ret) {
+ fail_mutex_acquire_message(dd, __func__);
+ ret = -EIO;
+ goto done;
+ }
+
+ scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
+ if (scratch0 & all_bits) {
+ ret = -EBUSY;
+ } else {
+ write_csr(dd, ASIC_CFG_SCRATCH, scratch0 | my_bit);
+ /* force write to be visible to other HFI on another OS */
+ (void)read_csr(dd, ASIC_CFG_SCRATCH);
+ }
+
+ release_hw_mutex(dd);
+
+done:
+ mutex_unlock(&dd->asic_data->asic_resource_mutex);
+ return ret;
+}
+
+/*
+ * Acquire access to a chip resource, wait up to mswait milliseconds for
+ * the resource to become available.
+ *
+ * Return 0 on success, -EBUSY if busy (even after wait), -EIO if mutex
+ * acquire failed.
+ */
+int acquire_chip_resource(struct hfi1_devdata *dd, u32 resource, u32 mswait)
+{
+ unsigned long timeout;
+ int ret;
+
+ timeout = jiffies + msecs_to_jiffies(mswait);
+ while (1) {
+ ret = __acquire_chip_resource(dd, resource);
+ if (ret != -EBUSY)
+ return ret;
+ /* resource is busy, check our timeout */
+ if (time_after_eq(jiffies, timeout))
+ return -EBUSY;
+ usleep_range(80, 120); /* arbitrary delay */
+ }
+}
+
+/*
+ * Release access to a chip resource
+ */
+void release_chip_resource(struct hfi1_devdata *dd, u32 resource)
+{
+ u64 scratch0, bit;
+
+ /* only dynamic resources should ever be cleared */
+ if (!(resource & CR_DYN_MASK)) {
+ dd_dev_err(dd, "%s: invalid resource 0x%x\n", __func__,
+ resource);
+ return;
+ }
+ bit = resource_mask(dd->hfi1_id, resource);
+
+ /* lock against other callers within the driver wanting a resource */
+ mutex_lock(&dd->asic_data->asic_resource_mutex);
+
+ if (acquire_hw_mutex(dd)) {
+ fail_mutex_acquire_message(dd, __func__);
+ goto done;
+ }
+
+ scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
+ if ((scratch0 & bit) != 0) {
+ scratch0 &= ~bit;
+ write_csr(dd, ASIC_CFG_SCRATCH, scratch0);
+ /* force write to be visible to other HFI on another OS */
+ (void)read_csr(dd, ASIC_CFG_SCRATCH);
+ } else {
+ dd_dev_warn(dd, "%s: id %d, resource 0x%x: bit not set\n",
+ __func__, dd->hfi1_id, resource);
+ }
+
+ release_hw_mutex(dd);
+
+done:
+ mutex_unlock(&dd->asic_data->asic_resource_mutex);
+}
+
+/*
+ * Return true if resource is set, false otherwise. Print a warning
+ * if not set and a function is supplied.
+ */
+bool check_chip_resource(struct hfi1_devdata *dd, u32 resource,
+ const char *func)
+{
+ u64 scratch0, bit;
+
+ if (resource & CR_DYN_MASK)
+ bit = resource_mask(dd->hfi1_id, resource);
+ else
+ bit = resource;
+
+ scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
+ if ((scratch0 & bit) == 0) {
+ if (func)
+ dd_dev_warn(dd,
+ "%s: id %d, resource 0x%x, not acquired!\n",
+ func, dd->hfi1_id, resource);
+ return false;
+ }
+ return true;
+}
+
+static void clear_chip_resources(struct hfi1_devdata *dd, const char *func)
+{
+ u64 scratch0;
+
+ /* lock against other callers within the driver wanting a resource */
+ mutex_lock(&dd->asic_data->asic_resource_mutex);
+
+ if (acquire_hw_mutex(dd)) {
+ fail_mutex_acquire_message(dd, func);
+ goto done;
+ }
+
+ /* clear all dynamic access bits for this HFI */
+ scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
+ scratch0 &= ~resource_mask(dd->hfi1_id, CR_DYN_MASK);
+ write_csr(dd, ASIC_CFG_SCRATCH, scratch0);
+ /* force write to be visible to other HFI on another OS */
+ (void)read_csr(dd, ASIC_CFG_SCRATCH);
+
+ release_hw_mutex(dd);
+
+done:
+ mutex_unlock(&dd->asic_data->asic_resource_mutex);
+}
+
+void init_chip_resources(struct hfi1_devdata *dd)
+{
+ /* clear any holds left by us */
+ clear_chip_resources(dd, __func__);
+}
+
+void finish_chip_resources(struct hfi1_devdata *dd)
+{
+ /* clear any holds left by us */
+ clear_chip_resources(dd, __func__);
+}
+
+void set_sbus_fast_mode(struct hfi1_devdata *dd)
+{
+ write_csr(dd, ASIC_CFG_SBUS_EXECUTE,
+ ASIC_CFG_SBUS_EXECUTE_FAST_MODE_SMASK);
+}
+
+void clear_sbus_fast_mode(struct hfi1_devdata *dd)
+{
+ u64 reg, count = 0;
+
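+ /*
+ * Wait for any outstanding fast-mode requests to drain: the
+ * EXECUTE and RCV_DATA_VALID counters must match before the
+ * execute register is cleared.
+ */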
+ reg = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
+ while (SBUS_COUNTER(reg, EXECUTE) !=
+ SBUS_COUNTER(reg, RCV_DATA_VALID)) {
+ if (count++ >= SBUS_MAX_POLL_COUNT)
+ break;
+ udelay(1);
+ reg = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
+ }
+ write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
+}
+
+int load_firmware(struct hfi1_devdata *dd)
+{
+ int ret;
+
+ if (fw_fabric_serdes_load) {
+ ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
+ if (ret)
+ return ret;
+
+ set_sbus_fast_mode(dd);
+
+ set_serdes_broadcast(dd, all_fabric_serdes_broadcast,
+ fabric_serdes_broadcast[dd->hfi1_id],
+ fabric_serdes_addrs[dd->hfi1_id],
+ NUM_FABRIC_SERDES);
+ turn_off_spicos(dd, SPICO_FABRIC);
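+ /*
+ * retry_firmware() decides, based on the shared fw_state,
+ * whether to switch to the alternate images and try again.
+ */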
+ do {
+ ret = load_fabric_serdes_firmware(dd, &fw_fabric);
+ } while (retry_firmware(dd, ret));
+
+ clear_sbus_fast_mode(dd);
+ release_chip_resource(dd, CR_SBUS);
+ if (ret)
+ return ret;
+ }
+
+ if (fw_8051_load) {
+ do {
+ ret = load_8051_firmware(dd, &fw_8051);
+ } while (retry_firmware(dd, ret));
+ if (ret)
+ return ret;
+ }
+
+ dump_fw_version(dd);
+ return 0;
+}
+
+int hfi1_firmware_init(struct hfi1_devdata *dd)
+{
+ /* only RTL can use these */
+ if (dd->icode != ICODE_RTL_SILICON) {
+ fw_fabric_serdes_load = 0;
+ fw_pcie_serdes_load = 0;
+ fw_sbus_load = 0;
+ }
+
+ /* no 8051 or QSFP on simulator */
+ if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
+ fw_8051_load = 0;
+
+ if (!fw_8051_name) {
+ if (dd->icode == ICODE_RTL_SILICON)
+ fw_8051_name = DEFAULT_FW_8051_NAME_ASIC;
+ else
+ fw_8051_name = DEFAULT_FW_8051_NAME_FPGA;
+ }
+ if (!fw_fabric_serdes_name)
+ fw_fabric_serdes_name = DEFAULT_FW_FABRIC_NAME;
+ if (!fw_sbus_name)
+ fw_sbus_name = DEFAULT_FW_SBUS_NAME;
+ if (!fw_pcie_serdes_name)
+ fw_pcie_serdes_name = DEFAULT_FW_PCIE_NAME;
+
+ return obtain_firmware(dd);
+}
+
+/*
+ * This function is a helper for parse_platform_config() and does not
+ * check the validity of the platform configuration cache (because we
+ * know it is invalid as we are building up the cache). As such, it
+ * should not be called from anywhere other than
+ * parse_platform_config().
+ */
+static int check_meta_version(struct hfi1_devdata *dd, u32 *system_table)
+{
+ u32 meta_ver, meta_ver_meta, ver_start, ver_len, mask;
+ struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
+
+ if (!system_table)
+ return -EINVAL;
+
+ meta_ver_meta =
+ *(pcfgcache->config_tables[PLATFORM_CONFIG_SYSTEM_TABLE].table_metadata
+ + SYSTEM_TABLE_META_VERSION);
+
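+ /*
+ * The metadata word packs the field start offset (in bits) in its
+ * low METADATA_TABLE_FIELD_START_LEN_BITS bits and the field length
+ * (in bits) above METADATA_TABLE_FIELD_LEN_SHIFT.
+ */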
+ mask = ((1 << METADATA_TABLE_FIELD_START_LEN_BITS) - 1);
+ ver_start = meta_ver_meta & mask;
+
+ meta_ver_meta >>= METADATA_TABLE_FIELD_LEN_SHIFT;
+
+ mask = ((1 << METADATA_TABLE_FIELD_LEN_LEN_BITS) - 1);
+ ver_len = meta_ver_meta & mask;
+
+ ver_start /= 8;
+ meta_ver = *((u8 *)system_table + ver_start) & ((1 << ver_len) - 1);
+
+ if (meta_ver < 4) {
+ dd_dev_info(dd, "%s: Please update platform config\n",
+ __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int parse_platform_config(struct hfi1_devdata *dd)
+{
+ struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
+ struct hfi1_pportdata *ppd = dd->pport;
+ u32 *ptr = NULL;
+ u32 header1 = 0, header2 = 0, magic_num = 0, crc = 0, file_length = 0;
+ u32 record_idx = 0, table_type = 0, table_length_dwords = 0;
+ int ret = -EINVAL; /* assume failure */
+
+ /*
+ * For integrated devices that did not fall back to the default file,
+ * the SI tuning information for active channels is acquired from the
+ * scratch register bitmap, thus there is no platform config to parse.
+ * Skip parsing in these situations.
+ */
+ if (ppd->config_from_scratch)
+ return 0;
+
+ if (!dd->platform_config.data) {
+ dd_dev_err(dd, "%s: Missing config file\n", __func__);
+ ret = -EINVAL;
+ goto bail;
+ }
+ ptr = (u32 *)dd->platform_config.data;
+
+ magic_num = *ptr;
+ ptr++;
+ if (magic_num != PLATFORM_CONFIG_MAGIC_NUM) {
+ dd_dev_err(dd, "%s: Bad config file\n", __func__);
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /* Field is file size in DWORDs */
+ file_length = (*ptr) * 4;
+
+ /*
+ * The length can't be larger than the partition size. If it is,
+ * assume platform config format version 4 is being used and
+ * interpret the file size field as a header by not advancing the
+ * pointer.
+ */
+ if (file_length > MAX_PLATFORM_CONFIG_FILE_SIZE) {
+ dd_dev_info(dd,
+ "%s:File length out of bounds, using alternative format\n",
+ __func__);
+ file_length = PLATFORM_CONFIG_FORMAT_4_FILE_SIZE;
+ } else {
+ ptr++;
+ }
+
+ if (file_length > dd->platform_config.size) {
+ dd_dev_info(dd, "%s:File claims to be larger than read size\n",
+ __func__);
+ ret = -EINVAL;
+ goto bail;
+ } else if (file_length < dd->platform_config.size) {
+ dd_dev_info(dd,
+ "%s:File claims to be smaller than read size, continuing\n",
+ __func__);
+ }
+ /* exactly equal, perfection */
+
+ /*
+ * In both cases where we proceed, using the self-reported file length
+ * is the safer option. For the old format, a predefined value is
+ * used.
+ */
+ while (ptr < (u32 *)(dd->platform_config.data + file_length)) {
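+ /*
+ * Each table begins with two header DWORDs that must be
+ * one's complements of each other.
+ */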
+ header1 = *ptr;
+ header2 = *(ptr + 1);
+ if (header1 != ~header2) {
+ dd_dev_err(dd, "%s: Failed validation at offset %ld\n",
+ __func__, (ptr - (u32 *)
+ dd->platform_config.data));
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ record_idx = *ptr &
+ ((1 << PLATFORM_CONFIG_HEADER_RECORD_IDX_LEN_BITS) - 1);
+
+ table_length_dwords = (*ptr >>
+ PLATFORM_CONFIG_HEADER_TABLE_LENGTH_SHIFT) &
+ ((1 << PLATFORM_CONFIG_HEADER_TABLE_LENGTH_LEN_BITS) - 1);
+
+ table_type = (*ptr >> PLATFORM_CONFIG_HEADER_TABLE_TYPE_SHIFT) &
+ ((1 << PLATFORM_CONFIG_HEADER_TABLE_TYPE_LEN_BITS) - 1);
+
+ /* Done with this set of headers */
+ ptr += 2;
+
+ if (record_idx) {
+ /* data table */
+ switch (table_type) {
+ case PLATFORM_CONFIG_SYSTEM_TABLE:
+ pcfgcache->config_tables[table_type].num_table =
+ 1;
+ ret = check_meta_version(dd, ptr);
+ if (ret)
+ goto bail;
+ break;
+ case PLATFORM_CONFIG_PORT_TABLE:
+ pcfgcache->config_tables[table_type].num_table =
+ 2;
+ break;
+ case PLATFORM_CONFIG_RX_PRESET_TABLE:
+ case PLATFORM_CONFIG_TX_PRESET_TABLE:
+ case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
+ case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
+ pcfgcache->config_tables[table_type].num_table =
+ table_length_dwords;
+ break;
+ default:
+ dd_dev_err(dd,
+ "%s: Unknown data table %d, offset %ld\n",
+ __func__, table_type,
+ (ptr - (u32 *)
+ dd->platform_config.data));
+ ret = -EINVAL;
+ goto bail; /* We don't trust this file now */
+ }
+ pcfgcache->config_tables[table_type].table = ptr;
+ } else {
+ /* metadata table */
+ switch (table_type) {
+ case PLATFORM_CONFIG_SYSTEM_TABLE:
+ case PLATFORM_CONFIG_PORT_TABLE:
+ case PLATFORM_CONFIG_RX_PRESET_TABLE:
+ case PLATFORM_CONFIG_TX_PRESET_TABLE:
+ case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
+ case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
+ break;
+ default:
+ dd_dev_err(dd,
+ "%s: Unknown meta table %d, offset %ld\n",
+ __func__, table_type,
+ (ptr -
+ (u32 *)dd->platform_config.data));
+ ret = -EINVAL;
+ goto bail; /* We don't trust this file now */
+ }
+ pcfgcache->config_tables[table_type].table_metadata =
+ ptr;
+ }
+
+ /* Calculate and check table crc */
+ crc = crc32_le(~(u32)0, (unsigned char const *)ptr,
+ (table_length_dwords * 4));
+ crc ^= ~(u32)0;
+
+ /* Jump the table */
+ ptr += table_length_dwords;
+ if (crc != *ptr) {
+ dd_dev_err(dd, "%s: Failed CRC check at offset %ld\n",
+ __func__, (ptr -
+ (u32 *)dd->platform_config.data));
+ ret = -EINVAL;
+ goto bail;
+ }
+ /* Jump the CRC DWORD */
+ ptr++;
+ }
+
+ pcfgcache->cache_valid = 1;
+ return 0;
+bail:
+ memset(pcfgcache, 0, sizeof(struct platform_config_cache));
+ return ret;
+}
+
+static void get_integrated_platform_config_field(
+ struct hfi1_devdata *dd,
+ enum platform_config_table_type_encoding table_type,
+ int field_index, u32 *data)
+{
+ struct hfi1_pportdata *ppd = dd->pport;
+ u8 *cache = ppd->qsfp_info.cache;
+ u32 tx_preset = 0;
+
+ switch (table_type) {
+ case PLATFORM_CONFIG_SYSTEM_TABLE:
+ if (field_index == SYSTEM_TABLE_QSFP_POWER_CLASS_MAX)
+ *data = ppd->max_power_class;
+ else if (field_index == SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G)
+ *data = ppd->default_atten;
+ break;
+ case PLATFORM_CONFIG_PORT_TABLE:
+ if (field_index == PORT_TABLE_PORT_TYPE)
+ *data = ppd->port_type;
+ else if (field_index == PORT_TABLE_LOCAL_ATTEN_25G)
+ *data = ppd->local_atten;
+ else if (field_index == PORT_TABLE_REMOTE_ATTEN_25G)
+ *data = ppd->remote_atten;
+ break;
+ case PLATFORM_CONFIG_RX_PRESET_TABLE:
+ if (field_index == RX_PRESET_TABLE_QSFP_RX_CDR_APPLY)
+ *data = (ppd->rx_preset & QSFP_RX_CDR_APPLY_SMASK) >>
+ QSFP_RX_CDR_APPLY_SHIFT;
+ else if (field_index == RX_PRESET_TABLE_QSFP_RX_EMP_APPLY)
+ *data = (ppd->rx_preset & QSFP_RX_EMP_APPLY_SMASK) >>
+ QSFP_RX_EMP_APPLY_SHIFT;
+ else if (field_index == RX_PRESET_TABLE_QSFP_RX_AMP_APPLY)
+ *data = (ppd->rx_preset & QSFP_RX_AMP_APPLY_SMASK) >>
+ QSFP_RX_AMP_APPLY_SHIFT;
+ else if (field_index == RX_PRESET_TABLE_QSFP_RX_CDR)
+ *data = (ppd->rx_preset & QSFP_RX_CDR_SMASK) >>
+ QSFP_RX_CDR_SHIFT;
+ else if (field_index == RX_PRESET_TABLE_QSFP_RX_EMP)
+ *data = (ppd->rx_preset & QSFP_RX_EMP_SMASK) >>
+ QSFP_RX_EMP_SHIFT;
+ else if (field_index == RX_PRESET_TABLE_QSFP_RX_AMP)
+ *data = (ppd->rx_preset & QSFP_RX_AMP_SMASK) >>
+ QSFP_RX_AMP_SHIFT;
+ break;
+ case PLATFORM_CONFIG_TX_PRESET_TABLE:
+ if (cache[QSFP_EQ_INFO_OFFS] & 0x4)
+ tx_preset = ppd->tx_preset_eq;
+ else
+ tx_preset = ppd->tx_preset_noeq;
+ if (field_index == TX_PRESET_TABLE_PRECUR)
+ *data = (tx_preset & TX_PRECUR_SMASK) >>
+ TX_PRECUR_SHIFT;
+ else if (field_index == TX_PRESET_TABLE_ATTN)
+ *data = (tx_preset & TX_ATTN_SMASK) >>
+ TX_ATTN_SHIFT;
+ else if (field_index == TX_PRESET_TABLE_POSTCUR)
+ *data = (tx_preset & TX_POSTCUR_SMASK) >>
+ TX_POSTCUR_SHIFT;
+ else if (field_index == TX_PRESET_TABLE_QSFP_TX_CDR_APPLY)
+ *data = (tx_preset & QSFP_TX_CDR_APPLY_SMASK) >>
+ QSFP_TX_CDR_APPLY_SHIFT;
+ else if (field_index == TX_PRESET_TABLE_QSFP_TX_EQ_APPLY)
+ *data = (tx_preset & QSFP_TX_EQ_APPLY_SMASK) >>
+ QSFP_TX_EQ_APPLY_SHIFT;
+ else if (field_index == TX_PRESET_TABLE_QSFP_TX_CDR)
+ *data = (tx_preset & QSFP_TX_CDR_SMASK) >>
+ QSFP_TX_CDR_SHIFT;
+ else if (field_index == TX_PRESET_TABLE_QSFP_TX_EQ)
+ *data = (tx_preset & QSFP_TX_EQ_SMASK) >>
+ QSFP_TX_EQ_SHIFT;
+ break;
+ case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
+ case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
+ default:
+ break;
+ }
+}
+
+static int get_platform_fw_field_metadata(struct hfi1_devdata *dd, int table,
+ int field, u32 *field_len_bits,
+ u32 *field_start_bits)
+{
+ struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
+ u32 *src_ptr = NULL;
+
+ if (!pcfgcache->cache_valid)
+ return -EINVAL;
+
+ switch (table) {
+ case PLATFORM_CONFIG_SYSTEM_TABLE:
+ case PLATFORM_CONFIG_PORT_TABLE:
+ case PLATFORM_CONFIG_RX_PRESET_TABLE:
+ case PLATFORM_CONFIG_TX_PRESET_TABLE:
+ case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
+ case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
+ if (field && field < platform_config_table_limits[table])
+ src_ptr =
+ pcfgcache->config_tables[table].table_metadata + field;
+ break;
+ default:
+ dd_dev_info(dd, "%s: Unknown table\n", __func__);
+ break;
+ }
+
+ if (!src_ptr)
+ return -EINVAL;
+
+ if (field_start_bits)
+ *field_start_bits = *src_ptr &
+ ((1 << METADATA_TABLE_FIELD_START_LEN_BITS) - 1);
+
+ if (field_len_bits)
+ *field_len_bits = (*src_ptr >> METADATA_TABLE_FIELD_LEN_SHIFT)
+ & ((1 << METADATA_TABLE_FIELD_LEN_LEN_BITS) - 1);
+
+ return 0;
+}
+
+/* This is the central interface for getting data out of the platform config
+ * file. It depends on parse_platform_config() having populated the
+ * platform_config_cache in hfi1_devdata, and checks the cache_valid member to
+ * validate the sanity of the cache.
+ *
+ * The non-obvious parameters:
+ * @table_index: Acts as a lookup key selecting which instance of the
+ * tables the relevant field is fetched from.
+ *
+ * This applies to the data tables that have multiple instances. The port table
+ * is an exception to this rule as each HFI only has one port and thus the
+ * relevant table can be distinguished by hfi_id.
+ *
+ * @data: pointer to memory that will be populated with the field requested.
+ * @len: length of the memory pointed to by @data, in bytes.
+ */
+int get_platform_config_field(struct hfi1_devdata *dd,
+ enum platform_config_table_type_encoding
+ table_type, int table_index, int field_index,
+ u32 *data, u32 len)
+{
+ int ret = 0, wlen = 0, seek = 0;
+ u32 field_len_bits = 0, field_start_bits = 0, *src_ptr = NULL;
+ struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
+ struct hfi1_pportdata *ppd = dd->pport;
+
+ if (data)
+ memset(data, 0, len);
+ else
+ return -EINVAL;
+
+ if (ppd->config_from_scratch) {
+ /*
+ * Use saved configuration from ppd for integrated platforms
+ */
+ get_integrated_platform_config_field(dd, table_type,
+ field_index, data);
+ return 0;
+ }
+
+ ret = get_platform_fw_field_metadata(dd, table_type, field_index,
+ &field_len_bits,
+ &field_start_bits);
+ if (ret)
+ return -EINVAL;
+
+ /* Convert length to bits */
+ len *= 8;
+
+ /* Our metadata function checked cache_valid and field_index for us */
+ switch (table_type) {
+ case PLATFORM_CONFIG_SYSTEM_TABLE:
+ src_ptr = pcfgcache->config_tables[table_type].table;
+
+ if (field_index != SYSTEM_TABLE_QSFP_POWER_CLASS_MAX) {
+ if (len < field_len_bits)
+ return -EINVAL;
+
+ seek = field_start_bits / 8;
+ wlen = field_len_bits / 8;
+
+ src_ptr = (u32 *)((u8 *)src_ptr + seek);
+
+ /*
+ * If we are here, the field is expected to be byte aligned
+ * and a whole number of bytes long.
+ */
+ memcpy(data, src_ptr, wlen);
+ return 0;
+ }
+ break;
+ case PLATFORM_CONFIG_PORT_TABLE:
+ /* Port table is 4 DWORDS */
+ src_ptr = dd->hfi1_id ?
+ pcfgcache->config_tables[table_type].table + 4 :
+ pcfgcache->config_tables[table_type].table;
+ break;
+ case PLATFORM_CONFIG_RX_PRESET_TABLE:
+ case PLATFORM_CONFIG_TX_PRESET_TABLE:
+ case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
+ case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
+ src_ptr = pcfgcache->config_tables[table_type].table;
+
+ if (table_index <
+ pcfgcache->config_tables[table_type].num_table)
+ src_ptr += table_index;
+ else
+ src_ptr = NULL;
+ break;
+ default:
+ dd_dev_info(dd, "%s: Unknown table\n", __func__);
+ break;
+ }
+
+ if (!src_ptr || len < field_len_bits)
+ return -EINVAL;
+
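+ /*
+ * Fields are bit-packed within DWORDs: index to the DWORD that
+ * holds the field, then shift and mask it out.
+ */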
+ src_ptr += (field_start_bits / 32);
+ *data = (*src_ptr >> (field_start_bits % 32)) &
+ ((1 << field_len_bits) - 1);
+
+ return 0;
+}
+
+/*
+ * Download the firmware needed for the Gen3 PCIe SerDes. An update
+ * to the SBus firmware is needed before updating the PCIe firmware.
+ *
+ * Note: caller must be holding the SBus resource.
+ */
+int load_pcie_firmware(struct hfi1_devdata *dd)
+{
+ int ret = 0;
+
+ /* both firmware loads below use the SBus */
+ set_sbus_fast_mode(dd);
+
+ if (fw_sbus_load) {
+ turn_off_spicos(dd, SPICO_SBUS);
+ do {
+ ret = load_sbus_firmware(dd, &fw_sbus);
+ } while (retry_firmware(dd, ret));
+ if (ret)
+ goto done;
+ }
+
+ if (fw_pcie_serdes_load) {
+ dd_dev_info(dd, "Setting PCIe SerDes broadcast\n");
+ set_serdes_broadcast(dd, all_pcie_serdes_broadcast,
+ pcie_serdes_broadcast[dd->hfi1_id],
+ pcie_serdes_addrs[dd->hfi1_id],
+ NUM_PCIE_SERDES);
+ do {
+ ret = load_pcie_serdes_firmware(dd, &fw_pcie);
+ } while (retry_firmware(dd, ret));
+ if (ret)
+ goto done;
+ }
+
+done:
+ clear_sbus_fast_mode(dd);
+
+ return ret;
+}
+
+/*
+ * Read the GUID from the hardware, store it in dd.
+ */
+void read_guid(struct hfi1_devdata *dd)
+{
+ /* Take the DC out of reset to get a valid GUID value */
+ write_csr(dd, CCE_DC_CTRL, 0);
+ (void)read_csr(dd, CCE_DC_CTRL);
+
+ dd->base_guid = read_csr(dd, DC_DC8051_CFG_LOCAL_GUID);
+ dd_dev_info(dd, "GUID %llx",
+ (unsigned long long)dd->base_guid);
+}
+
+/* read and display firmware version info */
+static void dump_fw_version(struct hfi1_devdata *dd)
+{
+ u32 pcie_vers[NUM_PCIE_SERDES];
+ u32 fabric_vers[NUM_FABRIC_SERDES];
+ u32 sbus_vers;
+ int i;
+ int all_same;
+ int ret;
+ u8 rcv_addr;
+
+ ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
+ if (ret) {
+ dd_dev_err(dd, "Unable to acquire SBus to read firmware versions\n");
+ return;
+ }
+
+ /* set fast mode */
+ set_sbus_fast_mode(dd);
+
+ /* read version for SBus Master */
+ sbus_request(dd, SBUS_MASTER_BROADCAST, 0x02, WRITE_SBUS_RECEIVER, 0);
+ sbus_request(dd, SBUS_MASTER_BROADCAST, 0x07, WRITE_SBUS_RECEIVER, 0x1);
+ /* wait for interrupt to be processed */
+ usleep_range(10000, 11000);
+ sbus_vers = sbus_read(dd, SBUS_MASTER_BROADCAST, 0x08, 0x1);
+ dd_dev_info(dd, "SBus Master firmware version 0x%08x\n", sbus_vers);
+
+ /* read version for PCIe SerDes */
+ all_same = 1;
+ pcie_vers[0] = 0;
+ for (i = 0; i < NUM_PCIE_SERDES; i++) {
+ rcv_addr = pcie_serdes_addrs[dd->hfi1_id][i];
+ sbus_request(dd, rcv_addr, 0x03, WRITE_SBUS_RECEIVER, 0);
+ /* wait for interrupt to be processed */
+ usleep_range(10000, 11000);
+ pcie_vers[i] = sbus_read(dd, rcv_addr, 0x04, 0x0);
+ if (i > 0 && pcie_vers[0] != pcie_vers[i])
+ all_same = 0;
+ }
+
+ if (all_same) {
+ dd_dev_info(dd, "PCIe SerDes firmware version 0x%x\n",
+ pcie_vers[0]);
+ } else {
+ dd_dev_warn(dd, "PCIe SerDes do not have the same firmware version\n");
+ for (i = 0; i < NUM_PCIE_SERDES; i++) {
+ dd_dev_info(dd,
+ "PCIe SerDes lane %d firmware version 0x%x\n",
+ i, pcie_vers[i]);
+ }
+ }
+
+ /* read version for fabric SerDes */
+ all_same = 1;
+ fabric_vers[0] = 0;
+ for (i = 0; i < NUM_FABRIC_SERDES; i++) {
+ rcv_addr = fabric_serdes_addrs[dd->hfi1_id][i];
+ sbus_request(dd, rcv_addr, 0x03, WRITE_SBUS_RECEIVER, 0);
+ /* wait for interrupt to be processed */
+ usleep_range(10000, 11000);
+ fabric_vers[i] = sbus_read(dd, rcv_addr, 0x04, 0x0);
+ if (i > 0 && fabric_vers[0] != fabric_vers[i])
+ all_same = 0;
+ }
+
+ if (all_same) {
+ dd_dev_info(dd, "Fabric SerDes firmware version 0x%x\n",
+ fabric_vers[0]);
+ } else {
+ dd_dev_warn(dd, "Fabric SerDes do not have the same firmware version\n");
+ for (i = 0; i < NUM_FABRIC_SERDES; i++) {
+ dd_dev_info(dd,
+ "Fabric SerDes lane %d firmware version 0x%x\n",
+ i, fabric_vers[i]);
+ }
+ }
+
+ clear_sbus_fast_mode(dd);
+ release_chip_resource(dd, CR_SBUS);
+}
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
new file mode 100644
index 000000000000..cb630551cf1a
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -0,0 +1,2632 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2020-2023 Cornelis Networks, Inc.
+ * Copyright(c) 2015-2020 Intel Corporation.
+ */
+
+#ifndef _HFI1_KERNEL_H
+#define _HFI1_KERNEL_H
+
+#include <linux/refcount.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/fs.h>
+#include <linux/completion.h>
+#include <linux/kref.h>
+#include <linux/sched.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/xarray.h>
+#include <rdma/ib_hdrs.h>
+#include <rdma/opa_addr.h>
+#include <linux/rhashtable.h>
+#include <rdma/rdma_vt.h>
+
+#include "chip_registers.h"
+#include "common.h"
+#include "opfn.h"
+#include "verbs.h"
+#include "pio.h"
+#include "chip.h"
+#include "mad.h"
+#include "qsfp.h"
+#include "platform.h"
+#include "affinity.h"
+#include "msix.h"
+
+/* bumped 1 from s/w major version of TrueScale */
+#define HFI1_CHIP_VERS_MAJ 3U
+
+/* don't care about this except printing */
+#define HFI1_CHIP_VERS_MIN 0U
+
+/* The Organization Unique Identifier (Mfg code), and its position in GUID */
+#define HFI1_OUI 0x001175
+#define HFI1_OUI_LSB 40
+
+#define DROP_PACKET_OFF 0
+#define DROP_PACKET_ON 1
+
+#define NEIGHBOR_TYPE_HFI 0
+#define NEIGHBOR_TYPE_SWITCH 1
+
+#define HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES 5
+
+extern unsigned long hfi1_cap_mask;
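+/*
+ * hfi1_cap_mask packs kernel capabilities in the low bits, user
+ * capabilities starting at HFI1_CAP_USER_SHIFT, and misc bits starting
+ * at HFI1_CAP_MISC_SHIFT.
+ */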
+#define HFI1_CAP_KGET_MASK(mask, cap) ((mask) & HFI1_CAP_##cap)
+#define HFI1_CAP_UGET_MASK(mask, cap) \
+ (((mask) >> HFI1_CAP_USER_SHIFT) & HFI1_CAP_##cap)
+#define HFI1_CAP_KGET(cap) (HFI1_CAP_KGET_MASK(hfi1_cap_mask, cap))
+#define HFI1_CAP_UGET(cap) (HFI1_CAP_UGET_MASK(hfi1_cap_mask, cap))
+#define HFI1_CAP_IS_KSET(cap) (!!HFI1_CAP_KGET(cap))
+#define HFI1_CAP_IS_USET(cap) (!!HFI1_CAP_UGET(cap))
+#define HFI1_MISC_GET() ((hfi1_cap_mask >> HFI1_CAP_MISC_SHIFT) & \
+ HFI1_CAP_MISC_MASK)
+/* Offline Disabled Reason is 4-bits */
+#define HFI1_ODR_MASK(rsn) ((rsn) & OPA_PI_MASK_OFFLINE_REASON)
+
+/*
+ * Control context is always 0 and handles the error packets.
+ * It also handles the VL15 and multicast packets.
+ */
+#define HFI1_CTRL_CTXT 0
+
+/*
+ * Driver context will store software counters for each of the events
+ * associated with these status registers
+ */
+#define NUM_CCE_ERR_STATUS_COUNTERS 41
+#define NUM_RCV_ERR_STATUS_COUNTERS 64
+#define NUM_MISC_ERR_STATUS_COUNTERS 13
+#define NUM_SEND_PIO_ERR_STATUS_COUNTERS 36
+#define NUM_SEND_DMA_ERR_STATUS_COUNTERS 4
+#define NUM_SEND_EGRESS_ERR_STATUS_COUNTERS 64
+#define NUM_SEND_ERR_STATUS_COUNTERS 3
+#define NUM_SEND_CTXT_ERR_STATUS_COUNTERS 5
+#define NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS 24
+
+/*
+ * per-driver stats that are either neither device- nor port-specific, or
+ * summed over all of the devices and ports.
+ * They are described by name via the ipathfs filesystem, so layout
+ * and number of elements can change without breaking compatibility.
+ * If members are added or deleted, hfi1_statnames[] in debugfs.c must
+ * change to match.
+ */
+struct hfi1_ib_stats {
+ __u64 sps_ints; /* number of interrupts handled */
+ __u64 sps_errints; /* number of error interrupts */
+ __u64 sps_txerrs; /* tx-related packet errors */
+ __u64 sps_rcverrs; /* non-crc rcv packet errors */
+ __u64 sps_hwerrs; /* hardware errors reported (parity, etc.) */
+ __u64 sps_nopiobufs; /* no pio bufs avail from kernel */
+ __u64 sps_ctxts; /* number of contexts currently open */
+ __u64 sps_lenerrs; /* number of kernel packets where RHF != LRH len */
+ __u64 sps_buffull;
+ __u64 sps_hdrfull;
+};
+
+extern struct hfi1_ib_stats hfi1_stats;
+extern const struct pci_error_handlers hfi1_pci_err_handler;
+
+extern int num_driver_cntrs;
+
+/*
+ * First-cut criterion for "device is active" is
+ * two thousand dwords combined Tx, Rx traffic per
+ * 5-second interval. SMA packets are 64 dwords,
+ * and occur "a few per second", presumably each way.
+ */
+#define HFI1_TRAFFIC_ACTIVE_THRESHOLD (2000)
+
+/*
+ * Below contains all data related to a single context (formerly called port).
+ */
+
+struct hfi1_opcode_stats_perctx;
+
+struct ctxt_eager_bufs {
+ struct eager_buffer {
+ void *addr;
+ dma_addr_t dma;
+ ssize_t len;
+ } *buffers;
+ struct {
+ void *addr;
+ dma_addr_t dma;
+ } *rcvtids;
+ u32 size; /* total size of eager buffers */
+ u32 rcvtid_size; /* size of each eager rcv tid */
+ u16 count; /* size of buffers array */
+ u16 numbufs; /* number of buffers allocated */
+ u16 alloced; /* number of rcvarray entries used */
+ u16 threshold; /* head update threshold */
+};
+
+struct exp_tid_set {
+ struct list_head list;
+ u32 count;
+};
+
+struct hfi1_ctxtdata;
+typedef int (*intr_handler)(struct hfi1_ctxtdata *rcd, int data);
+typedef void (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);
+
+struct tid_queue {
+ struct list_head queue_head;
+ /* queue head for QP TID resource waiters */
+ u32 enqueue; /* count of tid enqueues */
+ u32 dequeue; /* count of tid dequeues */
+};
+
+struct hfi1_ctxtdata {
+ /* rcvhdrq base, needs mmap before useful */
+ void *rcvhdrq;
+ /* kernel virtual address where hdrqtail is updated */
+ volatile __le64 *rcvhdrtail_kvaddr;
+ /* so functions that need physical port can get it easily */
+ struct hfi1_pportdata *ppd;
+ /* so file ops can get at unit */
+ struct hfi1_devdata *dd;
+ /* this receive context's assigned PIO ACK send context */
+ struct send_context *sc;
+ /* per context recv functions */
+ const rhf_rcv_function_ptr *rhf_rcv_function_map;
+ /*
+ * The interrupt handler for a particular receive context can vary
+ * throughout its lifetime. This is not a lock-protected data member, so
+ * it must be updated atomically and the previous and new values must
+ * always be valid. Worst case, we process an extra interrupt and up to 64
+ * packets with the wrong interrupt handler.
+ */
+ intr_handler do_interrupt;
+ /** fast handler after autoactive */
+ intr_handler fast_handler;
+ /** slow handler */
+ intr_handler slow_handler;
+ /* napi pointer associated with netdev */
+ struct napi_struct *napi;
+ /* verbs rx_stats per rcd */
+ struct hfi1_opcode_stats_perctx *opstats;
+ /* clear interrupt mask */
+ u64 imask;
+ /* ctxt rcvhdrq head offset */
+ u32 head;
+ /* number of rcvhdrq entries */
+ u16 rcvhdrq_cnt;
+ u8 ireg; /* clear interrupt register */
+ /* receive packet sequence counter */
+ u8 seq_cnt;
+ /* size of each of the rcvhdrq entries */
+ u8 rcvhdrqentsize;
+ /* offset of RHF within receive header entry */
+ u8 rhf_offset;
+ /* dynamic receive available interrupt timeout */
+ u8 rcvavail_timeout;
+ /* Indicates that this is a vnic context */
+ bool is_vnic;
+ /* vnic queue index this context is mapped to */
+ u8 vnic_q_idx;
+ /* Is ASPM interrupt supported for this context */
+ bool aspm_intr_supported;
+ /* ASPM state (enabled/disabled) for this context */
+ bool aspm_enabled;
+ /* Is ASPM processing enabled for this context (in intr context) */
+ bool aspm_intr_enable;
+ struct ctxt_eager_bufs egrbufs;
+ /* QPs waiting for context processing */
+ struct list_head qp_wait_list;
+ /* tid allocation lists */
+ struct exp_tid_set tid_group_list;
+ struct exp_tid_set tid_used_list;
+ struct exp_tid_set tid_full_list;
+
+ /* Timer for re-enabling ASPM if interrupt activity quiets down */
+ struct timer_list aspm_timer;
+ /* per-context configuration flags */
+ unsigned long flags;
+ /* array of tid_groups */
+ struct tid_group *groups;
+ /* mmap of hdrq, must fit in 44 bits */
+ dma_addr_t rcvhdrq_dma;
+ dma_addr_t rcvhdrqtailaddr_dma;
+ /* Last interrupt timestamp */
+ ktime_t aspm_ts_last_intr;
+ /* Last timestamp at which we scheduled a timer for this context */
+ ktime_t aspm_ts_timer_sched;
+ /* Lock to serialize between intr, timer intr and user threads */
+ spinlock_t aspm_lock;
+ /* Reference count the base context usage */
+ struct kref kref;
+ /* numa node of this context */
+ int numa_id;
+ /* associated msix interrupt. */
+ s16 msix_intr;
+ /* job key */
+ u16 jkey;
+ /* number of RcvArray groups for this context. */
+ u16 rcv_array_groups;
+ /* index of first eager TID entry. */
+ u16 eager_base;
+ /* number of expected TID entries */
+ u16 expected_count;
+ /* index of first expected TID entry. */
+ u16 expected_base;
+ /* Device context index */
+ u8 ctxt;
+
+ /* PSM Specific fields */
+ /* lock protecting all Expected TID data */
+ struct mutex exp_mutex;
+ /* lock protecting all Expected TID data of kernel contexts */
+ spinlock_t exp_lock;
+ /* Queue for QP's waiting for HW TID flows */
+ struct tid_queue flow_queue;
+ /* Queue for QP's waiting for HW receive array entries */
+ struct tid_queue rarr_queue;
+ /* when waiting for rcv or pioavail */
+ wait_queue_head_t wait;
+ /* uuid from PSM */
+ u8 uuid[16];
+ /* same size as task_struct .comm[], command that opened context */
+ char comm[TASK_COMM_LEN];
+ /* Bitmask of in use context(s) */
+ DECLARE_BITMAP(in_use_ctxts, HFI1_MAX_SHARED_CTXTS);
+ /* per-context event flags for fileops/intr communication */
+ unsigned long event_flags;
+ /* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
+ void *subctxt_uregbase;
+ /* An array of pages for the eager receive buffers * N */
+ void *subctxt_rcvegrbuf;
+ /* An array of pages for the eager header queue entries * N */
+ void *subctxt_rcvhdr_base;
+ /* total number of polled urgent packets */
+ u32 urgent;
+ /* saved total number of polled urgent packets for poll edge trigger */
+ u32 urgent_poll;
+ /* Type of packets or conditions we want to poll for */
+ u16 poll_type;
+ /* non-zero if ctxt is being shared. */
+ u16 subctxt_id;
+ /* The version of the library which opened this ctxt */
+ u32 userversion;
+ /*
+ * non-zero if ctxt can be shared, and defines the maximum number of
+ * sub-contexts for this device context.
+ */
+ u8 subctxt_cnt;
+
+ /* Bit mask to track free TID RDMA HW flows */
+ unsigned long flow_mask;
+ struct tid_flow_state flows[RXE_NUM_TID_FLOWS];
+};
+
+/**
+ * rcvhdrq_size - return total size in bytes for header queue
+ * @rcd: the receive context
+ *
+ * rcvhdrqentsize is in DWs, so we have to convert to bytes
+ *
+ */
+static inline u32 rcvhdrq_size(struct hfi1_ctxtdata *rcd)
+{
+ return PAGE_ALIGN(rcd->rcvhdrq_cnt *
+ rcd->rcvhdrqentsize * sizeof(u32));
+}
+
+/*
+ * Represents a single packet at a high level. Put commonly computed things in
+ * here so we do not have to keep doing them over and over. The rule of thumb is
+ * if something is used one time to derive some value, store that something in
+ * here. If it is used multiple times, then store the result of that derivation
+ * in here.
+ */
+struct hfi1_packet {
+ void *ebuf;
+ void *hdr;
+ void *payload;
+ struct hfi1_ctxtdata *rcd;
+ __le32 *rhf_addr;
+ struct rvt_qp *qp;
+ struct ib_other_headers *ohdr;
+ struct ib_grh *grh;
+ struct opa_16b_mgmt *mgmt;
+ u64 rhf;
+ u32 maxcnt;
+ u32 rhqoff;
+ u32 dlid;
+ u32 slid;
+ int numpkt;
+ u16 tlen;
+ s16 etail;
+ u16 pkey;
+ u8 hlen;
+ u8 rsize;
+ u8 updegr;
+ u8 etype;
+ u8 extra_byte;
+ u8 pad;
+ u8 sc;
+ u8 sl;
+ u8 opcode;
+ bool migrated;
+};
+
+/* Packet types */
+#define HFI1_PKT_TYPE_9B 0
+#define HFI1_PKT_TYPE_16B 1
+
+/*
+ * OPA 16B Header
+ */
+#define OPA_16B_L4_MASK 0xFFull
+#define OPA_16B_SC_MASK 0x1F00000ull
+#define OPA_16B_SC_SHIFT 20
+#define OPA_16B_LID_MASK 0xFFFFFull
+#define OPA_16B_DLID_MASK 0xF000ull
+#define OPA_16B_DLID_SHIFT 20
+#define OPA_16B_DLID_HIGH_SHIFT 12
+#define OPA_16B_SLID_MASK 0xF00ull
+#define OPA_16B_SLID_SHIFT 20
+#define OPA_16B_SLID_HIGH_SHIFT 8
+#define OPA_16B_BECN_MASK 0x80000000ull
+#define OPA_16B_BECN_SHIFT 31
+#define OPA_16B_FECN_MASK 0x10000000ull
+#define OPA_16B_FECN_SHIFT 28
+#define OPA_16B_L2_MASK 0x60000000ull
+#define OPA_16B_L2_SHIFT 29
+#define OPA_16B_PKEY_MASK 0xFFFF0000ull
+#define OPA_16B_PKEY_SHIFT 16
+#define OPA_16B_LEN_MASK 0x7FF00000ull
+#define OPA_16B_LEN_SHIFT 20
+#define OPA_16B_RC_MASK 0xE000000ull
+#define OPA_16B_RC_SHIFT 25
+#define OPA_16B_AGE_MASK 0xFF0000ull
+#define OPA_16B_AGE_SHIFT 16
+#define OPA_16B_ENTROPY_MASK 0xFFFFull
+
+/*
+ * OPA 16B L2/L4 Encodings
+ */
+#define OPA_16B_L4_9B 0x00
+#define OPA_16B_L2_TYPE 0x02
+#define OPA_16B_L4_FM 0x08
+#define OPA_16B_L4_IB_LOCAL 0x09
+#define OPA_16B_L4_IB_GLOBAL 0x0A
+#define OPA_16B_L4_ETHR OPA_VNIC_L4_ETHR
+
+/*
+ * OPA 16B Management
+ */
+#define OPA_16B_L4_FM_PAD 3 /* fixed 3B pad */
+#define OPA_16B_L4_FM_HLEN 24 /* 16B(16) + L4_FM(8) */
+
+static inline u8 hfi1_16B_get_l4(struct hfi1_16b_header *hdr)
+{
+ return (u8)(hdr->lrh[2] & OPA_16B_L4_MASK);
+}
+
+static inline u8 hfi1_16B_get_sc(struct hfi1_16b_header *hdr)
+{
+ return (u8)((hdr->lrh[1] & OPA_16B_SC_MASK) >> OPA_16B_SC_SHIFT);
+}
+
+static inline u32 hfi1_16B_get_dlid(struct hfi1_16b_header *hdr)
+{
+ return (u32)((hdr->lrh[1] & OPA_16B_LID_MASK) |
+ (((hdr->lrh[2] & OPA_16B_DLID_MASK) >>
+ OPA_16B_DLID_HIGH_SHIFT) << OPA_16B_DLID_SHIFT));
+}
+
+static inline u32 hfi1_16B_get_slid(struct hfi1_16b_header *hdr)
+{
+ return (u32)((hdr->lrh[0] & OPA_16B_LID_MASK) |
+ (((hdr->lrh[2] & OPA_16B_SLID_MASK) >>
+ OPA_16B_SLID_HIGH_SHIFT) << OPA_16B_SLID_SHIFT));
+}
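+
+/*
+ * Worked example (illustrative, not part of this patch): the 24-bit DLID is
+ * split across the header. With lrh[1] = 0x00012345 the low 20 bits are
+ * 0x12345, and with (lrh[2] & OPA_16B_DLID_MASK) = 0xA000 the high nibble
+ * is 0xA, giving a reassembled DLID of 0xA12345. The SLID is rebuilt the
+ * same way from lrh[0] and bits 8-11 of lrh[2].
+ */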
+
+static inline u8 hfi1_16B_get_becn(struct hfi1_16b_header *hdr)
+{
+ return (u8)((hdr->lrh[0] & OPA_16B_BECN_MASK) >> OPA_16B_BECN_SHIFT);
+}
+
+static inline u8 hfi1_16B_get_fecn(struct hfi1_16b_header *hdr)
+{
+ return (u8)((hdr->lrh[1] & OPA_16B_FECN_MASK) >> OPA_16B_FECN_SHIFT);
+}
+
+static inline u8 hfi1_16B_get_l2(struct hfi1_16b_header *hdr)
+{
+ return (u8)((hdr->lrh[1] & OPA_16B_L2_MASK) >> OPA_16B_L2_SHIFT);
+}
+
+static inline u16 hfi1_16B_get_pkey(struct hfi1_16b_header *hdr)
+{
+ return (u16)((hdr->lrh[2] & OPA_16B_PKEY_MASK) >> OPA_16B_PKEY_SHIFT);
+}
+
+static inline u8 hfi1_16B_get_rc(struct hfi1_16b_header *hdr)
+{
+ return (u8)((hdr->lrh[1] & OPA_16B_RC_MASK) >> OPA_16B_RC_SHIFT);
+}
+
+static inline u8 hfi1_16B_get_age(struct hfi1_16b_header *hdr)
+{
+ return (u8)((hdr->lrh[3] & OPA_16B_AGE_MASK) >> OPA_16B_AGE_SHIFT);
+}
+
+static inline u16 hfi1_16B_get_len(struct hfi1_16b_header *hdr)
+{
+ return (u16)((hdr->lrh[0] & OPA_16B_LEN_MASK) >> OPA_16B_LEN_SHIFT);
+}
+
+static inline u16 hfi1_16B_get_entropy(struct hfi1_16b_header *hdr)
+{
+ return (u16)(hdr->lrh[3] & OPA_16B_ENTROPY_MASK);
+}
+
+#define OPA_16B_MAKE_QW(low_dw, high_dw) (((u64)(high_dw) << 32) | (low_dw))
+
+/*
+ * BTH
+ */
+#define OPA_16B_BTH_PAD_MASK 7
+static inline u8 hfi1_16B_bth_get_pad(struct ib_other_headers *ohdr)
+{
+ return (u8)((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_PAD_SHIFT) &
+ OPA_16B_BTH_PAD_MASK);
+}
+
+/*
+ * 16B Management
+ */
+#define OPA_16B_MGMT_QPN_MASK 0xFFFFFF
+static inline u32 hfi1_16B_get_dest_qpn(struct opa_16b_mgmt *mgmt)
+{
+ return be32_to_cpu(mgmt->dest_qpn) & OPA_16B_MGMT_QPN_MASK;
+}
+
+static inline u32 hfi1_16B_get_src_qpn(struct opa_16b_mgmt *mgmt)
+{
+ return be32_to_cpu(mgmt->src_qpn) & OPA_16B_MGMT_QPN_MASK;
+}
+
+static inline void hfi1_16B_set_qpn(struct opa_16b_mgmt *mgmt,
+ u32 dest_qp, u32 src_qp)
+{
+ mgmt->dest_qpn = cpu_to_be32(dest_qp & OPA_16B_MGMT_QPN_MASK);
+ mgmt->src_qpn = cpu_to_be32(src_qp & OPA_16B_MGMT_QPN_MASK);
+}
+
+/**
+ * hfi1_get_rc_ohdr - get extended header
+ * @opah: the OPA header
+ */
+static inline struct ib_other_headers *
+hfi1_get_rc_ohdr(struct hfi1_opa_header *opah)
+{
+ struct ib_other_headers *ohdr;
+ struct ib_header *hdr = NULL;
+ struct hfi1_16b_header *hdr_16b = NULL;
+
+ /* Find out where the BTH is */
+ if (opah->hdr_type == HFI1_PKT_TYPE_9B) {
+ hdr = &opah->ibh;
+ if (ib_get_lnh(hdr) == HFI1_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else
+ ohdr = &hdr->u.l.oth;
+ } else {
+ u8 l4;
+
+ hdr_16b = &opah->opah;
+ l4 = hfi1_16B_get_l4(hdr_16b);
+ if (l4 == OPA_16B_L4_IB_LOCAL)
+ ohdr = &hdr_16b->u.oth;
+ else
+ ohdr = &hdr_16b->u.l.oth;
+ }
+ return ohdr;
+}
+
+struct rvt_sge_state;
+
+/*
+ * Get/Set IB link-level config parameters for f_get/set_ib_cfg()
+ * Mostly for MADs that set or query link parameters, also ipath
+ * config interfaces
+ */
+#define HFI1_IB_CFG_LIDLMC 0 /* LID (LS16b) and Mask (MS16b) */
+#define HFI1_IB_CFG_LWID_DG_ENB 1 /* allowed Link-width downgrade */
+#define HFI1_IB_CFG_LWID_ENB 2 /* allowed Link-width */
+#define HFI1_IB_CFG_LWID 3 /* currently active Link-width */
+#define HFI1_IB_CFG_SPD_ENB 4 /* allowed Link speeds */
+#define HFI1_IB_CFG_SPD 5 /* current Link spd */
+#define HFI1_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */
+#define HFI1_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */
+#define HFI1_IB_CFG_LINKLATENCY 8 /* Link Latency (IB1.2 only) */
+#define HFI1_IB_CFG_HRTBT 9 /* IB heartbeat off/enable/auto; DDR/QDR only */
+#define HFI1_IB_CFG_OP_VLS 10 /* operational VLs */
+#define HFI1_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */
+#define HFI1_IB_CFG_VL_LOW_CAP 12 /* num of VL low priority weights */
+#define HFI1_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */
+#define HFI1_IB_CFG_PHYERR_THRESH 14 /* IB PHY error threshold */
+#define HFI1_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */
+#define HFI1_IB_CFG_PKEYS 16 /* update partition keys */
+#define HFI1_IB_CFG_MTU 17 /* update MTU in IBC */
+#define HFI1_IB_CFG_VL_HIGH_LIMIT 19
+#define HFI1_IB_CFG_PMA_TICKS 20 /* PMA sample tick resolution */
+#define HFI1_IB_CFG_PORT 21 /* switch port we are connected to */
+
+/*
+ * HFI or Host Link States
+ *
+ * These describe the logical and physical link states the driver thinks
+ * the port is in. Used as an argument to set_link_state(). Implemented
+ * as bits for easy multi-state checking; the actual state can only be
+ * one at a time.
+ */
+#define __HLS_UP_INIT_BP 0
+#define __HLS_UP_ARMED_BP 1
+#define __HLS_UP_ACTIVE_BP 2
+#define __HLS_DN_DOWNDEF_BP 3 /* link down default */
+#define __HLS_DN_POLL_BP 4
+#define __HLS_DN_DISABLE_BP 5
+#define __HLS_DN_OFFLINE_BP 6
+#define __HLS_VERIFY_CAP_BP 7
+#define __HLS_GOING_UP_BP 8
+#define __HLS_GOING_OFFLINE_BP 9
+#define __HLS_LINK_COOLDOWN_BP 10
+
+#define HLS_UP_INIT BIT(__HLS_UP_INIT_BP)
+#define HLS_UP_ARMED BIT(__HLS_UP_ARMED_BP)
+#define HLS_UP_ACTIVE BIT(__HLS_UP_ACTIVE_BP)
+#define HLS_DN_DOWNDEF BIT(__HLS_DN_DOWNDEF_BP) /* link down default */
+#define HLS_DN_POLL BIT(__HLS_DN_POLL_BP)
+#define HLS_DN_DISABLE BIT(__HLS_DN_DISABLE_BP)
+#define HLS_DN_OFFLINE BIT(__HLS_DN_OFFLINE_BP)
+#define HLS_VERIFY_CAP BIT(__HLS_VERIFY_CAP_BP)
+#define HLS_GOING_UP BIT(__HLS_GOING_UP_BP)
+#define HLS_GOING_OFFLINE BIT(__HLS_GOING_OFFLINE_BP)
+#define HLS_LINK_COOLDOWN BIT(__HLS_LINK_COOLDOWN_BP)
+
+#define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE)
+#define HLS_DOWN ~(HLS_UP)
+
+#define HLS_DEFAULT HLS_DN_POLL
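+
+/*
+ * Illustrative sketch (not part of this patch): because the HLS_* values are
+ * individual bits, several states can be tested with a single mask. The
+ * helper name below is hypothetical.
+ */
+static inline bool __hls_is_up_example(u32 host_link_state)
+{
+ /* true for any of HLS_UP_INIT, HLS_UP_ARMED or HLS_UP_ACTIVE */
+ return !!(host_link_state & HLS_UP);
+}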
+
+/* use this MTU size if none other is given */
+#define HFI1_DEFAULT_ACTIVE_MTU 10240
+/* use this MTU size as the default maximum */
+#define HFI1_DEFAULT_MAX_MTU 10240
+/* default partition key */
+#define DEFAULT_PKEY 0xffff
+
+/*
+ * Possible fabric manager config parameters for fm_{get,set}_table()
+ */
+#define FM_TBL_VL_HIGH_ARB 1 /* Get/set VL high prio weights */
+#define FM_TBL_VL_LOW_ARB 2 /* Get/set VL low prio weights */
+#define FM_TBL_BUFFER_CONTROL 3 /* Get/set Buffer Control */
+#define FM_TBL_SC2VLNT 4 /* Get/set SC->VLnt */
+#define FM_TBL_VL_PREEMPT_ELEMS 5 /* Get (no set) VL preempt elems */
+#define FM_TBL_VL_PREEMPT_MATRIX 6 /* Get (no set) VL preempt matrix */
+
+/*
+ * Possible "operations" for f_rcvctrl(ppd, op, ctxt)
+ * these are bits so they can be combined, e.g.
+ * HFI1_RCVCTRL_INTRAVAIL_ENB | HFI1_RCVCTRL_CTXT_ENB
+ */
+#define HFI1_RCVCTRL_TAILUPD_ENB 0x01
+#define HFI1_RCVCTRL_TAILUPD_DIS 0x02
+#define HFI1_RCVCTRL_CTXT_ENB 0x04
+#define HFI1_RCVCTRL_CTXT_DIS 0x08
+#define HFI1_RCVCTRL_INTRAVAIL_ENB 0x10
+#define HFI1_RCVCTRL_INTRAVAIL_DIS 0x20
+#define HFI1_RCVCTRL_PKEY_ENB 0x40 /* Note, default is enabled */
+#define HFI1_RCVCTRL_PKEY_DIS 0x80
+#define HFI1_RCVCTRL_TIDFLOW_ENB 0x0400
+#define HFI1_RCVCTRL_TIDFLOW_DIS 0x0800
+#define HFI1_RCVCTRL_ONE_PKT_EGR_ENB 0x1000
+#define HFI1_RCVCTRL_ONE_PKT_EGR_DIS 0x2000
+#define HFI1_RCVCTRL_NO_RHQ_DROP_ENB 0x4000
+#define HFI1_RCVCTRL_NO_RHQ_DROP_DIS 0x8000
+#define HFI1_RCVCTRL_NO_EGR_DROP_ENB 0x10000
+#define HFI1_RCVCTRL_NO_EGR_DROP_DIS 0x20000
+#define HFI1_RCVCTRL_URGENT_ENB 0x40000
+#define HFI1_RCVCTRL_URGENT_DIS 0x80000
+
+/* partition enforcement flags */
+#define HFI1_PART_ENFORCE_IN 0x1
+#define HFI1_PART_ENFORCE_OUT 0x2
+
+/* how often we check for synthetic counter wrap around */
+#define SYNTH_CNT_TIME 3
+
+/* Counter flags */
+#define CNTR_NORMAL 0x0 /* Normal counters, just read register */
+#define CNTR_SYNTH 0x1 /* Synthetic counters, saturate at all 1s */
+#define CNTR_DISABLED 0x2 /* Disable this counter */
+#define CNTR_32BIT 0x4 /* Simulate 64 bits for this counter */
+#define CNTR_VL 0x8 /* Per VL counter */
+#define CNTR_SDMA 0x10
+#define CNTR_INVALID_VL -1 /* Specifies invalid VL */
+#define CNTR_MODE_W 0x0
+#define CNTR_MODE_R 0x1
+
+/* VLs Supported/Operational */
+#define HFI1_MIN_VLS_SUPPORTED 1
+#define HFI1_MAX_VLS_SUPPORTED 8
+
+#define HFI1_GUIDS_PER_PORT 5
+#define HFI1_PORT_GUID_INDEX 0
+
+static inline void incr_cntr64(u64 *cntr)
+{
+ if (*cntr < (u64)-1LL)
+ (*cntr)++;
+}
+
+#define MAX_NAME_SIZE 64
+struct hfi1_msix_entry {
+ enum irq_type type;
+ int irq;
+ void *arg;
+ cpumask_t mask;
+ struct irq_affinity_notify notify;
+};
+
+struct hfi1_msix_info {
+ /* lock to synchronize in_use_msix access */
+ spinlock_t msix_lock;
+ DECLARE_BITMAP(in_use_msix, CCE_NUM_MSIX_VECTORS);
+ struct hfi1_msix_entry *msix_entries;
+ u16 max_requested;
+};
+
+/* per-SL CCA information */
+struct cca_timer {
+ struct hrtimer hrtimer;
+ struct hfi1_pportdata *ppd; /* read-only */
+ int sl; /* read-only */
+ u16 ccti; /* read/write - current value of CCTI */
+};
+
+struct link_down_reason {
+ /*
+ * SMA-facing value. Should be set from .latest when
+ * HLS_UP_* -> HLS_DN_* transition actually occurs.
+ */
+ u8 sma;
+ u8 latest;
+};
+
+enum {
+ LO_PRIO_TABLE,
+ HI_PRIO_TABLE,
+ MAX_PRIO_TABLE
+};
+
+struct vl_arb_cache {
+ /* protect vl arb cache */
+ spinlock_t lock;
+ struct ib_vl_weight_elem table[VL_ARB_TABLE_SIZE];
+};
+
+/*
+ * The structure below encapsulates data relevant to a physical IB Port.
+ * Current chips support only one such port, but the separation
+ * clarifies things a bit. Note that to conform to IB conventions,
+ * port-numbers are one-based. The first or only port is port1.
+ */
+struct hfi1_pportdata {
+ struct hfi1_ibport ibport_data;
+
+ struct hfi1_devdata *dd;
+
+ /* PHY support */
+ struct qsfp_data qsfp_info;
+ /* Values for SI tuning of SerDes */
+ u32 port_type;
+ u32 tx_preset_eq;
+ u32 tx_preset_noeq;
+ u32 rx_preset;
+ u8 local_atten;
+ u8 remote_atten;
+ u8 default_atten;
+ u8 max_power_class;
+
+ /* did we read platform config from scratch registers? */
+ bool config_from_scratch;
+
+ /* GUIDs for this interface, in host order, guids[0] is a port guid */
+ u64 guids[HFI1_GUIDS_PER_PORT];
+
+ /* GUID for peer interface, in host order */
+ u64 neighbor_guid;
+
+ /* up or down physical link state */
+ u32 linkup;
+
+ /*
+ * this address is mapped read-only into user processes so they can
+ * get status cheaply, whenever they want. One qword of status per port
+ */
+ u64 *statusp;
+
+ /* SendDMA related entries */
+
+ struct workqueue_struct *hfi1_wq;
+ struct workqueue_struct *link_wq;
+
+ /* move out of interrupt context */
+ struct work_struct link_vc_work;
+ struct work_struct link_up_work;
+ struct work_struct link_down_work;
+ struct work_struct sma_message_work;
+ struct work_struct freeze_work;
+ struct work_struct link_downgrade_work;
+ struct work_struct link_bounce_work;
+ struct delayed_work start_link_work;
+ /* host link state variables */
+ struct mutex hls_lock;
+ u32 host_link_state;
+
+ /* these are the "32 bit" regs */
+
+ u32 ibmtu; /* The MTU programmed for this unit */
+ /*
+ * Current max size IB packet (in bytes) including IB headers, that
+ * we can send. Changes when ibmtu changes.
+ */
+ u32 ibmaxlen;
+ u32 current_egress_rate; /* units [10^6 bits/sec] */
+ /* LID programmed for this instance */
+ u32 lid;
+ /* list of pkeys programmed; 0 if not set */
+ u16 pkeys[MAX_PKEY_VALUES];
+ u16 link_width_supported;
+ u16 link_width_downgrade_supported;
+ u16 link_speed_supported;
+ u16 link_width_enabled;
+ u16 link_width_downgrade_enabled;
+ u16 link_speed_enabled;
+ u16 link_width_active;
+ u16 link_width_downgrade_tx_active;
+ u16 link_width_downgrade_rx_active;
+ u16 link_speed_active;
+ u8 vls_supported;
+ u8 vls_operational;
+ u8 actual_vls_operational;
+ /* LID mask control */
+ u8 lmc;
+ /* Rx Polarity inversion (compensate for ~tx on partner) */
+ u8 rx_pol_inv;
+
+ u8 hw_pidx; /* physical port index */
+ u32 port; /* IB port number and index into dd->pports - 1 */
+ /* type of neighbor node */
+ u8 neighbor_type;
+ u8 neighbor_normal;
+ u8 neighbor_fm_security; /* 1 if firmware checking is disabled */
+ u8 neighbor_port_number;
+ u8 is_sm_config_started;
+ u8 offline_disabled_reason;
+ u8 is_active_optimize_enabled;
+ u8 driver_link_ready; /* driver ready for active link */
+ u8 link_enabled; /* link enabled? */
+ u8 linkinit_reason;
+ u8 local_tx_rate; /* rate given to 8051 firmware */
+ u8 qsfp_retry_count;
+
+ /* placeholders for IB MAD packet settings */
+ u8 overrun_threshold;
+ u8 phy_error_threshold;
+ unsigned int is_link_down_queued;
+
+ /* Used to override LED behavior for things like maintenance beaconing */
+ /*
+ * Alternates per phase of blink
+ * [0] holds LED off duration, [1] holds LED on duration
+ */
+ unsigned long led_override_vals[2];
+ u8 led_override_phase; /* LSB picks from vals[] */
+ atomic_t led_override_timer_active;
+ /* Used to flash LEDs in override mode */
+ struct timer_list led_override_timer;
+
+ u32 sm_trap_qp;
+ u32 sa_qp;
+
+ /*
+ * cca_timer_lock protects access to the per-SL cca_timer
+ * structures (specifically the ccti member).
+ */
+ spinlock_t cca_timer_lock ____cacheline_aligned_in_smp;
+ struct cca_timer cca_timer[OPA_MAX_SLS];
+
+ /* List of congestion control table entries */
+ struct ib_cc_table_entry_shadow ccti_entries[CC_TABLE_SHADOW_MAX];
+
+ /* congestion entries, each entry corresponding to a SL */
+ struct opa_congestion_setting_entry_shadow
+ congestion_entries[OPA_MAX_SLS];
+
+ /*
+ * cc_state_lock protects (write) access to the per-port
+ * struct cc_state.
+ */
+ spinlock_t cc_state_lock ____cacheline_aligned_in_smp;
+
+ struct cc_state __rcu *cc_state;
+
+ /* Total number of congestion control table entries */
+ u16 total_cct_entry;
+
+ /* Bit map identifying service level */
+ u32 cc_sl_control_map;
+
+ /* CA's max number of 64 entry units in the congestion control table */
+ u8 cc_max_table_entries;
+
+ /*
+ * begin congestion log related entries
+ * cc_log_lock protects all congestion log related data
+ */
+ spinlock_t cc_log_lock ____cacheline_aligned_in_smp;
+ u8 threshold_cong_event_map[OPA_MAX_SLS / 8];
+ u16 threshold_event_counter;
+ struct opa_hfi1_cong_log_event_internal cc_events[OPA_CONG_LOG_ELEMS];
+ int cc_log_idx; /* index for logging events */
+ int cc_mad_idx; /* index for reporting events */
+ /* end congestion log related entries */
+
+ struct vl_arb_cache vl_arb_cache[MAX_PRIO_TABLE];
+
+ /* port relative counter buffer */
+ u64 *cntrs;
+ /* port relative synthetic counter buffer */
+ u64 *scntrs;
+ /* port_xmit_discards are synthesized from different egress errors */
+ u64 port_xmit_discards;
+ u64 port_xmit_discards_vl[C_VL_COUNT];
+ u64 port_xmit_constraint_errors;
+ u64 port_rcv_constraint_errors;
+ /* count of 'link_err' interrupts from DC */
+ u64 link_downed;
+ /* number of times link retrained successfully */
+ u64 link_up;
+ /* number of times a link unknown frame was reported */
+ u64 unknown_frame_count;
+ /* port_ltp_crc_mode is returned in 'portinfo' MADs */
+ u16 port_ltp_crc_mode;
+ /* port_crc_mode_enabled is the crc we support */
+ u8 port_crc_mode_enabled;
+ /* mgmt_allowed is also returned in 'portinfo' MADs */
+ u8 mgmt_allowed;
+ u8 part_enforce; /* partition enforcement flags */
+ struct link_down_reason local_link_down_reason;
+ struct link_down_reason neigh_link_down_reason;
+ /* Value to be sent to link peer on LinkDown. */
+ u8 remote_link_down_reason;
+ /* Error events that will cause a port bounce. */
+ u32 port_error_action;
+ struct work_struct linkstate_active_work;
+ /* Does this port need to prescan for FECNs */
+ bool cc_prescan;
+ /*
+ * Sample sendWaitCnt & sendWaitVlCnt during link transition
+ * and counter request.
+ */
+ u64 port_vl_xmit_wait_last[C_VL_COUNT + 1];
+ u16 prev_link_width;
+ u64 vl_xmit_flit_cnt[C_VL_COUNT + 1];
+};
+
+typedef void (*opcode_handler)(struct hfi1_packet *packet);
+typedef void (*hfi1_make_req)(struct rvt_qp *qp,
+ struct hfi1_pkt_state *ps,
+ struct rvt_swqe *wqe);
+extern const rhf_rcv_function_ptr normal_rhf_rcv_functions[];
+extern const rhf_rcv_function_ptr netdev_rhf_rcv_functions[];
+
+/* return values for the RHF receive functions */
+#define RHF_RCV_CONTINUE 0 /* keep going */
+#define RHF_RCV_DONE 1 /* stop, this packet processed */
+#define RHF_RCV_REPROCESS 2 /* stop, retain this packet */
+
+struct rcv_array_data {
+ u16 ngroups;
+ u16 nctxt_extra;
+ u8 group_size;
+};
+
+struct per_vl_data {
+ u16 mtu;
+ struct send_context *sc;
+};
+
+/* 16 to directly index */
+#define PER_VL_SEND_CONTEXTS 16
+
+struct err_info_rcvport {
+ u8 status_and_code;
+ u64 packet_flit1;
+ u64 packet_flit2;
+};
+
+struct err_info_constraint {
+ u8 status;
+ u16 pkey;
+ u32 slid;
+};
+
+struct hfi1_temp {
+ unsigned int curr; /* current temperature */
+ unsigned int lo_lim; /* low temperature limit */
+ unsigned int hi_lim; /* high temperature limit */
+ unsigned int crit_lim; /* critical temperature limit */
+ u8 triggers; /* temperature triggers */
+};
+
+struct hfi1_i2c_bus {
+ struct hfi1_devdata *controlling_dd; /* current controlling device */
+ struct i2c_adapter adapter; /* bus details */
+ struct i2c_algo_bit_data algo; /* bus algorithm details */
+ int num; /* bus number, 0 or 1 */
+};
+
+/* common data between shared ASIC HFIs */
+struct hfi1_asic_data {
+ struct hfi1_devdata *dds[2]; /* back pointers */
+ struct mutex asic_resource_mutex;
+ struct hfi1_i2c_bus *i2c_bus0;
+ struct hfi1_i2c_bus *i2c_bus1;
+};
+
+/* sizes for both the QP and RSM map tables */
+#define NUM_MAP_ENTRIES 256
+#define NUM_MAP_REGS 32
+
+/* Virtual NIC information */
+struct hfi1_vnic_data {
+ struct kmem_cache *txreq_cache;
+ u8 num_vports;
+};
+
+struct hfi1_vnic_vport_info;
+
+/* device data struct now contains only "general per-device" info.
+ * fields related to a physical IB port are in a hfi1_pportdata struct.
+ */
+struct sdma_engine;
+struct sdma_vl_map;
+
+#define BOARD_VERS_MAX 96 /* how long the version string can be */
+#define SERIAL_MAX 16 /* length of the serial number */
+
+typedef int (*send_routine)(struct rvt_qp *, struct hfi1_pkt_state *, u64);
+struct hfi1_netdev_rx;
+struct hfi1_devdata {
+ struct hfi1_ibdev verbs_dev; /* must be first */
+ /* pointers to related structs for this device */
+ /* pci access data structure */
+ struct pci_dev *pcidev;
+ struct cdev user_cdev;
+ struct cdev diag_cdev;
+ struct cdev ui_cdev;
+ struct device *user_device;
+ struct device *diag_device;
+ struct device *ui_device;
+
+ /* first mapping up to RcvArray */
+ u8 __iomem *kregbase1;
+ resource_size_t physaddr;
+
+ /* second uncached mapping from RcvArray to pio send buffers */
+ u8 __iomem *kregbase2;
+ /* for detecting offset above kregbase2 address */
+ u32 base2_start;
+
+ /* Per VL data. Enough for all VLs but not all elements are set/used. */
+ struct per_vl_data vld[PER_VL_SEND_CONTEXTS];
+ /* send context data */
+ struct send_context_info *send_contexts;
+ /* map hardware send contexts to software index */
+ u8 *hw_to_sw;
+ /* spinlock for allocating and releasing send context resources */
+ spinlock_t sc_lock;
+ /* lock for pio_map */
+ spinlock_t pio_map_lock;
+ /* Send Context initialization lock. */
+ spinlock_t sc_init_lock;
+ /* lock for sdma_map */
+ spinlock_t sde_map_lock;
+ /* array of kernel send contexts */
+ struct send_context **kernel_send_context;
+ /* array of vl maps */
+ struct pio_vl_map __rcu *pio_map;
+ /* default flags to last descriptor */
+ u64 default_desc1;
+
+ /* fields common to all SDMA engines */
+
+ volatile __le64 *sdma_heads_dma; /* DMA'ed by chip */
+ dma_addr_t sdma_heads_phys;
+ void *sdma_pad_dma; /* DMA'ed by chip */
+ dma_addr_t sdma_pad_phys;
+ /* for deallocation */
+ size_t sdma_heads_size;
+ /* num used */
+ u32 num_sdma;
+ /* array of engines sized by num_sdma */
+ struct sdma_engine *per_sdma;
+ /* array of vl maps */
+ struct sdma_vl_map __rcu *sdma_map;
+ /* SPC freeze waitqueue and variable */
+ wait_queue_head_t sdma_unfreeze_wq;
+ atomic_t sdma_unfreeze_count;
+
+ u32 lcb_access_count; /* count of LCB users */
+
+ /* common data between shared ASIC HFIs in this OS */
+ struct hfi1_asic_data *asic_data;
+
+ /* mem-mapped pointer to base of PIO buffers */
+ void __iomem *piobase;
+ /*
+ * write-combining mem-mapped pointer to base of RcvArray
+ * memory.
+ */
+ void __iomem *rcvarray_wc;
+ /*
+ * credit return base - a per-NUMA range of DMA address that
+ * the chip will use to update the per-context free counter
+ */
+ struct credit_return_base *cr_base;
+
+ /* send context numbers and sizes for each type */
+ struct sc_config_sizes sc_sizes[SC_MAX];
+
+ char *boardname; /* human readable board info */
+
+ u64 ctx0_seq_drop;
+
+ /* reset value */
+ u64 z_int_counter;
+ u64 z_rcv_limit;
+ u64 z_send_schedule;
+
+ u64 __percpu *send_schedule;
+ /* number of reserved contexts for netdev usage */
+ u16 num_netdev_contexts;
+ /* number of receive contexts in use by the driver */
+ u32 num_rcv_contexts;
+ /* number of pio send contexts in use by the driver */
+ u32 num_send_contexts;
+ /*
+ * number of ctxts available for PSM open
+ */
+ u32 freectxts;
+ /* total number of available user/PSM contexts */
+ u32 num_user_contexts;
+ /* base receive interrupt timeout, in CSR units */
+ u32 rcv_intr_timeout_csr;
+
+ spinlock_t sendctrl_lock; /* protect changes to SendCtrl */
+ spinlock_t rcvctrl_lock; /* protect changes to RcvCtrl */
+ spinlock_t uctxt_lock; /* protect rcd changes */
+ struct mutex dc8051_lock; /* exclusive access to 8051 */
+ struct workqueue_struct *update_cntr_wq;
+ struct work_struct update_cntr_work;
+ /* exclusive access to 8051 memory */
+ spinlock_t dc8051_memlock;
+ int dc8051_timed_out; /* remember if the 8051 timed out */
+ /*
+ * A page that will hold event notification bitmaps for all
+ * contexts. This page will be mapped into all processes.
+ */
+ unsigned long *events;
+ /*
+ * per unit status, see also portdata statusp
+ * mapped read-only into user processes so they can get unit and
+ * IB link status cheaply
+ */
+ struct hfi1_status *status;
+
+ /* revision register shadow */
+ u64 revision;
+ /* Base GUID for device (network order) */
+ u64 base_guid;
+
+ /* both sides of the PCIe link are gen3 capable */
+ u8 link_gen3_capable;
+ u8 dc_shutdown;
+ /* localbus width (1, 2, 4, 8, 16, 32) from config space */
+ u32 lbus_width;
+ /* localbus speed in MHz */
+ u32 lbus_speed;
+ int unit; /* unit # of this chip */
+ int node; /* home node of this chip */
+
+ /* save these PCI fields to restore after a reset */
+ u32 pcibar0;
+ u32 pcibar1;
+ u32 pci_rom;
+ u16 pci_command;
+ u16 pcie_devctl;
+ u16 pcie_lnkctl;
+ u16 pcie_devctl2;
+ u32 pci_msix0;
+ u32 pci_tph2;
+
+ /*
+ * ASCII serial number, from flash, large enough for original
+ * all digit strings, and longer serial number format
+ */
+ u8 serial[SERIAL_MAX];
+ /* human readable board version */
+ u8 boardversion[BOARD_VERS_MAX];
+ u8 lbus_info[32]; /* human readable localbus info */
+ /* chip major rev, from CceRevision */
+ u8 majrev;
+ /* chip minor rev, from CceRevision */
+ u8 minrev;
+ /* hardware ID */
+ u8 hfi1_id;
+ /* implementation code */
+ u8 icode;
+ /* vAU of this device */
+ u8 vau;
+ /* vCU of this device */
+ u8 vcu;
+ /* link credits of this device */
+ u16 link_credits;
+ /* initial vl15 credits to use */
+ u16 vl15_init;
+
+ /*
+ * Cached value for vl15buf, read during verify cap interrupt. VL15
+ * credits are to be kept at 0 and set when handling the link-up
+ * interrupt. This removes the possibility of receiving VL15 MAD
+ * packets before this HFI is ready.
+ */
+ u16 vl15buf_cached;
+
+ /* Misc small ints */
+ u8 n_krcv_queues;
+ u8 qos_shift;
+
+ u16 irev; /* implementation revision */
+ u32 dc8051_ver; /* 8051 firmware version */
+
+ spinlock_t hfi1_diag_trans_lock; /* protect diag observer ops */
+ struct platform_config platform_config;
+ struct platform_config_cache pcfg_cache;
+
+ struct diag_client *diag_client;
+
+ /* general interrupt: mask of handled interrupts */
+ u64 gi_mask[CCE_NUM_INT_CSRS];
+
+ struct rcv_array_data rcv_entries;
+
+ /* cycle length of PS* counters in HW (in picoseconds) */
+ u16 psxmitwait_check_rate;
+
+ /*
+ * 64 bit synthetic counters
+ */
+ struct timer_list synth_stats_timer;
+
+ /* MSI-X information */
+ struct hfi1_msix_info msix_info;
+
+ /*
+ * device counters
+ */
+ char *cntrnames;
+ size_t cntrnameslen;
+ size_t ndevcntrs;
+ u64 *cntrs;
+ u64 *scntrs;
+
+ /*
+ * remembered values for synthetic counters
+ */
+ u64 last_tx;
+ u64 last_rx;
+
+ /*
+ * per-port counters
+ */
+ size_t nportcntrs;
+ char *portcntrnames;
+ size_t portcntrnameslen;
+
+ struct err_info_rcvport err_info_rcvport;
+ struct err_info_constraint err_info_rcv_constraint;
+ struct err_info_constraint err_info_xmit_constraint;
+
+ atomic_t drop_packet;
+ bool do_drop;
+ u8 err_info_uncorrectable;
+ u8 err_info_fmconfig;
+
+ /*
+ * Software counters for the status bits defined by the
+ * associated error status registers
+ */
+ u64 cce_err_status_cnt[NUM_CCE_ERR_STATUS_COUNTERS];
+ u64 rcv_err_status_cnt[NUM_RCV_ERR_STATUS_COUNTERS];
+ u64 misc_err_status_cnt[NUM_MISC_ERR_STATUS_COUNTERS];
+ u64 send_pio_err_status_cnt[NUM_SEND_PIO_ERR_STATUS_COUNTERS];
+ u64 send_dma_err_status_cnt[NUM_SEND_DMA_ERR_STATUS_COUNTERS];
+ u64 send_egress_err_status_cnt[NUM_SEND_EGRESS_ERR_STATUS_COUNTERS];
+ u64 send_err_status_cnt[NUM_SEND_ERR_STATUS_COUNTERS];
+
+ /* Software counter that spans all contexts */
+ u64 sw_ctxt_err_status_cnt[NUM_SEND_CTXT_ERR_STATUS_COUNTERS];
+ /* Software counter that spans all DMA engines */
+ u64 sw_send_dma_eng_err_status_cnt[
+ NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS];
+ /* Software counter that aggregates all cce_err_status errors */
+ u64 sw_cce_err_status_aggregate;
+ /* Software counter that aggregates all bypass packet rcv errors */
+ u64 sw_rcv_bypass_packet_errors;
+
+ /* Save the enabled LCB error bits */
+ u64 lcb_err_en;
+ struct cpu_mask_set *comp_vect;
+ int *comp_vect_mappings;
+ u32 comp_vect_possible_cpus;
+
+ /*
+ * Capability to have different send engines simply by changing a
+ * pointer value.
+ */
+ send_routine process_pio_send ____cacheline_aligned_in_smp;
+ send_routine process_dma_send;
+ void (*pio_inline_send)(struct hfi1_devdata *dd, struct pio_buf *pbuf,
+ u64 pbc, const void *from, size_t count);
+ int (*process_vnic_dma_send)(struct hfi1_devdata *dd, u8 q_idx,
+ struct hfi1_vnic_vport_info *vinfo,
+ struct sk_buff *skb, u64 pbc, u8 plen);
+ /* hfi1_pportdata, points to array of (physical) port-specific
+ * data structs, indexed by pidx (0..n-1)
+ */
+ struct hfi1_pportdata *pport;
+ /* receive context data */
+ struct hfi1_ctxtdata **rcd;
+ u64 __percpu *int_counter;
+ /* verbs tx opcode stats */
+ struct hfi1_opcode_stats_perctx __percpu *tx_opstats;
+ /* device (not port) flags, basically device capabilities */
+ u16 flags;
+ /* Number of physical ports available */
+ u8 num_pports;
+ /* Lowest context number which can be used by user processes or VNIC */
+ u8 first_dyn_alloc_ctxt;
+ /* adding a new field here would make it part of this cacheline */
+
+ /* seqlock for sc2vl */
+ seqlock_t sc2vl_lock ____cacheline_aligned_in_smp;
+ u64 sc2vl[4];
+ u64 __percpu *rcv_limit;
+ /* adding a new field here would make it part of this cacheline */
+
+ /* OUI comes from the HW. Used everywhere as 3 separate bytes. */
+ u8 oui1;
+ u8 oui2;
+ u8 oui3;
+
+ /* Timer and counter used to detect RcvBufOvflCnt changes */
+ struct timer_list rcverr_timer;
+
+ wait_queue_head_t event_queue;
+
+ /* receive context tail dummy address */
+ __le64 *rcvhdrtail_dummy_kvaddr;
+ dma_addr_t rcvhdrtail_dummy_dma;
+
+ u32 rcv_ovfl_cnt;
+ /* Serialize ASPM enable/disable between multiple verbs contexts */
+ spinlock_t aspm_lock;
+ /* Number of verbs contexts which have disabled ASPM */
+ atomic_t aspm_disabled_cnt;
+ /* Keeps track of user space clients */
+ refcount_t user_refcount;
+ /* Used to wait for outstanding user space clients before dev removal */
+ struct completion user_comp;
+
+ bool eprom_available; /* true if EPROM is available for this device */
+ bool aspm_supported; /* Does HW support ASPM */
+ bool aspm_enabled; /* ASPM state: enabled/disabled */
+ struct rhashtable *sdma_rht;
+
+ /* vnic data */
+ struct hfi1_vnic_data vnic;
+ /* Lock to protect IRQ SRC register access */
+ spinlock_t irq_src_lock;
+ int vnic_num_vports;
+ struct hfi1_netdev_rx *netdev_rx;
+ struct hfi1_affinity_node *affinity_entry;
+
+ /* Keeps track of IPoIB RSM rule users */
+ atomic_t ipoib_rsm_usr_num;
+};
+
+/* 8051 firmware version helper */
+#define dc8051_ver(a, b, c) ((a) << 16 | (b) << 8 | (c))
+#define dc8051_ver_maj(a) (((a) & 0xff0000) >> 16)
+#define dc8051_ver_min(a) (((a) & 0x00ff00) >> 8)
+#define dc8051_ver_patch(a) ((a) & 0x0000ff)
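+
+/*
+ * Worked example (illustrative, not part of this patch): dc8051_ver(1, 27, 0)
+ * packs to 0x011b00; dc8051_ver_maj() then yields 1, dc8051_ver_min() 27 and
+ * dc8051_ver_patch() 0.
+ */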
+
+/* f_put_tid types */
+#define PT_EXPECTED 0
+#define PT_EAGER 1
+#define PT_INVALID_FLUSH 2
+#define PT_INVALID 3
+
+struct tid_rb_node;
+
+/* Private data for file operations */
+struct hfi1_filedata {
+ struct srcu_struct pq_srcu;
+ struct hfi1_devdata *dd;
+ struct hfi1_ctxtdata *uctxt;
+ struct hfi1_user_sdma_comp_q *cq;
+ /* update side lock for SRCU */
+ spinlock_t pq_rcu_lock;
+ struct hfi1_user_sdma_pkt_q __rcu *pq;
+ u16 subctxt;
+ /* for cpu affinity; -1 if none */
+ int rec_cpu_num;
+ u32 tid_n_pinned;
+ bool use_mn;
+ struct tid_rb_node **entry_to_rb;
+ spinlock_t tid_lock; /* protect tid_[limit,used] counters */
+ u32 tid_limit;
+ u32 tid_used;
+ u32 *invalid_tids;
+ u32 invalid_tid_idx;
+ /* protect invalid_tids array and invalid_tid_idx */
+ spinlock_t invalid_lock;
+};
+
+extern struct xarray hfi1_dev_table;
+struct hfi1_devdata *hfi1_lookup(int unit);
+
+static inline unsigned long uctxt_offset(struct hfi1_ctxtdata *uctxt)
+{
+ return (uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
+ HFI1_MAX_SHARED_CTXTS;
+}
+
+int hfi1_init(struct hfi1_devdata *dd, int reinit);
+int hfi1_count_active_units(void);
+
+int hfi1_diag_add(struct hfi1_devdata *dd);
+void hfi1_diag_remove(struct hfi1_devdata *dd);
+void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup);
+
+void handle_user_interrupt(struct hfi1_ctxtdata *rcd);
+
+int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
+int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd);
+int hfi1_create_kctxts(struct hfi1_devdata *dd);
+int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
+ struct hfi1_ctxtdata **rcd);
+void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd);
+void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
+ struct hfi1_devdata *dd, u8 hw_pidx, u32 port);
+void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
+int hfi1_rcd_put(struct hfi1_ctxtdata *rcd);
+int hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
+struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
+ u16 ctxt);
+struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt);
+int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
+int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
+int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread);
+int handle_receive_interrupt_napi_fp(struct hfi1_ctxtdata *rcd, int budget);
+int handle_receive_interrupt_napi_sp(struct hfi1_ctxtdata *rcd, int budget);
+void set_all_slowpath(struct hfi1_devdata *dd);
+
+extern const struct pci_device_id hfi1_pci_tbl[];
+void hfi1_make_ud_req_9B(struct rvt_qp *qp,
+ struct hfi1_pkt_state *ps,
+ struct rvt_swqe *wqe);
+
+void hfi1_make_ud_req_16B(struct rvt_qp *qp,
+ struct hfi1_pkt_state *ps,
+ struct rvt_swqe *wqe);
+
+/* receive packet handler dispositions */
+#define RCV_PKT_OK 0x0 /* keep going */
+#define RCV_PKT_LIMIT 0x1 /* stop, hit limit, start thread */
+#define RCV_PKT_DONE 0x2 /* stop, no more packets detected */
+
+/**
+ * hfi1_rcd_head - read accessor for the context head index
+ * @rcd: the context
+ */
+static inline u32 hfi1_rcd_head(struct hfi1_ctxtdata *rcd)
+{
+ return rcd->head;
+}
+
+/**
+ * hfi1_set_rcd_head - write accessor for the context head index
+ * @rcd: the context
+ * @head: the new head
+ */
+static inline void hfi1_set_rcd_head(struct hfi1_ctxtdata *rcd, u32 head)
+{
+ rcd->head = head;
+}
+
+/* calculate the current RHF address */
+static inline __le32 *get_rhf_addr(struct hfi1_ctxtdata *rcd)
+{
+ return (__le32 *)rcd->rcvhdrq + rcd->head + rcd->rhf_offset;
+}
+
+/* return DMA_RTAIL configuration */
+static inline bool get_dma_rtail_setting(struct hfi1_ctxtdata *rcd)
+{
+ return !!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL);
+}
+
+/**
+ * hfi1_seq_incr_wrap - wrapping increment for sequence
+ * @seq: the current sequence number
+ *
+ * Returns: the incremented seq
+ */
+static inline u8 hfi1_seq_incr_wrap(u8 seq)
+{
+ if (++seq > RHF_MAX_SEQ)
+ seq = 1;
+ return seq;
+}
+
+/**
+ * hfi1_seq_cnt - return seq_cnt member
+ * @rcd: the receive context
+ *
+ * Return seq_cnt member
+ */
+static inline u8 hfi1_seq_cnt(struct hfi1_ctxtdata *rcd)
+{
+ return rcd->seq_cnt;
+}
+
+/**
+ * hfi1_set_seq_cnt - set the seq_cnt member
+ * @rcd: the receive context
+ * @cnt: the new sequence count
+ *
+ * Set the seq_cnt member to @cnt
+ */
+static inline void hfi1_set_seq_cnt(struct hfi1_ctxtdata *rcd, u8 cnt)
+{
+ rcd->seq_cnt = cnt;
+}
+
+/**
+ * last_rcv_seq - detect the end of the packet stream
+ * @rcd: the receive context
+ * @seq: the sequence number read from the RHF
+ *
+ * Return true if @seq no longer matches the expected context sequence
+ * count, i.e. there are no more packets to process
+ */
+static inline bool last_rcv_seq(struct hfi1_ctxtdata *rcd, u32 seq)
+{
+ return seq != rcd->seq_cnt;
+}
+
+/**
+ * hfi1_seq_incr - increment context sequence number
+ * @rcd: the receive context
+ * @seq: the current sequence number
+ *
+ * Returns: true if this was the last packet
+ */
+static inline bool hfi1_seq_incr(struct hfi1_ctxtdata *rcd, u32 seq)
+{
+ rcd->seq_cnt = hfi1_seq_incr_wrap(rcd->seq_cnt);
+ return last_rcv_seq(rcd, seq);
+}
+
+/**
+ * get_hdrqentsize - return hdrq entry size
+ * @rcd: the receive context
+ */
+static inline u8 get_hdrqentsize(struct hfi1_ctxtdata *rcd)
+{
+ return rcd->rcvhdrqentsize;
+}
+
+/**
+ * get_hdrq_cnt - return hdrq count
+ * @rcd: the receive context
+ */
+static inline u16 get_hdrq_cnt(struct hfi1_ctxtdata *rcd)
+{
+ return rcd->rcvhdrq_cnt;
+}
+
+/**
+ * hfi1_is_slowpath - check if this context is slow path
+ * @rcd: the receive context
+ */
+static inline bool hfi1_is_slowpath(struct hfi1_ctxtdata *rcd)
+{
+ return rcd->do_interrupt == rcd->slow_handler;
+}
+
+/**
+ * hfi1_is_fastpath - check if this context is fast path
+ * @rcd: the receive context
+ */
+static inline bool hfi1_is_fastpath(struct hfi1_ctxtdata *rcd)
+{
+ if (rcd->ctxt == HFI1_CTRL_CTXT)
+ return false;
+
+ return rcd->do_interrupt == rcd->fast_handler;
+}
+
+/**
+ * hfi1_set_fast - change to the fast handler
+ * @rcd: the receive context
+ */
+static inline void hfi1_set_fast(struct hfi1_ctxtdata *rcd)
+{
+ if (unlikely(!rcd))
+ return;
+ if (unlikely(!hfi1_is_fastpath(rcd)))
+ rcd->do_interrupt = rcd->fast_handler;
+}
+
+int hfi1_reset_device(int);
+
+void receive_interrupt_work(struct work_struct *work);
+
+/* extract service channel from header and rhf */
+static inline int hfi1_9B_get_sc5(struct ib_header *hdr, u64 rhf)
+{
+ return ib_get_sc(hdr) | ((!!(rhf_dc_info(rhf))) << 4);
+}
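+
+/*
+ * Illustrative note (not part of this patch): the 9B LRH carries only the low
+ * four SC bits, so e.g. ib_get_sc() == 0x3 with the RHF DC info bit set gives
+ * sc5 = 0x3 | (1 << 4) = 0x13.
+ */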
+
+#define HFI1_JKEY_WIDTH 16
+#define HFI1_JKEY_MASK (BIT(16) - 1)
+#define HFI1_ADMIN_JKEY_RANGE 32
+
+/*
+ * J_KEYs are split and allocated in the following groups:
+ * 0 - 31 - users with administrator privileges
+ * 32 - 63 - kernel protocols using KDETH packets
+ * 64 - 65535 - all other users using KDETH packets
+ */
+static inline u16 generate_jkey(kuid_t uid)
+{
+ u16 jkey = from_kuid(current_user_ns(), uid) & HFI1_JKEY_MASK;
+
+ if (capable(CAP_SYS_ADMIN))
+ jkey &= HFI1_ADMIN_JKEY_RANGE - 1;
+ else if (jkey < 64)
+ jkey |= BIT(HFI1_JKEY_WIDTH - 1);
+
+ return jkey;
+}
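+
+/*
+ * Worked example (illustrative, not part of this patch): with the mapping
+ * above, an administrator-capable uid 1000 maps to 1000 & 31 = 8 (admin range
+ * 0 - 31), while an unprivileged uid 50 maps to 50 | 0x8000 = 0x8032, keeping
+ * it clear of the 32 - 63 range reserved for kernel protocols.
+ */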
+
+/*
+ * active_egress_rate
+ *
+ * returns the active egress rate in units of [10^6 bits/sec]
+ */
+static inline u32 active_egress_rate(struct hfi1_pportdata *ppd)
+{
+ u16 link_speed = ppd->link_speed_active;
+ u16 link_width = ppd->link_width_active;
+ u32 egress_rate;
+
+ if (link_speed == OPA_LINK_SPEED_25G)
+ egress_rate = 25000;
+ else /* assume OPA_LINK_SPEED_12_5G */
+ egress_rate = 12500;
+
+ switch (link_width) {
+ case OPA_LINK_WIDTH_4X:
+ egress_rate *= 4;
+ break;
+ case OPA_LINK_WIDTH_3X:
+ egress_rate *= 3;
+ break;
+ case OPA_LINK_WIDTH_2X:
+ egress_rate *= 2;
+ break;
+ default:
+ /* assume IB_WIDTH_1X */
+ break;
+ }
+
+ return egress_rate;
+}
+
+/*
+ * egress_cycles
+ *
+ * Returns the number of 'fabric clock cycles' to egress a packet
+ * of length 'len' bytes, at 'rate' Mbit/s. Since the fabric clock
+ * rate is (approximately) 805 MHz, the units of the returned value
+ * are (1/805 MHz).
+ */
+static inline u32 egress_cycles(u32 len, u32 rate)
+{
+ u32 cycles;
+
+ /*
+ * cycles is:
+ *
+ * (length) [bits] / (rate) [bits/sec]
+ * ---------------------------------------------------
+ * fabric_clock_period == 1 /(805 * 10^6) [cycles/sec]
+ */
+
+ cycles = len * 8; /* bits */
+ cycles *= 805;
+ cycles /= rate;
+
+ return cycles;
+}
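+
+/*
+ * Worked example (illustrative, not part of this patch): a 25G link at 4X
+ * gives active_egress_rate() = 100000 Mbit/s, so a 4096-byte packet costs
+ * egress_cycles(4096, 100000) = (4096 * 8 * 805) / 100000 = 263 fabric clock
+ * cycles (integer arithmetic truncates).
+ */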
+
+void set_link_ipg(struct hfi1_pportdata *ppd);
+void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
+ u32 rqpn, u8 svc_type);
+void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
+ u16 pkey, u32 slid, u32 dlid, u8 sc5,
+ const struct ib_grh *old_grh);
+void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
+ u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
+ u8 sc5, const struct ib_grh *old_grh);
+typedef void (*hfi1_handle_cnp)(struct hfi1_ibport *ibp, struct rvt_qp *qp,
+ u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
+ u8 sc5, const struct ib_grh *old_grh);
+
+#define PKEY_CHECK_INVALID -1
+int egress_pkey_check(struct hfi1_pportdata *ppd, u32 slid, u16 pkey,
+ u8 sc5, int8_t s_pkey_index);
+
+#define PACKET_EGRESS_TIMEOUT 350
+static inline void pause_for_credit_return(struct hfi1_devdata *dd)
+{
+ /* Pause at least 1us, to ensure chip returns all credits */
+ u32 usec = cclock_to_ns(dd, PACKET_EGRESS_TIMEOUT) / 1000;
+
+ udelay(usec ? usec : 1);
+}
+
+/**
+ * sc_to_vlt() - reverse lookup sc to vl
+ * @dd: devdata
+ * @sc5: 5 bit sc
+ */
+static inline u8 sc_to_vlt(struct hfi1_devdata *dd, u8 sc5)
+{
+ unsigned seq;
+ u8 rval;
+
+ if (sc5 >= OPA_MAX_SCS)
+ return (u8)(0xff);
+
+ do {
+ seq = read_seqbegin(&dd->sc2vl_lock);
+ rval = *(((u8 *)dd->sc2vl) + sc5);
+ } while (read_seqretry(&dd->sc2vl_lock, seq));
+
+ return rval;
+}
+
+#define PKEY_MEMBER_MASK 0x8000
+#define PKEY_LOW_15_MASK 0x7fff
+
+/*
+ * ingress_pkey_matches_entry - return 1 if the pkey matches ent (ent
+ * being an entry from the ingress partition key table), return 0
+ * otherwise. Use the matching criteria for ingress partition keys
+ * specified in the OPAv1 spec., section 9.10.14.
+ */
+static inline int ingress_pkey_matches_entry(u16 pkey, u16 ent)
+{
+ u16 mkey = pkey & PKEY_LOW_15_MASK;
+ u16 ment = ent & PKEY_LOW_15_MASK;
+
+ if (mkey == ment) {
+ /*
+ * If pkey[15] is clear (limited partition member),
+ * is bit 15 in the corresponding table element
+ * clear (limited member)?
+ */
+ if (!(pkey & PKEY_MEMBER_MASK))
+ return !!(ent & PKEY_MEMBER_MASK);
+ return 1;
+ }
+ return 0;
+}
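+
+/*
+ * Worked examples (illustrative, not part of this patch): a limited-member
+ * pkey 0x0001 matches a full-member table entry 0x8001 (low 15 bits equal,
+ * bit 15 set in the entry) but not a limited-member entry 0x0001, since two
+ * limited members may not communicate. A full-member pkey 0x8001 matches
+ * either form of the entry.
+ */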
+
+/*
+ * ingress_pkey_table_search - search the entire pkey table for
+ * an entry which matches 'pkey'. return 0 if a match is found,
+ * and 1 otherwise.
+ */
+static int ingress_pkey_table_search(struct hfi1_pportdata *ppd, u16 pkey)
+{
+ int i;
+
+ for (i = 0; i < MAX_PKEY_VALUES; i++) {
+ if (ingress_pkey_matches_entry(pkey, ppd->pkeys[i]))
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * ingress_pkey_table_fail - record a failure of ingress pkey validation,
+ * i.e., increment port_rcv_constraint_errors for the port, and record
+ * the 'error info' for this failure.
+ */
+static void ingress_pkey_table_fail(struct hfi1_pportdata *ppd, u16 pkey,
+ u32 slid)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+
+ incr_cntr64(&ppd->port_rcv_constraint_errors);
+ if (!(dd->err_info_rcv_constraint.status & OPA_EI_STATUS_SMASK)) {
+ dd->err_info_rcv_constraint.status |= OPA_EI_STATUS_SMASK;
+ dd->err_info_rcv_constraint.slid = slid;
+ dd->err_info_rcv_constraint.pkey = pkey;
+ }
+}
+
+/*
+ * ingress_pkey_check - Return 0 if the ingress pkey is valid, return 1
+ * otherwise. Use the criteria in the OPAv1 spec, section 9.10.14. idx
+ * is a hint as to the best place in the partition key table to begin
+ * searching. This function should not be called on the data path for
+ * performance reasons. On the data path the pkey check is expected to be
+ * done by HW and rcv_pkey_check() should be called instead.
+ */
+static inline int ingress_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
+ u8 sc5, u8 idx, u32 slid, bool force)
+{
+ if (!(force) && !(ppd->part_enforce & HFI1_PART_ENFORCE_IN))
+ return 0;
+
+ /* If SC15, pkey[0:14] must be 0x7fff */
+ if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
+ goto bad;
+
+ /* Is the pkey = 0x0, or 0x8000? */
+ if ((pkey & PKEY_LOW_15_MASK) == 0)
+ goto bad;
+
+ /* The most likely matching pkey has index 'idx' */
+ if (ingress_pkey_matches_entry(pkey, ppd->pkeys[idx]))
+ return 0;
+
+ /* no match - try the whole table */
+ if (!ingress_pkey_table_search(ppd, pkey))
+ return 0;
+
+bad:
+ ingress_pkey_table_fail(ppd, pkey, slid);
+ return 1;
+}
+
+/*
+ * rcv_pkey_check - Return 0 if the ingress pkey is valid, return 1
+ * otherwise. It only ensures the pkey is valid for QP0. This function
+ * should be called on the data path instead of ingress_pkey_check
+ * since, on the data path, the pkey check is done by HW (except for QP0).
+ */
+static inline int rcv_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
+ u8 sc5, u16 slid)
+{
+ if (!(ppd->part_enforce & HFI1_PART_ENFORCE_IN))
+ return 0;
+
+ /* If SC15, pkey[0:14] must be 0x7fff */
+ if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
+ goto bad;
+
+ return 0;
+bad:
+ ingress_pkey_table_fail(ppd, pkey, slid);
+ return 1;
+}
+
+/* MTU handling */
+
+/* MTU enumeration, 256-4k match IB */
+#define OPA_MTU_0 0
+#define OPA_MTU_256 1
+#define OPA_MTU_512 2
+#define OPA_MTU_1024 3
+#define OPA_MTU_2048 4
+#define OPA_MTU_4096 5
+
+u32 lrh_max_header_bytes(struct hfi1_devdata *dd);
+int mtu_to_enum(u32 mtu, int default_if_bad);
+u16 enum_to_mtu(int mtu);
+static inline int valid_ib_mtu(unsigned int mtu)
+{
+ return mtu == 256 || mtu == 512 ||
+ mtu == 1024 || mtu == 2048 ||
+ mtu == 4096;
+}
+
+static inline int valid_opa_max_mtu(unsigned int mtu)
+{
+ return mtu >= 2048 &&
+ (valid_ib_mtu(mtu) || mtu == 8192 || mtu == 10240);
+}
+
+int set_mtu(struct hfi1_pportdata *ppd);
+
+int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc);
+void hfi1_disable_after_error(struct hfi1_devdata *dd);
+int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit);
+int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encode);
+
+int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t);
+int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t);
+
+void set_up_vau(struct hfi1_devdata *dd, u8 vau);
+void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf);
+void reset_link_credits(struct hfi1_devdata *dd);
+void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);
+
+int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc);
+
+static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd)
+{
+ return ppd->dd;
+}
+
+static inline struct hfi1_devdata *dd_from_dev(struct hfi1_ibdev *dev)
+{
+ return container_of(dev, struct hfi1_devdata, verbs_dev);
+}
+
+static inline struct hfi1_devdata *dd_from_ibdev(struct ib_device *ibdev)
+{
+ return dd_from_dev(to_idev(ibdev));
+}
+
+static inline struct hfi1_pportdata *ppd_from_ibp(struct hfi1_ibport *ibp)
+{
+ return container_of(ibp, struct hfi1_pportdata, ibport_data);
+}
+
+static inline struct hfi1_ibdev *dev_from_rdi(struct rvt_dev_info *rdi)
+{
+ return container_of(rdi, struct hfi1_ibdev, rdi);
+}
+
+static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u32 port)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ u32 pidx = port - 1; /* IB number port from 1, hdw from 0 */
+
+ WARN_ON(pidx >= dd->num_pports);
+ return &dd->pport[pidx].ibport_data;
+}
+
+static inline struct hfi1_ibport *rcd_to_iport(struct hfi1_ctxtdata *rcd)
+{
+ return &rcd->ppd->ibport_data;
+}
+
+/**
+ * hfi1_may_ecn - Check whether FECN or BECN processing should be done
+ * @pkt: the packet to be evaluated
+ *
+ * Check whether the FECN or BECN bits in the packet's header are
+ * enabled, depending on packet type.
+ *
+ * This function only checks for FECN and BECN bits. Additional checks
+ * are done in the slowpath (hfi1_process_ecn_slowpath()) in order to
+ * ensure correct handling.
+ */
+static inline bool hfi1_may_ecn(struct hfi1_packet *pkt)
+{
+ bool fecn, becn;
+
+ if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
+ fecn = hfi1_16B_get_fecn(pkt->hdr);
+ becn = hfi1_16B_get_becn(pkt->hdr);
+ } else {
+ fecn = ib_bth_get_fecn(pkt->ohdr);
+ becn = ib_bth_get_becn(pkt->ohdr);
+ }
+ return fecn || becn;
+}
+
+bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
+ bool prescan);
+static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt)
+{
+ bool do_work;
+
+ do_work = hfi1_may_ecn(pkt);
+ if (unlikely(do_work))
+ return hfi1_process_ecn_slowpath(qp, pkt, false);
+ return false;
+}
+
+/*
+ * Return the indexed PKEY from the port PKEY table.
+ */
+static inline u16 hfi1_get_pkey(struct hfi1_ibport *ibp, unsigned index)
+{
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u16 ret;
+
+ if (index >= ARRAY_SIZE(ppd->pkeys))
+ ret = 0;
+ else
+ ret = ppd->pkeys[index];
+
+ return ret;
+}
+
+/*
+ * Return the indexed GUID from the port GUIDs table.
+ */
+static inline __be64 get_sguid(struct hfi1_ibport *ibp, unsigned int index)
+{
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+ WARN_ON(index >= HFI1_GUIDS_PER_PORT);
+ return cpu_to_be64(ppd->guids[index]);
+}
+
+/*
+ * Called by readers of cc_state only, must call under rcu_read_lock().
+ */
+static inline struct cc_state *get_cc_state(struct hfi1_pportdata *ppd)
+{
+ return rcu_dereference(ppd->cc_state);
+}
+
+/*
+ * Called by writers of cc_state only, must call under cc_state_lock.
+ */
+static inline
+struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
+{
+ return rcu_dereference_protected(ppd->cc_state,
+ lockdep_is_held(&ppd->cc_state_lock));
+}
+
+/*
+ * values for dd->flags (_device_ related flags)
+ */
+#define HFI1_INITTED 0x1 /* chip and driver up and initted */
+#define HFI1_PRESENT 0x2 /* chip accesses can be done */
+#define HFI1_FROZEN 0x4 /* chip in SPC freeze */
+#define HFI1_HAS_SDMA_TIMEOUT 0x8
+#define HFI1_HAS_SEND_DMA 0x10 /* Supports Send DMA */
+#define HFI1_FORCED_FREEZE 0x80 /* driver forced freeze mode */
+#define HFI1_SHUTDOWN 0x100 /* device is shutting down */
+
+/* IB dword length mask in PBC (lower 11 bits); same for all chips */
+#define HFI1_PBC_LENGTH_MASK ((1 << 11) - 1)
+
+/* ctxt_flag bit offsets */
+ /* base context has not finished initializing */
+#define HFI1_CTXT_BASE_UNINIT 1
+ /* base context initialization failed */
+#define HFI1_CTXT_BASE_FAILED 2
+ /* waiting for a packet to arrive */
+#define HFI1_CTXT_WAITING_RCV 3
+ /* waiting for an urgent packet to arrive */
+#define HFI1_CTXT_WAITING_URG 4
+
+/* free up any allocated data at closes */
+int hfi1_init_dd(struct hfi1_devdata *dd);
+void hfi1_free_devdata(struct hfi1_devdata *dd);
+
+/* LED beaconing functions */
+void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
+ unsigned int timeoff);
+void shutdown_led_override(struct hfi1_pportdata *ppd);
+
+#define HFI1_CREDIT_RETURN_RATE (100)
+
+/*
+ * The number of words for the KDETH protocol field. If this is
+ * larger than the actual field used, then part of the payload
+ * will be in the header.
+ *
+ * Optimally, we want this sized so that a typical case will
+ * use full cache lines. The typical local KDETH header would
+ * be:
+ *
+ * Bytes Field
+ * 8 LRH
+ * 12 BTH
+ * ?? KDETH
+ * 8 RHF
+ * ---
+ * 28 + KDETH
+ *
+ * For a 64-byte cache line, KDETH would need to be 36 bytes or 9 DWORDS
+ */
+#define DEFAULT_RCVHDRSIZE 9
+
+/*
+ * Maximal header byte count:
+ *
+ * Bytes Field
+ * 8 LRH
+ * 40 GRH (optional)
+ * 12 BTH
+ * ?? KDETH
+ * 8 RHF
+ * ---
+ * 68 + KDETH
+ *
+ * We also want to maintain a cache line alignment to assist DMA'ing
+ * of the header bytes. Round up to a good size.
+ */
+#define DEFAULT_RCVHDR_ENTSIZE 32
+
+bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
+ u32 nlocked, u32 npages);
+int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr,
+ size_t npages, bool writable, struct page **pages);
+void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
+ size_t npages, bool dirty);
+
+/**
+ * hfi1_rcvhdrtail_kvaddr - return tail kvaddr
+ * @rcd: the receive context
+ */
+static inline __le64 *hfi1_rcvhdrtail_kvaddr(const struct hfi1_ctxtdata *rcd)
+{
+ return (__le64 *)rcd->rcvhdrtail_kvaddr;
+}
+
+static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
+{
+ u64 *kv = (u64 *)hfi1_rcvhdrtail_kvaddr(rcd);
+
+ if (kv)
+ *kv = 0ULL;
+}
+
+static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
+{
+ /*
+ * volatile because it's a DMA target from the chip, the routine is
+ * inlined, and we don't want register caching or reordering.
+ */
+ return (u32)le64_to_cpu(*hfi1_rcvhdrtail_kvaddr(rcd));
+}
+
+static inline bool hfi1_packet_present(struct hfi1_ctxtdata *rcd)
+{
+ if (likely(!rcd->rcvhdrtail_kvaddr)) {
+ u32 seq = rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd)));
+
+ return !last_rcv_seq(rcd, seq);
+ }
+ return hfi1_rcd_head(rcd) != get_rcvhdrtail(rcd);
+}
+
+/*
+ * sysfs interface.
+ */
+
+extern const char ib_hfi1_version[];
+extern const struct attribute_group ib_hfi1_attr_group;
+extern const struct attribute_group *hfi1_attr_port_groups[];
+
+int hfi1_device_create(struct hfi1_devdata *dd);
+void hfi1_device_remove(struct hfi1_devdata *dd);
+
+int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd);
+void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd);
+/* Hook for sysfs read of QSFP */
+int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);
+
+int hfi1_pcie_init(struct hfi1_devdata *dd);
+void hfi1_pcie_cleanup(struct pci_dev *pdev);
+int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev);
+void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
+int pcie_speeds(struct hfi1_devdata *dd);
+int restore_pci_variables(struct hfi1_devdata *dd);
+int save_pci_variables(struct hfi1_devdata *dd);
+int do_pcie_gen3_transition(struct hfi1_devdata *dd);
+void tune_pcie_caps(struct hfi1_devdata *dd);
+int parse_platform_config(struct hfi1_devdata *dd);
+int get_platform_config_field(struct hfi1_devdata *dd,
+ enum platform_config_table_type_encoding
+ table_type, int table_index, int field_index,
+ u32 *data, u32 len);
+
+struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi);
+
+/*
+ * Flush write combining store buffers (if present) and perform a write
+ * barrier.
+ */
+static inline void flush_wc(void)
+{
+ asm volatile("sfence" : : : "memory");
+}
+
+void handle_eflags(struct hfi1_packet *packet);
+void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd);
+
+/* global module parameter variables */
+extern unsigned int hfi1_max_mtu;
+extern unsigned int hfi1_cu;
+extern unsigned int user_credit_return_threshold;
+extern int num_user_contexts;
+extern unsigned long n_krcvqs;
+extern uint krcvqs[];
+extern int krcvqsset;
+extern uint loopback;
+extern uint quick_linkup;
+extern uint rcv_intr_timeout;
+extern uint rcv_intr_count;
+extern uint rcv_intr_dynamic;
+extern ushort link_crc_mask;
+
+extern struct mutex hfi1_mutex;
+
+/* Number of seconds before our card status check... */
+#define STATUS_TIMEOUT 60
+
+#define DRIVER_NAME "hfi1"
+#define HFI1_USER_MINOR_BASE 0
+#define HFI1_TRACE_MINOR 127
+#define HFI1_NMINORS 255
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+#define PCI_DEVICE_ID_INTEL0 0x24f0
+#define PCI_DEVICE_ID_INTEL1 0x24f1
+
+#define HFI1_PKT_USER_SC_INTEGRITY \
+ (SEND_CTXT_CHECK_ENABLE_DISALLOW_NON_KDETH_PACKETS_SMASK \
+ | SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK \
+ | SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_SMASK \
+ | SEND_CTXT_CHECK_ENABLE_DISALLOW_GRH_SMASK)
+
+#define HFI1_PKT_KERNEL_SC_INTEGRITY \
+ (SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK)
+
+static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
+ u16 ctxt_type)
+{
+ u64 base_sc_integrity;
+
+ /* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
+ if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
+ return 0;
+
+ base_sc_integrity =
+ SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
+ | SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK
+ | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
+ | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
+ | SEND_CTXT_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
+#ifndef CONFIG_FAULT_INJECTION
+ | SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_TEST_SMASK
+#endif
+ | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK
+ | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK
+ | SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK
+ | SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_SMASK
+ | SEND_CTXT_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK
+ | SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
+ | SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK
+ | SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK
+ | SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK
+ | SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK;
+
+ if (ctxt_type == SC_USER)
+ base_sc_integrity |=
+#ifndef CONFIG_FAULT_INJECTION
+ SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_TEST_SMASK |
+#endif
+ HFI1_PKT_USER_SC_INTEGRITY;
+ else
+ base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY;
+
+ /* turn on send-side job key checks if !A0 */
+ if (!is_ax(dd))
+ base_sc_integrity |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
+
+ return base_sc_integrity;
+}
+
+static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
+{
+ u64 base_sdma_integrity;
+
+ /* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
+ if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
+ return 0;
+
+ base_sdma_integrity =
+ SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
+ | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
+ | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
+ | SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
+ | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK
+ | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK
+ | SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK
+ | SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_SMASK
+ | SEND_DMA_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK
+ | SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
+ | SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK
+ | SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK
+ | SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK
+ | SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK;
+
+ if (!HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
+ base_sdma_integrity |=
+ SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK;
+
+ /* turn on send-side job key checks if !A0 */
+ if (!is_ax(dd))
+ base_sdma_integrity |=
+ SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
+
+ return base_sdma_integrity;
+}
+
+#define dd_dev_emerg(dd, fmt, ...) \
+ dev_emerg(&(dd)->pcidev->dev, "%s: " fmt, \
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
+
+#define dd_dev_err(dd, fmt, ...) \
+ dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
+
+#define dd_dev_err_ratelimited(dd, fmt, ...) \
+ dev_err_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
+ ##__VA_ARGS__)
+
+#define dd_dev_warn(dd, fmt, ...) \
+ dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
+
+#define dd_dev_warn_ratelimited(dd, fmt, ...) \
+ dev_warn_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
+ ##__VA_ARGS__)
+
+#define dd_dev_info(dd, fmt, ...) \
+ dev_info(&(dd)->pcidev->dev, "%s: " fmt, \
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
+
+#define dd_dev_info_ratelimited(dd, fmt, ...) \
+ dev_info_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
+ ##__VA_ARGS__)
+
+#define dd_dev_dbg(dd, fmt, ...) \
+ dev_dbg(&(dd)->pcidev->dev, "%s: " fmt, \
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
+
+#define hfi1_dev_porterr(dd, port, fmt, ...) \
+ dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (port), ##__VA_ARGS__)
+
+#define USER_OPCODE_CHECK_VAL 0xC0
+#define USER_OPCODE_CHECK_MASK 0xC0
+#define OPCODE_CHECK_VAL_DISABLED 0x0
+#define OPCODE_CHECK_MASK_DISABLED 0x0
+
+static inline void hfi1_reset_cpu_counters(struct hfi1_devdata *dd)
+{
+ struct hfi1_pportdata *ppd;
+ int i;
+
+ dd->z_int_counter = get_all_cpu_total(dd->int_counter);
+ dd->z_rcv_limit = get_all_cpu_total(dd->rcv_limit);
+ dd->z_send_schedule = get_all_cpu_total(dd->send_schedule);
+
+ ppd = (struct hfi1_pportdata *)(dd + 1);
+ for (i = 0; i < dd->num_pports; i++, ppd++) {
+ ppd->ibport_data.rvp.z_rc_acks =
+ get_all_cpu_total(ppd->ibport_data.rvp.rc_acks);
+ ppd->ibport_data.rvp.z_rc_qacks =
+ get_all_cpu_total(ppd->ibport_data.rvp.rc_qacks);
+ }
+}
+
+/* Control LED state */
+static inline void setextled(struct hfi1_devdata *dd, u32 on)
+{
+ if (on)
+ write_csr(dd, DCC_CFG_LED_CNTRL, 0x1F);
+ else
+ write_csr(dd, DCC_CFG_LED_CNTRL, 0x10);
+}
+
+/* return the i2c resource given the target */
+static inline u32 i2c_target(u32 target)
+{
+ return target ? CR_I2C2 : CR_I2C1;
+}
+
+/* return the i2c chain chip resource that this HFI uses for QSFP */
+static inline u32 qsfp_resource(struct hfi1_devdata *dd)
+{
+ return i2c_target(dd->hfi1_id);
+}
+
+/* Is this device integrated or discrete? */
+static inline bool is_integrated(struct hfi1_devdata *dd)
+{
+ return dd->pcidev->device == PCI_DEVICE_ID_INTEL1;
+}
+
+/**
+ * hfi1_need_drop - detect need for drop
+ * @dd: - the device
+ *
+ * In some cases, the first packet needs to be dropped.
+ *
+ * Return true if the current packet needs to be dropped and false otherwise.
+ */
+static inline bool hfi1_need_drop(struct hfi1_devdata *dd)
+{
+ if (unlikely(dd->do_drop &&
+ atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) ==
+ DROP_PACKET_ON)) {
+ dd->do_drop = false;
+ return true;
+ }
+ return false;
+}
+
+int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);
+
+#define DD_DEV_ENTRY(dd) __string(dev, dev_name(&(dd)->pcidev->dev))
+#define DD_DEV_ASSIGN(dd) __assign_str(dev)
+
+static inline void hfi1_update_ah_attr(struct ib_device *ibdev,
+ struct rdma_ah_attr *attr)
+{
+ struct hfi1_pportdata *ppd;
+ struct hfi1_ibport *ibp;
+ u32 dlid = rdma_ah_get_dlid(attr);
+
+ /*
+ * Kernel clients may not have setup GRH information
+ * Set that here.
+ */
+ ibp = to_iport(ibdev, rdma_ah_get_port_num(attr));
+ ppd = ppd_from_ibp(ibp);
+ if ((((dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ||
+ (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))) &&
+ (dlid != be32_to_cpu(OPA_LID_PERMISSIVE)) &&
+ (dlid != be16_to_cpu(IB_LID_PERMISSIVE)) &&
+ (!(rdma_ah_get_ah_flags(attr) & IB_AH_GRH))) ||
+ (rdma_ah_get_make_grd(attr))) {
+ rdma_ah_set_ah_flags(attr, IB_AH_GRH);
+ rdma_ah_set_interface_id(attr, OPA_MAKE_ID(dlid));
+ rdma_ah_set_subnet_prefix(attr, ibp->rvp.gid_prefix);
+ }
+}
+
+/*
+ * hfi1_check_mcast - Check if the given lid is
+ * in the OPA multicast range.
+ *
+ * The LID might either reside in ah.dlid or might be
+ * in the GRH of the address handle as DGID if extended
+ * addresses are in use.
+ */
+static inline bool hfi1_check_mcast(u32 lid)
+{
+ return ((lid >= opa_get_mcast_base(OPA_MCAST_NR)) &&
+ (lid != be32_to_cpu(OPA_LID_PERMISSIVE)));
+}
+
+#define opa_get_lid(lid, format) \
+ __opa_get_lid(lid, OPA_PORT_PACKET_FORMAT_##format)
+
+/* Convert a lid to a specific lid space */
+static inline u32 __opa_get_lid(u32 lid, u8 format)
+{
+ bool is_mcast = hfi1_check_mcast(lid);
+
+ switch (format) {
+ case OPA_PORT_PACKET_FORMAT_8B:
+ case OPA_PORT_PACKET_FORMAT_10B:
+ if (is_mcast)
+ return (lid - opa_get_mcast_base(OPA_MCAST_NR) +
+ 0xF0000);
+ return lid & 0xFFFFF;
+ case OPA_PORT_PACKET_FORMAT_16B:
+ if (is_mcast)
+ return (lid - opa_get_mcast_base(OPA_MCAST_NR) +
+ 0xF00000);
+ return lid & 0xFFFFFF;
+ case OPA_PORT_PACKET_FORMAT_9B:
+ if (is_mcast)
+ return (lid -
+ opa_get_mcast_base(OPA_MCAST_NR) +
+ be16_to_cpu(IB_MULTICAST_LID_BASE));
+ else
+ return lid & 0xFFFF;
+ default:
+ return lid;
+ }
+}
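+
+/*
+ * Illustrative example (not part of the original code): assuming
+ * OPA_MCAST_NR is 4, the multicast base is 0xF0000000.  Converting the
+ * multicast LID 0xF0000003 into the 9B space yields
+ * 0xF0000003 - 0xF0000000 + 0xC000 = 0xC003, while a unicast LID such
+ * as 0x12345 is simply truncated to its low 16 bits, 0x2345.
+ */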
+
+/* Return true if the given lid is in the OPA 16B multicast range */
+static inline bool hfi1_is_16B_mcast(u32 lid)
+{
+ return ((lid >=
+ opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR), 16B)) &&
+ (lid != opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B)));
+}
+
+static inline void hfi1_make_opa_lid(struct rdma_ah_attr *attr)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(attr);
+ u32 dlid = rdma_ah_get_dlid(attr);
+
+ /* Modify ah_attr.dlid to be in the 32 bit LID space.
+ * This is how the address will be laid out:
+ * Assuming MCAST_NR to be 4,
+ * 32 bit permissive LID = 0xFFFFFFFF
+ * Multicast LID range = 0xFFFFFFFE to 0xF0000000
+ * Unicast LID range = 0xEFFFFFFF to 1
+ * Invalid LID = 0
+ */
+ if (ib_is_opa_gid(&grh->dgid))
+ dlid = opa_get_lid_from_gid(&grh->dgid);
+ else if ((dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
+ (dlid != be16_to_cpu(IB_LID_PERMISSIVE)) &&
+ (dlid != be32_to_cpu(OPA_LID_PERMISSIVE)))
+ dlid = dlid - be16_to_cpu(IB_MULTICAST_LID_BASE) +
+ opa_get_mcast_base(OPA_MCAST_NR);
+ else if (dlid == be16_to_cpu(IB_LID_PERMISSIVE))
+ dlid = be32_to_cpu(OPA_LID_PERMISSIVE);
+
+ rdma_ah_set_dlid(attr, dlid);
+}
+
+static inline u8 hfi1_get_packet_type(u32 lid)
+{
+ /* 9B if lid > 0xF0000000 */
+ if (lid >= opa_get_mcast_base(OPA_MCAST_NR))
+ return HFI1_PKT_TYPE_9B;
+
+ /* 16B if lid > 0xC000 */
+ if (lid >= opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR), 9B))
+ return HFI1_PKT_TYPE_16B;
+
+ return HFI1_PKT_TYPE_9B;
+}
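+
+/*
+ * Illustrative example (assuming OPA_MCAST_NR is 4): a LID of 0x1000
+ * fits in the 9B space and yields HFI1_PKT_TYPE_9B, a LID of 0x12345
+ * is above the 9B multicast base (0xC000) and yields HFI1_PKT_TYPE_16B,
+ * and a 32-bit multicast LID such as 0xF0000001 also yields
+ * HFI1_PKT_TYPE_9B.
+ */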
+
+static inline u8 hfi1_get_hdr_type(u32 lid, struct rdma_ah_attr *attr)
+{
+ /*
+ * If there was an incoming 16B packet with permissive
+ * LIDs, OPA GIDs would have been programmed when those
+ * packets were received. A 16B packet will have to
+ * be sent in response to that packet. Return a 16B
+ * header type if that's the case.
+ */
+ if (rdma_ah_get_dlid(attr) == be32_to_cpu(OPA_LID_PERMISSIVE))
+ return (ib_is_opa_gid(&rdma_ah_read_grh(attr)->dgid)) ?
+ HFI1_PKT_TYPE_16B : HFI1_PKT_TYPE_9B;
+
+ /*
+ * Return a 16B header type if either the destination
+ * or source lid is extended.
+ */
+ if (hfi1_get_packet_type(rdma_ah_get_dlid(attr)) == HFI1_PKT_TYPE_16B)
+ return HFI1_PKT_TYPE_16B;
+
+ return hfi1_get_packet_type(lid);
+}
+
+static inline void hfi1_make_ext_grh(struct hfi1_packet *packet,
+ struct ib_grh *grh, u32 slid,
+ u32 dlid)
+{
+ struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+ if (!ibp)
+ return;
+
+ grh->hop_limit = 1;
+ grh->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
+ if (slid == opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B))
+ grh->sgid.global.interface_id =
+ OPA_MAKE_ID(be32_to_cpu(OPA_LID_PERMISSIVE));
+ else
+ grh->sgid.global.interface_id = OPA_MAKE_ID(slid);
+
+ /*
+ * Upper layers (like mad) may compare the dgid in the
+ * wc that is obtained here with the sgid_index in
+ * the wr. Since sgid_index in wr is always 0 for
+ * extended lids, set the dgid here to the default
+ * IB gid.
+ */
+ grh->dgid.global.subnet_prefix = ibp->rvp.gid_prefix;
+ grh->dgid.global.interface_id =
+ cpu_to_be64(ppd->guids[HFI1_PORT_GUID_INDEX]);
+}
+
+static inline int hfi1_get_16b_padding(u32 hdr_size, u32 payload)
+{
+ return -(hdr_size + payload + (SIZE_OF_CRC << 2) +
+ SIZE_OF_LT) & 0x7;
+}
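+
+/*
+ * Illustrative example (assuming SIZE_OF_CRC is one 32-bit word and
+ * SIZE_OF_LT is one byte): a 36-byte header with a 20-byte payload
+ * totals 36 + 20 + 4 + 1 = 61 bytes, and -(61) & 0x7 evaluates to the
+ * 3 bytes of padding needed to reach the next 8-byte boundary.
+ */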
+
+static inline void hfi1_make_ib_hdr(struct ib_header *hdr,
+ u16 lrh0, u16 len,
+ u16 dlid, u16 slid)
+{
+ hdr->lrh[0] = cpu_to_be16(lrh0);
+ hdr->lrh[1] = cpu_to_be16(dlid);
+ hdr->lrh[2] = cpu_to_be16(len);
+ hdr->lrh[3] = cpu_to_be16(slid);
+}
+
+static inline void hfi1_make_16b_hdr(struct hfi1_16b_header *hdr,
+ u32 slid, u32 dlid,
+ u16 len, u16 pkey,
+ bool becn, bool fecn, u8 l4,
+ u8 sc)
+{
+ u32 lrh0 = 0;
+ u32 lrh1 = 0x40000000;
+ u32 lrh2 = 0;
+ u32 lrh3 = 0;
+
+ lrh0 = (lrh0 & ~OPA_16B_BECN_MASK) | (becn << OPA_16B_BECN_SHIFT);
+ lrh0 = (lrh0 & ~OPA_16B_LEN_MASK) | (len << OPA_16B_LEN_SHIFT);
+ lrh0 = (lrh0 & ~OPA_16B_LID_MASK) | (slid & OPA_16B_LID_MASK);
+ lrh1 = (lrh1 & ~OPA_16B_FECN_MASK) | (fecn << OPA_16B_FECN_SHIFT);
+ lrh1 = (lrh1 & ~OPA_16B_SC_MASK) | (sc << OPA_16B_SC_SHIFT);
+ lrh1 = (lrh1 & ~OPA_16B_LID_MASK) | (dlid & OPA_16B_LID_MASK);
+ lrh2 = (lrh2 & ~OPA_16B_SLID_MASK) |
+ ((slid >> OPA_16B_SLID_SHIFT) << OPA_16B_SLID_HIGH_SHIFT);
+ lrh2 = (lrh2 & ~OPA_16B_DLID_MASK) |
+ ((dlid >> OPA_16B_DLID_SHIFT) << OPA_16B_DLID_HIGH_SHIFT);
+ lrh2 = (lrh2 & ~OPA_16B_PKEY_MASK) | ((u32)pkey << OPA_16B_PKEY_SHIFT);
+ lrh2 = (lrh2 & ~OPA_16B_L4_MASK) | l4;
+
+ hdr->lrh[0] = lrh0;
+ hdr->lrh[1] = lrh1;
+ hdr->lrh[2] = lrh2;
+ hdr->lrh[3] = lrh3;
+}
+#endif /* _HFI1_KERNEL_H */
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
new file mode 100644
index 000000000000..b35f92e7d865
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -0,0 +1,1978 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015 - 2020 Intel Corporation.
+ * Copyright(c) 2021 Cornelis Networks.
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/xarray.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/hrtimer.h>
+#include <linux/bitmap.h>
+#include <linux/numa.h>
+#include <rdma/rdma_vt.h>
+
+#include "hfi.h"
+#include "device.h"
+#include "common.h"
+#include "trace.h"
+#include "mad.h"
+#include "sdma.h"
+#include "debugfs.h"
+#include "verbs.h"
+#include "aspm.h"
+#include "affinity.h"
+#include "vnic.h"
+#include "exp_rcv.h"
+#include "netdev.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+
+/*
+ * min buffers we want to have per context, after driver
+ */
+#define HFI1_MIN_USER_CTXT_BUFCNT 7
+
+#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
+#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */
+
+#define NUM_IB_PORTS 1
+
+/*
+ * Number of user receive contexts we are configured to use (to allow for more
+ * pio buffers per ctxt, etc.) Zero means use one user context per CPU.
+ */
+int num_user_contexts = -1;
+module_param_named(num_user_contexts, num_user_contexts, int, 0444);
+MODULE_PARM_DESC(
+ num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)");
+
+uint krcvqs[RXE_NUM_DATA_VL];
+int krcvqsset;
+module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
+MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");
+
+/* computed based on above array */
+unsigned long n_krcvqs;
+
+static unsigned hfi1_rcvarr_split = 25;
+module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");
+
+static uint eager_buffer_size = (8 << 20); /* 8MB */
+module_param(eager_buffer_size, uint, S_IRUGO);
+MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 8MB");
+
+static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
+module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");
+
+static uint hfi1_hdrq_entsize = 32;
+module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, 0444);
+MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B, 32 - 128B (default)");
+
+unsigned int user_credit_return_threshold = 33; /* default is 33% */
+module_param(user_credit_return_threshold, uint, S_IRUGO);
+MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)");
+
+DEFINE_XARRAY_FLAGS(hfi1_dev_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
+
+static int hfi1_create_kctxt(struct hfi1_devdata *dd,
+ struct hfi1_pportdata *ppd)
+{
+ struct hfi1_ctxtdata *rcd;
+ int ret;
+
+ /* Control context has to be always 0 */
+ BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);
+
+ ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
+ if (ret < 0) {
+ dd_dev_err(dd, "Kernel receive context allocation failed\n");
+ return ret;
+ }
+
+ /*
+ * Set up the kernel context flags here and now because they use
+ * default values for all receive side memories. User contexts will
+ * be handled as they are created.
+ */
+ rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
+ HFI1_CAP_KGET(NODROP_RHQ_FULL) |
+ HFI1_CAP_KGET(NODROP_EGR_FULL) |
+ HFI1_CAP_KGET(DMA_RTAIL);
+
+ /* Control context must use DMA_RTAIL */
+ if (rcd->ctxt == HFI1_CTRL_CTXT)
+ rcd->flags |= HFI1_CAP_DMA_RTAIL;
+ rcd->fast_handler = get_dma_rtail_setting(rcd) ?
+ handle_receive_interrupt_dma_rtail :
+ handle_receive_interrupt_nodma_rtail;
+
+ hfi1_set_seq_cnt(rcd, 1);
+
+ rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
+ if (!rcd->sc) {
+ dd_dev_err(dd, "Kernel send context allocation failed\n");
+ return -ENOMEM;
+ }
+ hfi1_init_ctxt(rcd->sc);
+
+ return 0;
+}
+
+/*
+ * Create the receive context array and one or more kernel contexts
+ */
+int hfi1_create_kctxts(struct hfi1_devdata *dd)
+{
+ u16 i;
+ int ret;
+
+ dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd),
+ GFP_KERNEL, dd->node);
+ if (!dd->rcd)
+ return -ENOMEM;
+
+ for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
+ ret = hfi1_create_kctxt(dd, dd->pport);
+ if (ret)
+ goto bail;
+ }
+
+ return 0;
+bail:
+ for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i)
+ hfi1_free_ctxt(dd->rcd[i]);
+
+ /* All the contexts should be freed, free the array */
+ kfree(dd->rcd);
+ dd->rcd = NULL;
+ return ret;
+}
+
+/*
+ * Helper routines for the receive context reference count (rcd and uctxt).
+ */
+static void hfi1_rcd_init(struct hfi1_ctxtdata *rcd)
+{
+ kref_init(&rcd->kref);
+}
+
+/**
+ * hfi1_rcd_free - clean up when the reference count reaches zero.
+ * @kref: pointer to an initialized rcd data structure
+ *
+ */
+static void hfi1_rcd_free(struct kref *kref)
+{
+ unsigned long flags;
+ struct hfi1_ctxtdata *rcd =
+ container_of(kref, struct hfi1_ctxtdata, kref);
+
+ spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
+ rcd->dd->rcd[rcd->ctxt] = NULL;
+ spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);
+
+ hfi1_free_ctxtdata(rcd->dd, rcd);
+
+ kfree(rcd);
+}
+
+/**
+ * hfi1_rcd_put - decrement reference for rcd
+ * @rcd: pointer to an initialized rcd data structure
+ *
+ * Use this to put a reference after the init.
+ */
+int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
+{
+ if (rcd)
+ return kref_put(&rcd->kref, hfi1_rcd_free);
+
+ return 0;
+}
+
+/**
+ * hfi1_rcd_get - increment reference for rcd
+ * @rcd: pointer to an initialized rcd data structure
+ *
+ * Use this to get a reference after the init.
+ *
+ * Return: the result of kref_get_unless_zero(): non-zero if the
+ * reference was taken, otherwise 0.
+ */
+int hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
+{
+ return kref_get_unless_zero(&rcd->kref);
+}
+
+/**
+ * allocate_rcd_index - allocate an rcd index from the rcd array
+ * @dd: pointer to a valid devdata structure
+ * @rcd: rcd data structure to assign
+ * @index: pointer to index that is allocated
+ *
+ * Find an empty index in the rcd array, and assign the given rcd to it.
+ * If the array is full, we are EBUSY.
+ *
+ */
+static int allocate_rcd_index(struct hfi1_devdata *dd,
+ struct hfi1_ctxtdata *rcd, u16 *index)
+{
+ unsigned long flags;
+ u16 ctxt;
+
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
+ if (!dd->rcd[ctxt])
+ break;
+
+ if (ctxt < dd->num_rcv_contexts) {
+ rcd->ctxt = ctxt;
+ dd->rcd[ctxt] = rcd;
+ hfi1_rcd_init(rcd);
+ }
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+
+ if (ctxt >= dd->num_rcv_contexts)
+ return -EBUSY;
+
+ *index = ctxt;
+
+ return 0;
+}
+
+/**
+ * hfi1_rcd_get_by_index_safe - validate the ctxt index before accessing the
+ * array
+ * @dd: pointer to a valid devdata structure
+ * @ctxt: the index of a possible rcd
+ *
+ * This is a wrapper for hfi1_rcd_get_by_index() to validate that the given
+ * ctxt index is valid.
+ *
+ * The caller is responsible for making the _put().
+ *
+ */
+struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
+ u16 ctxt)
+{
+ if (ctxt < dd->num_rcv_contexts)
+ return hfi1_rcd_get_by_index(dd, ctxt);
+
+ return NULL;
+}
+
+/**
+ * hfi1_rcd_get_by_index - get by index
+ * @dd: pointer to a valid devdata structure
+ * @ctxt: the index of a possible rcd
+ *
+ * We need to protect access to the rcd array. If access is needed to
+ * one or more index, get the protecting spinlock and then increment the
+ * kref.
+ *
+ * The caller is responsible for making the _put().
+ *
+ */
+struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
+{
+ unsigned long flags;
+ struct hfi1_ctxtdata *rcd = NULL;
+
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ if (dd->rcd[ctxt]) {
+ rcd = dd->rcd[ctxt];
+ if (!hfi1_rcd_get(rcd))
+ rcd = NULL;
+ }
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+
+ return rcd;
+}
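+
+/*
+ * Typical usage sketch (illustrative only), mirroring callers later in
+ * this file:
+ *
+ *	rcd = hfi1_rcd_get_by_index(dd, ctxt);
+ *	if (rcd) {
+ *		...use the context...
+ *		hfi1_rcd_put(rcd);
+ *	}
+ */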
+
+/*
+ * Common code for user and kernel context create and setup.
+ * NOTE: the initial kref is done here (hfi1_rcd_init()).
+ */
+int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
+ struct hfi1_ctxtdata **context)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ struct hfi1_ctxtdata *rcd;
+ unsigned kctxt_ngroups = 0;
+ u32 base;
+
+ if (dd->rcv_entries.nctxt_extra >
+ dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)
+ kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
+ (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt));
+ rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
+ if (rcd) {
+ u32 rcvtids, max_entries;
+ u16 ctxt;
+ int ret;
+
+ ret = allocate_rcd_index(dd, rcd, &ctxt);
+ if (ret) {
+ *context = NULL;
+ kfree(rcd);
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&rcd->qp_wait_list);
+ hfi1_exp_tid_group_init(rcd);
+ rcd->ppd = ppd;
+ rcd->dd = dd;
+ rcd->numa_id = numa;
+ rcd->rcv_array_groups = dd->rcv_entries.ngroups;
+ rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;
+ rcd->slow_handler = handle_receive_interrupt;
+ rcd->do_interrupt = rcd->slow_handler;
+ rcd->msix_intr = CCE_NUM_MSIX_VECTORS;
+
+ mutex_init(&rcd->exp_mutex);
+ spin_lock_init(&rcd->exp_lock);
+ INIT_LIST_HEAD(&rcd->flow_queue.queue_head);
+ INIT_LIST_HEAD(&rcd->rarr_queue.queue_head);
+
+ hfi1_cdbg(PROC, "setting up context %u", rcd->ctxt);
+
+ /*
+ * Calculate the context's RcvArray entry starting point.
+ * We do this here because we have to take into account all
+ * the RcvArray entries that previous context would have
+ * taken and we have to account for any extra groups assigned
+ * to the static (kernel) or dynamic (vnic/user) contexts.
+ */
+ if (ctxt < dd->first_dyn_alloc_ctxt) {
+ if (ctxt < kctxt_ngroups) {
+ base = ctxt * (dd->rcv_entries.ngroups + 1);
+ rcd->rcv_array_groups++;
+ } else {
+ base = kctxt_ngroups +
+ (ctxt * dd->rcv_entries.ngroups);
+ }
+ } else {
+ u16 ct = ctxt - dd->first_dyn_alloc_ctxt;
+
+ base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
+ kctxt_ngroups);
+ if (ct < dd->rcv_entries.nctxt_extra) {
+ base += ct * (dd->rcv_entries.ngroups + 1);
+ rcd->rcv_array_groups++;
+ } else {
+ base += dd->rcv_entries.nctxt_extra +
+ (ct * dd->rcv_entries.ngroups);
+ }
+ }
+ rcd->eager_base = base * dd->rcv_entries.group_size;
+
+ rcd->rcvhdrq_cnt = rcvhdrcnt;
+ rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
+ rcd->rhf_offset =
+ rcd->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
+ /*
+ * Simple Eager buffer allocation: we have already pre-allocated
+ * the number of RcvArray entry groups. Each ctxtdata structure
+ * holds the number of groups for that context.
+ *
+ * To follow CSR requirements and maintain cacheline alignment,
+ * make sure all sizes and bases are multiples of group_size.
+ *
+ * The expected entry count is what is left after assigning
+ * eager.
+ */
+ max_entries = rcd->rcv_array_groups *
+ dd->rcv_entries.group_size;
+ rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
+ rcd->egrbufs.count = round_down(rcvtids,
+ dd->rcv_entries.group_size);
+ if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
+ dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
+ rcd->ctxt);
+ rcd->egrbufs.count = MAX_EAGER_ENTRIES;
+ }
+ hfi1_cdbg(PROC,
+ "ctxt%u: max Eager buffer RcvArray entries: %u",
+ rcd->ctxt, rcd->egrbufs.count);
+
+ /*
+ * Allocate array that will hold the eager buffer accounting
+ * data.
+ * This will allocate the maximum possible buffer count based
+ * on the value of the RcvArray split parameter.
+ * The resulting value will be rounded down to the closest
+ * multiple of dd->rcv_entries.group_size.
+ */
+ rcd->egrbufs.buffers =
+ kcalloc_node(rcd->egrbufs.count,
+ sizeof(*rcd->egrbufs.buffers),
+ GFP_KERNEL, numa);
+ if (!rcd->egrbufs.buffers)
+ goto bail;
+ rcd->egrbufs.rcvtids =
+ kcalloc_node(rcd->egrbufs.count,
+ sizeof(*rcd->egrbufs.rcvtids),
+ GFP_KERNEL, numa);
+ if (!rcd->egrbufs.rcvtids)
+ goto bail;
+ rcd->egrbufs.size = eager_buffer_size;
+ /*
+ * The size of the buffers programmed into the RcvArray
+ * entries needs to be big enough to handle the highest
+ * MTU supported.
+ */
+ if (rcd->egrbufs.size < hfi1_max_mtu) {
+ rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
+ hfi1_cdbg(PROC,
+ "ctxt%u: eager bufs size too small. Adjusting to %u",
+ rcd->ctxt, rcd->egrbufs.size);
+ }
+ rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;
+
+ /* Applicable only for statically created kernel contexts */
+ if (ctxt < dd->first_dyn_alloc_ctxt) {
+ rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
+ GFP_KERNEL, numa);
+ if (!rcd->opstats)
+ goto bail;
+
+ /* Initialize TID flow generations for the context */
+ hfi1_kern_init_ctxt_generations(rcd);
+ }
+
+ *context = rcd;
+ return 0;
+ }
+
+bail:
+ *context = NULL;
+ hfi1_free_ctxt(rcd);
+ return -ENOMEM;
+}
+
+/**
+ * hfi1_free_ctxt - free context
+ * @rcd: pointer to an initialized rcd data structure
+ *
+ * This wrapper is the free function that matches hfi1_create_ctxtdata().
+ * When a context is done being used (kernel or user), this function is called
+ * for the "final" put to match the kref init from hfi1_create_ctxtdata().
+ * Other users of the context do a get/put sequence to make sure that the
+ * structure isn't removed while in use.
+ */
+void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd)
+{
+ hfi1_rcd_put(rcd);
+}
+
+/*
+ * Select the largest ccti value over all SLs to determine the intra-
+ * packet gap for the link.
+ *
+ * called with cca_timer_lock held (to protect access to cca_timer
+ * array), and rcu_read_lock() (to protect access to cc_state).
+ */
+void set_link_ipg(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ struct cc_state *cc_state;
+ int i;
+ u16 cce, ccti_limit, max_ccti = 0;
+ u16 shift, mult;
+ u64 src;
+ u32 current_egress_rate; /* Mbits /sec */
+ u64 max_pkt_time;
+ /*
+ * max_pkt_time is the maximum packet egress time in units
+ * of the fabric clock period 1/(805 MHz).
+ */
+
+ cc_state = get_cc_state(ppd);
+
+ if (!cc_state)
+ /*
+ * This should _never_ happen - rcu_read_lock() is held,
+ * and set_link_ipg() should not be called if cc_state
+ * is NULL.
+ */
+ return;
+
+ for (i = 0; i < OPA_MAX_SLS; i++) {
+ u16 ccti = ppd->cca_timer[i].ccti;
+
+ if (ccti > max_ccti)
+ max_ccti = ccti;
+ }
+
+ ccti_limit = cc_state->cct.ccti_limit;
+ if (max_ccti > ccti_limit)
+ max_ccti = ccti_limit;
+
+ cce = cc_state->cct.entries[max_ccti].entry;
+ shift = (cce & 0xc000) >> 14;
+ mult = (cce & 0x3fff);
+
+ current_egress_rate = active_egress_rate(ppd);
+
+ max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);
+
+ src = (max_pkt_time >> shift) * mult;
+
+ src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
+ src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;
+
+ write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
+}
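+
+/*
+ * Illustrative example (made-up values): a CCT entry of 0x4005 decodes
+ * to shift = 1 and mult = 5, so a max_pkt_time of 1000 fabric clock
+ * periods gives src = (1000 >> 1) * 5 = 2500 before it is masked and
+ * shifted into SEND_STATIC_RATE_CONTROL.
+ */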
+
+static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
+{
+ struct cca_timer *cca_timer;
+ struct hfi1_pportdata *ppd;
+ int sl;
+ u16 ccti_timer, ccti_min;
+ struct cc_state *cc_state;
+ unsigned long flags;
+ enum hrtimer_restart ret = HRTIMER_NORESTART;
+
+ cca_timer = container_of(t, struct cca_timer, hrtimer);
+ ppd = cca_timer->ppd;
+ sl = cca_timer->sl;
+
+ rcu_read_lock();
+
+ cc_state = get_cc_state(ppd);
+
+ if (!cc_state) {
+ rcu_read_unlock();
+ return HRTIMER_NORESTART;
+ }
+
+ /*
+ * 1) decrement ccti for SL
+ * 2) calculate IPG for link (set_link_ipg())
+ * 3) restart timer, unless ccti is at min value
+ */
+
+ ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
+ ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
+
+ spin_lock_irqsave(&ppd->cca_timer_lock, flags);
+
+ if (cca_timer->ccti > ccti_min) {
+ cca_timer->ccti--;
+ set_link_ipg(ppd);
+ }
+
+ if (cca_timer->ccti > ccti_min) {
+ unsigned long nsec = 1024 * ccti_timer;
+ /* ccti_timer is in units of 1.024 usec */
+ hrtimer_forward_now(t, ns_to_ktime(nsec));
+ ret = HRTIMER_RESTART;
+ }
+
+ spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
+ rcu_read_unlock();
+ return ret;
+}
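+
+/*
+ * Example of the timer arithmetic above (illustrative only): ccti_timer
+ * is in units of 1.024 usec, so a value of 100 re-arms the hrtimer
+ * 1024 * 100 = 102400 ns (102.4 usec) in the future.
+ */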
+
+/*
+ * Common code for initializing the physical port structure.
+ */
+void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
+ struct hfi1_devdata *dd, u8 hw_pidx, u32 port)
+{
+ int i;
+ uint default_pkey_idx;
+ struct cc_state *cc_state;
+
+ ppd->dd = dd;
+ ppd->hw_pidx = hw_pidx;
+ ppd->port = port; /* IB port number, not index */
+ ppd->prev_link_width = LINK_WIDTH_DEFAULT;
+ /*
+ * There are C_VL_COUNT number of PortVLXmitWait counters.
+ * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
+ */
+ for (i = 0; i < C_VL_COUNT + 1; i++) {
+ ppd->port_vl_xmit_wait_last[i] = 0;
+ ppd->vl_xmit_flit_cnt[i] = 0;
+ }
+
+ default_pkey_idx = 1;
+
+ ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
+ ppd->part_enforce |= HFI1_PART_ENFORCE_IN;
+ ppd->pkeys[0] = 0x8001;
+
+ INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
+ INIT_WORK(&ppd->link_up_work, handle_link_up);
+ INIT_WORK(&ppd->link_down_work, handle_link_down);
+ INIT_WORK(&ppd->freeze_work, handle_freeze);
+ INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
+ INIT_WORK(&ppd->sma_message_work, handle_sma_message);
+ INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
+ INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
+ INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
+ INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
+
+ mutex_init(&ppd->hls_lock);
+ spin_lock_init(&ppd->qsfp_info.qsfp_lock);
+
+ ppd->qsfp_info.ppd = ppd;
+ ppd->sm_trap_qp = 0x0;
+ ppd->sa_qp = 0x1;
+
+ ppd->hfi1_wq = NULL;
+
+ spin_lock_init(&ppd->cca_timer_lock);
+
+ for (i = 0; i < OPA_MAX_SLS; i++) {
+ ppd->cca_timer[i].ppd = ppd;
+ ppd->cca_timer[i].sl = i;
+ ppd->cca_timer[i].ccti = 0;
+ hrtimer_setup(&ppd->cca_timer[i].hrtimer, cca_timer_fn, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ }
+
+ ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;
+
+ spin_lock_init(&ppd->cc_state_lock);
+ spin_lock_init(&ppd->cc_log_lock);
+ cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
+ RCU_INIT_POINTER(ppd->cc_state, cc_state);
+ if (!cc_state)
+ goto bail;
+ return;
+
+bail:
+ dd_dev_err(dd, "Congestion Control Agent disabled for port %d\n", port);
+}
+
+/*
+ * Do initialization for device that is only needed on
+ * first detect, not on resets.
+ */
+static int loadtime_init(struct hfi1_devdata *dd)
+{
+ return 0;
+}
+
+/**
+ * init_after_reset - re-initialize after a reset
+ * @dd: the hfi1_ib device
+ *
+ * Sanity check at least some of the values after reset, and
+ * ensure no receive or transmit activity is enabled (explicitly,
+ * in case the reset failed).
+ */
+static int init_after_reset(struct hfi1_devdata *dd)
+{
+ int i;
+ struct hfi1_ctxtdata *rcd;
+ /*
+ * Ensure chip does no sends or receives, tail updates, or
+ * pioavail updates while we re-initialize. This is mostly
+ * for the driver data structures, not chip registers.
+ */
+ for (i = 0; i < dd->num_rcv_contexts; i++) {
+ rcd = hfi1_rcd_get_by_index(dd, i);
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
+ HFI1_RCVCTRL_INTRAVAIL_DIS |
+ HFI1_RCVCTRL_TAILUPD_DIS, rcd);
+ hfi1_rcd_put(rcd);
+ }
+ pio_send_control(dd, PSC_GLOBAL_DISABLE);
+ for (i = 0; i < dd->num_send_contexts; i++)
+ sc_disable(dd->send_contexts[i].sc);
+
+ return 0;
+}
+
+static void enable_chip(struct hfi1_devdata *dd)
+{
+ struct hfi1_ctxtdata *rcd;
+ u32 rcvmask;
+ u16 i;
+
+ /* enable PIO send */
+ pio_send_control(dd, PSC_GLOBAL_ENABLE);
+
+ /*
+ * Enable kernel ctxts' receive and receive interrupt.
+ * Other ctxts done as user opens and initializes them.
+ */
+ for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
+ rcd = hfi1_rcd_get_by_index(dd, i);
+ if (!rcd)
+ continue;
+ rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
+ rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
+ HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
+ if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
+ rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
+ if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_RHQ_FULL))
+ rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
+ if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL))
+ rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
+ if (HFI1_CAP_IS_KSET(TID_RDMA))
+ rcvmask |= HFI1_RCVCTRL_TIDFLOW_ENB;
+ hfi1_rcvctrl(dd, rcvmask, rcd);
+ sc_enable(rcd->sc);
+ hfi1_rcd_put(rcd);
+ }
+}
+
+/**
+ * create_workqueues - create per port workqueues
+ * @dd: the hfi1_ib device
+ */
+static int create_workqueues(struct hfi1_devdata *dd)
+{
+ int pidx;
+ struct hfi1_pportdata *ppd;
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (!ppd->hfi1_wq) {
+ ppd->hfi1_wq =
+ alloc_workqueue(
+ "hfi%d_%d",
+ WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE |
+ WQ_MEM_RECLAIM,
+ HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
+ dd->unit, pidx);
+ if (!ppd->hfi1_wq)
+ goto wq_error;
+ }
+ if (!ppd->link_wq) {
+ /*
+ * Make the link workqueue single-threaded to enforce
+ * serialization.
+ */
+ ppd->link_wq =
+ alloc_workqueue(
+ "hfi_link_%d_%d",
+ WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND,
+ 1, /* max_active */
+ dd->unit, pidx);
+ if (!ppd->link_wq)
+ goto wq_error;
+ }
+ }
+ return 0;
+wq_error:
+ pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (ppd->hfi1_wq) {
+ destroy_workqueue(ppd->hfi1_wq);
+ ppd->hfi1_wq = NULL;
+ }
+ if (ppd->link_wq) {
+ destroy_workqueue(ppd->link_wq);
+ ppd->link_wq = NULL;
+ }
+ }
+ return -ENOMEM;
+}
+
+/**
+ * destroy_workqueues - destroy per port workqueues
+ * @dd: the hfi1_ib device
+ */
+static void destroy_workqueues(struct hfi1_devdata *dd)
+{
+ int pidx;
+ struct hfi1_pportdata *ppd;
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+
+ if (ppd->hfi1_wq) {
+ destroy_workqueue(ppd->hfi1_wq);
+ ppd->hfi1_wq = NULL;
+ }
+ if (ppd->link_wq) {
+ destroy_workqueue(ppd->link_wq);
+ ppd->link_wq = NULL;
+ }
+ }
+}
+
+/**
+ * enable_general_intr() - Enable the IRQs that will be handled by the
+ * general interrupt handler.
+ * @dd: valid devdata
+ *
+ */
+static void enable_general_intr(struct hfi1_devdata *dd)
+{
+ set_intr_bits(dd, CCE_ERR_INT, MISC_ERR_INT, true);
+ set_intr_bits(dd, PIO_ERR_INT, TXE_ERR_INT, true);
+ set_intr_bits(dd, IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, true);
+ set_intr_bits(dd, PBC_INT, GPIO_ASSERT_INT, true);
+ set_intr_bits(dd, TCRIT_INT, TCRIT_INT, true);
+ set_intr_bits(dd, IS_DC_START, IS_DC_END, true);
+ set_intr_bits(dd, IS_SENDCREDIT_START, IS_SENDCREDIT_END, true);
+}
+
+/**
+ * hfi1_init - do the actual initialization sequence on the chip
+ * @dd: the hfi1_ib device
+ * @reinit: re-initializing, so don't allocate new memory
+ *
+ * Do the actual initialization sequence on the chip. This is done
+ * both from the init routine called from the PCI infrastructure, and
+ * when we reset the chip, or detect that it was reset internally,
+ * or it's administratively re-enabled.
+ *
+ * Memory allocation here and in called routines is only done in
+ * the first case (reinit == 0). We have to be careful, because even
+ * without memory allocation, we need to re-write all the chip registers
+ * TIDs, etc. after the reset or enable has completed.
+ */
+int hfi1_init(struct hfi1_devdata *dd, int reinit)
+{
+ int ret = 0, pidx, lastfail = 0;
+ unsigned long len;
+ u16 i;
+ struct hfi1_ctxtdata *rcd;
+ struct hfi1_pportdata *ppd;
+
+ /* Set up send low level handlers */
+ dd->process_pio_send = hfi1_verbs_send_pio;
+ dd->process_dma_send = hfi1_verbs_send_dma;
+ dd->pio_inline_send = pio_copy;
+ dd->process_vnic_dma_send = hfi1_vnic_send_dma;
+
+ if (is_ax(dd)) {
+ atomic_set(&dd->drop_packet, DROP_PACKET_ON);
+ dd->do_drop = true;
+ } else {
+ atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
+ dd->do_drop = false;
+ }
+
+ /* make sure the link is not "up" */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ ppd->linkup = 0;
+ }
+
+ if (reinit)
+ ret = init_after_reset(dd);
+ else
+ ret = loadtime_init(dd);
+ if (ret)
+ goto done;
+
+ /* dd->rcd can be NULL if early initialization failed */
+ for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
+ /*
+ * Set up the (kernel) rcvhdr queue and egr TIDs. If doing
+ * re-init, the simplest way to handle this is to free
+ * existing, and re-allocate.
+ * Need to re-create rest of ctxt 0 ctxtdata as well.
+ */
+ rcd = hfi1_rcd_get_by_index(dd, i);
+ if (!rcd)
+ continue;
+
+ lastfail = hfi1_create_rcvhdrq(dd, rcd);
+ if (!lastfail)
+ lastfail = hfi1_setup_eagerbufs(rcd);
+ if (!lastfail)
+ lastfail = hfi1_kern_exp_rcv_init(rcd, reinit);
+ if (lastfail) {
+ dd_dev_err(dd,
+ "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
+ ret = lastfail;
+ }
+ /* enable IRQ */
+ hfi1_rcd_put(rcd);
+ }
+
+ /* Allocate enough memory for user event notification. */
+ len = PAGE_ALIGN(chip_rcv_contexts(dd) * HFI1_MAX_SHARED_CTXTS *
+ sizeof(*dd->events));
+ dd->events = vmalloc_user(len);
+ if (!dd->events)
+ dd_dev_err(dd, "Failed to allocate user events page\n");
+ /*
+ * Allocate a page for device and port status.
+ * Page will be shared amongst all user processes.
+ */
+ dd->status = vmalloc_user(PAGE_SIZE);
+ if (!dd->status)
+ dd_dev_err(dd, "Failed to allocate dev status page\n");
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (dd->status)
+ /* Currently, we only have one port */
+ ppd->statusp = &dd->status->port;
+
+ set_mtu(ppd);
+ }
+
+ /* enable chip even if we have an error, so we can debug cause */
+ enable_chip(dd);
+
+done:
+ /*
+ * Set status even if port serdes is not initialized
+ * so that diags will work.
+ */
+ if (dd->status)
+ dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
+ HFI1_STATUS_INITTED;
+ if (!ret) {
+ /* enable all interrupts from the chip */
+ enable_general_intr(dd);
+ init_qsfp_int(dd);
+
+ /* chip is OK for user apps; mark it as initialized */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+
+ /*
+ * start the serdes - must be after interrupts are
+ * enabled so we are notified when the link goes up
+ */
+ lastfail = bringup_serdes(ppd);
+ if (lastfail)
+ dd_dev_info(dd,
+ "Failed to bring up port %u\n",
+ ppd->port);
+
+ /*
+ * Set status even if port serdes is not initialized
+ * so that diags will work.
+ */
+ if (ppd->statusp)
+ *ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
+ HFI1_STATUS_INITTED;
+ if (!ppd->link_speed_enabled)
+ continue;
+ }
+ }
+
+ /* if ret is non-zero, we probably should do some cleanup here... */
+ return ret;
+}
+
+struct hfi1_devdata *hfi1_lookup(int unit)
+{
+ return xa_load(&hfi1_dev_table, unit);
+}
+
+/*
+ * Stop the timers during unit shutdown, or after an error late
+ * in initialization.
+ */
+static void stop_timers(struct hfi1_devdata *dd)
+{
+ struct hfi1_pportdata *ppd;
+ int pidx;
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (ppd->led_override_timer.function) {
+ timer_delete_sync(&ppd->led_override_timer);
+ atomic_set(&ppd->led_override_timer_active, 0);
+ }
+ }
+}
+
+/**
+ * shutdown_device - shut down a device
+ * @dd: the hfi1_ib device
+ *
+ * This is called to make the device quiet when we are about to
+ * unload the driver, and also when the device is administratively
+ * disabled. It does not free any data structures.
+ * Everything it does has to be set up again by hfi1_init(dd, 1).
+ */
+static void shutdown_device(struct hfi1_devdata *dd)
+{
+ struct hfi1_pportdata *ppd;
+ struct hfi1_ctxtdata *rcd;
+ unsigned pidx;
+ int i;
+
+ if (dd->flags & HFI1_SHUTDOWN)
+ return;
+ dd->flags |= HFI1_SHUTDOWN;
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+
+ ppd->linkup = 0;
+ if (ppd->statusp)
+ *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
+ HFI1_STATUS_IB_READY);
+ }
+ dd->flags &= ~HFI1_INITTED;
+
+ /* mask and clean up interrupts */
+ set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
+ msix_clean_up_interrupts(dd);
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ for (i = 0; i < dd->num_rcv_contexts; i++) {
+ rcd = hfi1_rcd_get_by_index(dd, i);
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
+ HFI1_RCVCTRL_CTXT_DIS |
+ HFI1_RCVCTRL_INTRAVAIL_DIS |
+ HFI1_RCVCTRL_PKEY_DIS |
+ HFI1_RCVCTRL_ONE_PKT_EGR_DIS, rcd);
+ hfi1_rcd_put(rcd);
+ }
+ /*
+ * Gracefully stop all sends allowing any in progress to
+ * trickle out first.
+ */
+ for (i = 0; i < dd->num_send_contexts; i++)
+ sc_flush(dd->send_contexts[i].sc);
+ }
+
+ /*
+ * Enough for anything that's going to trickle out to have actually
+ * done so.
+ */
+ udelay(20);
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+
+ /* disable all contexts */
+ for (i = 0; i < dd->num_send_contexts; i++)
+ sc_disable(dd->send_contexts[i].sc);
+ /* disable the send device */
+ pio_send_control(dd, PSC_GLOBAL_DISABLE);
+
+ shutdown_led_override(ppd);
+
+ /*
+ * Clear SerdesEnable.
+ * We can't count on interrupts since we are stopping.
+ */
+ hfi1_quiet_serdes(ppd);
+ if (ppd->hfi1_wq)
+ flush_workqueue(ppd->hfi1_wq);
+ if (ppd->link_wq)
+ flush_workqueue(ppd->link_wq);
+ }
+ sdma_exit(dd);
+}
+
+/**
+ * hfi1_free_ctxtdata - free a context's allocated data
+ * @dd: the hfi1_ib device
+ * @rcd: the ctxtdata structure
+ *
+ * free up any allocated data for a context
+ * It should never change any chip state, or global driver state.
+ */
+void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
+{
+ u32 e;
+
+ if (!rcd)
+ return;
+
+ if (rcd->rcvhdrq) {
+ dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd),
+ rcd->rcvhdrq, rcd->rcvhdrq_dma);
+ rcd->rcvhdrq = NULL;
+ if (hfi1_rcvhdrtail_kvaddr(rcd)) {
+ dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
+ (void *)hfi1_rcvhdrtail_kvaddr(rcd),
+ rcd->rcvhdrqtailaddr_dma);
+ rcd->rcvhdrtail_kvaddr = NULL;
+ }
+ }
+
+ /* all the RcvArray entries should have been cleared by now */
+ kfree(rcd->egrbufs.rcvtids);
+ rcd->egrbufs.rcvtids = NULL;
+
+ for (e = 0; e < rcd->egrbufs.alloced; e++) {
+ if (rcd->egrbufs.buffers[e].addr)
+ dma_free_coherent(&dd->pcidev->dev,
+ rcd->egrbufs.buffers[e].len,
+ rcd->egrbufs.buffers[e].addr,
+ rcd->egrbufs.buffers[e].dma);
+ }
+ kfree(rcd->egrbufs.buffers);
+ rcd->egrbufs.alloced = 0;
+ rcd->egrbufs.buffers = NULL;
+
+ sc_free(rcd->sc);
+ rcd->sc = NULL;
+
+ vfree(rcd->subctxt_uregbase);
+ vfree(rcd->subctxt_rcvegrbuf);
+ vfree(rcd->subctxt_rcvhdr_base);
+ kfree(rcd->opstats);
+
+ rcd->subctxt_uregbase = NULL;
+ rcd->subctxt_rcvegrbuf = NULL;
+ rcd->subctxt_rcvhdr_base = NULL;
+ rcd->opstats = NULL;
+}
+
+/*
+ * Release our hold on the shared asic data. If we are the last one,
+ * return the structure to be finalized outside the lock. Must be
+ * holding hfi1_dev_table lock.
+ */
+static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
+{
+ struct hfi1_asic_data *ad;
+ int other;
+
+ if (!dd->asic_data)
+ return NULL;
+ dd->asic_data->dds[dd->hfi1_id] = NULL;
+ other = dd->hfi1_id ? 0 : 1;
+ ad = dd->asic_data;
+ dd->asic_data = NULL;
+ /* return NULL if the other dd still has a link */
+ return ad->dds[other] ? NULL : ad;
+}
+
+static void finalize_asic_data(struct hfi1_devdata *dd,
+ struct hfi1_asic_data *ad)
+{
+ clean_up_i2c(dd, ad);
+ kfree(ad);
+}
+
+/**
+ * hfi1_free_devdata - cleans up and frees per-unit data structure
+ * @dd: pointer to a valid devdata structure
+ *
+ * It cleans up and frees all data structures set up by
+ * hfi1_alloc_devdata().
+ */
+void hfi1_free_devdata(struct hfi1_devdata *dd)
+{
+ struct hfi1_asic_data *ad;
+ unsigned long flags;
+
+ xa_lock_irqsave(&hfi1_dev_table, flags);
+ __xa_erase(&hfi1_dev_table, dd->unit);
+ ad = release_asic_data(dd);
+ xa_unlock_irqrestore(&hfi1_dev_table, flags);
+
+ finalize_asic_data(dd, ad);
+ free_platform_config(dd);
+ rcu_barrier(); /* wait for rcu callbacks to complete */
+ free_percpu(dd->int_counter);
+ free_percpu(dd->rcv_limit);
+ free_percpu(dd->send_schedule);
+ free_percpu(dd->tx_opstats);
+ dd->int_counter = NULL;
+ dd->rcv_limit = NULL;
+ dd->send_schedule = NULL;
+ dd->tx_opstats = NULL;
+ kfree(dd->comp_vect);
+ dd->comp_vect = NULL;
+ if (dd->rcvhdrtail_dummy_kvaddr)
+ dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
+ (void *)dd->rcvhdrtail_dummy_kvaddr,
+ dd->rcvhdrtail_dummy_dma);
+ dd->rcvhdrtail_dummy_kvaddr = NULL;
+ sdma_clean(dd, dd->num_sdma);
+ rvt_dealloc_device(&dd->verbs_dev.rdi);
+}
+
+/**
+ * hfi1_alloc_devdata - Allocate our primary per-unit data structure.
+ * @pdev: Valid PCI device
+ * @extra: How many bytes to alloc past the default
+ *
+ * Must be done via verbs allocator, because the verbs cleanup process
+ * both does cleanup and free of the data structure.
+ * "extra" is for chip-specific data.
+ */
+static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
+ size_t extra)
+{
+ struct hfi1_devdata *dd;
+ int ret, nports;
+
+ /* extra is * number of ports */
+ nports = extra / sizeof(struct hfi1_pportdata);
+
+ dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
+ nports);
+ if (!dd)
+ return ERR_PTR(-ENOMEM);
+ dd->num_pports = nports;
+ dd->pport = (struct hfi1_pportdata *)(dd + 1);
+ dd->pcidev = pdev;
+ pci_set_drvdata(pdev, dd);
+
+ ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b,
+ GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Could not allocate unit ID: error %d\n", -ret);
+ goto bail;
+ }
+ rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);
+ /*
+ * If the BIOS does not have the NUMA node information set, select
+ * NUMA 0 so we get consistent performance.
+ */
+ dd->node = pcibus_to_node(pdev->bus);
+ if (dd->node == NUMA_NO_NODE) {
+ dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
+ dd->node = 0;
+ }
+
+ /*
+ * Initialize all locks for the device. This needs to be as early as
+ * possible so locks are usable.
+ */
+ spin_lock_init(&dd->sc_lock);
+ spin_lock_init(&dd->sendctrl_lock);
+ spin_lock_init(&dd->rcvctrl_lock);
+ spin_lock_init(&dd->uctxt_lock);
+ spin_lock_init(&dd->hfi1_diag_trans_lock);
+ spin_lock_init(&dd->sc_init_lock);
+ spin_lock_init(&dd->dc8051_memlock);
+ seqlock_init(&dd->sc2vl_lock);
+ spin_lock_init(&dd->sde_map_lock);
+ spin_lock_init(&dd->pio_map_lock);
+ mutex_init(&dd->dc8051_lock);
+ init_waitqueue_head(&dd->event_queue);
+ spin_lock_init(&dd->irq_src_lock);
+
+ dd->int_counter = alloc_percpu(u64);
+ if (!dd->int_counter) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ dd->rcv_limit = alloc_percpu(u64);
+ if (!dd->rcv_limit) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ dd->send_schedule = alloc_percpu(u64);
+ if (!dd->send_schedule) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx);
+ if (!dd->tx_opstats) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ dd->comp_vect = kzalloc(sizeof(*dd->comp_vect), GFP_KERNEL);
+ if (!dd->comp_vect) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ /* allocate dummy tail memory for all receive contexts */
+ dd->rcvhdrtail_dummy_kvaddr =
+ dma_alloc_coherent(&dd->pcidev->dev, sizeof(u64),
+ &dd->rcvhdrtail_dummy_dma, GFP_KERNEL);
+ if (!dd->rcvhdrtail_dummy_kvaddr) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ atomic_set(&dd->ipoib_rsm_usr_num, 0);
+ return dd;
+
+bail:
+ hfi1_free_devdata(dd);
+ return ERR_PTR(ret);
+}
+
+/*
+ * Called from freeze mode handlers, and from PCI error
+ * reporting code. Should be paranoid about state of
+ * system and data structures.
+ */
+void hfi1_disable_after_error(struct hfi1_devdata *dd)
+{
+ if (dd->flags & HFI1_INITTED) {
+ u32 pidx;
+
+ dd->flags &= ~HFI1_INITTED;
+ if (dd->pport)
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ struct hfi1_pportdata *ppd;
+
+ ppd = dd->pport + pidx;
+ if (dd->flags & HFI1_PRESENT)
+ set_link_state(ppd, HLS_DN_DISABLE);
+
+ if (ppd->statusp)
+ *ppd->statusp &= ~HFI1_STATUS_IB_READY;
+ }
+ }
+
+ /*
+ * Mark as having had an error for driver, and also
+ * for /sys and status word mapped to user programs.
+ * This marks unit as not usable, until reset.
+ */
+ if (dd->status)
+ dd->status->dev |= HFI1_STATUS_HWERROR;
+}
+
+static void remove_one(struct pci_dev *);
+static int init_one(struct pci_dev *, const struct pci_device_id *);
+static void shutdown_one(struct pci_dev *);
+
+#define DRIVER_LOAD_MSG "Cornelis " DRIVER_NAME " loaded: "
+#define PFX DRIVER_NAME ": "
+
+const struct pci_device_id hfi1_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);
+
+static struct pci_driver hfi1_pci_driver = {
+ .name = DRIVER_NAME,
+ .probe = init_one,
+ .remove = remove_one,
+ .shutdown = shutdown_one,
+ .id_table = hfi1_pci_tbl,
+ .err_handler = &hfi1_pci_err_handler,
+};
+
+static void __init compute_krcvqs(void)
+{
+ int i;
+
+ for (i = 0; i < krcvqsset; i++)
+ n_krcvqs += krcvqs[i];
+}
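+
+/*
+ * Example (illustrative only): loading the module with krcvqs=2,2,2
+ * results in krcvqsset == 3 and n_krcvqs == 6 kernel receive queues
+ * spread over the first three data VLs.
+ */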
+
+/*
+ * Do all the generic driver unit- and chip-independent memory
+ * allocation and initialization.
+ */
+static int __init hfi1_mod_init(void)
+{
+ int ret;
+
+ ret = dev_init();
+ if (ret)
+ goto bail;
+
+ ret = node_affinity_init();
+ if (ret)
+ goto bail;
+
+ /* validate max MTU before any devices start */
+ if (!valid_opa_max_mtu(hfi1_max_mtu)) {
+ pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
+ hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
+ hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
+ }
+ /* valid CUs run from 1-128 in powers of 2 */
+ if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
+ hfi1_cu = 1;
+ /* valid credit return threshold is 0-100, variable is unsigned */
+ if (user_credit_return_threshold > 100)
+ user_credit_return_threshold = 100;
+
+ compute_krcvqs();
+ /*
+ * sanitize receive interrupt count, time must wait until after
+	 * sanitize the receive interrupt count; the timeout must wait
+	 * until after the hardware type is known
+ if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
+ rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
+ /* reject invalid combinations */
+ if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
+ pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
+ rcv_intr_count = 1;
+ }
+ if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
+ /*
+ * Avoid indefinite packet delivery by requiring a timeout
+ * if count is > 1.
+ */
+ pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
+ rcv_intr_timeout = 1;
+ }
+ if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
+ /*
+ * The dynamic algorithm expects a non-zero timeout
+ * and a count > 1.
+ */
+ pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
+ rcv_intr_dynamic = 0;
+ }
+
+ /* sanitize link CRC options */
+ link_crc_mask &= SUPPORTED_CRCS;
+
+ ret = opfn_init();
+ if (ret < 0) {
+ pr_err("Failed to allocate opfn_wq");
+ goto bail_dev;
+ }
+
+ /*
+ * These must be called before the driver is registered with
+ * the PCI subsystem.
+ */
+ hfi1_dbg_init();
+ ret = pci_register_driver(&hfi1_pci_driver);
+ if (ret < 0) {
+ pr_err("Unable to register driver: error %d\n", -ret);
+ goto bail_dev;
+ }
+ goto bail; /* all OK */
+
+bail_dev:
+ hfi1_dbg_exit();
+ dev_cleanup();
+bail:
+ return ret;
+}
+
+module_init(hfi1_mod_init);
+
+/*
+ * Do the non-unit driver cleanup, memory free, etc. at unload.
+ */
+static void __exit hfi1_mod_cleanup(void)
+{
+ pci_unregister_driver(&hfi1_pci_driver);
+ opfn_exit();
+ node_affinity_destroy_all();
+ hfi1_dbg_exit();
+
+ WARN_ON(!xa_empty(&hfi1_dev_table));
+ dispose_firmware(); /* asymmetric with obtain_firmware() */
+ dev_cleanup();
+}
+
+module_exit(hfi1_mod_cleanup);
+
+/* this can only be called after a successful initialization */
+static void cleanup_device_data(struct hfi1_devdata *dd)
+{
+ int ctxt;
+ int pidx;
+
+ /* users can't do anything more with chip */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ struct hfi1_pportdata *ppd = &dd->pport[pidx];
+ struct cc_state *cc_state;
+ int i;
+
+ if (ppd->statusp)
+ *ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;
+
+ for (i = 0; i < OPA_MAX_SLS; i++)
+ hrtimer_cancel(&ppd->cca_timer[i].hrtimer);
+
+ spin_lock(&ppd->cc_state_lock);
+ cc_state = get_cc_state_protected(ppd);
+ RCU_INIT_POINTER(ppd->cc_state, NULL);
+ spin_unlock(&ppd->cc_state_lock);
+
+ if (cc_state)
+ kfree_rcu(cc_state, rcu);
+ }
+
+ free_credit_return(dd);
+
+ /*
+ * Free any resources still in use (usually just kernel contexts)
+	 * at unload; we iterate over all the receive contexts, because
+	 * that is what we allocate.
+ */
+ for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) {
+ struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
+
+ if (rcd) {
+ hfi1_free_ctxt_rcv_groups(rcd);
+ hfi1_free_ctxt(rcd);
+ }
+ }
+
+ kfree(dd->rcd);
+ dd->rcd = NULL;
+
+ free_pio_map(dd);
+ /* must follow rcv context free - need to remove rcv's hooks */
+ for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
+ sc_free(dd->send_contexts[ctxt].sc);
+ dd->num_send_contexts = 0;
+ kfree(dd->send_contexts);
+ dd->send_contexts = NULL;
+ kfree(dd->hw_to_sw);
+ dd->hw_to_sw = NULL;
+ kfree(dd->boardname);
+ vfree(dd->events);
+ vfree(dd->status);
+}
+
+/*
+ * Clean up on unit shutdown, or error during unit load after
+ * successful initialization.
+ */
+static void postinit_cleanup(struct hfi1_devdata *dd)
+{
+ hfi1_start_cleanup(dd);
+ hfi1_comp_vectors_clean_up(dd);
+ hfi1_dev_affinity_clean_up(dd);
+
+ hfi1_pcie_ddcleanup(dd);
+ hfi1_pcie_cleanup(dd->pcidev);
+
+ cleanup_device_data(dd);
+
+ hfi1_free_devdata(dd);
+}
+
+static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int ret = 0, j, pidx, initfail;
+ struct hfi1_devdata *dd;
+ struct hfi1_pportdata *ppd;
+
+ /* First, lock the non-writable module parameters */
+ HFI1_CAP_LOCK();
+
+ /* Validate dev ids */
+ if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
+ ent->device == PCI_DEVICE_ID_INTEL1)) {
+ dev_err(&pdev->dev, "Failing on unknown Intel deviceid 0x%x\n",
+ ent->device);
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ /* Allocate the dd so we can get to work */
+ dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
+ sizeof(struct hfi1_pportdata));
+ if (IS_ERR(dd)) {
+ ret = PTR_ERR(dd);
+ goto bail;
+ }
+
+ /* Validate some global module parameters */
+ ret = hfi1_validate_rcvhdrcnt(dd, rcvhdrcnt);
+ if (ret)
+ goto bail;
+
+ /* use the encoding function as a sanitization check */
+ if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
+ dd_dev_err(dd, "Invalid HdrQ Entry size %u\n",
+ hfi1_hdrq_entsize);
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /* The receive eager buffer size must be set before the receive
+ * contexts are created.
+ *
+ * Set the eager buffer size. Validate that it falls in a range
+ * allowed by the hardware - all powers of 2 between the min and
+ * max. The maximum valid MTU is within the eager buffer range
+ * so we do not need to cap the max_mtu by an eager buffer size
+ * setting.
+ */
+ if (eager_buffer_size) {
+ if (!is_power_of_2(eager_buffer_size))
+ eager_buffer_size =
+ roundup_pow_of_two(eager_buffer_size);
+ eager_buffer_size =
+ clamp_val(eager_buffer_size,
+ MIN_EAGER_BUFFER * 8,
+ MAX_EAGER_BUFFER_TOTAL);
+ dd_dev_info(dd, "Eager buffer size %u\n",
+ eager_buffer_size);
+ } else {
+ dd_dev_err(dd, "Invalid Eager buffer size of 0\n");
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /* restrict value of hfi1_rcvarr_split */
+ hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);
+
+ ret = hfi1_pcie_init(dd);
+ if (ret)
+ goto bail;
+
+ /*
+ * Do device-specific initialization, function table setup, dd
+ * allocation, etc.
+ */
+ ret = hfi1_init_dd(dd);
+ if (ret)
+ goto clean_bail; /* error already printed */
+
+ ret = create_workqueues(dd);
+ if (ret)
+ goto clean_bail;
+
+ /* do the generic initialization */
+ initfail = hfi1_init(dd, 0);
+
+ ret = hfi1_register_ib_device(dd);
+
+ /*
+ * Now ready for use. this should be cleared whenever we
+ * detect a reset, or initiate one. If earlier failure,
+ * we still create devices, so diags, etc. can be used
+ * to determine cause of problem.
+ */
+ if (!initfail && !ret) {
+ dd->flags |= HFI1_INITTED;
+		/* create debugfs files after init and ib register */
+ hfi1_dbg_ibdev_init(&dd->verbs_dev);
+ }
+
+ j = hfi1_device_create(dd);
+ if (j)
+ dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
+
+ if (initfail || ret) {
+ msix_clean_up_interrupts(dd);
+ stop_timers(dd);
+ flush_workqueue(ib_wq);
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ hfi1_quiet_serdes(dd->pport + pidx);
+ ppd = dd->pport + pidx;
+ if (ppd->hfi1_wq) {
+ destroy_workqueue(ppd->hfi1_wq);
+ ppd->hfi1_wq = NULL;
+ }
+ if (ppd->link_wq) {
+ destroy_workqueue(ppd->link_wq);
+ ppd->link_wq = NULL;
+ }
+ }
+ if (!j)
+ hfi1_device_remove(dd);
+ if (!ret)
+ hfi1_unregister_ib_device(dd);
+ postinit_cleanup(dd);
+ if (initfail)
+ ret = initfail;
+ goto bail; /* everything already cleaned */
+ }
+
+ sdma_start(dd);
+
+ return 0;
+
+clean_bail:
+ hfi1_pcie_cleanup(pdev);
+bail:
+ return ret;
+}
+
+static void wait_for_clients(struct hfi1_devdata *dd)
+{
+ /*
+	 * Remove the device init value and complete the device if there are
+	 * no clients, or wait for active clients to finish.
+ */
+ if (refcount_dec_and_test(&dd->user_refcount))
+ complete(&dd->user_comp);
+
+ wait_for_completion(&dd->user_comp);
+}
+
+static void remove_one(struct pci_dev *pdev)
+{
+ struct hfi1_devdata *dd = pci_get_drvdata(pdev);
+
+ /* close debugfs files before ib unregister */
+ hfi1_dbg_ibdev_exit(&dd->verbs_dev);
+
+ /* remove the /dev hfi1 interface */
+ hfi1_device_remove(dd);
+
+ /* wait for existing user space clients to finish */
+ wait_for_clients(dd);
+
+ /* unregister from IB core */
+ hfi1_unregister_ib_device(dd);
+
+ /* free netdev data */
+ hfi1_free_rx(dd);
+
+ /*
+ * Disable the IB link, disable interrupts on the device,
+ * clear dma engines, etc.
+ */
+ shutdown_device(dd);
+ destroy_workqueues(dd);
+
+ stop_timers(dd);
+
+ /* wait until all of our (qsfp) queue_work() calls complete */
+ flush_workqueue(ib_wq);
+
+ postinit_cleanup(dd);
+}
+
+static void shutdown_one(struct pci_dev *pdev)
+{
+ struct hfi1_devdata *dd = pci_get_drvdata(pdev);
+
+ shutdown_device(dd);
+}
+
+/**
+ * hfi1_create_rcvhdrq - create a receive header queue
+ * @dd: the hfi1_ib device
+ * @rcd: the context data
+ *
+ * This must be contiguous memory (from an i/o perspective), and must be
+ * DMA'able (which means for some systems, it will go through an IOMMU,
+ * or be forced into a low address range).
+ */
+int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
+{
+ unsigned amt;
+
+ if (!rcd->rcvhdrq) {
+ amt = rcvhdrq_size(rcd);
+
+ rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
+ &rcd->rcvhdrq_dma,
+ GFP_KERNEL);
+
+ if (!rcd->rcvhdrq) {
+ dd_dev_err(dd,
+ "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
+ amt, rcd->ctxt);
+ goto bail;
+ }
+
+ if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
+ HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
+ rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
+ PAGE_SIZE,
+ &rcd->rcvhdrqtailaddr_dma,
+ GFP_KERNEL);
+ if (!rcd->rcvhdrtail_kvaddr)
+ goto bail_free;
+ }
+ }
+
+ set_hdrq_regs(rcd->dd, rcd->ctxt, rcd->rcvhdrqentsize,
+ rcd->rcvhdrq_cnt);
+
+ return 0;
+
+bail_free:
+ dd_dev_err(dd,
+ "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
+ rcd->ctxt);
+ dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
+ rcd->rcvhdrq_dma);
+ rcd->rcvhdrq = NULL;
+bail:
+ return -ENOMEM;
+}
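+
+/*
+ * Sizing example (illustrative, based on the module parameter defaults
+ * above): rcvhdrcnt = 2048 entries of hdrq_entsize = 32 dwords
+ * (128 bytes each) comes to roughly 2048 * 128 = 256 KB of DMA-coherent
+ * memory per receive header queue.
+ */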
+
+/**
+ * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user
+ * contexts.
+ * @rcd: the context we are setting up.
+ *
+ * Allocate the eager TID buffers and program them into the chip.
+ * They are no longer completely contiguous, we do multiple allocation
+ * calls. Otherwise we get the OOM code involved, by asking for too
+ * much per call, with disastrous results on some kernels.
+ */
+int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
+{
+ struct hfi1_devdata *dd = rcd->dd;
+ u32 max_entries, egrtop, alloced_bytes = 0;
+ u16 order, idx = 0;
+ int ret = 0;
+ u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);
+
+ /*
+ * The minimum size of the eager buffers is a group of MTU-sized
+ * buffers.
+ * The global eager_buffer_size parameter is checked against the
+ * theoretical lower limit of the value. Here, we check against the
+ * MTU.
+ */
+ if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
+ rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
+ /*
+ * If using one-pkt-per-egr-buffer, lower the eager buffer
+ * size to the max MTU (page-aligned).
+ */
+ if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
+ rcd->egrbufs.rcvtid_size = round_mtu;
+
+ /*
+ * Eager buffers sizes of 1MB or less require smaller TID sizes
+ * to satisfy the "multiple of 8 RcvArray entries" requirement.
+ */
+ if (rcd->egrbufs.size <= (1 << 20))
+ rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
+ rounddown_pow_of_two(rcd->egrbufs.size / 8));
+
+ while (alloced_bytes < rcd->egrbufs.size &&
+ rcd->egrbufs.alloced < rcd->egrbufs.count) {
+ rcd->egrbufs.buffers[idx].addr =
+ dma_alloc_coherent(&dd->pcidev->dev,
+ rcd->egrbufs.rcvtid_size,
+ &rcd->egrbufs.buffers[idx].dma,
+ GFP_KERNEL);
+ if (rcd->egrbufs.buffers[idx].addr) {
+ rcd->egrbufs.buffers[idx].len =
+ rcd->egrbufs.rcvtid_size;
+ rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
+ rcd->egrbufs.buffers[idx].addr;
+ rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
+ rcd->egrbufs.buffers[idx].dma;
+ rcd->egrbufs.alloced++;
+ alloced_bytes += rcd->egrbufs.rcvtid_size;
+ idx++;
+ } else {
+ u32 new_size, i, j;
+ u64 offset = 0;
+
+ /*
+ * Fail the eager buffer allocation if:
+ * - we are already using the lowest acceptable size
+ * - we are using one-pkt-per-egr-buffer (this implies
+ * that we are accepting only one size)
+ */
+ if (rcd->egrbufs.rcvtid_size == round_mtu ||
+ !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
+ dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
+ rcd->ctxt);
+ ret = -ENOMEM;
+ goto bail_rcvegrbuf_phys;
+ }
+
+ new_size = rcd->egrbufs.rcvtid_size / 2;
+
+ /*
+ * If the first attempt to allocate memory failed, don't
+ * fail everything but continue with the next lower
+ * size.
+ */
+ if (idx == 0) {
+ rcd->egrbufs.rcvtid_size = new_size;
+ continue;
+ }
+
+ /*
+ * Re-partition already allocated buffers to a smaller
+ * size.
+ */
+ rcd->egrbufs.alloced = 0;
+ for (i = 0, j = 0, offset = 0; j < idx; i++) {
+ if (i >= rcd->egrbufs.count)
+ break;
+ rcd->egrbufs.rcvtids[i].dma =
+ rcd->egrbufs.buffers[j].dma + offset;
+ rcd->egrbufs.rcvtids[i].addr =
+ rcd->egrbufs.buffers[j].addr + offset;
+ rcd->egrbufs.alloced++;
+ if ((rcd->egrbufs.buffers[j].dma + offset +
+ new_size) ==
+ (rcd->egrbufs.buffers[j].dma +
+ rcd->egrbufs.buffers[j].len)) {
+ j++;
+ offset = 0;
+ } else {
+ offset += new_size;
+ }
+ }
+ rcd->egrbufs.rcvtid_size = new_size;
+ }
+ }
+ rcd->egrbufs.numbufs = idx;
+ rcd->egrbufs.size = alloced_bytes;
+
+ hfi1_cdbg(PROC,
+ "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %uKB",
+ rcd->ctxt, rcd->egrbufs.alloced,
+ rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);
+
+ /*
+ * Set the context's rcv array head update threshold to the closest
+ * power of 2 (so we can use a mask instead of modulo) below half
+ * the allocated entries.
+ */
+ rcd->egrbufs.threshold =
+ rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
+ /*
+ * Compute the expected RcvArray entry base. This is done after
+ * allocating the eager buffers in order to maximize the
+ * expected RcvArray entries for the context.
+ */
+ max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
+ egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
+ rcd->expected_count = max_entries - egrtop;
+ if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
+ rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;
+
+ rcd->expected_base = rcd->eager_base + egrtop;
+ hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u",
+ rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
+ rcd->eager_base, rcd->expected_base);
+
+ if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
+ hfi1_cdbg(PROC,
+ "ctxt%u: current Eager buffer size is invalid %u",
+ rcd->ctxt, rcd->egrbufs.rcvtid_size);
+ ret = -EINVAL;
+ goto bail_rcvegrbuf_phys;
+ }
+
+ for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
+ hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
+ rcd->egrbufs.rcvtids[idx].dma, order);
+ cond_resched();
+ }
+
+ return 0;
+
+bail_rcvegrbuf_phys:
+ for (idx = 0; idx < rcd->egrbufs.alloced &&
+ rcd->egrbufs.buffers[idx].addr;
+ idx++) {
+ dma_free_coherent(&dd->pcidev->dev,
+ rcd->egrbufs.buffers[idx].len,
+ rcd->egrbufs.buffers[idx].addr,
+ rcd->egrbufs.buffers[idx].dma);
+ rcd->egrbufs.buffers[idx].addr = NULL;
+ rcd->egrbufs.buffers[idx].dma = 0;
+ rcd->egrbufs.buffers[idx].len = 0;
+ }
+
+ return ret;
+}
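+
+/*
+ * Worked example (illustrative numbers, not part of this patch) for the
+ * re-partition loop in hfi1_setup_eagerbufs(): if two 8 KB buffers were
+ * allocated before an allocation failure and new_size drops to 4 KB, the
+ * rcvtids array is rebuilt as four 4 KB slices of the existing buffers:
+ *
+ *	rcvtids[0] = buffers[0] + 0      rcvtids[1] = buffers[0] + 4K
+ *	rcvtids[2] = buffers[1] + 0      rcvtids[3] = buffers[1] + 4K
+ *
+ * i.e. each already-allocated coherent buffer is carved into new_size
+ * chunks and egrbufs.alloced is recomputed to match.
+ */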
diff --git a/drivers/infiniband/hw/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c
new file mode 100644
index 000000000000..d8dd1a599631
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/intr.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/bitmap.h>
+
+#include "hfi.h"
+#include "common.h"
+#include "sdma.h"
+
+#define LINK_UP_DELAY 500 /* in microseconds */
+
+static void set_mgmt_allowed(struct hfi1_pportdata *ppd)
+{
+ u32 frame;
+ struct hfi1_devdata *dd = ppd->dd;
+
+ if (ppd->neighbor_type == NEIGHBOR_TYPE_HFI) {
+ ppd->mgmt_allowed = 1;
+ } else {
+ read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
+ ppd->mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT)
+ & MGMT_ALLOWED_MASK;
+ }
+}
+
+/*
+ * Our neighbor has indicated that we are allowed to act as a fabric
+ * manager, so place the full management partition key in pkey array
+ * index 2 (0-based). Note that we should already have
+ * the limited management partition key in array element 1, and also
+ * that the port is not yet up when add_full_mgmt_pkey() is invoked.
+ */
+static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+
+ /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
+ if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
+ dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
+ __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
+ ppd->pkeys[2] = FULL_MGMT_P_KEY;
+ (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+ hfi1_event_pkey_change(ppd->dd, ppd->port);
+}
+
+static void signal_ib_event(struct hfi1_pportdata *ppd, enum ib_event_type ev)
+{
+ struct ib_event event;
+ struct hfi1_devdata *dd = ppd->dd;
+
+ /*
+ * Only call ib_dispatch_event() if the IB device has been
+ * registered. HFI1_INITTED is set iff the driver has successfully
+ * registered with the IB core.
+ */
+ if (!(dd->flags & HFI1_INITTED))
+ return;
+ event.device = &dd->verbs_dev.rdi.ibdev;
+ event.element.port_num = ppd->port;
+ event.event = ev;
+ ib_dispatch_event(&event);
+}
+
+/**
+ * handle_linkup_change - finish linkup/down state changes
+ * @dd: valid device
+ * @linkup: link state information
+ *
+ * Handle a linkup or link down notification.
+ * The HW needs time to finish its link up state change. Give it that chance.
+ *
+ * This is called outside an interrupt.
+ *
+ */
+void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup)
+{
+ struct hfi1_pportdata *ppd = &dd->pport[0];
+ enum ib_event_type ev;
+
+ if (!(ppd->linkup ^ !!linkup))
+ return; /* no change, nothing to do */
+
+ if (linkup) {
+ /*
+ * Quick linkup and all link up on the simulator does not
+ * trigger or implement:
+ * - VerifyCap interrupt
+ * - VerifyCap frames
+ * But rather moves directly to LinkUp.
+ *
+ * Do the work of the VerifyCap interrupt handler,
+ * handle_verify_cap(), but do not try moving the state to
+ * LinkUp as we are already there.
+ *
+ * NOTE: This uses this device's vAU, vCU, and vl15_init for
+ * the remote values. Both sides must be using the same values.
+ */
+ if (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
+ set_up_vau(dd, dd->vau);
+ set_up_vl15(dd, dd->vl15_init);
+ assign_remote_cm_au_table(dd, dd->vcu);
+ }
+
+ ppd->neighbor_guid =
+ read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
+ ppd->neighbor_type =
+ read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
+ DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
+ ppd->neighbor_port_number =
+ read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
+ DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
+ ppd->neighbor_fm_security =
+ read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
+ DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
+ dd_dev_info(dd,
+ "Neighbor Guid %llx, Type %d, Port Num %d\n",
+ ppd->neighbor_guid, ppd->neighbor_type,
+ ppd->neighbor_port_number);
+
+ /* HW needs LINK_UP_DELAY to settle, give it that chance */
+ udelay(LINK_UP_DELAY);
+
+ /*
+ * 'MgmtAllowed' information, which is exchanged during
+ * LNI, is available at this point.
+ */
+ set_mgmt_allowed(ppd);
+
+ if (ppd->mgmt_allowed)
+ add_full_mgmt_pkey(ppd);
+
+ /* physical link went up */
+ ppd->linkup = 1;
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
+
+ /* link widths are not available until the link is fully up */
+ get_linkup_link_widths(ppd);
+
+ } else {
+ /* physical link went down */
+ ppd->linkup = 0;
+
+ /* clear HW details of the previous connection */
+ ppd->actual_vls_operational = 0;
+ reset_link_credits(dd);
+
+ /* freeze after a link down to guarantee a clean egress */
+ start_freeze_handling(ppd, FREEZE_SELF | FREEZE_LINK_DOWN);
+
+ ev = IB_EVENT_PORT_ERR;
+
+ hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LINKDOWN_BIT);
+
+ /* if we are down, the neighbor is down */
+ ppd->neighbor_normal = 0;
+
+ /* notify IB of the link change */
+ signal_ib_event(ppd, ev);
+ }
+}
+
+/*
+ * Handle receive or urgent interrupts for user contexts. This means a user
+ * process was waiting for a packet to arrive, and didn't want to poll.
+ */
+void handle_user_interrupt(struct hfi1_ctxtdata *rcd)
+{
+ struct hfi1_devdata *dd = rcd->dd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ if (bitmap_empty(rcd->in_use_ctxts, HFI1_MAX_SHARED_CTXTS))
+ goto done;
+
+ if (test_and_clear_bit(HFI1_CTXT_WAITING_RCV, &rcd->event_flags)) {
+ wake_up_interruptible(&rcd->wait);
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_DIS, rcd);
+ } else if (test_and_clear_bit(HFI1_CTXT_WAITING_URG,
+ &rcd->event_flags)) {
+ rcd->urgent++;
+ wake_up_interruptible(&rcd->wait);
+ }
+done:
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+}
diff --git a/drivers/infiniband/hw/hfi1/iowait.c b/drivers/infiniband/hw/hfi1/iowait.c
new file mode 100644
index 000000000000..111489802614
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/iowait.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ */
+#include "iowait.h"
+#include "trace_iowait.h"
+
+/* 1 priority == 16 starve_cnt */
+#define IOWAIT_PRIORITY_STARVE_SHIFT 4
+
+void iowait_set_flag(struct iowait *wait, u32 flag)
+{
+ trace_hfi1_iowait_set(wait, flag);
+ set_bit(flag, &wait->flags);
+}
+
+bool iowait_flag_set(struct iowait *wait, u32 flag)
+{
+ return test_bit(flag, &wait->flags);
+}
+
+inline void iowait_clear_flag(struct iowait *wait, u32 flag)
+{
+ trace_hfi1_iowait_clear(wait, flag);
+ clear_bit(flag, &wait->flags);
+}
+
+/*
+ * iowait_init() - initialize wait structure
+ * @wait: wait struct to initialize
+ * @tx_limit: limit for overflow queuing
+ * @func: restart function for the IB send engine work item
+ * @tidfunc: restart function for the TID send engine work item
+ * @sleep: sleep function for no space
+ * @wakeup: wakeup function for no space
+ * @sdma_drained: callback invoked when the sdma count drains to zero
+ * @init_priority: callback to initialize the iowait priority
+ *
+ * This function initializes the iowait
+ * structure embedded in the QP or PQ.
+ */
+void iowait_init(struct iowait *wait, u32 tx_limit,
+ void (*func)(struct work_struct *work),
+ void (*tidfunc)(struct work_struct *work),
+ int (*sleep)(struct sdma_engine *sde,
+ struct iowait_work *wait,
+ struct sdma_txreq *tx,
+ uint seq,
+ bool pkts_sent),
+ void (*wakeup)(struct iowait *wait, int reason),
+ void (*sdma_drained)(struct iowait *wait),
+ void (*init_priority)(struct iowait *wait))
+{
+ int i;
+
+ wait->count = 0;
+ INIT_LIST_HEAD(&wait->list);
+ init_waitqueue_head(&wait->wait_dma);
+ init_waitqueue_head(&wait->wait_pio);
+ atomic_set(&wait->sdma_busy, 0);
+ atomic_set(&wait->pio_busy, 0);
+ wait->tx_limit = tx_limit;
+ wait->sleep = sleep;
+ wait->wakeup = wakeup;
+ wait->sdma_drained = sdma_drained;
+ wait->init_priority = init_priority;
+ wait->flags = 0;
+ for (i = 0; i < IOWAIT_SES; i++) {
+ wait->wait[i].iow = wait;
+ INIT_LIST_HEAD(&wait->wait[i].tx_head);
+ if (i == IOWAIT_IB_SE)
+ INIT_WORK(&wait->wait[i].iowork, func);
+ else
+ INIT_WORK(&wait->wait[i].iowork, tidfunc);
+ }
+}
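+
+/*
+ * Usage sketch (illustrative only): a ULP embeds a struct iowait in its
+ * per-queue state and passes its own callbacks, as the ipoib code later
+ * in this patch does for its tx queues:
+ *
+ *	iowait_init(&txq->wait, 0,
+ *		    hfi1_ipoib_flush_txq,	// IB SE work function
+ *		    NULL,			// no TID SE work
+ *		    hfi1_ipoib_sdma_sleep,	// queue on descriptor shortage
+ *		    hfi1_ipoib_sdma_wakeup,	// reschedule when space frees up
+ *		    NULL, NULL);		// no drain/priority callbacks
+ */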
+
+/**
+ * iowait_cancel_work - cancel all work in iowait
+ * @w: the iowait struct
+ */
+void iowait_cancel_work(struct iowait *w)
+{
+ cancel_work_sync(&iowait_get_ib_work(w)->iowork);
+ /* Cancel the TID RDMA work only if its iowork function was set */
+ if (iowait_get_tid_work(w)->iowork.func)
+ cancel_work_sync(&iowait_get_tid_work(w)->iowork);
+}
+
+/**
+ * iowait_set_work_flag - set work flag based on leg
+ * @w: the iowait work struct
+ */
+int iowait_set_work_flag(struct iowait_work *w)
+{
+ if (w == &w->iow->wait[IOWAIT_IB_SE]) {
+ iowait_set_flag(w->iow, IOWAIT_PENDING_IB);
+ return IOWAIT_IB_SE;
+ }
+ iowait_set_flag(w->iow, IOWAIT_PENDING_TID);
+ return IOWAIT_TID_SE;
+}
+
+/**
+ * iowait_priority_update_top - update the top priority entry
+ * @w: the iowait struct
+ * @top: a pointer to the top priority entry
+ * @idx: the index of the current iowait in an array
+ * @top_idx: the array index for the iowait entry that has the top priority
+ *
+ * This function is called to compare the priority of a given
+ * iowait with the given top priority entry. The top index will
+ * be returned.
+ */
+uint iowait_priority_update_top(struct iowait *w,
+ struct iowait *top,
+ uint idx, uint top_idx)
+{
+ u8 cnt, tcnt;
+
+ /* Convert priority into starve_cnt and compare the total.*/
+ cnt = (w->priority << IOWAIT_PRIORITY_STARVE_SHIFT) + w->starved_cnt;
+ tcnt = (top->priority << IOWAIT_PRIORITY_STARVE_SHIFT) +
+ top->starved_cnt;
+ if (cnt > tcnt)
+ return idx;
+ else
+ return top_idx;
+}
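+
+/*
+ * Usage sketch (illustrative, assumes a hypothetical waits[] array): a
+ * caller scanning several iowaits keeps track of the most deserving one
+ * by folding each candidate through iowait_priority_update_top():
+ *
+ *	uint i, top_idx = 0;
+ *
+ *	for (i = 1; i < n; i++)
+ *		top_idx = iowait_priority_update_top(waits[i], waits[top_idx],
+ *						     i, top_idx);
+ *	// waits[top_idx] now has the largest priority/starve_cnt total
+ */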
diff --git a/drivers/infiniband/hw/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h
new file mode 100644
index 000000000000..7259f4f55700
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/iowait.h
@@ -0,0 +1,457 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2015 - 2018 Intel Corporation.
+ */
+
+#ifndef _HFI1_IOWAIT_H
+#define _HFI1_IOWAIT_H
+
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+
+#include "sdma_txreq.h"
+
+/*
+ * typedef (*restart_t)() - restart callback
+ * @work: pointer to work structure
+ */
+typedef void (*restart_t)(struct work_struct *work);
+
+#define IOWAIT_PENDING_IB 0x0
+#define IOWAIT_PENDING_TID 0x1
+
+/*
+ * A QP can have multiple Send Engines (SEs).
+ *
+ * The current use case is for supporting a TID RDMA
+ * packet build/xmit mechanism independent from verbs.
+ */
+#define IOWAIT_SES 2
+#define IOWAIT_IB_SE 0
+#define IOWAIT_TID_SE 1
+
+struct sdma_txreq;
+struct sdma_engine;
+/**
+ * struct iowait_work - per send engine (SE) work state
+ * @iowork: the work struct
+ * @tx_head: list of prebuilt packets
+ * @iow: the parent iowait structure
+ *
+ * This structure holds the work item (process) specific
+ * details associated with each of the two SEs of the
+ * QP.
+ *
+ * The workstruct and the queued TXs are unique to each
+ * SE.
+ */
+struct iowait;
+struct iowait_work {
+ struct work_struct iowork;
+ struct list_head tx_head;
+ struct iowait *iow;
+};
+
+/**
+ * struct iowait - state for resource-shortage waiting
+ * @list: used to add/insert into QP/PQ wait lists
+ * @sleep: no space callback
+ * @wakeup: space callback wakeup
+ * @sdma_drained: sdma count drained
+ * @init_priority: callback to manipulate priority
+ * @lock: lock protected head of wait queue
+ * @wait_dma: wait for sdma_busy == 0
+ * @wait_pio: wait for pio_busy == 0
+ * @sdma_busy: # of sdma packets in flight
+ * @pio_busy: # of pio packets in flight
+ * @count: total number of descriptors in tx_head'ed list
+ * @tx_limit: limit for overflow queuing
+ * @tx_count: number of tx entries in tx_head'ed list
+ * @starved_cnt: number of times this entry was passed over on its wait list
+ * @priority: packet priority accumulated from the queued txreqs
+ * @flags: wait flags (one per QP)
+ * @wait: SE array for multiple legs
+ *
+ * This is to be embedded in user's state structure
+ * (QP or PQ).
+ *
+ * The sleep and wakeup members are a
+ * bit misnamed. They do not strictly
+ * speaking sleep or wake up, but they
+ * are callbacks for the ULP to implement
+ * whatever queuing/dequeuing of
+ * the embedded iowait and its containing struct
+ * is needed when a resource shortage like SDMA ring space
+ * or PIO credit space is seen.
+ *
+ * Both potentially have locks held,
+ * so sleeping is not allowed and it is not
+ * supported to submit txreqs from the wakeup
+ * call directly because of lock conflicts.
+ *
+ * The wait_dma and wait_pio waitqueues are waited on by the
+ * drain helpers below until sdma_busy and pio_busy reach zero.
+ *
+ * The lock field is used by waiters to record
+ * the seqlock_t that guards the list head.
+ * Waiters explicitly know that, but the destroy
+ * code that unwaits QPs does not.
+ */
+struct iowait {
+ struct list_head list;
+ int (*sleep)(
+ struct sdma_engine *sde,
+ struct iowait_work *wait,
+ struct sdma_txreq *tx,
+ uint seq,
+ bool pkts_sent
+ );
+ void (*wakeup)(struct iowait *wait, int reason);
+ void (*sdma_drained)(struct iowait *wait);
+ void (*init_priority)(struct iowait *wait);
+ seqlock_t *lock;
+ wait_queue_head_t wait_dma;
+ wait_queue_head_t wait_pio;
+ atomic_t sdma_busy;
+ atomic_t pio_busy;
+ u32 count;
+ u32 tx_limit;
+ u32 tx_count;
+ u8 starved_cnt;
+ u8 priority;
+ unsigned long flags;
+ struct iowait_work wait[IOWAIT_SES];
+};
+
+#define SDMA_AVAIL_REASON 0
+
+void iowait_set_flag(struct iowait *wait, u32 flag);
+bool iowait_flag_set(struct iowait *wait, u32 flag);
+void iowait_clear_flag(struct iowait *wait, u32 flag);
+
+void iowait_init(struct iowait *wait, u32 tx_limit,
+ void (*func)(struct work_struct *work),
+ void (*tidfunc)(struct work_struct *work),
+ int (*sleep)(struct sdma_engine *sde,
+ struct iowait_work *wait,
+ struct sdma_txreq *tx,
+ uint seq,
+ bool pkts_sent),
+ void (*wakeup)(struct iowait *wait, int reason),
+ void (*sdma_drained)(struct iowait *wait),
+ void (*init_priority)(struct iowait *wait));
+
+/**
+ * iowait_schedule() - schedule the default send engine work
+ * @wait: wait struct to schedule
+ * @wq: workqueue for schedule
+ * @cpu: cpu
+ */
+static inline bool iowait_schedule(struct iowait *wait,
+ struct workqueue_struct *wq, int cpu)
+{
+ return !!queue_work_on(cpu, wq, &wait->wait[IOWAIT_IB_SE].iowork);
+}
+
+/**
+ * iowait_tid_schedule - schedule the tid SE
+ * @wait: the iowait structure
+ * @wq: the work queue
+ * @cpu: the cpu
+ */
+static inline bool iowait_tid_schedule(struct iowait *wait,
+ struct workqueue_struct *wq, int cpu)
+{
+ return !!queue_work_on(cpu, wq, &wait->wait[IOWAIT_TID_SE].iowork);
+}
+
+/**
+ * iowait_sdma_drain() - wait for DMAs to drain
+ *
+ * @wait: iowait structure
+ *
+ * This will delay until the iowait sdmas have
+ * completed.
+ */
+static inline void iowait_sdma_drain(struct iowait *wait)
+{
+ wait_event(wait->wait_dma, !atomic_read(&wait->sdma_busy));
+}
+
+/**
+ * iowait_sdma_pending() - return sdma pending count
+ *
+ * @wait: iowait structure
+ *
+ */
+static inline int iowait_sdma_pending(struct iowait *wait)
+{
+ return atomic_read(&wait->sdma_busy);
+}
+
+/**
+ * iowait_sdma_inc - note sdma io pending
+ * @wait: iowait structure
+ */
+static inline void iowait_sdma_inc(struct iowait *wait)
+{
+ atomic_inc(&wait->sdma_busy);
+}
+
+/**
+ * iowait_sdma_add - add count to pending
+ * @wait: iowait structure
+ */
+static inline void iowait_sdma_add(struct iowait *wait, int count)
+{
+ atomic_add(count, &wait->sdma_busy);
+}
+
+/**
+ * iowait_sdma_dec - note sdma complete
+ * @wait: iowait structure
+ */
+static inline int iowait_sdma_dec(struct iowait *wait)
+{
+ if (!wait)
+ return 0;
+ return atomic_dec_and_test(&wait->sdma_busy);
+}
+
+/**
+ * iowait_pio_drain() - wait for pios to drain
+ *
+ * @wait: iowait structure
+ *
+ * This will delay until the iowait pios have
+ * completed.
+ */
+static inline void iowait_pio_drain(struct iowait *wait)
+{
+ wait_event_timeout(wait->wait_pio,
+ !atomic_read(&wait->pio_busy),
+ HZ);
+}
+
+/**
+ * iowait_pio_pending() - return pio pending count
+ *
+ * @wait: iowait structure
+ *
+ */
+static inline int iowait_pio_pending(struct iowait *wait)
+{
+ return atomic_read(&wait->pio_busy);
+}
+
+/**
+ * iowait_pio_inc - note pio pending
+ * @wait: iowait structure
+ */
+static inline void iowait_pio_inc(struct iowait *wait)
+{
+ atomic_inc(&wait->pio_busy);
+}
+
+/**
+ * iowait_pio_dec - note pio complete
+ * @wait: iowait structure
+ */
+static inline int iowait_pio_dec(struct iowait *wait)
+{
+ if (!wait)
+ return 0;
+ return atomic_dec_and_test(&wait->pio_busy);
+}
+
+/**
+ * iowait_drain_wakeup() - trigger the sdma/pio drain waiters
+ *
+ * @wait: iowait structure
+ *
+ * This will trigger any waiters.
+ */
+static inline void iowait_drain_wakeup(struct iowait *wait)
+{
+ wake_up(&wait->wait_dma);
+ wake_up(&wait->wait_pio);
+ if (wait->sdma_drained)
+ wait->sdma_drained(wait);
+}
+
+/**
+ * iowait_get_txhead() - get packet off of iowait list
+ *
+ * @wait: iowait_work structure
+ */
+static inline struct sdma_txreq *iowait_get_txhead(struct iowait_work *wait)
+{
+ struct sdma_txreq *tx = NULL;
+
+ if (!list_empty(&wait->tx_head)) {
+ tx = list_first_entry(
+ &wait->tx_head,
+ struct sdma_txreq,
+ list);
+ list_del_init(&tx->list);
+ }
+ return tx;
+}
+
+static inline u16 iowait_get_desc(struct iowait_work *w)
+{
+ u16 num_desc = 0;
+ struct sdma_txreq *tx = NULL;
+
+ if (!list_empty(&w->tx_head)) {
+ tx = list_first_entry(&w->tx_head, struct sdma_txreq,
+ list);
+ num_desc = tx->num_desc;
+ if (tx->flags & SDMA_TXREQ_F_VIP)
+ w->iow->priority++;
+ }
+ return num_desc;
+}
+
+static inline u32 iowait_get_all_desc(struct iowait *w)
+{
+ u32 num_desc = 0;
+
+ num_desc = iowait_get_desc(&w->wait[IOWAIT_IB_SE]);
+ num_desc += iowait_get_desc(&w->wait[IOWAIT_TID_SE]);
+ return num_desc;
+}
+
+static inline void iowait_update_priority(struct iowait_work *w)
+{
+ struct sdma_txreq *tx = NULL;
+
+ if (!list_empty(&w->tx_head)) {
+ tx = list_first_entry(&w->tx_head, struct sdma_txreq,
+ list);
+ if (tx->flags & SDMA_TXREQ_F_VIP)
+ w->iow->priority++;
+ }
+}
+
+static inline void iowait_update_all_priority(struct iowait *w)
+{
+ iowait_update_priority(&w->wait[IOWAIT_IB_SE]);
+ iowait_update_priority(&w->wait[IOWAIT_TID_SE]);
+}
+
+static inline void iowait_init_priority(struct iowait *w)
+{
+ w->priority = 0;
+ if (w->init_priority)
+ w->init_priority(w);
+}
+
+static inline void iowait_get_priority(struct iowait *w)
+{
+ iowait_init_priority(w);
+ iowait_update_all_priority(w);
+}
+
+/**
+ * iowait_queue - Put the iowait on a wait queue
+ * @pkts_sent: have some packets been sent before queuing?
+ * @w: the iowait struct
+ * @wait_head: the wait queue
+ *
+ * This function is called to insert an iowait struct into a
+ * wait queue after a resource (e.g., an sdma descriptor or pio
+ * buffer) runs out.
+ */
+static inline void iowait_queue(bool pkts_sent, struct iowait *w,
+ struct list_head *wait_head)
+{
+ /*
+ * To play fair, insert the iowait at the tail of the wait queue if it
+ * has already sent some packets; Otherwise, put it at the head.
+ * However, if it has priority packets to send, also put it at the
+ * head.
+ */
+ if (pkts_sent)
+ w->starved_cnt = 0;
+ else
+ w->starved_cnt++;
+
+ if (w->priority > 0 || !pkts_sent)
+ list_add(&w->list, wait_head);
+ else
+ list_add_tail(&w->list, wait_head);
+}
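+
+/*
+ * Usage sketch (illustrative): iowait_queue() is intended to be called
+ * from a ULP sleep callback while the engine's waitlock is held, as the
+ * ipoib sleep handler later in this patch does:
+ *
+ *	write_seqlock(&sde->waitlock);
+ *	...
+ *	if (list_empty(&txq->wait.list)) {
+ *		ibp->rvp.n_dmawait++;
+ *		iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
+ *	}
+ *	write_sequnlock(&sde->waitlock);
+ */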
+
+/**
+ * iowait_starve_clear - clear the wait queue's starve count
+ * @pkts_sent: have some packets been sent?
+ * @w: the iowait struct
+ *
+ * This function is called to clear the starve count. If no
+ * packets have been sent, the starve count will not be cleared.
+ */
+static inline void iowait_starve_clear(bool pkts_sent, struct iowait *w)
+{
+ if (pkts_sent)
+ w->starved_cnt = 0;
+}
+
+/* Update the top priority index */
+uint iowait_priority_update_top(struct iowait *w,
+ struct iowait *top,
+ uint idx, uint top_idx);
+
+/**
+ * iowait_packet_queued() - determine if a packet is queued
+ * @wait: the iowait_work structure
+ */
+static inline bool iowait_packet_queued(struct iowait_work *wait)
+{
+ return !list_empty(&wait->tx_head);
+}
+
+/**
+ * iowait_inc_wait_count - increment wait counts
+ * @w: the iowait work struct
+ * @n: the count
+ */
+static inline void iowait_inc_wait_count(struct iowait_work *w, u16 n)
+{
+ if (!w)
+ return;
+ w->iow->tx_count++;
+ w->iow->count += n;
+}
+
+/**
+ * iowait_get_tid_work - return iowait_work for tid SE
+ * @w: the iowait struct
+ */
+static inline struct iowait_work *iowait_get_tid_work(struct iowait *w)
+{
+ return &w->wait[IOWAIT_TID_SE];
+}
+
+/**
+ * iowait_get_ib_work - return iowait_work for ib SE
+ * @w: the iowait struct
+ */
+static inline struct iowait_work *iowait_get_ib_work(struct iowait *w)
+{
+ return &w->wait[IOWAIT_IB_SE];
+}
+
+/**
+ * iowait_ioww_to_iow - return iowait given iowait_work
+ * @w: the iowait_work struct
+ */
+static inline struct iowait *iowait_ioww_to_iow(struct iowait_work *w)
+{
+ if (likely(w))
+ return w->iow;
+ return NULL;
+}
+
+void iowait_cancel_work(struct iowait *w);
+int iowait_set_work_flag(struct iowait_work *w);
+
+#endif
diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h
new file mode 100644
index 000000000000..aec60d4888eb
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/ipoib.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ *
+ */
+
+/*
+ * This file contains HFI1 support for IPOIB functionality
+ */
+
+#ifndef HFI1_IPOIB_H
+#define HFI1_IPOIB_H
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/atomic.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/list.h>
+#include <linux/if_infiniband.h>
+
+#include "hfi.h"
+#include "iowait.h"
+#include "netdev.h"
+
+#include <rdma/ib_verbs.h>
+
+#define HFI1_IPOIB_ENTROPY_SHIFT 24
+
+#define HFI1_IPOIB_TXREQ_NAME_LEN 32
+
+#define HFI1_IPOIB_PSEUDO_LEN 20
+#define HFI1_IPOIB_ENCAP_LEN 4
+
+struct hfi1_ipoib_dev_priv;
+
+union hfi1_ipoib_flow {
+ u16 as_int;
+ struct {
+ u8 tx_queue;
+ u8 sc5;
+ } __attribute__((__packed__));
+};
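+
+/*
+ * Packing tx_queue and sc5 into a single u16 lets the transmit hot path
+ * detect a flow change with one compare (illustrative, mirroring the
+ * ipoib_tx.c code later in this patch):
+ *
+ *	if (txq->flow.as_int != txp->flow.as_int) {
+ *		// flow changed: flush the pending list and reselect the sde
+ *	}
+ */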
+
+/**
+ * struct ipoib_txreq - IPOIB transmit descriptor
+ * @txreq: sdma transmit request
+ * @sdma_hdr: 9b ib headers
+ * @sdma_status: status returned by sdma engine
+ * @complete: non-zero implies complete
+ * @priv: ipoib netdev private data
+ * @txq: txq on which skb was output
+ * @skb: skb to send
+ */
+struct ipoib_txreq {
+ struct sdma_txreq txreq;
+ struct hfi1_sdma_header *sdma_hdr;
+ int sdma_status;
+ int complete;
+ struct hfi1_ipoib_dev_priv *priv;
+ struct hfi1_ipoib_txq *txq;
+ struct sk_buff *skb;
+};
+
+/**
+ * struct hfi1_ipoib_circ_buf - List of items to be processed
+ * @items: ring of items, each a power of two in size
+ * @max_items: ring size (the ring holds at most max_items - 1 entries)
+ * @shift: log2 of the item size, used to index into @items
+ * @sent_txreqs: count of txreqs posted to sdma
+ * @avail: cached count of free ring entries
+ * @tail: ring tail
+ * @stops: count of stops of queue
+ * @ring_full: ring has been filled
+ * @no_desc: descriptor shortage seen
+ * @complete_txreqs: count of txreqs completed by sdma
+ * @head: ring head
+ */
+struct hfi1_ipoib_circ_buf {
+ void *items;
+ u32 max_items;
+ u32 shift;
+ /* consumer cache line */
+ u64 ____cacheline_aligned_in_smp sent_txreqs;
+ u32 avail;
+ u32 tail;
+ atomic_t stops;
+ atomic_t ring_full;
+ atomic_t no_desc;
+ /* producer cache line */
+ u64 ____cacheline_aligned_in_smp complete_txreqs;
+ u32 head;
+};
+
+/**
+ * struct hfi1_ipoib_txq - IPOIB per Tx queue information
+ * @priv: private pointer
+ * @sde: sdma engine
+ * @tx_list: tx request list
+ * @flow: tracks when list needs to be flushed for a flow change
+ * @q_idx: ipoib Tx queue index
+ * @pkts_sent: indicator packets have been sent from this queue
+ * @wait: iowait structure
+ * @napi: tx napi interface
+ * @tx_ring: ring of ipoib txreqs to be reaped by napi callback
+ */
+struct hfi1_ipoib_txq {
+ struct napi_struct napi;
+ struct hfi1_ipoib_dev_priv *priv;
+ struct sdma_engine *sde;
+ struct list_head tx_list;
+ union hfi1_ipoib_flow flow;
+ u8 q_idx;
+ bool pkts_sent;
+ struct iowait wait;
+
+ struct hfi1_ipoib_circ_buf ____cacheline_aligned_in_smp tx_ring;
+};
+
+struct hfi1_ipoib_dev_priv {
+ struct hfi1_devdata *dd;
+ struct net_device *netdev;
+ struct ib_device *device;
+ struct hfi1_ipoib_txq *txqs;
+ const struct net_device_ops *netdev_ops;
+ struct rvt_qp *qp;
+ u32 qkey;
+ u16 pkey;
+ u16 pkey_index;
+ u8 port_num;
+};
+
+/* hfi1 ipoib rdma netdev's private data structure */
+struct hfi1_ipoib_rdma_netdev {
+ struct rdma_netdev rn; /* keep this first */
+ /* followed by device private data */
+ struct hfi1_ipoib_dev_priv dev_priv;
+};
+
+static inline struct hfi1_ipoib_dev_priv *
+hfi1_ipoib_priv(const struct net_device *dev)
+{
+ return &((struct hfi1_ipoib_rdma_netdev *)netdev_priv(dev))->dev_priv;
+}
+
+int hfi1_ipoib_send(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ib_ah *address,
+ u32 dqpn);
+
+int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv);
+void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv);
+
+int hfi1_ipoib_rxq_init(struct net_device *dev);
+void hfi1_ipoib_rxq_deinit(struct net_device *dev);
+
+void hfi1_ipoib_napi_tx_enable(struct net_device *dev);
+void hfi1_ipoib_napi_tx_disable(struct net_device *dev);
+
+struct sk_buff *hfi1_ipoib_prepare_skb(struct hfi1_netdev_rxq *rxq,
+ int size, void *data);
+
+int hfi1_ipoib_rn_get_params(struct ib_device *device,
+ u32 port_num,
+ enum rdma_netdev_t type,
+ struct rdma_netdev_alloc_params *params);
+
+void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q);
+
+#endif /* HFI1_IPOIB_H */
diff --git a/drivers/infiniband/hw/hfi1/ipoib_main.c b/drivers/infiniband/hw/hfi1/ipoib_main.c
new file mode 100644
index 000000000000..7c9d5203002b
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/ipoib_main.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ *
+ */
+
+/*
+ * This file contains HFI1 support for ipoib functionality
+ */
+
+#include "ipoib.h"
+#include "hfi.h"
+
+static u32 qpn_from_mac(const u8 *mac_arr)
+{
+ return (u32)mac_arr[1] << 16 | mac_arr[2] << 8 | mac_arr[3];
+}
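+
+/*
+ * Example (illustrative values): for an IPoIB hardware address whose
+ * bytes 1..3 are 0x12, 0x34 and 0x56, qpn_from_mac() returns
+ * (0x12 << 16) | (0x34 << 8) | 0x56 == 0x123456, i.e. the QPN encoded
+ * in the device address.
+ */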
+
+static int hfi1_ipoib_dev_init(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ int ret;
+
+ ret = priv->netdev_ops->ndo_init(dev);
+ if (ret)
+ return ret;
+
+ ret = hfi1_netdev_add_data(priv->dd,
+ qpn_from_mac(priv->netdev->dev_addr),
+ dev);
+ if (ret < 0) {
+ priv->netdev_ops->ndo_uninit(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hfi1_ipoib_dev_uninit(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+
+ hfi1_netdev_remove_data(priv->dd, qpn_from_mac(priv->netdev->dev_addr));
+
+ priv->netdev_ops->ndo_uninit(dev);
+}
+
+static int hfi1_ipoib_dev_open(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ int ret;
+
+ ret = priv->netdev_ops->ndo_open(dev);
+ if (!ret) {
+ struct hfi1_ibport *ibp = to_iport(priv->device,
+ priv->port_num);
+ struct rvt_qp *qp;
+ u32 qpn = qpn_from_mac(priv->netdev->dev_addr);
+
+ rcu_read_lock();
+ qp = rvt_lookup_qpn(ib_to_rvt(priv->device), &ibp->rvp, qpn);
+ if (!qp) {
+ rcu_read_unlock();
+ priv->netdev_ops->ndo_stop(dev);
+ return -EINVAL;
+ }
+ rvt_get_qp(qp);
+ priv->qp = qp;
+ rcu_read_unlock();
+
+ hfi1_netdev_enable_queues(priv->dd);
+ hfi1_ipoib_napi_tx_enable(dev);
+ }
+
+ return ret;
+}
+
+static int hfi1_ipoib_dev_stop(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+
+ if (!priv->qp)
+ return 0;
+
+ hfi1_ipoib_napi_tx_disable(dev);
+ hfi1_netdev_disable_queues(priv->dd);
+
+ rvt_put_qp(priv->qp);
+ priv->qp = NULL;
+
+ return priv->netdev_ops->ndo_stop(dev);
+}
+
+static const struct net_device_ops hfi1_ipoib_netdev_ops = {
+ .ndo_init = hfi1_ipoib_dev_init,
+ .ndo_uninit = hfi1_ipoib_dev_uninit,
+ .ndo_open = hfi1_ipoib_dev_open,
+ .ndo_stop = hfi1_ipoib_dev_stop,
+};
+
+static int hfi1_ipoib_mcast_attach(struct net_device *dev,
+ struct ib_device *device,
+ union ib_gid *mgid,
+ u16 mlid,
+ int set_qkey,
+ u32 qkey)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ u32 qpn = (u32)qpn_from_mac(priv->netdev->dev_addr);
+ struct hfi1_ibport *ibp = to_iport(priv->device, priv->port_num);
+ struct rvt_qp *qp;
+ int ret = -EINVAL;
+
+ rcu_read_lock();
+
+ qp = rvt_lookup_qpn(ib_to_rvt(priv->device), &ibp->rvp, qpn);
+ if (qp) {
+ rvt_get_qp(qp);
+ rcu_read_unlock();
+ if (set_qkey)
+ priv->qkey = qkey;
+
+ /* attach QP to multicast group */
+ ret = ib_attach_mcast(&qp->ibqp, mgid, mlid);
+ rvt_put_qp(qp);
+ } else {
+ rcu_read_unlock();
+ }
+
+ return ret;
+}
+
+static int hfi1_ipoib_mcast_detach(struct net_device *dev,
+ struct ib_device *device,
+ union ib_gid *mgid,
+ u16 mlid)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ u32 qpn = (u32)qpn_from_mac(priv->netdev->dev_addr);
+ struct hfi1_ibport *ibp = to_iport(priv->device, priv->port_num);
+ struct rvt_qp *qp;
+ int ret = -EINVAL;
+
+ rcu_read_lock();
+
+ qp = rvt_lookup_qpn(ib_to_rvt(priv->device), &ibp->rvp, qpn);
+ if (qp) {
+ rvt_get_qp(qp);
+ rcu_read_unlock();
+ ret = ib_detach_mcast(&qp->ibqp, mgid, mlid);
+ rvt_put_qp(qp);
+ } else {
+ rcu_read_unlock();
+ }
+ return ret;
+}
+
+static void hfi1_ipoib_netdev_dtor(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+
+ hfi1_ipoib_txreq_deinit(priv);
+ hfi1_ipoib_rxq_deinit(priv->netdev);
+}
+
+static void hfi1_ipoib_set_id(struct net_device *dev, int id)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+
+ priv->pkey_index = (u16)id;
+ ib_query_pkey(priv->device,
+ priv->port_num,
+ priv->pkey_index,
+ &priv->pkey);
+}
+
+static int hfi1_ipoib_setup_rn(struct ib_device *device,
+ u32 port_num,
+ struct net_device *netdev,
+ void *param)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(device);
+ struct rdma_netdev *rn = netdev_priv(netdev);
+ struct hfi1_ipoib_dev_priv *priv;
+ int rc;
+
+ rn->send = hfi1_ipoib_send;
+ rn->tx_timeout = hfi1_ipoib_tx_timeout;
+ rn->attach_mcast = hfi1_ipoib_mcast_attach;
+ rn->detach_mcast = hfi1_ipoib_mcast_detach;
+ rn->set_id = hfi1_ipoib_set_id;
+ rn->hca = device;
+ rn->port_num = port_num;
+ rn->mtu = netdev->mtu;
+
+ priv = hfi1_ipoib_priv(netdev);
+ priv->dd = dd;
+ priv->netdev = netdev;
+ priv->device = device;
+ priv->port_num = port_num;
+ priv->netdev_ops = netdev->netdev_ops;
+
+ ib_query_pkey(device, port_num, priv->pkey_index, &priv->pkey);
+
+ rc = hfi1_ipoib_txreq_init(priv);
+ if (rc) {
+ dd_dev_err(dd, "IPoIB netdev TX init - failed(%d)\n", rc);
+ return rc;
+ }
+
+ rc = hfi1_ipoib_rxq_init(netdev);
+ if (rc) {
+ dd_dev_err(dd, "IPoIB netdev RX init - failed(%d)\n", rc);
+ hfi1_ipoib_txreq_deinit(priv);
+ return rc;
+ }
+
+ netdev->netdev_ops = &hfi1_ipoib_netdev_ops;
+
+ netdev->priv_destructor = hfi1_ipoib_netdev_dtor;
+ netdev->needs_free_netdev = true;
+ netdev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+
+ return 0;
+}
+
+int hfi1_ipoib_rn_get_params(struct ib_device *device,
+ u32 port_num,
+ enum rdma_netdev_t type,
+ struct rdma_netdev_alloc_params *params)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(device);
+
+ if (type != RDMA_NETDEV_IPOIB)
+ return -EOPNOTSUPP;
+
+ if (!HFI1_CAP_IS_KSET(AIP) || !dd->num_netdev_contexts)
+ return -EOPNOTSUPP;
+
+ if (!port_num || port_num > dd->num_pports)
+ return -EINVAL;
+
+ params->sizeof_priv = sizeof(struct hfi1_ipoib_rdma_netdev);
+ params->txqs = dd->num_sdma;
+ params->rxqs = dd->num_netdev_contexts;
+ params->param = NULL;
+ params->initialize_rdma_netdev = hfi1_ipoib_setup_rn;
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/hfi1/ipoib_rx.c b/drivers/infiniband/hw/hfi1/ipoib_rx.c
new file mode 100644
index 000000000000..629691a572ef
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/ipoib_rx.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ *
+ */
+
+#include "netdev.h"
+#include "ipoib.h"
+
+#define HFI1_IPOIB_SKB_PAD ((NET_SKB_PAD) + (NET_IP_ALIGN))
+
+static void copy_ipoib_buf(struct sk_buff *skb, void *data, int size)
+{
+ skb_checksum_none_assert(skb);
+ skb->protocol = *((__be16 *)data);
+
+ skb_put_data(skb, data, size);
+ skb->mac_header = HFI1_IPOIB_PSEUDO_LEN;
+ skb_pull(skb, HFI1_IPOIB_ENCAP_LEN);
+}
+
+static struct sk_buff *prepare_frag_skb(struct napi_struct *napi, int size)
+{
+ struct sk_buff *skb;
+ int skb_size = SKB_DATA_ALIGN(size + HFI1_IPOIB_SKB_PAD);
+ void *frag;
+
+ skb_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ skb_size = SKB_DATA_ALIGN(skb_size);
+ frag = napi_alloc_frag(skb_size);
+
+ if (unlikely(!frag))
+ return napi_alloc_skb(napi, size);
+
+ skb = build_skb(frag, skb_size);
+
+ if (unlikely(!skb)) {
+ skb_free_frag(frag);
+ return NULL;
+ }
+
+ skb_reserve(skb, HFI1_IPOIB_SKB_PAD);
+ return skb;
+}
+
+struct sk_buff *hfi1_ipoib_prepare_skb(struct hfi1_netdev_rxq *rxq,
+ int size, void *data)
+{
+ struct napi_struct *napi = &rxq->napi;
+ int skb_size = size + HFI1_IPOIB_ENCAP_LEN;
+ struct sk_buff *skb;
+
+ /*
+ * For allocations that fit in a page including skb overhead we use
+ * the napi skb cache. Otherwise we try the napi frag cache.
+ */
+ if (size <= SKB_WITH_OVERHEAD(PAGE_SIZE))
+ skb = napi_alloc_skb(napi, skb_size);
+ else
+ skb = prepare_frag_skb(napi, skb_size);
+
+ if (unlikely(!skb))
+ return NULL;
+
+ copy_ipoib_buf(skb, data, size);
+
+ return skb;
+}
+
+int hfi1_ipoib_rxq_init(struct net_device *netdev)
+{
+ struct hfi1_ipoib_dev_priv *ipoib_priv = hfi1_ipoib_priv(netdev);
+ struct hfi1_devdata *dd = ipoib_priv->dd;
+ int ret;
+
+ ret = hfi1_netdev_rx_init(dd);
+ if (ret)
+ return ret;
+
+ hfi1_init_aip_rsm(dd);
+
+ return ret;
+}
+
+void hfi1_ipoib_rxq_deinit(struct net_device *netdev)
+{
+ struct hfi1_ipoib_dev_priv *ipoib_priv = hfi1_ipoib_priv(netdev);
+ struct hfi1_devdata *dd = ipoib_priv->dd;
+
+ hfi1_deinit_aip_rsm(dd);
+ hfi1_netdev_rx_destroy(dd);
+}
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
new file mode 100644
index 000000000000..8b9cc55db59d
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -0,0 +1,868 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ *
+ */
+
+/*
+ * This file contains HFI1 support for IPOIB SDMA functionality
+ */
+
+#include <linux/log2.h>
+#include <linux/circ_buf.h>
+
+#include "sdma.h"
+#include "verbs.h"
+#include "trace_ibhdrs.h"
+#include "ipoib.h"
+#include "trace_tx.h"
+
+/* Convenience helpers for circular ring index arithmetic */
+#define CIRC_ADD(val, add, size) (((val) + (add)) & ((size) - 1))
+#define CIRC_NEXT(val, size) CIRC_ADD(val, 1, size)
+#define CIRC_PREV(val, size) CIRC_ADD(val, -1, size)
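+
+/*
+ * Example (illustrative): with a power-of-two ring of size 8,
+ * CIRC_NEXT(7, 8) == 0 and CIRC_PREV(0, 8) == 7; masking with
+ * (size - 1) provides the wrap-around for head/tail advancement.
+ */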
+
+struct ipoib_txparms {
+ struct hfi1_devdata *dd;
+ struct rdma_ah_attr *ah_attr;
+ struct hfi1_ibport *ibp;
+ struct hfi1_ipoib_txq *txq;
+ union hfi1_ipoib_flow flow;
+ u32 dqpn;
+ u8 hdr_dwords;
+ u8 entropy;
+};
+
+static struct ipoib_txreq *
+hfi1_txreq_from_idx(struct hfi1_ipoib_circ_buf *r, u32 idx)
+{
+ return (struct ipoib_txreq *)(r->items + (idx << r->shift));
+}
+
+static u32 hfi1_ipoib_txreqs(const u64 sent, const u64 completed)
+{
+ return sent - completed;
+}
+
+static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)
+{
+ return hfi1_ipoib_txreqs(txq->tx_ring.sent_txreqs,
+ txq->tx_ring.complete_txreqs);
+}
+
+static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)
+{
+ trace_hfi1_txq_stop(txq);
+ if (atomic_inc_return(&txq->tx_ring.stops) == 1)
+ netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
+}
+
+static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)
+{
+ trace_hfi1_txq_wake(txq);
+ if (atomic_dec_and_test(&txq->tx_ring.stops))
+ netif_wake_subqueue(txq->priv->netdev, txq->q_idx);
+}
+
+static uint hfi1_ipoib_ring_hwat(struct hfi1_ipoib_txq *txq)
+{
+ return min_t(uint, txq->priv->netdev->tx_queue_len,
+ txq->tx_ring.max_items - 1);
+}
+
+static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq)
+{
+ return min_t(uint, txq->priv->netdev->tx_queue_len,
+ txq->tx_ring.max_items) >> 1;
+}
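+
+/*
+ * Worked example (illustrative numbers): with tx_queue_len == 1000 and
+ * max_items == 1024, the high watermark is min(1000, 1023) == 1000
+ * outstanding txreqs (the queue is stopped once usage reaches this) and
+ * the low watermark is min(1000, 1024) / 2 == 500 (the queue is woken
+ * once usage drops below this).
+ */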
+
+static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
+{
+ ++txq->tx_ring.sent_txreqs;
+ if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&
+ !atomic_xchg(&txq->tx_ring.ring_full, 1)) {
+ trace_hfi1_txq_full(txq);
+ hfi1_ipoib_stop_txq(txq);
+ }
+}
+
+static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
+{
+ struct net_device *dev = txq->priv->netdev;
+
+ /* If shutting down just return as queue state is irrelevant */
+ if (unlikely(dev->reg_state != NETREG_REGISTERED))
+ return;
+
+ /*
+ * When the queue has been drained to less than half full it will be
+ * restarted.
+ * The size of the txreq ring is fixed at initialization.
+ * The tx queue len can be adjusted upward while the interface is
+ * running.
+ * The tx queue len can be large enough to overflow the txreq_ring.
+ * Use the minimum of the current tx_queue_len or the ring's max txreqs
+ * to protect against ring overflow.
+ */
+ if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) &&
+ atomic_xchg(&txq->tx_ring.ring_full, 0)) {
+ trace_hfi1_txq_xmit_unstopped(txq);
+ hfi1_ipoib_wake_txq(txq);
+ }
+}
+
+static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
+{
+ struct hfi1_ipoib_dev_priv *priv = tx->txq->priv;
+
+ if (likely(!tx->sdma_status)) {
+ dev_sw_netstats_tx_add(priv->netdev, 1, tx->skb->len);
+ } else {
+ ++priv->netdev->stats.tx_errors;
+ dd_dev_warn(priv->dd,
+ "%s: Status = 0x%x pbc 0x%llx txq = %d sde = %d\n",
+ __func__, tx->sdma_status,
+ le64_to_cpu(tx->sdma_hdr->pbc), tx->txq->q_idx,
+ tx->txq->sde->this_idx);
+ }
+
+ napi_consume_skb(tx->skb, budget);
+ tx->skb = NULL;
+ sdma_txclean(priv->dd, &tx->txreq);
+}
+
+static void hfi1_ipoib_drain_tx_ring(struct hfi1_ipoib_txq *txq)
+{
+ struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
+ int i;
+ struct ipoib_txreq *tx;
+
+ for (i = 0; i < tx_ring->max_items; i++) {
+ tx = hfi1_txreq_from_idx(tx_ring, i);
+ tx->complete = 0;
+ dev_kfree_skb_any(tx->skb);
+ tx->skb = NULL;
+ sdma_txclean(txq->priv->dd, &tx->txreq);
+ }
+ tx_ring->head = 0;
+ tx_ring->tail = 0;
+ tx_ring->complete_txreqs = 0;
+ tx_ring->sent_txreqs = 0;
+ tx_ring->avail = hfi1_ipoib_ring_hwat(txq);
+}
+
+static int hfi1_ipoib_poll_tx_ring(struct napi_struct *napi, int budget)
+{
+ struct hfi1_ipoib_txq *txq =
+ container_of(napi, struct hfi1_ipoib_txq, napi);
+ struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
+ u32 head = tx_ring->head;
+ u32 max_tx = tx_ring->max_items;
+ int work_done;
+ struct ipoib_txreq *tx = hfi1_txreq_from_idx(tx_ring, head);
+
+ trace_hfi1_txq_poll(txq);
+ for (work_done = 0; work_done < budget; work_done++) {
+ /* See hfi1_ipoib_sdma_complete() */
+ if (!smp_load_acquire(&tx->complete))
+ break;
+ tx->complete = 0;
+ trace_hfi1_tx_produce(tx, head);
+ hfi1_ipoib_free_tx(tx, budget);
+ head = CIRC_NEXT(head, max_tx);
+ tx = hfi1_txreq_from_idx(tx_ring, head);
+ }
+ tx_ring->complete_txreqs += work_done;
+
+ /* Finished freeing tx items so store the head value. */
+ smp_store_release(&tx_ring->head, head);
+
+ hfi1_ipoib_check_queue_stopped(txq);
+
+ if (work_done < budget)
+ napi_complete_done(napi, work_done);
+
+ return work_done;
+}
+
+static void hfi1_ipoib_sdma_complete(struct sdma_txreq *txreq, int status)
+{
+ struct ipoib_txreq *tx = container_of(txreq, struct ipoib_txreq, txreq);
+
+ trace_hfi1_txq_complete(tx->txq);
+ tx->sdma_status = status;
+ /* see hfi1_ipoib_poll_tx_ring */
+ smp_store_release(&tx->complete, 1);
+ napi_schedule_irqoff(&tx->txq->napi);
+}
+
+static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx,
+ struct ipoib_txparms *txp)
+{
+ struct hfi1_devdata *dd = txp->dd;
+ struct sdma_txreq *txreq = &tx->txreq;
+ struct sk_buff *skb = tx->skb;
+ int ret = 0;
+ int i;
+
+ if (skb_headlen(skb)) {
+ ret = sdma_txadd_kvaddr(dd, txreq, skb->data, skb_headlen(skb));
+ if (unlikely(ret))
+ return ret;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ ret = sdma_txadd_page(dd,
+ txreq,
+ skb_frag_page(frag),
+ skb_frag_off(frag),
+ skb_frag_size(frag),
+ NULL, NULL, NULL);
+ if (unlikely(ret))
+ break;
+ }
+
+ return ret;
+}
+
+static int hfi1_ipoib_build_tx_desc(struct ipoib_txreq *tx,
+ struct ipoib_txparms *txp)
+{
+ struct hfi1_devdata *dd = txp->dd;
+ struct sdma_txreq *txreq = &tx->txreq;
+ struct hfi1_sdma_header *sdma_hdr = tx->sdma_hdr;
+ u16 pkt_bytes =
+ sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2) + tx->skb->len;
+ int ret;
+
+ ret = sdma_txinit(txreq, 0, pkt_bytes, hfi1_ipoib_sdma_complete);
+ if (unlikely(ret))
+ return ret;
+
+ /* add pbc + headers */
+ ret = sdma_txadd_kvaddr(dd,
+ txreq,
+ sdma_hdr,
+ sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2));
+ if (unlikely(ret))
+ return ret;
+
+ /* add the ulp payload */
+ return hfi1_ipoib_build_ulp_payload(tx, txp);
+}
+
+static void hfi1_ipoib_build_ib_tx_headers(struct ipoib_txreq *tx,
+ struct ipoib_txparms *txp)
+{
+ struct hfi1_ipoib_dev_priv *priv = tx->txq->priv;
+ struct hfi1_sdma_header *sdma_hdr = tx->sdma_hdr;
+ struct sk_buff *skb = tx->skb;
+ struct hfi1_pportdata *ppd = ppd_from_ibp(txp->ibp);
+ struct rdma_ah_attr *ah_attr = txp->ah_attr;
+ struct ib_other_headers *ohdr;
+ struct ib_grh *grh;
+ u16 dwords;
+ u16 slid;
+ u16 dlid;
+ u16 lrh0;
+ u32 bth0;
+ u32 sqpn = (u32)(priv->netdev->dev_addr[1] << 16 |
+ priv->netdev->dev_addr[2] << 8 |
+ priv->netdev->dev_addr[3]);
+ u16 payload_dwords;
+ u8 pad_cnt;
+
+ pad_cnt = -skb->len & 3;
+
+ /* Includes ICRC */
+ payload_dwords = ((skb->len + pad_cnt) >> 2) + SIZE_OF_CRC;
+
+ /* header size in dwords LRH+BTH+DETH = (8+12+8)/4. */
+ txp->hdr_dwords = 7;
+
+ if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
+ grh = &sdma_hdr->hdr.ibh.u.l.grh;
+ txp->hdr_dwords +=
+ hfi1_make_grh(txp->ibp,
+ grh,
+ rdma_ah_read_grh(ah_attr),
+ txp->hdr_dwords - LRH_9B_DWORDS,
+ payload_dwords);
+ lrh0 = HFI1_LRH_GRH;
+ ohdr = &sdma_hdr->hdr.ibh.u.l.oth;
+ } else {
+ lrh0 = HFI1_LRH_BTH;
+ ohdr = &sdma_hdr->hdr.ibh.u.oth;
+ }
+
+ lrh0 |= (rdma_ah_get_sl(ah_attr) & 0xf) << 4;
+ lrh0 |= (txp->flow.sc5 & 0xf) << 12;
+
+ dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 9B);
+ if (dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
+ slid = be16_to_cpu(IB_LID_PERMISSIVE);
+ } else {
+ u16 lid = (u16)ppd->lid;
+
+ if (lid) {
+ lid |= rdma_ah_get_path_bits(ah_attr) &
+ ((1 << ppd->lmc) - 1);
+ slid = lid;
+ } else {
+ slid = be16_to_cpu(IB_LID_PERMISSIVE);
+ }
+ }
+
+ /* Includes ICRC */
+ dwords = txp->hdr_dwords + payload_dwords;
+
+ /* Build the lrh */
+ sdma_hdr->hdr.hdr_type = HFI1_PKT_TYPE_9B;
+ hfi1_make_ib_hdr(&sdma_hdr->hdr.ibh, lrh0, dwords, dlid, slid);
+
+ /* Build the bth */
+ bth0 = (IB_OPCODE_UD_SEND_ONLY << 24) | (pad_cnt << 20) | priv->pkey;
+
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ ohdr->bth[1] = cpu_to_be32(txp->dqpn);
+ ohdr->bth[2] = cpu_to_be32(mask_psn((u32)txp->txq->tx_ring.sent_txreqs));
+
+ /* Build the deth */
+ ohdr->u.ud.deth[0] = cpu_to_be32(priv->qkey);
+ ohdr->u.ud.deth[1] = cpu_to_be32((txp->entropy <<
+ HFI1_IPOIB_ENTROPY_SHIFT) | sqpn);
+
+ /* Construct the pbc. */
+ sdma_hdr->pbc =
+ cpu_to_le64(create_pbc(ppd,
+ ib_is_sc5(txp->flow.sc5) <<
+ PBC_DC_INFO_SHIFT,
+ 0,
+ sc_to_vlt(priv->dd, txp->flow.sc5),
+ dwords - SIZE_OF_CRC +
+ (sizeof(sdma_hdr->pbc) >> 2)));
+}
+
+static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ipoib_txparms *txp)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ struct hfi1_ipoib_txq *txq = txp->txq;
+ struct ipoib_txreq *tx;
+ struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
+ u32 tail = tx_ring->tail;
+ int ret;
+
+ if (unlikely(!tx_ring->avail)) {
+ u32 head;
+
+ if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq))
+ /* This shouldn't happen with a stopped queue */
+ return ERR_PTR(-ENOMEM);
+ /* See hfi1_ipoib_poll_tx_ring() */
+ head = smp_load_acquire(&tx_ring->head);
+ tx_ring->avail =
+ min_t(u32, hfi1_ipoib_ring_hwat(txq),
+ CIRC_CNT(head, tail, tx_ring->max_items));
+ } else {
+ tx_ring->avail--;
+ }
+ tx = hfi1_txreq_from_idx(tx_ring, tail);
+ trace_hfi1_txq_alloc_tx(txq);
+
+ /* so that we can test if the sdma descriptors are there */
+ tx->txreq.num_desc = 0;
+ tx->txq = txq;
+ tx->skb = skb;
+ INIT_LIST_HEAD(&tx->txreq.list);
+
+ hfi1_ipoib_build_ib_tx_headers(tx, txp);
+
+ ret = hfi1_ipoib_build_tx_desc(tx, txp);
+ if (likely(!ret)) {
+ if (txq->flow.as_int != txp->flow.as_int) {
+ txq->flow.tx_queue = txp->flow.tx_queue;
+ txq->flow.sc5 = txp->flow.sc5;
+ txq->sde =
+ sdma_select_engine_sc(priv->dd,
+ txp->flow.tx_queue,
+ txp->flow.sc5);
+ trace_hfi1_flow_switch(txq);
+ }
+
+ return tx;
+ }
+
+ sdma_txclean(priv->dd, &tx->txreq);
+
+ return ERR_PTR(ret);
+}
+
+static int hfi1_ipoib_submit_tx_list(struct net_device *dev,
+ struct hfi1_ipoib_txq *txq)
+{
+ int ret;
+ u16 count_out;
+
+ ret = sdma_send_txlist(txq->sde,
+ iowait_get_ib_work(&txq->wait),
+ &txq->tx_list,
+ &count_out);
+ if (likely(!ret) || ret == -EBUSY || ret == -ECOMM)
+ return ret;
+
+ dd_dev_warn(txq->priv->dd, "cannot send skb tx list, err %d.\n", ret);
+
+ return ret;
+}
+
+static int hfi1_ipoib_flush_tx_list(struct net_device *dev,
+ struct hfi1_ipoib_txq *txq)
+{
+ int ret = 0;
+
+ if (!list_empty(&txq->tx_list)) {
+ /* Flush the current list */
+ ret = hfi1_ipoib_submit_tx_list(dev, txq);
+
+ if (unlikely(ret))
+ if (ret != -EBUSY)
+ ++dev->stats.tx_carrier_errors;
+ }
+
+ return ret;
+}
+
+static int hfi1_ipoib_submit_tx(struct hfi1_ipoib_txq *txq,
+ struct ipoib_txreq *tx)
+{
+ int ret;
+
+ ret = sdma_send_txreq(txq->sde,
+ iowait_get_ib_work(&txq->wait),
+ &tx->txreq,
+ txq->pkts_sent);
+ if (likely(!ret)) {
+ txq->pkts_sent = true;
+ iowait_starve_clear(txq->pkts_sent, &txq->wait);
+ }
+
+ return ret;
+}
+
+static int hfi1_ipoib_send_dma_single(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ipoib_txparms *txp)
+{
+ struct hfi1_ipoib_txq *txq = txp->txq;
+ struct hfi1_ipoib_circ_buf *tx_ring;
+ struct ipoib_txreq *tx;
+ int ret;
+
+ tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
+ if (IS_ERR(tx)) {
+ int ret = PTR_ERR(tx);
+
+ dev_kfree_skb_any(skb);
+
+ if (ret == -ENOMEM)
+ ++dev->stats.tx_errors;
+ else
+ ++dev->stats.tx_carrier_errors;
+
+ return NETDEV_TX_OK;
+ }
+
+ tx_ring = &txq->tx_ring;
+ trace_hfi1_tx_consume(tx, tx_ring->tail);
+ /* consume tx */
+ smp_store_release(&tx_ring->tail, CIRC_NEXT(tx_ring->tail, tx_ring->max_items));
+ ret = hfi1_ipoib_submit_tx(txq, tx);
+ if (likely(!ret)) {
+tx_ok:
+ trace_sdma_output_ibhdr(txq->priv->dd,
+ &tx->sdma_hdr->hdr,
+ ib_is_sc5(txp->flow.sc5));
+ hfi1_ipoib_check_queue_depth(txq);
+ return NETDEV_TX_OK;
+ }
+
+ txq->pkts_sent = false;
+
+ if (ret == -EBUSY || ret == -ECOMM)
+ goto tx_ok;
+
+ /* mark complete and kick napi tx */
+ smp_store_release(&tx->complete, 1);
+ napi_schedule(&tx->txq->napi);
+
+ ++dev->stats.tx_carrier_errors;
+
+ return NETDEV_TX_OK;
+}
+
+static int hfi1_ipoib_send_dma_list(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ipoib_txparms *txp)
+{
+ struct hfi1_ipoib_txq *txq = txp->txq;
+ struct hfi1_ipoib_circ_buf *tx_ring;
+ struct ipoib_txreq *tx;
+
+ /* Has the flow changed? */
+ if (txq->flow.as_int != txp->flow.as_int) {
+ int ret;
+
+ trace_hfi1_flow_flush(txq);
+ ret = hfi1_ipoib_flush_tx_list(dev, txq);
+ if (unlikely(ret)) {
+ if (ret == -EBUSY)
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ }
+ tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
+ if (IS_ERR(tx)) {
+ int ret = PTR_ERR(tx);
+
+ dev_kfree_skb_any(skb);
+
+ if (ret == -ENOMEM)
+ ++dev->stats.tx_errors;
+ else
+ ++dev->stats.tx_carrier_errors;
+
+ return NETDEV_TX_OK;
+ }
+
+ tx_ring = &txq->tx_ring;
+ trace_hfi1_tx_consume(tx, tx_ring->tail);
+ /* consume tx */
+ smp_store_release(&tx_ring->tail, CIRC_NEXT(tx_ring->tail, tx_ring->max_items));
+ list_add_tail(&tx->txreq.list, &txq->tx_list);
+
+ hfi1_ipoib_check_queue_depth(txq);
+
+ trace_sdma_output_ibhdr(txq->priv->dd,
+ &tx->sdma_hdr->hdr,
+ ib_is_sc5(txp->flow.sc5));
+
+ if (!netdev_xmit_more())
+ (void)hfi1_ipoib_flush_tx_list(dev, txq);
+
+ return NETDEV_TX_OK;
+}
+
+static u8 hfi1_ipoib_calc_entropy(struct sk_buff *skb)
+{
+ if (skb_transport_header_was_set(skb)) {
+ u8 *hdr = (u8 *)skb_transport_header(skb);
+
+ return (hdr[0] ^ hdr[1] ^ hdr[2] ^ hdr[3]);
+ }
+
+ return (u8)skb_get_queue_mapping(skb);
+}
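+
+/*
+ * Example (illustrative): for a UDP packet the transport header starts
+ * with the 16-bit source and destination ports, so the entropy is
+ * sport_hi ^ sport_lo ^ dport_hi ^ dport_lo. The value is carried in
+ * DETH[1] (see hfi1_ipoib_build_ib_tx_headers()) for per-flow
+ * distribution; packets without a transport header fall back to the
+ * skb queue mapping.
+ */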
+
+int hfi1_ipoib_send(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ib_ah *address,
+ u32 dqpn)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ struct ipoib_txparms txp;
+ struct rdma_netdev *rn = netdev_priv(dev);
+
+ if (unlikely(skb->len > rn->mtu + HFI1_IPOIB_ENCAP_LEN)) {
+ dd_dev_warn(priv->dd, "packet len %d (> %d) too long to send, dropping\n",
+ skb->len,
+ rn->mtu + HFI1_IPOIB_ENCAP_LEN);
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ txp.dd = priv->dd;
+ txp.ah_attr = &ibah_to_rvtah(address)->attr;
+ txp.ibp = to_iport(priv->device, priv->port_num);
+ txp.txq = &priv->txqs[skb_get_queue_mapping(skb)];
+ txp.dqpn = dqpn;
+ txp.flow.sc5 = txp.ibp->sl_to_sc[rdma_ah_get_sl(txp.ah_attr)];
+ txp.flow.tx_queue = (u8)skb_get_queue_mapping(skb);
+ txp.entropy = hfi1_ipoib_calc_entropy(skb);
+
+ if (netdev_xmit_more() || !list_empty(&txp.txq->tx_list))
+ return hfi1_ipoib_send_dma_list(dev, skb, &txp);
+
+ return hfi1_ipoib_send_dma_single(dev, skb, &txp);
+}
+
+/*
+ * hfi1_ipoib_sdma_sleep - ipoib sdma sleep function
+ *
+ * This function gets called from sdma_send_txreq() when there are not enough
+ * sdma descriptors available to send the packet. It adds the Tx queue's wait
+ * structure to the sdma engine's dmawait list so that it is woken when descriptors
+ * become available.
+ */
+static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde,
+ struct iowait_work *wait,
+ struct sdma_txreq *txreq,
+ uint seq,
+ bool pkts_sent)
+{
+ struct hfi1_ipoib_txq *txq =
+ container_of(wait->iow, struct hfi1_ipoib_txq, wait);
+
+ write_seqlock(&sde->waitlock);
+
+ if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED)) {
+ if (sdma_progress(sde, seq, txreq)) {
+ write_sequnlock(&sde->waitlock);
+ return -EAGAIN;
+ }
+
+ if (list_empty(&txreq->list))
+ /* came from non-list submit */
+ list_add_tail(&txreq->list, &txq->tx_list);
+ if (list_empty(&txq->wait.list)) {
+ struct hfi1_ibport *ibp = &sde->ppd->ibport_data;
+
+ if (!atomic_xchg(&txq->tx_ring.no_desc, 1)) {
+ trace_hfi1_txq_queued(txq);
+ hfi1_ipoib_stop_txq(txq);
+ }
+ ibp->rvp.n_dmawait++;
+ iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
+ }
+
+ write_sequnlock(&sde->waitlock);
+ return -EBUSY;
+ }
+
+ write_sequnlock(&sde->waitlock);
+ return -EINVAL;
+}
+
+/*
+ * hfi1_ipoib_sdma_wakeup - ipoib sdma wakeup function
+ *
+ * This function gets called when SDMA descriptors become available and the Tx
+ * queue's wait structure was previously added to the sdma engine's dmawait list.
+ */
+static void hfi1_ipoib_sdma_wakeup(struct iowait *wait, int reason)
+{
+ struct hfi1_ipoib_txq *txq =
+ container_of(wait, struct hfi1_ipoib_txq, wait);
+
+ trace_hfi1_txq_wakeup(txq);
+ if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED))
+ iowait_schedule(wait, system_highpri_wq, WORK_CPU_UNBOUND);
+}
+
+static void hfi1_ipoib_flush_txq(struct work_struct *work)
+{
+ struct iowait_work *ioww =
+ container_of(work, struct iowait_work, iowork);
+ struct iowait *wait = iowait_ioww_to_iow(ioww);
+ struct hfi1_ipoib_txq *txq =
+ container_of(wait, struct hfi1_ipoib_txq, wait);
+ struct net_device *dev = txq->priv->netdev;
+
+ if (likely(dev->reg_state == NETREG_REGISTERED) &&
+ likely(!hfi1_ipoib_flush_tx_list(dev, txq)))
+ if (atomic_xchg(&txq->tx_ring.no_desc, 0))
+ hfi1_ipoib_wake_txq(txq);
+}
+
+int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
+{
+ struct net_device *dev = priv->netdev;
+ u32 tx_ring_size, tx_item_size;
+ struct hfi1_ipoib_circ_buf *tx_ring;
+ int i, j;
+
+ /*
+ * Ring holds 1 less than tx_ring_size
+ * Round up to next power of 2 in order to hold at least tx_queue_len
+ */
+ tx_ring_size = roundup_pow_of_two(dev->tx_queue_len + 1);
+ tx_item_size = roundup_pow_of_two(sizeof(struct ipoib_txreq));
+
+ priv->txqs = kcalloc_node(dev->num_tx_queues,
+ sizeof(struct hfi1_ipoib_txq),
+ GFP_KERNEL,
+ priv->dd->node);
+ if (!priv->txqs)
+ return -ENOMEM;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+ struct ipoib_txreq *tx;
+
+ tx_ring = &txq->tx_ring;
+ iowait_init(&txq->wait,
+ 0,
+ hfi1_ipoib_flush_txq,
+ NULL,
+ hfi1_ipoib_sdma_sleep,
+ hfi1_ipoib_sdma_wakeup,
+ NULL,
+ NULL);
+ txq->priv = priv;
+ txq->sde = NULL;
+ INIT_LIST_HEAD(&txq->tx_list);
+ atomic_set(&txq->tx_ring.stops, 0);
+ atomic_set(&txq->tx_ring.ring_full, 0);
+ atomic_set(&txq->tx_ring.no_desc, 0);
+ txq->q_idx = i;
+ txq->flow.tx_queue = 0xff;
+ txq->flow.sc5 = 0xff;
+ txq->pkts_sent = false;
+
+ netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
+ priv->dd->node);
+
+ txq->tx_ring.items =
+ kvzalloc_node(array_size(tx_ring_size, tx_item_size),
+ GFP_KERNEL, priv->dd->node);
+ if (!txq->tx_ring.items)
+ goto free_txqs;
+
+ txq->tx_ring.max_items = tx_ring_size;
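+ /*
+ * items[] is a flat buffer of tx_ring_size entries of tx_item_size
+ * bytes each; since the item size is a power of two, shift (its log2)
+ * converts a ring index into a byte offset.
+ */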
+ txq->tx_ring.shift = ilog2(tx_item_size);
+ txq->tx_ring.avail = hfi1_ipoib_ring_hwat(txq);
+ tx_ring = &txq->tx_ring;
+ for (j = 0; j < tx_ring_size; j++) {
+ hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr =
+ kzalloc_node(sizeof(*tx->sdma_hdr),
+ GFP_KERNEL, priv->dd->node);
+ if (!hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr)
+ goto free_txqs;
+ }
+
+ netif_napi_add_tx(dev, &txq->napi, hfi1_ipoib_poll_tx_ring);
+ }
+
+ return 0;
+
+free_txqs:
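+ /* Unwind the previously initialized queues */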
+ for (i--; i >= 0; i--) {
+ struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+
+ netif_napi_del(&txq->napi);
+ tx_ring = &txq->tx_ring;
+ for (j = 0; j < tx_ring_size; j++)
+ kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr);
+ kvfree(tx_ring->items);
+ }
+
+ kfree(priv->txqs);
+ priv->txqs = NULL;
+ return -ENOMEM;
+}
+
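+/*
+ * Release every request still sitting on the txq's tx_list: clean up the
+ * sdma descriptors, free the skb and account the request as completed.
+ */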
+static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
+{
+ struct sdma_txreq *txreq;
+ struct sdma_txreq *txreq_tmp;
+
+ list_for_each_entry_safe(txreq, txreq_tmp, &txq->tx_list, list) {
+ struct ipoib_txreq *tx =
+ container_of(txreq, struct ipoib_txreq, txreq);
+
+ list_del(&txreq->list);
+ sdma_txclean(txq->priv->dd, &tx->txreq);
+ dev_kfree_skb_any(tx->skb);
+ tx->skb = NULL;
+ txq->tx_ring.complete_txreqs++;
+ }
+
+ if (hfi1_ipoib_used(txq))
+ dd_dev_warn(txq->priv->dd,
+ "txq %d not empty found %u requests\n",
+ txq->q_idx,
+ hfi1_ipoib_txreqs(txq->tx_ring.sent_txreqs,
+ txq->tx_ring.complete_txreqs));
+}
+
+void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
+{
+ int i, j;
+
+ for (i = 0; i < priv->netdev->num_tx_queues; i++) {
+ struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+ struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
+
+ iowait_cancel_work(&txq->wait);
+ iowait_sdma_drain(&txq->wait);
+ hfi1_ipoib_drain_tx_list(txq);
+ netif_napi_del(&txq->napi);
+ hfi1_ipoib_drain_tx_ring(txq);
+ for (j = 0; j < tx_ring->max_items; j++)
+ kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr);
+ kvfree(tx_ring->items);
+ }
+
+ kfree(priv->txqs);
+ priv->txqs = NULL;
+}
+
+void hfi1_ipoib_napi_tx_enable(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+
+ napi_enable(&txq->napi);
+ }
+}
+
+void hfi1_ipoib_napi_tx_disable(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+
+ napi_disable(&txq->napi);
+ hfi1_ipoib_drain_tx_ring(txq);
+ }
+}
+
+void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ struct hfi1_ipoib_txq *txq = &priv->txqs[q];
+
+ dd_dev_info(priv->dd, "timeout txq %p q %u stopped %u stops %d no_desc %d ring_full %d\n",
+ txq, q,
+ __netif_subqueue_stopped(dev, txq->q_idx),
+ atomic_read(&txq->tx_ring.stops),
+ atomic_read(&txq->tx_ring.no_desc),
+ atomic_read(&txq->tx_ring.ring_full));
+ dd_dev_info(priv->dd, "sde %p engine %u\n",
+ txq->sde,
+ txq->sde ? txq->sde->this_idx : 0);
+ dd_dev_info(priv->dd, "flow %x\n", txq->flow.as_int);
+ dd_dev_info(priv->dd, "sent %llu completed %llu used %llu\n",
+ txq->tx_ring.sent_txreqs, txq->tx_ring.complete_txreqs,
+ hfi1_ipoib_used(txq));
+ dd_dev_info(priv->dd, "tx_queue_len %u max_items %u\n",
+ dev->tx_queue_len, txq->tx_ring.max_items);
+ dd_dev_info(priv->dd, "head %u tail %u\n",
+ txq->tx_ring.head, txq->tx_ring.tail);
+ dd_dev_info(priv->dd, "wait queued %u\n",
+ !list_empty(&txq->wait.list));
+ dd_dev_info(priv->dd, "tx_list empty %u\n",
+ list_empty(&txq->tx_list));
+}
+
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
new file mode 100644
index 000000000000..961fa07116f0
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -0,0 +1,4896 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015-2018 Intel Corporation.
+ */
+
+#include <linux/net.h>
+#include <rdma/opa_addr.h>
+#define OPA_NUM_PKEY_BLOCKS_PER_SMP (OPA_SMP_DR_DATA_SIZE \
+ / (OPA_PARTITION_TABLE_BLK_SIZE * sizeof(u16)))
+
+#include "hfi.h"
+#include "mad.h"
+#include "trace.h"
+#include "qp.h"
+#include "vnic.h"
+
+/* the reset value from the FM is supposed to be 0xffff, handle both */
+#define OPA_LINK_WIDTH_RESET_OLD 0x0fff
+#define OPA_LINK_WIDTH_RESET 0xffff
+
+struct trap_node {
+ struct list_head list;
+ struct opa_mad_notice_attr data;
+ __be64 tid;
+ int len;
+ u32 retry;
+ u8 in_use;
+ u8 repress;
+};
+
+static int smp_length_check(u32 data_size, u32 request_len)
+{
+ if (unlikely(request_len < data_size))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int reply(struct ib_mad_hdr *smp)
+{
+ /*
+ * The verbs framework will handle the directed/LID route
+ * packet changes.
+ */
+ smp->method = IB_MGMT_METHOD_GET_RESP;
+ if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ smp->status |= IB_SMP_DIRECTION;
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+}
+
+static inline void clear_opa_smp_data(struct opa_smp *smp)
+{
+ void *data = opa_get_smp_data(smp);
+ size_t size = opa_get_smp_data_size(smp);
+
+ memset(data, 0, size);
+}
+
+static u16 hfi1_lookup_pkey_value(struct hfi1_ibport *ibp, int pkey_idx)
+{
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+ if (pkey_idx < ARRAY_SIZE(ppd->pkeys))
+ return ppd->pkeys[pkey_idx];
+
+ return 0;
+}
+
+void hfi1_event_pkey_change(struct hfi1_devdata *dd, u32 port)
+{
+ struct ib_event event;
+
+ event.event = IB_EVENT_PKEY_CHANGE;
+ event.device = &dd->verbs_dev.rdi.ibdev;
+ event.element.port_num = port;
+ ib_dispatch_event(&event);
+}
+
+/*
+ * If the port is down, clean up all pending traps. We need to be careful
+ * with the given trap, because it may be queued.
+ */
+static void cleanup_traps(struct hfi1_ibport *ibp, struct trap_node *trap)
+{
+ struct trap_node *node, *q;
+ unsigned long flags;
+ struct list_head trap_list;
+ int i;
+
+ for (i = 0; i < RVT_MAX_TRAP_LISTS; i++) {
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
+ list_replace_init(&ibp->rvp.trap_lists[i].list, &trap_list);
+ ibp->rvp.trap_lists[i].list_len = 0;
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
+
+ /*
+ * Remove all items from the list, freeing all the non-given
+ * traps.
+ */
+ list_for_each_entry_safe(node, q, &trap_list, list) {
+ list_del(&node->list);
+ if (node != trap)
+ kfree(node);
+ }
+ }
+
+ /*
+ * If this wasn't on one of the lists it would not be freed. If it
+ * was on the list, it is now safe to free.
+ */
+ kfree(trap);
+}
+
+static struct trap_node *check_and_add_trap(struct hfi1_ibport *ibp,
+ struct trap_node *trap)
+{
+ struct trap_node *node;
+ struct trap_list *trap_list;
+ unsigned long flags;
+ unsigned long timeout;
+ int found = 0;
+ unsigned int queue_id;
+ static int trap_count;
+
+ queue_id = trap->data.generic_type & 0x0F;
+ if (queue_id >= RVT_MAX_TRAP_LISTS) {
+ trap_count++;
+ pr_err_ratelimited("hfi1: Invalid trap 0x%0x dropped. Total dropped: %d\n",
+ trap->data.generic_type, trap_count);
+ kfree(trap);
+ return NULL;
+ }
+
+ /*
+ * Since the retry (handle timeout) does not remove a trap request
+ * from the list, all we have to do is compare the node.
+ */
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
+ trap_list = &ibp->rvp.trap_lists[queue_id];
+
+ list_for_each_entry(node, &trap_list->list, list) {
+ if (node == trap) {
+ node->retry++;
+ found = 1;
+ break;
+ }
+ }
+
+ /* If it is not on the list, add it, limited to RVT_MAX_TRAP_LEN. */
+ if (!found) {
+ if (trap_list->list_len < RVT_MAX_TRAP_LEN) {
+ trap_list->list_len++;
+ list_add_tail(&trap->list, &trap_list->list);
+ } else {
+ pr_warn_ratelimited("hfi1: Maximum trap limit reached for 0x%0x traps\n",
+ trap->data.generic_type);
+ kfree(trap);
+ }
+ }
+
+ /*
+ * Next check to see if there is a timer pending. If not, set it up
+ * and get the first trap from the list.
+ */
+ node = NULL;
+ if (!timer_pending(&ibp->rvp.trap_timer)) {
+ /*
+ * o14-2
+ * If the time out is set we have to wait until it expires
+ * before the trap can be sent.
+ * This should be > RVT_TRAP_TIMEOUT
+ */
+ timeout = (RVT_TRAP_TIMEOUT *
+ (1UL << ibp->rvp.subnet_timeout)) / 1000;
+ mod_timer(&ibp->rvp.trap_timer,
+ jiffies + usecs_to_jiffies(timeout));
+ node = list_first_entry(&trap_list->list, struct trap_node,
+ list);
+ node->in_use = 1;
+ }
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
+
+ return node;
+}
+
+static void subn_handle_opa_trap_repress(struct hfi1_ibport *ibp,
+ struct opa_smp *smp)
+{
+ struct trap_list *trap_list;
+ struct trap_node *trap;
+ unsigned long flags;
+ int i;
+
+ if (smp->attr_id != IB_SMP_ATTR_NOTICE)
+ return;
+
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
+ for (i = 0; i < RVT_MAX_TRAP_LISTS; i++) {
+ trap_list = &ibp->rvp.trap_lists[i];
+ trap = list_first_entry_or_null(&trap_list->list,
+ struct trap_node, list);
+ if (trap && trap->tid == smp->tid) {
+ if (trap->in_use) {
+ trap->repress = 1;
+ } else {
+ trap_list->list_len--;
+ list_del(&trap->list);
+ kfree(trap);
+ }
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
+}
+
+static void hfi1_update_sm_ah_attr(struct hfi1_ibport *ibp,
+ struct rdma_ah_attr *attr, u32 dlid)
+{
+ rdma_ah_set_dlid(attr, dlid);
+ rdma_ah_set_port_num(attr, ppd_from_ibp(ibp)->port);
+ if (dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
+ struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
+
+ rdma_ah_set_ah_flags(attr, IB_AH_GRH);
+ grh->sgid_index = 0;
+ grh->hop_limit = 1;
+ grh->dgid.global.subnet_prefix =
+ ibp->rvp.gid_prefix;
+ grh->dgid.global.interface_id = OPA_MAKE_ID(dlid);
+ }
+}
+
+static int hfi1_modify_qp0_ah(struct hfi1_ibport *ibp,
+ struct rvt_ah *ah, u32 dlid)
+{
+ struct rdma_ah_attr attr;
+ struct rvt_qp *qp0;
+ int ret = -EINVAL;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.type = ah->ibah.type;
+ hfi1_update_sm_ah_attr(ibp, &attr, dlid);
+ rcu_read_lock();
+ qp0 = rcu_dereference(ibp->rvp.qp[0]);
+ if (qp0)
+ ret = rdma_modify_ah(&ah->ibah, &attr);
+ rcu_read_unlock();
+ return ret;
+}
+
+static struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u32 dlid)
+{
+ struct rdma_ah_attr attr;
+ struct ib_ah *ah = ERR_PTR(-EINVAL);
+ struct rvt_qp *qp0;
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct hfi1_devdata *dd = dd_from_ppd(ppd);
+ u32 port_num = ppd->port;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.type = rdma_ah_find_type(&dd->verbs_dev.rdi.ibdev, port_num);
+ hfi1_update_sm_ah_attr(ibp, &attr, dlid);
+ rcu_read_lock();
+ qp0 = rcu_dereference(ibp->rvp.qp[0]);
+ if (qp0)
+ ah = rdma_create_ah(qp0->ibqp.pd, &attr, 0);
+ rcu_read_unlock();
+ return ah;
+}
+
+static void send_trap(struct hfi1_ibport *ibp, struct trap_node *trap)
+{
+ struct ib_mad_send_buf *send_buf;
+ struct ib_mad_agent *agent;
+ struct opa_smp *smp;
+ unsigned long flags;
+ int pkey_idx;
+ u32 qpn = ppd_from_ibp(ibp)->sm_trap_qp;
+
+ agent = ibp->rvp.send_agent;
+ if (!agent) {
+ cleanup_traps(ibp, trap);
+ return;
+ }
+
+ /* o14-3.2.1 */
+ if (driver_lstate(ppd_from_ibp(ibp)) != IB_PORT_ACTIVE) {
+ cleanup_traps(ibp, trap);
+ return;
+ }
+
+ /* Add the trap to the list if necessary and see if we can send it */
+ trap = check_and_add_trap(ibp, trap);
+ if (!trap)
+ return;
+
+ pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
+ if (pkey_idx < 0) {
+ pr_warn("%s: failed to find limited mgmt pkey, defaulting 0x%x\n",
+ __func__, hfi1_get_pkey(ibp, 1));
+ pkey_idx = 1;
+ }
+
+ send_buf = ib_create_send_mad(agent, qpn, pkey_idx, 0,
+ IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
+ GFP_ATOMIC, IB_MGMT_BASE_VERSION);
+ if (IS_ERR(send_buf))
+ return;
+
+ smp = send_buf->mad;
+ smp->base_version = OPA_MGMT_BASE_VERSION;
+ smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
+ smp->class_version = OPA_SM_CLASS_VERSION;
+ smp->method = IB_MGMT_METHOD_TRAP;
+
+ /* Only update the transaction ID for new traps (o13-5). */
+ if (trap->tid == 0) {
+ ibp->rvp.tid++;
+ /* make sure that tid != 0 */
+ if (ibp->rvp.tid == 0)
+ ibp->rvp.tid++;
+ trap->tid = cpu_to_be64(ibp->rvp.tid);
+ }
+ smp->tid = trap->tid;
+
+ smp->attr_id = IB_SMP_ATTR_NOTICE;
+ /* o14-1: smp->mkey = 0; */
+
+ memcpy(smp->route.lid.data, &trap->data, trap->len);
+
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
+ if (!ibp->rvp.sm_ah) {
+ if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
+ struct ib_ah *ah;
+
+ ah = hfi1_create_qp0_ah(ibp, ibp->rvp.sm_lid);
+ if (IS_ERR(ah)) {
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
+ return;
+ }
+ send_buf->ah = ah;
+ ibp->rvp.sm_ah = ibah_to_rvtah(ah);
+ } else {
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
+ return;
+ }
+ } else {
+ send_buf->ah = &ibp->rvp.sm_ah->ibah;
+ }
+
+ /*
+ * If the trap was repressed while things were getting set up, don't
+ * bother sending it. This could happen for a retry.
+ */
+ if (trap->repress) {
+ list_del(&trap->list);
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
+ kfree(trap);
+ ib_free_send_mad(send_buf);
+ return;
+ }
+
+ trap->in_use = 0;
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
+
+ if (ib_post_send_mad(send_buf, NULL))
+ ib_free_send_mad(send_buf);
+}
+
+void hfi1_handle_trap_timer(struct timer_list *t)
+{
+ struct hfi1_ibport *ibp = timer_container_of(ibp, t, rvp.trap_timer);
+ struct trap_node *trap = NULL;
+ unsigned long flags;
+ int i;
+
+ /* Find the trap with the highest priority */
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
+ for (i = 0; !trap && i < RVT_MAX_TRAP_LISTS; i++) {
+ trap = list_first_entry_or_null(&ibp->rvp.trap_lists[i].list,
+ struct trap_node, list);
+ }
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
+
+ if (trap)
+ send_trap(ibp, trap);
+}
+
+static struct trap_node *create_trap_node(u8 type, __be16 trap_num, u32 lid)
+{
+ struct trap_node *trap;
+
+ trap = kzalloc(sizeof(*trap), GFP_ATOMIC);
+ if (!trap)
+ return NULL;
+
+ INIT_LIST_HEAD(&trap->list);
+ trap->data.generic_type = type;
+ trap->data.prod_type_lsb = IB_NOTICE_PROD_CA;
+ trap->data.trap_num = trap_num;
+ trap->data.issuer_lid = cpu_to_be32(lid);
+
+ return trap;
+}
+
+/*
+ * Send a bad P_Key trap (ch. 14.3.8).
+ */
+void hfi1_bad_pkey(struct hfi1_ibport *ibp, u32 key, u32 sl,
+ u32 qp1, u32 qp2, u32 lid1, u32 lid2)
+{
+ struct trap_node *trap;
+ u32 lid = ppd_from_ibp(ibp)->lid;
+
+ ibp->rvp.n_pkt_drops++;
+ ibp->rvp.pkey_violations++;
+
+ trap = create_trap_node(IB_NOTICE_TYPE_SECURITY, OPA_TRAP_BAD_P_KEY,
+ lid);
+ if (!trap)
+ return;
+
+ /* Send violation trap */
+ trap->data.ntc_257_258.lid1 = cpu_to_be32(lid1);
+ trap->data.ntc_257_258.lid2 = cpu_to_be32(lid2);
+ trap->data.ntc_257_258.key = cpu_to_be32(key);
+ trap->data.ntc_257_258.sl = sl << 3;
+ trap->data.ntc_257_258.qp1 = cpu_to_be32(qp1);
+ trap->data.ntc_257_258.qp2 = cpu_to_be32(qp2);
+
+ trap->len = sizeof(trap->data);
+ send_trap(ibp, trap);
+}
+
+/*
+ * Send a bad M_Key trap (ch. 14.3.9).
+ */
+static void bad_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
+ __be64 mkey, __be32 dr_slid, u8 return_path[], u8 hop_cnt)
+{
+ struct trap_node *trap;
+ u32 lid = ppd_from_ibp(ibp)->lid;
+
+ trap = create_trap_node(IB_NOTICE_TYPE_SECURITY, OPA_TRAP_BAD_M_KEY,
+ lid);
+ if (!trap)
+ return;
+
+ /* Send violation trap */
+ trap->data.ntc_256.lid = trap->data.issuer_lid;
+ trap->data.ntc_256.method = mad->method;
+ trap->data.ntc_256.attr_id = mad->attr_id;
+ trap->data.ntc_256.attr_mod = mad->attr_mod;
+ trap->data.ntc_256.mkey = mkey;
+ if (mad->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+ trap->data.ntc_256.dr_slid = dr_slid;
+ trap->data.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
+ if (hop_cnt > ARRAY_SIZE(trap->data.ntc_256.dr_rtn_path)) {
+ trap->data.ntc_256.dr_trunc_hop |=
+ IB_NOTICE_TRAP_DR_TRUNC;
+ hop_cnt = ARRAY_SIZE(trap->data.ntc_256.dr_rtn_path);
+ }
+ trap->data.ntc_256.dr_trunc_hop |= hop_cnt;
+ memcpy(trap->data.ntc_256.dr_rtn_path, return_path,
+ hop_cnt);
+ }
+
+ trap->len = sizeof(trap->data);
+
+ send_trap(ibp, trap);
+}
+
+/*
+ * Send a Port Capability Mask Changed trap (ch. 14.3.11).
+ */
+void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u32 port_num)
+{
+ struct trap_node *trap;
+ struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
+ struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
+ struct hfi1_ibport *ibp = &dd->pport[port_num - 1].ibport_data;
+ u32 lid = ppd_from_ibp(ibp)->lid;
+
+ trap = create_trap_node(IB_NOTICE_TYPE_INFO,
+ OPA_TRAP_CHANGE_CAPABILITY,
+ lid);
+ if (!trap)
+ return;
+
+ trap->data.ntc_144.lid = trap->data.issuer_lid;
+ trap->data.ntc_144.new_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
+ trap->data.ntc_144.cap_mask3 = cpu_to_be16(ibp->rvp.port_cap3_flags);
+
+ trap->len = sizeof(trap->data);
+ send_trap(ibp, trap);
+}
+
+/*
+ * Send a System Image GUID Changed trap (ch. 14.3.12).
+ */
+void hfi1_sys_guid_chg(struct hfi1_ibport *ibp)
+{
+ struct trap_node *trap;
+ u32 lid = ppd_from_ibp(ibp)->lid;
+
+ trap = create_trap_node(IB_NOTICE_TYPE_INFO, OPA_TRAP_CHANGE_SYSGUID,
+ lid);
+ if (!trap)
+ return;
+
+ trap->data.ntc_145.new_sys_guid = ib_hfi1_sys_image_guid;
+ trap->data.ntc_145.lid = trap->data.issuer_lid;
+
+ trap->len = sizeof(trap->data);
+ send_trap(ibp, trap);
+}
+
+/*
+ * Send a Node Description Changed trap (ch. 14.3.13).
+ */
+void hfi1_node_desc_chg(struct hfi1_ibport *ibp)
+{
+ struct trap_node *trap;
+ u32 lid = ppd_from_ibp(ibp)->lid;
+
+ trap = create_trap_node(IB_NOTICE_TYPE_INFO,
+ OPA_TRAP_CHANGE_CAPABILITY,
+ lid);
+ if (!trap)
+ return;
+
+ trap->data.ntc_144.lid = trap->data.issuer_lid;
+ trap->data.ntc_144.change_flags =
+ cpu_to_be16(OPA_NOTICE_TRAP_NODE_DESC_CHG);
+
+ trap->len = sizeof(trap->data);
+ send_trap(ibp, trap);
+}
+
+static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am,
+ u8 *data, struct ib_device *ibdev,
+ u32 port, u32 *resp_len, u32 max_len)
+{
+ struct opa_node_description *nd;
+
+ if (am || smp_length_check(sizeof(*nd), max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ nd = (struct opa_node_description *)data;
+
+ memcpy(nd->data, ibdev->node_desc, sizeof(nd->data));
+
+ if (resp_len)
+ *resp_len += sizeof(*nd);
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len)
+{
+ struct opa_node_info *ni;
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ u32 pidx = port - 1; /* IB number port from 1, hw from 0 */
+
+ ni = (struct opa_node_info *)data;
+
+ /* GUID 0 is illegal */
+ if (am || pidx >= dd->num_pports || ibdev->node_guid == 0 ||
+ smp_length_check(sizeof(*ni), max_len) ||
+ get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX) == 0) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ ni->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX);
+ ni->base_version = OPA_MGMT_BASE_VERSION;
+ ni->class_version = OPA_SM_CLASS_VERSION;
+ ni->node_type = 1; /* channel adapter */
+ ni->num_ports = ibdev->phys_port_cnt;
+ /* This is already in network order */
+ ni->system_image_guid = ib_hfi1_sys_image_guid;
+ ni->node_guid = ibdev->node_guid;
+ ni->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
+ ni->device_id = cpu_to_be16(dd->pcidev->device);
+ ni->revision = cpu_to_be32(dd->minrev);
+ ni->local_port_num = port;
+ ni->vendor_id[0] = dd->oui1;
+ ni->vendor_id[1] = dd->oui2;
+ ni->vendor_id[2] = dd->oui3;
+
+ if (resp_len)
+ *resp_len += sizeof(*ni);
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
+ u32 port)
+{
+ struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ u32 pidx = port - 1; /* IB number port from 1, hw from 0 */
+
+ /* GUID 0 is illegal */
+ if (smp->attr_mod || pidx >= dd->num_pports ||
+ ibdev->node_guid == 0 ||
+ get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX) == 0) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ nip->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX);
+ nip->base_version = OPA_MGMT_BASE_VERSION;
+ nip->class_version = OPA_SM_CLASS_VERSION;
+ nip->node_type = 1; /* channel adapter */
+ nip->num_ports = ibdev->phys_port_cnt;
+ /* This is already in network order */
+ nip->sys_guid = ib_hfi1_sys_image_guid;
+ nip->node_guid = ibdev->node_guid;
+ nip->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
+ nip->device_id = cpu_to_be16(dd->pcidev->device);
+ nip->revision = cpu_to_be32(dd->minrev);
+ nip->local_port_num = port;
+ nip->vendor_id[0] = dd->oui1;
+ nip->vendor_id[1] = dd->oui2;
+ nip->vendor_id[2] = dd->oui3;
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static void set_link_width_enabled(struct hfi1_pportdata *ppd, u32 w)
+{
+ (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_ENB, w);
+}
+
+static void set_link_width_downgrade_enabled(struct hfi1_pportdata *ppd, u32 w)
+{
+ (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_DG_ENB, w);
+}
+
+static void set_link_speed_enabled(struct hfi1_pportdata *ppd, u32 s)
+{
+ (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_SPD_ENB, s);
+}
+
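+/*
+ * Validate the M_Key of an incoming SMP. Returns 0 when the MAD may be
+ * processed, non-zero when it must be rejected because of an M_Key
+ * violation (a BadMKey trap is generated where required).
+ */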
+static int check_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
+ int mad_flags, __be64 mkey, __be32 dr_slid,
+ u8 return_path[], u8 hop_cnt)
+{
+ int valid_mkey = 0;
+ int ret = 0;
+
+ /* Is the mkey in the process of expiring? */
+ if (ibp->rvp.mkey_lease_timeout &&
+ time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) {
+ /* Clear timeout and mkey protection field. */
+ ibp->rvp.mkey_lease_timeout = 0;
+ ibp->rvp.mkeyprot = 0;
+ }
+
+ if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 ||
+ ibp->rvp.mkey == mkey)
+ valid_mkey = 1;
+
+ /* Unset lease timeout on any valid Get/Set/TrapRepress */
+ if (valid_mkey && ibp->rvp.mkey_lease_timeout &&
+ (mad->method == IB_MGMT_METHOD_GET ||
+ mad->method == IB_MGMT_METHOD_SET ||
+ mad->method == IB_MGMT_METHOD_TRAP_REPRESS))
+ ibp->rvp.mkey_lease_timeout = 0;
+
+ if (!valid_mkey) {
+ switch (mad->method) {
+ case IB_MGMT_METHOD_GET:
+ /* Bad mkey not a violation below level 2 */
+ if (ibp->rvp.mkeyprot < 2)
+ break;
+ fallthrough;
+ case IB_MGMT_METHOD_SET:
+ case IB_MGMT_METHOD_TRAP_REPRESS:
+ if (ibp->rvp.mkey_violations != 0xFFFF)
+ ++ibp->rvp.mkey_violations;
+ if (!ibp->rvp.mkey_lease_timeout &&
+ ibp->rvp.mkey_lease_period)
+ ibp->rvp.mkey_lease_timeout = jiffies +
+ ibp->rvp.mkey_lease_period * HZ;
+ /* Generate a trap notice. */
+ bad_mkey(ibp, mad, mkey, dr_slid, return_path,
+ hop_cnt);
+ ret = 1;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * The SMA caches reads from LCB registers in case the LCB is unavailable.
+ * (The LCB is unavailable in certain link states, for example.)
+ */
+struct lcb_datum {
+ u32 off;
+ u64 val;
+};
+
+static struct lcb_datum lcb_cache[] = {
+ { DC_LCB_STS_ROUND_TRIP_LTP_CNT, 0 },
+};
+
+static int write_lcb_cache(u32 off, u64 val)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
+ if (lcb_cache[i].off == off) {
+ lcb_cache[i].val = val;
+ return 0;
+ }
+ }
+
+ pr_warn("%s bad offset 0x%x\n", __func__, off);
+ return -1;
+}
+
+static int read_lcb_cache(u32 off, u64 *val)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
+ if (lcb_cache[i].off == off) {
+ *val = lcb_cache[i].val;
+ return 0;
+ }
+ }
+
+ pr_warn("%s bad offset 0x%x\n", __func__, off);
+ return -1;
+}
+
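+/*
+ * Snapshot DC_LCB_STS_ROUND_TRIP_LTP_CNT into the LCB cache while the
+ * LCB is readable.
+ */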
+void read_ltp_rtt(struct hfi1_devdata *dd)
+{
+ u64 reg;
+
+ if (read_lcb_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT, &reg))
+ dd_dev_err(dd, "%s: unable to read LTP RTT\n", __func__);
+ else
+ write_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, reg);
+}
+
+static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len)
+{
+ int i;
+ struct hfi1_devdata *dd;
+ struct hfi1_pportdata *ppd;
+ struct hfi1_ibport *ibp;
+ struct opa_port_info *pi = (struct opa_port_info *)data;
+ u8 mtu;
+ u8 credit_rate;
+ u8 is_beaconing_active;
+ u32 state;
+ u32 num_ports = OPA_AM_NPORT(am);
+ u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
+ u32 buffer_units;
+ u64 tmp = 0;
+
+ if (num_ports != 1 || smp_length_check(sizeof(*pi), max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ dd = dd_from_ibdev(ibdev);
+ /* IB numbers ports from 1, hw from 0 */
+ ppd = dd->pport + (port - 1);
+ ibp = &ppd->ibport_data;
+
+ if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
+ ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ pi->lid = cpu_to_be32(ppd->lid);
+
+ /* Only return the mkey if the protection field allows it. */
+ if (!(smp->method == IB_MGMT_METHOD_GET &&
+ ibp->rvp.mkey != smp->mkey &&
+ ibp->rvp.mkeyprot == 1))
+ pi->mkey = ibp->rvp.mkey;
+
+ pi->subnet_prefix = ibp->rvp.gid_prefix;
+ pi->sm_lid = cpu_to_be32(ibp->rvp.sm_lid);
+ pi->ib_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
+ pi->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period);
+ pi->sm_trap_qp = cpu_to_be32(ppd->sm_trap_qp);
+ pi->sa_qp = cpu_to_be32(ppd->sa_qp);
+
+ pi->link_width.enabled = cpu_to_be16(ppd->link_width_enabled);
+ pi->link_width.supported = cpu_to_be16(ppd->link_width_supported);
+ pi->link_width.active = cpu_to_be16(ppd->link_width_active);
+
+ pi->link_width_downgrade.supported =
+ cpu_to_be16(ppd->link_width_downgrade_supported);
+ pi->link_width_downgrade.enabled =
+ cpu_to_be16(ppd->link_width_downgrade_enabled);
+ pi->link_width_downgrade.tx_active =
+ cpu_to_be16(ppd->link_width_downgrade_tx_active);
+ pi->link_width_downgrade.rx_active =
+ cpu_to_be16(ppd->link_width_downgrade_rx_active);
+
+ pi->link_speed.supported = cpu_to_be16(ppd->link_speed_supported);
+ pi->link_speed.active = cpu_to_be16(ppd->link_speed_active);
+ pi->link_speed.enabled = cpu_to_be16(ppd->link_speed_enabled);
+
+ state = driver_lstate(ppd);
+
+ if (start_of_sm_config && (state == IB_PORT_INIT))
+ ppd->is_sm_config_started = 1;
+
+ pi->port_phys_conf = (ppd->port_type & 0xf);
+
+ pi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
+ pi->port_states.ledenable_offlinereason |=
+ ppd->is_sm_config_started << 5;
+ /*
+ * This pairs with the memory barrier in hfi1_start_led_override to
+ * ensure that we read the correct state of LED beaconing represented
+ * by led_override_timer_active
+ */
+ smp_rmb();
+ is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active);
+ pi->port_states.ledenable_offlinereason |= is_beaconing_active << 6;
+ pi->port_states.ledenable_offlinereason |=
+ ppd->offline_disabled_reason;
+
+ pi->port_states.portphysstate_portstate =
+ (driver_pstate(ppd) << 4) | state;
+
+ pi->mkeyprotect_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc;
+
+ memset(pi->neigh_mtu.pvlx_to_mtu, 0, sizeof(pi->neigh_mtu.pvlx_to_mtu));
+ for (i = 0; i < ppd->vls_supported; i++) {
+ mtu = mtu_to_enum(dd->vld[i].mtu, HFI1_DEFAULT_ACTIVE_MTU);
+ if ((i % 2) == 0)
+ pi->neigh_mtu.pvlx_to_mtu[i / 2] |= (mtu << 4);
+ else
+ pi->neigh_mtu.pvlx_to_mtu[i / 2] |= mtu;
+ }
+ /* don't forget VL 15 */
+ mtu = mtu_to_enum(dd->vld[15].mtu, 2048);
+ pi->neigh_mtu.pvlx_to_mtu[15 / 2] |= mtu;
+ pi->smsl = ibp->rvp.sm_sl & OPA_PI_MASK_SMSL;
+ pi->operational_vls = hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS);
+ pi->partenforce_filterraw |=
+ (ppd->linkinit_reason & OPA_PI_MASK_LINKINIT_REASON);
+ if (ppd->part_enforce & HFI1_PART_ENFORCE_IN)
+ pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_IN;
+ if (ppd->part_enforce & HFI1_PART_ENFORCE_OUT)
+ pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_OUT;
+ pi->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations);
+ /* P_KeyViolations are counted by hardware. */
+ pi->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations);
+ pi->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations);
+
+ pi->vl.cap = ppd->vls_supported;
+ pi->vl.high_limit = cpu_to_be16(ibp->rvp.vl_high_limit);
+ pi->vl.arb_high_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_CAP);
+ pi->vl.arb_low_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_LOW_CAP);
+
+ pi->clientrereg_subnettimeout = ibp->rvp.subnet_timeout;
+
+ pi->port_link_mode = cpu_to_be16(OPA_PORT_LINK_MODE_OPA << 10 |
+ OPA_PORT_LINK_MODE_OPA << 5 |
+ OPA_PORT_LINK_MODE_OPA);
+
+ pi->port_ltp_crc_mode = cpu_to_be16(ppd->port_ltp_crc_mode);
+
+ pi->port_mode = cpu_to_be16(
+ ppd->is_active_optimize_enabled ?
+ OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE : 0);
+
+ pi->port_packet_format.supported =
+ cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B |
+ OPA_PORT_PACKET_FORMAT_16B);
+ pi->port_packet_format.enabled =
+ cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B |
+ OPA_PORT_PACKET_FORMAT_16B);
+
+ /* flit_control.interleave is (OPA V1, version .76):
+ * bits use
+ * ---- ---
+ * 2 res
+ * 2 DistanceSupported
+ * 2 DistanceEnabled
+ * 5 MaxNestLevelTxEnabled
+ * 5 MaxNestLevelRxSupported
+ *
+ * HFI supports only "distance mode 1" (see OPA V1, version .76,
+ * section 9.6.2), so set DistanceSupported, DistanceEnabled
+ * to 0x1.
+ */
+ pi->flit_control.interleave = cpu_to_be16(0x1400);
+
+ pi->link_down_reason = ppd->local_link_down_reason.sma;
+ pi->neigh_link_down_reason = ppd->neigh_link_down_reason.sma;
+ pi->port_error_action = cpu_to_be32(ppd->port_error_action);
+ pi->mtucap = mtu_to_enum(hfi1_max_mtu, IB_MTU_4096);
+
+ /* 32.768 usec. response time (guessing) */
+ pi->resptimevalue = 3;
+
+ pi->local_port_num = port;
+
+ /* buffer info for FM */
+ pi->overall_buffer_space = cpu_to_be16(dd->link_credits);
+
+ pi->neigh_node_guid = cpu_to_be64(ppd->neighbor_guid);
+ pi->neigh_port_num = ppd->neighbor_port_number;
+ pi->port_neigh_mode =
+ (ppd->neighbor_type & OPA_PI_MASK_NEIGH_NODE_TYPE) |
+ (ppd->mgmt_allowed ? OPA_PI_MASK_NEIGH_MGMT_ALLOWED : 0) |
+ (ppd->neighbor_fm_security ?
+ OPA_PI_MASK_NEIGH_FW_AUTH_BYPASS : 0);
+
+ /* HFIs shall always return VL15 credits to their
+ * neighbor in a timely manner, without any credit return pacing.
+ */
+ credit_rate = 0;
+ buffer_units = (dd->vau) & OPA_PI_MASK_BUF_UNIT_BUF_ALLOC;
+ buffer_units |= (dd->vcu << 3) & OPA_PI_MASK_BUF_UNIT_CREDIT_ACK;
+ buffer_units |= (credit_rate << 6) &
+ OPA_PI_MASK_BUF_UNIT_VL15_CREDIT_RATE;
+ buffer_units |= (dd->vl15_init << 11) & OPA_PI_MASK_BUF_UNIT_VL15_INIT;
+ pi->buffer_units = cpu_to_be32(buffer_units);
+
+ pi->opa_cap_mask = cpu_to_be16(ibp->rvp.port_cap3_flags);
+ pi->collectivemask_multicastmask = ((OPA_COLLECTIVE_NR & 0x7)
+ << 3 | (OPA_MCAST_NR & 0x7));
+
+ /* HFI supports a replay buffer 128 LTPs in size */
+ pi->replay_depth.buffer = 0x80;
+ /* read the cached value of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
+ read_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, &tmp);
+
+ /*
+ * this counter is 16 bits wide, but the replay_depth.wire
+ * variable is only 8 bits
+ */
+ if (tmp > 0xff)
+ tmp = 0xff;
+ pi->replay_depth.wire = tmp;
+
+ if (resp_len)
+ *resp_len += sizeof(struct opa_port_info);
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+/**
+ * get_pkeys - return the PKEY table
+ * @dd: the hfi1_ib device
+ * @port: the IB port number
+ * @pkeys: the pkey table is placed here
+ */
+static int get_pkeys(struct hfi1_devdata *dd, u32 port, u16 *pkeys)
+{
+ struct hfi1_pportdata *ppd = dd->pport + port - 1;
+
+ memcpy(pkeys, ppd->pkeys, sizeof(ppd->pkeys));
+
+ return 0;
+}
+
+static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ u32 n_blocks_req = OPA_AM_NBLK(am);
+ u32 start_block = am & 0x7ff;
+ __be16 *p;
+ u16 *q;
+ int i;
+ u16 n_blocks_avail;
+ unsigned npkeys = hfi1_get_npkeys(dd);
+ size_t size;
+
+ if (n_blocks_req == 0) {
+ pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
+ port, start_block, n_blocks_req);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;
+
+ size = (n_blocks_req * OPA_PARTITION_TABLE_BLK_SIZE) * sizeof(u16);
+
+ if (smp_length_check(size, max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ if (start_block + n_blocks_req > n_blocks_avail ||
+ n_blocks_req > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
+ pr_warn("OPA Get PKey AM Invalid : s 0x%x; req 0x%x; "
+ "avail 0x%x; blk/smp 0x%lx\n",
+ start_block, n_blocks_req, n_blocks_avail,
+ OPA_NUM_PKEY_BLOCKS_PER_SMP);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ p = (__be16 *)data;
+ q = (u16 *)data;
+ /* get the real pkeys if we are requesting the first block */
+ if (start_block == 0) {
+ get_pkeys(dd, port, q);
+ for (i = 0; i < npkeys; i++)
+ p[i] = cpu_to_be16(q[i]);
+ if (resp_len)
+ *resp_len += size;
+ } else {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ }
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+enum {
+ HFI_TRANSITION_DISALLOWED,
+ HFI_TRANSITION_IGNORED,
+ HFI_TRANSITION_ALLOWED,
+ HFI_TRANSITION_UNDEFINED,
+};
+
+/*
+ * Use shortened names to improve readability of
+ * {logical,physical}_state_transitions
+ */
+enum {
+ __D = HFI_TRANSITION_DISALLOWED,
+ __I = HFI_TRANSITION_IGNORED,
+ __A = HFI_TRANSITION_ALLOWED,
+ __U = HFI_TRANSITION_UNDEFINED,
+};
+
+/*
+ * IB_PORTPHYSSTATE_POLLING (2) through OPA_PORTPHYSSTATE_MAX (11) are
+ * represented in physical_state_transitions.
+ */
+#define __N_PHYSTATES (OPA_PORTPHYSSTATE_MAX - IB_PORTPHYSSTATE_POLLING + 1)
+
+/*
+ * Within physical_state_transitions, rows represent "old" states,
+ * columns "new" states, and physical_state_transitions.allowed[old][new]
+ * indicates if the transition from old state to new state is legal (see
+ * OPAg1v1, Table 6-4).
+ */
+static const struct {
+ u8 allowed[__N_PHYSTATES][__N_PHYSTATES];
+} physical_state_transitions = {
+ {
+ /* 2 3 4 5 6 7 8 9 10 11 */
+ /* 2 */ { __A, __A, __D, __D, __D, __D, __D, __D, __D, __D },
+ /* 3 */ { __A, __I, __D, __D, __D, __D, __D, __D, __D, __A },
+ /* 4 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
+ /* 5 */ { __A, __A, __D, __I, __D, __D, __D, __D, __D, __D },
+ /* 6 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
+ /* 7 */ { __D, __A, __D, __D, __D, __I, __D, __D, __D, __D },
+ /* 8 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
+ /* 9 */ { __I, __A, __D, __D, __D, __D, __D, __I, __D, __D },
+ /*10 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
+ /*11 */ { __D, __A, __D, __D, __D, __D, __D, __D, __D, __I },
+ }
+};
+
+/*
+ * IB_PORT_DOWN (1) through IB_PORT_ACTIVE_DEFER (5) are represented
+ * logical_state_transitions
+ */
+
+#define __N_LOGICAL_STATES (IB_PORT_ACTIVE_DEFER - IB_PORT_DOWN + 1)
+
+/*
+ * Within logical_state_transitions rows represent "old" states,
+ * columns "new" states, and logical_state_transitions.allowed[old][new]
+ * indicates if the transition from old state to new state is legal (see
+ * OPAg1v1, Table 9-12).
+ */
+static const struct {
+ u8 allowed[__N_LOGICAL_STATES][__N_LOGICAL_STATES];
+} logical_state_transitions = {
+ {
+ /* 1 2 3 4 5 */
+ /* 1 */ { __I, __D, __D, __D, __U},
+ /* 2 */ { __D, __I, __A, __D, __U},
+ /* 3 */ { __D, __D, __I, __A, __U},
+ /* 4 */ { __D, __D, __I, __I, __U},
+ /* 5 */ { __U, __U, __U, __U, __U},
+ }
+};
+
+static int logical_transition_allowed(int old, int new)
+{
+ if (old < IB_PORT_NOP || old > IB_PORT_ACTIVE_DEFER ||
+ new < IB_PORT_NOP || new > IB_PORT_ACTIVE_DEFER) {
+ pr_warn("invalid logical state(s) (old %d new %d)\n",
+ old, new);
+ return HFI_TRANSITION_UNDEFINED;
+ }
+
+ if (new == IB_PORT_NOP)
+ return HFI_TRANSITION_ALLOWED; /* always allowed */
+
+ /* adjust states for indexing into logical_state_transitions */
+ old -= IB_PORT_DOWN;
+ new -= IB_PORT_DOWN;
+
+ if (old < 0 || new < 0)
+ return HFI_TRANSITION_UNDEFINED;
+ return logical_state_transitions.allowed[old][new];
+}
+
+static int physical_transition_allowed(int old, int new)
+{
+ if (old < IB_PORTPHYSSTATE_NOP || old > OPA_PORTPHYSSTATE_MAX ||
+ new < IB_PORTPHYSSTATE_NOP || new > OPA_PORTPHYSSTATE_MAX) {
+ pr_warn("invalid physical state(s) (old %d new %d)\n",
+ old, new);
+ return HFI_TRANSITION_UNDEFINED;
+ }
+
+ if (new == IB_PORTPHYSSTATE_NOP)
+ return HFI_TRANSITION_ALLOWED; /* always allowed */
+
+ /* adjust states for indexing into physical_state_transitions */
+ old -= IB_PORTPHYSSTATE_POLLING;
+ new -= IB_PORTPHYSSTATE_POLLING;
+
+ if (old < 0 || new < 0)
+ return HFI_TRANSITION_UNDEFINED;
+ return physical_state_transitions.allowed[old][new];
+}
+
+static int port_states_transition_allowed(struct hfi1_pportdata *ppd,
+ u32 logical_new, u32 physical_new)
+{
+ u32 physical_old = driver_pstate(ppd);
+ u32 logical_old = driver_lstate(ppd);
+ int ret, logical_allowed, physical_allowed;
+
+ ret = logical_transition_allowed(logical_old, logical_new);
+ logical_allowed = ret;
+
+ if (ret == HFI_TRANSITION_DISALLOWED ||
+ ret == HFI_TRANSITION_UNDEFINED) {
+ pr_warn("invalid logical state transition %s -> %s\n",
+ ib_port_state_to_str(logical_old),
+ ib_port_state_to_str(logical_new));
+ return ret;
+ }
+
+ ret = physical_transition_allowed(physical_old, physical_new);
+ physical_allowed = ret;
+
+ if (ret == HFI_TRANSITION_DISALLOWED ||
+ ret == HFI_TRANSITION_UNDEFINED) {
+ pr_warn("invalid physical state transition %s -> %s\n",
+ opa_pstate_name(physical_old),
+ opa_pstate_name(physical_new));
+ return ret;
+ }
+
+ if (logical_allowed == HFI_TRANSITION_IGNORED &&
+ physical_allowed == HFI_TRANSITION_IGNORED)
+ return HFI_TRANSITION_IGNORED;
+
+ /*
+ * A change request of Physical Port State from
+ * 'Offline' to 'Polling' should be ignored.
+ */
+ if ((physical_old == OPA_PORTPHYSSTATE_OFFLINE) &&
+ (physical_new == IB_PORTPHYSSTATE_POLLING))
+ return HFI_TRANSITION_IGNORED;
+
+ /*
+ * Either physical_allowed or logical_allowed is
+ * HFI_TRANSITION_ALLOWED.
+ */
+ return HFI_TRANSITION_ALLOWED;
+}
+
+static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
+ u32 logical_state, u32 phys_state, int local_mad)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u32 link_state;
+ int ret;
+
+ ret = port_states_transition_allowed(ppd, logical_state, phys_state);
+ if (ret == HFI_TRANSITION_DISALLOWED ||
+ ret == HFI_TRANSITION_UNDEFINED) {
+ /* error message emitted above */
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return 0;
+ }
+
+ if (ret == HFI_TRANSITION_IGNORED)
+ return 0;
+
+ if ((phys_state != IB_PORTPHYSSTATE_NOP) &&
+ !(logical_state == IB_PORT_DOWN ||
+ logical_state == IB_PORT_NOP)){
+ pr_warn("SubnSet(OPA_PortInfo) port state invalid: logical_state 0x%x physical_state 0x%x\n",
+ logical_state, phys_state);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ }
+
+ /*
+ * Logical state changes are summarized in OPAv1g1 spec.,
+ * Table 9-12; physical state changes are summarized in
+ * OPAv1g1 spec., Table 6.4.
+ */
+ switch (logical_state) {
+ case IB_PORT_NOP:
+ if (phys_state == IB_PORTPHYSSTATE_NOP)
+ break;
+ fallthrough;
+ case IB_PORT_DOWN:
+ if (phys_state == IB_PORTPHYSSTATE_NOP) {
+ link_state = HLS_DN_DOWNDEF;
+ } else if (phys_state == IB_PORTPHYSSTATE_POLLING) {
+ link_state = HLS_DN_POLL;
+ set_link_down_reason(ppd, OPA_LINKDOWN_REASON_FM_BOUNCE,
+ 0, OPA_LINKDOWN_REASON_FM_BOUNCE);
+ } else if (phys_state == IB_PORTPHYSSTATE_DISABLED) {
+ link_state = HLS_DN_DISABLE;
+ } else {
+ pr_warn("SubnSet(OPA_PortInfo) invalid physical state 0x%x\n",
+ phys_state);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ break;
+ }
+
+ if ((link_state == HLS_DN_POLL ||
+ link_state == HLS_DN_DOWNDEF)) {
+ /*
+ * Going to poll. No matter what the current state,
+ * always move offline first, then tune and start the
+ * link. This correctly handles a FM link bounce and
+ * a link enable. Going offline is a no-op if already
+ * offline.
+ */
+ set_link_state(ppd, HLS_DN_OFFLINE);
+ start_link(ppd);
+ } else {
+ set_link_state(ppd, link_state);
+ }
+ if (link_state == HLS_DN_DISABLE &&
+ (ppd->offline_disabled_reason >
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED) ||
+ ppd->offline_disabled_reason ==
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
+ /*
+ * Don't send a reply if the response would be sent
+ * through the disabled port.
+ */
+ if (link_state == HLS_DN_DISABLE && !local_mad)
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+ break;
+ case IB_PORT_ARMED:
+ ret = set_link_state(ppd, HLS_UP_ARMED);
+ if (!ret)
+ send_idle_sma(dd, SMA_IDLE_ARM);
+ break;
+ case IB_PORT_ACTIVE:
+ if (ppd->neighbor_normal) {
+ ret = set_link_state(ppd, HLS_UP_ACTIVE);
+ if (ret == 0)
+ send_idle_sma(dd, SMA_IDLE_ACTIVE);
+ } else {
+ pr_warn("SubnSet(OPA_PortInfo) Cannot move to Active with NeighborNormal 0\n");
+ smp->status |= IB_SMP_INVALID_FIELD;
+ }
+ break;
+ default:
+ pr_warn("SubnSet(OPA_PortInfo) invalid logical state 0x%x\n",
+ logical_state);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ }
+
+ return 0;
+}
+
+/*
+ * __subn_set_opa_portinfo - set port information
+ * @smp: the incoming SM packet
+ * @ibdev: the infiniband device
+ * @port: the port on the device
+ */
+static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len, int local_mad)
+{
+ struct opa_port_info *pi = (struct opa_port_info *)data;
+ struct ib_event event;
+ struct hfi1_devdata *dd;
+ struct hfi1_pportdata *ppd;
+ struct hfi1_ibport *ibp;
+ u8 clientrereg;
+ unsigned long flags;
+ u32 smlid;
+ u32 lid;
+ u8 ls_old, ls_new, ps_new;
+ u8 vls;
+ u8 msl;
+ u8 crc_enabled;
+ u16 lse, lwe, mtu;
+ u32 num_ports = OPA_AM_NPORT(am);
+ u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
+ int ret, i, invalid = 0, call_set_mtu = 0;
+ int call_link_downgrade_policy = 0;
+
+ if (num_ports != 1 ||
+ smp_length_check(sizeof(*pi), max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ lid = be32_to_cpu(pi->lid);
+ if (lid & 0xFF000000) {
+ pr_warn("OPA_PortInfo lid out of range: %X\n", lid);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ goto get_only;
+ }
+
+ smlid = be32_to_cpu(pi->sm_lid);
+ if (smlid & 0xFF000000) {
+ pr_warn("OPA_PortInfo SM lid out of range: %X\n", smlid);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ goto get_only;
+ }
+
+ clientrereg = (pi->clientrereg_subnettimeout &
+ OPA_PI_MASK_CLIENT_REREGISTER);
+
+ dd = dd_from_ibdev(ibdev);
+ /* IB numbers ports from 1, hw from 0 */
+ ppd = dd->pport + (port - 1);
+ ibp = &ppd->ibport_data;
+ event.device = ibdev;
+ event.element.port_num = port;
+
+ ls_old = driver_lstate(ppd);
+
+ ibp->rvp.mkey = pi->mkey;
+ if (ibp->rvp.gid_prefix != pi->subnet_prefix) {
+ ibp->rvp.gid_prefix = pi->subnet_prefix;
+ event.event = IB_EVENT_GID_CHANGE;
+ ib_dispatch_event(&event);
+ }
+ ibp->rvp.mkey_lease_period = be16_to_cpu(pi->mkey_lease_period);
+
+ /* Must be a valid unicast LID address. */
+ if ((lid == 0 && ls_old > IB_PORT_INIT) ||
+ (hfi1_is_16B_mcast(lid))) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ pr_warn("SubnSet(OPA_PortInfo) lid invalid 0x%x\n",
+ lid);
+ } else if (ppd->lid != lid ||
+ ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC)) {
+ if (ppd->lid != lid)
+ hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LID_CHANGE_BIT);
+ if (ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC))
+ hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LMC_CHANGE_BIT);
+ hfi1_set_lid(ppd, lid, pi->mkeyprotect_lmc & OPA_PI_MASK_LMC);
+ event.event = IB_EVENT_LID_CHANGE;
+ ib_dispatch_event(&event);
+
+ if (HFI1_PORT_GUID_INDEX + 1 < HFI1_GUIDS_PER_PORT) {
+ /* Manufacture GID from LID to support extended
+ * addresses
+ */
+ ppd->guids[HFI1_PORT_GUID_INDEX + 1] =
+ be64_to_cpu(OPA_MAKE_ID(lid));
+ event.event = IB_EVENT_GID_CHANGE;
+ ib_dispatch_event(&event);
+ }
+ }
+
+ msl = pi->smsl & OPA_PI_MASK_SMSL;
+ if (pi->partenforce_filterraw & OPA_PI_MASK_LINKINIT_REASON)
+ ppd->linkinit_reason =
+ (pi->partenforce_filterraw &
+ OPA_PI_MASK_LINKINIT_REASON);
+
+ /* Must be a valid unicast LID address. */
+ if ((smlid == 0 && ls_old > IB_PORT_INIT) ||
+ (hfi1_is_16B_mcast(smlid))) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ pr_warn("SubnSet(OPA_PortInfo) smlid invalid 0x%x\n", smlid);
+ } else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) {
+ pr_warn("SubnSet(OPA_PortInfo) smlid 0x%x\n", smlid);
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
+ if (ibp->rvp.sm_ah) {
+ if (smlid != ibp->rvp.sm_lid)
+ hfi1_modify_qp0_ah(ibp, ibp->rvp.sm_ah, smlid);
+ if (msl != ibp->rvp.sm_sl)
+ rdma_ah_set_sl(&ibp->rvp.sm_ah->attr, msl);
+ }
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
+ if (smlid != ibp->rvp.sm_lid)
+ ibp->rvp.sm_lid = smlid;
+ if (msl != ibp->rvp.sm_sl)
+ ibp->rvp.sm_sl = msl;
+ event.event = IB_EVENT_SM_CHANGE;
+ ib_dispatch_event(&event);
+ }
+
+ if (pi->link_down_reason == 0) {
+ ppd->local_link_down_reason.sma = 0;
+ ppd->local_link_down_reason.latest = 0;
+ }
+
+ if (pi->neigh_link_down_reason == 0) {
+ ppd->neigh_link_down_reason.sma = 0;
+ ppd->neigh_link_down_reason.latest = 0;
+ }
+
+ ppd->sm_trap_qp = be32_to_cpu(pi->sm_trap_qp);
+ ppd->sa_qp = be32_to_cpu(pi->sa_qp);
+
+ ppd->port_error_action = be32_to_cpu(pi->port_error_action);
+ lwe = be16_to_cpu(pi->link_width.enabled);
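+ /* link_width.enabled of 0 leaves the currently enabled width unchanged */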
+ if (lwe) {
+ if (lwe == OPA_LINK_WIDTH_RESET ||
+ lwe == OPA_LINK_WIDTH_RESET_OLD)
+ set_link_width_enabled(ppd, ppd->link_width_supported);
+ else if ((lwe & ~ppd->link_width_supported) == 0)
+ set_link_width_enabled(ppd, lwe);
+ else
+ smp->status |= IB_SMP_INVALID_FIELD;
+ }
+ lwe = be16_to_cpu(pi->link_width_downgrade.enabled);
+ /* LWD.E is always applied - 0 means "disabled" */
+ if (lwe == OPA_LINK_WIDTH_RESET ||
+ lwe == OPA_LINK_WIDTH_RESET_OLD) {
+ set_link_width_downgrade_enabled(ppd,
+ ppd->link_width_downgrade_supported);
+ } else if ((lwe & ~ppd->link_width_downgrade_supported) == 0) {
+ /* only set and apply if something changed */
+ if (lwe != ppd->link_width_downgrade_enabled) {
+ set_link_width_downgrade_enabled(ppd, lwe);
+ call_link_downgrade_policy = 1;
+ }
+ } else {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ }
+ lse = be16_to_cpu(pi->link_speed.enabled);
+ if (lse) {
+ if (lse & be16_to_cpu(pi->link_speed.supported))
+ set_link_speed_enabled(ppd, lse);
+ else
+ smp->status |= IB_SMP_INVALID_FIELD;
+ }
+
+ ibp->rvp.mkeyprot =
+ (pi->mkeyprotect_lmc & OPA_PI_MASK_MKEY_PROT_BIT) >> 6;
+ ibp->rvp.vl_high_limit = be16_to_cpu(pi->vl.high_limit) & 0xFF;
+ (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_LIMIT,
+ ibp->rvp.vl_high_limit);
+
+ if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
+ ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+ for (i = 0; i < ppd->vls_supported; i++) {
+ if ((i % 2) == 0)
+ mtu = enum_to_mtu((pi->neigh_mtu.pvlx_to_mtu[i / 2] >>
+ 4) & 0xF);
+ else
+ mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[i / 2] &
+ 0xF);
+ if (mtu == 0xffff) {
+ pr_warn("SubnSet(OPA_PortInfo) mtu invalid %d (0x%x)\n",
+ mtu,
+ (pi->neigh_mtu.pvlx_to_mtu[0] >> 4) & 0xF);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ mtu = hfi1_max_mtu; /* use a valid MTU */
+ }
+ if (dd->vld[i].mtu != mtu) {
+ dd_dev_info(dd,
+ "MTU change on vl %d from %d to %d\n",
+ i, dd->vld[i].mtu, mtu);
+ dd->vld[i].mtu = mtu;
+ call_set_mtu++;
+ }
+ }
+ /* As per the OPAv1 spec, VL15 must support and be configured for
+ * operation with an MTU of 2048 or larger.
+ */
+ mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[15 / 2] & 0xF);
+ if (mtu < 2048 || mtu == 0xffff)
+ mtu = 2048;
+ if (dd->vld[15].mtu != mtu) {
+ dd_dev_info(dd,
+ "MTU change on vl 15 from %d to %d\n",
+ dd->vld[15].mtu, mtu);
+ dd->vld[15].mtu = mtu;
+ call_set_mtu++;
+ }
+ if (call_set_mtu)
+ set_mtu(ppd);
+
+ /* Set operational VLs */
+ vls = pi->operational_vls & OPA_PI_MASK_OPERATIONAL_VL;
+ if (vls) {
+ if (vls > ppd->vls_supported) {
+ pr_warn("SubnSet(OPA_PortInfo) VL's supported invalid %d\n",
+ pi->operational_vls);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ } else {
+ if (hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS,
+ vls) == -EINVAL)
+ smp->status |= IB_SMP_INVALID_FIELD;
+ }
+ }
+
+ if (pi->mkey_violations == 0)
+ ibp->rvp.mkey_violations = 0;
+
+ if (pi->pkey_violations == 0)
+ ibp->rvp.pkey_violations = 0;
+
+ if (pi->qkey_violations == 0)
+ ibp->rvp.qkey_violations = 0;
+
+ ibp->rvp.subnet_timeout =
+ pi->clientrereg_subnettimeout & OPA_PI_MASK_SUBNET_TIMEOUT;
+
+ crc_enabled = be16_to_cpu(pi->port_ltp_crc_mode);
+ crc_enabled >>= 4;
+ crc_enabled &= 0xf;
+
+ if (crc_enabled != 0)
+ ppd->port_crc_mode_enabled = port_ltp_to_cap(crc_enabled);
+
+ ppd->is_active_optimize_enabled =
+ !!(be16_to_cpu(pi->port_mode)
+ & OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE);
+
+ ls_new = pi->port_states.portphysstate_portstate &
+ OPA_PI_MASK_PORT_STATE;
+ ps_new = (pi->port_states.portphysstate_portstate &
+ OPA_PI_MASK_PORT_PHYSICAL_STATE) >> 4;
+
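+ /*
+ * While in INIT, only accept a move to ARMED once StartOfSMConfiguration
+ * has been seen, either in this request or earlier; otherwise flag the
+ * request as invalid.
+ */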
+ if (ls_old == IB_PORT_INIT) {
+ if (start_of_sm_config) {
+ if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
+ ppd->is_sm_config_started = 1;
+ } else if (ls_new == IB_PORT_ARMED) {
+ if (ppd->is_sm_config_started == 0) {
+ invalid = 1;
+ smp->status |= IB_SMP_INVALID_FIELD;
+ }
+ }
+ }
+
+ /* Handle CLIENT_REREGISTER event b/c SM asked us for it */
+ if (clientrereg) {
+ event.event = IB_EVENT_CLIENT_REREGISTER;
+ ib_dispatch_event(&event);
+ }
+
+ /*
+ * Do the port state change now that the other link parameters
+ * have been set.
+ * Changing the port physical state only makes sense if the link
+ * is down or is being set to down.
+ */
+
+ if (!invalid) {
+ ret = set_port_states(ppd, smp, ls_new, ps_new, local_mad);
+ if (ret)
+ return ret;
+ }
+
+ ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len,
+ max_len);
+
+ /* restore re-reg bit per o14-12.2.1 */
+ pi->clientrereg_subnettimeout |= clientrereg;
+
+ /*
+ * Apply the new link downgrade policy. This may result in a link
+ * bounce. Do this after everything else so things are settled.
+ * Possible problem: if setting the port state above fails, then
+ * the policy change is not applied.
+ */
+ if (call_link_downgrade_policy)
+ apply_link_downgrade_policy(ppd, 0);
+
+ return ret;
+
+get_only:
+ return __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len,
+ max_len);
+}
+
+/**
+ * set_pkeys - set the PKEY table for ctxt 0
+ * @dd: the hfi1_ib device
+ * @port: the IB port number
+ * @pkeys: the PKEY table
+ */
+static int set_pkeys(struct hfi1_devdata *dd, u32 port, u16 *pkeys)
+{
+ struct hfi1_pportdata *ppd;
+ int i;
+ int changed = 0;
+ int update_includes_mgmt_partition = 0;
+
+ /*
+ * IB port one/two always maps to context zero/one,
+ * always a kernel context, no locking needed
+ * If we get here with ppd setup, no need to check
+ * that rcd is valid.
+ */
+ ppd = dd->pport + (port - 1);
+ /*
+ * If the update does not include the management pkey, don't do it.
+ */
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (pkeys[i] == LIM_MGMT_P_KEY) {
+ update_includes_mgmt_partition = 1;
+ break;
+ }
+ }
+
+ if (!update_includes_mgmt_partition)
+ return 1;
+
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ u16 key = pkeys[i];
+ u16 okey = ppd->pkeys[i];
+
+ if (key == okey)
+ continue;
+ /*
+ * The SM gives us the complete PKey table. We have
+ * to ensure that we put the PKeys in the matching
+ * slots.
+ */
+ ppd->pkeys[i] = key;
+ changed = 1;
+ }
+
+ if (changed) {
+ (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+ hfi1_event_pkey_change(dd, port);
+ }
+
+ return 0;
+}
+
+static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ u32 n_blocks_sent = OPA_AM_NBLK(am);
+ u32 start_block = am & 0x7ff;
+ u16 *p = (u16 *)data;
+ __be16 *q = (__be16 *)data;
+ int i;
+ u16 n_blocks_avail;
+ unsigned npkeys = hfi1_get_npkeys(dd);
+ u32 size = 0;
+
+ if (n_blocks_sent == 0) {
+ pr_warn("OPA Get PKey AM Invalid : P = %u; B = 0x%x; N = 0x%x\n",
+ port, start_block, n_blocks_sent);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;
+
+ size = sizeof(u16) * (n_blocks_sent * OPA_PARTITION_TABLE_BLK_SIZE);
+
+ if (smp_length_check(size, max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ if (start_block + n_blocks_sent > n_blocks_avail ||
+ n_blocks_sent > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
+ pr_warn("OPA Set PKey AM Invalid : s 0x%x; req 0x%x; avail 0x%x; blk/smp 0x%lx\n",
+ start_block, n_blocks_sent, n_blocks_avail,
+ OPA_NUM_PKEY_BLOCKS_PER_SMP);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ for (i = 0; i < n_blocks_sent * OPA_PARTITION_TABLE_BLK_SIZE; i++)
+ p[i] = be16_to_cpu(q[i]);
+
+ if (start_block == 0 && set_pkeys(dd, port, p) != 0) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ return __subn_get_opa_pkeytable(smp, am, data, ibdev, port, resp_len,
+ max_len);
+}
+
+#define ILLEGAL_VL 12
+/*
+ * filter_sc2vlt changes mappings to VL15 to ILLEGAL_VL (except
+ * for SC15, which must map to VL15). If we don't remap things this
+ * way it is possible for VL15 counters to increment when we try to
+ * send on a SC which is mapped to an invalid VL.
+ * When getting the table convert ILLEGAL_VL back to VL15.
+ */
+static void filter_sc2vlt(void *data, bool set)
+{
+ int i;
+ u8 *pd = data;
+
+ for (i = 0; i < OPA_MAX_SCS; i++) {
+ if (i == 15)
+ continue;
+
+ if (set) {
+ if ((pd[i] & 0x1f) == 0xf)
+ pd[i] = ILLEGAL_VL;
+ } else {
+ if ((pd[i] & 0x1f) == ILLEGAL_VL)
+ pd[i] = 0xf;
+ }
+ }
+}
+
+static int set_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
+{
+ u64 *val = data;
+
+ filter_sc2vlt(data, true);
+
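+ /* 32 one-byte SC->VLt entries, packed eight per 64-bit CSR */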
+ write_csr(dd, SEND_SC2VLT0, *val++);
+ write_csr(dd, SEND_SC2VLT1, *val++);
+ write_csr(dd, SEND_SC2VLT2, *val++);
+ write_csr(dd, SEND_SC2VLT3, *val++);
+ write_seqlock_irq(&dd->sc2vl_lock);
+ memcpy(dd->sc2vl, data, sizeof(dd->sc2vl));
+ write_sequnlock_irq(&dd->sc2vl_lock);
+ return 0;
+}
+
+static int get_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
+{
+ u64 *val = (u64 *)data;
+
+ *val++ = read_csr(dd, SEND_SC2VLT0);
+ *val++ = read_csr(dd, SEND_SC2VLT1);
+ *val++ = read_csr(dd, SEND_SC2VLT2);
+ *val++ = read_csr(dd, SEND_SC2VLT3);
+
+ filter_sc2vlt((u64 *)data, false);
+ return 0;
+}
+
+static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len)
+{
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ u8 *p = data;
+ size_t size = ARRAY_SIZE(ibp->sl_to_sc); /* == 32 */
+ unsigned i;
+
+ if (am || smp_length_check(size, max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++)
+ *p++ = ibp->sl_to_sc[i];
+
+ if (resp_len)
+ *resp_len += size;
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len)
+{
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ u8 *p = data;
+ size_t size = ARRAY_SIZE(ibp->sl_to_sc);
+ int i;
+ u8 sc;
+
+ if (am || smp_length_check(size, max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++) {
+ sc = *p++;
+ if (ibp->sl_to_sc[i] != sc) {
+ ibp->sl_to_sc[i] = sc;
+
+ /* Put all stale qps into error state */
+ hfi1_error_port_qps(ibp, i);
+ }
+ }
+
+ return __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port, resp_len,
+ max_len);
+}
+
+static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len)
+{
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ u8 *p = data;
+ size_t size = ARRAY_SIZE(ibp->sc_to_sl); /* == 32 */
+ unsigned i;
+
+ if (am || smp_length_check(size, max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
+ *p++ = ibp->sc_to_sl[i];
+
+ if (resp_len)
+ *resp_len += size;
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len)
+{
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ size_t size = ARRAY_SIZE(ibp->sc_to_sl);
+ u8 *p = data;
+ int i;
+
+ if (am || smp_length_check(size, max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
+ ibp->sc_to_sl[i] = *p++;
+
+ return __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port, resp_len,
+ max_len);
+}
+
+static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len)
+{
+ u32 n_blocks = OPA_AM_NBLK(am);
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ void *vp = (void *)data;
+ size_t size = 4 * sizeof(u64);
+
+ if (n_blocks != 1 || smp_length_check(size, max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ get_sc2vlt_tables(dd, vp);
+
+ if (resp_len)
+ *resp_len += size;
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len)
+{
+ u32 n_blocks = OPA_AM_NBLK(am);
+ int async_update = OPA_AM_ASYNC(am);
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ void *vp = (void *)data;
+ struct hfi1_pportdata *ppd;
+ int lstate;
+ /*
+ * set_sc2vlt_tables writes the information contained in *data
+ * to four 64-bit registers SendSC2VLt[0-3]. We need to make
+ * sure *max_len is not greater than the total size of the four
+ * SendSC2VLt[0-3] registers.
+ */
+ size_t size = 4 * sizeof(u64);
+
+ if (n_blocks != 1 || async_update || smp_length_check(size, max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ /* IB numbers ports from 1, hw from 0 */
+ ppd = dd->pport + (port - 1);
+ lstate = driver_lstate(ppd);
+ /*
+ * it's known that async_update is 0 by this point, but include
+ * the explicit check for clarity
+ */
+ if (!async_update &&
+ (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ set_sc2vlt_tables(dd, vp);
+
+ return __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port, resp_len,
+ max_len);
+}
+
+static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len)
+{
+ u32 n_blocks = OPA_AM_NPORT(am);
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_pportdata *ppd;
+ void *vp = (void *)data;
+ int size = sizeof(struct sc2vlnt);
+
+ if (n_blocks != 1 || smp_length_check(size, max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ ppd = dd->pport + (port - 1);
+
+ fm_get_table(ppd, FM_TBL_SC2VLNT, vp);
+
+ if (resp_len)
+ *resp_len += size;
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len)
+{
+ u32 n_blocks = OPA_AM_NPORT(am);
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_pportdata *ppd;
+ void *vp = (void *)data;
+ int lstate;
+ int size = sizeof(struct sc2vlnt);
+
+ if (n_blocks != 1 || smp_length_check(size, max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ /* IB numbers ports from 1, hw from 0 */
+ ppd = dd->pport + (port - 1);
+ lstate = driver_lstate(ppd);
+ if (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ ppd = dd->pport + (port - 1);
+
+ fm_set_table(ppd, FM_TBL_SC2VLNT, vp);
+
+ return __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
+ resp_len, max_len);
+}
+
+static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len)
+{
+ u32 nports = OPA_AM_NPORT(am);
+ u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
+ u32 lstate;
+ struct hfi1_ibport *ibp;
+ struct hfi1_pportdata *ppd;
+ struct opa_port_state_info *psi = (struct opa_port_state_info *)data;
+
+ if (nports != 1 || smp_length_check(sizeof(*psi), max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ ibp = to_iport(ibdev, port);
+ ppd = ppd_from_ibp(ibp);
+
+ lstate = driver_lstate(ppd);
+
+ if (start_of_sm_config && (lstate == IB_PORT_INIT))
+ ppd->is_sm_config_started = 1;
+
+ psi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
+ psi->port_states.ledenable_offlinereason |=
+ ppd->is_sm_config_started << 5;
+ psi->port_states.ledenable_offlinereason |=
+ ppd->offline_disabled_reason;
+
+ psi->port_states.portphysstate_portstate =
+ (driver_pstate(ppd) << 4) | (lstate & 0xf);
+ psi->link_width_downgrade_tx_active =
+ cpu_to_be16(ppd->link_width_downgrade_tx_active);
+ psi->link_width_downgrade_rx_active =
+ cpu_to_be16(ppd->link_width_downgrade_rx_active);
+ if (resp_len)
+ *resp_len += sizeof(struct opa_port_state_info);
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len, int local_mad)
+{
+ u32 nports = OPA_AM_NPORT(am);
+ u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
+ u32 ls_old;
+ u8 ls_new, ps_new;
+ struct hfi1_ibport *ibp;
+ struct hfi1_pportdata *ppd;
+ struct opa_port_state_info *psi = (struct opa_port_state_info *)data;
+ int ret, invalid = 0;
+
+ if (nports != 1 || smp_length_check(sizeof(*psi), max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ ibp = to_iport(ibdev, port);
+ ppd = ppd_from_ibp(ibp);
+
+ ls_old = driver_lstate(ppd);
+
+ ls_new = port_states_to_logical_state(&psi->port_states);
+ ps_new = port_states_to_phys_state(&psi->port_states);
+
+ if (ls_old == IB_PORT_INIT) {
+ if (start_of_sm_config) {
+ if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
+ ppd->is_sm_config_started = 1;
+ } else if (ls_new == IB_PORT_ARMED) {
+ if (ppd->is_sm_config_started == 0) {
+ invalid = 1;
+ smp->status |= IB_SMP_INVALID_FIELD;
+ }
+ }
+ }
+
+ if (!invalid) {
+ ret = set_port_states(ppd, smp, ls_new, ps_new, local_mad);
+ if (ret)
+ return ret;
+ }
+
+ return __subn_get_opa_psi(smp, am, data, ibdev, port, resp_len,
+ max_len);
+}
+
+static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ u32 addr = OPA_AM_CI_ADDR(am);
+ u32 len = OPA_AM_CI_LEN(am) + 1;
+ int ret;
+
+ if (dd->pport->port_type != PORT_TYPE_QSFP ||
+ smp_length_check(len, max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+#define __CI_PAGE_SIZE BIT(7) /* 128 bytes */
+#define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1)
+#define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK)
+
+ /*
+ * check that addr is within spec, and
+ * addr and (addr + len - 1) are on the same "page"
+ */
+ if (addr >= 4096 ||
+ (__CI_PAGE_NUM(addr) != __CI_PAGE_NUM(addr + len - 1))) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ ret = get_cable_info(dd, port, addr, len, data);
+
+ if (ret == -ENODEV) {
+ smp->status |= IB_SMP_UNSUP_METH_ATTR;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ /* The address range for the CableInfo SMA query is wider than the
+ * memory available on the QSFP cable. We want to return a valid
+ * response, albeit zeroed out, for address ranges beyond available
+ * memory but that are within the CableInfo query spec
+ */
+ if (ret < 0 && ret != -ERANGE) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ if (resp_len)
+ *resp_len += len;
+
+ return reply((struct ib_mad_hdr *)smp);
+}
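
The page check in __subn_get_opa_cable_info() above only rejects requests that would cross a 128-byte CableInfo page: for example, addr = 120 with len = 16 spans pages 0 and 1 and is rejected, while addr = 128 with len = 16 stays within page 1. A standalone sketch of the same arithmetic, with hypothetical names that are not part of the driver:

/* Illustrative only: the same 128-byte page containment check. */
#define CI_PAGE_SIZE 128u

static bool ci_request_in_one_page(unsigned int addr, unsigned int len)
{
	if (addr >= 4096 || len == 0)
		return false;
	return (addr / CI_PAGE_SIZE) == ((addr + len - 1) / CI_PAGE_SIZE);
}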
+
+static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port, u32 *resp_len,
+ u32 max_len)
+{
+ u32 num_ports = OPA_AM_NPORT(am);
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_pportdata *ppd;
+ struct buffer_control *p = (struct buffer_control *)data;
+ int size = sizeof(struct buffer_control);
+
+ if (num_ports != 1 || smp_length_check(size, max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ ppd = dd->pport + (port - 1);
+ fm_get_table(ppd, FM_TBL_BUFFER_CONTROL, p);
+ trace_bct_get(dd, p);
+ if (resp_len)
+ *resp_len += size;
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port, u32 *resp_len,
+ u32 max_len)
+{
+ u32 num_ports = OPA_AM_NPORT(am);
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_pportdata *ppd;
+ struct buffer_control *p = (struct buffer_control *)data;
+
+ if (num_ports != 1 || smp_length_check(sizeof(*p), max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+ ppd = dd->pport + (port - 1);
+ trace_bct_set(dd, p);
+ if (fm_set_table(ppd, FM_TBL_BUFFER_CONTROL, p) < 0) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ return __subn_get_opa_bct(smp, am, data, ibdev, port, resp_len,
+ max_len);
+}
+
+static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len)
+{
+ struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
+ u32 num_ports = OPA_AM_NPORT(am);
+ u8 section = (am & 0x00ff0000) >> 16;
+ u8 *p = data;
+ int size = 256;
+
+ if (num_ports != 1 || smp_length_check(size, max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ switch (section) {
+ case OPA_VLARB_LOW_ELEMENTS:
+ fm_get_table(ppd, FM_TBL_VL_LOW_ARB, p);
+ break;
+ case OPA_VLARB_HIGH_ELEMENTS:
+ fm_get_table(ppd, FM_TBL_VL_HIGH_ARB, p);
+ break;
+ case OPA_VLARB_PREEMPT_ELEMENTS:
+ fm_get_table(ppd, FM_TBL_VL_PREEMPT_ELEMS, p);
+ break;
+ case OPA_VLARB_PREEMPT_MATRIX:
+ fm_get_table(ppd, FM_TBL_VL_PREEMPT_MATRIX, p);
+ break;
+ default:
+ pr_warn("OPA SubnGet(VL Arb) AM Invalid : 0x%x\n",
+ be32_to_cpu(smp->attr_mod));
+ smp->status |= IB_SMP_INVALID_FIELD;
+ size = 0;
+ break;
+ }
+
+ if (size > 0 && resp_len)
+ *resp_len += size;
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len)
+{
+ struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
+ u32 num_ports = OPA_AM_NPORT(am);
+ u8 section = (am & 0x00ff0000) >> 16;
+ u8 *p = data;
+ int size = 256;
+
+ if (num_ports != 1 || smp_length_check(size, max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ switch (section) {
+ case OPA_VLARB_LOW_ELEMENTS:
+ (void)fm_set_table(ppd, FM_TBL_VL_LOW_ARB, p);
+ break;
+ case OPA_VLARB_HIGH_ELEMENTS:
+ (void)fm_set_table(ppd, FM_TBL_VL_HIGH_ARB, p);
+ break;
+ /*
+ * neither OPA_VLARB_PREEMPT_ELEMENTS nor OPA_VLARB_PREEMPT_MATRIX
+ * can be changed from the default values
+ */
+ case OPA_VLARB_PREEMPT_ELEMENTS:
+ case OPA_VLARB_PREEMPT_MATRIX:
+ smp->status |= IB_SMP_UNSUP_METH_ATTR;
+ break;
+ default:
+ pr_warn("OPA SubnSet(VL Arb) AM Invalid : 0x%x\n",
+ be32_to_cpu(smp->attr_mod));
+ smp->status |= IB_SMP_INVALID_FIELD;
+ break;
+ }
+
+ return __subn_get_opa_vl_arb(smp, am, data, ibdev, port, resp_len,
+ max_len);
+}
+
+struct opa_pma_mad {
+ struct ib_mad_hdr mad_hdr;
+ u8 data[2024];
+} __packed;
+
+struct opa_port_status_req {
+ __u8 port_num;
+ __u8 reserved[3];
+ __be32 vl_select_mask;
+};
+
+#define VL_MASK_ALL 0x00000000000080ffUL
+
+struct opa_port_status_rsp {
+ __u8 port_num;
+ __u8 reserved[3];
+ __be32 vl_select_mask;
+
+ /* Data counters */
+ __be64 port_xmit_data;
+ __be64 port_rcv_data;
+ __be64 port_xmit_pkts;
+ __be64 port_rcv_pkts;
+ __be64 port_multicast_xmit_pkts;
+ __be64 port_multicast_rcv_pkts;
+ __be64 port_xmit_wait;
+ __be64 sw_port_congestion;
+ __be64 port_rcv_fecn;
+ __be64 port_rcv_becn;
+ __be64 port_xmit_time_cong;
+ __be64 port_xmit_wasted_bw;
+ __be64 port_xmit_wait_data;
+ __be64 port_rcv_bubble;
+ __be64 port_mark_fecn;
+ /* Error counters */
+ __be64 port_rcv_constraint_errors;
+ __be64 port_rcv_switch_relay_errors;
+ __be64 port_xmit_discards;
+ __be64 port_xmit_constraint_errors;
+ __be64 port_rcv_remote_physical_errors;
+ __be64 local_link_integrity_errors;
+ __be64 port_rcv_errors;
+ __be64 excessive_buffer_overruns;
+ __be64 fm_config_errors;
+ __be32 link_error_recovery;
+ __be32 link_downed;
+ u8 uncorrectable_errors;
+
+ u8 link_quality_indicator; /* 5res, 3bit */
+ u8 res2[6];
+ struct _vls_pctrs {
+ /* per-VL Data counters */
+ __be64 port_vl_xmit_data;
+ __be64 port_vl_rcv_data;
+ __be64 port_vl_xmit_pkts;
+ __be64 port_vl_rcv_pkts;
+ __be64 port_vl_xmit_wait;
+ __be64 sw_port_vl_congestion;
+ __be64 port_vl_rcv_fecn;
+ __be64 port_vl_rcv_becn;
+ __be64 port_xmit_time_cong;
+ __be64 port_vl_xmit_wasted_bw;
+ __be64 port_vl_xmit_wait_data;
+ __be64 port_vl_rcv_bubble;
+ __be64 port_vl_mark_fecn;
+ __be64 port_vl_xmit_discards;
+ } vls[]; /* real array size defined by # bits set in vl_select_mask */
+};
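
Since vls[] is a flexible array member, the size of a PortStatus response depends on how many bits are set in vl_select_mask; pma_get_opa_portstatus() below sizes it with struct_size() before touching pmp->data. A small illustrative helper (hypothetical, not part of the driver) showing the same sizing; with vl_select_mask == VL_MASK_ALL (0x80ff), hweight64() reports nine VLs (VL0-VL7 plus VL15):

/* Illustrative only: size the variable-length PortStatus response. */
static size_t portstatus_rsp_size(u64 vl_select_mask)
{
	struct opa_port_status_rsp *rsp = NULL;
	u8 num_vls = hweight64(vl_select_mask);

	/* fixed header plus one struct _vls_pctrs per selected VL */
	return struct_size(rsp, vls, num_vls);
}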
+
+enum counter_selects {
+ CS_PORT_XMIT_DATA = (1 << 31),
+ CS_PORT_RCV_DATA = (1 << 30),
+ CS_PORT_XMIT_PKTS = (1 << 29),
+ CS_PORT_RCV_PKTS = (1 << 28),
+ CS_PORT_MCAST_XMIT_PKTS = (1 << 27),
+ CS_PORT_MCAST_RCV_PKTS = (1 << 26),
+ CS_PORT_XMIT_WAIT = (1 << 25),
+ CS_SW_PORT_CONGESTION = (1 << 24),
+ CS_PORT_RCV_FECN = (1 << 23),
+ CS_PORT_RCV_BECN = (1 << 22),
+ CS_PORT_XMIT_TIME_CONG = (1 << 21),
+ CS_PORT_XMIT_WASTED_BW = (1 << 20),
+ CS_PORT_XMIT_WAIT_DATA = (1 << 19),
+ CS_PORT_RCV_BUBBLE = (1 << 18),
+ CS_PORT_MARK_FECN = (1 << 17),
+ CS_PORT_RCV_CONSTRAINT_ERRORS = (1 << 16),
+ CS_PORT_RCV_SWITCH_RELAY_ERRORS = (1 << 15),
+ CS_PORT_XMIT_DISCARDS = (1 << 14),
+ CS_PORT_XMIT_CONSTRAINT_ERRORS = (1 << 13),
+ CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS = (1 << 12),
+ CS_LOCAL_LINK_INTEGRITY_ERRORS = (1 << 11),
+ CS_PORT_RCV_ERRORS = (1 << 10),
+ CS_EXCESSIVE_BUFFER_OVERRUNS = (1 << 9),
+ CS_FM_CONFIG_ERRORS = (1 << 8),
+ CS_LINK_ERROR_RECOVERY = (1 << 7),
+ CS_LINK_DOWNED = (1 << 6),
+ CS_UNCORRECTABLE_ERRORS = (1 << 5),
+};
+
+struct opa_clear_port_status {
+ __be64 port_select_mask[4];
+ __be32 counter_select_mask;
+};
+
+struct opa_aggregate {
+ __be16 attr_id;
+ __be16 err_reqlength; /* 1 bit, 8 res, 7 bit */
+ __be32 attr_mod;
+ u8 data[];
+};
+
+#define MSK_LLI 0x000000f0
+#define MSK_LLI_SFT 4
+#define MSK_LER 0x0000000f
+#define MSK_LER_SFT 0
+#define ADD_LLI 8
+#define ADD_LER 2
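
The resolution field of a DataCounters request carries two 4-bit sub-fields; a non-zero nibble asks for the matching error counter to be right-shifted by nibble + ADD_LLI (or ADD_LER) bits before it is folded into port_error_counter_summary, as pma_get_opa_datacounters() and get_error_counter_summary() do below. A worked sketch of the decoding (hypothetical helper name, not part of the driver):

/*
 * Illustrative only: decode the LocalLinkIntegrity resolution nibble.
 * resolution = 0x00000050 -> nibble 5 -> shift by 5 + ADD_LLI = 13 bits;
 * a nibble of 0 means no shift.
 */
static u8 lli_resolution_shift(u32 resolution)
{
	u8 res_lli = (resolution & MSK_LLI) >> MSK_LLI_SFT;

	return res_lli ? res_lli + ADD_LLI : 0;
}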
+
+/* Request contains first three fields, response contains those plus the rest */
+struct opa_port_data_counters_msg {
+ __be64 port_select_mask[4];
+ __be32 vl_select_mask;
+ __be32 resolution;
+
+ /* Response fields follow */
+ struct _port_dctrs {
+ u8 port_number;
+ u8 reserved2[3];
+ __be32 link_quality_indicator; /* 29res, 3bit */
+
+ /* Data counters */
+ __be64 port_xmit_data;
+ __be64 port_rcv_data;
+ __be64 port_xmit_pkts;
+ __be64 port_rcv_pkts;
+ __be64 port_multicast_xmit_pkts;
+ __be64 port_multicast_rcv_pkts;
+ __be64 port_xmit_wait;
+ __be64 sw_port_congestion;
+ __be64 port_rcv_fecn;
+ __be64 port_rcv_becn;
+ __be64 port_xmit_time_cong;
+ __be64 port_xmit_wasted_bw;
+ __be64 port_xmit_wait_data;
+ __be64 port_rcv_bubble;
+ __be64 port_mark_fecn;
+
+ __be64 port_error_counter_summary;
+ /* Sum of error counts/port */
+
+ struct _vls_dctrs {
+ /* per-VL Data counters */
+ __be64 port_vl_xmit_data;
+ __be64 port_vl_rcv_data;
+ __be64 port_vl_xmit_pkts;
+ __be64 port_vl_rcv_pkts;
+ __be64 port_vl_xmit_wait;
+ __be64 sw_port_vl_congestion;
+ __be64 port_vl_rcv_fecn;
+ __be64 port_vl_rcv_becn;
+ __be64 port_xmit_time_cong;
+ __be64 port_vl_xmit_wasted_bw;
+ __be64 port_vl_xmit_wait_data;
+ __be64 port_vl_rcv_bubble;
+ __be64 port_vl_mark_fecn;
+ } vls[];
+	/* array size defined by #bits set in vl_select_mask */
+ } port;
+};
+
+struct opa_port_error_counters64_msg {
+ /*
+ * Request contains first two fields, response contains the
+ * whole magilla
+ */
+ __be64 port_select_mask[4];
+ __be32 vl_select_mask;
+
+ /* Response-only fields follow */
+ __be32 reserved1;
+ struct _port_ectrs {
+ u8 port_number;
+ u8 reserved2[7];
+ __be64 port_rcv_constraint_errors;
+ __be64 port_rcv_switch_relay_errors;
+ __be64 port_xmit_discards;
+ __be64 port_xmit_constraint_errors;
+ __be64 port_rcv_remote_physical_errors;
+ __be64 local_link_integrity_errors;
+ __be64 port_rcv_errors;
+ __be64 excessive_buffer_overruns;
+ __be64 fm_config_errors;
+ __be32 link_error_recovery;
+ __be32 link_downed;
+ u8 uncorrectable_errors;
+ u8 reserved3[7];
+ struct _vls_ectrs {
+ __be64 port_vl_xmit_discards;
+ } vls[];
+ /* array size defined by #bits set in vl_select_mask */
+ } port;
+};
+
+struct opa_port_error_info_msg {
+ __be64 port_select_mask[4];
+ __be32 error_info_select_mask;
+ __be32 reserved1;
+ struct _port_ei {
+ u8 port_number;
+ u8 reserved2[7];
+
+ /* PortRcvErrorInfo */
+ struct {
+ u8 status_and_code;
+ union {
+ u8 raw[17];
+ struct {
+ /* EI1to12 format */
+ u8 packet_flit1[8];
+ u8 packet_flit2[8];
+ u8 remaining_flit_bits12;
+ } ei1to12;
+ struct {
+ u8 packet_bytes[8];
+ u8 remaining_flit_bits;
+ } ei13;
+ } ei;
+ u8 reserved3[6];
+ } __packed port_rcv_ei;
+
+ /* ExcessiveBufferOverrunInfo */
+ struct {
+ u8 status_and_sc;
+ u8 reserved4[7];
+ } __packed excessive_buffer_overrun_ei;
+
+ /* PortXmitConstraintErrorInfo */
+ struct {
+ u8 status;
+ u8 reserved5;
+ __be16 pkey;
+ __be32 slid;
+ } __packed port_xmit_constraint_ei;
+
+ /* PortRcvConstraintErrorInfo */
+ struct {
+ u8 status;
+ u8 reserved6;
+ __be16 pkey;
+ __be32 slid;
+ } __packed port_rcv_constraint_ei;
+
+ /* PortRcvSwitchRelayErrorInfo */
+ struct {
+ u8 status_and_code;
+ u8 reserved7[3];
+ __u32 error_info;
+ } __packed port_rcv_switch_relay_ei;
+
+ /* UncorrectableErrorInfo */
+ struct {
+ u8 status_and_code;
+ u8 reserved8;
+ } __packed uncorrectable_ei;
+
+ /* FMConfigErrorInfo */
+ struct {
+ u8 status_and_code;
+ u8 error_info;
+ } __packed fm_config_ei;
+ __u32 reserved9;
+ } port;
+};
+
+/* opa_port_error_info_msg error_info_select_mask bit definitions */
+enum error_info_selects {
+ ES_PORT_RCV_ERROR_INFO = (1 << 31),
+ ES_EXCESSIVE_BUFFER_OVERRUN_INFO = (1 << 30),
+ ES_PORT_XMIT_CONSTRAINT_ERROR_INFO = (1 << 29),
+ ES_PORT_RCV_CONSTRAINT_ERROR_INFO = (1 << 28),
+ ES_PORT_RCV_SWITCH_RELAY_ERROR_INFO = (1 << 27),
+ ES_UNCORRECTABLE_ERROR_INFO = (1 << 26),
+ ES_FM_CONFIG_ERROR_INFO = (1 << 25)
+};
+
+static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
+ struct ib_device *ibdev, u32 *resp_len)
+{
+ struct opa_class_port_info *p =
+ (struct opa_class_port_info *)pmp->data;
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+
+ if (pmp->mad_hdr.attr_mod != 0)
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+
+ p->base_version = OPA_MGMT_BASE_VERSION;
+ p->class_version = OPA_SM_CLASS_VERSION;
+ /*
+ * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
+ */
+ p->cap_mask2_resp_time = cpu_to_be32(18);
+
+ if (resp_len)
+ *resp_len += sizeof(*p);
+
+ return reply((struct ib_mad_hdr *)pmp);
+}
+
+static void a0_portstatus(struct hfi1_pportdata *ppd,
+ struct opa_port_status_rsp *rsp)
+{
+ if (!is_bx(ppd->dd)) {
+ unsigned long vl;
+ u64 sum_vl_xmit_wait = 0;
+ unsigned long vl_all_mask = VL_MASK_ALL;
+
+ for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
+ u64 tmp = sum_vl_xmit_wait +
+ read_port_cntr(ppd, C_TX_WAIT_VL,
+ idx_from_vl(vl));
+ if (tmp < sum_vl_xmit_wait) {
+ /* we wrapped */
+ sum_vl_xmit_wait = (u64)~0;
+ break;
+ }
+ sum_vl_xmit_wait = tmp;
+ }
+ if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
+ rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
+ }
+}
+
+/**
+ * tx_link_width - convert link width bitmask to integer
+ * value representing actual link width.
+ * @link_width: width of active link
+ * @return: index of the bit set in the link_width bitmask
+ *
+ * The function converts the bitmask and returns the index of the set
+ * bit that indicates the current link width.
+ */
+u16 tx_link_width(u16 link_width)
+{
+ int n = LINK_WIDTH_DEFAULT;
+ u16 tx_width = n;
+
+ while (link_width && n) {
+ if (link_width & (1 << (n - 1))) {
+ tx_width = n;
+ break;
+ }
+ n--;
+ }
+
+ return tx_width;
+}
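
tx_link_width() returns the 1-based index of the highest set bit in the width bitmask and falls back to LINK_WIDTH_DEFAULT when no bit is set. A usage sketch, assuming LINK_WIDTH_DEFAULT is 4 (the OPA 4X maximum); that value is an assumption not confirmed by this hunk:

/* Illustrative only; assumes LINK_WIDTH_DEFAULT == 4 (OPA 4X maximum). */
static void tx_link_width_example(void)
{
	u16 w4 = tx_link_width(0x8);	/* bit 3 set (4X active) -> 4 */
	u16 w2 = tx_link_width(0x2);	/* bit 1 set (2X active) -> 2 */
	u16 wd = tx_link_width(0x0);	/* no bits set -> LINK_WIDTH_DEFAULT */

	(void)w4;
	(void)w2;
	(void)wd;
}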
+
+/**
+ * get_xmit_wait_counters - Convert the HFI's SendWaitCnt/SendWaitVlCnt
+ * counters from units of TXE cycle times to flit times.
+ * @ppd: info of physical Hfi port
+ * @link_width: width of active link
+ * @link_speed: speed of active link
+ * @vl: VL0-VL7 or VL15 for a PortVLXmitWait counter request; if vl is
+ * C_VL_COUNT, the request is for the SendWaitCnt counter
+ * @return: the SendWaitCnt/SendWaitVlCnt counter value for the given vl.
+ *
+ * Convert the SendWaitCnt/SendWaitVlCnt counter from TXE cycle times to
+ * flit times. Call this function to sample these counters. The function
+ * converts the delta accumulated since the previous sample using
+ * ppd->prev_link_width and link_speed, then updates the saved state:
+ * ppd->port_vl_xmit_wait_last is set to port_vl_xmit_wait_curr and
+ * ppd->prev_link_width is set to link_width.
+ */
+u64 get_xmit_wait_counters(struct hfi1_pportdata *ppd,
+ u16 link_width, u16 link_speed, int vl)
+{
+ u64 port_vl_xmit_wait_curr;
+ u64 delta_vl_xmit_wait;
+ u64 xmit_wait_val;
+
+ if (vl > C_VL_COUNT)
+ return 0;
+ if (vl < C_VL_COUNT)
+ port_vl_xmit_wait_curr =
+ read_port_cntr(ppd, C_TX_WAIT_VL, vl);
+ else
+ port_vl_xmit_wait_curr =
+ read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL);
+
+ xmit_wait_val =
+ port_vl_xmit_wait_curr -
+ ppd->port_vl_xmit_wait_last[vl];
+ delta_vl_xmit_wait =
+ convert_xmit_counter(xmit_wait_val,
+ ppd->prev_link_width,
+ link_speed);
+
+ ppd->vl_xmit_flit_cnt[vl] += delta_vl_xmit_wait;
+ ppd->port_vl_xmit_wait_last[vl] = port_vl_xmit_wait_curr;
+ ppd->prev_link_width = link_width;
+
+ return ppd->vl_xmit_flit_cnt[vl];
+}
+
+static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
+ struct ib_device *ibdev,
+ u32 port, u32 *resp_len)
+{
+ struct opa_port_status_req *req =
+ (struct opa_port_status_req *)pmp->data;
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct opa_port_status_rsp *rsp;
+ unsigned long vl_select_mask = be32_to_cpu(req->vl_select_mask);
+ unsigned long vl;
+ size_t response_data_size;
+ u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
+ u32 port_num = req->port_num;
+ u8 num_vls = hweight64(vl_select_mask);
+ struct _vls_pctrs *vlinfo;
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ int vfi;
+ u64 tmp, tmp2;
+ u16 link_width;
+ u16 link_speed;
+
+ response_data_size = struct_size(rsp, vls, num_vls);
+ if (response_data_size > sizeof(pmp->data)) {
+ pmp->mad_hdr.status |= OPA_PM_STATUS_REQUEST_TOO_LARGE;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ if (nports != 1 || (port_num && port_num != port) ||
+ num_vls > OPA_MAX_VLS || (vl_select_mask & ~VL_MASK_ALL)) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+
+ rsp = (struct opa_port_status_rsp *)pmp->data;
+ if (port_num)
+ rsp->port_num = port_num;
+ else
+ rsp->port_num = port;
+
+ rsp->port_rcv_constraint_errors =
+ cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
+ CNTR_INVALID_VL));
+
+ hfi1_read_link_quality(dd, &rsp->link_quality_indicator);
+
+ rsp->vl_select_mask = cpu_to_be32((u32)vl_select_mask);
+ rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
+ CNTR_INVALID_VL));
+ rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
+ CNTR_INVALID_VL));
+ rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
+ CNTR_INVALID_VL));
+ rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
+ CNTR_INVALID_VL));
+ rsp->port_multicast_xmit_pkts =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
+ CNTR_INVALID_VL));
+ rsp->port_multicast_rcv_pkts =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
+ CNTR_INVALID_VL));
+ /*
+ * Convert PortXmitWait counter from TXE cycle times
+ * to flit times.
+ */
+ link_width =
+ tx_link_width(ppd->link_width_downgrade_tx_active);
+ link_speed = get_link_speed(ppd->link_speed_active);
+ rsp->port_xmit_wait =
+ cpu_to_be64(get_xmit_wait_counters(ppd, link_width,
+ link_speed, C_VL_COUNT));
+ rsp->port_rcv_fecn =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
+ rsp->port_rcv_becn =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
+ rsp->port_xmit_discards =
+ cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
+ CNTR_INVALID_VL));
+ rsp->port_xmit_constraint_errors =
+ cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
+ CNTR_INVALID_VL));
+ rsp->port_rcv_remote_physical_errors =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
+ CNTR_INVALID_VL));
+ rsp->local_link_integrity_errors =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY,
+ CNTR_INVALID_VL));
+ tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
+ tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
+ CNTR_INVALID_VL);
+ if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
+ /* overflow/wrapped */
+ rsp->link_error_recovery = cpu_to_be32(~0);
+ } else {
+ rsp->link_error_recovery = cpu_to_be32(tmp2);
+ }
+ rsp->port_rcv_errors =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
+ rsp->excessive_buffer_overruns =
+ cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
+ rsp->fm_config_errors =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
+ CNTR_INVALID_VL));
+ rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
+ CNTR_INVALID_VL));
+
+ /* rsp->uncorrectable_errors is 8 bits wide, and it pegs at 0xff */
+ tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
+ rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
+
+ vlinfo = &rsp->vls[0];
+ vfi = 0;
+ /* The vl_select_mask has been checked above, and we know
+ * that it contains only entries which represent valid VLs.
+ * So in the for_each_set_bit() loop below, we don't need
+ * any additional checks for vl.
+ */
+ for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
+ memset(vlinfo, 0, sizeof(*vlinfo));
+
+ tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl));
+ rsp->vls[vfi].port_vl_rcv_data = cpu_to_be64(tmp);
+
+ rsp->vls[vfi].port_vl_rcv_pkts =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
+ idx_from_vl(vl)));
+
+ rsp->vls[vfi].port_vl_xmit_data =
+ cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
+ idx_from_vl(vl)));
+
+ rsp->vls[vfi].port_vl_xmit_pkts =
+ cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
+ idx_from_vl(vl)));
+ /*
+ * Convert PortVlXmitWait counter from TXE cycle
+ * times to flit times.
+ */
+ rsp->vls[vfi].port_vl_xmit_wait =
+ cpu_to_be64(get_xmit_wait_counters(ppd, link_width,
+ link_speed,
+ idx_from_vl(vl)));
+
+ rsp->vls[vfi].port_vl_rcv_fecn =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
+ idx_from_vl(vl)));
+
+ rsp->vls[vfi].port_vl_rcv_becn =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
+ idx_from_vl(vl)));
+
+ rsp->vls[vfi].port_vl_xmit_discards =
+ cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
+ idx_from_vl(vl)));
+ vlinfo++;
+ vfi++;
+ }
+
+ a0_portstatus(ppd, rsp);
+
+ if (resp_len)
+ *resp_len += response_data_size;
+
+ return reply((struct ib_mad_hdr *)pmp);
+}
+
+static u64 get_error_counter_summary(struct ib_device *ibdev, u32 port,
+ u8 res_lli, u8 res_ler)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u64 error_counter_summary = 0, tmp;
+
+ error_counter_summary += read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
+ CNTR_INVALID_VL);
+ /* port_rcv_switch_relay_errors is 0 for HFIs */
+ error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_DSCD,
+ CNTR_INVALID_VL);
+ error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
+ CNTR_INVALID_VL);
+ error_counter_summary += read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
+ CNTR_INVALID_VL);
+ /* local link integrity must be right-shifted by the lli resolution */
+ error_counter_summary += (read_dev_cntr(dd, C_DC_RX_REPLAY,
+ CNTR_INVALID_VL) >> res_lli);
+	/* link error recovery must be right-shifted by the ler resolution */
+ tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
+ tmp += read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL);
+ error_counter_summary += (tmp >> res_ler);
+ error_counter_summary += read_dev_cntr(dd, C_DC_RCV_ERR,
+ CNTR_INVALID_VL);
+ error_counter_summary += read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
+ error_counter_summary += read_dev_cntr(dd, C_DC_FM_CFG_ERR,
+ CNTR_INVALID_VL);
+ /* ppd->link_downed is a 32-bit value */
+ error_counter_summary += read_port_cntr(ppd, C_SW_LINK_DOWN,
+ CNTR_INVALID_VL);
+ tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
+ /* this is an 8-bit quantity */
+ error_counter_summary += tmp < 0x100 ? (tmp & 0xff) : 0xff;
+
+ return error_counter_summary;
+}
+
+static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp)
+{
+ if (!is_bx(ppd->dd)) {
+ unsigned long vl;
+ u64 sum_vl_xmit_wait = 0;
+ unsigned long vl_all_mask = VL_MASK_ALL;
+
+ for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
+ u64 tmp = sum_vl_xmit_wait +
+ read_port_cntr(ppd, C_TX_WAIT_VL,
+ idx_from_vl(vl));
+ if (tmp < sum_vl_xmit_wait) {
+ /* we wrapped */
+ sum_vl_xmit_wait = (u64)~0;
+ break;
+ }
+ sum_vl_xmit_wait = tmp;
+ }
+ if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
+ rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
+ }
+}
+
+static void pma_get_opa_port_dctrs(struct ib_device *ibdev,
+ struct _port_dctrs *rsp)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+
+ rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
+ CNTR_INVALID_VL));
+ rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
+ CNTR_INVALID_VL));
+ rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
+ CNTR_INVALID_VL));
+ rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
+ CNTR_INVALID_VL));
+ rsp->port_multicast_xmit_pkts =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
+ CNTR_INVALID_VL));
+ rsp->port_multicast_rcv_pkts =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
+ CNTR_INVALID_VL));
+}
+
+static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
+ struct ib_device *ibdev,
+ u32 port, u32 *resp_len)
+{
+ struct opa_port_data_counters_msg *req =
+ (struct opa_port_data_counters_msg *)pmp->data;
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct _port_dctrs *rsp;
+ struct _vls_dctrs *vlinfo;
+ size_t response_data_size;
+ u32 num_ports;
+ u8 lq, num_vls;
+ u8 res_lli, res_ler;
+ u64 port_mask;
+ u32 port_num;
+ unsigned long vl;
+ unsigned long vl_select_mask;
+ int vfi;
+ u16 link_width;
+ u16 link_speed;
+
+ num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
+ num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
+ vl_select_mask = be32_to_cpu(req->vl_select_mask);
+ res_lli = (u8)(be32_to_cpu(req->resolution) & MSK_LLI) >> MSK_LLI_SFT;
+ res_lli = res_lli ? res_lli + ADD_LLI : 0;
+ res_ler = (u8)(be32_to_cpu(req->resolution) & MSK_LER) >> MSK_LER_SFT;
+ res_ler = res_ler ? res_ler + ADD_LER : 0;
+
+ if (num_ports != 1 || (vl_select_mask & ~VL_MASK_ALL)) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ /* Sanity check */
+ response_data_size = struct_size(req, port.vls, num_vls);
+
+ if (response_data_size > sizeof(pmp->data)) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ /*
+ * The bit set in the mask needs to be consistent with the
+ * port the request came in on.
+ */
+ port_mask = be64_to_cpu(req->port_select_mask[3]);
+ port_num = find_first_bit((unsigned long *)&port_mask,
+ sizeof(port_mask) * 8);
+
+ if (port_num != port) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ rsp = &req->port;
+ memset(rsp, 0, sizeof(*rsp));
+
+ rsp->port_number = port;
+ /*
+ * Note that link_quality_indicator is a 32 bit quantity in
+ * 'datacounters' queries (as opposed to 'portinfo' queries,
+ * where it's a byte).
+ */
+ hfi1_read_link_quality(dd, &lq);
+ rsp->link_quality_indicator = cpu_to_be32((u32)lq);
+ pma_get_opa_port_dctrs(ibdev, rsp);
+
+ /*
+ * Convert PortXmitWait counter from TXE
+ * cycle times to flit times.
+ */
+ link_width =
+ tx_link_width(ppd->link_width_downgrade_tx_active);
+ link_speed = get_link_speed(ppd->link_speed_active);
+ rsp->port_xmit_wait =
+ cpu_to_be64(get_xmit_wait_counters(ppd, link_width,
+ link_speed, C_VL_COUNT));
+ rsp->port_rcv_fecn =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
+ rsp->port_rcv_becn =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
+ rsp->port_error_counter_summary =
+ cpu_to_be64(get_error_counter_summary(ibdev, port,
+ res_lli, res_ler));
+
+ vlinfo = &rsp->vls[0];
+ vfi = 0;
+ /* The vl_select_mask has been checked above, and we know
+ * that it contains only entries which represent valid VLs.
+ * So in the for_each_set_bit() loop below, we don't need
+ * any additional checks for vl.
+ */
+ for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
+ memset(vlinfo, 0, sizeof(*vlinfo));
+
+ rsp->vls[vfi].port_vl_xmit_data =
+ cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
+ idx_from_vl(vl)));
+
+ rsp->vls[vfi].port_vl_rcv_data =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RX_FLIT_VL,
+ idx_from_vl(vl)));
+
+ rsp->vls[vfi].port_vl_xmit_pkts =
+ cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
+ idx_from_vl(vl)));
+
+ rsp->vls[vfi].port_vl_rcv_pkts =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
+ idx_from_vl(vl)));
+
+ /*
+ * Convert PortVlXmitWait counter from TXE
+ * cycle times to flit times.
+ */
+ rsp->vls[vfi].port_vl_xmit_wait =
+ cpu_to_be64(get_xmit_wait_counters(ppd, link_width,
+ link_speed,
+ idx_from_vl(vl)));
+
+ rsp->vls[vfi].port_vl_rcv_fecn =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
+ idx_from_vl(vl)));
+ rsp->vls[vfi].port_vl_rcv_becn =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
+ idx_from_vl(vl)));
+
+ /* rsp->port_vl_xmit_time_cong is 0 for HFIs */
+ /* rsp->port_vl_xmit_wasted_bw ??? */
+ /* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ???
+ * does this differ from rsp->vls[vfi].port_vl_xmit_wait
+ */
+ /*rsp->vls[vfi].port_vl_mark_fecn =
+ * cpu_to_be64(read_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT
+ * + offset));
+ */
+ vlinfo++;
+ vfi++;
+ }
+
+ a0_datacounters(ppd, rsp);
+
+ if (resp_len)
+ *resp_len += response_data_size;
+
+ return reply((struct ib_mad_hdr *)pmp);
+}
+
+static int pma_get_ib_portcounters_ext(struct ib_pma_mad *pmp,
+ struct ib_device *ibdev, u32 port)
+{
+ struct ib_pma_portcounters_ext *p = (struct ib_pma_portcounters_ext *)
+ pmp->data;
+ struct _port_dctrs rsp;
+
+ if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ goto bail;
+ }
+
+ memset(&rsp, 0, sizeof(rsp));
+ pma_get_opa_port_dctrs(ibdev, &rsp);
+
+ p->port_xmit_data = rsp.port_xmit_data;
+ p->port_rcv_data = rsp.port_rcv_data;
+ p->port_xmit_packets = rsp.port_xmit_pkts;
+ p->port_rcv_packets = rsp.port_rcv_pkts;
+ p->port_unicast_xmit_packets = 0;
+ p->port_unicast_rcv_packets = 0;
+ p->port_multicast_xmit_packets = rsp.port_multicast_xmit_pkts;
+ p->port_multicast_rcv_packets = rsp.port_multicast_rcv_pkts;
+
+bail:
+ return reply((struct ib_mad_hdr *)pmp);
+}
+
+static void pma_get_opa_port_ectrs(struct ib_device *ibdev,
+ struct _port_ectrs *rsp, u32 port)
+{
+ u64 tmp, tmp2;
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+ tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
+ tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
+ CNTR_INVALID_VL);
+ if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
+ /* overflow/wrapped */
+ rsp->link_error_recovery = cpu_to_be32(~0);
+ } else {
+ rsp->link_error_recovery = cpu_to_be32(tmp2);
+ }
+
+ rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
+ CNTR_INVALID_VL));
+ rsp->port_rcv_errors =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
+ rsp->port_rcv_remote_physical_errors =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
+ CNTR_INVALID_VL));
+ rsp->port_rcv_switch_relay_errors = 0;
+ rsp->port_xmit_discards =
+ cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
+ CNTR_INVALID_VL));
+ rsp->port_xmit_constraint_errors =
+ cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
+ CNTR_INVALID_VL));
+ rsp->port_rcv_constraint_errors =
+ cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
+ CNTR_INVALID_VL));
+ rsp->local_link_integrity_errors =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY,
+ CNTR_INVALID_VL));
+ rsp->excessive_buffer_overruns =
+ cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
+}
+
+static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
+ struct ib_device *ibdev,
+ u32 port, u32 *resp_len)
+{
+ size_t response_data_size;
+ struct _port_ectrs *rsp;
+ u32 port_num;
+ struct opa_port_error_counters64_msg *req;
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ u32 num_ports;
+ u8 num_pslm;
+ u8 num_vls;
+ struct hfi1_ibport *ibp;
+ struct hfi1_pportdata *ppd;
+ struct _vls_ectrs *vlinfo;
+ unsigned long vl;
+ u64 port_mask, tmp;
+ unsigned long vl_select_mask;
+ int vfi;
+
+ req = (struct opa_port_error_counters64_msg *)pmp->data;
+
+ num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
+
+ num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
+ num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
+
+ if (num_ports != 1 || num_ports != num_pslm) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ response_data_size = struct_size(req, port.vls, num_vls);
+
+ if (response_data_size > sizeof(pmp->data)) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+ /*
+ * The bit set in the mask needs to be consistent with the
+ * port the request came in on.
+ */
+ port_mask = be64_to_cpu(req->port_select_mask[3]);
+ port_num = find_first_bit((unsigned long *)&port_mask,
+ sizeof(port_mask) * 8);
+
+ if (port_num != port) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ rsp = &req->port;
+
+ ibp = to_iport(ibdev, port_num);
+ ppd = ppd_from_ibp(ibp);
+
+ memset(rsp, 0, sizeof(*rsp));
+ rsp->port_number = port_num;
+
+ pma_get_opa_port_ectrs(ibdev, rsp, port_num);
+
+ rsp->port_rcv_remote_physical_errors =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
+ CNTR_INVALID_VL));
+ rsp->fm_config_errors =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
+ CNTR_INVALID_VL));
+ tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
+
+ rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
+ rsp->port_rcv_errors =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
+ vlinfo = &rsp->vls[0];
+ vfi = 0;
+ vl_select_mask = be32_to_cpu(req->vl_select_mask);
+ for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
+ memset(vlinfo, 0, sizeof(*vlinfo));
+ rsp->vls[vfi].port_vl_xmit_discards =
+ cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
+ idx_from_vl(vl)));
+ vlinfo += 1;
+ vfi++;
+ }
+
+ if (resp_len)
+ *resp_len += response_data_size;
+
+ return reply((struct ib_mad_hdr *)pmp);
+}
+
+static int pma_get_ib_portcounters(struct ib_pma_mad *pmp,
+ struct ib_device *ibdev, u32 port)
+{
+ struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
+ pmp->data;
+ struct _port_ectrs rsp;
+ u64 temp_link_overrun_errors;
+ u64 temp_64;
+ u32 temp_32;
+
+ memset(&rsp, 0, sizeof(rsp));
+ pma_get_opa_port_ectrs(ibdev, &rsp, port);
+
+ if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ goto bail;
+ }
+
+ p->symbol_error_counter = 0; /* N/A for OPA */
+
+ temp_32 = be32_to_cpu(rsp.link_error_recovery);
+ if (temp_32 > 0xFFUL)
+ p->link_error_recovery_counter = 0xFF;
+ else
+ p->link_error_recovery_counter = (u8)temp_32;
+
+ temp_32 = be32_to_cpu(rsp.link_downed);
+ if (temp_32 > 0xFFUL)
+ p->link_downed_counter = 0xFF;
+ else
+ p->link_downed_counter = (u8)temp_32;
+
+ temp_64 = be64_to_cpu(rsp.port_rcv_errors);
+ if (temp_64 > 0xFFFFUL)
+ p->port_rcv_errors = cpu_to_be16(0xFFFF);
+ else
+ p->port_rcv_errors = cpu_to_be16((u16)temp_64);
+
+ temp_64 = be64_to_cpu(rsp.port_rcv_remote_physical_errors);
+ if (temp_64 > 0xFFFFUL)
+ p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
+ else
+ p->port_rcv_remphys_errors = cpu_to_be16((u16)temp_64);
+
+ temp_64 = be64_to_cpu(rsp.port_rcv_switch_relay_errors);
+ p->port_rcv_switch_relay_errors = cpu_to_be16((u16)temp_64);
+
+ temp_64 = be64_to_cpu(rsp.port_xmit_discards);
+ if (temp_64 > 0xFFFFUL)
+ p->port_xmit_discards = cpu_to_be16(0xFFFF);
+ else
+ p->port_xmit_discards = cpu_to_be16((u16)temp_64);
+
+ temp_64 = be64_to_cpu(rsp.port_xmit_constraint_errors);
+ if (temp_64 > 0xFFUL)
+ p->port_xmit_constraint_errors = 0xFF;
+ else
+ p->port_xmit_constraint_errors = (u8)temp_64;
+
+ temp_64 = be64_to_cpu(rsp.port_rcv_constraint_errors);
+ if (temp_64 > 0xFFUL)
+ p->port_rcv_constraint_errors = 0xFFUL;
+ else
+ p->port_rcv_constraint_errors = (u8)temp_64;
+
+ /* LocalLink: 7:4, BufferOverrun: 3:0 */
+ temp_64 = be64_to_cpu(rsp.local_link_integrity_errors);
+ if (temp_64 > 0xFUL)
+ temp_64 = 0xFUL;
+
+ temp_link_overrun_errors = temp_64 << 4;
+
+ temp_64 = be64_to_cpu(rsp.excessive_buffer_overruns);
+ if (temp_64 > 0xFUL)
+ temp_64 = 0xFUL;
+ temp_link_overrun_errors |= temp_64;
+
+ p->link_overrun_errors = (u8)temp_link_overrun_errors;
+
+ p->vl15_dropped = 0; /* N/A for OPA */
+
+bail:
+ return reply((struct ib_mad_hdr *)pmp);
+}
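
pma_get_ib_portcounters() maps the wide OPA counters into the narrower legacy IB PortCounters fields by saturating at each field's maximum rather than truncating. The same clamp, factored into illustrative helpers that are not part of the driver:

/* Illustrative only: saturate a 64-bit counter into a narrow legacy field. */
static u8 clamp_counter_u8(u64 val)
{
	return val > 0xFFUL ? 0xFF : (u8)val;
}

static __be16 clamp_counter_be16(u64 val)
{
	return cpu_to_be16(val > 0xFFFFUL ? 0xFFFF : (u16)val);
}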
+
+static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
+ struct ib_device *ibdev,
+ u32 port, u32 *resp_len)
+{
+ size_t response_data_size;
+ struct _port_ei *rsp;
+ struct opa_port_error_info_msg *req;
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ u64 port_mask;
+ u32 num_ports;
+ u32 port_num;
+ u8 num_pslm;
+ u64 reg;
+
+ req = (struct opa_port_error_info_msg *)pmp->data;
+ rsp = &req->port;
+
+ num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
+ num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
+
+ memset(rsp, 0, sizeof(*rsp));
+
+ if (num_ports != 1 || num_ports != num_pslm) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ /* Sanity check */
+ response_data_size = sizeof(struct opa_port_error_info_msg);
+
+ if (response_data_size > sizeof(pmp->data)) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ /*
+ * The bit set in the mask needs to be consistent with the port
+ * the request came in on.
+ */
+ port_mask = be64_to_cpu(req->port_select_mask[3]);
+ port_num = find_first_bit((unsigned long *)&port_mask,
+ sizeof(port_mask) * 8);
+
+ if (port_num != port) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+ rsp->port_number = port;
+
+ /* PortRcvErrorInfo */
+ rsp->port_rcv_ei.status_and_code =
+ dd->err_info_rcvport.status_and_code;
+ memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit1,
+ &dd->err_info_rcvport.packet_flit1, sizeof(u64));
+ memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit2,
+ &dd->err_info_rcvport.packet_flit2, sizeof(u64));
+
+	/* ExcessiveBufferOverrunInfo */
+ reg = read_csr(dd, RCV_ERR_INFO);
+ if (reg & RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK) {
+ /*
+ * if the RcvExcessBufferOverrun bit is set, save SC of
+ * first pkt that encountered an excess buffer overrun
+ */
+ u8 tmp = (u8)reg;
+
+ tmp &= RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SC_SMASK;
+ tmp <<= 2;
+ rsp->excessive_buffer_overrun_ei.status_and_sc = tmp;
+ /* set the status bit */
+ rsp->excessive_buffer_overrun_ei.status_and_sc |= 0x80;
+ }
+
+ rsp->port_xmit_constraint_ei.status =
+ dd->err_info_xmit_constraint.status;
+ rsp->port_xmit_constraint_ei.pkey =
+ cpu_to_be16(dd->err_info_xmit_constraint.pkey);
+ rsp->port_xmit_constraint_ei.slid =
+ cpu_to_be32(dd->err_info_xmit_constraint.slid);
+
+ rsp->port_rcv_constraint_ei.status =
+ dd->err_info_rcv_constraint.status;
+ rsp->port_rcv_constraint_ei.pkey =
+ cpu_to_be16(dd->err_info_rcv_constraint.pkey);
+ rsp->port_rcv_constraint_ei.slid =
+ cpu_to_be32(dd->err_info_rcv_constraint.slid);
+
+ /* UncorrectableErrorInfo */
+ rsp->uncorrectable_ei.status_and_code = dd->err_info_uncorrectable;
+
+ /* FMConfigErrorInfo */
+ rsp->fm_config_ei.status_and_code = dd->err_info_fmconfig;
+
+ if (resp_len)
+ *resp_len += response_data_size;
+
+ return reply((struct ib_mad_hdr *)pmp);
+}
+
+static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
+ struct ib_device *ibdev,
+ u32 port, u32 *resp_len)
+{
+ struct opa_clear_port_status *req =
+ (struct opa_clear_port_status *)pmp->data;
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
+ u64 portn = be64_to_cpu(req->port_select_mask[3]);
+ u32 counter_select = be32_to_cpu(req->counter_select_mask);
+ unsigned long vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
+ unsigned long vl;
+
+ if ((nports != 1) || (portn != 1 << port)) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+ /*
+ * only counters returned by pma_get_opa_portstatus() are
+ * handled, so when pma_get_opa_portstatus() gets a fix,
+ * the corresponding change should be made here as well.
+ */
+
+ if (counter_select & CS_PORT_XMIT_DATA)
+ write_dev_cntr(dd, C_DC_XMIT_FLITS, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_PORT_RCV_DATA)
+ write_dev_cntr(dd, C_DC_RCV_FLITS, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_PORT_XMIT_PKTS)
+ write_dev_cntr(dd, C_DC_XMIT_PKTS, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_PORT_RCV_PKTS)
+ write_dev_cntr(dd, C_DC_RCV_PKTS, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_PORT_MCAST_XMIT_PKTS)
+ write_dev_cntr(dd, C_DC_MC_XMIT_PKTS, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_PORT_MCAST_RCV_PKTS)
+ write_dev_cntr(dd, C_DC_MC_RCV_PKTS, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_PORT_XMIT_WAIT) {
+ write_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL, 0);
+ ppd->port_vl_xmit_wait_last[C_VL_COUNT] = 0;
+ ppd->vl_xmit_flit_cnt[C_VL_COUNT] = 0;
+ }
+	/* ignore cs_sw_port_congestion for HFIs */
+
+ if (counter_select & CS_PORT_RCV_FECN)
+ write_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_PORT_RCV_BECN)
+ write_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL, 0);
+
+ /* ignore cs_port_xmit_time_cong for HFIs */
+ /* ignore cs_port_xmit_wasted_bw for now */
+ /* ignore cs_port_xmit_wait_data for now */
+ if (counter_select & CS_PORT_RCV_BUBBLE)
+ write_dev_cntr(dd, C_DC_RCV_BBL, CNTR_INVALID_VL, 0);
+
+ /* Only applicable for switch */
+ /* if (counter_select & CS_PORT_MARK_FECN)
+ * write_csr(dd, DCC_PRF_PORT_MARK_FECN_CNT, 0);
+ */
+
+ if (counter_select & CS_PORT_RCV_CONSTRAINT_ERRORS)
+ write_port_cntr(ppd, C_SW_RCV_CSTR_ERR, CNTR_INVALID_VL, 0);
+
+ /* ignore cs_port_rcv_switch_relay_errors for HFIs */
+ if (counter_select & CS_PORT_XMIT_DISCARDS)
+ write_port_cntr(ppd, C_SW_XMIT_DSCD, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_PORT_XMIT_CONSTRAINT_ERRORS)
+ write_port_cntr(ppd, C_SW_XMIT_CSTR_ERR, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS)
+ write_dev_cntr(dd, C_DC_RMT_PHY_ERR, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_LOCAL_LINK_INTEGRITY_ERRORS)
+ write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_LINK_ERROR_RECOVERY) {
+ write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
+ write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
+ CNTR_INVALID_VL, 0);
+ }
+
+ if (counter_select & CS_PORT_RCV_ERRORS)
+ write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_EXCESSIVE_BUFFER_OVERRUNS) {
+ write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
+ dd->rcv_ovfl_cnt = 0;
+ }
+
+ if (counter_select & CS_FM_CONFIG_ERRORS)
+ write_dev_cntr(dd, C_DC_FM_CFG_ERR, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_LINK_DOWNED)
+ write_port_cntr(ppd, C_SW_LINK_DOWN, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_UNCORRECTABLE_ERRORS)
+ write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0);
+
+ for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
+ if (counter_select & CS_PORT_XMIT_DATA)
+ write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0);
+
+ if (counter_select & CS_PORT_RCV_DATA)
+ write_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl), 0);
+
+ if (counter_select & CS_PORT_XMIT_PKTS)
+ write_port_cntr(ppd, C_TX_PKT_VL, idx_from_vl(vl), 0);
+
+ if (counter_select & CS_PORT_RCV_PKTS)
+ write_dev_cntr(dd, C_DC_RX_PKT_VL, idx_from_vl(vl), 0);
+
+ if (counter_select & CS_PORT_XMIT_WAIT) {
+ write_port_cntr(ppd, C_TX_WAIT_VL, idx_from_vl(vl), 0);
+ ppd->port_vl_xmit_wait_last[idx_from_vl(vl)] = 0;
+ ppd->vl_xmit_flit_cnt[idx_from_vl(vl)] = 0;
+ }
+
+ /* sw_port_vl_congestion is 0 for HFIs */
+ if (counter_select & CS_PORT_RCV_FECN)
+ write_dev_cntr(dd, C_DC_RCV_FCN_VL, idx_from_vl(vl), 0);
+
+ if (counter_select & CS_PORT_RCV_BECN)
+ write_dev_cntr(dd, C_DC_RCV_BCN_VL, idx_from_vl(vl), 0);
+
+ /* port_vl_xmit_time_cong is 0 for HFIs */
+ /* port_vl_xmit_wasted_bw ??? */
+ /* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ??? */
+ if (counter_select & CS_PORT_RCV_BUBBLE)
+ write_dev_cntr(dd, C_DC_RCV_BBL_VL, idx_from_vl(vl), 0);
+
+ /* if (counter_select & CS_PORT_MARK_FECN)
+ * write_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT + offset, 0);
+ */
+ if (counter_select & C_SW_XMIT_DSCD_VL)
+ write_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
+ idx_from_vl(vl), 0);
+ }
+
+ if (resp_len)
+ *resp_len += sizeof(*req);
+
+ return reply((struct ib_mad_hdr *)pmp);
+}
+
+static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
+ struct ib_device *ibdev,
+ u32 port, u32 *resp_len)
+{
+ struct _port_ei *rsp;
+ struct opa_port_error_info_msg *req;
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ u64 port_mask;
+ u32 num_ports;
+ u32 port_num;
+ u8 num_pslm;
+ u32 error_info_select;
+
+ req = (struct opa_port_error_info_msg *)pmp->data;
+ rsp = &req->port;
+
+ num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
+ num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
+
+ memset(rsp, 0, sizeof(*rsp));
+
+ if (num_ports != 1 || num_ports != num_pslm) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ /*
+ * The bit set in the mask needs to be consistent with the port
+ * the request came in on.
+ */
+ port_mask = be64_to_cpu(req->port_select_mask[3]);
+ port_num = find_first_bit((unsigned long *)&port_mask,
+ sizeof(port_mask) * 8);
+
+ if (port_num != port) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ error_info_select = be32_to_cpu(req->error_info_select_mask);
+
+ /* PortRcvErrorInfo */
+ if (error_info_select & ES_PORT_RCV_ERROR_INFO)
+ /* turn off status bit */
+ dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
+
+	/* ExcessiveBufferOverrunInfo */
+ if (error_info_select & ES_EXCESSIVE_BUFFER_OVERRUN_INFO)
+ /*
+ * status bit is essentially kept in the h/w - bit 5 of
+ * RCV_ERR_INFO
+ */
+ write_csr(dd, RCV_ERR_INFO,
+ RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
+
+ if (error_info_select & ES_PORT_XMIT_CONSTRAINT_ERROR_INFO)
+ dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
+
+ if (error_info_select & ES_PORT_RCV_CONSTRAINT_ERROR_INFO)
+ dd->err_info_rcv_constraint.status &= ~OPA_EI_STATUS_SMASK;
+
+ /* UncorrectableErrorInfo */
+ if (error_info_select & ES_UNCORRECTABLE_ERROR_INFO)
+ /* turn off status bit */
+ dd->err_info_uncorrectable &= ~OPA_EI_STATUS_SMASK;
+
+ /* FMConfigErrorInfo */
+ if (error_info_select & ES_FM_CONFIG_ERROR_INFO)
+ /* turn off status bit */
+ dd->err_info_fmconfig &= ~OPA_EI_STATUS_SMASK;
+
+ if (resp_len)
+ *resp_len += sizeof(*req);
+
+ return reply((struct ib_mad_hdr *)pmp);
+}
+
+struct opa_congestion_info_attr {
+ __be16 congestion_info;
+ u8 control_table_cap; /* Multiple of 64 entry unit CCTs */
+ u8 congestion_log_length;
+} __packed;
+
+static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len)
+{
+ struct opa_congestion_info_attr *p =
+ (struct opa_congestion_info_attr *)data;
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+ if (smp_length_check(sizeof(*p), max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ p->congestion_info = 0;
+ p->control_table_cap = ppd->cc_max_table_entries;
+ p->congestion_log_length = OPA_CONG_LOG_ELEMS;
+
+ if (resp_len)
+ *resp_len += sizeof(*p);
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
+ u8 *data, struct ib_device *ibdev,
+ u32 port, u32 *resp_len, u32 max_len)
+{
+ int i;
+ struct opa_congestion_setting_attr *p =
+ (struct opa_congestion_setting_attr *)data;
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct opa_congestion_setting_entry_shadow *entries;
+ struct cc_state *cc_state;
+
+ if (smp_length_check(sizeof(*p), max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ rcu_read_lock();
+
+ cc_state = get_cc_state(ppd);
+
+ if (!cc_state) {
+ rcu_read_unlock();
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ entries = cc_state->cong_setting.entries;
+ p->port_control = cpu_to_be16(cc_state->cong_setting.port_control);
+ p->control_map = cpu_to_be32(cc_state->cong_setting.control_map);
+ for (i = 0; i < OPA_MAX_SLS; i++) {
+ p->entries[i].ccti_increase = entries[i].ccti_increase;
+ p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
+ p->entries[i].trigger_threshold =
+ entries[i].trigger_threshold;
+ p->entries[i].ccti_min = entries[i].ccti_min;
+ }
+
+ rcu_read_unlock();
+
+ if (resp_len)
+ *resp_len += sizeof(*p);
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+/*
+ * Apply congestion control information stored in the ppd to the
+ * active structure.
+ */
+static void apply_cc_state(struct hfi1_pportdata *ppd)
+{
+ struct cc_state *old_cc_state, *new_cc_state;
+
+ new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL);
+ if (!new_cc_state)
+ return;
+
+ /*
+ * Hold the lock for updating *and* to prevent ppd information
+ * from changing during the update.
+ */
+ spin_lock(&ppd->cc_state_lock);
+
+ old_cc_state = get_cc_state_protected(ppd);
+ if (!old_cc_state) {
+ /* never active, or shutting down */
+ spin_unlock(&ppd->cc_state_lock);
+ kfree(new_cc_state);
+ return;
+ }
+
+ *new_cc_state = *old_cc_state;
+
+ if (ppd->total_cct_entry)
+ new_cc_state->cct.ccti_limit = ppd->total_cct_entry - 1;
+ else
+ new_cc_state->cct.ccti_limit = 0;
+
+ memcpy(new_cc_state->cct.entries, ppd->ccti_entries,
+ ppd->total_cct_entry * sizeof(struct ib_cc_table_entry));
+
+ new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED;
+ new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map;
+ memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries,
+ OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry));
+
+ rcu_assign_pointer(ppd->cc_state, new_cc_state);
+
+ spin_unlock(&ppd->cc_state_lock);
+
+ kfree_rcu(old_cc_state, rcu);
+}
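
apply_cc_state() follows the usual RCU publish pattern: build a fresh cc_state copy under cc_state_lock, publish it with rcu_assign_pointer(), and free the old copy with kfree_rcu() once pre-existing readers have finished. Readers, as in __subn_get_opa_cong_setting() above, only need rcu_read_lock() around the access. A condensed reader-side sketch, assuming get_cc_state() wraps rcu_dereference(ppd->cc_state) (its definition is outside this hunk):

/*
 * Illustrative reader-side sketch: snapshot one field of the RCU-managed
 * cc_state, mirroring __subn_get_opa_cong_setting() above.
 */
static u32 read_cc_control_map(struct hfi1_pportdata *ppd)
{
	struct cc_state *cc_state;
	u32 map = 0;

	rcu_read_lock();
	cc_state = get_cc_state(ppd);
	if (cc_state)
		map = cc_state->cong_setting.control_map;
	rcu_read_unlock();

	return map;
}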
+
+static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len)
+{
+ struct opa_congestion_setting_attr *p =
+ (struct opa_congestion_setting_attr *)data;
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct opa_congestion_setting_entry_shadow *entries;
+ int i;
+
+ if (smp_length_check(sizeof(*p), max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ /*
+ * Save details from packet into the ppd. Hold the cc_state_lock so
+ * our information is consistent with anyone trying to apply the state.
+ */
+ spin_lock(&ppd->cc_state_lock);
+ ppd->cc_sl_control_map = be32_to_cpu(p->control_map);
+
+ entries = ppd->congestion_entries;
+ for (i = 0; i < OPA_MAX_SLS; i++) {
+ entries[i].ccti_increase = p->entries[i].ccti_increase;
+ entries[i].ccti_timer = be16_to_cpu(p->entries[i].ccti_timer);
+ entries[i].trigger_threshold =
+ p->entries[i].trigger_threshold;
+ entries[i].ccti_min = p->entries[i].ccti_min;
+ }
+ spin_unlock(&ppd->cc_state_lock);
+
+ /* now apply the information */
+ apply_cc_state(ppd);
+
+ return __subn_get_opa_cong_setting(smp, am, data, ibdev, port,
+ resp_len, max_len);
+}
+
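+/*
+ * Return a snapshot of the HFI congestion log. Reading the log resets
+ * the threshold event map and counter.
+ */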
+static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
+ u8 *data, struct ib_device *ibdev,
+ u32 port, u32 *resp_len, u32 max_len)
+{
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct opa_hfi1_cong_log *cong_log = (struct opa_hfi1_cong_log *)data;
+ u64 ts;
+ int i;
+
+ if (am || smp_length_check(sizeof(*cong_log), max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ spin_lock_irq(&ppd->cc_log_lock);
+
+ cong_log->log_type = OPA_CC_LOG_TYPE_HFI;
+ cong_log->congestion_flags = 0;
+ cong_log->threshold_event_counter =
+ cpu_to_be16(ppd->threshold_event_counter);
+ memcpy(cong_log->threshold_cong_event_map,
+ ppd->threshold_cong_event_map,
+ sizeof(cong_log->threshold_cong_event_map));
+ /* keep timestamp in units of 1.024 usec */
+ ts = ktime_get_ns() / 1024;
+ cong_log->current_time_stamp = cpu_to_be32(ts);
+ for (i = 0; i < OPA_CONG_LOG_ELEMS; i++) {
+ struct opa_hfi1_cong_log_event_internal *cce =
+ &ppd->cc_events[ppd->cc_mad_idx++];
+ if (ppd->cc_mad_idx == OPA_CONG_LOG_ELEMS)
+ ppd->cc_mad_idx = 0;
+ /*
+ * Entries which are older than twice the time
+ * required to wrap the counter are supposed to
+ * be zeroed (CA10-49 IBTA, release 1.2.1, V1).
+ */
+ if ((ts - cce->timestamp) / 2 > U32_MAX)
+ continue;
+ memcpy(cong_log->events[i].local_qp_cn_entry, &cce->lqpn, 3);
+ memcpy(cong_log->events[i].remote_qp_number_cn_entry,
+ &cce->rqpn, 3);
+ cong_log->events[i].sl_svc_type_cn_entry =
+ ((cce->sl & 0x1f) << 3) | (cce->svc_type & 0x7);
+ cong_log->events[i].remote_lid_cn_entry =
+ cpu_to_be32(cce->rlid);
+ cong_log->events[i].timestamp_cn_entry =
+ cpu_to_be32(cce->timestamp);
+ }
+
+ /*
+ * Reset threshold_cong_event_map, and threshold_event_counter
+ * to 0 when log is read.
+ */
+ memset(ppd->threshold_cong_event_map, 0x0,
+ sizeof(ppd->threshold_cong_event_map));
+ ppd->threshold_event_counter = 0;
+
+ spin_unlock_irq(&ppd->cc_log_lock);
+
+ if (resp_len)
+ *resp_len += sizeof(struct opa_hfi1_cong_log);
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
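+/*
+ * Return n_blocks of the congestion control table, starting at
+ * start_block, from the RCU-protected cc_state.
+ */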
+static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len)
+{
+ struct ib_cc_table_attr *cc_table_attr =
+ (struct ib_cc_table_attr *)data;
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u32 start_block = OPA_AM_START_BLK(am);
+ u32 n_blocks = OPA_AM_NBLK(am);
+ struct ib_cc_table_entry_shadow *entries;
+ int i, j;
+ u32 sentry, eentry;
+ struct cc_state *cc_state;
+ u32 size = sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1);
+
+ /* sanity check n_blocks, start_block */
+ if (n_blocks == 0 || smp_length_check(size, max_len) ||
+ start_block + n_blocks > ppd->cc_max_table_entries) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ rcu_read_lock();
+
+ cc_state = get_cc_state(ppd);
+
+ if (!cc_state) {
+ rcu_read_unlock();
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ sentry = start_block * IB_CCT_ENTRIES;
+ eentry = sentry + (IB_CCT_ENTRIES * n_blocks);
+
+ cc_table_attr->ccti_limit = cpu_to_be16(cc_state->cct.ccti_limit);
+
+ entries = cc_state->cct.entries;
+
+ /* return n_blocks, though the last block may not be full */
+ for (j = 0, i = sentry; i < eentry; j++, i++)
+ cc_table_attr->ccti_entries[j].entry =
+ cpu_to_be16(entries[i].entry);
+
+ rcu_read_unlock();
+
+ if (resp_len)
+ *resp_len += size;
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
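+/*
+ * Store the requested congestion control table blocks into the ppd
+ * under cc_state_lock, apply them to the active cc_state, and reply
+ * with the result of the corresponding Get.
+ */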
+static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len)
+{
+ struct ib_cc_table_attr *p = (struct ib_cc_table_attr *)data;
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u32 start_block = OPA_AM_START_BLK(am);
+ u32 n_blocks = OPA_AM_NBLK(am);
+ struct ib_cc_table_entry_shadow *entries;
+ int i, j;
+ u32 sentry, eentry;
+ u16 ccti_limit;
+ u32 size = sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1);
+
+ /* sanity check n_blocks, start_block */
+ if (n_blocks == 0 || smp_length_check(size, max_len) ||
+ start_block + n_blocks > ppd->cc_max_table_entries) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ sentry = start_block * IB_CCT_ENTRIES;
+ eentry = sentry + ((n_blocks - 1) * IB_CCT_ENTRIES) +
+ (be16_to_cpu(p->ccti_limit)) % IB_CCT_ENTRIES + 1;
+
+ /* sanity check ccti_limit */
+ ccti_limit = be16_to_cpu(p->ccti_limit);
+ if (ccti_limit + 1 > eentry) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ /*
+ * Save details from packet into the ppd. Hold the cc_state_lock so
+ * our information is consistent with anyone trying to apply the state.
+ */
+ spin_lock(&ppd->cc_state_lock);
+ ppd->total_cct_entry = ccti_limit + 1;
+ entries = ppd->ccti_entries;
+ for (j = 0, i = sentry; i < eentry; j++, i++)
+ entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry);
+ spin_unlock(&ppd->cc_state_lock);
+
+ /* now apply the information */
+ apply_cc_state(ppd);
+
+ return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len,
+ max_len);
+}
+
+struct opa_led_info {
+ __be32 rsvd_led_mask;
+ __be32 rsvd;
+};
+
+#define OPA_LED_SHIFT 31
+#define OPA_LED_MASK BIT(OPA_LED_SHIFT)
+
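+/*
+ * Report whether LED beaconing is currently active on the port via the
+ * LedInfo attribute.
+ */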
+static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_pportdata *ppd = dd->pport;
+ struct opa_led_info *p = (struct opa_led_info *)data;
+ u32 nport = OPA_AM_NPORT(am);
+ u32 is_beaconing_active;
+
+ if (nport != 1 || smp_length_check(sizeof(*p), max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ /*
+ * This pairs with the memory barrier in hfi1_start_led_override to
+ * ensure that we read the correct state of LED beaconing represented
+ * by led_override_timer_active
+ */
+ smp_rmb();
+ is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active);
+ p->rsvd_led_mask = cpu_to_be32(is_beaconing_active << OPA_LED_SHIFT);
+
+ if (resp_len)
+ *resp_len += sizeof(struct opa_led_info);
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct opa_led_info *p = (struct opa_led_info *)data;
+ u32 nport = OPA_AM_NPORT(am);
+ int on = !!(be32_to_cpu(p->rsvd_led_mask) & OPA_LED_MASK);
+
+ if (nport != 1 || smp_length_check(sizeof(*p), max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ if (on)
+ hfi1_start_led_override(dd->pport, 2000, 1500);
+ else
+ shutdown_led_override(dd->pport);
+
+ return __subn_get_opa_led_info(smp, am, data, ibdev, port, resp_len,
+ max_len);
+}
+
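+/* Dispatch an OPA subnet management Get by attribute ID. */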
+static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
+ u8 *data, struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len)
+{
+ int ret;
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+
+ switch (attr_id) {
+ case IB_SMP_ATTR_NODE_DESC:
+ ret = __subn_get_opa_nodedesc(smp, am, data, ibdev, port,
+ resp_len, max_len);
+ break;
+ case IB_SMP_ATTR_NODE_INFO:
+ ret = __subn_get_opa_nodeinfo(smp, am, data, ibdev, port,
+ resp_len, max_len);
+ break;
+ case IB_SMP_ATTR_PORT_INFO:
+ ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port,
+ resp_len, max_len);
+ break;
+ case IB_SMP_ATTR_PKEY_TABLE:
+ ret = __subn_get_opa_pkeytable(smp, am, data, ibdev, port,
+ resp_len, max_len);
+ break;
+ case OPA_ATTRIB_ID_SL_TO_SC_MAP:
+ ret = __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port,
+ resp_len, max_len);
+ break;
+ case OPA_ATTRIB_ID_SC_TO_SL_MAP:
+ ret = __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port,
+ resp_len, max_len);
+ break;
+ case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
+ ret = __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port,
+ resp_len, max_len);
+ break;
+ case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
+ ret = __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
+ resp_len, max_len);
+ break;
+ case OPA_ATTRIB_ID_PORT_STATE_INFO:
+ ret = __subn_get_opa_psi(smp, am, data, ibdev, port,
+ resp_len, max_len);
+ break;
+ case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
+ ret = __subn_get_opa_bct(smp, am, data, ibdev, port,
+ resp_len, max_len);
+ break;
+ case OPA_ATTRIB_ID_CABLE_INFO:
+ ret = __subn_get_opa_cable_info(smp, am, data, ibdev, port,
+ resp_len, max_len);
+ break;
+ case IB_SMP_ATTR_VL_ARB_TABLE:
+ ret = __subn_get_opa_vl_arb(smp, am, data, ibdev, port,
+ resp_len, max_len);
+ break;
+ case OPA_ATTRIB_ID_CONGESTION_INFO:
+ ret = __subn_get_opa_cong_info(smp, am, data, ibdev, port,
+ resp_len, max_len);
+ break;
+ case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
+ ret = __subn_get_opa_cong_setting(smp, am, data, ibdev,
+ port, resp_len, max_len);
+ break;
+ case OPA_ATTRIB_ID_HFI_CONGESTION_LOG:
+ ret = __subn_get_opa_hfi1_cong_log(smp, am, data, ibdev,
+ port, resp_len, max_len);
+ break;
+ case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
+ ret = __subn_get_opa_cc_table(smp, am, data, ibdev, port,
+ resp_len, max_len);
+ break;
+ case IB_SMP_ATTR_LED_INFO:
+ ret = __subn_get_opa_led_info(smp, am, data, ibdev, port,
+ resp_len, max_len);
+ break;
+ case IB_SMP_ATTR_SM_INFO:
+ if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED)
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+ if (ibp->rvp.port_cap_flags & IB_PORT_SM)
+ return IB_MAD_RESULT_SUCCESS;
+ fallthrough;
+ default:
+ smp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply((struct ib_mad_hdr *)smp);
+ break;
+ }
+ return ret;
+}
+
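+/* Dispatch an OPA subnet management Set by attribute ID. */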
+static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
+ u8 *data, struct ib_device *ibdev, u32 port,
+ u32 *resp_len, u32 max_len, int local_mad)
+{
+ int ret;
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+
+ switch (attr_id) {
+ case IB_SMP_ATTR_PORT_INFO:
+ ret = __subn_set_opa_portinfo(smp, am, data, ibdev, port,
+ resp_len, max_len, local_mad);
+ break;
+ case IB_SMP_ATTR_PKEY_TABLE:
+ ret = __subn_set_opa_pkeytable(smp, am, data, ibdev, port,
+ resp_len, max_len);
+ break;
+ case OPA_ATTRIB_ID_SL_TO_SC_MAP:
+ ret = __subn_set_opa_sl_to_sc(smp, am, data, ibdev, port,
+ resp_len, max_len);
+ break;
+ case OPA_ATTRIB_ID_SC_TO_SL_MAP:
+ ret = __subn_set_opa_sc_to_sl(smp, am, data, ibdev, port,
+ resp_len, max_len);
+ break;
+ case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
+ ret = __subn_set_opa_sc_to_vlt(smp, am, data, ibdev, port,
+ resp_len, max_len);
+ break;
+ case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
+ ret = __subn_set_opa_sc_to_vlnt(smp, am, data, ibdev, port,
+ resp_len, max_len);
+ break;
+ case OPA_ATTRIB_ID_PORT_STATE_INFO:
+ ret = __subn_set_opa_psi(smp, am, data, ibdev, port,
+ resp_len, max_len, local_mad);
+ break;
+ case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
+ ret = __subn_set_opa_bct(smp, am, data, ibdev, port,
+ resp_len, max_len);
+ break;
+ case IB_SMP_ATTR_VL_ARB_TABLE:
+ ret = __subn_set_opa_vl_arb(smp, am, data, ibdev, port,
+ resp_len, max_len);
+ break;
+ case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
+ ret = __subn_set_opa_cong_setting(smp, am, data, ibdev,
+ port, resp_len, max_len);
+ break;
+ case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
+ ret = __subn_set_opa_cc_table(smp, am, data, ibdev, port,
+ resp_len, max_len);
+ break;
+ case IB_SMP_ATTR_LED_INFO:
+ ret = __subn_set_opa_led_info(smp, am, data, ibdev, port,
+ resp_len, max_len);
+ break;
+ case IB_SMP_ATTR_SM_INFO:
+ if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED)
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+ if (ibp->rvp.port_cap_flags & IB_PORT_SM)
+ return IB_MAD_RESULT_SUCCESS;
+ fallthrough;
+ default:
+ smp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply((struct ib_mad_hdr *)smp);
+ break;
+ }
+ return ret;
+}
+
+static inline void set_aggr_error(struct opa_aggregate *ag)
+{
+ ag->err_reqlength |= cpu_to_be16(0x8000);
+}
+
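+/*
+ * Process an aggregate Get: walk the member attributes in the MAD and
+ * issue an individual Get for each, stopping when an error is reported.
+ */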
+static int subn_get_opa_aggregate(struct opa_smp *smp,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len)
+{
+ int i;
+ u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
+ u8 *next_smp = opa_get_smp_data(smp);
+
+ if (num_attr < 1 || num_attr > 117) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ for (i = 0; i < num_attr; i++) {
+ struct opa_aggregate *agg;
+ size_t agg_data_len;
+ size_t agg_size;
+ u32 am;
+
+ agg = (struct opa_aggregate *)next_smp;
+ agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
+ agg_size = sizeof(*agg) + agg_data_len;
+ am = be32_to_cpu(agg->attr_mod);
+
+ *resp_len += agg_size;
+
+ if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ /* zero the payload for this segment */
+ memset(next_smp + sizeof(*agg), 0, agg_data_len);
+
+ (void)subn_get_opa_sma(agg->attr_id, smp, am, agg->data,
+ ibdev, port, NULL, (u32)agg_data_len);
+
+ if (smp->status & IB_SMP_INVALID_FIELD)
+ break;
+ if (smp->status & ~IB_SMP_DIRECTION) {
+ set_aggr_error(agg);
+ return reply((struct ib_mad_hdr *)smp);
+ }
+ next_smp += agg_size;
+ }
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int subn_set_opa_aggregate(struct opa_smp *smp,
+ struct ib_device *ibdev, u32 port,
+ u32 *resp_len, int local_mad)
+{
+ int i;
+ u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
+ u8 *next_smp = opa_get_smp_data(smp);
+
+ if (num_attr < 1 || num_attr > 117) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ for (i = 0; i < num_attr; i++) {
+ struct opa_aggregate *agg;
+ size_t agg_data_len;
+ size_t agg_size;
+ u32 am;
+
+ agg = (struct opa_aggregate *)next_smp;
+ agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
+ agg_size = sizeof(*agg) + agg_data_len;
+ am = be32_to_cpu(agg->attr_mod);
+
+ *resp_len += agg_size;
+
+ if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ (void)subn_set_opa_sma(agg->attr_id, smp, am, agg->data,
+ ibdev, port, NULL, (u32)agg_data_len,
+ local_mad);
+
+ if (smp->status & IB_SMP_INVALID_FIELD)
+ break;
+ if (smp->status & ~IB_SMP_DIRECTION) {
+ set_aggr_error(agg);
+ return reply((struct ib_mad_hdr *)smp);
+ }
+ next_smp += agg_size;
+ }
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+/*
+ * OPAv1 specifies that, on the transition to link up, these counters
+ * are cleared:
+ * PortRcvErrors [*]
+ * LinkErrorRecovery
+ * LocalLinkIntegrityErrors
+ * ExcessiveBufferOverruns [*]
+ *
+ * [*] Error info associated with these counters is retained, but the
+ * error info status is reset to 0.
+ */
+void clear_linkup_counters(struct hfi1_devdata *dd)
+{
+ /* PortRcvErrors */
+ write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
+ dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
+ /* LinkErrorRecovery */
+ write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
+ write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL, 0);
+ /* LocalLinkIntegrityErrors */
+ write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
+ /* ExcessiveBufferOverruns */
+ write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
+ dd->rcv_ovfl_cnt = 0;
+ dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
+}
+
+static int is_full_mgmt_pkey_in_table(struct hfi1_ibport *ibp)
+{
+ unsigned int i;
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i)
+ if (ppd->pkeys[i] == FULL_MGMT_P_KEY)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * is_local_mad() returns 1 if 'mad' is sent from, and destined to the
+ * local node, 0 otherwise.
+ */
+static int is_local_mad(struct hfi1_ibport *ibp, const struct opa_mad *mad,
+ const struct ib_wc *in_wc)
+{
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ const struct opa_smp *smp = (const struct opa_smp *)mad;
+
+ if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+ return (smp->hop_cnt == 0 &&
+ smp->route.dr.dr_slid == OPA_LID_PERMISSIVE &&
+ smp->route.dr.dr_dlid == OPA_LID_PERMISSIVE);
+ }
+
+ return (in_wc->slid == ppd->lid);
+}
+
+/*
+ * opa_local_smp_check() should only be called on MADs for which
+ * is_local_mad() returns true. It applies the SMP checks that are
+ * specific to SMPs which are sent from, and destined to this node.
+ * opa_local_smp_check() returns 0 if the SMP passes its checks, 1
+ * otherwise.
+ *
+ * SMPs which arrive from other nodes are instead checked by
+ * opa_smp_check().
+ */
+static int opa_local_smp_check(struct hfi1_ibport *ibp,
+ const struct ib_wc *in_wc)
+{
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u16 pkey;
+
+ if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys))
+ return 1;
+
+ pkey = ppd->pkeys[in_wc->pkey_index];
+ /*
+ * We need to do the "node-local" checks specified in OPAv1,
+ * rev 0.90, section 9.10.26, which are:
+ * - pkey is 0x7fff, or 0xffff
+ * - Source QPN == 0 || Destination QPN == 0
+ * - the MAD header's management class is either
+ * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE or
+ * IB_MGMT_CLASS_SUBN_LID_ROUTED
+ * - SLID != 0
+ *
+ * However, we know (and so don't need to check again) that,
+ * for local SMPs, the MAD stack passes MADs with:
+ * - Source QPN of 0
+ * - MAD mgmt_class is IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
+ * - SLID is either: OPA_LID_PERMISSIVE (0xFFFFFFFF), or
+ * our own port's lid
+ *
+ */
+ if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
+ return 0;
+ ingress_pkey_table_fail(ppd, pkey, in_wc->slid);
+ return 1;
+}
+
+/**
+ * hfi1_pkey_validation_pma - validate PKEYs for incoming PMA MAD packets
+ * @ibp: IB port data
+ * @in_mad: MAD packet with header and data
+ * @in_wc: Work completion data such as source LID, port number, etc.
+ *
+ * These are all the possible logic rules for validating a pkey:
+ *
+ * a) If pkey is neither FULL_MGMT_P_KEY nor LIM_MGMT_P_KEY,
+ * and NOT self-originated packet:
+ * Drop MAD packet as it should always be part of the
+ * management partition unless it's a self-originated packet.
+ *
+ * b) If pkey_index -> FULL_MGMT_P_KEY, and LIM_MGMT_P_KEY in pkey table:
+ * The packet is coming from a management node and the receiving node
+ * is also a management node, so it is safe for the packet to go through.
+ *
+ * c) If pkey_index -> FULL_MGMT_P_KEY, and LIM_MGMT_P_KEY is NOT in pkey table:
+ * Drop the packet as LIM_MGMT_P_KEY should always be in the pkey table.
+ * It could be an FM misconfiguration.
+ *
+ * d) If pkey_index -> LIM_MGMT_P_KEY and FULL_MGMT_P_KEY is NOT in pkey table:
+ * It is safe for the packet to go through since a non-management node is
+ * talking to another non-management node.
+ *
+ * e) If pkey_index -> LIM_MGMT_P_KEY and FULL_MGMT_P_KEY in pkey table:
+ * Drop the packet because a non-management node is talking to a
+ * management node, and it could be an attack.
+ *
+ * For the implementation, these rules can be simplified to checking only
+ * for (a) and (e). There is no need to check for rule (b) as
+ * the packet doesn't need to be dropped. Rule (c) is not possible in
+ * the driver as LIM_MGMT_P_KEY is always in the pkey table.
+ *
+ * Return:
+ * 0 if the pkey is okay, -EINVAL if it is a bad pkey
+ */
+static int hfi1_pkey_validation_pma(struct hfi1_ibport *ibp,
+ const struct opa_mad *in_mad,
+ const struct ib_wc *in_wc)
+{
+ u16 pkey_value = hfi1_lookup_pkey_value(ibp, in_wc->pkey_index);
+
+ /* Rule (a) from above */
+ if (!is_local_mad(ibp, in_mad, in_wc) &&
+ pkey_value != LIM_MGMT_P_KEY &&
+ pkey_value != FULL_MGMT_P_KEY)
+ return -EINVAL;
+
+ /* Rule (e) from above */
+ if (pkey_value == LIM_MGMT_P_KEY &&
+ is_full_mgmt_pkey_in_table(ibp))
+ return -EINVAL;
+
+ return 0;
+}
+
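+/*
+ * Process an OPA subnet management packet: validate the class version
+ * and M_Key, then dispatch by method (Get/Set of a single attribute or
+ * an aggregate, traps, and responses).
+ */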
+static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
+ u32 port, const struct opa_mad *in_mad,
+ struct opa_mad *out_mad,
+ u32 *resp_len, int local_mad)
+{
+ struct opa_smp *smp = (struct opa_smp *)out_mad;
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ u8 *data;
+ u32 am, data_size;
+ __be16 attr_id;
+ int ret;
+
+ *out_mad = *in_mad;
+ data = opa_get_smp_data(smp);
+ data_size = (u32)opa_get_smp_data_size(smp);
+
+ am = be32_to_cpu(smp->attr_mod);
+ attr_id = smp->attr_id;
+ if (smp->class_version != OPA_SM_CLASS_VERSION) {
+ smp->status |= IB_SMP_UNSUP_VERSION;
+ ret = reply((struct ib_mad_hdr *)smp);
+ return ret;
+ }
+ ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags, smp->mkey,
+ smp->route.dr.dr_slid, smp->route.dr.return_path,
+ smp->hop_cnt);
+ if (ret) {
+ u32 port_num = be32_to_cpu(smp->attr_mod);
+
+ /*
+ * If this is a get/set portinfo, we already check the
+ * M_Key if the MAD is for another port and the M_Key
+ * is OK on the receiving port. This check is needed
+ * to increment the error counters when the M_Key
+ * fails to match on *both* ports.
+ */
+ if (attr_id == IB_SMP_ATTR_PORT_INFO &&
+ (smp->method == IB_MGMT_METHOD_GET ||
+ smp->method == IB_MGMT_METHOD_SET) &&
+ port_num && port_num <= ibdev->phys_port_cnt &&
+ port != port_num)
+ (void)check_mkey(to_iport(ibdev, port_num),
+ (struct ib_mad_hdr *)smp, 0,
+ smp->mkey, smp->route.dr.dr_slid,
+ smp->route.dr.return_path,
+ smp->hop_cnt);
+ ret = IB_MAD_RESULT_FAILURE;
+ return ret;
+ }
+
+ *resp_len = opa_get_smp_header_size(smp);
+
+ switch (smp->method) {
+ case IB_MGMT_METHOD_GET:
+ switch (attr_id) {
+ default:
+ clear_opa_smp_data(smp);
+ ret = subn_get_opa_sma(attr_id, smp, am, data,
+ ibdev, port, resp_len,
+ data_size);
+ break;
+ case OPA_ATTRIB_ID_AGGREGATE:
+ ret = subn_get_opa_aggregate(smp, ibdev, port,
+ resp_len);
+ break;
+ }
+ break;
+ case IB_MGMT_METHOD_SET:
+ switch (attr_id) {
+ default:
+ ret = subn_set_opa_sma(attr_id, smp, am, data,
+ ibdev, port, resp_len,
+ data_size, local_mad);
+ break;
+ case OPA_ATTRIB_ID_AGGREGATE:
+ ret = subn_set_opa_aggregate(smp, ibdev, port,
+ resp_len, local_mad);
+ break;
+ }
+ break;
+ case IB_MGMT_METHOD_TRAP:
+ case IB_MGMT_METHOD_REPORT:
+ case IB_MGMT_METHOD_REPORT_RESP:
+ case IB_MGMT_METHOD_GET_RESP:
+ /*
+ * The ib_mad module will call us to process responses
+ * before checking for other consumers.
+ * Just tell the caller to process it normally.
+ */
+ ret = IB_MAD_RESULT_SUCCESS;
+ break;
+ case IB_MGMT_METHOD_TRAP_REPRESS:
+ subn_handle_opa_trap_repress(ibp, smp);
+ /* Always successful */
+ ret = IB_MAD_RESULT_SUCCESS;
+ break;
+ default:
+ smp->status |= IB_SMP_UNSUP_METHOD;
+ ret = reply((struct ib_mad_hdr *)smp);
+ break;
+ }
+
+ return ret;
+}
+
+static int process_subn(struct ib_device *ibdev, int mad_flags,
+ u32 port, const struct ib_mad *in_mad,
+ struct ib_mad *out_mad)
+{
+ struct ib_smp *smp = (struct ib_smp *)out_mad;
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ int ret;
+
+ *out_mad = *in_mad;
+ if (smp->class_version != 1) {
+ smp->status |= IB_SMP_UNSUP_VERSION;
+ ret = reply((struct ib_mad_hdr *)smp);
+ return ret;
+ }
+
+ ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags,
+ smp->mkey, (__force __be32)smp->dr_slid,
+ smp->return_path, smp->hop_cnt);
+ if (ret) {
+ u32 port_num = be32_to_cpu(smp->attr_mod);
+
+ /*
+ * If this is a get/set portinfo, we already check the
+ * M_Key if the MAD is for another port and the M_Key
+ * is OK on the receiving port. This check is needed
+ * to increment the error counters when the M_Key
+ * fails to match on *both* ports.
+ */
+ if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
+ (smp->method == IB_MGMT_METHOD_GET ||
+ smp->method == IB_MGMT_METHOD_SET) &&
+ port_num && port_num <= ibdev->phys_port_cnt &&
+ port != port_num)
+ (void)check_mkey(to_iport(ibdev, port_num),
+ (struct ib_mad_hdr *)smp, 0,
+ smp->mkey,
+ (__force __be32)smp->dr_slid,
+ smp->return_path, smp->hop_cnt);
+ ret = IB_MAD_RESULT_FAILURE;
+ return ret;
+ }
+
+ switch (smp->method) {
+ case IB_MGMT_METHOD_GET:
+ switch (smp->attr_id) {
+ case IB_SMP_ATTR_NODE_INFO:
+ ret = subn_get_nodeinfo(smp, ibdev, port);
+ break;
+ default:
+ smp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply((struct ib_mad_hdr *)smp);
+ break;
+ }
+ break;
+ }
+
+ return ret;
+}
+
+static int process_perf(struct ib_device *ibdev, u32 port,
+ const struct ib_mad *in_mad,
+ struct ib_mad *out_mad)
+{
+ struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
+ struct ib_class_port_info *cpi = (struct ib_class_port_info *)
+ &pmp->data;
+ int ret = IB_MAD_RESULT_FAILURE;
+
+ *out_mad = *in_mad;
+ if (pmp->mad_hdr.class_version != 1) {
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
+ ret = reply((struct ib_mad_hdr *)pmp);
+ return ret;
+ }
+
+ switch (pmp->mad_hdr.method) {
+ case IB_MGMT_METHOD_GET:
+ switch (pmp->mad_hdr.attr_id) {
+ case IB_PMA_PORT_COUNTERS:
+ ret = pma_get_ib_portcounters(pmp, ibdev, port);
+ break;
+ case IB_PMA_PORT_COUNTERS_EXT:
+ ret = pma_get_ib_portcounters_ext(pmp, ibdev, port);
+ break;
+ case IB_PMA_CLASS_PORT_INFO:
+ cpi->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+ ret = reply((struct ib_mad_hdr *)pmp);
+ break;
+ default:
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply((struct ib_mad_hdr *)pmp);
+ break;
+ }
+ break;
+
+ case IB_MGMT_METHOD_SET:
+ if (pmp->mad_hdr.attr_id) {
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply((struct ib_mad_hdr *)pmp);
+ }
+ break;
+
+ case IB_MGMT_METHOD_TRAP:
+ case IB_MGMT_METHOD_GET_RESP:
+ /*
+ * The ib_mad module will call us to process responses
+ * before checking for other consumers.
+ * Just tell the caller to process it normally.
+ */
+ ret = IB_MAD_RESULT_SUCCESS;
+ break;
+
+ default:
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
+ ret = reply((struct ib_mad_hdr *)pmp);
+ break;
+ }
+
+ return ret;
+}
+
+static int process_perf_opa(struct ib_device *ibdev, u32 port,
+ const struct opa_mad *in_mad,
+ struct opa_mad *out_mad, u32 *resp_len)
+{
+ struct opa_pma_mad *pmp = (struct opa_pma_mad *)out_mad;
+ int ret;
+
+ *out_mad = *in_mad;
+
+ if (pmp->mad_hdr.class_version != OPA_SM_CLASS_VERSION) {
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ *resp_len = sizeof(pmp->mad_hdr);
+
+ switch (pmp->mad_hdr.method) {
+ case IB_MGMT_METHOD_GET:
+ switch (pmp->mad_hdr.attr_id) {
+ case IB_PMA_CLASS_PORT_INFO:
+ ret = pma_get_opa_classportinfo(pmp, ibdev, resp_len);
+ break;
+ case OPA_PM_ATTRIB_ID_PORT_STATUS:
+ ret = pma_get_opa_portstatus(pmp, ibdev, port,
+ resp_len);
+ break;
+ case OPA_PM_ATTRIB_ID_DATA_PORT_COUNTERS:
+ ret = pma_get_opa_datacounters(pmp, ibdev, port,
+ resp_len);
+ break;
+ case OPA_PM_ATTRIB_ID_ERROR_PORT_COUNTERS:
+ ret = pma_get_opa_porterrors(pmp, ibdev, port,
+ resp_len);
+ break;
+ case OPA_PM_ATTRIB_ID_ERROR_INFO:
+ ret = pma_get_opa_errorinfo(pmp, ibdev, port,
+ resp_len);
+ break;
+ default:
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply((struct ib_mad_hdr *)pmp);
+ break;
+ }
+ break;
+
+ case IB_MGMT_METHOD_SET:
+ switch (pmp->mad_hdr.attr_id) {
+ case OPA_PM_ATTRIB_ID_CLEAR_PORT_STATUS:
+ ret = pma_set_opa_portstatus(pmp, ibdev, port,
+ resp_len);
+ break;
+ case OPA_PM_ATTRIB_ID_ERROR_INFO:
+ ret = pma_set_opa_errorinfo(pmp, ibdev, port,
+ resp_len);
+ break;
+ default:
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply((struct ib_mad_hdr *)pmp);
+ break;
+ }
+ break;
+
+ case IB_MGMT_METHOD_TRAP:
+ case IB_MGMT_METHOD_GET_RESP:
+ /*
+ * The ib_mad module will call us to process responses
+ * before checking for other consumers.
+ * Just tell the caller to process it normally.
+ */
+ ret = IB_MAD_RESULT_SUCCESS;
+ break;
+
+ default:
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
+ ret = reply((struct ib_mad_hdr *)pmp);
+ break;
+ }
+
+ return ret;
+}
+
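+/*
+ * Process an OPA MAD: select the pkey index for the reply, apply the
+ * local SMP and PMA pkey checks, and dispatch by management class.
+ */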
+static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
+ u32 port, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh,
+ const struct opa_mad *in_mad,
+ struct opa_mad *out_mad, size_t *out_mad_size,
+ u16 *out_mad_pkey_index)
+{
+ int ret;
+ int pkey_idx;
+ int local_mad = 0;
+ u32 resp_len = in_wc->byte_len - sizeof(*in_grh);
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+
+ pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
+ if (pkey_idx < 0) {
+ pr_warn("failed to find limited mgmt pkey, defaulting 0x%x\n",
+ hfi1_get_pkey(ibp, 1));
+ pkey_idx = 1;
+ }
+ *out_mad_pkey_index = (u16)pkey_idx;
+
+ switch (in_mad->mad_hdr.mgmt_class) {
+ case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
+ case IB_MGMT_CLASS_SUBN_LID_ROUTED:
+ local_mad = is_local_mad(ibp, in_mad, in_wc);
+ if (local_mad) {
+ ret = opa_local_smp_check(ibp, in_wc);
+ if (ret)
+ return IB_MAD_RESULT_FAILURE;
+ }
+ ret = process_subn_opa(ibdev, mad_flags, port, in_mad,
+ out_mad, &resp_len, local_mad);
+ goto bail;
+ case IB_MGMT_CLASS_PERF_MGMT:
+ ret = hfi1_pkey_validation_pma(ibp, in_mad, in_wc);
+ if (ret)
+ return IB_MAD_RESULT_FAILURE;
+
+ ret = process_perf_opa(ibdev, port, in_mad, out_mad, &resp_len);
+ goto bail;
+
+ default:
+ ret = IB_MAD_RESULT_SUCCESS;
+ }
+
+bail:
+ if (ret & IB_MAD_RESULT_REPLY)
+ *out_mad_size = round_up(resp_len, 8);
+ else if (ret & IB_MAD_RESULT_SUCCESS)
+ *out_mad_size = in_wc->byte_len - sizeof(struct ib_grh);
+
+ return ret;
+}
+
+static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u32 port,
+ const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh,
+ const struct ib_mad *in_mad,
+ struct ib_mad *out_mad)
+{
+ int ret;
+
+ switch (in_mad->mad_hdr.mgmt_class) {
+ case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
+ case IB_MGMT_CLASS_SUBN_LID_ROUTED:
+ ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
+ break;
+ case IB_MGMT_CLASS_PERF_MGMT:
+ ret = process_perf(ibdev, port, in_mad, out_mad);
+ break;
+ default:
+ ret = IB_MAD_RESULT_SUCCESS;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * hfi1_process_mad - process an incoming MAD packet
+ * @ibdev: the infiniband device this packet came in on
+ * @mad_flags: MAD flags
+ * @port: the port number this packet came in on
+ * @in_wc: the work completion entry for this packet
+ * @in_grh: the global route header for this packet
+ * @in_mad: the incoming MAD
+ * @out_mad: any outgoing MAD reply
+ * @out_mad_size: size of the outgoing MAD reply
+ * @out_mad_pkey_index: used to pass back the packet key index
+ *
+ * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
+ * interested in processing.
+ *
+ * Note that the verbs framework has already done the MAD sanity checks,
+ * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
+ * MADs.
+ *
+ * This is called by the ib_mad module.
+ */
+int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u32 port,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad *in_mad, struct ib_mad *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
+{
+ switch (in_mad->mad_hdr.base_version) {
+ case OPA_MGMT_BASE_VERSION:
+ return hfi1_process_opa_mad(ibdev, mad_flags, port,
+ in_wc, in_grh,
+ (struct opa_mad *)in_mad,
+ (struct opa_mad *)out_mad,
+ out_mad_size,
+ out_mad_pkey_index);
+ case IB_MGMT_BASE_VERSION:
+ return hfi1_process_ib_mad(ibdev, mad_flags, port, in_wc,
+ in_grh, in_mad, out_mad);
+ default:
+ break;
+ }
+
+ return IB_MAD_RESULT_FAILURE;
+}
diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h
new file mode 100644
index 000000000000..d6dde762921a
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/mad.h
@@ -0,0 +1,436 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2015 - 2017 Intel Corporation.
+ */
+
+#ifndef _HFI1_MAD_H
+#define _HFI1_MAD_H
+
+#include <rdma/ib_pma.h>
+#include <rdma/opa_smi.h>
+#include <rdma/opa_port_info.h>
+#include "opa_compat.h"
+
+/*
+ * OPA Traps
+ */
+#define OPA_TRAP_GID_NOW_IN_SERVICE cpu_to_be16(64)
+#define OPA_TRAP_GID_OUT_OF_SERVICE cpu_to_be16(65)
+#define OPA_TRAP_ADD_MULTICAST_GROUP cpu_to_be16(66)
+#define OPA_TRAL_DEL_MULTICAST_GROUP cpu_to_be16(67)
+#define OPA_TRAP_UNPATH cpu_to_be16(68)
+#define OPA_TRAP_REPATH cpu_to_be16(69)
+#define OPA_TRAP_PORT_CHANGE_STATE cpu_to_be16(128)
+#define OPA_TRAP_LINK_INTEGRITY cpu_to_be16(129)
+#define OPA_TRAP_EXCESSIVE_BUFFER_OVERRUN cpu_to_be16(130)
+#define OPA_TRAP_FLOW_WATCHDOG cpu_to_be16(131)
+#define OPA_TRAP_CHANGE_CAPABILITY cpu_to_be16(144)
+#define OPA_TRAP_CHANGE_SYSGUID cpu_to_be16(145)
+#define OPA_TRAP_BAD_M_KEY cpu_to_be16(256)
+#define OPA_TRAP_BAD_P_KEY cpu_to_be16(257)
+#define OPA_TRAP_BAD_Q_KEY cpu_to_be16(258)
+#define OPA_TRAP_SWITCH_BAD_PKEY cpu_to_be16(259)
+#define OPA_SMA_TRAP_DATA_LINK_WIDTH cpu_to_be16(2048)
+
+/*
+ * Generic trap/notice other local changes flags (trap 144).
+ */
+#define OPA_NOTICE_TRAP_LWDE_CHG 0x08 /* Link Width Downgrade Enable
+ * changed
+ */
+#define OPA_NOTICE_TRAP_LSE_CHG 0x04 /* Link Speed Enable changed */
+#define OPA_NOTICE_TRAP_LWE_CHG 0x02 /* Link Width Enable changed */
+#define OPA_NOTICE_TRAP_NODE_DESC_CHG 0x01
+
+struct opa_mad_notice_attr {
+ u8 generic_type;
+ u8 prod_type_msb;
+ __be16 prod_type_lsb;
+ __be16 trap_num;
+ __be16 toggle_count;
+ __be32 issuer_lid;
+ __be32 reserved1;
+ union ib_gid issuer_gid;
+
+ union {
+ struct {
+ u8 details[64];
+ } raw_data;
+
+ struct {
+ union ib_gid gid;
+ } __packed ntc_64_65_66_67;
+
+ struct {
+ __be32 lid;
+ } __packed ntc_128;
+
+ struct {
+ __be32 lid; /* where violation happened */
+ u8 port_num; /* where violation happened */
+ } __packed ntc_129_130_131;
+
+ struct {
+ __be32 lid; /* LID where change occurred */
+ __be32 new_cap_mask; /* new capability mask */
+ __be16 reserved2;
+ __be16 cap_mask3;
+ __be16 change_flags; /* low 4 bits only */
+ } __packed ntc_144;
+
+ struct {
+ __be64 new_sys_guid;
+ __be32 lid; /* lid where sys guid changed */
+ } __packed ntc_145;
+
+ struct {
+ __be32 lid;
+ __be32 dr_slid;
+ u8 method;
+ u8 dr_trunc_hop;
+ __be16 attr_id;
+ __be32 attr_mod;
+ __be64 mkey;
+ u8 dr_rtn_path[30];
+ } __packed ntc_256;
+
+ struct {
+ __be32 lid1;
+ __be32 lid2;
+ __be32 key;
+ u8 sl; /* SL: high 5 bits */
+ u8 reserved3[3];
+ union ib_gid gid1;
+ union ib_gid gid2;
+ __be32 qp1; /* high 8 bits reserved */
+ __be32 qp2; /* high 8 bits reserved */
+ } __packed ntc_257_258;
+
+ struct {
+ __be16 flags; /* low 8 bits reserved */
+ __be16 pkey;
+ __be32 lid1;
+ __be32 lid2;
+ u8 sl; /* SL: high 5 bits */
+ u8 reserved4[3];
+ union ib_gid gid1;
+ union ib_gid gid2;
+ __be32 qp1; /* high 8 bits reserved */
+ __be32 qp2; /* high 8 bits reserved */
+ } __packed ntc_259;
+
+ struct {
+ __be32 lid;
+ } __packed ntc_2048;
+
+ };
+};
+
+#define IB_VLARB_LOWPRI_0_31 1
+#define IB_VLARB_LOWPRI_32_63 2
+#define IB_VLARB_HIGHPRI_0_31 3
+#define IB_VLARB_HIGHPRI_32_63 4
+
+#define OPA_MAX_PREEMPT_CAP 32
+#define OPA_VLARB_LOW_ELEMENTS 0
+#define OPA_VLARB_HIGH_ELEMENTS 1
+#define OPA_VLARB_PREEMPT_ELEMENTS 2
+#define OPA_VLARB_PREEMPT_MATRIX 3
+
+#define IB_PMA_PORT_COUNTERS_CONG cpu_to_be16(0xFF00)
+#define LINK_SPEED_25G 1
+#define LINK_SPEED_12_5G 2
+#define LINK_WIDTH_DEFAULT 4
+#define DECIMAL_FACTORING 1000
+/*
+ * The default link width is multiplied by 1000
+ * to get an accurate value after division.
+ */
+#define FACTOR_LINK_WIDTH (LINK_WIDTH_DEFAULT * DECIMAL_FACTORING)
+
+struct ib_pma_portcounters_cong {
+ u8 reserved;
+ u8 reserved1;
+ __be16 port_check_rate;
+ __be16 symbol_error_counter;
+ u8 link_error_recovery_counter;
+ u8 link_downed_counter;
+ __be16 port_rcv_errors;
+ __be16 port_rcv_remphys_errors;
+ __be16 port_rcv_switch_relay_errors;
+ __be16 port_xmit_discards;
+ u8 port_xmit_constraint_errors;
+ u8 port_rcv_constraint_errors;
+ u8 reserved2;
+ u8 link_overrun_errors; /* LocalLink: 7:4, BufferOverrun: 3:0 */
+ __be16 reserved3;
+ __be16 vl15_dropped;
+ __be64 port_xmit_data;
+ __be64 port_rcv_data;
+ __be64 port_xmit_packets;
+ __be64 port_rcv_packets;
+ __be64 port_xmit_wait;
+ __be64 port_adr_events;
+} __packed;
+
+#define IB_SMP_UNSUP_VERSION cpu_to_be16(0x0004)
+#define IB_SMP_UNSUP_METHOD cpu_to_be16(0x0008)
+#define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C)
+#define IB_SMP_INVALID_FIELD cpu_to_be16(0x001C)
+
+#define HFI1_XMIT_RATE_UNSUPPORTED 0x0
+#define HFI1_XMIT_RATE_PICO 0x7
+/* number of 4nsec cycles equaling 2secs */
+#define HFI1_CONG_TIMER_PSINTERVAL 0x1DCD64EC
+
+#define IB_CC_SVCTYPE_RC 0x0
+#define IB_CC_SVCTYPE_UC 0x1
+#define IB_CC_SVCTYPE_RD 0x2
+#define IB_CC_SVCTYPE_UD 0x3
+
+/*
+ * There should be an equivalent IB #define for the following, but
+ * I cannot find it.
+ */
+#define OPA_CC_LOG_TYPE_HFI 2
+
+struct opa_hfi1_cong_log_event_internal {
+ u32 lqpn;
+ u32 rqpn;
+ u8 sl;
+ u8 svc_type;
+ u32 rlid;
+ u64 timestamp; /* wider than 32 bits to detect 32 bit rollover */
+};
+
+struct opa_hfi1_cong_log_event {
+ u8 local_qp_cn_entry[3];
+ u8 remote_qp_number_cn_entry[3];
+ u8 sl_svc_type_cn_entry; /* 5 bits SL, 3 bits svc type */
+ u8 reserved;
+ __be32 remote_lid_cn_entry;
+ __be32 timestamp_cn_entry;
+} __packed;
+
+#define OPA_CONG_LOG_ELEMS 96
+
+struct opa_hfi1_cong_log {
+ u8 log_type;
+ u8 congestion_flags;
+ __be16 threshold_event_counter;
+ __be32 current_time_stamp;
+ u8 threshold_cong_event_map[OPA_MAX_SLS / 8];
+ struct opa_hfi1_cong_log_event events[OPA_CONG_LOG_ELEMS];
+} __packed;
+
+#define IB_CC_TABLE_CAP_DEFAULT 31
+
+/* Port control flags */
+#define IB_CC_CCS_PC_SL_BASED 0x01
+
+struct opa_congestion_setting_entry {
+ u8 ccti_increase;
+ u8 reserved;
+ __be16 ccti_timer;
+ u8 trigger_threshold;
+ u8 ccti_min; /* min CCTI for cc table */
+} __packed;
+
+struct opa_congestion_setting_entry_shadow {
+ u8 ccti_increase;
+ u8 reserved;
+ u16 ccti_timer;
+ u8 trigger_threshold;
+ u8 ccti_min; /* min CCTI for cc table */
+} __packed;
+
+struct opa_congestion_setting_attr {
+ __be32 control_map;
+ __be16 port_control;
+ struct opa_congestion_setting_entry entries[OPA_MAX_SLS];
+} __packed;
+
+struct opa_congestion_setting_attr_shadow {
+ u32 control_map;
+ u16 port_control;
+ struct opa_congestion_setting_entry_shadow entries[OPA_MAX_SLS];
+} __packed;
+
+#define IB_CC_TABLE_ENTRY_INCREASE_DEFAULT 1
+#define IB_CC_TABLE_ENTRY_TIMER_DEFAULT 1
+
+/* 64 Congestion Control table entries in a single MAD */
+#define IB_CCT_ENTRIES 64
+#define IB_CCT_MIN_ENTRIES (IB_CCT_ENTRIES * 2)
+
+struct ib_cc_table_entry {
+ __be16 entry; /* shift:2, multiplier:14 */
+};
+
+struct ib_cc_table_entry_shadow {
+ u16 entry; /* shift:2, multiplier:14 */
+};
+
+struct ib_cc_table_attr {
+ __be16 ccti_limit; /* max CCTI for cc table */
+ struct ib_cc_table_entry ccti_entries[IB_CCT_ENTRIES];
+} __packed;
+
+struct ib_cc_table_attr_shadow {
+ u16 ccti_limit; /* max CCTI for cc table */
+ struct ib_cc_table_entry_shadow ccti_entries[IB_CCT_ENTRIES];
+} __packed;
+
+#define CC_TABLE_SHADOW_MAX \
+ (IB_CC_TABLE_CAP_DEFAULT * IB_CCT_ENTRIES)
+
+struct cc_table_shadow {
+ u16 ccti_limit; /* max CCTI for cc table */
+ struct ib_cc_table_entry_shadow entries[CC_TABLE_SHADOW_MAX];
+} __packed;
+
+/*
+ * struct cc_state combines the (active) per-port congestion control
+ * table, and the (active) per-SL congestion settings. cc_state data
+ * may need to be read in code paths that we want to be fast, so it
+ * is an RCU protected structure.
+ */
+struct cc_state {
+ struct rcu_head rcu;
+ struct cc_table_shadow cct;
+ struct opa_congestion_setting_attr_shadow cong_setting;
+};
+
+/*
+ * OPA BufferControl MAD
+ */
+
+/* attribute modifier macros */
+#define OPA_AM_NPORT_SHIFT 24
+#define OPA_AM_NPORT_MASK 0xff
+#define OPA_AM_NPORT_SMASK (OPA_AM_NPORT_MASK << OPA_AM_NPORT_SHIFT)
+#define OPA_AM_NPORT(am) (((am) >> OPA_AM_NPORT_SHIFT) & \
+ OPA_AM_NPORT_MASK)
+
+#define OPA_AM_NBLK_SHIFT 24
+#define OPA_AM_NBLK_MASK 0xff
+#define OPA_AM_NBLK_SMASK (OPA_AM_NBLK_MASK << OPA_AM_NBLK_SHIFT)
+#define OPA_AM_NBLK(am) (((am) >> OPA_AM_NBLK_SHIFT) & \
+ OPA_AM_NBLK_MASK)
+
+#define OPA_AM_START_BLK_SHIFT 0
+#define OPA_AM_START_BLK_MASK 0xff
+#define OPA_AM_START_BLK_SMASK (OPA_AM_START_BLK_MASK << \
+ OPA_AM_START_BLK_SHIFT)
+#define OPA_AM_START_BLK(am) (((am) >> OPA_AM_START_BLK_SHIFT) & \
+ OPA_AM_START_BLK_MASK)
+
+#define OPA_AM_PORTNUM_SHIFT 0
+#define OPA_AM_PORTNUM_MASK 0xff
+#define OPA_AM_PORTNUM_SMASK (OPA_AM_PORTNUM_MASK << OPA_AM_PORTNUM_SHIFT)
+#define OPA_AM_PORTNUM(am) (((am) >> OPA_AM_PORTNUM_SHIFT) & \
+ OPA_AM_PORTNUM_MASK)
+
+#define OPA_AM_ASYNC_SHIFT 12
+#define OPA_AM_ASYNC_MASK 0x1
+#define OPA_AM_ASYNC_SMASK (OPA_AM_ASYNC_MASK << OPA_AM_ASYNC_SHIFT)
+#define OPA_AM_ASYNC(am) (((am) >> OPA_AM_ASYNC_SHIFT) & \
+ OPA_AM_ASYNC_MASK)
+
+#define OPA_AM_START_SM_CFG_SHIFT 9
+#define OPA_AM_START_SM_CFG_MASK 0x1
+#define OPA_AM_START_SM_CFG_SMASK (OPA_AM_START_SM_CFG_MASK << \
+ OPA_AM_START_SM_CFG_SHIFT)
+#define OPA_AM_START_SM_CFG(am) (((am) >> OPA_AM_START_SM_CFG_SHIFT) \
+ & OPA_AM_START_SM_CFG_MASK)
+
+#define OPA_AM_CI_ADDR_SHIFT 19
+#define OPA_AM_CI_ADDR_MASK 0xfff
+#define OPA_AM_CI_ADDR_SMASK (OPA_AM_CI_ADDR_MASK << OPA_AM_CI_ADDR_SHIFT)
+#define OPA_AM_CI_ADDR(am) (((am) >> OPA_AM_CI_ADDR_SHIFT) & \
+ OPA_AM_CI_ADDR_MASK)
+
+#define OPA_AM_CI_LEN_SHIFT 13
+#define OPA_AM_CI_LEN_MASK 0x3f
+#define OPA_AM_CI_LEN_SMASK (OPA_AM_CI_LEN_MASK << OPA_AM_CI_LEN_SHIFT)
+#define OPA_AM_CI_LEN(am) (((am) >> OPA_AM_CI_LEN_SHIFT) & \
+ OPA_AM_CI_LEN_MASK)
+
+/* error info macros */
+#define OPA_EI_STATUS_SMASK 0x80
+#define OPA_EI_CODE_SMASK 0x0f
+
+struct vl_limit {
+ __be16 dedicated;
+ __be16 shared;
+};
+
+struct buffer_control {
+ __be16 reserved;
+ __be16 overall_shared_limit;
+ struct vl_limit vl[OPA_MAX_VLS];
+};
+
+struct sc2vlnt {
+ u8 vlnt[32]; /* 5 bit VL, 3 bits reserved */
+};
+
+/*
+ * The PortSamplesControl.CounterMasks field is an array of 3 bit fields
+ * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
+ * We support 5 counters which only count the mandatory quantities.
+ */
+#define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
+#define COUNTER_MASK0_9 \
+ cpu_to_be32(COUNTER_MASK(1, 0) | \
+ COUNTER_MASK(1, 1) | \
+ COUNTER_MASK(1, 2) | \
+ COUNTER_MASK(1, 3) | \
+ COUNTER_MASK(1, 4))
+
+void hfi1_event_pkey_change(struct hfi1_devdata *dd, u32 port);
+void hfi1_handle_trap_timer(struct timer_list *t);
+u16 tx_link_width(u16 link_width);
+u64 get_xmit_wait_counters(struct hfi1_pportdata *ppd, u16 link_width,
+ u16 link_speed, int vl);
+/**
+ * get_link_speed - determine whether the link speed is 12.5G or 25G
+ * @link_speed: the speed of the active link
+ * @return: 2 if the link speed is 12.5G, 1 if it is 25G.
+ *
+ * The function supplies the link speed divisor required by
+ * convert_xmit_counter(), whose conversion formula is (25G / link_speed).
+ * Returning 1 for a 25G link and 2 for a 12.5G link avoids converting
+ * the floating point value 12.5.
+ */
+static inline u16 get_link_speed(u16 link_speed)
+{
+ return (link_speed == 1) ?
+ LINK_SPEED_12_5G : LINK_SPEED_25G;
+}
+
+/**
+ * convert_xmit_counter - calculate flit times for given xmit counter
+ * value
+ * @xmit_wait_val: current xmit counter value
+ * @link_width: width of active link
+ * @link_speed: speed of active link
+ * @return: return xmit counter value in flit times.
+ */
+static inline u64 convert_xmit_counter(u64 xmit_wait_val, u16 link_width,
+ u16 link_speed)
+{
+ return (xmit_wait_val * 2 * (FACTOR_LINK_WIDTH / link_width)
+ * link_speed) / DECIMAL_FACTORING;
+}
+#endif /* _HFI1_MAD_H */
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
new file mode 100644
index 000000000000..67a5c410fb5e
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
+ * Copyright(c) 2016 - 2017 Intel Corporation.
+ */
+
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/mmu_notifier.h>
+#include <linux/interval_tree_generic.h>
+#include <linux/sched/mm.h>
+
+#include "mmu_rb.h"
+#include "trace.h"
+
+static unsigned long mmu_node_start(struct mmu_rb_node *);
+static unsigned long mmu_node_last(struct mmu_rb_node *);
+static int mmu_notifier_range_start(struct mmu_notifier *,
+ const struct mmu_notifier_range *);
+static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
+ unsigned long, unsigned long);
+static void release_immediate(struct kref *refcount);
+static void handle_remove(struct work_struct *work);
+
+static const struct mmu_notifier_ops mn_opts = {
+ .invalidate_range_start = mmu_notifier_range_start,
+};
+
+INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
+ mmu_node_start, mmu_node_last, static, __mmu_int_rb);
+
+static unsigned long mmu_node_start(struct mmu_rb_node *node)
+{
+ return node->addr & PAGE_MASK;
+}
+
+static unsigned long mmu_node_last(struct mmu_rb_node *node)
+{
+ return PAGE_ALIGN(node->addr + node->len) - 1;
+}
+
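+/*
+ * Allocate a cacheline-aligned handler, register an mmu notifier on the
+ * current mm, and hand the handler back to the caller.
+ */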
+int hfi1_mmu_rb_register(void *ops_arg,
+ const struct mmu_rb_ops *ops,
+ struct workqueue_struct *wq,
+ struct mmu_rb_handler **handler)
+{
+ struct mmu_rb_handler *h;
+ void *free_ptr;
+ int ret;
+
+ free_ptr = kzalloc(sizeof(*h) + cache_line_size() - 1, GFP_KERNEL);
+ if (!free_ptr)
+ return -ENOMEM;
+
+ h = PTR_ALIGN(free_ptr, cache_line_size());
+ h->root = RB_ROOT_CACHED;
+ h->ops = ops;
+ h->ops_arg = ops_arg;
+ INIT_HLIST_NODE(&h->mn.hlist);
+ spin_lock_init(&h->lock);
+ h->mn.ops = &mn_opts;
+ INIT_WORK(&h->del_work, handle_remove);
+ INIT_LIST_HEAD(&h->del_list);
+ INIT_LIST_HEAD(&h->lru_list);
+ h->wq = wq;
+ h->free_ptr = free_ptr;
+
+ ret = mmu_notifier_register(&h->mn, current->mm);
+ if (ret) {
+ kfree(free_ptr);
+ return ret;
+ }
+
+ *handler = h;
+ return 0;
+}
+
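+/*
+ * Unregister the mmu notifier, flush the deferred-removal work, then
+ * drop the handler's reference on every node still in the tree before
+ * freeing the handler.
+ */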
+void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
+{
+ struct mmu_rb_node *rbnode;
+ struct rb_node *node;
+ unsigned long flags;
+ struct list_head del_list;
+
+ /* Prevent freeing of mm until we are completely finished. */
+ mmgrab(handler->mn.mm);
+
+ /* Unregister first so we don't get any more notifications. */
+ mmu_notifier_unregister(&handler->mn, handler->mn.mm);
+
+ /*
+ * Make sure the wq delete handler is finished running. It will not
+ * be triggered once the mmu notifiers are unregistered above.
+ */
+ flush_work(&handler->del_work);
+
+ INIT_LIST_HEAD(&del_list);
+
+ spin_lock_irqsave(&handler->lock, flags);
+ while ((node = rb_first_cached(&handler->root))) {
+ rbnode = rb_entry(node, struct mmu_rb_node, node);
+ rb_erase_cached(node, &handler->root);
+ /* move from LRU list to delete list */
+ list_move(&rbnode->list, &del_list);
+ }
+ spin_unlock_irqrestore(&handler->lock, flags);
+
+ while (!list_empty(&del_list)) {
+ rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
+ list_del(&rbnode->list);
+ kref_put(&rbnode->refcount, release_immediate);
+ }
+
+ /* Now the mm may be freed. */
+ mmdrop(handler->mn.mm);
+
+ kfree(handler->free_ptr);
+}
+
+int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
+ struct mmu_rb_node *mnode)
+{
+ struct mmu_rb_node *node;
+ unsigned long flags;
+ int ret = 0;
+
+ trace_hfi1_mmu_rb_insert(mnode);
+
+ if (current->mm != handler->mn.mm)
+ return -EPERM;
+
+ spin_lock_irqsave(&handler->lock, flags);
+ node = __mmu_rb_search(handler, mnode->addr, mnode->len);
+ if (node) {
+ ret = -EEXIST;
+ goto unlock;
+ }
+ __mmu_int_rb_insert(mnode, &handler->root);
+ list_add_tail(&mnode->list, &handler->lru_list);
+ mnode->handler = handler;
+unlock:
+ spin_unlock_irqrestore(&handler->lock, flags);
+ return ret;
+}
+
+/* Caller must hold handler lock */
+struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
+ unsigned long addr, unsigned long len)
+{
+ struct mmu_rb_node *node;
+
+ trace_hfi1_mmu_rb_search(addr, len);
+ node = __mmu_int_rb_iter_first(&handler->root, addr, (addr + len) - 1);
+ if (node)
+ list_move_tail(&node->list, &handler->lru_list);
+ return node;
+}
+
+/* Caller must hold handler lock */
+static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
+ unsigned long addr,
+ unsigned long len)
+{
+ struct mmu_rb_node *node = NULL;
+
+ trace_hfi1_mmu_rb_search(addr, len);
+ if (!handler->ops->filter) {
+ node = __mmu_int_rb_iter_first(&handler->root, addr,
+ (addr + len) - 1);
+ } else {
+ for (node = __mmu_int_rb_iter_first(&handler->root, addr,
+ (addr + len) - 1);
+ node;
+ node = __mmu_int_rb_iter_next(node, addr,
+ (addr + len) - 1)) {
+ if (handler->ops->filter(node, addr, len))
+ return node;
+ }
+ }
+ return node;
+}
+
+/*
+ * Must NOT call while holding mnode->handler->lock.
+ * mnode->handler->ops->remove() may sleep and mnode->handler->lock is a
+ * spinlock.
+ */
+static void release_immediate(struct kref *refcount)
+{
+ struct mmu_rb_node *mnode =
+ container_of(refcount, struct mmu_rb_node, refcount);
+ trace_hfi1_mmu_release_node(mnode);
+ mnode->handler->ops->remove(mnode->handler->ops_arg, mnode);
+}
+
+/* Caller must hold mnode->handler->lock */
+static void release_nolock(struct kref *refcount)
+{
+ struct mmu_rb_node *mnode =
+ container_of(refcount, struct mmu_rb_node, refcount);
+ list_move(&mnode->list, &mnode->handler->del_list);
+ queue_work(mnode->handler->wq, &mnode->handler->del_work);
+}
+
+/*
+ * struct mmu_rb_node->refcount kref_put() callback.
+ * Adds mmu_rb_node to mmu_rb_node->handler->del_list and queues
+ * handler->del_work on handler->wq.
+ * Does not remove mmu_rb_node from handler->lru_list or handler->root.
+ * Acquires mmu_rb_node->handler->lock; do not call while already holding
+ * handler->lock.
+ */
+void hfi1_mmu_rb_release(struct kref *refcount)
+{
+ struct mmu_rb_node *mnode =
+ container_of(refcount, struct mmu_rb_node, refcount);
+ struct mmu_rb_handler *handler = mnode->handler;
+ unsigned long flags;
+
+ spin_lock_irqsave(&handler->lock, flags);
+ list_move(&mnode->list, &mnode->handler->del_list);
+ spin_unlock_irqrestore(&handler->lock, flags);
+ queue_work(handler->wq, &handler->del_work);
+}
+
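+/*
+ * Evict nodes from the LRU list. Nodes with outstanding references are
+ * skipped; the rest are offered to the caller's evict op and, when
+ * accepted, removed immediately.
+ */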
+void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
+{
+ struct mmu_rb_node *rbnode, *ptr;
+ struct list_head del_list;
+ unsigned long flags;
+ bool stop = false;
+
+ if (current->mm != handler->mn.mm)
+ return;
+
+ INIT_LIST_HEAD(&del_list);
+
+ spin_lock_irqsave(&handler->lock, flags);
+ list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) {
+ /* refcount == 1 implies mmu_rb_handler has only rbnode ref */
+ if (kref_read(&rbnode->refcount) > 1)
+ continue;
+
+ if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
+ &stop)) {
+ __mmu_int_rb_remove(rbnode, &handler->root);
+ /* move from LRU list to delete list */
+ list_move(&rbnode->list, &del_list);
+ }
+ if (stop)
+ break;
+ }
+ spin_unlock_irqrestore(&handler->lock, flags);
+
+ list_for_each_entry_safe(rbnode, ptr, &del_list, list) {
+ trace_hfi1_mmu_rb_evict(rbnode);
+ kref_put(&rbnode->refcount, release_immediate);
+ }
+}
+
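+/*
+ * mmu notifier callback: unlink every node overlapping the invalidated
+ * range from the tree and LRU list and drop the handler's reference,
+ * queueing the node for deferred removal once the last reference is gone.
+ */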
+static int mmu_notifier_range_start(struct mmu_notifier *mn,
+ const struct mmu_notifier_range *range)
+{
+ struct mmu_rb_handler *handler =
+ container_of(mn, struct mmu_rb_handler, mn);
+ struct rb_root_cached *root = &handler->root;
+ struct mmu_rb_node *node, *ptr = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&handler->lock, flags);
+ for (node = __mmu_int_rb_iter_first(root, range->start, range->end-1);
+ node; node = ptr) {
+ /* Guard against node removal. */
+ ptr = __mmu_int_rb_iter_next(node, range->start,
+ range->end - 1);
+ trace_hfi1_mmu_mem_invalidate(node);
+ /* Remove from rb tree and lru_list. */
+ __mmu_int_rb_remove(node, root);
+ list_del_init(&node->list);
+ kref_put(&node->refcount, release_nolock);
+ }
+ spin_unlock_irqrestore(&handler->lock, flags);
+
+ return 0;
+}
+
+/*
+ * Work queue function to remove all nodes that have been queued up to
+ * be removed. The key feature is that mm->mmap_lock is not being held
+ * and the remove callback can sleep while taking it, if needed.
+ */
+static void handle_remove(struct work_struct *work)
+{
+ struct mmu_rb_handler *handler = container_of(work,
+ struct mmu_rb_handler,
+ del_work);
+ struct list_head del_list;
+ unsigned long flags;
+ struct mmu_rb_node *node;
+
+ /* remove anything that is queued to get removed */
+ spin_lock_irqsave(&handler->lock, flags);
+ list_replace_init(&handler->del_list, &del_list);
+ spin_unlock_irqrestore(&handler->lock, flags);
+
+ while (!list_empty(&del_list)) {
+ node = list_first_entry(&del_list, struct mmu_rb_node, list);
+ list_del(&node->list);
+ trace_hfi1_mmu_release_node(node);
+ handler->ops->remove(handler->ops_arg, node);
+ }
+}
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.h b/drivers/infiniband/hw/hfi1/mmu_rb.h
new file mode 100644
index 000000000000..3fa50dd64db6
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
+ * Copyright(c) 2016 Intel Corporation.
+ */
+
+#ifndef _HFI1_MMU_RB_H
+#define _HFI1_MMU_RB_H
+
+#include "hfi.h"
+
+struct mmu_rb_node {
+ unsigned long addr;
+ unsigned long len;
+ unsigned long __last;
+ struct rb_node node;
+ struct mmu_rb_handler *handler;
+ struct list_head list;
+ struct kref refcount;
+};
+
+/* filter and evict must not sleep. Only remove is allowed to sleep. */
+struct mmu_rb_ops {
+ bool (*filter)(struct mmu_rb_node *node, unsigned long addr,
+ unsigned long len);
+ void (*remove)(void *ops_arg, struct mmu_rb_node *mnode);
+ int (*evict)(void *ops_arg, struct mmu_rb_node *mnode,
+ void *evict_arg, bool *stop);
+};
+
+struct mmu_rb_handler {
+ /*
+ * struct mmu_notifier is 56 bytes, and spinlock_t is 4 bytes, so
+ * they fit together in one cache line. mn is relatively rarely
+ * accessed, so co-locating the spinlock with it achieves much of
+ * the cacheline contention reduction of giving the spinlock its own
+ * cacheline without the overhead of doing so.
+ */
+ struct mmu_notifier mn;
+ spinlock_t lock; /* protect the RB tree */
+
+ /* Begin on a new cacheline boundary here */
+ struct rb_root_cached root ____cacheline_aligned_in_smp;
+ void *ops_arg;
+ const struct mmu_rb_ops *ops;
+ struct list_head lru_list;
+ struct work_struct del_work;
+ struct list_head del_list;
+ struct workqueue_struct *wq;
+ void *free_ptr;
+};
+
+int hfi1_mmu_rb_register(void *ops_arg,
+ const struct mmu_rb_ops *ops,
+ struct workqueue_struct *wq,
+ struct mmu_rb_handler **handler);
+void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler);
+int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
+ struct mmu_rb_node *mnode);
+void hfi1_mmu_rb_release(struct kref *refcount);
+
+void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg);
+struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
+ unsigned long addr,
+ unsigned long len);
+
+#endif /* _HFI1_MMU_RB_H */
diff --git a/drivers/infiniband/hw/hfi1/msix.c b/drivers/infiniband/hw/hfi1/msix.c
new file mode 100644
index 000000000000..77d2ece9a9cb
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/msix.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2018 - 2020 Intel Corporation.
+ */
+
+#include "hfi.h"
+#include "affinity.h"
+#include "sdma.h"
+#include "netdev.h"
+
+/**
+ * msix_initialize() - Calculate, request and configure MSIx IRQs
+ * @dd: valid hfi1 devdata
+ *
+ */
+int msix_initialize(struct hfi1_devdata *dd)
+{
+ u32 total;
+ int ret;
+ struct hfi1_msix_entry *entries;
+
+ /*
+ * MSIx interrupt count:
+ * one for the general, "slow path" interrupt
+ * one per used SDMA engine
+ * one per kernel receive context
+ * one for each VNIC context
+ * ...any new IRQs should be added here.
+ */
+ total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_netdev_contexts;
+
+ if (total >= CCE_NUM_MSIX_VECTORS)
+ return -EINVAL;
+
+ ret = pci_alloc_irq_vectors(dd->pcidev, total, total, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", ret);
+ return ret;
+ }
+
+ entries = kcalloc(total, sizeof(*dd->msix_info.msix_entries),
+ GFP_KERNEL);
+ if (!entries) {
+ pci_free_irq_vectors(dd->pcidev);
+ return -ENOMEM;
+ }
+
+ dd->msix_info.msix_entries = entries;
+ spin_lock_init(&dd->msix_info.msix_lock);
+ bitmap_zero(dd->msix_info.in_use_msix, total);
+ dd->msix_info.max_requested = total;
+ dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
+
+ return 0;
+}
+
+/**
+ * msix_request_irq() - Allocate a free MSIx IRQ
+ * @dd: valid devdata
+ * @arg: context information for the IRQ
+ * @handler: IRQ handler
+ * @thread: IRQ thread handler (could be NULL)
+ * @type: affinity IRQ type
+ * @name: IRQ name
+ *
+ * Allocate an MSIx vector if available, and then create the appropriate
+ * metadata needed to keep track of the PCI IRQ request.
+ *
+ * Return:
+ * < 0 Error
+ * >= 0 MSIx vector
+ *
+ */
+static int msix_request_irq(struct hfi1_devdata *dd, void *arg,
+ irq_handler_t handler, irq_handler_t thread,
+ enum irq_type type, const char *name)
+{
+ unsigned long nr;
+ int irq;
+ int ret;
+ struct hfi1_msix_entry *me;
+
+ /* Allocate an MSIx vector */
+ spin_lock(&dd->msix_info.msix_lock);
+ nr = find_first_zero_bit(dd->msix_info.in_use_msix,
+ dd->msix_info.max_requested);
+ if (nr < dd->msix_info.max_requested)
+ __set_bit(nr, dd->msix_info.in_use_msix);
+ spin_unlock(&dd->msix_info.msix_lock);
+
+ if (nr == dd->msix_info.max_requested)
+ return -ENOSPC;
+
+ if (type < IRQ_SDMA || type >= IRQ_OTHER)
+ return -EINVAL;
+
+ irq = pci_irq_vector(dd->pcidev, nr);
+ ret = pci_request_irq(dd->pcidev, nr, handler, thread, arg, name);
+ if (ret) {
+ dd_dev_err(dd,
+ "%s: request for IRQ %d failed, MSIx %lx, err %d\n",
+ name, irq, nr, ret);
+ spin_lock(&dd->msix_info.msix_lock);
+ __clear_bit(nr, dd->msix_info.in_use_msix);
+ spin_unlock(&dd->msix_info.msix_lock);
+ return ret;
+ }
+
+ /*
+ * assign arg after pci_request_irq call, so it will be
+ * cleaned up
+ */
+ me = &dd->msix_info.msix_entries[nr];
+ me->irq = irq;
+ me->arg = arg;
+ me->type = type;
+
+ /* This is a request, so a failure is not fatal */
+ ret = hfi1_get_irq_affinity(dd, me);
+ if (ret)
+ dd_dev_err(dd, "%s: unable to pin IRQ %d\n", name, ret);
+
+ return nr;
+}
+
+static int msix_request_rcd_irq_common(struct hfi1_ctxtdata *rcd,
+ irq_handler_t handler,
+ irq_handler_t thread,
+ const char *name)
+{
+ int nr = msix_request_irq(rcd->dd, rcd, handler, thread,
+ rcd->is_vnic ? IRQ_NETDEVCTXT : IRQ_RCVCTXT,
+ name);
+ if (nr < 0)
+ return nr;
+
+ /*
+ * Set the interrupt register and mask for this
+ * context's interrupt.
+ */
+ rcd->ireg = (IS_RCVAVAIL_START + rcd->ctxt) / 64;
+ rcd->imask = ((u64)1) << ((IS_RCVAVAIL_START + rcd->ctxt) % 64);
+ rcd->msix_intr = nr;
+ remap_intr(rcd->dd, IS_RCVAVAIL_START + rcd->ctxt, nr);
+
+ return 0;
+}
+
+/**
+ * msix_request_rcd_irq() - Helper function for RCVAVAIL IRQs
+ * @rcd: valid rcd context
+ *
+ */
+int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd)
+{
+ char name[MAX_NAME_SIZE];
+
+ snprintf(name, sizeof(name), DRIVER_NAME "_%d kctxt%d",
+ rcd->dd->unit, rcd->ctxt);
+
+ return msix_request_rcd_irq_common(rcd, receive_context_interrupt,
+ receive_context_thread, name);
+}
+
+/**
+ * msix_netdev_request_rcd_irq - Helper function for RCVAVAIL IRQs
+ * for netdev context
+ * @rcd: valid netdev context
+ */
+int msix_netdev_request_rcd_irq(struct hfi1_ctxtdata *rcd)
+{
+ char name[MAX_NAME_SIZE];
+
+ snprintf(name, sizeof(name), DRIVER_NAME "_%d nd kctxt%d",
+ rcd->dd->unit, rcd->ctxt);
+ return msix_request_rcd_irq_common(rcd, receive_context_interrupt_napi,
+ NULL, name);
+}
+
+/**
+ * msix_request_sdma_irq - Helper for getting SDMA IRQ resources
+ * @sde: valid sdma engine
+ *
+ */
+int msix_request_sdma_irq(struct sdma_engine *sde)
+{
+ int nr;
+ char name[MAX_NAME_SIZE];
+
+ snprintf(name, sizeof(name), DRIVER_NAME "_%d sdma%d",
+ sde->dd->unit, sde->this_idx);
+ nr = msix_request_irq(sde->dd, sde, sdma_interrupt, NULL,
+ IRQ_SDMA, name);
+ if (nr < 0)
+ return nr;
+ sde->msix_intr = nr;
+ remap_sdma_interrupts(sde->dd, sde->this_idx, nr);
+
+ return 0;
+}
+
+/**
+ * msix_request_general_irq - Helper for getting general IRQ
+ * resources
+ * @dd: valid device data
+ */
+int msix_request_general_irq(struct hfi1_devdata *dd)
+{
+ int nr;
+ char name[MAX_NAME_SIZE];
+
+ snprintf(name, sizeof(name), DRIVER_NAME "_%d", dd->unit);
+ nr = msix_request_irq(dd, dd, general_interrupt, NULL, IRQ_GENERAL,
+ name);
+ if (nr < 0)
+ return nr;
+
+ /* general interrupt must be MSIx vector 0 */
+ if (nr) {
+ msix_free_irq(dd, (u8)nr);
+ dd_dev_err(dd, "Invalid index %d for GENERAL IRQ\n", nr);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * enable_sdma_srcs - Helper to enable SDMA IRQ srcs
+ * @dd: valid devdata structure
+ * @i: index of SDMA engine
+ */
+static void enable_sdma_srcs(struct hfi1_devdata *dd, int i)
+{
+ set_intr_bits(dd, IS_SDMA_START + i, IS_SDMA_START + i, true);
+ set_intr_bits(dd, IS_SDMA_PROGRESS_START + i,
+ IS_SDMA_PROGRESS_START + i, true);
+ set_intr_bits(dd, IS_SDMA_IDLE_START + i, IS_SDMA_IDLE_START + i, true);
+ set_intr_bits(dd, IS_SDMAENG_ERR_START + i, IS_SDMAENG_ERR_START + i,
+ true);
+}
+
+/**
+ * msix_request_irqs() - Allocate all MSIx IRQs
+ * @dd: valid devdata structure
+ *
+ * Helper function to request the used MSIx IRQs.
+ *
+ */
+int msix_request_irqs(struct hfi1_devdata *dd)
+{
+ int i;
+ int ret = msix_request_general_irq(dd);
+
+ if (ret)
+ return ret;
+
+ for (i = 0; i < dd->num_sdma; i++) {
+ struct sdma_engine *sde = &dd->per_sdma[i];
+
+ ret = msix_request_sdma_irq(sde);
+ if (ret)
+ return ret;
+ enable_sdma_srcs(sde->dd, i);
+ }
+
+ for (i = 0; i < dd->n_krcv_queues; i++) {
+ struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index_safe(dd, i);
+
+ if (rcd)
+ ret = msix_request_rcd_irq(rcd);
+ hfi1_rcd_put(rcd);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * msix_free_irq() - Free the specified MSIx resources and IRQ
+ * @dd: valid devdata
+ * @msix_intr: MSIx vector to free.
+ *
+ */
+void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr)
+{
+ struct hfi1_msix_entry *me;
+
+ if (msix_intr >= dd->msix_info.max_requested)
+ return;
+
+ me = &dd->msix_info.msix_entries[msix_intr];
+
+ if (!me->arg) /* => no irq, no affinity */
+ return;
+
+ hfi1_put_irq_affinity(dd, me);
+ pci_free_irq(dd->pcidev, msix_intr, me->arg);
+
+ me->arg = NULL;
+
+ spin_lock(&dd->msix_info.msix_lock);
+ __clear_bit(msix_intr, dd->msix_info.in_use_msix);
+ spin_unlock(&dd->msix_info.msix_lock);
+}
+
+/**
+ * msix_clean_up_interrupts - Free all MSIx IRQ resources
+ * @dd: valid device data structure
+ *
+ * Free the MSIx and associated PCI resources, if they have been allocated.
+ */
+void msix_clean_up_interrupts(struct hfi1_devdata *dd)
+{
+ int i;
+ struct hfi1_msix_entry *me = dd->msix_info.msix_entries;
+
+ /* remove irqs - must happen before disabling/turning off */
+ for (i = 0; i < dd->msix_info.max_requested; i++, me++)
+ msix_free_irq(dd, i);
+
+ /* clean structures */
+ kfree(dd->msix_info.msix_entries);
+ dd->msix_info.msix_entries = NULL;
+ dd->msix_info.max_requested = 0;
+
+ pci_free_irq_vectors(dd->pcidev);
+}
+
+/**
+ * msix_netdev_synchronize_irq - netdev IRQ synchronize
+ * @dd: valid devdata
+ */
+void msix_netdev_synchronize_irq(struct hfi1_devdata *dd)
+{
+ int i;
+ int ctxt_count = hfi1_netdev_ctxt_count(dd);
+
+ for (i = 0; i < ctxt_count; i++) {
+ struct hfi1_ctxtdata *rcd = hfi1_netdev_get_ctxt(dd, i);
+ struct hfi1_msix_entry *me;
+
+ me = &dd->msix_info.msix_entries[rcd->msix_intr];
+
+ synchronize_irq(me->irq);
+ }
+}
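
msix_request_irq() above reserves a vector by scanning in_use_msix with find_first_zero_bit() under msix_lock, marking the bit, and clearing it again if pci_request_irq() fails. The stand-alone program below mirrors that reserve-then-roll-back pattern with a plain bitmask and no locking; it is only an illustration of the bookkeeping, not driver code.

#include <stdbool.h>
#include <stdio.h>

#define MAX_VECTORS 8
static unsigned long in_use;	/* bit i set => vector i reserved */

/* mirrors the allocate/rollback flow of msix_request_irq(), minus locking */
static int request_vector(bool request_will_fail)
{
	unsigned int nr;

	for (nr = 0; nr < MAX_VECTORS; nr++) {	/* find_first_zero_bit() */
		if (!(in_use & (1UL << nr)))
			break;
	}
	if (nr == MAX_VECTORS)
		return -1;			/* -ENOSPC in the driver */

	in_use |= 1UL << nr;			/* __set_bit() */

	if (request_will_fail) {		/* pci_request_irq() failed */
		in_use &= ~(1UL << nr);		/* __clear_bit() rollback */
		return -2;
	}
	return nr;				/* vector index on success */
}

int main(void)
{
	printf("first vector: %d\n", request_vector(false));	/* 0 */
	printf("failed request: %d\n", request_vector(true));	/* -2 */
	printf("next vector: %d\n", request_vector(false));	/* 1 */
	return 0;
}
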
diff --git a/drivers/infiniband/hw/hfi1/msix.h b/drivers/infiniband/hw/hfi1/msix.h
new file mode 100644
index 000000000000..9530ccb0a2ce
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/msix.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2018 - 2020 Intel Corporation.
+ */
+
+#ifndef _HFI1_MSIX_H
+#define _HFI1_MSIX_H
+
+#include "hfi.h"
+
+/* MSIx interface */
+int msix_initialize(struct hfi1_devdata *dd);
+int msix_request_irqs(struct hfi1_devdata *dd);
+void msix_clean_up_interrupts(struct hfi1_devdata *dd);
+int msix_request_general_irq(struct hfi1_devdata *dd);
+int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd);
+int msix_request_sdma_irq(struct sdma_engine *sde);
+void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr);
+
+/* Netdev interface */
+void msix_netdev_synchronize_irq(struct hfi1_devdata *dd);
+int msix_netdev_request_rcd_irq(struct hfi1_ctxtdata *rcd);
+
+#endif
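
The declarations above group into a lifecycle: msix_initialize() sizes and allocates the vector pool, msix_request_irqs() wires up the general, SDMA and kernel receive-context handlers, and msix_clean_up_interrupts() releases everything. A hedged sketch of the expected ordering from a driver init/teardown path follows; the example_* wrappers and their error handling are illustrative, the real call sites live elsewhere in the driver.

#include "msix.h"

/* Illustrative only: shows the intended call order of the interface above. */
static int example_irq_bringup(struct hfi1_devdata *dd)
{
	int ret;

	ret = msix_initialize(dd);		/* allocate the vector pool */
	if (ret)
		return ret;

	ret = msix_request_irqs(dd);		/* general + SDMA + kernel rcv ctxts */
	if (ret) {
		msix_clean_up_interrupts(dd);	/* frees vectors already requested */
		return ret;
	}
	return 0;
}

static void example_irq_teardown(struct hfi1_devdata *dd)
{
	msix_clean_up_interrupts(dd);		/* free IRQs, entries, PCI vectors */
}
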
diff --git a/drivers/infiniband/hw/hfi1/netdev.h b/drivers/infiniband/hw/hfi1/netdev.h
new file mode 100644
index 000000000000..07c8f77c9181
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/netdev.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ *
+ */
+
+#ifndef HFI1_NETDEV_H
+#define HFI1_NETDEV_H
+
+#include "hfi.h"
+
+#include <linux/netdevice.h>
+#include <linux/xarray.h>
+
+/**
+ * struct hfi1_netdev_rxq - Receive Queue for HFI
+ * Both IPoIB and VNIC netdevices will be working on the rx abstraction.
+ * @napi: napi object
+ * @rx: ptr to netdev_rx
+ * @rcd: ptr to receive context data
+ */
+struct hfi1_netdev_rxq {
+ struct napi_struct napi;
+ struct hfi1_netdev_rx *rx;
+ struct hfi1_ctxtdata *rcd;
+};
+
+/*
+ * Number of netdev contexts used. Ensure it is less than or equal to
+ * max queues supported by VNIC (HFI1_VNIC_MAX_QUEUE).
+ */
+#define HFI1_MAX_NETDEV_CTXTS 8
+
+/* Number of NETDEV RSM entries */
+#define NUM_NETDEV_MAP_ENTRIES HFI1_MAX_NETDEV_CTXTS
+
+/**
+ * struct hfi1_netdev_rx: data required to setup and run HFI netdev.
+ * @rx_napi: the dummy netdevice to support "polling" the receive contexts
+ * @dd: hfi1_devdata
+ * @rxq: pointer to dummy netdev receive queues.
+ * @num_rx_q: number of receive queues
+ * @rmt_start: first free index in RMT Array
+ * @dev_tbl: netdev table of unique identifiers for VNIC and IPoIB VLANs.
+ * @enabled: atomic counter of netdevs enabling receive queues.
+ * When 0 NAPI will be disabled.
+ * @netdevs: atomic counter of netdevs using dummy netdev.
+ * When 0 receive queues will be freed.
+ */
+struct hfi1_netdev_rx {
+ struct net_device *rx_napi;
+ struct hfi1_devdata *dd;
+ struct hfi1_netdev_rxq *rxq;
+ int num_rx_q;
+ int rmt_start;
+ struct xarray dev_tbl;
+ /* count of enabled napi polls */
+ atomic_t enabled;
+ /* count of netdevs on top */
+ atomic_t netdevs;
+};
+
+static inline
+int hfi1_netdev_ctxt_count(struct hfi1_devdata *dd)
+{
+ return dd->netdev_rx->num_rx_q;
+}
+
+static inline
+struct hfi1_ctxtdata *hfi1_netdev_get_ctxt(struct hfi1_devdata *dd, int ctxt)
+{
+ return dd->netdev_rx->rxq[ctxt].rcd;
+}
+
+static inline
+int hfi1_netdev_get_free_rmt_idx(struct hfi1_devdata *dd)
+{
+ return dd->netdev_rx->rmt_start;
+}
+
+static inline
+void hfi1_netdev_set_free_rmt_idx(struct hfi1_devdata *dd, int rmt_idx)
+{
+ dd->netdev_rx->rmt_start = rmt_idx;
+}
+
+u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
+ struct cpumask *cpu_mask);
+
+void hfi1_netdev_enable_queues(struct hfi1_devdata *dd);
+void hfi1_netdev_disable_queues(struct hfi1_devdata *dd);
+int hfi1_netdev_rx_init(struct hfi1_devdata *dd);
+int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd);
+int hfi1_alloc_rx(struct hfi1_devdata *dd);
+void hfi1_free_rx(struct hfi1_devdata *dd);
+int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data);
+void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id);
+void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id);
+void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id);
+
+/* chip.c */
+int hfi1_netdev_rx_napi(struct napi_struct *napi, int budget);
+
+#endif /* HFI1_NETDEV_H */
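
hfi1_netdev_add_data(), hfi1_netdev_get_data() and hfi1_netdev_remove_data() above store per-id pointers in the dev_tbl xarray so that VNIC and IPoIB instances can later be looked up by id. A hypothetical consumer is sketched below; the example_* helpers, the port_id key and the stored net_device payload are assumptions for illustration only.

#include <linux/netdevice.h>
#include "netdev.h"

/* register a payload under an id; xa_insert() returns -EBUSY if id is taken */
static int example_register_port(struct hfi1_devdata *dd, int port_id,
				 struct net_device *ndev)
{
	return hfi1_netdev_add_data(dd, port_id, ndev);
}

static struct net_device *example_lookup_port(struct hfi1_devdata *dd,
					      int port_id)
{
	return hfi1_netdev_get_data(dd, port_id);
}

/* the removed pointer is handed back so the caller can release it */
static struct net_device *example_unregister_port(struct hfi1_devdata *dd,
						  int port_id)
{
	return hfi1_netdev_remove_data(dd, port_id);
}
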
diff --git a/drivers/infiniband/hw/hfi1/netdev_rx.c b/drivers/infiniband/hw/hfi1/netdev_rx.c
new file mode 100644
index 000000000000..8608044203bb
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/netdev_rx.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ *
+ */
+
+/*
+ * This file contains HFI1 support for netdev RX functionality
+ */
+
+#include "sdma.h"
+#include "verbs.h"
+#include "netdev.h"
+#include "hfi.h"
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <rdma/ib_verbs.h>
+
+static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_rx *rx,
+ struct hfi1_ctxtdata *uctxt)
+{
+ unsigned int rcvctrl_ops;
+ struct hfi1_devdata *dd = rx->dd;
+ int ret;
+
+ uctxt->rhf_rcv_function_map = netdev_rhf_rcv_functions;
+ uctxt->do_interrupt = &handle_receive_interrupt_napi_sp;
+
+ /* Now allocate the RcvHdr queue and eager buffers. */
+ ret = hfi1_create_rcvhdrq(dd, uctxt);
+ if (ret)
+ goto done;
+
+ ret = hfi1_setup_eagerbufs(uctxt);
+ if (ret)
+ goto done;
+
+ clear_rcvhdrtail(uctxt);
+
+ rcvctrl_ops = HFI1_RCVCTRL_CTXT_DIS;
+ rcvctrl_ops |= HFI1_RCVCTRL_INTRAVAIL_DIS;
+
+ if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
+ rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
+ if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
+ rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
+ if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
+ rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
+ if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
+ rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
+
+ hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
+done:
+ return ret;
+}
+
+static int hfi1_netdev_allocate_ctxt(struct hfi1_devdata *dd,
+ struct hfi1_ctxtdata **ctxt)
+{
+ struct hfi1_ctxtdata *uctxt;
+ int ret;
+
+ if (dd->flags & HFI1_FROZEN)
+ return -EIO;
+
+ ret = hfi1_create_ctxtdata(dd->pport, dd->node, &uctxt);
+ if (ret < 0) {
+ dd_dev_err(dd, "Unable to create ctxtdata, failing open\n");
+ return -ENOMEM;
+ }
+
+ uctxt->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
+ HFI1_CAP_KGET(NODROP_RHQ_FULL) |
+ HFI1_CAP_KGET(NODROP_EGR_FULL) |
+ HFI1_CAP_KGET(DMA_RTAIL);
+ /* Netdev contexts are always NO_RDMA_RTAIL */
+ uctxt->fast_handler = handle_receive_interrupt_napi_fp;
+ uctxt->slow_handler = handle_receive_interrupt_napi_sp;
+ hfi1_set_seq_cnt(uctxt, 1);
+ uctxt->is_vnic = true;
+
+ hfi1_stats.sps_ctxts++;
+
+ dd_dev_info(dd, "created netdev context %d\n", uctxt->ctxt);
+ *ctxt = uctxt;
+
+ return 0;
+}
+
+static void hfi1_netdev_deallocate_ctxt(struct hfi1_devdata *dd,
+ struct hfi1_ctxtdata *uctxt)
+{
+ flush_wc();
+
+ /*
+ * Disable receive context and interrupt available, reset all
+ * RcvCtxtCtrl bits to default values.
+ */
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
+ HFI1_RCVCTRL_TIDFLOW_DIS |
+ HFI1_RCVCTRL_INTRAVAIL_DIS |
+ HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
+ HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
+ HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
+
+ if (uctxt->msix_intr != CCE_NUM_MSIX_VECTORS)
+ msix_free_irq(dd, uctxt->msix_intr);
+
+ uctxt->msix_intr = CCE_NUM_MSIX_VECTORS;
+ uctxt->event_flags = 0;
+
+ hfi1_clear_tids(uctxt);
+ hfi1_clear_ctxt_pkey(dd, uctxt);
+
+ hfi1_stats.sps_ctxts--;
+
+ hfi1_free_ctxt(uctxt);
+}
+
+static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_rx *rx,
+ struct hfi1_ctxtdata **ctxt)
+{
+ int rc;
+ struct hfi1_devdata *dd = rx->dd;
+
+ rc = hfi1_netdev_allocate_ctxt(dd, ctxt);
+ if (rc) {
+ dd_dev_err(dd, "netdev ctxt alloc failed %d\n", rc);
+ return rc;
+ }
+
+ rc = hfi1_netdev_setup_ctxt(rx, *ctxt);
+ if (rc) {
+ dd_dev_err(dd, "netdev ctxt setup failed %d\n", rc);
+ hfi1_netdev_deallocate_ctxt(dd, *ctxt);
+ *ctxt = NULL;
+ }
+
+ return rc;
+}
+
+/**
+ * hfi1_num_netdev_contexts - Count of netdev recv contexts to use.
+ * @dd: device on which to allocate netdev contexts
+ * @available_contexts: count of available receive contexts
+ * @cpu_mask: mask of possible cpus to include for contexts
+ *
+ * Return: count of physical cores on a node or the remaining available recv
+ * contexts for netdev recv context usage up to the maximum of
+ * HFI1_MAX_NETDEV_CTXTS.
+ * A value of 0 can be returned when acceleration is explicitly turned off,
+ * a memory allocation error occurs or when there are no available contexts.
+ *
+ */
+u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
+ struct cpumask *cpu_mask)
+{
+ cpumask_var_t node_cpu_mask;
+ unsigned int available_cpus;
+
+ if (!HFI1_CAP_IS_KSET(AIP))
+ return 0;
+
+ /* Always give user contexts priority over netdev contexts */
+ if (available_contexts == 0) {
+ dd_dev_info(dd, "No receive contexts available for netdevs.\n");
+ return 0;
+ }
+
+ if (!zalloc_cpumask_var(&node_cpu_mask, GFP_KERNEL)) {
+ dd_dev_err(dd, "Unable to allocate cpu_mask for netdevs.\n");
+ return 0;
+ }
+
+ cpumask_and(node_cpu_mask, cpu_mask, cpumask_of_node(dd->node));
+
+ available_cpus = cpumask_weight(node_cpu_mask);
+
+ free_cpumask_var(node_cpu_mask);
+
+ return min3(available_cpus, available_contexts,
+ (u32)HFI1_MAX_NETDEV_CTXTS);
+}
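
The return value above is min3() of the node-local CPUs remaining in cpu_mask, the spare receive contexts, and HFI1_MAX_NETDEV_CTXTS. A stand-alone arithmetic sketch with hypothetical numbers:

#include <stdio.h>

#define HFI1_MAX_NETDEV_CTXTS 8

static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
{
	unsigned int m = a < b ? a : b;

	return m < c ? m : c;
}

int main(void)
{
	/* hypothetical platform: 12 node-local CPUs, 6 spare receive contexts */
	unsigned int available_cpus = 12, available_contexts = 6;

	printf("netdev contexts: %u\n",
	       min3u(available_cpus, available_contexts, HFI1_MAX_NETDEV_CTXTS));
	/* prints 6: the spare receive contexts are the limiting factor */
	return 0;
}
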
+
+static int hfi1_netdev_rxq_init(struct hfi1_netdev_rx *rx)
+{
+ int i;
+ int rc;
+ struct hfi1_devdata *dd = rx->dd;
+ struct net_device *dev = rx->rx_napi;
+
+ rx->num_rx_q = dd->num_netdev_contexts;
+ rx->rxq = kcalloc_node(rx->num_rx_q, sizeof(*rx->rxq),
+ GFP_KERNEL, dd->node);
+
+ if (!rx->rxq) {
+ dd_dev_err(dd, "Unable to allocate netdev queue data\n");
+ return (-ENOMEM);
+ }
+
+ for (i = 0; i < rx->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
+
+ rc = hfi1_netdev_allot_ctxt(rx, &rxq->rcd);
+ if (rc)
+ goto bail_context_irq_failure;
+
+ hfi1_rcd_get(rxq->rcd);
+ rxq->rx = rx;
+ rxq->rcd->napi = &rxq->napi;
+ dd_dev_info(dd, "Setting rcv queue %d napi to context %d\n",
+ i, rxq->rcd->ctxt);
+ /*
+ * Disable BUSY_POLL on this NAPI as this is not supported
+ * right now.
+ */
+ set_bit(NAPI_STATE_NO_BUSY_POLL, &rxq->napi.state);
+ netif_napi_add(dev, &rxq->napi, hfi1_netdev_rx_napi);
+ rc = msix_netdev_request_rcd_irq(rxq->rcd);
+ if (rc)
+ goto bail_context_irq_failure;
+ }
+
+ return 0;
+
+bail_context_irq_failure:
+ dd_dev_err(dd, "Unable to allot receive context\n");
+ for (; i >= 0; i--) {
+ struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
+
+ if (rxq->rcd) {
+ hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
+ hfi1_rcd_put(rxq->rcd);
+ rxq->rcd = NULL;
+ }
+ }
+ kfree(rx->rxq);
+ rx->rxq = NULL;
+
+ return rc;
+}
+
+static void hfi1_netdev_rxq_deinit(struct hfi1_netdev_rx *rx)
+{
+ int i;
+ struct hfi1_devdata *dd = rx->dd;
+
+ for (i = 0; i < rx->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
+
+ netif_napi_del(&rxq->napi);
+ hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
+ hfi1_rcd_put(rxq->rcd);
+ rxq->rcd = NULL;
+ }
+
+ kfree(rx->rxq);
+ rx->rxq = NULL;
+ rx->num_rx_q = 0;
+}
+
+static void enable_queues(struct hfi1_netdev_rx *rx)
+{
+ int i;
+
+ for (i = 0; i < rx->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
+
+ dd_dev_info(rx->dd, "enabling queue %d on context %d\n", i,
+ rxq->rcd->ctxt);
+ napi_enable(&rxq->napi);
+ hfi1_rcvctrl(rx->dd,
+ HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB,
+ rxq->rcd);
+ }
+}
+
+static void disable_queues(struct hfi1_netdev_rx *rx)
+{
+ int i;
+
+ msix_netdev_synchronize_irq(rx->dd);
+
+ for (i = 0; i < rx->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
+
+ dd_dev_info(rx->dd, "disabling queue %d on context %d\n", i,
+ rxq->rcd->ctxt);
+
+ /* wait for napi if it was scheduled */
+ hfi1_rcvctrl(rx->dd,
+ HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS,
+ rxq->rcd);
+ napi_synchronize(&rxq->napi);
+ napi_disable(&rxq->napi);
+ }
+}
+
+/**
+ * hfi1_netdev_rx_init - Increments the netdevs counter. When called for the
+ * first time, it allocates the receive queue data and calls netif_napi_add
+ * for each queue.
+ *
+ * @dd: hfi1 dev data
+ */
+int hfi1_netdev_rx_init(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
+ int res;
+
+ if (atomic_fetch_inc(&rx->netdevs))
+ return 0;
+
+ mutex_lock(&hfi1_mutex);
+ res = hfi1_netdev_rxq_init(rx);
+ mutex_unlock(&hfi1_mutex);
+ return res;
+}
+
+/**
+ * hfi1_netdev_rx_destroy - Decrements the netdevs counter; when it reaches 0,
+ * NAPI is deleted and the receive queue memory is freed.
+ *
+ * @dd: hfi1 dev data
+ */
+int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
+
+ /* destroy the RX queues only if it is the last netdev going away */
+ if (atomic_fetch_add_unless(&rx->netdevs, -1, 0) == 1) {
+ mutex_lock(&hfi1_mutex);
+ hfi1_netdev_rxq_deinit(rx);
+ mutex_unlock(&hfi1_mutex);
+ }
+
+ return 0;
+}
+
+/**
+ * hfi1_alloc_rx - Allocates the rx support structure
+ * @dd: hfi1 dev data
+ *
+ * Allocate the rx structure to support gathering the receive
+ * resources and the dummy netdev.
+ *
+ * Updates dd struct pointer upon success.
+ *
+ * Return: 0 on success, negative error on failure
+ *
+ */
+int hfi1_alloc_rx(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_rx *rx;
+
+ dd_dev_info(dd, "allocating rx size %ld\n", sizeof(*rx));
+ rx = kzalloc_node(sizeof(*rx), GFP_KERNEL, dd->node);
+
+ if (!rx)
+ return -ENOMEM;
+ rx->dd = dd;
+ rx->rx_napi = alloc_netdev_dummy(0);
+ if (!rx->rx_napi) {
+ kfree(rx);
+ return -ENOMEM;
+ }
+
+ xa_init(&rx->dev_tbl);
+ atomic_set(&rx->enabled, 0);
+ atomic_set(&rx->netdevs, 0);
+ dd->netdev_rx = rx;
+
+ return 0;
+}
+
+void hfi1_free_rx(struct hfi1_devdata *dd)
+{
+ if (dd->netdev_rx) {
+ dd_dev_info(dd, "hfi1 rx freed\n");
+ free_netdev(dd->netdev_rx->rx_napi);
+ kfree(dd->netdev_rx);
+ dd->netdev_rx = NULL;
+ }
+}
+
+/**
+ * hfi1_netdev_enable_queues - NAPI enable function.
+ * It enables the NAPI objects associated with the queues.
+ * Each caller increments an atomic counter; the queues are enabled only on
+ * the first increment. The disable function decrements the counter and,
+ * when it reaches 0, calls napi_disable for every queue.
+ *
+ * @dd: hfi1 dev data
+ */
+void hfi1_netdev_enable_queues(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_rx *rx;
+
+ if (!dd->netdev_rx)
+ return;
+
+ rx = dd->netdev_rx;
+ if (atomic_fetch_inc(&rx->enabled))
+ return;
+
+ mutex_lock(&hfi1_mutex);
+ enable_queues(rx);
+ mutex_unlock(&hfi1_mutex);
+}
+
+void hfi1_netdev_disable_queues(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_rx *rx;
+
+ if (!dd->netdev_rx)
+ return;
+
+ rx = dd->netdev_rx;
+ if (atomic_dec_if_positive(&rx->enabled))
+ return;
+
+ mutex_lock(&hfi1_mutex);
+ disable_queues(rx);
+ mutex_unlock(&hfi1_mutex);
+}
+
+/**
+ * hfi1_netdev_add_data - Registers data with a unique identifier so it can
+ * be requested later; this is needed for the VNIC and IPoIB VLAN
+ * implementations.
+ * The call is serialized by the xarray's internal lock.
+ *
+ * @dd: hfi1 dev data
+ * @id: requested integer id up to INT_MAX
+ * @data: data to be associated with index
+ */
+int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data)
+{
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
+
+ return xa_insert(&rx->dev_tbl, id, data, GFP_NOWAIT);
+}
+
+/**
+ * hfi1_netdev_remove_data - Removes the data registered under the given id.
+ * Returns a pointer to the removed entry.
+ *
+ * @dd: hfi1 dev data
+ * @id: requested integer id up to INT_MAX
+ */
+void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id)
+{
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
+
+ return xa_erase(&rx->dev_tbl, id);
+}
+
+/**
+ * hfi1_netdev_get_data - Gets data with given id
+ *
+ * @dd: hfi1 dev data
+ * @id: requested integer id up to INT_MAX
+ */
+void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id)
+{
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
+
+ return xa_load(&rx->dev_tbl, id);
+}
+
+/**
+ * hfi1_netdev_get_first_data - Gets the first entry with an id greater than or equal to *start_id.
+ *
+ * @dd: hfi1 dev data
+ * @start_id: on input, the lowest id to search from; on return, the id of the entry found
+ */
+void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id)
+{
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
+ unsigned long index = *start_id;
+ void *ret;
+
+ ret = xa_find(&rx->dev_tbl, &index, UINT_MAX, XA_PRESENT);
+ *start_id = (int)index;
+ return ret;
+}
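
hfi1_netdev_rx_init() and hfi1_netdev_rx_destroy() above gate the expensive work on the netdevs counter: atomic_fetch_inc() returns the previous value, so only the first user allocates the queues, and only the user that drops the count from 1 frees them (hfi1_netdev_enable_queues()/hfi1_netdev_disable_queues() apply the same idea to the enabled counter). The single-threaded sketch below shows just the first/last gating with a plain counter; it deliberately ignores the atomicity the driver needs.

#include <stdio.h>

static int users;	/* mirrors the rx->netdevs atomic counter */

static void first_user_init(void)    { printf("allocate rx queues\n"); }
static void last_user_teardown(void) { printf("free rx queues\n"); }

/* mirrors hfi1_netdev_rx_init(): only the first user performs the init */
static void get_rx(void)
{
	if (users++ == 0)	/* atomic_fetch_inc() returns the old value */
		first_user_init();
}

/* mirrors hfi1_netdev_rx_destroy(): only the last user tears down */
static void put_rx(void)
{
	if (--users == 0)
		last_user_teardown();
}

int main(void)
{
	get_rx();	/* allocates */
	get_rx();	/* no-op */
	put_rx();	/* no-op */
	put_rx();	/* frees */
	return 0;
}
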
diff --git a/drivers/infiniband/hw/hfi1/opa_compat.h b/drivers/infiniband/hw/hfi1/opa_compat.h
new file mode 100644
index 000000000000..49f2da677b03
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/opa_compat.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ */
+
+#ifndef _HFI1_OPA_COMPAT_H
+#define _HFI1_OPA_COMPAT_H
+/*
+ * This header file is for OPA-specific definitions which are
+ * required by the HFI driver, and which aren't yet in the Linux
+ * IB core. We'll collect these all here, then merge them into
+ * the kernel when that's convenient.
+ */
+
+/* OPA SMA attribute IDs */
+#define OPA_ATTRIB_ID_CONGESTION_INFO cpu_to_be16(0x008b)
+#define OPA_ATTRIB_ID_HFI_CONGESTION_LOG cpu_to_be16(0x008f)
+#define OPA_ATTRIB_ID_HFI_CONGESTION_SETTING cpu_to_be16(0x0090)
+#define OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE cpu_to_be16(0x0091)
+
+/* OPA PMA attribute IDs */
+#define OPA_PM_ATTRIB_ID_PORT_STATUS cpu_to_be16(0x0040)
+#define OPA_PM_ATTRIB_ID_CLEAR_PORT_STATUS cpu_to_be16(0x0041)
+#define OPA_PM_ATTRIB_ID_DATA_PORT_COUNTERS cpu_to_be16(0x0042)
+#define OPA_PM_ATTRIB_ID_ERROR_PORT_COUNTERS cpu_to_be16(0x0043)
+#define OPA_PM_ATTRIB_ID_ERROR_INFO cpu_to_be16(0x0044)
+
+/* OPA status codes */
+#define OPA_PM_STATUS_REQUEST_TOO_LARGE cpu_to_be16(0x100)
+
+static inline u8 port_states_to_logical_state(struct opa_port_states *ps)
+{
+ return ps->portphysstate_portstate & OPA_PI_MASK_PORT_STATE;
+}
+
+static inline u8 port_states_to_phys_state(struct opa_port_states *ps)
+{
+ return ((ps->portphysstate_portstate &
+ OPA_PI_MASK_PORT_PHYSICAL_STATE) >> 4) & 0xf;
+}
+
+/*
+ * OPA port physical states
+ * The IB Volume 1, Table 146 PortInfo / IB Volume 2 Section 5.4.2(1)
+ * PortPhysState values are the same in the Omni-Path Architecture. OPA
+ * leverages some of the same concepts as InfiniBand, but has a few other
+ * states as well.
+ *
+ * When writing, only values 0-3 are valid, other values are ignored.
+ * When reading, 0 is reserved.
+ *
+ * Returned by the ibphys_portstate() routine.
+ */
+enum opa_port_phys_state {
+ /* Values 0-7 have the same meaning in OPA as in InfiniBand. */
+
+ IB_PORTPHYSSTATE_NOP = 0,
+ /* 1 is reserved */
+ IB_PORTPHYSSTATE_POLLING = 2,
+ IB_PORTPHYSSTATE_DISABLED = 3,
+ IB_PORTPHYSSTATE_TRAINING = 4,
+ IB_PORTPHYSSTATE_LINKUP = 5,
+ IB_PORTPHYSSTATE_LINK_ERROR_RECOVERY = 6,
+ IB_PORTPHYSSTATE_PHY_TEST = 7,
+ /* 8 is reserved */
+
+ /*
+ * Offline: Port is quiet (transmitters disabled) due to lack of
+ * physical media, unsupported media, or transition between link up
+ * and next link up attempt
+ */
+ OPA_PORTPHYSSTATE_OFFLINE = 9,
+
+ /* 10 is reserved */
+
+ /*
+ * Phy_Test: Specific test patterns are transmitted, and receiver BER
+ * can be monitored. This facilitates signal integrity testing for the
+ * physical layer of the port.
+ */
+ OPA_PORTPHYSSTATE_TEST = 11,
+
+ OPA_PORTPHYSSTATE_MAX = 11,
+ /* values 12-15 are reserved/ignored */
+};
+
+#endif /* _HFI1_OPA_COMPAT_H */
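
port_states_to_logical_state() and port_states_to_phys_state() above split the single portphysstate_portstate byte: the low nibble is the logical port state and the high nibble the physical state. The stand-alone sketch below assumes 0x0f and 0xf0 for OPA_PI_MASK_PORT_STATE and OPA_PI_MASK_PORT_PHYSICAL_STATE (those masks live in the OPA headers, not in this file) and uses a made-up byte value.

#include <stdio.h>

/* assumed values for the OPA masks used by the helpers above */
#define PORT_STATE_MASK       0x0f
#define PORT_PHYS_STATE_MASK  0xf0

int main(void)
{
	unsigned int portphysstate_portstate = 0x54;	/* hypothetical PortStates byte */

	/* high nibble: 5 = LinkUp (IB_PORTPHYSSTATE_LINKUP above) */
	printf("phys state: %u\n",
	       (portphysstate_portstate & PORT_PHYS_STATE_MASK) >> 4);
	/* low nibble: 4 = Active in the IB logical state numbering */
	printf("logical state: %u\n",
	       portphysstate_portstate & PORT_STATE_MASK);
	return 0;
}
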
diff --git a/drivers/infiniband/hw/hfi1/opfn.c b/drivers/infiniband/hw/hfi1/opfn.c
new file mode 100644
index 000000000000..370a5a8eaa71
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/opfn.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ */
+#include "hfi.h"
+#include "trace.h"
+#include "qp.h"
+#include "opfn.h"
+
+#define IB_BTHE_E BIT(IB_BTHE_E_SHIFT)
+
+#define OPFN_CODE(code) BIT((code) - 1)
+#define OPFN_MASK(code) OPFN_CODE(STL_VERBS_EXTD_##code)
+
+struct hfi1_opfn_type {
+ bool (*request)(struct rvt_qp *qp, u64 *data);
+ bool (*response)(struct rvt_qp *qp, u64 *data);
+ bool (*reply)(struct rvt_qp *qp, u64 data);
+ void (*error)(struct rvt_qp *qp);
+};
+
+static struct hfi1_opfn_type hfi1_opfn_handlers[STL_VERBS_EXTD_MAX] = {
+ [STL_VERBS_EXTD_TID_RDMA] = {
+ .request = tid_rdma_conn_req,
+ .response = tid_rdma_conn_resp,
+ .reply = tid_rdma_conn_reply,
+ .error = tid_rdma_conn_error,
+ },
+};
+
+static struct workqueue_struct *opfn_wq;
+
+static void opfn_schedule_conn_request(struct rvt_qp *qp);
+
+static bool hfi1_opfn_extended(u32 bth1)
+{
+ return !!(bth1 & IB_BTHE_E);
+}
+
+static void opfn_conn_request(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct ib_atomic_wr wr;
+ u16 mask, capcode;
+ struct hfi1_opfn_type *extd;
+ u64 data;
+ unsigned long flags;
+ int ret = 0;
+
+ trace_hfi1_opfn_state_conn_request(qp);
+ spin_lock_irqsave(&priv->opfn.lock, flags);
+ /*
+ * Exit if the extended bit is not set, or if nothing is requested, or
+ * if we have completed all requests, or if a previous request is in
+ * progress
+ */
+ if (!priv->opfn.extended || !priv->opfn.requested ||
+ priv->opfn.requested == priv->opfn.completed || priv->opfn.curr)
+ goto done;
+
+ mask = priv->opfn.requested & ~priv->opfn.completed;
+ capcode = ilog2(mask & ~(mask - 1)) + 1;
+ if (capcode >= STL_VERBS_EXTD_MAX) {
+ priv->opfn.completed |= OPFN_CODE(capcode);
+ goto done;
+ }
+
+ extd = &hfi1_opfn_handlers[capcode];
+ if (!extd || !extd->request || !extd->request(qp, &data)) {
+ /*
+ * Either there is no handler for this capability or the request
+ * packet could not be generated. Either way, mark it as done so
+ * we don't keep attempting to complete it.
+ */
+ priv->opfn.completed |= OPFN_CODE(capcode);
+ goto done;
+ }
+
+ trace_hfi1_opfn_data_conn_request(qp, capcode, data);
+ data = (data & ~0xf) | capcode;
+
+ memset(&wr, 0, sizeof(wr));
+ wr.wr.opcode = IB_WR_OPFN;
+ wr.remote_addr = HFI1_VERBS_E_ATOMIC_VADDR;
+ wr.compare_add = data;
+
+ priv->opfn.curr = capcode; /* A new request is now in progress */
+ /* Drop opfn.lock before calling ib_post_send() */
+ spin_unlock_irqrestore(&priv->opfn.lock, flags);
+
+ ret = ib_post_send(&qp->ibqp, &wr.wr, NULL);
+ if (ret)
+ goto err;
+ trace_hfi1_opfn_state_conn_request(qp);
+ return;
+err:
+ trace_hfi1_msg_opfn_conn_request(qp, "ib_post_send failed: ret = ",
+ (u64)ret);
+ spin_lock_irqsave(&priv->opfn.lock, flags);
+ /*
+ * In case of an unexpected error return from ib_post_send
+ * clear opfn.curr and reschedule to try again
+ */
+ priv->opfn.curr = STL_VERBS_EXTD_NONE;
+ opfn_schedule_conn_request(qp);
+done:
+ spin_unlock_irqrestore(&priv->opfn.lock, flags);
+}
+
+void opfn_send_conn_request(struct work_struct *work)
+{
+ struct hfi1_opfn_data *od;
+ struct hfi1_qp_priv *qpriv;
+
+ od = container_of(work, struct hfi1_opfn_data, opfn_work);
+ qpriv = container_of(od, struct hfi1_qp_priv, opfn);
+
+ opfn_conn_request(qpriv->owner);
+}
+
+/*
+ * When QP s_lock is held in the caller, the OPFN request must be scheduled
+ * to a different workqueue to avoid double locking QP s_lock in call to
+ * ib_post_send in opfn_conn_request
+ */
+static void opfn_schedule_conn_request(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ trace_hfi1_opfn_state_sched_conn_request(qp);
+ queue_work(opfn_wq, &priv->opfn.opfn_work);
+}
+
+void opfn_conn_response(struct rvt_qp *qp, struct rvt_ack_entry *e,
+ struct ib_atomic_eth *ateth)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ u64 data = be64_to_cpu(ateth->compare_data);
+ struct hfi1_opfn_type *extd;
+ u8 capcode;
+ unsigned long flags;
+
+ trace_hfi1_opfn_state_conn_response(qp);
+ capcode = data & 0xf;
+ trace_hfi1_opfn_data_conn_response(qp, capcode, data);
+ if (!capcode || capcode >= STL_VERBS_EXTD_MAX)
+ return;
+
+ extd = &hfi1_opfn_handlers[capcode];
+
+ if (!extd || !extd->response) {
+ e->atomic_data = capcode;
+ return;
+ }
+
+ spin_lock_irqsave(&priv->opfn.lock, flags);
+ if (priv->opfn.completed & OPFN_CODE(capcode)) {
+ /*
+ * We are receiving a request for a feature that has already
+ * been negotiated. This may mean that the other side has reset
+ */
+ priv->opfn.completed &= ~OPFN_CODE(capcode);
+ if (extd->error)
+ extd->error(qp);
+ }
+
+ if (extd->response(qp, &data))
+ priv->opfn.completed |= OPFN_CODE(capcode);
+ e->atomic_data = (data & ~0xf) | capcode;
+ trace_hfi1_opfn_state_conn_response(qp);
+ spin_unlock_irqrestore(&priv->opfn.lock, flags);
+}
+
+void opfn_conn_reply(struct rvt_qp *qp, u64 data)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_opfn_type *extd;
+ u8 capcode;
+ unsigned long flags;
+
+ trace_hfi1_opfn_state_conn_reply(qp);
+ capcode = data & 0xf;
+ trace_hfi1_opfn_data_conn_reply(qp, capcode, data);
+ if (!capcode || capcode >= STL_VERBS_EXTD_MAX)
+ return;
+
+ spin_lock_irqsave(&priv->opfn.lock, flags);
+ /*
+ * Either there is no previous request or the reply is not for the
+ * current request
+ */
+ if (!priv->opfn.curr || capcode != priv->opfn.curr)
+ goto done;
+
+ extd = &hfi1_opfn_handlers[capcode];
+
+ if (!extd || !extd->reply)
+ goto clear;
+
+ if (extd->reply(qp, data))
+ priv->opfn.completed |= OPFN_CODE(capcode);
+clear:
+ /*
+ * Clear opfn.curr to indicate that the previous request is no longer in
+ * progress
+ */
+ priv->opfn.curr = STL_VERBS_EXTD_NONE;
+ trace_hfi1_opfn_state_conn_reply(qp);
+done:
+ spin_unlock_irqrestore(&priv->opfn.lock, flags);
+}
+
+void opfn_conn_error(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_opfn_type *extd = NULL;
+ unsigned long flags;
+ u16 capcode;
+
+ trace_hfi1_opfn_state_conn_error(qp);
+ trace_hfi1_msg_opfn_conn_error(qp, "error. qp state ", (u64)qp->state);
+ /*
+ * The QP has gone into the Error state. We have to invalidate all
+ * negotiated features, including the one in progress (if any). The RC
+ * QP handling will clean the WQE for the connection request.
+ */
+ spin_lock_irqsave(&priv->opfn.lock, flags);
+ while (priv->opfn.completed) {
+ capcode = priv->opfn.completed & ~(priv->opfn.completed - 1);
+ extd = &hfi1_opfn_handlers[ilog2(capcode) + 1];
+ if (extd->error)
+ extd->error(qp);
+ priv->opfn.completed &= ~OPFN_CODE(capcode);
+ }
+ priv->opfn.extended = 0;
+ priv->opfn.requested = 0;
+ priv->opfn.curr = STL_VERBS_EXTD_NONE;
+ spin_unlock_irqrestore(&priv->opfn.lock, flags);
+}
+
+void opfn_qp_init(struct rvt_qp *qp, struct ib_qp_attr *attr, int attr_mask)
+{
+ struct ib_qp *ibqp = &qp->ibqp;
+ struct hfi1_qp_priv *priv = qp->priv;
+ unsigned long flags;
+
+ if (attr_mask & IB_QP_RETRY_CNT)
+ priv->s_retry = attr->retry_cnt;
+
+ spin_lock_irqsave(&priv->opfn.lock, flags);
+ if (ibqp->qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) {
+ struct tid_rdma_params *local = &priv->tid_rdma.local;
+
+ if (attr_mask & IB_QP_TIMEOUT)
+ priv->tid_retry_timeout_jiffies = qp->timeout_jiffies;
+ if (qp->pmtu == enum_to_mtu(OPA_MTU_4096) ||
+ qp->pmtu == enum_to_mtu(OPA_MTU_8192)) {
+ tid_rdma_opfn_init(qp, local);
+ /*
+ * We only want to set the OPFN requested bit when the
+ * QP transitions to RTS.
+ */
+ if (attr_mask & IB_QP_STATE &&
+ attr->qp_state == IB_QPS_RTS) {
+ priv->opfn.requested |= OPFN_MASK(TID_RDMA);
+ /*
+ * If the QP is transitioning to RTS and the
+ * opfn.completed for TID RDMA has already been
+ * set, the QP is being moved *back* into RTS.
+ * We can now renegotiate the TID RDMA
+ * parameters.
+ */
+ if (priv->opfn.completed &
+ OPFN_MASK(TID_RDMA)) {
+ priv->opfn.completed &=
+ ~OPFN_MASK(TID_RDMA);
+ /*
+ * Since the opfn.completed bit was
+ * already set, it is safe to assume
+ * that the opfn.extended is also set.
+ */
+ opfn_schedule_conn_request(qp);
+ }
+ }
+ } else {
+ memset(local, 0, sizeof(*local));
+ }
+ }
+ spin_unlock_irqrestore(&priv->opfn.lock, flags);
+}
+
+void opfn_trigger_conn_request(struct rvt_qp *qp, u32 bth1)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ if (!priv->opfn.extended && hfi1_opfn_extended(bth1) &&
+ HFI1_CAP_IS_KSET(OPFN)) {
+ priv->opfn.extended = 1;
+ if (qp->state == IB_QPS_RTS)
+ opfn_conn_request(qp);
+ }
+}
+
+int opfn_init(void)
+{
+ opfn_wq = alloc_workqueue("hfi_opfn",
+ WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE |
+ WQ_MEM_RECLAIM,
+ HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES);
+ if (!opfn_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void opfn_exit(void)
+{
+ if (opfn_wq) {
+ destroy_workqueue(opfn_wq);
+ opfn_wq = NULL;
+ }
+}
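
opfn_conn_request() above picks the lowest-numbered pending capability with mask & ~(mask - 1) (isolate the lowest set bit) and ilog2() + 1 (map the bit position back to the enum value, since OPFN_CODE(code) is BIT(code - 1)). A stand-alone sketch of that selection, with a hypothetical capability 3 that does not exist in the current enum:

#include <stdio.h>

/* mirrors OPFN_CODE(): capability "code" occupies bit (code - 1) */
#define OPFN_CODE(code) (1u << ((code) - 1))

static unsigned int ilog2u(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	/* hypothetical: capabilities 1 and 3 requested, none completed yet */
	unsigned int requested = OPFN_CODE(1) | OPFN_CODE(3);
	unsigned int completed = 0;
	unsigned int mask = requested & ~completed;
	unsigned int lowest = mask & ~(mask - 1);	/* isolate lowest set bit */
	unsigned int capcode = ilog2u(lowest) + 1;	/* back to the enum value */

	printf("negotiate capability %u first\n", capcode);	/* prints 1 */
	return 0;
}
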
diff --git a/drivers/infiniband/hw/hfi1/opfn.h b/drivers/infiniband/hw/hfi1/opfn.h
new file mode 100644
index 000000000000..62f93c1dc082
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/opfn.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ */
+#ifndef _HFI1_OPFN_H
+#define _HFI1_OPFN_H
+
+/**
+ * DOC: Omni-Path Feature Negotiation (OPFN)
+ *
+ * OPFN is a discovery protocol for Intel Omni-Path fabric that
+ * allows two RC QPs to negotiate a common feature that both QPs
+ * can support. Currently, the only OPA feature that OPFN
+ * supports is TID RDMA.
+ *
+ * Architecture
+ *
+ * OPFN involves the communication between two QPs on the HFI
+ * level on an Omni-Path fabric, and ULPs have no knowledge of
+ * OPFN at all.
+ *
+ * Implementation
+ *
+ * OPFN extends the existing IB RC protocol with the following
+ * changes:
+ * -- Uses Bit 24 (reserved) of DWORD 1 of Base Transport
+ * Header (BTH1) to indicate that the RC QP supports OPFN;
+ * -- Uses a combination of RC COMPARE_SWAP opcode (0x13) and
+ * the address U64_MAX (0xFFFFFFFFFFFFFFFF) as an OPFN
+ * request; The 64-bit data carried with the request/response
+ * contains the parameters for negotiation and will be
+ * defined in tid_rdma.c file;
+ * -- Defines IB_WR_RESERVED3 as IB_WR_OPFN.
+ *
+ * The OPFN communication will be triggered when an RC QP
+ * receives a request with Bit 24 of BTH1 set. The responder QP
+ * will then post a send of an OPFN request with its local
+ * parameters, which will be sent to the requester QP once all
+ * existing requests on the responder QP side have been sent.
+ * Once the requester QP receives the OPFN request, it will
+ * keep a copy of the responder QP's parameters, and return a
+ * response packet with its own local parameters. The responder
+ * QP receives the response packet and keeps a copy of the requester
+ * QP's parameters. After this exchange, each side has the parameters
+ * for both sides and therefore can select the right parameters
+ * for future transactions
+ */
+
+#include <linux/workqueue.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/rdmavt_qp.h>
+
+/* STL Verbs Extended */
+#define IB_BTHE_E_SHIFT 24
+#define HFI1_VERBS_E_ATOMIC_VADDR U64_MAX
+
+enum hfi1_opfn_codes {
+ STL_VERBS_EXTD_NONE = 0,
+ STL_VERBS_EXTD_TID_RDMA,
+ STL_VERBS_EXTD_MAX
+};
+
+struct hfi1_opfn_data {
+ u8 extended;
+ u16 requested;
+ u16 completed;
+ enum hfi1_opfn_codes curr;
+ /* serialize opfn function calls */
+ spinlock_t lock;
+ struct work_struct opfn_work;
+};
+
+/* WR opcode for OPFN */
+#define IB_WR_OPFN IB_WR_RESERVED3
+
+void opfn_send_conn_request(struct work_struct *work);
+void opfn_conn_response(struct rvt_qp *qp, struct rvt_ack_entry *e,
+ struct ib_atomic_eth *ateth);
+void opfn_conn_reply(struct rvt_qp *qp, u64 data);
+void opfn_conn_error(struct rvt_qp *qp);
+void opfn_qp_init(struct rvt_qp *qp, struct ib_qp_attr *attr, int attr_mask);
+void opfn_trigger_conn_request(struct rvt_qp *qp, u32 bth1);
+int opfn_init(void);
+void opfn_exit(void);
+
+#endif /* _HFI1_OPFN_H */
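
The DOC block above describes the wire format: the negotiation rides on an RC COMPARE_SWAP to HFI1_VERBS_E_ATOMIC_VADDR, with the 64-bit compare data carrying the capability code in the low 4 bits and the capability-specific parameters above them (the sender does data = (data & ~0xf) | capcode, the receiver does capcode = data & 0xf). A stand-alone encode/decode sketch; the parameter value is made up.

#include <stdio.h>
#include <stdint.h>

static uint64_t opfn_encode(uint64_t params, uint8_t capcode)
{
	/* low 4 bits carry the capability code, the rest the parameters */
	return (params & ~0xfULL) | (capcode & 0xf);
}

static uint8_t opfn_capcode(uint64_t data)
{
	return data & 0xf;
}

int main(void)
{
	uint64_t data = opfn_encode(0x123450ULL, 1);	/* 1 = TID RDMA */

	printf("wire data: 0x%llx, capcode: %u\n",
	       (unsigned long long)data, (unsigned int)opfn_capcode(data));
	return 0;
}
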
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
new file mode 100644
index 000000000000..7133964749f8
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -0,0 +1,1382 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015 - 2019 Intel Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+
+#include "hfi.h"
+#include "chip_registers.h"
+#include "aspm.h"
+
+/*
+ * This file contains PCIe utility routines.
+ */
+
+/*
+ * Do all the common PCIe setup and initialization.
+ */
+int hfi1_pcie_init(struct hfi1_devdata *dd)
+{
+ int ret;
+ struct pci_dev *pdev = dd->pcidev;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ /*
+ * This can happen (in theory) iff:
+ * We did a chip reset, and then failed to reprogram the
+ * BAR, or the chip reset due to an internal error. We then
+ * unloaded the driver and reloaded it.
+ *
+ * Both reset cases set the BAR back to initial state. For
+ * the latter case, the AER sticky error bit at offset 0x718
+ * should be set, but the Linux kernel doesn't yet know
+ * about that, it appears. If the original BAR was retained
+ * in the kernel data structures, this may be OK.
+ */
+ dd_dev_err(dd, "pci enable failed: error %d\n", -ret);
+ return ret;
+ }
+
+ ret = pci_request_regions(pdev, DRIVER_NAME);
+ if (ret) {
+ dd_dev_err(dd, "pci_request_regions fails: err %d\n", -ret);
+ goto bail;
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ /*
+ * If the 64 bit setup fails, try 32 bit. Some systems
+ * do not setup 64 bit maps on systems with 2GB or less
+ * memory installed.
+ */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dd_dev_err(dd, "Unable to set DMA mask: %d\n", ret);
+ goto bail;
+ }
+ }
+
+ pci_set_master(pdev);
+ return 0;
+
+bail:
+ hfi1_pcie_cleanup(pdev);
+ return ret;
+}
+
+/*
+ * Clean what was done in hfi1_pcie_init()
+ */
+void hfi1_pcie_cleanup(struct pci_dev *pdev)
+{
+ pci_disable_device(pdev);
+ /*
+ * Release regions should be called after the disable. OK to
+ * call if request regions has not been called or failed.
+ */
+ pci_release_regions(pdev);
+}
+
+/*
+ * Do remaining PCIe setup, once dd is allocated, and save away
+ * fields required to re-initialize after a chip reset, or for
+ * various other purposes
+ */
+int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
+{
+ unsigned long len;
+ resource_size_t addr;
+ int ret = 0;
+ u32 rcv_array_count;
+
+ addr = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
+
+ /*
+ * The TXE PIO buffers are at the tail end of the chip space.
+ * Cut them off and map them separately.
+ */
+
+ /* sanity check vs expectations */
+ if (len != TXE_PIO_SEND + TXE_PIO_SIZE) {
+ dd_dev_err(dd, "chip PIO range does not match\n");
+ return -EINVAL;
+ }
+
+ dd->kregbase1 = ioremap(addr, RCV_ARRAY);
+ if (!dd->kregbase1) {
+ dd_dev_err(dd, "UC mapping of kregbase1 failed\n");
+ return -ENOMEM;
+ }
+ dd_dev_info(dd, "UC base1: %p for %x\n", dd->kregbase1, RCV_ARRAY);
+
+ /* verify that reads actually work, save revision for reset check */
+ dd->revision = readq(dd->kregbase1 + CCE_REVISION);
+ if (dd->revision == ~(u64)0) {
+ dd_dev_err(dd, "Cannot read chip CSRs\n");
+ goto nomem;
+ }
+
+ rcv_array_count = readq(dd->kregbase1 + RCV_ARRAY_CNT);
+ dd_dev_info(dd, "RcvArray count: %u\n", rcv_array_count);
+ dd->base2_start = RCV_ARRAY + rcv_array_count * 8;
+
+ dd->kregbase2 = ioremap(
+ addr + dd->base2_start,
+ TXE_PIO_SEND - dd->base2_start);
+ if (!dd->kregbase2) {
+ dd_dev_err(dd, "UC mapping of kregbase2 failed\n");
+ goto nomem;
+ }
+ dd_dev_info(dd, "UC base2: %p for %x\n", dd->kregbase2,
+ TXE_PIO_SEND - dd->base2_start);
+
+ dd->piobase = ioremap_wc(addr + TXE_PIO_SEND, TXE_PIO_SIZE);
+ if (!dd->piobase) {
+ dd_dev_err(dd, "WC mapping of send buffers failed\n");
+ goto nomem;
+ }
+ dd_dev_info(dd, "WC piobase: %p for %x\n", dd->piobase, TXE_PIO_SIZE);
+
+ dd->physaddr = addr; /* used for io_remap, etc. */
+
+ /*
+ * Map the chip's RcvArray as write-combining to allow us
+ * to write an entire cacheline worth of entries in one shot.
+ */
+ dd->rcvarray_wc = ioremap_wc(addr + RCV_ARRAY,
+ rcv_array_count * 8);
+ if (!dd->rcvarray_wc) {
+ dd_dev_err(dd, "WC mapping of receive array failed\n");
+ goto nomem;
+ }
+ dd_dev_info(dd, "WC RcvArray: %p for %x\n",
+ dd->rcvarray_wc, rcv_array_count * 8);
+
+ dd->flags |= HFI1_PRESENT; /* chip.c CSR routines now work */
+ return 0;
+nomem:
+ ret = -ENOMEM;
+ hfi1_pcie_ddcleanup(dd);
+ return ret;
+}
+
+/*
+ * Do PCIe cleanup related to dd, after chip-specific cleanup, etc. Just prior
+ * to releasing the dd memory.
+ * Void because all of the core pcie cleanup functions are void.
+ */
+void hfi1_pcie_ddcleanup(struct hfi1_devdata *dd)
+{
+ dd->flags &= ~HFI1_PRESENT;
+ if (dd->kregbase1)
+ iounmap(dd->kregbase1);
+ dd->kregbase1 = NULL;
+ if (dd->kregbase2)
+ iounmap(dd->kregbase2);
+ dd->kregbase2 = NULL;
+ if (dd->rcvarray_wc)
+ iounmap(dd->rcvarray_wc);
+ dd->rcvarray_wc = NULL;
+ if (dd->piobase)
+ iounmap(dd->piobase);
+ dd->piobase = NULL;
+}
+
+/* return the PCIe link speed from the given link status */
+static u32 extract_speed(u16 linkstat)
+{
+ u32 speed;
+
+ switch (linkstat & PCI_EXP_LNKSTA_CLS) {
+ default: /* not defined, assume Gen1 */
+ case PCI_EXP_LNKSTA_CLS_2_5GB:
+ speed = 2500; /* Gen 1, 2.5GHz */
+ break;
+ case PCI_EXP_LNKSTA_CLS_5_0GB:
+ speed = 5000; /* Gen 2, 5GHz */
+ break;
+ case PCI_EXP_LNKSTA_CLS_8_0GB:
+ speed = 8000; /* Gen 3, 8GHz */
+ break;
+ }
+ return speed;
+}
+
+/* read the link status and set dd->{lbus_width,lbus_speed,lbus_info} */
+static void update_lbus_info(struct hfi1_devdata *dd)
+{
+ u16 linkstat;
+ int ret;
+
+ ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
+ if (ret) {
+ dd_dev_err(dd, "Unable to read from PCI config\n");
+ return;
+ }
+
+ dd->lbus_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat);
+ dd->lbus_speed = extract_speed(linkstat);
+ snprintf(dd->lbus_info, sizeof(dd->lbus_info),
+ "PCIe,%uMHz,x%u", dd->lbus_speed, dd->lbus_width);
+}
+
+/*
+ * Read in the current PCIe link width and speed. Find if the link is
+ * Gen3 capable.
+ */
+int pcie_speeds(struct hfi1_devdata *dd)
+{
+ u32 linkcap;
+ struct pci_dev *parent = dd->pcidev->bus->self;
+ int ret;
+
+ if (!pci_is_pcie(dd->pcidev)) {
+ dd_dev_err(dd, "Can't find PCI Express capability!\n");
+ return -EINVAL;
+ }
+
+ /* find if our max speed is Gen3 and parent supports Gen3 speeds */
+ dd->link_gen3_capable = 1;
+
+ ret = pcie_capability_read_dword(dd->pcidev, PCI_EXP_LNKCAP, &linkcap);
+ if (ret) {
+ dd_dev_err(dd, "Unable to read from PCI config\n");
+ return pcibios_err_to_errno(ret);
+ }
+
+ if ((linkcap & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_8_0GB) {
+ dd_dev_info(dd,
+ "This HFI is not Gen3 capable, max speed 0x%x, need 0x3\n",
+ linkcap & PCI_EXP_LNKCAP_SLS);
+ dd->link_gen3_capable = 0;
+ }
+
+ /*
+ * bus->max_bus_speed is set from the bridge's linkcap Max Link Speed
+ */
+ if (parent &&
+ (dd->pcidev->bus->max_bus_speed == PCIE_SPEED_2_5GT ||
+ dd->pcidev->bus->max_bus_speed == PCIE_SPEED_5_0GT)) {
+ dd_dev_info(dd, "Parent PCIe bridge does not support Gen3\n");
+ dd->link_gen3_capable = 0;
+ }
+
+ /* obtain the link width and current speed */
+ update_lbus_info(dd);
+
+ dd_dev_info(dd, "%s\n", dd->lbus_info);
+
+ return 0;
+}
+
+/*
+ * Restore command and BARs after a reset has wiped them out
+ *
+ * Returns 0 on success, otherwise a negative error value
+ */
+int restore_pci_variables(struct hfi1_devdata *dd)
+{
+ int ret;
+
+ ret = pci_write_config_word(dd->pcidev, PCI_COMMAND, dd->pci_command);
+ if (ret)
+ goto error;
+
+ ret = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
+ dd->pcibar0);
+ if (ret)
+ goto error;
+
+ ret = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
+ dd->pcibar1);
+ if (ret)
+ goto error;
+
+ ret = pci_write_config_dword(dd->pcidev, PCI_ROM_ADDRESS, dd->pci_rom);
+ if (ret)
+ goto error;
+
+ ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL,
+ dd->pcie_devctl);
+ if (ret)
+ goto error;
+
+ ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL,
+ dd->pcie_lnkctl);
+ if (ret)
+ goto error;
+
+ ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL2,
+ dd->pcie_devctl2);
+ if (ret)
+ goto error;
+
+ ret = pci_write_config_dword(dd->pcidev, PCI_CFG_MSIX0, dd->pci_msix0);
+ if (ret)
+ goto error;
+
+ if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) {
+ ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2,
+ dd->pci_tph2);
+ if (ret)
+ goto error;
+ }
+ return 0;
+
+error:
+ dd_dev_err(dd, "Unable to write to PCI config\n");
+ return pcibios_err_to_errno(ret);
+}
+
+/*
+ * Save BARs and command to rewrite after device reset
+ *
+ * Returns 0 on success, otherwise a negative error value
+ */
+int save_pci_variables(struct hfi1_devdata *dd)
+{
+ int ret;
+
+ ret = pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
+ &dd->pcibar0);
+ if (ret)
+ goto error;
+
+ ret = pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
+ &dd->pcibar1);
+ if (ret)
+ goto error;
+
+ ret = pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom);
+ if (ret)
+ goto error;
+
+ ret = pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command);
+ if (ret)
+ goto error;
+
+ ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL,
+ &dd->pcie_devctl);
+ if (ret)
+ goto error;
+
+ ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL,
+ &dd->pcie_lnkctl);
+ if (ret)
+ goto error;
+
+ ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL2,
+ &dd->pcie_devctl2);
+ if (ret)
+ goto error;
+
+ ret = pci_read_config_dword(dd->pcidev, PCI_CFG_MSIX0, &dd->pci_msix0);
+ if (ret)
+ goto error;
+
+ if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) {
+ ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2,
+ &dd->pci_tph2);
+ if (ret)
+ goto error;
+ }
+ return 0;
+
+error:
+ dd_dev_err(dd, "Unable to read from PCI config\n");
+ return pcibios_err_to_errno(ret);
+}
+
+/*
+ * BIOS may not set PCIe bus-utilization parameters for best performance.
+ * Check and optionally adjust them to maximize our throughput.
+ */
+static int hfi1_pcie_caps;
+module_param_named(pcie_caps, hfi1_pcie_caps, int, 0444);
+MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
+
+/**
+ * tune_pcie_caps() - Code to adjust PCIe capabilities.
+ * @dd: Valid device data structure
+ *
+ */
+void tune_pcie_caps(struct hfi1_devdata *dd)
+{
+ struct pci_dev *parent;
+ u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
+ u16 rc_mrrs, ep_mrrs, max_mrrs, ectl;
+ int ret;
+
+ /*
+ * Turn on extended tags in DevCtl, in case the BIOS has turned them off,
+ * to improve WFR SDMA bandwidth
+ */
+ ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl);
+ if ((!ret) && !(ectl & PCI_EXP_DEVCTL_EXT_TAG)) {
+ dd_dev_info(dd, "Enabling PCIe extended tags\n");
+ ectl |= PCI_EXP_DEVCTL_EXT_TAG;
+ ret = pcie_capability_write_word(dd->pcidev,
+ PCI_EXP_DEVCTL, ectl);
+ if (ret)
+ dd_dev_info(dd, "Unable to write to PCI config\n");
+ }
+ /* Find out supported and configured values for parent (root) */
+ parent = dd->pcidev->bus->self;
+ /*
+ * The driver cannot perform the tuning if it does not have
+ * access to the upstream component.
+ */
+ if (!parent) {
+ dd_dev_info(dd, "Parent not found\n");
+ return;
+ }
+ if (!pci_is_root_bus(parent->bus)) {
+ dd_dev_info(dd, "Parent not root\n");
+ return;
+ }
+ if (!pci_is_pcie(parent)) {
+ dd_dev_info(dd, "Parent is not PCI Express capable\n");
+ return;
+ }
+ if (!pci_is_pcie(dd->pcidev)) {
+ dd_dev_info(dd, "PCI device is not PCI Express capable\n");
+ return;
+ }
+ rc_mpss = parent->pcie_mpss;
+ rc_mps = ffs(pcie_get_mps(parent)) - 8;
+ /* Find out supported and configured values for endpoint (us) */
+ ep_mpss = dd->pcidev->pcie_mpss;
+ ep_mps = ffs(pcie_get_mps(dd->pcidev)) - 8;
+
+ /* Find max payload supported by root, endpoint */
+ if (rc_mpss > ep_mpss)
+ rc_mpss = ep_mpss;
+
+ /* If the supported value exceeds the module parameter limit, limit it */
+ if (rc_mpss > (hfi1_pcie_caps & 7))
+ rc_mpss = hfi1_pcie_caps & 7;
+ /* If less than (allowed, supported), bump root payload */
+ if (rc_mpss > rc_mps) {
+ rc_mps = rc_mpss;
+ pcie_set_mps(parent, 128 << rc_mps);
+ }
+ /* If less than (allowed, supported), bump endpoint payload */
+ if (rc_mpss > ep_mps) {
+ ep_mps = rc_mpss;
+ pcie_set_mps(dd->pcidev, 128 << ep_mps);
+ }
+
+ /*
+ * Now the Read Request size.
+ * No field for max supported, but PCIe spec limits it to 4096,
+ * which is code '5' (log2(4096) - 7)
+ */
+ max_mrrs = 5;
+ if (max_mrrs > ((hfi1_pcie_caps >> 4) & 7))
+ max_mrrs = (hfi1_pcie_caps >> 4) & 7;
+
+ max_mrrs = 128 << max_mrrs;
+ rc_mrrs = pcie_get_readrq(parent);
+ ep_mrrs = pcie_get_readrq(dd->pcidev);
+
+ if (max_mrrs > rc_mrrs) {
+ rc_mrrs = max_mrrs;
+ pcie_set_readrq(parent, rc_mrrs);
+ }
+ if (max_mrrs > ep_mrrs) {
+ ep_mrrs = max_mrrs;
+ pcie_set_readrq(dd->pcidev, ep_mrrs);
+ }
+}
+
+/* End of PCIe capability tuning */
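
tune_pcie_caps() above interprets the pcie_caps module parameter as two 3-bit codes: bits 0-2 cap MaxPayload and bits 4-6 cap MaxReadReq, with each code meaning 128 << code bytes. A stand-alone decode using a hypothetical parameter value:

#include <stdio.h>

int main(void)
{
	int pcie_caps = 0x51;	/* hypothetical: payload code 1, readreq code 5 */
	int mps_cap  = 128 << (pcie_caps & 7);		/* 256-byte payload cap */
	int mrrs_cap = 128 << ((pcie_caps >> 4) & 7);	/* 4096-byte read request cap */

	printf("MaxPayload capped at %d bytes, MaxReadReq capped at %d bytes\n",
	       mps_cap, mrrs_cap);
	return 0;
}
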
+
+/*
+ * Everything from here through the hfi1_pci_err_handler definition is
+ * invoked via the PCI error-handling infrastructure, registered with the
+ * PCI core.
+ */
+static pci_ers_result_t
+pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct hfi1_devdata *dd = pci_get_drvdata(pdev);
+ pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
+
+ switch (state) {
+ case pci_channel_io_normal:
+ dd_dev_info(dd, "State Normal, ignoring\n");
+ break;
+
+ case pci_channel_io_frozen:
+ dd_dev_info(dd, "State Frozen, requesting reset\n");
+ pci_disable_device(pdev);
+ ret = PCI_ERS_RESULT_NEED_RESET;
+ break;
+
+ case pci_channel_io_perm_failure:
+ if (dd) {
+ dd_dev_info(dd, "State Permanent Failure, disabling\n");
+ /* no more register accesses! */
+ dd->flags &= ~HFI1_PRESENT;
+ hfi1_disable_after_error(dd);
+ }
+ /* else early, or other problem */
+ ret = PCI_ERS_RESULT_DISCONNECT;
+ break;
+
+ default: /* shouldn't happen */
+ dd_dev_info(dd, "HFI1 PCI errors detected (state %d)\n",
+ state);
+ break;
+ }
+ return ret;
+}
+
+static pci_ers_result_t
+pci_mmio_enabled(struct pci_dev *pdev)
+{
+ u64 words = 0U;
+ struct hfi1_devdata *dd = pci_get_drvdata(pdev);
+ pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
+
+ if (dd && dd->pport) {
+ words = read_port_cntr(dd->pport, C_RX_WORDS, CNTR_INVALID_VL);
+ if (words == ~0ULL)
+ ret = PCI_ERS_RESULT_NEED_RESET;
+ dd_dev_info(dd,
+ "HFI1 mmio_enabled function called, read wordscntr %llx, returning %d\n",
+ words, ret);
+ }
+ return ret;
+}
+
+static pci_ers_result_t
+pci_slot_reset(struct pci_dev *pdev)
+{
+ struct hfi1_devdata *dd = pci_get_drvdata(pdev);
+
+ dd_dev_info(dd, "HFI1 slot_reset function called, ignored\n");
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static void
+pci_resume(struct pci_dev *pdev)
+{
+ struct hfi1_devdata *dd = pci_get_drvdata(pdev);
+
+ dd_dev_info(dd, "HFI1 resume function called\n");
+ /*
+ * Running jobs will fail, since it's asynchronous
+ * unlike sysfs-requested reset. Better than
+ * doing nothing.
+ */
+ hfi1_init(dd, 1); /* same as re-init after reset */
+}
+
+const struct pci_error_handlers hfi1_pci_err_handler = {
+ .error_detected = pci_error_detected,
+ .mmio_enabled = pci_mmio_enabled,
+ .slot_reset = pci_slot_reset,
+ .resume = pci_resume,
+};
+
+/*============================================================================*/
+/* PCIe Gen3 support */
+
+/*
+ * This code is separated out because it is expected to be removed in the
+ * final shipping product. If not, then it will be revisited and items
+ * will be moved to more standard locations.
+ */
+
+/* ASIC_PCI_SD_HOST_STATUS.FW_DNLD_STS field values */
+#define DL_STATUS_HFI0 0x1 /* hfi0 firmware download complete */
+#define DL_STATUS_HFI1 0x2 /* hfi1 firmware download complete */
+#define DL_STATUS_BOTH 0x3 /* hfi0 and hfi1 firmware download complete */
+
+/* ASIC_PCI_SD_HOST_STATUS.FW_DNLD_ERR field values */
+#define DL_ERR_NONE 0x0 /* no error */
+#define DL_ERR_SWAP_PARITY 0x1 /* parity error in SerDes interrupt */
+ /* or response data */
+#define DL_ERR_DISABLED 0x2 /* hfi disabled */
+#define DL_ERR_SECURITY 0x3 /* security check failed */
+#define DL_ERR_SBUS 0x4 /* SBus status error */
+#define DL_ERR_XFR_PARITY 0x5 /* parity error during ROM transfer*/
+
+/* gasket block secondary bus reset delay */
+#define SBR_DELAY_US 200000 /* 200ms */
+
+static uint pcie_target = 3;
+module_param(pcie_target, uint, S_IRUGO);
+MODULE_PARM_DESC(pcie_target, "PCIe target speed (0 skip, 1-3 Gen1-3)");
+
+static uint pcie_force;
+module_param(pcie_force, uint, S_IRUGO);
+MODULE_PARM_DESC(pcie_force, "Force driver to do a PCIe firmware download even if already at target speed");
+
+static uint pcie_retry = 5;
+module_param(pcie_retry, uint, S_IRUGO);
+MODULE_PARM_DESC(pcie_retry, "Driver will try this many times to reach requested speed");
+
+#define UNSET_PSET 255
+#define DEFAULT_DISCRETE_PSET 2 /* discrete HFI */
+#define DEFAULT_MCP_PSET 6 /* MCP HFI */
+static uint pcie_pset = UNSET_PSET;
+module_param(pcie_pset, uint, S_IRUGO);
+MODULE_PARM_DESC(pcie_pset, "PCIe Eq Pset value to use, range is 0-10");
+
+static uint pcie_ctle = 3; /* discrete on, integrated on */
+module_param(pcie_ctle, uint, S_IRUGO);
+MODULE_PARM_DESC(pcie_ctle, "PCIe static CTLE mode, bit 0 - discrete on/off, bit 1 - integrated on/off");
+
+/* equalization columns */
+#define PREC 0
+#define ATTN 1
+#define POST 2
+
+/* discrete silicon preliminary equalization values */
+static const u8 discrete_preliminary_eq[11][3] = {
+ /* prec attn post */
+ { 0x00, 0x00, 0x12 }, /* p0 */
+ { 0x00, 0x00, 0x0c }, /* p1 */
+ { 0x00, 0x00, 0x0f }, /* p2 */
+ { 0x00, 0x00, 0x09 }, /* p3 */
+ { 0x00, 0x00, 0x00 }, /* p4 */
+ { 0x06, 0x00, 0x00 }, /* p5 */
+ { 0x09, 0x00, 0x00 }, /* p6 */
+ { 0x06, 0x00, 0x0f }, /* p7 */
+ { 0x09, 0x00, 0x09 }, /* p8 */
+ { 0x0c, 0x00, 0x00 }, /* p9 */
+ { 0x00, 0x00, 0x18 }, /* p10 */
+};
+
+/* integrated silicon preliminary equalization values */
+static const u8 integrated_preliminary_eq[11][3] = {
+ /* prec attn post */
+ { 0x00, 0x1e, 0x07 }, /* p0 */
+ { 0x00, 0x1e, 0x05 }, /* p1 */
+ { 0x00, 0x1e, 0x06 }, /* p2 */
+ { 0x00, 0x1e, 0x04 }, /* p3 */
+ { 0x00, 0x1e, 0x00 }, /* p4 */
+ { 0x03, 0x1e, 0x00 }, /* p5 */
+ { 0x04, 0x1e, 0x00 }, /* p6 */
+ { 0x03, 0x1e, 0x06 }, /* p7 */
+ { 0x03, 0x1e, 0x04 }, /* p8 */
+ { 0x05, 0x1e, 0x00 }, /* p9 */
+ { 0x00, 0x1e, 0x0a }, /* p10 */
+};
+
+static const u8 discrete_ctle_tunings[11][4] = {
+ /* DC LF HF BW */
+ { 0x48, 0x0b, 0x04, 0x04 }, /* p0 */
+ { 0x60, 0x05, 0x0f, 0x0a }, /* p1 */
+ { 0x50, 0x09, 0x06, 0x06 }, /* p2 */
+ { 0x68, 0x05, 0x0f, 0x0a }, /* p3 */
+ { 0x80, 0x05, 0x0f, 0x0a }, /* p4 */
+ { 0x70, 0x05, 0x0f, 0x0a }, /* p5 */
+ { 0x68, 0x05, 0x0f, 0x0a }, /* p6 */
+ { 0x38, 0x0f, 0x00, 0x00 }, /* p7 */
+ { 0x48, 0x09, 0x06, 0x06 }, /* p8 */
+ { 0x60, 0x05, 0x0f, 0x0a }, /* p9 */
+ { 0x38, 0x0f, 0x00, 0x00 }, /* p10 */
+};
+
+static const u8 integrated_ctle_tunings[11][4] = {
+ /* DC LF HF BW */
+ { 0x38, 0x0f, 0x00, 0x00 }, /* p0 */
+ { 0x38, 0x0f, 0x00, 0x00 }, /* p1 */
+ { 0x38, 0x0f, 0x00, 0x00 }, /* p2 */
+ { 0x38, 0x0f, 0x00, 0x00 }, /* p3 */
+ { 0x58, 0x0a, 0x05, 0x05 }, /* p4 */
+ { 0x48, 0x0a, 0x05, 0x05 }, /* p5 */
+ { 0x40, 0x0a, 0x05, 0x05 }, /* p6 */
+ { 0x38, 0x0f, 0x00, 0x00 }, /* p7 */
+ { 0x38, 0x0f, 0x00, 0x00 }, /* p8 */
+ { 0x38, 0x09, 0x06, 0x06 }, /* p9 */
+ { 0x38, 0x0e, 0x01, 0x01 }, /* p10 */
+};
+
+/* helper to format the value to write to hardware */
+#define eq_value(pre, curr, post) \
+ ((((u32)(pre)) << \
+ PCIE_CFG_REG_PL102_GEN3_EQ_PRE_CURSOR_PSET_SHIFT) \
+ | (((u32)(curr)) << PCIE_CFG_REG_PL102_GEN3_EQ_CURSOR_PSET_SHIFT) \
+ | (((u32)(post)) << \
+ PCIE_CFG_REG_PL102_GEN3_EQ_POST_CURSOR_PSET_SHIFT))
+
+/*
+ * Load the given EQ preset table into the PCIe hardware.
+ */
+static int load_eq_table(struct hfi1_devdata *dd, const u8 eq[11][3], u8 fs,
+ u8 div)
+{
+ struct pci_dev *pdev = dd->pcidev;
+ u32 hit_error = 0;
+ u32 violation;
+ u32 i;
+ u8 c_minus1, c0, c_plus1;
+ int ret;
+
+ for (i = 0; i < 11; i++) {
+ /* set index */
+ pci_write_config_dword(pdev, PCIE_CFG_REG_PL103, i);
+ /* write the value */
+ c_minus1 = eq[i][PREC] / div;
+ c0 = fs - (eq[i][PREC] / div) - (eq[i][POST] / div);
+ c_plus1 = eq[i][POST] / div;
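+ /*
+ * For example, the discrete p7 row { 0x06, 0x00, 0x0f } with
+ * fs = 24 and div = 3 yields c_minus1 = 6/3 = 2, c_plus1 = 15/3 = 5
+ * and c0 = 24 - 2 - 5 = 17, which eq_value() packs into PL102.
+ */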
+ pci_write_config_dword(pdev, PCIE_CFG_REG_PL102,
+ eq_value(c_minus1, c0, c_plus1));
+ /* check if these coefficients violate EQ rules */
+ ret = pci_read_config_dword(dd->pcidev,
+ PCIE_CFG_REG_PL105, &violation);
+ if (ret) {
+ dd_dev_err(dd, "Unable to read from PCI config\n");
+ hit_error = 1;
+ break;
+ }
+
+ if (violation
+ & PCIE_CFG_REG_PL105_GEN3_EQ_VIOLATE_COEF_RULES_SMASK) {
+ if (hit_error == 0) {
+ dd_dev_err(dd,
+ "Gen3 EQ Table Coefficient rule violations\n");
+ dd_dev_err(dd, " prec attn post\n");
+ }
+ dd_dev_err(dd, " p%02d: %02x %02x %02x\n",
+ i, (u32)eq[i][0], (u32)eq[i][1],
+ (u32)eq[i][2]);
+ dd_dev_err(dd, " %02x %02x %02x\n",
+ (u32)c_minus1, (u32)c0, (u32)c_plus1);
+ hit_error = 1;
+ }
+ }
+ if (hit_error)
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * Steps to be done after the PCIe firmware is downloaded and
+ * before the SBR for PCIe Gen3.
+ * The SBus resource is already being held.
+ */
+static void pcie_post_steps(struct hfi1_devdata *dd)
+{
+ int i;
+
+ set_sbus_fast_mode(dd);
+ /*
+ * Write to the PCIe PCSes to set the G3_LOCKED_NEXT bits to 1.
+ * This avoids a spurious framing error that can otherwise be
+ * generated by the MAC layer.
+ *
+ * Use individual addresses since no broadcast is set up.
+ */
+ for (i = 0; i < NUM_PCIE_SERDES; i++) {
+ sbus_request(dd, pcie_pcs_addrs[dd->hfi1_id][i],
+ 0x03, WRITE_SBUS_RECEIVER, 0x00022132);
+ }
+
+ clear_sbus_fast_mode(dd);
+}
+
+/*
+ * Trigger a secondary bus reset (SBR) on ourselves using our parent.
+ *
+ * Based on pci_parent_bus_reset() which is not exported by the
+ * kernel core.
+ */
+static int trigger_sbr(struct hfi1_devdata *dd)
+{
+ struct pci_dev *dev = dd->pcidev;
+ struct pci_dev *pdev;
+
+ /* need a parent */
+ if (!dev->bus->self) {
+ dd_dev_err(dd, "%s: no parent device\n", __func__);
+ return -ENOTTY;
+ }
+
+ /* should not be anyone else on the bus */
+ list_for_each_entry(pdev, &dev->bus->devices, bus_list)
+ if (pdev != dev) {
+ dd_dev_err(dd,
+ "%s: another device is on the same bus\n",
+ __func__);
+ return -ENOTTY;
+ }
+
+ /*
+ * This is a workaround to do an SBR during probe time. A new API needs
+ * to be implemented to provide a cleaner interface, but this fixes the
+ * current brokenness.
+ */
+ return pci_bridge_secondary_bus_reset(dev->bus->self);
+}
+
+/*
+ * Write the given gasket interrupt register.
+ */
+static void write_gasket_interrupt(struct hfi1_devdata *dd, int index,
+ u16 code, u16 data)
+{
+ write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (index * 8),
+ (((u64)code << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_CODE_SHIFT) |
+ ((u64)data << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_DATA_SHIFT)));
+}
+
+/*
+ * Tell the gasket logic how to react to the reset.
+ */
+static void arm_gasket_logic(struct hfi1_devdata *dd)
+{
+ u64 reg;
+
+ reg = (((u64)1 << dd->hfi1_id) <<
+ ASIC_PCIE_SD_HOST_CMD_INTRPT_CMD_SHIFT) |
+ ((u64)pcie_serdes_broadcast[dd->hfi1_id] <<
+ ASIC_PCIE_SD_HOST_CMD_SBUS_RCVR_ADDR_SHIFT |
+ ASIC_PCIE_SD_HOST_CMD_SBR_MODE_SMASK |
+ ((u64)SBR_DELAY_US & ASIC_PCIE_SD_HOST_CMD_TIMER_MASK) <<
+ ASIC_PCIE_SD_HOST_CMD_TIMER_SHIFT);
+ write_csr(dd, ASIC_PCIE_SD_HOST_CMD, reg);
+ /* read back to push the write */
+ read_csr(dd, ASIC_PCIE_SD_HOST_CMD);
+}
+
+/*
+ * CCE_PCIE_CTRL long name helpers
+ * We redefine these shorter macros to use in the code while leaving
+ * chip_registers.h to be autogenerated from the hardware spec.
+ */
+#define LANE_BUNDLE_MASK CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_MASK
+#define LANE_BUNDLE_SHIFT CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_SHIFT
+#define LANE_DELAY_MASK CCE_PCIE_CTRL_PCIE_LANE_DELAY_MASK
+#define LANE_DELAY_SHIFT CCE_PCIE_CTRL_PCIE_LANE_DELAY_SHIFT
+#define MARGIN_OVERWRITE_ENABLE_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_OVERWRITE_ENABLE_SHIFT
+#define MARGIN_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_SHIFT
+#define MARGIN_G1_G2_OVERWRITE_MASK CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_MASK
+#define MARGIN_G1_G2_OVERWRITE_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_SHIFT
+#define MARGIN_GEN1_GEN2_MASK CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_MASK
+#define MARGIN_GEN1_GEN2_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_SHIFT
+
+ /*
+ * Write xmt_margin for full-swing (WFR-B) or half-swing (WFR-C).
+ */
+static void write_xmt_margin(struct hfi1_devdata *dd, const char *fname)
+{
+ u64 pcie_ctrl;
+ u64 xmt_margin;
+ u64 xmt_margin_oe;
+ u64 lane_delay;
+ u64 lane_bundle;
+
+ pcie_ctrl = read_csr(dd, CCE_PCIE_CTRL);
+
+ /*
+ * For Discrete, use full-swing.
+ * - PCIe TX defaults to full-swing.
+ * Leave this register as default.
+ * For Integrated, use half-swing
+ * - Copy xmt_margin and xmt_margin_oe
+ * from Gen1/Gen2 to Gen3.
+ */
+ if (dd->pcidev->device == PCI_DEVICE_ID_INTEL1) { /* integrated */
+ /* extract initial fields */
+ xmt_margin = (pcie_ctrl >> MARGIN_GEN1_GEN2_SHIFT)
+ & MARGIN_GEN1_GEN2_MASK;
+ xmt_margin_oe = (pcie_ctrl >> MARGIN_G1_G2_OVERWRITE_SHIFT)
+ & MARGIN_G1_G2_OVERWRITE_MASK;
+ lane_delay = (pcie_ctrl >> LANE_DELAY_SHIFT) & LANE_DELAY_MASK;
+ lane_bundle = (pcie_ctrl >> LANE_BUNDLE_SHIFT)
+ & LANE_BUNDLE_MASK;
+
+ /*
+ * For A0, EFUSE values are not set. Override with the
+ * correct values.
+ */
+ if (is_ax(dd)) {
+ /*
+ * xmt_margin and OverwriteEnable should be the
+ * same for Gen1/Gen2 and Gen3.
+ */
+ xmt_margin = 0x5;
+ xmt_margin_oe = 0x1;
+ lane_delay = 0xF; /* Delay 240ns. */
+ lane_bundle = 0x0; /* Set to 1 lane. */
+ }
+
+ /* overwrite existing values */
+ pcie_ctrl = (xmt_margin << MARGIN_GEN1_GEN2_SHIFT)
+ | (xmt_margin_oe << MARGIN_G1_G2_OVERWRITE_SHIFT)
+ | (xmt_margin << MARGIN_SHIFT)
+ | (xmt_margin_oe << MARGIN_OVERWRITE_ENABLE_SHIFT)
+ | (lane_delay << LANE_DELAY_SHIFT)
+ | (lane_bundle << LANE_BUNDLE_SHIFT);
+
+ write_csr(dd, CCE_PCIE_CTRL, pcie_ctrl);
+ }
+
+ dd_dev_dbg(dd, "%s: program XMT margin, CcePcieCtrl 0x%llx\n",
+ fname, pcie_ctrl);
+}
+
+/*
+ * Do all the steps needed to transition the PCIe link to Gen3 speed.
+ */
+int do_pcie_gen3_transition(struct hfi1_devdata *dd)
+{
+ struct pci_dev *parent = dd->pcidev->bus->self;
+ u64 fw_ctrl;
+ u64 reg, therm;
+ u32 reg32, fs, lf;
+ u32 status, err;
+ int ret;
+ int do_retry, retry_count = 0;
+ int intnum = 0;
+ uint default_pset;
+ uint pset = pcie_pset;
+ u16 target_vector, target_speed;
+ u16 lnkctl2, vendor;
+ u8 div;
+ const u8 (*eq)[3];
+ const u8 (*ctle_tunings)[4];
+ uint static_ctle_mode;
+ int return_error = 0;
+ u32 target_width;
+
+ /* PCIe Gen3 is for the ASIC only */
+ if (dd->icode != ICODE_RTL_SILICON)
+ return 0;
+
+ if (pcie_target == 1) { /* target Gen1 */
+ target_vector = PCI_EXP_LNKCTL2_TLS_2_5GT;
+ target_speed = 2500;
+ } else if (pcie_target == 2) { /* target Gen2 */
+ target_vector = PCI_EXP_LNKCTL2_TLS_5_0GT;
+ target_speed = 5000;
+ } else if (pcie_target == 3) { /* target Gen3 */
+ target_vector = PCI_EXP_LNKCTL2_TLS_8_0GT;
+ target_speed = 8000;
+ } else {
+ /* off or invalid target - skip */
+ dd_dev_info(dd, "%s: Skipping PCIe transition\n", __func__);
+ return 0;
+ }
+
+ /* if already at target speed, done (unless forced) */
+ if (dd->lbus_speed == target_speed) {
+ dd_dev_info(dd, "%s: PCIe already at gen%d, %s\n", __func__,
+ pcie_target,
+ pcie_force ? "re-doing anyway" : "skipping");
+ if (!pcie_force)
+ return 0;
+ }
+
+ /*
+ * The driver cannot do the transition if it has no access to the
+ * upstream component
+ */
+ if (!parent) {
+ dd_dev_info(dd, "%s: No upstream, Can't do gen3 transition\n",
+ __func__);
+ return 0;
+ }
+
+ /* Previous Gen1/Gen2 bus width */
+ target_width = dd->lbus_width;
+
+ /*
+ * Do the Gen3 transition. Steps are those of the PCIe Gen3
+ * recipe.
+ */
+
+ /* step 1: pcie link working in gen1/gen2 */
+
+ /* step 2: if either side is not capable of Gen3, done */
+ if (pcie_target == 3 && !dd->link_gen3_capable) {
+ dd_dev_err(dd, "The PCIe link is not Gen3 capable\n");
+ ret = -ENOSYS;
+ goto done_no_mutex;
+ }
+
+ /* hold the SBus resource across the firmware download and SBR */
+ ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
+ if (ret) {
+ dd_dev_err(dd, "%s: unable to acquire SBus resource\n",
+ __func__);
+ return ret;
+ }
+
+ /* make sure thermal polling is not causing interrupts */
+ therm = read_csr(dd, ASIC_CFG_THERM_POLL_EN);
+ if (therm) {
+ write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
+ msleep(100);
+ dd_dev_info(dd, "%s: Disabled therm polling\n",
+ __func__);
+ }
+
+retry:
+ /* the SBus download will reset the spico for thermal */
+
+ /* step 3: download SBus Master firmware */
+ /* step 4: download PCIe Gen3 SerDes firmware */
+ dd_dev_info(dd, "%s: downloading firmware\n", __func__);
+ ret = load_pcie_firmware(dd);
+ if (ret) {
+ /* do not proceed if the firmware cannot be downloaded */
+ return_error = 1;
+ goto done;
+ }
+
+ /* step 5: set up device parameter settings */
+ dd_dev_info(dd, "%s: setting PCIe registers\n", __func__);
+
+ /*
+ * PcieCfgSpcie1 - Link Control 3
+ * Leave at reset value. No need to set PerfEq - link equalization
+ * will be performed automatically after the SBR when the target
+ * speed is 8GT/s.
+ */
+
+ /* clear all 16 per-lane error bits (PCIe: Lane Error Status) */
+ pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE2, 0xffff);
+
+ /* step 5a: Set Synopsys Port Logic registers */
+
+ /*
+ * PcieCfgRegPl2 - Port Force Link
+ *
+ * Set the low power field to 0x10 to avoid unnecessary power
+ * management messages. All other fields are zero.
+ */
+ reg32 = 0x10ul << PCIE_CFG_REG_PL2_LOW_PWR_ENT_CNT_SHIFT;
+ pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL2, reg32);
+
+ /*
+ * PcieCfgRegPl100 - Gen3 Control
+ *
+ * turn off PcieCfgRegPl100.Gen3ZRxDcNonCompl
+ * turn on PcieCfgRegPl100.EqEieosCnt
+ * Everything else zero.
+ */
+ reg32 = PCIE_CFG_REG_PL100_EQ_EIEOS_CNT_SMASK;
+ pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL100, reg32);
+
+ /*
+ * PcieCfgRegPl101 - Gen3 EQ FS and LF
+ * PcieCfgRegPl102 - Gen3 EQ Presets to Coefficients Mapping
+ * PcieCfgRegPl103 - Gen3 EQ Preset Index
+ * PcieCfgRegPl105 - Gen3 EQ Status
+ *
+ * Give initial EQ settings.
+ */
+ if (dd->pcidev->device == PCI_DEVICE_ID_INTEL0) { /* discrete */
+ /* 1000mV, FS=24, LF = 8 */
+ fs = 24;
+ lf = 8;
+ div = 3;
+ eq = discrete_preliminary_eq;
+ default_pset = DEFAULT_DISCRETE_PSET;
+ ctle_tunings = discrete_ctle_tunings;
+ /* bit 0 - discrete on/off */
+ static_ctle_mode = pcie_ctle & 0x1;
+ } else {
+ /* 400mV, FS=29, LF = 9 */
+ fs = 29;
+ lf = 9;
+ div = 1;
+ eq = integrated_preliminary_eq;
+ default_pset = DEFAULT_MCP_PSET;
+ ctle_tunings = integrated_ctle_tunings;
+ /* bit 1 - integrated on/off */
+ static_ctle_mode = (pcie_ctle >> 1) & 0x1;
+ }
+ pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL101,
+ (fs <<
+ PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_FS_SHIFT) |
+ (lf <<
+ PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_LF_SHIFT));
+ ret = load_eq_table(dd, eq, fs, div);
+ if (ret)
+ goto done;
+
+ /*
+ * PcieCfgRegPl106 - Gen3 EQ Control
+ *
+ * Set Gen3EqPsetReqVec, leave other fields 0.
+ */
+ if (pset == UNSET_PSET)
+ pset = default_pset;
+ if (pset > 10) { /* valid range is 0-10, inclusive */
+ dd_dev_err(dd, "%s: Invalid Eq Pset %u, setting to %d\n",
+ __func__, pset, default_pset);
+ pset = default_pset;
+ }
+ dd_dev_info(dd, "%s: using EQ Pset %u\n", __func__, pset);
+ pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL106,
+ ((1 << pset) <<
+ PCIE_CFG_REG_PL106_GEN3_EQ_PSET_REQ_VEC_SHIFT) |
+ PCIE_CFG_REG_PL106_GEN3_EQ_EVAL2MS_DISABLE_SMASK |
+ PCIE_CFG_REG_PL106_GEN3_EQ_PHASE23_EXIT_MODE_SMASK);
+
+ /*
+ * step 5b: Do post firmware download steps via SBus
+ */
+ dd_dev_info(dd, "%s: doing pcie post steps\n", __func__);
+ pcie_post_steps(dd);
+
+ /*
+ * step 5c: Program gasket interrupts
+ */
+ /* set the Rx Bit Rate to REFCLK ratio */
+ write_gasket_interrupt(dd, intnum++, 0x0006, 0x0050);
+ /* disable pCal for PCIe Gen3 RX equalization */
+ /* select adaptive or static CTLE */
+ write_gasket_interrupt(dd, intnum++, 0x0026,
+ 0x5b01 | (static_ctle_mode << 3));
+ /*
+ * Enable iCal for PCIe Gen3 RX equalization, and set which
+ * evaluation of RX_EQ_EVAL will launch the iCal procedure.
+ */
+ write_gasket_interrupt(dd, intnum++, 0x0026, 0x5202);
+
+ if (static_ctle_mode) {
+ /* apply static CTLE tunings */
+ u8 pcie_dc, pcie_lf, pcie_hf, pcie_bw;
+
+ pcie_dc = ctle_tunings[pset][0];
+ pcie_lf = ctle_tunings[pset][1];
+ pcie_hf = ctle_tunings[pset][2];
+ pcie_bw = ctle_tunings[pset][3];
+ write_gasket_interrupt(dd, intnum++, 0x0026, 0x0200 | pcie_dc);
+ write_gasket_interrupt(dd, intnum++, 0x0026, 0x0100 | pcie_lf);
+ write_gasket_interrupt(dd, intnum++, 0x0026, 0x0000 | pcie_hf);
+ write_gasket_interrupt(dd, intnum++, 0x0026, 0x5500 | pcie_bw);
+ }
+
+ /* terminate list */
+ write_gasket_interrupt(dd, intnum++, 0x0000, 0x0000);
+
+ /*
+ * step 5d: program XMT margin
+ */
+ write_xmt_margin(dd, __func__);
+
+ /*
+ * step 5e: disable active state power management (ASPM). It
+ * will be enabled if required later
+ */
+ dd_dev_info(dd, "%s: clearing ASPM\n", __func__);
+ aspm_hw_disable_l1(dd);
+
+ /*
+ * step 5f: clear DirectSpeedChange
+ * PcieCfgRegPl67.DirectSpeedChange must be zero to prevent the
+ * change in the speed target from starting before we are ready.
+ * This field defaults to 0 and we are not changing it, so nothing
+ * needs to be done.
+ */
+
+ /* step 5g: Set target link speed */
+ /*
+ * Set target link speed to be target on both device and parent.
+ * On setting the parent: Some system BIOSes "helpfully" set the
+ * parent target speed to Gen2 to match the ASIC's initial speed.
+ * We can set the target to Gen3 because we have already checked
+ * that the link is Gen3 capable.
+ */
+ dd_dev_info(dd, "%s: setting parent target link speed\n", __func__);
+ ret = pcie_capability_read_word(parent, PCI_EXP_LNKCTL2, &lnkctl2);
+ if (ret) {
+ dd_dev_err(dd, "Unable to read from PCI config\n");
+ return_error = 1;
+ goto done;
+ }
+
+ dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
+ (u32)lnkctl2);
+ /* only write to parent if target is not as high as ours */
+ if ((lnkctl2 & PCI_EXP_LNKCTL2_TLS) < target_vector) {
+ ret = pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_TLS,
+ target_vector);
+ if (ret) {
+ dd_dev_err(dd, "Unable to change parent PCI target speed\n");
+ return_error = 1;
+ goto done;
+ }
+ } else {
+ dd_dev_info(dd, "%s: ..target speed is OK\n", __func__);
+ }
+
+ dd_dev_info(dd, "%s: setting target link speed\n", __func__);
+ ret = pcie_capability_clear_and_set_word(dd->pcidev, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_TLS,
+ target_vector);
+ if (ret) {
+ dd_dev_err(dd, "Unable to change device PCI target speed\n");
+ return_error = 1;
+ goto done;
+ }
+
+ /* step 5h: arm gasket logic */
+ /* hold DC in reset across the SBR */
+ write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
+ (void)read_csr(dd, CCE_DC_CTRL); /* DC reset hold */
+ /* save firmware control across the SBR */
+ fw_ctrl = read_csr(dd, MISC_CFG_FW_CTRL);
+
+ dd_dev_info(dd, "%s: arming gasket logic\n", __func__);
+ arm_gasket_logic(dd);
+
+ /*
+ * step 6: quiesce PCIe link
+ * The chip has already been reset, so there will be no traffic
+ * from the chip. Linux has no easy way to enforce that it will
+ * not try to access the device, so we just need to hope it doesn't
+ * do it while we are doing the reset.
+ */
+
+ /*
+ * step 7: initiate the secondary bus reset (SBR)
+ * step 8: hardware brings the links back up
+ * step 9: wait for link speed transition to be complete
+ */
+ dd_dev_info(dd, "%s: calling trigger_sbr\n", __func__);
+ ret = trigger_sbr(dd);
+ if (ret)
+ goto done;
+
+ /* step 10: decide what to do next */
+
+ /* check if we can read PCI space */
+ ret = pci_read_config_word(dd->pcidev, PCI_VENDOR_ID, &vendor);
+ if (ret) {
+ dd_dev_info(dd,
+ "%s: read of VendorID failed after SBR, err %d\n",
+ __func__, ret);
+ return_error = 1;
+ goto done;
+ }
+ if (vendor == 0xffff) {
+ dd_dev_info(dd, "%s: VendorID is all 1s after SBR\n", __func__);
+ return_error = 1;
+ ret = -EIO;
+ goto done;
+ }
+
+ /* restore PCI space registers we know were reset */
+ dd_dev_info(dd, "%s: calling restore_pci_variables\n", __func__);
+ ret = restore_pci_variables(dd);
+ if (ret) {
+ dd_dev_err(dd, "%s: Could not restore PCI variables\n",
+ __func__);
+ return_error = 1;
+ goto done;
+ }
+
+ /* restore firmware control */
+ write_csr(dd, MISC_CFG_FW_CTRL, fw_ctrl);
+
+ /*
+ * Check the gasket block status.
+ *
+ * This is the first CSR read after the SBR. If the read returns
+ * all 1s (fails), the link did not make it back.
+ *
+ * Once we're sure we can read and write, clear the DC reset after
+ * the SBR. Then check for any per-lane errors. Then look over
+ * the status.
+ */
+ reg = read_csr(dd, ASIC_PCIE_SD_HOST_STATUS);
+ dd_dev_info(dd, "%s: gasket block status: 0x%llx\n", __func__, reg);
+ if (reg == ~0ull) { /* PCIe read failed/timeout */
+ dd_dev_err(dd, "SBR failed - unable to read from device\n");
+ return_error = 1;
+ ret = -ENOSYS;
+ goto done;
+ }
+
+ /* clear the DC reset */
+ write_csr(dd, CCE_DC_CTRL, 0);
+
+ /* Set the LED off */
+ setextled(dd, 0);
+
+ /* check for any per-lane errors */
+ ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE2, &reg32);
+ if (ret) {
+ dd_dev_err(dd, "Unable to read from PCI config\n");
+ return_error = 1;
+ goto done;
+ }
+
+ dd_dev_info(dd, "%s: per-lane errors: 0x%x\n", __func__, reg32);
+
+ /* extract status, look for our HFI */
+ status = (reg >> ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_STS_SHIFT)
+ & ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_STS_MASK;
+ if ((status & (1 << dd->hfi1_id)) == 0) {
+ dd_dev_err(dd,
+ "%s: gasket status 0x%x, expecting 0x%x\n",
+ __func__, status, 1 << dd->hfi1_id);
+ ret = -EIO;
+ goto done;
+ }
+
+ /* extract error */
+ err = (reg >> ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_ERR_SHIFT)
+ & ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_ERR_MASK;
+ if (err) {
+ dd_dev_err(dd, "%s: gasket error %d\n", __func__, err);
+ ret = -EIO;
+ goto done;
+ }
+
+ /* update our link information cache */
+ update_lbus_info(dd);
+ dd_dev_info(dd, "%s: new speed and width: %s\n", __func__,
+ dd->lbus_info);
+
+ if (dd->lbus_speed != target_speed ||
+ dd->lbus_width < target_width) { /* not target */
+ /* maybe retry */
+ do_retry = retry_count < pcie_retry;
+ dd_dev_err(dd, "PCIe link speed or width did not match target%s\n",
+ do_retry ? ", retrying" : "");
+ retry_count++;
+ if (do_retry) {
+ msleep(100); /* allow time to settle */
+ goto retry;
+ }
+ ret = -EIO;
+ }
+
+done:
+ if (therm) {
+ write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
+ msleep(100);
+ dd_dev_info(dd, "%s: Re-enable therm polling\n",
+ __func__);
+ }
+ release_chip_resource(dd, CR_SBUS);
+done_no_mutex:
+ /* return no error if it is OK to be at current speed */
+ if (ret && !return_error) {
+ dd_dev_err(dd, "Proceeding at current speed PCIe speed\n");
+ ret = 0;
+ }
+
+ dd_dev_info(dd, "%s: done\n", __func__);
+ return ret;
+}
diff --git a/drivers/infiniband/hw/hfi1/pin_system.c b/drivers/infiniband/hw/hfi1/pin_system.c
new file mode 100644
index 000000000000..cce56134519b
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/pin_system.c
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
+/*
+ * Copyright(c) 2023 - Cornelis Networks, Inc.
+ */
+
+#include <linux/types.h>
+
+#include "hfi.h"
+#include "common.h"
+#include "device.h"
+#include "pinning.h"
+#include "mmu_rb.h"
+#include "user_sdma.h"
+#include "trace.h"
+
+struct sdma_mmu_node {
+ struct mmu_rb_node rb;
+ struct hfi1_user_sdma_pkt_q *pq;
+ struct page **pages;
+ unsigned int npages;
+};
+
+static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
+ unsigned long len);
+static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode, void *arg2,
+ bool *stop);
+static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode);
+
+static const struct mmu_rb_ops sdma_rb_ops = {
+ .filter = sdma_rb_filter,
+ .evict = sdma_rb_evict,
+ .remove = sdma_rb_remove,
+};
+
+int hfi1_init_system_pinning(struct hfi1_user_sdma_pkt_q *pq)
+{
+ struct hfi1_devdata *dd = pq->dd;
+ int ret;
+
+ ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq,
+ &pq->handler);
+ if (ret)
+ dd_dev_err(dd,
+ "[%u:%u] Failed to register system memory DMA support with MMU: %d\n",
+ pq->ctxt, pq->subctxt, ret);
+ return ret;
+}
+
+void hfi1_free_system_pinning(struct hfi1_user_sdma_pkt_q *pq)
+{
+ if (pq->handler)
+ hfi1_mmu_rb_unregister(pq->handler);
+}
+
+static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
+{
+ struct evict_data evict_data;
+
+ evict_data.cleared = 0;
+ evict_data.target = npages;
+ hfi1_mmu_rb_evict(pq->handler, &evict_data);
+ return evict_data.cleared;
+}
+
+static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
+ unsigned int start, unsigned int npages)
+{
+ hfi1_release_user_pages(mm, pages + start, npages, false);
+ kfree(pages);
+}
+
+static inline struct mm_struct *mm_from_sdma_node(struct sdma_mmu_node *node)
+{
+ return node->rb.handler->mn.mm;
+}
+
+static void free_system_node(struct sdma_mmu_node *node)
+{
+ if (node->npages) {
+ unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
+ node->npages);
+ atomic_sub(node->npages, &node->pq->n_locked);
+ }
+ kfree(node);
+}
+
+/*
+ * kref_get()'s an additional kref on the returned rb_node to prevent rb_node
+ * from being released until after rb_node is assigned to an SDMA descriptor
+ * (struct sdma_desc) under add_system_iovec_to_sdma_packet(), even if the
+ * virtual address range for rb_node is invalidated between now and then.
+ */
+static struct sdma_mmu_node *find_system_node(struct mmu_rb_handler *handler,
+ unsigned long start,
+ unsigned long end)
+{
+ struct mmu_rb_node *rb_node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&handler->lock, flags);
+ rb_node = hfi1_mmu_rb_get_first(handler, start, (end - start));
+ if (!rb_node) {
+ spin_unlock_irqrestore(&handler->lock, flags);
+ return NULL;
+ }
+
+ /* "safety" kref to prevent release before add_system_iovec_to_sdma_packet() */
+ kref_get(&rb_node->refcount);
+ spin_unlock_irqrestore(&handler->lock, flags);
+
+ return container_of(rb_node, struct sdma_mmu_node, rb);
+}
+
+static int pin_system_pages(struct user_sdma_request *req,
+ uintptr_t start_address, size_t length,
+ struct sdma_mmu_node *node, int npages)
+{
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+ int pinned, cleared;
+ struct page **pages;
+
+ pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+retry:
+ if (!hfi1_can_pin_pages(pq->dd, current->mm, atomic_read(&pq->n_locked),
+ npages)) {
+ SDMA_DBG(req, "Evicting: nlocked %u npages %u",
+ atomic_read(&pq->n_locked), npages);
+ cleared = sdma_cache_evict(pq, npages);
+ if (cleared >= npages)
+ goto retry;
+ }
+
+ SDMA_DBG(req, "Acquire user pages start_address %lx node->npages %u npages %u",
+ start_address, node->npages, npages);
+ pinned = hfi1_acquire_user_pages(current->mm, start_address, npages, 0,
+ pages);
+
+ if (pinned < 0) {
+ kfree(pages);
+ SDMA_DBG(req, "pinned %d", pinned);
+ return pinned;
+ }
+ if (pinned != npages) {
+ unpin_vector_pages(current->mm, pages, node->npages, pinned);
+ SDMA_DBG(req, "npages %u pinned %d", npages, pinned);
+ return -EFAULT;
+ }
+ node->rb.addr = start_address;
+ node->rb.len = length;
+ node->pages = pages;
+ node->npages = npages;
+ atomic_add(pinned, &pq->n_locked);
+ SDMA_DBG(req, "done. pinned %d", pinned);
+ return 0;
+}
+
+/*
+ * kref refcount on *node_p will be 2 on successful addition: one kref from
+ * kref_init() for mmu_rb_handler and one kref to prevent *node_p from being
+ * released until after *node_p is assigned to an SDMA descriptor (struct
+ * sdma_desc) under add_system_iovec_to_sdma_packet(), even if the virtual
+ * address range for *node_p is invalidated between now and then.
+ */
+static int add_system_pinning(struct user_sdma_request *req,
+ struct sdma_mmu_node **node_p,
+ unsigned long start, unsigned long len)
+{
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+ struct sdma_mmu_node *node;
+ int ret;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ /* First kref "moves" to mmu_rb_handler */
+ kref_init(&node->rb.refcount);
+
+ /* "safety" kref to prevent release before add_system_iovec_to_sdma_packet() */
+ kref_get(&node->rb.refcount);
+
+ node->pq = pq;
+ ret = pin_system_pages(req, start, len, node, PFN_DOWN(len));
+ if (ret == 0) {
+ ret = hfi1_mmu_rb_insert(pq->handler, &node->rb);
+ if (ret)
+ free_system_node(node);
+ else
+ *node_p = node;
+
+ return ret;
+ }
+
+ kfree(node);
+ return ret;
+}
+
+static int get_system_cache_entry(struct user_sdma_request *req,
+ struct sdma_mmu_node **node_p,
+ size_t req_start, size_t req_len)
+{
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+ u64 start = ALIGN_DOWN(req_start, PAGE_SIZE);
+ u64 end = PFN_ALIGN(req_start + req_len);
+ int ret;
+
+ if ((end - start) == 0) {
+ SDMA_DBG(req,
+ "Request for empty cache entry req_start %lx req_len %lx start %llx end %llx",
+ req_start, req_len, start, end);
+ return -EINVAL;
+ }
+
+ SDMA_DBG(req, "req_start %lx req_len %lu", req_start, req_len);
+
+ while (1) {
+ struct sdma_mmu_node *node =
+ find_system_node(pq->handler, start, end);
+ u64 prepend_len = 0;
+
+ SDMA_DBG(req, "node %p start %llx end %llu", node, start, end);
+ if (!node) {
+ ret = add_system_pinning(req, node_p, start,
+ end - start);
+ if (ret == -EEXIST) {
+ /*
+ * Another execution context has inserted a
+ * conflicting entry first.
+ */
+ continue;
+ }
+ return ret;
+ }
+
+ if (node->rb.addr <= start) {
+ /*
+ * This entry covers at least part of the region. If it doesn't extend
+ * to the end, then this will be called again for the next segment.
+ */
+ *node_p = node;
+ return 0;
+ }
+
+ SDMA_DBG(req, "prepend: node->rb.addr %lx, node->rb.refcount %d",
+ node->rb.addr, kref_read(&node->rb.refcount));
+ prepend_len = node->rb.addr - start;
+
+ /*
+ * This node will not be returned; a new node will be
+ * created instead. So release the reference.
+ */
+ kref_put(&node->rb.refcount, hfi1_mmu_rb_release);
+
+ /* Prepend a node to cover the beginning of the allocation */
+ ret = add_system_pinning(req, node_p, start, prepend_len);
+ if (ret == -EEXIST) {
+ /* Another execution context has inserted a conflicting entry first. */
+ continue;
+ }
+ return ret;
+ }
+}
+
+static void sdma_mmu_rb_node_get(void *ctx)
+{
+ struct mmu_rb_node *node = ctx;
+
+ kref_get(&node->refcount);
+}
+
+static void sdma_mmu_rb_node_put(void *ctx)
+{
+ struct sdma_mmu_node *node = ctx;
+
+ kref_put(&node->rb.refcount, hfi1_mmu_rb_release);
+}
+
+static int add_mapping_to_sdma_packet(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx,
+ struct sdma_mmu_node *cache_entry,
+ size_t start,
+ size_t from_this_cache_entry)
+{
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+ unsigned int page_offset;
+ unsigned int from_this_page;
+ size_t page_index;
+ void *ctx;
+ int ret;
+
+ /*
+ * Because the cache may be more fragmented than the memory that is being accessed,
+ * it's not strictly necessary to have a descriptor per cache entry.
+ */
+
+ while (from_this_cache_entry) {
+ page_index = PFN_DOWN(start - cache_entry->rb.addr);
+
+ if (page_index >= cache_entry->npages) {
+ SDMA_DBG(req,
+ "Request for page_index %zu >= cache_entry->npages %u",
+ page_index, cache_entry->npages);
+ return -EINVAL;
+ }
+
+ page_offset = start - ALIGN_DOWN(start, PAGE_SIZE);
+ from_this_page = PAGE_SIZE - page_offset;
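+ /*
+ * For example, assuming 4 KiB pages: if start is
+ * cache_entry->rb.addr + 0x1800, then page_index = 1,
+ * page_offset = 0x800 and from_this_page = 0x800, i.e. the
+ * back half of the second pinned page.
+ */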
+
+ if (from_this_page < from_this_cache_entry) {
+ ctx = NULL;
+ } else {
+ /*
+ * In the case where they are equal, the next line has no practical
+ * effect, but it's better to do a register-to-register copy than a
+ * conditional branch.
+ */
+ from_this_page = from_this_cache_entry;
+ ctx = cache_entry;
+ }
+
+ ret = sdma_txadd_page(pq->dd, &tx->txreq,
+ cache_entry->pages[page_index],
+ page_offset, from_this_page,
+ ctx,
+ sdma_mmu_rb_node_get,
+ sdma_mmu_rb_node_put);
+ if (ret) {
+ /*
+ * When there's a failure, the entire request is freed by
+ * user_sdma_send_pkts().
+ */
+ SDMA_DBG(req,
+ "sdma_txadd_page failed %d page_index %lu page_offset %u from_this_page %u",
+ ret, page_index, page_offset, from_this_page);
+ return ret;
+ }
+ start += from_this_page;
+ from_this_cache_entry -= from_this_page;
+ }
+ return 0;
+}
+
+static int add_system_iovec_to_sdma_packet(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx,
+ struct user_sdma_iovec *iovec,
+ size_t from_this_iovec)
+{
+ while (from_this_iovec > 0) {
+ struct sdma_mmu_node *cache_entry;
+ size_t from_this_cache_entry;
+ size_t start;
+ int ret;
+
+ start = (uintptr_t)iovec->iov.iov_base + iovec->offset;
+ ret = get_system_cache_entry(req, &cache_entry, start,
+ from_this_iovec);
+ if (ret) {
+ SDMA_DBG(req, "pin system segment failed %d", ret);
+ return ret;
+ }
+
+ from_this_cache_entry = cache_entry->rb.len - (start - cache_entry->rb.addr);
+ if (from_this_cache_entry > from_this_iovec)
+ from_this_cache_entry = from_this_iovec;
+
+ ret = add_mapping_to_sdma_packet(req, tx, cache_entry, start,
+ from_this_cache_entry);
+
+ /*
+ * Done adding cache_entry to zero or more sdma_desc. Can
+ * kref_put() the "safety" kref taken under
+ * get_system_cache_entry().
+ */
+ kref_put(&cache_entry->rb.refcount, hfi1_mmu_rb_release);
+
+ if (ret) {
+ SDMA_DBG(req, "add system segment failed %d", ret);
+ return ret;
+ }
+
+ iovec->offset += from_this_cache_entry;
+ from_this_iovec -= from_this_cache_entry;
+ }
+
+ return 0;
+}
+
+/*
+ * Add up to pkt_data_remaining bytes to the txreq, starting at the current
+ * offset in the given iovec entry and continuing until all data has been
+ * added to the packet or the iovec entry type changes.
+ *
+ * On success, prior to returning, adjust pkt_data_remaining, req->iov_idx, and
+ * the offset value in req->iov[req->iov_idx] to reflect the data that has been
+ * consumed.
+ */
+int hfi1_add_pages_to_sdma_packet(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx,
+ struct user_sdma_iovec *iovec,
+ u32 *pkt_data_remaining)
+{
+ size_t remaining_to_add = *pkt_data_remaining;
+ /*
+ * Walk through iovec entries, ensure the associated pages
+ * are pinned and mapped, add data to the packet until no more
+ * data remains to be added or the iovec entry type changes.
+ */
+ while (remaining_to_add > 0) {
+ struct user_sdma_iovec *cur_iovec;
+ size_t from_this_iovec;
+ int ret;
+
+ cur_iovec = iovec;
+ from_this_iovec = iovec->iov.iov_len - iovec->offset;
+
+ if (from_this_iovec > remaining_to_add) {
+ from_this_iovec = remaining_to_add;
+ } else {
+ /* The current iovec entry will be consumed by this pass. */
+ req->iov_idx++;
+ iovec++;
+ }
+
+ ret = add_system_iovec_to_sdma_packet(req, tx, cur_iovec,
+ from_this_iovec);
+ if (ret)
+ return ret;
+
+ remaining_to_add -= from_this_iovec;
+ }
+ *pkt_data_remaining = remaining_to_add;
+
+ return 0;
+}
+
+static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
+ unsigned long len)
+{
+ return (bool)(node->addr == addr);
+}
+
+/*
+ * Return 1 to remove the node from the rb tree and call the remove op.
+ *
+ * Called with the rb tree lock held.
+ */
+static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
+ void *evict_arg, bool *stop)
+{
+ struct sdma_mmu_node *node =
+ container_of(mnode, struct sdma_mmu_node, rb);
+ struct evict_data *evict_data = evict_arg;
+
+ /* this node will be evicted, add its pages to our count */
+ evict_data->cleared += node->npages;
+
+ /* have enough pages been cleared? */
+ if (evict_data->cleared >= evict_data->target)
+ *stop = true;
+
+ return 1; /* remove this node */
+}
+
+static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
+{
+ struct sdma_mmu_node *node =
+ container_of(mnode, struct sdma_mmu_node, rb);
+
+ free_system_node(node);
+}
diff --git a/drivers/infiniband/hw/hfi1/pinning.h b/drivers/infiniband/hw/hfi1/pinning.h
new file mode 100644
index 000000000000..a814a3aa9654
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/pinning.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
+/*
+ * Copyright(c) 2023 Cornelis Networks, Inc.
+ */
+#ifndef _HFI1_PINNING_H
+#define _HFI1_PINNING_H
+
+struct hfi1_user_sdma_pkt_q;
+struct user_sdma_request;
+struct user_sdma_txreq;
+struct user_sdma_iovec;
+
+int hfi1_init_system_pinning(struct hfi1_user_sdma_pkt_q *pq);
+void hfi1_free_system_pinning(struct hfi1_user_sdma_pkt_q *pq);
+int hfi1_add_pages_to_sdma_packet(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx,
+ struct user_sdma_iovec *iovec,
+ u32 *pkt_data_remaining);
+
+#endif /* _HFI1_PINNING_H */
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
new file mode 100644
index 000000000000..764286da2ce8
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -0,0 +1,2135 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015-2018 Intel Corporation.
+ */
+
+#include <linux/delay.h>
+#include "hfi.h"
+#include "qp.h"
+#include "trace.h"
+
+#define SC(name) SEND_CTXT_##name
+/*
+ * Send Context functions
+ */
+static void sc_wait_for_packet_egress(struct send_context *sc, int pause);
+
+/*
+ * Set the CM reset bit and wait for it to clear. Use the provided
+ * sendctrl register. This routine has no locking.
+ */
+void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl)
+{
+ write_csr(dd, SEND_CTRL, sendctrl | SEND_CTRL_CM_RESET_SMASK);
+ while (1) {
+ udelay(1);
+ sendctrl = read_csr(dd, SEND_CTRL);
+ if ((sendctrl & SEND_CTRL_CM_RESET_SMASK) == 0)
+ break;
+ }
+}
+
+/* global control of PIO send */
+void pio_send_control(struct hfi1_devdata *dd, int op)
+{
+ u64 reg, mask;
+ unsigned long flags;
+ int write = 1; /* write sendctrl back */
+ int flush = 0; /* re-read sendctrl to make sure it is flushed */
+ int i;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+
+ reg = read_csr(dd, SEND_CTRL);
+ switch (op) {
+ case PSC_GLOBAL_ENABLE:
+ reg |= SEND_CTRL_SEND_ENABLE_SMASK;
+ fallthrough;
+ case PSC_DATA_VL_ENABLE:
+ mask = 0;
+ for (i = 0; i < ARRAY_SIZE(dd->vld); i++)
+ if (!dd->vld[i].mtu)
+ mask |= BIT_ULL(i);
+ /* Disallow sending on VLs not enabled */
+ mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
+ SEND_CTRL_UNSUPPORTED_VL_SHIFT;
+ reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
+ break;
+ case PSC_GLOBAL_DISABLE:
+ reg &= ~SEND_CTRL_SEND_ENABLE_SMASK;
+ break;
+ case PSC_GLOBAL_VLARB_ENABLE:
+ reg |= SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
+ break;
+ case PSC_GLOBAL_VLARB_DISABLE:
+ reg &= ~SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
+ break;
+ case PSC_CM_RESET:
+ __cm_reset(dd, reg);
+ write = 0; /* CSR already written (and flushed) */
+ break;
+ case PSC_DATA_VL_DISABLE:
+ reg |= SEND_CTRL_UNSUPPORTED_VL_SMASK;
+ flush = 1;
+ break;
+ default:
+ dd_dev_err(dd, "%s: invalid control %d\n", __func__, op);
+ break;
+ }
+
+ if (write) {
+ write_csr(dd, SEND_CTRL, reg);
+ if (flush)
+ (void)read_csr(dd, SEND_CTRL); /* flush write */
+ }
+
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+}
+
+/* number of send context memory pools */
+#define NUM_SC_POOLS 2
+
+/* Send Context Size (SCS) wildcards */
+#define SCS_POOL_0 -1
+#define SCS_POOL_1 -2
+
+/* Send Context Count (SCC) wildcards */
+#define SCC_PER_VL -1
+#define SCC_PER_CPU -2
+#define SCC_PER_KRCVQ -3
+
+/* Send Context Size (SCS) constants */
+#define SCS_ACK_CREDITS 32
+#define SCS_VL15_CREDITS 102 /* 3 pkts of 2048B data + 128B header */
+
+#define PIO_THRESHOLD_CEILING 4096
+
+#define PIO_WAIT_BATCH_SIZE 5
+
+/* default send context sizes */
+static struct sc_config_sizes sc_config_sizes[SC_MAX] = {
+ [SC_KERNEL] = { .size = SCS_POOL_0, /* even divide, pool 0 */
+ .count = SCC_PER_VL }, /* one per NUMA */
+ [SC_ACK] = { .size = SCS_ACK_CREDITS,
+ .count = SCC_PER_KRCVQ },
+ [SC_USER] = { .size = SCS_POOL_0, /* even divide, pool 0 */
+ .count = SCC_PER_CPU }, /* one per CPU */
+ [SC_VL15] = { .size = SCS_VL15_CREDITS,
+ .count = 1 },
+};
+
+/* send context memory pool configuration */
+struct mem_pool_config {
+ int centipercent; /* % of memory, in 100ths of 1% */
+ int absolute_blocks; /* absolute block count */
+};
+
+/* default memory pool configuration: 100% in pool 0 */
+static struct mem_pool_config sc_mem_pool_config[NUM_SC_POOLS] = {
+ /* centi%, abs blocks */
+ { 10000, -1 }, /* pool 0 */
+ { 0, -1 }, /* pool 1 */
+};
+
+/* memory pool information, used when calculating final sizes */
+struct mem_pool_info {
+ int centipercent; /*
+ * 100th of 1% of memory to use, -1 if blocks
+ * already set
+ */
+ int count; /* count of contexts in the pool */
+ int blocks; /* block size of the pool */
+ int size; /* context size, in blocks */
+};
+
+/*
+ * Convert a pool wildcard to a valid pool index. The wildcards
+ * start at -1 and grow more negative. Map them as:
+ * -1 => 0
+ * -2 => 1
+ * etc.
+ *
+ * Return -1 on non-wildcard input, otherwise convert to a pool number.
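+ *
+ * For example, SCS_POOL_0 (-1) maps to pool 0 and SCS_POOL_1 (-2) to
+ * pool 1, while a fixed size such as SCS_ACK_CREDITS (32) is returned
+ * as -1 (non-wildcard).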
+ */
+static int wildcard_to_pool(int wc)
+{
+ if (wc >= 0)
+ return -1; /* non-wildcard */
+ return -wc - 1;
+}
+
+static const char *sc_type_names[SC_MAX] = {
+ "kernel",
+ "ack",
+ "user",
+ "vl15"
+};
+
+static const char *sc_type_name(int index)
+{
+ if (index < 0 || index >= SC_MAX)
+ return "unknown";
+ return sc_type_names[index];
+}
+
+/*
+ * Read the send context memory pool configuration and send context
+ * size configuration. Replace any wildcards and come up with final
+ * counts and sizes for the send context types.
+ */
+int init_sc_pools_and_sizes(struct hfi1_devdata *dd)
+{
+ struct mem_pool_info mem_pool_info[NUM_SC_POOLS] = { { 0 } };
+ int total_blocks = (chip_pio_mem_size(dd) / PIO_BLOCK_SIZE) - 1;
+ int total_contexts = 0;
+ int fixed_blocks;
+ int pool_blocks;
+ int used_blocks;
+ int cp_total; /* centipercent total */
+ int ab_total; /* absolute block total */
+ int extra;
+ int i;
+
+ /*
+ * When SDMA is enabled, kernel context pio packet size is capped by
+ * "piothreshold". Reduce pio buffer allocation for kernel context by
+ * setting it to a fixed size. The allocation allows 3-deep buffering
+ * of the largest pio packets plus up to 128 bytes header, sufficient
+ * to maintain verbs performance.
+ *
+ * When SDMA is disabled, keep the default pooling allocation.
+ */
+ if (HFI1_CAP_IS_KSET(SDMA)) {
+ u16 max_pkt_size = (piothreshold < PIO_THRESHOLD_CEILING) ?
+ piothreshold : PIO_THRESHOLD_CEILING;
+ sc_config_sizes[SC_KERNEL].size =
+ 3 * (max_pkt_size + 128) / PIO_BLOCK_SIZE;
+ }
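+ /*
+ * For example, assuming a 64-byte PIO_BLOCK_SIZE and piothreshold at
+ * the 4096-byte ceiling, the kernel context size above works out to
+ * 3 * (4096 + 128) / 64 = 198 blocks.
+ */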
+
+ /*
+ * Step 0:
+ * - copy the centipercents/absolute sizes from the pool config
+ * - sanity check these values
+ * - add up centipercents, then later check for full value
+ * - add up absolute blocks, then later check for over-commit
+ */
+ cp_total = 0;
+ ab_total = 0;
+ for (i = 0; i < NUM_SC_POOLS; i++) {
+ int cp = sc_mem_pool_config[i].centipercent;
+ int ab = sc_mem_pool_config[i].absolute_blocks;
+
+ /*
+ * A negative value is "unused" or "invalid". Both *can*
+ * be valid, but centipercent wins, so check that first
+ */
+ if (cp >= 0) { /* centipercent valid */
+ cp_total += cp;
+ } else if (ab >= 0) { /* absolute blocks valid */
+ ab_total += ab;
+ } else { /* neither valid */
+ dd_dev_err(
+ dd,
+ "Send context memory pool %d: both the block count and centipercent are invalid\n",
+ i);
+ return -EINVAL;
+ }
+
+ mem_pool_info[i].centipercent = cp;
+ mem_pool_info[i].blocks = ab;
+ }
+
+ /* do not use both % and absolute blocks for different pools */
+ if (cp_total != 0 && ab_total != 0) {
+ dd_dev_err(
+ dd,
+ "All send context memory pools must be described as either centipercent or blocks, no mixing between pools\n");
+ return -EINVAL;
+ }
+
+ /* if any percentages are present, they must add up to 100% x 100 */
+ if (cp_total != 0 && cp_total != 10000) {
+ dd_dev_err(
+ dd,
+ "Send context memory pool centipercent is %d, expecting 10000\n",
+ cp_total);
+ return -EINVAL;
+ }
+
+ /* the absolute pool total cannot be more than the mem total */
+ if (ab_total > total_blocks) {
+ dd_dev_err(
+ dd,
+ "Send context memory pool absolute block count %d is larger than the memory size %d\n",
+ ab_total, total_blocks);
+ return -EINVAL;
+ }
+
+ /*
+ * Step 2:
+ * - copy from the context size config
+ * - replace context type wildcard counts with real values
+ * - add up non-memory pool block sizes
+ * - add up memory pool user counts
+ */
+ fixed_blocks = 0;
+ for (i = 0; i < SC_MAX; i++) {
+ int count = sc_config_sizes[i].count;
+ int size = sc_config_sizes[i].size;
+ int pool;
+
+ /*
+ * Sanity check count: Either a positive value or
+ * one of the expected wildcards is valid. The positive
+ * value is checked later when we compare against total
+ * memory available.
+ */
+ if (i == SC_ACK) {
+ count = dd->n_krcv_queues;
+ } else if (i == SC_KERNEL) {
+ count = INIT_SC_PER_VL * num_vls;
+ } else if (count == SCC_PER_CPU) {
+ count = dd->num_rcv_contexts - dd->n_krcv_queues;
+ } else if (count < 0) {
+ dd_dev_err(
+ dd,
+ "%s send context invalid count wildcard %d\n",
+ sc_type_name(i), count);
+ return -EINVAL;
+ }
+ if (total_contexts + count > chip_send_contexts(dd))
+ count = chip_send_contexts(dd) - total_contexts;
+
+ total_contexts += count;
+
+ /*
+ * Sanity check pool: The conversion will return a pool
+ * number or -1 if a fixed (non-negative) value. The fixed
+ * value is checked later when we compare against
+ * total memory available.
+ */
+ pool = wildcard_to_pool(size);
+ if (pool == -1) { /* non-wildcard */
+ fixed_blocks += size * count;
+ } else if (pool < NUM_SC_POOLS) { /* valid wildcard */
+ mem_pool_info[pool].count += count;
+ } else { /* invalid wildcard */
+ dd_dev_err(
+ dd,
+ "%s send context invalid pool wildcard %d\n",
+ sc_type_name(i), size);
+ return -EINVAL;
+ }
+
+ dd->sc_sizes[i].count = count;
+ dd->sc_sizes[i].size = size;
+ }
+ if (fixed_blocks > total_blocks) {
+ dd_dev_err(
+ dd,
+ "Send context fixed block count, %u, larger than total block count %u\n",
+ fixed_blocks, total_blocks);
+ return -EINVAL;
+ }
+
+ /* step 3: calculate the blocks in the pools, and pool context sizes */
+ pool_blocks = total_blocks - fixed_blocks;
+ if (ab_total > pool_blocks) {
+ dd_dev_err(
+ dd,
+ "Send context fixed pool sizes, %u, larger than pool block count %u\n",
+ ab_total, pool_blocks);
+ return -EINVAL;
+ }
+ /* subtract off the fixed pool blocks */
+ pool_blocks -= ab_total;
+
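+ /*
+ * With the default configuration above (100% in pool 0, 0% in pool 1),
+ * pool 0 receives all of pool_blocks and pool 1 receives none. For
+ * example, 60000 remaining blocks shared by 40 pool-0 contexts would
+ * give a per-context pool size of 1500 blocks.
+ */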
+ for (i = 0; i < NUM_SC_POOLS; i++) {
+ struct mem_pool_info *pi = &mem_pool_info[i];
+
+ /* % beats absolute blocks */
+ if (pi->centipercent >= 0)
+ pi->blocks = (pool_blocks * pi->centipercent) / 10000;
+
+ if (pi->blocks == 0 && pi->count != 0) {
+ dd_dev_err(
+ dd,
+ "Send context memory pool %d has %u contexts, but no blocks\n",
+ i, pi->count);
+ return -EINVAL;
+ }
+ if (pi->count == 0) {
+ /* warn about wasted blocks */
+ if (pi->blocks != 0)
+ dd_dev_err(
+ dd,
+ "Send context memory pool %d has %u blocks, but zero contexts\n",
+ i, pi->blocks);
+ pi->size = 0;
+ } else {
+ pi->size = pi->blocks / pi->count;
+ }
+ }
+
+ /* step 4: fill in the context type sizes from the pool sizes */
+ used_blocks = 0;
+ for (i = 0; i < SC_MAX; i++) {
+ if (dd->sc_sizes[i].size < 0) {
+ unsigned pool = wildcard_to_pool(dd->sc_sizes[i].size);
+
+ WARN_ON_ONCE(pool >= NUM_SC_POOLS);
+ dd->sc_sizes[i].size = mem_pool_info[pool].size;
+ }
+ /* make sure we are not larger than what is allowed by the HW */
+#define PIO_MAX_BLOCKS 1024
+ if (dd->sc_sizes[i].size > PIO_MAX_BLOCKS)
+ dd->sc_sizes[i].size = PIO_MAX_BLOCKS;
+
+ /* calculate our total usage */
+ used_blocks += dd->sc_sizes[i].size * dd->sc_sizes[i].count;
+ }
+ extra = total_blocks - used_blocks;
+ if (extra != 0)
+ dd_dev_info(dd, "unused send context blocks: %d\n", extra);
+
+ return total_contexts;
+}
+
+int init_send_contexts(struct hfi1_devdata *dd)
+{
+ u16 base;
+ int ret, i, j, context;
+
+ ret = init_credit_return(dd);
+ if (ret)
+ return ret;
+
+ dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8),
+ GFP_KERNEL);
+ dd->send_contexts = kcalloc(dd->num_send_contexts,
+ sizeof(struct send_context_info),
+ GFP_KERNEL);
+ if (!dd->send_contexts || !dd->hw_to_sw) {
+ kfree(dd->hw_to_sw);
+ kfree(dd->send_contexts);
+ free_credit_return(dd);
+ return -ENOMEM;
+ }
+
+ /* hardware context map starts with invalid send context indices */
+ for (i = 0; i < TXE_NUM_CONTEXTS; i++)
+ dd->hw_to_sw[i] = INVALID_SCI;
+
+ /*
+ * All send contexts have their credit sizes. Allocate credits
+ * for each context one after another from the global space.
+ */
+ context = 0;
+ base = 1;
+ for (i = 0; i < SC_MAX; i++) {
+ struct sc_config_sizes *scs = &dd->sc_sizes[i];
+
+ for (j = 0; j < scs->count; j++) {
+ struct send_context_info *sci =
+ &dd->send_contexts[context];
+ sci->type = i;
+ sci->base = base;
+ sci->credits = scs->size;
+
+ context++;
+ base += scs->size;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Allocate a software index and hardware context of the given type.
+ *
+ * Must be called with dd->sc_lock held.
+ */
+static int sc_hw_alloc(struct hfi1_devdata *dd, int type, u32 *sw_index,
+ u32 *hw_context)
+{
+ struct send_context_info *sci;
+ u32 index;
+ u32 context;
+
+ for (index = 0, sci = &dd->send_contexts[0];
+ index < dd->num_send_contexts; index++, sci++) {
+ if (sci->type == type && sci->allocated == 0) {
+ sci->allocated = 1;
+ /* use a 1:1 mapping, but make them non-equal */
+ context = chip_send_contexts(dd) - index - 1;
+ dd->hw_to_sw[context] = index;
+ *sw_index = index;
+ *hw_context = context;
+ return 0; /* success */
+ }
+ }
+ dd_dev_err(dd, "Unable to locate a free type %d send context\n", type);
+ return -ENOSPC;
+}
+
+/*
+ * Free the send context given by its software index.
+ *
+ * Must be called with dd->sc_lock held.
+ */
+static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context)
+{
+ struct send_context_info *sci;
+
+ sci = &dd->send_contexts[sw_index];
+ if (!sci->allocated) {
+ dd_dev_err(dd, "%s: sw_index %u not allocated? hw_context %u\n",
+ __func__, sw_index, hw_context);
+ }
+ sci->allocated = 0;
+ dd->hw_to_sw[hw_context] = INVALID_SCI;
+}
+
+/* return the base context of a context in a group */
+static inline u32 group_context(u32 context, u32 group)
+{
+ return (context >> group) << group;
+}
+
+/* return the size of a group */
+static inline u32 group_size(u32 group)
+{
+ return 1 << group;
+}
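+
+ /*
+ * For example, with group = 2 a group spans group_size(2) = 4
+ * contexts, and context 5 belongs to the group whose base is
+ * group_context(5, 2) = 4.
+ */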
+
+/*
+ * Obtain the credit return addresses, kernel virtual and bus, for the
+ * given sc.
+ *
+ * To understand this routine:
+ * o va and dma are arrays of struct credit_return. One for each physical
+ * send context, per NUMA.
+ * o Each send context always looks in its relative location in a struct
+ * credit_return for its credit return.
+ * o Each send context in a group must have its return address CSR programmed
+ * with the same value. Use the address of the first send context in the
+ * group.
+ */
+static void cr_group_addresses(struct send_context *sc, dma_addr_t *dma)
+{
+ u32 gc = group_context(sc->hw_context, sc->group);
+ u32 index = sc->hw_context & 0x7;
+
+ sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index];
+ *dma = (unsigned long)
+ &((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc];
+}
+
+/*
+ * Work queue function triggered in error interrupt routine for
+ * kernel contexts.
+ */
+static void sc_halted(struct work_struct *work)
+{
+ struct send_context *sc;
+
+ sc = container_of(work, struct send_context, halt_work);
+ sc_restart(sc);
+}
+
+/*
+ * Calculate PIO block threshold for this send context using the given MTU.
+ * Trigger a return when credits for one MTU plus an optional header remain.
+ *
+ * Parameter mtu is in bytes.
+ * Parameter hdrqentsize is in DWORDs.
+ *
+ * Return value is what to write into the CSR: trigger return when
+ * unreturned credits pass this count.
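+ *
+ * For example, assuming a 64-byte PIO_BLOCK_SIZE: mtu = 8192 and
+ * hdrqentsize = 32 DWORDs gives mtu = 8320 and
+ * release_credits = DIV_ROUND_UP(8320, 64) = 130, so a context with
+ * 160 credits gets a threshold of 160 - 130 = 30.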
+ */
+u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize)
+{
+ u32 release_credits;
+ u32 threshold;
+
+ /* add in the header size, then divide by the PIO block size */
+ mtu += hdrqentsize << 2;
+ release_credits = DIV_ROUND_UP(mtu, PIO_BLOCK_SIZE);
+
+ /* check against this context's credits */
+ if (sc->credits <= release_credits)
+ threshold = 1;
+ else
+ threshold = sc->credits - release_credits;
+
+ return threshold;
+}
+
+/*
+ * Calculate credit threshold in terms of percent of the allocated credits.
+ * Trigger when unreturned credits equal or exceed the percentage of the whole.
+ *
+ * Return value is what to write into the CSR: trigger return when
+ * unreturned credits pass this count.
+ */
+u32 sc_percent_to_threshold(struct send_context *sc, u32 percent)
+{
+ return (sc->credits * percent) / 100;
+}
+
+/*
+ * Set the credit return threshold.
+ */
+void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
+{
+ unsigned long flags;
+ u32 old_threshold;
+ int force_return = 0;
+
+ spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
+
+ old_threshold = (sc->credit_ctrl >>
+ SC(CREDIT_CTRL_THRESHOLD_SHIFT))
+ & SC(CREDIT_CTRL_THRESHOLD_MASK);
+
+ if (new_threshold != old_threshold) {
+ sc->credit_ctrl =
+ (sc->credit_ctrl
+ & ~SC(CREDIT_CTRL_THRESHOLD_SMASK))
+ | ((new_threshold
+ & SC(CREDIT_CTRL_THRESHOLD_MASK))
+ << SC(CREDIT_CTRL_THRESHOLD_SHIFT));
+ write_kctxt_csr(sc->dd, sc->hw_context,
+ SC(CREDIT_CTRL), sc->credit_ctrl);
+
+ /* force a credit return on change to avoid a possible stall */
+ force_return = 1;
+ }
+
+ spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
+
+ if (force_return)
+ sc_return_credits(sc);
+}
+
+/*
+ * set_pio_integrity
+ *
+ * Set the CHECK_ENABLE register for the send context 'sc'.
+ */
+void set_pio_integrity(struct send_context *sc)
+{
+ struct hfi1_devdata *dd = sc->dd;
+ u32 hw_context = sc->hw_context;
+ int type = sc->type;
+
+ write_kctxt_csr(dd, hw_context,
+ SC(CHECK_ENABLE),
+ hfi1_pkt_default_send_ctxt_mask(dd, type));
+}
+
+static u32 get_buffers_allocated(struct send_context *sc)
+{
+ int cpu;
+ u32 ret = 0;
+
+ for_each_possible_cpu(cpu)
+ ret += *per_cpu_ptr(sc->buffers_allocated, cpu);
+ return ret;
+}
+
+static void reset_buffers_allocated(struct send_context *sc)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ (*per_cpu_ptr(sc->buffers_allocated, cpu)) = 0;
+}
+
+/*
+ * Allocate a NUMA relative send context structure of the given type along
+ * with a HW context.
+ */
+struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
+ uint hdrqentsize, int numa)
+{
+ struct send_context_info *sci;
+ struct send_context *sc = NULL;
+ dma_addr_t dma;
+ unsigned long flags;
+ u64 reg;
+ u32 thresh;
+ u32 sw_index;
+ u32 hw_context;
+ int ret;
+ u8 opval, opmask;
+
+ /* do not allocate while frozen */
+ if (dd->flags & HFI1_FROZEN)
+ return NULL;
+
+ sc = kzalloc_node(sizeof(*sc), GFP_KERNEL, numa);
+ if (!sc)
+ return NULL;
+
+ sc->buffers_allocated = alloc_percpu(u32);
+ if (!sc->buffers_allocated) {
+ kfree(sc);
+ dd_dev_err(dd,
+ "Cannot allocate buffers_allocated per cpu counters\n"
+ );
+ return NULL;
+ }
+
+ spin_lock_irqsave(&dd->sc_lock, flags);
+ ret = sc_hw_alloc(dd, type, &sw_index, &hw_context);
+ if (ret) {
+ spin_unlock_irqrestore(&dd->sc_lock, flags);
+ free_percpu(sc->buffers_allocated);
+ kfree(sc);
+ return NULL;
+ }
+
+ sci = &dd->send_contexts[sw_index];
+ sci->sc = sc;
+
+ sc->dd = dd;
+ sc->node = numa;
+ sc->type = type;
+ spin_lock_init(&sc->alloc_lock);
+ spin_lock_init(&sc->release_lock);
+ spin_lock_init(&sc->credit_ctrl_lock);
+ seqlock_init(&sc->waitlock);
+ INIT_LIST_HEAD(&sc->piowait);
+ INIT_WORK(&sc->halt_work, sc_halted);
+ init_waitqueue_head(&sc->halt_wait);
+
+ /* grouping is always single context for now */
+ sc->group = 0;
+
+ sc->sw_index = sw_index;
+ sc->hw_context = hw_context;
+ cr_group_addresses(sc, &dma);
+ sc->credits = sci->credits;
+ sc->size = sc->credits * PIO_BLOCK_SIZE;
+
+/* PIO Send Memory Address details */
+#define PIO_ADDR_CONTEXT_MASK 0xfful
+#define PIO_ADDR_CONTEXT_SHIFT 16
+ sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK)
+ << PIO_ADDR_CONTEXT_SHIFT);
+
+ /* set base and credits */
+ reg = ((sci->credits & SC(CTRL_CTXT_DEPTH_MASK))
+ << SC(CTRL_CTXT_DEPTH_SHIFT))
+ | ((sci->base & SC(CTRL_CTXT_BASE_MASK))
+ << SC(CTRL_CTXT_BASE_SHIFT));
+ write_kctxt_csr(dd, hw_context, SC(CTRL), reg);
+
+ set_pio_integrity(sc);
+
+ /* unmask all errors */
+ write_kctxt_csr(dd, hw_context, SC(ERR_MASK), (u64)-1);
+
+ /* set the default partition key */
+ write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY),
+ (SC(CHECK_PARTITION_KEY_VALUE_MASK) &
+ DEFAULT_PKEY) <<
+ SC(CHECK_PARTITION_KEY_VALUE_SHIFT));
+
+ /* per context type checks */
+ if (type == SC_USER) {
+ opval = USER_OPCODE_CHECK_VAL;
+ opmask = USER_OPCODE_CHECK_MASK;
+ } else {
+ opval = OPCODE_CHECK_VAL_DISABLED;
+ opmask = OPCODE_CHECK_MASK_DISABLED;
+ }
+
+ /* set the send context check opcode mask and value */
+ write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE),
+ ((u64)opmask << SC(CHECK_OPCODE_MASK_SHIFT)) |
+ ((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT)));
+
+ /* set up credit return */
+ reg = dma & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK);
+ write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), reg);
+
+ /*
+ * Calculate the initial credit return threshold.
+ *
+ * For Ack contexts, set a threshold for half the credits.
+ * For User contexts use the given percentage. This has been
+ * sanitized on driver start-up.
+ * For Kernel contexts, use the default MTU plus a header
+ * or half the credits, whichever is smaller. This should
+ * work for both the 3-deep buffering allocation and the
+ * pooling allocation.
+ */
+ if (type == SC_ACK) {
+ thresh = sc_percent_to_threshold(sc, 50);
+ } else if (type == SC_USER) {
+ thresh = sc_percent_to_threshold(sc,
+ user_credit_return_threshold);
+ } else { /* kernel */
+ thresh = min(sc_percent_to_threshold(sc, 50),
+ sc_mtu_to_threshold(sc, hfi1_max_mtu,
+ hdrqentsize));
+ }
+ reg = thresh << SC(CREDIT_CTRL_THRESHOLD_SHIFT);
+ /* add in early return */
+ if (type == SC_USER && HFI1_CAP_IS_USET(EARLY_CREDIT_RETURN))
+ reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);
+ else if (HFI1_CAP_IS_KSET(EARLY_CREDIT_RETURN)) /* kernel, ack */
+ reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);
+
+ /* set up write-through credit_ctrl */
+ sc->credit_ctrl = reg;
+ write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), reg);
+
+ /* User send contexts should not allow sending on VL15 */
+ if (type == SC_USER) {
+ reg = 1ULL << 15;
+ write_kctxt_csr(dd, hw_context, SC(CHECK_VL), reg);
+ }
+
+ spin_unlock_irqrestore(&dd->sc_lock, flags);
+
+ /*
+ * Allocate shadow ring to track outstanding PIO buffers _after_
+ * unlocking. We don't know the size until the lock is held and
+ * we can't allocate while the lock is held. No one is using
+ * the context yet, so allocate it now.
+ *
+ * User contexts do not get a shadow ring.
+ */
+ if (type != SC_USER) {
+ /*
+ * Size the shadow ring 1 larger than the number of credits
+ * so head == tail can mean empty.
+ */
+ sc->sr_size = sci->credits + 1;
+ sc->sr = kcalloc_node(sc->sr_size,
+ sizeof(union pio_shadow_ring),
+ GFP_KERNEL, numa);
+ if (!sc->sr) {
+ sc_free(sc);
+ return NULL;
+ }
+ }
+
+ hfi1_cdbg(PIO,
+ "Send context %u(%u) %s group %u credits %u credit_ctrl 0x%llx threshold %u",
+ sw_index,
+ hw_context,
+ sc_type_name(type),
+ sc->group,
+ sc->credits,
+ sc->credit_ctrl,
+ thresh);
+
+ return sc;
+}
+
+/* free a per-NUMA send context structure */
+void sc_free(struct send_context *sc)
+{
+ struct hfi1_devdata *dd;
+ unsigned long flags;
+ u32 sw_index;
+ u32 hw_context;
+
+ if (!sc)
+ return;
+
+ sc->flags |= SCF_IN_FREE; /* ensure no restarts */
+ dd = sc->dd;
+ if (!list_empty(&sc->piowait))
+ dd_dev_err(dd, "piowait list not empty!\n");
+ sw_index = sc->sw_index;
+ hw_context = sc->hw_context;
+ sc_disable(sc); /* make sure the HW is disabled */
+ flush_work(&sc->halt_work);
+
+ spin_lock_irqsave(&dd->sc_lock, flags);
+ dd->send_contexts[sw_index].sc = NULL;
+
+ /* clear/disable all registers set in sc_alloc */
+ write_kctxt_csr(dd, hw_context, SC(CTRL), 0);
+ write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), 0);
+ write_kctxt_csr(dd, hw_context, SC(ERR_MASK), 0);
+ write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), 0);
+ write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), 0);
+ write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), 0);
+ write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), 0);
+
+ /* release the index and context for re-use */
+ sc_hw_free(dd, sw_index, hw_context);
+ spin_unlock_irqrestore(&dd->sc_lock, flags);
+
+ kfree(sc->sr);
+ free_percpu(sc->buffers_allocated);
+ kfree(sc);
+}
+
+/* disable the context */
+void sc_disable(struct send_context *sc)
+{
+ u64 reg;
+ struct pio_buf *pbuf;
+ LIST_HEAD(wake_list);
+
+ if (!sc)
+ return;
+
+ /* do all steps, even if already disabled */
+ spin_lock_irq(&sc->alloc_lock);
+ reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
+ reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
+ sc->flags &= ~SCF_ENABLED;
+ sc_wait_for_packet_egress(sc, 1);
+ write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
+
+ /*
+ * Flush any waiters. Once the context is disabled,
+ * credit return interrupts are stopped (although there
+ * could be one in-process when the context is disabled).
+ * Wait one microsecond for any lingering interrupts, then
+ * proceed with the flush.
+ */
+ udelay(1);
+ spin_lock(&sc->release_lock);
+ if (sc->sr) { /* this context has a shadow ring */
+ while (sc->sr_tail != sc->sr_head) {
+ pbuf = &sc->sr[sc->sr_tail].pbuf;
+ if (pbuf->cb)
+ (*pbuf->cb)(pbuf->arg, PRC_SC_DISABLE);
+ sc->sr_tail++;
+ if (sc->sr_tail >= sc->sr_size)
+ sc->sr_tail = 0;
+ }
+ }
+ spin_unlock(&sc->release_lock);
+
+ write_seqlock(&sc->waitlock);
+ list_splice_init(&sc->piowait, &wake_list);
+ write_sequnlock(&sc->waitlock);
+ while (!list_empty(&wake_list)) {
+ struct iowait *wait;
+ struct rvt_qp *qp;
+ struct hfi1_qp_priv *priv;
+
+ wait = list_first_entry(&wake_list, struct iowait, list);
+ qp = iowait_to_qp(wait);
+ priv = qp->priv;
+ list_del_init(&priv->s_iowait.list);
+ priv->s_iowait.lock = NULL;
+ hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
+ }
+
+ spin_unlock_irq(&sc->alloc_lock);
+}
+
+/* return SendEgressCtxtStatus.PacketOccupancy */
+static u64 packet_occupancy(u64 reg)
+{
+ return (reg &
+ SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)
+ >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT;
+}
+
+/* is egress halted on the context? */
+static bool egress_halted(u64 reg)
+{
+ return !!(reg & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK);
+}
+
+/* is the send context halted? */
+static bool is_sc_halted(struct hfi1_devdata *dd, u32 hw_context)
+{
+ return !!(read_kctxt_csr(dd, hw_context, SC(STATUS)) &
+ SC(STATUS_CTXT_HALTED_SMASK));
+}
+
+/**
+ * sc_wait_for_packet_egress - wait for packet
+ * @sc: valid send context
+ * @pause: wait for credit return
+ *
+ * Wait for packet egress, optionally pause for credit return
+ *
+ * Egress halt and Context halt are not necessarily the same thing, so
+ * check for both.
+ *
+ * NOTE: The context halt bit may not be set immediately. Because of this,
+ * it is necessary to check the SW SCF_HALTED flag (set in the IRQ) and the HW
+ * context halt bit to determine if the context is halted.
+ */
+static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
+{
+ struct hfi1_devdata *dd = sc->dd;
+ u64 reg = 0;
+ u64 reg_prev;
+ u32 loop = 0;
+
+ while (1) {
+ reg_prev = reg;
+ reg = read_csr(dd, sc->hw_context * 8 +
+ SEND_EGRESS_CTXT_STATUS);
+ /* done if any halt bits, SW or HW are set */
+ if (sc->flags & SCF_HALTED ||
+ is_sc_halted(dd, sc->hw_context) || egress_halted(reg))
+ break;
+ reg = packet_occupancy(reg);
+ if (reg == 0)
+ break;
+ /* counter is reset if occupancy count changes */
+ if (reg != reg_prev)
+ loop = 0;
+ if (loop > 50000) {
+ /* timed out - bounce the link */
+ dd_dev_err(dd,
+ "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
+ __func__, sc->sw_index,
+ sc->hw_context, (u32)reg);
+ queue_work(dd->pport->link_wq,
+ &dd->pport->link_bounce_work);
+ break;
+ }
+ loop++;
+ udelay(1);
+ }
+
+ if (pause)
+ /* Add additional delay to ensure chip returns all credits */
+ pause_for_credit_return(dd);
+}
+
+void sc_wait(struct hfi1_devdata *dd)
+{
+ int i;
+
+ for (i = 0; i < dd->num_send_contexts; i++) {
+ struct send_context *sc = dd->send_contexts[i].sc;
+
+ if (!sc)
+ continue;
+ sc_wait_for_packet_egress(sc, 0);
+ }
+}
+
+/*
+ * Restart a context after it has been halted due to error.
+ *
+ * If the first step - waiting for the halt to be asserted - fails, return
+ * early. Otherwise complain about timeouts but keep going.
+ *
+ * It is expected that allocations (enabled flag bit) have been shut off
+ * already (only applies to kernel contexts).
+ */
+int sc_restart(struct send_context *sc)
+{
+ struct hfi1_devdata *dd = sc->dd;
+ u64 reg;
+ u32 loop;
+ int count;
+
+ /* bounce off if not halted, or being freed */
+ if (!(sc->flags & SCF_HALTED) || (sc->flags & SCF_IN_FREE))
+ return -EINVAL;
+
+ dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index,
+ sc->hw_context);
+
+ /*
+ * Step 1: Wait for the context to actually halt.
+ *
+ * The error interrupt is asynchronous to actually setting halt
+ * on the context.
+ */
+ loop = 0;
+ while (1) {
+ reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS));
+ if (reg & SC(STATUS_CTXT_HALTED_SMASK))
+ break;
+ if (loop > 100) {
+ dd_dev_err(dd, "%s: context %u(%u) not halting, skipping\n",
+ __func__, sc->sw_index, sc->hw_context);
+ return -ETIME;
+ }
+ loop++;
+ udelay(1);
+ }
+
+ /*
+ * Step 2: Ensure no users are still trying to write to PIO.
+ *
+ * For kernel contexts, we have already turned off buffer allocation.
+ * Now wait for the buffer count to go to zero.
+ *
+ * For user contexts, the user handling code has cut off write access
+ * to the context's PIO pages before calling this routine and will
+ * restore write access after this routine returns.
+ */
+ if (sc->type != SC_USER) {
+ /* kernel context */
+ loop = 0;
+ while (1) {
+ count = get_buffers_allocated(sc);
+ if (count == 0)
+ break;
+ if (loop > 100) {
+ dd_dev_err(dd,
+ "%s: context %u(%u) timeout waiting for PIO buffers to zero, remaining %d\n",
+ __func__, sc->sw_index,
+ sc->hw_context, count);
+ }
+ loop++;
+ udelay(1);
+ }
+ }
+
+ /*
+ * Step 3: Wait for all packets to egress.
+ * This is done while disabling the send context
+ *
+ * Step 4: Disable the context
+ *
+ * This is a superset of the halt. After the disable, the
+ * errors can be cleared.
+ */
+ sc_disable(sc);
+
+ /*
+ * Step 5: Enable the context
+ *
+ * This enable will clear the halted flag and per-send context
+ * error flags.
+ */
+ return sc_enable(sc);
+}
+
+/*
+ * PIO freeze processing. To be called after the TXE block is fully frozen.
+ * Go through all frozen send contexts and disable them. The contexts are
+ * already stopped by the freeze.
+ */
+void pio_freeze(struct hfi1_devdata *dd)
+{
+ struct send_context *sc;
+ int i;
+
+ for (i = 0; i < dd->num_send_contexts; i++) {
+ sc = dd->send_contexts[i].sc;
+ /*
+ * Don't disable unallocated, unfrozen, or user send contexts.
+ * User send contexts will be disabled when the process
+ * calls into the driver to reset its context.
+ */
+ if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
+ continue;
+
+ /* only need to disable, the context is already stopped */
+ sc_disable(sc);
+ }
+}
+
+/*
+ * Unfreeze PIO for kernel send contexts. The precondition for calling this
+ * is that all PIO send contexts have been disabled and the SPC freeze has
+ * been cleared. Now perform the last step and re-enable each kernel context.
+ * User (PSM) processing will occur when PSM calls into the kernel to
+ * acknowledge the freeze.
+ */
+void pio_kernel_unfreeze(struct hfi1_devdata *dd)
+{
+ struct send_context *sc;
+ int i;
+
+ for (i = 0; i < dd->num_send_contexts; i++) {
+ sc = dd->send_contexts[i].sc;
+ if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
+ continue;
+ if (sc->flags & SCF_LINK_DOWN)
+ continue;
+
+ sc_enable(sc); /* will clear the sc frozen flag */
+ }
+}
+
+/**
+ * pio_kernel_linkup() - Re-enable send contexts after linkup event
+ * @dd: valid device data
+ *
+ * When the link goes down, the freeze path is taken. However, a link down
+ * event is different from a freeze because if the send context is re-enabled
+ * whoever is sending data will start sending again, which will hang
+ * any QP that is sending data.
+ *
+ * The freeze path now looks at the type of event that occurred and takes this
+ * path for a link down event.
+ */
+void pio_kernel_linkup(struct hfi1_devdata *dd)
+{
+ struct send_context *sc;
+ int i;
+
+ for (i = 0; i < dd->num_send_contexts; i++) {
+ sc = dd->send_contexts[i].sc;
+ if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER)
+ continue;
+
+ sc_enable(sc); /* will clear the sc link down flag */
+ }
+}
+
+/*
+ * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
+ * Returns:
+ * -ETIMEDOUT - if we wait too long
+ * -EIO - if there was an error
+ */
+static int pio_init_wait_progress(struct hfi1_devdata *dd)
+{
+ u64 reg;
+ int max, count = 0;
+
+ /* max is the longest possible HW init time / delay */
+ max = (dd->icode == ICODE_FPGA_EMULATION) ? 120 : 5;
+ while (1) {
+ reg = read_csr(dd, SEND_PIO_INIT_CTXT);
+ if (!(reg & SEND_PIO_INIT_CTXT_PIO_INIT_IN_PROGRESS_SMASK))
+ break;
+ if (count >= max)
+ return -ETIMEDOUT;
+ udelay(5);
+ count++;
+ }
+
+ return reg & SEND_PIO_INIT_CTXT_PIO_INIT_ERR_SMASK ? -EIO : 0;
+}
+
+/*
+ * Reset all of the send contexts to their power-on state. Used
+ * only during manual init - no lock against sc_enable needed.
+ */
+void pio_reset_all(struct hfi1_devdata *dd)
+{
+ int ret;
+
+ /* make sure the init engine is not busy */
+ ret = pio_init_wait_progress(dd);
+ /* ignore any timeout */
+ if (ret == -EIO) {
+ /* clear the error */
+ write_csr(dd, SEND_PIO_ERR_CLEAR,
+ SEND_PIO_ERR_CLEAR_PIO_INIT_SM_IN_ERR_SMASK);
+ }
+
+ /* reset init all */
+ write_csr(dd, SEND_PIO_INIT_CTXT,
+ SEND_PIO_INIT_CTXT_PIO_ALL_CTXT_INIT_SMASK);
+ udelay(2);
+ ret = pio_init_wait_progress(dd);
+ if (ret < 0) {
+ dd_dev_err(dd,
+ "PIO send context init %s while initializing all PIO blocks\n",
+ ret == -ETIMEDOUT ? "is stuck" : "had an error");
+ }
+}
+
+/* enable the context */
+int sc_enable(struct send_context *sc)
+{
+ u64 sc_ctrl, reg, pio;
+ struct hfi1_devdata *dd;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!sc)
+ return -EINVAL;
+ dd = sc->dd;
+
+ /*
+ * Obtain the allocator lock to guard against any allocation
+ * attempts (which should not happen prior to context being
+ * enabled). On the release/disable side we don't need to
+ * worry about locking since the releaser will not do anything
+ * if the context accounting values have not changed.
+ */
+ spin_lock_irqsave(&sc->alloc_lock, flags);
+ sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
+ if ((sc_ctrl & SC(CTRL_CTXT_ENABLE_SMASK)))
+ goto unlock; /* already enabled */
+
+ /* IMPORTANT: only clear free and fill if transitioning 0 -> 1 */
+
+ *sc->hw_free = 0;
+ sc->free = 0;
+ sc->alloc_free = 0;
+ sc->fill = 0;
+ sc->fill_wrap = 0;
+ sc->sr_head = 0;
+ sc->sr_tail = 0;
+ sc->flags = 0;
+ /* the alloc lock ensures no fast path allocation */
+ reset_buffers_allocated(sc);
+
+ /*
+ * Clear all per-context errors. Some of these will be set when
+ * we are re-enabling after a context halt. Now that the context
+ * is disabled, the halt will not clear until after the PIO init
+ * engine runs below.
+ */
+ reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS));
+ if (reg)
+ write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg);
+
+ /*
+ * The HW PIO initialization engine can handle only one init
+ * request at a time. Serialize access to each device's engine.
+ */
+ spin_lock(&dd->sc_init_lock);
+ /*
+ * Since access to this code block is serialized and
+ * each access waits for the initialization to complete
+ * before releasing the lock, the PIO initialization engine
+ * should not be in use, so we don't have to wait for the
+ * InProgress bit to go down.
+ */
+ pio = ((sc->hw_context & SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_MASK) <<
+ SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_SHIFT) |
+ SEND_PIO_INIT_CTXT_PIO_SINGLE_CTXT_INIT_SMASK;
+ write_csr(dd, SEND_PIO_INIT_CTXT, pio);
+ /*
+ * Wait until the engine is done. Give the chip the required time
+ * so, hopefully, we read the register just once.
+ */
+ udelay(2);
+ ret = pio_init_wait_progress(dd);
+ spin_unlock(&dd->sc_init_lock);
+ if (ret) {
+ dd_dev_err(dd,
+ "sctxt%u(%u): Context not enabled due to init failure %d\n",
+ sc->sw_index, sc->hw_context, ret);
+ goto unlock;
+ }
+
+ /*
+ * All is well. Enable the context.
+ */
+ sc_ctrl |= SC(CTRL_CTXT_ENABLE_SMASK);
+ write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl);
+ /*
+ * Read SendCtxtCtrl to force the write out and prevent a timing
+ * hazard where a PIO write may reach the context before the enable.
+ */
+ read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
+ sc->flags |= SCF_ENABLED;
+
+unlock:
+ spin_unlock_irqrestore(&sc->alloc_lock, flags);
+
+ return ret;
+}
+
+/* force a credit return on the context */
+void sc_return_credits(struct send_context *sc)
+{
+ if (!sc)
+ return;
+
+ /* a 0->1 transition schedules a credit return */
+ write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE),
+ SC(CREDIT_FORCE_FORCE_RETURN_SMASK));
+ /*
+ * Ensure that the write is flushed and the credit return is
+ * scheduled. We care more about the 0 -> 1 transition.
+ */
+ read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE));
+ /* set back to 0 for next time */
+ write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0);
+}
+
+/* allow all in-flight packets to drain on the context */
+void sc_flush(struct send_context *sc)
+{
+ if (!sc)
+ return;
+
+ sc_wait_for_packet_egress(sc, 1);
+}
+
+/*
+ * Start the software reaction to a context halt or SPC freeze:
+ * - mark the context as halted or frozen
+ * - stop buffer allocations
+ *
+ * Called from the error interrupt. Other work is deferred until
+ * out of the interrupt.
+ */
+void sc_stop(struct send_context *sc, int flag)
+{
+ unsigned long flags;
+
+ /* stop buffer allocations */
+ spin_lock_irqsave(&sc->alloc_lock, flags);
+ /* mark the context */
+ sc->flags |= flag;
+ sc->flags &= ~SCF_ENABLED;
+ spin_unlock_irqrestore(&sc->alloc_lock, flags);
+ wake_up(&sc->halt_wait);
+}
+
+#define BLOCK_DWORDS (PIO_BLOCK_SIZE / sizeof(u32))
+#define dwords_to_blocks(x) DIV_ROUND_UP(x, BLOCK_DWORDS)
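+
+/*
+ * Worked example (illustrative only): if PIO_BLOCK_SIZE were 64 bytes,
+ * BLOCK_DWORDS would be 16, so a 33-dword packet (PBC included) would
+ * round up to dwords_to_blocks(33) == 3 blocks/credits.
+ */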
+
+/*
+ * The send context buffer "allocator".
+ *
+ * @sc: the PIO send context we are allocating from
+ * @dw_len: length of the whole packet - including PBC - in dwords
+ * @cb: optional callback to call when the buffer is finished sending
+ * @arg: argument for cb
+ *
+ * Return a pointer to a PIO buffer, NULL if not enough room, or an
+ * ERR_PTR(-ECOMM) when the link is down.
+ */
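+/*
+ * Accounting sketch, derived from the fields used below: sc->fill and
+ * sc->free are free-running block counts, so the room left is
+ * credits - (fill - alloc_free), where alloc_free is the allocator's
+ * cached copy of free. fill_wrap stays in [0, credits) and gives the
+ * block offset of the next buffer within the context's PIO memory.
+ */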
+struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
+ pio_release_cb cb, void *arg)
+{
+ struct pio_buf *pbuf = NULL;
+ unsigned long flags;
+ unsigned long avail;
+ unsigned long blocks = dwords_to_blocks(dw_len);
+ u32 fill_wrap;
+ int trycount = 0;
+ u32 head, next;
+
+ spin_lock_irqsave(&sc->alloc_lock, flags);
+ if (!(sc->flags & SCF_ENABLED)) {
+ spin_unlock_irqrestore(&sc->alloc_lock, flags);
+ return ERR_PTR(-ECOMM);
+ }
+
+retry:
+ avail = (unsigned long)sc->credits - (sc->fill - sc->alloc_free);
+ if (blocks > avail) {
+ /* not enough room */
+ if (unlikely(trycount)) { /* already tried to get more room */
+ spin_unlock_irqrestore(&sc->alloc_lock, flags);
+ goto done;
+ }
+ /* copy from the releaser's cache line and recalculate */
+ sc->alloc_free = READ_ONCE(sc->free);
+ avail =
+ (unsigned long)sc->credits -
+ (sc->fill - sc->alloc_free);
+ if (blocks > avail) {
+ /* still no room, actively update */
+ sc_release_update(sc);
+ sc->alloc_free = READ_ONCE(sc->free);
+ trycount++;
+ goto retry;
+ }
+ }
+
+ /* there is enough room */
+
+ preempt_disable();
+ this_cpu_inc(*sc->buffers_allocated);
+
+ /* read this once */
+ head = sc->sr_head;
+
+ /* "allocate" the buffer */
+ sc->fill += blocks;
+ fill_wrap = sc->fill_wrap;
+ sc->fill_wrap += blocks;
+ if (sc->fill_wrap >= sc->credits)
+ sc->fill_wrap = sc->fill_wrap - sc->credits;
+
+ /*
+ * Fill the parts that the releaser looks at before moving the head.
+ * The only necessary piece is the sent_at field. The credits
+ * we have just allocated cannot have been returned yet, so the
+ * cb and arg will not be looked at for a "while". Put them
+ * on this side of the memory barrier anyway.
+ */
+ pbuf = &sc->sr[head].pbuf;
+ pbuf->sent_at = sc->fill;
+ pbuf->cb = cb;
+ pbuf->arg = arg;
+ pbuf->sc = sc; /* could be filled in at sc->sr init time */
+ /* make sure this is in memory before updating the head */
+
+ /* calculate next head index, do not store */
+ next = head + 1;
+ if (next >= sc->sr_size)
+ next = 0;
+ /*
+ * update the head - must be last! - the releaser can look at fields
+ * in pbuf once we move the head
+ */
+ smp_wmb();
+ sc->sr_head = next;
+ spin_unlock_irqrestore(&sc->alloc_lock, flags);
+
+ /* finish filling in the buffer outside the lock */
+ pbuf->start = sc->base_addr + fill_wrap * PIO_BLOCK_SIZE;
+ pbuf->end = sc->base_addr + sc->size;
+ pbuf->qw_written = 0;
+ pbuf->carry_bytes = 0;
+ pbuf->carry.val64 = 0;
+done:
+ return pbuf;
+}
+
+/*
+ * There are at least two entities that can turn on credit return
+ * interrupts and they can overlap. Avoid problems by implementing
+ * a count scheme that is enforced by a lock. The lock is needed because
+ * the count and CSR write must be paired.
+ */
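+
+/*
+ * hfi1_sc_wantpiobuf_intr() below maps its needint argument onto these
+ * paired add/del helpers.
+ */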
+
+/*
+ * Start credit return interrupts. This is managed by a count. If already
+ * on, just increment the count.
+ */
+void sc_add_credit_return_intr(struct send_context *sc)
+{
+ unsigned long flags;
+
+ /* lock must surround both the count change and the CSR update */
+ spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
+ if (sc->credit_intr_count == 0) {
+ sc->credit_ctrl |= SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
+ write_kctxt_csr(sc->dd, sc->hw_context,
+ SC(CREDIT_CTRL), sc->credit_ctrl);
+ }
+ sc->credit_intr_count++;
+ spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
+}
+
+/*
+ * Stop credit return interrupts. This is managed by a count. Decrement the
+ * count, if the last user, then turn the credit interrupts off.
+ */
+void sc_del_credit_return_intr(struct send_context *sc)
+{
+ unsigned long flags;
+
+ WARN_ON(sc->credit_intr_count == 0);
+
+ /* lock must surround both the count change and the CSR update */
+ spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
+ sc->credit_intr_count--;
+ if (sc->credit_intr_count == 0) {
+ sc->credit_ctrl &= ~SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
+ write_kctxt_csr(sc->dd, sc->hw_context,
+ SC(CREDIT_CTRL), sc->credit_ctrl);
+ }
+ spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
+}
+
+/*
+ * The caller must be careful when calling this: every call with needint
+ * set must eventually be paired with a call with needint clear.
+ */
+void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint)
+{
+ if (needint)
+ sc_add_credit_return_intr(sc);
+ else
+ sc_del_credit_return_intr(sc);
+ trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl);
+ if (needint)
+ sc_return_credits(sc);
+}
+
+/**
+ * sc_piobufavail - callback when a PIO buffer is available
+ * @sc: the send context
+ *
+ * This is called from the interrupt handler when a PIO buffer is
+ * available after hfi1_verbs_send() returned an error that no buffers were
+ * available. Disable the interrupt if there are no more QPs waiting.
+ */
+static void sc_piobufavail(struct send_context *sc)
+{
+ struct hfi1_devdata *dd = sc->dd;
+ struct list_head *list;
+ struct rvt_qp *qps[PIO_WAIT_BATCH_SIZE];
+ struct rvt_qp *qp;
+ struct hfi1_qp_priv *priv;
+ unsigned long flags;
+ uint i, n = 0, top_idx = 0;
+
+ if (dd->send_contexts[sc->sw_index].type != SC_KERNEL &&
+ dd->send_contexts[sc->sw_index].type != SC_VL15)
+ return;
+ list = &sc->piowait;
+ /*
+ * Note: checking that the piowait list is empty and clearing
+ * the buffer available interrupt needs to be atomic or we
+ * could end up with QPs on the wait list with the interrupt
+ * disabled.
+ */
+ write_seqlock_irqsave(&sc->waitlock, flags);
+ while (!list_empty(list)) {
+ struct iowait *wait;
+
+ if (n == ARRAY_SIZE(qps))
+ break;
+ wait = list_first_entry(list, struct iowait, list);
+ iowait_get_priority(wait);
+ qp = iowait_to_qp(wait);
+ priv = qp->priv;
+ list_del_init(&priv->s_iowait.list);
+ priv->s_iowait.lock = NULL;
+ if (n) {
+ priv = qps[top_idx]->priv;
+ top_idx = iowait_priority_update_top(wait,
+ &priv->s_iowait,
+ n, top_idx);
+ }
+
+ /* refcount held until actual wake up */
+ qps[n++] = qp;
+ }
+ /*
+ * If there had been waiters and there are more,
+ * ensure that we redo the force to avoid a potential hang.
+ */
+ if (n) {
+ hfi1_sc_wantpiobuf_intr(sc, 0);
+ if (!list_empty(list))
+ hfi1_sc_wantpiobuf_intr(sc, 1);
+ }
+ write_sequnlock_irqrestore(&sc->waitlock, flags);
+
+ /* Wake up the top-priority one first */
+ if (n)
+ hfi1_qp_wakeup(qps[top_idx],
+ RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
+ for (i = 0; i < n; i++)
+ if (i != top_idx)
+ hfi1_qp_wakeup(qps[i],
+ RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
+}
+
+/* translate a send credit update to a bit code of reasons */
+static inline int fill_code(u64 hw_free)
+{
+ int code = 0;
+
+ if (hw_free & CR_STATUS_SMASK)
+ code |= PRC_STATUS_ERR;
+ if (hw_free & CR_CREDIT_RETURN_DUE_TO_PBC_SMASK)
+ code |= PRC_PBC;
+ if (hw_free & CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SMASK)
+ code |= PRC_THRESHOLD;
+ if (hw_free & CR_CREDIT_RETURN_DUE_TO_ERR_SMASK)
+ code |= PRC_FILL_ERR;
+ if (hw_free & CR_CREDIT_RETURN_DUE_TO_FORCE_SMASK)
+ code |= PRC_SC_DISABLE;
+ return code;
+}
+
+/* use the jiffies compare to get the wrap right */
+#define sent_before(a, b) time_before(a, b) /* a < b */
+
+/*
+ * The send context buffer "releaser".
+ */
+void sc_release_update(struct send_context *sc)
+{
+ struct pio_buf *pbuf;
+ u64 hw_free;
+ u32 head, tail;
+ unsigned long old_free;
+ unsigned long free;
+ unsigned long extra;
+ unsigned long flags;
+ int code;
+
+ if (!sc)
+ return;
+
+ spin_lock_irqsave(&sc->release_lock, flags);
+ /* update free */
+ hw_free = le64_to_cpu(*sc->hw_free); /* volatile read */
+ old_free = sc->free;
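+ /*
+ * The returned counter is only CR_COUNTER_MASK wide, so compute how
+ * far it advanced modulo that width and fold the delta into the
+ * software running free count.
+ */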
+ extra = (((hw_free & CR_COUNTER_SMASK) >> CR_COUNTER_SHIFT)
+ - (old_free & CR_COUNTER_MASK))
+ & CR_COUNTER_MASK;
+ free = old_free + extra;
+ trace_hfi1_piofree(sc, extra);
+
+ /* call sent buffer callbacks */
+ code = -1; /* code not yet set */
+ head = READ_ONCE(sc->sr_head); /* snapshot the head */
+ tail = sc->sr_tail;
+ while (head != tail) {
+ pbuf = &sc->sr[tail].pbuf;
+
+ if (sent_before(free, pbuf->sent_at)) {
+ /* not sent yet */
+ break;
+ }
+ if (pbuf->cb) {
+ if (code < 0) /* fill in code on first user */
+ code = fill_code(hw_free);
+ (*pbuf->cb)(pbuf->arg, code);
+ }
+
+ tail++;
+ if (tail >= sc->sr_size)
+ tail = 0;
+ }
+ sc->sr_tail = tail;
+ /* make sure tail is updated before free */
+ smp_wmb();
+ sc->free = free;
+ spin_unlock_irqrestore(&sc->release_lock, flags);
+ sc_piobufavail(sc);
+}
+
+/*
+ * Send context group releaser. Argument is the send context that caused
+ * the interrupt. Called from the send context interrupt handler.
+ *
+ * Call release on all contexts in the group.
+ *
+ * This routine takes the sc_lock without an irqsave because it is only
+ * called from an interrupt handler. Adjust if that changes.
+ */
+void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context)
+{
+ struct send_context *sc;
+ u32 sw_index;
+ u32 gc, gc_end;
+
+ spin_lock(&dd->sc_lock);
+ sw_index = dd->hw_to_sw[hw_context];
+ if (unlikely(sw_index >= dd->num_send_contexts)) {
+ dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n",
+ __func__, hw_context, sw_index);
+ goto done;
+ }
+ sc = dd->send_contexts[sw_index].sc;
+ if (unlikely(!sc))
+ goto done;
+
+ gc = group_context(hw_context, sc->group);
+ gc_end = gc + group_size(sc->group);
+ for (; gc < gc_end; gc++) {
+ sw_index = dd->hw_to_sw[gc];
+ if (unlikely(sw_index >= dd->num_send_contexts)) {
+ dd_dev_err(dd,
+ "%s: invalid hw (%u) to sw (%u) mapping\n",
+ __func__, hw_context, sw_index);
+ continue;
+ }
+ sc_release_update(dd->send_contexts[sw_index].sc);
+ }
+done:
+ spin_unlock(&dd->sc_lock);
+}
+
+/*
+ * pio_select_send_context_vl() - select send context
+ * @dd: devdata
+ * @selector: a spreading factor
+ * @vl: this vl
+ *
+ * This function returns a send context based on the selector and a vl.
+ * The mapping fields are protected by RCU
+ */
+struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd,
+ u32 selector, u8 vl)
+{
+ struct pio_vl_map *m;
+ struct pio_map_elem *e;
+ struct send_context *rval;
+
+ /*
+ * NOTE: This should only happen if SC->VL changed after the initial
+ * checks on the QP/AH.
+ * The default below will return VL0's send context.
+ */
+ if (unlikely(vl >= num_vls)) {
+ rval = NULL;
+ goto done;
+ }
+
+ rcu_read_lock();
+ m = rcu_dereference(dd->pio_map);
+ if (unlikely(!m)) {
+ rcu_read_unlock();
+ return dd->vld[0].sc;
+ }
+ e = m->map[vl & m->mask];
+ rval = e->ksc[selector & e->mask];
+ rcu_read_unlock();
+
+done:
+ rval = !rval ? dd->vld[0].sc : rval;
+ return rval;
+}
+
+/*
+ * pio_select_send_context_sc() - select send context
+ * @dd: devdata
+ * @selector: a spreading factor
+ * @sc5: the 5 bit sc
+ *
+ * This function returns a send context based on the selector and an sc5 value.
+ */
+struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd,
+ u32 selector, u8 sc5)
+{
+ u8 vl = sc_to_vlt(dd, sc5);
+
+ return pio_select_send_context_vl(dd, selector, vl);
+}
+
+/*
+ * Free the indicated map struct
+ */
+static void pio_map_free(struct pio_vl_map *m)
+{
+ int i;
+
+ for (i = 0; m && i < m->actual_vls; i++)
+ kfree(m->map[i]);
+ kfree(m);
+}
+
+/*
+ * Handle RCU callback
+ */
+static void pio_map_rcu_callback(struct rcu_head *list)
+{
+ struct pio_vl_map *m = container_of(list, struct pio_vl_map, list);
+
+ pio_map_free(m);
+}
+
+/*
+ * Set credit return threshold for the kernel send context
+ */
+static void set_threshold(struct hfi1_devdata *dd, int scontext, int i)
+{
+ u32 thres;
+
+ thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext],
+ 50),
+ sc_mtu_to_threshold(dd->kernel_send_context[scontext],
+ dd->vld[i].mtu,
+ dd->rcd[0]->rcvhdrqentsize));
+ sc_set_cr_threshold(dd->kernel_send_context[scontext], thres);
+}
+
+/*
+ * pio_map_init - called when #vls change
+ * @dd: hfi1_devdata
+ * @port: port number
+ * @num_vls: number of vls
+ * @vl_scontexts: per vl send context mapping (optional)
+ *
+ * This routine changes the mapping based on the number of vls.
+ *
+ * vl_scontexts is used to specify a non-uniform vl/send context
+ * loading. NULL implies auto computing the loading and giving each
+ * VL a uniform distribution of send contexts per VL.
+ *
+ * The auto algorithm computes the sc_per_vl and the number of extra
+ * send contexts. Any extra send contexts are added from the last VL
+ * on down.
+ *
+ * rcu locking is used here to control access to the mapping fields.
+ *
+ * If either num_vls or num_send_contexts is not a power of 2, the
+ * array sizes in the struct pio_vl_map and the struct pio_map_elem are
+ * rounded up to the next highest power of 2 and the first entry is
+ * reused in a round robin fashion.
+ *
+ * If an error occurs, the map change is not done and the existing
+ * mapping is not changed.
+ *
+ */
+int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
+{
+ int i, j;
+ int extra, sc_per_vl;
+ int scontext = 1;
+ int num_kernel_send_contexts = 0;
+ u8 lvl_scontexts[OPA_MAX_VLS];
+ struct pio_vl_map *oldmap, *newmap;
+
+ if (!vl_scontexts) {
+ for (i = 0; i < dd->num_send_contexts; i++)
+ if (dd->send_contexts[i].type == SC_KERNEL)
+ num_kernel_send_contexts++;
+ /* truncate divide */
+ sc_per_vl = num_kernel_send_contexts / num_vls;
+ /* extras */
+ extra = num_kernel_send_contexts % num_vls;
+ vl_scontexts = lvl_scontexts;
+ /* add extras from last vl down */
+ for (i = num_vls - 1; i >= 0; i--, extra--)
+ vl_scontexts[i] = sc_per_vl + (extra > 0 ? 1 : 0);
+ }
+ /* build new map */
+ newmap = kzalloc(struct_size(newmap, map, roundup_pow_of_two(num_vls)),
+ GFP_KERNEL);
+ if (!newmap)
+ goto bail;
+ newmap->actual_vls = num_vls;
+ newmap->vls = roundup_pow_of_two(num_vls);
+ newmap->mask = (1 << ilog2(newmap->vls)) - 1;
+ for (i = 0; i < newmap->vls; i++) {
+ /* save for wrap around */
+ int first_scontext = scontext;
+
+ if (i < newmap->actual_vls) {
+ int sz = roundup_pow_of_two(vl_scontexts[i]);
+
+ /* only allocate once */
+ newmap->map[i] = kzalloc(struct_size(newmap->map[i],
+ ksc, sz),
+ GFP_KERNEL);
+ if (!newmap->map[i])
+ goto bail;
+ newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
+ /*
+ * assign send contexts and
+ * adjust credit return threshold
+ */
+ for (j = 0; j < sz; j++) {
+ if (dd->kernel_send_context[scontext]) {
+ newmap->map[i]->ksc[j] =
+ dd->kernel_send_context[scontext];
+ set_threshold(dd, scontext, i);
+ }
+ if (++scontext >= first_scontext +
+ vl_scontexts[i])
+ /* wrap back to first send context */
+ scontext = first_scontext;
+ }
+ } else {
+ /* just re-use entry without allocating */
+ newmap->map[i] = newmap->map[i % num_vls];
+ }
+ scontext = first_scontext + vl_scontexts[i];
+ }
+ /* newmap in hand, save old map */
+ spin_lock_irq(&dd->pio_map_lock);
+ oldmap = rcu_dereference_protected(dd->pio_map,
+ lockdep_is_held(&dd->pio_map_lock));
+
+ /* publish newmap */
+ rcu_assign_pointer(dd->pio_map, newmap);
+
+ spin_unlock_irq(&dd->pio_map_lock);
+ /* success, free any old map after grace period */
+ if (oldmap)
+ call_rcu(&oldmap->list, pio_map_rcu_callback);
+ return 0;
+bail:
+ /* free any partial allocation */
+ pio_map_free(newmap);
+ return -ENOMEM;
+}
+
+void free_pio_map(struct hfi1_devdata *dd)
+{
+ /* Free PIO map if allocated */
+ if (rcu_access_pointer(dd->pio_map)) {
+ spin_lock_irq(&dd->pio_map_lock);
+ pio_map_free(rcu_access_pointer(dd->pio_map));
+ RCU_INIT_POINTER(dd->pio_map, NULL);
+ spin_unlock_irq(&dd->pio_map_lock);
+ synchronize_rcu();
+ }
+ kfree(dd->kernel_send_context);
+ dd->kernel_send_context = NULL;
+}
+
+int init_pervl_scs(struct hfi1_devdata *dd)
+{
+ int i;
+ u64 mask, all_vl_mask = (u64)0x80ff; /* VLs 0-7, 15 */
+ u64 data_vls_mask = (u64)0x00ff; /* VLs 0-7 */
+ u32 ctxt;
+ struct hfi1_pportdata *ppd = dd->pport;
+
+ dd->vld[15].sc = sc_alloc(dd, SC_VL15,
+ dd->rcd[0]->rcvhdrqentsize, dd->node);
+ if (!dd->vld[15].sc)
+ return -ENOMEM;
+
+ hfi1_init_ctxt(dd->vld[15].sc);
+ dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048);
+
+ dd->kernel_send_context = kcalloc_node(dd->num_send_contexts,
+ sizeof(struct send_context *),
+ GFP_KERNEL, dd->node);
+ if (!dd->kernel_send_context)
+ goto freesc15;
+
+ dd->kernel_send_context[0] = dd->vld[15].sc;
+
+ for (i = 0; i < num_vls; i++) {
+ /*
+ * Since this function does not deal with a specific
+ * receive context but we need the RcvHdrQ entry size,
+ * use the size from rcd[0]. It is guaranteed to be
+ * valid at this point and will remain the same for all
+ * receive contexts.
+ */
+ dd->vld[i].sc = sc_alloc(dd, SC_KERNEL,
+ dd->rcd[0]->rcvhdrqentsize, dd->node);
+ if (!dd->vld[i].sc)
+ goto nomem;
+ dd->kernel_send_context[i + 1] = dd->vld[i].sc;
+ hfi1_init_ctxt(dd->vld[i].sc);
+ /* non VL15 start with the max MTU */
+ dd->vld[i].mtu = hfi1_max_mtu;
+ }
+ for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
+ dd->kernel_send_context[i + 1] =
+ sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node);
+ if (!dd->kernel_send_context[i + 1])
+ goto nomem;
+ hfi1_init_ctxt(dd->kernel_send_context[i + 1]);
+ }
+
+ sc_enable(dd->vld[15].sc);
+ ctxt = dd->vld[15].sc->hw_context;
+ mask = all_vl_mask & ~(1LL << 15);
+ write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
+ dd_dev_info(dd,
+ "Using send context %u(%u) for VL15\n",
+ dd->vld[15].sc->sw_index, ctxt);
+
+ for (i = 0; i < num_vls; i++) {
+ sc_enable(dd->vld[i].sc);
+ ctxt = dd->vld[i].sc->hw_context;
+ mask = all_vl_mask & ~(data_vls_mask);
+ write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
+ }
+ for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
+ sc_enable(dd->kernel_send_context[i + 1]);
+ ctxt = dd->kernel_send_context[i + 1]->hw_context;
+ mask = all_vl_mask & ~(data_vls_mask);
+ write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
+ }
+
+ if (pio_map_init(dd, ppd->port - 1, num_vls, NULL))
+ goto nomem;
+ return 0;
+
+nomem:
+ for (i = 0; i < num_vls; i++) {
+ sc_free(dd->vld[i].sc);
+ dd->vld[i].sc = NULL;
+ }
+
+ for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++)
+ sc_free(dd->kernel_send_context[i + 1]);
+
+ kfree(dd->kernel_send_context);
+ dd->kernel_send_context = NULL;
+
+freesc15:
+ sc_free(dd->vld[15].sc);
+ return -ENOMEM;
+}
+
+int init_credit_return(struct hfi1_devdata *dd)
+{
+ int ret;
+ int i;
+
+ dd->cr_base = kcalloc(
+ node_affinity.num_possible_nodes,
+ sizeof(struct credit_return_base),
+ GFP_KERNEL);
+ if (!dd->cr_base) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ for_each_node_with_cpus(i) {
+ int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);
+
+ set_dev_node(&dd->pcidev->dev, i);
+ dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev,
+ bytes,
+ &dd->cr_base[i].dma,
+ GFP_KERNEL);
+ if (!dd->cr_base[i].va) {
+ set_dev_node(&dd->pcidev->dev, dd->node);
+ dd_dev_err(dd,
+ "Unable to allocate credit return DMA range for NUMA %d\n",
+ i);
+ ret = -ENOMEM;
+ goto free_cr_base;
+ }
+ }
+ set_dev_node(&dd->pcidev->dev, dd->node);
+
+ ret = 0;
+done:
+ return ret;
+
+free_cr_base:
+ free_credit_return(dd);
+ goto done;
+}
+
+void free_credit_return(struct hfi1_devdata *dd)
+{
+ int i;
+
+ if (!dd->cr_base)
+ return;
+ for (i = 0; i < node_affinity.num_possible_nodes; i++) {
+ if (dd->cr_base[i].va) {
+ dma_free_coherent(&dd->pcidev->dev,
+ TXE_NUM_CONTEXTS *
+ sizeof(struct credit_return),
+ dd->cr_base[i].va,
+ dd->cr_base[i].dma);
+ }
+ }
+ kfree(dd->cr_base);
+ dd->cr_base = NULL;
+}
+
+void seqfile_dump_sci(struct seq_file *s, u32 i,
+ struct send_context_info *sci)
+{
+ struct send_context *sc = sci->sc;
+ u64 reg;
+
+ seq_printf(s, "SCI %u: type %u base %u credits %u\n",
+ i, sci->type, sci->base, sci->credits);
+ seq_printf(s, " flags 0x%x sw_inx %u hw_ctxt %u grp %u\n",
+ sc->flags, sc->sw_index, sc->hw_context, sc->group);
+ seq_printf(s, " sr_size %u credits %u sr_head %u sr_tail %u\n",
+ sc->sr_size, sc->credits, sc->sr_head, sc->sr_tail);
+ seq_printf(s, " fill %lu free %lu fill_wrap %u alloc_free %lu\n",
+ sc->fill, sc->free, sc->fill_wrap, sc->alloc_free);
+ seq_printf(s, " credit_intr_count %u credit_ctrl 0x%llx\n",
+ sc->credit_intr_count, sc->credit_ctrl);
+ reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_STATUS));
+ seq_printf(s, " *hw_free %llu CurrentFree %llu LastReturned %llu\n",
+ (le64_to_cpu(*sc->hw_free) & CR_COUNTER_SMASK) >>
+ CR_COUNTER_SHIFT,
+ (reg >> SC(CREDIT_STATUS_CURRENT_FREE_COUNTER_SHIFT)) &
+ SC(CREDIT_STATUS_CURRENT_FREE_COUNTER_MASK),
+ reg & SC(CREDIT_STATUS_LAST_RETURNED_COUNTER_SMASK));
+}
diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
new file mode 100644
index 000000000000..ab0f9a3a8d12
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -0,0 +1,292 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2015-2017 Intel Corporation.
+ */
+
+#ifndef _PIO_H
+#define _PIO_H
+/* send context types */
+#define SC_KERNEL 0
+#define SC_VL15 1
+#define SC_ACK 2
+#define SC_USER 3 /* must be the last one: it may take all left */
+#define SC_MAX 4 /* count of send context types */
+
+/* invalid send context index */
+#define INVALID_SCI 0xff
+
+/* PIO buffer release callback function */
+typedef void (*pio_release_cb)(void *arg, int code);
+
+/* PIO release codes - in bits, as there could be more than one that applies */
+#define PRC_OK 0 /* no known error */
+#define PRC_STATUS_ERR 0x01 /* credit return due to status error */
+#define PRC_PBC 0x02 /* credit return due to PBC */
+#define PRC_THRESHOLD 0x04 /* credit return due to threshold */
+#define PRC_FILL_ERR 0x08 /* credit return due to fill error */
+#define PRC_FORCE 0x10 /* credit return due to credit force */
+#define PRC_SC_DISABLE 0x20 /* clean-up after a context disable */
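+/* more than one code can be set at once; callbacks receive the OR of all that apply */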
+
+/* byte helper */
+union mix {
+ u64 val64;
+ u32 val32[2];
+ u8 val8[8];
+};
+
+/* an allocated PIO buffer */
+struct pio_buf {
+ struct send_context *sc;/* back pointer to owning send context */
+ pio_release_cb cb; /* called when the buffer is released */
+ void *arg; /* argument for cb */
+ void __iomem *start; /* buffer start address */
+ void __iomem *end; /* context end address */
+ unsigned long sent_at; /* buffer is sent when <= free */
+ union mix carry; /* pending unwritten bytes */
+ u16 qw_written; /* QW written so far */
+ u8 carry_bytes; /* number of valid bytes in carry */
+};
+
+/* cache line aligned pio buffer array */
+union pio_shadow_ring {
+ struct pio_buf pbuf;
+} ____cacheline_aligned;
+
+/* per-NUMA send context */
+struct send_context {
+ /* read-only after init */
+ struct hfi1_devdata *dd; /* device */
+ union pio_shadow_ring *sr; /* shadow ring */
+ void __iomem *base_addr; /* start of PIO memory */
+ u32 __percpu *buffers_allocated;/* count of buffers allocated */
+ u32 size; /* context size, in bytes */
+
+ int node; /* context home node */
+ u32 sr_size; /* size of the shadow ring */
+ u16 flags; /* flags */
+ u8 type; /* context type */
+ u8 sw_index; /* software index number */
+ u8 hw_context; /* hardware context number */
+ u8 group; /* credit return group */
+
+ /* allocator fields */
+ spinlock_t alloc_lock ____cacheline_aligned_in_smp;
+ u32 sr_head; /* shadow ring head */
+ unsigned long fill; /* official alloc count */
+ unsigned long alloc_free; /* copy of free (less cache thrash) */
+ u32 fill_wrap; /* tracks fill within ring */
+ u32 credits; /* number of blocks in context */
+ /* adding a new field here would make it part of this cacheline */
+
+ /* releaser fields */
+ spinlock_t release_lock ____cacheline_aligned_in_smp;
+ u32 sr_tail; /* shadow ring tail */
+ unsigned long free; /* official free count */
+ volatile __le64 *hw_free; /* HW free counter */
+ /* list for PIO waiters */
+ struct list_head piowait ____cacheline_aligned_in_smp;
+ seqlock_t waitlock;
+
+ spinlock_t credit_ctrl_lock ____cacheline_aligned_in_smp;
+ u32 credit_intr_count; /* count of credit intr users */
+ u64 credit_ctrl; /* cache for credit control */
+ wait_queue_head_t halt_wait; /* wait until kernel sees interrupt */
+ struct work_struct halt_work; /* halted context work queue entry */
+};
+
+/* send context flags */
+#define SCF_ENABLED 0x01
+#define SCF_IN_FREE 0x02
+#define SCF_HALTED 0x04
+#define SCF_FROZEN 0x08
+#define SCF_LINK_DOWN 0x10
+
+struct send_context_info {
+ struct send_context *sc; /* allocated working context */
+ u16 allocated; /* has this been allocated? */
+ u16 type; /* context type */
+ u16 base; /* base in PIO array */
+ u16 credits; /* size in PIO array */
+};
+
+/* DMA credit return, index is always (context & 0x7) */
+struct credit_return {
+ volatile __le64 cr[8];
+};
+
+/* NUMA indexed credit return array */
+struct credit_return_base {
+ struct credit_return *va;
+ dma_addr_t dma;
+};
+
+/* send context configuration sizes (one per type) */
+struct sc_config_sizes {
+ short int size;
+ short int count;
+};
+
+/*
+ * The diagram below details the relationship of the mapping structures
+ *
+ * Since the mapping now allows for non-uniform send contexts per vl, the
+ * number of send contexts for a vl is either the vl_scontexts[vl] or
+ * a computation based on num_kernel_send_contexts/num_vls:
+ *
+ * For example:
+ * nactual = vl_scontexts ? vl_scontexts[vl] : num_kernel_send_contexts/num_vls
+ *
+ * n = roundup to next highest power of 2 using nactual
+ *
+ * In the case where num_kernel_send_contexts/num_vls doesn't divide
+ * evenly, the extras are added from the last vl downward.
+ *
+ * For the case where n > nactual, the send contexts are assigned
+ * in a round robin fashion wrapping back to the first send context
+ * for a particular vl.
+ *
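+ * For example (hypothetical numbers), spreading 9 kernel send contexts
+ * over 8 VLs gives sc_per_vl = 1 with one extra, so VL7 gets 2 contexts
+ * and VLs 0-6 get 1 each; each per-VL array is then rounded up to a
+ * power of 2 and filled round robin.
+ *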
+ * dd->pio_map
+ * | pio_map_elem[0]
+ * | +--------------------+
+ * v | mask |
+ * pio_vl_map |--------------------|
+ * +--------------------------+ | ksc[0] -> sc 1 |
+ * | list (RCU) | |--------------------|
+ * |--------------------------| ->| ksc[1] -> sc 2 |
+ * | mask | --/ |--------------------|
+ * |--------------------------| -/ | * |
+ * | actual_vls (max 8) | -/ |--------------------|
+ * |--------------------------| --/ | ksc[n-1] -> sc n |
+ * | vls (max 8) | -/ +--------------------+
+ * |--------------------------| --/
+ * | map[0] |-/
+ * |--------------------------| +--------------------+
+ * | map[1] |--- | mask |
+ * |--------------------------| \---- |--------------------|
+ * | * | \-- | ksc[0] -> sc 1+n |
+ * | * | \---- |--------------------|
+ * | * | \->| ksc[1] -> sc 2+n |
+ * |--------------------------| |--------------------|
+ * | map[vls - 1] |- | * |
+ * +--------------------------+ \- |--------------------|
+ * \- | ksc[m-1] -> sc m+n |
+ * \ +--------------------+
+ * \-
+ * \
+ * \- +----------------------+
+ * \- | mask |
+ * \ |----------------------|
+ * \- | ksc[0] -> sc 1+m+n |
+ * \- |----------------------|
+ * >| ksc[1] -> sc 2+m+n |
+ * |----------------------|
+ * | * |
+ * |----------------------|
+ * | ksc[o-1] -> sc o+m+n |
+ * +----------------------+
+ *
+ */
+
+/* Initial number of send contexts per VL */
+#define INIT_SC_PER_VL 2
+
+/*
+ * struct pio_map_elem - mapping for a vl
+ * @mask - selector mask
+ * @ksc - array of kernel send contexts for this vl
+ *
+ * The mask is used to "mod" the selector to
+ * produce an index into the trailing array of
+ * kscs.
+ */
+struct pio_map_elem {
+ u32 mask;
+ struct send_context *ksc[];
+};
+
+/*
+ * struct pio_vl_map - mapping for a vl
+ * @list - rcu head for free callback
+ * @mask - vl mask to "mod" the vl to produce an index into the map array
+ * @actual_vls - number of vls
+ * @vls - number of vls rounded to the next power of 2
+ * @map - array of pio_map_elem entries
+ *
+ * This is the parent mapping structure. The trailing members of the
+ * struct point to pio_map_elem entries, which in turn point to an
+ * array of kscs for that vl.
+ */
+struct pio_vl_map {
+ struct rcu_head list;
+ u32 mask;
+ u8 actual_vls;
+ u8 vls;
+ struct pio_map_elem *map[];
+};
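+
+/*
+ * A lookup walks both levels, as in pio_select_send_context_vl():
+ * m = rcu_dereference(dd->pio_map);
+ * e = m->map[vl & m->mask];
+ * sc = e->ksc[selector & e->mask];
+ */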
+
+int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls,
+ u8 *vl_scontexts);
+void free_pio_map(struct hfi1_devdata *dd);
+struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd,
+ u32 selector, u8 vl);
+struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd,
+ u32 selector, u8 sc5);
+
+/* send context functions */
+int init_credit_return(struct hfi1_devdata *dd);
+void free_credit_return(struct hfi1_devdata *dd);
+int init_sc_pools_and_sizes(struct hfi1_devdata *dd);
+int init_send_contexts(struct hfi1_devdata *dd);
+int init_pervl_scs(struct hfi1_devdata *dd);
+struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
+ uint hdrqentsize, int numa);
+void sc_free(struct send_context *sc);
+int sc_enable(struct send_context *sc);
+void sc_disable(struct send_context *sc);
+int sc_restart(struct send_context *sc);
+void sc_return_credits(struct send_context *sc);
+void sc_flush(struct send_context *sc);
+void sc_stop(struct send_context *sc, int bit);
+struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
+ pio_release_cb cb, void *arg);
+void sc_release_update(struct send_context *sc);
+void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context);
+void sc_add_credit_return_intr(struct send_context *sc);
+void sc_del_credit_return_intr(struct send_context *sc);
+void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold);
+u32 sc_percent_to_threshold(struct send_context *sc, u32 percent);
+u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize);
+void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint);
+void sc_wait(struct hfi1_devdata *dd);
+void set_pio_integrity(struct send_context *sc);
+
+/* support functions */
+void pio_reset_all(struct hfi1_devdata *dd);
+void pio_freeze(struct hfi1_devdata *dd);
+void pio_kernel_unfreeze(struct hfi1_devdata *dd);
+void pio_kernel_linkup(struct hfi1_devdata *dd);
+
+/* global PIO send control operations */
+#define PSC_GLOBAL_ENABLE 0
+#define PSC_GLOBAL_DISABLE 1
+#define PSC_GLOBAL_VLARB_ENABLE 2
+#define PSC_GLOBAL_VLARB_DISABLE 3
+#define PSC_CM_RESET 4
+#define PSC_DATA_VL_ENABLE 5
+#define PSC_DATA_VL_DISABLE 6
+
+void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl);
+void pio_send_control(struct hfi1_devdata *dd, int op);
+
+/* PIO copy routines */
+void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
+ const void *from, size_t count);
+void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc,
+ const void *from, size_t nbytes);
+void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes);
+void seg_pio_copy_end(struct pio_buf *pbuf);
+
+void seqfile_dump_sci(struct seq_file *s, u32 i,
+ struct send_context_info *sci);
+
+#endif /* _PIO_H */
diff --git a/drivers/infiniband/hw/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c
new file mode 100644
index 000000000000..80fee812a930
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/pio_copy.c
@@ -0,0 +1,715 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ */
+
+#include "hfi.h"
+
+/* additive distance between non-SOP and SOP space */
+#define SOP_DISTANCE (TXE_PIO_SIZE / 2)
+#define PIO_BLOCK_MASK (PIO_BLOCK_SIZE - 1)
+/* number of QUADWORDs in a block */
+#define PIO_BLOCK_QWS (PIO_BLOCK_SIZE / sizeof(u64))
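+
+/*
+ * As used below, the same PIO buffer is reachable through two address
+ * ranges SOP_DISTANCE apart: the first block of a packet is written
+ * through the upper (SOP=1) range and the copy then "drops out" of the
+ * SOP range for the remaining blocks.
+ */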
+
+/**
+ * pio_copy - copy data block to MMIO space
+ * @dd: hfi1 dev data
+ * @pbuf: a number of blocks allocated within a PIO send context
+ * @pbc: PBC to send
+ * @from: source, must be 8 byte aligned
+ * @count: number of DWORD (32-bit) quantities to copy from source
+ *
+ * Copy data from source to PIO Send Buffer memory, 8 bytes at a time.
+ * Must always write full PIO_BLOCK_SIZE byte blocks. The first block must
+ * be written to the corresponding SOP=1 address.
+ *
+ * Known:
+ * o pbuf->start always starts on a block boundary
+ * o pbuf can wrap only at a block boundary
+ */
+void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
+ const void *from, size_t count)
+{
+ void __iomem *dest = pbuf->start + SOP_DISTANCE;
+ void __iomem *send = dest + PIO_BLOCK_SIZE;
+ void __iomem *dend; /* 8-byte data end */
+
+ /* write the PBC */
+ writeq(pbc, dest);
+ dest += sizeof(u64);
+
+ /* calculate where the QWORD data ends - in SOP=1 space */
+ dend = dest + ((count >> 1) * sizeof(u64));
+
+ if (dend < send) {
+ /*
+ * all QWORD data is within the SOP block, does *not*
+ * reach the end of the SOP block
+ */
+
+ while (dest < dend) {
+ writeq(*(u64 *)from, dest);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+ /*
+ * No boundary checks are needed here:
+ * 0. We're not on the SOP block boundary
+ * 1. The possible DWORD dangle will still be within
+ * the SOP block
+ * 2. We cannot wrap except on a block boundary.
+ */
+ } else {
+ /* QWORD data extends _to_ or beyond the SOP block */
+
+ /* write 8-byte SOP chunk data */
+ while (dest < send) {
+ writeq(*(u64 *)from, dest);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+ /* drop out of the SOP range */
+ dest -= SOP_DISTANCE;
+ dend -= SOP_DISTANCE;
+
+ /*
+ * If the wrap comes before or matches the data end,
+ * copy until the wrap, then wrap.
+ *
+ * If the data ends at the end of the SOP above and
+ * the buffer wraps, then pbuf->end == dend == dest
+ * and nothing will get written, but we will wrap in
+ * case there is a dangling DWORD.
+ */
+ if (pbuf->end <= dend) {
+ while (dest < pbuf->end) {
+ writeq(*(u64 *)from, dest);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+
+ dest -= pbuf->sc->size;
+ dend -= pbuf->sc->size;
+ }
+
+ /* write 8-byte non-SOP, non-wrap chunk data */
+ while (dest < dend) {
+ writeq(*(u64 *)from, dest);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+ }
+ /* at this point we have wrapped if we are going to wrap */
+
+ /* write dangling u32, if any */
+ if (count & 1) {
+ union mix val;
+
+ val.val64 = 0;
+ val.val32[0] = *(u32 *)from;
+ writeq(val.val64, dest);
+ dest += sizeof(u64);
+ }
+ /*
+ * fill in rest of block, no need to check pbuf->end
+ * as we only wrap on a block boundary
+ */
+ while (((unsigned long)dest & PIO_BLOCK_MASK) != 0) {
+ writeq(0, dest);
+ dest += sizeof(u64);
+ }
+
+ /* finished with this buffer */
+ this_cpu_dec(*pbuf->sc->buffers_allocated);
+ preempt_enable();
+}
+
+/*
+ * Handle carry bytes using shifts and masks.
+ *
+ * NOTE: the unused portion of carry is expected to always be zero.
+ */
+
+/*
+ * "zero" shift - bit shift used to zero out upper bytes. Input is
+ * the count of LSB bytes to preserve.
+ */
+#define zshift(x) (8 * (8 - (x)))
+
+/*
+ * "merge" shift - bit shift used to merge with carry bytes. Input is
+ * the LSB byte count to move beyond.
+ */
+#define mshift(x) (8 * (x))
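+
+/*
+ * Worked example: with carry_bytes == 3, mshift(3) == 24 and
+ * zshift(3) == 40, so merge_write8() below writes the 3 carry bytes
+ * plus the low 5 bytes of the new source quad word, and keeps the
+ * remaining 3 source bytes as the next carry (relying on the
+ * little-endian overlay of union mix).
+ */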
+
+/*
+ * Jump copy - no-loop copy for < 8 bytes.
+ */
+static inline void jcopy(u8 *dest, const u8 *src, u32 n)
+{
+ switch (n) {
+ case 7:
+ *dest++ = *src++;
+ fallthrough;
+ case 6:
+ *dest++ = *src++;
+ fallthrough;
+ case 5:
+ *dest++ = *src++;
+ fallthrough;
+ case 4:
+ *dest++ = *src++;
+ fallthrough;
+ case 3:
+ *dest++ = *src++;
+ fallthrough;
+ case 2:
+ *dest++ = *src++;
+ fallthrough;
+ case 1:
+ *dest++ = *src++;
+ }
+}
+
+/*
+ * Read nbytes from "from" and place them in the low bytes
+ * of pbuf->carry. Other bytes are left as-is. Any previous
+ * value in pbuf->carry is lost.
+ *
+ * NOTES:
+ * o do not read from "from" if nbytes is zero
+ * o from may _not_ be u64 aligned.
+ */
+static inline void read_low_bytes(struct pio_buf *pbuf, const void *from,
+ unsigned int nbytes)
+{
+ pbuf->carry.val64 = 0;
+ jcopy(&pbuf->carry.val8[0], from, nbytes);
+ pbuf->carry_bytes = nbytes;
+}
+
+/*
+ * Read nbytes bytes from "from" and put them at the end of pbuf->carry.
+ * It is expected that the extra read does not overfill carry.
+ *
+ * NOTES:
+ * o from may _not_ be u64 aligned
+ * o nbytes may span a QW boundary
+ */
+static inline void read_extra_bytes(struct pio_buf *pbuf,
+ const void *from, unsigned int nbytes)
+{
+ jcopy(&pbuf->carry.val8[pbuf->carry_bytes], from, nbytes);
+ pbuf->carry_bytes += nbytes;
+}
+
+/*
+ * Write a quad word using parts of pbuf->carry and the next 8 bytes of src.
+ * Put the unused part of the next 8 bytes of src into the LSB bytes of
+ * pbuf->carry with the upper bytes zeroed.
+ *
+ * NOTES:
+ * o result must keep unused bytes zeroed
+ * o src must be u64 aligned
+ */
+static inline void merge_write8(
+ struct pio_buf *pbuf,
+ void __iomem *dest,
+ const void *src)
+{
+ u64 new, temp;
+
+ new = *(u64 *)src;
+ temp = pbuf->carry.val64 | (new << mshift(pbuf->carry_bytes));
+ writeq(temp, dest);
+ pbuf->carry.val64 = new >> zshift(pbuf->carry_bytes);
+}
+
+/*
+ * Write a quad word using all bytes of carry.
+ */
+static inline void carry8_write8(union mix carry, void __iomem *dest)
+{
+ writeq(carry.val64, dest);
+}
+
+/*
+ * Write a quad word using all the valid bytes of carry. If carry
+ * has zero valid bytes, nothing is written.
+ * Returns 0 on nothing written, non-zero on quad word written.
+ */
+static inline int carry_write8(struct pio_buf *pbuf, void __iomem *dest)
+{
+ if (pbuf->carry_bytes) {
+ /* unused bytes are always kept zeroed, so just write */
+ writeq(pbuf->carry.val64, dest);
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Segmented PIO Copy - start
+ *
+ * Start a PIO copy.
+ *
+ * @pbuf: destination buffer
+ * @pbc: the PBC for the PIO buffer
+ * @from: data source, QWORD aligned
+ * @nbytes: bytes to copy
+ */
+void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc,
+ const void *from, size_t nbytes)
+{
+ void __iomem *dest = pbuf->start + SOP_DISTANCE;
+ void __iomem *send = dest + PIO_BLOCK_SIZE;
+ void __iomem *dend; /* 8-byte data end */
+
+ writeq(pbc, dest);
+ dest += sizeof(u64);
+
+ /* calculate where the QWORD data ends - in SOP=1 space */
+ dend = dest + ((nbytes >> 3) * sizeof(u64));
+
+ if (dend < send) {
+ /*
+ * all QWORD data is within the SOP block, does *not*
+ * reach the end of the SOP block
+ */
+
+ while (dest < dend) {
+ writeq(*(u64 *)from, dest);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+ /*
+ * No boundary checks are needed here:
+ * 0. We're not on the SOP block boundary
+ * 1. The possible DWORD dangle will still be within
+ * the SOP block
+ * 2. We cannot wrap except on a block boundary.
+ */
+ } else {
+ /* QWORD data extends _to_ or beyond the SOP block */
+
+ /* write 8-byte SOP chunk data */
+ while (dest < send) {
+ writeq(*(u64 *)from, dest);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+ /* drop out of the SOP range */
+ dest -= SOP_DISTANCE;
+ dend -= SOP_DISTANCE;
+
+ /*
+ * If the wrap comes before or matches the data end,
+ * copy until the wrap, then wrap.
+ *
+ * If the data ends at the end of the SOP above and
+ * the buffer wraps, then pbuf->end == dend == dest
+ * and nothing will get written, but we will wrap in
+ * case there is a dangling DWORD.
+ */
+ if (pbuf->end <= dend) {
+ while (dest < pbuf->end) {
+ writeq(*(u64 *)from, dest);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+
+ dest -= pbuf->sc->size;
+ dend -= pbuf->sc->size;
+ }
+
+ /* write 8-byte non-SOP, non-wrap chunk data */
+ while (dest < dend) {
+ writeq(*(u64 *)from, dest);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+ }
+ /* at this point we have wrapped if we are going to wrap */
+
+ /* ...but it doesn't matter as we're done writing */
+
+ /* save dangling bytes, if any */
+ read_low_bytes(pbuf, from, nbytes & 0x7);
+
+ pbuf->qw_written = 1 /*PBC*/ + (nbytes >> 3);
+}
+
+/*
+ * Mid copy helper, "mixed case" - source is 64-bit aligned but carry
+ * bytes are non-zero.
+ *
+ * Whole u64s must be written to the chip, so bytes must be manually merged.
+ *
+ * @pbuf: destination buffer
+ * @from: data source, is QWORD aligned.
+ * @nbytes: bytes to copy
+ *
+ * Must handle nbytes < 8.
+ */
+static void mid_copy_mix(struct pio_buf *pbuf, const void *from, size_t nbytes)
+{
+ void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
+ void __iomem *dend; /* 8-byte data end */
+ unsigned long qw_to_write = nbytes >> 3;
+ unsigned long bytes_left = nbytes & 0x7;
+
+ /* calculate 8-byte data end */
+ dend = dest + (qw_to_write * sizeof(u64));
+
+ if (pbuf->qw_written < PIO_BLOCK_QWS) {
+ /*
+ * Still within SOP block. We don't need to check for
+ * wrap because we are still in the first block and
+ * can only wrap on block boundaries.
+ */
+ void __iomem *send; /* SOP end */
+ void __iomem *xend;
+
+ /*
+ * calculate the end of data or end of block, whichever
+ * comes first
+ */
+ send = pbuf->start + PIO_BLOCK_SIZE;
+ xend = min(send, dend);
+
+ /* shift up to SOP=1 space */
+ dest += SOP_DISTANCE;
+ xend += SOP_DISTANCE;
+
+ /* write 8-byte chunk data */
+ while (dest < xend) {
+ merge_write8(pbuf, dest, from);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+
+ /* shift down to SOP=0 space */
+ dest -= SOP_DISTANCE;
+ }
+ /*
+ * At this point dest could be (either, both, or neither):
+ * - at dend
+ * - at the wrap
+ */
+
+ /*
+ * If the wrap comes before or matches the data end,
+ * copy until the wrap, then wrap.
+ *
+ * If dest is already at the wrap, we will fall into the if,
+ * skip the loop, and simply wrap.
+ *
+ * If the data ends at the end of the SOP above and
+ * the buffer wraps, then pbuf->end == dend == dest
+ * and nothing will get written.
+ */
+ if (pbuf->end <= dend) {
+ while (dest < pbuf->end) {
+ merge_write8(pbuf, dest, from);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+
+ dest -= pbuf->sc->size;
+ dend -= pbuf->sc->size;
+ }
+
+ /* write 8-byte non-SOP, non-wrap chunk data */
+ while (dest < dend) {
+ merge_write8(pbuf, dest, from);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+
+ pbuf->qw_written += qw_to_write;
+
+ /* handle carry and left-over bytes */
+ if (pbuf->carry_bytes + bytes_left >= 8) {
+ unsigned long nread;
+
+ /* there is enough to fill another qw - fill carry */
+ nread = 8 - pbuf->carry_bytes;
+ read_extra_bytes(pbuf, from, nread);
+
+ /*
+ * One more write - but need to make sure dest is correct.
+ * Check for wrap and the possibility the write
+ * should be in SOP space.
+ *
+ * The two checks immediately below cannot both be true, hence
+ * the else. If we have wrapped, we cannot still be within the
+ * first block. Conversely, if we are still in the first block,
+ * we cannot have wrapped. We do the wrap check first as that
+ * is more likely.
+ */
+ /* adjust if we have wrapped */
+ if (dest >= pbuf->end)
+ dest -= pbuf->sc->size;
+ /* jump to the SOP range if within the first block */
+ else if (pbuf->qw_written < PIO_BLOCK_QWS)
+ dest += SOP_DISTANCE;
+
+ /* flush out full carry */
+ carry8_write8(pbuf->carry, dest);
+ pbuf->qw_written++;
+
+ /* now adjust and read the rest of the bytes into carry */
+ bytes_left -= nread;
+ from += nread; /* from is now not aligned */
+ read_low_bytes(pbuf, from, bytes_left);
+ } else {
+ /* not enough to fill another qw, append the rest to carry */
+ read_extra_bytes(pbuf, from, bytes_left);
+ }
+}
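+
+/*
+ * Worked example of the carry handling above (illustration only): with
+ * pbuf->carry_bytes == 5 and nbytes == 7, qw_to_write == 0 and
+ * bytes_left == 7.  Since 5 + 7 >= 8, nread == 3 bytes complete the
+ * carry qword, carry8_write8() flushes it (qw_written bumps by one),
+ * and the remaining 4 bytes restart the carry via read_low_bytes().
+ */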
+
+/*
+ * Mid copy helper, "straight case" - source pointer is 64-bit aligned
+ * with no carry bytes.
+ *
+ * @pbuf: destination buffer
+ * @from: data source, QWORD aligned
+ * @nbytes: bytes to copy
+ *
+ * Must handle nbytes < 8.
+ */
+static void mid_copy_straight(struct pio_buf *pbuf,
+ const void *from, size_t nbytes)
+{
+ void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
+ void __iomem *dend; /* 8-byte data end */
+
+ /* calculate 8-byte data end */
+ dend = dest + ((nbytes >> 3) * sizeof(u64));
+
+ if (pbuf->qw_written < PIO_BLOCK_QWS) {
+ /*
+ * Still within SOP block. We don't need to check for
+ * wrap because we are still in the first block and
+ * can only wrap on block boundaries.
+ */
+ void __iomem *send; /* SOP end */
+ void __iomem *xend;
+
+ /*
+ * calculate the end of data or end of block, whichever
+ * comes first
+ */
+ send = pbuf->start + PIO_BLOCK_SIZE;
+ xend = min(send, dend);
+
+ /* shift up to SOP=1 space */
+ dest += SOP_DISTANCE;
+ xend += SOP_DISTANCE;
+
+ /* write 8-byte chunk data */
+ while (dest < xend) {
+ writeq(*(u64 *)from, dest);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+
+ /* shift down to SOP=0 space */
+ dest -= SOP_DISTANCE;
+ }
+ /*
+ * At this point dest could be (either, both, or neither):
+ * - at dend
+ * - at the wrap
+ */
+
+ /*
+ * If the wrap comes before or matches the data end,
+ * copy until the wrap, then wrap.
+ *
+ * If dest is already at the wrap, we will fall into the if,
+ * skip the loop, and simply wrap.
+ *
+ * If the data ends at the end of the SOP above and
+ * the buffer wraps, then pbuf->end == dend == dest
+ * and nothing will get written.
+ */
+ if (pbuf->end <= dend) {
+ while (dest < pbuf->end) {
+ writeq(*(u64 *)from, dest);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+
+ dest -= pbuf->sc->size;
+ dend -= pbuf->sc->size;
+ }
+
+ /* write 8-byte non-SOP, non-wrap chunk data */
+ while (dest < dend) {
+ writeq(*(u64 *)from, dest);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+
+ /* we know carry_bytes was zero on entry to this routine */
+ read_low_bytes(pbuf, from, nbytes & 0x7);
+
+ pbuf->qw_written += nbytes >> 3;
+}
+
+/*
+ * Segmented PIO Copy - middle
+ *
+ * Must handle any aligned tail and any aligned source with any byte count.
+ *
+ * @pbuf: a number of blocks allocated within a PIO send context
+ * @from: data source
+ * @nbytes: number of bytes to copy
+ */
+void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
+{
+ unsigned long from_align = (unsigned long)from & 0x7;
+
+ if (pbuf->carry_bytes + nbytes < 8) {
+ /* not enough bytes to fill a QW */
+ read_extra_bytes(pbuf, from, nbytes);
+ return;
+ }
+
+ if (from_align) {
+ /* misaligned source pointer - align it */
+ unsigned long to_align;
+
+ /* bytes to read to align "from" */
+ to_align = 8 - from_align;
+
+ /*
+ * In the advance-to-alignment logic below, we do not need
+ * to check if we are using more than nbytes. This is because
+ * if we are here, we already know that carry+nbytes will
+ * fill at least one QW.
+ */
+ if (pbuf->carry_bytes + to_align < 8) {
+ /* not enough align bytes to fill a QW */
+ read_extra_bytes(pbuf, from, to_align);
+ from += to_align;
+ nbytes -= to_align;
+ } else {
+ /* bytes to fill carry */
+ unsigned long to_fill = 8 - pbuf->carry_bytes;
+ /* bytes left over to be read */
+ unsigned long extra = to_align - to_fill;
+ void __iomem *dest;
+
+ /* fill carry... */
+ read_extra_bytes(pbuf, from, to_fill);
+ from += to_fill;
+ nbytes -= to_fill;
+ /* may not be enough valid bytes left to align */
+ if (extra > nbytes)
+ extra = nbytes;
+
+ /* ...now write carry */
+ dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
+
+ /*
+ * The two checks immediately below cannot both be
+ * true, hence the else. If we have wrapped, we
+ * cannot still be within the first block.
+ * Conversely, if we are still in the first block, we
+ * cannot have wrapped. We do the wrap check first
+ * as that is more likely.
+ */
+ /* adjust if we've wrapped */
+ if (dest >= pbuf->end)
+ dest -= pbuf->sc->size;
+ /* jump to SOP range if within the first block */
+ else if (pbuf->qw_written < PIO_BLOCK_QWS)
+ dest += SOP_DISTANCE;
+
+ carry8_write8(pbuf->carry, dest);
+ pbuf->qw_written++;
+
+ /* read any extra bytes to do final alignment */
+ /* this will overwrite anything in pbuf->carry */
+ read_low_bytes(pbuf, from, extra);
+ from += extra;
+ nbytes -= extra;
+ /*
+ * If no bytes are left, return early - we are done.
+ * NOTE: This short-circuit is *required* because
+ * "extra" may have been reduced in size and "from"
+ * is not aligned, as required when leaving this
+ * if block.
+ */
+ if (nbytes == 0)
+ return;
+ }
+
+ /* at this point, from is QW aligned */
+ }
+
+ if (pbuf->carry_bytes)
+ mid_copy_mix(pbuf, from, nbytes);
+ else
+ mid_copy_straight(pbuf, from, nbytes);
+}
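+
+/*
+ * Worked example of the alignment path above (illustration only): if
+ * "from" is misaligned by 3 (to_align == 5) and pbuf->carry_bytes == 6,
+ * then to_fill == 2 bytes complete the carry qword, the carry is
+ * written, and extra == 3 bytes seed a new carry, leaving "from" QW
+ * aligned for mid_copy_mix().  Had carry_bytes been 1, 1 + 5 < 8 and
+ * all 5 alignment bytes would simply have been appended to the carry.
+ */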
+
+/*
+ * Segmented PIO Copy - end
+ *
+ * Write any remainder (in pbuf->carry) and finish writing the whole block.
+ *
+ * @pbuf: a number of blocks allocated within a PIO send context
+ */
+void seg_pio_copy_end(struct pio_buf *pbuf)
+{
+ void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
+
+ /*
+ * The two checks immediately below cannot both be true, hence the
+ * else. If we have wrapped, we cannot still be within the first
+ * block. Conversely, if we are still in the first block, we
+ * cannot have wrapped. We do the wrap check first as that is
+ * more likely.
+ */
+ /* adjust if we have wrapped */
+ if (dest >= pbuf->end)
+ dest -= pbuf->sc->size;
+ /* jump to the SOP range if within the first block */
+ else if (pbuf->qw_written < PIO_BLOCK_QWS)
+ dest += SOP_DISTANCE;
+
+ /* write final bytes, if any */
+ if (carry_write8(pbuf, dest)) {
+ dest += sizeof(u64);
+ /*
+ * NOTE: We do not need to recalculate whether dest needs
+ * SOP_DISTANCE or not.
+ *
+ * If we are in the first block and the dangle write
+ * keeps us in the same block, dest will need
+ * to retain SOP_DISTANCE in the loop below.
+ *
+ * If we are in the first block and the dangle write pushes
+ * us to the next block, then loop below will not run
+ * and dest is not used. Hence we do not need to update
+ * it.
+ *
+ * If we are past the first block, then SOP_DISTANCE
+ * was never added, so there is nothing to do.
+ */
+ }
+
+ /* fill in rest of block */
+ while (((unsigned long)dest & PIO_BLOCK_MASK) != 0) {
+ writeq(0, dest);
+ dest += sizeof(u64);
+ }
+
+ /* finished with this buffer */
+ this_cpu_dec(*pbuf->sc->buffers_allocated);
+ preempt_enable();
+}
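
The wrap handling used throughout the copy helpers above reduces to simple
offset arithmetic within one send context. The sketch below is a minimal
user-space model of that arithmetic, not driver code: the block and context
sizes, and the single wrap rule (subtract the context size once the offset
reaches the context end), are assumptions chosen for illustration, and plain
offsets stand in for the __iomem pointers.

#include <stdint.h>
#include <stdio.h>

#define BLOCK_SIZE 64u                 /* assumed PIO block size, bytes */
#define CTX_SIZE   (4u * BLOCK_SIZE)   /* hypothetical send context size */

int main(void)
{
	uint32_t start = 3u * BLOCK_SIZE;  /* buffer begins in the last block */
	uint32_t end = CTX_SIZE;           /* wrap point (pbuf->end analogue) */
	uint32_t dest = start + 8;         /* skip the PBC qword */
	uint32_t nbytes = 96;              /* 12 payload qwords */
	uint32_t qw;

	for (qw = 0; qw < nbytes / 8; qw++) {
		if (dest >= end)
			dest -= CTX_SIZE;  /* wrap back to the context base */
		dest += 8;                 /* one writeq() worth of data */
	}
	/* prints: final offset 40, qw_written 13 */
	printf("final offset %u, qw_written %u\n", dest, 1 + nbytes / 8);
	return 0;
}
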
diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c
new file mode 100644
index 000000000000..7bd0e9b6cb50
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/platform.c
@@ -0,0 +1,1035 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ */
+
+#include <linux/firmware.h>
+
+#include "hfi.h"
+#include "efivar.h"
+#include "eprom.h"
+
+#define DEFAULT_PLATFORM_CONFIG_NAME "hfi1_platform.dat"
+
+static int validate_scratch_checksum(struct hfi1_devdata *dd)
+{
+ u64 checksum = 0, temp_scratch = 0;
+ int i, j, version;
+
+ temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
+ version = (temp_scratch & BITMAP_VERSION_SMASK) >> BITMAP_VERSION_SHIFT;
+
+ /* Prevent power on default of all zeroes from passing checksum */
+ if (!version) {
+ dd_dev_err(dd, "%s: Config bitmap uninitialized\n", __func__);
+ dd_dev_err(dd,
+ "%s: Please update your BIOS to support active channels\n",
+ __func__);
+ return 0;
+ }
+
+ /*
+ * ASIC scratch 0 only contains the checksum and bitmap version as
+ * fields of interest, both of which are handled separately from the
+ * loop below, so skip it
+ */
+ checksum += version;
+ for (i = 1; i < ASIC_NUM_SCRATCH; i++) {
+ temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH + (8 * i));
+ for (j = sizeof(u64); j != 0; j -= 2) {
+ checksum += (temp_scratch & 0xFFFF);
+ temp_scratch >>= 16;
+ }
+ }
+
+ while (checksum >> 16)
+ checksum = (checksum & CHECKSUM_MASK) + (checksum >> 16);
+
+ temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
+ temp_scratch &= CHECKSUM_SMASK;
+ temp_scratch >>= CHECKSUM_SHIFT;
+
+ if (checksum + temp_scratch == 0xFFFF)
+ return 1;
+
+ dd_dev_err(dd, "%s: Configuration bitmap corrupted\n", __func__);
+ return 0;
+}
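+
+/*
+ * Worked example of the fold above (illustration only): the loop sums
+ * every 16-bit halfword of scratch registers 1..N plus the bitmap
+ * version, then folds any carries back in until the sum fits in 16
+ * bits.  Validation passes when that sum plus the stored CHECKSUM
+ * field equals 0xFFFF, i.e. the stored field is the one's complement
+ * of the computed sum: a computed 0x1234 requires a stored 0xEDCB.
+ */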
+
+static void save_platform_config_fields(struct hfi1_devdata *dd)
+{
+ struct hfi1_pportdata *ppd = dd->pport;
+ u64 temp_scratch = 0, temp_dest = 0;
+
+ temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH_1);
+
+ temp_dest = temp_scratch &
+ (dd->hfi1_id ? PORT1_PORT_TYPE_SMASK :
+ PORT0_PORT_TYPE_SMASK);
+ ppd->port_type = temp_dest >>
+ (dd->hfi1_id ? PORT1_PORT_TYPE_SHIFT :
+ PORT0_PORT_TYPE_SHIFT);
+
+ temp_dest = temp_scratch &
+ (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SMASK :
+ PORT0_LOCAL_ATTEN_SMASK);
+ ppd->local_atten = temp_dest >>
+ (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SHIFT :
+ PORT0_LOCAL_ATTEN_SHIFT);
+
+ temp_dest = temp_scratch &
+ (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SMASK :
+ PORT0_REMOTE_ATTEN_SMASK);
+ ppd->remote_atten = temp_dest >>
+ (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SHIFT :
+ PORT0_REMOTE_ATTEN_SHIFT);
+
+ temp_dest = temp_scratch &
+ (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SMASK :
+ PORT0_DEFAULT_ATTEN_SMASK);
+ ppd->default_atten = temp_dest >>
+ (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SHIFT :
+ PORT0_DEFAULT_ATTEN_SHIFT);
+
+ temp_scratch = read_csr(dd, dd->hfi1_id ? ASIC_CFG_SCRATCH_3 :
+ ASIC_CFG_SCRATCH_2);
+
+ ppd->tx_preset_eq = (temp_scratch & TX_EQ_SMASK) >> TX_EQ_SHIFT;
+ ppd->tx_preset_noeq = (temp_scratch & TX_NO_EQ_SMASK) >> TX_NO_EQ_SHIFT;
+ ppd->rx_preset = (temp_scratch & RX_SMASK) >> RX_SHIFT;
+
+ ppd->max_power_class = (temp_scratch & QSFP_MAX_POWER_SMASK) >>
+ QSFP_MAX_POWER_SHIFT;
+
+ ppd->config_from_scratch = true;
+}
+
+void get_platform_config(struct hfi1_devdata *dd)
+{
+ int ret = 0;
+ u8 *temp_platform_config = NULL;
+ u32 esize;
+ const struct firmware *platform_config_file = NULL;
+
+ if (is_integrated(dd)) {
+ if (validate_scratch_checksum(dd)) {
+ save_platform_config_fields(dd);
+ return;
+ }
+ } else {
+ ret = eprom_read_platform_config(dd,
+ (void **)&temp_platform_config,
+ &esize);
+ if (!ret) {
+ /* success */
+ dd->platform_config.data = temp_platform_config;
+ dd->platform_config.size = esize;
+ return;
+ }
+ }
+ dd_dev_err(dd,
+ "%s: Failed to get platform config, falling back to sub-optimal default file\n",
+ __func__);
+
+ ret = request_firmware(&platform_config_file,
+ DEFAULT_PLATFORM_CONFIG_NAME,
+ &dd->pcidev->dev);
+ if (ret) {
+ dd_dev_err(dd,
+ "%s: No default platform config file found\n",
+ __func__);
+ return;
+ }
+
+ /*
+ * Allocate separate memory block to store data and free firmware
+ * structure. This allows free_platform_config to treat EPROM and
+ * fallback configs in the same manner.
+ */
+ dd->platform_config.data = kmemdup(platform_config_file->data,
+ platform_config_file->size,
+ GFP_KERNEL);
+ dd->platform_config.size = platform_config_file->size;
+ release_firmware(platform_config_file);
+}
+
+void free_platform_config(struct hfi1_devdata *dd)
+{
+ /* Release memory allocated for eprom or fallback file read. */
+ kfree(dd->platform_config.data);
+ dd->platform_config.data = NULL;
+}
+
+void get_port_type(struct hfi1_pportdata *ppd)
+{
+ int ret;
+ u32 temp;
+
+ ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_PORT_TYPE, &temp,
+ 4);
+ if (ret) {
+ ppd->port_type = PORT_TYPE_UNKNOWN;
+ return;
+ }
+ ppd->port_type = temp;
+}
+
+int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
+{
+ u8 tx_ctrl_byte = on ? 0x0 : 0xF;
+ int ret = 0;
+
+ ret = qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_TX_CTRL_BYTE_OFFS,
+ &tx_ctrl_byte, 1);
+ /* we expected 1, so consider 0 an error */
+ if (ret == 0)
+ ret = -EIO;
+ else if (ret == 1)
+ ret = 0;
+ return ret;
+}
+
+static int qual_power(struct hfi1_pportdata *ppd)
+{
+ u32 cable_power_class = 0, power_class_max = 0;
+ u8 *cache = ppd->qsfp_info.cache;
+ int ret = 0;
+
+ ret = get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_SYSTEM_TABLE, 0,
+ SYSTEM_TABLE_QSFP_POWER_CLASS_MAX, &power_class_max, 4);
+ if (ret)
+ return ret;
+
+ cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
+
+ if (cable_power_class > power_class_max)
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY);
+
+ if (ppd->offline_disabled_reason ==
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY)) {
+ dd_dev_err(
+ ppd->dd,
+ "%s: Port disabled due to system power restrictions\n",
+ __func__);
+ ret = -EPERM;
+ }
+ return ret;
+}
+
+static int qual_bitrate(struct hfi1_pportdata *ppd)
+{
+ u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
+ u8 *cache = ppd->qsfp_info.cache;
+
+ if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G) &&
+ cache[QSFP_NOM_BIT_RATE_250_OFFS] < 0x64)
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);
+
+ if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G) &&
+ cache[QSFP_NOM_BIT_RATE_100_OFFS] < 0x7D)
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);
+
+ if (ppd->offline_disabled_reason ==
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY)) {
+ dd_dev_err(
+ ppd->dd,
+ "%s: Cable failed bitrate check, disabling port\n",
+ __func__);
+ return -EPERM;
+ }
+ return 0;
+}
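+
+/*
+ * Note on the thresholds above (assuming the usual SFF-8636 units for
+ * these cache offsets): QSFP_NOM_BIT_RATE_250_OFFS is read in units of
+ * 250 Mb/s, so 0x64 == 100 corresponds to 25 Gb/s, and
+ * QSFP_NOM_BIT_RATE_100_OFFS is read in units of 100 Mb/s, so
+ * 0x7D == 125 corresponds to 12.5 Gb/s, i.e. cables slower than the
+ * enabled link speed fail the check.
+ */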
+
+static int set_qsfp_high_power(struct hfi1_pportdata *ppd)
+{
+ u8 cable_power_class = 0, power_ctrl_byte = 0;
+ u8 *cache = ppd->qsfp_info.cache;
+ int ret;
+
+ cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
+
+ if (cable_power_class > QSFP_POWER_CLASS_1) {
+ power_ctrl_byte = cache[QSFP_PWR_CTRL_BYTE_OFFS];
+
+ power_ctrl_byte |= 1;
+ power_ctrl_byte &= ~(0x2);
+
+ ret = qsfp_write(ppd, ppd->dd->hfi1_id,
+ QSFP_PWR_CTRL_BYTE_OFFS,
+ &power_ctrl_byte, 1);
+ if (ret != 1)
+ return -EIO;
+
+ if (cable_power_class > QSFP_POWER_CLASS_4) {
+ power_ctrl_byte |= (1 << 2);
+ ret = qsfp_write(ppd, ppd->dd->hfi1_id,
+ QSFP_PWR_CTRL_BYTE_OFFS,
+ &power_ctrl_byte, 1);
+ if (ret != 1)
+ return -EIO;
+ }
+
+ /* SFF 8679 rev 1.7 LPMode Deassert time */
+ msleep(300);
+ }
+ return 0;
+}
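+
+/*
+ * Bit layout assumed above (per SFF-8636 byte 93 semantics): bit 0 is
+ * the power-override enable, bit 1 selects low-power mode (cleared
+ * here so the module leaves LPMode), and bit 2 enables power classes
+ * 5-7, which is why it is only set when cable_power_class exceeds
+ * QSFP_POWER_CLASS_4.  A class 5 module therefore ends up with the
+ * low three bits of power_ctrl_byte as 0b101.
+ */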
+
+static void apply_rx_cdr(struct hfi1_pportdata *ppd,
+ u32 rx_preset_index,
+ u8 *cdr_ctrl_byte)
+{
+ u32 rx_preset;
+ u8 *cache = ppd->qsfp_info.cache;
+ int cable_power_class;
+
+ if (!((cache[QSFP_MOD_PWR_OFFS] & 0x4) &&
+ (cache[QSFP_CDR_INFO_OFFS] & 0x40)))
+ return;
+
+ /* RX CDR present, bypass supported */
+ cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
+
+ if (cable_power_class <= QSFP_POWER_CLASS_3) {
+ /* Power class <= 3, ignore config & turn RX CDR on */
+ *cdr_ctrl_byte |= 0xF;
+ return;
+ }
+
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
+ rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR_APPLY,
+ &rx_preset, 4);
+
+ if (!rx_preset) {
+ dd_dev_info(
+ ppd->dd,
+ "%s: RX_CDR_APPLY is set to disabled\n",
+ __func__);
+ return;
+ }
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
+ rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR,
+ &rx_preset, 4);
+
+ /* Expand cdr setting to all 4 lanes */
+ rx_preset = (rx_preset | (rx_preset << 1) |
+ (rx_preset << 2) | (rx_preset << 3));
+
+ if (rx_preset) {
+ *cdr_ctrl_byte |= rx_preset;
+ } else {
+ *cdr_ctrl_byte &= rx_preset;
+ /* Preserve current TX CDR status */
+ *cdr_ctrl_byte |= (cache[QSFP_CDR_CTRL_BYTE_OFFS] & 0xF0);
+ }
+}
+
+static void apply_tx_cdr(struct hfi1_pportdata *ppd,
+ u32 tx_preset_index,
+ u8 *cdr_ctrl_byte)
+{
+ u32 tx_preset;
+ u8 *cache = ppd->qsfp_info.cache;
+ int cable_power_class;
+
+ if (!((cache[QSFP_MOD_PWR_OFFS] & 0x8) &&
+ (cache[QSFP_CDR_INFO_OFFS] & 0x80)))
+ return;
+
+ /* TX CDR present, bypass supported */
+ cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
+
+ if (cable_power_class <= QSFP_POWER_CLASS_3) {
+ /* Power class <= 3, ignore config & turn TX CDR on */
+ *cdr_ctrl_byte |= 0xF0;
+ return;
+ }
+
+ get_platform_config_field(
+ ppd->dd,
+ PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
+ TX_PRESET_TABLE_QSFP_TX_CDR_APPLY, &tx_preset, 4);
+
+ if (!tx_preset) {
+ dd_dev_info(
+ ppd->dd,
+ "%s: TX_CDR_APPLY is set to disabled\n",
+ __func__);
+ return;
+ }
+ get_platform_config_field(
+ ppd->dd,
+ PLATFORM_CONFIG_TX_PRESET_TABLE,
+ tx_preset_index,
+ TX_PRESET_TABLE_QSFP_TX_CDR, &tx_preset, 4);
+
+ /* Expand cdr setting to all 4 lanes */
+ tx_preset = (tx_preset | (tx_preset << 1) |
+ (tx_preset << 2) | (tx_preset << 3));
+
+ if (tx_preset)
+ *cdr_ctrl_byte |= (tx_preset << 4);
+ else
+ /* Preserve current/determined RX CDR status */
+ *cdr_ctrl_byte &= ((tx_preset << 4) | 0xF);
+}
+
+static void apply_cdr_settings(
+ struct hfi1_pportdata *ppd, u32 rx_preset_index,
+ u32 tx_preset_index)
+{
+ u8 *cache = ppd->qsfp_info.cache;
+ u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
+
+ apply_rx_cdr(ppd, rx_preset_index, &cdr_ctrl_byte);
+
+ apply_tx_cdr(ppd, tx_preset_index, &cdr_ctrl_byte);
+
+ qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
+ &cdr_ctrl_byte, 1);
+}
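+
+/*
+ * Worked example of the lane expansion above (illustration only): the
+ * CDR enable read from the preset table is a single 0/1 value, and
+ * OR-ing it with copies shifted left by 1..3 replicates it across the
+ * four per-lane bits, so 1 becomes 0xF for the RX nibble (or 0xF0
+ * after the << 4 for TX) while 0 clears that nibble and the other
+ * nibble of the cached control byte is preserved.
+ */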
+
+static void apply_tx_eq_auto(struct hfi1_pportdata *ppd)
+{
+ u8 *cache = ppd->qsfp_info.cache;
+ u8 tx_eq;
+
+ if (!(cache[QSFP_EQ_INFO_OFFS] & 0x8))
+ return;
+ /* Disable adaptive TX EQ if present */
+ tx_eq = cache[(128 * 3) + 241];
+ tx_eq &= 0xF0;
+ qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 241, &tx_eq, 1);
+}
+
+static void apply_tx_eq_prog(struct hfi1_pportdata *ppd, u32 tx_preset_index)
+{
+ u8 *cache = ppd->qsfp_info.cache;
+ u32 tx_preset;
+ u8 tx_eq;
+
+ if (!(cache[QSFP_EQ_INFO_OFFS] & 0x4))
+ return;
+
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
+ tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ_APPLY,
+ &tx_preset, 4);
+ if (!tx_preset) {
+ dd_dev_info(
+ ppd->dd,
+ "%s: TX_EQ_APPLY is set to disabled\n",
+ __func__);
+ return;
+ }
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
+ tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ,
+ &tx_preset, 4);
+
+ if (((cache[(128 * 3) + 224] & 0xF0) >> 4) < tx_preset) {
+ dd_dev_info(
+ ppd->dd,
+ "%s: TX EQ %x unsupported\n",
+ __func__, tx_preset);
+
+ dd_dev_info(
+ ppd->dd,
+ "%s: Applying EQ %x\n",
+ __func__, cache[608] & 0xF0);
+
+ tx_preset = (cache[608] & 0xF0) >> 4;
+ }
+
+ tx_eq = tx_preset | (tx_preset << 4);
+ qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 234, &tx_eq, 1);
+ qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 235, &tx_eq, 1);
+}
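+
+/*
+ * Worked example (illustration only, assuming the usual one-nibble-
+ * per-lane QSFP layout for these bytes): a supported tx_preset of 3
+ * becomes tx_eq == 0x33, and writing that byte to offsets
+ * (256 * 3) + 234 and + 235 programs the same EQ code into all four
+ * TX lanes.  If the module's advertised maximum (upper nibble of
+ * cache[(128 * 3) + 224]) is lower, the code above falls back to it.
+ */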
+
+static void apply_rx_eq_emp(struct hfi1_pportdata *ppd, u32 rx_preset_index)
+{
+ u32 rx_preset;
+ u8 rx_eq, *cache = ppd->qsfp_info.cache;
+
+ if (!(cache[QSFP_EQ_INFO_OFFS] & 0x2))
+ return;
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
+ rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP_APPLY,
+ &rx_preset, 4);
+
+ if (!rx_preset) {
+ dd_dev_info(
+ ppd->dd,
+ "%s: RX_EMP_APPLY is set to disabled\n",
+ __func__);
+ return;
+ }
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
+ rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP,
+ &rx_preset, 4);
+
+ if ((cache[(128 * 3) + 224] & 0xF) < rx_preset) {
+ dd_dev_info(
+ ppd->dd,
+ "%s: Requested RX EMP %x\n",
+ __func__, rx_preset);
+
+ dd_dev_info(
+ ppd->dd,
+ "%s: Applying supported EMP %x\n",
+ __func__, cache[608] & 0xF);
+
+ rx_preset = cache[608] & 0xF;
+ }
+
+ rx_eq = rx_preset | (rx_preset << 4);
+
+ qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 236, &rx_eq, 1);
+ qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 237, &rx_eq, 1);
+}
+
+static void apply_eq_settings(struct hfi1_pportdata *ppd,
+ u32 rx_preset_index, u32 tx_preset_index)
+{
+ u8 *cache = ppd->qsfp_info.cache;
+
+ /* no point going on w/o a page 3 */
+ if (cache[2] & 4) {
+ dd_dev_info(ppd->dd,
+ "%s: Upper page 03 not present\n",
+ __func__);
+ return;
+ }
+
+ apply_tx_eq_auto(ppd);
+
+ apply_tx_eq_prog(ppd, tx_preset_index);
+
+ apply_rx_eq_emp(ppd, rx_preset_index);
+}
+
+static void apply_rx_amplitude_settings(
+ struct hfi1_pportdata *ppd, u32 rx_preset_index,
+ u32 tx_preset_index)
+{
+ u32 rx_preset;
+ u8 rx_amp = 0, i = 0, preferred = 0, *cache = ppd->qsfp_info.cache;
+
+ /* no point going on w/o a page 3 */
+ if (cache[2] & 4) {
+ dd_dev_info(ppd->dd,
+ "%s: Upper page 03 not present\n",
+ __func__);
+ return;
+ }
+ if (!(cache[QSFP_EQ_INFO_OFFS] & 0x1)) {
+ dd_dev_info(ppd->dd,
+ "%s: RX amplitude control not supported by cable\n",
+ __func__);
+ return;
+ }
+
+ get_platform_config_field(ppd->dd,
+ PLATFORM_CONFIG_RX_PRESET_TABLE,
+ rx_preset_index,
+ RX_PRESET_TABLE_QSFP_RX_AMP_APPLY,
+ &rx_preset, 4);
+
+ if (!rx_preset) {
+ dd_dev_info(ppd->dd,
+ "%s: RX_AMP_APPLY is set to disabled\n",
+ __func__);
+ return;
+ }
+ get_platform_config_field(ppd->dd,
+ PLATFORM_CONFIG_RX_PRESET_TABLE,
+ rx_preset_index,
+ RX_PRESET_TABLE_QSFP_RX_AMP,
+ &rx_preset, 4);
+
+ dd_dev_info(ppd->dd,
+ "%s: Requested RX AMP %x\n",
+ __func__,
+ rx_preset);
+
+ for (i = 0; i < 4; i++) {
+ if (cache[(128 * 3) + 225] & (1 << i)) {
+ preferred = i;
+ if (preferred == rx_preset)
+ break;
+ }
+ }
+
+ /*
+ * Verify that preferred RX amplitude is not just a
+ * fall through of the default
+ */
+ if (!preferred && !(cache[(128 * 3) + 225] & 0x1)) {
+ dd_dev_info(ppd->dd, "No supported RX AMP, not applying\n");
+ return;
+ }
+
+ dd_dev_info(ppd->dd,
+ "%s: Applying RX AMP %x\n", __func__, preferred);
+
+ rx_amp = preferred | (preferred << 4);
+ qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 238, &rx_amp, 1);
+ qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 239, &rx_amp, 1);
+}
+
+#define OPA_INVALID_INDEX 0xFFF
+
+static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
+ u32 config_data, const char *message)
+{
+ u8 i;
+ int ret;
+
+ for (i = 0; i < 4; i++) {
+ ret = load_8051_config(ppd->dd, field_id, i, config_data);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(
+ ppd->dd,
+ "%s: %s for lane %u failed\n",
+ message, __func__, i);
+ }
+ }
+}
+
+/*
+ * Return a special SerDes setting for low power AOC cables. The power class
+ * threshold and setting being used were all found by empirical testing.
+ *
+ * Summary of the logic:
+ *
+ * if (QSFP and QSFP_TYPE == AOC and QSFP_POWER_CLASS < 4)
+ * return 0xe
+ * return 0; // leave at default
+ */
+static u8 aoc_low_power_setting(struct hfi1_pportdata *ppd)
+{
+ u8 *cache = ppd->qsfp_info.cache;
+ int power_class;
+
+ /* QSFP only */
+ if (ppd->port_type != PORT_TYPE_QSFP)
+ return 0; /* leave at default */
+
+ /* active optical cables only */
+ switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
+ case 0x0 ... 0x9: fallthrough;
+ case 0xC: fallthrough;
+ case 0xE:
+ /* active AOC */
+ power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
+ if (power_class < QSFP_POWER_CLASS_4)
+ return 0xe;
+ }
+ return 0; /* leave at default */
+}
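+
+/*
+ * Example of the selection above (illustration only): a power class 3
+ * active optical module (tech code 0x0, typically an 850 nm VCSEL)
+ * returns the low power SerDes code 0xe, while passive copper
+ * (tech code 0xA) or any AOC of power class 4 and above falls through
+ * to the default of 0.
+ */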
+
+static void apply_tunings(
+ struct hfi1_pportdata *ppd, u32 tx_preset_index,
+ u8 tuning_method, u32 total_atten, u8 limiting_active)
+{
+ int ret = 0;
+ u32 config_data = 0, tx_preset = 0;
+ u8 precur = 0, attn = 0, postcur = 0, external_device_config = 0;
+ u8 *cache = ppd->qsfp_info.cache;
+
+ /* Pass tuning method to 8051 */
+ read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
+ &config_data);
+ config_data &= ~(0xff << TUNING_METHOD_SHIFT);
+ config_data |= ((u32)tuning_method << TUNING_METHOD_SHIFT);
+ ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
+ config_data);
+ if (ret != HCMD_SUCCESS)
+ dd_dev_err(ppd->dd, "%s: Failed to set tuning method\n",
+ __func__);
+
+ /* Set same channel loss for both TX and RX */
+ config_data = 0 | (total_atten << 16) | (total_atten << 24);
+ apply_tx_lanes(ppd, CHANNEL_LOSS_SETTINGS, config_data,
+ "Setting channel loss");
+
+ /* Inform 8051 of cable capabilities */
+ if (ppd->qsfp_info.cache_valid) {
+ external_device_config =
+ ((cache[QSFP_MOD_PWR_OFFS] & 0x4) << 3) |
+ ((cache[QSFP_MOD_PWR_OFFS] & 0x8) << 2) |
+ ((cache[QSFP_EQ_INFO_OFFS] & 0x2) << 1) |
+ (cache[QSFP_EQ_INFO_OFFS] & 0x4);
+ ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
+ GENERAL_CONFIG, &config_data);
+ /* Clear, then set the external device config field */
+ config_data &= ~(u32)0xFF;
+ config_data |= external_device_config;
+ ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
+ GENERAL_CONFIG, config_data);
+ if (ret != HCMD_SUCCESS)
+ dd_dev_err(ppd->dd,
+ "%s: Failed set ext device config params\n",
+ __func__);
+ }
+
+ if (tx_preset_index == OPA_INVALID_INDEX) {
+ if (ppd->port_type == PORT_TYPE_QSFP && limiting_active)
+ dd_dev_err(ppd->dd, "%s: Invalid Tx preset index\n",
+ __func__);
+ return;
+ }
+
+ /* Following for limiting active channels only */
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
+ TX_PRESET_TABLE_PRECUR, &tx_preset, 4);
+ precur = tx_preset;
+
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
+ tx_preset_index, TX_PRESET_TABLE_ATTN, &tx_preset, 4);
+ attn = tx_preset;
+
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
+ tx_preset_index, TX_PRESET_TABLE_POSTCUR, &tx_preset, 4);
+ postcur = tx_preset;
+
+ /*
+ * NOTES:
+ * o The aoc_low_power_setting is applied to all lanes even
+ * though only lane 0's value is examined by the firmware.
+ * o A lingering low power setting after a cable swap does
+ * not occur. On cable unplug the 8051 is reset and
+ * restarted on cable insert. This resets all settings to
+ * their default, erasing any previous low power setting.
+ */
+ config_data = precur | (attn << 8) | (postcur << 16) |
+ (aoc_low_power_setting(ppd) << 24);
+
+ apply_tx_lanes(ppd, TX_EQ_SETTINGS, config_data,
+ "Applying TX settings");
+}
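+
+/*
+ * Worked example of the packing above (illustration only): with
+ * precur == 2, attn == 10, postcur == 4 and an AOC low power code of
+ * 0xe, config_data == 0x0e040a02 (precur in bits 7:0, attn in 15:8,
+ * postcur in 23:16, SerDes low power code in 31:24), and the same
+ * word is loaded for each of the four lanes by apply_tx_lanes().
+ */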
+
+/* Must be holding the QSFP i2c resource */
+static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
+ u32 *ptr_rx_preset, u32 *ptr_total_atten)
+{
+ int ret;
+ u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
+ u8 *cache = ppd->qsfp_info.cache;
+
+ ppd->qsfp_info.limiting_active = 1;
+
+ ret = set_qsfp_tx(ppd, 0);
+ if (ret)
+ return ret;
+
+ ret = qual_power(ppd);
+ if (ret)
+ return ret;
+
+ ret = qual_bitrate(ppd);
+ if (ret)
+ return ret;
+
+ /*
+ * We'll change the QSFP memory contents from here on out, thus we set a
+ * flag here to remind ourselves to reset the QSFP module. This prevents
+ * reuse of stale settings established in our previous pass through
+ * this routine.
+ */
+ if (ppd->qsfp_info.reset_needed) {
+ ret = reset_qsfp(ppd);
+ if (ret)
+ return ret;
+ refresh_qsfp_cache(ppd, &ppd->qsfp_info);
+ } else {
+ ppd->qsfp_info.reset_needed = 1;
+ }
+
+ ret = set_qsfp_high_power(ppd);
+ if (ret)
+ return ret;
+
+ if (cache[QSFP_EQ_INFO_OFFS] & 0x4) {
+ ret = get_platform_config_field(
+ ppd->dd,
+ PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ,
+ ptr_tx_preset, 4);
+ if (ret) {
+ *ptr_tx_preset = OPA_INVALID_INDEX;
+ return ret;
+ }
+ } else {
+ ret = get_platform_config_field(
+ ppd->dd,
+ PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ,
+ ptr_tx_preset, 4);
+ if (ret) {
+ *ptr_tx_preset = OPA_INVALID_INDEX;
+ return ret;
+ }
+ }
+
+ ret = get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_RX_PRESET_IDX, ptr_rx_preset, 4);
+ if (ret) {
+ *ptr_rx_preset = OPA_INVALID_INDEX;
+ return ret;
+ }
+
+ if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_LOCAL_ATTEN_25G, ptr_total_atten, 4);
+ else if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G))
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_LOCAL_ATTEN_12G, ptr_total_atten, 4);
+
+ apply_cdr_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);
+
+ apply_eq_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);
+
+ apply_rx_amplitude_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);
+
+ ret = set_qsfp_tx(ppd, 1);
+
+ return ret;
+}
+
+static int tune_qsfp(struct hfi1_pportdata *ppd,
+ u32 *ptr_tx_preset, u32 *ptr_rx_preset,
+ u8 *ptr_tuning_method, u32 *ptr_total_atten)
+{
+ u32 cable_atten = 0, remote_atten = 0, platform_atten = 0;
+ u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
+ int ret = 0;
+ u8 *cache = ppd->qsfp_info.cache;
+
+ switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
+ case 0xA ... 0xB:
+ ret = get_platform_config_field(
+ ppd->dd,
+ PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_LOCAL_ATTEN_25G,
+ &platform_atten, 4);
+ if (ret)
+ return ret;
+
+ if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
+ cable_atten = cache[QSFP_CU_ATTEN_12G_OFFS];
+ else if ((lss & OPA_LINK_SPEED_12_5G) &&
+ (lse & OPA_LINK_SPEED_12_5G))
+ cable_atten = cache[QSFP_CU_ATTEN_7G_OFFS];
+
+ /* Fallback to configured attenuation if cable memory is bad */
+ if (cable_atten == 0 || cable_atten > 36) {
+ ret = get_platform_config_field(
+ ppd->dd,
+ PLATFORM_CONFIG_SYSTEM_TABLE, 0,
+ SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G,
+ &cable_atten, 4);
+ if (ret)
+ return ret;
+ }
+
+ ret = get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
+ if (ret)
+ return ret;
+
+ *ptr_total_atten = platform_atten + cable_atten + remote_atten;
+
+ *ptr_tuning_method = OPA_PASSIVE_TUNING;
+ break;
+ case 0x0 ... 0x9: fallthrough;
+ case 0xC: fallthrough;
+ case 0xE:
+ ret = tune_active_qsfp(ppd, ptr_tx_preset, ptr_rx_preset,
+ ptr_total_atten);
+ if (ret)
+ return ret;
+
+ *ptr_tuning_method = OPA_ACTIVE_TUNING;
+ break;
+ case 0xD: fallthrough;
+ case 0xF:
+ default:
+ dd_dev_warn(ppd->dd, "%s: Unknown/unsupported cable\n",
+ __func__);
+ break;
+ }
+ return ret;
+}
+
+/*
+ * This function communicates its success or failure via ppd->driver_link_ready.
+ * Thus, it depends on its association with start_link(...), which checks
+ * driver_link_ready before proceeding with the link negotiation and
+ * initialization process.
+ */
+void tune_serdes(struct hfi1_pportdata *ppd)
+{
+ int ret = 0;
+ u32 total_atten = 0;
+ u32 remote_atten = 0, platform_atten = 0;
+ u32 rx_preset_index, tx_preset_index;
+ u8 tuning_method = 0, limiting_active = 0;
+ struct hfi1_devdata *dd = ppd->dd;
+
+ rx_preset_index = OPA_INVALID_INDEX;
+ tx_preset_index = OPA_INVALID_INDEX;
+
+ /* the link defaults to enabled */
+ ppd->link_enabled = 1;
+ /* the driver link ready state defaults to not ready */
+ ppd->driver_link_ready = 0;
+ ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
+
+ /* Skip the tuning for testing (loopback != none) and simulations */
+ if (loopback != LOOPBACK_NONE ||
+ ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
+ ppd->driver_link_ready = 1;
+
+ if (qsfp_mod_present(ppd)) {
+ ret = acquire_chip_resource(ppd->dd,
+ qsfp_resource(ppd->dd),
+ QSFP_WAIT);
+ if (ret) {
+ dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
+ __func__, (int)ppd->dd->hfi1_id);
+ goto bail;
+ }
+
+ refresh_qsfp_cache(ppd, &ppd->qsfp_info);
+ release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
+ }
+
+ return;
+ }
+
+ switch (ppd->port_type) {
+ case PORT_TYPE_DISCONNECTED:
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_DISCONNECTED);
+ dd_dev_warn(dd, "%s: Port disconnected, disabling port\n",
+ __func__);
+ goto bail;
+ case PORT_TYPE_FIXED:
+ /* platform_atten, remote_atten pre-zeroed to catch error */
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_LOCAL_ATTEN_25G, &platform_atten, 4);
+
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
+
+ total_atten = platform_atten + remote_atten;
+
+ tuning_method = OPA_PASSIVE_TUNING;
+ break;
+ case PORT_TYPE_VARIABLE:
+ if (qsfp_mod_present(ppd)) {
+ /*
+ * platform_atten, remote_atten pre-zeroed to
+ * catch error
+ */
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_LOCAL_ATTEN_25G,
+ &platform_atten, 4);
+
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_REMOTE_ATTEN_25G,
+ &remote_atten, 4);
+
+ total_atten = platform_atten + remote_atten;
+
+ tuning_method = OPA_PASSIVE_TUNING;
+ } else {
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_CHASSIS_CONFIG);
+ goto bail;
+ }
+ break;
+ case PORT_TYPE_QSFP:
+ if (qsfp_mod_present(ppd)) {
+ ret = acquire_chip_resource(ppd->dd,
+ qsfp_resource(ppd->dd),
+ QSFP_WAIT);
+ if (ret) {
+ dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
+ __func__, (int)ppd->dd->hfi1_id);
+ goto bail;
+ }
+ refresh_qsfp_cache(ppd, &ppd->qsfp_info);
+
+ if (ppd->qsfp_info.cache_valid) {
+ ret = tune_qsfp(ppd,
+ &tx_preset_index,
+ &rx_preset_index,
+ &tuning_method,
+ &total_atten);
+
+ /*
+ * We may have modified the QSFP memory, so
+ * update the cache to reflect the changes
+ */
+ refresh_qsfp_cache(ppd, &ppd->qsfp_info);
+ limiting_active =
+ ppd->qsfp_info.limiting_active;
+ } else {
+ dd_dev_err(dd,
+ "%s: Reading QSFP memory failed\n",
+ __func__);
+ ret = -EINVAL; /* a fail indication */
+ }
+ release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
+ if (ret)
+ goto bail;
+ } else {
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(
+ OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
+ goto bail;
+ }
+ break;
+ default:
+ dd_dev_warn(ppd->dd, "%s: Unknown port type\n", __func__);
+ ppd->port_type = PORT_TYPE_UNKNOWN;
+ tuning_method = OPA_UNKNOWN_TUNING;
+ total_atten = 0;
+ limiting_active = 0;
+ tx_preset_index = OPA_INVALID_INDEX;
+ break;
+ }
+
+ if (ppd->offline_disabled_reason ==
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
+ apply_tunings(ppd, tx_preset_index, tuning_method,
+ total_atten, limiting_active);
+
+ if (!ret)
+ ppd->driver_link_ready = 1;
+
+ return;
+bail:
+ ppd->driver_link_ready = 0;
+}
diff --git a/drivers/infiniband/hw/hfi1/platform.h b/drivers/infiniband/hw/hfi1/platform.h
new file mode 100644
index 000000000000..0631f9bf3a89
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/platform.h
@@ -0,0 +1,371 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ */
+
+#ifndef __PLATFORM_H
+#define __PLATFORM_H
+
+#define METADATA_TABLE_FIELD_START_SHIFT 0
+#define METADATA_TABLE_FIELD_START_LEN_BITS 15
+#define METADATA_TABLE_FIELD_LEN_SHIFT 16
+#define METADATA_TABLE_FIELD_LEN_LEN_BITS 16
+
+/* Header structure */
+#define PLATFORM_CONFIG_HEADER_RECORD_IDX_SHIFT 0
+#define PLATFORM_CONFIG_HEADER_RECORD_IDX_LEN_BITS 6
+#define PLATFORM_CONFIG_HEADER_TABLE_LENGTH_SHIFT 16
+#define PLATFORM_CONFIG_HEADER_TABLE_LENGTH_LEN_BITS 12
+#define PLATFORM_CONFIG_HEADER_TABLE_TYPE_SHIFT 28
+#define PLATFORM_CONFIG_HEADER_TABLE_TYPE_LEN_BITS 4
+
+enum platform_config_table_type_encoding {
+ PLATFORM_CONFIG_TABLE_RESERVED,
+ PLATFORM_CONFIG_SYSTEM_TABLE,
+ PLATFORM_CONFIG_PORT_TABLE,
+ PLATFORM_CONFIG_RX_PRESET_TABLE,
+ PLATFORM_CONFIG_TX_PRESET_TABLE,
+ PLATFORM_CONFIG_QSFP_ATTEN_TABLE,
+ PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE,
+ PLATFORM_CONFIG_TABLE_MAX
+};
+
+enum platform_config_system_table_fields {
+ SYSTEM_TABLE_RESERVED,
+ SYSTEM_TABLE_NODE_STRING,
+ SYSTEM_TABLE_SYSTEM_IMAGE_GUID,
+ SYSTEM_TABLE_NODE_GUID,
+ SYSTEM_TABLE_REVISION,
+ SYSTEM_TABLE_VENDOR_OUI,
+ SYSTEM_TABLE_META_VERSION,
+ SYSTEM_TABLE_DEVICE_ID,
+ SYSTEM_TABLE_PARTITION_ENFORCEMENT_CAP,
+ SYSTEM_TABLE_QSFP_POWER_CLASS_MAX,
+ SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_12G,
+ SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G,
+ SYSTEM_TABLE_VARIABLE_TABLE_ENTRIES_PER_PORT,
+ SYSTEM_TABLE_MAX
+};
+
+enum platform_config_port_table_fields {
+ PORT_TABLE_RESERVED,
+ PORT_TABLE_PORT_TYPE,
+ PORT_TABLE_LOCAL_ATTEN_12G,
+ PORT_TABLE_LOCAL_ATTEN_25G,
+ PORT_TABLE_LINK_SPEED_SUPPORTED,
+ PORT_TABLE_LINK_WIDTH_SUPPORTED,
+ PORT_TABLE_AUTO_LANE_SHEDDING_ENABLED,
+ PORT_TABLE_EXTERNAL_LOOPBACK_ALLOWED,
+ PORT_TABLE_VL_CAP,
+ PORT_TABLE_MTU_CAP,
+ PORT_TABLE_TX_LANE_ENABLE_MASK,
+ PORT_TABLE_LOCAL_MAX_TIMEOUT,
+ PORT_TABLE_REMOTE_ATTEN_12G,
+ PORT_TABLE_REMOTE_ATTEN_25G,
+ PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ,
+ PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ,
+ PORT_TABLE_RX_PRESET_IDX,
+ PORT_TABLE_CABLE_REACH_CLASS,
+ PORT_TABLE_MAX
+};
+
+enum platform_config_rx_preset_table_fields {
+ RX_PRESET_TABLE_RESERVED,
+ RX_PRESET_TABLE_QSFP_RX_CDR_APPLY,
+ RX_PRESET_TABLE_QSFP_RX_EMP_APPLY,
+ RX_PRESET_TABLE_QSFP_RX_AMP_APPLY,
+ RX_PRESET_TABLE_QSFP_RX_CDR,
+ RX_PRESET_TABLE_QSFP_RX_EMP,
+ RX_PRESET_TABLE_QSFP_RX_AMP,
+ RX_PRESET_TABLE_MAX
+};
+
+enum platform_config_tx_preset_table_fields {
+ TX_PRESET_TABLE_RESERVED,
+ TX_PRESET_TABLE_PRECUR,
+ TX_PRESET_TABLE_ATTN,
+ TX_PRESET_TABLE_POSTCUR,
+ TX_PRESET_TABLE_QSFP_TX_CDR_APPLY,
+ TX_PRESET_TABLE_QSFP_TX_EQ_APPLY,
+ TX_PRESET_TABLE_QSFP_TX_CDR,
+ TX_PRESET_TABLE_QSFP_TX_EQ,
+ TX_PRESET_TABLE_MAX
+};
+
+enum platform_config_qsfp_attn_table_fields {
+ QSFP_ATTEN_TABLE_RESERVED,
+ QSFP_ATTEN_TABLE_TX_PRESET_IDX,
+ QSFP_ATTEN_TABLE_RX_PRESET_IDX,
+ QSFP_ATTEN_TABLE_MAX
+};
+
+enum platform_config_variable_settings_table_fields {
+ VARIABLE_SETTINGS_TABLE_RESERVED,
+ VARIABLE_SETTINGS_TABLE_TX_PRESET_IDX,
+ VARIABLE_SETTINGS_TABLE_RX_PRESET_IDX,
+ VARIABLE_SETTINGS_TABLE_MAX
+};
+
+struct platform_config {
+ size_t size;
+ const u8 *data;
+};
+
+struct platform_config_data {
+ u32 *table;
+ u32 *table_metadata;
+ u32 num_table;
+};
+
+/*
+ * This struct acts as a quick reference into the platform_data binary image
+ * and is populated by parse_platform_config(...) depending on the specific
+ * META_VERSION
+ */
+struct platform_config_cache {
+ u8 cache_valid;
+ struct platform_config_data config_tables[PLATFORM_CONFIG_TABLE_MAX];
+};
+
+/*
+ * This section defines default values and encodings for the
+ * fields defined for each table above
+ */
+
+/*
+ * =====================================================
+ * System table encodings
+ * =====================================================
+ */
+#define PLATFORM_CONFIG_MAGIC_NUM 0x3d4f5041
+#define PLATFORM_CONFIG_MAGIC_NUMBER_LEN 4
+
+/*
+ * These power classes are the same as defined in SFF 8636 spec rev 2.4
+ * describing byte 129 in table 6-16, except enumerated in a different order
+ */
+enum platform_config_qsfp_power_class_encoding {
+ QSFP_POWER_CLASS_1 = 1,
+ QSFP_POWER_CLASS_2,
+ QSFP_POWER_CLASS_3,
+ QSFP_POWER_CLASS_4,
+ QSFP_POWER_CLASS_5,
+ QSFP_POWER_CLASS_6,
+ QSFP_POWER_CLASS_7
+};
+
+/*
+ * ====================================================
+ * Port table encodings
+ * ====================================================
+ */
+enum platform_config_port_type_encoding {
+ PORT_TYPE_UNKNOWN,
+ PORT_TYPE_DISCONNECTED,
+ PORT_TYPE_FIXED,
+ PORT_TYPE_VARIABLE,
+ PORT_TYPE_QSFP,
+ PORT_TYPE_MAX
+};
+
+enum platform_config_link_speed_supported_encoding {
+ LINK_SPEED_SUPP_12G = 1,
+ LINK_SPEED_SUPP_25G,
+ LINK_SPEED_SUPP_12G_25G,
+ LINK_SPEED_SUPP_MAX
+};
+
+/*
+ * This is a subset (not strict) of the link downgrades
+ * supported. The link downgrades supported are expected
+ * to be supplied to the driver by another entity such as
+ * the fabric manager
+ */
+enum platform_config_link_width_supported_encoding {
+ LINK_WIDTH_SUPP_1X = 1,
+ LINK_WIDTH_SUPP_2X,
+ LINK_WIDTH_SUPP_2X_1X,
+ LINK_WIDTH_SUPP_3X,
+ LINK_WIDTH_SUPP_3X_1X,
+ LINK_WIDTH_SUPP_3X_2X,
+ LINK_WIDTH_SUPP_3X_2X_1X,
+ LINK_WIDTH_SUPP_4X,
+ LINK_WIDTH_SUPP_4X_1X,
+ LINK_WIDTH_SUPP_4X_2X,
+ LINK_WIDTH_SUPP_4X_2X_1X,
+ LINK_WIDTH_SUPP_4X_3X,
+ LINK_WIDTH_SUPP_4X_3X_1X,
+ LINK_WIDTH_SUPP_4X_3X_2X,
+ LINK_WIDTH_SUPP_4X_3X_2X_1X,
+ LINK_WIDTH_SUPP_MAX
+};
+
+enum platform_config_virtual_lane_capability_encoding {
+ VL_CAP_VL0 = 1,
+ VL_CAP_VL0_1,
+ VL_CAP_VL0_2,
+ VL_CAP_VL0_3,
+ VL_CAP_VL0_4,
+ VL_CAP_VL0_5,
+ VL_CAP_VL0_6,
+ VL_CAP_VL0_7,
+ VL_CAP_VL0_8,
+ VL_CAP_VL0_9,
+ VL_CAP_VL0_10,
+ VL_CAP_VL0_11,
+ VL_CAP_VL0_12,
+ VL_CAP_VL0_13,
+ VL_CAP_VL0_14,
+ VL_CAP_MAX
+};
+
+/* Max MTU */
+enum platform_config_mtu_capability_encoding {
+ MTU_CAP_256 = 1,
+ MTU_CAP_512 = 2,
+ MTU_CAP_1024 = 3,
+ MTU_CAP_2048 = 4,
+ MTU_CAP_4096 = 5,
+ MTU_CAP_8192 = 6,
+ MTU_CAP_10240 = 7
+};
+
+enum platform_config_local_max_timeout_encoding {
+ LOCAL_MAX_TIMEOUT_10_MS = 1,
+ LOCAL_MAX_TIMEOUT_100_MS,
+ LOCAL_MAX_TIMEOUT_1_S,
+ LOCAL_MAX_TIMEOUT_10_S,
+ LOCAL_MAX_TIMEOUT_100_S,
+ LOCAL_MAX_TIMEOUT_1000_S
+};
+
+enum link_tuning_encoding {
+ OPA_PASSIVE_TUNING,
+ OPA_ACTIVE_TUNING,
+ OPA_UNKNOWN_TUNING
+};
+
+/*
+ * Shifts and masks for the link SI tuning values stuffed into the ASIC scratch
+ * registers for integrated platforms
+ */
+#define PORT0_PORT_TYPE_SHIFT 0
+#define PORT0_LOCAL_ATTEN_SHIFT 4
+#define PORT0_REMOTE_ATTEN_SHIFT 10
+#define PORT0_DEFAULT_ATTEN_SHIFT 32
+
+#define PORT1_PORT_TYPE_SHIFT 16
+#define PORT1_LOCAL_ATTEN_SHIFT 20
+#define PORT1_REMOTE_ATTEN_SHIFT 26
+#define PORT1_DEFAULT_ATTEN_SHIFT 40
+
+#define PORT0_PORT_TYPE_MASK 0xFUL
+#define PORT0_LOCAL_ATTEN_MASK 0x3FUL
+#define PORT0_REMOTE_ATTEN_MASK 0x3FUL
+#define PORT0_DEFAULT_ATTEN_MASK 0xFFUL
+
+#define PORT1_PORT_TYPE_MASK 0xFUL
+#define PORT1_LOCAL_ATTEN_MASK 0x3FUL
+#define PORT1_REMOTE_ATTEN_MASK 0x3FUL
+#define PORT1_DEFAULT_ATTEN_MASK 0xFFUL
+
+#define PORT0_PORT_TYPE_SMASK (PORT0_PORT_TYPE_MASK << \
+ PORT0_PORT_TYPE_SHIFT)
+#define PORT0_LOCAL_ATTEN_SMASK (PORT0_LOCAL_ATTEN_MASK << \
+ PORT0_LOCAL_ATTEN_SHIFT)
+#define PORT0_REMOTE_ATTEN_SMASK (PORT0_REMOTE_ATTEN_MASK << \
+ PORT0_REMOTE_ATTEN_SHIFT)
+#define PORT0_DEFAULT_ATTEN_SMASK (PORT0_DEFAULT_ATTEN_MASK << \
+ PORT0_DEFAULT_ATTEN_SHIFT)
+
+#define PORT1_PORT_TYPE_SMASK (PORT1_PORT_TYPE_MASK << \
+ PORT1_PORT_TYPE_SHIFT)
+#define PORT1_LOCAL_ATTEN_SMASK (PORT1_LOCAL_ATTEN_MASK << \
+ PORT1_LOCAL_ATTEN_SHIFT)
+#define PORT1_REMOTE_ATTEN_SMASK (PORT1_REMOTE_ATTEN_MASK << \
+ PORT1_REMOTE_ATTEN_SHIFT)
+#define PORT1_DEFAULT_ATTEN_SMASK (PORT1_DEFAULT_ATTEN_MASK << \
+ PORT1_DEFAULT_ATTEN_SHIFT)
+
+#define QSFP_MAX_POWER_SHIFT 0
+#define TX_NO_EQ_SHIFT 4
+#define TX_EQ_SHIFT 25
+#define RX_SHIFT 46
+
+#define QSFP_MAX_POWER_MASK 0xFUL
+#define TX_NO_EQ_MASK 0x1FFFFFUL
+#define TX_EQ_MASK 0x1FFFFFUL
+#define RX_MASK 0xFFFFUL
+
+#define QSFP_MAX_POWER_SMASK (QSFP_MAX_POWER_MASK << \
+ QSFP_MAX_POWER_SHIFT)
+#define TX_NO_EQ_SMASK (TX_NO_EQ_MASK << TX_NO_EQ_SHIFT)
+#define TX_EQ_SMASK (TX_EQ_MASK << TX_EQ_SHIFT)
+#define RX_SMASK (RX_MASK << RX_SHIFT)
+
+#define TX_PRECUR_SHIFT 0
+#define TX_ATTN_SHIFT 4
+#define QSFP_TX_CDR_APPLY_SHIFT 9
+#define QSFP_TX_EQ_APPLY_SHIFT 10
+#define QSFP_TX_CDR_SHIFT 11
+#define QSFP_TX_EQ_SHIFT 12
+#define TX_POSTCUR_SHIFT 16
+
+#define TX_PRECUR_MASK 0xFUL
+#define TX_ATTN_MASK 0x1FUL
+#define QSFP_TX_CDR_APPLY_MASK 0x1UL
+#define QSFP_TX_EQ_APPLY_MASK 0x1UL
+#define QSFP_TX_CDR_MASK 0x1UL
+#define QSFP_TX_EQ_MASK 0xFUL
+#define TX_POSTCUR_MASK 0x1FUL
+
+#define TX_PRECUR_SMASK (TX_PRECUR_MASK << TX_PRECUR_SHIFT)
+#define TX_ATTN_SMASK (TX_ATTN_MASK << TX_ATTN_SHIFT)
+#define QSFP_TX_CDR_APPLY_SMASK (QSFP_TX_CDR_APPLY_MASK << \
+ QSFP_TX_CDR_APPLY_SHIFT)
+#define QSFP_TX_EQ_APPLY_SMASK (QSFP_TX_EQ_APPLY_MASK << \
+ QSFP_TX_EQ_APPLY_SHIFT)
+#define QSFP_TX_CDR_SMASK (QSFP_TX_CDR_MASK << QSFP_TX_CDR_SHIFT)
+#define QSFP_TX_EQ_SMASK (QSFP_TX_EQ_MASK << QSFP_TX_EQ_SHIFT)
+#define TX_POSTCUR_SMASK (TX_POSTCUR_MASK << TX_POSTCUR_SHIFT)
+
+#define QSFP_RX_CDR_APPLY_SHIFT 0
+#define QSFP_RX_EMP_APPLY_SHIFT 1
+#define QSFP_RX_AMP_APPLY_SHIFT 2
+#define QSFP_RX_CDR_SHIFT 3
+#define QSFP_RX_EMP_SHIFT 4
+#define QSFP_RX_AMP_SHIFT 8
+
+#define QSFP_RX_CDR_APPLY_MASK 0x1UL
+#define QSFP_RX_EMP_APPLY_MASK 0x1UL
+#define QSFP_RX_AMP_APPLY_MASK 0x1UL
+#define QSFP_RX_CDR_MASK 0x1UL
+#define QSFP_RX_EMP_MASK 0xFUL
+#define QSFP_RX_AMP_MASK 0x3UL
+
+#define QSFP_RX_CDR_APPLY_SMASK (QSFP_RX_CDR_APPLY_MASK << \
+ QSFP_RX_CDR_APPLY_SHIFT)
+#define QSFP_RX_EMP_APPLY_SMASK (QSFP_RX_EMP_APPLY_MASK << \
+ QSFP_RX_EMP_APPLY_SHIFT)
+#define QSFP_RX_AMP_APPLY_SMASK (QSFP_RX_AMP_APPLY_MASK << \
+ QSFP_RX_AMP_APPLY_SHIFT)
+#define QSFP_RX_CDR_SMASK (QSFP_RX_CDR_MASK << QSFP_RX_CDR_SHIFT)
+#define QSFP_RX_EMP_SMASK (QSFP_RX_EMP_MASK << QSFP_RX_EMP_SHIFT)
+#define QSFP_RX_AMP_SMASK (QSFP_RX_AMP_MASK << QSFP_RX_AMP_SHIFT)
+
+#define BITMAP_VERSION 1
+#define BITMAP_VERSION_SHIFT 44
+#define BITMAP_VERSION_MASK 0xFUL
+#define BITMAP_VERSION_SMASK (BITMAP_VERSION_MASK << \
+ BITMAP_VERSION_SHIFT)
+#define CHECKSUM_SHIFT 48
+#define CHECKSUM_MASK 0xFFFFUL
+#define CHECKSUM_SMASK (CHECKSUM_MASK << CHECKSUM_SHIFT)
+
+/* platform.c */
+void get_platform_config(struct hfi1_devdata *dd);
+void free_platform_config(struct hfi1_devdata *dd);
+void get_port_type(struct hfi1_pportdata *ppd);
+int set_qsfp_tx(struct hfi1_pportdata *ppd, int on);
+void tune_serdes(struct hfi1_pportdata *ppd);
+
+#endif /*__PLATFORM_H*/
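
The SHIFT/MASK/SMASK triplets above are consumed by save_platform_config_fields()
in platform.c. The short user-space sketch below is a minimal model rather than
driver code: it shows the extraction pattern for the two lowest PORT0 fields,
and the 64-bit sample register value is made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define PORT0_PORT_TYPE_SHIFT    0
#define PORT0_LOCAL_ATTEN_SHIFT  4
#define PORT0_PORT_TYPE_MASK     0xFULL
#define PORT0_LOCAL_ATTEN_MASK   0x3FULL
#define PORT0_PORT_TYPE_SMASK    (PORT0_PORT_TYPE_MASK << PORT0_PORT_TYPE_SHIFT)
#define PORT0_LOCAL_ATTEN_SMASK  (PORT0_LOCAL_ATTEN_MASK << PORT0_LOCAL_ATTEN_SHIFT)

int main(void)
{
	uint64_t scratch = 0x54;	/* made-up ASIC_CFG_SCRATCH_1 image */
	uint64_t port_type =
		(scratch & PORT0_PORT_TYPE_SMASK) >> PORT0_PORT_TYPE_SHIFT;
	uint64_t local_atten =
		(scratch & PORT0_LOCAL_ATTEN_SMASK) >> PORT0_LOCAL_ATTEN_SHIFT;

	/* prints: port_type=4 (PORT_TYPE_QSFP), local_atten=5 */
	printf("port_type=%llu local_atten=%llu\n",
	       (unsigned long long)port_type, (unsigned long long)local_atten);
	return 0;
}
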
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
new file mode 100644
index 000000000000..f3d8c0c193ac
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -0,0 +1,925 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015 - 2020 Intel Corporation.
+ */
+
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+#include <linux/hash.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_qp.h>
+#include <rdma/ib_verbs.h>
+
+#include "hfi.h"
+#include "qp.h"
+#include "trace.h"
+#include "verbs_txreq.h"
+
+unsigned int hfi1_qp_table_size = 256;
+module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
+MODULE_PARM_DESC(qp_table_size, "QP table size");
+
+static void flush_tx_list(struct rvt_qp *qp);
+static int iowait_sleep(
+ struct sdma_engine *sde,
+ struct iowait_work *wait,
+ struct sdma_txreq *stx,
+ unsigned int seq,
+ bool pkts_sent);
+static void iowait_wakeup(struct iowait *wait, int reason);
+static void iowait_sdma_drained(struct iowait *wait);
+static void qp_pio_drain(struct rvt_qp *qp);
+
+const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
+[IB_WR_RDMA_WRITE] = {
+ .length = sizeof(struct ib_rdma_wr),
+ .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+},
+
+[IB_WR_RDMA_READ] = {
+ .length = sizeof(struct ib_rdma_wr),
+ .qpt_support = BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_ATOMIC,
+},
+
+[IB_WR_ATOMIC_CMP_AND_SWP] = {
+ .length = sizeof(struct ib_atomic_wr),
+ .qpt_support = BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
+},
+
+[IB_WR_ATOMIC_FETCH_AND_ADD] = {
+ .length = sizeof(struct ib_atomic_wr),
+ .qpt_support = BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
+},
+
+[IB_WR_RDMA_WRITE_WITH_IMM] = {
+ .length = sizeof(struct ib_rdma_wr),
+ .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+},
+
+[IB_WR_SEND] = {
+ .length = sizeof(struct ib_send_wr),
+ .qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
+ BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+},
+
+[IB_WR_SEND_WITH_IMM] = {
+ .length = sizeof(struct ib_send_wr),
+ .qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
+ BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+},
+
+[IB_WR_REG_MR] = {
+ .length = sizeof(struct ib_reg_wr),
+ .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_LOCAL,
+},
+
+[IB_WR_LOCAL_INV] = {
+ .length = sizeof(struct ib_send_wr),
+ .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_LOCAL,
+},
+
+[IB_WR_SEND_WITH_INV] = {
+ .length = sizeof(struct ib_send_wr),
+ .qpt_support = BIT(IB_QPT_RC),
+},
+
+[IB_WR_OPFN] = {
+ .length = sizeof(struct ib_atomic_wr),
+ .qpt_support = BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_USE_RESERVE,
+},
+
+[IB_WR_TID_RDMA_WRITE] = {
+ .length = sizeof(struct ib_rdma_wr),
+ .qpt_support = BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_IGN_RNR_CNT,
+},
+
+};
+
+static void flush_list_head(struct list_head *l)
+{
+ while (!list_empty(l)) {
+ struct sdma_txreq *tx;
+
+ tx = list_first_entry(
+ l,
+ struct sdma_txreq,
+ list);
+ list_del_init(&tx->list);
+ hfi1_put_txreq(
+ container_of(tx, struct verbs_txreq, txreq));
+ }
+}
+
+static void flush_tx_list(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ flush_list_head(&iowait_get_ib_work(&priv->s_iowait)->tx_head);
+ flush_list_head(&iowait_get_tid_work(&priv->s_iowait)->tx_head);
+}
+
+static void flush_iowait(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ unsigned long flags;
+ seqlock_t *lock = priv->s_iowait.lock;
+
+ if (!lock)
+ return;
+ write_seqlock_irqsave(lock, flags);
+ if (!list_empty(&priv->s_iowait.list)) {
+ list_del_init(&priv->s_iowait.list);
+ priv->s_iowait.lock = NULL;
+ rvt_put_qp(qp);
+ }
+ write_sequnlock_irqrestore(lock, flags);
+}
+
+/*
+ * This function is what we would push to the core layer if we wanted to be a
+ * "first class citizen". Instead we hide this here and rely on Verbs ULPs
+ * to blindly pass the MTU enum value from the PathRecord to us.
+ */
+static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
+{
+ /* Constraining 10KB packets to 8KB packets */
+ if (mtu == (enum ib_mtu)OPA_MTU_10240)
+ mtu = (enum ib_mtu)OPA_MTU_8192;
+ return opa_mtu_enum_to_int((enum opa_mtu)mtu);
+}
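+
+/*
+ * Example (illustration only): a PathRecord MTU of OPA_MTU_10240 is
+ * clamped to OPA_MTU_8192, so this helper returns 8192 bytes, while a
+ * standard enum such as IB_MTU_4096 passes straight through to
+ * opa_mtu_enum_to_int() and yields 4096.
+ */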
+
+int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct ib_qp *ibqp = &qp->ibqp;
+ struct hfi1_ibdev *dev = to_idev(ibqp->device);
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+ u8 sc;
+
+ if (attr_mask & IB_QP_AV) {
+ sc = ah_to_sc(ibqp->device, &attr->ah_attr);
+ if (sc == 0xf)
+ return -EINVAL;
+
+ if (!qp_to_sdma_engine(qp, sc) &&
+ dd->flags & HFI1_HAS_SEND_DMA)
+ return -EINVAL;
+
+ if (!qp_to_send_context(qp, sc))
+ return -EINVAL;
+ }
+
+ if (attr_mask & IB_QP_ALT_PATH) {
+ sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
+ if (sc == 0xf)
+ return -EINVAL;
+
+ if (!qp_to_sdma_engine(qp, sc) &&
+ dd->flags & HFI1_HAS_SEND_DMA)
+ return -EINVAL;
+
+ if (!qp_to_send_context(qp, sc))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * qp_set_16b - Set the hdr_type based on whether the slid or the
+ * dlid in the connection is extended. Only applicable for RC and UC
+ * QPs. UD QPs determine this on the fly from the ah in the wqe
+ */
+static inline void qp_set_16b(struct rvt_qp *qp)
+{
+ struct hfi1_pportdata *ppd;
+ struct hfi1_ibport *ibp;
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ /* Update ah_attr to account for extended LIDs */
+ hfi1_update_ah_attr(qp->ibqp.device, &qp->remote_ah_attr);
+
+ /* Create 32 bit LIDs */
+ hfi1_make_opa_lid(&qp->remote_ah_attr);
+
+ if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH))
+ return;
+
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ ppd = ppd_from_ibp(ibp);
+ priv->hdr_type = hfi1_get_hdr_type(ppd->lid, &qp->remote_ah_attr);
+}
+
+void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct ib_qp *ibqp = &qp->ibqp;
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ if (attr_mask & IB_QP_AV) {
+ priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
+ priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
+ priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
+ qp_set_16b(qp);
+ }
+
+ if (attr_mask & IB_QP_PATH_MIG_STATE &&
+ attr->path_mig_state == IB_MIG_MIGRATED &&
+ qp->s_mig_state == IB_MIG_ARMED) {
+ qp->s_flags |= HFI1_S_AHG_CLEAR;
+ priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
+ priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
+ priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
+ qp_set_16b(qp);
+ }
+
+ opfn_qp_init(qp, attr, attr_mask);
+}
+
+/**
+ * hfi1_setup_wqe - set up the wqe
+ * @qp: The qp
+ * @wqe: The built wqe
+ * @call_send: Determine if the send should be posted or scheduled.
+ *
+ * Perform setup of the wqe. This is called
+ * prior to inserting the wqe into the ring but after
+ * the wqe has been setup by RDMAVT. This function
+ * allows the driver the opportunity to perform
+ * validation and additional setup of the wqe.
+ *
+ * Returns 0 on success, -EINVAL on failure
+ *
+ */
+int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
+{
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct rvt_ah *ah;
+ struct hfi1_pportdata *ppd;
+ struct hfi1_devdata *dd;
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_RC:
+ hfi1_setup_tid_rdma_wqe(qp, wqe);
+ fallthrough;
+ case IB_QPT_UC:
+ if (wqe->length > 0x80000000U)
+ return -EINVAL;
+ if (wqe->length > qp->pmtu)
+ *call_send = false;
+ break;
+ case IB_QPT_SMI:
+ /*
+ * SM packets should exclusively use VL15 and their SL is
+ * ignored (IBTA v1.3, Section 3.5.8.2). Therefore, when ah
+ * is created, SL is 0 in most cases and as a result some
+ * fields (vl and pmtu) in ah may not be set correctly,
+ * depending on the SL2SC and SC2VL tables at the time.
+ */
+ ppd = ppd_from_ibp(ibp);
+ dd = dd_from_ppd(ppd);
+ if (wqe->length > dd->vld[15].mtu)
+ return -EINVAL;
+ break;
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ ah = rvt_get_swqe_ah(wqe);
+ if (wqe->length > (1 << ah->log_pmtu))
+ return -EINVAL;
+ if (ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)] == 0xf)
+ return -EINVAL;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * System latency between send and schedule is large enough that
+ * forcing call_send to true for piothreshold packets is necessary.
+ */
+ if (wqe->length <= piothreshold)
+ *call_send = true;
+ return 0;
+}
+
+/**
+ * _hfi1_schedule_send - schedule progress
+ * @qp: the QP
+ *
+ * This schedules qp progress w/o regard to the s_flags.
+ *
+ * It is only used in the post send, which doesn't hold
+ * the s_lock.
+ */
+bool _hfi1_schedule_send(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_ibport *ibp =
+ to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct hfi1_devdata *dd = ppd->dd;
+
+ if (dd->flags & HFI1_SHUTDOWN)
+ return true;
+
+ return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
+ priv->s_sde ?
+ priv->s_sde->cpu :
+ cpumask_first(cpumask_of_node(dd->node)));
+}
+
+static void qp_pio_drain(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ if (!priv->s_sendcontext)
+ return;
+ while (iowait_pio_pending(&priv->s_iowait)) {
+ write_seqlock_irq(&priv->s_sendcontext->waitlock);
+ hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1);
+ write_sequnlock_irq(&priv->s_sendcontext->waitlock);
+ iowait_pio_drain(&priv->s_iowait);
+ write_seqlock_irq(&priv->s_sendcontext->waitlock);
+ hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0);
+ write_sequnlock_irq(&priv->s_sendcontext->waitlock);
+ }
+}
+
+/**
+ * hfi1_schedule_send - schedule progress
+ * @qp: the QP
+ *
+ * This schedules qp progress and caller should hold
+ * the s_lock.
+ * @return true if the first leg is scheduled;
+ * false if the first leg is not scheduled.
+ */
+bool hfi1_schedule_send(struct rvt_qp *qp)
+{
+ lockdep_assert_held(&qp->s_lock);
+ if (hfi1_send_ok(qp)) {
+ _hfi1_schedule_send(qp);
+ return true;
+ }
+ if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
+ iowait_set_flag(&((struct hfi1_qp_priv *)qp->priv)->s_iowait,
+ IOWAIT_PENDING_IB);
+ return false;
+}
+
+static void hfi1_qp_schedule(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ bool ret;
+
+ if (iowait_flag_set(&priv->s_iowait, IOWAIT_PENDING_IB)) {
+ ret = hfi1_schedule_send(qp);
+ if (ret)
+ iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
+ }
+ if (iowait_flag_set(&priv->s_iowait, IOWAIT_PENDING_TID)) {
+ ret = hfi1_schedule_tid_send(qp);
+ if (ret)
+ iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
+ }
+}
+
+void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qp->s_flags & flag) {
+ qp->s_flags &= ~flag;
+ trace_hfi1_qpwakeup(qp, flag);
+ hfi1_qp_schedule(qp);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ /* Notify hfi1_destroy_qp() if it is waiting. */
+ rvt_put_qp(qp);
+}
+
+void hfi1_qp_unbusy(struct rvt_qp *qp, struct iowait_work *wait)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ if (iowait_set_work_flag(wait) == IOWAIT_IB_SE) {
+ qp->s_flags &= ~RVT_S_BUSY;
+ /*
+ * If we are sending a first-leg packet from the second leg,
+ * we need to clear the busy flag from priv->s_flags to
+ * avoid a race condition when the qp wakes up before
+ * the call to hfi1_verbs_send() returns to the second
+ * leg. In that case, the second leg will terminate without
+ * being re-scheduled, resulting in failure to send TID RDMA
+ * WRITE DATA and TID RDMA ACK packets.
+ */
+ if (priv->s_flags & HFI1_S_TID_BUSY_SET) {
+ priv->s_flags &= ~(HFI1_S_TID_BUSY_SET |
+ RVT_S_BUSY);
+ iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
+ }
+ } else {
+ priv->s_flags &= ~RVT_S_BUSY;
+ }
+}
+
+static int iowait_sleep(
+ struct sdma_engine *sde,
+ struct iowait_work *wait,
+ struct sdma_txreq *stx,
+ uint seq,
+ bool pkts_sent)
+{
+ struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
+ struct rvt_qp *qp;
+ struct hfi1_qp_priv *priv;
+ unsigned long flags;
+ int ret = 0;
+
+ qp = tx->qp;
+ priv = qp->priv;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
+ /*
+ * If we couldn't queue the DMA request, save the info
+ * and try again later rather than destroying the
+ * buffer and undoing the side effects of the copy.
+ */
+ /* Make a common routine? */
+ list_add_tail(&stx->list, &wait->tx_head);
+ write_seqlock(&sde->waitlock);
+ if (sdma_progress(sde, seq, stx))
+ goto eagain;
+ if (list_empty(&priv->s_iowait.list)) {
+ struct hfi1_ibport *ibp =
+ to_iport(qp->ibqp.device, qp->port_num);
+
+ ibp->rvp.n_dmawait++;
+ qp->s_flags |= RVT_S_WAIT_DMA_DESC;
+ iowait_get_priority(&priv->s_iowait);
+ iowait_queue(pkts_sent, &priv->s_iowait,
+ &sde->dmawait);
+ priv->s_iowait.lock = &sde->waitlock;
+ trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
+ rvt_get_qp(qp);
+ }
+ write_sequnlock(&sde->waitlock);
+ hfi1_qp_unbusy(qp, wait);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ ret = -EBUSY;
+ } else {
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ hfi1_put_txreq(tx);
+ }
+ return ret;
+eagain:
+ write_sequnlock(&sde->waitlock);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ list_del_init(&stx->list);
+ return -EAGAIN;
+}
+
+static void iowait_wakeup(struct iowait *wait, int reason)
+{
+ struct rvt_qp *qp = iowait_to_qp(wait);
+
+ WARN_ON(reason != SDMA_AVAIL_REASON);
+ hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
+}
+
+static void iowait_sdma_drained(struct iowait *wait)
+{
+ struct rvt_qp *qp = iowait_to_qp(wait);
+ unsigned long flags;
+
+ /*
+ * This happens when the send engine notes
+ * a QP in the error state and cannot
+ * do the flush work until that QP's
+ * sdma work has finished.
+ */
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qp->s_flags & RVT_S_WAIT_DMA) {
+ qp->s_flags &= ~RVT_S_WAIT_DMA;
+ hfi1_schedule_send(qp);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
+
+static void hfi1_init_priority(struct iowait *w)
+{
+ struct rvt_qp *qp = iowait_to_qp(w);
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ if (qp->s_flags & RVT_S_ACK_PENDING)
+ w->priority++;
+ if (priv->s_flags & RVT_S_ACK_PENDING)
+ w->priority++;
+}
+
+/**
+ * qp_to_sdma_engine - map a qp to a send engine
+ * @qp: the QP
+ * @sc5: the 5 bit sc
+ *
+ * Return:
+ * A send engine for the qp or NULL for SMI type qp.
+ */
+struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct sdma_engine *sde;
+
+ if (!(dd->flags & HFI1_HAS_SEND_DMA))
+ return NULL;
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_SMI:
+ return NULL;
+ default:
+ break;
+ }
+ sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
+ return sde;
+}
+
+/**
+ * qp_to_send_context - map a qp to a send context
+ * @qp: the QP
+ * @sc5: the 5 bit sc
+ *
+ * Return:
+ * A send context for the qp
+ */
+struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_SMI:
+ /* SMA packets to VL15 */
+ return dd->vld[15].sc;
+ default:
+ break;
+ }
+
+ return pio_select_send_context_sc(dd, qp->ibqp.qp_num >> dd->qos_shift,
+ sc5);
+}
+
+static const char * const qp_type_str[] = {
+ "SMI", "GSI", "RC", "UC", "UD",
+};
+
+static int qp_idle(struct rvt_qp *qp)
+{
+ return
+ qp->s_last == qp->s_acked &&
+ qp->s_acked == qp->s_cur &&
+ qp->s_cur == qp->s_tail &&
+ qp->s_tail == qp->s_head;
+}
+
+/**
+ * qp_iter_print - print the qp information to seq_file
+ * @s: the seq_file to emit the qp information on
+ * @iter: the iterator for the qp hash list
+ */
+void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
+{
+ struct rvt_swqe *wqe;
+ struct rvt_qp *qp = iter->qp;
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct sdma_engine *sde;
+ struct send_context *send_context;
+ struct rvt_ack_entry *e = NULL;
+ struct rvt_srq *srq = qp->ibqp.srq ?
+ ibsrq_to_rvtsrq(qp->ibqp.srq) : NULL;
+
+ sde = qp_to_sdma_engine(qp, priv->s_sc);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_last);
+ send_context = qp_to_send_context(qp, priv->s_sc);
+ if (qp->s_ack_queue)
+ e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ seq_printf(s,
+ "N %d %s QP %x R %u %s %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x S(%u %u %u %u %u %u %u) R(%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d OS %x %x E %x %x %x RNR %d %s %d\n",
+ iter->n,
+ qp_idle(qp) ? "I" : "B",
+ qp->ibqp.qp_num,
+ atomic_read(&qp->refcount),
+ qp_type_str[qp->ibqp.qp_type],
+ qp->state,
+ wqe ? wqe->wr.opcode : 0,
+ qp->s_flags,
+ iowait_sdma_pending(&priv->s_iowait),
+ iowait_pio_pending(&priv->s_iowait),
+ !list_empty(&priv->s_iowait.list),
+ qp->timeout,
+ wqe ? wqe->ssn : 0,
+ qp->s_lsn,
+ qp->s_last_psn,
+ qp->s_psn, qp->s_next_psn,
+ qp->s_sending_psn, qp->s_sending_hpsn,
+ qp->r_psn,
+ qp->s_last, qp->s_acked, qp->s_cur,
+ qp->s_tail, qp->s_head, qp->s_size,
+ qp->s_avail,
+ /* ack_queue ring pointers, size */
+ qp->s_tail_ack_queue, qp->r_head_ack_queue,
+ rvt_max_atomic(&to_idev(qp->ibqp.device)->rdi),
+ /* remote QP info */
+ qp->remote_qpn,
+ rdma_ah_get_dlid(&qp->remote_ah_attr),
+ rdma_ah_get_sl(&qp->remote_ah_attr),
+ qp->pmtu,
+ qp->s_retry,
+ qp->s_retry_cnt,
+ qp->s_rnr_retry_cnt,
+ qp->s_rnr_retry,
+ sde,
+ sde ? sde->this_idx : 0,
+ send_context,
+ send_context ? send_context->sw_index : 0,
+ ib_cq_head(qp->ibqp.send_cq),
+ ib_cq_tail(qp->ibqp.send_cq),
+ qp->pid,
+ qp->s_state,
+ qp->s_ack_state,
+ /* ack queue information */
+ e ? e->opcode : 0,
+ e ? e->psn : 0,
+ e ? e->lpsn : 0,
+ qp->r_min_rnr_timer,
+ srq ? "SRQ" : "RQ",
+ srq ? srq->rq.size : qp->r_rq.size
+ );
+}
+
+void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv;
+
+ priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, rdi->dparms.node);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->owner = qp;
+
+ priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), GFP_KERNEL,
+ rdi->dparms.node);
+ if (!priv->s_ahg) {
+ kfree(priv);
+ return ERR_PTR(-ENOMEM);
+ }
+ iowait_init(
+ &priv->s_iowait,
+ 1,
+ _hfi1_do_send,
+ _hfi1_do_tid_send,
+ iowait_sleep,
+ iowait_wakeup,
+ iowait_sdma_drained,
+ hfi1_init_priority);
+ /* Init to a value to start the running average correctly */
+ priv->s_running_pkt_size = piothreshold / 2;
+ return priv;
+}
+
+void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ hfi1_qp_priv_tid_free(rdi, qp);
+ kfree(priv->s_ahg);
+ kfree(priv);
+}
+
+unsigned free_all_qps(struct rvt_dev_info *rdi)
+{
+ struct hfi1_ibdev *verbs_dev = container_of(rdi,
+ struct hfi1_ibdev,
+ rdi);
+ struct hfi1_devdata *dd = container_of(verbs_dev,
+ struct hfi1_devdata,
+ verbs_dev);
+ int n;
+ unsigned qp_inuse = 0;
+
+ for (n = 0; n < dd->num_pports; n++) {
+ struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;
+
+ rcu_read_lock();
+ if (rcu_dereference(ibp->rvp.qp[0]))
+ qp_inuse++;
+ if (rcu_dereference(ibp->rvp.qp[1]))
+ qp_inuse++;
+ rcu_read_unlock();
+ }
+
+ return qp_inuse;
+}
+
+void flush_qp_waiters(struct rvt_qp *qp)
+{
+ lockdep_assert_held(&qp->s_lock);
+ flush_iowait(qp);
+ hfi1_tid_rdma_flush_wait(qp);
+}
+
+void stop_send_queue(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ iowait_cancel_work(&priv->s_iowait);
+ if (cancel_work_sync(&priv->tid_rdma.trigger_work))
+ rvt_put_qp(qp);
+}
+
+void quiesce_qp(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ hfi1_del_tid_reap_timer(qp);
+ hfi1_del_tid_retry_timer(qp);
+ iowait_sdma_drain(&priv->s_iowait);
+ qp_pio_drain(qp);
+ flush_tx_list(qp);
+}
+
+void notify_qp_reset(struct rvt_qp *qp)
+{
+ hfi1_qp_kern_exp_rcv_clear_all(qp);
+ qp->r_adefered = 0;
+ clear_ahg(qp);
+
+ /* Clear any OPFN state */
+ if (qp->ibqp.qp_type == IB_QPT_RC)
+ opfn_conn_error(qp);
+}
+
+/*
+ * Switch to alternate path.
+ * The QP s_lock should be held and interrupts disabled.
+ */
+void hfi1_migrate_qp(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct ib_event ev;
+
+ qp->s_mig_state = IB_MIG_MIGRATED;
+ qp->remote_ah_attr = qp->alt_ah_attr;
+ qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
+ qp->s_pkey_index = qp->s_alt_pkey_index;
+ qp->s_flags |= HFI1_S_AHG_CLEAR;
+ priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
+ priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
+ qp_set_16b(qp);
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_PATH_MIG;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+}
+
+int mtu_to_path_mtu(u32 mtu)
+{
+ return mtu_to_enum(mtu, OPA_MTU_8192);
+}
+
+u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
+{
+ u32 mtu;
+ struct hfi1_ibdev *verbs_dev = container_of(rdi,
+ struct hfi1_ibdev,
+ rdi);
+ struct hfi1_devdata *dd = container_of(verbs_dev,
+ struct hfi1_devdata,
+ verbs_dev);
+ struct hfi1_ibport *ibp;
+ u8 sc, vl;
+
+ ibp = &dd->pport[qp->port_num - 1].ibport_data;
+ sc = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
+ vl = sc_to_vlt(dd, sc);
+
+ mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu);
+ if (vl < PER_VL_SEND_CONTEXTS)
+ mtu = min_t(u32, mtu, dd->vld[vl].mtu);
+ return mtu;
+}
+
+int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ struct ib_qp_attr *attr)
+{
+ int mtu, pidx = qp->port_num - 1;
+ struct hfi1_ibdev *verbs_dev = container_of(rdi,
+ struct hfi1_ibdev,
+ rdi);
+ struct hfi1_devdata *dd = container_of(verbs_dev,
+ struct hfi1_devdata,
+ verbs_dev);
+ mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu);
+ if (mtu == -1)
+ return -1; /* values less than 0 are error */
+
+ if (mtu > dd->pport[pidx].ibmtu)
+ return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
+ else
+ return attr->path_mtu;
+}
+
+void notify_error_qp(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ seqlock_t *lock = priv->s_iowait.lock;
+
+ if (lock) {
+ write_seqlock(lock);
+ if (!list_empty(&priv->s_iowait.list) &&
+ !(qp->s_flags & RVT_S_BUSY) &&
+ !(priv->s_flags & RVT_S_BUSY)) {
+ qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
+ iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
+ iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
+ list_del_init(&priv->s_iowait.list);
+ priv->s_iowait.lock = NULL;
+ rvt_put_qp(qp);
+ }
+ write_sequnlock(lock);
+ }
+
+ if (!(qp->s_flags & RVT_S_BUSY) && !(priv->s_flags & RVT_S_BUSY)) {
+ qp->s_hdrwords = 0;
+ if (qp->s_rdma_mr) {
+ rvt_put_mr(qp->s_rdma_mr);
+ qp->s_rdma_mr = NULL;
+ }
+ flush_tx_list(qp);
+ }
+}
+
+/**
+ * hfi1_qp_iter_cb - callback for iterator
+ * @qp: the qp
+ * @v: the sl in low bits of v
+ *
+ * This is called from the iterator callback to work
+ * on an individual qp.
+ */
+static void hfi1_qp_iter_cb(struct rvt_qp *qp, u64 v)
+{
+ int lastwqe;
+ struct ib_event ev;
+ struct hfi1_ibport *ibp =
+ to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u8 sl = (u8)v;
+
+ if (qp->port_num != ppd->port ||
+ (qp->ibqp.qp_type != IB_QPT_UC &&
+ qp->ibqp.qp_type != IB_QPT_RC) ||
+ rdma_ah_get_sl(&qp->remote_ah_attr) != sl ||
+ !(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
+ return;
+
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_hlock);
+ spin_lock(&qp->s_lock);
+ lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ spin_unlock(&qp->s_lock);
+ spin_unlock(&qp->s_hlock);
+ spin_unlock_irq(&qp->r_lock);
+ if (lastwqe) {
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+}
+
+/**
+ * hfi1_error_port_qps - put a port's RC/UC qps into error state
+ * @ibp: the ibport.
+ * @sl: the service level.
+ *
+ * This function places all RC/UC qps with a given service level into error
+ * state. It is generally called to force upper layer apps to abandon stale qps
+ * after an sl->sc mapping change.
+ */
+void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl)
+{
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct hfi1_ibdev *dev = &ppd->dd->verbs_dev;
+
+ rvt_qp_iter(&dev->rdi, sl, hfi1_qp_iter_cb);
+}
diff --git a/drivers/infiniband/hw/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h
new file mode 100644
index 000000000000..870ff1a6e5c4
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/qp.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2015 - 2018 Intel Corporation.
+ */
+
+#ifndef _QP_H
+#define _QP_H
+#include <linux/hash.h>
+#include <rdma/rdmavt_qp.h>
+#include "verbs.h"
+#include "sdma.h"
+#include "verbs_txreq.h"
+
+extern unsigned int hfi1_qp_table_size;
+
+extern const struct rvt_operation_params hfi1_post_parms[];
+
+/*
+ * Driver specific s_flags starting at bit 31 down to HFI1_S_MIN_BIT_MASK
+ *
+ * HFI1_S_AHG_VALID - ahg header valid on chip
+ * HFI1_S_AHG_CLEAR - have send engine clear ahg state
+ * HFI1_S_WAIT_PIO_DRAIN - qp waiting for PIOs to drain
+ * HFI1_S_WAIT_TID_SPACE - a QP is waiting for TID resource
+ * HFI1_S_WAIT_TID_RESP - waiting for a TID RDMA WRITE response
+ * HFI1_S_WAIT_HALT - halt the first leg send engine
+ * HFI1_S_MIN_BIT_MASK - the lowest bit that can be used by hfi1
+ */
+#define HFI1_S_AHG_VALID 0x80000000
+#define HFI1_S_AHG_CLEAR 0x40000000
+#define HFI1_S_WAIT_PIO_DRAIN 0x20000000
+#define HFI1_S_WAIT_TID_SPACE 0x10000000
+#define HFI1_S_WAIT_TID_RESP 0x08000000
+#define HFI1_S_WAIT_HALT 0x04000000
+#define HFI1_S_MIN_BIT_MASK 0x01000000
+
+/*
+ * overload wait defines
+ */
+
+#define HFI1_S_ANY_WAIT_IO (RVT_S_ANY_WAIT_IO | HFI1_S_WAIT_PIO_DRAIN)
+#define HFI1_S_ANY_WAIT (HFI1_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
+#define HFI1_S_ANY_TID_WAIT_SEND (RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA)
+
+/*
+ * Send if not busy or waiting for I/O and either
+ * an RC response is pending or we can process send work requests.
+ */
+static inline int hfi1_send_ok(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ return !(qp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT_IO)) &&
+ (verbs_txreq_queued(iowait_get_ib_work(&priv->s_iowait)) ||
+ (qp->s_flags & RVT_S_RESP_PENDING) ||
+ !(qp->s_flags & RVT_S_ANY_WAIT_SEND));
+}
+
+/*
+ * clear_ahg - clear ahg from QP
+ */
+static inline void clear_ahg(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ priv->s_ahg->ahgcount = 0;
+ qp->s_flags &= ~(HFI1_S_AHG_VALID | HFI1_S_AHG_CLEAR);
+ if (priv->s_sde && qp->s_ahgidx >= 0)
+ sdma_ahg_free(priv->s_sde, qp->s_ahgidx);
+ qp->s_ahgidx = -1;
+}
+
+/**
+ * hfi1_qp_wakeup - wake up on the indicated event
+ * @qp: the QP
+ * @flag: the s_flags bit on which the qp is stalled
+ */
+void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag);
+
+struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5);
+struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5);
+
+void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter);
+
+bool _hfi1_schedule_send(struct rvt_qp *qp);
+bool hfi1_schedule_send(struct rvt_qp *qp);
+
+void hfi1_migrate_qp(struct rvt_qp *qp);
+
+/*
+ * Functions provided by hfi1 driver for rdmavt to use
+ */
+void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp);
+void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
+unsigned free_all_qps(struct rvt_dev_info *rdi);
+void notify_qp_reset(struct rvt_qp *qp);
+int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ struct ib_qp_attr *attr);
+void flush_qp_waiters(struct rvt_qp *qp);
+void notify_error_qp(struct rvt_qp *qp);
+void stop_send_queue(struct rvt_qp *qp);
+void quiesce_qp(struct rvt_qp *qp);
+u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu);
+int mtu_to_path_mtu(u32 mtu);
+void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl);
+void hfi1_qp_unbusy(struct rvt_qp *qp, struct iowait_work *wait);
+#endif /* _QP_H */
diff --git a/drivers/infiniband/hw/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c
new file mode 100644
index 000000000000..3b7842a7f634
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/qsfp.c
@@ -0,0 +1,796 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+#include "hfi.h"
+
+/* for the given bus number, return the CSR for reading an i2c line */
+static inline u32 i2c_in_csr(u32 bus_num)
+{
+ return bus_num ? ASIC_QSFP2_IN : ASIC_QSFP1_IN;
+}
+
+/* for the given bus number, return the CSR for writing an i2c line */
+static inline u32 i2c_oe_csr(u32 bus_num)
+{
+ return bus_num ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
+}
+
+static void hfi1_setsda(void *data, int state)
+{
+ struct hfi1_i2c_bus *bus = (struct hfi1_i2c_bus *)data;
+ struct hfi1_devdata *dd = bus->controlling_dd;
+ u64 reg;
+ u32 target_oe;
+
+ target_oe = i2c_oe_csr(bus->num);
+ reg = read_csr(dd, target_oe);
+ /*
+ * The OE bit value is inverted and connected to the pin. When
+ * OE is 0 the pin is left to be pulled up, when the OE is 1
+ * the pin is driven low. This matches the "open drain" or "open
+ * collector" convention.
+ */
+ if (state)
+ reg &= ~QSFP_HFI0_I2CDAT;
+ else
+ reg |= QSFP_HFI0_I2CDAT;
+ write_csr(dd, target_oe, reg);
+ /* do a read to force the write into the chip */
+ (void)read_csr(dd, target_oe);
+}
+
+static void hfi1_setscl(void *data, int state)
+{
+ struct hfi1_i2c_bus *bus = (struct hfi1_i2c_bus *)data;
+ struct hfi1_devdata *dd = bus->controlling_dd;
+ u64 reg;
+ u32 target_oe;
+
+ target_oe = i2c_oe_csr(bus->num);
+ reg = read_csr(dd, target_oe);
+ /*
+ * The OE bit value is inverted and connected to the pin. When
+ * OE is 0 the pin is left to be pulled up, when the OE is 1
+ * the pin is driven low. This matches the "open drain" or "open
+ * collector" convention.
+ */
+ if (state)
+ reg &= ~QSFP_HFI0_I2CCLK;
+ else
+ reg |= QSFP_HFI0_I2CCLK;
+ write_csr(dd, target_oe, reg);
+ /* do a read to force the write into the chip */
+ (void)read_csr(dd, target_oe);
+}
+
+static int hfi1_getsda(void *data)
+{
+ struct hfi1_i2c_bus *bus = (struct hfi1_i2c_bus *)data;
+ u64 reg;
+ u32 target_in;
+
+ hfi1_setsda(data, 1); /* clear OE so we do not pull line down */
+ udelay(2); /* 1us pull up + 250ns hold */
+
+ target_in = i2c_in_csr(bus->num);
+ reg = read_csr(bus->controlling_dd, target_in);
+ return !!(reg & QSFP_HFI0_I2CDAT);
+}
+
+static int hfi1_getscl(void *data)
+{
+ struct hfi1_i2c_bus *bus = (struct hfi1_i2c_bus *)data;
+ u64 reg;
+ u32 target_in;
+
+ hfi1_setscl(data, 1); /* clear OE so we do not pull line down */
+ udelay(2); /* 1us pull up + 250ns hold */
+
+ target_in = i2c_in_csr(bus->num);
+ reg = read_csr(bus->controlling_dd, target_in);
+ return !!(reg & QSFP_HFI0_I2CCLK);
+}
+
+/*
+ * Allocate and initialize the given i2c bus number.
+ * Returns NULL on failure.
+ */
+static struct hfi1_i2c_bus *init_i2c_bus(struct hfi1_devdata *dd,
+ struct hfi1_asic_data *ad, int num)
+{
+ struct hfi1_i2c_bus *bus;
+ int ret;
+
+ bus = kzalloc(sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return NULL;
+
+ bus->controlling_dd = dd;
+ bus->num = num; /* our bus number */
+
+ bus->algo.setsda = hfi1_setsda;
+ bus->algo.setscl = hfi1_setscl;
+ bus->algo.getsda = hfi1_getsda;
+ bus->algo.getscl = hfi1_getscl;
+ bus->algo.udelay = 5;
+ bus->algo.timeout = usecs_to_jiffies(100000);
+ bus->algo.data = bus;
+
+ bus->adapter.owner = THIS_MODULE;
+ bus->adapter.algo_data = &bus->algo;
+ bus->adapter.dev.parent = &dd->pcidev->dev;
+ snprintf(bus->adapter.name, sizeof(bus->adapter.name),
+ "hfi1_i2c%d", num);
+
+ ret = i2c_bit_add_bus(&bus->adapter);
+ if (ret) {
+ dd_dev_info(dd, "%s: unable to add i2c bus %d, err %d\n",
+ __func__, num, ret);
+ kfree(bus);
+ return NULL;
+ }
+
+ return bus;
+}
+
+/*
+ * Initialize i2c buses.
+ * Return 0 on success, -errno on error.
+ */
+int set_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad)
+{
+ ad->i2c_bus0 = init_i2c_bus(dd, ad, 0);
+ ad->i2c_bus1 = init_i2c_bus(dd, ad, 1);
+ if (!ad->i2c_bus0 || !ad->i2c_bus1)
+ return -ENOMEM;
+ return 0;
+};
+
+static void clean_i2c_bus(struct hfi1_i2c_bus *bus)
+{
+ if (bus) {
+ i2c_del_adapter(&bus->adapter);
+ kfree(bus);
+ }
+}
+
+void clean_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad)
+{
+ if (!ad)
+ return;
+ clean_i2c_bus(ad->i2c_bus0);
+ ad->i2c_bus0 = NULL;
+ clean_i2c_bus(ad->i2c_bus1);
+ ad->i2c_bus1 = NULL;
+}
+
+static int i2c_bus_write(struct hfi1_devdata *dd, struct hfi1_i2c_bus *i2c,
+ u8 slave_addr, int offset, int offset_size,
+ u8 *data, u16 len)
+{
+ int ret;
+ int num_msgs;
+ u8 offset_bytes[2];
+ struct i2c_msg msgs[2];
+
+ switch (offset_size) {
+ case 0:
+ num_msgs = 1;
+ msgs[0].addr = slave_addr;
+ msgs[0].flags = 0;
+ msgs[0].len = len;
+ msgs[0].buf = data;
+ break;
+ case 2:
+ offset_bytes[1] = (offset >> 8) & 0xff;
+ fallthrough;
+ case 1:
+ num_msgs = 2;
+ offset_bytes[0] = offset & 0xff;
+
+ msgs[0].addr = slave_addr;
+ msgs[0].flags = 0;
+ msgs[0].len = offset_size;
+ msgs[0].buf = offset_bytes;
+
+ msgs[1].addr = slave_addr;
+ msgs[1].flags = I2C_M_NOSTART;
+ msgs[1].len = len;
+ msgs[1].buf = data;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ i2c->controlling_dd = dd;
+ ret = i2c_transfer(&i2c->adapter, msgs, num_msgs);
+ if (ret != num_msgs) {
+ dd_dev_err(dd, "%s: bus %d, i2c slave 0x%x, offset 0x%x, len 0x%x; write failed, ret %d\n",
+ __func__, i2c->num, slave_addr, offset, len, ret);
+ return ret < 0 ? ret : -EIO;
+ }
+ return 0;
+}
+
+static int i2c_bus_read(struct hfi1_devdata *dd, struct hfi1_i2c_bus *bus,
+ u8 slave_addr, int offset, int offset_size,
+ u8 *data, u16 len)
+{
+ int ret;
+ int num_msgs;
+ u8 offset_bytes[2];
+ struct i2c_msg msgs[2];
+
+ switch (offset_size) {
+ case 0:
+ num_msgs = 1;
+ msgs[0].addr = slave_addr;
+ msgs[0].flags = I2C_M_RD;
+ msgs[0].len = len;
+ msgs[0].buf = data;
+ break;
+ case 2:
+ offset_bytes[1] = (offset >> 8) & 0xff;
+ fallthrough;
+ case 1:
+ num_msgs = 2;
+ offset_bytes[0] = offset & 0xff;
+
+ msgs[0].addr = slave_addr;
+ msgs[0].flags = 0;
+ msgs[0].len = offset_size;
+ msgs[0].buf = offset_bytes;
+
+ msgs[1].addr = slave_addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = data;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ bus->controlling_dd = dd;
+ ret = i2c_transfer(&bus->adapter, msgs, num_msgs);
+ if (ret != num_msgs) {
+ dd_dev_err(dd, "%s: bus %d, i2c slave 0x%x, offset 0x%x, len 0x%x; read failed, ret %d\n",
+ __func__, bus->num, slave_addr, offset, len, ret);
+ return ret < 0 ? ret : -EIO;
+ }
+ return 0;
+}
+
+/*
+ * Raw i2c write. No set-up or lock checking.
+ *
+ * Return 0 on success, -errno on error.
+ */
+static int __i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
+ int offset, void *bp, int len)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ struct hfi1_i2c_bus *bus;
+ u8 slave_addr;
+ int offset_size;
+
+ bus = target ? dd->asic_data->i2c_bus1 : dd->asic_data->i2c_bus0;
+ slave_addr = (i2c_addr & 0xff) >> 1; /* convert to 7-bit addr */
+ offset_size = (i2c_addr >> 8) & 0x3;
+ return i2c_bus_write(dd, bus, slave_addr, offset, offset_size, bp, len);
+}
+
+/*
+ * Caller must hold the i2c chain resource.
+ *
+ * Return number of bytes written, or -errno.
+ */
+int i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
+ void *bp, int len)
+{
+ int ret;
+
+ if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
+ return -EACCES;
+
+ ret = __i2c_write(ppd, target, i2c_addr, offset, bp, len);
+ if (ret)
+ return ret;
+
+ return len;
+}
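For illustration only (not part of the patch): the i2c_addr argument used above packs the 8-bit device address into its low byte and the offset size into bits 9:8, exactly as __i2c_write()/__i2c_read() decode it. A minimal user-space sketch of that decode, using the QSFP_DEV and QSFP_OFFSET_SIZE values defined later in qsfp.h:

#include <stdio.h>

/* Decode an i2c_addr value the way __i2c_write()/__i2c_read() do. */
static void decode_i2c_addr(int i2c_addr)
{
	unsigned int slave_addr = (i2c_addr & 0xff) >> 1;	/* 8-bit -> 7-bit */
	int offset_size = (i2c_addr >> 8) & 0x3;		/* 0, 1 or 2 bytes */

	printf("i2c_addr 0x%x -> 7-bit slave 0x%02x, %d offset byte(s)\n",
	       i2c_addr, slave_addr, offset_size);
}

int main(void)
{
	decode_i2c_addr(0xA0);		/* QSFP_DEV alone */
	decode_i2c_addr(0xA0 | 0x100);	/* QSFP_DEV | QSFP_OFFSET_SIZE */
	return 0;
}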
+
+/*
+ * Raw i2c read. No set-up or lock checking.
+ *
+ * Return 0 on success, -errno on error.
+ */
+static int __i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
+ int offset, void *bp, int len)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ struct hfi1_i2c_bus *bus;
+ u8 slave_addr;
+ int offset_size;
+
+ bus = target ? dd->asic_data->i2c_bus1 : dd->asic_data->i2c_bus0;
+ slave_addr = (i2c_addr & 0xff) >> 1; /* convert to 7-bit addr */
+ offset_size = (i2c_addr >> 8) & 0x3;
+ return i2c_bus_read(dd, bus, slave_addr, offset, offset_size, bp, len);
+}
+
+/*
+ * Caller must hold the i2c chain resource.
+ *
+ * Return number of bytes read, or -errno.
+ */
+int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
+ void *bp, int len)
+{
+ int ret;
+
+ if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
+ return -EACCES;
+
+ ret = __i2c_read(ppd, target, i2c_addr, offset, bp, len);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+/*
+ * Write page n, offset m of QSFP memory as defined by SFF 8636
+ * by writing @addr = ((256 * n) + m)
+ *
+ * Caller must hold the i2c chain resource.
+ *
+ * Return number of bytes written or -errno.
+ */
+int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
+ int len)
+{
+ int count = 0;
+ int offset;
+ int nwrite;
+ int ret = 0;
+ u8 page;
+
+ if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
+ return -EACCES;
+
+ while (count < len) {
+ /*
+ * Set the qsfp page based on a zero-based address
+ * and a page size of QSFP_PAGESIZE bytes.
+ */
+ page = (u8)(addr / QSFP_PAGESIZE);
+
+ ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
+ QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
+ /* QSFPs require a 5-10msec delay after write operations */
+ mdelay(5);
+ if (ret) {
+ hfi1_dev_porterr(ppd->dd, ppd->port,
+ "QSFP chain %d can't write QSFP_PAGE_SELECT_BYTE: %d\n",
+ target, ret);
+ break;
+ }
+
+ offset = addr % QSFP_PAGESIZE;
+ nwrite = len - count;
+ /* truncate write to boundary if crossing boundary */
+ if (((addr % QSFP_RW_BOUNDARY) + nwrite) > QSFP_RW_BOUNDARY)
+ nwrite = QSFP_RW_BOUNDARY - (addr % QSFP_RW_BOUNDARY);
+
+ ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
+ offset, bp + count, nwrite);
+ /* QSFPs require a 5-10msec delay after write operations */
+ mdelay(5);
+ if (ret) /* stop on error */
+ break;
+
+ count += nwrite;
+ addr += nwrite;
+ }
+
+ if (ret < 0)
+ return ret;
+ return count;
+}
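For illustration only (not part of the patch): the page select and boundary truncation above are simple modular arithmetic. A stand-alone sketch of one qsfp_write()-style walk over a transfer that crosses both a 128 byte boundary and a page boundary (constants copied from qsfp.h):

#include <stdio.h>

#define PAGESIZE    256		/* QSFP_PAGESIZE */
#define RW_BOUNDARY 128		/* QSFP_RW_BOUNDARY */

int main(void)
{
	int addr = 250, len = 20, count = 0;

	while (count < len) {
		int page = addr / PAGESIZE;	/* value for the page select byte */
		int offset = addr % PAGESIZE;	/* offset within the page */
		int n = len - count;

		/* truncate so a transfer never crosses a 128 byte boundary */
		if ((addr % RW_BOUNDARY) + n > RW_BOUNDARY)
			n = RW_BOUNDARY - (addr % RW_BOUNDARY);

		/* prints "page 0 offset 250, 6 byte(s)" then "page 1 offset 0, 14 byte(s)" */
		printf("page %d offset %d, %d byte(s)\n", page, offset, n);
		count += n;
		addr += n;
	}
	return 0;
}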
+
+/*
+ * Access page n, offset m of QSFP memory as defined by SFF 8636
+ * by reading @addr = ((256 * n) + m)
+ *
+ * Caller must hold the i2c chain resource.
+ *
+ * Return the number of bytes read or -errno.
+ */
+int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
+ int len)
+{
+ int count = 0;
+ int offset;
+ int nread;
+ int ret = 0;
+ u8 page;
+
+ if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
+ return -EACCES;
+
+ while (count < len) {
+ /*
+ * Set the qsfp page based on a zero-based address
+ * and a page size of QSFP_PAGESIZE bytes.
+ */
+ page = (u8)(addr / QSFP_PAGESIZE);
+ ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
+ QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
+ /* QSFPs require a 5-10msec delay after write operations */
+ mdelay(5);
+ if (ret) {
+ hfi1_dev_porterr(ppd->dd, ppd->port,
+ "QSFP chain %d can't write QSFP_PAGE_SELECT_BYTE: %d\n",
+ target, ret);
+ break;
+ }
+
+ offset = addr % QSFP_PAGESIZE;
+ nread = len - count;
+ /* truncate read to boundary if crossing boundary */
+ if (((addr % QSFP_RW_BOUNDARY) + nread) > QSFP_RW_BOUNDARY)
+ nread = QSFP_RW_BOUNDARY - (addr % QSFP_RW_BOUNDARY);
+
+ ret = __i2c_read(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
+ offset, bp + count, nread);
+ if (ret) /* stop on error */
+ break;
+
+ count += nread;
+ addr += nread;
+ }
+
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+/*
+ * Perform a stand-alone single QSFP read. Acquire the resource, do the
+ * read, then release the resource.
+ */
+int one_qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
+ int len)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u32 resource = qsfp_resource(dd);
+ int ret;
+
+ ret = acquire_chip_resource(dd, resource, QSFP_WAIT);
+ if (ret)
+ return ret;
+ ret = qsfp_read(ppd, target, addr, bp, len);
+ release_chip_resource(dd, resource);
+
+ return ret;
+}
+
+/*
+ * This function caches the QSFP memory range in 128 byte chunks.
+ * As an example, the next byte after address 255 is byte 128 from
+ * upper page 01H (if existing) rather than byte 0 from lower page 00H.
+ * Access page n, offset m of QSFP memory as defined by SFF 8636
+ * in the cache by reading byte ((128 * n) + m).
+ * The calls to qsfp_{read,write} in this function correctly handle the
+ * address map difference between this mapping and the mapping implemented
+ * by those functions.
+ *
+ * The caller must be holding the QSFP i2c chain resource.
+ */
+int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp)
+{
+ u32 target = ppd->dd->hfi1_id;
+ int ret;
+ unsigned long flags;
+ u8 *cache = &cp->cache[0];
+
+ /* ensure sane contents on invalid reads, for cable swaps */
+ memset(cache, 0, (QSFP_MAX_NUM_PAGES * 128));
+ spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
+ ppd->qsfp_info.cache_valid = 0;
+ spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
+
+ if (!qsfp_mod_present(ppd)) {
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ ret = qsfp_read(ppd, target, 0, cache, QSFP_PAGESIZE);
+ if (ret != QSFP_PAGESIZE) {
+ dd_dev_info(ppd->dd,
+ "%s: Page 0 read failed, expected %d, got %d\n",
+ __func__, QSFP_PAGESIZE, ret);
+ goto bail;
+ }
+
+ /* Is paging enabled? (SFF 8636 byte 2 bit 2 set means flat memory) */
+ if (!(cache[2] & 4)) {
+ /* Paging enabled, upper page 03h required */
+ if ((cache[195] & 0xC0) == 0xC0) {
+ /* all */
+ ret = qsfp_read(ppd, target, 384, cache + 256, 128);
+ if (ret <= 0 || ret != 128) {
+ dd_dev_info(ppd->dd, "%s failed\n", __func__);
+ goto bail;
+ }
+ ret = qsfp_read(ppd, target, 640, cache + 384, 128);
+ if (ret <= 0 || ret != 128) {
+ dd_dev_info(ppd->dd, "%s failed\n", __func__);
+ goto bail;
+ }
+ ret = qsfp_read(ppd, target, 896, cache + 512, 128);
+ if (ret <= 0 || ret != 128) {
+ dd_dev_info(ppd->dd, "%s failed\n", __func__);
+ goto bail;
+ }
+ } else if ((cache[195] & 0x80) == 0x80) {
+ /* only page 2 and 3 */
+ ret = qsfp_read(ppd, target, 640, cache + 384, 128);
+ if (ret <= 0 || ret != 128) {
+ dd_dev_info(ppd->dd, "%s failed\n", __func__);
+ goto bail;
+ }
+ ret = qsfp_read(ppd, target, 896, cache + 512, 128);
+ if (ret <= 0 || ret != 128) {
+ dd_dev_info(ppd->dd, "%s failed\n", __func__);
+ goto bail;
+ }
+ } else if ((cache[195] & 0x40) == 0x40) {
+ /* only page 1 and 3 */
+ ret = qsfp_read(ppd, target, 384, cache + 256, 128);
+ if (ret <= 0 || ret != 128) {
+ dd_dev_info(ppd->dd, "%s failed\n", __func__);
+ goto bail;
+ }
+ ret = qsfp_read(ppd, target, 896, cache + 512, 128);
+ if (ret <= 0 || ret != 128) {
+ dd_dev_info(ppd->dd, "%s failed\n", __func__);
+ goto bail;
+ }
+ } else {
+ /* only page 3 */
+ ret = qsfp_read(ppd, target, 896, cache + 512, 128);
+ if (ret <= 0 || ret != 128) {
+ dd_dev_info(ppd->dd, "%s failed\n", __func__);
+ goto bail;
+ }
+ }
+ }
+
+ spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
+ ppd->qsfp_info.cache_valid = 1;
+ ppd->qsfp_info.cache_refresh_required = 0;
+ spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
+
+ return 0;
+
+bail:
+ memset(cache, 0, (QSFP_MAX_NUM_PAGES * 128));
+ return ret;
+}
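For illustration only (not part of the patch): the cache described above is indexed in 128 byte chunks ((128 * n) + m), while qsfp_read() takes the flat SFF 8636 address ((256 * n) + m). A small sketch of that conversion, which reproduces the 384/640/896 read addresses used in refresh_qsfp_cache():

#include <stdio.h>

/*
 * Map a cache index (128 byte chunks, as described above) to the flat
 * qsfp_read() address (256 * page + offset). Indexes below 128 are the
 * lower page 00h and map 1:1.
 */
static int cache_index_to_qsfp_addr(int idx)
{
	if (idx < 128)
		return idx;
	return 256 * (idx / 128 - 1) + 128 + (idx % 128);
}

int main(void)
{
	int idx[] = { 0, 128, 256, 384, 512 };
	int i;

	/* prints 0, 128, 384, 640, 896 */
	for (i = 0; i < 5; i++)
		printf("cache %3d -> qsfp addr %3d\n",
		       idx[i], cache_index_to_qsfp_addr(idx[i]));
	return 0;
}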
+
+const char * const hfi1_qsfp_devtech[16] = {
+ "850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP",
+ "1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML",
+ "Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq",
+ "Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq"
+};
+
+#define QSFP_DUMP_CHUNK 16 /* Holds longest string */
+#define QSFP_DEFAULT_HDR_CNT 224
+
+#define QSFP_PWR(pbyte) (((pbyte) >> 6) & 3)
+#define QSFP_HIGH_PWR(pbyte) ((pbyte) & 3)
+/* For use with QSFP_HIGH_PWR macro */
+#define QSFP_HIGH_PWR_UNUSED 0 /* Bits [1:0] = 00 implies low power module */
+
+/*
+ * Takes power class byte [Page 00 Byte 129] in SFF 8636
+ * Returns power class as integer (1 through 7, per SFF 8636 rev 2.4)
+ */
+int get_qsfp_power_class(u8 power_byte)
+{
+ if (QSFP_HIGH_PWR(power_byte) == QSFP_HIGH_PWR_UNUSED)
+ /* power classes count from 1, their bit encodings from 0 */
+ return (QSFP_PWR(power_byte) + 1);
+ /*
+ * A value of 00 in the high power class bits means unused. For the
+ * remaining encodings (1 through 3) we add 4 so that the high power
+ * classes follow on from the low power classes 1-4, giving classes 5-7.
+ */
+ return (QSFP_HIGH_PWR(power_byte) + 4);
+}
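For illustration only (not part of the patch): the power class decode above can be checked with a couple of sample values of byte 129; the local macros below are copies of QSFP_PWR/QSFP_HIGH_PWR from this file.

#include <stdio.h>

#define PWR(b)      (((b) >> 6) & 3)	/* legacy classes 1-4, bits 7:6 */
#define HIGH_PWR(b) ((b) & 3)		/* classes 5-7, bits 1:0 (0 = unused) */

static int power_class(unsigned char b)
{
	if (HIGH_PWR(b) == 0)
		return PWR(b) + 1;
	return HIGH_PWR(b) + 4;
}

int main(void)
{
	/* bits 7:6 = 3 (3.5W) with high power bits unused -> class 4 */
	printf("0xC0 -> class %d\n", power_class(0xC0));
	/* high power bits = 2 -> class 6 (4.5W) */
	printf("0x02 -> class %d\n", power_class(0x02));
	return 0;
}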
+
+int qsfp_mod_present(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u64 reg;
+
+ reg = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
+ return !(reg & QSFP_HFI0_MODPRST_N);
+}
+
+/*
+ * This function maps QSFP memory addresses in 128 byte chunks in the following
+ * fashion per the CableInfo SMA query definition in the IBA 1.3 spec/OPA Gen 1
+ * spec
+ * For addr 000-127, lower page 00h
+ * For addr 128-255, upper page 00h
+ * For addr 256-383, upper page 01h
+ * For addr 384-511, upper page 02h
+ * For addr 512-639, upper page 03h
+ *
+ * For addresses beyond this range, the corresponding portion of the data
+ * buffer is returned set to 0.
+ * For optional upper pages that are not valid, the corresponding range of
+ * bytes in the data buffer is returned set to 0.
+ */
+int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len,
+ u8 *data)
+{
+ struct hfi1_pportdata *ppd;
+ u32 excess_len = len;
+ int ret = 0, offset = 0;
+
+ if (port_num > dd->num_pports || port_num < 1) {
+ dd_dev_info(dd, "%s: Invalid port number %d\n",
+ __func__, port_num);
+ ret = -EINVAL;
+ goto set_zeroes;
+ }
+
+ ppd = dd->pport + (port_num - 1);
+ if (!qsfp_mod_present(ppd)) {
+ ret = -ENODEV;
+ goto set_zeroes;
+ }
+
+ if (!ppd->qsfp_info.cache_valid) {
+ ret = -EINVAL;
+ goto set_zeroes;
+ }
+
+ if (addr >= (QSFP_MAX_NUM_PAGES * 128)) {
+ ret = -ERANGE;
+ goto set_zeroes;
+ }
+
+ if ((addr + len) > (QSFP_MAX_NUM_PAGES * 128)) {
+ excess_len = (addr + len) - (QSFP_MAX_NUM_PAGES * 128);
+ memcpy(data, &ppd->qsfp_info.cache[addr], (len - excess_len));
+ data += (len - excess_len);
+ goto set_zeroes;
+ }
+
+ memcpy(data, &ppd->qsfp_info.cache[addr], len);
+
+ if (addr <= QSFP_MONITOR_VAL_END &&
+ (addr + len) >= QSFP_MONITOR_VAL_START) {
+ /* Overlap with the dynamic channel monitor range */
+ if (addr < QSFP_MONITOR_VAL_START) {
+ if (addr + len <= QSFP_MONITOR_VAL_END)
+ len = addr + len - QSFP_MONITOR_VAL_START;
+ else
+ len = QSFP_MONITOR_RANGE;
+ offset = QSFP_MONITOR_VAL_START - addr;
+ addr = QSFP_MONITOR_VAL_START;
+ } else if (addr == QSFP_MONITOR_VAL_START) {
+ offset = 0;
+ if (addr + len > QSFP_MONITOR_VAL_END)
+ len = QSFP_MONITOR_RANGE;
+ } else {
+ offset = 0;
+ if (addr + len > QSFP_MONITOR_VAL_END)
+ len = QSFP_MONITOR_VAL_END - addr + 1;
+ }
+ /* Refresh the values of the dynamic monitors from the cable */
+ ret = one_qsfp_read(ppd, dd->hfi1_id, addr, data + offset, len);
+ if (ret != len) {
+ ret = -EAGAIN;
+ goto set_zeroes;
+ }
+ }
+
+ return 0;
+
+set_zeroes:
+ memset(data, 0, excess_len);
+ return ret;
+}
+
+static const char *pwr_codes[8] = {"N/AW",
+ "1.5W",
+ "2.0W",
+ "2.5W",
+ "3.5W",
+ "4.0W",
+ "4.5W",
+ "5.0W"
+ };
+
+int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len)
+{
+ u8 *cache = &ppd->qsfp_info.cache[0];
+ u8 bin_buff[QSFP_DUMP_CHUNK];
+ char lenstr[6];
+ int sofar;
+ int bidx = 0;
+ u8 *atten = &cache[QSFP_ATTEN_OFFS];
+ u8 *vendor_oui = &cache[QSFP_VOUI_OFFS];
+ u8 power_byte = 0;
+
+ sofar = 0;
+ lenstr[0] = ' ';
+ lenstr[1] = '\0';
+
+ if (ppd->qsfp_info.cache_valid) {
+ if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
+ snprintf(lenstr, sizeof(lenstr), "%dM ",
+ cache[QSFP_MOD_LEN_OFFS]);
+
+ power_byte = cache[QSFP_MOD_PWR_OFFS];
+ sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n",
+ pwr_codes[get_qsfp_power_class(power_byte)]);
+
+ sofar += scnprintf(buf + sofar, len - sofar, "TECH:%s%s\n",
+ lenstr,
+ hfi1_qsfp_devtech[(cache[QSFP_MOD_TECH_OFFS]) >> 4]);
+
+ sofar += scnprintf(buf + sofar, len - sofar, "Vendor:%.*s\n",
+ QSFP_VEND_LEN, &cache[QSFP_VEND_OFFS]);
+
+ sofar += scnprintf(buf + sofar, len - sofar, "OUI:%06X\n",
+ QSFP_OUI(vendor_oui));
+
+ sofar += scnprintf(buf + sofar, len - sofar, "Part#:%.*s\n",
+ QSFP_PN_LEN, &cache[QSFP_PN_OFFS]);
+
+ sofar += scnprintf(buf + sofar, len - sofar, "Rev:%.*s\n",
+ QSFP_REV_LEN, &cache[QSFP_REV_OFFS]);
+
+ if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
+ sofar += scnprintf(buf + sofar, len - sofar,
+ "Atten:%d, %d\n",
+ QSFP_ATTEN_SDR(atten),
+ QSFP_ATTEN_DDR(atten));
+
+ sofar += scnprintf(buf + sofar, len - sofar, "Serial:%.*s\n",
+ QSFP_SN_LEN, &cache[QSFP_SN_OFFS]);
+
+ sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
+ QSFP_DATE_LEN, &cache[QSFP_DATE_OFFS]);
+
+ sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
+ QSFP_LOT_LEN, &cache[QSFP_LOT_OFFS]);
+
+ while (bidx < QSFP_DEFAULT_HDR_CNT) {
+ int iidx;
+
+ memcpy(bin_buff, &cache[bidx], QSFP_DUMP_CHUNK);
+ for (iidx = 0; iidx < QSFP_DUMP_CHUNK; ++iidx) {
+ sofar += scnprintf(buf + sofar, len - sofar,
+ " %02X", bin_buff[iidx]);
+ }
+ sofar += scnprintf(buf + sofar, len - sofar, "\n");
+ bidx += QSFP_DUMP_CHUNK;
+ }
+ }
+ return sofar;
+}
diff --git a/drivers/infiniband/hw/hfi1/qsfp.h b/drivers/infiniband/hw/hfi1/qsfp.h
new file mode 100644
index 000000000000..5c59d53fcb63
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/qsfp.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ */
+/* QSFP support common definitions, for hfi driver */
+
+#define QSFP_DEV 0xA0
+#define QSFP_PWR_LAG_MSEC 2000
+#define QSFP_MODPRS_LAG_MSEC 20
+/* 128 byte pages, per SFF 8636 rev 2.4 */
+#define QSFP_MAX_NUM_PAGES 5
+
+/*
+ * Below are masks for QSFP pins. Pins are the same for HFI0 and HFI1.
+ * _N means asserted low
+ */
+#define QSFP_HFI0_I2CCLK BIT(0)
+#define QSFP_HFI0_I2CDAT BIT(1)
+#define QSFP_HFI0_RESET_N BIT(2)
+#define QSFP_HFI0_INT_N BIT(3)
+#define QSFP_HFI0_MODPRST_N BIT(4)
+
+/* QSFP is paged at 256 bytes */
+#define QSFP_PAGESIZE 256
+/* Reads/writes cannot cross 128 byte boundaries */
+#define QSFP_RW_BOUNDARY 128
+
+/* number of bytes in i2c offset for QSFP devices */
+#define __QSFP_OFFSET_SIZE 1 /* num address bytes */
+#define QSFP_OFFSET_SIZE (__QSFP_OFFSET_SIZE << 8) /* shifted value */
+
+/* Defined fields that Intel requires of qualified cables */
+/* Byte 0 is Identifier, not checked */
+/* Byte 1 is reserved "status MSB" */
+#define QSFP_MONITOR_VAL_START 22
+#define QSFP_MONITOR_VAL_END 81
+#define QSFP_MONITOR_RANGE (QSFP_MONITOR_VAL_END - QSFP_MONITOR_VAL_START + 1)
+#define QSFP_TX_CTRL_BYTE_OFFS 86
+#define QSFP_PWR_CTRL_BYTE_OFFS 93
+#define QSFP_CDR_CTRL_BYTE_OFFS 98
+
+#define QSFP_PAGE_SELECT_BYTE_OFFS 127
+/* Byte 128 is Identifier: must be 0x0c for QSFP, or 0x0d for QSFP+ */
+#define QSFP_MOD_ID_OFFS 128
+/*
+ * Byte 129 is "Extended Identifier".
+ * For bits [7:6]: 0:1.5W, 1:2.0W, 2:2.5W, 3:3.5W
+ * For bits [1:0]: 0:Unused, 1:4W, 2:4.5W, 3:5W
+ */
+#define QSFP_MOD_PWR_OFFS 129
+/* Byte 130 is Connector type. Not Intel req'd */
+/* Bytes 131..138 are Transceiver types, bit maps for various tech, none IB */
+/* Byte 139 is encoding. code 0x01 is 8b10b. Not Intel req'd */
+/* byte 140 is nominal bit-rate, in units of 100Mbits/sec */
+#define QSFP_NOM_BIT_RATE_100_OFFS 140
+/* Byte 141 is Extended Rate Select. Not Intel req'd */
+/* Bytes 142..145 are lengths for various fiber types. Not Intel req'd */
+/* Byte 146 is length for Copper. Units of 1 meter */
+#define QSFP_MOD_LEN_OFFS 146
+/*
+ * Byte 147 is Device technology. D0..3 not Intel req'd
+ * D4..7 select from 15 choices, translated by table:
+ */
+#define QSFP_MOD_TECH_OFFS 147
+extern const char *const hfi1_qsfp_devtech[16];
+/* Active Equalization includes fiber, copper full EQ, and copper near Eq */
+#define QSFP_IS_ACTIVE(tech) ((0xA2FF >> ((tech) >> 4)) & 1)
+/* Active Equalization includes fiber, copper full EQ, and copper far Eq */
+#define QSFP_IS_ACTIVE_FAR(tech) ((0x32FF >> ((tech) >> 4)) & 1)
+/* Attenuation should be valid for copper other than full/near Eq */
+#define QSFP_HAS_ATTEN(tech) ((0x4D00 >> ((tech) >> 4)) & 1)
+/* Length is only valid if technology is "copper" */
+#define QSFP_IS_CU(tech) ((0xED00 >> ((tech) >> 4)) & 1)
+#define QSFP_TECH_1490 9
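For illustration only (not part of the patch): the QSFP_IS_*() macros above fold the upper nibble of the Device technology byte into a 16-bit lookup mask. A quick user-space check of the QSFP_IS_CU() mask against the hfi1_qsfp_devtech[] table from qsfp.c:

#include <stdio.h>

static const char *const devtech[16] = {
	"850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP",
	"1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML",
	"Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq",
	"Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq"
};

#define IS_CU(tech) ((0xED00 >> ((tech) >> 4)) & 1)	/* same mask as QSFP_IS_CU */

int main(void)
{
	unsigned int tech;

	/* step the upper nibble; only the "Cu *" entries report copper=1 */
	for (tech = 0; tech < 256; tech += 16)
		printf("%-18s copper=%d\n", devtech[tech >> 4], IS_CU(tech));
	return 0;
}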
+
+#define QSFP_OUI(oui) (((unsigned)oui[0] << 16) | ((unsigned)oui[1] << 8) | \
+ oui[2])
+#define QSFP_OUI_AMPHENOL 0x415048
+#define QSFP_OUI_FINISAR 0x009065
+#define QSFP_OUI_GORE 0x002177
+
+/* Bytes 148..163 are Vendor Name, Left-justified Blank-filled */
+#define QSFP_VEND_OFFS 148
+#define QSFP_VEND_LEN 16
+/* Byte 164 is IB Extended transceiver codes Bits D0..3 are SDR,DDR,QDR,EDR */
+#define QSFP_IBXCV_OFFS 164
+/* Bytes 165..167 are Vendor OUI number */
+#define QSFP_VOUI_OFFS 165
+#define QSFP_VOUI_LEN 3
+/* Bytes 168..183 are Vendor Part Number, string */
+#define QSFP_PN_OFFS 168
+#define QSFP_PN_LEN 16
+/* Bytes 184,185 are Vendor Rev. Left Justified, Blank-filled */
+#define QSFP_REV_OFFS 184
+#define QSFP_REV_LEN 2
+/*
+ * Bytes 186,187 are Wavelength, if Optical. Not Intel req'd
+ * If copper, they are attenuation in dB:
+ * Byte 186 is at 2.5Gb/sec (SDR), Byte 187 at 5.0Gb/sec (DDR)
+ */
+#define QSFP_ATTEN_OFFS 186
+#define QSFP_ATTEN_LEN 2
+/*
+ * Bytes 188,189 are Wavelength tolerance, if optical
+ * If copper, they are attenuation in dB:
+ * Byte 188 is at 12.5 Gb/s, Byte 189 at 25 Gb/s
+ */
+#define QSFP_CU_ATTEN_7G_OFFS 188
+#define QSFP_CU_ATTEN_12G_OFFS 189
+/* Byte 190 is Max Case Temp. Not Intel req'd */
+/* Byte 191 is LSB of sum of bytes 128..190. Not Intel req'd */
+#define QSFP_CC_OFFS 191
+#define QSFP_EQ_INFO_OFFS 193
+#define QSFP_CDR_INFO_OFFS 194
+/* Bytes 196..211 are Serial Number, String */
+#define QSFP_SN_OFFS 196
+#define QSFP_SN_LEN 16
+/* Bytes 212..219 are date-code YYMMDD (MM==1 for Jan) */
+#define QSFP_DATE_OFFS 212
+#define QSFP_DATE_LEN 6
+/* Bytes 218,219 are optional lot-code, string */
+#define QSFP_LOT_OFFS 218
+#define QSFP_LOT_LEN 2
+/* Bytes 220, 221 indicate monitoring options, Not Intel req'd */
+/* Byte 222 indicates nominal bitrate in units of 250Mbits/sec */
+#define QSFP_NOM_BIT_RATE_250_OFFS 222
+/* Byte 223 is LSB of sum of bytes 192..222 */
+#define QSFP_CC_EXT_OFFS 223
+
+/*
+ * Interrupt flag masks
+ */
+#define QSFP_DATA_NOT_READY 0x01
+
+#define QSFP_HIGH_TEMP_ALARM 0x80
+#define QSFP_LOW_TEMP_ALARM 0x40
+#define QSFP_HIGH_TEMP_WARNING 0x20
+#define QSFP_LOW_TEMP_WARNING 0x10
+
+#define QSFP_HIGH_VCC_ALARM 0x80
+#define QSFP_LOW_VCC_ALARM 0x40
+#define QSFP_HIGH_VCC_WARNING 0x20
+#define QSFP_LOW_VCC_WARNING 0x10
+
+#define QSFP_HIGH_POWER_ALARM 0x88
+#define QSFP_LOW_POWER_ALARM 0x44
+#define QSFP_HIGH_POWER_WARNING 0x22
+#define QSFP_LOW_POWER_WARNING 0x11
+
+#define QSFP_HIGH_BIAS_ALARM 0x88
+#define QSFP_LOW_BIAS_ALARM 0x44
+#define QSFP_HIGH_BIAS_WARNING 0x22
+#define QSFP_LOW_BIAS_WARNING 0x11
+
+#define QSFP_ATTEN_SDR(attenarray) (attenarray[0])
+#define QSFP_ATTEN_DDR(attenarray) (attenarray[1])
+
+/*
+ * struct qsfp_data encapsulates state of QSFP device for one port.
+ * It will be part of port-specific data if a board supports QSFP.
+ *
+ * Since multiple board-types use QSFP, and their pport_data structs
+ * differ (in the chip-specific section), we need a pointer to its head.
+ *
+ * Avoiding premature optimization, we will have one work_struct per port,
+ * and let the qsfp_lock arbitrate access to common resources.
+ *
+ */
+struct qsfp_data {
+ /* Helps to find our way */
+ struct hfi1_pportdata *ppd;
+ struct work_struct qsfp_work;
+ u8 cache[QSFP_MAX_NUM_PAGES * 128];
+ /* protect qsfp data */
+ spinlock_t qsfp_lock;
+ u8 check_interrupt_flags;
+ u8 reset_needed;
+ u8 limiting_active;
+ u8 cache_valid;
+ u8 cache_refresh_required;
+};
+
+int refresh_qsfp_cache(struct hfi1_pportdata *ppd,
+ struct qsfp_data *cp);
+int get_qsfp_power_class(u8 power_byte);
+int qsfp_mod_present(struct hfi1_pportdata *ppd);
+int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr,
+ u32 len, u8 *data);
+
+int i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
+ int offset, void *bp, int len);
+int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
+ int offset, void *bp, int len);
+int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
+ int len);
+int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
+ int len);
+int one_qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
+ int len);
+struct hfi1_asic_data;
+int set_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad);
+void clean_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad);
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
new file mode 100644
index 000000000000..b36242c9d42c
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -0,0 +1,3244 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015 - 2018 Intel Corporation.
+ */
+
+#include <linux/io.h>
+#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_qp.h>
+
+#include "hfi.h"
+#include "qp.h"
+#include "rc.h"
+#include "verbs_txreq.h"
+#include "trace.h"
+
+struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev,
+ u8 *prev_ack, bool *scheduled)
+ __must_hold(&qp->s_lock)
+{
+ struct rvt_ack_entry *e = NULL;
+ u8 i, p;
+ bool s = true;
+
+ for (i = qp->r_head_ack_queue; ; i = p) {
+ if (i == qp->s_tail_ack_queue)
+ s = false;
+ if (i)
+ p = i - 1;
+ else
+ p = rvt_size_atomic(ib_to_rvt(qp->ibqp.device));
+ if (p == qp->r_head_ack_queue) {
+ e = NULL;
+ break;
+ }
+ e = &qp->s_ack_queue[p];
+ if (!e->opcode) {
+ e = NULL;
+ break;
+ }
+ if (cmp_psn(psn, e->psn) >= 0) {
+ if (p == qp->s_tail_ack_queue &&
+ cmp_psn(psn, e->lpsn) <= 0)
+ s = false;
+ break;
+ }
+ }
+ if (prev)
+ *prev = p;
+ if (prev_ack)
+ *prev_ack = i;
+ if (scheduled)
+ *scheduled = s;
+ return e;
+}
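For illustration only (not part of the patch): find_prev_entry() walks the ack queue backwards from r_head_ack_queue, wrapping to the highest index, and gives up once the walk comes back around to the head. A minimal sketch of just that stepping; the queue size here is hypothetical, the driver derives it from rvt_size_atomic().

#include <stdio.h>

#define QUEUE_SIZE 5	/* hypothetical; the driver uses rvt_size_atomic() + 1 */

int main(void)
{
	unsigned int head = 1;	/* assumed r_head_ack_queue position */
	unsigned int i, p;

	for (i = head; ; i = p) {
		p = i ? i - 1 : QUEUE_SIZE - 1;	/* step backwards with wrap */
		if (p == head) {		/* wrapped all the way around */
			printf("no matching entry\n");
			break;
		}
		printf("check entry %u\n", p);
	}
	return 0;
}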
+
+/**
+ * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
+ * @dev: the device for this QP
+ * @qp: a pointer to the QP
+ * @ohdr: a pointer to the IB header being constructed
+ * @ps: the xmit packet state
+ *
+ * Return 1 if constructed; otherwise, return 0.
+ * Note that we are in the responder's side of the QP context.
+ * Note the QP s_lock must be held.
+ */
+static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
+ struct ib_other_headers *ohdr,
+ struct hfi1_pkt_state *ps)
+{
+ struct rvt_ack_entry *e;
+ u32 hwords, hdrlen;
+ u32 len = 0;
+ u32 bth0 = 0, bth2 = 0;
+ u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT);
+ int middle = 0;
+ u32 pmtu = qp->pmtu;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ bool last_pkt;
+ u32 delta;
+ u8 next = qp->s_tail_ack_queue;
+ struct tid_rdma_request *req;
+
+ trace_hfi1_rsp_make_rc_ack(qp, 0);
+ lockdep_assert_held(&qp->s_lock);
+ /* Don't send an ACK if we aren't supposed to. */
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
+ goto bail;
+
+ if (qpriv->hdr_type == HFI1_PKT_TYPE_9B)
+ /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ hwords = 5;
+ else
+ /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
+ hwords = 7;
+
+ switch (qp->s_ack_state) {
+ case OP(RDMA_READ_RESPONSE_LAST):
+ case OP(RDMA_READ_RESPONSE_ONLY):
+ e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ release_rdma_sge_mr(e);
+ fallthrough;
+ case OP(ATOMIC_ACKNOWLEDGE):
+ /*
+ * We can increment the tail pointer now that the last
+ * response has been sent instead of only being
+ * constructed.
+ */
+ if (++next > rvt_size_atomic(&dev->rdi))
+ next = 0;
+ /*
+ * Only advance the s_acked_ack_queue pointer if there
+ * have been no TID RDMA requests.
+ */
+ e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ if (e->opcode != TID_OP(WRITE_REQ) &&
+ qp->s_acked_ack_queue == qp->s_tail_ack_queue)
+ qp->s_acked_ack_queue = next;
+ qp->s_tail_ack_queue = next;
+ trace_hfi1_rsp_make_rc_ack(qp, e->psn);
+ fallthrough;
+ case OP(SEND_ONLY):
+ case OP(ACKNOWLEDGE):
+ /* Check for no next entry in the queue. */
+ if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
+ if (qp->s_flags & RVT_S_ACK_PENDING)
+ goto normal;
+ goto bail;
+ }
+
+ e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ /* Check for tid write fence */
+ if ((qpriv->s_flags & HFI1_R_TID_WAIT_INTERLCK) ||
+ hfi1_tid_rdma_ack_interlock(qp, e)) {
+ iowait_set_flag(&qpriv->s_iowait, IOWAIT_PENDING_IB);
+ goto bail;
+ }
+ if (e->opcode == OP(RDMA_READ_REQUEST)) {
+ /*
+ * If a RDMA read response is being resent and
+ * we haven't seen the duplicate request yet,
+ * then stop sending the remaining responses the
+ * responder has seen until the requester re-sends it.
+ */
+ len = e->rdma_sge.sge_length;
+ if (len && !e->rdma_sge.mr) {
+ if (qp->s_acked_ack_queue ==
+ qp->s_tail_ack_queue)
+ qp->s_acked_ack_queue =
+ qp->r_head_ack_queue;
+ qp->s_tail_ack_queue = qp->r_head_ack_queue;
+ goto bail;
+ }
+ /* Copy SGE state in case we need to resend */
+ ps->s_txreq->mr = e->rdma_sge.mr;
+ if (ps->s_txreq->mr)
+ rvt_get_mr(ps->s_txreq->mr);
+ qp->s_ack_rdma_sge.sge = e->rdma_sge;
+ qp->s_ack_rdma_sge.num_sge = 1;
+ ps->s_txreq->ss = &qp->s_ack_rdma_sge;
+ if (len > pmtu) {
+ len = pmtu;
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
+ } else {
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
+ e->sent = 1;
+ }
+ ohdr->u.aeth = rvt_compute_aeth(qp);
+ hwords++;
+ qp->s_ack_rdma_psn = e->psn;
+ bth2 = mask_psn(qp->s_ack_rdma_psn++);
+ } else if (e->opcode == TID_OP(WRITE_REQ)) {
+ /*
+ * If a TID RDMA WRITE RESP is being resent, we have to
+ * wait for the actual request. All requests that are to
+ * be resent will have their state set to
+ * TID_REQUEST_RESEND. When the new request arrives, the
+ * state will be changed to TID_REQUEST_RESEND_ACTIVE.
+ */
+ req = ack_to_tid_req(e);
+ if (req->state == TID_REQUEST_RESEND ||
+ req->state == TID_REQUEST_INIT_RESEND)
+ goto bail;
+ qp->s_ack_state = TID_OP(WRITE_RESP);
+ qp->s_ack_rdma_psn = mask_psn(e->psn + req->cur_seg);
+ goto write_resp;
+ } else if (e->opcode == TID_OP(READ_REQ)) {
+ /*
+ * If a TID RDMA read response is being resent and
+ * we haven't seen the duplicate request yet,
+ * then stop sending the remaining responses the
+ * responder has seen until the requester re-sends it.
+ */
+ len = e->rdma_sge.sge_length;
+ if (len && !e->rdma_sge.mr) {
+ if (qp->s_acked_ack_queue ==
+ qp->s_tail_ack_queue)
+ qp->s_acked_ack_queue =
+ qp->r_head_ack_queue;
+ qp->s_tail_ack_queue = qp->r_head_ack_queue;
+ goto bail;
+ }
+ /* Copy SGE state in case we need to resend */
+ ps->s_txreq->mr = e->rdma_sge.mr;
+ if (ps->s_txreq->mr)
+ rvt_get_mr(ps->s_txreq->mr);
+ qp->s_ack_rdma_sge.sge = e->rdma_sge;
+ qp->s_ack_rdma_sge.num_sge = 1;
+ qp->s_ack_state = TID_OP(READ_RESP);
+ goto read_resp;
+ } else {
+ /* COMPARE_SWAP or FETCH_ADD */
+ ps->s_txreq->ss = NULL;
+ len = 0;
+ qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
+ ohdr->u.at.aeth = rvt_compute_aeth(qp);
+ ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
+ hwords += sizeof(ohdr->u.at) / sizeof(u32);
+ bth2 = mask_psn(e->psn);
+ e->sent = 1;
+ }
+ trace_hfi1_tid_write_rsp_make_rc_ack(qp);
+ bth0 = qp->s_ack_state << 24;
+ break;
+
+ case OP(RDMA_READ_RESPONSE_FIRST):
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
+ fallthrough;
+ case OP(RDMA_READ_RESPONSE_MIDDLE):
+ ps->s_txreq->ss = &qp->s_ack_rdma_sge;
+ ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
+ if (ps->s_txreq->mr)
+ rvt_get_mr(ps->s_txreq->mr);
+ len = qp->s_ack_rdma_sge.sge.sge_length;
+ if (len > pmtu) {
+ len = pmtu;
+ middle = HFI1_CAP_IS_KSET(SDMA_AHG);
+ } else {
+ ohdr->u.aeth = rvt_compute_aeth(qp);
+ hwords++;
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
+ e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ e->sent = 1;
+ }
+ bth0 = qp->s_ack_state << 24;
+ bth2 = mask_psn(qp->s_ack_rdma_psn++);
+ break;
+
+ case TID_OP(WRITE_RESP):
+write_resp:
+ /*
+ * 1. Check if RVT_S_ACK_PENDING is set. If yes,
+ * goto normal.
+ * 2. Attempt to allocate TID resources.
+ * 3. Remove RVT_S_RESP_PENDING flags from s_flags
+ * 4. If resources not available:
+ * 4.1 Set RVT_S_WAIT_TID_SPACE
+ * 4.2 Queue QP on RCD TID queue
+ * 4.3 Put QP on iowait list.
+ * 4.4 Build IB RNR NAK with appropriate timeout value
+ * 4.5 Return indication progress made.
+ * 5. If resources are available:
+ * 5.1 Program HW flow CSRs
+ * 5.2 Build TID RDMA WRITE RESP packet
+ * 5.3 If more resources are needed, repeat 4.1 - 4.3.
+ * 5.4 Wake up next QP on RCD TID queue.
+ * 5.5 Return indication progress made.
+ */
+
+ e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ req = ack_to_tid_req(e);
+
+ /*
+ * Send scheduled RNR NAKs. RNR NAKs need to be sent at
+ * segment boundaries, not at request boundaries. Don't change
+ * s_ack_state because we are still in the middle of a request.
+ */
+ if (qpriv->rnr_nak_state == TID_RNR_NAK_SEND &&
+ qp->s_tail_ack_queue == qpriv->r_tid_alloc &&
+ req->cur_seg == req->alloc_seg) {
+ qpriv->rnr_nak_state = TID_RNR_NAK_SENT;
+ goto normal_no_state;
+ }
+
+ bth2 = mask_psn(qp->s_ack_rdma_psn);
+ hdrlen = hfi1_build_tid_rdma_write_resp(qp, e, ohdr, &bth1,
+ bth2, &len,
+ &ps->s_txreq->ss);
+ if (!hdrlen)
+ return 0;
+
+ hwords += hdrlen;
+ bth0 = qp->s_ack_state << 24;
+ qp->s_ack_rdma_psn++;
+ trace_hfi1_tid_req_make_rc_ack_write(qp, 0, e->opcode, e->psn,
+ e->lpsn, req);
+ if (req->cur_seg != req->total_segs)
+ break;
+
+ e->sent = 1;
+ /* Do not free e->rdma_sge until all data are received */
+ qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
+ break;
+
+ case TID_OP(READ_RESP):
+read_resp:
+ e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ ps->s_txreq->ss = &qp->s_ack_rdma_sge;
+ delta = hfi1_build_tid_rdma_read_resp(qp, e, ohdr, &bth0,
+ &bth1, &bth2, &len,
+ &last_pkt);
+ if (delta == 0)
+ goto error_qp;
+ hwords += delta;
+ if (last_pkt) {
+ e->sent = 1;
+ /*
+ * Increment qp->s_tail_ack_queue through s_ack_state
+ * transition.
+ */
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
+ }
+ break;
+ case TID_OP(READ_REQ):
+ goto bail;
+
+ default:
+normal:
+ /*
+ * Send a regular ACK.
+ * Set the s_ack_state so we wait until after sending
+ * the ACK before setting s_ack_state to ACKNOWLEDGE
+ * (see above).
+ */
+ qp->s_ack_state = OP(SEND_ONLY);
+normal_no_state:
+ if (qp->s_nak_state)
+ ohdr->u.aeth =
+ cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
+ (qp->s_nak_state <<
+ IB_AETH_CREDIT_SHIFT));
+ else
+ ohdr->u.aeth = rvt_compute_aeth(qp);
+ hwords++;
+ len = 0;
+ bth0 = OP(ACKNOWLEDGE) << 24;
+ bth2 = mask_psn(qp->s_ack_psn);
+ qp->s_flags &= ~RVT_S_ACK_PENDING;
+ ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP;
+ ps->s_txreq->ss = NULL;
+ }
+ qp->s_rdma_ack_cnt++;
+ ps->s_txreq->sde = qpriv->s_sde;
+ ps->s_txreq->s_cur_size = len;
+ ps->s_txreq->hdr_dwords = hwords;
+ hfi1_make_ruc_header(qp, ohdr, bth0, bth1, bth2, middle, ps);
+ return 1;
+error_qp:
+ spin_unlock_irqrestore(&qp->s_lock, ps->flags);
+ spin_lock_irqsave(&qp->r_lock, ps->flags);
+ spin_lock(&qp->s_lock);
+ rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irqrestore(&qp->r_lock, ps->flags);
+ spin_lock_irqsave(&qp->s_lock, ps->flags);
+bail:
+ qp->s_ack_state = OP(ACKNOWLEDGE);
+ /*
+ * Ensure s_rdma_ack_cnt changes are committed prior to resetting
+ * RVT_S_RESP_PENDING
+ */
+ smp_wmb();
+ qp->s_flags &= ~(RVT_S_RESP_PENDING
+ | RVT_S_ACK_PENDING
+ | HFI1_S_AHG_VALID);
+ return 0;
+}
+
+/**
+ * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
+ * @qp: a pointer to the QP
+ * @ps: the current packet state
+ *
+ * Assumes s_lock is held.
+ *
+ * Return 1 if constructed; otherwise, return 0.
+ */
+int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
+ struct ib_other_headers *ohdr;
+ struct rvt_sge_state *ss = NULL;
+ struct rvt_swqe *wqe;
+ struct hfi1_swqe_priv *wpriv;
+ struct tid_rdma_request *req = NULL;
+ /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ u32 hwords = 5;
+ u32 len = 0;
+ u32 bth0 = 0, bth2 = 0;
+ u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT);
+ u32 pmtu = qp->pmtu;
+ char newreq;
+ int middle = 0;
+ int delta;
+ struct tid_rdma_flow *flow = NULL;
+ struct tid_rdma_params *remote;
+
+ trace_hfi1_sender_make_rc_req(qp);
+ lockdep_assert_held(&qp->s_lock);
+ ps->s_txreq = get_txreq(ps->dev, qp);
+ if (!ps->s_txreq)
+ goto bail_no_tx;
+
+ if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
+ /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ hwords = 5;
+ if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
+ ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
+ else
+ ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
+ } else {
+ /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
+ hwords = 7;
+ if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
+ (hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))))
+ ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
+ else
+ ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
+ }
+
+ /* Sending responses has higher priority than sending requests. */
+ if ((qp->s_flags & RVT_S_RESP_PENDING) &&
+ make_rc_ack(dev, qp, ohdr, ps))
+ return 1;
+
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
+ if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
+ goto bail;
+ /* We are in the error state, flush the work request. */
+ if (qp->s_last == READ_ONCE(qp->s_head))
+ goto bail;
+ /* If DMAs are in progress, we can't flush immediately. */
+ if (iowait_sdma_pending(&priv->s_iowait)) {
+ qp->s_flags |= RVT_S_WAIT_DMA;
+ goto bail;
+ }
+ clear_ahg(qp);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_last);
+ hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
+ IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
+ /* will get called again */
+ goto done_free_tx;
+ }
+
+ if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK | HFI1_S_WAIT_HALT))
+ goto bail;
+
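+ /*
+ * If the next PSN to send falls within the window of packets still
+ * being handed to the send engine, either wait for those sends to
+ * complete (RVT_S_WAIT_PSN) or, if they already have, reset the
+ * sending window.
+ */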
+ if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
+ if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
+ qp->s_flags |= RVT_S_WAIT_PSN;
+ goto bail;
+ }
+ qp->s_sending_psn = qp->s_psn;
+ qp->s_sending_hpsn = qp->s_psn - 1;
+ }
+
+ /* Send a request. */
+ wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
+check_s_state:
+ switch (qp->s_state) {
+ default:
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
+ goto bail;
+ /*
+ * Resend an old request or start a new one.
+ *
+ * We keep track of the current SWQE so that
+ * we don't reset the "furthest progress" state
+ * if we need to back up.
+ */
+ newreq = 0;
+ if (qp->s_cur == qp->s_tail) {
+ /* Check if send work queue is empty. */
+ if (qp->s_tail == READ_ONCE(qp->s_head)) {
+ clear_ahg(qp);
+ goto bail;
+ }
+ /*
+ * If a fence is requested, wait for previous
+ * RDMA read and atomic operations to finish.
+ * However, there is no need to guard against
+ * TID RDMA READ after TID RDMA READ.
+ */
+ if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
+ qp->s_num_rd_atomic &&
+ (wqe->wr.opcode != IB_WR_TID_RDMA_READ ||
+ priv->pending_tid_r_segs < qp->s_num_rd_atomic)) {
+ qp->s_flags |= RVT_S_WAIT_FENCE;
+ goto bail;
+ }
+ /*
+ * Local operations are processed immediately
+ * after all prior requests have completed.
+ */
+ if (wqe->wr.opcode == IB_WR_REG_MR ||
+ wqe->wr.opcode == IB_WR_LOCAL_INV) {
+ int local_ops = 0;
+ int err = 0;
+
+ if (qp->s_last != qp->s_cur)
+ goto bail;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ if (++qp->s_tail == qp->s_size)
+ qp->s_tail = 0;
+ if (!(wqe->wr.send_flags &
+ RVT_SEND_COMPLETION_ONLY)) {
+ err = rvt_invalidate_rkey(
+ qp,
+ wqe->wr.ex.invalidate_rkey);
+ local_ops = 1;
+ }
+ rvt_send_complete(qp, wqe,
+ err ? IB_WC_LOC_PROT_ERR
+ : IB_WC_SUCCESS);
+ if (local_ops)
+ atomic_dec(&qp->local_ops_pending);
+ goto done_free_tx;
+ }
+
+ newreq = 1;
+ qp->s_psn = wqe->psn;
+ }
+ /*
+ * Note that we have to be careful not to modify the
+ * original work request since we may need to resend
+ * it.
+ */
+ len = wqe->length;
+ ss = &qp->s_sge;
+ bth2 = mask_psn(qp->s_psn);
+
+ /*
+ * Interlock between various IB requests and TID RDMA
+ * if necessary.
+ */
+ if ((priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) ||
+ hfi1_tid_rdma_wqe_interlock(qp, wqe))
+ goto bail;
+
+ switch (wqe->wr.opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_SEND_WITH_INV:
+ /* If no credit, return. */
+ if (!rvt_rc_credit_avail(qp, wqe))
+ goto bail;
+ if (len > pmtu) {
+ qp->s_state = OP(SEND_FIRST);
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND) {
+ qp->s_state = OP(SEND_ONLY);
+ } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
+ qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ } else {
+ qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
+ /* Invalidate rkey comes after the BTH */
+ ohdr->u.ieth = cpu_to_be32(
+ wqe->wr.ex.invalidate_rkey);
+ hwords += 1;
+ }
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ bth2 |= IB_BTH_REQ_ACK;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case IB_WR_RDMA_WRITE:
+ if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+ goto no_flow_control;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ /* If no credit, return. */
+ if (!rvt_rc_credit_avail(qp, wqe))
+ goto bail;
+no_flow_control:
+ put_ib_reth_vaddr(
+ wqe->rdma_wr.remote_addr,
+ &ohdr->u.rc.reth);
+ ohdr->u.rc.reth.rkey =
+ cpu_to_be32(wqe->rdma_wr.rkey);
+ ohdr->u.rc.reth.length = cpu_to_be32(len);
+ hwords += sizeof(struct ib_reth) / sizeof(u32);
+ if (len > pmtu) {
+ qp->s_state = OP(RDMA_WRITE_FIRST);
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
+ qp->s_state = OP(RDMA_WRITE_ONLY);
+ } else {
+ qp->s_state =
+ OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
+ /* Immediate data comes after RETH */
+ ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ }
+ bth2 |= IB_BTH_REQ_ACK;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case IB_WR_TID_RDMA_WRITE:
+ if (newreq) {
+ /*
+ * Limit the number of TID RDMA WRITE requests.
+ */
+ if (atomic_read(&priv->n_tid_requests) >=
+ HFI1_TID_RDMA_WRITE_CNT)
+ goto bail;
+
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+ }
+
+ hwords += hfi1_build_tid_rdma_write_req(qp, wqe, ohdr,
+ &bth1, &bth2,
+ &len);
+ ss = NULL;
+ if (priv->s_tid_cur == HFI1_QP_WQE_INVALID) {
+ priv->s_tid_cur = qp->s_cur;
+ if (priv->s_tid_tail == HFI1_QP_WQE_INVALID) {
+ priv->s_tid_tail = qp->s_cur;
+ priv->s_state = TID_OP(WRITE_RESP);
+ }
+ } else if (priv->s_tid_cur == priv->s_tid_head) {
+ struct rvt_swqe *__w;
+ struct tid_rdma_request *__r;
+
+ __w = rvt_get_swqe_ptr(qp, priv->s_tid_cur);
+ __r = wqe_to_tid_req(__w);
+
+ /*
+ * The s_tid_cur pointer is advanced to s_cur if
+ * any of the following conditions about the WQE
+ * to which s_tid_cur currently points are
+ * satisfied:
+ * 1. The request is not a TID RDMA WRITE
+ * request,
+ * 2. The request is in the INACTIVE or
+ * COMPLETE states (TID RDMA READ requests
+ * stay at INACTIVE and TID RDMA WRITE
+ * requests transition to COMPLETE when done),
+ * 3. The request is in the ACTIVE or SYNC
+ * state and the number of completed
+ * segments is equal to the total segment
+ * count.
+ * (If ACTIVE, the request is waiting for
+ * ACKs. If SYNC, the request has not
+ * received any responses because it's
+ * waiting on a sync point.)
+ */
+ if (__w->wr.opcode != IB_WR_TID_RDMA_WRITE ||
+ __r->state == TID_REQUEST_INACTIVE ||
+ __r->state == TID_REQUEST_COMPLETE ||
+ ((__r->state == TID_REQUEST_ACTIVE ||
+ __r->state == TID_REQUEST_SYNC) &&
+ __r->comp_seg == __r->total_segs)) {
+ if (priv->s_tid_tail ==
+ priv->s_tid_cur &&
+ priv->s_state ==
+ TID_OP(WRITE_DATA_LAST)) {
+ priv->s_tid_tail = qp->s_cur;
+ priv->s_state =
+ TID_OP(WRITE_RESP);
+ }
+ priv->s_tid_cur = qp->s_cur;
+ }
+ /*
+ * A corner case: when the last TID RDMA WRITE
+ * request was completed, s_tid_head,
+ * s_tid_cur, and s_tid_tail all point to the
+ * same location. Other requests are posted and
+ * s_cur wraps around to the same location,
+ * where a new TID RDMA WRITE is posted. In
+ * this case, none of the indices need to be
+ * updated. However, priv->s_state still should be.
+ */
+ if (priv->s_tid_tail == qp->s_cur &&
+ priv->s_state == TID_OP(WRITE_DATA_LAST))
+ priv->s_state = TID_OP(WRITE_RESP);
+ }
+ req = wqe_to_tid_req(wqe);
+ if (newreq) {
+ priv->s_tid_head = qp->s_cur;
+ priv->pending_tid_w_resp += req->total_segs;
+ atomic_inc(&priv->n_tid_requests);
+ atomic_dec(&priv->n_requests);
+ } else {
+ req->state = TID_REQUEST_RESEND;
+ req->comp_seg = delta_psn(bth2, wqe->psn);
+ /*
+ * Pull back any segments since we are going
+ * to re-receive them.
+ */
+ req->setup_head = req->clear_tail;
+ priv->pending_tid_w_resp +=
+ delta_psn(wqe->lpsn, bth2) + 1;
+ }
+
+ trace_hfi1_tid_write_sender_make_req(qp, newreq);
+ trace_hfi1_tid_req_make_req_write(qp, newreq,
+ wqe->wr.opcode,
+ wqe->psn, wqe->lpsn,
+ req);
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case IB_WR_RDMA_READ:
+ /*
+ * Don't allow more operations to be started
+ * than the QP limits allow.
+ */
+ if (qp->s_num_rd_atomic >=
+ qp->s_max_rd_atomic) {
+ qp->s_flags |= RVT_S_WAIT_RDMAR;
+ goto bail;
+ }
+ qp->s_num_rd_atomic++;
+ if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+ put_ib_reth_vaddr(
+ wqe->rdma_wr.remote_addr,
+ &ohdr->u.rc.reth);
+ ohdr->u.rc.reth.rkey =
+ cpu_to_be32(wqe->rdma_wr.rkey);
+ ohdr->u.rc.reth.length = cpu_to_be32(len);
+ qp->s_state = OP(RDMA_READ_REQUEST);
+ hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
+ ss = NULL;
+ len = 0;
+ bth2 |= IB_BTH_REQ_ACK;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case IB_WR_TID_RDMA_READ:
+ trace_hfi1_tid_read_sender_make_req(qp, newreq);
+ wpriv = wqe->priv;
+ req = wqe_to_tid_req(wqe);
+ trace_hfi1_tid_req_make_req_read(qp, newreq,
+ wqe->wr.opcode,
+ wqe->psn, wqe->lpsn,
+ req);
+ delta = cmp_psn(qp->s_psn, wqe->psn);
+
+ /*
+ * Don't allow more operations to be started
+ * than the QP limits allow. We could get here under
+ * three conditions: (1) it's a new request; (2) we are
+ * sending the second or later segment of a request,
+ * but the qp->s_state is set to OP(RDMA_READ_REQUEST)
+ * when the last segment of a previous request is
+ * received just before this; (3) we are re-sending a
+ * request.
+ */
+ if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) {
+ qp->s_flags |= RVT_S_WAIT_RDMAR;
+ goto bail;
+ }
+ if (newreq) {
+ struct tid_rdma_flow *flow =
+ &req->flows[req->setup_head];
+
+ /*
+ * Set up s_sge as it is needed for TID
+ * allocation. However, if the pages have already been
+ * walked and mapped, skip it: an earlier attempt
+ * failed to allocate the TID entries.
+ */
+ if (!flow->npagesets) {
+ qp->s_sge.sge = wqe->sg_list[0];
+ qp->s_sge.sg_list = wqe->sg_list + 1;
+ qp->s_sge.num_sge = wqe->wr.num_sge;
+ qp->s_sge.total_len = wqe->length;
+ qp->s_len = wqe->length;
+ req->isge = 0;
+ req->clear_tail = req->setup_head;
+ req->flow_idx = req->setup_head;
+ req->state = TID_REQUEST_ACTIVE;
+ }
+ } else if (delta == 0) {
+ /* Re-send a request */
+ req->cur_seg = 0;
+ req->comp_seg = 0;
+ req->ack_pending = 0;
+ req->flow_idx = req->clear_tail;
+ req->state = TID_REQUEST_RESEND;
+ }
+ req->s_next_psn = qp->s_psn;
+ /* Read one segment at a time */
+ len = min_t(u32, req->seg_len,
+ wqe->length - req->seg_len * req->cur_seg);
+ delta = hfi1_build_tid_rdma_read_req(qp, wqe, ohdr,
+ &bth1, &bth2,
+ &len);
+ if (delta <= 0) {
+ /* Wait for TID space */
+ goto bail;
+ }
+ if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+ hwords += delta;
+ ss = &wpriv->ss;
+ /* Check if this is the last segment */
+ if (req->cur_seg >= req->total_segs &&
+ ++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ /*
+ * Don't allow more operations to be started
+ * than the QP limits allow.
+ */
+ if (qp->s_num_rd_atomic >=
+ qp->s_max_rd_atomic) {
+ qp->s_flags |= RVT_S_WAIT_RDMAR;
+ goto bail;
+ }
+ qp->s_num_rd_atomic++;
+ fallthrough;
+ case IB_WR_OPFN:
+ if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+ if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_OPFN) {
+ qp->s_state = OP(COMPARE_SWAP);
+ put_ib_ateth_swap(wqe->atomic_wr.swap,
+ &ohdr->u.atomic_eth);
+ put_ib_ateth_compare(wqe->atomic_wr.compare_add,
+ &ohdr->u.atomic_eth);
+ } else {
+ qp->s_state = OP(FETCH_ADD);
+ put_ib_ateth_swap(wqe->atomic_wr.compare_add,
+ &ohdr->u.atomic_eth);
+ put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
+ }
+ put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
+ &ohdr->u.atomic_eth);
+ ohdr->u.atomic_eth.rkey = cpu_to_be32(
+ wqe->atomic_wr.rkey);
+ hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
+ ss = NULL;
+ len = 0;
+ bth2 |= IB_BTH_REQ_ACK;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ default:
+ goto bail;
+ }
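+ /*
+ * TID RDMA READ sets up qp->s_sge (when needed) in its own
+ * case above, so only initialize it here for other opcodes.
+ */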
+ if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) {
+ qp->s_sge.sge = wqe->sg_list[0];
+ qp->s_sge.sg_list = wqe->sg_list + 1;
+ qp->s_sge.num_sge = wqe->wr.num_sge;
+ qp->s_sge.total_len = wqe->length;
+ qp->s_len = wqe->length;
+ }
+ if (newreq) {
+ qp->s_tail++;
+ if (qp->s_tail >= qp->s_size)
+ qp->s_tail = 0;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
+ qp->s_psn = wqe->lpsn + 1;
+ else if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
+ qp->s_psn = req->s_next_psn;
+ else
+ qp->s_psn++;
+ break;
+
+ case OP(RDMA_READ_RESPONSE_FIRST):
+ /*
+ * qp->s_state is normally set to the opcode of the
+ * last packet constructed for new requests and therefore
+ * is never set to RDMA read response.
+ * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
+ * thread to indicate a SEND needs to be restarted from an
+ * earlier PSN without interfering with the sending thread.
+ * See restart_rc().
+ */
+ qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
+ fallthrough;
+ case OP(SEND_FIRST):
+ qp->s_state = OP(SEND_MIDDLE);
+ fallthrough;
+ case OP(SEND_MIDDLE):
+ bth2 = mask_psn(qp->s_psn++);
+ ss = &qp->s_sge;
+ len = qp->s_len;
+ if (len > pmtu) {
+ len = pmtu;
+ middle = HFI1_CAP_IS_KSET(SDMA_AHG);
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND) {
+ qp->s_state = OP(SEND_LAST);
+ } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
+ qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ } else {
+ qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
+ /* invalidate data comes after the BTH */
+ ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey);
+ hwords += 1;
+ }
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ bth2 |= IB_BTH_REQ_ACK;
+ qp->s_cur++;
+ if (qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case OP(RDMA_READ_RESPONSE_LAST):
+ /*
+ * qp->s_state is normally set to the opcode of the
+ * last packet constructed for new requests and therefore
+ * is never set to RDMA read response.
+ * RDMA_READ_RESPONSE_LAST is used by the ACK processing
+ * thread to indicate a RDMA write needs to be restarted from
+ * an earlier PSN without interfering with the sending thread.
+ * See restart_rc().
+ */
+ qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
+ fallthrough;
+ case OP(RDMA_WRITE_FIRST):
+ qp->s_state = OP(RDMA_WRITE_MIDDLE);
+ fallthrough;
+ case OP(RDMA_WRITE_MIDDLE):
+ bth2 = mask_psn(qp->s_psn++);
+ ss = &qp->s_sge;
+ len = qp->s_len;
+ if (len > pmtu) {
+ len = pmtu;
+ middle = HFI1_CAP_IS_KSET(SDMA_AHG);
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
+ qp->s_state = OP(RDMA_WRITE_LAST);
+ } else {
+ qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ }
+ bth2 |= IB_BTH_REQ_ACK;
+ qp->s_cur++;
+ if (qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case OP(RDMA_READ_RESPONSE_MIDDLE):
+ /*
+ * qp->s_state is normally set to the opcode of the
+ * last packet constructed for new requests and therefore
+ * is never set to RDMA read response.
+ * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
+ * thread to indicate a RDMA read needs to be restarted from
+ * an earlier PSN without interfering with the sending thread.
+ * See restart_rc().
+ */
+ len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
+ put_ib_reth_vaddr(
+ wqe->rdma_wr.remote_addr + len,
+ &ohdr->u.rc.reth);
+ ohdr->u.rc.reth.rkey =
+ cpu_to_be32(wqe->rdma_wr.rkey);
+ ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
+ qp->s_state = OP(RDMA_READ_REQUEST);
+ hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
+ bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
+ qp->s_psn = wqe->lpsn + 1;
+ ss = NULL;
+ len = 0;
+ qp->s_cur++;
+ if (qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case TID_OP(WRITE_RESP):
+ /*
+ * This value for s_state is used for restarting a TID RDMA
+ * WRITE request. See the comment in OP(RDMA_READ_RESPONSE_MIDDLE)
+ * above for more.
+ */
+ req = wqe_to_tid_req(wqe);
+ req->state = TID_REQUEST_RESEND;
+ rcu_read_lock();
+ remote = rcu_dereference(priv->tid_rdma.remote);
+ req->comp_seg = delta_psn(qp->s_psn, wqe->psn);
+ len = wqe->length - (req->comp_seg * remote->max_len);
+ rcu_read_unlock();
+
+ bth2 = mask_psn(qp->s_psn);
+ hwords += hfi1_build_tid_rdma_write_req(qp, wqe, ohdr, &bth1,
+ &bth2, &len);
+ qp->s_psn = wqe->lpsn + 1;
+ ss = NULL;
+ qp->s_state = TID_OP(WRITE_REQ);
+ priv->pending_tid_w_resp += delta_psn(wqe->lpsn, bth2) + 1;
+ priv->s_tid_cur = qp->s_cur;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ trace_hfi1_tid_req_make_req_write(qp, 0, wqe->wr.opcode,
+ wqe->psn, wqe->lpsn, req);
+ break;
+
+ case TID_OP(READ_RESP):
+ if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
+ goto bail;
+ /* This is used to restart a TID read request */
+ req = wqe_to_tid_req(wqe);
+ wpriv = wqe->priv;
+ /*
+ * Back down. The field qp->s_psn has been set to the psn with
+ * which the request should be restarted. It's OK to use division
+ * as this is on the retry path.
+ */
+ req->cur_seg = delta_psn(qp->s_psn, wqe->psn) / priv->pkts_ps;
+
+ /*
+ * The following function needs to be redefined to return the
+ * status to make sure that we find the flow. In the meantime,
+ * the req->state change can be used to check whether the
+ * call succeeded.
+ */
+ req->state = TID_REQUEST_RESEND;
+ hfi1_tid_rdma_restart_req(qp, wqe, &bth2);
+ if (req->state != TID_REQUEST_ACTIVE) {
+ /*
+ * Failed to find the flow. Release all allocated tid
+ * resources.
+ */
+ hfi1_kern_exp_rcv_clear_all(req);
+ hfi1_kern_clear_hw_flow(priv->rcd, qp);
+
+ hfi1_trdma_send_complete(qp, wqe, IB_WC_LOC_QP_OP_ERR);
+ goto bail;
+ }
+ req->state = TID_REQUEST_RESEND;
+ len = min_t(u32, req->seg_len,
+ wqe->length - req->seg_len * req->cur_seg);
+ flow = &req->flows[req->flow_idx];
+ len -= flow->sent;
+ req->s_next_psn = flow->flow_state.ib_lpsn + 1;
+ delta = hfi1_build_tid_rdma_read_packet(wqe, ohdr, &bth1,
+ &bth2, &len);
+ if (delta <= 0) {
+ /* Wait for TID space */
+ goto bail;
+ }
+ hwords += delta;
+ ss = &wpriv->ss;
+ /* Check if this is the last segment */
+ if (req->cur_seg >= req->total_segs &&
+ ++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ qp->s_psn = req->s_next_psn;
+ trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode,
+ wqe->psn, wqe->lpsn, req);
+ break;
+ case TID_OP(READ_REQ):
+ req = wqe_to_tid_req(wqe);
+ delta = cmp_psn(qp->s_psn, wqe->psn);
+ /*
+ * If the current WR is not TID RDMA READ, or this is the start
+ * of a new request, we need to change the qp->s_state so that
+ * the request can be set up properly.
+ */
+ if (wqe->wr.opcode != IB_WR_TID_RDMA_READ || delta == 0 ||
+ qp->s_cur == qp->s_tail) {
+ qp->s_state = OP(RDMA_READ_REQUEST);
+ if (delta == 0 || qp->s_cur == qp->s_tail)
+ goto check_s_state;
+ else
+ goto bail;
+ }
+
+ /* Rate limiting */
+ if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) {
+ qp->s_flags |= RVT_S_WAIT_RDMAR;
+ goto bail;
+ }
+
+ wpriv = wqe->priv;
+ /* Read one segment at a time */
+ len = min_t(u32, req->seg_len,
+ wqe->length - req->seg_len * req->cur_seg);
+ delta = hfi1_build_tid_rdma_read_req(qp, wqe, ohdr, &bth1,
+ &bth2, &len);
+ if (delta <= 0) {
+ /* Wait for TID space */
+ goto bail;
+ }
+ hwords += delta;
+ ss = &wpriv->ss;
+ /* Check if this is the last segment */
+ if (req->cur_seg >= req->total_segs &&
+ ++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ qp->s_psn = req->s_next_psn;
+ trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode,
+ wqe->psn, wqe->lpsn, req);
+ break;
+ }
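+ /*
+ * Record the highest PSN handed to the send engine and request an
+ * ACK every HFI1_PSN_CREDIT packets. TID RDMA WRITE requests are
+ * excluded since they are acknowledged with TID RDMA ACK packets.
+ */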
+ qp->s_sending_hpsn = bth2;
+ delta = delta_psn(bth2, wqe->psn);
+ if (delta && delta % HFI1_PSN_CREDIT == 0 &&
+ wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
+ bth2 |= IB_BTH_REQ_ACK;
+ if (qp->s_flags & RVT_S_SEND_ONE) {
+ qp->s_flags &= ~RVT_S_SEND_ONE;
+ qp->s_flags |= RVT_S_WAIT_ACK;
+ bth2 |= IB_BTH_REQ_ACK;
+ }
+ qp->s_len -= len;
+ ps->s_txreq->hdr_dwords = hwords;
+ ps->s_txreq->sde = priv->s_sde;
+ ps->s_txreq->ss = ss;
+ ps->s_txreq->s_cur_size = len;
+ hfi1_make_ruc_header(
+ qp,
+ ohdr,
+ bth0 | (qp->s_state << 24),
+ bth1,
+ bth2,
+ middle,
+ ps);
+ return 1;
+
+done_free_tx:
+ hfi1_put_txreq(ps->s_txreq);
+ ps->s_txreq = NULL;
+ return 1;
+
+bail:
+ hfi1_put_txreq(ps->s_txreq);
+
+bail_no_tx:
+ ps->s_txreq = NULL;
+ qp->s_flags &= ~RVT_S_BUSY;
+ /*
+ * If we didn't get a txreq, the QP will be woken up later to try
+ * again. Set the flags to indicate which work item to wake
+ * up.
+ */
+ iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
+ return 0;
+}
+
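+ /*
+ * hfi1_make_bth_aeth - fill in the BTH and AETH of an ACK/NAK packet
+ *
+ * The AETH carries either the NAK code in qp->r_nak_state or the
+ * normal credit/MSN value; the three BTH dwords are then written
+ * with the given bth0, the remote QPN, and the ACK PSN.
+ */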
+static inline void hfi1_make_bth_aeth(struct rvt_qp *qp,
+ struct ib_other_headers *ohdr,
+ u32 bth0, u32 bth1)
+{
+ if (qp->r_nak_state)
+ ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
+ (qp->r_nak_state <<
+ IB_AETH_CREDIT_SHIFT));
+ else
+ ohdr->u.aeth = rvt_compute_aeth(qp);
+
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ ohdr->bth[1] = cpu_to_be32(bth1 | qp->remote_qpn);
+ ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));
+}
+
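+ /*
+ * hfi1_queue_rc_ack - defer an ACK to the send engine
+ *
+ * Mark the ACK as pending (RVT_S_ACK_PENDING | RVT_S_RESP_PENDING),
+ * capture the NAK state and ACK PSN, and schedule the send engine so
+ * the ACK is built by make_rc_ack() when resources are available.
+ */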
+static inline void hfi1_queue_rc_ack(struct hfi1_packet *packet, bool is_fecn)
+{
+ struct rvt_qp *qp = packet->qp;
+ struct hfi1_ibport *ibp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
+ goto unlock;
+ ibp = rcd_to_iport(packet->rcd);
+ this_cpu_inc(*ibp->rvp.rc_qacks);
+ qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
+ qp->s_nak_state = qp->r_nak_state;
+ qp->s_ack_psn = qp->r_ack_psn;
+ if (is_fecn)
+ qp->s_flags |= RVT_S_ECN;
+
+ /* Schedule the send tasklet. */
+ hfi1_schedule_send(qp);
+unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
+
+static inline void hfi1_make_rc_ack_9B(struct hfi1_packet *packet,
+ struct hfi1_opa_header *opa_hdr,
+ u8 sc5, bool is_fecn,
+ u64 *pbc_flags, u32 *hwords,
+ u32 *nwords)
+{
+ struct rvt_qp *qp = packet->qp;
+ struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct ib_header *hdr = &opa_hdr->ibh;
+ struct ib_other_headers *ohdr;
+ u16 lrh0 = HFI1_LRH_BTH;
+ u16 pkey;
+ u32 bth0, bth1;
+
+ opa_hdr->hdr_type = HFI1_PKT_TYPE_9B;
+ ohdr = &hdr->u.oth;
+ /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
+ *hwords = 6;
+
+ if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
+ *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
+ rdma_ah_read_grh(&qp->remote_ah_attr),
+ *hwords - 2, SIZE_OF_CRC);
+ ohdr = &hdr->u.l.oth;
+ lrh0 = HFI1_LRH_GRH;
+ }
+ /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
+ *pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
+
+ /* read pkey_index w/o lock (it's atomic) */
+ pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
+
+ lrh0 |= (sc5 & IB_SC_MASK) << IB_SC_SHIFT |
+ (rdma_ah_get_sl(&qp->remote_ah_attr) & IB_SL_MASK) <<
+ IB_SL_SHIFT;
+
+ hfi1_make_ib_hdr(hdr, lrh0, *hwords + SIZE_OF_CRC,
+ opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
+ ppd->lid | rdma_ah_get_path_bits(&qp->remote_ah_attr));
+
+ bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
+ if (qp->s_mig_state == IB_MIG_MIGRATED)
+ bth0 |= IB_BTH_MIG_REQ;
+ bth1 = (!!is_fecn) << IB_BECN_SHIFT;
+ /*
+ * Inline ACKs go out without the use of the Verbs send engine, so
+ * we need to set the STL Verbs Extended bit here.
+ */
+ bth1 |= HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT;
+ hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
+}
+
+static inline void hfi1_make_rc_ack_16B(struct hfi1_packet *packet,
+ struct hfi1_opa_header *opa_hdr,
+ u8 sc5, bool is_fecn,
+ u64 *pbc_flags, u32 *hwords,
+ u32 *nwords)
+{
+ struct rvt_qp *qp = packet->qp;
+ struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct hfi1_16b_header *hdr = &opa_hdr->opah;
+ struct ib_other_headers *ohdr;
+ u32 bth0, bth1 = 0;
+ u16 len, pkey;
+ bool becn = is_fecn;
+ u8 l4 = OPA_16B_L4_IB_LOCAL;
+ u8 extra_bytes;
+
+ opa_hdr->hdr_type = HFI1_PKT_TYPE_16B;
+ ohdr = &hdr->u.oth;
+ /* header size in 32-bit words 16B LRH+BTH+AETH = (16+12+4)/4 */
+ *hwords = 8;
+ extra_bytes = hfi1_get_16b_padding(*hwords << 2, 0);
+ *nwords = SIZE_OF_CRC + ((extra_bytes + SIZE_OF_LT) >> 2);
+
+ if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
+ hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
+ *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
+ rdma_ah_read_grh(&qp->remote_ah_attr),
+ *hwords - 4, *nwords);
+ ohdr = &hdr->u.l.oth;
+ l4 = OPA_16B_L4_IB_GLOBAL;
+ }
+ *pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
+
+ /* read pkey_index w/o lock (it's atomic) */
+ pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
+
+ /* Convert dwords to flits */
+ len = (*hwords + *nwords) >> 1;
+
+ hfi1_make_16b_hdr(hdr, ppd->lid |
+ (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
+ ((1 << ppd->lmc) - 1)),
+ opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
+ 16B), len, pkey, becn, 0, l4, sc5);
+
+ bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
+ bth0 |= extra_bytes << 20;
+ if (qp->s_mig_state == IB_MIG_MIGRATED)
+ bth1 = OPA_BTH_MIG_REQ;
+ hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
+}
+
+typedef void (*hfi1_make_rc_ack)(struct hfi1_packet *packet,
+ struct hfi1_opa_header *opa_hdr,
+ u8 sc5, bool is_fecn,
+ u64 *pbc_flags, u32 *hwords,
+ u32 *nwords);
+
+/* We support only two types - 9B and 16B for now */
+static const hfi1_make_rc_ack hfi1_make_rc_ack_tbl[2] = {
+ [HFI1_PKT_TYPE_9B] = &hfi1_make_rc_ack_9B,
+ [HFI1_PKT_TYPE_16B] = &hfi1_make_rc_ack_16B
+};
+
+/*
+ * hfi1_send_rc_ack - Construct an ACK packet and send it
+ *
+ * This is called from hfi1_rc_rcv() and handle_receive_interrupt().
+ * Note that RDMA reads and atomics are handled in the
+ * send side QP state and send engine.
+ */
+void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn)
+{
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct rvt_qp *qp = packet->qp;
+ struct hfi1_ibport *ibp = rcd_to_iport(rcd);
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
+ u64 pbc, pbc_flags = 0;
+ u32 hwords = 0;
+ u32 nwords = 0;
+ u32 plen;
+ struct pio_buf *pbuf;
+ struct hfi1_opa_header opa_hdr;
+
+ /* clear the defer count */
+ qp->r_adefered = 0;
+
+ /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
+ if (qp->s_flags & RVT_S_RESP_PENDING) {
+ hfi1_queue_rc_ack(packet, is_fecn);
+ return;
+ }
+
+ /* Ensure s_rdma_ack_cnt changes are committed */
+ if (qp->s_rdma_ack_cnt) {
+ hfi1_queue_rc_ack(packet, is_fecn);
+ return;
+ }
+
+ /* Don't try to send ACKs if the link isn't ACTIVE */
+ if (driver_lstate(ppd) != IB_PORT_ACTIVE)
+ return;
+
+ /* Make the appropriate header */
+ hfi1_make_rc_ack_tbl[priv->hdr_type](packet, &opa_hdr, sc5, is_fecn,
+ &pbc_flags, &hwords, &nwords);
+
+ plen = 2 /* PBC */ + hwords + nwords;
+ pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps,
+ sc_to_vlt(ppd->dd, sc5), plen);
+ pbuf = sc_buffer_alloc(rcd->sc, plen, NULL, NULL);
+ if (IS_ERR_OR_NULL(pbuf)) {
+ /*
+ * We have no room to send at the moment. Pass
+ * responsibility for sending the ACK to the send engine
+ * so that when enough buffer space becomes available,
+ * the ACK is sent ahead of other outgoing packets.
+ */
+ hfi1_queue_rc_ack(packet, is_fecn);
+ return;
+ }
+ trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
+ &opa_hdr, ib_is_sc5(sc5));
+
+ /* write the pbc and data */
+ ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
+ (priv->hdr_type == HFI1_PKT_TYPE_9B ?
+ (void *)&opa_hdr.ibh :
+ (void *)&opa_hdr.opah), hwords);
+ return;
+}
+
+/**
+ * update_num_rd_atomic - update the qp->s_num_rd_atomic
+ * @qp: the QP
+ * @psn: the packet sequence number to restart at
+ * @wqe: the wqe
+ *
+ * This is called from reset_psn() to update qp->s_num_rd_atomic
+ * for the current wqe.
+ * Called at interrupt level with the QP s_lock held.
+ */
+static void update_num_rd_atomic(struct rvt_qp *qp, u32 psn,
+ struct rvt_swqe *wqe)
+{
+ u32 opcode = wqe->wr.opcode;
+
+ if (opcode == IB_WR_RDMA_READ ||
+ opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+ qp->s_num_rd_atomic++;
+ } else if (opcode == IB_WR_TID_RDMA_READ) {
+ struct tid_rdma_request *req = wqe_to_tid_req(wqe);
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ if (cmp_psn(psn, wqe->lpsn) <= 0) {
+ u32 cur_seg;
+
+ cur_seg = (psn - wqe->psn) / priv->pkts_ps;
+ req->ack_pending = cur_seg - req->comp_seg;
+ priv->pending_tid_r_segs += req->ack_pending;
+ qp->s_num_rd_atomic += req->ack_pending;
+ trace_hfi1_tid_req_update_num_rd_atomic(qp, 0,
+ wqe->wr.opcode,
+ wqe->psn,
+ wqe->lpsn,
+ req);
+ } else {
+ priv->pending_tid_r_segs += req->total_segs;
+ qp->s_num_rd_atomic += req->total_segs;
+ }
+ }
+}
+
+/**
+ * reset_psn - reset the QP state to send starting from PSN
+ * @qp: the QP
+ * @psn: the packet sequence number to restart at
+ *
+ * This is called from hfi1_rc_rcv() to process an incoming RC ACK
+ * for the given QP.
+ * Called at interrupt level with the QP s_lock held.
+ */
+static void reset_psn(struct rvt_qp *qp, u32 psn)
+{
+ u32 n = qp->s_acked;
+ struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
+ u32 opcode;
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ lockdep_assert_held(&qp->s_lock);
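+ /*
+ * Rewind the send queue to the last ACKed WQE; the outstanding
+ * read/atomic and TID segment counts are recomputed below by
+ * update_num_rd_atomic() as the queue is walked.
+ */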
+ qp->s_cur = n;
+ priv->pending_tid_r_segs = 0;
+ priv->pending_tid_w_resp = 0;
+ qp->s_num_rd_atomic = 0;
+
+ /*
+ * If we are starting the request from the beginning,
+ * let the normal send code handle initialization.
+ */
+ if (cmp_psn(psn, wqe->psn) <= 0) {
+ qp->s_state = OP(SEND_LAST);
+ goto done;
+ }
+ update_num_rd_atomic(qp, psn, wqe);
+
+ /* Find the work request opcode corresponding to the given PSN. */
+ for (;;) {
+ int diff;
+
+ if (++n == qp->s_size)
+ n = 0;
+ if (n == qp->s_tail)
+ break;
+ wqe = rvt_get_swqe_ptr(qp, n);
+ diff = cmp_psn(psn, wqe->psn);
+ if (diff < 0) {
+ /* Point wqe back to the previous one */
+ wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
+ break;
+ }
+ qp->s_cur = n;
+ /*
+ * If we are starting the request from the beginning,
+ * let the normal send code handle initialization.
+ */
+ if (diff == 0) {
+ qp->s_state = OP(SEND_LAST);
+ goto done;
+ }
+
+ update_num_rd_atomic(qp, psn, wqe);
+ }
+ opcode = wqe->wr.opcode;
+
+ /*
+ * Set the state to restart in the middle of a request.
+ * Don't change the s_sge, s_cur_sge, or s_cur_size.
+ * See hfi1_make_rc_req().
+ */
+ switch (opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
+ break;
+
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
+ break;
+
+ case IB_WR_TID_RDMA_WRITE:
+ qp->s_state = TID_OP(WRITE_RESP);
+ break;
+
+ case IB_WR_RDMA_READ:
+ qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
+ break;
+
+ case IB_WR_TID_RDMA_READ:
+ qp->s_state = TID_OP(READ_RESP);
+ break;
+
+ default:
+ /*
+ * This case shouldn't happen since there is only
+ * one PSN per request.
+ */
+ qp->s_state = OP(SEND_LAST);
+ }
+done:
+ priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK;
+ qp->s_psn = psn;
+ /*
+ * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
+ * asynchronously before the send engine can get scheduled.
+ * Doing it in hfi1_make_rc_req() is too late.
+ */
+ if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
+ (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
+ qp->s_flags |= RVT_S_WAIT_PSN;
+ qp->s_flags &= ~HFI1_S_AHG_VALID;
+ trace_hfi1_sender_reset_psn(qp);
+}
+
+/*
+ * Back up requester to resend the last un-ACKed request.
+ * The QP r_lock and s_lock should be held and interrupts disabled.
+ */
+void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
+ struct hfi1_ibport *ibp;
+
+ lockdep_assert_held(&qp->r_lock);
+ lockdep_assert_held(&qp->s_lock);
+ trace_hfi1_sender_restart_rc(qp);
+ if (qp->s_retry == 0) {
+ if (qp->s_mig_state == IB_MIG_ARMED) {
+ hfi1_migrate_qp(qp);
+ qp->s_retry = qp->s_retry_cnt;
+ } else if (qp->s_last == qp->s_acked) {
+ /*
+ * We need special handling for the OPFN request WQEs as
+ * they are not allowed to generate real user errors
+ */
+ if (wqe->wr.opcode == IB_WR_OPFN) {
+ struct hfi1_ibport *ibp =
+ to_iport(qp->ibqp.device, qp->port_num);
+ /*
+ * Call opfn_conn_reply() with capcode and
+ * remaining data as 0 to close out the
+ * current request
+ */
+ opfn_conn_reply(qp, priv->opfn.curr);
+ wqe = do_rc_completion(qp, wqe, ibp);
+ qp->s_flags &= ~RVT_S_WAIT_ACK;
+ } else {
+ trace_hfi1_tid_write_sender_restart_rc(qp, 0);
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
+ struct tid_rdma_request *req;
+
+ req = wqe_to_tid_req(wqe);
+ hfi1_kern_exp_rcv_clear_all(req);
+ hfi1_kern_clear_hw_flow(priv->rcd, qp);
+ }
+
+ hfi1_trdma_send_complete(qp, wqe,
+ IB_WC_RETRY_EXC_ERR);
+ rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ }
+ return;
+ } else { /* need to handle delayed completion */
+ return;
+ }
+ } else {
+ qp->s_retry--;
+ }
+
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ if (wqe->wr.opcode == IB_WR_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_TID_RDMA_READ)
+ ibp->rvp.n_rc_resends++;
+ else
+ ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
+
+ qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
+ RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
+ RVT_S_WAIT_ACK | HFI1_S_WAIT_TID_RESP);
+ if (wait)
+ qp->s_flags |= RVT_S_SEND_ONE;
+ reset_psn(qp, psn);
+}
+
+/*
+ * Set qp->s_sending_psn to the next PSN after the given one.
+ * This would be psn+1 except when RDMA reads or TID RDMA ops
+ * are present.
+ */
+static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
+{
+ struct rvt_swqe *wqe;
+ u32 n = qp->s_last;
+
+ lockdep_assert_held(&qp->s_lock);
+ /* Find the work request corresponding to the given PSN. */
+ for (;;) {
+ wqe = rvt_get_swqe_ptr(qp, n);
+ if (cmp_psn(psn, wqe->lpsn) <= 0) {
+ if (wqe->wr.opcode == IB_WR_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
+ qp->s_sending_psn = wqe->lpsn + 1;
+ else
+ qp->s_sending_psn = psn + 1;
+ break;
+ }
+ if (++n == qp->s_size)
+ n = 0;
+ if (n == qp->s_tail)
+ break;
+ }
+}
+
+/**
+ * hfi1_rc_verbs_aborted - handle abort status
+ * @qp: the QP
+ * @opah: the opa header
+ *
+ * This code modifies both the ACK bit in BTH[2]
+ * and the s_flags to go into send-one mode.
+ *
+ * This serves to throttle the send engine to only
+ * send a single packet in the likely case that
+ * a link has gone down.
+ */
+void hfi1_rc_verbs_aborted(struct rvt_qp *qp, struct hfi1_opa_header *opah)
+{
+ struct ib_other_headers *ohdr = hfi1_get_rc_ohdr(opah);
+ u8 opcode = ib_bth_get_opcode(ohdr);
+ u32 psn;
+
+ /* ignore responses */
+ if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
+ opcode <= OP(ATOMIC_ACKNOWLEDGE)) ||
+ opcode == TID_OP(READ_RESP) ||
+ opcode == TID_OP(WRITE_RESP))
+ return;
+
+ psn = ib_bth_get_psn(ohdr) | IB_BTH_REQ_ACK;
+ ohdr->bth[2] = cpu_to_be32(psn);
+ qp->s_flags |= RVT_S_SEND_ONE;
+}
+
+/*
+ * This should be called with the QP s_lock held and interrupts disabled.
+ */
+void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
+{
+ struct ib_other_headers *ohdr;
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct rvt_swqe *wqe;
+ u32 opcode, head, tail;
+ u32 psn;
+ struct tid_rdma_request *req;
+
+ lockdep_assert_held(&qp->s_lock);
+ if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
+ return;
+
+ ohdr = hfi1_get_rc_ohdr(opah);
+ opcode = ib_bth_get_opcode(ohdr);
+ if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
+ opcode <= OP(ATOMIC_ACKNOWLEDGE)) ||
+ opcode == TID_OP(READ_RESP) ||
+ opcode == TID_OP(WRITE_RESP)) {
+ WARN_ON(!qp->s_rdma_ack_cnt);
+ qp->s_rdma_ack_cnt--;
+ return;
+ }
+
+ psn = ib_bth_get_psn(ohdr);
+ /*
+ * Don't attempt to reset the sending PSN for packets in the
+ * KDETH PSN space since the PSN does not match anything.
+ */
+ if (opcode != TID_OP(WRITE_DATA) &&
+ opcode != TID_OP(WRITE_DATA_LAST) &&
+ opcode != TID_OP(ACK) && opcode != TID_OP(RESYNC))
+ reset_sending_psn(qp, psn);
+
+ /* Handle TID RDMA WRITE packets differently */
+ if (opcode >= TID_OP(WRITE_REQ) &&
+ opcode <= TID_OP(WRITE_DATA_LAST)) {
+ head = priv->s_tid_head;
+ tail = priv->s_tid_cur;
+ /*
+ * s_tid_cur is set to s_tid_head in the case where
+ * a new TID RDMA request is being started and all
+ * previous ones have been completed.
+ * Therefore, we need to do a secondary check in order
+ * to properly determine whether we should start the
+ * RC timer.
+ */
+ wqe = rvt_get_swqe_ptr(qp, tail);
+ req = wqe_to_tid_req(wqe);
+ if (head == tail && req->comp_seg < req->total_segs) {
+ if (tail == 0)
+ tail = qp->s_size - 1;
+ else
+ tail -= 1;
+ }
+ } else {
+ head = qp->s_tail;
+ tail = qp->s_acked;
+ }
+
+ /*
+ * Start timer after a packet requesting an ACK has been sent and
+ * there are still requests that haven't been acked.
+ */
+ if ((psn & IB_BTH_REQ_ACK) && tail != head &&
+ opcode != TID_OP(WRITE_DATA) && opcode != TID_OP(WRITE_DATA_LAST) &&
+ opcode != TID_OP(RESYNC) &&
+ !(qp->s_flags &
+ (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
+ (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
+ if (opcode == TID_OP(READ_REQ))
+ rvt_add_retry_timer_ext(qp, priv->timeout_shift);
+ else
+ rvt_add_retry_timer(qp);
+ }
+
+ /* Start TID RDMA ACK timer */
+ if ((opcode == TID_OP(WRITE_DATA) ||
+ opcode == TID_OP(WRITE_DATA_LAST) ||
+ opcode == TID_OP(RESYNC)) &&
+ (psn & IB_BTH_REQ_ACK) &&
+ !(priv->s_flags & HFI1_S_TID_RETRY_TIMER) &&
+ (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
+ /*
+ * The TID RDMA ACK packet could be received before this
+ * function is called. Therefore, add the timer only if TID
+ * RDMA ACK packets are actually pending.
+ */
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
+ req = wqe_to_tid_req(wqe);
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
+ req->ack_seg < req->cur_seg)
+ hfi1_add_tid_retry_timer(qp);
+ }
+
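+ /*
+ * Generate completions for ACKed WQEs whose packets have all
+ * cleared the send engine.
+ */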
+ while (qp->s_last != qp->s_acked) {
+ wqe = rvt_get_swqe_ptr(qp, qp->s_last);
+ if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
+ cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
+ break;
+ trdma_clean_swqe(qp, wqe);
+ trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
+ rvt_qp_complete_swqe(qp,
+ wqe,
+ ib_hfi1_wc_opcode[wqe->wr.opcode],
+ IB_WC_SUCCESS);
+ }
+ /*
+ * If we were waiting for sends to complete before re-sending,
+ * and they are now complete, restart sending.
+ */
+ trace_hfi1_sendcomplete(qp, psn);
+ if (qp->s_flags & RVT_S_WAIT_PSN &&
+ cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+ qp->s_flags &= ~RVT_S_WAIT_PSN;
+ qp->s_sending_psn = qp->s_psn;
+ qp->s_sending_hpsn = qp->s_psn - 1;
+ hfi1_schedule_send(qp);
+ }
+}
+
+static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
+{
+ qp->s_last_psn = psn;
+}
+
+/*
+ * Generate a SWQE completion.
+ * This is similar to hfi1_send_complete but has to check to be sure
+ * that the SGEs are not being referenced if the SWQE is being resent.
+ */
+struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
+ struct rvt_swqe *wqe,
+ struct hfi1_ibport *ibp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ lockdep_assert_held(&qp->s_lock);
+ /*
+ * Don't decrement refcount and don't generate a
+ * completion if the SWQE is being resent until the send
+ * is finished.
+ */
+ trace_hfi1_rc_completion(qp, wqe->lpsn);
+ if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
+ cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+ trdma_clean_swqe(qp, wqe);
+ trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
+ rvt_qp_complete_swqe(qp,
+ wqe,
+ ib_hfi1_wc_opcode[wqe->wr.opcode],
+ IB_WC_SUCCESS);
+ } else {
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+ this_cpu_inc(*ibp->rvp.rc_delayed_comp);
+ /*
+ * If send progress is not running, attempt to progress
+ * the SDMA queue.
+ */
+ if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
+ struct sdma_engine *engine;
+ u8 sl = rdma_ah_get_sl(&qp->remote_ah_attr);
+ u8 sc5;
+
+ /* For now use sc to find engine */
+ sc5 = ibp->sl_to_sc[sl];
+ engine = qp_to_sdma_engine(qp, sc5);
+ sdma_engine_progress_schedule(engine);
+ }
+ }
+
+ qp->s_retry = qp->s_retry_cnt;
+ /*
+ * Don't update the last PSN if the request being completed is
+ * a TID RDMA WRITE request.
+ * Completion of the TID RDMA WRITE requests are done by the
+ * TID RDMA ACKs and as such could be for a request that has
+ * already been ACKed as far as the IB state machine is
+ * concerned.
+ */
+ if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
+ update_last_psn(qp, wqe->lpsn);
+
+ /*
+ * If we are completing a request which is in the process of
+ * being resent, we can stop re-sending it since we know the
+ * responder has already seen it.
+ */
+ if (qp->s_acked == qp->s_cur) {
+ if (++qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ qp->s_acked = qp->s_cur;
+ wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
+ if (qp->s_acked != qp->s_tail) {
+ qp->s_state = OP(SEND_LAST);
+ qp->s_psn = wqe->psn;
+ }
+ } else {
+ if (++qp->s_acked >= qp->s_size)
+ qp->s_acked = 0;
+ if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
+ qp->s_draining = 0;
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
+ }
+ if (priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) {
+ priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK;
+ hfi1_schedule_send(qp);
+ }
+ return wqe;
+}
+
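+ /*
+ * set_restart_qp - schedule a retry of the current request
+ *
+ * Restart the requester at the PSN following the last ACKed one and
+ * queue the QP on the receive context wait list so progress resumes,
+ * unless an out-of-sequence restart is already in progress.
+ */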
+static void set_restart_qp(struct rvt_qp *qp, struct hfi1_ctxtdata *rcd)
+{
+ /* Retry this request. */
+ if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
+ qp->r_flags |= RVT_R_RDMAR_SEQ;
+ hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= RVT_R_RSP_SEND;
+ rvt_get_qp(qp);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+ }
+}
+
+/**
+ * update_qp_retry_state - Update qp retry state.
+ * @qp: the QP
+ * @psn: the packet sequence number of the TID RDMA WRITE RESP.
+ * @spsn: The start psn for the given TID RDMA WRITE swqe.
+ * @lpsn: The last psn for the given TID RDMA WRITE swqe.
+ *
+ * This function is called to update the qp retry state upon
+ * receiving a TID WRITE RESP after the qp is scheduled to retry
+ * a request.
+ */
+static void update_qp_retry_state(struct rvt_qp *qp, u32 psn, u32 spsn,
+ u32 lpsn)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+
+ qp->s_psn = psn + 1;
+ /*
+ * If this is the first TID RDMA WRITE RESP packet for the current
+ * request, change the s_state so that the retry will be processed
+ * correctly. Similarly, if this is the last TID RDMA WRITE RESP
+ * packet, change the s_state and advance the s_cur.
+ */
+ if (cmp_psn(psn, lpsn) >= 0) {
+ qp->s_cur = qpriv->s_tid_cur + 1;
+ if (qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ qp->s_state = TID_OP(WRITE_REQ);
+ } else if (!cmp_psn(psn, spsn)) {
+ qp->s_cur = qpriv->s_tid_cur;
+ qp->s_state = TID_OP(WRITE_RESP);
+ }
+}
+
+/*
+ * do_rc_ack - process an incoming RC ACK
+ * @qp: the QP the ACK came in on
+ * @psn: the packet sequence number of the ACK
+ * @opcode: the opcode of the request that resulted in the ACK
+ *
+ * This is called from rc_rcv_resp() to process an incoming RC ACK
+ * for the given QP.
+ * May be called at interrupt level, with the QP s_lock held.
+ * Returns 1 if OK, 0 if current operation should be aborted (NAK).
+ */
+int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
+ u64 val, struct hfi1_ctxtdata *rcd)
+{
+ struct hfi1_ibport *ibp;
+ enum ib_wc_status status;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct rvt_swqe *wqe;
+ int ret = 0;
+ u32 ack_psn;
+ int diff;
+ struct rvt_dev_info *rdi;
+
+ lockdep_assert_held(&qp->s_lock);
+ /*
+ * Note that NAKs implicitly ACK outstanding SEND and RDMA write
+ * requests and implicitly NAK RDMA read and atomic requests issued
+ * before the NAK'ed request. The MSN won't include the NAK'ed
+ * request but will include any ACK'ed requests.
+ */
+ ack_psn = psn;
+ if (aeth >> IB_AETH_NAK_SHIFT)
+ ack_psn--;
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
+ ibp = rcd_to_iport(rcd);
+
+ /*
+ * The MSN might be for a later WQE than the PSN indicates so
+ * only complete WQEs that the PSN finishes.
+ */
+ while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
+ /*
+ * RDMA_READ_RESPONSE_ONLY is a special case since
+ * we want to generate completion events for everything
+ * before the RDMA read, copy the data, then generate
+ * the completion for the read.
+ */
+ if (wqe->wr.opcode == IB_WR_RDMA_READ &&
+ opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
+ diff == 0) {
+ ret = 1;
+ goto bail_stop;
+ }
+ /*
+ * If this request is a RDMA read or atomic, and the ACK is
+ * for a later operation, this ACK NAKs the RDMA read or
+ * atomic. In other words, only a RDMA_READ_LAST or ONLY
+ * can ACK a RDMA read and likewise for atomic ops. Note
+ * that the NAK case can only happen if relaxed ordering is
+ * used and requests are sent after an RDMA read or atomic
+ * is sent but before the response is received.
+ */
+ if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
+ (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
+ (wqe->wr.opcode == IB_WR_TID_RDMA_READ &&
+ (opcode != TID_OP(READ_RESP) || diff != 0)) ||
+ ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
+ (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0)) ||
+ (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
+ (delta_psn(psn, qp->s_last_psn) != 1))) {
+ set_restart_qp(qp, rcd);
+ /*
+ * No need to process the ACK/NAK since we are
+ * restarting an earlier request.
+ */
+ goto bail_stop;
+ }
+ if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+ u64 *vaddr = wqe->sg_list[0].vaddr;
+ *vaddr = val;
+ }
+ if (wqe->wr.opcode == IB_WR_OPFN)
+ opfn_conn_reply(qp, val);
+
+ if (qp->s_num_rd_atomic &&
+ (wqe->wr.opcode == IB_WR_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
+ qp->s_num_rd_atomic--;
+ /* Restart sending task if fence is complete */
+ if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
+ !qp->s_num_rd_atomic) {
+ qp->s_flags &= ~(RVT_S_WAIT_FENCE |
+ RVT_S_WAIT_ACK);
+ hfi1_schedule_send(qp);
+ } else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
+ qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
+ RVT_S_WAIT_ACK);
+ hfi1_schedule_send(qp);
+ }
+ }
+
+ /*
+ * TID RDMA WRITE requests will be completed by the TID RDMA
+ * ACK packet handler (see tid_rdma.c).
+ */
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
+ break;
+
+ wqe = do_rc_completion(qp, wqe, ibp);
+ if (qp->s_acked == qp->s_tail)
+ break;
+ }
+
+ trace_hfi1_rc_ack_do(qp, aeth, psn, wqe);
+ trace_hfi1_sender_do_rc_ack(qp);
+ switch (aeth >> IB_AETH_NAK_SHIFT) {
+ case 0: /* ACK */
+ this_cpu_inc(*ibp->rvp.rc_acks);
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
+ if (wqe_to_tid_req(wqe)->ack_pending)
+ rvt_mod_retry_timer_ext(qp,
+ qpriv->timeout_shift);
+ else
+ rvt_stop_rc_timers(qp);
+ } else if (qp->s_acked != qp->s_tail) {
+ struct rvt_swqe *__w = NULL;
+
+ if (qpriv->s_tid_cur != HFI1_QP_WQE_INVALID)
+ __w = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur);
+
+ /*
+ * Stop timers if we've received all of the TID RDMA
+ * WRITE responses.
+ */
+ if (__w && __w->wr.opcode == IB_WR_TID_RDMA_WRITE &&
+ opcode == TID_OP(WRITE_RESP)) {
+ /*
+ * Normally, the loop above would correctly
+ * process all WQEs from s_acked onward and
+ * either complete them or check for correct
+ * PSN sequencing.
+ * However, for TID RDMA, due to pipelining,
+ * the response may not be for the request at
+ * s_acked so the above loop would just be
+ * skipped. This does not allow for checking
+ * the PSN sequencing. It has to be done
+ * separately.
+ */
+ if (cmp_psn(psn, qp->s_last_psn + 1)) {
+ set_restart_qp(qp, rcd);
+ goto bail_stop;
+ }
+ /*
+ * If the psn is being resent, stop the
+ * resending.
+ */
+ if (qp->s_cur != qp->s_tail &&
+ cmp_psn(qp->s_psn, psn) <= 0)
+ update_qp_retry_state(qp, psn,
+ __w->psn,
+ __w->lpsn);
+ else if (--qpriv->pending_tid_w_resp)
+ rvt_mod_retry_timer(qp);
+ else
+ rvt_stop_rc_timers(qp);
+ } else {
+ /*
+ * We are expecting more ACKs so
+ * mod the retry timer.
+ */
+ rvt_mod_retry_timer(qp);
+ /*
+ * We can stop re-sending the earlier packets
+ * and continue with the next packet the
+ * receiver wants.
+ */
+ if (cmp_psn(qp->s_psn, psn) <= 0)
+ reset_psn(qp, psn + 1);
+ }
+ } else {
+ /* No more acks - kill all timers */
+ rvt_stop_rc_timers(qp);
+ if (cmp_psn(qp->s_psn, psn) <= 0) {
+ qp->s_state = OP(SEND_LAST);
+ qp->s_psn = psn + 1;
+ }
+ }
+ if (qp->s_flags & RVT_S_WAIT_ACK) {
+ qp->s_flags &= ~RVT_S_WAIT_ACK;
+ hfi1_schedule_send(qp);
+ }
+ rvt_get_credit(qp, aeth);
+ qp->s_rnr_retry = qp->s_rnr_retry_cnt;
+ qp->s_retry = qp->s_retry_cnt;
+ /*
+ * If the current request is a TID RDMA WRITE request and the
+ * response is not a TID RDMA WRITE RESP packet, s_last_psn
+ * can't be advanced.
+ */
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
+ opcode != TID_OP(WRITE_RESP) &&
+ cmp_psn(psn, wqe->psn) >= 0)
+ return 1;
+ update_last_psn(qp, psn);
+ return 1;
+
+ case 1: /* RNR NAK */
+ ibp->rvp.n_rnr_naks++;
+ if (qp->s_acked == qp->s_tail)
+ goto bail_stop;
+ if (qp->s_flags & RVT_S_WAIT_RNR)
+ goto bail_stop;
+ rdi = ib_to_rvt(qp->ibqp.device);
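+ /*
+ * Only consume an RNR retry for operation types that do not have
+ * RVT_OPERATION_IGN_RNR_CNT set.
+ */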
+ if (!(rdi->post_parms[wqe->wr.opcode].flags &
+ RVT_OPERATION_IGN_RNR_CNT)) {
+ if (qp->s_rnr_retry == 0) {
+ status = IB_WC_RNR_RETRY_EXC_ERR;
+ goto class_b;
+ }
+ if (qp->s_rnr_retry_cnt < 7)
+ qp->s_rnr_retry--;
+ }
+
+ /*
+ * The last valid PSN is the previous PSN. For TID RDMA WRITE
+ * request, s_last_psn should be incremented only when a TID
+ * RDMA WRITE RESP is received to avoid skipping lost TID RDMA
+ * WRITE RESP packets.
+ */
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
+ reset_psn(qp, qp->s_last_psn + 1);
+ } else {
+ update_last_psn(qp, psn - 1);
+ reset_psn(qp, psn);
+ }
+
+ ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
+ qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
+ rvt_stop_rc_timers(qp);
+ rvt_add_rnr_timer(qp, aeth);
+ return 0;
+
+ case 3: /* NAK */
+ if (qp->s_acked == qp->s_tail)
+ goto bail_stop;
+ /* The last valid PSN is the previous PSN. */
+ update_last_psn(qp, psn - 1);
+ switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
+ IB_AETH_CREDIT_MASK) {
+ case 0: /* PSN sequence error */
+ ibp->rvp.n_seq_naks++;
+ /*
+ * Back up to the responder's expected PSN.
+ * Note that we might get a NAK in the middle of an
+ * RDMA READ response which terminates the RDMA
+ * READ.
+ */
+ hfi1_restart_rc(qp, psn, 0);
+ hfi1_schedule_send(qp);
+ break;
+
+ case 1: /* Invalid Request */
+ status = IB_WC_REM_INV_REQ_ERR;
+ ibp->rvp.n_other_naks++;
+ goto class_b;
+
+ case 2: /* Remote Access Error */
+ status = IB_WC_REM_ACCESS_ERR;
+ ibp->rvp.n_other_naks++;
+ goto class_b;
+
+ case 3: /* Remote Operation Error */
+ status = IB_WC_REM_OP_ERR;
+ ibp->rvp.n_other_naks++;
+class_b:
+ if (qp->s_last == qp->s_acked) {
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
+ hfi1_kern_read_tid_flow_free(qp);
+
+ hfi1_trdma_send_complete(qp, wqe, status);
+ rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ }
+ break;
+
+ default:
+ /* Ignore other reserved NAK error codes */
+ goto reserved;
+ }
+ qp->s_retry = qp->s_retry_cnt;
+ qp->s_rnr_retry = qp->s_rnr_retry_cnt;
+ goto bail_stop;
+
+ default: /* 2: reserved */
+reserved:
+ /* Ignore reserved NAK codes. */
+ goto bail_stop;
+ }
+ /* cannot be reached */
+bail_stop:
+ rvt_stop_rc_timers(qp);
+ return ret;
+}
+
+/*
+ * We have seen an out of sequence RDMA read middle or last packet.
+ * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
+ */
+static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
+ struct hfi1_ctxtdata *rcd)
+{
+ struct rvt_swqe *wqe;
+
+ lockdep_assert_held(&qp->s_lock);
+ /* Remove QP from retry timer */
+ rvt_stop_rc_timers(qp);
+
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
+
+ while (cmp_psn(psn, wqe->lpsn) > 0) {
+ if (wqe->wr.opcode == IB_WR_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_TID_RDMA_WRITE ||
+ wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
+ break;
+ wqe = do_rc_completion(qp, wqe, ibp);
+ }
+
+ ibp->rvp.n_rdma_seq++;
+ qp->r_flags |= RVT_R_RDMAR_SEQ;
+ hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= RVT_R_RSP_SEND;
+ rvt_get_qp(qp);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+}
+
+/**
+ * rc_rcv_resp - process an incoming RC response packet
+ * @packet: data packet information
+ *
+ * This is called from hfi1_rc_rcv() to process an incoming RC response
+ * packet for the given QP.
+ * Called at interrupt level.
+ */
+static void rc_rcv_resp(struct hfi1_packet *packet)
+{
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ void *data = packet->payload;
+ u32 tlen = packet->tlen;
+ struct rvt_qp *qp = packet->qp;
+ struct hfi1_ibport *ibp;
+ struct ib_other_headers *ohdr = packet->ohdr;
+ struct rvt_swqe *wqe;
+ enum ib_wc_status status;
+ unsigned long flags;
+ int diff;
+ u64 val;
+ u32 aeth;
+ u32 psn = ib_bth_get_psn(packet->ohdr);
+ u32 pmtu = qp->pmtu;
+ u16 hdrsize = packet->hlen;
+ u8 opcode = packet->opcode;
+ u8 pad = packet->pad;
+ u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ trace_hfi1_ack(qp, psn);
+
+ /* Ignore invalid responses. */
+ if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
+ goto ack_done;
+
+ /* Ignore duplicate responses. */
+ diff = cmp_psn(psn, qp->s_last_psn);
+ if (unlikely(diff <= 0)) {
+ /* Update credits for "ghost" ACKs */
+ if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
+ aeth = be32_to_cpu(ohdr->u.aeth);
+ if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
+ rvt_get_credit(qp, aeth);
+ }
+ goto ack_done;
+ }
+
+ /*
+ * Skip everything other than the PSN we expect, if we are waiting
+ * for a reply to a restarted RDMA read or atomic op.
+ */
+ if (qp->r_flags & RVT_R_RDMAR_SEQ) {
+ if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
+ goto ack_done;
+ qp->r_flags &= ~RVT_R_RDMAR_SEQ;
+ }
+
+ if (unlikely(qp->s_acked == qp->s_tail))
+ goto ack_done;
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
+ status = IB_WC_SUCCESS;
+
+ switch (opcode) {
+ case OP(ACKNOWLEDGE):
+ case OP(ATOMIC_ACKNOWLEDGE):
+ case OP(RDMA_READ_RESPONSE_FIRST):
+ aeth = be32_to_cpu(ohdr->u.aeth);
+ if (opcode == OP(ATOMIC_ACKNOWLEDGE))
+ val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
+ else
+ val = 0;
+ if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
+ opcode != OP(RDMA_READ_RESPONSE_FIRST))
+ goto ack_done;
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
+ if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
+ goto ack_op_err;
+ /*
+ * If this is a response to a resent RDMA read, we
+ * have to be careful to copy the data to the right
+ * location.
+ */
+ qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
+ wqe, psn, pmtu);
+ goto read_middle;
+
+ case OP(RDMA_READ_RESPONSE_MIDDLE):
+ /* no AETH, no ACK */
+ if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
+ goto ack_seq_err;
+ if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
+ goto ack_op_err;
+read_middle:
+ if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
+ goto ack_len_err;
+ if (unlikely(pmtu >= qp->s_rdma_read_len))
+ goto ack_len_err;
+
+ /*
+ * We got a response so update the timeout.
+ * 4.096 usec. * (1 << qp->timeout)
+ */
+ rvt_mod_retry_timer(qp);
+ if (qp->s_flags & RVT_S_WAIT_ACK) {
+ qp->s_flags &= ~RVT_S_WAIT_ACK;
+ hfi1_schedule_send(qp);
+ }
+
+ if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
+ qp->s_retry = qp->s_retry_cnt;
+
+ /*
+ * Update the RDMA receive state but do the copy w/o
+ * holding the locks and blocking interrupts.
+ */
+ qp->s_rdma_read_len -= pmtu;
+ update_last_psn(qp, psn);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ rvt_copy_sge(qp, &qp->s_rdma_read_sge,
+ data, pmtu, false, false);
+ goto bail;
+
+ case OP(RDMA_READ_RESPONSE_ONLY):
+ aeth = be32_to_cpu(ohdr->u.aeth);
+ if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
+ goto ack_done;
+ /*
+ * Check that the data size is >= 0 && <= pmtu.
+ * Remember to account for ICRC (4).
+ */
+ if (unlikely(tlen < (hdrsize + extra_bytes)))
+ goto ack_len_err;
+ /*
+ * If this is a response to a resent RDMA read, we
+ * have to be careful to copy the data to the right
+ * location.
+ */
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
+ qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
+ wqe, psn, pmtu);
+ goto read_last;
+
+ case OP(RDMA_READ_RESPONSE_LAST):
+ /* ACKs READ req. */
+ if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
+ goto ack_seq_err;
+ if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
+ goto ack_op_err;
+ /*
+ * Check that the data size is >= 1 && <= pmtu.
+ * Remember to account for ICRC (4).
+ */
+ if (unlikely(tlen <= (hdrsize + extra_bytes)))
+ goto ack_len_err;
+read_last:
+ tlen -= hdrsize + extra_bytes;
+ if (unlikely(tlen != qp->s_rdma_read_len))
+ goto ack_len_err;
+ aeth = be32_to_cpu(ohdr->u.aeth);
+ rvt_copy_sge(qp, &qp->s_rdma_read_sge,
+ data, tlen, false, false);
+ WARN_ON(qp->s_rdma_read_sge.num_sge);
+ (void)do_rc_ack(qp, aeth, psn,
+ OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
+ goto ack_done;
+ }
+
+ack_op_err:
+ status = IB_WC_LOC_QP_OP_ERR;
+ goto ack_err;
+
+ack_seq_err:
+ ibp = rcd_to_iport(rcd);
+ rdma_seq_err(qp, ibp, psn, rcd);
+ goto ack_done;
+
+ack_len_err:
+ status = IB_WC_LOC_LEN_ERR;
+ack_err:
+ if (qp->s_last == qp->s_acked) {
+ rvt_send_complete(qp, wqe, status);
+ rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ }
+ack_done:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+bail:
+ return;
+}
+
+static inline void rc_cancel_ack(struct rvt_qp *qp)
+{
+ qp->r_adefered = 0;
+ if (list_empty(&qp->rspwait))
+ return;
+ list_del_init(&qp->rspwait);
+ qp->r_flags &= ~RVT_R_RSP_NAK;
+ rvt_put_qp(qp);
+}
+
+/**
+ * rc_rcv_error - process an incoming duplicate or error RC packet
+ * @ohdr: the other headers for this packet
+ * @data: the packet data
+ * @qp: the QP for this packet
+ * @opcode: the opcode for this packet
+ * @psn: the packet sequence number for this packet
+ * @diff: the difference between the PSN and the expected PSN
+ * @rcd: the receive context
+ *
+ * This is called from hfi1_rc_rcv() to process an unexpected
+ * incoming RC packet for the given QP.
+ * Called at interrupt level.
+ * Return 1 if no more processing is needed; otherwise return 0 to
+ * schedule a response to be sent.
+ */
+static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
+ struct rvt_qp *qp, u32 opcode, u32 psn,
+ int diff, struct hfi1_ctxtdata *rcd)
+{
+ struct hfi1_ibport *ibp = rcd_to_iport(rcd);
+ struct rvt_ack_entry *e;
+ unsigned long flags;
+ u8 prev;
+ u8 mra; /* most recent ACK */
+ bool old_req;
+
+ trace_hfi1_rcv_error(qp, psn);
+ if (diff > 0) {
+ /*
+ * Packet sequence error.
+ * A NAK will ACK earlier sends and RDMA writes.
+ * Don't queue the NAK if we already sent one.
+ */
+ if (!qp->r_nak_state) {
+ ibp->rvp.n_rc_seqnak++;
+ qp->r_nak_state = IB_NAK_PSN_ERROR;
+ /* Use the expected PSN. */
+ qp->r_ack_psn = qp->r_psn;
+ /*
+ * Wait to send the sequence NAK until all packets
+ * in the receive queue have been processed.
+ * Otherwise, we end up propagating congestion.
+ */
+ rc_defered_ack(rcd, qp);
+ }
+ goto done;
+ }
+
+ /*
+ * Handle a duplicate request. Don't re-execute SEND, RDMA
+ * write or atomic op. Don't NAK errors, just silently drop
+ * the duplicate request. Note that r_sge, r_len, and
+ * r_rcv_len may be in use so don't modify them.
+ *
+ * We are supposed to ACK the earliest duplicate PSN but we
+ * can coalesce an outstanding duplicate ACK. We have to
+ * send the earliest so that RDMA reads can be restarted at
+ * the requester's expected PSN.
+ *
+ * First, find where this duplicate PSN falls within the
+ * ACKs previously sent.
+ * old_req is true if there is an older response that is scheduled
+ * to be sent before sending this one.
+ */
+ e = NULL;
+ old_req = true;
+ ibp->rvp.n_rc_dupreq++;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ e = find_prev_entry(qp, psn, &prev, &mra, &old_req);
+
+ switch (opcode) {
+ case OP(RDMA_READ_REQUEST): {
+ struct ib_reth *reth;
+ u32 offset;
+ u32 len;
+
+ /*
+ * If we didn't find the RDMA read request in the ack queue,
+ * we can ignore this request.
+ */
+ if (!e || e->opcode != OP(RDMA_READ_REQUEST))
+ goto unlock_done;
+ /* RETH comes after BTH */
+ reth = &ohdr->u.rc.reth;
+ /*
+ * Address range must be a subset of the original
+ * request and start on pmtu boundaries.
+ * We reuse the old ack_queue slot since the requester
+ * should not back up and request an earlier PSN for the
+ * same request.
+ */
+ offset = delta_psn(psn, e->psn) * qp->pmtu;
+ len = be32_to_cpu(reth->length);
+ if (unlikely(offset + len != e->rdma_sge.sge_length))
+ goto unlock_done;
+ release_rdma_sge_mr(e);
+ if (len != 0) {
+ u32 rkey = be32_to_cpu(reth->rkey);
+ u64 vaddr = get_ib_reth_vaddr(reth);
+ int ok;
+
+ ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
+ IB_ACCESS_REMOTE_READ);
+ if (unlikely(!ok))
+ goto unlock_done;
+ } else {
+ e->rdma_sge.vaddr = NULL;
+ e->rdma_sge.length = 0;
+ e->rdma_sge.sge_length = 0;
+ }
+ e->psn = psn;
+ if (old_req)
+ goto unlock_done;
+ if (qp->s_acked_ack_queue == qp->s_tail_ack_queue)
+ qp->s_acked_ack_queue = prev;
+ qp->s_tail_ack_queue = prev;
+ break;
+ }
+
+ case OP(COMPARE_SWAP):
+ case OP(FETCH_ADD): {
+ /*
+ * If we didn't find the atomic request in the ack queue
+ * or the send engine is already backed up to send an
+ * earlier entry, we can ignore this request.
+ */
+ if (!e || e->opcode != (u8)opcode || old_req)
+ goto unlock_done;
+ if (qp->s_tail_ack_queue == qp->s_acked_ack_queue)
+ qp->s_acked_ack_queue = prev;
+ qp->s_tail_ack_queue = prev;
+ break;
+ }
+
+ default:
+ /*
+ * Ignore this operation if it doesn't request an ACK
+ * or if an earlier RDMA read or atomic is going to be resent.
+ */
+ if (!(psn & IB_BTH_REQ_ACK) || old_req)
+ goto unlock_done;
+ /*
+ * Resend the most recent ACK if this request is
+ * after all the previous RDMA reads and atomics.
+ */
+ if (mra == qp->r_head_ack_queue) {
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ qp->r_nak_state = 0;
+ qp->r_ack_psn = qp->r_psn - 1;
+ goto send_ack;
+ }
+
+ /*
+ * Resend the RDMA read or atomic op which
+ * ACKs this duplicate request.
+ */
+ if (qp->s_tail_ack_queue == qp->s_acked_ack_queue)
+ qp->s_acked_ack_queue = mra;
+ qp->s_tail_ack_queue = mra;
+ break;
+ }
+ qp->s_ack_state = OP(ACKNOWLEDGE);
+ qp->s_flags |= RVT_S_RESP_PENDING;
+ qp->r_nak_state = 0;
+ hfi1_schedule_send(qp);
+
+unlock_done:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+done:
+ return 1;
+
+send_ack:
+ return 0;
+}
+
+static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
+ u32 lqpn, u32 rqpn, u8 svc_type)
+{
+ struct opa_hfi1_cong_log_event_internal *cc_event;
+ unsigned long flags;
+
+ if (sl >= OPA_MAX_SLS)
+ return;
+
+ spin_lock_irqsave(&ppd->cc_log_lock, flags);
+
+ ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8);
+ ppd->threshold_event_counter++;
+
+ cc_event = &ppd->cc_events[ppd->cc_log_idx++];
+ if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
+ ppd->cc_log_idx = 0;
+ cc_event->lqpn = lqpn & RVT_QPN_MASK;
+ cc_event->rqpn = rqpn & RVT_QPN_MASK;
+ cc_event->sl = sl;
+ cc_event->svc_type = svc_type;
+ cc_event->rlid = rlid;
+ /* keep timestamp in units of 1.024 usec */
+ cc_event->timestamp = ktime_get_ns() / 1024;
+
+ spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
+}
+
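+/*
+ * process_becn - react to a Backward ECN for the given service level.
+ * A rough sketch of the effect, using example (not normative) values:
+ * with ccti_increase = 4 and ccti_limit = 254, each BECN raises
+ * cca_timer->ccti by 4 (saturating at 254) and re-selects the
+ * inter-packet gap via set_link_ipg(); with ccti_timer = 100 the
+ * hrtimer below is armed for roughly 100 * 1.024 usec. Once ccti
+ * reaches trigger_threshold, the event is logged via log_cca_event().
+ */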
+void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
+ u32 rqpn, u8 svc_type)
+{
+ struct cca_timer *cca_timer;
+ u16 ccti, ccti_incr, ccti_timer, ccti_limit;
+ u8 trigger_threshold;
+ struct cc_state *cc_state;
+ unsigned long flags;
+
+ if (sl >= OPA_MAX_SLS)
+ return;
+
+ cc_state = get_cc_state(ppd);
+
+ if (!cc_state)
+ return;
+
+ /*
+ * 1) increase CCTI (for this SL)
+ * 2) select IPG (i.e., call set_link_ipg())
+ * 3) start timer
+ */
+ ccti_limit = cc_state->cct.ccti_limit;
+ ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
+ ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
+ trigger_threshold =
+ cc_state->cong_setting.entries[sl].trigger_threshold;
+
+ spin_lock_irqsave(&ppd->cca_timer_lock, flags);
+
+ cca_timer = &ppd->cca_timer[sl];
+ if (cca_timer->ccti < ccti_limit) {
+ if (cca_timer->ccti + ccti_incr <= ccti_limit)
+ cca_timer->ccti += ccti_incr;
+ else
+ cca_timer->ccti = ccti_limit;
+ set_link_ipg(ppd);
+ }
+
+ ccti = cca_timer->ccti;
+
+ if (!hrtimer_active(&cca_timer->hrtimer)) {
+ /* ccti_timer is in units of 1.024 usec */
+ unsigned long nsec = 1024 * ccti_timer;
+
+ hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
+ HRTIMER_MODE_REL_PINNED);
+ }
+
+ spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
+
+ if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
+ log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
+}
+
+/**
+ * hfi1_rc_rcv - process an incoming RC packet
+ * @packet: data packet information
+ *
+ * This is called from qp_rcv() to process an incoming RC packet
+ * for the given QP.
+ * May be called at interrupt level.
+ */
+void hfi1_rc_rcv(struct hfi1_packet *packet)
+{
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ void *data = packet->payload;
+ u32 tlen = packet->tlen;
+ struct rvt_qp *qp = packet->qp;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct hfi1_ibport *ibp = rcd_to_iport(rcd);
+ struct ib_other_headers *ohdr = packet->ohdr;
+ u32 opcode = packet->opcode;
+ u32 hdrsize = packet->hlen;
+ u32 psn = ib_bth_get_psn(packet->ohdr);
+ u32 pad = packet->pad;
+ struct ib_wc wc;
+ u32 pmtu = qp->pmtu;
+ int diff;
+ struct ib_reth *reth;
+ unsigned long flags;
+ int ret;
+ bool copy_last = false, fecn;
+ u32 rkey;
+ u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
+
+ lockdep_assert_held(&qp->r_lock);
+
+ if (hfi1_ruc_check_hdr(ibp, packet))
+ return;
+
+ fecn = process_ecn(qp, packet);
+ opfn_trigger_conn_request(qp, be32_to_cpu(ohdr->bth[1]));
+
+ /*
+ * Process responses (ACKs) before anything else. Note that the
+ * packet sequence number will be for something in the send work
+ * queue rather than the expected receive packet sequence number.
+ * In other words, this QP is the requester.
+ */
+ if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
+ opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
+ rc_rcv_resp(packet);
+ return;
+ }
+
+ /* Compute 24 bits worth of difference. */
+ diff = delta_psn(psn, qp->r_psn);
+ if (unlikely(diff)) {
+ if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
+ return;
+ goto send_ack;
+ }
+
+ /* Check for opcode sequence errors. */
+ switch (qp->r_state) {
+ case OP(SEND_FIRST):
+ case OP(SEND_MIDDLE):
+ if (opcode == OP(SEND_MIDDLE) ||
+ opcode == OP(SEND_LAST) ||
+ opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
+ opcode == OP(SEND_LAST_WITH_INVALIDATE))
+ break;
+ goto nack_inv;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_MIDDLE):
+ if (opcode == OP(RDMA_WRITE_MIDDLE) ||
+ opcode == OP(RDMA_WRITE_LAST) ||
+ opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
+ break;
+ goto nack_inv;
+
+ default:
+ if (opcode == OP(SEND_MIDDLE) ||
+ opcode == OP(SEND_LAST) ||
+ opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
+ opcode == OP(SEND_LAST_WITH_INVALIDATE) ||
+ opcode == OP(RDMA_WRITE_MIDDLE) ||
+ opcode == OP(RDMA_WRITE_LAST) ||
+ opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
+ goto nack_inv;
+ /*
+ * Note that it is up to the requester to not send a new
+ * RDMA read or atomic operation before receiving an ACK
+ * for the previous operation.
+ */
+ break;
+ }
+
+ if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
+ rvt_comm_est(qp);
+
+ /* OK, process the packet. */
+ switch (opcode) {
+ case OP(SEND_FIRST):
+ ret = rvt_get_rwqe(qp, false);
+ if (ret < 0)
+ goto nack_op_err;
+ if (!ret)
+ goto rnr_nak;
+ qp->r_rcv_len = 0;
+ fallthrough;
+ case OP(SEND_MIDDLE):
+ case OP(RDMA_WRITE_MIDDLE):
+send_middle:
+ /* Check for invalid length PMTU or posted rwqe len. */
+ /*
+ * There will be no padding for 9B packets, but 16B packets
+ * will come in with some padding since we always add
+ * CRC and LT bytes, which need to be flit aligned.
+ */
+ if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
+ goto nack_inv;
+ qp->r_rcv_len += pmtu;
+ if (unlikely(qp->r_rcv_len > qp->r_len))
+ goto nack_inv;
+ rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
+ break;
+
+ case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
+ /* consume RWQE */
+ ret = rvt_get_rwqe(qp, true);
+ if (ret < 0)
+ goto nack_op_err;
+ if (!ret)
+ goto rnr_nak;
+ goto send_last_imm;
+
+ case OP(SEND_ONLY):
+ case OP(SEND_ONLY_WITH_IMMEDIATE):
+ case OP(SEND_ONLY_WITH_INVALIDATE):
+ ret = rvt_get_rwqe(qp, false);
+ if (ret < 0)
+ goto nack_op_err;
+ if (!ret)
+ goto rnr_nak;
+ qp->r_rcv_len = 0;
+ if (opcode == OP(SEND_ONLY))
+ goto no_immediate_data;
+ if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
+ goto send_last_inv;
+ fallthrough; /* for SEND_ONLY_WITH_IMMEDIATE */
+ case OP(SEND_LAST_WITH_IMMEDIATE):
+send_last_imm:
+ wc.ex.imm_data = ohdr->u.imm_data;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ goto send_last;
+ case OP(SEND_LAST_WITH_INVALIDATE):
+send_last_inv:
+ rkey = be32_to_cpu(ohdr->u.ieth);
+ if (rvt_invalidate_rkey(qp, rkey))
+ goto no_immediate_data;
+ wc.ex.invalidate_rkey = rkey;
+ wc.wc_flags = IB_WC_WITH_INVALIDATE;
+ goto send_last;
+ case OP(RDMA_WRITE_LAST):
+ copy_last = rvt_is_user_qp(qp);
+ fallthrough;
+ case OP(SEND_LAST):
+no_immediate_data:
+ wc.wc_flags = 0;
+ wc.ex.imm_data = 0;
+send_last:
+ /* Check for invalid length. */
+ /* LAST len should be >= 1 */
+ if (unlikely(tlen < (hdrsize + extra_bytes)))
+ goto nack_inv;
+ /* Don't count the CRC(and padding and LT byte for 16B). */
+ tlen -= (hdrsize + extra_bytes);
+ wc.byte_len = tlen + qp->r_rcv_len;
+ if (unlikely(wc.byte_len > qp->r_len))
+ goto nack_inv;
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, copy_last);
+ rvt_put_ss(&qp->r_sge);
+ qp->r_msn++;
+ if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
+ break;
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
+ opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+ wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ else
+ wc.opcode = IB_WC_RECV;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = qp->remote_qpn;
+ wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
+ /*
+ * It seems that IB mandates the presence of an SL in a
+ * work completion only for the UD transport (see section
+ * 11.4.2 of IBTA Vol. 1).
+ *
+ * However, the way the SL is chosen below is consistent
+ * with the way that IB/qib works and tries to avoid
+ * introducing incompatibilities.
+ *
+ * See also OPA Vol. 1, section 9.7.6, and table 9-17.
+ */
+ wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
+ /* zero fields that are N/A */
+ wc.vendor_err = 0;
+ wc.pkey_index = 0;
+ wc.dlid_path_bits = 0;
+ wc.port_num = 0;
+ /* Signal completion event if the solicited bit is set. */
+ rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
+ break;
+
+ case OP(RDMA_WRITE_ONLY):
+ copy_last = rvt_is_user_qp(qp);
+ fallthrough;
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto nack_inv;
+ /* consume RWQE */
+ reth = &ohdr->u.rc.reth;
+ qp->r_len = be32_to_cpu(reth->length);
+ qp->r_rcv_len = 0;
+ qp->r_sge.sg_list = NULL;
+ if (qp->r_len != 0) {
+ u32 rkey = be32_to_cpu(reth->rkey);
+ u64 vaddr = get_ib_reth_vaddr(reth);
+ int ok;
+
+ /* Check rkey & NAK */
+ ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
+ rkey, IB_ACCESS_REMOTE_WRITE);
+ if (unlikely(!ok))
+ goto nack_acc;
+ qp->r_sge.num_sge = 1;
+ } else {
+ qp->r_sge.num_sge = 0;
+ qp->r_sge.sge.mr = NULL;
+ qp->r_sge.sge.vaddr = NULL;
+ qp->r_sge.sge.length = 0;
+ qp->r_sge.sge.sge_length = 0;
+ }
+ if (opcode == OP(RDMA_WRITE_FIRST))
+ goto send_middle;
+ else if (opcode == OP(RDMA_WRITE_ONLY))
+ goto no_immediate_data;
+ ret = rvt_get_rwqe(qp, true);
+ if (ret < 0)
+ goto nack_op_err;
+ if (!ret) {
+ /* peer will send again */
+ rvt_put_ss(&qp->r_sge);
+ goto rnr_nak;
+ }
+ wc.ex.imm_data = ohdr->u.rc.imm_data;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ goto send_last;
+
+ case OP(RDMA_READ_REQUEST): {
+ struct rvt_ack_entry *e;
+ u32 len;
+ u8 next;
+
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
+ goto nack_inv;
+ next = qp->r_head_ack_queue + 1;
+ /* s_ack_queue is size rvt_size_atomic()+1 so use > not >= */
+ if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
+ next = 0;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (unlikely(next == qp->s_acked_ack_queue)) {
+ if (!qp->s_ack_queue[next].sent)
+ goto nack_inv_unlck;
+ update_ack_queue(qp, next);
+ }
+ e = &qp->s_ack_queue[qp->r_head_ack_queue];
+ release_rdma_sge_mr(e);
+ reth = &ohdr->u.rc.reth;
+ len = be32_to_cpu(reth->length);
+ if (len) {
+ u32 rkey = be32_to_cpu(reth->rkey);
+ u64 vaddr = get_ib_reth_vaddr(reth);
+ int ok;
+
+ /* Check rkey & NAK */
+ ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
+ rkey, IB_ACCESS_REMOTE_READ);
+ if (unlikely(!ok))
+ goto nack_acc_unlck;
+ /*
+ * Update the next expected PSN. We add 1 later
+ * below, so only add the remainder here.
+ */
+ qp->r_psn += rvt_div_mtu(qp, len - 1);
+ } else {
+ e->rdma_sge.mr = NULL;
+ e->rdma_sge.vaddr = NULL;
+ e->rdma_sge.length = 0;
+ e->rdma_sge.sge_length = 0;
+ }
+ e->opcode = opcode;
+ e->sent = 0;
+ e->psn = psn;
+ e->lpsn = qp->r_psn;
+ /*
+ * We need to increment the MSN here instead of when we
+ * finish sending the result since a duplicate request would
+ * increment it more than once.
+ */
+ qp->r_msn++;
+ qp->r_psn++;
+ qp->r_state = opcode;
+ qp->r_nak_state = 0;
+ qp->r_head_ack_queue = next;
+ qpriv->r_tid_alloc = qp->r_head_ack_queue;
+
+ /* Schedule the send engine. */
+ qp->s_flags |= RVT_S_RESP_PENDING;
+ if (fecn)
+ qp->s_flags |= RVT_S_ECN;
+ hfi1_schedule_send(qp);
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return;
+ }
+
+ case OP(COMPARE_SWAP):
+ case OP(FETCH_ADD): {
+ struct ib_atomic_eth *ateth = &ohdr->u.atomic_eth;
+ u64 vaddr = get_ib_ateth_vaddr(ateth);
+ bool opfn = opcode == OP(COMPARE_SWAP) &&
+ vaddr == HFI1_VERBS_E_ATOMIC_VADDR;
+ struct rvt_ack_entry *e;
+ atomic64_t *maddr;
+ u64 sdata;
+ u32 rkey;
+ u8 next;
+
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
+ !opfn))
+ goto nack_inv;
+ next = qp->r_head_ack_queue + 1;
+ if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
+ next = 0;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (unlikely(next == qp->s_acked_ack_queue)) {
+ if (!qp->s_ack_queue[next].sent)
+ goto nack_inv_unlck;
+ update_ack_queue(qp, next);
+ }
+ e = &qp->s_ack_queue[qp->r_head_ack_queue];
+ release_rdma_sge_mr(e);
+ /* Process OPFN special virtual address */
+ if (opfn) {
+ opfn_conn_response(qp, e, ateth);
+ goto ack;
+ }
+ if (unlikely(vaddr & (sizeof(u64) - 1)))
+ goto nack_inv_unlck;
+ rkey = be32_to_cpu(ateth->rkey);
+ /* Check rkey & NAK */
+ if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
+ vaddr, rkey,
+ IB_ACCESS_REMOTE_ATOMIC)))
+ goto nack_acc_unlck;
+ /* Perform atomic OP and save result. */
+ maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
+ sdata = get_ib_ateth_swap(ateth);
+ e->atomic_data = (opcode == OP(FETCH_ADD)) ?
+ (u64)atomic64_add_return(sdata, maddr) - sdata :
+ (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
+ get_ib_ateth_compare(ateth),
+ sdata);
+ rvt_put_mr(qp->r_sge.sge.mr);
+ qp->r_sge.num_sge = 0;
+ack:
+ e->opcode = opcode;
+ e->sent = 0;
+ e->psn = psn;
+ e->lpsn = psn;
+ qp->r_msn++;
+ qp->r_psn++;
+ qp->r_state = opcode;
+ qp->r_nak_state = 0;
+ qp->r_head_ack_queue = next;
+ qpriv->r_tid_alloc = qp->r_head_ack_queue;
+
+ /* Schedule the send engine. */
+ qp->s_flags |= RVT_S_RESP_PENDING;
+ if (fecn)
+ qp->s_flags |= RVT_S_ECN;
+ hfi1_schedule_send(qp);
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return;
+ }
+
+ default:
+ /* NAK unknown opcodes. */
+ goto nack_inv;
+ }
+ qp->r_psn++;
+ qp->r_state = opcode;
+ qp->r_ack_psn = psn;
+ qp->r_nak_state = 0;
+ /* Send an ACK if requested or required. */
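+ /*
+ * ACK coalescing: when the request asks for an ACK (or a FECN was
+ * seen), the ACK goes out immediately if numpkt == 0, a FECN was
+ * seen, or at least HFI1_PSN_CREDIT acknowledgements are already
+ * deferred; otherwise it is deferred via rc_defered_ack().
+ */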
+ if (psn & IB_BTH_REQ_ACK || fecn) {
+ if (packet->numpkt == 0 || fecn ||
+ qp->r_adefered >= HFI1_PSN_CREDIT) {
+ rc_cancel_ack(qp);
+ goto send_ack;
+ }
+ qp->r_adefered++;
+ rc_defered_ack(rcd, qp);
+ }
+ return;
+
+rnr_nak:
+ qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK;
+ qp->r_ack_psn = qp->r_psn;
+ /* Queue RNR NAK for later */
+ rc_defered_ack(rcd, qp);
+ return;
+
+nack_op_err:
+ rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
+ qp->r_ack_psn = qp->r_psn;
+ /* Queue NAK for later */
+ rc_defered_ack(rcd, qp);
+ return;
+
+nack_inv_unlck:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+nack_inv:
+ rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ qp->r_nak_state = IB_NAK_INVALID_REQUEST;
+ qp->r_ack_psn = qp->r_psn;
+ /* Queue NAK for later */
+ rc_defered_ack(rcd, qp);
+ return;
+
+nack_acc_unlck:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+nack_acc:
+ rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
+ qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
+ qp->r_ack_psn = qp->r_psn;
+send_ack:
+ hfi1_send_rc_ack(packet, fecn);
+}
+
+void hfi1_rc_hdrerr(
+ struct hfi1_ctxtdata *rcd,
+ struct hfi1_packet *packet,
+ struct rvt_qp *qp)
+{
+ struct hfi1_ibport *ibp = rcd_to_iport(rcd);
+ int diff;
+ u32 opcode;
+ u32 psn;
+
+ if (hfi1_ruc_check_hdr(ibp, packet))
+ return;
+
+ psn = ib_bth_get_psn(packet->ohdr);
+ opcode = ib_bth_get_opcode(packet->ohdr);
+
+ /* Only deal with RDMA Writes for now */
+ if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
+ diff = delta_psn(psn, qp->r_psn);
+ if (!qp->r_nak_state && diff >= 0) {
+ ibp->rvp.n_rc_seqnak++;
+ qp->r_nak_state = IB_NAK_PSN_ERROR;
+ /* Use the expected PSN. */
+ qp->r_ack_psn = qp->r_psn;
+ /*
+ * Wait to send the sequence NAK until all packets
+ * in the receive queue have been processed.
+ * Otherwise, we end up propagating congestion.
+ */
+ rc_defered_ack(rcd, qp);
+ } /* Out of sequence NAK */
+ } /* QP Request NAKs */
+}
diff --git a/drivers/infiniband/hw/hfi1/rc.h b/drivers/infiniband/hw/hfi1/rc.h
new file mode 100644
index 000000000000..5ed5e85d5841
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/rc.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ */
+
+#ifndef HFI1_RC_H
+#define HFI1_RC_H
+
+/* cut down ridiculously long IB macro names */
+#define OP(x) IB_OPCODE_RC_##x
+
+static inline void update_ack_queue(struct rvt_qp *qp, unsigned int n)
+{
+ unsigned int next;
+
+ next = n + 1;
+ if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
+ next = 0;
+ qp->s_tail_ack_queue = next;
+ qp->s_acked_ack_queue = next;
+ qp->s_ack_state = OP(ACKNOWLEDGE);
+}
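+
+/*
+ * Note on the wraparound above: s_ack_queue holds rvt_size_atomic() + 1
+ * entries, so valid indices run from 0 to rvt_size_atomic() inclusive
+ * and n + 1 wraps back to 0 once it exceeds rvt_size_atomic().
+ */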
+
+static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
+ struct rvt_qp *qp)
+{
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= RVT_R_RSP_NAK;
+ rvt_get_qp(qp);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+}
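+
+/*
+ * rc_defered_ack() queues the QP on the receive context's wait list so
+ * the pending ACK/NAK (marked by RVT_R_RSP_NAK) is sent only after the
+ * packets already in the receive queue have been processed, matching
+ * the "wait to send the sequence NAK" comments in the callers.
+ */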
+
+static inline u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
+ u32 psn, u32 pmtu)
+{
+ u32 len;
+
+ len = delta_psn(psn, wqe->psn) * pmtu;
+ return rvt_restart_sge(ss, wqe, len);
+}
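+
+/*
+ * Example of the restart offset computed above: resuming an RDMA read
+ * at psn == wqe->psn + 3 with a 4096-byte pmtu restarts the SGE
+ * 3 * 4096 = 12288 bytes into the original request.
+ */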
+
+static inline void release_rdma_sge_mr(struct rvt_ack_entry *e)
+{
+ if (e->rdma_sge.mr) {
+ rvt_put_mr(e->rdma_sge.mr);
+ e->rdma_sge.mr = NULL;
+ }
+}
+
+struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev,
+ u8 *prev_ack, bool *scheduled);
+int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, u64 val,
+ struct hfi1_ctxtdata *rcd);
+struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ struct hfi1_ibport *ibp);
+
+#endif /* HFI1_RC_H */
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
new file mode 100644
index 000000000000..aafa4e03b179
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -0,0 +1,575 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015 - 2018 Intel Corporation.
+ */
+
+#include <linux/spinlock.h>
+
+#include "hfi.h"
+#include "mad.h"
+#include "qp.h"
+#include "verbs_txreq.h"
+#include "trace.h"
+
+static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
+{
+ return (gid->global.interface_id == id &&
+ (gid->global.subnet_prefix == gid_prefix ||
+ gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
+}
+
+/*
+ * hfi1_ruc_check_hdr - sanity-check an incoming packet header against
+ * the QP's address information; a non-zero return tells the caller to
+ * drop the packet.
+ *
+ * This should be called with the QP r_lock held.
+ *
+ * The s_lock will be acquired around the hfi1_migrate_qp() call.
+ */
+int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet)
+{
+ __be64 guid;
+ unsigned long flags;
+ struct rvt_qp *qp = packet->qp;
+ u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
+ u32 dlid = packet->dlid;
+ u32 slid = packet->slid;
+ u32 sl = packet->sl;
+ bool migrated = packet->migrated;
+ u16 pkey = packet->pkey;
+
+ if (qp->s_mig_state == IB_MIG_ARMED && migrated) {
+ if (!packet->grh) {
+ if ((rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
+ IB_AH_GRH) &&
+ (packet->etype != RHF_RCV_TYPE_BYPASS))
+ return 1;
+ } else {
+ const struct ib_global_route *grh;
+
+ if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
+ IB_AH_GRH))
+ return 1;
+ grh = rdma_ah_read_grh(&qp->alt_ah_attr);
+ guid = get_sguid(ibp, grh->sgid_index);
+ if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
+ guid))
+ return 1;
+ if (!gid_ok(
+ &packet->grh->sgid,
+ grh->dgid.global.subnet_prefix,
+ grh->dgid.global.interface_id))
+ return 1;
+ }
+ if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
+ sc5, slid))) {
+ hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
+ slid, dlid);
+ return 1;
+ }
+ /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
+ if (slid != rdma_ah_get_dlid(&qp->alt_ah_attr) ||
+ ppd_from_ibp(ibp)->port !=
+ rdma_ah_get_port_num(&qp->alt_ah_attr))
+ return 1;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ hfi1_migrate_qp(qp);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ } else {
+ if (!packet->grh) {
+ if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
+ IB_AH_GRH) &&
+ (packet->etype != RHF_RCV_TYPE_BYPASS))
+ return 1;
+ } else {
+ const struct ib_global_route *grh;
+
+ if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
+ IB_AH_GRH))
+ return 1;
+ grh = rdma_ah_read_grh(&qp->remote_ah_attr);
+ guid = get_sguid(ibp, grh->sgid_index);
+ if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
+ guid))
+ return 1;
+ if (!gid_ok(
+ &packet->grh->sgid,
+ grh->dgid.global.subnet_prefix,
+ grh->dgid.global.interface_id))
+ return 1;
+ }
+ if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
+ sc5, slid))) {
+ hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
+ slid, dlid);
+ return 1;
+ }
+ /* Validate the SLID. See Ch. 9.6.1.5 */
+ if ((slid != rdma_ah_get_dlid(&qp->remote_ah_attr)) ||
+ ppd_from_ibp(ibp)->port != qp->port_num)
+ return 1;
+ if (qp->s_mig_state == IB_MIG_REARM && !migrated)
+ qp->s_mig_state = IB_MIG_ARMED;
+ }
+
+ return 0;
+}
+
+/**
+ * hfi1_make_grh - construct a GRH header
+ * @ibp: a pointer to the IB port
+ * @hdr: a pointer to the GRH header being constructed
+ * @grh: the global route address to send to
+ * @hwords: size of header after grh being sent in dwords
+ * @nwords: the number of 32 bit words of data being sent
+ *
+ * Return the size of the header in 32 bit words.
+ */
+u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
+ const struct ib_global_route *grh, u32 hwords, u32 nwords)
+{
+ hdr->version_tclass_flow =
+ cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
+ (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
+ (grh->flow_label << IB_GRH_FLOW_SHIFT));
+ hdr->paylen = cpu_to_be16((hwords + nwords) << 2);
+ /* next_hdr is defined by C8-7 in ch. 8.4.1 */
+ hdr->next_hdr = IB_GRH_NEXT_HDR;
+ hdr->hop_limit = grh->hop_limit;
+ /* The SGID is 32-bit aligned. */
+ hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
+ hdr->sgid.global.interface_id =
+ grh->sgid_index < HFI1_GUIDS_PER_PORT ?
+ get_sguid(ibp, grh->sgid_index) :
+ get_sguid(ibp, HFI1_PORT_GUID_INDEX);
+ hdr->dgid = grh->dgid;
+
+ /* GRH header size in 32-bit words. */
+ return sizeof(struct ib_grh) / sizeof(u32);
+}
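+
+/*
+ * struct ib_grh is 40 bytes, so the value returned above is 10
+ * (32-bit words); callers add it to hdr_dwords when a GRH is present.
+ */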
+
+#define BTH2_OFFSET (offsetof(struct hfi1_sdma_header, \
+ hdr.ibh.u.oth.bth[2]) / 4)
+
+/**
+ * build_ahg - create ahg in s_ahg
+ * @qp: a pointer to QP
+ * @npsn: the next PSN for the request/response
+ *
+ * This routine handles the AHG by allocating an ahg entry and causing
+ * the header of the first middle packet to be copied.
+ *
+ * Subsequent middles use the copied entry, editing the
+ * PSN with 1 or 2 edits.
+ */
+static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ if (unlikely(qp->s_flags & HFI1_S_AHG_CLEAR))
+ clear_ahg(qp);
+ if (!(qp->s_flags & HFI1_S_AHG_VALID)) {
+ /* first middle that needs copy */
+ if (qp->s_ahgidx < 0)
+ qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
+ if (qp->s_ahgidx >= 0) {
+ qp->s_ahgpsn = npsn;
+ priv->s_ahg->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
+ /* save to protect against a change in another thread */
+ priv->s_ahg->ahgidx = qp->s_ahgidx;
+ qp->s_flags |= HFI1_S_AHG_VALID;
+ }
+ } else {
+ /* subsequent middle after valid */
+ if (qp->s_ahgidx >= 0) {
+ priv->s_ahg->tx_flags |= SDMA_TXREQ_F_USE_AHG;
+ priv->s_ahg->ahgidx = qp->s_ahgidx;
+ priv->s_ahg->ahgcount++;
+ priv->s_ahg->ahgdesc[0] =
+ sdma_build_ahg_descriptor(
+ (__force u16)cpu_to_be16((u16)npsn),
+ BTH2_OFFSET,
+ 16,
+ 16);
+ if ((npsn & 0xffff0000) !=
+ (qp->s_ahgpsn & 0xffff0000)) {
+ priv->s_ahg->ahgcount++;
+ priv->s_ahg->ahgdesc[1] =
+ sdma_build_ahg_descriptor(
+ (__force u16)cpu_to_be16(
+ (u16)(npsn >> 16)),
+ BTH2_OFFSET,
+ 0,
+ 16);
+ }
+ }
+ }
+}
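+
+/*
+ * The AHG edits above patch the PSN in BTH2 in place: the first
+ * descriptor always rewrites the low 16 bits of the PSN, and a second
+ * descriptor rewrites the high 16 bits only when they differ from
+ * those of the saved s_ahgpsn (e.g. a PSN rolling over from 0xffff to
+ * 0x10000 needs both edits).
+ */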
+
+static inline void hfi1_make_ruc_bth(struct rvt_qp *qp,
+ struct ib_other_headers *ohdr,
+ u32 bth0, u32 bth1, u32 bth2)
+{
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ ohdr->bth[1] = cpu_to_be32(bth1);
+ ohdr->bth[2] = cpu_to_be32(bth2);
+}
+
+/**
+ * hfi1_make_ruc_header_16B - build a 16B header
+ * @qp: the queue pair
+ * @ohdr: a pointer to the destination header memory
+ * @bth0: bth0 passed in from the RC/UC builder
+ * @bth1: bth1 passed in from the RC/UC builder
+ * @bth2: bth2 passed in from the RC/UC builder
+ * @middle: non-zero indicates ahg "could" be used
+ * @ps: the current packet state
+ *
+ * This routine may disarm ahg under these situations:
+ * - packet needs a GRH
+ * - BECN needed
+ * - migration state not IB_MIG_MIGRATED
+ */
+static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
+ struct ib_other_headers *ohdr,
+ u32 bth0, u32 bth1, u32 bth2,
+ int middle,
+ struct hfi1_pkt_state *ps)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_ibport *ibp = ps->ibp;
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u32 slid;
+ u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
+ u8 l4 = OPA_16B_L4_IB_LOCAL;
+ u8 extra_bytes = hfi1_get_16b_padding(
+ (ps->s_txreq->hdr_dwords << 2),
+ ps->s_txreq->s_cur_size);
+ u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
+ extra_bytes + SIZE_OF_LT) >> 2);
+ bool becn = false;
+
+ if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
+ hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
+ struct ib_grh *grh;
+ struct ib_global_route *grd =
+ rdma_ah_retrieve_grh(&qp->remote_ah_attr);
+ /*
+ * Ensure OPA GIDs are transformed to IB gids
+ * before creating the GRH.
+ */
+ if (grd->sgid_index == OPA_GID_INDEX)
+ grd->sgid_index = 0;
+ grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh;
+ l4 = OPA_16B_L4_IB_GLOBAL;
+ ps->s_txreq->hdr_dwords +=
+ hfi1_make_grh(ibp, grh, grd,
+ ps->s_txreq->hdr_dwords - LRH_16B_DWORDS,
+ nwords);
+ middle = 0;
+ }
+
+ if (qp->s_mig_state == IB_MIG_MIGRATED)
+ bth1 |= OPA_BTH_MIG_REQ;
+ else
+ middle = 0;
+
+ if (qp->s_flags & RVT_S_ECN) {
+ qp->s_flags &= ~RVT_S_ECN;
+ /* we recently received a FECN, so return a BECN */
+ becn = true;
+ middle = 0;
+ }
+ if (middle)
+ build_ahg(qp, bth2);
+ else
+ qp->s_flags &= ~HFI1_S_AHG_VALID;
+
+ bth0 |= pkey;
+ bth0 |= extra_bytes << 20;
+ hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
+
+ if (!ppd->lid)
+ slid = be32_to_cpu(OPA_LID_PERMISSIVE);
+ else
+ slid = ppd->lid |
+ (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
+ ((1 << ppd->lmc) - 1));
+
+ hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah,
+ slid,
+ opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
+ 16B),
+ (ps->s_txreq->hdr_dwords + nwords) >> 1,
+ pkey, becn, 0, l4, priv->s_sc);
+}
+
+/**
+ * hfi1_make_ruc_header_9B - build a 9B header
+ * @qp: the queue pair
+ * @ohdr: a pointer to the destination header memory
+ * @bth0: bth0 passed in from the RC/UC builder
+ * @bth1: bth1 passed in from the RC/UC builder
+ * @bth2: bth2 passed in from the RC/UC builder
+ * @middle: non-zero indicates ahg "could" be used
+ * @ps: the current packet state
+ *
+ * This routine may disarm ahg under these situations:
+ * - packet needs a GRH
+ * - BECN needed
+ * - migration state not IB_MIG_MIGRATED
+ */
+static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
+ struct ib_other_headers *ohdr,
+ u32 bth0, u32 bth1, u32 bth2,
+ int middle,
+ struct hfi1_pkt_state *ps)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_ibport *ibp = ps->ibp;
+ u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
+ u16 lrh0 = HFI1_LRH_BTH;
+ u8 extra_bytes = -ps->s_txreq->s_cur_size & 3;
+ u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
+ extra_bytes) >> 2);
+
+ if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
+ struct ib_grh *grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh;
+
+ lrh0 = HFI1_LRH_GRH;
+ ps->s_txreq->hdr_dwords +=
+ hfi1_make_grh(ibp, grh,
+ rdma_ah_read_grh(&qp->remote_ah_attr),
+ ps->s_txreq->hdr_dwords - LRH_9B_DWORDS,
+ nwords);
+ middle = 0;
+ }
+ lrh0 |= (priv->s_sc & 0xf) << 12 |
+ (rdma_ah_get_sl(&qp->remote_ah_attr) & 0xf) << 4;
+
+ if (qp->s_mig_state == IB_MIG_MIGRATED)
+ bth0 |= IB_BTH_MIG_REQ;
+ else
+ middle = 0;
+
+ if (qp->s_flags & RVT_S_ECN) {
+ qp->s_flags &= ~RVT_S_ECN;
+ /* we recently received a FECN, so return a BECN */
+ bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
+ middle = 0;
+ }
+ if (middle)
+ build_ahg(qp, bth2);
+ else
+ qp->s_flags &= ~HFI1_S_AHG_VALID;
+
+ bth0 |= pkey;
+ bth0 |= extra_bytes << 20;
+ hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
+ hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
+ lrh0,
+ ps->s_txreq->hdr_dwords + nwords,
+ opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
+ ppd_from_ibp(ibp)->lid |
+ rdma_ah_get_path_bits(&qp->remote_ah_attr));
+}
+
+typedef void (*hfi1_make_ruc_hdr)(struct rvt_qp *qp,
+ struct ib_other_headers *ohdr,
+ u32 bth0, u32 bth1, u32 bth2, int middle,
+ struct hfi1_pkt_state *ps);
+
+/* We support only two types - 9B and 16B for now */
+static const hfi1_make_ruc_hdr hfi1_ruc_header_tbl[2] = {
+ [HFI1_PKT_TYPE_9B] = &hfi1_make_ruc_header_9B,
+ [HFI1_PKT_TYPE_16B] = &hfi1_make_ruc_header_16B
+};
+
+void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
+ u32 bth0, u32 bth1, u32 bth2, int middle,
+ struct hfi1_pkt_state *ps)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ /*
+ * reset s_ahg/AHG fields
+ *
+ * This ensures that the ahgentry/ahgcount
+ * are at a non-AHG default to protect
+ * build_verbs_tx_desc() from using
+ * a stale ahgidx.
+ *
+ * build_ahg() will modify as appropriate
+ * to use the AHG feature.
+ */
+ priv->s_ahg->tx_flags = 0;
+ priv->s_ahg->ahgcount = 0;
+ priv->s_ahg->ahgidx = 0;
+
+ /* Make the appropriate header */
+ hfi1_ruc_header_tbl[priv->hdr_type](qp, ohdr, bth0, bth1, bth2, middle,
+ ps);
+}
+
+/* when sending, force a reschedule every one of these periods */
+#define SEND_RESCHED_TIMEOUT (5 * HZ) /* 5s in jiffies */
+
+/**
+ * hfi1_schedule_send_yield - test for a yield required for QP
+ * send engine
+ * @qp: a pointer to QP
+ * @ps: a pointer to a structure with commonly used lookup values for
+ * the send engine progress
+ * @tid: true if it is the tid leg
+ *
+ * This routine checks whether the time slice for the QP has expired
+ * for RC QPs; if so, an additional work entry is queued. At this
+ * point, other QPs have an opportunity to be scheduled. It
+ * returns true if a yield is required, otherwise false
+ * is returned.
+ */
+bool hfi1_schedule_send_yield(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
+ bool tid)
+{
+ ps->pkts_sent = true;
+
+ if (unlikely(time_after(jiffies, ps->timeout))) {
+ if (!ps->in_thread ||
+ workqueue_congested(ps->cpu, ps->ppd->hfi1_wq)) {
+ spin_lock_irqsave(&qp->s_lock, ps->flags);
+ if (!tid) {
+ qp->s_flags &= ~RVT_S_BUSY;
+ hfi1_schedule_send(qp);
+ } else {
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ if (priv->s_flags &
+ HFI1_S_TID_BUSY_SET) {
+ qp->s_flags &= ~RVT_S_BUSY;
+ priv->s_flags &=
+ ~(HFI1_S_TID_BUSY_SET |
+ RVT_S_BUSY);
+ } else {
+ priv->s_flags &= ~RVT_S_BUSY;
+ }
+ hfi1_schedule_tid_send(qp);
+ }
+
+ spin_unlock_irqrestore(&qp->s_lock, ps->flags);
+ this_cpu_inc(*ps->ppd->dd->send_schedule);
+ trace_hfi1_rc_expired_time_slice(qp, true);
+ return true;
+ }
+
+ cond_resched();
+ this_cpu_inc(*ps->ppd->dd->send_schedule);
+ ps->timeout = jiffies + ps->timeout_int;
+ }
+
+ trace_hfi1_rc_expired_time_slice(qp, false);
+ return false;
+}
+
+void hfi1_do_send_from_rvt(struct rvt_qp *qp)
+{
+ hfi1_do_send(qp, false);
+}
+
+void _hfi1_do_send(struct work_struct *work)
+{
+ struct iowait_work *w = container_of(work, struct iowait_work, iowork);
+ struct rvt_qp *qp = iowait_to_qp(w->iow);
+
+ hfi1_do_send(qp, true);
+}
+
+/**
+ * hfi1_do_send - perform a send on a QP
+ * @qp: a pointer to the QP
+ * @in_thread: true if in a workqueue thread
+ *
+ * Process entries in the send work queue until credit or queue is
+ * exhausted. Only allow one CPU to send a packet per QP.
+ * Otherwise, two threads could send packets out of order.
+ */
+void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
+{
+ struct hfi1_pkt_state ps;
+ struct hfi1_qp_priv *priv = qp->priv;
+ int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
+
+ ps.dev = to_idev(qp->ibqp.device);
+ ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
+ ps.ppd = ppd_from_ibp(ps.ibp);
+ ps.in_thread = in_thread;
+ ps.wait = iowait_get_ib_work(&priv->s_iowait);
+
+ trace_hfi1_rc_do_send(qp, in_thread);
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_RC:
+ if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
+ ~((1 << ps.ppd->lmc) - 1)) ==
+ ps.ppd->lid)) {
+ rvt_ruc_loopback(qp);
+ return;
+ }
+ make_req = hfi1_make_rc_req;
+ ps.timeout_int = qp->timeout_jiffies;
+ break;
+ case IB_QPT_UC:
+ if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
+ ~((1 << ps.ppd->lmc) - 1)) ==
+ ps.ppd->lid)) {
+ rvt_ruc_loopback(qp);
+ return;
+ }
+ make_req = hfi1_make_uc_req;
+ ps.timeout_int = SEND_RESCHED_TIMEOUT;
+ break;
+ default:
+ make_req = hfi1_make_ud_req;
+ ps.timeout_int = SEND_RESCHED_TIMEOUT;
+ }
+
+ spin_lock_irqsave(&qp->s_lock, ps.flags);
+
+ /* Return if we are already busy processing a work request. */
+ if (!hfi1_send_ok(qp)) {
+ if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
+ iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
+ spin_unlock_irqrestore(&qp->s_lock, ps.flags);
+ return;
+ }
+
+ qp->s_flags |= RVT_S_BUSY;
+
+ ps.timeout_int = ps.timeout_int / 8;
+ ps.timeout = jiffies + ps.timeout_int;
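+ /*
+ * Illustration: an RC QP starts from qp->timeout_jiffies while UC/UD
+ * use SEND_RESCHED_TIMEOUT (5 * HZ), so after the divide-by-8 above a
+ * UC/UD send loop yields roughly every 625 ms of wall-clock time (see
+ * hfi1_schedule_send_yield()).
+ */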
+ ps.cpu = priv->s_sde ? priv->s_sde->cpu :
+ cpumask_first(cpumask_of_node(ps.ppd->dd->node));
+ ps.pkts_sent = false;
+
+ /* ensure a pre-built packet is handled */
+ ps.s_txreq = get_waiting_verbs_txreq(ps.wait);
+ do {
+ /* Check for a constructed packet to be sent. */
+ if (ps.s_txreq) {
+ if (priv->s_flags & HFI1_S_TID_BUSY_SET)
+ qp->s_flags |= RVT_S_BUSY;
+ spin_unlock_irqrestore(&qp->s_lock, ps.flags);
+ /*
+ * If the packet cannot be sent now, return and
+ * the send engine will be woken up later.
+ */
+ if (hfi1_verbs_send(qp, &ps))
+ return;
+
+ /* allow other tasks to run */
+ if (hfi1_schedule_send_yield(qp, &ps, false))
+ return;
+
+ spin_lock_irqsave(&qp->s_lock, ps.flags);
+ }
+ } while (make_req(qp, &ps));
+ iowait_starve_clear(ps.pkts_sent, &priv->s_iowait);
+ spin_unlock_irqrestore(&qp->s_lock, ps.flags);
+}
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
new file mode 100644
index 000000000000..5cfa4f8fbf3d
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -0,0 +1,3364 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015 - 2018 Intel Corporation.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/seqlock.h>
+#include <linux/netdevice.h>
+#include <linux/moduleparam.h>
+#include <linux/bitops.h>
+#include <linux/timer.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
+
+#include "hfi.h"
+#include "common.h"
+#include "qp.h"
+#include "sdma.h"
+#include "iowait.h"
+#include "trace.h"
+
+/* must be a power of 2 >= 64 <= 32768 */
+#define SDMA_DESCQ_CNT 2048
+#define SDMA_DESC_INTR 64
+#define INVALID_TAIL 0xffff
+#define SDMA_PAD max_t(size_t, MAX_16B_PADDING, sizeof(u32))
+
+static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
+module_param(sdma_descq_cnt, uint, S_IRUGO);
+MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
+
+static uint sdma_idle_cnt = 250;
+module_param(sdma_idle_cnt, uint, S_IRUGO);
+MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns,default 250)");
+
+uint mod_num_sdma;
+module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO);
+MODULE_PARM_DESC(num_sdma, "Set max number of SDMA engines to use");
+
+static uint sdma_desct_intr = SDMA_DESC_INTR;
+module_param_named(desct_intr, sdma_desct_intr, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptors before interrupt");
+
+#define SDMA_WAIT_BATCH_SIZE 20
+/* max wait time for an SDMA engine to indicate it has halted */
+#define SDMA_ERR_HALT_TIMEOUT 10 /* ms */
+/* all SDMA engine errors that cause a halt */
+
+#define SD(name) SEND_DMA_##name
+#define ALL_SDMA_ENG_HALT_ERRS \
+ (SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))
+
+/* sdma_sendctrl operations */
+#define SDMA_SENDCTRL_OP_ENABLE BIT(0)
+#define SDMA_SENDCTRL_OP_INTENABLE BIT(1)
+#define SDMA_SENDCTRL_OP_HALT BIT(2)
+#define SDMA_SENDCTRL_OP_CLEANUP BIT(3)
+
+/* handle long defines */
+#define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
+SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
+#define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \
+SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT
+
+static const char * const sdma_state_names[] = {
+ [sdma_state_s00_hw_down] = "s00_HwDown",
+ [sdma_state_s10_hw_start_up_halt_wait] = "s10_HwStartUpHaltWait",
+ [sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait",
+ [sdma_state_s20_idle] = "s20_Idle",
+ [sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
+ [sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
+ [sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
+ [sdma_state_s60_idle_halt_wait] = "s60_IdleHaltWait",
+ [sdma_state_s80_hw_freeze] = "s80_HwFreeze",
+ [sdma_state_s82_freeze_sw_clean] = "s82_FreezeSwClean",
+ [sdma_state_s99_running] = "s99_Running",
+};
+
+#ifdef CONFIG_SDMA_VERBOSITY
+static const char * const sdma_event_names[] = {
+ [sdma_event_e00_go_hw_down] = "e00_GoHwDown",
+ [sdma_event_e10_go_hw_start] = "e10_GoHwStart",
+ [sdma_event_e15_hw_halt_done] = "e15_HwHaltDone",
+ [sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone",
+ [sdma_event_e30_go_running] = "e30_GoRunning",
+ [sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
+ [sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
+ [sdma_event_e60_hw_halted] = "e60_HwHalted",
+ [sdma_event_e70_go_idle] = "e70_GoIdle",
+ [sdma_event_e80_hw_freeze] = "e80_HwFreeze",
+ [sdma_event_e81_hw_frozen] = "e81_HwFrozen",
+ [sdma_event_e82_hw_unfreeze] = "e82_HwUnfreeze",
+ [sdma_event_e85_link_down] = "e85_LinkDown",
+ [sdma_event_e90_sw_halted] = "e90_SwHalted",
+};
+#endif
+
+static const struct sdma_set_state_action sdma_action_table[] = {
+ [sdma_state_s00_hw_down] = {
+ .go_s99_running_tofalse = 1,
+ .op_enable = 0,
+ .op_intenable = 0,
+ .op_halt = 0,
+ .op_cleanup = 0,
+ },
+ [sdma_state_s10_hw_start_up_halt_wait] = {
+ .op_enable = 0,
+ .op_intenable = 0,
+ .op_halt = 1,
+ .op_cleanup = 0,
+ },
+ [sdma_state_s15_hw_start_up_clean_wait] = {
+ .op_enable = 0,
+ .op_intenable = 1,
+ .op_halt = 0,
+ .op_cleanup = 1,
+ },
+ [sdma_state_s20_idle] = {
+ .op_enable = 0,
+ .op_intenable = 1,
+ .op_halt = 0,
+ .op_cleanup = 0,
+ },
+ [sdma_state_s30_sw_clean_up_wait] = {
+ .op_enable = 0,
+ .op_intenable = 0,
+ .op_halt = 0,
+ .op_cleanup = 0,
+ },
+ [sdma_state_s40_hw_clean_up_wait] = {
+ .op_enable = 0,
+ .op_intenable = 0,
+ .op_halt = 0,
+ .op_cleanup = 1,
+ },
+ [sdma_state_s50_hw_halt_wait] = {
+ .op_enable = 0,
+ .op_intenable = 0,
+ .op_halt = 0,
+ .op_cleanup = 0,
+ },
+ [sdma_state_s60_idle_halt_wait] = {
+ .go_s99_running_tofalse = 1,
+ .op_enable = 0,
+ .op_intenable = 0,
+ .op_halt = 1,
+ .op_cleanup = 0,
+ },
+ [sdma_state_s80_hw_freeze] = {
+ .op_enable = 0,
+ .op_intenable = 0,
+ .op_halt = 0,
+ .op_cleanup = 0,
+ },
+ [sdma_state_s82_freeze_sw_clean] = {
+ .op_enable = 0,
+ .op_intenable = 0,
+ .op_halt = 0,
+ .op_cleanup = 0,
+ },
+ [sdma_state_s99_running] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 0,
+ .op_cleanup = 0,
+ .go_s99_running_totrue = 1,
+ },
+};
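+
+/*
+ * sdma_set_state() translates the per-state flags above into a
+ * sendctrl operation mask; entering s99_running, for example, asserts
+ * SDMA_SENDCTRL_OP_ENABLE | SDMA_SENDCTRL_OP_INTENABLE, while the
+ * halt-wait states assert SDMA_SENDCTRL_OP_HALT.
+ */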
+
+#define SDMA_TAIL_UPDATE_THRESH 0x1F
+
+/* declare all statics here rather than keep sorting */
+static void sdma_complete(struct kref *);
+static void sdma_finalput(struct sdma_state *);
+static void sdma_get(struct sdma_state *);
+static void sdma_hw_clean_up_task(struct tasklet_struct *);
+static void sdma_put(struct sdma_state *);
+static void sdma_set_state(struct sdma_engine *, enum sdma_states);
+static void sdma_start_hw_clean_up(struct sdma_engine *);
+static void sdma_sw_clean_up_task(struct tasklet_struct *);
+static void sdma_sendctrl(struct sdma_engine *, unsigned);
+static void init_sdma_regs(struct sdma_engine *, u32, uint);
+static void sdma_process_event(
+ struct sdma_engine *sde,
+ enum sdma_events event);
+static void __sdma_process_event(
+ struct sdma_engine *sde,
+ enum sdma_events event);
+static void dump_sdma_state(struct sdma_engine *sde);
+static void sdma_make_progress(struct sdma_engine *sde, u64 status);
+static void sdma_desc_avail(struct sdma_engine *sde, uint avail);
+static void sdma_flush_descq(struct sdma_engine *sde);
+
+/**
+ * sdma_state_name() - return state string from enum
+ * @state: state
+ */
+static const char *sdma_state_name(enum sdma_states state)
+{
+ return sdma_state_names[state];
+}
+
+static void sdma_get(struct sdma_state *ss)
+{
+ kref_get(&ss->kref);
+}
+
+static void sdma_complete(struct kref *kref)
+{
+ struct sdma_state *ss =
+ container_of(kref, struct sdma_state, kref);
+
+ complete(&ss->comp);
+}
+
+static void sdma_put(struct sdma_state *ss)
+{
+ kref_put(&ss->kref, sdma_complete);
+}
+
+static void sdma_finalput(struct sdma_state *ss)
+{
+ sdma_put(ss);
+ wait_for_completion(&ss->comp);
+}
+
+static inline void write_sde_csr(
+ struct sdma_engine *sde,
+ u32 offset0,
+ u64 value)
+{
+ write_kctxt_csr(sde->dd, sde->this_idx, offset0, value);
+}
+
+static inline u64 read_sde_csr(
+ struct sdma_engine *sde,
+ u32 offset0)
+{
+ return read_kctxt_csr(sde->dd, sde->this_idx, offset0);
+}
+
+/*
+ * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
+ * sdma engine 'sde' to drop to 0.
+ */
+static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
+ int pause)
+{
+ u64 off = 8 * sde->this_idx;
+ struct hfi1_devdata *dd = sde->dd;
+ int lcnt = 0;
+ u64 reg_prev;
+ u64 reg = 0;
+
+ while (1) {
+ reg_prev = reg;
+ reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS);
+
+ reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK;
+ reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT;
+ if (reg == 0)
+ break;
+ /* counter is reset if the occupancy count changes */
+ if (reg != reg_prev)
+ lcnt = 0;
+ if (lcnt++ > 500) {
+ /* timed out - bounce the link */
+ dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
+ __func__, sde->this_idx, (u32)reg);
+ queue_work(dd->pport->link_wq,
+ &dd->pport->link_bounce_work);
+ break;
+ }
+ udelay(1);
+ }
+}
+
+/*
+ * sdma_wait() - wait for packet egress to complete for all SDMA engines,
+ * and pause for credit return.
+ */
+void sdma_wait(struct hfi1_devdata *dd)
+{
+ int i;
+
+ for (i = 0; i < dd->num_sdma; i++) {
+ struct sdma_engine *sde = &dd->per_sdma[i];
+
+ sdma_wait_for_packet_egress(sde, 0);
+ }
+}
+
+static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt)
+{
+ u64 reg;
+
+ if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT))
+ return;
+ reg = cnt;
+ reg &= SD(DESC_CNT_CNT_MASK);
+ reg <<= SD(DESC_CNT_CNT_SHIFT);
+ write_sde_csr(sde, SD(DESC_CNT), reg);
+}
+
+static inline void complete_tx(struct sdma_engine *sde,
+ struct sdma_txreq *tx,
+ int res)
+{
+ /* protect against complete modifying */
+ struct iowait *wait = tx->wait;
+ callback_t complete = tx->complete;
+
+#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
+ trace_hfi1_sdma_out_sn(sde, tx->sn);
+ if (WARN_ON_ONCE(sde->head_sn != tx->sn))
+ dd_dev_err(sde->dd, "expected %llu got %llu\n",
+ sde->head_sn, tx->sn);
+ sde->head_sn++;
+#endif
+ __sdma_txclean(sde->dd, tx);
+ if (complete)
+ (*complete)(tx, res);
+ if (iowait_sdma_dec(wait))
+ iowait_drain_wakeup(wait);
+}
+
+/*
+ * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status
+ *
+ * Depending on timing there can be txreqs in two places:
+ * - in the descq ring
+ * - in the flush list
+ *
+ * To avoid ordering issues the descq ring needs to be flushed
+ * first followed by the flush list.
+ *
+ * This routine is called from two places
+ * - From a work queue item
+ * - Directly from the state machine just before setting the
+ * state to running
+ *
+ * Must be called with head_lock held
+ *
+ */
+static void sdma_flush(struct sdma_engine *sde)
+{
+ struct sdma_txreq *txp, *txp_next;
+ LIST_HEAD(flushlist);
+ unsigned long flags;
+ uint seq;
+
+ /* flush from head to tail */
+ sdma_flush_descq(sde);
+ spin_lock_irqsave(&sde->flushlist_lock, flags);
+ /* copy flush list */
+ list_splice_init(&sde->flushlist, &flushlist);
+ spin_unlock_irqrestore(&sde->flushlist_lock, flags);
+ /* flush from flush list */
+ list_for_each_entry_safe(txp, txp_next, &flushlist, list)
+ complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
+ /* wakeup QPs orphaned on the dmawait list */
+ do {
+ struct iowait *w, *nw;
+
+ seq = read_seqbegin(&sde->waitlock);
+ if (!list_empty(&sde->dmawait)) {
+ write_seqlock(&sde->waitlock);
+ list_for_each_entry_safe(w, nw, &sde->dmawait, list) {
+ if (w->wakeup) {
+ w->wakeup(w, SDMA_AVAIL_REASON);
+ list_del_init(&w->list);
+ }
+ }
+ write_sequnlock(&sde->waitlock);
+ }
+ } while (read_seqretry(&sde->waitlock, seq));
+}
+
+/*
+ * Fields a work request for flushing the descq ring
+ * and the flush list
+ *
+ * If the engine has been brought to running during
+ * the scheduling delay, the flush is ignored, assuming
+ * that the process of bringing the engine to running
+ * would have done this flush prior to going to running.
+ *
+ */
+static void sdma_field_flush(struct work_struct *work)
+{
+ unsigned long flags;
+ struct sdma_engine *sde =
+ container_of(work, struct sdma_engine, flush_worker);
+
+ write_seqlock_irqsave(&sde->head_lock, flags);
+ if (!__sdma_running(sde))
+ sdma_flush(sde);
+ write_sequnlock_irqrestore(&sde->head_lock, flags);
+}
+
+static void sdma_err_halt_wait(struct work_struct *work)
+{
+ struct sdma_engine *sde = container_of(work, struct sdma_engine,
+ err_halt_worker);
+ u64 statuscsr;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT);
+ while (1) {
+ statuscsr = read_sde_csr(sde, SD(STATUS));
+ statuscsr &= SD(STATUS_ENG_HALTED_SMASK);
+ if (statuscsr)
+ break;
+ if (time_after(jiffies, timeout)) {
+ dd_dev_err(sde->dd,
+ "SDMA engine %d - timeout waiting for engine to halt\n",
+ sde->this_idx);
+ /*
+ * Continue anyway. This could happen if there was
+ * an uncorrectable error in the wrong spot.
+ */
+ break;
+ }
+ usleep_range(80, 120);
+ }
+
+ sdma_process_event(sde, sdma_event_e15_hw_halt_done);
+}
+
+static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
+{
+ if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
+ unsigned index;
+ struct hfi1_devdata *dd = sde->dd;
+
+ for (index = 0; index < dd->num_sdma; index++) {
+ struct sdma_engine *curr_sdma = &dd->per_sdma[index];
+
+ if (curr_sdma != sde)
+ curr_sdma->progress_check_head =
+ curr_sdma->descq_head;
+ }
+ dd_dev_err(sde->dd,
+ "SDMA engine %d - check scheduled\n",
+ sde->this_idx);
+ mod_timer(&sde->err_progress_check_timer, jiffies + 10);
+ }
+}
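+
+/*
+ * The mod_timer() above re-arms the progress check 10 jiffies out
+ * (10 ms at HZ=1000), after which sdma_err_progress_check() below
+ * compares each engine's descq_head against the snapshot taken here.
+ */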
+
+static void sdma_err_progress_check(struct timer_list *t)
+{
+ unsigned index;
+ struct sdma_engine *sde = timer_container_of(sde, t,
+ err_progress_check_timer);
+
+ dd_dev_err(sde->dd, "SDE progress check event\n");
+ for (index = 0; index < sde->dd->num_sdma; index++) {
+ struct sdma_engine *curr_sde = &sde->dd->per_sdma[index];
+ unsigned long flags;
+
+ /* check progress on each engine except the current one */
+ if (curr_sde == sde)
+ continue;
+ /*
+ * We must lock interrupts when acquiring sde->lock,
+ * to avoid a deadlock if interrupt triggers and spins on
+ * the same lock on same CPU
+ */
+ spin_lock_irqsave(&curr_sde->tail_lock, flags);
+ write_seqlock(&curr_sde->head_lock);
+
+ /* skip non-running queues */
+ if (curr_sde->state.current_state != sdma_state_s99_running) {
+ write_sequnlock(&curr_sde->head_lock);
+ spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
+ continue;
+ }
+
+ if ((curr_sde->descq_head != curr_sde->descq_tail) &&
+ (curr_sde->descq_head ==
+ curr_sde->progress_check_head))
+ __sdma_process_event(curr_sde,
+ sdma_event_e90_sw_halted);
+ write_sequnlock(&curr_sde->head_lock);
+ spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
+ }
+ schedule_work(&sde->err_halt_worker);
+}
+
+static void sdma_hw_clean_up_task(struct tasklet_struct *t)
+{
+ struct sdma_engine *sde = from_tasklet(sde, t,
+ sdma_hw_clean_up_task);
+ u64 statuscsr;
+
+ while (1) {
+#ifdef CONFIG_SDMA_VERBOSITY
+ dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
+ sde->this_idx, slashstrip(__FILE__), __LINE__,
+ __func__);
+#endif
+ statuscsr = read_sde_csr(sde, SD(STATUS));
+ statuscsr &= SD(STATUS_ENG_CLEANED_UP_SMASK);
+ if (statuscsr)
+ break;
+ udelay(10);
+ }
+
+ sdma_process_event(sde, sdma_event_e25_hw_clean_up_done);
+}
+
+static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
+{
+ return sde->tx_ring[sde->tx_head & sde->sdma_mask];
+}
+
+/*
+ * flush ring for recovery
+ */
+static void sdma_flush_descq(struct sdma_engine *sde)
+{
+ u16 head, tail;
+ int progress = 0;
+ struct sdma_txreq *txp = get_txhead(sde);
+
+ /* The reason for some of the complexity of this code is that
+ * not all descriptors have corresponding txps. So, we have to
+ * be able to skip over descs until we wander into the range of
+ * the next txp on the list.
+ */
+ head = sde->descq_head & sde->sdma_mask;
+ tail = sde->descq_tail & sde->sdma_mask;
+ while (head != tail) {
+ /* advance head, wrap if needed */
+ head = ++sde->descq_head & sde->sdma_mask;
+ /* if now past this txp's descs, do the callback */
+ if (txp && txp->next_descq_idx == head) {
+ /* remove from list */
+ sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
+ complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
+ trace_hfi1_sdma_progress(sde, head, tail, txp);
+ txp = get_txhead(sde);
+ }
+ progress++;
+ }
+ if (progress)
+ sdma_desc_avail(sde, sdma_descq_freecnt(sde));
+}
+
+static void sdma_sw_clean_up_task(struct tasklet_struct *t)
+{
+ struct sdma_engine *sde = from_tasklet(sde, t, sdma_sw_clean_up_task);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sde->tail_lock, flags);
+ write_seqlock(&sde->head_lock);
+
+ /*
+ * At this point, the following should always be true:
+ * - We are halted, so no more descriptors are getting retired.
+ * - We are not running, so no one is submitting new work.
+ * - Only we can send the e40_sw_cleaned, so we can't start
+ * running again until we say so. So, the active list and
+ * descq are ours to play with.
+ */
+
+ /*
+ * In the error clean up sequence, software clean must be called
+ * before the hardware clean so we can use the hardware head in
+ * the progress routine. A hardware clean or SPC unfreeze will
+ * reset the hardware head.
+ *
+ * Process all retired requests. The progress routine will use the
+ * latest physical hardware head - we are not running so speed does
+ * not matter.
+ */
+ sdma_make_progress(sde, 0);
+
+ sdma_flush(sde);
+
+ /*
+ * Reset our notion of head and tail.
+ * Note that the HW registers have been reset via an earlier
+ * clean up.
+ */
+ sde->descq_tail = 0;
+ sde->descq_head = 0;
+ sde->desc_avail = sdma_descq_freecnt(sde);
+ *sde->head_dma = 0;
+
+ __sdma_process_event(sde, sdma_event_e40_sw_cleaned);
+
+ write_sequnlock(&sde->head_lock);
+ spin_unlock_irqrestore(&sde->tail_lock, flags);
+}
+
+static void sdma_sw_tear_down(struct sdma_engine *sde)
+{
+ struct sdma_state *ss = &sde->state;
+
+ /* Releasing this reference means the state machine has stopped. */
+ sdma_put(ss);
+
+ /* stop waiting for all unfreeze events to complete */
+ atomic_set(&sde->dd->sdma_unfreeze_count, -1);
+ wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
+}
+
+static void sdma_start_hw_clean_up(struct sdma_engine *sde)
+{
+ tasklet_hi_schedule(&sde->sdma_hw_clean_up_task);
+}
+
+static void sdma_set_state(struct sdma_engine *sde,
+ enum sdma_states next_state)
+{
+ struct sdma_state *ss = &sde->state;
+ const struct sdma_set_state_action *action = sdma_action_table;
+ unsigned op = 0;
+
+ trace_hfi1_sdma_state(
+ sde,
+ sdma_state_names[ss->current_state],
+ sdma_state_names[next_state]);
+
+ /* debugging bookkeeping */
+ ss->previous_state = ss->current_state;
+ ss->previous_op = ss->current_op;
+ ss->current_state = next_state;
+
+ if (ss->previous_state != sdma_state_s99_running &&
+ next_state == sdma_state_s99_running)
+ sdma_flush(sde);
+
+ if (action[next_state].op_enable)
+ op |= SDMA_SENDCTRL_OP_ENABLE;
+
+ if (action[next_state].op_intenable)
+ op |= SDMA_SENDCTRL_OP_INTENABLE;
+
+ if (action[next_state].op_halt)
+ op |= SDMA_SENDCTRL_OP_HALT;
+
+ if (action[next_state].op_cleanup)
+ op |= SDMA_SENDCTRL_OP_CLEANUP;
+
+ if (action[next_state].go_s99_running_tofalse)
+ ss->go_s99_running = 0;
+
+ if (action[next_state].go_s99_running_totrue)
+ ss->go_s99_running = 1;
+
+ ss->current_op = op;
+ sdma_sendctrl(sde, ss->current_op);
+}
+
+/**
+ * sdma_get_descq_cnt() - called when device probed
+ *
+ * Return a validated descq count.
+ *
+ * This is currently only used in the verbs initialization to build the tx
+ * list.
+ *
+ * This will probably be deleted in favor of a more scalable approach to
+ * alloc tx's.
+ *
+ */
+u16 sdma_get_descq_cnt(void)
+{
+ u16 count = sdma_descq_cnt;
+
+ if (!count)
+ return SDMA_DESCQ_CNT;
+ /* count must be a power of 2 between 64 and 32768, inclusive.
+ * Otherwise return the default.
+ */
+ if (!is_power_of_2(count))
+ return SDMA_DESCQ_CNT;
+ if (count < 64 || count > 32768)
+ return SDMA_DESCQ_CNT;
+ return count;
+}
+
+/**
+ * sdma_engine_get_vl() - return vl for a given sdma engine
+ * @sde: sdma engine
+ *
+ * This function returns the vl mapped to a given engine, or an error if
+ * the mapping can't be found. The mapping fields are protected by RCU.
+ */
+int sdma_engine_get_vl(struct sdma_engine *sde)
+{
+ struct hfi1_devdata *dd = sde->dd;
+ struct sdma_vl_map *m;
+ u8 vl;
+
+ if (sde->this_idx >= TXE_NUM_SDMA_ENGINES)
+ return -EINVAL;
+
+ rcu_read_lock();
+ m = rcu_dereference(dd->sdma_map);
+ if (unlikely(!m)) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ vl = m->engine_to_vl[sde->this_idx];
+ rcu_read_unlock();
+
+ return vl;
+}
+
+/**
+ * sdma_select_engine_vl() - select sdma engine
+ * @dd: devdata
+ * @selector: a spreading factor
+ * @vl: this vl
+ *
+ *
+ * This function returns an engine based on the selector and a vl. The
+ * mapping fields are protected by RCU.
+ */
+struct sdma_engine *sdma_select_engine_vl(
+ struct hfi1_devdata *dd,
+ u32 selector,
+ u8 vl)
+{
+ struct sdma_vl_map *m;
+ struct sdma_map_elem *e;
+ struct sdma_engine *rval;
+
+ /* NOTE: This should only happen if SC->VL changed after the initial
+ * checks on the QP/AH.
+ * The default path below will return engine 0.
+ */
+ if (vl >= num_vls) {
+ rval = NULL;
+ goto done;
+ }
+
+ rcu_read_lock();
+ m = rcu_dereference(dd->sdma_map);
+ if (unlikely(!m)) {
+ rcu_read_unlock();
+ return &dd->per_sdma[0];
+ }
+ e = m->map[vl & m->mask];
+ rval = e->sde[selector & e->mask];
+ rcu_read_unlock();
+
+done:
+ rval = !rval ? &dd->per_sdma[0] : rval;
+ trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
+ return rval;
+}
+
+/**
+ * sdma_select_engine_sc() - select sdma engine
+ * @dd: devdata
+ * @selector: a spreading factor
+ * @sc5: the 5 bit sc
+ *
+ *
+ * This function returns an engine based on the selector and an sc.
+ */
+struct sdma_engine *sdma_select_engine_sc(
+ struct hfi1_devdata *dd,
+ u32 selector,
+ u8 sc5)
+{
+ u8 vl = sc_to_vlt(dd, sc5);
+
+ return sdma_select_engine_vl(dd, selector, vl);
+}
+
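+/*
+ * Per-CPU user affinity table: each sdma_rht_node maps a cpu_id to, for
+ * each VL, the engines that sdma_set_cpu_to_sde_map() has bound to that
+ * CPU. sdma_select_user_engine() consults this table at submit time.
+ */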
+struct sdma_rht_map_elem {
+ u32 mask;
+ u8 ctr;
+ struct sdma_engine *sde[];
+};
+
+struct sdma_rht_node {
+ unsigned long cpu_id;
+ struct sdma_rht_map_elem *map[HFI1_MAX_VLS_SUPPORTED];
+ struct rhash_head node;
+};
+
+#define NR_CPUS_HINT 192
+
+static const struct rhashtable_params sdma_rht_params = {
+ .nelem_hint = NR_CPUS_HINT,
+ .head_offset = offsetof(struct sdma_rht_node, node),
+ .key_offset = offsetof(struct sdma_rht_node, cpu_id),
+ .key_len = sizeof_field(struct sdma_rht_node, cpu_id),
+ .max_size = NR_CPUS,
+ .min_size = 8,
+ .automatic_shrinking = true,
+};
+
+/*
+ * sdma_select_user_engine() - select sdma engine based on user setup
+ * @dd: devdata
+ * @selector: a spreading factor
+ * @vl: this vl
+ *
+ * This function returns an sdma engine for a user sdma request.
+ * User defined sdma engine affinity setting is honored when applicable,
+ * otherwise system default sdma engine mapping is used. To ensure correct
+ * ordering, the mapping from <selector, vl> to sde must remain unchanged.
+ */
+struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
+ u32 selector, u8 vl)
+{
+ struct sdma_rht_node *rht_node;
+ struct sdma_engine *sde = NULL;
+ unsigned long cpu_id;
+
+ /*
+ * To ensure that the same sdma engine(s) are always selected,
+ * make sure the process is pinned to this CPU only.
+ */
+ if (current->nr_cpus_allowed != 1)
+ goto out;
+
+ rcu_read_lock();
+ cpu_id = smp_processor_id();
+ rht_node = rhashtable_lookup(dd->sdma_rht, &cpu_id,
+ sdma_rht_params);
+
+ if (rht_node && rht_node->map[vl]) {
+ struct sdma_rht_map_elem *map = rht_node->map[vl];
+
+ sde = map->sde[selector & map->mask];
+ }
+ rcu_read_unlock();
+
+ if (sde)
+ return sde;
+
+out:
+ return sdma_select_engine_vl(dd, selector, vl);
+}
+
+static void sdma_populate_sde_map(struct sdma_rht_map_elem *map)
+{
+ int i;
+
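+ /*
+ * Pad the table out to the next power of two by replicating the
+ * leading entries so that "selector & mask" always lands on a
+ * valid sde.
+ */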
+ for (i = 0; i < roundup_pow_of_two(map->ctr ? : 1) - map->ctr; i++)
+ map->sde[map->ctr + i] = map->sde[i];
+}
+
+static void sdma_cleanup_sde_map(struct sdma_rht_map_elem *map,
+ struct sdma_engine *sde)
+{
+ unsigned int i, pow;
+
+ /* only need to check the first ctr entries for a match */
+ for (i = 0; i < map->ctr; i++) {
+ if (map->sde[i] == sde) {
+ memmove(&map->sde[i], &map->sde[i + 1],
+ (map->ctr - i - 1) * sizeof(map->sde[0]));
+ map->ctr--;
+ pow = roundup_pow_of_two(map->ctr ? : 1);
+ map->mask = pow - 1;
+ sdma_populate_sde_map(map);
+ break;
+ }
+ }
+}
+
+/*
+ * Prevents concurrent reads and writes of the sdma engine cpu_mask
+ */
+static DEFINE_MUTEX(process_to_sde_mutex);
+
+ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
+ size_t count)
+{
+ struct hfi1_devdata *dd = sde->dd;
+ cpumask_var_t mask, new_mask;
+ unsigned long cpu;
+ int ret, vl, sz;
+ struct sdma_rht_node *rht_node;
+
+ vl = sdma_engine_get_vl(sde);
+ if (unlikely(vl < 0 || vl >= ARRAY_SIZE(rht_node->map)))
+ return -EINVAL;
+
+ ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
+ if (!ret)
+ return -ENOMEM;
+
+ ret = zalloc_cpumask_var(&new_mask, GFP_KERNEL);
+ if (!ret) {
+ free_cpumask_var(mask);
+ return -ENOMEM;
+ }
+ ret = cpulist_parse(buf, mask);
+ if (ret)
+ goto out_free;
+
+ if (!cpumask_subset(mask, cpu_online_mask)) {
+ dd_dev_warn(sde->dd, "Invalid CPU mask\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+
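+ /* map_elem header plus one sde pointer slot per possible engine */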
+ sz = sizeof(struct sdma_rht_map_elem) +
+ (TXE_NUM_SDMA_ENGINES * sizeof(struct sdma_engine *));
+
+ mutex_lock(&process_to_sde_mutex);
+
+ for_each_cpu(cpu, mask) {
+ /* Check if we have this already mapped */
+ if (cpumask_test_cpu(cpu, &sde->cpu_mask)) {
+ cpumask_set_cpu(cpu, new_mask);
+ continue;
+ }
+
+ rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
+ sdma_rht_params);
+ if (!rht_node) {
+ rht_node = kzalloc(sizeof(*rht_node), GFP_KERNEL);
+ if (!rht_node) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);
+ if (!rht_node->map[vl]) {
+ kfree(rht_node);
+ ret = -ENOMEM;
+ goto out;
+ }
+ rht_node->cpu_id = cpu;
+ rht_node->map[vl]->mask = 0;
+ rht_node->map[vl]->ctr = 1;
+ rht_node->map[vl]->sde[0] = sde;
+
+ ret = rhashtable_insert_fast(dd->sdma_rht,
+ &rht_node->node,
+ sdma_rht_params);
+ if (ret) {
+ kfree(rht_node->map[vl]);
+ kfree(rht_node);
+ dd_dev_err(sde->dd, "Failed to set process to sde affinity for cpu %lu\n",
+ cpu);
+ goto out;
+ }
+
+ } else {
+ int ctr, pow;
+
+ /* Add new user mappings */
+ if (!rht_node->map[vl])
+ rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);
+
+ if (!rht_node->map[vl]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ rht_node->map[vl]->ctr++;
+ ctr = rht_node->map[vl]->ctr;
+ rht_node->map[vl]->sde[ctr - 1] = sde;
+ pow = roundup_pow_of_two(ctr);
+ rht_node->map[vl]->mask = pow - 1;
+
+ /* Populate the sde map table */
+ sdma_populate_sde_map(rht_node->map[vl]);
+ }
+ cpumask_set_cpu(cpu, new_mask);
+ }
+
+ /* Clean up old mappings */
+ for_each_online_cpu(cpu) {
+ struct sdma_rht_node *rht_node;
+
+ /* Don't cleanup sdes that are set in the new mask */
+ if (cpumask_test_cpu(cpu, mask))
+ continue;
+
+ rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
+ sdma_rht_params);
+ if (rht_node) {
+ bool empty = true;
+ int i;
+
+ /* Remove mappings for old sde */
+ for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
+ if (rht_node->map[i])
+ sdma_cleanup_sde_map(rht_node->map[i],
+ sde);
+
+ /* Free empty hash table entries */
+ for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
+ if (!rht_node->map[i])
+ continue;
+
+ if (rht_node->map[i]->ctr) {
+ empty = false;
+ break;
+ }
+ }
+
+ if (empty) {
+ ret = rhashtable_remove_fast(dd->sdma_rht,
+ &rht_node->node,
+ sdma_rht_params);
+ WARN_ON(ret);
+
+ for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
+ kfree(rht_node->map[i]);
+
+ kfree(rht_node);
+ }
+ }
+ }
+
+ cpumask_copy(&sde->cpu_mask, new_mask);
+out:
+ mutex_unlock(&process_to_sde_mutex);
+out_free:
+ free_cpumask_var(mask);
+ free_cpumask_var(new_mask);
+ return ret ? : strnlen(buf, PAGE_SIZE);
+}
+
+ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf)
+{
+ mutex_lock(&process_to_sde_mutex);
+ if (cpumask_empty(&sde->cpu_mask))
+ snprintf(buf, PAGE_SIZE, "%s\n", "empty");
+ else
+ cpumap_print_to_pagebuf(true, buf, &sde->cpu_mask);
+ mutex_unlock(&process_to_sde_mutex);
+ return strnlen(buf, PAGE_SIZE);
+}
+
+static void sdma_rht_free(void *ptr, void *arg)
+{
+ struct sdma_rht_node *rht_node = ptr;
+ int i;
+
+ for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
+ kfree(rht_node->map[i]);
+
+ kfree(rht_node);
+}
+
+/**
+ * sdma_seqfile_dump_cpu_list() - debugfs dump the cpu to sdma mappings
+ * @s: seq file
+ * @dd: hfi1_devdata
+ * @cpuid: cpu id
+ *
+ * This routine dumps the process to sde mappings per cpu
+ */
+void sdma_seqfile_dump_cpu_list(struct seq_file *s,
+ struct hfi1_devdata *dd,
+ unsigned long cpuid)
+{
+ struct sdma_rht_node *rht_node;
+ int i, j;
+
+ rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpuid,
+ sdma_rht_params);
+ if (!rht_node)
+ return;
+
+ seq_printf(s, "cpu%3lu: ", cpuid);
+ for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
+ if (!rht_node->map[i] || !rht_node->map[i]->ctr)
+ continue;
+
+ seq_printf(s, " vl%d: [", i);
+
+ for (j = 0; j < rht_node->map[i]->ctr; j++) {
+ if (!rht_node->map[i]->sde[j])
+ continue;
+
+ if (j > 0)
+ seq_puts(s, ",");
+
+ seq_printf(s, " sdma%2d",
+ rht_node->map[i]->sde[j]->this_idx);
+ }
+ seq_puts(s, " ]");
+ }
+
+ seq_puts(s, "\n");
+}
+
+/*
+ * Free the indicated map struct
+ */
+static void sdma_map_free(struct sdma_vl_map *m)
+{
+ int i;
+
+ for (i = 0; m && i < m->actual_vls; i++)
+ kfree(m->map[i]);
+ kfree(m);
+}
+
+/*
+ * Handle RCU callback
+ */
+static void sdma_map_rcu_callback(struct rcu_head *list)
+{
+ struct sdma_vl_map *m = container_of(list, struct sdma_vl_map, list);
+
+ sdma_map_free(m);
+}
+
+/**
+ * sdma_map_init - called when # vls change
+ * @dd: hfi1_devdata
+ * @port: port number
+ * @num_vls: number of vls
+ * @vl_engines: per vl engine mapping (optional)
+ *
+ * This routine changes the mapping based on the number of vls.
+ *
+ * vl_engines is used to specify a non-uniform vl/engine loading. NULL
+ * implies auto-computing the loading and giving each VL a uniform
+ * distribution of engines per VL.
+ *
+ * The auto algorithm computes the sde_per_vl and the number of extra
+ * engines. Any extra engines are added from the last VL on down.
+ *
+ * rcu locking is used here to control access to the mapping fields.
+ *
+ * If either num_vls or num_sdma is not a power of 2, the array sizes
+ * in the struct sdma_vl_map and the struct sdma_map_elem are rounded
+ * up to the next highest power of 2 and the first entry is reused
+ * in a round robin fashion.
+ *
+ * If an error occurs the map change is not done and the mapping is
+ * not changed.
+ *
+ */
+int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
+{
+ int i, j;
+ int extra, sde_per_vl;
+ int engine = 0;
+ u8 lvl_engines[OPA_MAX_VLS];
+ struct sdma_vl_map *oldmap, *newmap;
+
+ if (!(dd->flags & HFI1_HAS_SEND_DMA))
+ return 0;
+
+ if (!vl_engines) {
+ /* truncate divide */
+ sde_per_vl = dd->num_sdma / num_vls;
+ /* extras */
+ extra = dd->num_sdma % num_vls;
+ vl_engines = lvl_engines;
+ /* add extras from last vl down */
+ for (i = num_vls - 1; i >= 0; i--, extra--)
+ vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0);
+ }
+ /* build new map */
+ newmap = kzalloc(
+ sizeof(struct sdma_vl_map) +
+ roundup_pow_of_two(num_vls) *
+ sizeof(struct sdma_map_elem *),
+ GFP_KERNEL);
+ if (!newmap)
+ goto bail;
+ newmap->actual_vls = num_vls;
+ newmap->vls = roundup_pow_of_two(num_vls);
+ newmap->mask = (1 << ilog2(newmap->vls)) - 1;
+ /* initialize back-map */
+ for (i = 0; i < TXE_NUM_SDMA_ENGINES; i++)
+ newmap->engine_to_vl[i] = -1;
+ for (i = 0; i < newmap->vls; i++) {
+ /* save for wrap around */
+ int first_engine = engine;
+
+ if (i < newmap->actual_vls) {
+ int sz = roundup_pow_of_two(vl_engines[i]);
+
+ /* only allocate once */
+ newmap->map[i] = kzalloc(
+ sizeof(struct sdma_map_elem) +
+ sz * sizeof(struct sdma_engine *),
+ GFP_KERNEL);
+ if (!newmap->map[i])
+ goto bail;
+ newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
+ /* assign engines */
+ for (j = 0; j < sz; j++) {
+ newmap->map[i]->sde[j] =
+ &dd->per_sdma[engine];
+ if (++engine >= first_engine + vl_engines[i])
+ /* wrap back to first engine */
+ engine = first_engine;
+ }
+ /* assign back-map */
+ for (j = 0; j < vl_engines[i]; j++)
+ newmap->engine_to_vl[first_engine + j] = i;
+ } else {
+ /* just re-use entry without allocating */
+ newmap->map[i] = newmap->map[i % num_vls];
+ }
+ engine = first_engine + vl_engines[i];
+ }
+ /* newmap in hand, save old map */
+ spin_lock_irq(&dd->sde_map_lock);
+ oldmap = rcu_dereference_protected(dd->sdma_map,
+ lockdep_is_held(&dd->sde_map_lock));
+
+ /* publish newmap */
+ rcu_assign_pointer(dd->sdma_map, newmap);
+
+ spin_unlock_irq(&dd->sde_map_lock);
+ /* success, free any old map after grace period */
+ if (oldmap)
+ call_rcu(&oldmap->list, sdma_map_rcu_callback);
+ return 0;
+bail:
+ /* free any partial allocation */
+ sdma_map_free(newmap);
+ return -ENOMEM;
+}
+
+/**
+ * sdma_clean - Clean up allocated memory
+ * @dd: struct hfi1_devdata
+ * @num_engines: num sdma engines
+ *
+ * This routine can be called regardless of the success of
+ * sdma_init()
+ */
+void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
+{
+ size_t i;
+ struct sdma_engine *sde;
+
+ if (dd->sdma_pad_dma) {
+ dma_free_coherent(&dd->pcidev->dev, SDMA_PAD,
+ (void *)dd->sdma_pad_dma,
+ dd->sdma_pad_phys);
+ dd->sdma_pad_dma = NULL;
+ dd->sdma_pad_phys = 0;
+ }
+ if (dd->sdma_heads_dma) {
+ dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size,
+ (void *)dd->sdma_heads_dma,
+ dd->sdma_heads_phys);
+ dd->sdma_heads_dma = NULL;
+ dd->sdma_heads_phys = 0;
+ }
+ for (i = 0; dd->per_sdma && i < num_engines; ++i) {
+ sde = &dd->per_sdma[i];
+
+ sde->head_dma = NULL;
+ sde->head_phys = 0;
+
+ if (sde->descq) {
+ dma_free_coherent(
+ &dd->pcidev->dev,
+ sde->descq_cnt * sizeof(u64[2]),
+ sde->descq,
+ sde->descq_phys
+ );
+ sde->descq = NULL;
+ sde->descq_phys = 0;
+ }
+ kvfree(sde->tx_ring);
+ sde->tx_ring = NULL;
+ }
+ if (rcu_access_pointer(dd->sdma_map)) {
+ spin_lock_irq(&dd->sde_map_lock);
+ sdma_map_free(rcu_access_pointer(dd->sdma_map));
+ RCU_INIT_POINTER(dd->sdma_map, NULL);
+ spin_unlock_irq(&dd->sde_map_lock);
+ synchronize_rcu();
+ }
+ kfree(dd->per_sdma);
+ dd->per_sdma = NULL;
+
+ if (dd->sdma_rht) {
+ rhashtable_free_and_destroy(dd->sdma_rht, sdma_rht_free, NULL);
+ kfree(dd->sdma_rht);
+ dd->sdma_rht = NULL;
+ }
+}
+
+/**
+ * sdma_init() - called when device probed
+ * @dd: hfi1_devdata
+ * @port: port number (currently only zero)
+ *
+ * Initializes each sde and its csrs.
+ * Interrupts are not required to be enabled.
+ *
+ * Returns:
+ * 0 - success, -errno on failure
+ */
+int sdma_init(struct hfi1_devdata *dd, u8 port)
+{
+ unsigned this_idx;
+ struct sdma_engine *sde;
+ struct rhashtable *tmp_sdma_rht;
+ u16 descq_cnt;
+ void *curr_head;
+ struct hfi1_pportdata *ppd = dd->pport + port;
+ u32 per_sdma_credits;
+ uint idle_cnt = sdma_idle_cnt;
+ size_t num_engines = chip_sdma_engines(dd);
+ int ret = -ENOMEM;
+
+ if (!HFI1_CAP_IS_KSET(SDMA)) {
+ HFI1_CAP_CLEAR(SDMA_AHG);
+ return 0;
+ }
+ if (mod_num_sdma &&
+ /* can't exceed chip support */
+ mod_num_sdma <= chip_sdma_engines(dd) &&
+ /* count must be >= vls */
+ mod_num_sdma >= num_vls)
+ num_engines = mod_num_sdma;
+
+ dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
+ dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", chip_sdma_engines(dd));
+ dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
+ chip_sdma_mem_size(dd));
+
+ per_sdma_credits =
+ chip_sdma_mem_size(dd) / (num_engines * SDMA_BLOCK_SIZE);
+
+ /* set up freeze waitqueue */
+ init_waitqueue_head(&dd->sdma_unfreeze_wq);
+ atomic_set(&dd->sdma_unfreeze_count, 0);
+
+ descq_cnt = sdma_get_descq_cnt();
+ dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n",
+ num_engines, descq_cnt);
+
+ /* alloc memory for array of send engines */
+ dd->per_sdma = kcalloc_node(num_engines, sizeof(*dd->per_sdma),
+ GFP_KERNEL, dd->node);
+ if (!dd->per_sdma)
+ return ret;
+
+ idle_cnt = ns_to_cclock(dd, idle_cnt);
+ if (idle_cnt)
+ dd->default_desc1 =
+ SDMA_DESC1_HEAD_TO_HOST_FLAG;
+ else
+ dd->default_desc1 =
+ SDMA_DESC1_INT_REQ_FLAG;
+
+ if (!sdma_desct_intr)
+ sdma_desct_intr = SDMA_DESC_INTR;
+
+ /* Allocate memory for SendDMA descriptor FIFOs */
+ for (this_idx = 0; this_idx < num_engines; ++this_idx) {
+ sde = &dd->per_sdma[this_idx];
+ sde->dd = dd;
+ sde->ppd = ppd;
+ sde->this_idx = this_idx;
+ sde->descq_cnt = descq_cnt;
+ sde->desc_avail = sdma_descq_freecnt(sde);
+ sde->sdma_shift = ilog2(descq_cnt);
+ sde->sdma_mask = (1 << sde->sdma_shift) - 1;
+
+ /* Create a mask specifically for each interrupt source */
+ sde->int_mask = (u64)1 << (0 * TXE_NUM_SDMA_ENGINES +
+ this_idx);
+ sde->progress_mask = (u64)1 << (1 * TXE_NUM_SDMA_ENGINES +
+ this_idx);
+ sde->idle_mask = (u64)1 << (2 * TXE_NUM_SDMA_ENGINES +
+ this_idx);
+ /* Create a combined mask to cover all 3 interrupt sources */
+ sde->imask = sde->int_mask | sde->progress_mask |
+ sde->idle_mask;
+
+ spin_lock_init(&sde->tail_lock);
+ seqlock_init(&sde->head_lock);
+ spin_lock_init(&sde->senddmactrl_lock);
+ spin_lock_init(&sde->flushlist_lock);
+ seqlock_init(&sde->waitlock);
+ /* ensure there is always a zero bit */
+ sde->ahg_bits = 0xfffffffe00000000ULL;
+
+ sdma_set_state(sde, sdma_state_s00_hw_down);
+
+ /* set up reference counting */
+ kref_init(&sde->state.kref);
+ init_completion(&sde->state.comp);
+
+ INIT_LIST_HEAD(&sde->flushlist);
+ INIT_LIST_HEAD(&sde->dmawait);
+
+ sde->tail_csr =
+ get_kctxt_csr_addr(dd, this_idx, SD(TAIL));
+
+ tasklet_setup(&sde->sdma_hw_clean_up_task,
+ sdma_hw_clean_up_task);
+ tasklet_setup(&sde->sdma_sw_clean_up_task,
+ sdma_sw_clean_up_task);
+ INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
+ INIT_WORK(&sde->flush_worker, sdma_field_flush);
+
+ sde->progress_check_head = 0;
+
+ timer_setup(&sde->err_progress_check_timer,
+ sdma_err_progress_check, 0);
+
+ sde->descq = dma_alloc_coherent(&dd->pcidev->dev,
+ descq_cnt * sizeof(u64[2]),
+ &sde->descq_phys, GFP_KERNEL);
+ if (!sde->descq)
+ goto bail;
+ sde->tx_ring =
+ kvzalloc_node(array_size(descq_cnt,
+ sizeof(struct sdma_txreq *)),
+ GFP_KERNEL, dd->node);
+ if (!sde->tx_ring)
+ goto bail;
+ }
+
+ dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
+ /* Allocate memory for DMA of head registers to memory */
+ dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev,
+ dd->sdma_heads_size,
+ &dd->sdma_heads_phys,
+ GFP_KERNEL);
+ if (!dd->sdma_heads_dma) {
+ dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
+ goto bail;
+ }
+
+ /* Allocate memory for pad */
+ dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, SDMA_PAD,
+ &dd->sdma_pad_phys, GFP_KERNEL);
+ if (!dd->sdma_pad_dma) {
+ dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
+ goto bail;
+ }
+
+ /* assign each engine to a different cacheline and init registers */
+ curr_head = (void *)dd->sdma_heads_dma;
+ for (this_idx = 0; this_idx < num_engines; ++this_idx) {
+ unsigned long phys_offset;
+
+ sde = &dd->per_sdma[this_idx];
+
+ sde->head_dma = curr_head;
+ curr_head += L1_CACHE_BYTES;
+ phys_offset = (unsigned long)sde->head_dma -
+ (unsigned long)dd->sdma_heads_dma;
+ sde->head_phys = dd->sdma_heads_phys + phys_offset;
+ init_sdma_regs(sde, per_sdma_credits, idle_cnt);
+ }
+ dd->flags |= HFI1_HAS_SEND_DMA;
+ dd->flags |= idle_cnt ? HFI1_HAS_SDMA_TIMEOUT : 0;
+ dd->num_sdma = num_engines;
+ ret = sdma_map_init(dd, port, ppd->vls_operational, NULL);
+ if (ret < 0)
+ goto bail;
+
+ tmp_sdma_rht = kzalloc(sizeof(*tmp_sdma_rht), GFP_KERNEL);
+ if (!tmp_sdma_rht) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params);
+ if (ret < 0) {
+ kfree(tmp_sdma_rht);
+ goto bail;
+ }
+
+ dd->sdma_rht = tmp_sdma_rht;
+
+ dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
+ return 0;
+
+bail:
+ sdma_clean(dd, num_engines);
+ return ret;
+}
+
+/**
+ * sdma_all_running() - called when the link goes up
+ * @dd: hfi1_devdata
+ *
+ * This routine moves all engines to the running state.
+ */
+void sdma_all_running(struct hfi1_devdata *dd)
+{
+ struct sdma_engine *sde;
+ unsigned int i;
+
+ /* move all engines to running */
+ for (i = 0; i < dd->num_sdma; ++i) {
+ sde = &dd->per_sdma[i];
+ sdma_process_event(sde, sdma_event_e30_go_running);
+ }
+}
+
+/**
+ * sdma_start() - called to kick off state processing for all engines
+ * @dd: hfi1_devdata
+ *
+ * This routine is for kicking off the state processing for all required
+ * sdma engines. Interrupts need to be working at this point.
+ *
+ */
+void sdma_start(struct hfi1_devdata *dd)
+{
+ unsigned i;
+ struct sdma_engine *sde;
+
+ /* kick off the engines state processing */
+ for (i = 0; i < dd->num_sdma; ++i) {
+ sde = &dd->per_sdma[i];
+ sdma_process_event(sde, sdma_event_e10_go_hw_start);
+ }
+}
+
+/**
+ * sdma_exit() - used when module is removed
+ * @dd: hfi1_devdata
+ */
+void sdma_exit(struct hfi1_devdata *dd)
+{
+ unsigned this_idx;
+ struct sdma_engine *sde;
+
+ for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma;
+ ++this_idx) {
+ sde = &dd->per_sdma[this_idx];
+ if (!list_empty(&sde->dmawait))
+ dd_dev_err(dd, "sde %u: dmawait list not empty!\n",
+ sde->this_idx);
+ sdma_process_event(sde, sdma_event_e00_go_hw_down);
+
+ timer_delete_sync(&sde->err_progress_check_timer);
+
+ /*
+ * This waits for the state machine to exit so it is not
+ * necessary to kill the sdma_sw_clean_up_task to make sure
+ * it is not running.
+ */
+ sdma_finalput(&sde->state);
+ }
+}
+
+/*
+ * unmap the indicated descriptor
+ */
+static inline void sdma_unmap_desc(
+ struct hfi1_devdata *dd,
+ struct sdma_desc *descp)
+{
+ switch (sdma_mapping_type(descp)) {
+ case SDMA_MAP_SINGLE:
+ dma_unmap_single(&dd->pcidev->dev, sdma_mapping_addr(descp),
+ sdma_mapping_len(descp), DMA_TO_DEVICE);
+ break;
+ case SDMA_MAP_PAGE:
+ dma_unmap_page(&dd->pcidev->dev, sdma_mapping_addr(descp),
+ sdma_mapping_len(descp), DMA_TO_DEVICE);
+ break;
+ }
+
+ if (descp->pinning_ctx && descp->ctx_put)
+ descp->ctx_put(descp->pinning_ctx);
+ descp->pinning_ctx = NULL;
+}
+
+/*
+ * return the mode as indicated by the first
+ * descriptor in the tx.
+ */
+static inline u8 ahg_mode(struct sdma_txreq *tx)
+{
+ return (tx->descp[0].qw[1] & SDMA_DESC1_HEADER_MODE_SMASK)
+ >> SDMA_DESC1_HEADER_MODE_SHIFT;
+}
+
+/**
+ * __sdma_txclean() - clean tx of mappings, descp *kmalloc's
+ * @dd: hfi1_devdata for unmapping
+ * @tx: tx request to clean
+ *
+ * This is used in the progress routine to clean the tx or
+ * by the ULP to toss an in-process tx build.
+ *
+ * The code can be called multiple times without issue.
+ *
+ */
+void __sdma_txclean(
+ struct hfi1_devdata *dd,
+ struct sdma_txreq *tx)
+{
+ u16 i;
+
+ if (tx->num_desc) {
+ u8 skip = 0, mode = ahg_mode(tx);
+
+ /* unmap first */
+ sdma_unmap_desc(dd, &tx->descp[0]);
+ /* determine number of AHG descriptors to skip */
+ if (mode > SDMA_AHG_APPLY_UPDATE1)
+ skip = mode >> 1;
+ for (i = 1 + skip; i < tx->num_desc; i++)
+ sdma_unmap_desc(dd, &tx->descp[i]);
+ tx->num_desc = 0;
+ }
+ kfree(tx->coalesce_buf);
+ tx->coalesce_buf = NULL;
+ /* kmalloc'ed descp */
+ if (unlikely(tx->desc_limit > ARRAY_SIZE(tx->descs))) {
+ tx->desc_limit = ARRAY_SIZE(tx->descs);
+ kfree(tx->descp);
+ }
+}
+
+static inline u16 sdma_gethead(struct sdma_engine *sde)
+{
+ struct hfi1_devdata *dd = sde->dd;
+ int use_dmahead;
+ u16 hwhead;
+
+#ifdef CONFIG_SDMA_VERBOSITY
+ dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
+ sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
+#endif
+
+retry:
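+ /* prefer the DMA'd head copy when allowed; otherwise read the CSR */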
+ use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) &&
+ (dd->flags & HFI1_HAS_SDMA_TIMEOUT);
+ hwhead = use_dmahead ?
+ (u16)le64_to_cpu(*sde->head_dma) :
+ (u16)read_sde_csr(sde, SD(HEAD));
+
+ if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) {
+ u16 cnt;
+ u16 swtail;
+ u16 swhead;
+ int sane;
+
+ swhead = sde->descq_head & sde->sdma_mask;
+ /* this code is really bad for cache line trading */
+ swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
+ cnt = sde->descq_cnt;
+
+ if (swhead < swtail)
+ /* not wrapped */
+ sane = (hwhead >= swhead) & (hwhead <= swtail);
+ else if (swhead > swtail)
+ /* wrapped around */
+ sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
+ (hwhead <= swtail);
+ else
+ /* empty */
+ sane = (hwhead == swhead);
+
+ if (unlikely(!sane)) {
+ dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%u swhd=%u swtl=%u cnt=%u\n",
+ sde->this_idx,
+ use_dmahead ? "dma" : "kreg",
+ hwhead, swhead, swtail, cnt);
+ if (use_dmahead) {
+ /* try one more time, using csr */
+ use_dmahead = 0;
+ goto retry;
+ }
+ /* proceed as if no progress */
+ hwhead = swhead;
+ }
+ }
+ return hwhead;
+}
+
+/*
+ * This is called when there are send DMA descriptors that might be
+ * available.
+ *
+ * This is called with head_lock held.
+ */
+static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
+{
+ struct iowait *wait, *nw, *twait;
+ struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
+ uint i, n = 0, seq, tidx = 0;
+
+#ifdef CONFIG_SDMA_VERBOSITY
+ dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
+ slashstrip(__FILE__), __LINE__, __func__);
+ dd_dev_err(sde->dd, "avail: %u\n", avail);
+#endif
+
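+ /*
+ * Peek at the wait list under the seqlock read side and only take
+ * the write side when there is at least one waiter to harvest.
+ */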
+ do {
+ seq = read_seqbegin(&sde->waitlock);
+ if (!list_empty(&sde->dmawait)) {
+ /* at least one item */
+ write_seqlock(&sde->waitlock);
+ /* Harvest waiters wanting DMA descriptors */
+ list_for_each_entry_safe(
+ wait,
+ nw,
+ &sde->dmawait,
+ list) {
+ u32 num_desc;
+
+ if (!wait->wakeup)
+ continue;
+ if (n == ARRAY_SIZE(waits))
+ break;
+ iowait_init_priority(wait);
+ num_desc = iowait_get_all_desc(wait);
+ if (num_desc > avail)
+ break;
+ avail -= num_desc;
+ /* Find the top-priority wait member */
+ if (n) {
+ twait = waits[tidx];
+ tidx =
+ iowait_priority_update_top(wait,
+ twait,
+ n,
+ tidx);
+ }
+ list_del_init(&wait->list);
+ waits[n++] = wait;
+ }
+ write_sequnlock(&sde->waitlock);
+ break;
+ }
+ } while (read_seqretry(&sde->waitlock, seq));
+
+ /* Schedule the top-priority entry first */
+ if (n)
+ waits[tidx]->wakeup(waits[tidx], SDMA_AVAIL_REASON);
+
+ for (i = 0; i < n; i++)
+ if (i != tidx)
+ waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
+}
+
+/* head_lock must be held */
+static void sdma_make_progress(struct sdma_engine *sde, u64 status)
+{
+ struct sdma_txreq *txp = NULL;
+ int progress = 0;
+ u16 hwhead, swhead;
+ int idle_check_done = 0;
+
+ hwhead = sdma_gethead(sde);
+
+ /* The reason for some of the complexity of this code is that
+ * not all descriptors have corresponding txps. So, we have to
+ * be able to skip over descs until we wander into the range of
+ * the next txp on the list.
+ */
+
+retry:
+ txp = get_txhead(sde);
+ swhead = sde->descq_head & sde->sdma_mask;
+ trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
+ while (swhead != hwhead) {
+ /* advance head, wrap if needed */
+ swhead = ++sde->descq_head & sde->sdma_mask;
+
+ /* if now past this txp's descs, do the callback */
+ if (txp && txp->next_descq_idx == swhead) {
+ /* remove from list */
+ sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
+ complete_tx(sde, txp, SDMA_TXREQ_S_OK);
+ /* see if there is another txp */
+ txp = get_txhead(sde);
+ }
+ trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
+ progress++;
+ }
+
+ /*
+ * The SDMA idle interrupt is not guaranteed to be ordered with respect
+ * to updates to the dma_head location in host memory. The head
+ * value read might not be fully up to date. If there are pending
+ * descriptors and the SDMA idle interrupt fired then read from the
+ * CSR SDMA head instead to get the latest value from the hardware.
+ * The hardware SDMA head should be read at most once in this invocation
+ * of sdma_make_progress(), which is ensured by the idle_check_done flag.
+ */
+ if ((status & sde->idle_mask) && !idle_check_done) {
+ u16 swtail;
+
+ swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
+ if (swtail != hwhead) {
+ hwhead = (u16)read_sde_csr(sde, SD(HEAD));
+ idle_check_done = 1;
+ goto retry;
+ }
+ }
+
+ sde->last_status = status;
+ if (progress)
+ sdma_desc_avail(sde, sdma_descq_freecnt(sde));
+}
+
+/*
+ * sdma_engine_interrupt() - interrupt handler for engine
+ * @sde: sdma engine
+ * @status: sdma interrupt reason
+ *
+ * Status is a mask of the 3 possible interrupts for this engine. It will
+ * contain bits _only_ for this SDMA engine. It will contain at least one
+ * bit; it may contain more.
+ */
+void sdma_engine_interrupt(struct sdma_engine *sde, u64 status)
+{
+ trace_hfi1_sdma_engine_interrupt(sde, status);
+ write_seqlock(&sde->head_lock);
+ sdma_set_desc_cnt(sde, sdma_desct_intr);
+ if (status & sde->idle_mask)
+ sde->idle_int_cnt++;
+ else if (status & sde->progress_mask)
+ sde->progress_int_cnt++;
+ else if (status & sde->int_mask)
+ sde->sdma_int_cnt++;
+ sdma_make_progress(sde, status);
+ write_sequnlock(&sde->head_lock);
+}
+
+/**
+ * sdma_engine_error() - error handler for engine
+ * @sde: sdma engine
+ * @status: sdma interrupt reason
+ */
+void sdma_engine_error(struct sdma_engine *sde, u64 status)
+{
+ unsigned long flags;
+
+#ifdef CONFIG_SDMA_VERBOSITY
+ dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n",
+ sde->this_idx,
+ (unsigned long long)status,
+ sdma_state_names[sde->state.current_state]);
+#endif
+ spin_lock_irqsave(&sde->tail_lock, flags);
+ write_seqlock(&sde->head_lock);
+ if (status & ALL_SDMA_ENG_HALT_ERRS)
+ __sdma_process_event(sde, sdma_event_e60_hw_halted);
+ if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) {
+ dd_dev_err(sde->dd,
+ "SDMA (%u) engine error: 0x%llx state %s\n",
+ sde->this_idx,
+ (unsigned long long)status,
+ sdma_state_names[sde->state.current_state]);
+ dump_sdma_state(sde);
+ }
+ write_sequnlock(&sde->head_lock);
+ spin_unlock_irqrestore(&sde->tail_lock, flags);
+}
+
+static void sdma_sendctrl(struct sdma_engine *sde, unsigned op)
+{
+ u64 set_senddmactrl = 0;
+ u64 clr_senddmactrl = 0;
+ unsigned long flags;
+
+#ifdef CONFIG_SDMA_VERBOSITY
+ dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n",
+ sde->this_idx,
+ (op & SDMA_SENDCTRL_OP_ENABLE) ? 1 : 0,
+ (op & SDMA_SENDCTRL_OP_INTENABLE) ? 1 : 0,
+ (op & SDMA_SENDCTRL_OP_HALT) ? 1 : 0,
+ (op & SDMA_SENDCTRL_OP_CLEANUP) ? 1 : 0);
+#endif
+
+ if (op & SDMA_SENDCTRL_OP_ENABLE)
+ set_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
+ else
+ clr_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
+
+ if (op & SDMA_SENDCTRL_OP_INTENABLE)
+ set_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
+ else
+ clr_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
+
+ if (op & SDMA_SENDCTRL_OP_HALT)
+ set_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
+ else
+ clr_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
+
+ spin_lock_irqsave(&sde->senddmactrl_lock, flags);
+
+ sde->p_senddmactrl |= set_senddmactrl;
+ sde->p_senddmactrl &= ~clr_senddmactrl;
+
+ if (op & SDMA_SENDCTRL_OP_CLEANUP)
+ write_sde_csr(sde, SD(CTRL),
+ sde->p_senddmactrl |
+ SD(CTRL_SDMA_CLEANUP_SMASK));
+ else
+ write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl);
+
+ spin_unlock_irqrestore(&sde->senddmactrl_lock, flags);
+
+#ifdef CONFIG_SDMA_VERBOSITY
+ sdma_dumpstate(sde);
+#endif
+}
+
+static void sdma_setlengen(struct sdma_engine *sde)
+{
+#ifdef CONFIG_SDMA_VERBOSITY
+ dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
+ sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
+#endif
+
+ /*
+ * Set SendDmaLenGen and clear-then-set the MSB of the generation
+ * count to enable generation checking and load the internal
+ * generation counter.
+ */
+ write_sde_csr(sde, SD(LEN_GEN),
+ (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT));
+ write_sde_csr(sde, SD(LEN_GEN),
+ ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) |
+ (4ULL << SD(LEN_GEN_GENERATION_SHIFT)));
+}
+
+static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail)
+{
+ /* Commit writes to memory and advance the tail on the chip */
+ smp_wmb(); /* see get_txhead() */
+ writeq(tail, sde->tail_csr);
+}
+
+/*
+ * This is called when changing to state s10_hw_start_up_halt_wait as
+ * a result of send buffer errors or send DMA descriptor errors.
+ */
+static void sdma_hw_start_up(struct sdma_engine *sde)
+{
+ u64 reg;
+
+#ifdef CONFIG_SDMA_VERBOSITY
+ dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
+ sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
+#endif
+
+ sdma_setlengen(sde);
+ sdma_update_tail(sde, 0); /* Set SendDmaTail */
+ *sde->head_dma = 0;
+
+ reg = SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK) <<
+ SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT);
+ write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg);
+}
+
+/*
+ * set_sdma_integrity
+ *
+ * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'.
+ */
+static void set_sdma_integrity(struct sdma_engine *sde)
+{
+ struct hfi1_devdata *dd = sde->dd;
+
+ write_sde_csr(sde, SD(CHECK_ENABLE),
+ hfi1_pkt_base_sdma_integrity(dd));
+}
+
+static void init_sdma_regs(
+ struct sdma_engine *sde,
+ u32 credits,
+ uint idle_cnt)
+{
+ u8 opval, opmask;
+#ifdef CONFIG_SDMA_VERBOSITY
+ struct hfi1_devdata *dd = sde->dd;
+
+ dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n",
+ sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
+#endif
+
+ write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys);
+ sdma_setlengen(sde);
+ sdma_update_tail(sde, 0); /* Set SendDmaTail */
+ write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt);
+ write_sde_csr(sde, SD(DESC_CNT), 0);
+ write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys);
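+ /*
+ * Give this engine an equal, non-overlapping slice of the SDMA
+ * memory (credits computed from chip_sdma_mem_size() in sdma_init()).
+ */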
+ write_sde_csr(sde, SD(MEMORY),
+ ((u64)credits << SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) |
+ ((u64)(credits * sde->this_idx) <<
+ SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT)));
+ write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull);
+ set_sdma_integrity(sde);
+ opmask = OPCODE_CHECK_MASK_DISABLED;
+ opval = OPCODE_CHECK_VAL_DISABLED;
+ write_sde_csr(sde, SD(CHECK_OPCODE),
+ (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) |
+ (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT));
+}
+
+#ifdef CONFIG_SDMA_VERBOSITY
+
+#define sdma_dumpstate_helper0(reg) do { \
+ csr = read_csr(sde->dd, reg); \
+ dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \
+ } while (0)
+
+#define sdma_dumpstate_helper(reg) do { \
+ csr = read_sde_csr(sde, reg); \
+ dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \
+ #reg, sde->this_idx, csr); \
+ } while (0)
+
+#define sdma_dumpstate_helper2(reg) do { \
+ csr = read_csr(sde->dd, reg + (8 * i)); \
+ dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \
+ #reg, i, csr); \
+ } while (0)
+
+void sdma_dumpstate(struct sdma_engine *sde)
+{
+ u64 csr;
+ unsigned i;
+
+ sdma_dumpstate_helper(SD(CTRL));
+ sdma_dumpstate_helper(SD(STATUS));
+ sdma_dumpstate_helper0(SD(ERR_STATUS));
+ sdma_dumpstate_helper0(SD(ERR_MASK));
+ sdma_dumpstate_helper(SD(ENG_ERR_STATUS));
+ sdma_dumpstate_helper(SD(ENG_ERR_MASK));
+
+ for (i = 0; i < CCE_NUM_INT_CSRS; ++i) {
+ sdma_dumpstate_helper2(CCE_INT_STATUS);
+ sdma_dumpstate_helper2(CCE_INT_MASK);
+ sdma_dumpstate_helper2(CCE_INT_BLOCKED);
+ }
+
+ sdma_dumpstate_helper(SD(TAIL));
+ sdma_dumpstate_helper(SD(HEAD));
+ sdma_dumpstate_helper(SD(PRIORITY_THLD));
+ sdma_dumpstate_helper(SD(IDLE_CNT));
+ sdma_dumpstate_helper(SD(RELOAD_CNT));
+ sdma_dumpstate_helper(SD(DESC_CNT));
+ sdma_dumpstate_helper(SD(DESC_FETCHED_CNT));
+ sdma_dumpstate_helper(SD(MEMORY));
+ sdma_dumpstate_helper0(SD(ENGINES));
+ sdma_dumpstate_helper0(SD(MEM_SIZE));
+ /* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS); */
+ sdma_dumpstate_helper(SD(BASE_ADDR));
+ sdma_dumpstate_helper(SD(LEN_GEN));
+ sdma_dumpstate_helper(SD(HEAD_ADDR));
+ sdma_dumpstate_helper(SD(CHECK_ENABLE));
+ sdma_dumpstate_helper(SD(CHECK_VL));
+ sdma_dumpstate_helper(SD(CHECK_JOB_KEY));
+ sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY));
+ sdma_dumpstate_helper(SD(CHECK_SLID));
+ sdma_dumpstate_helper(SD(CHECK_OPCODE));
+}
+#endif
+
+static void dump_sdma_state(struct sdma_engine *sde)
+{
+ struct hw_sdma_desc *descqp;
+ u64 desc[2];
+ u64 addr;
+ u8 gen;
+ u16 len;
+ u16 head, tail, cnt;
+
+ head = sde->descq_head & sde->sdma_mask;
+ tail = sde->descq_tail & sde->sdma_mask;
+ cnt = sdma_descq_freecnt(sde);
+
+ dd_dev_err(sde->dd,
+ "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
+ sde->this_idx, head, tail, cnt,
+ !list_empty(&sde->flushlist));
+
+ /* print info for each entry in the descriptor queue */
+ while (head != tail) {
+ char flags[6] = { 'x', 'x', 'x', 'x', 0 };
+
+ descqp = &sde->descq[head];
+ desc[0] = le64_to_cpu(descqp->qw[0]);
+ desc[1] = le64_to_cpu(descqp->qw[1]);
+ flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
+ flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
+ 'H' : '-';
+ flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
+ flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
+ addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
+ & SDMA_DESC0_PHY_ADDR_MASK;
+ gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
+ & SDMA_DESC1_GENERATION_MASK;
+ len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
+ & SDMA_DESC0_BYTE_COUNT_MASK;
+ dd_dev_err(sde->dd,
+ "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
+ head, flags, addr, gen, len);
+ dd_dev_err(sde->dd,
+ "\tdesc0:0x%016llx desc1 0x%016llx\n",
+ desc[0], desc[1]);
+ if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
+ dd_dev_err(sde->dd,
+ "\taidx: %u amode: %u alen: %u\n",
+ (u8)((desc[1] &
+ SDMA_DESC1_HEADER_INDEX_SMASK) >>
+ SDMA_DESC1_HEADER_INDEX_SHIFT),
+ (u8)((desc[1] &
+ SDMA_DESC1_HEADER_MODE_SMASK) >>
+ SDMA_DESC1_HEADER_MODE_SHIFT),
+ (u8)((desc[1] &
+ SDMA_DESC1_HEADER_DWS_SMASK) >>
+ SDMA_DESC1_HEADER_DWS_SHIFT));
+ head++;
+ head &= sde->sdma_mask;
+ }
+}
+
+#define SDE_FMT \
+ "SDE %u CPU %d STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n"
+/**
+ * sdma_seqfile_dump_sde() - debugfs dump of sde
+ * @s: seq file
+ * @sde: send dma engine to dump
+ *
+ * This routine dumps the sde to the indicated seq file.
+ */
+void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
+{
+ u16 head, tail;
+ struct hw_sdma_desc *descqp;
+ u64 desc[2];
+ u64 addr;
+ u8 gen;
+ u16 len;
+
+ head = sde->descq_head & sde->sdma_mask;
+ tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
+ seq_printf(s, SDE_FMT, sde->this_idx,
+ sde->cpu,
+ sdma_state_name(sde->state.current_state),
+ (unsigned long long)read_sde_csr(sde, SD(CTRL)),
+ (unsigned long long)read_sde_csr(sde, SD(STATUS)),
+ (unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)),
+ (unsigned long long)read_sde_csr(sde, SD(TAIL)), tail,
+ (unsigned long long)read_sde_csr(sde, SD(HEAD)), head,
+ (unsigned long long)le64_to_cpu(*sde->head_dma),
+ (unsigned long long)read_sde_csr(sde, SD(MEMORY)),
+ (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)),
+ (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)),
+ (unsigned long long)sde->last_status,
+ (unsigned long long)sde->ahg_bits,
+ sde->tx_tail,
+ sde->tx_head,
+ sde->descq_tail,
+ sde->descq_head,
+ !list_empty(&sde->flushlist),
+ sde->descq_full_count,
+ (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID));
+
+ /* print info for each entry in the descriptor queue */
+ while (head != tail) {
+ char flags[6] = { 'x', 'x', 'x', 'x', 0 };
+
+ descqp = &sde->descq[head];
+ desc[0] = le64_to_cpu(descqp->qw[0]);
+ desc[1] = le64_to_cpu(descqp->qw[1]);
+ flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
+ flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
+ 'H' : '-';
+ flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
+ flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
+ addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
+ & SDMA_DESC0_PHY_ADDR_MASK;
+ gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
+ & SDMA_DESC1_GENERATION_MASK;
+ len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
+ & SDMA_DESC0_BYTE_COUNT_MASK;
+ seq_printf(s,
+ "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
+ head, flags, addr, gen, len);
+ if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
+ seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
+ (u8)((desc[1] &
+ SDMA_DESC1_HEADER_INDEX_SMASK) >>
+ SDMA_DESC1_HEADER_INDEX_SHIFT),
+ (u8)((desc[1] &
+ SDMA_DESC1_HEADER_MODE_SMASK) >>
+ SDMA_DESC1_HEADER_MODE_SHIFT));
+ head = (head + 1) & sde->sdma_mask;
+ }
+}
+
+/*
+ * add the generation number into
+ * the qw1 and return
+ */
+static inline u64 add_gen(struct sdma_engine *sde, u64 qw1)
+{
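+ /*
+ * The 2-bit generation advances each time descq_tail wraps the ring
+ * (sdma_shift is ilog2(descq_cnt)); sdma_setlengen() enables the
+ * hardware generation check against it.
+ */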
+ u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3;
+
+ qw1 &= ~SDMA_DESC1_GENERATION_SMASK;
+ qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK)
+ << SDMA_DESC1_GENERATION_SHIFT;
+ return qw1;
+}
+
+/*
+ * This routine submits the indicated tx
+ *
+ * Space has already been guaranteed and
+ * tail side of ring is locked.
+ *
+ * The hardware tail update is done
+ * in the caller and that is facilitated
+ * by returning the new tail.
+ *
+ * There is special case logic for ahg
+ * to not add the generation number for
+ * up to 2 descriptors that follow the
+ * first descriptor.
+ *
+ */
+static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
+{
+ int i;
+ u16 tail;
+ struct sdma_desc *descp = tx->descp;
+ u8 skip = 0, mode = ahg_mode(tx);
+
+ tail = sde->descq_tail & sde->sdma_mask;
+ sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
+ sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1]));
+ trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1],
+ tail, &sde->descq[tail]);
+ tail = ++sde->descq_tail & sde->sdma_mask;
+ descp++;
+ if (mode > SDMA_AHG_APPLY_UPDATE1)
+ skip = mode >> 1;
+ for (i = 1; i < tx->num_desc; i++, descp++) {
+ u64 qw1;
+
+ sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
+ if (skip) {
+ /* edits don't have generation */
+ qw1 = descp->qw[1];
+ skip--;
+ } else {
+ /* replace generation with real one for non-edits */
+ qw1 = add_gen(sde, descp->qw[1]);
+ }
+ sde->descq[tail].qw[1] = cpu_to_le64(qw1);
+ trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1,
+ tail, &sde->descq[tail]);
+ tail = ++sde->descq_tail & sde->sdma_mask;
+ }
+ tx->next_descq_idx = tail;
+#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
+ tx->sn = sde->tail_sn++;
+ trace_hfi1_sdma_in_sn(sde, tx->sn);
+ WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]);
+#endif
+ sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx;
+ sde->desc_avail -= tx->num_desc;
+ return tail;
+}
+
+/*
+ * Check for progress
+ */
+static int sdma_check_progress(
+ struct sdma_engine *sde,
+ struct iowait_work *wait,
+ struct sdma_txreq *tx,
+ bool pkts_sent)
+{
+ int ret;
+
+ sde->desc_avail = sdma_descq_freecnt(sde);
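+ /* enough descriptors have freed up; let the caller retry the submit */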
+ if (tx->num_desc <= sde->desc_avail)
+ return -EAGAIN;
+ /* pulse the head_lock */
+ if (wait && iowait_ioww_to_iow(wait)->sleep) {
+ unsigned seq;
+
+ seq = raw_seqcount_begin(
+ (const seqcount_t *)&sde->head_lock.seqcount);
+ ret = wait->iow->sleep(sde, wait, tx, seq, pkts_sent);
+ if (ret == -EAGAIN)
+ sde->desc_avail = sdma_descq_freecnt(sde);
+ } else {
+ ret = -EBUSY;
+ }
+ return ret;
+}
+
+/**
+ * sdma_send_txreq() - submit a tx req to ring
+ * @sde: sdma engine to use
+ * @wait: SE wait structure to use when full (may be NULL)
+ * @tx: sdma_txreq to submit
+ * @pkts_sent: has any packet been sent yet?
+ *
+ * The call submits the tx into the ring. If an iowait structure is non-NULL,
+ * the packet will be queued to the list in wait.
+ *
+ * Return:
+ * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in
+ * ring (wait == NULL)
+ * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
+ */
+int sdma_send_txreq(struct sdma_engine *sde,
+ struct iowait_work *wait,
+ struct sdma_txreq *tx,
+ bool pkts_sent)
+{
+ int ret = 0;
+ u16 tail;
+ unsigned long flags;
+
+ /* user should have supplied entire packet */
+ if (unlikely(tx->tlen))
+ return -EINVAL;
+ tx->wait = iowait_ioww_to_iow(wait);
+ spin_lock_irqsave(&sde->tail_lock, flags);
+retry:
+ if (unlikely(!__sdma_running(sde)))
+ goto unlock_noconn;
+ if (unlikely(tx->num_desc > sde->desc_avail))
+ goto nodesc;
+ tail = submit_tx(sde, tx);
+ if (wait)
+ iowait_sdma_inc(iowait_ioww_to_iow(wait));
+ sdma_update_tail(sde, tail);
+unlock:
+ spin_unlock_irqrestore(&sde->tail_lock, flags);
+ return ret;
+unlock_noconn:
+ if (wait)
+ iowait_sdma_inc(iowait_ioww_to_iow(wait));
+ tx->next_descq_idx = 0;
+#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
+ tx->sn = sde->tail_sn++;
+ trace_hfi1_sdma_in_sn(sde, tx->sn);
+#endif
+ spin_lock(&sde->flushlist_lock);
+ list_add_tail(&tx->list, &sde->flushlist);
+ spin_unlock(&sde->flushlist_lock);
+ iowait_inc_wait_count(wait, tx->num_desc);
+ queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
+ ret = -ECOMM;
+ goto unlock;
+nodesc:
+ ret = sdma_check_progress(sde, wait, tx, pkts_sent);
+ if (ret == -EAGAIN) {
+ ret = 0;
+ goto retry;
+ }
+ sde->descq_full_count++;
+ goto unlock;
+}
+
+/**
+ * sdma_send_txlist() - submit a list of tx req to ring
+ * @sde: sdma engine to use
+ * @wait: SE wait structure to use when full (may be NULL)
+ * @tx_list: list of sdma_txreqs to submit
+ * @count_out: pointer to a u16 which, after return will contain the total number of
+ * sdma_txreqs removed from the tx_list. This will include sdma_txreqs
+ * whose SDMA descriptors are submitted to the ring and the sdma_txreqs
+ * which are added to SDMA engine flush list if the SDMA engine state is
+ * not running.
+ *
+ * The call submits the list into the ring.
+ *
+ * If the iowait structure is non-NULL and not equal to the iowait list,
+ * the unprocessed part of the list will be appended to the list in wait.
+ *
+ * In all cases, the tx_list will be updated so the head of the tx_list is
+ * the list of descriptors that have yet to be transmitted.
+ *
+ * The intent of this call is to provide a more efficient
+ * way of submitting multiple packets to SDMA while holding the
+ * tail-side lock.
+ *
+ * Return:
+ * 0 - Success,
+ * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL)
+ * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
+ */
+int sdma_send_txlist(struct sdma_engine *sde, struct iowait_work *wait,
+ struct list_head *tx_list, u16 *count_out)
+{
+ struct sdma_txreq *tx, *tx_next;
+ int ret = 0;
+ unsigned long flags;
+ u16 tail = INVALID_TAIL;
+ u32 submit_count = 0, flush_count = 0, total_count;
+
+ spin_lock_irqsave(&sde->tail_lock, flags);
+retry:
+ list_for_each_entry_safe(tx, tx_next, tx_list, list) {
+ tx->wait = iowait_ioww_to_iow(wait);
+ if (unlikely(!__sdma_running(sde)))
+ goto unlock_noconn;
+ if (unlikely(tx->num_desc > sde->desc_avail))
+ goto nodesc;
+ if (unlikely(tx->tlen)) {
+ ret = -EINVAL;
+ goto update_tail;
+ }
+ list_del_init(&tx->list);
+ tail = submit_tx(sde, tx);
+ submit_count++;
+ if (tail != INVALID_TAIL &&
+ (submit_count & SDMA_TAIL_UPDATE_THRESH) == 0) {
+ sdma_update_tail(sde, tail);
+ tail = INVALID_TAIL;
+ }
+ }
+update_tail:
+ total_count = submit_count + flush_count;
+ if (wait) {
+ iowait_sdma_add(iowait_ioww_to_iow(wait), total_count);
+ iowait_starve_clear(submit_count > 0,
+ iowait_ioww_to_iow(wait));
+ }
+ if (tail != INVALID_TAIL)
+ sdma_update_tail(sde, tail);
+ spin_unlock_irqrestore(&sde->tail_lock, flags);
+ *count_out = total_count;
+ return ret;
+unlock_noconn:
+ spin_lock(&sde->flushlist_lock);
+ list_for_each_entry_safe(tx, tx_next, tx_list, list) {
+ tx->wait = iowait_ioww_to_iow(wait);
+ list_del_init(&tx->list);
+ tx->next_descq_idx = 0;
+#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
+ tx->sn = sde->tail_sn++;
+ trace_hfi1_sdma_in_sn(sde, tx->sn);
+#endif
+ list_add_tail(&tx->list, &sde->flushlist);
+ flush_count++;
+ iowait_inc_wait_count(wait, tx->num_desc);
+ }
+ spin_unlock(&sde->flushlist_lock);
+ queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
+ ret = -ECOMM;
+ goto update_tail;
+nodesc:
+ ret = sdma_check_progress(sde, wait, tx, submit_count > 0);
+ if (ret == -EAGAIN) {
+ ret = 0;
+ goto retry;
+ }
+ sde->descq_full_count++;
+ goto update_tail;
+}
+
+static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sde->tail_lock, flags);
+ write_seqlock(&sde->head_lock);
+
+ __sdma_process_event(sde, event);
+
+ if (sde->state.current_state == sdma_state_s99_running)
+ sdma_desc_avail(sde, sdma_descq_freecnt(sde));
+
+ write_sequnlock(&sde->head_lock);
+ spin_unlock_irqrestore(&sde->tail_lock, flags);
+}
+
+static void __sdma_process_event(struct sdma_engine *sde,
+ enum sdma_events event)
+{
+ struct sdma_state *ss = &sde->state;
+ int need_progress = 0;
+
+ /* CONFIG SDMA temporary */
+#ifdef CONFIG_SDMA_VERBOSITY
+ dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx,
+ sdma_state_names[ss->current_state],
+ sdma_event_names[event]);
+#endif
+
+ switch (ss->current_state) {
+ case sdma_state_s00_hw_down:
+ switch (event) {
+ case sdma_event_e00_go_hw_down:
+ break;
+ case sdma_event_e30_go_running:
+ /*
+ * If down, but running requested (usually the result
+ * of a link coming up), then we need to start up.
+ * This can happen when hw down is requested while
+ * bringing the link up with traffic active on,
+ * e.g., a 7220.
+ */
+ ss->go_s99_running = 1;
+ fallthrough; /* and start dma engine */
+ case sdma_event_e10_go_hw_start:
+ /* This reference means the state machine is started */
+ sdma_get(&sde->state);
+ sdma_set_state(sde,
+ sdma_state_s10_hw_start_up_halt_wait);
+ break;
+ case sdma_event_e15_hw_halt_done:
+ break;
+ case sdma_event_e25_hw_clean_up_done:
+ break;
+ case sdma_event_e40_sw_cleaned:
+ sdma_sw_tear_down(sde);
+ break;
+ case sdma_event_e50_hw_cleaned:
+ break;
+ case sdma_event_e60_hw_halted:
+ break;
+ case sdma_event_e70_go_idle:
+ break;
+ case sdma_event_e80_hw_freeze:
+ break;
+ case sdma_event_e81_hw_frozen:
+ break;
+ case sdma_event_e82_hw_unfreeze:
+ break;
+ case sdma_event_e85_link_down:
+ break;
+ case sdma_event_e90_sw_halted:
+ break;
+ }
+ break;
+
+ case sdma_state_s10_hw_start_up_halt_wait:
+ switch (event) {
+ case sdma_event_e00_go_hw_down:
+ sdma_set_state(sde, sdma_state_s00_hw_down);
+ sdma_sw_tear_down(sde);
+ break;
+ case sdma_event_e10_go_hw_start:
+ break;
+ case sdma_event_e15_hw_halt_done:
+ sdma_set_state(sde,
+ sdma_state_s15_hw_start_up_clean_wait);
+ sdma_start_hw_clean_up(sde);
+ break;
+ case sdma_event_e25_hw_clean_up_done:
+ break;
+ case sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case sdma_event_e40_sw_cleaned:
+ break;
+ case sdma_event_e50_hw_cleaned:
+ break;
+ case sdma_event_e60_hw_halted:
+ schedule_work(&sde->err_halt_worker);
+ break;
+ case sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case sdma_event_e80_hw_freeze:
+ break;
+ case sdma_event_e81_hw_frozen:
+ break;
+ case sdma_event_e82_hw_unfreeze:
+ break;
+ case sdma_event_e85_link_down:
+ break;
+ case sdma_event_e90_sw_halted:
+ break;
+ }
+ break;
+
+ case sdma_state_s15_hw_start_up_clean_wait:
+ switch (event) {
+ case sdma_event_e00_go_hw_down:
+ sdma_set_state(sde, sdma_state_s00_hw_down);
+ sdma_sw_tear_down(sde);
+ break;
+ case sdma_event_e10_go_hw_start:
+ break;
+ case sdma_event_e15_hw_halt_done:
+ break;
+ case sdma_event_e25_hw_clean_up_done:
+ sdma_hw_start_up(sde);
+ sdma_set_state(sde, ss->go_s99_running ?
+ sdma_state_s99_running :
+ sdma_state_s20_idle);
+ break;
+ case sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case sdma_event_e40_sw_cleaned:
+ break;
+ case sdma_event_e50_hw_cleaned:
+ break;
+ case sdma_event_e60_hw_halted:
+ break;
+ case sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case sdma_event_e80_hw_freeze:
+ break;
+ case sdma_event_e81_hw_frozen:
+ break;
+ case sdma_event_e82_hw_unfreeze:
+ break;
+ case sdma_event_e85_link_down:
+ break;
+ case sdma_event_e90_sw_halted:
+ break;
+ }
+ break;
+
+ case sdma_state_s20_idle:
+ switch (event) {
+ case sdma_event_e00_go_hw_down:
+ sdma_set_state(sde, sdma_state_s00_hw_down);
+ sdma_sw_tear_down(sde);
+ break;
+ case sdma_event_e10_go_hw_start:
+ break;
+ case sdma_event_e15_hw_halt_done:
+ break;
+ case sdma_event_e25_hw_clean_up_done:
+ break;
+ case sdma_event_e30_go_running:
+ sdma_set_state(sde, sdma_state_s99_running);
+ ss->go_s99_running = 1;
+ break;
+ case sdma_event_e40_sw_cleaned:
+ break;
+ case sdma_event_e50_hw_cleaned:
+ break;
+ case sdma_event_e60_hw_halted:
+ sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
+ schedule_work(&sde->err_halt_worker);
+ break;
+ case sdma_event_e70_go_idle:
+ break;
+ case sdma_event_e85_link_down:
+ case sdma_event_e80_hw_freeze:
+ sdma_set_state(sde, sdma_state_s80_hw_freeze);
+ atomic_dec(&sde->dd->sdma_unfreeze_count);
+ wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
+ break;
+ case sdma_event_e81_hw_frozen:
+ break;
+ case sdma_event_e82_hw_unfreeze:
+ break;
+ case sdma_event_e90_sw_halted:
+ break;
+ }
+ break;
+
+ case sdma_state_s30_sw_clean_up_wait:
+ switch (event) {
+ case sdma_event_e00_go_hw_down:
+ sdma_set_state(sde, sdma_state_s00_hw_down);
+ break;
+ case sdma_event_e10_go_hw_start:
+ break;
+ case sdma_event_e15_hw_halt_done:
+ break;
+ case sdma_event_e25_hw_clean_up_done:
+ break;
+ case sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case sdma_event_e40_sw_cleaned:
+ sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait);
+ sdma_start_hw_clean_up(sde);
+ break;
+ case sdma_event_e50_hw_cleaned:
+ break;
+ case sdma_event_e60_hw_halted:
+ break;
+ case sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case sdma_event_e80_hw_freeze:
+ break;
+ case sdma_event_e81_hw_frozen:
+ break;
+ case sdma_event_e82_hw_unfreeze:
+ break;
+ case sdma_event_e85_link_down:
+ ss->go_s99_running = 0;
+ break;
+ case sdma_event_e90_sw_halted:
+ break;
+ }
+ break;
+
+ case sdma_state_s40_hw_clean_up_wait:
+ switch (event) {
+ case sdma_event_e00_go_hw_down:
+ sdma_set_state(sde, sdma_state_s00_hw_down);
+ tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
+ break;
+ case sdma_event_e10_go_hw_start:
+ break;
+ case sdma_event_e15_hw_halt_done:
+ break;
+ case sdma_event_e25_hw_clean_up_done:
+ sdma_hw_start_up(sde);
+ sdma_set_state(sde, ss->go_s99_running ?
+ sdma_state_s99_running :
+ sdma_state_s20_idle);
+ break;
+ case sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case sdma_event_e40_sw_cleaned:
+ break;
+ case sdma_event_e50_hw_cleaned:
+ break;
+ case sdma_event_e60_hw_halted:
+ break;
+ case sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case sdma_event_e80_hw_freeze:
+ break;
+ case sdma_event_e81_hw_frozen:
+ break;
+ case sdma_event_e82_hw_unfreeze:
+ break;
+ case sdma_event_e85_link_down:
+ ss->go_s99_running = 0;
+ break;
+ case sdma_event_e90_sw_halted:
+ break;
+ }
+ break;
+
+ case sdma_state_s50_hw_halt_wait:
+ switch (event) {
+ case sdma_event_e00_go_hw_down:
+ sdma_set_state(sde, sdma_state_s00_hw_down);
+ tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
+ break;
+ case sdma_event_e10_go_hw_start:
+ break;
+ case sdma_event_e15_hw_halt_done:
+ sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
+ tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
+ break;
+ case sdma_event_e25_hw_clean_up_done:
+ break;
+ case sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case sdma_event_e40_sw_cleaned:
+ break;
+ case sdma_event_e50_hw_cleaned:
+ break;
+ case sdma_event_e60_hw_halted:
+ schedule_work(&sde->err_halt_worker);
+ break;
+ case sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case sdma_event_e80_hw_freeze:
+ break;
+ case sdma_event_e81_hw_frozen:
+ break;
+ case sdma_event_e82_hw_unfreeze:
+ break;
+ case sdma_event_e85_link_down:
+ ss->go_s99_running = 0;
+ break;
+ case sdma_event_e90_sw_halted:
+ break;
+ }
+ break;
+
+ case sdma_state_s60_idle_halt_wait:
+ switch (event) {
+ case sdma_event_e00_go_hw_down:
+ sdma_set_state(sde, sdma_state_s00_hw_down);
+ tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
+ break;
+ case sdma_event_e10_go_hw_start:
+ break;
+ case sdma_event_e15_hw_halt_done:
+ sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
+ tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
+ break;
+ case sdma_event_e25_hw_clean_up_done:
+ break;
+ case sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case sdma_event_e40_sw_cleaned:
+ break;
+ case sdma_event_e50_hw_cleaned:
+ break;
+ case sdma_event_e60_hw_halted:
+ schedule_work(&sde->err_halt_worker);
+ break;
+ case sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case sdma_event_e80_hw_freeze:
+ break;
+ case sdma_event_e81_hw_frozen:
+ break;
+ case sdma_event_e82_hw_unfreeze:
+ break;
+ case sdma_event_e85_link_down:
+ break;
+ case sdma_event_e90_sw_halted:
+ break;
+ }
+ break;
+
+ case sdma_state_s80_hw_freeze:
+ switch (event) {
+ case sdma_event_e00_go_hw_down:
+ sdma_set_state(sde, sdma_state_s00_hw_down);
+ tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
+ break;
+ case sdma_event_e10_go_hw_start:
+ break;
+ case sdma_event_e15_hw_halt_done:
+ break;
+ case sdma_event_e25_hw_clean_up_done:
+ break;
+ case sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case sdma_event_e40_sw_cleaned:
+ break;
+ case sdma_event_e50_hw_cleaned:
+ break;
+ case sdma_event_e60_hw_halted:
+ break;
+ case sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case sdma_event_e80_hw_freeze:
+ break;
+ case sdma_event_e81_hw_frozen:
+ sdma_set_state(sde, sdma_state_s82_freeze_sw_clean);
+ tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
+ break;
+ case sdma_event_e82_hw_unfreeze:
+ break;
+ case sdma_event_e85_link_down:
+ break;
+ case sdma_event_e90_sw_halted:
+ break;
+ }
+ break;
+
+ case sdma_state_s82_freeze_sw_clean:
+ switch (event) {
+ case sdma_event_e00_go_hw_down:
+ sdma_set_state(sde, sdma_state_s00_hw_down);
+ tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
+ break;
+ case sdma_event_e10_go_hw_start:
+ break;
+ case sdma_event_e15_hw_halt_done:
+ break;
+ case sdma_event_e25_hw_clean_up_done:
+ break;
+ case sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case sdma_event_e40_sw_cleaned:
+ /* notify caller this engine is done cleaning */
+ atomic_dec(&sde->dd->sdma_unfreeze_count);
+ wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
+ break;
+ case sdma_event_e50_hw_cleaned:
+ break;
+ case sdma_event_e60_hw_halted:
+ break;
+ case sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case sdma_event_e80_hw_freeze:
+ break;
+ case sdma_event_e81_hw_frozen:
+ break;
+ case sdma_event_e82_hw_unfreeze:
+ sdma_hw_start_up(sde);
+ sdma_set_state(sde, ss->go_s99_running ?
+ sdma_state_s99_running :
+ sdma_state_s20_idle);
+ break;
+ case sdma_event_e85_link_down:
+ break;
+ case sdma_event_e90_sw_halted:
+ break;
+ }
+ break;
+
+ case sdma_state_s99_running:
+ switch (event) {
+ case sdma_event_e00_go_hw_down:
+ sdma_set_state(sde, sdma_state_s00_hw_down);
+ tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
+ break;
+ case sdma_event_e10_go_hw_start:
+ break;
+ case sdma_event_e15_hw_halt_done:
+ break;
+ case sdma_event_e25_hw_clean_up_done:
+ break;
+ case sdma_event_e30_go_running:
+ break;
+ case sdma_event_e40_sw_cleaned:
+ break;
+ case sdma_event_e50_hw_cleaned:
+ break;
+ case sdma_event_e60_hw_halted:
+ need_progress = 1;
+ sdma_err_progress_check_schedule(sde);
+ fallthrough;
+ case sdma_event_e90_sw_halted:
+ /*
+ * SW initiated halt does not perform engines
+ * progress check
+ */
+ sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
+ schedule_work(&sde->err_halt_worker);
+ break;
+ case sdma_event_e70_go_idle:
+ sdma_set_state(sde, sdma_state_s60_idle_halt_wait);
+ break;
+ case sdma_event_e85_link_down:
+ ss->go_s99_running = 0;
+ fallthrough;
+ case sdma_event_e80_hw_freeze:
+ sdma_set_state(sde, sdma_state_s80_hw_freeze);
+ atomic_dec(&sde->dd->sdma_unfreeze_count);
+ wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
+ break;
+ case sdma_event_e81_hw_frozen:
+ break;
+ case sdma_event_e82_hw_unfreeze:
+ break;
+ }
+ break;
+ }
+
+ ss->last_event = event;
+ if (need_progress)
+ sdma_make_progress(sde, 0);
+}
+
+/*
+ * _extend_sdma_tx_descs() - helper to extend txreq
+ *
+ * This is called once the initial nominal allocation
+ * of descriptors in the sdma_txreq is exhausted.
+ *
+ * The code will bump the allocation up to the max
+ * of MAX_DESC (64) descriptors. There doesn't seem to be
+ * much point in an interim step. The last descriptor
+ * is reserved for the coalesce buffer in order to support
+ * cases where the input packet has >MAX_DESC iovecs.
+ *
+ */
+static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
+{
+ int i;
+ struct sdma_desc *descp;
+
+ /* Handle last descriptor */
+ if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
+ /* if tlen is 0, it is for padding, release last descriptor */
+ if (!tx->tlen) {
+ tx->desc_limit = MAX_DESC;
+ } else if (!tx->coalesce_buf) {
+ /* allocate coalesce buffer with space for padding */
+ tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32),
+ GFP_ATOMIC);
+ if (!tx->coalesce_buf)
+ goto enomem;
+ tx->coalesce_idx = 0;
+ }
+ return 0;
+ }
+
+ if (unlikely(tx->num_desc == MAX_DESC))
+ goto enomem;
+
+ descp = kmalloc_array(MAX_DESC, sizeof(struct sdma_desc), GFP_ATOMIC);
+ if (!descp)
+ goto enomem;
+ tx->descp = descp;
+
+ /* reserve last descriptor for coalescing */
+ tx->desc_limit = MAX_DESC - 1;
+ /* copy ones already built */
+ for (i = 0; i < tx->num_desc; i++)
+ tx->descp[i] = tx->descs[i];
+ return 0;
+enomem:
+ __sdma_txclean(dd, tx);
+ return -ENOMEM;
+}
+
+/*
+ * ext_coal_sdma_tx_descs() - extend or coalesce sdma tx descriptors
+ *
+ * This is called once the initial nominal allocation of descriptors
+ * in the sdma_txreq is exhausted.
+ *
+ * This function calls _extend_sdma_tx_descs to extend or allocate
+ * the coalesce buffer. If there is an allocated coalesce buffer, it will
+ * copy the input packet data into the coalesce buffer. It also adds the
+ * coalesce buffer descriptor once the whole packet is received.
+ *
+ * Return:
+ * <0 - error
+ * 0 - coalescing, don't populate descriptor
+ * 1 - continue with populating descriptor
+ */
+int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
+ int type, void *kvaddr, struct page *page,
+ unsigned long offset, u16 len)
+{
+ int pad_len, rval;
+ dma_addr_t addr;
+
+ rval = _extend_sdma_tx_descs(dd, tx);
+ if (rval) {
+ __sdma_txclean(dd, tx);
+ return rval;
+ }
+
+ /* If coalesce buffer is allocated, copy data into it */
+ if (tx->coalesce_buf) {
+ if (type == SDMA_MAP_NONE) {
+ __sdma_txclean(dd, tx);
+ return -EINVAL;
+ }
+
+ if (type == SDMA_MAP_PAGE) {
+ kvaddr = kmap_local_page(page);
+ kvaddr += offset;
+ } else if (WARN_ON(!kvaddr)) {
+ __sdma_txclean(dd, tx);
+ return -EINVAL;
+ }
+
+ memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len);
+ tx->coalesce_idx += len;
+ if (type == SDMA_MAP_PAGE)
+ kunmap_local(kvaddr);
+
+ /* If there is more data, return */
+ if (tx->tlen - tx->coalesce_idx)
+ return 0;
+
+ /* Whole packet is received; add any padding */
+ pad_len = tx->packet_len & (sizeof(u32) - 1);
+ if (pad_len) {
+ pad_len = sizeof(u32) - pad_len;
+ memset(tx->coalesce_buf + tx->coalesce_idx, 0, pad_len);
+ /* padding is taken care of for coalescing case */
+ tx->packet_len += pad_len;
+ tx->tlen += pad_len;
+ }
+
+ /* dma map the coalesce buffer */
+ addr = dma_map_single(&dd->pcidev->dev,
+ tx->coalesce_buf,
+ tx->tlen,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
+ __sdma_txclean(dd, tx);
+ return -ENOSPC;
+ }
+
+ /* Add descriptor for coalesce buffer */
+ tx->desc_limit = MAX_DESC;
+ return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
+ addr, tx->tlen, NULL, NULL, NULL);
+ }
+
+ return 1;
+}
+
+/* Update sdes when the lmc changes */
+void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid)
+{
+ struct sdma_engine *sde;
+ int i;
+ u64 sreg;
+
+ sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) <<
+ SD(CHECK_SLID_MASK_SHIFT)) |
+ (((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) <<
+ SD(CHECK_SLID_VALUE_SHIFT));
+
+ for (i = 0; i < dd->num_sdma; i++) {
+ hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x",
+ i, (u32)sreg);
+ sde = &dd->per_sdma[i];
+ write_sde_csr(sde, SD(CHECK_SLID), sreg);
+ }
+}
+
+/* tx not dword sized - pad */
+int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
+{
+ int rval = 0;
+
+ if ((unlikely(tx->num_desc == tx->desc_limit))) {
+ rval = _extend_sdma_tx_descs(dd, tx);
+ if (rval) {
+ __sdma_txclean(dd, tx);
+ return rval;
+ }
+ }
+
+ /* finish the one just added */
+ make_tx_sdma_desc(
+ tx,
+ SDMA_MAP_NONE,
+ dd->sdma_pad_phys,
+ sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)),
+ NULL, NULL, NULL);
+ tx->num_desc++;
+ _sdma_close_tx(dd, tx);
+ return rval;
+}
+
+/*
+ * Add ahg to the sdma_txreq
+ *
+ * The logic will consume up to 3
+ * descriptors at the beginning of
+ * sdma_txreq.
+ */
+void _sdma_txreq_ahgadd(
+ struct sdma_txreq *tx,
+ u8 num_ahg,
+ u8 ahg_entry,
+ u32 *ahg,
+ u8 ahg_hlen)
+{
+ u32 i, shift = 0, desc = 0;
+ u8 mode;
+
+ WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4);
+ /* compute mode */
+ if (num_ahg == 1)
+ mode = SDMA_AHG_APPLY_UPDATE1;
+ else if (num_ahg <= 5)
+ mode = SDMA_AHG_APPLY_UPDATE2;
+ else
+ mode = SDMA_AHG_APPLY_UPDATE3;
+ tx->num_desc++;
+	/* initialize the consumed descriptors to zero */
+ switch (mode) {
+ case SDMA_AHG_APPLY_UPDATE3:
+ tx->num_desc++;
+ tx->descs[2].qw[0] = 0;
+ tx->descs[2].qw[1] = 0;
+ fallthrough;
+ case SDMA_AHG_APPLY_UPDATE2:
+ tx->num_desc++;
+ tx->descs[1].qw[0] = 0;
+ tx->descs[1].qw[1] = 0;
+ break;
+ }
+ ahg_hlen >>= 2;
+ tx->descs[0].qw[1] |=
+ (((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
+ << SDMA_DESC1_HEADER_INDEX_SHIFT) |
+ (((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK)
+ << SDMA_DESC1_HEADER_DWS_SHIFT) |
+ (((u64)mode & SDMA_DESC1_HEADER_MODE_MASK)
+ << SDMA_DESC1_HEADER_MODE_SHIFT) |
+ (((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK)
+ << SDMA_DESC1_HEADER_UPDATE1_SHIFT);
+ for (i = 0; i < (num_ahg - 1); i++) {
+ if (!shift && !(i & 2))
+ desc++;
+ tx->descs[desc].qw[!!(i & 2)] |=
+ (((u64)ahg[i + 1])
+ << shift);
+ shift = (shift + 32) & 63;
+ }
+}
+
+/**
+ * sdma_ahg_alloc - allocate an AHG entry
+ * @sde: engine to allocate from
+ *
+ * Return:
+ * 0-31 when successful, -EOPNOTSUPP if AHG is not enabled,
+ * -ENOSPC if an entry is not available
+ */
+int sdma_ahg_alloc(struct sdma_engine *sde)
+{
+ int nr;
+ int oldbit;
+
+ if (!sde) {
+ trace_hfi1_ahg_allocate(sde, -EINVAL);
+ return -EINVAL;
+ }
+ while (1) {
+ nr = ffz(READ_ONCE(sde->ahg_bits));
+ if (nr > 31) {
+ trace_hfi1_ahg_allocate(sde, -ENOSPC);
+ return -ENOSPC;
+ }
+ oldbit = test_and_set_bit(nr, &sde->ahg_bits);
+ if (!oldbit)
+ break;
+ cpu_relax();
+ }
+ trace_hfi1_ahg_allocate(sde, nr);
+ return nr;
+}
+
+/**
+ * sdma_ahg_free - free an AHG entry
+ * @sde: engine to return AHG entry
+ * @ahg_index: index to free
+ *
+ * This routine frees the indicated AHG entry.
+ */
+void sdma_ahg_free(struct sdma_engine *sde, int ahg_index)
+{
+ if (!sde)
+ return;
+ trace_hfi1_ahg_deallocate(sde, ahg_index);
+ if (ahg_index < 0 || ahg_index > 31)
+ return;
+ clear_bit(ahg_index, &sde->ahg_bits);
+}
+
+/*
+ * SPC freeze handling for SDMA engines. Called when the driver knows
+ * the SPC is going into a freeze but before the freeze is fully
+ * settled. Generally an error interrupt.
+ *
+ * This event will pull the engine out of running so no more entries can be
+ * added to the engine's queue.
+ */
+void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down)
+{
+ int i;
+ enum sdma_events event = link_down ? sdma_event_e85_link_down :
+ sdma_event_e80_hw_freeze;
+
+ /* set up the wait but do not wait here */
+ atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
+
+ /* tell all engines to stop running and wait */
+ for (i = 0; i < dd->num_sdma; i++)
+ sdma_process_event(&dd->per_sdma[i], event);
+
+ /* sdma_freeze() will wait for all engines to have stopped */
+}
+
+/*
+ * SPC freeze handling for SDMA engines. Called when the driver knows
+ * the SPC is fully frozen.
+ */
+void sdma_freeze(struct hfi1_devdata *dd)
+{
+ int i;
+ int ret;
+
+ /*
+ * Make sure all engines have moved out of the running state before
+ * continuing.
+ */
+ ret = wait_event_interruptible(dd->sdma_unfreeze_wq,
+ atomic_read(&dd->sdma_unfreeze_count) <=
+ 0);
+ /* interrupted or count is negative, then unloading - just exit */
+ if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0)
+ return;
+
+ /* set up the count for the next wait */
+ atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
+
+ /* tell all engines that the SPC is frozen, they can start cleaning */
+ for (i = 0; i < dd->num_sdma; i++)
+ sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen);
+
+ /*
+ * Wait for everyone to finish software clean before exiting. The
+ * software clean will read engine CSRs, so must be completed before
+ * the next step, which will clear the engine CSRs.
+ */
+ (void)wait_event_interruptible(dd->sdma_unfreeze_wq,
+ atomic_read(&dd->sdma_unfreeze_count) <= 0);
+ /* no need to check results - done no matter what */
+}
+
+/*
+ * SPC freeze handling for the SDMA engines. Called after the SPC is unfrozen.
+ *
+ * The SPC freeze acts like an SDMA halt and a hardware clean combined. All
+ * that is left is a software clean. We could do it after the SPC is fully
+ * frozen, but then we'd have to add another state to wait for the unfreeze.
+ * Instead, just defer the software clean until the unfreeze step.
+ */
+void sdma_unfreeze(struct hfi1_devdata *dd)
+{
+ int i;
+
+	/* tell all engines to start the freeze clean up */
+ for (i = 0; i < dd->num_sdma; i++)
+ sdma_process_event(&dd->per_sdma[i],
+ sdma_event_e82_hw_unfreeze);
+}
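+
+/*
+ * Illustrative freeze sequence as seen from the caller's side (a
+ * sketch only; the actual call sites live in the chip error handling
+ * code elsewhere in the driver):
+ *
+ *	sdma_freeze_notify(dd, link_down);   (pull engines out of running)
+ *	... wait for the SPC freeze to settle ...
+ *	sdma_freeze(dd);                     (engines do a software clean)
+ *	... SPC is unfrozen ...
+ *	sdma_unfreeze(dd);                   (engines restart)
+ */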
+
+/**
+ * _sdma_engine_progress_schedule() - schedule progress on engine
+ * @sde: sdma_engine to schedule progress
+ *
+ */
+void _sdma_engine_progress_schedule(
+ struct sdma_engine *sde)
+{
+ trace_hfi1_sdma_engine_progress(sde, sde->progress_mask);
+ /* assume we have selected a good cpu */
+ write_csr(sde->dd,
+ CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)),
+ sde->progress_mask);
+}
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
new file mode 100644
index 000000000000..91dfd5d0c419
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -0,0 +1,1055 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2015 - 2018 Intel Corporation.
+ */
+
+#ifndef _HFI1_SDMA_H
+#define _HFI1_SDMA_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <asm/byteorder.h>
+#include <linux/workqueue.h>
+#include <linux/rculist.h>
+
+#include "hfi.h"
+#include "verbs.h"
+#include "sdma_txreq.h"
+
+/* Hardware limit */
+#define MAX_DESC 64
+/* Hardware limit for SDMA packet size */
+#define MAX_SDMA_PKT_SIZE ((16 * 1024) - 1)
+
+#define SDMA_MAP_NONE 0
+#define SDMA_MAP_SINGLE 1
+#define SDMA_MAP_PAGE 2
+
+#define SDMA_AHG_VALUE_MASK 0xffff
+#define SDMA_AHG_VALUE_SHIFT 0
+#define SDMA_AHG_INDEX_MASK 0xf
+#define SDMA_AHG_INDEX_SHIFT 16
+#define SDMA_AHG_FIELD_LEN_MASK 0xf
+#define SDMA_AHG_FIELD_LEN_SHIFT 20
+#define SDMA_AHG_FIELD_START_MASK 0x1f
+#define SDMA_AHG_FIELD_START_SHIFT 24
+#define SDMA_AHG_UPDATE_ENABLE_MASK 0x1
+#define SDMA_AHG_UPDATE_ENABLE_SHIFT 31
+
+/* AHG modes */
+
+/*
+ * Be aware the ordering and values
+ * for SDMA_AHG_APPLY_UPDATE[123]
+ * are assumed in generating a skip
+ * count in submit_tx() in sdma.c
+ */
+#define SDMA_AHG_NO_AHG 0
+#define SDMA_AHG_COPY 1
+#define SDMA_AHG_APPLY_UPDATE1 2
+#define SDMA_AHG_APPLY_UPDATE2 3
+#define SDMA_AHG_APPLY_UPDATE3 4
+
+/*
+ * Bits defined in the send DMA descriptor.
+ */
+#define SDMA_DESC0_FIRST_DESC_FLAG BIT_ULL(63)
+#define SDMA_DESC0_LAST_DESC_FLAG BIT_ULL(62)
+#define SDMA_DESC0_BYTE_COUNT_SHIFT 48
+#define SDMA_DESC0_BYTE_COUNT_WIDTH 14
+#define SDMA_DESC0_BYTE_COUNT_MASK \
+ ((1ULL << SDMA_DESC0_BYTE_COUNT_WIDTH) - 1)
+#define SDMA_DESC0_BYTE_COUNT_SMASK \
+ (SDMA_DESC0_BYTE_COUNT_MASK << SDMA_DESC0_BYTE_COUNT_SHIFT)
+#define SDMA_DESC0_PHY_ADDR_SHIFT 0
+#define SDMA_DESC0_PHY_ADDR_WIDTH 48
+#define SDMA_DESC0_PHY_ADDR_MASK \
+ ((1ULL << SDMA_DESC0_PHY_ADDR_WIDTH) - 1)
+#define SDMA_DESC0_PHY_ADDR_SMASK \
+ (SDMA_DESC0_PHY_ADDR_MASK << SDMA_DESC0_PHY_ADDR_SHIFT)
+
+#define SDMA_DESC1_HEADER_UPDATE1_SHIFT 32
+#define SDMA_DESC1_HEADER_UPDATE1_WIDTH 32
+#define SDMA_DESC1_HEADER_UPDATE1_MASK \
+ ((1ULL << SDMA_DESC1_HEADER_UPDATE1_WIDTH) - 1)
+#define SDMA_DESC1_HEADER_UPDATE1_SMASK \
+ (SDMA_DESC1_HEADER_UPDATE1_MASK << SDMA_DESC1_HEADER_UPDATE1_SHIFT)
+#define SDMA_DESC1_HEADER_MODE_SHIFT 13
+#define SDMA_DESC1_HEADER_MODE_WIDTH 3
+#define SDMA_DESC1_HEADER_MODE_MASK \
+ ((1ULL << SDMA_DESC1_HEADER_MODE_WIDTH) - 1)
+#define SDMA_DESC1_HEADER_MODE_SMASK \
+ (SDMA_DESC1_HEADER_MODE_MASK << SDMA_DESC1_HEADER_MODE_SHIFT)
+#define SDMA_DESC1_HEADER_INDEX_SHIFT 8
+#define SDMA_DESC1_HEADER_INDEX_WIDTH 5
+#define SDMA_DESC1_HEADER_INDEX_MASK \
+ ((1ULL << SDMA_DESC1_HEADER_INDEX_WIDTH) - 1)
+#define SDMA_DESC1_HEADER_INDEX_SMASK \
+ (SDMA_DESC1_HEADER_INDEX_MASK << SDMA_DESC1_HEADER_INDEX_SHIFT)
+#define SDMA_DESC1_HEADER_DWS_SHIFT 4
+#define SDMA_DESC1_HEADER_DWS_WIDTH 4
+#define SDMA_DESC1_HEADER_DWS_MASK \
+ ((1ULL << SDMA_DESC1_HEADER_DWS_WIDTH) - 1)
+#define SDMA_DESC1_HEADER_DWS_SMASK \
+ (SDMA_DESC1_HEADER_DWS_MASK << SDMA_DESC1_HEADER_DWS_SHIFT)
+#define SDMA_DESC1_GENERATION_SHIFT 2
+#define SDMA_DESC1_GENERATION_WIDTH 2
+#define SDMA_DESC1_GENERATION_MASK \
+ ((1ULL << SDMA_DESC1_GENERATION_WIDTH) - 1)
+#define SDMA_DESC1_GENERATION_SMASK \
+ (SDMA_DESC1_GENERATION_MASK << SDMA_DESC1_GENERATION_SHIFT)
+#define SDMA_DESC1_INT_REQ_FLAG BIT_ULL(1)
+#define SDMA_DESC1_HEAD_TO_HOST_FLAG BIT_ULL(0)
+
+enum sdma_states {
+ sdma_state_s00_hw_down,
+ sdma_state_s10_hw_start_up_halt_wait,
+ sdma_state_s15_hw_start_up_clean_wait,
+ sdma_state_s20_idle,
+ sdma_state_s30_sw_clean_up_wait,
+ sdma_state_s40_hw_clean_up_wait,
+ sdma_state_s50_hw_halt_wait,
+ sdma_state_s60_idle_halt_wait,
+ sdma_state_s80_hw_freeze,
+ sdma_state_s82_freeze_sw_clean,
+ sdma_state_s99_running,
+};
+
+enum sdma_events {
+ sdma_event_e00_go_hw_down,
+ sdma_event_e10_go_hw_start,
+ sdma_event_e15_hw_halt_done,
+ sdma_event_e25_hw_clean_up_done,
+ sdma_event_e30_go_running,
+ sdma_event_e40_sw_cleaned,
+ sdma_event_e50_hw_cleaned,
+ sdma_event_e60_hw_halted,
+ sdma_event_e70_go_idle,
+ sdma_event_e80_hw_freeze,
+ sdma_event_e81_hw_frozen,
+ sdma_event_e82_hw_unfreeze,
+ sdma_event_e85_link_down,
+ sdma_event_e90_sw_halted,
+};
+
+struct sdma_set_state_action {
+ unsigned op_enable:1;
+ unsigned op_intenable:1;
+ unsigned op_halt:1;
+ unsigned op_cleanup:1;
+ unsigned go_s99_running_tofalse:1;
+ unsigned go_s99_running_totrue:1;
+};
+
+struct sdma_state {
+ struct kref kref;
+ struct completion comp;
+ enum sdma_states current_state;
+ unsigned current_op;
+ unsigned go_s99_running;
+ /* debugging/development */
+ enum sdma_states previous_state;
+ unsigned previous_op;
+ enum sdma_events last_event;
+};
+
+/**
+ * DOC: sdma exported routines
+ *
+ * These sdma routines fit into three categories:
+ * - The SDMA API for building and submitting packets
+ * to the ring
+ *
+ * - Initialization and tear down routines to build up
+ * and tear down SDMA
+ *
+ * - ISR entrances to handle interrupts, state changes
+ * and errors
+ */
+
+/**
+ * DOC: sdma PSM/verbs API
+ *
+ * The sdma API is designed to be used by both PSM
+ * and verbs to supply packets to the SDMA ring.
+ *
+ * The usage of the API is as follows:
+ *
+ * Embed a struct iowait in the QP or
+ * PQ. The iowait should be initialized with a
+ * call to iowait_init().
+ *
+ * The user of the API should create an allocation method
+ * for their version of the txreq. slabs, pre-allocated lists,
+ * and dma pools can be used. Once the user's overload of
+ * the sdma_txreq has been allocated, the sdma_txreq member
+ * must be initialized with sdma_txinit() or sdma_txinit_ahg().
+ *
+ * The txreq must be declared with the sdma_txreq first.
+ *
+ * The tx request, once initialized, is manipulated with calls to
+ * sdma_txadd_daddr(), sdma_txadd_page(), or sdma_txadd_kvaddr()
+ * for each disjoint memory location. It is the user's responsibility
+ * to understand the packet boundaries and page boundaries to do the
+ * appropriate number of sdma_txadd_* calls. The user
+ * must be prepared to deal with failures from these routines due to
+ * either memory allocation or dma_mapping failures.
+ *
+ * The mapping specifics for each memory location are recorded
+ * in the tx. Memory locations added with sdma_txadd_page()
+ * and sdma_txadd_kvaddr() are automatically mapped when added
+ * to the tx and unmapped as part of the progress processing in the
+ * SDMA interrupt handling.
+ *
+ * sdma_txadd_daddr() is used to add a dma_addr_t memory location to the
+ * tx. An example of a use case would be a pre-allocated
+ * set of headers allocated via dma_pool_alloc() or
+ * dma_alloc_coherent(). For these memory locations, it
+ * is the responsibility of the user to handle that unmapping.
+ * (This would usually be at an unload or job termination.)
+ *
+ * The routine sdma_send_txreq() is used to submit
+ * a tx to the ring after the appropriate number of
+ * sdma_txadd_* have been done.
+ *
+ * If it is desired to send a burst of sdma_txreqs, sdma_send_txlist()
+ * can be used to submit a list of packets.
+ *
+ * The user is free to use the link overhead in the struct sdma_txreq as
+ * long as the tx isn't in flight.
+ *
+ * The extreme degenerate case of the number of descriptors
+ * exceeding the ring size is automatically handled as
+ * memory locations are added. An overflow of the descriptor
+ * array that is part of the sdma_txreq is also automatically
+ * handled.
+ *
+ */
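+
+/*
+ * Illustrative submission sketch built on the calls above. The
+ * wrapper structure, buffer names, and completion callback are
+ * hypothetical and error handling is abbreviated:
+ *
+ *	struct my_txreq {
+ *		struct sdma_txreq txreq;   (must be the first member)
+ *		...
+ *	};
+ *
+ *	ret = sdma_txinit(&tx->txreq, 0, hdr_len + data_len, my_complete);
+ *	if (!ret)
+ *		ret = sdma_txadd_kvaddr(dd, &tx->txreq, hdr, hdr_len);
+ *	if (!ret)
+ *		ret = sdma_txadd_page(dd, &tx->txreq, page, offset, data_len,
+ *				      NULL, NULL, NULL);
+ *	if (!ret)
+ *		ret = sdma_send_txreq(sde, wait, &tx->txreq, pkts_sent);
+ *
+ * where wait is the caller's struct iowait_work and my_complete()
+ * matches the void (*)(struct sdma_txreq *, int) callback signature.
+ */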
+
+/**
+ * DOC: Infrastructure calls
+ *
+ * sdma_init() is used to initialize data structures and
+ * CSRs for the desired number of SDMA engines.
+ *
+ * sdma_start() is used to kick the SDMA engines initialized
+ * with sdma_init(). Interrupts must be enabled at this
+ * point since aspects of the state machine are interrupt
+ * driven.
+ *
+ * sdma_engine_error() and sdma_engine_interrupt() are
+ * entrances for interrupts.
+ *
+ * sdma_map_init() is for the management of the mapping
+ * table when the number of vls is changed.
+ *
+ */
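+
+/*
+ * Illustrative bring-up ordering (a sketch of the constraints noted
+ * above, not the driver's actual initialization code):
+ *
+ *	ret = sdma_init(dd, port);     (allocate engines, set up CSRs)
+ *	... enable interrupts ...
+ *	sdma_start(dd);                (kick the engine state machines)
+ *	...
+ *	sdma_map_init(dd, port, num_vls, vl_engines);   (on a vl change)
+ */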
+
+/*
+ * struct hw_sdma_desc - raw 128 bit SDMA descriptor
+ *
+ * This is the raw descriptor in the SDMA ring
+ */
+struct hw_sdma_desc {
+ /* private: don't use directly */
+ __le64 qw[2];
+};
+
+/**
+ * struct sdma_engine - Data pertaining to each SDMA engine.
+ * @dd: a back-pointer to the device data
+ * @ppd: per port back-pointer
+ * @imask: mask for irq manipulation
+ * @idle_mask: mask for determining if an interrupt is due to sdma_idle
+ *
+ * This structure has the state for each sdma_engine.
+ *
+ * Accessing non-public fields is not supported
+ * since the private members are subject to change.
+ */
+struct sdma_engine {
+ /* read mostly */
+ struct hfi1_devdata *dd;
+ struct hfi1_pportdata *ppd;
+ /* private: */
+ void __iomem *tail_csr;
+ u64 imask; /* clear interrupt mask */
+ u64 idle_mask;
+ u64 progress_mask;
+ u64 int_mask;
+ /* private: */
+ volatile __le64 *head_dma; /* DMA'ed by chip */
+ /* private: */
+ dma_addr_t head_phys;
+ /* private: */
+ struct hw_sdma_desc *descq;
+ /* private: */
+ unsigned descq_full_count;
+ struct sdma_txreq **tx_ring;
+ /* private: */
+ dma_addr_t descq_phys;
+ /* private */
+ u32 sdma_mask;
+ /* private */
+ struct sdma_state state;
+ /* private */
+ int cpu;
+ /* private: */
+ u8 sdma_shift;
+ /* private: */
+ u8 this_idx; /* zero relative engine */
+ /* protect changes to senddmactrl shadow */
+ spinlock_t senddmactrl_lock;
+ /* private: */
+ u64 p_senddmactrl; /* shadow per-engine SendDmaCtrl */
+
+ /* read/write using tail_lock */
+ spinlock_t tail_lock ____cacheline_aligned_in_smp;
+#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
+ /* private: */
+ u64 tail_sn;
+#endif
+ /* private: */
+ u32 descq_tail;
+ /* private: */
+ unsigned long ahg_bits;
+ /* private: */
+ u16 desc_avail;
+ /* private: */
+ u16 tx_tail;
+ /* private: */
+ u16 descq_cnt;
+
+ /* read/write using head_lock */
+ /* private: */
+ seqlock_t head_lock ____cacheline_aligned_in_smp;
+#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
+ /* private: */
+ u64 head_sn;
+#endif
+ /* private: */
+ u32 descq_head;
+ /* private: */
+ u16 tx_head;
+ /* private: */
+ u64 last_status;
+ /* private */
+ u64 err_cnt;
+ /* private */
+ u64 sdma_int_cnt;
+ u64 idle_int_cnt;
+ u64 progress_int_cnt;
+
+ /* private: */
+ seqlock_t waitlock;
+ struct list_head dmawait;
+
+ /* CONFIG SDMA for now, just blindly duplicate */
+ /* private: */
+ struct tasklet_struct sdma_hw_clean_up_task
+ ____cacheline_aligned_in_smp;
+
+ /* private: */
+ struct tasklet_struct sdma_sw_clean_up_task
+ ____cacheline_aligned_in_smp;
+ /* private: */
+ struct work_struct err_halt_worker;
+ /* private */
+ struct timer_list err_progress_check_timer;
+ u32 progress_check_head;
+ /* private: */
+ struct work_struct flush_worker;
+ /* protect flush list */
+ spinlock_t flushlist_lock;
+ /* private: */
+ struct list_head flushlist;
+ struct cpumask cpu_mask;
+ struct kobject kobj;
+ u32 msix_intr;
+};
+
+int sdma_init(struct hfi1_devdata *dd, u8 port);
+void sdma_start(struct hfi1_devdata *dd);
+void sdma_exit(struct hfi1_devdata *dd);
+void sdma_clean(struct hfi1_devdata *dd, size_t num_engines);
+void sdma_all_running(struct hfi1_devdata *dd);
+void sdma_freeze_notify(struct hfi1_devdata *dd, int go_idle);
+void sdma_freeze(struct hfi1_devdata *dd);
+void sdma_unfreeze(struct hfi1_devdata *dd);
+void sdma_wait(struct hfi1_devdata *dd);
+
+/**
+ * sdma_empty() - idle engine test
+ * @sde: sdma engine
+ *
+ * Currently used by verbs as a latency optimization.
+ *
+ * Return:
+ * 1 - empty, 0 - non-empty
+ */
+static inline int sdma_empty(struct sdma_engine *sde)
+{
+ return sde->descq_tail == sde->descq_head;
+}
+
+static inline u16 sdma_descq_freecnt(struct sdma_engine *sde)
+{
+ return sde->descq_cnt -
+ (sde->descq_tail -
+ READ_ONCE(sde->descq_head)) - 1;
+}
+
+static inline u16 sdma_descq_inprocess(struct sdma_engine *sde)
+{
+ return sde->descq_cnt - sdma_descq_freecnt(sde);
+}
+
+/*
+ * Either head_lock or tail_lock is required to see
+ * a steady state.
+ */
+static inline int __sdma_running(struct sdma_engine *engine)
+{
+ return engine->state.current_state == sdma_state_s99_running;
+}
+
+/**
+ * sdma_running() - state suitability test
+ * @engine: sdma engine
+ *
+ * sdma_running probes the internal state to determine if it is suitable
+ * for submitting packets.
+ *
+ * Return:
+ * 1 - ok to submit, 0 - not ok to submit
+ *
+ */
+static inline int sdma_running(struct sdma_engine *engine)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&engine->tail_lock, flags);
+ ret = __sdma_running(engine);
+ spin_unlock_irqrestore(&engine->tail_lock, flags);
+ return ret;
+}
+
+void _sdma_txreq_ahgadd(
+ struct sdma_txreq *tx,
+ u8 num_ahg,
+ u8 ahg_entry,
+ u32 *ahg,
+ u8 ahg_hlen);
+
+/**
+ * sdma_txinit_ahg() - initialize an sdma_txreq struct with AHG
+ * @tx: tx request to initialize
+ * @flags: flags to key last descriptor additions
+ * @tlen: total packet length (pbc + headers + data)
+ * @ahg_entry: ahg entry to use (0 - 31)
+ * @num_ahg: ahg descriptor for first descriptor (0 - 9)
+ * @ahg: array of AHG descriptors (up to 9 entries)
+ * @ahg_hlen: number of bytes from ASIC entry to use
+ * @cb: callback
+ *
+ * The allocation of the sdma_txreq and its enclosing structure is user
+ * dependent. This routine must be called to initialize the user independent
+ * fields.
+ *
+ * The currently supported flags are SDMA_TXREQ_F_URGENT,
+ * SDMA_TXREQ_F_AHG_COPY, and SDMA_TXREQ_F_USE_AHG.
+ *
+ * SDMA_TXREQ_F_URGENT is used for latency sensitive situations where the
+ * completion is desired as soon as possible.
+ *
+ * SDMA_TXREQ_F_AHG_COPY causes the header in the first descriptor to be
+ * copied to chip entry. SDMA_TXREQ_F_USE_AHG causes the code to add in
+ * the AHG descriptors into the first 1 to 3 descriptors.
+ *
+ * Completions of submitted requests can be gotten on selected
+ * txreqs by giving a completion routine callback to sdma_txinit() or
+ * sdma_txinit_ahg(). The environment in which the callback runs
+ * can be from an ISR, a tasklet, or a thread, so no sleeping
+ * kernel routines can be used. Aspects of the sdma ring may
+ * be locked so care should be taken with locking.
+ *
+ * The callback pointer can be NULL to avoid any callback for the packet
+ * being submitted. The callback will be provided this tx, a status, and a flag.
+ *
+ * The status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
+ * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
+ *
+ * The flag, if the iowait had been used, indicates the iowait
+ * sdma_busy count has reached zero.
+ *
+ * The user data portion of tlen should be precise. The sdma_txadd_* entrances
+ * will pad with a descriptor that references 1 - 3 bytes when the number of
+ * bytes specified in tlen has been supplied to the sdma_txreq.
+ *
+ * ahg_hlen is used to determine the number of on-chip entry bytes to
+ * use as the header. This is for cases where the stored header is
+ * larger than the header to be used in a packet. This is typical
+ * for verbs where an RDMA_WRITE_FIRST is larger than the packet in
+ * an RDMA_WRITE_MIDDLE.
+ *
+ */
+static inline int sdma_txinit_ahg(
+ struct sdma_txreq *tx,
+ u16 flags,
+ u16 tlen,
+ u8 ahg_entry,
+ u8 num_ahg,
+ u32 *ahg,
+ u8 ahg_hlen,
+ void (*cb)(struct sdma_txreq *, int))
+{
+ if (tlen == 0)
+ return -ENODATA;
+ if (tlen > MAX_SDMA_PKT_SIZE)
+ return -EMSGSIZE;
+ tx->desc_limit = ARRAY_SIZE(tx->descs);
+ tx->descp = &tx->descs[0];
+ INIT_LIST_HEAD(&tx->list);
+ tx->num_desc = 0;
+ tx->flags = flags;
+ tx->complete = cb;
+ tx->coalesce_buf = NULL;
+ tx->wait = NULL;
+ tx->packet_len = tlen;
+ tx->tlen = tx->packet_len;
+ tx->descs[0].qw[0] = SDMA_DESC0_FIRST_DESC_FLAG;
+ tx->descs[0].qw[1] = 0;
+ if (flags & SDMA_TXREQ_F_AHG_COPY)
+ tx->descs[0].qw[1] |=
+ (((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
+ << SDMA_DESC1_HEADER_INDEX_SHIFT) |
+ (((u64)SDMA_AHG_COPY & SDMA_DESC1_HEADER_MODE_MASK)
+ << SDMA_DESC1_HEADER_MODE_SHIFT);
+ else if (flags & SDMA_TXREQ_F_USE_AHG && num_ahg)
+ _sdma_txreq_ahgadd(tx, num_ahg, ahg_entry, ahg, ahg_hlen);
+ return 0;
+}
+
+/**
+ * sdma_txinit() - initialize an sdma_txreq struct (no AHG)
+ * @tx: tx request to initialize
+ * @flags: flags to key last descriptor additions
+ * @tlen: total packet length (pbc + headers + data)
+ * @cb: callback pointer
+ *
+ * The allocation of the sdma_txreq and its enclosing structure is user
+ * dependent. This routine must be called to initialize the user
+ * independent fields.
+ *
+ * The currently supported flag is SDMA_TXREQ_F_URGENT.
+ *
+ * SDMA_TXREQ_F_URGENT is used for latency sensitive situations where the
+ * completion is desired as soon as possible.
+ *
+ * Completions of submitted requests can be gotten on selected
+ * txreqs by giving a completion routine callback to sdma_txinit() or
+ * sdma_txinit_ahg(). The environment in which the callback runs
+ * can be from an ISR, a tasklet, or a thread, so no sleeping
+ * kernel routines can be used. The head size of the sdma ring may
+ * be locked so care should be taken with locking.
+ *
+ * The callback pointer can be NULL to avoid any callback for the packet
+ * being submitted.
+ *
+ * The callback, if non-NULL, will be provided this tx and a status. The
+ * status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
+ * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
+ *
+ */
+static inline int sdma_txinit(
+ struct sdma_txreq *tx,
+ u16 flags,
+ u16 tlen,
+ void (*cb)(struct sdma_txreq *, int))
+{
+ return sdma_txinit_ahg(tx, flags, tlen, 0, 0, NULL, 0, cb);
+}
+
+/* helpers - don't use */
+static inline int sdma_mapping_type(struct sdma_desc *d)
+{
+ return (d->qw[1] & SDMA_DESC1_GENERATION_SMASK)
+ >> SDMA_DESC1_GENERATION_SHIFT;
+}
+
+static inline size_t sdma_mapping_len(struct sdma_desc *d)
+{
+ return (d->qw[0] & SDMA_DESC0_BYTE_COUNT_SMASK)
+ >> SDMA_DESC0_BYTE_COUNT_SHIFT;
+}
+
+static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d)
+{
+ return (d->qw[0] & SDMA_DESC0_PHY_ADDR_SMASK)
+ >> SDMA_DESC0_PHY_ADDR_SHIFT;
+}
+
+static inline void make_tx_sdma_desc(
+ struct sdma_txreq *tx,
+ int type,
+ dma_addr_t addr,
+ size_t len,
+ void *pinning_ctx,
+ void (*ctx_get)(void *),
+ void (*ctx_put)(void *))
+{
+ struct sdma_desc *desc = &tx->descp[tx->num_desc];
+
+ if (!tx->num_desc) {
+ /* qw[0] zero; qw[1] first, ahg mode already in from init */
+ desc->qw[1] |= ((u64)type & SDMA_DESC1_GENERATION_MASK)
+ << SDMA_DESC1_GENERATION_SHIFT;
+ } else {
+ desc->qw[0] = 0;
+ desc->qw[1] = ((u64)type & SDMA_DESC1_GENERATION_MASK)
+ << SDMA_DESC1_GENERATION_SHIFT;
+ }
+ desc->qw[0] |= (((u64)addr & SDMA_DESC0_PHY_ADDR_MASK)
+ << SDMA_DESC0_PHY_ADDR_SHIFT) |
+ (((u64)len & SDMA_DESC0_BYTE_COUNT_MASK)
+ << SDMA_DESC0_BYTE_COUNT_SHIFT);
+
+ desc->pinning_ctx = pinning_ctx;
+ desc->ctx_put = ctx_put;
+ if (pinning_ctx && ctx_get)
+ ctx_get(pinning_ctx);
+}
+
+/* helper to extend txreq */
+int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
+ int type, void *kvaddr, struct page *page,
+ unsigned long offset, u16 len);
+int _pad_sdma_tx_descs(struct hfi1_devdata *, struct sdma_txreq *);
+void __sdma_txclean(struct hfi1_devdata *, struct sdma_txreq *);
+
+static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
+{
+ if (tx->num_desc)
+ __sdma_txclean(dd, tx);
+}
+
+/* helpers used by public routines */
+static inline void _sdma_close_tx(struct hfi1_devdata *dd,
+ struct sdma_txreq *tx)
+{
+ u16 last_desc = tx->num_desc - 1;
+
+ tx->descp[last_desc].qw[0] |= SDMA_DESC0_LAST_DESC_FLAG;
+ tx->descp[last_desc].qw[1] |= dd->default_desc1;
+ if (tx->flags & SDMA_TXREQ_F_URGENT)
+ tx->descp[last_desc].qw[1] |= (SDMA_DESC1_HEAD_TO_HOST_FLAG |
+ SDMA_DESC1_INT_REQ_FLAG);
+}
+
+static inline int _sdma_txadd_daddr(
+ struct hfi1_devdata *dd,
+ int type,
+ struct sdma_txreq *tx,
+ dma_addr_t addr,
+ u16 len,
+ void *pinning_ctx,
+ void (*ctx_get)(void *),
+ void (*ctx_put)(void *))
+{
+ int rval = 0;
+
+ make_tx_sdma_desc(
+ tx,
+ type,
+ addr, len,
+ pinning_ctx, ctx_get, ctx_put);
+ WARN_ON(len > tx->tlen);
+ tx->num_desc++;
+ tx->tlen -= len;
+ /* special cases for last */
+ if (!tx->tlen) {
+ if (tx->packet_len & (sizeof(u32) - 1)) {
+ rval = _pad_sdma_tx_descs(dd, tx);
+ if (rval)
+ return rval;
+ } else {
+ _sdma_close_tx(dd, tx);
+ }
+ }
+ return rval;
+}
+
+/**
+ * sdma_txadd_page() - add a page to the sdma_txreq
+ * @dd: the device to use for mapping
+ * @tx: tx request to which the page is added
+ * @page: page to map
+ * @offset: offset within the page
+ * @len: length in bytes
+ * @pinning_ctx: context to be stored on struct sdma_desc .pinning_ctx. Not
+ * added if coalesce buffer is used. E.g. pointer to pinned-page
+ * cache entry for the sdma_desc.
+ * @ctx_get: optional function to take reference to @pinning_ctx. Not called if
+ * @pinning_ctx is NULL.
+ * @ctx_put: optional function to release reference to @pinning_ctx after
+ * sdma_desc completes. May be called in interrupt context so must
+ * not sleep. Not called if @pinning_ctx is NULL.
+ *
+ * This is used to add a page/offset/length descriptor.
+ *
+ * The mapping/unmapping of the page/offset/len is automatically handled.
+ *
+ * Return:
+ * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't
+ * extend/coalesce descriptor array
+ */
+static inline int sdma_txadd_page(
+ struct hfi1_devdata *dd,
+ struct sdma_txreq *tx,
+ struct page *page,
+ unsigned long offset,
+ u16 len,
+ void *pinning_ctx,
+ void (*ctx_get)(void *),
+ void (*ctx_put)(void *))
+{
+ dma_addr_t addr;
+ int rval;
+
+ if ((unlikely(tx->num_desc == tx->desc_limit))) {
+ rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_PAGE,
+ NULL, page, offset, len);
+ if (rval <= 0)
+ return rval;
+ }
+
+ addr = dma_map_page(
+ &dd->pcidev->dev,
+ page,
+ offset,
+ len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
+ __sdma_txclean(dd, tx);
+ return -ENOSPC;
+ }
+
+ return _sdma_txadd_daddr(dd, SDMA_MAP_PAGE, tx, addr, len,
+ pinning_ctx, ctx_get, ctx_put);
+}
+
+/**
+ * sdma_txadd_daddr() - add a dma address to the sdma_txreq
+ * @dd: the device to use for mapping
+ * @tx: sdma_txreq to which the page is added
+ * @addr: dma address mapped by caller
+ * @len: length in bytes
+ *
+ * This is used to add a descriptor for memory that is already dma mapped.
+ *
+ * In this case, there is no unmapping as part of the progress processing for
+ * this memory location.
+ *
+ * Return:
+ * 0 - success, -ENOMEM - couldn't extend descriptor array
+ */
+
+static inline int sdma_txadd_daddr(
+ struct hfi1_devdata *dd,
+ struct sdma_txreq *tx,
+ dma_addr_t addr,
+ u16 len)
+{
+ int rval;
+
+ if ((unlikely(tx->num_desc == tx->desc_limit))) {
+ rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_NONE,
+ NULL, NULL, 0, 0);
+ if (rval <= 0)
+ return rval;
+ }
+
+ return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len,
+ NULL, NULL, NULL);
+}
+
+/**
+ * sdma_txadd_kvaddr() - add a kernel virtual address to sdma_txreq
+ * @dd: the device to use for mapping
+ * @tx: sdma_txreq to which the page is added
+ * @kvaddr: the kernel virtual address
+ * @len: length in bytes
+ *
+ * This is used to add a descriptor referenced by the indicated kvaddr and
+ * len.
+ *
+ * The mapping/unmapping of the kvaddr and len is automatically handled.
+ *
+ * Return:
+ * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't extend/coalesce
+ * descriptor array
+ */
+static inline int sdma_txadd_kvaddr(
+ struct hfi1_devdata *dd,
+ struct sdma_txreq *tx,
+ void *kvaddr,
+ u16 len)
+{
+ dma_addr_t addr;
+ int rval;
+
+ if ((unlikely(tx->num_desc == tx->desc_limit))) {
+ rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_SINGLE,
+ kvaddr, NULL, 0, len);
+ if (rval <= 0)
+ return rval;
+ }
+
+ addr = dma_map_single(
+ &dd->pcidev->dev,
+ kvaddr,
+ len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
+ __sdma_txclean(dd, tx);
+ return -ENOSPC;
+ }
+
+ return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx, addr, len,
+ NULL, NULL, NULL);
+}
+
+struct iowait_work;
+
+int sdma_send_txreq(struct sdma_engine *sde,
+ struct iowait_work *wait,
+ struct sdma_txreq *tx,
+ bool pkts_sent);
+int sdma_send_txlist(struct sdma_engine *sde,
+ struct iowait_work *wait,
+ struct list_head *tx_list,
+ u16 *count_out);
+
+int sdma_ahg_alloc(struct sdma_engine *sde);
+void sdma_ahg_free(struct sdma_engine *sde, int ahg_index);
+
+/**
+ * sdma_build_ahg_descriptor - build ahg descriptor
+ * @data: 16 bit value to place in the header
+ * @dwindex: dword index of the header field to update
+ * @startbit: start bit of the field within the dword
+ * @bits: width of the field in bits
+ *
+ * Build and return a 32 bit descriptor.
+ */
+static inline u32 sdma_build_ahg_descriptor(
+ u16 data,
+ u8 dwindex,
+ u8 startbit,
+ u8 bits)
+{
+ return (u32)(1UL << SDMA_AHG_UPDATE_ENABLE_SHIFT |
+ ((startbit & SDMA_AHG_FIELD_START_MASK) <<
+ SDMA_AHG_FIELD_START_SHIFT) |
+ ((bits & SDMA_AHG_FIELD_LEN_MASK) <<
+ SDMA_AHG_FIELD_LEN_SHIFT) |
+ ((dwindex & SDMA_AHG_INDEX_MASK) <<
+ SDMA_AHG_INDEX_SHIFT) |
+ ((data & SDMA_AHG_VALUE_MASK) <<
+ SDMA_AHG_VALUE_SHIFT));
+}
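+
+/*
+ * Example (illustrative values only): build a descriptor that updates
+ * an 8 bit field starting at bit 16 of header dword 2 with value, and
+ * place it in the update array handed to sdma_txinit_ahg():
+ *
+ *	ahg[n] = sdma_build_ahg_descriptor(value, 2, 16, 8);
+ */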
+
+/**
+ * sdma_progress - use seq number to detect head progress
+ * @sde: sdma_engine to check
+ * @seq: base seq count
+ * @tx: txreq for which we need to check descriptor availability
+ *
+ * This is used in the appropriate spot in the sleep routine
+ * to check for potential ring progress. This routine gets the
+ * seqcount before queuing the iowait structure for progress.
+ *
+ * If the seqcount indicates that progress needs to be checked,
+ * re-submission is detected by checking whether the descriptor
+ * queue has enough descriptor for the txreq.
+ */
+static inline unsigned sdma_progress(struct sdma_engine *sde, unsigned seq,
+ struct sdma_txreq *tx)
+{
+ if (read_seqretry(&sde->head_lock, seq)) {
+ sde->desc_avail = sdma_descq_freecnt(sde);
+ if (tx->num_desc > sde->desc_avail)
+ return 0;
+ return 1;
+ }
+ return 0;
+}
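+
+/*
+ * Typical use from an iowait sleep callback (sketch; seq is the
+ * seqcount captured before deciding to queue the wait):
+ *
+ *	if (sdma_progress(sde, seq, tx))
+ *		return -EAGAIN;    (the ring made progress, retry)
+ *	... otherwise queue the iowait on sde->dmawait ...
+ */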
+
+/* for use by interrupt handling */
+void sdma_engine_error(struct sdma_engine *sde, u64 status);
+void sdma_engine_interrupt(struct sdma_engine *sde, u64 status);
+
+/*
+ *
+ * The diagram below details the relationship of the mapping structures
+ *
+ * Since the mapping now allows for non-uniform engines per vl, the
+ * number of engines for a vl is either the vl_engines[vl] or
+ * a computation based on num_sdma/num_vls:
+ *
+ * For example:
+ * nactual = vl_engines ? vl_engines[vl] : num_sdma/num_vls
+ *
+ * n = roundup to next highest power of 2 using nactual
+ *
+ * In the case where num_sdma/num_vls doesn't divide
+ * evenly, the extras are added from the last vl downward.
+ *
+ * For the case where n > nactual, the engines are assigned
+ * in a round robin fashion wrapping back to the first engine
+ * for a particular vl.
+ *
+ * dd->sdma_map
+ * | sdma_map_elem[0]
+ * | +--------------------+
+ * v | mask |
+ * sdma_vl_map |--------------------|
+ * +--------------------------+ | sde[0] -> eng 1 |
+ * | list (RCU) | |--------------------|
+ * |--------------------------| ->| sde[1] -> eng 2 |
+ * | mask | --/ |--------------------|
+ * |--------------------------| -/ | * |
+ * | actual_vls (max 8) | -/ |--------------------|
+ * |--------------------------| --/ | sde[n-1] -> eng n |
+ * | vls (max 8) | -/ +--------------------+
+ * |--------------------------| --/
+ * | map[0] |-/
+ * |--------------------------| +---------------------+
+ * | map[1] |--- | mask |
+ * |--------------------------| \---- |---------------------|
+ * | * | \-- | sde[0] -> eng 1+n |
+ * | * | \---- |---------------------|
+ * | * | \->| sde[1] -> eng 2+n |
+ * |--------------------------| |---------------------|
+ * | map[vls - 1] |- | * |
+ * +--------------------------+ \- |---------------------|
+ * \- | sde[m-1] -> eng m+n |
+ * \ +---------------------+
+ * \-
+ * \
+ * \- +----------------------+
+ * \- | mask |
+ * \ |----------------------|
+ * \- | sde[0] -> eng 1+m+n |
+ * \- |----------------------|
+ * >| sde[1] -> eng 2+m+n |
+ * |----------------------|
+ * | * |
+ * |----------------------|
+ * | sde[o-1] -> eng o+m+n|
+ * +----------------------+
+ *
+ */
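+
+/*
+ * Selection sketch implied by the diagram above (simplified; NULL
+ * checks and fallback handling omitted; fields are from the
+ * structures defined below):
+ *
+ *	rcu_read_lock();
+ *	m = rcu_dereference(dd->sdma_map);
+ *	e = m->map[vl & m->mask];
+ *	sde = e->sde[selector & e->mask];
+ *	rcu_read_unlock();
+ */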
+
+/**
+ * struct sdma_map_elem - mapping for a vl
+ * @mask - selector mask
+ * @sde - array of engines for this vl
+ *
+ * The mask is used to "mod" the selector
+ * to produce index into the trailing
+ * array of sdes.
+ */
+struct sdma_map_elem {
+ u32 mask;
+ struct sdma_engine *sde[];
+};
+
+/**
+ * struct sdma_vl_map - mapping of vls to engine lists
+ * @engine_to_vl - map of an engine to a vl
+ * @list - rcu head for free callback
+ * @mask - vl mask to "mod" the vl to produce an index to map array
+ * @actual_vls - number of vls
+ * @vls - number of vls rounded to next power of 2
+ * @map - array of sdma_map_elem entries
+ *
+ * This is the parent mapping structure. The trailing
+ * members of the struct point to sdma_map_elem entries, which
+ * in turn point to an array of sde's for that vl.
+ */
+struct sdma_vl_map {
+ s8 engine_to_vl[TXE_NUM_SDMA_ENGINES];
+ struct rcu_head list;
+ u32 mask;
+ u8 actual_vls;
+ u8 vls;
+ struct sdma_map_elem *map[];
+};
+
+int sdma_map_init(
+ struct hfi1_devdata *dd,
+ u8 port,
+ u8 num_vls,
+ u8 *vl_engines);
+
+/* slow path */
+void _sdma_engine_progress_schedule(struct sdma_engine *sde);
+
+/**
+ * sdma_engine_progress_schedule() - schedule progress on engine
+ * @sde: sdma_engine to schedule progress
+ *
+ * This is the fast path.
+ *
+ */
+static inline void sdma_engine_progress_schedule(
+ struct sdma_engine *sde)
+{
+ if (!sde || sdma_descq_inprocess(sde) < (sde->descq_cnt / 8))
+ return;
+ _sdma_engine_progress_schedule(sde);
+}
+
+struct sdma_engine *sdma_select_engine_sc(
+ struct hfi1_devdata *dd,
+ u32 selector,
+ u8 sc5);
+
+struct sdma_engine *sdma_select_engine_vl(
+ struct hfi1_devdata *dd,
+ u32 selector,
+ u8 vl);
+
+struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
+ u32 selector, u8 vl);
+ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf);
+ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
+ size_t count);
+int sdma_engine_get_vl(struct sdma_engine *sde);
+void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *);
+void sdma_seqfile_dump_cpu_list(struct seq_file *s, struct hfi1_devdata *dd,
+ unsigned long cpuid);
+
+#ifdef CONFIG_SDMA_VERBOSITY
+void sdma_dumpstate(struct sdma_engine *);
+#endif
+static inline char *slashstrip(char *s)
+{
+ char *r = s;
+
+ while (*s)
+ if (*s++ == '/')
+ r = s;
+ return r;
+}
+
+u16 sdma_get_descq_cnt(void);
+
+extern uint mod_num_sdma;
+
+void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);
+#endif
diff --git a/drivers/infiniband/hw/hfi1/sdma_txreq.h b/drivers/infiniband/hw/hfi1/sdma_txreq.h
new file mode 100644
index 000000000000..5782166d984c
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/sdma_txreq.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ */
+
+#ifndef HFI1_SDMA_TXREQ_H
+#define HFI1_SDMA_TXREQ_H
+
+/* increased for AHG */
+#define NUM_DESC 6
+
+/*
+ * struct sdma_desc - canonical fragment descriptor
+ *
+ * This is the descriptor carried in the tx request
+ * corresponding to each fragment.
+ *
+ */
+struct sdma_desc {
+ /* private: don't use directly */
+ u64 qw[2];
+ void *pinning_ctx;
+ /* Release reference to @pinning_ctx. May be called in interrupt context. Must not sleep. */
+ void (*ctx_put)(void *ctx);
+};
+
+/**
+ * struct sdma_txreq - the sdma_txreq structure (one per packet)
+ * @list: for use by user and by queuing for wait
+ *
+ * This is the representation of a packet which consists of some
+ * number of fragments. Storage is provided within the structure
+ * for all fragments.
+ *
+ * The storage for the descriptors is automatically extended as needed
+ * when the current allocation is exceeded.
+ *
+ * The user (Verbs or PSM) may overload this structure with fields
+ * specific to their use by putting this struct first in their struct.
+ * The method of allocation of the overloaded structure is user dependent.
+ *
+ * The list is the only public field in the structure.
+ *
+ */
+
+#define SDMA_TXREQ_S_OK 0
+#define SDMA_TXREQ_S_SENDERROR 1
+#define SDMA_TXREQ_S_ABORTED 2
+#define SDMA_TXREQ_S_SHUTDOWN 3
+
+/* flags bits */
+#define SDMA_TXREQ_F_URGENT 0x0001
+#define SDMA_TXREQ_F_AHG_COPY 0x0002
+#define SDMA_TXREQ_F_USE_AHG 0x0004
+#define SDMA_TXREQ_F_VIP 0x0010
+
+struct sdma_txreq;
+typedef void (*callback_t)(struct sdma_txreq *, int);
+
+struct iowait;
+struct sdma_txreq {
+ struct list_head list;
+ /* private: */
+ struct sdma_desc *descp;
+ /* private: */
+ void *coalesce_buf;
+ /* private: */
+ struct iowait *wait;
+ /* private: */
+ callback_t complete;
+#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
+ u64 sn;
+#endif
+ /* private: - used in coalesce/pad processing */
+ u16 packet_len;
+ /* private: - down-counted to trigger last */
+ u16 tlen;
+ /* private: */
+ u16 num_desc;
+ /* private: */
+ u16 desc_limit;
+ /* private: */
+ u16 next_descq_idx;
+ /* private: */
+ u16 coalesce_idx;
+ /* private: flags */
+ u16 flags;
+ /* private: */
+ struct sdma_desc descs[NUM_DESC];
+};
+
+static inline int sdma_txreq_built(struct sdma_txreq *tx)
+{
+ return tx->num_desc;
+}
+
+#endif /* HFI1_SDMA_TXREQ_H */
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
new file mode 100644
index 000000000000..372cfd13dc61
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -0,0 +1,697 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015-2017 Intel Corporation.
+ */
+
+#include <linux/ctype.h>
+#include <rdma/ib_sysfs.h>
+
+#include "hfi.h"
+#include "mad.h"
+#include "trace.h"
+
+static struct hfi1_pportdata *hfi1_get_pportdata_kobj(struct kobject *kobj)
+{
+ u32 port_num;
+ struct ib_device *ibdev = ib_port_sysfs_get_ibdev_kobj(kobj, &port_num);
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+
+ return &dd->pport[port_num - 1];
+}
+
+/*
+ * Start of per-port congestion control structures and support code
+ */
+
+/*
+ * Congestion control table size followed by table entries
+ */
+static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ int ret;
+ struct hfi1_pportdata *ppd = hfi1_get_pportdata_kobj(kobj);
+ struct cc_state *cc_state;
+
+ ret = ppd->total_cct_entry * sizeof(struct ib_cc_table_entry_shadow)
+ + sizeof(__be16);
+
+ if (pos > ret)
+ return -EINVAL;
+
+ if (count > ret - pos)
+ count = ret - pos;
+
+ if (!count)
+ return count;
+
+ rcu_read_lock();
+ cc_state = get_cc_state(ppd);
+ if (!cc_state) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ memcpy(buf, (void *)&cc_state->cct + pos, count);
+ rcu_read_unlock();
+
+ return count;
+}
+static const BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
+
+/*
+ * Congestion settings: port control, control map and an array of 16
+ * entries for the congestion entries - increase, timer, event log
+ * trigger threshold and the minimum injection rate delay.
+ */
+static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ struct hfi1_pportdata *ppd = hfi1_get_pportdata_kobj(kobj);
+ int ret;
+ struct cc_state *cc_state;
+
+ ret = sizeof(struct opa_congestion_setting_attr_shadow);
+
+ if (pos > ret)
+ return -EINVAL;
+ if (count > ret - pos)
+ count = ret - pos;
+
+ if (!count)
+ return count;
+
+ rcu_read_lock();
+ cc_state = get_cc_state(ppd);
+ if (!cc_state) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ memcpy(buf, (void *)&cc_state->cong_setting + pos, count);
+ rcu_read_unlock();
+
+ return count;
+}
+static const BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE);
+
+static const struct bin_attribute *const port_cc_bin_attributes[] = {
+ &bin_attr_cc_setting_bin,
+ &bin_attr_cc_table_bin,
+ NULL
+};
+
+static ssize_t cc_prescan_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
+
+ return sysfs_emit(buf, "%s\n", ppd->cc_prescan ? "on" : "off");
+}
+
+static ssize_t cc_prescan_store(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
+
+ if (!memcmp(buf, "on", 2))
+ ppd->cc_prescan = true;
+ else if (!memcmp(buf, "off", 3))
+ ppd->cc_prescan = false;
+
+ return count;
+}
+static IB_PORT_ATTR_ADMIN_RW(cc_prescan);
+
+static struct attribute *port_cc_attributes[] = {
+ &ib_port_attr_cc_prescan.attr,
+ NULL
+};
+
+static const struct attribute_group port_cc_group = {
+ .name = "CCMgtA",
+ .attrs = port_cc_attributes,
+ .bin_attrs = port_cc_bin_attributes,
+};
+
+/* Start sc2vl */
+struct hfi1_sc2vl_attr {
+ struct ib_port_attribute attr;
+ int sc;
+};
+
+static ssize_t sc2vl_attr_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
+{
+ struct hfi1_sc2vl_attr *sattr =
+ container_of(attr, struct hfi1_sc2vl_attr, attr);
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+
+ return sysfs_emit(buf, "%u\n", *((u8 *)dd->sc2vl + sattr->sc));
+}
+
+#define HFI1_SC2VL_ATTR(N) \
+ static struct hfi1_sc2vl_attr hfi1_sc2vl_attr_##N = { \
+ .attr = __ATTR(N, 0444, sc2vl_attr_show, NULL), \
+ .sc = N, \
+ }
+
+HFI1_SC2VL_ATTR(0);
+HFI1_SC2VL_ATTR(1);
+HFI1_SC2VL_ATTR(2);
+HFI1_SC2VL_ATTR(3);
+HFI1_SC2VL_ATTR(4);
+HFI1_SC2VL_ATTR(5);
+HFI1_SC2VL_ATTR(6);
+HFI1_SC2VL_ATTR(7);
+HFI1_SC2VL_ATTR(8);
+HFI1_SC2VL_ATTR(9);
+HFI1_SC2VL_ATTR(10);
+HFI1_SC2VL_ATTR(11);
+HFI1_SC2VL_ATTR(12);
+HFI1_SC2VL_ATTR(13);
+HFI1_SC2VL_ATTR(14);
+HFI1_SC2VL_ATTR(15);
+HFI1_SC2VL_ATTR(16);
+HFI1_SC2VL_ATTR(17);
+HFI1_SC2VL_ATTR(18);
+HFI1_SC2VL_ATTR(19);
+HFI1_SC2VL_ATTR(20);
+HFI1_SC2VL_ATTR(21);
+HFI1_SC2VL_ATTR(22);
+HFI1_SC2VL_ATTR(23);
+HFI1_SC2VL_ATTR(24);
+HFI1_SC2VL_ATTR(25);
+HFI1_SC2VL_ATTR(26);
+HFI1_SC2VL_ATTR(27);
+HFI1_SC2VL_ATTR(28);
+HFI1_SC2VL_ATTR(29);
+HFI1_SC2VL_ATTR(30);
+HFI1_SC2VL_ATTR(31);
+
+static struct attribute *port_sc2vl_attributes[] = {
+ &hfi1_sc2vl_attr_0.attr.attr,
+ &hfi1_sc2vl_attr_1.attr.attr,
+ &hfi1_sc2vl_attr_2.attr.attr,
+ &hfi1_sc2vl_attr_3.attr.attr,
+ &hfi1_sc2vl_attr_4.attr.attr,
+ &hfi1_sc2vl_attr_5.attr.attr,
+ &hfi1_sc2vl_attr_6.attr.attr,
+ &hfi1_sc2vl_attr_7.attr.attr,
+ &hfi1_sc2vl_attr_8.attr.attr,
+ &hfi1_sc2vl_attr_9.attr.attr,
+ &hfi1_sc2vl_attr_10.attr.attr,
+ &hfi1_sc2vl_attr_11.attr.attr,
+ &hfi1_sc2vl_attr_12.attr.attr,
+ &hfi1_sc2vl_attr_13.attr.attr,
+ &hfi1_sc2vl_attr_14.attr.attr,
+ &hfi1_sc2vl_attr_15.attr.attr,
+ &hfi1_sc2vl_attr_16.attr.attr,
+ &hfi1_sc2vl_attr_17.attr.attr,
+ &hfi1_sc2vl_attr_18.attr.attr,
+ &hfi1_sc2vl_attr_19.attr.attr,
+ &hfi1_sc2vl_attr_20.attr.attr,
+ &hfi1_sc2vl_attr_21.attr.attr,
+ &hfi1_sc2vl_attr_22.attr.attr,
+ &hfi1_sc2vl_attr_23.attr.attr,
+ &hfi1_sc2vl_attr_24.attr.attr,
+ &hfi1_sc2vl_attr_25.attr.attr,
+ &hfi1_sc2vl_attr_26.attr.attr,
+ &hfi1_sc2vl_attr_27.attr.attr,
+ &hfi1_sc2vl_attr_28.attr.attr,
+ &hfi1_sc2vl_attr_29.attr.attr,
+ &hfi1_sc2vl_attr_30.attr.attr,
+ &hfi1_sc2vl_attr_31.attr.attr,
+ NULL
+};
+
+static const struct attribute_group port_sc2vl_group = {
+ .name = "sc2vl",
+ .attrs = port_sc2vl_attributes,
+};
+/* End sc2vl */
+
+/* Start sl2sc */
+struct hfi1_sl2sc_attr {
+ struct ib_port_attribute attr;
+ int sl;
+};
+
+static ssize_t sl2sc_attr_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
+{
+ struct hfi1_sl2sc_attr *sattr =
+ container_of(attr, struct hfi1_sl2sc_attr, attr);
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_ibport *ibp = &dd->pport[port_num - 1].ibport_data;
+
+ return sysfs_emit(buf, "%u\n", ibp->sl_to_sc[sattr->sl]);
+}
+
+#define HFI1_SL2SC_ATTR(N) \
+ static struct hfi1_sl2sc_attr hfi1_sl2sc_attr_##N = { \
+ .attr = __ATTR(N, 0444, sl2sc_attr_show, NULL), .sl = N \
+ }
+
+HFI1_SL2SC_ATTR(0);
+HFI1_SL2SC_ATTR(1);
+HFI1_SL2SC_ATTR(2);
+HFI1_SL2SC_ATTR(3);
+HFI1_SL2SC_ATTR(4);
+HFI1_SL2SC_ATTR(5);
+HFI1_SL2SC_ATTR(6);
+HFI1_SL2SC_ATTR(7);
+HFI1_SL2SC_ATTR(8);
+HFI1_SL2SC_ATTR(9);
+HFI1_SL2SC_ATTR(10);
+HFI1_SL2SC_ATTR(11);
+HFI1_SL2SC_ATTR(12);
+HFI1_SL2SC_ATTR(13);
+HFI1_SL2SC_ATTR(14);
+HFI1_SL2SC_ATTR(15);
+HFI1_SL2SC_ATTR(16);
+HFI1_SL2SC_ATTR(17);
+HFI1_SL2SC_ATTR(18);
+HFI1_SL2SC_ATTR(19);
+HFI1_SL2SC_ATTR(20);
+HFI1_SL2SC_ATTR(21);
+HFI1_SL2SC_ATTR(22);
+HFI1_SL2SC_ATTR(23);
+HFI1_SL2SC_ATTR(24);
+HFI1_SL2SC_ATTR(25);
+HFI1_SL2SC_ATTR(26);
+HFI1_SL2SC_ATTR(27);
+HFI1_SL2SC_ATTR(28);
+HFI1_SL2SC_ATTR(29);
+HFI1_SL2SC_ATTR(30);
+HFI1_SL2SC_ATTR(31);
+
+static struct attribute *port_sl2sc_attributes[] = {
+ &hfi1_sl2sc_attr_0.attr.attr,
+ &hfi1_sl2sc_attr_1.attr.attr,
+ &hfi1_sl2sc_attr_2.attr.attr,
+ &hfi1_sl2sc_attr_3.attr.attr,
+ &hfi1_sl2sc_attr_4.attr.attr,
+ &hfi1_sl2sc_attr_5.attr.attr,
+ &hfi1_sl2sc_attr_6.attr.attr,
+ &hfi1_sl2sc_attr_7.attr.attr,
+ &hfi1_sl2sc_attr_8.attr.attr,
+ &hfi1_sl2sc_attr_9.attr.attr,
+ &hfi1_sl2sc_attr_10.attr.attr,
+ &hfi1_sl2sc_attr_11.attr.attr,
+ &hfi1_sl2sc_attr_12.attr.attr,
+ &hfi1_sl2sc_attr_13.attr.attr,
+ &hfi1_sl2sc_attr_14.attr.attr,
+ &hfi1_sl2sc_attr_15.attr.attr,
+ &hfi1_sl2sc_attr_16.attr.attr,
+ &hfi1_sl2sc_attr_17.attr.attr,
+ &hfi1_sl2sc_attr_18.attr.attr,
+ &hfi1_sl2sc_attr_19.attr.attr,
+ &hfi1_sl2sc_attr_20.attr.attr,
+ &hfi1_sl2sc_attr_21.attr.attr,
+ &hfi1_sl2sc_attr_22.attr.attr,
+ &hfi1_sl2sc_attr_23.attr.attr,
+ &hfi1_sl2sc_attr_24.attr.attr,
+ &hfi1_sl2sc_attr_25.attr.attr,
+ &hfi1_sl2sc_attr_26.attr.attr,
+ &hfi1_sl2sc_attr_27.attr.attr,
+ &hfi1_sl2sc_attr_28.attr.attr,
+ &hfi1_sl2sc_attr_29.attr.attr,
+ &hfi1_sl2sc_attr_30.attr.attr,
+ &hfi1_sl2sc_attr_31.attr.attr,
+ NULL
+};
+
+static const struct attribute_group port_sl2sc_group = {
+ .name = "sl2sc",
+ .attrs = port_sl2sc_attributes,
+};
+
+/* End sl2sc */
+
+/* Start vl2mtu */
+
+struct hfi1_vl2mtu_attr {
+ struct ib_port_attribute attr;
+ int vl;
+};
+
+static ssize_t vl2mtu_attr_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
+{
+ struct hfi1_vl2mtu_attr *vlattr =
+ container_of(attr, struct hfi1_vl2mtu_attr, attr);
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+
+ return sysfs_emit(buf, "%u\n", dd->vld[vlattr->vl].mtu);
+}
+
+#define HFI1_VL2MTU_ATTR(N) \
+ static struct hfi1_vl2mtu_attr hfi1_vl2mtu_attr_##N = { \
+ .attr = __ATTR(N, 0444, vl2mtu_attr_show, NULL), \
+ .vl = N, \
+ }
+
+HFI1_VL2MTU_ATTR(0);
+HFI1_VL2MTU_ATTR(1);
+HFI1_VL2MTU_ATTR(2);
+HFI1_VL2MTU_ATTR(3);
+HFI1_VL2MTU_ATTR(4);
+HFI1_VL2MTU_ATTR(5);
+HFI1_VL2MTU_ATTR(6);
+HFI1_VL2MTU_ATTR(7);
+HFI1_VL2MTU_ATTR(8);
+HFI1_VL2MTU_ATTR(9);
+HFI1_VL2MTU_ATTR(10);
+HFI1_VL2MTU_ATTR(11);
+HFI1_VL2MTU_ATTR(12);
+HFI1_VL2MTU_ATTR(13);
+HFI1_VL2MTU_ATTR(14);
+HFI1_VL2MTU_ATTR(15);
+
+static struct attribute *port_vl2mtu_attributes[] = {
+ &hfi1_vl2mtu_attr_0.attr.attr,
+ &hfi1_vl2mtu_attr_1.attr.attr,
+ &hfi1_vl2mtu_attr_2.attr.attr,
+ &hfi1_vl2mtu_attr_3.attr.attr,
+ &hfi1_vl2mtu_attr_4.attr.attr,
+ &hfi1_vl2mtu_attr_5.attr.attr,
+ &hfi1_vl2mtu_attr_6.attr.attr,
+ &hfi1_vl2mtu_attr_7.attr.attr,
+ &hfi1_vl2mtu_attr_8.attr.attr,
+ &hfi1_vl2mtu_attr_9.attr.attr,
+ &hfi1_vl2mtu_attr_10.attr.attr,
+ &hfi1_vl2mtu_attr_11.attr.attr,
+ &hfi1_vl2mtu_attr_12.attr.attr,
+ &hfi1_vl2mtu_attr_13.attr.attr,
+ &hfi1_vl2mtu_attr_14.attr.attr,
+ &hfi1_vl2mtu_attr_15.attr.attr,
+ NULL
+};
+
+static const struct attribute_group port_vl2mtu_group = {
+ .name = "vl2mtu",
+ .attrs = port_vl2mtu_attributes,
+};
+
+/* end of per-port file structures and support code */
+
+/*
+ * Start of per-unit (or driver, in some cases, but replicated
+ * per unit) functions (these get a device *)
+ */
+static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct hfi1_ibdev *dev =
+ rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev);
+
+ return sysfs_emit(buf, "%x\n", dd_from_dev(dev)->minrev);
+}
+static DEVICE_ATTR_RO(hw_rev);
+
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct hfi1_ibdev *dev =
+ rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev);
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+
+ if (!dd->boardname)
+ return -EINVAL;
+
+ return sysfs_emit(buf, "%s\n", dd->boardname);
+}
+static DEVICE_ATTR_RO(board_id);
+
+static ssize_t boardversion_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct hfi1_ibdev *dev =
+ rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev);
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+
+ /* The string printed here is already newline-terminated. */
+ return sysfs_emit(buf, "%s", dd->boardversion);
+}
+static DEVICE_ATTR_RO(boardversion);
+
+static ssize_t nctxts_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct hfi1_ibdev *dev =
+ rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev);
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+
+ /*
+ * Return the smaller of send and receive contexts.
+ * Normally, user level applications would require both a send
+ * and a receive context, so returning the smaller of the two counts
+ * gives a more accurate picture of total contexts available.
+ */
+ return sysfs_emit(buf, "%u\n",
+ min(dd->num_user_contexts,
+ (u32)dd->sc_sizes[SC_USER].count));
+}
+static DEVICE_ATTR_RO(nctxts);
+
+static ssize_t nfreectxts_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct hfi1_ibdev *dev =
+ rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev);
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+
+ /* Return the number of free user ports (contexts) available. */
+ return sysfs_emit(buf, "%u\n", dd->freectxts);
+}
+static DEVICE_ATTR_RO(nfreectxts);
+
+static ssize_t serial_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct hfi1_ibdev *dev =
+ rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev);
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+
+ /* dd->serial is already newline terminated in chip.c */
+ return sysfs_emit(buf, "%s", dd->serial);
+}
+static DEVICE_ATTR_RO(serial);
+
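+/*
+ * Writing the string "reset" resets the chip; the write is rejected
+ * unless a diagnostic client currently has the device open.
+ */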
+static ssize_t chip_reset_store(struct device *device,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct hfi1_ibdev *dev =
+ rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev);
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+ int ret;
+
+ if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ ret = hfi1_reset_device(dd->unit);
+bail:
+ return ret < 0 ? ret : count;
+}
+static DEVICE_ATTR_WO(chip_reset);
+
+/*
+ * Split the reported temperature (in units of 0.25 C) into an
+ * integer part and a fractional part in hundredths of a degree.
+ */
+#define temp_d(t) ((t) >> 2)
+#define temp_f(t) (((t)&0x3) * 25u)
+
+/*
+ * Dump tempsense values, in decimal, to ease shell-scripts.
+ */
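+/*
+ * Output order: curr, lo_lim, hi_lim and crit_lim (each as
+ * degrees.hundredths), followed by the three trigger flags.
+ */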
+static ssize_t tempsense_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct hfi1_ibdev *dev =
+ rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev);
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+ struct hfi1_temp temp;
+ int ret;
+
+ ret = hfi1_tempsense_rd(dd, &temp);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%u.%02u %u.%02u %u.%02u %u.%02u %u %u %u\n",
+ temp_d(temp.curr), temp_f(temp.curr),
+ temp_d(temp.lo_lim), temp_f(temp.lo_lim),
+ temp_d(temp.hi_lim), temp_f(temp.hi_lim),
+ temp_d(temp.crit_lim), temp_f(temp.crit_lim),
+ temp.triggers & 0x1,
+ temp.triggers & 0x2,
+ temp.triggers & 0x4);
+}
+static DEVICE_ATTR_RO(tempsense);
+
+/*
+ * end of per-unit (or driver, in some cases, but replicated
+ * per unit) functions
+ */
+
+/* start of per-unit file structures and support code */
+static struct attribute *hfi1_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_board_id.attr,
+ &dev_attr_nctxts.attr,
+ &dev_attr_nfreectxts.attr,
+ &dev_attr_serial.attr,
+ &dev_attr_boardversion.attr,
+ &dev_attr_tempsense.attr,
+ &dev_attr_chip_reset.attr,
+ NULL,
+};
+
+const struct attribute_group ib_hfi1_attr_group = {
+ .attrs = hfi1_attributes,
+};
+
+const struct attribute_group *hfi1_attr_port_groups[] = {
+ &port_cc_group,
+ &port_sc2vl_group,
+ &port_sl2sc_group,
+ &port_vl2mtu_group,
+ NULL,
+};
+
+struct sde_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct sdma_engine *sde, char *buf);
+ ssize_t (*store)(struct sdma_engine *sde, const char *buf, size_t cnt);
+};
+
+static ssize_t sde_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ struct sde_attribute *sde_attr =
+ container_of(attr, struct sde_attribute, attr);
+ struct sdma_engine *sde =
+ container_of(kobj, struct sdma_engine, kobj);
+
+ if (!sde_attr->show)
+ return -EINVAL;
+
+ return sde_attr->show(sde, buf);
+}
+
+static ssize_t sde_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sde_attribute *sde_attr =
+ container_of(attr, struct sde_attribute, attr);
+ struct sdma_engine *sde =
+ container_of(kobj, struct sdma_engine, kobj);
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!sde_attr->store)
+ return -EINVAL;
+
+ return sde_attr->store(sde, buf, count);
+}
+
+static const struct sysfs_ops sde_sysfs_ops = {
+ .show = sde_show,
+ .store = sde_store,
+};
+
+static struct kobj_type sde_ktype = {
+ .sysfs_ops = &sde_sysfs_ops,
+};
+
+#define SDE_ATTR(_name, _mode, _show, _store) \
+ struct sde_attribute sde_attr_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+static ssize_t sde_show_cpu_to_sde_map(struct sdma_engine *sde, char *buf)
+{
+ return sdma_get_cpu_to_sde_map(sde, buf);
+}
+
+static ssize_t sde_store_cpu_to_sde_map(struct sdma_engine *sde,
+ const char *buf, size_t count)
+{
+ return sdma_set_cpu_to_sde_map(sde, buf, count);
+}
+
+static ssize_t sde_show_vl(struct sdma_engine *sde, char *buf)
+{
+ int vl;
+
+ vl = sdma_engine_get_vl(sde);
+ if (vl < 0)
+ return vl;
+
+ return sysfs_emit(buf, "%d\n", vl);
+}
+
+static SDE_ATTR(cpu_list, S_IWUSR | S_IRUGO,
+ sde_show_cpu_to_sde_map,
+ sde_store_cpu_to_sde_map);
+static SDE_ATTR(vl, S_IRUGO, sde_show_vl, NULL);
+
+static struct sde_attribute *sde_attribs[] = {
+ &sde_attr_cpu_list,
+ &sde_attr_vl
+};
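+/*
+ * Once hfi1_verbs_register_sysfs() below creates the per-engine kobjects,
+ * these show up as /sys/class/infiniband/<dev>/sdma<N>/{cpu_list,vl}.
+ */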
+
+/*
+ * Register and create our files in /sys/class/infiniband.
+ */
+int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd)
+{
+ struct ib_device *dev = &dd->verbs_dev.rdi.ibdev;
+ struct device *class_dev = &dev->dev;
+ int i, j, ret;
+
+ for (i = 0; i < dd->num_sdma; i++) {
+ ret = kobject_init_and_add(&dd->per_sdma[i].kobj,
+ &sde_ktype, &class_dev->kobj,
+ "sdma%d", i);
+ if (ret)
+ goto bail;
+
+ for (j = 0; j < ARRAY_SIZE(sde_attribs); j++) {
+ ret = sysfs_create_file(&dd->per_sdma[i].kobj,
+ &sde_attribs[j]->attr);
+ if (ret)
+ goto bail;
+ }
+ }
+
+ return 0;
+bail:
+ /*
+ * The function kobject_put() will call kobject_del() if the kobject
+ * has been added successfully. The sysfs files created under the
+ * kobject directory will also be removed during the process.
+ */
+ for (; i >= 0; i--)
+ kobject_put(&dd->per_sdma[i].kobj);
+
+ return ret;
+}
+
+/*
+ * Unregister and remove our files in /sys/class/infiniband.
+ */
+void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd)
+{
+ int i;
+
+ /* Unwind operations in hfi1_verbs_register_sysfs() */
+ for (i = 0; i < dd->num_sdma; i++)
+ kobject_put(&dd->per_sdma[i].kobj);
+}
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
new file mode 100644
index 000000000000..eafd2f157e32
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -0,0 +1,5534 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2018 - 2020 Intel Corporation.
+ *
+ */
+
+#include "hfi.h"
+#include "qp.h"
+#include "rc.h"
+#include "verbs.h"
+#include "tid_rdma.h"
+#include "exp_rcv.h"
+#include "trace.h"
+
+/**
+ * DOC: TID RDMA READ protocol
+ *
+ * This is an end-to-end protocol at the hfi1 level between two nodes that
+ * improves performance by avoiding data copy on the requester side. It
+ * converts a qualified RDMA READ request into a TID RDMA READ request on
+ * the requester side and thereafter handles the request and response
+ * differently. To be qualified, the RDMA READ request should meet the
+ * following:
+ * -- The total data length should be greater than 256K;
+ * -- The total data length should be a multiple of 4K page size;
+ * -- Each local scatter-gather entry should be 4K page aligned;
+ * -- Each local scatter-gather entry should be a multiple of 4K page size;
+ */
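+/*
+ * For example, a 1MB RDMA READ whose local SGEs are each 4K aligned and
+ * a multiple of 4K in length qualifies; a 128KB request does not, since
+ * it is not larger than 256K.
+ */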
+
+#define RCV_TID_FLOW_TABLE_CTRL_FLOW_VALID_SMASK BIT_ULL(32)
+#define RCV_TID_FLOW_TABLE_CTRL_HDR_SUPP_EN_SMASK BIT_ULL(33)
+#define RCV_TID_FLOW_TABLE_CTRL_KEEP_AFTER_SEQ_ERR_SMASK BIT_ULL(34)
+#define RCV_TID_FLOW_TABLE_CTRL_KEEP_ON_GEN_ERR_SMASK BIT_ULL(35)
+#define RCV_TID_FLOW_TABLE_STATUS_SEQ_MISMATCH_SMASK BIT_ULL(37)
+#define RCV_TID_FLOW_TABLE_STATUS_GEN_MISMATCH_SMASK BIT_ULL(38)
+
+/* Maximum number of packets within a flow generation. */
+#define MAX_TID_FLOW_PSN BIT(HFI1_KDETH_BTH_SEQ_SHIFT)
+
+#define GENERATION_MASK 0xFFFFF
+
+static u32 mask_generation(u32 a)
+{
+ return a & GENERATION_MASK;
+}
+
+/* Reserved generation value to set to unused flows for kernel contexts */
+#define KERN_GENERATION_RESERVED mask_generation(U32_MAX)
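+/* With the 20-bit GENERATION_MASK above, this reserved value is 0xFFFFF. */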
+
+/*
+ * J_KEY for kernel contexts when TID RDMA is used.
+ * See generate_jkey() in hfi.h for more information.
+ */
+#define TID_RDMA_JKEY 32
+#define HFI1_KERNEL_MIN_JKEY HFI1_ADMIN_JKEY_RANGE
+#define HFI1_KERNEL_MAX_JKEY (2 * HFI1_ADMIN_JKEY_RANGE - 1)
+
+/* Maximum number of segments in flight per QP request. */
+#define TID_RDMA_MAX_READ_SEGS_PER_REQ 6
+#define TID_RDMA_MAX_WRITE_SEGS_PER_REQ 4
+#define MAX_REQ max_t(u16, TID_RDMA_MAX_READ_SEGS_PER_REQ, \
+ TID_RDMA_MAX_WRITE_SEGS_PER_REQ)
+#define MAX_FLOWS roundup_pow_of_two(MAX_REQ + 1)
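+/* With the segment limits above, MAX_REQ is 6 and MAX_FLOWS rounds up to 8. */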
+
+#define MAX_EXPECTED_PAGES (MAX_EXPECTED_BUFFER / PAGE_SIZE)
+
+#define TID_RDMA_DESTQP_FLOW_SHIFT 11
+#define TID_RDMA_DESTQP_FLOW_MASK 0x1f
+
+#define TID_OPFN_QP_CTXT_MASK 0xff
+#define TID_OPFN_QP_CTXT_SHIFT 56
+#define TID_OPFN_QP_KDETH_MASK 0xff
+#define TID_OPFN_QP_KDETH_SHIFT 48
+#define TID_OPFN_MAX_LEN_MASK 0x7ff
+#define TID_OPFN_MAX_LEN_SHIFT 37
+#define TID_OPFN_TIMEOUT_MASK 0x1f
+#define TID_OPFN_TIMEOUT_SHIFT 32
+#define TID_OPFN_RESERVED_MASK 0x3f
+#define TID_OPFN_RESERVED_SHIFT 26
+#define TID_OPFN_URG_MASK 0x1
+#define TID_OPFN_URG_SHIFT 25
+#define TID_OPFN_VER_MASK 0x7
+#define TID_OPFN_VER_SHIFT 22
+#define TID_OPFN_JKEY_MASK 0x3f
+#define TID_OPFN_JKEY_SHIFT 16
+#define TID_OPFN_MAX_READ_MASK 0x3f
+#define TID_OPFN_MAX_READ_SHIFT 10
+#define TID_OPFN_MAX_WRITE_MASK 0x3f
+#define TID_OPFN_MAX_WRITE_SHIFT 4
+
+/*
+ * OPFN TID layout
+ *
+ * 63 47 31 15
+ * NNNNNNNNKKKKKKKK MMMMMMMMMMMTTTTT DDDDDDUVVVJJJJJJ RRRRRRWWWWWWCCCC
+ * 3210987654321098 7654321098765432 1098765432109876 5432109876543210
+ * N - the context Number
+ * K - the Kdeth_qp
+ * M - Max_len
+ * T - Timeout
+ * D - reserveD
+ * V - version
+ * U - Urg capable
+ * J - Jkey
+ * R - max_Read
+ * W - max_Write
+ * C - Capcode
+ */
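+/*
+ * Worked example (assuming 4K pages): a 256KB max_len is encoded by
+ * tid_rdma_opfn_encode() as ((256K >> PAGE_SHIFT) - 1) = 63 in the M
+ * field, and decoded back as (63 + 1) << PAGE_SHIFT = 256KB.
+ */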
+
+static void tid_rdma_trigger_resume(struct work_struct *work);
+static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req);
+static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
+ gfp_t gfp);
+static void hfi1_init_trdma_req(struct rvt_qp *qp,
+ struct tid_rdma_request *req);
+static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx);
+static void hfi1_tid_timeout(struct timer_list *t);
+static void hfi1_add_tid_reap_timer(struct rvt_qp *qp);
+static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp);
+static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp);
+static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp);
+static void hfi1_tid_retry_timeout(struct timer_list *t);
+static int make_tid_rdma_ack(struct rvt_qp *qp,
+ struct ib_other_headers *ohdr,
+ struct hfi1_pkt_state *ps);
+static void hfi1_do_tid_send(struct rvt_qp *qp);
+static u32 read_r_next_psn(struct hfi1_devdata *dd, u8 ctxt, u8 fidx);
+static void tid_rdma_rcv_err(struct hfi1_packet *packet,
+ struct ib_other_headers *ohdr,
+ struct rvt_qp *qp, u32 psn, int diff, bool fecn);
+static void update_r_next_psn_fecn(struct hfi1_packet *packet,
+ struct hfi1_qp_priv *priv,
+ struct hfi1_ctxtdata *rcd,
+ struct tid_rdma_flow *flow,
+ bool fecn);
+
+static void validate_r_tid_ack(struct hfi1_qp_priv *priv)
+{
+ if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
+ priv->r_tid_ack = priv->r_tid_tail;
+}
+
+static void tid_rdma_schedule_ack(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ priv->s_flags |= RVT_S_ACK_PENDING;
+ hfi1_schedule_tid_send(qp);
+}
+
+static void tid_rdma_trigger_ack(struct rvt_qp *qp)
+{
+ validate_r_tid_ack(qp->priv);
+ tid_rdma_schedule_ack(qp);
+}
+
+static u64 tid_rdma_opfn_encode(struct tid_rdma_params *p)
+{
+ return
+ (((u64)p->qp & TID_OPFN_QP_CTXT_MASK) <<
+ TID_OPFN_QP_CTXT_SHIFT) |
+ ((((u64)p->qp >> 16) & TID_OPFN_QP_KDETH_MASK) <<
+ TID_OPFN_QP_KDETH_SHIFT) |
+ (((u64)((p->max_len >> PAGE_SHIFT) - 1) &
+ TID_OPFN_MAX_LEN_MASK) << TID_OPFN_MAX_LEN_SHIFT) |
+ (((u64)p->timeout & TID_OPFN_TIMEOUT_MASK) <<
+ TID_OPFN_TIMEOUT_SHIFT) |
+ (((u64)p->urg & TID_OPFN_URG_MASK) << TID_OPFN_URG_SHIFT) |
+ (((u64)p->jkey & TID_OPFN_JKEY_MASK) << TID_OPFN_JKEY_SHIFT) |
+ (((u64)p->max_read & TID_OPFN_MAX_READ_MASK) <<
+ TID_OPFN_MAX_READ_SHIFT) |
+ (((u64)p->max_write & TID_OPFN_MAX_WRITE_MASK) <<
+ TID_OPFN_MAX_WRITE_SHIFT);
+}
+
+static void tid_rdma_opfn_decode(struct tid_rdma_params *p, u64 data)
+{
+ p->max_len = (((data >> TID_OPFN_MAX_LEN_SHIFT) &
+ TID_OPFN_MAX_LEN_MASK) + 1) << PAGE_SHIFT;
+ p->jkey = (data >> TID_OPFN_JKEY_SHIFT) & TID_OPFN_JKEY_MASK;
+ p->max_write = (data >> TID_OPFN_MAX_WRITE_SHIFT) &
+ TID_OPFN_MAX_WRITE_MASK;
+ p->max_read = (data >> TID_OPFN_MAX_READ_SHIFT) &
+ TID_OPFN_MAX_READ_MASK;
+ p->qp =
+ ((((data >> TID_OPFN_QP_KDETH_SHIFT) & TID_OPFN_QP_KDETH_MASK)
+ << 16) |
+ ((data >> TID_OPFN_QP_CTXT_SHIFT) & TID_OPFN_QP_CTXT_MASK));
+ p->urg = (data >> TID_OPFN_URG_SHIFT) & TID_OPFN_URG_MASK;
+ p->timeout = (data >> TID_OPFN_TIMEOUT_SHIFT) & TID_OPFN_TIMEOUT_MASK;
+}
+
+void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ p->qp = (RVT_KDETH_QP_PREFIX << 16) | priv->rcd->ctxt;
+ p->max_len = TID_RDMA_MAX_SEGMENT_SIZE;
+ p->jkey = priv->rcd->jkey;
+ p->max_read = TID_RDMA_MAX_READ_SEGS_PER_REQ;
+ p->max_write = TID_RDMA_MAX_WRITE_SEGS_PER_REQ;
+ p->timeout = qp->timeout;
+ p->urg = is_urg_masked(priv->rcd);
+}
+
+bool tid_rdma_conn_req(struct rvt_qp *qp, u64 *data)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ *data = tid_rdma_opfn_encode(&priv->tid_rdma.local);
+ return true;
+}
+
+bool tid_rdma_conn_reply(struct rvt_qp *qp, u64 data)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct tid_rdma_params *remote, *old;
+ bool ret = true;
+
+ old = rcu_dereference_protected(priv->tid_rdma.remote,
+ lockdep_is_held(&priv->opfn.lock));
+ data &= ~0xfULL;
+ /*
+ * If data passed in is zero, return true so as not to continue the
+ * negotiation process
+ */
+ if (!data || !HFI1_CAP_IS_KSET(TID_RDMA))
+ goto null;
+ /*
+ * If kzalloc fails, return false. This will result in:
+ * * at the requester a new OPFN request being generated to retry
+ * the negotiation
+ * * at the responder, 0 being returned to the requester so as to
+ * disable TID RDMA at both the requester and the responder
+ */
+ remote = kzalloc(sizeof(*remote), GFP_ATOMIC);
+ if (!remote) {
+ ret = false;
+ goto null;
+ }
+
+ tid_rdma_opfn_decode(remote, data);
+ priv->tid_timer_timeout_jiffies =
+ usecs_to_jiffies((((4096UL * (1UL << remote->timeout)) /
+ 1000UL) << 3) * 7);
+ trace_hfi1_opfn_param(qp, 0, &priv->tid_rdma.local);
+ trace_hfi1_opfn_param(qp, 1, remote);
+ rcu_assign_pointer(priv->tid_rdma.remote, remote);
+ /*
+ * A TID RDMA READ request's segment size is not equal to
+ * remote->max_len only when the request's data length is smaller
+ * than remote->max_len. In that case, there will be only one segment.
+ * Therefore, when priv->pkts_ps is used to calculate req->cur_seg
+ * during retry, it will lead to req->cur_seg = 0, which is exactly
+ * what is expected.
+ */
+ priv->pkts_ps = (u16)rvt_div_mtu(qp, remote->max_len);
+ priv->timeout_shift = ilog2(priv->pkts_ps - 1) + 1;
+ goto free;
+null:
+ RCU_INIT_POINTER(priv->tid_rdma.remote, NULL);
+ priv->timeout_shift = 0;
+free:
+ if (old)
+ kfree_rcu(old, rcu_head);
+ return ret;
+}
+
+bool tid_rdma_conn_resp(struct rvt_qp *qp, u64 *data)
+{
+ bool ret;
+
+ ret = tid_rdma_conn_reply(qp, *data);
+ *data = 0;
+ /*
+ * If tid_rdma_conn_reply() returns error, set *data as 0 to indicate
+ * TID RDMA could not be enabled. This will result in TID RDMA being
+ * disabled at the requester too.
+ */
+ if (ret)
+ (void)tid_rdma_conn_req(qp, data);
+ return ret;
+}
+
+void tid_rdma_conn_error(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct tid_rdma_params *old;
+
+ old = rcu_dereference_protected(priv->tid_rdma.remote,
+ lockdep_is_held(&priv->opfn.lock));
+ RCU_INIT_POINTER(priv->tid_rdma.remote, NULL);
+ if (old)
+ kfree_rcu(old, rcu_head);
+}
+
+/* This is called at context initialization time */
+int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit)
+{
+ if (reinit)
+ return 0;
+
+ BUILD_BUG_ON(TID_RDMA_JKEY < HFI1_KERNEL_MIN_JKEY);
+ BUILD_BUG_ON(TID_RDMA_JKEY > HFI1_KERNEL_MAX_JKEY);
+ rcd->jkey = TID_RDMA_JKEY;
+ hfi1_set_ctxt_jkey(rcd->dd, rcd, rcd->jkey);
+ return hfi1_alloc_ctxt_rcv_groups(rcd);
+}
+
+/**
+ * qp_to_rcd - determine the receive context used by a qp
+ * @rdi: rvt dev struct
+ * @qp: the qp
+ *
+ * This routine returns the receive context associated
+ * with a qp's qpn.
+ *
+ * Return: the context.
+ */
+static struct hfi1_ctxtdata *qp_to_rcd(struct rvt_dev_info *rdi,
+ struct rvt_qp *qp)
+{
+ struct hfi1_ibdev *verbs_dev = container_of(rdi,
+ struct hfi1_ibdev,
+ rdi);
+ struct hfi1_devdata *dd = container_of(verbs_dev,
+ struct hfi1_devdata,
+ verbs_dev);
+ unsigned int ctxt;
+
+ if (qp->ibqp.qp_num == 0)
+ ctxt = 0;
+ else
+ ctxt = hfi1_get_qp_map(dd, qp->ibqp.qp_num >> dd->qos_shift);
+ return dd->rcd[ctxt];
+}
+
+int hfi1_qp_priv_init(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ int i, ret;
+
+ qpriv->rcd = qp_to_rcd(rdi, qp);
+
+ spin_lock_init(&qpriv->opfn.lock);
+ INIT_WORK(&qpriv->opfn.opfn_work, opfn_send_conn_request);
+ INIT_WORK(&qpriv->tid_rdma.trigger_work, tid_rdma_trigger_resume);
+ qpriv->flow_state.psn = 0;
+ qpriv->flow_state.index = RXE_NUM_TID_FLOWS;
+ qpriv->flow_state.last_index = RXE_NUM_TID_FLOWS;
+ qpriv->flow_state.generation = KERN_GENERATION_RESERVED;
+ qpriv->s_state = TID_OP(WRITE_RESP);
+ qpriv->s_tid_cur = HFI1_QP_WQE_INVALID;
+ qpriv->s_tid_head = HFI1_QP_WQE_INVALID;
+ qpriv->s_tid_tail = HFI1_QP_WQE_INVALID;
+ qpriv->rnr_nak_state = TID_RNR_NAK_INIT;
+ qpriv->r_tid_head = HFI1_QP_WQE_INVALID;
+ qpriv->r_tid_tail = HFI1_QP_WQE_INVALID;
+ qpriv->r_tid_ack = HFI1_QP_WQE_INVALID;
+ qpriv->r_tid_alloc = HFI1_QP_WQE_INVALID;
+ atomic_set(&qpriv->n_requests, 0);
+ atomic_set(&qpriv->n_tid_requests, 0);
+ timer_setup(&qpriv->s_tid_timer, hfi1_tid_timeout, 0);
+ timer_setup(&qpriv->s_tid_retry_timer, hfi1_tid_retry_timeout, 0);
+ INIT_LIST_HEAD(&qpriv->tid_wait);
+
+ if (init_attr->qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) {
+ struct hfi1_devdata *dd = qpriv->rcd->dd;
+
+ qpriv->pages = kzalloc_node(TID_RDMA_MAX_PAGES *
+ sizeof(*qpriv->pages),
+ GFP_KERNEL, dd->node);
+ if (!qpriv->pages)
+ return -ENOMEM;
+ for (i = 0; i < qp->s_size; i++) {
+ struct hfi1_swqe_priv *priv;
+ struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);
+
+ priv = kzalloc_node(sizeof(*priv), GFP_KERNEL,
+ dd->node);
+ if (!priv)
+ return -ENOMEM;
+
+ hfi1_init_trdma_req(qp, &priv->tid_req);
+ priv->tid_req.e.swqe = wqe;
+ wqe->priv = priv;
+ }
+ for (i = 0; i < rvt_max_atomic(rdi); i++) {
+ struct hfi1_ack_priv *priv;
+
+ priv = kzalloc_node(sizeof(*priv), GFP_KERNEL,
+ dd->node);
+ if (!priv)
+ return -ENOMEM;
+
+ hfi1_init_trdma_req(qp, &priv->tid_req);
+ priv->tid_req.e.ack = &qp->s_ack_queue[i];
+
+ ret = hfi1_kern_exp_rcv_alloc_flows(&priv->tid_req,
+ GFP_KERNEL);
+ if (ret) {
+ kfree(priv);
+ return ret;
+ }
+ qp->s_ack_queue[i].priv = priv;
+ }
+ }
+
+ return 0;
+}
+
+void hfi1_qp_priv_tid_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct rvt_swqe *wqe;
+ u32 i;
+
+ if (qp->ibqp.qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) {
+ for (i = 0; i < qp->s_size; i++) {
+ wqe = rvt_get_swqe_ptr(qp, i);
+ kfree(wqe->priv);
+ wqe->priv = NULL;
+ }
+ for (i = 0; i < rvt_max_atomic(rdi); i++) {
+ struct hfi1_ack_priv *priv = qp->s_ack_queue[i].priv;
+
+ if (priv)
+ hfi1_kern_exp_rcv_free_flows(&priv->tid_req);
+ kfree(priv);
+ qp->s_ack_queue[i].priv = NULL;
+ }
+ cancel_work_sync(&qpriv->opfn.opfn_work);
+ kfree(qpriv->pages);
+ qpriv->pages = NULL;
+ }
+}
+
+/* Flow and tid waiter functions */
+/**
+ * DOC: lock ordering
+ *
+ * There are two locks involved with the queuing
+ * routines: the qp s_lock and the exp_lock.
+ *
+ * Since the tid space allocation is called from
+ * the send engine, the qp s_lock is already held.
+ *
+ * The allocation routines will get the exp_lock.
+ *
+ * The first_qp() call is provided to allow the head of
+ * the rcd wait queue to be fetched under the exp_lock and
+ * followed by a drop of the exp_lock.
+ *
+ * Any qp in the wait list will have the qp reference count held
+ * to hold the qp in memory.
+ */
+
+/*
+ * return head of rcd wait list
+ *
+ * Must hold the exp_lock.
+ *
+ * Get a reference to the QP to hold the QP in memory.
+ *
+ * The caller must release the reference when the returned
+ * qp pointer is no longer being used.
+ */
+static struct rvt_qp *first_qp(struct hfi1_ctxtdata *rcd,
+ struct tid_queue *queue)
+ __must_hold(&rcd->exp_lock)
+{
+ struct hfi1_qp_priv *priv;
+
+ lockdep_assert_held(&rcd->exp_lock);
+ priv = list_first_entry_or_null(&queue->queue_head,
+ struct hfi1_qp_priv,
+ tid_wait);
+ if (!priv)
+ return NULL;
+ rvt_get_qp(priv->owner);
+ return priv->owner;
+}
+
+/**
+ * kernel_tid_waiters - determine rcd wait
+ * @rcd: the receive context
+ * @queue: the queue to operate on
+ * @qp: the head of the qp being processed
+ *
+ * This routine will return false IFF
+ * the list is empty or the head of the
+ * list is the indicated qp.
+ *
+ * Must hold the qp s_lock and the exp_lock.
+ *
+ * Return:
+ * false if either of the conditions below is satisfied:
+ * 1. The list is empty or
+ * 2. The indicated qp is at the head of the list and the
+ * HFI1_S_WAIT_TID_SPACE bit is set in qp->s_flags.
+ * true is returned otherwise.
+ */
+static bool kernel_tid_waiters(struct hfi1_ctxtdata *rcd,
+ struct tid_queue *queue, struct rvt_qp *qp)
+ __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
+{
+ struct rvt_qp *fqp;
+ bool ret = true;
+
+ lockdep_assert_held(&qp->s_lock);
+ lockdep_assert_held(&rcd->exp_lock);
+ fqp = first_qp(rcd, queue);
+ if (!fqp || (fqp == qp && (qp->s_flags & HFI1_S_WAIT_TID_SPACE)))
+ ret = false;
+ rvt_put_qp(fqp);
+ return ret;
+}
+
+/**
+ * dequeue_tid_waiter - dequeue the qp from the list
+ * @rcd: the receive context
+ * @queue: the queue to operate on
+ * @qp: the qp to remove from the wait list
+ *
+ * This routine removes the indicated qp from the
+ * wait list if it is there.
+ *
+ * This should be done after the hardware flow and
+ * tid array resources have been allocated.
+ *
+ * Must hold the qp s_lock and the rcd exp_lock.
+ *
+ * It assumes the s_lock to protect the s_flags
+ * field and to reliably test the HFI1_S_WAIT_TID_SPACE flag.
+ */
+static void dequeue_tid_waiter(struct hfi1_ctxtdata *rcd,
+ struct tid_queue *queue, struct rvt_qp *qp)
+ __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ lockdep_assert_held(&qp->s_lock);
+ lockdep_assert_held(&rcd->exp_lock);
+ if (list_empty(&priv->tid_wait))
+ return;
+ list_del_init(&priv->tid_wait);
+ qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE;
+ queue->dequeue++;
+ rvt_put_qp(qp);
+}
+
+/**
+ * queue_qp_for_tid_wait - suspend QP on tid space
+ * @rcd: the receive context
+ * @queue: the queue to operate on
+ * @qp: the qp
+ *
+ * The qp is inserted at the tail of the rcd
+ * wait queue and the HFI1_S_WAIT_TID_SPACE s_flag is set.
+ *
+ * Must hold the qp s_lock and the exp_lock.
+ */
+static void queue_qp_for_tid_wait(struct hfi1_ctxtdata *rcd,
+ struct tid_queue *queue, struct rvt_qp *qp)
+ __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ lockdep_assert_held(&qp->s_lock);
+ lockdep_assert_held(&rcd->exp_lock);
+ if (list_empty(&priv->tid_wait)) {
+ qp->s_flags |= HFI1_S_WAIT_TID_SPACE;
+ list_add_tail(&priv->tid_wait, &queue->queue_head);
+ priv->tid_enqueue = ++queue->enqueue;
+ rcd->dd->verbs_dev.n_tidwait++;
+ trace_hfi1_qpsleep(qp, HFI1_S_WAIT_TID_SPACE);
+ rvt_get_qp(qp);
+ }
+}
+
+/**
+ * __trigger_tid_waiter - trigger tid waiter
+ * @qp: the qp
+ *
+ * This is a private entry point to schedule the qp
+ * assuming the caller is holding the qp->s_lock.
+ */
+static void __trigger_tid_waiter(struct rvt_qp *qp)
+ __must_hold(&qp->s_lock)
+{
+ lockdep_assert_held(&qp->s_lock);
+ if (!(qp->s_flags & HFI1_S_WAIT_TID_SPACE))
+ return;
+ trace_hfi1_qpwakeup(qp, HFI1_S_WAIT_TID_SPACE);
+ hfi1_schedule_send(qp);
+}
+
+/**
+ * tid_rdma_schedule_tid_wakeup - schedule wakeup for a qp
+ * @qp: the qp
+ *
+ * trigger a schedule of a waiting qp in a deadlock
+ * safe manner. The qp reference is held prior
+ * to this call via first_qp().
+ *
+ * If the qp trigger was already scheduled (!rval)
+ * the reference is dropped, otherwise the resume
+ * or the destroy cancel will dispatch the reference.
+ */
+static void tid_rdma_schedule_tid_wakeup(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv;
+ struct hfi1_ibport *ibp;
+ struct hfi1_pportdata *ppd;
+ struct hfi1_devdata *dd;
+ bool rval;
+
+ if (!qp)
+ return;
+
+ priv = qp->priv;
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ ppd = ppd_from_ibp(ibp);
+ dd = dd_from_ibdev(qp->ibqp.device);
+
+ rval = queue_work_on(priv->s_sde ?
+ priv->s_sde->cpu :
+ cpumask_first(cpumask_of_node(dd->node)),
+ ppd->hfi1_wq,
+ &priv->tid_rdma.trigger_work);
+ if (!rval)
+ rvt_put_qp(qp);
+}
+
+/**
+ * tid_rdma_trigger_resume - field a trigger work request
+ * @work: the work item
+ *
+ * Complete the off qp trigger processing by directly
+ * calling the progress routine.
+ */
+static void tid_rdma_trigger_resume(struct work_struct *work)
+{
+ struct tid_rdma_qp_params *tr;
+ struct hfi1_qp_priv *priv;
+ struct rvt_qp *qp;
+
+ tr = container_of(work, struct tid_rdma_qp_params, trigger_work);
+ priv = container_of(tr, struct hfi1_qp_priv, tid_rdma);
+ qp = priv->owner;
+ spin_lock_irq(&qp->s_lock);
+ if (qp->s_flags & HFI1_S_WAIT_TID_SPACE) {
+ spin_unlock_irq(&qp->s_lock);
+ hfi1_do_send(priv->owner, true);
+ } else {
+ spin_unlock_irq(&qp->s_lock);
+ }
+ rvt_put_qp(qp);
+}
+
+/*
+ * tid_rdma_flush_wait - unwind any tid space wait
+ *
+ * This is called when resetting a qp to
+ * allow a destroy or reset to get rid
+ * of any tid space linkage and reference counts.
+ */
+static void _tid_rdma_flush_wait(struct rvt_qp *qp, struct tid_queue *queue)
+ __must_hold(&qp->s_lock)
+{
+ struct hfi1_qp_priv *priv;
+
+ if (!qp)
+ return;
+ lockdep_assert_held(&qp->s_lock);
+ priv = qp->priv;
+ qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE;
+ spin_lock(&priv->rcd->exp_lock);
+ if (!list_empty(&priv->tid_wait)) {
+ list_del_init(&priv->tid_wait);
+ qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE;
+ queue->dequeue++;
+ rvt_put_qp(qp);
+ }
+ spin_unlock(&priv->rcd->exp_lock);
+}
+
+void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp)
+ __must_hold(&qp->s_lock)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ _tid_rdma_flush_wait(qp, &priv->rcd->flow_queue);
+ _tid_rdma_flush_wait(qp, &priv->rcd->rarr_queue);
+}
+
+/* Flow functions */
+/**
+ * kern_reserve_flow - allocate a hardware flow
+ * @rcd: the context to use for allocation
+ * @last: the index of the preferred flow. Use RXE_NUM_TID_FLOWS to
+ * signify "don't care".
+ *
+ * Use a bit mask based allocation to reserve a hardware
+ * flow for use in receiving KDETH data packets. If a preferred flow is
+ * specified the function will attempt to reserve that flow again, if
+ * available.
+ *
+ * The exp_lock must be held.
+ *
+ * Return:
+ * On success: a value between 0 and RXE_NUM_TID_FLOWS - 1
+ * On failure: -EAGAIN
+ */
+static int kern_reserve_flow(struct hfi1_ctxtdata *rcd, int last)
+ __must_hold(&rcd->exp_lock)
+{
+ int nr;
+
+ /* Attempt to reserve the preferred flow index */
+ if (last >= 0 && last < RXE_NUM_TID_FLOWS &&
+ !test_and_set_bit(last, &rcd->flow_mask))
+ return last;
+
+ nr = ffz(rcd->flow_mask);
+ BUILD_BUG_ON(RXE_NUM_TID_FLOWS >=
+ (sizeof(rcd->flow_mask) * BITS_PER_BYTE));
+ if (nr > (RXE_NUM_TID_FLOWS - 1))
+ return -EAGAIN;
+ set_bit(nr, &rcd->flow_mask);
+ return nr;
+}
+
+static void kern_set_hw_flow(struct hfi1_ctxtdata *rcd, u32 generation,
+ u32 flow_idx)
+{
+ u64 reg;
+
+ reg = ((u64)generation << HFI1_KDETH_BTH_SEQ_SHIFT) |
+ RCV_TID_FLOW_TABLE_CTRL_FLOW_VALID_SMASK |
+ RCV_TID_FLOW_TABLE_CTRL_KEEP_AFTER_SEQ_ERR_SMASK |
+ RCV_TID_FLOW_TABLE_CTRL_KEEP_ON_GEN_ERR_SMASK |
+ RCV_TID_FLOW_TABLE_STATUS_SEQ_MISMATCH_SMASK |
+ RCV_TID_FLOW_TABLE_STATUS_GEN_MISMATCH_SMASK;
+
+ if (generation != KERN_GENERATION_RESERVED)
+ reg |= RCV_TID_FLOW_TABLE_CTRL_HDR_SUPP_EN_SMASK;
+
+ write_uctxt_csr(rcd->dd, rcd->ctxt,
+ RCV_TID_FLOW_TABLE + 8 * flow_idx, reg);
+}
+
+static u32 kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx)
+ __must_hold(&rcd->exp_lock)
+{
+ u32 generation = rcd->flows[flow_idx].generation;
+
+ kern_set_hw_flow(rcd, generation, flow_idx);
+ return generation;
+}
+
+static u32 kern_flow_generation_next(u32 gen)
+{
+ u32 generation = mask_generation(gen + 1);
+
+ if (generation == KERN_GENERATION_RESERVED)
+ generation = mask_generation(generation + 1);
+ return generation;
+}
+
+static void kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx)
+ __must_hold(&rcd->exp_lock)
+{
+ rcd->flows[flow_idx].generation =
+ kern_flow_generation_next(rcd->flows[flow_idx].generation);
+ kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, flow_idx);
+}
+
+int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
+ struct tid_flow_state *fs = &qpriv->flow_state;
+ struct rvt_qp *fqp;
+ unsigned long flags;
+ int ret = 0;
+
+ /* The QP already has an allocated flow */
+ if (fs->index != RXE_NUM_TID_FLOWS)
+ return ret;
+
+ spin_lock_irqsave(&rcd->exp_lock, flags);
+ if (kernel_tid_waiters(rcd, &rcd->flow_queue, qp))
+ goto queue;
+
+ ret = kern_reserve_flow(rcd, fs->last_index);
+ if (ret < 0)
+ goto queue;
+ fs->index = ret;
+ fs->last_index = fs->index;
+
+ /* Generation received in a RESYNC overrides default flow generation */
+ if (fs->generation != KERN_GENERATION_RESERVED)
+ rcd->flows[fs->index].generation = fs->generation;
+ fs->generation = kern_setup_hw_flow(rcd, fs->index);
+ fs->psn = 0;
+ dequeue_tid_waiter(rcd, &rcd->flow_queue, qp);
+ /* get head before dropping lock */
+ fqp = first_qp(rcd, &rcd->flow_queue);
+ spin_unlock_irqrestore(&rcd->exp_lock, flags);
+
+ tid_rdma_schedule_tid_wakeup(fqp);
+ return 0;
+queue:
+ queue_qp_for_tid_wait(rcd, &rcd->flow_queue, qp);
+ spin_unlock_irqrestore(&rcd->exp_lock, flags);
+ return -EAGAIN;
+}
+
+void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
+ struct tid_flow_state *fs = &qpriv->flow_state;
+ struct rvt_qp *fqp;
+ unsigned long flags;
+
+ if (fs->index >= RXE_NUM_TID_FLOWS)
+ return;
+ spin_lock_irqsave(&rcd->exp_lock, flags);
+ kern_clear_hw_flow(rcd, fs->index);
+ clear_bit(fs->index, &rcd->flow_mask);
+ fs->index = RXE_NUM_TID_FLOWS;
+ fs->psn = 0;
+ fs->generation = KERN_GENERATION_RESERVED;
+
+ /* get head before dropping lock */
+ fqp = first_qp(rcd, &rcd->flow_queue);
+ spin_unlock_irqrestore(&rcd->exp_lock, flags);
+
+ if (fqp == qp) {
+ __trigger_tid_waiter(fqp);
+ rvt_put_qp(fqp);
+ } else {
+ tid_rdma_schedule_tid_wakeup(fqp);
+ }
+}
+
+void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd)
+{
+ int i;
+
+ for (i = 0; i < RXE_NUM_TID_FLOWS; i++) {
+ rcd->flows[i].generation = mask_generation(get_random_u32());
+ kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, i);
+ }
+}
+
+/* TID allocation functions */
+static u8 trdma_pset_order(struct tid_rdma_pageset *s)
+{
+ u8 count = s->count;
+
+ return ilog2(count) + 1;
+}
+
+/**
+ * tid_rdma_find_phys_blocks_4k - get groups based on mr info
+ * @flow: overall info for a TID RDMA segment
+ * @pages: pointer to an array of page structs
+ * @npages: number of pages
+ * @list: page set array to return
+ *
+ * This routine returns the number of groups associated with
+ * the current sge information. This implementation is based
+ * on the expected receive find_phys_blocks() adjusted to
+ * use the MR information vs. the pfn.
+ *
+ * Return:
+ * the number of RcvArray entries
+ */
+static u32 tid_rdma_find_phys_blocks_4k(struct tid_rdma_flow *flow,
+ struct page **pages,
+ u32 npages,
+ struct tid_rdma_pageset *list)
+{
+ u32 pagecount, pageidx, setcount = 0, i;
+ void *vaddr, *this_vaddr;
+
+ if (!npages)
+ return 0;
+
+ /*
+ * Look for sets of physically contiguous pages in the user buffer.
+ * This will allow us to optimize Expected RcvArray entry usage by
+ * using the bigger supported sizes.
+ */
+ vaddr = page_address(pages[0]);
+ trace_hfi1_tid_flow_page(flow->req->qp, flow, 0, 0, 0, vaddr);
+ for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
+ this_vaddr = i < npages ? page_address(pages[i]) : NULL;
+ trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 0, 0,
+ this_vaddr);
+ /*
+ * If the vaddr's are not sequential, pages are not physically
+ * contiguous.
+ */
+ if (this_vaddr != (vaddr + PAGE_SIZE)) {
+ /*
+ * At this point we have to loop over the set of
+ * physically contiguous pages and break them down into
+ * sizes supported by the HW.
+ * There are two main constraints:
+ * 1. The max buffer size is MAX_EXPECTED_BUFFER.
+ * If the total set size is bigger than that
+ * program only a MAX_EXPECTED_BUFFER chunk.
+ * 2. The buffer size has to be a power of two. If
+ * it is not, round down to the closest power of
+ * 2 and program that size.
+ */
+ while (pagecount) {
+ int maxpages = pagecount;
+ u32 bufsize = pagecount * PAGE_SIZE;
+
+ if (bufsize > MAX_EXPECTED_BUFFER)
+ maxpages =
+ MAX_EXPECTED_BUFFER >>
+ PAGE_SHIFT;
+ else if (!is_power_of_2(bufsize))
+ maxpages =
+ rounddown_pow_of_two(bufsize) >>
+ PAGE_SHIFT;
+
+ list[setcount].idx = pageidx;
+ list[setcount].count = maxpages;
+ trace_hfi1_tid_pageset(flow->req->qp, setcount,
+ list[setcount].idx,
+ list[setcount].count);
+ pagecount -= maxpages;
+ pageidx += maxpages;
+ setcount++;
+ }
+ pageidx = i;
+ pagecount = 1;
+ vaddr = this_vaddr;
+ } else {
+ vaddr += PAGE_SIZE;
+ pagecount++;
+ }
+ }
+ /* ensure we always return an even number of sets */
+ if (setcount & 1)
+ list[setcount++].count = 0;
+ return setcount;
+}
+
+/**
+ * tid_flush_pages - dump out pages into pagesets
+ * @list: list of pagesets
+ * @idx: pointer to current page index
+ * @pages: number of pages to dump
+ * @sets: current number of pagesets
+ *
+ * This routine flushes out accumulated pages.
+ *
+ * To ensure an even number of sets the
+ * code may add a filler.
+ *
+ * This can happen when pages is not
+ * a power of 2 or pages is a power of 2
+ * less than the maximum pages.
+ *
+ * Return:
+ * The new number of sets
+ */
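+/*
+ * For example, seven accumulated pages (with MAX_EXPECTED_PAGES of at
+ * least four) are flushed as sets of 4, 2 and 1 pages, plus one
+ * zero-count filler set to keep the set count even.
+ */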
+
+static u32 tid_flush_pages(struct tid_rdma_pageset *list,
+ u32 *idx, u32 pages, u32 sets)
+{
+ while (pages) {
+ u32 maxpages = pages;
+
+ if (maxpages > MAX_EXPECTED_PAGES)
+ maxpages = MAX_EXPECTED_PAGES;
+ else if (!is_power_of_2(maxpages))
+ maxpages = rounddown_pow_of_two(maxpages);
+ list[sets].idx = *idx;
+ list[sets++].count = maxpages;
+ *idx += maxpages;
+ pages -= maxpages;
+ }
+ /* might need a filler */
+ if (sets & 1)
+ list[sets++].count = 0;
+ return sets;
+}
+
+/**
+ * tid_rdma_find_phys_blocks_8k - get groups based on mr info
+ * @flow: overall info for a TID RDMA segment
+ * @pages: pointer to an array of page structs
+ * @npages: number of pages
+ * @list: page set array to return
+ *
+ * This routine parses an array of pages to compute pagesets
+ * in an 8k compatible way.
+ *
+ * Pages are tested two at a time: i and i + 1 for contiguity,
+ * and then i - 1 and i for contiguity with the previous pair.
+ *
+ * If any condition is false, any accumulated pages are flushed and
+ * v0,v1 are emitted as separate PAGE_SIZE pagesets
+ *
+ * Otherwise, the current 8k is totaled for a future flush.
+ *
+ * Return:
+ * The number of pagesets
+ * list set with the returned number of pagesets
+ *
+ */
+static u32 tid_rdma_find_phys_blocks_8k(struct tid_rdma_flow *flow,
+ struct page **pages,
+ u32 npages,
+ struct tid_rdma_pageset *list)
+{
+ u32 idx, sets = 0, i;
+ u32 pagecnt = 0;
+ void *v0, *v1, *vm1;
+
+ if (!npages)
+ return 0;
+ for (idx = 0, i = 0, vm1 = NULL; i < npages; i += 2) {
+ /* get a new v0 */
+ v0 = page_address(pages[i]);
+ trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 0, v0);
+ v1 = i + 1 < npages ?
+ page_address(pages[i + 1]) : NULL;
+ trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 1, v1);
+ /* compare i, i + 1 vaddr */
+ if (v1 != (v0 + PAGE_SIZE)) {
+ /* flush out pages */
+ sets = tid_flush_pages(list, &idx, pagecnt, sets);
+ /* output v0,v1 as two pagesets */
+ list[sets].idx = idx++;
+ list[sets++].count = 1;
+ if (v1) {
+ list[sets].count = 1;
+ list[sets++].idx = idx++;
+ } else {
+ list[sets++].count = 0;
+ }
+ vm1 = NULL;
+ pagecnt = 0;
+ continue;
+ }
+ /* i,i+1 consecutive, look at i-1,i */
+ if (vm1 && v0 != (vm1 + PAGE_SIZE)) {
+ /* flush out pages */
+ sets = tid_flush_pages(list, &idx, pagecnt, sets);
+ pagecnt = 0;
+ }
+ /* pages will always be a multiple of 8k */
+ pagecnt += 2;
+ /* save i-1 */
+ vm1 = v1;
+ /* move to next pair */
+ }
+ /* dump residual pages at end */
+ sets = tid_flush_pages(list, &idx, npages - idx, sets);
+ /* by design cannot be odd sets */
+ WARN_ON(sets & 1);
+ return sets;
+}
+
+/*
+ * Find pages for one segment of a sge array represented by @ss. The function
+ * does not check the sge; the sge must have been checked for alignment with a
+ * prior call to hfi1_kern_trdma_ok. Other sge checking is done as part of
+ * rvt_lkey_ok and rvt_rkey_ok. Also, the function only modifies the local sge
+ * copy maintained in @ss->sge, the original sge is not modified.
+ *
+ * Unlike IB RDMA WRITE, we can't decrement ss->num_sge here because we are not
+ * releasing the MR reference count at the same time. Otherwise, we'll "leak"
+ * references to the MR. This difference requires that we keep track of progress
+ * into the sg_list. This is done by the cur_seg cursor in the tid_rdma_request
+ * structure.
+ */
+static u32 kern_find_pages(struct tid_rdma_flow *flow,
+ struct page **pages,
+ struct rvt_sge_state *ss, bool *last)
+{
+ struct tid_rdma_request *req = flow->req;
+ struct rvt_sge *sge = &ss->sge;
+ u32 length = flow->req->seg_len;
+ u32 len = PAGE_SIZE;
+ u32 i = 0;
+
+ while (length && req->isge < ss->num_sge) {
+ pages[i++] = virt_to_page(sge->vaddr);
+
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (!sge->sge_length) {
+ if (++req->isge < ss->num_sge)
+ *sge = ss->sg_list[req->isge - 1];
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= RVT_SEGSZ) {
+ ++sge->m;
+ sge->n = 0;
+ }
+ sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ length -= len;
+ }
+
+ flow->length = flow->req->seg_len - length;
+ *last = req->isge != ss->num_sge;
+ return i;
+}
+
+static void dma_unmap_flow(struct tid_rdma_flow *flow)
+{
+ struct hfi1_devdata *dd;
+ int i;
+ struct tid_rdma_pageset *pset;
+
+ dd = flow->req->rcd->dd;
+ for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets;
+ i++, pset++) {
+ if (pset->count && pset->addr) {
+ dma_unmap_page(&dd->pcidev->dev,
+ pset->addr,
+ PAGE_SIZE * pset->count,
+ DMA_FROM_DEVICE);
+ pset->mapped = 0;
+ }
+ }
+}
+
+static int dma_map_flow(struct tid_rdma_flow *flow, struct page **pages)
+{
+ int i;
+ struct hfi1_devdata *dd = flow->req->rcd->dd;
+ struct tid_rdma_pageset *pset;
+
+ for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets;
+ i++, pset++) {
+ if (pset->count) {
+ pset->addr = dma_map_page(&dd->pcidev->dev,
+ pages[pset->idx],
+ 0,
+ PAGE_SIZE * pset->count,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(&dd->pcidev->dev, pset->addr)) {
+ dma_unmap_flow(flow);
+ return -ENOMEM;
+ }
+ pset->mapped = 1;
+ }
+ }
+ return 0;
+}
+
+static inline bool dma_mapped(struct tid_rdma_flow *flow)
+{
+ return !!flow->pagesets[0].mapped;
+}
+
+/*
+ * Get pages pointers and identify contiguous physical memory chunks for a
+ * segment. All segments are of length flow->req->seg_len.
+ */
+static int kern_get_phys_blocks(struct tid_rdma_flow *flow,
+ struct page **pages,
+ struct rvt_sge_state *ss, bool *last)
+{
+ u8 npages;
+
+ /* Reuse previously computed pagesets, if any */
+ if (flow->npagesets) {
+ trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head,
+ flow);
+ if (!dma_mapped(flow))
+ return dma_map_flow(flow, pages);
+ return 0;
+ }
+
+ npages = kern_find_pages(flow, pages, ss, last);
+
+ if (flow->req->qp->pmtu == enum_to_mtu(OPA_MTU_4096))
+ flow->npagesets =
+ tid_rdma_find_phys_blocks_4k(flow, pages, npages,
+ flow->pagesets);
+ else
+ flow->npagesets =
+ tid_rdma_find_phys_blocks_8k(flow, pages, npages,
+ flow->pagesets);
+
+ return dma_map_flow(flow, pages);
+}
+
+static inline void kern_add_tid_node(struct tid_rdma_flow *flow,
+ struct hfi1_ctxtdata *rcd, char *s,
+ struct tid_group *grp, u8 cnt)
+{
+ struct kern_tid_node *node = &flow->tnode[flow->tnode_cnt++];
+
+ WARN_ON_ONCE(flow->tnode_cnt >=
+ (TID_RDMA_MAX_SEGMENT_SIZE >> PAGE_SHIFT));
+ if (WARN_ON_ONCE(cnt & 1))
+ dd_dev_err(rcd->dd,
+ "unexpected odd allocation cnt %u map 0x%x used %u",
+ cnt, grp->map, grp->used);
+
+ node->grp = grp;
+ node->map = grp->map;
+ node->cnt = cnt;
+ trace_hfi1_tid_node_add(flow->req->qp, s, flow->tnode_cnt - 1,
+ grp->base, grp->map, grp->used, cnt);
+}
+
+/*
+ * Try to allocate pageset_count TID's from TID groups for a context
+ *
+ * This function allocates TID's without moving groups between lists or
+ * modifying grp->map. This is done as follows, being cognizant of the lists
+ * between which the TID groups will move:
+ * 1. First allocate complete groups of 8 TID's since this is more efficient,
+ * these groups will move from group->full without affecting used
+ * 2. If more TID's are needed allocate from used (will move from used->full or
+ * stay in used)
+ * 3. If we still don't have the required number of TID's go back and look again
+ * at a complete group (will move from group->used)
+ */
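+/*
+ * Illustrative example with a group size of 8: a 20-TID request takes
+ * two complete groups (16 TIDs) in step 1 and the remaining 4 TIDs
+ * from partially used groups in step 2.
+ */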
+static int kern_alloc_tids(struct tid_rdma_flow *flow)
+{
+ struct hfi1_ctxtdata *rcd = flow->req->rcd;
+ struct hfi1_devdata *dd = rcd->dd;
+ u32 ngroups, pageidx = 0;
+ struct tid_group *group = NULL, *used;
+ u8 use;
+
+ flow->tnode_cnt = 0;
+ ngroups = flow->npagesets / dd->rcv_entries.group_size;
+ if (!ngroups)
+ goto used_list;
+
+ /* First look at complete groups */
+ list_for_each_entry(group, &rcd->tid_group_list.list, list) {
+ kern_add_tid_node(flow, rcd, "complete groups", group,
+ group->size);
+
+ pageidx += group->size;
+ if (!--ngroups)
+ break;
+ }
+
+ if (pageidx >= flow->npagesets)
+ goto ok;
+
+used_list:
+ /* Now look at partially used groups */
+ list_for_each_entry(used, &rcd->tid_used_list.list, list) {
+ use = min_t(u32, flow->npagesets - pageidx,
+ used->size - used->used);
+ kern_add_tid_node(flow, rcd, "used groups", used, use);
+
+ pageidx += use;
+ if (pageidx >= flow->npagesets)
+ goto ok;
+ }
+
+ /*
+ * Look again at a complete group, continuing from where we left.
+ * However, if we are at the head, we have reached the end of the
+ * complete groups list from the first loop above
+ */
+ if (group && &group->list == &rcd->tid_group_list.list)
+ goto bail_eagain;
+ group = list_prepare_entry(group, &rcd->tid_group_list.list,
+ list);
+ if (list_is_last(&group->list, &rcd->tid_group_list.list))
+ goto bail_eagain;
+ group = list_next_entry(group, list);
+ use = min_t(u32, flow->npagesets - pageidx, group->size);
+ kern_add_tid_node(flow, rcd, "complete continue", group, use);
+ pageidx += use;
+ if (pageidx >= flow->npagesets)
+ goto ok;
+bail_eagain:
+ trace_hfi1_msg_alloc_tids(flow->req->qp, " insufficient tids: needed ",
+ (u64)flow->npagesets);
+ return -EAGAIN;
+ok:
+ return 0;
+}
+
+static void kern_program_rcv_group(struct tid_rdma_flow *flow, int grp_num,
+ u32 *pset_idx)
+{
+ struct hfi1_ctxtdata *rcd = flow->req->rcd;
+ struct hfi1_devdata *dd = rcd->dd;
+ struct kern_tid_node *node = &flow->tnode[grp_num];
+ struct tid_group *grp = node->grp;
+ struct tid_rdma_pageset *pset;
+ u32 pmtu_pg = flow->req->qp->pmtu >> PAGE_SHIFT;
+ u32 rcventry, npages = 0, pair = 0, tidctrl;
+ u8 i, cnt = 0;
+
+ for (i = 0; i < grp->size; i++) {
+ rcventry = grp->base + i;
+
+ if (node->map & BIT(i) || cnt >= node->cnt) {
+ rcv_array_wc_fill(dd, rcventry);
+ continue;
+ }
+ pset = &flow->pagesets[(*pset_idx)++];
+ if (pset->count) {
+ hfi1_put_tid(dd, rcventry, PT_EXPECTED,
+ pset->addr, trdma_pset_order(pset));
+ } else {
+ hfi1_put_tid(dd, rcventry, PT_INVALID, 0, 0);
+ }
+ npages += pset->count;
+
+ rcventry -= rcd->expected_base;
+ tidctrl = pair ? 0x3 : rcventry & 0x1 ? 0x2 : 0x1;
+ /*
+ * A single TID entry will be used to cover a rcvarray pair (with
+ * tidctrl 0x3), if ALL of these are true: (a) the bit pos is even,
+ * (b) the group map shows the current and the next bits as free,
+ * indicating two consecutive rcvarray entries are available, and (c)
+ * we actually need 2 more entries.
+ */
+ pair = !(i & 0x1) && !((node->map >> i) & 0x3) &&
+ node->cnt >= cnt + 2;
+ if (!pair) {
+ if (!pset->count)
+ tidctrl = 0x1;
+ flow->tid_entry[flow->tidcnt++] =
+ EXP_TID_SET(IDX, rcventry >> 1) |
+ EXP_TID_SET(CTRL, tidctrl) |
+ EXP_TID_SET(LEN, npages);
+ trace_hfi1_tid_entry_alloc(/* entry */
+ flow->req->qp, flow->tidcnt - 1,
+ flow->tid_entry[flow->tidcnt - 1]);
+
+ /* Efficient DIV_ROUND_UP(npages, pmtu_pg) */
+ flow->npkts += (npages + pmtu_pg - 1) >> ilog2(pmtu_pg);
+ npages = 0;
+ }
+
+ if (grp->used == grp->size - 1)
+ tid_group_move(grp, &rcd->tid_used_list,
+ &rcd->tid_full_list);
+ else if (!grp->used)
+ tid_group_move(grp, &rcd->tid_group_list,
+ &rcd->tid_used_list);
+
+ grp->used++;
+ grp->map |= BIT(i);
+ cnt++;
+ }
+}
+
+static void kern_unprogram_rcv_group(struct tid_rdma_flow *flow, int grp_num)
+{
+ struct hfi1_ctxtdata *rcd = flow->req->rcd;
+ struct hfi1_devdata *dd = rcd->dd;
+ struct kern_tid_node *node = &flow->tnode[grp_num];
+ struct tid_group *grp = node->grp;
+ u32 rcventry;
+ u8 i, cnt = 0;
+
+ for (i = 0; i < grp->size; i++) {
+ rcventry = grp->base + i;
+
+ if (node->map & BIT(i) || cnt >= node->cnt) {
+ rcv_array_wc_fill(dd, rcventry);
+ continue;
+ }
+
+ hfi1_put_tid(dd, rcventry, PT_INVALID, 0, 0);
+
+ grp->used--;
+ grp->map &= ~BIT(i);
+ cnt++;
+
+ if (grp->used == grp->size - 1)
+ tid_group_move(grp, &rcd->tid_full_list,
+ &rcd->tid_used_list);
+ else if (!grp->used)
+ tid_group_move(grp, &rcd->tid_used_list,
+ &rcd->tid_group_list);
+ }
+ if (WARN_ON_ONCE(cnt & 1)) {
+ struct hfi1_ctxtdata *rcd = flow->req->rcd;
+ struct hfi1_devdata *dd = rcd->dd;
+
+ dd_dev_err(dd, "unexpected odd free cnt %u map 0x%x used %u",
+ cnt, grp->map, grp->used);
+ }
+}
+
+static void kern_program_rcvarray(struct tid_rdma_flow *flow)
+{
+ u32 pset_idx = 0;
+ int i;
+
+ flow->npkts = 0;
+ flow->tidcnt = 0;
+ for (i = 0; i < flow->tnode_cnt; i++)
+ kern_program_rcv_group(flow, i, &pset_idx);
+ trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, flow);
+}
+
+/**
+ * hfi1_kern_exp_rcv_setup() - setup TID's and flow for one segment of a
+ * TID RDMA request
+ *
+ * @req: TID RDMA request for which the segment/flow is being set up
+ * @ss: sge state, maintains state across successive segments of a sge
+ * @last: set to true after the last sge segment has been processed
+ *
+ * This function
+ * (1) finds a free flow entry in the flow circular buffer
+ * (2) finds pages and contiguous physical chunks constituting one segment
+ * of an sge
+ * (3) allocates TID group entries for those chunks
+ * (4) programs rcvarray entries in the hardware corresponding to those
+ * TID's
+ * (5) computes a tidarray with formatted TID entries which can be sent
+ * to the sender
+ * (6) Reserves and programs HW flows.
+ * (7) It also manages queueing the QP when TID/flow resources are not
+ * available.
+ *
+ * @req points to struct tid_rdma_request of which the segments are a part. The
+ * function uses qp, rcd and seg_len members of @req. In the absence of errors,
+ * req->flow_idx is the index of the flow which has been prepared in this
+ * invocation of function call. With flow = &req->flows[req->flow_idx],
+ * flow->tid_entry contains the TID array which the sender can use for TID RDMA
+ * sends and flow->npkts contains number of packets required to send the
+ * segment.
+ *
+ * hfi1_check_sge_align should be called prior to calling this function and if
+ * it signals error TID RDMA cannot be used for this sge and this function
+ * should not be called.
+ *
+ * For the queuing, caller must hold the flow->req->qp s_lock from the send
+ * engine and the function will procure the exp_lock.
+ *
+ * Return:
+ * The function returns -EAGAIN if sufficient number of TID/flow resources to
+ * map the segment could not be allocated. In this case the function should be
+ * called again with previous arguments to retry the TID allocation. There are
+ * no other error returns. The function returns 0 on success.
+ */
+int hfi1_kern_exp_rcv_setup(struct tid_rdma_request *req,
+ struct rvt_sge_state *ss, bool *last)
+ __must_hold(&req->qp->s_lock)
+{
+ struct tid_rdma_flow *flow = &req->flows[req->setup_head];
+ struct hfi1_ctxtdata *rcd = req->rcd;
+ struct hfi1_qp_priv *qpriv = req->qp->priv;
+ unsigned long flags;
+ struct rvt_qp *fqp;
+ u16 clear_tail = req->clear_tail;
+
+ lockdep_assert_held(&req->qp->s_lock);
+ /*
+ * We return error if either (a) we don't have space in the flow
+ * circular buffer, or (b) we already have max entries in the buffer.
+ * Max entries depend on the type of request we are processing and the
+ * negotiated TID RDMA parameters.
+ */
+ if (!CIRC_SPACE(req->setup_head, clear_tail, MAX_FLOWS) ||
+ CIRC_CNT(req->setup_head, clear_tail, MAX_FLOWS) >=
+ req->n_flows)
+ return -EINVAL;
+
+ /*
+ * Get pages, identify contiguous physical memory chunks for the segment.
+ * If we cannot determine a DMA address mapping we will treat it just
+ * as if we ran out of space above.
+ */
+ if (kern_get_phys_blocks(flow, qpriv->pages, ss, last)) {
+ hfi1_wait_kmem(flow->req->qp);
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&rcd->exp_lock, flags);
+ if (kernel_tid_waiters(rcd, &rcd->rarr_queue, flow->req->qp))
+ goto queue;
+
+ /*
+ * At this point we know the number of pagesets and hence the number of
+ * TID's to map the segment. Allocate the TID's from the TID groups. If
+ * we cannot allocate the required number we exit and try again later
+ */
+ if (kern_alloc_tids(flow))
+ goto queue;
+ /*
+ * Finally program the TID entries with the pagesets, compute the
+ * tidarray and enable the HW flow
+ */
+ kern_program_rcvarray(flow);
+
+ /*
+ * Setup the flow state with relevant information.
+ * This information is used for tracking the sequence of data packets
+ * for the segment.
+ * The flow is setup here as this is the most accurate time and place
+ * to do so. Doing at a later time runs the risk of the flow data in
+ * qpriv getting out of sync.
+ */
+ memset(&flow->flow_state, 0x0, sizeof(flow->flow_state));
+ flow->idx = qpriv->flow_state.index;
+ flow->flow_state.generation = qpriv->flow_state.generation;
+ flow->flow_state.spsn = qpriv->flow_state.psn;
+ flow->flow_state.lpsn = flow->flow_state.spsn + flow->npkts - 1;
+ flow->flow_state.r_next_psn =
+ full_flow_psn(flow, flow->flow_state.spsn);
+ qpriv->flow_state.psn += flow->npkts;
+
+ dequeue_tid_waiter(rcd, &rcd->rarr_queue, flow->req->qp);
+ /* get head before dropping lock */
+ fqp = first_qp(rcd, &rcd->rarr_queue);
+ spin_unlock_irqrestore(&rcd->exp_lock, flags);
+ tid_rdma_schedule_tid_wakeup(fqp);
+
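+ /* Publish the prepared flow by advancing the circular-buffer head. */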
+ req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1);
+ return 0;
+queue:
+ queue_qp_for_tid_wait(rcd, &rcd->rarr_queue, flow->req->qp);
+ spin_unlock_irqrestore(&rcd->exp_lock, flags);
+ return -EAGAIN;
+}
+
+static void hfi1_tid_rdma_reset_flow(struct tid_rdma_flow *flow)
+{
+ flow->npagesets = 0;
+}
+
+/*
+ * This function is called after one segment has been successfully sent to
+ * release the flow and TID HW/SW resources for that segment. The segments for a
+ * TID RDMA request are set up and cleared in FIFO order, which is managed using a
+ * circular buffer.
+ */
+int hfi1_kern_exp_rcv_clear(struct tid_rdma_request *req)
+ __must_hold(&req->qp->s_lock)
+{
+ struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
+ struct hfi1_ctxtdata *rcd = req->rcd;
+ unsigned long flags;
+ int i;
+ struct rvt_qp *fqp;
+
+ lockdep_assert_held(&req->qp->s_lock);
+ /* Exit if we have nothing in the flow circular buffer */
+ if (!CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS))
+ return -EINVAL;
+
+ spin_lock_irqsave(&rcd->exp_lock, flags);
+
+ for (i = 0; i < flow->tnode_cnt; i++)
+ kern_unprogram_rcv_group(flow, i);
+ /* To prevent double unprogramming */
+ flow->tnode_cnt = 0;
+ /* get head before dropping lock */
+ fqp = first_qp(rcd, &rcd->rarr_queue);
+ spin_unlock_irqrestore(&rcd->exp_lock, flags);
+
+ dma_unmap_flow(flow);
+
+ hfi1_tid_rdma_reset_flow(flow);
+ req->clear_tail = (req->clear_tail + 1) & (MAX_FLOWS - 1);
+
+ if (fqp == req->qp) {
+ __trigger_tid_waiter(fqp);
+ rvt_put_qp(fqp);
+ } else {
+ tid_rdma_schedule_tid_wakeup(fqp);
+ }
+
+ return 0;
+}
+
+/*
+ * This function is called to release all the tid entries for
+ * a request.
+ */
+void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req)
+ __must_hold(&req->qp->s_lock)
+{
+ /* Use memory barrier for proper ordering */
+ while (CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS)) {
+ if (hfi1_kern_exp_rcv_clear(req))
+ break;
+ }
+}
+
+/**
+ * hfi1_kern_exp_rcv_free_flows - free previously allocated flow information
+ * @req: the tid rdma request to be cleaned
+ */
+static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req)
+{
+ kfree(req->flows);
+ req->flows = NULL;
+}
+
+/**
+ * __trdma_clean_swqe - clean up for large sized QPs
+ * @qp: the queue pair
+ * @wqe: the send wqe
+ */
+void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
+{
+ struct hfi1_swqe_priv *p = wqe->priv;
+
+ hfi1_kern_exp_rcv_free_flows(&p->tid_req);
+}
+
+/*
+ * This can be called at QP create time or in the data path.
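+ * The gfp mask is therefore supplied by the caller (e.g. GFP_ATOMIC when
+ * called from the data path via setup_tid_rdma_wqe()).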
+ */
+static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
+ gfp_t gfp)
+{
+ struct tid_rdma_flow *flows;
+ int i;
+
+ if (likely(req->flows))
+ return 0;
+ flows = kmalloc_node(MAX_FLOWS * sizeof(*flows), gfp,
+ req->rcd->numa_id);
+ if (!flows)
+ return -ENOMEM;
+ /* mini init */
+ for (i = 0; i < MAX_FLOWS; i++) {
+ flows[i].req = req;
+ flows[i].npagesets = 0;
+ flows[i].pagesets[0].mapped = 0;
+ flows[i].resync_npkts = 0;
+ }
+ req->flows = flows;
+ return 0;
+}
+
+static void hfi1_init_trdma_req(struct rvt_qp *qp,
+ struct tid_rdma_request *req)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+
+ /*
+ * Initialize various TID RDMA request variables.
+ * These variables are "static", which is why they
+ * can be pre-initialized here before the WRs have
+ * even been submitted.
+ * However, non-NULL values for these variables do not
+ * imply that this WQE has been enabled for TID RDMA.
+ * Drivers should check the WQE's opcode to determine
+ * if a request is a TID RDMA one or not.
+ */
+ req->qp = qp;
+ req->rcd = qpriv->rcd;
+}
+
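+/* Device counter accessor: reports the number of TID waits (n_tidwait). */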
+u64 hfi1_access_sw_tid_wait(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = context;
+
+ return dd->verbs_dev.n_tidwait;
+}
+
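+/*
+ * Search the flows between clear_tail and setup_head for the one whose
+ * IB PSN range [ib_spsn, ib_lpsn] contains @psn. If found, optionally
+ * return its index through @fidx; otherwise return NULL.
+ */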
+static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req,
+ u32 psn, u16 *fidx)
+{
+ u16 head, tail;
+ struct tid_rdma_flow *flow;
+
+ head = req->setup_head;
+ tail = req->clear_tail;
+ for ( ; CIRC_CNT(head, tail, MAX_FLOWS);
+ tail = CIRC_NEXT(tail, MAX_FLOWS)) {
+ flow = &req->flows[tail];
+ if (cmp_psn(psn, flow->flow_state.ib_spsn) >= 0 &&
+ cmp_psn(psn, flow->flow_state.ib_lpsn) <= 0) {
+ if (fidx)
+ *fidx = tail;
+ return flow;
+ }
+ }
+ return NULL;
+}
+
+/* TID RDMA READ functions */
+u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
+ struct ib_other_headers *ohdr, u32 *bth1,
+ u32 *bth2, u32 *len)
+{
+ struct tid_rdma_request *req = wqe_to_tid_req(wqe);
+ struct tid_rdma_flow *flow = &req->flows[req->flow_idx];
+ struct rvt_qp *qp = req->qp;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct hfi1_swqe_priv *wpriv = wqe->priv;
+ struct tid_rdma_read_req *rreq = &ohdr->u.tid_rdma.r_req;
+ struct tid_rdma_params *remote;
+ u32 req_len = 0;
+ void *req_addr = NULL;
+
+ /* This is the IB psn used to send the request */
+ *bth2 = mask_psn(flow->flow_state.ib_spsn + flow->pkt);
+ trace_hfi1_tid_flow_build_read_pkt(qp, req->flow_idx, flow);
+
+ /* TID Entries for TID RDMA READ payload */
+ req_addr = &flow->tid_entry[flow->tid_idx];
+ req_len = sizeof(*flow->tid_entry) *
+ (flow->tidcnt - flow->tid_idx);
+
+ memset(&ohdr->u.tid_rdma.r_req, 0, sizeof(ohdr->u.tid_rdma.r_req));
+ wpriv->ss.sge.vaddr = req_addr;
+ wpriv->ss.sge.sge_length = req_len;
+ wpriv->ss.sge.length = wpriv->ss.sge.sge_length;
+ /*
+ * We can safely zero these out. Since the first SGE covers the
+ * entire packet, nothing else should even look at the MR.
+ */
+ wpriv->ss.sge.mr = NULL;
+ wpriv->ss.sge.m = 0;
+ wpriv->ss.sge.n = 0;
+
+ wpriv->ss.sg_list = NULL;
+ wpriv->ss.total_len = wpriv->ss.sge.sge_length;
+ wpriv->ss.num_sge = 1;
+
+ /* Construct the TID RDMA READ REQ packet header */
+ rcu_read_lock();
+ remote = rcu_dereference(qpriv->tid_rdma.remote);
+
+ KDETH_RESET(rreq->kdeth0, KVER, 0x1);
+ KDETH_RESET(rreq->kdeth1, JKEY, remote->jkey);
+ rreq->reth.vaddr = cpu_to_be64(wqe->rdma_wr.remote_addr +
+ req->cur_seg * req->seg_len + flow->sent);
+ rreq->reth.rkey = cpu_to_be32(wqe->rdma_wr.rkey);
+ rreq->reth.length = cpu_to_be32(*len);
+ rreq->tid_flow_psn =
+ cpu_to_be32((flow->flow_state.generation <<
+ HFI1_KDETH_BTH_SEQ_SHIFT) |
+ ((flow->flow_state.spsn + flow->pkt) &
+ HFI1_KDETH_BTH_SEQ_MASK));
+ rreq->tid_flow_qp =
+ cpu_to_be32(qpriv->tid_rdma.local.qp |
+ ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) <<
+ TID_RDMA_DESTQP_FLOW_SHIFT) |
+ qpriv->rcd->ctxt);
+ rreq->verbs_qp = cpu_to_be32(qp->remote_qpn);
+ *bth1 &= ~RVT_QPN_MASK;
+ *bth1 |= remote->qp;
+ *bth2 |= IB_BTH_REQ_ACK;
+ rcu_read_unlock();
+
+ /* We are done with this segment */
+ flow->sent += *len;
+ req->cur_seg++;
+ qp->s_state = TID_OP(READ_REQ);
+ req->ack_pending++;
+ req->flow_idx = (req->flow_idx + 1) & (MAX_FLOWS - 1);
+ qpriv->pending_tid_r_segs++;
+ qp->s_num_rd_atomic++;
+
+ /* Set the TID RDMA READ request payload size */
+ *len = req_len;
+
+ return sizeof(ohdr->u.tid_rdma.r_req) / sizeof(u32);
+}
+
+/*
+ * @len: contains the data length to read upon entry and the read request
+ * payload length upon exit.
+ */
+u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ struct ib_other_headers *ohdr, u32 *bth1,
+ u32 *bth2, u32 *len)
+ __must_hold(&qp->s_lock)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct tid_rdma_request *req = wqe_to_tid_req(wqe);
+ struct tid_rdma_flow *flow = NULL;
+ u32 hdwords = 0;
+ bool last;
+ bool retry = true;
+ u32 npkts = rvt_div_round_up_mtu(qp, *len);
+
+ trace_hfi1_tid_req_build_read_req(qp, 0, wqe->wr.opcode, wqe->psn,
+ wqe->lpsn, req);
+ /*
+ * Check sync conditions. Make sure that there are no pending
+ * segments before freeing the flow.
+ */
+sync_check:
+ if (req->state == TID_REQUEST_SYNC) {
+ if (qpriv->pending_tid_r_segs)
+ goto done;
+
+ hfi1_kern_clear_hw_flow(req->rcd, qp);
+ qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
+ req->state = TID_REQUEST_ACTIVE;
+ }
+
+ /*
+ * If the request for this segment is resent, the tid resources should
+ * have been allocated before. In this case, req->flow_idx should
+ * fall behind req->setup_head.
+ */
+ if (req->flow_idx == req->setup_head) {
+ retry = false;
+ if (req->state == TID_REQUEST_RESEND) {
+ /*
+ * This is the first new segment for a request whose
+ * earlier segments have been re-sent. We need to
+ * set up the sge pointer correctly.
+ */
+ restart_sge(&qp->s_sge, wqe, req->s_next_psn,
+ qp->pmtu);
+ req->isge = 0;
+ req->state = TID_REQUEST_ACTIVE;
+ }
+
+ /*
+ * Check sync. The last PSN of each generation is reserved for
+ * RESYNC.
+ */
+ if ((qpriv->flow_state.psn + npkts) > MAX_TID_FLOW_PSN - 1) {
+ req->state = TID_REQUEST_SYNC;
+ goto sync_check;
+ }
+
+ /* Allocate the flow if not yet */
+ if (hfi1_kern_setup_hw_flow(qpriv->rcd, qp))
+ goto done;
+
+ /*
+ * The following call will advance req->setup_head after
+ * allocating the tid entries.
+ */
+ if (hfi1_kern_exp_rcv_setup(req, &qp->s_sge, &last)) {
+ req->state = TID_REQUEST_QUEUED;
+
+ /*
+ * We don't have resources for this segment. The QP has
+ * already been queued.
+ */
+ goto done;
+ }
+ }
+
+ /* req->flow_idx should only be one slot behind req->setup_head */
+ flow = &req->flows[req->flow_idx];
+ flow->pkt = 0;
+ flow->tid_idx = 0;
+ flow->sent = 0;
+ if (!retry) {
+ /* Set the first and last IB PSN for the flow in use.*/
+ flow->flow_state.ib_spsn = req->s_next_psn;
+ flow->flow_state.ib_lpsn =
+ flow->flow_state.ib_spsn + flow->npkts - 1;
+ }
+
+ /* Calculate the next segment start psn.*/
+ req->s_next_psn += flow->npkts;
+
+ /* Build the packet header */
+ hdwords = hfi1_build_tid_rdma_read_packet(wqe, ohdr, bth1, bth2, len);
+done:
+ return hdwords;
+}
+
+/*
+ * Validate and accept the TID RDMA READ request parameters.
+ * Return 0 if the request is accepted successfully;
+ * Return 1 otherwise.
+ */
+static int tid_rdma_rcv_read_request(struct rvt_qp *qp,
+ struct rvt_ack_entry *e,
+ struct hfi1_packet *packet,
+ struct ib_other_headers *ohdr,
+ u32 bth0, u32 psn, u64 vaddr, u32 len)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct tid_rdma_request *req;
+ struct tid_rdma_flow *flow;
+ u32 flow_psn, i, tidlen = 0, pktlen, tlen;
+
+ req = ack_to_tid_req(e);
+
+ /* Validate the payload first */
+ flow = &req->flows[req->setup_head];
+
+ /* payload length = packet length - (header length + ICRC length) */
+ pktlen = packet->tlen - (packet->hlen + 4);
+ if (pktlen > sizeof(flow->tid_entry))
+ return 1;
+ memcpy(flow->tid_entry, packet->ebuf, pktlen);
+ flow->tidcnt = pktlen / sizeof(*flow->tid_entry);
+
+ /*
+ * Walk the TID_ENTRY list to make sure we have enough space for a
+ * complete segment. Also calculate the number of required packets.
+ */
+ flow->npkts = rvt_div_round_up_mtu(qp, len);
+ for (i = 0; i < flow->tidcnt; i++) {
+ trace_hfi1_tid_entry_rcv_read_req(qp, i,
+ flow->tid_entry[i]);
+ tlen = EXP_TID_GET(flow->tid_entry[i], LEN);
+ if (!tlen)
+ return 1;
+
+ /*
+ * For a tid pair (tidctr == 3), the buffer size of the pair
+ * should be the sum of the buffer sizes described by each
+ * tid entry. However, only the first entry needs to be
+ * specified in the request (see WFR HAS Section 8.5.7.1).
+ */
+ tidlen += tlen;
+ }
+ if (tidlen * PAGE_SIZE < len)
+ return 1;
+
+ /* Empty the flow array */
+ req->clear_tail = req->setup_head;
+ flow->pkt = 0;
+ flow->tid_idx = 0;
+ flow->tid_offset = 0;
+ flow->sent = 0;
+ flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.r_req.tid_flow_qp);
+ flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) &
+ TID_RDMA_DESTQP_FLOW_MASK;
+ flow_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_req.tid_flow_psn));
+ flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT;
+ flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK;
+ flow->length = len;
+
+ flow->flow_state.lpsn = flow->flow_state.spsn +
+ flow->npkts - 1;
+ flow->flow_state.ib_spsn = psn;
+ flow->flow_state.ib_lpsn = flow->flow_state.ib_spsn + flow->npkts - 1;
+
+ trace_hfi1_tid_flow_rcv_read_req(qp, req->setup_head, flow);
+ /* Set the initial flow index to the current flow. */
+ req->flow_idx = req->setup_head;
+
+ /* advance circular buffer head */
+ req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1);
+
+ /*
+ * Compute last PSN for request.
+ */
+ e->opcode = (bth0 >> 24) & 0xff;
+ e->psn = psn;
+ e->lpsn = psn + flow->npkts - 1;
+ e->sent = 0;
+
+ req->n_flows = qpriv->tid_rdma.local.max_read;
+ req->state = TID_REQUEST_ACTIVE;
+ req->cur_seg = 0;
+ req->comp_seg = 0;
+ req->ack_seg = 0;
+ req->isge = 0;
+ req->seg_len = qpriv->tid_rdma.local.max_len;
+ req->total_len = len;
+ req->total_segs = 1;
+ req->r_flow_psn = e->psn;
+
+ trace_hfi1_tid_req_rcv_read_req(qp, 0, e->opcode, e->psn, e->lpsn,
+ req);
+ return 0;
+}
+
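+/*
+ * Handle a TID RDMA READ/WRITE request whose PSN does not match qp->r_psn.
+ * A positive @diff (PSN ahead of r_psn) is treated as a sequence error and
+ * NAK'd; a negative @diff indicates a duplicate request, for which the ack
+ * queue is rewound so the matching responses can be re-sent.
+ */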
+static int tid_rdma_rcv_error(struct hfi1_packet *packet,
+ struct ib_other_headers *ohdr,
+ struct rvt_qp *qp, u32 psn, int diff)
+{
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_ctxtdata *rcd = ((struct hfi1_qp_priv *)qp->priv)->rcd;
+ struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct rvt_ack_entry *e;
+ struct tid_rdma_request *req;
+ unsigned long flags;
+ u8 prev;
+ bool old_req;
+
+ trace_hfi1_rsp_tid_rcv_error(qp, psn);
+ trace_hfi1_tid_rdma_rcv_err(qp, 0, psn, diff);
+ if (diff > 0) {
+ /* sequence error */
+ if (!qp->r_nak_state) {
+ ibp->rvp.n_rc_seqnak++;
+ qp->r_nak_state = IB_NAK_PSN_ERROR;
+ qp->r_ack_psn = qp->r_psn;
+ rc_defered_ack(rcd, qp);
+ }
+ goto done;
+ }
+
+ ibp->rvp.n_rc_dupreq++;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ e = find_prev_entry(qp, psn, &prev, NULL, &old_req);
+ if (!e || (e->opcode != TID_OP(READ_REQ) &&
+ e->opcode != TID_OP(WRITE_REQ)))
+ goto unlock;
+
+ req = ack_to_tid_req(e);
+ req->r_flow_psn = psn;
+ trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn, e->lpsn, req);
+ if (e->opcode == TID_OP(READ_REQ)) {
+ struct ib_reth *reth;
+ u32 len;
+ u32 rkey;
+ u64 vaddr;
+ int ok;
+ u32 bth0;
+
+ reth = &ohdr->u.tid_rdma.r_req.reth;
+ /*
+ * The requester always restarts from the start of the original
+ * request.
+ */
+ len = be32_to_cpu(reth->length);
+ if (psn != e->psn || len != req->total_len)
+ goto unlock;
+
+ release_rdma_sge_mr(e);
+
+ rkey = be32_to_cpu(reth->rkey);
+ vaddr = get_ib_reth_vaddr(reth);
+
+ qp->r_len = len;
+ ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
+ IB_ACCESS_REMOTE_READ);
+ if (unlikely(!ok))
+ goto unlock;
+
+ /*
+ * If all the response packets for the current request have
+ * been sent out and this request is complete (old_req ==
+ * false), the TID flow may be unusable (req->clear_tail
+ * has been advanced). However, when an earlier
+ * request is received, this request will not be complete any
+ * more (qp->s_tail_ack_queue is moved back, see below).
+ * Consequently, we need to update the TID flow info every time
+ * a duplicate request is received.
+ */
+ bth0 = be32_to_cpu(ohdr->bth[0]);
+ if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn,
+ vaddr, len))
+ goto unlock;
+
+ /*
+ * True if the request is already scheduled (between
+ * qp->s_tail_ack_queue and qp->r_head_ack_queue);
+ */
+ if (old_req)
+ goto unlock;
+ } else {
+ struct flow_state *fstate;
+ bool schedule = false;
+ u8 i;
+
+ if (req->state == TID_REQUEST_RESEND) {
+ req->state = TID_REQUEST_RESEND_ACTIVE;
+ } else if (req->state == TID_REQUEST_INIT_RESEND) {
+ req->state = TID_REQUEST_INIT;
+ schedule = true;
+ }
+
+ /*
+ * True if the request is already scheduled (between
+ * qp->s_tail_ack_queue and qp->r_head_ack_queue).
+ * Also, don't change requests, which are at the SYNC
+ * point and haven't generated any responses yet.
+ * There is nothing to retransmit for them yet.
+ */
+ if (old_req || req->state == TID_REQUEST_INIT ||
+ (req->state == TID_REQUEST_SYNC && !req->cur_seg)) {
+ for (i = prev + 1; ; i++) {
+ if (i > rvt_size_atomic(&dev->rdi))
+ i = 0;
+ if (i == qp->r_head_ack_queue)
+ break;
+ e = &qp->s_ack_queue[i];
+ req = ack_to_tid_req(e);
+ if (e->opcode == TID_OP(WRITE_REQ) &&
+ req->state == TID_REQUEST_INIT)
+ req->state = TID_REQUEST_INIT_RESEND;
+ }
+ /*
+ * If the state of the request has been changed,
+ * the first leg needs to get scheduled in order to
+ * pick up the change. Otherwise, normal response
+ * processing should take care of it.
+ */
+ if (!schedule)
+ goto unlock;
+ }
+
+ /*
+ * If there is no more allocated segment, just schedule the qp
+ * without changing any state.
+ */
+ if (req->clear_tail == req->setup_head)
+ goto schedule;
+ /*
+ * If this request has sent responses for segments, which have
+ * not received data yet (flow_idx != clear_tail), the flow_idx
+ * pointer needs to be adjusted so the same responses can be
+ * re-sent.
+ */
+ if (CIRC_CNT(req->flow_idx, req->clear_tail, MAX_FLOWS)) {
+ fstate = &req->flows[req->clear_tail].flow_state;
+ qpriv->pending_tid_w_segs -=
+ CIRC_CNT(req->flow_idx, req->clear_tail,
+ MAX_FLOWS);
+ req->flow_idx =
+ CIRC_ADD(req->clear_tail,
+ delta_psn(psn, fstate->resp_ib_psn),
+ MAX_FLOWS);
+ qpriv->pending_tid_w_segs +=
+ delta_psn(psn, fstate->resp_ib_psn);
+ /*
+ * When flow_idx == setup_head, we've gotten a duplicate
+ * request for a segment, which has not been allocated
+ * yet. In that case, don't adjust this request.
+ * However, we still want to go through the loop below
+ * to adjust all subsequent requests.
+ */
+ if (CIRC_CNT(req->setup_head, req->flow_idx,
+ MAX_FLOWS)) {
+ req->cur_seg = delta_psn(psn, e->psn);
+ req->state = TID_REQUEST_RESEND_ACTIVE;
+ }
+ }
+
+ for (i = prev + 1; ; i++) {
+ /*
+ * Look at everything up to and including
+ * s_tail_ack_queue
+ */
+ if (i > rvt_size_atomic(&dev->rdi))
+ i = 0;
+ if (i == qp->r_head_ack_queue)
+ break;
+ e = &qp->s_ack_queue[i];
+ req = ack_to_tid_req(e);
+ trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn,
+ e->lpsn, req);
+ if (e->opcode != TID_OP(WRITE_REQ) ||
+ req->cur_seg == req->comp_seg ||
+ req->state == TID_REQUEST_INIT ||
+ req->state == TID_REQUEST_INIT_RESEND) {
+ if (req->state == TID_REQUEST_INIT)
+ req->state = TID_REQUEST_INIT_RESEND;
+ continue;
+ }
+ qpriv->pending_tid_w_segs -=
+ CIRC_CNT(req->flow_idx,
+ req->clear_tail,
+ MAX_FLOWS);
+ req->flow_idx = req->clear_tail;
+ req->state = TID_REQUEST_RESEND;
+ req->cur_seg = req->comp_seg;
+ }
+ qpriv->s_flags &= ~HFI1_R_TID_WAIT_INTERLCK;
+ }
+ /* Re-process old requests.*/
+ if (qp->s_acked_ack_queue == qp->s_tail_ack_queue)
+ qp->s_acked_ack_queue = prev;
+ qp->s_tail_ack_queue = prev;
+ /*
+ * Since the qp->s_tail_ack_queue is modified, the
+ * qp->s_ack_state must be changed to re-initialize
+ * qp->s_ack_rdma_sge; otherwise, we will end up in the
+ * wrong memory region.
+ */
+ qp->s_ack_state = OP(ACKNOWLEDGE);
+schedule:
+ /*
+ * It's possible to receive a retry psn that is earlier than an RNRNAK
+ * psn. In this case, the rnrnak state should be cleared.
+ */
+ if (qpriv->rnr_nak_state) {
+ qp->s_nak_state = 0;
+ qpriv->rnr_nak_state = TID_RNR_NAK_INIT;
+ qp->r_psn = e->lpsn + 1;
+ hfi1_tid_write_alloc_resources(qp, true);
+ }
+
+ qp->r_state = e->opcode;
+ qp->r_nak_state = 0;
+ qp->s_flags |= RVT_S_RESP_PENDING;
+ hfi1_schedule_send(qp);
+unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+done:
+ return 1;
+}
+
+void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet)
+{
+ /* HANDLER FOR TID RDMA READ REQUEST packet (Responder side)*/
+
+ /*
+ * 1. Verify TID RDMA READ REQ as per IB_OPCODE_RC_RDMA_READ
+ * (see hfi1_rc_rcv())
+ * 2. Put TID RDMA READ REQ into the response queue (s_ack_queue)
+ * - Setup struct tid_rdma_req with request info
+ * - Initialize struct tid_rdma_flow info;
+ * - Copy TID entries;
+ * 3. Set the qp->s_ack_state.
+ * 4. Set RVT_S_RESP_PENDING in s_flags.
+ * 5. Kick the send engine (hfi1_schedule_send())
+ */
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct rvt_qp *qp = packet->qp;
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct ib_other_headers *ohdr = packet->ohdr;
+ struct rvt_ack_entry *e;
+ unsigned long flags;
+ struct ib_reth *reth;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ u32 bth0, psn, len, rkey;
+ bool fecn;
+ u8 next;
+ u64 vaddr;
+ int diff;
+ u8 nack_state = IB_NAK_INVALID_REQUEST;
+
+ bth0 = be32_to_cpu(ohdr->bth[0]);
+ if (hfi1_ruc_check_hdr(ibp, packet))
+ return;
+
+ fecn = process_ecn(qp, packet);
+ psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
+ trace_hfi1_rsp_rcv_tid_read_req(qp, psn);
+
+ if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
+ rvt_comm_est(qp);
+
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
+ goto nack_inv;
+
+ reth = &ohdr->u.tid_rdma.r_req.reth;
+ vaddr = be64_to_cpu(reth->vaddr);
+ len = be32_to_cpu(reth->length);
+ /* The length needs to be in multiples of PAGE_SIZE */
+ if (!len || len & ~PAGE_MASK || len > qpriv->tid_rdma.local.max_len)
+ goto nack_inv;
+
+ diff = delta_psn(psn, qp->r_psn);
+ if (unlikely(diff)) {
+ tid_rdma_rcv_err(packet, ohdr, qp, psn, diff, fecn);
+ return;
+ }
+
+ /* We've verified the request, insert it into the ack queue. */
+ next = qp->r_head_ack_queue + 1;
+ if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
+ next = 0;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (unlikely(next == qp->s_tail_ack_queue)) {
+ if (!qp->s_ack_queue[next].sent) {
+ nack_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
+ goto nack_inv_unlock;
+ }
+ update_ack_queue(qp, next);
+ }
+ e = &qp->s_ack_queue[qp->r_head_ack_queue];
+ release_rdma_sge_mr(e);
+
+ rkey = be32_to_cpu(reth->rkey);
+ qp->r_len = len;
+
+ if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr,
+ rkey, IB_ACCESS_REMOTE_READ)))
+ goto nack_acc;
+
+ /* Accept the request parameters */
+ if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn, vaddr,
+ len))
+ goto nack_inv_unlock;
+
+ qp->r_state = e->opcode;
+ qp->r_nak_state = 0;
+ /*
+ * We need to increment the MSN here instead of when we
+ * finish sending the result since a duplicate request would
+ * increment it more than once.
+ */
+ qp->r_msn++;
+ qp->r_psn += e->lpsn - e->psn + 1;
+
+ qp->r_head_ack_queue = next;
+
+ /*
+ * For all requests other than TID WRITE which are added to the ack
+ * queue, qpriv->r_tid_alloc follows qp->r_head_ack_queue. It is ok to
+ * do this because of interlocks between these and TID WRITE
+ * requests. The same change has also been made in hfi1_rc_rcv().
+ */
+ qpriv->r_tid_alloc = qp->r_head_ack_queue;
+
+ /* Schedule the send tasklet. */
+ qp->s_flags |= RVT_S_RESP_PENDING;
+ if (fecn)
+ qp->s_flags |= RVT_S_ECN;
+ hfi1_schedule_send(qp);
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return;
+
+nack_inv_unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+nack_inv:
+ rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ qp->r_nak_state = nack_state;
+ qp->r_ack_psn = qp->r_psn;
+ /* Queue NAK for later */
+ rc_defered_ack(rcd, qp);
+ return;
+nack_acc:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
+ qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
+ qp->r_ack_psn = qp->r_psn;
+}
+
+u32 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
+ struct ib_other_headers *ohdr, u32 *bth0,
+ u32 *bth1, u32 *bth2, u32 *len, bool *last)
+{
+ struct hfi1_ack_priv *epriv = e->priv;
+ struct tid_rdma_request *req = &epriv->tid_req;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
+ u32 tidentry = flow->tid_entry[flow->tid_idx];
+ u32 tidlen = EXP_TID_GET(tidentry, LEN) << PAGE_SHIFT;
+ struct tid_rdma_read_resp *resp = &ohdr->u.tid_rdma.r_rsp;
+ u32 next_offset, om = KDETH_OM_LARGE;
+ bool last_pkt;
+ u32 hdwords = 0;
+ struct tid_rdma_params *remote;
+
+ *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset);
+ flow->sent += *len;
+ next_offset = flow->tid_offset + *len;
+ last_pkt = (flow->sent >= flow->length);
+
+ trace_hfi1_tid_entry_build_read_resp(qp, flow->tid_idx, tidentry);
+ trace_hfi1_tid_flow_build_read_resp(qp, req->clear_tail, flow);
+
+ rcu_read_lock();
+ remote = rcu_dereference(qpriv->tid_rdma.remote);
+ if (!remote) {
+ rcu_read_unlock();
+ goto done;
+ }
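+ /*
+ * Note: OM selects the KDETH offset granularity; the TID offset is
+ * expressed in units of 'om' bytes (hence tid_offset / om below),
+ * with KDETH_OM_LARGE used here.
+ */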
+ KDETH_RESET(resp->kdeth0, KVER, 0x1);
+ KDETH_SET(resp->kdeth0, SH, !last_pkt);
+ KDETH_SET(resp->kdeth0, INTR, !!(!last_pkt && remote->urg));
+ KDETH_SET(resp->kdeth0, TIDCTRL, EXP_TID_GET(tidentry, CTRL));
+ KDETH_SET(resp->kdeth0, TID, EXP_TID_GET(tidentry, IDX));
+ KDETH_SET(resp->kdeth0, OM, om == KDETH_OM_LARGE);
+ KDETH_SET(resp->kdeth0, OFFSET, flow->tid_offset / om);
+ KDETH_RESET(resp->kdeth1, JKEY, remote->jkey);
+ resp->verbs_qp = cpu_to_be32(qp->remote_qpn);
+ rcu_read_unlock();
+
+ resp->aeth = rvt_compute_aeth(qp);
+ resp->verbs_psn = cpu_to_be32(mask_psn(flow->flow_state.ib_spsn +
+ flow->pkt));
+
+ *bth0 = TID_OP(READ_RESP) << 24;
+ *bth1 = flow->tid_qpn;
+ *bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) &
+ HFI1_KDETH_BTH_SEQ_MASK) |
+ (flow->flow_state.generation <<
+ HFI1_KDETH_BTH_SEQ_SHIFT));
+ *last = last_pkt;
+ if (last_pkt)
+ /* Advance to next flow */
+ req->clear_tail = (req->clear_tail + 1) &
+ (MAX_FLOWS - 1);
+
+ if (next_offset >= tidlen) {
+ flow->tid_offset = 0;
+ flow->tid_idx++;
+ } else {
+ flow->tid_offset = next_offset;
+ }
+
+ hdwords = sizeof(ohdr->u.tid_rdma.r_rsp) / sizeof(u32);
+
+done:
+ return hdwords;
+}
+
+static inline struct tid_rdma_request *
+find_tid_request(struct rvt_qp *qp, u32 psn, enum ib_wr_opcode opcode)
+ __must_hold(&qp->s_lock)
+{
+ struct rvt_swqe *wqe;
+ struct tid_rdma_request *req = NULL;
+ u32 i, end;
+
+ end = qp->s_cur + 1;
+ if (end == qp->s_size)
+ end = 0;
+ for (i = qp->s_acked; i != end;) {
+ wqe = rvt_get_swqe_ptr(qp, i);
+ if (cmp_psn(psn, wqe->psn) >= 0 &&
+ cmp_psn(psn, wqe->lpsn) <= 0) {
+ if (wqe->wr.opcode == opcode)
+ req = wqe_to_tid_req(wqe);
+ break;
+ }
+ if (++i == qp->s_size)
+ i = 0;
+ }
+
+ return req;
+}
+
+void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet)
+{
+ /* HANDLER FOR TID RDMA READ RESPONSE packet (Requester side) */
+
+ /*
+ * 1. Find matching SWQE
+ * 2. Check that the entire segment has been read.
+ * 3. Remove HFI1_S_WAIT_TID_RESP from s_flags.
+ * 4. Free the TID flow resources.
+ * 5. Kick the send engine (hfi1_schedule_send())
+ */
+ struct ib_other_headers *ohdr = packet->ohdr;
+ struct rvt_qp *qp = packet->qp;
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct tid_rdma_request *req;
+ struct tid_rdma_flow *flow;
+ u32 opcode, aeth;
+ bool fecn;
+ unsigned long flags;
+ u32 kpsn, ipsn;
+
+ trace_hfi1_sender_rcv_tid_read_resp(qp);
+ fecn = process_ecn(qp, packet);
+ kpsn = mask_psn(be32_to_cpu(ohdr->bth[2]));
+ aeth = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.aeth);
+ opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn));
+ req = find_tid_request(qp, ipsn, IB_WR_TID_RDMA_READ);
+ if (unlikely(!req))
+ goto ack_op_err;
+
+ flow = &req->flows[req->clear_tail];
+ /* When header suppression is disabled */
+ if (cmp_psn(ipsn, flow->flow_state.ib_lpsn)) {
+ update_r_next_psn_fecn(packet, priv, rcd, flow, fecn);
+
+ if (cmp_psn(kpsn, flow->flow_state.r_next_psn))
+ goto ack_done;
+ flow->flow_state.r_next_psn = mask_psn(kpsn + 1);
+ /*
+ * Copy the payload to destination buffer if this packet is
+ * delivered as an eager packet due to RSM rule and FECN.
+ * The RSM rule selects FECN bit in BTH and SH bit in
+ * KDETH header and therefore will not match the last
+ * packet of each segment that has SH bit cleared.
+ */
+ if (fecn && packet->etype == RHF_RCV_TYPE_EAGER) {
+ struct rvt_sge_state ss;
+ u32 len;
+ u32 tlen = packet->tlen;
+ u16 hdrsize = packet->hlen;
+ u8 pad = packet->pad;
+ u8 extra_bytes = pad + packet->extra_byte +
+ (SIZE_OF_CRC << 2);
+ u32 pmtu = qp->pmtu;
+
+ if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
+ goto ack_op_err;
+ len = restart_sge(&ss, req->e.swqe, ipsn, pmtu);
+ if (unlikely(len < pmtu))
+ goto ack_op_err;
+ rvt_copy_sge(qp, &ss, packet->payload, pmtu, false,
+ false);
+ /* Raise the sw sequence check flag for next packet */
+ priv->s_flags |= HFI1_R_TID_SW_PSN;
+ }
+
+ goto ack_done;
+ }
+ flow->flow_state.r_next_psn = mask_psn(kpsn + 1);
+ req->ack_pending--;
+ priv->pending_tid_r_segs--;
+ qp->s_num_rd_atomic--;
+ if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
+ !qp->s_num_rd_atomic) {
+ qp->s_flags &= ~(RVT_S_WAIT_FENCE |
+ RVT_S_WAIT_ACK);
+ hfi1_schedule_send(qp);
+ }
+ if (qp->s_flags & RVT_S_WAIT_RDMAR) {
+ qp->s_flags &= ~(RVT_S_WAIT_RDMAR | RVT_S_WAIT_ACK);
+ hfi1_schedule_send(qp);
+ }
+
+ trace_hfi1_ack(qp, ipsn);
+ trace_hfi1_tid_req_rcv_read_resp(qp, 0, req->e.swqe->wr.opcode,
+ req->e.swqe->psn, req->e.swqe->lpsn,
+ req);
+ trace_hfi1_tid_flow_rcv_read_resp(qp, req->clear_tail, flow);
+
+ /* Release the tid resources */
+ hfi1_kern_exp_rcv_clear(req);
+
+ if (!do_rc_ack(qp, aeth, ipsn, opcode, 0, rcd))
+ goto ack_done;
+
+ /* If not done yet, build next read request */
+ if (++req->comp_seg >= req->total_segs) {
+ priv->tid_r_comp++;
+ req->state = TID_REQUEST_COMPLETE;
+ }
+
+ /*
+ * Clear the hw flow under two conditions:
+ * 1. This request is a sync point and it is complete;
+ * 2. Current request is completed and there are no more requests.
+ */
+ if ((req->state == TID_REQUEST_SYNC &&
+ req->comp_seg == req->cur_seg) ||
+ priv->tid_r_comp == priv->tid_r_reqs) {
+ hfi1_kern_clear_hw_flow(priv->rcd, qp);
+ priv->s_flags &= ~HFI1_R_TID_SW_PSN;
+ if (req->state == TID_REQUEST_SYNC)
+ req->state = TID_REQUEST_ACTIVE;
+ }
+
+ hfi1_schedule_send(qp);
+ goto ack_done;
+
+ack_op_err:
+ /*
+ * The test indicates that the send engine has finished its cleanup
+ * after sending the request and it's now safe to put the QP into error
+ * state. However, if the wqe queue is empty (qp->s_acked == qp->s_tail
+ * == qp->s_head), it would be unsafe to complete the wqe pointed by
+ * qp->s_acked here. Putting the qp into error state will safely flush
+ * all remaining requests.
+ */
+ if (qp->s_last == qp->s_acked)
+ rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+
+ack_done:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
+
+void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp)
+ __must_hold(&qp->s_lock)
+{
+ u32 n = qp->s_acked;
+ struct rvt_swqe *wqe;
+ struct tid_rdma_request *req;
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ lockdep_assert_held(&qp->s_lock);
+ /* Free any TID entries */
+ while (n != qp->s_tail) {
+ wqe = rvt_get_swqe_ptr(qp, n);
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
+ req = wqe_to_tid_req(wqe);
+ hfi1_kern_exp_rcv_clear_all(req);
+ }
+
+ if (++n == qp->s_size)
+ n = 0;
+ }
+ /* Free flow */
+ hfi1_kern_clear_hw_flow(priv->rcd, qp);
+}
+
+static bool tid_rdma_tid_err(struct hfi1_packet *packet, u8 rcv_type)
+{
+ struct rvt_qp *qp = packet->qp;
+
+ if (rcv_type >= RHF_RCV_TYPE_IB)
+ goto done;
+
+ spin_lock(&qp->s_lock);
+
+ /*
+ * We've run out of space in the eager buffer.
+ * Eagerly received KDETH packets which require space in the
+ * eager buffer (packets that have payload) are TID RDMA WRITE
+ * response packets. In this case, we have to re-transmit the
+ * TID RDMA WRITE request.
+ */
+ if (rcv_type == RHF_RCV_TYPE_EAGER) {
+ hfi1_restart_rc(qp, qp->s_last_psn + 1, 1);
+ hfi1_schedule_send(qp);
+ }
+
+ /* Since no payload is delivered, just drop the packet */
+ spin_unlock(&qp->s_lock);
+done:
+ return true;
+}
+
+static void restart_tid_rdma_read_req(struct hfi1_ctxtdata *rcd,
+ struct rvt_qp *qp, struct rvt_swqe *wqe)
+{
+ struct tid_rdma_request *req;
+ struct tid_rdma_flow *flow;
+
+ /* Start from the right segment */
+ qp->r_flags |= RVT_R_RDMAR_SEQ;
+ req = wqe_to_tid_req(wqe);
+ flow = &req->flows[req->clear_tail];
+ hfi1_restart_rc(qp, flow->flow_state.ib_spsn, 0);
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= RVT_R_RSP_SEND;
+ rvt_get_qp(qp);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+}
+
+/*
+ * Handle the KDETH eflags for TID RDMA READ response.
+ *
+ * Return false if the last packet for a segment has been received and it is
+ * time to process the response normally; otherwise, return true.
+ *
+ * The caller must hold the packet->qp->r_lock and the rcu_read_lock.
+ */
+static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
+ struct hfi1_packet *packet, u8 rcv_type,
+ u8 rte, u32 psn, u32 ibpsn)
+ __must_hold(&packet->qp->r_lock) __must_hold(RCU)
+{
+ struct hfi1_pportdata *ppd = rcd->ppd;
+ struct hfi1_devdata *dd = ppd->dd;
+ struct hfi1_ibport *ibp;
+ struct rvt_swqe *wqe;
+ struct tid_rdma_request *req;
+ struct tid_rdma_flow *flow;
+ u32 ack_psn;
+ struct rvt_qp *qp = packet->qp;
+ struct hfi1_qp_priv *priv = qp->priv;
+ bool ret = true;
+ int diff = 0;
+ u32 fpsn;
+
+ lockdep_assert_held(&qp->r_lock);
+ trace_hfi1_rsp_read_kdeth_eflags(qp, ibpsn);
+ trace_hfi1_sender_read_kdeth_eflags(qp);
+ trace_hfi1_tid_read_sender_kdeth_eflags(qp, 0);
+ spin_lock(&qp->s_lock);
+ /* If the psn is out of valid range, drop the packet */
+ if (cmp_psn(ibpsn, qp->s_last_psn) < 0 ||
+ cmp_psn(ibpsn, qp->s_psn) > 0)
+ goto s_unlock;
+
+ /*
+ * Note that NAKs implicitly ACK outstanding SEND and RDMA write
+ * requests and implicitly NAK RDMA read and atomic requests issued
+ * before the NAK'ed request.
+ */
+ ack_psn = ibpsn - 1;
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+
+ /* Complete WQEs that the PSN finishes. */
+ while ((int)delta_psn(ack_psn, wqe->lpsn) >= 0) {
+ /*
+ * If this request is a RDMA read or atomic, and the NACK is
+ * for a later operation, this NACK NAKs the RDMA read or
+ * atomic.
+ */
+ if (wqe->wr.opcode == IB_WR_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+ /* Retry this request. */
+ if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
+ qp->r_flags |= RVT_R_RDMAR_SEQ;
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
+ restart_tid_rdma_read_req(rcd, qp,
+ wqe);
+ } else {
+ hfi1_restart_rc(qp, qp->s_last_psn + 1,
+ 0);
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= RVT_R_RSP_SEND;
+ rvt_get_qp(qp);
+ list_add_tail(/* wait */
+ &qp->rspwait,
+ &rcd->qp_wait_list);
+ }
+ }
+ }
+ /*
+ * No need to process the NAK since we are
+ * restarting an earlier request.
+ */
+ break;
+ }
+
+ wqe = do_rc_completion(qp, wqe, ibp);
+ if (qp->s_acked == qp->s_tail)
+ goto s_unlock;
+ }
+
+ if (qp->s_acked == qp->s_tail)
+ goto s_unlock;
+
+ /* Handle the eflags for the request */
+ if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
+ goto s_unlock;
+
+ req = wqe_to_tid_req(wqe);
+ trace_hfi1_tid_req_read_kdeth_eflags(qp, 0, wqe->wr.opcode, wqe->psn,
+ wqe->lpsn, req);
+ switch (rcv_type) {
+ case RHF_RCV_TYPE_EXPECTED:
+ switch (rte) {
+ case RHF_RTE_EXPECTED_FLOW_SEQ_ERR:
+ /*
+ * On the first occurrence of a Flow Sequence error,
+ * the flag TID_FLOW_SW_PSN is set.
+ *
+ * After that, the flow is *not* reprogrammed and the
+ * protocol falls back to SW PSN checking. This is done
+ * to prevent continuous Flow Sequence errors for any
+ * packets that could be still in the fabric.
+ */
+ flow = &req->flows[req->clear_tail];
+ trace_hfi1_tid_flow_read_kdeth_eflags(qp,
+ req->clear_tail,
+ flow);
+ if (priv->s_flags & HFI1_R_TID_SW_PSN) {
+ diff = cmp_psn(psn,
+ flow->flow_state.r_next_psn);
+ if (diff > 0) {
+ /* Drop the packet.*/
+ goto s_unlock;
+ } else if (diff < 0) {
+ /*
+ * If a response packet for a restarted
+ * request has come back, reset the
+ * restart flag.
+ */
+ if (qp->r_flags & RVT_R_RDMAR_SEQ)
+ qp->r_flags &=
+ ~RVT_R_RDMAR_SEQ;
+
+ /* Drop the packet.*/
+ goto s_unlock;
+ }
+
+ /*
+ * If SW PSN verification is successful and
+ * this is the last packet in the segment, tell
+ * the caller to process it as a normal packet.
+ */
+ fpsn = full_flow_psn(flow,
+ flow->flow_state.lpsn);
+ if (cmp_psn(fpsn, psn) == 0) {
+ ret = false;
+ if (qp->r_flags & RVT_R_RDMAR_SEQ)
+ qp->r_flags &=
+ ~RVT_R_RDMAR_SEQ;
+ }
+ flow->flow_state.r_next_psn =
+ mask_psn(psn + 1);
+ } else {
+ u32 last_psn;
+
+ last_psn = read_r_next_psn(dd, rcd->ctxt,
+ flow->idx);
+ flow->flow_state.r_next_psn = last_psn;
+ priv->s_flags |= HFI1_R_TID_SW_PSN;
+ /*
+ * If no request has been restarted yet,
+ * restart the current one.
+ */
+ if (!(qp->r_flags & RVT_R_RDMAR_SEQ))
+ restart_tid_rdma_read_req(rcd, qp,
+ wqe);
+ }
+
+ break;
+
+ case RHF_RTE_EXPECTED_FLOW_GEN_ERR:
+ /*
+ * Since the TID flow is able to ride through
+ * generation mismatch, drop this stale packet.
+ */
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ case RHF_RCV_TYPE_ERROR:
+ switch (rte) {
+ case RHF_RTE_ERROR_OP_CODE_ERR:
+ case RHF_RTE_ERROR_KHDR_MIN_LEN_ERR:
+ case RHF_RTE_ERROR_KHDR_HCRC_ERR:
+ case RHF_RTE_ERROR_KHDR_KVER_ERR:
+ case RHF_RTE_ERROR_CONTEXT_ERR:
+ case RHF_RTE_ERROR_KHDR_TID_ERR:
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+s_unlock:
+ spin_unlock(&qp->s_lock);
+ return ret;
+}
+
+bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
+ struct hfi1_pportdata *ppd,
+ struct hfi1_packet *packet)
+{
+ struct hfi1_ibport *ibp = &ppd->ibport_data;
+ struct hfi1_devdata *dd = ppd->dd;
+ struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
+ u8 rcv_type = rhf_rcv_type(packet->rhf);
+ u8 rte = rhf_rcv_type_err(packet->rhf);
+ struct ib_header *hdr = packet->hdr;
+ struct ib_other_headers *ohdr = NULL;
+ int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+ u16 lid = be16_to_cpu(hdr->lrh[1]);
+ u8 opcode;
+ u32 qp_num, psn, ibpsn;
+ struct rvt_qp *qp;
+ struct hfi1_qp_priv *qpriv;
+ unsigned long flags;
+ bool ret = true;
+ struct rvt_ack_entry *e;
+ struct tid_rdma_request *req;
+ struct tid_rdma_flow *flow;
+ int diff = 0;
+
+ trace_hfi1_msg_handle_kdeth_eflags(NULL, "Kdeth error: rhf ",
+ packet->rhf);
+ if (packet->rhf & RHF_ICRC_ERR)
+ return ret;
+
+ packet->ohdr = &hdr->u.oth;
+ ohdr = packet->ohdr;
+ trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
+
+ /* Get the destination QP number. */
+ qp_num = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_qp) &
+ RVT_QPN_MASK;
+ if (lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
+ goto drop;
+
+ psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
+ opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
+
+ rcu_read_lock();
+ qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
+ if (!qp)
+ goto rcu_unlock;
+
+ packet->qp = qp;
+
+ /* Check for valid receive state. */
+ spin_lock_irqsave(&qp->r_lock, flags);
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
+ ibp->rvp.n_pkt_drops++;
+ goto r_unlock;
+ }
+
+ if (packet->rhf & RHF_TID_ERR) {
+ /* For TIDERR and RC QPs preemptively schedule a NAK */
+ u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
+
+ /* Sanity check packet */
+ if (tlen < 24)
+ goto r_unlock;
+
+ /*
+ * Check for GRH. We should never get packets with GRH in this
+ * path.
+ */
+ if (lnh == HFI1_LRH_GRH)
+ goto r_unlock;
+
+ if (tid_rdma_tid_err(packet, rcv_type))
+ goto r_unlock;
+ }
+
+ /* handle TID RDMA READ */
+ if (opcode == TID_OP(READ_RESP)) {
+ ibpsn = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn);
+ ibpsn = mask_psn(ibpsn);
+ ret = handle_read_kdeth_eflags(rcd, packet, rcv_type, rte, psn,
+ ibpsn);
+ goto r_unlock;
+ }
+
+ /*
+ * qp->s_tail_ack_queue points to the rvt_ack_entry currently being
+ * processed. These are completed sequentially so we can be sure that
+ * the pointer will not change until the entire request has completed.
+ */
+ spin_lock(&qp->s_lock);
+ qpriv = qp->priv;
+ if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID ||
+ qpriv->r_tid_tail == qpriv->r_tid_head)
+ goto unlock;
+ e = &qp->s_ack_queue[qpriv->r_tid_tail];
+ if (e->opcode != TID_OP(WRITE_REQ))
+ goto unlock;
+ req = ack_to_tid_req(e);
+ if (req->comp_seg == req->cur_seg)
+ goto unlock;
+ flow = &req->flows[req->clear_tail];
+ trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn);
+ trace_hfi1_rsp_handle_kdeth_eflags(qp, psn);
+ trace_hfi1_tid_write_rsp_handle_kdeth_eflags(qp);
+ trace_hfi1_tid_req_handle_kdeth_eflags(qp, 0, e->opcode, e->psn,
+ e->lpsn, req);
+ trace_hfi1_tid_flow_handle_kdeth_eflags(qp, req->clear_tail, flow);
+
+ switch (rcv_type) {
+ case RHF_RCV_TYPE_EXPECTED:
+ switch (rte) {
+ case RHF_RTE_EXPECTED_FLOW_SEQ_ERR:
+ if (!(qpriv->s_flags & HFI1_R_TID_SW_PSN)) {
+ qpriv->s_flags |= HFI1_R_TID_SW_PSN;
+ flow->flow_state.r_next_psn =
+ read_r_next_psn(dd, rcd->ctxt,
+ flow->idx);
+ qpriv->r_next_psn_kdeth =
+ flow->flow_state.r_next_psn;
+ goto nak_psn;
+ } else {
+ /*
+ * If the received PSN does not match the next
+ * expected PSN, NAK the packet.
+ * However, only do that if we know that a
+ * NAK has already been sent. Otherwise, this
+ * mismatch could be due to packets that were
+ * already in flight.
+ */
+ diff = cmp_psn(psn,
+ flow->flow_state.r_next_psn);
+ if (diff > 0)
+ goto nak_psn;
+ else if (diff < 0)
+ break;
+
+ qpriv->s_nak_state = 0;
+ /*
+ * If SW PSN verification is successful and this
+ * is the last packet in the segment, tell the
+ * caller to process it as a normal packet.
+ */
+ if (psn == full_flow_psn(flow,
+ flow->flow_state.lpsn))
+ ret = false;
+ flow->flow_state.r_next_psn =
+ mask_psn(psn + 1);
+ qpriv->r_next_psn_kdeth =
+ flow->flow_state.r_next_psn;
+ }
+ break;
+
+ case RHF_RTE_EXPECTED_FLOW_GEN_ERR:
+ goto nak_psn;
+
+ default:
+ break;
+ }
+ break;
+
+ case RHF_RCV_TYPE_ERROR:
+ switch (rte) {
+ case RHF_RTE_ERROR_OP_CODE_ERR:
+ case RHF_RTE_ERROR_KHDR_MIN_LEN_ERR:
+ case RHF_RTE_ERROR_KHDR_HCRC_ERR:
+ case RHF_RTE_ERROR_KHDR_KVER_ERR:
+ case RHF_RTE_ERROR_CONTEXT_ERR:
+ case RHF_RTE_ERROR_KHDR_TID_ERR:
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+unlock:
+ spin_unlock(&qp->s_lock);
+r_unlock:
+ spin_unlock_irqrestore(&qp->r_lock, flags);
+rcu_unlock:
+ rcu_read_unlock();
+drop:
+ return ret;
+nak_psn:
+ ibp->rvp.n_rc_seqnak++;
+ if (!qpriv->s_nak_state) {
+ qpriv->s_nak_state = IB_NAK_PSN_ERROR;
+ /* We are NAK'ing the next expected PSN */
+ qpriv->s_nak_psn = mask_psn(flow->flow_state.r_next_psn);
+ tid_rdma_trigger_ack(qp);
+ }
+ goto unlock;
+}
+
+/*
+ * "Rewind" the TID request information.
+ * This means that we reset the state back to ACTIVE,
+ * find the proper flow, set the flow index to that flow,
+ * and reset the flow information.
+ */
+void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ u32 *bth2)
+{
+ struct tid_rdma_request *req = wqe_to_tid_req(wqe);
+ struct tid_rdma_flow *flow;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ int diff, delta_pkts;
+ u32 tididx = 0, i;
+ u16 fidx;
+
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
+ *bth2 = mask_psn(qp->s_psn);
+ flow = find_flow_ib(req, *bth2, &fidx);
+ if (!flow) {
+ trace_hfi1_msg_tid_restart_req(/* msg */
+ qp, "!!!!!! Could not find flow to restart: bth2 ",
+ (u64)*bth2);
+ trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode,
+ wqe->psn, wqe->lpsn,
+ req);
+ return;
+ }
+ } else {
+ fidx = req->acked_tail;
+ flow = &req->flows[fidx];
+ *bth2 = mask_psn(req->r_ack_psn);
+ }
+
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
+ delta_pkts = delta_psn(*bth2, flow->flow_state.ib_spsn);
+ else
+ delta_pkts = delta_psn(*bth2,
+ full_flow_psn(flow,
+ flow->flow_state.spsn));
+
+ trace_hfi1_tid_flow_restart_req(qp, fidx, flow);
+ diff = delta_pkts + flow->resync_npkts;
+
+ flow->sent = 0;
+ flow->pkt = 0;
+ flow->tid_idx = 0;
+ flow->tid_offset = 0;
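+ /*
+ * Walk the TID entries to skip over the 'diff' packets that precede
+ * the restart point, advancing pkt/sent/tid_offset so transmission
+ * resumes at the correct point within the flow.
+ */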
+ if (diff) {
+ for (tididx = 0; tididx < flow->tidcnt; tididx++) {
+ u32 tidentry = flow->tid_entry[tididx], tidlen,
+ tidnpkts, npkts;
+
+ flow->tid_offset = 0;
+ tidlen = EXP_TID_GET(tidentry, LEN) * PAGE_SIZE;
+ tidnpkts = rvt_div_round_up_mtu(qp, tidlen);
+ npkts = min_t(u32, diff, tidnpkts);
+ flow->pkt += npkts;
+ flow->sent += (npkts == tidnpkts ? tidlen :
+ npkts * qp->pmtu);
+ flow->tid_offset += npkts * qp->pmtu;
+ diff -= npkts;
+ if (!diff)
+ break;
+ }
+ }
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
+ rvt_skip_sge(&qpriv->tid_ss, (req->cur_seg * req->seg_len) +
+ flow->sent, 0);
+ /*
+ * Packet PSN is based on flow_state.spsn + flow->pkt. However,
+ * during a RESYNC, the generation is incremented and the
+ * sequence is reset to 0. Since we've adjusted the npkts in the
+ * flow and the SGE has been sufficiently advanced, we have to
+ * adjust flow->pkt in order to calculate the correct PSN.
+ */
+ flow->pkt -= flow->resync_npkts;
+ }
+
+ if (flow->tid_offset ==
+ EXP_TID_GET(flow->tid_entry[tididx], LEN) * PAGE_SIZE) {
+ tididx++;
+ flow->tid_offset = 0;
+ }
+ flow->tid_idx = tididx;
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
+ /* Move flow_idx to correct index */
+ req->flow_idx = fidx;
+ else
+ req->clear_tail = fidx;
+
+ trace_hfi1_tid_flow_restart_req(qp, fidx, flow);
+ trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode, wqe->psn,
+ wqe->lpsn, req);
+ req->state = TID_REQUEST_ACTIVE;
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
+ /* Reset all the flows that we are going to resend */
+ fidx = CIRC_NEXT(fidx, MAX_FLOWS);
+ i = qpriv->s_tid_tail;
+ do {
+ for (; CIRC_CNT(req->setup_head, fidx, MAX_FLOWS);
+ fidx = CIRC_NEXT(fidx, MAX_FLOWS)) {
+ req->flows[fidx].sent = 0;
+ req->flows[fidx].pkt = 0;
+ req->flows[fidx].tid_idx = 0;
+ req->flows[fidx].tid_offset = 0;
+ req->flows[fidx].resync_npkts = 0;
+ }
+ if (i == qpriv->s_tid_cur)
+ break;
+ do {
+ i = (++i == qp->s_size ? 0 : i);
+ wqe = rvt_get_swqe_ptr(qp, i);
+ } while (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE);
+ req = wqe_to_tid_req(wqe);
+ req->cur_seg = req->ack_seg;
+ fidx = req->acked_tail;
+ /* Pull req->clear_tail back */
+ req->clear_tail = fidx;
+ } while (1);
+ }
+}
+
+void hfi1_qp_kern_exp_rcv_clear_all(struct rvt_qp *qp)
+{
+ int i, ret;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct tid_flow_state *fs;
+
+ if (qp->ibqp.qp_type != IB_QPT_RC || !HFI1_CAP_IS_KSET(TID_RDMA))
+ return;
+
+ /*
+ * First, clear the flow to help prevent any delayed packets from
+ * being delivered.
+ */
+ fs = &qpriv->flow_state;
+ if (fs->index != RXE_NUM_TID_FLOWS)
+ hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
+
+ for (i = qp->s_acked; i != qp->s_head;) {
+ struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);
+
+ if (++i == qp->s_size)
+ i = 0;
+ /* Free only locally allocated TID entries */
+ if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
+ continue;
+ do {
+ struct hfi1_swqe_priv *priv = wqe->priv;
+
+ ret = hfi1_kern_exp_rcv_clear(&priv->tid_req);
+ } while (!ret);
+ }
+ for (i = qp->s_acked_ack_queue; i != qp->r_head_ack_queue;) {
+ struct rvt_ack_entry *e = &qp->s_ack_queue[i];
+
+ if (++i == rvt_max_atomic(ib_to_rvt(qp->ibqp.device)))
+ i = 0;
+ /* Free only locally allocated TID entries */
+ if (e->opcode != TID_OP(WRITE_REQ))
+ continue;
+ do {
+ struct hfi1_ack_priv *priv = e->priv;
+
+ ret = hfi1_kern_exp_rcv_clear(&priv->tid_req);
+ } while (!ret);
+ }
+}
+
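+/*
+ * Decide whether @wqe must wait for the previous WQE before being sent:
+ * most opcodes are interlocked behind a preceding TID RDMA WRITE whose
+ * segments have not all been acked, and a TID RDMA READ is additionally
+ * interlocked behind a plain RDMA READ that is not yet fully acked.
+ */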
+bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe)
+{
+ struct rvt_swqe *prev;
+ struct hfi1_qp_priv *priv = qp->priv;
+ u32 s_prev;
+ struct tid_rdma_request *req;
+
+ s_prev = (qp->s_cur == 0 ? qp->s_size : qp->s_cur) - 1;
+ prev = rvt_get_swqe_ptr(qp, s_prev);
+
+ switch (wqe->wr.opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_SEND_WITH_INV:
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ switch (prev->wr.opcode) {
+ case IB_WR_TID_RDMA_WRITE:
+ req = wqe_to_tid_req(prev);
+ if (req->ack_seg != req->total_segs)
+ goto interlock;
+ break;
+ default:
+ break;
+ }
+ break;
+ case IB_WR_RDMA_READ:
+ if (prev->wr.opcode != IB_WR_TID_RDMA_WRITE)
+ break;
+ fallthrough;
+ case IB_WR_TID_RDMA_READ:
+ switch (prev->wr.opcode) {
+ case IB_WR_RDMA_READ:
+ if (qp->s_acked != qp->s_cur)
+ goto interlock;
+ break;
+ case IB_WR_TID_RDMA_WRITE:
+ req = wqe_to_tid_req(prev);
+ if (req->ack_seg != req->total_segs)
+ goto interlock;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return false;
+
+interlock:
+ priv->s_flags |= HFI1_S_TID_WAIT_INTERLCK;
+ return true;
+}
+
+/* Does @sge meet the alignment requirements for tid rdma? */
+static inline bool hfi1_check_sge_align(struct rvt_qp *qp,
+ struct rvt_sge *sge, int num_sge)
+{
+ int i;
+
+ for (i = 0; i < num_sge; i++, sge++) {
+ trace_hfi1_sge_check_align(qp, i, sge);
+ if ((u64)sge->vaddr & ~PAGE_MASK ||
+ sge->sge_length & ~PAGE_MASK)
+ return false;
+ }
+ return true;
+}
+
+void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
+{
+ struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
+ struct hfi1_swqe_priv *priv = wqe->priv;
+ struct tid_rdma_params *remote;
+ enum ib_wr_opcode new_opcode;
+ bool do_tid_rdma = false;
+ struct hfi1_pportdata *ppd = qpriv->rcd->ppd;
+
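+ /* Don't use TID RDMA on loopback, i.e. when the DLID is our own port LID. */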
+ if ((rdma_ah_get_dlid(&qp->remote_ah_attr) & ~((1 << ppd->lmc) - 1)) ==
+ ppd->lid)
+ return;
+ if (qpriv->hdr_type != HFI1_PKT_TYPE_9B)
+ return;
+
+ rcu_read_lock();
+ remote = rcu_dereference(qpriv->tid_rdma.remote);
+ /*
+ * If TID RDMA is disabled by the negotiation, don't
+ * use it.
+ */
+ if (!remote)
+ goto exit;
+
+ if (wqe->wr.opcode == IB_WR_RDMA_READ) {
+ if (hfi1_check_sge_align(qp, &wqe->sg_list[0],
+ wqe->wr.num_sge)) {
+ new_opcode = IB_WR_TID_RDMA_READ;
+ do_tid_rdma = true;
+ }
+ } else if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
+ /*
+ * TID RDMA is enabled for this RDMA WRITE request iff:
+ * 1. The remote address is page-aligned,
+ * 2. The length is larger than the minimum segment size,
+ * 3. The length is a multiple of the page size.
+ */
+ if (!(wqe->rdma_wr.remote_addr & ~PAGE_MASK) &&
+ !(wqe->length & ~PAGE_MASK)) {
+ new_opcode = IB_WR_TID_RDMA_WRITE;
+ do_tid_rdma = true;
+ }
+ }
+
+ if (do_tid_rdma) {
+ if (hfi1_kern_exp_rcv_alloc_flows(&priv->tid_req, GFP_ATOMIC))
+ goto exit;
+ wqe->wr.opcode = new_opcode;
+ priv->tid_req.seg_len =
+ min_t(u32, remote->max_len, wqe->length);
+ priv->tid_req.total_segs =
+ DIV_ROUND_UP(wqe->length, priv->tid_req.seg_len);
+ /* Compute the last PSN of the request */
+ wqe->lpsn = wqe->psn;
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
+ priv->tid_req.n_flows = remote->max_read;
+ qpriv->tid_r_reqs++;
+ wqe->lpsn += rvt_div_round_up_mtu(qp, wqe->length) - 1;
+ } else {
+ wqe->lpsn += priv->tid_req.total_segs - 1;
+ atomic_inc(&qpriv->n_requests);
+ }
+
+ priv->tid_req.cur_seg = 0;
+ priv->tid_req.comp_seg = 0;
+ priv->tid_req.ack_seg = 0;
+ priv->tid_req.state = TID_REQUEST_INACTIVE;
+ /*
+ * Reset acked_tail.
+ * TID RDMA READ does not have ACKs so it does not
+ * update the pointer. We have to reset it so TID RDMA
+ * WRITE does not get confused.
+ */
+ priv->tid_req.acked_tail = priv->tid_req.setup_head;
+ trace_hfi1_tid_req_setup_tid_wqe(qp, 1, wqe->wr.opcode,
+ wqe->psn, wqe->lpsn,
+ &priv->tid_req);
+ }
+exit:
+ rcu_read_unlock();
+}
+
+/* TID RDMA WRITE functions */
+
+u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ struct ib_other_headers *ohdr,
+ u32 *bth1, u32 *bth2, u32 *len)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct tid_rdma_request *req = wqe_to_tid_req(wqe);
+ struct tid_rdma_params *remote;
+
+ rcu_read_lock();
+ remote = rcu_dereference(qpriv->tid_rdma.remote);
+ /*
+ * Set the number of flows to be used based on the negotiated
+ * parameters.
+ */
+ req->n_flows = remote->max_write;
+ req->state = TID_REQUEST_ACTIVE;
+
+ KDETH_RESET(ohdr->u.tid_rdma.w_req.kdeth0, KVER, 0x1);
+ KDETH_RESET(ohdr->u.tid_rdma.w_req.kdeth1, JKEY, remote->jkey);
+ ohdr->u.tid_rdma.w_req.reth.vaddr =
+ cpu_to_be64(wqe->rdma_wr.remote_addr + (wqe->length - *len));
+ ohdr->u.tid_rdma.w_req.reth.rkey =
+ cpu_to_be32(wqe->rdma_wr.rkey);
+ ohdr->u.tid_rdma.w_req.reth.length = cpu_to_be32(*len);
+ ohdr->u.tid_rdma.w_req.verbs_qp = cpu_to_be32(qp->remote_qpn);
+ *bth1 &= ~RVT_QPN_MASK;
+ *bth1 |= remote->qp;
+ qp->s_state = TID_OP(WRITE_REQ);
+ qp->s_flags |= HFI1_S_WAIT_TID_RESP;
+ *bth2 |= IB_BTH_REQ_ACK;
+ *len = 0;
+
+ rcu_read_unlock();
+ return sizeof(ohdr->u.tid_rdma.w_req) / sizeof(u32);
+}
+
+static u32 hfi1_compute_tid_rdma_flow_wt(struct rvt_qp *qp)
+{
+ /*
+ * Heuristic for computing the RNR timeout when waiting on the flow
+ * queue. Rather than a computationally expensive exact estimate of when
+ * a flow will be available, we assume that if a QP is at position N in
+ * the flow queue it has to wait approximately (N + 1) * (number of
+ * segments between two sync points). The rationale for this is that
+ * flows are released and recycled at each sync point.
+ */
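+ /* (PSN space per generation * pmtu) / segment size ~= segments per sync point */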
+ return (MAX_TID_FLOW_PSN * qp->pmtu) >> TID_RDMA_SEGMENT_SHIFT;
+}
+
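+/*
+ * Approximate number of queue entries this QP is waiting behind: its
+ * enqueue ticket minus the queue's dequeue counter.
+ */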
+static u32 position_in_queue(struct hfi1_qp_priv *qpriv,
+ struct tid_queue *queue)
+{
+ return qpriv->tid_enqueue - queue->dequeue;
+}
+
+/*
+ * @qp: points to rvt_qp context.
+ * @to_seg: desired RNR timeout in segments.
+ * Return: index of the next highest timeout in the ib_hfi1_rnr_table[]
+ */
+static u32 hfi1_compute_tid_rnr_timeout(struct rvt_qp *qp, u32 to_seg)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ u64 timeout;
+ u32 bytes_per_us;
+ u8 i;
+
+ bytes_per_us = active_egress_rate(qpriv->rcd->ppd) / 8;
+ timeout = (to_seg * TID_RDMA_MAX_SEGMENT_SIZE) / bytes_per_us;
+ /*
+ * Find the next highest value in the RNR table to the required
+ * timeout. This gives the responder some padding.
+ */
+ for (i = 1; i <= IB_AETH_CREDIT_MASK; i++)
+ if (rvt_rnr_tbl_to_usec(i) >= timeout)
+ return i;
+ return 0;
+}
+
+/*
+ * Central place for resource allocation at the TID write responder. It
+ * is called from the write_req and write_data interrupt handlers as
+ * well as from the send thread when a queued QP is scheduled for
+ * resource allocation.
+ *
+ * Iterates over (a) segments of a request and then (b) queued requests
+ * themselves to allocate resources for up to local->max_write
+ * segments across multiple requests. Stop allocating when we
+ * hit a sync point, resume allocating after data packets at
+ * sync point have been received.
+ *
+ * Resource allocation and sending of responses are decoupled. The
+ * request/segment currently being allocated and the one being sent are
+ * tracked as follows.
+ * Resources are allocated for:
+ * [request: qpriv->r_tid_alloc, segment: req->alloc_seg]
+ * The send thread sends:
+ * [request: qp->s_tail_ack_queue, segment:req->cur_seg]
+ */
+static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
+{
+ struct tid_rdma_request *req;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct hfi1_ctxtdata *rcd = qpriv->rcd;
+ struct tid_rdma_params *local = &qpriv->tid_rdma.local;
+ struct rvt_ack_entry *e;
+ u32 npkts, to_seg;
+ bool last;
+ int ret = 0;
+
+ lockdep_assert_held(&qp->s_lock);
+
+ while (1) {
+ trace_hfi1_rsp_tid_write_alloc_res(qp, 0);
+ trace_hfi1_tid_write_rsp_alloc_res(qp);
+ /*
+ * Don't allocate more segments if a RNR NAK has already been
+ * scheduled to avoid messing up qp->r_psn: the RNR NAK will
+ * be sent only when all allocated segments have been sent.
+ * However, if more segments are allocated before that, TID RDMA
+ * WRITE RESP packets will be sent out for these new segments
+ * before the RNR NAK packet. When the requester receives the
+ * RNR NAK packet, it will restart with qp->s_last_psn + 1,
+ * which does not match qp->r_psn and will be dropped.
+ * Consequently, the requester will exhaust its retries and
+ * put the qp into error state.
+ */
+ if (qpriv->rnr_nak_state == TID_RNR_NAK_SEND)
+ break;
+
+ /* No requests left to process */
+ if (qpriv->r_tid_alloc == qpriv->r_tid_head) {
+ /* If all data has been received, clear the flow */
+ if (qpriv->flow_state.index < RXE_NUM_TID_FLOWS &&
+ !qpriv->alloc_w_segs) {
+ hfi1_kern_clear_hw_flow(rcd, qp);
+ qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
+ }
+ break;
+ }
+
+ e = &qp->s_ack_queue[qpriv->r_tid_alloc];
+ if (e->opcode != TID_OP(WRITE_REQ))
+ goto next_req;
+ req = ack_to_tid_req(e);
+ trace_hfi1_tid_req_write_alloc_res(qp, 0, e->opcode, e->psn,
+ e->lpsn, req);
+ /* Finished allocating for all segments of this request */
+ if (req->alloc_seg >= req->total_segs)
+ goto next_req;
+
+ /* Can allocate only a maximum of local->max_write for a QP */
+ if (qpriv->alloc_w_segs >= local->max_write)
+ break;
+
+ /* Don't allocate at a sync point with data packets pending */
+ if (qpriv->sync_pt && qpriv->alloc_w_segs)
+ break;
+
+ /* All data received at the sync point, continue */
+ if (qpriv->sync_pt && !qpriv->alloc_w_segs) {
+ hfi1_kern_clear_hw_flow(rcd, qp);
+ qpriv->sync_pt = false;
+ qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
+ }
+
+ /* Allocate flow if we don't have one */
+ if (qpriv->flow_state.index >= RXE_NUM_TID_FLOWS) {
+ ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp);
+ if (ret) {
+ to_seg = hfi1_compute_tid_rdma_flow_wt(qp) *
+ position_in_queue(qpriv,
+ &rcd->flow_queue);
+ break;
+ }
+ }
+
+ npkts = rvt_div_round_up_mtu(qp, req->seg_len);
+
+ /*
+ * We are at a sync point if we run out of KDETH PSN space.
+ * Last PSN of every generation is reserved for RESYNC.
+ */
+ if (qpriv->flow_state.psn + npkts > MAX_TID_FLOW_PSN - 1) {
+ qpriv->sync_pt = true;
+ break;
+ }
+
+ /*
+ * If overtaking req->acked_tail, send an RNR NAK. Because the
+ * QP is not queued in this case and the issue can only be
+ * caused by a delay in scheduling the second leg, which we
+ * cannot estimate, we use a rather arbitrary RNR timeout of
+ * (MAX_FLOWS / 2) segments.
+ */
+ if (!CIRC_SPACE(req->setup_head, req->acked_tail,
+ MAX_FLOWS)) {
+ ret = -EAGAIN;
+ to_seg = MAX_FLOWS >> 1;
+ tid_rdma_trigger_ack(qp);
+ break;
+ }
+
+ /* Try to allocate rcv array / TID entries */
+ ret = hfi1_kern_exp_rcv_setup(req, &req->ss, &last);
+ if (ret == -EAGAIN)
+ to_seg = position_in_queue(qpriv, &rcd->rarr_queue);
+ if (ret)
+ break;
+
+ qpriv->alloc_w_segs++;
+ req->alloc_seg++;
+ continue;
+next_req:
+ /* Begin processing the next request */
+ if (++qpriv->r_tid_alloc >
+ rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
+ qpriv->r_tid_alloc = 0;
+ }
+
+ /*
+ * Schedule an RNR NAK to be sent if (a) flow or rcv array allocation
+ * has failed, (b) we are called from the rcv handler interrupt context,
+ * and (c) an RNR NAK has not already been scheduled.
+ */
+ if (ret == -EAGAIN && intr_ctx && !qp->r_nak_state)
+ goto send_rnr_nak;
+
+ return;
+
+send_rnr_nak:
+ lockdep_assert_held(&qp->r_lock);
+
+ /* Set r_nak_state to prevent unrelated events from generating NAKs */
+ qp->r_nak_state = hfi1_compute_tid_rnr_timeout(qp, to_seg) | IB_RNR_NAK;
+
+ /* Pull back r_psn to the segment being RNR NAK'd */
+ qp->r_psn = e->psn + req->alloc_seg;
+ qp->r_ack_psn = qp->r_psn;
+ /*
+ * Pull back r_head_ack_queue to the ack entry following the request
+ * being RNR NAK'd. This allows resources to be allocated to the request
+ * if the queued QP is scheduled.
+ */
+ qp->r_head_ack_queue = qpriv->r_tid_alloc + 1;
+ if (qp->r_head_ack_queue > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
+ qp->r_head_ack_queue = 0;
+ qpriv->r_tid_head = qp->r_head_ack_queue;
+ /*
+ * These send side fields are used in make_rc_ack(). They are set in
+ * hfi1_send_rc_ack() but must be set here before dropping qp->s_lock
+ * for consistency
+ */
+ qp->s_nak_state = qp->r_nak_state;
+ qp->s_ack_psn = qp->r_ack_psn;
+ /*
+ * Clear the ACK PENDING flag to prevent unwanted ACK because we
+ * have modified qp->s_ack_psn here.
+ */
+ qp->s_flags &= ~(RVT_S_ACK_PENDING);
+
+ trace_hfi1_rsp_tid_write_alloc_res(qp, qp->r_psn);
+ /*
+ * qpriv->rnr_nak_state is used to determine when the scheduled RNR NAK
+ * has actually been sent. The RVT_S_ACK_PENDING bit in qp->s_flags
+ * cannot be used for this because qp->s_lock is dropped before calling
+ * hfi1_send_rc_ack(), leading to inconsistency between the receive
+ * interrupt handlers and the send thread in make_rc_ack().
+ */
+ qpriv->rnr_nak_state = TID_RNR_NAK_SEND;
+
+ /*
+ * Schedule the RNR NAK to be sent. RNR NAKs are scheduled from the
+ * receive interrupt handlers but will be sent from the send engine
+ * behind any previous responses that may have been scheduled.
+ */
+ rc_defered_ack(rcd, qp);
+}
+
+void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet)
+{
+ /* HANDLER FOR TID RDMA WRITE REQUEST packet (Responder side) */
+
+ /*
+ * 1. Verify TID RDMA WRITE REQ as per IB_OPCODE_RC_RDMA_WRITE_FIRST
+ * (see hfi1_rc_rcv())
+ * - Don't allow 0-length requests.
+ * 2. Put TID RDMA WRITE REQ into the response queue (s_ack_queue)
+ * - Setup struct tid_rdma_req with request info
+ * - Prepare struct tid_rdma_flow array?
+ * 3. Set the qp->s_ack_state as state diagram in design doc.
+ * 4. Set RVT_S_RESP_PENDING in s_flags.
+ * 5. Kick the send engine (hfi1_schedule_send())
+ */
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct rvt_qp *qp = packet->qp;
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct ib_other_headers *ohdr = packet->ohdr;
+ struct rvt_ack_entry *e;
+ unsigned long flags;
+ struct ib_reth *reth;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct tid_rdma_request *req;
+ u32 bth0, psn, len, rkey, num_segs;
+ bool fecn;
+ u8 next;
+ u64 vaddr;
+ int diff;
+
+ bth0 = be32_to_cpu(ohdr->bth[0]);
+ if (hfi1_ruc_check_hdr(ibp, packet))
+ return;
+
+ fecn = process_ecn(qp, packet);
+ psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
+ trace_hfi1_rsp_rcv_tid_write_req(qp, psn);
+
+ if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
+ rvt_comm_est(qp);
+
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto nack_inv;
+
+ reth = &ohdr->u.tid_rdma.w_req.reth;
+ vaddr = be64_to_cpu(reth->vaddr);
+ len = be32_to_cpu(reth->length);
+
+ num_segs = DIV_ROUND_UP(len, qpriv->tid_rdma.local.max_len);
+ diff = delta_psn(psn, qp->r_psn);
+ if (unlikely(diff)) {
+ tid_rdma_rcv_err(packet, ohdr, qp, psn, diff, fecn);
+ return;
+ }
+
+ /*
+ * The resent request which was previously RNR NAK'd is inserted at the
+ * location of the original request, which is one entry behind
+ * r_head_ack_queue
+ */
+ if (qpriv->rnr_nak_state)
+ qp->r_head_ack_queue = qp->r_head_ack_queue ?
+ qp->r_head_ack_queue - 1 :
+ rvt_size_atomic(ib_to_rvt(qp->ibqp.device));
+
+ /* We've verified the request, insert it into the ack queue. */
+ next = qp->r_head_ack_queue + 1;
+ if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
+ next = 0;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (unlikely(next == qp->s_acked_ack_queue)) {
+ if (!qp->s_ack_queue[next].sent)
+ goto nack_inv_unlock;
+ update_ack_queue(qp, next);
+ }
+ e = &qp->s_ack_queue[qp->r_head_ack_queue];
+ req = ack_to_tid_req(e);
+
+ /* Bring previously RNR NAK'd request back to life */
+ if (qpriv->rnr_nak_state) {
+ qp->r_nak_state = 0;
+ qp->s_nak_state = 0;
+ qpriv->rnr_nak_state = TID_RNR_NAK_INIT;
+ qp->r_psn = e->lpsn + 1;
+ req->state = TID_REQUEST_INIT;
+ goto update_head;
+ }
+
+ release_rdma_sge_mr(e);
+
+ /* The length needs to be in multiples of PAGE_SIZE */
+ if (!len || len & ~PAGE_MASK)
+ goto nack_inv_unlock;
+
+ rkey = be32_to_cpu(reth->rkey);
+ qp->r_len = len;
+
+ if (e->opcode == TID_OP(WRITE_REQ) &&
+ (req->setup_head != req->clear_tail ||
+ req->clear_tail != req->acked_tail))
+ goto nack_inv_unlock;
+
+ if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr,
+ rkey, IB_ACCESS_REMOTE_WRITE)))
+ goto nack_acc;
+
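+ /*
+ * Advance r_psn so that e->lpsn below covers the last PSN of this
+ * multi-segment request.
+ */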
+ qp->r_psn += num_segs - 1;
+
+ e->opcode = (bth0 >> 24) & 0xff;
+ e->psn = psn;
+ e->lpsn = qp->r_psn;
+ e->sent = 0;
+
+ req->n_flows = min_t(u16, num_segs, qpriv->tid_rdma.local.max_write);
+ req->state = TID_REQUEST_INIT;
+ req->cur_seg = 0;
+ req->comp_seg = 0;
+ req->ack_seg = 0;
+ req->alloc_seg = 0;
+ req->isge = 0;
+ req->seg_len = qpriv->tid_rdma.local.max_len;
+ req->total_len = len;
+ req->total_segs = num_segs;
+ req->r_flow_psn = e->psn;
+ req->ss.sge = e->rdma_sge;
+ req->ss.num_sge = 1;
+
+ req->flow_idx = req->setup_head;
+ req->clear_tail = req->setup_head;
+ req->acked_tail = req->setup_head;
+
+ qp->r_state = e->opcode;
+ qp->r_nak_state = 0;
+ /*
+ * We need to increment the MSN here instead of when we
+ * finish sending the result since a duplicate request would
+ * increment it more than once.
+ */
+ qp->r_msn++;
+ qp->r_psn++;
+
+ trace_hfi1_tid_req_rcv_write_req(qp, 0, e->opcode, e->psn, e->lpsn,
+ req);
+
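+ /*
+ * Point r_tid_tail at the new entry unless an earlier, still
+ * incomplete TID RDMA WRITE request is being processed.
+ */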
+ if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID) {
+ qpriv->r_tid_tail = qp->r_head_ack_queue;
+ } else if (qpriv->r_tid_tail == qpriv->r_tid_head) {
+ struct tid_rdma_request *ptr;
+
+ e = &qp->s_ack_queue[qpriv->r_tid_tail];
+ ptr = ack_to_tid_req(e);
+
+ if (e->opcode != TID_OP(WRITE_REQ) ||
+ ptr->comp_seg == ptr->total_segs) {
+ if (qpriv->r_tid_tail == qpriv->r_tid_ack)
+ qpriv->r_tid_ack = qp->r_head_ack_queue;
+ qpriv->r_tid_tail = qp->r_head_ack_queue;
+ }
+ }
+update_head:
+ qp->r_head_ack_queue = next;
+ qpriv->r_tid_head = qp->r_head_ack_queue;
+
+ hfi1_tid_write_alloc_resources(qp, true);
+ trace_hfi1_tid_write_rsp_rcv_req(qp);
+
+ /* Schedule the send tasklet. */
+ qp->s_flags |= RVT_S_RESP_PENDING;
+ if (fecn)
+ qp->s_flags |= RVT_S_ECN;
+ hfi1_schedule_send(qp);
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return;
+
+nack_inv_unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+nack_inv:
+ rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ qp->r_nak_state = IB_NAK_INVALID_REQUEST;
+ qp->r_ack_psn = qp->r_psn;
+ /* Queue NAK for later */
+ rc_defered_ack(rcd, qp);
+ return;
+nack_acc:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
+ qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
+ qp->r_ack_psn = qp->r_psn;
+}
+
+u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
+ struct ib_other_headers *ohdr, u32 *bth1,
+ u32 bth2, u32 *len,
+ struct rvt_sge_state **ss)
+{
+ struct hfi1_ack_priv *epriv = e->priv;
+ struct tid_rdma_request *req = &epriv->tid_req;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct tid_rdma_flow *flow = NULL;
+ u32 resp_len = 0, hdwords = 0;
+ void *resp_addr = NULL;
+ struct tid_rdma_params *remote;
+
+ trace_hfi1_tid_req_build_write_resp(qp, 0, e->opcode, e->psn, e->lpsn,
+ req);
+ trace_hfi1_tid_write_rsp_build_resp(qp);
+ trace_hfi1_rsp_build_tid_write_resp(qp, bth2);
+ flow = &req->flows[req->flow_idx];
+ switch (req->state) {
+ default:
+ /*
+ * Try to allocate resources here in case QP was queued and was
+ * later scheduled when resources became available
+ */
+ hfi1_tid_write_alloc_resources(qp, false);
+
+ /* We've already sent everything which is ready */
+ if (req->cur_seg >= req->alloc_seg)
+ goto done;
+
+ /*
+ * Resources can be assigned but responses cannot be sent while in
+ * the rnr_nak state, until the resent request is received.
+ */
+ if (qpriv->rnr_nak_state == TID_RNR_NAK_SENT)
+ goto done;
+
+ req->state = TID_REQUEST_ACTIVE;
+ trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow);
+ req->flow_idx = CIRC_NEXT(req->flow_idx, MAX_FLOWS);
+ hfi1_add_tid_reap_timer(qp);
+ break;
+
+ case TID_REQUEST_RESEND_ACTIVE:
+ case TID_REQUEST_RESEND:
+ trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow);
+ req->flow_idx = CIRC_NEXT(req->flow_idx, MAX_FLOWS);
+ if (!CIRC_CNT(req->setup_head, req->flow_idx, MAX_FLOWS))
+ req->state = TID_REQUEST_ACTIVE;
+
+ hfi1_mod_tid_reap_timer(qp);
+ break;
+ }
+ flow->flow_state.resp_ib_psn = bth2;
+ resp_addr = (void *)flow->tid_entry;
+ resp_len = sizeof(*flow->tid_entry) * flow->tidcnt;
+ req->cur_seg++;
+
+ memset(&ohdr->u.tid_rdma.w_rsp, 0, sizeof(ohdr->u.tid_rdma.w_rsp));
+ epriv->ss.sge.vaddr = resp_addr;
+ epriv->ss.sge.sge_length = resp_len;
+ epriv->ss.sge.length = epriv->ss.sge.sge_length;
+ /*
+ * We can safely zero these out. Since the first SGE covers the
+ * entire packet, nothing else should even look at the MR.
+ */
+ epriv->ss.sge.mr = NULL;
+ epriv->ss.sge.m = 0;
+ epriv->ss.sge.n = 0;
+
+ epriv->ss.sg_list = NULL;
+ epriv->ss.total_len = epriv->ss.sge.sge_length;
+ epriv->ss.num_sge = 1;
+
+ *ss = &epriv->ss;
+ *len = epriv->ss.total_len;
+
+ /* Construct the TID RDMA WRITE RESP packet header */
+ rcu_read_lock();
+ remote = rcu_dereference(qpriv->tid_rdma.remote);
+
+ KDETH_RESET(ohdr->u.tid_rdma.w_rsp.kdeth0, KVER, 0x1);
+ KDETH_RESET(ohdr->u.tid_rdma.w_rsp.kdeth1, JKEY, remote->jkey);
+ ohdr->u.tid_rdma.w_rsp.aeth = rvt_compute_aeth(qp);
+ ohdr->u.tid_rdma.w_rsp.tid_flow_psn =
+ cpu_to_be32((flow->flow_state.generation <<
+ HFI1_KDETH_BTH_SEQ_SHIFT) |
+ (flow->flow_state.spsn &
+ HFI1_KDETH_BTH_SEQ_MASK));
+ ohdr->u.tid_rdma.w_rsp.tid_flow_qp =
+ cpu_to_be32(qpriv->tid_rdma.local.qp |
+ ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) <<
+ TID_RDMA_DESTQP_FLOW_SHIFT) |
+ qpriv->rcd->ctxt);
+ ohdr->u.tid_rdma.w_rsp.verbs_qp = cpu_to_be32(qp->remote_qpn);
+ *bth1 = remote->qp;
+ rcu_read_unlock();
+ hdwords = sizeof(ohdr->u.tid_rdma.w_rsp) / sizeof(u32);
+ qpriv->pending_tid_w_segs++;
+done:
+ return hdwords;
+}
+
+static void hfi1_add_tid_reap_timer(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+
+ lockdep_assert_held(&qp->s_lock);
+ if (!(qpriv->s_flags & HFI1_R_TID_RSC_TIMER)) {
+ qpriv->s_flags |= HFI1_R_TID_RSC_TIMER;
+ qpriv->s_tid_timer.expires = jiffies +
+ qpriv->tid_timer_timeout_jiffies;
+ add_timer(&qpriv->s_tid_timer);
+ }
+}
+
+static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+
+ lockdep_assert_held(&qp->s_lock);
+ qpriv->s_flags |= HFI1_R_TID_RSC_TIMER;
+ mod_timer(&qpriv->s_tid_timer, jiffies +
+ qpriv->tid_timer_timeout_jiffies);
+}
+
+static int hfi1_stop_tid_reap_timer(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ int rval = 0;
+
+ lockdep_assert_held(&qp->s_lock);
+ if (qpriv->s_flags & HFI1_R_TID_RSC_TIMER) {
+ rval = timer_delete(&qpriv->s_tid_timer);
+ qpriv->s_flags &= ~HFI1_R_TID_RSC_TIMER;
+ }
+ return rval;
+}
+
+void hfi1_del_tid_reap_timer(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+
+ timer_delete_sync(&qpriv->s_tid_timer);
+ qpriv->s_flags &= ~HFI1_R_TID_RSC_TIMER;
+}
+
+static void hfi1_tid_timeout(struct timer_list *t)
+{
+ struct hfi1_qp_priv *qpriv = timer_container_of(qpriv, t, s_tid_timer);
+ struct rvt_qp *qp = qpriv->owner;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+ unsigned long flags;
+ u32 i;
+
+ spin_lock_irqsave(&qp->r_lock, flags);
+ spin_lock(&qp->s_lock);
+ if (qpriv->s_flags & HFI1_R_TID_RSC_TIMER) {
+ dd_dev_warn(dd_from_ibdev(qp->ibqp.device), "[QP%u] %s %d\n",
+ qp->ibqp.qp_num, __func__, __LINE__);
+ trace_hfi1_msg_tid_timeout(/* msg */
+ qp, "resource timeout = ",
+ (u64)qpriv->tid_timer_timeout_jiffies);
+ hfi1_stop_tid_reap_timer(qp);
+ /*
+ * Go though the entire ack queue and clear any outstanding
+ * HW flow and RcvArray resources.
+ */
+ hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
+ for (i = 0; i < rvt_max_atomic(rdi); i++) {
+ struct tid_rdma_request *req =
+ ack_to_tid_req(&qp->s_ack_queue[i]);
+
+ hfi1_kern_exp_rcv_clear_all(req);
+ }
+ spin_unlock(&qp->s_lock);
+ if (qp->ibqp.event_handler) {
+ struct ib_event ev;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_QP_FATAL;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+ rvt_rc_error(qp, IB_WC_RESP_TIMEOUT_ERR);
+ goto unlock_r_lock;
+ }
+ spin_unlock(&qp->s_lock);
+unlock_r_lock:
+ spin_unlock_irqrestore(&qp->r_lock, flags);
+}
+
+void hfi1_rc_rcv_tid_rdma_write_resp(struct hfi1_packet *packet)
+{
+ /* HANDLER FOR TID RDMA WRITE RESPONSE packet (Requester side) */
+
+ /*
+ * 1. Find matching SWQE
+ * 2. Check that TIDENTRY array has enough space for a complete
+ * segment. If not, put QP in error state.
+ * 3. Save response data in struct tid_rdma_req and struct tid_rdma_flow
+ * 4. Remove HFI1_S_WAIT_TID_RESP from s_flags.
+ * 5. Set qp->s_state
+ * 6. Kick the send engine (hfi1_schedule_send())
+ */
+ struct ib_other_headers *ohdr = packet->ohdr;
+ struct rvt_qp *qp = packet->qp;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct rvt_swqe *wqe;
+ struct tid_rdma_request *req;
+ struct tid_rdma_flow *flow;
+ enum ib_wc_status status;
+ u32 opcode, aeth, psn, flow_psn, i, tidlen = 0, pktlen;
+ bool fecn;
+ unsigned long flags;
+
+ fecn = process_ecn(qp, packet);
+ psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
+ aeth = be32_to_cpu(ohdr->u.tid_rdma.w_rsp.aeth);
+ opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ /* Ignore invalid responses */
+ if (cmp_psn(psn, qp->s_next_psn) >= 0)
+ goto ack_done;
+
+ /* Ignore duplicate responses. */
+ if (unlikely(cmp_psn(psn, qp->s_last_psn) <= 0))
+ goto ack_done;
+
+ if (unlikely(qp->s_acked == qp->s_tail))
+ goto ack_done;
+
+ /*
+ * If we are waiting for a particular packet sequence number
+ * due to a request being resent, check for it. Otherwise,
+ * ensure that we haven't missed anything.
+ */
+ if (qp->r_flags & RVT_R_RDMAR_SEQ) {
+ if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
+ goto ack_done;
+ qp->r_flags &= ~RVT_R_RDMAR_SEQ;
+ }
+
+ wqe = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur);
+ if (unlikely(wqe->wr.opcode != IB_WR_TID_RDMA_WRITE))
+ goto ack_op_err;
+
+ req = wqe_to_tid_req(wqe);
+ /*
+ * If we've lost ACKs and our acked_tail pointer is too far
+ * behind, don't overwrite segments. Just drop the packet and
+ * let the reliability protocol take care of it.
+ */
+ if (!CIRC_SPACE(req->setup_head, req->acked_tail, MAX_FLOWS))
+ goto ack_done;
+
+ /*
+ * The call to do_rc_ack() should be last in the chain of
+ * packet checks because it will end up updating the QP state.
+ * Therefore, anything that would prevent the packet from
+ * being accepted as a successful response should be prior
+ * to it.
+ */
+ if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
+ goto ack_done;
+
+ trace_hfi1_ack(qp, psn);
+
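+ /* Set up a new flow at setup_head from the state carried in this response */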
+ flow = &req->flows[req->setup_head];
+ flow->pkt = 0;
+ flow->tid_idx = 0;
+ flow->tid_offset = 0;
+ flow->sent = 0;
+ flow->resync_npkts = 0;
+ flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.w_rsp.tid_flow_qp);
+ flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) &
+ TID_RDMA_DESTQP_FLOW_MASK;
+ flow_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.w_rsp.tid_flow_psn));
+ flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT;
+ flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK;
+ flow->flow_state.resp_ib_psn = psn;
+ flow->length = min_t(u32, req->seg_len,
+ (wqe->length - (req->comp_seg * req->seg_len)));
+
+ flow->npkts = rvt_div_round_up_mtu(qp, flow->length);
+ flow->flow_state.lpsn = flow->flow_state.spsn +
+ flow->npkts - 1;
+ /* payload length = packet length - (header length + ICRC length) */
+ pktlen = packet->tlen - (packet->hlen + 4);
+ if (pktlen > sizeof(flow->tid_entry)) {
+ status = IB_WC_LOC_LEN_ERR;
+ goto ack_err;
+ }
+ memcpy(flow->tid_entry, packet->ebuf, pktlen);
+ flow->tidcnt = pktlen / sizeof(*flow->tid_entry);
+ trace_hfi1_tid_flow_rcv_write_resp(qp, req->setup_head, flow);
+
+ req->comp_seg++;
+ trace_hfi1_tid_write_sender_rcv_resp(qp, 0);
+ /*
+ * Walk the TID_ENTRY list to make sure we have enough space for a
+ * complete segment.
+ */
+ for (i = 0; i < flow->tidcnt; i++) {
+ trace_hfi1_tid_entry_rcv_write_resp(/* entry */
+ qp, i, flow->tid_entry[i]);
+ if (!EXP_TID_GET(flow->tid_entry[i], LEN)) {
+ status = IB_WC_LOC_LEN_ERR;
+ goto ack_err;
+ }
+ tidlen += EXP_TID_GET(flow->tid_entry[i], LEN);
+ }
+ if (tidlen * PAGE_SIZE < flow->length) {
+ status = IB_WC_LOC_LEN_ERR;
+ goto ack_err;
+ }
+
+ trace_hfi1_tid_req_rcv_write_resp(qp, 0, wqe->wr.opcode, wqe->psn,
+ wqe->lpsn, req);
+ /*
+ * If this is the first response for this request, set the initial
+ * flow index to the current flow.
+ */
+ if (!cmp_psn(psn, wqe->psn)) {
+ req->r_last_acked = mask_psn(wqe->psn - 1);
+ /* Set acked flow index to head index */
+ req->acked_tail = req->setup_head;
+ }
+
+ /* advance circular buffer head */
+ req->setup_head = CIRC_NEXT(req->setup_head, MAX_FLOWS);
+ req->state = TID_REQUEST_ACTIVE;
+
+ /*
+ * If all responses for this TID RDMA WRITE request have been
+ * received, advance the pointer to the next one.
+ * Since TID RDMA requests could be mixed in with regular IB requests,
+ * they might not appear sequentially in the queue. Therefore, the
+ * next request needs to be "found".
+ */
+ if (qpriv->s_tid_cur != qpriv->s_tid_head &&
+ req->comp_seg == req->total_segs) {
+ for (i = qpriv->s_tid_cur + 1; ; i++) {
+ if (i == qp->s_size)
+ i = 0;
+ wqe = rvt_get_swqe_ptr(qp, i);
+ if (i == qpriv->s_tid_head)
+ break;
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
+ break;
+ }
+ qpriv->s_tid_cur = i;
+ }
+ qp->s_flags &= ~HFI1_S_WAIT_TID_RESP;
+ hfi1_schedule_tid_send(qp);
+ goto ack_done;
+
+ack_op_err:
+ status = IB_WC_LOC_QP_OP_ERR;
+ack_err:
+ rvt_error_qp(qp, status);
+ack_done:
+ if (fecn)
+ qp->s_flags |= RVT_S_ECN;
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
+
+bool hfi1_build_tid_rdma_packet(struct rvt_swqe *wqe,
+ struct ib_other_headers *ohdr,
+ u32 *bth1, u32 *bth2, u32 *len)
+{
+ struct tid_rdma_request *req = wqe_to_tid_req(wqe);
+ struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
+ struct tid_rdma_params *remote;
+ struct rvt_qp *qp = req->qp;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ u32 tidentry = flow->tid_entry[flow->tid_idx];
+ u32 tidlen = EXP_TID_GET(tidentry, LEN) << PAGE_SHIFT;
+ struct tid_rdma_write_data *wd = &ohdr->u.tid_rdma.w_data;
+ u32 next_offset, om = KDETH_OM_LARGE;
+ bool last_pkt;
+
+ if (!tidlen) {
+ hfi1_trdma_send_complete(qp, wqe, IB_WC_REM_INV_RD_REQ_ERR);
+ rvt_error_qp(qp, IB_WC_REM_INV_RD_REQ_ERR);
+ }
+
+ *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset);
+ flow->sent += *len;
+ next_offset = flow->tid_offset + *len;
+ last_pkt = (flow->tid_idx == (flow->tidcnt - 1) &&
+ next_offset >= tidlen) || (flow->sent >= flow->length);
+ trace_hfi1_tid_entry_build_write_data(qp, flow->tid_idx, tidentry);
+ trace_hfi1_tid_flow_build_write_data(qp, req->clear_tail, flow);
+
+ rcu_read_lock();
+ remote = rcu_dereference(qpriv->tid_rdma.remote);
+ KDETH_RESET(wd->kdeth0, KVER, 0x1);
+ KDETH_SET(wd->kdeth0, SH, !last_pkt);
+ KDETH_SET(wd->kdeth0, INTR, !!(!last_pkt && remote->urg));
+ KDETH_SET(wd->kdeth0, TIDCTRL, EXP_TID_GET(tidentry, CTRL));
+ KDETH_SET(wd->kdeth0, TID, EXP_TID_GET(tidentry, IDX));
+ KDETH_SET(wd->kdeth0, OM, om == KDETH_OM_LARGE);
+ KDETH_SET(wd->kdeth0, OFFSET, flow->tid_offset / om);
+ KDETH_RESET(wd->kdeth1, JKEY, remote->jkey);
+ wd->verbs_qp = cpu_to_be32(qp->remote_qpn);
+ rcu_read_unlock();
+
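+ /*
+ * BTH2 carries the KDETH PSN: the flow generation in the upper bits
+ * and the per-packet sequence number in the lower bits.
+ */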
+ *bth1 = flow->tid_qpn;
+ *bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) &
+ HFI1_KDETH_BTH_SEQ_MASK) |
+ (flow->flow_state.generation <<
+ HFI1_KDETH_BTH_SEQ_SHIFT));
+ if (last_pkt) {
+ /* PSNs are zero-based, so +1 to count number of packets */
+ if (flow->flow_state.lpsn + 1 +
+ rvt_div_round_up_mtu(qp, req->seg_len) >
+ MAX_TID_FLOW_PSN)
+ req->state = TID_REQUEST_SYNC;
+ *bth2 |= IB_BTH_REQ_ACK;
+ }
+
+ if (next_offset >= tidlen) {
+ flow->tid_offset = 0;
+ flow->tid_idx++;
+ } else {
+ flow->tid_offset = next_offset;
+ }
+ return last_pkt;
+}
+
+void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
+{
+ struct rvt_qp *qp = packet->qp;
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_ctxtdata *rcd = priv->rcd;
+ struct ib_other_headers *ohdr = packet->ohdr;
+ struct rvt_ack_entry *e;
+ struct tid_rdma_request *req;
+ struct tid_rdma_flow *flow;
+ struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
+ unsigned long flags;
+ u32 psn, next;
+ u8 opcode;
+ bool fecn;
+
+ fecn = process_ecn(qp, packet);
+ psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
+ opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
+
+ /*
+ * All error handling should be done by now. If we are here, the packet
+ * is either good or has been accepted by the error handler.
+ */
+ spin_lock_irqsave(&qp->s_lock, flags);
+ e = &qp->s_ack_queue[priv->r_tid_tail];
+ req = ack_to_tid_req(e);
+ flow = &req->flows[req->clear_tail];
+ if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.lpsn))) {
+ update_r_next_psn_fecn(packet, priv, rcd, flow, fecn);
+
+ if (cmp_psn(psn, flow->flow_state.r_next_psn))
+ goto send_nak;
+
+ flow->flow_state.r_next_psn = mask_psn(psn + 1);
+ /*
+ * Copy the payload to destination buffer if this packet is
+ * delivered as an eager packet due to RSM rule and FECN.
+ * The RSM rule selects FECN bit in BTH and SH bit in
+ * KDETH header and therefore will not match the last
+ * packet of each segment that has SH bit cleared.
+ */
+ if (fecn && packet->etype == RHF_RCV_TYPE_EAGER) {
+ struct rvt_sge_state ss;
+ u32 len;
+ u32 tlen = packet->tlen;
+ u16 hdrsize = packet->hlen;
+ u8 pad = packet->pad;
+ u8 extra_bytes = pad + packet->extra_byte +
+ (SIZE_OF_CRC << 2);
+ u32 pmtu = qp->pmtu;
+
+ if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
+ goto send_nak;
+ len = req->comp_seg * req->seg_len;
+ len += delta_psn(psn,
+ full_flow_psn(flow, flow->flow_state.spsn)) *
+ pmtu;
+ if (unlikely(req->total_len - len < pmtu))
+ goto send_nak;
+
+ /*
+ * The e->rdma_sge field is set when TID RDMA WRITE REQ
+ * is first received and is never modified thereafter.
+ */
+ ss.sge = e->rdma_sge;
+ ss.sg_list = NULL;
+ ss.num_sge = 1;
+ ss.total_len = req->total_len;
+ rvt_skip_sge(&ss, len, false);
+ rvt_copy_sge(qp, &ss, packet->payload, pmtu, false,
+ false);
+ /* Raise the sw sequence check flag for next packet */
+ priv->r_next_psn_kdeth = mask_psn(psn + 1);
+ priv->s_flags |= HFI1_R_TID_SW_PSN;
+ }
+ goto exit;
+ }
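+ /*
+ * This is the last data packet of the segment: release its TID
+ * resources and account for the completed segment.
+ */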
+ flow->flow_state.r_next_psn = mask_psn(psn + 1);
+ hfi1_kern_exp_rcv_clear(req);
+ priv->alloc_w_segs--;
+ rcd->flows[flow->idx].psn = psn & HFI1_KDETH_BTH_SEQ_MASK;
+ req->comp_seg++;
+ priv->s_nak_state = 0;
+
+ /*
+ * Release the flow if one of the following conditions has been met:
+ * - The request has reached a sync point AND all outstanding
+ * segments have been completed, or
+ * - The entire request is complete and there are no more requests
+ * (of any kind) in the queue.
+ */
+ trace_hfi1_rsp_rcv_tid_write_data(qp, psn);
+ trace_hfi1_tid_req_rcv_write_data(qp, 0, e->opcode, e->psn, e->lpsn,
+ req);
+ trace_hfi1_tid_write_rsp_rcv_data(qp);
+ validate_r_tid_ack(priv);
+
+ if (opcode == TID_OP(WRITE_DATA_LAST)) {
+ release_rdma_sge_mr(e);
+ for (next = priv->r_tid_tail + 1; ; next++) {
+ if (next > rvt_size_atomic(&dev->rdi))
+ next = 0;
+ if (next == priv->r_tid_head)
+ break;
+ e = &qp->s_ack_queue[next];
+ if (e->opcode == TID_OP(WRITE_REQ))
+ break;
+ }
+ priv->r_tid_tail = next;
+ if (++qp->s_acked_ack_queue > rvt_size_atomic(&dev->rdi))
+ qp->s_acked_ack_queue = 0;
+ }
+
+ hfi1_tid_write_alloc_resources(qp, true);
+
+ /*
+ * If we need to generate more responses, schedule the
+ * send engine.
+ */
+ if (req->cur_seg < req->total_segs ||
+ qp->s_tail_ack_queue != qp->r_head_ack_queue) {
+ qp->s_flags |= RVT_S_RESP_PENDING;
+ hfi1_schedule_send(qp);
+ }
+
+ priv->pending_tid_w_segs--;
+ if (priv->s_flags & HFI1_R_TID_RSC_TIMER) {
+ if (priv->pending_tid_w_segs)
+ hfi1_mod_tid_reap_timer(req->qp);
+ else
+ hfi1_stop_tid_reap_timer(req->qp);
+ }
+
+done:
+ tid_rdma_schedule_ack(qp);
+exit:
+ priv->r_next_psn_kdeth = flow->flow_state.r_next_psn;
+ if (fecn)
+ qp->s_flags |= RVT_S_ECN;
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return;
+
+send_nak:
+ if (!priv->s_nak_state) {
+ priv->s_nak_state = IB_NAK_PSN_ERROR;
+ priv->s_nak_psn = flow->flow_state.r_next_psn;
+ tid_rdma_trigger_ack(qp);
+ }
+ goto done;
+}
+
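+/* The RESYNC PSN is the last (all-ones) KDETH sequence number of a generation */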
+static bool hfi1_tid_rdma_is_resync_psn(u32 psn)
+{
+ return (bool)((psn & HFI1_KDETH_BTH_SEQ_MASK) ==
+ HFI1_KDETH_BTH_SEQ_MASK);
+}
+
+u32 hfi1_build_tid_rdma_write_ack(struct rvt_qp *qp, struct rvt_ack_entry *e,
+ struct ib_other_headers *ohdr, u16 iflow,
+ u32 *bth1, u32 *bth2)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct tid_flow_state *fs = &qpriv->flow_state;
+ struct tid_rdma_request *req = ack_to_tid_req(e);
+ struct tid_rdma_flow *flow = &req->flows[iflow];
+ struct tid_rdma_params *remote;
+
+ rcu_read_lock();
+ remote = rcu_dereference(qpriv->tid_rdma.remote);
+ KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth1, JKEY, remote->jkey);
+ ohdr->u.tid_rdma.ack.verbs_qp = cpu_to_be32(qp->remote_qpn);
+ *bth1 = remote->qp;
+ rcu_read_unlock();
+
+ if (qpriv->resync) {
+ *bth2 = mask_psn((fs->generation <<
+ HFI1_KDETH_BTH_SEQ_SHIFT) - 1);
+ ohdr->u.tid_rdma.ack.aeth = rvt_compute_aeth(qp);
+ } else if (qpriv->s_nak_state) {
+ *bth2 = mask_psn(qpriv->s_nak_psn);
+ ohdr->u.tid_rdma.ack.aeth =
+ cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
+ (qpriv->s_nak_state <<
+ IB_AETH_CREDIT_SHIFT));
+ } else {
+ *bth2 = full_flow_psn(flow, flow->flow_state.lpsn);
+ ohdr->u.tid_rdma.ack.aeth = rvt_compute_aeth(qp);
+ }
+ KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth0, KVER, 0x1);
+ ohdr->u.tid_rdma.ack.tid_flow_qp =
+ cpu_to_be32(qpriv->tid_rdma.local.qp |
+ ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) <<
+ TID_RDMA_DESTQP_FLOW_SHIFT) |
+ qpriv->rcd->ctxt);
+
+ ohdr->u.tid_rdma.ack.tid_flow_psn = 0;
+ ohdr->u.tid_rdma.ack.verbs_psn =
+ cpu_to_be32(flow->flow_state.resp_ib_psn);
+
+ if (qpriv->resync) {
+ /*
+ * If the PSN before the currently expected KDETH PSN is the
+ * RESYNC PSN, then we never received a good TID RDMA WRITE
+ * DATA packet after a previous RESYNC.
+ * In this case, the next expected KDETH PSN stays the same.
+ */
+ if (hfi1_tid_rdma_is_resync_psn(qpriv->r_next_psn_kdeth - 1)) {
+ ohdr->u.tid_rdma.ack.tid_flow_psn =
+ cpu_to_be32(qpriv->r_next_psn_kdeth_save);
+ } else {
+ /*
+ * Because the KDETH PSNs jump during a RESYNC, it's
+ * not possible to infer (or compute) the previous value
+ * of r_next_psn_kdeth in the case of back-to-back
+ * RESYNC packets. Therefore, we save it.
+ */
+ qpriv->r_next_psn_kdeth_save =
+ qpriv->r_next_psn_kdeth - 1;
+ ohdr->u.tid_rdma.ack.tid_flow_psn =
+ cpu_to_be32(qpriv->r_next_psn_kdeth_save);
+ qpriv->r_next_psn_kdeth = mask_psn(*bth2 + 1);
+ }
+ qpriv->resync = false;
+ }
+
+ return sizeof(ohdr->u.tid_rdma.ack) / sizeof(u32);
+}
+
+void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
+{
+ struct ib_other_headers *ohdr = packet->ohdr;
+ struct rvt_qp *qp = packet->qp;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct rvt_swqe *wqe;
+ struct tid_rdma_request *req;
+ struct tid_rdma_flow *flow;
+ u32 aeth, psn, req_psn, ack_psn, flpsn, resync_psn, ack_kpsn;
+ unsigned long flags;
+ u16 fidx;
+
+ trace_hfi1_tid_write_sender_rcv_tid_ack(qp, 0);
+ process_ecn(qp, packet);
+ psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
+ aeth = be32_to_cpu(ohdr->u.tid_rdma.ack.aeth);
+ req_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.ack.verbs_psn));
+ resync_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.ack.tid_flow_psn));
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ trace_hfi1_rcv_tid_ack(qp, aeth, psn, req_psn, resync_psn);
+
+ /* If we are waiting for an ACK to RESYNC, drop any other packets */
+ if ((qp->s_flags & HFI1_S_WAIT_HALT) &&
+ cmp_psn(psn, qpriv->s_resync_psn))
+ goto ack_op_err;
+
+ ack_psn = req_psn;
+ if (hfi1_tid_rdma_is_resync_psn(psn))
+ ack_kpsn = resync_psn;
+ else
+ ack_kpsn = psn;
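+ /*
+ * For a NAK, the PSN carried in the packet has not itself been
+ * acknowledged; step back to the last ACK'ed PSN.
+ */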
+ if (aeth >> 29) {
+ ack_psn--;
+ ack_kpsn--;
+ }
+
+ if (unlikely(qp->s_acked == qp->s_tail))
+ goto ack_op_err;
+
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
+
+ if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
+ goto ack_op_err;
+
+ req = wqe_to_tid_req(wqe);
+ trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
+ wqe->lpsn, req);
+ flow = &req->flows[req->acked_tail];
+ trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
+
+ /* Drop stale ACK/NAK */
+ if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0 ||
+ cmp_psn(req_psn, flow->flow_state.resp_ib_psn) < 0)
+ goto ack_op_err;
+
+ while (cmp_psn(ack_kpsn,
+ full_flow_psn(flow, flow->flow_state.lpsn)) >= 0 &&
+ req->ack_seg < req->cur_seg) {
+ req->ack_seg++;
+ /* advance acked segment pointer */
+ req->acked_tail = CIRC_NEXT(req->acked_tail, MAX_FLOWS);
+ req->r_last_acked = flow->flow_state.resp_ib_psn;
+ trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
+ wqe->lpsn, req);
+ if (req->ack_seg == req->total_segs) {
+ req->state = TID_REQUEST_COMPLETE;
+ wqe = do_rc_completion(qp, wqe,
+ to_iport(qp->ibqp.device,
+ qp->port_num));
+ trace_hfi1_sender_rcv_tid_ack(qp);
+ atomic_dec(&qpriv->n_tid_requests);
+ if (qp->s_acked == qp->s_tail)
+ break;
+ if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
+ break;
+ req = wqe_to_tid_req(wqe);
+ }
+ flow = &req->flows[req->acked_tail];
+ trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
+ }
+
+ trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
+ wqe->lpsn, req);
+ switch (aeth >> 29) {
+ case 0: /* ACK */
+ if (qpriv->s_flags & RVT_S_WAIT_ACK)
+ qpriv->s_flags &= ~RVT_S_WAIT_ACK;
+ if (!hfi1_tid_rdma_is_resync_psn(psn)) {
+ /* Check if there is any pending TID ACK */
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
+ req->ack_seg < req->cur_seg)
+ hfi1_mod_tid_retry_timer(qp);
+ else
+ hfi1_stop_tid_retry_timer(qp);
+ hfi1_schedule_send(qp);
+ } else {
+ u32 spsn, fpsn, last_acked, generation;
+ struct tid_rdma_request *rptr;
+
+ /* ACK(RESYNC) */
+ hfi1_stop_tid_retry_timer(qp);
+ /* Allow new requests (see hfi1_make_tid_rdma_pkt) */
+ qp->s_flags &= ~HFI1_S_WAIT_HALT;
+ /*
+ * Clear the RVT_S_SEND_ONE flag in case the TID RDMA
+ * ACK is received after the TID retry timer has fired
+ * again. In this case, do not send any more TID
+ * RESYNC requests or wait for any more TID ACK packets.
+ */
+ qpriv->s_flags &= ~RVT_S_SEND_ONE;
+ hfi1_schedule_send(qp);
+
+ if ((qp->s_acked == qpriv->s_tid_tail &&
+ req->ack_seg == req->total_segs) ||
+ qp->s_acked == qp->s_tail) {
+ qpriv->s_state = TID_OP(WRITE_DATA_LAST);
+ goto done;
+ }
+
+ if (req->ack_seg == req->comp_seg) {
+ qpriv->s_state = TID_OP(WRITE_DATA);
+ goto done;
+ }
+
+ /*
+ * The PSN to start with is the next PSN after the
+ * RESYNC PSN.
+ */
+ psn = mask_psn(psn + 1);
+ generation = psn >> HFI1_KDETH_BTH_SEQ_SHIFT;
+ spsn = 0;
+
+ /*
+ * Update to the correct WQE when we get an ACK(RESYNC)
+ * in the middle of a request.
+ */
+ if (delta_psn(ack_psn, wqe->lpsn))
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
+ req = wqe_to_tid_req(wqe);
+ flow = &req->flows[req->acked_tail];
+ /*
+ * RESYNC re-numbers the PSN ranges of all remaining
+ * segments. Also, PSNs start from 0 in the middle of a
+ * segment and the first segment size is less than the
+ * default number of packets. flow->resync_npkts is used
+ * to track the number of packets from the start of the
+ * real segment to the point of 0 PSN after the RESYNC
+ * in order to later correctly rewind the SGE.
+ */
+ fpsn = full_flow_psn(flow, flow->flow_state.spsn);
+ req->r_ack_psn = psn;
+ /*
+ * If resync_psn points to the last flow PSN for a
+ * segment and the new segment (likely from a new
+ * request) starts with a new generation number, we
+ * need to adjust resync_psn accordingly.
+ */
+ if (flow->flow_state.generation !=
+ (resync_psn >> HFI1_KDETH_BTH_SEQ_SHIFT))
+ resync_psn = mask_psn(fpsn - 1);
+ flow->resync_npkts +=
+ delta_psn(mask_psn(resync_psn + 1), fpsn);
+ /*
+ * Renumber all packet sequence number ranges
+ * based on the new generation.
+ */
+ last_acked = qp->s_acked;
+ rptr = req;
+ while (1) {
+ /* start from last acked segment */
+ for (fidx = rptr->acked_tail;
+ CIRC_CNT(rptr->setup_head, fidx,
+ MAX_FLOWS);
+ fidx = CIRC_NEXT(fidx, MAX_FLOWS)) {
+ u32 lpsn;
+ u32 gen;
+
+ flow = &rptr->flows[fidx];
+ gen = flow->flow_state.generation;
+ if (WARN_ON(gen == generation &&
+ flow->flow_state.spsn !=
+ spsn))
+ continue;
+ lpsn = flow->flow_state.lpsn;
+ lpsn = full_flow_psn(flow, lpsn);
+ flow->npkts =
+ delta_psn(lpsn,
+ mask_psn(resync_psn)
+ );
+ flow->flow_state.generation =
+ generation;
+ flow->flow_state.spsn = spsn;
+ flow->flow_state.lpsn =
+ flow->flow_state.spsn +
+ flow->npkts - 1;
+ flow->pkt = 0;
+ spsn += flow->npkts;
+ resync_psn += flow->npkts;
+ trace_hfi1_tid_flow_rcv_tid_ack(qp,
+ fidx,
+ flow);
+ }
+ if (++last_acked == qpriv->s_tid_cur + 1)
+ break;
+ if (last_acked == qp->s_size)
+ last_acked = 0;
+ wqe = rvt_get_swqe_ptr(qp, last_acked);
+ rptr = wqe_to_tid_req(wqe);
+ }
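+ /* Rewind the sender to the last acked segment and restart from there */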
+ req->cur_seg = req->ack_seg;
+ qpriv->s_tid_tail = qp->s_acked;
+ qpriv->s_state = TID_OP(WRITE_REQ);
+ hfi1_schedule_tid_send(qp);
+ }
+done:
+ qpriv->s_retry = qp->s_retry_cnt;
+ break;
+
+ case 3: /* NAK */
+ hfi1_stop_tid_retry_timer(qp);
+ switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
+ IB_AETH_CREDIT_MASK) {
+ case 0: /* PSN sequence error */
+ if (!req->flows)
+ break;
+ flow = &req->flows[req->acked_tail];
+ flpsn = full_flow_psn(flow, flow->flow_state.lpsn);
+ if (cmp_psn(psn, flpsn) > 0)
+ break;
+ trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail,
+ flow);
+ req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
+ req->cur_seg = req->ack_seg;
+ qpriv->s_tid_tail = qp->s_acked;
+ qpriv->s_state = TID_OP(WRITE_REQ);
+ qpriv->s_retry = qp->s_retry_cnt;
+ hfi1_schedule_tid_send(qp);
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ack_op_err:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
+
+void hfi1_add_tid_retry_timer(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct ib_qp *ibqp = &qp->ibqp;
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+
+ lockdep_assert_held(&qp->s_lock);
+ if (!(priv->s_flags & HFI1_S_TID_RETRY_TIMER)) {
+ priv->s_flags |= HFI1_S_TID_RETRY_TIMER;
+ priv->s_tid_retry_timer.expires = jiffies +
+ priv->tid_retry_timeout_jiffies + rdi->busy_jiffies;
+ add_timer(&priv->s_tid_retry_timer);
+ }
+}
+
+static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct ib_qp *ibqp = &qp->ibqp;
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+
+ lockdep_assert_held(&qp->s_lock);
+ priv->s_flags |= HFI1_S_TID_RETRY_TIMER;
+ mod_timer(&priv->s_tid_retry_timer, jiffies +
+ priv->tid_retry_timeout_jiffies + rdi->busy_jiffies);
+}
+
+static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ int rval = 0;
+
+ lockdep_assert_held(&qp->s_lock);
+ if (priv->s_flags & HFI1_S_TID_RETRY_TIMER) {
+ rval = timer_delete(&priv->s_tid_retry_timer);
+ priv->s_flags &= ~HFI1_S_TID_RETRY_TIMER;
+ }
+ return rval;
+}
+
+void hfi1_del_tid_retry_timer(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ timer_delete_sync(&priv->s_tid_retry_timer);
+ priv->s_flags &= ~HFI1_S_TID_RETRY_TIMER;
+}
+
+static void hfi1_tid_retry_timeout(struct timer_list *t)
+{
+ struct hfi1_qp_priv *priv = timer_container_of(priv, t,
+ s_tid_retry_timer);
+ struct rvt_qp *qp = priv->owner;
+ struct rvt_swqe *wqe;
+ unsigned long flags;
+ struct tid_rdma_request *req;
+
+ spin_lock_irqsave(&qp->r_lock, flags);
+ spin_lock(&qp->s_lock);
+ trace_hfi1_tid_write_sender_retry_timeout(qp, 0);
+ if (priv->s_flags & HFI1_S_TID_RETRY_TIMER) {
+ hfi1_stop_tid_retry_timer(qp);
+ if (!priv->s_retry) {
+ trace_hfi1_msg_tid_retry_timeout(/* msg */
+ qp,
+ "Exhausted retries. Tid retry timeout = ",
+ (u64)priv->tid_retry_timeout_jiffies);
+
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
+ hfi1_trdma_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
+ rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ } else {
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
+ req = wqe_to_tid_req(wqe);
+ trace_hfi1_tid_req_tid_retry_timeout(/* req */
+ qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req);
+
+ priv->s_flags &= ~RVT_S_WAIT_ACK;
+ /* Only send one packet (the RESYNC) */
+ priv->s_flags |= RVT_S_SEND_ONE;
+ /*
+ * No additional request shall be made by this QP until
+ * the RESYNC has been completed.
+ */
+ qp->s_flags |= HFI1_S_WAIT_HALT;
+ priv->s_state = TID_OP(RESYNC);
+ priv->s_retry--;
+ hfi1_schedule_tid_send(qp);
+ }
+ }
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irqrestore(&qp->r_lock, flags);
+}
+
+u32 hfi1_build_tid_rdma_resync(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ struct ib_other_headers *ohdr, u32 *bth1,
+ u32 *bth2, u16 fidx)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct tid_rdma_params *remote;
+ struct tid_rdma_request *req = wqe_to_tid_req(wqe);
+ struct tid_rdma_flow *flow = &req->flows[fidx];
+ u32 generation;
+
+ rcu_read_lock();
+ remote = rcu_dereference(qpriv->tid_rdma.remote);
+ KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth1, JKEY, remote->jkey);
+ ohdr->u.tid_rdma.ack.verbs_qp = cpu_to_be32(qp->remote_qpn);
+ *bth1 = remote->qp;
+ rcu_read_unlock();
+
+ generation = kern_flow_generation_next(flow->flow_state.generation);
+ *bth2 = mask_psn((generation << HFI1_KDETH_BTH_SEQ_SHIFT) - 1);
+ qpriv->s_resync_psn = *bth2;
+ *bth2 |= IB_BTH_REQ_ACK;
+ KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth0, KVER, 0x1);
+
+ return sizeof(ohdr->u.tid_rdma.resync) / sizeof(u32);
+}
+
+void hfi1_rc_rcv_tid_rdma_resync(struct hfi1_packet *packet)
+{
+ struct ib_other_headers *ohdr = packet->ohdr;
+ struct rvt_qp *qp = packet->qp;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct hfi1_ctxtdata *rcd = qpriv->rcd;
+ struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
+ struct rvt_ack_entry *e;
+ struct tid_rdma_request *req;
+ struct tid_rdma_flow *flow;
+ struct tid_flow_state *fs = &qpriv->flow_state;
+ u32 psn, generation, idx, gen_next;
+ bool fecn;
+ unsigned long flags;
+
+ fecn = process_ecn(qp, packet);
+ psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
+
+ generation = mask_psn(psn + 1) >> HFI1_KDETH_BTH_SEQ_SHIFT;
+ spin_lock_irqsave(&qp->s_lock, flags);
+
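+ /* If no generation has been assigned to the flow yet, adopt the one from the RESYNC */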
+ gen_next = (fs->generation == KERN_GENERATION_RESERVED) ?
+ generation : kern_flow_generation_next(fs->generation);
+ /*
+ * A RESYNC packet contains the "next" generation and can only
+ * come from the current or previous generations.
+ */
+ if (generation != mask_generation(gen_next - 1) &&
+ generation != gen_next)
+ goto bail;
+ /* Already processing a resync */
+ if (qpriv->resync)
+ goto bail;
+
+ spin_lock(&rcd->exp_lock);
+ if (fs->index >= RXE_NUM_TID_FLOWS) {
+ /*
+ * If we don't have a flow, save the generation so it can be
+ * applied when a new flow is allocated
+ */
+ fs->generation = generation;
+ } else {
+ /* Reprogram the QP flow with new generation */
+ rcd->flows[fs->index].generation = generation;
+ fs->generation = kern_setup_hw_flow(rcd, fs->index);
+ }
+ fs->psn = 0;
+ /*
+ * Disable SW PSN checking since a RESYNC is equivalent to a
+ * sync point and the flow has/will be reprogrammed
+ */
+ qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
+ trace_hfi1_tid_write_rsp_rcv_resync(qp);
+
+ /*
+ * Reset all TID flow information with the new generation.
+ * This is done for all requests and segments after the
+ * last received segment
+ */
+ for (idx = qpriv->r_tid_tail; ; idx++) {
+ u16 flow_idx;
+
+ if (idx > rvt_size_atomic(&dev->rdi))
+ idx = 0;
+ e = &qp->s_ack_queue[idx];
+ if (e->opcode == TID_OP(WRITE_REQ)) {
+ req = ack_to_tid_req(e);
+ trace_hfi1_tid_req_rcv_resync(qp, 0, e->opcode, e->psn,
+ e->lpsn, req);
+
+ /* start from last unacked segment */
+ for (flow_idx = req->clear_tail;
+ CIRC_CNT(req->setup_head, flow_idx,
+ MAX_FLOWS);
+ flow_idx = CIRC_NEXT(flow_idx, MAX_FLOWS)) {
+ u32 lpsn;
+ u32 next;
+
+ flow = &req->flows[flow_idx];
+ lpsn = full_flow_psn(flow,
+ flow->flow_state.lpsn);
+ next = flow->flow_state.r_next_psn;
+ flow->npkts = delta_psn(lpsn, next - 1);
+ flow->flow_state.generation = fs->generation;
+ flow->flow_state.spsn = fs->psn;
+ flow->flow_state.lpsn =
+ flow->flow_state.spsn + flow->npkts - 1;
+ flow->flow_state.r_next_psn =
+ full_flow_psn(flow,
+ flow->flow_state.spsn);
+ fs->psn += flow->npkts;
+ trace_hfi1_tid_flow_rcv_resync(qp, flow_idx,
+ flow);
+ }
+ }
+ if (idx == qp->s_tail_ack_queue)
+ break;
+ }
+
+ spin_unlock(&rcd->exp_lock);
+ qpriv->resync = true;
+ /* RESYNC request always gets a TID RDMA ACK. */
+ qpriv->s_nak_state = 0;
+ tid_rdma_trigger_ack(qp);
+bail:
+ if (fecn)
+ qp->s_flags |= RVT_S_ECN;
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
+
+/*
+ * Call this function when the last TID RDMA WRITE DATA packet for a request
+ * is built.
+ */
+static void update_tid_tail(struct rvt_qp *qp)
+ __must_hold(&qp->s_lock)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ u32 i;
+ struct rvt_swqe *wqe;
+
+ lockdep_assert_held(&qp->s_lock);
+ /* Can't move beyond s_tid_cur */
+ if (priv->s_tid_tail == priv->s_tid_cur)
+ return;
+ for (i = priv->s_tid_tail + 1; ; i++) {
+ if (i == qp->s_size)
+ i = 0;
+
+ if (i == priv->s_tid_cur)
+ break;
+ wqe = rvt_get_swqe_ptr(qp, i);
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
+ break;
+ }
+ priv->s_tid_tail = i;
+ priv->s_state = TID_OP(WRITE_RESP);
+}
+
+int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
+ __must_hold(&qp->s_lock)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct rvt_swqe *wqe;
+ u32 bth1 = 0, bth2 = 0, hwords = 5, len, middle = 0;
+ struct ib_other_headers *ohdr;
+ struct rvt_sge_state *ss = &qp->s_sge;
+ struct rvt_ack_entry *e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ struct tid_rdma_request *req = ack_to_tid_req(e);
+ bool last = false;
+ u8 opcode = TID_OP(WRITE_DATA);
+
+ lockdep_assert_held(&qp->s_lock);
+ trace_hfi1_tid_write_sender_make_tid_pkt(qp, 0);
+ /*
+ * Prioritize the sending of the requests and responses over the
+ * sending of the TID RDMA data packets.
+ */
+ if (((atomic_read(&priv->n_tid_requests) < HFI1_TID_RDMA_WRITE_CNT) &&
+ atomic_read(&priv->n_requests) &&
+ !(qp->s_flags & (RVT_S_BUSY | RVT_S_WAIT_ACK |
+ HFI1_S_ANY_WAIT_IO))) ||
+ (e->opcode == TID_OP(WRITE_REQ) && req->cur_seg < req->alloc_seg &&
+ !(qp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT_IO)))) {
+ struct iowait_work *iowork;
+
+ iowork = iowait_get_ib_work(&priv->s_iowait);
+ ps->s_txreq = get_waiting_verbs_txreq(iowork);
+ if (ps->s_txreq || hfi1_make_rc_req(qp, ps)) {
+ priv->s_flags |= HFI1_S_TID_BUSY_SET;
+ return 1;
+ }
+ }
+
+ ps->s_txreq = get_txreq(ps->dev, qp);
+ if (!ps->s_txreq)
+ goto bail_no_tx;
+
+ ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
+
+ if ((priv->s_flags & RVT_S_ACK_PENDING) &&
+ make_tid_rdma_ack(qp, ohdr, ps))
+ return 1;
+
+ /*
+ * Bail out if we can't send data.
+ * Be reminded that this check must be done after the call to
+ * make_tid_rdma_ack() because the responding QP could be in
+ * RTR state where it can send TID RDMA ACK, not TID RDMA WRITE DATA.
+ */
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK))
+ goto bail;
+
+ if (priv->s_flags & RVT_S_WAIT_ACK)
+ goto bail;
+
+ /* Check whether there is anything to do. */
+ if (priv->s_tid_tail == HFI1_QP_WQE_INVALID)
+ goto bail;
+ wqe = rvt_get_swqe_ptr(qp, priv->s_tid_tail);
+ req = wqe_to_tid_req(wqe);
+ trace_hfi1_tid_req_make_tid_pkt(qp, 0, wqe->wr.opcode, wqe->psn,
+ wqe->lpsn, req);
+ switch (priv->s_state) {
+ case TID_OP(WRITE_REQ):
+ case TID_OP(WRITE_RESP):
+ priv->tid_ss.sge = wqe->sg_list[0];
+ priv->tid_ss.sg_list = wqe->sg_list + 1;
+ priv->tid_ss.num_sge = wqe->wr.num_sge;
+ priv->tid_ss.total_len = wqe->length;
+
+ if (priv->s_state == TID_OP(WRITE_REQ))
+ hfi1_tid_rdma_restart_req(qp, wqe, &bth2);
+ priv->s_state = TID_OP(WRITE_DATA);
+ fallthrough;
+
+ case TID_OP(WRITE_DATA):
+ /*
+ * 1. Check whether TID RDMA WRITE RESP available.
+ * 2. If no:
+ * 2.1 If have more segments and no TID RDMA WRITE RESP,
+ * set HFI1_S_WAIT_TID_RESP
+ * 2.2 Return indicating no progress made.
+ * 3. If yes:
+ * 3.1 Build TID RDMA WRITE DATA packet.
+ * 3.2 If last packet in segment:
+ * 3.2.1 Change KDETH header bits
+ * 3.2.2 Advance RESP pointers.
+ * 3.3 Return indicating progress made.
+ */
+ trace_hfi1_sender_make_tid_pkt(qp);
+ trace_hfi1_tid_write_sender_make_tid_pkt(qp, 0);
+ wqe = rvt_get_swqe_ptr(qp, priv->s_tid_tail);
+ req = wqe_to_tid_req(wqe);
+ len = wqe->length;
+
+ if (!req->comp_seg || req->cur_seg == req->comp_seg)
+ goto bail;
+
+ trace_hfi1_tid_req_make_tid_pkt(qp, 0, wqe->wr.opcode,
+ wqe->psn, wqe->lpsn, req);
+ last = hfi1_build_tid_rdma_packet(wqe, ohdr, &bth1, &bth2,
+ &len);
+
+ if (last) {
+ /* move pointer to next flow */
+ req->clear_tail = CIRC_NEXT(req->clear_tail,
+ MAX_FLOWS);
+ if (++req->cur_seg < req->total_segs) {
+ if (!CIRC_CNT(req->setup_head, req->clear_tail,
+ MAX_FLOWS))
+ qp->s_flags |= HFI1_S_WAIT_TID_RESP;
+ } else {
+ priv->s_state = TID_OP(WRITE_DATA_LAST);
+ opcode = TID_OP(WRITE_DATA_LAST);
+
+ /* Advance the s_tid_tail now */
+ update_tid_tail(qp);
+ }
+ }
+ hwords += sizeof(ohdr->u.tid_rdma.w_data) / sizeof(u32);
+ ss = &priv->tid_ss;
+ break;
+
+ case TID_OP(RESYNC):
+ trace_hfi1_sender_make_tid_pkt(qp);
+ /* Use generation from the most recently received response */
+ wqe = rvt_get_swqe_ptr(qp, priv->s_tid_cur);
+ req = wqe_to_tid_req(wqe);
+ /* If no responses for this WQE look at the previous one */
+ if (!req->comp_seg) {
+ wqe = rvt_get_swqe_ptr(qp,
+ (!priv->s_tid_cur ? qp->s_size :
+ priv->s_tid_cur) - 1);
+ req = wqe_to_tid_req(wqe);
+ }
+ hwords += hfi1_build_tid_rdma_resync(qp, wqe, ohdr, &bth1,
+ &bth2,
+ CIRC_PREV(req->setup_head,
+ MAX_FLOWS));
+ ss = NULL;
+ len = 0;
+ opcode = TID_OP(RESYNC);
+ break;
+
+ default:
+ goto bail;
+ }
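+ /* Only one packet (the RESYNC) is sent; request an ACK and wait for it */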
+ if (priv->s_flags & RVT_S_SEND_ONE) {
+ priv->s_flags &= ~RVT_S_SEND_ONE;
+ priv->s_flags |= RVT_S_WAIT_ACK;
+ bth2 |= IB_BTH_REQ_ACK;
+ }
+ qp->s_len -= len;
+ ps->s_txreq->hdr_dwords = hwords;
+ ps->s_txreq->sde = priv->s_sde;
+ ps->s_txreq->ss = ss;
+ ps->s_txreq->s_cur_size = len;
+ hfi1_make_ruc_header(qp, ohdr, (opcode << 24), bth1, bth2,
+ middle, ps);
+ return 1;
+bail:
+ hfi1_put_txreq(ps->s_txreq);
+bail_no_tx:
+ ps->s_txreq = NULL;
+ priv->s_flags &= ~RVT_S_BUSY;
+ /*
+ * If we didn't get a txreq, the QP will be woken up later to try
+ * again; set the flags to indicate which work item should be
+ * woken up.
+ * (A better algorithm should be found to do this and generalize the
+ * sleep/wakeup flags.)
+ */
+ iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
+ return 0;
+}
+
+static int make_tid_rdma_ack(struct rvt_qp *qp,
+ struct ib_other_headers *ohdr,
+ struct hfi1_pkt_state *ps)
+{
+ struct rvt_ack_entry *e;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
+ u32 hwords, next;
+ u32 len = 0;
+ u32 bth1 = 0, bth2 = 0;
+ int middle = 0;
+ u16 flow;
+ struct tid_rdma_request *req, *nreq;
+
+ trace_hfi1_tid_write_rsp_make_tid_ack(qp);
+ /* Don't send an ACK if we aren't supposed to. */
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
+ goto bail;
+
+ /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ hwords = 5;
+
+ e = &qp->s_ack_queue[qpriv->r_tid_ack];
+ req = ack_to_tid_req(e);
+ /*
+ * In the RESYNC case, we are exactly one segment past the
+ * previously sent ack or at the previously sent NAK. So to send
+ * the resync ack, we go back one segment (which might be part of
+ * the previous request) and let the do-while loop execute again.
+ * The advantage of executing the do-while loop is that any data
+ * received after the previous ack is automatically acked in the
+ * RESYNC ack. It turns out that for the do-while loop we only need
+ * to pull back qpriv->r_tid_ack, not the segment
+ * indices/counters. The scheme works even if the previous request
+ * was not a TID WRITE request.
+ */
+ if (qpriv->resync) {
+ if (!req->ack_seg || req->ack_seg == req->total_segs)
+ qpriv->r_tid_ack = !qpriv->r_tid_ack ?
+ rvt_size_atomic(&dev->rdi) :
+ qpriv->r_tid_ack - 1;
+ e = &qp->s_ack_queue[qpriv->r_tid_ack];
+ req = ack_to_tid_req(e);
+ }
+
+ trace_hfi1_rsp_make_tid_ack(qp, e->psn);
+ trace_hfi1_tid_req_make_tid_ack(qp, 0, e->opcode, e->psn, e->lpsn,
+ req);
+ /*
+ * If we've sent all the ACKs that we can, we are done
+ * until we get more segments...
+ */
+ if (!qpriv->s_nak_state && !qpriv->resync &&
+ req->ack_seg == req->comp_seg)
+ goto bail;
+
+ do {
+ /*
+ * To deal with coalesced ACKs, the acked_tail pointer
+ * into the flow array is used. The distance between it
+ * and the clear_tail is the number of flows that are
+ * being ACK'ed.
+ */
+ req->ack_seg +=
+ /* Get up-to-date value */
+ CIRC_CNT(req->clear_tail, req->acked_tail,
+ MAX_FLOWS);
+ /* Advance acked index */
+ req->acked_tail = req->clear_tail;
+
+ /*
+ * req->clear_tail points to the segment currently being
+ * received. So, when sending an ACK, the previous
+ * segment is being ACK'ed.
+ */
+ flow = CIRC_PREV(req->acked_tail, MAX_FLOWS);
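+ /* Stop coalescing if this request still has unacked segments */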
+ if (req->ack_seg != req->total_segs)
+ break;
+ req->state = TID_REQUEST_COMPLETE;
+
+ next = qpriv->r_tid_ack + 1;
+ if (next > rvt_size_atomic(&dev->rdi))
+ next = 0;
+ qpriv->r_tid_ack = next;
+ if (qp->s_ack_queue[next].opcode != TID_OP(WRITE_REQ))
+ break;
+ nreq = ack_to_tid_req(&qp->s_ack_queue[next]);
+ if (!nreq->comp_seg || nreq->ack_seg == nreq->comp_seg)
+ break;
+
+ /* Move to the next ack entry now */
+ e = &qp->s_ack_queue[qpriv->r_tid_ack];
+ req = ack_to_tid_req(e);
+ } while (1);
+
+ /*
+ * At this point, qpriv->r_tid_ack == qpriv->r_tid_tail, but e and
+ * req could be pointing at the previous ack queue entry.
+ */
+ if (qpriv->s_nak_state ||
+ (qpriv->resync &&
+ !hfi1_tid_rdma_is_resync_psn(qpriv->r_next_psn_kdeth - 1) &&
+ (cmp_psn(qpriv->r_next_psn_kdeth - 1,
+ full_flow_psn(&req->flows[flow],
+ req->flows[flow].flow_state.lpsn)) > 0))) {
+ /*
+ * A NAK will implicitly acknowledge all previous TID RDMA
+ * requests. Therefore, we NAK with the req->acked_tail
+ * segment for the request at qpriv->r_tid_ack (same at
+ * this point as the req->clear_tail segment for the
+ * qpriv->r_tid_tail request)
+ */
+ e = &qp->s_ack_queue[qpriv->r_tid_ack];
+ req = ack_to_tid_req(e);
+ flow = req->acked_tail;
+ } else if (req->ack_seg == req->total_segs &&
+ qpriv->s_flags & HFI1_R_TID_WAIT_INTERLCK)
+ qpriv->s_flags &= ~HFI1_R_TID_WAIT_INTERLCK;
+
+ trace_hfi1_tid_write_rsp_make_tid_ack(qp);
+ trace_hfi1_tid_req_make_tid_ack(qp, 0, e->opcode, e->psn, e->lpsn,
+ req);
+ hwords += hfi1_build_tid_rdma_write_ack(qp, e, ohdr, flow, &bth1,
+ &bth2);
+ len = 0;
+ qpriv->s_flags &= ~RVT_S_ACK_PENDING;
+ ps->s_txreq->hdr_dwords = hwords;
+ ps->s_txreq->sde = qpriv->s_sde;
+ ps->s_txreq->s_cur_size = len;
+ ps->s_txreq->ss = NULL;
+ hfi1_make_ruc_header(qp, ohdr, (TID_OP(ACK) << 24), bth1, bth2, middle,
+ ps);
+ ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP;
+ return 1;
+bail:
+ /*
+ * Ensure s_rdma_ack_cnt changes are committed prior to resetting
+ * RVT_S_RESP_PENDING
+ */
+ smp_wmb();
+ qpriv->s_flags &= ~RVT_S_ACK_PENDING;
+ return 0;
+}
+
+static int hfi1_send_tid_ok(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ return !(priv->s_flags & RVT_S_BUSY ||
+ qp->s_flags & HFI1_S_ANY_WAIT_IO) &&
+ (verbs_txreq_queued(iowait_get_tid_work(&priv->s_iowait)) ||
+ (priv->s_flags & RVT_S_RESP_PENDING) ||
+ !(qp->s_flags & HFI1_S_ANY_TID_WAIT_SEND));
+}
+
+void _hfi1_do_tid_send(struct work_struct *work)
+{
+ struct iowait_work *w = container_of(work, struct iowait_work, iowork);
+ struct rvt_qp *qp = iowait_to_qp(w->iow);
+
+ hfi1_do_tid_send(qp);
+}
+
+static void hfi1_do_tid_send(struct rvt_qp *qp)
+{
+ struct hfi1_pkt_state ps;
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ ps.dev = to_idev(qp->ibqp.device);
+ ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
+ ps.ppd = ppd_from_ibp(ps.ibp);
+ ps.wait = iowait_get_tid_work(&priv->s_iowait);
+ ps.in_thread = false;
+ ps.timeout_int = qp->timeout_jiffies / 8;
+
+ trace_hfi1_rc_do_tid_send(qp, false);
+ spin_lock_irqsave(&qp->s_lock, ps.flags);
+
+ /* Return if we are already busy processing a work request. */
+ if (!hfi1_send_tid_ok(qp)) {
+ if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
+ iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
+ spin_unlock_irqrestore(&qp->s_lock, ps.flags);
+ return;
+ }
+
+ priv->s_flags |= RVT_S_BUSY;
+
+ ps.timeout = jiffies + ps.timeout_int;
+ ps.cpu = priv->s_sde ? priv->s_sde->cpu :
+ cpumask_first(cpumask_of_node(ps.ppd->dd->node));
+ ps.pkts_sent = false;
+
+ /* ensure a pre-built packet is handled */
+ ps.s_txreq = get_waiting_verbs_txreq(ps.wait);
+ do {
+ /* Check for a constructed packet to be sent. */
+ if (ps.s_txreq) {
+ if (priv->s_flags & HFI1_S_TID_BUSY_SET) {
+ qp->s_flags |= RVT_S_BUSY;
+ ps.wait = iowait_get_ib_work(&priv->s_iowait);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, ps.flags);
+
+ /*
+ * If the packet cannot be sent now, return and
+ * the send tasklet will be woken up later.
+ */
+ if (hfi1_verbs_send(qp, &ps))
+ return;
+
+ /* allow other tasks to run */
+ if (hfi1_schedule_send_yield(qp, &ps, true))
+ return;
+
+ spin_lock_irqsave(&qp->s_lock, ps.flags);
+ if (priv->s_flags & HFI1_S_TID_BUSY_SET) {
+ qp->s_flags &= ~RVT_S_BUSY;
+ priv->s_flags &= ~HFI1_S_TID_BUSY_SET;
+ ps.wait = iowait_get_tid_work(&priv->s_iowait);
+ if (iowait_flag_set(&priv->s_iowait,
+ IOWAIT_PENDING_IB))
+ hfi1_schedule_send(qp);
+ }
+ }
+ } while (hfi1_make_tid_rdma_pkt(qp, &ps));
+ iowait_starve_clear(ps.pkts_sent, &priv->s_iowait);
+ spin_unlock_irqrestore(&qp->s_lock, ps.flags);
+}
+
+static bool _hfi1_schedule_tid_send(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_ibport *ibp =
+ to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct hfi1_devdata *dd = ppd->dd;
+
+ if ((dd->flags & HFI1_SHUTDOWN))
+ return true;
+
+ return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq,
+ priv->s_sde ?
+ priv->s_sde->cpu :
+ cpumask_first(cpumask_of_node(dd->node)));
+}
+
+/**
+ * hfi1_schedule_tid_send - schedule progress on TID RDMA state machine
+ * @qp: the QP
+ *
+ * This schedules qp progress on the TID RDMA state machine. Caller
+ * should hold the s_lock.
+ * Unlike hfi1_schedule_send(), this cannot use hfi1_send_ok() because
+ * the two state machines can step on each other with respect to the
+ * RVT_S_BUSY flag.
+ * Therefore, a modified test is used.
+ *
+ * Return: %true if the second leg is scheduled;
+ * %false if the second leg is not scheduled.
+ */
+bool hfi1_schedule_tid_send(struct rvt_qp *qp)
+{
+ lockdep_assert_held(&qp->s_lock);
+ if (hfi1_send_tid_ok(qp)) {
+ /*
+ * The following call returns true if the qp is not on the
+ * queue and false if the qp is already on the queue before
+ * this call. Either way, the qp will be on the queue when the
+ * call returns.
+ */
+ _hfi1_schedule_tid_send(qp);
+ return true;
+ }
+ if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
+ iowait_set_flag(&((struct hfi1_qp_priv *)qp->priv)->s_iowait,
+ IOWAIT_PENDING_TID);
+ return false;
+}
+
+bool hfi1_tid_rdma_ack_interlock(struct rvt_qp *qp, struct rvt_ack_entry *e)
+{
+ struct rvt_ack_entry *prev;
+ struct tid_rdma_request *req;
+ struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
+ struct hfi1_qp_priv *priv = qp->priv;
+ u32 s_prev;
+
+ s_prev = qp->s_tail_ack_queue == 0 ? rvt_size_atomic(&dev->rdi) :
+ (qp->s_tail_ack_queue - 1);
+ prev = &qp->s_ack_queue[s_prev];
+
+ if ((e->opcode == TID_OP(READ_REQ) ||
+ e->opcode == OP(RDMA_READ_REQUEST)) &&
+ prev->opcode == TID_OP(WRITE_REQ)) {
+ req = ack_to_tid_req(prev);
+ if (req->ack_seg != req->total_segs) {
+ priv->s_flags |= HFI1_R_TID_WAIT_INTERLCK;
+ return true;
+ }
+ }
+ return false;
+}
+
+static u32 read_r_next_psn(struct hfi1_devdata *dd, u8 ctxt, u8 fidx)
+{
+ u64 reg;
+
+ /*
+ * The only sane way to get the amount of
+ * progress is to read the HW flow state.
+ */
+ reg = read_uctxt_csr(dd, ctxt, RCV_TID_FLOW_TABLE + (8 * fidx));
+ return mask_psn(reg);
+}
+
+static void tid_rdma_rcv_err(struct hfi1_packet *packet,
+ struct ib_other_headers *ohdr,
+ struct rvt_qp *qp, u32 psn, int diff, bool fecn)
+{
+ unsigned long flags;
+
+ tid_rdma_rcv_error(packet, ohdr, qp, psn, diff);
+ if (fecn) {
+ spin_lock_irqsave(&qp->s_lock, flags);
+ qp->s_flags |= RVT_S_ECN;
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ }
+}
+
+static void update_r_next_psn_fecn(struct hfi1_packet *packet,
+ struct hfi1_qp_priv *priv,
+ struct hfi1_ctxtdata *rcd,
+ struct tid_rdma_flow *flow,
+ bool fecn)
+{
+ /*
+ * If a start/middle packet is delivered here due to
+ * RSM rule and FECN, we need to update the r_next_psn.
+ */
+ if (fecn && packet->etype == RHF_RCV_TYPE_EAGER &&
+ !(priv->s_flags & HFI1_R_TID_SW_PSN)) {
+ struct hfi1_devdata *dd = rcd->dd;
+
+ flow->flow_state.r_next_psn =
+ read_r_next_psn(dd, rcd->ctxt, flow->idx);
+ }
+}
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.h b/drivers/infiniband/hw/hfi1/tid_rdma.h
new file mode 100644
index 000000000000..6e82df2190b7
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.h
@@ -0,0 +1,319 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ */
+#ifndef HFI1_TID_RDMA_H
+#define HFI1_TID_RDMA_H
+
+#include <linux/circ_buf.h>
+#include "common.h"
+
+/* Add a convenience helper */
+#define CIRC_ADD(val, add, size) (((val) + (add)) & ((size) - 1))
+#define CIRC_NEXT(val, size) CIRC_ADD(val, 1, size)
+#define CIRC_PREV(val, size) CIRC_ADD(val, -1, size)
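A quick usage sketch of the circular-index helpers above; the mask form only
works when the ring size is a power of two (the values here are illustrative):

	u16 idx = 5, size = 8;			/* size must be a power of two */
	u16 next = CIRC_NEXT(idx, size);	/* 6 */
	u16 prev = CIRC_PREV(idx, size);	/* 4 */
	u16 wrap = CIRC_ADD(7, 1, size);	/* 0, wraps around */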
+
+#define TID_RDMA_MIN_SEGMENT_SIZE BIT(18) /* 256 KiB (for now) */
+#define TID_RDMA_MAX_SEGMENT_SIZE BIT(18) /* 256 KiB (for now) */
+#define TID_RDMA_MAX_PAGES (BIT(18) >> PAGE_SHIFT)
+#define TID_RDMA_SEGMENT_SHIFT 18
+
+/*
+ * Bit definitions for priv->s_flags.
+ * These bit flags overload the bit flags defined for the QP's s_flags.
+ * Because these bit fields are used only for the QP priv s_flags,
+ * there are no collisions.
+ *
+ * HFI1_S_TID_WAIT_INTERLCK - QP is waiting for requester interlock
+ * HFI1_R_TID_WAIT_INTERLCK - QP is waiting for responder interlock
+ */
+#define HFI1_S_TID_BUSY_SET BIT(0)
+/* BIT(1) reserved for RVT_S_BUSY. */
+#define HFI1_R_TID_RSC_TIMER BIT(2)
+/* BIT(3) reserved for RVT_S_RESP_PENDING. */
+/* BIT(4) reserved for RVT_S_ACK_PENDING. */
+#define HFI1_S_TID_WAIT_INTERLCK BIT(5)
+#define HFI1_R_TID_WAIT_INTERLCK BIT(6)
+/* BIT(7) - BIT(15) reserved for RVT_S_WAIT_*. */
+/* BIT(16) reserved for RVT_S_SEND_ONE */
+#define HFI1_S_TID_RETRY_TIMER BIT(17)
+/* BIT(18) reserved for RVT_S_ECN. */
+#define HFI1_R_TID_SW_PSN BIT(19)
+/* BIT(26) reserved for HFI1_S_WAIT_HALT */
+/* BIT(27) reserved for HFI1_S_WAIT_TID_RESP */
+/* BIT(28) reserved for HFI1_S_WAIT_TID_SPACE */
+
+/*
+ * Unlike regular IB RDMA VERBS, which do not require an entry
+ * in the s_ack_queue, TID RDMA WRITE requests do because they
+ * generate responses.
+ * Therefore, the s_ack_queue needs to be extended by a certain
+ * amount. The key point is that the queue needs to be extended
+ * without letting the "user" know, so the user doesn't end up
+ * using these extra entries.
+ */
+#define HFI1_TID_RDMA_WRITE_CNT 8
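A hedged sketch of the sizing described in the comment above (EXAMPLE_USER_DEPTH
is a placeholder, not a driver symbol): the responder's ack queue is allocated
with the hidden TID RDMA WRITE entries on top of whatever depth the user asked
for, and only the user-visible depth is ever reported back.

	#define EXAMPLE_USER_DEPTH	4	/* hypothetical user-requested depth */
	#define EXAMPLE_REAL_DEPTH	(EXAMPLE_USER_DEPTH + HFI1_TID_RDMA_WRITE_CNT)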
+
+struct tid_rdma_params {
+ struct rcu_head rcu_head;
+ u32 qp;
+ u32 max_len;
+ u16 jkey;
+ u8 max_read;
+ u8 max_write;
+ u8 timeout;
+ u8 urg;
+ u8 version;
+};
+
+struct tid_rdma_qp_params {
+ struct work_struct trigger_work;
+ struct tid_rdma_params local;
+ struct tid_rdma_params __rcu *remote;
+};
+
+/* Track state for each hardware flow */
+struct tid_flow_state {
+ u32 generation;
+ u32 psn;
+ u8 index;
+ u8 last_index;
+};
+
+enum tid_rdma_req_state {
+ TID_REQUEST_INACTIVE = 0,
+ TID_REQUEST_INIT,
+ TID_REQUEST_INIT_RESEND,
+ TID_REQUEST_ACTIVE,
+ TID_REQUEST_RESEND,
+ TID_REQUEST_RESEND_ACTIVE,
+ TID_REQUEST_QUEUED,
+ TID_REQUEST_SYNC,
+ TID_REQUEST_RNR_NAK,
+ TID_REQUEST_COMPLETE,
+};
+
+struct tid_rdma_request {
+ struct rvt_qp *qp;
+ struct hfi1_ctxtdata *rcd;
+ union {
+ struct rvt_swqe *swqe;
+ struct rvt_ack_entry *ack;
+ } e;
+
+ struct tid_rdma_flow *flows; /* array of tid flows */
+ struct rvt_sge_state ss; /* SGE state for TID RDMA requests */
+ u16 n_flows; /* size of the flow buffer window */
+ u16 setup_head; /* flow index we are setting up */
+ u16 clear_tail; /* flow index we are clearing */
+ u16 flow_idx; /* flow index most recently set up */
+ u16 acked_tail;
+
+ u32 seg_len;
+ u32 total_len;
+ u32 r_ack_psn; /* next expected ack PSN */
+ u32 r_flow_psn; /* IB PSN of next segment start */
+ u32 r_last_acked; /* IB PSN of last ACK'ed packet */
+ u32 s_next_psn; /* IB PSN of next segment start for read */
+
+ u32 total_segs; /* segments required to complete a request */
+ u32 cur_seg; /* index of current segment */
+ u32 comp_seg; /* index of last completed segment */
+ u32 ack_seg; /* index of last ack'ed segment */
+ u32 alloc_seg; /* index of next segment to be allocated */
+ u32 isge; /* index of "current" sge */
+ u32 ack_pending; /* num acks pending for this request */
+
+ enum tid_rdma_req_state state;
+};
+
+/*
+ * When header suppression is used, PSNs associated with a "flow" are
+ * relevant (and not the PSNs maintained by verbs). Track per-flow
+ * PSNs here for a TID RDMA segment.
+ */
+struct flow_state {
+ u32 flags;
+ u32 resp_ib_psn; /* The IB PSN of the response for this flow */
+ u32 generation; /* generation of flow */
+ u32 spsn; /* starting PSN in TID space */
+ u32 lpsn; /* last PSN in TID space */
+ u32 r_next_psn; /* next PSN to be received (in TID space) */
+
+ /* For tid rdma read */
+ u32 ib_spsn; /* starting PSN in Verbs space */
+ u32 ib_lpsn; /* last PSN in Verbs space */
+};
+
+struct tid_rdma_pageset {
+ dma_addr_t addr : 48; /* Only needed for the first page */
+ u8 idx: 8;
+ u8 count : 7;
+ u8 mapped: 1;
+};
+
+/**
+ * kern_tid_node - used for managing TIDs in TID groups
+ *
+ * @grp: the rcd-relative tid_group described by this node
+ * @map: grp->map captured prior to programming this TID group in HW
+ * @cnt: Only @cnt of available group entries are actually programmed
+ */
+struct kern_tid_node {
+ struct tid_group *grp;
+ u8 map;
+ u8 cnt;
+};
+
+/* Overall info for a TID RDMA segment */
+struct tid_rdma_flow {
+ /*
+ * While a TID RDMA segment is being transferred, it uses a QP number
+ * from the "KDETH section of QP numbers" (which is different from the
+ * QP number that originated the request). Bits 11-15 of these QP
+ * numbers identify the "TID flow" for the segment.
+ */
+ struct flow_state flow_state;
+ struct tid_rdma_request *req;
+ u32 tid_qpn;
+ u32 tid_offset;
+ u32 length;
+ u32 sent;
+ u8 tnode_cnt;
+ u8 tidcnt;
+ u8 tid_idx;
+ u8 idx;
+ u8 npagesets;
+ u8 npkts;
+ u8 pkt;
+ u8 resync_npkts;
+ struct kern_tid_node tnode[TID_RDMA_MAX_PAGES];
+ struct tid_rdma_pageset pagesets[TID_RDMA_MAX_PAGES];
+ u32 tid_entry[TID_RDMA_MAX_PAGES];
+};
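The comment at the top of struct tid_rdma_flow notes that bits 11-15 of the
KDETH QP number select the TID flow. A minimal sketch of that extraction
(example_tid_flow_from_qpn is a hypothetical helper, not part of the driver):

	static inline u8 example_tid_flow_from_qpn(u32 tid_qpn)
	{
		/* bits 11-15 identify the "TID flow" for the segment */
		return (tid_qpn >> 11) & 0x1f;
	}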
+
+enum tid_rnr_nak_state {
+ TID_RNR_NAK_INIT = 0,
+ TID_RNR_NAK_SEND,
+ TID_RNR_NAK_SENT,
+};
+
+bool tid_rdma_conn_req(struct rvt_qp *qp, u64 *data);
+bool tid_rdma_conn_reply(struct rvt_qp *qp, u64 data);
+bool tid_rdma_conn_resp(struct rvt_qp *qp, u64 *data);
+void tid_rdma_conn_error(struct rvt_qp *qp);
+void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p);
+
+int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit);
+int hfi1_kern_exp_rcv_setup(struct tid_rdma_request *req,
+ struct rvt_sge_state *ss, bool *last);
+int hfi1_kern_exp_rcv_clear(struct tid_rdma_request *req);
+void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req);
+void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
+
+/**
+ * trdma_clean_swqe - clean flows for swqe if large send queue
+ * @qp: the qp
+ * @wqe: the send wqe
+ */
+static inline void trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
+{
+ if (!wqe->priv)
+ return;
+ __trdma_clean_swqe(qp, wqe);
+}
+
+void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp);
+
+int hfi1_qp_priv_init(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ struct ib_qp_init_attr *init_attr);
+void hfi1_qp_priv_tid_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
+
+void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp);
+
+int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp);
+void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp);
+void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd);
+
+struct cntr_entry;
+u64 hfi1_access_sw_tid_wait(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data);
+
+u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
+ struct ib_other_headers *ohdr,
+ u32 *bth1, u32 *bth2, u32 *len);
+u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ struct ib_other_headers *ohdr, u32 *bth1,
+ u32 *bth2, u32 *len);
+void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet);
+u32 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
+ struct ib_other_headers *ohdr, u32 *bth0,
+ u32 *bth1, u32 *bth2, u32 *len, bool *last);
+void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet);
+bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
+ struct hfi1_pportdata *ppd,
+ struct hfi1_packet *packet);
+void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ u32 *bth2);
+void hfi1_qp_kern_exp_rcv_clear_all(struct rvt_qp *qp);
+bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe);
+
+void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
+static inline void hfi1_setup_tid_rdma_wqe(struct rvt_qp *qp,
+ struct rvt_swqe *wqe)
+{
+ if (wqe->priv &&
+ (wqe->wr.opcode == IB_WR_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_RDMA_WRITE) &&
+ wqe->length >= TID_RDMA_MIN_SEGMENT_SIZE)
+ setup_tid_rdma_wqe(qp, wqe);
+}
+
+u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ struct ib_other_headers *ohdr,
+ u32 *bth1, u32 *bth2, u32 *len);
+
+void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet);
+
+u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
+ struct ib_other_headers *ohdr, u32 *bth1,
+ u32 bth2, u32 *len,
+ struct rvt_sge_state **ss);
+
+void hfi1_del_tid_reap_timer(struct rvt_qp *qp);
+
+void hfi1_rc_rcv_tid_rdma_write_resp(struct hfi1_packet *packet);
+
+bool hfi1_build_tid_rdma_packet(struct rvt_swqe *wqe,
+ struct ib_other_headers *ohdr,
+ u32 *bth1, u32 *bth2, u32 *len);
+
+void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet);
+
+u32 hfi1_build_tid_rdma_write_ack(struct rvt_qp *qp, struct rvt_ack_entry *e,
+ struct ib_other_headers *ohdr, u16 iflow,
+ u32 *bth1, u32 *bth2);
+
+void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet);
+
+void hfi1_add_tid_retry_timer(struct rvt_qp *qp);
+void hfi1_del_tid_retry_timer(struct rvt_qp *qp);
+
+u32 hfi1_build_tid_rdma_resync(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ struct ib_other_headers *ohdr, u32 *bth1,
+ u32 *bth2, u16 fidx);
+
+void hfi1_rc_rcv_tid_rdma_resync(struct hfi1_packet *packet);
+
+struct hfi1_pkt_state;
+int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
+
+void _hfi1_do_tid_send(struct work_struct *work);
+
+bool hfi1_schedule_tid_send(struct rvt_qp *qp);
+
+bool hfi1_tid_rdma_ack_interlock(struct rvt_qp *qp, struct rvt_ack_entry *e);
+
+#endif /* HFI1_TID_RDMA_H */
diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c
new file mode 100644
index 000000000000..10290ebf76b2
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace.c
@@ -0,0 +1,532 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015 - 2020 Intel Corporation.
+ */
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+#include "exp_rcv.h"
+#include "ipoib.h"
+
+static u8 __get_ib_hdr_len(struct ib_header *hdr)
+{
+ struct ib_other_headers *ohdr;
+ u8 opcode;
+
+ if (ib_get_lnh(hdr) == HFI1_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else
+ ohdr = &hdr->u.l.oth;
+ opcode = ib_bth_get_opcode(ohdr);
+ return hdr_len_by_opcode[opcode] == 0 ?
+ 0 : hdr_len_by_opcode[opcode] - (12 + 8);
+}
+
+static u8 __get_16b_hdr_len(struct hfi1_16b_header *hdr)
+{
+ struct ib_other_headers *ohdr = NULL;
+ u8 opcode;
+ u8 l4 = hfi1_16B_get_l4(hdr);
+
+ if (l4 == OPA_16B_L4_FM) {
+ opcode = IB_OPCODE_UD_SEND_ONLY;
+ return (8 + 8); /* No BTH */
+ }
+
+ if (l4 == OPA_16B_L4_IB_LOCAL)
+ ohdr = &hdr->u.oth;
+ else
+ ohdr = &hdr->u.l.oth;
+
+ opcode = ib_bth_get_opcode(ohdr);
+ return hdr_len_by_opcode[opcode] == 0 ?
+ 0 : hdr_len_by_opcode[opcode] - (12 + 8 + 8);
+}
+
+u8 hfi1_trace_packet_hdr_len(struct hfi1_packet *packet)
+{
+ if (packet->etype != RHF_RCV_TYPE_BYPASS)
+ return __get_ib_hdr_len(packet->hdr);
+ else
+ return __get_16b_hdr_len(packet->hdr);
+}
+
+u8 hfi1_trace_opa_hdr_len(struct hfi1_opa_header *opa_hdr)
+{
+ if (!opa_hdr->hdr_type)
+ return __get_ib_hdr_len(&opa_hdr->ibh);
+ else
+ return __get_16b_hdr_len(&opa_hdr->opah);
+}
+
+const char *hfi1_trace_get_packet_l4_str(u8 l4)
+{
+ if (l4)
+ return "16B";
+ else
+ return "9B";
+}
+
+const char *hfi1_trace_get_packet_l2_str(u8 l2)
+{
+ switch (l2) {
+ case 0:
+ return "0";
+ case 1:
+ return "1";
+ case 2:
+ return "16B";
+ case 3:
+ return "9B";
+ }
+ return "";
+}
+
+#define IMM_PRN "imm:%d"
+#define RETH_PRN "reth vaddr:0x%.16llx rkey:0x%.8x dlen:0x%.8x"
+#define AETH_PRN "aeth syn:0x%.2x %s msn:0x%.8x"
+#define DETH_PRN "deth qkey:0x%.8x sqpn:0x%.6x"
+#define DETH_ENTROPY_PRN "deth qkey:0x%.8x sqpn:0x%.6x entropy:0x%.2x"
+#define IETH_PRN "ieth rkey:0x%.8x"
+#define ATOMICACKETH_PRN "origdata:%llx"
+#define ATOMICETH_PRN "vaddr:0x%llx rkey:0x%.8x sdata:%llx cdata:%llx"
+#define TID_RDMA_KDETH "kdeth0 0x%x kdeth1 0x%x"
+#define TID_RDMA_KDETH_DATA "kdeth0 0x%x: kver %u sh %u intr %u tidctrl %u tid %x offset %x kdeth1 0x%x: jkey %x"
+#define TID_READ_REQ_PRN "tid_flow_psn 0x%x tid_flow_qp 0x%x verbs_qp 0x%x"
+#define TID_READ_RSP_PRN "verbs_qp 0x%x"
+#define TID_WRITE_REQ_PRN "original_qp 0x%x"
+#define TID_WRITE_RSP_PRN "tid_flow_psn 0x%x tid_flow_qp 0x%x verbs_qp 0x%x"
+#define TID_WRITE_DATA_PRN "verbs_qp 0x%x"
+#define TID_ACK_PRN "tid_flow_psn 0x%x verbs_psn 0x%x tid_flow_qp 0x%x verbs_qp 0x%x"
+#define TID_RESYNC_PRN "verbs_qp 0x%x"
+
+#define OP(transport, op) IB_OPCODE_## transport ## _ ## op
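For reference, OP() simply pastes its arguments into the generated IB opcode
names, so the case labels below expand as, for example:

	OP(RC, ACKNOWLEDGE)		/* IB_OPCODE_RC_ACKNOWLEDGE */
	OP(TID_RDMA, WRITE_REQ)		/* IB_OPCODE_TID_RDMA_WRITE_REQ */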
+
+static const char *parse_syndrome(u8 syndrome)
+{
+ switch (syndrome >> 5) {
+ case 0:
+ return "ACK";
+ case 1:
+ return "RNRNAK";
+ case 3:
+ return "NAK";
+ }
+ return "";
+}
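A few example expansions of the helper above; the callers pass the top byte of
the AETH, and its top three bits select the type:

	parse_syndrome(0x00);	/* "ACK"    (0x00 >> 5 == 0) */
	parse_syndrome(0x20);	/* "RNRNAK" (0x20 >> 5 == 1) */
	parse_syndrome(0x60);	/* "NAK"    (0x60 >> 5 == 3) */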
+
+void hfi1_trace_parse_9b_bth(struct ib_other_headers *ohdr,
+ u8 *ack, bool *becn, bool *fecn, u8 *mig,
+ u8 *se, u8 *pad, u8 *opcode, u8 *tver,
+ u16 *pkey, u32 *psn, u32 *qpn)
+{
+ *ack = ib_bth_get_ackreq(ohdr);
+ *becn = ib_bth_get_becn(ohdr);
+ *fecn = ib_bth_get_fecn(ohdr);
+ *mig = ib_bth_get_migreq(ohdr);
+ *se = ib_bth_get_se(ohdr);
+ *pad = ib_bth_get_pad(ohdr);
+ *opcode = ib_bth_get_opcode(ohdr);
+ *tver = ib_bth_get_tver(ohdr);
+ *pkey = ib_bth_get_pkey(ohdr);
+ *psn = mask_psn(ib_bth_get_psn(ohdr));
+ *qpn = ib_bth_get_qpn(ohdr);
+}
+
+void hfi1_trace_parse_16b_bth(struct ib_other_headers *ohdr,
+ u8 *ack, u8 *mig, u8 *opcode,
+ u8 *pad, u8 *se, u8 *tver,
+ u32 *psn, u32 *qpn)
+{
+ *ack = ib_bth_get_ackreq(ohdr);
+ *mig = ib_bth_get_migreq(ohdr);
+ *opcode = ib_bth_get_opcode(ohdr);
+ *pad = ib_bth_get_pad(ohdr);
+ *se = ib_bth_get_se(ohdr);
+ *tver = ib_bth_get_tver(ohdr);
+ *psn = mask_psn(ib_bth_get_psn(ohdr));
+ *qpn = ib_bth_get_qpn(ohdr);
+}
+
+static u16 ib_get_len(const struct ib_header *hdr)
+{
+ return be16_to_cpu(hdr->lrh[2]);
+}
+
+void hfi1_trace_parse_9b_hdr(struct ib_header *hdr, bool sc5,
+ u8 *lnh, u8 *lver, u8 *sl, u8 *sc,
+ u16 *len, u32 *dlid, u32 *slid)
+{
+ *lnh = ib_get_lnh(hdr);
+ *lver = ib_get_lver(hdr);
+ *sl = ib_get_sl(hdr);
+ *sc = ib_get_sc(hdr) | (sc5 << 4);
+ *len = ib_get_len(hdr);
+ *dlid = ib_get_dlid(hdr);
+ *slid = ib_get_slid(hdr);
+}
+
+void hfi1_trace_parse_16b_hdr(struct hfi1_16b_header *hdr,
+ u8 *age, bool *becn, bool *fecn,
+ u8 *l4, u8 *rc, u8 *sc,
+ u16 *entropy, u16 *len, u16 *pkey,
+ u32 *dlid, u32 *slid)
+{
+ *age = hfi1_16B_get_age(hdr);
+ *becn = hfi1_16B_get_becn(hdr);
+ *fecn = hfi1_16B_get_fecn(hdr);
+ *l4 = hfi1_16B_get_l4(hdr);
+ *rc = hfi1_16B_get_rc(hdr);
+ *sc = hfi1_16B_get_sc(hdr);
+ *entropy = hfi1_16B_get_entropy(hdr);
+ *len = hfi1_16B_get_len(hdr);
+ *pkey = hfi1_16B_get_pkey(hdr);
+ *dlid = hfi1_16B_get_dlid(hdr);
+ *slid = hfi1_16B_get_slid(hdr);
+}
+
+#define LRH_PRN "len:%d sc:%d dlid:0x%.4x slid:0x%.4x "
+#define LRH_9B_PRN "lnh:%d,%s lver:%d sl:%d"
+#define LRH_16B_PRN "age:%d becn:%d fecn:%d l4:%d " \
+ "rc:%d sc:%d pkey:0x%.4x entropy:0x%.4x"
+const char *hfi1_trace_fmt_lrh(struct trace_seq *p, bool bypass,
+ u8 age, bool becn, bool fecn, u8 l4,
+ u8 lnh, const char *lnh_name, u8 lver,
+ u8 rc, u8 sc, u8 sl, u16 entropy,
+ u16 len, u16 pkey, u32 dlid, u32 slid)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, LRH_PRN, len, sc, dlid, slid);
+
+ if (bypass)
+ trace_seq_printf(p, LRH_16B_PRN,
+ age, becn, fecn, l4, rc, sc, pkey, entropy);
+
+ else
+ trace_seq_printf(p, LRH_9B_PRN,
+ lnh, lnh_name, lver, sl);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+#define BTH_9B_PRN \
+ "op:0x%.2x,%s se:%d m:%d pad:%d tver:%d pkey:0x%.4x " \
+ "f:%d b:%d qpn:0x%.6x a:%d psn:0x%.8x"
+#define BTH_16B_PRN \
+ "op:0x%.2x,%s se:%d m:%d pad:%d tver:%d " \
+ "qpn:0x%.6x a:%d psn:0x%.8x"
+#define L4_FM_16B_PRN \
+ "op:0x%.2x,%s dest_qpn:0x%.6x src_qpn:0x%.6x"
+const char *hfi1_trace_fmt_rest(struct trace_seq *p, bool bypass, u8 l4,
+ u8 ack, bool becn, bool fecn, u8 mig,
+ u8 se, u8 pad, u8 opcode, const char *opname,
+ u8 tver, u16 pkey, u32 psn, u32 qpn,
+ u32 dest_qpn, u32 src_qpn)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ if (bypass)
+ if (l4 == OPA_16B_L4_FM)
+ trace_seq_printf(p, L4_FM_16B_PRN,
+ opcode, opname, dest_qpn, src_qpn);
+ else
+ trace_seq_printf(p, BTH_16B_PRN,
+ opcode, opname,
+ se, mig, pad, tver, qpn, ack, psn);
+
+ else
+ trace_seq_printf(p, BTH_9B_PRN,
+ opcode, opname,
+ se, mig, pad, tver, pkey, fecn, becn,
+ qpn, ack, psn);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+const char *parse_everbs_hdrs(
+ struct trace_seq *p,
+ u8 opcode, u8 l4, u32 dest_qpn, u32 src_qpn,
+ void *ehdrs)
+{
+ union ib_ehdrs *eh = ehdrs;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ if (l4 == OPA_16B_L4_FM) {
+ trace_seq_printf(p, "mgmt pkt");
+ goto out;
+ }
+
+ switch (opcode) {
+ /* imm */
+ case OP(RC, SEND_LAST_WITH_IMMEDIATE):
+ case OP(UC, SEND_LAST_WITH_IMMEDIATE):
+ case OP(RC, SEND_ONLY_WITH_IMMEDIATE):
+ case OP(UC, SEND_ONLY_WITH_IMMEDIATE):
+ case OP(RC, RDMA_WRITE_LAST_WITH_IMMEDIATE):
+ case OP(UC, RDMA_WRITE_LAST_WITH_IMMEDIATE):
+ trace_seq_printf(p, IMM_PRN,
+ be32_to_cpu(eh->imm_data));
+ break;
+ /* reth + imm */
+ case OP(RC, RDMA_WRITE_ONLY_WITH_IMMEDIATE):
+ case OP(UC, RDMA_WRITE_ONLY_WITH_IMMEDIATE):
+ trace_seq_printf(p, RETH_PRN " " IMM_PRN,
+ get_ib_reth_vaddr(&eh->rc.reth),
+ be32_to_cpu(eh->rc.reth.rkey),
+ be32_to_cpu(eh->rc.reth.length),
+ be32_to_cpu(eh->rc.imm_data));
+ break;
+ /* reth */
+ case OP(RC, RDMA_READ_REQUEST):
+ case OP(RC, RDMA_WRITE_FIRST):
+ case OP(UC, RDMA_WRITE_FIRST):
+ case OP(RC, RDMA_WRITE_ONLY):
+ case OP(UC, RDMA_WRITE_ONLY):
+ trace_seq_printf(p, RETH_PRN,
+ get_ib_reth_vaddr(&eh->rc.reth),
+ be32_to_cpu(eh->rc.reth.rkey),
+ be32_to_cpu(eh->rc.reth.length));
+ break;
+ case OP(RC, RDMA_READ_RESPONSE_FIRST):
+ case OP(RC, RDMA_READ_RESPONSE_LAST):
+ case OP(RC, RDMA_READ_RESPONSE_ONLY):
+ case OP(RC, ACKNOWLEDGE):
+ trace_seq_printf(p, AETH_PRN, be32_to_cpu(eh->aeth) >> 24,
+ parse_syndrome(be32_to_cpu(eh->aeth) >> 24),
+ be32_to_cpu(eh->aeth) & IB_MSN_MASK);
+ break;
+ case OP(TID_RDMA, WRITE_REQ):
+ trace_seq_printf(p, TID_RDMA_KDETH " " RETH_PRN " "
+ TID_WRITE_REQ_PRN,
+ le32_to_cpu(eh->tid_rdma.w_req.kdeth0),
+ le32_to_cpu(eh->tid_rdma.w_req.kdeth1),
+ ib_u64_get(&eh->tid_rdma.w_req.reth.vaddr),
+ be32_to_cpu(eh->tid_rdma.w_req.reth.rkey),
+ be32_to_cpu(eh->tid_rdma.w_req.reth.length),
+ be32_to_cpu(eh->tid_rdma.w_req.verbs_qp));
+ break;
+ case OP(TID_RDMA, WRITE_RESP):
+ trace_seq_printf(p, TID_RDMA_KDETH " " AETH_PRN " "
+ TID_WRITE_RSP_PRN,
+ le32_to_cpu(eh->tid_rdma.w_rsp.kdeth0),
+ le32_to_cpu(eh->tid_rdma.w_rsp.kdeth1),
+ be32_to_cpu(eh->tid_rdma.w_rsp.aeth) >> 24,
+ parse_syndrome(/* aeth */
+ be32_to_cpu(eh->tid_rdma.w_rsp.aeth)
+ >> 24),
+ (be32_to_cpu(eh->tid_rdma.w_rsp.aeth) &
+ IB_MSN_MASK),
+ be32_to_cpu(eh->tid_rdma.w_rsp.tid_flow_psn),
+ be32_to_cpu(eh->tid_rdma.w_rsp.tid_flow_qp),
+ be32_to_cpu(eh->tid_rdma.w_rsp.verbs_qp));
+ break;
+ case OP(TID_RDMA, WRITE_DATA_LAST):
+ case OP(TID_RDMA, WRITE_DATA):
+ trace_seq_printf(p, TID_RDMA_KDETH_DATA " " TID_WRITE_DATA_PRN,
+ le32_to_cpu(eh->tid_rdma.w_data.kdeth0),
+ KDETH_GET(eh->tid_rdma.w_data.kdeth0, KVER),
+ KDETH_GET(eh->tid_rdma.w_data.kdeth0, SH),
+ KDETH_GET(eh->tid_rdma.w_data.kdeth0, INTR),
+ KDETH_GET(eh->tid_rdma.w_data.kdeth0, TIDCTRL),
+ KDETH_GET(eh->tid_rdma.w_data.kdeth0, TID),
+ KDETH_GET(eh->tid_rdma.w_data.kdeth0, OFFSET),
+ le32_to_cpu(eh->tid_rdma.w_data.kdeth1),
+ KDETH_GET(eh->tid_rdma.w_data.kdeth1, JKEY),
+ be32_to_cpu(eh->tid_rdma.w_data.verbs_qp));
+ break;
+ case OP(TID_RDMA, READ_REQ):
+ trace_seq_printf(p, TID_RDMA_KDETH " " RETH_PRN " "
+ TID_READ_REQ_PRN,
+ le32_to_cpu(eh->tid_rdma.r_req.kdeth0),
+ le32_to_cpu(eh->tid_rdma.r_req.kdeth1),
+ ib_u64_get(&eh->tid_rdma.r_req.reth.vaddr),
+ be32_to_cpu(eh->tid_rdma.r_req.reth.rkey),
+ be32_to_cpu(eh->tid_rdma.r_req.reth.length),
+ be32_to_cpu(eh->tid_rdma.r_req.tid_flow_psn),
+ be32_to_cpu(eh->tid_rdma.r_req.tid_flow_qp),
+ be32_to_cpu(eh->tid_rdma.r_req.verbs_qp));
+ break;
+ case OP(TID_RDMA, READ_RESP):
+ trace_seq_printf(p, TID_RDMA_KDETH_DATA " " AETH_PRN " "
+ TID_READ_RSP_PRN,
+ le32_to_cpu(eh->tid_rdma.r_rsp.kdeth0),
+ KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, KVER),
+ KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, SH),
+ KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, INTR),
+ KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, TIDCTRL),
+ KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, TID),
+ KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, OFFSET),
+ le32_to_cpu(eh->tid_rdma.r_rsp.kdeth1),
+ KDETH_GET(eh->tid_rdma.r_rsp.kdeth1, JKEY),
+ be32_to_cpu(eh->tid_rdma.r_rsp.aeth) >> 24,
+ parse_syndrome(/* aeth */
+ be32_to_cpu(eh->tid_rdma.r_rsp.aeth)
+ >> 24),
+ (be32_to_cpu(eh->tid_rdma.r_rsp.aeth) &
+ IB_MSN_MASK),
+ be32_to_cpu(eh->tid_rdma.r_rsp.verbs_qp));
+ break;
+ case OP(TID_RDMA, ACK):
+ trace_seq_printf(p, TID_RDMA_KDETH " " AETH_PRN " "
+ TID_ACK_PRN,
+ le32_to_cpu(eh->tid_rdma.ack.kdeth0),
+ le32_to_cpu(eh->tid_rdma.ack.kdeth1),
+ be32_to_cpu(eh->tid_rdma.ack.aeth) >> 24,
+ parse_syndrome(/* aeth */
+ be32_to_cpu(eh->tid_rdma.ack.aeth)
+ >> 24),
+ (be32_to_cpu(eh->tid_rdma.ack.aeth) &
+ IB_MSN_MASK),
+ be32_to_cpu(eh->tid_rdma.ack.tid_flow_psn),
+ be32_to_cpu(eh->tid_rdma.ack.verbs_psn),
+ be32_to_cpu(eh->tid_rdma.ack.tid_flow_qp),
+ be32_to_cpu(eh->tid_rdma.ack.verbs_qp));
+ break;
+ case OP(TID_RDMA, RESYNC):
+ trace_seq_printf(p, TID_RDMA_KDETH " " TID_RESYNC_PRN,
+ le32_to_cpu(eh->tid_rdma.resync.kdeth0),
+ le32_to_cpu(eh->tid_rdma.resync.kdeth1),
+ be32_to_cpu(eh->tid_rdma.resync.verbs_qp));
+ break;
+ /* aeth + atomicacketh */
+ case OP(RC, ATOMIC_ACKNOWLEDGE):
+ trace_seq_printf(p, AETH_PRN " " ATOMICACKETH_PRN,
+ be32_to_cpu(eh->at.aeth) >> 24,
+ parse_syndrome(be32_to_cpu(eh->at.aeth) >> 24),
+ be32_to_cpu(eh->at.aeth) & IB_MSN_MASK,
+ ib_u64_get(&eh->at.atomic_ack_eth));
+ break;
+ /* atomiceth */
+ case OP(RC, COMPARE_SWAP):
+ case OP(RC, FETCH_ADD):
+ trace_seq_printf(p, ATOMICETH_PRN,
+ get_ib_ateth_vaddr(&eh->atomic_eth),
+ eh->atomic_eth.rkey,
+ get_ib_ateth_swap(&eh->atomic_eth),
+ get_ib_ateth_compare(&eh->atomic_eth));
+ break;
+ /* deth */
+ case OP(UD, SEND_ONLY):
+ trace_seq_printf(p, DETH_ENTROPY_PRN,
+ be32_to_cpu(eh->ud.deth[0]),
+ be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK,
+ be32_to_cpu(eh->ud.deth[1]) >>
+ HFI1_IPOIB_ENTROPY_SHIFT);
+ break;
+ case OP(UD, SEND_ONLY_WITH_IMMEDIATE):
+ trace_seq_printf(p, DETH_PRN,
+ be32_to_cpu(eh->ud.deth[0]),
+ be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK);
+ break;
+ /* ieth */
+ case OP(RC, SEND_LAST_WITH_INVALIDATE):
+ case OP(RC, SEND_ONLY_WITH_INVALIDATE):
+ trace_seq_printf(p, IETH_PRN,
+ be32_to_cpu(eh->ieth));
+ break;
+ }
+out:
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+const char *parse_sdma_flags(
+ struct trace_seq *p,
+ u64 desc0, u64 desc1)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ char flags[5] = { 'x', 'x', 'x', 'x', 0 };
+
+ flags[0] = (desc1 & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
+ flags[1] = (desc1 & SDMA_DESC1_HEAD_TO_HOST_FLAG) ? 'H' : '-';
+ flags[2] = (desc0 & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
+ flags[3] = (desc0 & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
+ trace_seq_printf(p, "%s", flags);
+ if (desc0 & SDMA_DESC0_FIRST_DESC_FLAG)
+ trace_seq_printf(p, " amode:%u aidx:%u alen:%u",
+ (u8)((desc1 >> SDMA_DESC1_HEADER_MODE_SHIFT) &
+ SDMA_DESC1_HEADER_MODE_MASK),
+ (u8)((desc1 >> SDMA_DESC1_HEADER_INDEX_SHIFT) &
+ SDMA_DESC1_HEADER_INDEX_MASK),
+ (u8)((desc1 >> SDMA_DESC1_HEADER_DWS_SHIFT) &
+ SDMA_DESC1_HEADER_DWS_MASK));
+ return ret;
+}
+
+const char *print_u32_array(
+ struct trace_seq *p,
+ u32 *arr, int len)
+{
+ int i;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ for (i = 0; i < len ; i++)
+ trace_seq_printf(p, "%s%#x", i == 0 ? "" : " ", arr[i]);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+u8 hfi1_trace_get_tid_ctrl(u32 ent)
+{
+ return EXP_TID_GET(ent, CTRL);
+}
+
+u16 hfi1_trace_get_tid_len(u32 ent)
+{
+ return EXP_TID_GET(ent, LEN);
+}
+
+u16 hfi1_trace_get_tid_idx(u32 ent)
+{
+ return EXP_TID_GET(ent, IDX);
+}
+
+struct hfi1_ctxt_hist {
+ atomic_t count;
+ atomic_t data[255];
+};
+
+static struct hfi1_ctxt_hist hist = {
+ .count = ATOMIC_INIT(0)
+};
+
+const char *hfi1_trace_print_rsm_hist(struct trace_seq *p, unsigned int ctxt)
+{
+ int i, len = ARRAY_SIZE(hist.data);
+ const char *ret = trace_seq_buffer_ptr(p);
+ unsigned long packet_count = atomic_fetch_inc(&hist.count);
+
+ trace_seq_printf(p, "packet[%lu]", packet_count);
+ for (i = 0; i < len; ++i) {
+ unsigned long val;
+ atomic_t *count = &hist.data[i];
+
+ if (ctxt == i)
+ val = atomic_fetch_inc(count);
+ else
+ val = atomic_read(count);
+
+ if (val)
+ trace_seq_printf(p, "(%d:%lu)", i, val);
+ }
+ trace_seq_putc(p, 0);
+ return ret;
+}
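For illustration, assume 42 packets were counted before this one, 10 of them on
context 0 and 32 on context 3, and the current packet arrives on context 3.
Because the fetch-and-increments report the counts as they stood before this
packet, the formatted trace line would read:

	packet[42](0:10)(3:32)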
+
+__hfi1_trace_fn(AFFINITY);
+__hfi1_trace_fn(PKT);
+__hfi1_trace_fn(PROC);
+__hfi1_trace_fn(SDMA);
+__hfi1_trace_fn(LINKVERB);
+__hfi1_trace_fn(DEBUG);
+__hfi1_trace_fn(SNOOP);
+__hfi1_trace_fn(CNTR);
+__hfi1_trace_fn(PIO);
+__hfi1_trace_fn(DC8051);
+__hfi1_trace_fn(FIRMWARE);
+__hfi1_trace_fn(RCVCTRL);
+__hfi1_trace_fn(TID);
+__hfi1_trace_fn(MMU);
+__hfi1_trace_fn(IOCTL);
diff --git a/drivers/infiniband/hw/hfi1/trace.h b/drivers/infiniband/hw/hfi1/trace.h
new file mode 100644
index 000000000000..bb3cc006bacd
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2015 - 2018 Intel Corporation.
+ */
+
+#define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
+#define show_packettype(etype) \
+__print_symbolic(etype, \
+ packettype_name(EXPECTED), \
+ packettype_name(EAGER), \
+ packettype_name(IB), \
+ packettype_name(ERROR), \
+ packettype_name(BYPASS))
+
+#include "trace_dbg.h"
+#include "trace_misc.h"
+#include "trace_ctxts.h"
+#include "trace_ibhdrs.h"
+#include "trace_rc.h"
+#include "trace_rx.h"
+#include "trace_tx.h"
+#include "trace_mmu.h"
+#include "trace_iowait.h"
+#include "trace_tid.h"
diff --git a/drivers/infiniband/hw/hfi1/trace_ctxts.h b/drivers/infiniband/hw/hfi1/trace_ctxts.h
new file mode 100644
index 000000000000..76c41bd79071
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_ctxts.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+* Copyright(c) 2015 - 2020 Intel Corporation.
+*/
+
+#if !defined(__HFI1_TRACE_CTXTS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_CTXTS_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_ctxts
+
+#define UCTXT_FMT \
+ "cred:%u, credaddr:0x%llx, piobase:0x%p, rcvhdr_cnt:%u, " \
+ "rcvbase:0x%llx, rcvegrc:%u, rcvegrb:0x%llx, subctxt_cnt:%u"
+TRACE_EVENT(hfi1_uctxtdata,
+ TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt,
+ unsigned int subctxt),
+ TP_ARGS(dd, uctxt, subctxt),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(unsigned int, ctxt)
+ __field(unsigned int, subctxt)
+ __field(u32, credits)
+ __field(u64, hw_free)
+ __field(void __iomem *, piobase)
+ __field(u16, rcvhdrq_cnt)
+ __field(u64, rcvhdrq_dma)
+ __field(u32, eager_cnt)
+ __field(u64, rcvegr_dma)
+ __field(unsigned int, subctxt_cnt)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->ctxt = uctxt->ctxt;
+ __entry->subctxt = subctxt;
+ __entry->credits = uctxt->sc->credits;
+ __entry->hw_free = le64_to_cpu(*uctxt->sc->hw_free);
+ __entry->piobase = uctxt->sc->base_addr;
+ __entry->rcvhdrq_cnt = get_hdrq_cnt(uctxt);
+ __entry->rcvhdrq_dma = uctxt->rcvhdrq_dma;
+ __entry->eager_cnt = uctxt->egrbufs.alloced;
+ __entry->rcvegr_dma = uctxt->egrbufs.rcvtids[0].dma;
+ __entry->subctxt_cnt = uctxt->subctxt_cnt;
+ ),
+ TP_printk("[%s] ctxt %u:%u " UCTXT_FMT,
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->credits,
+ __entry->hw_free,
+ __entry->piobase,
+ __entry->rcvhdrq_cnt,
+ __entry->rcvhdrq_dma,
+ __entry->eager_cnt,
+ __entry->rcvegr_dma,
+ __entry->subctxt_cnt
+ )
+);
+
+#define CINFO_FMT \
+ "egrtids:%u, egr_size:%u, hdrq_cnt:%u, hdrq_size:%u, sdma_ring_size:%u"
+TRACE_EVENT(hfi1_ctxt_info,
+ TP_PROTO(struct hfi1_devdata *dd, unsigned int ctxt,
+ unsigned int subctxt,
+ struct hfi1_ctxt_info *cinfo),
+ TP_ARGS(dd, ctxt, subctxt, cinfo),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(unsigned int, ctxt)
+ __field(unsigned int, subctxt)
+ __field(u16, egrtids)
+ __field(u16, rcvhdrq_cnt)
+ __field(u16, rcvhdrq_size)
+ __field(u16, sdma_ring_size)
+ __field(u32, rcvegr_size)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->egrtids = cinfo->egrtids;
+ __entry->rcvhdrq_cnt = cinfo->rcvhdrq_cnt;
+ __entry->rcvhdrq_size = cinfo->rcvhdrq_entsize;
+ __entry->sdma_ring_size = cinfo->sdma_ring_size;
+ __entry->rcvegr_size = cinfo->rcvegr_size;
+ ),
+ TP_printk("[%s] ctxt %u:%u " CINFO_FMT,
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->egrtids,
+ __entry->rcvegr_size,
+ __entry->rcvhdrq_cnt,
+ __entry->rcvhdrq_size,
+ __entry->sdma_ring_size
+ )
+);
+
+const char *hfi1_trace_print_rsm_hist(struct trace_seq *p, unsigned int ctxt);
+TRACE_EVENT(ctxt_rsm_hist,
+ TP_PROTO(unsigned int ctxt),
+ TP_ARGS(ctxt),
+ TP_STRUCT__entry(__field(unsigned int, ctxt)),
+ TP_fast_assign(__entry->ctxt = ctxt;),
+ TP_printk("%s", hfi1_trace_print_rsm_hist(p, __entry->ctxt))
+);
+
+#endif /* __HFI1_TRACE_CTXTS_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_ctxts
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/hw/hfi1/trace_dbg.h b/drivers/infiniband/hw/hfi1/trace_dbg.h
new file mode 100644
index 000000000000..58304b91380f
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_dbg.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+* Copyright(c) 2015 - 2018 Intel Corporation.
+*/
+
+#if !defined(__HFI1_TRACE_EXTRA_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_EXTRA_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+
+/*
+ * Note:
+ * This produces a REALLY ugly trace in the console output when the string is
+ * too long.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_dbg
+
+#define MAX_MSG_LEN 512
+
+#pragma GCC diagnostic push
+#ifndef __clang__
+#pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
+#endif
+
+DECLARE_EVENT_CLASS(hfi1_trace_template,
+ TP_PROTO(const char *function, struct va_format *vaf),
+ TP_ARGS(function, vaf),
+ TP_STRUCT__entry(__string(function, function)
+ __vstring(msg, vaf->fmt, vaf->va)
+ ),
+ TP_fast_assign(__assign_str(function);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
+ ),
+ TP_printk("(%s) %s",
+ __get_str(function),
+ __get_str(msg))
+);
+
+#pragma GCC diagnostic pop
+
+/*
+ * It would be nice to macroize __hfi1_trace, but the va_* handling
+ * requires an actual function and cannot live in a macro.
+ */
+#define __hfi1_trace_def(lvl) \
+void __printf(2, 3) __hfi1_trace_##lvl(const char *funct, char *fmt, ...); \
+ \
+DEFINE_EVENT(hfi1_trace_template, hfi1_ ##lvl, \
+ TP_PROTO(const char *function, struct va_format *vaf), \
+ TP_ARGS(function, vaf))
+
+#define __hfi1_trace_fn(lvl) \
+void __printf(2, 3) __hfi1_trace_##lvl(const char *func, char *fmt, ...)\
+{ \
+ struct va_format vaf = { \
+ .fmt = fmt, \
+ }; \
+ va_list args; \
+ \
+ va_start(args, fmt); \
+ vaf.va = &args; \
+ trace_hfi1_ ##lvl(func, &vaf); \
+ va_end(args); \
+ return; \
+}
+
+/*
+ * To create a new trace level simply define it below and as a __hfi1_trace_fn
+ * in trace.c. This will create all the hooks for calling
+ * hfi1_cdbg(LVL, fmt, ...); as well as take care of all
+ * the debugfs stuff.
+ */
+__hfi1_trace_def(AFFINITY);
+__hfi1_trace_def(PKT);
+__hfi1_trace_def(PROC);
+__hfi1_trace_def(SDMA);
+__hfi1_trace_def(LINKVERB);
+__hfi1_trace_def(DEBUG);
+__hfi1_trace_def(SNOOP);
+__hfi1_trace_def(CNTR);
+__hfi1_trace_def(PIO);
+__hfi1_trace_def(DC8051);
+__hfi1_trace_def(FIRMWARE);
+__hfi1_trace_def(RCVCTRL);
+__hfi1_trace_def(TID);
+__hfi1_trace_def(MMU);
+__hfi1_trace_def(IOCTL);
+
+#define hfi1_cdbg(which, fmt, ...) \
+ __hfi1_trace_##which(__func__, fmt, ##__VA_ARGS__)
+
+#define hfi1_dbg(fmt, ...) \
+ hfi1_cdbg(DEBUG, fmt, ##__VA_ARGS__)
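As the comment above the trace level list explains, hooking up a new level
takes one __hfi1_trace_def() here and a matching __hfi1_trace_fn() in trace.c.
A hedged sketch for a hypothetical OPFN level (the level name and the call site
are illustrative only):

	/* trace_dbg.h, next to the other levels */
	__hfi1_trace_def(OPFN);

	/* trace.c, next to the other __hfi1_trace_fn() instances */
	__hfi1_trace_fn(OPFN);

	/* callers can then emit it through the generic helper */
	hfi1_cdbg(OPFN, "negotiated value %d", value);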
+
+/*
+ * Define HFI1_EARLY_DBG at compile time or here to enable early trace
+ * messages. Do not commit a change that enables this.
+ */
+
+#ifdef HFI1_EARLY_DBG
+#define hfi1_dbg_early(fmt, ...) \
+ trace_printk(fmt, ##__VA_ARGS__)
+#else
+#define hfi1_dbg_early(fmt, ...)
+#endif
+
+#endif /* __HFI1_TRACE_EXTRA_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_dbg
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/hw/hfi1/trace_ibhdrs.h b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
new file mode 100644
index 000000000000..b21356abc9ec
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
@@ -0,0 +1,455 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2015 - 2017 Intel Corporation.
+ */
+
+#if !defined(__HFI1_TRACE_IBHDRS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_IBHDRS_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_ibhdrs
+
+#define ib_opcode_name(opcode) { IB_OPCODE_##opcode, #opcode }
+#define show_ib_opcode(opcode) \
+__print_symbolic(opcode, \
+ ib_opcode_name(RC_SEND_FIRST), \
+ ib_opcode_name(RC_SEND_MIDDLE), \
+ ib_opcode_name(RC_SEND_LAST), \
+ ib_opcode_name(RC_SEND_LAST_WITH_IMMEDIATE), \
+ ib_opcode_name(RC_SEND_ONLY), \
+ ib_opcode_name(RC_SEND_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(RC_RDMA_WRITE_FIRST), \
+ ib_opcode_name(RC_RDMA_WRITE_MIDDLE), \
+ ib_opcode_name(RC_RDMA_WRITE_LAST), \
+ ib_opcode_name(RC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
+ ib_opcode_name(RC_RDMA_WRITE_ONLY), \
+ ib_opcode_name(RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(RC_RDMA_READ_REQUEST), \
+ ib_opcode_name(RC_RDMA_READ_RESPONSE_FIRST), \
+ ib_opcode_name(RC_RDMA_READ_RESPONSE_MIDDLE), \
+ ib_opcode_name(RC_RDMA_READ_RESPONSE_LAST), \
+ ib_opcode_name(RC_RDMA_READ_RESPONSE_ONLY), \
+ ib_opcode_name(RC_ACKNOWLEDGE), \
+ ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \
+ ib_opcode_name(RC_COMPARE_SWAP), \
+ ib_opcode_name(RC_FETCH_ADD), \
+ ib_opcode_name(RC_SEND_LAST_WITH_INVALIDATE), \
+ ib_opcode_name(RC_SEND_ONLY_WITH_INVALIDATE), \
+ ib_opcode_name(TID_RDMA_WRITE_REQ), \
+ ib_opcode_name(TID_RDMA_WRITE_RESP), \
+ ib_opcode_name(TID_RDMA_WRITE_DATA), \
+ ib_opcode_name(TID_RDMA_WRITE_DATA_LAST), \
+ ib_opcode_name(TID_RDMA_READ_REQ), \
+ ib_opcode_name(TID_RDMA_READ_RESP), \
+ ib_opcode_name(TID_RDMA_RESYNC), \
+ ib_opcode_name(TID_RDMA_ACK), \
+ ib_opcode_name(UC_SEND_FIRST), \
+ ib_opcode_name(UC_SEND_MIDDLE), \
+ ib_opcode_name(UC_SEND_LAST), \
+ ib_opcode_name(UC_SEND_LAST_WITH_IMMEDIATE), \
+ ib_opcode_name(UC_SEND_ONLY), \
+ ib_opcode_name(UC_SEND_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(UC_RDMA_WRITE_FIRST), \
+ ib_opcode_name(UC_RDMA_WRITE_MIDDLE), \
+ ib_opcode_name(UC_RDMA_WRITE_LAST), \
+ ib_opcode_name(UC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
+ ib_opcode_name(UC_RDMA_WRITE_ONLY), \
+ ib_opcode_name(UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(UD_SEND_ONLY), \
+ ib_opcode_name(UD_SEND_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(CNP))
+
+u8 ibhdr_exhdr_len(struct ib_header *hdr);
+const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode,
+ u8 l4, u32 dest_qpn, u32 src_qpn,
+ void *ehdrs);
+u8 hfi1_trace_opa_hdr_len(struct hfi1_opa_header *opah);
+u8 hfi1_trace_packet_hdr_len(struct hfi1_packet *packet);
+const char *hfi1_trace_get_packet_l4_str(u8 l4);
+void hfi1_trace_parse_9b_bth(struct ib_other_headers *ohdr,
+ u8 *ack, bool *becn, bool *fecn, u8 *mig,
+ u8 *se, u8 *pad, u8 *opcode, u8 *tver,
+ u16 *pkey, u32 *psn, u32 *qpn);
+void hfi1_trace_parse_9b_hdr(struct ib_header *hdr, bool sc5,
+ u8 *lnh, u8 *lver, u8 *sl, u8 *sc,
+ u16 *len, u32 *dlid, u32 *slid);
+void hfi1_trace_parse_16b_bth(struct ib_other_headers *ohdr,
+ u8 *ack, u8 *mig, u8 *opcode,
+ u8 *pad, u8 *se, u8 *tver,
+ u32 *psn, u32 *qpn);
+void hfi1_trace_parse_16b_hdr(struct hfi1_16b_header *hdr,
+ u8 *age, bool *becn, bool *fecn,
+ u8 *l4, u8 *rc, u8 *sc,
+ u16 *entropy, u16 *len, u16 *pkey,
+ u32 *dlid, u32 *slid);
+
+const char *hfi1_trace_fmt_lrh(struct trace_seq *p, bool bypass,
+ u8 age, bool becn, bool fecn, u8 l4,
+ u8 lnh, const char *lnh_name, u8 lver,
+ u8 rc, u8 sc, u8 sl, u16 entropy,
+ u16 len, u16 pkey, u32 dlid, u32 slid);
+
+const char *hfi1_trace_fmt_rest(struct trace_seq *p, bool bypass, u8 l4,
+ u8 ack, bool becn, bool fecn, u8 mig,
+ u8 se, u8 pad, u8 opcode, const char *opname,
+ u8 tver, u16 pkey, u32 psn, u32 qpn,
+ u32 dest_qpn, u32 src_qpn);
+
+const char *hfi1_trace_get_packet_l2_str(u8 l2);
+
+#define __parse_ib_ehdrs(op, l4, dest_qpn, src_qpn, ehdrs) \
+ parse_everbs_hdrs(p, op, l4, dest_qpn, src_qpn, ehdrs)
+
+#define lrh_name(lrh) { HFI1_##lrh, #lrh }
+#define show_lnh(lrh) \
+__print_symbolic(lrh, \
+ lrh_name(LRH_BTH), \
+ lrh_name(LRH_GRH))
+
+DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
+ TP_PROTO(struct hfi1_devdata *dd,
+ struct hfi1_packet *packet,
+ bool sc5),
+ TP_ARGS(dd, packet, sc5),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(u8, etype)
+ __field(u8, ack)
+ __field(u8, age)
+ __field(bool, becn)
+ __field(bool, fecn)
+ __field(u8, l2)
+ __field(u8, l4)
+ __field(u8, lnh)
+ __field(u8, lver)
+ __field(u8, mig)
+ __field(u8, opcode)
+ __field(u8, pad)
+ __field(u8, rc)
+ __field(u8, sc)
+ __field(u8, se)
+ __field(u8, sl)
+ __field(u8, tver)
+ __field(u16, entropy)
+ __field(u16, len)
+ __field(u16, pkey)
+ __field(u32, dlid)
+ __field(u32, psn)
+ __field(u32, qpn)
+ __field(u32, slid)
+ __field(u32, dest_qpn)
+ __field(u32, src_qpn)
+ /* extended headers */
+ __dynamic_array(u8, ehdrs,
+ hfi1_trace_packet_hdr_len(packet))
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd);
+
+ __entry->etype = packet->etype;
+ __entry->l2 = hfi1_16B_get_l2(packet->hdr);
+ __entry->dest_qpn = 0;
+ __entry->src_qpn = 0;
+ if (__entry->etype == RHF_RCV_TYPE_BYPASS) {
+ hfi1_trace_parse_16b_hdr(packet->hdr,
+ &__entry->age,
+ &__entry->becn,
+ &__entry->fecn,
+ &__entry->l4,
+ &__entry->rc,
+ &__entry->sc,
+ &__entry->entropy,
+ &__entry->len,
+ &__entry->pkey,
+ &__entry->dlid,
+ &__entry->slid);
+
+ if (__entry->l4 == OPA_16B_L4_FM) {
+ __entry->opcode = IB_OPCODE_UD_SEND_ONLY;
+ __entry->dest_qpn = hfi1_16B_get_dest_qpn(packet->mgmt);
+ __entry->src_qpn = hfi1_16B_get_src_qpn(packet->mgmt);
+ } else {
+ hfi1_trace_parse_16b_bth(packet->ohdr,
+ &__entry->ack,
+ &__entry->mig,
+ &__entry->opcode,
+ &__entry->pad,
+ &__entry->se,
+ &__entry->tver,
+ &__entry->psn,
+ &__entry->qpn);
+ }
+ } else {
+ __entry->l4 = OPA_16B_L4_9B;
+ hfi1_trace_parse_9b_hdr(packet->hdr, sc5,
+ &__entry->lnh,
+ &__entry->lver,
+ &__entry->sl,
+ &__entry->sc,
+ &__entry->len,
+ &__entry->dlid,
+ &__entry->slid);
+
+ hfi1_trace_parse_9b_bth(packet->ohdr,
+ &__entry->ack,
+ &__entry->becn,
+ &__entry->fecn,
+ &__entry->mig,
+ &__entry->se,
+ &__entry->pad,
+ &__entry->opcode,
+ &__entry->tver,
+ &__entry->pkey,
+ &__entry->psn,
+ &__entry->qpn);
+ }
+ /* extended headers */
+ if (__entry->l4 != OPA_16B_L4_FM)
+ memcpy(__get_dynamic_array(ehdrs),
+ &packet->ohdr->u,
+ __get_dynamic_array_len(ehdrs));
+ ),
+ TP_printk("[%s] (%s) %s %s hlen:%d %s",
+ __get_str(dev),
+ __entry->etype != RHF_RCV_TYPE_BYPASS ?
+ show_packettype(__entry->etype) :
+ hfi1_trace_get_packet_l2_str(
+ __entry->l2),
+ hfi1_trace_fmt_lrh(p,
+ __entry->etype ==
+ RHF_RCV_TYPE_BYPASS,
+ __entry->age,
+ __entry->becn,
+ __entry->fecn,
+ __entry->l4,
+ __entry->lnh,
+ show_lnh(__entry->lnh),
+ __entry->lver,
+ __entry->rc,
+ __entry->sc,
+ __entry->sl,
+ __entry->entropy,
+ __entry->len,
+ __entry->pkey,
+ __entry->dlid,
+ __entry->slid),
+ hfi1_trace_fmt_rest(p,
+ __entry->etype ==
+ RHF_RCV_TYPE_BYPASS,
+ __entry->l4,
+ __entry->ack,
+ __entry->becn,
+ __entry->fecn,
+ __entry->mig,
+ __entry->se,
+ __entry->pad,
+ __entry->opcode,
+ show_ib_opcode(__entry->opcode),
+ __entry->tver,
+ __entry->pkey,
+ __entry->psn,
+ __entry->qpn,
+ __entry->dest_qpn,
+ __entry->src_qpn),
+ /* extended headers */
+ __get_dynamic_array_len(ehdrs),
+ __parse_ib_ehdrs(
+ __entry->opcode,
+ __entry->l4,
+ __entry->dest_qpn,
+ __entry->src_qpn,
+ (void *)__get_dynamic_array(ehdrs))
+ )
+);
+
+DEFINE_EVENT(hfi1_input_ibhdr_template, input_ibhdr,
+ TP_PROTO(struct hfi1_devdata *dd,
+ struct hfi1_packet *packet, bool sc5),
+ TP_ARGS(dd, packet, sc5));
+
+DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
+ TP_PROTO(struct hfi1_devdata *dd,
+ struct hfi1_opa_header *opah, bool sc5),
+ TP_ARGS(dd, opah, sc5),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(u8, hdr_type)
+ __field(u8, ack)
+ __field(u8, age)
+ __field(bool, becn)
+ __field(bool, fecn)
+ __field(u8, l4)
+ __field(u8, lnh)
+ __field(u8, lver)
+ __field(u8, mig)
+ __field(u8, opcode)
+ __field(u8, pad)
+ __field(u8, rc)
+ __field(u8, sc)
+ __field(u8, se)
+ __field(u8, sl)
+ __field(u8, tver)
+ __field(u16, entropy)
+ __field(u16, len)
+ __field(u16, pkey)
+ __field(u32, dlid)
+ __field(u32, psn)
+ __field(u32, qpn)
+ __field(u32, slid)
+ __field(u32, dest_qpn)
+ __field(u32, src_qpn)
+ /* extended headers */
+ __dynamic_array(u8, ehdrs,
+ hfi1_trace_opa_hdr_len(opah))
+ ),
+ TP_fast_assign(
+ struct ib_other_headers *ohdr;
+
+ DD_DEV_ASSIGN(dd);
+
+ __entry->hdr_type = opah->hdr_type;
+ __entry->dest_qpn = 0;
+ __entry->src_qpn = 0;
+ if (__entry->hdr_type) {
+ hfi1_trace_parse_16b_hdr(&opah->opah,
+ &__entry->age,
+ &__entry->becn,
+ &__entry->fecn,
+ &__entry->l4,
+ &__entry->rc,
+ &__entry->sc,
+ &__entry->entropy,
+ &__entry->len,
+ &__entry->pkey,
+ &__entry->dlid,
+ &__entry->slid);
+
+ if (__entry->l4 == OPA_16B_L4_FM) {
+ ohdr = NULL;
+ __entry->opcode = IB_OPCODE_UD_SEND_ONLY;
+ __entry->dest_qpn = hfi1_16B_get_dest_qpn(&opah->opah.u.mgmt);
+ __entry->src_qpn = hfi1_16B_get_src_qpn(&opah->opah.u.mgmt);
+ } else {
+ if (__entry->l4 == OPA_16B_L4_IB_LOCAL)
+ ohdr = &opah->opah.u.oth;
+ else
+ ohdr = &opah->opah.u.l.oth;
+ hfi1_trace_parse_16b_bth(ohdr,
+ &__entry->ack,
+ &__entry->mig,
+ &__entry->opcode,
+ &__entry->pad,
+ &__entry->se,
+ &__entry->tver,
+ &__entry->psn,
+ &__entry->qpn);
+ }
+ } else {
+ __entry->l4 = OPA_16B_L4_9B;
+ hfi1_trace_parse_9b_hdr(&opah->ibh, sc5,
+ &__entry->lnh,
+ &__entry->lver,
+ &__entry->sl,
+ &__entry->sc,
+ &__entry->len,
+ &__entry->dlid,
+ &__entry->slid);
+ if (__entry->lnh == HFI1_LRH_BTH)
+ ohdr = &opah->ibh.u.oth;
+ else
+ ohdr = &opah->ibh.u.l.oth;
+ hfi1_trace_parse_9b_bth(ohdr,
+ &__entry->ack,
+ &__entry->becn,
+ &__entry->fecn,
+ &__entry->mig,
+ &__entry->se,
+ &__entry->pad,
+ &__entry->opcode,
+ &__entry->tver,
+ &__entry->pkey,
+ &__entry->psn,
+ &__entry->qpn);
+ }
+
+ /* extended headers */
+ if (__entry->l4 != OPA_16B_L4_FM)
+ memcpy(__get_dynamic_array(ehdrs),
+ &ohdr->u, __get_dynamic_array_len(ehdrs));
+ ),
+ TP_printk("[%s] (%s) %s %s hlen:%d %s",
+ __get_str(dev),
+ hfi1_trace_get_packet_l4_str(__entry->l4),
+ hfi1_trace_fmt_lrh(p,
+ !!__entry->hdr_type,
+ __entry->age,
+ __entry->becn,
+ __entry->fecn,
+ __entry->l4,
+ __entry->lnh,
+ show_lnh(__entry->lnh),
+ __entry->lver,
+ __entry->rc,
+ __entry->sc,
+ __entry->sl,
+ __entry->entropy,
+ __entry->len,
+ __entry->pkey,
+ __entry->dlid,
+ __entry->slid),
+ hfi1_trace_fmt_rest(p,
+ !!__entry->hdr_type,
+ __entry->l4,
+ __entry->ack,
+ __entry->becn,
+ __entry->fecn,
+ __entry->mig,
+ __entry->se,
+ __entry->pad,
+ __entry->opcode,
+ show_ib_opcode(__entry->opcode),
+ __entry->tver,
+ __entry->pkey,
+ __entry->psn,
+ __entry->qpn,
+ __entry->dest_qpn,
+ __entry->src_qpn),
+ /* extended headers */
+ __get_dynamic_array_len(ehdrs),
+ __parse_ib_ehdrs(
+ __entry->opcode,
+ __entry->l4,
+ __entry->dest_qpn,
+ __entry->src_qpn,
+ (void *)__get_dynamic_array(ehdrs))
+ )
+);
+
+DEFINE_EVENT(hfi1_output_ibhdr_template, pio_output_ibhdr,
+ TP_PROTO(struct hfi1_devdata *dd,
+ struct hfi1_opa_header *opah, bool sc5),
+ TP_ARGS(dd, opah, sc5));
+
+DEFINE_EVENT(hfi1_output_ibhdr_template, ack_output_ibhdr,
+ TP_PROTO(struct hfi1_devdata *dd,
+ struct hfi1_opa_header *opah, bool sc5),
+ TP_ARGS(dd, opah, sc5));
+
+DEFINE_EVENT(hfi1_output_ibhdr_template, sdma_output_ibhdr,
+ TP_PROTO(struct hfi1_devdata *dd,
+ struct hfi1_opa_header *opah, bool sc5),
+ TP_ARGS(dd, opah, sc5));
+
+#endif /* __HFI1_TRACE_IBHDRS_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_ibhdrs
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/hw/hfi1/trace_iowait.h b/drivers/infiniband/hw/hfi1/trace_iowait.h
new file mode 100644
index 000000000000..27f4334ece2b
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_iowait.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ */
+#if !defined(__HFI1_TRACE_IOWAIT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_IOWAIT_H
+
+#include <linux/tracepoint.h>
+#include "iowait.h"
+#include "verbs.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_iowait
+
+DECLARE_EVENT_CLASS(hfi1_iowait_template,
+ TP_PROTO(struct iowait *wait, u32 flag),
+ TP_ARGS(wait, flag),
+ TP_STRUCT__entry(/* entry */
+ __field(unsigned long, addr)
+ __field(unsigned long, flags)
+ __field(u32, flag)
+ __field(u32, qpn)
+ ),
+ TP_fast_assign(/* assign */
+ __entry->addr = (unsigned long)wait;
+ __entry->flags = wait->flags;
+ __entry->flag = (1 << flag);
+ __entry->qpn = iowait_to_qp(wait)->ibqp.qp_num;
+ ),
+ TP_printk(/* print */
+ "iowait 0x%lx qp %u flags 0x%lx flag 0x%x",
+ __entry->addr,
+ __entry->qpn,
+ __entry->flags,
+ __entry->flag
+ )
+ );
+
+DEFINE_EVENT(hfi1_iowait_template, hfi1_iowait_set,
+ TP_PROTO(struct iowait *wait, u32 flag),
+ TP_ARGS(wait, flag));
+
+DEFINE_EVENT(hfi1_iowait_template, hfi1_iowait_clear,
+ TP_PROTO(struct iowait *wait, u32 flag),
+ TP_ARGS(wait, flag));
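A hedged usage sketch: each DEFINE_EVENT() above generates a trace_<name>()
call, so driver code would emit these as, for example:

	trace_hfi1_iowait_set(&priv->s_iowait, IOWAIT_PENDING_TID);
	trace_hfi1_iowait_clear(&priv->s_iowait, IOWAIT_PENDING_TID);

The flag argument is a bit index; TP_fast_assign() records it as 1 << flag.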
+
+#endif /* __HFI1_TRACE_IOWAIT_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_iowait
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/hw/hfi1/trace_misc.h b/drivers/infiniband/hw/hfi1/trace_misc.h
new file mode 100644
index 000000000000..8dc46b6891df
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_misc.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+* Copyright(c) 2015, 2016 Intel Corporation.
+*/
+
+#if !defined(__HFI1_TRACE_MISC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_MISC_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_misc
+
+TRACE_EVENT(hfi1_interrupt,
+ TP_PROTO(struct hfi1_devdata *dd, const struct is_table *is_entry,
+ int src),
+ TP_ARGS(dd, is_entry, src),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __array(char, buf, 64)
+ __field(int, src)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ is_entry->is_name(__entry->buf, 64,
+ src - is_entry->start);
+ __entry->src = src;
+ ),
+ TP_printk("[%s] source: %s [%d]", __get_str(dev), __entry->buf,
+ __entry->src)
+);
+
+DECLARE_EVENT_CLASS(
+ hfi1_csr_template,
+ TP_PROTO(void __iomem *addr, u64 value),
+ TP_ARGS(addr, value),
+ TP_STRUCT__entry(
+ __field(void __iomem *, addr)
+ __field(u64, value)
+ ),
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->value = value;
+ ),
+ TP_printk("addr %p value %llx", __entry->addr, __entry->value)
+);
+
+DEFINE_EVENT(
+ hfi1_csr_template, hfi1_write_rcvarray,
+ TP_PROTO(void __iomem *addr, u64 value),
+ TP_ARGS(addr, value));
+
+#ifdef CONFIG_FAULT_INJECTION
+TRACE_EVENT(hfi1_fault_opcode,
+ TP_PROTO(struct rvt_qp *qp, u8 opcode),
+ TP_ARGS(qp, opcode),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u8, opcode)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->opcode = opcode;
+ ),
+ TP_printk("[%s] qpn 0x%x opcode 0x%x",
+ __get_str(dev), __entry->qpn, __entry->opcode)
+);
+
+TRACE_EVENT(hfi1_fault_packet,
+ TP_PROTO(struct hfi1_packet *packet),
+ TP_ARGS(packet),
+ TP_STRUCT__entry(DD_DEV_ENTRY(packet->rcd->ppd->dd)
+ __field(u64, eflags)
+ __field(u32, ctxt)
+ __field(u32, hlen)
+ __field(u32, tlen)
+ __field(u32, updegr)
+ __field(u32, etail)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(packet->rcd->ppd->dd);
+ __entry->eflags = rhf_err_flags(packet->rhf);
+ __entry->ctxt = packet->rcd->ctxt;
+ __entry->hlen = packet->hlen;
+ __entry->tlen = packet->tlen;
+ __entry->updegr = packet->updegr;
+ __entry->etail = rhf_egr_index(packet->rhf);
+ ),
+ TP_printk(
+ "[%s] ctxt %d eflags 0x%llx hlen %d tlen %d updegr %d etail %d",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->eflags,
+ __entry->hlen,
+ __entry->tlen,
+ __entry->updegr,
+ __entry->etail
+ )
+);
+#endif
+
+#endif /* __HFI1_TRACE_MISC_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_misc
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/hw/hfi1/trace_mmu.h b/drivers/infiniband/hw/hfi1/trace_mmu.h
new file mode 100644
index 000000000000..5a9dfd85e7f5
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_mmu.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2017 Intel Corporation.
+ */
+
+#if !defined(__HFI1_TRACE_MMU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_MMU_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_mmu
+
+DECLARE_EVENT_CLASS(hfi1_mmu_rb_template,
+ TP_PROTO(struct mmu_rb_node *node),
+ TP_ARGS(node),
+ TP_STRUCT__entry(__field(unsigned long, addr)
+ __field(unsigned long, len)
+ __field(unsigned int, refcount)
+ ),
+ TP_fast_assign(__entry->addr = node->addr;
+ __entry->len = node->len;
+ __entry->refcount = kref_read(&node->refcount);
+ ),
+ TP_printk("MMU node addr 0x%lx, len %lu, refcount %u",
+ __entry->addr,
+ __entry->len,
+ __entry->refcount
+ )
+);
+
+DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_rb_insert,
+ TP_PROTO(struct mmu_rb_node *node),
+ TP_ARGS(node));
+
+TRACE_EVENT(hfi1_mmu_rb_search,
+ TP_PROTO(unsigned long addr, unsigned long len),
+ TP_ARGS(addr, len),
+ TP_STRUCT__entry(__field(unsigned long, addr)
+ __field(unsigned long, len)
+ ),
+ TP_fast_assign(__entry->addr = addr;
+ __entry->len = len;
+ ),
+ TP_printk("MMU node addr 0x%lx, len %lu",
+ __entry->addr,
+ __entry->len
+ )
+);
+
+DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_mem_invalidate,
+ TP_PROTO(struct mmu_rb_node *node),
+ TP_ARGS(node));
+
+DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_rb_evict,
+ TP_PROTO(struct mmu_rb_node *node),
+ TP_ARGS(node));
+
+DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_release_node,
+ TP_PROTO(struct mmu_rb_node *node),
+ TP_ARGS(node));
+
+#endif /* __HFI1_TRACE_MMU_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_mmu
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/hw/hfi1/trace_rc.h b/drivers/infiniband/hw/hfi1/trace_rc.h
new file mode 100644
index 000000000000..fa254f9b9c42
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_rc.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+* Copyright(c) 2015, 2016, 2017 Intel Corporation.
+*/
+
+#if !defined(__HFI1_TRACE_RC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_RC_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_rc
+
+DECLARE_EVENT_CLASS(hfi1_rc_template,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, s_flags)
+ __field(u32, psn)
+ __field(u32, s_psn)
+ __field(u32, s_next_psn)
+ __field(u32, s_sending_psn)
+ __field(u32, s_sending_hpsn)
+ __field(u32, r_psn)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->s_flags = qp->s_flags;
+ __entry->psn = psn;
+ __entry->s_psn = qp->s_psn;
+ __entry->s_next_psn = qp->s_next_psn;
+ __entry->s_sending_psn = qp->s_sending_psn;
+ __entry->s_sending_hpsn = qp->s_sending_hpsn;
+ __entry->r_psn = qp->r_psn;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x s_flags 0x%x psn 0x%x s_psn 0x%x s_next_psn 0x%x s_sending_psn 0x%x sending_hpsn 0x%x r_psn 0x%x",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->s_flags,
+ __entry->psn,
+ __entry->s_psn,
+ __entry->s_next_psn,
+ __entry->s_sending_psn,
+ __entry->s_sending_hpsn,
+ __entry->r_psn
+ )
+);
+
+DEFINE_EVENT(hfi1_rc_template, hfi1_sendcomplete,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(hfi1_rc_template, hfi1_ack,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(hfi1_rc_template, hfi1_rcv_error,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_rc_template, hfi1_rc_completion,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DECLARE_EVENT_CLASS(/* rc_ack */
+ hfi1_rc_ack_template,
+ TP_PROTO(struct rvt_qp *qp, u32 aeth, u32 psn,
+ struct rvt_swqe *wqe),
+ TP_ARGS(qp, aeth, psn, wqe),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, aeth)
+ __field(u32, psn)
+ __field(u8, opcode)
+ __field(u32, spsn)
+ __field(u32, lpsn)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->aeth = aeth;
+ __entry->psn = psn;
+ __entry->opcode = wqe->wr.opcode;
+ __entry->spsn = wqe->psn;
+ __entry->lpsn = wqe->lpsn;
+ ),
+ TP_printk(/* print */
+ "[%s] qpn 0x%x aeth 0x%x psn 0x%x opcode 0x%x spsn 0x%x lpsn 0x%x",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->aeth,
+ __entry->psn,
+ __entry->opcode,
+ __entry->spsn,
+ __entry->lpsn
+ )
+);
+
+DEFINE_EVENT(/* do_rc_ack */
+ hfi1_rc_ack_template, hfi1_rc_ack_do,
+ TP_PROTO(struct rvt_qp *qp, u32 aeth, u32 psn,
+ struct rvt_swqe *wqe),
+ TP_ARGS(qp, aeth, psn, wqe)
+);
+
+#endif /* __HFI1_TRACE_RC_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_rc
+#include <trace/define_trace.h>
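
Each DEFINE_EVENT(hfi1_rc_template, <name>, ...) above stamps out an individually switchable event under hfi1_rc/ that reuses the template's fields and print format, and the header generates a matching trace_<name>() helper for the driver to call. A hedged call-site sketch (the surrounding function and the "trace.h" umbrella include are assumptions; only the trace_hfi1_sendcomplete() helper itself comes from this header):

#include "trace.h"	/* assumed hfi1 umbrella header pulling in trace_rc.h */

static void rc_send_complete_example(struct rvt_qp *qp, u32 psn)
{
	/* compiles to a no-op unless hfi1_rc:hfi1_sendcomplete is enabled */
	trace_hfi1_sendcomplete(qp, psn);
}
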
diff --git a/drivers/infiniband/hw/hfi1/trace_rx.h b/drivers/infiniband/hw/hfi1/trace_rx.h
new file mode 100644
index 000000000000..8d5e12fe88a5
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_rx.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2015 - 2018 Intel Corporation.
+ */
+
+#if !defined(__HFI1_TRACE_RX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_RX_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+
+#define tidtype_name(type) { PT_##type, #type }
+#define show_tidtype(type) \
+__print_symbolic(type, \
+ tidtype_name(EXPECTED), \
+ tidtype_name(EAGER), \
+ tidtype_name(INVALID))
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_rx
+
+TRACE_EVENT(hfi1_rcvhdr,
+ TP_PROTO(struct hfi1_packet *packet),
+ TP_ARGS(packet),
+ TP_STRUCT__entry(DD_DEV_ENTRY(packet->rcd->dd)
+ __field(u64, eflags)
+ __field(u32, ctxt)
+ __field(u32, etype)
+ __field(u32, hlen)
+ __field(u32, tlen)
+ __field(u32, updegr)
+ __field(u32, etail)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(packet->rcd->dd);
+ __entry->eflags = rhf_err_flags(packet->rhf);
+ __entry->ctxt = packet->rcd->ctxt;
+ __entry->etype = packet->etype;
+ __entry->hlen = packet->hlen;
+ __entry->tlen = packet->tlen;
+ __entry->updegr = packet->updegr;
+ __entry->etail = rhf_egr_index(packet->rhf);
+ ),
+ TP_printk(
+ "[%s] ctxt %d eflags 0x%llx etype %d,%s hlen %d tlen %d updegr %d etail %d",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->eflags,
+ __entry->etype, show_packettype(__entry->etype),
+ __entry->hlen,
+ __entry->tlen,
+ __entry->updegr,
+ __entry->etail
+ )
+);
+
+TRACE_EVENT(hfi1_receive_interrupt,
+ TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd),
+ TP_ARGS(dd, rcd),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(u32, ctxt)
+ __field(u8, slow_path)
+ __field(u8, dma_rtail)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->ctxt = rcd->ctxt;
+ __entry->slow_path = hfi1_is_slowpath(rcd);
+ __entry->dma_rtail = get_dma_rtail_setting(rcd);
+ ),
+ TP_printk("[%s] ctxt %d SlowPath: %d DmaRtail: %d",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->slow_path,
+ __entry->dma_rtail
+ )
+);
+
+TRACE_EVENT(hfi1_mmu_invalidate,
+ TP_PROTO(unsigned int ctxt, u16 subctxt, const char *type,
+ unsigned long start, unsigned long end),
+ TP_ARGS(ctxt, subctxt, type, start, end),
+ TP_STRUCT__entry(
+ __field(unsigned int, ctxt)
+ __field(u16, subctxt)
+ __string(type, type)
+ __field(unsigned long, start)
+ __field(unsigned long, end)
+ ),
+ TP_fast_assign(
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __assign_str(type);
+ __entry->start = start;
+ __entry->end = end;
+ ),
+ TP_printk("[%3u:%02u] MMU Invalidate (%s) 0x%lx - 0x%lx",
+ __entry->ctxt,
+ __entry->subctxt,
+ __get_str(type),
+ __entry->start,
+ __entry->end
+ )
+ );
+
+#endif /* __HFI1_TRACE_RX_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_rx
+#include <trace/define_trace.h>
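
hfi1_mmu_invalidate above records its type argument with the dynamic-string helpers instead of a fixed-size buffer: __string() reserves ring-buffer space sized from the source string, __assign_str() copies it at trace time, and __get_str() reads it back when the event is formatted. A minimal illustration with a hypothetical event name, using the single-argument __assign_str() form already used throughout this patch:

TRACE_EVENT(example_string_field,
	TP_PROTO(const char *type),
	TP_ARGS(type),
	TP_STRUCT__entry(__string(type, type)),	/* size taken from the source string */
	TP_fast_assign(__assign_str(type);),	/* copy into the ring-buffer entry */
	TP_printk("type=%s", __get_str(type))	/* read back at format time */
);
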
diff --git a/drivers/infiniband/hw/hfi1/trace_tid.h b/drivers/infiniband/hw/hfi1/trace_tid.h
new file mode 100644
index 000000000000..e358f5b885fa
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_tid.h
@@ -0,0 +1,1642 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ */
+#if !defined(__HFI1_TRACE_TID_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_TID_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+
+#define tidtype_name(type) { PT_##type, #type }
+#define show_tidtype(type) \
+__print_symbolic(type, \
+ tidtype_name(EXPECTED), \
+ tidtype_name(EAGER), \
+ tidtype_name(INVALID))
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_tid
+
+u8 hfi1_trace_get_tid_ctrl(u32 ent);
+u16 hfi1_trace_get_tid_len(u32 ent);
+u16 hfi1_trace_get_tid_idx(u32 ent);
+
+#define OPFN_PARAM_PRN "[%s] qpn 0x%x %s OPFN: qp 0x%x, max read %u, " \
+ "max write %u, max length %u, jkey 0x%x timeout %u " \
+ "urg %u"
+
+#define TID_FLOW_PRN "[%s] qpn 0x%x flow %d: idx %d resp_ib_psn 0x%x " \
+ "generation 0x%x fpsn 0x%x-%x r_next_psn 0x%x " \
+ "ib_psn 0x%x-%x npagesets %u tnode_cnt %u " \
+ "tidcnt %u tid_idx %u tid_offset %u length %u sent %u"
+
+#define TID_NODE_PRN "[%s] qpn 0x%x %s idx %u grp base 0x%x map 0x%x " \
+ "used %u cnt %u"
+
+#define RSP_INFO_PRN "[%s] qpn 0x%x state 0x%x s_state 0x%x psn 0x%x " \
+ "r_psn 0x%x r_state 0x%x r_flags 0x%x " \
+ "r_head_ack_queue %u s_tail_ack_queue %u " \
+ "s_acked_ack_queue %u s_ack_state 0x%x " \
+ "s_nak_state 0x%x s_flags 0x%x ps_flags 0x%x " \
+ "iow_flags 0x%lx"
+
+#define SENDER_INFO_PRN "[%s] qpn 0x%x state 0x%x s_cur %u s_tail %u " \
+ "s_head %u s_acked %u s_last %u s_psn 0x%x " \
+ "s_last_psn 0x%x s_flags 0x%x ps_flags 0x%x " \
+ "iow_flags 0x%lx s_state 0x%x s_num_rd %u s_retry %u"
+
+#define TID_READ_SENDER_PRN "[%s] qpn 0x%x newreq %u tid_r_reqs %u " \
+ "tid_r_comp %u pending_tid_r_segs %u " \
+ "s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx " \
+ "s_state 0x%x hw_flow_index %u generation 0x%x " \
+ "fpsn 0x%x"
+
+#define TID_REQ_PRN "[%s] qpn 0x%x newreq %u opcode 0x%x psn 0x%x lpsn 0x%x " \
+ "cur_seg %u comp_seg %u ack_seg %u alloc_seg %u " \
+ "total_segs %u setup_head %u clear_tail %u flow_idx %u " \
+ "acked_tail %u state %u r_ack_psn 0x%x r_flow_psn 0x%x " \
+ "r_last_ackd 0x%x s_next_psn 0x%x"
+
+#define RCV_ERR_PRN "[%s] qpn 0x%x s_flags 0x%x state 0x%x " \
+ "s_acked_ack_queue %u s_tail_ack_queue %u " \
+ "r_head_ack_queue %u opcode 0x%x psn 0x%x r_psn 0x%x " \
+ " diff %d"
+
+#define TID_WRITE_RSPDR_PRN "[%s] qpn 0x%x r_tid_head %u r_tid_tail %u " \
+ "r_tid_ack %u r_tid_alloc %u alloc_w_segs %u " \
+ "pending_tid_w_segs %u sync_pt %s " \
+ "ps_nak_psn 0x%x ps_nak_state 0x%x " \
+ "prnr_nak_state 0x%x hw_flow_index %u generation "\
+ "0x%x fpsn 0x%x resync %s" \
+ "r_next_psn_kdeth 0x%x"
+
+#define TID_WRITE_SENDER_PRN "[%s] qpn 0x%x newreq %u s_tid_cur %u " \
+ "s_tid_tail %u s_tid_head %u " \
+ "pending_tid_w_resp %u n_requests %u " \
+ "n_tid_requests %u s_flags 0x%x ps_flags 0x%x "\
+ "iow_flags 0x%lx s_state 0x%x s_retry %u"
+
+#define KDETH_EFLAGS_ERR_PRN "[%s] qpn 0x%x TID ERR: RcvType 0x%x " \
+ "RcvTypeError 0x%x PSN 0x%x"
+
+DECLARE_EVENT_CLASS(/* class */
+ hfi1_exp_tid_reg_unreg,
+ TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
+ unsigned long va, unsigned long pa, dma_addr_t dma),
+ TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
+ TP_STRUCT__entry(/* entry */
+ __field(unsigned int, ctxt)
+ __field(u16, subctxt)
+ __field(u32, rarr)
+ __field(u32, npages)
+ __field(unsigned long, va)
+ __field(unsigned long, pa)
+ __field(dma_addr_t, dma)
+ ),
+ TP_fast_assign(/* assign */
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->rarr = rarr;
+ __entry->npages = npages;
+ __entry->va = va;
+ __entry->pa = pa;
+ __entry->dma = dma;
+ ),
+ TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx",
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->rarr,
+ __entry->npages,
+ __entry->pa,
+ __entry->va,
+ __entry->dma
+ )
+);
+
+DEFINE_EVENT(/* exp_tid_unreg */
+ hfi1_exp_tid_reg_unreg, hfi1_exp_tid_unreg,
+ TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
+ unsigned long va, unsigned long pa, dma_addr_t dma),
+ TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma)
+);
+
+DEFINE_EVENT(/* exp_tid_reg */
+ hfi1_exp_tid_reg_unreg, hfi1_exp_tid_reg,
+ TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
+ unsigned long va, unsigned long pa, dma_addr_t dma),
+ TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma)
+);
+
+TRACE_EVENT(/* put_tid */
+ hfi1_put_tid,
+ TP_PROTO(struct hfi1_devdata *dd,
+ u32 index, u32 type, unsigned long pa, u16 order),
+ TP_ARGS(dd, index, type, pa, order),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd)
+ __field(unsigned long, pa)
+ __field(u32, index)
+ __field(u32, type)
+ __field(u16, order)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd);
+ __entry->pa = pa;
+ __entry->index = index;
+ __entry->type = type;
+ __entry->order = order;
+ ),
+ TP_printk("[%s] type %s pa %lx index %u order %u",
+ __get_str(dev),
+ show_tidtype(__entry->type),
+ __entry->pa,
+ __entry->index,
+ __entry->order
+ )
+);
+
+TRACE_EVENT(/* exp_tid_inval */
+ hfi1_exp_tid_inval,
+ TP_PROTO(unsigned int ctxt, u16 subctxt, unsigned long va, u32 rarr,
+ u32 npages, dma_addr_t dma),
+ TP_ARGS(ctxt, subctxt, va, rarr, npages, dma),
+ TP_STRUCT__entry(/* entry */
+ __field(unsigned int, ctxt)
+ __field(u16, subctxt)
+ __field(unsigned long, va)
+ __field(u32, rarr)
+ __field(u32, npages)
+ __field(dma_addr_t, dma)
+ ),
+ TP_fast_assign(/* assign */
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->va = va;
+ __entry->rarr = rarr;
+ __entry->npages = npages;
+ __entry->dma = dma;
+ ),
+ TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx dma: 0x%llx",
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->rarr,
+ __entry->npages,
+ __entry->va,
+ __entry->dma
+ )
+);
+
+DECLARE_EVENT_CLASS(/* opfn_state */
+ hfi1_opfn_state_template,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u16, requested)
+ __field(u16, completed)
+ __field(u8, curr)
+ ),
+ TP_fast_assign(/* assign */
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->requested = priv->opfn.requested;
+ __entry->completed = priv->opfn.completed;
+ __entry->curr = priv->opfn.curr;
+ ),
+ TP_printk(/* print */
+ "[%s] qpn 0x%x requested 0x%x completed 0x%x curr 0x%x",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->requested,
+ __entry->completed,
+ __entry->curr
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_opfn_state_template, hfi1_opfn_state_conn_request,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_opfn_state_template, hfi1_opfn_state_sched_conn_request,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_opfn_state_template, hfi1_opfn_state_conn_response,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_opfn_state_template, hfi1_opfn_state_conn_reply,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_opfn_state_template, hfi1_opfn_state_conn_error,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DECLARE_EVENT_CLASS(/* opfn_data */
+ hfi1_opfn_data_template,
+ TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data),
+ TP_ARGS(qp, capcode, data),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, state)
+ __field(u8, capcode)
+ __field(u64, data)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->state = qp->state;
+ __entry->capcode = capcode;
+ __entry->data = data;
+ ),
+ TP_printk(/* printk */
+ "[%s] qpn 0x%x (state 0x%x) Capcode %u data 0x%llx",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->state,
+ __entry->capcode,
+ __entry->data
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_opfn_data_template, hfi1_opfn_data_conn_request,
+ TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data),
+ TP_ARGS(qp, capcode, data)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_opfn_data_template, hfi1_opfn_data_conn_response,
+ TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data),
+ TP_ARGS(qp, capcode, data)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_opfn_data_template, hfi1_opfn_data_conn_reply,
+ TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data),
+ TP_ARGS(qp, capcode, data)
+);
+
+DECLARE_EVENT_CLASS(/* opfn_param */
+ hfi1_opfn_param_template,
+ TP_PROTO(struct rvt_qp *qp, char remote,
+ struct tid_rdma_params *param),
+ TP_ARGS(qp, remote, param),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(char, remote)
+ __field(u32, param_qp)
+ __field(u32, max_len)
+ __field(u16, jkey)
+ __field(u8, max_read)
+ __field(u8, max_write)
+ __field(u8, timeout)
+ __field(u8, urg)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->remote = remote;
+ __entry->param_qp = param->qp;
+ __entry->max_len = param->max_len;
+ __entry->jkey = param->jkey;
+ __entry->max_read = param->max_read;
+ __entry->max_write = param->max_write;
+ __entry->timeout = param->timeout;
+ __entry->urg = param->urg;
+ ),
+ TP_printk(/* print */
+ OPFN_PARAM_PRN,
+ __get_str(dev),
+ __entry->qpn,
+ __entry->remote ? "remote" : "local",
+ __entry->param_qp,
+ __entry->max_read,
+ __entry->max_write,
+ __entry->max_len,
+ __entry->jkey,
+ __entry->timeout,
+ __entry->urg
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_opfn_param_template, hfi1_opfn_param,
+ TP_PROTO(struct rvt_qp *qp, char remote,
+ struct tid_rdma_params *param),
+ TP_ARGS(qp, remote, param)
+);
+
+DECLARE_EVENT_CLASS(/* msg */
+ hfi1_msg_template,
+ TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
+ TP_ARGS(qp, msg, more),
+ TP_STRUCT__entry(/* entry */
+ __field(u32, qpn)
+ __string(msg, msg)
+ __field(u64, more)
+ ),
+ TP_fast_assign(/* assign */
+ __entry->qpn = qp ? qp->ibqp.qp_num : 0;
+ __assign_str(msg);
+ __entry->more = more;
+ ),
+ TP_printk(/* print */
+ "qpn 0x%x %s 0x%llx",
+ __entry->qpn,
+ __get_str(msg),
+ __entry->more
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_msg_template, hfi1_msg_opfn_conn_request,
+ TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
+ TP_ARGS(qp, msg, more)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_msg_template, hfi1_msg_opfn_conn_error,
+ TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
+ TP_ARGS(qp, msg, more)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_msg_template, hfi1_msg_alloc_tids,
+ TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
+ TP_ARGS(qp, msg, more)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_msg_template, hfi1_msg_tid_restart_req,
+ TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
+ TP_ARGS(qp, msg, more)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_msg_template, hfi1_msg_handle_kdeth_eflags,
+ TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
+ TP_ARGS(qp, msg, more)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_msg_template, hfi1_msg_tid_timeout,
+ TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
+ TP_ARGS(qp, msg, more)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_msg_template, hfi1_msg_tid_retry_timeout,
+ TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
+ TP_ARGS(qp, msg, more)
+);
+
+DECLARE_EVENT_CLASS(/* tid_flow_page */
+ hfi1_tid_flow_page_template,
+ TP_PROTO(struct rvt_qp *qp, struct tid_rdma_flow *flow, u32 index,
+ char mtu8k, char v1, void *vaddr),
+ TP_ARGS(qp, flow, index, mtu8k, v1, vaddr),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(char, mtu8k)
+ __field(char, v1)
+ __field(u32, index)
+ __field(u64, page)
+ __field(u64, vaddr)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->mtu8k = mtu8k;
+ __entry->v1 = v1;
+ __entry->index = index;
+ __entry->page = vaddr ? (u64)virt_to_page(vaddr) : 0ULL;
+ __entry->vaddr = (u64)vaddr;
+ ),
+ TP_printk(/* print */
+ "[%s] qpn 0x%x page[%u]: page 0x%llx %s 0x%llx",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->index,
+ __entry->page,
+ __entry->mtu8k ? (__entry->v1 ? "v1" : "v0") : "vaddr",
+ __entry->vaddr
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_page_template, hfi1_tid_flow_page,
+ TP_PROTO(struct rvt_qp *qp, struct tid_rdma_flow *flow, u32 index,
+ char mtu8k, char v1, void *vaddr),
+ TP_ARGS(qp, flow, index, mtu8k, v1, vaddr)
+);
+
+DECLARE_EVENT_CLASS(/* tid_pageset */
+ hfi1_tid_pageset_template,
+ TP_PROTO(struct rvt_qp *qp, u32 index, u16 idx, u16 count),
+ TP_ARGS(qp, index, idx, count),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, index)
+ __field(u16, idx)
+ __field(u16, count)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->index = index;
+ __entry->idx = idx;
+ __entry->count = count;
+ ),
+ TP_printk(/* print */
+ "[%s] qpn 0x%x list[%u]: idx %u count %u",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->index,
+ __entry->idx,
+ __entry->count
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_pageset_template, hfi1_tid_pageset,
+ TP_PROTO(struct rvt_qp *qp, u32 index, u16 idx, u16 count),
+ TP_ARGS(qp, index, idx, count)
+);
+
+DECLARE_EVENT_CLASS(/* tid_flow */
+ hfi1_tid_flow_template,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(int, index)
+ __field(int, idx)
+ __field(u32, resp_ib_psn)
+ __field(u32, generation)
+ __field(u32, fspsn)
+ __field(u32, flpsn)
+ __field(u32, r_next_psn)
+ __field(u32, ib_spsn)
+ __field(u32, ib_lpsn)
+ __field(u32, npagesets)
+ __field(u32, tnode_cnt)
+ __field(u32, tidcnt)
+ __field(u32, tid_idx)
+ __field(u32, tid_offset)
+ __field(u32, length)
+ __field(u32, sent)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->index = index;
+ __entry->idx = flow->idx;
+ __entry->resp_ib_psn = flow->flow_state.resp_ib_psn;
+ __entry->generation = flow->flow_state.generation;
+ __entry->fspsn = full_flow_psn(flow,
+ flow->flow_state.spsn);
+ __entry->flpsn = full_flow_psn(flow,
+ flow->flow_state.lpsn);
+ __entry->r_next_psn = flow->flow_state.r_next_psn;
+ __entry->ib_spsn = flow->flow_state.ib_spsn;
+ __entry->ib_lpsn = flow->flow_state.ib_lpsn;
+ __entry->npagesets = flow->npagesets;
+ __entry->tnode_cnt = flow->tnode_cnt;
+ __entry->tidcnt = flow->tidcnt;
+ __entry->tid_idx = flow->tid_idx;
+ __entry->tid_offset = flow->tid_offset;
+ __entry->length = flow->length;
+ __entry->sent = flow->sent;
+ ),
+ TP_printk(/* print */
+ TID_FLOW_PRN,
+ __get_str(dev),
+ __entry->qpn,
+ __entry->index,
+ __entry->idx,
+ __entry->resp_ib_psn,
+ __entry->generation,
+ __entry->fspsn,
+ __entry->flpsn,
+ __entry->r_next_psn,
+ __entry->ib_spsn,
+ __entry->ib_lpsn,
+ __entry->npagesets,
+ __entry->tnode_cnt,
+ __entry->tidcnt,
+ __entry->tid_idx,
+ __entry->tid_offset,
+ __entry->length,
+ __entry->sent
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_alloc,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_build_read_pkt,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_build_read_resp,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_rcv_read_req,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_rcv_read_resp,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_restart_req,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_build_write_resp,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_rcv_write_resp,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_build_write_data,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_rcv_tid_ack,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_rcv_resync,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_handle_kdeth_eflags,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_read_kdeth_eflags,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DECLARE_EVENT_CLASS(/* tid_node */
+ hfi1_tid_node_template,
+ TP_PROTO(struct rvt_qp *qp, const char *msg, u32 index, u32 base,
+ u8 map, u8 used, u8 cnt),
+ TP_ARGS(qp, msg, index, base, map, used, cnt),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __string(msg, msg)
+ __field(u32, index)
+ __field(u32, base)
+ __field(u8, map)
+ __field(u8, used)
+ __field(u8, cnt)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __assign_str(msg);
+ __entry->index = index;
+ __entry->base = base;
+ __entry->map = map;
+ __entry->used = used;
+ __entry->cnt = cnt;
+ ),
+ TP_printk(/* print */
+ TID_NODE_PRN,
+ __get_str(dev),
+ __entry->qpn,
+ __get_str(msg),
+ __entry->index,
+ __entry->base,
+ __entry->map,
+ __entry->used,
+ __entry->cnt
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_node_template, hfi1_tid_node_add,
+ TP_PROTO(struct rvt_qp *qp, const char *msg, u32 index, u32 base,
+ u8 map, u8 used, u8 cnt),
+ TP_ARGS(qp, msg, index, base, map, used, cnt)
+);
+
+DECLARE_EVENT_CLASS(/* tid_entry */
+ hfi1_tid_entry_template,
+ TP_PROTO(struct rvt_qp *qp, int index, u32 ent),
+ TP_ARGS(qp, index, ent),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(int, index)
+ __field(u8, ctrl)
+ __field(u16, idx)
+ __field(u16, len)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->index = index;
+ __entry->ctrl = hfi1_trace_get_tid_ctrl(ent);
+ __entry->idx = hfi1_trace_get_tid_idx(ent);
+ __entry->len = hfi1_trace_get_tid_len(ent);
+ ),
+ TP_printk(/* print */
+ "[%s] qpn 0x%x TID entry %d: idx %u len %u ctrl 0x%x",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->index,
+ __entry->idx,
+ __entry->len,
+ __entry->ctrl
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_entry_template, hfi1_tid_entry_alloc,
+ TP_PROTO(struct rvt_qp *qp, int index, u32 entry),
+ TP_ARGS(qp, index, entry)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_entry_template, hfi1_tid_entry_build_read_resp,
+ TP_PROTO(struct rvt_qp *qp, int index, u32 ent),
+ TP_ARGS(qp, index, ent)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_entry_template, hfi1_tid_entry_rcv_read_req,
+ TP_PROTO(struct rvt_qp *qp, int index, u32 ent),
+ TP_ARGS(qp, index, ent)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_entry_template, hfi1_tid_entry_rcv_write_resp,
+ TP_PROTO(struct rvt_qp *qp, int index, u32 entry),
+ TP_ARGS(qp, index, entry)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_entry_template, hfi1_tid_entry_build_write_data,
+ TP_PROTO(struct rvt_qp *qp, int index, u32 entry),
+ TP_ARGS(qp, index, entry)
+);
+
+DECLARE_EVENT_CLASS(/* rsp_info */
+ hfi1_responder_info_template,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u8, state)
+ __field(u8, s_state)
+ __field(u32, psn)
+ __field(u32, r_psn)
+ __field(u8, r_state)
+ __field(u8, r_flags)
+ __field(u8, r_head_ack_queue)
+ __field(u8, s_tail_ack_queue)
+ __field(u8, s_acked_ack_queue)
+ __field(u8, s_ack_state)
+ __field(u8, s_nak_state)
+ __field(u8, r_nak_state)
+ __field(u32, s_flags)
+ __field(u32, ps_flags)
+ __field(unsigned long, iow_flags)
+ ),
+ TP_fast_assign(/* assign */
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->state = qp->state;
+ __entry->s_state = qp->s_state;
+ __entry->psn = psn;
+ __entry->r_psn = qp->r_psn;
+ __entry->r_state = qp->r_state;
+ __entry->r_flags = qp->r_flags;
+ __entry->r_head_ack_queue = qp->r_head_ack_queue;
+ __entry->s_tail_ack_queue = qp->s_tail_ack_queue;
+ __entry->s_acked_ack_queue = qp->s_acked_ack_queue;
+ __entry->s_ack_state = qp->s_ack_state;
+ __entry->s_nak_state = qp->s_nak_state;
+ __entry->s_flags = qp->s_flags;
+ __entry->ps_flags = priv->s_flags;
+ __entry->iow_flags = priv->s_iowait.flags;
+ ),
+ TP_printk(/* print */
+ RSP_INFO_PRN,
+ __get_str(dev),
+ __entry->qpn,
+ __entry->state,
+ __entry->s_state,
+ __entry->psn,
+ __entry->r_psn,
+ __entry->r_state,
+ __entry->r_flags,
+ __entry->r_head_ack_queue,
+ __entry->s_tail_ack_queue,
+ __entry->s_acked_ack_queue,
+ __entry->s_ack_state,
+ __entry->s_nak_state,
+ __entry->s_flags,
+ __entry->ps_flags,
+ __entry->iow_flags
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_responder_info_template, hfi1_rsp_make_rc_ack,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_responder_info_template, hfi1_rsp_rcv_tid_read_req,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_responder_info_template, hfi1_rsp_tid_rcv_error,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_responder_info_template, hfi1_rsp_tid_write_alloc_res,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_responder_info_template, hfi1_rsp_rcv_tid_write_req,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_responder_info_template, hfi1_rsp_build_tid_write_resp,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_responder_info_template, hfi1_rsp_rcv_tid_write_data,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_responder_info_template, hfi1_rsp_make_tid_ack,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_responder_info_template, hfi1_rsp_handle_kdeth_eflags,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_responder_info_template, hfi1_rsp_read_kdeth_eflags,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DECLARE_EVENT_CLASS(/* sender_info */
+ hfi1_sender_info_template,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u8, state)
+ __field(u32, s_cur)
+ __field(u32, s_tail)
+ __field(u32, s_head)
+ __field(u32, s_acked)
+ __field(u32, s_last)
+ __field(u32, s_psn)
+ __field(u32, s_last_psn)
+ __field(u32, s_flags)
+ __field(u32, ps_flags)
+ __field(unsigned long, iow_flags)
+ __field(u8, s_state)
+ __field(u8, s_num_rd)
+ __field(u8, s_retry)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->state = qp->state;
+ __entry->s_cur = qp->s_cur;
+ __entry->s_tail = qp->s_tail;
+ __entry->s_head = qp->s_head;
+ __entry->s_acked = qp->s_acked;
+ __entry->s_last = qp->s_last;
+ __entry->s_psn = qp->s_psn;
+ __entry->s_last_psn = qp->s_last_psn;
+ __entry->s_flags = qp->s_flags;
+ __entry->ps_flags = ((struct hfi1_qp_priv *)qp->priv)->s_flags;
+ __entry->iow_flags =
+ ((struct hfi1_qp_priv *)qp->priv)->s_iowait.flags;
+ __entry->s_state = qp->s_state;
+ __entry->s_num_rd = qp->s_num_rd_atomic;
+ __entry->s_retry = qp->s_retry;
+ ),
+ TP_printk(/* print */
+ SENDER_INFO_PRN,
+ __get_str(dev),
+ __entry->qpn,
+ __entry->state,
+ __entry->s_cur,
+ __entry->s_tail,
+ __entry->s_head,
+ __entry->s_acked,
+ __entry->s_last,
+ __entry->s_psn,
+ __entry->s_last_psn,
+ __entry->s_flags,
+ __entry->ps_flags,
+ __entry->iow_flags,
+ __entry->s_state,
+ __entry->s_num_rd,
+ __entry->s_retry
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_sender_info_template, hfi1_sender_make_rc_req,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_sender_info_template, hfi1_sender_reset_psn,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_sender_info_template, hfi1_sender_restart_rc,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_sender_info_template, hfi1_sender_do_rc_ack,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_sender_info_template, hfi1_sender_rcv_tid_read_resp,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_sender_info_template, hfi1_sender_rcv_tid_ack,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_sender_info_template, hfi1_sender_make_tid_pkt,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_sender_info_template, hfi1_sender_read_kdeth_eflags,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DECLARE_EVENT_CLASS(/* tid_read_sender */
+ hfi1_tid_read_sender_template,
+ TP_PROTO(struct rvt_qp *qp, char newreq),
+ TP_ARGS(qp, newreq),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(char, newreq)
+ __field(u32, tid_r_reqs)
+ __field(u32, tid_r_comp)
+ __field(u32, pending_tid_r_segs)
+ __field(u32, s_flags)
+ __field(u32, ps_flags)
+ __field(unsigned long, iow_flags)
+ __field(u8, s_state)
+ __field(u32, hw_flow_index)
+ __field(u32, generation)
+ __field(u32, fpsn)
+ ),
+ TP_fast_assign(/* assign */
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->newreq = newreq;
+ __entry->tid_r_reqs = priv->tid_r_reqs;
+ __entry->tid_r_comp = priv->tid_r_comp;
+ __entry->pending_tid_r_segs = priv->pending_tid_r_segs;
+ __entry->s_flags = qp->s_flags;
+ __entry->ps_flags = priv->s_flags;
+ __entry->iow_flags = priv->s_iowait.flags;
+ __entry->s_state = priv->s_state;
+ __entry->hw_flow_index = priv->flow_state.index;
+ __entry->generation = priv->flow_state.generation;
+ __entry->fpsn = priv->flow_state.psn;
+ ),
+ TP_printk(/* print */
+ TID_READ_SENDER_PRN,
+ __get_str(dev),
+ __entry->qpn,
+ __entry->newreq,
+ __entry->tid_r_reqs,
+ __entry->tid_r_comp,
+ __entry->pending_tid_r_segs,
+ __entry->s_flags,
+ __entry->ps_flags,
+ __entry->iow_flags,
+ __entry->s_state,
+ __entry->hw_flow_index,
+ __entry->generation,
+ __entry->fpsn
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_read_sender_template, hfi1_tid_read_sender_make_req,
+ TP_PROTO(struct rvt_qp *qp, char newreq),
+ TP_ARGS(qp, newreq)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_read_sender_template, hfi1_tid_read_sender_kdeth_eflags,
+ TP_PROTO(struct rvt_qp *qp, char newreq),
+ TP_ARGS(qp, newreq)
+);
+
+DECLARE_EVENT_CLASS(/* tid_rdma_request */
+ hfi1_tid_rdma_request_template,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(char, newreq)
+ __field(u8, opcode)
+ __field(u32, psn)
+ __field(u32, lpsn)
+ __field(u32, cur_seg)
+ __field(u32, comp_seg)
+ __field(u32, ack_seg)
+ __field(u32, alloc_seg)
+ __field(u32, total_segs)
+ __field(u16, setup_head)
+ __field(u16, clear_tail)
+ __field(u16, flow_idx)
+ __field(u16, acked_tail)
+ __field(u32, state)
+ __field(u32, r_ack_psn)
+ __field(u32, r_flow_psn)
+ __field(u32, r_last_acked)
+ __field(u32, s_next_psn)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->newreq = newreq;
+ __entry->opcode = opcode;
+ __entry->psn = psn;
+ __entry->lpsn = lpsn;
+ __entry->cur_seg = req->cur_seg;
+ __entry->comp_seg = req->comp_seg;
+ __entry->ack_seg = req->ack_seg;
+ __entry->alloc_seg = req->alloc_seg;
+ __entry->total_segs = req->total_segs;
+ __entry->setup_head = req->setup_head;
+ __entry->clear_tail = req->clear_tail;
+ __entry->flow_idx = req->flow_idx;
+ __entry->acked_tail = req->acked_tail;
+ __entry->state = req->state;
+ __entry->r_ack_psn = req->r_ack_psn;
+ __entry->r_flow_psn = req->r_flow_psn;
+ __entry->r_last_acked = req->r_last_acked;
+ __entry->s_next_psn = req->s_next_psn;
+ ),
+ TP_printk(/* print */
+ TID_REQ_PRN,
+ __get_str(dev),
+ __entry->qpn,
+ __entry->newreq,
+ __entry->opcode,
+ __entry->psn,
+ __entry->lpsn,
+ __entry->cur_seg,
+ __entry->comp_seg,
+ __entry->ack_seg,
+ __entry->alloc_seg,
+ __entry->total_segs,
+ __entry->setup_head,
+ __entry->clear_tail,
+ __entry->flow_idx,
+ __entry->acked_tail,
+ __entry->state,
+ __entry->r_ack_psn,
+ __entry->r_flow_psn,
+ __entry->r_last_acked,
+ __entry->s_next_psn
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_make_req_read,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_build_read_req,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_read_req,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_read_resp,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_err,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_restart_req,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_setup_tid_wqe,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_write_alloc_res,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_write_req,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_build_write_resp,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_write_resp,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_write_data,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_tid_ack,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_tid_retry_timeout,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_resync,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_make_tid_pkt,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_make_tid_ack,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_handle_kdeth_eflags,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_read_kdeth_eflags,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_make_rc_ack_write,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_make_req_write,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_update_num_rd_atomic,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DECLARE_EVENT_CLASS(/* rc_rcv_err */
+ hfi1_rc_rcv_err_template,
+ TP_PROTO(struct rvt_qp *qp, u32 opcode, u32 psn, int diff),
+ TP_ARGS(qp, opcode, psn, diff),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, s_flags)
+ __field(u8, state)
+ __field(u8, s_acked_ack_queue)
+ __field(u8, s_tail_ack_queue)
+ __field(u8, r_head_ack_queue)
+ __field(u32, opcode)
+ __field(u32, psn)
+ __field(u32, r_psn)
+ __field(int, diff)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->s_flags = qp->s_flags;
+ __entry->state = qp->state;
+ __entry->s_acked_ack_queue = qp->s_acked_ack_queue;
+ __entry->s_tail_ack_queue = qp->s_tail_ack_queue;
+ __entry->r_head_ack_queue = qp->r_head_ack_queue;
+ __entry->opcode = opcode;
+ __entry->psn = psn;
+ __entry->r_psn = qp->r_psn;
+ __entry->diff = diff;
+ ),
+ TP_printk(/* print */
+ RCV_ERR_PRN,
+ __get_str(dev),
+ __entry->qpn,
+ __entry->s_flags,
+ __entry->state,
+ __entry->s_acked_ack_queue,
+ __entry->s_tail_ack_queue,
+ __entry->r_head_ack_queue,
+ __entry->opcode,
+ __entry->psn,
+ __entry->r_psn,
+ __entry->diff
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_rc_rcv_err_template, hfi1_tid_rdma_rcv_err,
+ TP_PROTO(struct rvt_qp *qp, u32 opcode, u32 psn, int diff),
+ TP_ARGS(qp, opcode, psn, diff)
+);
+
+DECLARE_EVENT_CLASS(/* sge */
+ hfi1_sge_template,
+ TP_PROTO(struct rvt_qp *qp, int index, struct rvt_sge *sge),
+ TP_ARGS(qp, index, sge),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(int, index)
+ __field(u64, vaddr)
+ __field(u32, sge_length)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->index = index;
+ __entry->vaddr = (u64)sge->vaddr;
+ __entry->sge_length = sge->sge_length;
+ ),
+ TP_printk(/* print */
+ "[%s] qpn 0x%x sge %d: vaddr 0x%llx sge_length %u",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->index,
+ __entry->vaddr,
+ __entry->sge_length
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_sge_template, hfi1_sge_check_align,
+ TP_PROTO(struct rvt_qp *qp, int index, struct rvt_sge *sge),
+ TP_ARGS(qp, index, sge)
+);
+
+DECLARE_EVENT_CLASS(/* tid_write_rsp */
+ hfi1_tid_write_rsp_template,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, r_tid_head)
+ __field(u32, r_tid_tail)
+ __field(u32, r_tid_ack)
+ __field(u32, r_tid_alloc)
+ __field(u32, alloc_w_segs)
+ __field(u32, pending_tid_w_segs)
+ __field(bool, sync_pt)
+ __field(u32, ps_nak_psn)
+ __field(u8, ps_nak_state)
+ __field(u8, prnr_nak_state)
+ __field(u32, hw_flow_index)
+ __field(u32, generation)
+ __field(u32, fpsn)
+ __field(bool, resync)
+ __field(u32, r_next_psn_kdeth)
+ ),
+ TP_fast_assign(/* assign */
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->r_tid_head = priv->r_tid_head;
+ __entry->r_tid_tail = priv->r_tid_tail;
+ __entry->r_tid_ack = priv->r_tid_ack;
+ __entry->r_tid_alloc = priv->r_tid_alloc;
+ __entry->alloc_w_segs = priv->alloc_w_segs;
+ __entry->pending_tid_w_segs = priv->pending_tid_w_segs;
+ __entry->sync_pt = priv->sync_pt;
+ __entry->ps_nak_psn = priv->s_nak_psn;
+ __entry->ps_nak_state = priv->s_nak_state;
+ __entry->prnr_nak_state = priv->rnr_nak_state;
+ __entry->hw_flow_index = priv->flow_state.index;
+ __entry->generation = priv->flow_state.generation;
+ __entry->fpsn = priv->flow_state.psn;
+ __entry->resync = priv->resync;
+ __entry->r_next_psn_kdeth = priv->r_next_psn_kdeth;
+ ),
+ TP_printk(/* print */
+ TID_WRITE_RSPDR_PRN,
+ __get_str(dev),
+ __entry->qpn,
+ __entry->r_tid_head,
+ __entry->r_tid_tail,
+ __entry->r_tid_ack,
+ __entry->r_tid_alloc,
+ __entry->alloc_w_segs,
+ __entry->pending_tid_w_segs,
+ __entry->sync_pt ? "yes" : "no",
+ __entry->ps_nak_psn,
+ __entry->ps_nak_state,
+ __entry->prnr_nak_state,
+ __entry->hw_flow_index,
+ __entry->generation,
+ __entry->fpsn,
+ __entry->resync ? "yes" : "no",
+ __entry->r_next_psn_kdeth
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_alloc_res,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_rcv_req,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_build_resp,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_rcv_data,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_rcv_resync,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_make_tid_ack,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_handle_kdeth_eflags,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_make_rc_ack,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DECLARE_EVENT_CLASS(/* tid_write_sender */
+ hfi1_tid_write_sender_template,
+ TP_PROTO(struct rvt_qp *qp, char newreq),
+ TP_ARGS(qp, newreq),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(char, newreq)
+ __field(u32, s_tid_cur)
+ __field(u32, s_tid_tail)
+ __field(u32, s_tid_head)
+ __field(u32, pending_tid_w_resp)
+ __field(u32, n_requests)
+ __field(u32, n_tid_requests)
+ __field(u32, s_flags)
+ __field(u32, ps_flags)
+ __field(unsigned long, iow_flags)
+ __field(u8, s_state)
+ __field(u8, s_retry)
+ ),
+ TP_fast_assign(/* assign */
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->newreq = newreq;
+ __entry->s_tid_cur = priv->s_tid_cur;
+ __entry->s_tid_tail = priv->s_tid_tail;
+ __entry->s_tid_head = priv->s_tid_head;
+ __entry->pending_tid_w_resp = priv->pending_tid_w_resp;
+ __entry->n_requests = atomic_read(&priv->n_requests);
+ __entry->n_tid_requests = atomic_read(&priv->n_tid_requests);
+ __entry->s_flags = qp->s_flags;
+ __entry->ps_flags = priv->s_flags;
+ __entry->iow_flags = priv->s_iowait.flags;
+ __entry->s_state = priv->s_state;
+ __entry->s_retry = priv->s_retry;
+ ),
+ TP_printk(/* print */
+ TID_WRITE_SENDER_PRN,
+ __get_str(dev),
+ __entry->qpn,
+ __entry->newreq,
+ __entry->s_tid_cur,
+ __entry->s_tid_tail,
+ __entry->s_tid_head,
+ __entry->pending_tid_w_resp,
+ __entry->n_requests,
+ __entry->n_tid_requests,
+ __entry->s_flags,
+ __entry->ps_flags,
+ __entry->iow_flags,
+ __entry->s_state,
+ __entry->s_retry
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_sender_template, hfi1_tid_write_sender_rcv_resp,
+ TP_PROTO(struct rvt_qp *qp, char newreq),
+ TP_ARGS(qp, newreq)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_sender_template, hfi1_tid_write_sender_rcv_tid_ack,
+ TP_PROTO(struct rvt_qp *qp, char newreq),
+ TP_ARGS(qp, newreq)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_sender_template, hfi1_tid_write_sender_retry_timeout,
+ TP_PROTO(struct rvt_qp *qp, char newreq),
+ TP_ARGS(qp, newreq)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_sender_template, hfi1_tid_write_sender_make_tid_pkt,
+ TP_PROTO(struct rvt_qp *qp, char newreq),
+ TP_ARGS(qp, newreq)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_sender_template, hfi1_tid_write_sender_make_req,
+ TP_PROTO(struct rvt_qp *qp, char newreq),
+ TP_ARGS(qp, newreq)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_sender_template, hfi1_tid_write_sender_restart_rc,
+ TP_PROTO(struct rvt_qp *qp, char newreq),
+ TP_ARGS(qp, newreq)
+);
+
+DECLARE_EVENT_CLASS(/* tid_ack */
+ hfi1_tid_ack_template,
+ TP_PROTO(struct rvt_qp *qp, u32 aeth, u32 psn,
+ u32 req_psn, u32 resync_psn),
+ TP_ARGS(qp, aeth, psn, req_psn, resync_psn),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, aeth)
+ __field(u32, psn)
+ __field(u32, req_psn)
+ __field(u32, resync_psn)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->aeth = aeth;
+ __entry->psn = psn;
+ __entry->req_psn = req_psn;
+ __entry->resync_psn = resync_psn;
+ ),
+ TP_printk(/* print */
+ "[%s] qpn 0x%x aeth 0x%x psn 0x%x req_psn 0x%x resync_psn 0x%x",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->aeth,
+ __entry->psn,
+ __entry->req_psn,
+ __entry->resync_psn
+ )
+);
+
+DEFINE_EVENT(/* rcv_tid_ack */
+ hfi1_tid_ack_template, hfi1_rcv_tid_ack,
+ TP_PROTO(struct rvt_qp *qp, u32 aeth, u32 psn,
+ u32 req_psn, u32 resync_psn),
+ TP_ARGS(qp, aeth, psn, req_psn, resync_psn)
+);
+
+DECLARE_EVENT_CLASS(/* kdeth_eflags_error */
+ hfi1_kdeth_eflags_error_template,
+ TP_PROTO(struct rvt_qp *qp, u8 rcv_type, u8 rte, u32 psn),
+ TP_ARGS(qp, rcv_type, rte, psn),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u8, rcv_type)
+ __field(u8, rte)
+ __field(u32, psn)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->rcv_type = rcv_type;
+ __entry->rte = rte;
+ __entry->psn = psn;
+ ),
+ TP_printk(/* print */
+ KDETH_EFLAGS_ERR_PRN,
+ __get_str(dev),
+ __entry->qpn,
+ __entry->rcv_type,
+ __entry->rte,
+ __entry->psn
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_kdeth_eflags_error_template, hfi1_eflags_err_write,
+ TP_PROTO(struct rvt_qp *qp, u8 rcv_type, u8 rte, u32 psn),
+ TP_ARGS(qp, rcv_type, rte, psn)
+);
+
+#endif /* __HFI1_TRACE_TID_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_tid
+#include <trace/define_trace.h>
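
The show_tidtype() macro defined near the top of trace_tid.h is a thin wrapper around __print_symbolic(): only the numeric PT_* value is stored in the ring buffer, and the name ("EXPECTED", "EAGER" or "INVALID") is substituted when the event is printed, which is how hfi1_put_tid renders its type field above. A minimal illustration with a hypothetical event name (the PT_* constants are assumed to come from hfi.h, which the header already includes):

TRACE_EVENT(example_tidtype,
	TP_PROTO(u32 type),
	TP_ARGS(type),
	TP_STRUCT__entry(__field(u32, type)),	/* store only the numeric value */
	TP_fast_assign(__entry->type = type;),
	TP_printk("type %s", show_tidtype(__entry->type))	/* name resolved at read time */
);
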
diff --git a/drivers/infiniband/hw/hfi1/trace_tx.h b/drivers/infiniband/hw/hfi1/trace_tx.h
new file mode 100644
index 000000000000..c0ba6b0a2c4e
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_tx.h
@@ -0,0 +1,1065 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2015 - 2017 Intel Corporation.
+ */
+#if !defined(__HFI1_TRACE_TX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_TX_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+#include "mad.h"
+#include "sdma.h"
+#include "ipoib.h"
+#include "user_sdma.h"
+
+const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);
+
+#define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_tx
+
+TRACE_EVENT(hfi1_piofree,
+ TP_PROTO(struct send_context *sc, int extra),
+ TP_ARGS(sc, extra),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
+ __field(u32, sw_index)
+ __field(u32, hw_context)
+ __field(int, extra)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
+ __entry->sw_index = sc->sw_index;
+ __entry->hw_context = sc->hw_context;
+ __entry->extra = extra;
+ ),
+ TP_printk("[%s] ctxt %u(%u) extra %d",
+ __get_str(dev),
+ __entry->sw_index,
+ __entry->hw_context,
+ __entry->extra
+ )
+);
+
+TRACE_EVENT(hfi1_wantpiointr,
+ TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
+ TP_ARGS(sc, needint, credit_ctrl),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
+ __field(u32, sw_index)
+ __field(u32, hw_context)
+ __field(u32, needint)
+ __field(u64, credit_ctrl)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
+ __entry->sw_index = sc->sw_index;
+ __entry->hw_context = sc->hw_context;
+ __entry->needint = needint;
+ __entry->credit_ctrl = credit_ctrl;
+ ),
+ TP_printk("[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
+ __get_str(dev),
+ __entry->sw_index,
+ __entry->hw_context,
+ __entry->needint,
+ (unsigned long long)__entry->credit_ctrl
+ )
+);
+
+DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
+ TP_PROTO(struct rvt_qp *qp, u32 flags),
+ TP_ARGS(qp, flags),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, flags)
+ __field(u32, s_flags)
+ __field(u32, ps_flags)
+ __field(unsigned long, iow_flags)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->flags = flags;
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->s_flags = qp->s_flags;
+ __entry->ps_flags =
+ ((struct hfi1_qp_priv *)qp->priv)->s_flags;
+ __entry->iow_flags =
+ ((struct hfi1_qp_priv *)qp->priv)->s_iowait.flags;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x flags 0x%x s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->flags,
+ __entry->s_flags,
+ __entry->ps_flags,
+ __entry->iow_flags
+ )
+);
+
+DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
+ TP_PROTO(struct rvt_qp *qp, u32 flags),
+ TP_ARGS(qp, flags));
+
+DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
+ TP_PROTO(struct rvt_qp *qp, u32 flags),
+ TP_ARGS(qp, flags));
+
+TRACE_EVENT(hfi1_sdma_descriptor,
+ TP_PROTO(struct sdma_engine *sde,
+ u64 desc0,
+ u64 desc1,
+ u16 e,
+ void *descp),
+ TP_ARGS(sde, desc0, desc1, e, descp),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __field(void *, descp)
+ __field(u64, desc0)
+ __field(u64, desc1)
+ __field(u16, e)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __entry->desc0 = desc0;
+ __entry->desc1 = desc1;
+ __entry->idx = sde->this_idx;
+ __entry->descp = descp;
+ __entry->e = e;
+ ),
+ TP_printk(
+ "[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
+ __get_str(dev),
+ __entry->idx,
+ __parse_sdma_flags(__entry->desc0, __entry->desc1),
+ (__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT) &
+ SDMA_DESC0_PHY_ADDR_MASK,
+ (u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT) &
+ SDMA_DESC1_GENERATION_MASK),
+ (u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT) &
+ SDMA_DESC0_BYTE_COUNT_MASK),
+ __entry->desc0,
+ __entry->desc1,
+ __entry->descp,
+ __entry->e
+ )
+);
+
+TRACE_EVENT(hfi1_sdma_engine_select,
+ TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
+ TP_ARGS(dd, sel, vl, idx),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(u32, sel)
+ __field(u8, vl)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->sel = sel;
+ __entry->vl = vl;
+ __entry->idx = idx;
+ ),
+ TP_printk("[%s] selecting SDE %u sel 0x%x vl %u",
+ __get_str(dev),
+ __entry->idx,
+ __entry->sel,
+ __entry->vl
+ )
+);
+
+TRACE_EVENT(hfi1_sdma_user_free_queues,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt),
+ TP_ARGS(dd, ctxt, subctxt),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u16, subctxt)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ ),
+ TP_printk("[%s] SDMA [%u:%u] Freeing user SDMA queues",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt
+ )
+);
+
+TRACE_EVENT(hfi1_sdma_user_process_request,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
+ u16 comp_idx),
+ TP_ARGS(dd, ctxt, subctxt, comp_idx),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u16, subctxt)
+ __field(u16, comp_idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->comp_idx = comp_idx;
+ ),
+ TP_printk("[%s] SDMA [%u:%u] Using req/comp entry: %u",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->comp_idx
+ )
+);
+
+DECLARE_EVENT_CLASS(
+ hfi1_sdma_value_template,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt, u16 comp_idx,
+ u32 value),
+ TP_ARGS(dd, ctxt, subctxt, comp_idx, value),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u16, subctxt)
+ __field(u16, comp_idx)
+ __field(u32, value)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->comp_idx = comp_idx;
+ __entry->value = value;
+ ),
+ TP_printk("[%s] SDMA [%u:%u:%u] value: %u",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->comp_idx,
+ __entry->value
+ )
+);
+
+DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_initial_tidoffset,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
+ u16 comp_idx, u32 tidoffset),
+ TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset));
+
+DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_data_length,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
+ u16 comp_idx, u32 data_len),
+ TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));
+
+DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_compute_length,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
+ u16 comp_idx, u32 data_len),
+ TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));
+
+TRACE_EVENT(hfi1_sdma_user_tid_info,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
+ u16 comp_idx, u32 tidoffset, u32 units, u8 shift),
+ TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset, units, shift),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u16, subctxt)
+ __field(u16, comp_idx)
+ __field(u32, tidoffset)
+ __field(u32, units)
+ __field(u8, shift)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->comp_idx = comp_idx;
+ __entry->tidoffset = tidoffset;
+ __entry->units = units;
+ __entry->shift = shift;
+ ),
+ TP_printk("[%s] SDMA [%u:%u:%u] TID offset %ubytes %uunits om %u",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->comp_idx,
+ __entry->tidoffset,
+ __entry->units,
+ __entry->shift
+ )
+);
+
+TRACE_EVENT(hfi1_sdma_request,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
+ unsigned long dim),
+ TP_ARGS(dd, ctxt, subctxt, dim),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u16, subctxt)
+ __field(unsigned long, dim)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->dim = dim;
+ ),
+ TP_printk("[%s] SDMA from %u:%u (%lu)",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->dim
+ )
+);
+
+DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
+ TP_PROTO(struct sdma_engine *sde, u64 status),
+ TP_ARGS(sde, status),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __field(u64, status)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __entry->status = status;
+ __entry->idx = sde->this_idx;
+ ),
+ TP_printk("[%s] SDE(%u) status %llx",
+ __get_str(dev),
+ __entry->idx,
+ (unsigned long long)__entry->status
+ )
+);
+
+DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
+ TP_PROTO(struct sdma_engine *sde, u64 status),
+ TP_ARGS(sde, status)
+);
+
+DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
+ TP_PROTO(struct sdma_engine *sde, u64 status),
+ TP_ARGS(sde, status)
+);
+
+DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
+ TP_PROTO(struct sdma_engine *sde, int aidx),
+ TP_ARGS(sde, aidx),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __field(int, aidx)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __entry->idx = sde->this_idx;
+ __entry->aidx = aidx;
+ ),
+ TP_printk("[%s] SDE(%u) aidx %d",
+ __get_str(dev),
+ __entry->idx,
+ __entry->aidx
+ )
+);
+
+DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
+ TP_PROTO(struct sdma_engine *sde, int aidx),
+ TP_ARGS(sde, aidx));
+
+DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
+ TP_PROTO(struct sdma_engine *sde, int aidx),
+ TP_ARGS(sde, aidx));
+
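+/* The two hfi1_sdma_progress definitions below differ only in that the
+ * CONFIG_HFI1_DEBUG_SDMA_ORDER variant also records the txreq serial
+ * number (sn).
+ */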
+#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
+TRACE_EVENT(hfi1_sdma_progress,
+ TP_PROTO(struct sdma_engine *sde,
+ u16 hwhead,
+ u16 swhead,
+ struct sdma_txreq *txp
+ ),
+ TP_ARGS(sde, hwhead, swhead, txp),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __field(u64, sn)
+ __field(u16, hwhead)
+ __field(u16, swhead)
+ __field(u16, txnext)
+ __field(u16, tx_tail)
+ __field(u16, tx_head)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __entry->hwhead = hwhead;
+ __entry->swhead = swhead;
+ __entry->tx_tail = sde->tx_tail;
+ __entry->tx_head = sde->tx_head;
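+ /* ~0 is the sentinel for "no txreq pending" */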
+ __entry->txnext = txp ? txp->next_descq_idx : ~0;
+ __entry->idx = sde->this_idx;
+ __entry->sn = txp ? txp->sn : ~0;
+ ),
+ TP_printk(
+ "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
+ __get_str(dev),
+ __entry->idx,
+ __entry->sn,
+ __entry->hwhead,
+ __entry->swhead,
+ __entry->txnext,
+ __entry->tx_head,
+ __entry->tx_tail
+ )
+);
+#else
+TRACE_EVENT(hfi1_sdma_progress,
+ TP_PROTO(struct sdma_engine *sde,
+ u16 hwhead, u16 swhead,
+ struct sdma_txreq *txp
+ ),
+ TP_ARGS(sde, hwhead, swhead, txp),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __field(u16, hwhead)
+ __field(u16, swhead)
+ __field(u16, txnext)
+ __field(u16, tx_tail)
+ __field(u16, tx_head)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __entry->hwhead = hwhead;
+ __entry->swhead = swhead;
+ __entry->tx_tail = sde->tx_tail;
+ __entry->tx_head = sde->tx_head;
+ __entry->txnext = txp ? txp->next_descq_idx : ~0;
+ __entry->idx = sde->this_idx;
+ ),
+ TP_printk(
+ "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
+ __get_str(dev),
+ __entry->idx,
+ __entry->hwhead,
+ __entry->swhead,
+ __entry->txnext,
+ __entry->tx_head,
+ __entry->tx_tail
+ )
+);
+#endif
+
+DECLARE_EVENT_CLASS(hfi1_sdma_sn,
+ TP_PROTO(struct sdma_engine *sde, u64 sn),
+ TP_ARGS(sde, sn),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __field(u64, sn)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __entry->sn = sn;
+ __entry->idx = sde->this_idx;
+ ),
+ TP_printk("[%s] SDE(%u) sn %llu",
+ __get_str(dev),
+ __entry->idx,
+ __entry->sn
+ )
+);
+
+DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
+ TP_PROTO(
+ struct sdma_engine *sde,
+ u64 sn
+ ),
+ TP_ARGS(sde, sn)
+);
+
+DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
+ TP_PROTO(struct sdma_engine *sde, u64 sn),
+ TP_ARGS(sde, sn)
+);
+
+#define USDMA_HDR_FORMAT \
+ "[%s:%u:%u:%u] PBC=(0x%x 0x%x) LRH=(0x%x 0x%x) BTH=(0x%x 0x%x 0x%x) KDETH=(0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x) TIDVal=0x%x"
+
+TRACE_EVENT(hfi1_sdma_user_header,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
+ struct hfi1_pkt_header *hdr, u32 tidval),
+ TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u8, subctxt)
+ __field(u16, req)
+ __field(u32, pbc0)
+ __field(u32, pbc1)
+ __field(u32, lrh0)
+ __field(u32, lrh1)
+ __field(u32, bth0)
+ __field(u32, bth1)
+ __field(u32, bth2)
+ __field(u32, kdeth0)
+ __field(u32, kdeth1)
+ __field(u32, kdeth2)
+ __field(u32, kdeth3)
+ __field(u32, kdeth4)
+ __field(u32, kdeth5)
+ __field(u32, kdeth6)
+ __field(u32, kdeth7)
+ __field(u32, kdeth8)
+ __field(u32, tidval)
+ ),
+ TP_fast_assign(
+ __le32 *pbc = (__le32 *)hdr->pbc;
+ __be32 *lrh = (__be32 *)hdr->lrh;
+ __be32 *bth = (__be32 *)hdr->bth;
+ __le32 *kdeth = (__le32 *)&hdr->kdeth;
+
+ DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->req = req;
+ __entry->pbc0 = le32_to_cpu(pbc[0]);
+ __entry->pbc1 = le32_to_cpu(pbc[1]);
+ __entry->lrh0 = be32_to_cpu(lrh[0]);
+ __entry->lrh1 = be32_to_cpu(lrh[1]);
+ __entry->bth0 = be32_to_cpu(bth[0]);
+ __entry->bth1 = be32_to_cpu(bth[1]);
+ __entry->bth2 = be32_to_cpu(bth[2]);
+ __entry->kdeth0 = le32_to_cpu(kdeth[0]);
+ __entry->kdeth1 = le32_to_cpu(kdeth[1]);
+ __entry->kdeth2 = le32_to_cpu(kdeth[2]);
+ __entry->kdeth3 = le32_to_cpu(kdeth[3]);
+ __entry->kdeth4 = le32_to_cpu(kdeth[4]);
+ __entry->kdeth5 = le32_to_cpu(kdeth[5]);
+ __entry->kdeth6 = le32_to_cpu(kdeth[6]);
+ __entry->kdeth7 = le32_to_cpu(kdeth[7]);
+ __entry->kdeth8 = le32_to_cpu(kdeth[8]);
+ __entry->tidval = tidval;
+ ),
+ TP_printk(USDMA_HDR_FORMAT,
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->req,
+ __entry->pbc1,
+ __entry->pbc0,
+ __entry->lrh0,
+ __entry->lrh1,
+ __entry->bth0,
+ __entry->bth1,
+ __entry->bth2,
+ __entry->kdeth0,
+ __entry->kdeth1,
+ __entry->kdeth2,
+ __entry->kdeth3,
+ __entry->kdeth4,
+ __entry->kdeth5,
+ __entry->kdeth6,
+ __entry->kdeth7,
+ __entry->kdeth8,
+ __entry->tidval
+ )
+);
+
+#define SDMA_UREQ_FMT \
+ "[%s:%u:%u] ver/op=0x%x, iovcnt=%u, npkts=%u, frag=%u, idx=%u"
+TRACE_EVENT(hfi1_sdma_user_reqinfo,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
+ TP_ARGS(dd, ctxt, subctxt, i),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u8, subctxt)
+ __field(u8, ver_opcode)
+ __field(u8, iovcnt)
+ __field(u16, npkts)
+ __field(u16, fragsize)
+ __field(u16, comp_idx)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->ver_opcode = i[0] & 0xff;
+ __entry->iovcnt = (i[0] >> 8) & 0xff;
+ __entry->npkts = i[1];
+ __entry->fragsize = i[2];
+ __entry->comp_idx = i[3];
+ ),
+ TP_printk(SDMA_UREQ_FMT,
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->ver_opcode,
+ __entry->iovcnt,
+ __entry->npkts,
+ __entry->fragsize,
+ __entry->comp_idx
+ )
+);
+
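+/* Map completion-state values to their symbolic names; __print_symbolic()
+ * does the decoding when the trace buffer is read.
+ */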
+#define usdma_complete_name(st) { st, #st }
+#define show_usdma_complete_state(st) \
+ __print_symbolic(st, \
+ usdma_complete_name(FREE), \
+ usdma_complete_name(QUEUED), \
+ usdma_complete_name(COMPLETE), \
+ usdma_complete_name(ERROR))
+
+TRACE_EVENT(hfi1_sdma_user_completion,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx,
+ u8 state, int code),
+ TP_ARGS(dd, ctxt, subctxt, idx, state, code),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u8, subctxt)
+ __field(u16, idx)
+ __field(u8, state)
+ __field(int, code)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->idx = idx;
+ __entry->state = state;
+ __entry->code = code;
+ ),
+ TP_printk("[%s:%u:%u:%u] SDMA completion state %s (%d)",
+ __get_str(dev), __entry->ctxt, __entry->subctxt,
+ __entry->idx, show_usdma_complete_state(__entry->state),
+ __entry->code)
+);
+
+TRACE_EVENT(hfi1_usdma_defer,
+ TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
+ struct sdma_engine *sde,
+ struct iowait *wait),
+ TP_ARGS(pq, sde, wait),
+ TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
+ __field(struct hfi1_user_sdma_pkt_q *, pq)
+ __field(struct sdma_engine *, sde)
+ __field(struct iowait *, wait)
+ __field(int, engine)
+ __field(int, empty)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
+ __entry->pq = pq;
+ __entry->sde = sde;
+ __entry->wait = wait;
+ __entry->engine = sde->this_idx;
+ __entry->empty = list_empty(&__entry->wait->list);
+ ),
+ TP_printk("[%s] pq %llx sde %llx wait %llx engine %d empty %d",
+ __get_str(dev),
+ (unsigned long long)__entry->pq,
+ (unsigned long long)__entry->sde,
+ (unsigned long long)__entry->wait,
+ __entry->engine,
+ __entry->empty
+ )
+);
+
+TRACE_EVENT(hfi1_usdma_activate,
+ TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
+ struct iowait *wait,
+ int reason),
+ TP_ARGS(pq, wait, reason),
+ TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
+ __field(struct hfi1_user_sdma_pkt_q *, pq)
+ __field(struct iowait *, wait)
+ __field(int, reason)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
+ __entry->pq = pq;
+ __entry->wait = wait;
+ __entry->reason = reason;
+ ),
+ TP_printk("[%s] pq %llx wait %llx reason %d",
+ __get_str(dev),
+ (unsigned long long)__entry->pq,
+ (unsigned long long)__entry->wait,
+ __entry->reason
+ )
+);
+
+TRACE_EVENT(hfi1_usdma_we,
+ TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
+ int we_ret),
+ TP_ARGS(pq, we_ret),
+ TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
+ __field(struct hfi1_user_sdma_pkt_q *, pq)
+ __field(int, state)
+ __field(int, we_ret)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
+ __entry->pq = pq;
+ __entry->state = pq->state;
+ __entry->we_ret = we_ret;
+ ),
+ TP_printk("[%s] pq %llx state %d we_ret %d",
+ __get_str(dev),
+ (unsigned long long)__entry->pq,
+ __entry->state,
+ __entry->we_ret
+ )
+);
+
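+/* Helper used by __print_u32_hex below to render the AHG words into the
+ * trace output; 'p' is the trace_seq that TP_printk provides.
+ */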
+const char *print_u32_array(struct trace_seq *, u32 *, int);
+#define __print_u32_hex(arr, len) print_u32_array(p, arr, len)
+
+TRACE_EVENT(hfi1_sdma_user_header_ahg,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
+ u8 sde, u8 ahgidx, u32 *ahg, int len, u32 tidval),
+ TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u8, subctxt)
+ __field(u16, req)
+ __field(u8, sde)
+ __field(u8, idx)
+ __field(int, len)
+ __field(u32, tidval)
+ __array(u32, ahg, 10)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->req = req;
+ __entry->sde = sde;
+ __entry->idx = ahgidx;
+ __entry->len = len;
+ __entry->tidval = tidval;
+ memcpy(__entry->ahg, ahg, len * sizeof(u32));
+ ),
+ TP_printk("[%s:%u:%u:%u] (SDE%u/AHG%u) ahg[0-%d]=(%s) TIDVal=0x%x",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->req,
+ __entry->sde,
+ __entry->idx,
+ __entry->len - 1,
+ __print_u32_hex(__entry->ahg, __entry->len),
+ __entry->tidval
+ )
+);
+
+TRACE_EVENT(hfi1_sdma_state,
+ TP_PROTO(struct sdma_engine *sde,
+ const char *cstate,
+ const char *nstate
+ ),
+ TP_ARGS(sde, cstate, nstate),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __string(curstate, cstate)
+ __string(newstate, nstate)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __assign_str(curstate);
+ __assign_str(newstate);
+ ),
+ TP_printk("[%s] current state %s new state %s",
+ __get_str(dev),
+ __get_str(curstate),
+ __get_str(newstate)
+ )
+);
+
+#define BCT_FORMAT \
+ "shared_limit %x vls 0-7 [%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x] 15 [%x,%x]"
+
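+/* BCT() pulls one big-endian field out of the buffer_control snapshot that
+ * TP_fast_assign copies into the event's dynamic array.
+ */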
+#define BCT(field) \
+ be16_to_cpu( \
+ ((struct buffer_control *)__get_dynamic_array(bct))->field \
+ )
+
+DECLARE_EVENT_CLASS(hfi1_bct_template,
+ TP_PROTO(struct hfi1_devdata *dd,
+ struct buffer_control *bc),
+ TP_ARGS(dd, bc),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __dynamic_array(u8, bct, sizeof(*bc))
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ memcpy(__get_dynamic_array(bct), bc,
+ sizeof(*bc));
+ ),
+ TP_printk(BCT_FORMAT,
+ BCT(overall_shared_limit),
+
+ BCT(vl[0].dedicated),
+ BCT(vl[0].shared),
+
+ BCT(vl[1].dedicated),
+ BCT(vl[1].shared),
+
+ BCT(vl[2].dedicated),
+ BCT(vl[2].shared),
+
+ BCT(vl[3].dedicated),
+ BCT(vl[3].shared),
+
+ BCT(vl[4].dedicated),
+ BCT(vl[4].shared),
+
+ BCT(vl[5].dedicated),
+ BCT(vl[5].shared),
+
+ BCT(vl[6].dedicated),
+ BCT(vl[6].shared),
+
+ BCT(vl[7].dedicated),
+ BCT(vl[7].shared),
+
+ BCT(vl[15].dedicated),
+ BCT(vl[15].shared)
+ )
+);
+
+DEFINE_EVENT(hfi1_bct_template, bct_set,
+ TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
+ TP_ARGS(dd, bc));
+
+DEFINE_EVENT(hfi1_bct_template, bct_get,
+ TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
+ TP_ARGS(dd, bc));
+
+TRACE_EVENT(
+ hfi1_qp_send_completion,
+ TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 idx),
+ TP_ARGS(qp, wqe, idx),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(struct rvt_swqe *, wqe)
+ __field(u64, wr_id)
+ __field(u32, qpn)
+ __field(u32, qpt)
+ __field(u32, length)
+ __field(u32, idx)
+ __field(u32, ssn)
+ __field(enum ib_wr_opcode, opcode)
+ __field(int, send_flags)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->wqe = wqe;
+ __entry->wr_id = wqe->wr.wr_id;
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->qpt = qp->ibqp.qp_type;
+ __entry->length = wqe->length;
+ __entry->idx = idx;
+ __entry->ssn = wqe->ssn;
+ __entry->opcode = wqe->wr.opcode;
+ __entry->send_flags = wqe->wr.send_flags;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x qpt %u wqe %p idx %u wr_id %llx length %u ssn %u opcode %x send_flags %x",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->qpt,
+ __entry->wqe,
+ __entry->idx,
+ __entry->wr_id,
+ __entry->length,
+ __entry->ssn,
+ __entry->opcode,
+ __entry->send_flags
+ )
+);
+
+DECLARE_EVENT_CLASS(
+ hfi1_do_send_template,
+ TP_PROTO(struct rvt_qp *qp, bool flag),
+ TP_ARGS(qp, flag),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(bool, flag)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->flag = flag;
+ ),
+ TP_printk(
+ "[%s] qpn %x flag %d",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->flag
+ )
+);
+
+DEFINE_EVENT(
+ hfi1_do_send_template, hfi1_rc_do_send,
+ TP_PROTO(struct rvt_qp *qp, bool flag),
+ TP_ARGS(qp, flag)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_do_send_template, hfi1_rc_do_tid_send,
+ TP_PROTO(struct rvt_qp *qp, bool flag),
+ TP_ARGS(qp, flag)
+);
+
+DEFINE_EVENT(
+ hfi1_do_send_template, hfi1_rc_expired_time_slice,
+ TP_PROTO(struct rvt_qp *qp, bool flag),
+ TP_ARGS(qp, flag)
+);
+
+DECLARE_EVENT_CLASS(/* AIP */
+ hfi1_ipoib_txq_template,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(txq->priv->dd)
+ __field(struct hfi1_ipoib_txq *, txq)
+ __field(struct sdma_engine *, sde)
+ __field(ulong, head)
+ __field(ulong, tail)
+ __field(uint, used)
+ __field(uint, flow)
+ __field(int, stops)
+ __field(int, no_desc)
+ __field(u8, idx)
+ __field(u8, stopped)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(txq->priv->dd);
+ __entry->txq = txq;
+ __entry->sde = txq->sde;
+ __entry->head = txq->tx_ring.head;
+ __entry->tail = txq->tx_ring.tail;
+ __entry->idx = txq->q_idx;
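+ /* sent minus completed gives the txreqs still outstanding on the ring */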
+ __entry->used =
+ txq->tx_ring.sent_txreqs -
+ txq->tx_ring.complete_txreqs;
+ __entry->flow = txq->flow.as_int;
+ __entry->stops = atomic_read(&txq->tx_ring.stops);
+ __entry->no_desc = atomic_read(&txq->tx_ring.no_desc);
+ __entry->stopped =
+ __netif_subqueue_stopped(txq->priv->netdev, txq->q_idx);
+ ),
+ TP_printk(/* print */
+ "[%s] txq %llx idx %u sde %llx:%u cpu %d head %lx tail %lx flow %x used %u stops %d no_desc %d stopped %u",
+ __get_str(dev),
+ (unsigned long long)__entry->txq,
+ __entry->idx,
+ (unsigned long long)__entry->sde,
+ __entry->sde ? __entry->sde->this_idx : 0,
+ __entry->sde ? __entry->sde->cpu : 0,
+ __entry->head,
+ __entry->tail,
+ __entry->flow,
+ __entry->used,
+ __entry->stops,
+ __entry->no_desc,
+ __entry->stopped
+ )
+);
+
+DEFINE_EVENT(/* queue stop */
+ hfi1_ipoib_txq_template, hfi1_txq_stop,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* queue wake */
+ hfi1_ipoib_txq_template, hfi1_txq_wake,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* flow flush */
+ hfi1_ipoib_txq_template, hfi1_flow_flush,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* flow switch */
+ hfi1_ipoib_txq_template, hfi1_flow_switch,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* wakeup */
+ hfi1_ipoib_txq_template, hfi1_txq_wakeup,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* full */
+ hfi1_ipoib_txq_template, hfi1_txq_full,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* queued */
+ hfi1_ipoib_txq_template, hfi1_txq_queued,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* xmit_stopped */
+ hfi1_ipoib_txq_template, hfi1_txq_xmit_stopped,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* xmit_unstopped */
+ hfi1_ipoib_txq_template, hfi1_txq_xmit_unstopped,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DECLARE_EVENT_CLASS(/* AIP */
+ hfi1_ipoib_tx_template,
+ TP_PROTO(struct ipoib_txreq *tx, u32 idx),
+ TP_ARGS(tx, idx),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(tx->txq->priv->dd)
+ __field(struct ipoib_txreq *, tx)
+ __field(struct hfi1_ipoib_txq *, txq)
+ __field(struct sk_buff *, skb)
+ __field(ulong, idx)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(tx->txq->priv->dd);
+ __entry->tx = tx;
+ __entry->skb = tx->skb;
+ __entry->txq = tx->txq;
+ __entry->idx = idx;
+ ),
+ TP_printk(/* print */
+ "[%s] tx %llx txq %llx,%u skb %llx idx %lu",
+ __get_str(dev),
+ (unsigned long long)__entry->tx,
+ (unsigned long long)__entry->txq,
+ __entry->txq ? __entry->txq->q_idx : 0,
+ (unsigned long long)__entry->skb,
+ __entry->idx
+ )
+);
+
+DEFINE_EVENT(/* produce */
+ hfi1_ipoib_tx_template, hfi1_tx_produce,
+ TP_PROTO(struct ipoib_txreq *tx, u32 idx),
+ TP_ARGS(tx, idx)
+);
+
+DEFINE_EVENT(/* consume */
+ hfi1_ipoib_tx_template, hfi1_tx_consume,
+ TP_PROTO(struct ipoib_txreq *tx, u32 idx),
+ TP_ARGS(tx, idx)
+);
+
+DEFINE_EVENT(/* alloc_tx */
+ hfi1_ipoib_txq_template, hfi1_txq_alloc_tx,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* poll */
+ hfi1_ipoib_txq_template, hfi1_txq_poll,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* complete */
+ hfi1_ipoib_txq_template, hfi1_txq_complete,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+#endif /* __HFI1_TRACE_TX_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_tx
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/hfi1/uc.c
index aa3a8035bb68..33d2c2a218e2 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -1,101 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*/
-#include "qib.h"
+#include "hfi.h"
+#include "verbs_txreq.h"
+#include "qp.h"
/* cut down ridiculously long IB macro names */
-#define OP(x) IB_OPCODE_UC_##x
+#define OP(x) UC_OP(x)
/**
- * qib_make_uc_req - construct a request packet (SEND, RDMA write)
+ * hfi1_make_uc_req - construct a request packet (SEND, RDMA write)
* @qp: a pointer to the QP
+ * @ps: the current packet state
+ *
+ * Assume s_lock is held.
*
* Return 1 if constructed; otherwise, return 0.
*/
-int qib_make_uc_req(struct qib_qp *qp)
+int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
- struct qib_other_headers *ohdr;
- struct qib_swqe *wqe;
- unsigned long flags;
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct ib_other_headers *ohdr;
+ struct rvt_swqe *wqe;
u32 hwords;
- u32 bth0;
+ u32 bth0 = 0;
u32 len;
u32 pmtu = qp->pmtu;
- int ret = 0;
+ int middle = 0;
- spin_lock_irqsave(&qp->s_lock, flags);
+ ps->s_txreq = get_txreq(ps->dev, qp);
+ if (!ps->s_txreq)
+ goto bail_no_tx;
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
- if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
+ if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- if (qp->s_last == qp->s_head)
+ if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
- if (atomic_read(&qp->s_dma_busy)) {
- qp->s_flags |= QIB_S_WAIT_DMA;
+ if (iowait_sdma_pending(&priv->s_iowait)) {
+ qp->s_flags |= RVT_S_WAIT_DMA;
goto bail;
}
- wqe = get_swqe_ptr(qp, qp->s_last);
- qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
- goto done;
+ clear_ahg(qp);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_last);
+ rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ goto done_free_tx;
}
- ohdr = &qp->s_hdr->u.oth;
- if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
- ohdr = &qp->s_hdr->u.l.oth;
-
- /* header size in 32-bit words LRH+BTH = (8+12)/4. */
- hwords = 5;
- bth0 = 0;
+ if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
+ /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ hwords = 5;
+ if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
+ ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
+ else
+ ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
+ } else {
+ /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
+ hwords = 7;
+ if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
+ (hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))))
+ ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
+ else
+ ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
+ }
/* Get the next send request. */
- wqe = get_swqe_ptr(qp, qp->s_cur);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
qp->s_wqe = NULL;
switch (qp->s_state) {
default:
- if (!(ib_qib_state_ops[qp->state] &
- QIB_PROCESS_NEXT_SEND_OK))
+ if (!(ib_rvt_state_ops[qp->state] &
+ RVT_PROCESS_NEXT_SEND_OK))
goto bail;
/* Check if send work queue is empty. */
- if (qp->s_cur == qp->s_head)
+ if (qp->s_cur == READ_ONCE(qp->s_head)) {
+ clear_ahg(qp);
goto bail;
+ }
+ /*
+ * Local operations are processed immediately
+ * after all prior requests have completed.
+ */
+ if (wqe->wr.opcode == IB_WR_REG_MR ||
+ wqe->wr.opcode == IB_WR_LOCAL_INV) {
+ int local_ops = 0;
+ int err = 0;
+
+ if (qp->s_last != qp->s_cur)
+ goto bail;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
+ err = rvt_invalidate_rkey(
+ qp, wqe->wr.ex.invalidate_rkey);
+ local_ops = 1;
+ }
+ rvt_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR
+ : IB_WC_SUCCESS);
+ if (local_ops)
+ atomic_dec(&qp->local_ops_pending);
+ goto done_free_tx;
+ }
/*
* Start a new request.
*/
- wqe->psn = qp->s_next_psn;
- qp->s_psn = qp->s_next_psn;
+ qp->s_psn = wqe->psn;
qp->s_sge.sge = wqe->sg_list[0];
qp->s_sge.sg_list = wqe->sg_list + 1;
qp->s_sge.num_sge = wqe->wr.num_sge;
@@ -110,9 +123,9 @@ int qib_make_uc_req(struct qib_qp *qp)
len = pmtu;
break;
}
- if (wqe->wr.opcode == IB_WR_SEND)
+ if (wqe->wr.opcode == IB_WR_SEND) {
qp->s_state = OP(SEND_ONLY);
- else {
+ } else {
qp->s_state =
OP(SEND_ONLY_WITH_IMMEDIATE);
/* Immediate data comes after the BTH */
@@ -129,9 +142,9 @@ int qib_make_uc_req(struct qib_qp *qp)
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
ohdr->u.rc.reth.vaddr =
- cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
+ cpu_to_be64(wqe->rdma_wr.remote_addr);
ohdr->u.rc.reth.rkey =
- cpu_to_be32(wqe->wr.wr.rdma.rkey);
+ cpu_to_be32(wqe->rdma_wr.rkey);
ohdr->u.rc.reth.length = cpu_to_be32(len);
hwords += sizeof(struct ib_reth) / 4;
if (len > pmtu) {
@@ -139,9 +152,9 @@ int qib_make_uc_req(struct qib_qp *qp)
len = pmtu;
break;
}
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
qp->s_state = OP(RDMA_WRITE_ONLY);
- else {
+ } else {
qp->s_state =
OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
/* Immediate data comes after the RETH */
@@ -162,16 +175,17 @@ int qib_make_uc_req(struct qib_qp *qp)
case OP(SEND_FIRST):
qp->s_state = OP(SEND_MIDDLE);
- /* FALLTHROUGH */
+ fallthrough;
case OP(SEND_MIDDLE):
len = qp->s_len;
if (len > pmtu) {
len = pmtu;
+ middle = HFI1_CAP_IS_KSET(SDMA_AHG);
break;
}
- if (wqe->wr.opcode == IB_WR_SEND)
+ if (wqe->wr.opcode == IB_WR_SEND) {
qp->s_state = OP(SEND_LAST);
- else {
+ } else {
qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
/* Immediate data comes after the BTH */
ohdr->u.imm_data = wqe->wr.ex.imm_data;
@@ -186,16 +200,17 @@ int qib_make_uc_req(struct qib_qp *qp)
case OP(RDMA_WRITE_FIRST):
qp->s_state = OP(RDMA_WRITE_MIDDLE);
- /* FALLTHROUGH */
+ fallthrough;
case OP(RDMA_WRITE_MIDDLE):
len = qp->s_len;
if (len > pmtu) {
len = pmtu;
+ middle = HFI1_CAP_IS_KSET(SDMA_AHG);
break;
}
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
qp->s_state = OP(RDMA_WRITE_LAST);
- else {
+ } else {
qp->s_state =
OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
/* Immediate data comes after the BTH */
@@ -210,66 +225,62 @@ int qib_make_uc_req(struct qib_qp *qp)
break;
}
qp->s_len -= len;
- qp->s_hdrwords = hwords;
- qp->s_cur_sge = &qp->s_sge;
- qp->s_cur_size = len;
- qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
- qp->s_next_psn++ & QIB_PSN_MASK);
-done:
- ret = 1;
- goto unlock;
+ ps->s_txreq->hdr_dwords = hwords;
+ ps->s_txreq->sde = priv->s_sde;
+ ps->s_txreq->ss = &qp->s_sge;
+ ps->s_txreq->s_cur_size = len;
+ hfi1_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
+ qp->remote_qpn, mask_psn(qp->s_psn++),
+ middle, ps);
+ return 1;
+
+done_free_tx:
+ hfi1_put_txreq(ps->s_txreq);
+ ps->s_txreq = NULL;
+ return 1;
bail:
- qp->s_flags &= ~QIB_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
- return ret;
+ hfi1_put_txreq(ps->s_txreq);
+
+bail_no_tx:
+ ps->s_txreq = NULL;
+ qp->s_flags &= ~RVT_S_BUSY;
+ return 0;
}
/**
- * qib_uc_rcv - handle an incoming UC packet
- * @ibp: the port the packet came in on
- * @hdr: the header of the packet
- * @has_grh: true if the packet has a GRH
- * @data: the packet data
- * @tlen: the length of the packet
- * @qp: the QP for this packet.
+ * hfi1_uc_rcv - handle an incoming UC packet
+ * @packet: the packet structure
*
- * This is called from qib_qp_rcv() to process an incoming UC packet
+ * This is called from qp_rcv() to process an incoming UC packet
* for the given QP.
* Called at interrupt level.
*/
-void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+void hfi1_uc_rcv(struct hfi1_packet *packet)
{
- struct qib_other_headers *ohdr;
- u32 opcode;
- u32 hdrsize;
+ struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
+ void *data = packet->payload;
+ u32 tlen = packet->tlen;
+ struct rvt_qp *qp = packet->qp;
+ struct ib_other_headers *ohdr = packet->ohdr;
+ u32 opcode = packet->opcode;
+ u32 hdrsize = packet->hlen;
u32 psn;
- u32 pad;
+ u32 pad = packet->pad;
struct ib_wc wc;
u32 pmtu = qp->pmtu;
struct ib_reth *reth;
int ret;
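+ /* Trailing bytes: pad, the 16B LT byte (if any) and the 4-byte ICRC */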
+ u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
- /* Check for GRH */
- if (!has_grh) {
- ohdr = &hdr->u.oth;
- hdrsize = 8 + 12; /* LRH + BTH */
- } else {
- ohdr = &hdr->u.l.oth;
- hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
- }
-
- opcode = be32_to_cpu(ohdr->bth[0]);
- if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
+ if (hfi1_ruc_check_hdr(ibp, packet))
return;
- psn = be32_to_cpu(ohdr->bth[2]);
- opcode >>= 24;
+ process_ecn(qp, packet);
+ psn = ib_bth_get_psn(ohdr);
 /* Compare the PSN versus the expected PSN. */
- if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
+ if (unlikely(cmp_psn(psn, qp->r_psn) != 0)) {
/*
* Handle a sequence error.
* Silently drop any current message.
@@ -278,10 +289,11 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
inv:
if (qp->r_state == OP(SEND_FIRST) ||
qp->r_state == OP(SEND_MIDDLE)) {
- set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
+ set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
qp->r_sge.num_sge = 0;
- } else
- qib_put_ss(&qp->r_sge);
+ } else {
+ rvt_put_ss(&qp->r_sge);
+ }
qp->r_state = OP(SEND_LAST);
switch (opcode) {
case OP(SEND_FIRST):
@@ -328,17 +340,8 @@ inv:
goto inv;
}
- if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
- qp->r_flags |= QIB_R_COMM_EST;
- if (qp->ibqp.event_handler) {
- struct ib_event ev;
-
- ev.device = qp->ibqp.device;
- ev.element.qp = &qp->ibqp;
- ev.event = IB_EVENT_COMM_EST;
- qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
- }
- }
+ if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
+ rvt_comm_est(qp);
/* OK, process the packet. */
switch (opcode) {
@@ -346,10 +349,10 @@ inv:
case OP(SEND_ONLY):
case OP(SEND_ONLY_WITH_IMMEDIATE):
send_first:
- if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
+ if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) {
qp->r_sge = qp->s_rdma_read_sge;
- else {
- ret = qib_get_rwqe(qp, 0);
+ } else {
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto op_err;
if (!ret)
@@ -365,21 +368,25 @@ send_first:
goto no_immediate_data;
else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
goto send_last_imm;
- /* FALLTHROUGH */
+ fallthrough;
case OP(SEND_MIDDLE):
/* Check for invalid length PMTU or posted rwqe len. */
- if (unlikely(tlen != (hdrsize + pmtu + 4)))
+ /*
+ * There will be no padding for 9B packets, but 16B packets
+ * will come in with some padding since we always add
+ * CRC and LT bytes, which need to be flit aligned.
+ */
+ if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
goto rewind;
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto rewind;
- qib_copy_sge(&qp->r_sge, data, pmtu, 0);
+ rvt_copy_sge(qp, &qp->r_sge, data, pmtu, false, false);
break;
case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
wc.ex.imm_data = ohdr->u.imm_data;
- hdrsize += 4;
wc.wc_flags = IB_WC_WITH_IMM;
goto send_last;
case OP(SEND_LAST):
@@ -387,36 +394,43 @@ no_immediate_data:
wc.ex.imm_data = 0;
wc.wc_flags = 0;
send_last:
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
/* Check for invalid length. */
- /* XXX LAST len should be >= 1 */
- if (unlikely(tlen < (hdrsize + pad + 4)))
+ /* LAST len should be >= 1 */
+ if (unlikely(tlen < (hdrsize + extra_bytes)))
goto rewind;
/* Don't count the CRC. */
- tlen -= (hdrsize + pad + 4);
+ tlen -= (hdrsize + extra_bytes);
wc.byte_len = tlen + qp->r_rcv_len;
if (unlikely(wc.byte_len > qp->r_len))
goto rewind;
wc.opcode = IB_WC_RECV;
- qib_copy_sge(&qp->r_sge, data, tlen, 0);
- qib_put_ss(&qp->s_rdma_read_sge);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, false, false);
+ rvt_put_ss(&qp->s_rdma_read_sge);
last_imm:
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
- wc.slid = qp->remote_ah_attr.dlid;
- wc.sl = qp->remote_ah_attr.sl;
+ wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
+ /*
+ * It seems that IB mandates the presence of an SL in a
+ * work completion only for the UD transport (see section
+ * 11.4.2 of IBTA Vol. 1).
+ *
+ * However, the way the SL is chosen below is consistent
+ * with the way that IB/qib works and is trying to avoid
+ * introducing incompatibilities.
+ *
+ * See also OPA Vol. 1, section 9.7.6, and table 9-17.
+ */
+ wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
/* zero fields that are N/A */
wc.vendor_err = 0;
wc.pkey_index = 0;
wc.dlid_path_bits = 0;
wc.port_num = 0;
/* Signal completion event if the solicited bit is set. */
- qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
- (ohdr->bth[0] &
- cpu_to_be32(IB_BTH_SOLICITED)) != 0);
+ rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
break;
case OP(RDMA_WRITE_FIRST):
@@ -428,7 +442,6 @@ rdma_first:
goto drop;
}
reth = &ohdr->u.rc.reth;
- hdrsize += sizeof(*reth);
qp->r_len = be32_to_cpu(reth->length);
qp->r_rcv_len = 0;
qp->r_sge.sg_list = NULL;
@@ -438,7 +451,7 @@ rdma_first:
int ok;
/* Check rkey */
- ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
+ ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
if (unlikely(!ok))
goto drop;
@@ -450,13 +463,13 @@ rdma_first:
qp->r_sge.sge.length = 0;
qp->r_sge.sge.sge_length = 0;
}
- if (opcode == OP(RDMA_WRITE_ONLY))
+ if (opcode == OP(RDMA_WRITE_ONLY)) {
goto rdma_last;
- else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
+ } else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
wc.ex.imm_data = ohdr->u.rc.imm_data;
goto rdma_last_imm;
}
- /* FALLTHROUGH */
+ fallthrough;
case OP(RDMA_WRITE_MIDDLE):
/* Check for invalid length PMTU or posted rwqe len. */
if (unlikely(tlen != (hdrsize + pmtu + 4)))
@@ -464,29 +477,26 @@ rdma_first:
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto drop;
- qib_copy_sge(&qp->r_sge, data, pmtu, 1);
+ rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
break;
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
wc.ex.imm_data = ohdr->u.imm_data;
rdma_last_imm:
- hdrsize += 4;
wc.wc_flags = IB_WC_WITH_IMM;
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
/* Check for invalid length. */
- /* XXX LAST len should be >= 1 */
+ /* LAST len should be >= 1 */
if (unlikely(tlen < (hdrsize + pad + 4)))
goto drop;
/* Don't count the CRC. */
- tlen -= (hdrsize + pad + 4);
+ tlen -= (hdrsize + extra_bytes);
if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
goto drop;
- if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
- qib_put_ss(&qp->s_rdma_read_sge);
- else {
- ret = qib_get_rwqe(qp, 1);
+ if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) {
+ rvt_put_ss(&qp->s_rdma_read_sge);
+ } else {
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto op_err;
if (!ret)
@@ -494,24 +504,22 @@ rdma_last_imm:
}
wc.byte_len = qp->r_len;
wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- qib_copy_sge(&qp->r_sge, data, tlen, 1);
- qib_put_ss(&qp->r_sge);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
+ rvt_put_ss(&qp->r_sge);
goto last_imm;
case OP(RDMA_WRITE_LAST):
rdma_last:
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
/* Check for invalid length. */
- /* XXX LAST len should be >= 1 */
+ /* LAST len should be >= 1 */
if (unlikely(tlen < (hdrsize + pad + 4)))
goto drop;
/* Don't count the CRC. */
- tlen -= (hdrsize + pad + 4);
+ tlen -= (hdrsize + extra_bytes);
if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
goto drop;
- qib_copy_sge(&qp->r_sge, data, tlen, 1);
- qib_put_ss(&qp->r_sge);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
+ rvt_put_ss(&qp->r_sge);
break;
default:
@@ -523,14 +531,12 @@ rdma_last:
return;
rewind:
- set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
+ set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
qp->r_sge.num_sge = 0;
drop:
- ibp->n_pkt_drops++;
+ ibp->rvp.n_pkt_drops++;
return;
op_err:
- qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- return;
-
+ rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
}
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
new file mode 100644
index 000000000000..89d1bae8f824
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -0,0 +1,1023 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015 - 2019 Intel Corporation.
+ */
+
+#include <linux/net.h>
+#include <rdma/ib_smi.h>
+
+#include "hfi.h"
+#include "mad.h"
+#include "verbs_txreq.h"
+#include "trace_ibhdrs.h"
+#include "qp.h"
+
+/* We support only two types - 9B and 16B for now */
+static const hfi1_make_req hfi1_make_ud_req_tbl[2] = {
+ [HFI1_PKT_TYPE_9B] = &hfi1_make_ud_req_9B,
+ [HFI1_PKT_TYPE_16B] = &hfi1_make_ud_req_16B
+};
+
+/**
+ * ud_loopback - handle send on loopback QPs
+ * @sqp: the sending QP
+ * @swqe: the send work request
+ *
+ * This is called from hfi1_make_ud_req() to forward a WQE addressed
+ * to the same HFI.
+ * Note that the receive interrupt handler may be calling hfi1_ud_rcv()
+ * while this is being called.
+ */
+static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
+{
+ struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
+ struct hfi1_pportdata *ppd;
+ struct hfi1_qp_priv *priv = sqp->priv;
+ struct rvt_qp *qp;
+ struct rdma_ah_attr *ah_attr;
+ unsigned long flags;
+ struct rvt_sge_state ssge;
+ struct rvt_sge *sge;
+ struct ib_wc wc;
+ u32 length;
+ enum ib_qp_type sqptype, dqptype;
+
+ rcu_read_lock();
+
+ qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
+ rvt_get_swqe_remote_qpn(swqe));
+ if (!qp) {
+ ibp->rvp.n_pkt_drops++;
+ rcu_read_unlock();
+ return;
+ }
+
+ sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
+ IB_QPT_UD : sqp->ibqp.qp_type;
+ dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
+ IB_QPT_UD : qp->ibqp.qp_type;
+
+ if (dqptype != sqptype ||
+ !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
+ ibp->rvp.n_pkt_drops++;
+ goto drop;
+ }
+
+ ah_attr = rvt_get_swqe_ah_attr(swqe);
+ ppd = ppd_from_ibp(ibp);
+
+ if (qp->ibqp.qp_num > 1) {
+ u16 pkey;
+ u32 slid;
+ u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
+
+ pkey = hfi1_get_pkey(ibp, sqp->s_pkey_index);
+ slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
+ ((1 << ppd->lmc) - 1));
+ if (unlikely(ingress_pkey_check(ppd, pkey, sc5,
+ qp->s_pkey_index,
+ slid, false))) {
+ hfi1_bad_pkey(ibp, pkey,
+ rdma_ah_get_sl(ah_attr),
+ sqp->ibqp.qp_num, qp->ibqp.qp_num,
+ slid, rdma_ah_get_dlid(ah_attr));
+ goto drop;
+ }
+ }
+
+ /*
+ * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
+ * Qkeys with the high order bit set mean use the
+ * qkey from the QP context instead of the WR (see 10.2.5).
+ */
+ if (qp->ibqp.qp_num) {
+ u32 qkey;
+
+ qkey = (int)rvt_get_swqe_remote_qkey(swqe) < 0 ?
+ sqp->qkey : rvt_get_swqe_remote_qkey(swqe);
+ if (unlikely(qkey != qp->qkey))
+ goto drop; /* silently drop per IBTA spec */
+ }
+
+ /*
+ * A GRH is expected to precede the data even if not
+ * present on the wire.
+ */
+ length = swqe->length;
+ memset(&wc, 0, sizeof(wc));
+ wc.byte_len = length + sizeof(struct ib_grh);
+
+ if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.ex.imm_data = swqe->wr.ex.imm_data;
+ }
+
+ spin_lock_irqsave(&qp->r_lock, flags);
+
+ /*
+ * Get the next work request entry to find where to put the data.
+ */
+ if (qp->r_flags & RVT_R_REUSE_SGE) {
+ qp->r_flags &= ~RVT_R_REUSE_SGE;
+ } else {
+ int ret;
+
+ ret = rvt_get_rwqe(qp, false);
+ if (ret < 0) {
+ rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ goto bail_unlock;
+ }
+ if (!ret) {
+ if (qp->ibqp.qp_num == 0)
+ ibp->rvp.n_vl15_dropped++;
+ goto bail_unlock;
+ }
+ }
+ /* Silently drop packets which are too big. */
+ if (unlikely(wc.byte_len > qp->r_len)) {
+ qp->r_flags |= RVT_R_REUSE_SGE;
+ ibp->rvp.n_pkt_drops++;
+ goto bail_unlock;
+ }
+
+ if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
+ struct ib_grh grh;
+ struct ib_global_route grd = *(rdma_ah_read_grh(ah_attr));
+
+ /*
+ * For loopback packets with extended LIDs, the
+ * sgid_index in the GRH is 0 and the dgid is
+ * OPA GID of the sender. While creating a response
+ * to the loopback packet, IB core creates the new
+ * sgid_index from the DGID and that will be the
+ * OPA_GID_INDEX. The new dgid is from the sgid
+ * index and that will be in the IB GID format.
+ *
+ * We now have a case where the sent packet had a
+ * different sgid_index and dgid compared to the
+ * one that was received in response.
+ *
+ * Fix this inconsistency.
+ */
+ if (priv->hdr_type == HFI1_PKT_TYPE_16B) {
+ if (grd.sgid_index == 0)
+ grd.sgid_index = OPA_GID_INDEX;
+
+ if (ib_is_opa_gid(&grd.dgid))
+ grd.dgid.global.interface_id =
+ cpu_to_be64(ppd->guids[HFI1_PORT_GUID_INDEX]);
+ }
+
+ hfi1_make_grh(ibp, &grh, &grd, 0, 0);
+ rvt_copy_sge(qp, &qp->r_sge, &grh,
+ sizeof(grh), true, false);
+ wc.wc_flags |= IB_WC_GRH;
+ } else {
+ rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
+ }
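+ /* Copy the payload from the sender's SG list into the receive buffer */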
+ ssge.sg_list = swqe->sg_list + 1;
+ ssge.sge = *swqe->sg_list;
+ ssge.num_sge = swqe->wr.num_sge;
+ sge = &ssge.sge;
+ while (length) {
+ u32 len = rvt_get_sge_length(sge, length);
+
+ WARN_ON_ONCE(len == 0);
+ rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, true, false);
+ rvt_update_sge(&ssge, len, false);
+ length -= len;
+ }
+ rvt_put_ss(&qp->r_sge);
+ if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
+ goto bail_unlock;
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.opcode = IB_WC_RECV;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = sqp->ibqp.qp_num;
+ if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) {
+ if (sqp->ibqp.qp_type == IB_QPT_GSI ||
+ sqp->ibqp.qp_type == IB_QPT_SMI)
+ wc.pkey_index = rvt_get_swqe_pkey_index(swqe);
+ else
+ wc.pkey_index = sqp->s_pkey_index;
+ } else {
+ wc.pkey_index = 0;
+ }
+ wc.slid = (ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
+ ((1 << ppd->lmc) - 1))) & U16_MAX;
+ /* Check for loopback when the port lid is not set */
+ if (wc.slid == 0 && sqp->ibqp.qp_type == IB_QPT_GSI)
+ wc.slid = be16_to_cpu(IB_LID_PERMISSIVE);
+ wc.sl = rdma_ah_get_sl(ah_attr);
+ wc.dlid_path_bits = rdma_ah_get_dlid(ah_attr) & ((1 << ppd->lmc) - 1);
+ wc.port_num = qp->port_num;
+ /* Signal completion event if the solicited bit is set. */
+ rvt_recv_cq(qp, &wc, swqe->wr.send_flags & IB_SEND_SOLICITED);
+ ibp->rvp.n_loop_pkts++;
+bail_unlock:
+ spin_unlock_irqrestore(&qp->r_lock, flags);
+drop:
+ rcu_read_unlock();
+}
+
+static void hfi1_make_bth_deth(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ struct ib_other_headers *ohdr,
+ u16 *pkey, u32 extra_bytes, bool bypass)
+{
+ u32 bth0;
+ struct hfi1_ibport *ibp;
+
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
+ ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
+ bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
+ } else {
+ bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
+ }
+
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ bth0 |= extra_bytes << 20;
+ if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI)
+ *pkey = hfi1_get_pkey(ibp, rvt_get_swqe_pkey_index(wqe));
+ else
+ *pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
+ if (!bypass)
+ bth0 |= *pkey;
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ ohdr->bth[1] = cpu_to_be32(rvt_get_swqe_remote_qpn(wqe));
+ ohdr->bth[2] = cpu_to_be32(mask_psn(wqe->psn));
+ /*
+ * Qkeys with the high order bit set mean use the
+ * qkey from the QP context instead of the WR (see 10.2.5).
+ */
+ ohdr->u.ud.deth[0] =
+ cpu_to_be32((int)rvt_get_swqe_remote_qkey(wqe) < 0 ? qp->qkey :
+ rvt_get_swqe_remote_qkey(wqe));
+ ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
+}
+
+void hfi1_make_ud_req_9B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
+ struct rvt_swqe *wqe)
+{
+ u32 nwords, extra_bytes;
+ u16 len, slid, dlid, pkey;
+ u16 lrh0 = 0;
+ u8 sc5;
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct ib_other_headers *ohdr;
+ struct rdma_ah_attr *ah_attr;
+ struct hfi1_pportdata *ppd;
+ struct hfi1_ibport *ibp;
+ struct ib_grh *grh;
+
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ ppd = ppd_from_ibp(ibp);
+ ah_attr = rvt_get_swqe_ah_attr(wqe);
+
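+ /* Pad the payload to a dword boundary; nwords also counts the ICRC */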
+ extra_bytes = -wqe->length & 3;
+ nwords = ((wqe->length + extra_bytes) >> 2) + SIZE_OF_CRC;
+ /* header size in dwords LRH+BTH+DETH = (8+12+8)/4. */
+ ps->s_txreq->hdr_dwords = 7;
+ if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
+ ps->s_txreq->hdr_dwords++;
+
+ if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
+ grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh;
+ ps->s_txreq->hdr_dwords +=
+ hfi1_make_grh(ibp, grh, rdma_ah_read_grh(ah_attr),
+ ps->s_txreq->hdr_dwords - LRH_9B_DWORDS,
+ nwords);
+ lrh0 = HFI1_LRH_GRH;
+ ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
+ } else {
+ lrh0 = HFI1_LRH_BTH;
+ ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
+ }
+
+ sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
+ lrh0 |= (rdma_ah_get_sl(ah_attr) & 0xf) << 4;
+ if (qp->ibqp.qp_type == IB_QPT_SMI) {
+ lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
+ priv->s_sc = 0xf;
+ } else {
+ lrh0 |= (sc5 & 0xf) << 12;
+ priv->s_sc = sc5;
+ }
+
+ dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 9B);
+ if (dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
+ slid = be16_to_cpu(IB_LID_PERMISSIVE);
+ } else {
+ u16 lid = (u16)ppd->lid;
+
+ if (lid) {
+ lid |= rdma_ah_get_path_bits(ah_attr) &
+ ((1 << ppd->lmc) - 1);
+ slid = lid;
+ } else {
+ slid = be16_to_cpu(IB_LID_PERMISSIVE);
+ }
+ }
+ hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, false);
+ len = ps->s_txreq->hdr_dwords + nwords;
+
+ /* Setup the packet */
+ ps->s_txreq->phdr.hdr.hdr_type = HFI1_PKT_TYPE_9B;
+ hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
+ lrh0, len, dlid, slid);
+}
+
+void hfi1_make_ud_req_16B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
+ struct rvt_swqe *wqe)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct ib_other_headers *ohdr;
+ struct rdma_ah_attr *ah_attr;
+ struct hfi1_pportdata *ppd;
+ struct hfi1_ibport *ibp;
+ u32 dlid, slid, nwords, extra_bytes;
+ u32 dest_qp = rvt_get_swqe_remote_qpn(wqe);
+ u32 src_qp = qp->ibqp.qp_num;
+ u16 len, pkey;
+ u8 l4, sc5;
+ bool is_mgmt = false;
+
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ ppd = ppd_from_ibp(ibp);
+ ah_attr = rvt_get_swqe_ah_attr(wqe);
+
+ /*
+ * Build 16B Management Packet if either the destination
+ * or source queue pair number is 0 or 1.
+ */
+ if (dest_qp == 0 || src_qp == 0 || dest_qp == 1 || src_qp == 1) {
+ /* header size in dwords 16B LRH+L4_FM = (16+8)/4. */
+ ps->s_txreq->hdr_dwords = 6;
+ is_mgmt = true;
+ } else {
+ /* header size in dwords 16B LRH+BTH+DETH = (16+12+8)/4. */
+ ps->s_txreq->hdr_dwords = 9;
+ if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
+ ps->s_txreq->hdr_dwords++;
+ }
+
+ /* SW provides space for CRC and LT for bypass packets. */
+ extra_bytes = hfi1_get_16b_padding((ps->s_txreq->hdr_dwords << 2),
+ wqe->length);
+ nwords = ((wqe->length + extra_bytes + SIZE_OF_LT) >> 2) + SIZE_OF_CRC;
+
+ if ((rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) &&
+ hfi1_check_mcast(rdma_ah_get_dlid(ah_attr))) {
+ struct ib_grh *grh;
+ struct ib_global_route *grd = rdma_ah_retrieve_grh(ah_attr);
+ /*
+ * Ensure OPA GIDs are transformed to IB gids
+ * before creating the GRH.
+ */
+ if (grd->sgid_index == OPA_GID_INDEX) {
+ dd_dev_warn(ppd->dd, "Bad sgid_index. sgid_index: %d\n",
+ grd->sgid_index);
+ grd->sgid_index = 0;
+ }
+ grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh;
+ ps->s_txreq->hdr_dwords += hfi1_make_grh(
+ ibp, grh, grd,
+ ps->s_txreq->hdr_dwords - LRH_16B_DWORDS,
+ nwords);
+ ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
+ l4 = OPA_16B_L4_IB_GLOBAL;
+ } else {
+ ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
+ l4 = OPA_16B_L4_IB_LOCAL;
+ }
+
+ sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
+ if (qp->ibqp.qp_type == IB_QPT_SMI)
+ priv->s_sc = 0xf;
+ else
+ priv->s_sc = sc5;
+
+ dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 16B);
+ if (!ppd->lid)
+ slid = be32_to_cpu(OPA_LID_PERMISSIVE);
+ else
+ slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
+ ((1 << ppd->lmc) - 1));
+
+ if (is_mgmt) {
+ l4 = OPA_16B_L4_FM;
+ pkey = hfi1_get_pkey(ibp, rvt_get_swqe_pkey_index(wqe));
+ hfi1_16B_set_qpn(&ps->s_txreq->phdr.hdr.opah.u.mgmt,
+ dest_qp, src_qp);
+ } else {
+ hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, true);
+ }
+ /* Convert dwords to flits */
+ len = (ps->s_txreq->hdr_dwords + nwords) >> 1;
+
+ /* Setup the packet */
+ ps->s_txreq->phdr.hdr.hdr_type = HFI1_PKT_TYPE_16B;
+ hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah,
+ slid, dlid, len, pkey, 0, 0, l4, priv->s_sc);
+}
+
+/**
+ * hfi1_make_ud_req - construct a UD request packet
+ * @qp: the QP
+ * @ps: the current packet state
+ *
+ * Assume s_lock is held.
+ *
+ * Return 1 if constructed; otherwise, return 0.
+ */
+int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct rdma_ah_attr *ah_attr;
+ struct hfi1_pportdata *ppd;
+ struct hfi1_ibport *ibp;
+ struct rvt_swqe *wqe;
+ int next_cur;
+ u32 lid;
+
+ ps->s_txreq = get_txreq(ps->dev, qp);
+ if (!ps->s_txreq)
+ goto bail_no_tx;
+
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
+ if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
+ goto bail;
+ /* We are in the error state, flush the work request. */
+ if (qp->s_last == READ_ONCE(qp->s_head))
+ goto bail;
+ /* If DMAs are in progress, we can't flush immediately. */
+ if (iowait_sdma_pending(&priv->s_iowait)) {
+ qp->s_flags |= RVT_S_WAIT_DMA;
+ goto bail;
+ }
+ wqe = rvt_get_swqe_ptr(qp, qp->s_last);
+ rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ goto done_free_tx;
+ }
+
+ /* see post_one_send() */
+ if (qp->s_cur == READ_ONCE(qp->s_head))
+ goto bail;
+
+ wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
+ next_cur = qp->s_cur + 1;
+ if (next_cur >= qp->s_size)
+ next_cur = 0;
+
+ /* Construct the header. */
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ ppd = ppd_from_ibp(ibp);
+ ah_attr = rvt_get_swqe_ah_attr(wqe);
+ priv->hdr_type = hfi1_get_hdr_type(ppd->lid, ah_attr);
+ if ((!hfi1_check_mcast(rdma_ah_get_dlid(ah_attr))) ||
+ (rdma_ah_get_dlid(ah_attr) == be32_to_cpu(OPA_LID_PERMISSIVE))) {
+ lid = rdma_ah_get_dlid(ah_attr) & ~((1 << ppd->lmc) - 1);
+ if (unlikely(!loopback &&
+ ((lid == ppd->lid) ||
+ ((lid == be32_to_cpu(OPA_LID_PERMISSIVE)) &&
+ (qp->ibqp.qp_type == IB_QPT_GSI))))) {
+ unsigned long tflags = ps->flags;
+ /*
+ * If DMAs are in progress, we can't generate
+ * a completion for the loopback packet since
+ * it would be out of order.
+ * Instead of waiting, we could queue a
+ * zero length descriptor so we get a callback.
+ */
+ if (iowait_sdma_pending(&priv->s_iowait)) {
+ qp->s_flags |= RVT_S_WAIT_DMA;
+ goto bail;
+ }
+ qp->s_cur = next_cur;
+ spin_unlock_irqrestore(&qp->s_lock, tflags);
+ ud_loopback(qp, wqe);
+ spin_lock_irqsave(&qp->s_lock, tflags);
+ ps->flags = tflags;
+ rvt_send_complete(qp, wqe, IB_WC_SUCCESS);
+ goto done_free_tx;
+ }
+ }
+
+ qp->s_cur = next_cur;
+ ps->s_txreq->s_cur_size = wqe->length;
+ ps->s_txreq->ss = &qp->s_sge;
+ qp->s_srate = rdma_ah_get_static_rate(ah_attr);
+ qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
+ qp->s_wqe = wqe;
+ qp->s_sge.sge = wqe->sg_list[0];
+ qp->s_sge.sg_list = wqe->sg_list + 1;
+ qp->s_sge.num_sge = wqe->wr.num_sge;
+ qp->s_sge.total_len = wqe->length;
+
+ /* Make the appropriate header */
+ hfi1_make_ud_req_tbl[priv->hdr_type](qp, ps, qp->s_wqe);
+ priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
+ ps->s_txreq->sde = priv->s_sde;
+ priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
+ ps->s_txreq->psc = priv->s_sendcontext;
+ /* disarm any ahg */
+ priv->s_ahg->ahgcount = 0;
+ priv->s_ahg->ahgidx = 0;
+ priv->s_ahg->tx_flags = 0;
+
+ return 1;
+
+done_free_tx:
+ hfi1_put_txreq(ps->s_txreq);
+ ps->s_txreq = NULL;
+ return 1;
+
+bail:
+ hfi1_put_txreq(ps->s_txreq);
+
+bail_no_tx:
+ ps->s_txreq = NULL;
+ qp->s_flags &= ~RVT_S_BUSY;
+ return 0;
+}
+
+/*
+ * Hardware can't check this so we do it here.
+ *
+ * This is a slightly different algorithm than the standard pkey check. It
+ * special cases the management keys and allows for 0x7fff and 0xffff to be in
+ * the table at the same time.
+ *
+ * @returns the index found or -1 if not found
+ */
+int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey)
+{
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ unsigned i;
+
+ if (pkey == FULL_MGMT_P_KEY || pkey == LIM_MGMT_P_KEY) {
+ unsigned lim_idx = -1;
+
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i) {
+ /* here we look for an exact match */
+ if (ppd->pkeys[i] == pkey)
+ return i;
+ if (ppd->pkeys[i] == LIM_MGMT_P_KEY)
+ lim_idx = i;
+ }
+
+ /* did not find 0xffff; return the 0x7fff idx if one was found */
+ if (pkey == FULL_MGMT_P_KEY)
+ return lim_idx;
+
+ /* no match... */
+ return -1;
+ }
+
+ pkey &= 0x7fff; /* remove limited/full membership bit */
+
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i)
+ if ((ppd->pkeys[i] & 0x7fff) == pkey)
+ return i;
+
+ /*
+ * Should not get here; this means the hardware failed to validate pkeys.
+ */
+ return -1;
+}
+
+void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
+ u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
+ u8 sc5, const struct ib_grh *old_grh)
+{
+ u64 pbc, pbc_flags = 0;
+ u32 bth0, plen, vl, hwords = 7;
+ u16 len;
+ u8 l4;
+ struct hfi1_opa_header hdr;
+ struct ib_other_headers *ohdr;
+ struct pio_buf *pbuf;
+ struct send_context *ctxt = qp_to_send_context(qp, sc5);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u32 nwords;
+
+ hdr.hdr_type = HFI1_PKT_TYPE_16B;
+ /* Populate length */
+ nwords = ((hfi1_get_16b_padding(hwords << 2, 0) +
+ SIZE_OF_LT) >> 2) + SIZE_OF_CRC;
+ if (old_grh) {
+ struct ib_grh *grh = &hdr.opah.u.l.grh;
+
+ grh->version_tclass_flow = old_grh->version_tclass_flow;
+ grh->paylen = cpu_to_be16(
+ (hwords - LRH_16B_DWORDS + nwords) << 2);
+ grh->hop_limit = 0xff;
+ grh->sgid = old_grh->dgid;
+ grh->dgid = old_grh->sgid;
+ ohdr = &hdr.opah.u.l.oth;
+ l4 = OPA_16B_L4_IB_GLOBAL;
+ hwords += sizeof(struct ib_grh) / sizeof(u32);
+ } else {
+ ohdr = &hdr.opah.u.oth;
+ l4 = OPA_16B_L4_IB_LOCAL;
+ }
+
+ /* Bits 16-19 are TVER; bits 20-22 are the pad count */
+ bth0 = (IB_OPCODE_CNP << 24) | (1 << 16) |
+ (hfi1_get_16b_padding(hwords << 2, 0) << 20);
+ ohdr->bth[0] = cpu_to_be32(bth0);
+
+ ohdr->bth[1] = cpu_to_be32(remote_qpn);
+ ohdr->bth[2] = 0; /* PSN 0 */
+
+ /* Convert dwords to flits */
+ len = (hwords + nwords) >> 1;
+ hfi1_make_16b_hdr(&hdr.opah, slid, dlid, len, pkey, 1, 0, l4, sc5);
+
+ plen = 2 /* PBC */ + hwords + nwords;
+ pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
+ vl = sc_to_vlt(ppd->dd, sc5);
+ pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
+ if (ctxt) {
+ pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
+ if (!IS_ERR_OR_NULL(pbuf)) {
+ trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
+ ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
+ &hdr, hwords);
+ }
+ }
+}
+
+void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
+ u16 pkey, u32 slid, u32 dlid, u8 sc5,
+ const struct ib_grh *old_grh)
+{
+ u64 pbc, pbc_flags = 0;
+ u32 bth0, plen, vl, hwords = 5;
+ u16 lrh0;
+ u8 sl = ibp->sc_to_sl[sc5];
+ struct hfi1_opa_header hdr;
+ struct ib_other_headers *ohdr;
+ struct pio_buf *pbuf;
+ struct send_context *ctxt = qp_to_send_context(qp, sc5);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+ hdr.hdr_type = HFI1_PKT_TYPE_9B;
+ if (old_grh) {
+ struct ib_grh *grh = &hdr.ibh.u.l.grh;
+
+ grh->version_tclass_flow = old_grh->version_tclass_flow;
+ grh->paylen = cpu_to_be16(
+ (hwords - LRH_9B_DWORDS + SIZE_OF_CRC) << 2);
+ grh->hop_limit = 0xff;
+ grh->sgid = old_grh->dgid;
+ grh->dgid = old_grh->sgid;
+ ohdr = &hdr.ibh.u.l.oth;
+ lrh0 = HFI1_LRH_GRH;
+ hwords += sizeof(struct ib_grh) / sizeof(u32);
+ } else {
+ ohdr = &hdr.ibh.u.oth;
+ lrh0 = HFI1_LRH_BTH;
+ }
+
+ lrh0 |= (sc5 & 0xf) << 12 | sl << 4;
+
+ bth0 = pkey | (IB_OPCODE_CNP << 24);
+ ohdr->bth[0] = cpu_to_be32(bth0);
+
+ ohdr->bth[1] = cpu_to_be32(remote_qpn | (1 << IB_BECN_SHIFT));
+ ohdr->bth[2] = 0; /* PSN 0 */
+
+ hfi1_make_ib_hdr(&hdr.ibh, lrh0, hwords + SIZE_OF_CRC, dlid, slid);
+ plen = 2 /* PBC */ + hwords;
+ pbc_flags |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
+ vl = sc_to_vlt(ppd->dd, sc5);
+ pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
+ if (ctxt) {
+ pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
+ if (!IS_ERR_OR_NULL(pbuf)) {
+ trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
+ ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
+ &hdr, hwords);
+ }
+ }
+}
+
+/*
+ * opa_smp_check() - Do the regular pkey checking, and the additional
+ * checks for SMPs specified in OPAv1 rev 1.0, 9/19/2016 update, section
+ * 9.10.25 ("SMA Packet Checks").
+ *
+ * Note that:
+ * - Checks are done using the pkey directly from the packet's BTH,
+ * and specifically _not_ the pkey that we attach to the completion,
+ * which may be different.
+ * - These checks are specifically for "non-local" SMPs (i.e., SMPs
+ * which originated on another node). SMPs which are sent from, and
+ * destined to this node are checked in opa_local_smp_check().
+ *
+ * At the point where opa_smp_check() is called, we know:
+ * - destination QP is QP0
+ *
+ * opa_smp_check() returns 0 if all checks succeed, 1 otherwise.
+ */
+static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
+ struct rvt_qp *qp, u16 slid, struct opa_smp *smp)
+{
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+ /*
+ * I don't think it's possible for us to get here with sc != 0xf,
+ * but check it to be certain.
+ */
+ if (sc5 != 0xf)
+ return 1;
+
+ if (rcv_pkey_check(ppd, pkey, sc5, slid))
+ return 1;
+
+ /*
+ * At this point we know (and so don't need to check again) that
+ * the pkey is either LIM_MGMT_P_KEY, or FULL_MGMT_P_KEY
+ * (see ingress_pkey_check).
+ */
+ if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE &&
+ smp->mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED) {
+ ingress_pkey_table_fail(ppd, pkey, slid);
+ return 1;
+ }
+
+ /*
+ * SMPs fall into one of four (disjoint) categories:
+ * SMA request, SMA response, SMA trap, or SMA trap repress.
+ * Our response depends, in part, on which type of SMP we're
+ * processing.
+ *
+ * If this is an SMA response, skip the check here.
+ *
+ * If this is an SMA request or SMA trap repress:
+ * - pkey != FULL_MGMT_P_KEY =>
+ * increment port recv constraint errors, drop MAD
+ *
+ * Otherwise:
+ * - accept if the port is running an SM
+ * - drop MAD if it's an SMA trap
+ * - pkey == FULL_MGMT_P_KEY =>
+ * reply with unsupported method
+ * - pkey != FULL_MGMT_P_KEY =>
+ * increment port recv constraint errors, drop MAD
+ */
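+ /*
+ * For example (illustrative): an SMA Get arriving with
+ * LIM_MGMT_P_KEY is counted as a pkey table failure and dropped,
+ * while the same Get arriving with FULL_MGMT_P_KEY is accepted.
+ */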
+ switch (smp->method) {
+ case IB_MGMT_METHOD_GET_RESP:
+ case IB_MGMT_METHOD_REPORT_RESP:
+ break;
+ case IB_MGMT_METHOD_GET:
+ case IB_MGMT_METHOD_SET:
+ case IB_MGMT_METHOD_REPORT:
+ case IB_MGMT_METHOD_TRAP_REPRESS:
+ if (pkey != FULL_MGMT_P_KEY) {
+ ingress_pkey_table_fail(ppd, pkey, slid);
+ return 1;
+ }
+ break;
+ default:
+ if (ibp->rvp.port_cap_flags & IB_PORT_SM)
+ return 0;
+ if (smp->method == IB_MGMT_METHOD_TRAP)
+ return 1;
+ if (pkey == FULL_MGMT_P_KEY) {
+ smp->status |= IB_SMP_UNSUP_METHOD;
+ return 0;
+ }
+ ingress_pkey_table_fail(ppd, pkey, slid);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * hfi1_ud_rcv - receive an incoming UD packet
+ * @packet: the packet structure
+ *
+ * This is called from qp_rcv() to process an incoming UD packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+void hfi1_ud_rcv(struct hfi1_packet *packet)
+{
+ u32 hdrsize = packet->hlen;
+ struct ib_wc wc;
+ u32 src_qp;
+ u16 pkey;
+ int mgmt_pkey_idx = -1;
+ struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ void *data = packet->payload;
+ u32 tlen = packet->tlen;
+ struct rvt_qp *qp = packet->qp;
+ u8 sc5 = packet->sc;
+ u8 sl_from_sc;
+ u8 opcode = packet->opcode;
+ u8 sl = packet->sl;
+ u32 dlid = packet->dlid;
+ u32 slid = packet->slid;
+ u8 extra_bytes;
+ u8 l4 = 0;
+ bool dlid_is_permissive;
+ bool slid_is_permissive;
+ bool solicited = false;
+
+ extra_bytes = packet->pad + packet->extra_byte + (SIZE_OF_CRC << 2);
+
+ if (packet->etype == RHF_RCV_TYPE_BYPASS) {
+ u32 permissive_lid =
+ opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B);
+
+ l4 = hfi1_16B_get_l4(packet->hdr);
+ pkey = hfi1_16B_get_pkey(packet->hdr);
+ dlid_is_permissive = (dlid == permissive_lid);
+ slid_is_permissive = (slid == permissive_lid);
+ } else {
+ pkey = ib_bth_get_pkey(packet->ohdr);
+ dlid_is_permissive = (dlid == be16_to_cpu(IB_LID_PERMISSIVE));
+ slid_is_permissive = (slid == be16_to_cpu(IB_LID_PERMISSIVE));
+ }
+ sl_from_sc = ibp->sc_to_sl[sc5];
+
+ if (likely(l4 != OPA_16B_L4_FM)) {
+ src_qp = ib_get_sqpn(packet->ohdr);
+ solicited = ib_bth_is_solicited(packet->ohdr);
+ } else {
+ src_qp = hfi1_16B_get_src_qpn(packet->mgmt);
+ }
+
+ process_ecn(qp, packet);
+ /*
+ * Get the number of bytes the message was padded by
+ * and drop incomplete packets.
+ */
+ if (unlikely(tlen < (hdrsize + extra_bytes)))
+ goto drop;
+
+ tlen -= hdrsize + extra_bytes;
+
+ /*
+ * Check that the permissive LID is only used on QP0
+ * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
+ */
+ if (qp->ibqp.qp_num) {
+ if (unlikely(dlid_is_permissive || slid_is_permissive))
+ goto drop;
+ if (qp->ibqp.qp_num > 1) {
+ if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
+ /*
+ * Traps will not be sent for packets dropped
+ * by the HW. This is fine, as sending trap
+ * for invalid pkeys is optional according to
+ * IB spec (release 1.3, section 10.9.4)
+ */
+ hfi1_bad_pkey(ibp,
+ pkey, sl,
+ src_qp, qp->ibqp.qp_num,
+ slid, dlid);
+ return;
+ }
+ } else {
+ /* GSI packet */
+ mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
+ if (mgmt_pkey_idx < 0)
+ goto drop;
+ }
+ if (unlikely(l4 != OPA_16B_L4_FM &&
+ ib_get_qkey(packet->ohdr) != qp->qkey))
+ return; /* Silent drop */
+
+ /* Drop invalid MAD packets (see 13.5.3.1). */
+ if (unlikely(qp->ibqp.qp_num == 1 &&
+ (tlen > 2048 || (sc5 == 0xF))))
+ goto drop;
+ } else {
+ /* Received on QP0, and so by definition, this is an SMP */
+ struct opa_smp *smp = (struct opa_smp *)data;
+
+ if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
+ goto drop;
+
+ if (tlen > 2048)
+ goto drop;
+ if ((dlid_is_permissive || slid_is_permissive) &&
+ smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ goto drop;
+
+ /* look up SMI pkey */
+ mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
+ if (mgmt_pkey_idx < 0)
+ goto drop;
+ }
+
+ if (qp->ibqp.qp_num > 1 &&
+ opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
+ wc.ex.imm_data = packet->ohdr->u.ud.imm_data;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
+ wc.ex.imm_data = 0;
+ wc.wc_flags = 0;
+ } else {
+ goto drop;
+ }
+
+ /*
+ * A GRH is expected to precede the data even if not
+ * present on the wire.
+ */
+ wc.byte_len = tlen + sizeof(struct ib_grh);
+
+ /*
+ * Get the next work request entry to find where to put the data.
+ */
+ if (qp->r_flags & RVT_R_REUSE_SGE) {
+ qp->r_flags &= ~RVT_R_REUSE_SGE;
+ } else {
+ int ret;
+
+ ret = rvt_get_rwqe(qp, false);
+ if (ret < 0) {
+ rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ return;
+ }
+ if (!ret) {
+ if (qp->ibqp.qp_num == 0)
+ ibp->rvp.n_vl15_dropped++;
+ return;
+ }
+ }
+ /* Silently drop packets which are too big. */
+ if (unlikely(wc.byte_len > qp->r_len)) {
+ qp->r_flags |= RVT_R_REUSE_SGE;
+ goto drop;
+ }
+ if (packet->grh) {
+ rvt_copy_sge(qp, &qp->r_sge, packet->grh,
+ sizeof(struct ib_grh), true, false);
+ wc.wc_flags |= IB_WC_GRH;
+ } else if (packet->etype == RHF_RCV_TYPE_BYPASS) {
+ struct ib_grh grh;
+ /*
+ * We assume 16B packets were only created on the send
+ * side when large LIDs are in use; since the GRH was
+ * stripped out when the 16B packet was built, add it
+ * back here.
+ */
+ hfi1_make_ext_grh(packet, &grh, slid, dlid);
+ rvt_copy_sge(qp, &qp->r_sge, &grh,
+ sizeof(struct ib_grh), true, false);
+ wc.wc_flags |= IB_WC_GRH;
+ } else {
+ rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
+ }
+ rvt_copy_sge(qp, &qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
+ true, false);
+ rvt_put_ss(&qp->r_sge);
+ if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
+ return;
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.opcode = IB_WC_RECV;
+ wc.vendor_err = 0;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = src_qp;
+
+ if (qp->ibqp.qp_type == IB_QPT_GSI ||
+ qp->ibqp.qp_type == IB_QPT_SMI) {
+ if (mgmt_pkey_idx < 0) {
+ if (net_ratelimit()) {
+ struct hfi1_devdata *dd = ppd->dd;
+
+ dd_dev_err(dd, "QP type %d mgmt_pkey_idx < 0 and packet not dropped???\n",
+ qp->ibqp.qp_type);
+ mgmt_pkey_idx = 0;
+ }
+ }
+ wc.pkey_index = (unsigned)mgmt_pkey_idx;
+ } else {
+ wc.pkey_index = 0;
+ }
+ if (slid_is_permissive)
+ slid = be32_to_cpu(OPA_LID_PERMISSIVE);
+ wc.slid = slid & U16_MAX;
+ wc.sl = sl_from_sc;
+
+ /*
+ * Save the LMC lower bits if the destination LID is a unicast LID.
+ */
+ wc.dlid_path_bits = hfi1_check_mcast(dlid) ? 0 :
+ dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
+ wc.port_num = qp->port_num;
+ /* Signal completion event if the solicited bit is set. */
+ rvt_recv_cq(qp, &wc, solicited);
+ return;
+
+drop:
+ ibp->rvp.n_pkt_drops++;
+}
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
new file mode 100644
index 000000000000..62b4f16dab27
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -0,0 +1,966 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
+ * Copyright(c) 2015-2018 Intel Corporation.
+ */
+#include <asm/page.h>
+#include <linux/string.h>
+
+#include "mmu_rb.h"
+#include "user_exp_rcv.h"
+#include "trace.h"
+
+static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
+ struct exp_tid_set *set,
+ struct hfi1_filedata *fd);
+static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages);
+static int set_rcvarray_entry(struct hfi1_filedata *fd,
+ struct tid_user_buf *tbuf,
+ u32 rcventry, struct tid_group *grp,
+ u16 pageidx, unsigned int npages);
+static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
+ struct tid_rb_node *tnode);
+static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq);
+static bool tid_cover_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq);
+static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *,
+ struct tid_group *grp, u16 count,
+ u32 *tidlist, unsigned int *tididx,
+ unsigned int *pmapped);
+static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo);
+static void __clear_tid_node(struct hfi1_filedata *fd,
+ struct tid_rb_node *node);
+static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);
+
+static const struct mmu_interval_notifier_ops tid_mn_ops = {
+ .invalidate = tid_rb_invalidate,
+};
+static const struct mmu_interval_notifier_ops tid_cover_ops = {
+ .invalidate = tid_cover_invalidate,
+};
+
+/*
+ * Initialize context and file private data needed for Expected
+ * receive caching. This needs to be done after the context has
+ * been configured with the eager/expected RcvEntry counts.
+ */
+int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
+ struct hfi1_ctxtdata *uctxt)
+{
+ int ret = 0;
+
+ fd->entry_to_rb = kcalloc(uctxt->expected_count,
+ sizeof(*fd->entry_to_rb),
+ GFP_KERNEL);
+ if (!fd->entry_to_rb)
+ return -ENOMEM;
+
+ if (!HFI1_CAP_UGET_MASK(uctxt->flags, TID_UNMAP)) {
+ fd->invalid_tid_idx = 0;
+ fd->invalid_tids = kcalloc(uctxt->expected_count,
+ sizeof(*fd->invalid_tids),
+ GFP_KERNEL);
+ if (!fd->invalid_tids) {
+ kfree(fd->entry_to_rb);
+ fd->entry_to_rb = NULL;
+ return -ENOMEM;
+ }
+ fd->use_mn = true;
+ }
+
+ /*
+ * PSM does not have a good way to separate, count, and
+ * effectively enforce a limit on RcvArray entries used by
+ * subctxts (when context sharing is used) when TID caching
+ * is enabled. To help with that, we calculate a per-process
+ * RcvArray entry share and enforce that.
+ * If TID caching is not in use, PSM deals with usage on its
+ * own. In that case, we allow any subctxt to take all of the
+ * entries.
+ *
+ * Make sure that we set the tid counts only after successful
+ * init.
+ */
+ spin_lock(&fd->tid_lock);
+ if (uctxt->subctxt_cnt && fd->use_mn) {
+ u16 remainder;
+
+ fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt;
+ remainder = uctxt->expected_count % uctxt->subctxt_cnt;
+ if (remainder && fd->subctxt < remainder)
+ fd->tid_limit++;
+ } else {
+ fd->tid_limit = uctxt->expected_count;
+ }
+ spin_unlock(&fd->tid_lock);
+
+ return ret;
+}
+
+void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
+{
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+
+ mutex_lock(&uctxt->exp_mutex);
+ if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
+ unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
+ if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
+ unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
+ mutex_unlock(&uctxt->exp_mutex);
+
+ kfree(fd->invalid_tids);
+ fd->invalid_tids = NULL;
+
+ kfree(fd->entry_to_rb);
+ fd->entry_to_rb = NULL;
+}
+
+/*
+ * Release pinned receive buffer pages.
+ *
+ * @mapped: true if the pages have been DMA mapped. false otherwise.
+ * @idx: Index of the first page to unpin.
+ * @npages: Number of pages to unpin.
+ *
+ * If the pages have been DMA mapped (indicated by mapped parameter), their
+ * info will be passed via a struct tid_rb_node. If they haven't been mapped,
+ * their info will be passed via a struct tid_user_buf.
+ */
+static void unpin_rcv_pages(struct hfi1_filedata *fd,
+ struct tid_user_buf *tidbuf,
+ struct tid_rb_node *node,
+ unsigned int idx,
+ unsigned int npages,
+ bool mapped)
+{
+ struct page **pages;
+ struct hfi1_devdata *dd = fd->uctxt->dd;
+ struct mm_struct *mm;
+
+ if (mapped) {
+ dma_unmap_single(&dd->pcidev->dev, node->dma_addr,
+ node->npages * PAGE_SIZE, DMA_FROM_DEVICE);
+ pages = &node->pages[idx];
+ mm = mm_from_tid_node(node);
+ } else {
+ pages = &tidbuf->pages[idx];
+ mm = current->mm;
+ }
+ hfi1_release_user_pages(mm, pages, npages, mapped);
+ fd->tid_n_pinned -= npages;
+}
+
+/*
+ * Pin receive buffer pages.
+ */
+static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
+{
+ int pinned;
+ unsigned int npages = tidbuf->npages;
+ unsigned long vaddr = tidbuf->vaddr;
+ struct page **pages = NULL;
+ struct hfi1_devdata *dd = fd->uctxt->dd;
+
+ if (npages > fd->uctxt->expected_count) {
+ dd_dev_err(dd, "Expected buffer too big\n");
+ return -EINVAL;
+ }
+
+ /* Allocate the array of struct page pointers needed for pinning */
+ pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ /*
+ * Pin all the pages of the user buffer. If we can't pin all the
+ * pages, accept the amount pinned so far and program only that.
+ * User space knows how to deal with partially programmed buffers.
+ */
+ if (!hfi1_can_pin_pages(dd, current->mm, fd->tid_n_pinned, npages)) {
+ kfree(pages);
+ return -ENOMEM;
+ }
+
+ pinned = hfi1_acquire_user_pages(current->mm, vaddr, npages, true, pages);
+ if (pinned <= 0) {
+ kfree(pages);
+ return pinned;
+ }
+ tidbuf->pages = pages;
+ fd->tid_n_pinned += pinned;
+ return pinned;
+}
+
+/*
+ * RcvArray entry allocation for Expected Receives is done by the
+ * following algorithm:
+ *
+ * The context keeps 3 lists of groups of RcvArray entries:
+ * 1. List of empty groups - tid_group_list
+ * This list is created during user context creation and
+ * contains elements which describe sets (of 8) of empty
+ * RcvArray entries.
+ * 2. List of partially used groups - tid_used_list
+ * This list contains sets of RcvArray entries which are
+ * not completely used up. Another mapping request could
+ * use some or all of the remaining entries.
+ * 3. List of full groups - tid_full_list
+ * This is the list where sets that are completely used
+ * up go.
+ *
+ * An attempt to optimize the usage of RcvArray entries is
+ * made by finding all sets of physically contiguous pages in a
+ * user's buffer.
+ * These physically contiguous sets are further split into
+ * sizes supported by the receive engine of the HFI. The
+ * resulting sets of pages are stored in struct tid_pageset,
+ * which describes the sets as:
+ * * .count - number of pages in this set
+ * * .idx - starting index into struct page ** array
+ * of this set
+ *
+ * From this point on, the algorithm deals with the page sets
+ * described above. The number of pagesets is divided by the
+ * RcvArray group size to produce the number of full groups
+ * needed.
+ *
+ * Groups from the 3 lists are manipulated using the following
+ * rules:
+ * 1. For each set of 8 pagesets, a complete group from
+ * tid_group_list is taken, programmed, and moved to
+ * the tid_full_list list.
+ * 2. For all remaining pagesets:
+ * 2.1 If the tid_used_list is empty and the tid_group_list
+ * is empty, stop processing pagesets and return only
+ * what has been programmed up to this point.
+ * 2.2 If the tid_used_list is empty and the tid_group_list
+ * is not empty, move a group from tid_group_list to
+ * tid_used_list.
+ * 2.3 For each group in tid_used_list, program as much as
+ * can fit into the group. If the group becomes fully
+ * used, move it to tid_full_list.
+ */
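+/*
+ * Illustrative walk-through (hypothetical numbers, group size of 8):
+ * a buffer that breaks down into 21 pagesets programs two complete
+ * groups (16 pagesets) taken from tid_group_list and moved to
+ * tid_full_list; the remaining 5 pagesets go into a group pulled from
+ * tid_group_list onto tid_used_list.
+ */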
+int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
+ struct hfi1_tid_info *tinfo)
+{
+ int ret = 0, need_group = 0, pinned;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ struct hfi1_devdata *dd = uctxt->dd;
+ unsigned int ngroups, pageset_count,
+ tididx = 0, mapped, mapped_pages = 0;
+ u32 *tidlist = NULL;
+ struct tid_user_buf *tidbuf;
+ unsigned long mmu_seq = 0;
+
+ if (!PAGE_ALIGNED(tinfo->vaddr))
+ return -EINVAL;
+ if (tinfo->length == 0)
+ return -EINVAL;
+
+ tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
+ if (!tidbuf)
+ return -ENOMEM;
+
+ mutex_init(&tidbuf->cover_mutex);
+ tidbuf->vaddr = tinfo->vaddr;
+ tidbuf->length = tinfo->length;
+ tidbuf->npages = num_user_pages(tidbuf->vaddr, tidbuf->length);
+ tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
+ GFP_KERNEL);
+ if (!tidbuf->psets) {
+ ret = -ENOMEM;
+ goto fail_release_mem;
+ }
+
+ if (fd->use_mn) {
+ ret = mmu_interval_notifier_insert(
+ &tidbuf->notifier, current->mm,
+ tidbuf->vaddr, tidbuf->npages * PAGE_SIZE,
+ &tid_cover_ops);
+ if (ret)
+ goto fail_release_mem;
+ mmu_seq = mmu_interval_read_begin(&tidbuf->notifier);
+ }
+
+ pinned = pin_rcv_pages(fd, tidbuf);
+ if (pinned <= 0) {
+ ret = (pinned < 0) ? pinned : -ENOSPC;
+ goto fail_unpin;
+ }
+
+ /* Find sets of physically contiguous pages */
+ tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);
+
+ /* Reserve the number of expected tids to be used. */
+ spin_lock(&fd->tid_lock);
+ if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
+ pageset_count = fd->tid_limit - fd->tid_used;
+ else
+ pageset_count = tidbuf->n_psets;
+ fd->tid_used += pageset_count;
+ spin_unlock(&fd->tid_lock);
+
+ if (!pageset_count) {
+ ret = -ENOSPC;
+ goto fail_unreserve;
+ }
+
+ ngroups = pageset_count / dd->rcv_entries.group_size;
+ tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
+ if (!tidlist) {
+ ret = -ENOMEM;
+ goto fail_unreserve;
+ }
+
+ tididx = 0;
+
+ /*
+ * From this point on, we are going to be using shared (between master
+ * and subcontexts) context resources. We need to take the lock.
+ */
+ mutex_lock(&uctxt->exp_mutex);
+ /*
+ * The first step is to program the RcvArray entries which are complete
+ * groups.
+ */
+ while (ngroups && uctxt->tid_group_list.count) {
+ struct tid_group *grp =
+ tid_group_pop(&uctxt->tid_group_list);
+
+ ret = program_rcvarray(fd, tidbuf, grp,
+ dd->rcv_entries.group_size,
+ tidlist, &tididx, &mapped);
+ /*
+ * If there was a failure to program the RcvArray
+ * entries for the entire group, reset the grp fields
+ * and add the grp back to the free group list.
+ */
+ if (ret <= 0) {
+ tid_group_add_tail(grp, &uctxt->tid_group_list);
+ hfi1_cdbg(TID,
+ "Failed to program RcvArray group %d", ret);
+ goto unlock;
+ }
+
+ tid_group_add_tail(grp, &uctxt->tid_full_list);
+ ngroups--;
+ mapped_pages += mapped;
+ }
+
+ while (tididx < pageset_count) {
+ struct tid_group *grp, *ptr;
+ /*
+ * If we don't have any partially used tid groups, check
+ * if we have empty groups. If so, take one from there and
+ * put in the partially used list.
+ */
+ if (!uctxt->tid_used_list.count || need_group) {
+ if (!uctxt->tid_group_list.count)
+ goto unlock;
+
+ grp = tid_group_pop(&uctxt->tid_group_list);
+ tid_group_add_tail(grp, &uctxt->tid_used_list);
+ need_group = 0;
+ }
+ /*
+ * There is an optimization opportunity here - instead of
+ * fitting as many page sets as we can, check for a group
+ * later on in the list that could fit all of them.
+ */
+ list_for_each_entry_safe(grp, ptr, &uctxt->tid_used_list.list,
+ list) {
+ unsigned use = min_t(unsigned, pageset_count - tididx,
+ grp->size - grp->used);
+
+ ret = program_rcvarray(fd, tidbuf, grp,
+ use, tidlist,
+ &tididx, &mapped);
+ if (ret < 0) {
+ hfi1_cdbg(TID,
+ "Failed to program RcvArray entries %d",
+ ret);
+ goto unlock;
+ } else if (ret > 0) {
+ if (grp->used == grp->size)
+ tid_group_move(grp,
+ &uctxt->tid_used_list,
+ &uctxt->tid_full_list);
+ mapped_pages += mapped;
+ need_group = 0;
+ /* Check if we are done so we break out early */
+ if (tididx >= pageset_count)
+ break;
+ } else if (WARN_ON(ret == 0)) {
+ /*
+ * If ret is 0, we did not program any entries
+ * into this group, which can only happen if
+ * we've screwed up the accounting somewhere.
+ * Warn and try to continue.
+ */
+ need_group = 1;
+ }
+ }
+ }
+unlock:
+ mutex_unlock(&uctxt->exp_mutex);
+ hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
+ mapped_pages, ret);
+
+ /* fail if nothing was programmed, set error if none provided */
+ if (tididx == 0) {
+ if (ret >= 0)
+ ret = -ENOSPC;
+ goto fail_unreserve;
+ }
+
+ /* adjust reserved tid_used to actual count */
+ spin_lock(&fd->tid_lock);
+ fd->tid_used -= pageset_count - tididx;
+ spin_unlock(&fd->tid_lock);
+
+ /* unpin all pages not covered by a TID */
+ unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, pinned - mapped_pages,
+ false);
+
+ if (fd->use_mn) {
+ /* check for an invalidate during setup */
+ bool fail = false;
+
+ mutex_lock(&tidbuf->cover_mutex);
+ fail = mmu_interval_read_retry(&tidbuf->notifier, mmu_seq);
+ mutex_unlock(&tidbuf->cover_mutex);
+
+ if (fail) {
+ ret = -EBUSY;
+ goto fail_unprogram;
+ }
+ }
+
+ tinfo->tidcnt = tididx;
+ tinfo->length = mapped_pages * PAGE_SIZE;
+
+ if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
+ tidlist, sizeof(tidlist[0]) * tididx)) {
+ ret = -EFAULT;
+ goto fail_unprogram;
+ }
+
+ if (fd->use_mn)
+ mmu_interval_notifier_remove(&tidbuf->notifier);
+ kfree(tidbuf->pages);
+ kfree(tidbuf->psets);
+ kfree(tidbuf);
+ kfree(tidlist);
+ return 0;
+
+fail_unprogram:
+ /* unprogram, unmap, and unpin all allocated TIDs */
+ tinfo->tidlist = (unsigned long)tidlist;
+ hfi1_user_exp_rcv_clear(fd, tinfo);
+ tinfo->tidlist = 0;
+ pinned = 0; /* nothing left to unpin */
+ pageset_count = 0; /* nothing left reserved */
+fail_unreserve:
+ spin_lock(&fd->tid_lock);
+ fd->tid_used -= pageset_count;
+ spin_unlock(&fd->tid_lock);
+fail_unpin:
+ if (fd->use_mn)
+ mmu_interval_notifier_remove(&tidbuf->notifier);
+ if (pinned > 0)
+ unpin_rcv_pages(fd, tidbuf, NULL, 0, pinned, false);
+fail_release_mem:
+ kfree(tidbuf->pages);
+ kfree(tidbuf->psets);
+ kfree(tidbuf);
+ kfree(tidlist);
+ return ret;
+}
+
+int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
+ struct hfi1_tid_info *tinfo)
+{
+ int ret = 0;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ u32 *tidinfo;
+ unsigned tididx;
+
+ if (unlikely(tinfo->tidcnt > fd->tid_used))
+ return -EINVAL;
+
+ tidinfo = memdup_array_user(u64_to_user_ptr(tinfo->tidlist),
+ tinfo->tidcnt, sizeof(tidinfo[0]));
+ if (IS_ERR(tidinfo))
+ return PTR_ERR(tidinfo);
+
+ mutex_lock(&uctxt->exp_mutex);
+ for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
+ ret = unprogram_rcvarray(fd, tidinfo[tididx]);
+ if (ret) {
+ hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
+ ret);
+ break;
+ }
+ }
+ spin_lock(&fd->tid_lock);
+ fd->tid_used -= tididx;
+ spin_unlock(&fd->tid_lock);
+ tinfo->tidcnt = tididx;
+ mutex_unlock(&uctxt->exp_mutex);
+
+ kfree(tidinfo);
+ return ret;
+}
+
+int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
+ struct hfi1_tid_info *tinfo)
+{
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ unsigned long *ev = uctxt->dd->events +
+ (uctxt_offset(uctxt) + fd->subctxt);
+ u32 *array;
+ int ret = 0;
+
+ /*
+ * copy_to_user() can sleep, which will leave the invalid_lock
+ * locked and cause the MMU notifier to be blocked on the lock
+ * for a long time.
+ * Copy the data to a local buffer so we can release the lock.
+ */
+ array = kcalloc(uctxt->expected_count, sizeof(*array), GFP_KERNEL);
+ if (!array)
+ return -EFAULT;
+
+ spin_lock(&fd->invalid_lock);
+ if (fd->invalid_tid_idx) {
+ memcpy(array, fd->invalid_tids, sizeof(*array) *
+ fd->invalid_tid_idx);
+ memset(fd->invalid_tids, 0, sizeof(*fd->invalid_tids) *
+ fd->invalid_tid_idx);
+ tinfo->tidcnt = fd->invalid_tid_idx;
+ fd->invalid_tid_idx = 0;
+ /*
+ * Reset the user flag while still holding the lock.
+ * Otherwise, PSM can miss events.
+ */
+ clear_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
+ } else {
+ tinfo->tidcnt = 0;
+ }
+ spin_unlock(&fd->invalid_lock);
+
+ if (tinfo->tidcnt) {
+ if (copy_to_user((void __user *)tinfo->tidlist,
+ array, sizeof(*array) * tinfo->tidcnt))
+ ret = -EFAULT;
+ }
+ kfree(array);
+
+ return ret;
+}
+
+static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages)
+{
+ unsigned pagecount, pageidx, setcount = 0, i;
+ unsigned long pfn, this_pfn;
+ struct page **pages = tidbuf->pages;
+ struct tid_pageset *list = tidbuf->psets;
+
+ if (!npages)
+ return 0;
+
+ /*
+ * Look for sets of physically contiguous pages in the user buffer.
+ * This will allow us to optimize Expected RcvArray entry usage by
+ * using the bigger supported sizes.
+ */
+ pfn = page_to_pfn(pages[0]);
+ for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
+ this_pfn = i < npages ? page_to_pfn(pages[i]) : 0;
+
+ /*
+ * If the pfn's are not sequential, pages are not physically
+ * contiguous.
+ */
+ if (this_pfn != ++pfn) {
+ /*
+ * At this point we have to loop over the set of
+ * physically contiguous pages and break them down into
+ * sizes supported by the HW.
+ * There are two main constraints:
+ * 1. The max buffer size is MAX_EXPECTED_BUFFER.
+ * If the total set size is bigger than that
+ * program only a MAX_EXPECTED_BUFFER chunk.
+ * 2. The buffer size has to be a power of two. If
+ * it is not, round down to the closest power of
+ * 2 and program that size.
+ */
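+ /*
+ * Illustrative example (assuming 4 KiB pages and a
+ * MAX_EXPECTED_BUFFER larger than the run): a run of 6
+ * contiguous pages is 24 KiB, which is not a power of
+ * two, so the first chunk programmed is rounded down to
+ * 16 KiB (4 pages); the remaining 2 pages then form an
+ * 8 KiB chunk.
+ */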
+ while (pagecount) {
+ int maxpages = pagecount;
+ u32 bufsize = pagecount * PAGE_SIZE;
+
+ if (bufsize > MAX_EXPECTED_BUFFER)
+ maxpages =
+ MAX_EXPECTED_BUFFER >>
+ PAGE_SHIFT;
+ else if (!is_power_of_2(bufsize))
+ maxpages =
+ rounddown_pow_of_two(bufsize) >>
+ PAGE_SHIFT;
+
+ list[setcount].idx = pageidx;
+ list[setcount].count = maxpages;
+ pagecount -= maxpages;
+ pageidx += maxpages;
+ setcount++;
+ }
+ pageidx = i;
+ pagecount = 1;
+ pfn = this_pfn;
+ } else {
+ pagecount++;
+ }
+ }
+ return setcount;
+}
+
+/**
+ * program_rcvarray() - program an RcvArray group with receive buffers
+ * @fd: filedata pointer
+ * @tbuf: pointer to struct tid_user_buf that has the user buffer starting
+ * virtual address, buffer length, page pointers, pagesets (array of
+ * struct tid_pageset holding information on physically contiguous
+ * chunks from the user buffer), and other fields.
+ * @grp: RcvArray group
+ * @count: number of struct tid_pageset's to program
+ * @tidlist: the array of u32 elements where the information about the
+ * programmed RcvArray entries is to be encoded.
+ * @tididx: starting offset into tidlist
+ * @pmapped: (output parameter) number of pages programmed into the RcvArray
+ * entries.
+ *
+ * This function will program up to 'count' number of RcvArray entries from the
+ * group 'grp'. To make best use of write-combining writes, the function will
+ * perform writes to the unused RcvArray entries which will be ignored by the
+ * HW. Each RcvArray entry will be programmed with a physically contiguous
+ * buffer chunk from the user's virtual buffer.
+ *
+ * Return:
+ * -EINVAL if the requested count is larger than the size of the group,
+ * -ENOMEM or -EFAULT on error from set_rcvarray_entry(), or
+ * number of RcvArray entries programmed.
+ */
+static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *tbuf,
+ struct tid_group *grp, u16 count,
+ u32 *tidlist, unsigned int *tididx,
+ unsigned int *pmapped)
+{
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ struct hfi1_devdata *dd = uctxt->dd;
+ u16 idx;
+ unsigned int start = *tididx;
+ u32 tidinfo = 0, rcventry, useidx = 0;
+ int mapped = 0;
+
+ /* Count should never be larger than the group size */
+ if (count > grp->size)
+ return -EINVAL;
+
+ /* Find the first unused entry in the group */
+ for (idx = 0; idx < grp->size; idx++) {
+ if (!(grp->map & (1 << idx))) {
+ useidx = idx;
+ break;
+ }
+ rcv_array_wc_fill(dd, grp->base + idx);
+ }
+
+ idx = 0;
+ while (idx < count) {
+ u16 npages, pageidx, setidx = start + idx;
+ int ret = 0;
+
+ /*
+ * If this entry in the group is used, move to the next one.
+ * If we go past the end of the group, exit the loop.
+ */
+ if (useidx >= grp->size) {
+ break;
+ } else if (grp->map & (1 << useidx)) {
+ rcv_array_wc_fill(dd, grp->base + useidx);
+ useidx++;
+ continue;
+ }
+
+ rcventry = grp->base + useidx;
+ npages = tbuf->psets[setidx].count;
+ pageidx = tbuf->psets[setidx].idx;
+
+ ret = set_rcvarray_entry(fd, tbuf,
+ rcventry, grp, pageidx,
+ npages);
+ if (ret)
+ return ret;
+ mapped += npages;
+
+ tidinfo = create_tid(rcventry - uctxt->expected_base, npages);
+ tidlist[(*tididx)++] = tidinfo;
+ grp->used++;
+ grp->map |= 1 << useidx++;
+ idx++;
+ }
+
+ /* Fill the rest of the group with "blank" writes */
+ for (; useidx < grp->size; useidx++)
+ rcv_array_wc_fill(dd, grp->base + useidx);
+ *pmapped = mapped;
+ return idx;
+}
+
+static int set_rcvarray_entry(struct hfi1_filedata *fd,
+ struct tid_user_buf *tbuf,
+ u32 rcventry, struct tid_group *grp,
+ u16 pageidx, unsigned int npages)
+{
+ int ret;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ struct tid_rb_node *node;
+ struct hfi1_devdata *dd = uctxt->dd;
+ dma_addr_t phys;
+ struct page **pages = tbuf->pages + pageidx;
+
+ /*
+ * Allocate the node first so we can handle a potential
+ * failure before we've programmed anything.
+ */
+ node = kzalloc(struct_size(node, pages, npages), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ phys = dma_map_single(&dd->pcidev->dev, __va(page_to_phys(pages[0])),
+ npages * PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&dd->pcidev->dev, phys)) {
+ dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n",
+ phys);
+ kfree(node);
+ return -EFAULT;
+ }
+
+ node->fdata = fd;
+ mutex_init(&node->invalidate_mutex);
+ node->phys = page_to_phys(pages[0]);
+ node->npages = npages;
+ node->rcventry = rcventry;
+ node->dma_addr = phys;
+ node->grp = grp;
+ node->freed = false;
+ memcpy(node->pages, pages, flex_array_size(node, pages, npages));
+
+ if (fd->use_mn) {
+ ret = mmu_interval_notifier_insert(
+ &node->notifier, current->mm,
+ tbuf->vaddr + (pageidx * PAGE_SIZE), npages * PAGE_SIZE,
+ &tid_mn_ops);
+ if (ret)
+ goto out_unmap;
+ }
+ fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node;
+
+ hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1);
+ trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages,
+ node->notifier.interval_tree.start, node->phys,
+ phys);
+ return 0;
+
+out_unmap:
+ hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
+ node->rcventry, node->notifier.interval_tree.start,
+ node->phys, ret);
+ dma_unmap_single(&dd->pcidev->dev, phys, npages * PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ kfree(node);
+ return -EFAULT;
+}
+
+static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo)
+{
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ struct hfi1_devdata *dd = uctxt->dd;
+ struct tid_rb_node *node;
+ u32 tidctrl = EXP_TID_GET(tidinfo, CTRL);
+ u32 tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry;
+
+ if (tidctrl == 0x3 || tidctrl == 0x0)
+ return -EINVAL;
+
+ rcventry = tididx + (tidctrl - 1);
+
+ if (rcventry >= uctxt->expected_count) {
+ dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n",
+ rcventry, uctxt->ctxt);
+ return -EINVAL;
+ }
+
+ node = fd->entry_to_rb[rcventry];
+ if (!node || node->rcventry != (uctxt->expected_base + rcventry))
+ return -EBADF;
+
+ if (fd->use_mn)
+ mmu_interval_notifier_remove(&node->notifier);
+ cacheless_tid_rb_remove(fd, node);
+
+ return 0;
+}
+
+static void __clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
+{
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ struct hfi1_devdata *dd = uctxt->dd;
+
+ mutex_lock(&node->invalidate_mutex);
+ if (node->freed)
+ goto done;
+ node->freed = true;
+
+ trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
+ node->npages,
+ node->notifier.interval_tree.start, node->phys,
+ node->dma_addr);
+
+ /* Make sure device has seen the write before pages are unpinned */
+ hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0);
+
+ unpin_rcv_pages(fd, NULL, node, 0, node->npages, true);
+done:
+ mutex_unlock(&node->invalidate_mutex);
+}
+
+static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
+{
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+
+ __clear_tid_node(fd, node);
+
+ node->grp->used--;
+ node->grp->map &= ~(1 << (node->rcventry - node->grp->base));
+
+ if (node->grp->used == node->grp->size - 1)
+ tid_group_move(node->grp, &uctxt->tid_full_list,
+ &uctxt->tid_used_list);
+ else if (!node->grp->used)
+ tid_group_move(node->grp, &uctxt->tid_used_list,
+ &uctxt->tid_group_list);
+ kfree(node);
+}
+
+/*
+ * As a simple helper for hfi1_user_exp_rcv_free, this function deals with
+ * clearing nodes in the non-cached case.
+ */
+static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
+ struct exp_tid_set *set,
+ struct hfi1_filedata *fd)
+{
+ struct tid_group *grp, *ptr;
+ int i;
+
+ list_for_each_entry_safe(grp, ptr, &set->list, list) {
+ list_del_init(&grp->list);
+
+ for (i = 0; i < grp->size; i++) {
+ if (grp->map & (1 << i)) {
+ u16 rcventry = grp->base + i;
+ struct tid_rb_node *node;
+
+ node = fd->entry_to_rb[rcventry -
+ uctxt->expected_base];
+ if (!node || node->rcventry != rcventry)
+ continue;
+
+ if (fd->use_mn)
+ mmu_interval_notifier_remove(
+ &node->notifier);
+ cacheless_tid_rb_remove(fd, node);
+ }
+ }
+ }
+}
+
+static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct tid_rb_node *node =
+ container_of(mni, struct tid_rb_node, notifier);
+ struct hfi1_filedata *fdata = node->fdata;
+ struct hfi1_ctxtdata *uctxt = fdata->uctxt;
+
+ if (node->freed)
+ return true;
+
+ /* take action only if unmapping */
+ if (range->event != MMU_NOTIFY_UNMAP)
+ return true;
+
+ trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt,
+ node->notifier.interval_tree.start,
+ node->rcventry, node->npages, node->dma_addr);
+
+ /* clear the hardware rcvarray entry */
+ __clear_tid_node(fdata, node);
+
+ spin_lock(&fdata->invalid_lock);
+ if (fdata->invalid_tid_idx < uctxt->expected_count) {
+ fdata->invalid_tids[fdata->invalid_tid_idx] =
+ create_tid(node->rcventry - uctxt->expected_base,
+ node->npages);
+ if (!fdata->invalid_tid_idx) {
+ unsigned long *ev;
+
+ /*
+ * hfi1_set_uevent_bits() sets a user event flag
+ * for all processes. Because calling into the
+ * driver to process TID cache invalidations is
+ * expensive and TID cache invalidations are
+ * handled on a per-process basis, we can
+ * optimize this to set the flag only for the
+ * process in question.
+ */
+ ev = uctxt->dd->events +
+ (uctxt_offset(uctxt) + fdata->subctxt);
+ set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
+ }
+ fdata->invalid_tid_idx++;
+ }
+ spin_unlock(&fdata->invalid_lock);
+ return true;
+}
+
+static bool tid_cover_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct tid_user_buf *tidbuf =
+ container_of(mni, struct tid_user_buf, notifier);
+
+ /* take action only if unmapping */
+ if (range->event == MMU_NOTIFY_UNMAP) {
+ mutex_lock(&tidbuf->cover_mutex);
+ mmu_interval_set_seq(mni, cur_seq);
+ mutex_unlock(&tidbuf->cover_mutex);
+ }
+
+ return true;
+}
+
+static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
+ struct tid_rb_node *tnode)
+{
+ u32 base = fdata->uctxt->expected_base;
+
+ fdata->entry_to_rb[tnode->rcventry - base] = NULL;
+ clear_tid_node(fdata, tnode);
+}
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
new file mode 100644
index 000000000000..055726f7c139
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2020 - Cornelis Networks, Inc.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
+ */
+
+#ifndef _HFI1_USER_EXP_RCV_H
+#define _HFI1_USER_EXP_RCV_H
+
+#include "hfi.h"
+#include "exp_rcv.h"
+
+struct tid_pageset {
+ u16 idx;
+ u16 count;
+};
+
+struct tid_user_buf {
+ struct mmu_interval_notifier notifier;
+ struct mutex cover_mutex;
+ unsigned long vaddr;
+ unsigned long length;
+ unsigned int npages;
+ struct page **pages;
+ struct tid_pageset *psets;
+ unsigned int n_psets;
+};
+
+struct tid_rb_node {
+ struct mmu_interval_notifier notifier;
+ struct hfi1_filedata *fdata;
+ struct mutex invalidate_mutex; /* covers hw removal */
+ unsigned long phys;
+ struct tid_group *grp;
+ u32 rcventry;
+ dma_addr_t dma_addr;
+ bool freed;
+ unsigned int npages;
+ struct page *pages[] __counted_by(npages);
+};
+
+static inline int num_user_pages(unsigned long addr,
+ unsigned long len)
+{
+ const unsigned long spage = addr & PAGE_MASK;
+ const unsigned long epage = (addr + len - 1) & PAGE_MASK;
+
+ return 1 + ((epage - spage) >> PAGE_SHIFT);
+}
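+
+/*
+ * Worked example (illustrative, assuming 4 KiB pages): addr = 0x1234 and
+ * len = 0x2000 touch the pages at 0x1000, 0x2000 and 0x3000, so
+ * num_user_pages() returns 1 + ((0x3000 - 0x1000) >> PAGE_SHIFT) = 3.
+ */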
+
+int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
+ struct hfi1_ctxtdata *uctxt);
+void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd);
+int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
+ struct hfi1_tid_info *tinfo);
+int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
+ struct hfi1_tid_info *tinfo);
+int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
+ struct hfi1_tid_info *tinfo);
+
+static inline struct mm_struct *mm_from_tid_node(struct tid_rb_node *node)
+{
+ return node->notifier.mm;
+}
+
+#endif /* _HFI1_USER_EXP_RCV_H */
diff --git a/drivers/infiniband/hw/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c
new file mode 100644
index 000000000000..c77913a7920f
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/user_pages.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015-2017 Intel Corporation.
+ */
+
+#include <linux/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/device.h>
+#include <linux/module.h>
+
+#include "hfi.h"
+
+static unsigned long cache_size = 256;
+module_param(cache_size, ulong, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(cache_size, "Send and receive side cache size limit (in MB)");
+
+/*
+ * Determine whether the caller can pin pages.
+ *
+ * This function should be used in the implementation of buffer caches.
+ * The cache implementation should call this function prior to attempting
+ * to pin buffer pages in order to determine whether they should do so.
+ * The function computes cache limits based on the configured ulimit and
+ * cache size. Use of this function is especially important for caches
+ * which are not limited in any other way (e.g. by HW resources) and, thus,
+ * could keep caching buffers indefinitely.
+ *
+ */
+bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
+ u32 nlocked, u32 npages)
+{
+ unsigned long ulimit_pages;
+ unsigned long cache_limit_pages;
+ unsigned int usr_ctxts;
+
+ /*
+ * Perform RLIMIT_MEMLOCK based checks unless CAP_IPC_LOCK is present.
+ */
+ if (!capable(CAP_IPC_LOCK)) {
+ ulimit_pages =
+ DIV_ROUND_DOWN_ULL(rlimit(RLIMIT_MEMLOCK), PAGE_SIZE);
+
+ /*
+ * Pinning these pages would exceed this process's locked memory
+ * limit.
+ */
+ if (atomic64_read(&mm->pinned_vm) + npages > ulimit_pages)
+ return false;
+
+ /*
+ * Only allow 1/4 of the user's RLIMIT_MEMLOCK to be used for HFI
+ * caches. This fraction is then equally distributed among all
+ * existing user contexts. Note that if RLIMIT_MEMLOCK is
+ * 'unlimited' (-1), the value of this limit will be > 2^42 pages
+ * (2^64 / 2^12 / 2^8 / 2^2).
+ *
+ * The effectiveness of this check may be reduced if I/O occurs on
+ * some user contexts before all user contexts are created. This
+ * check assumes that this process is the only one using this
+ * context (e.g., the corresponding fd was not passed to another
+ * process for concurrent access) as there is no per-context,
+ * per-process tracking of pinned pages. It also assumes that each
+ * user context has only one cache to limit.
+ */
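+ /*
+ * Illustrative numbers (hypothetical): with RLIMIT_MEMLOCK at
+ * 64 MiB (16384 pages of 4 KiB) and 4 user contexts, each
+ * context's cache may pin at most 16384 / 4 / 4 = 1024 pages.
+ */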
+ usr_ctxts = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
+ if (nlocked + npages > (ulimit_pages / usr_ctxts / 4))
+ return false;
+ }
+
+ /*
+ * Pinning these pages would exceed the size limit for this cache.
+ */
+ cache_limit_pages = cache_size * (1024 * 1024) / PAGE_SIZE;
+ if (nlocked + npages > cache_limit_pages)
+ return false;
+
+ return true;
+}
+
+int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages,
+ bool writable, struct page **pages)
+{
+ int ret;
+ unsigned int gup_flags = FOLL_LONGTERM | (writable ? FOLL_WRITE : 0);
+
+ ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages);
+ if (ret < 0)
+ return ret;
+
+ atomic64_add(ret, &mm->pinned_vm);
+
+ return ret;
+}
+
+void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
+ size_t npages, bool dirty)
+{
+ unpin_user_pages_dirty_lock(p, npages, dirty);
+
+ if (mm) { /* during close after signal, mm can be NULL */
+ atomic64_sub(npages, &mm->pinned_vm);
+ }
+}
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
new file mode 100644
index 000000000000..9b1aece1b080
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -0,0 +1,1224 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2020 - 2023 Cornelis Networks, Inc.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
+ */
+
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/uio.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/mmu_context.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+
+#include "hfi.h"
+#include "sdma.h"
+#include "user_sdma.h"
+#include "verbs.h" /* for the headers */
+#include "common.h" /* for struct hfi1_tid_info */
+#include "trace.h"
+
+static uint hfi1_sdma_comp_ring_size = 128;
+module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO);
+MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 128");
+
+static unsigned initial_pkt_count = 8;
+
+static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts);
+static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
+static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
+static void user_sdma_free_request(struct user_sdma_request *req);
+static int check_header_template(struct user_sdma_request *req,
+ struct hfi1_pkt_header *hdr, u32 lrhlen,
+ u32 datalen);
+static int set_txreq_header(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx, u32 datalen);
+static int set_txreq_header_ahg(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx, u32 len);
+static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
+ struct hfi1_user_sdma_comp_q *cq,
+ u16 idx, enum hfi1_sdma_comp_state state,
+ int ret);
+static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags);
+static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);
+
+static int defer_packet_queue(
+ struct sdma_engine *sde,
+ struct iowait_work *wait,
+ struct sdma_txreq *txreq,
+ uint seq,
+ bool pkts_sent);
+static void activate_packet_queue(struct iowait *wait, int reason);
+
+static int defer_packet_queue(
+ struct sdma_engine *sde,
+ struct iowait_work *wait,
+ struct sdma_txreq *txreq,
+ uint seq,
+ bool pkts_sent)
+{
+ struct hfi1_user_sdma_pkt_q *pq =
+ container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);
+
+ write_seqlock(&sde->waitlock);
+ trace_hfi1_usdma_defer(pq, sde, &pq->busy);
+ if (sdma_progress(sde, seq, txreq))
+ goto eagain;
+ /*
+ * We are assuming that if the list is enqueued somewhere, it
+ * is to the dmawait list since that is the only place where
+ * it is supposed to be enqueued.
+ */
+ xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
+ if (list_empty(&pq->busy.list)) {
+ pq->busy.lock = &sde->waitlock;
+ iowait_get_priority(&pq->busy);
+ iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
+ }
+ write_sequnlock(&sde->waitlock);
+ return -EBUSY;
+eagain:
+ write_sequnlock(&sde->waitlock);
+ return -EAGAIN;
+}
+
+static void activate_packet_queue(struct iowait *wait, int reason)
+{
+ struct hfi1_user_sdma_pkt_q *pq =
+ container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
+
+ trace_hfi1_usdma_activate(pq, wait, reason);
+ xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
+ wake_up(&wait->wait_dma);
+}
+
+int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
+ struct hfi1_filedata *fd)
+{
+ int ret = -ENOMEM;
+ char buf[64];
+ struct hfi1_devdata *dd;
+ struct hfi1_user_sdma_comp_q *cq;
+ struct hfi1_user_sdma_pkt_q *pq;
+
+ if (!uctxt || !fd)
+ return -EBADF;
+
+ if (!hfi1_sdma_comp_ring_size)
+ return -EINVAL;
+
+ dd = uctxt->dd;
+
+ pq = kzalloc(sizeof(*pq), GFP_KERNEL);
+ if (!pq)
+ return -ENOMEM;
+ pq->dd = dd;
+ pq->ctxt = uctxt->ctxt;
+ pq->subctxt = fd->subctxt;
+ pq->n_max_reqs = hfi1_sdma_comp_ring_size;
+ atomic_set(&pq->n_reqs, 0);
+ init_waitqueue_head(&pq->wait);
+ atomic_set(&pq->n_locked, 0);
+
+ iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue,
+ activate_packet_queue, NULL, NULL);
+ pq->reqidx = 0;
+
+ pq->reqs = kcalloc(hfi1_sdma_comp_ring_size,
+ sizeof(*pq->reqs),
+ GFP_KERNEL);
+ if (!pq->reqs)
+ goto pq_reqs_nomem;
+
+ pq->req_in_use = bitmap_zalloc(hfi1_sdma_comp_ring_size, GFP_KERNEL);
+ if (!pq->req_in_use)
+ goto pq_reqs_no_in_use;
+
+ snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,
+ fd->subctxt);
+ pq->txreq_cache = kmem_cache_create(buf,
+ sizeof(struct user_sdma_txreq),
+ L1_CACHE_BYTES,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!pq->txreq_cache) {
+ dd_dev_err(dd, "[%u] Failed to allocate TxReq cache\n",
+ uctxt->ctxt);
+ goto pq_txreq_nomem;
+ }
+
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq)
+ goto cq_nomem;
+
+ cq->comps = vmalloc_user(PAGE_ALIGN(sizeof(*cq->comps)
+ * hfi1_sdma_comp_ring_size));
+ if (!cq->comps)
+ goto cq_comps_nomem;
+
+ cq->nentries = hfi1_sdma_comp_ring_size;
+
+ ret = hfi1_init_system_pinning(pq);
+ if (ret)
+ goto pq_mmu_fail;
+
+ rcu_assign_pointer(fd->pq, pq);
+ fd->cq = cq;
+
+ return 0;
+
+pq_mmu_fail:
+ vfree(cq->comps);
+cq_comps_nomem:
+ kfree(cq);
+cq_nomem:
+ kmem_cache_destroy(pq->txreq_cache);
+pq_txreq_nomem:
+ bitmap_free(pq->req_in_use);
+pq_reqs_no_in_use:
+ kfree(pq->reqs);
+pq_reqs_nomem:
+ kfree(pq);
+
+ return ret;
+}
+
+static void flush_pq_iowait(struct hfi1_user_sdma_pkt_q *pq)
+{
+ unsigned long flags;
+ seqlock_t *lock = pq->busy.lock;
+
+ if (!lock)
+ return;
+ write_seqlock_irqsave(lock, flags);
+ if (!list_empty(&pq->busy.list)) {
+ list_del_init(&pq->busy.list);
+ pq->busy.lock = NULL;
+ }
+ write_sequnlock_irqrestore(lock, flags);
+}
+
+int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
+ struct hfi1_ctxtdata *uctxt)
+{
+ struct hfi1_user_sdma_pkt_q *pq;
+
+ trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt);
+
+ spin_lock(&fd->pq_rcu_lock);
+ pq = srcu_dereference_check(fd->pq, &fd->pq_srcu,
+ lockdep_is_held(&fd->pq_rcu_lock));
+ if (pq) {
+ rcu_assign_pointer(fd->pq, NULL);
+ spin_unlock(&fd->pq_rcu_lock);
+ synchronize_srcu(&fd->pq_srcu);
+ /* at this point there can be no more new requests */
+ iowait_sdma_drain(&pq->busy);
+ /* Wait until all requests have been freed. */
+ wait_event_interruptible(
+ pq->wait,
+ !atomic_read(&pq->n_reqs));
+ kfree(pq->reqs);
+ hfi1_free_system_pinning(pq);
+ bitmap_free(pq->req_in_use);
+ kmem_cache_destroy(pq->txreq_cache);
+ flush_pq_iowait(pq);
+ kfree(pq);
+ } else {
+ spin_unlock(&fd->pq_rcu_lock);
+ }
+ if (fd->cq) {
+ vfree(fd->cq->comps);
+ kfree(fd->cq);
+ fd->cq = NULL;
+ }
+ return 0;
+}
+
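+/*
+ * Map a DLID to a small, stable selector: the first distinct DLID hash
+ * value seen is assigned selector 0, the next 1, and so on (wrapping at
+ * 128), so traffic to a given DLID keeps mapping to the same selector.
+ */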
+static u8 dlid_to_selector(u16 dlid)
+{
+ static u8 mapping[256];
+ static int initialized;
+ static u8 next;
+ int hash;
+
+ if (!initialized) {
+ memset(mapping, 0xFF, 256);
+ initialized = 1;
+ }
+
+ hash = ((dlid >> 8) ^ dlid) & 0xFF;
+ if (mapping[hash] == 0xFF) {
+ mapping[hash] = next;
+ next = (next + 1) & 0x7F;
+ }
+
+ return mapping[hash];
+}
+
+/**
+ * hfi1_user_sdma_process_request() - Process and start a user sdma request
+ * @fd: valid file descriptor
+ * @iovec: array of io vectors to process
+ * @dim: overall iovec array size
+ * @count: number of io vector array entries processed
+ */
+int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
+ struct iovec *iovec, unsigned long dim,
+ unsigned long *count)
+{
+ int ret = 0, i;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ struct hfi1_user_sdma_pkt_q *pq =
+ srcu_dereference(fd->pq, &fd->pq_srcu);
+ struct hfi1_user_sdma_comp_q *cq = fd->cq;
+ struct hfi1_devdata *dd = pq->dd;
+ unsigned long idx = 0;
+ u8 pcount = initial_pkt_count;
+ struct sdma_req_info info;
+ struct user_sdma_request *req;
+ u8 opcode, sc, vl;
+ u16 pkey;
+ u32 slid;
+ u16 dlid;
+ u32 selector;
+
+ if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
+ hfi1_cdbg(
+ SDMA,
+ "[%u:%u:%u] First vector not big enough for header %lu/%lu",
+ dd->unit, uctxt->ctxt, fd->subctxt,
+ iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));
+ return -EINVAL;
+ }
+ ret = copy_from_user(&info, iovec[idx].iov_base, sizeof(info));
+ if (ret) {
+ hfi1_cdbg(SDMA, "[%u:%u:%u] Failed to copy info QW (%d)",
+ dd->unit, uctxt->ctxt, fd->subctxt, ret);
+ return -EFAULT;
+ }
+
+ trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,
+ (u16 *)&info);
+ if (info.comp_idx >= hfi1_sdma_comp_ring_size) {
+ hfi1_cdbg(SDMA,
+ "[%u:%u:%u:%u] Invalid comp index",
+ dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
+ return -EINVAL;
+ }
+
+ /*
+ * Sanity check the header io vector count. Need at least 1 vector
+ * (header) and cannot be larger than the actual io vector count.
+ */
+ if (req_iovcnt(info.ctrl) < 1 || req_iovcnt(info.ctrl) > dim) {
+ hfi1_cdbg(SDMA,
+ "[%u:%u:%u:%u] Invalid iov count %d, dim %ld",
+ dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx,
+ req_iovcnt(info.ctrl), dim);
+ return -EINVAL;
+ }
+
+ if (!info.fragsize) {
+ hfi1_cdbg(SDMA,
+ "[%u:%u:%u:%u] Request does not specify fragsize",
+ dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
+ return -EINVAL;
+ }
+
+ /* Try to claim the request. */
+ if (test_and_set_bit(info.comp_idx, pq->req_in_use)) {
+ hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in use",
+ dd->unit, uctxt->ctxt, fd->subctxt,
+ info.comp_idx);
+ return -EBADSLT;
+ }
+ /*
+ * All safety checks have been done and this request has been claimed.
+ */
+ trace_hfi1_sdma_user_process_request(dd, uctxt->ctxt, fd->subctxt,
+ info.comp_idx);
+ req = pq->reqs + info.comp_idx;
+ req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */
+ req->data_len = 0;
+ req->pq = pq;
+ req->cq = cq;
+ req->ahg_idx = -1;
+ req->iov_idx = 0;
+ req->sent = 0;
+ req->seqnum = 0;
+ req->seqcomp = 0;
+ req->seqsubmitted = 0;
+ req->tids = NULL;
+ req->has_error = 0;
+ INIT_LIST_HEAD(&req->txps);
+
+ memcpy(&req->info, &info, sizeof(info));
+
+ /* The request is initialized, count it */
+ atomic_inc(&pq->n_reqs);
+
+ if (req_opcode(info.ctrl) == EXPECTED) {
+ /* expected must have a TID info and at least one data vector */
+ if (req->data_iovs < 2) {
+ SDMA_DBG(req,
+ "Not enough vectors for expected request");
+ ret = -EINVAL;
+ goto free_req;
+ }
+ req->data_iovs--;
+ }
+
+ if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) {
+ SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs,
+ MAX_VECTORS_PER_REQ);
+ ret = -EINVAL;
+ goto free_req;
+ }
+
+ /* Copy the header from the user buffer */
+ ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
+ sizeof(req->hdr));
+ if (ret) {
+ SDMA_DBG(req, "Failed to copy header template (%d)", ret);
+ ret = -EFAULT;
+ goto free_req;
+ }
+
+ /* If Static rate control is not enabled, sanitize the header. */
+ if (!HFI1_CAP_IS_USET(STATIC_RATE_CTRL))
+ req->hdr.pbc[2] = 0;
+
+ /* Validate the opcode. Do not trust packets from user space blindly. */
+ opcode = (be32_to_cpu(req->hdr.bth[0]) >> 24) & 0xff;
+ if ((opcode & USER_OPCODE_CHECK_MASK) !=
+ USER_OPCODE_CHECK_VAL) {
+ SDMA_DBG(req, "Invalid opcode (%d)", opcode);
+ ret = -EINVAL;
+ goto free_req;
+ }
+ /*
+ * Validate the vl. Do not trust packets from user space blindly.
+ * VL comes from PBC, SC comes from LRH, and the VL needs to
+ * match the VL obtained from the SC-to-VL lookup.
+ */
+ vl = (le16_to_cpu(req->hdr.pbc[0]) >> 12) & 0xF;
+ sc = (((be16_to_cpu(req->hdr.lrh[0]) >> 12) & 0xF) |
+ (((le16_to_cpu(req->hdr.pbc[1]) >> 14) & 0x1) << 4));
+ if (vl >= dd->pport->vls_operational ||
+ vl != sc_to_vlt(dd, sc)) {
+ SDMA_DBG(req, "Invalid SC(%u)/VL(%u)", sc, vl);
+ ret = -EINVAL;
+ goto free_req;
+ }
+
+ /* Checking P_KEY for requests from user-space */
+ pkey = (u16)be32_to_cpu(req->hdr.bth[0]);
+ slid = be16_to_cpu(req->hdr.lrh[3]);
+ if (egress_pkey_check(dd->pport, slid, pkey, sc, PKEY_CHECK_INVALID)) {
+ ret = -EINVAL;
+ goto free_req;
+ }
+
+ /*
+ * Also check the LRH.lnh. If it says the next header is a GRH, then
+ * the RXE parsing will be off and will land in the middle of the KDETH
+ * or miss it entirely.
+ */
+ if ((be16_to_cpu(req->hdr.lrh[0]) & 0x3) == HFI1_LRH_GRH) {
+ SDMA_DBG(req, "User tried to pass in a GRH");
+ ret = -EINVAL;
+ goto free_req;
+ }
+
+ req->koffset = le32_to_cpu(req->hdr.kdeth.swdata[6]);
+ /*
+ * Calculate the initial TID offset based on the values of
+ * KDETH.OFFSET and KDETH.OM that are passed in.
+ */
+ req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) *
+ (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
+ KDETH_OM_LARGE : KDETH_OM_SMALL);
+ trace_hfi1_sdma_user_initial_tidoffset(dd, uctxt->ctxt, fd->subctxt,
+ info.comp_idx, req->tidoffset);
+ idx++;
+
+ /* Save all the IO vector structures */
+ for (i = 0; i < req->data_iovs; i++) {
+ req->iovs[i].offset = 0;
+ INIT_LIST_HEAD(&req->iovs[i].list);
+ memcpy(&req->iovs[i].iov,
+ iovec + idx++,
+ sizeof(req->iovs[i].iov));
+ if (req->iovs[i].iov.iov_len == 0) {
+ ret = -EINVAL;
+ goto free_req;
+ }
+ req->data_len += req->iovs[i].iov.iov_len;
+ }
+ trace_hfi1_sdma_user_data_length(dd, uctxt->ctxt, fd->subctxt,
+ info.comp_idx, req->data_len);
+ if (pcount > req->info.npkts)
+ pcount = req->info.npkts;
+ /*
+ * Copy any TID info
+ * User space will provide the TID info only when the
+ * request type is EXPECTED. This is true even if there is
+ * only one packet in the request and the header is already
+ * set up. The reason for the singular TID case is that the
+ * driver needs to perform safety checks.
+ */
+ if (req_opcode(req->info.ctrl) == EXPECTED) {
+ u16 ntids = iovec[idx].iov_len / sizeof(*req->tids);
+ u32 *tmp;
+
+ if (!ntids || ntids > MAX_TID_PAIR_ENTRIES) {
+ ret = -EINVAL;
+ goto free_req;
+ }
+
+ /*
+ * We have to copy all of the tids because they may vary
+ * in size and, therefore, the TID count might not be
+ * equal to the pkt count. However, there is no way to
+ * tell at this point.
+ */
+ tmp = memdup_array_user(iovec[idx].iov_base,
+ ntids, sizeof(*req->tids));
+ if (IS_ERR(tmp)) {
+ ret = PTR_ERR(tmp);
+ SDMA_DBG(req, "Failed to copy %d TIDs (%pe)", ntids,
+ tmp);
+ goto free_req;
+ }
+ req->tids = tmp;
+ req->n_tids = ntids;
+ req->tididx = 0;
+ idx++;
+ }
+
+ dlid = be16_to_cpu(req->hdr.lrh[1]);
+ selector = dlid_to_selector(dlid);
+ selector += uctxt->ctxt + fd->subctxt;
+ req->sde = sdma_select_user_engine(dd, selector, vl);
+
+ if (!req->sde || !sdma_running(req->sde)) {
+ ret = -ECOMM;
+ goto free_req;
+ }
+
+ /* We don't need an AHG entry if the request contains only one packet */
+ if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG))
+ req->ahg_idx = sdma_ahg_alloc(req->sde);
+
+ set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
+ pq->state = SDMA_PKT_Q_ACTIVE;
+
+ /*
+ * This is a somewhat blocking send implementation.
+ * The driver will block the caller until all packets of the
+ * request have been submitted to the SDMA engine. However, it
+ * will not wait for send completions.
+ */
+ while (req->seqsubmitted != req->info.npkts) {
+ ret = user_sdma_send_pkts(req, pcount);
+ if (ret < 0) {
+ int we_ret;
+
+ if (ret != -EBUSY)
+ goto free_req;
+ we_ret = wait_event_interruptible_timeout(
+ pq->busy.wait_dma,
+ pq->state == SDMA_PKT_Q_ACTIVE,
+ msecs_to_jiffies(
+ SDMA_IOWAIT_TIMEOUT));
+ trace_hfi1_usdma_we(pq, we_ret);
+ if (we_ret <= 0)
+ flush_pq_iowait(pq);
+ }
+ }
+ *count += idx;
+ return 0;
+free_req:
+ /*
+ * If seqsubmitted == npkts, the completion routine controls the final
+ * state. If seqsubmitted < npkts, wait for any outstanding packets to
+ * finish before cleaning up.
+ */
+ if (req->seqsubmitted < req->info.npkts) {
+ if (req->seqsubmitted)
+ wait_event(pq->busy.wait_dma,
+ (req->seqcomp == req->seqsubmitted - 1));
+ user_sdma_free_request(req);
+ pq_update(pq);
+ set_comp_state(pq, cq, info.comp_idx, ERROR, ret);
+ }
+ return ret;
+}
+
+static inline u32 compute_data_length(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx)
+{
+ /*
+ * Determine the proper size of the packet data.
+ * The size of the data of the first packet is in the header
+ * template. However, it includes the header and ICRC, which need
+ * to be subtracted.
+ * The minimum representable packet data length in a header is 4 bytes,
+ * therefore, when the requested data length is less than 4 bytes, there is
+ * only one packet and its data length equals the request's data length.
+ * The size of the remaining packets is the minimum of the frag
+ * size (MTU) or remaining data in the request.
+ */
+ u32 len;
+
+ if (!req->seqnum) {
+ if (req->data_len < sizeof(u32))
+ len = req->data_len;
+ else
+ len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) -
+ (sizeof(tx->hdr) - 4));
+ } else if (req_opcode(req->info.ctrl) == EXPECTED) {
+ u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) *
+ PAGE_SIZE;
+ /*
+ * Get the data length based on the remaining space in the
+ * TID pair.
+ */
+ len = min(tidlen - req->tidoffset, (u32)req->info.fragsize);
+ /* If we've filled up the TID pair, move to the next one. */
+ if (unlikely(!len) && ++req->tididx < req->n_tids &&
+ req->tids[req->tididx]) {
+ tidlen = EXP_TID_GET(req->tids[req->tididx],
+ LEN) * PAGE_SIZE;
+ req->tidoffset = 0;
+ len = min_t(u32, tidlen, req->info.fragsize);
+ }
+ /*
+ * Since the TID pairs map entire pages, make sure that we
+ * are not going to try to send more data than we have
+ * remaining.
+ */
+ len = min(len, req->data_len - req->sent);
+ } else {
+ len = min(req->data_len - req->sent, (u32)req->info.fragsize);
+ }
+ trace_hfi1_sdma_user_compute_length(req->pq->dd,
+ req->pq->ctxt,
+ req->pq->subctxt,
+ req->info.comp_idx,
+ len);
+ return len;
+}
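+/*
+ * Example for the eager (non-EXPECTED) case: with fragsize 8192 and 3616
+ * unsent bytes remaining, every earlier packet carries 8192 bytes and the
+ * final packet carries the 3616-byte remainder.
+ */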
+
+static inline u32 pad_len(u32 len)
+{
+ if (len & (sizeof(u32) - 1))
+ len += sizeof(u32) - (len & (sizeof(u32) - 1));
+ return len;
+}
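+/*
+ * e.g. pad_len(5) == 8 and pad_len(8) == 8: lengths are rounded up to a
+ * 4-byte multiple.
+ */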
+
+static inline u32 get_lrh_len(struct hfi1_pkt_header hdr, u32 len)
+{
+ /* (Size of complete header - size of PBC) + 4B ICRC + data length */
+ return ((sizeof(hdr) - sizeof(hdr.pbc)) + 4 + len);
+}
+
+static int user_sdma_txadd_ahg(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx,
+ u32 datalen)
+{
+ int ret;
+ u16 pbclen = le16_to_cpu(req->hdr.pbc[0]);
+ u32 lrhlen = get_lrh_len(req->hdr, pad_len(datalen));
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+
+ /*
+ * Copy the request header into the tx header
+ * because the HW needs a cacheline-aligned
+ * address.
+ * This copy could be optimized out if the hdr
+ * member of user_sdma_request were also
+ * cacheline aligned.
+ */
+ memcpy(&tx->hdr, &req->hdr, sizeof(tx->hdr));
+ if (PBC2LRH(pbclen) != lrhlen) {
+ pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
+ tx->hdr.pbc[0] = cpu_to_le16(pbclen);
+ }
+ ret = check_header_template(req, &tx->hdr, lrhlen, datalen);
+ if (ret)
+ return ret;
+ ret = sdma_txinit_ahg(&tx->txreq, SDMA_TXREQ_F_AHG_COPY,
+ sizeof(tx->hdr) + datalen, req->ahg_idx,
+ 0, NULL, 0, user_sdma_txreq_cb);
+ if (ret)
+ return ret;
+ ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr));
+ if (ret)
+ sdma_txclean(pq->dd, &tx->txreq);
+ return ret;
+}
+
+static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
+{
+ int ret = 0;
+ u16 count;
+ unsigned npkts = 0;
+ struct user_sdma_txreq *tx = NULL;
+ struct hfi1_user_sdma_pkt_q *pq = NULL;
+ struct user_sdma_iovec *iovec = NULL;
+
+ if (!req->pq)
+ return -EINVAL;
+
+ pq = req->pq;
+
+ /* If tx completion has reported an error, we are done. */
+ if (READ_ONCE(req->has_error))
+ return -EFAULT;
+
+ /*
+ * Check if we might have sent the entire request already
+ */
+ if (unlikely(req->seqnum == req->info.npkts)) {
+ if (!list_empty(&req->txps))
+ goto dosend;
+ return ret;
+ }
+
+ if (!maxpkts || maxpkts > req->info.npkts - req->seqnum)
+ maxpkts = req->info.npkts - req->seqnum;
+
+ while (npkts < maxpkts) {
+ u32 datalen = 0;
+
+ /*
+ * Check whether any of the completions have come back
+ * with errors. If so, we are not going to process any
+ * more packets from this request.
+ */
+ if (READ_ONCE(req->has_error))
+ return -EFAULT;
+
+ tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
+ if (!tx)
+ return -ENOMEM;
+
+ tx->flags = 0;
+ tx->req = req;
+ INIT_LIST_HEAD(&tx->list);
+
+ /*
+ * For the last packet set the ACK request
+ * and disable header suppression.
+ */
+ if (req->seqnum == req->info.npkts - 1)
+ tx->flags |= (TXREQ_FLAGS_REQ_ACK |
+ TXREQ_FLAGS_REQ_DISABLE_SH);
+
+ /*
+ * Calculate the payload size - this is min of the fragment
+ * (MTU) size or the remaining bytes in the request but only
+ * if we have payload data.
+ */
+ if (req->data_len) {
+ iovec = &req->iovs[req->iov_idx];
+ if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) {
+ if (++req->iov_idx == req->data_iovs) {
+ ret = -EFAULT;
+ goto free_tx;
+ }
+ iovec = &req->iovs[req->iov_idx];
+ WARN_ON(iovec->offset);
+ }
+
+ datalen = compute_data_length(req, tx);
+
+ /*
+ * Disable header suppression for payloads of 8 DWs or less. If
+ * there is an uncorrectable error in the receive data FIFO and
+ * the received payload size is less than or equal to 8 DWs,
+ * RxDmaDataFifoRdUncErr is not reported; RHF.EccErr is set
+ * instead when the header is not suppressed.
+ */
+ if (!datalen) {
+ SDMA_DBG(req,
+ "Request has data but pkt len is 0");
+ ret = -EFAULT;
+ goto free_tx;
+ } else if (datalen <= 32) {
+ tx->flags |= TXREQ_FLAGS_REQ_DISABLE_SH;
+ }
+ }
+
+ if (req->ahg_idx >= 0) {
+ if (!req->seqnum) {
+ ret = user_sdma_txadd_ahg(req, tx, datalen);
+ if (ret)
+ goto free_tx;
+ } else {
+ int changes;
+
+ changes = set_txreq_header_ahg(req, tx,
+ datalen);
+ if (changes < 0) {
+ ret = changes;
+ goto free_tx;
+ }
+ }
+ } else {
+ ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
+ datalen, user_sdma_txreq_cb);
+ if (ret)
+ goto free_tx;
+ /*
+ * Modify the header for this packet. This only needs
+ * to be done if we are not going to use AHG. Otherwise,
+ * the HW will do it based on the changes we gave it
+ * during sdma_txinit_ahg().
+ */
+ ret = set_txreq_header(req, tx, datalen);
+ if (ret)
+ goto free_txreq;
+ }
+
+ req->koffset += datalen;
+ if (req_opcode(req->info.ctrl) == EXPECTED)
+ req->tidoffset += datalen;
+ req->sent += datalen;
+ while (datalen) {
+ ret = hfi1_add_pages_to_sdma_packet(req, tx, iovec,
+ &datalen);
+ if (ret)
+ goto free_txreq;
+ iovec = &req->iovs[req->iov_idx];
+ }
+ list_add_tail(&tx->txreq.list, &req->txps);
+ /*
+ * It is important to increment this here as it is used to
+ * generate the BTH.PSN and, therefore, can't be bulk-updated
+ * outside of the loop.
+ */
+ tx->seqnum = req->seqnum++;
+ npkts++;
+ }
+dosend:
+ ret = sdma_send_txlist(req->sde,
+ iowait_get_ib_work(&pq->busy),
+ &req->txps, &count);
+ req->seqsubmitted += count;
+ if (req->seqsubmitted == req->info.npkts) {
+ /*
+ * The txreq has already been submitted to the HW queue
+ * so we can free the AHG entry now. Corruption will not
+ * happen due to the sequential manner in which
+ * descriptors are processed.
+ */
+ if (req->ahg_idx >= 0)
+ sdma_ahg_free(req->sde, req->ahg_idx);
+ }
+ return ret;
+
+free_txreq:
+ sdma_txclean(pq->dd, &tx->txreq);
+free_tx:
+ kmem_cache_free(pq->txreq_cache, tx);
+ return ret;
+}
+
+static int check_header_template(struct user_sdma_request *req,
+ struct hfi1_pkt_header *hdr, u32 lrhlen,
+ u32 datalen)
+{
+ /*
+ * Perform safety checks for any type of packet:
+ * - transfer size is multiple of 64bytes
+ * - packet length is multiple of 4 bytes
+ * - packet length is not larger than MTU size
+ *
+ * These checks are only done for the first packet of the
+ * transfer since the header is "given" to us by user space.
+ * For the remainder of the packets we compute the values.
+ */
+ if (req->info.fragsize % PIO_BLOCK_SIZE || lrhlen & 0x3 ||
+ lrhlen > get_lrh_len(*hdr, req->info.fragsize))
+ return -EINVAL;
+
+ if (req_opcode(req->info.ctrl) == EXPECTED) {
+ /*
+ * The header is checked only on the first packet. Furthermore,
+ * we ensure that at least one TID entry is copied when the
+ * request is submitted. Therefore, we don't have to verify that
+ * tididx points to something sane.
+ */
+ u32 tidval = req->tids[req->tididx],
+ tidlen = EXP_TID_GET(tidval, LEN) * PAGE_SIZE,
+ tididx = EXP_TID_GET(tidval, IDX),
+ tidctrl = EXP_TID_GET(tidval, CTRL),
+ tidoff;
+ __le32 kval = hdr->kdeth.ver_tid_offset;
+
+ tidoff = KDETH_GET(kval, OFFSET) *
+ (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
+ KDETH_OM_LARGE : KDETH_OM_SMALL);
+ /*
+ * Expected receive packets have the following
+ * additional checks:
+ * - offset is not larger than the TID size
+ * - TIDCtrl values match between header and TID array
+ * - TID indexes match between header and TID array
+ */
+ if ((tidoff + datalen > tidlen) ||
+ KDETH_GET(kval, TIDCTRL) != tidctrl ||
+ KDETH_GET(kval, TID) != tididx)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Correctly set the BTH.PSN field based on the type of transfer. Eager
+ * packets can simply increment the PSN, but expected packets encode the
+ * generation and sequence in the BTH.PSN field, so a plain increment would
+ * result in errors.
+ */
+static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags)
+{
+ u32 val = be32_to_cpu(bthpsn),
+ mask = (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffffull :
+ 0xffffffull),
+ psn = val & mask;
+ if (expct)
+ psn = (psn & ~HFI1_KDETH_BTH_SEQ_MASK) |
+ ((psn + frags) & HFI1_KDETH_BTH_SEQ_MASK);
+ else
+ psn = psn + frags;
+ return psn & mask;
+}
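+/*
+ * Example, assuming the 11-bit HFI1_KDETH_BTH_SEQ_MASK (0x7ff): psn 0x1237f8
+ * advanced by 0x10 frags becomes 0x123008 for expected traffic (generation
+ * bits preserved, sequence wraps) but 0x123808 for eager traffic.
+ */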
+
+static int set_txreq_header(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx, u32 datalen)
+{
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+ struct hfi1_pkt_header *hdr = &tx->hdr;
+ u8 omfactor; /* KDETH.OM */
+ u16 pbclen;
+ int ret;
+ u32 tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
+
+ /* Copy the header template to the request before modification */
+ memcpy(hdr, &req->hdr, sizeof(*hdr));
+
+ /*
+ * Check if the PBC and LRH length are mismatched. If so
+ * adjust both in the header.
+ */
+ pbclen = le16_to_cpu(hdr->pbc[0]);
+ if (PBC2LRH(pbclen) != lrhlen) {
+ pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
+ hdr->pbc[0] = cpu_to_le16(pbclen);
+ hdr->lrh[2] = cpu_to_be16(lrhlen >> 2);
+ /*
+ * Third packet
+ * This is the first packet in the sequence that has
+ * a "static" size that can be used for the rest of
+ * the packets (besides the last one).
+ */
+ if (unlikely(req->seqnum == 2)) {
+ /*
+ * From this point on the lengths in both the
+ * PBC and LRH are the same until the last
+ * packet.
+ * Adjust the template so we don't have to update
+ * every packet
+ */
+ req->hdr.pbc[0] = hdr->pbc[0];
+ req->hdr.lrh[2] = hdr->lrh[2];
+ }
+ }
+ /*
+ * We only have to modify the header if this is not the
+ * first packet in the request. Otherwise, we use the
+ * header given to us.
+ */
+ if (unlikely(!req->seqnum)) {
+ ret = check_header_template(req, hdr, lrhlen, datalen);
+ if (ret)
+ return ret;
+ goto done;
+ }
+
+ hdr->bth[2] = cpu_to_be32(
+ set_pkt_bth_psn(hdr->bth[2],
+ (req_opcode(req->info.ctrl) == EXPECTED),
+ req->seqnum));
+
+ /* Set ACK request on last packet */
+ if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
+ hdr->bth[2] |= cpu_to_be32(1UL << 31);
+
+ /* Set the new offset */
+ hdr->kdeth.swdata[6] = cpu_to_le32(req->koffset);
+ /* Expected packets have to fill in the new TID information */
+ if (req_opcode(req->info.ctrl) == EXPECTED) {
+ tidval = req->tids[req->tididx];
+ /*
+ * If the offset puts us at the end of the current TID,
+ * advance everything.
+ */
+ if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
+ PAGE_SIZE)) {
+ req->tidoffset = 0;
+ /*
+ * Since we don't copy all the TIDs all at once,
+ * we have to check again.
+ */
+ if (++req->tididx > req->n_tids - 1 ||
+ !req->tids[req->tididx]) {
+ return -EINVAL;
+ }
+ tidval = req->tids[req->tididx];
+ }
+ omfactor = EXP_TID_GET(tidval, LEN) * PAGE_SIZE >=
+ KDETH_OM_MAX_SIZE ? KDETH_OM_LARGE_SHIFT :
+ KDETH_OM_SMALL_SHIFT;
+ /* Set KDETH.TIDCtrl based on value for this TID. */
+ KDETH_SET(hdr->kdeth.ver_tid_offset, TIDCTRL,
+ EXP_TID_GET(tidval, CTRL));
+ /* Set KDETH.TID based on value for this TID */
+ KDETH_SET(hdr->kdeth.ver_tid_offset, TID,
+ EXP_TID_GET(tidval, IDX));
+ /* Clear KDETH.SH when DISABLE_SH flag is set */
+ if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH))
+ KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0);
+ /*
+ * Set the KDETH.OFFSET and KDETH.OM based on size of
+ * transfer.
+ */
+ trace_hfi1_sdma_user_tid_info(
+ pq->dd, pq->ctxt, pq->subctxt, req->info.comp_idx,
+ req->tidoffset, req->tidoffset >> omfactor,
+ omfactor != KDETH_OM_SMALL_SHIFT);
+ KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
+ req->tidoffset >> omfactor);
+ KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
+ omfactor != KDETH_OM_SMALL_SHIFT);
+ }
+done:
+ trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
+ req->info.comp_idx, hdr, tidval);
+ return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr));
+}
+
+static int set_txreq_header_ahg(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx, u32 datalen)
+{
+ u32 ahg[AHG_KDETH_ARRAY_SIZE];
+ int idx = 0;
+ u8 omfactor; /* KDETH.OM */
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+ struct hfi1_pkt_header *hdr = &req->hdr;
+ u16 pbclen = le16_to_cpu(hdr->pbc[0]);
+ u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
+ size_t array_size = ARRAY_SIZE(ahg);
+
+ if (PBC2LRH(pbclen) != lrhlen) {
+ /* PBC.PbcLengthDWs */
+ idx = ahg_header_set(ahg, idx, array_size, 0, 0, 12,
+ (__force u16)cpu_to_le16(LRH2PBC(lrhlen)));
+ if (idx < 0)
+ return idx;
+ /* LRH.PktLen (we need the full 16 bits due to byte swap) */
+ idx = ahg_header_set(ahg, idx, array_size, 3, 0, 16,
+ (__force u16)cpu_to_be16(lrhlen >> 2));
+ if (idx < 0)
+ return idx;
+ }
+
+ /*
+ * Do the common updates
+ */
+ /* BTH.PSN and BTH.A */
+ val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) &
+ (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
+ if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
+ val32 |= 1UL << 31;
+ idx = ahg_header_set(ahg, idx, array_size, 6, 0, 16,
+ (__force u16)cpu_to_be16(val32 >> 16));
+ if (idx < 0)
+ return idx;
+ idx = ahg_header_set(ahg, idx, array_size, 6, 16, 16,
+ (__force u16)cpu_to_be16(val32 & 0xffff));
+ if (idx < 0)
+ return idx;
+ /* KDETH.Offset */
+ idx = ahg_header_set(ahg, idx, array_size, 15, 0, 16,
+ (__force u16)cpu_to_le16(req->koffset & 0xffff));
+ if (idx < 0)
+ return idx;
+ idx = ahg_header_set(ahg, idx, array_size, 15, 16, 16,
+ (__force u16)cpu_to_le16(req->koffset >> 16));
+ if (idx < 0)
+ return idx;
+ if (req_opcode(req->info.ctrl) == EXPECTED) {
+ __le16 val;
+
+ tidval = req->tids[req->tididx];
+
+ /*
+ * If the offset puts us at the end of the current TID,
+ * advance everything.
+ */
+ if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
+ PAGE_SIZE)) {
+ req->tidoffset = 0;
+ /*
+ * Since we don't copy all the TIDs all at once,
+ * we have to check again.
+ */
+ if (++req->tididx > req->n_tids - 1 ||
+ !req->tids[req->tididx])
+ return -EINVAL;
+ tidval = req->tids[req->tididx];
+ }
+ omfactor = ((EXP_TID_GET(tidval, LEN) *
+ PAGE_SIZE) >=
+ KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE_SHIFT :
+ KDETH_OM_SMALL_SHIFT;
+ /* KDETH.OM and KDETH.OFFSET (TID) */
+ idx = ahg_header_set(
+ ahg, idx, array_size, 7, 0, 16,
+ ((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 |
+ ((req->tidoffset >> omfactor)
+ & 0x7fff)));
+ if (idx < 0)
+ return idx;
+ /* KDETH.TIDCtrl, KDETH.TID, KDETH.Intr, KDETH.SH */
+ val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
+ (EXP_TID_GET(tidval, IDX) & 0x3ff));
+
+ if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH)) {
+ val |= cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
+ INTR) <<
+ AHG_KDETH_INTR_SHIFT));
+ } else {
+ val |= KDETH_GET(hdr->kdeth.ver_tid_offset, SH) ?
+ cpu_to_le16(0x1 << AHG_KDETH_SH_SHIFT) :
+ cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
+ INTR) <<
+ AHG_KDETH_INTR_SHIFT));
+ }
+
+ idx = ahg_header_set(ahg, idx, array_size,
+ 7, 16, 14, (__force u16)val);
+ if (idx < 0)
+ return idx;
+ }
+
+ trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
+ req->info.comp_idx, req->sde->this_idx,
+ req->ahg_idx, ahg, idx, tidval);
+ sdma_txinit_ahg(&tx->txreq,
+ SDMA_TXREQ_F_USE_AHG,
+ datalen, req->ahg_idx, idx,
+ ahg, sizeof(req->hdr),
+ user_sdma_txreq_cb);
+
+ return idx;
+}
+
+/**
+ * user_sdma_txreq_cb() - SDMA tx request completion callback.
+ * @txreq: valid sdma tx request
+ * @status: success/failure of request
+ *
+ * Called when the SDMA progress state machine gets notification that
+ * the SDMA descriptors for this tx request have been processed by the
+ * DMA engine. Called in interrupt context.
+ * Only do work on completed sequences.
+ */
+static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
+{
+ struct user_sdma_txreq *tx =
+ container_of(txreq, struct user_sdma_txreq, txreq);
+ struct user_sdma_request *req;
+ struct hfi1_user_sdma_pkt_q *pq;
+ struct hfi1_user_sdma_comp_q *cq;
+ enum hfi1_sdma_comp_state state = COMPLETE;
+
+ if (!tx->req)
+ return;
+
+ req = tx->req;
+ pq = req->pq;
+ cq = req->cq;
+
+ if (status != SDMA_TXREQ_S_OK) {
+ SDMA_DBG(req, "SDMA completion with error %d",
+ status);
+ WRITE_ONCE(req->has_error, 1);
+ state = ERROR;
+ }
+
+ req->seqcomp = tx->seqnum;
+ kmem_cache_free(pq->txreq_cache, tx);
+
+ /* If this is not the last packet's completion, we are done for now */
+ if (req->seqcomp != req->info.npkts - 1)
+ return;
+
+ user_sdma_free_request(req);
+ set_comp_state(pq, cq, req->info.comp_idx, state, status);
+ pq_update(pq);
+}
+
+static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
+{
+ if (atomic_dec_and_test(&pq->n_reqs))
+ wake_up(&pq->wait);
+}
+
+static void user_sdma_free_request(struct user_sdma_request *req)
+{
+ if (!list_empty(&req->txps)) {
+ struct sdma_txreq *t, *p;
+
+ list_for_each_entry_safe(t, p, &req->txps, list) {
+ struct user_sdma_txreq *tx =
+ container_of(t, struct user_sdma_txreq, txreq);
+ list_del_init(&t->list);
+ sdma_txclean(req->pq->dd, t);
+ kmem_cache_free(req->pq->txreq_cache, tx);
+ }
+ }
+
+ kfree(req->tids);
+ clear_bit(req->info.comp_idx, req->pq->req_in_use);
+}
+
+static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
+ struct hfi1_user_sdma_comp_q *cq,
+ u16 idx, enum hfi1_sdma_comp_state state,
+ int ret)
+{
+ if (state == ERROR)
+ cq->comps[idx].errcode = -ret;
+ smp_wmb(); /* make sure errcode is visible first */
+ cq->comps[idx].status = state;
+ trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt,
+ idx, state, ret);
+}
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
new file mode 100644
index 000000000000..8735524e3a9a
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2023 - Cornelis Networks, Inc.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
+ */
+#ifndef _HFI1_USER_SDMA_H
+#define _HFI1_USER_SDMA_H
+
+#include <linux/device.h>
+#include <linux/wait.h>
+
+#include "common.h"
+#include "iowait.h"
+#include "user_exp_rcv.h"
+#include "mmu_rb.h"
+#include "pinning.h"
+#include "sdma.h"
+
+/* The maximum number of Data io vectors per message/request */
+#define MAX_VECTORS_PER_REQ 8
+/*
+ * Maximum number of packets to send from each message/request
+ * before moving to the next one.
+ */
+#define MAX_PKTS_PER_QUEUE 16
+
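+/*
+ * num_pages() rounds a byte count up to whole pages, e.g. num_pages(1) == 1
+ * and num_pages(PAGE_SIZE + 1) == 2.
+ */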
+#define num_pages(x) (1 + ((((x) - 1) & PAGE_MASK) >> PAGE_SHIFT))
+
+#define req_opcode(x) \
+ (((x) >> HFI1_SDMA_REQ_OPCODE_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
+#define req_version(x) \
+ (((x) >> HFI1_SDMA_REQ_VERSION_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
+#define req_iovcnt(x) \
+ (((x) >> HFI1_SDMA_REQ_IOVCNT_SHIFT) & HFI1_SDMA_REQ_IOVCNT_MASK)
+
+/* Number of BTH.PSN bits used for sequence number in expected rcvs */
+#define BTH_SEQ_MASK 0x7ffull
+
+#define AHG_KDETH_INTR_SHIFT 12
+#define AHG_KDETH_SH_SHIFT 13
+#define AHG_KDETH_ARRAY_SIZE 9
+
+#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
+#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
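+/*
+ * e.g. LRH2PBC(64) == 17 PbcLengthDWs and PBC2LRH(17) == 64 LRH bytes: the
+ * PBC length is one dword more than the LRH byte length divided by 4.
+ */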
+
+/**
+ * ahg_header_set() - Build an SDMA AHG header update descriptor and save it
+ * to an array.
+ * @arr: Array to save the descriptor to.
+ * @idx: Index of the array at which the descriptor will be saved.
+ * @array_size: Size of the array @arr.
+ * @dw: Update index into the header in DWs.
+ * @bit: Start bit.
+ * @width: Field width.
+ * @value: 16 bits of immediate data to write into the field.
+ *
+ * Return: -ERANGE if @idx is invalid; on success, the next index (@idx + 1)
+ * of the array to be used for the next descriptor.
+ */
+static inline int ahg_header_set(u32 *arr, int idx, size_t array_size,
+ u8 dw, u8 bit, u8 width, u16 value)
+{
+ if ((size_t)idx >= array_size)
+ return -ERANGE;
+ arr[idx++] = sdma_build_ahg_descriptor(value, dw, bit, width);
+ return idx;
+}
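+/*
+ * Usage sketch (see set_txreq_header_ahg()): idx = ahg_header_set(ahg, idx,
+ * ARRAY_SIZE(ahg), 3, 0, 16, val) queues an update of header dword 3,
+ * bits 0-15, with the 16-bit value val.
+ */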
+
+/* Tx request flag bits */
+#define TXREQ_FLAGS_REQ_ACK BIT(0) /* Set the ACK bit in the header */
+#define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */
+
+enum pkt_q_sdma_state {
+ SDMA_PKT_Q_ACTIVE,
+ SDMA_PKT_Q_DEFERRED,
+};
+
+#define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */
+
+#define SDMA_DBG(req, fmt, ...) \
+ hfi1_cdbg(SDMA, "[%u:%u:%u:%u] " fmt, (req)->pq->dd->unit, \
+ (req)->pq->ctxt, (req)->pq->subctxt, (req)->info.comp_idx, \
+ ##__VA_ARGS__)
+
+struct hfi1_user_sdma_pkt_q {
+ u16 ctxt;
+ u16 subctxt;
+ u16 n_max_reqs;
+ atomic_t n_reqs;
+ u16 reqidx;
+ struct hfi1_devdata *dd;
+ struct kmem_cache *txreq_cache;
+ struct user_sdma_request *reqs;
+ unsigned long *req_in_use;
+ struct iowait busy;
+ enum pkt_q_sdma_state state;
+ wait_queue_head_t wait;
+ unsigned long unpinned;
+ struct mmu_rb_handler *handler;
+ atomic_t n_locked;
+};
+
+struct hfi1_user_sdma_comp_q {
+ u16 nentries;
+ struct hfi1_sdma_comp_entry *comps;
+};
+
+struct user_sdma_iovec {
+ struct list_head list;
+ struct iovec iov;
+ /*
+ * offset into the virtual address space of the vector at
+ * which we last left off.
+ */
+ u64 offset;
+};
+
+/* evict operation argument */
+struct evict_data {
+ u32 cleared; /* count evicted so far */
+ u32 target; /* target count to evict */
+};
+
+struct user_sdma_request {
+ /* This is the original header from user space */
+ struct hfi1_pkt_header hdr;
+
+ /* Read mostly fields */
+ struct hfi1_user_sdma_pkt_q *pq ____cacheline_aligned_in_smp;
+ struct hfi1_user_sdma_comp_q *cq;
+ /*
+ * Pointer to the SDMA engine for this request.
+ * Since different requests could be on different VLs,
+ * each request needs its own engine pointer.
+ */
+ struct sdma_engine *sde;
+ struct sdma_req_info info;
+ /* TID array values copied from the tid_iov vector */
+ u32 *tids;
+ /* total length of the data in the request */
+ u32 data_len;
+ /* number of elements copied to the tids array */
+ u16 n_tids;
+ /*
+ * We copy the iovs for this request (based on
+ * info.iovcnt). These are only the data vectors.
+ */
+ u8 data_iovs;
+ s8 ahg_idx;
+
+ /* Writeable fields shared with interrupt */
+ u16 seqcomp ____cacheline_aligned_in_smp;
+ u16 seqsubmitted;
+
+ /* Send side fields */
+ struct list_head txps ____cacheline_aligned_in_smp;
+ u16 seqnum;
+ /*
+ * KDETH.OFFSET (TID) field
+ * The offset can cover multiple packets, depending on the
+ * size of the TID entry.
+ */
+ u32 tidoffset;
+ /*
+ * KDETH.Offset (Eager) field
+ * We need to remember the initial value so the headers
+ * can be updated properly.
+ */
+ u32 koffset;
+ u32 sent;
+ /* TID index copied from the tid_iov vector */
+ u16 tididx;
+ /* progress index moving along the iovs array */
+ u8 iov_idx;
+ u8 has_error;
+
+ struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ];
+} ____cacheline_aligned_in_smp;
+
+/*
+ * A single txreq could span up to 3 physical pages when the MTU
+ * is sufficiently large (> 4K). Each of the IOV pointers also
+ * needs its own set of flags so the vectors can be handled
+ * independently of each other.
+ */
+struct user_sdma_txreq {
+ /* Packet header for the txreq */
+ struct hfi1_pkt_header hdr;
+ struct sdma_txreq txreq;
+ struct list_head list;
+ struct user_sdma_request *req;
+ u16 flags;
+ u16 seqnum;
+};
+
+int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
+ struct hfi1_filedata *fd);
+int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
+ struct hfi1_ctxtdata *uctxt);
+int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
+ struct iovec *iovec, unsigned long dim,
+ unsigned long *count);
+#endif /* _HFI1_USER_SDMA_H */
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
new file mode 100644
index 000000000000..3cbbfccdd8cd
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -0,0 +1,1948 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2015 - 2020 Intel Corporation.
+ */
+
+#include <rdma/ib_mad.h>
+#include <rdma/ib_user_verbs.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/utsname.h>
+#include <linux/rculist.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <rdma/opa_addr.h>
+#include <linux/nospec.h>
+
+#include "hfi.h"
+#include "common.h"
+#include "device.h"
+#include "trace.h"
+#include "qp.h"
+#include "verbs_txreq.h"
+#include "debugfs.h"
+#include "vnic.h"
+#include "fault.h"
+#include "affinity.h"
+#include "ipoib.h"
+
+static unsigned int hfi1_lkey_table_size = 16;
+module_param_named(lkey_table_size, hfi1_lkey_table_size, uint,
+ S_IRUGO);
+MODULE_PARM_DESC(lkey_table_size,
+ "LKEY table size in bits (2^n, 1 <= n <= 23)");
+
+static unsigned int hfi1_max_pds = 0xFFFF;
+module_param_named(max_pds, hfi1_max_pds, uint, S_IRUGO);
+MODULE_PARM_DESC(max_pds,
+ "Maximum number of protection domains to support");
+
+static unsigned int hfi1_max_ahs = 0xFFFF;
+module_param_named(max_ahs, hfi1_max_ahs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
+
+unsigned int hfi1_max_cqes = 0x2FFFFF;
+module_param_named(max_cqes, hfi1_max_cqes, uint, S_IRUGO);
+MODULE_PARM_DESC(max_cqes,
+ "Maximum number of completion queue entries to support");
+
+unsigned int hfi1_max_cqs = 0x1FFFF;
+module_param_named(max_cqs, hfi1_max_cqs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
+
+unsigned int hfi1_max_qp_wrs = 0x3FFF;
+module_param_named(max_qp_wrs, hfi1_max_qp_wrs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
+
+unsigned int hfi1_max_qps = 32768;
+module_param_named(max_qps, hfi1_max_qps, uint, S_IRUGO);
+MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
+
+unsigned int hfi1_max_sges = 0x60;
+module_param_named(max_sges, hfi1_max_sges, uint, S_IRUGO);
+MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
+
+unsigned int hfi1_max_mcast_grps = 16384;
+module_param_named(max_mcast_grps, hfi1_max_mcast_grps, uint, S_IRUGO);
+MODULE_PARM_DESC(max_mcast_grps,
+ "Maximum number of multicast groups to support");
+
+unsigned int hfi1_max_mcast_qp_attached = 16;
+module_param_named(max_mcast_qp_attached, hfi1_max_mcast_qp_attached,
+ uint, S_IRUGO);
+MODULE_PARM_DESC(max_mcast_qp_attached,
+ "Maximum number of attached QPs to support");
+
+unsigned int hfi1_max_srqs = 1024;
+module_param_named(max_srqs, hfi1_max_srqs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
+
+unsigned int hfi1_max_srq_sges = 128;
+module_param_named(max_srq_sges, hfi1_max_srq_sges, uint, S_IRUGO);
+MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
+
+unsigned int hfi1_max_srq_wrs = 0x1FFFF;
+module_param_named(max_srq_wrs, hfi1_max_srq_wrs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
+
+unsigned short piothreshold = 256;
+module_param(piothreshold, ushort, S_IRUGO);
+MODULE_PARM_DESC(piothreshold, "size used to determine sdma vs. pio");
+
+static unsigned int sge_copy_mode;
+module_param(sge_copy_mode, uint, S_IRUGO);
+MODULE_PARM_DESC(sge_copy_mode,
+ "Verbs copy mode: 0 use memcpy, 1 use cacheless copy, 2 adapt based on WSS");
+
+static void verbs_sdma_complete(
+ struct sdma_txreq *cookie,
+ int status);
+
+static int pio_wait(struct rvt_qp *qp,
+ struct send_context *sc,
+ struct hfi1_pkt_state *ps,
+ u32 flag);
+
+/* Length of buffer to create verbs txreq cache name */
+#define TXREQ_NAME_LEN 24
+
+static uint wss_threshold = 80;
+module_param(wss_threshold, uint, S_IRUGO);
+MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
+static uint wss_clean_period = 256;
+module_param(wss_clean_period, uint, S_IRUGO);
+MODULE_PARM_DESC(wss_clean_period, "Count of verbs copies before an entry in the page copy table is cleaned");
+
+/*
+ * Translate ib_wr_opcode into ib_wc_opcode.
+ */
+const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
+ [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
+ [IB_WR_TID_RDMA_WRITE] = IB_WC_RDMA_WRITE,
+ [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
+ [IB_WR_SEND] = IB_WC_SEND,
+ [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
+ [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
+ [IB_WR_TID_RDMA_READ] = IB_WC_RDMA_READ,
+ [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
+ [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
+ [IB_WR_SEND_WITH_INV] = IB_WC_SEND,
+ [IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
+ [IB_WR_REG_MR] = IB_WC_REG_MR
+};
+
+/*
+ * Length of header by opcode, 0 --> not supported
+ */
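+/*
+ * The addends are the fixed IB header sizes: LRH (8) + BTH (12), plus RETH
+ * (16), AETH (4), AtomicETH (28), AtomicAckETH (8), DETH (8), or an
+ * immediate/invalidate ETH (4) where present; the TID RDMA opcodes carry an
+ * additional 36-byte TID RDMA header.
+ */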
+const u8 hdr_len_by_opcode[256] = {
+ /* RC */
+ [IB_OPCODE_RC_SEND_FIRST] = 12 + 8,
+ [IB_OPCODE_RC_SEND_MIDDLE] = 12 + 8,
+ [IB_OPCODE_RC_SEND_LAST] = 12 + 8,
+ [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
+ [IB_OPCODE_RC_SEND_ONLY] = 12 + 8,
+ [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4,
+ [IB_OPCODE_RC_RDMA_WRITE_FIRST] = 12 + 8 + 16,
+ [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = 12 + 8,
+ [IB_OPCODE_RC_RDMA_WRITE_LAST] = 12 + 8,
+ [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
+ [IB_OPCODE_RC_RDMA_WRITE_ONLY] = 12 + 8 + 16,
+ [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
+ [IB_OPCODE_RC_RDMA_READ_REQUEST] = 12 + 8 + 16,
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = 12 + 8 + 4,
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = 12 + 8,
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = 12 + 8 + 4,
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = 12 + 8 + 4,
+ [IB_OPCODE_RC_ACKNOWLEDGE] = 12 + 8 + 4,
+ [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = 12 + 8 + 4 + 8,
+ [IB_OPCODE_RC_COMPARE_SWAP] = 12 + 8 + 28,
+ [IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28,
+ [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = 12 + 8 + 4,
+ [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = 12 + 8 + 4,
+ [IB_OPCODE_TID_RDMA_READ_REQ] = 12 + 8 + 36,
+ [IB_OPCODE_TID_RDMA_READ_RESP] = 12 + 8 + 36,
+ [IB_OPCODE_TID_RDMA_WRITE_REQ] = 12 + 8 + 36,
+ [IB_OPCODE_TID_RDMA_WRITE_RESP] = 12 + 8 + 36,
+ [IB_OPCODE_TID_RDMA_WRITE_DATA] = 12 + 8 + 36,
+ [IB_OPCODE_TID_RDMA_WRITE_DATA_LAST] = 12 + 8 + 36,
+ [IB_OPCODE_TID_RDMA_ACK] = 12 + 8 + 36,
+ [IB_OPCODE_TID_RDMA_RESYNC] = 12 + 8 + 36,
+ /* UC */
+ [IB_OPCODE_UC_SEND_FIRST] = 12 + 8,
+ [IB_OPCODE_UC_SEND_MIDDLE] = 12 + 8,
+ [IB_OPCODE_UC_SEND_LAST] = 12 + 8,
+ [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
+ [IB_OPCODE_UC_SEND_ONLY] = 12 + 8,
+ [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4,
+ [IB_OPCODE_UC_RDMA_WRITE_FIRST] = 12 + 8 + 16,
+ [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = 12 + 8,
+ [IB_OPCODE_UC_RDMA_WRITE_LAST] = 12 + 8,
+ [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
+ [IB_OPCODE_UC_RDMA_WRITE_ONLY] = 12 + 8 + 16,
+ [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
+ /* UD */
+ [IB_OPCODE_UD_SEND_ONLY] = 12 + 8 + 8,
+ [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 12
+};
+
+static const opcode_handler opcode_handler_tbl[256] = {
+ /* RC */
+ [IB_OPCODE_RC_SEND_FIRST] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_SEND_MIDDLE] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_SEND_LAST] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_SEND_ONLY] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_RDMA_WRITE_FIRST] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_RDMA_WRITE_LAST] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_RDMA_WRITE_ONLY] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_RDMA_READ_REQUEST] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_ACKNOWLEDGE] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_COMPARE_SWAP] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_FETCH_ADD] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = &hfi1_rc_rcv,
+
+ /* TID RDMA has separate handlers for different opcodes. */
+ [IB_OPCODE_TID_RDMA_WRITE_REQ] = &hfi1_rc_rcv_tid_rdma_write_req,
+ [IB_OPCODE_TID_RDMA_WRITE_RESP] = &hfi1_rc_rcv_tid_rdma_write_resp,
+ [IB_OPCODE_TID_RDMA_WRITE_DATA] = &hfi1_rc_rcv_tid_rdma_write_data,
+ [IB_OPCODE_TID_RDMA_WRITE_DATA_LAST] = &hfi1_rc_rcv_tid_rdma_write_data,
+ [IB_OPCODE_TID_RDMA_READ_REQ] = &hfi1_rc_rcv_tid_rdma_read_req,
+ [IB_OPCODE_TID_RDMA_READ_RESP] = &hfi1_rc_rcv_tid_rdma_read_resp,
+ [IB_OPCODE_TID_RDMA_RESYNC] = &hfi1_rc_rcv_tid_rdma_resync,
+ [IB_OPCODE_TID_RDMA_ACK] = &hfi1_rc_rcv_tid_rdma_ack,
+
+ /* UC */
+ [IB_OPCODE_UC_SEND_FIRST] = &hfi1_uc_rcv,
+ [IB_OPCODE_UC_SEND_MIDDLE] = &hfi1_uc_rcv,
+ [IB_OPCODE_UC_SEND_LAST] = &hfi1_uc_rcv,
+ [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
+ [IB_OPCODE_UC_SEND_ONLY] = &hfi1_uc_rcv,
+ [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
+ [IB_OPCODE_UC_RDMA_WRITE_FIRST] = &hfi1_uc_rcv,
+ [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = &hfi1_uc_rcv,
+ [IB_OPCODE_UC_RDMA_WRITE_LAST] = &hfi1_uc_rcv,
+ [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
+ [IB_OPCODE_UC_RDMA_WRITE_ONLY] = &hfi1_uc_rcv,
+ [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
+ /* UD */
+ [IB_OPCODE_UD_SEND_ONLY] = &hfi1_ud_rcv,
+ [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_ud_rcv,
+ /* CNP */
+ [IB_OPCODE_CNP] = &hfi1_cnp_rcv
+};
+
+#define OPMASK 0x1f
+
+static const u32 pio_opmask[BIT(3)] = {
+ /* RC */
+ [IB_OPCODE_RC >> 5] =
+ BIT(RC_OP(SEND_ONLY) & OPMASK) |
+ BIT(RC_OP(SEND_ONLY_WITH_IMMEDIATE) & OPMASK) |
+ BIT(RC_OP(RDMA_WRITE_ONLY) & OPMASK) |
+ BIT(RC_OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & OPMASK) |
+ BIT(RC_OP(RDMA_READ_REQUEST) & OPMASK) |
+ BIT(RC_OP(ACKNOWLEDGE) & OPMASK) |
+ BIT(RC_OP(ATOMIC_ACKNOWLEDGE) & OPMASK) |
+ BIT(RC_OP(COMPARE_SWAP) & OPMASK) |
+ BIT(RC_OP(FETCH_ADD) & OPMASK),
+ /* UC */
+ [IB_OPCODE_UC >> 5] =
+ BIT(UC_OP(SEND_ONLY) & OPMASK) |
+ BIT(UC_OP(SEND_ONLY_WITH_IMMEDIATE) & OPMASK) |
+ BIT(UC_OP(RDMA_WRITE_ONLY) & OPMASK) |
+ BIT(UC_OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & OPMASK),
+};
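+/*
+ * e.g. RC SEND_ONLY (opcode 0x04): pio_opmask[0x04 >> 5] has
+ * BIT(0x04 & OPMASK) set, so get_send_routine() may choose the PIO path
+ * for small sends of that opcode.
+ */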
+
+/*
+ * System image GUID.
+ */
+__be64 ib_hfi1_sys_image_guid;
+
+/*
+ * Make sure the QP is ready and able to accept the given opcode.
+ */
+static inline opcode_handler qp_ok(struct hfi1_packet *packet)
+{
+ if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK))
+ return NULL;
+ if (((packet->opcode & RVT_OPCODE_QP_MASK) ==
+ packet->qp->allowed_ops) ||
+ (packet->opcode == IB_OPCODE_CNP))
+ return opcode_handler_tbl[packet->opcode];
+
+ return NULL;
+}
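+/*
+ * Note: packet->qp->allowed_ops is the transport's opcode prefix (e.g.
+ * IB_OPCODE_RC for an RC QP), so qp_ok() only dispatches opcodes of the
+ * QP's own transport, plus CNP.
+ */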
+
+static u64 hfi1_fault_tx(struct rvt_qp *qp, u8 opcode, u64 pbc)
+{
+#ifdef CONFIG_FAULT_INJECTION
+ if ((opcode & IB_OPCODE_MSP) == IB_OPCODE_MSP) {
+ /*
+ * In order to drop non-IB traffic we
+ * set PbcInsertHrc to NONE (0x2).
+ * The packet will still be delivered
+ * to the receiving node but a
+ * KHdrHCRCErr (KDETH packet with a bad
+ * HCRC) will be triggered and the
+ * packet will not be delivered to the
+ * correct context.
+ */
+ pbc &= ~PBC_INSERT_HCRC_SMASK;
+ pbc |= (u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT;
+ } else {
+ /*
+ * In order to drop regular verbs
+ * traffic we set the PbcTestEbp
+ * flag. The packet will still be
+ * delivered to the receiving node but
+ * a 'late ebp error' will be
+ * triggered and will be dropped.
+ */
+ pbc |= PBC_TEST_EBP;
+ }
+#endif
+ return pbc;
+}
+
+static opcode_handler tid_qp_ok(int opcode, struct hfi1_packet *packet)
+{
+ if (packet->qp->ibqp.qp_type != IB_QPT_RC ||
+ !(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK))
+ return NULL;
+ if ((opcode & RVT_OPCODE_QP_MASK) == IB_OPCODE_TID_RDMA)
+ return opcode_handler_tbl[opcode];
+ return NULL;
+}
+
+void hfi1_kdeth_eager_rcv(struct hfi1_packet *packet)
+{
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct ib_header *hdr = packet->hdr;
+ u32 tlen = packet->tlen;
+ struct hfi1_pportdata *ppd = rcd->ppd;
+ struct hfi1_ibport *ibp = &ppd->ibport_data;
+ struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
+ opcode_handler opcode_handler;
+ unsigned long flags;
+ u32 qp_num;
+ int lnh;
+ u8 opcode;
+
+ /* DW == LRH (2) + BTH (3) + KDETH (9) + CRC (1) */
+ if (unlikely(tlen < 15 * sizeof(u32)))
+ goto drop;
+
+ lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+ if (lnh != HFI1_LRH_BTH)
+ goto drop;
+
+ packet->ohdr = &hdr->u.oth;
+ trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
+
+ opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24);
+ inc_opstats(tlen, &rcd->opstats->stats[opcode]);
+
+ /* verbs_qp can be picked up from any tid_rdma header struct */
+ qp_num = be32_to_cpu(packet->ohdr->u.tid_rdma.r_req.verbs_qp) &
+ RVT_QPN_MASK;
+
+ rcu_read_lock();
+ packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
+ if (!packet->qp)
+ goto drop_rcu;
+ spin_lock_irqsave(&packet->qp->r_lock, flags);
+ opcode_handler = tid_qp_ok(opcode, packet);
+ if (likely(opcode_handler))
+ opcode_handler(packet);
+ else
+ goto drop_unlock;
+ spin_unlock_irqrestore(&packet->qp->r_lock, flags);
+ rcu_read_unlock();
+
+ return;
+drop_unlock:
+ spin_unlock_irqrestore(&packet->qp->r_lock, flags);
+drop_rcu:
+ rcu_read_unlock();
+drop:
+ ibp->rvp.n_pkt_drops++;
+}
+
+void hfi1_kdeth_expected_rcv(struct hfi1_packet *packet)
+{
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct ib_header *hdr = packet->hdr;
+ u32 tlen = packet->tlen;
+ struct hfi1_pportdata *ppd = rcd->ppd;
+ struct hfi1_ibport *ibp = &ppd->ibport_data;
+ struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
+ opcode_handler opcode_handler;
+ unsigned long flags;
+ u32 qp_num;
+ int lnh;
+ u8 opcode;
+
+ /* DW == LRH (2) + BTH (3) + KDETH (9) + CRC (1) */
+ if (unlikely(tlen < 15 * sizeof(u32)))
+ goto drop;
+
+ lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+ if (lnh != HFI1_LRH_BTH)
+ goto drop;
+
+ packet->ohdr = &hdr->u.oth;
+ trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
+
+ opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24);
+ inc_opstats(tlen, &rcd->opstats->stats[opcode]);
+
+ /* verbs_qp can be picked up from any tid_rdma header struct */
+ qp_num = be32_to_cpu(packet->ohdr->u.tid_rdma.r_rsp.verbs_qp) &
+ RVT_QPN_MASK;
+
+ rcu_read_lock();
+ packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
+ if (!packet->qp)
+ goto drop_rcu;
+ spin_lock_irqsave(&packet->qp->r_lock, flags);
+ opcode_handler = tid_qp_ok(opcode, packet);
+ if (likely(opcode_handler))
+ opcode_handler(packet);
+ else
+ goto drop_unlock;
+ spin_unlock_irqrestore(&packet->qp->r_lock, flags);
+ rcu_read_unlock();
+
+ return;
+drop_unlock:
+ spin_unlock_irqrestore(&packet->qp->r_lock, flags);
+drop_rcu:
+ rcu_read_unlock();
+drop:
+ ibp->rvp.n_pkt_drops++;
+}
+
+static int hfi1_do_pkey_check(struct hfi1_packet *packet)
+{
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct hfi1_pportdata *ppd = rcd->ppd;
+ struct hfi1_16b_header *hdr = packet->hdr;
+ u16 pkey;
+
+ /* Pkey check needed only for bypass packets */
+ if (packet->etype != RHF_RCV_TYPE_BYPASS)
+ return 0;
+
+ /* Perform pkey check */
+ pkey = hfi1_16B_get_pkey(hdr);
+ return ingress_pkey_check(ppd, pkey, packet->sc,
+ packet->qp->s_pkey_index,
+ packet->slid, true);
+}
+
+static inline void hfi1_handle_packet(struct hfi1_packet *packet,
+ bool is_mcast)
+{
+ u32 qp_num;
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct hfi1_pportdata *ppd = rcd->ppd;
+ struct hfi1_ibport *ibp = rcd_to_iport(rcd);
+ struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
+ opcode_handler packet_handler;
+ unsigned long flags;
+
+ inc_opstats(packet->tlen, &rcd->opstats->stats[packet->opcode]);
+
+ if (unlikely(is_mcast)) {
+ struct rvt_mcast *mcast;
+ struct rvt_mcast_qp *p;
+
+ if (!packet->grh)
+ goto drop;
+ mcast = rvt_mcast_find(&ibp->rvp,
+ &packet->grh->dgid,
+ opa_get_lid(packet->dlid, 9B));
+ if (!mcast)
+ goto drop;
+ rcu_read_lock();
+ list_for_each_entry_rcu(p, &mcast->qp_list, list) {
+ packet->qp = p->qp;
+ if (hfi1_do_pkey_check(packet))
+ goto unlock_drop;
+ spin_lock_irqsave(&packet->qp->r_lock, flags);
+ packet_handler = qp_ok(packet);
+ if (likely(packet_handler))
+ packet_handler(packet);
+ else
+ ibp->rvp.n_pkt_drops++;
+ spin_unlock_irqrestore(&packet->qp->r_lock, flags);
+ }
+ rcu_read_unlock();
+ /*
+ * Notify rvt_multicast_detach() if it is waiting for us
+ * to finish.
+ */
+ if (atomic_dec_return(&mcast->refcount) <= 1)
+ wake_up(&mcast->wait);
+ } else {
+ /* Get the destination QP number. */
+ if (packet->etype == RHF_RCV_TYPE_BYPASS &&
+ hfi1_16B_get_l4(packet->hdr) == OPA_16B_L4_FM)
+ qp_num = hfi1_16B_get_dest_qpn(packet->mgmt);
+ else
+ qp_num = ib_bth_get_qpn(packet->ohdr);
+
+ rcu_read_lock();
+ packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
+ if (!packet->qp)
+ goto unlock_drop;
+
+ if (hfi1_do_pkey_check(packet))
+ goto unlock_drop;
+
+ spin_lock_irqsave(&packet->qp->r_lock, flags);
+ packet_handler = qp_ok(packet);
+ if (likely(packet_handler))
+ packet_handler(packet);
+ else
+ ibp->rvp.n_pkt_drops++;
+ spin_unlock_irqrestore(&packet->qp->r_lock, flags);
+ rcu_read_unlock();
+ }
+ return;
+unlock_drop:
+ rcu_read_unlock();
+drop:
+ ibp->rvp.n_pkt_drops++;
+}
+
+/**
+ * hfi1_ib_rcv - process an incoming packet
+ * @packet: data packet information
+ *
+ * This is called to process an incoming packet at interrupt level.
+ */
+void hfi1_ib_rcv(struct hfi1_packet *packet)
+{
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+
+ trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
+ hfi1_handle_packet(packet, hfi1_check_mcast(packet->dlid));
+}
+
+void hfi1_16B_rcv(struct hfi1_packet *packet)
+{
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+
+ trace_input_ibhdr(rcd->dd, packet, false);
+ hfi1_handle_packet(packet, hfi1_check_mcast(packet->dlid));
+}
+
+/*
+ * This is called from a timer to check for QPs
+ * which need kernel memory in order to send a packet.
+ */
+static void mem_timer(struct timer_list *t)
+{
+ struct hfi1_ibdev *dev = timer_container_of(dev, t, mem_timer);
+ struct list_head *list = &dev->memwait;
+ struct rvt_qp *qp = NULL;
+ struct iowait *wait;
+ unsigned long flags;
+ struct hfi1_qp_priv *priv;
+
+ write_seqlock_irqsave(&dev->iowait_lock, flags);
+ if (!list_empty(list)) {
+ wait = list_first_entry(list, struct iowait, list);
+ qp = iowait_to_qp(wait);
+ priv = qp->priv;
+ list_del_init(&priv->s_iowait.list);
+ priv->s_iowait.lock = NULL;
+ /* refcount held until actual wake up */
+ if (!list_empty(list))
+ mod_timer(&dev->mem_timer, jiffies + 1);
+ }
+ write_sequnlock_irqrestore(&dev->iowait_lock, flags);
+
+ if (qp)
+ hfi1_qp_wakeup(qp, RVT_S_WAIT_KMEM);
+}
+
+/*
+ * This is called with progress side lock held.
+ */
+/* New API */
+static void verbs_sdma_complete(
+ struct sdma_txreq *cookie,
+ int status)
+{
+ struct verbs_txreq *tx =
+ container_of(cookie, struct verbs_txreq, txreq);
+ struct rvt_qp *qp = tx->qp;
+
+ spin_lock(&qp->s_lock);
+ if (tx->wqe) {
+ rvt_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
+ } else if (qp->ibqp.qp_type == IB_QPT_RC) {
+ struct hfi1_opa_header *hdr;
+
+ hdr = &tx->phdr.hdr;
+ if (unlikely(status == SDMA_TXREQ_S_ABORTED))
+ hfi1_rc_verbs_aborted(qp, hdr);
+ hfi1_rc_send_complete(qp, hdr);
+ }
+ spin_unlock(&qp->s_lock);
+
+ hfi1_put_txreq(tx);
+}
+
+void hfi1_wait_kmem(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct ib_qp *ibqp = &qp->ibqp;
+ struct ib_device *ibdev = ibqp->device;
+ struct hfi1_ibdev *dev = to_idev(ibdev);
+
+ if (list_empty(&priv->s_iowait.list)) {
+ if (list_empty(&dev->memwait))
+ mod_timer(&dev->mem_timer, jiffies + 1);
+ qp->s_flags |= RVT_S_WAIT_KMEM;
+ list_add_tail(&priv->s_iowait.list, &dev->memwait);
+ priv->s_iowait.lock = &dev->iowait_lock;
+ trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM);
+ rvt_get_qp(qp);
+ }
+}
+
+static int wait_kmem(struct hfi1_ibdev *dev,
+ struct rvt_qp *qp,
+ struct hfi1_pkt_state *ps)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
+ write_seqlock(&dev->iowait_lock);
+ list_add_tail(&ps->s_txreq->txreq.list,
+ &ps->wait->tx_head);
+ hfi1_wait_kmem(qp);
+ write_sequnlock(&dev->iowait_lock);
+ hfi1_qp_unbusy(qp, ps->wait);
+ ret = -EBUSY;
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ return ret;
+}
+
+/*
+ * This routine calls txadds for each sg entry.
+ *
+ * Add failures will revert the sge cursor
+ */
+static noinline int build_verbs_ulp_payload(
+ struct sdma_engine *sde,
+ u32 length,
+ struct verbs_txreq *tx)
+{
+ struct rvt_sge_state *ss = tx->ss;
+ struct rvt_sge *sg_list = ss->sg_list;
+ struct rvt_sge sge = ss->sge;
+ u8 num_sge = ss->num_sge;
+ u32 len;
+ int ret = 0;
+
+ while (length) {
+ len = rvt_get_sge_length(&ss->sge, length);
+ WARN_ON_ONCE(len == 0);
+ ret = sdma_txadd_kvaddr(
+ sde->dd,
+ &tx->txreq,
+ ss->sge.vaddr,
+ len);
+ if (ret)
+ goto bail_txadd;
+ rvt_update_sge(ss, len, false);
+ length -= len;
+ }
+ return ret;
+bail_txadd:
+ /* unwind cursor */
+ ss->sge = sge;
+ ss->num_sge = num_sge;
+ ss->sg_list = sg_list;
+ return ret;
+}
+
+/**
+ * update_tx_opstats - record stats by opcode
+ * @qp: the qp
+ * @ps: transmit packet state
+ * @plen: the plen in dwords
+ *
+ * This is a routine to record the tx opstats after a
+ * packet has been presented to the egress mechanism.
+ */
+static void update_tx_opstats(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
+ u32 plen)
+{
+#ifdef CONFIG_DEBUG_FS
+ struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct hfi1_opcode_stats_perctx *s = get_cpu_ptr(dd->tx_opstats);
+
+ inc_opstats(plen * 4, &s->stats[ps->opcode]);
+ put_cpu_ptr(s);
+#endif
+}
+
+/*
+ * Build the number of DMA descriptors needed to send length bytes of data.
+ *
+ * NOTE: DMA mapping is held in the tx until completed in the ring or
+ * the tx desc is freed without having been submitted to the ring
+ *
+ * This routine ensures all the helper routine calls succeed.
+ */
+/* New API */
+static int build_verbs_tx_desc(
+ struct sdma_engine *sde,
+ u32 length,
+ struct verbs_txreq *tx,
+ struct hfi1_ahg_info *ahg_info,
+ u64 pbc)
+{
+ int ret = 0;
+ struct hfi1_sdma_header *phdr = &tx->phdr;
+ u16 hdrbytes = (tx->hdr_dwords + sizeof(pbc) / 4) << 2;
+ u8 extra_bytes = 0;
+
+ if (tx->phdr.hdr.hdr_type) {
+ /*
+ * hdrbytes accounts for PBC. Need to subtract 8 bytes
+ * before calculating padding.
+ */
+ extra_bytes = hfi1_get_16b_padding(hdrbytes - 8, length) +
+ (SIZE_OF_CRC << 2) + SIZE_OF_LT;
+ }
+ if (!ahg_info->ahgcount) {
+ ret = sdma_txinit_ahg(
+ &tx->txreq,
+ ahg_info->tx_flags,
+ hdrbytes + length +
+ extra_bytes,
+ ahg_info->ahgidx,
+ 0,
+ NULL,
+ 0,
+ verbs_sdma_complete);
+ if (ret)
+ goto bail_txadd;
+ phdr->pbc = cpu_to_le64(pbc);
+ ret = sdma_txadd_kvaddr(
+ sde->dd,
+ &tx->txreq,
+ phdr,
+ hdrbytes);
+ if (ret)
+ goto bail_txadd;
+ } else {
+ ret = sdma_txinit_ahg(
+ &tx->txreq,
+ ahg_info->tx_flags,
+ length,
+ ahg_info->ahgidx,
+ ahg_info->ahgcount,
+ ahg_info->ahgdesc,
+ hdrbytes,
+ verbs_sdma_complete);
+ if (ret)
+ goto bail_txadd;
+ }
+ /* add the ulp payload - if any. tx->ss can be NULL for acks */
+ if (tx->ss) {
+ ret = build_verbs_ulp_payload(sde, length, tx);
+ if (ret)
+ goto bail_txadd;
+ }
+
+ /* add icrc, lt byte, and padding to flit */
+ if (extra_bytes)
+ ret = sdma_txadd_daddr(sde->dd, &tx->txreq, sde->dd->sdma_pad_phys,
+ extra_bytes);
+
+bail_txadd:
+ return ret;
+}
+
+static u64 update_hcrc(u8 opcode, u64 pbc)
+{
+ if ((opcode & IB_OPCODE_TID_RDMA) == IB_OPCODE_TID_RDMA) {
+ pbc &= ~PBC_INSERT_HCRC_SMASK;
+ pbc |= (u64)PBC_IHCRC_LKDETH << PBC_INSERT_HCRC_SHIFT;
+ }
+ return pbc;
+}
+
+int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
+ u64 pbc)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_ahg_info *ahg_info = priv->s_ahg;
+ u32 hdrwords = ps->s_txreq->hdr_dwords;
+ u32 len = ps->s_txreq->s_cur_size;
+ u32 plen;
+ struct hfi1_ibdev *dev = ps->dev;
+ struct hfi1_pportdata *ppd = ps->ppd;
+ struct verbs_txreq *tx;
+ u8 sc5 = priv->s_sc;
+ int ret;
+ u32 dwords;
+
+ if (ps->s_txreq->phdr.hdr.hdr_type) {
+ u8 extra_bytes = hfi1_get_16b_padding((hdrwords << 2), len);
+
+ dwords = (len + extra_bytes + (SIZE_OF_CRC << 2) +
+ SIZE_OF_LT) >> 2;
+ } else {
+ dwords = (len + 3) >> 2;
+ }
+ plen = hdrwords + dwords + sizeof(pbc) / 4;
+
+ tx = ps->s_txreq;
+ if (!sdma_txreq_built(&tx->txreq)) {
+ if (likely(pbc == 0)) {
+ u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
+
+ /* No vl15 here */
+ /* set PBC_DC_INFO bit (aka SC[4]) in pbc */
+ if (ps->s_txreq->phdr.hdr.hdr_type)
+ pbc |= PBC_PACKET_BYPASS |
+ PBC_INSERT_BYPASS_ICRC;
+ else
+ pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
+
+ pbc = create_pbc(ppd,
+ pbc,
+ qp->srate_mbps,
+ vl,
+ plen);
+
+ if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
+ pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
+ else
+ /* Update HCRC based on packet opcode */
+ pbc = update_hcrc(ps->opcode, pbc);
+ }
+ tx->wqe = qp->s_wqe;
+ ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc);
+ if (unlikely(ret))
+ goto bail_build;
+ }
+ ret = sdma_send_txreq(tx->sde, ps->wait, &tx->txreq, ps->pkts_sent);
+ if (unlikely(ret < 0)) {
+ if (ret == -ECOMM)
+ goto bail_ecomm;
+ return ret;
+ }
+
+ update_tx_opstats(qp, ps, plen);
+ trace_sdma_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
+ &ps->s_txreq->phdr.hdr, ib_is_sc5(sc5));
+ return ret;
+
+bail_ecomm:
+ /* The current one got "sent" */
+ return 0;
+bail_build:
+ ret = wait_kmem(dev, qp, ps);
+ if (!ret) {
+ /* free txreq - bad state */
+ hfi1_put_txreq(ps->s_txreq);
+ ps->s_txreq = NULL;
+ }
+ return ret;
+}
+
+/*
+ * If we are now in the error state, return zero to flush the
+ * send work request.
+ */
+static int pio_wait(struct rvt_qp *qp,
+ struct send_context *sc,
+ struct hfi1_pkt_state *ps,
+ u32 flag)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_devdata *dd = sc->dd;
+ unsigned long flags;
+ int ret = 0;
+
+ /*
+ * Note that as soon as want_buffer() is called and
+ * possibly before it returns, sc_piobufavail()
+ * could be called. Therefore, put QP on the I/O wait list before
+ * enabling the PIO avail interrupt.
+ */
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
+ write_seqlock(&sc->waitlock);
+ list_add_tail(&ps->s_txreq->txreq.list,
+ &ps->wait->tx_head);
+ if (list_empty(&priv->s_iowait.list)) {
+ struct hfi1_ibdev *dev = &dd->verbs_dev;
+ int was_empty;
+
+ dev->n_piowait += !!(flag & RVT_S_WAIT_PIO);
+ dev->n_piodrain += !!(flag & HFI1_S_WAIT_PIO_DRAIN);
+ qp->s_flags |= flag;
+ was_empty = list_empty(&sc->piowait);
+ iowait_get_priority(&priv->s_iowait);
+ iowait_queue(ps->pkts_sent, &priv->s_iowait,
+ &sc->piowait);
+ priv->s_iowait.lock = &sc->waitlock;
+ trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
+ rvt_get_qp(qp);
+ /* counting: only call wantpiobuf_intr if first user */
+ if (was_empty)
+ hfi1_sc_wantpiobuf_intr(sc, 1);
+ }
+ write_sequnlock(&sc->waitlock);
+ hfi1_qp_unbusy(qp, ps->wait);
+ ret = -EBUSY;
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return ret;
+}
+
+static void verbs_pio_complete(void *arg, int code)
+{
+ struct rvt_qp *qp = (struct rvt_qp *)arg;
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ if (iowait_pio_dec(&priv->s_iowait))
+ iowait_drain_wakeup(&priv->s_iowait);
+}
+
+int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
+ u64 pbc)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ u32 hdrwords = ps->s_txreq->hdr_dwords;
+ struct rvt_sge_state *ss = ps->s_txreq->ss;
+ u32 len = ps->s_txreq->s_cur_size;
+ u32 dwords;
+ u32 plen;
+ struct hfi1_pportdata *ppd = ps->ppd;
+ u32 *hdr;
+ u8 sc5;
+ unsigned long flags = 0;
+ struct send_context *sc;
+ struct pio_buf *pbuf;
+ int wc_status = IB_WC_SUCCESS;
+ int ret = 0;
+ pio_release_cb cb = NULL;
+ u8 extra_bytes = 0;
+
+ if (ps->s_txreq->phdr.hdr.hdr_type) {
+ u8 pad_size = hfi1_get_16b_padding((hdrwords << 2), len);
+
+ extra_bytes = pad_size + (SIZE_OF_CRC << 2) + SIZE_OF_LT;
+ dwords = (len + extra_bytes) >> 2;
+ hdr = (u32 *)&ps->s_txreq->phdr.hdr.opah;
+ } else {
+ dwords = (len + 3) >> 2;
+ hdr = (u32 *)&ps->s_txreq->phdr.hdr.ibh;
+ }
+ plen = hdrwords + dwords + sizeof(pbc) / 4;
+
+ /* only RC/UC use complete */
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_RC:
+ case IB_QPT_UC:
+ cb = verbs_pio_complete;
+ break;
+ default:
+ break;
+ }
+
+ /* vl15 special case taken care of in ud.c */
+ sc5 = priv->s_sc;
+ sc = ps->s_txreq->psc;
+
+ if (likely(pbc == 0)) {
+ u8 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
+
+ /* set PBC_DC_INFO bit (aka SC[4]) in pbc */
+ if (ps->s_txreq->phdr.hdr.hdr_type)
+ pbc |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
+ else
+ pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
+
+ pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen);
+ if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
+ pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
+ else
+ /* Update HCRC based on packet opcode */
+ pbc = update_hcrc(ps->opcode, pbc);
+ }
+ if (cb)
+ iowait_pio_inc(&priv->s_iowait);
+ pbuf = sc_buffer_alloc(sc, plen, cb, qp);
+ if (IS_ERR_OR_NULL(pbuf)) {
+ if (cb)
+ verbs_pio_complete(qp, 0);
+ if (IS_ERR(pbuf)) {
+ /*
+ * If we have filled the PIO buffers to capacity and are not
+ * in an active state, this request is not going to go out,
+ * so just complete it with an error; otherwise a ULP or the
+ * core may be stuck waiting.
+ */
+ hfi1_cdbg(
+ PIO,
+ "alloc failed. state not active, completing");
+ wc_status = IB_WC_GENERAL_ERR;
+ goto pio_bail;
+ } else {
+ /*
+ * This is a normal occurrence. The PIO buffers are full, but
+ * we are still able to send, so queue the request and keep
+ * going.
+ */
+ hfi1_cdbg(PIO, "alloc failed. state active, queuing");
+ ret = pio_wait(qp, sc, ps, RVT_S_WAIT_PIO);
+ if (!ret)
+ /* txreq not queued - free */
+ goto bail;
+ /* tx consumed in wait */
+ return ret;
+ }
+ }
+
+ if (dwords == 0) {
+ pio_copy(ppd->dd, pbuf, pbc, hdr, hdrwords);
+ } else {
+ seg_pio_copy_start(pbuf, pbc,
+ hdr, hdrwords * 4);
+ if (ss) {
+ while (len) {
+ void *addr = ss->sge.vaddr;
+ u32 slen = rvt_get_sge_length(&ss->sge, len);
+
+ rvt_update_sge(ss, slen, false);
+ seg_pio_copy_mid(pbuf, addr, slen);
+ len -= slen;
+ }
+ }
+ /* add icrc, lt byte, and padding to flit */
+ if (extra_bytes)
+ seg_pio_copy_mid(pbuf, ppd->dd->sdma_pad_dma,
+ extra_bytes);
+
+ seg_pio_copy_end(pbuf);
+ }
+
+ update_tx_opstats(qp, ps, plen);
+ trace_pio_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
+ &ps->s_txreq->phdr.hdr, ib_is_sc5(sc5));
+
+pio_bail:
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qp->s_wqe) {
+ rvt_send_complete(qp, qp->s_wqe, wc_status);
+ } else if (qp->ibqp.qp_type == IB_QPT_RC) {
+ if (unlikely(wc_status == IB_WC_GENERAL_ERR))
+ hfi1_rc_verbs_aborted(qp, &ps->s_txreq->phdr.hdr);
+ hfi1_rc_send_complete(qp, &ps->s_txreq->phdr.hdr);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ ret = 0;
+
+bail:
+ hfi1_put_txreq(ps->s_txreq);
+ return ret;
+}
+
+/*
+ * egress_pkey_matches_entry - return 1 if the pkey matches ent (ent
+ * being an entry from the partition key table), return 0
+ * otherwise. Use the matching criteria for egress partition keys
+ * specified in the OPAv1 spec, section 9.11.7.
+ */
+static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
+{
+ u16 mkey = pkey & PKEY_LOW_15_MASK;
+ u16 mentry = ent & PKEY_LOW_15_MASK;
+
+ if (mkey == mentry) {
+ /*
+ * If pkey[15] is set (full partition member),
+ * is bit 15 in the corresponding table element
+ * clear (limited member)?
+ */
+ if (pkey & PKEY_MEMBER_MASK)
+ return !!(ent & PKEY_MEMBER_MASK);
+ return 1;
+ }
+ return 0;
+}
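+/*
+ * Illustrative examples (derived from the logic above, with
+ * PKEY_MEMBER_MASK being bit 15): a limited-member pkey 0x0001 matches
+ * either table entry 0x0001 or 0x8001, while a full-member pkey 0x8001
+ * matches only the full-member entry 0x8001.
+ */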
+
+/**
+ * egress_pkey_check - check P_KEY of a packet
+ * @ppd: Physical IB port data
+ * @slid: SLID for packet
+ * @pkey: PKEY for header
+ * @sc5: SC for packet
+ * @s_pkey_index: used as a lookup optimization for kernel contexts only;
+ * a negative value indicates that a user context is calling this
+ * function.
+ *
+ * Checks whether the header's pkey is valid.
+ *
+ * Return: 0 on success, 1 otherwise
+ */
+int egress_pkey_check(struct hfi1_pportdata *ppd, u32 slid, u16 pkey,
+ u8 sc5, int8_t s_pkey_index)
+{
+ struct hfi1_devdata *dd;
+ int i;
+ int is_user_ctxt_mechanism = (s_pkey_index < 0);
+
+ if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT))
+ return 0;
+
+ /* If SC15, pkey[0:14] must be 0x7fff */
+ if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
+ goto bad;
+
+ /* Is the pkey = 0x0, or 0x8000? */
+ if ((pkey & PKEY_LOW_15_MASK) == 0)
+ goto bad;
+
+ /*
+ * For kernel contexts only, the most likely matching pkey has
+ * index s_pkey_index.
+ */
+ if (!is_user_ctxt_mechanism &&
+ egress_pkey_matches_entry(pkey, ppd->pkeys[s_pkey_index])) {
+ return 0;
+ }
+
+ for (i = 0; i < MAX_PKEY_VALUES; i++) {
+ if (egress_pkey_matches_entry(pkey, ppd->pkeys[i]))
+ return 0;
+ }
+bad:
+ /*
+ * For the user-context mechanism, the P_KEY check would only happen
+ * once per SDMA request, not once per packet. Therefore, there's no
+ * need to increment the counter for the user-context mechanism.
+ */
+ if (!is_user_ctxt_mechanism) {
+ incr_cntr64(&ppd->port_xmit_constraint_errors);
+ dd = ppd->dd;
+ if (!(dd->err_info_xmit_constraint.status &
+ OPA_EI_STATUS_SMASK)) {
+ dd->err_info_xmit_constraint.status |=
+ OPA_EI_STATUS_SMASK;
+ dd->err_info_xmit_constraint.slid = slid;
+ dd->err_info_xmit_constraint.pkey = pkey;
+ }
+ }
+ return 1;
+}
+
+/*
+ * get_send_routine - choose an egress routine
+ *
+ * Choose an egress routine based on QP type
+ * and size
+ */
+static inline send_routine get_send_routine(struct rvt_qp *qp,
+ struct hfi1_pkt_state *ps)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct verbs_txreq *tx = ps->s_txreq;
+
+ if (unlikely(!(dd->flags & HFI1_HAS_SEND_DMA)))
+ return dd->process_pio_send;
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_SMI:
+ return dd->process_pio_send;
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ break;
+ case IB_QPT_UC:
+ case IB_QPT_RC:
+ priv->s_running_pkt_size =
+ (tx->s_cur_size + priv->s_running_pkt_size) / 2;
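+ /*
+ * The assignment above keeps a simple running average of the
+ * packet size; PIO is only chosen while that average stays at or
+ * below the smaller of piothreshold and the QP's PMTU.
+ */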
+ if (piothreshold &&
+ priv->s_running_pkt_size <= min(piothreshold, qp->pmtu) &&
+ (BIT(ps->opcode & OPMASK) & pio_opmask[ps->opcode >> 5]) &&
+ iowait_sdma_pending(&priv->s_iowait) == 0 &&
+ !sdma_txreq_built(&tx->txreq))
+ return dd->process_pio_send;
+ break;
+ default:
+ break;
+ }
+ return dd->process_dma_send;
+}
+
+/**
+ * hfi1_verbs_send - send a packet
+ * @qp: the QP to send on
+ * @ps: the state of the packet to send
+ *
+ * Return zero if packet is sent or queued OK.
+ * Return non-zero and clear RVT_S_BUSY in qp->s_flags otherwise.
+ */
+int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct ib_other_headers *ohdr = NULL;
+ send_routine sr;
+ int ret;
+ u16 pkey;
+ u32 slid;
+ u8 l4 = 0;
+
+ /* locate the pkey within the headers */
+ if (ps->s_txreq->phdr.hdr.hdr_type) {
+ struct hfi1_16b_header *hdr = &ps->s_txreq->phdr.hdr.opah;
+
+ l4 = hfi1_16B_get_l4(hdr);
+ if (l4 == OPA_16B_L4_IB_LOCAL)
+ ohdr = &hdr->u.oth;
+ else if (l4 == OPA_16B_L4_IB_GLOBAL)
+ ohdr = &hdr->u.l.oth;
+
+ slid = hfi1_16B_get_slid(hdr);
+ pkey = hfi1_16B_get_pkey(hdr);
+ } else {
+ struct ib_header *hdr = &ps->s_txreq->phdr.hdr.ibh;
+ u8 lnh = ib_get_lnh(hdr);
+
+ if (lnh == HFI1_LRH_GRH)
+ ohdr = &hdr->u.l.oth;
+ else
+ ohdr = &hdr->u.oth;
+ slid = ib_get_slid(hdr);
+ pkey = ib_bth_get_pkey(ohdr);
+ }
+
+ if (likely(l4 != OPA_16B_L4_FM))
+ ps->opcode = ib_bth_get_opcode(ohdr);
+ else
+ ps->opcode = IB_OPCODE_UD_SEND_ONLY;
+
+ sr = get_send_routine(qp, ps);
+ ret = egress_pkey_check(dd->pport, slid, pkey,
+ priv->s_sc, qp->s_pkey_index);
+ if (unlikely(ret)) {
+ /*
+ * The value we are returning here does not get propagated to
+ * the verbs caller. Thus we need to complete the request with
+ * error otherwise the caller could be sitting waiting on the
+ * completion event. Only do this for PIO. SDMA has its own
+ * mechanism for handling the errors. So for SDMA we can just
+ * return.
+ */
+ if (sr == dd->process_pio_send) {
+ unsigned long flags;
+
+ hfi1_cdbg(PIO, "%s() Failed. Completing with err",
+ __func__);
+ spin_lock_irqsave(&qp->s_lock, flags);
+ rvt_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ }
+ return -EINVAL;
+ }
+ if (sr == dd->process_dma_send && iowait_pio_pending(&priv->s_iowait))
+ return pio_wait(qp,
+ ps->s_txreq->psc,
+ ps,
+ HFI1_S_WAIT_PIO_DRAIN);
+ return sr(qp, ps, 0);
+}
+
+/**
+ * hfi1_fill_device_attr - Fill in rvt dev info device attributes.
+ * @dd: the device data structure
+ */
+static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
+{
+ struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
+ u32 ver = dd->dc8051_ver;
+
+ memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));
+
+ rdi->dparms.props.fw_ver = ((u64)(dc8051_ver_maj(ver)) << 32) |
+ ((u64)(dc8051_ver_min(ver)) << 16) |
+ (u64)dc8051_ver_patch(ver);
+
+ rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
+ IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
+ IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
+ IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE |
+ IB_DEVICE_MEM_MGT_EXTENSIONS;
+ rdi->dparms.props.kernel_cap_flags = IBK_RDMA_NETDEV_OPA;
+ rdi->dparms.props.page_size_cap = PAGE_SIZE;
+ rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
+ rdi->dparms.props.vendor_part_id = dd->pcidev->device;
+ rdi->dparms.props.hw_ver = dd->minrev;
+ rdi->dparms.props.sys_image_guid = ib_hfi1_sys_image_guid;
+ rdi->dparms.props.max_mr_size = U64_MAX;
+ rdi->dparms.props.max_fast_reg_page_list_len = UINT_MAX;
+ rdi->dparms.props.max_qp = hfi1_max_qps;
+ rdi->dparms.props.max_qp_wr =
+ (hfi1_max_qp_wrs >= HFI1_QP_WQE_INVALID ?
+ HFI1_QP_WQE_INVALID - 1 : hfi1_max_qp_wrs);
+ rdi->dparms.props.max_send_sge = hfi1_max_sges;
+ rdi->dparms.props.max_recv_sge = hfi1_max_sges;
+ rdi->dparms.props.max_sge_rd = hfi1_max_sges;
+ rdi->dparms.props.max_cq = hfi1_max_cqs;
+ rdi->dparms.props.max_ah = hfi1_max_ahs;
+ rdi->dparms.props.max_cqe = hfi1_max_cqes;
+ rdi->dparms.props.max_pd = hfi1_max_pds;
+ rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
+ rdi->dparms.props.max_qp_init_rd_atom = 255;
+ rdi->dparms.props.max_srq = hfi1_max_srqs;
+ rdi->dparms.props.max_srq_wr = hfi1_max_srq_wrs;
+ rdi->dparms.props.max_srq_sge = hfi1_max_srq_sges;
+ rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
+ rdi->dparms.props.max_pkeys = hfi1_get_npkeys(dd);
+ rdi->dparms.props.max_mcast_grp = hfi1_max_mcast_grps;
+ rdi->dparms.props.max_mcast_qp_attach = hfi1_max_mcast_qp_attached;
+ rdi->dparms.props.max_total_mcast_qp_attach =
+ rdi->dparms.props.max_mcast_qp_attach *
+ rdi->dparms.props.max_mcast_grp;
+}
+
+static inline u16 opa_speed_to_ib(u16 in)
+{
+ u16 out = 0;
+
+ if (in & OPA_LINK_SPEED_25G)
+ out |= IB_SPEED_EDR;
+ if (in & OPA_LINK_SPEED_12_5G)
+ out |= IB_SPEED_FDR;
+
+ return out;
+}
+
+/*
+ * Convert a single OPA link width (no multiple flags) to an IB value.
+ * A zero OPA link width means link down, which means the IB width value
+ * is a don't care.
+ */
+static inline u16 opa_width_to_ib(u16 in)
+{
+ switch (in) {
+ case OPA_LINK_WIDTH_1X:
+ /* map 2x and 3x to 1x as they don't exist in IB */
+ case OPA_LINK_WIDTH_2X:
+ case OPA_LINK_WIDTH_3X:
+ return IB_WIDTH_1X;
+ default: /* link down or unknown, return our largest width */
+ case OPA_LINK_WIDTH_4X:
+ return IB_WIDTH_4X;
+ }
+}
+
+static int query_port(struct rvt_dev_info *rdi, u32 port_num,
+ struct ib_port_attr *props)
+{
+ struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
+ struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
+ struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
+ u32 lid = ppd->lid;
+
+ /* props being zeroed by the caller, avoid zeroing it here */
+ props->lid = lid ? lid : 0;
+ props->lmc = ppd->lmc;
+ /* OPA logical states match IB logical states */
+ props->state = driver_lstate(ppd);
+ props->phys_state = driver_pstate(ppd);
+ props->gid_tbl_len = HFI1_GUIDS_PER_PORT;
+ props->active_width = (u8)opa_width_to_ib(ppd->link_width_active);
+ /* see rate_show() in ib core/sysfs.c */
+ props->active_speed = opa_speed_to_ib(ppd->link_speed_active);
+ props->max_vl_num = ppd->vls_supported;
+
+ /* Once we are a "first class" citizen and have added the OPA MTUs to
+ * the core, we can advertise the larger MTU enum to the ULPs; for now,
+ * advertise only 4K.
+ *
+ * Those applications which are either OPA aware or pass the MTU enum
+ * from the Path Records to us will get the new 8k MTU. Those that
+ * attempt to process the MTU enum may fail in various ways.
+ */
+ props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ?
+ 4096 : hfi1_max_mtu), IB_MTU_4096);
+ props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
+ mtu_to_enum(ppd->ibmtu, IB_MTU_4096);
+ props->phys_mtu = hfi1_max_mtu;
+
+ return 0;
+}
+
+static int modify_device(struct ib_device *device,
+ int device_modify_mask,
+ struct ib_device_modify *device_modify)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(device);
+ unsigned i;
+ int ret;
+
+ if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
+ IB_DEVICE_MODIFY_NODE_DESC)) {
+ ret = -EOPNOTSUPP;
+ goto bail;
+ }
+
+ if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
+ memcpy(device->node_desc, device_modify->node_desc,
+ IB_DEVICE_NODE_DESC_MAX);
+ for (i = 0; i < dd->num_pports; i++) {
+ struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;
+
+ hfi1_node_desc_chg(ibp);
+ }
+ }
+
+ if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
+ ib_hfi1_sys_image_guid =
+ cpu_to_be64(device_modify->sys_image_guid);
+ for (i = 0; i < dd->num_pports; i++) {
+ struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;
+
+ hfi1_sys_guid_chg(ibp);
+ }
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+static int shut_down_port(struct rvt_dev_info *rdi, u32 port_num)
+{
+ struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
+ struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
+ struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
+
+ set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0,
+ OPA_LINKDOWN_REASON_UNKNOWN);
+ return set_link_state(ppd, HLS_DN_DOWNDEF);
+}
+
+static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
+ int guid_index, __be64 *guid)
+{
+ struct hfi1_ibport *ibp = container_of(rvp, struct hfi1_ibport, rvp);
+
+ if (guid_index >= HFI1_GUIDS_PER_PORT)
+ return -EINVAL;
+
+ *guid = get_sguid(ibp, guid_index);
+ return 0;
+}
+
+/*
+ * convert ah port,sl to sc
+ */
+u8 ah_to_sc(struct ib_device *ibdev, struct rdma_ah_attr *ah)
+{
+ struct hfi1_ibport *ibp = to_iport(ibdev, rdma_ah_get_port_num(ah));
+
+ return ibp->sl_to_sc[rdma_ah_get_sl(ah)];
+}
+
+static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
+{
+ struct hfi1_ibport *ibp;
+ struct hfi1_pportdata *ppd;
+ struct hfi1_devdata *dd;
+ u8 sc5;
+ u8 sl;
+
+ if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) &&
+ !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
+ return -EINVAL;
+
+ /* test the mapping for validity */
+ ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
+ ppd = ppd_from_ibp(ibp);
+ dd = dd_from_ppd(ppd);
+
+ sl = rdma_ah_get_sl(ah_attr);
+ if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
+ return -EINVAL;
+ sl = array_index_nospec(sl, ARRAY_SIZE(ibp->sl_to_sc));
+
+ sc5 = ibp->sl_to_sc[sl];
+ if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
+ return -EINVAL;
+ return 0;
+}
+
+static void hfi1_notify_new_ah(struct ib_device *ibdev,
+ struct rdma_ah_attr *ah_attr,
+ struct rvt_ah *ah)
+{
+ struct hfi1_ibport *ibp;
+ struct hfi1_pportdata *ppd;
+ struct hfi1_devdata *dd;
+ u8 sc5;
+ struct rdma_ah_attr *attr = &ah->attr;
+
+ /*
+ * Do not trust reading anything from rvt_ah at this point as it is not
+ * done being set up. We can, however, modify things which we need to set.
+ */
+
+ ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
+ ppd = ppd_from_ibp(ibp);
+ sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)];
+ hfi1_update_ah_attr(ibdev, attr);
+ hfi1_make_opa_lid(attr);
+ dd = dd_from_ppd(ppd);
+ ah->vl = sc_to_vlt(dd, sc5);
+ if (ah->vl < num_vls || ah->vl == 15)
+ ah->log_pmtu = ilog2(dd->vld[ah->vl].mtu);
+}
+
+/**
+ * hfi1_get_npkeys - return the size of the PKEY table for context 0
+ * @dd: the hfi1_ib device
+ */
+unsigned hfi1_get_npkeys(struct hfi1_devdata *dd)
+{
+ return ARRAY_SIZE(dd->pport[0].pkeys);
+}
+
+static void init_ibport(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_ibport *ibp = &ppd->ibport_data;
+ size_t sz = ARRAY_SIZE(ibp->sl_to_sc);
+ int i;
+
+ for (i = 0; i < sz; i++) {
+ ibp->sl_to_sc[i] = i;
+ ibp->sc_to_sl[i] = i;
+ }
+
+ for (i = 0; i < RVT_MAX_TRAP_LISTS; i++)
+ INIT_LIST_HEAD(&ibp->rvp.trap_lists[i].list);
+ timer_setup(&ibp->rvp.trap_timer, hfi1_handle_trap_timer, 0);
+
+ spin_lock_init(&ibp->rvp.lock);
+ /* Set the prefix to the default value (see ch. 4.1.1) */
+ ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
+ ibp->rvp.sm_lid = 0;
+ /*
+ * Below should only set bits defined in OPA PortInfo.CapabilityMask
+ * and PortInfo.CapabilityMask3
+ */
+ ibp->rvp.port_cap_flags = IB_PORT_AUTO_MIGR_SUP |
+ IB_PORT_CAP_MASK_NOTICE_SUP;
+ ibp->rvp.port_cap3_flags = OPA_CAP_MASK3_IsSharedSpaceSupported;
+ ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
+ ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
+ ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
+ ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
+ ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
+
+ RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
+ RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
+}
+
+static void hfi1_get_dev_fw_str(struct ib_device *ibdev, char *str)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+ struct hfi1_ibdev *dev = dev_from_rdi(rdi);
+ u32 ver = dd_from_dev(dev)->dc8051_ver;
+
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u", dc8051_ver_maj(ver),
+ dc8051_ver_min(ver), dc8051_ver_patch(ver));
+}
+
+static const char * const driver_cntr_names[] = {
+ /* must be element 0 */
+ "DRIVER_KernIntr",
+ "DRIVER_ErrorIntr",
+ "DRIVER_Tx_Errs",
+ "DRIVER_Rcv_Errs",
+ "DRIVER_HW_Errs",
+ "DRIVER_NoPIOBufs",
+ "DRIVER_CtxtsOpen",
+ "DRIVER_RcvLen_Errs",
+ "DRIVER_EgrBufFull",
+ "DRIVER_EgrHdrFull"
+};
+
+static struct rdma_stat_desc *dev_cntr_descs;
+static struct rdma_stat_desc *port_cntr_descs;
+int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
+static int num_dev_cntrs;
+static int num_port_cntrs;
+
+/*
+ * Convert a list of names separated by '\n' into an array of NULL terminated
+ * strings. Optionally some entries can be reserved in the array to hold extra
+ * external strings.
+ */
+static int init_cntr_names(const char *names_in, const size_t names_len,
+ int num_extra_names, int *num_cntrs,
+ struct rdma_stat_desc **cntr_descs)
+{
+ struct rdma_stat_desc *names_out;
+ char *p;
+ int i, n;
+
+ n = 0;
+ for (i = 0; i < names_len; i++)
+ if (names_in[i] == '\n')
+ n++;
+
+ names_out = kzalloc((n + num_extra_names) * sizeof(*names_out)
+ + names_len,
+ GFP_KERNEL);
+ if (!names_out) {
+ *num_cntrs = 0;
+ *cntr_descs = NULL;
+ return -ENOMEM;
+ }
+
+ p = (char *)&names_out[n + num_extra_names];
+ memcpy(p, names_in, names_len);
+
+ for (i = 0; i < n; i++) {
+ names_out[i].name = p;
+ p = strchr(p, '\n');
+ *p++ = '\0';
+ }
+
+ *num_cntrs = n;
+ *cntr_descs = names_out;
+ return 0;
+}
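+/*
+ * Example: names_in = "pkts\nbytes\n" with num_extra_names = 1 yields
+ * n = 2, a descriptor array of 3 entries (the last left for the caller
+ * to fill, as hfi1_alloc_hw_device_stats() does below), and the two
+ * counted names pointing into the copied string block.
+ */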
+
+static struct rdma_hw_stats *hfi1_alloc_hw_device_stats(struct ib_device *ibdev)
+{
+ if (!dev_cntr_descs) {
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ int i, err;
+
+ err = init_cntr_names(dd->cntrnames, dd->cntrnameslen,
+ num_driver_cntrs,
+ &num_dev_cntrs, &dev_cntr_descs);
+ if (err)
+ return NULL;
+
+ for (i = 0; i < num_driver_cntrs; i++)
+ dev_cntr_descs[num_dev_cntrs + i].name =
+ driver_cntr_names[i];
+ }
+ return rdma_alloc_hw_stats_struct(dev_cntr_descs,
+ num_dev_cntrs + num_driver_cntrs,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static struct rdma_hw_stats *hfi_alloc_hw_port_stats(struct ib_device *ibdev,
+ u32 port_num)
+{
+ if (!port_cntr_descs) {
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ int err;
+
+ err = init_cntr_names(dd->portcntrnames, dd->portcntrnameslen,
+ 0,
+ &num_port_cntrs, &port_cntr_descs);
+ if (err)
+ return NULL;
+ }
+ return rdma_alloc_hw_stats_struct(port_cntr_descs, num_port_cntrs,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static u64 hfi1_sps_ints(void)
+{
+ unsigned long index, flags;
+ struct hfi1_devdata *dd;
+ u64 sps_ints = 0;
+
+ xa_lock_irqsave(&hfi1_dev_table, flags);
+ xa_for_each(&hfi1_dev_table, index, dd) {
+ sps_ints += get_all_cpu_total(dd->int_counter);
+ }
+ xa_unlock_irqrestore(&hfi1_dev_table, flags);
+ return sps_ints;
+}
+
+static int get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u32 port, int index)
+{
+ u64 *values;
+ int count;
+
+ if (!port) {
+ u64 *stats = (u64 *)&hfi1_stats;
+ int i;
+
+ hfi1_read_cntrs(dd_from_ibdev(ibdev), NULL, &values);
+ values[num_dev_cntrs] = hfi1_sps_ints();
+ for (i = 1; i < num_driver_cntrs; i++)
+ values[num_dev_cntrs + i] = stats[i];
+ count = num_dev_cntrs + num_driver_cntrs;
+ } else {
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+
+ hfi1_read_portcntrs(ppd_from_ibp(ibp), NULL, &values);
+ count = num_port_cntrs;
+ }
+
+ memcpy(stats->value, values, count * sizeof(u64));
+ return count;
+}
+
+static const struct ib_device_ops hfi1_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_HFI1,
+
+ .alloc_hw_device_stats = hfi1_alloc_hw_device_stats,
+ .alloc_hw_port_stats = hfi_alloc_hw_port_stats,
+ .alloc_rdma_netdev = hfi1_vnic_alloc_rn,
+ .device_group = &ib_hfi1_attr_group,
+ .get_dev_fw_str = hfi1_get_dev_fw_str,
+ .get_hw_stats = get_hw_stats,
+ .modify_device = modify_device,
+ .port_groups = hfi1_attr_port_groups,
+ /* keep process mad in the driver */
+ .process_mad = hfi1_process_mad,
+ .rdma_netdev_get_params = hfi1_ipoib_rn_get_params,
+};
+
+/**
+ * hfi1_register_ib_device - register our device with the infiniband core
+ * @dd: the device data structure
+ * Return 0 if successful, errno if unsuccessful.
+ */
+int hfi1_register_ib_device(struct hfi1_devdata *dd)
+{
+ struct hfi1_ibdev *dev = &dd->verbs_dev;
+ struct ib_device *ibdev = &dev->rdi.ibdev;
+ struct hfi1_pportdata *ppd = dd->pport;
+ struct hfi1_ibport *ibp = &ppd->ibport_data;
+ unsigned i;
+ int ret;
+
+ for (i = 0; i < dd->num_pports; i++)
+ init_ibport(ppd + i);
+
+ /* Only need to initialize non-zero fields. */
+
+ timer_setup(&dev->mem_timer, mem_timer, 0);
+
+ seqlock_init(&dev->iowait_lock);
+ seqlock_init(&dev->txwait_lock);
+ INIT_LIST_HEAD(&dev->txwait);
+ INIT_LIST_HEAD(&dev->memwait);
+
+ ret = verbs_txreq_init(dev);
+ if (ret)
+ goto err_verbs_txreq;
+
+ /* Use first-port GUID as node guid */
+ ibdev->node_guid = get_sguid(ibp, HFI1_PORT_GUID_INDEX);
+
+ /*
+ * The system image GUID is supposed to be the same for all
+ * HFIs in a single system but since there can be other
+ * device types in the system, we can't be sure this is unique.
+ */
+ if (!ib_hfi1_sys_image_guid)
+ ib_hfi1_sys_image_guid = ibdev->node_guid;
+ ibdev->phys_port_cnt = dd->num_pports;
+ ibdev->dev.parent = &dd->pcidev->dev;
+
+ ib_set_device_ops(ibdev, &hfi1_dev_ops);
+
+ strscpy(ibdev->node_desc, init_utsname()->nodename,
+ sizeof(ibdev->node_desc));
+
+ /*
+ * Fill in rvt info object.
+ */
+ dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev;
+ dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah;
+ dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah;
+ dd->verbs_dev.rdi.driver_f.get_guid_be = hfi1_get_guid_be;
+ dd->verbs_dev.rdi.driver_f.query_port_state = query_port;
+ dd->verbs_dev.rdi.driver_f.shut_down_port = shut_down_port;
+ dd->verbs_dev.rdi.driver_f.cap_mask_chg = hfi1_cap_mask_chg;
+ /*
+ * Fill in rvt info device attributes.
+ */
+ hfi1_fill_device_attr(dd);
+
+ /* queue pair */
+ dd->verbs_dev.rdi.dparms.qp_table_size = hfi1_qp_table_size;
+ dd->verbs_dev.rdi.dparms.qpn_start = 0;
+ dd->verbs_dev.rdi.dparms.qpn_inc = 1;
+ dd->verbs_dev.rdi.dparms.qos_shift = dd->qos_shift;
+ dd->verbs_dev.rdi.dparms.qpn_res_start = RVT_KDETH_QP_BASE;
+ dd->verbs_dev.rdi.dparms.qpn_res_end = RVT_AIP_QP_MAX;
+ dd->verbs_dev.rdi.dparms.max_rdma_atomic = HFI1_MAX_RDMA_ATOMIC;
+ dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK;
+ dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT;
+ dd->verbs_dev.rdi.dparms.psn_modify_mask = PSN_MODIFY_MASK;
+ dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_INTEL_OPA |
+ RDMA_CORE_CAP_OPA_AH;
+ dd->verbs_dev.rdi.dparms.max_mad_size = OPA_MGMT_MAD_SIZE;
+
+ dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc;
+ dd->verbs_dev.rdi.driver_f.qp_priv_init = hfi1_qp_priv_init;
+ dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
+ dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps;
+ dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
+ dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send_from_rvt;
+ dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
+ dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send;
+ dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
+ dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp;
+ dd->verbs_dev.rdi.driver_f.flush_qp_waiters = flush_qp_waiters;
+ dd->verbs_dev.rdi.driver_f.stop_send_queue = stop_send_queue;
+ dd->verbs_dev.rdi.driver_f.quiesce_qp = quiesce_qp;
+ dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp;
+ dd->verbs_dev.rdi.driver_f.mtu_from_qp = mtu_from_qp;
+ dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = mtu_to_path_mtu;
+ dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp;
+ dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;
+ dd->verbs_dev.rdi.driver_f.notify_restart_rc = hfi1_restart_rc;
+ dd->verbs_dev.rdi.driver_f.setup_wqe = hfi1_setup_wqe;
+ dd->verbs_dev.rdi.driver_f.comp_vect_cpu_lookup =
+ hfi1_comp_vect_mappings_lookup;
+
+ /* completion queue */
+ dd->verbs_dev.rdi.ibdev.num_comp_vectors = dd->comp_vect_possible_cpus;
+ dd->verbs_dev.rdi.dparms.node = dd->node;
+
+ /* misc settings */
+ dd->verbs_dev.rdi.flags = 0; /* Let rdmavt handle it all */
+ dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size;
+ dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
+ dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd);
+ dd->verbs_dev.rdi.dparms.sge_copy_mode = sge_copy_mode;
+ dd->verbs_dev.rdi.dparms.wss_threshold = wss_threshold;
+ dd->verbs_dev.rdi.dparms.wss_clean_period = wss_clean_period;
+ dd->verbs_dev.rdi.dparms.reserved_operations = 1;
+ dd->verbs_dev.rdi.dparms.extra_rdma_atomic = HFI1_TID_RDMA_WRITE_CNT;
+
+ /* post send table */
+ dd->verbs_dev.rdi.post_parms = hfi1_post_parms;
+
+ /* opcode translation table */
+ dd->verbs_dev.rdi.wc_opcode = ib_hfi1_wc_opcode;
+
+ ppd = dd->pport;
+ for (i = 0; i < dd->num_pports; i++, ppd++)
+ rvt_init_port(&dd->verbs_dev.rdi,
+ &ppd->ibport_data.rvp,
+ i,
+ ppd->pkeys);
+
+ ret = rvt_register_device(&dd->verbs_dev.rdi);
+ if (ret)
+ goto err_verbs_txreq;
+
+ ret = hfi1_verbs_register_sysfs(dd);
+ if (ret)
+ goto err_class;
+
+ return ret;
+
+err_class:
+ rvt_unregister_device(&dd->verbs_dev.rdi);
+err_verbs_txreq:
+ verbs_txreq_exit(dev);
+ dd_dev_err(dd, "cannot register verbs: %d!\n", -ret);
+ return ret;
+}
+
+void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
+{
+ struct hfi1_ibdev *dev = &dd->verbs_dev;
+
+ hfi1_verbs_unregister_sysfs(dd);
+
+ rvt_unregister_device(&dd->verbs_dev.rdi);
+
+ if (!list_empty(&dev->txwait))
+ dd_dev_err(dd, "txwait list not empty!\n");
+ if (!list_empty(&dev->memwait))
+ dd_dev_err(dd, "memwait list not empty!\n");
+
+ timer_delete_sync(&dev->mem_timer);
+ verbs_txreq_exit(dev);
+
+ kfree(dev_cntr_descs);
+ kfree(port_cntr_descs);
+ dev_cntr_descs = NULL;
+ port_cntr_descs = NULL;
+}
+
+void hfi1_cnp_rcv(struct hfi1_packet *packet)
+{
+ struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct ib_header *hdr = packet->hdr;
+ struct rvt_qp *qp = packet->qp;
+ u32 lqpn, rqpn = 0;
+ u16 rlid = 0;
+ u8 sl, sc5, svc_type;
+
+ switch (packet->qp->ibqp.qp_type) {
+ case IB_QPT_UC:
+ rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
+ rqpn = qp->remote_qpn;
+ svc_type = IB_CC_SVCTYPE_UC;
+ break;
+ case IB_QPT_RC:
+ rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
+ rqpn = qp->remote_qpn;
+ svc_type = IB_CC_SVCTYPE_RC;
+ break;
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ svc_type = IB_CC_SVCTYPE_UD;
+ break;
+ default:
+ ibp->rvp.n_pkt_drops++;
+ return;
+ }
+
+ sc5 = hfi1_9B_get_sc5(hdr, packet->rhf);
+ sl = ibp->sc_to_sl[sc5];
+ lqpn = qp->ibqp.qp_num;
+
+ process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
+}
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
new file mode 100644
index 000000000000..070e4f0babe8
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -0,0 +1,487 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2015 - 2018 Intel Corporation.
+ */
+
+#ifndef HFI1_VERBS_H
+#define HFI1_VERBS_H
+
+#include <linux/types.h>
+#include <linux/seqlock.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <rdma/ib_pack.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_mad.h>
+#include <rdma/ib_hdrs.h>
+#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_qp.h>
+#include <rdma/rdmavt_cq.h>
+
+struct hfi1_ctxtdata;
+struct hfi1_pportdata;
+struct hfi1_devdata;
+struct hfi1_packet;
+
+#include "iowait.h"
+#include "tid_rdma.h"
+#include "opfn.h"
+
+#define HFI1_MAX_RDMA_ATOMIC 16
+
+/*
+ * Increment this value if any changes that break userspace ABI
+ * compatibility are made.
+ */
+#define HFI1_UVERBS_ABI_VERSION 2
+
+/* IB Performance Manager status values */
+#define IB_PMA_SAMPLE_STATUS_DONE 0x00
+#define IB_PMA_SAMPLE_STATUS_STARTED 0x01
+#define IB_PMA_SAMPLE_STATUS_RUNNING 0x02
+
+/* Mandatory IB performance counter select values. */
+#define IB_PMA_PORT_XMIT_DATA cpu_to_be16(0x0001)
+#define IB_PMA_PORT_RCV_DATA cpu_to_be16(0x0002)
+#define IB_PMA_PORT_XMIT_PKTS cpu_to_be16(0x0003)
+#define IB_PMA_PORT_RCV_PKTS cpu_to_be16(0x0004)
+#define IB_PMA_PORT_XMIT_WAIT cpu_to_be16(0x0005)
+
+#define HFI1_VENDOR_IPG cpu_to_be16(0xFFA0)
+
+#define IB_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL)
+#define OPA_BTH_MIG_REQ BIT(31)
+
+#define RC_OP(x) IB_OPCODE_RC_##x
+#define UC_OP(x) IB_OPCODE_UC_##x
+
+/* flags passed by hfi1_ib_rcv() */
+enum {
+ HFI1_HAS_GRH = (1 << 0),
+};
+
+#define LRH_16B_BYTES (sizeof_field(struct hfi1_16b_header, lrh))
+#define LRH_16B_DWORDS (LRH_16B_BYTES / sizeof(u32))
+#define LRH_9B_BYTES (sizeof_field(struct ib_header, lrh))
+#define LRH_9B_DWORDS (LRH_9B_BYTES / sizeof(u32))
+
+/* 24 bits for qpn, upper 8 bits reserved */
+struct opa_16b_mgmt {
+ __be32 dest_qpn;
+ __be32 src_qpn;
+};
+
+struct hfi1_16b_header {
+ u32 lrh[4];
+ union {
+ struct {
+ struct ib_grh grh;
+ struct ib_other_headers oth;
+ } l;
+ struct ib_other_headers oth;
+ struct opa_16b_mgmt mgmt;
+ } u;
+} __packed;
+
+struct hfi1_opa_header {
+ union {
+ struct ib_header ibh; /* 9B header */
+ struct hfi1_16b_header opah; /* 16B header */
+ };
+ u8 hdr_type; /* 9B or 16B */
+} __packed;
+
+struct hfi1_ahg_info {
+ u32 ahgdesc[2];
+ u16 tx_flags;
+ u8 ahgcount;
+ u8 ahgidx;
+};
+
+struct hfi1_sdma_header {
+ __le64 pbc;
+ struct hfi1_opa_header hdr;
+} __packed;
+
+/*
+ * hfi1 specific data structures that will be hidden from rvt after the queue
+ * pair is made common
+ */
+struct hfi1_qp_priv {
+ struct hfi1_ahg_info *s_ahg; /* ahg info for next header */
+ struct sdma_engine *s_sde; /* current sde */
+ struct send_context *s_sendcontext; /* current sendcontext */
+ struct hfi1_ctxtdata *rcd; /* QP's receive context */
+ struct page **pages; /* for TID page scan */
+ u32 tid_enqueue; /* saved when tid waited */
+ u8 s_sc; /* SC[0..4] for next packet */
+ struct iowait s_iowait;
+ struct timer_list s_tid_timer; /* for timing tid wait */
+ struct timer_list s_tid_retry_timer; /* for timing tid ack */
+ struct list_head tid_wait; /* for queueing tid space */
+ struct hfi1_opfn_data opfn;
+ struct tid_flow_state flow_state;
+ struct tid_rdma_qp_params tid_rdma;
+ struct rvt_qp *owner;
+ u16 s_running_pkt_size;
+ u8 hdr_type; /* 9B or 16B */
+ struct rvt_sge_state tid_ss; /* SGE state pointer for 2nd leg */
+ atomic_t n_requests; /* # of TID RDMA requests in the */
+ /* queue */
+ atomic_t n_tid_requests; /* # of sent TID RDMA requests */
+ unsigned long tid_timer_timeout_jiffies;
+ unsigned long tid_retry_timeout_jiffies;
+
+ /* variables for the TID RDMA SE state machine */
+ u8 s_state;
+ u8 s_retry;
+ u8 rnr_nak_state; /* RNR NAK state */
+ u8 s_nak_state;
+ u32 s_nak_psn;
+ u32 s_flags;
+ u32 s_tid_cur;
+ u32 s_tid_head;
+ u32 s_tid_tail;
+ u32 r_tid_head; /* Most recently added TID RDMA request */
+ u32 r_tid_tail; /* the last completed TID RDMA request */
+ u32 r_tid_ack; /* the TID RDMA request to be ACK'ed */
+ u32 r_tid_alloc; /* Request for which we are allocating resources */
+ u32 pending_tid_w_segs; /* Num of pending tid write segments */
+ u32 pending_tid_w_resp; /* Num of pending tid write responses */
+ u32 alloc_w_segs; /* Number of segments for which write */
+ /* resources have been allocated for this QP */
+
+ /* For TID RDMA READ */
+ u32 tid_r_reqs; /* Num of tid reads requested */
+ u32 tid_r_comp; /* Num of tid reads completed */
+ u32 pending_tid_r_segs; /* Num of pending tid read segments */
+ u16 pkts_ps; /* packets per segment */
+ u8 timeout_shift; /* account for number of packets per segment */
+
+ u32 r_next_psn_kdeth;
+ u32 r_next_psn_kdeth_save;
+ u32 s_resync_psn;
+ u8 sync_pt; /* Set when QP reaches sync point */
+ u8 resync;
+};
+
+#define HFI1_QP_WQE_INVALID ((u32)-1)
+
+struct hfi1_swqe_priv {
+ struct tid_rdma_request tid_req;
+ struct rvt_sge_state ss; /* Used for TID RDMA READ Request */
+};
+
+struct hfi1_ack_priv {
+ struct rvt_sge_state ss; /* used for TID WRITE RESP */
+ struct tid_rdma_request tid_req;
+};
+
+/*
+ * This structure is used to hold commonly looked-up and computed values
+ * during send engine progress.
+ */
+struct iowait_work;
+struct hfi1_pkt_state {
+ struct hfi1_ibdev *dev;
+ struct hfi1_ibport *ibp;
+ struct hfi1_pportdata *ppd;
+ struct verbs_txreq *s_txreq;
+ struct iowait_work *wait;
+ unsigned long flags;
+ unsigned long timeout;
+ unsigned long timeout_int;
+ int cpu;
+ u8 opcode;
+ bool in_thread;
+ bool pkts_sent;
+};
+
+#define HFI1_PSN_CREDIT 16
+
+struct hfi1_opcode_stats {
+ u64 n_packets; /* number of packets */
+ u64 n_bytes; /* total number of bytes */
+};
+
+struct hfi1_opcode_stats_perctx {
+ struct hfi1_opcode_stats stats[256];
+};
+
+static inline void inc_opstats(
+ u32 tlen,
+ struct hfi1_opcode_stats *stats)
+{
+#ifdef CONFIG_DEBUG_FS
+ stats->n_bytes += tlen;
+ stats->n_packets++;
+#endif
+}
+
+struct hfi1_ibport {
+ struct rvt_qp __rcu *qp[2];
+ struct rvt_ibport rvp;
+
+ /* the first 16 entries are sl_to_vl for !OPA */
+ u8 sl_to_sc[32];
+ u8 sc_to_sl[32];
+};
+
+struct hfi1_ibdev {
+ struct rvt_dev_info rdi; /* Must be first */
+
+ /* QP numbers are shared by all IB ports */
+ /* protect txwait list */
+ seqlock_t txwait_lock ____cacheline_aligned_in_smp;
+ struct list_head txwait; /* list for wait verbs_txreq */
+ struct list_head memwait; /* list for wait kernel memory */
+ struct kmem_cache *verbs_txreq_cache;
+ u64 n_txwait;
+ u64 n_kmem_wait;
+ u64 n_tidwait;
+
+ /* protect iowait lists */
+ seqlock_t iowait_lock ____cacheline_aligned_in_smp;
+ u64 n_piowait;
+ u64 n_piodrain;
+ struct timer_list mem_timer;
+
+#ifdef CONFIG_DEBUG_FS
+ /* per HFI debugfs */
+ struct dentry *hfi1_ibdev_dbg;
+ /* per HFI symlinks to above */
+ struct dentry *hfi1_ibdev_link;
+#ifdef CONFIG_FAULT_INJECTION
+ struct fault *fault;
+#endif
+#endif
+};
+
+static inline struct hfi1_ibdev *to_idev(struct ib_device *ibdev)
+{
+ struct rvt_dev_info *rdi;
+
+ rdi = container_of(ibdev, struct rvt_dev_info, ibdev);
+ return container_of(rdi, struct hfi1_ibdev, rdi);
+}
+
+static inline struct rvt_qp *iowait_to_qp(struct iowait *s_iowait)
+{
+ struct hfi1_qp_priv *priv;
+
+ priv = container_of(s_iowait, struct hfi1_qp_priv, s_iowait);
+ return priv->owner;
+}
+
+/*
+ * This must be called with s_lock held.
+ */
+void hfi1_bad_pkey(struct hfi1_ibport *ibp, u32 key, u32 sl,
+ u32 qp1, u32 qp2, u32 lid1, u32 lid2);
+void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u32 port_num);
+void hfi1_sys_guid_chg(struct hfi1_ibport *ibp);
+void hfi1_node_desc_chg(struct hfi1_ibport *ibp);
+int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u32 port,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad *in_mad, struct ib_mad *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
+
+/*
+ * The PSN_MASK and PSN_SHIFT allow for
+ * 1) comparing two PSNs
+ * 2) returning the PSN with any upper bits masked
+ * 3) returning the difference between two PSNs
+ *
+ * The number of significant bits in the PSN must
+ * necessarily be at least one bit less than
+ * the container holding the PSN.
+ */
+#define PSN_MASK 0x7FFFFFFF
+#define PSN_SHIFT 1
+#define PSN_MODIFY_MASK 0xFFFFFF
+
+/*
+ * Compare two PSNs
+ * Returns an integer <, ==, or > than zero.
+ */
+static inline int cmp_psn(u32 a, u32 b)
+{
+ return (((int)a) - ((int)b)) << PSN_SHIFT;
+}
+
+/*
+ * Return masked PSN
+ */
+static inline u32 mask_psn(u32 a)
+{
+ return a & PSN_MASK;
+}
+
+/*
+ * Return delta between two PSNs
+ */
+static inline u32 delta_psn(u32 a, u32 b)
+{
+ return (((int)a - (int)b) << PSN_SHIFT) >> PSN_SHIFT;
+}
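+/*
+ * Wrap-around example: cmp_psn(0, 0x7FFFFFFF) computes the raw
+ * difference and shifts out the unused top bit, giving a positive
+ * result, so PSN 0 compares as newer than 0x7FFFFFFF; likewise
+ * delta_psn(1, 0x7FFFFFFF) is 2.
+ */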
+
+static inline struct tid_rdma_request *wqe_to_tid_req(struct rvt_swqe *wqe)
+{
+ return &((struct hfi1_swqe_priv *)wqe->priv)->tid_req;
+}
+
+static inline struct tid_rdma_request *ack_to_tid_req(struct rvt_ack_entry *e)
+{
+ return &((struct hfi1_ack_priv *)e->priv)->tid_req;
+}
+
+/*
+ * Look through all the active flows for a TID RDMA request and find
+ * the one (if it exists) that contains the specified PSN.
+ */
+static inline u32 __full_flow_psn(struct flow_state *state, u32 psn)
+{
+ return mask_psn((state->generation << HFI1_KDETH_BTH_SEQ_SHIFT) |
+ (psn & HFI1_KDETH_BTH_SEQ_MASK));
+}
+
+static inline u32 full_flow_psn(struct tid_rdma_flow *flow, u32 psn)
+{
+ return __full_flow_psn(&flow->flow_state, psn);
+}
+
+struct verbs_txreq;
+void hfi1_put_txreq(struct verbs_txreq *tx);
+
+int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
+
+void hfi1_cnp_rcv(struct hfi1_packet *packet);
+
+void hfi1_uc_rcv(struct hfi1_packet *packet);
+
+void hfi1_rc_rcv(struct hfi1_packet *packet);
+
+void hfi1_rc_hdrerr(
+ struct hfi1_ctxtdata *rcd,
+ struct hfi1_packet *packet,
+ struct rvt_qp *qp);
+
+u8 ah_to_sc(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);
+
+void hfi1_rc_verbs_aborted(struct rvt_qp *qp, struct hfi1_opa_header *opah);
+void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah);
+
+void hfi1_ud_rcv(struct hfi1_packet *packet);
+
+int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey);
+
+void hfi1_migrate_qp(struct rvt_qp *qp);
+
+int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+
+void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
+int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ bool *call_send);
+
+int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet);
+
+u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
+ const struct ib_global_route *grh, u32 hwords, u32 nwords);
+
+void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
+ u32 bth0, u32 bth1, u32 bth2, int middle,
+ struct hfi1_pkt_state *ps);
+
+bool hfi1_schedule_send_yield(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
+ bool tid);
+
+void _hfi1_do_send(struct work_struct *work);
+
+void hfi1_do_send_from_rvt(struct rvt_qp *qp);
+
+void hfi1_do_send(struct rvt_qp *qp, bool in_thread);
+
+void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn);
+
+int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
+
+int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
+
+int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
+
+int hfi1_register_ib_device(struct hfi1_devdata *);
+
+void hfi1_unregister_ib_device(struct hfi1_devdata *);
+
+void hfi1_kdeth_eager_rcv(struct hfi1_packet *packet);
+
+void hfi1_kdeth_expected_rcv(struct hfi1_packet *packet);
+
+void hfi1_ib_rcv(struct hfi1_packet *packet);
+
+void hfi1_16B_rcv(struct hfi1_packet *packet);
+
+unsigned hfi1_get_npkeys(struct hfi1_devdata *);
+
+int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
+ u64 pbc);
+
+int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
+ u64 pbc);
+
+static inline bool opa_bth_is_migration(struct ib_other_headers *ohdr)
+{
+ return ohdr->bth[1] & cpu_to_be32(OPA_BTH_MIG_REQ);
+}
+
+void hfi1_wait_kmem(struct rvt_qp *qp);
+
+static inline void hfi1_trdma_send_complete(struct rvt_qp *qp,
+ struct rvt_swqe *wqe,
+ enum ib_wc_status status)
+{
+ trdma_clean_swqe(qp, wqe);
+ rvt_send_complete(qp, wqe, status);
+}
+
+extern const enum ib_wc_opcode ib_hfi1_wc_opcode[];
+
+extern const u8 hdr_len_by_opcode[];
+
+extern const int ib_rvt_state_ops[];
+
+extern __be64 ib_hfi1_sys_image_guid; /* in network order */
+
+extern unsigned int hfi1_max_cqes;
+
+extern unsigned int hfi1_max_cqs;
+
+extern unsigned int hfi1_max_qp_wrs;
+
+extern unsigned int hfi1_max_qps;
+
+extern unsigned int hfi1_max_sges;
+
+extern unsigned int hfi1_max_mcast_grps;
+
+extern unsigned int hfi1_max_mcast_qp_attached;
+
+extern unsigned int hfi1_max_srqs;
+
+extern unsigned int hfi1_max_srq_sges;
+
+extern unsigned int hfi1_max_srq_wrs;
+
+extern unsigned short piothreshold;
+
+extern const u32 ib_hfi1_rnr_table[];
+
+#endif /* HFI1_VERBS_H */
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c
new file mode 100644
index 000000000000..822f0d05bac8
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2016 - 2018 Intel Corporation.
+ */
+
+#include "hfi.h"
+#include "verbs_txreq.h"
+#include "qp.h"
+#include "trace.h"
+
+#define TXREQ_LEN 24
+
+void hfi1_put_txreq(struct verbs_txreq *tx)
+{
+ struct hfi1_ibdev *dev;
+ struct rvt_qp *qp;
+ unsigned long flags;
+ unsigned int seq;
+ struct hfi1_qp_priv *priv;
+
+ qp = tx->qp;
+ dev = to_idev(qp->ibqp.device);
+
+ if (tx->mr)
+ rvt_put_mr(tx->mr);
+
+ sdma_txclean(dd_from_dev(dev), &tx->txreq);
+
+ /* Free verbs_txreq and return to slab cache */
+ kmem_cache_free(dev->verbs_txreq_cache, tx);
+
+ do {
+ seq = read_seqbegin(&dev->txwait_lock);
+ if (!list_empty(&dev->txwait)) {
+ struct iowait *wait;
+
+ write_seqlock_irqsave(&dev->txwait_lock, flags);
+ wait = list_first_entry(&dev->txwait, struct iowait,
+ list);
+ qp = iowait_to_qp(wait);
+ priv = qp->priv;
+ list_del_init(&priv->s_iowait.list);
+ /* refcount held until actual wake up */
+ write_sequnlock_irqrestore(&dev->txwait_lock, flags);
+ hfi1_qp_wakeup(qp, RVT_S_WAIT_TX);
+ break;
+ }
+ } while (read_seqretry(&dev->txwait_lock, seq));
+}
+
+struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
+ struct rvt_qp *qp)
+ __must_hold(&qp->s_lock)
+{
+ struct verbs_txreq *tx = NULL;
+
+ write_seqlock(&dev->txwait_lock);
+ if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
+ struct hfi1_qp_priv *priv;
+
+ tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP);
+ if (tx)
+ goto out;
+ priv = qp->priv;
+ if (list_empty(&priv->s_iowait.list)) {
+ dev->n_txwait++;
+ qp->s_flags |= RVT_S_WAIT_TX;
+ list_add_tail(&priv->s_iowait.list, &dev->txwait);
+ priv->s_iowait.lock = &dev->txwait_lock;
+ trace_hfi1_qpsleep(qp, RVT_S_WAIT_TX);
+ rvt_get_qp(qp);
+ }
+ qp->s_flags &= ~RVT_S_BUSY;
+ }
+out:
+ write_sequnlock(&dev->txwait_lock);
+ return tx;
+}
+
+int verbs_txreq_init(struct hfi1_ibdev *dev)
+{
+ char buf[TXREQ_LEN];
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+
+ snprintf(buf, sizeof(buf), "hfi1_%u_vtxreq_cache", dd->unit);
+ dev->verbs_txreq_cache = kmem_cache_create(buf,
+ sizeof(struct verbs_txreq),
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!dev->verbs_txreq_cache)
+ return -ENOMEM;
+ return 0;
+}
+
+void verbs_txreq_exit(struct hfi1_ibdev *dev)
+{
+ kmem_cache_destroy(dev->verbs_txreq_cache);
+ dev->verbs_txreq_cache = NULL;
+}
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
new file mode 100644
index 000000000000..56353c7676d0
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2016 - 2018 Intel Corporation.
+ */
+
+#ifndef HFI1_VERBS_TXREQ_H
+#define HFI1_VERBS_TXREQ_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+
+#include "verbs.h"
+#include "sdma_txreq.h"
+#include "iowait.h"
+
+struct verbs_txreq {
+ struct hfi1_sdma_header phdr;
+ struct sdma_txreq txreq;
+ struct rvt_qp *qp;
+ struct rvt_swqe *wqe;
+ struct rvt_mregion *mr;
+ struct rvt_sge_state *ss;
+ struct sdma_engine *sde;
+ struct send_context *psc;
+ u16 hdr_dwords;
+ u16 s_cur_size;
+};
+
+struct hfi1_ibdev;
+struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
+ struct rvt_qp *qp);
+
+#define VERBS_TXREQ_GFP (GFP_ATOMIC | __GFP_NOWARN)
+static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
+ struct rvt_qp *qp)
+ __must_hold(&qp->s_lock)
+{
+ struct verbs_txreq *tx;
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP);
+ if (unlikely(!tx)) {
+ /* call slow path to get the lock */
+ tx = __get_txreq(dev, qp);
+ if (!tx)
+ return tx;
+ }
+ tx->qp = qp;
+ tx->mr = NULL;
+ tx->sde = priv->s_sde;
+ tx->psc = priv->s_sendcontext;
+ /* so that we can test if the sdma descriptors are there */
+ tx->txreq.num_desc = 0;
+ /* Set the header type */
+ tx->phdr.hdr.hdr_type = priv->hdr_type;
+ tx->txreq.flags = 0;
+ return tx;
+}
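+/*
+ * Design note: get_txreq() is the fast path and does not take
+ * txwait_lock. Only when the atomic slab allocation fails does
+ * __get_txreq() take the lock, retry the allocation, and otherwise
+ * queue the QP on the txwait list and clear RVT_S_BUSY so the send
+ * engine backs off until hfi1_put_txreq() wakes it.
+ */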
+
+static inline struct verbs_txreq *get_waiting_verbs_txreq(struct iowait_work *w)
+{
+ struct sdma_txreq *stx;
+
+ stx = iowait_get_txhead(w);
+ if (stx)
+ return container_of(stx, struct verbs_txreq, txreq);
+ return NULL;
+}
+
+static inline bool verbs_txreq_queued(struct iowait_work *w)
+{
+ return iowait_packet_queued(w);
+}
+
+void hfi1_put_txreq(struct verbs_txreq *tx);
+int verbs_txreq_init(struct hfi1_ibdev *dev);
+void verbs_txreq_exit(struct hfi1_ibdev *dev);
+
+#endif /* HFI1_VERBS_TXREQ_H */
diff --git a/drivers/infiniband/hw/hfi1/vnic.h b/drivers/infiniband/hw/hfi1/vnic.h
new file mode 100644
index 000000000000..bbafeb5fc0ec
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/vnic.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2017 - 2020 Intel Corporation.
+ */
+
+#ifndef _HFI1_VNIC_H
+#define _HFI1_VNIC_H
+#include <rdma/opa_vnic.h>
+#include "hfi.h"
+#include "sdma.h"
+
+#define HFI1_VNIC_MAX_TXQ 16
+#define HFI1_VNIC_MAX_PAD 12
+
+/* L4 header definitions */
+#define HFI1_VNIC_L4_HDR_OFFSET OPA_VNIC_L2_HDR_LEN
+
+#define HFI1_VNIC_GET_L4_HDR(data) \
+ (*((u16 *)((u8 *)(data) + HFI1_VNIC_L4_HDR_OFFSET)))
+
+#define HFI1_VNIC_GET_VESWID(data) \
+ (HFI1_VNIC_GET_L4_HDR(data) & 0xFFF)
+
+/* Service class */
+#define HFI1_VNIC_SC_OFFSET_LOW 6
+#define HFI1_VNIC_SC_OFFSET_HI 7
+#define HFI1_VNIC_SC_SHIFT 4
+
+#define HFI1_VNIC_MAX_QUEUE 16
+#define HFI1_NUM_VNIC_CTXT 8
+
+/**
+ * struct hfi1_vnic_sdma - VNIC per Tx ring SDMA information
+ * @dd: device data pointer
+ * @sde: sdma engine
+ * @vinfo: vnic info pointer
+ * @wait: iowait structure
+ * @stx: sdma tx request
+ * @state: vnic Tx ring SDMA state
+ * @q_idx: vnic Tx queue index
+ */
+struct hfi1_vnic_sdma {
+ struct hfi1_devdata *dd;
+ struct sdma_engine *sde;
+ struct hfi1_vnic_vport_info *vinfo;
+ struct iowait wait;
+ struct sdma_txreq stx;
+ unsigned int state;
+ u8 q_idx;
+ bool pkts_sent;
+};
+
+/**
+ * struct hfi1_vnic_rx_queue - HFI1 VNIC receive queue
+ * @idx: queue index
+ * @vinfo: pointer to vport information
+ * @netdev: network device
+ * @napi: netdev napi structure
+ * @skbq: queue of received socket buffers
+ */
+struct hfi1_vnic_rx_queue {
+ u8 idx;
+ struct hfi1_vnic_vport_info *vinfo;
+ struct net_device *netdev;
+ struct napi_struct napi;
+};
+
+/**
+ * struct hfi1_vnic_vport_info - HFI1 VNIC virtual port information
+ * @dd: device data pointer
+ * @netdev: net device pointer
+ * @flags: state flags
+ * @lock: vport lock
+ * @num_tx_q: number of transmit queues
+ * @num_rx_q: number of receive queues
+ * @vesw_id: virtual switch id
+ * @rxq: Array of receive queues
+ * @stats: per queue stats
+ * @sdma: VNIC SDMA structure per TXQ
+ */
+struct hfi1_vnic_vport_info {
+ struct hfi1_devdata *dd;
+ struct net_device *netdev;
+ unsigned long flags;
+
+ /* Lock used around state updates */
+ struct mutex lock;
+
+ u8 num_tx_q;
+ u8 num_rx_q;
+ u16 vesw_id;
+ struct hfi1_vnic_rx_queue rxq[HFI1_NUM_VNIC_CTXT];
+
+ struct opa_vnic_stats stats[HFI1_VNIC_MAX_QUEUE];
+ struct hfi1_vnic_sdma sdma[HFI1_VNIC_MAX_TXQ];
+};
+
+#define v_dbg(format, arg...) \
+ netdev_dbg(vinfo->netdev, format, ## arg)
+#define v_err(format, arg...) \
+ netdev_err(vinfo->netdev, format, ## arg)
+#define v_info(format, arg...) \
+ netdev_info(vinfo->netdev, format, ## arg)
+
+/* vnic hfi1 internal functions */
+void hfi1_vnic_setup(struct hfi1_devdata *dd);
+int hfi1_vnic_txreq_init(struct hfi1_devdata *dd);
+void hfi1_vnic_txreq_deinit(struct hfi1_devdata *dd);
+
+void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet);
+void hfi1_vnic_sdma_init(struct hfi1_vnic_vport_info *vinfo);
+bool hfi1_vnic_sdma_write_avail(struct hfi1_vnic_vport_info *vinfo,
+ u8 q_idx);
+
+/* vnic rdma netdev operations */
+struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
+ u32 port_num,
+ enum rdma_netdev_t type,
+ const char *name,
+ unsigned char name_assign_type,
+ void (*setup)(struct net_device *));
+int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
+ struct hfi1_vnic_vport_info *vinfo,
+ struct sk_buff *skb, u64 pbc, u8 plen);
+
+#endif /* _HFI1_VNIC_H */
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
new file mode 100644
index 000000000000..16a4c297a897
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2017 - 2020 Intel Corporation.
+ */
+
+/*
+ * This file contains HFI1 support for VNIC functionality
+ */
+
+#include <linux/io.h>
+#include <linux/if_vlan.h>
+
+#include "vnic.h"
+#include "netdev.h"
+
+#define HFI_TX_TIMEOUT_MS 1000
+
+#define HFI1_VNIC_RCV_Q_SIZE 1024
+
+#define HFI1_VNIC_UP 0
+
+static DEFINE_SPINLOCK(vport_cntr_lock);
+
+#define SUM_GRP_COUNTERS(stats, qstats, x_grp) do { \
+ u64 *src64, *dst64; \
+ for (src64 = &qstats->x_grp.unicast, \
+ dst64 = &stats->x_grp.unicast; \
+ dst64 <= &stats->x_grp.s_1519_max;) { \
+ *dst64++ += *src64++; \
+ } \
+ } while (0)
+
+#define VNIC_MASK (0xFF)
+#define VNIC_ID(val) ((1ull << 24) | ((val) & VNIC_MASK))
+
+/* hfi1_vnic_update_stats - update statistics */
+static void hfi1_vnic_update_stats(struct hfi1_vnic_vport_info *vinfo,
+ struct opa_vnic_stats *stats)
+{
+ struct net_device *netdev = vinfo->netdev;
+ u8 i;
+
+ /* add tx counters on different queues */
+ for (i = 0; i < vinfo->num_tx_q; i++) {
+ struct opa_vnic_stats *qstats = &vinfo->stats[i];
+ struct rtnl_link_stats64 *qnstats = &vinfo->stats[i].netstats;
+
+ stats->netstats.tx_fifo_errors += qnstats->tx_fifo_errors;
+ stats->netstats.tx_carrier_errors += qnstats->tx_carrier_errors;
+ stats->tx_drop_state += qstats->tx_drop_state;
+ stats->tx_dlid_zero += qstats->tx_dlid_zero;
+
+ SUM_GRP_COUNTERS(stats, qstats, tx_grp);
+ stats->netstats.tx_packets += qnstats->tx_packets;
+ stats->netstats.tx_bytes += qnstats->tx_bytes;
+ }
+
+ /* add rx counters on different queues */
+ for (i = 0; i < vinfo->num_rx_q; i++) {
+ struct opa_vnic_stats *qstats = &vinfo->stats[i];
+ struct rtnl_link_stats64 *qnstats = &vinfo->stats[i].netstats;
+
+ stats->netstats.rx_fifo_errors += qnstats->rx_fifo_errors;
+ stats->netstats.rx_nohandler += qnstats->rx_nohandler;
+ stats->rx_drop_state += qstats->rx_drop_state;
+ stats->rx_oversize += qstats->rx_oversize;
+ stats->rx_runt += qstats->rx_runt;
+
+ SUM_GRP_COUNTERS(stats, qstats, rx_grp);
+ stats->netstats.rx_packets += qnstats->rx_packets;
+ stats->netstats.rx_bytes += qnstats->rx_bytes;
+ }
+
+ stats->netstats.tx_errors = stats->netstats.tx_fifo_errors +
+ stats->netstats.tx_carrier_errors +
+ stats->tx_drop_state + stats->tx_dlid_zero;
+ stats->netstats.tx_dropped = stats->netstats.tx_errors;
+
+ stats->netstats.rx_errors = stats->netstats.rx_fifo_errors +
+ stats->netstats.rx_nohandler +
+ stats->rx_drop_state + stats->rx_oversize +
+ stats->rx_runt;
+ stats->netstats.rx_dropped = stats->netstats.rx_errors;
+
+ netdev->stats.tx_packets = stats->netstats.tx_packets;
+ netdev->stats.tx_bytes = stats->netstats.tx_bytes;
+ netdev->stats.tx_fifo_errors = stats->netstats.tx_fifo_errors;
+ netdev->stats.tx_carrier_errors = stats->netstats.tx_carrier_errors;
+ netdev->stats.tx_errors = stats->netstats.tx_errors;
+ netdev->stats.tx_dropped = stats->netstats.tx_dropped;
+
+ netdev->stats.rx_packets = stats->netstats.rx_packets;
+ netdev->stats.rx_bytes = stats->netstats.rx_bytes;
+ netdev->stats.rx_fifo_errors = stats->netstats.rx_fifo_errors;
+ netdev->stats.multicast = stats->rx_grp.mcastbcast;
+ netdev->stats.rx_length_errors = stats->rx_oversize + stats->rx_runt;
+ netdev->stats.rx_errors = stats->netstats.rx_errors;
+ netdev->stats.rx_dropped = stats->netstats.rx_dropped;
+}
+
+/* update_len_counters - update pkt's len histogram counters */
+static inline void update_len_counters(struct opa_vnic_grp_stats *grp,
+ int len)
+{
+ /* account for 4 byte FCS */
+ if (len >= 1515)
+ grp->s_1519_max++;
+ else if (len >= 1020)
+ grp->s_1024_1518++;
+ else if (len >= 508)
+ grp->s_512_1023++;
+ else if (len >= 252)
+ grp->s_256_511++;
+ else if (len >= 124)
+ grp->s_128_255++;
+ else if (len >= 61)
+ grp->s_65_127++;
+ else
+ grp->s_64++;
+}
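+/*
+ * Example: a 1020-byte frame lands in the 1024-1518 bucket because the
+ * thresholds above are the standard bucket boundaries minus the 4-byte
+ * FCS (1020 + 4 = 1024).
+ */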
+
+/* hfi1_vnic_update_tx_counters - update transmit counters */
+static void hfi1_vnic_update_tx_counters(struct hfi1_vnic_vport_info *vinfo,
+ u8 q_idx, struct sk_buff *skb, int err)
+{
+ struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
+ struct opa_vnic_stats *stats = &vinfo->stats[q_idx];
+ struct opa_vnic_grp_stats *tx_grp = &stats->tx_grp;
+ u16 vlan_tci;
+
+ stats->netstats.tx_packets++;
+ stats->netstats.tx_bytes += skb->len + ETH_FCS_LEN;
+
+ update_len_counters(tx_grp, skb->len);
+
+ /* rest of the counts are for good packets only */
+ if (unlikely(err))
+ return;
+
+ if (is_multicast_ether_addr(mac_hdr->h_dest))
+ tx_grp->mcastbcast++;
+ else
+ tx_grp->unicast++;
+
+ if (!__vlan_get_tag(skb, &vlan_tci))
+ tx_grp->vlan++;
+ else
+ tx_grp->untagged++;
+}
+
+/* hfi1_vnic_update_rx_counters - update receive counters */
+static void hfi1_vnic_update_rx_counters(struct hfi1_vnic_vport_info *vinfo,
+ u8 q_idx, struct sk_buff *skb, int err)
+{
+ struct ethhdr *mac_hdr = (struct ethhdr *)skb->data;
+ struct opa_vnic_stats *stats = &vinfo->stats[q_idx];
+ struct opa_vnic_grp_stats *rx_grp = &stats->rx_grp;
+ u16 vlan_tci;
+
+ stats->netstats.rx_packets++;
+ stats->netstats.rx_bytes += skb->len + ETH_FCS_LEN;
+
+ update_len_counters(rx_grp, skb->len);
+
+ /* rest of the counts are for good packets only */
+ if (unlikely(err))
+ return;
+
+ if (is_multicast_ether_addr(mac_hdr->h_dest))
+ rx_grp->mcastbcast++;
+ else
+ rx_grp->unicast++;
+
+ if (!__vlan_get_tag(skb, &vlan_tci))
+ rx_grp->vlan++;
+ else
+ rx_grp->untagged++;
+}
+
+/* This function is overloaded for opa_vnic specific implementation */
+static void hfi1_vnic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct opa_vnic_stats *vstats = (struct opa_vnic_stats *)stats;
+ struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
+
+ hfi1_vnic_update_stats(vinfo, vstats);
+}
+
+static u64 create_bypass_pbc(u32 vl, u32 dw_len)
+{
+ u64 pbc;
+
+ pbc = ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
+ | PBC_INSERT_BYPASS_ICRC | PBC_CREDIT_RETURN
+ | PBC_PACKET_BYPASS
+ | ((vl & PBC_VL_MASK) << PBC_VL_SHIFT)
+ | (dw_len & PBC_LENGTH_DWS_MASK) << PBC_LENGTH_DWS_SHIFT;
+
+ return pbc;
+}
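+/*
+ * The returned PBC is a single u64 (two dwords) that is prepended to
+ * the packet; the "+ 2" added to total_len in hfi1_netdev_start_xmit()
+ * below accounts for it.
+ */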
+
+/* hfi1_vnic_maybe_stop_tx - stop tx queue if required */
+static void hfi1_vnic_maybe_stop_tx(struct hfi1_vnic_vport_info *vinfo,
+ u8 q_idx)
+{
+ netif_stop_subqueue(vinfo->netdev, q_idx);
+ if (!hfi1_vnic_sdma_write_avail(vinfo, q_idx))
+ return;
+
+ netif_start_subqueue(vinfo->netdev, q_idx);
+}
+
+static netdev_tx_t hfi1_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
+ u8 pad_len, q_idx = skb->queue_mapping;
+ struct hfi1_devdata *dd = vinfo->dd;
+ struct opa_vnic_skb_mdata *mdata;
+ u32 pkt_len, total_len;
+ int err = -EINVAL;
+ u64 pbc;
+
+ v_dbg("xmit: queue %d skb len %d\n", q_idx, skb->len);
+ if (unlikely(!netif_oper_up(netdev))) {
+ vinfo->stats[q_idx].tx_drop_state++;
+ goto tx_finish;
+ }
+
+ /* take out meta data */
+ mdata = (struct opa_vnic_skb_mdata *)skb->data;
+ skb_pull(skb, sizeof(*mdata));
+ if (unlikely(mdata->flags & OPA_VNIC_SKB_MDATA_ENCAP_ERR)) {
+ vinfo->stats[q_idx].tx_dlid_zero++;
+ goto tx_finish;
+ }
+
+ /* add tail padding (for 8-byte size alignment) and icrc */
+ pad_len = -(skb->len + OPA_VNIC_ICRC_TAIL_LEN) & 0x7;
+ pad_len += OPA_VNIC_ICRC_TAIL_LEN;
+
+ /*
+ * pkt_len is how much data we have to write, including header and data.
+ * total_len is the length of the packet in dwords plus the PBC, and
+ * should not include the CRC.
+ */
+ pkt_len = (skb->len + pad_len) >> 2;
+ total_len = pkt_len + 2; /* PBC + packet */
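+ /*
+ * Worked example (assuming OPA_VNIC_ICRC_TAIL_LEN is 5, i.e. a 4-byte
+ * ICRC plus a 1-byte tail): a 100-byte skb gets pad_len = 7 + 5 = 12,
+ * so pkt_len = 112 / 4 = 28 dwords and total_len = 30 with the PBC.
+ */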
+
+ pbc = create_bypass_pbc(mdata->vl, total_len);
+
+ skb_get(skb);
+ v_dbg("pbc 0x%016llX len %d pad_len %d\n", pbc, skb->len, pad_len);
+ err = dd->process_vnic_dma_send(dd, q_idx, vinfo, skb, pbc, pad_len);
+ if (unlikely(err)) {
+ if (err == -ENOMEM)
+ vinfo->stats[q_idx].netstats.tx_fifo_errors++;
+ else if (err != -EBUSY)
+ vinfo->stats[q_idx].netstats.tx_carrier_errors++;
+ }
+ /* remove the header before updating tx counters */
+ skb_pull(skb, OPA_VNIC_HDR_LEN);
+
+ if (unlikely(err == -EBUSY)) {
+ hfi1_vnic_maybe_stop_tx(vinfo, q_idx);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_BUSY;
+ }
+
+tx_finish:
+ /* update tx counters */
+ hfi1_vnic_update_tx_counters(vinfo, q_idx, skb, err);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static u16 hfi1_vnic_select_queue(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
+ struct opa_vnic_skb_mdata *mdata;
+ struct sdma_engine *sde;
+
+ mdata = (struct opa_vnic_skb_mdata *)skb->data;
+ sde = sdma_select_engine_vl(vinfo->dd, mdata->entropy, mdata->vl);
+ return sde->this_idx;
+}
+
+/* hfi1_vnic_decap_skb - strip OPA header from the skb (ethernet) packet */
+static inline int hfi1_vnic_decap_skb(struct hfi1_vnic_rx_queue *rxq,
+ struct sk_buff *skb)
+{
+ struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;
+ int max_len = vinfo->netdev->mtu + VLAN_ETH_HLEN;
+ int rc = -EFAULT;
+
+ skb_pull(skb, OPA_VNIC_HDR_LEN);
+
+ /* Validate Packet length */
+ if (unlikely(skb->len > max_len))
+ vinfo->stats[rxq->idx].rx_oversize++;
+ else if (unlikely(skb->len < ETH_ZLEN))
+ vinfo->stats[rxq->idx].rx_runt++;
+ else
+ rc = 0;
+ return rc;
+}
+
+static struct hfi1_vnic_vport_info *get_vnic_port(struct hfi1_devdata *dd,
+ int vesw_id)
+{
+ int vnic_id = VNIC_ID(vesw_id);
+
+ return hfi1_netdev_get_data(dd, vnic_id);
+}
+
+static struct hfi1_vnic_vport_info *get_first_vnic_port(struct hfi1_devdata *dd)
+{
+ struct hfi1_vnic_vport_info *vinfo;
+ int next_id = VNIC_ID(0);
+
+ vinfo = hfi1_netdev_get_first_data(dd, &next_id);
+
+ if (next_id > VNIC_ID(VNIC_MASK))
+ return NULL;
+
+ return vinfo;
+}
+
+void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
+{
+ struct hfi1_devdata *dd = packet->rcd->dd;
+ struct hfi1_vnic_vport_info *vinfo = NULL;
+ struct hfi1_vnic_rx_queue *rxq;
+ struct sk_buff *skb;
+ int l4_type, vesw_id = -1, rc;
+ u8 q_idx;
+ unsigned char *pad_info;
+
+ l4_type = hfi1_16B_get_l4(packet->ebuf);
+ if (likely(l4_type == OPA_16B_L4_ETHR)) {
+ vesw_id = HFI1_VNIC_GET_VESWID(packet->ebuf);
+ vinfo = get_vnic_port(dd, vesw_id);
+
+ /*
+ * In case of invalid vesw id, count the error on
+ * the first available vport.
+ */
+ if (unlikely(!vinfo)) {
+ struct hfi1_vnic_vport_info *vinfo_tmp;
+
+ vinfo_tmp = get_first_vnic_port(dd);
+ if (vinfo_tmp) {
+ spin_lock(&vport_cntr_lock);
+ vinfo_tmp->stats[0].netstats.rx_nohandler++;
+ spin_unlock(&vport_cntr_lock);
+ }
+ }
+ }
+
+ if (unlikely(!vinfo)) {
+ dd_dev_warn(dd, "vnic rcv err: l4 %d vesw id %d ctx %d\n",
+ l4_type, vesw_id, packet->rcd->ctxt);
+ return;
+ }
+
+ q_idx = packet->rcd->vnic_q_idx;
+ rxq = &vinfo->rxq[q_idx];
+ if (unlikely(!netif_oper_up(vinfo->netdev))) {
+ vinfo->stats[q_idx].rx_drop_state++;
+ return;
+ }
+
+ skb = netdev_alloc_skb(vinfo->netdev, packet->tlen);
+ if (unlikely(!skb)) {
+ vinfo->stats[q_idx].netstats.rx_fifo_errors++;
+ return;
+ }
+
+ memcpy(skb->data, packet->ebuf, packet->tlen);
+ skb_put(skb, packet->tlen);
+
+ pad_info = skb->data + skb->len - 1;
+ skb_trim(skb, (skb->len - OPA_VNIC_ICRC_TAIL_LEN -
+ ((*pad_info) & 0x7)));
+
+ rc = hfi1_vnic_decap_skb(rxq, skb);
+
+ /* update rx counters */
+ hfi1_vnic_update_rx_counters(vinfo, rxq->idx, skb, rc);
+ if (unlikely(rc)) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ skb_checksum_none_assert(skb);
+ skb->protocol = eth_type_trans(skb, rxq->netdev);
+
+ napi_gro_receive(&rxq->napi, skb);
+}
+
+static int hfi1_vnic_up(struct hfi1_vnic_vport_info *vinfo)
+{
+ struct hfi1_devdata *dd = vinfo->dd;
+ struct net_device *netdev = vinfo->netdev;
+ int rc;
+
+ /* ensure virtual eth switch id is valid */
+ if (!vinfo->vesw_id)
+ return -EINVAL;
+
+ rc = hfi1_netdev_add_data(dd, VNIC_ID(vinfo->vesw_id), vinfo);
+ if (rc < 0)
+ return rc;
+
+ rc = hfi1_netdev_rx_init(dd);
+ if (rc)
+ goto err_remove;
+
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+ set_bit(HFI1_VNIC_UP, &vinfo->flags);
+
+ return 0;
+
+err_remove:
+ hfi1_netdev_remove_data(dd, VNIC_ID(vinfo->vesw_id));
+ return rc;
+}
+
+static void hfi1_vnic_down(struct hfi1_vnic_vport_info *vinfo)
+{
+ struct hfi1_devdata *dd = vinfo->dd;
+
+ clear_bit(HFI1_VNIC_UP, &vinfo->flags);
+ netif_carrier_off(vinfo->netdev);
+ netif_tx_disable(vinfo->netdev);
+ hfi1_netdev_remove_data(dd, VNIC_ID(vinfo->vesw_id));
+
+ hfi1_netdev_rx_destroy(dd);
+}
+
+static int hfi1_netdev_open(struct net_device *netdev)
+{
+ struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
+ int rc;
+
+ mutex_lock(&vinfo->lock);
+ rc = hfi1_vnic_up(vinfo);
+ mutex_unlock(&vinfo->lock);
+ return rc;
+}
+
+static int hfi1_netdev_close(struct net_device *netdev)
+{
+ struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
+
+ mutex_lock(&vinfo->lock);
+ if (test_bit(HFI1_VNIC_UP, &vinfo->flags))
+ hfi1_vnic_down(vinfo);
+ mutex_unlock(&vinfo->lock);
+ return 0;
+}
+
+static int hfi1_vnic_init(struct hfi1_vnic_vport_info *vinfo)
+{
+ struct hfi1_devdata *dd = vinfo->dd;
+ int rc = 0;
+
+ mutex_lock(&hfi1_mutex);
+ if (!dd->vnic_num_vports) {
+ rc = hfi1_vnic_txreq_init(dd);
+ if (rc)
+ goto txreq_fail;
+ }
+
+ rc = hfi1_netdev_rx_init(dd);
+ if (rc) {
+ dd_dev_err(dd, "Unable to initialize netdev contexts\n");
+ goto alloc_fail;
+ }
+
+ hfi1_init_vnic_rsm(dd);
+
+ dd->vnic_num_vports++;
+ hfi1_vnic_sdma_init(vinfo);
+
+alloc_fail:
+ if (!dd->vnic_num_vports)
+ hfi1_vnic_txreq_deinit(dd);
+txreq_fail:
+ mutex_unlock(&hfi1_mutex);
+ return rc;
+}
+
+static void hfi1_vnic_deinit(struct hfi1_vnic_vport_info *vinfo)
+{
+ struct hfi1_devdata *dd = vinfo->dd;
+
+ mutex_lock(&hfi1_mutex);
+ if (--dd->vnic_num_vports == 0) {
+ hfi1_deinit_vnic_rsm(dd);
+ hfi1_vnic_txreq_deinit(dd);
+ }
+ mutex_unlock(&hfi1_mutex);
+ hfi1_netdev_rx_destroy(dd);
+}
+
+static void hfi1_vnic_set_vesw_id(struct net_device *netdev, int id)
+{
+ struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
+ bool reopen = false;
+
+ /*
+ * If vesw_id is being changed, and if the vnic port is up,
+ * reset the vnic port to ensure new vesw_id gets picked up
+ */
+ if (id != vinfo->vesw_id) {
+ mutex_lock(&vinfo->lock);
+ if (test_bit(HFI1_VNIC_UP, &vinfo->flags)) {
+ hfi1_vnic_down(vinfo);
+ reopen = true;
+ }
+
+ vinfo->vesw_id = id;
+ if (reopen)
+ hfi1_vnic_up(vinfo);
+
+ mutex_unlock(&vinfo->lock);
+ }
+}
+
+/* netdev ops */
+static const struct net_device_ops hfi1_netdev_ops = {
+ .ndo_open = hfi1_netdev_open,
+ .ndo_stop = hfi1_netdev_close,
+ .ndo_start_xmit = hfi1_netdev_start_xmit,
+ .ndo_select_queue = hfi1_vnic_select_queue,
+ .ndo_get_stats64 = hfi1_vnic_get_stats64,
+};
+
+static void hfi1_vnic_free_rn(struct net_device *netdev)
+{
+ struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
+
+ hfi1_vnic_deinit(vinfo);
+ mutex_destroy(&vinfo->lock);
+ free_netdev(netdev);
+}
+
+struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
+ u32 port_num,
+ enum rdma_netdev_t type,
+ const char *name,
+ unsigned char name_assign_type,
+ void (*setup)(struct net_device *))
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(device);
+ struct hfi1_vnic_vport_info *vinfo;
+ struct net_device *netdev;
+ struct rdma_netdev *rn;
+ int i, size, rc;
+
+ if (!dd->num_netdev_contexts)
+ return ERR_PTR(-ENOMEM);
+
+ if (!port_num || (port_num > dd->num_pports))
+ return ERR_PTR(-EINVAL);
+
+ if (type != RDMA_NETDEV_OPA_VNIC)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo);
+ netdev = alloc_netdev_mqs(size, name, name_assign_type, setup,
+ chip_sdma_engines(dd),
+ dd->num_netdev_contexts);
+ if (!netdev)
+ return ERR_PTR(-ENOMEM);
+
+ rn = netdev_priv(netdev);
+ vinfo = opa_vnic_dev_priv(netdev);
+ vinfo->dd = dd;
+ vinfo->num_tx_q = chip_sdma_engines(dd);
+ vinfo->num_rx_q = dd->num_netdev_contexts;
+ vinfo->netdev = netdev;
+ rn->free_rdma_netdev = hfi1_vnic_free_rn;
+ rn->set_id = hfi1_vnic_set_vesw_id;
+
+ netdev->features = NETIF_F_HIGHDMA | NETIF_F_SG;
+ netdev->hw_features = netdev->features;
+ netdev->vlan_features = netdev->features;
+ netdev->watchdog_timeo = msecs_to_jiffies(HFI_TX_TIMEOUT_MS);
+ netdev->netdev_ops = &hfi1_netdev_ops;
+ mutex_init(&vinfo->lock);
+
+ for (i = 0; i < vinfo->num_rx_q; i++) {
+ struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i];
+
+ rxq->idx = i;
+ rxq->vinfo = vinfo;
+ rxq->netdev = netdev;
+ }
+
+ rc = hfi1_vnic_init(vinfo);
+ if (rc)
+ goto init_fail;
+
+ return netdev;
+init_fail:
+ mutex_destroy(&vinfo->lock);
+ free_netdev(netdev);
+ return ERR_PTR(rc);
+}
diff --git a/drivers/infiniband/hw/hfi1/vnic_sdma.c b/drivers/infiniband/hw/hfi1/vnic_sdma.c
new file mode 100644
index 000000000000..6caf01ba0bca
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/vnic_sdma.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2017 - 2018 Intel Corporation.
+ */
+
+/*
+ * This file contains HFI1 support for VNIC SDMA functionality
+ */
+
+#include "sdma.h"
+#include "vnic.h"
+
+#define HFI1_VNIC_SDMA_Q_ACTIVE BIT(0)
+#define HFI1_VNIC_SDMA_Q_DEFERRED BIT(1)
+
+#define HFI1_VNIC_TXREQ_NAME_LEN 32
+#define HFI1_VNIC_SDMA_DESC_WTRMRK 64
+
+/*
+ * struct vnic_txreq - VNIC transmit descriptor
+ * @txreq: sdma transmit request
+ * @sdma: vnic sdma pointer
+ * @skb: skb to send
+ * @pad: pad buffer
+ * @plen: pad length
+ * @pbc_val: pbc value
+ */
+struct vnic_txreq {
+ struct sdma_txreq txreq;
+ struct hfi1_vnic_sdma *sdma;
+
+ struct sk_buff *skb;
+ unsigned char pad[HFI1_VNIC_MAX_PAD];
+ u16 plen;
+ __le64 pbc_val;
+};
+
+static void vnic_sdma_complete(struct sdma_txreq *txreq,
+ int status)
+{
+ struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);
+ struct hfi1_vnic_sdma *vnic_sdma = tx->sdma;
+
+ sdma_txclean(vnic_sdma->dd, txreq);
+ dev_kfree_skb_any(tx->skb);
+ kmem_cache_free(vnic_sdma->dd->vnic.txreq_cache, tx);
+}
+
+static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,
+ struct vnic_txreq *tx)
+{
+ int i, ret = 0;
+
+ ret = sdma_txadd_kvaddr(
+ sde->dd,
+ &tx->txreq,
+ tx->skb->data,
+ skb_headlen(tx->skb));
+ if (unlikely(ret))
+ goto bail_txadd;
+
+ for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(tx->skb)->frags[i];
+
+ /* combine physically contiguous fragments later? */
+ ret = sdma_txadd_page(sde->dd,
+ &tx->txreq,
+ skb_frag_page(frag),
+ skb_frag_off(frag),
+ skb_frag_size(frag),
+ NULL, NULL, NULL);
+ if (unlikely(ret))
+ goto bail_txadd;
+ }
+
+ if (tx->plen)
+ ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
+ tx->pad + HFI1_VNIC_MAX_PAD - tx->plen,
+ tx->plen);
+
+bail_txadd:
+ return ret;
+}
+
+static int build_vnic_tx_desc(struct sdma_engine *sde,
+ struct vnic_txreq *tx,
+ u64 pbc)
+{
+ int ret = 0;
+ u16 hdrbytes = 2 << 2; /* PBC */
+
+ ret = sdma_txinit_ahg(
+ &tx->txreq,
+ 0,
+ hdrbytes + tx->skb->len + tx->plen,
+ 0,
+ 0,
+ NULL,
+ 0,
+ vnic_sdma_complete);
+ if (unlikely(ret))
+ goto bail_txadd;
+
+ /* add pbc */
+ tx->pbc_val = cpu_to_le64(pbc);
+ ret = sdma_txadd_kvaddr(
+ sde->dd,
+ &tx->txreq,
+ &tx->pbc_val,
+ hdrbytes);
+ if (unlikely(ret))
+ goto bail_txadd;
+
+ /* add the ulp payload */
+ ret = build_vnic_ulp_payload(sde, tx);
+bail_txadd:
+ return ret;
+}
+
+/* set up the last plen bytes of pad */
+static inline void hfi1_vnic_update_pad(unsigned char *pad, u8 plen)
+{
+ pad[HFI1_VNIC_MAX_PAD - 1] = plen - OPA_VNIC_ICRC_TAIL_LEN;
+}
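
The sender records the alignment-pad count (plen minus the ICRC/tail space) in the very last pad byte; the receive path in hfi1_vnic_bypass_rcv() reads that last byte, masks it with 0x7 and trims the pad plus ICRC off the skb. The round-trip sketch below shows the encode/decode pair in isolation; the tail length is a placeholder for OPA_VNIC_ICRC_TAIL_LEN and the names are hypothetical.

#include <assert.h>
#include <stdint.h>

#define DEMO_ICRC_TAIL_LEN 5u  /* placeholder for OPA_VNIC_ICRC_TAIL_LEN */

/* Sender side: record the alignment pad count in the very last pad byte. */
static void demo_update_pad(uint8_t *pad, uint8_t plen, unsigned int max_pad)
{
	pad[max_pad - 1] = plen - DEMO_ICRC_TAIL_LEN;
}

/* Receiver side: the last byte of the frame says how much alignment pad to strip. */
static unsigned int demo_trimmed_len(const uint8_t *frame, unsigned int len)
{
	uint8_t pad_info = frame[len - 1];

	return len - DEMO_ICRC_TAIL_LEN - (pad_info & 0x7);
}

int main(void)
{
	uint8_t pad[16] = { 0 };
	uint8_t frame[64] = { 0 };
	unsigned int payload = 50;
	uint8_t plen = (uint8_t)((-(payload + DEMO_ICRC_TAIL_LEN)) & 0x7) +
		       DEMO_ICRC_TAIL_LEN;

	demo_update_pad(pad, plen, sizeof(pad));

	/* pretend the padded frame arrived: copy the pad tail behind the payload */
	frame[payload + plen - 1] = pad[sizeof(pad) - 1];
	assert(demo_trimmed_len(frame, payload + plen) == payload);
	return 0;
}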
+
+int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
+ struct hfi1_vnic_vport_info *vinfo,
+ struct sk_buff *skb, u64 pbc, u8 plen)
+{
+ struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];
+ struct sdma_engine *sde = vnic_sdma->sde;
+ struct vnic_txreq *tx;
+ int ret = -ECOMM;
+
+ if (unlikely(READ_ONCE(vnic_sdma->state) != HFI1_VNIC_SDMA_Q_ACTIVE))
+ goto tx_err;
+
+ if (unlikely(!sde || !sdma_running(sde)))
+ goto tx_err;
+
+ tx = kmem_cache_alloc(dd->vnic.txreq_cache, GFP_ATOMIC);
+ if (unlikely(!tx)) {
+ ret = -ENOMEM;
+ goto tx_err;
+ }
+
+ tx->sdma = vnic_sdma;
+ tx->skb = skb;
+ hfi1_vnic_update_pad(tx->pad, plen);
+ tx->plen = plen;
+ ret = build_vnic_tx_desc(sde, tx, pbc);
+ if (unlikely(ret))
+ goto free_desc;
+
+ ret = sdma_send_txreq(sde, iowait_get_ib_work(&vnic_sdma->wait),
+ &tx->txreq, vnic_sdma->pkts_sent);
+ /* When -ECOMM, sdma callback will be called with ABORT status */
+ if (unlikely(ret && ret != -ECOMM))
+ goto free_desc;
+
+ if (!ret) {
+ vnic_sdma->pkts_sent = true;
+ iowait_starve_clear(vnic_sdma->pkts_sent, &vnic_sdma->wait);
+ }
+ return ret;
+
+free_desc:
+ sdma_txclean(dd, &tx->txreq);
+ kmem_cache_free(dd->vnic.txreq_cache, tx);
+tx_err:
+ if (ret != -EBUSY)
+ dev_kfree_skb_any(skb);
+ else
+ vnic_sdma->pkts_sent = false;
+ return ret;
+}
+
+/*
+ * hfi1_vnic_sdma_sleep - vnic sdma sleep function
+ *
+ * This function gets called from sdma_send_txreq() when there are not enough
+ * sdma descriptors available to send the packet. It adds the Tx queue's wait
+ * structure to the sdma engine's dmawait list so that the queue is woken up
+ * when descriptors become available.
+ */
+static int hfi1_vnic_sdma_sleep(struct sdma_engine *sde,
+ struct iowait_work *wait,
+ struct sdma_txreq *txreq,
+ uint seq,
+ bool pkts_sent)
+{
+ struct hfi1_vnic_sdma *vnic_sdma =
+ container_of(wait->iow, struct hfi1_vnic_sdma, wait);
+
+ write_seqlock(&sde->waitlock);
+ if (sdma_progress(sde, seq, txreq)) {
+ write_sequnlock(&sde->waitlock);
+ return -EAGAIN;
+ }
+
+ vnic_sdma->state = HFI1_VNIC_SDMA_Q_DEFERRED;
+ if (list_empty(&vnic_sdma->wait.list)) {
+ iowait_get_priority(wait->iow);
+ iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
+ }
+ write_sequnlock(&sde->waitlock);
+ return -EBUSY;
+}
+
+/*
+ * hfi1_vnic_sdma_wakeup - vnic sdma wakeup function
+ *
+ * This function gets called when SDMA descriptors become available and the Tx
+ * queue's wait structure was previously added to the sdma engine's dmawait list.
+ * It notifies the upper driver about Tx queue wakeup.
+ */
+static void hfi1_vnic_sdma_wakeup(struct iowait *wait, int reason)
+{
+ struct hfi1_vnic_sdma *vnic_sdma =
+ container_of(wait, struct hfi1_vnic_sdma, wait);
+ struct hfi1_vnic_vport_info *vinfo = vnic_sdma->vinfo;
+
+ vnic_sdma->state = HFI1_VNIC_SDMA_Q_ACTIVE;
+ if (__netif_subqueue_stopped(vinfo->netdev, vnic_sdma->q_idx))
+ netif_wake_subqueue(vinfo->netdev, vnic_sdma->q_idx);
+}
+
+inline bool hfi1_vnic_sdma_write_avail(struct hfi1_vnic_vport_info *vinfo,
+ u8 q_idx)
+{
+ struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];
+
+ return (READ_ONCE(vnic_sdma->state) == HFI1_VNIC_SDMA_Q_ACTIVE);
+}
+
+void hfi1_vnic_sdma_init(struct hfi1_vnic_vport_info *vinfo)
+{
+ int i;
+
+ for (i = 0; i < vinfo->num_tx_q; i++) {
+ struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[i];
+
+ iowait_init(&vnic_sdma->wait, 0, NULL, NULL,
+ hfi1_vnic_sdma_sleep,
+ hfi1_vnic_sdma_wakeup, NULL, NULL);
+ vnic_sdma->sde = &vinfo->dd->per_sdma[i];
+ vnic_sdma->dd = vinfo->dd;
+ vnic_sdma->vinfo = vinfo;
+ vnic_sdma->q_idx = i;
+ vnic_sdma->state = HFI1_VNIC_SDMA_Q_ACTIVE;
+
+ /* Add a free descriptor watermark for wakeups */
+ if (vnic_sdma->sde->descq_cnt > HFI1_VNIC_SDMA_DESC_WTRMRK) {
+ struct iowait_work *work;
+
+ INIT_LIST_HEAD(&vnic_sdma->stx.list);
+ vnic_sdma->stx.num_desc = HFI1_VNIC_SDMA_DESC_WTRMRK;
+ work = iowait_get_ib_work(&vnic_sdma->wait);
+ list_add_tail(&vnic_sdma->stx.list, &work->tx_head);
+ }
+ }
+}
+
+int hfi1_vnic_txreq_init(struct hfi1_devdata *dd)
+{
+ char buf[HFI1_VNIC_TXREQ_NAME_LEN];
+
+ snprintf(buf, sizeof(buf), "hfi1_%u_vnic_txreq_cache", dd->unit);
+ dd->vnic.txreq_cache = kmem_cache_create(buf,
+ sizeof(struct vnic_txreq),
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!dd->vnic.txreq_cache)
+ return -ENOMEM;
+ return 0;
+}
+
+void hfi1_vnic_txreq_deinit(struct hfi1_devdata *dd)
+{
+ kmem_cache_destroy(dd->vnic.txreq_cache);
+ dd->vnic.txreq_cache = NULL;
+}
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
new file mode 100644
index 000000000000..44cdb706fe27
--- /dev/null
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INFINIBAND_HNS_HIP08
+ tristate "Hisilicon Hip08 Family RoCE support"
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on PCI && HNS3
+ help
+ RoCE driver support for the Hisilicon RoCE engine in the Hisilicon Hip08 SoC.
+ The RoCE engine is a PCI device.
+
+ To compile this driver, choose M here. This module will be called
+ hns-roce-hw-v2.
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
new file mode 100644
index 000000000000..baf592e6f21b
--- /dev/null
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Hisilicon RoCE drivers.
+#
+
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
+ccflags-y += -I $(src)
+
+hns-roce-hw-v2-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
+ hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
+ hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o \
+ hns_roce_debugfs.o hns_roce_hw_v2.o
+
+obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
new file mode 100644
index 000000000000..307c35888b30
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+#include "hns_roce_device.h"
+#include "hns_roce_hw_v2.h"
+
+static inline u16 get_ah_udp_sport(const struct rdma_ah_attr *ah_attr)
+{
+ u32 fl = ah_attr->grh.flow_label;
+ u16 sport;
+
+ if (!fl)
+ sport = get_random_u32_inclusive(IB_ROCE_UDP_ENCAP_VALID_PORT_MIN,
+ IB_ROCE_UDP_ENCAP_VALID_PORT_MAX);
+ else
+ sport = rdma_flow_label_to_udp_sport(fl);
+
+ return sport;
+}
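
When a flow label is present, the UDP source port is derived deterministically from it so that packets of one flow hash to the same path; otherwise a random port in the RoCEv2 ephemeral range is used. The sketch below illustrates that split. The fold mirrors how rdma_flow_label_to_udp_sport() is commonly implemented in recent kernels, but treat the exact formula and the 0xC000 port floor as assumptions here.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_UDP_SPORT_MIN 0xC000u  /* assumed IB_ROCE_UDP_ENCAP_VALID_PORT_MIN */
#define DEMO_UDP_SPORT_MAX 0xFFFFu

/* Fold a 20-bit flow label into the RoCEv2 ephemeral source-port range. */
static uint16_t demo_flow_label_to_sport(uint32_t fl)
{
	uint32_t fl_low = fl & 0x03fff;
	uint32_t fl_high = fl & 0xfc000;

	fl_low ^= fl_high >> 14;
	return (uint16_t)(fl_low | DEMO_UDP_SPORT_MIN);
}

static uint16_t demo_get_sport(uint32_t fl)
{
	if (!fl)  /* no flow label: fall back to a random valid port */
		return (uint16_t)(DEMO_UDP_SPORT_MIN +
			rand() % (DEMO_UDP_SPORT_MAX - DEMO_UDP_SPORT_MIN + 1));
	return demo_flow_label_to_sport(fl);
}

int main(void)
{
	printf("sport(0x12345) = 0x%04x\n", demo_get_sport(0x12345));
	return 0;
}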
+
+int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device);
+ struct hns_roce_ib_create_ah_resp resp = {};
+ struct hns_roce_ah *ah = to_hr_ah(ibah);
+ u8 tclass = get_tclass(grh);
+ u8 priority = 0;
+ u8 tc_mode = 0;
+ int ret;
+
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata) {
+ ret = -EOPNOTSUPP;
+ goto err_out;
+ }
+
+ ah->av.port = rdma_ah_get_port_num(ah_attr);
+ ah->av.gid_index = grh->sgid_index;
+
+ if (rdma_ah_get_static_rate(ah_attr))
+ ah->av.stat_rate = IB_RATE_10_GBPS;
+
+ ah->av.hop_limit = grh->hop_limit;
+ ah->av.flowlabel = grh->flow_label;
+ ah->av.udp_sport = get_ah_udp_sport(ah_attr);
+ ah->av.tclass = tclass;
+
+ ret = hr_dev->hw->get_dscp(hr_dev, tclass, &tc_mode, &priority);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+
+ if (ret && grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+ goto err_out;
+
+ if (tc_mode == HNAE3_TC_MAP_MODE_DSCP &&
+ grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+ ah->av.sl = priority;
+ else
+ ah->av.sl = rdma_ah_get_sl(ah_attr);
+
+ if (!check_sl_valid(hr_dev, ah->av.sl)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
+ memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
+
+ /* HIP08 needs to record vlan info in Address Vector */
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+ ret = rdma_read_gid_l2_fields(ah_attr->grh.sgid_attr,
+ &ah->av.vlan_id, NULL);
+ if (ret)
+ goto err_out;
+
+ ah->av.vlan_en = ah->av.vlan_id < VLAN_N_VID;
+ }
+
+ if (udata) {
+ resp.priority = ah->av.sl;
+ resp.tc_mode = tc_mode;
+ memcpy(resp.dmac, ah_attr->roce.dmac, ETH_ALEN);
+ ret = ib_copy_to_udata(udata, &resp,
+ min(udata->outlen, sizeof(resp)));
+ }
+
+err_out:
+ if (ret)
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_AH_CREATE_ERR_CNT]);
+
+ return ret;
+}
+
+int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
+{
+ struct hns_roce_ah *ah = to_hr_ah(ibah);
+
+ memset(ah_attr, 0, sizeof(*ah_attr));
+
+ rdma_ah_set_sl(ah_attr, ah->av.sl);
+ rdma_ah_set_port_num(ah_attr, ah->av.port);
+ rdma_ah_set_static_rate(ah_attr, ah->av.stat_rate);
+ rdma_ah_set_grh(ah_attr, NULL, ah->av.flowlabel,
+ ah->av.gid_index, ah->av.hop_limit, ah->av.tclass);
+ rdma_ah_set_dgid_raw(ah_attr, ah->av.dgid);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
new file mode 100644
index 000000000000..6ee911f6885b
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/vmalloc.h>
+#include <rdma/ib_umem.h>
+#include "hns_roce_device.h"
+
+void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf)
+{
+ struct hns_roce_buf_list *trunks;
+ u32 i;
+
+ if (!buf)
+ return;
+
+ trunks = buf->trunk_list;
+ if (trunks) {
+ buf->trunk_list = NULL;
+ for (i = 0; i < buf->ntrunks; i++)
+ dma_free_coherent(hr_dev->dev, 1 << buf->trunk_shift,
+ trunks[i].buf, trunks[i].map);
+
+ kfree(trunks);
+ }
+
+ kfree(buf);
+}
+
+/*
+ * Allocate the dma buffer for storing ROCEE table entries
+ *
+ * @size: required size
+ * @page_shift: the unit size of a contiguous DMA address range
+ * @flags: HNS_ROCE_BUF_ flags to control the allocation flow.
+ */
+struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
+ u32 page_shift, u32 flags)
+{
+ u32 trunk_size, page_size, alloced_size;
+ struct hns_roce_buf_list *trunks;
+ struct hns_roce_buf *buf;
+ gfp_t gfp_flags;
+ u32 ntrunk, i;
+
+ /* The minimum shift of the page accessed by hw is HNS_HW_PAGE_SHIFT */
+ if (WARN_ON(page_shift < HNS_HW_PAGE_SHIFT))
+ return ERR_PTR(-EINVAL);
+
+ gfp_flags = (flags & HNS_ROCE_BUF_NOSLEEP) ? GFP_ATOMIC : GFP_KERNEL;
+ buf = kzalloc(sizeof(*buf), gfp_flags);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->page_shift = page_shift;
+ page_size = 1 << buf->page_shift;
+
+ /* Calc the trunk size and num by required size and page_shift */
+ if (flags & HNS_ROCE_BUF_DIRECT) {
+ buf->trunk_shift = order_base_2(ALIGN(size, PAGE_SIZE));
+ ntrunk = 1;
+ } else {
+ buf->trunk_shift = order_base_2(ALIGN(page_size, PAGE_SIZE));
+ ntrunk = DIV_ROUND_UP(size, 1 << buf->trunk_shift);
+ }
+
+ trunks = kcalloc(ntrunk, sizeof(*trunks), gfp_flags);
+ if (!trunks) {
+ kfree(buf);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ trunk_size = 1 << buf->trunk_shift;
+ alloced_size = 0;
+ for (i = 0; i < ntrunk; i++) {
+ trunks[i].buf = dma_alloc_coherent(hr_dev->dev, trunk_size,
+ &trunks[i].map, gfp_flags);
+ if (!trunks[i].buf)
+ break;
+
+ alloced_size += trunk_size;
+ }
+
+ buf->ntrunks = i;
+
+ /* In nofail mode, the allocation only fails if nothing was allocated */
+ if ((flags & HNS_ROCE_BUF_NOFAIL) ? i == 0 : i != ntrunk) {
+ for (i = 0; i < buf->ntrunks; i++)
+ dma_free_coherent(hr_dev->dev, trunk_size,
+ trunks[i].buf, trunks[i].map);
+
+ kfree(trunks);
+ kfree(buf);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ buf->npages = DIV_ROUND_UP(alloced_size, page_size);
+ buf->trunk_list = trunks;
+
+ return buf;
+}
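
The allocator above splits the requested buffer into DMA "trunks": in the direct mode one trunk covers the whole (page-aligned) size, otherwise each hw page becomes its own trunk rounded up to the system page size, and the trunk count follows by a ceiling division. A minimal standalone walk-through of that sizing, with an assumed 4 KiB system page, is below.

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u  /* assumed system PAGE_SIZE */

static unsigned int demo_align(unsigned int x, unsigned int a)
{
	return (x + a - 1) & ~(a - 1);
}

/* order_base_2(): smallest n such that (1 << n) >= x */
static unsigned int demo_order_base_2(unsigned int x)
{
	unsigned int n = 0;

	while ((1u << n) < x)
		n++;
	return n;
}

int main(void)
{
	unsigned int size = 256 * 1024;   /* queue buffer of 256 KiB */
	unsigned int page_shift = 13;     /* hw pages of 8 KiB */
	unsigned int page_size = 1u << page_shift;

	/* non-DIRECT path: one DMA trunk per hw page, rounded up to PAGE_SIZE */
	unsigned int trunk_shift =
		demo_order_base_2(demo_align(page_size, DEMO_PAGE_SIZE));
	unsigned int trunk_size = 1u << trunk_shift;
	unsigned int ntrunk = (size + trunk_size - 1) / trunk_size;

	printf("trunk_size=%u ntrunk=%u\n", trunk_size, ntrunk);
	return 0;
}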
+
+int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
+ int buf_cnt, struct hns_roce_buf *buf,
+ unsigned int page_shift)
+{
+ unsigned int offset, max_size;
+ int total = 0;
+ int i;
+
+ if (page_shift > buf->trunk_shift) {
+ dev_err(hr_dev->dev, "failed to check kmem buf shift %u > %u\n",
+ page_shift, buf->trunk_shift);
+ return -EINVAL;
+ }
+
+ offset = 0;
+ max_size = buf->ntrunks << buf->trunk_shift;
+ for (i = 0; i < buf_cnt && offset < max_size; i++) {
+ bufs[total++] = hns_roce_buf_dma_addr(buf, offset);
+ offset += (1 << page_shift);
+ }
+
+ return total;
+}
+
+int hns_roce_get_umem_bufs(dma_addr_t *bufs, int buf_cnt, struct ib_umem *umem,
+ unsigned int page_shift)
+{
+ struct ib_block_iter biter;
+ int total = 0;
+
+ /* convert system page cnt to hw page cnt */
+ rdma_umem_for_each_dma_block(umem, &biter, 1 << page_shift) {
+ bufs[total++] = rdma_block_iter_dma_address(&biter);
+ if (total >= buf_cnt)
+ goto done;
+ }
+
+done:
+ return total;
+}
+
+void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
+{
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
+ ida_destroy(&hr_dev->xrcd_ida.ida);
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
+ ida_destroy(&hr_dev->srq_table.srq_ida.ida);
+ xa_destroy(&hr_dev->srq_table.xa);
+ }
+ hns_roce_cleanup_qp_table(hr_dev);
+ hns_roce_cleanup_cq_table(hr_dev);
+ ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
+ ida_destroy(&hr_dev->pd_ida.ida);
+ ida_destroy(&hr_dev->uar_ida.ida);
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
new file mode 100644
index 000000000000..873e8a69a1b9
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/dmapool.h>
+#include "hns_roce_common.h"
+#include "hns_roce_device.h"
+#include "hns_roce_cmd.h"
+
+#define CMD_POLL_TOKEN 0xffff
+#define CMD_MAX_NUM 32
+
+static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mbox_msg *mbox_msg)
+{
+ int ret;
+
+ ret = hr_dev->hw->post_mbox(hr_dev, mbox_msg);
+ if (ret)
+ return ret;
+
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MBX_POSTED_CNT]);
+
+ return 0;
+}
+
+/* this should be called with "poll_sem" held */
+static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mbox_msg *mbox_msg)
+{
+ int ret;
+
+ ret = hns_roce_cmd_mbox_post_hw(hr_dev, mbox_msg);
+ if (ret) {
+ dev_err_ratelimited(hr_dev->dev,
+ "failed to post mailbox 0x%x in poll mode, ret = %d.\n",
+ mbox_msg->cmd, ret);
+ return ret;
+ }
+
+ ret = hr_dev->hw->poll_mbox_done(hr_dev);
+ if (ret)
+ return ret;
+
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MBX_POLLED_CNT]);
+
+ return 0;
+}
+
+static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mbox_msg *mbox_msg)
+{
+ int ret;
+
+ down(&hr_dev->cmd.poll_sem);
+ ret = __hns_roce_cmd_mbox_poll(hr_dev, mbox_msg);
+ up(&hr_dev->cmd.poll_sem);
+
+ return ret;
+}
+
+void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
+ u64 out_param)
+{
+ struct hns_roce_cmd_context *context =
+ &hr_dev->cmd.context[token % hr_dev->cmd.max_cmds];
+
+ if (unlikely(token != context->token)) {
+ dev_err_ratelimited(hr_dev->dev,
+ "[cmd] invalid ae token 0x%x, context token is 0x%x.\n",
+ token, context->token);
+ return;
+ }
+
+ context->result = (status == HNS_ROCE_CMD_SUCCESS) ? 0 : (-EIO);
+ context->out_param = out_param;
+ complete(&context->done);
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MBX_EVENT_CNT]);
+}
+
+static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mbox_msg *mbox_msg)
+{
+ struct hns_roce_cmdq *cmd = &hr_dev->cmd;
+ struct hns_roce_cmd_context *context;
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ spin_lock(&cmd->context_lock);
+
+ do {
+ context = &cmd->context[cmd->free_head];
+ cmd->free_head = context->next;
+ } while (context->busy);
+
+ context->busy = 1;
+ context->token += cmd->max_cmds;
+
+ spin_unlock(&cmd->context_lock);
+
+ reinit_completion(&context->done);
+
+ mbox_msg->token = context->token;
+ ret = hns_roce_cmd_mbox_post_hw(hr_dev, mbox_msg);
+ if (ret) {
+ dev_err_ratelimited(dev,
+ "failed to post mailbox 0x%x in event mode, ret = %d.\n",
+ mbox_msg->cmd, ret);
+ goto out;
+ }
+
+ if (!wait_for_completion_timeout(&context->done,
+ msecs_to_jiffies(HNS_ROCE_CMD_TIMEOUT_MSECS))) {
+ dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x timeout.\n",
+ context->token, mbox_msg->cmd);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = context->result;
+ if (ret)
+ dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x error %d.\n",
+ context->token, mbox_msg->cmd, ret);
+
+out:
+ context->busy = 0;
+ return ret;
+}
+
+static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mbox_msg *mbox_msg)
+{
+ int ret;
+
+ down(&hr_dev->cmd.event_sem);
+ ret = __hns_roce_cmd_mbox_wait(hr_dev, mbox_msg);
+ up(&hr_dev->cmd.event_sem);
+
+ return ret;
+}
+
+int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
+ u8 cmd, unsigned long tag)
+{
+ struct hns_roce_mbox_msg mbox_msg = {};
+ bool is_busy;
+
+ if (hr_dev->hw->chk_mbox_avail)
+ if (!hr_dev->hw->chk_mbox_avail(hr_dev, &is_busy))
+ return is_busy ? -EBUSY : 0;
+
+ mbox_msg.in_param = in_param;
+ mbox_msg.out_param = out_param;
+ mbox_msg.cmd = cmd;
+ mbox_msg.tag = tag;
+
+ if (hr_dev->cmd.use_events) {
+ mbox_msg.event_en = 1;
+
+ return hns_roce_cmd_mbox_wait(hr_dev, &mbox_msg);
+ } else {
+ mbox_msg.event_en = 0;
+ mbox_msg.token = CMD_POLL_TOKEN;
+
+ return hns_roce_cmd_mbox_poll(hr_dev, &mbox_msg);
+ }
+}
+
+int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
+{
+ sema_init(&hr_dev->cmd.poll_sem, 1);
+ hr_dev->cmd.use_events = 0;
+ hr_dev->cmd.max_cmds = CMD_MAX_NUM;
+ hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", hr_dev->dev,
+ HNS_ROCE_MAILBOX_SIZE,
+ HNS_ROCE_MAILBOX_SIZE, 0);
+ if (!hr_dev->cmd.pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev)
+{
+ dma_pool_destroy(hr_dev->cmd.pool);
+}
+
+int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd;
+ int i;
+
+ hr_cmd->context =
+ kcalloc(hr_cmd->max_cmds, sizeof(*hr_cmd->context), GFP_KERNEL);
+ if (!hr_cmd->context) {
+ hr_dev->cmd_mod = 0;
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < hr_cmd->max_cmds; ++i) {
+ hr_cmd->context[i].token = i;
+ hr_cmd->context[i].next = i + 1;
+ init_completion(&hr_cmd->context[i].done);
+ }
+ hr_cmd->context[hr_cmd->max_cmds - 1].next = 0;
+ hr_cmd->free_head = 0;
+
+ sema_init(&hr_cmd->event_sem, hr_cmd->max_cmds);
+ spin_lock_init(&hr_cmd->context_lock);
+
+ hr_cmd->use_events = 1;
+
+ return 0;
+}
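
The command contexts form a circular free list: free_head walks the ring past busy slots, and each reuse bumps the slot's token by max_cmds so the token stays congruent to its slot index while still distinguishing stale completions (hns_roce_cmd_event() looks the slot up with token % max_cmds and rejects mismatches). A small standalone model of that bookkeeping, with hypothetical names, is sketched below.

#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_CMDS 4

struct demo_ctx {
	uint16_t token;
	int next;
	int busy;
};

static struct demo_ctx ctxs[DEMO_MAX_CMDS];
static int free_head;

static void demo_init(void)
{
	for (int i = 0; i < DEMO_MAX_CMDS; i++) {
		ctxs[i].token = (uint16_t)i;
		ctxs[i].next = i + 1;
	}
	ctxs[DEMO_MAX_CMDS - 1].next = 0;  /* close the ring */
	free_head = 0;
}

/* Grab the next non-busy context and give it a fresh, unique token. */
static struct demo_ctx *demo_get_ctx(void)
{
	struct demo_ctx *c;

	do {
		c = &ctxs[free_head];
		free_head = c->next;
	} while (c->busy);

	c->busy = 1;
	c->token += DEMO_MAX_CMDS;  /* token stays congruent to its slot index */
	return c;
}

/* Completion side: token mod DEMO_MAX_CMDS points back at the slot. */
static struct demo_ctx *demo_find_ctx(uint16_t token)
{
	struct demo_ctx *c = &ctxs[token % DEMO_MAX_CMDS];

	return (c->token == token) ? c : NULL;  /* stale tokens are rejected */
}

int main(void)
{
	demo_init();
	struct demo_ctx *c = demo_get_ctx();

	printf("token 0x%x: %s\n", c->token,
	       demo_find_ctx(c->token) ? "found" : "stale");
	c->busy = 0;
	return 0;
}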
+
+void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd;
+
+ kfree(hr_cmd->context);
+ hr_cmd->use_events = 0;
+}
+
+struct hns_roce_cmd_mailbox *
+hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmd_mailbox *mailbox;
+
+ mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL);
+ if (!mailbox)
+ return ERR_PTR(-ENOMEM);
+
+ mailbox->buf =
+ dma_pool_alloc(hr_dev->cmd.pool, GFP_KERNEL, &mailbox->dma);
+ if (!mailbox->buf) {
+ kfree(mailbox);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return mailbox;
+}
+
+void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmd_mailbox *mailbox)
+{
+ if (!mailbox)
+ return;
+
+ dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma);
+ kfree(mailbox);
+}
+
+int hns_roce_create_hw_ctx(struct hns_roce_dev *dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ u8 cmd, unsigned long idx)
+{
+ return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cmd, idx);
+}
+
+int hns_roce_destroy_hw_ctx(struct hns_roce_dev *dev, u8 cmd, unsigned long idx)
+{
+ return hns_roce_cmd_mbox(dev, 0, 0, cmd, idx);
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h
new file mode 100644
index 000000000000..11dbbabebdc9
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _HNS_ROCE_CMD_H
+#define _HNS_ROCE_CMD_H
+
+#define HNS_ROCE_MAILBOX_SIZE 4096
+#define HNS_ROCE_CMD_TIMEOUT_MSECS 10000
+
+enum {
+ /* QPC BT commands */
+ HNS_ROCE_CMD_WRITE_QPC_BT0 = 0x0,
+ HNS_ROCE_CMD_WRITE_QPC_BT1 = 0x1,
+ HNS_ROCE_CMD_WRITE_QPC_BT2 = 0x2,
+ HNS_ROCE_CMD_READ_QPC_BT0 = 0x4,
+ HNS_ROCE_CMD_READ_QPC_BT1 = 0x5,
+ HNS_ROCE_CMD_READ_QPC_BT2 = 0x6,
+ HNS_ROCE_CMD_DESTROY_QPC_BT0 = 0x8,
+ HNS_ROCE_CMD_DESTROY_QPC_BT1 = 0x9,
+ HNS_ROCE_CMD_DESTROY_QPC_BT2 = 0xa,
+
+ /* QPC operation */
+ HNS_ROCE_CMD_MODIFY_QPC = 0x41,
+ HNS_ROCE_CMD_QUERY_QPC = 0x42,
+
+ HNS_ROCE_CMD_MODIFY_CQC = 0x52,
+ HNS_ROCE_CMD_QUERY_CQC = 0x53,
+ /* CQC BT commands */
+ HNS_ROCE_CMD_WRITE_CQC_BT0 = 0x10,
+ HNS_ROCE_CMD_WRITE_CQC_BT1 = 0x11,
+ HNS_ROCE_CMD_WRITE_CQC_BT2 = 0x12,
+ HNS_ROCE_CMD_READ_CQC_BT0 = 0x14,
+ HNS_ROCE_CMD_READ_CQC_BT1 = 0x15,
+ HNS_ROCE_CMD_READ_CQC_BT2 = 0x1b,
+ HNS_ROCE_CMD_DESTROY_CQC_BT0 = 0x18,
+ HNS_ROCE_CMD_DESTROY_CQC_BT1 = 0x19,
+ HNS_ROCE_CMD_DESTROY_CQC_BT2 = 0x1a,
+
+ /* MPT BT commands */
+ HNS_ROCE_CMD_WRITE_MPT_BT0 = 0x20,
+ HNS_ROCE_CMD_WRITE_MPT_BT1 = 0x21,
+ HNS_ROCE_CMD_WRITE_MPT_BT2 = 0x22,
+ HNS_ROCE_CMD_READ_MPT_BT0 = 0x24,
+ HNS_ROCE_CMD_READ_MPT_BT1 = 0x25,
+ HNS_ROCE_CMD_READ_MPT_BT2 = 0x26,
+ HNS_ROCE_CMD_DESTROY_MPT_BT0 = 0x28,
+ HNS_ROCE_CMD_DESTROY_MPT_BT1 = 0x29,
+ HNS_ROCE_CMD_DESTROY_MPT_BT2 = 0x2a,
+
+ /* CQC TIMER commands */
+ HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0 = 0x23,
+ HNS_ROCE_CMD_READ_CQC_TIMER_BT0 = 0x27,
+
+ /* MPT commands */
+ HNS_ROCE_CMD_QUERY_MPT = 0x62,
+
+ /* SRQC BT commands */
+ HNS_ROCE_CMD_WRITE_SRQC_BT0 = 0x30,
+ HNS_ROCE_CMD_WRITE_SRQC_BT1 = 0x31,
+ HNS_ROCE_CMD_WRITE_SRQC_BT2 = 0x32,
+ HNS_ROCE_CMD_READ_SRQC_BT0 = 0x34,
+ HNS_ROCE_CMD_READ_SRQC_BT1 = 0x35,
+ HNS_ROCE_CMD_READ_SRQC_BT2 = 0x36,
+ HNS_ROCE_CMD_DESTROY_SRQC_BT0 = 0x38,
+ HNS_ROCE_CMD_DESTROY_SRQC_BT1 = 0x39,
+ HNS_ROCE_CMD_DESTROY_SRQC_BT2 = 0x3a,
+
+ /* QPC TIMER commands */
+ HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0 = 0x33,
+ HNS_ROCE_CMD_READ_QPC_TIMER_BT0 = 0x37,
+
+ /* EQC commands */
+ HNS_ROCE_CMD_CREATE_AEQC = 0x80,
+ HNS_ROCE_CMD_MODIFY_AEQC = 0x81,
+ HNS_ROCE_CMD_QUERY_AEQC = 0x82,
+ HNS_ROCE_CMD_DESTROY_AEQC = 0x83,
+ HNS_ROCE_CMD_CREATE_CEQC = 0x90,
+ HNS_ROCE_CMD_MODIFY_CEQC = 0x91,
+ HNS_ROCE_CMD_QUERY_CEQC = 0x92,
+ HNS_ROCE_CMD_DESTROY_CEQC = 0x93,
+
+ /* SCC CTX commands */
+ HNS_ROCE_CMD_QUERY_SCCC = 0xa2,
+
+ /* SCC CTX BT commands */
+ HNS_ROCE_CMD_READ_SCCC_BT0 = 0xa4,
+ HNS_ROCE_CMD_WRITE_SCCC_BT0 = 0xa5,
+};
+
+enum {
+ /* TPT commands */
+ HNS_ROCE_CMD_CREATE_MPT = 0xd,
+ HNS_ROCE_CMD_DESTROY_MPT = 0xf,
+
+ /* CQ commands */
+ HNS_ROCE_CMD_CREATE_CQC = 0x16,
+ HNS_ROCE_CMD_DESTROY_CQC = 0x17,
+
+ /* QP/EE commands */
+ HNS_ROCE_CMD_RST2INIT_QP = 0x19,
+ HNS_ROCE_CMD_INIT2RTR_QP = 0x1a,
+ HNS_ROCE_CMD_RTR2RTS_QP = 0x1b,
+ HNS_ROCE_CMD_RTS2RTS_QP = 0x1c,
+ HNS_ROCE_CMD_2ERR_QP = 0x1e,
+ HNS_ROCE_CMD_RTS2SQD_QP = 0x1f,
+ HNS_ROCE_CMD_SQD2RTS_QP = 0x20,
+ HNS_ROCE_CMD_2RST_QP = 0x21,
+ HNS_ROCE_CMD_QUERY_QP = 0x22,
+ HNS_ROCE_CMD_SQD2SQD_QP = 0x38,
+ HNS_ROCE_CMD_CREATE_SRQ = 0x70,
+ HNS_ROCE_CMD_MODIFY_SRQC = 0x72,
+ HNS_ROCE_CMD_QUERY_SRQC = 0x73,
+ HNS_ROCE_CMD_DESTROY_SRQ = 0x74,
+};
+
+int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
+ u8 cmd, unsigned long tag);
+
+struct hns_roce_cmd_mailbox *
+hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev);
+void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmd_mailbox *mailbox);
+int hns_roce_create_hw_ctx(struct hns_roce_dev *dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ u8 cmd, unsigned long idx);
+int hns_roce_destroy_hw_ctx(struct hns_roce_dev *dev, u8 cmd,
+ unsigned long idx);
+
+#endif /* _HNS_ROCE_CMD_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
new file mode 100644
index 000000000000..465d1f914b6c
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _HNS_ROCE_COMMON_H
+#define _HNS_ROCE_COMMON_H
+#include <linux/bitfield.h>
+
+#define roce_write(dev, reg, val) writel((val), (dev)->reg_base + (reg))
+#define roce_read(dev, reg) readl((dev)->reg_base + (reg))
+#define roce_raw_write(value, addr) \
+ __raw_writel((__force u32)cpu_to_le32(value), (addr))
+
+#define roce_get_field(origin, mask, shift) \
+ ((le32_to_cpu(origin) & (mask)) >> (u32)(shift))
+
+#define roce_get_bit(origin, shift) \
+ roce_get_field((origin), (1ul << (shift)), (shift))
+
+#define roce_set_field(origin, mask, shift, val) \
+ do { \
+ (origin) &= ~cpu_to_le32(mask); \
+ (origin) |= \
+ cpu_to_le32(((u32)(val) << (u32)(shift)) & (mask)); \
+ } while (0)
+
+#define roce_set_bit(origin, shift, val) \
+ roce_set_field((origin), (1ul << (shift)), (shift), (val))
+
+#define FIELD_LOC(field_type, field_h, field_l) field_type, field_h, field_l
+
+#define _hr_reg_enable(ptr, field_type, field_h, field_l) \
+ ({ \
+ const field_type *_ptr = ptr; \
+ *((__le32 *)_ptr + (field_h) / 32) |= cpu_to_le32( \
+ BIT((field_l) % 32) + \
+ BUILD_BUG_ON_ZERO((field_h) != (field_l))); \
+ })
+
+#define hr_reg_enable(ptr, field) _hr_reg_enable(ptr, field)
+
+#define _hr_reg_clear(ptr, field_type, field_h, field_l) \
+ ({ \
+ const field_type *_ptr = ptr; \
+ BUILD_BUG_ON(((field_h) / 32) != ((field_l) / 32)); \
+ *((__le32 *)_ptr + (field_h) / 32) &= \
+ ~cpu_to_le32(GENMASK((field_h) % 32, (field_l) % 32)); \
+ })
+
+#define hr_reg_clear(ptr, field) _hr_reg_clear(ptr, field)
+
+#define _hr_reg_write_bool(ptr, field_type, field_h, field_l, val) \
+ ({ \
+ (val) ? _hr_reg_enable(ptr, field_type, field_h, field_l) : \
+ _hr_reg_clear(ptr, field_type, field_h, field_l); \
+ })
+
+#define hr_reg_write_bool(ptr, field, val) _hr_reg_write_bool(ptr, field, val)
+
+#define _hr_reg_write(ptr, field_type, field_h, field_l, val) \
+ ({ \
+ _hr_reg_clear(ptr, field_type, field_h, field_l); \
+ *((__le32 *)ptr + (field_h) / 32) |= cpu_to_le32(FIELD_PREP( \
+ GENMASK((field_h) % 32, (field_l) % 32), val)); \
+ })
+
+#define hr_reg_write(ptr, field, val) _hr_reg_write(ptr, field, val)
+
+#define _hr_reg_read(ptr, field_type, field_h, field_l) \
+ ({ \
+ const field_type *_ptr = ptr; \
+ BUILD_BUG_ON(((field_h) / 32) != ((field_l) / 32)); \
+ FIELD_GET(GENMASK((field_h) % 32, (field_l) % 32), \
+ le32_to_cpu(*((__le32 *)_ptr + (field_h) / 32))); \
+ })
+
+#define hr_reg_read(ptr, field) _hr_reg_read(ptr, field)
+
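
The hr_reg_* macros above address a field by its absolute high/low bit positions inside an array of little-endian 32-bit context words, then use GENMASK()/FIELD_PREP()/FIELD_GET() on the word that contains it. The standalone sketch below reproduces that mechanism with hand-rolled helpers on plain host-endian words (the real macros additionally do cpu_to_le32/le32_to_cpu); all names are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Minimal stand-ins for GENMASK()/FIELD_PREP()/FIELD_GET() on 32-bit words. */
#define DEMO_GENMASK(h, l)   (((~0u) << (l)) & (~0u >> (31 - (h))))
#define DEMO_PREP(mask, val) (((val) << __builtin_ctz(mask)) & (mask))
#define DEMO_GET(mask, reg)  (((reg) & (mask)) >> __builtin_ctz(mask))

/* A field is named by its absolute high/low bit positions in the context. */
static void demo_write(uint32_t *ctx, int h, int l, uint32_t val)
{
	uint32_t mask = DEMO_GENMASK(h % 32, l % 32);

	ctx[h / 32] &= ~mask;                 /* like hr_reg_clear() */
	ctx[h / 32] |= DEMO_PREP(mask, val);  /* like hr_reg_write() */
}

static uint32_t demo_read(const uint32_t *ctx, int h, int l)
{
	return DEMO_GET(DEMO_GENMASK(h % 32, l % 32), ctx[h / 32]);
}

int main(void)
{
	uint32_t ctx[4] = { 0 };

	/* e.g. a field occupying bits 75..64 of the context (word 2, bits 11..0) */
	demo_write(ctx, 75, 64, 0xabc);
	printf("field = 0x%x\n", demo_read(ctx, 75, 64));
	return 0;
}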
+/*************ROCEE_REG DEFINITION****************/
+#define ROCEE_VENDOR_ID_REG 0x0
+#define ROCEE_VENDOR_PART_ID_REG 0x4
+
+#define ROCEE_SYS_IMAGE_GUID_L_REG 0xC
+#define ROCEE_SYS_IMAGE_GUID_H_REG 0x10
+
+#define ROCEE_PORT_GID_L_0_REG 0x50
+#define ROCEE_PORT_GID_ML_0_REG 0x54
+#define ROCEE_PORT_GID_MH_0_REG 0x58
+#define ROCEE_PORT_GID_H_0_REG 0x5C
+
+#define ROCEE_BT_CMD_H_REG 0x204
+
+#define ROCEE_SMAC_L_0_REG 0x240
+#define ROCEE_SMAC_H_0_REG 0x244
+
+#define ROCEE_QP1C_CFG3_0_REG 0x27C
+
+#define ROCEE_CAEP_AEQE_CONS_IDX_REG 0x3AC
+#define ROCEE_CAEP_CEQC_CONS_IDX_0_REG 0x3BC
+
+#define ROCEE_ECC_UCERR_ALM1_REG 0xB38
+#define ROCEE_ECC_UCERR_ALM2_REG 0xB3C
+#define ROCEE_ECC_CERR_ALM1_REG 0xB44
+#define ROCEE_ECC_CERR_ALM2_REG 0xB48
+
+#define ROCEE_ACK_DELAY_REG 0x14
+#define ROCEE_GLB_CFG_REG 0x18
+
+#define ROCEE_DMAE_USER_CFG1_REG 0x40
+#define ROCEE_DMAE_USER_CFG2_REG 0x44
+
+#define ROCEE_DB_SQ_WL_REG 0x154
+#define ROCEE_DB_OTHERS_WL_REG 0x158
+#define ROCEE_RAQ_WL_REG 0x15C
+#define ROCEE_WRMS_POL_TIME_INTERVAL_REG 0x160
+#define ROCEE_EXT_DB_SQ_REG 0x164
+#define ROCEE_EXT_DB_SQ_H_REG 0x168
+#define ROCEE_EXT_DB_OTH_REG 0x16C
+
+#define ROCEE_EXT_DB_OTH_H_REG 0x170
+#define ROCEE_EXT_DB_SQ_WL_EMPTY_REG 0x174
+#define ROCEE_EXT_DB_SQ_WL_REG 0x178
+#define ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG 0x17C
+#define ROCEE_EXT_DB_OTHERS_WL_REG 0x180
+#define ROCEE_EXT_RAQ_REG 0x184
+#define ROCEE_EXT_RAQ_H_REG 0x188
+
+#define ROCEE_CAEP_CE_INTERVAL_CFG_REG 0x190
+#define ROCEE_CAEP_CE_BURST_NUM_CFG_REG 0x194
+#define ROCEE_BT_CMD_L_REG 0x200
+
+#define ROCEE_MB1_REG 0x210
+#define ROCEE_MB6_REG 0x224
+#define ROCEE_DB_SQ_L_0_REG 0x230
+#define ROCEE_DB_OTHERS_L_0_REG 0x238
+#define ROCEE_QP1C_CFG0_0_REG 0x270
+
+#define ROCEE_CAEP_AEQC_AEQE_SHIFT_REG 0x3A0
+#define ROCEE_CAEP_CEQC_SHIFT_0_REG 0x3B0
+#define ROCEE_CAEP_CE_IRQ_MASK_0_REG 0x3C0
+#define ROCEE_CAEP_CEQ_ALM_OVF_0_REG 0x3C4
+#define ROCEE_CAEP_AE_MASK_REG 0x6C8
+#define ROCEE_CAEP_AE_ST_REG 0x6CC
+
+#define ROCEE_CAEP_CQE_WCMD_EMPTY 0x850
+#define ROCEE_SCAEP_WR_CQE_CNT 0x8D0
+#define ROCEE_ECC_UCERR_ALM0_REG 0xB34
+#define ROCEE_ECC_CERR_ALM0_REG 0xB40
+
+/* V2 ROCEE REG */
+#define ROCEE_TX_CMQ_BASEADDR_L_REG 0x07000
+#define ROCEE_TX_CMQ_BASEADDR_H_REG 0x07004
+#define ROCEE_TX_CMQ_DEPTH_REG 0x07008
+#define ROCEE_TX_CMQ_PI_REG 0x07010
+#define ROCEE_TX_CMQ_CI_REG 0x07014
+
+#define ROCEE_RX_CMQ_BASEADDR_L_REG 0x07018
+#define ROCEE_RX_CMQ_BASEADDR_H_REG 0x0701c
+#define ROCEE_RX_CMQ_DEPTH_REG 0x07020
+#define ROCEE_RX_CMQ_TAIL_REG 0x07024
+#define ROCEE_RX_CMQ_HEAD_REG 0x07028
+
+#define ROCEE_VF_EQ_DB_CFG0_REG 0x238
+#define ROCEE_VF_EQ_DB_CFG1_REG 0x23C
+
+#define ROCEE_VF_ABN_INT_CFG_REG 0x13000
+#define ROCEE_VF_ABN_INT_ST_REG 0x13004
+#define ROCEE_VF_ABN_INT_EN_REG 0x13008
+#define ROCEE_VF_EVENT_INT_EN_REG 0x1300c
+
+#endif /* _HNS_ROCE_COMMON_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
new file mode 100644
index 000000000000..3a5c93c9fb3e
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -0,0 +1,542 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_umem.h>
+#include <rdma/uverbs_ioctl.h>
+#include "hns_roce_device.h"
+#include "hns_roce_cmd.h"
+#include "hns_roce_hem.h"
+#include "hns_roce_common.h"
+
+static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
+{
+ u32 least_load = bank[0].inuse;
+ u8 bankid = 0;
+ u32 bankcnt;
+ u8 i;
+
+ for (i = 1; i < HNS_ROCE_CQ_BANK_NUM; i++) {
+ bankcnt = bank[i].inuse;
+ if (bankcnt < least_load) {
+ least_load = bankcnt;
+ bankid = i;
+ }
+ }
+
+ return bankid;
+}
+
+static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+{
+ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+ struct hns_roce_bank *bank;
+ u8 bankid;
+ int id;
+
+ mutex_lock(&cq_table->bank_mutex);
+ bankid = get_least_load_bankid_for_cq(cq_table->bank);
+ bank = &cq_table->bank[bankid];
+
+ id = ida_alloc_range(&bank->ida, bank->min, bank->max, GFP_KERNEL);
+ if (id < 0) {
+ mutex_unlock(&cq_table->bank_mutex);
+ return id;
+ }
+
+ /* the lower 2 bits are the bank id */
+ hr_cq->cqn = (id << CQ_BANKID_SHIFT) | bankid;
+ bank->inuse++;
+ mutex_unlock(&cq_table->bank_mutex);
+
+ return 0;
+}
+
+static inline u8 get_cq_bankid(unsigned long cqn)
+{
+ /* The lower 2 bits of CQN are used to hash to different banks */
+ return (u8)(cqn & GENMASK(1, 0));
+}
+
+static void free_cqn(struct hns_roce_dev *hr_dev, unsigned long cqn)
+{
+ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+ struct hns_roce_bank *bank;
+
+ bank = &cq_table->bank[get_cq_bankid(cqn)];
+
+ ida_free(&bank->ida, cqn >> CQ_BANKID_SHIFT);
+
+ mutex_lock(&cq_table->bank_mutex);
+ bank->inuse--;
+ mutex_unlock(&cq_table->bank_mutex);
+}
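
A CQN is composed of the per-bank ida id in the high bits and the bank id in the low 2 bits, so allocation spreads CQs across banks and freeing can recover both parts by shifting and masking. The sketch below shows the encode/decode pair; the shift of 2 and the bank count of 4 follow from the GENMASK(1, 0) use above but are still stated here as assumptions.

#include <assert.h>
#include <stdint.h>

#define DEMO_CQ_BANK_NUM     4
#define DEMO_CQ_BANKID_SHIFT 2   /* log2(DEMO_CQ_BANK_NUM) */

/* CQN layout: per-bank ida id in the high bits, bank id in bits 1:0. */
static unsigned long demo_make_cqn(int id, uint8_t bankid)
{
	return ((unsigned long)id << DEMO_CQ_BANKID_SHIFT) | bankid;
}

static uint8_t demo_cqn_to_bankid(unsigned long cqn)
{
	return (uint8_t)(cqn & (DEMO_CQ_BANK_NUM - 1));
}

static int demo_cqn_to_id(unsigned long cqn)
{
	return (int)(cqn >> DEMO_CQ_BANKID_SHIFT);
}

int main(void)
{
	unsigned long cqn = demo_make_cqn(37, 2);

	assert(demo_cqn_to_bankid(cqn) == 2);
	assert(demo_cqn_to_id(cqn) == 37);
	return 0;
}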
+
+static int hns_roce_create_cqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq,
+ u64 *mtts, dma_addr_t dma_handle)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox)) {
+ ibdev_err(ibdev, "failed to alloc mailbox for CQC.\n");
+ return PTR_ERR(mailbox);
+ }
+
+ hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);
+
+ ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_CQC,
+ hr_cq->cqn);
+ if (ret)
+ ibdev_err(ibdev,
+ "failed to send create cmd for CQ(0x%lx), ret = %d.\n",
+ hr_cq->cqn, ret);
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
+
+static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+{
+ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u64 mtts[MTT_MIN_COUNT] = {};
+ int ret;
+
+ ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts));
+ if (ret) {
+ ibdev_err(ibdev, "failed to find CQ mtr, ret = %d.\n", ret);
+ return ret;
+ }
+
+ /* Get CQC memory HEM (Hardware Entry Memory) table */
+ ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
+ if (ret) {
+ ibdev_err(ibdev, "failed to get CQ(0x%lx) context, ret = %d.\n",
+ hr_cq->cqn, ret);
+ return ret;
+ }
+
+ ret = xa_err(xa_store_irq(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
+ if (ret) {
+ ibdev_err(ibdev, "failed to xa_store CQ, ret = %d.\n", ret);
+ goto err_put;
+ }
+
+ ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts,
+ hns_roce_get_mtr_ba(&hr_cq->mtr));
+ if (ret)
+ goto err_xa;
+
+ return 0;
+
+err_xa:
+ xa_erase_irq(&cq_table->array, hr_cq->cqn);
+err_put:
+ hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
+
+ return ret;
+}
+
+static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+{
+ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_CQC,
+ hr_cq->cqn);
+ if (ret)
+ dev_err_ratelimited(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n",
+ ret, hr_cq->cqn);
+
+ xa_erase_irq(&cq_table->array, hr_cq->cqn);
+
+ /* Wait until any in-flight interrupt handling has completed */
+ synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
+
+ /* wait until all references taken by interrupt handlers are dropped */
+ if (refcount_dec_and_test(&hr_cq->refcount))
+ complete(&hr_cq->free);
+ wait_for_completion(&hr_cq->free);
+
+ hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
+}
+
+static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+ struct ib_udata *udata, unsigned long addr)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_buf_attr buf_attr = {};
+ int ret;
+
+ buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + PAGE_SHIFT;
+ buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;
+ buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
+ buf_attr.region_count = 1;
+
+ ret = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
+ hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT,
+ udata, addr);
+ if (ret)
+ ibdev_err(ibdev, "failed to alloc CQ mtr, ret = %d.\n", ret);
+
+ return ret;
+}
+
+static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+{
+ hns_roce_mtr_destroy(hr_dev, &hr_cq->mtr);
+}
+
+static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+ struct ib_udata *udata, unsigned long addr,
+ struct hns_roce_ib_create_cq_resp *resp)
+{
+ bool has_db = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB;
+ struct hns_roce_ucontext *uctx;
+ int err;
+
+ if (udata) {
+ if (has_db &&
+ udata->outlen >= offsetofend(typeof(*resp), cap_flags)) {
+ uctx = rdma_udata_to_drv_context(udata,
+ struct hns_roce_ucontext, ibucontext);
+ err = hns_roce_db_map_user(uctx, addr, &hr_cq->db);
+ if (err)
+ return err;
+ hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
+ resp->cap_flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
+ }
+ } else {
+ if (has_db) {
+ err = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
+ if (err)
+ return err;
+ hr_cq->set_ci_db = hr_cq->db.db_record;
+ *hr_cq->set_ci_db = 0;
+ hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
+ }
+ hr_cq->db_reg = hr_dev->reg_base + hr_dev->odb_offset +
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
+ }
+
+ return 0;
+}
+
+static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+ struct ib_udata *udata)
+{
+ struct hns_roce_ucontext *uctx;
+
+ if (!(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB))
+ return;
+
+ hr_cq->flags &= ~HNS_ROCE_CQ_FLAG_RECORD_DB;
+ if (udata) {
+ uctx = rdma_udata_to_drv_context(udata,
+ struct hns_roce_ucontext,
+ ibucontext);
+ hns_roce_db_unmap_user(uctx, &hr_cq->db);
+ } else {
+ hns_roce_free_db(hr_dev, &hr_cq->db);
+ }
+}
+
+static int verify_cq_create_attr(struct hns_roce_dev *hr_dev,
+ const struct ib_cq_init_attr *attr)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+
+ if (!attr->cqe || attr->cqe > hr_dev->caps.max_cqes) {
+ ibdev_err(ibdev, "failed to check CQ count %u, max = %u.\n",
+ attr->cqe, hr_dev->caps.max_cqes);
+ return -EINVAL;
+ }
+
+ if (attr->comp_vector >= hr_dev->caps.num_comp_vectors) {
+ ibdev_err(ibdev, "failed to check CQ vector = %u, max = %d.\n",
+ attr->comp_vector, hr_dev->caps.num_comp_vectors);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int get_cq_ucmd(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
+ struct hns_roce_ib_create_cq *ucmd)
+{
+ struct ib_device *ibdev = hr_cq->ib_cq.device;
+ int ret;
+
+ ret = ib_copy_from_udata(ucmd, udata, min(udata->inlen, sizeof(*ucmd)));
+ if (ret) {
+ ibdev_err(ibdev, "failed to copy CQ udata, ret = %d.\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void set_cq_param(struct hns_roce_cq *hr_cq, u32 cq_entries, int vector,
+ struct hns_roce_ib_create_cq *ucmd)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
+
+ cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
+ cq_entries = roundup_pow_of_two(cq_entries);
+ hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
+ hr_cq->cq_depth = cq_entries;
+ hr_cq->vector = vector;
+
+ spin_lock_init(&hr_cq->lock);
+ INIT_LIST_HEAD(&hr_cq->sq_list);
+ INIT_LIST_HEAD(&hr_cq->rq_list);
+}
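
set_cq_param() clamps the requested CQE count to the device minimum, rounds it up to a power of two, and stores depth - 1 as the last valid CQE index so wrap-around can use a simple mask. The standalone sketch below shows that sizing; the minimum of 64 is a placeholder for hr_dev->caps.min_cqes.

#include <stdint.h>
#include <stdio.h>

#define DEMO_MIN_CQES 64u   /* placeholder for hr_dev->caps.min_cqes */

static uint32_t demo_roundup_pow_of_two(uint32_t x)
{
	uint32_t r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	uint32_t requested = 100;
	uint32_t depth;

	depth = requested < DEMO_MIN_CQES ? DEMO_MIN_CQES : requested;
	depth = demo_roundup_pow_of_two(depth);  /* 100 -> 128 */

	printf("cq_depth=%u ib_cq.cqe=%u (last valid CQE index)\n",
	       depth, depth - 1);
	return 0;
}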
+
+static int set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
+ struct hns_roce_ib_create_cq *ucmd)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
+
+ if (!udata) {
+ hr_cq->cqe_size = hr_dev->caps.cqe_sz;
+ return 0;
+ }
+
+ if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size)) {
+ if (ucmd->cqe_size != HNS_ROCE_V2_CQE_SIZE &&
+ ucmd->cqe_size != HNS_ROCE_V3_CQE_SIZE) {
+ ibdev_err(&hr_dev->ib_dev,
+ "invalid cqe size %u.\n", ucmd->cqe_size);
+ return -EINVAL;
+ }
+
+ hr_cq->cqe_size = ucmd->cqe_size;
+ } else {
+ hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
+ }
+
+ return 0;
+}
+
+int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
+ struct ib_udata *udata = &attrs->driver_udata;
+ struct hns_roce_ib_create_cq_resp resp = {};
+ struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_ib_create_cq ucmd = {};
+ int ret;
+
+ if (attr->flags) {
+ ret = -EOPNOTSUPP;
+ goto err_out;
+ }
+
+ ret = verify_cq_create_attr(hr_dev, attr);
+ if (ret)
+ goto err_out;
+
+ if (udata) {
+ ret = get_cq_ucmd(hr_cq, udata, &ucmd);
+ if (ret)
+ goto err_out;
+ }
+
+ set_cq_param(hr_cq, attr->cqe, attr->comp_vector, &ucmd);
+
+ ret = set_cqe_size(hr_cq, udata, &ucmd);
+ if (ret)
+ goto err_out;
+
+ ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
+ if (ret) {
+ ibdev_err(ibdev, "failed to alloc CQ buf, ret = %d.\n", ret);
+ goto err_out;
+ }
+
+ ret = alloc_cq_db(hr_dev, hr_cq, udata, ucmd.db_addr, &resp);
+ if (ret) {
+ ibdev_err(ibdev, "failed to alloc CQ db, ret = %d.\n", ret);
+ goto err_cq_buf;
+ }
+
+ ret = alloc_cqn(hr_dev, hr_cq);
+ if (ret) {
+ ibdev_err(ibdev, "failed to alloc CQN, ret = %d.\n", ret);
+ goto err_cq_db;
+ }
+
+ ret = alloc_cqc(hr_dev, hr_cq);
+ if (ret) {
+ ibdev_err(ibdev,
+ "failed to alloc CQ context, ret = %d.\n", ret);
+ goto err_cqn;
+ }
+
+ if (udata) {
+ resp.cqn = hr_cq->cqn;
+ ret = ib_copy_to_udata(udata, &resp,
+ min(udata->outlen, sizeof(resp)));
+ if (ret)
+ goto err_cqc;
+ }
+
+ hr_cq->cons_index = 0;
+ hr_cq->arm_sn = 1;
+ refcount_set(&hr_cq->refcount, 1);
+ init_completion(&hr_cq->free);
+
+ return 0;
+
+err_cqc:
+ free_cqc(hr_dev, hr_cq);
+err_cqn:
+ free_cqn(hr_dev, hr_cq->cqn);
+err_cq_db:
+ free_cq_db(hr_dev, hr_cq, udata);
+err_cq_buf:
+ free_cq_buf(hr_dev, hr_cq);
+err_out:
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CQ_CREATE_ERR_CNT]);
+
+ return ret;
+}
+
+int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
+ struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+
+ free_cqc(hr_dev, hr_cq);
+ free_cqn(hr_dev, hr_cq->cqn);
+ free_cq_db(hr_dev, hr_cq, udata);
+ free_cq_buf(hr_dev, hr_cq);
+
+ return 0;
+}
+
+void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
+{
+ struct hns_roce_cq *hr_cq;
+ struct ib_cq *ibcq;
+
+ hr_cq = xa_load(&hr_dev->cq_table.array,
+ cqn & (hr_dev->caps.num_cqs - 1));
+ if (!hr_cq) {
+ dev_warn(hr_dev->dev, "completion event for bogus CQ 0x%06x\n",
+ cqn);
+ return;
+ }
+
+ ++hr_cq->arm_sn;
+ ibcq = &hr_cq->ib_cq;
+ if (ibcq->comp_handler)
+ ibcq->comp_handler(ibcq, ibcq->cq_context);
+}
+
+void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
+{
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_cq *hr_cq;
+ struct ib_event event;
+ struct ib_cq *ibcq;
+
+ if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
+ event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
+ event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
+ dev_err(dev, "unexpected event type 0x%x on CQ 0x%06x\n",
+ event_type, cqn);
+ return;
+ }
+
+ xa_lock(&hr_dev->cq_table.array);
+ hr_cq = xa_load(&hr_dev->cq_table.array,
+ cqn & (hr_dev->caps.num_cqs - 1));
+ if (hr_cq)
+ refcount_inc(&hr_cq->refcount);
+ xa_unlock(&hr_dev->cq_table.array);
+ if (!hr_cq) {
+ dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
+ return;
+ }
+
+ ibcq = &hr_cq->ib_cq;
+ if (ibcq->event_handler) {
+ event.device = ibcq->device;
+ event.element.cq = ibcq;
+ event.event = IB_EVENT_CQ_ERR;
+ ibcq->event_handler(&event, ibcq->cq_context);
+ }
+
+ if (refcount_dec_and_test(&hr_cq->refcount))
+ complete(&hr_cq->free);
+}
+
+void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+ unsigned int reserved_from_bot;
+ unsigned int i;
+
+ mutex_init(&cq_table->bank_mutex);
+ xa_init(&cq_table->array);
+
+ reserved_from_bot = hr_dev->caps.reserved_cqs;
+
+ for (i = 0; i < reserved_from_bot; i++) {
+ cq_table->bank[get_cq_bankid(i)].inuse++;
+ cq_table->bank[get_cq_bankid(i)].min++;
+ }
+
+ for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++) {
+ ida_init(&cq_table->bank[i].ida);
+ cq_table->bank[i].max = hr_dev->caps.num_cqs /
+ HNS_ROCE_CQ_BANK_NUM - 1;
+ }
+}
+
+void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
+{
+ int i;
+
+ for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++)
+ ida_destroy(&hr_dev->cq_table.bank[i].ida);
+ xa_destroy(&hr_dev->cq_table.array);
+ mutex_destroy(&hr_dev->cq_table.bank_mutex);
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
new file mode 100644
index 000000000000..5c4c0480832b
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2017 Hisilicon Limited.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ */
+
+#include <rdma/ib_umem.h>
+#include "hns_roce_device.h"
+
+int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
+ struct hns_roce_db *db)
+{
+ unsigned long page_addr = virt & PAGE_MASK;
+ struct hns_roce_user_db_page *page;
+ unsigned int offset;
+ int ret = 0;
+
+ mutex_lock(&context->page_mutex);
+
+ list_for_each_entry(page, &context->page_list, list)
+ if (page->user_virt == page_addr)
+ goto found;
+
+ page = kmalloc(sizeof(*page), GFP_KERNEL);
+ if (!page) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ refcount_set(&page->refcount, 1);
+ page->user_virt = page_addr;
+ page->umem = ib_umem_get(context->ibucontext.device, page_addr,
+ PAGE_SIZE, 0);
+ if (IS_ERR(page->umem)) {
+ ret = PTR_ERR(page->umem);
+ kfree(page);
+ goto out;
+ }
+
+ list_add(&page->list, &context->page_list);
+
+found:
+ offset = virt - page_addr;
+ db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + offset;
+ db->virt_addr = sg_virt(page->umem->sgt_append.sgt.sgl) + offset;
+ db->u.user_page = page;
+ refcount_inc(&page->refcount);
+
+out:
+ mutex_unlock(&context->page_mutex);
+
+ return ret;
+}
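+
+/*
+ * Note: doorbell records that userspace allocates from the same page (for
+ * example a CQ record doorbell and a QP record doorbell a few bytes apart)
+ * share a single pinned page and ib_umem here; the refcount ensures the
+ * page is released only after the last doorbell is unmapped.
+ */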
+
+void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
+ struct hns_roce_db *db)
+{
+ mutex_lock(&context->page_mutex);
+
+ refcount_dec(&db->u.user_page->refcount);
+ if (refcount_dec_if_one(&db->u.user_page->refcount)) {
+ list_del(&db->u.user_page->list);
+ ib_umem_release(db->u.user_page->umem);
+ kfree(db->u.user_page);
+ }
+
+ mutex_unlock(&context->page_mutex);
+}
+
+static struct hns_roce_db_pgdir *hns_roce_alloc_db_pgdir(
+ struct device *dma_device)
+{
+ struct hns_roce_db_pgdir *pgdir;
+
+ pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
+ if (!pgdir)
+ return NULL;
+
+ bitmap_fill(pgdir->order1,
+ HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
+ pgdir->bits[0] = pgdir->order0;
+ pgdir->bits[1] = pgdir->order1;
+ pgdir->page = dma_alloc_coherent(dma_device, PAGE_SIZE,
+ &pgdir->db_dma, GFP_KERNEL);
+ if (!pgdir->page) {
+ kfree(pgdir);
+ return NULL;
+ }
+
+ return pgdir;
+}
+
+static int hns_roce_alloc_db_from_pgdir(struct hns_roce_db_pgdir *pgdir,
+ struct hns_roce_db *db, int order)
+{
+ unsigned long o;
+ unsigned long i;
+
+ for (o = order; o <= 1; ++o) {
+ i = find_first_bit(pgdir->bits[o], HNS_ROCE_DB_PER_PAGE >> o);
+ if (i < HNS_ROCE_DB_PER_PAGE >> o)
+ goto found;
+ }
+
+ return -ENOMEM;
+
+found:
+ clear_bit(i, pgdir->bits[o]);
+
+ i <<= o;
+
+ if (o > order)
+ set_bit(i ^ 1, pgdir->bits[order]);
+
+ db->u.pgdir = pgdir;
+ db->index = i;
+ db->db_record = pgdir->page + db->index;
+ db->dma = pgdir->db_dma + db->index * HNS_ROCE_DB_UNIT_SIZE;
+ db->order = order;
+
+ return 0;
+}
+
+int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
+ int order)
+{
+ struct hns_roce_db_pgdir *pgdir;
+ int ret = 0;
+
+ mutex_lock(&hr_dev->pgdir_mutex);
+
+ list_for_each_entry(pgdir, &hr_dev->pgdir_list, list)
+ if (!hns_roce_alloc_db_from_pgdir(pgdir, db, order))
+ goto out;
+
+ pgdir = hns_roce_alloc_db_pgdir(hr_dev->dev);
+ if (!pgdir) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ list_add(&pgdir->list, &hr_dev->pgdir_list);
+
+ /* This should never fail -- we just allocated an empty page: */
+ WARN_ON(hns_roce_alloc_db_from_pgdir(pgdir, db, order));
+
+out:
+ mutex_unlock(&hr_dev->pgdir_mutex);
+
+ return ret;
+}
+
+void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db)
+{
+ unsigned long o;
+ unsigned long i;
+
+ mutex_lock(&hr_dev->pgdir_mutex);
+
+ o = db->order;
+ i = db->index;
+
+ if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
+ clear_bit(i ^ 1, db->u.pgdir->order0);
+ ++o;
+ }
+
+ i >>= o;
+ set_bit(i, db->u.pgdir->bits[o]);
+
+ if (bitmap_full(db->u.pgdir->order1,
+ HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT)) {
+ dma_free_coherent(hr_dev->dev, PAGE_SIZE, db->u.pgdir->page,
+ db->u.pgdir->db_dma);
+ list_del(&db->u.pgdir->list);
+ kfree(db->u.pgdir);
+ }
+
+ mutex_unlock(&hr_dev->pgdir_mutex);
+}
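+
+/*
+ * Example of the two-level scheme above (a sketch, assuming 4 KB pages, so
+ * HNS_ROCE_DB_PER_PAGE = 1024 units of HNS_ROCE_DB_UNIT_SIZE bytes): a
+ * fresh pgdir has all 512 order-1 pairs free. Allocating an order-0
+ * doorbell splits one pair, hands out unit i and marks its buddy (i ^ 1)
+ * free in the order-0 bitmap; freeing that doorbell later coalesces the
+ * pair back into the order-1 bitmap, like a small buddy allocator.
+ */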
diff --git a/drivers/infiniband/hw/hns/hns_roce_debugfs.c b/drivers/infiniband/hw/hns/hns_roce_debugfs.c
new file mode 100644
index 000000000000..b869cdc54118
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_debugfs.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2023 Hisilicon Limited.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+
+#include "hns_roce_device.h"
+
+static struct dentry *hns_roce_dbgfs_root;
+
+static int hns_debugfs_seqfile_open(struct inode *inode, struct file *f)
+{
+ struct hns_debugfs_seqfile *seqfile = inode->i_private;
+
+ return single_open(f, seqfile->read, seqfile->data);
+}
+
+static const struct file_operations hns_debugfs_seqfile_fops = {
+ .owner = THIS_MODULE,
+ .open = hns_debugfs_seqfile_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek
+};
+
+static void init_debugfs_seqfile(struct hns_debugfs_seqfile *seq,
+ const char *name, struct dentry *parent,
+ int (*read_fn)(struct seq_file *, void *),
+ void *data)
+{
+ debugfs_create_file(name, 0400, parent, seq, &hns_debugfs_seqfile_fops);
+
+ seq->read = read_fn;
+ seq->data = data;
+}
+
+static const char * const sw_stat_info[] = {
+ [HNS_ROCE_DFX_AEQE_CNT] = "aeqe",
+ [HNS_ROCE_DFX_CEQE_CNT] = "ceqe",
+ [HNS_ROCE_DFX_CMDS_CNT] = "cmds",
+ [HNS_ROCE_DFX_CMDS_ERR_CNT] = "cmds_err",
+ [HNS_ROCE_DFX_MBX_POSTED_CNT] = "posted_mbx",
+ [HNS_ROCE_DFX_MBX_POLLED_CNT] = "polled_mbx",
+ [HNS_ROCE_DFX_MBX_EVENT_CNT] = "mbx_event",
+ [HNS_ROCE_DFX_QP_CREATE_ERR_CNT] = "qp_create_err",
+ [HNS_ROCE_DFX_QP_MODIFY_ERR_CNT] = "qp_modify_err",
+ [HNS_ROCE_DFX_CQ_CREATE_ERR_CNT] = "cq_create_err",
+ [HNS_ROCE_DFX_CQ_MODIFY_ERR_CNT] = "cq_modify_err",
+ [HNS_ROCE_DFX_SRQ_CREATE_ERR_CNT] = "srq_create_err",
+ [HNS_ROCE_DFX_SRQ_MODIFY_ERR_CNT] = "srq_modify_err",
+ [HNS_ROCE_DFX_XRCD_ALLOC_ERR_CNT] = "xrcd_alloc_err",
+ [HNS_ROCE_DFX_MR_REG_ERR_CNT] = "mr_reg_err",
+ [HNS_ROCE_DFX_MR_REREG_ERR_CNT] = "mr_rereg_err",
+ [HNS_ROCE_DFX_AH_CREATE_ERR_CNT] = "ah_create_err",
+ [HNS_ROCE_DFX_MMAP_ERR_CNT] = "mmap_err",
+ [HNS_ROCE_DFX_UCTX_ALLOC_ERR_CNT] = "uctx_alloc_err",
+};
+
+static int sw_stat_debugfs_show(struct seq_file *file, void *offset)
+{
+ struct hns_roce_dev *hr_dev = file->private;
+ int i;
+
+ for (i = 0; i < HNS_ROCE_DFX_CNT_TOTAL; i++)
+ seq_printf(file, "%-20s --- %lld\n", sw_stat_info[i],
+ atomic64_read(&hr_dev->dfx_cnt[i]));
+
+ return 0;
+}
+
+static void create_sw_stat_debugfs(struct hns_roce_dev *hr_dev,
+ struct dentry *parent)
+{
+ struct hns_sw_stat_debugfs *dbgfs = &hr_dev->dbgfs.sw_stat_root;
+
+ dbgfs->root = debugfs_create_dir("sw_stat", parent);
+
+ init_debugfs_seqfile(&dbgfs->sw_stat, "sw_stat", dbgfs->root,
+ sw_stat_debugfs_show, hr_dev);
+}
+
+/* debugfs for device */
+void hns_roce_register_debugfs(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_dev_debugfs *dbgfs = &hr_dev->dbgfs;
+
+ dbgfs->root = debugfs_create_dir(pci_name(hr_dev->pci_dev),
+ hns_roce_dbgfs_root);
+
+ create_sw_stat_debugfs(hr_dev, dbgfs->root);
+}
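+
+/*
+ * Example (a sketch): with debugfs mounted at /sys/kernel/debug and the
+ * module-level "hns_roce" root created by hns_roce_init_debugfs() below,
+ * the software counters registered here can be read with e.g.:
+ *
+ *	cat /sys/kernel/debug/hns_roce/0000:7d:00.2/sw_stat/sw_stat
+ *
+ * where 0000:7d:00.2 stands for a hypothetical PCI device name.
+ */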
+
+void hns_roce_unregister_debugfs(struct hns_roce_dev *hr_dev)
+{
+ debugfs_remove_recursive(hr_dev->dbgfs.root);
+}
+
+/* debugfs for hns module */
+void hns_roce_init_debugfs(void)
+{
+ hns_roce_dbgfs_root = debugfs_create_dir("hns_roce", NULL);
+}
+
+void hns_roce_cleanup_debugfs(void)
+{
+ debugfs_remove_recursive(hns_roce_dbgfs_root);
+ hns_roce_dbgfs_root = NULL;
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_debugfs.h b/drivers/infiniband/hw/hns/hns_roce_debugfs.h
new file mode 100644
index 000000000000..98e87bd3161e
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_debugfs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2023 Hisilicon Limited.
+ */
+
+#ifndef __HNS_ROCE_DEBUGFS_H
+#define __HNS_ROCE_DEBUGFS_H
+
+/* debugfs seqfile */
+struct hns_debugfs_seqfile {
+ int (*read)(struct seq_file *seq, void *data);
+ void *data;
+};
+
+struct hns_sw_stat_debugfs {
+ struct dentry *root;
+ struct hns_debugfs_seqfile sw_stat;
+};
+
+/* Debugfs for device */
+struct hns_roce_dev_debugfs {
+ struct dentry *root;
+ struct hns_sw_stat_debugfs sw_stat_root;
+};
+
+struct hns_roce_dev;
+
+void hns_roce_init_debugfs(void);
+void hns_roce_cleanup_debugfs(void);
+void hns_roce_register_debugfs(struct hns_roce_dev *hr_dev);
+void hns_roce_unregister_debugfs(struct hns_roce_dev *hr_dev);
+
+#endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
new file mode 100644
index 000000000000..78ee04a48a74
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -0,0 +1,1309 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _HNS_ROCE_DEVICE_H
+#define _HNS_ROCE_DEVICE_H
+
+#include <rdma/ib_verbs.h>
+#include <rdma/hns-abi.h>
+#include "hns_roce_debugfs.h"
+
+#define PCI_REVISION_ID_HIP08 0x21
+#define PCI_REVISION_ID_HIP09 0x30
+
+#define HNS_ROCE_MAX_MSG_LEN 0x80000000
+
+#define HNS_ROCE_IB_MIN_SQ_STRIDE 6
+
+#define BA_BYTE_LEN 8
+
+#define HNS_ROCE_MIN_CQE_NUM 0x40
+#define HNS_ROCE_MIN_SRQ_WQE_NUM 1
+
+#define HNS_ROCE_MAX_IRQ_NUM 128
+
+#define HNS_ROCE_SGE_IN_WQE 2
+#define HNS_ROCE_SGE_SHIFT 4
+
+#define EQ_ENABLE 1
+#define EQ_DISABLE 0
+
+#define HNS_ROCE_CEQ 0
+#define HNS_ROCE_AEQ 1
+
+#define HNS_ROCE_CEQE_SIZE 0x4
+#define HNS_ROCE_AEQE_SIZE 0x10
+
+#define HNS_ROCE_V3_EQE_SIZE 0x40
+
+#define HNS_ROCE_V2_CQE_SIZE 32
+#define HNS_ROCE_V3_CQE_SIZE 64
+
+#define HNS_ROCE_V2_QPC_SZ 256
+#define HNS_ROCE_V3_QPC_SZ 512
+
+#define HNS_ROCE_MAX_PORTS 6
+#define HNS_ROCE_GID_SIZE 16
+#define HNS_ROCE_SGE_SIZE 16
+#define HNS_ROCE_DWQE_SIZE 65536
+
+#define HNS_ROCE_HOP_NUM_0 0xff
+
+#define MR_TYPE_MR 0x00
+#define MR_TYPE_FRMR 0x01
+#define MR_TYPE_DMA 0x03
+
+#define HNS_ROCE_FRMR_MAX_PA 512
+#define HNS_ROCE_FRMR_ALIGN_SIZE 128
+
+#define PKEY_ID 0xffff
+#define NODE_DESC_SIZE 64
+#define DB_REG_OFFSET 0x1000
+
+/* Configure to HW for PAGE_SIZE larger than 4KB */
+#define PG_SHIFT_OFFSET (PAGE_SHIFT - 12)
+
+#define ATOMIC_WR_LEN 8
+
+#define HNS_ROCE_IDX_QUE_ENTRY_SZ 4
+#define SRQ_DB_REG 0x230
+
+#define HNS_ROCE_QP_BANK_NUM 8
+#define HNS_ROCE_CQ_BANK_NUM 4
+
+#define CQ_BANKID_SHIFT 2
+#define CQ_BANKID_MASK GENMASK(1, 0)
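+
+/*
+ * The low CQ_BANKID_SHIFT bits of a CQN (masked by CQ_BANKID_MASK) select
+ * one of the HNS_ROCE_CQ_BANK_NUM banks. For example, CQNs 0, 4, 8, ...
+ * land in bank 0 and CQNs 1, 5, 9, ... in bank 1.
+ */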
+
+#define HNS_ROCE_MAX_CQ_COUNT 0xFFFF
+#define HNS_ROCE_MAX_CQ_PERIOD 0xFFFF
+
+enum {
+ SERV_TYPE_RC,
+ SERV_TYPE_UC,
+ SERV_TYPE_RD,
+ SERV_TYPE_UD,
+ SERV_TYPE_XRC = 5,
+};
+
+enum hns_roce_event {
+ HNS_ROCE_EVENT_TYPE_PATH_MIG = 0x01,
+ HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED = 0x02,
+ HNS_ROCE_EVENT_TYPE_COMM_EST = 0x03,
+ HNS_ROCE_EVENT_TYPE_SQ_DRAINED = 0x04,
+ HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
+ HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR = 0x06,
+ HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR = 0x07,
+ HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH = 0x08,
+ HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH = 0x09,
+ HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR = 0x0a,
+ HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR = 0x0b,
+ HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW = 0x0c,
+ HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID = 0x0d,
+ HNS_ROCE_EVENT_TYPE_PORT_CHANGE = 0x0f,
+ /* 0x10 and 0x11 are unused in the current application case */
+ HNS_ROCE_EVENT_TYPE_DB_OVERFLOW = 0x12,
+ HNS_ROCE_EVENT_TYPE_MB = 0x13,
+ HNS_ROCE_EVENT_TYPE_FLR = 0x15,
+ HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION = 0x16,
+ HNS_ROCE_EVENT_TYPE_INVALID_XRCETH = 0x17,
+};
+
+enum {
+ HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
+ HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
+ HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2),
+ HNS_ROCE_CAP_FLAG_CQ_RECORD_DB = BIT(3),
+ HNS_ROCE_CAP_FLAG_QP_RECORD_DB = BIT(4),
+ HNS_ROCE_CAP_FLAG_SRQ = BIT(5),
+ HNS_ROCE_CAP_FLAG_XRC = BIT(6),
+ HNS_ROCE_CAP_FLAG_MW = BIT(7),
+ HNS_ROCE_CAP_FLAG_FRMR = BIT(8),
+ HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9),
+ HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10),
+ HNS_ROCE_CAP_FLAG_DIRECT_WQE = BIT(12),
+ HNS_ROCE_CAP_FLAG_SDI_MODE = BIT(14),
+ HNS_ROCE_CAP_FLAG_STASH = BIT(17),
+ HNS_ROCE_CAP_FLAG_CQE_INLINE = BIT(19),
+ HNS_ROCE_CAP_FLAG_SRQ_RECORD_DB = BIT(22),
+};
+
+#define HNS_ROCE_DB_TYPE_COUNT 2
+#define HNS_ROCE_DB_UNIT_SIZE 4
+
+enum {
+ HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
+};
+
+enum hns_roce_reset_stage {
+ HNS_ROCE_STATE_NON_RST,
+ HNS_ROCE_STATE_RST_BEF_DOWN,
+ HNS_ROCE_STATE_RST_DOWN,
+ HNS_ROCE_STATE_RST_UNINIT,
+ HNS_ROCE_STATE_RST_INIT,
+ HNS_ROCE_STATE_RST_INITED,
+};
+
+enum hns_roce_instance_state {
+ HNS_ROCE_STATE_NON_INIT,
+ HNS_ROCE_STATE_INIT,
+ HNS_ROCE_STATE_INITED,
+ HNS_ROCE_STATE_UNINIT,
+};
+
+enum {
+ HNS_ROCE_RST_DIRECT_RETURN = 0,
+};
+
+#define HNS_ROCE_CMD_SUCCESS 1
+
+#define HNS_ROCE_MAX_HOP_NUM 3
+/* The minimum page size is 4K for hardware */
+#define HNS_HW_PAGE_SHIFT 12
+#define HNS_HW_PAGE_SIZE (1 << HNS_HW_PAGE_SHIFT)
+
+#define HNS_HW_MAX_PAGE_SHIFT 27
+#define HNS_HW_MAX_PAGE_SIZE (1 << HNS_HW_MAX_PAGE_SHIFT)
+
+struct hns_roce_uar {
+ u64 pfn;
+ unsigned long index;
+ unsigned long logic_idx;
+};
+
+enum hns_roce_mmap_type {
+ HNS_ROCE_MMAP_TYPE_DB = 1,
+ HNS_ROCE_MMAP_TYPE_DWQE,
+};
+
+struct hns_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ enum hns_roce_mmap_type mmap_type;
+ u64 address;
+};
+
+struct hns_roce_ucontext {
+ struct ib_ucontext ibucontext;
+ struct hns_roce_uar uar;
+ struct list_head page_list;
+ struct mutex page_mutex;
+ struct hns_user_mmap_entry *db_mmap_entry;
+ u32 config;
+};
+
+struct hns_roce_pd {
+ struct ib_pd ibpd;
+ unsigned long pdn;
+};
+
+struct hns_roce_xrcd {
+ struct ib_xrcd ibxrcd;
+ u32 xrcdn;
+};
+
+struct hns_roce_bitmap {
+ /* Position of the last bit set to 1 found during bitmap traversal */
+ unsigned long last;
+ unsigned long top;
+ unsigned long max;
+ unsigned long reserved_top;
+ unsigned long mask;
+ spinlock_t lock;
+ unsigned long *table;
+};
+
+struct hns_roce_ida {
+ struct ida ida;
+ u32 min; /* Lowest ID to allocate. */
+ u32 max; /* Highest ID to allocate. */
+};
+
+/* For Hardware Entry Memory */
+struct hns_roce_hem_table {
+ /* HEM type: 0 = qpc, 1 = mtt, 2 = cqc, 3 = srq, 4 = other */
+ u32 type;
+ /* HEM array element num */
+ unsigned long num_hem;
+ /* Single obj size */
+ unsigned long obj_size;
+ unsigned long table_chunk_size;
+ struct mutex mutex;
+ struct hns_roce_hem **hem;
+ u64 **bt_l1;
+ dma_addr_t *bt_l1_dma_addr;
+ u64 **bt_l0;
+ dma_addr_t *bt_l0_dma_addr;
+};
+
+struct hns_roce_buf_region {
+ u32 offset; /* page offset */
+ u32 count; /* page count */
+ int hopnum; /* addressing hop num */
+};
+
+#define HNS_ROCE_MAX_BT_REGION 3
+#define HNS_ROCE_MAX_BT_LEVEL 3
+struct hns_roce_hem_list {
+ struct list_head root_bt;
+ /* link all bt dma mem by hop config */
+ struct list_head mid_bt[HNS_ROCE_MAX_BT_REGION][HNS_ROCE_MAX_BT_LEVEL];
+ struct list_head btm_bt; /* link all bottom bt in @mid_bt */
+ dma_addr_t root_ba; /* pointer to the root ba table */
+};
+
+enum mtr_type {
+ MTR_DEFAULT = 0,
+ MTR_PBL,
+};
+
+struct hns_roce_buf_attr {
+ struct {
+ size_t size; /* region size */
+ int hopnum; /* multi-hop addressing hop num */
+ } region[HNS_ROCE_MAX_BT_REGION];
+ unsigned int region_count; /* valid region count */
+ unsigned int page_shift; /* buffer page shift */
+ unsigned int user_access; /* umem access flag */
+ u64 iova;
+ enum mtr_type type;
+ bool mtt_only; /* only alloc buffer-required MTT memory */
+ bool adaptive; /* adaptive for page_shift and hopnum */
+};
+
+struct hns_roce_hem_cfg {
+ dma_addr_t root_ba; /* root BA table's address */
+ bool is_direct; /* addressing without BA table */
+ unsigned int ba_pg_shift; /* BA table page shift */
+ unsigned int buf_pg_shift; /* buffer page shift */
+ unsigned int buf_pg_count; /* buffer page count */
+ struct hns_roce_buf_region region[HNS_ROCE_MAX_BT_REGION];
+ unsigned int region_count;
+};
+
+/* memory translate region */
+struct hns_roce_mtr {
+ struct hns_roce_hem_list hem_list; /* multi-hop addressing resource */
+ struct ib_umem *umem; /* user space buffer */
+ struct hns_roce_buf *kmem; /* kernel space buffer */
+ struct hns_roce_hem_cfg hem_cfg; /* config for hardware addressing */
+};
+
+struct hns_roce_mr {
+ struct ib_mr ibmr;
+ u64 iova; /* MR's virtual original addr */
+ u64 size; /* Address range of MR */
+ u32 key; /* Key of MR */
+ u32 pd; /* PD num of MR */
+ u32 access; /* Access permission of MR */
+ int enabled; /* MR's active status */
+ int type; /* MR's register type */
+ u32 pbl_hop_num; /* multi-hop number */
+ struct hns_roce_mtr pbl_mtr;
+ u32 npages;
+ dma_addr_t *page_list;
+};
+
+struct hns_roce_mr_table {
+ struct hns_roce_ida mtpt_ida;
+ struct hns_roce_hem_table mtpt_table;
+};
+
+struct hns_roce_wq {
+ u64 *wrid; /* Work request ID */
+ spinlock_t lock;
+ u32 wqe_cnt; /* WQE num */
+ u32 max_gs;
+ u32 rsv_sge;
+ u32 offset;
+ u32 wqe_shift; /* WQE size */
+ u32 head;
+ u32 tail;
+ void __iomem *db_reg;
+ u32 ext_sge_cnt;
+};
+
+struct hns_roce_sge {
+ unsigned int sge_cnt; /* SGE num */
+ u32 offset;
+ u32 sge_shift; /* SGE size */
+};
+
+struct hns_roce_buf_list {
+ void *buf;
+ dma_addr_t map;
+};
+
+/*
+ * %HNS_ROCE_BUF_DIRECT indicates that all of the memory must be in a
+ * contiguous dma address range.
+ *
+ * %HNS_ROCE_BUF_NOSLEEP indicates that the caller cannot sleep.
+ *
+ * %HNS_ROCE_BUF_NOFAIL indicates that the allocation only fails when the
+ * allocated size is zero; it may succeed even if the allocated size is
+ * smaller than the required size.
+ */
+enum {
+ HNS_ROCE_BUF_DIRECT = BIT(0),
+ HNS_ROCE_BUF_NOSLEEP = BIT(1),
+ HNS_ROCE_BUF_NOFAIL = BIT(2),
+};
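+
+/*
+ * Example (a sketch, assuming a valid hr_dev and size): a kernel caller
+ * that needs a physically contiguous buffer could use:
+ *
+ *	struct hns_roce_buf *buf;
+ *
+ *	buf = hns_roce_buf_alloc(hr_dev, size, PAGE_SHIFT,
+ *				 HNS_ROCE_BUF_DIRECT);
+ *	if (IS_ERR(buf))
+ *		return PTR_ERR(buf);
+ *
+ * hns_roce_buf_alloc() is declared later in this header.
+ */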
+
+struct hns_roce_buf {
+ struct hns_roce_buf_list *trunk_list;
+ u32 ntrunks;
+ u32 npages;
+ unsigned int trunk_shift;
+ unsigned int page_shift;
+};
+
+struct hns_roce_db_pgdir {
+ struct list_head list;
+ DECLARE_BITMAP(order0, HNS_ROCE_DB_PER_PAGE);
+ DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
+ unsigned long *bits[HNS_ROCE_DB_TYPE_COUNT];
+ u32 *page;
+ dma_addr_t db_dma;
+};
+
+struct hns_roce_user_db_page {
+ struct list_head list;
+ struct ib_umem *umem;
+ unsigned long user_virt;
+ refcount_t refcount;
+};
+
+struct hns_roce_db {
+ u32 *db_record;
+ union {
+ struct hns_roce_db_pgdir *pgdir;
+ struct hns_roce_user_db_page *user_page;
+ } u;
+ dma_addr_t dma;
+ void *virt_addr;
+ unsigned long index;
+ unsigned long order;
+};
+
+struct hns_roce_cq {
+ struct ib_cq ib_cq;
+ struct hns_roce_mtr mtr;
+ struct hns_roce_db db;
+ u32 flags;
+ spinlock_t lock;
+ u32 cq_depth;
+ u32 cons_index;
+ u32 *set_ci_db;
+ void __iomem *db_reg;
+ int arm_sn;
+ int cqe_size;
+ unsigned long cqn;
+ u32 vector;
+ refcount_t refcount;
+ struct completion free;
+ struct list_head sq_list; /* all qps on this send cq */
+ struct list_head rq_list; /* all qps on this recv cq */
+ int is_armed; /* cq is armed */
+ struct list_head node; /* all armed cqs are on a list */
+};
+
+struct hns_roce_idx_que {
+ struct hns_roce_mtr mtr;
+ u32 entry_shift;
+ unsigned long *bitmap;
+ u32 head;
+ u32 tail;
+};
+
+struct hns_roce_srq {
+ struct ib_srq ibsrq;
+ unsigned long srqn;
+ u32 wqe_cnt;
+ int max_gs;
+ u32 rsv_sge;
+ u32 wqe_shift;
+ u32 cqn;
+ u32 xrcdn;
+ void __iomem *db_reg;
+
+ refcount_t refcount;
+ struct completion free;
+
+ struct hns_roce_mtr buf_mtr;
+
+ u64 *wrid;
+ struct hns_roce_idx_que idx_que;
+ spinlock_t lock;
+ struct mutex mutex;
+ void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
+ struct hns_roce_db rdb;
+ u32 cap_flags;
+};
+
+struct hns_roce_uar_table {
+ struct hns_roce_bitmap bitmap;
+};
+
+struct hns_roce_bank {
+ struct ida ida;
+ u32 inuse; /* Number of IDs allocated */
+ u32 min; /* Lowest ID to allocate. */
+ u32 max; /* Highest ID to allocate. */
+ u32 next; /* Next ID to allocate. */
+};
+
+struct hns_roce_qp_table {
+ struct hns_roce_hem_table qp_table;
+ struct hns_roce_hem_table irrl_table;
+ struct hns_roce_hem_table trrl_table;
+ struct hns_roce_hem_table sccc_table;
+ struct mutex scc_mutex;
+ struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
+ struct mutex bank_mutex;
+ struct xarray dip_xa;
+};
+
+struct hns_roce_cq_table {
+ struct xarray array;
+ struct hns_roce_hem_table table;
+ struct hns_roce_bank bank[HNS_ROCE_CQ_BANK_NUM];
+ struct mutex bank_mutex;
+};
+
+struct hns_roce_srq_table {
+ struct hns_roce_ida srq_ida;
+ struct xarray xa;
+ struct hns_roce_hem_table table;
+};
+
+struct hns_roce_av {
+ u8 port;
+ u8 gid_index;
+ u8 stat_rate;
+ u8 hop_limit;
+ u32 flowlabel;
+ u16 udp_sport;
+ u8 sl;
+ u8 tclass;
+ u8 dgid[HNS_ROCE_GID_SIZE];
+ u8 mac[ETH_ALEN];
+ u16 vlan_id;
+ u8 vlan_en;
+};
+
+struct hns_roce_ah {
+ struct ib_ah ibah;
+ struct hns_roce_av av;
+};
+
+struct hns_roce_cmd_context {
+ struct completion done;
+ int result;
+ int next;
+ u64 out_param;
+ u16 token;
+ u16 busy;
+};
+
+enum hns_roce_cmdq_state {
+ HNS_ROCE_CMDQ_STATE_NORMAL,
+ HNS_ROCE_CMDQ_STATE_FATAL_ERR,
+};
+
+struct hns_roce_cmdq {
+ struct dma_pool *pool;
+ struct semaphore poll_sem;
+ /*
+ * Event mode: limits command context registration, ensuring the
+ * number of outstanding commands does not exceed max_cmds and that
+ * users stay within the usable region.
+ */
+ struct semaphore event_sem;
+ int max_cmds;
+ spinlock_t context_lock;
+ int free_head;
+ struct hns_roce_cmd_context *context;
+ /*
+ * Whether the command queue uses event mode; initialized to a
+ * non-zero default. Once the command event queue is ready, the
+ * driver can switch to event mode; when the device is closed it
+ * switches back to poll mode (non-event mode).
+ */
+ u8 use_events;
+ enum hns_roce_cmdq_state state;
+};
+
+struct hns_roce_cmd_mailbox {
+ void *buf;
+ dma_addr_t dma;
+};
+
+struct hns_roce_mbox_msg {
+ u64 in_param;
+ u64 out_param;
+ u8 cmd;
+ u32 tag;
+ u16 token;
+ u8 event_en;
+};
+
+struct hns_roce_dev;
+
+enum {
+ HNS_ROCE_FLUSH_FLAG = 0,
+ HNS_ROCE_STOP_FLUSH_FLAG = 1,
+};
+
+struct hns_roce_work {
+ struct hns_roce_dev *hr_dev;
+ struct work_struct work;
+ int event_type;
+ int sub_type;
+ u32 queue_num;
+};
+
+enum hns_roce_cong_type {
+ CONG_TYPE_DCQCN,
+ CONG_TYPE_LDCP,
+ CONG_TYPE_HC3,
+ CONG_TYPE_DIP,
+};
+
+struct hns_roce_qp {
+ struct ib_qp ibqp;
+ struct hns_roce_wq rq;
+ struct hns_roce_db rdb;
+ struct hns_roce_db sdb;
+ unsigned long en_flags;
+ enum ib_sig_type sq_signal_bits;
+ struct hns_roce_wq sq;
+
+ struct hns_roce_mtr mtr;
+
+ u32 buff_size;
+ struct mutex mutex;
+ u8 port;
+ u8 phy_port;
+ u8 sl;
+ u8 resp_depth;
+ u8 state;
+ u32 atomic_rd_en;
+ u32 qkey;
+ void (*event)(struct hns_roce_qp *qp,
+ enum hns_roce_event event_type);
+ unsigned long qpn;
+
+ u32 xrcdn;
+
+ refcount_t refcount;
+ struct completion free;
+
+ struct hns_roce_sge sge;
+ u32 next_sge;
+ enum ib_mtu path_mtu;
+ u32 max_inline_data;
+ u8 free_mr_en;
+
+ /* 0: flush needed, 1: unneeded */
+ unsigned long flush_flag;
+ struct hns_roce_work flush_work;
+ struct list_head node; /* all qps are on a list */
+ struct list_head rq_node; /* all recv qps are on a list */
+ struct list_head sq_node; /* all send qps are on a list */
+ struct hns_user_mmap_entry *dwqe_mmap_entry;
+ u32 config;
+ enum hns_roce_cong_type cong_type;
+ u8 tc_mode;
+ u8 priority;
+ spinlock_t flush_lock;
+ struct hns_roce_dip *dip;
+};
+
+struct hns_roce_ib_iboe {
+ spinlock_t lock;
+ struct net_device *netdevs[HNS_ROCE_MAX_PORTS];
+ struct notifier_block nb;
+ u8 phy_port[HNS_ROCE_MAX_PORTS];
+};
+
+struct hns_roce_ceqe {
+ __le32 comp;
+ __le32 rsv[15];
+};
+
+#define CEQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_ceqe, h, l)
+
+#define CEQE_CQN CEQE_FIELD_LOC(23, 0)
+#define CEQE_OWNER CEQE_FIELD_LOC(31, 31)
+
+struct hns_roce_aeqe {
+ __le32 asyn;
+ union {
+ struct {
+ __le32 num;
+ u32 rsv0;
+ u32 rsv1;
+ } queue_event;
+
+ struct {
+ __le64 out_param;
+ __le16 token;
+ u8 status;
+ u8 rsv0;
+ } __packed cmd;
+ } event;
+ __le32 rsv[12];
+};
+
+#define AEQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_aeqe, h, l)
+
+#define AEQE_EVENT_TYPE AEQE_FIELD_LOC(7, 0)
+#define AEQE_SUB_TYPE AEQE_FIELD_LOC(15, 8)
+#define AEQE_OWNER AEQE_FIELD_LOC(31, 31)
+#define AEQE_EVENT_QUEUE_NUM AEQE_FIELD_LOC(55, 32)
+
+struct hns_roce_eq {
+ struct hns_roce_dev *hr_dev;
+ void __iomem *db_reg;
+
+ int type_flag; /* Aeq:1 ceq:0 */
+ int eqn;
+ u32 entries;
+ int eqe_size;
+ int irq;
+ u32 cons_index;
+ int over_ignore;
+ int coalesce;
+ int arm_st;
+ int hop_num;
+ struct hns_roce_mtr mtr;
+ u16 eq_max_cnt;
+ u32 eq_period;
+ int shift;
+ int event_type;
+ int sub_type;
+ struct work_struct work;
+};
+
+struct hns_roce_eq_table {
+ struct hns_roce_eq *eq;
+};
+
+struct hns_roce_caps {
+ u64 fw_ver;
+ u8 num_ports;
+ int gid_table_len[HNS_ROCE_MAX_PORTS];
+ int pkey_table_len[HNS_ROCE_MAX_PORTS];
+ int local_ca_ack_delay;
+ int num_uars;
+ u32 phy_num_uars;
+ u32 max_sq_sg;
+ u32 max_sq_inline;
+ u32 max_rq_sg;
+ u32 rsv0;
+ u32 num_qps;
+ u32 reserved_qps;
+ u32 num_srqs;
+ u32 max_wqes;
+ u32 max_srq_wrs;
+ u32 max_srq_sges;
+ u32 max_sq_desc_sz;
+ u32 max_rq_desc_sz;
+ u32 rsv2;
+ int max_qp_init_rdma;
+ int max_qp_dest_rdma;
+ u32 num_cqs;
+ u32 max_cqes;
+ u32 min_cqes;
+ u32 min_wqes;
+ u32 reserved_cqs;
+ u32 reserved_srqs;
+ int num_aeq_vectors;
+ int num_comp_vectors;
+ int num_other_vectors;
+ u32 num_mtpts;
+ u32 rsv1;
+ u32 num_srqwqe_segs;
+ u32 num_idx_segs;
+ int reserved_mrws;
+ int reserved_uars;
+ int num_pds;
+ int reserved_pds;
+ u32 num_xrcds;
+ u32 reserved_xrcds;
+ u32 mtt_entry_sz;
+ u32 cqe_sz;
+ u32 page_size_cap;
+ u32 reserved_lkey;
+ int mtpt_entry_sz;
+ int qpc_sz;
+ int irrl_entry_sz;
+ int trrl_entry_sz;
+ int cqc_entry_sz;
+ int sccc_sz;
+ int qpc_timer_entry_sz;
+ int cqc_timer_entry_sz;
+ int srqc_entry_sz;
+ int idx_entry_sz;
+ u32 pbl_ba_pg_sz;
+ u32 pbl_buf_pg_sz;
+ u32 pbl_hop_num;
+ int aeqe_depth;
+ int ceqe_depth;
+ u32 aeqe_size;
+ u32 ceqe_size;
+ enum ib_mtu max_mtu;
+ u32 qpc_bt_num;
+ u32 qpc_timer_bt_num;
+ u32 srqc_bt_num;
+ u32 cqc_bt_num;
+ u32 cqc_timer_bt_num;
+ u32 mpt_bt_num;
+ u32 eqc_bt_num;
+ u32 smac_bt_num;
+ u32 sgid_bt_num;
+ u32 sccc_bt_num;
+ u32 gmv_bt_num;
+ u32 qpc_ba_pg_sz;
+ u32 qpc_buf_pg_sz;
+ u32 qpc_hop_num;
+ u32 srqc_ba_pg_sz;
+ u32 srqc_buf_pg_sz;
+ u32 srqc_hop_num;
+ u32 cqc_ba_pg_sz;
+ u32 cqc_buf_pg_sz;
+ u32 cqc_hop_num;
+ u32 mpt_ba_pg_sz;
+ u32 mpt_buf_pg_sz;
+ u32 mpt_hop_num;
+ u32 mtt_ba_pg_sz;
+ u32 mtt_buf_pg_sz;
+ u32 mtt_hop_num;
+ u32 wqe_sq_hop_num;
+ u32 wqe_sge_hop_num;
+ u32 wqe_rq_hop_num;
+ u32 sccc_ba_pg_sz;
+ u32 sccc_buf_pg_sz;
+ u32 sccc_hop_num;
+ u32 qpc_timer_ba_pg_sz;
+ u32 qpc_timer_buf_pg_sz;
+ u32 qpc_timer_hop_num;
+ u32 cqc_timer_ba_pg_sz;
+ u32 cqc_timer_buf_pg_sz;
+ u32 cqc_timer_hop_num;
+ u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */
+ u32 cqe_buf_pg_sz;
+ u32 cqe_hop_num;
+ u32 srqwqe_ba_pg_sz;
+ u32 srqwqe_buf_pg_sz;
+ u32 srqwqe_hop_num;
+ u32 idx_ba_pg_sz;
+ u32 idx_buf_pg_sz;
+ u32 idx_hop_num;
+ u32 eqe_ba_pg_sz;
+ u32 eqe_buf_pg_sz;
+ u32 eqe_hop_num;
+ u32 gmv_entry_num;
+ u32 gmv_entry_sz;
+ u32 gmv_ba_pg_sz;
+ u32 gmv_buf_pg_sz;
+ u32 gmv_hop_num;
+ u32 sl_num;
+ u32 llm_buf_pg_sz;
+ u32 chunk_sz; /* chunk size in non multihop mode */
+ u64 flags;
+ u16 default_ceq_max_cnt;
+ u16 default_ceq_period;
+ u16 default_aeq_max_cnt;
+ u16 default_aeq_period;
+ u16 default_aeq_arm_st;
+ u16 default_ceq_arm_st;
+ u8 cong_cap;
+ enum hns_roce_cong_type default_cong_type;
+ u32 max_ack_req_msg_len;
+};
+
+enum hns_roce_device_state {
+ HNS_ROCE_DEVICE_STATE_INITED,
+ HNS_ROCE_DEVICE_STATE_RST_DOWN,
+ HNS_ROCE_DEVICE_STATE_UNINIT,
+};
+
+enum hns_roce_hw_pkt_stat_index {
+ HNS_ROCE_HW_RX_RC_PKT_CNT,
+ HNS_ROCE_HW_RX_UC_PKT_CNT,
+ HNS_ROCE_HW_RX_UD_PKT_CNT,
+ HNS_ROCE_HW_RX_XRC_PKT_CNT,
+ HNS_ROCE_HW_RX_PKT_CNT,
+ HNS_ROCE_HW_RX_ERR_PKT_CNT,
+ HNS_ROCE_HW_RX_CNP_PKT_CNT,
+ HNS_ROCE_HW_TX_RC_PKT_CNT,
+ HNS_ROCE_HW_TX_UC_PKT_CNT,
+ HNS_ROCE_HW_TX_UD_PKT_CNT,
+ HNS_ROCE_HW_TX_XRC_PKT_CNT,
+ HNS_ROCE_HW_TX_PKT_CNT,
+ HNS_ROCE_HW_TX_ERR_PKT_CNT,
+ HNS_ROCE_HW_TX_CNP_PKT_CNT,
+ HNS_ROCE_HW_TRP_GET_MPT_ERR_PKT_CNT,
+ HNS_ROCE_HW_TRP_GET_IRRL_ERR_PKT_CNT,
+ HNS_ROCE_HW_ECN_DB_CNT,
+ HNS_ROCE_HW_RX_BUF_CNT,
+ HNS_ROCE_HW_TRP_RX_SOF_CNT,
+ HNS_ROCE_HW_CQ_CQE_CNT,
+ HNS_ROCE_HW_CQ_POE_CNT,
+ HNS_ROCE_HW_CQ_NOTIFY_CNT,
+ HNS_ROCE_HW_CNT_TOTAL
+};
+
+enum hns_roce_sw_dfx_stat_index {
+ HNS_ROCE_DFX_AEQE_CNT,
+ HNS_ROCE_DFX_CEQE_CNT,
+ HNS_ROCE_DFX_CMDS_CNT,
+ HNS_ROCE_DFX_CMDS_ERR_CNT,
+ HNS_ROCE_DFX_MBX_POSTED_CNT,
+ HNS_ROCE_DFX_MBX_POLLED_CNT,
+ HNS_ROCE_DFX_MBX_EVENT_CNT,
+ HNS_ROCE_DFX_QP_CREATE_ERR_CNT,
+ HNS_ROCE_DFX_QP_MODIFY_ERR_CNT,
+ HNS_ROCE_DFX_CQ_CREATE_ERR_CNT,
+ HNS_ROCE_DFX_CQ_MODIFY_ERR_CNT,
+ HNS_ROCE_DFX_SRQ_CREATE_ERR_CNT,
+ HNS_ROCE_DFX_SRQ_MODIFY_ERR_CNT,
+ HNS_ROCE_DFX_XRCD_ALLOC_ERR_CNT,
+ HNS_ROCE_DFX_MR_REG_ERR_CNT,
+ HNS_ROCE_DFX_MR_REREG_ERR_CNT,
+ HNS_ROCE_DFX_AH_CREATE_ERR_CNT,
+ HNS_ROCE_DFX_MMAP_ERR_CNT,
+ HNS_ROCE_DFX_UCTX_ALLOC_ERR_CNT,
+ HNS_ROCE_DFX_CNT_TOTAL
+};
+
+struct hns_roce_hw {
+ int (*cmq_init)(struct hns_roce_dev *hr_dev);
+ void (*cmq_exit)(struct hns_roce_dev *hr_dev);
+ int (*hw_profile)(struct hns_roce_dev *hr_dev);
+ int (*hw_init)(struct hns_roce_dev *hr_dev);
+ void (*hw_exit)(struct hns_roce_dev *hr_dev);
+ int (*post_mbox)(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mbox_msg *mbox_msg);
+ int (*poll_mbox_done)(struct hns_roce_dev *hr_dev);
+ bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy);
+ int (*set_gid)(struct hns_roce_dev *hr_dev, int gid_index,
+ const union ib_gid *gid, const struct ib_gid_attr *attr);
+ int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port,
+ const u8 *addr);
+ int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
+ struct hns_roce_mr *mr);
+ int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mr *mr, int flags,
+ void *mb_buf);
+ int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr);
+ void (*write_cqc)(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
+ dma_addr_t dma_handle);
+ int (*set_hem)(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, int obj, u32 step_idx);
+ int (*clear_hem)(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, int obj,
+ u32 step_idx);
+ int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
+ int attr_mask, enum ib_qp_state cur_state,
+ enum ib_qp_state new_state, struct ib_udata *udata);
+ int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp);
+ void (*dereg_mr)(struct hns_roce_dev *hr_dev);
+ int (*init_eq)(struct hns_roce_dev *hr_dev);
+ void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
+ int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
+ int (*query_cqc)(struct hns_roce_dev *hr_dev, u32 cqn, void *buffer);
+ int (*query_qpc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
+ int (*query_mpt)(struct hns_roce_dev *hr_dev, u32 key, void *buffer);
+ int (*query_srqc)(struct hns_roce_dev *hr_dev, u32 srqn, void *buffer);
+ int (*query_sccc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
+ int (*query_hw_counter)(struct hns_roce_dev *hr_dev,
+ u64 *stats, u32 port, int *hw_counters);
+ int (*get_dscp)(struct hns_roce_dev *hr_dev, u8 dscp,
+ u8 *tc_mode, u8 *priority);
+ const struct ib_device_ops *hns_roce_dev_ops;
+ const struct ib_device_ops *hns_roce_dev_srq_ops;
+};
+
+struct hns_roce_dev {
+ struct ib_device ib_dev;
+ struct pci_dev *pci_dev;
+ struct device *dev;
+ struct hns_roce_uar priv_uar;
+ const char *irq_names[HNS_ROCE_MAX_IRQ_NUM];
+ spinlock_t sm_lock;
+ bool active;
+ bool is_reset;
+ bool dis_db;
+ unsigned long reset_cnt;
+ struct hns_roce_ib_iboe iboe;
+ enum hns_roce_device_state state;
+ struct list_head qp_list; /* list of all qps on this dev */
+ spinlock_t qp_list_lock; /* protect qp_list */
+
+ struct list_head pgdir_list;
+ struct mutex pgdir_mutex;
+ int irq[HNS_ROCE_MAX_IRQ_NUM];
+ u8 __iomem *reg_base;
+ void __iomem *mem_base;
+ struct hns_roce_caps caps;
+ struct xarray qp_table_xa;
+
+ unsigned char dev_addr[HNS_ROCE_MAX_PORTS][ETH_ALEN];
+ u64 sys_image_guid;
+ u32 vendor_id;
+ u32 vendor_part_id;
+ u32 hw_rev;
+ void __iomem *priv_addr;
+
+ struct hns_roce_cmdq cmd;
+ struct hns_roce_ida pd_ida;
+ struct hns_roce_ida xrcd_ida;
+ struct hns_roce_ida uar_ida;
+ struct hns_roce_mr_table mr_table;
+ struct hns_roce_cq_table cq_table;
+ struct hns_roce_srq_table srq_table;
+ struct hns_roce_qp_table qp_table;
+ struct hns_roce_eq_table eq_table;
+ struct hns_roce_hem_table qpc_timer_table;
+ struct hns_roce_hem_table cqc_timer_table;
+ /* GMV is the memory area that the driver allocates for the hardware
+ * to store SGID, SMAC and VLAN information.
+ */
+ struct hns_roce_hem_table gmv_table;
+
+ int cmd_mod;
+ int loop_idc;
+ u32 sdb_offset;
+ u32 odb_offset;
+ const struct hns_roce_hw *hw;
+ void *priv;
+ struct workqueue_struct *irq_workq;
+ struct work_struct ecc_work;
+ u32 func_num;
+ u32 is_vf;
+ u32 cong_algo_tmpl_id;
+ u64 dwqe_page;
+ struct hns_roce_dev_debugfs dbgfs;
+ atomic64_t *dfx_cnt;
+};
+
+enum hns_roce_trace_type {
+ TRACE_SQ,
+ TRACE_RQ,
+ TRACE_SRQ,
+};
+
+static inline const char *trace_type_to_str(enum hns_roce_trace_type type)
+{
+ switch (type) {
+ case TRACE_SQ:
+ return "SQ";
+ case TRACE_RQ:
+ return "RQ";
+ case TRACE_SRQ:
+ return "SRQ";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
+{
+ return container_of(ib_dev, struct hns_roce_dev, ib_dev);
+}
+
+static inline struct hns_roce_ucontext
+ *to_hr_ucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
+}
+
+static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct hns_roce_pd, ibpd);
+}
+
+static inline struct hns_roce_xrcd *to_hr_xrcd(struct ib_xrcd *ibxrcd)
+{
+ return container_of(ibxrcd, struct hns_roce_xrcd, ibxrcd);
+}
+
+static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct hns_roce_ah, ibah);
+}
+
+static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct hns_roce_mr, ibmr);
+}
+
+static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct hns_roce_qp, ibqp);
+}
+
+static inline struct hns_roce_cq *to_hr_cq(struct ib_cq *ib_cq)
+{
+ return container_of(ib_cq, struct hns_roce_cq, ib_cq);
+}
+
+static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct hns_roce_srq, ibsrq);
+}
+
+static inline struct hns_user_mmap_entry *
+to_hns_mmap(struct rdma_user_mmap_entry *rdma_entry)
+{
+ return container_of(rdma_entry, struct hns_user_mmap_entry, rdma_entry);
+}
+
+static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
+{
+ writeq(*(u64 *)val, dest);
+}
+
+static inline struct hns_roce_qp
+ *__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
+{
+ return xa_load(&hr_dev->qp_table_xa, qpn);
+}
+
+static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf,
+ unsigned int offset)
+{
+ return (char *)(buf->trunk_list[offset >> buf->trunk_shift].buf) +
+ (offset & ((1 << buf->trunk_shift) - 1));
+}
+
+static inline dma_addr_t hns_roce_buf_dma_addr(struct hns_roce_buf *buf,
+ unsigned int offset)
+{
+ return buf->trunk_list[offset >> buf->trunk_shift].map +
+ (offset & ((1 << buf->trunk_shift) - 1));
+}
+
+static inline dma_addr_t hns_roce_buf_page(struct hns_roce_buf *buf, u32 idx)
+{
+ return hns_roce_buf_dma_addr(buf, idx << buf->page_shift);
+}
+
+#define hr_hw_page_align(x) ALIGN(x, 1 << HNS_HW_PAGE_SHIFT)
+
+static inline u64 to_hr_hw_page_addr(u64 addr)
+{
+ return addr >> HNS_HW_PAGE_SHIFT;
+}
+
+static inline u32 to_hr_hw_page_shift(u32 page_shift)
+{
+ return page_shift - HNS_HW_PAGE_SHIFT;
+}
+
+static inline u32 to_hr_hem_hopnum(u32 hopnum, u32 count)
+{
+ if (count > 0)
+ return hopnum == HNS_ROCE_HOP_NUM_0 ? 0 : hopnum;
+
+ return 0;
+}
+
+static inline u32 to_hr_hem_entries_size(u32 count, u32 buf_shift)
+{
+ return hr_hw_page_align(count << buf_shift);
+}
+
+static inline u32 to_hr_hem_entries_count(u32 count, u32 buf_shift)
+{
+ return hr_hw_page_align(count << buf_shift) >> buf_shift;
+}
+
+static inline u32 to_hr_hem_entries_shift(u32 count, u32 buf_shift)
+{
+ if (!count)
+ return 0;
+
+ return ilog2(to_hr_hem_entries_count(count, buf_shift));
+}
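+
+/*
+ * Worked example for the helpers above (assuming 64-byte CQEs, i.e.
+ * HNS_ROCE_V3_CQE_SIZE, so buf_shift = 6): for count = 128,
+ * to_hr_hem_entries_size() = ALIGN(128 << 6, 4096) = 8192,
+ * to_hr_hem_entries_count() = 8192 >> 6 = 128 and
+ * to_hr_hem_entries_shift() = ilog2(128) = 7.
+ */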
+
+#define DSCP_SHIFT 2
+
+static inline u8 get_tclass(const struct ib_global_route *grh)
+{
+ return grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP ?
+ grh->traffic_class >> DSCP_SHIFT : grh->traffic_class;
+}
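+
+/*
+ * Example: for a RoCEv2 GID (IB_GID_TYPE_ROCE_UDP_ENCAP) with
+ * traffic_class 0xb8, get_tclass() returns the DSCP value 0xb8 >> 2 = 46
+ * (Expedited Forwarding); for other GID types the traffic class is
+ * returned unchanged.
+ */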
+
+void hns_roce_init_uar_table(struct hns_roce_dev *dev);
+int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
+
+int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
+void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
+void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
+ u64 out_param);
+int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
+void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);
+
+/* hns roce hw needs the current block and next block address from the mtt */
+#define MTT_MIN_COUNT 2
+static inline dma_addr_t hns_roce_get_mtr_ba(struct hns_roce_mtr *mtr)
+{
+ return mtr->hem_cfg.root_ba;
+}
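+
+/*
+ * Example (a sketch, assuming a CQ whose buffer was created with
+ * hns_roce_mtr_create()): fetching the two block addresses that the
+ * hardware expects could look like:
+ *
+ *	u64 mtts[MTT_MIN_COUNT] = {};
+ *	int ret;
+ *
+ *	ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts,
+ *				MTT_MIN_COUNT);
+ */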
+
+int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ u32 offset, u64 *mtt_buf, int mtt_max);
+int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ struct hns_roce_buf_attr *buf_attr,
+ unsigned int page_shift, struct ib_udata *udata,
+ unsigned long user_addr);
+void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtr *mtr);
+int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ dma_addr_t *pages, unsigned int page_cnt);
+
+void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
+void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
+void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
+int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
+void hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
+void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);
+
+void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
+void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
+
+void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);
+
+int hns_roce_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
+int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
+static inline int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags)
+{
+ return 0;
+}
+
+int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+
+struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
+struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
+ struct ib_udata *udata);
+struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
+ u64 length, u64 virt_addr,
+ int mr_access_flags, struct ib_pd *pd,
+ struct ib_udata *udata);
+struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg);
+int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
+int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
+unsigned long key_to_hw_index(u32 key);
+
+void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf);
+struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
+ u32 page_shift, u32 flags);
+
+int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
+ int buf_cnt, struct hns_roce_buf *buf,
+ unsigned int page_shift);
+int hns_roce_get_umem_bufs(dma_addr_t *bufs,
+ int buf_cnt, struct ib_umem *umem,
+ unsigned int page_shift);
+
+int hns_roce_create_srq(struct ib_srq *srq,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata);
+int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
+
+int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
+int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
+
+int hns_roce_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
+void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
+void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
+void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n);
+bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
+ struct ib_cq *ib_cq);
+void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
+ struct hns_roce_cq *recv_cq);
+void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
+ struct hns_roce_cq *recv_cq);
+void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
+void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ struct ib_udata *udata);
+__be32 send_ieth(const struct ib_send_wr *wr);
+int to_hr_qp_type(int qp_type);
+
+int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+
+int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
+ struct hns_roce_db *db);
+void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
+ struct hns_roce_db *db);
+int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
+ int order);
+void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);
+
+void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
+void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
+void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp);
+void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
+void hns_roce_flush_cqe(struct hns_roce_dev *hr_dev, u32 qpn);
+void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
+void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
+int hns_roce_init(struct hns_roce_dev *hr_dev);
+void hns_roce_exit(struct hns_roce_dev *hr_dev);
+int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq);
+int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq);
+int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp);
+int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp);
+int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr);
+int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr);
+int hns_roce_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq);
+int hns_roce_fill_res_srq_entry_raw(struct sk_buff *msg, struct ib_srq *ib_srq);
+struct hns_user_mmap_entry *
+hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
+ size_t length,
+ enum hns_roce_mmap_type mmap_type);
+bool check_sl_valid(struct hns_roce_dev *hr_dev, u8 sl);
+
+#endif /* _HNS_ROCE_DEVICE_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
new file mode 100644
index 000000000000..3d479c63b117
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -0,0 +1,1467 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "hns_roce_device.h"
+#include "hns_roce_hem.h"
+#include "hns_roce_common.h"
+
+#define HEM_INDEX_BUF BIT(0)
+#define HEM_INDEX_L0 BIT(1)
+#define HEM_INDEX_L1 BIT(2)
+struct hns_roce_hem_index {
+ u64 buf;
+ u64 l0;
+ u64 l1;
+ u32 inited; /* indicates which index is available */
+};
+
+bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
+{
+ int hop_num = 0;
+
+ switch (type) {
+ case HEM_TYPE_QPC:
+ hop_num = hr_dev->caps.qpc_hop_num;
+ break;
+ case HEM_TYPE_MTPT:
+ hop_num = hr_dev->caps.mpt_hop_num;
+ break;
+ case HEM_TYPE_CQC:
+ hop_num = hr_dev->caps.cqc_hop_num;
+ break;
+ case HEM_TYPE_SRQC:
+ hop_num = hr_dev->caps.srqc_hop_num;
+ break;
+ case HEM_TYPE_SCCC:
+ hop_num = hr_dev->caps.sccc_hop_num;
+ break;
+ case HEM_TYPE_QPC_TIMER:
+ hop_num = hr_dev->caps.qpc_timer_hop_num;
+ break;
+ case HEM_TYPE_CQC_TIMER:
+ hop_num = hr_dev->caps.cqc_timer_hop_num;
+ break;
+ case HEM_TYPE_GMV:
+ hop_num = hr_dev->caps.gmv_hop_num;
+ break;
+ default:
+ return false;
+ }
+
+ return hop_num;
+}
+
+static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 hem_idx,
+ u32 bt_chunk_num, u64 hem_max_num)
+{
+ u64 start_idx = round_down(hem_idx, bt_chunk_num);
+ u64 check_max_num = start_idx + bt_chunk_num;
+ u64 i;
+
+ for (i = start_idx; (i < check_max_num) && (i < hem_max_num); i++)
+ if (i != hem_idx && hem[i])
+ return false;
+
+ return true;
+}
+
+static bool hns_roce_check_bt_null(u64 **bt, u64 ba_idx, u32 bt_chunk_num)
+{
+ u64 start_idx = round_down(ba_idx, bt_chunk_num);
+ int i;
+
+ for (i = 0; i < bt_chunk_num; i++)
+ if (i != ba_idx && bt[start_idx + i])
+ return false;
+
+ return true;
+}
+
+static int hns_roce_get_bt_num(u32 table_type, u32 hop_num)
+{
+ if (check_whether_bt_num_3(table_type, hop_num))
+ return 3;
+ else if (check_whether_bt_num_2(table_type, hop_num))
+ return 2;
+ else if (check_whether_bt_num_1(table_type, hop_num))
+ return 1;
+ else
+ return 0;
+}
+
+static int get_hem_table_config(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_mhop *mhop,
+ u32 type)
+{
+ struct device *dev = hr_dev->dev;
+
+ switch (type) {
+ case HEM_TYPE_QPC:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.qpc_bt_num;
+ mhop->hop_num = hr_dev->caps.qpc_hop_num;
+ break;
+ case HEM_TYPE_MTPT:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.mpt_bt_num;
+ mhop->hop_num = hr_dev->caps.mpt_hop_num;
+ break;
+ case HEM_TYPE_CQC:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.cqc_bt_num;
+ mhop->hop_num = hr_dev->caps.cqc_hop_num;
+ break;
+ case HEM_TYPE_SCCC:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.sccc_bt_num;
+ mhop->hop_num = hr_dev->caps.sccc_hop_num;
+ break;
+ case HEM_TYPE_QPC_TIMER:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.qpc_timer_bt_num;
+ mhop->hop_num = hr_dev->caps.qpc_timer_hop_num;
+ break;
+ case HEM_TYPE_CQC_TIMER:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.cqc_timer_bt_num;
+ mhop->hop_num = hr_dev->caps.cqc_timer_hop_num;
+ break;
+ case HEM_TYPE_SRQC:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.srqc_bt_num;
+ mhop->hop_num = hr_dev->caps.srqc_hop_num;
+ break;
+ case HEM_TYPE_GMV:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.gmv_buf_pg_sz +
+ PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.gmv_ba_pg_sz +
+ PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.gmv_bt_num;
+ mhop->hop_num = hr_dev->caps.gmv_hop_num;
+ break;
+ default:
+ dev_err(dev, "table %u not support multi-hop addressing!\n",
+ type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long *obj,
+ struct hns_roce_hem_mhop *mhop)
+{
+ struct device *dev = hr_dev->dev;
+ u32 chunk_ba_num;
+ u32 chunk_size;
+ u32 table_idx;
+ u32 bt_num;
+
+ if (get_hem_table_config(hr_dev, mhop, table->type))
+ return -EINVAL;
+
+ if (!obj)
+ return 0;
+
+ /*
+ * QPC/MTPT/CQC/SRQC/SCCC alloc hem for buffer pages.
+ * MTT/CQE alloc hem for bt pages.
+ */
+ bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
+ chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
+ chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :
+ mhop->bt_chunk_size;
+ table_idx = *obj / (chunk_size / table->obj_size);
+ switch (bt_num) {
+ case 3:
+ mhop->l2_idx = table_idx & (chunk_ba_num - 1);
+ mhop->l1_idx = table_idx / chunk_ba_num & (chunk_ba_num - 1);
+ mhop->l0_idx = (table_idx / chunk_ba_num) / chunk_ba_num;
+ break;
+ case 2:
+ mhop->l1_idx = table_idx & (chunk_ba_num - 1);
+ mhop->l0_idx = table_idx / chunk_ba_num;
+ break;
+ case 1:
+ mhop->l0_idx = table_idx;
+ break;
+ default:
+ dev_err(dev, "table %u not support hop_num = %u!\n",
+ table->type, mhop->hop_num);
+ return -EINVAL;
+ }
+ if (mhop->l0_idx >= mhop->ba_l0_num)
+ mhop->l0_idx %= mhop->ba_l0_num;
+
+ return 0;
+}
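+
+/*
+ * Worked example (a sketch, assuming 4 KB pages, qpc_buf_pg_sz and
+ * qpc_ba_pg_sz both 0, a 256-byte QPC and qpc_hop_num = 2): each 4 KB
+ * chunk holds 4096 / 256 = 16 QPCs and each BA chunk holds 4096 / 8 = 512
+ * entries, so for QPN 10000 the table_idx is 10000 / 16 = 625, giving
+ * l1_idx = 625 & 511 = 113 and l0_idx = 625 / 512 = 1.
+ */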
+
+static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
+ unsigned long hem_alloc_size)
+{
+ struct hns_roce_hem *hem;
+ int order;
+ void *buf;
+
+ order = get_order(hem_alloc_size);
+ if (PAGE_SIZE << order != hem_alloc_size) {
+ dev_err(hr_dev->dev, "invalid hem_alloc_size: %lu!\n",
+ hem_alloc_size);
+ return NULL;
+ }
+
+ hem = kmalloc(sizeof(*hem), GFP_KERNEL);
+ if (!hem)
+ return NULL;
+
+ buf = dma_alloc_coherent(hr_dev->dev, hem_alloc_size,
+ &hem->dma, GFP_KERNEL);
+ if (!buf)
+ goto fail;
+
+ hem->buf = buf;
+ hem->size = hem_alloc_size;
+
+ return hem;
+
+fail:
+ kfree(hem);
+ return NULL;
+}
+
+void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
+{
+ if (!hem)
+ return;
+
+ dma_free_coherent(hr_dev->dev, hem->size, hem->buf, hem->dma);
+
+ kfree(hem);
+}
+
+static int calc_hem_config(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj,
+ struct hns_roce_hem_mhop *mhop,
+ struct hns_roce_hem_index *index)
+{
+ struct device *dev = hr_dev->dev;
+ unsigned long mhop_obj = obj;
+ u32 l0_idx, l1_idx, l2_idx;
+ u32 chunk_ba_num;
+ u32 bt_num;
+ int ret;
+
+ ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, mhop);
+ if (ret)
+ return ret;
+
+ l0_idx = mhop->l0_idx;
+ l1_idx = mhop->l1_idx;
+ l2_idx = mhop->l2_idx;
+ chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
+ bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
+ switch (bt_num) {
+ case 3:
+ index->l1 = l0_idx * chunk_ba_num + l1_idx;
+ index->l0 = l0_idx;
+ index->buf = l0_idx * chunk_ba_num * chunk_ba_num +
+ l1_idx * chunk_ba_num + l2_idx;
+ break;
+ case 2:
+ index->l0 = l0_idx;
+ index->buf = l0_idx * chunk_ba_num + l1_idx;
+ break;
+ case 1:
+ index->buf = l0_idx;
+ break;
+ default:
+ dev_err(dev, "table %u not support mhop.hop_num = %u!\n",
+ table->type, mhop->hop_num);
+ return -EINVAL;
+ }
+
+ if (unlikely(index->buf >= table->num_hem)) {
+ dev_err(dev, "table %u exceed hem limt idx %llu, max %lu!\n",
+ table->type, index->buf, table->num_hem);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void free_mhop_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ struct hns_roce_hem_mhop *mhop,
+ struct hns_roce_hem_index *index)
+{
+ u32 bt_size = mhop->bt_chunk_size;
+ struct device *dev = hr_dev->dev;
+
+ if (index->inited & HEM_INDEX_BUF) {
+ hns_roce_free_hem(hr_dev, table->hem[index->buf]);
+ table->hem[index->buf] = NULL;
+ }
+
+ if (index->inited & HEM_INDEX_L1) {
+ dma_free_coherent(dev, bt_size, table->bt_l1[index->l1],
+ table->bt_l1_dma_addr[index->l1]);
+ table->bt_l1[index->l1] = NULL;
+ }
+
+ if (index->inited & HEM_INDEX_L0) {
+ dma_free_coherent(dev, bt_size, table->bt_l0[index->l0],
+ table->bt_l0_dma_addr[index->l0]);
+ table->bt_l0[index->l0] = NULL;
+ }
+}
+
+static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ struct hns_roce_hem_mhop *mhop,
+ struct hns_roce_hem_index *index)
+{
+ u32 bt_size = mhop->bt_chunk_size;
+ struct device *dev = hr_dev->dev;
+ u64 bt_ba;
+ u32 size;
+ int ret;
+
+ /* alloc L1 BA's chunk */
+ if ((check_whether_bt_num_3(table->type, mhop->hop_num) ||
+ check_whether_bt_num_2(table->type, mhop->hop_num)) &&
+ !table->bt_l0[index->l0]) {
+ table->bt_l0[index->l0] = dma_alloc_coherent(dev, bt_size,
+ &table->bt_l0_dma_addr[index->l0],
+ GFP_KERNEL);
+ if (!table->bt_l0[index->l0]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ index->inited |= HEM_INDEX_L0;
+ }
+
+ /* alloc L2 BA's chunk */
+ if (check_whether_bt_num_3(table->type, mhop->hop_num) &&
+ !table->bt_l1[index->l1]) {
+ table->bt_l1[index->l1] = dma_alloc_coherent(dev, bt_size,
+ &table->bt_l1_dma_addr[index->l1],
+ GFP_KERNEL);
+ if (!table->bt_l1[index->l1]) {
+ ret = -ENOMEM;
+ goto err_alloc_hem;
+ }
+ index->inited |= HEM_INDEX_L1;
+ *(table->bt_l0[index->l0] + mhop->l1_idx) =
+ table->bt_l1_dma_addr[index->l1];
+ }
+
+ /*
+ * alloc buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC.
+ * alloc bt space chunk for MTT/CQE.
+ */
+ size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
+ table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size);
+ if (!table->hem[index->buf]) {
+ ret = -ENOMEM;
+ goto err_alloc_hem;
+ }
+
+ index->inited |= HEM_INDEX_BUF;
+ bt_ba = table->hem[index->buf]->dma;
+
+ if (table->type < HEM_TYPE_MTT) {
+ if (mhop->hop_num == 2)
+ *(table->bt_l1[index->l1] + mhop->l2_idx) = bt_ba;
+ else if (mhop->hop_num == 1)
+ *(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
+ } else if (mhop->hop_num == 2) {
+ *(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
+ }
+
+ return 0;
+err_alloc_hem:
+ free_mhop_hem(hr_dev, table, mhop, index);
+out:
+ return ret;
+}
+
+static int set_mhop_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj,
+ struct hns_roce_hem_mhop *mhop,
+ struct hns_roce_hem_index *index)
+{
+ struct device *dev = hr_dev->dev;
+ u32 step_idx;
+ int ret = 0;
+
+ if (index->inited & HEM_INDEX_L0) {
+ ret = hr_dev->hw->set_hem(hr_dev, table, obj, 0);
+ if (ret) {
+ dev_err(dev, "set HEM step 0 failed!\n");
+ goto out;
+ }
+ }
+
+ if (index->inited & HEM_INDEX_L1) {
+ ret = hr_dev->hw->set_hem(hr_dev, table, obj, 1);
+ if (ret) {
+ dev_err(dev, "set HEM step 1 failed!\n");
+ goto out;
+ }
+ }
+
+ if (index->inited & HEM_INDEX_BUF) {
+ if (mhop->hop_num == HNS_ROCE_HOP_NUM_0)
+ step_idx = 0;
+ else
+ step_idx = mhop->hop_num;
+ ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx);
+ if (ret)
+ dev_err(dev, "set HEM step last failed!\n");
+ }
+out:
+ return ret;
+}
+
+static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ unsigned long obj)
+{
+ struct hns_roce_hem_index index = {};
+ struct hns_roce_hem_mhop mhop = {};
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
+ if (ret) {
+ dev_err(dev, "calc hem config failed!\n");
+ return ret;
+ }
+
+ mutex_lock(&table->mutex);
+ if (table->hem[index.buf]) {
+ refcount_inc(&table->hem[index.buf]->refcount);
+ goto out;
+ }
+
+ ret = alloc_mhop_hem(hr_dev, table, &mhop, &index);
+ if (ret) {
+ dev_err(dev, "alloc mhop hem failed!\n");
+ goto out;
+ }
+
+ /* set HEM base address to hardware */
+ if (table->type < HEM_TYPE_MTT) {
+ ret = set_mhop_hem(hr_dev, table, obj, &mhop, &index);
+ if (ret) {
+ dev_err(dev, "set HEM address to HW failed!\n");
+ goto err_alloc;
+ }
+ }
+
+ refcount_set(&table->hem[index.buf]->refcount, 1);
+ goto out;
+
+err_alloc:
+ free_mhop_hem(hr_dev, table, &mhop, &index);
+out:
+ mutex_unlock(&table->mutex);
+ return ret;
+}
+
+int hns_roce_table_get(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj)
+{
+ struct device *dev = hr_dev->dev;
+ unsigned long i;
+ int ret = 0;
+
+ if (hns_roce_check_whether_mhop(hr_dev, table->type))
+ return hns_roce_table_mhop_get(hr_dev, table, obj);
+
+ i = obj / (table->table_chunk_size / table->obj_size);
+
+ mutex_lock(&table->mutex);
+
+ if (table->hem[i]) {
+ refcount_inc(&table->hem[i]->refcount);
+ goto out;
+ }
+
+ table->hem[i] = hns_roce_alloc_hem(hr_dev, table->table_chunk_size);
+ if (!table->hem[i]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Set HEM base address (128K/page, pa) to hardware */
+ ret = hr_dev->hw->set_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT);
+ if (ret) {
+ hns_roce_free_hem(hr_dev, table->hem[i]);
+ table->hem[i] = NULL;
+ dev_err(dev, "set HEM base address to HW failed, ret = %d.\n",
+ ret);
+ goto out;
+ }
+
+ refcount_set(&table->hem[i]->refcount, 1);
+out:
+ mutex_unlock(&table->mutex);
+ return ret;
+}
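+
+/*
+ * Chunk indexing sketch for the non-mhop path above, with hypothetical
+ * numbers only: if table_chunk_size were 256K and obj_size 512 bytes,
+ * then 512 objects share one chunk, so objects 0..511 map to hem[0] and
+ * object 512 is the first one that triggers allocation of hem[1].
+ */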
+
+static void clear_mhop_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj,
+ struct hns_roce_hem_mhop *mhop,
+ struct hns_roce_hem_index *index)
+{
+ struct device *dev = hr_dev->dev;
+ u32 hop_num = mhop->hop_num;
+ u32 chunk_ba_num;
+ u32 step_idx;
+ int ret;
+
+ index->inited = HEM_INDEX_BUF;
+ chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
+ if (check_whether_bt_num_2(table->type, hop_num)) {
+ if (hns_roce_check_hem_null(table->hem, index->buf,
+ chunk_ba_num, table->num_hem))
+ index->inited |= HEM_INDEX_L0;
+ } else if (check_whether_bt_num_3(table->type, hop_num)) {
+ if (hns_roce_check_hem_null(table->hem, index->buf,
+ chunk_ba_num, table->num_hem)) {
+ index->inited |= HEM_INDEX_L1;
+ if (hns_roce_check_bt_null(table->bt_l1, index->l1,
+ chunk_ba_num))
+ index->inited |= HEM_INDEX_L0;
+ }
+ }
+
+ if (table->type < HEM_TYPE_MTT) {
+ if (hop_num == HNS_ROCE_HOP_NUM_0)
+ step_idx = 0;
+ else
+ step_idx = hop_num;
+
+ ret = hr_dev->hw->clear_hem(hr_dev, table, obj, step_idx);
+ if (ret)
+ dev_warn(dev, "failed to clear hop%u HEM, ret = %d.\n",
+ hop_num, ret);
+
+ if (index->inited & HEM_INDEX_L1) {
+ ret = hr_dev->hw->clear_hem(hr_dev, table, obj, 1);
+ if (ret)
+ dev_warn(dev, "failed to clear HEM step 1, ret = %d.\n",
+ ret);
+ }
+
+ if (index->inited & HEM_INDEX_L0) {
+ ret = hr_dev->hw->clear_hem(hr_dev, table, obj, 0);
+ if (ret)
+ dev_warn(dev, "failed to clear HEM step 0, ret = %d.\n",
+ ret);
+ }
+ }
+}
+
+static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ unsigned long obj,
+ int check_refcount)
+{
+ struct hns_roce_hem_index index = {};
+ struct hns_roce_hem_mhop mhop = {};
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
+ if (ret) {
+ dev_err(dev, "calc hem config failed!\n");
+ return;
+ }
+
+ if (!check_refcount)
+ mutex_lock(&table->mutex);
+ else if (!refcount_dec_and_mutex_lock(&table->hem[index.buf]->refcount,
+ &table->mutex))
+ return;
+
+ clear_mhop_hem(hr_dev, table, obj, &mhop, &index);
+ free_mhop_hem(hr_dev, table, &mhop, &index);
+
+ mutex_unlock(&table->mutex);
+}
+
+void hns_roce_table_put(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj)
+{
+ struct device *dev = hr_dev->dev;
+ unsigned long i;
+ int ret;
+
+ if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
+ hns_roce_table_mhop_put(hr_dev, table, obj, 1);
+ return;
+ }
+
+ i = obj / (table->table_chunk_size / table->obj_size);
+
+ if (!refcount_dec_and_mutex_lock(&table->hem[i]->refcount,
+ &table->mutex))
+ return;
+
+ ret = hr_dev->hw->clear_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT);
+ if (ret)
+ dev_warn_ratelimited(dev, "failed to clear HEM base address, ret = %d.\n",
+ ret);
+
+ hns_roce_free_hem(hr_dev, table->hem[i]);
+ table->hem[i] = NULL;
+
+ mutex_unlock(&table->mutex);
+}
+
+void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ unsigned long obj, dma_addr_t *dma_handle)
+{
+ struct hns_roce_hem_mhop mhop;
+ struct hns_roce_hem *hem;
+ unsigned long mhop_obj = obj;
+ unsigned long obj_per_chunk;
+ unsigned long idx_offset;
+ int offset, dma_offset;
+ void *addr = NULL;
+ u32 hem_idx = 0;
+ int i, j;
+
+ mutex_lock(&table->mutex);
+
+ if (!hns_roce_check_whether_mhop(hr_dev, table->type)) {
+ obj_per_chunk = table->table_chunk_size / table->obj_size;
+ hem = table->hem[obj / obj_per_chunk];
+ idx_offset = obj % obj_per_chunk;
+ dma_offset = offset = idx_offset * table->obj_size;
+ } else {
+ u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */
+
+ if (hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop))
+ goto out;
+ /* mtt mhop */
+ i = mhop.l0_idx;
+ j = mhop.l1_idx;
+ if (mhop.hop_num == 2)
+ hem_idx = i * (mhop.bt_chunk_size / BA_BYTE_LEN) + j;
+ else if (mhop.hop_num == 1 ||
+ mhop.hop_num == HNS_ROCE_HOP_NUM_0)
+ hem_idx = i;
+
+ hem = table->hem[hem_idx];
+ dma_offset = offset = obj * seg_size % mhop.bt_chunk_size;
+ if (mhop.hop_num == 2)
+ dma_offset = offset = 0;
+ }
+
+ if (!hem)
+ goto out;
+
+ *dma_handle = hem->dma + dma_offset;
+ addr = hem->buf + offset;
+
+out:
+ mutex_unlock(&table->mutex);
+ return addr;
+}
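+
+/*
+ * Numeric sketch for the mhop branch above, assuming a hypothetical MTT
+ * table with a 4K bt chunk (512 BAs) and hop_num = 1: for obj = 100 the
+ * lookup uses hem[l0_idx] and returns hem->buf / hem->dma plus
+ * 100 * 64 % 4096 = 2304 bytes, i.e. the 8-BA segment of that object.
+ */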
+
+int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, u32 type,
+ unsigned long obj_size, unsigned long nobj)
+{
+ unsigned long obj_per_chunk;
+ unsigned long num_hem;
+
+ if (!hns_roce_check_whether_mhop(hr_dev, type)) {
+ table->table_chunk_size = hr_dev->caps.chunk_sz;
+ obj_per_chunk = table->table_chunk_size / obj_size;
+ num_hem = DIV_ROUND_UP(nobj, obj_per_chunk);
+
+ table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
+ if (!table->hem)
+ return -ENOMEM;
+ } else {
+ struct hns_roce_hem_mhop mhop = {};
+ unsigned long buf_chunk_size;
+ unsigned long bt_chunk_size;
+ unsigned long bt_chunk_num;
+ unsigned long num_bt_l0;
+ u32 hop_num;
+
+ if (get_hem_table_config(hr_dev, &mhop, type))
+ return -EINVAL;
+
+ buf_chunk_size = mhop.buf_chunk_size;
+ bt_chunk_size = mhop.bt_chunk_size;
+ num_bt_l0 = mhop.ba_l0_num;
+ hop_num = mhop.hop_num;
+
+ obj_per_chunk = buf_chunk_size / obj_size;
+ num_hem = DIV_ROUND_UP(nobj, obj_per_chunk);
+ bt_chunk_num = bt_chunk_size / BA_BYTE_LEN;
+
+ if (type >= HEM_TYPE_MTT)
+ num_bt_l0 = bt_chunk_num;
+
+ table->hem = kcalloc(num_hem, sizeof(*table->hem),
+ GFP_KERNEL);
+ if (!table->hem)
+ goto err_kcalloc_hem_buf;
+
+ if (check_whether_bt_num_3(type, hop_num)) {
+ unsigned long num_bt_l1;
+
+ num_bt_l1 = DIV_ROUND_UP(num_hem, bt_chunk_num);
+ table->bt_l1 = kcalloc(num_bt_l1,
+ sizeof(*table->bt_l1),
+ GFP_KERNEL);
+ if (!table->bt_l1)
+ goto err_kcalloc_bt_l1;
+
+ table->bt_l1_dma_addr = kcalloc(num_bt_l1,
+ sizeof(*table->bt_l1_dma_addr),
+ GFP_KERNEL);
+
+ if (!table->bt_l1_dma_addr)
+ goto err_kcalloc_l1_dma;
+ }
+
+ if (check_whether_bt_num_2(type, hop_num) ||
+ check_whether_bt_num_3(type, hop_num)) {
+ table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0),
+ GFP_KERNEL);
+ if (!table->bt_l0)
+ goto err_kcalloc_bt_l0;
+
+ table->bt_l0_dma_addr = kcalloc(num_bt_l0,
+ sizeof(*table->bt_l0_dma_addr),
+ GFP_KERNEL);
+ if (!table->bt_l0_dma_addr)
+ goto err_kcalloc_l0_dma;
+ }
+ }
+
+ table->type = type;
+ table->num_hem = num_hem;
+ table->obj_size = obj_size;
+ mutex_init(&table->mutex);
+
+ return 0;
+
+err_kcalloc_l0_dma:
+ kfree(table->bt_l0);
+ table->bt_l0 = NULL;
+
+err_kcalloc_bt_l0:
+ kfree(table->bt_l1_dma_addr);
+ table->bt_l1_dma_addr = NULL;
+
+err_kcalloc_l1_dma:
+ kfree(table->bt_l1);
+ table->bt_l1 = NULL;
+
+err_kcalloc_bt_l1:
+ kfree(table->hem);
+ table->hem = NULL;
+
+err_kcalloc_hem_buf:
+ return -ENOMEM;
+}
+
+static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table)
+{
+ struct hns_roce_hem_mhop mhop;
+ u32 buf_chunk_size;
+ u64 obj;
+ int i;
+
+ if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop))
+ return;
+ buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size :
+ mhop.bt_chunk_size;
+
+ for (i = 0; i < table->num_hem; ++i) {
+ obj = i * buf_chunk_size / table->obj_size;
+ if (table->hem[i])
+ hns_roce_table_mhop_put(hr_dev, table, obj, 0);
+ }
+
+ kfree(table->hem);
+ table->hem = NULL;
+ kfree(table->bt_l1);
+ table->bt_l1 = NULL;
+ kfree(table->bt_l1_dma_addr);
+ table->bt_l1_dma_addr = NULL;
+ kfree(table->bt_l0);
+ table->bt_l0 = NULL;
+ kfree(table->bt_l0_dma_addr);
+ table->bt_l0_dma_addr = NULL;
+}
+
+void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table)
+{
+ struct device *dev = hr_dev->dev;
+ unsigned long i;
+ int obj;
+ int ret;
+
+ if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
+ hns_roce_cleanup_mhop_hem_table(hr_dev, table);
+ mutex_destroy(&table->mutex);
+ return;
+ }
+
+ for (i = 0; i < table->num_hem; ++i)
+ if (table->hem[i]) {
+ obj = i * table->table_chunk_size / table->obj_size;
+ ret = hr_dev->hw->clear_hem(hr_dev, table, obj, 0);
+ if (ret)
+ dev_err(dev, "clear HEM base address failed, ret = %d.\n",
+ ret);
+
+ hns_roce_free_hem(hr_dev, table->hem[i]);
+ }
+
+ mutex_destroy(&table->mutex);
+ kfree(table->hem);
+}
+
+void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
+{
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->srq_table.table);
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
+ if (hr_dev->caps.qpc_timer_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->qpc_timer_table);
+ if (hr_dev->caps.cqc_timer_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->cqc_timer_table);
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->qp_table.sccc_table);
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->qp_table.trrl_table);
+
+ if (hr_dev->caps.gmv_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->gmv_table);
+
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
+}
+
+struct hns_roce_hem_item {
+ struct list_head list; /* link all hems in the same bt level */
+ struct list_head sibling; /* link all hems in last hop for mtt */
+ void *addr;
+ dma_addr_t dma_addr;
+ size_t count; /* max ba numbers */
+ int start; /* start buf offset in this hem */
+ int end; /* end buf offset in this hem */
+ bool exist_bt;
+};
+
+/* All HEM items are linked in a tree structure */
+struct hns_roce_hem_head {
+ struct list_head branch[HNS_ROCE_MAX_BT_REGION];
+ struct list_head root;
+ struct list_head leaf;
+};
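+
+/*
+ * A rough sketch of how the heads above are used later in this file:
+ * head.root holds the single root bt item, head.branch[i] holds the
+ * per-region level-0 items, and head.leaf collects the bottom bt items,
+ * which are finally spliced into hem_list->btm_bt and walked via the
+ * ->sibling links by hns_roce_hem_list_find_mtt().
+ */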
+
+static struct hns_roce_hem_item *
+hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
+ bool exist_bt)
+{
+ struct hns_roce_hem_item *hem;
+
+ hem = kzalloc(sizeof(*hem), GFP_KERNEL);
+ if (!hem)
+ return NULL;
+
+ if (exist_bt) {
+ hem->addr = dma_alloc_coherent(hr_dev->dev, count * BA_BYTE_LEN,
+ &hem->dma_addr, GFP_KERNEL);
+ if (!hem->addr) {
+ kfree(hem);
+ return NULL;
+ }
+ }
+
+ hem->exist_bt = exist_bt;
+ hem->count = count;
+ hem->start = start;
+ hem->end = end;
+ INIT_LIST_HEAD(&hem->list);
+ INIT_LIST_HEAD(&hem->sibling);
+
+ return hem;
+}
+
+static void hem_list_free_item(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_item *hem)
+{
+ if (hem->exist_bt)
+ dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN,
+ hem->addr, hem->dma_addr);
+ kfree(hem);
+}
+
+static void hem_list_free_all(struct hns_roce_dev *hr_dev,
+ struct list_head *head)
+{
+ struct hns_roce_hem_item *hem, *temp_hem;
+
+ list_for_each_entry_safe(hem, temp_hem, head, list) {
+ list_del(&hem->list);
+ hem_list_free_item(hr_dev, hem);
+ }
+}
+
+static void hem_list_link_bt(void *base_addr, u64 table_addr)
+{
+ *(u64 *)(base_addr) = table_addr;
+}
+
+/* assign L0 table address to hem from root bt */
+static void hem_list_assign_bt(struct hns_roce_hem_item *hem, void *cpu_addr,
+ u64 phy_addr)
+{
+ hem->addr = cpu_addr;
+ hem->dma_addr = (dma_addr_t)phy_addr;
+}
+
+static inline bool hem_list_page_is_in_range(struct hns_roce_hem_item *hem,
+ int offset)
+{
+ return (hem->start <= offset && offset <= hem->end);
+}
+
+static struct hns_roce_hem_item *hem_list_search_item(struct list_head *ba_list,
+ int page_offset)
+{
+ struct hns_roce_hem_item *hem, *temp_hem;
+ struct hns_roce_hem_item *found = NULL;
+
+ list_for_each_entry_safe(hem, temp_hem, ba_list, list) {
+ if (hem_list_page_is_in_range(hem, page_offset)) {
+ found = hem;
+ break;
+ }
+ }
+
+ return found;
+}
+
+static bool hem_list_is_bottom_bt(int hopnum, int bt_level)
+{
+ /*
+ * hopnum base address table levels
+ * 0 L0(buf)
+ * 1 L0 -> buf
+ * 2 L0 -> L1 -> buf
+ * 3 L0 -> L1 -> L2 -> buf
+ */
+ return bt_level >= (hopnum ? hopnum - 1 : hopnum);
+}
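+
+/*
+ * Direct readings of the helper above: hem_list_is_bottom_bt(3, 2) and
+ * hem_list_is_bottom_bt(0, 0) are true, while hem_list_is_bottom_bt(3, 1)
+ * is false, i.e. only the table level that points straight at buffer
+ * pages counts as the bottom bt.
+ */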
+
+/*
+ * calc base address entries num
+ * @hopnum: number of hops in the multi-hop addressing
+ * @bt_level: base address table level
+ * @unit: ba entries per bt page
+ */
+static u64 hem_list_calc_ba_range(int hopnum, int bt_level, int unit)
+{
+ u64 step;
+ int max;
+ int i;
+
+ if (hopnum <= bt_level)
+ return 0;
+ /*
+ * hopnum bt_level range
+ * 1 0 unit
+ * ------------
+ * 2 0 unit * unit
+ * 2 1 unit
+ * ------------
+ * 3 0 unit * unit * unit
+ * 3 1 unit * unit
+ * 3 2 unit
+ */
+ step = 1;
+ max = hopnum - bt_level;
+ for (i = 0; i < max; i++)
+ step = step * unit;
+
+ return step;
+}
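+
+/*
+ * Minimal numeric sketch of the table above, assuming a hypothetical
+ * unit of 512 BAs per bt page:
+ *
+ * hem_list_calc_ba_range(3, 0, 512) = 512 * 512 * 512
+ * hem_list_calc_ba_range(3, 1, 512) = 512 * 512
+ * hem_list_calc_ba_range(3, 2, 512) = 512
+ * hem_list_calc_ba_range(2, 2, 512) = 0 (hopnum <= bt_level)
+ */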
+
+/*
+ * calc the root ba entries which could cover all regions
+ * @regions: buf region array
+ * @region_cnt: array size of @regions
+ * @unit: ba entries per bt page
+ */
+int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
+ int region_cnt, int unit)
+{
+ struct hns_roce_buf_region *r;
+ int total = 0;
+ u64 step;
+ int i;
+
+ for (i = 0; i < region_cnt; i++) {
+ r = (struct hns_roce_buf_region *)&regions[i];
+ /* when r->hopnum = 0, the region should not occupy root_ba. */
+ if (!r->hopnum)
+ continue;
+
+ if (r->hopnum > 1) {
+ step = hem_list_calc_ba_range(r->hopnum, 1, unit);
+ if (step > 0)
+ total += (r->count + step - 1) / step;
+ } else {
+ total += r->count;
+ }
+ }
+
+ return total;
+}
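+
+/*
+ * Example with made-up numbers for the root ba calculation above, again
+ * with unit = 512: a region with hopnum 2 and count 2000 needs
+ * DIV_ROUND_UP(2000, 512) = 4 root entries, a hopnum 1 region with
+ * count 16 needs 16 (one per buffer page), and a hopnum 0 region needs
+ * none because it does not occupy the root_ba at all.
+ */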
+
+static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
+ const struct hns_roce_buf_region *r, int unit,
+ int offset, struct list_head *mid_bt,
+ struct list_head *btm_bt)
+{
+ struct hns_roce_hem_item *hem_ptrs[HNS_ROCE_MAX_BT_LEVEL] = { NULL };
+ struct list_head temp_list[HNS_ROCE_MAX_BT_LEVEL];
+ struct hns_roce_hem_item *cur, *pre;
+ const int hopnum = r->hopnum;
+ int start_aligned;
+ int distance;
+ int ret = 0;
+ int max_ofs;
+ int level;
+ u64 step;
+ int end;
+
+ if (hopnum <= 1)
+ return 0;
+
+ if (hopnum > HNS_ROCE_MAX_BT_LEVEL) {
+ dev_err(hr_dev->dev, "invalid hopnum %d!\n", hopnum);
+ return -EINVAL;
+ }
+
+ if (offset < r->offset) {
+ dev_err(hr_dev->dev, "invalid offset %d, min %u!\n",
+ offset, r->offset);
+ return -EINVAL;
+ }
+
+ distance = offset - r->offset;
+ max_ofs = r->offset + r->count - 1;
+ for (level = 0; level < hopnum; level++)
+ INIT_LIST_HEAD(&temp_list[level]);
+
+ /* config from the L1 bt to the last bt and link them to the corresponding parent */
+ for (level = 1; level < hopnum; level++) {
+ if (!hem_list_is_bottom_bt(hopnum, level)) {
+ cur = hem_list_search_item(&mid_bt[level], offset);
+ if (cur) {
+ hem_ptrs[level] = cur;
+ continue;
+ }
+ }
+
+ step = hem_list_calc_ba_range(hopnum, level, unit);
+ if (step < 1) {
+ ret = -EINVAL;
+ goto err_exit;
+ }
+
+ start_aligned = (distance / step) * step + r->offset;
+ end = min_t(u64, start_aligned + step - 1, max_ofs);
+ cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit,
+ true);
+ if (!cur) {
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+ hem_ptrs[level] = cur;
+ list_add(&cur->list, &temp_list[level]);
+ if (hem_list_is_bottom_bt(hopnum, level))
+ list_add(&cur->sibling, &temp_list[0]);
+
+ /* link bt to parent bt */
+ if (level > 1) {
+ pre = hem_ptrs[level - 1];
+ step = (cur->start - pre->start) / step * BA_BYTE_LEN;
+ hem_list_link_bt(pre->addr + step, cur->dma_addr);
+ }
+ }
+
+ list_splice(&temp_list[0], btm_bt);
+ for (level = 1; level < hopnum; level++)
+ list_splice(&temp_list[level], &mid_bt[level]);
+
+ return 0;
+
+err_exit:
+ for (level = 1; level < hopnum; level++)
+ hem_list_free_all(hr_dev, &temp_list[level]);
+
+ return ret;
+}
+
+static struct hns_roce_hem_item *
+alloc_root_hem(struct hns_roce_dev *hr_dev, int unit, int *max_ba_num,
+ const struct hns_roce_buf_region *regions, int region_cnt)
+{
+ const struct hns_roce_buf_region *r;
+ struct hns_roce_hem_item *hem;
+ int ba_num;
+ int offset;
+
+ ba_num = hns_roce_hem_list_calc_root_ba(regions, region_cnt, unit);
+ if (ba_num < 1)
+ return ERR_PTR(-ENOMEM);
+
+ if (ba_num > unit)
+ return ERR_PTR(-ENOBUFS);
+
+ offset = regions[0].offset;
+ /* point to the last region */
+ r = &regions[region_cnt - 1];
+ hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
+ ba_num, true);
+ if (!hem)
+ return ERR_PTR(-ENOMEM);
+
+ *max_ba_num = ba_num;
+
+ return hem;
+}
+
+static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
+ u64 phy_base, const struct hns_roce_buf_region *r,
+ struct list_head *branch_head,
+ struct list_head *leaf_head)
+{
+ struct hns_roce_hem_item *hem;
+
+ /* This is on the has_mtt branch: if r->hopnum
+ * is 0, there is no root_ba to reuse for the
+ * region's fake hem, so a dma_alloc request is
+ * necessary here.
+ */
+ hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1,
+ r->count, !r->hopnum);
+ if (!hem)
+ return -ENOMEM;
+
+ /* The root_ba can be reused only when r->hopnum > 0. */
+ if (r->hopnum)
+ hem_list_assign_bt(hem, cpu_base, phy_base);
+ list_add(&hem->list, branch_head);
+ list_add(&hem->sibling, leaf_head);
+
+ /* If r->hopnum == 0, 0 is returned,
+ * so that the root_bt entry is not occupied.
+ */
+ return r->hopnum ? r->count : 0;
+}
+
+static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
+ int unit, const struct hns_roce_buf_region *r,
+ const struct list_head *branch_head)
+{
+ struct hns_roce_hem_item *hem, *temp_hem;
+ int total = 0;
+ int offset;
+ u64 step;
+
+ step = hem_list_calc_ba_range(r->hopnum, 1, unit);
+ if (step < 1)
+ return -EINVAL;
+
+ /* if a mid bt exists, link L1 to L0 */
+ list_for_each_entry_safe(hem, temp_hem, branch_head, list) {
+ offset = (hem->start - r->offset) / step * BA_BYTE_LEN;
+ hem_list_link_bt(cpu_base + offset, hem->dma_addr);
+ total++;
+ }
+
+ return total;
+}
+
+static int
+setup_root_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list,
+ int unit, int max_ba_num, struct hns_roce_hem_head *head,
+ const struct hns_roce_buf_region *regions, int region_cnt)
+{
+ const struct hns_roce_buf_region *r;
+ struct hns_roce_hem_item *root_hem;
+ void *cpu_base;
+ u64 phy_base;
+ int i, total;
+ int ret;
+
+ root_hem = list_first_entry(&head->root,
+ struct hns_roce_hem_item, list);
+ if (!root_hem)
+ return -ENOMEM;
+
+ total = 0;
+ for (i = 0; i < region_cnt && total <= max_ba_num; i++) {
+ r = &regions[i];
+ if (!r->count)
+ continue;
+
+ /* all regions' mid[x][0] share the root_bt's trunk */
+ cpu_base = root_hem->addr + total * BA_BYTE_LEN;
+ phy_base = root_hem->dma_addr + total * BA_BYTE_LEN;
+
+ /* if hopnum is 0 or 1, cut a new fake hem from the root bt,
+ * whose address is shared with all regions.
+ */
+ if (hem_list_is_bottom_bt(r->hopnum, 0))
+ ret = alloc_fake_root_bt(hr_dev, cpu_base, phy_base, r,
+ &head->branch[i], &head->leaf);
+ else
+ ret = setup_middle_bt(hr_dev, cpu_base, unit, r,
+ &hem_list->mid_bt[i][1]);
+
+ if (ret < 0)
+ return ret;
+
+ total += ret;
+ }
+
+ list_splice(&head->leaf, &hem_list->btm_bt);
+ list_splice(&head->root, &hem_list->root_bt);
+ for (i = 0; i < region_cnt; i++)
+ list_splice(&head->branch[i], &hem_list->mid_bt[i][0]);
+
+ return 0;
+}
+
+static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_list *hem_list, int unit,
+ const struct hns_roce_buf_region *regions,
+ int region_cnt)
+{
+ struct hns_roce_hem_item *root_hem;
+ struct hns_roce_hem_head head;
+ int max_ba_num;
+ int ret;
+ int i;
+
+ root_hem = hem_list_search_item(&hem_list->root_bt, regions[0].offset);
+ if (root_hem)
+ return 0;
+
+ max_ba_num = 0;
+ root_hem = alloc_root_hem(hr_dev, unit, &max_ba_num, regions,
+ region_cnt);
+ if (IS_ERR(root_hem))
+ return PTR_ERR(root_hem);
+
+ /* List head for storing all allocated HEM items */
+ INIT_LIST_HEAD(&head.root);
+ INIT_LIST_HEAD(&head.leaf);
+ for (i = 0; i < region_cnt; i++)
+ INIT_LIST_HEAD(&head.branch[i]);
+
+ hem_list->root_ba = root_hem->dma_addr;
+ list_add(&root_hem->list, &head.root);
+ ret = setup_root_hem(hr_dev, hem_list, unit, max_ba_num, &head, regions,
+ region_cnt);
+ if (ret) {
+ for (i = 0; i < region_cnt; i++)
+ hem_list_free_all(hr_dev, &head.branch[i]);
+
+ hem_list_free_all(hr_dev, &head.root);
+ }
+
+ return ret;
+}
+
+/* This is the number of bottom bt pages of a 100G MR on a 4K-page OS,
+ * assuming the bt page size is not expanded by cal_best_bt_pg_sz()
+ */
+#define RESCHED_LOOP_CNT_THRESHOLD_ON_4K 12800
+
+/* construct the base address table and link them by address hop config */
+int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_list *hem_list,
+ const struct hns_roce_buf_region *regions,
+ int region_cnt, unsigned int bt_pg_shift)
+{
+ const struct hns_roce_buf_region *r;
+ int ofs, end;
+ int loop;
+ int unit;
+ int ret;
+ int i;
+
+ if (region_cnt > HNS_ROCE_MAX_BT_REGION) {
+ dev_err(hr_dev->dev, "invalid region region_cnt %d!\n",
+ region_cnt);
+ return -EINVAL;
+ }
+
+ unit = (1 << bt_pg_shift) / BA_BYTE_LEN;
+ for (i = 0; i < region_cnt; i++) {
+ r = &regions[i];
+ if (!r->count)
+ continue;
+
+ end = r->offset + r->count;
+ for (ofs = r->offset, loop = 1; ofs < end; ofs += unit, loop++) {
+ if (!(loop % RESCHED_LOOP_CNT_THRESHOLD_ON_4K))
+ cond_resched();
+
+ ret = hem_list_alloc_mid_bt(hr_dev, r, unit, ofs,
+ hem_list->mid_bt[i],
+ &hem_list->btm_bt);
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "alloc hem trunk fail ret = %d!\n", ret);
+ goto err_alloc;
+ }
+ }
+ }
+
+ ret = hem_list_alloc_root_bt(hr_dev, hem_list, unit, regions,
+ region_cnt);
+ if (ret)
+ dev_err(hr_dev->dev, "alloc hem root fail ret = %d!\n", ret);
+ else
+ return 0;
+
+err_alloc:
+ hns_roce_hem_list_release(hr_dev, hem_list);
+
+ return ret;
+}
+
+void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_list *hem_list)
+{
+ int i, j;
+
+ for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
+ for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
+ hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j]);
+
+ hem_list_free_all(hr_dev, &hem_list->root_bt);
+ INIT_LIST_HEAD(&hem_list->btm_bt);
+ hem_list->root_ba = 0;
+}
+
+void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list)
+{
+ int i, j;
+
+ INIT_LIST_HEAD(&hem_list->root_bt);
+ INIT_LIST_HEAD(&hem_list->btm_bt);
+ for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
+ for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
+ INIT_LIST_HEAD(&hem_list->mid_bt[i][j]);
+}
+
+void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_list *hem_list,
+ int offset, int *mtt_cnt)
+{
+ struct list_head *head = &hem_list->btm_bt;
+ struct hns_roce_hem_item *hem, *temp_hem;
+ void *cpu_base = NULL;
+ int loop = 1;
+ int nr = 0;
+
+ list_for_each_entry_safe(hem, temp_hem, head, sibling) {
+ if (!(loop % RESCHED_LOOP_CNT_THRESHOLD_ON_4K))
+ cond_resched();
+ loop++;
+
+ if (hem_list_page_is_in_range(hem, offset)) {
+ nr = offset - hem->start;
+ cpu_base = hem->addr + nr * BA_BYTE_LEN;
+ nr = hem->end + 1 - offset;
+ break;
+ }
+ }
+
+ if (mtt_cnt)
+ *mtt_cnt = nr;
+
+ return cpu_base;
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
new file mode 100644
index 000000000000..9c415b2541af
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _HNS_ROCE_HEM_H
+#define _HNS_ROCE_HEM_H
+
+#define HEM_HOP_STEP_DIRECT 0xff
+
+enum {
+ /* MAP HEM(Hardware Entry Memory) */
+ HEM_TYPE_QPC = 0,
+ HEM_TYPE_MTPT,
+ HEM_TYPE_CQC,
+ HEM_TYPE_SRQC,
+ HEM_TYPE_SCCC,
+ HEM_TYPE_QPC_TIMER,
+ HEM_TYPE_CQC_TIMER,
+ HEM_TYPE_GMV,
+
+ /* UNMAP HEM */
+ HEM_TYPE_MTT,
+ HEM_TYPE_CQE,
+ HEM_TYPE_SRQWQE,
+ HEM_TYPE_IDX,
+ HEM_TYPE_IRRL,
+ HEM_TYPE_TRRL,
+};
+
+#define check_whether_bt_num_3(type, hop_num) \
+ ((type) < HEM_TYPE_MTT && (hop_num) == 2)
+
+#define check_whether_bt_num_2(type, hop_num) \
+ (((type) < HEM_TYPE_MTT && (hop_num) == 1) || \
+ ((type) >= HEM_TYPE_MTT && (hop_num) == 2))
+
+#define check_whether_bt_num_1(type, hop_num) \
+ (((type) < HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0) || \
+ ((type) >= HEM_TYPE_MTT && (hop_num) == 1) || \
+ ((type) >= HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0))
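+
+/*
+ * One way to read the three helpers above: for the mapped types
+ * (< HEM_TYPE_MTT) hop_num 2 means a three level walk (L0 -> L1 -> buf),
+ * hop_num 1 a two level walk (L0 -> buf) and HNS_ROCE_HOP_NUM_0 a direct
+ * buffer; for the MTT-like types the buffer chunk is itself a bt page,
+ * so hop_num 2 maps to two levels and hop_num 1 (or HNS_ROCE_HOP_NUM_0)
+ * to a single level.
+ */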
+
+struct hns_roce_hem {
+ void *buf;
+ dma_addr_t dma;
+ unsigned long size;
+ refcount_t refcount;
+};
+
+struct hns_roce_hem_mhop {
+ u32 hop_num;
+ u32 buf_chunk_size;
+ u32 bt_chunk_size;
+ u32 ba_l0_num;
+ u32 l0_idx; /* level 0 base address table index */
+ u32 l1_idx; /* level 1 base address table index */
+ u32 l2_idx; /* level 2 base address table index */
+};
+
+void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem);
+int hns_roce_table_get(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj);
+void hns_roce_table_put(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj);
+void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj,
+ dma_addr_t *dma_handle);
+int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, u32 type,
+ unsigned long obj_size, unsigned long nobj);
+void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table);
+void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev);
+int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long *obj,
+ struct hns_roce_hem_mhop *mhop);
+bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type);
+
+void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list);
+int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
+ int region_cnt, int unit);
+int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_list *hem_list,
+ const struct hns_roce_buf_region *regions,
+ int region_cnt, unsigned int bt_pg_shift);
+void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_list *hem_list);
+void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_list *hem_list,
+ int offset, int *mtt_cnt);
+
+#endif /* _HNS_ROCE_HEM_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
new file mode 100644
index 000000000000..f82bdd46a917
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -0,0 +1,7274 @@
+/*
+ * Copyright (c) 2016-2017 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/acpi.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <net/addrconf.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+#include <rdma/ib_umem.h>
+#include <rdma/uverbs_ioctl.h>
+
+#include "hns_roce_common.h"
+#include "hns_roce_device.h"
+#include "hns_roce_cmd.h"
+#include "hns_roce_hem.h"
+#include "hns_roce_hw_v2.h"
+
+#define CREATE_TRACE_POINTS
+#include "hns_roce_trace.h"
+
+enum {
+ CMD_RST_PRC_OTHERS,
+ CMD_RST_PRC_SUCCESS,
+ CMD_RST_PRC_EBUSY,
+};
+
+enum ecc_resource_type {
+ ECC_RESOURCE_QPC,
+ ECC_RESOURCE_CQC,
+ ECC_RESOURCE_MPT,
+ ECC_RESOURCE_SRQC,
+ ECC_RESOURCE_GMV,
+ ECC_RESOURCE_QPC_TIMER,
+ ECC_RESOURCE_CQC_TIMER,
+ ECC_RESOURCE_SCCC,
+ ECC_RESOURCE_COUNT,
+};
+
+static const struct {
+ const char *name;
+ u8 read_bt0_op;
+ u8 write_bt0_op;
+} fmea_ram_res[] = {
+ { "ECC_RESOURCE_QPC",
+ HNS_ROCE_CMD_READ_QPC_BT0, HNS_ROCE_CMD_WRITE_QPC_BT0 },
+ { "ECC_RESOURCE_CQC",
+ HNS_ROCE_CMD_READ_CQC_BT0, HNS_ROCE_CMD_WRITE_CQC_BT0 },
+ { "ECC_RESOURCE_MPT",
+ HNS_ROCE_CMD_READ_MPT_BT0, HNS_ROCE_CMD_WRITE_MPT_BT0 },
+ { "ECC_RESOURCE_SRQC",
+ HNS_ROCE_CMD_READ_SRQC_BT0, HNS_ROCE_CMD_WRITE_SRQC_BT0 },
+ /* ECC_RESOURCE_GMV is handled by cmdq, not mailbox */
+ { "ECC_RESOURCE_GMV",
+ 0, 0 },
+ { "ECC_RESOURCE_QPC_TIMER",
+ HNS_ROCE_CMD_READ_QPC_TIMER_BT0, HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0 },
+ { "ECC_RESOURCE_CQC_TIMER",
+ HNS_ROCE_CMD_READ_CQC_TIMER_BT0, HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0 },
+ { "ECC_RESOURCE_SCCC",
+ HNS_ROCE_CMD_READ_SCCC_BT0, HNS_ROCE_CMD_WRITE_SCCC_BT0 },
+};
+
+static inline void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
+ struct ib_sge *sg)
+{
+ dseg->lkey = cpu_to_le32(sg->lkey);
+ dseg->addr = cpu_to_le64(sg->addr);
+ dseg->len = cpu_to_le32(sg->length);
+}
+
+/*
+ * mapped-value = 1 + real-value
+ * The real values of the hns wr opcodes start from 0. In order to distinguish
+ * initialized from uninitialized map entries, we add 1 to the actual value
+ * when defining the mapping, so that a valid entry can be identified by
+ * checking that the mapped value is greater than 0.
+ */
+#define HR_OPC_MAP(ib_key, hr_key) \
+ [IB_WR_ ## ib_key] = 1 + HNS_ROCE_V2_WQE_OP_ ## hr_key
+
+static const u32 hns_roce_op_code[] = {
+ HR_OPC_MAP(RDMA_WRITE, RDMA_WRITE),
+ HR_OPC_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE_WITH_IMM),
+ HR_OPC_MAP(SEND, SEND),
+ HR_OPC_MAP(SEND_WITH_IMM, SEND_WITH_IMM),
+ HR_OPC_MAP(RDMA_READ, RDMA_READ),
+ HR_OPC_MAP(ATOMIC_CMP_AND_SWP, ATOM_CMP_AND_SWAP),
+ HR_OPC_MAP(ATOMIC_FETCH_AND_ADD, ATOM_FETCH_AND_ADD),
+ HR_OPC_MAP(SEND_WITH_INV, SEND_WITH_INV),
+ HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP, ATOM_MSK_CMP_AND_SWAP),
+ HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD, ATOM_MSK_FETCH_AND_ADD),
+ HR_OPC_MAP(REG_MR, FAST_REG_PMR),
+};
+
+static u32 to_hr_opcode(u32 ib_opcode)
+{
+ if (ib_opcode >= ARRAY_SIZE(hns_roce_op_code))
+ return HNS_ROCE_V2_WQE_OP_MASK;
+
+ return hns_roce_op_code[ib_opcode] ? hns_roce_op_code[ib_opcode] - 1 :
+ HNS_ROCE_V2_WQE_OP_MASK;
+}
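+
+/*
+ * Illustration of the "+1" trick above with made-up numbers (the real
+ * opcode values live in the hardware header): if IB_WR_SEND were 0 and
+ * HNS_ROCE_V2_WQE_OP_SEND were 4, the table entry would hold 1 + 4 = 5,
+ * so to_hr_opcode() returns 5 - 1 = 4, while any unlisted ib_opcode keeps
+ * the zero-initialized entry and falls back to HNS_ROCE_V2_WQE_OP_MASK.
+ */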
+
+static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+ const struct ib_reg_wr *wr)
+{
+ struct hns_roce_wqe_frmr_seg *fseg =
+ (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
+ struct hns_roce_mr *mr = to_hr_mr(wr->mr);
+ u64 pbl_ba;
+
+ /* use ib_access_flags */
+ hr_reg_write_bool(fseg, FRMR_BIND_EN, 0);
+ hr_reg_write_bool(fseg, FRMR_ATOMIC,
+ wr->access & IB_ACCESS_REMOTE_ATOMIC);
+ hr_reg_write_bool(fseg, FRMR_RR, wr->access & IB_ACCESS_REMOTE_READ);
+ hr_reg_write_bool(fseg, FRMR_RW, wr->access & IB_ACCESS_REMOTE_WRITE);
+ hr_reg_write_bool(fseg, FRMR_LW, wr->access & IB_ACCESS_LOCAL_WRITE);
+
+ /* Data structure reuse may lead to confusion */
+ pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
+ rc_sq_wqe->msg_len = cpu_to_le32(lower_32_bits(pbl_ba));
+ rc_sq_wqe->inv_key = cpu_to_le32(upper_32_bits(pbl_ba));
+
+ rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
+ rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
+ rc_sq_wqe->rkey = cpu_to_le32(wr->key);
+ rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);
+
+ hr_reg_write(fseg, FRMR_PBL_SIZE, mr->npages);
+ hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
+ hr_reg_clear(fseg, FRMR_BLK_MODE);
+}
+
+static void set_atomic_seg(const struct ib_send_wr *wr,
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+ unsigned int valid_num_sge)
+{
+ struct hns_roce_v2_wqe_data_seg *dseg =
+ (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
+ struct hns_roce_wqe_atomic_seg *aseg =
+ (void *)dseg + sizeof(struct hns_roce_v2_wqe_data_seg);
+
+ set_data_seg_v2(dseg, wr->sg_list);
+
+ if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+ aseg->fetchadd_swap_data = cpu_to_le64(atomic_wr(wr)->swap);
+ aseg->cmp_data = cpu_to_le64(atomic_wr(wr)->compare_add);
+ } else {
+ aseg->fetchadd_swap_data =
+ cpu_to_le64(atomic_wr(wr)->compare_add);
+ aseg->cmp_data = 0;
+ }
+
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge);
+}
+
+static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
+ const struct ib_send_wr *wr,
+ unsigned int *sge_idx, u32 msg_len)
+{
+ struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
+ unsigned int left_len_in_pg;
+ unsigned int idx = *sge_idx;
+ unsigned int i = 0;
+ unsigned int len;
+ void *addr;
+ void *dseg;
+
+ if (msg_len > qp->sq.ext_sge_cnt * HNS_ROCE_SGE_SIZE) {
+ ibdev_err(ibdev,
+ "no enough extended sge space for inline data.\n");
+ return -EINVAL;
+ }
+
+ dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
+ left_len_in_pg = hr_hw_page_align((uintptr_t)dseg) - (uintptr_t)dseg;
+ len = wr->sg_list[0].length;
+ addr = (void *)(unsigned long)(wr->sg_list[0].addr);
+
+ /* When copying data to the extended sge space, the length left in the
+ * page may not be long enough for the current user's sge, so the data
+ * should be split into several parts: one in the first page, and the
+ * others in the subsequent pages.
+ */
+ while (1) {
+ if (len <= left_len_in_pg) {
+ memcpy(dseg, addr, len);
+
+ idx += len / HNS_ROCE_SGE_SIZE;
+
+ i++;
+ if (i >= wr->num_sge)
+ break;
+
+ left_len_in_pg -= len;
+ len = wr->sg_list[i].length;
+ addr = (void *)(unsigned long)(wr->sg_list[i].addr);
+ dseg += len;
+ } else {
+ memcpy(dseg, addr, left_len_in_pg);
+
+ len -= left_len_in_pg;
+ addr += left_len_in_pg;
+ idx += left_len_in_pg / HNS_ROCE_SGE_SIZE;
+ dseg = hns_roce_get_extend_sge(qp,
+ idx & (qp->sge.sge_cnt - 1));
+ left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT;
+ }
+ }
+
+ *sge_idx = idx;
+
+ return 0;
+}
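+
+/*
+ * Copy-splitting sketch for the loop above, with invented sizes: if
+ * left_len_in_pg is 100 bytes and the current sge holds 300 bytes, the
+ * first 100 bytes fill up the current extended sge page, dseg is then
+ * moved to the next page (left_len_in_pg becomes one full hw page) and
+ * the remaining 200 bytes are copied there on the next iteration.
+ */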
+
+static void set_extend_sge(struct hns_roce_qp *qp, struct ib_sge *sge,
+ unsigned int *sge_ind, unsigned int cnt)
+{
+ struct hns_roce_v2_wqe_data_seg *dseg;
+ unsigned int idx = *sge_ind;
+
+ while (cnt > 0) {
+ dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
+ if (likely(sge->length)) {
+ set_data_seg_v2(dseg, sge);
+ idx++;
+ cnt--;
+ }
+ sge++;
+ }
+
+ *sge_ind = idx;
+}
+
+static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
+ int mtu = ib_mtu_enum_to_int(qp->path_mtu);
+
+ if (mtu < 0 || len > qp->max_inline_data || len > mtu) {
+ ibdev_err(&hr_dev->ib_dev,
+ "invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n",
+ len, qp->max_inline_data, mtu);
+ return false;
+ }
+
+ return true;
+}
+
+static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+ unsigned int *sge_idx)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
+ u32 msg_len = le32_to_cpu(rc_sq_wqe->msg_len);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ unsigned int curr_idx = *sge_idx;
+ void *dseg = rc_sq_wqe;
+ unsigned int i;
+ int ret;
+
+ if (unlikely(wr->opcode == IB_WR_RDMA_READ)) {
+ ibdev_err(ibdev, "invalid inline parameters!\n");
+ return -EINVAL;
+ }
+
+ if (!check_inl_data_len(qp, msg_len))
+ return -EINVAL;
+
+ dseg += sizeof(struct hns_roce_v2_rc_send_wqe);
+
+ if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) {
+ hr_reg_clear(rc_sq_wqe, RC_SEND_WQE_INL_TYPE);
+
+ for (i = 0; i < wr->num_sge; i++) {
+ memcpy(dseg, ((void *)wr->sg_list[i].addr),
+ wr->sg_list[i].length);
+ dseg += wr->sg_list[i].length;
+ }
+ } else {
+ hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_INL_TYPE);
+
+ ret = fill_ext_sge_inl_data(qp, wr, &curr_idx, msg_len);
+ if (ret)
+ return ret;
+
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, curr_idx - *sge_idx);
+ }
+
+ *sge_idx = curr_idx;
+
+ return 0;
+}
+
+static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+ unsigned int *sge_ind,
+ unsigned int valid_num_sge)
+{
+ struct hns_roce_v2_wqe_data_seg *dseg =
+ (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
+ struct hns_roce_qp *qp = to_hr_qp(ibqp);
+ int j = 0;
+ int i;
+
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
+ (*sge_ind) & (qp->sge.sge_cnt - 1));
+
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE,
+ !!(wr->send_flags & IB_SEND_INLINE));
+ if (wr->send_flags & IB_SEND_INLINE)
+ return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind);
+
+ if (valid_num_sge <= HNS_ROCE_SGE_IN_WQE) {
+ for (i = 0; i < wr->num_sge; i++) {
+ if (likely(wr->sg_list[i].length)) {
+ set_data_seg_v2(dseg, wr->sg_list + i);
+ dseg++;
+ }
+ }
+ } else {
+ for (i = 0; i < wr->num_sge && j < HNS_ROCE_SGE_IN_WQE; i++) {
+ if (likely(wr->sg_list[i].length)) {
+ set_data_seg_v2(dseg, wr->sg_list + i);
+ dseg++;
+ j++;
+ }
+ }
+
+ set_extend_sge(qp, wr->sg_list + i, sge_ind,
+ valid_num_sge - HNS_ROCE_SGE_IN_WQE);
+ }
+
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge);
+
+ return 0;
+}
+
+static int check_send_valid(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp)
+{
+ if (unlikely(hr_qp->state == IB_QPS_RESET ||
+ hr_qp->state == IB_QPS_INIT ||
+ hr_qp->state == IB_QPS_RTR))
+ return -EINVAL;
+ else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
+ return -EIO;
+
+ return 0;
+}
+
+static unsigned int calc_wr_sge_num(const struct ib_send_wr *wr,
+ unsigned int *sge_len)
+{
+ unsigned int valid_num = 0;
+ unsigned int len = 0;
+ int i;
+
+ for (i = 0; i < wr->num_sge; i++) {
+ if (likely(wr->sg_list[i].length)) {
+ len += wr->sg_list[i].length;
+ valid_num++;
+ }
+ }
+
+ *sge_len = len;
+ return valid_num;
+}
+
+static __le32 get_immtdata(const struct ib_send_wr *wr)
+{
+ switch (wr->opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ return cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
+ default:
+ return 0;
+ }
+}
+
+static int set_ud_opcode(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
+ const struct ib_send_wr *wr)
+{
+ u32 ib_op = wr->opcode;
+
+ if (ib_op != IB_WR_SEND && ib_op != IB_WR_SEND_WITH_IMM)
+ return -EINVAL;
+
+ ud_sq_wqe->immtdata = get_immtdata(wr);
+
+ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_OPCODE, to_hr_opcode(ib_op));
+
+ return 0;
+}
+
+static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
+ struct hns_roce_ah *ah)
+{
+ struct ib_device *ib_dev = ah->ibah.device;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+
+ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_UDPSPN, ah->av.udp_sport);
+ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_HOPLIMIT, ah->av.hop_limit);
+ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_TCLASS, ah->av.tclass);
+ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_FLOW_LABEL, ah->av.flowlabel);
+ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SL, ah->av.sl);
+
+ ud_sq_wqe->sgid_index = ah->av.gid_index;
+
+ memcpy(ud_sq_wqe->dmac, ah->av.mac, ETH_ALEN);
+ memcpy(ud_sq_wqe->dgid, ah->av.dgid, GID_LEN_V2);
+
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ return 0;
+
+ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN_EN, ah->av.vlan_en);
+ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN, ah->av.vlan_id);
+
+ return 0;
+}
+
+static inline int set_ud_wqe(struct hns_roce_qp *qp,
+ const struct ib_send_wr *wr,
+ void *wqe, unsigned int *sge_idx,
+ unsigned int owner_bit)
+{
+ struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
+ struct hns_roce_v2_ud_send_wqe *ud_sq_wqe = wqe;
+ unsigned int curr_idx = *sge_idx;
+ unsigned int valid_num_sge;
+ u32 msg_len = 0;
+ int ret;
+
+ valid_num_sge = calc_wr_sge_num(wr, &msg_len);
+
+ ret = set_ud_opcode(ud_sq_wqe, wr);
+ if (WARN_ON_ONCE(ret))
+ return ret;
+
+ ud_sq_wqe->msg_len = cpu_to_le32(msg_len);
+
+ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_CQE,
+ !!(wr->send_flags & IB_SEND_SIGNALED));
+ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SE,
+ !!(wr->send_flags & IB_SEND_SOLICITED));
+
+ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_PD, to_hr_pd(qp->ibqp.pd)->pdn);
+ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SGE_NUM, valid_num_sge);
+ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_MSG_START_SGE_IDX,
+ curr_idx & (qp->sge.sge_cnt - 1));
+
+ ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
+ qp->qkey : ud_wr(wr)->remote_qkey);
+ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_DQPN, ud_wr(wr)->remote_qpn);
+
+ ret = fill_ud_av(ud_sq_wqe, ah);
+ if (ret)
+ return ret;
+
+ qp->sl = to_hr_ah(ud_wr(wr)->ah)->av.sl;
+
+ set_extend_sge(qp, wr->sg_list, &curr_idx, valid_num_sge);
+
+ /*
+ * The pipeline can sequentially post all valid WQEs into WQ buffer,
+ * including new WQEs waiting for the doorbell to update the PI again.
+ * Therefore, the owner bit of WQE MUST be updated after all fields
+ * and extSGEs have been written into DDR instead of cache.
+ */
+ if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
+ dma_wmb();
+
+ *sge_idx = curr_idx;
+ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_OWNER, owner_bit);
+
+ return 0;
+}
+
+static int set_rc_opcode(struct hns_roce_dev *hr_dev,
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+ const struct ib_send_wr *wr)
+{
+ u32 ib_op = wr->opcode;
+ int ret = 0;
+
+ rc_sq_wqe->immtdata = get_immtdata(wr);
+
+ switch (ib_op) {
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
+ rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
+ break;
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
+ rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
+ break;
+ case IB_WR_REG_MR:
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ set_frmr_seg(rc_sq_wqe, reg_wr(wr));
+ else
+ ret = -EOPNOTSUPP;
+ break;
+ case IB_WR_SEND_WITH_INV:
+ rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (unlikely(ret))
+ return ret;
+
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_OPCODE, to_hr_opcode(ib_op));
+
+ return ret;
+}
+
+static inline int set_rc_wqe(struct hns_roce_qp *qp,
+ const struct ib_send_wr *wr,
+ void *wqe, unsigned int *sge_idx,
+ unsigned int owner_bit)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
+ unsigned int curr_idx = *sge_idx;
+ unsigned int valid_num_sge;
+ u32 msg_len = 0;
+ int ret;
+
+ valid_num_sge = calc_wr_sge_num(wr, &msg_len);
+
+ rc_sq_wqe->msg_len = cpu_to_le32(msg_len);
+
+ ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr);
+ if (WARN_ON_ONCE(ret))
+ return ret;
+
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SO,
+ (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
+
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SE,
+ (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
+
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE,
+ (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+
+ if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+ if (msg_len != ATOMIC_WR_LEN)
+ return -EINVAL;
+ set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
+ } else if (wr->opcode != IB_WR_REG_MR) {
+ ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
+ &curr_idx, valid_num_sge);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * The pipeline can sequentially post all valid WQEs into WQ buffer,
+ * including new WQEs waiting for the doorbell to update the PI again.
+ * Therefore, the owner bit of WQE MUST be updated after all fields
+ * and extSGEs have been written into DDR instead of cache.
+ */
+ if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
+ dma_wmb();
+
+ *sge_idx = curr_idx;
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_OWNER, owner_bit);
+
+ return ret;
+}
+
+static inline void update_sq_db(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *qp)
+{
+ if (unlikely(qp->state == IB_QPS_ERR)) {
+ flush_cqe(hr_dev, qp);
+ } else {
+ struct hns_roce_v2_db sq_db = {};
+
+ hr_reg_write(&sq_db, DB_TAG, qp->qpn);
+ hr_reg_write(&sq_db, DB_CMD, HNS_ROCE_V2_SQ_DB);
+ hr_reg_write(&sq_db, DB_PI, qp->sq.head);
+ hr_reg_write(&sq_db, DB_SL, qp->sl);
+
+ hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg);
+ }
+}
+
+static inline void update_rq_db(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *qp)
+{
+ if (unlikely(qp->state == IB_QPS_ERR)) {
+ flush_cqe(hr_dev, qp);
+ } else {
+ if (likely(qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)) {
+ *qp->rdb.db_record =
+ qp->rq.head & V2_DB_PRODUCER_IDX_M;
+ } else {
+ struct hns_roce_v2_db rq_db = {};
+
+ hr_reg_write(&rq_db, DB_TAG, qp->qpn);
+ hr_reg_write(&rq_db, DB_CMD, HNS_ROCE_V2_RQ_DB);
+ hr_reg_write(&rq_db, DB_PI, qp->rq.head);
+
+ hns_roce_write64(hr_dev, (__le32 *)&rq_db,
+ qp->rq.db_reg);
+ }
+ }
+}
+
+static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val,
+ u64 __iomem *dest)
+{
+#define HNS_ROCE_WRITE_TIMES 8
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ int i;
+
+ if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle))
+ for (i = 0; i < HNS_ROCE_WRITE_TIMES; i++)
+ writeq_relaxed(*(val + i), dest + i);
+}
+
+static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
+ void *wqe)
+{
+#define HNS_ROCE_SL_SHIFT 2
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
+
+ if (unlikely(qp->state == IB_QPS_ERR)) {
+ flush_cqe(hr_dev, qp);
+ return;
+ }
+ /* All kinds of DirectWQE have the same header field layout */
+ hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_FLAG);
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl);
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_H,
+ qp->sl >> HNS_ROCE_SL_SHIFT);
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_WQE_INDEX, qp->sq.head);
+
+ hns_roce_write512(hr_dev, wqe, qp->sq.db_reg);
+}
+
+static int hns_roce_v2_post_send(struct ib_qp *ibqp,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_qp *qp = to_hr_qp(ibqp);
+ unsigned long flags = 0;
+ unsigned int owner_bit;
+ unsigned int sge_idx;
+ unsigned int wqe_idx;
+ void *wqe = NULL;
+ u32 nreq;
+ int ret;
+
+ spin_lock_irqsave(&qp->sq.lock, flags);
+
+ ret = check_send_valid(hr_dev, qp);
+ if (unlikely(ret)) {
+ *bad_wr = wr;
+ nreq = 0;
+ goto out;
+ }
+
+ sge_idx = qp->next_sge;
+
+ for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
+ ret = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
+
+ if (unlikely(wr->num_sge > qp->sq.max_gs)) {
+ ibdev_err(ibdev, "num_sge = %d > qp->sq.max_gs = %u.\n",
+ wr->num_sge, qp->sq.max_gs);
+ ret = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ wqe = hns_roce_get_send_wqe(qp, wqe_idx);
+ qp->sq.wrid[wqe_idx] = wr->wr_id;
+ owner_bit =
+ ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
+
+ /* Process the WQE separately according to the QP type */
+ if (ibqp->qp_type == IB_QPT_RC)
+ ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
+ else
+ ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);
+
+ trace_hns_sq_wqe(qp->qpn, wqe_idx, wqe, 1 << qp->sq.wqe_shift,
+ wr->wr_id, TRACE_SQ);
+ if (unlikely(ret)) {
+ *bad_wr = wr;
+ goto out;
+ }
+ }
+
+out:
+ if (likely(nreq)) {
+ qp->sq.head += nreq;
+ qp->next_sge = sge_idx;
+
+ if (nreq == 1 && !ret &&
+ (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
+ write_dwqe(hr_dev, qp, wqe);
+ else
+ update_sq_db(hr_dev, qp);
+ }
+
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+
+ return ret;
+}
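+
+/*
+ * Note on the owner_bit computation in hns_roce_v2_post_send() above,
+ * assuming for illustration a SQ with wqe_cnt = 128: while sq.head is in
+ * 0..127 the expression yields owner bit 1, after the first wrap (head in
+ * 128..255) it yields 0, and so on, so each full pass over the ring flips
+ * the bit and lets hardware tell fresh WQEs from stale ones.
+ */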
+
+static int check_recv_valid(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp)
+{
+ if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
+ return -EIO;
+
+ if (hr_qp->state == IB_QPS_RESET)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void fill_recv_sge_to_wqe(const struct ib_recv_wr *wr, void *wqe,
+ u32 max_sge, bool rsv)
+{
+ struct hns_roce_v2_wqe_data_seg *dseg = wqe;
+ u32 i, cnt;
+
+ for (i = 0, cnt = 0; i < wr->num_sge; i++) {
+ /* Skip zero-length sge */
+ if (!wr->sg_list[i].length)
+ continue;
+ set_data_seg_v2(dseg + cnt, wr->sg_list + i);
+ cnt++;
+ }
+
+ /* Fill a reserved sge to make hw stop reading remaining segments */
+ if (rsv) {
+ dseg[cnt].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
+ dseg[cnt].addr = 0;
+ dseg[cnt].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
+ } else {
+ /* Clear remaining segments to make ROCEE ignore sges */
+ if (cnt < max_sge)
+ memset(dseg + cnt, 0,
+ (max_sge - cnt) * HNS_ROCE_SGE_SIZE);
+ }
+}
+
+static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
+ u32 wqe_idx, u32 max_sge)
+{
+ void *wqe = NULL;
+
+ wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
+ fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);
+
+ trace_hns_rq_wqe(hr_qp->qpn, wqe_idx, wqe, 1 << hr_qp->rq.wqe_shift,
+ wr->wr_id, TRACE_RQ);
+}
+
+static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u32 wqe_idx, nreq, max_sge;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&hr_qp->rq.lock, flags);
+
+ ret = check_recv_valid(hr_dev, hr_qp);
+ if (unlikely(ret)) {
+ *bad_wr = wr;
+ nreq = 0;
+ goto out;
+ }
+
+ max_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
+ for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq,
+ hr_qp->ibqp.recv_cq))) {
+ ret = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ if (unlikely(wr->num_sge > max_sge)) {
+ ibdev_err(ibdev, "num_sge = %d >= max_sge = %u.\n",
+ wr->num_sge, max_sge);
+ ret = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
+ fill_rq_wqe(hr_qp, wr, wqe_idx, max_sge);
+ hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
+ }
+
+out:
+ if (likely(nreq)) {
+ hr_qp->rq.head += nreq;
+
+ update_rq_db(hr_dev, hr_qp);
+ }
+ spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
+
+ return ret;
+}
+
+static void *get_srq_wqe_buf(struct hns_roce_srq *srq, u32 n)
+{
+ return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
+}
+
+static void *get_idx_buf(struct hns_roce_idx_que *idx_que, u32 n)
+{
+ return hns_roce_buf_offset(idx_que->mtr.kmem,
+ n << idx_que->entry_shift);
+}
+
+static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, u32 wqe_index)
+{
+ /* always called with interrupts disabled. */
+ spin_lock(&srq->lock);
+
+ bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
+ srq->idx_que.tail++;
+
+ spin_unlock(&srq->lock);
+}
+
+static int hns_roce_srqwq_overflow(struct hns_roce_srq *srq)
+{
+ struct hns_roce_idx_que *idx_que = &srq->idx_que;
+
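+ /* The index queue is full once head - tail reaches wqe_cnt */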
+ return idx_que->head - idx_que->tail >= srq->wqe_cnt;
+}
+
+static int check_post_srq_valid(struct hns_roce_srq *srq, u32 max_sge,
+ const struct ib_recv_wr *wr)
+{
+ struct ib_device *ib_dev = srq->ibsrq.device;
+
+ if (unlikely(wr->num_sge > max_sge)) {
+ ibdev_err(ib_dev,
+ "failed to check sge, wr->num_sge = %d, max_sge = %u.\n",
+ wr->num_sge, max_sge);
+ return -EINVAL;
+ }
+
+ if (unlikely(hns_roce_srqwq_overflow(srq))) {
+ ibdev_err(ib_dev,
+ "failed to check srqwq status, srqwq is full.\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int get_srq_wqe_idx(struct hns_roce_srq *srq, u32 *wqe_idx)
+{
+ struct hns_roce_idx_que *idx_que = &srq->idx_que;
+ u32 pos;
+
+ pos = find_first_zero_bit(idx_que->bitmap, srq->wqe_cnt);
+ if (unlikely(pos == srq->wqe_cnt))
+ return -ENOSPC;
+
+ bitmap_set(idx_que->bitmap, pos, 1);
+ *wqe_idx = pos;
+ return 0;
+}
+
+static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)
+{
+ struct hns_roce_idx_que *idx_que = &srq->idx_que;
+ unsigned int head;
+ __le32 *buf;
+
+ head = idx_que->head & (srq->wqe_cnt - 1);
+
+ buf = get_idx_buf(idx_que, head);
+ *buf = cpu_to_le32(wqe_idx);
+
+ idx_que->head++;
+}
+
+static void update_srq_db(struct hns_roce_srq *srq)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
+ struct hns_roce_v2_db db = {};
+
+ hr_reg_write(&db, DB_TAG, srq->srqn);
+ hr_reg_write(&db, DB_CMD, HNS_ROCE_V2_SRQ_DB);
+ hr_reg_write(&db, DB_PI, srq->idx_que.head);
+
+ hns_roce_write64(hr_dev, (__le32 *)&db, srq->db_reg);
+}
+
+static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct hns_roce_srq *srq = to_hr_srq(ibsrq);
+ unsigned long flags;
+ int ret = 0;
+ u32 max_sge;
+ u32 wqe_idx;
+ void *wqe;
+ u32 nreq;
+
+ spin_lock_irqsave(&srq->lock, flags);
+
+ max_sge = srq->max_gs - srq->rsv_sge;
+ for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ ret = check_post_srq_valid(srq, max_sge, wr);
+ if (ret) {
+ *bad_wr = wr;
+ break;
+ }
+
+ ret = get_srq_wqe_idx(srq, &wqe_idx);
+ if (unlikely(ret)) {
+ *bad_wr = wr;
+ break;
+ }
+
+ wqe = get_srq_wqe_buf(srq, wqe_idx);
+ fill_recv_sge_to_wqe(wr, wqe, max_sge, srq->rsv_sge);
+ fill_wqe_idx(srq, wqe_idx);
+ srq->wrid[wqe_idx] = wr->wr_id;
+
+ trace_hns_srq_wqe(srq->srqn, wqe_idx, wqe, 1 << srq->wqe_shift,
+ wr->wr_id, TRACE_SRQ);
+ }
+
+ if (likely(nreq)) {
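+ /* With the record-doorbell capability the producer index is
+ * written to a doorbell record in memory instead of the register.
+ */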
+ if (srq->cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB)
+ *srq->rdb.db_record = srq->idx_que.head &
+ V2_DB_PRODUCER_IDX_M;
+ else
+ update_srq_db(srq);
+ }
+
+ spin_unlock_irqrestore(&srq->lock, flags);
+
+ return ret;
+}
+
+static u32 hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
+ unsigned long instance_stage,
+ unsigned long reset_stage)
+{
+ /* Once at least one hardware reset has completed, stop sending
+ * mailbox, CMQ and doorbell requests to the hardware. If we are in
+ * .init_instance(), or at the HNAE3_INIT_CLIENT stage of the soft
+ * reset process, exit with an error so that the HNAE3_INIT_CLIENT
+ * path can roll back its operations (e.g. notifying hardware to free
+ * resources) and tell the NIC driver to reschedule the soft reset
+ * process.
+ */
+ hr_dev->is_reset = true;
+ hr_dev->dis_db = true;
+
+ if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
+ instance_stage == HNS_ROCE_STATE_INIT)
+ return CMD_RST_PRC_EBUSY;
+
+ return CMD_RST_PRC_SUCCESS;
+}
+
+static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
+ unsigned long instance_stage,
+ unsigned long reset_stage)
+{
+#define HW_RESET_TIMEOUT_US 1000000
+#define HW_RESET_SLEEP_US 1000
+
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ unsigned long val;
+ int ret;
+
+ /* When a hardware reset is detected, stop sending mailbox, CMQ and
+ * doorbell requests to the hardware. If we are in .init_instance(),
+ * or at the HNAE3_INIT_CLIENT stage of the soft reset process, exit
+ * with an error so that the HNAE3_INIT_CLIENT path can roll back its
+ * operations (e.g. notifying hardware to free resources) and tell
+ * the NIC driver to reschedule the soft reset process.
+ */
+ hr_dev->dis_db = true;
+
+ ret = read_poll_timeout(ops->ae_dev_reset_cnt, val,
+ val > hr_dev->reset_cnt, HW_RESET_SLEEP_US,
+ HW_RESET_TIMEOUT_US, false, handle);
+ if (!ret)
+ hr_dev->is_reset = true;
+
+ if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
+ instance_stage == HNS_ROCE_STATE_INIT)
+ return CMD_RST_PRC_EBUSY;
+
+ return CMD_RST_PRC_SUCCESS;
+}
+
+static u32 hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ /* When a software reset is detected in .init_instance(), stop
+ * sending mailbox, CMQ and doorbell requests to the hardware and
+ * exit with an error.
+ */
+ hr_dev->dis_db = true;
+ if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
+ hr_dev->is_reset = true;
+
+ return CMD_RST_PRC_EBUSY;
+}
+
+static u32 check_aedev_reset_status(struct hns_roce_dev *hr_dev,
+ struct hnae3_handle *handle)
+{
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ unsigned long instance_stage; /* the current instance stage */
+ unsigned long reset_stage; /* the current reset stage */
+ unsigned long reset_cnt;
+ bool sw_resetting;
+ bool hw_resetting;
+
+ /* Get reset information from the NIC driver or from the RoCE driver
+ * itself. The variables obtained from the NIC driver mean:
+ * reset_cnt -- the number of completed hardware resets.
+ * hw_resetting -- whether the hardware device is resetting now.
+ * sw_resetting -- whether the NIC's software reset is running now.
+ */
+ instance_stage = handle->rinfo.instance_state;
+ reset_stage = handle->rinfo.reset_state;
+ reset_cnt = ops->ae_dev_reset_cnt(handle);
+ if (reset_cnt != hr_dev->reset_cnt)
+ return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
+ reset_stage);
+
+ hw_resetting = ops->get_cmdq_stat(handle);
+ if (hw_resetting)
+ return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
+ reset_stage);
+
+ sw_resetting = ops->ae_dev_resetting(handle);
+ if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
+ return hns_roce_v2_cmd_sw_resetting(hr_dev);
+
+ return CMD_RST_PRC_OTHERS;
+}
+
+static bool check_device_is_in_reset(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (hr_dev->reset_cnt != ops->ae_dev_reset_cnt(handle))
+ return true;
+
+ if (ops->get_hw_reset_stat(handle))
+ return true;
+
+ if (ops->ae_dev_resetting(handle))
+ return true;
+
+ return false;
+}
+
+static bool v2_chk_mbox_is_avail(struct hns_roce_dev *hr_dev, bool *busy)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ u32 status;
+
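+ /* Return true when mailbox/CMQ commands can be issued; *busy tells
+ * the caller whether a reset is still in progress.
+ */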
+ if (hr_dev->is_reset)
+ status = CMD_RST_PRC_SUCCESS;
+ else
+ status = check_aedev_reset_status(hr_dev, priv->handle);
+
+ *busy = (status == CMD_RST_PRC_EBUSY);
+
+ return status == CMD_RST_PRC_OTHERS;
+}
+
+static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_v2_cmq_ring *ring)
+{
+ int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);
+
+ ring->desc = dma_alloc_coherent(hr_dev->dev, size,
+ &ring->desc_dma_addr, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_v2_cmq_ring *ring)
+{
+ dma_free_coherent(hr_dev->dev,
+ ring->desc_num * sizeof(struct hns_roce_cmq_desc),
+ ring->desc, ring->desc_dma_addr);
+
+ ring->desc_dma_addr = 0;
+}
+
+static int init_csq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_v2_cmq_ring *csq)
+{
+ dma_addr_t dma;
+ int ret;
+
+ csq->desc_num = CMD_CSQ_DESC_NUM;
+ spin_lock_init(&csq->lock);
+ csq->flag = TYPE_CSQ;
+ csq->head = 0;
+
+ ret = hns_roce_alloc_cmq_desc(hr_dev, csq);
+ if (ret)
+ return ret;
+
+ dma = csq->desc_dma_addr;
+ roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, lower_32_bits(dma));
+ roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG, upper_32_bits(dma));
+ roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
+ (u32)csq->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
+
+ /* Make sure to write CI first and then PI */
+ roce_write(hr_dev, ROCEE_TX_CMQ_CI_REG, 0);
+ roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, 0);
+
+ return 0;
+}
+
+static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ int ret;
+
+ priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;
+
+ ret = init_csq(hr_dev, &priv->cmq.csq);
+ if (ret)
+ dev_err(hr_dev->dev, "failed to init CSQ, ret = %d.\n", ret);
+
+ return ret;
+}
+
+static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+
+ hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
+}
+
+static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
+ enum hns_roce_opcode_type opcode,
+ bool is_read)
+{
+ memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN);
+ if (is_read)
+ desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
+ else
+ desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
+}
+
+static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
+{
+ u32 tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+
+ return tail == priv->cmq.csq.head;
+}
+
+static void update_cmdq_status(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hnae3_handle *handle = priv->handle;
+
+ if (handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT ||
+ handle->rinfo.instance_state == HNS_ROCE_STATE_INIT)
+ hr_dev->cmd.state = HNS_ROCE_CMDQ_STATE_FATAL_ERR;
+}
+
+static int hns_roce_cmd_err_convert_errno(u16 desc_ret)
+{
+ struct hns_roce_cmd_errcode errcode_table[] = {
+ {CMD_EXEC_SUCCESS, 0},
+ {CMD_NO_AUTH, -EPERM},
+ {CMD_NOT_EXIST, -EOPNOTSUPP},
+ {CMD_CRQ_FULL, -EXFULL},
+ {CMD_NEXT_ERR, -ENOSR},
+ {CMD_NOT_EXEC, -ENOTBLK},
+ {CMD_PARA_ERR, -EINVAL},
+ {CMD_RESULT_ERR, -ERANGE},
+ {CMD_TIMEOUT, -ETIME},
+ {CMD_HILINK_ERR, -ENOLINK},
+ {CMD_INFO_ILLEGAL, -ENXIO},
+ {CMD_INVALID, -EBADR},
+ };
+ u16 i;
+
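+ /* Map firmware return codes to errno values; unknown codes fall
+ * back to -EIO.
+ */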
+ for (i = 0; i < ARRAY_SIZE(errcode_table); i++)
+ if (desc_ret == errcode_table[i].return_status)
+ return errcode_table[i].errno;
+ return -EIO;
+}
+
+static u32 hns_roce_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
+{
+ static const struct hns_roce_cmdq_tx_timeout_map cmdq_tx_timeout[] = {
+ {HNS_ROCE_OPC_POST_MB, HNS_ROCE_OPC_POST_MB_TIMEOUT},
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cmdq_tx_timeout); i++)
+ if (cmdq_tx_timeout[i].opcode == opcode)
+ return cmdq_tx_timeout[i].tx_timeout;
+
+ return tx_timeout;
+}
+
+static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u32 tx_timeout)
+{
+ u32 timeout = 0;
+
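+ /* Poll in 1us steps until the hardware consumer index catches up
+ * with the software producer index or the timeout expires.
+ */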
+ do {
+ if (hns_roce_cmq_csq_done(hr_dev))
+ break;
+ udelay(1);
+ } while (++timeout < tx_timeout);
+}
+
+static int __hns_roce_cmq_send_one(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc,
+ int num, u32 tx_timeout)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
+ u16 desc_ret;
+ u32 tail;
+ int ret;
+ int i;
+
+ tail = csq->head;
+
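+ /* Copy the descriptors into the CSQ ring, wrapping at desc_num */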
+ for (i = 0; i < num; i++) {
+ trace_hns_cmdq_req(hr_dev, &desc[i]);
+
+ csq->desc[csq->head++] = desc[i];
+ if (csq->head == csq->desc_num)
+ csq->head = 0;
+ }
+
+ /* Write to hardware */
+ roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, csq->head);
+
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_CNT]);
+
+ hns_roce_wait_csq_done(hr_dev, tx_timeout);
+ if (hns_roce_cmq_csq_done(hr_dev)) {
+ ret = 0;
+ for (i = 0; i < num; i++) {
+ trace_hns_cmdq_resp(hr_dev, &csq->desc[tail]);
+
+ /* check the result of hardware write back */
+ desc_ret = le16_to_cpu(csq->desc[tail++].retval);
+ if (tail == csq->desc_num)
+ tail = 0;
+ if (likely(desc_ret == CMD_EXEC_SUCCESS))
+ continue;
+
+ ret = hns_roce_cmd_err_convert_errno(desc_ret);
+ }
+ } else {
+ /* FW/HW reset or incorrect number of desc */
+ tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
+ dev_warn(hr_dev->dev, "CMDQ move tail from %u to %u.\n",
+ csq->head, tail);
+ csq->head = tail;
+
+ update_cmdq_status(hr_dev);
+
+ ret = -EAGAIN;
+ }
+
+ if (ret)
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_ERR_CNT]);
+
+ return ret;
+}
+
+static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc, int num)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
+ u16 opcode = le16_to_cpu(desc->opcode);
+ u32 tx_timeout = hns_roce_cmdq_tx_timeout(opcode, priv->cmq.tx_timeout);
+ u8 try_cnt = HNS_ROCE_OPC_POST_MB_TRY_CNT;
+ u32 rsv_tail;
+ int ret;
+ int i;
+
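+ /* Only mailbox posts (HNS_ROCE_OPC_POST_MB) that time out are
+ * retried; all other opcodes get a single attempt.
+ */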
+ while (try_cnt) {
+ try_cnt--;
+
+ spin_lock_bh(&csq->lock);
+ rsv_tail = csq->head;
+ ret = __hns_roce_cmq_send_one(hr_dev, desc, num, tx_timeout);
+ if (opcode == HNS_ROCE_OPC_POST_MB && ret == -ETIME &&
+ try_cnt) {
+ spin_unlock_bh(&csq->lock);
+ mdelay(HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC);
+ continue;
+ }
+
+ for (i = 0; i < num; i++) {
+ desc[i] = csq->desc[rsv_tail++];
+ if (rsv_tail == csq->desc_num)
+ rsv_tail = 0;
+ }
+ spin_unlock_bh(&csq->lock);
+ break;
+ }
+
+ if (ret)
+ dev_err_ratelimited(hr_dev->dev,
+ "Cmdq IO error, opcode = 0x%x, return = %d.\n",
+ opcode, ret);
+
+ return ret;
+}
+
+static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc, int num)
+{
+ bool busy;
+ int ret;
+
+ if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR)
+ return -EIO;
+
+ if (!v2_chk_mbox_is_avail(hr_dev, &busy))
+ return busy ? -EBUSY : 0;
+
+ ret = __hns_roce_cmq_send(hr_dev, desc, num);
+ if (ret) {
+ if (!v2_chk_mbox_is_avail(hr_dev, &busy))
+ return busy ? -EBUSY : 0;
+ }
+
+ return ret;
+}
+
+static int config_hem_ba_to_hw(struct hns_roce_dev *hr_dev,
+ dma_addr_t base_addr, u8 cmd, unsigned long tag)
+{
+ struct hns_roce_cmd_mailbox *mbox;
+ int ret;
+
+ mbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mbox))
+ return PTR_ERR(mbox);
+
+ ret = hns_roce_cmd_mbox(hr_dev, base_addr, mbox->dma, cmd, tag);
+ hns_roce_free_cmd_mailbox(hr_dev, mbox);
+ return ret;
+}
+
+static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_query_version *resp;
+ struct hns_roce_cmq_desc desc;
+ int ret;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret)
+ return ret;
+
+ resp = (struct hns_roce_query_version *)desc.data;
+ hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version);
+ hr_dev->vendor_id = hr_dev->pci_dev->vendor;
+
+ return 0;
+}
+
+static void func_clr_hw_resetting_state(struct hns_roce_dev *hr_dev,
+ struct hnae3_handle *handle)
+{
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ unsigned long end;
+
+ hr_dev->dis_db = true;
+
+ dev_warn(hr_dev->dev,
+ "func clear is pending, device in resetting state.\n");
+ end = HNS_ROCE_V2_HW_RST_TIMEOUT;
+ while (end) {
+ if (!ops->get_hw_reset_stat(handle)) {
+ hr_dev->is_reset = true;
+ dev_info(hr_dev->dev,
+ "func clear success after reset.\n");
+ return;
+ }
+ msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
+ end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
+ }
+
+ dev_warn(hr_dev->dev, "func clear failed.\n");
+}
+
+static void func_clr_sw_resetting_state(struct hns_roce_dev *hr_dev,
+ struct hnae3_handle *handle)
+{
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ unsigned long end;
+
+ hr_dev->dis_db = true;
+
+ dev_warn(hr_dev->dev,
+ "func clear is pending, device in resetting state.\n");
+ end = HNS_ROCE_V2_HW_RST_TIMEOUT;
+ while (end) {
+ if (ops->ae_dev_reset_cnt(handle) !=
+ hr_dev->reset_cnt) {
+ hr_dev->is_reset = true;
+ dev_info(hr_dev->dev,
+ "func clear success after sw reset\n");
+ return;
+ }
+ msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
+ end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
+ }
+
+ dev_warn(hr_dev->dev, "func clear failed because of unfinished sw reset\n");
+}
+
+static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
+ int flag)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt) {
+ hr_dev->dis_db = true;
+ hr_dev->is_reset = true;
+ dev_info(hr_dev->dev, "func clear success after reset.\n");
+ return;
+ }
+
+ if (ops->get_hw_reset_stat(handle)) {
+ func_clr_hw_resetting_state(hr_dev, handle);
+ return;
+ }
+
+ if (ops->ae_dev_resetting(handle) &&
+ handle->rinfo.instance_state == HNS_ROCE_STATE_INIT) {
+ func_clr_sw_resetting_state(hr_dev, handle);
+ return;
+ }
+
+ if (retval && !flag)
+ dev_warn(hr_dev->dev,
+ "func clear read failed, ret = %d.\n", retval);
+
+ dev_warn(hr_dev->dev, "func clear failed.\n");
+}
+
+static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id)
+{
+ bool fclr_write_fail_flag = false;
+ struct hns_roce_func_clear *resp;
+ struct hns_roce_cmq_desc desc;
+ unsigned long end;
+ int ret = 0;
+
+ if (check_device_is_in_reset(hr_dev))
+ goto out;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
+ resp = (struct hns_roce_func_clear *)desc.data;
+ resp->rst_funcid_en = cpu_to_le32(vf_id);
+
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret) {
+ fclr_write_fail_flag = true;
+ dev_err(hr_dev->dev, "func clear write failed, ret = %d.\n",
+ ret);
+ goto out;
+ }
+
+ msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
+ end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
+ while (end) {
+ if (check_device_is_in_reset(hr_dev))
+ goto out;
+ msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
+ end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
+ true);
+
+ resp->rst_funcid_en = cpu_to_le32(vf_id);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret)
+ continue;
+
+ if (hr_reg_read(resp, FUNC_CLEAR_RST_FUN_DONE)) {
+ if (vf_id == 0)
+ hr_dev->is_reset = true;
+ return;
+ }
+ }
+
+out:
+ hns_roce_func_clr_rst_proc(hr_dev, ret, fclr_write_fail_flag);
+}
+
+static int hns_roce_free_vf_resource(struct hns_roce_dev *hr_dev, int vf_id)
+{
+ enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
+ struct hns_roce_cmq_desc desc[2];
+ struct hns_roce_cmq_req *req_a;
+
+ req_a = (struct hns_roce_cmq_req *)desc[0].data;
+ hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
+ desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
+ hr_reg_write(req_a, FUNC_RES_A_VF_ID, vf_id);
+
+ return hns_roce_cmq_send(hr_dev, desc, 2);
+}
+
+static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
+{
+ int ret;
+ int i;
+
+ if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR)
+ return;
+
+ for (i = hr_dev->func_num - 1; i >= 0; i--) {
+ __hns_roce_function_clear(hr_dev, i);
+
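+ /* Only VFs (i > 0) have dedicated resources to free; skip the PF */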
+ if (i == 0)
+ continue;
+
+ ret = hns_roce_free_vf_resource(hr_dev, i);
+ if (ret)
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to free vf resource, vf_id = %d, ret = %d.\n",
+ i, ret);
+ }
+}
+
+static int hns_roce_clear_extdb_list_info(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmq_desc desc;
+ int ret;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLEAR_EXTDB_LIST_INFO,
+ false);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret)
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to clear extended doorbell info, ret = %d.\n",
+ ret);
+
+ return ret;
+}
+
+static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_query_fw_info *resp;
+ struct hns_roce_cmq_desc desc;
+ int ret;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret)
+ return ret;
+
+ resp = (struct hns_roce_query_fw_info *)desc.data;
+ hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));
+
+ return 0;
+}
+
+static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmq_desc desc;
+ int ret;
+
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+ hr_dev->func_num = 1;
+ return 0;
+ }
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_FUNC_INFO,
+ true);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret) {
+ hr_dev->func_num = 1;
+ return ret;
+ }
+
+ hr_dev->func_num = le32_to_cpu(desc.func_info.own_func_num);
+ hr_dev->cong_algo_tmpl_id = le32_to_cpu(desc.func_info.own_mac_id);
+
+ return 0;
+}
+
+static int hns_roce_hw_v2_query_counter(struct hns_roce_dev *hr_dev,
+ u64 *stats, u32 port, int *num_counters)
+{
+#define CNT_PER_DESC 3
+ struct hns_roce_cmq_desc *desc;
+ int bd_idx, cnt_idx;
+ __le64 *cnt_data;
+ int desc_num;
+ int ret;
+ int i;
+
+ if (port > hr_dev->caps.num_ports)
+ return -EINVAL;
+
+ desc_num = DIV_ROUND_UP(HNS_ROCE_HW_CNT_TOTAL, CNT_PER_DESC);
+ desc = kcalloc(desc_num, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ for (i = 0; i < desc_num; i++) {
+ hns_roce_cmq_setup_basic_desc(&desc[i],
+ HNS_ROCE_OPC_QUERY_COUNTER, true);
+ if (i != desc_num - 1)
+ desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ }
+
+ ret = hns_roce_cmq_send(hr_dev, desc, desc_num);
+ if (ret) {
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to get counter, ret = %d.\n", ret);
+ goto err_out;
+ }
+
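+ /* Each descriptor carries CNT_PER_DESC 64-bit counters; stop early
+ * if an intermediate descriptor is missing the NEXT flag.
+ */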
+ for (i = 0; i < HNS_ROCE_HW_CNT_TOTAL && i < *num_counters; i++) {
+ bd_idx = i / CNT_PER_DESC;
+ if (bd_idx != HNS_ROCE_HW_CNT_TOTAL / CNT_PER_DESC &&
+ !(desc[bd_idx].flag & cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT)))
+ break;
+
+ cnt_data = (__le64 *)&desc[bd_idx].data[0];
+ cnt_idx = i % CNT_PER_DESC;
+ stats[i] = le64_to_cpu(cnt_data[cnt_idx]);
+ }
+ *num_counters = i;
+
+err_out:
+ kfree(desc);
+ return ret;
+}
+
+static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+ u32 clock_cycles_of_1us;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
+ false);
+
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+ clock_cycles_of_1us = HNS_ROCE_1NS_CFG;
+ else
+ clock_cycles_of_1us = HNS_ROCE_1US_CFG;
+
+ hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, clock_cycles_of_1us);
+ hr_reg_write(req, CFG_GLOBAL_PARAM_UDP_PORT, ROCE_V2_UDP_DPORT);
+
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
+static int load_func_res_caps(struct hns_roce_dev *hr_dev, bool is_vf)
+{
+ struct hns_roce_cmq_desc desc[2];
+ struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
+ struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
+ struct hns_roce_caps *caps = &hr_dev->caps;
+ enum hns_roce_opcode_type opcode;
+ u32 func_num;
+ int ret;
+
+ if (is_vf) {
+ opcode = HNS_ROCE_OPC_QUERY_VF_RES;
+ func_num = 1;
+ } else {
+ opcode = HNS_ROCE_OPC_QUERY_PF_RES;
+ func_num = hr_dev->func_num;
+ }
+
+ hns_roce_cmq_setup_basic_desc(&desc[0], opcode, true);
+ desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ hns_roce_cmq_setup_basic_desc(&desc[1], opcode, true);
+
+ ret = hns_roce_cmq_send(hr_dev, desc, 2);
+ if (ret)
+ return ret;
+
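+ /* The reported resource counts cover all functions, so divide by
+ * func_num to get this function's share.
+ */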
+ caps->qpc_bt_num = hr_reg_read(r_a, FUNC_RES_A_QPC_BT_NUM) / func_num;
+ caps->srqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_SRQC_BT_NUM) / func_num;
+ caps->cqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_CQC_BT_NUM) / func_num;
+ caps->mpt_bt_num = hr_reg_read(r_a, FUNC_RES_A_MPT_BT_NUM) / func_num;
+ caps->eqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_EQC_BT_NUM) / func_num;
+ caps->smac_bt_num = hr_reg_read(r_b, FUNC_RES_B_SMAC_NUM) / func_num;
+ caps->sgid_bt_num = hr_reg_read(r_b, FUNC_RES_B_SGID_NUM) / func_num;
+ caps->sccc_bt_num = hr_reg_read(r_b, FUNC_RES_B_SCCC_BT_NUM) / func_num;
+
+ if (is_vf) {
+ caps->sl_num = hr_reg_read(r_b, FUNC_RES_V_QID_NUM) / func_num;
+ caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_V_GMV_BT_NUM) /
+ func_num;
+ } else {
+ caps->sl_num = hr_reg_read(r_b, FUNC_RES_B_QID_NUM) / func_num;
+ caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_B_GMV_BT_NUM) /
+ func_num;
+ }
+
+ return 0;
+}
+
+static int load_pf_timer_res_caps(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+ struct hns_roce_caps *caps = &hr_dev->caps;
+ int ret;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
+ true);
+
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret)
+ return ret;
+
+ caps->qpc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_QPC_ITEM_NUM);
+ caps->cqc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_CQC_ITEM_NUM);
+
+ return 0;
+}
+
+static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ ret = load_func_res_caps(hr_dev, false);
+ if (ret) {
+ dev_err(dev, "failed to load pf res caps, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = load_pf_timer_res_caps(hr_dev);
+ if (ret)
+ dev_err(dev, "failed to load pf timer resource, ret = %d.\n",
+ ret);
+
+ return ret;
+}
+
+static int hns_roce_query_vf_resource(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ ret = load_func_res_caps(hr_dev, true);
+ if (ret)
+ dev_err(dev, "failed to load vf res caps, ret = %d.\n", ret);
+
+ return ret;
+}
+
+static int __hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
+ u32 vf_id)
+{
+ struct hns_roce_vf_switch *swt;
+ struct hns_roce_cmq_desc desc;
+ int ret;
+
+ swt = (struct hns_roce_vf_switch *)desc.data;
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
+ swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
+ hr_reg_write(swt, VF_SWITCH_VF_ID, vf_id);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret)
+ return ret;
+
+ desc.flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN);
+ desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
+ hr_reg_enable(swt, VF_SWITCH_ALW_LPBK);
+ hr_reg_clear(swt, VF_SWITCH_ALW_LCL_LPBK);
+ hr_reg_enable(swt, VF_SWITCH_ALW_DST_OVRD);
+
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
+static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev)
+{
+ u32 vf_id;
+ int ret;
+
+ for (vf_id = 0; vf_id < hr_dev->func_num; vf_id++) {
+ ret = __hns_roce_set_vf_switch_param(hr_dev, vf_id);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int config_vf_hem_resource(struct hns_roce_dev *hr_dev, int vf_id)
+{
+ struct hns_roce_cmq_desc desc[2];
+ struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
+ struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
+ enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
+ struct hns_roce_caps *caps = &hr_dev->caps;
+
+ hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
+ desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
+
+ hr_reg_write(r_a, FUNC_RES_A_VF_ID, vf_id);
+
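+ /* Give each function an equal slice of every context BT: the count
+ * comes from caps and the start index is vf_id * count.
+ */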
+ hr_reg_write(r_a, FUNC_RES_A_QPC_BT_NUM, caps->qpc_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_QPC_BT_IDX, vf_id * caps->qpc_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_NUM, caps->srqc_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_IDX, vf_id * caps->srqc_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_CQC_BT_NUM, caps->cqc_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_CQC_BT_IDX, vf_id * caps->cqc_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_MPT_BT_NUM, caps->mpt_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_MPT_BT_IDX, vf_id * caps->mpt_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_EQC_BT_NUM, caps->eqc_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_EQC_BT_IDX, vf_id * caps->eqc_bt_num);
+ hr_reg_write(r_b, FUNC_RES_V_QID_NUM, caps->sl_num);
+ hr_reg_write(r_b, FUNC_RES_B_QID_IDX, vf_id * caps->sl_num);
+ hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_NUM, caps->sccc_bt_num);
+ hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_IDX, vf_id * caps->sccc_bt_num);
+
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+ hr_reg_write(r_b, FUNC_RES_V_GMV_BT_NUM, caps->gmv_bt_num);
+ hr_reg_write(r_b, FUNC_RES_B_GMV_BT_IDX,
+ vf_id * caps->gmv_bt_num);
+ } else {
+ hr_reg_write(r_b, FUNC_RES_B_SGID_NUM, caps->sgid_bt_num);
+ hr_reg_write(r_b, FUNC_RES_B_SGID_IDX,
+ vf_id * caps->sgid_bt_num);
+ hr_reg_write(r_b, FUNC_RES_B_SMAC_NUM, caps->smac_bt_num);
+ hr_reg_write(r_b, FUNC_RES_B_SMAC_IDX,
+ vf_id * caps->smac_bt_num);
+ }
+
+ return hns_roce_cmq_send(hr_dev, desc, 2);
+}
+
+static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
+{
+ u32 func_num = max_t(u32, 1, hr_dev->func_num);
+ u32 vf_id;
+ int ret;
+
+ for (vf_id = 0; vf_id < func_num; vf_id++) {
+ ret = config_vf_hem_resource(hr_dev, vf_id);
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "failed to config vf-%u hem res, ret = %d.\n",
+ vf_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+ struct hns_roce_caps *caps = &hr_dev->caps;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
+
+ hr_reg_write(req, CFG_BT_ATTR_QPC_BA_PGSZ,
+ caps->qpc_ba_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_QPC_BUF_PGSZ,
+ caps->qpc_buf_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_QPC_HOPNUM,
+ to_hr_hem_hopnum(caps->qpc_hop_num, caps->num_qps));
+
+ hr_reg_write(req, CFG_BT_ATTR_SRQC_BA_PGSZ,
+ caps->srqc_ba_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_SRQC_BUF_PGSZ,
+ caps->srqc_buf_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_SRQC_HOPNUM,
+ to_hr_hem_hopnum(caps->srqc_hop_num, caps->num_srqs));
+
+ hr_reg_write(req, CFG_BT_ATTR_CQC_BA_PGSZ,
+ caps->cqc_ba_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_CQC_BUF_PGSZ,
+ caps->cqc_buf_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_CQC_HOPNUM,
+ to_hr_hem_hopnum(caps->cqc_hop_num, caps->num_cqs));
+
+ hr_reg_write(req, CFG_BT_ATTR_MPT_BA_PGSZ,
+ caps->mpt_ba_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_MPT_BUF_PGSZ,
+ caps->mpt_buf_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_MPT_HOPNUM,
+ to_hr_hem_hopnum(caps->mpt_hop_num, caps->num_mtpts));
+
+ hr_reg_write(req, CFG_BT_ATTR_SCCC_BA_PGSZ,
+ caps->sccc_ba_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_SCCC_BUF_PGSZ,
+ caps->sccc_buf_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_SCCC_HOPNUM,
+ to_hr_hem_hopnum(caps->sccc_hop_num, caps->num_qps));
+
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
+static void calc_pg_sz(u32 obj_num, u32 obj_size, u32 hop_num, u32 ctx_bt_num,
+ u32 *buf_page_size, u32 *bt_page_size, u32 hem_type)
+{
+ u64 obj_per_chunk;
+ u64 bt_chunk_size = PAGE_SIZE;
+ u64 buf_chunk_size = PAGE_SIZE;
+ u64 obj_per_chunk_default = buf_chunk_size / obj_size;
+
+ *buf_page_size = 0;
+ *bt_page_size = 0;
+
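+ /* One BT chunk holds bt_chunk_size / BA_BYTE_LEN base addresses, so
+ * each extra hop multiplies the addressable objects by that factor.
+ */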
+ switch (hop_num) {
+ case 3:
+ obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
+ (bt_chunk_size / BA_BYTE_LEN) *
+ (bt_chunk_size / BA_BYTE_LEN) *
+ obj_per_chunk_default;
+ break;
+ case 2:
+ obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
+ (bt_chunk_size / BA_BYTE_LEN) *
+ obj_per_chunk_default;
+ break;
+ case 1:
+ obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
+ obj_per_chunk_default;
+ break;
+ case HNS_ROCE_HOP_NUM_0:
+ obj_per_chunk = ctx_bt_num * obj_per_chunk_default;
+ break;
+ default:
+ pr_err("table %u not support hop_num = %u!\n", hem_type,
+ hop_num);
+ return;
+ }
+
+ if (hem_type >= HEM_TYPE_MTT)
+ *bt_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
+ else
+ *buf_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
+}
+
+static void set_hem_page_size(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_caps *caps = &hr_dev->caps;
+
+ /* EQ */
+ caps->eqe_ba_pg_sz = 0;
+ caps->eqe_buf_pg_sz = 0;
+
+ /* Link Table */
+ caps->llm_buf_pg_sz = 0;
+
+ /* MR */
+ caps->mpt_ba_pg_sz = 0;
+ caps->mpt_buf_pg_sz = 0;
+ caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
+ caps->pbl_buf_pg_sz = 0;
+ calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
+ caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
+ HEM_TYPE_MTPT);
+
+ /* QP */
+ caps->qpc_ba_pg_sz = 0;
+ caps->qpc_buf_pg_sz = 0;
+ caps->qpc_timer_ba_pg_sz = 0;
+ caps->qpc_timer_buf_pg_sz = 0;
+ caps->sccc_ba_pg_sz = 0;
+ caps->sccc_buf_pg_sz = 0;
+ caps->mtt_ba_pg_sz = 0;
+ caps->mtt_buf_pg_sz = 0;
+ calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
+ caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
+ HEM_TYPE_QPC);
+
+ if (caps->flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
+ calc_pg_sz(caps->num_qps, caps->sccc_sz, caps->sccc_hop_num,
+ caps->sccc_bt_num, &caps->sccc_buf_pg_sz,
+ &caps->sccc_ba_pg_sz, HEM_TYPE_SCCC);
+
+ /* CQ */
+ caps->cqc_ba_pg_sz = 0;
+ caps->cqc_buf_pg_sz = 0;
+ caps->cqc_timer_ba_pg_sz = 0;
+ caps->cqc_timer_buf_pg_sz = 0;
+ caps->cqe_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_256K;
+ caps->cqe_buf_pg_sz = 0;
+ calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
+ caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
+ HEM_TYPE_CQC);
+ calc_pg_sz(caps->max_cqes, caps->cqe_sz, caps->cqe_hop_num,
+ 1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
+
+ /* SRQ */
+ if (caps->flags & HNS_ROCE_CAP_FLAG_SRQ) {
+ caps->srqc_ba_pg_sz = 0;
+ caps->srqc_buf_pg_sz = 0;
+ caps->srqwqe_ba_pg_sz = 0;
+ caps->srqwqe_buf_pg_sz = 0;
+ caps->idx_ba_pg_sz = 0;
+ caps->idx_buf_pg_sz = 0;
+ calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz,
+ caps->srqc_hop_num, caps->srqc_bt_num,
+ &caps->srqc_buf_pg_sz, &caps->srqc_ba_pg_sz,
+ HEM_TYPE_SRQC);
+ calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
+ caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
+ &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
+ calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz,
+ caps->idx_hop_num, 1, &caps->idx_buf_pg_sz,
+ &caps->idx_ba_pg_sz, HEM_TYPE_IDX);
+ }
+
+ /* GMV */
+ caps->gmv_ba_pg_sz = 0;
+ caps->gmv_buf_pg_sz = 0;
+}
+
+/* Apply all loaded caps before setting to hardware */
+static void apply_func_caps(struct hns_roce_dev *hr_dev)
+{
+#define MAX_GID_TBL_LEN 256
+ struct hns_roce_caps *caps = &hr_dev->caps;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+
+ /* The following configurations do not need to be queried from firmware. */
+ caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
+ caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
+ caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
+
+ caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
+ caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
+ caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
+
+ caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
+ caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
+
+ if (!caps->num_comp_vectors)
+ caps->num_comp_vectors =
+ min_t(u32, caps->eqc_bt_num - HNS_ROCE_V2_AEQE_VEC_NUM,
+ (u32)priv->handle->rinfo.num_vectors -
+ (HNS_ROCE_V2_AEQE_VEC_NUM + HNS_ROCE_V2_ABNORMAL_VEC_NUM));
+
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+ caps->eqe_hop_num = HNS_ROCE_V3_EQE_HOP_NUM;
+ caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
+ caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
+
+ /* The following configurations will be overwritten */
+ caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
+ caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
+ caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ;
+
+ /* The following configurations are not obtained from firmware */
+ caps->gmv_entry_sz = HNS_ROCE_V3_GMV_ENTRY_SZ;
+
+ caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
+
+ /* It is meaningless to support an excessively large gid_table_len,
+ * as sgid_index in the kernel's struct ib_global_route and in the
+ * userspace struct ibv_global_route is a u8/uint8_t (0-255).
+ */
+ caps->gid_table_len[0] = min_t(u32, MAX_GID_TBL_LEN,
+ caps->gmv_bt_num *
+ (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz));
+
+ caps->gmv_entry_num = caps->gmv_bt_num * (HNS_HW_PAGE_SIZE /
+ caps->gmv_entry_sz);
+ } else {
+ u32 func_num = max_t(u32, 1, hr_dev->func_num);
+
+ caps->eqe_hop_num = HNS_ROCE_V2_EQE_HOP_NUM;
+ caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
+ caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
+ caps->gid_table_len[0] /= func_num;
+ }
+
+ if (hr_dev->is_vf) {
+ caps->default_aeq_arm_st = 0x3;
+ caps->default_ceq_arm_st = 0x3;
+ caps->default_ceq_max_cnt = 0x1;
+ caps->default_ceq_period = 0x10;
+ caps->default_aeq_max_cnt = 0x1;
+ caps->default_aeq_period = 0x10;
+ }
+
+ set_hem_page_size(hr_dev);
+}
+
+static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM] = {};
+ struct hns_roce_caps *caps = &hr_dev->caps;
+ struct hns_roce_query_pf_caps_a *resp_a;
+ struct hns_roce_query_pf_caps_b *resp_b;
+ struct hns_roce_query_pf_caps_c *resp_c;
+ struct hns_roce_query_pf_caps_d *resp_d;
+ struct hns_roce_query_pf_caps_e *resp_e;
+ struct hns_roce_query_pf_caps_f *resp_f;
+ enum hns_roce_opcode_type cmd;
+ int ctx_hop_num;
+ int pbl_hop_num;
+ int cmd_num;
+ int ret;
+ int i;
+
+ cmd = hr_dev->is_vf ? HNS_ROCE_OPC_QUERY_VF_CAPS_NUM :
+ HNS_ROCE_OPC_QUERY_PF_CAPS_NUM;
+ cmd_num = hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ?
+ HNS_ROCE_QUERY_PF_CAPS_CMD_NUM_HIP08 :
+ HNS_ROCE_QUERY_PF_CAPS_CMD_NUM;
+
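+ /* Chain all but the last descriptor with the NEXT flag so they are
+ * handled as one query.
+ */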
+ for (i = 0; i < cmd_num - 1; i++) {
+ hns_roce_cmq_setup_basic_desc(&desc[i], cmd, true);
+ desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ }
+
+ hns_roce_cmq_setup_basic_desc(&desc[cmd_num - 1], cmd, true);
+ desc[cmd_num - 1].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+
+ ret = hns_roce_cmq_send(hr_dev, desc, cmd_num);
+ if (ret)
+ return ret;
+
+ resp_a = (struct hns_roce_query_pf_caps_a *)desc[0].data;
+ resp_b = (struct hns_roce_query_pf_caps_b *)desc[1].data;
+ resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data;
+ resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
+ resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;
+ resp_f = (struct hns_roce_query_pf_caps_f *)desc[5].data;
+
+ caps->local_ca_ack_delay = resp_a->local_ca_ack_delay;
+ caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg);
+ caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline);
+ caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
+ caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
+ caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
+ caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
+ caps->num_aeq_vectors = resp_a->num_aeq_vectors;
+ caps->num_other_vectors = resp_a->num_other_vectors;
+ caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
+ caps->max_rq_desc_sz = resp_a->max_rq_desc_sz;
+
+ caps->mtpt_entry_sz = resp_b->mtpt_entry_sz;
+ caps->irrl_entry_sz = resp_b->irrl_entry_sz;
+ caps->trrl_entry_sz = resp_b->trrl_entry_sz;
+ caps->cqc_entry_sz = resp_b->cqc_entry_sz;
+ caps->srqc_entry_sz = resp_b->srqc_entry_sz;
+ caps->idx_entry_sz = resp_b->idx_entry_sz;
+ caps->sccc_sz = resp_b->sccc_sz;
+ caps->max_mtu = resp_b->max_mtu;
+ caps->min_cqes = resp_b->min_cqes;
+ caps->min_wqes = resp_b->min_wqes;
+ caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap);
+ caps->pkey_table_len[0] = resp_b->pkey_table_len;
+ caps->phy_num_uars = resp_b->phy_num_uars;
+ ctx_hop_num = resp_b->ctx_hop_num;
+ pbl_hop_num = resp_b->pbl_hop_num;
+
+ caps->num_pds = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_PDS);
+
+ caps->flags = hr_reg_read(resp_c, PF_CAPS_C_CAP_FLAGS);
+ caps->flags |= le16_to_cpu(resp_d->cap_flags_ex) <<
+ HNS_ROCE_CAP_FLAGS_EX_SHIFT;
+
+ caps->num_cqs = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_CQS);
+ caps->gid_table_len[0] = hr_reg_read(resp_c, PF_CAPS_C_MAX_GID);
+ caps->max_cqes = 1 << hr_reg_read(resp_c, PF_CAPS_C_CQ_DEPTH);
+ caps->num_xrcds = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_XRCDS);
+ caps->num_mtpts = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_MRWS);
+ caps->num_qps = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_QPS);
+ caps->max_qp_init_rdma = hr_reg_read(resp_c, PF_CAPS_C_MAX_ORD);
+ caps->max_qp_dest_rdma = caps->max_qp_init_rdma;
+ caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth);
+
+ caps->num_srqs = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_SRQS);
+ caps->cong_cap = hr_reg_read(resp_d, PF_CAPS_D_CONG_CAP);
+ caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);
+ caps->ceqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_CEQ_DEPTH);
+ caps->num_comp_vectors = hr_reg_read(resp_d, PF_CAPS_D_NUM_CEQS);
+ caps->aeqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_AEQ_DEPTH);
+ caps->default_cong_type = hr_reg_read(resp_d, PF_CAPS_D_DEFAULT_ALG);
+ caps->reserved_pds = hr_reg_read(resp_d, PF_CAPS_D_RSV_PDS);
+ caps->num_uars = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_UARS);
+ caps->reserved_qps = hr_reg_read(resp_d, PF_CAPS_D_RSV_QPS);
+ caps->reserved_uars = hr_reg_read(resp_d, PF_CAPS_D_RSV_UARS);
+
+ caps->reserved_mrws = hr_reg_read(resp_e, PF_CAPS_E_RSV_MRWS);
+ caps->chunk_sz = 1 << hr_reg_read(resp_e, PF_CAPS_E_CHUNK_SIZE_SHIFT);
+ caps->reserved_cqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_CQS);
+ caps->reserved_xrcds = hr_reg_read(resp_e, PF_CAPS_E_RSV_XRCDS);
+ caps->reserved_srqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_SRQS);
+ caps->reserved_lkey = hr_reg_read(resp_e, PF_CAPS_E_RSV_LKEYS);
+
+ caps->max_ack_req_msg_len = le32_to_cpu(resp_f->max_ack_req_msg_len);
+
+ caps->qpc_hop_num = ctx_hop_num;
+ caps->sccc_hop_num = ctx_hop_num;
+ caps->srqc_hop_num = ctx_hop_num;
+ caps->cqc_hop_num = ctx_hop_num;
+ caps->mpt_hop_num = ctx_hop_num;
+ caps->mtt_hop_num = pbl_hop_num;
+ caps->cqe_hop_num = pbl_hop_num;
+ caps->srqwqe_hop_num = pbl_hop_num;
+ caps->idx_hop_num = pbl_hop_num;
+ caps->wqe_sq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_SQWQE_HOP_NUM);
+ caps->wqe_sge_hop_num = hr_reg_read(resp_d, PF_CAPS_D_EX_SGE_HOP_NUM);
+ caps->wqe_rq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_RQWQE_HOP_NUM);
+
+ if (!(caps->page_size_cap & PAGE_SIZE))
+ caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
+
+ if (!hr_dev->is_vf) {
+ caps->cqe_sz = resp_a->cqe_sz;
+ caps->qpc_sz = le16_to_cpu(resp_b->qpc_sz);
+ caps->default_aeq_arm_st =
+ hr_reg_read(resp_d, PF_CAPS_D_AEQ_ARM_ST);
+ caps->default_ceq_arm_st =
+ hr_reg_read(resp_d, PF_CAPS_D_CEQ_ARM_ST);
+ caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt);
+ caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period);
+ caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt);
+ caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period);
+ }
+
+ return 0;
+}
+
+static int config_hem_entry_size(struct hns_roce_dev *hr_dev, u32 type, u32 val)
+{
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
+ false);
+
+ hr_reg_write(req, CFG_HEM_ENTRY_SIZE_TYPE, type);
+ hr_reg_write(req, CFG_HEM_ENTRY_SIZE_VALUE, val);
+
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
+static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_caps *caps = &hr_dev->caps;
+ int ret;
+
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+ return 0;
+
+ ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_QPC_SIZE,
+ caps->qpc_sz);
+ if (ret) {
+ dev_err(hr_dev->dev, "failed to cfg qpc sz, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_SCCC_SIZE,
+ caps->sccc_sz);
+ if (ret)
+ dev_err(hr_dev->dev, "failed to cfg sccc sz, ret = %d.\n", ret);
+
+ return ret;
+}
+
+static int hns_roce_v2_vf_profile(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ hr_dev->func_num = 1;
+
+ ret = hns_roce_query_caps(hr_dev);
+ if (ret) {
+ dev_err(dev, "failed to query VF caps, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hns_roce_query_vf_resource(hr_dev);
+ if (ret) {
+ dev_err(dev, "failed to query VF resource, ret = %d.\n", ret);
+ return ret;
+ }
+
+ apply_func_caps(hr_dev);
+
+ ret = hns_roce_v2_set_bt(hr_dev);
+ if (ret)
+ dev_err(dev, "failed to config VF BA table, ret = %d.\n", ret);
+
+ return ret;
+}
+
+static int hns_roce_v2_pf_profile(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ ret = hns_roce_query_func_info(hr_dev);
+ if (ret) {
+ dev_err(dev, "failed to query func info, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hns_roce_config_global_param(hr_dev);
+ if (ret) {
+ dev_err(dev, "failed to config global param, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hns_roce_set_vf_switch_param(hr_dev);
+ if (ret) {
+ dev_err(dev, "failed to set switch param, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hns_roce_query_caps(hr_dev);
+ if (ret) {
+ dev_err(dev, "failed to query PF caps, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hns_roce_query_pf_resource(hr_dev);
+ if (ret) {
+ dev_err(dev, "failed to query pf resource, ret = %d.\n", ret);
+ return ret;
+ }
+
+ apply_func_caps(hr_dev);
+
+ ret = hns_roce_alloc_vf_resource(hr_dev);
+ if (ret) {
+ dev_err(dev, "failed to alloc vf resource, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hns_roce_v2_set_bt(hr_dev);
+ if (ret) {
+ dev_err(dev, "failed to config BA table, ret = %d.\n", ret);
+ return ret;
+ }
+
+ /* Configure the size of QPC, SCCC, etc. */
+ return hns_roce_config_entry_size(hr_dev);
+}
+
+static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ ret = hns_roce_cmq_query_hw_info(hr_dev);
+ if (ret) {
+ dev_err(dev, "failed to query hardware info, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hns_roce_query_fw_ver(hr_dev);
+ if (ret) {
+ dev_err(dev, "failed to query firmware info, ret = %d.\n", ret);
+ return ret;
+ }
+
+ hr_dev->vendor_part_id = hr_dev->pci_dev->device;
+ hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
+
+ if (hr_dev->is_vf)
+ return hns_roce_v2_vf_profile(hr_dev);
+ else
+ return hns_roce_v2_pf_profile(hr_dev);
+}
+
+static void config_llm_table(struct hns_roce_buf *data_buf, void *cfg_buf)
+{
+ u32 i, next_ptr, page_num;
+ __le64 *entry = cfg_buf;
+ dma_addr_t addr;
+ u64 val;
+
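+ /* Build a linked list over the data pages: each 64-bit entry holds
+ * a page address plus the index of the next entry.
+ */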
+ page_num = data_buf->npages;
+ for (i = 0; i < page_num; i++) {
+ addr = hns_roce_buf_page(data_buf, i);
+ if (i == (page_num - 1))
+ next_ptr = 0;
+ else
+ next_ptr = i + 1;
+
+ val = HNS_ROCE_EXT_LLM_ENTRY(addr, (u64)next_ptr);
+ entry[i] = cpu_to_le64(val);
+ }
+}
+
+static int set_llm_cfg_to_hw(struct hns_roce_dev *hr_dev,
+ struct hns_roce_link_table *table)
+{
+ struct hns_roce_cmq_desc desc[2];
+ struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
+ struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
+ struct hns_roce_buf *buf = table->buf;
+ enum hns_roce_opcode_type opcode;
+ dma_addr_t addr;
+
+ opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
+ hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
+ desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
+
+ hr_reg_write(r_a, CFG_LLM_A_BA_L, lower_32_bits(table->table.map));
+ hr_reg_write(r_a, CFG_LLM_A_BA_H, upper_32_bits(table->table.map));
+ hr_reg_write(r_a, CFG_LLM_A_DEPTH, buf->npages);
+ hr_reg_write(r_a, CFG_LLM_A_PGSZ, to_hr_hw_page_shift(buf->page_shift));
+ hr_reg_enable(r_a, CFG_LLM_A_INIT_EN);
+
+ addr = to_hr_hw_page_addr(hns_roce_buf_page(buf, 0));
+ hr_reg_write(r_a, CFG_LLM_A_HEAD_BA_L, lower_32_bits(addr));
+ hr_reg_write(r_a, CFG_LLM_A_HEAD_BA_H, upper_32_bits(addr));
+ hr_reg_write(r_a, CFG_LLM_A_HEAD_NXTPTR, 1);
+ hr_reg_write(r_a, CFG_LLM_A_HEAD_PTR, 0);
+
+ addr = to_hr_hw_page_addr(hns_roce_buf_page(buf, buf->npages - 1));
+ hr_reg_write(r_b, CFG_LLM_B_TAIL_BA_L, lower_32_bits(addr));
+ hr_reg_write(r_b, CFG_LLM_B_TAIL_BA_H, upper_32_bits(addr));
+ hr_reg_write(r_b, CFG_LLM_B_TAIL_PTR, buf->npages - 1);
+
+ return hns_roce_cmq_send(hr_dev, desc, 2);
+}
+
+static struct hns_roce_link_table *
+alloc_link_table_buf(struct hns_roce_dev *hr_dev)
+{
+ u16 total_sl = hr_dev->caps.sl_num * hr_dev->func_num;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_link_table *link_tbl;
+ u32 pg_shift, size, min_size;
+
+ link_tbl = &priv->ext_llm;
+ pg_shift = hr_dev->caps.llm_buf_pg_sz + PAGE_SHIFT;
+ size = hr_dev->caps.num_qps * hr_dev->func_num *
+ HNS_ROCE_V2_EXT_LLM_ENTRY_SZ;
+ min_size = HNS_ROCE_EXT_LLM_MIN_PAGES(total_sl) << pg_shift;
+
+ /* Alloc data table */
+ size = max(size, min_size);
+ link_tbl->buf = hns_roce_buf_alloc(hr_dev, size, pg_shift, 0);
+ if (IS_ERR(link_tbl->buf))
+ return ERR_PTR(-ENOMEM);
+
+ /* Alloc config table */
+ size = link_tbl->buf->npages * sizeof(u64);
+ link_tbl->table.buf = dma_alloc_coherent(hr_dev->dev, size,
+ &link_tbl->table.map,
+ GFP_KERNEL);
+ if (!link_tbl->table.buf) {
+ hns_roce_buf_free(hr_dev, link_tbl->buf);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return link_tbl;
+}
+
+static void free_link_table_buf(struct hns_roce_dev *hr_dev,
+ struct hns_roce_link_table *tbl)
+{
+ if (tbl->buf) {
+ u32 size = tbl->buf->npages * sizeof(u64);
+
+ dma_free_coherent(hr_dev->dev, size, tbl->table.buf,
+ tbl->table.map);
+ }
+
+ hns_roce_buf_free(hr_dev, tbl->buf);
+}
+
+static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_link_table *link_tbl;
+ int ret;
+
+ link_tbl = alloc_link_table_buf(hr_dev);
+ if (IS_ERR(link_tbl))
+ return -ENOMEM;
+
+ if (WARN_ON(link_tbl->buf->npages > HNS_ROCE_V2_EXT_LLM_MAX_DEPTH)) {
+ ret = -EINVAL;
+ goto err_alloc;
+ }
+
+ config_llm_table(link_tbl->buf, link_tbl->table.buf);
+ ret = set_llm_cfg_to_hw(hr_dev, link_tbl);
+ if (ret)
+ goto err_alloc;
+
+ return 0;
+
+err_alloc:
+ free_link_table_buf(hr_dev, link_tbl);
+ return ret;
+}
+
+static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+
+ free_link_table_buf(hr_dev, &priv->ext_llm);
+}
+
+static void free_dip_entry(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_dip *hr_dip;
+ unsigned long idx;
+
+ xa_lock(&hr_dev->qp_table.dip_xa);
+
+ xa_for_each(&hr_dev->qp_table.dip_xa, idx, hr_dip) {
+ __xa_erase(&hr_dev->qp_table.dip_xa, hr_dip->dip_idx);
+ kfree(hr_dip);
+ }
+
+ xa_unlock(&hr_dev->qp_table.dip_xa);
+}
+
+static struct ib_pd *free_mr_init_pd(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_pd *hr_pd;
+ struct ib_pd *pd;
+
+ hr_pd = kzalloc(sizeof(*hr_pd), GFP_KERNEL);
+ if (!hr_pd)
+ return NULL;
+ pd = &hr_pd->ibpd;
+ pd->device = ibdev;
+
+ if (hns_roce_alloc_pd(pd, NULL)) {
+ ibdev_err(ibdev, "failed to create pd for free mr.\n");
+ kfree(hr_pd);
+ return NULL;
+ }
+ free_mr->rsv_pd = to_hr_pd(pd);
+ free_mr->rsv_pd->ibpd.device = &hr_dev->ib_dev;
+ free_mr->rsv_pd->ibpd.uobject = NULL;
+ free_mr->rsv_pd->ibpd.__internal_mr = NULL;
+ atomic_set(&free_mr->rsv_pd->ibpd.usecnt, 0);
+
+ return pd;
+}
+
+static struct ib_cq *free_mr_init_cq(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct ib_cq_init_attr cq_init_attr = {};
+ struct hns_roce_cq *hr_cq;
+ struct ib_cq *cq;
+
+ cq_init_attr.cqe = HNS_ROCE_FREE_MR_USED_CQE_NUM;
+
+ hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);
+ if (!hr_cq)
+ return NULL;
+
+ cq = &hr_cq->ib_cq;
+ cq->device = ibdev;
+
+ if (hns_roce_create_cq(cq, &cq_init_attr, NULL)) {
+ ibdev_err(ibdev, "failed to create cq for free mr.\n");
+ kfree(hr_cq);
+ return NULL;
+ }
+ free_mr->rsv_cq = to_hr_cq(cq);
+ free_mr->rsv_cq->ib_cq.device = &hr_dev->ib_dev;
+ free_mr->rsv_cq->ib_cq.uobject = NULL;
+ free_mr->rsv_cq->ib_cq.comp_handler = NULL;
+ free_mr->rsv_cq->ib_cq.event_handler = NULL;
+ free_mr->rsv_cq->ib_cq.cq_context = NULL;
+ atomic_set(&free_mr->rsv_cq->ib_cq.usecnt, 0);
+
+ return cq;
+}
+
+static int free_mr_init_qp(struct hns_roce_dev *hr_dev, struct ib_cq *cq,
+ struct ib_qp_init_attr *init_attr, int i)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_qp *hr_qp;
+ struct ib_qp *qp;
+ int ret;
+
+ hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
+ if (!hr_qp)
+ return -ENOMEM;
+
+ qp = &hr_qp->ibqp;
+ qp->device = ibdev;
+
+ ret = hns_roce_create_qp(qp, init_attr, NULL);
+ if (ret) {
+ ibdev_err(ibdev, "failed to create qp for free mr.\n");
+ kfree(hr_qp);
+ return ret;
+ }
+
+ free_mr->rsv_qp[i] = hr_qp;
+ free_mr->rsv_qp[i]->ibqp.recv_cq = cq;
+ free_mr->rsv_qp[i]->ibqp.send_cq = cq;
+
+ return 0;
+}
+
+static void free_mr_exit(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
+ struct ib_qp *qp;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
+ if (free_mr->rsv_qp[i]) {
+ qp = &free_mr->rsv_qp[i]->ibqp;
+ hns_roce_v2_destroy_qp(qp, NULL);
+ kfree(free_mr->rsv_qp[i]);
+ free_mr->rsv_qp[i] = NULL;
+ }
+ }
+
+ if (free_mr->rsv_cq) {
+ hns_roce_destroy_cq(&free_mr->rsv_cq->ib_cq, NULL);
+ kfree(free_mr->rsv_cq);
+ free_mr->rsv_cq = NULL;
+ }
+
+ if (free_mr->rsv_pd) {
+ hns_roce_dealloc_pd(&free_mr->rsv_pd->ibpd, NULL);
+ kfree(free_mr->rsv_pd);
+ free_mr->rsv_pd = NULL;
+ }
+
+ mutex_destroy(&free_mr->mutex);
+}
+
+static int free_mr_alloc_res(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
+ struct ib_qp_init_attr qp_init_attr = {};
+ struct ib_pd *pd;
+ struct ib_cq *cq;
+ int ret;
+ int i;
+
+ pd = free_mr_init_pd(hr_dev);
+ if (!pd)
+ return -ENOMEM;
+
+ cq = free_mr_init_cq(hr_dev);
+ if (!cq) {
+ ret = -ENOMEM;
+ goto create_failed_cq;
+ }
+
+ qp_init_attr.qp_type = IB_QPT_RC;
+ qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
+ qp_init_attr.send_cq = cq;
+ qp_init_attr.recv_cq = cq;
+ for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
+ qp_init_attr.cap.max_send_wr = HNS_ROCE_FREE_MR_USED_SQWQE_NUM;
+ qp_init_attr.cap.max_send_sge = HNS_ROCE_FREE_MR_USED_SQSGE_NUM;
+ qp_init_attr.cap.max_recv_wr = HNS_ROCE_FREE_MR_USED_RQWQE_NUM;
+ qp_init_attr.cap.max_recv_sge = HNS_ROCE_FREE_MR_USED_RQSGE_NUM;
+
+ ret = free_mr_init_qp(hr_dev, cq, &qp_init_attr, i);
+ if (ret)
+ goto create_failed_qp;
+ }
+
+ return 0;
+
+create_failed_qp:
+ for (i--; i >= 0; i--) {
+ hns_roce_v2_destroy_qp(&free_mr->rsv_qp[i]->ibqp, NULL);
+ kfree(free_mr->rsv_qp[i]);
+ }
+ hns_roce_destroy_cq(cq, NULL);
+ kfree(cq);
+
+create_failed_cq:
+ hns_roce_dealloc_pd(pd, NULL);
+ kfree(pd);
+
+ return ret;
+}
+
+static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
+ struct ib_qp_attr *attr, int sl_num)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_qp *hr_qp;
+ int loopback;
+ int mask;
+ int ret;
+
+ hr_qp = to_hr_qp(&free_mr->rsv_qp[sl_num]->ibqp);
+ hr_qp->free_mr_en = 1;
+ hr_qp->ibqp.device = ibdev;
+ hr_qp->ibqp.qp_type = IB_QPT_RC;
+
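+ /* Drive the reserved QP through INIT -> RTR -> RTS so it can carry
+ * loopback traffic.
+ */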
+ mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS;
+ attr->qp_state = IB_QPS_INIT;
+ attr->port_num = 1;
+ attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
+ ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT,
+ IB_QPS_INIT, NULL);
+ if (ret) {
+ ibdev_err_ratelimited(ibdev, "failed to modify qp to init, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ loopback = hr_dev->loop_idc;
+ /* Set qpc lbi = 1 to indicate loopback IO */
+ hr_dev->loop_idc = 1;
+
+ mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
+ IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
+ attr->qp_state = IB_QPS_RTR;
+ attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
+ attr->path_mtu = IB_MTU_256;
+ attr->dest_qp_num = hr_qp->qpn;
+ attr->rq_psn = HNS_ROCE_FREE_MR_USED_PSN;
+
+ rdma_ah_set_sl(&attr->ah_attr, (u8)sl_num);
+
+ ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT,
+ IB_QPS_RTR, NULL);
+ hr_dev->loop_idc = loopback;
+ if (ret) {
+ ibdev_err(ibdev, "failed to modify qp to rtr, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ mask = IB_QP_STATE | IB_QP_SQ_PSN | IB_QP_RETRY_CNT | IB_QP_TIMEOUT |
+ IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC;
+ attr->qp_state = IB_QPS_RTS;
+ attr->sq_psn = HNS_ROCE_FREE_MR_USED_PSN;
+ attr->retry_cnt = HNS_ROCE_FREE_MR_USED_QP_RETRY_CNT;
+ attr->timeout = HNS_ROCE_FREE_MR_USED_QP_TIMEOUT;
+ ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_RTR,
+ IB_QPS_RTS, NULL);
+ if (ret)
+ ibdev_err(ibdev, "failed to modify qp to rts, ret = %d.\n",
+ ret);
+
+ return ret;
+}
+
+static int free_mr_modify_qp(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
+ struct ib_qp_attr attr = {};
+ int ret;
+ int i;
+
+ rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
+ rdma_ah_set_static_rate(&attr.ah_attr, 3);
+ rdma_ah_set_port_num(&attr.ah_attr, 1);
+
+ for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
+ ret = free_mr_modify_rsv_qp(hr_dev, &attr, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int free_mr_init(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
+ int ret;
+
+ mutex_init(&free_mr->mutex);
+
+ ret = free_mr_alloc_res(hr_dev);
+ if (ret) {
+ mutex_destroy(&free_mr->mutex);
+ return ret;
+ }
+
+ ret = free_mr_modify_qp(hr_dev);
+ if (ret)
+ goto err_modify_qp;
+
+ return 0;
+
+err_modify_qp:
+ free_mr_exit(hr_dev);
+
+ return ret;
+}
+
+static int get_hem_table(struct hns_roce_dev *hr_dev)
+{
+ unsigned int qpc_count;
+ unsigned int cqc_count;
+ unsigned int gmv_count;
+ int ret;
+ int i;
+
+ /* Alloc memory for source address table buffer space chunk */
+ for (gmv_count = 0; gmv_count < hr_dev->caps.gmv_entry_num;
+ gmv_count++) {
+ ret = hns_roce_table_get(hr_dev, &hr_dev->gmv_table, gmv_count);
+ if (ret)
+ goto err_gmv_failed;
+ }
+
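+ /* VFs only need the GMV table; the QPC/CQC timer tables below are
+ * allocated only for the PF.
+ */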
+ if (hr_dev->is_vf)
+ return 0;
+
+ /* Alloc memory for QPC Timer buffer space chunk */
+ for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
+ qpc_count++) {
+ ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
+ qpc_count);
+ if (ret) {
+ dev_err(hr_dev->dev, "QPC Timer get failed\n");
+ goto err_qpc_timer_failed;
+ }
+ }
+
+ /* Alloc memory for CQC Timer buffer space chunk */
+ for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
+ cqc_count++) {
+ ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
+ cqc_count);
+ if (ret) {
+ dev_err(hr_dev->dev, "CQC Timer get failed\n");
+ goto err_cqc_timer_failed;
+ }
+ }
+
+ return 0;
+
+err_cqc_timer_failed:
+ for (i = 0; i < cqc_count; i++)
+ hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
+
+err_qpc_timer_failed:
+ for (i = 0; i < qpc_count; i++)
+ hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
+
+err_gmv_failed:
+ for (i = 0; i < gmv_count; i++)
+ hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
+
+ return ret;
+}
+
+static void put_hem_table(struct hns_roce_dev *hr_dev)
+{
+ int i;
+
+ for (i = 0; i < hr_dev->caps.gmv_entry_num; i++)
+ hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
+
+ if (hr_dev->is_vf)
+ return;
+
+ for (i = 0; i < hr_dev->caps.qpc_timer_bt_num; i++)
+ hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
+
+ for (i = 0; i < hr_dev->caps.cqc_timer_bt_num; i++)
+ hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
+}
+
+static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
+{
+ int ret;
+
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+ ret = free_mr_init(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "failed to init free mr!\n");
+ return ret;
+ }
+ }
+
+ /* The hns ROCEE requires the extdb info to be cleared before use */
+ ret = hns_roce_clear_extdb_list_info(hr_dev);
+ if (ret)
+ goto err_clear_extdb_failed;
+
+ ret = get_hem_table(hr_dev);
+ if (ret)
+ goto err_get_hem_table_failed;
+
+ if (hr_dev->is_vf)
+ return 0;
+
+ ret = hns_roce_init_link_table(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "failed to init llm, ret = %d.\n", ret);
+ goto err_llm_init_failed;
+ }
+
+ return 0;
+
+err_llm_init_failed:
+ put_hem_table(hr_dev);
+err_get_hem_table_failed:
+ hns_roce_function_clear(hr_dev);
+err_clear_extdb_failed:
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+ free_mr_exit(hr_dev);
+
+ return ret;
+}
+
+static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
+{
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+ free_mr_exit(hr_dev);
+
+ hns_roce_function_clear(hr_dev);
+
+ if (!hr_dev->is_vf)
+ hns_roce_free_link_table(hr_dev);
+
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ free_dip_entry(hr_dev);
+}
+
+static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mbox_msg *mbox_msg)
+{
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
+
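+ /* Split the 64-bit in/out parameters into 32-bit halves and pack the
+ * tag/cmd and event_en/token pairs into single mailbox words.
+ */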
+ mb->in_param_l = cpu_to_le32(mbox_msg->in_param);
+ mb->in_param_h = cpu_to_le32(mbox_msg->in_param >> 32);
+ mb->out_param_l = cpu_to_le32(mbox_msg->out_param);
+ mb->out_param_h = cpu_to_le32(mbox_msg->out_param >> 32);
+ mb->cmd_tag = cpu_to_le32(mbox_msg->tag << 8 | mbox_msg->cmd);
+ mb->token_event_en = cpu_to_le32(mbox_msg->event_en << 16 |
+ mbox_msg->token);
+
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
+static int v2_wait_mbox_complete(struct hns_roce_dev *hr_dev, u32 timeout,
+ u8 *complete_status)
+{
+ struct hns_roce_mbox_status *mb_st;
+ struct hns_roce_cmq_desc desc;
+ unsigned long end;
+ int ret = -EBUSY;
+ u32 status;
+ bool busy;
+
+ mb_st = (struct hns_roce_mbox_status *)desc.data;
+ end = msecs_to_jiffies(timeout) + jiffies;
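+ /* Poll the mailbox status until hardware clears the HW_RUN bit or the
+ * timeout expires.
+ */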
+ while (v2_chk_mbox_is_avail(hr_dev, &busy)) {
+ if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR)
+ return -EIO;
+
+ status = 0;
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST,
+ true);
+ ret = __hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (!ret) {
+ status = le32_to_cpu(mb_st->mb_status_hw_run);
+ /* No pending message exists in ROCEE mbox. */
+ if (!(status & MB_ST_HW_RUN_M))
+ break;
+ } else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) {
+ break;
+ }
+
+ if (time_after(jiffies, end)) {
+ dev_err_ratelimited(hr_dev->dev,
+ "failed to wait mbox status 0x%x\n",
+ status);
+ return -ETIMEDOUT;
+ }
+
+ cond_resched();
+ ret = -EBUSY;
+ }
+
+ if (!ret) {
+ *complete_status = (u8)(status & MB_ST_COMPLETE_M);
+ } else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) {
+ /* Ignore all errors if the mbox is unavailable. */
+ ret = 0;
+ *complete_status = MB_ST_COMPLETE_M;
+ }
+
+ return ret;
+}
+
+static int v2_post_mbox(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mbox_msg *mbox_msg)
+{
+ u8 status = 0;
+ int ret;
+
+ /* Wait for the mbox to be idle */
+ ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS,
+ &status);
+ if (unlikely(ret)) {
+ dev_err_ratelimited(hr_dev->dev,
+ "failed to check post mbox status = 0x%x, ret = %d.\n",
+ status, ret);
+ return ret;
+ }
+
+ /* Post new message to mbox */
+ ret = hns_roce_mbox_post(hr_dev, mbox_msg);
+ if (ret)
+ dev_err_ratelimited(hr_dev->dev,
+ "failed to post mailbox, ret = %d.\n", ret);
+
+ return ret;
+}
+
+static int v2_poll_mbox_done(struct hns_roce_dev *hr_dev)
+{
+ u8 status = 0;
+ int ret;
+
+ ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_CMD_TIMEOUT_MSECS,
+ &status);
+ if (!ret) {
+ if (status != MB_ST_COMPLETE_SUCC)
+ return -EBUSY;
+ } else {
+ dev_err_ratelimited(hr_dev->dev,
+ "failed to check mbox status = 0x%x, ret = %d.\n",
+ status, ret);
+ }
+
+ return ret;
+}
+
+static void copy_gid(void *dest, const union ib_gid *gid)
+{
+#define GID_SIZE 4
+ const union ib_gid *src = gid;
+ __le32 (*p)[GID_SIZE] = dest;
+ int i;
+
+ if (!gid)
+ src = &zgid;
+
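+ /* Copy the 16-byte GID into the table as four little-endian 32-bit words. */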
+ for (i = 0; i < GID_SIZE; i++)
+ (*p)[i] = cpu_to_le32(*(u32 *)&src->raw[i * sizeof(u32)]);
+}
+
+static int config_sgid_table(struct hns_roce_dev *hr_dev,
+ int gid_index, const union ib_gid *gid,
+ enum hns_roce_sgid_type sgid_type)
+{
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_cfg_sgid_tb *sgid_tb =
+ (struct hns_roce_cfg_sgid_tb *)desc.data;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
+
+ hr_reg_write(sgid_tb, CFG_SGID_TB_TABLE_IDX, gid_index);
+ hr_reg_write(sgid_tb, CFG_SGID_TB_VF_SGID_TYPE, sgid_type);
+
+ copy_gid(&sgid_tb->vf_sgid_l, gid);
+
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
+static int config_gmv_table(struct hns_roce_dev *hr_dev,
+ int gid_index, const union ib_gid *gid,
+ enum hns_roce_sgid_type sgid_type,
+ const struct ib_gid_attr *attr)
+{
+ struct hns_roce_cmq_desc desc[2];
+ struct hns_roce_cfg_gmv_tb_a *tb_a =
+ (struct hns_roce_cfg_gmv_tb_a *)desc[0].data;
+ struct hns_roce_cfg_gmv_tb_b *tb_b =
+ (struct hns_roce_cfg_gmv_tb_b *)desc[1].data;
+
+ u16 vlan_id = VLAN_CFI_MASK;
+ u8 mac[ETH_ALEN] = {};
+ int ret;
+
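+ /* A GMV entry spans two descriptors: tb_a holds the SGID, its type and
+ * the VLAN fields, while tb_b holds the source MAC and the SGID index.
+ */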
+ if (gid) {
+ ret = rdma_read_gid_l2_fields(attr, &vlan_id, mac);
+ if (ret)
+ return ret;
+ }
+
+ hns_roce_cmq_setup_basic_desc(&desc[0], HNS_ROCE_OPC_CFG_GMV_TBL, false);
+ desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+
+ hns_roce_cmq_setup_basic_desc(&desc[1], HNS_ROCE_OPC_CFG_GMV_TBL, false);
+
+ copy_gid(&tb_a->vf_sgid_l, gid);
+
+ hr_reg_write(tb_a, GMV_TB_A_VF_SGID_TYPE, sgid_type);
+ hr_reg_write(tb_a, GMV_TB_A_VF_VLAN_EN, vlan_id < VLAN_CFI_MASK);
+ hr_reg_write(tb_a, GMV_TB_A_VF_VLAN_ID, vlan_id);
+
+ tb_b->vf_smac_l = cpu_to_le32(*(u32 *)mac);
+
+ hr_reg_write(tb_b, GMV_TB_B_SMAC_H, *(u16 *)&mac[4]);
+ hr_reg_write(tb_b, GMV_TB_B_SGID_IDX, gid_index);
+
+ return hns_roce_cmq_send(hr_dev, desc, 2);
+}
+
+static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, int gid_index,
+ const union ib_gid *gid,
+ const struct ib_gid_attr *attr)
+{
+ enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
+ int ret;
+
+ if (gid) {
+ if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
+ if (ipv6_addr_v4mapped((void *)gid))
+ sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
+ else
+ sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
+ } else if (attr->gid_type == IB_GID_TYPE_ROCE) {
+ sgid_type = GID_TYPE_FLAG_ROCE_V1;
+ }
+ }
+
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ ret = config_gmv_table(hr_dev, gid_index, gid, sgid_type, attr);
+ else
+ ret = config_sgid_table(hr_dev, gid_index, gid, sgid_type);
+
+ if (ret)
+ ibdev_err(&hr_dev->ib_dev, "failed to set gid, ret = %d!\n",
+ ret);
+
+ return ret;
+}
+
+static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
+ const u8 *addr)
+{
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_cfg_smac_tb *smac_tb =
+ (struct hns_roce_cfg_smac_tb *)desc.data;
+ u16 reg_smac_h;
+ u32 reg_smac_l;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
+
+ reg_smac_l = *(u32 *)(&addr[0]);
+ reg_smac_h = *(u16 *)(&addr[4]);
+
+ hr_reg_write(smac_tb, CFG_SMAC_TB_IDX, phy_port);
+ hr_reg_write(smac_tb, CFG_SMAC_TB_VF_SMAC_H, reg_smac_h);
+ smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);
+
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
+static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
+ struct hns_roce_v2_mpt_entry *mpt_entry,
+ struct hns_roce_mr *mr)
+{
+ u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ dma_addr_t pbl_ba;
+ int ret;
+ int i;
+
+ ret = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
+ min_t(int, ARRAY_SIZE(pages), mr->npages));
+ if (ret) {
+ ibdev_err(ibdev, "failed to find PBL mtr, ret = %d.\n", ret);
+ return ret;
+ }
+
+ /* Convert the page addresses to the hardware address access unit */
+ for (i = 0; i < ARRAY_SIZE(pages); i++)
+ pages[i] >>= MPT_PBL_BUF_ADDR_S;
+
+ pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr);
+
+ mpt_entry->pbl_size = cpu_to_le32(mr->npages);
+ mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> MPT_PBL_BA_ADDR_S);
+ hr_reg_write(mpt_entry, MPT_PBL_BA_H,
+ upper_32_bits(pbl_ba >> MPT_PBL_BA_ADDR_S));
+
+ mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
+ hr_reg_write(mpt_entry, MPT_PA0_H, upper_32_bits(pages[0]));
+
+ mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
+ hr_reg_write(mpt_entry, MPT_PA1_H, upper_32_bits(pages[1]));
+ hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
+
+ return 0;
+}
+
+static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
+ void *mb_buf, struct hns_roce_mr *mr)
+{
+ struct hns_roce_v2_mpt_entry *mpt_entry;
+
+ mpt_entry = mb_buf;
+ memset(mpt_entry, 0, sizeof(*mpt_entry));
+
+ hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID);
+ hr_reg_write(mpt_entry, MPT_PD, mr->pd);
+
+ hr_reg_write_bool(mpt_entry, MPT_ATOMIC_EN,
+ mr->access & IB_ACCESS_REMOTE_ATOMIC);
+ hr_reg_write_bool(mpt_entry, MPT_RR_EN,
+ mr->access & IB_ACCESS_REMOTE_READ);
+ hr_reg_write_bool(mpt_entry, MPT_RW_EN,
+ mr->access & IB_ACCESS_REMOTE_WRITE);
+ hr_reg_write_bool(mpt_entry, MPT_LW_EN,
+ mr->access & IB_ACCESS_LOCAL_WRITE);
+
+ mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
+ mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
+ mpt_entry->lkey = cpu_to_le32(mr->key);
+ mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
+ mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
+
+ if (mr->type != MR_TYPE_MR)
+ hr_reg_enable(mpt_entry, MPT_PA);
+
+ if (mr->type == MR_TYPE_DMA)
+ return 0;
+
+ if (mr->pbl_hop_num != HNS_ROCE_HOP_NUM_0)
+ hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, mr->pbl_hop_num);
+
+ hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
+ hr_reg_enable(mpt_entry, MPT_INNER_PA_VLD);
+
+ return set_mtpt_pbl(hr_dev, mpt_entry, mr);
+}
+
+static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mr *mr, int flags,
+ void *mb_buf)
+{
+ struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
+ u32 mr_access_flags = mr->access;
+ int ret = 0;
+
+ hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID);
+ hr_reg_write(mpt_entry, MPT_PD, mr->pd);
+
+ if (flags & IB_MR_REREG_ACCESS) {
+ hr_reg_write(mpt_entry, MPT_ATOMIC_EN,
+ mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
+ hr_reg_write(mpt_entry, MPT_RR_EN,
+ mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
+ hr_reg_write(mpt_entry, MPT_RW_EN,
+ mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
+ hr_reg_write(mpt_entry, MPT_LW_EN,
+ mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
+ }
+
+ if (flags & IB_MR_REREG_TRANS) {
+ mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
+ mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
+ mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
+ mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
+
+ ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
+ }
+
+ return ret;
+}
+
+static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
+{
+ dma_addr_t pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr);
+ struct hns_roce_v2_mpt_entry *mpt_entry;
+
+ mpt_entry = mb_buf;
+ memset(mpt_entry, 0, sizeof(*mpt_entry));
+
+ hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE);
+ hr_reg_write(mpt_entry, MPT_PD, mr->pd);
+
+ hr_reg_enable(mpt_entry, MPT_RA_EN);
+ hr_reg_enable(mpt_entry, MPT_R_INV_EN);
+
+ hr_reg_enable(mpt_entry, MPT_FRE);
+ hr_reg_enable(mpt_entry, MPT_BPD);
+ hr_reg_clear(mpt_entry, MPT_PA);
+
+ hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, 1);
+ hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
+ hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
+
+ mpt_entry->pbl_size = cpu_to_le32(mr->npages);
+
+ mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >>
+ MPT_PBL_BA_ADDR_S));
+ hr_reg_write(mpt_entry, MPT_PBL_BA_H,
+ upper_32_bits(pbl_ba >> MPT_PBL_BA_ADDR_S));
+
+ return 0;
+}
+
+static int free_mr_post_send_lp_wqe(struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ const struct ib_send_wr *bad_wr;
+ struct ib_rdma_wr rdma_wr = {};
+ struct ib_send_wr *send_wr;
+ int ret;
+
+ send_wr = &rdma_wr.wr;
+ send_wr->opcode = IB_WR_RDMA_WRITE;
+
+ ret = hns_roce_v2_post_send(&hr_qp->ibqp, send_wr, &bad_wr);
+ if (ret) {
+ ibdev_err_ratelimited(ibdev, "failed to post wqe for free mr, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
+ struct ib_wc *wc);
+
+static void free_mr_send_cmd_to_hw(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
+ struct ib_wc wc[ARRAY_SIZE(free_mr->rsv_qp)];
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_qp *hr_qp;
+ unsigned long end;
+ int cqe_cnt = 0;
+ int npolled;
+ int ret;
+ int i;
+
+ /*
+ * If device initialization is not complete, or the device is being
+ * uninstalled, there is no need to execute the free mr flow.
+ */
+ if (priv->handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT ||
+ priv->handle->rinfo.instance_state == HNS_ROCE_STATE_INIT ||
+ hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT)
+ return;
+
+ mutex_lock(&free_mr->mutex);
+
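+ /* Post one loopback RDMA WRITE on each reserved QP, then poll the
+ * reserved CQ until every completion arrives or the timeout expires.
+ */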
+ for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
+ hr_qp = free_mr->rsv_qp[i];
+
+ ret = free_mr_post_send_lp_wqe(hr_qp);
+ if (ret) {
+ ibdev_err_ratelimited(ibdev,
+ "failed to send wqe (qp:0x%lx) for free mr, ret = %d.\n",
+ hr_qp->qpn, ret);
+ break;
+ }
+
+ cqe_cnt++;
+ }
+
+ end = msecs_to_jiffies(HNS_ROCE_V2_FREE_MR_TIMEOUT) + jiffies;
+ while (cqe_cnt) {
+ npolled = hns_roce_v2_poll_cq(&free_mr->rsv_cq->ib_cq, cqe_cnt, wc);
+ if (npolled < 0) {
+ ibdev_err_ratelimited(ibdev,
+ "failed to poll cqe for free mr, remain %d cqe.\n",
+ cqe_cnt);
+ goto out;
+ }
+
+ if (time_after(jiffies, end)) {
+ ibdev_err_ratelimited(ibdev,
+ "failed to poll cqe for free mr and timeout, remain %d cqe.\n",
+ cqe_cnt);
+ goto out;
+ }
+ cqe_cnt -= npolled;
+ }
+
+out:
+ mutex_unlock(&free_mr->mutex);
+}
+
+static void hns_roce_v2_dereg_mr(struct hns_roce_dev *hr_dev)
+{
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+ free_mr_send_cmd_to_hw(hr_dev);
+}
+
+static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
+{
+ return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);
+}
+
+static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, unsigned int n)
+{
+ struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
+
+ /* A CQE is valid when its owner bit differs from the MSB of cons_idx */
+ return (hr_reg_read(cqe, CQE_OWNER) ^ !!(n & hr_cq->cq_depth)) ? cqe :
+ NULL;
+}
+
+static inline void update_cq_db(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq)
+{
+ if (likely(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB)) {
+ *hr_cq->set_ci_db = hr_cq->cons_index & V2_CQ_DB_CONS_IDX_M;
+ } else {
+ struct hns_roce_v2_db cq_db = {};
+
+ hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn);
+ hr_reg_write(&cq_db, DB_CMD, HNS_ROCE_V2_CQ_DB);
+ hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index);
+ hr_reg_write(&cq_db, DB_CQ_CMD_SN, 1);
+
+ hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
+ }
+}
+
+static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
+ struct hns_roce_srq *srq)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
+ struct hns_roce_v2_cqe *cqe, *dest;
+ u32 prod_index;
+ int nfreed = 0;
+ int wqe_index;
+ u8 owner_bit;
+
+ for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
+ ++prod_index) {
+ if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe)
+ break;
+ }
+
+ /*
+ * Now backwards through the CQ, removing CQ entries
+ * that match our QP by overwriting them with next entries.
+ */
+ while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
+ cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
+ if (hr_reg_read(cqe, CQE_LCL_QPN) == qpn) {
+ if (srq && hr_reg_read(cqe, CQE_S_R)) {
+ wqe_index = hr_reg_read(cqe, CQE_WQE_IDX);
+ hns_roce_free_srq_wqe(srq, wqe_index);
+ }
+ ++nfreed;
+ } else if (nfreed) {
+ dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
+ hr_cq->ib_cq.cqe);
+ owner_bit = hr_reg_read(dest, CQE_OWNER);
+ memcpy(dest, cqe, hr_cq->cqe_size);
+ hr_reg_write(dest, CQE_OWNER, owner_bit);
+ }
+ }
+
+ if (nfreed) {
+ hr_cq->cons_index += nfreed;
+ update_cq_db(hr_dev, hr_cq);
+ }
+}
+
+static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
+ struct hns_roce_srq *srq)
+{
+ spin_lock_irq(&hr_cq->lock);
+ __hns_roce_v2_cq_clean(hr_cq, qpn, srq);
+ spin_unlock_irq(&hr_cq->lock);
+}
+
+static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq, void *mb_buf,
+ u64 *mtts, dma_addr_t dma_handle)
+{
+ struct hns_roce_v2_cq_context *cq_context;
+
+ cq_context = mb_buf;
+ memset(cq_context, 0, sizeof(*cq_context));
+
+ hr_reg_write(cq_context, CQC_CQ_ST, V2_CQ_STATE_VALID);
+ hr_reg_write(cq_context, CQC_ARM_ST, NO_ARMED);
+ hr_reg_write(cq_context, CQC_SHIFT, ilog2(hr_cq->cq_depth));
+ hr_reg_write(cq_context, CQC_CEQN, hr_cq->vector);
+ hr_reg_write(cq_context, CQC_CQN, hr_cq->cqn);
+
+ if (hr_cq->cqe_size == HNS_ROCE_V3_CQE_SIZE)
+ hr_reg_write(cq_context, CQC_CQE_SIZE, CQE_SIZE_64B);
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH)
+ hr_reg_enable(cq_context, CQC_STASH);
+
+ hr_reg_write(cq_context, CQC_CQE_CUR_BLK_ADDR_L,
+ to_hr_hw_page_addr(mtts[0]));
+ hr_reg_write(cq_context, CQC_CQE_CUR_BLK_ADDR_H,
+ upper_32_bits(to_hr_hw_page_addr(mtts[0])));
+ hr_reg_write(cq_context, CQC_CQE_HOP_NUM, hr_dev->caps.cqe_hop_num ==
+ HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
+ hr_reg_write(cq_context, CQC_CQE_NEX_BLK_ADDR_L,
+ to_hr_hw_page_addr(mtts[1]));
+ hr_reg_write(cq_context, CQC_CQE_NEX_BLK_ADDR_H,
+ upper_32_bits(to_hr_hw_page_addr(mtts[1])));
+ hr_reg_write(cq_context, CQC_CQE_BAR_PG_SZ,
+ to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift));
+ hr_reg_write(cq_context, CQC_CQE_BUF_PG_SZ,
+ to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift));
+ hr_reg_write(cq_context, CQC_CQE_BA_L, dma_handle >> CQC_CQE_BA_L_S);
+ hr_reg_write(cq_context, CQC_CQE_BA_H, dma_handle >> CQC_CQE_BA_H_S);
+ hr_reg_write_bool(cq_context, CQC_DB_RECORD_EN,
+ hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB);
+ hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_L,
+ ((u32)hr_cq->db.dma) >> 1);
+ hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_H,
+ hr_cq->db.dma >> CQC_CQE_DB_RECORD_ADDR_H_S);
+ hr_reg_write(cq_context, CQC_CQ_MAX_CNT,
+ HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
+ hr_reg_write(cq_context, CQC_CQ_PERIOD,
+ HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
+}
+
+static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
+ enum ib_cq_notify_flags flags)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
+ struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
+ struct hns_roce_v2_db cq_db = {};
+ u32 notify_flag;
+
+ /*
+ * flags = 0, then notify_flag : next
+ * flags = 1, then notify_flag : solicited
+ */
+ notify_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
+ V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
+
+ hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn);
+ hr_reg_write(&cq_db, DB_CMD, HNS_ROCE_V2_CQ_DB_NOTIFY);
+ hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index);
+ hr_reg_write(&cq_db, DB_CQ_CMD_SN, hr_cq->arm_sn);
+ hr_reg_write(&cq_db, DB_CQ_NOTIFY, notify_flag);
+
+ hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
+
+ return 0;
+}
+
+static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq,
+ int num_entries, struct ib_wc *wc)
+{
+ unsigned int left;
+ int npolled = 0;
+
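+ /* Flush the outstanding WQEs in software with IB_WC_WR_FLUSH_ERR when
+ * the hardware can no longer report them.
+ */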
+ left = wq->head - wq->tail;
+ if (left == 0)
+ return 0;
+
+ left = min_t(unsigned int, (unsigned int)num_entries, left);
+ while (npolled < left) {
+ wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->vendor_err = 0;
+ wc->qp = &hr_qp->ibqp;
+
+ wq->tail++;
+ wc++;
+ npolled++;
+ }
+
+ return npolled;
+}
+
+static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
+ struct ib_wc *wc)
+{
+ struct hns_roce_qp *hr_qp;
+ int npolled = 0;
+
+ list_for_each_entry(hr_qp, &hr_cq->sq_list, sq_node) {
+ npolled += sw_comp(hr_qp, &hr_qp->sq,
+ num_entries - npolled, wc + npolled);
+ if (npolled >= num_entries)
+ goto out;
+ }
+
+ list_for_each_entry(hr_qp, &hr_cq->rq_list, rq_node) {
+ npolled += sw_comp(hr_qp, &hr_qp->rq,
+ num_entries - npolled, wc + npolled);
+ if (npolled >= num_entries)
+ goto out;
+ }
+
+out:
+ return npolled;
+}
+
+static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
+ struct hns_roce_cq *cq, struct hns_roce_v2_cqe *cqe,
+ struct ib_wc *wc)
+{
+ static const struct {
+ u32 cqe_status;
+ enum ib_wc_status wc_status;
+ } map[] = {
+ { HNS_ROCE_CQE_V2_SUCCESS, IB_WC_SUCCESS },
+ { HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR, IB_WC_LOC_LEN_ERR },
+ { HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR },
+ { HNS_ROCE_CQE_V2_LOCAL_PROT_ERR, IB_WC_LOC_PROT_ERR },
+ { HNS_ROCE_CQE_V2_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR },
+ { HNS_ROCE_CQE_V2_MW_BIND_ERR, IB_WC_MW_BIND_ERR },
+ { HNS_ROCE_CQE_V2_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR },
+ { HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR },
+ { HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR, IB_WC_REM_INV_REQ_ERR },
+ { HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR, IB_WC_REM_ACCESS_ERR },
+ { HNS_ROCE_CQE_V2_REMOTE_OP_ERR, IB_WC_REM_OP_ERR },
+ { HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR,
+ IB_WC_RETRY_EXC_ERR },
+ { HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR, IB_WC_RNR_RETRY_EXC_ERR },
+ { HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR, IB_WC_REM_ABORT_ERR },
+ { HNS_ROCE_CQE_V2_GENERAL_ERR, IB_WC_GENERAL_ERR}
+ };
+
+ u32 cqe_status = hr_reg_read(cqe, CQE_STATUS);
+ int i;
+
+ wc->status = IB_WC_GENERAL_ERR;
+ for (i = 0; i < ARRAY_SIZE(map); i++)
+ if (cqe_status == map[i].cqe_status) {
+ wc->status = map[i].wc_status;
+ break;
+ }
+
+ if (likely(wc->status == IB_WC_SUCCESS ||
+ wc->status == IB_WC_WR_FLUSH_ERR))
+ return;
+
+ ibdev_err_ratelimited(&hr_dev->ib_dev, "error cqe status 0x%x:\n",
+ cqe_status);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 16, 4, cqe,
+ cq->cqe_size, false);
+ wc->vendor_err = hr_reg_read(cqe, CQE_SUB_STATUS);
+
+ /*
+ * For hns ROCEE, GENERAL_ERR is an error type that is not defined in
+ * the standard protocol; the driver must ignore it and need not move
+ * the QP to an error state.
+ */
+ if (cqe_status == HNS_ROCE_CQE_V2_GENERAL_ERR)
+ return;
+
+ flush_cqe(hr_dev, qp);
+}
+
+static int get_cur_qp(struct hns_roce_cq *hr_cq, struct hns_roce_v2_cqe *cqe,
+ struct hns_roce_qp **cur_qp)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
+ struct hns_roce_qp *hr_qp = *cur_qp;
+ u32 qpn;
+
+ qpn = hr_reg_read(cqe, CQE_LCL_QPN);
+
+ if (!hr_qp || qpn != hr_qp->qpn) {
+ hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
+ if (unlikely(!hr_qp)) {
+ ibdev_err(&hr_dev->ib_dev,
+ "CQ %06lx with entry for unknown QPN %06x\n",
+ hr_cq->cqn, qpn);
+ return -EINVAL;
+ }
+ *cur_qp = hr_qp;
+ }
+
+ return 0;
+}
+
+/*
+ * mapped-value = 1 + real-value
+ * The real ib_wc opcode values start from 0. To distinguish initialized
+ * from uninitialized map entries, 1 is added to the real value when the
+ * mapping is defined, so a mapped value greater than 0 marks a valid
+ * entry.
+ */
+#define HR_WC_OP_MAP(hr_key, ib_key) \
+ [HNS_ROCE_V2_WQE_OP_ ## hr_key] = 1 + IB_WC_ ## ib_key
+
+static const u32 wc_send_op_map[] = {
+ HR_WC_OP_MAP(SEND, SEND),
+ HR_WC_OP_MAP(SEND_WITH_INV, SEND),
+ HR_WC_OP_MAP(SEND_WITH_IMM, SEND),
+ HR_WC_OP_MAP(RDMA_READ, RDMA_READ),
+ HR_WC_OP_MAP(RDMA_WRITE, RDMA_WRITE),
+ HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE),
+ HR_WC_OP_MAP(ATOM_CMP_AND_SWAP, COMP_SWAP),
+ HR_WC_OP_MAP(ATOM_FETCH_AND_ADD, FETCH_ADD),
+ HR_WC_OP_MAP(ATOM_MSK_CMP_AND_SWAP, MASKED_COMP_SWAP),
+ HR_WC_OP_MAP(ATOM_MSK_FETCH_AND_ADD, MASKED_FETCH_ADD),
+ HR_WC_OP_MAP(FAST_REG_PMR, REG_MR),
+};
+
+static int to_ib_wc_send_op(u32 hr_opcode)
+{
+ if (hr_opcode >= ARRAY_SIZE(wc_send_op_map))
+ return -EINVAL;
+
+ return wc_send_op_map[hr_opcode] ? wc_send_op_map[hr_opcode] - 1 :
+ -EINVAL;
+}
+
+static const u32 wc_recv_op_map[] = {
+ HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, WITH_IMM),
+ HR_WC_OP_MAP(SEND, RECV),
+ HR_WC_OP_MAP(SEND_WITH_IMM, WITH_IMM),
+ HR_WC_OP_MAP(SEND_WITH_INV, RECV),
+};
+
+static int to_ib_wc_recv_op(u32 hr_opcode)
+{
+ if (hr_opcode >= ARRAY_SIZE(wc_recv_op_map))
+ return -EINVAL;
+
+ return wc_recv_op_map[hr_opcode] ? wc_recv_op_map[hr_opcode] - 1 :
+ -EINVAL;
+}
+
+static void fill_send_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
+{
+ u32 hr_opcode;
+ int ib_opcode;
+
+ wc->wc_flags = 0;
+
+ hr_opcode = hr_reg_read(cqe, CQE_OPCODE);
+ switch (hr_opcode) {
+ case HNS_ROCE_V2_WQE_OP_RDMA_READ:
+ wc->byte_len = le32_to_cpu(cqe->byte_cnt);
+ break;
+ case HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM:
+ case HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM:
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ break;
+ case HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP:
+ case HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD:
+ case HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP:
+ case HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD:
+ wc->byte_len = 8;
+ break;
+ default:
+ break;
+ }
+
+ ib_opcode = to_ib_wc_send_op(hr_opcode);
+ if (ib_opcode < 0)
+ wc->status = IB_WC_GENERAL_ERR;
+ else
+ wc->opcode = ib_opcode;
+}
+
+static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
+{
+ u32 hr_opcode;
+ int ib_opcode;
+
+ wc->byte_len = le32_to_cpu(cqe->byte_cnt);
+
+ hr_opcode = hr_reg_read(cqe, CQE_OPCODE);
+ switch (hr_opcode) {
+ case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
+ case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->immtdata));
+ break;
+ case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
+ wc->wc_flags = IB_WC_WITH_INVALIDATE;
+ wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
+ break;
+ default:
+ wc->wc_flags = 0;
+ }
+
+ ib_opcode = to_ib_wc_recv_op(hr_opcode);
+ if (ib_opcode < 0)
+ wc->status = IB_WC_GENERAL_ERR;
+ else
+ wc->opcode = ib_opcode;
+
+ wc->sl = hr_reg_read(cqe, CQE_SL);
+ wc->src_qp = hr_reg_read(cqe, CQE_RMT_QPN);
+ wc->slid = 0;
+ wc->wc_flags |= hr_reg_read(cqe, CQE_GRH) ? IB_WC_GRH : 0;
+ wc->port_num = hr_reg_read(cqe, CQE_PORTN);
+ wc->pkey_index = 0;
+
+ if (hr_reg_read(cqe, CQE_VID_VLD)) {
+ wc->vlan_id = hr_reg_read(cqe, CQE_VID);
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ } else {
+ wc->vlan_id = 0xffff;
+ }
+
+ wc->network_hdr_type = hr_reg_read(cqe, CQE_PORT_TYPE);
+
+ return 0;
+}
+
+static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
+ struct hns_roce_qp **cur_qp, struct ib_wc *wc)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
+ struct hns_roce_qp *qp = *cur_qp;
+ struct hns_roce_srq *srq = NULL;
+ struct hns_roce_v2_cqe *cqe;
+ struct hns_roce_wq *wq;
+ int is_send;
+ u16 wqe_idx;
+ int ret;
+
+ cqe = get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
+ if (!cqe)
+ return -EAGAIN;
+
+ ++hr_cq->cons_index;
+ /* Memory barrier */
+ rmb();
+
+ ret = get_cur_qp(hr_cq, cqe, &qp);
+ if (ret)
+ return ret;
+
+ wc->qp = &qp->ibqp;
+ wc->vendor_err = 0;
+
+ wqe_idx = hr_reg_read(cqe, CQE_WQE_IDX);
+
+ is_send = !hr_reg_read(cqe, CQE_S_R);
+ if (is_send) {
+ wq = &qp->sq;
+
+ /* If sq_signal_bits is set, the tail pointer is advanced to the
+ * WQE corresponding to the current CQE.
+ */
+ if (qp->sq_signal_bits)
+ wq->tail += (wqe_idx - (u16)wq->tail) &
+ (wq->wqe_cnt - 1);
+
+ wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+ ++wq->tail;
+
+ fill_send_wc(wc, cqe);
+ } else {
+ if (qp->ibqp.srq) {
+ srq = to_hr_srq(qp->ibqp.srq);
+ wc->wr_id = srq->wrid[wqe_idx];
+ hns_roce_free_srq_wqe(srq, wqe_idx);
+ } else {
+ wq = &qp->rq;
+ wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+ ++wq->tail;
+ }
+
+ ret = fill_recv_wc(wc, cqe);
+ }
+
+ get_cqe_status(hr_dev, qp, hr_cq, cqe, wc);
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ return 0;
+
+ return ret;
+}
+
+static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
+ struct ib_wc *wc)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
+ struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
+ struct hns_roce_qp *cur_qp = NULL;
+ unsigned long flags;
+ int npolled;
+
+ spin_lock_irqsave(&hr_cq->lock, flags);
+
+ /*
+ * When the device starts to reset, the state is RST_DOWN. At this point
+ * there may still be valid CQEs in the hardware that have not been
+ * polled, so it is not allowed to switch to software mode immediately.
+ * Once the state changes to UNINIT, no CQEs remain in the hardware and
+ * software mode can take over.
+ */
+ if (hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT) {
+ npolled = hns_roce_v2_sw_poll_cq(hr_cq, num_entries, wc);
+ goto out;
+ }
+
+ for (npolled = 0; npolled < num_entries; ++npolled) {
+ if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
+ break;
+ }
+
+ if (npolled)
+ update_cq_db(hr_dev, hr_cq);
+
+out:
+ spin_unlock_irqrestore(&hr_cq->lock, flags);
+
+ return npolled;
+}
+
+static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
+ u32 step_idx, u8 *mbox_cmd)
+{
+ u8 cmd;
+
+ switch (type) {
+ case HEM_TYPE_QPC:
+ cmd = HNS_ROCE_CMD_WRITE_QPC_BT0;
+ break;
+ case HEM_TYPE_MTPT:
+ cmd = HNS_ROCE_CMD_WRITE_MPT_BT0;
+ break;
+ case HEM_TYPE_CQC:
+ cmd = HNS_ROCE_CMD_WRITE_CQC_BT0;
+ break;
+ case HEM_TYPE_SRQC:
+ cmd = HNS_ROCE_CMD_WRITE_SRQC_BT0;
+ break;
+ case HEM_TYPE_SCCC:
+ cmd = HNS_ROCE_CMD_WRITE_SCCC_BT0;
+ break;
+ case HEM_TYPE_QPC_TIMER:
+ cmd = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
+ break;
+ case HEM_TYPE_CQC_TIMER:
+ cmd = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
+ break;
+ default:
+ dev_warn(hr_dev->dev, "failed to check hem type %u.\n", type);
+ return -EINVAL;
+ }
+
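+ /* step_idx offsets from the BT0 opcode to the opcode for the requested
+ * BT level.
+ */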
+ *mbox_cmd = cmd + step_idx;
+
+ return 0;
+}
+
+static int config_gmv_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj,
+ dma_addr_t base_addr)
+{
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+ u32 idx = obj / (HNS_HW_PAGE_SIZE / hr_dev->caps.gmv_entry_sz);
+ u64 addr = to_hr_hw_page_addr(base_addr);
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, false);
+
+ hr_reg_write(req, CFG_GMV_BT_BA_L, lower_32_bits(addr));
+ hr_reg_write(req, CFG_GMV_BT_BA_H, upper_32_bits(addr));
+ hr_reg_write(req, CFG_GMV_BT_IDX, idx);
+
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
+static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj,
+ dma_addr_t base_addr, u32 hem_type, u32 step_idx)
+{
+ int ret;
+ u8 cmd;
+
+ if (unlikely(hem_type == HEM_TYPE_GMV))
+ return config_gmv_ba_to_hw(hr_dev, obj, base_addr);
+
+ if (unlikely(hem_type == HEM_TYPE_SCCC && step_idx))
+ return 0;
+
+ ret = get_op_for_set_hem(hr_dev, hem_type, step_idx, &cmd);
+ if (ret < 0)
+ return ret;
+
+ return config_hem_ba_to_hw(hr_dev, base_addr, cmd, obj);
+}
+
+static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, int obj,
+ u32 step_idx)
+{
+ struct hns_roce_hem_mhop mhop;
+ struct hns_roce_hem *hem;
+ unsigned long mhop_obj = obj;
+ int i, j, k;
+ int ret = 0;
+ u64 hem_idx = 0;
+ u64 l1_idx = 0;
+ u64 bt_ba = 0;
+ u32 chunk_ba_num;
+ u32 hop_num;
+
+ if (!hns_roce_check_whether_mhop(hr_dev, table->type))
+ return 0;
+
+ hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
+ i = mhop.l0_idx;
+ j = mhop.l1_idx;
+ k = mhop.l2_idx;
+ hop_num = mhop.hop_num;
+ chunk_ba_num = mhop.bt_chunk_size / 8;
+
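+ /* Flatten the multi-hop (l0, l1, l2) coordinates into a linear HEM
+ * chunk index; each BT chunk holds chunk_ba_num base addresses.
+ */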
+ if (hop_num == 2) {
+ hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
+ k;
+ l1_idx = i * chunk_ba_num + j;
+ } else if (hop_num == 1) {
+ hem_idx = i * chunk_ba_num + j;
+ } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
+ hem_idx = i;
+ }
+
+ if (table->type == HEM_TYPE_SCCC)
+ obj = mhop.l0_idx;
+
+ if (check_whether_last_step(hop_num, step_idx)) {
+ hem = table->hem[hem_idx];
+
+ ret = set_hem_to_hw(hr_dev, obj, hem->dma, table->type, step_idx);
+ } else {
+ if (step_idx == 0)
+ bt_ba = table->bt_l0_dma_addr[i];
+ else if (step_idx == 1 && hop_num == 2)
+ bt_ba = table->bt_l1_dma_addr[l1_idx];
+
+ ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type, step_idx);
+ }
+
+ return ret;
+}
+
+static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ int tag, u32 step_idx)
+{
+ struct hns_roce_cmd_mailbox *mailbox;
+ struct device *dev = hr_dev->dev;
+ u8 cmd = 0xff;
+ int ret;
+
+ if (!hns_roce_check_whether_mhop(hr_dev, table->type))
+ return 0;
+
+ switch (table->type) {
+ case HEM_TYPE_QPC:
+ cmd = HNS_ROCE_CMD_DESTROY_QPC_BT0;
+ break;
+ case HEM_TYPE_MTPT:
+ cmd = HNS_ROCE_CMD_DESTROY_MPT_BT0;
+ break;
+ case HEM_TYPE_CQC:
+ cmd = HNS_ROCE_CMD_DESTROY_CQC_BT0;
+ break;
+ case HEM_TYPE_SRQC:
+ cmd = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
+ break;
+ case HEM_TYPE_SCCC:
+ case HEM_TYPE_QPC_TIMER:
+ case HEM_TYPE_CQC_TIMER:
+ case HEM_TYPE_GMV:
+ return 0;
+ default:
+ dev_warn(dev, "table %u not to be destroyed by mailbox!\n",
+ table->type);
+ return 0;
+ }
+
+ cmd += step_idx;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cmd, tag);
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ return ret;
+}
+
+static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask,
+ struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_cmd_mailbox *mailbox;
+ int qpc_size;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
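+ /* The mailbox buffer carries the new context followed by its mask; mask
+ * bits cleared to 0 mark the fields that hardware should update.
+ */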
+ /* The qpc size of HIP08 is only 256B, which is half that of HIP09 */
+ qpc_size = hr_dev->caps.qpc_sz;
+ memcpy(mailbox->buf, context, qpc_size);
+ memcpy(mailbox->buf + qpc_size, qpc_mask, qpc_size);
+
+ ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0,
+ HNS_ROCE_CMD_MODIFY_QPC, hr_qp->qpn);
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
+
+static void set_access_flags(struct hns_roce_qp *hr_qp,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask,
+ const struct ib_qp_attr *attr, int attr_mask)
+{
+ u8 dest_rd_atomic;
+ u32 access_flags;
+
+ dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
+ attr->max_dest_rd_atomic : hr_qp->resp_depth;
+
+ access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
+ attr->qp_access_flags : hr_qp->atomic_rd_en;
+
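+ /* With no destination RDMA read/atomic resources, only REMOTE_WRITE can
+ * remain enabled.
+ */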
+ if (!dest_rd_atomic)
+ access_flags &= IB_ACCESS_REMOTE_WRITE;
+
+ hr_reg_write_bool(context, QPC_RRE,
+ access_flags & IB_ACCESS_REMOTE_READ);
+ hr_reg_clear(qpc_mask, QPC_RRE);
+
+ hr_reg_write_bool(context, QPC_RWE,
+ access_flags & IB_ACCESS_REMOTE_WRITE);
+ hr_reg_clear(qpc_mask, QPC_RWE);
+
+ hr_reg_write_bool(context, QPC_ATE,
+ access_flags & IB_ACCESS_REMOTE_ATOMIC);
+ hr_reg_clear(qpc_mask, QPC_ATE);
+ hr_reg_write_bool(context, QPC_EXT_ATE,
+ access_flags & IB_ACCESS_REMOTE_ATOMIC);
+ hr_reg_clear(qpc_mask, QPC_EXT_ATE);
+}
+
+static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
+ struct hns_roce_v2_qp_context *context)
+{
+ hr_reg_write(context, QPC_SGE_SHIFT,
+ to_hr_hem_entries_shift(hr_qp->sge.sge_cnt,
+ hr_qp->sge.sge_shift));
+
+ hr_reg_write(context, QPC_SQ_SHIFT, ilog2(hr_qp->sq.wqe_cnt));
+
+ hr_reg_write(context, QPC_RQ_SHIFT, ilog2(hr_qp->rq.wqe_cnt));
+}
+
+static inline int get_cqn(struct ib_cq *ib_cq)
+{
+ return ib_cq ? to_hr_cq(ib_cq)->cqn : 0;
+}
+
+static inline int get_pdn(struct ib_pd *ib_pd)
+{
+ return ib_pd ? to_hr_pd(ib_pd)->pdn : 0;
+}
+
+static void modify_qp_reset_to_init(struct ib_qp *ibqp,
+ struct hns_roce_v2_qp_context *context)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+
+ /*
+ * In the v2 engine, software passes the context and a context mask to
+ * hardware when modifying a QP. For every field software wants to
+ * modify, all bits of that field in the context mask must be cleared
+ * to 0; all other mask bits stay 0x1.
+ */
+ hr_reg_write(context, QPC_TST, to_hr_qp_type(ibqp->qp_type));
+
+ hr_reg_write(context, QPC_PD, get_pdn(ibqp->pd));
+
+ hr_reg_write(context, QPC_RQWS, ilog2(hr_qp->rq.max_gs));
+
+ set_qpc_wqe_cnt(hr_qp, context);
+
+ /* When there is no VLAN, the VLAN ID must be set to 0xFFF */
+ hr_reg_write(context, QPC_VLAN_ID, 0xfff);
+
+ if (ibqp->qp_type == IB_QPT_XRC_TGT) {
+ context->qkey_xrcd = cpu_to_le32(hr_qp->xrcdn);
+
+ hr_reg_enable(context, QPC_XRC_QP_TYPE);
+ }
+
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
+ hr_reg_enable(context, QPC_RQ_RECORD_EN);
+
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
+ hr_reg_enable(context, QPC_OWNER_MODE);
+
+ hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_L,
+ lower_32_bits(hr_qp->rdb.dma) >> 1);
+ hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_H,
+ upper_32_bits(hr_qp->rdb.dma));
+
+ hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq));
+
+ if (ibqp->srq) {
+ hr_reg_enable(context, QPC_SRQ_EN);
+ hr_reg_write(context, QPC_SRQN, to_hr_srq(ibqp->srq)->srqn);
+ }
+
+ hr_reg_enable(context, QPC_FRE);
+
+ hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq));
+
+ if (hr_dev->caps.qpc_sz < HNS_ROCE_V3_QPC_SZ)
+ return;
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH)
+ hr_reg_enable(&context->ext, QPCEX_STASH);
+}
+
+static void modify_qp_init_to_init(struct ib_qp *ibqp,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ /*
+ * In the v2 engine, software passes the context and a context mask to
+ * hardware when modifying a QP. For every field software wants to
+ * modify, all bits of that field in the context mask must be cleared
+ * to 0; all other mask bits stay 0x1.
+ */
+ hr_reg_write(context, QPC_TST, to_hr_qp_type(ibqp->qp_type));
+ hr_reg_clear(qpc_mask, QPC_TST);
+
+ hr_reg_write(context, QPC_PD, get_pdn(ibqp->pd));
+ hr_reg_clear(qpc_mask, QPC_PD);
+
+ hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq));
+ hr_reg_clear(qpc_mask, QPC_RX_CQN);
+
+ hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq));
+ hr_reg_clear(qpc_mask, QPC_TX_CQN);
+
+ if (ibqp->srq) {
+ hr_reg_enable(context, QPC_SRQ_EN);
+ hr_reg_clear(qpc_mask, QPC_SRQ_EN);
+ hr_reg_write(context, QPC_SRQN, to_hr_srq(ibqp->srq)->srqn);
+ hr_reg_clear(qpc_mask, QPC_SRQN);
+ }
+}
+
+static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ u64 mtts[MTT_MIN_COUNT] = { 0 };
+ u64 wqe_sge_ba;
+ int ret;
+
+ /* Search qp buf's mtts */
+ ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts,
+ MTT_MIN_COUNT);
+ if (hr_qp->rq.wqe_cnt && ret) {
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to find QP(0x%lx) RQ WQE buf, ret = %d.\n",
+ hr_qp->qpn, ret);
+ return ret;
+ }
+
+ wqe_sge_ba = hns_roce_get_mtr_ba(&hr_qp->mtr);
+
+ context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
+ qpc_mask->wqe_sge_ba = 0;
+
+ /*
+ * In the v2 engine, software passes the context and a context mask to
+ * hardware when modifying a QP. For every field software wants to
+ * modify, all bits of that field in the context mask must be cleared
+ * to 0; all other mask bits stay 0x1.
+ */
+ hr_reg_write(context, QPC_WQE_SGE_BA_H, wqe_sge_ba >> (32 + 3));
+ hr_reg_clear(qpc_mask, QPC_WQE_SGE_BA_H);
+
+ hr_reg_write(context, QPC_SQ_HOP_NUM,
+ to_hr_hem_hopnum(hr_dev->caps.wqe_sq_hop_num,
+ hr_qp->sq.wqe_cnt));
+ hr_reg_clear(qpc_mask, QPC_SQ_HOP_NUM);
+
+ hr_reg_write(context, QPC_SGE_HOP_NUM,
+ to_hr_hem_hopnum(hr_dev->caps.wqe_sge_hop_num,
+ hr_qp->sge.sge_cnt));
+ hr_reg_clear(qpc_mask, QPC_SGE_HOP_NUM);
+
+ hr_reg_write(context, QPC_RQ_HOP_NUM,
+ to_hr_hem_hopnum(hr_dev->caps.wqe_rq_hop_num,
+ hr_qp->rq.wqe_cnt));
+
+ hr_reg_clear(qpc_mask, QPC_RQ_HOP_NUM);
+
+ hr_reg_write(context, QPC_WQE_SGE_BA_PG_SZ,
+ to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.ba_pg_shift));
+ hr_reg_clear(qpc_mask, QPC_WQE_SGE_BA_PG_SZ);
+
+ hr_reg_write(context, QPC_WQE_SGE_BUF_PG_SZ,
+ to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.buf_pg_shift));
+ hr_reg_clear(qpc_mask, QPC_WQE_SGE_BUF_PG_SZ);
+
+ context->rq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
+ qpc_mask->rq_cur_blk_addr = 0;
+
+ hr_reg_write(context, QPC_RQ_CUR_BLK_ADDR_H,
+ upper_32_bits(to_hr_hw_page_addr(mtts[0])));
+ hr_reg_clear(qpc_mask, QPC_RQ_CUR_BLK_ADDR_H);
+
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+ context->rq_nxt_blk_addr =
+ cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
+ qpc_mask->rq_nxt_blk_addr = 0;
+ hr_reg_write(context, QPC_RQ_NXT_BLK_ADDR_H,
+ upper_32_bits(to_hr_hw_page_addr(mtts[1])));
+ hr_reg_clear(qpc_mask, QPC_RQ_NXT_BLK_ADDR_H);
+ }
+
+ return 0;
+}
+
+static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u64 sge_cur_blk = 0;
+ u64 sq_cur_blk = 0;
+ int ret;
+
+ /* search qp buf's mtts */
+ ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->sq.offset,
+ &sq_cur_blk, 1);
+ if (ret) {
+ ibdev_err(ibdev, "failed to find QP(0x%lx) SQ WQE buf, ret = %d.\n",
+ hr_qp->qpn, ret);
+ return ret;
+ }
+ if (hr_qp->sge.sge_cnt > 0) {
+ ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
+ hr_qp->sge.offset, &sge_cur_blk, 1);
+ if (ret) {
+ ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf, ret = %d.\n",
+ hr_qp->qpn, ret);
+ return ret;
+ }
+ }
+
+ /*
+ * In the v2 engine, software passes the context and a context mask to
+ * hardware when modifying a QP. For every field software wants to
+ * modify, all bits of that field in the context mask must be cleared
+ * to 0; all other mask bits stay 0x1.
+ */
+ hr_reg_write(context, QPC_SQ_CUR_BLK_ADDR_L,
+ lower_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
+ hr_reg_write(context, QPC_SQ_CUR_BLK_ADDR_H,
+ upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
+ hr_reg_clear(qpc_mask, QPC_SQ_CUR_BLK_ADDR_L);
+ hr_reg_clear(qpc_mask, QPC_SQ_CUR_BLK_ADDR_H);
+
+ hr_reg_write(context, QPC_SQ_CUR_SGE_BLK_ADDR_L,
+ lower_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
+ hr_reg_write(context, QPC_SQ_CUR_SGE_BLK_ADDR_H,
+ upper_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
+ hr_reg_clear(qpc_mask, QPC_SQ_CUR_SGE_BLK_ADDR_L);
+ hr_reg_clear(qpc_mask, QPC_SQ_CUR_SGE_BLK_ADDR_H);
+
+ hr_reg_write(context, QPC_RX_SQ_CUR_BLK_ADDR_L,
+ lower_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
+ hr_reg_write(context, QPC_RX_SQ_CUR_BLK_ADDR_H,
+ upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
+ hr_reg_clear(qpc_mask, QPC_RX_SQ_CUR_BLK_ADDR_L);
+ hr_reg_clear(qpc_mask, QPC_RX_SQ_CUR_BLK_ADDR_H);
+
+ return 0;
+}
+
+static inline enum ib_mtu get_mtu(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr)
+{
+ if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
+ return IB_MTU_4096;
+
+ return attr->path_mtu;
+}
+
+static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr, int attr_mask,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask,
+ struct ib_udata *udata)
+{
+ struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata,
+ struct hns_roce_ucontext, ibucontext);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ dma_addr_t trrl_ba;
+ dma_addr_t irrl_ba;
+ enum ib_mtu ib_mtu;
+ u8 ack_req_freq;
+ const u8 *smac;
+ int lp_msg_len;
+ u8 lp_pktn_ini;
+ u64 *mtts;
+ u8 *dmac;
+ u32 port;
+ int mtu;
+ int ret;
+
+ ret = config_qp_rq_buf(hr_dev, hr_qp, context, qpc_mask);
+ if (ret) {
+ ibdev_err(ibdev, "failed to config rq buf, ret = %d.\n", ret);
+ return ret;
+ }
+
+ /* Search IRRL's mtts */
+ mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
+ hr_qp->qpn, &irrl_ba);
+ if (!mtts) {
+ ibdev_err(ibdev, "failed to find qp irrl_table.\n");
+ return -EINVAL;
+ }
+
+ /* Search TRRL's mtts */
+ mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
+ hr_qp->qpn, &trrl_ba);
+ if (!mtts) {
+ ibdev_err(ibdev, "failed to find qp trrl_table.\n");
+ return -EINVAL;
+ }
+
+ if (attr_mask & IB_QP_ALT_PATH) {
+ ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error.\n",
+ attr_mask);
+ return -EINVAL;
+ }
+
+ hr_reg_write(context, QPC_TRRL_BA_L, trrl_ba >> QPC_TRRL_BA_L_S);
+ hr_reg_clear(qpc_mask, QPC_TRRL_BA_L);
+ context->trrl_ba = cpu_to_le32(trrl_ba >> QPC_TRRL_BA_M_S);
+ qpc_mask->trrl_ba = 0;
+ hr_reg_write(context, QPC_TRRL_BA_H, trrl_ba >> QPC_TRRL_BA_H_S);
+ hr_reg_clear(qpc_mask, QPC_TRRL_BA_H);
+
+ context->irrl_ba = cpu_to_le32(irrl_ba >> QPC_IRRL_BA_L_S);
+ qpc_mask->irrl_ba = 0;
+ hr_reg_write(context, QPC_IRRL_BA_H, irrl_ba >> QPC_IRRL_BA_H_S);
+ hr_reg_clear(qpc_mask, QPC_IRRL_BA_H);
+
+ hr_reg_enable(context, QPC_RMT_E2E);
+ hr_reg_clear(qpc_mask, QPC_RMT_E2E);
+
+ hr_reg_write(context, QPC_SIG_TYPE, hr_qp->sq_signal_bits);
+ hr_reg_clear(qpc_mask, QPC_SIG_TYPE);
+
+ port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
+
+ smac = (const u8 *)hr_dev->dev_addr[port];
+ dmac = (u8 *)attr->ah_attr.roce.dmac;
+ /* When dmac equals smac, or loop_idc is 1, the traffic should be looped back */
+ if (ether_addr_equal_unaligned(dmac, smac) ||
+ hr_dev->loop_idc == 0x1) {
+ hr_reg_write(context, QPC_LBI, hr_dev->loop_idc);
+ hr_reg_clear(qpc_mask, QPC_LBI);
+ }
+
+ if (attr_mask & IB_QP_DEST_QPN) {
+ hr_reg_write(context, QPC_DQPN, attr->dest_qp_num);
+ hr_reg_clear(qpc_mask, QPC_DQPN);
+ }
+
+ memcpy(&context->dmac, dmac, sizeof(u32));
+ hr_reg_write(context, QPC_DMAC_H, *((u16 *)(&dmac[4])));
+ qpc_mask->dmac = 0;
+ hr_reg_clear(qpc_mask, QPC_DMAC_H);
+
+ ib_mtu = get_mtu(ibqp, attr);
+ hr_qp->path_mtu = ib_mtu;
+
+ mtu = ib_mtu_enum_to_int(ib_mtu);
+ if (WARN_ON(mtu <= 0))
+ return -EINVAL;
+#define MIN_LP_MSG_LEN 1024
+ /* mtu * (2 ^ lp_pktn_ini) should be at least MIN_LP_MSG_LEN */
+ lp_msg_len = max(mtu, MIN_LP_MSG_LEN);
+ lp_pktn_ini = ilog2(lp_msg_len / mtu);
+
+ if (attr_mask & IB_QP_PATH_MTU) {
+ hr_reg_write(context, QPC_MTU, ib_mtu);
+ hr_reg_clear(qpc_mask, QPC_MTU);
+ }
+
+ hr_reg_write(context, QPC_LP_PKTN_INI, lp_pktn_ini);
+ hr_reg_clear(qpc_mask, QPC_LP_PKTN_INI);
+
+ /*
+ * There are several constraints for ACK_REQ_FREQ:
+ * 1. mtu * (2 ^ ACK_REQ_FREQ) should not be too large, otherwise
+ * it may cause some unexpected retries when sending large
+ * payload.
+ * 2. ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI.
+ * 3. ACK_REQ_FREQ must be equal to LP_PKTN_INI when using LDCP
+ * or HC3 congestion control algorithm.
+ */
+ if (hr_qp->cong_type == CONG_TYPE_LDCP ||
+ hr_qp->cong_type == CONG_TYPE_HC3 ||
+ hr_dev->caps.max_ack_req_msg_len < lp_msg_len)
+ ack_req_freq = lp_pktn_ini;
+ else
+ ack_req_freq = ilog2(hr_dev->caps.max_ack_req_msg_len / mtu);
+ hr_reg_write(context, QPC_ACK_REQ_FREQ, ack_req_freq);
+ hr_reg_clear(qpc_mask, QPC_ACK_REQ_FREQ);
+
+ hr_reg_clear(qpc_mask, QPC_RX_REQ_PSN_ERR);
+ hr_reg_clear(qpc_mask, QPC_RX_REQ_MSN);
+ hr_reg_clear(qpc_mask, QPC_RX_REQ_LAST_OPTYPE);
+
+ context->rq_rnr_timer = 0;
+ qpc_mask->rq_rnr_timer = 0;
+
+ hr_reg_clear(qpc_mask, QPC_TRRL_HEAD_MAX);
+ hr_reg_clear(qpc_mask, QPC_TRRL_TAIL_MAX);
+
+#define MAX_LP_SGEN 3
+ /* ROCEE sends 2^lp_sgen_ini segments at a time */
+ hr_reg_write(context, QPC_LP_SGEN_INI, MAX_LP_SGEN);
+ hr_reg_clear(qpc_mask, QPC_LP_SGEN_INI);
+
+ if (udata && ibqp->qp_type == IB_QPT_RC &&
+ (uctx->config & HNS_ROCE_RQ_INLINE_FLAGS)) {
+ hr_reg_write_bool(context, QPC_RQIE,
+ hr_dev->caps.flags &
+ HNS_ROCE_CAP_FLAG_RQ_INLINE);
+ hr_reg_clear(qpc_mask, QPC_RQIE);
+ }
+
+ if (udata &&
+ (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_XRC_TGT) &&
+ (uctx->config & HNS_ROCE_CQE_INLINE_FLAGS)) {
+ hr_reg_write_bool(context, QPC_CQEIE,
+ hr_dev->caps.flags &
+ HNS_ROCE_CAP_FLAG_CQE_INLINE);
+ hr_reg_clear(qpc_mask, QPC_CQEIE);
+
+ hr_reg_write(context, QPC_CQEIS, 0);
+ hr_reg_clear(qpc_mask, QPC_CQEIS);
+ }
+
+ return 0;
+}
+
+static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, int attr_mask,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int ret;
+
+ /* Alternate path and path migration are not supported */
+ if (attr_mask & (IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE)) {
+ ibdev_err(ibdev, "RTR2RTS attr_mask (0x%x) error\n", attr_mask);
+ return -EINVAL;
+ }
+
+ ret = config_qp_sq_buf(hr_dev, hr_qp, context, qpc_mask);
+ if (ret) {
+ ibdev_err(ibdev, "failed to config sq buf, ret = %d.\n", ret);
+ return ret;
+ }
+
+ /*
+ * Some fields in the context need to become zero. Because the default
+ * value of every context field is already zero, they do not need to be
+ * written again; only the corresponding fields in the context mask must
+ * be cleared to 0.
+ */
+ hr_reg_clear(qpc_mask, QPC_IRRL_SGE_IDX);
+
+ hr_reg_clear(qpc_mask, QPC_RX_ACK_MSN);
+
+ hr_reg_clear(qpc_mask, QPC_ACK_LAST_OPTYPE);
+ hr_reg_clear(qpc_mask, QPC_IRRL_PSN_VLD);
+ hr_reg_clear(qpc_mask, QPC_IRRL_PSN);
+
+ hr_reg_clear(qpc_mask, QPC_IRRL_TAIL_REAL);
+
+ hr_reg_clear(qpc_mask, QPC_RETRY_MSG_MSN);
+
+ hr_reg_clear(qpc_mask, QPC_RNR_RETRY_FLAG);
+
+ hr_reg_clear(qpc_mask, QPC_CHECK_FLG);
+
+ hr_reg_clear(qpc_mask, QPC_V2_IRRL_HEAD);
+
+ return 0;
+}
+
+static int alloc_dip_entry(struct xarray *dip_xa, u32 qpn)
+{
+ struct hns_roce_dip *hr_dip;
+ int ret;
+
+ hr_dip = xa_load(dip_xa, qpn);
+ if (hr_dip)
+ return 0;
+
+ hr_dip = kzalloc(sizeof(*hr_dip), GFP_KERNEL);
+ if (!hr_dip)
+ return -ENOMEM;
+
+ ret = xa_err(xa_store(dip_xa, qpn, hr_dip, GFP_KERNEL));
+ if (ret)
+ kfree(hr_dip);
+
+ return ret;
+}
+
+static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
+ u32 *dip_idx)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct xarray *dip_xa = &hr_dev->qp_table.dip_xa;
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct hns_roce_dip *hr_dip;
+ unsigned long idx;
+ int ret = 0;
+
+ ret = alloc_dip_entry(dip_xa, ibqp->qp_num);
+ if (ret)
+ return ret;
+
+ xa_lock(dip_xa);
+
+ xa_for_each(dip_xa, idx, hr_dip) {
+ if (hr_dip->qp_cnt &&
+ !memcmp(grh->dgid.raw, hr_dip->dgid, GID_LEN_V2)) {
+ *dip_idx = hr_dip->dip_idx;
+ hr_dip->qp_cnt++;
+ hr_qp->dip = hr_dip;
+ goto out;
+ }
+ }
+
+ /* If no dgid is found, a new dip and a mapping between dgid and
+ * dip_idx will be created.
+ */
+ xa_for_each(dip_xa, idx, hr_dip) {
+ if (hr_dip->qp_cnt)
+ continue;
+
+ *dip_idx = idx;
+ memcpy(hr_dip->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
+ hr_dip->dip_idx = idx;
+ hr_dip->qp_cnt++;
+ hr_qp->dip = hr_dip;
+ break;
+ }
+
+ /* This should never happen. */
+ if (WARN_ON_ONCE(!hr_qp->dip))
+ ret = -ENOSPC;
+
+out:
+ xa_unlock(dip_xa);
+ return ret;
+}
+
+enum {
+ CONG_DCQCN,
+ CONG_WINDOW,
+};
+
+enum {
+ UNSUPPORT_CONG_LEVEL,
+ SUPPORT_CONG_LEVEL,
+};
+
+enum {
+ CONG_LDCP,
+ CONG_HC3,
+};
+
+enum {
+ DIP_INVALID,
+ DIP_VALID,
+};
+
+enum {
+ WND_LIMIT,
+ WND_UNLIMIT,
+};
+
+static int check_cong_type(struct ib_qp *ibqp,
+ struct hns_roce_congestion_algorithm *cong_alg)
+{
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+
+ /* different congestion types match different configurations */
+ switch (hr_qp->cong_type) {
+ case CONG_TYPE_DCQCN:
+ cong_alg->alg_sel = CONG_DCQCN;
+ cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
+ cong_alg->dip_vld = DIP_INVALID;
+ cong_alg->wnd_mode_sel = WND_LIMIT;
+ break;
+ case CONG_TYPE_LDCP:
+ cong_alg->alg_sel = CONG_WINDOW;
+ cong_alg->alg_sub_sel = CONG_LDCP;
+ cong_alg->dip_vld = DIP_INVALID;
+ cong_alg->wnd_mode_sel = WND_UNLIMIT;
+ break;
+ case CONG_TYPE_HC3:
+ cong_alg->alg_sel = CONG_WINDOW;
+ cong_alg->alg_sub_sel = CONG_HC3;
+ cong_alg->dip_vld = DIP_INVALID;
+ cong_alg->wnd_mode_sel = WND_LIMIT;
+ break;
+ case CONG_TYPE_DIP:
+ cong_alg->alg_sel = CONG_DCQCN;
+ cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
+ cong_alg->dip_vld = DIP_VALID;
+ cong_alg->wnd_mode_sel = WND_LIMIT;
+ break;
+ default:
+ hr_qp->cong_type = CONG_TYPE_DCQCN;
+ cong_alg->alg_sel = CONG_DCQCN;
+ cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
+ cong_alg->dip_vld = DIP_INVALID;
+ cong_alg->wnd_mode_sel = WND_LIMIT;
+ break;
+ }
+
+ return 0;
+}
+
+static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+ struct hns_roce_congestion_algorithm cong_field;
+ struct ib_device *ibdev = ibqp->device;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ u32 dip_idx = 0;
+ int ret;
+
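+ /* Congestion fields are not configured on HIP08 or for RoCEv1 GIDs */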
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ||
+ grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE)
+ return 0;
+
+ ret = check_cong_type(ibqp, &cong_field);
+ if (ret)
+ return ret;
+
+ hr_reg_write(context, QPC_CONG_ALGO_TMPL_ID, hr_dev->cong_algo_tmpl_id +
+ hr_qp->cong_type * HNS_ROCE_CONG_SIZE);
+ hr_reg_clear(qpc_mask, QPC_CONG_ALGO_TMPL_ID);
+ hr_reg_write(&context->ext, QPCEX_CONG_ALG_SEL, cong_field.alg_sel);
+ hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SEL);
+ hr_reg_write(&context->ext, QPCEX_CONG_ALG_SUB_SEL,
+ cong_field.alg_sub_sel);
+ hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SUB_SEL);
+ hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX_VLD, cong_field.dip_vld);
+ hr_reg_clear(&qpc_mask->ext, QPCEX_DIP_CTX_IDX_VLD);
+ hr_reg_write(&context->ext, QPCEX_SQ_RQ_NOT_FORBID_EN,
+ cong_field.wnd_mode_sel);
+ hr_reg_clear(&qpc_mask->ext, QPCEX_SQ_RQ_NOT_FORBID_EN);
+
+ /* if dip is disabled, there is no need to set dip idx */
+ if (cong_field.dip_vld == 0)
+ return 0;
+
+ ret = get_dip_ctx_idx(ibqp, attr, &dip_idx);
+ if (ret) {
+ ibdev_err(ibdev, "failed to fill cong field, ret = %d.\n", ret);
+ return ret;
+ }
+
+ hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX, dip_idx);
+ hr_reg_write(&qpc_mask->ext, QPCEX_DIP_CTX_IDX, 0);
+
+ return 0;
+}
+
+static int hns_roce_hw_v2_get_dscp(struct hns_roce_dev *hr_dev, u8 dscp,
+ u8 *tc_mode, u8 *priority)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (!ops->get_dscp_prio)
+ return -EOPNOTSUPP;
+
+ return ops->get_dscp_prio(handle, dscp, tc_mode, priority);
+}
+
+bool check_sl_valid(struct hns_roce_dev *hr_dev, u8 sl)
+{
+ u32 max_sl;
+
+ max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
+ if (unlikely(sl > max_sl)) {
+ ibdev_err_ratelimited(&hr_dev->ib_dev,
+ "failed to set SL(%u). Shouldn't be larger than %u.\n",
+ sl, max_sl);
+ return false;
+ }
+
+ return true;
+}
+
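+ /*
+ * Derive the QP service level: when DSCP-based TC mapping is active on
+ * a RoCEv2 GID, use the DSCP-mapped priority; otherwise take the SL
+ * from the AH attribute.
+ */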
+static int hns_roce_set_sl(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int ret;
+
+ ret = hns_roce_hw_v2_get_dscp(hr_dev, get_tclass(&attr->ah_attr.grh),
+ &hr_qp->tc_mode, &hr_qp->priority);
+ if (ret && ret != -EOPNOTSUPP &&
+ grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
+ ibdev_err_ratelimited(ibdev,
+ "failed to get dscp, ret = %d.\n", ret);
+ return ret;
+ }
+
+ if (hr_qp->tc_mode == HNAE3_TC_MAP_MODE_DSCP &&
+ grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+ hr_qp->sl = hr_qp->priority;
+ else
+ hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+
+ if (!check_sl_valid(hr_dev, hr_qp->sl))
+ return -EINVAL;
+
+ hr_reg_write(context, QPC_SL, hr_qp->sl);
+ hr_reg_clear(qpc_mask, QPC_SL);
+
+ return 0;
+}
+
+static int hns_roce_v2_set_path(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr,
+ int attr_mask,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ const struct ib_gid_attr *gid_attr = NULL;
+ u8 sl = rdma_ah_get_sl(&attr->ah_attr);
+ int is_roce_protocol;
+ u16 vlan_id = 0xffff;
+ bool is_udp = false;
+ u8 ib_port;
+ u8 hr_port;
+ int ret;
+
+ /*
+ * If free_mr_en is set, this QP was created by the free MR mechanism
+ * and is only used for loopback traffic, so only the service level
+ * (SL) needs to be configured.
+ */
+ if (hr_qp->free_mr_en) {
+ if (!check_sl_valid(hr_dev, sl))
+ return -EINVAL;
+ hr_reg_write(context, QPC_SL, sl);
+ hr_reg_clear(qpc_mask, QPC_SL);
+ hr_qp->sl = sl;
+ return 0;
+ }
+
+ ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1;
+ hr_port = ib_port - 1;
+ is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
+ rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
+
+ if (is_roce_protocol) {
+ gid_attr = attr->ah_attr.grh.sgid_attr;
+ ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
+ if (ret)
+ return ret;
+
+ is_udp = (gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
+ }
+
+ /* Only HIP08 needs to set the vlan_en bits in QPC */
+ if (vlan_id < VLAN_N_VID &&
+ hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+ hr_reg_enable(context, QPC_RQ_VLAN_EN);
+ hr_reg_clear(qpc_mask, QPC_RQ_VLAN_EN);
+ hr_reg_enable(context, QPC_SQ_VLAN_EN);
+ hr_reg_clear(qpc_mask, QPC_SQ_VLAN_EN);
+ }
+
+ hr_reg_write(context, QPC_VLAN_ID, vlan_id);
+ hr_reg_clear(qpc_mask, QPC_VLAN_ID);
+
+ if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
+ ibdev_err(ibdev, "sgid_index(%u) too large. max is %d\n",
+ grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
+ return -EINVAL;
+ }
+
+ if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
+ ibdev_err(ibdev, "ah attr is not RDMA roce type\n");
+ return -EINVAL;
+ }
+
+ hr_reg_write(context, QPC_UDPSPN,
+ is_udp ? rdma_get_udp_sport(grh->flow_label, ibqp->qp_num,
+ attr->dest_qp_num) :
+ 0);
+
+ hr_reg_clear(qpc_mask, QPC_UDPSPN);
+
+ hr_reg_write(context, QPC_GMV_IDX, grh->sgid_index);
+
+ hr_reg_clear(qpc_mask, QPC_GMV_IDX);
+
+ hr_reg_write(context, QPC_HOPLIMIT, grh->hop_limit);
+ hr_reg_clear(qpc_mask, QPC_HOPLIMIT);
+
+ ret = fill_cong_field(ibqp, attr, context, qpc_mask);
+ if (ret)
+ return ret;
+
+ hr_reg_write(context, QPC_TC, get_tclass(&attr->ah_attr.grh));
+ hr_reg_clear(qpc_mask, QPC_TC);
+
+ hr_reg_write(context, QPC_FL, grh->flow_label);
+ hr_reg_clear(qpc_mask, QPC_FL);
+ memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
+ memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
+
+ return hns_roce_set_sl(ibqp, attr, context, qpc_mask);
+}
+
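+ /*
+ * Table of QP state transitions accepted by this engine; any
+ * transition not marked true is rejected with -EINVAL by
+ * hns_roce_v2_set_abs_fields().
+ */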
+static bool check_qp_state(enum ib_qp_state cur_state,
+ enum ib_qp_state new_state)
+{
+ static const bool sm[][IB_QPS_ERR + 1] = {
+ [IB_QPS_RESET] = { [IB_QPS_RESET] = true,
+ [IB_QPS_INIT] = true },
+ [IB_QPS_INIT] = { [IB_QPS_RESET] = true,
+ [IB_QPS_INIT] = true,
+ [IB_QPS_RTR] = true,
+ [IB_QPS_ERR] = true },
+ [IB_QPS_RTR] = { [IB_QPS_RESET] = true,
+ [IB_QPS_RTS] = true,
+ [IB_QPS_ERR] = true },
+ [IB_QPS_RTS] = { [IB_QPS_RESET] = true,
+ [IB_QPS_RTS] = true,
+ [IB_QPS_ERR] = true },
+ [IB_QPS_SQD] = {},
+ [IB_QPS_SQE] = {},
+ [IB_QPS_ERR] = { [IB_QPS_RESET] = true,
+ [IB_QPS_ERR] = true }
+ };
+
+ return sm[cur_state][new_state];
+}
+
+static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr,
+ int attr_mask,
+ enum ib_qp_state cur_state,
+ enum ib_qp_state new_state,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ int ret = 0;
+
+ if (!check_qp_state(cur_state, new_state))
+ return -EINVAL;
+
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
+ modify_qp_reset_to_init(ibqp, context);
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
+ modify_qp_init_to_init(ibqp, context, qpc_mask);
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+ ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
+ qpc_mask, udata);
+ } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
+ ret = modify_qp_rtr_to_rts(ibqp, attr_mask, context, qpc_mask);
+ }
+
+ return ret;
+}
+
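+ /*
+ * HIP08 only accepts local ACK timeout values 0-20 and needs a fixed
+ * offset added to the value written into QPC; later revisions accept
+ * the full 0-31 range unmodified.
+ */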
+static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
+{
+#define QP_ACK_TIMEOUT_MAX_HIP08 20
+#define QP_ACK_TIMEOUT_MAX 31
+
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+ if (*timeout > QP_ACK_TIMEOUT_MAX_HIP08) {
+ ibdev_warn(&hr_dev->ib_dev,
+ "local ACK timeout shall be 0 to 20.\n");
+ return false;
+ }
+ *timeout += HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08;
+ } else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) {
+ if (*timeout > QP_ACK_TIMEOUT_MAX) {
+ ibdev_warn(&hr_dev->ib_dev,
+ "local ACK timeout shall be 0 to 31.\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr,
+ int attr_mask,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ int ret = 0;
+ u8 timeout;
+
+ if (attr_mask & IB_QP_AV) {
+ ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
+ qpc_mask);
+ if (ret)
+ return ret;
+ }
+
+ if (attr_mask & IB_QP_TIMEOUT) {
+ timeout = attr->timeout;
+ if (check_qp_timeout_cfg_range(hr_dev, &timeout)) {
+ hr_reg_write(context, QPC_AT, timeout);
+ hr_reg_clear(qpc_mask, QPC_AT);
+ }
+ }
+
+ if (attr_mask & IB_QP_RETRY_CNT) {
+ hr_reg_write(context, QPC_RETRY_NUM_INIT, attr->retry_cnt);
+ hr_reg_clear(qpc_mask, QPC_RETRY_NUM_INIT);
+
+ hr_reg_write(context, QPC_RETRY_CNT, attr->retry_cnt);
+ hr_reg_clear(qpc_mask, QPC_RETRY_CNT);
+ }
+
+ if (attr_mask & IB_QP_RNR_RETRY) {
+ hr_reg_write(context, QPC_RNR_NUM_INIT, attr->rnr_retry);
+ hr_reg_clear(qpc_mask, QPC_RNR_NUM_INIT);
+
+ hr_reg_write(context, QPC_RNR_CNT, attr->rnr_retry);
+ hr_reg_clear(qpc_mask, QPC_RNR_CNT);
+ }
+
+ if (attr_mask & IB_QP_SQ_PSN) {
+ hr_reg_write(context, QPC_SQ_CUR_PSN, attr->sq_psn);
+ hr_reg_clear(qpc_mask, QPC_SQ_CUR_PSN);
+
+ hr_reg_write(context, QPC_SQ_MAX_PSN, attr->sq_psn);
+ hr_reg_clear(qpc_mask, QPC_SQ_MAX_PSN);
+
+ hr_reg_write(context, QPC_RETRY_MSG_PSN_L, attr->sq_psn);
+ hr_reg_clear(qpc_mask, QPC_RETRY_MSG_PSN_L);
+
+ hr_reg_write(context, QPC_RETRY_MSG_PSN_H,
+ attr->sq_psn >> RETRY_MSG_PSN_SHIFT);
+ hr_reg_clear(qpc_mask, QPC_RETRY_MSG_PSN_H);
+
+ hr_reg_write(context, QPC_RETRY_MSG_FPKT_PSN, attr->sq_psn);
+ hr_reg_clear(qpc_mask, QPC_RETRY_MSG_FPKT_PSN);
+
+ hr_reg_write(context, QPC_RX_ACK_EPSN, attr->sq_psn);
+ hr_reg_clear(qpc_mask, QPC_RX_ACK_EPSN);
+ }
+
+ if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
+ attr->max_dest_rd_atomic) {
+ hr_reg_write(context, QPC_RR_MAX,
+ fls(attr->max_dest_rd_atomic - 1));
+ hr_reg_clear(qpc_mask, QPC_RR_MAX);
+ }
+
+ if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
+ hr_reg_write(context, QPC_SR_MAX, fls(attr->max_rd_atomic - 1));
+ hr_reg_clear(qpc_mask, QPC_SR_MAX);
+ }
+
+ if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
+ set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER) {
+ hr_reg_write(context, QPC_MIN_RNR_TIME,
+ hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ?
+ HNS_ROCE_RNR_TIMER_10NS : attr->min_rnr_timer);
+ hr_reg_clear(qpc_mask, QPC_MIN_RNR_TIME);
+ }
+
+ if (attr_mask & IB_QP_RQ_PSN) {
+ hr_reg_write(context, QPC_RX_REQ_EPSN, attr->rq_psn);
+ hr_reg_clear(qpc_mask, QPC_RX_REQ_EPSN);
+
+ hr_reg_write(context, QPC_RAQ_PSN, attr->rq_psn - 1);
+ hr_reg_clear(qpc_mask, QPC_RAQ_PSN);
+ }
+
+ if (attr_mask & IB_QP_QKEY) {
+ context->qkey_xrcd = cpu_to_le32(attr->qkey);
+ qpc_mask->qkey_xrcd = 0;
+ hr_qp->qkey = attr->qkey;
+ }
+
+ return ret;
+}
+
+static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr,
+ int attr_mask)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ hr_qp->atomic_rd_en = attr->qp_access_flags;
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ hr_qp->resp_depth = attr->max_dest_rd_atomic;
+ if (attr_mask & IB_QP_PORT) {
+ hr_qp->port = attr->port_num - 1;
+ hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
+ }
+}
+
+static void clear_qp(struct hns_roce_qp *hr_qp)
+{
+ struct ib_qp *ibqp = &hr_qp->ibqp;
+
+ if (ibqp->send_cq)
+ hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
+ hr_qp->qpn, NULL);
+
+ if (ibqp->recv_cq && ibqp->recv_cq != ibqp->send_cq)
+ hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq),
+ hr_qp->qpn, ibqp->srq ?
+ to_hr_srq(ibqp->srq) : NULL);
+
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
+ *hr_qp->rdb.db_record = 0;
+
+ hr_qp->rq.head = 0;
+ hr_qp->rq.tail = 0;
+ hr_qp->sq.head = 0;
+ hr_qp->sq.tail = 0;
+ hr_qp->next_sge = 0;
+}
+
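+ /*
+ * Record the current SQ/RQ producer indexes in the QPC so that
+ * outstanding WQEs are flushed when the QP enters the error state.
+ */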
+static void v2_set_flushed_fields(struct ib_qp *ibqp,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ unsigned long sq_flag = 0;
+ unsigned long rq_flag = 0;
+
+ if (ibqp->qp_type == IB_QPT_XRC_TGT)
+ return;
+
+ spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
+ trace_hns_sq_flush_cqe(hr_qp->qpn, hr_qp->sq.head, TRACE_SQ);
+ hr_reg_write(context, QPC_SQ_PRODUCER_IDX, hr_qp->sq.head);
+ hr_reg_clear(qpc_mask, QPC_SQ_PRODUCER_IDX);
+ hr_qp->state = IB_QPS_ERR;
+ spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag);
+
+ if (ibqp->srq || ibqp->qp_type == IB_QPT_XRC_INI) /* no RQ */
+ return;
+
+ spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
+ trace_hns_rq_flush_cqe(hr_qp->qpn, hr_qp->rq.head, TRACE_RQ);
+ hr_reg_write(context, QPC_RQ_PRODUCER_IDX, hr_qp->rq.head);
+ hr_reg_clear(qpc_mask, QPC_RQ_PRODUCER_IDX);
+ spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag);
+}
+
+static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr,
+ int attr_mask, enum ib_qp_state cur_state,
+ enum ib_qp_state new_state, struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct hns_roce_v2_qp_context *context;
+ struct hns_roce_v2_qp_context *qpc_mask;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int ret = -ENOMEM;
+
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
+ /*
+ * In the v2 engine, software passes a context and a context mask to
+ * hardware when modifying a QP. For every field that software wants
+ * to modify, all bits of that field in the context mask must be
+ * cleared to 0; all remaining mask bits stay 0x1.
+ */
+ context = kvzalloc(sizeof(*context), GFP_KERNEL);
+ qpc_mask = kvzalloc(sizeof(*qpc_mask), GFP_KERNEL);
+ if (!context || !qpc_mask)
+ goto out;
+
+ memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz);
+
+ ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
+ new_state, context, qpc_mask, udata);
+ if (ret)
+ goto out;
+
+ /* When the QP moves to the error state, outstanding SQ and RQ WQEs must be flushed */
+ if (new_state == IB_QPS_ERR)
+ v2_set_flushed_fields(ibqp, context, qpc_mask);
+
+ /* Configure the optional fields */
+ ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
+ qpc_mask);
+ if (ret)
+ goto out;
+
+ hr_reg_write_bool(context, QPC_INV_CREDIT,
+ to_hr_qp_type(hr_qp->ibqp.qp_type) == SERV_TYPE_XRC ||
+ ibqp->srq);
+ hr_reg_clear(qpc_mask, QPC_INV_CREDIT);
+
+ /* Every state transition must update the QP state field */
+ hr_reg_write(context, QPC_QP_ST, new_state);
+ hr_reg_clear(qpc_mask, QPC_QP_ST);
+
+ /* SW passes the context to HW */
+ ret = hns_roce_v2_qp_modify(hr_dev, context, qpc_mask, hr_qp);
+ if (ret) {
+ ibdev_err_ratelimited(ibdev, "failed to modify QP, ret = %d.\n", ret);
+ goto out;
+ }
+
+ hr_qp->state = new_state;
+
+ hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);
+
+ if (new_state == IB_QPS_RESET && !ibqp->uobject)
+ clear_qp(hr_qp);
+
+out:
+ kvfree(qpc_mask);
+ kvfree(context);
+ return ret;
+}
+
+static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
+{
+ static const enum ib_qp_state map[] = {
+ [HNS_ROCE_QP_ST_RST] = IB_QPS_RESET,
+ [HNS_ROCE_QP_ST_INIT] = IB_QPS_INIT,
+ [HNS_ROCE_QP_ST_RTR] = IB_QPS_RTR,
+ [HNS_ROCE_QP_ST_RTS] = IB_QPS_RTS,
+ [HNS_ROCE_QP_ST_SQD] = IB_QPS_SQD,
+ [HNS_ROCE_QP_ST_SQER] = IB_QPS_SQE,
+ [HNS_ROCE_QP_ST_ERR] = IB_QPS_ERR,
+ [HNS_ROCE_QP_ST_SQ_DRAINING] = IB_QPS_SQD
+ };
+
+ return (state < ARRAY_SIZE(map)) ? map[state] : -1;
+}
+
+static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev, u32 qpn,
+ void *buffer)
+{
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_QPC,
+ qpn);
+ if (ret)
+ goto out;
+
+ memcpy(buffer, mailbox->buf, hr_dev->caps.qpc_sz);
+
+out:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ return ret;
+}
+
+static int hns_roce_v2_query_srqc(struct hns_roce_dev *hr_dev, u32 srqn,
+ void *buffer)
+{
+ struct hns_roce_srq_context *context;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ context = mailbox->buf;
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_SRQC,
+ srqn);
+ if (ret)
+ goto out;
+
+ memcpy(buffer, context, sizeof(*context));
+
+out:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ return ret;
+}
+
+static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 sccn,
+ void *buffer)
+{
+ struct hns_roce_v2_scc_context *context;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_SCCC,
+ sccn);
+ if (ret)
+ goto out;
+
+ context = mailbox->buf;
+ memcpy(buffer, context, sizeof(*context));
+
+out:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ return ret;
+}
+
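+ /* Undo the HIP08 ACK timeout offset applied in check_qp_timeout_cfg_range() */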
+static u8 get_qp_timeout_attr(struct hns_roce_dev *hr_dev,
+ struct hns_roce_v2_qp_context *context)
+{
+ u8 timeout;
+
+ timeout = (u8)hr_reg_read(context, QPC_AT);
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+ timeout -= HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08;
+
+ return timeout;
+}
+
+static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct hns_roce_v2_qp_context context = {};
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int tmp_qp_state;
+ int state;
+ int ret;
+
+ memset(qp_attr, 0, sizeof(*qp_attr));
+ memset(qp_init_attr, 0, sizeof(*qp_init_attr));
+
+ mutex_lock(&hr_qp->mutex);
+
+ if (hr_qp->state == IB_QPS_RESET) {
+ qp_attr->qp_state = IB_QPS_RESET;
+ ret = 0;
+ goto done;
+ }
+
+ ret = hns_roce_v2_query_qpc(hr_dev, hr_qp->qpn, &context);
+ if (ret) {
+ ibdev_err_ratelimited(ibdev,
+ "failed to query QPC, ret = %d.\n",
+ ret);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ state = hr_reg_read(&context, QPC_QP_ST);
+ tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
+ if (tmp_qp_state == -1) {
+ ibdev_err_ratelimited(ibdev, "Illegal ib_qp_state\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ hr_qp->state = (u8)tmp_qp_state;
+ qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
+ qp_attr->path_mtu = (enum ib_mtu)hr_reg_read(&context, QPC_MTU);
+ qp_attr->path_mig_state = IB_MIG_ARMED;
+ qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
+ if (hr_qp->ibqp.qp_type == IB_QPT_UD)
+ qp_attr->qkey = le32_to_cpu(context.qkey_xrcd);
+
+ qp_attr->rq_psn = hr_reg_read(&context, QPC_RX_REQ_EPSN);
+ qp_attr->sq_psn = (u32)hr_reg_read(&context, QPC_SQ_CUR_PSN);
+ qp_attr->dest_qp_num = hr_reg_read(&context, QPC_DQPN);
+ qp_attr->qp_access_flags =
+ ((hr_reg_read(&context, QPC_RRE)) << V2_QP_RRE_S) |
+ ((hr_reg_read(&context, QPC_RWE)) << V2_QP_RWE_S) |
+ ((hr_reg_read(&context, QPC_ATE)) << V2_QP_ATE_S);
+
+ if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
+ hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
+ hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) {
+ struct ib_global_route *grh =
+ rdma_ah_retrieve_grh(&qp_attr->ah_attr);
+
+ rdma_ah_set_sl(&qp_attr->ah_attr,
+ hr_reg_read(&context, QPC_SL));
+ rdma_ah_set_port_num(&qp_attr->ah_attr, hr_qp->port + 1);
+ rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH);
+ grh->flow_label = hr_reg_read(&context, QPC_FL);
+ grh->sgid_index = hr_reg_read(&context, QPC_GMV_IDX);
+ grh->hop_limit = hr_reg_read(&context, QPC_HOPLIMIT);
+ grh->traffic_class = hr_reg_read(&context, QPC_TC);
+
+ memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw));
+ }
+
+ qp_attr->port_num = hr_qp->port + 1;
+ qp_attr->sq_draining = 0;
+ qp_attr->max_rd_atomic = 1 << hr_reg_read(&context, QPC_SR_MAX);
+ qp_attr->max_dest_rd_atomic = 1 << hr_reg_read(&context, QPC_RR_MAX);
+
+ qp_attr->min_rnr_timer = (u8)hr_reg_read(&context, QPC_MIN_RNR_TIME);
+ qp_attr->timeout = get_qp_timeout_attr(hr_dev, &context);
+ qp_attr->retry_cnt = hr_reg_read(&context, QPC_RETRY_NUM_INIT);
+ qp_attr->rnr_retry = hr_reg_read(&context, QPC_RNR_NUM_INIT);
+
+done:
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+ qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
+ qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
+ qp_attr->cap.max_inline_data = hr_qp->max_inline_data;
+
+ qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
+ qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
+
+ qp_init_attr->qp_context = ibqp->qp_context;
+ qp_init_attr->qp_type = ibqp->qp_type;
+ qp_init_attr->recv_cq = ibqp->recv_cq;
+ qp_init_attr->send_cq = ibqp->send_cq;
+ qp_init_attr->srq = ibqp->srq;
+ qp_init_attr->cap = qp_attr->cap;
+ qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits;
+
+out:
+ mutex_unlock(&hr_qp->mutex);
+ return ret;
+}
+
+static inline int modify_qp_is_ok(struct hns_roce_qp *hr_qp)
+{
+ return ((hr_qp->ibqp.qp_type == IB_QPT_RC ||
+ hr_qp->ibqp.qp_type == IB_QPT_UD ||
+ hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
+ hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) &&
+ hr_qp->state != IB_QPS_RESET);
+}
+
+static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct ib_udata *udata)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_cq *send_cq, *recv_cq;
+ unsigned long flags;
+ int ret = 0;
+
+ if (modify_qp_is_ok(hr_qp)) {
+ /* Modify qp to reset before destroying qp */
+ ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
+ hr_qp->state, IB_QPS_RESET, udata);
+ if (ret)
+ ibdev_err_ratelimited(ibdev,
+ "failed to modify QP to RST, ret = %d.\n",
+ ret);
+ }
+
+ send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
+ recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;
+
+ spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
+ hns_roce_lock_cqs(send_cq, recv_cq);
+
+ if (!udata) {
+ if (recv_cq)
+ __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn,
+ (hr_qp->ibqp.srq ?
+ to_hr_srq(hr_qp->ibqp.srq) :
+ NULL));
+
+ if (send_cq && send_cq != recv_cq)
+ __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
+ }
+
+ hns_roce_qp_remove(hr_dev, hr_qp);
+
+ hns_roce_unlock_cqs(send_cq, recv_cq);
+ spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
+
+ return ret;
+}
+
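+ /*
+ * Drop this QP's reference on its DIP entry; once the last user is
+ * gone, clear the dgid so the slot can be reused by get_dip_ctx_idx().
+ */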
+static void put_dip_ctx_idx(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_dip *hr_dip = hr_qp->dip;
+
+ if (!hr_dip)
+ return;
+
+ xa_lock(&hr_dev->qp_table.dip_xa);
+
+ hr_dip->qp_cnt--;
+ if (!hr_dip->qp_cnt)
+ memset(hr_dip->dgid, 0, GID_LEN_V2);
+
+ xa_unlock(&hr_dev->qp_table.dip_xa);
+}
+
+int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ unsigned long flags;
+ int ret;
+
+ /* Make sure flush_cqe() is completed */
+ spin_lock_irqsave(&hr_qp->flush_lock, flags);
+ set_bit(HNS_ROCE_STOP_FLUSH_FLAG, &hr_qp->flush_flag);
+ spin_unlock_irqrestore(&hr_qp->flush_lock, flags);
+ flush_work(&hr_qp->flush_work.work);
+
+ if (hr_qp->cong_type == CONG_TYPE_DIP)
+ put_dip_ctx_idx(hr_dev, hr_qp);
+
+ ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
+ if (ret)
+ ibdev_err_ratelimited(&hr_dev->ib_dev,
+ "failed to destroy QP, QPN = 0x%06lx, ret = %d.\n",
+ hr_qp->qpn, ret);
+
+ hns_roce_qp_destroy(hr_dev, hr_qp, udata);
+
+ return 0;
+}
+
+static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_sccc_clr_done *resp;
+ struct hns_roce_sccc_clr *clr;
+ struct hns_roce_cmq_desc desc;
+ int ret, i;
+
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ return 0;
+
+ mutex_lock(&hr_dev->qp_table.scc_mutex);
+
+ /* set scc ctx clear done flag */
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret) {
+ ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d.\n", ret);
+ goto out;
+ }
+
+ /* clear scc context */
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
+ clr = (struct hns_roce_sccc_clr *)desc.data;
+ clr->qpn = cpu_to_le32(hr_qp->qpn);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret) {
+ ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d.\n", ret);
+ goto out;
+ }
+
+ /* poll until the SCC context clear is reported done */
+ resp = (struct hns_roce_sccc_clr_done *)desc.data;
+ for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
+ hns_roce_cmq_setup_basic_desc(&desc,
+ HNS_ROCE_OPC_QUERY_SCCC, true);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret) {
+ ibdev_err(ibdev, "failed to query clr cmq, ret = %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp->clr_done)
+ goto out;
+
+ msleep(20);
+ }
+
+ ibdev_err(ibdev, "query SCC clr done flag overtime.\n");
+ ret = -ETIMEDOUT;
+
+out:
+ mutex_unlock(&hr_dev->qp_table.scc_mutex);
+ return ret;
+}
+
+#define DMA_IDX_SHIFT 3
+#define DMA_WQE_SHIFT 3
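+
+ /* Base-address-table addresses are programmed right-shifted by 3 bits (8-byte units) */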
+
+static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq,
+ struct hns_roce_srq_context *ctx)
+{
+ struct hns_roce_idx_que *idx_que = &srq->idx_que;
+ struct ib_device *ibdev = srq->ibsrq.device;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
+ u64 mtts_idx[MTT_MIN_COUNT] = {};
+ dma_addr_t dma_handle_idx;
+ int ret;
+
+ /* Get physical address of idx que buf */
+ ret = hns_roce_mtr_find(hr_dev, &idx_que->mtr, 0, mtts_idx,
+ ARRAY_SIZE(mtts_idx));
+ if (ret) {
+ ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ dma_handle_idx = hns_roce_get_mtr_ba(&idx_que->mtr);
+
+ hr_reg_write(ctx, SRQC_IDX_HOP_NUM,
+ to_hr_hem_hopnum(hr_dev->caps.idx_hop_num, srq->wqe_cnt));
+
+ hr_reg_write(ctx, SRQC_IDX_BT_BA_L, dma_handle_idx >> DMA_IDX_SHIFT);
+ hr_reg_write(ctx, SRQC_IDX_BT_BA_H,
+ upper_32_bits(dma_handle_idx >> DMA_IDX_SHIFT));
+
+ hr_reg_write(ctx, SRQC_IDX_BA_PG_SZ,
+ to_hr_hw_page_shift(idx_que->mtr.hem_cfg.ba_pg_shift));
+ hr_reg_write(ctx, SRQC_IDX_BUF_PG_SZ,
+ to_hr_hw_page_shift(idx_que->mtr.hem_cfg.buf_pg_shift));
+
+ hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_L,
+ to_hr_hw_page_addr(mtts_idx[0]));
+ hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_H,
+ upper_32_bits(to_hr_hw_page_addr(mtts_idx[0])));
+
+ hr_reg_write(ctx, SRQC_IDX_NXT_BLK_ADDR_L,
+ to_hr_hw_page_addr(mtts_idx[1]));
+ hr_reg_write(ctx, SRQC_IDX_NXT_BLK_ADDR_H,
+ upper_32_bits(to_hr_hw_page_addr(mtts_idx[1])));
+
+ return 0;
+}
+
+static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
+{
+ struct ib_device *ibdev = srq->ibsrq.device;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
+ struct hns_roce_srq_context *ctx = mb_buf;
+ u64 mtts_wqe[MTT_MIN_COUNT] = {};
+ dma_addr_t dma_handle_wqe;
+ int ret;
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ /* Get the physical address of srq buf */
+ ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
+ ARRAY_SIZE(mtts_wqe));
+ if (ret) {
+ ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ dma_handle_wqe = hns_roce_get_mtr_ba(&srq->buf_mtr);
+
+ hr_reg_write(ctx, SRQC_SRQ_ST, 1);
+ hr_reg_write_bool(ctx, SRQC_SRQ_TYPE,
+ srq->ibsrq.srq_type == IB_SRQT_XRC);
+ hr_reg_write(ctx, SRQC_PD, to_hr_pd(srq->ibsrq.pd)->pdn);
+ hr_reg_write(ctx, SRQC_SRQN, srq->srqn);
+ hr_reg_write(ctx, SRQC_XRCD, srq->xrcdn);
+ hr_reg_write(ctx, SRQC_XRC_CQN, srq->cqn);
+ hr_reg_write(ctx, SRQC_SHIFT, ilog2(srq->wqe_cnt));
+ hr_reg_write(ctx, SRQC_RQWS,
+ srq->max_gs <= 0 ? 0 : fls(srq->max_gs - 1));
+
+ hr_reg_write(ctx, SRQC_WQE_HOP_NUM,
+ to_hr_hem_hopnum(hr_dev->caps.srqwqe_hop_num,
+ srq->wqe_cnt));
+
+ hr_reg_write(ctx, SRQC_WQE_BT_BA_L, dma_handle_wqe >> DMA_WQE_SHIFT);
+ hr_reg_write(ctx, SRQC_WQE_BT_BA_H,
+ upper_32_bits(dma_handle_wqe >> DMA_WQE_SHIFT));
+
+ hr_reg_write(ctx, SRQC_WQE_BA_PG_SZ,
+ to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift));
+ hr_reg_write(ctx, SRQC_WQE_BUF_PG_SZ,
+ to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));
+
+ if (srq->cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB) {
+ hr_reg_enable(ctx, SRQC_DB_RECORD_EN);
+ hr_reg_write(ctx, SRQC_DB_RECORD_ADDR_L,
+ lower_32_bits(srq->rdb.dma) >> 1);
+ hr_reg_write(ctx, SRQC_DB_RECORD_ADDR_H,
+ upper_32_bits(srq->rdb.dma));
+ }
+
+ return hns_roce_v2_write_srqc_index_queue(srq, ctx);
+}
+
+static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
+ struct ib_srq_attr *srq_attr,
+ enum ib_srq_attr_mask srq_attr_mask,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
+ struct hns_roce_srq *srq = to_hr_srq(ibsrq);
+ struct hns_roce_srq_context *srq_context;
+ struct hns_roce_srq_context *srqc_mask;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret = 0;
+
+ /* Resizing SRQs is not supported yet */
+ if (srq_attr_mask & IB_SRQ_MAX_WR) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (srq_attr_mask & IB_SRQ_LIMIT) {
+ if (srq_attr->srq_limit > srq->wqe_cnt) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox)) {
+ ret = PTR_ERR(mailbox);
+ goto out;
+ }
+
+ srq_context = mailbox->buf;
+ srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
+
+ memset(srqc_mask, 0xff, sizeof(*srqc_mask));
+
+ hr_reg_write(srq_context, SRQC_LIMIT_WL, srq_attr->srq_limit);
+ hr_reg_clear(srqc_mask, SRQC_LIMIT_WL);
+
+ ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0,
+ HNS_ROCE_CMD_MODIFY_SRQC, srq->srqn);
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ if (ret)
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to handle cmd of modifying SRQ, ret = %d.\n",
+ ret);
+ }
+
+out:
+ if (ret)
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_SRQ_MODIFY_ERR_CNT]);
+
+ return ret;
+}
+
+static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
+ struct hns_roce_srq *srq = to_hr_srq(ibsrq);
+ struct hns_roce_srq_context *srq_context;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ srq_context = mailbox->buf;
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma,
+ HNS_ROCE_CMD_QUERY_SRQC, srq->srqn);
+ if (ret) {
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to process cmd of querying SRQ, ret = %d.\n",
+ ret);
+ goto out;
+ }
+
+ attr->srq_limit = hr_reg_read(srq_context, SRQC_LIMIT_WL);
+ attr->max_wr = srq->wqe_cnt;
+ attr->max_sge = srq->max_gs - srq->rsv_sge;
+
+out:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ return ret;
+}
+
+static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
+ struct hns_roce_v2_cq_context *cq_context;
+ struct hns_roce_cq *hr_cq = to_hr_cq(cq);
+ struct hns_roce_v2_cq_context *cqc_mask;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ ret = PTR_ERR_OR_ZERO(mailbox);
+ if (ret)
+ goto err_out;
+
+ cq_context = mailbox->buf;
+ cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
+
+ memset(cqc_mask, 0xff, sizeof(*cqc_mask));
+
+ hr_reg_write(cq_context, CQC_CQ_MAX_CNT, cq_count);
+ hr_reg_clear(cqc_mask, CQC_CQ_MAX_CNT);
+
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+ if (cq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
+ dev_info(hr_dev->dev,
+ "cq_period(%u) reached the upper limit, adjusted to 65.\n",
+ cq_period);
+ cq_period = HNS_ROCE_MAX_CQ_PERIOD_HIP08;
+ }
+ cq_period *= HNS_ROCE_CLOCK_ADJUST;
+ }
+ hr_reg_write(cq_context, CQC_CQ_PERIOD, cq_period);
+ hr_reg_clear(cqc_mask, CQC_CQ_PERIOD);
+
+ ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0,
+ HNS_ROCE_CMD_MODIFY_CQC, hr_cq->cqn);
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ if (ret)
+ ibdev_err_ratelimited(&hr_dev->ib_dev,
+ "failed to process cmd when modifying CQ, ret = %d.\n",
+ ret);
+
+err_out:
+ if (ret)
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CQ_MODIFY_ERR_CNT]);
+
+ return ret;
+}
+
+static int hns_roce_v2_query_cqc(struct hns_roce_dev *hr_dev, u32 cqn,
+ void *buffer)
+{
+ struct hns_roce_v2_cq_context *context;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ context = mailbox->buf;
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma,
+ HNS_ROCE_CMD_QUERY_CQC, cqn);
+ if (ret) {
+ ibdev_err_ratelimited(&hr_dev->ib_dev,
+ "failed to process cmd when querying CQ, ret = %d.\n",
+ ret);
+ goto err_mailbox;
+ }
+
+ memcpy(buffer, context, sizeof(*context));
+
+err_mailbox:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
+
+static int hns_roce_v2_query_mpt(struct hns_roce_dev *hr_dev, u32 key,
+ void *buffer)
+{
+ struct hns_roce_v2_mpt_entry *context;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ context = mailbox->buf;
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_MPT,
+ key_to_hw_index(key));
+ if (ret) {
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to process cmd when querying MPT, ret = %d.\n",
+ ret);
+ goto err_mailbox;
+ }
+
+ memcpy(buffer, context, sizeof(*context));
+
+err_mailbox:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
+
+static void dump_aeqe_log(struct hns_roce_work *irq_work)
+{
+ struct hns_roce_dev *hr_dev = irq_work->hr_dev;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+
+ switch (irq_work->event_type) {
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG:
+ ibdev_info(ibdev, "path migrated succeeded.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
+ ibdev_warn(ibdev, "path migration failed.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ break;
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+ ibdev_dbg(ibdev, "send queue drained.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ ibdev_err(ibdev, "local work queue 0x%x catast error, sub_event type is: %d\n",
+ irq_work->queue_num, irq_work->sub_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ ibdev_err(ibdev, "invalid request local work queue 0x%x error.\n",
+ irq_work->queue_num);
+ break;
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ ibdev_err(ibdev, "local access violation work queue 0x%x error, sub_event type is: %d\n",
+ irq_work->queue_num, irq_work->sub_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+ ibdev_dbg(ibdev, "SRQ limit reach.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+ ibdev_dbg(ibdev, "SRQ last wqe reach.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+ ibdev_err(ibdev, "SRQ catas error.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ ibdev_err(ibdev, "CQ 0x%x access err.\n", irq_work->queue_num);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ ibdev_warn(ibdev, "CQ 0x%x overflow\n", irq_work->queue_num);
+ break;
+ case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
+ ibdev_warn(ibdev, "DB overflow.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_MB:
+ break;
+ case HNS_ROCE_EVENT_TYPE_FLR:
+ ibdev_warn(ibdev, "function level reset.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
+ ibdev_err(ibdev, "xrc domain violation error.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
+ ibdev_err(ibdev, "invalid xrceth error.\n");
+ break;
+ default:
+ ibdev_info(ibdev, "Undefined event %d.\n",
+ irq_work->event_type);
+ break;
+ }
+}
+
+static void hns_roce_irq_work_handle(struct work_struct *work)
+{
+ struct hns_roce_work *irq_work =
+ container_of(work, struct hns_roce_work, work);
+ struct hns_roce_dev *hr_dev = irq_work->hr_dev;
+ int event_type = irq_work->event_type;
+ u32 queue_num = irq_work->queue_num;
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG:
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
+ case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
+ hns_roce_qp_event(hr_dev, queue_num, event_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+ case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+ hns_roce_srq_event(hr_dev, queue_num, event_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ hns_roce_cq_event(hr_dev, queue_num, event_type);
+ break;
+ default:
+ break;
+ }
+
+ dump_aeqe_log(irq_work);
+
+ kfree(irq_work);
+}
+
+static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq, u32 queue_num)
+{
+ struct hns_roce_work *irq_work;
+
+ irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
+ if (!irq_work)
+ return;
+
+ INIT_WORK(&irq_work->work, hns_roce_irq_work_handle);
+ irq_work->hr_dev = hr_dev;
+ irq_work->event_type = eq->event_type;
+ irq_work->sub_type = eq->sub_type;
+ irq_work->queue_num = queue_num;
+ queue_work(hr_dev->irq_workq, &irq_work->work);
+}
+
+static void update_eq_db(struct hns_roce_eq *eq)
+{
+ struct hns_roce_dev *hr_dev = eq->hr_dev;
+ struct hns_roce_v2_db eq_db = {};
+
+ if (eq->type_flag == HNS_ROCE_AEQ) {
+ hr_reg_write(&eq_db, EQ_DB_CMD,
+ eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
+ HNS_ROCE_EQ_DB_CMD_AEQ :
+ HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
+ } else {
+ hr_reg_write(&eq_db, EQ_DB_TAG, eq->eqn);
+
+ hr_reg_write(&eq_db, EQ_DB_CMD,
+ eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
+ HNS_ROCE_EQ_DB_CMD_CEQ :
+ HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
+ }
+
+ hr_reg_write(&eq_db, EQ_DB_CI, eq->cons_index);
+
+ hns_roce_write64(hr_dev, (__le32 *)&eq_db, eq->db_reg);
+}
+
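+ /*
+ * An AEQE is ready when its owner bit differs from the wrap parity of
+ * the consumer index (cons_index & entries); otherwise the queue is
+ * empty.
+ */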
+static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
+{
+ struct hns_roce_aeqe *aeqe;
+
+ aeqe = hns_roce_buf_offset(eq->mtr.kmem,
+ (eq->cons_index & (eq->entries - 1)) *
+ eq->eqe_size);
+
+ return (hr_reg_read(aeqe, AEQE_OWNER) ^
+ !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
+}
+
+static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
+ irqreturn_t aeqe_found = IRQ_NONE;
+ int num_aeqes = 0;
+ int event_type;
+ u32 queue_num;
+ int sub_type;
+
+ while (aeqe && num_aeqes < HNS_AEQ_POLLING_BUDGET) {
+ /* Make sure we read AEQ entry after we have checked the
+ * ownership bit
+ */
+ dma_rmb();
+
+ event_type = hr_reg_read(aeqe, AEQE_EVENT_TYPE);
+ sub_type = hr_reg_read(aeqe, AEQE_SUB_TYPE);
+ queue_num = hr_reg_read(aeqe, AEQE_EVENT_QUEUE_NUM);
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
+ case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
+ hns_roce_flush_cqe(hr_dev, queue_num);
+ break;
+ case HNS_ROCE_EVENT_TYPE_MB:
+ hns_roce_cmd_event(hr_dev,
+ le16_to_cpu(aeqe->event.cmd.token),
+ aeqe->event.cmd.status,
+ le64_to_cpu(aeqe->event.cmd.out_param));
+ break;
+ default:
+ break;
+ }
+
+ eq->event_type = event_type;
+ eq->sub_type = sub_type;
+ ++eq->cons_index;
+ aeqe_found = IRQ_HANDLED;
+ trace_hns_ae_info(event_type, aeqe, eq->eqe_size);
+
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_AEQE_CNT]);
+
+ hns_roce_v2_init_irq_work(hr_dev, eq, queue_num);
+
+ aeqe = next_aeqe_sw_v2(eq);
+ ++num_aeqes;
+ }
+
+ update_eq_db(eq);
+
+ return IRQ_RETVAL(aeqe_found);
+}
+
+static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
+{
+ struct hns_roce_ceqe *ceqe;
+
+ ceqe = hns_roce_buf_offset(eq->mtr.kmem,
+ (eq->cons_index & (eq->entries - 1)) *
+ eq->eqe_size);
+
+ return (hr_reg_read(ceqe, CEQE_OWNER) ^
+ !!(eq->cons_index & eq->entries)) ? ceqe : NULL;
+}
+
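+ /*
+ * Completion event interrupts are only acknowledged here; CEQE
+ * processing is deferred to hns_roce_ceq_work() on the BH workqueue.
+ */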
+static irqreturn_t hns_roce_v2_ceq_int(struct hns_roce_eq *eq)
+{
+ queue_work(system_bh_wq, &eq->work);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
+{
+ struct hns_roce_eq *eq = eq_ptr;
+ struct hns_roce_dev *hr_dev = eq->hr_dev;
+ irqreturn_t int_work;
+
+ if (eq->type_flag == HNS_ROCE_CEQ)
+ /* Completion event interrupt */
+ int_work = hns_roce_v2_ceq_int(eq);
+ else
+ /* Asynchronous event interrupt */
+ int_work = hns_roce_v2_aeq_int(hr_dev, eq);
+
+ return IRQ_RETVAL(int_work);
+}
+
+static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev,
+ u32 int_st)
+{
+ struct pci_dev *pdev = hr_dev->pci_dev;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ const struct hnae3_ae_ops *ops = ae_dev->ops;
+ enum hnae3_reset_type reset_type;
+ irqreturn_t int_work = IRQ_NONE;
+ u32 int_en;
+
+ int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
+
+ if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
+ dev_err(hr_dev->dev, "AEQ overflow!\n");
+
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG,
+ 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S);
+
+ reset_type = hr_dev->is_vf ?
+ HNAE3_VF_FUNC_RESET : HNAE3_FUNC_RESET;
+
+ /* Set reset level for reset_event() */
+ if (ops->set_default_reset_request)
+ ops->set_default_reset_request(ae_dev, reset_type);
+ if (ops->reset_event)
+ ops->reset_event(pdev, NULL);
+
+ int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
+
+ int_work = IRQ_HANDLED;
+ } else {
+ dev_err(hr_dev->dev, "there is no basic abn irq found.\n");
+ }
+
+ return IRQ_RETVAL(int_work);
+}
+
+static int fmea_ram_ecc_query(struct hns_roce_dev *hr_dev,
+ struct fmea_ram_ecc *ecc_info)
+{
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+ int ret;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_QUERY_RAM_ECC, true);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret)
+ return ret;
+
+ ecc_info->is_ecc_err = hr_reg_read(req, QUERY_RAM_ECC_1BIT_ERR);
+ ecc_info->res_type = hr_reg_read(req, QUERY_RAM_ECC_RES_TYPE);
+ ecc_info->index = hr_reg_read(req, QUERY_RAM_ECC_TAG);
+
+ return 0;
+}
+
+static int fmea_recover_gmv(struct hns_roce_dev *hr_dev, u32 idx)
+{
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+ u32 addr_upper;
+ u32 addr_low;
+ int ret;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, true);
+ hr_reg_write(req, CFG_GMV_BT_IDX, idx);
+
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "failed to execute cmd to read gmv, ret = %d.\n", ret);
+ return ret;
+ }
+
+ addr_low = hr_reg_read(req, CFG_GMV_BT_BA_L);
+ addr_upper = hr_reg_read(req, CFG_GMV_BT_BA_H);
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, false);
+ hr_reg_write(req, CFG_GMV_BT_BA_L, addr_low);
+ hr_reg_write(req, CFG_GMV_BT_BA_H, addr_upper);
+ hr_reg_write(req, CFG_GMV_BT_IDX, idx);
+
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
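+ /*
+ * QPC timer, CQC timer and SCCC resources report a byte address
+ * directly; other resources report the address in hardware-page units
+ * and must be shifted by HNS_HW_PAGE_SHIFT.
+ */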
+static u64 fmea_get_ram_res_addr(u32 res_type, __le64 *data)
+{
+ if (res_type == ECC_RESOURCE_QPC_TIMER ||
+ res_type == ECC_RESOURCE_CQC_TIMER ||
+ res_type == ECC_RESOURCE_SCCC)
+ return le64_to_cpu(*data);
+
+ return le64_to_cpu(*data) << HNS_HW_PAGE_SHIFT;
+}
+
+static int fmea_recover_others(struct hns_roce_dev *hr_dev, u32 res_type,
+ u32 index)
+{
+ u8 write_bt0_op = fmea_ram_res[res_type].write_bt0_op;
+ u8 read_bt0_op = fmea_ram_res[res_type].read_bt0_op;
+ struct hns_roce_cmd_mailbox *mailbox;
+ u64 addr;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, read_bt0_op, index);
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "failed to execute cmd to read fmea ram, ret = %d.\n",
+ ret);
+ goto out;
+ }
+
+ addr = fmea_get_ram_res_addr(res_type, mailbox->buf);
+
+ ret = hns_roce_cmd_mbox(hr_dev, addr, 0, write_bt0_op, index);
+ if (ret)
+ dev_err(hr_dev->dev,
+ "failed to execute cmd to write fmea ram, ret = %d.\n",
+ ret);
+
+out:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ return ret;
+}
+
+static void fmea_ram_ecc_recover(struct hns_roce_dev *hr_dev,
+ struct fmea_ram_ecc *ecc_info)
+{
+ u32 res_type = ecc_info->res_type;
+ u32 index = ecc_info->index;
+ int ret;
+
+ BUILD_BUG_ON(ARRAY_SIZE(fmea_ram_res) != ECC_RESOURCE_COUNT);
+
+ if (res_type >= ECC_RESOURCE_COUNT) {
+ dev_err(hr_dev->dev, "unsupported fmea ram ecc type %u.\n",
+ res_type);
+ return;
+ }
+
+ if (res_type == ECC_RESOURCE_GMV)
+ ret = fmea_recover_gmv(hr_dev, index);
+ else
+ ret = fmea_recover_others(hr_dev, res_type, index);
+ if (ret)
+ dev_err(hr_dev->dev,
+ "failed to recover %s, index = %u, ret = %d.\n",
+ fmea_ram_res[res_type].name, index, ret);
+}
+
+static void fmea_ram_ecc_work(struct work_struct *ecc_work)
+{
+ struct hns_roce_dev *hr_dev =
+ container_of(ecc_work, struct hns_roce_dev, ecc_work);
+ struct fmea_ram_ecc ecc_info = {};
+
+ if (fmea_ram_ecc_query(hr_dev, &ecc_info)) {
+ dev_err(hr_dev->dev, "failed to query fmea ram ecc.\n");
+ return;
+ }
+
+ if (!ecc_info.is_ecc_err) {
+ dev_err(hr_dev->dev, "there is no fmea ram ecc err found.\n");
+ return;
+ }
+
+ fmea_ram_ecc_recover(hr_dev, &ecc_info);
+}
+
+static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
+{
+ struct hns_roce_dev *hr_dev = dev_id;
+ irqreturn_t int_work = IRQ_NONE;
+ u32 int_st;
+
+ int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
+
+ if (int_st) {
+ int_work = abnormal_interrupt_basic(hr_dev, int_st);
+ } else if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+ queue_work(hr_dev->irq_workq, &hr_dev->ecc_work);
+ int_work = IRQ_HANDLED;
+ } else {
+ dev_err(hr_dev->dev, "there is no abnormal irq found.\n");
+ }
+
+ return IRQ_RETVAL(int_work);
+}
+
+static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
+ int eq_num, u32 enable_flag)
+{
+ int i;
+
+ for (i = 0; i < eq_num; i++)
+ roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
+ i * EQ_REG_OFFSET, enable_flag);
+
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, enable_flag);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag);
+}
+
+static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
+{
+ hns_roce_mtr_destroy(hr_dev, &eq->mtr);
+}
+
+static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct device *dev = hr_dev->dev;
+ int eqn = eq->eqn;
+ int ret;
+ u8 cmd;
+
+ if (eqn < hr_dev->caps.num_comp_vectors)
+ cmd = HNS_ROCE_CMD_DESTROY_CEQC;
+ else
+ cmd = HNS_ROCE_CMD_DESTROY_AEQC;
+
+ ret = hns_roce_destroy_hw_ctx(hr_dev, cmd, eqn & HNS_ROCE_V2_EQN_M);
+ if (ret)
+ dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
+
+ free_eq_buf(hr_dev, eq);
+}
+
+static void init_eq_config(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
+{
+ eq->db_reg = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
+ eq->cons_index = 0;
+ eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
+ eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
+ eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
+ eq->shift = ilog2((unsigned int)eq->entries);
+}
+
+static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
+ void *mb_buf)
+{
+ u64 eqe_ba[MTT_MIN_COUNT] = { 0 };
+ struct hns_roce_eq_context *eqc;
+ u64 bt_ba = 0;
+ int ret;
+
+ eqc = mb_buf;
+ memset(eqc, 0, sizeof(struct hns_roce_eq_context));
+
+ init_eq_config(hr_dev, eq);
+
+ /* if not multi-hop, the EQE buffer uses only a single contiguous region */
+ ret = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba,
+ ARRAY_SIZE(eqe_ba));
+ if (ret) {
+ dev_err(hr_dev->dev, "failed to find EQE mtr, ret = %d\n", ret);
+ return ret;
+ }
+
+ bt_ba = hns_roce_get_mtr_ba(&eq->mtr);
+
+ hr_reg_write(eqc, EQC_EQ_ST, HNS_ROCE_V2_EQ_STATE_VALID);
+ hr_reg_write(eqc, EQC_EQE_HOP_NUM, eq->hop_num);
+ hr_reg_write(eqc, EQC_OVER_IGNORE, eq->over_ignore);
+ hr_reg_write(eqc, EQC_COALESCE, eq->coalesce);
+ hr_reg_write(eqc, EQC_ARM_ST, eq->arm_st);
+ hr_reg_write(eqc, EQC_EQN, eq->eqn);
+ hr_reg_write(eqc, EQC_EQE_CNT, HNS_ROCE_EQ_INIT_EQE_CNT);
+ hr_reg_write(eqc, EQC_EQE_BA_PG_SZ,
+ to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));
+ hr_reg_write(eqc, EQC_EQE_BUF_PG_SZ,
+ to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));
+ hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX);
+ hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt);
+
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+ if (eq->eq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
+ dev_info(hr_dev->dev, "eq_period(%u) reached the upper limit, adjusted to 65.\n",
+ eq->eq_period);
+ eq->eq_period = HNS_ROCE_MAX_EQ_PERIOD;
+ }
+ eq->eq_period *= HNS_ROCE_CLOCK_ADJUST;
+ }
+
+ hr_reg_write(eqc, EQC_EQ_PERIOD, eq->eq_period);
+ hr_reg_write(eqc, EQC_EQE_REPORT_TIMER, HNS_ROCE_EQ_INIT_REPORT_TIMER);
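+ /*
+ * Base addresses are programmed as shifted fields: the BT base drops
+ * its low 3 bits, while the current/next EQE addresses drop their low
+ * 12 bits before being split across the L/M/H registers.
+ */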
+ hr_reg_write(eqc, EQC_EQE_BA_L, bt_ba >> 3);
+ hr_reg_write(eqc, EQC_EQE_BA_H, bt_ba >> 35);
+ hr_reg_write(eqc, EQC_SHIFT, eq->shift);
+ hr_reg_write(eqc, EQC_MSI_INDX, HNS_ROCE_EQ_INIT_MSI_IDX);
+ hr_reg_write(eqc, EQC_CUR_EQE_BA_L, eqe_ba[0] >> 12);
+ hr_reg_write(eqc, EQC_CUR_EQE_BA_M, eqe_ba[0] >> 28);
+ hr_reg_write(eqc, EQC_CUR_EQE_BA_H, eqe_ba[0] >> 60);
+ hr_reg_write(eqc, EQC_EQ_CONS_INDX, HNS_ROCE_EQ_INIT_CONS_IDX);
+ hr_reg_write(eqc, EQC_NEX_EQE_BA_L, eqe_ba[1] >> 12);
+ hr_reg_write(eqc, EQC_NEX_EQE_BA_H, eqe_ba[1] >> 44);
+ hr_reg_write(eqc, EQC_EQE_SIZE, eq->eqe_size == HNS_ROCE_V3_EQE_SIZE);
+
+ return 0;
+}
+
+static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
+{
+ struct hns_roce_buf_attr buf_attr = {};
+ int err;
+
+ if (hr_dev->caps.eqe_hop_num == HNS_ROCE_HOP_NUM_0)
+ eq->hop_num = 0;
+ else
+ eq->hop_num = hr_dev->caps.eqe_hop_num;
+
+ buf_attr.page_shift = hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT;
+ buf_attr.region[0].size = eq->entries * eq->eqe_size;
+ buf_attr.region[0].hopnum = eq->hop_num;
+ buf_attr.region_count = 1;
+
+ err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr,
+ hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT, NULL,
+ 0);
+ if (err)
+ dev_err(hr_dev->dev, "failed to alloc EQE mtr, err %d\n", err);
+
+ return err;
+}
+
+static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq, u8 eq_cmd)
+{
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ /* Allocate mailbox memory */
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ ret = alloc_eq_buf(hr_dev, eq);
+ if (ret)
+ goto free_cmd_mbox;
+
+ ret = config_eqc(hr_dev, eq, mailbox->buf);
+ if (ret)
+ goto err_cmd_mbox;
+
+ ret = hns_roce_create_hw_ctx(hr_dev, mailbox, eq_cmd, eq->eqn);
+ if (ret) {
+ dev_err(hr_dev->dev, "[mailbox cmd] create eqc failed.\n");
+ goto err_cmd_mbox;
+ }
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return 0;
+
+err_cmd_mbox:
+ free_eq_buf(hr_dev, eq);
+
+free_cmd_mbox:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
+
+static void hns_roce_ceq_work(struct work_struct *work)
+{
+ struct hns_roce_eq *eq = from_work(eq, work, work);
+ struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
+ struct hns_roce_dev *hr_dev = eq->hr_dev;
+ int ceqe_num = 0;
+ u32 cqn;
+
+ while (ceqe && ceqe_num < hr_dev->caps.ceqe_depth) {
+ /* Make sure we read CEQ entry after we have checked the
+ * ownership bit
+ */
+ dma_rmb();
+
+ cqn = hr_reg_read(ceqe, CEQE_CQN);
+
+ hns_roce_cq_completion(hr_dev, cqn);
+
+ ++eq->cons_index;
+ ++ceqe_num;
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CEQE_CNT]);
+
+ ceqe = next_ceqe_sw_v2(eq);
+ }
+
+ update_eq_db(eq);
+}
+
+static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
+ int comp_num, int aeq_num, int other_num)
+{
+ struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+ int i, j;
+ int ret;
+
+ for (i = 0; i < irq_num; i++) {
+ hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
+ GFP_KERNEL);
+ if (!hr_dev->irq_names[i]) {
+ ret = -ENOMEM;
+ goto err_kzalloc_failed;
+ }
+ }
+
+ /* irq names are laid out as: abnormal + AEQ + CEQ */
+ for (j = 0; j < other_num; j++)
+ snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
+ "hns-%s-abn-%d", pci_name(hr_dev->pci_dev), j);
+
+ for (j = other_num; j < (other_num + aeq_num); j++)
+ snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
+ "hns-%s-aeq-%d", pci_name(hr_dev->pci_dev), j - other_num);
+
+ for (j = (other_num + aeq_num); j < irq_num; j++)
+ snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
+ "hns-%s-ceq-%d", pci_name(hr_dev->pci_dev),
+ j - other_num - aeq_num);
+
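+ /*
+ * The EQ table holds CEQs first and AEQs last, while the irq names are
+ * laid out as abnormal, AEQ, CEQ; hence the aeq_num/comp_num offsets
+ * when selecting the name passed to request_irq().
+ */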
+ for (j = 0; j < irq_num; j++) {
+ if (j < other_num) {
+ ret = request_irq(hr_dev->irq[j],
+ hns_roce_v2_msix_interrupt_abn,
+ 0, hr_dev->irq_names[j], hr_dev);
+ } else if (j < (other_num + comp_num)) {
+ INIT_WORK(&eq_table->eq[j - other_num].work,
+ hns_roce_ceq_work);
+ ret = request_irq(eq_table->eq[j - other_num].irq,
+ hns_roce_v2_msix_interrupt_eq,
+ 0, hr_dev->irq_names[j + aeq_num],
+ &eq_table->eq[j - other_num]);
+ } else {
+ ret = request_irq(eq_table->eq[j - other_num].irq,
+ hns_roce_v2_msix_interrupt_eq,
+ 0, hr_dev->irq_names[j - comp_num],
+ &eq_table->eq[j - other_num]);
+ }
+
+ if (ret) {
+ dev_err(hr_dev->dev, "request irq error!\n");
+ goto err_request_failed;
+ }
+ }
+
+ return 0;
+
+err_request_failed:
+ for (j -= 1; j >= 0; j--) {
+ if (j < other_num) {
+ free_irq(hr_dev->irq[j], hr_dev);
+ continue;
+ }
+ free_irq(eq_table->eq[j - other_num].irq,
+ &eq_table->eq[j - other_num]);
+ if (j < other_num + comp_num)
+ cancel_work_sync(&eq_table->eq[j - other_num].work);
+ }
+
+err_kzalloc_failed:
+ for (i -= 1; i >= 0; i--)
+ kfree(hr_dev->irq_names[i]);
+
+ return ret;
+}
+
+static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev)
+{
+ int irq_num;
+ int eq_num;
+ int i;
+
+ eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
+ irq_num = eq_num + hr_dev->caps.num_other_vectors;
+
+ for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
+ free_irq(hr_dev->irq[i], hr_dev);
+
+ for (i = 0; i < eq_num; i++) {
+ free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]);
+ if (i < hr_dev->caps.num_comp_vectors)
+ cancel_work_sync(&hr_dev->eq_table.eq[i].work);
+ }
+
+ for (i = 0; i < irq_num; i++)
+ kfree(hr_dev->irq_names[i]);
+}
+
+static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_eq *eq;
+ int other_num;
+ int comp_num;
+ int aeq_num;
+ int irq_num;
+ int eq_num;
+ u8 eq_cmd;
+ int ret;
+ int i;
+
+ if (hr_dev->caps.aeqe_depth < HNS_AEQ_POLLING_BUDGET)
+ return -EINVAL;
+
+ other_num = hr_dev->caps.num_other_vectors;
+ comp_num = hr_dev->caps.num_comp_vectors;
+ aeq_num = hr_dev->caps.num_aeq_vectors;
+
+ eq_num = comp_num + aeq_num;
+ irq_num = eq_num + other_num;
+
+ eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
+ if (!eq_table->eq)
+ return -ENOMEM;
+
+ /* create eq */
+ for (i = 0; i < eq_num; i++) {
+ eq = &eq_table->eq[i];
+ eq->hr_dev = hr_dev;
+ eq->eqn = i;
+ if (i < comp_num) {
+ /* CEQ */
+ eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
+ eq->type_flag = HNS_ROCE_CEQ;
+ eq->entries = hr_dev->caps.ceqe_depth;
+ eq->eqe_size = hr_dev->caps.ceqe_size;
+ eq->irq = hr_dev->irq[i + other_num + aeq_num];
+ eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
+ eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
+ } else {
+ /* AEQ */
+ eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
+ eq->type_flag = HNS_ROCE_AEQ;
+ eq->entries = hr_dev->caps.aeqe_depth;
+ eq->eqe_size = hr_dev->caps.aeqe_size;
+ eq->irq = hr_dev->irq[i - comp_num + other_num];
+ eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
+ eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
+ }
+
+ ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
+ if (ret) {
+ dev_err(dev, "failed to create eq.\n");
+ goto err_create_eq_fail;
+ }
+ }
+
+ INIT_WORK(&hr_dev->ecc_work, fmea_ram_ecc_work);
+
+ hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0);
+ if (!hr_dev->irq_workq) {
+ dev_err(dev, "failed to create irq workqueue.\n");
+ ret = -ENOMEM;
+ goto err_create_eq_fail;
+ }
+
+ ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num, aeq_num,
+ other_num);
+ if (ret) {
+ dev_err(dev, "failed to request irq.\n");
+ goto err_request_irq_fail;
+ }
+
+ /* enable irq */
+ hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
+
+ return 0;
+
+err_request_irq_fail:
+ destroy_workqueue(hr_dev->irq_workq);
+
+err_create_eq_fail:
+ for (i -= 1; i >= 0; i--)
+ hns_roce_v2_destroy_eqc(hr_dev, &eq_table->eq[i]);
+ kfree(eq_table->eq);
+
+ return ret;
+}
+
+static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+ int eq_num;
+ int i;
+
+ eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
+
+ /* Disable irq */
+ hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
+
+ __hns_roce_free_irq(hr_dev);
+ destroy_workqueue(hr_dev->irq_workq);
+
+ for (i = 0; i < eq_num; i++)
+ hns_roce_v2_destroy_eqc(hr_dev, &eq_table->eq[i]);
+
+ kfree(eq_table->eq);
+}
+
+static const struct ib_device_ops hns_roce_v2_dev_ops = {
+ .destroy_qp = hns_roce_v2_destroy_qp,
+ .modify_cq = hns_roce_v2_modify_cq,
+ .poll_cq = hns_roce_v2_poll_cq,
+ .post_recv = hns_roce_v2_post_recv,
+ .post_send = hns_roce_v2_post_send,
+ .query_qp = hns_roce_v2_query_qp,
+ .req_notify_cq = hns_roce_v2_req_notify_cq,
+};
+
+static const struct ib_device_ops hns_roce_v2_dev_srq_ops = {
+ .modify_srq = hns_roce_v2_modify_srq,
+ .post_srq_recv = hns_roce_v2_post_srq_recv,
+ .query_srq = hns_roce_v2_query_srq,
+};
+
+static const struct hns_roce_hw hns_roce_hw_v2 = {
+ .cmq_init = hns_roce_v2_cmq_init,
+ .cmq_exit = hns_roce_v2_cmq_exit,
+ .hw_profile = hns_roce_v2_profile,
+ .hw_init = hns_roce_v2_init,
+ .hw_exit = hns_roce_v2_exit,
+ .post_mbox = v2_post_mbox,
+ .poll_mbox_done = v2_poll_mbox_done,
+ .chk_mbox_avail = v2_chk_mbox_is_avail,
+ .set_gid = hns_roce_v2_set_gid,
+ .set_mac = hns_roce_v2_set_mac,
+ .write_mtpt = hns_roce_v2_write_mtpt,
+ .rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
+ .frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
+ .write_cqc = hns_roce_v2_write_cqc,
+ .set_hem = hns_roce_v2_set_hem,
+ .clear_hem = hns_roce_v2_clear_hem,
+ .modify_qp = hns_roce_v2_modify_qp,
+ .dereg_mr = hns_roce_v2_dereg_mr,
+ .qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
+ .init_eq = hns_roce_v2_init_eq_table,
+ .cleanup_eq = hns_roce_v2_cleanup_eq_table,
+ .write_srqc = hns_roce_v2_write_srqc,
+ .query_cqc = hns_roce_v2_query_cqc,
+ .query_qpc = hns_roce_v2_query_qpc,
+ .query_mpt = hns_roce_v2_query_mpt,
+ .query_srqc = hns_roce_v2_query_srqc,
+ .query_sccc = hns_roce_v2_query_sccc,
+ .query_hw_counter = hns_roce_hw_v2_query_counter,
+ .get_dscp = hns_roce_hw_v2_get_dscp,
+ .hns_roce_dev_ops = &hns_roce_v2_dev_ops,
+ .hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
+};
+
+static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ /* required last entry */
+ {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
+
+static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
+ struct hnae3_handle *handle)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ const struct pci_device_id *id;
+ int i;
+
+ hr_dev->pci_dev = handle->pdev;
+ id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
+ hr_dev->is_vf = id->driver_data;
+ hr_dev->dev = &handle->pdev->dev;
+ hr_dev->hw = &hns_roce_hw_v2;
+ hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
+ hr_dev->odb_offset = hr_dev->sdb_offset;
+
+ /* Get info from NIC driver. */
+ hr_dev->reg_base = handle->rinfo.roce_io_base;
+ hr_dev->mem_base = handle->rinfo.roce_mem_base;
+ hr_dev->caps.num_ports = 1;
+ hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
+ hr_dev->iboe.phy_port[0] = 0;
+
+ addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
+ hr_dev->iboe.netdevs[0]->dev_addr);
+
+ for (i = 0; i < handle->rinfo.num_vectors; i++)
+ hr_dev->irq[i] = pci_irq_vector(handle->pdev,
+ i + handle->rinfo.base_vector);
+
+ /* cmd issue mode: 0 is poll, 1 is event */
+ hr_dev->cmd_mod = 1;
+ hr_dev->loop_idc = 0;
+
+ hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
+ priv->handle = handle;
+}
+
+static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
+{
+ struct hns_roce_dev *hr_dev;
+ int ret;
+
+ hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
+ if (!hr_dev)
+ return -ENOMEM;
+
+ hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
+ if (!hr_dev->priv) {
+ ret = -ENOMEM;
+ goto error_failed_kzalloc;
+ }
+
+ hns_roce_hw_v2_get_cfg(hr_dev, handle);
+
+ ret = hns_roce_init(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
+ goto error_failed_roce_init;
+ }
+
+ handle->priv = hr_dev;
+
+ return 0;
+
+error_failed_roce_init:
+ kfree(hr_dev->priv);
+
+error_failed_kzalloc:
+ ib_dealloc_device(&hr_dev->ib_dev);
+
+ return ret;
+}
+
+static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
+ bool reset)
+{
+ struct hns_roce_dev *hr_dev = handle->priv;
+
+ if (!hr_dev)
+ return;
+
+ handle->priv = NULL;
+
+ hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT;
+ hns_roce_handle_device_err(hr_dev);
+
+ hns_roce_exit(hr_dev);
+ kfree(hr_dev->priv);
+ ib_dealloc_device(&hr_dev->ib_dev);
+}
+
+static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
+{
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ const struct pci_device_id *id;
+ struct device *dev = &handle->pdev->dev;
+ int ret;
+
+ handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;
+
+ if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
+ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
+ goto reset_chk_err;
+ }
+
+ id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
+ if (!id)
+ return 0;
+
+ if (id->driver_data && handle->pdev->revision == PCI_REVISION_ID_HIP08)
+ return 0;
+
+ ret = __hns_roce_hw_v2_init_instance(handle);
+ if (ret) {
+ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
+ dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
+ if (ops->ae_dev_resetting(handle) ||
+ ops->get_hw_reset_stat(handle))
+ goto reset_chk_err;
+ else
+ return ret;
+ }
+
+ handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;
+
+ return 0;
+
+reset_chk_err:
+ dev_err(dev, "Device is busy in resetting state, please retry later.\n");
+
+ return -EBUSY;
+}
+
+static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
+ bool reset)
+{
+ if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
+ return;
+
+ handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;
+
+ __hns_roce_hw_v2_uninit_instance(handle, reset);
+
+ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
+}
+
+static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
+{
+ struct hns_roce_dev *hr_dev;
+
+ if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
+ set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
+ return 0;
+ }
+
+ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
+ clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
+
+ hr_dev = handle->priv;
+ if (!hr_dev)
+ return 0;
+
+ hr_dev->active = false;
+ hr_dev->dis_db = true;
+
+ rdma_user_mmap_disassociate(&hr_dev->ib_dev);
+
+ hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;
+
+ return 0;
+}
+
+static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
+{
+ struct device *dev = &handle->pdev->dev;
+ int ret;
+
+ if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
+ &handle->rinfo.state)) {
+ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
+ return 0;
+ }
+
+ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;
+
+ dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n");
+ ret = __hns_roce_hw_v2_init_instance(handle);
+ if (ret) {
+ /* When the reset notify type is HNAE3_INIT_CLIENT, the RoCE engine
+ * is reinitialized in this callback. If the reinit fails, the NIC
+ * driver must be informed.
+ */
+ handle->priv = NULL;
+ dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
+ } else {
+ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
+ dev_info(dev, "reset done, RoCE client reinit finished.\n");
+ }
+
+ return ret;
+}
+
+static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
+{
+ if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
+ return 0;
+
+ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
+ dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
+ msleep(HNS_ROCE_V2_HW_RST_UNINT_DELAY);
+ __hns_roce_hw_v2_uninit_instance(handle, false);
+
+ return 0;
+}
+
+static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
+ enum hnae3_reset_notify_type type)
+{
+ int ret = 0;
+
+ switch (type) {
+ case HNAE3_DOWN_CLIENT:
+ ret = hns_roce_hw_v2_reset_notify_down(handle);
+ break;
+ case HNAE3_INIT_CLIENT:
+ ret = hns_roce_hw_v2_reset_notify_init(handle);
+ break;
+ case HNAE3_UNINIT_CLIENT:
+ ret = hns_roce_hw_v2_reset_notify_uninit(handle);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void hns_roce_hw_v2_link_status_change(struct hnae3_handle *handle,
+ bool linkup)
+{
+ struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
+ struct net_device *netdev = handle->rinfo.netdev;
+
+ if (linkup || !hr_dev)
+ return;
+
+ ib_dispatch_port_state_event(&hr_dev->ib_dev, netdev);
+}
+
+static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
+ .init_instance = hns_roce_hw_v2_init_instance,
+ .uninit_instance = hns_roce_hw_v2_uninit_instance,
+ .link_status_change = hns_roce_hw_v2_link_status_change,
+ .reset_notify = hns_roce_hw_v2_reset_notify,
+};
+
+static struct hnae3_client hns_roce_hw_v2_client = {
+ .name = "hns_roce_hw_v2",
+ .type = HNAE3_CLIENT_ROCE,
+ .ops = &hns_roce_hw_v2_ops,
+};
+
+static int __init hns_roce_hw_v2_init(void)
+{
+ hns_roce_init_debugfs();
+ return hnae3_register_client(&hns_roce_hw_v2_client);
+}
+
+static void __exit hns_roce_hw_v2_exit(void)
+{
+ hnae3_unregister_client(&hns_roce_hw_v2_client);
+ hns_roce_cleanup_debugfs();
+}
+
+module_init(hns_roce_hw_v2_init);
+module_exit(hns_roce_hw_v2_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
+MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
+MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
+MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
new file mode 100644
index 000000000000..e64a04d6f85b
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -0,0 +1,1481 @@
+/*
+ * Copyright (c) 2016-2017 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _HNS_ROCE_HW_V2_H
+#define _HNS_ROCE_HW_V2_H
+
+#include <linux/bitops.h>
+#include "hnae3.h"
+
+#define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32
+#define HNS_ROCE_V2_MTT_ENTRY_SZ 64
+#define HNS_ROCE_V2_AEQE_VEC_NUM 1
+#define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1
+#define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000
+#define HNS_ROCE_V2_MAX_IDX_SEGS 0x1000000
+#define HNS_ROCE_V2_MAX_XRCD_NUM 0x1000000
+
+#define HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08 10
+
+#define HNS_ROCE_V3_SCCC_SZ 64
+#define HNS_ROCE_V3_GMV_ENTRY_SZ 32
+
+#define HNS_ROCE_V2_EXT_LLM_ENTRY_SZ 8
+#define HNS_ROCE_V2_EXT_LLM_MAX_DEPTH 4096
+
+#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE
+#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
+#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFF000
+#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
+#define HNS_ROCE_INVALID_LKEY 0x0
+#define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000
+#define HNS_ROCE_CMQ_TX_TIMEOUT 30000
+#define HNS_ROCE_V2_RSV_QPS 8
+
+#define HNS_ROCE_V2_HW_RST_TIMEOUT 1000
+#define HNS_ROCE_V2_HW_RST_UNINT_DELAY 100
+
+#define HNS_ROCE_V2_HW_RST_COMPLETION_WAIT 20
+
+#define HNS_ROCE_CONTEXT_HOP_NUM 1
+#define HNS_ROCE_SCCC_HOP_NUM 1
+#define HNS_ROCE_MTT_HOP_NUM 1
+#define HNS_ROCE_CQE_HOP_NUM 1
+#define HNS_ROCE_SRQWQE_HOP_NUM 1
+#define HNS_ROCE_PBL_HOP_NUM 2
+#define HNS_ROCE_IDX_HOP_NUM 1
+#define HNS_ROCE_SQWQE_HOP_NUM 2
+#define HNS_ROCE_EXT_SGE_HOP_NUM 1
+#define HNS_ROCE_RQWQE_HOP_NUM 2
+
+#define HNS_ROCE_V2_EQE_HOP_NUM 2
+#define HNS_ROCE_V3_EQE_HOP_NUM 1
+
+#define HNS_ROCE_BA_PG_SZ_SUPPORTED_256K 6
+#define HNS_ROCE_BA_PG_SZ_SUPPORTED_16K 2
+#define HNS_ROCE_V2_GID_INDEX_NUM 16
+
+#define HNS_ROCE_V2_TABLE_CHUNK_SIZE (1 << 18)
+
+/* The budget must be smaller than aeqe_depth to guarantee that the ci is
+ * updated before all the entries in the EQ have been polled.
+ */
+#define HNS_AEQ_POLLING_BUDGET 64
+
+enum {
+ HNS_ROCE_CMD_FLAG_IN = BIT(0),
+ HNS_ROCE_CMD_FLAG_OUT = BIT(1),
+ HNS_ROCE_CMD_FLAG_NEXT = BIT(2),
+ HNS_ROCE_CMD_FLAG_WR = BIT(3),
+ HNS_ROCE_CMD_FLAG_ERR_INTR = BIT(5),
+};
+
+#define HNS_ROCE_CMQ_DESC_NUM_S 3
+
+#define HNS_ROCE_CMQ_SCC_CLR_DONE_CNT 5
+
+#define HNS_ROCE_CONG_SIZE 64
+
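+/* True when @step_idx refers to the last step of the multi-hop
+ * base-address table walk for the given @hop_num.
+ */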
+#define check_whether_last_step(hop_num, step_idx) \
+ ((step_idx == 0 && hop_num == HNS_ROCE_HOP_NUM_0) || \
+ (step_idx == 1 && hop_num == 1) || \
+ (step_idx == 2 && hop_num == 2))
+#define HNS_ICL_SWITCH_CMD_ROCEE_SEL_SHIFT 0
+#define HNS_ICL_SWITCH_CMD_ROCEE_SEL BIT(HNS_ICL_SWITCH_CMD_ROCEE_SEL_SHIFT)
+
+#define CMD_CSQ_DESC_NUM 1024
+#define CMD_CRQ_DESC_NUM 1024
+
+/* Parameters of the reserved resources used by the free-MR mechanism */
+#define HNS_ROCE_FREE_MR_USED_CQE_NUM 128
+#define HNS_ROCE_FREE_MR_USED_QP_NUM 0x8
+#define HNS_ROCE_FREE_MR_USED_PSN 0x0808
+#define HNS_ROCE_FREE_MR_USED_QP_RETRY_CNT 0x7
+#define HNS_ROCE_FREE_MR_USED_QP_TIMEOUT 0x12
+#define HNS_ROCE_FREE_MR_USED_SQWQE_NUM 128
+#define HNS_ROCE_FREE_MR_USED_SQSGE_NUM 0x2
+#define HNS_ROCE_FREE_MR_USED_RQWQE_NUM 128
+#define HNS_ROCE_FREE_MR_USED_RQSGE_NUM 0x2
+#define HNS_ROCE_V2_FREE_MR_TIMEOUT 4500
+
+enum {
+ NO_ARMED = 0x0,
+ REG_NXT_CEQE = 0x2,
+ REG_NXT_SE_CEQE = 0x3
+};
+
+enum {
+ CQE_SIZE_32B = 0x0,
+ CQE_SIZE_64B = 0x1
+};
+
+#define V2_CQ_DB_REQ_NOT_SOL 0
+#define V2_CQ_DB_REQ_NOT 1
+
+#define V2_CQ_STATE_VALID 1
+#define V2_QKEY_VAL 0x80010000
+
+#define GID_LEN_V2 16
+
+enum {
+ HNS_ROCE_V2_WQE_OP_SEND = 0x0,
+ HNS_ROCE_V2_WQE_OP_SEND_WITH_INV = 0x1,
+ HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM = 0x2,
+ HNS_ROCE_V2_WQE_OP_RDMA_WRITE = 0x3,
+ HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM = 0x4,
+ HNS_ROCE_V2_WQE_OP_RDMA_READ = 0x5,
+ HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP = 0x6,
+ HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD = 0x7,
+ HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP = 0x8,
+ HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD = 0x9,
+ HNS_ROCE_V2_WQE_OP_FAST_REG_PMR = 0xa,
+ HNS_ROCE_V2_WQE_OP_BIND_MW = 0xc,
+ HNS_ROCE_V2_WQE_OP_MASK = 0x1f,
+};
+
+enum {
+ /* rq operations */
+ HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM = 0x0,
+ HNS_ROCE_V2_OPCODE_SEND = 0x1,
+ HNS_ROCE_V2_OPCODE_SEND_WITH_IMM = 0x2,
+ HNS_ROCE_V2_OPCODE_SEND_WITH_INV = 0x3,
+};
+
+enum {
+ HNS_ROCE_V2_SQ_DB,
+ HNS_ROCE_V2_RQ_DB,
+ HNS_ROCE_V2_SRQ_DB,
+ HNS_ROCE_V2_CQ_DB,
+ HNS_ROCE_V2_CQ_DB_NOTIFY
+};
+
+enum {
+ HNS_ROCE_CQE_V2_SUCCESS = 0x00,
+ HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR = 0x01,
+ HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR = 0x02,
+ HNS_ROCE_CQE_V2_LOCAL_PROT_ERR = 0x04,
+ HNS_ROCE_CQE_V2_WR_FLUSH_ERR = 0x05,
+ HNS_ROCE_CQE_V2_MW_BIND_ERR = 0x06,
+ HNS_ROCE_CQE_V2_BAD_RESP_ERR = 0x10,
+ HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR = 0x11,
+ HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR = 0x12,
+ HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR = 0x13,
+ HNS_ROCE_CQE_V2_REMOTE_OP_ERR = 0x14,
+ HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR = 0x15,
+ HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR = 0x16,
+ HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR = 0x22,
+ HNS_ROCE_CQE_V2_GENERAL_ERR = 0x23,
+
+ HNS_ROCE_V2_CQE_STATUS_MASK = 0xff,
+};
+
+/* CMQ command */
+enum hns_roce_opcode_type {
+ HNS_QUERY_FW_VER = 0x0001,
+ HNS_ROCE_OPC_QUERY_HW_VER = 0x8000,
+ HNS_ROCE_OPC_CFG_GLOBAL_PARAM = 0x8001,
+ HNS_ROCE_OPC_ALLOC_PF_RES = 0x8004,
+ HNS_ROCE_OPC_QUERY_COUNTER = 0x8206,
+ HNS_ROCE_OPC_QUERY_PF_RES = 0x8400,
+ HNS_ROCE_OPC_ALLOC_VF_RES = 0x8401,
+ HNS_ROCE_OPC_CFG_EXT_LLM = 0x8403,
+ HNS_ROCE_OPC_QUERY_PF_TIMER_RES = 0x8406,
+ HNS_ROCE_OPC_QUERY_FUNC_INFO = 0x8407,
+ HNS_ROCE_OPC_QUERY_PF_CAPS_NUM = 0x8408,
+ HNS_ROCE_OPC_CFG_ENTRY_SIZE = 0x8409,
+ HNS_ROCE_OPC_QUERY_VF_CAPS_NUM = 0x8410,
+ HNS_ROCE_OPC_CFG_SGID_TB = 0x8500,
+ HNS_ROCE_OPC_CFG_SMAC_TB = 0x8501,
+ HNS_ROCE_OPC_POST_MB = 0x8504,
+ HNS_ROCE_OPC_QUERY_MB_ST = 0x8505,
+ HNS_ROCE_OPC_CFG_BT_ATTR = 0x8506,
+ HNS_ROCE_OPC_FUNC_CLEAR = 0x8508,
+ HNS_ROCE_OPC_CLR_SCCC = 0x8509,
+ HNS_ROCE_OPC_QUERY_SCCC = 0x850a,
+ HNS_ROCE_OPC_RESET_SCCC = 0x850b,
+ HNS_ROCE_OPC_CLEAR_EXTDB_LIST_INFO = 0x850d,
+ HNS_ROCE_OPC_QUERY_VF_RES = 0x850e,
+ HNS_ROCE_OPC_CFG_GMV_TBL = 0x850f,
+ HNS_ROCE_OPC_CFG_GMV_BT = 0x8510,
+ HNS_ROCE_QUERY_RAM_ECC = 0x8513,
+ HNS_SWITCH_PARAMETER_CFG = 0x1033,
+};
+
+#define HNS_ROCE_OPC_POST_MB_TIMEOUT 35000
+#define HNS_ROCE_OPC_POST_MB_TRY_CNT 8
+#define HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC 5
+struct hns_roce_cmdq_tx_timeout_map {
+ u16 opcode;
+ u32 tx_timeout;
+};
+
+enum {
+ TYPE_CRQ,
+ TYPE_CSQ,
+};
+
+enum hns_roce_cmd_return_status {
+ CMD_EXEC_SUCCESS,
+ CMD_NO_AUTH,
+ CMD_NOT_EXIST,
+ CMD_CRQ_FULL,
+ CMD_NEXT_ERR,
+ CMD_NOT_EXEC,
+ CMD_PARA_ERR,
+ CMD_RESULT_ERR,
+ CMD_TIMEOUT,
+ CMD_HILINK_ERR,
+ CMD_INFO_ILLEGAL,
+ CMD_INVALID,
+ CMD_ROH_CHECK_FAIL,
+ CMD_OTHER_ERR = 0xff
+};
+
+struct hns_roce_cmd_errcode {
+ enum hns_roce_cmd_return_status return_status;
+ int errno;
+};
+
+enum hns_roce_sgid_type {
+ GID_TYPE_FLAG_ROCE_V1 = 0,
+ GID_TYPE_FLAG_ROCE_V2_IPV4,
+ GID_TYPE_FLAG_ROCE_V2_IPV6,
+};
+
+struct hns_roce_v2_cq_context {
+ __le32 byte_4_pg_ceqn;
+ __le32 byte_8_cqn;
+ __le32 cqe_cur_blk_addr;
+ __le32 byte_16_hop_addr;
+ __le32 cqe_nxt_blk_addr;
+ __le32 byte_24_pgsz_addr;
+ __le32 byte_28_cq_pi;
+ __le32 byte_32_cq_ci;
+ __le32 cqe_ba;
+ __le32 byte_40_cqe_ba;
+ __le32 byte_44_db_record;
+ __le32 db_record_addr;
+ __le32 byte_52_cqe_cnt;
+ __le32 byte_56_cqe_period_maxcnt;
+ __le32 cqe_report_timer;
+ __le32 byte_64_se_cqe_idx;
+};
+
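+/* Shift amounts applied to the CQE base address and the doorbell record
+ * address before they are written into the L/H fields of the CQ context.
+ */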
+#define CQC_CQE_BA_L_S 3
+#define CQC_CQE_BA_H_S (32 + CQC_CQE_BA_L_S)
+#define CQC_CQE_DB_RECORD_ADDR_H_S 32
+
+#define HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM 0x0
+#define HNS_ROCE_V2_CQ_DEFAULT_INTERVAL 0x0
+
+#define CQC_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_cq_context, h, l)
+
+#define CQC_CQ_ST CQC_FIELD_LOC(1, 0)
+#define CQC_POLL CQC_FIELD_LOC(2, 2)
+#define CQC_SE CQC_FIELD_LOC(3, 3)
+#define CQC_OVER_IGNORE CQC_FIELD_LOC(4, 4)
+#define CQC_ARM_ST CQC_FIELD_LOC(7, 6)
+#define CQC_SHIFT CQC_FIELD_LOC(12, 8)
+#define CQC_CMD_SN CQC_FIELD_LOC(14, 13)
+#define CQC_CEQN CQC_FIELD_LOC(23, 15)
+#define CQC_CQN CQC_FIELD_LOC(55, 32)
+#define CQC_POE_EN CQC_FIELD_LOC(56, 56)
+#define CQC_POE_NUM CQC_FIELD_LOC(58, 57)
+#define CQC_CQE_SIZE CQC_FIELD_LOC(60, 59)
+#define CQC_CQ_CNT_MODE CQC_FIELD_LOC(61, 61)
+#define CQC_STASH CQC_FIELD_LOC(63, 63)
+#define CQC_CQE_CUR_BLK_ADDR_L CQC_FIELD_LOC(95, 64)
+#define CQC_CQE_CUR_BLK_ADDR_H CQC_FIELD_LOC(115, 96)
+#define CQC_POE_QID CQC_FIELD_LOC(125, 116)
+#define CQC_CQE_HOP_NUM CQC_FIELD_LOC(127, 126)
+#define CQC_CQE_NEX_BLK_ADDR_L CQC_FIELD_LOC(159, 128)
+#define CQC_CQE_NEX_BLK_ADDR_H CQC_FIELD_LOC(179, 160)
+#define CQC_CQE_BAR_PG_SZ CQC_FIELD_LOC(187, 184)
+#define CQC_CQE_BUF_PG_SZ CQC_FIELD_LOC(191, 188)
+#define CQC_CQ_PRODUCER_IDX CQC_FIELD_LOC(215, 192)
+#define CQC_CQ_CONSUMER_IDX CQC_FIELD_LOC(247, 224)
+#define CQC_CQE_BA_L CQC_FIELD_LOC(287, 256)
+#define CQC_CQE_BA_H CQC_FIELD_LOC(316, 288)
+#define CQC_POE_QID_H_0 CQC_FIELD_LOC(319, 317)
+#define CQC_DB_RECORD_EN CQC_FIELD_LOC(320, 320)
+#define CQC_CQE_DB_RECORD_ADDR_L CQC_FIELD_LOC(351, 321)
+#define CQC_CQE_DB_RECORD_ADDR_H CQC_FIELD_LOC(383, 352)
+#define CQC_CQE_CNT CQC_FIELD_LOC(407, 384)
+#define CQC_CQ_MAX_CNT CQC_FIELD_LOC(431, 416)
+#define CQC_CQ_PERIOD CQC_FIELD_LOC(447, 432)
+#define CQC_CQE_REPORT_TIMER CQC_FIELD_LOC(471, 448)
+#define CQC_WR_CQE_IDX CQC_FIELD_LOC(479, 472)
+#define CQC_SE_CQE_IDX CQC_FIELD_LOC(503, 480)
+#define CQC_POE_QID_H_1 CQC_FIELD_LOC(511, 511)
+
+struct hns_roce_srq_context {
+ __le32 data[16];
+};
+
+#define SRQC_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_srq_context, h, l)
+
+#define SRQC_SRQ_ST SRQC_FIELD_LOC(1, 0)
+#define SRQC_WQE_HOP_NUM SRQC_FIELD_LOC(3, 2)
+#define SRQC_SHIFT SRQC_FIELD_LOC(7, 4)
+#define SRQC_SRQN SRQC_FIELD_LOC(31, 8)
+#define SRQC_LIMIT_WL SRQC_FIELD_LOC(47, 32)
+#define SRQC_RSV0 SRQC_FIELD_LOC(63, 48)
+#define SRQC_XRCD SRQC_FIELD_LOC(87, 64)
+#define SRQC_RSV1 SRQC_FIELD_LOC(95, 88)
+#define SRQC_PRODUCER_IDX SRQC_FIELD_LOC(111, 96)
+#define SRQC_CONSUMER_IDX SRQC_FIELD_LOC(127, 112)
+#define SRQC_WQE_BT_BA_L SRQC_FIELD_LOC(159, 128)
+#define SRQC_WQE_BT_BA_H SRQC_FIELD_LOC(188, 160)
+#define SRQC_RSV2 SRQC_FIELD_LOC(190, 189)
+#define SRQC_SRQ_TYPE SRQC_FIELD_LOC(191, 191)
+#define SRQC_PD SRQC_FIELD_LOC(215, 192)
+#define SRQC_RQWS SRQC_FIELD_LOC(219, 216)
+#define SRQC_RSV3 SRQC_FIELD_LOC(223, 220)
+#define SRQC_IDX_BT_BA_L SRQC_FIELD_LOC(255, 224)
+#define SRQC_IDX_BT_BA_H SRQC_FIELD_LOC(284, 256)
+#define SRQC_RSV4 SRQC_FIELD_LOC(287, 285)
+#define SRQC_IDX_CUR_BLK_ADDR_L SRQC_FIELD_LOC(319, 288)
+#define SRQC_IDX_CUR_BLK_ADDR_H SRQC_FIELD_LOC(339, 320)
+#define SRQC_RSV5 SRQC_FIELD_LOC(341, 340)
+#define SRQC_IDX_HOP_NUM SRQC_FIELD_LOC(343, 342)
+#define SRQC_IDX_BA_PG_SZ SRQC_FIELD_LOC(347, 344)
+#define SRQC_IDX_BUF_PG_SZ SRQC_FIELD_LOC(351, 348)
+#define SRQC_IDX_NXT_BLK_ADDR_L SRQC_FIELD_LOC(383, 352)
+#define SRQC_IDX_NXT_BLK_ADDR_H SRQC_FIELD_LOC(403, 384)
+#define SRQC_RSV6 SRQC_FIELD_LOC(415, 404)
+#define SRQC_XRC_CQN SRQC_FIELD_LOC(439, 416)
+#define SRQC_WQE_BA_PG_SZ SRQC_FIELD_LOC(443, 440)
+#define SRQC_WQE_BUF_PG_SZ SRQC_FIELD_LOC(447, 444)
+#define SRQC_DB_RECORD_EN SRQC_FIELD_LOC(448, 448)
+#define SRQC_DB_RECORD_ADDR_L SRQC_FIELD_LOC(479, 449)
+#define SRQC_DB_RECORD_ADDR_H SRQC_FIELD_LOC(511, 480)
+
+enum {
+ V2_MPT_ST_VALID = 0x1,
+ V2_MPT_ST_FREE = 0x2,
+};
+
+enum hns_roce_v2_qp_state {
+ HNS_ROCE_QP_ST_RST,
+ HNS_ROCE_QP_ST_INIT,
+ HNS_ROCE_QP_ST_RTR,
+ HNS_ROCE_QP_ST_RTS,
+ HNS_ROCE_QP_ST_SQD,
+ HNS_ROCE_QP_ST_SQER,
+ HNS_ROCE_QP_ST_ERR,
+ HNS_ROCE_QP_ST_SQ_DRAINING,
+ HNS_ROCE_QP_NUM_ST
+};
+
+struct hns_roce_v2_qp_context_ex {
+ __le32 data[64];
+};
+
+struct hns_roce_v2_qp_context {
+ __le32 byte_4_sqpn_tst;
+ __le32 wqe_sge_ba;
+ __le32 byte_12_sq_hop;
+ __le32 byte_16_buf_ba_pg_sz;
+ __le32 byte_20_smac_sgid_idx;
+ __le32 byte_24_mtu_tc;
+ __le32 byte_28_at_fl;
+ u8 dgid[GID_LEN_V2];
+ __le32 dmac;
+ __le32 byte_52_udpspn_dmac;
+ __le32 byte_56_dqpn_err;
+ __le32 byte_60_qpst_tempid;
+ __le32 qkey_xrcd;
+ __le32 byte_68_rq_db;
+ __le32 rq_db_record_addr;
+ __le32 byte_76_srqn_op_en;
+ __le32 byte_80_rnr_rx_cqn;
+ __le32 byte_84_rq_ci_pi;
+ __le32 rq_cur_blk_addr;
+ __le32 byte_92_srq_info;
+ __le32 byte_96_rx_reqmsn;
+ __le32 rq_nxt_blk_addr;
+ __le32 byte_104_rq_sge;
+ __le32 byte_108_rx_reqepsn;
+ __le32 rq_rnr_timer;
+ __le32 rx_msg_len;
+ __le32 rx_rkey_pkt_info;
+ __le64 rx_va;
+ __le32 byte_132_trrl;
+ __le32 trrl_ba;
+ __le32 byte_140_raq;
+ __le32 byte_144_raq;
+ __le32 byte_148_raq;
+ __le32 byte_152_raq;
+ __le32 byte_156_raq;
+ __le32 byte_160_sq_ci_pi;
+ __le32 sq_cur_blk_addr;
+ __le32 byte_168_irrl_idx;
+ __le32 byte_172_sq_psn;
+ __le32 byte_176_msg_pktn;
+ __le32 sq_cur_sge_blk_addr;
+ __le32 byte_184_irrl_idx;
+ __le32 cur_sge_offset;
+ __le32 byte_192_ext_sge;
+ __le32 byte_196_sq_psn;
+ __le32 byte_200_sq_max;
+ __le32 irrl_ba;
+ __le32 byte_208_irrl;
+ __le32 byte_212_lsn;
+ __le32 sq_timer;
+ __le32 byte_220_retry_psn_msn;
+ __le32 byte_224_retry_msg;
+ __le32 rx_sq_cur_blk_addr;
+ __le32 byte_232_irrl_sge;
+ __le32 irrl_cur_sge_offset;
+ __le32 byte_240_irrl_tail;
+ __le32 byte_244_rnr_rxack;
+ __le32 byte_248_ack_psn;
+ __le32 byte_252_err_txcqn;
+ __le32 byte_256_sqflush_rqcqe;
+
+ struct hns_roce_v2_qp_context_ex ext;
+};
+
+#define QPC_TRRL_BA_L_S 4
+#define QPC_TRRL_BA_M_S (16 + QPC_TRRL_BA_L_S)
+#define QPC_TRRL_BA_H_S (32 + QPC_TRRL_BA_M_S)
+#define QPC_IRRL_BA_L_S 6
+#define QPC_IRRL_BA_H_S (32 + QPC_IRRL_BA_L_S)
+
+#define QPC_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_qp_context, h, l)
+
+#define QPC_TST QPC_FIELD_LOC(2, 0)
+#define QPC_SGE_SHIFT QPC_FIELD_LOC(7, 3)
+#define QPC_CNP_TIMER QPC_FIELD_LOC(31, 8)
+#define QPC_WQE_SGE_BA_L QPC_FIELD_LOC(63, 32)
+#define QPC_WQE_SGE_BA_H QPC_FIELD_LOC(92, 64)
+#define QPC_SQ_HOP_NUM QPC_FIELD_LOC(94, 93)
+#define QPC_CIRE_EN QPC_FIELD_LOC(95, 95)
+#define QPC_WQE_SGE_BA_PG_SZ QPC_FIELD_LOC(99, 96)
+#define QPC_WQE_SGE_BUF_PG_SZ QPC_FIELD_LOC(103, 100)
+#define QPC_PD QPC_FIELD_LOC(127, 104)
+#define QPC_RQ_HOP_NUM QPC_FIELD_LOC(129, 128)
+#define QPC_SGE_HOP_NUM QPC_FIELD_LOC(131, 130)
+#define QPC_RQWS QPC_FIELD_LOC(135, 132)
+#define QPC_SQ_SHIFT QPC_FIELD_LOC(139, 136)
+#define QPC_RQ_SHIFT QPC_FIELD_LOC(143, 140)
+#define QPC_GMV_IDX QPC_FIELD_LOC(159, 144)
+#define QPC_HOPLIMIT QPC_FIELD_LOC(167, 160)
+#define QPC_TC QPC_FIELD_LOC(175, 168)
+#define QPC_VLAN_ID QPC_FIELD_LOC(187, 176)
+#define QPC_MTU QPC_FIELD_LOC(191, 188)
+#define QPC_FL QPC_FIELD_LOC(211, 192)
+#define QPC_SL QPC_FIELD_LOC(215, 212)
+#define QPC_CNP_TX_FLAG QPC_FIELD_LOC(216, 216)
+#define QPC_CE_FLAG QPC_FIELD_LOC(217, 217)
+#define QPC_LBI QPC_FIELD_LOC(218, 218)
+#define QPC_AT QPC_FIELD_LOC(223, 219)
+#define QPC_DGID QPC_FIELD_LOC(351, 224)
+#define QPC_DMAC_L QPC_FIELD_LOC(383, 352)
+#define QPC_DMAC_H QPC_FIELD_LOC(399, 384)
+#define QPC_UDPSPN QPC_FIELD_LOC(415, 400)
+#define QPC_DQPN QPC_FIELD_LOC(439, 416)
+#define QPC_SQ_TX_ERR QPC_FIELD_LOC(440, 440)
+#define QPC_SQ_RX_ERR QPC_FIELD_LOC(441, 441)
+#define QPC_RQ_TX_ERR QPC_FIELD_LOC(442, 442)
+#define QPC_RQ_RX_ERR QPC_FIELD_LOC(443, 443)
+#define QPC_LP_PKTN_INI QPC_FIELD_LOC(447, 444)
+#define QPC_CONG_ALGO_TMPL_ID QPC_FIELD_LOC(455, 448)
+#define QPC_SCC_TOKEN QPC_FIELD_LOC(474, 456)
+#define QPC_SQ_DB_DOING QPC_FIELD_LOC(475, 475)
+#define QPC_RQ_DB_DOING QPC_FIELD_LOC(476, 476)
+#define QPC_QP_ST QPC_FIELD_LOC(479, 477)
+#define QPC_QKEY_XRCD QPC_FIELD_LOC(511, 480)
+#define QPC_RQ_RECORD_EN QPC_FIELD_LOC(512, 512)
+#define QPC_RQ_DB_RECORD_ADDR_L QPC_FIELD_LOC(543, 513)
+#define QPC_RQ_DB_RECORD_ADDR_H QPC_FIELD_LOC(575, 544)
+#define QPC_SRQN QPC_FIELD_LOC(599, 576)
+#define QPC_SRQ_EN QPC_FIELD_LOC(600, 600)
+#define QPC_RRE QPC_FIELD_LOC(601, 601)
+#define QPC_RWE QPC_FIELD_LOC(602, 602)
+#define QPC_ATE QPC_FIELD_LOC(603, 603)
+#define QPC_RQIE QPC_FIELD_LOC(604, 604)
+#define QPC_EXT_ATE QPC_FIELD_LOC(605, 605)
+#define QPC_RQ_VLAN_EN QPC_FIELD_LOC(606, 606)
+#define QPC_RQ_RTY_TX_ERR QPC_FIELD_LOC(607, 607)
+#define QPC_RX_CQN QPC_FIELD_LOC(631, 608)
+#define QPC_XRC_QP_TYPE QPC_FIELD_LOC(632, 632)
+#define QPC_CQEIE QPC_FIELD_LOC(633, 633)
+#define QPC_CQEIS QPC_FIELD_LOC(634, 634)
+#define QPC_MIN_RNR_TIME QPC_FIELD_LOC(639, 635)
+#define QPC_RQ_PRODUCER_IDX QPC_FIELD_LOC(655, 640)
+#define QPC_RQ_CONSUMER_IDX QPC_FIELD_LOC(671, 656)
+#define QPC_RQ_CUR_BLK_ADDR_L QPC_FIELD_LOC(703, 672)
+#define QPC_RQ_CUR_BLK_ADDR_H QPC_FIELD_LOC(723, 704)
+#define QPC_SRQ_INFO QPC_FIELD_LOC(735, 724)
+#define QPC_RX_REQ_MSN QPC_FIELD_LOC(759, 736)
+#define QPC_REDUCE_CODE QPC_FIELD_LOC(766, 760)
+#define QPC_RX_XRC_PKT_CQE_FLG QPC_FIELD_LOC(767, 767)
+#define QPC_RQ_NXT_BLK_ADDR_L QPC_FIELD_LOC(799, 768)
+#define QPC_RQ_NXT_BLK_ADDR_H QPC_FIELD_LOC(819, 800)
+#define QPC_REDUCE_EN QPC_FIELD_LOC(820, 820)
+#define QPC_FLUSH_EN QPC_FIELD_LOC(821, 821)
+#define QPC_AW_EN QPC_FIELD_LOC(822, 822)
+#define QPC_WN_EN QPC_FIELD_LOC(823, 823)
+#define QPC_RQ_CUR_WQE_SGE_NUM QPC_FIELD_LOC(831, 824)
+#define QPC_INV_CREDIT QPC_FIELD_LOC(832, 832)
+#define QPC_LAST_WRITE_TYPE QPC_FIELD_LOC(834, 833)
+#define QPC_RX_REQ_PSN_ERR QPC_FIELD_LOC(835, 835)
+#define QPC_RX_REQ_LAST_OPTYPE QPC_FIELD_LOC(838, 836)
+#define QPC_RX_REQ_RNR QPC_FIELD_LOC(839, 839)
+#define QPC_RX_REQ_EPSN QPC_FIELD_LOC(863, 840)
+#define QPC_RQ_RNR_TIMER QPC_FIELD_LOC(895, 864)
+#define QPC_RX_MSG_LEN QPC_FIELD_LOC(927, 896)
+#define QPC_RX_RKEY_PKT_INFO QPC_FIELD_LOC(959, 928)
+#define QPC_RX_VA QPC_FIELD_LOC(1023, 960)
+#define QPC_TRRL_HEAD_MAX QPC_FIELD_LOC(1031, 1024)
+#define QPC_TRRL_TAIL_MAX QPC_FIELD_LOC(1039, 1032)
+#define QPC_TRRL_BA_L QPC_FIELD_LOC(1055, 1040)
+#define QPC_TRRL_BA_M QPC_FIELD_LOC(1087, 1056)
+#define QPC_TRRL_BA_H QPC_FIELD_LOC(1099, 1088)
+#define QPC_RR_MAX QPC_FIELD_LOC(1102, 1100)
+#define QPC_RQ_RTY_WAIT_DO QPC_FIELD_LOC(1103, 1103)
+#define QPC_RAQ_TRRL_HEAD QPC_FIELD_LOC(1111, 1104)
+#define QPC_RAQ_TRRL_TAIL QPC_FIELD_LOC(1119, 1112)
+#define QPC_RAQ_RTY_INI_PSN QPC_FIELD_LOC(1143, 1120)
+#define QPC_CIRE_SLV_RQ_EN QPC_FIELD_LOC(1144, 1144)
+#define QPC_RAQ_CREDIT QPC_FIELD_LOC(1149, 1145)
+#define QPC_RQ_DB_IN_EXT QPC_FIELD_LOC(1150, 1150)
+#define QPC_RESP_RTY_FLG QPC_FIELD_LOC(1151, 1151)
+#define QPC_RAQ_MSN QPC_FIELD_LOC(1175, 1152)
+#define QPC_RAQ_SYNDROME QPC_FIELD_LOC(1183, 1176)
+#define QPC_RAQ_PSN QPC_FIELD_LOC(1207, 1184)
+#define QPC_RAQ_TRRL_RTY_HEAD QPC_FIELD_LOC(1215, 1208)
+#define QPC_RAQ_USE_PKTN QPC_FIELD_LOC(1239, 1216)
+#define QPC_RQ_SCC_TOKEN QPC_FIELD_LOC(1245, 1240)
+#define QPC_RVD10 QPC_FIELD_LOC(1247, 1246)
+#define QPC_SQ_PRODUCER_IDX QPC_FIELD_LOC(1263, 1248)
+#define QPC_SQ_CONSUMER_IDX QPC_FIELD_LOC(1279, 1264)
+#define QPC_SQ_CUR_BLK_ADDR_L QPC_FIELD_LOC(1311, 1280)
+#define QPC_SQ_CUR_BLK_ADDR_H QPC_FIELD_LOC(1331, 1312)
+#define QPC_MSG_RTY_LP_FLG QPC_FIELD_LOC(1332, 1332)
+#define QPC_SQ_INVLD_FLG QPC_FIELD_LOC(1333, 1333)
+#define QPC_LP_SGEN_INI QPC_FIELD_LOC(1335, 1334)
+#define QPC_SQ_VLAN_EN QPC_FIELD_LOC(1336, 1336)
+#define QPC_POLL_DB_WAIT_DO QPC_FIELD_LOC(1337, 1337)
+#define QPC_SCC_TOKEN_FORBID_SQ_DEQ QPC_FIELD_LOC(1338, 1338)
+#define QPC_WAIT_ACK_TIMEOUT QPC_FIELD_LOC(1339, 1339)
+#define QPC_IRRL_IDX_LSB QPC_FIELD_LOC(1343, 1340)
+#define QPC_ACK_REQ_FREQ QPC_FIELD_LOC(1349, 1344)
+#define QPC_MSG_RNR_FLG QPC_FIELD_LOC(1350, 1350)
+#define QPC_FRE QPC_FIELD_LOC(1351, 1351)
+#define QPC_SQ_CUR_PSN QPC_FIELD_LOC(1375, 1352)
+#define QPC_MSG_USE_PKTN QPC_FIELD_LOC(1399, 1376)
+#define QPC_IRRL_HEAD_PRE QPC_FIELD_LOC(1407, 1400)
+#define QPC_SQ_CUR_SGE_BLK_ADDR_L QPC_FIELD_LOC(1439, 1408)
+#define QPC_SQ_CUR_SGE_BLK_ADDR_H QPC_FIELD_LOC(1459, 1440)
+#define QPC_IRRL_IDX_MSB QPC_FIELD_LOC(1471, 1460)
+#define QPC_CUR_SGE_OFFSET QPC_FIELD_LOC(1503, 1472)
+#define QPC_CUR_SGE_IDX QPC_FIELD_LOC(1527, 1504)
+#define QPC_EXT_SGE_NUM_LEFT QPC_FIELD_LOC(1535, 1528)
+#define QPC_OWNER_MODE QPC_FIELD_LOC(1536, 1536)
+#define QPC_CIRE_SLV_SQ_EN QPC_FIELD_LOC(1537, 1537)
+#define QPC_CIRE_DOING QPC_FIELD_LOC(1538, 1538)
+#define QPC_CIRE_RESULT QPC_FIELD_LOC(1539, 1539)
+#define QPC_OWNER_DB_WAIT_DO QPC_FIELD_LOC(1540, 1540)
+#define QPC_SQ_WQE_INVLD QPC_FIELD_LOC(1541, 1541)
+#define QPC_DCA_MODE QPC_FIELD_LOC(1542, 1542)
+#define QPC_RTY_OWNER_NOCHK QPC_FIELD_LOC(1543, 1543)
+#define QPC_V2_IRRL_HEAD QPC_FIELD_LOC(1543, 1536)
+#define QPC_SQ_MAX_PSN QPC_FIELD_LOC(1567, 1544)
+#define QPC_SQ_MAX_IDX QPC_FIELD_LOC(1583, 1568)
+#define QPC_LCL_OPERATED_CNT QPC_FIELD_LOC(1599, 1584)
+#define QPC_IRRL_BA_L QPC_FIELD_LOC(1631, 1600)
+#define QPC_IRRL_BA_H QPC_FIELD_LOC(1657, 1632)
+#define QPC_PKT_RNR_FLG QPC_FIELD_LOC(1658, 1658)
+#define QPC_PKT_RTY_FLG QPC_FIELD_LOC(1659, 1659)
+#define QPC_RMT_E2E QPC_FIELD_LOC(1660, 1660)
+#define QPC_SR_MAX QPC_FIELD_LOC(1663, 1661)
+#define QPC_LSN QPC_FIELD_LOC(1687, 1664)
+#define QPC_RETRY_NUM_INIT QPC_FIELD_LOC(1690, 1688)
+#define QPC_CHECK_FLG QPC_FIELD_LOC(1692, 1691)
+#define QPC_RETRY_CNT QPC_FIELD_LOC(1695, 1693)
+#define QPC_SQ_TIMER QPC_FIELD_LOC(1727, 1696)
+#define QPC_RETRY_MSG_MSN QPC_FIELD_LOC(1743, 1728)
+#define QPC_RETRY_MSG_PSN_L QPC_FIELD_LOC(1759, 1744)
+#define QPC_RETRY_MSG_PSN_H QPC_FIELD_LOC(1767, 1760)
+#define QPC_RETRY_MSG_FPKT_PSN QPC_FIELD_LOC(1791, 1768)
+#define QPC_RX_SQ_CUR_BLK_ADDR_L QPC_FIELD_LOC(1823, 1792)
+#define QPC_RX_SQ_CUR_BLK_ADDR_H QPC_FIELD_LOC(1843, 1824)
+#define QPC_IRRL_SGE_IDX QPC_FIELD_LOC(1851, 1844)
+#define QPC_LSAN_EN QPC_FIELD_LOC(1852, 1852)
+#define QPC_SO_LP_VLD QPC_FIELD_LOC(1853, 1853)
+#define QPC_FENCE_LP_VLD QPC_FIELD_LOC(1854, 1854)
+#define QPC_IRRL_LP_VLD QPC_FIELD_LOC(1855, 1855)
+#define QPC_IRRL_CUR_SGE_OFFSET QPC_FIELD_LOC(1887, 1856)
+#define QPC_IRRL_TAIL_REAL QPC_FIELD_LOC(1895, 1888)
+#define QPC_IRRL_TAIL_RD QPC_FIELD_LOC(1903, 1896)
+#define QPC_RX_ACK_MSN QPC_FIELD_LOC(1919, 1904)
+#define QPC_RX_ACK_EPSN QPC_FIELD_LOC(1943, 1920)
+#define QPC_RNR_NUM_INIT QPC_FIELD_LOC(1946, 1944)
+#define QPC_RNR_CNT QPC_FIELD_LOC(1949, 1947)
+#define QPC_LCL_OP_FLG QPC_FIELD_LOC(1950, 1950)
+#define QPC_IRRL_RD_FLG QPC_FIELD_LOC(1951, 1951)
+#define QPC_IRRL_PSN QPC_FIELD_LOC(1975, 1952)
+#define QPC_ACK_PSN_ERR QPC_FIELD_LOC(1976, 1976)
+#define QPC_ACK_LAST_OPTYPE QPC_FIELD_LOC(1978, 1977)
+#define QPC_IRRL_PSN_VLD QPC_FIELD_LOC(1979, 1979)
+#define QPC_RNR_RETRY_FLAG QPC_FIELD_LOC(1980, 1980)
+#define QPC_SQ_RTY_TX_ERR QPC_FIELD_LOC(1981, 1981)
+#define QPC_LAST_IND QPC_FIELD_LOC(1982, 1982)
+#define QPC_CQ_ERR_IND QPC_FIELD_LOC(1983, 1983)
+#define QPC_TX_CQN QPC_FIELD_LOC(2007, 1984)
+#define QPC_SIG_TYPE QPC_FIELD_LOC(2008, 2008)
+#define QPC_ERR_TYPE QPC_FIELD_LOC(2015, 2009)
+#define QPC_RQ_CQE_IDX QPC_FIELD_LOC(2031, 2016)
+#define QPC_SQ_FLUSH_IDX QPC_FIELD_LOC(2047, 2032)
+
+#define RETRY_MSG_PSN_SHIFT 16
+
+#define QPCEX_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_qp_context_ex, h, l)
+
+#define QPCEX_CONG_ALG_SEL QPCEX_FIELD_LOC(0, 0)
+#define QPCEX_CONG_ALG_SUB_SEL QPCEX_FIELD_LOC(1, 1)
+#define QPCEX_DIP_CTX_IDX_VLD QPCEX_FIELD_LOC(2, 2)
+#define QPCEX_DIP_CTX_IDX QPCEX_FIELD_LOC(22, 3)
+#define QPCEX_SQ_RQ_NOT_FORBID_EN QPCEX_FIELD_LOC(23, 23)
+#define QPCEX_STASH QPCEX_FIELD_LOC(82, 82)
+
+#define SCC_CONTEXT_SIZE 16
+
+struct hns_roce_v2_scc_context {
+ __le32 data[SCC_CONTEXT_SIZE];
+};
+
+#define V2_QP_RWE_S 1 /* rdma write enable */
+#define V2_QP_RRE_S 2 /* rdma read enable */
+#define V2_QP_ATE_S 3 /* rdma atomic enable */
+
+struct hns_roce_v2_cqe {
+ __le32 byte_4;
+ union {
+ __le32 rkey;
+ __le32 immtdata;
+ };
+ __le32 byte_12;
+ __le32 byte_16;
+ __le32 byte_cnt;
+ u8 smac[4];
+ __le32 byte_28;
+ __le32 byte_32;
+ __le32 rsv[8];
+};
+
+#define CQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_cqe, h, l)
+
+#define CQE_OPCODE CQE_FIELD_LOC(4, 0)
+#define CQE_RQ_INLINE CQE_FIELD_LOC(5, 5)
+#define CQE_S_R CQE_FIELD_LOC(6, 6)
+#define CQE_OWNER CQE_FIELD_LOC(7, 7)
+#define CQE_STATUS CQE_FIELD_LOC(15, 8)
+#define CQE_WQE_IDX CQE_FIELD_LOC(31, 16)
+#define CQE_RKEY_IMMTDATA CQE_FIELD_LOC(63, 32)
+#define CQE_XRC_SRQN CQE_FIELD_LOC(87, 64)
+#define CQE_RSV0 CQE_FIELD_LOC(95, 88)
+#define CQE_LCL_QPN CQE_FIELD_LOC(119, 96)
+#define CQE_SUB_STATUS CQE_FIELD_LOC(127, 120)
+#define CQE_BYTE_CNT CQE_FIELD_LOC(159, 128)
+#define CQE_SMAC CQE_FIELD_LOC(207, 160)
+#define CQE_PORT_TYPE CQE_FIELD_LOC(209, 208)
+#define CQE_VID CQE_FIELD_LOC(221, 210)
+#define CQE_VID_VLD CQE_FIELD_LOC(222, 222)
+#define CQE_RSV2 CQE_FIELD_LOC(223, 223)
+#define CQE_RMT_QPN CQE_FIELD_LOC(247, 224)
+#define CQE_SL CQE_FIELD_LOC(250, 248)
+#define CQE_PORTN CQE_FIELD_LOC(253, 251)
+#define CQE_GRH CQE_FIELD_LOC(254, 254)
+#define CQE_LPK CQE_FIELD_LOC(255, 255)
+#define CQE_RSV3 CQE_FIELD_LOC(511, 256)
+
+struct hns_roce_v2_mpt_entry {
+ __le32 byte_4_pd_hop_st;
+ __le32 byte_8_mw_cnt_en;
+ __le32 byte_12_mw_pa;
+ __le32 bound_lkey;
+ __le32 len_l;
+ __le32 len_h;
+ __le32 lkey;
+ __le32 va_l;
+ __le32 va_h;
+ __le32 pbl_size;
+ __le32 pbl_ba_l;
+ __le32 byte_48_mode_ba;
+ __le32 pa0_l;
+ __le32 byte_56_pa0_h;
+ __le32 pa1_l;
+ __le32 byte_64_buf_pa1;
+};
+
+#define MPT_PBL_BUF_ADDR_S 6
+#define MPT_PBL_BA_ADDR_S 3
+
+#define MPT_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_mpt_entry, h, l)
+
+#define MPT_ST MPT_FIELD_LOC(1, 0)
+#define MPT_PBL_HOP_NUM MPT_FIELD_LOC(3, 2)
+#define MPT_PBL_BA_PG_SZ MPT_FIELD_LOC(7, 4)
+#define MPT_PD MPT_FIELD_LOC(31, 8)
+#define MPT_RA_EN MPT_FIELD_LOC(32, 32)
+#define MPT_R_INV_EN MPT_FIELD_LOC(33, 33)
+#define MPT_L_INV_EN MPT_FIELD_LOC(34, 34)
+#define MPT_BIND_EN MPT_FIELD_LOC(35, 35)
+#define MPT_ATOMIC_EN MPT_FIELD_LOC(36, 36)
+#define MPT_RR_EN MPT_FIELD_LOC(37, 37)
+#define MPT_RW_EN MPT_FIELD_LOC(38, 38)
+#define MPT_LW_EN MPT_FIELD_LOC(39, 39)
+#define MPT_MW_CNT MPT_FIELD_LOC(63, 40)
+#define MPT_FRE MPT_FIELD_LOC(64, 64)
+#define MPT_PA MPT_FIELD_LOC(65, 65)
+#define MPT_ZBVA MPT_FIELD_LOC(66, 66)
+#define MPT_SHARE MPT_FIELD_LOC(67, 67)
+#define MPT_MR_MW MPT_FIELD_LOC(68, 68)
+#define MPT_BPD MPT_FIELD_LOC(69, 69)
+#define MPT_BQP MPT_FIELD_LOC(70, 70)
+#define MPT_INNER_PA_VLD MPT_FIELD_LOC(71, 71)
+#define MPT_MW_BIND_QPN MPT_FIELD_LOC(95, 72)
+#define MPT_BOUND_LKEY MPT_FIELD_LOC(127, 96)
+#define MPT_LEN_L MPT_FIELD_LOC(159, 128)
+#define MPT_LEN_H MPT_FIELD_LOC(191, 160)
+#define MPT_LKEY MPT_FIELD_LOC(223, 192)
+#define MPT_VA MPT_FIELD_LOC(287, 224)
+#define MPT_PBL_SIZE MPT_FIELD_LOC(319, 288)
+#define MPT_PBL_BA_L MPT_FIELD_LOC(351, 320)
+#define MPT_PBL_BA_H MPT_FIELD_LOC(380, 352)
+#define MPT_BLK_MODE MPT_FIELD_LOC(381, 381)
+#define MPT_RSV0 MPT_FIELD_LOC(383, 382)
+#define MPT_PA0_L MPT_FIELD_LOC(415, 384)
+#define MPT_PA0_H MPT_FIELD_LOC(441, 416)
+#define MPT_BOUND_VA MPT_FIELD_LOC(447, 442)
+#define MPT_PA1_L MPT_FIELD_LOC(479, 448)
+#define MPT_PA1_H MPT_FIELD_LOC(505, 480)
+#define MPT_PERSIST_EN MPT_FIELD_LOC(506, 506)
+#define MPT_RSV2 MPT_FIELD_LOC(507, 507)
+#define MPT_PBL_BUF_PG_SZ MPT_FIELD_LOC(511, 508)
+
+#define V2_MPT_BYTE_4_MPT_ST_S 0
+#define V2_MPT_BYTE_4_MPT_ST_M GENMASK(1, 0)
+
+#define V2_MPT_BYTE_4_PBL_HOP_NUM_S 2
+#define V2_MPT_BYTE_4_PBL_HOP_NUM_M GENMASK(3, 2)
+
+#define V2_MPT_BYTE_4_PBL_BA_PG_SZ_S 4
+#define V2_MPT_BYTE_4_PBL_BA_PG_SZ_M GENMASK(7, 4)
+
+#define V2_MPT_BYTE_4_PD_S 8
+#define V2_MPT_BYTE_4_PD_M GENMASK(31, 8)
+
+#define V2_MPT_BYTE_8_RA_EN_S 0
+
+#define V2_MPT_BYTE_8_R_INV_EN_S 1
+
+#define V2_MPT_BYTE_8_L_INV_EN_S 2
+
+#define V2_MPT_BYTE_8_BIND_EN_S 3
+
+#define V2_MPT_BYTE_8_ATOMIC_EN_S 4
+
+#define V2_MPT_BYTE_8_RR_EN_S 5
+
+#define V2_MPT_BYTE_8_RW_EN_S 6
+
+#define V2_MPT_BYTE_8_LW_EN_S 7
+
+#define V2_MPT_BYTE_12_FRE_S 0
+
+#define V2_MPT_BYTE_12_PA_S 1
+
+#define V2_MPT_BYTE_12_BPD_S 5
+
+#define V2_MPT_BYTE_12_BQP_S 6
+
+#define V2_MPT_BYTE_12_INNER_PA_VLD_S 7
+
+#define V2_MPT_BYTE_48_PBL_BA_H_S 0
+#define V2_MPT_BYTE_48_PBL_BA_H_M GENMASK(28, 0)
+
+#define V2_MPT_BYTE_48_BLK_MODE_S 29
+
+#define V2_MPT_BYTE_56_PA0_H_S 0
+#define V2_MPT_BYTE_56_PA0_H_M GENMASK(25, 0)
+
+#define V2_MPT_BYTE_64_PA1_H_S 0
+#define V2_MPT_BYTE_64_PA1_H_M GENMASK(25, 0)
+
+#define V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S 28
+#define V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M GENMASK(31, 28)
+
+struct hns_roce_v2_db {
+ __le32 data[2];
+};
+
+#define DB_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_db, h, l)
+
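+/* The fields below describe the same 64-bit doorbell for different
+ * doorbell types (SQ/RQ/SRQ, CQ and EQ), so some bit ranges
+ * intentionally overlap.
+ */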
+#define DB_TAG DB_FIELD_LOC(23, 0)
+#define DB_CMD DB_FIELD_LOC(27, 24)
+#define DB_FLAG DB_FIELD_LOC(31, 31)
+#define DB_PI DB_FIELD_LOC(47, 32)
+#define DB_SL DB_FIELD_LOC(50, 48)
+#define DB_CQ_CI DB_FIELD_LOC(55, 32)
+#define DB_CQ_NOTIFY DB_FIELD_LOC(56, 56)
+#define DB_CQ_CMD_SN DB_FIELD_LOC(58, 57)
+#define EQ_DB_TAG DB_FIELD_LOC(7, 0)
+#define EQ_DB_CMD DB_FIELD_LOC(17, 16)
+#define EQ_DB_CI DB_FIELD_LOC(55, 32)
+
+#define V2_DB_PRODUCER_IDX_S 0
+#define V2_DB_PRODUCER_IDX_M GENMASK(15, 0)
+
+#define V2_CQ_DB_CONS_IDX_S 0
+#define V2_CQ_DB_CONS_IDX_M GENMASK(23, 0)
+
+struct hns_roce_v2_ud_send_wqe {
+ __le32 byte_4;
+ __le32 msg_len;
+ __le32 immtdata;
+ __le32 byte_16;
+ __le32 byte_20;
+ __le32 byte_24;
+ __le32 qkey;
+ __le32 byte_32;
+ __le32 byte_36;
+ __le32 byte_40;
+ u8 dmac[ETH_ALEN];
+ u8 sgid_index;
+ u8 smac_index;
+ u8 dgid[GID_LEN_V2];
+};
+
+#define UD_SEND_WQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_ud_send_wqe, h, l)
+
+#define UD_SEND_WQE_OPCODE UD_SEND_WQE_FIELD_LOC(4, 0)
+#define UD_SEND_WQE_OWNER UD_SEND_WQE_FIELD_LOC(7, 7)
+#define UD_SEND_WQE_CQE UD_SEND_WQE_FIELD_LOC(8, 8)
+#define UD_SEND_WQE_SE UD_SEND_WQE_FIELD_LOC(11, 11)
+#define UD_SEND_WQE_PD UD_SEND_WQE_FIELD_LOC(119, 96)
+#define UD_SEND_WQE_SGE_NUM UD_SEND_WQE_FIELD_LOC(127, 120)
+#define UD_SEND_WQE_MSG_START_SGE_IDX UD_SEND_WQE_FIELD_LOC(151, 128)
+#define UD_SEND_WQE_UDPSPN UD_SEND_WQE_FIELD_LOC(191, 176)
+#define UD_SEND_WQE_DQPN UD_SEND_WQE_FIELD_LOC(247, 224)
+#define UD_SEND_WQE_VLAN UD_SEND_WQE_FIELD_LOC(271, 256)
+#define UD_SEND_WQE_HOPLIMIT UD_SEND_WQE_FIELD_LOC(279, 272)
+#define UD_SEND_WQE_TCLASS UD_SEND_WQE_FIELD_LOC(287, 280)
+#define UD_SEND_WQE_FLOW_LABEL UD_SEND_WQE_FIELD_LOC(307, 288)
+#define UD_SEND_WQE_SL UD_SEND_WQE_FIELD_LOC(311, 308)
+#define UD_SEND_WQE_VLAN_EN UD_SEND_WQE_FIELD_LOC(318, 318)
+#define UD_SEND_WQE_LBI UD_SEND_WQE_FIELD_LOC(319, 319)
+
+struct hns_roce_v2_rc_send_wqe {
+ __le32 byte_4;
+ __le32 msg_len;
+ union {
+ __le32 inv_key;
+ __le32 immtdata;
+ };
+ __le32 byte_16;
+ __le32 byte_20;
+ __le32 rkey;
+ __le64 va;
+};
+
+#define RC_SEND_WQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_rc_send_wqe, h, l)
+
+#define RC_SEND_WQE_OPCODE RC_SEND_WQE_FIELD_LOC(4, 0)
+#define RC_SEND_WQE_DB_SL_L RC_SEND_WQE_FIELD_LOC(6, 5)
+#define RC_SEND_WQE_DB_SL_H RC_SEND_WQE_FIELD_LOC(14, 13)
+#define RC_SEND_WQE_OWNER RC_SEND_WQE_FIELD_LOC(7, 7)
+#define RC_SEND_WQE_CQE RC_SEND_WQE_FIELD_LOC(8, 8)
+#define RC_SEND_WQE_FENCE RC_SEND_WQE_FIELD_LOC(9, 9)
+#define RC_SEND_WQE_SO RC_SEND_WQE_FIELD_LOC(10, 10)
+#define RC_SEND_WQE_SE RC_SEND_WQE_FIELD_LOC(11, 11)
+#define RC_SEND_WQE_INLINE RC_SEND_WQE_FIELD_LOC(12, 12)
+#define RC_SEND_WQE_WQE_INDEX RC_SEND_WQE_FIELD_LOC(30, 15)
+#define RC_SEND_WQE_FLAG RC_SEND_WQE_FIELD_LOC(31, 31)
+#define RC_SEND_WQE_XRC_SRQN RC_SEND_WQE_FIELD_LOC(119, 96)
+#define RC_SEND_WQE_SGE_NUM RC_SEND_WQE_FIELD_LOC(127, 120)
+#define RC_SEND_WQE_MSG_START_SGE_IDX RC_SEND_WQE_FIELD_LOC(151, 128)
+#define RC_SEND_WQE_INL_TYPE RC_SEND_WQE_FIELD_LOC(159, 159)
+
+struct hns_roce_wqe_frmr_seg {
+ __le32 pbl_size;
+ __le32 byte_40;
+};
+
+#define FRMR_WQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_wqe_frmr_seg, h, l)
+
+#define FRMR_PBL_SIZE FRMR_WQE_FIELD_LOC(31, 0)
+#define FRMR_BLOCK_SIZE FRMR_WQE_FIELD_LOC(35, 32)
+#define FRMR_PBL_BUF_PG_SZ FRMR_WQE_FIELD_LOC(39, 36)
+#define FRMR_BLK_MODE FRMR_WQE_FIELD_LOC(40, 40)
+#define FRMR_ZBVA FRMR_WQE_FIELD_LOC(41, 41)
+#define FRMR_BIND_EN FRMR_WQE_FIELD_LOC(42, 42)
+#define FRMR_ATOMIC FRMR_WQE_FIELD_LOC(43, 43)
+#define FRMR_RR FRMR_WQE_FIELD_LOC(44, 44)
+#define FRMR_RW FRMR_WQE_FIELD_LOC(45, 45)
+#define FRMR_LW FRMR_WQE_FIELD_LOC(46, 46)
+
+struct hns_roce_v2_wqe_data_seg {
+ __le32 len;
+ __le32 lkey;
+ __le64 addr;
+};
+
+struct hns_roce_query_version {
+ __le16 rocee_vendor_id;
+ __le16 rocee_hw_version;
+ __le32 rsv[5];
+};
+
+struct hns_roce_query_fw_info {
+ __le32 fw_ver;
+ __le32 rsv[5];
+};
+
+struct hns_roce_func_clear {
+ __le32 rst_funcid_en;
+ __le32 func_done;
+ __le32 rsv[4];
+};
+
+#define FUNC_CLEAR_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_func_clear, h, l)
+
+#define FUNC_CLEAR_RST_FUN_DONE FUNC_CLEAR_FIELD_LOC(32, 32)
+
+/* Each physical function manages up to 248 virtual functions, and each
+ * function takes up to 100ms to execute the clear. If an abnormal reset
+ * occurs, the clear is executed at most twice, so the whole process can
+ * take up to 249 * 2 * 100ms.
+ */
+#define HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS (249 * 2 * 100)
+#define HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL 40
+#define HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT 20
+
+#define CFG_LLM_A_BA_L CMQ_REQ_FIELD_LOC(31, 0)
+#define CFG_LLM_A_BA_H CMQ_REQ_FIELD_LOC(63, 32)
+#define CFG_LLM_A_DEPTH CMQ_REQ_FIELD_LOC(76, 64)
+#define CFG_LLM_A_PGSZ CMQ_REQ_FIELD_LOC(83, 80)
+#define CFG_LLM_A_INIT_EN CMQ_REQ_FIELD_LOC(84, 84)
+#define CFG_LLM_A_HEAD_BA_L CMQ_REQ_FIELD_LOC(127, 96)
+#define CFG_LLM_A_HEAD_BA_H CMQ_REQ_FIELD_LOC(147, 128)
+#define CFG_LLM_A_HEAD_NXTPTR CMQ_REQ_FIELD_LOC(159, 148)
+#define CFG_LLM_A_HEAD_PTR CMQ_REQ_FIELD_LOC(171, 160)
+#define CFG_LLM_B_TAIL_BA_L CMQ_REQ_FIELD_LOC(31, 0)
+#define CFG_LLM_B_TAIL_BA_H CMQ_REQ_FIELD_LOC(63, 32)
+#define CFG_LLM_B_TAIL_PTR CMQ_REQ_FIELD_LOC(75, 64)
+
+/* Fields of HNS_ROCE_OPC_CFG_GLOBAL_PARAM */
+#define CFG_GLOBAL_PARAM_1US_CYCLES CMQ_REQ_FIELD_LOC(9, 0)
+#define CFG_GLOBAL_PARAM_UDP_PORT CMQ_REQ_FIELD_LOC(31, 16)
+
+/*
+ * Fields of HNS_ROCE_OPC_QUERY_PF_RES, HNS_ROCE_OPC_QUERY_VF_RES
+ * and HNS_ROCE_OPC_ALLOC_VF_RES
+ */
+#define FUNC_RES_A_VF_ID CMQ_REQ_FIELD_LOC(7, 0)
+#define FUNC_RES_A_QPC_BT_IDX CMQ_REQ_FIELD_LOC(42, 32)
+#define FUNC_RES_A_QPC_BT_NUM CMQ_REQ_FIELD_LOC(59, 48)
+#define FUNC_RES_A_SRQC_BT_IDX CMQ_REQ_FIELD_LOC(72, 64)
+#define FUNC_RES_A_SRQC_BT_NUM CMQ_REQ_FIELD_LOC(89, 80)
+#define FUNC_RES_A_CQC_BT_IDX CMQ_REQ_FIELD_LOC(104, 96)
+#define FUNC_RES_A_CQC_BT_NUM CMQ_REQ_FIELD_LOC(121, 112)
+#define FUNC_RES_A_MPT_BT_IDX CMQ_REQ_FIELD_LOC(136, 128)
+#define FUNC_RES_A_MPT_BT_NUM CMQ_REQ_FIELD_LOC(153, 144)
+#define FUNC_RES_A_EQC_BT_IDX CMQ_REQ_FIELD_LOC(168, 160)
+#define FUNC_RES_A_EQC_BT_NUM CMQ_REQ_FIELD_LOC(185, 176)
+#define FUNC_RES_B_SMAC_IDX CMQ_REQ_FIELD_LOC(39, 32)
+#define FUNC_RES_B_SMAC_NUM CMQ_REQ_FIELD_LOC(48, 40)
+#define FUNC_RES_B_SGID_IDX CMQ_REQ_FIELD_LOC(71, 64)
+#define FUNC_RES_B_SGID_NUM CMQ_REQ_FIELD_LOC(80, 72)
+#define FUNC_RES_B_QID_IDX CMQ_REQ_FIELD_LOC(105, 96)
+#define FUNC_RES_B_QID_NUM CMQ_REQ_FIELD_LOC(122, 112)
+#define FUNC_RES_V_QID_NUM CMQ_REQ_FIELD_LOC(115, 112)
+
+#define FUNC_RES_B_SCCC_BT_IDX CMQ_REQ_FIELD_LOC(136, 128)
+#define FUNC_RES_B_SCCC_BT_NUM CMQ_REQ_FIELD_LOC(145, 137)
+#define FUNC_RES_B_GMV_BT_IDX CMQ_REQ_FIELD_LOC(167, 160)
+#define FUNC_RES_B_GMV_BT_NUM CMQ_REQ_FIELD_LOC(176, 168)
+#define FUNC_RES_V_GMV_BT_NUM CMQ_REQ_FIELD_LOC(184, 176)
+
+/* Fields of HNS_ROCE_OPC_QUERY_PF_TIMER_RES */
+#define PF_TIMER_RES_QPC_ITEM_IDX CMQ_REQ_FIELD_LOC(43, 32)
+#define PF_TIMER_RES_QPC_ITEM_NUM CMQ_REQ_FIELD_LOC(60, 48)
+#define PF_TIMER_RES_CQC_ITEM_IDX CMQ_REQ_FIELD_LOC(74, 64)
+#define PF_TIMER_RES_CQC_ITEM_NUM CMQ_REQ_FIELD_LOC(91, 80)
+
+struct hns_roce_vf_switch {
+ __le32 rocee_sel;
+ __le32 fun_id;
+ __le32 cfg;
+ __le32 resv1;
+ __le32 resv2;
+ __le32 resv3;
+};
+
+#define VF_SWITCH_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_vf_switch, h, l)
+
+#define VF_SWITCH_VF_ID VF_SWITCH_FIELD_LOC(42, 35)
+#define VF_SWITCH_ALW_LPBK VF_SWITCH_FIELD_LOC(65, 65)
+#define VF_SWITCH_ALW_LCL_LPBK VF_SWITCH_FIELD_LOC(66, 66)
+#define VF_SWITCH_ALW_DST_OVRD VF_SWITCH_FIELD_LOC(67, 67)
+
+struct hns_roce_post_mbox {
+ __le32 in_param_l;
+ __le32 in_param_h;
+ __le32 out_param_l;
+ __le32 out_param_h;
+ __le32 cmd_tag;
+ __le32 token_event_en;
+};
+
+struct hns_roce_mbox_status {
+ __le32 mb_status_hw_run;
+ __le32 rsv[5];
+};
+
+#define HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS 10000
+
+#define MB_ST_HW_RUN_M BIT(31)
+#define MB_ST_COMPLETE_M GENMASK(7, 0)
+
+#define MB_ST_COMPLETE_SUCC 1
+
+/* Fields of HNS_ROCE_OPC_CFG_BT_ATTR */
+#define CFG_BT_ATTR_QPC_BA_PGSZ CMQ_REQ_FIELD_LOC(3, 0)
+#define CFG_BT_ATTR_QPC_BUF_PGSZ CMQ_REQ_FIELD_LOC(7, 4)
+#define CFG_BT_ATTR_QPC_HOPNUM CMQ_REQ_FIELD_LOC(9, 8)
+#define CFG_BT_ATTR_SRQC_BA_PGSZ CMQ_REQ_FIELD_LOC(35, 32)
+#define CFG_BT_ATTR_SRQC_BUF_PGSZ CMQ_REQ_FIELD_LOC(39, 36)
+#define CFG_BT_ATTR_SRQC_HOPNUM CMQ_REQ_FIELD_LOC(41, 40)
+#define CFG_BT_ATTR_CQC_BA_PGSZ CMQ_REQ_FIELD_LOC(67, 64)
+#define CFG_BT_ATTR_CQC_BUF_PGSZ CMQ_REQ_FIELD_LOC(71, 68)
+#define CFG_BT_ATTR_CQC_HOPNUM CMQ_REQ_FIELD_LOC(73, 72)
+#define CFG_BT_ATTR_MPT_BA_PGSZ CMQ_REQ_FIELD_LOC(99, 96)
+#define CFG_BT_ATTR_MPT_BUF_PGSZ CMQ_REQ_FIELD_LOC(103, 100)
+#define CFG_BT_ATTR_MPT_HOPNUM CMQ_REQ_FIELD_LOC(105, 104)
+#define CFG_BT_ATTR_SCCC_BA_PGSZ CMQ_REQ_FIELD_LOC(131, 128)
+#define CFG_BT_ATTR_SCCC_BUF_PGSZ CMQ_REQ_FIELD_LOC(135, 132)
+#define CFG_BT_ATTR_SCCC_HOPNUM CMQ_REQ_FIELD_LOC(137, 136)
+
+/* Fields of HNS_ROCE_OPC_CFG_ENTRY_SIZE */
+#define CFG_HEM_ENTRY_SIZE_TYPE CMQ_REQ_FIELD_LOC(31, 0)
+enum {
+ HNS_ROCE_CFG_QPC_SIZE = BIT(0),
+ HNS_ROCE_CFG_SCCC_SIZE = BIT(1),
+};
+
+#define CFG_HEM_ENTRY_SIZE_VALUE CMQ_REQ_FIELD_LOC(191, 160)
+
+/* Fields of HNS_ROCE_OPC_CFG_GMV_BT */
+#define CFG_GMV_BT_BA_L CMQ_REQ_FIELD_LOC(31, 0)
+#define CFG_GMV_BT_BA_H CMQ_REQ_FIELD_LOC(51, 32)
+#define CFG_GMV_BT_IDX CMQ_REQ_FIELD_LOC(95, 64)
+
+/* Fields of HNS_ROCE_QUERY_RAM_ECC */
+#define QUERY_RAM_ECC_1BIT_ERR CMQ_REQ_FIELD_LOC(31, 0)
+#define QUERY_RAM_ECC_RES_TYPE CMQ_REQ_FIELD_LOC(63, 32)
+#define QUERY_RAM_ECC_TAG CMQ_REQ_FIELD_LOC(95, 64)
+
+struct hns_roce_cfg_sgid_tb {
+ __le32 table_idx_rsv;
+ __le32 vf_sgid_l;
+ __le32 vf_sgid_ml;
+ __le32 vf_sgid_mh;
+ __le32 vf_sgid_h;
+ __le32 vf_sgid_type_rsv;
+};
+
+#define SGID_TB_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_cfg_sgid_tb, h, l)
+
+#define CFG_SGID_TB_TABLE_IDX SGID_TB_FIELD_LOC(7, 0)
+#define CFG_SGID_TB_VF_SGID_TYPE SGID_TB_FIELD_LOC(161, 160)
+
+struct hns_roce_cfg_smac_tb {
+ __le32 tb_idx_rsv;
+ __le32 vf_smac_l;
+ __le32 vf_smac_h_rsv;
+ __le32 rsv[3];
+};
+
+#define SMAC_TB_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_cfg_smac_tb, h, l)
+
+#define CFG_SMAC_TB_IDX SMAC_TB_FIELD_LOC(7, 0)
+#define CFG_SMAC_TB_VF_SMAC_H SMAC_TB_FIELD_LOC(79, 64)
+
+struct hns_roce_cfg_gmv_tb_a {
+ __le32 vf_sgid_l;
+ __le32 vf_sgid_ml;
+ __le32 vf_sgid_mh;
+ __le32 vf_sgid_h;
+ __le32 vf_sgid_type_vlan;
+ __le32 resv;
+};
+
+#define GMV_TB_A_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_cfg_gmv_tb_a, h, l)
+
+#define GMV_TB_A_VF_SGID_TYPE GMV_TB_A_FIELD_LOC(129, 128)
+#define GMV_TB_A_VF_VLAN_EN GMV_TB_A_FIELD_LOC(130, 130)
+#define GMV_TB_A_VF_VLAN_ID GMV_TB_A_FIELD_LOC(155, 144)
+
+struct hns_roce_cfg_gmv_tb_b {
+ __le32 vf_smac_l;
+ __le32 vf_smac_h;
+ __le32 table_idx_rsv;
+ __le32 resv[3];
+};
+
+#define GMV_TB_B_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_cfg_gmv_tb_b, h, l)
+
+#define GMV_TB_B_SMAC_H GMV_TB_B_FIELD_LOC(47, 32)
+#define GMV_TB_B_SGID_IDX GMV_TB_B_FIELD_LOC(71, 64)
+
+#define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM_HIP08 5
+#define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM 6
+struct hns_roce_query_pf_caps_a {
+ u8 number_ports;
+ u8 local_ca_ack_delay;
+ __le16 max_sq_sg;
+ __le16 max_sq_inline;
+ __le16 max_rq_sg;
+ __le32 rsv0;
+ __le16 num_qpc_timer;
+ __le16 num_cqc_timer;
+ __le16 max_srq_sges;
+ u8 num_aeq_vectors;
+ u8 num_other_vectors;
+ u8 max_sq_desc_sz;
+ u8 max_rq_desc_sz;
+ u8 rsv1;
+ u8 cqe_sz;
+};
+
+struct hns_roce_query_pf_caps_b {
+ u8 mtpt_entry_sz;
+ u8 irrl_entry_sz;
+ u8 trrl_entry_sz;
+ u8 cqc_entry_sz;
+ u8 srqc_entry_sz;
+ u8 idx_entry_sz;
+ u8 sccc_sz;
+ u8 max_mtu;
+ __le16 qpc_sz;
+ __le16 qpc_timer_entry_sz;
+ __le16 cqc_timer_entry_sz;
+ u8 min_cqes;
+ u8 min_wqes;
+ __le32 page_size_cap;
+ u8 pkey_table_len;
+ u8 phy_num_uars;
+ u8 ctx_hop_num;
+ u8 pbl_hop_num;
+};
+
+struct hns_roce_query_pf_caps_c {
+ __le32 cap_flags_num_pds;
+ __le32 max_gid_num_cqs;
+ __le32 cq_depth;
+ __le32 num_mrws;
+ __le32 ord_num_qps;
+ __le16 sq_depth;
+ __le16 rq_depth;
+};
+
+#define PF_CAPS_C_FIELD_LOC(h, l) \
+ FIELD_LOC(struct hns_roce_query_pf_caps_c, h, l)
+
+#define PF_CAPS_C_NUM_PDS PF_CAPS_C_FIELD_LOC(19, 0)
+#define PF_CAPS_C_CAP_FLAGS PF_CAPS_C_FIELD_LOC(31, 20)
+#define PF_CAPS_C_NUM_CQS PF_CAPS_C_FIELD_LOC(51, 32)
+#define PF_CAPS_C_MAX_GID PF_CAPS_C_FIELD_LOC(60, 52)
+#define PF_CAPS_C_CQ_DEPTH PF_CAPS_C_FIELD_LOC(86, 64)
+#define PF_CAPS_C_NUM_XRCDS PF_CAPS_C_FIELD_LOC(91, 87)
+#define PF_CAPS_C_NUM_MRWS PF_CAPS_C_FIELD_LOC(115, 96)
+#define PF_CAPS_C_NUM_QPS PF_CAPS_C_FIELD_LOC(147, 128)
+#define PF_CAPS_C_MAX_ORD PF_CAPS_C_FIELD_LOC(155, 148)
+
+struct hns_roce_query_pf_caps_d {
+ __le32 wq_hop_num_max_srqs;
+ __le16 srq_depth;
+ __le16 cap_flags_ex;
+ __le32 num_ceqs_ceq_depth;
+ __le32 arm_st_aeq_depth;
+ __le32 num_uars_rsv_pds;
+ __le32 rsv_uars_rsv_qps;
+};
+
+#define PF_CAPS_D_FIELD_LOC(h, l) \
+ FIELD_LOC(struct hns_roce_query_pf_caps_d, h, l)
+
+#define PF_CAPS_D_NUM_SRQS PF_CAPS_D_FIELD_LOC(19, 0)
+#define PF_CAPS_D_RQWQE_HOP_NUM PF_CAPS_D_FIELD_LOC(21, 20)
+#define PF_CAPS_D_EX_SGE_HOP_NUM PF_CAPS_D_FIELD_LOC(23, 22)
+#define PF_CAPS_D_SQWQE_HOP_NUM PF_CAPS_D_FIELD_LOC(25, 24)
+#define PF_CAPS_D_CONG_CAP PF_CAPS_D_FIELD_LOC(29, 26)
+#define PF_CAPS_D_CEQ_DEPTH PF_CAPS_D_FIELD_LOC(85, 64)
+#define PF_CAPS_D_NUM_CEQS PF_CAPS_D_FIELD_LOC(95, 86)
+#define PF_CAPS_D_AEQ_DEPTH PF_CAPS_D_FIELD_LOC(117, 96)
+#define PF_CAPS_D_AEQ_ARM_ST PF_CAPS_D_FIELD_LOC(119, 118)
+#define PF_CAPS_D_CEQ_ARM_ST PF_CAPS_D_FIELD_LOC(121, 120)
+#define PF_CAPS_D_DEFAULT_ALG PF_CAPS_D_FIELD_LOC(127, 122)
+#define PF_CAPS_D_RSV_PDS PF_CAPS_D_FIELD_LOC(147, 128)
+#define PF_CAPS_D_NUM_UARS PF_CAPS_D_FIELD_LOC(155, 148)
+#define PF_CAPS_D_RSV_QPS PF_CAPS_D_FIELD_LOC(179, 160)
+#define PF_CAPS_D_RSV_UARS PF_CAPS_D_FIELD_LOC(187, 180)
+
+#define HNS_ROCE_CAP_FLAGS_EX_SHIFT 12
+
+struct hns_roce_congestion_algorithm {
+ u8 alg_sel;
+ u8 alg_sub_sel;
+ u8 dip_vld;
+ u8 wnd_mode_sel;
+};
+
+struct hns_roce_query_pf_caps_e {
+ __le32 chunk_size_shift_rsv_mrws;
+ __le32 rsv_cqs;
+ __le32 rsv_srqs;
+ __le32 rsv_lkey;
+ __le16 ceq_max_cnt;
+ __le16 ceq_period;
+ __le16 aeq_max_cnt;
+ __le16 aeq_period;
+};
+
+struct hns_roce_query_pf_caps_f {
+ __le32 max_ack_req_msg_len;
+ __le32 rsv[5];
+};
+
+#define PF_CAPS_E_FIELD_LOC(h, l) \
+ FIELD_LOC(struct hns_roce_query_pf_caps_e, h, l)
+
+#define PF_CAPS_E_RSV_MRWS PF_CAPS_E_FIELD_LOC(19, 0)
+#define PF_CAPS_E_CHUNK_SIZE_SHIFT PF_CAPS_E_FIELD_LOC(31, 20)
+#define PF_CAPS_E_RSV_CQS PF_CAPS_E_FIELD_LOC(51, 32)
+#define PF_CAPS_E_RSV_XRCDS PF_CAPS_E_FIELD_LOC(63, 52)
+#define PF_CAPS_E_RSV_SRQS PF_CAPS_E_FIELD_LOC(83, 64)
+#define PF_CAPS_E_RSV_LKEYS PF_CAPS_E_FIELD_LOC(115, 96)
+
+struct hns_roce_cmq_req {
+ __le32 data[6];
+};
+
+#define CMQ_REQ_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_cmq_req, h, l)
+
+struct hns_roce_cmq_desc {
+ __le16 opcode;
+ __le16 flag;
+ __le16 retval;
+ __le16 rsv;
+ union {
+ __le32 data[6];
+ struct {
+ __le32 own_func_num;
+ __le32 own_mac_id;
+ __le32 rsv[4];
+ } func_info;
+ };
+};
+
+struct hns_roce_v2_cmq_ring {
+ dma_addr_t desc_dma_addr;
+ struct hns_roce_cmq_desc *desc;
+ u32 head;
+ u16 buf_size;
+ u16 desc_num;
+ u8 flag;
+ spinlock_t lock; /* command queue lock */
+};
+
+struct hns_roce_v2_cmq {
+ struct hns_roce_v2_cmq_ring csq;
+ u16 tx_timeout;
+};
+
+struct hns_roce_link_table {
+ struct hns_roce_buf_list table;
+ struct hns_roce_buf *buf;
+};
+
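+/* An extended LLM entry packs the queue id into the top 12 bits and the
+ * page address, in units of 4KB, into the lower bits of a 64-bit word.
+ */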
+#define HNS_ROCE_EXT_LLM_ENTRY(addr, id) (((id) << (64 - 12)) | ((addr) >> 12))
+#define HNS_ROCE_EXT_LLM_MIN_PAGES(que_num) ((que_num) * 4 + 2)
+
+struct hns_roce_v2_free_mr {
+ struct hns_roce_qp *rsv_qp[HNS_ROCE_FREE_MR_USED_QP_NUM];
+ struct hns_roce_cq *rsv_cq;
+ struct hns_roce_pd *rsv_pd;
+ struct mutex mutex;
+};
+
+struct hns_roce_v2_priv {
+ struct hnae3_handle *handle;
+ struct hns_roce_v2_cmq cmq;
+ struct hns_roce_link_table ext_llm;
+ struct hns_roce_v2_free_mr free_mr;
+};
+
+struct hns_roce_dip {
+ u8 dgid[GID_LEN_V2];
+ u32 dip_idx;
+ u32 qp_cnt;
+};
+
+struct fmea_ram_ecc {
+ u32 is_ecc_err;
+ u32 res_type;
+ u32 index;
+};
+
+/* Only for the RNR timeout issue of HIP08 */
+#define HNS_ROCE_CLOCK_ADJUST 1000
+#define HNS_ROCE_MAX_CQ_PERIOD_HIP08 65
+#define HNS_ROCE_MAX_EQ_PERIOD 65
+#define HNS_ROCE_RNR_TIMER_10NS 1
+#define HNS_ROCE_1US_CFG 999
+#define HNS_ROCE_1NS_CFG 0
+
+#define HNS_ROCE_AEQ_DEFAULT_BURST_NUM 0x0
+#define HNS_ROCE_AEQ_DEFAULT_INTERVAL 0x0
+#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x0
+#define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x0
+
+#define HNS_ROCE_V2_EQ_STATE_INVALID 0
+#define HNS_ROCE_V2_EQ_STATE_VALID 1
+#define HNS_ROCE_V2_EQ_STATE_OVERFLOW 2
+#define HNS_ROCE_V2_EQ_STATE_FAILURE 3
+
+#define HNS_ROCE_V2_EQ_OVER_IGNORE_0 0
+#define HNS_ROCE_V2_EQ_OVER_IGNORE_1 1
+
+#define HNS_ROCE_V2_EQ_COALESCE_0 0
+#define HNS_ROCE_V2_EQ_COALESCE_1 1
+
+#define HNS_ROCE_V2_EQ_FIRED 0
+#define HNS_ROCE_V2_EQ_ARMED 1
+#define HNS_ROCE_V2_EQ_ALWAYS_ARMED 3
+
+#define HNS_ROCE_EQ_INIT_EQE_CNT 0
+#define HNS_ROCE_EQ_INIT_PROD_IDX 0
+#define HNS_ROCE_EQ_INIT_REPORT_TIMER 0
+#define HNS_ROCE_EQ_INIT_MSI_IDX 0
+#define HNS_ROCE_EQ_INIT_CONS_IDX 0
+#define HNS_ROCE_EQ_INIT_NXT_EQE_BA 0
+
+#define HNS_ROCE_V2_COMP_EQE_NUM 0x1000
+#define HNS_ROCE_V2_ASYNC_EQE_NUM 0x1000
+
+#define HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S 0
+
+#define HNS_ROCE_EQ_DB_CMD_AEQ 0x0
+#define HNS_ROCE_EQ_DB_CMD_AEQ_ARMED 0x1
+#define HNS_ROCE_EQ_DB_CMD_CEQ 0x2
+#define HNS_ROCE_EQ_DB_CMD_CEQ_ARMED 0x3
+
+#define EQ_ENABLE 1
+#define EQ_DISABLE 0
+
+#define EQ_REG_OFFSET 0x4
+
+#define HNS_ROCE_INT_NAME_LEN 32
+#define HNS_ROCE_V2_EQN_M GENMASK(23, 0)
+
+#define HNS_ROCE_V2_VF_ABN_INT_EN_S 0
+#define HNS_ROCE_V2_VF_ABN_INT_EN_M GENMASK(0, 0)
+#define HNS_ROCE_V2_VF_ABN_INT_ST_M GENMASK(2, 0)
+#define HNS_ROCE_V2_VF_ABN_INT_CFG_M GENMASK(2, 0)
+#define HNS_ROCE_V2_VF_EVENT_INT_EN_M GENMASK(0, 0)
+
+struct hns_roce_eq_context {
+ __le32 data[16];
+};
+
+#define EQC_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_eq_context, h, l)
+
+#define EQC_EQ_ST EQC_FIELD_LOC(1, 0)
+#define EQC_EQE_HOP_NUM EQC_FIELD_LOC(3, 2)
+#define EQC_OVER_IGNORE EQC_FIELD_LOC(4, 4)
+#define EQC_COALESCE EQC_FIELD_LOC(5, 5)
+#define EQC_ARM_ST EQC_FIELD_LOC(7, 6)
+#define EQC_EQN EQC_FIELD_LOC(15, 8)
+#define EQC_EQE_CNT EQC_FIELD_LOC(31, 16)
+#define EQC_EQE_BA_PG_SZ EQC_FIELD_LOC(35, 32)
+#define EQC_EQE_BUF_PG_SZ EQC_FIELD_LOC(39, 36)
+#define EQC_EQ_PROD_INDX EQC_FIELD_LOC(63, 40)
+#define EQC_EQ_MAX_CNT EQC_FIELD_LOC(79, 64)
+#define EQC_EQ_PERIOD EQC_FIELD_LOC(95, 80)
+#define EQC_EQE_REPORT_TIMER EQC_FIELD_LOC(127, 96)
+#define EQC_EQE_BA_L EQC_FIELD_LOC(159, 128)
+#define EQC_EQE_BA_H EQC_FIELD_LOC(188, 160)
+#define EQC_SHIFT EQC_FIELD_LOC(199, 192)
+#define EQC_MSI_INDX EQC_FIELD_LOC(207, 200)
+#define EQC_CUR_EQE_BA_L EQC_FIELD_LOC(223, 208)
+#define EQC_CUR_EQE_BA_M EQC_FIELD_LOC(255, 224)
+#define EQC_CUR_EQE_BA_H EQC_FIELD_LOC(259, 256)
+#define EQC_EQ_CONS_INDX EQC_FIELD_LOC(287, 264)
+#define EQC_NEX_EQE_BA_L EQC_FIELD_LOC(319, 288)
+#define EQC_NEX_EQE_BA_H EQC_FIELD_LOC(339, 320)
+#define EQC_EQE_SIZE EQC_FIELD_LOC(341, 340)
+
+#define MAX_SERVICE_LEVEL 0x7
+
+struct hns_roce_wqe_atomic_seg {
+ __le64 fetchadd_swap_data;
+ __le64 cmp_data;
+};
+
+struct hns_roce_sccc_clr {
+ __le32 qpn;
+ __le32 rsv[5];
+};
+
+struct hns_roce_sccc_clr_done {
+ __le32 clr_done;
+ __le32 rsv[5];
+};
+
+int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
+
+static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2],
+ void __iomem *dest)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle))
+ hns_roce_write64_k(val, dest);
+}
+
+#endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
new file mode 100644
index 000000000000..d50f36f8a110
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -0,0 +1,1180 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_cache.h>
+#include "hns_roce_common.h"
+#include "hns_roce_device.h"
+#include "hns_roce_hem.h"
+#include "hns_roce_hw_v2.h"
+
+static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port,
+ const u8 *addr)
+{
+ u8 phy_port;
+ u32 i;
+
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ return 0;
+
+ if (!memcmp(hr_dev->dev_addr[port], addr, ETH_ALEN))
+ return 0;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ hr_dev->dev_addr[port][i] = addr[i];
+
+ phy_port = hr_dev->iboe.phy_port[port];
+ return hr_dev->hw->set_mac(hr_dev, phy_port, addr);
+}
+
+static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
+ u32 port = attr->port_num - 1;
+ int ret;
+
+ if (port >= hr_dev->caps.num_ports)
+ return -EINVAL;
+
+ ret = hr_dev->hw->set_gid(hr_dev, attr->index, &attr->gid, attr);
+
+ return ret;
+}
+
+static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
+ u32 port = attr->port_num - 1;
+ int ret;
+
+ if (port >= hr_dev->caps.num_ports)
+ return -EINVAL;
+
+ ret = hr_dev->hw->set_gid(hr_dev, attr->index, NULL, NULL);
+
+ return ret;
+}
+
+static int handle_en_event(struct hns_roce_dev *hr_dev, u32 port,
+ unsigned long event)
+{
+ struct device *dev = hr_dev->dev;
+ struct net_device *netdev;
+ int ret = 0;
+
+ netdev = hr_dev->iboe.netdevs[port];
+ if (!netdev) {
+ dev_err(dev, "can't find netdev on port(%u)!\n", port);
+ return -ENODEV;
+ }
+
+ switch (event) {
+ case NETDEV_UP:
+ case NETDEV_CHANGE:
+ case NETDEV_REGISTER:
+ case NETDEV_CHANGEADDR:
+ ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
+ break;
+ case NETDEV_DOWN:
+ /*
+ * In the v1 engine, only closing all ports together is supported.
+ */
+ break;
+ default:
+ dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event));
+ break;
+ }
+
+ return ret;
+}
+
+static int hns_roce_netdev_event(struct notifier_block *self,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct hns_roce_ib_iboe *iboe = NULL;
+ struct hns_roce_dev *hr_dev = NULL;
+ int ret;
+ u32 port;
+
+ hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
+ iboe = &hr_dev->iboe;
+
+ for (port = 0; port < hr_dev->caps.num_ports; port++) {
+ if (dev == iboe->netdevs[port]) {
+ ret = handle_en_event(hr_dev, port, event);
+ if (ret)
+ return NOTIFY_DONE;
+ break;
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
+{
+ int ret;
+ u8 i;
+
+ for (i = 0; i < hr_dev->caps.num_ports; i++) {
+ ret = hns_roce_set_mac(hr_dev, i,
+ hr_dev->iboe.netdevs[i]->dev_addr);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hns_roce_query_device(struct ib_device *ib_dev,
+ struct ib_device_attr *props,
+ struct ib_udata *uhw)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+
+ memset(props, 0, sizeof(*props));
+
+ props->fw_ver = hr_dev->caps.fw_ver;
+ props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
+ props->max_mr_size = (u64)(~(0ULL));
+ props->page_size_cap = hr_dev->caps.page_size_cap;
+ props->vendor_id = hr_dev->vendor_id;
+ props->vendor_part_id = hr_dev->vendor_part_id;
+ props->hw_ver = hr_dev->hw_rev;
+ props->max_qp = hr_dev->caps.num_qps;
+ props->max_qp_wr = hr_dev->caps.max_wqes;
+ props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
+ IB_DEVICE_RC_RNR_NAK_GEN;
+ props->max_send_sge = hr_dev->caps.max_sq_sg;
+ props->max_recv_sge = hr_dev->caps.max_rq_sg;
+ props->max_sge_rd = hr_dev->caps.max_sq_sg;
+ props->max_cq = hr_dev->caps.num_cqs;
+ props->max_cqe = hr_dev->caps.max_cqes;
+ props->max_mr = hr_dev->caps.num_mtpts;
+ props->max_pd = hr_dev->caps.num_pds;
+ props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
+ props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
+ props->atomic_cap = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_ATOMIC ?
+ IB_ATOMIC_HCA : IB_ATOMIC_NONE;
+ props->max_pkeys = 1;
+ props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
+ props->max_ah = INT_MAX;
+ props->cq_caps.max_cq_moderation_period = HNS_ROCE_MAX_CQ_PERIOD;
+ props->cq_caps.max_cq_moderation_count = HNS_ROCE_MAX_CQ_COUNT;
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+ props->cq_caps.max_cq_moderation_period = HNS_ROCE_MAX_CQ_PERIOD_HIP08;
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
+ props->max_srq = hr_dev->caps.num_srqs;
+ props->max_srq_wr = hr_dev->caps.max_srq_wrs;
+ props->max_srq_sge = hr_dev->caps.max_srq_sges;
+ }
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR &&
+ hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+ props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
+ props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
+ }
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
+ props->device_cap_flags |= IB_DEVICE_XRC;
+
+ return 0;
+}
+
+static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,
+ struct ib_port_attr *props)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+ struct device *dev = hr_dev->dev;
+ struct net_device *net_dev;
+ unsigned long flags;
+ enum ib_mtu mtu;
+ u32 port;
+ int ret;
+
+ port = port_num - 1;
+
+ /* props being zeroed by the caller, avoid zeroing it here */
+
+ props->max_mtu = hr_dev->caps.max_mtu;
+ props->gid_tbl_len = hr_dev->caps.gid_table_len[port];
+ props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
+ IB_PORT_VENDOR_CLASS_SUP |
+ IB_PORT_BOOT_MGMT_SUP;
+ props->max_msg_sz = HNS_ROCE_MAX_MSG_LEN;
+ props->pkey_tbl_len = 1;
+ ret = ib_get_eth_speed(ib_dev, port_num, &props->active_speed,
+ &props->active_width);
+ if (ret)
+ ibdev_warn(ib_dev, "failed to get speed, ret = %d.\n", ret);
+
+ spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+
+ net_dev = hr_dev->iboe.netdevs[port];
+ if (!net_dev) {
+ spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+ dev_err(dev, "find netdev %u failed!\n", port);
+ return -EINVAL;
+ }
+
+ mtu = iboe_get_mtu(net_dev->mtu);
+ props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
+ props->state = netif_running(net_dev) && netif_carrier_ok(net_dev) ?
+ IB_PORT_ACTIVE :
+ IB_PORT_DOWN;
+ props->phys_state = props->state == IB_PORT_ACTIVE ?
+ IB_PORT_PHYS_STATE_LINK_UP :
+ IB_PORT_PHYS_STATE_DISABLED;
+
+ spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+
+ return 0;
+}
+
+static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
+ u32 port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+static int hns_roce_query_pkey(struct ib_device *ib_dev, u32 port, u16 index,
+ u16 *pkey)
+{
+ if (index > 0)
+ return -EINVAL;
+
+ *pkey = PKEY_ID;
+
+ return 0;
+}
+
+static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
+ struct ib_device_modify *props)
+{
+ unsigned long flags;
+
+ if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
+ return -EOPNOTSUPP;
+
+ if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
+ spin_lock_irqsave(&to_hr_dev(ib_dev)->sm_lock, flags);
+ memcpy(ib_dev->node_desc, props->node_desc, NODE_DESC_SIZE);
+ spin_unlock_irqrestore(&to_hr_dev(ib_dev)->sm_lock, flags);
+ }
+
+ return 0;
+}
+
+struct hns_user_mmap_entry *
+hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
+ size_t length,
+ enum hns_roce_mmap_type mmap_type)
+{
+ struct hns_user_mmap_entry *entry;
+ int ret;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return NULL;
+
+ entry->address = address;
+ entry->mmap_type = mmap_type;
+
+ switch (mmap_type) {
+ /* pgoff 0 must be used by DB for compatibility */
+ case HNS_ROCE_MMAP_TYPE_DB:
+ ret = rdma_user_mmap_entry_insert_exact(
+ ucontext, &entry->rdma_entry, length, 0);
+ break;
+ case HNS_ROCE_MMAP_TYPE_DWQE:
+ ret = rdma_user_mmap_entry_insert_range(
+ ucontext, &entry->rdma_entry, length, 1,
+ U32_MAX);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret) {
+ kfree(entry);
+ return NULL;
+ }
+
+ return entry;
+}
+
+static void hns_roce_dealloc_uar_entry(struct hns_roce_ucontext *context)
+{
+ if (context->db_mmap_entry)
+ rdma_user_mmap_entry_remove(
+ &context->db_mmap_entry->rdma_entry);
+}
+
+static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
+{
+ struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
+ u64 address;
+
+ address = context->uar.pfn << PAGE_SHIFT;
+ context->db_mmap_entry = hns_roce_user_mmap_entry_insert(
+ uctx, address, PAGE_SIZE, HNS_ROCE_MMAP_TYPE_DB);
+ if (!context->db_mmap_entry)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
+ struct ib_udata *udata)
+{
+ struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
+ struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
+ struct hns_roce_ib_alloc_ucontext_resp resp = {};
+ struct hns_roce_ib_alloc_ucontext ucmd = {};
+ int ret = -EAGAIN;
+
+ if (!hr_dev->active)
+ goto error_out;
+
+ resp.qp_tab_size = hr_dev->caps.num_qps;
+ resp.srq_tab_size = hr_dev->caps.num_srqs;
+
+ ret = ib_copy_from_udata(&ucmd, udata,
+ min(udata->inlen, sizeof(ucmd)));
+ if (ret)
+ goto error_out;
+
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ context->config = ucmd.config & HNS_ROCE_EXSGE_FLAGS;
+
+ if (context->config & HNS_ROCE_EXSGE_FLAGS) {
+ resp.config |= HNS_ROCE_RSP_EXSGE_FLAGS;
+ resp.max_inline_data = hr_dev->caps.max_sq_inline;
+ }
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
+ context->config |= ucmd.config & HNS_ROCE_RQ_INLINE_FLAGS;
+ if (context->config & HNS_ROCE_RQ_INLINE_FLAGS)
+ resp.config |= HNS_ROCE_RSP_RQ_INLINE_FLAGS;
+ }
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQE_INLINE) {
+ context->config |= ucmd.config & HNS_ROCE_CQE_INLINE_FLAGS;
+ if (context->config & HNS_ROCE_CQE_INLINE_FLAGS)
+ resp.config |= HNS_ROCE_RSP_CQE_INLINE_FLAGS;
+ }
+
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ resp.congest_type = hr_dev->caps.cong_cap;
+
+ ret = hns_roce_uar_alloc(hr_dev, &context->uar);
+ if (ret)
+ goto error_out;
+
+ ret = hns_roce_alloc_uar_entry(uctx);
+ if (ret)
+ goto error_fail_uar_entry;
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
+ hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
+ INIT_LIST_HEAD(&context->page_list);
+ mutex_init(&context->page_mutex);
+ }
+
+ resp.cqe_size = hr_dev->caps.cqe_sz;
+
+ ret = ib_copy_to_udata(udata, &resp,
+ min(udata->outlen, sizeof(resp)));
+ if (ret)
+ goto error_fail_copy_to_udata;
+
+ return 0;
+
+error_fail_copy_to_udata:
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
+ hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
+ mutex_destroy(&context->page_mutex);
+ hns_roce_dealloc_uar_entry(context);
+
+error_fail_uar_entry:
+ ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
+
+error_out:
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_UCTX_ALLOC_ERR_CNT]);
+
+ return ret;
+}
+
+static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
+{
+ struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
+ hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
+ mutex_destroy(&context->page_mutex);
+
+ hns_roce_dealloc_uar_entry(context);
+
+ ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
+}
+
+static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct hns_user_mmap_entry *entry;
+ phys_addr_t pfn;
+ pgprot_t prot;
+ int ret;
+
+ if (hr_dev->dis_db) {
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MMAP_ERR_CNT]);
+ return -EPERM;
+ }
+
+ rdma_entry = rdma_user_mmap_entry_get_pgoff(uctx, vma->vm_pgoff);
+ if (!rdma_entry) {
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MMAP_ERR_CNT]);
+ return -EINVAL;
+ }
+
+ entry = to_hns_mmap(rdma_entry);
+ pfn = entry->address >> PAGE_SHIFT;
+
+ switch (entry->mmap_type) {
+ case HNS_ROCE_MMAP_TYPE_DB:
+ case HNS_ROCE_MMAP_TYPE_DWQE:
+ prot = pgprot_device(vma->vm_page_prot);
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE,
+ prot, rdma_entry);
+
+out:
+ rdma_user_mmap_entry_put(rdma_entry);
+ if (ret)
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MMAP_ERR_CNT]);
+
+ return ret;
+}
+
+static void hns_roce_free_mmap(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct hns_user_mmap_entry *entry = to_hns_mmap(rdma_entry);
+
+ kfree(entry);
+}
+
+static int hns_roce_port_immutable(struct ib_device *ib_dev, u32 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int ret;
+
+ ret = ib_query_port(ib_dev, port_num, &attr);
+ if (ret)
+ return ret;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
+ if (to_hr_dev(ib_dev)->caps.flags & HNS_ROCE_CAP_FLAG_ROCE_V1_V2)
+ immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+
+ return 0;
+}
+
+static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext)
+{
+}
+
+static void hns_roce_get_fw_ver(struct ib_device *device, char *str)
+{
+ u64 fw_ver = to_hr_dev(device)->caps.fw_ver;
+ unsigned int major, minor, sub_minor;
+
+ major = upper_32_bits(fw_ver);
+ minor = high_16_bits(lower_32_bits(fw_ver));
+ sub_minor = low_16_bits(fw_ver);
+
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%04u", major, minor,
+ sub_minor);
+}
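+
+/*
+ * For illustration with a made-up value: fw_ver = 0x0000000100020003 splits
+ * into major 1 (bits 63:32), minor 2 (bits 31:16) and sub_minor 3 (bits 15:0),
+ * and is printed as "1.2.0003".
+ */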
+
+#define HNS_ROCE_HW_CNT(ename, cname) \
+ [HNS_ROCE_HW_##ename##_CNT].name = cname
+
+static const struct rdma_stat_desc hns_roce_port_stats_descs[] = {
+ HNS_ROCE_HW_CNT(RX_RC_PKT, "rx_rc_pkt"),
+ HNS_ROCE_HW_CNT(RX_UC_PKT, "rx_uc_pkt"),
+ HNS_ROCE_HW_CNT(RX_UD_PKT, "rx_ud_pkt"),
+ HNS_ROCE_HW_CNT(RX_XRC_PKT, "rx_xrc_pkt"),
+ HNS_ROCE_HW_CNT(RX_PKT, "rx_pkt"),
+ HNS_ROCE_HW_CNT(RX_ERR_PKT, "rx_err_pkt"),
+ HNS_ROCE_HW_CNT(RX_CNP_PKT, "rx_cnp_pkt"),
+ HNS_ROCE_HW_CNT(TX_RC_PKT, "tx_rc_pkt"),
+ HNS_ROCE_HW_CNT(TX_UC_PKT, "tx_uc_pkt"),
+ HNS_ROCE_HW_CNT(TX_UD_PKT, "tx_ud_pkt"),
+ HNS_ROCE_HW_CNT(TX_XRC_PKT, "tx_xrc_pkt"),
+ HNS_ROCE_HW_CNT(TX_PKT, "tx_pkt"),
+ HNS_ROCE_HW_CNT(TX_ERR_PKT, "tx_err_pkt"),
+ HNS_ROCE_HW_CNT(TX_CNP_PKT, "tx_cnp_pkt"),
+ HNS_ROCE_HW_CNT(TRP_GET_MPT_ERR_PKT, "trp_get_mpt_err_pkt"),
+ HNS_ROCE_HW_CNT(TRP_GET_IRRL_ERR_PKT, "trp_get_irrl_err_pkt"),
+ HNS_ROCE_HW_CNT(ECN_DB, "ecn_doorbell"),
+ HNS_ROCE_HW_CNT(RX_BUF, "rx_buffer"),
+ HNS_ROCE_HW_CNT(TRP_RX_SOF, "trp_rx_sof"),
+ HNS_ROCE_HW_CNT(CQ_CQE, "cq_cqe"),
+ HNS_ROCE_HW_CNT(CQ_POE, "cq_poe"),
+ HNS_ROCE_HW_CNT(CQ_NOTIFY, "cq_notify"),
+};
+
+static struct rdma_hw_stats *hns_roce_alloc_hw_port_stats(
+ struct ib_device *device, u32 port_num)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(device);
+
+ if (port_num > hr_dev->caps.num_ports) {
+ ibdev_err(device, "invalid port num.\n");
+ return NULL;
+ }
+
+ return rdma_alloc_hw_stats_struct(hns_roce_port_stats_descs,
+ ARRAY_SIZE(hns_roce_port_stats_descs),
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static int hns_roce_get_hw_stats(struct ib_device *device,
+ struct rdma_hw_stats *stats,
+ u32 port, int index)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(device);
+ int num_counters = HNS_ROCE_HW_CNT_TOTAL;
+ int ret;
+
+ if (port == 0)
+ return 0;
+
+ if (port > hr_dev->caps.num_ports)
+ return -EINVAL;
+
+ ret = hr_dev->hw->query_hw_counter(hr_dev, stats->value, port,
+ &num_counters);
+ if (ret) {
+ ibdev_err(device, "failed to query hw counter, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ return num_counters;
+}
+
+static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;
+
+ hr_dev->active = false;
+ unregister_netdevice_notifier(&iboe->nb);
+ ib_unregister_device(&hr_dev->ib_dev);
+}
+
+static const struct ib_device_ops hns_roce_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_HNS,
+ .uverbs_abi_ver = 1,
+ .uverbs_no_driver_id_binding = 1,
+
+ .get_dev_fw_str = hns_roce_get_fw_ver,
+ .add_gid = hns_roce_add_gid,
+ .alloc_pd = hns_roce_alloc_pd,
+ .alloc_ucontext = hns_roce_alloc_ucontext,
+ .create_ah = hns_roce_create_ah,
+ .create_user_ah = hns_roce_create_ah,
+ .create_cq = hns_roce_create_cq,
+ .create_qp = hns_roce_create_qp,
+ .dealloc_pd = hns_roce_dealloc_pd,
+ .dealloc_ucontext = hns_roce_dealloc_ucontext,
+ .del_gid = hns_roce_del_gid,
+ .dereg_mr = hns_roce_dereg_mr,
+ .destroy_ah = hns_roce_destroy_ah,
+ .destroy_cq = hns_roce_destroy_cq,
+ .disassociate_ucontext = hns_roce_disassociate_ucontext,
+ .get_dma_mr = hns_roce_get_dma_mr,
+ .get_link_layer = hns_roce_get_link_layer,
+ .get_port_immutable = hns_roce_port_immutable,
+ .mmap = hns_roce_mmap,
+ .mmap_free = hns_roce_free_mmap,
+ .modify_device = hns_roce_modify_device,
+ .modify_qp = hns_roce_modify_qp,
+ .query_ah = hns_roce_query_ah,
+ .query_device = hns_roce_query_device,
+ .query_pkey = hns_roce_query_pkey,
+ .query_port = hns_roce_query_port,
+ .reg_user_mr = hns_roce_reg_user_mr,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
+ INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, hns_roce_qp, ibqp),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
+};
+
+static const struct ib_device_ops hns_roce_dev_hw_stats_ops = {
+ .alloc_hw_port_stats = hns_roce_alloc_hw_port_stats,
+ .get_hw_stats = hns_roce_get_hw_stats,
+};
+
+static const struct ib_device_ops hns_roce_dev_mr_ops = {
+ .rereg_user_mr = hns_roce_rereg_user_mr,
+};
+
+static const struct ib_device_ops hns_roce_dev_frmr_ops = {
+ .alloc_mr = hns_roce_alloc_mr,
+ .map_mr_sg = hns_roce_map_mr_sg,
+};
+
+static const struct ib_device_ops hns_roce_dev_srq_ops = {
+ .create_srq = hns_roce_create_srq,
+ .destroy_srq = hns_roce_destroy_srq,
+
+ INIT_RDMA_OBJ_SIZE(ib_srq, hns_roce_srq, ibsrq),
+};
+
+static const struct ib_device_ops hns_roce_dev_xrcd_ops = {
+ .alloc_xrcd = hns_roce_alloc_xrcd,
+ .dealloc_xrcd = hns_roce_dealloc_xrcd,
+
+ INIT_RDMA_OBJ_SIZE(ib_xrcd, hns_roce_xrcd, ibxrcd),
+};
+
+static const struct ib_device_ops hns_roce_dev_restrack_ops = {
+ .fill_res_cq_entry = hns_roce_fill_res_cq_entry,
+ .fill_res_cq_entry_raw = hns_roce_fill_res_cq_entry_raw,
+ .fill_res_qp_entry = hns_roce_fill_res_qp_entry,
+ .fill_res_qp_entry_raw = hns_roce_fill_res_qp_entry_raw,
+ .fill_res_mr_entry = hns_roce_fill_res_mr_entry,
+ .fill_res_mr_entry_raw = hns_roce_fill_res_mr_entry_raw,
+ .fill_res_srq_entry = hns_roce_fill_res_srq_entry,
+ .fill_res_srq_entry_raw = hns_roce_fill_res_srq_entry_raw,
+};
+
+static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
+{
+ int ret;
+ struct hns_roce_ib_iboe *iboe = NULL;
+ struct ib_device *ib_dev = NULL;
+ struct device *dev = hr_dev->dev;
+ unsigned int i;
+
+ iboe = &hr_dev->iboe;
+ spin_lock_init(&iboe->lock);
+
+ ib_dev = &hr_dev->ib_dev;
+
+ ib_dev->node_type = RDMA_NODE_IB_CA;
+ ib_dev->dev.parent = dev;
+
+ ib_dev->phys_port_cnt = hr_dev->caps.num_ports;
+ ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey;
+ ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors;
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR)
+ ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
+ ib_set_device_ops(ib_dev, &hns_roce_dev_frmr_ops);
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
+ ib_set_device_ops(ib_dev, &hns_roce_dev_srq_ops);
+ ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_srq_ops);
+ }
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
+ ib_set_device_ops(ib_dev, &hns_roce_dev_xrcd_ops);
+
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09 &&
+ !hr_dev->is_vf)
+ ib_set_device_ops(ib_dev, &hns_roce_dev_hw_stats_ops);
+
+ ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
+ ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
+ ib_set_device_ops(ib_dev, &hns_roce_dev_restrack_ops);
+ for (i = 0; i < hr_dev->caps.num_ports; i++) {
+ if (!hr_dev->iboe.netdevs[i])
+ continue;
+
+ ret = ib_device_set_netdev(ib_dev, hr_dev->iboe.netdevs[i],
+ i + 1);
+ if (ret)
+ return ret;
+ }
+ dma_set_max_seg_size(dev, SZ_2G);
+ ret = ib_register_device(ib_dev, "hns_%d", dev);
+ if (ret) {
+ dev_err(dev, "ib_register_device failed!\n");
+ return ret;
+ }
+
+ ret = hns_roce_setup_mtu_mac(hr_dev);
+ if (ret) {
+ dev_err(dev, "setup_mtu_mac failed!\n");
+ goto error_failed_setup_mtu_mac;
+ }
+
+ iboe->nb.notifier_call = hns_roce_netdev_event;
+ ret = register_netdevice_notifier(&iboe->nb);
+ if (ret) {
+ dev_err(dev, "register_netdevice_notifier failed!\n");
+ goto error_failed_setup_mtu_mac;
+ }
+
+ hr_dev->active = true;
+ return 0;
+
+error_failed_setup_mtu_mac:
+ ib_unregister_device(ib_dev);
+
+ return ret;
+}
+
+static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
+ HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
+ hr_dev->caps.num_mtpts);
+ if (ret) {
+ dev_err(dev, "failed to init MTPT context memory, aborting.\n");
+ return ret;
+ }
+
+ ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
+ HEM_TYPE_QPC, hr_dev->caps.qpc_sz,
+ hr_dev->caps.num_qps);
+ if (ret) {
+ dev_err(dev, "failed to init QP context memory, aborting.\n");
+ goto err_unmap_dmpt;
+ }
+
+ ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.irrl_table,
+ HEM_TYPE_IRRL,
+ hr_dev->caps.irrl_entry_sz *
+ hr_dev->caps.max_qp_init_rdma,
+ hr_dev->caps.num_qps);
+ if (ret) {
+ dev_err(dev, "failed to init irrl_table memory, aborting.\n");
+ goto err_unmap_qp;
+ }
+
+ if (hr_dev->caps.trrl_entry_sz) {
+ ret = hns_roce_init_hem_table(hr_dev,
+ &hr_dev->qp_table.trrl_table,
+ HEM_TYPE_TRRL,
+ hr_dev->caps.trrl_entry_sz *
+ hr_dev->caps.max_qp_dest_rdma,
+ hr_dev->caps.num_qps);
+ if (ret) {
+ dev_err(dev,
+ "failed to init trrl_table memory, aborting.\n");
+ goto err_unmap_irrl;
+ }
+ }
+
+ ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
+ HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
+ hr_dev->caps.num_cqs);
+ if (ret) {
+ dev_err(dev, "failed to init CQ context memory, aborting.\n");
+ goto err_unmap_trrl;
+ }
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
+ ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table,
+ HEM_TYPE_SRQC,
+ hr_dev->caps.srqc_entry_sz,
+ hr_dev->caps.num_srqs);
+ if (ret) {
+ dev_err(dev,
+ "failed to init SRQ context memory, aborting.\n");
+ goto err_unmap_cq;
+ }
+ }
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
+ ret = hns_roce_init_hem_table(hr_dev,
+ &hr_dev->qp_table.sccc_table,
+ HEM_TYPE_SCCC,
+ hr_dev->caps.sccc_sz,
+ hr_dev->caps.num_qps);
+ if (ret) {
+ dev_err(dev,
+ "failed to init SCC context memory, aborting.\n");
+ goto err_unmap_srq;
+ }
+ }
+
+ if (hr_dev->caps.qpc_timer_entry_sz) {
+ ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
+ HEM_TYPE_QPC_TIMER,
+ hr_dev->caps.qpc_timer_entry_sz,
+ hr_dev->caps.qpc_timer_bt_num);
+ if (ret) {
+ dev_err(dev,
+ "failed to init QPC timer memory, aborting.\n");
+ goto err_unmap_ctx;
+ }
+ }
+
+ if (hr_dev->caps.cqc_timer_entry_sz) {
+ ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table,
+ HEM_TYPE_CQC_TIMER,
+ hr_dev->caps.cqc_timer_entry_sz,
+ hr_dev->caps.cqc_timer_bt_num);
+ if (ret) {
+ dev_err(dev,
+ "failed to init CQC timer memory, aborting.\n");
+ goto err_unmap_qpc_timer;
+ }
+ }
+
+ if (hr_dev->caps.gmv_entry_sz) {
+ ret = hns_roce_init_hem_table(hr_dev, &hr_dev->gmv_table,
+ HEM_TYPE_GMV,
+ hr_dev->caps.gmv_entry_sz,
+ hr_dev->caps.gmv_entry_num);
+ if (ret) {
+ dev_err(dev,
+ "failed to init gmv table memory, ret = %d\n",
+ ret);
+ goto err_unmap_cqc_timer;
+ }
+ }
+
+ return 0;
+
+err_unmap_cqc_timer:
+ if (hr_dev->caps.cqc_timer_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cqc_timer_table);
+
+err_unmap_qpc_timer:
+ if (hr_dev->caps.qpc_timer_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qpc_timer_table);
+
+err_unmap_ctx:
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->qp_table.sccc_table);
+err_unmap_srq:
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table);
+
+err_unmap_cq:
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
+
+err_unmap_trrl:
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->qp_table.trrl_table);
+
+err_unmap_irrl:
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
+
+err_unmap_qp:
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
+
+err_unmap_dmpt:
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
+
+ return ret;
+}
+
+static void hns_roce_teardown_hca(struct hns_roce_dev *hr_dev)
+{
+ hns_roce_cleanup_bitmap(hr_dev);
+ mutex_destroy(&hr_dev->pgdir_mutex);
+}
+
+/**
+ * hns_roce_setup_hca - setup host channel adapter
+ * @hr_dev: pointer to hns roce device
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ spin_lock_init(&hr_dev->sm_lock);
+
+ INIT_LIST_HEAD(&hr_dev->qp_list);
+ spin_lock_init(&hr_dev->qp_list_lock);
+
+ INIT_LIST_HEAD(&hr_dev->pgdir_list);
+ mutex_init(&hr_dev->pgdir_mutex);
+
+ hns_roce_init_uar_table(hr_dev);
+
+ ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
+ if (ret) {
+ dev_err(dev, "failed to allocate priv_uar.\n");
+ goto err_uar_table_free;
+ }
+
+ ret = hns_roce_init_qp_table(hr_dev);
+ if (ret) {
+ dev_err(dev, "failed to init qp_table.\n");
+ goto err_uar_table_free;
+ }
+
+ hns_roce_init_pd_table(hr_dev);
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
+ hns_roce_init_xrcd_table(hr_dev);
+
+ hns_roce_init_mr_table(hr_dev);
+
+ hns_roce_init_cq_table(hr_dev);
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
+ hns_roce_init_srq_table(hr_dev);
+
+ return 0;
+
+err_uar_table_free:
+ ida_destroy(&hr_dev->uar_ida.ida);
+ mutex_destroy(&hr_dev->pgdir_mutex);
+
+ return ret;
+}
+
+static void check_and_get_armed_cq(struct list_head *cq_list, struct ib_cq *cq)
+{
+ struct hns_roce_cq *hr_cq = to_hr_cq(cq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&hr_cq->lock, flags);
+ if (cq->comp_handler) {
+ if (!hr_cq->is_armed) {
+ hr_cq->is_armed = 1;
+ list_add_tail(&hr_cq->node, cq_list);
+ }
+ }
+ spin_unlock_irqrestore(&hr_cq->lock, flags);
+}
+
+void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_qp *hr_qp;
+ struct hns_roce_cq *hr_cq;
+ struct list_head cq_list;
+ unsigned long flags_qp;
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&cq_list);
+
+ spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
+ list_for_each_entry(hr_qp, &hr_dev->qp_list, node) {
+ spin_lock_irqsave(&hr_qp->sq.lock, flags_qp);
+ if (hr_qp->sq.tail != hr_qp->sq.head)
+ check_and_get_armed_cq(&cq_list, hr_qp->ibqp.send_cq);
+ spin_unlock_irqrestore(&hr_qp->sq.lock, flags_qp);
+
+ spin_lock_irqsave(&hr_qp->rq.lock, flags_qp);
+ if ((!hr_qp->ibqp.srq) && (hr_qp->rq.tail != hr_qp->rq.head))
+ check_and_get_armed_cq(&cq_list, hr_qp->ibqp.recv_cq);
+ spin_unlock_irqrestore(&hr_qp->rq.lock, flags_qp);
+ }
+
+ list_for_each_entry(hr_cq, &cq_list, node)
+ hns_roce_cq_completion(hr_dev, hr_cq->cqn);
+
+ spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
+}
+
+static int hns_roce_alloc_dfx_cnt(struct hns_roce_dev *hr_dev)
+{
+ hr_dev->dfx_cnt = kvcalloc(HNS_ROCE_DFX_CNT_TOTAL, sizeof(atomic64_t),
+ GFP_KERNEL);
+ if (!hr_dev->dfx_cnt)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void hns_roce_dealloc_dfx_cnt(struct hns_roce_dev *hr_dev)
+{
+ kvfree(hr_dev->dfx_cnt);
+}
+
+int hns_roce_init(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ hr_dev->is_reset = false;
+
+ ret = hns_roce_alloc_dfx_cnt(hr_dev);
+ if (ret)
+ return ret;
+
+ if (hr_dev->hw->cmq_init) {
+ ret = hr_dev->hw->cmq_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "init RoCE Command Queue failed!\n");
+ goto error_failed_alloc_dfx_cnt;
+ }
+ }
+
+ ret = hr_dev->hw->hw_profile(hr_dev);
+ if (ret) {
+ dev_err(dev, "get RoCE engine profile failed!\n");
+ goto error_failed_cmd_init;
+ }
+
+ ret = hns_roce_cmd_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "cmd init failed!\n");
+ goto error_failed_cmd_init;
+ }
+
+ /* EQ depends on poll mode, event mode depends on EQ */
+ ret = hr_dev->hw->init_eq(hr_dev);
+ if (ret) {
+ dev_err(dev, "eq init failed!\n");
+ goto error_failed_eq_table;
+ }
+
+ if (hr_dev->cmd_mod) {
+ ret = hns_roce_cmd_use_events(hr_dev);
+ if (ret)
+ dev_warn(dev,
+ "Cmd event mode failed, set back to poll!\n");
+ }
+
+ ret = hns_roce_init_hem(hr_dev);
+ if (ret) {
+ dev_err(dev, "init HEM(Hardware Entry Memory) failed!\n");
+ goto error_failed_init_hem;
+ }
+
+ ret = hns_roce_setup_hca(hr_dev);
+ if (ret) {
+ dev_err(dev, "setup hca failed!\n");
+ goto error_failed_setup_hca;
+ }
+
+ if (hr_dev->hw->hw_init) {
+ ret = hr_dev->hw->hw_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "hw_init failed!\n");
+ goto error_failed_engine_init;
+ }
+ }
+
+ ret = hns_roce_register_device(hr_dev);
+ if (ret)
+ goto error_failed_register_device;
+
+ hns_roce_register_debugfs(hr_dev);
+
+ return 0;
+
+error_failed_register_device:
+ if (hr_dev->hw->hw_exit)
+ hr_dev->hw->hw_exit(hr_dev);
+
+error_failed_engine_init:
+ hns_roce_teardown_hca(hr_dev);
+
+error_failed_setup_hca:
+ hns_roce_cleanup_hem(hr_dev);
+
+error_failed_init_hem:
+ if (hr_dev->cmd_mod)
+ hns_roce_cmd_use_polling(hr_dev);
+ hr_dev->hw->cleanup_eq(hr_dev);
+
+error_failed_eq_table:
+ hns_roce_cmd_cleanup(hr_dev);
+
+error_failed_cmd_init:
+ if (hr_dev->hw->cmq_exit)
+ hr_dev->hw->cmq_exit(hr_dev);
+
+error_failed_alloc_dfx_cnt:
+ hns_roce_dealloc_dfx_cnt(hr_dev);
+
+ return ret;
+}
+
+void hns_roce_exit(struct hns_roce_dev *hr_dev)
+{
+ hns_roce_unregister_debugfs(hr_dev);
+ hns_roce_unregister_device(hr_dev);
+
+ if (hr_dev->hw->hw_exit)
+ hr_dev->hw->hw_exit(hr_dev);
+ hns_roce_teardown_hca(hr_dev);
+ hns_roce_cleanup_hem(hr_dev);
+
+ if (hr_dev->cmd_mod)
+ hns_roce_cmd_use_polling(hr_dev);
+
+ hr_dev->hw->cleanup_eq(hr_dev);
+ hns_roce_cmd_cleanup(hr_dev);
+ if (hr_dev->hw->cmq_exit)
+ hr_dev->hw->cmq_exit(hr_dev);
+ hns_roce_dealloc_dfx_cnt(hr_dev);
+}
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
+MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
+MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
+MODULE_DESCRIPTION("HNS RoCE Driver");
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
new file mode 100644
index 000000000000..31cb8699e198
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -0,0 +1,1104 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/count_zeros.h>
+#include <rdma/ib_umem.h>
+#include <linux/math.h>
+#include "hns_roce_device.h"
+#include "hns_roce_cmd.h"
+#include "hns_roce_hem.h"
+#include "hns_roce_trace.h"
+
+static u32 hw_index_to_key(int ind)
+{
+ return ((u32)ind >> 24) | ((u32)ind << 8);
+}
+
+unsigned long key_to_hw_index(u32 key)
+{
+ return (key << 24) | (key >> 8);
+}
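+
+/*
+ * These two helpers are 32-bit rotations by 8 bits in opposite directions:
+ * hw_index_to_key() places the index in the upper 24 bits of the key and
+ * key_to_hw_index() undoes it. For example, index 0x1 maps to key 0x100,
+ * and key_to_hw_index(0x100) returns 1.
+ */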
+
+static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
+{
+ struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int err;
+ int id;
+
+ /* Allocate a key for mr from mr_table */
+ id = ida_alloc_range(&mtpt_ida->ida, mtpt_ida->min, mtpt_ida->max,
+ GFP_KERNEL);
+ if (id < 0) {
+ ibdev_err(ibdev, "failed to alloc id for MR key, id(%d)\n", id);
+ return -ENOMEM;
+ }
+
+ mr->key = hw_index_to_key(id); /* MR key */
+
+ err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table,
+ (unsigned long)id);
+ if (err) {
+ ibdev_err(ibdev, "failed to alloc mtpt, ret = %d.\n", err);
+ goto err_free_bitmap;
+ }
+
+ return 0;
+err_free_bitmap:
+ ida_free(&mtpt_ida->ida, id);
+ return err;
+}
+
+static void free_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
+{
+ unsigned long obj = key_to_hw_index(mr->key);
+
+ hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
+ ida_free(&hr_dev->mr_table.mtpt_ida.ida, (int)obj);
+}
+
+static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
+ struct ib_udata *udata, u64 start)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ bool is_fast = mr->type == MR_TYPE_FRMR;
+ struct hns_roce_buf_attr buf_attr = {};
+ int err;
+
+ mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
+ buf_attr.page_shift = is_fast ? PAGE_SHIFT :
+ hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
+ buf_attr.region[0].size = mr->size;
+ buf_attr.region[0].hopnum = mr->pbl_hop_num;
+ buf_attr.region_count = 1;
+ buf_attr.user_access = mr->access;
+ /* a fast MR's buffer is allocated before mapping, not at creation */
+ buf_attr.mtt_only = is_fast;
+ buf_attr.iova = mr->iova;
+ /* page size and hopnum are fixed for a fast MR */
+ buf_attr.adaptive = !is_fast;
+ buf_attr.type = MTR_PBL;
+
+ err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
+ hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT,
+ udata, start);
+ if (err) {
+ ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err);
+ return err;
+ }
+
+ mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;
+ mr->pbl_hop_num = buf_attr.region[0].hopnum;
+
+ return err;
+}
+
+static void free_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
+{
+ hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
+}
+
+static void hns_roce_mr_free(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int ret;
+
+ if (mr->enabled) {
+ ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT,
+ key_to_hw_index(mr->key) &
+ (hr_dev->caps.num_mtpts - 1));
+ if (ret)
+ ibdev_warn_ratelimited(ibdev, "failed to destroy mpt, ret = %d.\n",
+ ret);
+ }
+
+ free_mr_pbl(hr_dev, mr);
+ free_mr_key(hr_dev, mr);
+}
+
+static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mr *mr)
+{
+ unsigned long mtpt_idx = key_to_hw_index(mr->key);
+ struct hns_roce_cmd_mailbox *mailbox;
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ /* Allocate mailbox memory */
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ trace_hns_mr(mr);
+ if (mr->type != MR_TYPE_FRMR)
+ ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr);
+ else
+ ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
+ if (ret) {
+ dev_err(dev, "failed to write mtpt, ret = %d.\n", ret);
+ goto err_page;
+ }
+
+ ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT,
+ mtpt_idx & (hr_dev->caps.num_mtpts - 1));
+ if (ret) {
+ dev_err(dev, "failed to create mpt, ret = %d.\n", ret);
+ goto err_page;
+ }
+
+ mr->enabled = 1;
+
+err_page:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
+
+void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
+
+ ida_init(&mtpt_ida->ida);
+ mtpt_ida->max = hr_dev->caps.num_mtpts - 1;
+ mtpt_ida->min = hr_dev->caps.reserved_mrws;
+}
+
+struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
+ struct hns_roce_mr *mr;
+ int ret;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->type = MR_TYPE_DMA;
+ mr->pd = to_hr_pd(pd)->pdn;
+ mr->access = acc;
+
+ /* Allocate memory region key */
+ hns_roce_hem_list_init(&mr->pbl_mtr.hem_list);
+ ret = alloc_mr_key(hr_dev, mr);
+ if (ret)
+ goto err_free;
+
+ ret = hns_roce_mr_enable(hr_dev, mr);
+ if (ret)
+ goto err_mr;
+
+ mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
+
+ return &mr->ibmr;
+err_mr:
+ free_mr_key(hr_dev, mr);
+
+err_free:
+ kfree(mr);
+ return ERR_PTR(ret);
+}
+
+struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
+ struct hns_roce_mr *mr;
+ int ret;
+
+ if (dmah) {
+ ret = -EOPNOTSUPP;
+ goto err_out;
+ }
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ mr->iova = virt_addr;
+ mr->size = length;
+ mr->pd = to_hr_pd(pd)->pdn;
+ mr->access = access_flags;
+ mr->type = MR_TYPE_MR;
+
+ ret = alloc_mr_key(hr_dev, mr);
+ if (ret)
+ goto err_alloc_mr;
+
+ ret = alloc_mr_pbl(hr_dev, mr, udata, start);
+ if (ret)
+ goto err_alloc_key;
+
+ ret = hns_roce_mr_enable(hr_dev, mr);
+ if (ret)
+ goto err_alloc_pbl;
+
+ mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
+
+ return &mr->ibmr;
+
+err_alloc_pbl:
+ free_mr_pbl(hr_dev, mr);
+err_alloc_key:
+ free_mr_key(hr_dev, mr);
+err_alloc_mr:
+ kfree(mr);
+err_out:
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MR_REG_ERR_CNT]);
+
+ return ERR_PTR(ret);
+}
+
+struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
+ u64 length, u64 virt_addr,
+ int mr_access_flags, struct ib_pd *pd,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
+ struct ib_device *ib_dev = &hr_dev->ib_dev;
+ struct hns_roce_mr *mr = to_hr_mr(ibmr);
+ struct hns_roce_cmd_mailbox *mailbox;
+ unsigned long mtpt_idx;
+ int ret;
+
+ if (!mr->enabled) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ ret = PTR_ERR_OR_ZERO(mailbox);
+ if (ret)
+ goto err_out;
+
+ mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);
+
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_MPT,
+ mtpt_idx);
+ if (ret)
+ goto free_cmd_mbox;
+
+ ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT,
+ mtpt_idx);
+ if (ret)
+ ibdev_warn(ib_dev, "failed to destroy MPT, ret = %d.\n", ret);
+
+ mr->enabled = 0;
+ mr->iova = virt_addr;
+ mr->size = length;
+
+ if (flags & IB_MR_REREG_PD)
+ mr->pd = to_hr_pd(pd)->pdn;
+
+ if (flags & IB_MR_REREG_ACCESS)
+ mr->access = mr_access_flags;
+
+ if (flags & IB_MR_REREG_TRANS) {
+ free_mr_pbl(hr_dev, mr);
+ ret = alloc_mr_pbl(hr_dev, mr, udata, start);
+ if (ret) {
+ ibdev_err(ib_dev, "failed to alloc mr PBL, ret = %d.\n",
+ ret);
+ goto free_cmd_mbox;
+ }
+ }
+
+ ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, mailbox->buf);
+ if (ret) {
+ ibdev_err(ib_dev, "failed to write mtpt, ret = %d.\n", ret);
+ goto free_cmd_mbox;
+ }
+
+ ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT,
+ mtpt_idx);
+ if (ret) {
+ ibdev_err(ib_dev, "failed to create MPT, ret = %d.\n", ret);
+ goto free_cmd_mbox;
+ }
+
+ mr->enabled = 1;
+
+free_cmd_mbox:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+err_out:
+ if (ret) {
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MR_REREG_ERR_CNT]);
+ return ERR_PTR(ret);
+ }
+
+ return NULL;
+}
+
+int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
+ struct hns_roce_mr *mr = to_hr_mr(ibmr);
+
+ if (hr_dev->hw->dereg_mr)
+ hr_dev->hw->dereg_mr(hr_dev);
+
+ hns_roce_mr_free(hr_dev, mr);
+ kfree(mr);
+
+ return 0;
+}
+
+struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_mr *mr;
+ int ret;
+
+ if (mr_type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EINVAL);
+
+ if (max_num_sg > HNS_ROCE_FRMR_MAX_PA) {
+ dev_err(dev, "max_num_sg larger than %d\n",
+ HNS_ROCE_FRMR_MAX_PA);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->type = MR_TYPE_FRMR;
+ mr->pd = to_hr_pd(pd)->pdn;
+ mr->size = max_num_sg * (1 << PAGE_SHIFT);
+
+ /* Allocate memory region key */
+ ret = alloc_mr_key(hr_dev, mr);
+ if (ret)
+ goto err_free;
+
+ ret = alloc_mr_pbl(hr_dev, mr, NULL, 0);
+ if (ret)
+ goto err_key;
+
+ ret = hns_roce_mr_enable(hr_dev, mr);
+ if (ret)
+ goto err_pbl;
+
+ mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
+ mr->ibmr.length = mr->size;
+
+ return &mr->ibmr;
+
+err_pbl:
+ free_mr_pbl(hr_dev, mr);
+err_key:
+ free_mr_key(hr_dev, mr);
+err_free:
+ kfree(mr);
+ return ERR_PTR(ret);
+}
+
+static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct hns_roce_mr *mr = to_hr_mr(ibmr);
+
+ if (likely(mr->npages < mr->pbl_mtr.hem_cfg.buf_pg_count)) {
+ mr->page_list[mr->npages++] = addr;
+ return 0;
+ }
+
+ return -ENOBUFS;
+}
+
+int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset_p)
+{
+ unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_mr *mr = to_hr_mr(ibmr);
+ struct hns_roce_mtr *mtr = &mr->pbl_mtr;
+ int ret, sg_num = 0;
+
+ if (!IS_ALIGNED(sg_offset, HNS_ROCE_FRMR_ALIGN_SIZE) ||
+ ibmr->page_size < HNS_HW_PAGE_SIZE ||
+ ibmr->page_size > HNS_HW_MAX_PAGE_SIZE)
+ return sg_num;
+
+ mr->npages = 0;
+ mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
+ sizeof(dma_addr_t), GFP_KERNEL);
+ if (!mr->page_list)
+ return sg_num;
+
+ sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset_p, hns_roce_set_page);
+ if (sg_num < 1) {
+ ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
+ mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
+ goto err_page_list;
+ }
+
+ mtr->hem_cfg.region[0].offset = 0;
+ mtr->hem_cfg.region[0].count = mr->npages;
+ mtr->hem_cfg.region[0].hopnum = mr->pbl_hop_num;
+ mtr->hem_cfg.region_count = 1;
+ ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
+ if (ret) {
+ ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
+ sg_num = 0;
+ } else {
+ mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
+ }
+
+err_page_list:
+ kvfree(mr->page_list);
+ mr->page_list = NULL;
+
+ return sg_num;
+}
+
+static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ struct hns_roce_buf_region *region, dma_addr_t *pages,
+ int max_count)
+{
+ int count, npage;
+ int offset, end;
+ __le64 *mtts;
+ u64 addr;
+ int i;
+
+ offset = region->offset;
+ end = offset + region->count;
+ npage = 0;
+ while (offset < end && npage < max_count) {
+ count = 0;
+ mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
+ offset, &count);
+ if (!mtts)
+ return -ENOBUFS;
+
+ for (i = 0; i < count && npage < max_count; i++) {
+ addr = pages[npage];
+
+ mtts[i] = cpu_to_le64(addr);
+ npage++;
+ }
+ offset += count;
+ }
+
+ return npage;
+}
+
+static inline bool mtr_has_mtt(struct hns_roce_buf_attr *attr)
+{
+ int i;
+
+ for (i = 0; i < attr->region_count; i++)
+ if (attr->region[i].hopnum != HNS_ROCE_HOP_NUM_0 &&
+ attr->region[i].hopnum > 0)
+ return true;
+
+ /* Because the mtr has only one root base address, a hopnum of 0 means
+ * the root base address equals the first buffer address, so all allocated
+ * memory must lie in one contiguous region accessed in direct mode.
+ */
+ return false;
+}
+
+static inline size_t mtr_bufs_size(struct hns_roce_buf_attr *attr)
+{
+ size_t size = 0;
+ int i;
+
+ for (i = 0; i < attr->region_count; i++)
+ size += attr->region[i].size;
+
+ return size;
+}
+
+/*
+ * Check that the given pages form a contiguous address space.
+ * Returns 0 on success, or the index of the first non-contiguous page.
+ */
+static inline int mtr_check_direct_pages(dma_addr_t *pages, int page_count,
+ unsigned int page_shift)
+{
+ size_t page_size = 1 << page_shift;
+ int i;
+
+ for (i = 1; i < page_count; i++)
+ if (pages[i] - pages[i - 1] != page_size)
+ return i;
+
+ return 0;
+}
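+
+/*
+ * Example with hypothetical addresses: for pages = {0x10000, 0x11000, 0x13000}
+ * and page_shift = 12, the gap between pages[2] and pages[1] is 0x2000 rather
+ * than 0x1000, so mtr_check_direct_pages() returns 2.
+ */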
+
+static void mtr_free_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
+{
+ /* release user buffers */
+ if (mtr->umem) {
+ ib_umem_release(mtr->umem);
+ mtr->umem = NULL;
+ }
+
+ /* release kernel buffers */
+ if (mtr->kmem) {
+ hns_roce_buf_free(hr_dev, mtr->kmem);
+ mtr->kmem = NULL;
+ }
+}
+
+static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ struct hns_roce_buf_attr *buf_attr,
+ struct ib_udata *udata, unsigned long user_addr)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ size_t total_size;
+
+ total_size = mtr_bufs_size(buf_attr);
+
+ if (udata) {
+ mtr->kmem = NULL;
+ mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
+ buf_attr->user_access);
+ if (IS_ERR(mtr->umem)) {
+ ibdev_err(ibdev, "failed to get umem, ret = %pe.\n",
+ mtr->umem);
+ return -ENOMEM;
+ }
+ } else {
+ mtr->umem = NULL;
+ mtr->kmem = hns_roce_buf_alloc(hr_dev, total_size,
+ buf_attr->page_shift,
+ !mtr_has_mtt(buf_attr) ?
+ HNS_ROCE_BUF_DIRECT : 0);
+ if (IS_ERR(mtr->kmem)) {
+ ibdev_err(ibdev, "failed to alloc kmem, ret = %pe.\n",
+ mtr->kmem);
+ return PTR_ERR(mtr->kmem);
+ }
+ }
+
+ return 0;
+}
+
+static int cal_mtr_pg_cnt(struct hns_roce_mtr *mtr)
+{
+ struct hns_roce_buf_region *region;
+ int page_cnt = 0;
+ int i;
+
+ for (i = 0; i < mtr->hem_cfg.region_count; i++) {
+ region = &mtr->hem_cfg.region[i];
+ page_cnt += region->count;
+ }
+
+ return page_cnt;
+}
+
+static bool need_split_huge_page(struct hns_roce_mtr *mtr)
+{
+ /* When the HEM buffer uses 0-level addressing, the page size is
+ * equal to the whole buffer size. If the current MTR has multiple
+ * regions, we split the buffer into small pages (4K, as required by
+ * the hns ROCEE), and these pages are then used across the regions.
+ */
+ return mtr->hem_cfg.is_direct && mtr->hem_cfg.region_count > 1;
+}
+
+static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int page_count = cal_mtr_pg_cnt(mtr);
+ unsigned int page_shift;
+ dma_addr_t *pages;
+ int npage;
+ int ret;
+
+ page_shift = need_split_huge_page(mtr) ? HNS_HW_PAGE_SHIFT :
+ mtr->hem_cfg.buf_pg_shift;
+ /* allocate a temporary array to store the buffer's DMA addresses */
+ pages = kvcalloc(page_count, sizeof(dma_addr_t), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ if (mtr->umem)
+ npage = hns_roce_get_umem_bufs(pages, page_count,
+ mtr->umem, page_shift);
+ else
+ npage = hns_roce_get_kmem_bufs(hr_dev, pages, page_count,
+ mtr->kmem, page_shift);
+
+ if (npage != page_count) {
+ ibdev_err(ibdev, "failed to get mtr page %d != %d.\n", npage,
+ page_count);
+ ret = -ENOBUFS;
+ goto err_alloc_list;
+ }
+
+ if (need_split_huge_page(mtr) && npage > 1) {
+ ret = mtr_check_direct_pages(pages, npage, page_shift);
+ if (ret) {
+ ibdev_err(ibdev, "failed to check %s page: %d / %d.\n",
+ mtr->umem ? "umtr" : "kmtr", ret, npage);
+ ret = -ENOBUFS;
+ goto err_alloc_list;
+ }
+ }
+
+ ret = hns_roce_mtr_map(hr_dev, mtr, pages, page_count);
+ if (ret)
+ ibdev_err(ibdev, "failed to map mtr pages, ret = %d.\n", ret);
+
+err_alloc_list:
+ kvfree(pages);
+
+ return ret;
+}
+
+int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ dma_addr_t *pages, unsigned int page_cnt)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_buf_region *r;
+ unsigned int i, mapped_cnt;
+ int ret = 0;
+
+ /*
+ * Only use the first page address as the root ba when hopnum is 0,
+ * because the addresses of all pages are consecutive in this case.
+ */
+ if (mtr->hem_cfg.is_direct) {
+ mtr->hem_cfg.root_ba = pages[0];
+ return 0;
+ }
+
+ for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
+ mapped_cnt < page_cnt; i++) {
+ r = &mtr->hem_cfg.region[i];
+
+ if (r->offset + r->count > page_cnt) {
+ ret = -EINVAL;
+ ibdev_err(ibdev,
+ "failed to check mtr%u count %u + %u > %u.\n",
+ i, r->offset, r->count, page_cnt);
+ return ret;
+ }
+
+ ret = mtr_map_region(hr_dev, mtr, r, &pages[r->offset],
+ page_cnt - mapped_cnt);
+ if (ret < 0) {
+ ibdev_err(ibdev,
+ "failed to map mtr%u offset %u, ret = %d.\n",
+ i, r->offset, ret);
+ return ret;
+ }
+ mapped_cnt += ret;
+ ret = 0;
+ }
+
+ if (mapped_cnt < page_cnt) {
+ ret = -ENOBUFS;
+ ibdev_err(ibdev, "failed to map mtr pages count: %u < %u.\n",
+ mapped_cnt, page_cnt);
+ }
+
+ return ret;
+}
+
+static int hns_roce_get_direct_addr_mtt(struct hns_roce_hem_cfg *cfg,
+ u32 start_index, u64 *mtt_buf,
+ int mtt_cnt)
+{
+ int mtt_count;
+ int total = 0;
+ u32 npage;
+ u64 addr;
+
+ if (mtt_cnt > cfg->region_count)
+ return -EINVAL;
+
+ for (mtt_count = 0; mtt_count < cfg->region_count && total < mtt_cnt;
+ mtt_count++) {
+ npage = cfg->region[mtt_count].offset;
+ if (npage < start_index)
+ continue;
+
+ addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
+ mtt_buf[total] = addr;
+
+ total++;
+ }
+
+ if (!total)
+ return -ENOENT;
+
+ return 0;
+}
+
+static int hns_roce_get_mhop_mtt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtr *mtr, u32 start_index,
+ u64 *mtt_buf, int mtt_cnt)
+{
+ int left = mtt_cnt;
+ int total = 0;
+ int mtt_count;
+ __le64 *mtts;
+ u32 npage;
+
+ while (left > 0) {
+ mtt_count = 0;
+ mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
+ start_index + total,
+ &mtt_count);
+ if (!mtts || !mtt_count)
+ break;
+
+ npage = min(mtt_count, left);
+ left -= npage;
+ for (mtt_count = 0; mtt_count < npage; mtt_count++)
+ mtt_buf[total++] = le64_to_cpu(mtts[mtt_count]);
+ }
+
+ if (!total)
+ return -ENOENT;
+
+ return 0;
+}
+
+int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ u32 offset, u64 *mtt_buf, int mtt_max)
+{
+ struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
+ u32 start_index;
+ int ret;
+
+ if (!mtt_buf || mtt_max < 1)
+ return -EINVAL;
+
+ /* no mtt memory in direct mode, so just return the buffer address */
+ if (cfg->is_direct) {
+ start_index = offset >> HNS_HW_PAGE_SHIFT;
+ ret = hns_roce_get_direct_addr_mtt(cfg, start_index,
+ mtt_buf, mtt_max);
+ } else {
+ start_index = offset >> cfg->buf_pg_shift;
+ ret = hns_roce_get_mhop_mtt(hr_dev, mtr, start_index,
+ mtt_buf, mtt_max);
+ }
+ return ret;
+}
+
+static int get_best_page_shift(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtr *mtr,
+ struct hns_roce_buf_attr *buf_attr)
+{
+ unsigned int page_sz;
+
+ if (!buf_attr->adaptive || buf_attr->type != MTR_PBL || !mtr->umem)
+ return 0;
+
+ page_sz = ib_umem_find_best_pgsz(mtr->umem,
+ hr_dev->caps.page_size_cap,
+ buf_attr->iova);
+ if (!page_sz)
+ return -EINVAL;
+
+ buf_attr->page_shift = order_base_2(page_sz);
+ return 0;
+}
+
+static int get_best_hop_num(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtr *mtr,
+ struct hns_roce_buf_attr *buf_attr,
+ unsigned int ba_pg_shift)
+{
+#define INVALID_HOPNUM -1
+#define MIN_BA_CNT 1
+ size_t buf_pg_sz = 1 << buf_attr->page_shift;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ size_t ba_pg_sz = 1 << ba_pg_shift;
+ int hop_num = INVALID_HOPNUM;
+ size_t unit = MIN_BA_CNT;
+ size_t ba_cnt;
+ int j;
+
+ if (!buf_attr->adaptive || buf_attr->type != MTR_PBL)
+ return 0;
+
+ /* Calculate the number of buf pages; each buf page needs a BA */
+ if (mtr->umem)
+ ba_cnt = ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz);
+ else
+ ba_cnt = DIV_ROUND_UP(buf_attr->region[0].size, buf_pg_sz);
+
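+ /*
+ * Each extra hop multiplies the addressable BA count by the number
+ * of entries in one BA page, so pick the smallest hop number whose
+ * capacity covers ba_cnt.
+ */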
+ for (j = 0; j <= HNS_ROCE_MAX_HOP_NUM; j++) {
+ if (ba_cnt <= unit) {
+ hop_num = j;
+ break;
+ }
+ /* Number of BAs that can be represented per hop */
+ unit *= ba_pg_sz / BA_BYTE_LEN;
+ }
+
+ if (hop_num < 0) {
+ ibdev_err(ibdev, "failed to calculate a valid hopnum.\n");
+ return -EINVAL;
+ }
+
+ buf_attr->region[0].hopnum = hop_num;
+
+ return 0;
+}
+
+static bool is_buf_attr_valid(struct hns_roce_dev *hr_dev,
+ struct hns_roce_buf_attr *attr)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+
+ if (attr->region_count > ARRAY_SIZE(attr->region) ||
+ attr->region_count < 1 || attr->page_shift < HNS_HW_PAGE_SHIFT) {
+ ibdev_err(ibdev,
+ "invalid buf attr, region count %u, page shift %u.\n",
+ attr->region_count, attr->page_shift);
+ return false;
+ }
+
+ return true;
+}
+
+static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtr *mtr,
+ struct hns_roce_buf_attr *attr)
+{
+ struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
+ struct hns_roce_buf_region *r;
+ size_t buf_pg_sz;
+ size_t buf_size;
+ int page_cnt, i;
+ u64 pgoff = 0;
+
+ if (!is_buf_attr_valid(hr_dev, attr))
+ return -EINVAL;
+
+ /* If mtt is disabled, all pages must be within a contiguous range */
+ cfg->is_direct = !mtr_has_mtt(attr);
+ cfg->region_count = attr->region_count;
+ buf_size = mtr_bufs_size(attr);
+ if (need_split_huge_page(mtr)) {
+ buf_pg_sz = HNS_HW_PAGE_SIZE;
+ cfg->buf_pg_count = 1;
+ /* The ROCEE requires the page size to be 4K * 2 ^ N. */
+ cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT +
+ order_base_2(DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE));
+ } else {
+ buf_pg_sz = 1 << attr->page_shift;
+ cfg->buf_pg_count = mtr->umem ?
+ ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz) :
+ DIV_ROUND_UP(buf_size, buf_pg_sz);
+ cfg->buf_pg_shift = attr->page_shift;
+ pgoff = mtr->umem ? mtr->umem->address & ~PAGE_MASK : 0;
+ }
+
+ /* Convert each region's buffer size into a page index and page count;
+ * the buffer's page offset is accounted for only in the first region.
+ */
+ for (page_cnt = 0, i = 0; i < attr->region_count; i++) {
+ r = &cfg->region[i];
+ r->offset = page_cnt;
+ buf_size = hr_hw_page_align(attr->region[i].size + pgoff);
+ if (attr->type == MTR_PBL && mtr->umem)
+ r->count = ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz);
+ else
+ r->count = DIV_ROUND_UP(buf_size, buf_pg_sz);
+
+ pgoff = 0;
+ page_cnt += r->count;
+ r->hopnum = to_hr_hem_hopnum(attr->region[i].hopnum, r->count);
+ }
+
+ return 0;
+}
+
+static u64 cal_pages_per_l1ba(unsigned int ba_per_bt, unsigned int hopnum)
+{
+ return int_pow(ba_per_bt, hopnum - 1);
+}
+
+static unsigned int cal_best_bt_pg_sz(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtr *mtr,
+ unsigned int pg_shift)
+{
+ unsigned long cap = hr_dev->caps.page_size_cap;
+ struct hns_roce_buf_region *re;
+ unsigned int pgs_per_l1ba;
+ unsigned int ba_per_bt;
+ unsigned int ba_num;
+ int i;
+
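+ /*
+ * Scan the supported page sizes from small to large and return the
+ * first one whose root BT page can hold a BA for every region's
+ * level-1 tables.
+ */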
+ for_each_set_bit_from(pg_shift, &cap, sizeof(cap) * BITS_PER_BYTE) {
+ if (!(BIT(pg_shift) & cap))
+ continue;
+
+ ba_per_bt = BIT(pg_shift) / BA_BYTE_LEN;
+ ba_num = 0;
+ for (i = 0; i < mtr->hem_cfg.region_count; i++) {
+ re = &mtr->hem_cfg.region[i];
+ if (re->hopnum == 0)
+ continue;
+
+ pgs_per_l1ba = cal_pages_per_l1ba(ba_per_bt, re->hopnum);
+ ba_num += DIV_ROUND_UP(re->count, pgs_per_l1ba);
+ }
+
+ if (ba_num <= ba_per_bt)
+ return pg_shift;
+ }
+
+ return 0;
+}
+
+static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ unsigned int ba_page_shift)
+{
+ struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
+ int ret;
+
+ hns_roce_hem_list_init(&mtr->hem_list);
+ if (!cfg->is_direct) {
+ ba_page_shift = cal_best_bt_pg_sz(hr_dev, mtr, ba_page_shift);
+ if (!ba_page_shift)
+ return -ERANGE;
+
+ ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
+ cfg->region, cfg->region_count,
+ ba_page_shift);
+ if (ret)
+ return ret;
+ cfg->root_ba = mtr->hem_list.root_ba;
+ cfg->ba_pg_shift = ba_page_shift;
+ } else {
+ cfg->ba_pg_shift = cfg->buf_pg_shift;
+ }
+
+ return 0;
+}
+
+static void mtr_free_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
+{
+ hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
+}
+
+/**
+ * hns_roce_mtr_create - Create hns memory translate region.
+ *
+ * @hr_dev: RoCE device struct pointer
+ * @mtr: memory translate region
+ * @buf_attr: buffer attribute for creating mtr
+ * @ba_page_shift: page shift for multi-hop base address table
+ * @udata: user space context; NULL means kernel space
+ * @user_addr: userspace virtual address to start at
+ */
+int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ struct hns_roce_buf_attr *buf_attr,
+ unsigned int ba_page_shift, struct ib_udata *udata,
+ unsigned long user_addr)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int ret;
+
+ trace_hns_buf_attr(buf_attr);
+ /* The caller has its own buffer list and invokes hns_roce_mtr_map()
+ * to finish the MTT configuration.
+ */
+ if (buf_attr->mtt_only) {
+ mtr->umem = NULL;
+ mtr->kmem = NULL;
+ } else {
+ ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, udata, user_addr);
+ if (ret) {
+ ibdev_err(ibdev,
+ "failed to alloc mtr bufs, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = get_best_page_shift(hr_dev, mtr, buf_attr);
+ if (ret)
+ goto err_init_buf;
+
+ ret = get_best_hop_num(hr_dev, mtr, buf_attr, ba_page_shift);
+ if (ret)
+ goto err_init_buf;
+ }
+
+ ret = mtr_init_buf_cfg(hr_dev, mtr, buf_attr);
+ if (ret)
+ goto err_init_buf;
+
+ ret = mtr_alloc_mtt(hr_dev, mtr, ba_page_shift);
+ if (ret) {
+ ibdev_err(ibdev, "failed to alloc mtr mtt, ret = %d.\n", ret);
+ goto err_init_buf;
+ }
+
+ if (buf_attr->mtt_only)
+ return 0;
+
+ /* Write buffer's dma address to MTT */
+ ret = mtr_map_bufs(hr_dev, mtr);
+ if (ret) {
+ ibdev_err(ibdev, "failed to map mtr bufs, ret = %d.\n", ret);
+ goto err_alloc_mtt;
+ }
+
+ return 0;
+
+err_alloc_mtt:
+ mtr_free_mtt(hr_dev, mtr);
+err_init_buf:
+ mtr_free_bufs(hr_dev, mtr);
+
+ return ret;
+}
+
+void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
+{
+ /* release multi-hop addressing resource */
+ hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
+
+ /* free buffers */
+ mtr_free_bufs(hr_dev, mtr);
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
new file mode 100644
index 000000000000..d35cf59d0f43
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include "hns_roce_device.h"
+
+void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_ida *pd_ida = &hr_dev->pd_ida;
+
+ ida_init(&pd_ida->ida);
+ pd_ida->max = hr_dev->caps.num_pds - 1;
+ pd_ida->min = hr_dev->caps.reserved_pds;
+}
+
+int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct ib_device *ib_dev = ibpd->device;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+ struct hns_roce_ida *pd_ida = &hr_dev->pd_ida;
+ struct hns_roce_pd *pd = to_hr_pd(ibpd);
+ int ret = 0;
+ int id;
+
+ id = ida_alloc_range(&pd_ida->ida, pd_ida->min, pd_ida->max,
+ GFP_KERNEL);
+ if (id < 0) {
+ ibdev_err(ib_dev, "failed to alloc pd, id = %d.\n", id);
+ return -ENOMEM;
+ }
+ pd->pdn = (unsigned long)id;
+
+ if (udata) {
+ struct hns_roce_ib_alloc_pd_resp resp = {.pdn = pd->pdn};
+
+ ret = ib_copy_to_udata(udata, &resp,
+ min(udata->outlen, sizeof(resp)));
+ if (ret) {
+ ida_free(&pd_ida->ida, id);
+ ibdev_err(ib_dev, "failed to copy to udata, ret = %d\n", ret);
+ }
+ }
+
+ return ret;
+}
+
+int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
+
+ ida_free(&hr_dev->pd_ida.ida, (int)to_hr_pd(pd)->pdn);
+
+ return 0;
+}
+
+int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
+{
+ struct hns_roce_ida *uar_ida = &hr_dev->uar_ida;
+ int id;
+
+ /* Use the IDA to manage the UAR index */
+ id = ida_alloc_range(&uar_ida->ida, uar_ida->min, uar_ida->max,
+ GFP_KERNEL);
+ if (id < 0) {
+ ibdev_err(&hr_dev->ib_dev, "failed to alloc uar id(%d).\n", id);
+ return -ENOMEM;
+ }
+ uar->logic_idx = (unsigned long)id;
+
+ if (uar->logic_idx > 0 && hr_dev->caps.phy_num_uars > 1)
+ uar->index = (uar->logic_idx - 1) %
+ (hr_dev->caps.phy_num_uars - 1) + 1;
+ else
+ uar->index = 0;
+
+ uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2)) >> PAGE_SHIFT);
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE)
+ hr_dev->dwqe_page = pci_resource_start(hr_dev->pci_dev, 4);
+
+ return 0;
+}
+
+void hns_roce_init_uar_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_ida *uar_ida = &hr_dev->uar_ida;
+
+ ida_init(&uar_ida->ida);
+ uar_ida->max = hr_dev->caps.num_uars - 1;
+ uar_ida->min = hr_dev->caps.reserved_uars;
+}
+
+static int hns_roce_xrcd_alloc(struct hns_roce_dev *hr_dev, u32 *xrcdn)
+{
+ struct hns_roce_ida *xrcd_ida = &hr_dev->xrcd_ida;
+ int id;
+
+ id = ida_alloc_range(&xrcd_ida->ida, xrcd_ida->min, xrcd_ida->max,
+ GFP_KERNEL);
+ if (id < 0) {
+ ibdev_err(&hr_dev->ib_dev, "failed to alloc xrcdn(%d).\n", id);
+ return -ENOMEM;
+ }
+ *xrcdn = (u32)id;
+
+ return 0;
+}
+
+void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_ida *xrcd_ida = &hr_dev->xrcd_ida;
+
+ ida_init(&xrcd_ida->ida);
+ xrcd_ida->max = hr_dev->caps.num_xrcds - 1;
+ xrcd_ida->min = hr_dev->caps.reserved_xrcds;
+}
+
+int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_xrcd->device);
+ struct hns_roce_xrcd *xrcd = to_hr_xrcd(ib_xrcd);
+ int ret;
+
+ if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)) {
+ ret = -EOPNOTSUPP;
+ goto err_out;
+ }
+
+ ret = hns_roce_xrcd_alloc(hr_dev, &xrcd->xrcdn);
+
+err_out:
+ if (ret)
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_XRCD_ALLOC_ERR_CNT]);
+
+ return ret;
+}
+
+int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_xrcd->device);
+ u32 xrcdn = to_hr_xrcd(ib_xrcd)->xrcdn;
+
+ ida_free(&hr_dev->xrcd_ida.ida, (int)xrcdn);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
new file mode 100644
index 000000000000..6ff1b8ce580c
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -0,0 +1,1609 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_umem.h>
+#include <rdma/uverbs_ioctl.h>
+#include "hns_roce_common.h"
+#include "hns_roce_device.h"
+#include "hns_roce_hem.h"
+
+static struct hns_roce_qp *hns_roce_qp_lookup(struct hns_roce_dev *hr_dev,
+ u32 qpn)
+{
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_qp *qp;
+ unsigned long flags;
+
+ xa_lock_irqsave(&hr_dev->qp_table_xa, flags);
+ qp = __hns_roce_qp_lookup(hr_dev, qpn);
+ if (qp)
+ refcount_inc(&qp->refcount);
+ xa_unlock_irqrestore(&hr_dev->qp_table_xa, flags);
+
+ if (!qp)
+ dev_warn(dev, "async event for bogus QP %08x\n", qpn);
+
+ return qp;
+}
+
+static void flush_work_handle(struct work_struct *work)
+{
+ struct hns_roce_work *flush_work = container_of(work,
+ struct hns_roce_work, work);
+ struct hns_roce_qp *hr_qp = container_of(flush_work,
+ struct hns_roce_qp, flush_work);
+ struct device *dev = flush_work->hr_dev->dev;
+ struct ib_qp_attr attr;
+ int attr_mask;
+ int ret;
+
+ attr_mask = IB_QP_STATE;
+ attr.qp_state = IB_QPS_ERR;
+
+ if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {
+ ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
+ if (ret)
+ dev_err(dev, "modify QP to error state failed(%d) during CQE flush\n",
+ ret);
+ }
+
+ /*
+ * Make sure the QP destroy path is signalled that the flush has
+ * completed, so that it can safely proceed and destroy the QP.
+ */
+ if (refcount_dec_and_test(&hr_qp->refcount))
+ complete(&hr_qp->free);
+}
+
+void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_work *flush_work = &hr_qp->flush_work;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hr_qp->flush_lock, flags);
+ /* Exit directly after destroy_qp() */
+ if (test_bit(HNS_ROCE_STOP_FLUSH_FLAG, &hr_qp->flush_flag)) {
+ spin_unlock_irqrestore(&hr_qp->flush_lock, flags);
+ return;
+ }
+
+ refcount_inc(&hr_qp->refcount);
+ queue_work(hr_dev->irq_workq, &flush_work->work);
+ spin_unlock_irqrestore(&hr_qp->flush_lock, flags);
+}
+
+void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp)
+{
+ /*
+ * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state
+ * gets into errored mode. Hence, as a workaround to this
+ * hardware limitation, the driver needs to assist in flushing. But
+ * the flush operation uses a mailbox to convey the QP state to
+ * the hardware, which can sleep due to the mutex protection
+ * around the mailbox calls. Hence, use the deferred flush for
+ * now.
+ */
+ if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
+ init_flush_work(dev, qp);
+}
+
+void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
+{
+ struct hns_roce_qp *qp;
+
+ qp = hns_roce_qp_lookup(hr_dev, qpn);
+ if (!qp)
+ return;
+
+ qp->event(qp, (enum hns_roce_event)event_type);
+
+ if (refcount_dec_and_test(&qp->refcount))
+ complete(&qp->free);
+}
+
+void hns_roce_flush_cqe(struct hns_roce_dev *hr_dev, u32 qpn)
+{
+ struct hns_roce_qp *qp;
+
+ qp = hns_roce_qp_lookup(hr_dev, qpn);
+ if (!qp)
+ return;
+
+ qp->state = IB_QPS_ERR;
+ flush_cqe(hr_dev, qp);
+
+ if (refcount_dec_and_test(&qp->refcount))
+ complete(&qp->free);
+}
+
+static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
+ enum hns_roce_event type)
+{
+ struct ib_qp *ibqp = &hr_qp->ibqp;
+ struct ib_event event;
+
+ if (ibqp->event_handler) {
+ event.device = ibqp->device;
+ event.element.qp = ibqp;
+ switch (type) {
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG:
+ event.event = IB_EVENT_PATH_MIG;
+ break;
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ event.event = IB_EVENT_COMM_EST;
+ break;
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+ event.event = IB_EVENT_SQ_DRAINED;
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+ event.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ break;
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ event.event = IB_EVENT_QP_FATAL;
+ break;
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
+ event.event = IB_EVENT_PATH_MIG_ERR;
+ break;
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ event.event = IB_EVENT_QP_REQ_ERR;
+ break;
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
+ case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ break;
+ default:
+ dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
+ type, hr_qp->qpn);
+ return;
+ }
+ ibqp->event_handler(&event, ibqp->qp_context);
+ }
+}
+
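+/*
+ * A QP bank maps to a CQ bank by dropping the lowest bit of the QP
+ * bank id; get_least_load_bankid_for_qp() prefers QP banks whose
+ * affine CQ bank matches the send CQ.
+ */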
+static u8 get_affinity_cq_bank(u8 qp_bank)
+{
+ return (qp_bank >> 1) & CQ_BANKID_MASK;
+}
+
+static u8 get_least_load_bankid_for_qp(struct ib_qp_init_attr *init_attr,
+ struct hns_roce_bank *bank)
+{
+#define INVALID_LOAD_QPNUM 0xFFFFFFFF
+ struct ib_cq *scq = init_attr->send_cq;
+ u32 least_load = INVALID_LOAD_QPNUM;
+ unsigned long cqn = 0;
+ u8 bankid = 0;
+ u32 bankcnt;
+ u8 i;
+
+ if (scq)
+ cqn = to_hr_cq(scq)->cqn;
+
+ for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
+ if (scq && (get_affinity_cq_bank(i) != (cqn & CQ_BANKID_MASK)))
+ continue;
+
+ bankcnt = bank[i].inuse;
+ if (bankcnt < least_load) {
+ least_load = bankcnt;
+ bankid = i;
+ }
+ }
+
+ return bankid;
+}
+
+static int alloc_qpn_with_bankid(struct hns_roce_bank *bank, u8 bankid,
+ unsigned long *qpn)
+{
+ int id;
+
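+ /*
+ * Allocate from bank->next first so that QPNs keep advancing and
+ * recently freed ids are not reused immediately; fall back to the
+ * full range once the upper part of the bank is exhausted.
+ */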
+ id = ida_alloc_range(&bank->ida, bank->next, bank->max, GFP_KERNEL);
+ if (id < 0) {
+ id = ida_alloc_range(&bank->ida, bank->min, bank->max,
+ GFP_KERNEL);
+ if (id < 0)
+ return id;
+ }
+
+ /* the QPN should keep increasing until the max value is reached. */
+ bank->next = (id + 1) > bank->max ? bank->min : id + 1;
+
+ /* the lower 3 bits are the bankid */
+ *qpn = (id << 3) | bankid;
+
+ return 0;
+}
+
+static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+ unsigned long num = 0;
+ u8 bankid;
+ int ret;
+
+ if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
+ num = 1;
+ } else {
+ mutex_lock(&qp_table->bank_mutex);
+ bankid = get_least_load_bankid_for_qp(init_attr, qp_table->bank);
+
+ ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
+ &num);
+ if (ret) {
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to alloc QPN, ret = %d\n", ret);
+ mutex_unlock(&qp_table->bank_mutex);
+ return ret;
+ }
+
+ qp_table->bank[bankid].inuse++;
+ mutex_unlock(&qp_table->bank_mutex);
+ }
+
+ hr_qp->qpn = num;
+
+ return 0;
+}
+
+static void add_qp_to_list(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct ib_cq *send_cq, struct ib_cq *recv_cq)
+{
+ struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
+ unsigned long flags;
+
+ hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
+ hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;
+
+ spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
+ hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);
+
+ list_add_tail(&hr_qp->node, &hr_dev->qp_list);
+ if (hr_send_cq)
+ list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
+ if (hr_recv_cq)
+ list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);
+
+ hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
+ spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
+}
+
+static int hns_roce_qp_store(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct xarray *xa = &hr_dev->qp_table_xa;
+ int ret;
+
+ if (!hr_qp->qpn)
+ return -EINVAL;
+
+ ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL));
+ if (ret)
+ dev_err(hr_dev->dev, "failed to xa store for QPC\n");
+ else
+ /* add the QP to the device's QP list for software CQE flushing */
+ add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,
+ init_attr->recv_cq);
+
+ return ret;
+}
+
+static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ if (!hr_qp->qpn)
+ return -EINVAL;
+
+ /* Alloc memory for QPC */
+ ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
+ if (ret) {
+ dev_err(dev, "failed to get QPC table\n");
+ goto err_out;
+ }
+
+ /* Alloc memory for IRRL */
+ ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
+ if (ret) {
+ dev_err(dev, "failed to get IRRL table\n");
+ goto err_put_qp;
+ }
+
+ if (hr_dev->caps.trrl_entry_sz) {
+ /* Alloc memory for TRRL */
+ ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
+ hr_qp->qpn);
+ if (ret) {
+ dev_err(dev, "failed to get TRRL table\n");
+ goto err_put_irrl;
+ }
+ }
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
+ /* Alloc memory for SCC CTX */
+ ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
+ hr_qp->qpn);
+ if (ret) {
+ dev_err(dev, "failed to get SCC CTX table\n");
+ goto err_put_trrl;
+ }
+ }
+
+ return 0;
+
+err_put_trrl:
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
+
+err_put_irrl:
+ hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
+
+err_put_qp:
+ hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
+
+err_out:
+ return ret;
+}
+
+static void qp_user_mmap_entry_remove(struct hns_roce_qp *hr_qp)
+{
+ rdma_user_mmap_entry_remove(&hr_qp->dwqe_mmap_entry->rdma_entry);
+}
+
+void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+{
+ struct xarray *xa = &hr_dev->qp_table_xa;
+ unsigned long flags;
+
+ list_del(&hr_qp->node);
+
+ if (hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
+ list_del(&hr_qp->sq_node);
+
+ if (hr_qp->ibqp.qp_type != IB_QPT_XRC_INI &&
+ hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
+ list_del(&hr_qp->rq_node);
+
+ xa_lock_irqsave(xa, flags);
+ __xa_erase(xa, hr_qp->qpn);
+ xa_unlock_irqrestore(xa, flags);
+}
+
+static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
+ hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
+}
+
+static inline u8 get_qp_bankid(unsigned long qpn)
+{
+ /* The lower 3 bits of QPN are used to hash to different banks */
+ return (u8)(qpn & GENMASK(2, 0));
+}
+
+static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+{
+ u8 bankid;
+
+ if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
+ return;
+
+ if (hr_qp->qpn < hr_dev->caps.reserved_qps)
+ return;
+
+ bankid = get_qp_bankid(hr_qp->qpn);
+
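+ /* Dividing by the bank count recovers the per-bank id handed out by the IDA. */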
+ ida_free(&hr_dev->qp_table.bank[bankid].ida,
+ hr_qp->qpn / HNS_ROCE_QP_BANK_NUM);
+
+ mutex_lock(&hr_dev->qp_table.bank_mutex);
+ hr_dev->qp_table.bank[bankid].inuse--;
+ mutex_unlock(&hr_dev->qp_table.bank_mutex);
+}
+
+static u32 proc_rq_sge(struct hns_roce_dev *dev, struct hns_roce_qp *hr_qp,
+ bool user)
+{
+ u32 max_sge = dev->caps.max_rq_sg;
+
+ if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ return max_sge;
+
+ /* Reserve SGEs only for HIP08 in kernel space; the userspace driver
+ * calculates max_sge with the reserved SGEs when allocating the WQE
+ * buffer, so there is no need to do this again in the kernel. But the
+ * number may exceed the SGE capacity recorded in the firmware, so the
+ * kernel driver should adapt the value accordingly.
+ */
+ if (user)
+ max_sge = roundup_pow_of_two(max_sge + 1);
+ else
+ hr_qp->rq.rsv_sge = 1;
+
+ return max_sge;
+}
+
+static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
+ struct hns_roce_qp *hr_qp, int has_rq, bool user)
+{
+ u32 max_sge = proc_rq_sge(hr_dev, hr_qp, user);
+ u32 cnt;
+
+ /* If an SRQ exists, zero the RQ-related fields */
+ if (!has_rq) {
+ hr_qp->rq.wqe_cnt = 0;
+ hr_qp->rq.max_gs = 0;
+ cap->max_recv_wr = 0;
+ cap->max_recv_sge = 0;
+
+ return 0;
+ }
+
+ /* Check the validity of QP support capacity */
+ if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes ||
+ cap->max_recv_sge > max_sge) {
+ ibdev_err(&hr_dev->ib_dev,
+ "RQ config error, depth = %u, sge = %u\n",
+ cap->max_recv_wr, cap->max_recv_sge);
+ return -EINVAL;
+ }
+
+ cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes));
+ if (cnt > hr_dev->caps.max_wqes) {
+ ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n",
+ cap->max_recv_wr);
+ return -EINVAL;
+ }
+
+ hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
+ hr_qp->rq.rsv_sge);
+
+ hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
+ hr_qp->rq.max_gs);
+
+ hr_qp->rq.wqe_cnt = cnt;
+
+ cap->max_recv_wr = cnt;
+ cap->max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
+
+ return 0;
+}
+
+static u32 get_max_inline_data(struct hns_roce_dev *hr_dev,
+ struct ib_qp_cap *cap)
+{
+ if (cap->max_inline_data) {
+ cap->max_inline_data = roundup_pow_of_two(cap->max_inline_data);
+ return min(cap->max_inline_data,
+ hr_dev->caps.max_sq_inline);
+ }
+
+ return 0;
+}
+
+static void update_inline_data(struct hns_roce_qp *hr_qp,
+ struct ib_qp_cap *cap)
+{
+ u32 sge_num = hr_qp->sq.ext_sge_cnt;
+
+ if (hr_qp->config & HNS_ROCE_EXSGE_FLAGS) {
+ if (!(hr_qp->ibqp.qp_type == IB_QPT_GSI ||
+ hr_qp->ibqp.qp_type == IB_QPT_UD))
+ sge_num = max((u32)HNS_ROCE_SGE_IN_WQE, sge_num);
+
+ cap->max_inline_data = max(cap->max_inline_data,
+ sge_num * HNS_ROCE_SGE_SIZE);
+ }
+
+ hr_qp->max_inline_data = cap->max_inline_data;
+}
+
+static u32 get_sge_num_from_max_send_sge(bool is_ud_or_gsi,
+ u32 max_send_sge)
+{
+ unsigned int std_sge_num;
+ unsigned int min_sge;
+
+ std_sge_num = is_ud_or_gsi ? 0 : HNS_ROCE_SGE_IN_WQE;
+ min_sge = is_ud_or_gsi ? 1 : 0;
+ return max_send_sge > std_sge_num ? (max_send_sge - std_sge_num) :
+ min_sge;
+}
+
+static unsigned int get_sge_num_from_max_inl_data(bool is_ud_or_gsi,
+ u32 max_inline_data)
+{
+ unsigned int inline_sge;
+
+ if (!max_inline_data)
+ return 0;
+
+ /*
+ * For non-UD/GSI QPs, if max_inline_data fits within
+ * HNS_ROCE_SGE_IN_WQE * HNS_ROCE_SGE_SIZE, the inline data can use
+ * the standard SGEs in the WQE and no extended SGEs are needed.
+ */
+ inline_sge = roundup_pow_of_two(max_inline_data) / HNS_ROCE_SGE_SIZE;
+ if (!is_ud_or_gsi && inline_sge <= HNS_ROCE_SGE_IN_WQE)
+ inline_sge = 0;
+
+ return inline_sge;
+}
+
+static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
+ struct hns_roce_qp *hr_qp, struct ib_qp_cap *cap)
+{
+ bool is_ud_or_gsi = (hr_qp->ibqp.qp_type == IB_QPT_GSI ||
+ hr_qp->ibqp.qp_type == IB_QPT_UD);
+ unsigned int std_sge_num;
+ u32 inline_ext_sge = 0;
+ u32 ext_wqe_sge_cnt;
+ u32 total_sge_cnt;
+
+ cap->max_inline_data = get_max_inline_data(hr_dev, cap);
+
+ hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
+ std_sge_num = is_ud_or_gsi ? 0 : HNS_ROCE_SGE_IN_WQE;
+ ext_wqe_sge_cnt = get_sge_num_from_max_send_sge(is_ud_or_gsi,
+ cap->max_send_sge);
+
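+ /*
+ * With extended SGEs enabled, reserve enough extended SGE slots for
+ * either the requested send SGEs or the inline payload, whichever
+ * is larger, rounded up to a power of two.
+ */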
+ if (hr_qp->config & HNS_ROCE_EXSGE_FLAGS) {
+ inline_ext_sge = max(ext_wqe_sge_cnt,
+ get_sge_num_from_max_inl_data(is_ud_or_gsi,
+ cap->max_inline_data));
+ hr_qp->sq.ext_sge_cnt = inline_ext_sge ?
+ roundup_pow_of_two(inline_ext_sge) : 0;
+
+ hr_qp->sq.max_gs = max(1U, (hr_qp->sq.ext_sge_cnt + std_sge_num));
+ hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg);
+
+ ext_wqe_sge_cnt = hr_qp->sq.ext_sge_cnt;
+ } else {
+ hr_qp->sq.max_gs = max(1U, cap->max_send_sge);
+ hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg);
+ hr_qp->sq.ext_sge_cnt = hr_qp->sq.max_gs;
+ }
+
+ /* If the number of extended SGEs is not zero, they must occupy at
+ * least HNS_HW_PAGE_SIZE of buffer space.
+ */
+ if (ext_wqe_sge_cnt) {
+ total_sge_cnt = roundup_pow_of_two(sq_wqe_cnt * ext_wqe_sge_cnt);
+ hr_qp->sge.sge_cnt = max(total_sge_cnt,
+ (u32)HNS_HW_PAGE_SIZE / HNS_ROCE_SGE_SIZE);
+ }
+
+ update_inline_data(hr_qp, cap);
+}
+
+static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
+ struct ib_qp_cap *cap,
+ struct hns_roce_ib_create_qp *ucmd)
+{
+ u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
+ u8 max_sq_stride = ilog2(roundup_sq_stride);
+
+ /* Sanity check SQ size before proceeding */
+ if (ucmd->log_sq_stride > max_sq_stride ||
+ ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
+ ibdev_err(&hr_dev->ib_dev, "failed to check SQ stride size.\n");
+ return -EINVAL;
+ }
+
+ if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
+ ibdev_err(&hr_dev->ib_dev, "failed to check SQ SGE size %u.\n",
+ cap->max_send_sge);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int set_user_sq_size(struct hns_roce_dev *hr_dev,
+ struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp,
+ struct hns_roce_ib_create_qp *ucmd)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u32 cnt = 0;
+ int ret;
+
+ if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
+ cnt > hr_dev->caps.max_wqes)
+ return -EINVAL;
+
+ ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
+ if (ret) {
+ ibdev_err(ibdev, "failed to check user SQ size, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ set_ext_sge_param(hr_dev, cnt, hr_qp, cap);
+
+ hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
+ hr_qp->sq.wqe_cnt = cnt;
+ cap->max_send_sge = hr_qp->sq.max_gs;
+
+ return 0;
+}
+
+static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct hns_roce_buf_attr *buf_attr)
+{
+ int buf_size;
+ int idx = 0;
+
+ hr_qp->buff_size = 0;
+
+ /* SQ WQE */
+ hr_qp->sq.offset = 0;
+ buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt,
+ hr_qp->sq.wqe_shift);
+ if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
+ buf_attr->region[idx].size = buf_size;
+ buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num;
+ idx++;
+ hr_qp->buff_size += buf_size;
+ }
+
+ /* extend SGE WQE in SQ */
+ hr_qp->sge.offset = hr_qp->buff_size;
+ buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt,
+ hr_qp->sge.sge_shift);
+ if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
+ buf_attr->region[idx].size = buf_size;
+ buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num;
+ idx++;
+ hr_qp->buff_size += buf_size;
+ }
+
+ /* RQ WQE */
+ hr_qp->rq.offset = hr_qp->buff_size;
+ buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt,
+ hr_qp->rq.wqe_shift);
+ if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
+ buf_attr->region[idx].size = buf_size;
+ buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num;
+ idx++;
+ hr_qp->buff_size += buf_size;
+ }
+
+ if (hr_qp->buff_size < 1)
+ return -EINVAL;
+
+ buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
+ buf_attr->region_count = idx;
+
+ return 0;
+}
+
+static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
+ struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u32 cnt;
+
+ if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
+ cap->max_send_sge > hr_dev->caps.max_sq_sg) {
+ ibdev_err(ibdev, "failed to check SQ WR or SGE num.\n");
+ return -EINVAL;
+ }
+
+ cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes));
+ if (cnt > hr_dev->caps.max_wqes) {
+ ibdev_err(ibdev, "failed to check WQE num, WQE num = %u.\n",
+ cnt);
+ return -EINVAL;
+ }
+
+ hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
+ hr_qp->sq.wqe_cnt = cnt;
+
+ set_ext_sge_param(hr_dev, cnt, hr_qp, cap);
+
+ /* report the actual kernel QP parameters back in the caller's capabilities */
+ cap->max_send_wr = cnt;
+ cap->max_send_sge = hr_qp->sq.max_gs;
+
+ return 0;
+}
+
+static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
+{
+ if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
+ return 0;
+
+ return 1;
+}
+
+static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
+{
+ if (attr->qp_type == IB_QPT_XRC_INI ||
+ attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
+ !attr->cap.max_recv_wr)
+ return 0;
+
+ return 1;
+}
+
+static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata, unsigned long addr)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_buf_attr buf_attr = {};
+ int ret;
+
+ ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr);
+ if (ret) {
+ ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret);
+ goto err_inline;
+ }
+ ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr,
+ PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
+ udata, addr);
+ if (ret) {
+ ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
+ goto err_inline;
+ }
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE)
+ hr_qp->en_flags |= HNS_ROCE_QP_CAP_DIRECT_WQE;
+
+ return 0;
+
+err_inline:
+
+ return ret;
+}
+
+static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+{
+ hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr);
+}
+
+static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata,
+ struct hns_roce_ib_create_qp_resp *resp,
+ struct hns_roce_ib_create_qp *ucmd)
+{
+ return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
+ udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
+ hns_roce_qp_has_sq(init_attr) &&
+ udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));
+}
+
+static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata,
+ struct hns_roce_ib_create_qp_resp *resp)
+{
+ return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
+ udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
+ hns_roce_qp_has_rq(init_attr));
+}
+
+static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
+ struct ib_qp_init_attr *init_attr)
+{
+ return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
+ hns_roce_qp_has_rq(init_attr));
+}
+
+static int qp_mmap_entry(struct hns_roce_qp *hr_qp,
+ struct hns_roce_dev *hr_dev,
+ struct ib_udata *udata,
+ struct hns_roce_ib_create_qp_resp *resp)
+{
+ struct hns_roce_ucontext *uctx =
+ rdma_udata_to_drv_context(udata,
+ struct hns_roce_ucontext, ibucontext);
+ struct rdma_user_mmap_entry *rdma_entry;
+ u64 address;
+
+ address = hr_dev->dwqe_page + hr_qp->qpn * HNS_ROCE_DWQE_SIZE;
+
+ hr_qp->dwqe_mmap_entry =
+ hns_roce_user_mmap_entry_insert(&uctx->ibucontext, address,
+ HNS_ROCE_DWQE_SIZE,
+ HNS_ROCE_MMAP_TYPE_DWQE);
+
+ if (!hr_qp->dwqe_mmap_entry) {
+ ibdev_err(&hr_dev->ib_dev, "failed to get dwqe mmap entry.\n");
+ return -ENOMEM;
+ }
+
+ rdma_entry = &hr_qp->dwqe_mmap_entry->rdma_entry;
+ resp->dwqe_mmap_key = rdma_user_mmap_get_offset(rdma_entry);
+
+ return 0;
+}
+
+static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata,
+ struct hns_roce_ib_create_qp *ucmd,
+ struct hns_roce_ib_create_qp_resp *resp)
+{
+ bool has_sdb = user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd);
+ struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata,
+ struct hns_roce_ucontext, ibucontext);
+ bool has_rdb = user_qp_has_rdb(hr_dev, init_attr, udata, resp);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int ret;
+
+ if (has_sdb) {
+ ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb);
+ if (ret) {
+ ibdev_err(ibdev,
+ "failed to map user SQ doorbell, ret = %d.\n",
+ ret);
+ goto err_out;
+ }
+ hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
+ }
+
+ if (has_rdb) {
+ ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb);
+ if (ret) {
+ ibdev_err(ibdev,
+ "failed to map user RQ doorbell, ret = %d.\n",
+ ret);
+ goto err_sdb;
+ }
+ hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
+ }
+
+ return 0;
+
+err_sdb:
+ if (has_sdb)
+ hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
+err_out:
+ return ret;
+}
+
+static int alloc_kernel_qp_db(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int ret;
+
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ hr_qp->sq.db_reg = hr_dev->mem_base +
+ HNS_ROCE_DWQE_SIZE * hr_qp->qpn;
+ else
+ hr_qp->sq.db_reg = hr_dev->reg_base + hr_dev->sdb_offset +
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
+
+ hr_qp->rq.db_reg = hr_dev->reg_base + hr_dev->odb_offset +
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
+
+ if (kernel_qp_has_rdb(hr_dev, init_attr)) {
+ ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
+ if (ret) {
+ ibdev_err(ibdev,
+ "failed to alloc kernel RQ doorbell, ret = %d.\n",
+ ret);
+ return ret;
+ }
+ *hr_qp->rdb.db_record = 0;
+ hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
+ }
+
+ return 0;
+}
+
+static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata,
+ struct hns_roce_ib_create_qp *ucmd,
+ struct hns_roce_ib_create_qp_resp *resp)
+{
+ int ret;
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SDI_MODE)
+ hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB;
+
+ if (udata) {
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) {
+ ret = qp_mmap_entry(hr_qp, hr_dev, udata, resp);
+ if (ret)
+ return ret;
+ }
+
+ ret = alloc_user_qp_db(hr_dev, hr_qp, init_attr, udata, ucmd,
+ resp);
+ if (ret)
+ goto err_remove_qp;
+ } else {
+ ret = alloc_kernel_qp_db(hr_dev, hr_qp, init_attr);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+
+err_remove_qp:
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
+ qp_user_mmap_entry_remove(hr_qp);
+
+ return ret;
+}
+
+static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ struct ib_udata *udata)
+{
+ struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct hns_roce_ucontext, ibucontext);
+
+ if (udata) {
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
+ hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
+ hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
+ qp_user_mmap_entry_remove(hr_qp);
+ } else {
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
+ hns_roce_free_db(hr_dev, &hr_qp->rdb);
+ }
+}
+
+static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u64 *sq_wrid = NULL;
+ u64 *rq_wrid = NULL;
+ int ret;
+
+ sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
+ if (!sq_wrid) {
+ ibdev_err(ibdev, "failed to alloc SQ wrid.\n");
+ return -ENOMEM;
+ }
+
+ if (hr_qp->rq.wqe_cnt) {
+ rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
+ if (!rq_wrid) {
+ ibdev_err(ibdev, "failed to alloc RQ wrid.\n");
+ ret = -ENOMEM;
+ goto err_sq;
+ }
+ }
+
+ hr_qp->sq.wrid = sq_wrid;
+ hr_qp->rq.wrid = rq_wrid;
+ return 0;
+err_sq:
+ kfree(sq_wrid);
+
+ return ret;
+}
+
+static void free_kernel_wrid(struct hns_roce_qp *hr_qp)
+{
+ kfree(hr_qp->rq.wrid);
+ kfree(hr_qp->sq.wrid);
+}
+
+static void default_congest_type(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp)
+{
+ if (hr_qp->ibqp.qp_type == IB_QPT_UD ||
+ hr_qp->ibqp.qp_type == IB_QPT_GSI)
+ hr_qp->cong_type = CONG_TYPE_DCQCN;
+ else
+ hr_qp->cong_type = hr_dev->caps.default_cong_type;
+}
+
+static int set_congest_type(struct hns_roce_qp *hr_qp,
+ struct hns_roce_ib_create_qp *ucmd)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
+
+ switch (ucmd->cong_type_flags) {
+ case HNS_ROCE_CREATE_QP_FLAGS_DCQCN:
+ hr_qp->cong_type = CONG_TYPE_DCQCN;
+ break;
+ case HNS_ROCE_CREATE_QP_FLAGS_LDCP:
+ hr_qp->cong_type = CONG_TYPE_LDCP;
+ break;
+ case HNS_ROCE_CREATE_QP_FLAGS_HC3:
+ hr_qp->cong_type = CONG_TYPE_HC3;
+ break;
+ case HNS_ROCE_CREATE_QP_FLAGS_DIP:
+ hr_qp->cong_type = CONG_TYPE_DIP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!test_bit(hr_qp->cong_type, (unsigned long *)&hr_dev->caps.cong_cap))
+ return -EOPNOTSUPP;
+
+ if (hr_qp->ibqp.qp_type == IB_QPT_UD &&
+ hr_qp->cong_type != CONG_TYPE_DCQCN)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int set_congest_param(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct hns_roce_ib_create_qp *ucmd)
+{
+ if (ucmd->comp_mask & HNS_ROCE_CREATE_QP_MASK_CONGEST_TYPE)
+ return set_congest_type(hr_qp, ucmd);
+
+ default_congest_type(hr_dev, hr_qp);
+
+ return 0;
+}
+
+static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata,
+ struct hns_roce_ib_create_qp *ucmd)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_ucontext *uctx;
+ int ret;
+
+ if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
+ else
+ hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;
+
+ ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp,
+ hns_roce_qp_has_rq(init_attr), !!udata);
+ if (ret) {
+ ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ if (udata) {
+ ret = ib_copy_from_udata(ucmd, udata,
+ min(udata->inlen, sizeof(*ucmd)));
+ if (ret) {
+ ibdev_err(ibdev,
+ "failed to copy QP ucmd, ret = %d\n", ret);
+ return ret;
+ }
+
+ uctx = rdma_udata_to_drv_context(udata, struct hns_roce_ucontext,
+ ibucontext);
+ hr_qp->config = uctx->config;
+ ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
+ if (ret) {
+ ibdev_err(ibdev,
+ "failed to set user SQ size, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ ret = set_congest_param(hr_dev, hr_qp, ucmd);
+ } else {
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ hr_qp->config = HNS_ROCE_EXSGE_FLAGS;
+ default_congest_type(hr_dev, hr_qp);
+ ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
+ if (ret)
+ ibdev_err(ibdev,
+ "failed to set kernel SQ size, ret = %d.\n",
+ ret);
+ }
+
+ return ret;
+}
+
+static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata,
+ struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_work *flush_work = &hr_qp->flush_work;
+ struct hns_roce_ib_create_qp_resp resp = {};
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_ib_create_qp ucmd = {};
+ int ret;
+
+ mutex_init(&hr_qp->mutex);
+ spin_lock_init(&hr_qp->sq.lock);
+ spin_lock_init(&hr_qp->rq.lock);
+ spin_lock_init(&hr_qp->flush_lock);
+
+ hr_qp->state = IB_QPS_RESET;
+ hr_qp->flush_flag = 0;
+ flush_work->hr_dev = hr_dev;
+ INIT_WORK(&flush_work->work, flush_work_handle);
+
+ if (init_attr->create_flags)
+ return -EOPNOTSUPP;
+
+ ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
+ if (ret) {
+ ibdev_err(ibdev, "failed to set QP param, ret = %d.\n", ret);
+ goto err_out;
+ }
+
+ if (!udata) {
+ ret = alloc_kernel_wrid(hr_dev, hr_qp);
+ if (ret) {
+ ibdev_err(ibdev, "failed to alloc wrid, ret = %d.\n",
+ ret);
+ goto err_out;
+ }
+ }
+
+ ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
+ if (ret) {
+ ibdev_err(ibdev, "failed to alloc QP buffer, ret = %d.\n", ret);
+ goto err_buf;
+ }
+
+ ret = alloc_qpn(hr_dev, hr_qp, init_attr);
+ if (ret) {
+ ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
+ goto err_qpn;
+ }
+
+ ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
+ if (ret) {
+ ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n",
+ ret);
+ goto err_db;
+ }
+
+ ret = alloc_qpc(hr_dev, hr_qp);
+ if (ret) {
+ ibdev_err(ibdev, "failed to alloc QP context, ret = %d.\n",
+ ret);
+ goto err_qpc;
+ }
+
+ ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
+ if (ret) {
+ ibdev_err(ibdev, "failed to store QP, ret = %d.\n", ret);
+ goto err_store;
+ }
+
+ if (udata) {
+ resp.cap_flags = hr_qp->en_flags;
+ ret = ib_copy_to_udata(udata, &resp,
+ min(udata->outlen, sizeof(resp)));
+ if (ret) {
+ ibdev_err(ibdev, "copy qp resp failed!\n");
+ goto err_flow_ctrl;
+ }
+ }
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
+ ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
+ if (ret)
+ goto err_flow_ctrl;
+ }
+
+ hr_qp->ibqp.qp_num = hr_qp->qpn;
+ hr_qp->event = hns_roce_ib_qp_event;
+ refcount_set(&hr_qp->refcount, 1);
+ init_completion(&hr_qp->free);
+
+ return 0;
+
+err_flow_ctrl:
+ hns_roce_qp_remove(hr_dev, hr_qp);
+err_store:
+ free_qpc(hr_dev, hr_qp);
+err_qpc:
+ free_qp_db(hr_dev, hr_qp, udata);
+err_db:
+ free_qpn(hr_dev, hr_qp);
+err_qpn:
+ free_qp_buf(hr_dev, hr_qp);
+err_buf:
+ free_kernel_wrid(hr_qp);
+err_out:
+ mutex_destroy(&hr_qp->mutex);
+ return ret;
+}
+
+void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ struct ib_udata *udata)
+{
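+ /* Drop the initial reference and wait for all other users to release theirs. */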
+ if (refcount_dec_and_test(&hr_qp->refcount))
+ complete(&hr_qp->free);
+ wait_for_completion(&hr_qp->free);
+
+ free_qpc(hr_dev, hr_qp);
+ free_qpn(hr_dev, hr_qp);
+ free_qp_buf(hr_dev, hr_qp);
+ free_kernel_wrid(hr_qp);
+ free_qp_db(hr_dev, hr_qp, udata);
+ mutex_destroy(&hr_qp->mutex);
+}
+
+static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
+ bool is_user)
+{
+ switch (type) {
+ case IB_QPT_XRC_INI:
+ case IB_QPT_XRC_TGT:
+ if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
+ goto out;
+ break;
+ case IB_QPT_UD:
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 &&
+ is_user)
+ goto out;
+ break;
+ case IB_QPT_RC:
+ case IB_QPT_GSI:
+ break;
+ default:
+ goto out;
+ }
+
+ return 0;
+
+out:
+ ibdev_err(&hr_dev->ib_dev, "not support QP type %d\n", type);
+
+ return -EOPNOTSUPP;
+}
+
+int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct ib_device *ibdev = qp->device;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
+ struct hns_roce_qp *hr_qp = to_hr_qp(qp);
+ int ret;
+
+ ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata);
+ if (ret)
+ goto err_out;
+
+ if (init_attr->qp_type == IB_QPT_XRC_TGT)
+ hr_qp->xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn;
+
+ if (init_attr->qp_type == IB_QPT_GSI) {
+ hr_qp->port = init_attr->port_num - 1;
+ hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
+ }
+
+ ret = hns_roce_create_qp_common(hr_dev, init_attr, udata, hr_qp);
+ if (ret)
+ ibdev_err(ibdev, "create QP type %d failed(%d)\n",
+ init_attr->qp_type, ret);
+
+err_out:
+ if (ret)
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_QP_CREATE_ERR_CNT]);
+
+ return ret;
+}
+
+int to_hr_qp_type(int qp_type)
+{
+ switch (qp_type) {
+ case IB_QPT_RC:
+ return SERV_TYPE_RC;
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ return SERV_TYPE_UD;
+ case IB_QPT_XRC_INI:
+ case IB_QPT_XRC_TGT:
+ return SERV_TYPE_XRC;
+ default:
+ return -1;
+ }
+}
+
+static int check_mtu_validate(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct ib_qp_attr *attr, int attr_mask)
+{
+ enum ib_mtu active_mtu;
+ int p;
+
+ p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
+ active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
+
+ if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
+ attr->path_mtu > hr_dev->caps.max_mtu) ||
+ attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) {
+ ibdev_err(&hr_dev->ib_dev,
+ "attr path_mtu(%d)invalid while modify qp",
+ attr->path_mtu);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ int p;
+
+ if ((attr_mask & IB_QP_PORT) &&
+ (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
+ ibdev_err(&hr_dev->ib_dev, "invalid attr, port_num = %u.\n",
+ attr->port_num);
+ return -EINVAL;
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX) {
+ p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
+ if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
+ ibdev_err(&hr_dev->ib_dev,
+ "invalid attr, pkey_index = %u.\n",
+ attr->pkey_index);
+ return -EINVAL;
+ }
+ }
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+ attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
+ ibdev_err(&hr_dev->ib_dev,
+ "invalid attr, max_rd_atomic = %u.\n",
+ attr->max_rd_atomic);
+ return -EINVAL;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+ attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
+ ibdev_err(&hr_dev->ib_dev,
+ "invalid attr, max_dest_rd_atomic = %u.\n",
+ attr->max_dest_rd_atomic);
+ return -EINVAL;
+ }
+
+ if (attr_mask & IB_QP_PATH_MTU)
+ return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask);
+
+ return 0;
+}
+
+int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_ib_modify_qp_resp resp = {};
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ enum ib_qp_state cur_state, new_state;
+ int ret = -EINVAL;
+
+ mutex_lock(&hr_qp->mutex);
+
+ if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state)
+ goto out;
+
+ cur_state = hr_qp->state;
+ new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+
+ if (ibqp->uobject &&
+ (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) {
+ hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
+
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
+ hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
+ } else {
+ ibdev_warn(&hr_dev->ib_dev,
+ "flush cqe is not supported in userspace!\n");
+ goto out;
+ }
+ }
+
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
+ attr_mask)) {
+ ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n");
+ goto out;
+ }
+
+ ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask);
+ if (ret)
+ goto out;
+
+ if (cur_state == new_state && cur_state == IB_QPS_RESET)
+ goto out;
+
+ ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
+ new_state, udata);
+ if (ret)
+ goto out;
+
+ if (udata && udata->outlen) {
+ resp.tc_mode = hr_qp->tc_mode;
+ resp.priority = hr_qp->sl;
+ ret = ib_copy_to_udata(udata, &resp,
+ min(udata->outlen, sizeof(resp)));
+ if (ret)
+ ibdev_err_ratelimited(&hr_dev->ib_dev,
+ "failed to copy modify qp resp.\n");
+ }
+
+out:
+ mutex_unlock(&hr_qp->mutex);
+ if (ret)
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_QP_MODIFY_ERR_CNT]);
+
+ return ret;
+}
+
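+/*
+ * Lock the send and receive CQs in a fixed order (by CQN) so that
+ * concurrent callers cannot deadlock; the __acquire()/__release()
+ * annotations keep sparse's lock checking balanced when a CQ is
+ * missing or shared.
+ */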
+void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
+ __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
+{
+ if (unlikely(send_cq == NULL && recv_cq == NULL)) {
+ __acquire(&send_cq->lock);
+ __acquire(&recv_cq->lock);
+ } else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
+ spin_lock(&send_cq->lock);
+ __acquire(&recv_cq->lock);
+ } else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
+ spin_lock(&recv_cq->lock);
+ __acquire(&send_cq->lock);
+ } else if (send_cq == recv_cq) {
+ spin_lock(&send_cq->lock);
+ __acquire(&recv_cq->lock);
+ } else if (send_cq->cqn < recv_cq->cqn) {
+ spin_lock(&send_cq->lock);
+ spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
+ } else {
+ spin_lock(&recv_cq->lock);
+ spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
+ }
+}
+
+void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
+ struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
+ __releases(&recv_cq->lock)
+{
+ if (unlikely(send_cq == NULL && recv_cq == NULL)) {
+ __release(&recv_cq->lock);
+ __release(&send_cq->lock);
+ } else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
+ __release(&recv_cq->lock);
+ spin_unlock(&send_cq->lock);
+ } else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
+ __release(&send_cq->lock);
+ spin_unlock(&recv_cq->lock);
+ } else if (send_cq == recv_cq) {
+ __release(&recv_cq->lock);
+ spin_unlock(&send_cq->lock);
+ } else if (send_cq->cqn < recv_cq->cqn) {
+ spin_unlock(&recv_cq->lock);
+ spin_unlock(&send_cq->lock);
+ } else {
+ spin_unlock(&send_cq->lock);
+ spin_unlock(&recv_cq->lock);
+ }
+}
+
+static inline void *get_wqe(struct hns_roce_qp *hr_qp, u32 offset)
+{
+ return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
+}
+
+void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
+{
+ return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
+}
+
+void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
+{
+ return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
+}
+
+void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n)
+{
+ return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift));
+}
+
+bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
+ struct ib_cq *ib_cq)
+{
+ struct hns_roce_cq *hr_cq;
+ u32 cur;
+
+ cur = hr_wq->head - hr_wq->tail;
+ if (likely(cur + nreq < hr_wq->wqe_cnt))
+ return false;
+
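+ /*
+ * Near the limit, re-read head and tail under the CQ lock to get a
+ * stable view before deciding whether the work queue is full.
+ */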
+ hr_cq = to_hr_cq(ib_cq);
+ spin_lock(&hr_cq->lock);
+ cur = hr_wq->head - hr_wq->tail;
+ spin_unlock(&hr_cq->lock);
+
+ return cur + nreq >= hr_wq->wqe_cnt;
+}
+
+int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+ unsigned int reserved_from_bot;
+ unsigned int i;
+
+ mutex_init(&qp_table->scc_mutex);
+ mutex_init(&qp_table->bank_mutex);
+ xa_init(&hr_dev->qp_table_xa);
+ xa_init(&qp_table->dip_xa);
+
+ reserved_from_bot = hr_dev->caps.reserved_qps;
+
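+ /* Spread the reserved QPNs across the banks round-robin. */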
+ for (i = 0; i < reserved_from_bot; i++) {
+ hr_dev->qp_table.bank[get_qp_bankid(i)].inuse++;
+ hr_dev->qp_table.bank[get_qp_bankid(i)].min++;
+ }
+
+ for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
+ ida_init(&hr_dev->qp_table.bank[i].ida);
+ hr_dev->qp_table.bank[i].max = hr_dev->caps.num_qps /
+ HNS_ROCE_QP_BANK_NUM - 1;
+ hr_dev->qp_table.bank[i].next = hr_dev->qp_table.bank[i].min;
+ }
+
+ return 0;
+}
+
+void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
+{
+ int i;
+
+ for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
+ ida_destroy(&hr_dev->qp_table.bank[i].ida);
+ xa_destroy(&hr_dev->qp_table.dip_xa);
+ xa_destroy(&hr_dev->qp_table_xa);
+ mutex_destroy(&hr_dev->qp_table.bank_mutex);
+ mutex_destroy(&hr_dev->qp_table.scc_mutex);
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
new file mode 100644
index 000000000000..230187dda6a0
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+// Copyright (c) 2019 Hisilicon Limited.
+
+#include <rdma/rdma_cm.h>
+#include <rdma/restrack.h>
+#include <uapi/rdma/rdma_netlink.h>
+#include "hns_roce_common.h"
+#include "hns_roce_device.h"
+#include "hns_roce_hw_v2.h"
+
+int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
+{
+ struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+ struct nlattr *table_attr;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ if (rdma_nl_put_driver_u32(msg, "cq_depth", hr_cq->cq_depth))
+ goto err;
+
+ if (rdma_nl_put_driver_u32(msg, "cons_index", hr_cq->cons_index))
+ goto err;
+
+ if (rdma_nl_put_driver_u32(msg, "cqe_size", hr_cq->cqe_size))
+ goto err;
+
+ if (rdma_nl_put_driver_u32(msg, "arm_sn", hr_cq->arm_sn))
+ goto err;
+
+ nla_nest_end(msg, table_attr);
+
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+
+ return -EMSGSIZE;
+}
+
+int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
+ struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+ struct hns_roce_v2_cq_context context;
+ int ret;
+
+ if (!hr_dev->hw->query_cqc)
+ return -EINVAL;
+
+ ret = hr_dev->hw->query_cqc(hr_dev, hr_cq->cqn, &context);
+ if (ret)
+ return -EINVAL;
+
+ ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);
+
+ return ret;
+}
+
+int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp)
+{
+ struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
+ struct nlattr *table_attr;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ if (rdma_nl_put_driver_u32_hex(msg, "sq_wqe_cnt", hr_qp->sq.wqe_cnt))
+ goto err;
+
+ if (rdma_nl_put_driver_u32_hex(msg, "sq_max_gs", hr_qp->sq.max_gs))
+ goto err;
+
+ if (rdma_nl_put_driver_u32_hex(msg, "rq_wqe_cnt", hr_qp->rq.wqe_cnt))
+ goto err;
+
+ if (rdma_nl_put_driver_u32_hex(msg, "rq_max_gs", hr_qp->rq.max_gs))
+ goto err;
+
+ if (rdma_nl_put_driver_u32_hex(msg, "ext_sge_sge_cnt", hr_qp->sge.sge_cnt))
+ goto err;
+
+ nla_nest_end(msg, table_attr);
+
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+
+ return -EMSGSIZE;
+}
+
+int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
+ struct hns_roce_full_qp_ctx {
+ struct hns_roce_v2_qp_context qpc;
+ struct hns_roce_v2_scc_context sccc;
+ } context = {};
+ u32 sccn = hr_qp->qpn;
+ int ret;
+
+ if (!hr_dev->hw->query_qpc)
+ return -EINVAL;
+
+ ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context.qpc);
+ if (ret)
+ return ret;
+
+ /* If SCC is disabled or the query fails, the queried SCCC will
+ * be all 0.
+ */
+ if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) ||
+ !hr_dev->hw->query_sccc)
+ goto out;
+
+ if (hr_qp->cong_type == CONG_TYPE_DIP) {
+ if (!hr_qp->dip)
+ goto out;
+ sccn = hr_qp->dip->dip_idx;
+ }
+
+ ret = hr_dev->hw->query_sccc(hr_dev, sccn, &context.sccc);
+ if (ret)
+ ibdev_warn_ratelimited(&hr_dev->ib_dev,
+ "failed to query SCCC, ret = %d.\n",
+ ret);
+
+out:
+ ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);
+
+ return ret;
+}
+
+int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr)
+{
+ struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
+ struct nlattr *table_attr;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ if (rdma_nl_put_driver_u32_hex(msg, "pbl_hop_num", hr_mr->pbl_hop_num))
+ goto err;
+
+ if (rdma_nl_put_driver_u32_hex(msg, "ba_pg_shift",
+ hr_mr->pbl_mtr.hem_cfg.ba_pg_shift))
+ goto err;
+
+ if (rdma_nl_put_driver_u32_hex(msg, "buf_pg_shift",
+ hr_mr->pbl_mtr.hem_cfg.buf_pg_shift))
+ goto err;
+
+ nla_nest_end(msg, table_attr);
+
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+
+ return -EMSGSIZE;
+}
+
+int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_mr->device);
+ struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
+ struct hns_roce_v2_mpt_entry context;
+ int ret;
+
+ if (!hr_dev->hw->query_mpt)
+ return -EINVAL;
+
+ ret = hr_dev->hw->query_mpt(hr_dev, hr_mr->key, &context);
+ if (ret)
+ return -EINVAL;
+
+ ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);
+
+ return ret;
+}
+
+int hns_roce_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq)
+{
+ struct hns_roce_srq *hr_srq = to_hr_srq(ib_srq);
+ struct nlattr *table_attr;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ if (rdma_nl_put_driver_u32_hex(msg, "srqn", hr_srq->srqn))
+ goto err;
+
+ if (rdma_nl_put_driver_u32_hex(msg, "wqe_cnt", hr_srq->wqe_cnt))
+ goto err;
+
+ if (rdma_nl_put_driver_u32_hex(msg, "max_gs", hr_srq->max_gs))
+ goto err;
+
+ if (rdma_nl_put_driver_u32_hex(msg, "xrcdn", hr_srq->xrcdn))
+ goto err;
+
+ nla_nest_end(msg, table_attr);
+
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+ return -EMSGSIZE;
+}
+
+int hns_roce_fill_res_srq_entry_raw(struct sk_buff *msg, struct ib_srq *ib_srq)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
+ struct hns_roce_srq *hr_srq = to_hr_srq(ib_srq);
+ struct hns_roce_srq_context context;
+ int ret;
+
+ if (!hr_dev->hw->query_srqc)
+ return -EINVAL;
+
+ ret = hr_dev->hw->query_srqc(hr_dev, hr_srq->srqn, &context);
+ if (ret)
+ return ret;
+
+ ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);
+
+ return ret;
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
new file mode 100644
index 000000000000..1090051f493b
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2018 Hisilicon Limited.
+ */
+
+#include <linux/pci.h>
+#include <rdma/ib_umem.h>
+#include <rdma/uverbs_ioctl.h>
+#include "hns_roce_device.h"
+#include "hns_roce_cmd.h"
+#include "hns_roce_hem.h"
+
+void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type)
+{
+ struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
+ struct hns_roce_srq *srq;
+
+ xa_lock(&srq_table->xa);
+ srq = xa_load(&srq_table->xa, srqn & (hr_dev->caps.num_srqs - 1));
+ if (srq)
+ refcount_inc(&srq->refcount);
+ xa_unlock(&srq_table->xa);
+
+ if (!srq) {
+ dev_warn(hr_dev->dev, "Async event for bogus SRQ %08x\n", srqn);
+ return;
+ }
+
+ srq->event(srq, event_type);
+
+ if (refcount_dec_and_test(&srq->refcount))
+ complete(&srq->free);
+}
+
+static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
+ enum hns_roce_event event_type)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
+ struct ib_srq *ibsrq = &srq->ibsrq;
+ struct ib_event event;
+
+ if (ibsrq->event_handler) {
+ event.device = ibsrq->device;
+ event.element.srq = ibsrq;
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+ event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+ event.event = IB_EVENT_SRQ_ERR;
+ break;
+ default:
+ dev_err(hr_dev->dev,
+ "hns_roce: Unexpected event type %d on SRQ %06lx\n",
+ event_type, srq->srqn);
+ return;
+ }
+
+ ibsrq->event_handler(&event, ibsrq->srq_context);
+ }
+}
+
+static int alloc_srqn(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+{
+ struct hns_roce_ida *srq_ida = &hr_dev->srq_table.srq_ida;
+ int id;
+
+ id = ida_alloc_range(&srq_ida->ida, srq_ida->min, srq_ida->max,
+ GFP_KERNEL);
+ if (id < 0) {
+ ibdev_err(&hr_dev->ib_dev, "failed to alloc srq(%d).\n", id);
+ return -ENOMEM;
+ }
+
+ srq->srqn = id;
+
+ return 0;
+}
+
+static void free_srqn(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+{
+ ida_free(&hr_dev->srq_table.srq_ida.ida, (int)srq->srqn);
+}
+
+static int hns_roce_create_srqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_srq *srq)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox)) {
+ ibdev_err(ibdev, "failed to alloc mailbox for SRQC.\n");
+ return PTR_ERR(mailbox);
+ }
+
+ ret = hr_dev->hw->write_srqc(srq, mailbox->buf);
+ if (ret) {
+ ibdev_err(ibdev, "failed to write SRQC.\n");
+ goto err_mbox;
+ }
+
+ ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_SRQ,
+ srq->srqn);
+ if (ret)
+ ibdev_err(ibdev, "failed to config SRQC, ret = %d.\n", ret);
+
+err_mbox:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ return ret;
+}
+
+static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+{
+ struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int ret;
+
+ ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
+ if (ret) {
+ ibdev_err(ibdev, "failed to get SRQC table, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = xa_err(xa_store_irq(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
+ if (ret) {
+ ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret);
+ goto err_put;
+ }
+
+ ret = hns_roce_create_srqc(hr_dev, srq);
+ if (ret)
+ goto err_xa;
+
+ return 0;
+
+err_xa:
+ xa_erase_irq(&srq_table->xa, srq->srqn);
+err_put:
+ hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
+
+ return ret;
+}
+
+static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+{
+ struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
+ int ret;
+
+ ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_SRQ,
+ srq->srqn);
+ if (ret)
+ dev_err_ratelimited(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
+ ret, srq->srqn);
+
+ xa_erase_irq(&srq_table->xa, srq->srqn);
+
+ if (refcount_dec_and_test(&srq->refcount))
+ complete(&srq->free);
+ wait_for_completion(&srq->free);
+
+ hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
+}
+
+static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ struct ib_udata *udata, unsigned long addr)
+{
+ struct hns_roce_idx_que *idx_que = &srq->idx_que;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_buf_attr buf_attr = {};
+ int ret;
+
+ srq->idx_que.entry_shift = ilog2(HNS_ROCE_IDX_QUE_ENTRY_SZ);
+
+ buf_attr.page_shift = hr_dev->caps.idx_buf_pg_sz + PAGE_SHIFT;
+ buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
+ srq->idx_que.entry_shift);
+ buf_attr.region[0].hopnum = hr_dev->caps.idx_hop_num;
+ buf_attr.region_count = 1;
+
+ ret = hns_roce_mtr_create(hr_dev, &idx_que->mtr, &buf_attr,
+ hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT,
+ udata, addr);
+ if (ret) {
+ ibdev_err(ibdev,
+ "failed to alloc SRQ idx mtr, ret = %d.\n", ret);
+ return ret;
+ }
+
+ if (!udata) {
+ idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
+ if (!idx_que->bitmap) {
+ ibdev_err(ibdev, "failed to alloc SRQ idx bitmap.\n");
+ ret = -ENOMEM;
+ goto err_idx_mtr;
+ }
+ }
+
+ idx_que->head = 0;
+ idx_que->tail = 0;
+
+ return 0;
+err_idx_mtr:
+ hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
+
+ return ret;
+}
+
+static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+{
+ struct hns_roce_idx_que *idx_que = &srq->idx_que;
+
+ bitmap_free(idx_que->bitmap);
+ idx_que->bitmap = NULL;
+ hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
+}
+
+static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev,
+ struct hns_roce_srq *srq,
+ struct ib_udata *udata, unsigned long addr)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_buf_attr buf_attr = {};
+ int ret;
+
+ srq->wqe_shift = ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
+ HNS_ROCE_SGE_SIZE *
+ srq->max_gs)));
+
+ buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + PAGE_SHIFT;
+ buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
+ srq->wqe_shift);
+ buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num;
+ buf_attr.region_count = 1;
+
+ ret = hns_roce_mtr_create(hr_dev, &srq->buf_mtr, &buf_attr,
+ hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT,
+ udata, addr);
+ if (ret)
+ ibdev_err(ibdev,
+ "failed to alloc SRQ buf mtr, ret = %d.\n", ret);
+
+ return ret;
+}
+
+static void free_srq_wqe_buf(struct hns_roce_dev *hr_dev,
+ struct hns_roce_srq *srq)
+{
+ hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr);
+}
+
+static int alloc_srq_wrid(struct hns_roce_srq *srq)
+{
+ srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
+ if (!srq->wrid)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void free_srq_wrid(struct hns_roce_srq *srq)
+{
+ kvfree(srq->wrid);
+ srq->wrid = NULL;
+}
+
+static u32 proc_srq_sge(struct hns_roce_dev *dev, struct hns_roce_srq *hr_srq,
+ bool user)
+{
+ u32 max_sge = dev->caps.max_srq_sges;
+
+ if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ return max_sge;
+
+ /* Reserve SGEs only for HIP08 in kernel mode; the userspace driver
+ * calculates the number of max_sge with reserved SGEs when allocating
+ * the wqe buf, so there is no need to do this again in the kernel.
+ * However, the number may exceed the SGE capacity recorded in the
+ * firmware, so the kernel driver should adapt the value accordingly.
+ */
+ if (user)
+ max_sge = roundup_pow_of_two(max_sge + 1);
+ else
+ hr_srq->rsv_sge = 1;
+
+ return max_sge;
+}
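
A hedged sketch (illustration only) of the two HIP08 paths described in the comment above: a userspace SRQ gets its max_sge limit rounded up to cover the reserved SGE it already accounted for, while a kernel SRQ records rsv_sge = 1 so that set_srq_basic_param() below adds the reservation when sizing max_gs. The firmware limit value is hypothetical.

    #include <stdio.h>

    /* Smallest power of two >= v (v > 0); stands in for roundup_pow_of_two(). */
    static unsigned int ex_roundup_pow2(unsigned int v)
    {
            unsigned int p = 1;

            while (p < v)
                    p <<= 1;
            return p;
    }

    int main(void)
    {
            unsigned int cap_max_sge = 15;  /* hypothetical firmware limit */
            unsigned int user_limit, kern_rsv_sge = 1;

            /* user path: the reserved SGE was already counted in userspace */
            user_limit = ex_roundup_pow2(cap_max_sge + 1);  /* 16 */
            /* kernel path: the reservation is added later via rsv_sge */
            printf("user max_sge=%u, kernel rsv_sge=%u\n", user_limit, kern_rsv_sge);
            return 0;
    }
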
+
+static int set_srq_basic_param(struct hns_roce_srq *srq,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
+ struct ib_srq_attr *attr = &init_attr->attr;
+ u32 max_sge;
+
+ max_sge = proc_srq_sge(hr_dev, srq, !!udata);
+ if (attr->max_wr > hr_dev->caps.max_srq_wrs ||
+ attr->max_sge > max_sge || !attr->max_sge) {
+ ibdev_err(&hr_dev->ib_dev,
+ "invalid SRQ attr, depth = %u, sge = %u.\n",
+ attr->max_wr, attr->max_sge);
+ return -EINVAL;
+ }
+
+ attr->max_wr = max_t(u32, attr->max_wr, HNS_ROCE_MIN_SRQ_WQE_NUM);
+ srq->wqe_cnt = roundup_pow_of_two(attr->max_wr);
+ srq->max_gs = roundup_pow_of_two(attr->max_sge + srq->rsv_sge);
+
+ attr->max_wr = srq->wqe_cnt;
+ attr->max_sge = srq->max_gs - srq->rsv_sge;
+ attr->srq_limit = 0;
+
+ return 0;
+}
+
+static void set_srq_ext_param(struct hns_roce_srq *srq,
+ struct ib_srq_init_attr *init_attr)
+{
+ srq->cqn = ib_srq_has_cq(init_attr->srq_type) ?
+ to_hr_cq(init_attr->ext.cq)->cqn : 0;
+
+ srq->xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ?
+ to_hr_xrcd(init_attr->ext.xrc.xrcd)->xrcdn : 0;
+}
+
+static int set_srq_param(struct hns_roce_srq *srq,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ int ret;
+
+ ret = set_srq_basic_param(srq, init_attr, udata);
+ if (ret)
+ return ret;
+
+ set_srq_ext_param(srq, init_attr);
+
+ return 0;
+}
+
+static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ struct ib_udata *udata)
+{
+ struct hns_roce_ib_create_srq ucmd = {};
+ int ret;
+
+ if (udata) {
+ ret = ib_copy_from_udata(&ucmd, udata,
+ min(udata->inlen, sizeof(ucmd)));
+ if (ret) {
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to copy SRQ udata, ret = %d.\n",
+ ret);
+ return ret;
+ }
+ }
+
+ ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr);
+ if (ret)
+ return ret;
+
+ ret = alloc_srq_wqe_buf(hr_dev, srq, udata, ucmd.buf_addr);
+ if (ret)
+ goto err_idx;
+
+ if (!udata) {
+ ret = alloc_srq_wrid(srq);
+ if (ret)
+ goto err_wqe_buf;
+ }
+
+ return 0;
+
+err_wqe_buf:
+ free_srq_wqe_buf(hr_dev, srq);
+err_idx:
+ free_srq_idx(hr_dev, srq);
+
+ return ret;
+}
+
+static void free_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+{
+ free_srq_wrid(srq);
+ free_srq_wqe_buf(hr_dev, srq);
+ free_srq_idx(hr_dev, srq);
+}
+
+static int get_srq_ucmd(struct hns_roce_srq *srq, struct ib_udata *udata,
+ struct hns_roce_ib_create_srq *ucmd)
+{
+ struct ib_device *ibdev = srq->ibsrq.device;
+ int ret;
+
+ ret = ib_copy_from_udata(ucmd, udata, min(udata->inlen, sizeof(*ucmd)));
+ if (ret) {
+ ibdev_err(ibdev, "failed to copy SRQ udata, ret = %d.\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void free_srq_db(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ struct ib_udata *udata)
+{
+ struct hns_roce_ucontext *uctx;
+
+ if (!(srq->cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB))
+ return;
+
+ srq->cap_flags &= ~HNS_ROCE_SRQ_CAP_RECORD_DB;
+ if (udata) {
+ uctx = rdma_udata_to_drv_context(udata,
+ struct hns_roce_ucontext,
+ ibucontext);
+ hns_roce_db_unmap_user(uctx, &srq->rdb);
+ } else {
+ hns_roce_free_db(hr_dev, &srq->rdb);
+ }
+}
+
+static int alloc_srq_db(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ struct ib_udata *udata,
+ struct hns_roce_ib_create_srq_resp *resp)
+{
+ struct hns_roce_ib_create_srq ucmd = {};
+ struct hns_roce_ucontext *uctx;
+ int ret;
+
+ if (udata) {
+ ret = get_srq_ucmd(srq, udata, &ucmd);
+ if (ret)
+ return ret;
+
+ if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ_RECORD_DB) &&
+ (ucmd.req_cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB)) {
+ uctx = rdma_udata_to_drv_context(udata,
+ struct hns_roce_ucontext, ibucontext);
+ ret = hns_roce_db_map_user(uctx, ucmd.db_addr,
+ &srq->rdb);
+ if (ret)
+ return ret;
+
+ srq->cap_flags |= HNS_ROCE_RSP_SRQ_CAP_RECORD_DB;
+ }
+ } else {
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ_RECORD_DB) {
+ ret = hns_roce_alloc_db(hr_dev, &srq->rdb, 1);
+ if (ret)
+ return ret;
+
+ *srq->rdb.db_record = 0;
+ srq->cap_flags |= HNS_ROCE_RSP_SRQ_CAP_RECORD_DB;
+ }
+ srq->db_reg = hr_dev->reg_base + SRQ_DB_REG;
+ }
+
+ return 0;
+}
+
+int hns_roce_create_srq(struct ib_srq *ib_srq,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
+ struct hns_roce_ib_create_srq_resp resp = {};
+ struct hns_roce_srq *srq = to_hr_srq(ib_srq);
+ int ret;
+
+ mutex_init(&srq->mutex);
+ spin_lock_init(&srq->lock);
+
+ ret = set_srq_param(srq, init_attr, udata);
+ if (ret)
+ goto err_out;
+
+ ret = alloc_srq_buf(hr_dev, srq, udata);
+ if (ret)
+ goto err_out;
+
+ ret = alloc_srq_db(hr_dev, srq, udata, &resp);
+ if (ret)
+ goto err_srq_buf;
+
+ ret = alloc_srqn(hr_dev, srq);
+ if (ret)
+ goto err_srq_db;
+
+ ret = alloc_srqc(hr_dev, srq);
+ if (ret)
+ goto err_srqn;
+
+ if (udata) {
+ resp.cap_flags = srq->cap_flags;
+ resp.srqn = srq->srqn;
+ if (ib_copy_to_udata(udata, &resp,
+ min(udata->outlen, sizeof(resp)))) {
+ ret = -EFAULT;
+ goto err_srqc;
+ }
+ }
+
+ srq->event = hns_roce_ib_srq_event;
+ refcount_set(&srq->refcount, 1);
+ init_completion(&srq->free);
+
+ return 0;
+
+err_srqc:
+ free_srqc(hr_dev, srq);
+err_srqn:
+ free_srqn(hr_dev, srq);
+err_srq_db:
+ free_srq_db(hr_dev, srq, udata);
+err_srq_buf:
+ free_srq_buf(hr_dev, srq);
+err_out:
+ mutex_destroy(&srq->mutex);
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_SRQ_CREATE_ERR_CNT]);
+
+ return ret;
+}
+
+int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
+ struct hns_roce_srq *srq = to_hr_srq(ibsrq);
+
+ free_srqc(hr_dev, srq);
+ free_srqn(hr_dev, srq);
+ free_srq_db(hr_dev, srq, udata);
+ free_srq_buf(hr_dev, srq);
+ mutex_destroy(&srq->mutex);
+ return 0;
+}
+
+void hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
+ struct hns_roce_ida *srq_ida = &srq_table->srq_ida;
+
+ xa_init(&srq_table->xa);
+
+ ida_init(&srq_ida->ida);
+ srq_ida->max = hr_dev->caps.num_srqs - 1;
+ srq_ida->min = hr_dev->caps.reserved_srqs;
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_trace.h b/drivers/infiniband/hw/hns/hns_roce_trace.h
new file mode 100644
index 000000000000..59ceb591b3a1
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_trace.h
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2025 Hisilicon Limited.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hns_roce
+
+#if !defined(__HNS_ROCE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HNS_ROCE_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <linux/string_choices.h>
+#include "hns_roce_device.h"
+#include "hns_roce_hw_v2.h"
+
+DECLARE_EVENT_CLASS(flush_head_template,
+ TP_PROTO(unsigned long qpn, u32 pi,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, pi, type),
+
+ TP_STRUCT__entry(__field(unsigned long, qpn)
+ __field(u32, pi)
+ __field(enum hns_roce_trace_type, type)
+ ),
+
+ TP_fast_assign(__entry->qpn = qpn;
+ __entry->pi = pi;
+ __entry->type = type;
+ ),
+
+ TP_printk("%s 0x%lx flush head 0x%x.",
+ trace_type_to_str(__entry->type),
+ __entry->qpn, __entry->pi)
+);
+
+DEFINE_EVENT(flush_head_template, hns_sq_flush_cqe,
+ TP_PROTO(unsigned long qpn, u32 pi,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, pi, type));
+DEFINE_EVENT(flush_head_template, hns_rq_flush_cqe,
+ TP_PROTO(unsigned long qpn, u32 pi,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, pi, type));
+
+#define MAX_SGE_PER_WQE 64
+#define MAX_WQE_SIZE (MAX_SGE_PER_WQE * HNS_ROCE_SGE_SIZE)
+DECLARE_EVENT_CLASS(wqe_template,
+ TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len,
+ u64 id, enum hns_roce_trace_type type),
+ TP_ARGS(qpn, idx, wqe, len, id, type),
+
+ TP_STRUCT__entry(__field(unsigned long, qpn)
+ __field(u32, idx)
+ __array(u32, wqe,
+ MAX_WQE_SIZE / sizeof(__le32))
+ __field(u32, len)
+ __field(u64, id)
+ __field(enum hns_roce_trace_type, type)
+ ),
+
+ TP_fast_assign(__entry->qpn = qpn;
+ __entry->idx = idx;
+ __entry->id = id;
+ __entry->len = len / sizeof(__le32);
+ __entry->type = type;
+ for (int i = 0; i < __entry->len; i++)
+ __entry->wqe[i] = le32_to_cpu(((__le32 *)wqe)[i]);
+ ),
+
+ TP_printk("%s 0x%lx wqe(0x%x/0x%llx): %s",
+ trace_type_to_str(__entry->type),
+ __entry->qpn, __entry->idx, __entry->id,
+ __print_array(__entry->wqe, __entry->len,
+ sizeof(__le32)))
+);
+
+DEFINE_EVENT(wqe_template, hns_sq_wqe,
+ TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, idx, wqe, len, id, type));
+DEFINE_EVENT(wqe_template, hns_rq_wqe,
+ TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, idx, wqe, len, id, type));
+DEFINE_EVENT(wqe_template, hns_srq_wqe,
+ TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, idx, wqe, len, id, type));
+
+TRACE_EVENT(hns_ae_info,
+ TP_PROTO(int event_type, void *aeqe, unsigned int len),
+ TP_ARGS(event_type, aeqe, len),
+
+ TP_STRUCT__entry(__field(int, event_type)
+ __array(u32, aeqe,
+ HNS_ROCE_V3_EQE_SIZE / sizeof(__le32))
+ __field(u32, len)
+ ),
+
+ TP_fast_assign(__entry->event_type = event_type;
+ __entry->len = len / sizeof(__le32);
+ for (int i = 0; i < __entry->len; i++)
+ __entry->aeqe[i] = le32_to_cpu(((__le32 *)aeqe)[i]);
+ ),
+
+ TP_printk("event %2d aeqe: %s", __entry->event_type,
+ __print_array(__entry->aeqe, __entry->len, sizeof(__le32)))
+);
+
+TRACE_EVENT(hns_mr,
+ TP_PROTO(struct hns_roce_mr *mr),
+ TP_ARGS(mr),
+
+ TP_STRUCT__entry(__field(u64, iova)
+ __field(u64, size)
+ __field(u32, key)
+ __field(u32, pd)
+ __field(u32, pbl_hop_num)
+ __field(u32, npages)
+ __field(int, type)
+ __field(int, enabled)
+ ),
+
+ TP_fast_assign(__entry->iova = mr->iova;
+ __entry->size = mr->size;
+ __entry->key = mr->key;
+ __entry->pd = mr->pd;
+ __entry->pbl_hop_num = mr->pbl_hop_num;
+ __entry->npages = mr->npages;
+ __entry->type = mr->type;
+ __entry->enabled = mr->enabled;
+ ),
+
+ TP_printk("iova:0x%llx, size:%llu, key:%u, pd:%u, pbl_hop:%u, npages:%u, type:%d, status:%d",
+ __entry->iova, __entry->size, __entry->key,
+ __entry->pd, __entry->pbl_hop_num, __entry->npages,
+ __entry->type, __entry->enabled)
+);
+
+TRACE_EVENT(hns_buf_attr,
+ TP_PROTO(struct hns_roce_buf_attr *attr),
+ TP_ARGS(attr),
+
+ TP_STRUCT__entry(__field(unsigned int, region_count)
+ __field(unsigned int, region0_size)
+ __field(int, region0_hopnum)
+ __field(unsigned int, region1_size)
+ __field(int, region1_hopnum)
+ __field(unsigned int, region2_size)
+ __field(int, region2_hopnum)
+ __field(unsigned int, page_shift)
+ __field(bool, mtt_only)
+ ),
+
+ TP_fast_assign(__entry->region_count = attr->region_count;
+ __entry->region0_size = attr->region[0].size;
+ __entry->region0_hopnum = attr->region[0].hopnum;
+ __entry->region1_size = attr->region[1].size;
+ __entry->region1_hopnum = attr->region[1].hopnum;
+ __entry->region2_size = attr->region[2].size;
+ __entry->region2_hopnum = attr->region[2].hopnum;
+ __entry->page_shift = attr->page_shift;
+ __entry->mtt_only = attr->mtt_only;
+ ),
+
+ TP_printk("rg cnt:%u, pg_sft:0x%x, mtt_only:%s, rg 0 (sz:%u, hop:%u), rg 1 (sz:%u, hop:%u), rg 2 (sz:%u, hop:%u)\n",
+ __entry->region_count, __entry->page_shift,
+ str_yes_no(__entry->mtt_only),
+ __entry->region0_size, __entry->region0_hopnum,
+ __entry->region1_size, __entry->region1_hopnum,
+ __entry->region2_size, __entry->region2_hopnum)
+);
+
+DECLARE_EVENT_CLASS(cmdq,
+ TP_PROTO(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc),
+ TP_ARGS(hr_dev, desc),
+
+ TP_STRUCT__entry(__string(dev_name, dev_name(hr_dev->dev))
+ __field(u16, opcode)
+ __field(u16, flag)
+ __field(u16, retval)
+ __array(u32, data, 6)
+ ),
+
+ TP_fast_assign(__assign_str(dev_name);
+ __entry->opcode = le16_to_cpu(desc->opcode);
+ __entry->flag = le16_to_cpu(desc->flag);
+ __entry->retval = le16_to_cpu(desc->retval);
+ for (int i = 0; i < 6; i++)
+ __entry->data[i] = le32_to_cpu(desc->data[i]);
+ ),
+
+ TP_printk("%s cmdq opcode:0x%x, flag:0x%x, retval:0x%x, data:%s\n",
+ __get_str(dev_name), __entry->opcode,
+ __entry->flag, __entry->retval,
+ __print_array(__entry->data, 6, sizeof(__le32)))
+);
+
+DEFINE_EVENT(cmdq, hns_cmdq_req,
+ TP_PROTO(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc),
+ TP_ARGS(hr_dev, desc));
+DEFINE_EVENT(cmdq, hns_cmdq_resp,
+ TP_PROTO(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc),
+ TP_ARGS(hr_dev, desc));
+
+#endif /* __HNS_ROCE_TRACE_H */
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hns_roce_trace
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/hw/ionic/Kconfig b/drivers/infiniband/hw/ionic/Kconfig
new file mode 100644
index 000000000000..de6f10e9b6e9
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2018-2025, Advanced Micro Devices, Inc.
+
+config INFINIBAND_IONIC
+ tristate "AMD Pensando DSC RDMA/RoCE Support"
+ depends on NETDEVICES && ETHERNET && PCI && INET && IONIC
+ help
+ This enables RDMA/RoCE support for the AMD Pensando family of
+ Distributed Services Cards (DSCs).
+
+ To learn more, visit our website at
+ <https://www.amd.com/en/products/accelerators/pensando.html>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ionic_rdma.
diff --git a/drivers/infiniband/hw/ionic/Makefile b/drivers/infiniband/hw/ionic/Makefile
new file mode 100644
index 000000000000..957973742820
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y := -I $(srctree)/drivers/net/ethernet/pensando/ionic
+
+obj-$(CONFIG_INFINIBAND_IONIC) += ionic_rdma.o
+
+ionic_rdma-y := \
+ ionic_ibdev.o ionic_lif_cfg.o ionic_queue.o ionic_pgtbl.o ionic_admin.o \
+ ionic_controlpath.o ionic_datapath.o ionic_hw_stats.o
diff --git a/drivers/infiniband/hw/ionic/ionic_admin.c b/drivers/infiniband/hw/ionic/ionic_admin.c
new file mode 100644
index 000000000000..2537aa55d12d
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_admin.c
@@ -0,0 +1,1229 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+
+#include "ionic_fw.h"
+#include "ionic_ibdev.h"
+
+#define IONIC_EQ_COUNT_MIN 4
+#define IONIC_AQ_COUNT_MIN 1
+
+/* not a valid queue position or negative error status */
+#define IONIC_ADMIN_POSTED 0x10000
+
+/* cpu can be held with irq disabled for COUNT * MS (for create/destroy_ah) */
+#define IONIC_ADMIN_BUSY_RETRY_COUNT 2000
+#define IONIC_ADMIN_BUSY_RETRY_MS 1
+
+/* admin queue will be considered failed if a command takes longer */
+#define IONIC_ADMIN_TIMEOUT (HZ * 2)
+#define IONIC_ADMIN_WARN (HZ / 8)
+
+/* will poll the admin cq to tolerate and report a missed event */
+#define IONIC_ADMIN_DELAY (HZ / 8)
+
+/* work queue for polling the event queue and admin cq */
+struct workqueue_struct *ionic_evt_workq;
+
+static void ionic_admin_timedout(struct ionic_aq *aq)
+{
+ struct ionic_ibdev *dev = aq->dev;
+ unsigned long irqflags;
+ u16 pos;
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+ if (ionic_queue_empty(&aq->q))
+ goto out;
+
+ /* Reset ALL adminq if any one times out */
+ if (atomic_read(&aq->admin_state) < IONIC_ADMIN_KILLED)
+ queue_work(ionic_evt_workq, &dev->reset_work);
+
+ ibdev_err(&dev->ibdev, "admin command timed out, aq %d after: %ums\n",
+ aq->aqid, (u32)jiffies_to_msecs(jiffies - aq->stamp));
+
+ pos = (aq->q.prod - 1) & aq->q.mask;
+ if (pos == aq->q.cons)
+ goto out;
+
+ ibdev_warn(&dev->ibdev, "admin pos %u (last posted)\n", pos);
+ print_hex_dump(KERN_WARNING, "cmd ", DUMP_PREFIX_OFFSET, 16, 1,
+ ionic_queue_at(&aq->q, pos),
+ BIT(aq->q.stride_log2), true);
+
+out:
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+}
+
+static void ionic_admin_reset_dwork(struct ionic_ibdev *dev)
+{
+ if (atomic_read(&dev->admin_state) == IONIC_ADMIN_KILLED)
+ return;
+
+ queue_delayed_work(ionic_evt_workq, &dev->admin_dwork,
+ IONIC_ADMIN_DELAY);
+}
+
+static void ionic_admin_reset_wdog(struct ionic_aq *aq)
+{
+ if (atomic_read(&aq->admin_state) == IONIC_ADMIN_KILLED)
+ return;
+
+ aq->stamp = jiffies;
+ ionic_admin_reset_dwork(aq->dev);
+}
+
+static bool ionic_admin_next_cqe(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ struct ionic_v1_cqe **cqe)
+{
+ struct ionic_v1_cqe *qcqe = ionic_queue_at_prod(&cq->q);
+
+ if (unlikely(cq->color != ionic_v1_cqe_color(qcqe)))
+ return false;
+
+ /* Prevent out-of-order reads of the CQE */
+ dma_rmb();
+ *cqe = qcqe;
+
+ return true;
+}
+
+static void ionic_admin_poll_locked(struct ionic_aq *aq)
+{
+ struct ionic_cq *cq = &aq->vcq->cq[0];
+ struct ionic_admin_wr *wr, *wr_next;
+ struct ionic_ibdev *dev = aq->dev;
+ u32 wr_strides, avlbl_strides;
+ struct ionic_v1_cqe *cqe;
+ u32 qtf, qid;
+ u16 old_prod;
+ u8 type;
+
+ lockdep_assert_held(&aq->lock);
+
+ if (atomic_read(&aq->admin_state) == IONIC_ADMIN_KILLED) {
+ list_for_each_entry_safe(wr, wr_next, &aq->wr_prod, aq_ent) {
+ INIT_LIST_HEAD(&wr->aq_ent);
+ aq->q_wr[wr->status].wr = NULL;
+ wr->status = atomic_read(&aq->admin_state);
+ complete_all(&wr->work);
+ }
+ INIT_LIST_HEAD(&aq->wr_prod);
+
+ list_for_each_entry_safe(wr, wr_next, &aq->wr_post, aq_ent) {
+ INIT_LIST_HEAD(&wr->aq_ent);
+ wr->status = atomic_read(&aq->admin_state);
+ complete_all(&wr->work);
+ }
+ INIT_LIST_HEAD(&aq->wr_post);
+
+ return;
+ }
+
+ old_prod = cq->q.prod;
+
+ while (ionic_admin_next_cqe(dev, cq, &cqe)) {
+ qtf = ionic_v1_cqe_qtf(cqe);
+ qid = ionic_v1_cqe_qtf_qid(qtf);
+ type = ionic_v1_cqe_qtf_type(qtf);
+
+ if (unlikely(type != IONIC_V1_CQE_TYPE_ADMIN)) {
+ ibdev_warn_ratelimited(&dev->ibdev,
+ "bad cqe type %u\n", type);
+ goto cq_next;
+ }
+
+ if (unlikely(qid != aq->aqid)) {
+ ibdev_warn_ratelimited(&dev->ibdev,
+ "bad cqe qid %u\n", qid);
+ goto cq_next;
+ }
+
+ if (unlikely(be16_to_cpu(cqe->admin.cmd_idx) != aq->q.cons)) {
+ ibdev_warn_ratelimited(&dev->ibdev,
+ "bad idx %u cons %u qid %u\n",
+ be16_to_cpu(cqe->admin.cmd_idx),
+ aq->q.cons, qid);
+ goto cq_next;
+ }
+
+ if (unlikely(ionic_queue_empty(&aq->q))) {
+ ibdev_warn_ratelimited(&dev->ibdev,
+ "bad cqe for empty adminq\n");
+ goto cq_next;
+ }
+
+ wr = aq->q_wr[aq->q.cons].wr;
+ if (wr) {
+ aq->q_wr[aq->q.cons].wr = NULL;
+ list_del_init(&wr->aq_ent);
+
+ wr->cqe = *cqe;
+ wr->status = atomic_read(&aq->admin_state);
+ complete_all(&wr->work);
+ }
+
+ ionic_queue_consume_entries(&aq->q,
+ aq->q_wr[aq->q.cons].wqe_strides);
+
+cq_next:
+ ionic_queue_produce(&cq->q);
+ cq->color = ionic_color_wrap(cq->q.prod, cq->color);
+ }
+
+ if (old_prod != cq->q.prod) {
+ ionic_admin_reset_wdog(aq);
+ cq->q.cons = cq->q.prod;
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype,
+ ionic_queue_dbell_val(&cq->q));
+ queue_work(ionic_evt_workq, &aq->work);
+ } else if (!aq->armed) {
+ aq->armed = true;
+ cq->arm_any_prod = ionic_queue_next(&cq->q, cq->arm_any_prod);
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype,
+ cq->q.dbell | IONIC_CQ_RING_ARM |
+ cq->arm_any_prod);
+ queue_work(ionic_evt_workq, &aq->work);
+ }
+
+ if (atomic_read(&aq->admin_state) != IONIC_ADMIN_ACTIVE)
+ return;
+
+ old_prod = aq->q.prod;
+
+ if (ionic_queue_empty(&aq->q) && !list_empty(&aq->wr_post))
+ ionic_admin_reset_wdog(aq);
+
+ if (list_empty(&aq->wr_post))
+ return;
+
+ do {
+ u8 *src;
+ int i, src_len;
+ size_t stride_len;
+
+ wr = list_first_entry(&aq->wr_post, struct ionic_admin_wr,
+ aq_ent);
+ wr_strides = (le16_to_cpu(wr->wqe.len) + ADMIN_WQE_HDR_LEN +
+ (ADMIN_WQE_STRIDE - 1)) >> aq->q.stride_log2;
+ avlbl_strides = ionic_queue_length_remaining(&aq->q);
+
+ if (wr_strides > avlbl_strides)
+ break;
+
+ list_move(&wr->aq_ent, &aq->wr_prod);
+ wr->status = aq->q.prod;
+ aq->q_wr[aq->q.prod].wr = wr;
+ aq->q_wr[aq->q.prod].wqe_strides = wr_strides;
+
+ src_len = le16_to_cpu(wr->wqe.len);
+ src = (u8 *)&wr->wqe.cmd;
+
+ /* First stride */
+ memcpy(ionic_queue_at_prod(&aq->q), &wr->wqe,
+ ADMIN_WQE_HDR_LEN);
+ stride_len = ADMIN_WQE_STRIDE - ADMIN_WQE_HDR_LEN;
+ if (stride_len > src_len)
+ stride_len = src_len;
+ memcpy(ionic_queue_at_prod(&aq->q) + ADMIN_WQE_HDR_LEN,
+ src, stride_len);
+ ibdev_dbg(&dev->ibdev, "post admin prod %u (%u strides)\n",
+ aq->q.prod, wr_strides);
+ print_hex_dump_debug("wqe ", DUMP_PREFIX_OFFSET, 16, 1,
+ ionic_queue_at_prod(&aq->q),
+ BIT(aq->q.stride_log2), true);
+ ionic_queue_produce(&aq->q);
+
+ /* Remaining strides */
+ for (i = stride_len; i < src_len; i += stride_len) {
+ stride_len = ADMIN_WQE_STRIDE;
+
+ if (i + stride_len > src_len)
+ stride_len = src_len - i;
+
+ memcpy(ionic_queue_at_prod(&aq->q), src + i,
+ stride_len);
+ print_hex_dump_debug("wqe ", DUMP_PREFIX_OFFSET, 16, 1,
+ ionic_queue_at_prod(&aq->q),
+ BIT(aq->q.stride_log2), true);
+ ionic_queue_produce(&aq->q);
+ }
+ } while (!list_empty(&aq->wr_post));
+
+ if (old_prod != aq->q.prod)
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.aq_qtype,
+ ionic_queue_dbell_val(&aq->q));
+}
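
A small illustration (not driver code) of the stride accounting used when posting above: the first stride carries the WQE header plus the start of the command payload, and the rest of the payload spills into full strides, so the stride count is a round-up of (payload + header) to the stride size. The sizes below are hypothetical stand-ins for ADMIN_WQE_HDR_LEN and ADMIN_WQE_STRIDE.

    #include <stdio.h>

    #define EX_WQE_HDR_LEN  8       /* hypothetical header bytes in the first stride */
    #define EX_WQE_STRIDE   64      /* hypothetical stride size (1 << stride_log2) */

    /* Number of queue strides consumed by a command payload of cmd_len bytes. */
    static unsigned int ex_wqe_strides(unsigned int cmd_len)
    {
            return (cmd_len + EX_WQE_HDR_LEN + (EX_WQE_STRIDE - 1)) / EX_WQE_STRIDE;
    }

    int main(void)
    {
            printf("%u %u %u\n", ex_wqe_strides(0),   /* 1: header only */
                   ex_wqe_strides(56),                /* 1: fits with the header */
                   ex_wqe_strides(57));               /* 2: spills into a second stride */
            return 0;
    }
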
+
+static void ionic_admin_dwork(struct work_struct *ws)
+{
+ struct ionic_ibdev *dev =
+ container_of(ws, struct ionic_ibdev, admin_dwork.work);
+ struct ionic_aq *aq, *bad_aq = NULL;
+ bool do_reschedule = false;
+ unsigned long irqflags;
+ bool do_reset = false;
+ u16 pos;
+ int i;
+
+ for (i = 0; i < dev->lif_cfg.aq_count; i++) {
+ aq = dev->aq_vec[i];
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+
+ if (ionic_queue_empty(&aq->q))
+ goto next_aq;
+
+ /* Reschedule if any queue has outstanding work */
+ do_reschedule = true;
+
+ if (time_is_after_eq_jiffies(aq->stamp + IONIC_ADMIN_WARN))
+ /* Warning threshold not met, nothing to do */
+ goto next_aq;
+
+ /* See if polling now makes some progress */
+ pos = aq->q.cons;
+ ionic_admin_poll_locked(aq);
+ if (pos != aq->q.cons) {
+ ibdev_dbg(&dev->ibdev,
+ "missed event for acq %d\n", aq->cqid);
+ goto next_aq;
+ }
+
+ if (time_is_after_eq_jiffies(aq->stamp +
+ IONIC_ADMIN_TIMEOUT)) {
+ /* Timeout threshold not met */
+ ibdev_dbg(&dev->ibdev, "no progress after %ums\n",
+ (u32)jiffies_to_msecs(jiffies - aq->stamp));
+ goto next_aq;
+ }
+
+ /* Queue timed out */
+ bad_aq = aq;
+ do_reset = true;
+next_aq:
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+ }
+
+ if (do_reset)
+ /* Reset RDMA lif on a timeout */
+ ionic_admin_timedout(bad_aq);
+ else if (do_reschedule)
+ /* Try to poll again later */
+ ionic_admin_reset_dwork(dev);
+}
+
+static void ionic_admin_work(struct work_struct *ws)
+{
+ struct ionic_aq *aq = container_of(ws, struct ionic_aq, work);
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+ ionic_admin_poll_locked(aq);
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+}
+
+static void ionic_admin_post_aq(struct ionic_aq *aq, struct ionic_admin_wr *wr)
+{
+ unsigned long irqflags;
+ bool poll;
+
+ wr->status = IONIC_ADMIN_POSTED;
+ wr->aq = aq;
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+ poll = list_empty(&aq->wr_post);
+ list_add(&wr->aq_ent, &aq->wr_post);
+ if (poll)
+ ionic_admin_poll_locked(aq);
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+}
+
+void ionic_admin_post(struct ionic_ibdev *dev, struct ionic_admin_wr *wr)
+{
+ int aq_idx;
+
+ /* Use cpu id for the adminq selection */
+ aq_idx = raw_smp_processor_id() % dev->lif_cfg.aq_count;
+ ionic_admin_post_aq(dev->aq_vec[aq_idx], wr);
+}
+
+static void ionic_admin_cancel(struct ionic_admin_wr *wr)
+{
+ struct ionic_aq *aq = wr->aq;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+
+ if (!list_empty(&wr->aq_ent)) {
+ list_del(&wr->aq_ent);
+ if (wr->status != IONIC_ADMIN_POSTED)
+ aq->q_wr[wr->status].wr = NULL;
+ }
+
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+}
+
+static int ionic_admin_busy_wait(struct ionic_admin_wr *wr)
+{
+ struct ionic_aq *aq = wr->aq;
+ unsigned long irqflags;
+ int try_i;
+
+ for (try_i = 0; try_i < IONIC_ADMIN_BUSY_RETRY_COUNT; ++try_i) {
+ if (completion_done(&wr->work))
+ return 0;
+
+ mdelay(IONIC_ADMIN_BUSY_RETRY_MS);
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+ ionic_admin_poll_locked(aq);
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+ }
+
+ /*
+ * We timed out. Initiate an RDMA LIF reset and indicate the
+ * error to the caller.
+ */
+ ionic_admin_timedout(aq);
+ return -ETIMEDOUT;
+}
+
+int ionic_admin_wait(struct ionic_ibdev *dev, struct ionic_admin_wr *wr,
+ enum ionic_admin_flags flags)
+{
+ int rc, timo;
+
+ if (flags & IONIC_ADMIN_F_BUSYWAIT) {
+ /* Spin */
+ rc = ionic_admin_busy_wait(wr);
+ } else if (flags & IONIC_ADMIN_F_INTERRUPT) {
+ /*
+ * Interruptible sleep, 1s timeout
+ * This is used for commands which are safe for the caller
+ * to clean up without killing and resetting the adminq.
+ */
+ timo = wait_for_completion_interruptible_timeout(&wr->work,
+ HZ);
+ if (timo > 0)
+ rc = 0;
+ else if (timo == 0)
+ rc = -ETIMEDOUT;
+ else
+ rc = timo;
+ } else {
+ /*
+ * Uninterruptible sleep
+ * This is used for commands which are NOT safe for the
+ * caller to clean up. Cleanup must be handled by the
+ * adminq kill and reset process so that host memory is
+ * not corrupted by the device.
+ */
+ wait_for_completion(&wr->work);
+ rc = 0;
+ }
+
+ if (rc) {
+ ibdev_warn(&dev->ibdev, "wait status %d\n", rc);
+ ionic_admin_cancel(wr);
+ } else if (wr->status == IONIC_ADMIN_KILLED) {
+ ibdev_dbg(&dev->ibdev, "admin killed\n");
+
+ /* No error if admin already killed during teardown */
+ rc = (flags & IONIC_ADMIN_F_TEARDOWN) ? 0 : -ENODEV;
+ } else if (ionic_v1_cqe_error(&wr->cqe)) {
+ ibdev_warn(&dev->ibdev, "opcode %u error %u\n",
+ wr->wqe.op,
+ be32_to_cpu(wr->cqe.status_length));
+ rc = -EINVAL;
+ }
+ return rc;
+}
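
A standalone sketch (illustration only) of how the interruptible branch above maps the return value of wait_for_completion_interruptible_timeout() onto an errno-style result: positive means the command completed within the timeout, zero means timeout, negative means the sleep was interrupted and the error is propagated.

    #include <errno.h>
    #include <stdio.h>

    /* Mirrors the three-way mapping in the IONIC_ADMIN_F_INTERRUPT branch. */
    static int ex_wait_result_to_errno(long timo)
    {
            if (timo > 0)
                    return 0;               /* completed before the timeout */
            if (timo == 0)
                    return -ETIMEDOUT;      /* no completion within the window */
            return (int)timo;               /* interrupted, e.g. -ERESTARTSYS */
    }

    int main(void)
    {
            printf("%d %d %d\n", ex_wait_result_to_errno(25),
                   ex_wait_result_to_errno(0), ex_wait_result_to_errno(-512));
            return 0;
    }
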
+
+static int ionic_rdma_devcmd(struct ionic_ibdev *dev,
+ struct ionic_admin_ctx *admin)
+{
+ int rc;
+
+ rc = ionic_adminq_post_wait(dev->lif_cfg.lif, admin);
+ if (rc)
+ return rc;
+
+ return ionic_error_to_errno(admin->comp.comp.status);
+}
+
+int ionic_rdma_reset_devcmd(struct ionic_ibdev *dev)
+{
+ struct ionic_admin_ctx admin = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(admin.work),
+ .cmd.rdma_reset = {
+ .opcode = IONIC_CMD_RDMA_RESET_LIF,
+ .lif_index = cpu_to_le16(dev->lif_cfg.lif_index),
+ },
+ };
+
+ return ionic_rdma_devcmd(dev, &admin);
+}
+
+static int ionic_rdma_queue_devcmd(struct ionic_ibdev *dev,
+ struct ionic_queue *q,
+ u32 qid, u32 cid, u16 opcode)
+{
+ struct ionic_admin_ctx admin = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(admin.work),
+ .cmd.rdma_queue = {
+ .opcode = opcode,
+ .lif_index = cpu_to_le16(dev->lif_cfg.lif_index),
+ .qid_ver = cpu_to_le32(qid),
+ .cid = cpu_to_le32(cid),
+ .dbid = cpu_to_le16(dev->lif_cfg.dbid),
+ .depth_log2 = q->depth_log2,
+ .stride_log2 = q->stride_log2,
+ .dma_addr = cpu_to_le64(q->dma),
+ },
+ };
+
+ return ionic_rdma_devcmd(dev, &admin);
+}
+
+static void ionic_rdma_admincq_comp(struct ib_cq *ibcq, void *cq_context)
+{
+ struct ionic_aq *aq = cq_context;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+ aq->armed = false;
+ if (atomic_read(&aq->admin_state) < IONIC_ADMIN_KILLED)
+ queue_work(ionic_evt_workq, &aq->work);
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+}
+
+static void ionic_rdma_admincq_event(struct ib_event *event, void *cq_context)
+{
+ struct ionic_aq *aq = cq_context;
+
+ ibdev_err(&aq->dev->ibdev, "admincq event %d\n", event->event);
+}
+
+static struct ionic_vcq *ionic_create_rdma_admincq(struct ionic_ibdev *dev,
+ int comp_vector)
+{
+ struct ib_cq_init_attr attr = {
+ .cqe = IONIC_AQ_DEPTH,
+ .comp_vector = comp_vector,
+ };
+ struct ionic_tbl_buf buf = {};
+ struct ionic_vcq *vcq;
+ struct ionic_cq *cq;
+ int rc;
+
+ vcq = kzalloc(sizeof(*vcq), GFP_KERNEL);
+ if (!vcq)
+ return ERR_PTR(-ENOMEM);
+
+ vcq->ibcq.device = &dev->ibdev;
+ vcq->ibcq.comp_handler = ionic_rdma_admincq_comp;
+ vcq->ibcq.event_handler = ionic_rdma_admincq_event;
+ atomic_set(&vcq->ibcq.usecnt, 0);
+
+ vcq->udma_mask = 1;
+ cq = &vcq->cq[0];
+
+ rc = ionic_create_cq_common(vcq, &buf, &attr, NULL, NULL,
+ NULL, NULL, 0);
+ if (rc)
+ goto err_init;
+
+ rc = ionic_rdma_queue_devcmd(dev, &cq->q, cq->cqid, cq->eqid,
+ IONIC_CMD_RDMA_CREATE_CQ);
+ if (rc)
+ goto err_cmd;
+
+ return vcq;
+
+err_cmd:
+ ionic_destroy_cq_common(dev, cq);
+err_init:
+ kfree(vcq);
+
+ return ERR_PTR(rc);
+}
+
+static struct ionic_aq *__ionic_create_rdma_adminq(struct ionic_ibdev *dev,
+ u32 aqid, u32 cqid)
+{
+ struct ionic_aq *aq;
+ int rc;
+
+ aq = kzalloc(sizeof(*aq), GFP_KERNEL);
+ if (!aq)
+ return ERR_PTR(-ENOMEM);
+
+ atomic_set(&aq->admin_state, IONIC_ADMIN_KILLED);
+ aq->dev = dev;
+ aq->aqid = aqid;
+ aq->cqid = cqid;
+ spin_lock_init(&aq->lock);
+
+ rc = ionic_queue_init(&aq->q, dev->lif_cfg.hwdev, IONIC_EQ_DEPTH,
+ ADMIN_WQE_STRIDE);
+ if (rc)
+ goto err_q;
+
+ ionic_queue_dbell_init(&aq->q, aq->aqid);
+
+ aq->q_wr = kcalloc((u32)aq->q.mask + 1, sizeof(*aq->q_wr), GFP_KERNEL);
+ if (!aq->q_wr) {
+ rc = -ENOMEM;
+ goto err_wr;
+ }
+
+ INIT_LIST_HEAD(&aq->wr_prod);
+ INIT_LIST_HEAD(&aq->wr_post);
+
+ INIT_WORK(&aq->work, ionic_admin_work);
+ aq->armed = false;
+
+ return aq;
+
+err_wr:
+ ionic_queue_destroy(&aq->q, dev->lif_cfg.hwdev);
+err_q:
+ kfree(aq);
+
+ return ERR_PTR(rc);
+}
+
+static void __ionic_destroy_rdma_adminq(struct ionic_ibdev *dev,
+ struct ionic_aq *aq)
+{
+ kfree(aq->q_wr);
+ ionic_queue_destroy(&aq->q, dev->lif_cfg.hwdev);
+ kfree(aq);
+}
+
+static struct ionic_aq *ionic_create_rdma_adminq(struct ionic_ibdev *dev,
+ u32 aqid, u32 cqid)
+{
+ struct ionic_aq *aq;
+ int rc;
+
+ aq = __ionic_create_rdma_adminq(dev, aqid, cqid);
+ if (IS_ERR(aq))
+ return aq;
+
+ rc = ionic_rdma_queue_devcmd(dev, &aq->q, aq->aqid, aq->cqid,
+ IONIC_CMD_RDMA_CREATE_ADMINQ);
+ if (rc)
+ goto err_cmd;
+
+ return aq;
+
+err_cmd:
+ __ionic_destroy_rdma_adminq(dev, aq);
+
+ return ERR_PTR(rc);
+}
+
+static void ionic_flush_qs(struct ionic_ibdev *dev)
+{
+ struct ionic_qp *qp, *qp_tmp;
+ struct ionic_cq *cq, *cq_tmp;
+ LIST_HEAD(flush_list);
+ unsigned long index;
+
+ WARN_ON(!irqs_disabled());
+
+ /* Flush qp send and recv */
+ xa_lock(&dev->qp_tbl);
+ xa_for_each(&dev->qp_tbl, index, qp) {
+ kref_get(&qp->qp_kref);
+ list_add_tail(&qp->ibkill_flush_ent, &flush_list);
+ }
+ xa_unlock(&dev->qp_tbl);
+
+ list_for_each_entry_safe(qp, qp_tmp, &flush_list, ibkill_flush_ent) {
+ ionic_flush_qp(dev, qp);
+ kref_put(&qp->qp_kref, ionic_qp_complete);
+ list_del(&qp->ibkill_flush_ent);
+ }
+
+ /* Notify completions */
+ xa_lock(&dev->cq_tbl);
+ xa_for_each(&dev->cq_tbl, index, cq) {
+ kref_get(&cq->cq_kref);
+ list_add_tail(&cq->ibkill_flush_ent, &flush_list);
+ }
+ xa_unlock(&dev->cq_tbl);
+
+ list_for_each_entry_safe(cq, cq_tmp, &flush_list, ibkill_flush_ent) {
+ ionic_notify_flush_cq(cq);
+ kref_put(&cq->cq_kref, ionic_cq_complete);
+ list_del(&cq->ibkill_flush_ent);
+ }
+}
+
+static void ionic_kill_ibdev(struct ionic_ibdev *dev, bool fatal_path)
+{
+ unsigned long irqflags;
+ bool do_flush = false;
+ int i;
+
+ /* Mark AQs for drain and flush the QPs while irq is disabled */
+ local_irq_save(irqflags);
+
+ /* Mark the admin queue, flushing at most once */
+ for (i = 0; i < dev->lif_cfg.aq_count; i++) {
+ struct ionic_aq *aq = dev->aq_vec[i];
+
+ spin_lock(&aq->lock);
+ if (atomic_read(&aq->admin_state) != IONIC_ADMIN_KILLED) {
+ atomic_set(&aq->admin_state, IONIC_ADMIN_KILLED);
+ /* Flush incomplete admin commands */
+ ionic_admin_poll_locked(aq);
+ do_flush = true;
+ }
+ spin_unlock(&aq->lock);
+ }
+
+ if (do_flush)
+ ionic_flush_qs(dev);
+
+ local_irq_restore(irqflags);
+
+ /* Post a fatal event if requested */
+ if (fatal_path) {
+ struct ib_event ev;
+
+ ev.device = &dev->ibdev;
+ ev.element.port_num = 1;
+ ev.event = IB_EVENT_DEVICE_FATAL;
+
+ ib_dispatch_event(&ev);
+ }
+
+ atomic_set(&dev->admin_state, IONIC_ADMIN_KILLED);
+}
+
+void ionic_kill_rdma_admin(struct ionic_ibdev *dev, bool fatal_path)
+{
+ enum ionic_admin_state old_state;
+ unsigned long irqflags = 0;
+ int i, rc;
+
+ if (!dev->aq_vec)
+ return;
+
+ /*
+ * Admin queues are transitioned from active to paused to killed state.
+ * When in paused state, no new commands are issued to the device,
+ * nor are any completed locally. After resetting the lif, it will be
+ * safe to resume the rdma admin queues in the killed state. Commands
+ * will not be issued to the device, but will complete locally with status
+ * IONIC_ADMIN_KILLED. Handling completion will ensure that creating or
+ * modifying resources fails, but destroying resources succeeds.
+ * If there was a failure resetting the lif using this strategy,
+ * then the state of the device is unknown.
+ */
+ old_state = atomic_cmpxchg(&dev->admin_state, IONIC_ADMIN_ACTIVE,
+ IONIC_ADMIN_PAUSED);
+ if (old_state != IONIC_ADMIN_ACTIVE)
+ return;
+
+ /* Pause all the AQs */
+ local_irq_save(irqflags);
+ for (i = 0; i < dev->lif_cfg.aq_count; i++) {
+ struct ionic_aq *aq = dev->aq_vec[i];
+
+ spin_lock(&aq->lock);
+ /* pause rdma admin queues to reset lif */
+ if (atomic_read(&aq->admin_state) == IONIC_ADMIN_ACTIVE)
+ atomic_set(&aq->admin_state, IONIC_ADMIN_PAUSED);
+ spin_unlock(&aq->lock);
+ }
+ local_irq_restore(irqflags);
+
+ rc = ionic_rdma_reset_devcmd(dev);
+ if (unlikely(rc)) {
+ ibdev_err(&dev->ibdev, "failed to reset rdma %d\n", rc);
+ ionic_request_rdma_reset(dev->lif_cfg.lif);
+ }
+
+ ionic_kill_ibdev(dev, fatal_path);
+}
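
A minimal sketch (not driver code) of the ACTIVE to PAUSED to KILLED hand-off described in the comment above, using a compare-and-swap so that only the first caller performs the reset while later callers see it is already in progress.

    #include <stdatomic.h>
    #include <stdio.h>

    enum ex_admin_state { EX_ACTIVE, EX_PAUSED, EX_KILLED };

    /* Only the caller that wins the ACTIVE -> PAUSED transition resets the lif. */
    static int ex_begin_reset(_Atomic int *state)
    {
            int expected = EX_ACTIVE;

            return atomic_compare_exchange_strong(state, &expected, EX_PAUSED);
    }

    int main(void)
    {
            _Atomic int state = EX_ACTIVE;

            printf("first=%d second=%d\n",
                   ex_begin_reset(&state),  /* 1: this caller pauses and resets */
                   ex_begin_reset(&state)); /* 0: reset already in progress */
            atomic_store(&state, EX_KILLED);        /* after the reset completes */
            return 0;
    }
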
+
+static void ionic_reset_work(struct work_struct *ws)
+{
+ struct ionic_ibdev *dev =
+ container_of(ws, struct ionic_ibdev, reset_work);
+
+ ionic_kill_rdma_admin(dev, true);
+}
+
+static bool ionic_next_eqe(struct ionic_eq *eq, struct ionic_v1_eqe *eqe)
+{
+ struct ionic_v1_eqe *qeqe;
+ bool color;
+
+ qeqe = ionic_queue_at_prod(&eq->q);
+ color = ionic_v1_eqe_color(qeqe);
+
+ /* cons is color for eq */
+ if (eq->q.cons != color)
+ return false;
+
+ /* Prevent out-of-order reads of the EQE */
+ dma_rmb();
+
+ ibdev_dbg(&eq->dev->ibdev, "poll eq prod %u\n", eq->q.prod);
+ print_hex_dump_debug("eqe ", DUMP_PREFIX_OFFSET, 16, 1,
+ qeqe, BIT(eq->q.stride_log2), true);
+ *eqe = *qeqe;
+
+ return true;
+}
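
An illustrative sketch (assumptions noted in the comments) of the phase/color scheme used above: the consumer keeps an expected color and accepts an entry only when the entry's color bit matches it; the expected color is assumed to flip each time the index wraps back to slot zero, which is how stale entries from the previous lap are rejected.

    #include <stdbool.h>
    #include <stdio.h>

    /* Assumption: the expected color flips whenever the ring index wraps to 0. */
    static bool ex_color_wrap(unsigned int next_index, bool color, unsigned int depth)
    {
            return (next_index % depth) == 0 ? !color : color;
    }

    int main(void)
    {
            unsigned int depth = 4, i;
            bool expect = true;     /* initial expected color, as for a fresh EQ */

            for (i = 1; i <= 8; i++) {
                    expect = ex_color_wrap(i, expect, depth);
                    printf("after entry %u expect=%d\n", i, expect);
            }
            return 0;
    }
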
+
+static void ionic_cq_event(struct ionic_ibdev *dev, u32 cqid, u8 code)
+{
+ unsigned long irqflags;
+ struct ib_event ibev;
+ struct ionic_cq *cq;
+
+ xa_lock_irqsave(&dev->cq_tbl, irqflags);
+ cq = xa_load(&dev->cq_tbl, cqid);
+ if (cq)
+ kref_get(&cq->cq_kref);
+ xa_unlock_irqrestore(&dev->cq_tbl, irqflags);
+
+ if (!cq) {
+ ibdev_dbg(&dev->ibdev,
+ "missing cqid %#x code %u\n", cqid, code);
+ return;
+ }
+
+ switch (code) {
+ case IONIC_V1_EQE_CQ_NOTIFY:
+ if (cq->vcq->ibcq.comp_handler)
+ cq->vcq->ibcq.comp_handler(&cq->vcq->ibcq,
+ cq->vcq->ibcq.cq_context);
+ break;
+
+ case IONIC_V1_EQE_CQ_ERR:
+ if (cq->vcq->ibcq.event_handler) {
+ ibev.event = IB_EVENT_CQ_ERR;
+ ibev.device = &dev->ibdev;
+ ibev.element.cq = &cq->vcq->ibcq;
+
+ cq->vcq->ibcq.event_handler(&ibev,
+ cq->vcq->ibcq.cq_context);
+ }
+ break;
+
+ default:
+ ibdev_dbg(&dev->ibdev,
+ "unrecognized cqid %#x code %u\n", cqid, code);
+ break;
+ }
+
+ kref_put(&cq->cq_kref, ionic_cq_complete);
+}
+
+static void ionic_qp_event(struct ionic_ibdev *dev, u32 qpid, u8 code)
+{
+ unsigned long irqflags;
+ struct ib_event ibev;
+ struct ionic_qp *qp;
+
+ xa_lock_irqsave(&dev->qp_tbl, irqflags);
+ qp = xa_load(&dev->qp_tbl, qpid);
+ if (qp)
+ kref_get(&qp->qp_kref);
+ xa_unlock_irqrestore(&dev->qp_tbl, irqflags);
+
+ if (!qp) {
+ ibdev_dbg(&dev->ibdev,
+ "missing qpid %#x code %u\n", qpid, code);
+ return;
+ }
+
+ ibev.device = &dev->ibdev;
+ ibev.element.qp = &qp->ibqp;
+
+ switch (code) {
+ case IONIC_V1_EQE_SQ_DRAIN:
+ ibev.event = IB_EVENT_SQ_DRAINED;
+ break;
+
+ case IONIC_V1_EQE_QP_COMM_EST:
+ ibev.event = IB_EVENT_COMM_EST;
+ break;
+
+ case IONIC_V1_EQE_QP_LAST_WQE:
+ ibev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ break;
+
+ case IONIC_V1_EQE_QP_ERR:
+ ibev.event = IB_EVENT_QP_FATAL;
+ break;
+
+ case IONIC_V1_EQE_QP_ERR_REQUEST:
+ ibev.event = IB_EVENT_QP_REQ_ERR;
+ break;
+
+ case IONIC_V1_EQE_QP_ERR_ACCESS:
+ ibev.event = IB_EVENT_QP_ACCESS_ERR;
+ break;
+
+ default:
+ ibdev_dbg(&dev->ibdev,
+ "unrecognized qpid %#x code %u\n", qpid, code);
+ goto out;
+ }
+
+ if (qp->ibqp.event_handler)
+ qp->ibqp.event_handler(&ibev, qp->ibqp.qp_context);
+
+out:
+ kref_put(&qp->qp_kref, ionic_qp_complete);
+}
+
+static u16 ionic_poll_eq(struct ionic_eq *eq, u16 budget)
+{
+ struct ionic_ibdev *dev = eq->dev;
+ struct ionic_v1_eqe eqe;
+ u16 npolled = 0;
+ u8 type, code;
+ u32 evt, qid;
+
+ while (npolled < budget) {
+ if (!ionic_next_eqe(eq, &eqe))
+ break;
+
+ ionic_queue_produce(&eq->q);
+
+ /* cons is color for eq */
+ eq->q.cons = ionic_color_wrap(eq->q.prod, eq->q.cons);
+
+ ++npolled;
+
+ evt = ionic_v1_eqe_evt(&eqe);
+ type = ionic_v1_eqe_evt_type(evt);
+ code = ionic_v1_eqe_evt_code(evt);
+ qid = ionic_v1_eqe_evt_qid(evt);
+
+ switch (type) {
+ case IONIC_V1_EQE_TYPE_CQ:
+ ionic_cq_event(dev, qid, code);
+ break;
+
+ case IONIC_V1_EQE_TYPE_QP:
+ ionic_qp_event(dev, qid, code);
+ break;
+
+ default:
+ ibdev_dbg(&dev->ibdev,
+ "unknown event %#x type %u\n", evt, type);
+ }
+ }
+
+ return npolled;
+}
+
+static void ionic_poll_eq_work(struct work_struct *work)
+{
+ struct ionic_eq *eq = container_of(work, struct ionic_eq, work);
+ u32 npolled;
+
+ if (unlikely(!eq->enable) || WARN_ON(eq->armed))
+ return;
+
+ npolled = ionic_poll_eq(eq, IONIC_EQ_WORK_BUDGET);
+ if (npolled == IONIC_EQ_WORK_BUDGET) {
+ ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr,
+ npolled, 0);
+ queue_work(ionic_evt_workq, &eq->work);
+ } else {
+ xchg(&eq->armed, 1);
+ ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr,
+ 0, IONIC_INTR_CRED_UNMASK);
+ }
+}
+
+static irqreturn_t ionic_poll_eq_isr(int irq, void *eqptr)
+{
+ struct ionic_eq *eq = eqptr;
+ int was_armed;
+ u32 npolled;
+
+ was_armed = xchg(&eq->armed, 0);
+
+ if (unlikely(!eq->enable) || !was_armed)
+ return IRQ_HANDLED;
+
+ npolled = ionic_poll_eq(eq, IONIC_EQ_ISR_BUDGET);
+ if (npolled == IONIC_EQ_ISR_BUDGET) {
+ ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr,
+ npolled, 0);
+ queue_work(ionic_evt_workq, &eq->work);
+ } else {
+ xchg(&eq->armed, 1);
+ ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr,
+ 0, IONIC_INTR_CRED_UNMASK);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct ionic_eq *ionic_create_eq(struct ionic_ibdev *dev, int eqid)
+{
+ struct ionic_intr_info intr_obj = { };
+ struct ionic_eq *eq;
+ int rc;
+
+ eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+ if (!eq)
+ return ERR_PTR(-ENOMEM);
+
+ eq->dev = dev;
+
+ rc = ionic_queue_init(&eq->q, dev->lif_cfg.hwdev, IONIC_EQ_DEPTH,
+ sizeof(struct ionic_v1_eqe));
+ if (rc)
+ goto err_q;
+
+ eq->eqid = eqid;
+
+ eq->armed = true;
+ eq->enable = false;
+ INIT_WORK(&eq->work, ionic_poll_eq_work);
+
+ rc = ionic_intr_alloc(dev->lif_cfg.lif, &intr_obj);
+ if (rc < 0)
+ goto err_intr;
+
+ eq->irq = intr_obj.vector;
+ eq->intr = intr_obj.index;
+
+ ionic_queue_dbell_init(&eq->q, eq->eqid);
+
+ /* cons is color for eq */
+ eq->q.cons = true;
+
+ snprintf(eq->name, sizeof(eq->name), "%s-%d-%d-eq",
+ "ionr", dev->lif_cfg.lif_index, eq->eqid);
+
+ ionic_intr_mask(dev->lif_cfg.intr_ctrl, eq->intr, IONIC_INTR_MASK_SET);
+ ionic_intr_mask_assert(dev->lif_cfg.intr_ctrl, eq->intr, IONIC_INTR_MASK_SET);
+ ionic_intr_coal_init(dev->lif_cfg.intr_ctrl, eq->intr, 0);
+ ionic_intr_clean(dev->lif_cfg.intr_ctrl, eq->intr);
+
+ eq->enable = true;
+
+ rc = request_irq(eq->irq, ionic_poll_eq_isr, 0, eq->name, eq);
+ if (rc)
+ goto err_irq;
+
+ rc = ionic_rdma_queue_devcmd(dev, &eq->q, eq->eqid, eq->intr,
+ IONIC_CMD_RDMA_CREATE_EQ);
+ if (rc)
+ goto err_cmd;
+
+ ionic_intr_mask(dev->lif_cfg.intr_ctrl, eq->intr, IONIC_INTR_MASK_CLEAR);
+
+ return eq;
+
+err_cmd:
+ eq->enable = false;
+ free_irq(eq->irq, eq);
+ flush_work(&eq->work);
+err_irq:
+ ionic_intr_free(dev->lif_cfg.lif, eq->intr);
+err_intr:
+ ionic_queue_destroy(&eq->q, dev->lif_cfg.hwdev);
+err_q:
+ kfree(eq);
+
+ return ERR_PTR(rc);
+}
+
+static void ionic_destroy_eq(struct ionic_eq *eq)
+{
+ struct ionic_ibdev *dev = eq->dev;
+
+ eq->enable = false;
+ free_irq(eq->irq, eq);
+ flush_work(&eq->work);
+
+ ionic_intr_free(dev->lif_cfg.lif, eq->intr);
+ ionic_queue_destroy(&eq->q, dev->lif_cfg.hwdev);
+ kfree(eq);
+}
+
+int ionic_create_rdma_admin(struct ionic_ibdev *dev)
+{
+ int eq_i = 0, aq_i = 0, rc = 0;
+ struct ionic_vcq *vcq;
+ struct ionic_aq *aq;
+ struct ionic_eq *eq;
+
+ dev->eq_vec = NULL;
+ dev->aq_vec = NULL;
+
+ INIT_WORK(&dev->reset_work, ionic_reset_work);
+ INIT_DELAYED_WORK(&dev->admin_dwork, ionic_admin_dwork);
+ atomic_set(&dev->admin_state, IONIC_ADMIN_KILLED);
+
+ if (dev->lif_cfg.aq_count > IONIC_AQ_COUNT) {
+ ibdev_dbg(&dev->ibdev, "limiting adminq count to %d\n",
+ IONIC_AQ_COUNT);
+ dev->lif_cfg.aq_count = IONIC_AQ_COUNT;
+ }
+
+ if (dev->lif_cfg.eq_count > IONIC_EQ_COUNT) {
+ dev_dbg(&dev->ibdev.dev, "limiting eventq count to %d\n",
+ IONIC_EQ_COUNT);
+ dev->lif_cfg.eq_count = IONIC_EQ_COUNT;
+ }
+
+ /* need at least IONIC_EQ_COUNT_MIN eqs and IONIC_AQ_COUNT_MIN aq */
+ if (dev->lif_cfg.eq_count < IONIC_EQ_COUNT_MIN ||
+ dev->lif_cfg.aq_count < IONIC_AQ_COUNT_MIN) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ dev->eq_vec = kmalloc_array(dev->lif_cfg.eq_count, sizeof(*dev->eq_vec),
+ GFP_KERNEL);
+ if (!dev->eq_vec) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (eq_i = 0; eq_i < dev->lif_cfg.eq_count; ++eq_i) {
+ eq = ionic_create_eq(dev, eq_i + dev->lif_cfg.eq_base);
+ if (IS_ERR(eq)) {
+ rc = PTR_ERR(eq);
+
+ if (eq_i < IONIC_EQ_COUNT_MIN) {
+ ibdev_err(&dev->ibdev,
+ "fail create eq %pe\n", eq);
+ goto out;
+ }
+
+ /* ok, just fewer eq than device supports */
+ ibdev_dbg(&dev->ibdev, "eq count %d want %d rc %pe\n",
+ eq_i, dev->lif_cfg.eq_count, eq);
+
+ rc = 0;
+ break;
+ }
+
+ dev->eq_vec[eq_i] = eq;
+ }
+
+ dev->lif_cfg.eq_count = eq_i;
+
+ dev->aq_vec = kmalloc_array(dev->lif_cfg.aq_count, sizeof(*dev->aq_vec),
+ GFP_KERNEL);
+ if (!dev->aq_vec) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Create one CQ per AQ */
+ for (aq_i = 0; aq_i < dev->lif_cfg.aq_count; ++aq_i) {
+ vcq = ionic_create_rdma_admincq(dev, aq_i % eq_i);
+ if (IS_ERR(vcq)) {
+ rc = PTR_ERR(vcq);
+
+ if (!aq_i) {
+ ibdev_err(&dev->ibdev,
+ "failed to create acq %pe\n", vcq);
+ goto out;
+ }
+
+ /* ok, just fewer adminq than device supports */
+ ibdev_dbg(&dev->ibdev, "acq count %d want %d rc %pe\n",
+ aq_i, dev->lif_cfg.aq_count, vcq);
+ break;
+ }
+
+ aq = ionic_create_rdma_adminq(dev, aq_i + dev->lif_cfg.aq_base,
+ vcq->cq[0].cqid);
+ if (IS_ERR(aq)) {
+ /* Clean up the dangling CQ */
+ ionic_destroy_cq_common(dev, &vcq->cq[0]);
+ kfree(vcq);
+
+ rc = PTR_ERR(aq);
+
+ if (!aq_i) {
+ ibdev_err(&dev->ibdev,
+ "failed to create aq %pe\n", aq);
+ goto out;
+ }
+
+ /* ok, just fewer adminq than device supports */
+ ibdev_dbg(&dev->ibdev, "aq count %d want %d rc %pe\n",
+ aq_i, dev->lif_cfg.aq_count, aq);
+ break;
+ }
+
+ vcq->ibcq.cq_context = aq;
+ aq->vcq = vcq;
+
+ atomic_set(&aq->admin_state, IONIC_ADMIN_ACTIVE);
+ dev->aq_vec[aq_i] = aq;
+ }
+
+ atomic_set(&dev->admin_state, IONIC_ADMIN_ACTIVE);
+out:
+ dev->lif_cfg.eq_count = eq_i;
+ dev->lif_cfg.aq_count = aq_i;
+
+ return rc;
+}
+
+void ionic_destroy_rdma_admin(struct ionic_ibdev *dev)
+{
+ struct ionic_vcq *vcq;
+ struct ionic_aq *aq;
+ struct ionic_eq *eq;
+
+ /*
+ * Killing the admin before destroy makes sure all admin commands and
+ * completions are flushed. admin_state = IONIC_ADMIN_KILLED stops
+ * further work from being queued.
+ */
+ cancel_delayed_work_sync(&dev->admin_dwork);
+ cancel_work_sync(&dev->reset_work);
+
+ if (dev->aq_vec) {
+ while (dev->lif_cfg.aq_count > 0) {
+ aq = dev->aq_vec[--dev->lif_cfg.aq_count];
+ vcq = aq->vcq;
+
+ cancel_work_sync(&aq->work);
+
+ __ionic_destroy_rdma_adminq(dev, aq);
+ if (vcq) {
+ ionic_destroy_cq_common(dev, &vcq->cq[0]);
+ kfree(vcq);
+ }
+ }
+
+ kfree(dev->aq_vec);
+ }
+
+ if (dev->eq_vec) {
+ while (dev->lif_cfg.eq_count > 0) {
+ eq = dev->eq_vec[--dev->lif_cfg.eq_count];
+ ionic_destroy_eq(eq);
+ }
+
+ kfree(dev->eq_vec);
+ }
+}
diff --git a/drivers/infiniband/hw/ionic/ionic_controlpath.c b/drivers/infiniband/hw/ionic/ionic_controlpath.c
new file mode 100644
index 000000000000..ea12d9b8e125
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_controlpath.c
@@ -0,0 +1,2679 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+#include <rdma/ib_user_verbs.h>
+#include <ionic_api.h>
+
+#include "ionic_fw.h"
+#include "ionic_ibdev.h"
+
+#define ionic_set_ecn(tos) (((tos) | 2u) & ~1u)
+#define ionic_clear_ecn(tos) ((tos) & ~3u)
+
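
A worked illustration (not part of the patch) of the TOS bit manipulation in the two macros above: the low two bits of the TOS byte are the ECN field, ionic_set_ecn() forces them to 0b10 (ECT(0)) and ionic_clear_ecn() forces them to 0b00.

    #include <stdio.h>

    #define ex_set_ecn(tos)   (((tos) | 2u) & ~1u)  /* ECN field -> 0b10, ECT(0) */
    #define ex_clear_ecn(tos) ((tos) & ~3u)         /* ECN field -> 0b00 */

    int main(void)
    {
            unsigned int tos = 0x7f;        /* hypothetical TOS value */

            printf("set=0x%02x clear=0x%02x\n", ex_set_ecn(tos), ex_clear_ecn(tos));
            /* prints set=0x7e clear=0x7c: only the low two (ECN) bits change */
            return 0;
    }
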
+static int ionic_validate_qdesc(struct ionic_qdesc *q)
+{
+ if (!q->addr || !q->size || !q->mask ||
+ !q->depth_log2 || !q->stride_log2)
+ return -EINVAL;
+
+ if (q->addr & (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ if (q->mask != BIT(q->depth_log2) - 1)
+ return -EINVAL;
+
+ if (q->size < BIT_ULL(q->depth_log2 + q->stride_log2))
+ return -EINVAL;
+
+ return 0;
+}
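
A worked example (hypothetical values) of the checks above: depth_log2 = 10 and stride_log2 = 6 describe a 1024-entry queue of 64-byte strides, so a valid descriptor must be page aligned, carry mask 0x3ff, and map at least 64 KiB.

    #include <stdio.h>

    /* Same shape as the validation above, with PAGE_SIZE assumed to be 4096. */
    static int ex_valid_qdesc(unsigned long long addr, unsigned long long size,
                              unsigned int mask, unsigned int depth_log2,
                              unsigned int stride_log2)
    {
            return addr && depth_log2 && stride_log2 &&
                   !(addr & 4095) &&
                   mask == (1u << depth_log2) - 1 &&
                   size >= (1ull << (depth_log2 + stride_log2));
    }

    int main(void)
    {
            printf("%d\n", ex_valid_qdesc(0x10000, 64 * 1024, 0x3ff, 10, 6)); /* 1 */
            printf("%d\n", ex_valid_qdesc(0x10000, 32 * 1024, 0x3ff, 10, 6)); /* 0 */
            return 0;
    }
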
+
+static u32 ionic_get_eqid(struct ionic_ibdev *dev, u32 comp_vector, u8 udma_idx)
+{
+ /* EQ per vector per udma, and the first eqs reserved for async events.
+ * The rest of the vectors can be requested for completions.
+ */
+ u32 comp_vec_count = dev->lif_cfg.eq_count / dev->lif_cfg.udma_count - 1;
+
+ return (comp_vector % comp_vec_count + 1) * dev->lif_cfg.udma_count + udma_idx;
+}
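
A worked example (hypothetical sizes) of the EQ selection arithmetic above: with eq_count = 8 and udma_count = 2 there are 8 / 2 - 1 = 3 completion vectors per udma, and the formula never returns the first udma_count EQ ids, which the comment says are reserved for async events. comp_vector = 5 on udma_idx = 1 then maps to (5 % 3 + 1) * 2 + 1 = 7.

    #include <stdio.h>

    /* Same mapping as above, with the counts passed in explicitly. */
    static unsigned int ex_get_eqid(unsigned int eq_count, unsigned int udma_count,
                                    unsigned int comp_vector, unsigned int udma_idx)
    {
            unsigned int comp_vec_count = eq_count / udma_count - 1;

            return (comp_vector % comp_vec_count + 1) * udma_count + udma_idx;
    }

    int main(void)
    {
            printf("%u\n", ex_get_eqid(8, 2, 5, 1));        /* prints 7 */
            return 0;
    }
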
+
+static int ionic_get_cqid(struct ionic_ibdev *dev, u32 *cqid, u8 udma_idx)
+{
+ unsigned int size, base, bound;
+ int rc;
+
+ size = dev->lif_cfg.cq_count / dev->lif_cfg.udma_count;
+ base = size * udma_idx;
+ bound = base + size;
+
+ rc = ionic_resid_get_shared(&dev->inuse_cqid, base, bound);
+ if (rc >= 0) {
+ /* cq_base is zero or a multiple of two queue groups */
+ *cqid = dev->lif_cfg.cq_base +
+ ionic_bitid_to_qid(rc, dev->lif_cfg.udma_qgrp_shift,
+ dev->half_cqid_udma_shift);
+
+ rc = 0;
+ }
+
+ return rc;
+}
+
+static void ionic_put_cqid(struct ionic_ibdev *dev, u32 cqid)
+{
+ u32 bitid = ionic_qid_to_bitid(cqid - dev->lif_cfg.cq_base,
+ dev->lif_cfg.udma_qgrp_shift,
+ dev->half_cqid_udma_shift);
+
+ ionic_resid_put(&dev->inuse_cqid, bitid);
+}
+
+int ionic_create_cq_common(struct ionic_vcq *vcq,
+ struct ionic_tbl_buf *buf,
+ const struct ib_cq_init_attr *attr,
+ struct ionic_ctx *ctx,
+ struct ib_udata *udata,
+ struct ionic_qdesc *req_cq,
+ __u32 *resp_cqid,
+ int udma_idx)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(vcq->ibcq.device);
+ struct ionic_cq *cq = &vcq->cq[udma_idx];
+ void *entry;
+ int rc;
+
+ cq->vcq = vcq;
+
+ if (attr->cqe < 1 || attr->cqe + IONIC_CQ_GRACE > 0xffff) {
+ rc = -EINVAL;
+ goto err_args;
+ }
+
+ rc = ionic_get_cqid(dev, &cq->cqid, udma_idx);
+ if (rc)
+ goto err_args;
+
+ cq->eqid = ionic_get_eqid(dev, attr->comp_vector, udma_idx);
+
+ spin_lock_init(&cq->lock);
+ INIT_LIST_HEAD(&cq->poll_sq);
+ INIT_LIST_HEAD(&cq->flush_sq);
+ INIT_LIST_HEAD(&cq->flush_rq);
+
+ if (udata) {
+ rc = ionic_validate_qdesc(req_cq);
+ if (rc)
+ goto err_qdesc;
+
+ cq->umem = ib_umem_get(&dev->ibdev, req_cq->addr, req_cq->size,
+ IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(cq->umem)) {
+ rc = PTR_ERR(cq->umem);
+ goto err_qdesc;
+ }
+
+ cq->q.ptr = NULL;
+ cq->q.size = req_cq->size;
+ cq->q.mask = req_cq->mask;
+ cq->q.depth_log2 = req_cq->depth_log2;
+ cq->q.stride_log2 = req_cq->stride_log2;
+
+ *resp_cqid = cq->cqid;
+ } else {
+ rc = ionic_queue_init(&cq->q, dev->lif_cfg.hwdev,
+ attr->cqe + IONIC_CQ_GRACE,
+ sizeof(struct ionic_v1_cqe));
+ if (rc)
+ goto err_q_init;
+
+ ionic_queue_dbell_init(&cq->q, cq->cqid);
+ cq->color = true;
+ cq->credit = cq->q.mask;
+ }
+
+ rc = ionic_pgtbl_init(dev, buf, cq->umem, cq->q.dma, 1, PAGE_SIZE);
+ if (rc)
+ goto err_pgtbl_init;
+
+ init_completion(&cq->cq_rel_comp);
+ kref_init(&cq->cq_kref);
+
+ entry = xa_store_irq(&dev->cq_tbl, cq->cqid, cq, GFP_KERNEL);
+ if (entry) {
+ if (!xa_is_err(entry))
+ rc = -EINVAL;
+ else
+ rc = xa_err(entry);
+
+ goto err_xa;
+ }
+
+ return 0;
+
+err_xa:
+ ionic_pgtbl_unbuf(dev, buf);
+err_pgtbl_init:
+ if (!udata)
+ ionic_queue_destroy(&cq->q, dev->lif_cfg.hwdev);
+err_q_init:
+ if (cq->umem)
+ ib_umem_release(cq->umem);
+err_qdesc:
+ ionic_put_cqid(dev, cq->cqid);
+err_args:
+ cq->vcq = NULL;
+
+ return rc;
+}
+
+void ionic_destroy_cq_common(struct ionic_ibdev *dev, struct ionic_cq *cq)
+{
+ if (!cq->vcq)
+ return;
+
+ xa_erase_irq(&dev->cq_tbl, cq->cqid);
+
+ kref_put(&cq->cq_kref, ionic_cq_complete);
+ wait_for_completion(&cq->cq_rel_comp);
+
+ if (cq->umem)
+ ib_umem_release(cq->umem);
+ else
+ ionic_queue_destroy(&cq->q, dev->lif_cfg.hwdev);
+
+ ionic_put_cqid(dev, cq->cqid);
+
+ cq->vcq = NULL;
+}
+
+static int ionic_validate_qdesc_zero(struct ionic_qdesc *q)
+{
+ if (q->addr || q->size || q->mask || q->depth_log2 || q->stride_log2)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ionic_get_pdid(struct ionic_ibdev *dev, u32 *pdid)
+{
+ int rc;
+
+ rc = ionic_resid_get(&dev->inuse_pdid);
+ if (rc < 0)
+ return rc;
+
+ *pdid = rc;
+ return 0;
+}
+
+static int ionic_get_ahid(struct ionic_ibdev *dev, u32 *ahid)
+{
+ int rc;
+
+ rc = ionic_resid_get(&dev->inuse_ahid);
+ if (rc < 0)
+ return rc;
+
+ *ahid = rc;
+ return 0;
+}
+
+static int ionic_get_mrid(struct ionic_ibdev *dev, u32 *mrid)
+{
+ int rc;
+
+ /* wrap to 1, skip reserved lkey */
+ rc = ionic_resid_get_shared(&dev->inuse_mrid, 1,
+ dev->inuse_mrid.inuse_size);
+ if (rc < 0)
+ return rc;
+
+ *mrid = ionic_mrid(rc, dev->next_mrkey++);
+ return 0;
+}
+
+static int ionic_get_gsi_qpid(struct ionic_ibdev *dev, u32 *qpid)
+{
+ int rc = 0;
+
+ rc = ionic_resid_get_shared(&dev->inuse_qpid, IB_QPT_GSI, IB_QPT_GSI + 1);
+ if (rc < 0)
+ return rc;
+
+ *qpid = IB_QPT_GSI;
+ return 0;
+}
+
+static int ionic_get_qpid(struct ionic_ibdev *dev, u32 *qpid,
+ u8 *udma_idx, u8 udma_mask)
+{
+ unsigned int size, base, bound;
+ int udma_i, udma_x, udma_ix;
+ int rc = -EINVAL;
+
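+ /* Spread QPs across udmas: flip the preferred starting index on
+ * each allocation, then search the allowed udmas from there.
+ */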
+ udma_x = dev->next_qpid_udma_idx;
+
+ dev->next_qpid_udma_idx ^= dev->lif_cfg.udma_count - 1;
+
+ for (udma_i = 0; udma_i < dev->lif_cfg.udma_count; ++udma_i) {
+ udma_ix = udma_i ^ udma_x;
+
+ if (!(udma_mask & BIT(udma_ix)))
+ continue;
+
+ size = dev->lif_cfg.qp_count / dev->lif_cfg.udma_count;
+ base = size * udma_ix;
+ bound = base + size;
+
+ /* skip reserved SMI and GSI qpids in group zero */
+ if (!base)
+ base = 2;
+
+ rc = ionic_resid_get_shared(&dev->inuse_qpid, base, bound);
+ if (rc >= 0) {
+ *qpid = ionic_bitid_to_qid(rc,
+ dev->lif_cfg.udma_qgrp_shift,
+ dev->half_qpid_udma_shift);
+ *udma_idx = udma_ix;
+
+ rc = 0;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static int ionic_get_dbid(struct ionic_ibdev *dev, u32 *dbid, phys_addr_t *addr)
+{
+ int rc, dbpage_num;
+
+ /* wrap to 1, skip kernel reserved */
+ rc = ionic_resid_get_shared(&dev->inuse_dbid, 1,
+ dev->inuse_dbid.inuse_size);
+ if (rc < 0)
+ return rc;
+
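+ /* Doorbell pages are grouped per LIF: page dbid of this LIF lives
+ * at db_phys + (lif_hw_index * dbid_count + dbid) * PAGE_SIZE.
+ */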
+ dbpage_num = (dev->lif_cfg.lif_hw_index * dev->lif_cfg.dbid_count) + rc;
+ *addr = dev->lif_cfg.db_phys + ((phys_addr_t)dbpage_num << PAGE_SHIFT);
+
+ *dbid = rc;
+
+ return 0;
+}
+
+static void ionic_put_pdid(struct ionic_ibdev *dev, u32 pdid)
+{
+ ionic_resid_put(&dev->inuse_pdid, pdid);
+}
+
+static void ionic_put_ahid(struct ionic_ibdev *dev, u32 ahid)
+{
+ ionic_resid_put(&dev->inuse_ahid, ahid);
+}
+
+static void ionic_put_mrid(struct ionic_ibdev *dev, u32 mrid)
+{
+ ionic_resid_put(&dev->inuse_mrid, ionic_mrid_index(mrid));
+}
+
+static void ionic_put_qpid(struct ionic_ibdev *dev, u32 qpid)
+{
+ u32 bitid = ionic_qid_to_bitid(qpid,
+ dev->lif_cfg.udma_qgrp_shift,
+ dev->half_qpid_udma_shift);
+
+ ionic_resid_put(&dev->inuse_qpid, bitid);
+}
+
+static void ionic_put_dbid(struct ionic_ibdev *dev, u32 dbid)
+{
+ ionic_resid_put(&dev->inuse_dbid, dbid);
+}
+
+static struct rdma_user_mmap_entry*
+ionic_mmap_entry_insert(struct ionic_ctx *ctx, unsigned long size,
+ unsigned long pfn, u8 mmap_flags, u64 *offset)
+{
+ struct ionic_mmap_entry *entry;
+ int rc;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return NULL;
+
+ entry->size = size;
+ entry->pfn = pfn;
+ entry->mmap_flags = mmap_flags;
+
+ rc = rdma_user_mmap_entry_insert(&ctx->ibctx, &entry->rdma_entry,
+ entry->size);
+ if (rc) {
+ kfree(entry);
+ return NULL;
+ }
+
+ if (offset)
+ *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+ return &entry->rdma_entry;
+}
+
+int ionic_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibctx->device);
+ struct ionic_ctx *ctx = to_ionic_ctx(ibctx);
+ struct ionic_ctx_resp resp = {};
+ struct ionic_ctx_req req;
+ phys_addr_t db_phys = 0;
+ int rc;
+
+ rc = ib_copy_from_udata(&req, udata, sizeof(req));
+ if (rc)
+ return rc;
+
+ /* try to allocate dbid for user ctx */
+ rc = ionic_get_dbid(dev, &ctx->dbid, &db_phys);
+ if (rc < 0)
+ return rc;
+
+ ibdev_dbg(&dev->ibdev, "user space dbid %u\n", ctx->dbid);
+
+ ctx->mmap_dbell = ionic_mmap_entry_insert(ctx, PAGE_SIZE,
+ PHYS_PFN(db_phys), 0, NULL);
+ if (!ctx->mmap_dbell) {
+ rc = -ENOMEM;
+ goto err_mmap_dbell;
+ }
+
+ resp.page_shift = PAGE_SHIFT;
+
+ resp.dbell_offset = db_phys & ~PAGE_MASK;
+
+ resp.version = dev->lif_cfg.rdma_version;
+ resp.qp_opcodes = dev->lif_cfg.qp_opcodes;
+ resp.admin_opcodes = dev->lif_cfg.admin_opcodes;
+
+ resp.sq_qtype = dev->lif_cfg.sq_qtype;
+ resp.rq_qtype = dev->lif_cfg.rq_qtype;
+ resp.cq_qtype = dev->lif_cfg.cq_qtype;
+ resp.admin_qtype = dev->lif_cfg.aq_qtype;
+ resp.max_stride = dev->lif_cfg.max_stride;
+ resp.max_spec = IONIC_SPEC_HIGH;
+
+ resp.udma_count = dev->lif_cfg.udma_count;
+ resp.expdb_mask = dev->lif_cfg.expdb_mask;
+
+ if (dev->lif_cfg.sq_expdb)
+ resp.expdb_qtypes |= IONIC_EXPDB_SQ;
+ if (dev->lif_cfg.rq_expdb)
+ resp.expdb_qtypes |= IONIC_EXPDB_RQ;
+
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc)
+ goto err_resp;
+
+ return 0;
+
+err_resp:
+ rdma_user_mmap_entry_remove(ctx->mmap_dbell);
+err_mmap_dbell:
+ ionic_put_dbid(dev, ctx->dbid);
+
+ return rc;
+}
+
+void ionic_dealloc_ucontext(struct ib_ucontext *ibctx)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibctx->device);
+ struct ionic_ctx *ctx = to_ionic_ctx(ibctx);
+
+ rdma_user_mmap_entry_remove(ctx->mmap_dbell);
+ ionic_put_dbid(dev, ctx->dbid);
+}
+
+int ionic_mmap(struct ib_ucontext *ibctx, struct vm_area_struct *vma)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibctx->device);
+ struct ionic_ctx *ctx = to_ionic_ctx(ibctx);
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct ionic_mmap_entry *ionic_entry;
+ int rc = 0;
+
+ rdma_entry = rdma_user_mmap_entry_get(&ctx->ibctx, vma);
+ if (!rdma_entry) {
+ ibdev_dbg(&dev->ibdev, "not found %#lx\n",
+ vma->vm_pgoff << PAGE_SHIFT);
+ return -EINVAL;
+ }
+
+ ionic_entry = container_of(rdma_entry, struct ionic_mmap_entry,
+ rdma_entry);
+
+ ibdev_dbg(&dev->ibdev, "writecombine? %d\n",
+ ionic_entry->mmap_flags & IONIC_MMAP_WC);
+ if (ionic_entry->mmap_flags & IONIC_MMAP_WC)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ else
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ ibdev_dbg(&dev->ibdev, "remap st %#lx pf %#lx sz %#lx\n",
+ vma->vm_start, ionic_entry->pfn, ionic_entry->size);
+ rc = rdma_user_mmap_io(&ctx->ibctx, vma, ionic_entry->pfn,
+ ionic_entry->size, vma->vm_page_prot,
+ rdma_entry);
+ if (rc)
+ ibdev_dbg(&dev->ibdev, "remap failed %d\n", rc);
+
+ rdma_user_mmap_entry_put(rdma_entry);
+ return rc;
+}
+
+void ionic_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct ionic_mmap_entry *ionic_entry;
+
+ ionic_entry = container_of(rdma_entry, struct ionic_mmap_entry,
+ rdma_entry);
+ kfree(ionic_entry);
+}
+
+int ionic_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
+ struct ionic_pd *pd = to_ionic_pd(ibpd);
+
+ return ionic_get_pdid(dev, &pd->pdid);
+}
+
+int ionic_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
+ struct ionic_pd *pd = to_ionic_pd(ibpd);
+
+ ionic_put_pdid(dev, pd->pdid);
+
+ return 0;
+}
+
+static int ionic_build_hdr(struct ionic_ibdev *dev,
+ struct ib_ud_header *hdr,
+ const struct rdma_ah_attr *attr,
+ u16 sport, bool want_ecn)
+{
+ const struct ib_global_route *grh;
+ enum rdma_network_type net;
+ u16 vlan;
+ int rc;
+
+ if (attr->ah_flags != IB_AH_GRH)
+ return -EINVAL;
+ if (attr->type != RDMA_AH_ATTR_TYPE_ROCE)
+ return -EINVAL;
+
+ grh = rdma_ah_read_grh(attr);
+
+ rc = rdma_read_gid_l2_fields(grh->sgid_attr, &vlan, &hdr->eth.smac_h[0]);
+ if (rc)
+ return rc;
+
+ net = rdma_gid_attr_network_type(grh->sgid_attr);
+
+ rc = ib_ud_header_init(0, /* no payload */
+ 0, /* no lrh */
+ 1, /* yes eth */
+ vlan != 0xffff,
+ 0, /* no grh */
+ net == RDMA_NETWORK_IPV4 ? 4 : 6,
+ 1, /* yes udp */
+ 0, /* no imm */
+ hdr);
+ if (rc)
+ return rc;
+
+ ether_addr_copy(hdr->eth.dmac_h, attr->roce.dmac);
+
+ if (net == RDMA_NETWORK_IPV4) {
+ hdr->eth.type = cpu_to_be16(ETH_P_IP);
+ hdr->ip4.frag_off = cpu_to_be16(0x4000); /* don't fragment */
+ hdr->ip4.ttl = grh->hop_limit;
+ hdr->ip4.tot_len = cpu_to_be16(0xffff);
+ hdr->ip4.saddr =
+ *(const __be32 *)(grh->sgid_attr->gid.raw + 12);
+ hdr->ip4.daddr = *(const __be32 *)(grh->dgid.raw + 12);
+
+ if (want_ecn)
+ hdr->ip4.tos = ionic_set_ecn(grh->traffic_class);
+ else
+ hdr->ip4.tos = ionic_clear_ecn(grh->traffic_class);
+ } else {
+ hdr->eth.type = cpu_to_be16(ETH_P_IPV6);
+ hdr->grh.flow_label = cpu_to_be32(grh->flow_label);
+ hdr->grh.hop_limit = grh->hop_limit;
+ hdr->grh.source_gid = grh->sgid_attr->gid;
+ hdr->grh.destination_gid = grh->dgid;
+
+ if (want_ecn)
+ hdr->grh.traffic_class =
+ ionic_set_ecn(grh->traffic_class);
+ else
+ hdr->grh.traffic_class =
+ ionic_clear_ecn(grh->traffic_class);
+ }
+
+ if (vlan != 0xffff) {
+ vlan |= rdma_ah_get_sl(attr) << VLAN_PRIO_SHIFT;
+ hdr->vlan.tag = cpu_to_be16(vlan);
+ hdr->vlan.type = hdr->eth.type;
+ hdr->eth.type = cpu_to_be16(ETH_P_8021Q);
+ }
+
+ hdr->udp.sport = cpu_to_be16(sport);
+ hdr->udp.dport = cpu_to_be16(ROCE_V2_UDP_DPORT);
+
+ return 0;
+}
+
+static void ionic_set_ah_attr(struct ionic_ibdev *dev,
+ struct rdma_ah_attr *ah_attr,
+ struct ib_ud_header *hdr,
+ int sgid_index)
+{
+ u32 flow_label;
+ u16 vlan = 0;
+ u8 tos, ttl;
+
+ if (hdr->vlan_present)
+ vlan = be16_to_cpu(hdr->vlan.tag);
+
+ if (hdr->ipv4_present) {
+ flow_label = 0;
+ ttl = hdr->ip4.ttl;
+ tos = hdr->ip4.tos;
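+ /* present the IPv4 destination as an IPv4-mapped GID
+ * (::ffff:a.b.c.d)
+ */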
+ *(__be16 *)(hdr->grh.destination_gid.raw + 10) = cpu_to_be16(0xffff);
+ *(__be32 *)(hdr->grh.destination_gid.raw + 12) = hdr->ip4.daddr;
+ } else {
+ flow_label = be32_to_cpu(hdr->grh.flow_label);
+ ttl = hdr->grh.hop_limit;
+ tos = hdr->grh.traffic_class;
+ }
+
+ memset(ah_attr, 0, sizeof(*ah_attr));
+ ah_attr->type = RDMA_AH_ATTR_TYPE_ROCE;
+ if (hdr->eth_present)
+ ether_addr_copy(ah_attr->roce.dmac, hdr->eth.dmac_h);
+ rdma_ah_set_sl(ah_attr, vlan >> VLAN_PRIO_SHIFT);
+ rdma_ah_set_port_num(ah_attr, 1);
+ rdma_ah_set_grh(ah_attr, NULL, flow_label, sgid_index, ttl, tos);
+ rdma_ah_set_dgid_raw(ah_attr, &hdr->grh.destination_gid);
+}
+
+static int ionic_create_ah_cmd(struct ionic_ibdev *dev,
+ struct ionic_ah *ah,
+ struct ionic_pd *pd,
+ struct rdma_ah_attr *attr,
+ u32 flags)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_CREATE_AH,
+ .len = cpu_to_le16(IONIC_ADMIN_CREATE_AH_IN_V1_LEN),
+ .cmd.create_ah = {
+ .pd_id = cpu_to_le32(pd->pdid),
+ .dbid_flags = cpu_to_le16(dev->lif_cfg.dbid),
+ .id_ver = cpu_to_le32(ah->ahid),
+ }
+ }
+ };
+ enum ionic_admin_flags admin_flags = 0;
+ dma_addr_t hdr_dma = 0;
+ void *hdr_buf;
+ gfp_t gfp = GFP_ATOMIC;
+ int rc, hdr_len = 0;
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_AH)
+ return -EBADRQC;
+
+ if (flags & RDMA_CREATE_AH_SLEEPABLE)
+ gfp = GFP_KERNEL;
+ else
+ admin_flags |= IONIC_ADMIN_F_BUSYWAIT;
+
+ rc = ionic_build_hdr(dev, &ah->hdr, attr, IONIC_ROCE_UDP_SPORT, false);
+ if (rc)
+ return rc;
+
+ if (ah->hdr.eth.type == cpu_to_be16(ETH_P_8021Q)) {
+ if (ah->hdr.vlan.type == cpu_to_be16(ETH_P_IP))
+ wr.wqe.cmd.create_ah.csum_profile =
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP;
+ else
+ wr.wqe.cmd.create_ah.csum_profile =
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_UDP;
+ } else {
+ if (ah->hdr.eth.type == cpu_to_be16(ETH_P_IP))
+ wr.wqe.cmd.create_ah.csum_profile =
+ IONIC_TFP_CSUM_PROF_ETH_IPV4_UDP;
+ else
+ wr.wqe.cmd.create_ah.csum_profile =
+ IONIC_TFP_CSUM_PROF_ETH_IPV6_UDP;
+ }
+
+ ah->sgid_index = rdma_ah_read_grh(attr)->sgid_index;
+
+ hdr_buf = kmalloc(PAGE_SIZE, gfp);
+ if (!hdr_buf)
+ return -ENOMEM;
+
+ hdr_len = ib_ud_header_pack(&ah->hdr, hdr_buf);
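+ /* keep only the Eth/IP/UDP portion as the header template */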
+ hdr_len -= IB_BTH_BYTES;
+ hdr_len -= IB_DETH_BYTES;
+ ibdev_dbg(&dev->ibdev, "roce packet header template\n");
+ print_hex_dump_debug("hdr ", DUMP_PREFIX_OFFSET, 16, 1,
+ hdr_buf, hdr_len, true);
+
+ hdr_dma = dma_map_single(dev->lif_cfg.hwdev, hdr_buf, hdr_len,
+ DMA_TO_DEVICE);
+
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
+ if (rc)
+ goto err_dma;
+
+ wr.wqe.cmd.create_ah.dma_addr = cpu_to_le64(hdr_dma);
+ wr.wqe.cmd.create_ah.length = cpu_to_le32(hdr_len);
+
+ ionic_admin_post(dev, &wr);
+ rc = ionic_admin_wait(dev, &wr, admin_flags);
+
+ dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, hdr_len,
+ DMA_TO_DEVICE);
+err_dma:
+ kfree(hdr_buf);
+
+ return rc;
+}
+
+static int ionic_destroy_ah_cmd(struct ionic_ibdev *dev, u32 ahid, u32 flags)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_DESTROY_AH,
+ .len = cpu_to_le16(IONIC_ADMIN_DESTROY_AH_IN_V1_LEN),
+ .cmd.destroy_ah = {
+ .ah_id = cpu_to_le32(ahid),
+ },
+ }
+ };
+ enum ionic_admin_flags admin_flags = IONIC_ADMIN_F_TEARDOWN;
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_AH)
+ return -EBADRQC;
+
+ if (!(flags & RDMA_CREATE_AH_SLEEPABLE))
+ admin_flags |= IONIC_ADMIN_F_BUSYWAIT;
+
+ ionic_admin_post(dev, &wr);
+ ionic_admin_wait(dev, &wr, admin_flags);
+
+ /* No host-memory resource is associated with the AH, so it is OK
+ * to "succeed" and complete this destroy AH on the host even if
+ * the device command fails.
+ */
+ return 0;
+}
+
+int ionic_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibah->device);
+ struct rdma_ah_attr *attr = init_attr->ah_attr;
+ struct ionic_pd *pd = to_ionic_pd(ibah->pd);
+ struct ionic_ah *ah = to_ionic_ah(ibah);
+ struct ionic_ah_resp resp = {};
+ u32 flags = init_attr->flags;
+ int rc;
+
+ rc = ionic_get_ahid(dev, &ah->ahid);
+ if (rc)
+ return rc;
+
+ rc = ionic_create_ah_cmd(dev, ah, pd, attr, flags);
+ if (rc)
+ goto err_cmd;
+
+ if (udata) {
+ resp.ahid = ah->ahid;
+
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc)
+ goto err_resp;
+ }
+
+ return 0;
+
+err_resp:
+ ionic_destroy_ah_cmd(dev, ah->ahid, flags);
+err_cmd:
+ ionic_put_ahid(dev, ah->ahid);
+ return rc;
+}
+
+int ionic_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibah->device);
+ struct ionic_ah *ah = to_ionic_ah(ibah);
+
+ ionic_set_ah_attr(dev, ah_attr, &ah->hdr, ah->sgid_index);
+
+ return 0;
+}
+
+int ionic_destroy_ah(struct ib_ah *ibah, u32 flags)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibah->device);
+ struct ionic_ah *ah = to_ionic_ah(ibah);
+ int rc;
+
+ rc = ionic_destroy_ah_cmd(dev, ah->ahid, flags);
+ if (rc)
+ return rc;
+
+ ionic_put_ahid(dev, ah->ahid);
+
+ return 0;
+}
+
+static int ionic_create_mr_cmd(struct ionic_ibdev *dev,
+ struct ionic_pd *pd,
+ struct ionic_mr *mr,
+ u64 addr,
+ u64 length)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_CREATE_MR,
+ .len = cpu_to_le16(IONIC_ADMIN_CREATE_MR_IN_V1_LEN),
+ .cmd.create_mr = {
+ .va = cpu_to_le64(addr),
+ .length = cpu_to_le64(length),
+ .pd_id = cpu_to_le32(pd->pdid),
+ .page_size_log2 = mr->buf.page_size_log2,
+ .tbl_index = cpu_to_le32(~0),
+ .map_count = cpu_to_le32(mr->buf.tbl_pages),
+ .dma_addr = ionic_pgtbl_dma(&mr->buf, addr),
+ .dbid_flags = cpu_to_le16(mr->flags),
+ .id_ver = cpu_to_le32(mr->mrid),
+ }
+ }
+ };
+ int rc;
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_MR)
+ return -EBADRQC;
+
+ ionic_admin_post(dev, &wr);
+ rc = ionic_admin_wait(dev, &wr, 0);
+ if (!rc)
+ mr->created = true;
+
+ return rc;
+}
+
+static int ionic_destroy_mr_cmd(struct ionic_ibdev *dev, u32 mrid)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_DESTROY_MR,
+ .len = cpu_to_le16(IONIC_ADMIN_DESTROY_MR_IN_V1_LEN),
+ .cmd.destroy_mr = {
+ .mr_id = cpu_to_le32(mrid),
+ },
+ }
+ };
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_MR)
+ return -EBADRQC;
+
+ ionic_admin_post(dev, &wr);
+
+ return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_TEARDOWN);
+}
+
+struct ib_mr *ionic_get_dma_mr(struct ib_pd *ibpd, int access)
+{
+ struct ionic_pd *pd = to_ionic_pd(ibpd);
+ struct ionic_mr *mr;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->ibmr.lkey = IONIC_DMA_LKEY;
+ mr->ibmr.rkey = IONIC_DMA_RKEY;
+
+ if (pd)
+ pd->flags |= IONIC_QPF_PRIVILEGED;
+
+ return &mr->ibmr;
+}
+
+struct ib_mr *ionic_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
+ u64 addr, int access, struct ib_dmah *dmah,
+ struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
+ struct ionic_pd *pd = to_ionic_pd(ibpd);
+ struct ionic_mr *mr;
+ unsigned long pg_sz;
+ int rc;
+
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ rc = ionic_get_mrid(dev, &mr->mrid);
+ if (rc)
+ goto err_mrid;
+
+ mr->ibmr.lkey = mr->mrid;
+ mr->ibmr.rkey = mr->mrid;
+ mr->ibmr.iova = addr;
+ mr->ibmr.length = length;
+
+ mr->flags = IONIC_MRF_USER_MR | to_ionic_mr_flags(access);
+
+ mr->umem = ib_umem_get(&dev->ibdev, start, length, access);
+ if (IS_ERR(mr->umem)) {
+ rc = PTR_ERR(mr->umem);
+ goto err_umem;
+ }
+
+ pg_sz = ib_umem_find_best_pgsz(mr->umem,
+ dev->lif_cfg.page_size_supported,
+ addr);
+ if (!pg_sz) {
+ rc = -EINVAL;
+ goto err_pgtbl;
+ }
+
+ rc = ionic_pgtbl_init(dev, &mr->buf, mr->umem, 0, 1, pg_sz);
+ if (rc)
+ goto err_pgtbl;
+
+ rc = ionic_create_mr_cmd(dev, pd, mr, addr, length);
+ if (rc)
+ goto err_cmd;
+
+ ionic_pgtbl_unbuf(dev, &mr->buf);
+
+ return &mr->ibmr;
+
+err_cmd:
+ ionic_pgtbl_unbuf(dev, &mr->buf);
+err_pgtbl:
+ ib_umem_release(mr->umem);
+err_umem:
+ ionic_put_mrid(dev, mr->mrid);
+err_mrid:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+struct ib_mr *ionic_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 offset,
+ u64 length, u64 addr, int fd, int access,
+ struct ib_dmah *dmah,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
+ struct ionic_pd *pd = to_ionic_pd(ibpd);
+ struct ib_umem_dmabuf *umem_dmabuf;
+ struct ionic_mr *mr;
+ u64 pg_sz;
+ int rc;
+
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ rc = ionic_get_mrid(dev, &mr->mrid);
+ if (rc)
+ goto err_mrid;
+
+ mr->ibmr.lkey = mr->mrid;
+ mr->ibmr.rkey = mr->mrid;
+ mr->ibmr.iova = addr;
+ mr->ibmr.length = length;
+
+ mr->flags = IONIC_MRF_USER_MR | to_ionic_mr_flags(access);
+
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(&dev->ibdev, offset, length,
+ fd, access);
+ if (IS_ERR(umem_dmabuf)) {
+ rc = PTR_ERR(umem_dmabuf);
+ goto err_umem;
+ }
+
+ mr->umem = &umem_dmabuf->umem;
+
+ pg_sz = ib_umem_find_best_pgsz(mr->umem,
+ dev->lif_cfg.page_size_supported,
+ addr);
+ if (!pg_sz) {
+ rc = -EINVAL;
+ goto err_pgtbl;
+ }
+
+ rc = ionic_pgtbl_init(dev, &mr->buf, mr->umem, 0, 1, pg_sz);
+ if (rc)
+ goto err_pgtbl;
+
+ rc = ionic_create_mr_cmd(dev, pd, mr, addr, length);
+ if (rc)
+ goto err_cmd;
+
+ ionic_pgtbl_unbuf(dev, &mr->buf);
+
+ return &mr->ibmr;
+
+err_cmd:
+ ionic_pgtbl_unbuf(dev, &mr->buf);
+err_pgtbl:
+ ib_umem_release(mr->umem);
+err_umem:
+ ionic_put_mrid(dev, mr->mrid);
+err_mrid:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+int ionic_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibmr->device);
+ struct ionic_mr *mr = to_ionic_mr(ibmr);
+ int rc;
+
+ if (!mr->ibmr.lkey)
+ goto out;
+
+ if (mr->created) {
+ rc = ionic_destroy_mr_cmd(dev, mr->mrid);
+ if (rc)
+ return rc;
+ }
+
+ ionic_pgtbl_unbuf(dev, &mr->buf);
+
+ if (mr->umem)
+ ib_umem_release(mr->umem);
+
+ ionic_put_mrid(dev, mr->mrid);
+
+out:
+ kfree(mr);
+
+ return 0;
+}
+
+struct ib_mr *ionic_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type type,
+ u32 max_sg)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
+ struct ionic_pd *pd = to_ionic_pd(ibpd);
+ struct ionic_mr *mr;
+ int rc;
+
+ if (type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ rc = ionic_get_mrid(dev, &mr->mrid);
+ if (rc)
+ goto err_mrid;
+
+ mr->ibmr.lkey = mr->mrid;
+ mr->ibmr.rkey = mr->mrid;
+
+ mr->flags = IONIC_MRF_PHYS_MR;
+
+ rc = ionic_pgtbl_init(dev, &mr->buf, mr->umem, 0, max_sg, PAGE_SIZE);
+ if (rc)
+ goto err_pgtbl;
+
+ mr->buf.tbl_pages = 0;
+
+ rc = ionic_create_mr_cmd(dev, pd, mr, 0, 0);
+ if (rc)
+ goto err_cmd;
+
+ return &mr->ibmr;
+
+err_cmd:
+ ionic_pgtbl_unbuf(dev, &mr->buf);
+err_pgtbl:
+ ionic_put_mrid(dev, mr->mrid);
+err_mrid:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+static int ionic_map_mr_page(struct ib_mr *ibmr, u64 dma)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibmr->device);
+ struct ionic_mr *mr = to_ionic_mr(ibmr);
+
+ ibdev_dbg(&dev->ibdev, "dma %p\n", (void *)dma);
+ return ionic_pgtbl_page(&mr->buf, dma);
+}
+
+int ionic_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibmr->device);
+ struct ionic_mr *mr = to_ionic_mr(ibmr);
+ int rc;
+
+ /* mr must be allocated using ib_alloc_mr() */
+ if (unlikely(!mr->buf.tbl_limit))
+ return -EINVAL;
+
+ mr->buf.tbl_pages = 0;
+
+ if (mr->buf.tbl_buf)
+ dma_sync_single_for_cpu(dev->lif_cfg.hwdev, mr->buf.tbl_dma,
+ mr->buf.tbl_size, DMA_TO_DEVICE);
+
+ ibdev_dbg(&dev->ibdev, "sg %p nent %d\n", sg, sg_nents);
+ rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ionic_map_mr_page);
+
+ mr->buf.page_size_log2 = order_base_2(ibmr->page_size);
+
+ if (mr->buf.tbl_buf)
+ dma_sync_single_for_device(dev->lif_cfg.hwdev, mr->buf.tbl_dma,
+ mr->buf.tbl_size, DMA_TO_DEVICE);
+
+ return rc;
+}
+
+int ionic_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibmw->device);
+ struct ionic_pd *pd = to_ionic_pd(ibmw->pd);
+ struct ionic_mr *mr = to_ionic_mw(ibmw);
+ int rc;
+
+ rc = ionic_get_mrid(dev, &mr->mrid);
+ if (rc)
+ return rc;
+
+ mr->ibmw.rkey = mr->mrid;
+
+ if (mr->ibmw.type == IB_MW_TYPE_1)
+ mr->flags = IONIC_MRF_MW_1;
+ else
+ mr->flags = IONIC_MRF_MW_2;
+
+ rc = ionic_create_mr_cmd(dev, pd, mr, 0, 0);
+ if (rc)
+ goto err_cmd;
+
+ return 0;
+
+err_cmd:
+ ionic_put_mrid(dev, mr->mrid);
+ return rc;
+}
+
+int ionic_dealloc_mw(struct ib_mw *ibmw)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibmw->device);
+ struct ionic_mr *mr = to_ionic_mw(ibmw);
+ int rc;
+
+ rc = ionic_destroy_mr_cmd(dev, mr->mrid);
+ if (rc)
+ return rc;
+
+ ionic_put_mrid(dev, mr->mrid);
+
+ return 0;
+}
+
+static int ionic_create_cq_cmd(struct ionic_ibdev *dev,
+ struct ionic_ctx *ctx,
+ struct ionic_cq *cq,
+ struct ionic_tbl_buf *buf)
+{
+ const u16 dbid = ionic_ctx_dbid(dev, ctx);
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_CREATE_CQ,
+ .len = cpu_to_le16(IONIC_ADMIN_CREATE_CQ_IN_V1_LEN),
+ .cmd.create_cq = {
+ .eq_id = cpu_to_le32(cq->eqid),
+ .depth_log2 = cq->q.depth_log2,
+ .stride_log2 = cq->q.stride_log2,
+ .page_size_log2 = buf->page_size_log2,
+ .tbl_index = cpu_to_le32(~0),
+ .map_count = cpu_to_le32(buf->tbl_pages),
+ .dma_addr = ionic_pgtbl_dma(buf, 0),
+ .dbid_flags = cpu_to_le16(dbid),
+ .id_ver = cpu_to_le32(cq->cqid),
+ }
+ }
+ };
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_CQ)
+ return -EBADRQC;
+
+ ionic_admin_post(dev, &wr);
+
+ return ionic_admin_wait(dev, &wr, 0);
+}
+
+static int ionic_destroy_cq_cmd(struct ionic_ibdev *dev, u32 cqid)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_DESTROY_CQ,
+ .len = cpu_to_le16(IONIC_ADMIN_DESTROY_CQ_IN_V1_LEN),
+ .cmd.destroy_cq = {
+ .cq_id = cpu_to_le32(cqid),
+ },
+ }
+ };
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_CQ)
+ return -EBADRQC;
+
+ ionic_admin_post(dev, &wr);
+
+ return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_TEARDOWN);
+}
+
+int ionic_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device);
+ struct ib_udata *udata = &attrs->driver_udata;
+ struct ionic_ctx *ctx =
+ rdma_udata_to_drv_context(udata, struct ionic_ctx, ibctx);
+ struct ionic_vcq *vcq = to_ionic_vcq(ibcq);
+ struct ionic_tbl_buf buf = {};
+ struct ionic_cq_resp resp;
+ struct ionic_cq_req req;
+ int udma_idx = 0, rc;
+
+ if (udata) {
+ rc = ib_copy_from_udata(&req, udata, sizeof(req));
+ if (rc)
+ return rc;
+ }
+
+ vcq->udma_mask = BIT(dev->lif_cfg.udma_count) - 1;
+
+ if (udata)
+ vcq->udma_mask &= req.udma_mask;
+
+ if (!vcq->udma_mask) {
+ rc = -EINVAL;
+ goto err_init;
+ }
+
+ for (; udma_idx < dev->lif_cfg.udma_count; ++udma_idx) {
+ if (!(vcq->udma_mask & BIT(udma_idx)))
+ continue;
+
+ rc = ionic_create_cq_common(vcq, &buf, attr, ctx, udata,
+ &req.cq[udma_idx],
+ &resp.cqid[udma_idx],
+ udma_idx);
+ if (rc)
+ goto err_init;
+
+ rc = ionic_create_cq_cmd(dev, ctx, &vcq->cq[udma_idx], &buf);
+ if (rc)
+ goto err_cmd;
+
+ ionic_pgtbl_unbuf(dev, &buf);
+ }
+
+ vcq->ibcq.cqe = attr->cqe;
+
+ if (udata) {
+ resp.udma_mask = vcq->udma_mask;
+
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc)
+ goto err_resp;
+ }
+
+ return 0;
+
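+ /*
+ * Unwind in reverse: the labels inside the loop resume teardown at
+ * the point matching how far setup got for the failing udma index.
+ */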
+err_resp:
+ while (udma_idx) {
+ --udma_idx;
+ if (!(vcq->udma_mask & BIT(udma_idx)))
+ continue;
+ ionic_destroy_cq_cmd(dev, vcq->cq[udma_idx].cqid);
+err_cmd:
+ ionic_pgtbl_unbuf(dev, &buf);
+ ionic_destroy_cq_common(dev, &vcq->cq[udma_idx]);
+err_init:
+ ;
+ }
+
+ return rc;
+}
+
+int ionic_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device);
+ struct ionic_vcq *vcq = to_ionic_vcq(ibcq);
+ int udma_idx, rc_tmp, rc = 0;
+
+ for (udma_idx = dev->lif_cfg.udma_count; udma_idx; ) {
+ --udma_idx;
+
+ if (!(vcq->udma_mask & BIT(udma_idx)))
+ continue;
+
+ rc_tmp = ionic_destroy_cq_cmd(dev, vcq->cq[udma_idx].cqid);
+ if (rc_tmp) {
+ if (!rc)
+ rc = rc_tmp;
+
+ continue;
+ }
+
+ ionic_destroy_cq_common(dev, &vcq->cq[udma_idx]);
+ }
+
+ return rc;
+}
+
+static bool pd_remote_privileged(struct ib_pd *pd)
+{
+ return pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
+}
+
+static int ionic_create_qp_cmd(struct ionic_ibdev *dev,
+ struct ionic_pd *pd,
+ struct ionic_cq *send_cq,
+ struct ionic_cq *recv_cq,
+ struct ionic_qp *qp,
+ struct ionic_tbl_buf *sq_buf,
+ struct ionic_tbl_buf *rq_buf,
+ struct ib_qp_init_attr *attr)
+{
+ const u16 dbid = ionic_obj_dbid(dev, pd->ibpd.uobject);
+ const u32 flags = to_ionic_qp_flags(0, 0,
+ qp->sq_cmb & IONIC_CMB_ENABLE,
+ qp->rq_cmb & IONIC_CMB_ENABLE,
+ qp->sq_spec, qp->rq_spec,
+ pd->flags & IONIC_QPF_PRIVILEGED,
+ pd_remote_privileged(&pd->ibpd));
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_CREATE_QP,
+ .len = cpu_to_le16(IONIC_ADMIN_CREATE_QP_IN_V1_LEN),
+ .cmd.create_qp = {
+ .pd_id = cpu_to_le32(pd->pdid),
+ .priv_flags = cpu_to_be32(flags),
+ .type_state = to_ionic_qp_type(attr->qp_type),
+ .dbid_flags = cpu_to_le16(dbid),
+ .id_ver = cpu_to_le32(qp->qpid),
+ }
+ }
+ };
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_QP)
+ return -EBADRQC;
+
+ if (qp->has_sq) {
+ wr.wqe.cmd.create_qp.sq_cq_id = cpu_to_le32(send_cq->cqid);
+ wr.wqe.cmd.create_qp.sq_depth_log2 = qp->sq.depth_log2;
+ wr.wqe.cmd.create_qp.sq_stride_log2 = qp->sq.stride_log2;
+ wr.wqe.cmd.create_qp.sq_page_size_log2 = sq_buf->page_size_log2;
+ wr.wqe.cmd.create_qp.sq_tbl_index_xrcd_id = cpu_to_le32(~0);
+ wr.wqe.cmd.create_qp.sq_map_count =
+ cpu_to_le32(sq_buf->tbl_pages);
+ wr.wqe.cmd.create_qp.sq_dma_addr = ionic_pgtbl_dma(sq_buf, 0);
+ }
+
+ if (qp->has_rq) {
+ wr.wqe.cmd.create_qp.rq_cq_id = cpu_to_le32(recv_cq->cqid);
+ wr.wqe.cmd.create_qp.rq_depth_log2 = qp->rq.depth_log2;
+ wr.wqe.cmd.create_qp.rq_stride_log2 = qp->rq.stride_log2;
+ wr.wqe.cmd.create_qp.rq_page_size_log2 = rq_buf->page_size_log2;
+ wr.wqe.cmd.create_qp.rq_tbl_index_srq_id = cpu_to_le32(~0);
+ wr.wqe.cmd.create_qp.rq_map_count =
+ cpu_to_le32(rq_buf->tbl_pages);
+ wr.wqe.cmd.create_qp.rq_dma_addr = ionic_pgtbl_dma(rq_buf, 0);
+ }
+
+ ionic_admin_post(dev, &wr);
+
+ return ionic_admin_wait(dev, &wr, 0);
+}
+
+static int ionic_modify_qp_cmd(struct ionic_ibdev *dev,
+ struct ionic_pd *pd,
+ struct ionic_qp *qp,
+ struct ib_qp_attr *attr,
+ int mask)
+{
+ const u32 flags = to_ionic_qp_flags(attr->qp_access_flags,
+ attr->en_sqd_async_notify,
+ qp->sq_cmb & IONIC_CMB_ENABLE,
+ qp->rq_cmb & IONIC_CMB_ENABLE,
+ qp->sq_spec, qp->rq_spec,
+ pd->flags & IONIC_QPF_PRIVILEGED,
+ pd_remote_privileged(qp->ibqp.pd));
+ const u8 state = to_ionic_qp_modify_state(attr->qp_state,
+ attr->cur_qp_state);
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_MODIFY_QP,
+ .len = cpu_to_le16(IONIC_ADMIN_MODIFY_QP_IN_V1_LEN),
+ .cmd.mod_qp = {
+ .attr_mask = cpu_to_be32(mask),
+ .access_flags = cpu_to_be16(flags),
+ .rq_psn = cpu_to_le32(attr->rq_psn),
+ .sq_psn = cpu_to_le32(attr->sq_psn),
+ .rate_limit_kbps =
+ cpu_to_le32(attr->rate_limit),
+ .pmtu = (attr->path_mtu + 7),
+ .retry = (attr->retry_cnt |
+ (attr->rnr_retry << 4)),
+ .rnr_timer = attr->min_rnr_timer,
+ .retry_timeout = attr->timeout,
+ .type_state = state,
+ .id_ver = cpu_to_le32(qp->qpid),
+ }
+ }
+ };
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+ void *hdr_buf = NULL;
+ dma_addr_t hdr_dma = 0;
+ int rc, hdr_len = 0;
+ u16 sport;
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_MODIFY_QP)
+ return -EBADRQC;
+
+ if ((mask & IB_QP_MAX_DEST_RD_ATOMIC) && attr->max_dest_rd_atomic) {
+ /* Note: rounding up/down was already done when allocating
+ * resources on the device. The allocation order is in units of
+ * cache line size. We can't use the order of the resource
+ * allocation to determine the order of WQEs here, because for
+ * queue lengths <= one cache line the orders are not distinct.
+ *
+ * Therefore, the order of WQEs is computed again here.
+ *
+ * Account for the hole and round up to the next order.
+ */
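+ /* e.g. max_dest_rd_atomic 5 -> order_base_2(6) == 3, i.e. an
+ * 8-entry RSQ for 5 outstanding plus the hole
+ */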
+ wr.wqe.cmd.mod_qp.rsq_depth =
+ order_base_2(attr->max_dest_rd_atomic + 1);
+ wr.wqe.cmd.mod_qp.rsq_index = cpu_to_le32(~0);
+ }
+
+ if ((mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
+ /* Account for hole and round down to the next order */
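+ /* e.g. max_rd_atomic 3 -> order_base_2(5) - 1 == 2 (4 entries) */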
+ wr.wqe.cmd.mod_qp.rrq_depth =
+ order_base_2(attr->max_rd_atomic + 2) - 1;
+ wr.wqe.cmd.mod_qp.rrq_index = cpu_to_le32(~0);
+ }
+
+ if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
+ wr.wqe.cmd.mod_qp.qkey_dest_qpn =
+ cpu_to_le32(attr->dest_qp_num);
+ else
+ wr.wqe.cmd.mod_qp.qkey_dest_qpn = cpu_to_le32(attr->qkey);
+
+ if (mask & IB_QP_AV) {
+ if (!qp->hdr)
+ return -ENOMEM;
+
+ sport = rdma_get_udp_sport(grh->flow_label,
+ qp->qpid,
+ attr->dest_qp_num);
+
+ rc = ionic_build_hdr(dev, qp->hdr, &attr->ah_attr, sport, true);
+ if (rc)
+ return rc;
+
+ qp->sgid_index = grh->sgid_index;
+
+ hdr_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!hdr_buf)
+ return -ENOMEM;
+
+ hdr_len = ib_ud_header_pack(qp->hdr, hdr_buf);
+ hdr_len -= IB_BTH_BYTES;
+ hdr_len -= IB_DETH_BYTES;
+ ibdev_dbg(&dev->ibdev, "roce packet header template\n");
+ print_hex_dump_debug("hdr ", DUMP_PREFIX_OFFSET, 16, 1,
+ hdr_buf, hdr_len, true);
+
+ hdr_dma = dma_map_single(dev->lif_cfg.hwdev, hdr_buf, hdr_len,
+ DMA_TO_DEVICE);
+
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
+ if (rc)
+ goto err_dma;
+
+ if (qp->hdr->ipv4_present) {
+ wr.wqe.cmd.mod_qp.tfp_csum_profile =
+ qp->hdr->vlan_present ?
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP :
+ IONIC_TFP_CSUM_PROF_ETH_IPV4_UDP;
+ } else {
+ wr.wqe.cmd.mod_qp.tfp_csum_profile =
+ qp->hdr->vlan_present ?
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_UDP :
+ IONIC_TFP_CSUM_PROF_ETH_IPV6_UDP;
+ }
+
+ wr.wqe.cmd.mod_qp.ah_id_len =
+ cpu_to_le32(qp->ahid | (hdr_len << 24));
+ wr.wqe.cmd.mod_qp.dma_addr = cpu_to_le64(hdr_dma);
+
+ wr.wqe.cmd.mod_qp.en_pcp = attr->ah_attr.sl;
+ wr.wqe.cmd.mod_qp.ip_dscp = grh->traffic_class >> 2;
+ }
+
+ ionic_admin_post(dev, &wr);
+
+ rc = ionic_admin_wait(dev, &wr, 0);
+
+ if (mask & IB_QP_AV)
+ dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, hdr_len,
+ DMA_TO_DEVICE);
+err_dma:
+ if (mask & IB_QP_AV)
+ kfree(hdr_buf);
+
+ return rc;
+}
+
+static int ionic_query_qp_cmd(struct ionic_ibdev *dev,
+ struct ionic_qp *qp,
+ struct ib_qp_attr *attr,
+ int mask)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_QUERY_QP,
+ .len = cpu_to_le16(IONIC_ADMIN_QUERY_QP_IN_V1_LEN),
+ .cmd.query_qp = {
+ .id_ver = cpu_to_le32(qp->qpid),
+ },
+ }
+ };
+ struct ionic_v1_admin_query_qp_sq *query_sqbuf;
+ struct ionic_v1_admin_query_qp_rq *query_rqbuf;
+ dma_addr_t query_sqdma;
+ dma_addr_t query_rqdma;
+ dma_addr_t hdr_dma = 0;
+ void *hdr_buf = NULL;
+ int flags, rc;
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_QUERY_QP)
+ return -EBADRQC;
+
+ if (qp->has_sq) {
+ bool expdb = !!(qp->sq_cmb & IONIC_CMB_EXPDB);
+
+ attr->cap.max_send_sge =
+ ionic_v1_send_wqe_max_sge(qp->sq.stride_log2,
+ qp->sq_spec,
+ expdb);
+ attr->cap.max_inline_data =
+ ionic_v1_send_wqe_max_data(qp->sq.stride_log2, expdb);
+ }
+
+ if (qp->has_rq) {
+ attr->cap.max_recv_sge =
+ ionic_v1_recv_wqe_max_sge(qp->rq.stride_log2,
+ qp->rq_spec,
+ qp->rq_cmb & IONIC_CMB_EXPDB);
+ }
+
+ query_sqbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!query_sqbuf)
+ return -ENOMEM;
+
+ query_rqbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!query_rqbuf) {
+ rc = -ENOMEM;
+ goto err_rqbuf;
+ }
+
+ query_sqdma = dma_map_single(dev->lif_cfg.hwdev, query_sqbuf, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, query_sqdma);
+ if (rc)
+ goto err_sqdma;
+
+ query_rqdma = dma_map_single(dev->lif_cfg.hwdev, query_rqbuf, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, query_rqdma);
+ if (rc)
+ goto err_rqdma;
+
+ if (mask & IB_QP_AV) {
+ hdr_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!hdr_buf) {
+ rc = -ENOMEM;
+ goto err_hdrbuf;
+ }
+
+ hdr_dma = dma_map_single(dev->lif_cfg.hwdev, hdr_buf,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
+ if (rc)
+ goto err_hdrdma;
+ }
+
+ wr.wqe.cmd.query_qp.sq_dma_addr = cpu_to_le64(query_sqdma);
+ wr.wqe.cmd.query_qp.rq_dma_addr = cpu_to_le64(query_rqdma);
+ wr.wqe.cmd.query_qp.hdr_dma_addr = cpu_to_le64(hdr_dma);
+ wr.wqe.cmd.query_qp.ah_id = cpu_to_le32(qp->ahid);
+
+ ionic_admin_post(dev, &wr);
+
+ rc = ionic_admin_wait(dev, &wr, 0);
+
+ if (rc)
+ goto err_hdrdma;
+
+ flags = be16_to_cpu(query_sqbuf->access_perms_flags |
+ query_rqbuf->access_perms_flags);
+
+ print_hex_dump_debug("sqbuf ", DUMP_PREFIX_OFFSET, 16, 1,
+ query_sqbuf, sizeof(*query_sqbuf), true);
+ print_hex_dump_debug("rqbuf ", DUMP_PREFIX_OFFSET, 16, 1,
+ query_rqbuf, sizeof(*query_rqbuf), true);
+ ibdev_dbg(&dev->ibdev, "query qp %u state_pmtu %#x flags %#x",
+ qp->qpid, query_rqbuf->state_pmtu, flags);
+
+ attr->qp_state = from_ionic_qp_state(query_rqbuf->state_pmtu >> 4);
+ attr->cur_qp_state = attr->qp_state;
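+ /* pmtu is reported as log2(MTU in bytes); convert to enum ib_mtu,
+ * e.g. 10 -> IB_MTU_1024
+ */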
+ attr->path_mtu = (query_rqbuf->state_pmtu & 0xf) - 7;
+ attr->path_mig_state = IB_MIG_MIGRATED;
+ attr->qkey = be32_to_cpu(query_sqbuf->qkey_dest_qpn);
+ attr->rq_psn = be32_to_cpu(query_sqbuf->rq_psn);
+ attr->sq_psn = be32_to_cpu(query_rqbuf->sq_psn);
+ attr->dest_qp_num = attr->qkey;
+ attr->qp_access_flags = from_ionic_qp_flags(flags);
+ attr->pkey_index = 0;
+ attr->alt_pkey_index = 0;
+ attr->en_sqd_async_notify = !!(flags & IONIC_QPF_SQD_NOTIFY);
+ attr->sq_draining = !!(flags & IONIC_QPF_SQ_DRAINING);
+ attr->max_rd_atomic = BIT(query_rqbuf->rrq_depth) - 1;
+ attr->max_dest_rd_atomic = BIT(query_rqbuf->rsq_depth) - 1;
+ attr->min_rnr_timer = query_sqbuf->rnr_timer;
+ attr->port_num = 0;
+ attr->timeout = query_sqbuf->retry_timeout;
+ attr->retry_cnt = query_rqbuf->retry_rnrtry & 0xf;
+ attr->rnr_retry = query_rqbuf->retry_rnrtry >> 4;
+ attr->alt_port_num = 0;
+ attr->alt_timeout = 0;
+ attr->rate_limit = be32_to_cpu(query_sqbuf->rate_limit_kbps);
+
+ if (mask & IB_QP_AV)
+ ionic_set_ah_attr(dev, &attr->ah_attr,
+ qp->hdr, qp->sgid_index);
+
+err_hdrdma:
+ if (mask & IB_QP_AV) {
+ dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ kfree(hdr_buf);
+ }
+err_hdrbuf:
+ dma_unmap_single(dev->lif_cfg.hwdev, query_rqdma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+err_rqdma:
+ dma_unmap_single(dev->lif_cfg.hwdev, query_sqdma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+err_sqdma:
+ kfree(query_rqbuf);
+err_rqbuf:
+ kfree(query_sqbuf);
+
+ return rc;
+}
+
+static int ionic_destroy_qp_cmd(struct ionic_ibdev *dev, u32 qpid)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_DESTROY_QP,
+ .len = cpu_to_le16(IONIC_ADMIN_DESTROY_QP_IN_V1_LEN),
+ .cmd.destroy_qp = {
+ .qp_id = cpu_to_le32(qpid),
+ },
+ }
+ };
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_QP)
+ return -EBADRQC;
+
+ ionic_admin_post(dev, &wr);
+
+ return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_TEARDOWN);
+}
+
+static bool ionic_expdb_wqe_size_supported(struct ionic_ibdev *dev,
+ u32 wqe_size)
+{
+ switch (wqe_size) {
+ case 64: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_64;
+ case 128: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_128;
+ case 256: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_256;
+ case 512: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_512;
+ }
+
+ return false;
+}
+
+static void ionic_qp_sq_init_cmb(struct ionic_ibdev *dev,
+ struct ionic_qp *qp,
+ struct ib_udata *udata,
+ int max_data)
+{
+ u8 expdb_stride_log2 = 0;
+ bool expdb;
+ int rc;
+
+ if (!(qp->sq_cmb & IONIC_CMB_ENABLE))
+ goto not_in_cmb;
+
+ if (qp->sq_cmb & ~IONIC_CMB_SUPPORTED) {
+ if (qp->sq_cmb & IONIC_CMB_REQUIRE)
+ goto not_in_cmb;
+
+ qp->sq_cmb &= IONIC_CMB_SUPPORTED;
+ }
+
+ if ((qp->sq_cmb & IONIC_CMB_EXPDB) && !dev->lif_cfg.sq_expdb) {
+ if (qp->sq_cmb & IONIC_CMB_REQUIRE)
+ goto not_in_cmb;
+
+ qp->sq_cmb &= ~IONIC_CMB_EXPDB;
+ }
+
+ qp->sq_cmb_order = order_base_2(qp->sq.size / PAGE_SIZE);
+
+ if (qp->sq_cmb_order >= IONIC_SQCMB_ORDER)
+ goto not_in_cmb;
+
+ if (qp->sq_cmb & IONIC_CMB_EXPDB)
+ expdb_stride_log2 = qp->sq.stride_log2;
+
+ rc = ionic_get_cmb(dev->lif_cfg.lif, &qp->sq_cmb_pgid,
+ &qp->sq_cmb_addr, qp->sq_cmb_order,
+ expdb_stride_log2, &expdb);
+ if (rc)
+ goto not_in_cmb;
+
+ if ((qp->sq_cmb & IONIC_CMB_EXPDB) && !expdb) {
+ if (qp->sq_cmb & IONIC_CMB_REQUIRE)
+ goto err_map;
+
+ qp->sq_cmb &= ~IONIC_CMB_EXPDB;
+ }
+
+ return;
+
+err_map:
+ ionic_put_cmb(dev->lif_cfg.lif, qp->sq_cmb_pgid, qp->sq_cmb_order);
+not_in_cmb:
+ if (qp->sq_cmb & IONIC_CMB_REQUIRE)
+ ibdev_dbg(&dev->ibdev, "could not place sq in cmb as required\n");
+
+ qp->sq_cmb = 0;
+ qp->sq_cmb_order = IONIC_RES_INVALID;
+ qp->sq_cmb_pgid = 0;
+ qp->sq_cmb_addr = 0;
+}
+
+static void ionic_qp_sq_destroy_cmb(struct ionic_ibdev *dev,
+ struct ionic_ctx *ctx,
+ struct ionic_qp *qp)
+{
+ if (!(qp->sq_cmb & IONIC_CMB_ENABLE))
+ return;
+
+ if (ctx)
+ rdma_user_mmap_entry_remove(qp->mmap_sq_cmb);
+
+ ionic_put_cmb(dev->lif_cfg.lif, qp->sq_cmb_pgid, qp->sq_cmb_order);
+}
+
+static int ionic_qp_sq_init(struct ionic_ibdev *dev, struct ionic_ctx *ctx,
+ struct ionic_qp *qp, struct ionic_qdesc *sq,
+ struct ionic_tbl_buf *buf, int max_wr, int max_sge,
+ int max_data, int sq_spec, struct ib_udata *udata)
+{
+ u32 wqe_size;
+ int rc = 0;
+
+ qp->sq_msn_prod = 0;
+ qp->sq_msn_cons = 0;
+
+ if (!qp->has_sq) {
+ if (buf) {
+ buf->tbl_buf = NULL;
+ buf->tbl_limit = 0;
+ buf->tbl_pages = 0;
+ }
+ if (udata)
+ rc = ionic_validate_qdesc_zero(sq);
+
+ return rc;
+ }
+
+ rc = -EINVAL;
+
+ if (max_wr < 0 || max_wr > 0xffff)
+ return rc;
+
+ if (max_sge < 1)
+ return rc;
+
+ if (max_sge > min(ionic_v1_send_wqe_max_sge(dev->lif_cfg.max_stride, 0,
+ qp->sq_cmb &
+ IONIC_CMB_EXPDB),
+ IONIC_SPEC_HIGH))
+ return rc;
+
+ if (max_data < 0)
+ return rc;
+
+ if (max_data > ionic_v1_send_wqe_max_data(dev->lif_cfg.max_stride,
+ qp->sq_cmb & IONIC_CMB_EXPDB))
+ return rc;
+
+ if (udata) {
+ rc = ionic_validate_qdesc(sq);
+ if (rc)
+ return rc;
+
+ qp->sq_spec = sq_spec;
+
+ qp->sq.ptr = NULL;
+ qp->sq.size = sq->size;
+ qp->sq.mask = sq->mask;
+ qp->sq.depth_log2 = sq->depth_log2;
+ qp->sq.stride_log2 = sq->stride_log2;
+
+ qp->sq_meta = NULL;
+ qp->sq_msn_idx = NULL;
+
+ qp->sq_umem = ib_umem_get(&dev->ibdev, sq->addr, sq->size, 0);
+ if (IS_ERR(qp->sq_umem))
+ return PTR_ERR(qp->sq_umem);
+ } else {
+ qp->sq_umem = NULL;
+
+ qp->sq_spec = ionic_v1_use_spec_sge(max_sge, sq_spec);
+ if (sq_spec && !qp->sq_spec)
+ ibdev_dbg(&dev->ibdev,
+ "init sq: max_sge %u disables spec\n",
+ max_sge);
+
+ if (qp->sq_cmb & IONIC_CMB_EXPDB) {
+ wqe_size = ionic_v1_send_wqe_min_size(max_sge, max_data,
+ qp->sq_spec,
+ true);
+
+ if (!ionic_expdb_wqe_size_supported(dev, wqe_size))
+ qp->sq_cmb &= ~IONIC_CMB_EXPDB;
+ }
+
+ if (!(qp->sq_cmb & IONIC_CMB_EXPDB))
+ wqe_size = ionic_v1_send_wqe_min_size(max_sge, max_data,
+ qp->sq_spec,
+ false);
+
+ rc = ionic_queue_init(&qp->sq, dev->lif_cfg.hwdev,
+ max_wr, wqe_size);
+ if (rc)
+ return rc;
+
+ ionic_queue_dbell_init(&qp->sq, qp->qpid);
+
+ qp->sq_meta = kmalloc_array((u32)qp->sq.mask + 1,
+ sizeof(*qp->sq_meta),
+ GFP_KERNEL);
+ if (!qp->sq_meta) {
+ rc = -ENOMEM;
+ goto err_sq_meta;
+ }
+
+ qp->sq_msn_idx = kmalloc_array((u32)qp->sq.mask + 1,
+ sizeof(*qp->sq_msn_idx),
+ GFP_KERNEL);
+ if (!qp->sq_msn_idx) {
+ rc = -ENOMEM;
+ goto err_sq_msn;
+ }
+ }
+
+ ionic_qp_sq_init_cmb(dev, qp, udata, max_data);
+
+ if (qp->sq_cmb & IONIC_CMB_ENABLE)
+ rc = ionic_pgtbl_init(dev, buf, NULL,
+ (u64)qp->sq_cmb_pgid << PAGE_SHIFT,
+ 1, PAGE_SIZE);
+ else
+ rc = ionic_pgtbl_init(dev, buf,
+ qp->sq_umem, qp->sq.dma, 1, PAGE_SIZE);
+ if (rc)
+ goto err_sq_tbl;
+
+ return 0;
+
+err_sq_tbl:
+ ionic_qp_sq_destroy_cmb(dev, ctx, qp);
+ kfree(qp->sq_msn_idx);
+err_sq_msn:
+ kfree(qp->sq_meta);
+err_sq_meta:
+ if (qp->sq_umem)
+ ib_umem_release(qp->sq_umem);
+ else
+ ionic_queue_destroy(&qp->sq, dev->lif_cfg.hwdev);
+ return rc;
+}
+
+static void ionic_qp_sq_destroy(struct ionic_ibdev *dev,
+ struct ionic_ctx *ctx,
+ struct ionic_qp *qp)
+{
+ if (!qp->has_sq)
+ return;
+
+ ionic_qp_sq_destroy_cmb(dev, ctx, qp);
+
+ kfree(qp->sq_msn_idx);
+ kfree(qp->sq_meta);
+
+ if (qp->sq_umem)
+ ib_umem_release(qp->sq_umem);
+ else
+ ionic_queue_destroy(&qp->sq, dev->lif_cfg.hwdev);
+}
+
+static void ionic_qp_rq_init_cmb(struct ionic_ibdev *dev,
+ struct ionic_qp *qp,
+ struct ib_udata *udata)
+{
+ u8 expdb_stride_log2 = 0;
+ bool expdb;
+ int rc;
+
+ if (!(qp->rq_cmb & IONIC_CMB_ENABLE))
+ goto not_in_cmb;
+
+ if (qp->rq_cmb & ~IONIC_CMB_SUPPORTED) {
+ if (qp->rq_cmb & IONIC_CMB_REQUIRE)
+ goto not_in_cmb;
+
+ qp->rq_cmb &= IONIC_CMB_SUPPORTED;
+ }
+
+ if ((qp->rq_cmb & IONIC_CMB_EXPDB) && !dev->lif_cfg.rq_expdb) {
+ if (qp->rq_cmb & IONIC_CMB_REQUIRE)
+ goto not_in_cmb;
+
+ qp->rq_cmb &= ~IONIC_CMB_EXPDB;
+ }
+
+ qp->rq_cmb_order = order_base_2(qp->rq.size / PAGE_SIZE);
+
+ if (qp->rq_cmb_order >= IONIC_RQCMB_ORDER)
+ goto not_in_cmb;
+
+ if (qp->rq_cmb & IONIC_CMB_EXPDB)
+ expdb_stride_log2 = qp->rq.stride_log2;
+
+ rc = ionic_get_cmb(dev->lif_cfg.lif, &qp->rq_cmb_pgid,
+ &qp->rq_cmb_addr, qp->rq_cmb_order,
+ expdb_stride_log2, &expdb);
+ if (rc)
+ goto not_in_cmb;
+
+ if ((qp->rq_cmb & IONIC_CMB_EXPDB) && !expdb) {
+ if (qp->rq_cmb & IONIC_CMB_REQUIRE)
+ goto err_map;
+
+ qp->rq_cmb &= ~IONIC_CMB_EXPDB;
+ }
+
+ return;
+
+err_map:
+ ionic_put_cmb(dev->lif_cfg.lif, qp->rq_cmb_pgid, qp->rq_cmb_order);
+not_in_cmb:
+ if (qp->rq_cmb & IONIC_CMB_REQUIRE)
+ ibdev_dbg(&dev->ibdev, "could not place rq in cmb as required\n");
+
+ qp->rq_cmb = 0;
+ qp->rq_cmb_order = IONIC_RES_INVALID;
+ qp->rq_cmb_pgid = 0;
+ qp->rq_cmb_addr = 0;
+}
+
+static void ionic_qp_rq_destroy_cmb(struct ionic_ibdev *dev,
+ struct ionic_ctx *ctx,
+ struct ionic_qp *qp)
+{
+ if (!(qp->rq_cmb & IONIC_CMB_ENABLE))
+ return;
+
+ if (ctx)
+ rdma_user_mmap_entry_remove(qp->mmap_rq_cmb);
+
+ ionic_put_cmb(dev->lif_cfg.lif, qp->rq_cmb_pgid, qp->rq_cmb_order);
+}
+
+static int ionic_qp_rq_init(struct ionic_ibdev *dev, struct ionic_ctx *ctx,
+ struct ionic_qp *qp, struct ionic_qdesc *rq,
+ struct ionic_tbl_buf *buf, int max_wr, int max_sge,
+ int rq_spec, struct ib_udata *udata)
+{
+ int rc = 0, i;
+ u32 wqe_size;
+
+ if (!qp->has_rq) {
+ if (buf) {
+ buf->tbl_buf = NULL;
+ buf->tbl_limit = 0;
+ buf->tbl_pages = 0;
+ }
+ if (udata)
+ rc = ionic_validate_qdesc_zero(rq);
+
+ return rc;
+ }
+
+ rc = -EINVAL;
+
+ if (max_wr < 0 || max_wr > 0xffff)
+ return rc;
+
+ if (max_sge < 1)
+ return rc;
+
+ if (max_sge > min(ionic_v1_recv_wqe_max_sge(dev->lif_cfg.max_stride, 0, false),
+ IONIC_SPEC_HIGH))
+ return rc;
+
+ if (udata) {
+ rc = ionic_validate_qdesc(rq);
+ if (rc)
+ return rc;
+
+ qp->rq_spec = rq_spec;
+
+ qp->rq.ptr = NULL;
+ qp->rq.size = rq->size;
+ qp->rq.mask = rq->mask;
+ qp->rq.depth_log2 = rq->depth_log2;
+ qp->rq.stride_log2 = rq->stride_log2;
+
+ qp->rq_meta = NULL;
+
+ qp->rq_umem = ib_umem_get(&dev->ibdev, rq->addr, rq->size, 0);
+ if (IS_ERR(qp->rq_umem))
+ return PTR_ERR(qp->rq_umem);
+ } else {
+ qp->rq_umem = NULL;
+
+ qp->rq_spec = ionic_v1_use_spec_sge(max_sge, rq_spec);
+ if (rq_spec && !qp->rq_spec)
+ ibdev_dbg(&dev->ibdev,
+ "init rq: max_sge %u disables spec\n",
+ max_sge);
+
+ if (qp->rq_cmb & IONIC_CMB_EXPDB) {
+ wqe_size = ionic_v1_recv_wqe_min_size(max_sge,
+ qp->rq_spec,
+ true);
+
+ if (!ionic_expdb_wqe_size_supported(dev, wqe_size))
+ qp->rq_cmb &= ~IONIC_CMB_EXPDB;
+ }
+
+ if (!(qp->rq_cmb & IONIC_CMB_EXPDB))
+ wqe_size = ionic_v1_recv_wqe_min_size(max_sge,
+ qp->rq_spec,
+ false);
+
+ rc = ionic_queue_init(&qp->rq, dev->lif_cfg.hwdev,
+ max_wr, wqe_size);
+ if (rc)
+ return rc;
+
+ ionic_queue_dbell_init(&qp->rq, qp->qpid);
+
+ qp->rq_meta = kmalloc_array((u32)qp->rq.mask + 1,
+ sizeof(*qp->rq_meta),
+ GFP_KERNEL);
+ if (!qp->rq_meta) {
+ rc = -ENOMEM;
+ goto err_rq_meta;
+ }
+
+ for (i = 0; i < qp->rq.mask; ++i)
+ qp->rq_meta[i].next = &qp->rq_meta[i + 1];
+ qp->rq_meta[i].next = IONIC_META_LAST;
+ qp->rq_meta_head = &qp->rq_meta[0];
+ }
+
+ ionic_qp_rq_init_cmb(dev, qp, udata);
+
+ if (qp->rq_cmb & IONIC_CMB_ENABLE)
+ rc = ionic_pgtbl_init(dev, buf, NULL,
+ (u64)qp->rq_cmb_pgid << PAGE_SHIFT,
+ 1, PAGE_SIZE);
+ else
+ rc = ionic_pgtbl_init(dev, buf,
+ qp->rq_umem, qp->rq.dma, 1, PAGE_SIZE);
+ if (rc)
+ goto err_rq_tbl;
+
+ return 0;
+
+err_rq_tbl:
+ ionic_qp_rq_destroy_cmb(dev, ctx, qp);
+ kfree(qp->rq_meta);
+err_rq_meta:
+ if (qp->rq_umem)
+ ib_umem_release(qp->rq_umem);
+ else
+ ionic_queue_destroy(&qp->rq, dev->lif_cfg.hwdev);
+ return rc;
+}
+
+static void ionic_qp_rq_destroy(struct ionic_ibdev *dev,
+ struct ionic_ctx *ctx,
+ struct ionic_qp *qp)
+{
+ if (!qp->has_rq)
+ return;
+
+ ionic_qp_rq_destroy_cmb(dev, ctx, qp);
+
+ kfree(qp->rq_meta);
+
+ if (qp->rq_umem)
+ ib_umem_release(qp->rq_umem);
+ else
+ ionic_queue_destroy(&qp->rq, dev->lif_cfg.hwdev);
+}
+
+int ionic_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
+ struct ionic_tbl_buf sq_buf = {}, rq_buf = {};
+ struct ionic_pd *pd = to_ionic_pd(ibqp->pd);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ struct ionic_ctx *ctx =
+ rdma_udata_to_drv_context(udata, struct ionic_ctx, ibctx);
+ struct ionic_qp_resp resp = {};
+ struct ionic_qp_req req = {};
+ struct ionic_cq *cq;
+ u8 udma_mask;
+ void *entry;
+ int rc;
+
+ if (udata) {
+ rc = ib_copy_from_udata(&req, udata, sizeof(req));
+ if (rc)
+ return rc;
+ } else {
+ req.sq_spec = IONIC_SPEC_HIGH;
+ req.rq_spec = IONIC_SPEC_HIGH;
+ }
+
+ if (attr->qp_type == IB_QPT_SMI || attr->qp_type > IB_QPT_UD)
+ return -EOPNOTSUPP;
+
+ qp->state = IB_QPS_RESET;
+
+ INIT_LIST_HEAD(&qp->cq_poll_sq);
+ INIT_LIST_HEAD(&qp->cq_flush_sq);
+ INIT_LIST_HEAD(&qp->cq_flush_rq);
+
+ spin_lock_init(&qp->sq_lock);
+ spin_lock_init(&qp->rq_lock);
+
+ qp->has_sq = 1;
+ qp->has_rq = 1;
+
+ if (attr->qp_type == IB_QPT_GSI) {
+ rc = ionic_get_gsi_qpid(dev, &qp->qpid);
+ } else {
+ udma_mask = BIT(dev->lif_cfg.udma_count) - 1;
+
+ if (qp->has_sq)
+ udma_mask &= to_ionic_vcq(attr->send_cq)->udma_mask;
+
+ if (qp->has_rq)
+ udma_mask &= to_ionic_vcq(attr->recv_cq)->udma_mask;
+
+ if (udata && req.udma_mask)
+ udma_mask &= req.udma_mask;
+
+ if (!udma_mask)
+ return -EINVAL;
+
+ rc = ionic_get_qpid(dev, &qp->qpid, &qp->udma_idx, udma_mask);
+ }
+ if (rc)
+ return rc;
+
+ qp->sig_all = attr->sq_sig_type == IB_SIGNAL_ALL_WR;
+ qp->has_ah = attr->qp_type == IB_QPT_RC;
+
+ if (qp->has_ah) {
+ qp->hdr = kzalloc(sizeof(*qp->hdr), GFP_KERNEL);
+ if (!qp->hdr) {
+ rc = -ENOMEM;
+ goto err_ah_alloc;
+ }
+
+ rc = ionic_get_ahid(dev, &qp->ahid);
+ if (rc)
+ goto err_ahid;
+ }
+
+ if (udata) {
+ if (req.rq_cmb & IONIC_CMB_ENABLE)
+ qp->rq_cmb = req.rq_cmb;
+
+ if (req.sq_cmb & IONIC_CMB_ENABLE)
+ qp->sq_cmb = req.sq_cmb;
+ }
+
+ rc = ionic_qp_sq_init(dev, ctx, qp, &req.sq, &sq_buf,
+ attr->cap.max_send_wr, attr->cap.max_send_sge,
+ attr->cap.max_inline_data, req.sq_spec, udata);
+ if (rc)
+ goto err_sq;
+
+ rc = ionic_qp_rq_init(dev, ctx, qp, &req.rq, &rq_buf,
+ attr->cap.max_recv_wr, attr->cap.max_recv_sge,
+ req.rq_spec, udata);
+ if (rc)
+ goto err_rq;
+
+ rc = ionic_create_qp_cmd(dev, pd,
+ to_ionic_vcq_cq(attr->send_cq, qp->udma_idx),
+ to_ionic_vcq_cq(attr->recv_cq, qp->udma_idx),
+ qp, &sq_buf, &rq_buf, attr);
+ if (rc)
+ goto err_cmd;
+
+ if (udata) {
+ resp.qpid = qp->qpid;
+ resp.udma_idx = qp->udma_idx;
+
+ if (qp->sq_cmb & IONIC_CMB_ENABLE) {
+ bool wc;
+
+ if ((qp->sq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC)) ==
+ (IONIC_CMB_WC | IONIC_CMB_UC)) {
+ ibdev_dbg(&dev->ibdev,
+ "Both sq_cmb flags IONIC_CMB_WC and IONIC_CMB_UC are set, using default driver mapping\n");
+ qp->sq_cmb &= ~(IONIC_CMB_WC | IONIC_CMB_UC);
+ }
+
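+ /* default to a write-combining mapping unless UC alone was
+ * requested
+ */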
+ wc = (qp->sq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC))
+ != IONIC_CMB_UC;
+
+ /* let userspace know the mapping */
+ if (wc)
+ qp->sq_cmb |= IONIC_CMB_WC;
+ else
+ qp->sq_cmb |= IONIC_CMB_UC;
+
+ qp->mmap_sq_cmb =
+ ionic_mmap_entry_insert(ctx,
+ qp->sq.size,
+ PHYS_PFN(qp->sq_cmb_addr),
+ wc ? IONIC_MMAP_WC : 0,
+ &resp.sq_cmb_offset);
+ if (!qp->mmap_sq_cmb) {
+ rc = -ENOMEM;
+ goto err_mmap_sq;
+ }
+
+ resp.sq_cmb = qp->sq_cmb;
+ }
+
+ if (qp->rq_cmb & IONIC_CMB_ENABLE) {
+ bool wc;
+
+ if ((qp->rq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC)) ==
+ (IONIC_CMB_WC | IONIC_CMB_UC)) {
+ ibdev_dbg(&dev->ibdev,
+ "Both rq_cmb flags IONIC_CMB_WC and IONIC_CMB_UC are set, using default driver mapping\n");
+ qp->rq_cmb &= ~(IONIC_CMB_WC | IONIC_CMB_UC);
+ }
+
+ if (qp->rq_cmb & IONIC_CMB_EXPDB)
+ wc = (qp->rq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC))
+ == IONIC_CMB_WC;
+ else
+ wc = (qp->rq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC))
+ != IONIC_CMB_UC;
+
+ /* let userspace know the mapping */
+ if (wc)
+ qp->rq_cmb |= IONIC_CMB_WC;
+ else
+ qp->rq_cmb |= IONIC_CMB_UC;
+
+ qp->mmap_rq_cmb =
+ ionic_mmap_entry_insert(ctx,
+ qp->rq.size,
+ PHYS_PFN(qp->rq_cmb_addr),
+ wc ? IONIC_MMAP_WC : 0,
+ &resp.rq_cmb_offset);
+ if (!qp->mmap_rq_cmb) {
+ rc = -ENOMEM;
+ goto err_mmap_rq;
+ }
+
+ resp.rq_cmb = qp->rq_cmb;
+ }
+
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc)
+ goto err_resp;
+ }
+
+ ionic_pgtbl_unbuf(dev, &rq_buf);
+ ionic_pgtbl_unbuf(dev, &sq_buf);
+
+ qp->ibqp.qp_num = qp->qpid;
+
+ init_completion(&qp->qp_rel_comp);
+ kref_init(&qp->qp_kref);
+
+ entry = xa_store_irq(&dev->qp_tbl, qp->qpid, qp, GFP_KERNEL);
+ if (entry) {
+ if (!xa_is_err(entry))
+ rc = -EINVAL;
+ else
+ rc = xa_err(entry);
+
+ goto err_resp;
+ }
+
+ if (qp->has_sq) {
+ cq = to_ionic_vcq_cq(attr->send_cq, qp->udma_idx);
+
+ attr->cap.max_send_wr = qp->sq.mask;
+ attr->cap.max_send_sge =
+ ionic_v1_send_wqe_max_sge(qp->sq.stride_log2,
+ qp->sq_spec,
+ qp->sq_cmb & IONIC_CMB_EXPDB);
+ attr->cap.max_inline_data =
+ ionic_v1_send_wqe_max_data(qp->sq.stride_log2,
+ qp->sq_cmb &
+ IONIC_CMB_EXPDB);
+ qp->sq_cqid = cq->cqid;
+ }
+
+ if (qp->has_rq) {
+ cq = to_ionic_vcq_cq(attr->recv_cq, qp->udma_idx);
+
+ attr->cap.max_recv_wr = qp->rq.mask;
+ attr->cap.max_recv_sge =
+ ionic_v1_recv_wqe_max_sge(qp->rq.stride_log2,
+ qp->rq_spec,
+ qp->rq_cmb & IONIC_CMB_EXPDB);
+ qp->rq_cqid = cq->cqid;
+ }
+
+ return 0;
+
+err_resp:
+ if (udata && (qp->rq_cmb & IONIC_CMB_ENABLE))
+ rdma_user_mmap_entry_remove(qp->mmap_rq_cmb);
+err_mmap_rq:
+ if (udata && (qp->sq_cmb & IONIC_CMB_ENABLE))
+ rdma_user_mmap_entry_remove(qp->mmap_sq_cmb);
+err_mmap_sq:
+ ionic_destroy_qp_cmd(dev, qp->qpid);
+err_cmd:
+ ionic_pgtbl_unbuf(dev, &rq_buf);
+ ionic_qp_rq_destroy(dev, ctx, qp);
+err_rq:
+ ionic_pgtbl_unbuf(dev, &sq_buf);
+ ionic_qp_sq_destroy(dev, ctx, qp);
+err_sq:
+ if (qp->has_ah)
+ ionic_put_ahid(dev, qp->ahid);
+err_ahid:
+ kfree(qp->hdr);
+err_ah_alloc:
+ ionic_put_qpid(dev, qp->qpid);
+ return rc;
+}
+
+void ionic_notify_flush_cq(struct ionic_cq *cq)
+{
+ if (cq->flush && cq->vcq->ibcq.comp_handler)
+ cq->vcq->ibcq.comp_handler(&cq->vcq->ibcq,
+ cq->vcq->ibcq.cq_context);
+}
+
+static void ionic_notify_qp_cqs(struct ionic_ibdev *dev, struct ionic_qp *qp)
+{
+ if (qp->ibqp.send_cq)
+ ionic_notify_flush_cq(to_ionic_vcq_cq(qp->ibqp.send_cq,
+ qp->udma_idx));
+ if (qp->ibqp.recv_cq && qp->ibqp.recv_cq != qp->ibqp.send_cq)
+ ionic_notify_flush_cq(to_ionic_vcq_cq(qp->ibqp.recv_cq,
+ qp->udma_idx));
+}
+
+void ionic_flush_qp(struct ionic_ibdev *dev, struct ionic_qp *qp)
+{
+ unsigned long irqflags;
+ struct ionic_cq *cq;
+
+ if (qp->ibqp.send_cq) {
+ cq = to_ionic_vcq_cq(qp->ibqp.send_cq, qp->udma_idx);
+
+ /* Hold the CQ lock and QP sq_lock to set up flush */
+ spin_lock_irqsave(&cq->lock, irqflags);
+ spin_lock(&qp->sq_lock);
+ qp->sq_flush = true;
+ if (!ionic_queue_empty(&qp->sq)) {
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_sq, &cq->flush_sq);
+ }
+ spin_unlock(&qp->sq_lock);
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+ }
+
+ if (qp->ibqp.recv_cq) {
+ cq = to_ionic_vcq_cq(qp->ibqp.recv_cq, qp->udma_idx);
+
+ /* Hold the CQ lock and QP rq_lock to set up flush */
+ spin_lock_irqsave(&cq->lock, irqflags);
+ spin_lock(&qp->rq_lock);
+ qp->rq_flush = true;
+ if (!ionic_queue_empty(&qp->rq)) {
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_rq, &cq->flush_rq);
+ }
+ spin_unlock(&qp->rq_lock);
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+ }
+}
+
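+/* Invalidate any unpolled cqes that belong to this qp so polling skips them */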
+static void ionic_clean_cq(struct ionic_cq *cq, u32 qpid)
+{
+ struct ionic_v1_cqe *qcqe;
+ int prod, qtf, qid, type;
+ bool color;
+
+ if (!cq->q.ptr)
+ return;
+
+ color = cq->color;
+ prod = cq->q.prod;
+ qcqe = ionic_queue_at(&cq->q, prod);
+
+ while (color == ionic_v1_cqe_color(qcqe)) {
+ qtf = ionic_v1_cqe_qtf(qcqe);
+ qid = ionic_v1_cqe_qtf_qid(qtf);
+ type = ionic_v1_cqe_qtf_type(qtf);
+
+ if (qid == qpid && type != IONIC_V1_CQE_TYPE_ADMIN)
+ ionic_v1_cqe_clean(qcqe);
+
+ prod = ionic_queue_next(&cq->q, prod);
+ qcqe = ionic_queue_at(&cq->q, prod);
+ color = ionic_color_wrap(prod, color);
+ }
+}
+
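+/*
+ * Return the qp to its post-reset state: drop its unpolled cqes and
+ * rewind both work queues and their metadata.
+ */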
+static void ionic_reset_qp(struct ionic_ibdev *dev, struct ionic_qp *qp)
+{
+ unsigned long irqflags;
+ struct ionic_cq *cq;
+ int i;
+
+ local_irq_save(irqflags);
+
+ if (qp->ibqp.send_cq) {
+ cq = to_ionic_vcq_cq(qp->ibqp.send_cq, qp->udma_idx);
+ spin_lock(&cq->lock);
+ ionic_clean_cq(cq, qp->qpid);
+ spin_unlock(&cq->lock);
+ }
+
+ if (qp->ibqp.recv_cq) {
+ cq = to_ionic_vcq_cq(qp->ibqp.recv_cq, qp->udma_idx);
+ spin_lock(&cq->lock);
+ ionic_clean_cq(cq, qp->qpid);
+ spin_unlock(&cq->lock);
+ }
+
+ if (qp->has_sq) {
+ spin_lock(&qp->sq_lock);
+ qp->sq_flush = false;
+ qp->sq_flush_rcvd = false;
+ qp->sq_msn_prod = 0;
+ qp->sq_msn_cons = 0;
+ qp->sq.prod = 0;
+ qp->sq.cons = 0;
+ spin_unlock(&qp->sq_lock);
+ }
+
+ if (qp->has_rq) {
+ spin_lock(&qp->rq_lock);
+ qp->rq_flush = false;
+ qp->rq.prod = 0;
+ qp->rq.cons = 0;
+ if (qp->rq_meta) {
+ for (i = 0; i < qp->rq.mask; ++i)
+ qp->rq_meta[i].next = &qp->rq_meta[i + 1];
+ qp->rq_meta[i].next = IONIC_META_LAST;
+ }
+ qp->rq_meta_head = &qp->rq_meta[0];
+ spin_unlock(&qp->rq_lock);
+ }
+
+ local_irq_restore(irqflags);
+}
+
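+/*
+ * Check that the qp's actual state is compatible with the current state
+ * claimed by the caller in attr->cur_qp_state.
+ */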
+static bool ionic_qp_cur_state_is_ok(enum ib_qp_state q_state,
+ enum ib_qp_state attr_state)
+{
+ if (q_state == attr_state)
+ return true;
+
+ if (attr_state == IB_QPS_ERR)
+ return true;
+
+ if (attr_state == IB_QPS_SQE)
+ return q_state == IB_QPS_RTS || q_state == IB_QPS_SQD;
+
+ return false;
+}
+
+static int ionic_check_modify_qp(struct ionic_qp *qp, struct ib_qp_attr *attr,
+ int mask)
+{
+ enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
+ attr->cur_qp_state : qp->state;
+ enum ib_qp_state next_state = (mask & IB_QP_STATE) ?
+ attr->qp_state : cur_state;
+
+ if ((mask & IB_QP_CUR_STATE) &&
+ !ionic_qp_cur_state_is_ok(qp->state, attr->cur_qp_state))
+ return -EINVAL;
+
+ if (!ib_modify_qp_is_ok(cur_state, next_state, qp->ibqp.qp_type, mask))
+ return -EINVAL;
+
+ /* an unprivileged qp is not allowed a privileged qkey */
+ if ((mask & IB_QP_QKEY) && (attr->qkey & 0x80000000) &&
+ qp->ibqp.uobject)
+ return -EPERM;
+
+ return 0;
+}
+
+int ionic_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
+ struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
+ struct ionic_pd *pd = to_ionic_pd(ibqp->pd);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ int rc;
+
+ rc = ionic_check_modify_qp(qp, attr, mask);
+ if (rc)
+ return rc;
+
+ if (mask & IB_QP_CAP)
+ return -EINVAL;
+
+ rc = ionic_modify_qp_cmd(dev, pd, qp, attr, mask);
+ if (rc)
+ return rc;
+
+ if (mask & IB_QP_STATE) {
+ qp->state = attr->qp_state;
+
+ if (attr->qp_state == IB_QPS_ERR) {
+ ionic_flush_qp(dev, qp);
+ ionic_notify_qp_cqs(dev, qp);
+ } else if (attr->qp_state == IB_QPS_RESET) {
+ ionic_reset_qp(dev, qp);
+ }
+ }
+
+ return 0;
+}
+
+int ionic_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int mask, struct ib_qp_init_attr *init_attr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ int rc;
+
+ memset(attr, 0, sizeof(*attr));
+ memset(init_attr, 0, sizeof(*init_attr));
+
+ rc = ionic_query_qp_cmd(dev, qp, attr, mask);
+ if (rc)
+ return rc;
+
+ if (qp->has_sq)
+ attr->cap.max_send_wr = qp->sq.mask;
+
+ if (qp->has_rq)
+ attr->cap.max_recv_wr = qp->rq.mask;
+
+ init_attr->event_handler = ibqp->event_handler;
+ init_attr->qp_context = ibqp->qp_context;
+ init_attr->send_cq = ibqp->send_cq;
+ init_attr->recv_cq = ibqp->recv_cq;
+ init_attr->srq = ibqp->srq;
+ init_attr->xrcd = ibqp->xrcd;
+ init_attr->cap = attr->cap;
+ init_attr->sq_sig_type = qp->sig_all ?
+ IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
+ init_attr->qp_type = ibqp->qp_type;
+ init_attr->create_flags = 0;
+ init_attr->port_num = 0;
+ init_attr->rwq_ind_tbl = ibqp->rwq_ind_tbl;
+ init_attr->source_qpn = 0;
+
+ return rc;
+}
+
+int ionic_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+{
+ struct ionic_ctx *ctx =
+ rdma_udata_to_drv_context(udata, struct ionic_ctx, ibctx);
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ unsigned long irqflags;
+ struct ionic_cq *cq;
+ int rc;
+
+ rc = ionic_destroy_qp_cmd(dev, qp->qpid);
+ if (rc)
+ return rc;
+
+ xa_erase_irq(&dev->qp_tbl, qp->qpid);
+
+ kref_put(&qp->qp_kref, ionic_qp_complete);
+ wait_for_completion(&qp->qp_rel_comp);
+
+ if (qp->ibqp.send_cq) {
+ cq = to_ionic_vcq_cq(qp->ibqp.send_cq, qp->udma_idx);
+ spin_lock_irqsave(&cq->lock, irqflags);
+ ionic_clean_cq(cq, qp->qpid);
+ list_del(&qp->cq_poll_sq);
+ list_del(&qp->cq_flush_sq);
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+ }
+
+ if (qp->ibqp.recv_cq) {
+ cq = to_ionic_vcq_cq(qp->ibqp.recv_cq, qp->udma_idx);
+ spin_lock_irqsave(&cq->lock, irqflags);
+ ionic_clean_cq(cq, qp->qpid);
+ list_del(&qp->cq_flush_rq);
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+ }
+
+ ionic_qp_rq_destroy(dev, ctx, qp);
+ ionic_qp_sq_destroy(dev, ctx, qp);
+ if (qp->has_ah) {
+ ionic_put_ahid(dev, qp->ahid);
+ kfree(qp->hdr);
+ }
+ ionic_put_qpid(dev, qp->qpid);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/ionic/ionic_datapath.c b/drivers/infiniband/hw/ionic/ionic_datapath.c
new file mode 100644
index 000000000000..aa2944887f23
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_datapath.c
@@ -0,0 +1,1399 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "ionic_fw.h"
+#include "ionic_ibdev.h"
+
+#define IONIC_OP(version, opname) \
+ ((version) < 2 ? IONIC_V1_OP_##opname : IONIC_V2_OP_##opname)
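+
+/*
+ * IONIC_OP() selects the opcode encoding for the device's rdma_version at
+ * runtime: e.g. IONIC_OP(1, SEND) yields IONIC_V1_OP_SEND, while
+ * IONIC_OP(2, SEND) yields IONIC_V2_OP_SEND.
+ */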
+
+static bool ionic_next_cqe(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ struct ionic_v1_cqe **cqe)
+{
+ struct ionic_v1_cqe *qcqe = ionic_queue_at_prod(&cq->q);
+
+ if (unlikely(cq->color != ionic_v1_cqe_color(qcqe)))
+ return false;
+
+ /* Prevent out-of-order reads of the CQE */
+ dma_rmb();
+
+ *cqe = qcqe;
+
+ return true;
+}
+
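+/*
+ * Generate one IB_WC_WR_FLUSH_ERR completion for the oldest posted recv
+ * wqe of a qp that is being flushed.  Returns 1 if a wc was produced,
+ * 0 when there is nothing to flush, or a negative errno on a bad wqe.
+ */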
+static int ionic_flush_recv(struct ionic_qp *qp, struct ib_wc *wc)
+{
+ struct ionic_rq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+
+ if (!qp->rq_flush)
+ return 0;
+
+ if (ionic_queue_empty(&qp->rq))
+ return 0;
+
+ wqe = ionic_queue_at_cons(&qp->rq);
+
+ /* wqe_id must be a valid queue index */
+ if (unlikely(wqe->base.wqe_id >> qp->rq.depth_log2)) {
+ ibdev_warn(qp->ibqp.device,
+ "flush qp %u recv index %llu invalid\n",
+ qp->qpid, (unsigned long long)wqe->base.wqe_id);
+ return -EIO;
+ }
+
+ /* wqe_id must indicate a request that is outstanding */
+ meta = &qp->rq_meta[wqe->base.wqe_id];
+ if (unlikely(meta->next != IONIC_META_POSTED)) {
+ ibdev_warn(qp->ibqp.device,
+ "flush qp %u recv index %llu not posted\n",
+ qp->qpid, (unsigned long long)wqe->base.wqe_id);
+ return -EIO;
+ }
+
+ ionic_queue_consume(&qp->rq);
+
+ memset(wc, 0, sizeof(*wc));
+
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->wr_id = meta->wrid;
+ wc->qp = &qp->ibqp;
+
+ meta->next = qp->rq_meta_head;
+ qp->rq_meta_head = meta;
+
+ return 1;
+}
+
+static int ionic_flush_recv_many(struct ionic_qp *qp,
+ struct ib_wc *wc, int nwc)
+{
+ int rc = 0, npolled = 0;
+
+ while (npolled < nwc) {
+ rc = ionic_flush_recv(qp, wc + npolled);
+ if (rc <= 0)
+ break;
+
+ npolled += rc;
+ }
+
+ return npolled ?: rc;
+}
+
+static int ionic_flush_send(struct ionic_qp *qp, struct ib_wc *wc)
+{
+ struct ionic_sq_meta *meta;
+
+ if (!qp->sq_flush)
+ return 0;
+
+ if (ionic_queue_empty(&qp->sq))
+ return 0;
+
+ meta = &qp->sq_meta[qp->sq.cons];
+
+ ionic_queue_consume(&qp->sq);
+
+ memset(wc, 0, sizeof(*wc));
+
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->wr_id = meta->wrid;
+ wc->qp = &qp->ibqp;
+
+ return 1;
+}
+
+static int ionic_flush_send_many(struct ionic_qp *qp,
+ struct ib_wc *wc, int nwc)
+{
+ int rc = 0, npolled = 0;
+
+ while (npolled < nwc) {
+ rc = ionic_flush_send(qp, wc + npolled);
+ if (rc <= 0)
+ break;
+
+ npolled += rc;
+ }
+
+ return npolled ?: rc;
+}
+
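+/*
+ * Translate one recv cqe into an ib_wc.  A flushed-in-error cqe instead
+ * moves the qp onto the cq's recv flush list and produces no wc here.
+ */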
+static int ionic_poll_recv(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ struct ionic_qp *cqe_qp, struct ionic_v1_cqe *cqe,
+ struct ib_wc *wc)
+{
+ struct ionic_qp *qp = NULL;
+ struct ionic_rq_meta *meta;
+ u32 src_qpn, st_len;
+ u16 vlan_tag;
+ u8 op;
+
+ if (cqe_qp->rq_flush)
+ return 0;
+
+ qp = cqe_qp;
+
+ st_len = be32_to_cpu(cqe->status_length);
+
+ /* ignore wqe_id in case of flush error */
+ if (ionic_v1_cqe_error(cqe) && st_len == IONIC_STS_WQE_FLUSHED_ERR) {
+ cqe_qp->rq_flush = true;
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_rq, &cq->flush_rq);
+
+ /* posted recvs (if any) flushed by ionic_flush_recv */
+ return 0;
+ }
+
+ /* there had better be something in the recv queue to complete */
+ if (ionic_queue_empty(&qp->rq)) {
+ ibdev_warn(&dev->ibdev, "qp %u is empty\n", qp->qpid);
+ return -EIO;
+ }
+
+ /* wqe_id must be a valid queue index */
+ if (unlikely(cqe->recv.wqe_id >> qp->rq.depth_log2)) {
+ ibdev_warn(&dev->ibdev,
+ "qp %u recv index %llu invalid\n",
+ qp->qpid, (unsigned long long)cqe->recv.wqe_id);
+ return -EIO;
+ }
+
+ /* wqe_id must indicate a request that is outstanding */
+ meta = &qp->rq_meta[cqe->recv.wqe_id];
+ if (unlikely(meta->next != IONIC_META_POSTED)) {
+ ibdev_warn(&dev->ibdev,
+ "qp %u recv index %llu not posted\n",
+ qp->qpid, (unsigned long long)cqe->recv.wqe_id);
+ return -EIO;
+ }
+
+ meta->next = qp->rq_meta_head;
+ qp->rq_meta_head = meta;
+
+ memset(wc, 0, sizeof(*wc));
+
+ wc->wr_id = meta->wrid;
+
+ wc->qp = &cqe_qp->ibqp;
+
+ if (ionic_v1_cqe_error(cqe)) {
+ wc->vendor_err = st_len;
+ wc->status = ionic_to_ib_status(st_len);
+
+ cqe_qp->rq_flush = true;
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_rq, &cq->flush_rq);
+
+ ibdev_warn(&dev->ibdev,
+ "qp %d recv cqe with error\n", qp->qpid);
+ print_hex_dump(KERN_WARNING, "cqe ", DUMP_PREFIX_OFFSET, 16, 1,
+ cqe, BIT(cq->q.stride_log2), true);
+ goto out;
+ }
+
+ wc->vendor_err = 0;
+ wc->status = IB_WC_SUCCESS;
+
+ src_qpn = be32_to_cpu(cqe->recv.src_qpn_op);
+ op = src_qpn >> IONIC_V1_CQE_RECV_OP_SHIFT;
+
+ src_qpn &= IONIC_V1_CQE_RECV_QPN_MASK;
+ op &= IONIC_V1_CQE_RECV_OP_MASK;
+
+ wc->opcode = IB_WC_RECV;
+ switch (op) {
+ case IONIC_V1_CQE_RECV_OP_RDMA_IMM:
+ wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ wc->ex.imm_data = cqe->recv.imm_data_rkey; /* be32 in wc */
+ break;
+ case IONIC_V1_CQE_RECV_OP_SEND_IMM:
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ wc->ex.imm_data = cqe->recv.imm_data_rkey; /* be32 in wc */
+ break;
+ case IONIC_V1_CQE_RECV_OP_SEND_INV:
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ wc->ex.invalidate_rkey = be32_to_cpu(cqe->recv.imm_data_rkey);
+ break;
+ }
+
+ wc->byte_len = st_len;
+ wc->src_qp = src_qpn;
+
+ if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->ibqp.qp_type == IB_QPT_GSI) {
+ wc->wc_flags |= IB_WC_GRH | IB_WC_WITH_SMAC;
+ ether_addr_copy(wc->smac, cqe->recv.src_mac);
+
+ wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
+ if (ionic_v1_cqe_recv_is_ipv4(cqe))
+ wc->network_hdr_type = RDMA_NETWORK_IPV4;
+ else
+ wc->network_hdr_type = RDMA_NETWORK_IPV6;
+
+ if (ionic_v1_cqe_recv_is_vlan(cqe))
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+
+ /* vlan_tag in the cqe is valid from the datapath even if there is no vlan */
+ vlan_tag = be16_to_cpu(cqe->recv.vlan_tag);
+ wc->vlan_id = vlan_tag & 0xfff; /* 802.1q VID */
+ wc->sl = vlan_tag >> VLAN_PRIO_SHIFT; /* 802.1q PCP */
+ }
+
+ wc->pkey_index = 0;
+ wc->port_num = 1;
+
+out:
+ ionic_queue_consume(&qp->rq);
+
+ return 1;
+}
+
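+/*
+ * Return true if the send wqe at the consumer index has completed
+ * (remotely or locally) and can be reported to the consumer.
+ */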
+static bool ionic_peek_send(struct ionic_qp *qp)
+{
+ struct ionic_sq_meta *meta;
+
+ if (qp->sq_flush)
+ return false;
+
+ /* completed all send queue requests */
+ if (ionic_queue_empty(&qp->sq))
+ return false;
+
+ meta = &qp->sq_meta[qp->sq.cons];
+
+ /* waiting for remote completion */
+ if (meta->remote && meta->seq == qp->sq_msn_cons)
+ return false;
+
+ /* waiting for local completion */
+ if (!meta->remote && !meta->local_comp)
+ return false;
+
+ return true;
+}
+
+static int ionic_poll_send(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ struct ionic_qp *qp, struct ib_wc *wc)
+{
+ struct ionic_sq_meta *meta;
+
+ if (qp->sq_flush)
+ return 0;
+
+ do {
+ /* completed all send queue requests */
+ if (ionic_queue_empty(&qp->sq))
+ goto out_empty;
+
+ meta = &qp->sq_meta[qp->sq.cons];
+
+ /* waiting for remote completion */
+ if (meta->remote && meta->seq == qp->sq_msn_cons)
+ goto out_empty;
+
+ /* waiting for local completion */
+ if (!meta->remote && !meta->local_comp)
+ goto out_empty;
+
+ ionic_queue_consume(&qp->sq);
+
+ /* produce wc only if signaled or error status */
+ } while (!meta->signal && meta->ibsts == IB_WC_SUCCESS);
+
+ memset(wc, 0, sizeof(*wc));
+
+ wc->status = meta->ibsts;
+ wc->wr_id = meta->wrid;
+ wc->qp = &qp->ibqp;
+
+ if (meta->ibsts == IB_WC_SUCCESS) {
+ wc->byte_len = meta->len;
+ wc->opcode = meta->ibop;
+ } else {
+ wc->vendor_err = meta->len;
+
+ qp->sq_flush = true;
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_sq, &cq->flush_sq);
+ }
+
+ return 1;
+
+out_empty:
+ if (qp->sq_flush_rcvd) {
+ qp->sq_flush = true;
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_sq, &cq->flush_sq);
+ }
+ return 0;
+}
+
+static int ionic_poll_send_many(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ struct ionic_qp *qp, struct ib_wc *wc, int nwc)
+{
+ int rc = 0, npolled = 0;
+
+ while (npolled < nwc) {
+ rc = ionic_poll_send(dev, cq, qp, wc + npolled);
+ if (rc <= 0)
+ break;
+
+ npolled += rc;
+ }
+
+ return npolled ?: rc;
+}
+
+static int ionic_validate_cons(u16 prod, u16 cons,
+ u16 comp, u16 mask)
+{
+ if (((prod - cons) & mask) <= ((comp - cons) & mask))
+ return -EIO;
+
+ return 0;
+}
+
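+/*
+ * Process a send cqe addressed by message sequence number (msn): advance
+ * the msn consumer index and record any error status on the affected wqe.
+ */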
+static int ionic_comp_msn(struct ionic_qp *qp, struct ionic_v1_cqe *cqe)
+{
+ struct ionic_sq_meta *meta;
+ u16 cqe_seq, cqe_idx;
+ int rc;
+
+ if (qp->sq_flush)
+ return 0;
+
+ cqe_seq = be32_to_cpu(cqe->send.msg_msn) & qp->sq.mask;
+
+ rc = ionic_validate_cons(qp->sq_msn_prod,
+ qp->sq_msn_cons,
+ cqe_seq - 1,
+ qp->sq.mask);
+ if (rc) {
+ ibdev_warn(qp->ibqp.device,
+ "qp %u bad msn %#x seq %u for prod %u cons %u\n",
+ qp->qpid, be32_to_cpu(cqe->send.msg_msn),
+ cqe_seq, qp->sq_msn_prod, qp->sq_msn_cons);
+ return rc;
+ }
+
+ qp->sq_msn_cons = cqe_seq;
+
+ if (ionic_v1_cqe_error(cqe)) {
+ cqe_idx = qp->sq_msn_idx[(cqe_seq - 1) & qp->sq.mask];
+
+ meta = &qp->sq_meta[cqe_idx];
+ meta->len = be32_to_cpu(cqe->status_length);
+ meta->ibsts = ionic_to_ib_status(meta->len);
+
+ ibdev_warn(qp->ibqp.device,
+ "qp %d msn cqe with error\n", qp->qpid);
+ print_hex_dump(KERN_WARNING, "cqe ", DUMP_PREFIX_OFFSET, 16, 1,
+ cqe, sizeof(*cqe), true);
+ }
+
+ return 0;
+}
+
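+/*
+ * Process a send cqe addressed directly by wqe id (npg): mark that wqe
+ * locally complete and record any error status.
+ */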
+static int ionic_comp_npg(struct ionic_qp *qp, struct ionic_v1_cqe *cqe)
+{
+ struct ionic_sq_meta *meta;
+ u16 cqe_idx;
+ u32 st_len;
+
+ if (qp->sq_flush)
+ return 0;
+
+ st_len = be32_to_cpu(cqe->status_length);
+
+ if (ionic_v1_cqe_error(cqe) && st_len == IONIC_STS_WQE_FLUSHED_ERR) {
+ /*
+ * A flush cqe does not consume a wqe on the device, and there may be
+ * no corresponding work request posted.
+ *
+ * The driver should begin flushing after the last indicated
+ * normal or error completion. Here, only set a hint that the
+ * flush request was indicated. In poll_send, if nothing more
+ * can be polled normally, then begin flushing.
+ */
+ qp->sq_flush_rcvd = true;
+ return 0;
+ }
+
+ cqe_idx = cqe->send.npg_wqe_id & qp->sq.mask;
+ meta = &qp->sq_meta[cqe_idx];
+ meta->local_comp = true;
+
+ if (ionic_v1_cqe_error(cqe)) {
+ meta->len = st_len;
+ meta->ibsts = ionic_to_ib_status(st_len);
+ meta->remote = false;
+ ibdev_warn(qp->ibqp.device,
+ "qp %d npg cqe with error\n", qp->qpid);
+ print_hex_dump(KERN_WARNING, "cqe ", DUMP_PREFIX_OFFSET, 16, 1,
+ cqe, sizeof(*cqe), true);
+ }
+
+ return 0;
+}
+
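+/* Acknowledge all consumed cqes to the device and ring the cq doorbell */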
+static void ionic_reserve_sync_cq(struct ionic_ibdev *dev, struct ionic_cq *cq)
+{
+ if (!ionic_queue_empty(&cq->q)) {
+ cq->credit += ionic_queue_length(&cq->q);
+ cq->q.cons = cq->q.prod;
+
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype,
+ ionic_queue_dbell_val(&cq->q));
+ }
+}
+
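+/* Spend cq credit for newly posted work; sync with the device when depleted */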
+static void ionic_reserve_cq(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ int spend)
+{
+ cq->credit -= spend;
+
+ if (cq->credit <= 0)
+ ionic_reserve_sync_cq(dev, cq);
+}
+
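+/*
+ * Poll one device cq of a virtual cq: first poll send queues with
+ * completions already indicated, then consume new cqes, and finally
+ * flush send and recv queues that are in the error-flush state.
+ */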
+static int ionic_poll_vcq_cq(struct ionic_ibdev *dev,
+ struct ionic_cq *cq,
+ int nwc, struct ib_wc *wc)
+{
+ struct ionic_qp *qp, *qp_next;
+ struct ionic_v1_cqe *cqe;
+ int rc = 0, npolled = 0;
+ unsigned long irqflags;
+ u32 qtf, qid;
+ bool peek;
+ u8 type;
+
+ if (nwc < 1)
+ return 0;
+
+ spin_lock_irqsave(&cq->lock, irqflags);
+
+ /* poll send completions already indicated for queues on the poll list */
+ list_for_each_entry_safe(qp, qp_next, &cq->poll_sq, cq_poll_sq) {
+ if (npolled == nwc)
+ goto out;
+
+ spin_lock(&qp->sq_lock);
+ rc = ionic_poll_send_many(dev, cq, qp, wc + npolled,
+ nwc - npolled);
+ spin_unlock(&qp->sq_lock);
+
+ if (rc > 0)
+ npolled += rc;
+
+ if (npolled < nwc)
+ list_del_init(&qp->cq_poll_sq);
+ }
+
+ /* poll for more work completions */
+ while (likely(ionic_next_cqe(dev, cq, &cqe))) {
+ if (npolled == nwc)
+ goto out;
+
+ qtf = ionic_v1_cqe_qtf(cqe);
+ qid = ionic_v1_cqe_qtf_qid(qtf);
+ type = ionic_v1_cqe_qtf_type(qtf);
+
+ /*
+ * It is safe to access the QP without taking an additional reference
+ * here because:
+ * 1. We hold cq->lock throughout
+ * 2. ionic_destroy_qp() acquires the same cq->lock before cleanup
+ * 3. QP is removed from qp_tbl before any cleanup begins
+ * This ensures no concurrent access between polling and destruction.
+ */
+ qp = xa_load(&dev->qp_tbl, qid);
+ if (unlikely(!qp)) {
+ ibdev_dbg(&dev->ibdev, "missing qp for qid %u\n", qid);
+ goto cq_next;
+ }
+
+ switch (type) {
+ case IONIC_V1_CQE_TYPE_RECV:
+ spin_lock(&qp->rq_lock);
+ rc = ionic_poll_recv(dev, cq, qp, cqe, wc + npolled);
+ spin_unlock(&qp->rq_lock);
+
+ if (rc < 0)
+ goto out;
+
+ npolled += rc;
+
+ break;
+
+ case IONIC_V1_CQE_TYPE_SEND_MSN:
+ spin_lock(&qp->sq_lock);
+ rc = ionic_comp_msn(qp, cqe);
+ if (!rc) {
+ rc = ionic_poll_send_many(dev, cq, qp,
+ wc + npolled,
+ nwc - npolled);
+ peek = ionic_peek_send(qp);
+ }
+ spin_unlock(&qp->sq_lock);
+
+ if (rc < 0)
+ goto out;
+
+ npolled += rc;
+
+ if (peek)
+ list_move_tail(&qp->cq_poll_sq, &cq->poll_sq);
+ break;
+
+ case IONIC_V1_CQE_TYPE_SEND_NPG:
+ spin_lock(&qp->sq_lock);
+ rc = ionic_comp_npg(qp, cqe);
+ if (!rc) {
+ rc = ionic_poll_send_many(dev, cq, qp,
+ wc + npolled,
+ nwc - npolled);
+ peek = ionic_peek_send(qp);
+ }
+ spin_unlock(&qp->sq_lock);
+
+ if (rc < 0)
+ goto out;
+
+ npolled += rc;
+
+ if (peek)
+ list_move_tail(&qp->cq_poll_sq, &cq->poll_sq);
+ break;
+
+ default:
+ ibdev_warn(&dev->ibdev,
+ "unexpected cqe type %u\n", type);
+ rc = -EIO;
+ goto out;
+ }
+
+cq_next:
+ ionic_queue_produce(&cq->q);
+ cq->color = ionic_color_wrap(cq->q.prod, cq->color);
+ }
+
+ /* lastly, flush send and recv queues */
+ if (likely(!cq->flush))
+ goto out;
+
+ cq->flush = false;
+
+ list_for_each_entry_safe(qp, qp_next, &cq->flush_sq, cq_flush_sq) {
+ if (npolled == nwc)
+ goto out;
+
+ spin_lock(&qp->sq_lock);
+ rc = ionic_flush_send_many(qp, wc + npolled, nwc - npolled);
+ spin_unlock(&qp->sq_lock);
+
+ if (rc > 0)
+ npolled += rc;
+
+ if (npolled < nwc)
+ list_del_init(&qp->cq_flush_sq);
+ else
+ cq->flush = true;
+ }
+
+ list_for_each_entry_safe(qp, qp_next, &cq->flush_rq, cq_flush_rq) {
+ if (npolled == nwc)
+ goto out;
+
+ spin_lock(&qp->rq_lock);
+ rc = ionic_flush_recv_many(qp, wc + npolled, nwc - npolled);
+ spin_unlock(&qp->rq_lock);
+
+ if (rc > 0)
+ npolled += rc;
+
+ if (npolled < nwc)
+ list_del_init(&qp->cq_flush_rq);
+ else
+ cq->flush = true;
+ }
+
+out:
+ /* in case credit was depleted (more work posted than cq depth) */
+ if (cq->credit <= 0)
+ ionic_reserve_sync_cq(dev, cq);
+
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+
+ return npolled ?: rc;
+}
+
+int ionic_poll_cq(struct ib_cq *ibcq, int nwc, struct ib_wc *wc)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device);
+ struct ionic_vcq *vcq = to_ionic_vcq(ibcq);
+ int rc_tmp, rc = 0, npolled = 0;
+ int cq_i, cq_x, cq_ix;
+
+ cq_x = vcq->poll_idx;
+ vcq->poll_idx ^= dev->lif_cfg.udma_count - 1;
+
+ for (cq_i = 0; npolled < nwc && cq_i < dev->lif_cfg.udma_count; ++cq_i) {
+ cq_ix = cq_i ^ cq_x;
+
+ if (!(vcq->udma_mask & BIT(cq_ix)))
+ continue;
+
+ rc_tmp = ionic_poll_vcq_cq(dev, &vcq->cq[cq_ix],
+ nwc - npolled,
+ wc + npolled);
+
+ if (rc_tmp >= 0)
+ npolled += rc_tmp;
+ else if (!rc)
+ rc = rc_tmp;
+ }
+
+ return npolled ?: rc;
+}
+
+static int ionic_req_notify_vcq_cq(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ enum ib_cq_notify_flags flags)
+{
+ u64 dbell_val = cq->q.dbell;
+
+ if (flags & IB_CQ_SOLICITED) {
+ cq->arm_sol_prod = ionic_queue_next(&cq->q, cq->arm_sol_prod);
+ dbell_val |= cq->arm_sol_prod | IONIC_CQ_RING_SOL;
+ } else {
+ cq->arm_any_prod = ionic_queue_next(&cq->q, cq->arm_any_prod);
+ dbell_val |= cq->arm_any_prod | IONIC_CQ_RING_ARM;
+ }
+
+ ionic_reserve_sync_cq(dev, cq);
+
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype, dbell_val);
+
+ /*
+ * IB_CQ_REPORT_MISSED_EVENTS:
+ *
+ * The queue index in ring zero guarantees no missed events.
+ *
+ * Here, we check if the color bit in the next cqe is flipped. If it
+ * is flipped, then progress can be made by immediately polling the cq.
+ * Still, the cq will be armed, and an event will be generated. The cq
+ * may be empty when polled after the event, because the next poll
+ * after arming the cq can empty it.
+ */
+ return (flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+ cq->color == ionic_v1_cqe_color(ionic_queue_at_prod(&cq->q));
+}
+
+int ionic_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device);
+ struct ionic_vcq *vcq = to_ionic_vcq(ibcq);
+ int rc = 0, cq_i;
+
+ for (cq_i = 0; cq_i < dev->lif_cfg.udma_count; ++cq_i) {
+ if (!(vcq->udma_mask & BIT(cq_i)))
+ continue;
+
+ if (ionic_req_notify_vcq_cq(dev, &vcq->cq[cq_i], flags))
+ rc = 1;
+ }
+
+ return rc;
+}
+
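+/* Copy the payload described by the sg list into the wqe as inline data */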
+static s64 ionic_prep_inline(void *data, u32 max_data,
+ const struct ib_sge *ib_sgl, int num_sge)
+{
+ static const s64 bit_31 = 1u << 31;
+ s64 len = 0, sg_len;
+ int sg_i;
+
+ for (sg_i = 0; sg_i < num_sge; ++sg_i) {
+ sg_len = ib_sgl[sg_i].length;
+
+ /* sge length zero means 2GB */
+ if (unlikely(sg_len == 0))
+ sg_len = bit_31;
+
+ /* greater than max inline data is invalid */
+ if (unlikely(len + sg_len > max_data))
+ return -EINVAL;
+
+ memcpy(data + len, (void *)ib_sgl[sg_i].addr, sg_len);
+
+ len += sg_len;
+ }
+
+ return len;
+}
+
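+/* Build the wqe scatter/gather list, optionally in the compact spec format */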
+static s64 ionic_prep_pld(struct ionic_v1_wqe *wqe,
+ union ionic_v1_pld *pld,
+ int spec, u32 max_sge,
+ const struct ib_sge *ib_sgl,
+ int num_sge)
+{
+ static const s64 bit_31 = 1l << 31;
+ struct ionic_sge *sgl;
+ __be32 *spec32 = NULL;
+ __be16 *spec16 = NULL;
+ s64 len = 0, sg_len;
+ int sg_i = 0;
+
+ if (unlikely(num_sge < 0 || (u32)num_sge > max_sge))
+ return -EINVAL;
+
+ if (spec && num_sge > IONIC_V1_SPEC_FIRST_SGE) {
+ sg_i = IONIC_V1_SPEC_FIRST_SGE;
+
+ if (num_sge > 8) {
+ wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SPEC16);
+ spec16 = pld->spec16;
+ } else {
+ wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SPEC32);
+ spec32 = pld->spec32;
+ }
+ }
+
+ sgl = &pld->sgl[sg_i];
+
+ for (sg_i = 0; sg_i < num_sge; ++sg_i) {
+ sg_len = ib_sgl[sg_i].length;
+
+ /* sge length zero means 2GB */
+ if (unlikely(sg_len == 0))
+ sg_len = bit_31;
+
+ /* greater than 2GB data is invalid */
+ if (unlikely(len + sg_len > bit_31))
+ return -EINVAL;
+
+ sgl[sg_i].va = cpu_to_be64(ib_sgl[sg_i].addr);
+ sgl[sg_i].len = cpu_to_be32(sg_len);
+ sgl[sg_i].lkey = cpu_to_be32(ib_sgl[sg_i].lkey);
+
+ if (spec32) {
+ spec32[sg_i] = sgl[sg_i].len;
+ } else if (spec16) {
+ if (unlikely(sg_len > U16_MAX))
+ return -EINVAL;
+ spec16[sg_i] = cpu_to_be16(sg_len);
+ }
+
+ len += sg_len;
+ }
+
+ return len;
+}
+
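+/*
+ * Fill the fields common to every send wqe, record completion metadata,
+ * and advance the send queue producer index.
+ */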
+static void ionic_prep_base(struct ionic_qp *qp,
+ const struct ib_send_wr *wr,
+ struct ionic_sq_meta *meta,
+ struct ionic_v1_wqe *wqe)
+{
+ meta->wrid = wr->wr_id;
+ meta->ibsts = IB_WC_SUCCESS;
+ meta->signal = false;
+ meta->local_comp = false;
+
+ wqe->base.wqe_id = qp->sq.prod;
+
+ if (wr->send_flags & IB_SEND_FENCE)
+ wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_FENCE);
+
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SOL);
+
+ if (qp->sig_all || wr->send_flags & IB_SEND_SIGNALED) {
+ wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SIG);
+ meta->signal = true;
+ }
+
+ meta->seq = qp->sq_msn_prod;
+ meta->remote =
+ qp->ibqp.qp_type != IB_QPT_UD &&
+ qp->ibqp.qp_type != IB_QPT_GSI &&
+ !ionic_ibop_is_local(wr->opcode);
+
+ if (meta->remote) {
+ qp->sq_msn_idx[meta->seq] = qp->sq.prod;
+ qp->sq_msn_prod = ionic_queue_next(&qp->sq, qp->sq_msn_prod);
+ }
+
+ ionic_queue_produce(&qp->sq);
+}
+
+static int ionic_prep_common(struct ionic_qp *qp,
+ const struct ib_send_wr *wr,
+ struct ionic_sq_meta *meta,
+ struct ionic_v1_wqe *wqe)
+{
+ s64 signed_len;
+ u32 mval;
+
+ if (wr->send_flags & IB_SEND_INLINE) {
+ wqe->base.num_sge_key = 0;
+ wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_INL);
+ mval = ionic_v1_send_wqe_max_data(qp->sq.stride_log2, false);
+ signed_len = ionic_prep_inline(wqe->common.pld.data, mval,
+ wr->sg_list, wr->num_sge);
+ } else {
+ wqe->base.num_sge_key = wr->num_sge;
+ mval = ionic_v1_send_wqe_max_sge(qp->sq.stride_log2,
+ qp->sq_spec,
+ false);
+ signed_len = ionic_prep_pld(wqe, &wqe->common.pld,
+ qp->sq_spec, mval,
+ wr->sg_list, wr->num_sge);
+ }
+
+ if (unlikely(signed_len < 0))
+ return signed_len;
+
+ meta->len = signed_len;
+ wqe->common.length = cpu_to_be32(signed_len);
+
+ ionic_prep_base(qp, wr, meta, wqe);
+
+ return 0;
+}
+
+static void ionic_prep_sq_wqe(struct ionic_qp *qp, void *wqe)
+{
+ memset(wqe, 0, 1u << qp->sq.stride_log2);
+}
+
+static void ionic_prep_rq_wqe(struct ionic_qp *qp, void *wqe)
+{
+ memset(wqe, 0, 1u << qp->rq.stride_log2);
+}
+
+static int ionic_prep_send(struct ionic_qp *qp,
+ const struct ib_send_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ struct ionic_sq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+
+ meta = &qp->sq_meta[qp->sq.prod];
+ wqe = ionic_queue_at_prod(&qp->sq);
+
+ ionic_prep_sq_wqe(qp, wqe);
+
+ meta->ibop = IB_WC_SEND;
+
+ switch (wr->opcode) {
+ case IB_WR_SEND:
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND);
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND_IMM);
+ wqe->base.imm_data_key = wr->ex.imm_data;
+ break;
+ case IB_WR_SEND_WITH_INV:
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND_INV);
+ wqe->base.imm_data_key =
+ cpu_to_be32(wr->ex.invalidate_rkey);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ionic_prep_common(qp, wr, meta, wqe);
+}
+
+static int ionic_prep_send_ud(struct ionic_qp *qp,
+ const struct ib_ud_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ struct ionic_sq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+ struct ionic_ah *ah;
+
+ if (unlikely(!wr->ah))
+ return -EINVAL;
+
+ ah = to_ionic_ah(wr->ah);
+
+ meta = &qp->sq_meta[qp->sq.prod];
+ wqe = ionic_queue_at_prod(&qp->sq);
+
+ ionic_prep_sq_wqe(qp, wqe);
+
+ wqe->common.send.ah_id = cpu_to_be32(ah->ahid);
+ wqe->common.send.dest_qpn = cpu_to_be32(wr->remote_qpn);
+ wqe->common.send.dest_qkey = cpu_to_be32(wr->remote_qkey);
+
+ meta->ibop = IB_WC_SEND;
+
+ switch (wr->wr.opcode) {
+ case IB_WR_SEND:
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND);
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND_IMM);
+ wqe->base.imm_data_key = wr->wr.ex.imm_data;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ionic_prep_common(qp, &wr->wr, meta, wqe);
+}
+
+static int ionic_prep_rdma(struct ionic_qp *qp,
+ const struct ib_rdma_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ struct ionic_sq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+
+ meta = &qp->sq_meta[qp->sq.prod];
+ wqe = ionic_queue_at_prod(&qp->sq);
+
+ ionic_prep_sq_wqe(qp, wqe);
+
+ meta->ibop = IB_WC_RDMA_WRITE;
+
+ switch (wr->wr.opcode) {
+ case IB_WR_RDMA_READ:
+ if (wr->wr.send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE))
+ return -EINVAL;
+ meta->ibop = IB_WC_RDMA_READ;
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, RDMA_READ);
+ break;
+ case IB_WR_RDMA_WRITE:
+ if (wr->wr.send_flags & IB_SEND_SOLICITED)
+ return -EINVAL;
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, RDMA_WRITE);
+ break;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, RDMA_WRITE_IMM);
+ wqe->base.imm_data_key = wr->wr.ex.imm_data;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ wqe->common.rdma.remote_va_high = cpu_to_be32(wr->remote_addr >> 32);
+ wqe->common.rdma.remote_va_low = cpu_to_be32(wr->remote_addr);
+ wqe->common.rdma.remote_rkey = cpu_to_be32(wr->rkey);
+
+ return ionic_prep_common(qp, &wr->wr, meta, wqe);
+}
+
+static int ionic_prep_atomic(struct ionic_qp *qp,
+ const struct ib_atomic_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ struct ionic_sq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+
+ if (wr->wr.num_sge != 1 || wr->wr.sg_list[0].length != 8)
+ return -EINVAL;
+
+ if (wr->wr.send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE))
+ return -EINVAL;
+
+ meta = &qp->sq_meta[qp->sq.prod];
+ wqe = ionic_queue_at_prod(&qp->sq);
+
+ ionic_prep_sq_wqe(qp, wqe);
+
+ meta->ibop = IB_WC_RDMA_WRITE;
+
+ switch (wr->wr.opcode) {
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ meta->ibop = IB_WC_COMP_SWAP;
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, ATOMIC_CS);
+ wqe->atomic.swap_add_high = cpu_to_be32(wr->swap >> 32);
+ wqe->atomic.swap_add_low = cpu_to_be32(wr->swap);
+ wqe->atomic.compare_high = cpu_to_be32(wr->compare_add >> 32);
+ wqe->atomic.compare_low = cpu_to_be32(wr->compare_add);
+ break;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ meta->ibop = IB_WC_FETCH_ADD;
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, ATOMIC_FA);
+ wqe->atomic.swap_add_high = cpu_to_be32(wr->compare_add >> 32);
+ wqe->atomic.swap_add_low = cpu_to_be32(wr->compare_add);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ wqe->atomic.remote_va_high = cpu_to_be32(wr->remote_addr >> 32);
+ wqe->atomic.remote_va_low = cpu_to_be32(wr->remote_addr);
+ wqe->atomic.remote_rkey = cpu_to_be32(wr->rkey);
+
+ wqe->base.num_sge_key = 1;
+ wqe->atomic.sge.va = cpu_to_be64(wr->wr.sg_list[0].addr);
+ wqe->atomic.sge.len = cpu_to_be32(8);
+ wqe->atomic.sge.lkey = cpu_to_be32(wr->wr.sg_list[0].lkey);
+
+ return ionic_prep_common(qp, &wr->wr, meta, wqe);
+}
+
+static int ionic_prep_inv(struct ionic_qp *qp,
+ const struct ib_send_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ struct ionic_sq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+
+ if (wr->send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE))
+ return -EINVAL;
+
+ meta = &qp->sq_meta[qp->sq.prod];
+ wqe = ionic_queue_at_prod(&qp->sq);
+
+ ionic_prep_sq_wqe(qp, wqe);
+
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, LOCAL_INV);
+ wqe->base.imm_data_key = cpu_to_be32(wr->ex.invalidate_rkey);
+
+ meta->len = 0;
+ meta->ibop = IB_WC_LOCAL_INV;
+
+ ionic_prep_base(qp, wr, meta, wqe);
+
+ return 0;
+}
+
+static int ionic_prep_reg(struct ionic_qp *qp,
+ const struct ib_reg_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ struct ionic_mr *mr = to_ionic_mr(wr->mr);
+ struct ionic_sq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+ __le64 dma_addr;
+ int flags;
+
+ if (wr->wr.send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE))
+ return -EINVAL;
+
+ /* must call ib_map_mr_sg before posting reg wr */
+ if (!mr->buf.tbl_pages)
+ return -EINVAL;
+
+ meta = &qp->sq_meta[qp->sq.prod];
+ wqe = ionic_queue_at_prod(&qp->sq);
+
+ ionic_prep_sq_wqe(qp, wqe);
+
+ flags = to_ionic_mr_flags(wr->access);
+
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, REG_MR);
+ wqe->base.num_sge_key = wr->key;
+ wqe->base.imm_data_key = cpu_to_be32(mr->ibmr.lkey);
+ wqe->reg_mr.va = cpu_to_be64(mr->ibmr.iova);
+ wqe->reg_mr.length = cpu_to_be64(mr->ibmr.length);
+ wqe->reg_mr.offset = ionic_pgtbl_off(&mr->buf, mr->ibmr.iova);
+ dma_addr = ionic_pgtbl_dma(&mr->buf, mr->ibmr.iova);
+ wqe->reg_mr.dma_addr = cpu_to_be64(le64_to_cpu(dma_addr));
+
+ wqe->reg_mr.map_count = cpu_to_be32(mr->buf.tbl_pages);
+ wqe->reg_mr.flags = cpu_to_be16(flags);
+ wqe->reg_mr.dir_size_log2 = 0;
+ wqe->reg_mr.page_size_log2 = order_base_2(mr->ibmr.page_size);
+
+ meta->len = 0;
+ meta->ibop = IB_WC_REG_MR;
+
+ ionic_prep_base(qp, &wr->wr, meta, wqe);
+
+ return 0;
+}
+
+static int ionic_prep_one_rc(struct ionic_qp *qp,
+ const struct ib_send_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ int rc = 0;
+
+ switch (wr->opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_SEND_WITH_INV:
+ rc = ionic_prep_send(qp, wr);
+ break;
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ rc = ionic_prep_rdma(qp, rdma_wr(wr));
+ break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ rc = ionic_prep_atomic(qp, atomic_wr(wr));
+ break;
+ case IB_WR_LOCAL_INV:
+ rc = ionic_prep_inv(qp, wr);
+ break;
+ case IB_WR_REG_MR:
+ rc = ionic_prep_reg(qp, reg_wr(wr));
+ break;
+ default:
+ ibdev_dbg(&dev->ibdev, "invalid opcode %d\n", wr->opcode);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static int ionic_prep_one_ud(struct ionic_qp *qp,
+ const struct ib_send_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ int rc = 0;
+
+ switch (wr->opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ rc = ionic_prep_send_ud(qp, ud_wr(wr));
+ break;
+ default:
+ ibdev_dbg(&dev->ibdev, "invalid opcode %d\n", wr->opcode);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
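+/* Build one recv wqe from an ib_recv_wr and claim a recv metadata slot */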
+static int ionic_prep_recv(struct ionic_qp *qp,
+ const struct ib_recv_wr *wr)
+{
+ struct ionic_rq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+ s64 signed_len;
+ u32 mval;
+
+ wqe = ionic_queue_at_prod(&qp->rq);
+
+ /* if wqe is owned by device, caller can try posting again soon */
+ if (wqe->base.flags & cpu_to_be16(IONIC_V1_FLAG_FENCE))
+ return -EAGAIN;
+
+ meta = qp->rq_meta_head;
+ if (unlikely(meta == IONIC_META_LAST) ||
+ unlikely(meta == IONIC_META_POSTED))
+ return -EIO;
+
+ ionic_prep_rq_wqe(qp, wqe);
+
+ mval = ionic_v1_recv_wqe_max_sge(qp->rq.stride_log2, qp->rq_spec,
+ false);
+ signed_len = ionic_prep_pld(wqe, &wqe->recv.pld,
+ qp->rq_spec, mval,
+ wr->sg_list, wr->num_sge);
+ if (signed_len < 0)
+ return signed_len;
+
+ meta->wrid = wr->wr_id;
+
+ wqe->base.wqe_id = meta - qp->rq_meta;
+ wqe->base.num_sge_key = wr->num_sge;
+
+ /* total length for recv goes in base imm_data_key */
+ wqe->base.imm_data_key = cpu_to_be32(signed_len);
+
+ ionic_queue_produce(&qp->rq);
+
+ qp->rq_meta_head = meta->next;
+ meta->next = IONIC_META_POSTED;
+
+ return 0;
+}
+
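+/*
+ * Prepare and post a chain of send work requests, then ring the sq
+ * doorbell once for all newly produced wqes.
+ */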
+static int ionic_post_send_common(struct ionic_ibdev *dev,
+ struct ionic_vcq *vcq,
+ struct ionic_cq *cq,
+ struct ionic_qp *qp,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad)
+{
+ unsigned long irqflags;
+ bool notify = false;
+ int spend, rc = 0;
+
+ if (!bad)
+ return -EINVAL;
+
+ if (!qp->has_sq) {
+ *bad = wr;
+ return -EINVAL;
+ }
+
+ if (qp->state < IB_QPS_RTS) {
+ *bad = wr;
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&qp->sq_lock, irqflags);
+
+ while (wr) {
+ if (ionic_queue_full(&qp->sq)) {
+ ibdev_dbg(&dev->ibdev, "queue full");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->ibqp.qp_type == IB_QPT_GSI)
+ rc = ionic_prep_one_ud(qp, wr);
+ else
+ rc = ionic_prep_one_rc(qp, wr);
+ if (rc)
+ goto out;
+
+ wr = wr->next;
+ }
+
+out:
+ spin_unlock_irqrestore(&qp->sq_lock, irqflags);
+
+ spin_lock_irqsave(&cq->lock, irqflags);
+ spin_lock(&qp->sq_lock);
+
+ if (likely(qp->sq.prod != qp->sq_old_prod)) {
+ /* ring cq doorbell just in time */
+ spend = (qp->sq.prod - qp->sq_old_prod) & qp->sq.mask;
+ ionic_reserve_cq(dev, cq, spend);
+
+ qp->sq_old_prod = qp->sq.prod;
+
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.sq_qtype,
+ ionic_queue_dbell_val(&qp->sq));
+ }
+
+ if (qp->sq_flush) {
+ notify = true;
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_sq, &cq->flush_sq);
+ }
+
+ spin_unlock(&qp->sq_lock);
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+
+ if (notify && vcq->ibcq.comp_handler)
+ vcq->ibcq.comp_handler(&vcq->ibcq, vcq->ibcq.cq_context);
+
+ *bad = wr;
+ return rc;
+}
+
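+/*
+ * Prepare and post a chain of recv work requests, then ring the rq
+ * doorbell once for all newly produced wqes.
+ */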
+static int ionic_post_recv_common(struct ionic_ibdev *dev,
+ struct ionic_vcq *vcq,
+ struct ionic_cq *cq,
+ struct ionic_qp *qp,
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad)
+{
+ unsigned long irqflags;
+ bool notify = false;
+ int spend, rc = 0;
+
+ if (!bad)
+ return -EINVAL;
+
+ if (!qp->has_rq) {
+ *bad = wr;
+ return -EINVAL;
+ }
+
+ if (qp->state < IB_QPS_INIT) {
+ *bad = wr;
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&qp->rq_lock, irqflags);
+
+ while (wr) {
+ if (ionic_queue_full(&qp->rq)) {
+ ibdev_dbg(&dev->ibdev, "queue full");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = ionic_prep_recv(qp, wr);
+ if (rc)
+ goto out;
+
+ wr = wr->next;
+ }
+
+out:
+ if (!cq) {
+ spin_unlock_irqrestore(&qp->rq_lock, irqflags);
+ goto out_unlocked;
+ }
+ spin_unlock_irqrestore(&qp->rq_lock, irqflags);
+
+ spin_lock_irqsave(&cq->lock, irqflags);
+ spin_lock(&qp->rq_lock);
+
+ if (likely(qp->rq.prod != qp->rq_old_prod)) {
+ /* ring cq doorbell just in time */
+ spend = (qp->rq.prod - qp->rq_old_prod) & qp->rq.mask;
+ ionic_reserve_cq(dev, cq, spend);
+
+ qp->rq_old_prod = qp->rq.prod;
+
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.rq_qtype,
+ ionic_queue_dbell_val(&qp->rq));
+ }
+
+ if (qp->rq_flush) {
+ notify = true;
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_rq, &cq->flush_rq);
+ }
+
+ spin_unlock(&qp->rq_lock);
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+
+ if (notify && vcq->ibcq.comp_handler)
+ vcq->ibcq.comp_handler(&vcq->ibcq, vcq->ibcq.cq_context);
+
+out_unlocked:
+ *bad = wr;
+ return rc;
+}
+
+int ionic_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
+ struct ionic_vcq *vcq = to_ionic_vcq(ibqp->send_cq);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ struct ionic_cq *cq =
+ to_ionic_vcq_cq(ibqp->send_cq, qp->udma_idx);
+
+ return ionic_post_send_common(dev, vcq, cq, qp, wr, bad);
+}
+
+int ionic_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
+ struct ionic_vcq *vcq = to_ionic_vcq(ibqp->recv_cq);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ struct ionic_cq *cq =
+ to_ionic_vcq_cq(ibqp->recv_cq, qp->udma_idx);
+
+ return ionic_post_recv_common(dev, vcq, cq, qp, wr, bad);
+}
diff --git a/drivers/infiniband/hw/ionic/ionic_fw.h b/drivers/infiniband/hw/ionic/ionic_fw.h
new file mode 100644
index 000000000000..adfbb89d856c
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_fw.h
@@ -0,0 +1,1029 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_FW_H_
+#define _IONIC_FW_H_
+
+#include <linux/kernel.h>
+#include <rdma/ib_verbs.h>
+
+/* common for ib spec */
+
+#define IONIC_EXP_DBELL_SZ 8
+
+enum ionic_mrid_bits {
+ IONIC_MRID_INDEX_SHIFT = 8,
+};
+
+static inline u32 ionic_mrid(u32 index, u8 key)
+{
+ return (index << IONIC_MRID_INDEX_SHIFT) | key;
+}
+
+static inline u32 ionic_mrid_index(u32 lrkey)
+{
+ return lrkey >> IONIC_MRID_INDEX_SHIFT;
+}
+
+/* common to all versions */
+
+/* wqe scatter gather element */
+struct ionic_sge {
+ __be64 va;
+ __be32 len;
+ __be32 lkey;
+};
+
+/* admin queue mr type */
+enum ionic_mr_flags {
+ /* bits that determine mr access */
+ IONIC_MRF_LOCAL_WRITE = BIT(0),
+ IONIC_MRF_REMOTE_WRITE = BIT(1),
+ IONIC_MRF_REMOTE_READ = BIT(2),
+ IONIC_MRF_REMOTE_ATOMIC = BIT(3),
+ IONIC_MRF_MW_BIND = BIT(4),
+ IONIC_MRF_ZERO_BASED = BIT(5),
+ IONIC_MRF_ON_DEMAND = BIT(6),
+ IONIC_MRF_PB = BIT(7),
+ IONIC_MRF_ACCESS_MASK = BIT(12) - 1,
+
+ /* bits that determine mr type */
+ IONIC_MRF_UKEY_EN = BIT(13),
+ IONIC_MRF_IS_MW = BIT(14),
+ IONIC_MRF_INV_EN = BIT(15),
+
+ /* base flags combinations for mr types */
+ IONIC_MRF_USER_MR = 0,
+ IONIC_MRF_PHYS_MR = (IONIC_MRF_UKEY_EN |
+ IONIC_MRF_INV_EN),
+ IONIC_MRF_MW_1 = (IONIC_MRF_UKEY_EN |
+ IONIC_MRF_IS_MW),
+ IONIC_MRF_MW_2 = (IONIC_MRF_UKEY_EN |
+ IONIC_MRF_IS_MW |
+ IONIC_MRF_INV_EN),
+};
+
+static inline int to_ionic_mr_flags(int access)
+{
+ int flags = 0;
+
+ if (access & IB_ACCESS_LOCAL_WRITE)
+ flags |= IONIC_MRF_LOCAL_WRITE;
+
+ if (access & IB_ACCESS_REMOTE_READ)
+ flags |= IONIC_MRF_REMOTE_READ;
+
+ if (access & IB_ACCESS_REMOTE_WRITE)
+ flags |= IONIC_MRF_REMOTE_WRITE;
+
+ if (access & IB_ACCESS_REMOTE_ATOMIC)
+ flags |= IONIC_MRF_REMOTE_ATOMIC;
+
+ if (access & IB_ACCESS_MW_BIND)
+ flags |= IONIC_MRF_MW_BIND;
+
+ if (access & IB_ZERO_BASED)
+ flags |= IONIC_MRF_ZERO_BASED;
+
+ return flags;
+}
+
+enum ionic_qp_flags {
+ /* bits that determine qp access */
+ IONIC_QPF_REMOTE_WRITE = BIT(0),
+ IONIC_QPF_REMOTE_READ = BIT(1),
+ IONIC_QPF_REMOTE_ATOMIC = BIT(2),
+
+ /* bits that determine other qp behavior */
+ IONIC_QPF_SQ_PB = BIT(6),
+ IONIC_QPF_RQ_PB = BIT(7),
+ IONIC_QPF_SQ_SPEC = BIT(8),
+ IONIC_QPF_RQ_SPEC = BIT(9),
+ IONIC_QPF_REMOTE_PRIVILEGED = BIT(10),
+ IONIC_QPF_SQ_DRAINING = BIT(11),
+ IONIC_QPF_SQD_NOTIFY = BIT(12),
+ IONIC_QPF_SQ_CMB = BIT(13),
+ IONIC_QPF_RQ_CMB = BIT(14),
+ IONIC_QPF_PRIVILEGED = BIT(15),
+};
+
+static inline int from_ionic_qp_flags(int flags)
+{
+ int access_flags = 0;
+
+ if (flags & IONIC_QPF_REMOTE_WRITE)
+ access_flags |= IB_ACCESS_REMOTE_WRITE;
+
+ if (flags & IONIC_QPF_REMOTE_READ)
+ access_flags |= IB_ACCESS_REMOTE_READ;
+
+ if (flags & IONIC_QPF_REMOTE_ATOMIC)
+ access_flags |= IB_ACCESS_REMOTE_ATOMIC;
+
+ return access_flags;
+}
+
+static inline int to_ionic_qp_flags(int access, bool sqd_notify,
+ bool sq_is_cmb, bool rq_is_cmb,
+ bool sq_spec, bool rq_spec,
+ bool privileged, bool remote_privileged)
+{
+ int flags = 0;
+
+ if (access & IB_ACCESS_REMOTE_WRITE)
+ flags |= IONIC_QPF_REMOTE_WRITE;
+
+ if (access & IB_ACCESS_REMOTE_READ)
+ flags |= IONIC_QPF_REMOTE_READ;
+
+ if (access & IB_ACCESS_REMOTE_ATOMIC)
+ flags |= IONIC_QPF_REMOTE_ATOMIC;
+
+ if (sqd_notify)
+ flags |= IONIC_QPF_SQD_NOTIFY;
+
+ if (sq_is_cmb)
+ flags |= IONIC_QPF_SQ_CMB;
+
+ if (rq_is_cmb)
+ flags |= IONIC_QPF_RQ_CMB;
+
+ if (sq_spec)
+ flags |= IONIC_QPF_SQ_SPEC;
+
+ if (rq_spec)
+ flags |= IONIC_QPF_RQ_SPEC;
+
+ if (privileged)
+ flags |= IONIC_QPF_PRIVILEGED;
+
+ if (remote_privileged)
+ flags |= IONIC_QPF_REMOTE_PRIVILEGED;
+
+ return flags;
+}
+
+/* cqe non-admin status indicated in status_length field when err bit is set */
+enum ionic_status {
+ IONIC_STS_OK,
+ IONIC_STS_LOCAL_LEN_ERR,
+ IONIC_STS_LOCAL_QP_OPER_ERR,
+ IONIC_STS_LOCAL_PROT_ERR,
+ IONIC_STS_WQE_FLUSHED_ERR,
+ IONIC_STS_MEM_MGMT_OPER_ERR,
+ IONIC_STS_BAD_RESP_ERR,
+ IONIC_STS_LOCAL_ACC_ERR,
+ IONIC_STS_REMOTE_INV_REQ_ERR,
+ IONIC_STS_REMOTE_ACC_ERR,
+ IONIC_STS_REMOTE_OPER_ERR,
+ IONIC_STS_RETRY_EXCEEDED,
+ IONIC_STS_RNR_RETRY_EXCEEDED,
+ IONIC_STS_XRC_VIO_ERR,
+ IONIC_STS_LOCAL_SGL_INV_ERR,
+};
+
+static inline int ionic_to_ib_status(int sts)
+{
+ switch (sts) {
+ case IONIC_STS_OK:
+ return IB_WC_SUCCESS;
+ case IONIC_STS_LOCAL_LEN_ERR:
+ return IB_WC_LOC_LEN_ERR;
+ case IONIC_STS_LOCAL_QP_OPER_ERR:
+ case IONIC_STS_LOCAL_SGL_INV_ERR:
+ return IB_WC_LOC_QP_OP_ERR;
+ case IONIC_STS_LOCAL_PROT_ERR:
+ return IB_WC_LOC_PROT_ERR;
+ case IONIC_STS_WQE_FLUSHED_ERR:
+ return IB_WC_WR_FLUSH_ERR;
+ case IONIC_STS_MEM_MGMT_OPER_ERR:
+ return IB_WC_MW_BIND_ERR;
+ case IONIC_STS_BAD_RESP_ERR:
+ return IB_WC_BAD_RESP_ERR;
+ case IONIC_STS_LOCAL_ACC_ERR:
+ return IB_WC_LOC_ACCESS_ERR;
+ case IONIC_STS_REMOTE_INV_REQ_ERR:
+ return IB_WC_REM_INV_REQ_ERR;
+ case IONIC_STS_REMOTE_ACC_ERR:
+ return IB_WC_REM_ACCESS_ERR;
+ case IONIC_STS_REMOTE_OPER_ERR:
+ return IB_WC_REM_OP_ERR;
+ case IONIC_STS_RETRY_EXCEEDED:
+ return IB_WC_RETRY_EXC_ERR;
+ case IONIC_STS_RNR_RETRY_EXCEEDED:
+ return IB_WC_RNR_RETRY_EXC_ERR;
+ case IONIC_STS_XRC_VIO_ERR:
+ default:
+ return IB_WC_GENERAL_ERR;
+ }
+}
+
+/* admin queue qp type */
+enum ionic_qp_type {
+ IONIC_QPT_RC,
+ IONIC_QPT_UC,
+ IONIC_QPT_RD,
+ IONIC_QPT_UD,
+ IONIC_QPT_SRQ,
+ IONIC_QPT_XRC_INI,
+ IONIC_QPT_XRC_TGT,
+ IONIC_QPT_XRC_SRQ,
+};
+
+static inline int to_ionic_qp_type(enum ib_qp_type type)
+{
+ switch (type) {
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ return IONIC_QPT_UD;
+ case IB_QPT_RC:
+ return IONIC_QPT_RC;
+ case IB_QPT_UC:
+ return IONIC_QPT_UC;
+ case IB_QPT_XRC_INI:
+ return IONIC_QPT_XRC_INI;
+ case IB_QPT_XRC_TGT:
+ return IONIC_QPT_XRC_TGT;
+ default:
+ return -EINVAL;
+ }
+}
+
+/* admin queue qp state */
+enum ionic_qp_state {
+ IONIC_QPS_RESET,
+ IONIC_QPS_INIT,
+ IONIC_QPS_RTR,
+ IONIC_QPS_RTS,
+ IONIC_QPS_SQD,
+ IONIC_QPS_SQE,
+ IONIC_QPS_ERR,
+};
+
+static inline int from_ionic_qp_state(enum ionic_qp_state state)
+{
+ switch (state) {
+ case IONIC_QPS_RESET:
+ return IB_QPS_RESET;
+ case IONIC_QPS_INIT:
+ return IB_QPS_INIT;
+ case IONIC_QPS_RTR:
+ return IB_QPS_RTR;
+ case IONIC_QPS_RTS:
+ return IB_QPS_RTS;
+ case IONIC_QPS_SQD:
+ return IB_QPS_SQD;
+ case IONIC_QPS_SQE:
+ return IB_QPS_SQE;
+ case IONIC_QPS_ERR:
+ return IB_QPS_ERR;
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline int to_ionic_qp_state(enum ib_qp_state state)
+{
+ switch (state) {
+ case IB_QPS_RESET:
+ return IONIC_QPS_RESET;
+ case IB_QPS_INIT:
+ return IONIC_QPS_INIT;
+ case IB_QPS_RTR:
+ return IONIC_QPS_RTR;
+ case IB_QPS_RTS:
+ return IONIC_QPS_RTS;
+ case IB_QPS_SQD:
+ return IONIC_QPS_SQD;
+ case IB_QPS_SQE:
+ return IONIC_QPS_SQE;
+ case IB_QPS_ERR:
+ return IONIC_QPS_ERR;
+ default:
+ return 0;
+ }
+}
+
+static inline int to_ionic_qp_modify_state(enum ib_qp_state to_state,
+ enum ib_qp_state from_state)
+{
+ return to_ionic_qp_state(to_state) |
+ (to_ionic_qp_state(from_state) << 4);
+}
+
+/* fw abi v1 */
+
+/* data payload part of v1 wqe */
+union ionic_v1_pld {
+ struct ionic_sge sgl[2];
+ __be32 spec32[8];
+ __be16 spec16[16];
+ __u8 data[32];
+};
+
+/* completion queue v1 cqe */
+struct ionic_v1_cqe {
+ union {
+ struct {
+ __be16 cmd_idx;
+ __u8 cmd_op;
+ __u8 rsvd[17];
+ __le16 old_sq_cindex;
+ __le16 old_rq_cq_cindex;
+ } admin;
+ struct {
+ __u64 wqe_id;
+ __be32 src_qpn_op;
+ __u8 src_mac[6];
+ __be16 vlan_tag;
+ __be32 imm_data_rkey;
+ } recv;
+ struct {
+ __u8 rsvd[4];
+ __be32 msg_msn;
+ __u8 rsvd2[8];
+ __u64 npg_wqe_id;
+ } send;
+ };
+ __be32 status_length;
+ __be32 qid_type_flags;
+};
+
+/* bits for cqe recv */
+enum ionic_v1_cqe_src_qpn_bits {
+ IONIC_V1_CQE_RECV_QPN_MASK = 0xffffff,
+ IONIC_V1_CQE_RECV_OP_SHIFT = 24,
+
+ /* MASK could be 0x3, but need 0x1f for makeshift values:
+ * OP_TYPE_RDMA_OPER_WITH_IMM, OP_TYPE_SEND_RCVD
+ */
+ IONIC_V1_CQE_RECV_OP_MASK = 0x1f,
+ IONIC_V1_CQE_RECV_OP_SEND = 0,
+ IONIC_V1_CQE_RECV_OP_SEND_INV = 1,
+ IONIC_V1_CQE_RECV_OP_SEND_IMM = 2,
+ IONIC_V1_CQE_RECV_OP_RDMA_IMM = 3,
+
+ IONIC_V1_CQE_RECV_IS_IPV4 = BIT(7 + IONIC_V1_CQE_RECV_OP_SHIFT),
+ IONIC_V1_CQE_RECV_IS_VLAN = BIT(6 + IONIC_V1_CQE_RECV_OP_SHIFT),
+};
+
+/* bits for cqe qid_type_flags */
+enum ionic_v1_cqe_qtf_bits {
+ IONIC_V1_CQE_COLOR = BIT(0),
+ IONIC_V1_CQE_ERROR = BIT(1),
+ IONIC_V1_CQE_TYPE_SHIFT = 5,
+ IONIC_V1_CQE_TYPE_MASK = 0x7,
+ IONIC_V1_CQE_QID_SHIFT = 8,
+
+ IONIC_V1_CQE_TYPE_ADMIN = 0,
+ IONIC_V1_CQE_TYPE_RECV = 1,
+ IONIC_V1_CQE_TYPE_SEND_MSN = 2,
+ IONIC_V1_CQE_TYPE_SEND_NPG = 3,
+};
+
+static inline bool ionic_v1_cqe_color(struct ionic_v1_cqe *cqe)
+{
+ return cqe->qid_type_flags & cpu_to_be32(IONIC_V1_CQE_COLOR);
+}
+
+static inline bool ionic_v1_cqe_error(struct ionic_v1_cqe *cqe)
+{
+ return cqe->qid_type_flags & cpu_to_be32(IONIC_V1_CQE_ERROR);
+}
+
+static inline bool ionic_v1_cqe_recv_is_ipv4(struct ionic_v1_cqe *cqe)
+{
+ return cqe->recv.src_qpn_op & cpu_to_be32(IONIC_V1_CQE_RECV_IS_IPV4);
+}
+
+static inline bool ionic_v1_cqe_recv_is_vlan(struct ionic_v1_cqe *cqe)
+{
+ return cqe->recv.src_qpn_op & cpu_to_be32(IONIC_V1_CQE_RECV_IS_VLAN);
+}
+
+static inline void ionic_v1_cqe_clean(struct ionic_v1_cqe *cqe)
+{
+ cqe->qid_type_flags |= cpu_to_be32(~0u << IONIC_V1_CQE_QID_SHIFT);
+}
+
+static inline u32 ionic_v1_cqe_qtf(struct ionic_v1_cqe *cqe)
+{
+ return be32_to_cpu(cqe->qid_type_flags);
+}
+
+static inline u8 ionic_v1_cqe_qtf_type(u32 qtf)
+{
+ return (qtf >> IONIC_V1_CQE_TYPE_SHIFT) & IONIC_V1_CQE_TYPE_MASK;
+}
+
+static inline u32 ionic_v1_cqe_qtf_qid(u32 qtf)
+{
+ return qtf >> IONIC_V1_CQE_QID_SHIFT;
+}
+
+/* v1 base wqe header */
+struct ionic_v1_base_hdr {
+ __u64 wqe_id;
+ __u8 op;
+ __u8 num_sge_key;
+ __be16 flags;
+ __be32 imm_data_key;
+};
+
+/* v1 receive wqe body */
+struct ionic_v1_recv_bdy {
+ __u8 rsvd[16];
+ union ionic_v1_pld pld;
+};
+
+/* v1 send/rdma wqe body (common, has sgl) */
+struct ionic_v1_common_bdy {
+ union {
+ struct {
+ __be32 ah_id;
+ __be32 dest_qpn;
+ __be32 dest_qkey;
+ } send;
+ struct {
+ __be32 remote_va_high;
+ __be32 remote_va_low;
+ __be32 remote_rkey;
+ } rdma;
+ };
+ __be32 length;
+ union ionic_v1_pld pld;
+};
+
+/* v1 atomic wqe body */
+struct ionic_v1_atomic_bdy {
+ __be32 remote_va_high;
+ __be32 remote_va_low;
+ __be32 remote_rkey;
+ __be32 swap_add_high;
+ __be32 swap_add_low;
+ __be32 compare_high;
+ __be32 compare_low;
+ __u8 rsvd[4];
+ struct ionic_sge sge;
+};
+
+/* v1 reg mr wqe body */
+struct ionic_v1_reg_mr_bdy {
+ __be64 va;
+ __be64 length;
+ __be64 offset;
+ __be64 dma_addr;
+ __be32 map_count;
+ __be16 flags;
+ __u8 dir_size_log2;
+ __u8 page_size_log2;
+ __u8 rsvd[8];
+};
+
+/* v1 bind mw wqe body */
+struct ionic_v1_bind_mw_bdy {
+ __be64 va;
+ __be64 length;
+ __be32 lkey;
+ __be16 flags;
+ __u8 rsvd[26];
+};
+
+/* v1 send/recv wqe */
+struct ionic_v1_wqe {
+ struct ionic_v1_base_hdr base;
+ union {
+ struct ionic_v1_recv_bdy recv;
+ struct ionic_v1_common_bdy common;
+ struct ionic_v1_atomic_bdy atomic;
+ struct ionic_v1_reg_mr_bdy reg_mr;
+ struct ionic_v1_bind_mw_bdy bind_mw;
+ };
+};
+
+/* queue pair v1 send opcodes */
+enum ionic_v1_op {
+ IONIC_V1_OP_SEND,
+ IONIC_V1_OP_SEND_INV,
+ IONIC_V1_OP_SEND_IMM,
+ IONIC_V1_OP_RDMA_READ,
+ IONIC_V1_OP_RDMA_WRITE,
+ IONIC_V1_OP_RDMA_WRITE_IMM,
+ IONIC_V1_OP_ATOMIC_CS,
+ IONIC_V1_OP_ATOMIC_FA,
+ IONIC_V1_OP_REG_MR,
+ IONIC_V1_OP_LOCAL_INV,
+ IONIC_V1_OP_BIND_MW,
+
+ /* flags */
+ IONIC_V1_FLAG_FENCE = BIT(0),
+ IONIC_V1_FLAG_SOL = BIT(1),
+ IONIC_V1_FLAG_INL = BIT(2),
+ IONIC_V1_FLAG_SIG = BIT(3),
+
+ /* flags last four bits for sgl spec format */
+ IONIC_V1_FLAG_SPEC32 = (1u << 12),
+ IONIC_V1_FLAG_SPEC16 = (2u << 12),
+ IONIC_V1_SPEC_FIRST_SGE = 2,
+};
+
+/* queue pair v2 send opcodes */
+enum ionic_v2_op {
+ IONIC_V2_OPSL_OUT = 0x20,
+ IONIC_V2_OPSL_IMM = 0x40,
+ IONIC_V2_OPSL_INV = 0x80,
+
+ IONIC_V2_OP_SEND = 0x0 | IONIC_V2_OPSL_OUT,
+ IONIC_V2_OP_SEND_IMM = IONIC_V2_OP_SEND | IONIC_V2_OPSL_IMM,
+ IONIC_V2_OP_SEND_INV = IONIC_V2_OP_SEND | IONIC_V2_OPSL_INV,
+
+ IONIC_V2_OP_RDMA_WRITE = 0x1 | IONIC_V2_OPSL_OUT,
+ IONIC_V2_OP_RDMA_WRITE_IMM = IONIC_V2_OP_RDMA_WRITE | IONIC_V2_OPSL_IMM,
+
+ IONIC_V2_OP_RDMA_READ = 0x2,
+
+ IONIC_V2_OP_ATOMIC_CS = 0x4,
+ IONIC_V2_OP_ATOMIC_FA = 0x5,
+ IONIC_V2_OP_REG_MR = 0x6,
+ IONIC_V2_OP_LOCAL_INV = 0x7,
+ IONIC_V2_OP_BIND_MW = 0x8,
+};
+
+static inline size_t ionic_v1_send_wqe_min_size(int min_sge, int min_data,
+ int spec, bool expdb)
+{
+ size_t sz_wqe, sz_sgl, sz_data;
+
+ if (spec > IONIC_V1_SPEC_FIRST_SGE)
+ min_sge += IONIC_V1_SPEC_FIRST_SGE;
+
+ if (expdb) {
+ min_sge += 1;
+ min_data += IONIC_EXP_DBELL_SZ;
+ }
+
+ sz_wqe = sizeof(struct ionic_v1_wqe);
+ sz_sgl = offsetof(struct ionic_v1_wqe, common.pld.sgl[min_sge]);
+ sz_data = offsetof(struct ionic_v1_wqe, common.pld.data[min_data]);
+
+ if (sz_sgl > sz_wqe)
+ sz_wqe = sz_sgl;
+
+ if (sz_data > sz_wqe)
+ sz_wqe = sz_data;
+
+ return sz_wqe;
+}
+
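+/*
+ * Number of sges that fit in the payload of a send wqe of the given
+ * stride, e.g. with neither spec nor expdb, a 64 byte stride leaves
+ * room for two sges after the 32 byte header.
+ */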
+static inline int ionic_v1_send_wqe_max_sge(u8 stride_log2, int spec,
+ bool expdb)
+{
+ struct ionic_sge *sge = (void *)(1ull << stride_log2);
+ struct ionic_v1_wqe *wqe = (void *)0;
+ int num_sge = 0;
+
+ if (expdb)
+ sge -= 1;
+
+ if (spec > IONIC_V1_SPEC_FIRST_SGE)
+ num_sge = IONIC_V1_SPEC_FIRST_SGE;
+
+ num_sge = sge - &wqe->common.pld.sgl[num_sge];
+
+ if (spec && num_sge > spec)
+ num_sge = spec;
+
+ return num_sge;
+}
+
+static inline int ionic_v1_send_wqe_max_data(u8 stride_log2, bool expdb)
+{
+ struct ionic_v1_wqe *wqe = (void *)0;
+ __u8 *data = (void *)(1ull << stride_log2);
+
+ if (expdb)
+ data -= IONIC_EXP_DBELL_SZ;
+
+ return data - wqe->common.pld.data;
+}
+
+static inline size_t ionic_v1_recv_wqe_min_size(int min_sge, int spec,
+ bool expdb)
+{
+ size_t sz_wqe, sz_sgl;
+
+ if (spec > IONIC_V1_SPEC_FIRST_SGE)
+ min_sge += IONIC_V1_SPEC_FIRST_SGE;
+
+ if (expdb)
+ min_sge += 1;
+
+ sz_wqe = sizeof(struct ionic_v1_wqe);
+ sz_sgl = offsetof(struct ionic_v1_wqe, recv.pld.sgl[min_sge]);
+
+ if (sz_sgl > sz_wqe)
+ sz_wqe = sz_sgl;
+
+ return sz_wqe;
+}
+
+static inline int ionic_v1_recv_wqe_max_sge(u8 stride_log2, int spec,
+ bool expdb)
+{
+ struct ionic_sge *sge = (void *)(1ull << stride_log2);
+ struct ionic_v1_wqe *wqe = (void *)0;
+ int num_sge = 0;
+
+ if (expdb)
+ sge -= 1;
+
+ if (spec > IONIC_V1_SPEC_FIRST_SGE)
+ num_sge = IONIC_V1_SPEC_FIRST_SGE;
+
+ num_sge = sge - &wqe->recv.pld.sgl[num_sge];
+
+ if (spec && num_sge > spec)
+ num_sge = spec;
+
+ return num_sge;
+}
+
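+/* Select the spec sge limit to use for a requested min_sge, or 0 to disable */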
+static inline int ionic_v1_use_spec_sge(int min_sge, int spec)
+{
+ if (!spec || min_sge > spec)
+ return 0;
+
+ if (min_sge <= IONIC_V1_SPEC_FIRST_SGE)
+ return IONIC_V1_SPEC_FIRST_SGE;
+
+ return spec;
+}
+
+struct ionic_admin_stats_hdr {
+ __le64 dma_addr;
+ __le32 length;
+ __le32 id_ver;
+ __u8 type_state;
+} __packed;
+
+#define IONIC_ADMIN_STATS_HDRS_IN_V1_LEN 17
+static_assert(sizeof(struct ionic_admin_stats_hdr) ==
+ IONIC_ADMIN_STATS_HDRS_IN_V1_LEN);
+
+struct ionic_admin_create_ah {
+ __le64 dma_addr;
+ __le32 length;
+ __le32 pd_id;
+ __le32 id_ver;
+ __le16 dbid_flags;
+ __u8 csum_profile;
+ __u8 crypto;
+} __packed;
+
+#define IONIC_ADMIN_CREATE_AH_IN_V1_LEN 24
+static_assert(sizeof(struct ionic_admin_create_ah) ==
+ IONIC_ADMIN_CREATE_AH_IN_V1_LEN);
+
+struct ionic_admin_destroy_ah {
+ __le32 ah_id;
+} __packed;
+
+#define IONIC_ADMIN_DESTROY_AH_IN_V1_LEN 4
+static_assert(sizeof(struct ionic_admin_destroy_ah) ==
+ IONIC_ADMIN_DESTROY_AH_IN_V1_LEN);
+
+struct ionic_admin_query_ah {
+ __le64 dma_addr;
+} __packed;
+
+#define IONIC_ADMIN_QUERY_AH_IN_V1_LEN 8
+static_assert(sizeof(struct ionic_admin_query_ah) ==
+ IONIC_ADMIN_QUERY_AH_IN_V1_LEN);
+
+struct ionic_admin_create_mr {
+ __le64 va;
+ __le64 length;
+ __le32 pd_id;
+ __le32 id_ver;
+ __le32 tbl_index;
+ __le32 map_count;
+ __le64 dma_addr;
+ __le16 dbid_flags;
+ __u8 pt_type;
+ __u8 dir_size_log2;
+ __u8 page_size_log2;
+} __packed;
+
+#define IONIC_ADMIN_CREATE_MR_IN_V1_LEN 45
+static_assert(sizeof(struct ionic_admin_create_mr) ==
+ IONIC_ADMIN_CREATE_MR_IN_V1_LEN);
+
+struct ionic_admin_destroy_mr {
+ __le32 mr_id;
+} __packed;
+
+#define IONIC_ADMIN_DESTROY_MR_IN_V1_LEN 4
+static_assert(sizeof(struct ionic_admin_destroy_mr) ==
+ IONIC_ADMIN_DESTROY_MR_IN_V1_LEN);
+
+struct ionic_admin_create_cq {
+ __le32 eq_id;
+ __u8 depth_log2;
+ __u8 stride_log2;
+ __u8 dir_size_log2_rsvd;
+ __u8 page_size_log2;
+ __le32 cq_flags;
+ __le32 id_ver;
+ __le32 tbl_index;
+ __le32 map_count;
+ __le64 dma_addr;
+ __le16 dbid_flags;
+} __packed;
+
+#define IONIC_ADMIN_CREATE_CQ_IN_V1_LEN 34
+static_assert(sizeof(struct ionic_admin_create_cq) ==
+ IONIC_ADMIN_CREATE_CQ_IN_V1_LEN);
+
+struct ionic_admin_destroy_cq {
+ __le32 cq_id;
+} __packed;
+
+#define IONIC_ADMIN_DESTROY_CQ_IN_V1_LEN 4
+static_assert(sizeof(struct ionic_admin_destroy_cq) ==
+ IONIC_ADMIN_DESTROY_CQ_IN_V1_LEN);
+
+struct ionic_admin_create_qp {
+ __le32 pd_id;
+ __be32 priv_flags;
+ __le32 sq_cq_id;
+ __u8 sq_depth_log2;
+ __u8 sq_stride_log2;
+ __u8 sq_dir_size_log2_rsvd;
+ __u8 sq_page_size_log2;
+ __le32 sq_tbl_index_xrcd_id;
+ __le32 sq_map_count;
+ __le64 sq_dma_addr;
+ __le32 rq_cq_id;
+ __u8 rq_depth_log2;
+ __u8 rq_stride_log2;
+ __u8 rq_dir_size_log2_rsvd;
+ __u8 rq_page_size_log2;
+ __le32 rq_tbl_index_srq_id;
+ __le32 rq_map_count;
+ __le64 rq_dma_addr;
+ __le32 id_ver;
+ __le16 dbid_flags;
+ __u8 type_state;
+ __u8 rsvd;
+} __packed;
+
+#define IONIC_ADMIN_CREATE_QP_IN_V1_LEN 64
+static_assert(sizeof(struct ionic_admin_create_qp) ==
+ IONIC_ADMIN_CREATE_QP_IN_V1_LEN);
+
+struct ionic_admin_destroy_qp {
+ __le32 qp_id;
+} __packed;
+
+#define IONIC_ADMIN_DESTROY_QP_IN_V1_LEN 4
+static_assert(sizeof(struct ionic_admin_destroy_qp) ==
+ IONIC_ADMIN_DESTROY_QP_IN_V1_LEN);
+
+struct ionic_admin_mod_qp {
+ __be32 attr_mask;
+ __u8 dcqcn_profile;
+ __u8 tfp_csum_profile;
+ __be16 access_flags;
+ __le32 rq_psn;
+ __le32 sq_psn;
+ __le32 qkey_dest_qpn;
+ __le32 rate_limit_kbps;
+ __u8 pmtu;
+ __u8 retry;
+ __u8 rnr_timer;
+ __u8 retry_timeout;
+ __u8 rsq_depth;
+ __u8 rrq_depth;
+ __le16 pkey_id;
+ __le32 ah_id_len;
+ __u8 en_pcp;
+ __u8 ip_dscp;
+ __u8 rsvd2;
+ __u8 type_state;
+ union {
+ struct {
+ __le16 rsvd1;
+ };
+ __le32 rrq_index;
+ };
+ __le32 rsq_index;
+ __le64 dma_addr;
+ __le32 id_ver;
+} __packed;
+
+#define IONIC_ADMIN_MODIFY_QP_IN_V1_LEN 60
+static_assert(sizeof(struct ionic_admin_mod_qp) ==
+ IONIC_ADMIN_MODIFY_QP_IN_V1_LEN);
+
+struct ionic_admin_query_qp {
+ __le64 hdr_dma_addr;
+ __le64 sq_dma_addr;
+ __le64 rq_dma_addr;
+ __le32 ah_id;
+ __le32 id_ver;
+ __le16 dbid_flags;
+} __packed;
+
+#define IONIC_ADMIN_QUERY_QP_IN_V1_LEN 34
+static_assert(sizeof(struct ionic_admin_query_qp) ==
+ IONIC_ADMIN_QUERY_QP_IN_V1_LEN);
+
+#define ADMIN_WQE_STRIDE 64
+#define ADMIN_WQE_HDR_LEN 4
+
+/* admin queue v1 wqe */
+struct ionic_v1_admin_wqe {
+ __u8 op;
+ __u8 rsvd;
+ __le16 len;
+
+ union {
+ struct ionic_admin_stats_hdr stats;
+ struct ionic_admin_create_ah create_ah;
+ struct ionic_admin_destroy_ah destroy_ah;
+ struct ionic_admin_query_ah query_ah;
+ struct ionic_admin_create_mr create_mr;
+ struct ionic_admin_destroy_mr destroy_mr;
+ struct ionic_admin_create_cq create_cq;
+ struct ionic_admin_destroy_cq destroy_cq;
+ struct ionic_admin_create_qp create_qp;
+ struct ionic_admin_destroy_qp destroy_qp;
+ struct ionic_admin_mod_qp mod_qp;
+ struct ionic_admin_query_qp query_qp;
+ } cmd;
+};
+
+/* side data for query qp */
+struct ionic_v1_admin_query_qp_sq {
+ __u8 rnr_timer;
+ __u8 retry_timeout;
+ __be16 access_perms_flags;
+ __be16 rsvd;
+ __be16 pkey_id;
+ __be32 qkey_dest_qpn;
+ __be32 rate_limit_kbps;
+ __be32 rq_psn;
+};
+
+struct ionic_v1_admin_query_qp_rq {
+ __u8 state_pmtu;
+ __u8 retry_rnrtry;
+ __u8 rrq_depth;
+ __u8 rsq_depth;
+ __be32 sq_psn;
+ __be16 access_perms_flags;
+ __be16 rsvd;
+};
+
+/* admin queue v1 opcodes */
+enum ionic_v1_admin_op {
+ IONIC_V1_ADMIN_NOOP,
+ IONIC_V1_ADMIN_CREATE_CQ,
+ IONIC_V1_ADMIN_CREATE_QP,
+ IONIC_V1_ADMIN_CREATE_MR,
+ IONIC_V1_ADMIN_STATS_HDRS,
+ IONIC_V1_ADMIN_STATS_VALS,
+ IONIC_V1_ADMIN_DESTROY_MR,
+ IONIC_V1_ADMIN_RSVD_7, /* RESIZE_CQ */
+ IONIC_V1_ADMIN_DESTROY_CQ,
+ IONIC_V1_ADMIN_MODIFY_QP,
+ IONIC_V1_ADMIN_QUERY_QP,
+ IONIC_V1_ADMIN_DESTROY_QP,
+ IONIC_V1_ADMIN_DEBUG,
+ IONIC_V1_ADMIN_CREATE_AH,
+ IONIC_V1_ADMIN_QUERY_AH,
+ IONIC_V1_ADMIN_MODIFY_DCQCN,
+ IONIC_V1_ADMIN_DESTROY_AH,
+ IONIC_V1_ADMIN_QP_STATS_HDRS,
+ IONIC_V1_ADMIN_QP_STATS_VALS,
+ IONIC_V1_ADMIN_OPCODES_MAX,
+};
+
+/* admin queue v1 cqe status */
+enum ionic_v1_admin_status {
+ IONIC_V1_ASTS_OK,
+ IONIC_V1_ASTS_BAD_CMD,
+ IONIC_V1_ASTS_BAD_INDEX,
+ IONIC_V1_ASTS_BAD_STATE,
+ IONIC_V1_ASTS_BAD_TYPE,
+ IONIC_V1_ASTS_BAD_ATTR,
+ IONIC_V1_ASTS_MSG_TOO_BIG,
+};
+
+/* event queue v1 eqe */
+struct ionic_v1_eqe {
+ __be32 evt;
+};
+
+/* bits for eqe evt */
+enum ionic_v1_eqe_evt_bits {
+ IONIC_V1_EQE_COLOR = BIT(0),
+ IONIC_V1_EQE_TYPE_SHIFT = 1,
+ IONIC_V1_EQE_TYPE_MASK = 0x7,
+ IONIC_V1_EQE_CODE_SHIFT = 4,
+ IONIC_V1_EQE_CODE_MASK = 0xf,
+ IONIC_V1_EQE_QID_SHIFT = 8,
+
+ /* cq events */
+ IONIC_V1_EQE_TYPE_CQ = 0,
+ /* cq normal events */
+ IONIC_V1_EQE_CQ_NOTIFY = 0,
+ /* cq error events */
+ IONIC_V1_EQE_CQ_ERR = 8,
+
+ /* qp and srq events */
+ IONIC_V1_EQE_TYPE_QP = 1,
+ /* qp normal events */
+ IONIC_V1_EQE_SRQ_LEVEL = 0,
+ IONIC_V1_EQE_SQ_DRAIN = 1,
+ IONIC_V1_EQE_QP_COMM_EST = 2,
+ IONIC_V1_EQE_QP_LAST_WQE = 3,
+ /* qp error events */
+ IONIC_V1_EQE_QP_ERR = 8,
+ IONIC_V1_EQE_QP_ERR_REQUEST = 9,
+ IONIC_V1_EQE_QP_ERR_ACCESS = 10,
+};
+
+enum ionic_tfp_csum_profiles {
+ IONIC_TFP_CSUM_PROF_ETH_IPV4_UDP = 0,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP = 1,
+ IONIC_TFP_CSUM_PROF_ETH_IPV6_UDP = 2,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_UDP = 3,
+ IONIC_TFP_CSUM_PROF_IPV4_UDP_VXLAN_ETH_QTAG_IPV4_UDP = 4,
+ IONIC_TFP_CSUM_PROF_IPV4_UDP_VXLAN_ETH_QTAG_IPV6_UDP = 5,
+ IONIC_TFP_CSUM_PROF_QTAG_IPV4_UDP_VXLAN_ETH_QTAG_IPV4_UDP = 6,
+ IONIC_TFP_CSUM_PROF_QTAG_IPV4_UDP_VXLAN_ETH_QTAG_IPV6_UDP = 7,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP_ESP_IPV4_UDP = 8,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_ESP_UDP = 9,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP_ESP_UDP = 10,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_ESP_UDP = 11,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP_CSUM = 12,
+};
+
+static inline bool ionic_v1_eqe_color(struct ionic_v1_eqe *eqe)
+{
+ return eqe->evt & cpu_to_be32(IONIC_V1_EQE_COLOR);
+}
+
+static inline u32 ionic_v1_eqe_evt(struct ionic_v1_eqe *eqe)
+{
+ return be32_to_cpu(eqe->evt);
+}
+
+static inline u8 ionic_v1_eqe_evt_type(u32 evt)
+{
+ return (evt >> IONIC_V1_EQE_TYPE_SHIFT) & IONIC_V1_EQE_TYPE_MASK;
+}
+
+static inline u8 ionic_v1_eqe_evt_code(u32 evt)
+{
+ return (evt >> IONIC_V1_EQE_CODE_SHIFT) & IONIC_V1_EQE_CODE_MASK;
+}
+
+static inline u32 ionic_v1_eqe_evt_qid(u32 evt)
+{
+ return evt >> IONIC_V1_EQE_QID_SHIFT;
+}
+
+enum ionic_v1_stat_bits {
+ IONIC_V1_STAT_TYPE_SHIFT = 28,
+ IONIC_V1_STAT_TYPE_NONE = 0,
+ IONIC_V1_STAT_TYPE_8 = 1,
+ IONIC_V1_STAT_TYPE_LE16 = 2,
+ IONIC_V1_STAT_TYPE_LE32 = 3,
+ IONIC_V1_STAT_TYPE_LE64 = 4,
+ IONIC_V1_STAT_TYPE_BE16 = 5,
+ IONIC_V1_STAT_TYPE_BE32 = 6,
+ IONIC_V1_STAT_TYPE_BE64 = 7,
+ IONIC_V1_STAT_OFF_MASK = BIT(IONIC_V1_STAT_TYPE_SHIFT) - 1,
+};
+
+struct ionic_v1_stat {
+ union {
+ __be32 be_type_off;
+ u32 type_off;
+ };
+ char name[28];
+};
+
+static inline int ionic_v1_stat_type(struct ionic_v1_stat *hdr)
+{
+ return hdr->type_off >> IONIC_V1_STAT_TYPE_SHIFT;
+}
+
+static inline unsigned int ionic_v1_stat_off(struct ionic_v1_stat *hdr)
+{
+ return hdr->type_off & IONIC_V1_STAT_OFF_MASK;
+}
+
+#endif /* _IONIC_FW_H_ */
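
The send/recv sizing helpers above (ionic_v1_send_wqe_max_data(), ionic_v1_recv_wqe_max_sge(), and friends) compute their limits by doing pointer arithmetic against a null base, so the result depends only on the structure layout and the WQE stride. A minimal stand-alone sketch of that trick follows; the demo_* structures are simplified stand-ins, not the driver's real WQE layout.

/*
 * Offset-from-zero sizing sketch: treat address 0 as the start of a
 * WQE and address (1 << stride_log2) as its end, then count how many
 * SGEs fit between the SGL and the end of the stride.  Formally this
 * is undefined behavior in strict C, but it resolves to constants at
 * compile time, which is what the driver relies on.
 */
#include <stdio.h>
#include <stdint.h>

struct demo_sge { uint64_t addr; uint32_t len; uint32_t key; };

struct demo_wqe {
	uint8_t hdr[32];	/* fixed header before the SGL */
	struct demo_sge sgl[];	/* SGEs fill the rest of the stride */
};

static int demo_max_sge(uint8_t stride_log2)
{
	struct demo_wqe *wqe = (void *)0;
	struct demo_sge *end = (void *)(1ull << stride_log2);

	/* pointer difference counts whole SGEs from sgl[0] to the end */
	return end - &wqe->sgl[0];
}

int main(void)
{
	printf("64B stride: %d sge\n", demo_max_sge(6));  /* (64-32)/16 = 2 */
	printf("256B stride: %d sge\n", demo_max_sge(8)); /* (256-32)/16 = 14 */
	return 0;
}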
diff --git a/drivers/infiniband/hw/ionic/ionic_hw_stats.c b/drivers/infiniband/hw/ionic/ionic_hw_stats.c
new file mode 100644
index 000000000000..244a80dde08f
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_hw_stats.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/dma-mapping.h>
+
+#include "ionic_fw.h"
+#include "ionic_ibdev.h"
+
+static int ionic_v1_stat_normalize(struct ionic_v1_stat *hw_stats,
+ int hw_stats_count)
+{
+ int hw_stat_i;
+
+ for (hw_stat_i = 0; hw_stat_i < hw_stats_count; ++hw_stat_i) {
+ struct ionic_v1_stat *stat = &hw_stats[hw_stat_i];
+
+ stat->type_off = be32_to_cpu(stat->be_type_off);
+ stat->name[sizeof(stat->name) - 1] = 0;
+ if (ionic_v1_stat_type(stat) == IONIC_V1_STAT_TYPE_NONE)
+ break;
+ }
+
+ return hw_stat_i;
+}
+
+static void ionic_fill_stats_desc(struct rdma_stat_desc *hw_stats_hdrs,
+ struct ionic_v1_stat *hw_stats,
+ int hw_stats_count)
+{
+ int hw_stat_i;
+
+ for (hw_stat_i = 0; hw_stat_i < hw_stats_count; ++hw_stat_i) {
+ struct ionic_v1_stat *stat = &hw_stats[hw_stat_i];
+
+ hw_stats_hdrs[hw_stat_i].name = stat->name;
+ }
+}
+
+static u64 ionic_v1_stat_val(struct ionic_v1_stat *stat,
+ void *vals_buf, size_t vals_len)
+{
+ unsigned int off = ionic_v1_stat_off(stat);
+ int type = ionic_v1_stat_type(stat);
+
+#define __ionic_v1_stat_validate(__type) \
+ ((off + sizeof(__type) <= vals_len) && \
+ (IS_ALIGNED(off, sizeof(__type))))
+
+ switch (type) {
+ case IONIC_V1_STAT_TYPE_8:
+ if (__ionic_v1_stat_validate(u8))
+ return *(u8 *)(vals_buf + off);
+ break;
+ case IONIC_V1_STAT_TYPE_LE16:
+ if (__ionic_v1_stat_validate(__le16))
+ return le16_to_cpu(*(__le16 *)(vals_buf + off));
+ break;
+ case IONIC_V1_STAT_TYPE_LE32:
+ if (__ionic_v1_stat_validate(__le32))
+ return le32_to_cpu(*(__le32 *)(vals_buf + off));
+ break;
+ case IONIC_V1_STAT_TYPE_LE64:
+ if (__ionic_v1_stat_validate(__le64))
+ return le64_to_cpu(*(__le64 *)(vals_buf + off));
+ break;
+ case IONIC_V1_STAT_TYPE_BE16:
+ if (__ionic_v1_stat_validate(__be16))
+ return be16_to_cpu(*(__be16 *)(vals_buf + off));
+ break;
+ case IONIC_V1_STAT_TYPE_BE32:
+ if (__ionic_v1_stat_validate(__be32))
+ return be32_to_cpu(*(__be32 *)(vals_buf + off));
+ break;
+ case IONIC_V1_STAT_TYPE_BE64:
+ if (__ionic_v1_stat_validate(__be64))
+ return be64_to_cpu(*(__be64 *)(vals_buf + off));
+ break;
+ }
+
+ return ~0ull;
+#undef __ionic_v1_stat_validate
+}
+
+static int ionic_hw_stats_cmd(struct ionic_ibdev *dev,
+ dma_addr_t dma, size_t len, int qid, int op)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = op,
+ .len = cpu_to_le16(IONIC_ADMIN_STATS_HDRS_IN_V1_LEN),
+ .cmd.stats = {
+ .dma_addr = cpu_to_le64(dma),
+ .length = cpu_to_le32(len),
+ .id_ver = cpu_to_le32(qid),
+ },
+ }
+ };
+
+ if (dev->lif_cfg.admin_opcodes <= op)
+ return -EBADRQC;
+
+ ionic_admin_post(dev, &wr);
+
+ return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_INTERRUPT);
+}
+
+static int ionic_init_hw_stats(struct ionic_ibdev *dev)
+{
+ dma_addr_t hw_stats_dma;
+ int rc, hw_stats_count;
+
+ if (dev->hw_stats_hdrs)
+ return 0;
+
+ dev->hw_stats_count = 0;
+
+ /* buffer for current values from the device */
+ dev->hw_stats_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!dev->hw_stats_buf) {
+ rc = -ENOMEM;
+ goto err_buf;
+ }
+
+ /* buffer for names, sizes, offsets of values */
+ dev->hw_stats = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!dev->hw_stats) {
+ rc = -ENOMEM;
+ goto err_hw_stats;
+ }
+
+ /* request the names, sizes, offsets */
+ hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, dev->hw_stats,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
+ if (rc)
+ goto err_dma;
+
+ rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE, 0,
+ IONIC_V1_ADMIN_STATS_HDRS);
+ if (rc)
+ goto err_cmd;
+
+ dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /* normalize and count the number of hw_stats */
+ hw_stats_count =
+ ionic_v1_stat_normalize(dev->hw_stats,
+ PAGE_SIZE / sizeof(*dev->hw_stats));
+ if (!hw_stats_count) {
+ rc = -ENODATA;
+ goto err_dma;
+ }
+
+ dev->hw_stats_count = hw_stats_count;
+
+ /* alloc and init array of names, for alloc_hw_stats */
+ dev->hw_stats_hdrs = kcalloc(hw_stats_count,
+ sizeof(*dev->hw_stats_hdrs),
+ GFP_KERNEL);
+ if (!dev->hw_stats_hdrs) {
+ rc = -ENOMEM;
+ goto err_dma;
+ }
+
+ ionic_fill_stats_desc(dev->hw_stats_hdrs, dev->hw_stats,
+ hw_stats_count);
+
+ return 0;
+
+err_cmd:
+ dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+err_dma:
+ kfree(dev->hw_stats);
+err_hw_stats:
+ kfree(dev->hw_stats_buf);
+err_buf:
+ dev->hw_stats_count = 0;
+ dev->hw_stats = NULL;
+ dev->hw_stats_buf = NULL;
+ dev->hw_stats_hdrs = NULL;
+ return rc;
+}
+
+static struct rdma_hw_stats *ionic_alloc_hw_stats(struct ib_device *ibdev,
+ u32 port)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
+
+ if (port != 1)
+ return NULL;
+
+ return rdma_alloc_hw_stats_struct(dev->hw_stats_hdrs,
+ dev->hw_stats_count,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static int ionic_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *hw_stats,
+ u32 port, int index)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
+ dma_addr_t hw_stats_dma;
+ int rc, hw_stat_i;
+
+ if (port != 1)
+ return -EINVAL;
+
+ hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, dev->hw_stats_buf,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
+ if (rc)
+ goto err_dma;
+
+ rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE,
+ 0, IONIC_V1_ADMIN_STATS_VALS);
+ if (rc)
+ goto err_cmd;
+
+ dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ for (hw_stat_i = 0; hw_stat_i < dev->hw_stats_count; ++hw_stat_i)
+ hw_stats->value[hw_stat_i] =
+ ionic_v1_stat_val(&dev->hw_stats[hw_stat_i],
+ dev->hw_stats_buf, PAGE_SIZE);
+
+ return hw_stat_i;
+
+err_cmd:
+ dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+err_dma:
+ return rc;
+}
+
+static struct rdma_hw_stats *
+ionic_counter_alloc_stats(struct rdma_counter *counter)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
+ struct ionic_counter *cntr;
+ int err;
+
+ cntr = kzalloc(sizeof(*cntr), GFP_KERNEL);
+ if (!cntr)
+ return NULL;
+
+ /* buffer for current values from the device */
+ cntr->vals = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!cntr->vals)
+ goto err_vals;
+
+ err = xa_alloc(&dev->counter_stats->xa_counters, &counter->id,
+ cntr,
+ XA_LIMIT(0, IONIC_MAX_QPID),
+ GFP_KERNEL);
+ if (err)
+ goto err_xa;
+
+ INIT_LIST_HEAD(&cntr->qp_list);
+
+ return rdma_alloc_hw_stats_struct(dev->counter_stats->stats_hdrs,
+ dev->counter_stats->queue_stats_count,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+err_xa:
+ kfree(cntr->vals);
+err_vals:
+ kfree(cntr);
+
+ return NULL;
+}
+
+static int ionic_counter_dealloc(struct rdma_counter *counter)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
+ struct ionic_counter *cntr;
+
+ cntr = xa_erase(&dev->counter_stats->xa_counters, counter->id);
+ if (!cntr)
+ return -EINVAL;
+
+ kfree(cntr->vals);
+ kfree(cntr);
+
+ return 0;
+}
+
+static int ionic_counter_bind_qp(struct rdma_counter *counter,
+ struct ib_qp *ibqp,
+ u32 port)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ struct ionic_counter *cntr;
+
+ cntr = xa_load(&dev->counter_stats->xa_counters, counter->id);
+ if (!cntr)
+ return -EINVAL;
+
+ list_add_tail(&qp->qp_list_counter, &cntr->qp_list);
+ ibqp->counter = counter;
+
+ return 0;
+}
+
+static int ionic_counter_unbind_qp(struct ib_qp *ibqp, u32 port)
+{
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+
+ if (ibqp->counter) {
+ list_del(&qp->qp_list_counter);
+ ibqp->counter = NULL;
+ }
+
+ return 0;
+}
+
+static int ionic_get_qp_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *hw_stats,
+ u32 counter_id)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
+ struct ionic_counter_stats *cs;
+ struct ionic_counter *cntr;
+ dma_addr_t hw_stats_dma;
+ struct ionic_qp *qp;
+ int rc, stat_i = 0;
+
+ cs = dev->counter_stats;
+ cntr = xa_load(&cs->xa_counters, counter_id);
+ if (!cntr)
+ return -EINVAL;
+
+ hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, cntr->vals,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
+ if (rc)
+ return rc;
+
+ memset(hw_stats->value, 0, sizeof(u64) * hw_stats->num_counters);
+
+ list_for_each_entry(qp, &cntr->qp_list, qp_list_counter) {
+ rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE,
+ qp->qpid,
+ IONIC_V1_ADMIN_QP_STATS_VALS);
+ if (rc)
+ goto err_cmd;
+
+ for (stat_i = 0; stat_i < cs->queue_stats_count; ++stat_i)
+ hw_stats->value[stat_i] +=
+ ionic_v1_stat_val(&cs->hdr[stat_i],
+ cntr->vals,
+ PAGE_SIZE);
+ }
+
+ dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ return stat_i;
+
+err_cmd:
+ dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ return rc;
+}
+
+static int ionic_counter_update_stats(struct rdma_counter *counter)
+{
+ return ionic_get_qp_stats(counter->device, counter->stats, counter->id);
+}
+
+static int ionic_alloc_counters(struct ionic_ibdev *dev)
+{
+ struct ionic_counter_stats *cs = dev->counter_stats;
+ int rc, hw_stats_count;
+ dma_addr_t hdr_dma;
+
+ /* buffer for names, sizes, offsets of values */
+ cs->hdr = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!cs->hdr)
+ return -ENOMEM;
+
+ hdr_dma = dma_map_single(dev->lif_cfg.hwdev, cs->hdr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
+ if (rc)
+ goto err_dma;
+
+ rc = ionic_hw_stats_cmd(dev, hdr_dma, PAGE_SIZE, 0,
+ IONIC_V1_ADMIN_QP_STATS_HDRS);
+ if (rc)
+ goto err_cmd;
+
+ dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /* normalize and count the number of hw_stats */
+ hw_stats_count = ionic_v1_stat_normalize(cs->hdr,
+ PAGE_SIZE / sizeof(*cs->hdr));
+ if (!hw_stats_count) {
+ rc = -ENODATA;
+ goto err_dma;
+ }
+
+ cs->queue_stats_count = hw_stats_count;
+
+ /* alloc and init array of names */
+ cs->stats_hdrs = kcalloc(hw_stats_count, sizeof(*cs->stats_hdrs),
+ GFP_KERNEL);
+ if (!cs->stats_hdrs) {
+ rc = -ENOMEM;
+ goto err_dma;
+ }
+
+ ionic_fill_stats_desc(cs->stats_hdrs, cs->hdr, hw_stats_count);
+
+ return 0;
+
+err_cmd:
+ dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+err_dma:
+ kfree(cs->hdr);
+
+ return rc;
+}
+
+static const struct ib_device_ops ionic_hw_stats_ops = {
+ .driver_id = RDMA_DRIVER_IONIC,
+ .alloc_hw_port_stats = ionic_alloc_hw_stats,
+ .get_hw_stats = ionic_get_hw_stats,
+};
+
+static const struct ib_device_ops ionic_counter_stats_ops = {
+ .counter_alloc_stats = ionic_counter_alloc_stats,
+ .counter_dealloc = ionic_counter_dealloc,
+ .counter_bind_qp = ionic_counter_bind_qp,
+ .counter_unbind_qp = ionic_counter_unbind_qp,
+ .counter_update_stats = ionic_counter_update_stats,
+};
+
+void ionic_stats_init(struct ionic_ibdev *dev)
+{
+ u16 stats_type = dev->lif_cfg.stats_type;
+ int rc;
+
+ if (stats_type & IONIC_LIF_RDMA_STAT_GLOBAL) {
+ rc = ionic_init_hw_stats(dev);
+ if (rc)
+ ibdev_dbg(&dev->ibdev, "Failed to init hw stats\n");
+ else
+ ib_set_device_ops(&dev->ibdev, &ionic_hw_stats_ops);
+ }
+
+ if (stats_type & IONIC_LIF_RDMA_STAT_QP) {
+ dev->counter_stats = kzalloc(sizeof(*dev->counter_stats),
+ GFP_KERNEL);
+ if (!dev->counter_stats)
+ return;
+
+ rc = ionic_alloc_counters(dev);
+ if (rc) {
+ ibdev_dbg(&dev->ibdev, "Failed to init counter stats\n");
+ kfree(dev->counter_stats);
+ dev->counter_stats = NULL;
+ return;
+ }
+
+ xa_init_flags(&dev->counter_stats->xa_counters, XA_FLAGS_ALLOC);
+
+ ib_set_device_ops(&dev->ibdev, &ionic_counter_stats_ops);
+ }
+}
+
+void ionic_stats_cleanup(struct ionic_ibdev *dev)
+{
+ if (dev->counter_stats) {
+ xa_destroy(&dev->counter_stats->xa_counters);
+ kfree(dev->counter_stats->hdr);
+ kfree(dev->counter_stats->stats_hdrs);
+ kfree(dev->counter_stats);
+ dev->counter_stats = NULL;
+ }
+
+ kfree(dev->hw_stats);
+ kfree(dev->hw_stats_buf);
+ kfree(dev->hw_stats_hdrs);
+}
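
The stat descriptors returned by the STATS_HDRS command pack a value type and a byte offset into a single 32-bit word, which ionic_v1_stat_val() above then uses to pull each counter out of the values buffer. A small user-space sketch of that decoding follows; the DEMO_* names mirror the enum ionic_v1_stat_bits values but are otherwise made up, and the example assumes a little-endian host so no byte swap is shown.

/* Decode one stat descriptor: type in the top 4 bits, offset below. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define DEMO_STAT_TYPE_SHIFT	28
#define DEMO_STAT_OFF_MASK	((1u << DEMO_STAT_TYPE_SHIFT) - 1)
#define DEMO_STAT_TYPE_LE64	4	/* matches IONIC_V1_STAT_TYPE_LE64 */

int main(void)
{
	uint8_t vals[64] = { 0 };
	uint64_t sample = 12345;
	uint32_t type_off = (DEMO_STAT_TYPE_LE64 << DEMO_STAT_TYPE_SHIFT) | 16;

	/* the device would DMA little-endian stats here; offset 16 in this demo */
	memcpy(&vals[16], &sample, sizeof(sample));

	unsigned int off = type_off & DEMO_STAT_OFF_MASK;
	int type = type_off >> DEMO_STAT_TYPE_SHIFT;

	if (type == DEMO_STAT_TYPE_LE64) {
		uint64_t v;

		memcpy(&v, &vals[off], sizeof(v));
		printf("stat @%u = %llu\n", off, (unsigned long long)v);
	}
	return 0;
}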
diff --git a/drivers/infiniband/hw/ionic/ionic_ibdev.c b/drivers/infiniband/hw/ionic/ionic_ibdev.c
new file mode 100644
index 000000000000..164046d00e5d
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_ibdev.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/pci.h>
+#include <linux/irq.h>
+#include <net/addrconf.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_mad.h>
+
+#include "ionic_ibdev.h"
+
+#define DRIVER_DESCRIPTION "AMD Pensando RoCE HCA driver"
+#define DEVICE_DESCRIPTION "AMD Pensando RoCE HCA"
+
+MODULE_AUTHOR("Allen Hubbe <allen.hubbe@amd.com>");
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("NET_IONIC");
+
+static int ionic_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *attr,
+ struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
+ struct net_device *ndev;
+
+ ndev = ib_device_get_netdev(ibdev, 1);
+ addrconf_ifid_eui48((u8 *)&attr->sys_image_guid, ndev);
+ dev_put(ndev);
+ attr->max_mr_size = dev->lif_cfg.npts_per_lif * PAGE_SIZE / 2;
+ attr->page_size_cap = dev->lif_cfg.page_size_supported;
+
+ attr->vendor_id = to_pci_dev(dev->lif_cfg.hwdev)->vendor;
+ attr->vendor_part_id = to_pci_dev(dev->lif_cfg.hwdev)->device;
+
+ attr->hw_ver = ionic_lif_asic_rev(dev->lif_cfg.lif);
+ attr->fw_ver = 0;
+ attr->max_qp = dev->lif_cfg.qp_count;
+ attr->max_qp_wr = IONIC_MAX_DEPTH;
+ attr->device_cap_flags =
+ IB_DEVICE_MEM_WINDOW |
+ IB_DEVICE_MEM_MGT_EXTENSIONS |
+ IB_DEVICE_MEM_WINDOW_TYPE_2B |
+ 0;
+ attr->max_send_sge =
+ min(ionic_v1_send_wqe_max_sge(dev->lif_cfg.max_stride, 0, false),
+ IONIC_SPEC_HIGH);
+ attr->max_recv_sge =
+ min(ionic_v1_recv_wqe_max_sge(dev->lif_cfg.max_stride, 0, false),
+ IONIC_SPEC_HIGH);
+ attr->max_sge_rd = attr->max_send_sge;
+ attr->max_cq = dev->lif_cfg.cq_count / dev->lif_cfg.udma_count;
+ attr->max_cqe = IONIC_MAX_CQ_DEPTH - IONIC_CQ_GRACE;
+ attr->max_mr = dev->lif_cfg.nmrs_per_lif;
+ attr->max_pd = IONIC_MAX_PD;
+ attr->max_qp_rd_atom = IONIC_MAX_RD_ATOM;
+ attr->max_ee_rd_atom = 0;
+ attr->max_res_rd_atom = IONIC_MAX_RD_ATOM;
+ attr->max_qp_init_rd_atom = IONIC_MAX_RD_ATOM;
+ attr->max_ee_init_rd_atom = 0;
+ attr->atomic_cap = IB_ATOMIC_GLOB;
+ attr->masked_atomic_cap = IB_ATOMIC_GLOB;
+ attr->max_mw = dev->lif_cfg.nmrs_per_lif;
+ attr->max_mcast_grp = 0;
+ attr->max_mcast_qp_attach = 0;
+ attr->max_ah = dev->lif_cfg.nahs_per_lif;
+ attr->max_fast_reg_page_list_len = dev->lif_cfg.npts_per_lif / 2;
+ attr->max_pkeys = IONIC_PKEY_TBL_LEN;
+
+ return 0;
+}
+
+static int ionic_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *attr)
+{
+ struct net_device *ndev;
+
+ if (port != 1)
+ return -EINVAL;
+
+ ndev = ib_device_get_netdev(ibdev, port);
+
+ if (netif_running(ndev) && netif_carrier_ok(ndev)) {
+ attr->state = IB_PORT_ACTIVE;
+ attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+ } else if (netif_running(ndev)) {
+ attr->state = IB_PORT_DOWN;
+ attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
+ } else {
+ attr->state = IB_PORT_DOWN;
+ attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+ }
+
+ attr->max_mtu = iboe_get_mtu(ndev->max_mtu);
+ attr->active_mtu = min(attr->max_mtu, iboe_get_mtu(ndev->mtu));
+ attr->gid_tbl_len = IONIC_GID_TBL_LEN;
+ attr->ip_gids = true;
+ attr->port_cap_flags = 0;
+ attr->max_msg_sz = 0x80000000;
+ attr->pkey_tbl_len = IONIC_PKEY_TBL_LEN;
+ attr->max_vl_num = 1;
+ attr->subnet_prefix = 0xfe80000000000000ull;
+
+ dev_put(ndev);
+
+ return ib_get_eth_speed(ibdev, port,
+ &attr->active_speed,
+ &attr->active_width);
+}
+
+static enum rdma_link_layer ionic_get_link_layer(struct ib_device *ibdev,
+ u32 port)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+static int ionic_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
+ u16 *pkey)
+{
+ if (port != 1)
+ return -EINVAL;
+
+ if (index != 0)
+ return -EINVAL;
+
+ *pkey = IB_DEFAULT_PKEY_FULL;
+
+ return 0;
+}
+
+static int ionic_modify_device(struct ib_device *ibdev, int mask,
+ struct ib_device_modify *attr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
+
+ if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
+ return -EOPNOTSUPP;
+
+ if (mask & IB_DEVICE_MODIFY_NODE_DESC)
+ memcpy(dev->ibdev.node_desc, attr->node_desc,
+ IB_DEVICE_NODE_DESC_MAX);
+
+ return 0;
+}
+
+static int ionic_get_port_immutable(struct ib_device *ibdev, u32 port,
+ struct ib_port_immutable *attr)
+{
+ if (port != 1)
+ return -EINVAL;
+
+ attr->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+
+ attr->pkey_tbl_len = IONIC_PKEY_TBL_LEN;
+ attr->gid_tbl_len = IONIC_GID_TBL_LEN;
+ attr->max_mad_size = IB_MGMT_MAD_SIZE;
+
+ return 0;
+}
+
+static void ionic_get_dev_fw_str(struct ib_device *ibdev, char *str)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
+
+ ionic_lif_fw_version(dev->lif_cfg.lif, str, IB_FW_VERSION_NAME_MAX);
+}
+
+static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct ionic_ibdev *dev =
+ rdma_device_to_drv_device(device, struct ionic_ibdev, ibdev);
+
+ return sysfs_emit(buf, "0x%x\n", ionic_lif_asic_rev(dev->lif_cfg.lif));
+}
+static DEVICE_ATTR_RO(hw_rev);
+
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ionic_ibdev *dev =
+ rdma_device_to_drv_device(device, struct ionic_ibdev, ibdev);
+
+ return sysfs_emit(buf, "%s\n", dev->ibdev.node_desc);
+}
+static DEVICE_ATTR_RO(hca_type);
+
+static struct attribute *ionic_rdma_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ NULL
+};
+
+static const struct attribute_group ionic_rdma_attr_group = {
+ .attrs = ionic_rdma_attributes,
+};
+
+static void ionic_disassociate_ucontext(struct ib_ucontext *ibcontext)
+{
+ /*
+ * Dummy implementation of disassociate_ucontext so that the core
+ * does not wait for userspace to release the context before
+ * cleaning up hw resources.
+ */
+}
+
+static const struct ib_device_ops ionic_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_IONIC,
+ .uverbs_abi_ver = IONIC_ABI_VERSION,
+
+ .alloc_ucontext = ionic_alloc_ucontext,
+ .dealloc_ucontext = ionic_dealloc_ucontext,
+ .mmap = ionic_mmap,
+ .mmap_free = ionic_mmap_free,
+ .alloc_pd = ionic_alloc_pd,
+ .dealloc_pd = ionic_dealloc_pd,
+ .create_ah = ionic_create_ah,
+ .query_ah = ionic_query_ah,
+ .destroy_ah = ionic_destroy_ah,
+ .create_user_ah = ionic_create_ah,
+ .get_dma_mr = ionic_get_dma_mr,
+ .reg_user_mr = ionic_reg_user_mr,
+ .reg_user_mr_dmabuf = ionic_reg_user_mr_dmabuf,
+ .dereg_mr = ionic_dereg_mr,
+ .alloc_mr = ionic_alloc_mr,
+ .map_mr_sg = ionic_map_mr_sg,
+ .alloc_mw = ionic_alloc_mw,
+ .dealloc_mw = ionic_dealloc_mw,
+ .create_cq = ionic_create_cq,
+ .destroy_cq = ionic_destroy_cq,
+ .create_qp = ionic_create_qp,
+ .modify_qp = ionic_modify_qp,
+ .query_qp = ionic_query_qp,
+ .destroy_qp = ionic_destroy_qp,
+
+ .post_send = ionic_post_send,
+ .post_recv = ionic_post_recv,
+ .poll_cq = ionic_poll_cq,
+ .req_notify_cq = ionic_req_notify_cq,
+
+ .query_device = ionic_query_device,
+ .query_port = ionic_query_port,
+ .get_link_layer = ionic_get_link_layer,
+ .query_pkey = ionic_query_pkey,
+ .modify_device = ionic_modify_device,
+ .get_port_immutable = ionic_get_port_immutable,
+ .get_dev_fw_str = ionic_get_dev_fw_str,
+ .device_group = &ionic_rdma_attr_group,
+ .disassociate_ucontext = ionic_disassociate_ucontext,
+
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, ionic_ctx, ibctx),
+ INIT_RDMA_OBJ_SIZE(ib_pd, ionic_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ah, ionic_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_cq, ionic_vcq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_qp, ionic_qp, ibqp),
+ INIT_RDMA_OBJ_SIZE(ib_mw, ionic_mr, ibmw),
+};
+
+static void ionic_init_resids(struct ionic_ibdev *dev)
+{
+ ionic_resid_init(&dev->inuse_cqid, dev->lif_cfg.cq_count);
+ dev->half_cqid_udma_shift =
+ order_base_2(dev->lif_cfg.cq_count / dev->lif_cfg.udma_count);
+ ionic_resid_init(&dev->inuse_pdid, IONIC_MAX_PD);
+ ionic_resid_init(&dev->inuse_ahid, dev->lif_cfg.nahs_per_lif);
+ ionic_resid_init(&dev->inuse_mrid, dev->lif_cfg.nmrs_per_lif);
+ /* skip reserved lkey */
+ dev->next_mrkey = 1;
+ ionic_resid_init(&dev->inuse_qpid, dev->lif_cfg.qp_count);
+ /* skip reserved SMI and GSI qpids */
+ dev->half_qpid_udma_shift =
+ order_base_2(dev->lif_cfg.qp_count / dev->lif_cfg.udma_count);
+ ionic_resid_init(&dev->inuse_dbid, dev->lif_cfg.dbid_count);
+}
+
+static void ionic_destroy_resids(struct ionic_ibdev *dev)
+{
+ ionic_resid_destroy(&dev->inuse_cqid);
+ ionic_resid_destroy(&dev->inuse_pdid);
+ ionic_resid_destroy(&dev->inuse_ahid);
+ ionic_resid_destroy(&dev->inuse_mrid);
+ ionic_resid_destroy(&dev->inuse_qpid);
+ ionic_resid_destroy(&dev->inuse_dbid);
+}
+
+static void ionic_destroy_ibdev(struct ionic_ibdev *dev)
+{
+ ionic_kill_rdma_admin(dev, false);
+ ib_unregister_device(&dev->ibdev);
+ ionic_stats_cleanup(dev);
+ ionic_destroy_rdma_admin(dev);
+ ionic_destroy_resids(dev);
+ WARN_ON(!xa_empty(&dev->qp_tbl));
+ xa_destroy(&dev->qp_tbl);
+ WARN_ON(!xa_empty(&dev->cq_tbl));
+ xa_destroy(&dev->cq_tbl);
+ ib_dealloc_device(&dev->ibdev);
+}
+
+static struct ionic_ibdev *ionic_create_ibdev(struct ionic_aux_dev *ionic_adev)
+{
+ struct ib_device *ibdev;
+ struct ionic_ibdev *dev;
+ struct net_device *ndev;
+ int rc;
+
+ dev = ib_alloc_device(ionic_ibdev, ibdev);
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+
+ ionic_fill_lif_cfg(ionic_adev->lif, &dev->lif_cfg);
+
+ xa_init_flags(&dev->qp_tbl, GFP_ATOMIC);
+ xa_init_flags(&dev->cq_tbl, GFP_ATOMIC);
+
+ ionic_init_resids(dev);
+
+ rc = ionic_rdma_reset_devcmd(dev);
+ if (rc)
+ goto err_reset;
+
+ rc = ionic_create_rdma_admin(dev);
+ if (rc)
+ goto err_admin;
+
+ ibdev = &dev->ibdev;
+ ibdev->dev.parent = dev->lif_cfg.hwdev;
+
+ strscpy(ibdev->name, "ionic_%d", IB_DEVICE_NAME_MAX);
+ strscpy(ibdev->node_desc, DEVICE_DESCRIPTION, IB_DEVICE_NODE_DESC_MAX);
+
+ ibdev->node_type = RDMA_NODE_IB_CA;
+ ibdev->phys_port_cnt = 1;
+
+ /* the first two EQs are reserved for async events */
+ ibdev->num_comp_vectors = dev->lif_cfg.eq_count - 2;
+
+ ndev = ionic_lif_netdev(ionic_adev->lif);
+ addrconf_ifid_eui48((u8 *)&ibdev->node_guid, ndev);
+ rc = ib_device_set_netdev(ibdev, ndev, 1);
+ /* ionic_lif_netdev() returns ndev with refcount held */
+ dev_put(ndev);
+ if (rc)
+ goto err_admin;
+
+ ib_set_device_ops(&dev->ibdev, &ionic_dev_ops);
+
+ ionic_stats_init(dev);
+
+ rc = ib_register_device(ibdev, "ionic_%d", ibdev->dev.parent);
+ if (rc)
+ goto err_register;
+
+ return dev;
+
+err_register:
+ ionic_stats_cleanup(dev);
+err_admin:
+ ionic_kill_rdma_admin(dev, false);
+ ionic_destroy_rdma_admin(dev);
+err_reset:
+ ionic_destroy_resids(dev);
+ xa_destroy(&dev->qp_tbl);
+ xa_destroy(&dev->cq_tbl);
+ ib_dealloc_device(&dev->ibdev);
+
+ return ERR_PTR(rc);
+}
+
+static int ionic_aux_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct ionic_aux_dev *ionic_adev;
+ struct ionic_ibdev *dev;
+
+ ionic_adev = container_of(adev, struct ionic_aux_dev, adev);
+ dev = ionic_create_ibdev(ionic_adev);
+ if (IS_ERR(dev))
+ return dev_err_probe(&adev->dev, PTR_ERR(dev),
+ "Failed to register ibdev\n");
+
+ auxiliary_set_drvdata(adev, dev);
+ ibdev_dbg(&dev->ibdev, "registered\n");
+
+ return 0;
+}
+
+static void ionic_aux_remove(struct auxiliary_device *adev)
+{
+ struct ionic_ibdev *dev = auxiliary_get_drvdata(adev);
+
+ dev_dbg(&adev->dev, "unregister ibdev\n");
+ ionic_destroy_ibdev(dev);
+ dev_dbg(&adev->dev, "unregistered\n");
+}
+
+static const struct auxiliary_device_id ionic_aux_id_table[] = {
+ { .name = "ionic.rdma", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, ionic_aux_id_table);
+
+static struct auxiliary_driver ionic_aux_r_driver = {
+ .name = "rdma",
+ .probe = ionic_aux_probe,
+ .remove = ionic_aux_remove,
+ .id_table = ionic_aux_id_table,
+};
+
+static int __init ionic_mod_init(void)
+{
+ int rc;
+
+ ionic_evt_workq = create_workqueue(KBUILD_MODNAME "-evt");
+ if (!ionic_evt_workq)
+ return -ENOMEM;
+
+ rc = auxiliary_driver_register(&ionic_aux_r_driver);
+ if (rc)
+ goto err_aux;
+
+ return 0;
+
+err_aux:
+ destroy_workqueue(ionic_evt_workq);
+
+ return rc;
+}
+
+static void __exit ionic_mod_exit(void)
+{
+ auxiliary_driver_unregister(&ionic_aux_r_driver);
+ destroy_workqueue(ionic_evt_workq);
+}
+
+module_init(ionic_mod_init);
+module_exit(ionic_mod_exit);
diff --git a/drivers/infiniband/hw/ionic/ionic_ibdev.h b/drivers/infiniband/hw/ionic/ionic_ibdev.h
new file mode 100644
index 000000000000..82fda1e3cdb6
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_ibdev.h
@@ -0,0 +1,517 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_IBDEV_H_
+#define _IONIC_IBDEV_H_
+
+#include <rdma/ib_umem.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_pack.h>
+#include <rdma/uverbs_ioctl.h>
+
+#include <rdma/ionic-abi.h>
+#include <ionic_api.h>
+#include <ionic_regs.h>
+
+#include "ionic_fw.h"
+#include "ionic_queue.h"
+#include "ionic_res.h"
+
+#include "ionic_lif_cfg.h"
+
+/* Config knobs */
+#define IONIC_EQ_DEPTH 511
+#define IONIC_EQ_COUNT 32
+#define IONIC_AQ_DEPTH 63
+#define IONIC_AQ_COUNT 4
+#define IONIC_EQ_ISR_BUDGET 10
+#define IONIC_EQ_WORK_BUDGET 1000
+#define IONIC_MAX_RD_ATOM 16
+#define IONIC_PKEY_TBL_LEN 1
+#define IONIC_GID_TBL_LEN 256
+
+#define IONIC_MAX_QPID 0xffffff
+#define IONIC_SPEC_HIGH 8
+#define IONIC_MAX_PD 1024
+#define IONIC_SQCMB_ORDER 5
+#define IONIC_RQCMB_ORDER 0
+
+#define IONIC_META_LAST ((void *)1ul)
+#define IONIC_META_POSTED ((void *)2ul)
+
+#define IONIC_CQ_GRACE 100
+
+#define IONIC_ROCE_UDP_SPORT 28272
+#define IONIC_DMA_LKEY 0
+#define IONIC_DMA_RKEY IONIC_DMA_LKEY
+
+#define IONIC_CMB_SUPPORTED \
+ (IONIC_CMB_ENABLE | IONIC_CMB_REQUIRE | IONIC_CMB_EXPDB | \
+ IONIC_CMB_WC | IONIC_CMB_UC)
+
+/* resource is not reserved on the device, indicated in tbl_order */
+#define IONIC_RES_INVALID -1
+
+struct ionic_aq;
+struct ionic_cq;
+struct ionic_eq;
+struct ionic_vcq;
+
+enum ionic_admin_state {
+ IONIC_ADMIN_ACTIVE, /* submitting admin commands to queue */
+ IONIC_ADMIN_PAUSED, /* not submitting, but may complete normally */
+ IONIC_ADMIN_KILLED, /* not submitting, locally completed */
+};
+
+enum ionic_admin_flags {
+ IONIC_ADMIN_F_BUSYWAIT = BIT(0), /* Don't sleep */
+ IONIC_ADMIN_F_TEARDOWN = BIT(1), /* In destroy path */
+ IONIC_ADMIN_F_INTERRUPT = BIT(2), /* Interruptible w/timeout */
+};
+
+enum ionic_mmap_flag {
+ IONIC_MMAP_WC = BIT(0),
+};
+
+struct ionic_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ unsigned long size;
+ unsigned long pfn;
+ u8 mmap_flags;
+};
+
+struct ionic_ibdev {
+ struct ib_device ibdev;
+
+ struct ionic_lif_cfg lif_cfg;
+
+ struct xarray qp_tbl;
+ struct xarray cq_tbl;
+
+ struct ionic_resid_bits inuse_dbid;
+ struct ionic_resid_bits inuse_pdid;
+ struct ionic_resid_bits inuse_ahid;
+ struct ionic_resid_bits inuse_mrid;
+ struct ionic_resid_bits inuse_qpid;
+ struct ionic_resid_bits inuse_cqid;
+
+ u8 half_cqid_udma_shift;
+ u8 half_qpid_udma_shift;
+ u8 next_qpid_udma_idx;
+ u8 next_mrkey;
+
+ struct work_struct reset_work;
+ bool reset_posted;
+ u32 reset_cnt;
+
+ struct delayed_work admin_dwork;
+ struct ionic_aq **aq_vec;
+ atomic_t admin_state;
+
+ struct ionic_eq **eq_vec;
+
+ struct ionic_v1_stat *hw_stats;
+ void *hw_stats_buf;
+ struct rdma_stat_desc *hw_stats_hdrs;
+ struct ionic_counter_stats *counter_stats;
+ int hw_stats_count;
+};
+
+struct ionic_eq {
+ struct ionic_ibdev *dev;
+
+ u32 eqid;
+ u32 intr;
+
+ struct ionic_queue q;
+
+ int armed;
+ bool enable;
+
+ struct work_struct work;
+
+ int irq;
+ char name[32];
+};
+
+struct ionic_admin_wr {
+ struct completion work;
+ struct list_head aq_ent;
+ struct ionic_v1_admin_wqe wqe;
+ struct ionic_v1_cqe cqe;
+ struct ionic_aq *aq;
+ int status;
+};
+
+struct ionic_admin_wr_q {
+ struct ionic_admin_wr *wr;
+ int wqe_strides;
+};
+
+struct ionic_aq {
+ struct ionic_ibdev *dev;
+ struct ionic_vcq *vcq;
+
+ struct work_struct work;
+
+ atomic_t admin_state;
+ unsigned long stamp;
+ bool armed;
+
+ u32 aqid;
+ u32 cqid;
+
+ spinlock_t lock; /* for posting */
+ struct ionic_queue q;
+ struct ionic_admin_wr_q *q_wr;
+ struct list_head wr_prod;
+ struct list_head wr_post;
+};
+
+struct ionic_ctx {
+ struct ib_ucontext ibctx;
+ u32 dbid;
+ struct rdma_user_mmap_entry *mmap_dbell;
+};
+
+struct ionic_tbl_buf {
+ u32 tbl_limit;
+ u32 tbl_pages;
+ size_t tbl_size;
+ __le64 *tbl_buf;
+ dma_addr_t tbl_dma;
+ u8 page_size_log2;
+};
+
+struct ionic_pd {
+ struct ib_pd ibpd;
+
+ u32 pdid;
+ u32 flags;
+};
+
+struct ionic_cq {
+ struct ionic_vcq *vcq;
+
+ u32 cqid;
+ u32 eqid;
+
+ spinlock_t lock; /* for polling */
+ struct list_head poll_sq;
+ bool flush;
+ struct list_head flush_sq;
+ struct list_head flush_rq;
+ struct list_head ibkill_flush_ent;
+
+ struct ionic_queue q;
+ bool color;
+ int credit;
+ u16 arm_any_prod;
+ u16 arm_sol_prod;
+
+ struct kref cq_kref;
+ struct completion cq_rel_comp;
+
+ /* infrequently accessed, keep at end */
+ struct ib_umem *umem;
+};
+
+struct ionic_vcq {
+ struct ib_cq ibcq;
+ struct ionic_cq cq[2];
+ u8 udma_mask;
+ u8 poll_idx;
+};
+
+struct ionic_sq_meta {
+ u64 wrid;
+ u32 len;
+ u16 seq;
+ u8 ibop;
+ u8 ibsts;
+ u8 remote:1;
+ u8 signal:1;
+ u8 local_comp:1;
+};
+
+struct ionic_rq_meta {
+ struct ionic_rq_meta *next;
+ u64 wrid;
+};
+
+struct ionic_qp {
+ struct ib_qp ibqp;
+ enum ib_qp_state state;
+
+ u32 qpid;
+ u32 ahid;
+ u32 sq_cqid;
+ u32 rq_cqid;
+ u8 udma_idx;
+ u8 has_ah:1;
+ u8 has_sq:1;
+ u8 has_rq:1;
+ u8 sig_all:1;
+
+ struct list_head qp_list_counter;
+
+ struct list_head cq_poll_sq;
+ struct list_head cq_flush_sq;
+ struct list_head cq_flush_rq;
+ struct list_head ibkill_flush_ent;
+
+ spinlock_t sq_lock; /* for posting and polling */
+ struct ionic_queue sq;
+ struct ionic_sq_meta *sq_meta;
+ u16 *sq_msn_idx;
+ int sq_spec;
+ u16 sq_old_prod;
+ u16 sq_msn_prod;
+ u16 sq_msn_cons;
+ u8 sq_cmb;
+ bool sq_flush;
+ bool sq_flush_rcvd;
+
+ spinlock_t rq_lock; /* for posting and polling */
+ struct ionic_queue rq;
+ struct ionic_rq_meta *rq_meta;
+ struct ionic_rq_meta *rq_meta_head;
+ int rq_spec;
+ u16 rq_old_prod;
+ u8 rq_cmb;
+ bool rq_flush;
+
+ struct kref qp_kref;
+ struct completion qp_rel_comp;
+
+ /* infrequently accessed, keep at end */
+ int sgid_index;
+ int sq_cmb_order;
+ u32 sq_cmb_pgid;
+ phys_addr_t sq_cmb_addr;
+ struct rdma_user_mmap_entry *mmap_sq_cmb;
+
+ struct ib_umem *sq_umem;
+
+ int rq_cmb_order;
+ u32 rq_cmb_pgid;
+ phys_addr_t rq_cmb_addr;
+ struct rdma_user_mmap_entry *mmap_rq_cmb;
+
+ struct ib_umem *rq_umem;
+
+ int dcqcn_profile;
+
+ struct ib_ud_header *hdr;
+};
+
+struct ionic_ah {
+ struct ib_ah ibah;
+ u32 ahid;
+ int sgid_index;
+ struct ib_ud_header hdr;
+};
+
+struct ionic_mr {
+ union {
+ struct ib_mr ibmr;
+ struct ib_mw ibmw;
+ };
+
+ u32 mrid;
+ int flags;
+
+ struct ib_umem *umem;
+ struct ionic_tbl_buf buf;
+ bool created;
+};
+
+struct ionic_counter_stats {
+ int queue_stats_count;
+ struct ionic_v1_stat *hdr;
+ struct rdma_stat_desc *stats_hdrs;
+ struct xarray xa_counters;
+};
+
+struct ionic_counter {
+ void *vals;
+ struct list_head qp_list;
+};
+
+static inline struct ionic_ibdev *to_ionic_ibdev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct ionic_ibdev, ibdev);
+}
+
+static inline struct ionic_ctx *to_ionic_ctx(struct ib_ucontext *ibctx)
+{
+ return container_of(ibctx, struct ionic_ctx, ibctx);
+}
+
+static inline struct ionic_ctx *to_ionic_ctx_uobj(struct ib_uobject *uobj)
+{
+ if (!uobj)
+ return NULL;
+
+ if (!uobj->context)
+ return NULL;
+
+ return to_ionic_ctx(uobj->context);
+}
+
+static inline struct ionic_pd *to_ionic_pd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct ionic_pd, ibpd);
+}
+
+static inline struct ionic_mr *to_ionic_mr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct ionic_mr, ibmr);
+}
+
+static inline struct ionic_mr *to_ionic_mw(struct ib_mw *ibmw)
+{
+ return container_of(ibmw, struct ionic_mr, ibmw);
+}
+
+static inline struct ionic_vcq *to_ionic_vcq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct ionic_vcq, ibcq);
+}
+
+static inline struct ionic_cq *to_ionic_vcq_cq(struct ib_cq *ibcq,
+ uint8_t udma_idx)
+{
+ return &to_ionic_vcq(ibcq)->cq[udma_idx];
+}
+
+static inline struct ionic_qp *to_ionic_qp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct ionic_qp, ibqp);
+}
+
+static inline struct ionic_ah *to_ionic_ah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct ionic_ah, ibah);
+}
+
+static inline u32 ionic_ctx_dbid(struct ionic_ibdev *dev,
+ struct ionic_ctx *ctx)
+{
+ if (!ctx)
+ return dev->lif_cfg.dbid;
+
+ return ctx->dbid;
+}
+
+static inline u32 ionic_obj_dbid(struct ionic_ibdev *dev,
+ struct ib_uobject *uobj)
+{
+ return ionic_ctx_dbid(dev, to_ionic_ctx_uobj(uobj));
+}
+
+static inline bool ionic_ibop_is_local(enum ib_wr_opcode op)
+{
+ return op == IB_WR_LOCAL_INV || op == IB_WR_REG_MR;
+}
+
+static inline void ionic_qp_complete(struct kref *kref)
+{
+ struct ionic_qp *qp = container_of(kref, struct ionic_qp, qp_kref);
+
+ complete(&qp->qp_rel_comp);
+}
+
+static inline void ionic_cq_complete(struct kref *kref)
+{
+ struct ionic_cq *cq = container_of(kref, struct ionic_cq, cq_kref);
+
+ complete(&cq->cq_rel_comp);
+}
+
+/* ionic_admin.c */
+extern struct workqueue_struct *ionic_evt_workq;
+void ionic_admin_post(struct ionic_ibdev *dev, struct ionic_admin_wr *wr);
+int ionic_admin_wait(struct ionic_ibdev *dev, struct ionic_admin_wr *wr,
+ enum ionic_admin_flags);
+
+int ionic_rdma_reset_devcmd(struct ionic_ibdev *dev);
+
+int ionic_create_rdma_admin(struct ionic_ibdev *dev);
+void ionic_destroy_rdma_admin(struct ionic_ibdev *dev);
+void ionic_kill_rdma_admin(struct ionic_ibdev *dev, bool fatal_path);
+
+/* ionic_controlpath.c */
+int ionic_create_cq_common(struct ionic_vcq *vcq,
+ struct ionic_tbl_buf *buf,
+ const struct ib_cq_init_attr *attr,
+ struct ionic_ctx *ctx,
+ struct ib_udata *udata,
+ struct ionic_qdesc *req_cq,
+ __u32 *resp_cqid,
+ int udma_idx);
+void ionic_destroy_cq_common(struct ionic_ibdev *dev, struct ionic_cq *cq);
+void ionic_flush_qp(struct ionic_ibdev *dev, struct ionic_qp *qp);
+void ionic_notify_flush_cq(struct ionic_cq *cq);
+
+int ionic_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata);
+void ionic_dealloc_ucontext(struct ib_ucontext *ibctx);
+int ionic_mmap(struct ib_ucontext *ibctx, struct vm_area_struct *vma);
+void ionic_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
+int ionic_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+int ionic_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+int ionic_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
+int ionic_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
+int ionic_destroy_ah(struct ib_ah *ibah, u32 flags);
+struct ib_mr *ionic_get_dma_mr(struct ib_pd *ibpd, int access);
+struct ib_mr *ionic_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
+ u64 addr, int access, struct ib_dmah *dmah,
+ struct ib_udata *udata);
+struct ib_mr *ionic_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 offset,
+ u64 length, u64 addr, int fd, int access,
+ struct ib_dmah *dmah,
+ struct uverbs_attr_bundle *attrs);
+int ionic_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
+struct ib_mr *ionic_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type type,
+ u32 max_sg);
+int ionic_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
+int ionic_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata);
+int ionic_dealloc_mw(struct ib_mw *ibmw);
+int ionic_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+int ionic_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+int ionic_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
+ struct ib_udata *udata);
+int ionic_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
+ struct ib_udata *udata);
+int ionic_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
+ struct ib_qp_init_attr *init_attr);
+int ionic_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
+
+/* ionic_datapath.c */
+int ionic_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad);
+int ionic_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad);
+int ionic_poll_cq(struct ib_cq *ibcq, int nwc, struct ib_wc *wc);
+int ionic_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+
+/* ionic_hw_stats.c */
+void ionic_stats_init(struct ionic_ibdev *dev);
+void ionic_stats_cleanup(struct ionic_ibdev *dev);
+
+/* ionic_pgtbl.c */
+__le64 ionic_pgtbl_dma(struct ionic_tbl_buf *buf, u64 va);
+__be64 ionic_pgtbl_off(struct ionic_tbl_buf *buf, u64 va);
+int ionic_pgtbl_page(struct ionic_tbl_buf *buf, u64 dma);
+int ionic_pgtbl_init(struct ionic_ibdev *dev,
+ struct ionic_tbl_buf *buf,
+ struct ib_umem *umem,
+ dma_addr_t dma,
+ int limit,
+ u64 page_size);
+void ionic_pgtbl_unbuf(struct ionic_ibdev *dev, struct ionic_tbl_buf *buf);
+#endif /* _IONIC_IBDEV_H_ */
diff --git a/drivers/infiniband/hw/ionic/ionic_lif_cfg.c b/drivers/infiniband/hw/ionic/ionic_lif_cfg.c
new file mode 100644
index 000000000000..f3cd281c3a2f
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_lif_cfg.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/kernel.h>
+
+#include <ionic.h>
+#include <ionic_lif.h>
+
+#include "ionic_lif_cfg.h"
+
+#define IONIC_MIN_RDMA_VERSION 0
+#define IONIC_MAX_RDMA_VERSION 2
+
+static u8 ionic_get_expdb(struct ionic_lif *lif)
+{
+ u8 expdb_support = 0;
+
+ if (lif->ionic->idev.phy_cmb_expdb64_pages)
+ expdb_support |= IONIC_EXPDB_64B_WQE;
+ if (lif->ionic->idev.phy_cmb_expdb128_pages)
+ expdb_support |= IONIC_EXPDB_128B_WQE;
+ if (lif->ionic->idev.phy_cmb_expdb256_pages)
+ expdb_support |= IONIC_EXPDB_256B_WQE;
+ if (lif->ionic->idev.phy_cmb_expdb512_pages)
+ expdb_support |= IONIC_EXPDB_512B_WQE;
+
+ return expdb_support;
+}
+
+void ionic_fill_lif_cfg(struct ionic_lif *lif, struct ionic_lif_cfg *cfg)
+{
+ union ionic_lif_identity *ident = &lif->ionic->ident.lif;
+
+ cfg->lif = lif;
+ cfg->hwdev = &lif->ionic->pdev->dev;
+ cfg->lif_index = lif->index;
+ cfg->lif_hw_index = lif->hw_index;
+
+ cfg->dbid = lif->kern_pid;
+ cfg->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
+ cfg->dbpage = lif->kern_dbpage;
+ cfg->intr_ctrl = lif->ionic->idev.intr_ctrl;
+
+ cfg->db_phys = lif->ionic->bars[IONIC_PCI_BAR_DBELL].bus_addr;
+
+ if (IONIC_VERSION(ident->rdma.version, ident->rdma.minor_version) >=
+ IONIC_VERSION(2, 1))
+ cfg->page_size_supported =
+ le64_to_cpu(ident->rdma.page_size_cap);
+ else
+ cfg->page_size_supported = IONIC_PAGE_SIZE_SUPPORTED;
+
+ cfg->rdma_version = ident->rdma.version;
+ cfg->qp_opcodes = ident->rdma.qp_opcodes;
+ cfg->admin_opcodes = ident->rdma.admin_opcodes;
+
+ cfg->stats_type = le16_to_cpu(ident->rdma.stats_type);
+ cfg->npts_per_lif = le32_to_cpu(ident->rdma.npts_per_lif);
+ cfg->nmrs_per_lif = le32_to_cpu(ident->rdma.nmrs_per_lif);
+ cfg->nahs_per_lif = le32_to_cpu(ident->rdma.nahs_per_lif);
+
+ cfg->aq_base = le32_to_cpu(ident->rdma.aq_qtype.qid_base);
+ cfg->cq_base = le32_to_cpu(ident->rdma.cq_qtype.qid_base);
+ cfg->eq_base = le32_to_cpu(ident->rdma.eq_qtype.qid_base);
+
+ /*
+ * ionic_create_rdma_admin() may reduce aq_count or eq_count if
+ * it is unable to allocate all that were requested.
+ * aq_count is tunable; see ionic_aq_count
+ * eq_count is tunable; see ionic_eq_count
+ */
+ cfg->aq_count = le32_to_cpu(ident->rdma.aq_qtype.qid_count);
+ cfg->eq_count = le32_to_cpu(ident->rdma.eq_qtype.qid_count);
+ cfg->cq_count = le32_to_cpu(ident->rdma.cq_qtype.qid_count);
+ cfg->qp_count = le32_to_cpu(ident->rdma.sq_qtype.qid_count);
+ cfg->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
+
+ cfg->aq_qtype = ident->rdma.aq_qtype.qtype;
+ cfg->sq_qtype = ident->rdma.sq_qtype.qtype;
+ cfg->rq_qtype = ident->rdma.rq_qtype.qtype;
+ cfg->cq_qtype = ident->rdma.cq_qtype.qtype;
+ cfg->eq_qtype = ident->rdma.eq_qtype.qtype;
+ cfg->udma_qgrp_shift = ident->rdma.udma_shift;
+ cfg->udma_count = 2;
+
+ cfg->max_stride = ident->rdma.max_stride;
+ cfg->expdb_mask = ionic_get_expdb(lif);
+
+ cfg->sq_expdb =
+ !!(lif->qtype_info[IONIC_QTYPE_TXQ].features & IONIC_QIDENT_F_EXPDB);
+ cfg->rq_expdb =
+ !!(lif->qtype_info[IONIC_QTYPE_RXQ].features & IONIC_QIDENT_F_EXPDB);
+}
+
+struct net_device *ionic_lif_netdev(struct ionic_lif *lif)
+{
+ struct net_device *netdev = lif->netdev;
+
+ dev_hold(netdev);
+ return netdev;
+}
+
+void ionic_lif_fw_version(struct ionic_lif *lif, char *str, size_t len)
+{
+ strscpy(str, lif->ionic->idev.dev_info.fw_version, len);
+}
+
+u8 ionic_lif_asic_rev(struct ionic_lif *lif)
+{
+ return lif->ionic->idev.dev_info.asic_rev;
+}
diff --git a/drivers/infiniband/hw/ionic/ionic_lif_cfg.h b/drivers/infiniband/hw/ionic/ionic_lif_cfg.h
new file mode 100644
index 000000000000..20853429f623
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_lif_cfg.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_LIF_CFG_H_
+#define _IONIC_LIF_CFG_H_
+
+#define IONIC_VERSION(a, b) (((a) << 16) + ((b) << 8))
+#define IONIC_PAGE_SIZE_SUPPORTED 0x40201000 /* 4KB, 2MB, 1GB */
+
+#define IONIC_EXPDB_64B_WQE BIT(0)
+#define IONIC_EXPDB_128B_WQE BIT(1)
+#define IONIC_EXPDB_256B_WQE BIT(2)
+#define IONIC_EXPDB_512B_WQE BIT(3)
+
+struct ionic_lif_cfg {
+ struct device *hwdev;
+ struct ionic_lif *lif;
+
+ int lif_index;
+ int lif_hw_index;
+
+ u32 dbid;
+ int dbid_count;
+ u64 __iomem *dbpage;
+ struct ionic_intr __iomem *intr_ctrl;
+ phys_addr_t db_phys;
+
+ u64 page_size_supported;
+ u32 npts_per_lif;
+ u32 nmrs_per_lif;
+ u32 nahs_per_lif;
+
+ u32 aq_base;
+ u32 cq_base;
+ u32 eq_base;
+
+ int aq_count;
+ int eq_count;
+ int cq_count;
+ int qp_count;
+
+ u16 stats_type;
+ u8 aq_qtype;
+ u8 sq_qtype;
+ u8 rq_qtype;
+ u8 cq_qtype;
+ u8 eq_qtype;
+
+ u8 udma_count;
+ u8 udma_qgrp_shift;
+
+ u8 rdma_version;
+ u8 qp_opcodes;
+ u8 admin_opcodes;
+
+ u8 max_stride;
+ bool sq_expdb;
+ bool rq_expdb;
+ u8 expdb_mask;
+};
+
+void ionic_fill_lif_cfg(struct ionic_lif *lif, struct ionic_lif_cfg *cfg);
+struct net_device *ionic_lif_netdev(struct ionic_lif *lif);
+void ionic_lif_fw_version(struct ionic_lif *lif, char *str, size_t len);
+u8 ionic_lif_asic_rev(struct ionic_lif *lif);
+
+#endif /* _IONIC_LIF_CFG_H_ */
diff --git a/drivers/infiniband/hw/ionic/ionic_pgtbl.c b/drivers/infiniband/hw/ionic/ionic_pgtbl.c
new file mode 100644
index 000000000000..e74db73c9246
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_pgtbl.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/mman.h>
+#include <linux/dma-mapping.h>
+
+#include "ionic_fw.h"
+#include "ionic_ibdev.h"
+
+__le64 ionic_pgtbl_dma(struct ionic_tbl_buf *buf, u64 va)
+{
+ u64 pg_mask = BIT_ULL(buf->page_size_log2) - 1;
+ u64 dma;
+
+ if (!buf->tbl_pages)
+ return cpu_to_le64(0);
+
+ if (buf->tbl_pages > 1)
+ return cpu_to_le64(buf->tbl_dma);
+
+ if (buf->tbl_buf)
+ dma = le64_to_cpu(buf->tbl_buf[0]);
+ else
+ dma = buf->tbl_dma;
+
+ return cpu_to_le64(dma + (va & pg_mask));
+}
+
+__be64 ionic_pgtbl_off(struct ionic_tbl_buf *buf, u64 va)
+{
+ if (buf->tbl_pages > 1) {
+ u64 pg_mask = BIT_ULL(buf->page_size_log2) - 1;
+
+ return cpu_to_be64(va & pg_mask);
+ }
+
+ return 0;
+}
+
+int ionic_pgtbl_page(struct ionic_tbl_buf *buf, u64 dma)
+{
+ if (unlikely(buf->tbl_pages == buf->tbl_limit))
+ return -ENOMEM;
+
+ if (buf->tbl_buf)
+ buf->tbl_buf[buf->tbl_pages] = cpu_to_le64(dma);
+ else
+ buf->tbl_dma = dma;
+
+ ++buf->tbl_pages;
+
+ return 0;
+}
+
+static int ionic_tbl_buf_alloc(struct ionic_ibdev *dev,
+ struct ionic_tbl_buf *buf)
+{
+ int rc;
+
+ buf->tbl_size = buf->tbl_limit * sizeof(*buf->tbl_buf);
+ buf->tbl_buf = kmalloc(buf->tbl_size, GFP_KERNEL);
+ if (!buf->tbl_buf)
+ return -ENOMEM;
+
+ buf->tbl_dma = dma_map_single(dev->lif_cfg.hwdev, buf->tbl_buf,
+ buf->tbl_size, DMA_TO_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, buf->tbl_dma);
+ if (rc) {
+ kfree(buf->tbl_buf);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int ionic_pgtbl_umem(struct ionic_tbl_buf *buf, struct ib_umem *umem)
+{
+ struct ib_block_iter biter;
+ u64 page_dma;
+ int rc;
+
+ rdma_umem_for_each_dma_block(umem, &biter, BIT_ULL(buf->page_size_log2)) {
+ page_dma = rdma_block_iter_dma_address(&biter);
+ rc = ionic_pgtbl_page(buf, page_dma);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+void ionic_pgtbl_unbuf(struct ionic_ibdev *dev, struct ionic_tbl_buf *buf)
+{
+ if (buf->tbl_buf)
+ dma_unmap_single(dev->lif_cfg.hwdev, buf->tbl_dma,
+ buf->tbl_size, DMA_TO_DEVICE);
+
+ kfree(buf->tbl_buf);
+ memset(buf, 0, sizeof(*buf));
+}
+
+int ionic_pgtbl_init(struct ionic_ibdev *dev,
+ struct ionic_tbl_buf *buf,
+ struct ib_umem *umem,
+ dma_addr_t dma,
+ int limit,
+ u64 page_size)
+{
+ int rc;
+
+ memset(buf, 0, sizeof(*buf));
+
+ if (umem) {
+ limit = ib_umem_num_dma_blocks(umem, page_size);
+ buf->page_size_log2 = order_base_2(page_size);
+ }
+
+ if (limit < 1)
+ return -EINVAL;
+
+ buf->tbl_limit = limit;
+
+ /* skip pgtbl if contiguous / direct translation */
+ if (limit > 1) {
+ rc = ionic_tbl_buf_alloc(dev, buf);
+ if (rc)
+ return rc;
+ }
+
+ if (umem)
+ rc = ionic_pgtbl_umem(buf, umem);
+ else
+ rc = ionic_pgtbl_page(buf, dma);
+
+ if (rc)
+ goto err_unbuf;
+
+ return 0;
+
+err_unbuf:
+ ionic_pgtbl_unbuf(dev, buf);
+ return rc;
+}
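
ionic_pgtbl_page() and ionic_pgtbl_dma() above implement a single-page shortcut: when only one translation is needed, no table is built, and the lone page address is handed back with the VA's in-page offset folded in. A user-space sketch of that path follows; the demo_* names are illustrative only and the zero-page case handled by the driver is omitted.

/* Single-page "direct translation" shortcut sketch. */
#include <stdio.h>
#include <stdint.h>

struct demo_tbl {
	uint64_t *buf;		/* NULL when there is no table in memory */
	uint64_t dma;		/* table DMA address, or the lone page address */
	uint32_t pages;
	uint8_t page_shift;
};

static uint64_t demo_pgtbl_dma(struct demo_tbl *t, uint64_t va)
{
	uint64_t pg_mask = (1ull << t->page_shift) - 1;

	if (t->pages > 1)
		return t->dma;			/* device walks the table */

	/* one page: give the device the page address plus the VA offset */
	return (t->buf ? t->buf[0] : t->dma) + (va & pg_mask);
}

int main(void)
{
	struct demo_tbl one = { .buf = NULL, .dma = 0xabc000,
				.pages = 1, .page_shift = 12 };

	printf("dma for va 0x123456: 0x%llx\n",
	       (unsigned long long)demo_pgtbl_dma(&one, 0x123456));
	return 0;
}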
diff --git a/drivers/infiniband/hw/ionic/ionic_queue.c b/drivers/infiniband/hw/ionic/ionic_queue.c
new file mode 100644
index 000000000000..aa897ed2a412
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_queue.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/dma-mapping.h>
+
+#include "ionic_queue.h"
+
+int ionic_queue_init(struct ionic_queue *q, struct device *dma_dev,
+ int depth, size_t stride)
+{
+ if (depth < 0 || depth > 0xffff)
+ return -EINVAL;
+
+ if (stride == 0 || stride > 0x10000)
+ return -EINVAL;
+
+ if (depth == 0)
+ depth = 1;
+
+ q->depth_log2 = order_base_2(depth + 1);
+ q->stride_log2 = order_base_2(stride);
+
+ if (q->depth_log2 + q->stride_log2 < PAGE_SHIFT)
+ q->depth_log2 = PAGE_SHIFT - q->stride_log2;
+
+ if (q->depth_log2 > 16 || q->stride_log2 > 16)
+ return -EINVAL;
+
+ q->size = BIT_ULL(q->depth_log2 + q->stride_log2);
+ q->mask = BIT(q->depth_log2) - 1;
+
+ q->ptr = dma_alloc_coherent(dma_dev, q->size, &q->dma, GFP_KERNEL);
+ if (!q->ptr)
+ return -ENOMEM;
+
+ /* it will always be page aligned, but just to be sure... */
+ if (!PAGE_ALIGNED(q->ptr)) {
+ dma_free_coherent(dma_dev, q->size, q->ptr, q->dma);
+ return -ENOMEM;
+ }
+
+ q->prod = 0;
+ q->cons = 0;
+ q->dbell = 0;
+
+ return 0;
+}
+
+void ionic_queue_destroy(struct ionic_queue *q, struct device *dma_dev)
+{
+ dma_free_coherent(dma_dev, q->size, q->ptr, q->dma);
+}
diff --git a/drivers/infiniband/hw/ionic/ionic_queue.h b/drivers/infiniband/hw/ionic/ionic_queue.h
new file mode 100644
index 000000000000..d18020d4cad5
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_queue.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_QUEUE_H_
+#define _IONIC_QUEUE_H_
+
+#include <linux/io.h>
+#include <ionic_regs.h>
+
+#define IONIC_MAX_DEPTH 0xffff
+#define IONIC_MAX_CQ_DEPTH 0xffff
+#define IONIC_CQ_RING_ARM IONIC_DBELL_RING_1
+#define IONIC_CQ_RING_SOL IONIC_DBELL_RING_2
+
+/**
+ * struct ionic_queue - Ring buffer used between device and driver
+ * @size: Size of the buffer, in bytes
+ * @dma: Dma address of the buffer
+ * @ptr: Buffer virtual address
+ * @prod: Driver position in the queue
+ * @cons: Device position in the queue
+ * @mask: Capacity of the queue, subtracting the hole
+ * This value is equal to ((1 << depth_log2) - 1)
+ * @depth_log2: Log base two size depth of the queue
+ * @stride_log2: Log base two size of an element in the queue
+ * @dbell: Doorbell identifying bits
+ */
+struct ionic_queue {
+ size_t size;
+ dma_addr_t dma;
+ void *ptr;
+ u16 prod;
+ u16 cons;
+ u16 mask;
+ u8 depth_log2;
+ u8 stride_log2;
+ u64 dbell;
+};
+
+/**
+ * ionic_queue_init() - Initialize user space queue
+ * @q: Uninitialized queue structure
+ * @dma_dev: DMA device for mapping
+ * @depth: Depth of the queue
+ * @stride: Size of each element of the queue
+ *
+ * Return: status code
+ */
+int ionic_queue_init(struct ionic_queue *q, struct device *dma_dev,
+ int depth, size_t stride);
+
+/**
+ * ionic_queue_destroy() - Destroy user space queue
+ * @q: Queue structure
+ * @dma_dev: DMA device for mapping
+ */
+void ionic_queue_destroy(struct ionic_queue *q, struct device *dma_dev);
+
+/**
+ * ionic_queue_empty() - Test if queue is empty
+ * @q: Queue structure
+ *
+ * This is only valid for to-device queues.
+ *
+ * Return: is empty
+ */
+static inline bool ionic_queue_empty(struct ionic_queue *q)
+{
+ return q->prod == q->cons;
+}
+
+/**
+ * ionic_queue_length() - Get the current length of the queue
+ * @q: Queue structure
+ *
+ * This is only valid for to-device queues.
+ *
+ * Return: length
+ */
+static inline u16 ionic_queue_length(struct ionic_queue *q)
+{
+ return (q->prod - q->cons) & q->mask;
+}
+
+/**
+ * ionic_queue_length_remaining() - Get the remaining length of the queue
+ * @q: Queue structure
+ *
+ * This is only valid for to-device queues.
+ *
+ * Return: length remaining
+ */
+static inline u16 ionic_queue_length_remaining(struct ionic_queue *q)
+{
+ return q->mask - ionic_queue_length(q);
+}
+
+/**
+ * ionic_queue_full() - Test if queue is full
+ * @q: Queue structure
+ *
+ * This is only valid for to-device queues.
+ *
+ * Return: is full
+ */
+static inline bool ionic_queue_full(struct ionic_queue *q)
+{
+ return q->mask == ionic_queue_length(q);
+}
+
+/**
+ * ionic_color_wrap() - Flip the color if prod is wrapped
+ * @prod: Queue index just after advancing
+ * @color: Queue color just prior to advancing the index
+ *
+ * Return: color after advancing the index
+ */
+static inline bool ionic_color_wrap(u16 prod, bool color)
+{
+ /* logical xor color with (prod == 0) */
+ return color != (prod == 0);
+}
+
+/**
+ * ionic_queue_at() - Get the element at the given index
+ * @q: Queue structure
+ * @idx: Index in the queue
+ *
+ * The index must be within the bounds of the queue. It is not checked here.
+ *
+ * Return: pointer to element at index
+ */
+static inline void *ionic_queue_at(struct ionic_queue *q, u16 idx)
+{
+ return q->ptr + ((unsigned long)idx << q->stride_log2);
+}
+
+/**
+ * ionic_queue_at_prod() - Get the element at the producer index
+ * @q: Queue structure
+ *
+ * Return: pointer to element at producer index
+ */
+static inline void *ionic_queue_at_prod(struct ionic_queue *q)
+{
+ return ionic_queue_at(q, q->prod);
+}
+
+/**
+ * ionic_queue_at_cons() - Get the element at the consumer index
+ * @q: Queue structure
+ *
+ * Return: pointer to element at consumer index
+ */
+static inline void *ionic_queue_at_cons(struct ionic_queue *q)
+{
+ return ionic_queue_at(q, q->cons);
+}
+
+/**
+ * ionic_queue_next() - Compute the next index
+ * @q: Queue structure
+ * @idx: Index
+ *
+ * Return: next index after idx
+ */
+static inline u16 ionic_queue_next(struct ionic_queue *q, u16 idx)
+{
+ return (idx + 1) & q->mask;
+}
+
+/**
+ * ionic_queue_produce() - Increase the producer index
+ * @q: Queue structure
+ *
+ * Caller must ensure that the queue is not full. It is not checked here.
+ */
+static inline void ionic_queue_produce(struct ionic_queue *q)
+{
+ q->prod = ionic_queue_next(q, q->prod);
+}
+
+/**
+ * ionic_queue_consume() - Increase the consumer index
+ * @q: Queue structure
+ *
+ * Caller must ensure that the queue is not empty. It is not checked here.
+ *
+ * This is only valid for to-device queues.
+ */
+static inline void ionic_queue_consume(struct ionic_queue *q)
+{
+ q->cons = ionic_queue_next(q, q->cons);
+}
+
+/**
+ * ionic_queue_consume_entries() - Increase the consumer index by entries
+ * @q: Queue structure
+ * @entries: Number of entries to increment
+ *
+ * Caller must ensure that the queue is not empty. It is not checked here.
+ *
+ * This is only valid for to-device queues.
+ */
+static inline void ionic_queue_consume_entries(struct ionic_queue *q,
+ u16 entries)
+{
+ q->cons = (q->cons + entries) & q->mask;
+}
+
+/**
+ * ionic_queue_dbell_init() - Initialize doorbell bits for queue id
+ * @q: Queue structure
+ * @qid: Queue identifying number
+ */
+static inline void ionic_queue_dbell_init(struct ionic_queue *q, u32 qid)
+{
+ q->dbell = IONIC_DBELL_QID(qid);
+}
+
+/**
+ * ionic_queue_dbell_val() - Get current doorbell update value
+ * @q: Queue structure
+ *
+ * Return: current doorbell update value
+ */
+static inline u64 ionic_queue_dbell_val(struct ionic_queue *q)
+{
+ return q->dbell | q->prod;
+}
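+
+/*
+ * Typical producer-side flow (an illustrative sketch only, not the
+ * driver's actual post path; "wqe" and "dbell_reg" are placeholders):
+ *
+ *	if (ionic_queue_full(q))
+ *		return -ENOMEM;
+ *	wqe = ionic_queue_at_prod(q);
+ *	... fill in the wqe ...
+ *	ionic_queue_produce(q);
+ *	writeq(ionic_queue_dbell_val(q), dbell_reg);
+ */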
+
+#endif /* _IONIC_QUEUE_H_ */
diff --git a/drivers/infiniband/hw/ionic/ionic_res.h b/drivers/infiniband/hw/ionic/ionic_res.h
new file mode 100644
index 000000000000..46c8c584bd9a
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_res.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_RES_H_
+#define _IONIC_RES_H_
+
+#include <linux/kernel.h>
+#include <linux/idr.h>
+
+/**
+ * struct ionic_resid_bits - Number allocator based on IDA
+ *
+ * @inuse: IDA handle
+ * @inuse_size: Highest ID limit for IDA
+ */
+struct ionic_resid_bits {
+ struct ida inuse;
+ unsigned int inuse_size;
+};
+
+/**
+ * ionic_resid_init() - Initialize a resid allocator
+ * @resid: Uninitialized resid allocator
+ * @size: Capacity of the allocator
+ */
+static inline void ionic_resid_init(struct ionic_resid_bits *resid,
+ unsigned int size)
+{
+ resid->inuse_size = size;
+ ida_init(&resid->inuse);
+}
+
+/**
+ * ionic_resid_destroy() - Destroy a resid allocator
+ * @resid: Resid allocator
+ */
+static inline void ionic_resid_destroy(struct ionic_resid_bits *resid)
+{
+ ida_destroy(&resid->inuse);
+}
+
+/**
+ * ionic_resid_get_shared() - Allocate an available shared resource id
+ * @resid: Resid allocator
+ * @min: Smallest valid resource id
+ * @size: One after largest valid resource id
+ *
+ * Return: Resource id, or negative error number
+ */
+static inline int ionic_resid_get_shared(struct ionic_resid_bits *resid,
+ unsigned int min,
+ unsigned int size)
+{
+ return ida_alloc_range(&resid->inuse, min, size - 1, GFP_KERNEL);
+}
+
+/**
+ * ionic_resid_get() - Allocate an available resource id
+ * @resid: Resid allocator
+ *
+ * Return: Resource id, or negative error number
+ */
+static inline int ionic_resid_get(struct ionic_resid_bits *resid)
+{
+ return ionic_resid_get_shared(resid, 0, resid->inuse_size);
+}
+
+/**
+ * ionic_resid_put() - Free a resource id
+ * @resid: Resid allocator
+ * @id: Resource id
+ */
+static inline void ionic_resid_put(struct ionic_resid_bits *resid, int id)
+{
+ ida_free(&resid->inuse, id);
+}
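+
+/*
+ * Illustrative use of the resid allocator (sketch only; the size and
+ * variable names are examples, not taken from the driver):
+ *
+ *	struct ionic_resid_bits resid;
+ *	int id;
+ *
+ *	ionic_resid_init(&resid, 128);
+ *	id = ionic_resid_get(&resid);
+ *	if (id >= 0)
+ *		ionic_resid_put(&resid, id);
+ *	ionic_resid_destroy(&resid);
+ */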
+
+/**
+ * ionic_bitid_to_qid() - Transform a resource bit index into a queue id
+ * @bitid: Bit index
+ * @qgrp_shift: Log2 number of queues per queue group
+ * @half_qid_shift: Log2 of half the total number of queues
+ *
+ * Return: Queue id
+ *
+ * Udma-constrained queues (QPs and CQs) are associated with their udma by
+ * queue group. Even queue groups are associated with udma0, and odd queue
+ * groups with udma1.
+ *
+ * For allocating queue ids, we want to arrange the bits into two halves,
+ * with the even queue groups of udma0 in the lower half of the bitset,
+ * and the odd queue groups of udma1 in the upper half of the bitset.
+ * Then, one or two calls of find_next_zero_bit can examine all the bits
+ * for queues of an entire udma.
+ *
+ * For example, assuming eight queue groups with qgrp qids per group:
+ *
+ * bitid 0*qgrp..1*qgrp-1 : qid 0*qgrp..1*qgrp-1
+ * bitid 1*qgrp..2*qgrp-1 : qid 2*qgrp..3*qgrp-1
+ * bitid 2*qgrp..3*qgrp-1 : qid 4*qgrp..5*qgrp-1
+ * bitid 3*qgrp..4*qgrp-1 : qid 6*qgrp..7*qgrp-1
+ * bitid 4*qgrp..5*qgrp-1 : qid 1*qgrp..2*qgrp-1
+ * bitid 5*qgrp..6*qgrp-1 : qid 3*qgrp..4*qgrp-1
+ * bitid 6*qgrp..7*qgrp-1 : qid 5*qgrp..6*qgrp-1
+ * bitid 7*qgrp..8*qgrp-1 : qid 7*qgrp..8*qgrp-1
+ *
+ * There are three important ranges of bits in the qid. There is the udma
+ * bit "U" at qgrp_shift, which is the least significant bit of the group
+ * index, and determines which udma a queue is associated with.
+ * The bits of lesser significance we can call the idx bits "I", which are
+ * the index of the queue within the group. The bits of greater significance
+ * we can call the grp bits "G", which are other bits of the group index that
+ * do not determine the udma. Those bits are just rearranged in the bit index
+ * in the bitset. A bitid has the udma bit in the most significant place,
+ * then the grp bits, then the idx bits.
+ *
+ * bitid: 00000000000000 U GGG IIIIII
+ * qid: 00000000000000 GGG U IIIIII
+ *
+ * Transforming from bit index to qid, or from qid to bit index, can be
+ * accomplished by rearranging the bits by masking and shifting.
+ */
+static inline u32 ionic_bitid_to_qid(u32 bitid, u8 qgrp_shift,
+ u8 half_qid_shift)
+{
+ u32 udma_bit =
+ (bitid & BIT(half_qid_shift)) >> (half_qid_shift - qgrp_shift);
+ u32 grp_bits = (bitid & GENMASK(half_qid_shift - 1, qgrp_shift)) << 1;
+ u32 idx_bits = bitid & (BIT(qgrp_shift) - 1);
+
+ return grp_bits | udma_bit | idx_bits;
+}
+
+/**
+ * ionic_qid_to_bitid() - Transform a queue id into a resource bit index
+ * @qid: Queue id
+ * @qgrp_shift: Log2 number of queues per queue group
+ * @half_qid_shift: Log2 of half the total number of queues
+ *
+ * Return: Resource bit index
+ *
+ * This is the inverse of ionic_bitid_to_qid().
+ */
+static inline u32 ionic_qid_to_bitid(u32 qid, u8 qgrp_shift, u8 half_qid_shift)
+{
+ u32 udma_bit = (qid & BIT(qgrp_shift)) << (half_qid_shift - qgrp_shift);
+ u32 grp_bits = (qid & GENMASK(half_qid_shift, qgrp_shift + 1)) >> 1;
+ u32 idx_bits = qid & (BIT(qgrp_shift) - 1);
+
+ return udma_bit | grp_bits | idx_bits;
+}
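+
+/*
+ * Worked example (parameter values chosen for illustration only):
+ * with qgrp_shift = 2 and half_qid_shift = 4 (eight groups of four
+ * queues), bitid 22 (binary 1 01 10: U=1, G=01, I=10) maps to
+ * qid 14 (binary 01 1 10: G=01, U=1, I=10), and
+ * ionic_qid_to_bitid(14, 2, 4) returns 22 again.
+ */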
+#endif /* _IONIC_RES_H_ */
diff --git a/drivers/infiniband/hw/irdma/Kconfig b/drivers/infiniband/hw/irdma/Kconfig
new file mode 100644
index 000000000000..0bd7e3fca1fb
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INFINIBAND_IRDMA
+ tristate "Intel(R) Ethernet Protocol Driver for RDMA"
+ depends on INET
+ depends on IPV6 || !IPV6
+ depends on PCI
+ depends on IDPF && ICE && I40E
+ select GENERIC_ALLOCATOR
+ select AUXILIARY_BUS
+ select CRC32
+ help
+ This is an Intel(R) Ethernet Protocol Driver for RDMA that
+ supports IPU E2000 (RoCEv2), E810 (iWARP/RoCEv2) and X722 (iWARP)
+ network devices.
diff --git a/drivers/infiniband/hw/irdma/Makefile b/drivers/infiniband/hw/irdma/Makefile
new file mode 100644
index 000000000000..03ceb9e5475f
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/Makefile
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+# Copyright (c) 2019, Intel Corporation.
+
+#
+# Makefile for the Intel(R) Ethernet Connection RDMA Linux Driver
+#
+
+obj-$(CONFIG_INFINIBAND_IRDMA) += irdma.o
+
+irdma-objs := cm.o \
+ ctrl.o \
+ hmc.o \
+ hw.o \
+ i40iw_hw.o \
+ i40iw_if.o \
+ ig3rdma_if.o \
+ icrdma_if.o \
+ icrdma_hw.o \
+ ig3rdma_hw.o \
+ main.o \
+ pble.o \
+ puda.o \
+ trace.o \
+ uda.o \
+ uk.o \
+ utils.o \
+ verbs.o \
+ virtchnl.o \
+ ws.o
+
+CFLAGS_trace.o = -I$(src)
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
new file mode 100644
index 000000000000..c6a0a661d6e7
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -0,0 +1,4434 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "main.h"
+#include "trace.h"
+
+static void irdma_cm_post_event(struct irdma_cm_event *event);
+static void irdma_disconnect_worker(struct work_struct *work);
+
+/**
+ * irdma_free_sqbuf - put back puda buffer if refcount is 0
+ * @vsi: The VSI structure of the device
+ * @bufp: puda buffer to free
+ */
+void irdma_free_sqbuf(struct irdma_sc_vsi *vsi, void *bufp)
+{
+ struct irdma_puda_buf *buf = bufp;
+ struct irdma_puda_rsrc *ilq = vsi->ilq;
+
+ if (refcount_dec_and_test(&buf->refcount))
+ irdma_puda_ret_bufpool(ilq, buf);
+}
+
+/**
+ * irdma_record_ird_ord - Record IRD/ORD passed in
+ * @cm_node: connection's node
+ * @conn_ird: connection IRD
+ * @conn_ord: connection ORD
+ */
+static void irdma_record_ird_ord(struct irdma_cm_node *cm_node, u32 conn_ird,
+ u32 conn_ord)
+{
+ if (conn_ird > cm_node->dev->hw_attrs.max_hw_ird)
+ conn_ird = cm_node->dev->hw_attrs.max_hw_ird;
+
+ if (conn_ord > cm_node->dev->hw_attrs.max_hw_ord)
+ conn_ord = cm_node->dev->hw_attrs.max_hw_ord;
+ else if (!conn_ord && cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO)
+ conn_ord = 1;
+ cm_node->ird_size = conn_ird;
+ cm_node->ord_size = conn_ord;
+}
+
+/**
+ * irdma_copy_ip_ntohl - copy IP address from network to host
+ * @dst: IP address in host order
+ * @src: IP address in network order (big endian)
+ */
+void irdma_copy_ip_ntohl(u32 *dst, __be32 *src)
+{
+ *dst++ = ntohl(*src++);
+ *dst++ = ntohl(*src++);
+ *dst++ = ntohl(*src++);
+ *dst = ntohl(*src);
+}
+
+/**
+ * irdma_copy_ip_htonl - copy IP address from host to network order
+ * @dst: IP address in network order (big endian)
+ * @src: IP address in host order
+ */
+void irdma_copy_ip_htonl(__be32 *dst, u32 *src)
+{
+ *dst++ = htonl(*src++);
+ *dst++ = htonl(*src++);
+ *dst++ = htonl(*src++);
+ *dst = htonl(*src);
+}
+
+/**
+ * irdma_get_addr_info - get a copy of the cm_node's ip/tcp info
+ * @cm_node: contains ip/tcp info
+ * @cm_info: to get a copy of the cm_node ip/tcp info
+ */
+static void irdma_get_addr_info(struct irdma_cm_node *cm_node,
+ struct irdma_cm_info *cm_info)
+{
+ memset(cm_info, 0, sizeof(*cm_info));
+ cm_info->ipv4 = cm_node->ipv4;
+ cm_info->vlan_id = cm_node->vlan_id;
+ memcpy(cm_info->loc_addr, cm_node->loc_addr, sizeof(cm_info->loc_addr));
+ memcpy(cm_info->rem_addr, cm_node->rem_addr, sizeof(cm_info->rem_addr));
+ cm_info->loc_port = cm_node->loc_port;
+ cm_info->rem_port = cm_node->rem_port;
+}
+
+/**
+ * irdma_fill_sockaddr4 - fill in addr info for IPv4 connection
+ * @cm_node: connection's node
+ * @event: upper layer's cm event
+ */
+static inline void irdma_fill_sockaddr4(struct irdma_cm_node *cm_node,
+ struct iw_cm_event *event)
+{
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr;
+
+ laddr->sin_family = AF_INET;
+ raddr->sin_family = AF_INET;
+
+ laddr->sin_port = htons(cm_node->loc_port);
+ raddr->sin_port = htons(cm_node->rem_port);
+
+ laddr->sin_addr.s_addr = htonl(cm_node->loc_addr[0]);
+ raddr->sin_addr.s_addr = htonl(cm_node->rem_addr[0]);
+}
+
+/**
+ * irdma_fill_sockaddr6 - fill in addr info for IPv6 connection
+ * @cm_node: connection's node
+ * @event: upper layer's cm event
+ */
+static inline void irdma_fill_sockaddr6(struct irdma_cm_node *cm_node,
+ struct iw_cm_event *event)
+{
+ struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr;
+ struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)&event->remote_addr;
+
+ laddr6->sin6_family = AF_INET6;
+ raddr6->sin6_family = AF_INET6;
+
+ laddr6->sin6_port = htons(cm_node->loc_port);
+ raddr6->sin6_port = htons(cm_node->rem_port);
+
+ irdma_copy_ip_htonl(laddr6->sin6_addr.in6_u.u6_addr32,
+ cm_node->loc_addr);
+ irdma_copy_ip_htonl(raddr6->sin6_addr.in6_u.u6_addr32,
+ cm_node->rem_addr);
+}
+
+/**
+ * irdma_get_cmevent_info - for cm event upcall
+ * @cm_node: connection's node
+ * @cm_id: upper layers cm struct for the event
+ * @event: upper layer's cm event
+ */
+static inline void irdma_get_cmevent_info(struct irdma_cm_node *cm_node,
+ struct iw_cm_id *cm_id,
+ struct iw_cm_event *event)
+{
+ memcpy(&event->local_addr, &cm_id->m_local_addr,
+ sizeof(event->local_addr));
+ memcpy(&event->remote_addr, &cm_id->m_remote_addr,
+ sizeof(event->remote_addr));
+ if (cm_node) {
+ event->private_data = cm_node->pdata_buf;
+ event->private_data_len = (u8)cm_node->pdata.size;
+ event->ird = cm_node->ird_size;
+ event->ord = cm_node->ord_size;
+ }
+}
+
+/**
+ * irdma_send_cm_event - upcall cm's event handler
+ * @cm_node: connection's node
+ * @cm_id: upper layer's cm info struct
+ * @type: Event type to indicate
+ * @status: status for the event type
+ */
+static int irdma_send_cm_event(struct irdma_cm_node *cm_node,
+ struct iw_cm_id *cm_id,
+ enum iw_cm_event_type type, int status)
+{
+ struct iw_cm_event event = {};
+
+ event.event = type;
+ event.status = status;
+ trace_irdma_send_cm_event(cm_node, cm_id, type, status,
+ __builtin_return_address(0));
+
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: cm_node %p cm_id=%p state=%d accel=%d event_type=%d status=%d\n",
+ cm_node, cm_id, cm_node->accelerated, cm_node->state, type,
+ status);
+
+ switch (type) {
+ case IW_CM_EVENT_CONNECT_REQUEST:
+ if (cm_node->ipv4)
+ irdma_fill_sockaddr4(cm_node, &event);
+ else
+ irdma_fill_sockaddr6(cm_node, &event);
+ event.provider_data = cm_node;
+ event.private_data = cm_node->pdata_buf;
+ event.private_data_len = (u8)cm_node->pdata.size;
+ event.ird = cm_node->ird_size;
+ break;
+ case IW_CM_EVENT_CONNECT_REPLY:
+ irdma_get_cmevent_info(cm_node, cm_id, &event);
+ break;
+ case IW_CM_EVENT_ESTABLISHED:
+ event.ird = cm_node->ird_size;
+ event.ord = cm_node->ord_size;
+ break;
+ case IW_CM_EVENT_DISCONNECT:
+ case IW_CM_EVENT_CLOSE:
+ /* Wait if we are in RTS but haven't issued the iwcm event upcall */
+ if (!cm_node->accelerated)
+ wait_for_completion(&cm_node->establish_comp);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return cm_id->event_handler(cm_id, &event);
+}
+
+/**
+ * irdma_timer_list_prep - add connection nodes to a list to perform timer tasks
+ * @cm_core: cm's core
+ * @timer_list: a timer list to which cm_node will be selected
+ */
+static void irdma_timer_list_prep(struct irdma_cm_core *cm_core,
+ struct list_head *timer_list)
+{
+ struct irdma_cm_node *cm_node;
+ int bkt;
+
+ hash_for_each_rcu(cm_core->cm_hash_tbl, bkt, cm_node, list) {
+ if ((cm_node->close_entry || cm_node->send_entry) &&
+ refcount_inc_not_zero(&cm_node->refcnt))
+ list_add(&cm_node->timer_entry, timer_list);
+ }
+}
+
+/**
+ * irdma_create_event - create cm event
+ * @cm_node: connection's node
+ * @type: Event type to generate
+ */
+static struct irdma_cm_event *irdma_create_event(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type)
+{
+ struct irdma_cm_event *event;
+
+ if (!cm_node->cm_id)
+ return NULL;
+
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+
+ if (!event)
+ return NULL;
+
+ event->type = type;
+ event->cm_node = cm_node;
+ memcpy(event->cm_info.rem_addr, cm_node->rem_addr,
+ sizeof(event->cm_info.rem_addr));
+ memcpy(event->cm_info.loc_addr, cm_node->loc_addr,
+ sizeof(event->cm_info.loc_addr));
+ event->cm_info.rem_port = cm_node->rem_port;
+ event->cm_info.loc_port = cm_node->loc_port;
+ event->cm_info.cm_id = cm_node->cm_id;
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: node=%p event=%p type=%u dst=%pI4 src=%pI4\n", cm_node,
+ event, type, event->cm_info.loc_addr,
+ event->cm_info.rem_addr);
+ trace_irdma_create_event(cm_node, type, __builtin_return_address(0));
+ irdma_cm_post_event(event);
+
+ return event;
+}
+
+/**
+ * irdma_free_retrans_entry - free send entry
+ * @cm_node: connection's node
+ */
+static void irdma_free_retrans_entry(struct irdma_cm_node *cm_node)
+{
+ struct irdma_device *iwdev = cm_node->iwdev;
+ struct irdma_timer_entry *send_entry;
+
+ send_entry = cm_node->send_entry;
+ if (!send_entry)
+ return;
+
+ cm_node->send_entry = NULL;
+ irdma_free_sqbuf(&iwdev->vsi, send_entry->sqbuf);
+ kfree(send_entry);
+ refcount_dec(&cm_node->refcnt);
+}
+
+/**
+ * irdma_cleanup_retrans_entry - free send entry with lock
+ * @cm_node: connection's node
+ */
+static void irdma_cleanup_retrans_entry(struct irdma_cm_node *cm_node)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ irdma_free_retrans_entry(cm_node);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+}
+
+/**
+ * irdma_form_ah_cm_frame - get a free packet and build frame with address handle
+ * @cm_node: connection's node info to use in frame
+ * @options: pointer to options info
+ * @hdr: pointer to mpa header
+ * @pdata: pointer to private data
+ * @flags: indicates FIN or ACK
+ */
+static struct irdma_puda_buf *irdma_form_ah_cm_frame(struct irdma_cm_node *cm_node,
+ struct irdma_kmem_info *options,
+ struct irdma_kmem_info *hdr,
+ struct irdma_mpa_priv_info *pdata,
+ u8 flags)
+{
+ struct irdma_puda_buf *sqbuf;
+ struct irdma_sc_vsi *vsi = &cm_node->iwdev->vsi;
+ u8 *buf;
+ struct tcphdr *tcph;
+ u16 pktsize;
+ u32 opts_len = 0;
+ u32 pd_len = 0;
+ u32 hdr_len = 0;
+
+ if (!cm_node->ah || !cm_node->ah->ah_info.ah_valid) {
+ ibdev_dbg(&cm_node->iwdev->ibdev, "CM: AH invalid\n");
+ return NULL;
+ }
+
+ sqbuf = irdma_puda_get_bufpool(vsi->ilq);
+ if (!sqbuf) {
+ ibdev_dbg(&cm_node->iwdev->ibdev, "CM: SQ buf NULL\n");
+ return NULL;
+ }
+
+ sqbuf->ah_id = cm_node->ah->ah_info.ah_idx;
+ buf = sqbuf->mem.va;
+ if (options)
+ opts_len = (u32)options->size;
+
+ if (hdr)
+ hdr_len = hdr->size;
+
+ if (pdata)
+ pd_len = pdata->size;
+
+ pktsize = sizeof(*tcph) + opts_len + hdr_len + pd_len;
+
+ memset(buf, 0, sizeof(*tcph));
+
+ sqbuf->totallen = pktsize;
+ sqbuf->tcphlen = sizeof(*tcph) + opts_len;
+ sqbuf->scratch = cm_node;
+
+ tcph = (struct tcphdr *)buf;
+ buf += sizeof(*tcph);
+
+ tcph->source = htons(cm_node->loc_port);
+ tcph->dest = htons(cm_node->rem_port);
+ tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
+
+ if (flags & SET_ACK) {
+ cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
+ tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
+ tcph->ack = 1;
+ } else {
+ tcph->ack_seq = 0;
+ }
+
+ if (flags & SET_SYN) {
+ cm_node->tcp_cntxt.loc_seq_num++;
+ tcph->syn = 1;
+ } else {
+ cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len;
+ }
+
+ if (flags & SET_FIN) {
+ cm_node->tcp_cntxt.loc_seq_num++;
+ tcph->fin = 1;
+ }
+
+ if (flags & SET_RST)
+ tcph->rst = 1;
+
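+ /* TCP header length in 32-bit words, rounded up to cover the options */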
+ tcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2);
+ sqbuf->tcphlen = tcph->doff << 2;
+ tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
+ tcph->urg_ptr = 0;
+
+ if (opts_len) {
+ memcpy(buf, options->addr, opts_len);
+ buf += opts_len;
+ }
+
+ if (hdr_len) {
+ memcpy(buf, hdr->addr, hdr_len);
+ buf += hdr_len;
+ }
+
+ if (pdata && pdata->addr)
+ memcpy(buf, pdata->addr, pdata->size);
+
+ refcount_set(&sqbuf->refcount, 1);
+
+ print_hex_dump_debug("ILQ: TRANSMIT ILQ BUFFER", DUMP_PREFIX_OFFSET,
+ 16, 8, sqbuf->mem.va, sqbuf->totallen, false);
+
+ return sqbuf;
+}
+
+/**
+ * irdma_form_uda_cm_frame - get a free packet and build frame full tcpip packet
+ * @cm_node: connection's node info to use in frame
+ * @options: pointer to options info
+ * @hdr: pointer to mpa header
+ * @pdata: pointer to private data
+ * @flags: indicates FIN or ACK
+ */
+static struct irdma_puda_buf *irdma_form_uda_cm_frame(struct irdma_cm_node *cm_node,
+ struct irdma_kmem_info *options,
+ struct irdma_kmem_info *hdr,
+ struct irdma_mpa_priv_info *pdata,
+ u8 flags)
+{
+ struct irdma_puda_buf *sqbuf;
+ struct irdma_sc_vsi *vsi = &cm_node->iwdev->vsi;
+ u8 *buf;
+ struct tcphdr *tcph;
+ struct iphdr *iph;
+ struct ipv6hdr *ip6h;
+ struct ethhdr *ethh;
+ u16 pktsize;
+ u16 eth_hlen = ETH_HLEN;
+ u32 opts_len = 0;
+ u32 pd_len = 0;
+ u32 hdr_len = 0;
+ u16 vtag;
+
+ sqbuf = irdma_puda_get_bufpool(vsi->ilq);
+ if (!sqbuf)
+ return NULL;
+
+ buf = sqbuf->mem.va;
+
+ if (options)
+ opts_len = (u32)options->size;
+
+ if (hdr)
+ hdr_len = hdr->size;
+
+ if (pdata)
+ pd_len = pdata->size;
+
+ if (cm_node->vlan_id < VLAN_N_VID)
+ eth_hlen += 4;
+
+ if (cm_node->ipv4)
+ pktsize = sizeof(*iph) + sizeof(*tcph);
+ else
+ pktsize = sizeof(*ip6h) + sizeof(*tcph);
+ pktsize += opts_len + hdr_len + pd_len;
+
+ memset(buf, 0, eth_hlen + pktsize);
+
+ sqbuf->totallen = pktsize + eth_hlen;
+ sqbuf->maclen = eth_hlen;
+ sqbuf->tcphlen = sizeof(*tcph) + opts_len;
+ sqbuf->scratch = cm_node;
+
+ ethh = (struct ethhdr *)buf;
+ buf += eth_hlen;
+
+ if (cm_node->do_lpb)
+ sqbuf->do_lpb = true;
+
+ if (cm_node->ipv4) {
+ sqbuf->ipv4 = true;
+
+ iph = (struct iphdr *)buf;
+ buf += sizeof(*iph);
+ tcph = (struct tcphdr *)buf;
+ buf += sizeof(*tcph);
+
+ ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
+ ether_addr_copy(ethh->h_source, cm_node->loc_mac);
+ if (cm_node->vlan_id < VLAN_N_VID) {
+ ((struct vlan_ethhdr *)ethh)->h_vlan_proto =
+ htons(ETH_P_8021Q);
+ vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) |
+ cm_node->vlan_id;
+ ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
+
+ ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto =
+ htons(ETH_P_IP);
+ } else {
+ ethh->h_proto = htons(ETH_P_IP);
+ }
+
+ iph->version = IPVERSION;
+ iph->ihl = 5; /* 5 * 4-byte words, IP header len */
+ iph->tos = cm_node->tos;
+ iph->tot_len = htons(pktsize);
+ iph->id = htons(++cm_node->tcp_cntxt.loc_id);
+
+ iph->frag_off = htons(0x4000);
+ iph->ttl = 0x40;
+ iph->protocol = IPPROTO_TCP;
+ iph->saddr = htonl(cm_node->loc_addr[0]);
+ iph->daddr = htonl(cm_node->rem_addr[0]);
+ } else {
+ sqbuf->ipv4 = false;
+ ip6h = (struct ipv6hdr *)buf;
+ buf += sizeof(*ip6h);
+ tcph = (struct tcphdr *)buf;
+ buf += sizeof(*tcph);
+
+ ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
+ ether_addr_copy(ethh->h_source, cm_node->loc_mac);
+ if (cm_node->vlan_id < VLAN_N_VID) {
+ ((struct vlan_ethhdr *)ethh)->h_vlan_proto =
+ htons(ETH_P_8021Q);
+ vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) |
+ cm_node->vlan_id;
+ ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
+ ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto =
+ htons(ETH_P_IPV6);
+ } else {
+ ethh->h_proto = htons(ETH_P_IPV6);
+ }
+ ip6h->version = 6;
+ ip6h->priority = cm_node->tos >> 4;
+ ip6h->flow_lbl[0] = cm_node->tos << 4;
+ ip6h->flow_lbl[1] = 0;
+ ip6h->flow_lbl[2] = 0;
+ ip6h->payload_len = htons(pktsize - sizeof(*ip6h));
+ ip6h->nexthdr = 6;
+ ip6h->hop_limit = 128;
+ irdma_copy_ip_htonl(ip6h->saddr.in6_u.u6_addr32,
+ cm_node->loc_addr);
+ irdma_copy_ip_htonl(ip6h->daddr.in6_u.u6_addr32,
+ cm_node->rem_addr);
+ }
+
+ tcph->source = htons(cm_node->loc_port);
+ tcph->dest = htons(cm_node->rem_port);
+ tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
+
+ if (flags & SET_ACK) {
+ cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
+ tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
+ tcph->ack = 1;
+ } else {
+ tcph->ack_seq = 0;
+ }
+
+ if (flags & SET_SYN) {
+ cm_node->tcp_cntxt.loc_seq_num++;
+ tcph->syn = 1;
+ } else {
+ cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len;
+ }
+
+ if (flags & SET_FIN) {
+ cm_node->tcp_cntxt.loc_seq_num++;
+ tcph->fin = 1;
+ }
+
+ if (flags & SET_RST)
+ tcph->rst = 1;
+
+ tcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2);
+ sqbuf->tcphlen = tcph->doff << 2;
+ tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
+ tcph->urg_ptr = 0;
+
+ if (opts_len) {
+ memcpy(buf, options->addr, opts_len);
+ buf += opts_len;
+ }
+
+ if (hdr_len) {
+ memcpy(buf, hdr->addr, hdr_len);
+ buf += hdr_len;
+ }
+
+ if (pdata && pdata->addr)
+ memcpy(buf, pdata->addr, pdata->size);
+
+ refcount_set(&sqbuf->refcount, 1);
+
+ print_hex_dump_debug("ILQ: TRANSMIT ILQ BUFFER", DUMP_PREFIX_OFFSET,
+ 16, 8, sqbuf->mem.va, sqbuf->totallen, false);
+ return sqbuf;
+}
+
+/**
+ * irdma_send_reset - Send RST packet
+ * @cm_node: connection's node
+ */
+int irdma_send_reset(struct irdma_cm_node *cm_node)
+{
+ struct irdma_puda_buf *sqbuf;
+ int flags = SET_RST | SET_ACK;
+
+ trace_irdma_send_reset(cm_node, 0, __builtin_return_address(0));
+ sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL, NULL, NULL,
+ flags);
+ if (!sqbuf)
+ return -ENOMEM;
+
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: caller: %pS cm_node %p cm_id=%p accel=%d state=%d rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4\n",
+ __builtin_return_address(0), cm_node, cm_node->cm_id,
+ cm_node->accelerated, cm_node->state, cm_node->rem_port,
+ cm_node->loc_port, cm_node->rem_addr, cm_node->loc_addr);
+
+ return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 0,
+ 1);
+}
+
+/**
+ * irdma_active_open_err - send event for active side cm error
+ * @cm_node: connection's node
+ * @reset: Flag to send reset or not
+ */
+static void irdma_active_open_err(struct irdma_cm_node *cm_node, bool reset)
+{
+ trace_irdma_active_open_err(cm_node, reset,
+ __builtin_return_address(0));
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->cm_core->stats_connect_errs++;
+ if (reset) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: cm_node=%p state=%d\n", cm_node,
+ cm_node->state);
+ refcount_inc(&cm_node->refcnt);
+ irdma_send_reset(cm_node);
+ }
+
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_create_event(cm_node, IRDMA_CM_EVENT_ABORTED);
+}
+
+/**
+ * irdma_passive_open_err - handle passive side cm error
+ * @cm_node: connection's node
+ * @reset: send reset or just free cm_node
+ */
+static void irdma_passive_open_err(struct irdma_cm_node *cm_node, bool reset)
+{
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->cm_core->stats_passive_errs++;
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ ibdev_dbg(&cm_node->iwdev->ibdev, "CM: cm_node=%p state =%d\n",
+ cm_node, cm_node->state);
+ trace_irdma_passive_open_err(cm_node, reset,
+ __builtin_return_address(0));
+ if (reset)
+ irdma_send_reset(cm_node);
+ else
+ irdma_rem_ref_cm_node(cm_node);
+}
+
+/**
+ * irdma_event_connect_error - to create connect error event
+ * @event: cm information for connect event
+ */
+static void irdma_event_connect_error(struct irdma_cm_event *event)
+{
+ struct irdma_qp *iwqp;
+ struct iw_cm_id *cm_id;
+
+ cm_id = event->cm_node->cm_id;
+ if (!cm_id)
+ return;
+
+ iwqp = cm_id->provider_data;
+
+ if (!iwqp || !iwqp->iwdev)
+ return;
+
+ iwqp->cm_id = NULL;
+ cm_id->provider_data = NULL;
+ irdma_send_cm_event(event->cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
+ -ECONNRESET);
+ irdma_rem_ref_cm_node(event->cm_node);
+}
+
+/**
+ * irdma_process_options - process options from TCP header
+ * @cm_node: connection's node
+ * @optionsloc: pointer to start of options
+ * @optionsize: size of all options
+ * @syn_pkt: flag if syn packet
+ */
+static int irdma_process_options(struct irdma_cm_node *cm_node, u8 *optionsloc,
+ u32 optionsize, u32 syn_pkt)
+{
+ u32 tmp;
+ u32 offset = 0;
+ union all_known_options *all_options;
+ char got_mss_option = 0;
+
+ while (offset < optionsize) {
+ all_options = (union all_known_options *)(optionsloc + offset);
+ switch (all_options->base.optionnum) {
+ case OPTION_NUM_EOL:
+ offset = optionsize;
+ break;
+ case OPTION_NUM_NONE:
+ offset += 1;
+ continue;
+ case OPTION_NUM_MSS:
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: MSS Length: %d Offset: %d Size: %d\n",
+ all_options->mss.len, offset, optionsize);
+ got_mss_option = 1;
+ if (all_options->mss.len != 4)
+ return -EINVAL;
+ tmp = ntohs(all_options->mss.mss);
+ if ((cm_node->ipv4 &&
+ (tmp + IRDMA_MTU_TO_MSS_IPV4) < IRDMA_MIN_MTU_IPV4) ||
+ (!cm_node->ipv4 &&
+ (tmp + IRDMA_MTU_TO_MSS_IPV6) < IRDMA_MIN_MTU_IPV6))
+ return -EINVAL;
+ if (tmp < cm_node->tcp_cntxt.mss)
+ cm_node->tcp_cntxt.mss = tmp;
+ break;
+ case OPTION_NUM_WINDOW_SCALE:
+ cm_node->tcp_cntxt.snd_wscale =
+ all_options->windowscale.shiftcount;
+ break;
+ default:
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: Unsupported TCP Option: %x\n",
+ all_options->base.optionnum);
+ break;
+ }
+ offset += all_options->base.len;
+ }
+ if (!got_mss_option && syn_pkt)
+ cm_node->tcp_cntxt.mss = IRDMA_CM_DEFAULT_MSS;
+
+ return 0;
+}
+
+/**
+ * irdma_handle_tcp_options - setup TCP context info after parsing TCP options
+ * @cm_node: connection's node
+ * @tcph: pointer to tcp header
+ * @optionsize: size of options rcvd
+ * @passive: active or passive flag
+ */
+static int irdma_handle_tcp_options(struct irdma_cm_node *cm_node,
+ struct tcphdr *tcph, int optionsize,
+ int passive)
+{
+ u8 *optionsloc = (u8 *)&tcph[1];
+ int ret;
+
+ if (optionsize) {
+ ret = irdma_process_options(cm_node, optionsloc, optionsize,
+ (u32)tcph->syn);
+ if (ret) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: Node %p, Sending Reset\n", cm_node);
+ if (passive)
+ irdma_passive_open_err(cm_node, true);
+ else
+ irdma_active_open_err(cm_node, true);
+ return ret;
+ }
+ }
+
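+ /* apply the peer's window-scale option to the advertised window */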
+ cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window)
+ << cm_node->tcp_cntxt.snd_wscale;
+
+ if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd)
+ cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;
+
+ return 0;
+}
+
+/**
+ * irdma_build_mpa_v1 - build a MPA V1 frame
+ * @cm_node: connection's node
+ * @start_addr: address where to build frame
+ * @mpa_key: to do read0 or write0
+ */
+static void irdma_build_mpa_v1(struct irdma_cm_node *cm_node, void *start_addr,
+ u8 mpa_key)
+{
+ struct ietf_mpa_v1 *mpa_frame = start_addr;
+
+ switch (mpa_key) {
+ case MPA_KEY_REQUEST:
+ memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
+ break;
+ case MPA_KEY_REPLY:
+ memcpy(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
+ break;
+ default:
+ break;
+ }
+ mpa_frame->flags = IETF_MPA_FLAGS_CRC;
+ mpa_frame->rev = cm_node->mpa_frame_rev;
+ mpa_frame->priv_data_len = htons(cm_node->pdata.size);
+}
+
+/**
+ * irdma_build_mpa_v2 - build a MPA V2 frame
+ * @cm_node: connection's node
+ * @start_addr: buffer start address
+ * @mpa_key: to do read0 or write0
+ */
+static void irdma_build_mpa_v2(struct irdma_cm_node *cm_node, void *start_addr,
+ u8 mpa_key)
+{
+ struct ietf_mpa_v2 *mpa_frame = start_addr;
+ struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
+ u16 ctrl_ird, ctrl_ord;
+
+ /* initialize the upper 5 bytes of the frame */
+ irdma_build_mpa_v1(cm_node, start_addr, mpa_key);
+ mpa_frame->flags |= IETF_MPA_V2_FLAG;
+ if (cm_node->iwdev->iw_ooo) {
+ mpa_frame->flags |= IETF_MPA_FLAGS_MARKERS;
+ cm_node->rcv_mark_en = true;
+ }
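+ /* the RTR message is counted as part of the private data length */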
+ mpa_frame->priv_data_len = cpu_to_be16(be16_to_cpu(mpa_frame->priv_data_len) +
+ IETF_RTR_MSG_SIZE);
+
+ /* initialize RTR msg */
+ if (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) {
+ ctrl_ird = IETF_NO_IRD_ORD;
+ ctrl_ord = IETF_NO_IRD_ORD;
+ } else {
+ ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
+ IETF_NO_IRD_ORD :
+ cm_node->ird_size;
+ ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
+ IETF_NO_IRD_ORD :
+ cm_node->ord_size;
+ }
+ ctrl_ird |= IETF_PEER_TO_PEER;
+
+ switch (mpa_key) {
+ case MPA_KEY_REQUEST:
+ ctrl_ord |= IETF_RDMA0_WRITE;
+ ctrl_ord |= IETF_RDMA0_READ;
+ break;
+ case MPA_KEY_REPLY:
+ switch (cm_node->send_rdma0_op) {
+ case SEND_RDMA_WRITE_ZERO:
+ ctrl_ord |= IETF_RDMA0_WRITE;
+ break;
+ case SEND_RDMA_READ_ZERO:
+ ctrl_ord |= IETF_RDMA0_READ;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ rtr_msg->ctrl_ird = htons(ctrl_ird);
+ rtr_msg->ctrl_ord = htons(ctrl_ord);
+}
+
+/**
+ * irdma_cm_build_mpa_frame - build mpa frame for mpa version 1 or version 2
+ * @cm_node: connection's node
+ * @mpa: mpa data buffer
+ * @mpa_key: to do read0 or write0
+ */
+static int irdma_cm_build_mpa_frame(struct irdma_cm_node *cm_node,
+ struct irdma_kmem_info *mpa, u8 mpa_key)
+{
+ int hdr_len = 0;
+
+ switch (cm_node->mpa_frame_rev) {
+ case IETF_MPA_V1:
+ hdr_len = sizeof(struct ietf_mpa_v1);
+ irdma_build_mpa_v1(cm_node, mpa->addr, mpa_key);
+ break;
+ case IETF_MPA_V2:
+ hdr_len = sizeof(struct ietf_mpa_v2);
+ irdma_build_mpa_v2(cm_node, mpa->addr, mpa_key);
+ break;
+ default:
+ break;
+ }
+
+ return hdr_len;
+}
+
+/**
+ * irdma_send_mpa_request - active node send mpa request to passive node
+ * @cm_node: connection's node
+ */
+static int irdma_send_mpa_request(struct irdma_cm_node *cm_node)
+{
+ struct irdma_puda_buf *sqbuf;
+
+ cm_node->mpa_hdr.addr = &cm_node->mpa_v2_frame;
+ cm_node->mpa_hdr.size = irdma_cm_build_mpa_frame(cm_node,
+ &cm_node->mpa_hdr,
+ MPA_KEY_REQUEST);
+ if (!cm_node->mpa_hdr.size) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: mpa size = %d\n", cm_node->mpa_hdr.size);
+ return -EINVAL;
+ }
+
+ sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL,
+ &cm_node->mpa_hdr,
+ &cm_node->pdata, SET_ACK);
+ if (!sqbuf)
+ return -ENOMEM;
+
+ return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1,
+ 0);
+}
+
+/**
+ * irdma_send_mpa_reject - send mpa reject frame to the remote peer
+ * @cm_node: connection's node
+ * @pdata: reject data for connection
+ * @plen: length of reject data
+ */
+static int irdma_send_mpa_reject(struct irdma_cm_node *cm_node,
+ const void *pdata, u8 plen)
+{
+ struct irdma_puda_buf *sqbuf;
+ struct irdma_mpa_priv_info priv_info;
+
+ cm_node->mpa_hdr.addr = &cm_node->mpa_v2_frame;
+ cm_node->mpa_hdr.size = irdma_cm_build_mpa_frame(cm_node,
+ &cm_node->mpa_hdr,
+ MPA_KEY_REPLY);
+
+ cm_node->mpa_frame.flags |= IETF_MPA_FLAGS_REJECT;
+ priv_info.addr = pdata;
+ priv_info.size = plen;
+
+ sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL,
+ &cm_node->mpa_hdr, &priv_info,
+ SET_ACK | SET_FIN);
+ if (!sqbuf)
+ return -ENOMEM;
+
+ cm_node->state = IRDMA_CM_STATE_FIN_WAIT1;
+
+ return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1,
+ 0);
+}
+
+/**
+ * irdma_negotiate_mpa_v2_ird_ord - negotiate MPAv2 IRD/ORD
+ * @cm_node: connection's node
+ * @buf: Data pointer
+ */
+static int irdma_negotiate_mpa_v2_ird_ord(struct irdma_cm_node *cm_node,
+ u8 *buf)
+{
+ struct ietf_mpa_v2 *mpa_v2_frame;
+ struct ietf_rtr_msg *rtr_msg;
+ u16 ird_size;
+ u16 ord_size;
+ u16 ctrl_ord;
+ u16 ctrl_ird;
+
+ mpa_v2_frame = (struct ietf_mpa_v2 *)buf;
+ rtr_msg = &mpa_v2_frame->rtr_msg;
+
+ /* parse rtr message */
+ ctrl_ord = ntohs(rtr_msg->ctrl_ord);
+ ctrl_ird = ntohs(rtr_msg->ctrl_ird);
+ ird_size = ctrl_ird & IETF_NO_IRD_ORD;
+ ord_size = ctrl_ord & IETF_NO_IRD_ORD;
+
+ if (!(ctrl_ird & IETF_PEER_TO_PEER))
+ return -EOPNOTSUPP;
+
+ if (ird_size == IETF_NO_IRD_ORD || ord_size == IETF_NO_IRD_ORD) {
+ cm_node->mpav2_ird_ord = IETF_NO_IRD_ORD;
+ goto negotiate_done;
+ }
+
+ if (cm_node->state != IRDMA_CM_STATE_MPAREQ_SENT) {
+ /* responder */
+ if (!ord_size && (ctrl_ord & IETF_RDMA0_READ))
+ cm_node->ird_size = 1;
+ if (cm_node->ord_size > ird_size)
+ cm_node->ord_size = ird_size;
+ } else {
+ /* initiator */
+ if (!ird_size && (ctrl_ord & IETF_RDMA0_READ))
+ /* Remote peer doesn't support RDMA0_READ */
+ return -EOPNOTSUPP;
+
+ if (cm_node->ord_size > ird_size)
+ cm_node->ord_size = ird_size;
+
+ if (cm_node->ird_size < ord_size)
+ /* no resources available */
+ return -EINVAL;
+ }
+
+negotiate_done:
+ if (ctrl_ord & IETF_RDMA0_READ)
+ cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
+ else if (ctrl_ord & IETF_RDMA0_WRITE)
+ cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
+ else
+ /* Not supported RDMA0 operation */
+ return -EOPNOTSUPP;
+
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: MPAV2 Negotiated ORD: %d, IRD: %d\n",
+ cm_node->ord_size, cm_node->ird_size);
+ trace_irdma_negotiate_mpa_v2(cm_node);
+ return 0;
+}
+
+/**
+ * irdma_parse_mpa - process an IETF MPA frame
+ * @cm_node: connection's node
+ * @buf: Data pointer
+ * @type: to return accept or reject
+ * @len: Len of mpa buffer
+ */
+static int irdma_parse_mpa(struct irdma_cm_node *cm_node, u8 *buf, u32 *type,
+ u32 len)
+{
+ struct ietf_mpa_v1 *mpa_frame;
+ int mpa_hdr_len, priv_data_len, ret;
+
+ *type = IRDMA_MPA_REQUEST_ACCEPT;
+
+ if (len < sizeof(struct ietf_mpa_v1)) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: ietf buffer small (%x)\n", len);
+ return -EINVAL;
+ }
+
+ mpa_frame = (struct ietf_mpa_v1 *)buf;
+ mpa_hdr_len = sizeof(struct ietf_mpa_v1);
+ priv_data_len = ntohs(mpa_frame->priv_data_len);
+
+ if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: private_data too big %d\n", priv_data_len);
+ return -EOVERFLOW;
+ }
+
+ if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: unsupported mpa rev = %d\n", mpa_frame->rev);
+ return -EINVAL;
+ }
+
+ if (mpa_frame->rev > cm_node->mpa_frame_rev) {
+ ibdev_dbg(&cm_node->iwdev->ibdev, "CM: rev %d\n",
+ mpa_frame->rev);
+ return -EINVAL;
+ }
+
+ cm_node->mpa_frame_rev = mpa_frame->rev;
+ if (cm_node->state != IRDMA_CM_STATE_MPAREQ_SENT) {
+ if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ,
+ IETF_MPA_KEY_SIZE)) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: Unexpected MPA Key received\n");
+ return -EINVAL;
+ }
+ } else {
+ if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP,
+ IETF_MPA_KEY_SIZE)) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: Unexpected MPA Key received\n");
+ return -EINVAL;
+ }
+ }
+
+ if (priv_data_len + mpa_hdr_len > len) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: ietf buffer len(%x + %x != %x)\n",
+ priv_data_len, mpa_hdr_len, len);
+ return -EOVERFLOW;
+ }
+
+ if (len > IRDMA_MAX_CM_BUF) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: ietf buffer large len = %d\n", len);
+ return -EOVERFLOW;
+ }
+
+ switch (mpa_frame->rev) {
+ case IETF_MPA_V2:
+ mpa_hdr_len += IETF_RTR_MSG_SIZE;
+ ret = irdma_negotiate_mpa_v2_ird_ord(cm_node, buf);
+ if (ret)
+ return ret;
+ break;
+ case IETF_MPA_V1:
+ default:
+ break;
+ }
+
+ memcpy(cm_node->pdata_buf, buf + mpa_hdr_len, priv_data_len);
+ cm_node->pdata.size = priv_data_len;
+
+ if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT)
+ *type = IRDMA_MPA_REQUEST_REJECT;
+
+ if (mpa_frame->flags & IETF_MPA_FLAGS_MARKERS)
+ cm_node->snd_mark_en = true;
+
+ return 0;
+}
+
+/**
+ * irdma_schedule_cm_timer - schedule a send or close timer for a cm node
+ * @cm_node: connection's node
+ * @sqbuf: buffer to send
+ * @type: if it is send or close
+ * @send_retrans: if rexmits to be done
+ * @close_when_complete: is cm_node to be removed
+ *
+ * note - cm_node needs to be protected before calling this. Encase in:
+ * irdma_rem_ref_cm_node(cm_core, cm_node);
+ * irdma_schedule_cm_timer(...)
+ * refcount_inc(&cm_node->refcnt);
+ */
+int irdma_schedule_cm_timer(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *sqbuf,
+ enum irdma_timer_type type, int send_retrans,
+ int close_when_complete)
+{
+ struct irdma_sc_vsi *vsi = &cm_node->iwdev->vsi;
+ struct irdma_cm_core *cm_core = cm_node->cm_core;
+ struct irdma_timer_entry *new_send;
+ u32 was_timer_set;
+ unsigned long flags;
+
+ new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
+ if (!new_send) {
+ if (type != IRDMA_TIMER_TYPE_CLOSE)
+ irdma_free_sqbuf(vsi, sqbuf);
+ return -ENOMEM;
+ }
+
+ new_send->retrycount = IRDMA_DEFAULT_RETRYS;
+ new_send->retranscount = IRDMA_DEFAULT_RETRANS;
+ new_send->sqbuf = sqbuf;
+ new_send->timetosend = jiffies;
+ new_send->type = type;
+ new_send->send_retrans = send_retrans;
+ new_send->close_when_complete = close_when_complete;
+
+ if (type == IRDMA_TIMER_TYPE_CLOSE) {
+ new_send->timetosend += (HZ / 10);
+ if (cm_node->close_entry) {
+ kfree(new_send);
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: already close entry\n");
+ return -EINVAL;
+ }
+
+ cm_node->close_entry = new_send;
+ } else { /* type == IRDMA_TIMER_TYPE_SEND */
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ cm_node->send_entry = new_send;
+ refcount_inc(&cm_node->refcnt);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ new_send->timetosend = jiffies + IRDMA_RETRY_TIMEOUT;
+
+ refcount_inc(&sqbuf->refcount);
+ irdma_puda_send_buf(vsi->ilq, sqbuf);
+ if (!send_retrans) {
+ irdma_cleanup_retrans_entry(cm_node);
+ if (close_when_complete)
+ irdma_rem_ref_cm_node(cm_node);
+ return 0;
+ }
+ }
+
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ was_timer_set = timer_pending(&cm_core->tcp_timer);
+
+ if (!was_timer_set) {
+ cm_core->tcp_timer.expires = new_send->timetosend;
+ add_timer(&cm_core->tcp_timer);
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ return 0;
+}
+
+/**
+ * irdma_retrans_expired - Could not rexmit the packet
+ * @cm_node: connection's node
+ */
+static void irdma_retrans_expired(struct irdma_cm_node *cm_node)
+{
+ enum irdma_cm_node_state state = cm_node->state;
+
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ switch (state) {
+ case IRDMA_CM_STATE_SYN_RCVD:
+ case IRDMA_CM_STATE_CLOSING:
+ irdma_rem_ref_cm_node(cm_node);
+ break;
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ case IRDMA_CM_STATE_LAST_ACK:
+ irdma_send_reset(cm_node);
+ break;
+ default:
+ refcount_inc(&cm_node->refcnt);
+ irdma_send_reset(cm_node);
+ irdma_create_event(cm_node, IRDMA_CM_EVENT_ABORTED);
+ break;
+ }
+}
+
+/**
+ * irdma_handle_close_entry - for handling retry/timeouts
+ * @cm_node: connection's node
+ * @rem_node: flag for remove cm_node
+ */
+static void irdma_handle_close_entry(struct irdma_cm_node *cm_node,
+ u32 rem_node)
+{
+ struct irdma_timer_entry *close_entry = cm_node->close_entry;
+ struct irdma_qp *iwqp;
+ unsigned long flags;
+
+ if (!close_entry)
+ return;
+ iwqp = (struct irdma_qp *)close_entry->sqbuf;
+ if (iwqp) {
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->cm_id) {
+ iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
+ iwqp->hw_iwarp_state = IRDMA_QP_STATE_ERROR;
+ iwqp->last_aeq = IRDMA_AE_RESET_SENT;
+ iwqp->ibqp_state = IB_QPS_ERR;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ irdma_cm_disconn(iwqp);
+ } else {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ }
+ } else if (rem_node) {
+ /* TIME_WAIT state */
+ irdma_rem_ref_cm_node(cm_node);
+ }
+
+ kfree(close_entry);
+ cm_node->close_entry = NULL;
+}
+
+/**
+ * irdma_cm_timer_tick - system's timer expired callback
+ * @t: Pointer to timer_list
+ */
+static void irdma_cm_timer_tick(struct timer_list *t)
+{
+ unsigned long nexttimeout = jiffies + IRDMA_LONG_TIME;
+ struct irdma_cm_node *cm_node;
+ struct irdma_timer_entry *send_entry, *close_entry;
+ struct list_head *list_core_temp;
+ struct list_head *list_node;
+ struct irdma_cm_core *cm_core = timer_container_of(cm_core, t,
+ tcp_timer);
+ struct irdma_sc_vsi *vsi;
+ u32 settimer = 0;
+ unsigned long timetosend;
+ unsigned long flags;
+ struct list_head timer_list;
+
+ INIT_LIST_HEAD(&timer_list);
+
+ rcu_read_lock();
+ irdma_timer_list_prep(cm_core, &timer_list);
+ rcu_read_unlock();
+
+ list_for_each_safe (list_node, list_core_temp, &timer_list) {
+ cm_node = container_of(list_node, struct irdma_cm_node,
+ timer_entry);
+ close_entry = cm_node->close_entry;
+
+ if (close_entry) {
+ if (time_after(close_entry->timetosend, jiffies)) {
+ if (nexttimeout > close_entry->timetosend ||
+ !settimer) {
+ nexttimeout = close_entry->timetosend;
+ settimer = 1;
+ }
+ } else {
+ irdma_handle_close_entry(cm_node, 1);
+ }
+ }
+
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+
+ send_entry = cm_node->send_entry;
+ if (!send_entry)
+ goto done;
+ if (time_after(send_entry->timetosend, jiffies)) {
+ if (cm_node->state != IRDMA_CM_STATE_OFFLOADED) {
+ if (nexttimeout > send_entry->timetosend ||
+ !settimer) {
+ nexttimeout = send_entry->timetosend;
+ settimer = 1;
+ }
+ } else {
+ irdma_free_retrans_entry(cm_node);
+ }
+ goto done;
+ }
+
+ if (cm_node->state == IRDMA_CM_STATE_OFFLOADED ||
+ cm_node->state == IRDMA_CM_STATE_CLOSED) {
+ irdma_free_retrans_entry(cm_node);
+ goto done;
+ }
+
+ if (!send_entry->retranscount || !send_entry->retrycount) {
+ irdma_free_retrans_entry(cm_node);
+
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock,
+ flags);
+ irdma_retrans_expired(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ goto done;
+ }
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+
+ vsi = &cm_node->iwdev->vsi;
+ if (!cm_node->ack_rcvd) {
+ refcount_inc(&send_entry->sqbuf->refcount);
+ irdma_puda_send_buf(vsi->ilq, send_entry->sqbuf);
+ cm_node->cm_core->stats_pkt_retrans++;
+ }
+
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ if (send_entry->send_retrans) {
+ send_entry->retranscount--;
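+ /*
+ * exponential backoff: each retransmission doubles the
+ * timeout, capped by the min() against IRDMA_MAX_TIMEOUT
+ */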
+ timetosend = (IRDMA_RETRY_TIMEOUT <<
+ (IRDMA_DEFAULT_RETRANS -
+ send_entry->retranscount));
+
+ send_entry->timetosend = jiffies +
+ min(timetosend, IRDMA_MAX_TIMEOUT);
+ if (nexttimeout > send_entry->timetosend || !settimer) {
+ nexttimeout = send_entry->timetosend;
+ settimer = 1;
+ }
+ } else {
+ int close_when_complete;
+
+ close_when_complete = send_entry->close_when_complete;
+ irdma_free_retrans_entry(cm_node);
+ if (close_when_complete)
+ irdma_rem_ref_cm_node(cm_node);
+ }
+done:
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ irdma_rem_ref_cm_node(cm_node);
+ }
+
+ if (settimer) {
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ if (!timer_pending(&cm_core->tcp_timer)) {
+ cm_core->tcp_timer.expires = nexttimeout;
+ add_timer(&cm_core->tcp_timer);
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+ }
+}
+
+/**
+ * irdma_send_syn - send SYN packet
+ * @cm_node: connection's node
+ * @sendack: flag to set ACK bit or not
+ */
+int irdma_send_syn(struct irdma_cm_node *cm_node, u32 sendack)
+{
+ struct irdma_puda_buf *sqbuf;
+ int flags = SET_SYN;
+ char optionsbuf[sizeof(struct option_mss) +
+ sizeof(struct option_windowscale) +
+ sizeof(struct option_base) + TCP_OPTIONS_PADDING];
+ struct irdma_kmem_info opts;
+ int optionssize = 0;
+ /* Sending MSS option */
+ union all_known_options *options;
+
+ opts.addr = optionsbuf;
+ if (!cm_node)
+ return -EINVAL;
+
+ options = (union all_known_options *)&optionsbuf[optionssize];
+ options->mss.optionnum = OPTION_NUM_MSS;
+ options->mss.len = sizeof(struct option_mss);
+ options->mss.mss = htons(cm_node->tcp_cntxt.mss);
+ optionssize += sizeof(struct option_mss);
+
+ options = (union all_known_options *)&optionsbuf[optionssize];
+ options->windowscale.optionnum = OPTION_NUM_WINDOW_SCALE;
+ options->windowscale.len = sizeof(struct option_windowscale);
+ options->windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale;
+ optionssize += sizeof(struct option_windowscale);
+ options = (union all_known_options *)&optionsbuf[optionssize];
+ options->eol = OPTION_NUM_EOL;
+ optionssize += 1;
+
+ if (sendack)
+ flags |= SET_ACK;
+
+ opts.size = optionssize;
+
+ sqbuf = cm_node->cm_core->form_cm_frame(cm_node, &opts, NULL, NULL,
+ flags);
+ if (!sqbuf)
+ return -ENOMEM;
+
+ return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1,
+ 0);
+}
+
+/**
+ * irdma_send_ack - Send ACK packet
+ * @cm_node: connection's node
+ */
+void irdma_send_ack(struct irdma_cm_node *cm_node)
+{
+ struct irdma_puda_buf *sqbuf;
+ struct irdma_sc_vsi *vsi = &cm_node->iwdev->vsi;
+
+ sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL, NULL, NULL,
+ SET_ACK);
+ if (sqbuf)
+ irdma_puda_send_buf(vsi->ilq, sqbuf);
+}
+
+/**
+ * irdma_send_fin - Send FIN pkt
+ * @cm_node: connection's node
+ */
+static int irdma_send_fin(struct irdma_cm_node *cm_node)
+{
+ struct irdma_puda_buf *sqbuf;
+
+ sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL, NULL, NULL,
+ SET_ACK | SET_FIN);
+ if (!sqbuf)
+ return -ENOMEM;
+
+ return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1,
+ 0);
+}
+
+/**
+ * irdma_find_listener - find a cm node listening on this addr-port pair
+ * @cm_core: cm's core
+ * @dst_addr: listener ip addr
+ * @ipv4: flag indicating IPv4 when true
+ * @dst_port: listener tcp port num
+ * @vlan_id: virtual LAN ID
+ * @listener_state: state to match with listen node's
+ */
+static struct irdma_cm_listener *
+irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, bool ipv4,
+ u16 dst_port, u16 vlan_id,
+ enum irdma_cm_listener_state listener_state)
+{
+ struct irdma_cm_listener *listen_node;
+ static const u32 ip_zero[4] = { 0, 0, 0, 0 };
+ u32 listen_addr[4];
+ u16 listen_port;
+ unsigned long flags;
+
+ /* walk listener list and find a listener matching this addr/port pair */
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_for_each_entry (listen_node, &cm_core->listen_list, list) {
+ memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
+ listen_port = listen_node->loc_port;
+ if (listen_node->ipv4 != ipv4 || listen_port != dst_port ||
+ !(listener_state & listen_node->listener_state))
+ continue;
+ /* compare node pair, return node handle if a match */
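+ /* an all-zero listen address is a wildcard that matches any dst_addr */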
+ if (!memcmp(listen_addr, ip_zero, sizeof(listen_addr)) ||
+ (!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) &&
+ vlan_id == listen_node->vlan_id)) {
+ refcount_inc(&listen_node->refcnt);
+ spin_unlock_irqrestore(&cm_core->listen_list_lock,
+ flags);
+ trace_irdma_find_listener(listen_node);
+ return listen_node;
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
+ return NULL;
+}
+
+/**
+ * irdma_del_multiple_qhash - Remove qhash and child listens
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_parent_listen_node: The parent listen node
+ */
+static int irdma_del_multiple_qhash(struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_listener *cm_parent_listen_node)
+{
+ struct irdma_cm_listener *child_listen_node;
+ struct list_head *pos, *tpos;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
+ list_for_each_safe (pos, tpos,
+ &cm_parent_listen_node->child_listen_list) {
+ child_listen_node = list_entry(pos, struct irdma_cm_listener,
+ child_listen_list);
+ if (child_listen_node->ipv4)
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: removing child listen for IP=%pI4, port=%d, vlan=%d\n",
+ child_listen_node->loc_addr,
+ child_listen_node->loc_port,
+ child_listen_node->vlan_id);
+ else
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: removing child listen for IP=%pI6, port=%d, vlan=%d\n",
+ child_listen_node->loc_addr,
+ child_listen_node->loc_port,
+ child_listen_node->vlan_id);
+ trace_irdma_del_multiple_qhash(child_listen_node);
+ list_del(pos);
+ memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
+ sizeof(cm_info->loc_addr));
+ cm_info->vlan_id = child_listen_node->vlan_id;
+ if (child_listen_node->qhash_set) {
+ ret = irdma_manage_qhash(iwdev, cm_info,
+ IRDMA_QHASH_TYPE_TCP_SYN,
+ IRDMA_QHASH_MANAGE_TYPE_DELETE,
+ NULL, false);
+ child_listen_node->qhash_set = false;
+ } else {
+ ret = 0;
+ }
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: Child listen node freed = %p\n",
+ child_listen_node);
+ kfree(child_listen_node);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
+ }
+ spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
+
+ return ret;
+}
+
+static u8 irdma_iw_get_vlan_prio(u32 *loc_addr, u8 prio, bool ipv4)
+{
+ struct net_device *ndev = NULL;
+
+ rcu_read_lock();
+ if (ipv4) {
+ ndev = ip_dev_find(&init_net, htonl(loc_addr[0]));
+ } else if (IS_ENABLED(CONFIG_IPV6)) {
+ struct net_device *ip_dev;
+ struct in6_addr laddr6;
+
+ irdma_copy_ip_htonl(laddr6.in6_u.u6_addr32, loc_addr);
+
+ for_each_netdev_rcu (&init_net, ip_dev) {
+ if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) {
+ ndev = ip_dev;
+ break;
+ }
+ }
+ }
+
+ if (!ndev)
+ goto done;
+ if (is_vlan_dev(ndev))
+ prio = (vlan_dev_get_egress_qos_mask(ndev, prio) & VLAN_PRIO_MASK)
+ >> VLAN_PRIO_SHIFT;
+ if (ipv4)
+ dev_put(ndev);
+
+done:
+ rcu_read_unlock();
+
+ return prio;
+}
+
+/**
+ * irdma_get_vlan_mac_ipv6 - Gets the vlan and mac
+ * @addr: local IPv6 address
+ * @vlan_id: vlan id for the given IPv6 address
+ * @mac: mac address for the given IPv6 address
+ *
+ * Returns the vlan id and mac for an IPv6 address.
+ */
+void irdma_get_vlan_mac_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
+{
+ struct net_device *ip_dev = NULL;
+ struct in6_addr laddr6;
+
+ if (!IS_ENABLED(CONFIG_IPV6))
+ return;
+
+ irdma_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);
+ if (vlan_id)
+ *vlan_id = 0xFFFF; /* Match rdma_vlan_dev_vlan_id() */
+ if (mac)
+ eth_zero_addr(mac);
+
+ rcu_read_lock();
+ for_each_netdev_rcu (&init_net, ip_dev) {
+ if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) {
+ if (vlan_id)
+ *vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
+ if (ip_dev->dev_addr && mac)
+ ether_addr_copy(mac, ip_dev->dev_addr);
+ break;
+ }
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * irdma_get_vlan_ipv4 - Returns the vlan_id for IPv4 address
+ * @addr: local IPv4 address
+ */
+u16 irdma_get_vlan_ipv4(u32 *addr)
+{
+ struct net_device *netdev;
+ u16 vlan_id = 0xFFFF;
+
+ netdev = ip_dev_find(&init_net, htonl(addr[0]));
+ if (netdev) {
+ vlan_id = rdma_vlan_dev_vlan_id(netdev);
+ dev_put(netdev);
+ }
+
+ return vlan_id;
+}
+
+/**
+ * irdma_add_mqh_6 - Adds multiple qhashes for IPv6
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_parent_listen_node: The parent listen node
+ *
+ * Adds a qhash and a child listen node for every IPv6 address
+ * on the adapter and adds the associated qhash filter
+ */
+static int irdma_add_mqh_6(struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_listener *cm_parent_listen_node)
+{
+ struct net_device *ip_dev;
+ struct inet6_dev *idev;
+ struct inet6_ifaddr *ifp, *tmp;
+ struct irdma_cm_listener *child_listen_node;
+ unsigned long flags;
+ int ret = 0;
+
+ rtnl_lock();
+ for_each_netdev(&init_net, ip_dev) {
+ if (!(ip_dev->flags & IFF_UP))
+ continue;
+
+ if (((rdma_vlan_dev_vlan_id(ip_dev) >= VLAN_N_VID) ||
+ (rdma_vlan_dev_real_dev(ip_dev) != iwdev->netdev)) &&
+ ip_dev != iwdev->netdev)
+ continue;
+
+ idev = __in6_dev_get(ip_dev);
+ if (!idev) {
+ ibdev_dbg(&iwdev->ibdev, "CM: idev == NULL\n");
+ break;
+ }
+ list_for_each_entry_safe (ifp, tmp, &idev->addr_list, if_list) {
+ ibdev_dbg(&iwdev->ibdev, "CM: IP=%pI6, vlan_id=%d, MAC=%pM\n",
+ &ifp->addr, rdma_vlan_dev_vlan_id(ip_dev),
+ ip_dev->dev_addr);
+ child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL);
+ ibdev_dbg(&iwdev->ibdev, "CM: Allocating child listener %p\n",
+ child_listen_node);
+ if (!child_listen_node) {
+ ibdev_dbg(&iwdev->ibdev, "CM: listener memory allocation\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
+ cm_parent_listen_node->vlan_id = cm_info->vlan_id;
+ memcpy(child_listen_node, cm_parent_listen_node,
+ sizeof(*child_listen_node));
+ irdma_copy_ip_ntohl(child_listen_node->loc_addr,
+ ifp->addr.in6_u.u6_addr32);
+ memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
+ sizeof(cm_info->loc_addr));
+ if (!iwdev->vsi.dscp_mode)
+ cm_info->user_pri =
+ irdma_iw_get_vlan_prio(child_listen_node->loc_addr,
+ cm_info->user_pri,
+ false);
+
+ ret = irdma_manage_qhash(iwdev, cm_info,
+ IRDMA_QHASH_TYPE_TCP_SYN,
+ IRDMA_QHASH_MANAGE_TYPE_ADD,
+ NULL, true);
+ if (ret) {
+ kfree(child_listen_node);
+ continue;
+ }
+
+ trace_irdma_add_mqh_6(iwdev, child_listen_node,
+ ip_dev->dev_addr);
+
+ child_listen_node->qhash_set = true;
+ spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
+ list_add(&child_listen_node->child_listen_list,
+ &cm_parent_listen_node->child_listen_list);
+ spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
+ }
+ }
+exit:
+ rtnl_unlock();
+
+ return ret;
+}
+
+/**
+ * irdma_add_mqh_4 - Adds multiple qhashes for IPv4
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_parent_listen_node: The parent listen node
+ *
+ * Adds a qhash and a child listen node for every IPv4 address
+ * on the adapter and adds the associated qhash filter
+ */
+static int irdma_add_mqh_4(struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_listener *cm_parent_listen_node)
+{
+ struct net_device *ip_dev;
+ struct in_device *idev;
+ struct irdma_cm_listener *child_listen_node;
+ unsigned long flags;
+ const struct in_ifaddr *ifa;
+ int ret = 0;
+
+ rtnl_lock();
+ for_each_netdev(&init_net, ip_dev) {
+ if (!(ip_dev->flags & IFF_UP))
+ continue;
+
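+ /* Only consider the iWarp netdev itself or VLAN devices stacked on top of it */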
+ if (((rdma_vlan_dev_vlan_id(ip_dev) >= VLAN_N_VID) ||
+ (rdma_vlan_dev_real_dev(ip_dev) != iwdev->netdev)) &&
+ ip_dev != iwdev->netdev)
+ continue;
+
+ idev = in_dev_get(ip_dev);
+ if (!idev)
+ continue;
+
+ in_dev_for_each_ifa_rtnl(ifa, idev) {
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n",
+ &ifa->ifa_address, rdma_vlan_dev_vlan_id(ip_dev),
+ ip_dev->dev_addr);
+ child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
+ ibdev_dbg(&iwdev->ibdev, "CM: Allocating child listener %p\n",
+ child_listen_node);
+ if (!child_listen_node) {
+ ibdev_dbg(&iwdev->ibdev, "CM: listener memory allocation\n");
+ in_dev_put(idev);
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
+ cm_parent_listen_node->vlan_id = cm_info->vlan_id;
+ memcpy(child_listen_node, cm_parent_listen_node,
+ sizeof(*child_listen_node));
+ child_listen_node->loc_addr[0] =
+ ntohl(ifa->ifa_address);
+ memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
+ sizeof(cm_info->loc_addr));
+ if (!iwdev->vsi.dscp_mode)
+ cm_info->user_pri =
+ irdma_iw_get_vlan_prio(child_listen_node->loc_addr,
+ cm_info->user_pri,
+ true);
+ ret = irdma_manage_qhash(iwdev, cm_info,
+ IRDMA_QHASH_TYPE_TCP_SYN,
+ IRDMA_QHASH_MANAGE_TYPE_ADD,
+ NULL, true);
+ if (ret) {
+ kfree(child_listen_node);
+ cm_parent_listen_node->cm_core
+ ->stats_listen_nodes_created--;
+ continue;
+ }
+
+ trace_irdma_add_mqh_4(iwdev, child_listen_node,
+ ip_dev->dev_addr);
+
+ child_listen_node->qhash_set = true;
+ spin_lock_irqsave(&iwdev->cm_core.listen_list_lock,
+ flags);
+ list_add(&child_listen_node->child_listen_list,
+ &cm_parent_listen_node->child_listen_list);
+ spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
+ }
+ in_dev_put(idev);
+ }
+exit:
+ rtnl_unlock();
+
+ return ret;
+}
+
+/**
+ * irdma_add_mqh - Adds multiple qhashes
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_listen_node: The parent listen node
+ */
+static int irdma_add_mqh(struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_listener *cm_listen_node)
+{
+ if (cm_info->ipv4)
+ return irdma_add_mqh_4(iwdev, cm_info, cm_listen_node);
+ else
+ return irdma_add_mqh_6(iwdev, cm_info, cm_listen_node);
+}
+
+/**
+ * irdma_reset_list_prep - add connection nodes slated for reset to list
+ * @cm_core: cm's core
+ * @listener: pointer to listener node
+ * @reset_list: list to which the selected cm_nodes are added
+ */
+static void irdma_reset_list_prep(struct irdma_cm_core *cm_core,
+ struct irdma_cm_listener *listener,
+ struct list_head *reset_list)
+{
+ struct irdma_cm_node *cm_node;
+ int bkt;
+
+ hash_for_each_rcu(cm_core->cm_hash_tbl, bkt, cm_node, list) {
+ if (cm_node->listener == listener &&
+ !cm_node->accelerated &&
+ refcount_inc_not_zero(&cm_node->refcnt))
+ list_add(&cm_node->reset_entry, reset_list);
+ }
+}
+
+/**
+ * irdma_dec_refcnt_listen - delete listener and associated cm nodes
+ * @cm_core: cm's core
+ * @listener: pointer to listener node
+ * @free_hanging_nodes: to free associated cm_nodes
+ * @apbvt_del: flag to delete the apbvt
+ */
+static int irdma_dec_refcnt_listen(struct irdma_cm_core *cm_core,
+ struct irdma_cm_listener *listener,
+ int free_hanging_nodes, bool apbvt_del)
+{
+ int err;
+ struct list_head *list_pos;
+ struct list_head *list_temp;
+ struct irdma_cm_node *cm_node;
+ struct list_head reset_list;
+ struct irdma_cm_info nfo;
+ enum irdma_cm_node_state old_state;
+ unsigned long flags;
+
+ trace_irdma_dec_refcnt_listen(listener, __builtin_return_address(0));
+ /* free non-accelerated child nodes for this listener */
+ INIT_LIST_HEAD(&reset_list);
+ if (free_hanging_nodes) {
+ rcu_read_lock();
+ irdma_reset_list_prep(cm_core, listener, &reset_list);
+ rcu_read_unlock();
+ }
+
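+ /* Reset or release each non-accelerated child node collected above */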
+ list_for_each_safe (list_pos, list_temp, &reset_list) {
+ cm_node = container_of(list_pos, struct irdma_cm_node,
+ reset_entry);
+ if (cm_node->state >= IRDMA_CM_STATE_FIN_WAIT1) {
+ irdma_rem_ref_cm_node(cm_node);
+ continue;
+ }
+
+ irdma_cleanup_retrans_entry(cm_node);
+ err = irdma_send_reset(cm_node);
+ if (err) {
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: send reset failed\n");
+ } else {
+ old_state = cm_node->state;
+ cm_node->state = IRDMA_CM_STATE_LISTENER_DESTROYED;
+ if (old_state != IRDMA_CM_STATE_MPAREQ_RCVD)
+ irdma_rem_ref_cm_node(cm_node);
+ }
+ }
+
+ if (refcount_dec_and_test(&listener->refcnt)) {
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_del(&listener->list);
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
+ if (apbvt_del)
+ irdma_del_apbvt(listener->iwdev,
+ listener->apbvt_entry);
+ memcpy(nfo.loc_addr, listener->loc_addr, sizeof(nfo.loc_addr));
+ nfo.loc_port = listener->loc_port;
+ nfo.ipv4 = listener->ipv4;
+ nfo.vlan_id = listener->vlan_id;
+ nfo.user_pri = listener->user_pri;
+ nfo.qh_qpid = listener->iwdev->vsi.ilq->qp_id;
+
+ if (!list_empty(&listener->child_listen_list)) {
+ irdma_del_multiple_qhash(listener->iwdev, &nfo,
+ listener);
+ } else {
+ if (listener->qhash_set)
+ irdma_manage_qhash(listener->iwdev,
+ &nfo,
+ IRDMA_QHASH_TYPE_TCP_SYN,
+ IRDMA_QHASH_MANAGE_TYPE_DELETE,
+ NULL, false);
+ }
+
+ cm_core->stats_listen_destroyed++;
+ cm_core->stats_listen_nodes_destroyed++;
+ ibdev_dbg(&listener->iwdev->ibdev,
+ "CM: loc_port=0x%04x loc_addr=%pI4 cm_listen_node=%p cm_id=%p qhash_set=%d vlan_id=%d apbvt_del=%d\n",
+ listener->loc_port, listener->loc_addr, listener,
+ listener->cm_id, listener->qhash_set,
+ listener->vlan_id, apbvt_del);
+ kfree(listener);
+ listener = NULL;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * irdma_cm_del_listen - delete a listener
+ * @cm_core: cm's core
+ * @listener: passive connection's listener
+ * @apbvt_del: flag to delete apbvt
+ */
+static int irdma_cm_del_listen(struct irdma_cm_core *cm_core,
+ struct irdma_cm_listener *listener,
+ bool apbvt_del)
+{
+ listener->listener_state = IRDMA_CM_LISTENER_PASSIVE_STATE;
+ listener->cm_id = NULL;
+
+ return irdma_dec_refcnt_listen(cm_core, listener, 1, apbvt_del);
+}
+
+/**
+ * irdma_addr_resolve_neigh - resolve neighbor address
+ * @iwdev: iwarp device structure
+ * @src_ip: local ip address
+ * @dst_ip: remote ip address
+ * @arpindex: index of the existing arp entry, if any
+ */
+static int irdma_addr_resolve_neigh(struct irdma_device *iwdev, u32 src_ip,
+ u32 dst_ip, int arpindex)
+{
+ struct rtable *rt;
+ struct neighbour *neigh;
+ int rc = arpindex;
+ __be32 dst_ipaddr = htonl(dst_ip);
+ __be32 src_ipaddr = htonl(src_ip);
+
+ rt = ip_route_output(&init_net, dst_ipaddr, src_ipaddr, 0, 0,
+ RT_SCOPE_UNIVERSE);
+ if (IS_ERR(rt)) {
+ ibdev_dbg(&iwdev->ibdev, "CM: ip_route_output fail\n");
+ return -EINVAL;
+ }
+
+ neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
+ if (!neigh)
+ goto exit;
+
+ if (neigh->nud_state & NUD_VALID)
+ rc = irdma_add_arp(iwdev->rf, &dst_ip, true, neigh->ha);
+ else
+ neigh_event_send(neigh, NULL);
+ if (neigh)
+ neigh_release(neigh);
+exit:
+ ip_rt_put(rt);
+
+ return rc;
+}
+
+/**
+ * irdma_get_dst_ipv6 - get destination cache entry via ipv6 lookup
+ * @src_addr: local ipv6 sock address
+ * @dst_addr: destination ipv6 sock address
+ */
+static struct dst_entry *irdma_get_dst_ipv6(struct sockaddr_in6 *src_addr,
+ struct sockaddr_in6 *dst_addr)
+{
+ struct dst_entry *dst = NULL;
+
+ if ((IS_ENABLED(CONFIG_IPV6))) {
+ struct flowi6 fl6 = {};
+
+ fl6.daddr = dst_addr->sin6_addr;
+ fl6.saddr = src_addr->sin6_addr;
+ if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+ fl6.flowi6_oif = dst_addr->sin6_scope_id;
+
+ dst = ip6_route_output(&init_net, NULL, &fl6);
+ }
+
+ return dst;
+}
+
+/**
+ * irdma_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
+ * @iwdev: iwarp device structure
+ * @src: local ip address
+ * @dest: remote ip address
+ * @arpindex: index of the existing arp entry, if any
+ */
+static int irdma_addr_resolve_neigh_ipv6(struct irdma_device *iwdev, u32 *src,
+ u32 *dest, int arpindex)
+{
+ struct neighbour *neigh;
+ int rc = arpindex;
+ struct dst_entry *dst;
+ struct sockaddr_in6 dst_addr = {};
+ struct sockaddr_in6 src_addr = {};
+
+ dst_addr.sin6_family = AF_INET6;
+ irdma_copy_ip_htonl(dst_addr.sin6_addr.in6_u.u6_addr32, dest);
+ src_addr.sin6_family = AF_INET6;
+ irdma_copy_ip_htonl(src_addr.sin6_addr.in6_u.u6_addr32, src);
+ dst = irdma_get_dst_ipv6(&src_addr, &dst_addr);
+ if (!dst || dst->error) {
+ if (dst) {
+ dst_release(dst);
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: ip6_route_output returned dst->error = %d\n",
+ dst->error);
+ }
+ return -EINVAL;
+ }
+
+ neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32);
+ if (!neigh)
+ goto exit;
+
+ ibdev_dbg(&iwdev->ibdev, "CM: dst_neigh_lookup MAC=%pM\n",
+ neigh->ha);
+
+ trace_irdma_addr_resolve(iwdev, neigh->ha);
+
+ if (neigh->nud_state & NUD_VALID)
+ rc = irdma_add_arp(iwdev->rf, dest, false, neigh->ha);
+ else
+ neigh_event_send(neigh, NULL);
+ if (neigh)
+ neigh_release(neigh);
+exit:
+ dst_release(dst);
+
+ return rc;
+}
+
+/**
+ * irdma_find_node - find a cm node that matches the reference cm node
+ * @cm_core: cm's core
+ * @rem_port: remote tcp port num
+ * @rem_addr: remote ip addr
+ * @loc_port: local tcp port num
+ * @loc_addr: local ip addr
+ * @vlan_id: local VLAN ID
+ */
+struct irdma_cm_node *irdma_find_node(struct irdma_cm_core *cm_core,
+ u16 rem_port, u32 *rem_addr, u16 loc_port,
+ u32 *loc_addr, u16 vlan_id)
+{
+ struct irdma_cm_node *cm_node;
+ u32 key = (rem_port << 16) | loc_port;
+
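+ /* Nodes are hashed on the (remote, local) port pair; addresses and VLAN are matched below */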
+ rcu_read_lock();
+ hash_for_each_possible_rcu(cm_core->cm_hash_tbl, cm_node, list, key) {
+ if (cm_node->vlan_id == vlan_id &&
+ cm_node->loc_port == loc_port && cm_node->rem_port == rem_port &&
+ !memcmp(cm_node->loc_addr, loc_addr, sizeof(cm_node->loc_addr)) &&
+ !memcmp(cm_node->rem_addr, rem_addr, sizeof(cm_node->rem_addr))) {
+ if (!refcount_inc_not_zero(&cm_node->refcnt))
+ goto exit;
+ rcu_read_unlock();
+ trace_irdma_find_node(cm_node, 0, NULL);
+ return cm_node;
+ }
+ }
+
+exit:
+ rcu_read_unlock();
+
+ /* no owner node */
+ return NULL;
+}
+
+/**
+ * irdma_add_hte_node - add a cm node to the hash table
+ * @cm_core: cm's core
+ * @cm_node: connection's node
+ */
+static void irdma_add_hte_node(struct irdma_cm_core *cm_core,
+ struct irdma_cm_node *cm_node)
+{
+ unsigned long flags;
+ u32 key = (cm_node->rem_port << 16) | cm_node->loc_port;
+
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ hash_add_rcu(cm_core->cm_hash_tbl, &cm_node->list, key);
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+}
+
+/**
+ * irdma_ipv4_is_lpb - check if loopback
+ * @loc_addr: local addr to compare
+ * @rem_addr: remote address
+ */
+bool irdma_ipv4_is_lpb(u32 loc_addr, u32 rem_addr)
+{
+ return ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr);
+}
+
+/**
+ * irdma_ipv6_is_lpb - check if loopback
+ * @loc_addr: local addr to compare
+ * @rem_addr: remote address
+ */
+bool irdma_ipv6_is_lpb(u32 *loc_addr, u32 *rem_addr)
+{
+ struct in6_addr raddr6;
+
+ irdma_copy_ip_htonl(raddr6.in6_u.u6_addr32, rem_addr);
+
+ return !memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6);
+}
+
+/**
+ * irdma_cm_create_ah - create a cm address handle
+ * @cm_node: The connection manager node to create AH for
+ * @wait: Provides option to wait for ah creation or not
+ */
+static int irdma_cm_create_ah(struct irdma_cm_node *cm_node, bool wait)
+{
+ struct irdma_ah_info ah_info = {};
+ struct irdma_device *iwdev = cm_node->iwdev;
+
+ ether_addr_copy(ah_info.mac_addr, iwdev->netdev->dev_addr);
+
+ ah_info.hop_ttl = 0x40;
+ ah_info.tc_tos = cm_node->tos;
+ ah_info.vsi = &iwdev->vsi;
+
+ if (cm_node->ipv4) {
+ ah_info.ipv4_valid = true;
+ ah_info.dest_ip_addr[0] = cm_node->rem_addr[0];
+ ah_info.src_ip_addr[0] = cm_node->loc_addr[0];
+ ah_info.do_lpbk = irdma_ipv4_is_lpb(ah_info.src_ip_addr[0],
+ ah_info.dest_ip_addr[0]);
+ } else {
+ memcpy(ah_info.dest_ip_addr, cm_node->rem_addr,
+ sizeof(ah_info.dest_ip_addr));
+ memcpy(ah_info.src_ip_addr, cm_node->loc_addr,
+ sizeof(ah_info.src_ip_addr));
+ ah_info.do_lpbk = irdma_ipv6_is_lpb(ah_info.src_ip_addr,
+ ah_info.dest_ip_addr);
+ }
+
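+ /* Tag the AH with the VLAN (and user priority in the PCP bits) only when the VLAN ID is valid */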
+ ah_info.vlan_tag = cm_node->vlan_id;
+ if (cm_node->vlan_id < VLAN_N_VID) {
+ ah_info.insert_vlan_tag = 1;
+ ah_info.vlan_tag |= cm_node->user_pri << VLAN_PRIO_SHIFT;
+ }
+
+ ah_info.dst_arpindex =
+ irdma_arp_table(iwdev->rf, ah_info.dest_ip_addr,
+ ah_info.ipv4_valid, NULL, IRDMA_ARP_RESOLVE);
+
+ if (irdma_puda_create_ah(&iwdev->rf->sc_dev, &ah_info, wait,
+ IRDMA_PUDA_RSRC_TYPE_ILQ, cm_node,
+ &cm_node->ah))
+ return -ENOMEM;
+
+ trace_irdma_create_ah(cm_node);
+ return 0;
+}
+
+/**
+ * irdma_cm_free_ah - free a cm address handle
+ * @cm_node: The connection manager node to free the AH for
+ */
+static void irdma_cm_free_ah(struct irdma_cm_node *cm_node)
+{
+ struct irdma_device *iwdev = cm_node->iwdev;
+
+ trace_irdma_cm_free_ah(cm_node);
+ irdma_puda_free_ah(&iwdev->rf->sc_dev, cm_node->ah);
+ cm_node->ah = NULL;
+}
+
+/**
+ * irdma_make_cm_node - create a new instance of a cm node
+ * @cm_core: cm's core
+ * @iwdev: iwarp device structure
+ * @cm_info: quad info for connection
+ * @listener: passive connection's listener
+ */
+static struct irdma_cm_node *
+irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_listener *listener)
+{
+ struct irdma_cm_node *cm_node;
+ int oldarpindex;
+ int arpindex;
+ struct net_device *netdev = iwdev->netdev;
+
+ /* create an hte and cm_node for this instance */
+ cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
+ if (!cm_node)
+ return NULL;
+
+ /* set our node specific transport info */
+ cm_node->ipv4 = cm_info->ipv4;
+ cm_node->vlan_id = cm_info->vlan_id;
+ if (cm_node->vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
+ cm_node->vlan_id = 0;
+ cm_node->tos = cm_info->tos;
+ cm_node->user_pri = cm_info->user_pri;
+ if (listener) {
+ if (listener->tos != cm_info->tos)
+ ibdev_warn(&iwdev->ibdev,
+ "application TOS[%d] and remote client TOS[%d] mismatch\n",
+ listener->tos, cm_info->tos);
+ if (iwdev->vsi.dscp_mode) {
+ cm_node->user_pri = listener->user_pri;
+ } else {
+ cm_node->tos = max(listener->tos, cm_info->tos);
+ cm_node->user_pri = rt_tos2priority(cm_node->tos);
+ cm_node->user_pri =
+ irdma_iw_get_vlan_prio(cm_info->loc_addr,
+ cm_node->user_pri,
+ cm_info->ipv4);
+ }
+ ibdev_dbg(&iwdev->ibdev,
+ "DCB: listener: TOS:[%d] UP:[%d]\n", cm_node->tos,
+ cm_node->user_pri);
+ trace_irdma_listener_tos(iwdev, cm_node->tos,
+ cm_node->user_pri);
+ }
+ memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));
+ memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr));
+ cm_node->loc_port = cm_info->loc_port;
+ cm_node->rem_port = cm_info->rem_port;
+
+ cm_node->mpa_frame_rev = IRDMA_CM_DEFAULT_MPA_VER;
+ cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
+ cm_node->iwdev = iwdev;
+ cm_node->dev = &iwdev->rf->sc_dev;
+
+ cm_node->ird_size = cm_node->dev->hw_attrs.max_hw_ird;
+ cm_node->ord_size = cm_node->dev->hw_attrs.max_hw_ord;
+
+ cm_node->listener = listener;
+ cm_node->cm_id = cm_info->cm_id;
+ ether_addr_copy(cm_node->loc_mac, netdev->dev_addr);
+ spin_lock_init(&cm_node->retrans_list_lock);
+ cm_node->ack_rcvd = false;
+
+ init_completion(&cm_node->establish_comp);
+ refcount_set(&cm_node->refcnt, 1);
+ /* associate our parent CM core */
+ cm_node->cm_core = cm_core;
+ cm_node->tcp_cntxt.loc_id = IRDMA_CM_DEFAULT_LOCAL_ID;
+ cm_node->tcp_cntxt.rcv_wscale = iwdev->rcv_wscale;
+ cm_node->tcp_cntxt.rcv_wnd = iwdev->rcv_wnd >> cm_node->tcp_cntxt.rcv_wscale;
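+ /* Derive the initial local sequence number and MSS from the address family and VSI MTU */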
+ if (cm_node->ipv4) {
+ cm_node->tcp_cntxt.loc_seq_num = secure_tcp_seq(htonl(cm_node->loc_addr[0]),
+ htonl(cm_node->rem_addr[0]),
+ htons(cm_node->loc_port),
+ htons(cm_node->rem_port));
+ cm_node->tcp_cntxt.mss = iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV4;
+ } else if (IS_ENABLED(CONFIG_IPV6)) {
+ __be32 loc[4] = {
+ htonl(cm_node->loc_addr[0]), htonl(cm_node->loc_addr[1]),
+ htonl(cm_node->loc_addr[2]), htonl(cm_node->loc_addr[3])
+ };
+ __be32 rem[4] = {
+ htonl(cm_node->rem_addr[0]), htonl(cm_node->rem_addr[1]),
+ htonl(cm_node->rem_addr[2]), htonl(cm_node->rem_addr[3])
+ };
+ cm_node->tcp_cntxt.loc_seq_num = secure_tcpv6_seq(loc, rem,
+ htons(cm_node->loc_port),
+ htons(cm_node->rem_port));
+ cm_node->tcp_cntxt.mss = iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV6;
+ }
+
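+ /* Loopback connections skip neighbor resolution; otherwise resolve the remote MAC via the neighbor subsystem */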
+ if ((cm_node->ipv4 &&
+ irdma_ipv4_is_lpb(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
+ (!cm_node->ipv4 &&
+ irdma_ipv6_is_lpb(cm_node->loc_addr, cm_node->rem_addr))) {
+ cm_node->do_lpb = true;
+ arpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
+ cm_node->ipv4, NULL,
+ IRDMA_ARP_RESOLVE);
+ } else {
+ oldarpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
+ cm_node->ipv4, NULL,
+ IRDMA_ARP_RESOLVE);
+ if (cm_node->ipv4)
+ arpindex = irdma_addr_resolve_neigh(iwdev,
+ cm_info->loc_addr[0],
+ cm_info->rem_addr[0],
+ oldarpindex);
+ else if (IS_ENABLED(CONFIG_IPV6))
+ arpindex = irdma_addr_resolve_neigh_ipv6(iwdev,
+ cm_info->loc_addr,
+ cm_info->rem_addr,
+ oldarpindex);
+ else
+ arpindex = -EINVAL;
+ }
+
+ if (arpindex < 0)
+ goto err;
+
+ ether_addr_copy(cm_node->rem_mac,
+ iwdev->rf->arp_table[arpindex].mac_addr);
+ irdma_add_hte_node(cm_core, cm_node);
+ cm_core->stats_nodes_created++;
+ return cm_node;
+
+err:
+ kfree(cm_node);
+
+ return NULL;
+}
+
+static void irdma_destroy_connection(struct irdma_cm_node *cm_node)
+{
+ struct irdma_cm_core *cm_core = cm_node->cm_core;
+ struct irdma_qp *iwqp;
+ struct irdma_cm_info nfo;
+
+ /* if the node is destroyed before connection was accelerated */
+ if (!cm_node->accelerated && cm_node->accept_pend) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: node destroyed before established\n");
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ }
+ if (cm_node->close_entry)
+ irdma_handle_close_entry(cm_node, 0);
+ if (cm_node->listener) {
+ irdma_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
+ } else {
+ if (cm_node->apbvt_set) {
+ irdma_del_apbvt(cm_node->iwdev, cm_node->apbvt_entry);
+ cm_node->apbvt_set = 0;
+ }
+ irdma_get_addr_info(cm_node, &nfo);
+ if (cm_node->qhash_set) {
+ nfo.qh_qpid = cm_node->iwdev->vsi.ilq->qp_id;
+ irdma_manage_qhash(cm_node->iwdev, &nfo,
+ IRDMA_QHASH_TYPE_TCP_ESTABLISHED,
+ IRDMA_QHASH_MANAGE_TYPE_DELETE, NULL,
+ false);
+ cm_node->qhash_set = 0;
+ }
+ }
+
+ iwqp = cm_node->iwqp;
+ if (iwqp) {
+ cm_node->cm_id->rem_ref(cm_node->cm_id);
+ cm_node->cm_id = NULL;
+ iwqp->cm_id = NULL;
+ irdma_qp_rem_ref(&iwqp->ibqp);
+ cm_node->iwqp = NULL;
+ } else if (cm_node->qhash_set) {
+ irdma_get_addr_info(cm_node, &nfo);
+ nfo.qh_qpid = cm_node->iwdev->vsi.ilq->qp_id;
+ irdma_manage_qhash(cm_node->iwdev, &nfo,
+ IRDMA_QHASH_TYPE_TCP_ESTABLISHED,
+ IRDMA_QHASH_MANAGE_TYPE_DELETE, NULL, false);
+ cm_node->qhash_set = 0;
+ }
+
+ cm_core->cm_free_ah(cm_node);
+}
+
+/**
+ * irdma_rem_ref_cm_node - destroy an instance of a cm node
+ * @cm_node: connection's node
+ */
+void irdma_rem_ref_cm_node(struct irdma_cm_node *cm_node)
+{
+ struct irdma_cm_core *cm_core = cm_node->cm_core;
+ unsigned long flags;
+
+ trace_irdma_rem_ref_cm_node(cm_node, 0, __builtin_return_address(0));
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+
+ if (!refcount_dec_and_test(&cm_node->refcnt)) {
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+ return;
+ }
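+ /* Last reference dropped: unlink from the hash table under the lock, then tear down and free via RCU */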
+ if (cm_node->iwqp) {
+ cm_node->iwqp->cm_node = NULL;
+ cm_node->iwqp->cm_id = NULL;
+ }
+ hash_del_rcu(&cm_node->list);
+ cm_node->cm_core->stats_nodes_destroyed++;
+
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ irdma_destroy_connection(cm_node);
+
+ kfree_rcu(cm_node, rcu_head);
+}
+
+/**
+ * irdma_handle_fin_pkt - FIN packet received
+ * @cm_node: connection's node
+ */
+static void irdma_handle_fin_pkt(struct irdma_cm_node *cm_node)
+{
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_SYN_RCVD:
+ case IRDMA_CM_STATE_SYN_SENT:
+ case IRDMA_CM_STATE_ESTABLISHED:
+ case IRDMA_CM_STATE_MPAREJ_RCVD:
+ cm_node->tcp_cntxt.rcv_nxt++;
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_LAST_ACK;
+ irdma_send_fin(cm_node);
+ break;
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ irdma_create_event(cm_node, IRDMA_CM_EVENT_ABORTED);
+ cm_node->tcp_cntxt.rcv_nxt++;
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ refcount_inc(&cm_node->refcnt);
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ cm_node->tcp_cntxt.rcv_nxt++;
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSING;
+ irdma_send_ack(cm_node);
+ /*
+ * Wait for ACK as this is simultaneous close.
+ * After we receive ACK, do not send anything.
+ * Just rm the node.
+ */
+ break;
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ cm_node->tcp_cntxt.rcv_nxt++;
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_TIME_WAIT;
+ irdma_send_ack(cm_node);
+ irdma_schedule_cm_timer(cm_node, NULL, IRDMA_TIMER_TYPE_CLOSE,
+ 1, 0);
+ break;
+ case IRDMA_CM_STATE_TIME_WAIT:
+ cm_node->tcp_cntxt.rcv_nxt++;
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_rem_ref_cm_node(cm_node);
+ break;
+ case IRDMA_CM_STATE_OFFLOADED:
+ default:
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: bad state node state = %d\n", cm_node->state);
+ break;
+ }
+}
+
+/**
+ * irdma_handle_rst_pkt - process received RST packet
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void irdma_handle_rst_pkt(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *rbuf)
+{
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: caller: %pS cm_node=%p state=%d rem_port=0x%04x loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4\n",
+ __builtin_return_address(0), cm_node, cm_node->state,
+ cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr,
+ cm_node->loc_addr);
+
+ irdma_cleanup_retrans_entry(cm_node);
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_SYN_SENT:
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ switch (cm_node->mpa_frame_rev) {
+ case IETF_MPA_V2:
+ /* Drop down to MPA_V1 */
+ cm_node->mpa_frame_rev = IETF_MPA_V1;
+ /* send a syn and goto syn sent state */
+ cm_node->state = IRDMA_CM_STATE_SYN_SENT;
+ if (irdma_send_syn(cm_node, 0))
+ irdma_active_open_err(cm_node, false);
+ break;
+ case IETF_MPA_V1:
+ default:
+ irdma_active_open_err(cm_node, false);
+ break;
+ }
+ break;
+ case IRDMA_CM_STATE_MPAREQ_RCVD:
+ atomic_inc(&cm_node->passive_state);
+ break;
+ case IRDMA_CM_STATE_ESTABLISHED:
+ case IRDMA_CM_STATE_SYN_RCVD:
+ case IRDMA_CM_STATE_LISTENING:
+ irdma_passive_open_err(cm_node, false);
+ break;
+ case IRDMA_CM_STATE_OFFLOADED:
+ irdma_active_open_err(cm_node, false);
+ break;
+ case IRDMA_CM_STATE_CLOSED:
+ break;
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ case IRDMA_CM_STATE_LAST_ACK:
+ case IRDMA_CM_STATE_TIME_WAIT:
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_rem_ref_cm_node(cm_node);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * irdma_handle_rcv_mpa - Process a recv'd mpa buffer
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void irdma_handle_rcv_mpa(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *rbuf)
+{
+ int err;
+ int datasize = rbuf->datalen;
+ u8 *dataloc = rbuf->data;
+
+ enum irdma_cm_event_type type = IRDMA_CM_EVENT_UNKNOWN;
+ u32 res_type;
+
+ err = irdma_parse_mpa(cm_node, dataloc, &res_type, datasize);
+ if (err) {
+ if (cm_node->state == IRDMA_CM_STATE_MPAREQ_SENT)
+ irdma_active_open_err(cm_node, true);
+ else
+ irdma_passive_open_err(cm_node, true);
+ return;
+ }
+
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_ESTABLISHED:
+ if (res_type == IRDMA_MPA_REQUEST_REJECT)
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: state for reject\n");
+ cm_node->state = IRDMA_CM_STATE_MPAREQ_RCVD;
+ type = IRDMA_CM_EVENT_MPA_REQ;
+ irdma_send_ack(cm_node); /* ACK received MPA request */
+ atomic_set(&cm_node->passive_state,
+ IRDMA_PASSIVE_STATE_INDICATED);
+ break;
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ irdma_cleanup_retrans_entry(cm_node);
+ if (res_type == IRDMA_MPA_REQUEST_REJECT) {
+ type = IRDMA_CM_EVENT_MPA_REJECT;
+ cm_node->state = IRDMA_CM_STATE_MPAREJ_RCVD;
+ } else {
+ type = IRDMA_CM_EVENT_CONNECTED;
+ cm_node->state = IRDMA_CM_STATE_OFFLOADED;
+ }
+ irdma_send_ack(cm_node);
+ break;
+ default:
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: wrong cm_node state =%d\n", cm_node->state);
+ break;
+ }
+ irdma_create_event(cm_node, type);
+}
+
+/**
+ * irdma_check_syn - Check for error on received syn ack
+ * @cm_node: connection's node
+ * @tcph: pointer to tcp header
+ */
+static int irdma_check_syn(struct irdma_cm_node *cm_node, struct tcphdr *tcph)
+{
+ if (ntohl(tcph->ack_seq) != cm_node->tcp_cntxt.loc_seq_num) {
+ irdma_active_open_err(cm_node, true);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_check_seq - check seq numbers if OK
+ * @cm_node: connection's node
+ * @tcph: pointer to tcp header
+ */
+static int irdma_check_seq(struct irdma_cm_node *cm_node, struct tcphdr *tcph)
+{
+ u32 seq;
+ u32 ack_seq;
+ u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num;
+ u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
+ u32 rcv_wnd;
+ int err = 0;
+
+ seq = ntohl(tcph->seq);
+ ack_seq = ntohl(tcph->ack_seq);
+ rcv_wnd = cm_node->tcp_cntxt.rcv_wnd;
+ if (ack_seq != loc_seq_num ||
+ !between(seq, rcv_nxt, (rcv_nxt + rcv_wnd)))
+ err = -1;
+ if (err)
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: seq number err\n");
+
+ return err;
+}
+
+void irdma_add_conn_est_qh(struct irdma_cm_node *cm_node)
+{
+ struct irdma_cm_info nfo;
+
+ irdma_get_addr_info(cm_node, &nfo);
+ nfo.qh_qpid = cm_node->iwdev->vsi.ilq->qp_id;
+ irdma_manage_qhash(cm_node->iwdev, &nfo,
+ IRDMA_QHASH_TYPE_TCP_ESTABLISHED,
+ IRDMA_QHASH_MANAGE_TYPE_ADD,
+ cm_node, false);
+ cm_node->qhash_set = true;
+}
+
+/**
+ * irdma_handle_syn_pkt - handle a SYN packet on the passive node
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void irdma_handle_syn_pkt(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *rbuf)
+{
+ struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+ int err;
+ u32 inc_sequence;
+ int optionsize;
+
+ optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
+ inc_sequence = ntohl(tcph->seq);
+
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_SYN_SENT:
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ /* Rcvd syn on active open connection */
+ irdma_active_open_err(cm_node, 1);
+ break;
+ case IRDMA_CM_STATE_LISTENING:
+ /* Passive OPEN */
+ if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
+ cm_node->listener->backlog) {
+ cm_node->cm_core->stats_backlog_drops++;
+ irdma_passive_open_err(cm_node, false);
+ break;
+ }
+ err = irdma_handle_tcp_options(cm_node, tcph, optionsize, 1);
+ if (err) {
+ irdma_passive_open_err(cm_node, false);
+ /* drop pkt */
+ break;
+ }
+ err = cm_node->cm_core->cm_create_ah(cm_node, false);
+ if (err) {
+ irdma_passive_open_err(cm_node, false);
+ /* drop pkt */
+ break;
+ }
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
+ cm_node->accept_pend = 1;
+ atomic_inc(&cm_node->listener->pend_accepts_cnt);
+
+ cm_node->state = IRDMA_CM_STATE_SYN_RCVD;
+ break;
+ case IRDMA_CM_STATE_CLOSED:
+ irdma_cleanup_retrans_entry(cm_node);
+ refcount_inc(&cm_node->refcnt);
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_OFFLOADED:
+ case IRDMA_CM_STATE_ESTABLISHED:
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ case IRDMA_CM_STATE_MPAREQ_RCVD:
+ case IRDMA_CM_STATE_LAST_ACK:
+ case IRDMA_CM_STATE_CLOSING:
+ case IRDMA_CM_STATE_UNKNOWN:
+ default:
+ break;
+ }
+}
+
+/**
+ * irdma_handle_synack_pkt - Process SYN+ACK packet (active side)
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void irdma_handle_synack_pkt(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *rbuf)
+{
+ struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+ int err;
+ u32 inc_sequence;
+ int optionsize;
+
+ optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
+ inc_sequence = ntohl(tcph->seq);
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_SYN_SENT:
+ irdma_cleanup_retrans_entry(cm_node);
+ /* active open */
+ if (irdma_check_syn(cm_node, tcph)) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: check syn fail\n");
+ return;
+ }
+ cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+ /* setup options */
+ err = irdma_handle_tcp_options(cm_node, tcph, optionsize, 0);
+ if (err) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: cm_node=%p tcp_options failed\n",
+ cm_node);
+ break;
+ }
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
+ irdma_send_ack(cm_node); /* ACK for the syn_ack */
+ err = irdma_send_mpa_request(cm_node);
+ if (err) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: cm_node=%p irdma_send_mpa_request failed\n",
+ cm_node);
+ break;
+ }
+ cm_node->state = IRDMA_CM_STATE_MPAREQ_SENT;
+ break;
+ case IRDMA_CM_STATE_MPAREQ_RCVD:
+ irdma_passive_open_err(cm_node, true);
+ break;
+ case IRDMA_CM_STATE_LISTENING:
+ cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_CLOSED:
+ cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
+ irdma_cleanup_retrans_entry(cm_node);
+ refcount_inc(&cm_node->refcnt);
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_ESTABLISHED:
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ case IRDMA_CM_STATE_LAST_ACK:
+ case IRDMA_CM_STATE_OFFLOADED:
+ case IRDMA_CM_STATE_CLOSING:
+ case IRDMA_CM_STATE_UNKNOWN:
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ default:
+ break;
+ }
+}
+
+/**
+ * irdma_handle_ack_pkt - process packet with ACK
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static int irdma_handle_ack_pkt(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *rbuf)
+{
+ struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+ u32 inc_sequence;
+ int ret;
+ int optionsize;
+ u32 datasize = rbuf->datalen;
+
+ optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
+
+ if (irdma_check_seq(cm_node, tcph))
+ return -EINVAL;
+
+ inc_sequence = ntohl(tcph->seq);
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_SYN_RCVD:
+ irdma_cleanup_retrans_entry(cm_node);
+ ret = irdma_handle_tcp_options(cm_node, tcph, optionsize, 1);
+ if (ret)
+ return ret;
+ cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+ cm_node->state = IRDMA_CM_STATE_ESTABLISHED;
+ if (datasize) {
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ irdma_handle_rcv_mpa(cm_node, rbuf);
+ }
+ break;
+ case IRDMA_CM_STATE_ESTABLISHED:
+ irdma_cleanup_retrans_entry(cm_node);
+ if (datasize) {
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ irdma_handle_rcv_mpa(cm_node, rbuf);
+ }
+ break;
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+ if (datasize) {
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ cm_node->ack_rcvd = false;
+ irdma_handle_rcv_mpa(cm_node, rbuf);
+ } else {
+ cm_node->ack_rcvd = true;
+ }
+ break;
+ case IRDMA_CM_STATE_LISTENING:
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_CLOSED:
+ irdma_cleanup_retrans_entry(cm_node);
+ refcount_inc(&cm_node->refcnt);
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_LAST_ACK:
+ case IRDMA_CM_STATE_CLOSING:
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_rem_ref_cm_node(cm_node);
+ break;
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_FIN_WAIT2;
+ break;
+ case IRDMA_CM_STATE_SYN_SENT:
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ case IRDMA_CM_STATE_OFFLOADED:
+ case IRDMA_CM_STATE_MPAREQ_RCVD:
+ case IRDMA_CM_STATE_UNKNOWN:
+ default:
+ irdma_cleanup_retrans_entry(cm_node);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_process_pkt - process cm packet
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void irdma_process_pkt(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *rbuf)
+{
+ enum irdma_tcpip_pkt_type pkt_type = IRDMA_PKT_TYPE_UNKNOWN;
+ struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+ u32 fin_set = 0;
+ int err;
+
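+ /* Classify the packet from its TCP flags; a FIN is handled after the primary type */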
+ if (tcph->rst) {
+ pkt_type = IRDMA_PKT_TYPE_RST;
+ } else if (tcph->syn) {
+ pkt_type = IRDMA_PKT_TYPE_SYN;
+ if (tcph->ack)
+ pkt_type = IRDMA_PKT_TYPE_SYNACK;
+ } else if (tcph->ack) {
+ pkt_type = IRDMA_PKT_TYPE_ACK;
+ }
+ if (tcph->fin)
+ fin_set = 1;
+
+ switch (pkt_type) {
+ case IRDMA_PKT_TYPE_SYN:
+ irdma_handle_syn_pkt(cm_node, rbuf);
+ break;
+ case IRDMA_PKT_TYPE_SYNACK:
+ irdma_handle_synack_pkt(cm_node, rbuf);
+ break;
+ case IRDMA_PKT_TYPE_ACK:
+ err = irdma_handle_ack_pkt(cm_node, rbuf);
+ if (fin_set && !err)
+ irdma_handle_fin_pkt(cm_node);
+ break;
+ case IRDMA_PKT_TYPE_RST:
+ irdma_handle_rst_pkt(cm_node, rbuf);
+ break;
+ default:
+ if (fin_set &&
+ (!irdma_check_seq(cm_node, (struct tcphdr *)rbuf->tcph)))
+ irdma_handle_fin_pkt(cm_node);
+ break;
+ }
+}
+
+/**
+ * irdma_make_listen_node - create a listen node with params
+ * @cm_core: cm's core
+ * @iwdev: iwarp device structure
+ * @cm_info: quad info for connection
+ */
+static struct irdma_cm_listener *
+irdma_make_listen_node(struct irdma_cm_core *cm_core,
+ struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info)
+{
+ struct irdma_cm_listener *listener;
+ unsigned long flags;
+
+ /* cannot have multiple matching listeners */
+ listener =
+ irdma_find_listener(cm_core, cm_info->loc_addr, cm_info->ipv4,
+ cm_info->loc_port, cm_info->vlan_id,
+ IRDMA_CM_LISTENER_EITHER_STATE);
+ if (listener &&
+ listener->listener_state == IRDMA_CM_LISTENER_ACTIVE_STATE) {
+ refcount_dec(&listener->refcnt);
+ return NULL;
+ }
+
+ if (!listener) {
+ /* create a CM listen node: a half (1/2) node used to
+ * compare incoming traffic against
+ */
+ listener = kzalloc(sizeof(*listener), GFP_KERNEL);
+ if (!listener)
+ return NULL;
+ cm_core->stats_listen_nodes_created++;
+ memcpy(listener->loc_addr, cm_info->loc_addr,
+ sizeof(listener->loc_addr));
+ listener->loc_port = cm_info->loc_port;
+
+ INIT_LIST_HEAD(&listener->child_listen_list);
+
+ refcount_set(&listener->refcnt, 1);
+ } else {
+ listener->reused_node = 1;
+ }
+
+ listener->cm_id = cm_info->cm_id;
+ listener->ipv4 = cm_info->ipv4;
+ listener->vlan_id = cm_info->vlan_id;
+ atomic_set(&listener->pend_accepts_cnt, 0);
+ listener->cm_core = cm_core;
+ listener->iwdev = iwdev;
+
+ listener->backlog = cm_info->backlog;
+ listener->listener_state = IRDMA_CM_LISTENER_ACTIVE_STATE;
+
+ if (!listener->reused_node) {
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_add(&listener->list, &cm_core->listen_list);
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+ }
+
+ return listener;
+}
+
+/**
+ * irdma_create_cm_node - make a connection node with params
+ * @cm_core: cm's core
+ * @iwdev: iwarp device structure
+ * @conn_param: connection parameters
+ * @cm_info: quad info for connection
+ * @caller_cm_node: pointer to cm_node structure to return
+ */
+static int irdma_create_cm_node(struct irdma_cm_core *cm_core,
+ struct irdma_device *iwdev,
+ struct iw_cm_conn_param *conn_param,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_node **caller_cm_node)
+{
+ struct irdma_cm_node *cm_node;
+ u16 private_data_len = conn_param->private_data_len;
+ const void *private_data = conn_param->private_data;
+
+ /* create a CM connection node */
+ cm_node = irdma_make_cm_node(cm_core, iwdev, cm_info, NULL);
+ if (!cm_node)
+ return -ENOMEM;
+
+ /* set our node side to client (active) side */
+ cm_node->tcp_cntxt.client = 1;
+ cm_node->tcp_cntxt.rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
+
+ irdma_record_ird_ord(cm_node, conn_param->ird, conn_param->ord);
+
+ cm_node->pdata.size = private_data_len;
+ cm_node->pdata.addr = cm_node->pdata_buf;
+
+ memcpy(cm_node->pdata_buf, private_data, private_data_len);
+ *caller_cm_node = cm_node;
+
+ return 0;
+}
+
+/**
+ * irdma_cm_reject - reject and teardown a connection
+ * @cm_node: connection's node
+ * @pdata: ptr to private data for reject
+ * @plen: size of private data
+ */
+static int irdma_cm_reject(struct irdma_cm_node *cm_node, const void *pdata,
+ u8 plen)
+{
+ int ret;
+ int passive_state;
+
+ if (cm_node->tcp_cntxt.client)
+ return 0;
+
+ irdma_cleanup_retrans_entry(cm_node);
+
+ passive_state = atomic_add_return(1, &cm_node->passive_state);
+ if (passive_state == IRDMA_SEND_RESET_EVENT) {
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_rem_ref_cm_node(cm_node);
+ return 0;
+ }
+
+ if (cm_node->state == IRDMA_CM_STATE_LISTENER_DESTROYED) {
+ irdma_rem_ref_cm_node(cm_node);
+ return 0;
+ }
+
+ ret = irdma_send_mpa_reject(cm_node, pdata, plen);
+ if (!ret)
+ return 0;
+
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ if (irdma_send_reset(cm_node))
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: send reset failed\n");
+
+ return ret;
+}
+
+/**
+ * irdma_cm_close - close of cm connection
+ * @cm_node: connection's node
+ */
+static int irdma_cm_close(struct irdma_cm_node *cm_node)
+{
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_SYN_RCVD:
+ case IRDMA_CM_STATE_SYN_SENT:
+ case IRDMA_CM_STATE_ONE_SIDE_ESTABLISHED:
+ case IRDMA_CM_STATE_ESTABLISHED:
+ case IRDMA_CM_STATE_ACCEPTING:
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ case IRDMA_CM_STATE_MPAREQ_RCVD:
+ irdma_cleanup_retrans_entry(cm_node);
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_CLOSE_WAIT:
+ cm_node->state = IRDMA_CM_STATE_LAST_ACK;
+ irdma_send_fin(cm_node);
+ break;
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ case IRDMA_CM_STATE_LAST_ACK:
+ case IRDMA_CM_STATE_TIME_WAIT:
+ case IRDMA_CM_STATE_CLOSING:
+ return -EINVAL;
+ case IRDMA_CM_STATE_LISTENING:
+ irdma_cleanup_retrans_entry(cm_node);
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_MPAREJ_RCVD:
+ case IRDMA_CM_STATE_UNKNOWN:
+ case IRDMA_CM_STATE_INITED:
+ case IRDMA_CM_STATE_CLOSED:
+ case IRDMA_CM_STATE_LISTENER_DESTROYED:
+ irdma_rem_ref_cm_node(cm_node);
+ break;
+ case IRDMA_CM_STATE_OFFLOADED:
+ if (cm_node->send_entry)
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: CM send_entry in OFFLOADED state\n");
+ irdma_rem_ref_cm_node(cm_node);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_receive_ilq - receive an Ethernet packet and process it
+ * through the CM
+ * @vsi: VSI structure of dev
+ * @rbuf: receive buffer
+ */
+void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf)
+{
+ struct irdma_cm_node *cm_node;
+ struct irdma_cm_listener *listener;
+ struct iphdr *iph;
+ struct ipv6hdr *ip6h;
+ struct tcphdr *tcph;
+ struct irdma_cm_info cm_info = {};
+ struct irdma_device *iwdev = vsi->back_vsi;
+ struct irdma_cm_core *cm_core = &iwdev->cm_core;
+ struct vlan_ethhdr *ethh;
+ u16 vtag;
+
+ /* if vlan, then maclen = 18 else 14 */
+ iph = (struct iphdr *)rbuf->iph;
+ print_hex_dump_debug("ILQ: RECEIVE ILQ BUFFER", DUMP_PREFIX_OFFSET,
+ 16, 8, rbuf->mem.va, rbuf->totallen, false);
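+ /* GEN_2 hardware reports VLAN info in the buffer metadata; GEN_1 must parse the Ethernet header */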
+ if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ if (rbuf->vlan_valid) {
+ vtag = rbuf->vlan_id;
+ cm_info.user_pri = (vtag & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
+ cm_info.vlan_id = vtag & VLAN_VID_MASK;
+ } else {
+ cm_info.vlan_id = 0xFFFF;
+ }
+ } else {
+ ethh = rbuf->mem.va;
+
+ if (ethh->h_vlan_proto == htons(ETH_P_8021Q)) {
+ vtag = ntohs(ethh->h_vlan_TCI);
+ cm_info.user_pri = (vtag & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
+ cm_info.vlan_id = vtag & VLAN_VID_MASK;
+ ibdev_dbg(&cm_core->iwdev->ibdev,
+ "CM: vlan_id=%d\n", cm_info.vlan_id);
+ } else {
+ cm_info.vlan_id = 0xFFFF;
+ }
+ }
+ tcph = (struct tcphdr *)rbuf->tcph;
+
+ if (rbuf->ipv4) {
+ cm_info.loc_addr[0] = ntohl(iph->daddr);
+ cm_info.rem_addr[0] = ntohl(iph->saddr);
+ cm_info.ipv4 = true;
+ cm_info.tos = iph->tos;
+ } else {
+ ip6h = (struct ipv6hdr *)rbuf->iph;
+ irdma_copy_ip_ntohl(cm_info.loc_addr,
+ ip6h->daddr.in6_u.u6_addr32);
+ irdma_copy_ip_ntohl(cm_info.rem_addr,
+ ip6h->saddr.in6_u.u6_addr32);
+ cm_info.ipv4 = false;
+ cm_info.tos = (ip6h->priority << 4) | (ip6h->flow_lbl[0] >> 4);
+ }
+ cm_info.loc_port = ntohs(tcph->dest);
+ cm_info.rem_port = ntohs(tcph->source);
+ cm_node = irdma_find_node(cm_core, cm_info.rem_port, cm_info.rem_addr,
+ cm_info.loc_port, cm_info.loc_addr, cm_info.vlan_id);
+
+ if (!cm_node) {
+ /* The only packets accepted here are those for a
+ * PASSIVE open (SYN only)
+ */
+ if (!tcph->syn || tcph->ack)
+ return;
+
+ listener = irdma_find_listener(cm_core,
+ cm_info.loc_addr,
+ cm_info.ipv4,
+ cm_info.loc_port,
+ cm_info.vlan_id,
+ IRDMA_CM_LISTENER_ACTIVE_STATE);
+ if (!listener) {
+ cm_info.cm_id = NULL;
+ ibdev_dbg(&cm_core->iwdev->ibdev,
+ "CM: no listener found\n");
+ return;
+ }
+
+ cm_info.cm_id = listener->cm_id;
+ cm_node = irdma_make_cm_node(cm_core, iwdev, &cm_info,
+ listener);
+ if (!cm_node) {
+ ibdev_dbg(&cm_core->iwdev->ibdev,
+ "CM: allocate node failed\n");
+ refcount_dec(&listener->refcnt);
+ return;
+ }
+
+ if (!tcph->rst && !tcph->fin) {
+ cm_node->state = IRDMA_CM_STATE_LISTENING;
+ } else {
+ irdma_rem_ref_cm_node(cm_node);
+ return;
+ }
+
+ refcount_inc(&cm_node->refcnt);
+ } else if (cm_node->state == IRDMA_CM_STATE_OFFLOADED) {
+ irdma_rem_ref_cm_node(cm_node);
+ return;
+ }
+
+ irdma_process_pkt(cm_node, rbuf);
+ irdma_rem_ref_cm_node(cm_node);
+}
+
+static int irdma_add_qh(struct irdma_cm_node *cm_node, bool active)
+{
+ if (!active)
+ irdma_add_conn_est_qh(cm_node);
+ return 0;
+}
+
+static void irdma_cm_free_ah_nop(struct irdma_cm_node *cm_node)
+{
+}
+
+/**
+ * irdma_setup_cm_core - setup top level instance of a cm core
+ * @iwdev: iwarp device structure
+ * @rdma_ver: HW version
+ */
+int irdma_setup_cm_core(struct irdma_device *iwdev, u8 rdma_ver)
+{
+ struct irdma_cm_core *cm_core = &iwdev->cm_core;
+
+ cm_core->iwdev = iwdev;
+ cm_core->dev = &iwdev->rf->sc_dev;
+
+ /* Handles CM event work items sent to the iWarp core */
+ cm_core->event_wq = alloc_ordered_workqueue("iwarp-event-wq", 0);
+ if (!cm_core->event_wq)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cm_core->listen_list);
+
+ timer_setup(&cm_core->tcp_timer, irdma_cm_timer_tick, 0);
+
+ spin_lock_init(&cm_core->ht_lock);
+ spin_lock_init(&cm_core->listen_list_lock);
+ spin_lock_init(&cm_core->apbvt_lock);
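+ /* GEN_1 builds UDA CM frames and sets up a qhash instead of an AH; GEN_2+ uses AH-based CM frames */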
+ switch (rdma_ver) {
+ case IRDMA_GEN_1:
+ cm_core->form_cm_frame = irdma_form_uda_cm_frame;
+ cm_core->cm_create_ah = irdma_add_qh;
+ cm_core->cm_free_ah = irdma_cm_free_ah_nop;
+ break;
+ case IRDMA_GEN_2:
+ default:
+ cm_core->form_cm_frame = irdma_form_ah_cm_frame;
+ cm_core->cm_create_ah = irdma_cm_create_ah;
+ cm_core->cm_free_ah = irdma_cm_free_ah;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_cleanup_cm_core - deallocate a top level instance of a
+ * cm core
+ * @cm_core: cm's core
+ */
+void irdma_cleanup_cm_core(struct irdma_cm_core *cm_core)
+{
+ if (!cm_core)
+ return;
+
+ timer_delete_sync(&cm_core->tcp_timer);
+
+ destroy_workqueue(cm_core->event_wq);
+ cm_core->dev->ws_reset(&cm_core->iwdev->vsi);
+}
+
+/**
+ * irdma_init_tcp_ctx - setup qp context
+ * @cm_node: connection's node
+ * @tcp_info: offload info for tcp
+ * @iwqp: associated qp for the connection
+ */
+static void irdma_init_tcp_ctx(struct irdma_cm_node *cm_node,
+ struct irdma_tcp_offload_info *tcp_info,
+ struct irdma_qp *iwqp)
+{
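+ /* Populate the HW TCP offload context from the CM node's software TCP state */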
+ tcp_info->ipv4 = cm_node->ipv4;
+ tcp_info->drop_ooo_seg = !iwqp->iwdev->iw_ooo;
+ tcp_info->wscale = true;
+ tcp_info->ignore_tcp_opt = true;
+ tcp_info->ignore_tcp_uns_opt = true;
+ tcp_info->no_nagle = false;
+
+ tcp_info->ttl = IRDMA_DEFAULT_TTL;
+ tcp_info->rtt_var = IRDMA_DEFAULT_RTT_VAR;
+ tcp_info->ss_thresh = IRDMA_DEFAULT_SS_THRESH;
+ tcp_info->rexmit_thresh = IRDMA_DEFAULT_REXMIT_THRESH;
+
+ tcp_info->tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
+ tcp_info->snd_wscale = cm_node->tcp_cntxt.snd_wscale;
+ tcp_info->rcv_wscale = cm_node->tcp_cntxt.rcv_wscale;
+
+ tcp_info->snd_nxt = cm_node->tcp_cntxt.loc_seq_num;
+ tcp_info->snd_wnd = cm_node->tcp_cntxt.snd_wnd;
+ tcp_info->rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
+ tcp_info->snd_max = cm_node->tcp_cntxt.loc_seq_num;
+
+ tcp_info->snd_una = cm_node->tcp_cntxt.loc_seq_num;
+ tcp_info->cwnd = 2 * cm_node->tcp_cntxt.mss;
+ tcp_info->snd_wl1 = cm_node->tcp_cntxt.rcv_nxt;
+ tcp_info->snd_wl2 = cm_node->tcp_cntxt.loc_seq_num;
+ tcp_info->max_snd_window = cm_node->tcp_cntxt.max_snd_wnd;
+ tcp_info->rcv_wnd = cm_node->tcp_cntxt.rcv_wnd
+ << cm_node->tcp_cntxt.rcv_wscale;
+
+ tcp_info->flow_label = 0;
+ tcp_info->snd_mss = (u32)cm_node->tcp_cntxt.mss;
+ tcp_info->tos = cm_node->tos;
+ if (cm_node->vlan_id < VLAN_N_VID) {
+ tcp_info->insert_vlan_tag = true;
+ tcp_info->vlan_tag = cm_node->vlan_id;
+ tcp_info->vlan_tag |= cm_node->user_pri << VLAN_PRIO_SHIFT;
+ }
+ if (cm_node->ipv4) {
+ tcp_info->src_port = cm_node->loc_port;
+ tcp_info->dst_port = cm_node->rem_port;
+
+ tcp_info->dest_ip_addr[3] = cm_node->rem_addr[0];
+ tcp_info->local_ipaddr[3] = cm_node->loc_addr[0];
+ tcp_info->arp_idx = (u16)irdma_arp_table(iwqp->iwdev->rf,
+ &tcp_info->dest_ip_addr[3],
+ true, NULL,
+ IRDMA_ARP_RESOLVE);
+ } else {
+ tcp_info->src_port = cm_node->loc_port;
+ tcp_info->dst_port = cm_node->rem_port;
+ memcpy(tcp_info->dest_ip_addr, cm_node->rem_addr,
+ sizeof(tcp_info->dest_ip_addr));
+ memcpy(tcp_info->local_ipaddr, cm_node->loc_addr,
+ sizeof(tcp_info->local_ipaddr));
+
+ tcp_info->arp_idx = (u16)irdma_arp_table(iwqp->iwdev->rf,
+ &tcp_info->dest_ip_addr[0],
+ false, NULL,
+ IRDMA_ARP_RESOLVE);
+ }
+}
+
+/**
+ * irdma_cm_init_tsa_conn - setup qp for RTS
+ * @iwqp: associated qp for the connection
+ * @cm_node: connection's node
+ */
+static void irdma_cm_init_tsa_conn(struct irdma_qp *iwqp,
+ struct irdma_cm_node *cm_node)
+{
+ struct irdma_iwarp_offload_info *iwarp_info;
+ struct irdma_qp_host_ctx_info *ctx_info;
+
+ iwarp_info = &iwqp->iwarp_info;
+ ctx_info = &iwqp->ctx_info;
+
+ ctx_info->tcp_info = &iwqp->tcp_info;
+ ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
+ ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
+
+ iwarp_info->ord_size = cm_node->ord_size;
+ iwarp_info->ird_size = cm_node->ird_size;
+ iwarp_info->rd_en = true;
+ iwarp_info->rdmap_ver = 1;
+ iwarp_info->ddp_ver = 1;
+ iwarp_info->pd_id = iwqp->iwpd->sc_pd.pd_id;
+
+ ctx_info->tcp_info_valid = true;
+ ctx_info->iwarp_info_valid = true;
+ ctx_info->user_pri = cm_node->user_pri;
+
+ irdma_init_tcp_ctx(cm_node, &iwqp->tcp_info, iwqp);
+ if (cm_node->snd_mark_en) {
+ iwarp_info->snd_mark_en = true;
+ iwarp_info->snd_mark_offset = (iwqp->tcp_info.snd_nxt & SNDMARKER_SEQNMASK) +
+ cm_node->lsmm_size;
+ }
+
+ cm_node->state = IRDMA_CM_STATE_OFFLOADED;
+ iwqp->tcp_info.tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
+ iwqp->tcp_info.src_mac_addr_idx = iwqp->iwdev->mac_ip_table_idx;
+
+ if (cm_node->rcv_mark_en) {
+ iwarp_info->rcv_mark_en = true;
+ iwarp_info->align_hdrs = true;
+ }
+
+ irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
+
+ /* once tcp_info is set, no need to do it again */
+ ctx_info->tcp_info_valid = false;
+ ctx_info->iwarp_info_valid = false;
+}
+
+/**
+ * irdma_cm_disconn - when a connection is being closed
+ * @iwqp: associated qp for the connection
+ */
+void irdma_cm_disconn(struct irdma_qp *iwqp)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct disconn_work *work;
+ unsigned long flags;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
+ if (!iwdev->rf->qp_table[iwqp->ibqp.qp_num]) {
+ spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: qp_id %d is already freed\n",
+ iwqp->ibqp.qp_num);
+ kfree(work);
+ return;
+ }
+ irdma_qp_add_ref(&iwqp->ibqp);
+ spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
+
+ work->iwqp = iwqp;
+ INIT_WORK(&work->work, irdma_disconnect_worker);
+ queue_work(iwdev->cleanup_wq, &work->work);
+}
+
+/**
+ * irdma_qp_disconnect - free qp and close cm
+ * @iwqp: associated qp for the connection
+ */
+static void irdma_qp_disconnect(struct irdma_qp *iwqp)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+
+ iwqp->active_conn = 0;
+ /* close the CM node down if it is still active */
+ ibdev_dbg(&iwdev->ibdev, "CM: Call close API\n");
+ irdma_cm_close(iwqp->cm_node);
+}
+
+/**
+ * irdma_cm_disconn_true - called by worker thread to disconnect qp
+ * @iwqp: associated qp for the connection
+ */
+static void irdma_cm_disconn_true(struct irdma_qp *iwqp)
+{
+ struct iw_cm_id *cm_id;
+ struct irdma_device *iwdev;
+ struct irdma_sc_qp *qp = &iwqp->sc_qp;
+ u16 last_ae;
+ u8 original_hw_tcp_state;
+ u8 original_ibqp_state;
+ int disconn_status = 0;
+ int issue_disconn = 0;
+ int issue_close = 0;
+ int issue_flush = 0;
+ unsigned long flags;
+ int err;
+
+ iwdev = iwqp->iwdev;
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
+ struct ib_qp_attr attr;
+
+ if (iwqp->flush_issued || iwqp->sc_qp.qp_uk.destroy_pending) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+
+ attr.qp_state = IB_QPS_ERR;
+ irdma_modify_qp_roce(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+ irdma_ib_qp_event(iwqp, qp->event_type);
+ return;
+ }
+
+ cm_id = iwqp->cm_id;
+ original_hw_tcp_state = iwqp->hw_tcp_state;
+ original_ibqp_state = iwqp->ibqp_state;
+ last_ae = iwqp->last_aeq;
+
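+ /* Decide from the HW TCP state, QP state and last AE whether to issue disconnect/close events and flush WQEs */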
+ if (qp->term_flags) {
+ issue_disconn = 1;
+ issue_close = 1;
+ iwqp->cm_id = NULL;
+ irdma_terminate_del_timer(qp);
+ if (!iwqp->flush_issued) {
+ iwqp->flush_issued = 1;
+ issue_flush = 1;
+ }
+ } else if ((original_hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT) ||
+ ((original_ibqp_state == IB_QPS_RTS) &&
+ (last_ae == IRDMA_AE_LLP_CONNECTION_RESET))) {
+ issue_disconn = 1;
+ if (last_ae == IRDMA_AE_LLP_CONNECTION_RESET)
+ disconn_status = -ECONNRESET;
+ }
+
+ if (original_hw_tcp_state == IRDMA_TCP_STATE_CLOSED ||
+ original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
+ last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
+ last_ae == IRDMA_AE_BAD_CLOSE ||
+ last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset || !cm_id) {
+ issue_close = 1;
+ iwqp->cm_id = NULL;
+ qp->term_flags = 0;
+ if (!iwqp->flush_issued) {
+ iwqp->flush_issued = 1;
+ issue_flush = 1;
+ }
+ }
+
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (issue_flush && !iwqp->sc_qp.qp_uk.destroy_pending) {
+ irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_FLUSH_RQ |
+ IRDMA_FLUSH_WAIT);
+
+ if (qp->term_flags)
+ irdma_ib_qp_event(iwqp, qp->event_type);
+ }
+
+ if (!cm_id || !cm_id->event_handler)
+ return;
+
+ spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
+ if (!iwqp->cm_node) {
+ spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
+ return;
+ }
+ refcount_inc(&iwqp->cm_node->refcnt);
+
+ spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
+
+ if (issue_disconn) {
+ err = irdma_send_cm_event(iwqp->cm_node, cm_id,
+ IW_CM_EVENT_DISCONNECT,
+ disconn_status);
+ if (err)
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: disconnect event failed: - cm_id = %p\n",
+ cm_id);
+ }
+ if (issue_close) {
+ cm_id->provider_data = iwqp;
+ err = irdma_send_cm_event(iwqp->cm_node, cm_id,
+ IW_CM_EVENT_CLOSE, 0);
+ if (err)
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: close event failed: - cm_id = %p\n",
+ cm_id);
+ irdma_qp_disconnect(iwqp);
+ }
+ irdma_rem_ref_cm_node(iwqp->cm_node);
+}
+
+/**
+ * irdma_disconnect_worker - worker for connection close
+ * @work: pointer to the disconn_work structure
+ */
+static void irdma_disconnect_worker(struct work_struct *work)
+{
+ struct disconn_work *dwork = container_of(work, struct disconn_work, work);
+ struct irdma_qp *iwqp = dwork->iwqp;
+
+ kfree(dwork);
+ irdma_cm_disconn_true(iwqp);
+ irdma_qp_rem_ref(&iwqp->ibqp);
+}
+
+/**
+ * irdma_free_lsmm_rsrc - free lsmm memory and deregister
+ * @iwqp: associated qp for the connection
+ */
+void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp)
+{
+ struct irdma_device *iwdev;
+
+ iwdev = iwqp->iwdev;
+
+ if (iwqp->ietf_mem.va) {
+ if (iwqp->lsmm_mr)
+ iwdev->ibdev.ops.dereg_mr(iwqp->lsmm_mr, NULL);
+ dma_free_coherent(iwdev->rf->sc_dev.hw->device,
+ iwqp->ietf_mem.size, iwqp->ietf_mem.va,
+ iwqp->ietf_mem.pa);
+ iwqp->ietf_mem.va = NULL;
+ }
+}
+
+/**
+ * irdma_accept - registered call for connection to be accepted
+ * @cm_id: cm information for passive connection
+ * @conn_param: accept parameters
+ */
+int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ struct ib_qp *ibqp;
+ struct irdma_qp *iwqp;
+ struct irdma_device *iwdev;
+ struct irdma_sc_dev *dev;
+ struct irdma_cm_node *cm_node;
+ struct ib_qp_attr attr = {};
+ int passive_state;
+ struct ib_mr *ibmr;
+ struct irdma_pd *iwpd;
+ u16 buf_len = 0;
+ struct irdma_kmem_info accept;
+ u64 tagged_offset;
+ int wait_ret;
+ int ret = 0;
+
+ ibqp = irdma_get_qp(cm_id->device, conn_param->qpn);
+ if (!ibqp)
+ return -EINVAL;
+
+ iwqp = to_iwqp(ibqp);
+ iwdev = iwqp->iwdev;
+ dev = &iwdev->rf->sc_dev;
+ cm_node = cm_id->provider_data;
+
+ if (((struct sockaddr_in *)&cm_id->local_addr)->sin_family == AF_INET) {
+ cm_node->ipv4 = true;
+ cm_node->vlan_id = irdma_get_vlan_ipv4(cm_node->loc_addr);
+ } else {
+ cm_node->ipv4 = false;
+ irdma_get_vlan_mac_ipv6(cm_node->loc_addr, &cm_node->vlan_id,
+ NULL);
+ }
+ ibdev_dbg(&iwdev->ibdev, "CM: Accept vlan_id=%d\n",
+ cm_node->vlan_id);
+
+ trace_irdma_accept(cm_node, 0, NULL);
+
+ if (cm_node->state == IRDMA_CM_STATE_LISTENER_DESTROYED) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ passive_state = atomic_add_return(1, &cm_node->passive_state);
+ if (passive_state == IRDMA_SEND_RESET_EVENT) {
+ ret = -ECONNRESET;
+ goto error;
+ }
+
+ buf_len = conn_param->private_data_len + IRDMA_MAX_IETF_SIZE;
+ iwqp->ietf_mem.size = ALIGN(buf_len, 1);
+ iwqp->ietf_mem.va = dma_alloc_coherent(dev->hw->device,
+ iwqp->ietf_mem.size,
+ &iwqp->ietf_mem.pa, GFP_KERNEL);
+ if (!iwqp->ietf_mem.va) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ cm_node->pdata.size = conn_param->private_data_len;
+ accept.addr = iwqp->ietf_mem.va;
+ accept.size = irdma_cm_build_mpa_frame(cm_node, &accept, MPA_KEY_REPLY);
+ memcpy((u8 *)accept.addr + accept.size, conn_param->private_data,
+ conn_param->private_data_len);
+
+ if (cm_node->dev->ws_add(iwqp->sc_qp.vsi, cm_node->user_pri)) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ iwqp->sc_qp.user_pri = cm_node->user_pri;
+ irdma_qp_add_qos(&iwqp->sc_qp);
+ /* setup our first outgoing iWarp send WQE (the IETF frame response) */
+ iwpd = iwqp->iwpd;
+ tagged_offset = (uintptr_t)iwqp->ietf_mem.va;
+ ibmr = irdma_reg_phys_mr(&iwpd->ibpd, iwqp->ietf_mem.pa, buf_len,
+ IB_ACCESS_LOCAL_WRITE, &tagged_offset);
+ if (IS_ERR(ibmr)) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ibmr->pd = &iwpd->ibpd;
+ ibmr->device = iwpd->ibpd.device;
+ iwqp->lsmm_mr = ibmr;
+ if (iwqp->page)
+ iwqp->sc_qp.qp_uk.sq_base = kmap_local_page(iwqp->page);
+
+ cm_node->lsmm_size = accept.size + conn_param->private_data_len;
+ irdma_sc_send_lsmm(&iwqp->sc_qp, iwqp->ietf_mem.va, cm_node->lsmm_size,
+ ibmr->lkey);
+
+ if (iwqp->page)
+ kunmap_local(iwqp->sc_qp.qp_uk.sq_base);
+
+ iwqp->cm_id = cm_id;
+ cm_node->cm_id = cm_id;
+
+ cm_id->provider_data = iwqp;
+ iwqp->active_conn = 0;
+ iwqp->cm_node = cm_node;
+ cm_node->iwqp = iwqp;
+ irdma_cm_init_tsa_conn(iwqp, cm_node);
+ irdma_qp_add_ref(&iwqp->ibqp);
+ cm_id->add_ref(cm_id);
+
+ attr.qp_state = IB_QPS_RTS;
+ cm_node->qhash_set = false;
+ cm_node->cm_core->cm_free_ah(cm_node);
+
+ irdma_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
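+ /* When the HW supports the RTS asynchronous event, wait for it before reporting ESTABLISHED */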
+ if (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE) {
+ wait_ret = wait_event_interruptible_timeout(iwqp->waitq,
+ iwqp->rts_ae_rcvd,
+ IRDMA_MAX_TIMEOUT);
+ if (!wait_ret) {
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: Slow Connection: cm_node=%p, loc_port=%d, rem_port=%d, cm_id=%p\n",
+ cm_node, cm_node->loc_port,
+ cm_node->rem_port, cm_node->cm_id);
+ ret = -ECONNRESET;
+ goto error;
+ }
+ }
+
+ irdma_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
+ cm_node->accelerated = true;
+ complete(&cm_node->establish_comp);
+
+ if (cm_node->accept_pend) {
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ cm_node->accept_pend = 0;
+ }
+
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4 cm_node=%p cm_id=%p qp_id = %d\n\n",
+ cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr,
+ cm_node->loc_addr, cm_node, cm_id, ibqp->qp_num);
+ cm_node->cm_core->stats_accepts++;
+
+ return 0;
+error:
+ irdma_free_lsmm_rsrc(iwqp);
+ irdma_rem_ref_cm_node(cm_node);
+
+ return ret;
+}
+
+/**
+ * irdma_reject - registered call for connection to be rejected
+ * @cm_id: cm information for passive connection
+ * @pdata: private data to be sent
+ * @pdata_len: private data length
+ */
+int irdma_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+{
+ struct irdma_device *iwdev;
+ struct irdma_cm_node *cm_node;
+
+ cm_node = cm_id->provider_data;
+ cm_node->pdata.size = pdata_len;
+
+ trace_irdma_reject(cm_node, 0, NULL);
+
+ iwdev = to_iwdev(cm_id->device);
+ if (!iwdev)
+ return -EINVAL;
+
+ cm_node->cm_core->stats_rejects++;
+
+ if (pdata_len + sizeof(struct ietf_mpa_v2) > IRDMA_MAX_CM_BUF)
+ return -EINVAL;
+
+ return irdma_cm_reject(cm_node, pdata, pdata_len);
+}
+
+/**
+ * irdma_connect - registered call for connection to be established
+ * @cm_id: cm information for active connection
+ * @conn_param: information about the connection
+ */
+int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ struct ib_qp *ibqp;
+ struct irdma_qp *iwqp;
+ struct irdma_device *iwdev;
+ struct irdma_cm_node *cm_node;
+ struct irdma_cm_info cm_info;
+ struct sockaddr_in *laddr;
+ struct sockaddr_in *raddr;
+ struct sockaddr_in6 *laddr6;
+ struct sockaddr_in6 *raddr6;
+ int ret = 0;
+
+ ibqp = irdma_get_qp(cm_id->device, conn_param->qpn);
+ if (!ibqp)
+ return -EINVAL;
+ iwqp = to_iwqp(ibqp);
+ if (!iwqp)
+ return -EINVAL;
+ iwdev = iwqp->iwdev;
+ if (!iwdev)
+ return -EINVAL;
+
+ laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
+ laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+ raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
+
+ if (!(laddr->sin_port) || !(raddr->sin_port))
+ return -EINVAL;
+
+ iwqp->active_conn = 1;
+ iwqp->cm_id = NULL;
+ cm_id->provider_data = iwqp;
+
+ /* set up the connection params for the node */
+ if (cm_id->remote_addr.ss_family == AF_INET) {
+ if (iwdev->vsi.mtu < IRDMA_MIN_MTU_IPV4)
+ return -EINVAL;
+
+ cm_info.ipv4 = true;
+ memset(cm_info.loc_addr, 0, sizeof(cm_info.loc_addr));
+ memset(cm_info.rem_addr, 0, sizeof(cm_info.rem_addr));
+ cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
+ cm_info.rem_addr[0] = ntohl(raddr->sin_addr.s_addr);
+ cm_info.loc_port = ntohs(laddr->sin_port);
+ cm_info.rem_port = ntohs(raddr->sin_port);
+ cm_info.vlan_id = irdma_get_vlan_ipv4(cm_info.loc_addr);
+ } else {
+ if (iwdev->vsi.mtu < IRDMA_MIN_MTU_IPV6)
+ return -EINVAL;
+
+ cm_info.ipv4 = false;
+ irdma_copy_ip_ntohl(cm_info.loc_addr,
+ laddr6->sin6_addr.in6_u.u6_addr32);
+ irdma_copy_ip_ntohl(cm_info.rem_addr,
+ raddr6->sin6_addr.in6_u.u6_addr32);
+ cm_info.loc_port = ntohs(laddr6->sin6_port);
+ cm_info.rem_port = ntohs(raddr6->sin6_port);
+ irdma_get_vlan_mac_ipv6(cm_info.loc_addr, &cm_info.vlan_id,
+ NULL);
+ }
+ cm_info.cm_id = cm_id;
+ cm_info.qh_qpid = iwdev->vsi.ilq->qp_id;
+ cm_info.tos = cm_id->tos;
+ if (iwdev->vsi.dscp_mode) {
+ cm_info.user_pri =
+ iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(cm_info.tos)];
+ } else {
+ cm_info.user_pri = rt_tos2priority(cm_id->tos);
+ cm_info.user_pri = irdma_iw_get_vlan_prio(cm_info.loc_addr,
+ cm_info.user_pri,
+ cm_info.ipv4);
+ }
+
+ if (iwqp->sc_qp.dev->ws_add(iwqp->sc_qp.vsi, cm_info.user_pri))
+ return -ENOMEM;
+ iwqp->sc_qp.user_pri = cm_info.user_pri;
+ irdma_qp_add_qos(&iwqp->sc_qp);
+ ibdev_dbg(&iwdev->ibdev, "DCB: TOS:[%d] UP:[%d]\n", cm_id->tos,
+ cm_info.user_pri);
+
+ trace_irdma_dcb_tos(iwdev, cm_id->tos, cm_info.user_pri);
+
+ ret = irdma_create_cm_node(&iwdev->cm_core, iwdev, conn_param, &cm_info,
+ &cm_node);
+ if (ret)
+ return ret;
+ ret = cm_node->cm_core->cm_create_ah(cm_node, true);
+ if (ret)
+ goto err;
+ if (irdma_manage_qhash(iwdev, &cm_info,
+ IRDMA_QHASH_TYPE_TCP_ESTABLISHED,
+ IRDMA_QHASH_MANAGE_TYPE_ADD, NULL, true)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ cm_node->qhash_set = true;
+
+ cm_node->apbvt_entry = irdma_add_apbvt(iwdev, cm_info.loc_port);
+ if (!cm_node->apbvt_entry) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ cm_node->apbvt_set = true;
+ iwqp->cm_node = cm_node;
+ cm_node->iwqp = iwqp;
+ iwqp->cm_id = cm_id;
+ irdma_qp_add_ref(&iwqp->ibqp);
+ cm_id->add_ref(cm_id);
+
+ if (cm_node->state != IRDMA_CM_STATE_OFFLOADED) {
+ cm_node->state = IRDMA_CM_STATE_SYN_SENT;
+ ret = irdma_send_syn(cm_node, 0);
+ if (ret)
+ goto err;
+ }
+
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4 cm_node=%p cm_id=%p qp_id = %d\n\n",
+ cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr,
+ cm_node->loc_addr, cm_node, cm_id, ibqp->qp_num);
+
+ trace_irdma_connect(cm_node, 0, NULL);
+
+ return 0;
+
+err:
+ if (cm_info.ipv4)
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: connect() FAILED: dest addr=%pI4",
+ cm_info.rem_addr);
+ else
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: connect() FAILED: dest addr=%pI6",
+ cm_info.rem_addr);
+ irdma_rem_ref_cm_node(cm_node);
+ iwdev->cm_core.stats_connect_errs++;
+
+ return ret;
+}
+
+/**
+ * irdma_create_listen - registered call to create a listener
+ * @cm_id: cm information for passive connection
+ * @backlog: maximum number of pending accepts
+ */
+int irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
+{
+ struct irdma_device *iwdev;
+ struct irdma_cm_listener *cm_listen_node;
+ struct irdma_cm_info cm_info = {};
+ struct sockaddr_in *laddr;
+ struct sockaddr_in6 *laddr6;
+ bool wildcard = false;
+ int err;
+
+ iwdev = to_iwdev(cm_id->device);
+ if (!iwdev)
+ return -EINVAL;
+
+ laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+ cm_info.qh_qpid = iwdev->vsi.ilq->qp_id;
+
+ if (laddr->sin_family == AF_INET) {
+ if (iwdev->vsi.mtu < IRDMA_MIN_MTU_IPV4)
+ return -EINVAL;
+
+ cm_info.ipv4 = true;
+ cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
+ cm_info.loc_port = ntohs(laddr->sin_port);
+
+ if (laddr->sin_addr.s_addr != htonl(INADDR_ANY)) {
+ cm_info.vlan_id = irdma_get_vlan_ipv4(cm_info.loc_addr);
+ } else {
+ cm_info.vlan_id = 0xFFFF;
+ wildcard = true;
+ }
+ } else {
+ if (iwdev->vsi.mtu < IRDMA_MIN_MTU_IPV6)
+ return -EINVAL;
+
+ cm_info.ipv4 = false;
+ irdma_copy_ip_ntohl(cm_info.loc_addr,
+ laddr6->sin6_addr.in6_u.u6_addr32);
+ cm_info.loc_port = ntohs(laddr6->sin6_port);
+ if (ipv6_addr_type(&laddr6->sin6_addr) != IPV6_ADDR_ANY) {
+ irdma_get_vlan_mac_ipv6(cm_info.loc_addr,
+ &cm_info.vlan_id, NULL);
+ } else {
+ cm_info.vlan_id = 0xFFFF;
+ wildcard = true;
+ }
+ }
+
+ if (cm_info.vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
+ cm_info.vlan_id = 0;
+ cm_info.backlog = backlog;
+ cm_info.cm_id = cm_id;
+
+ trace_irdma_create_listen(iwdev, &cm_info);
+
+ cm_listen_node = irdma_make_listen_node(&iwdev->cm_core, iwdev,
+ &cm_info);
+ if (!cm_listen_node) {
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: cm_listen_node == NULL\n");
+ return -ENOMEM;
+ }
+
+ cm_id->provider_data = cm_listen_node;
+
+ cm_listen_node->tos = cm_id->tos;
+ if (iwdev->vsi.dscp_mode)
+ cm_listen_node->user_pri =
+ iwdev->vsi.dscp_map[irdma_tos2dscp(cm_id->tos)];
+ else
+ cm_listen_node->user_pri = rt_tos2priority(cm_id->tos);
+ cm_info.user_pri = cm_listen_node->user_pri;
+ if (!cm_listen_node->reused_node) {
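+ /* wildcard listeners are handled by irdma_add_mqh(); a specific
+  * local address gets a single TCP_SYN qhash entry
+  */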
+ if (wildcard) {
+ err = irdma_add_mqh(iwdev, &cm_info, cm_listen_node);
+ if (err)
+ goto error;
+ } else {
+ if (!iwdev->vsi.dscp_mode)
+ cm_listen_node->user_pri =
+ irdma_iw_get_vlan_prio(cm_info.loc_addr,
+ cm_info.user_pri,
+ cm_info.ipv4);
+ cm_info.user_pri = cm_listen_node->user_pri;
+ err = irdma_manage_qhash(iwdev, &cm_info,
+ IRDMA_QHASH_TYPE_TCP_SYN,
+ IRDMA_QHASH_MANAGE_TYPE_ADD,
+ NULL, true);
+ if (err)
+ goto error;
+
+ cm_listen_node->qhash_set = true;
+ }
+
+ cm_listen_node->apbvt_entry = irdma_add_apbvt(iwdev,
+ cm_info.loc_port);
+ if (!cm_listen_node->apbvt_entry)
+ goto error;
+ }
+ cm_id->add_ref(cm_id);
+ cm_listen_node->cm_core->stats_listen_created++;
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: loc_port=0x%04x loc_addr=%pI4 cm_listen_node=%p cm_id=%p qhash_set=%d vlan_id=%d\n",
+ cm_listen_node->loc_port, cm_listen_node->loc_addr,
+ cm_listen_node, cm_listen_node->cm_id,
+ cm_listen_node->qhash_set, cm_listen_node->vlan_id);
+
+ return 0;
+
+error:
+
+ irdma_cm_del_listen(&iwdev->cm_core, cm_listen_node, false);
+
+ return -EINVAL;
+}
+
+/**
+ * irdma_destroy_listen - registered call to destroy listener
+ * @cm_id: cm information for passive connection
+ */
+int irdma_destroy_listen(struct iw_cm_id *cm_id)
+{
+ struct irdma_device *iwdev;
+
+ iwdev = to_iwdev(cm_id->device);
+ if (cm_id->provider_data)
+ irdma_cm_del_listen(&iwdev->cm_core, cm_id->provider_data,
+ true);
+ else
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: cm_id->provider_data was NULL\n");
+
+ cm_id->rem_ref(cm_id);
+
+ return 0;
+}
+
+/**
+ * irdma_teardown_list_prep - add cm nodes slated for teardown to a list
+ * @cm_core: cm's core
+ * @teardown_list: list to which matching cm_nodes are added
+ * @ipaddr: pointer to ip address
+ * @nfo: pointer to cm_info structure instance
+ * @disconnect_all: flag indicating disconnect all QPs
+ */
+static void irdma_teardown_list_prep(struct irdma_cm_core *cm_core,
+ struct list_head *teardown_list,
+ u32 *ipaddr,
+ struct irdma_cm_info *nfo,
+ bool disconnect_all)
+{
+ struct irdma_cm_node *cm_node;
+ int bkt;
+
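+ /* hold a reference on each matching node so it remains valid until teardown completes */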
+ hash_for_each_rcu(cm_core->cm_hash_tbl, bkt, cm_node, list) {
+ if ((disconnect_all ||
+ (nfo->vlan_id == cm_node->vlan_id &&
+ !memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 4 : 16))) &&
+ refcount_inc_not_zero(&cm_node->refcnt))
+ list_add(&cm_node->teardown_entry, teardown_list);
+ }
+}
+
+/**
+ * irdma_cm_event_connected - handle connected active node
+ * @event: the info for cm_node of connection
+ */
+static void irdma_cm_event_connected(struct irdma_cm_event *event)
+{
+ struct irdma_qp *iwqp;
+ struct irdma_device *iwdev;
+ struct irdma_cm_node *cm_node;
+ struct irdma_sc_dev *dev;
+ struct ib_qp_attr attr = {};
+ struct iw_cm_id *cm_id;
+ int status;
+ bool read0;
+ int wait_ret = 0;
+
+ cm_node = event->cm_node;
+ cm_id = cm_node->cm_id;
+ iwqp = cm_id->provider_data;
+ iwdev = iwqp->iwdev;
+ dev = &iwdev->rf->sc_dev;
+ if (iwqp->sc_qp.qp_uk.destroy_pending) {
+ status = -ETIMEDOUT;
+ goto error;
+ }
+
+ irdma_cm_init_tsa_conn(iwqp, cm_node);
+ read0 = (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO);
+ if (iwqp->page)
+ iwqp->sc_qp.qp_uk.sq_base = kmap_local_page(iwqp->page);
+ irdma_sc_send_rtt(&iwqp->sc_qp, read0);
+ if (iwqp->page)
+ kunmap_local(iwqp->sc_qp.qp_uk.sq_base);
+
+ attr.qp_state = IB_QPS_RTS;
+ cm_node->qhash_set = false;
+ irdma_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+ if (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE) {
+ wait_ret = wait_event_interruptible_timeout(iwqp->waitq,
+ iwqp->rts_ae_rcvd,
+ IRDMA_MAX_TIMEOUT);
+ if (!wait_ret)
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: Slow Connection: cm_node=%p, loc_port=%d, rem_port=%d, cm_id=%p\n",
+ cm_node, cm_node->loc_port,
+ cm_node->rem_port, cm_node->cm_id);
+ }
+
+ irdma_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, 0);
+ cm_node->accelerated = true;
+ complete(&cm_node->establish_comp);
+ cm_node->cm_core->cm_free_ah(cm_node);
+ return;
+
+error:
+ iwqp->cm_id = NULL;
+ cm_id->provider_data = NULL;
+ irdma_send_cm_event(event->cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
+ status);
+ irdma_rem_ref_cm_node(event->cm_node);
+}
+
+/**
+ * irdma_cm_event_reset - handle reset
+ * @event: the info for cm_node of connection
+ */
+static void irdma_cm_event_reset(struct irdma_cm_event *event)
+{
+ struct irdma_cm_node *cm_node = event->cm_node;
+ struct iw_cm_id *cm_id = cm_node->cm_id;
+ struct irdma_qp *iwqp;
+
+ if (!cm_id)
+ return;
+
+ iwqp = cm_id->provider_data;
+ if (!iwqp)
+ return;
+
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: reset event %p - cm_id = %p\n", event->cm_node, cm_id);
+ iwqp->cm_id = NULL;
+
+ irdma_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_DISCONNECT,
+ -ECONNRESET);
+ irdma_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_CLOSE, 0);
+}
+
+/**
+ * irdma_cm_event_handler - send event to cm upper layer
+ * @work: pointer to the cm event work item
+ */
+static void irdma_cm_event_handler(struct work_struct *work)
+{
+ struct irdma_cm_event *event = container_of(work, struct irdma_cm_event, event_work);
+ struct irdma_cm_node *cm_node;
+
+ if (!event || !event->cm_node || !event->cm_node->cm_core)
+ return;
+
+ cm_node = event->cm_node;
+ trace_irdma_cm_event_handler(cm_node, event->type, NULL);
+
+ switch (event->type) {
+ case IRDMA_CM_EVENT_MPA_REQ:
+ irdma_send_cm_event(cm_node, cm_node->cm_id,
+ IW_CM_EVENT_CONNECT_REQUEST, 0);
+ break;
+ case IRDMA_CM_EVENT_RESET:
+ irdma_cm_event_reset(event);
+ break;
+ case IRDMA_CM_EVENT_CONNECTED:
+ if (!event->cm_node->cm_id ||
+ event->cm_node->state != IRDMA_CM_STATE_OFFLOADED)
+ break;
+ irdma_cm_event_connected(event);
+ break;
+ case IRDMA_CM_EVENT_MPA_REJECT:
+ if (!event->cm_node->cm_id ||
+ cm_node->state == IRDMA_CM_STATE_OFFLOADED)
+ break;
+ irdma_send_cm_event(cm_node, cm_node->cm_id,
+ IW_CM_EVENT_CONNECT_REPLY, -ECONNREFUSED);
+ break;
+ case IRDMA_CM_EVENT_ABORTED:
+ if (!event->cm_node->cm_id ||
+ event->cm_node->state == IRDMA_CM_STATE_OFFLOADED)
+ break;
+ irdma_event_connect_error(event);
+ break;
+ default:
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: bad event type = %d\n", event->type);
+ break;
+ }
+
+ irdma_rem_ref_cm_node(event->cm_node);
+ kfree(event);
+}
+
+/**
+ * irdma_cm_post_event - queue event request for worker thread
+ * @event: cm event to queue for the worker thread
+ */
+static void irdma_cm_post_event(struct irdma_cm_event *event)
+{
+ refcount_inc(&event->cm_node->refcnt);
+ INIT_WORK(&event->event_work, irdma_cm_event_handler);
+ queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
+}
+
+/**
+ * irdma_cm_teardown_connections - teardown QPs
+ * @iwdev: device pointer
+ * @ipaddr: Pointer to IPv4 or IPv6 address
+ * @nfo: Connection info
+ * @disconnect_all: flag indicating disconnect all QPs
+ *
+ * teardown QPs where source or destination addr matches ip addr
+ */
+void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
+ struct irdma_cm_info *nfo,
+ bool disconnect_all)
+{
+ struct irdma_cm_core *cm_core = &iwdev->cm_core;
+ struct list_head *list_core_temp;
+ struct list_head *list_node;
+ struct irdma_cm_node *cm_node;
+ struct list_head teardown_list;
+ struct ib_qp_attr attr;
+
+ INIT_LIST_HEAD(&teardown_list);
+
+ rcu_read_lock();
+ irdma_teardown_list_prep(cm_core, &teardown_list, ipaddr, nfo, disconnect_all);
+ rcu_read_unlock();
+
+ list_for_each_safe (list_node, list_core_temp, &teardown_list) {
+ cm_node = container_of(list_node, struct irdma_cm_node,
+ teardown_entry);
+ attr.qp_state = IB_QPS_ERR;
+ irdma_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+ if (iwdev->rf->reset)
+ irdma_cm_disconn(cm_node->iwqp);
+ irdma_rem_ref_cm_node(cm_node);
+ }
+}
+
+/**
+ * irdma_qhash_ctrl - enable/disable qhash for list
+ * @iwdev: device pointer
+ * @parent_listen_node: parent listen node
+ * @nfo: cm info node
+ * @ipaddr: Pointer to IPv4 or IPv6 address
+ * @ipv4: flag indicating IPv4 when true
+ * @ifup: flag indicating interface up when true
+ *
+ * Enables or disables the qhash for the node in the child
+ * listen list that matches ipaddr. If no matching IP was found
+ * it will allocate and add a new child listen node to the
+ * parent listen node. The listen_list_lock is assumed to be
+ * held when called.
+ */
+static void irdma_qhash_ctrl(struct irdma_device *iwdev,
+ struct irdma_cm_listener *parent_listen_node,
+ struct irdma_cm_info *nfo, u32 *ipaddr, bool ipv4,
+ bool ifup)
+{
+ struct list_head *child_listen_list = &parent_listen_node->child_listen_list;
+ struct irdma_cm_listener *child_listen_node;
+ struct list_head *pos, *tpos;
+ bool node_allocated = false;
+ enum irdma_quad_hash_manage_type op = ifup ?
+ IRDMA_QHASH_MANAGE_TYPE_ADD :
+ IRDMA_QHASH_MANAGE_TYPE_DELETE;
+ int err;
+
+ list_for_each_safe (pos, tpos, child_listen_list) {
+ child_listen_node = list_entry(pos, struct irdma_cm_listener,
+ child_listen_list);
+ if (!memcmp(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16))
+ goto set_qhash;
+ }
+
+ /* if not found then add a child listener if interface is going up */
+ if (!ifup)
+ return;
+ child_listen_node = kmemdup(parent_listen_node,
+ sizeof(*child_listen_node), GFP_ATOMIC);
+ if (!child_listen_node)
+ return;
+
+ node_allocated = true;
+ memcpy(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16);
+
+set_qhash:
+ memcpy(nfo->loc_addr, child_listen_node->loc_addr,
+ sizeof(nfo->loc_addr));
+ nfo->vlan_id = child_listen_node->vlan_id;
+ err = irdma_manage_qhash(iwdev, nfo, IRDMA_QHASH_TYPE_TCP_SYN, op, NULL,
+ false);
+ if (!err) {
+ child_listen_node->qhash_set = ifup;
+ if (node_allocated)
+ list_add(&child_listen_node->child_listen_list,
+ &parent_listen_node->child_listen_list);
+ } else if (node_allocated) {
+ kfree(child_listen_node);
+ }
+}
+
+/**
+ * irdma_if_notify - process an interface up/down event
+ * @iwdev: device pointer
+ * @netdev: network device structure
+ * @ipaddr: Pointer to IPv4 or IPv6 address
+ * @ipv4: flag indicating IPv4 when true
+ * @ifup: flag indicating interface up when true
+ */
+void irdma_if_notify(struct irdma_device *iwdev, struct net_device *netdev,
+ u32 *ipaddr, bool ipv4, bool ifup)
+{
+ struct irdma_cm_core *cm_core = &iwdev->cm_core;
+ unsigned long flags;
+ struct irdma_cm_listener *listen_node;
+ static const u32 ip_zero[4] = { 0, 0, 0, 0 };
+ struct irdma_cm_info nfo = {};
+ u16 vlan_id = rdma_vlan_dev_vlan_id(netdev);
+ enum irdma_quad_hash_manage_type op = ifup ?
+ IRDMA_QHASH_MANAGE_TYPE_ADD :
+ IRDMA_QHASH_MANAGE_TYPE_DELETE;
+
+ nfo.vlan_id = vlan_id;
+ nfo.ipv4 = ipv4;
+ nfo.qh_qpid = 1;
+
+ /* Disable or enable qhash for listeners */
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_for_each_entry (listen_node, &cm_core->listen_list, list) {
+ if (vlan_id != listen_node->vlan_id ||
+ (memcmp(listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16) &&
+ memcmp(listen_node->loc_addr, ip_zero, ipv4 ? 4 : 16)))
+ continue;
+
+ memcpy(nfo.loc_addr, listen_node->loc_addr,
+ sizeof(nfo.loc_addr));
+ nfo.loc_port = listen_node->loc_port;
+ nfo.user_pri = listen_node->user_pri;
+ if (!list_empty(&listen_node->child_listen_list)) {
+ irdma_qhash_ctrl(iwdev, listen_node, &nfo, ipaddr, ipv4,
+ ifup);
+ } else if (memcmp(listen_node->loc_addr, ip_zero,
+ ipv4 ? 4 : 16)) {
+ if (!irdma_manage_qhash(iwdev, &nfo,
+ IRDMA_QHASH_TYPE_TCP_SYN, op,
+ NULL, false))
+ listen_node->qhash_set = ifup;
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
+ /* disconnect any connected qp's on ifdown */
+ if (!ifup)
+ irdma_cm_teardown_connections(iwdev, ipaddr, &nfo, false);
+}
diff --git a/drivers/infiniband/hw/irdma/cm.h b/drivers/infiniband/hw/irdma/cm.h
new file mode 100644
index 000000000000..48ee285cf745
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/cm.h
@@ -0,0 +1,416 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#ifndef IRDMA_CM_H
+#define IRDMA_CM_H
+
+#define IRDMA_MPA_REQUEST_ACCEPT 1
+#define IRDMA_MPA_REQUEST_REJECT 2
+
+/* IETF MPA -- defines */
+#define IEFT_MPA_KEY_REQ "MPA ID Req Frame"
+#define IEFT_MPA_KEY_REP "MPA ID Rep Frame"
+#define IETF_MPA_KEY_SIZE 16
+#define IETF_MPA_VER 1
+#define IETF_MAX_PRIV_DATA_LEN 512
+#define IETF_MPA_FRAME_SIZE 20
+#define IETF_RTR_MSG_SIZE 4
+#define IETF_MPA_V2_FLAG 0x10
+#define SNDMARKER_SEQNMASK 0x000001ff
+#define IRDMA_MAX_IETF_SIZE 32
+
+/* IETF RTR MSG Fields */
+#define IETF_PEER_TO_PEER 0x8000
+#define IETF_FLPDU_ZERO_LEN 0x4000
+#define IETF_RDMA0_WRITE 0x8000
+#define IETF_RDMA0_READ 0x4000
+#define IETF_NO_IRD_ORD 0x3fff
+
+#define MAX_PORTS 65536
+
+#define IRDMA_PASSIVE_STATE_INDICATED 0
+#define IRDMA_DO_NOT_SEND_RESET_EVENT 1
+#define IRDMA_SEND_RESET_EVENT 2
+
+#define MAX_IRDMA_IFS 4
+
+#define SET_ACK 1
+#define SET_SYN 2
+#define SET_FIN 4
+#define SET_RST 8
+
+#define TCP_OPTIONS_PADDING 3
+
+#define IRDMA_DEFAULT_RETRYS 64
+#define IRDMA_DEFAULT_RETRANS 32
+#define IRDMA_DEFAULT_TTL 0x40
+#define IRDMA_DEFAULT_RTT_VAR 6
+#define IRDMA_DEFAULT_SS_THRESH 0x3fffffff
+#define IRDMA_DEFAULT_REXMIT_THRESH 8
+
+#define IRDMA_RETRY_TIMEOUT HZ
+#define IRDMA_SHORT_TIME 10
+#define IRDMA_LONG_TIME (2 * HZ)
+#define IRDMA_MAX_TIMEOUT ((unsigned long)(12 * HZ))
+
+#define IRDMA_CM_HASHTABLE_SIZE 1024
+#define IRDMA_CM_TCP_TIMER_INTERVAL 3000
+#define IRDMA_CM_DEFAULT_MTU 1540
+#define IRDMA_CM_DEFAULT_FRAME_CNT 10
+#define IRDMA_CM_THREAD_STACK_SIZE 256
+#define IRDMA_CM_DEFAULT_RCV_WND 64240
+#define IRDMA_CM_DEFAULT_RCV_WND_SCALED 0x3FFFC
+#define IRDMA_CM_DEFAULT_RCV_WND_SCALE 2
+#define IRDMA_CM_DEFAULT_FREE_PKTS 10
+#define IRDMA_CM_FREE_PKT_LO_WATERMARK 2
+#define IRDMA_CM_DEFAULT_MSS 536
+#define IRDMA_CM_DEFAULT_MPA_VER 2
+#define IRDMA_CM_DEFAULT_SEQ 0x159bf75f
+#define IRDMA_CM_DEFAULT_LOCAL_ID 0x3b47
+#define IRDMA_CM_DEFAULT_SEQ2 0x18ed5740
+#define IRDMA_CM_DEFAULT_LOCAL_ID2 0xb807
+#define IRDMA_MAX_CM_BUF (IRDMA_MAX_IETF_SIZE + IETF_MAX_PRIV_DATA_LEN)
+
+enum ietf_mpa_flags {
+ IETF_MPA_FLAGS_REJECT = 0x20,
+ IETF_MPA_FLAGS_CRC = 0x40,
+ IETF_MPA_FLAGS_MARKERS = 0x80,
+};
+
+enum irdma_timer_type {
+ IRDMA_TIMER_TYPE_SEND,
+ IRDMA_TIMER_TYPE_CLOSE,
+};
+
+enum option_nums {
+ OPTION_NUM_EOL,
+ OPTION_NUM_NONE,
+ OPTION_NUM_MSS,
+ OPTION_NUM_WINDOW_SCALE,
+ OPTION_NUM_SACK_PERM,
+ OPTION_NUM_SACK,
+ OPTION_NUM_WRITE0 = 0xbc,
+};
+
+/* cm node transition states */
+enum irdma_cm_node_state {
+ IRDMA_CM_STATE_UNKNOWN,
+ IRDMA_CM_STATE_INITED,
+ IRDMA_CM_STATE_LISTENING,
+ IRDMA_CM_STATE_SYN_RCVD,
+ IRDMA_CM_STATE_SYN_SENT,
+ IRDMA_CM_STATE_ONE_SIDE_ESTABLISHED,
+ IRDMA_CM_STATE_ESTABLISHED,
+ IRDMA_CM_STATE_ACCEPTING,
+ IRDMA_CM_STATE_MPAREQ_SENT,
+ IRDMA_CM_STATE_MPAREQ_RCVD,
+ IRDMA_CM_STATE_MPAREJ_RCVD,
+ IRDMA_CM_STATE_OFFLOADED,
+ IRDMA_CM_STATE_FIN_WAIT1,
+ IRDMA_CM_STATE_FIN_WAIT2,
+ IRDMA_CM_STATE_CLOSE_WAIT,
+ IRDMA_CM_STATE_TIME_WAIT,
+ IRDMA_CM_STATE_LAST_ACK,
+ IRDMA_CM_STATE_CLOSING,
+ IRDMA_CM_STATE_LISTENER_DESTROYED,
+ IRDMA_CM_STATE_CLOSED,
+};
+
+enum mpa_frame_ver {
+ IETF_MPA_V1 = 1,
+ IETF_MPA_V2 = 2,
+};
+
+enum mpa_frame_key {
+ MPA_KEY_REQUEST,
+ MPA_KEY_REPLY,
+};
+
+enum send_rdma0 {
+ SEND_RDMA_READ_ZERO = 1,
+ SEND_RDMA_WRITE_ZERO = 2,
+};
+
+enum irdma_tcpip_pkt_type {
+ IRDMA_PKT_TYPE_UNKNOWN,
+ IRDMA_PKT_TYPE_SYN,
+ IRDMA_PKT_TYPE_SYNACK,
+ IRDMA_PKT_TYPE_ACK,
+ IRDMA_PKT_TYPE_FIN,
+ IRDMA_PKT_TYPE_RST,
+};
+
+enum irdma_cm_listener_state {
+ IRDMA_CM_LISTENER_PASSIVE_STATE = 1,
+ IRDMA_CM_LISTENER_ACTIVE_STATE = 2,
+ IRDMA_CM_LISTENER_EITHER_STATE = 3,
+};
+
+/* CM event codes */
+enum irdma_cm_event_type {
+ IRDMA_CM_EVENT_UNKNOWN,
+ IRDMA_CM_EVENT_ESTABLISHED,
+ IRDMA_CM_EVENT_MPA_REQ,
+ IRDMA_CM_EVENT_MPA_CONNECT,
+ IRDMA_CM_EVENT_MPA_ACCEPT,
+ IRDMA_CM_EVENT_MPA_REJECT,
+ IRDMA_CM_EVENT_MPA_ESTABLISHED,
+ IRDMA_CM_EVENT_CONNECTED,
+ IRDMA_CM_EVENT_RESET,
+ IRDMA_CM_EVENT_ABORTED,
+};
+
+struct ietf_mpa_v1 {
+ u8 key[IETF_MPA_KEY_SIZE];
+ u8 flags;
+ u8 rev;
+ __be16 priv_data_len;
+ u8 priv_data[];
+};
+
+struct ietf_rtr_msg {
+ __be16 ctrl_ird;
+ __be16 ctrl_ord;
+};
+
+struct ietf_mpa_v2 {
+ u8 key[IETF_MPA_KEY_SIZE];
+ u8 flags;
+ u8 rev;
+ __be16 priv_data_len;
+ struct ietf_rtr_msg rtr_msg;
+ u8 priv_data[];
+};
+
+struct option_base {
+ u8 optionnum;
+ u8 len;
+};
+
+struct option_mss {
+ u8 optionnum;
+ u8 len;
+ __be16 mss;
+};
+
+struct option_windowscale {
+ u8 optionnum;
+ u8 len;
+ u8 shiftcount;
+};
+
+union all_known_options {
+ char eol;
+ struct option_base base;
+ struct option_mss mss;
+ struct option_windowscale windowscale;
+};
+
+struct irdma_timer_entry {
+ struct list_head list;
+ unsigned long timetosend; /* jiffies */
+ struct irdma_puda_buf *sqbuf;
+ u32 type;
+ u32 retrycount;
+ u32 retranscount;
+ u32 context;
+ u32 send_retrans;
+ int close_when_complete;
+};
+
+/* CM context params */
+struct irdma_cm_tcp_context {
+ u8 client;
+ u32 loc_seq_num;
+ u32 loc_ack_num;
+ u32 rem_ack_num;
+ u32 rcv_nxt;
+ u32 loc_id;
+ u32 rem_id;
+ u32 snd_wnd;
+ u32 max_snd_wnd;
+ u32 rcv_wnd;
+ u32 mss;
+ u8 snd_wscale;
+ u8 rcv_wscale;
+};
+
+struct irdma_apbvt_entry {
+ struct hlist_node hlist;
+ u32 use_cnt;
+ u16 port;
+};
+
+struct irdma_cm_listener {
+ struct list_head list;
+ struct iw_cm_id *cm_id;
+ struct irdma_cm_core *cm_core;
+ struct irdma_device *iwdev;
+ struct list_head child_listen_list;
+ struct irdma_apbvt_entry *apbvt_entry;
+ enum irdma_cm_listener_state listener_state;
+ refcount_t refcnt;
+ atomic_t pend_accepts_cnt;
+ u32 loc_addr[4];
+ u32 reused_node;
+ int backlog;
+ u16 loc_port;
+ u16 vlan_id;
+ u8 loc_mac[ETH_ALEN];
+ u8 user_pri;
+ u8 tos;
+ bool qhash_set:1;
+ bool ipv4:1;
+};
+
+struct irdma_kmem_info {
+ void *addr;
+ u32 size;
+};
+
+struct irdma_mpa_priv_info {
+ const void *addr;
+ u32 size;
+};
+
+struct irdma_cm_node {
+ struct irdma_qp *iwqp;
+ struct irdma_device *iwdev;
+ struct irdma_sc_dev *dev;
+ struct irdma_cm_tcp_context tcp_cntxt;
+ struct irdma_cm_core *cm_core;
+ struct irdma_timer_entry *send_entry;
+ struct irdma_timer_entry *close_entry;
+ struct irdma_cm_listener *listener;
+ struct list_head timer_entry;
+ struct list_head reset_entry;
+ struct list_head teardown_entry;
+ struct irdma_apbvt_entry *apbvt_entry;
+ struct rcu_head rcu_head;
+ struct irdma_mpa_priv_info pdata;
+ struct irdma_sc_ah *ah;
+ union {
+ struct ietf_mpa_v1 mpa_frame;
+ struct ietf_mpa_v2 mpa_v2_frame;
+ };
+ struct irdma_kmem_info mpa_hdr;
+ struct iw_cm_id *cm_id;
+ struct hlist_node list;
+ struct completion establish_comp;
+ spinlock_t retrans_list_lock; /* protect CM node rexmit updates */
+ atomic_t passive_state;
+ refcount_t refcnt;
+ enum irdma_cm_node_state state;
+ enum send_rdma0 send_rdma0_op;
+ enum mpa_frame_ver mpa_frame_rev;
+ u32 loc_addr[4], rem_addr[4];
+ u16 loc_port, rem_port;
+ int apbvt_set;
+ int accept_pend;
+ u16 vlan_id;
+ u16 ird_size;
+ u16 ord_size;
+ u16 mpav2_ird_ord;
+ u16 lsmm_size;
+ u8 pdata_buf[IETF_MAX_PRIV_DATA_LEN];
+ u8 loc_mac[ETH_ALEN];
+ u8 rem_mac[ETH_ALEN];
+ u8 user_pri;
+ u8 tos;
+ bool ack_rcvd:1;
+ bool qhash_set:1;
+ bool ipv4:1;
+ bool snd_mark_en:1;
+ bool rcv_mark_en:1;
+ bool do_lpb:1;
+ bool accelerated:1;
+};
+
+/* Used by internal CM APIs to pass CM information */
+struct irdma_cm_info {
+ struct iw_cm_id *cm_id;
+ u16 loc_port;
+ u16 rem_port;
+ u32 loc_addr[4];
+ u32 rem_addr[4];
+ u32 qh_qpid;
+ u16 vlan_id;
+ int backlog;
+ u8 user_pri;
+ u8 tos;
+ bool ipv4;
+};
+
+struct irdma_cm_event {
+ enum irdma_cm_event_type type;
+ struct irdma_cm_info cm_info;
+ struct work_struct event_work;
+ struct irdma_cm_node *cm_node;
+};
+
+struct irdma_cm_core {
+ struct irdma_device *iwdev;
+ struct irdma_sc_dev *dev;
+ struct list_head listen_list;
+ DECLARE_HASHTABLE(cm_hash_tbl, 8);
+ DECLARE_HASHTABLE(apbvt_hash_tbl, 8);
+ struct timer_list tcp_timer;
+ struct workqueue_struct *event_wq;
+ spinlock_t ht_lock; /* protect CM node (active side) list */
+ spinlock_t listen_list_lock; /* protect listener list */
+ spinlock_t apbvt_lock; /* serialize apbvt add/del entries */
+ u64 stats_nodes_created;
+ u64 stats_nodes_destroyed;
+ u64 stats_listen_created;
+ u64 stats_listen_destroyed;
+ u64 stats_listen_nodes_created;
+ u64 stats_listen_nodes_destroyed;
+ u64 stats_lpbs;
+ u64 stats_accepts;
+ u64 stats_rejects;
+ u64 stats_connect_errs;
+ u64 stats_passive_errs;
+ u64 stats_pkt_retrans;
+ u64 stats_backlog_drops;
+ struct irdma_puda_buf *(*form_cm_frame)(struct irdma_cm_node *cm_node,
+ struct irdma_kmem_info *options,
+ struct irdma_kmem_info *hdr,
+ struct irdma_mpa_priv_info *pdata,
+ u8 flags);
+ int (*cm_create_ah)(struct irdma_cm_node *cm_node, bool wait);
+ void (*cm_free_ah)(struct irdma_cm_node *cm_node);
+};
+
+int irdma_schedule_cm_timer(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *sqbuf,
+ enum irdma_timer_type type, int send_retrans,
+ int close_when_complete);
+
+static inline u8 irdma_tos2dscp(u8 tos)
+{
+#define IRDMA_DSCP_VAL GENMASK(7, 2)
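+ /* DSCP is the upper six bits of the ToS byte, e.g. tos 0xB8 yields dscp 0x2E */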
+ return (u8)FIELD_GET(IRDMA_DSCP_VAL, tos);
+}
+
+int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
+int irdma_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
+int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
+int irdma_create_listen(struct iw_cm_id *cm_id, int backlog);
+int irdma_destroy_listen(struct iw_cm_id *cm_id);
+int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, const u8 *mac);
+void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
+ struct irdma_cm_info *nfo,
+ bool disconnect_all);
+int irdma_cm_start(struct irdma_device *dev);
+int irdma_cm_stop(struct irdma_device *dev);
+bool irdma_ipv4_is_lpb(u32 loc_addr, u32 rem_addr);
+bool irdma_ipv6_is_lpb(u32 *loc_addr, u32 *rem_addr);
+int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4,
+ const u8 *mac_addr, u32 action);
+void irdma_if_notify(struct irdma_device *iwdev, struct net_device *netdev,
+ u32 *ipaddr, bool ipv4, bool ifup);
+bool irdma_port_in_use(struct irdma_cm_core *cm_core, u16 port);
+void irdma_send_ack(struct irdma_cm_node *cm_node);
+void irdma_lpb_nop(struct irdma_sc_qp *qp);
+void irdma_rem_ref_cm_node(struct irdma_cm_node *cm_node);
+void irdma_add_conn_est_qh(struct irdma_cm_node *cm_node);
+#endif /* IRDMA_CM_H */
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
new file mode 100644
index 000000000000..4ef1c29032f7
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -0,0 +1,6697 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include <linux/etherdevice.h>
+
+#include "osdep.h"
+#include "hmc.h"
+#include "defs.h"
+#include "type.h"
+#include "ws.h"
+#include "protos.h"
+
+/**
+ * irdma_get_qp_from_list - get next qp from a list
+ * @head: list head of QPs
+ * @qp: current qp
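+ *
+ * Return: the QP following @qp on @head, the first QP when @qp is NULL,
+ * or NULL when the end of the list is reached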
+ */
+struct irdma_sc_qp *irdma_get_qp_from_list(struct list_head *head,
+ struct irdma_sc_qp *qp)
+{
+ struct list_head *lastentry;
+ struct list_head *entry = NULL;
+
+ if (list_empty(head))
+ return NULL;
+
+ if (!qp) {
+ entry = head->next;
+ } else {
+ lastentry = &qp->list;
+ entry = lastentry->next;
+ if (entry == head)
+ return NULL;
+ }
+
+ return container_of(entry, struct irdma_sc_qp, list);
+}
+
+/**
+ * irdma_sc_suspend_resume_qps - suspend/resume all QPs on a VSI
+ * @vsi: the VSI struct pointer
+ * @op: Set to IRDMA_OP_RESUME or IRDMA_OP_SUSPEND
+ */
+void irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 op)
+{
+ struct irdma_sc_qp *qp = NULL;
+ u8 i;
+
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ mutex_lock(&vsi->qos[i].qos_mutex);
+ qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
+ while (qp) {
+ if (op == IRDMA_OP_RESUME) {
+ if (!qp->dev->ws_add(vsi, i)) {
+ qp->qs_handle =
+ vsi->qos[qp->user_pri].qs_handle;
+ irdma_cqp_qp_suspend_resume(qp, op);
+ } else {
+ irdma_cqp_qp_suspend_resume(qp, op);
+ irdma_modify_qp_to_err(qp);
+ }
+ } else if (op == IRDMA_OP_SUSPEND) {
+ /* issue cqp suspend command */
+ if (!irdma_cqp_qp_suspend_resume(qp, op))
+ atomic_inc(&vsi->qp_suspend_reqs);
+ }
+ qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
+ }
+ mutex_unlock(&vsi->qos[i].qos_mutex);
+ }
+}
+
+static void irdma_set_qos_info(struct irdma_sc_vsi *vsi,
+ struct irdma_l2params *l2p)
+{
+ u8 i;
+
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ vsi->qos[i].qs_handle = vsi->dev->qos[i].qs_handle;
+ vsi->qos[i].valid = true;
+ }
+
+ return;
+ }
+ vsi->qos_rel_bw = l2p->vsi_rel_bw;
+ vsi->qos_prio_type = l2p->vsi_prio_type;
+ vsi->dscp_mode = l2p->dscp_mode;
+ if (l2p->dscp_mode) {
+ memcpy(vsi->dscp_map, l2p->dscp_map, sizeof(vsi->dscp_map));
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
+ l2p->up2tc[i] = i;
+ }
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ vsi->qos[i].qs_handle = l2p->qs_handle_list[i];
+ vsi->qos[i].traffic_class = l2p->up2tc[i];
+ vsi->qos[i].rel_bw =
+ l2p->tc_info[vsi->qos[i].traffic_class].rel_bw;
+ vsi->qos[i].prio_type =
+ l2p->tc_info[vsi->qos[i].traffic_class].prio_type;
+ vsi->qos[i].valid = false;
+ }
+}
+
+/**
+ * irdma_change_l2params - apply new l2 parameters to the VSI and its QPs
+ * @vsi: RDMA VSI pointer
+ * @l2params: New parameters from l2
+ */
+void irdma_change_l2params(struct irdma_sc_vsi *vsi,
+ struct irdma_l2params *l2params)
+{
+ if (l2params->mtu_changed) {
+ vsi->mtu = l2params->mtu;
+ if (vsi->ieq)
+ irdma_reinitialize_ieq(vsi);
+ }
+
+ if (!l2params->tc_changed)
+ return;
+
+ vsi->tc_change_pending = false;
+ irdma_set_qos_info(vsi, l2params);
+ irdma_sc_suspend_resume_qps(vsi, IRDMA_OP_RESUME);
+}
+
+/**
+ * irdma_qp_rem_qos - remove qp from qos lists during destroy qp
+ * @qp: qp to be removed from qos
+ */
+void irdma_qp_rem_qos(struct irdma_sc_qp *qp)
+{
+ struct irdma_sc_vsi *vsi = qp->vsi;
+
+ ibdev_dbg(to_ibdev(qp->dev),
+ "DCB: DCB: Remove qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
+ qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
+ qp->on_qoslist);
+ mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
+ if (qp->on_qoslist) {
+ qp->on_qoslist = false;
+ list_del(&qp->list);
+ }
+ mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
+}
+
+/**
+ * irdma_qp_add_qos - called during setctx for qp to be added to qos
+ * @qp: qp to be added to qos
+ */
+void irdma_qp_add_qos(struct irdma_sc_qp *qp)
+{
+ struct irdma_sc_vsi *vsi = qp->vsi;
+
+ ibdev_dbg(to_ibdev(qp->dev),
+ "DCB: DCB: Add qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
+ qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
+ qp->on_qoslist);
+ mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
+ if (!qp->on_qoslist) {
+ list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
+ qp->on_qoslist = true;
+ qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
+ }
+ mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
+}
+
+/**
+ * irdma_sc_pd_init - initialize sc pd struct
+ * @dev: sc device struct
+ * @pd: sc pd ptr
+ * @pd_id: pd_id for allocated pd
+ * @abi_ver: User/Kernel ABI version
+ */
+void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
+ int abi_ver)
+{
+ pd->pd_id = pd_id;
+ pd->abi_ver = abi_ver;
+ pd->dev = dev;
+}
+
+/**
+ * irdma_sc_add_arp_cache_entry - cqp wqe add arp cache entry
+ * @cqp: struct for cqp hw
+ * @info: arp entry information
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_add_arp_cache_entry_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+ set_64bit_val(wqe, 8, info->reach_max);
+ set_64bit_val(wqe, 16, ether_addr_to_u64(info->mac_addr));
+
+ hdr = info->arp_index |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
+ FIELD_PREP(IRDMA_CQPSQ_MAT_PERMANENT, (info->permanent ? 1 : 0)) |
+ FIELD_PREP(IRDMA_CQPSQ_MAT_ENTRYVALID, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: ARP_CACHE_ENTRY WQE", DUMP_PREFIX_OFFSET,
+ 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_del_arp_cache_entry - delete arp cache entry
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @arp_index: arp index to delete arp entry
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
+ u16 arp_index, bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ hdr = arp_index |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: ARP_CACHE_DEL_ENTRY WQE",
+ DUMP_PREFIX_OFFSET, 16, 8, wqe,
+ IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_manage_apbvt_entry - for adding and deleting apbvt entries
+ * @cqp: struct for cqp hw
+ * @info: info for apbvt entry to add or delete
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_apbvt_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 16, info->port);
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_APBVT) |
+ FIELD_PREP(IRDMA_CQPSQ_MAPT_ADDPORT, info->add) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: MANAGE_APBVT WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_manage_qhash_table_entry - manage quad hash entries
+ * @cqp: struct for cqp hw
+ * @info: info for quad hash to manage
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ *
+ * This is called before connection establishment is started.
+ * For passive connections, when a listener is created, it is
+ * called with an entry type of IRDMA_QHASH_TYPE_TCP_SYN and the
+ * local ip address and tcp port. When a SYN is received (passive
+ * connections) or sent (active connections), this routine is
+ * called with an entry type of IRDMA_QHASH_TYPE_TCP_ESTABLISHED
+ * and the quad is passed in info.
+ *
+ * Once the iwarp connection is established and its state moves to
+ * RTS, the quad hash entry in the hardware points to the iwarp QP
+ * number and requires no further calls from the driver.
+ */
+static int
+irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_qhash_table_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 qw1 = 0;
+ u64 qw2 = 0;
+ u64 temp;
+ struct irdma_sc_vsi *vsi = info->vsi;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr));
+
+ qw1 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QPN, info->qp_num) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_DEST_PORT, info->dest_port);
+ if (info->ipv4_valid) {
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[0]));
+ } else {
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->dest_ip[0]) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->dest_ip[1]));
+
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->dest_ip[2]) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[3]));
+ }
+ qw2 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QS_HANDLE,
+ vsi->qos[info->user_pri].qs_handle);
+ if (info->vlan_valid)
+ qw2 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANID, info->vlan_id);
+ set_64bit_val(wqe, 16, qw2);
+ if (info->entry_type == IRDMA_QHASH_TYPE_TCP_ESTABLISHED) {
+ qw1 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_SRC_PORT, info->src_port);
+ if (!info->ipv4_valid) {
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->src_ip[0]) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->src_ip[1]));
+ set_64bit_val(wqe, 32,
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->src_ip[2]) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[3]));
+ } else {
+ set_64bit_val(wqe, 32,
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[0]));
+ }
+ }
+
+ set_64bit_val(wqe, 8, qw1);
+ temp = FIELD_PREP(IRDMA_CQPSQ_QHASH_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_OPCODE,
+ IRDMA_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_MANAGE, info->manage) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_IPV4VALID, info->ipv4_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANVALID, info->vlan_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ENTRYTYPE, info->entry_type);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, temp);
+
+ print_hex_dump_debug("WQE: MANAGE_QHASH WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_init - initialize qp
+ * @qp: sc qp
+ * @info: initialization qp info
+ */
+int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
+{
+ int ret_code;
+ u32 pble_obj_cnt;
+ u16 wqe_size;
+
+ if (info->qp_uk_init_info.max_sq_frag_cnt >
+ info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags ||
+ info->qp_uk_init_info.max_rq_frag_cnt >
+ info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags)
+ return -EINVAL;
+
+ qp->dev = info->pd->dev;
+ qp->vsi = info->vsi;
+ qp->ieq_qp = info->vsi->exception_lan_q;
+ qp->sq_pa = info->sq_pa;
+ qp->rq_pa = info->rq_pa;
+ qp->hw_host_ctx_pa = info->host_ctx_pa;
+ qp->q2_pa = info->q2_pa;
+ qp->shadow_area_pa = info->shadow_area_pa;
+ qp->q2_buf = info->q2;
+ qp->pd = info->pd;
+ qp->hw_host_ctx = info->host_ctx;
+ info->qp_uk_init_info.wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
+ ret_code = irdma_uk_qp_init(&qp->qp_uk, &info->qp_uk_init_info);
+ if (ret_code)
+ return ret_code;
+
+ qp->virtual_map = info->virtual_map;
+ pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+
+ if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) ||
+ (!info->qp_uk_init_info.srq_uk &&
+ info->virtual_map && info->rq_pa >= pble_obj_cnt))
+ return -EINVAL;
+
+ qp->llp_stream_handle = (void *)(-1);
+ qp->hw_sq_size = irdma_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
+ IRDMA_QUEUE_TYPE_SQ_RQ);
+ ibdev_dbg(to_ibdev(qp->dev),
+ "WQE: hw_sq_size[%04d] sq_ring.size[%04d]\n",
+ qp->hw_sq_size, qp->qp_uk.sq_ring.size);
+ if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1 && qp->pd->abi_ver > 4)
+ wqe_size = IRDMA_WQE_SIZE_128;
+ else
+ ret_code = irdma_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
+ &wqe_size);
+ if (ret_code)
+ return ret_code;
+
+ qp->hw_rq_size = irdma_get_encoded_wqe_size(qp->qp_uk.rq_size *
+ (wqe_size / IRDMA_QP_WQE_MIN_SIZE), IRDMA_QUEUE_TYPE_SQ_RQ);
+ ibdev_dbg(to_ibdev(qp->dev),
+ "WQE: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
+ qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
+ qp->sq_tph_val = info->sq_tph_val;
+ qp->rq_tph_val = info->rq_tph_val;
+ qp->sq_tph_en = info->sq_tph_en;
+ qp->rq_tph_en = info->rq_tph_en;
+ qp->rcv_tph_en = info->rcv_tph_en;
+ qp->xmit_tph_en = info->xmit_tph_en;
+ qp->qp_uk.first_sq_wq = info->qp_uk_init_info.first_sq_wq;
+ qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_srq_init - init sc_srq structure
+ * @srq: srq sc struct
+ * @info: parameters for srq init
+ */
+int irdma_sc_srq_init(struct irdma_sc_srq *srq,
+ struct irdma_srq_init_info *info)
+{
+ u32 srq_size_quanta;
+ int ret_code;
+
+ ret_code = irdma_uk_srq_init(&srq->srq_uk, &info->srq_uk_init_info);
+ if (ret_code)
+ return ret_code;
+
+ srq->dev = info->pd->dev;
+ srq->pd = info->pd;
+ srq->vsi = info->vsi;
+ srq->srq_pa = info->srq_pa;
+ srq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+ srq->pasid = info->pasid;
+ srq->pasid_valid = info->pasid_valid;
+ srq->srq_limit = info->srq_limit;
+ srq->leaf_pbl_size = info->leaf_pbl_size;
+ srq->virtual_map = info->virtual_map;
+ srq->tph_en = info->tph_en;
+ srq->arm_limit_event = info->arm_limit_event;
+ srq->tph_val = info->tph_value;
+ srq->shadow_area_pa = info->shadow_area_pa;
+
+ /* Smallest SRQ size is 256B i.e. 8 quanta */
+ srq_size_quanta = max((u32)IRDMA_SRQ_MIN_QUANTA,
+ srq->srq_uk.srq_size *
+ srq->srq_uk.wqe_size_multiplier);
+ srq->hw_srq_size = irdma_get_encoded_wqe_size(srq_size_quanta,
+ IRDMA_QUEUE_TYPE_SRQ);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_srq_create - send srq create CQP WQE
+ * @srq: srq sc struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_srq_create(struct irdma_sc_srq *srq, u64 scratch,
+ bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+
+ cqp = srq->pd->dev->cqp;
+ if (srq->srq_uk.srq_id < cqp->dev->hw_attrs.min_hw_srq_id ||
+ srq->srq_uk.srq_id >
+ (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_SRQ].max_cnt - 1))
+ return -EINVAL;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_SRQ_LIMIT, srq->srq_limit) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_RQSIZE, srq->hw_srq_size) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_RQ_WQE_SIZE, srq->srq_uk.wqe_size));
+ set_64bit_val(wqe, 8, (uintptr_t)srq);
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_PD_ID, srq->pd->pd_id));
+ set_64bit_val(wqe, 32,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR,
+ srq->srq_pa >>
+ IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR_S));
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR,
+ srq->shadow_area_pa >>
+ IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR_S));
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_FIRST_PM_PBL_IDX,
+ srq->first_pm_pbl_idx));
+
+ hdr = srq->srq_uk.srq_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_SRQ) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_LEAF_PBL_SIZE, srq->leaf_pbl_size) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_VIRTMAP, srq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_ARM_LIMIT_EVENT,
+ srq->arm_limit_event) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: SRQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_srq_modify - send modify_srq CQP WQE
+ * @srq: srq sc struct
+ * @info: parameters for srq modification
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_srq_modify(struct irdma_sc_srq *srq,
+ struct irdma_modify_srq_info *info, u64 scratch,
+ bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+
+ cqp = srq->dev->cqp;
+ if (srq->srq_uk.srq_id < cqp->dev->hw_attrs.min_hw_srq_id ||
+ srq->srq_uk.srq_id >
+ (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_SRQ].max_cnt - 1))
+ return -EINVAL;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_SRQ_LIMIT, info->srq_limit) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_RQSIZE, srq->hw_srq_size) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_RQ_WQE_SIZE, srq->srq_uk.wqe_size));
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_SRQCTX, srq->srq_uk.srq_id));
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_PD_ID, srq->pd->pd_id));
+ set_64bit_val(wqe, 32,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR,
+ srq->srq_pa >>
+ IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR_S));
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR,
+ srq->shadow_area_pa >>
+ IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR_S));
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_FIRST_PM_PBL_IDX,
+ srq->first_pm_pbl_idx));
+
+ hdr = srq->srq_uk.srq_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_SRQ) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_LEAF_PBL_SIZE, srq->leaf_pbl_size) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_VIRTMAP, srq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_ARM_LIMIT_EVENT,
+ info->arm_limit_event) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: SRQ_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_srq_destroy - send srq_destroy CQP WQE
+ * @srq: srq sc struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_srq_destroy(struct irdma_sc_srq *srq, u64 scratch,
+ bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+
+ cqp = srq->dev->cqp;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 8, (uintptr_t)srq);
+
+ hdr = srq->srq_uk.srq_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_SRQ) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: SRQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_create - create qp
+ * @qp: sc qp
+ * @info: qp create info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+int irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info,
+ u64 scratch, bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+
+ cqp = qp->dev->cqp;
+ if (qp->qp_uk.qp_id < cqp->dev->hw_attrs.min_hw_qp_id ||
+ qp->qp_uk.qp_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt)
+ return -EINVAL;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+ set_64bit_val(wqe, 40, qp->shadow_area_pa);
+
+ hdr = qp->qp_uk.qp_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, (info->ord_valid ? 1 : 0)) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
+ info->arp_cache_idx_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: QP_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_modify - modify qp cqp wqe
+ * @qp: sc qp
+ * @info: modify qp info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+int irdma_sc_qp_modify(struct irdma_sc_qp *qp, struct irdma_modify_qp_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+ u8 term_actions = 0;
+ u8 term_len = 0;
+
+ cqp = qp->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ if (info->next_iwarp_state == IRDMA_QP_STATE_TERMINATE) {
+ if (info->dont_send_fin)
+ term_actions += IRDMAQP_TERM_SEND_TERM_ONLY;
+ if (info->dont_send_term)
+ term_actions += IRDMAQP_TERM_SEND_FIN_ONLY;
+ if (term_actions == IRDMAQP_TERM_SEND_TERM_AND_FIN ||
+ term_actions == IRDMAQP_TERM_SEND_TERM_ONLY)
+ term_len = info->termlen;
+ }
+
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMA_CQPSQ_QP_NEWMSS, info->new_mss) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_TERMLEN, term_len));
+ set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+ set_64bit_val(wqe, 40, qp->shadow_area_pa);
+
+ hdr = qp->qp_uk.qp_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_QP) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, info->ord_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_CACHEDVARVALID,
+ info->cached_var_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_MSSCHANGE, info->mss_change) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY,
+ info->remove_hash_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_TERMACT, term_actions) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_RESETCON, info->reset_tcp_conn) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
+ info->arp_cache_idx_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: QP_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_destroy - cqp destroy qp
+ * @qp: sc qp
+ * @scratch: u64 saved to be used during cqp completion
+ * @remove_hash_idx: flag to remove the quad hash entry
+ * @ignore_mw_bnd: memory window bind flag
+ * @post_sq: flag for cqp db to ring
+ */
+int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
+ bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+
+ cqp = qp->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+ set_64bit_val(wqe, 40, qp->shadow_area_pa);
+
+ hdr = qp->qp_uk.qp_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_QP) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_IGNOREMWBOUND, ignore_mw_bnd) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY, remove_hash_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: QP_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_get_encoded_ird_size - get encoded IRD size
+ * @ird_size: IRD size
+ * The ird from the connection is rounded to a supported HW setting and then encoded
+ * for ird_size field of qp_ctx. Consumers are expected to provide valid ird size based
+ * on hardware attributes. IRD size defaults to a value of 4 in case of invalid input
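+ * For example, an ird_size of 10 is rounded up to 32 and encoded as
+ * IRDMA_IRD_HW_SIZE_64.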
+ */
+static u8 irdma_sc_get_encoded_ird_size(u16 ird_size)
+{
+ switch (ird_size ?
+ roundup_pow_of_two(2 * ird_size) : 4) {
+ case 256:
+ return IRDMA_IRD_HW_SIZE_256;
+ case 128:
+ return IRDMA_IRD_HW_SIZE_128;
+ case 64:
+ case 32:
+ return IRDMA_IRD_HW_SIZE_64;
+ case 16:
+ case 8:
+ return IRDMA_IRD_HW_SIZE_16;
+ case 4:
+ default:
+ break;
+ }
+
+ return IRDMA_IRD_HW_SIZE_4;
+}
+
+/**
+ * irdma_sc_qp_setctx_roce_gen_2 - set qp's context
+ * @qp: sc qp
+ * @qp_ctx: context ptr
+ * @info: ctx info
+ */
+static void irdma_sc_qp_setctx_roce_gen_2(struct irdma_sc_qp *qp,
+ __le64 *qp_ctx,
+ struct irdma_qp_host_ctx_info *info)
+{
+ struct irdma_roce_offload_info *roce_info;
+ struct irdma_udp_offload_info *udp;
+ u8 push_mode_en;
+ u32 push_idx;
+
+ roce_info = info->roce_info;
+ udp = info->udp_info;
+ qp->user_pri = info->user_pri;
+ if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
+ push_mode_en = 0;
+ push_idx = 0;
+ } else {
+ push_mode_en = 1;
+ push_idx = qp->push_idx;
+ }
+ set_64bit_val(qp_ctx, 0,
+ FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
+ FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
+ FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
+ FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
+ FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
+ FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
+ FIELD_PREP(IRDMAQPC_PMENA, push_mode_en) |
+ FIELD_PREP(IRDMAQPC_PDIDXHI, roce_info->pd_id >> 16) |
+ FIELD_PREP(IRDMAQPC_DC_TCP_EN, roce_info->dctcp_en) |
+ FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID, roce_info->err_rq_idx_valid) |
+ FIELD_PREP(IRDMAQPC_ISQP1, roce_info->is_qp1) |
+ FIELD_PREP(IRDMAQPC_ROCE_TVER, roce_info->roce_tver) |
+ FIELD_PREP(IRDMAQPC_IPV4, udp->ipv4) |
+ FIELD_PREP(IRDMAQPC_INSERTVLANTAG, udp->insert_vlan_tag));
+ set_64bit_val(qp_ctx, 8, qp->sq_pa);
+ set_64bit_val(qp_ctx, 16, qp->rq_pa);
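+ /* set an ECN codepoint in the ToS when DCQCN/DCTCP congestion control
+  * is enabled and no ECN bits are set yet
+  */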
+ if ((roce_info->dcqcn_en || roce_info->dctcp_en) &&
+ !(udp->tos & 0x03))
+ udp->tos |= ECN_CODE_PT_VAL;
+ set_64bit_val(qp_ctx, 24,
+ FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
+ FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size) |
+ FIELD_PREP(IRDMAQPC_TTL, udp->ttl) | FIELD_PREP(IRDMAQPC_TOS, udp->tos) |
+ FIELD_PREP(IRDMAQPC_SRCPORTNUM, udp->src_port) |
+ FIELD_PREP(IRDMAQPC_DESTPORTNUM, udp->dst_port));
+ set_64bit_val(qp_ctx, 32,
+ FIELD_PREP(IRDMAQPC_DESTIPADDR2, udp->dest_ip_addr[2]) |
+ FIELD_PREP(IRDMAQPC_DESTIPADDR3, udp->dest_ip_addr[3]));
+ set_64bit_val(qp_ctx, 40,
+ FIELD_PREP(IRDMAQPC_DESTIPADDR0, udp->dest_ip_addr[0]) |
+ FIELD_PREP(IRDMAQPC_DESTIPADDR1, udp->dest_ip_addr[1]));
+ set_64bit_val(qp_ctx, 48,
+ FIELD_PREP(IRDMAQPC_SNDMSS, udp->snd_mss) |
+ FIELD_PREP(IRDMAQPC_VLANTAG, udp->vlan_tag) |
+ FIELD_PREP(IRDMAQPC_ARPIDX, udp->arp_idx));
+ set_64bit_val(qp_ctx, 56,
+ FIELD_PREP(IRDMAQPC_PKEY, roce_info->p_key) |
+ FIELD_PREP(IRDMAQPC_PDIDX, roce_info->pd_id) |
+ FIELD_PREP(IRDMAQPC_ACKCREDITS, roce_info->ack_credits) |
+ FIELD_PREP(IRDMAQPC_FLOWLABEL, udp->flow_label));
+ set_64bit_val(qp_ctx, 64,
+ FIELD_PREP(IRDMAQPC_QKEY, roce_info->qkey) |
+ FIELD_PREP(IRDMAQPC_DESTQP, roce_info->dest_qp));
+ set_64bit_val(qp_ctx, 80,
+ FIELD_PREP(IRDMAQPC_PSNNXT, udp->psn_nxt) |
+ FIELD_PREP(IRDMAQPC_LSN, udp->lsn));
+ set_64bit_val(qp_ctx, 88,
+ FIELD_PREP(IRDMAQPC_EPSN, udp->epsn));
+ set_64bit_val(qp_ctx, 96,
+ FIELD_PREP(IRDMAQPC_PSNMAX, udp->psn_max) |
+ FIELD_PREP(IRDMAQPC_PSNUNA, udp->psn_una));
+ set_64bit_val(qp_ctx, 112,
+ FIELD_PREP(IRDMAQPC_CWNDROCE, udp->cwnd));
+ set_64bit_val(qp_ctx, 128,
+ FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, roce_info->err_rq_idx) |
+ FIELD_PREP(IRDMAQPC_RNRNAK_THRESH, udp->rnr_nak_thresh) |
+ FIELD_PREP(IRDMAQPC_REXMIT_THRESH, udp->rexmit_thresh) |
+ FIELD_PREP(IRDMAQPC_RTOMIN, roce_info->rtomin));
+ set_64bit_val(qp_ctx, 136,
+ FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
+ FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
+ set_64bit_val(qp_ctx, 144,
+ FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
+ set_64bit_val(qp_ctx, 152, ether_addr_to_u64(roce_info->mac_addr) << 16);
+ set_64bit_val(qp_ctx, 160,
+ FIELD_PREP(IRDMAQPC_ORDSIZE, roce_info->ord_size) |
+ FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(roce_info->ird_size)) |
+ FIELD_PREP(IRDMAQPC_WRRDRSPOK, roce_info->wr_rdresp_en) |
+ FIELD_PREP(IRDMAQPC_RDOK, roce_info->rd_en) |
+ FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
+ FIELD_PREP(IRDMAQPC_BINDEN, roce_info->bind_en) |
+ FIELD_PREP(IRDMAQPC_FASTREGEN, roce_info->fast_reg_en) |
+ FIELD_PREP(IRDMAQPC_DCQCNENABLE, roce_info->dcqcn_en) |
+ FIELD_PREP(IRDMAQPC_RCVNOICRC, roce_info->rcv_no_icrc) |
+ FIELD_PREP(IRDMAQPC_FW_CC_ENABLE, roce_info->fw_cc_enable) |
+ FIELD_PREP(IRDMAQPC_UDPRIVCQENABLE, roce_info->udprivcq_en) |
+ FIELD_PREP(IRDMAQPC_PRIVEN, roce_info->priv_mode_en) |
+ FIELD_PREP(IRDMAQPC_TIMELYENABLE, roce_info->timely_en));
+ set_64bit_val(qp_ctx, 168,
+ FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
+ set_64bit_val(qp_ctx, 176,
+ FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
+ FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
+ FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
+ set_64bit_val(qp_ctx, 184,
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, udp->local_ipaddr[3]) |
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, udp->local_ipaddr[2]));
+ set_64bit_val(qp_ctx, 192,
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, udp->local_ipaddr[1]) |
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, udp->local_ipaddr[0]));
+ set_64bit_val(qp_ctx, 200,
+ FIELD_PREP(IRDMAQPC_THIGH, roce_info->t_high) |
+ FIELD_PREP(IRDMAQPC_TLOW, roce_info->t_low));
+ set_64bit_val(qp_ctx, 208,
+ FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
+
+ print_hex_dump_debug("WQE: QP_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, qp_ctx, IRDMA_QP_CTX_SIZE, false);
+}
+
+/**
+ * irdma_sc_get_encoded_ird_size_gen_3 - get encoded IRD size for GEN 3
+ * @ird_size: IRD size
+ * The ird from the connection is rounded to a supported HW setting and then encoded
+ * for ird_size field of qp_ctx. Consumers are expected to provide valid ird size based
+ * on hardware attributes. IRD size defaults to a value of 4 in case of invalid input.
+ */
+static u8 irdma_sc_get_encoded_ird_size_gen_3(u16 ird_size)
+{
+ switch (ird_size ?
+ roundup_pow_of_two(2 * ird_size) : 4) {
+ case 4096:
+ return IRDMA_IRD_HW_SIZE_4096_GEN3;
+ case 2048:
+ return IRDMA_IRD_HW_SIZE_2048_GEN3;
+ case 1024:
+ return IRDMA_IRD_HW_SIZE_1024_GEN3;
+ case 512:
+ return IRDMA_IRD_HW_SIZE_512_GEN3;
+ case 256:
+ return IRDMA_IRD_HW_SIZE_256_GEN3;
+ case 128:
+ return IRDMA_IRD_HW_SIZE_128_GEN3;
+ case 64:
+ return IRDMA_IRD_HW_SIZE_64_GEN3;
+ case 32:
+ return IRDMA_IRD_HW_SIZE_32_GEN3;
+ case 16:
+ return IRDMA_IRD_HW_SIZE_16_GEN3;
+ case 8:
+ return IRDMA_IRD_HW_SIZE_8_GEN3;
+ case 4:
+ default:
+ break;
+ }
+
+ return IRDMA_IRD_HW_SIZE_4_GEN3;
+}
+
+/**
+ * irdma_sc_qp_setctx_roce_gen_3 - set qp's context
+ * @qp: sc qp
+ * @qp_ctx: context ptr
+ * @info: ctx info
+ */
+static void irdma_sc_qp_setctx_roce_gen_3(struct irdma_sc_qp *qp,
+ __le64 *qp_ctx,
+ struct irdma_qp_host_ctx_info *info)
+{
+ struct irdma_roce_offload_info *roce_info = info->roce_info;
+ struct irdma_udp_offload_info *udp = info->udp_info;
+ u64 qw0, qw3, qw7 = 0, qw8 = 0;
+ u8 push_mode_en;
+ u32 push_idx;
+
+ qp->user_pri = info->user_pri;
+ if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
+ push_mode_en = 0;
+ push_idx = 0;
+ } else {
+ push_mode_en = 1;
+ push_idx = qp->push_idx;
+ }
+
+ qw0 = FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
+ FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
+ FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
+ FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
+ FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
+ FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
+ FIELD_PREP(IRDMAQPC_PMENA, push_mode_en) |
+ FIELD_PREP(IRDMAQPC_DC_TCP_EN, roce_info->dctcp_en) |
+ FIELD_PREP(IRDMAQPC_ISQP1, roce_info->is_qp1) |
+ FIELD_PREP(IRDMAQPC_ROCE_TVER, roce_info->roce_tver) |
+ FIELD_PREP(IRDMAQPC_IPV4, udp->ipv4) |
+ FIELD_PREP(IRDMAQPC_USE_SRQ, !qp->qp_uk.srq_uk ? 0 : 1) |
+ FIELD_PREP(IRDMAQPC_INSERTVLANTAG, udp->insert_vlan_tag);
+ set_64bit_val(qp_ctx, 0, qw0);
+ set_64bit_val(qp_ctx, 8, qp->sq_pa);
+ set_64bit_val(qp_ctx, 16, qp->rq_pa);
+ qw3 = FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
+ FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size) |
+ FIELD_PREP(IRDMAQPC_TTL, udp->ttl) |
+ FIELD_PREP(IRDMAQPC_TOS, udp->tos) |
+ FIELD_PREP(IRDMAQPC_SRCPORTNUM, udp->src_port) |
+ FIELD_PREP(IRDMAQPC_DESTPORTNUM, udp->dst_port);
+ set_64bit_val(qp_ctx, 24, qw3);
+ set_64bit_val(qp_ctx, 32,
+ FIELD_PREP(IRDMAQPC_DESTIPADDR2, udp->dest_ip_addr[2]) |
+ FIELD_PREP(IRDMAQPC_DESTIPADDR3, udp->dest_ip_addr[3]));
+ set_64bit_val(qp_ctx, 40,
+ FIELD_PREP(IRDMAQPC_DESTIPADDR0, udp->dest_ip_addr[0]) |
+ FIELD_PREP(IRDMAQPC_DESTIPADDR1, udp->dest_ip_addr[1]));
+ set_64bit_val(qp_ctx, 48,
+ FIELD_PREP(IRDMAQPC_SNDMSS, udp->snd_mss) |
+ FIELD_PREP(IRDMAQPC_VLANTAG, udp->vlan_tag) |
+ FIELD_PREP(IRDMAQPC_ARPIDX, udp->arp_idx));
+ qw7 = FIELD_PREP(IRDMAQPC_PKEY, roce_info->p_key) |
+ FIELD_PREP(IRDMAQPC_ACKCREDITS, roce_info->ack_credits) |
+ FIELD_PREP(IRDMAQPC_FLOWLABEL, udp->flow_label);
+ set_64bit_val(qp_ctx, 56, qw7);
+ qw8 = FIELD_PREP(IRDMAQPC_QKEY, roce_info->qkey) |
+ FIELD_PREP(IRDMAQPC_DESTQP, roce_info->dest_qp);
+ set_64bit_val(qp_ctx, 64, qw8);
+ set_64bit_val(qp_ctx, 80,
+ FIELD_PREP(IRDMAQPC_PSNNXT, udp->psn_nxt) |
+ FIELD_PREP(IRDMAQPC_LSN, udp->lsn));
+ set_64bit_val(qp_ctx, 88,
+ FIELD_PREP(IRDMAQPC_EPSN, udp->epsn));
+ set_64bit_val(qp_ctx, 96,
+ FIELD_PREP(IRDMAQPC_PSNMAX, udp->psn_max) |
+ FIELD_PREP(IRDMAQPC_PSNUNA, udp->psn_una));
+ set_64bit_val(qp_ctx, 112,
+ FIELD_PREP(IRDMAQPC_CWNDROCE, udp->cwnd));
+ set_64bit_val(qp_ctx, 128,
+ FIELD_PREP(IRDMAQPC_MINRNR_TIMER, udp->min_rnr_timer) |
+ FIELD_PREP(IRDMAQPC_RNRNAK_THRESH, udp->rnr_nak_thresh) |
+ FIELD_PREP(IRDMAQPC_REXMIT_THRESH, udp->rexmit_thresh) |
+ FIELD_PREP(IRDMAQPC_RNRNAK_TMR, udp->rnr_nak_tmr) |
+ FIELD_PREP(IRDMAQPC_RTOMIN, roce_info->rtomin));
+ set_64bit_val(qp_ctx, 136,
+ FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
+ FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
+ set_64bit_val(qp_ctx, 152,
+ FIELD_PREP(IRDMAQPC_MACADDRESS,
+ ether_addr_to_u64(roce_info->mac_addr)) |
+ FIELD_PREP(IRDMAQPC_LOCALACKTIMEOUT,
+ roce_info->local_ack_timeout));
+ set_64bit_val(qp_ctx, 160,
+ FIELD_PREP(IRDMAQPC_ORDSIZE_GEN3, roce_info->ord_size) |
+ FIELD_PREP(IRDMAQPC_IRDSIZE_GEN3,
+ irdma_sc_get_encoded_ird_size_gen_3(roce_info->ird_size)) |
+ FIELD_PREP(IRDMAQPC_WRRDRSPOK, roce_info->wr_rdresp_en) |
+ FIELD_PREP(IRDMAQPC_RDOK, roce_info->rd_en) |
+ FIELD_PREP(IRDMAQPC_USESTATSINSTANCE,
+ info->stats_idx_valid) |
+ FIELD_PREP(IRDMAQPC_BINDEN, roce_info->bind_en) |
+ FIELD_PREP(IRDMAQPC_FASTREGEN, roce_info->fast_reg_en) |
+ FIELD_PREP(IRDMAQPC_DCQCNENABLE, roce_info->dcqcn_en) |
+ FIELD_PREP(IRDMAQPC_RCVNOICRC, roce_info->rcv_no_icrc) |
+ FIELD_PREP(IRDMAQPC_FW_CC_ENABLE,
+ roce_info->fw_cc_enable) |
+ FIELD_PREP(IRDMAQPC_UDPRIVCQENABLE,
+ roce_info->udprivcq_en) |
+ FIELD_PREP(IRDMAQPC_PRIVEN, roce_info->priv_mode_en) |
+ FIELD_PREP(IRDMAQPC_REMOTE_ATOMIC_EN,
+ info->remote_atomics_en) |
+ FIELD_PREP(IRDMAQPC_TIMELYENABLE, roce_info->timely_en));
+ set_64bit_val(qp_ctx, 168,
+ FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
+ set_64bit_val(qp_ctx, 176,
+ FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
+ FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
+ FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
+ set_64bit_val(qp_ctx, 184,
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, udp->local_ipaddr[3]) |
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, udp->local_ipaddr[2]));
+ set_64bit_val(qp_ctx, 192,
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, udp->local_ipaddr[1]) |
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, udp->local_ipaddr[0]));
+ set_64bit_val(qp_ctx, 200,
+ FIELD_PREP(IRDMAQPC_THIGH, roce_info->t_high) |
+ FIELD_PREP(IRDMAQPC_SRQ_ID,
+ !qp->qp_uk.srq_uk ?
+ 0 : qp->qp_uk.srq_uk->srq_id) |
+ FIELD_PREP(IRDMAQPC_TLOW, roce_info->t_low));
+ set_64bit_val(qp_ctx, 208, roce_info->pd_id |
+ FIELD_PREP(IRDMAQPC_STAT_INDEX_GEN3, info->stats_idx) |
+ FIELD_PREP(IRDMAQPC_PKT_LIMIT, qp->pkt_limit));
+
+ print_hex_dump_debug("WQE: QP_HOST ROCE CTX WQE", DUMP_PREFIX_OFFSET,
+ 16, 8, qp_ctx, IRDMA_QP_CTX_SIZE, false);
+}
+
+void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
+ struct irdma_qp_host_ctx_info *info)
+{
+ if (qp->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2)
+ irdma_sc_qp_setctx_roce_gen_2(qp, qp_ctx, info);
+ else
+ irdma_sc_qp_setctx_roce_gen_3(qp, qp_ctx, info);
+}
+
+/**
+ * irdma_sc_alloc_local_mac_entry - allocate a mac entry
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
+ bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
+ IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: ALLOCATE_LOCAL_MAC WQE",
+ DUMP_PREFIX_OFFSET, 16, 8, wqe,
+ IRDMA_CQP_WQE_SIZE * 8, false);
+
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * irdma_sc_add_local_mac_entry - add mac entry
+ * @cqp: struct for cqp hw
+ * @info: mac addr info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_local_mac_entry_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 header;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 32, ether_addr_to_u64(info->mac_addr));
+
+ header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, info->entry_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE,
+ IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+
+ print_hex_dump_debug("WQE: ADD_LOCAL_MAC WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * irdma_sc_del_local_mac_entry - cqp wqe to delete local mac
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @entry_idx: index of mac entry
+ * @ignore_ref_count: to force mac address delete
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
+ u16 entry_idx, u8 ignore_ref_count,
+ bool post_sq)
+{
+ __le64 *wqe;
+ u64 header;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+ header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, entry_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE,
+ IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
+ FIELD_PREP(IRDMA_CQPSQ_MLM_FREEENTRY, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_MLM_IGNORE_REF_CNT, ignore_ref_count);
+
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+
+ print_hex_dump_debug("WQE: DEL_LOCAL_MAC_IPADDR WQE",
+ DUMP_PREFIX_OFFSET, 16, 8, wqe,
+ IRDMA_CQP_WQE_SIZE * 8, false);
+
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_setctx - set qp's context
+ * @qp: sc qp
+ * @qp_ctx: context ptr
+ * @info: ctx info
+ */
+void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
+ struct irdma_qp_host_ctx_info *info)
+{
+ struct irdma_iwarp_offload_info *iw;
+ struct irdma_tcp_offload_info *tcp;
+ struct irdma_sc_dev *dev;
+ u8 push_mode_en;
+ u32 push_idx;
+ u64 qw0, qw3, qw7 = 0, qw16 = 0;
+ u64 mac = 0;
+
+ iw = info->iwarp_info;
+ tcp = info->tcp_info;
+ dev = qp->dev;
+ if (iw->rcv_mark_en) {
+ qp->pfpdu.marker_len = 4;
+ qp->pfpdu.rcv_start_seq = tcp->rcv_nxt;
+ }
+ qp->user_pri = info->user_pri;
+ if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
+ push_mode_en = 0;
+ push_idx = 0;
+ } else {
+ push_mode_en = 1;
+ push_idx = qp->push_idx;
+ }
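+ /* qw0, qw3, qw7 and qw16 accumulate fields from the iwarp and tcp
+ * sections below and are written to the context at the end
+ */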
+ qw0 = FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
+ FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
+ FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
+ FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
+ FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
+ FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
+ FIELD_PREP(IRDMAQPC_PMENA, push_mode_en);
+
+ set_64bit_val(qp_ctx, 8, qp->sq_pa);
+ set_64bit_val(qp_ctx, 16, qp->rq_pa);
+
+ qw3 = FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
+ FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size);
+ if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX,
+ qp->src_mac_addr_idx);
+ set_64bit_val(qp_ctx, 136,
+ FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
+ FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
+ set_64bit_val(qp_ctx, 168,
+ FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
+ set_64bit_val(qp_ctx, 176,
+ FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
+ FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
+ FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle) |
+ FIELD_PREP(IRDMAQPC_EXCEPTION_LAN_QUEUE, qp->ieq_qp));
+ if (info->iwarp_info_valid) {
+ qw0 |= FIELD_PREP(IRDMAQPC_DDP_VER, iw->ddp_ver) |
+ FIELD_PREP(IRDMAQPC_RDMAP_VER, iw->rdmap_ver) |
+ FIELD_PREP(IRDMAQPC_DC_TCP_EN, iw->dctcp_en) |
+ FIELD_PREP(IRDMAQPC_ECN_EN, iw->ecn_en) |
+ FIELD_PREP(IRDMAQPC_IBRDENABLE, iw->ib_rd_en) |
+ FIELD_PREP(IRDMAQPC_PDIDXHI, iw->pd_id >> 16) |
+ FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID,
+ iw->err_rq_idx_valid);
+ qw7 |= FIELD_PREP(IRDMAQPC_PDIDX, iw->pd_id);
+ qw16 |= FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, iw->err_rq_idx) |
+ FIELD_PREP(IRDMAQPC_RTOMIN, iw->rtomin);
+ set_64bit_val(qp_ctx, 144,
+ FIELD_PREP(IRDMAQPC_Q2ADDR, qp->q2_pa >> 8) |
+ FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ mac = ether_addr_to_u64(iw->mac_addr);
+
+ set_64bit_val(qp_ctx, 152,
+ mac << 16 | FIELD_PREP(IRDMAQPC_LASTBYTESENT, iw->last_byte_sent));
+ set_64bit_val(qp_ctx, 160,
+ FIELD_PREP(IRDMAQPC_ORDSIZE, iw->ord_size) |
+ FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(iw->ird_size)) |
+ FIELD_PREP(IRDMAQPC_WRRDRSPOK, iw->wr_rdresp_en) |
+ FIELD_PREP(IRDMAQPC_RDOK, iw->rd_en) |
+ FIELD_PREP(IRDMAQPC_SNDMARKERS, iw->snd_mark_en) |
+ FIELD_PREP(IRDMAQPC_BINDEN, iw->bind_en) |
+ FIELD_PREP(IRDMAQPC_FASTREGEN, iw->fast_reg_en) |
+ FIELD_PREP(IRDMAQPC_PRIVEN, iw->priv_mode_en) |
+ FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
+ FIELD_PREP(IRDMAQPC_IWARPMODE, 1) |
+ FIELD_PREP(IRDMAQPC_RCVMARKERS, iw->rcv_mark_en) |
+ FIELD_PREP(IRDMAQPC_ALIGNHDRS, iw->align_hdrs) |
+ FIELD_PREP(IRDMAQPC_RCVNOMPACRC, iw->rcv_no_mpa_crc) |
+ FIELD_PREP(IRDMAQPC_RCVMARKOFFSET, iw->rcv_mark_offset || !tcp ? iw->rcv_mark_offset : tcp->rcv_nxt) |
+ FIELD_PREP(IRDMAQPC_SNDMARKOFFSET, iw->snd_mark_offset || !tcp ? iw->snd_mark_offset : tcp->snd_nxt) |
+ FIELD_PREP(IRDMAQPC_TIMELYENABLE, iw->timely_en));
+ }
+ if (info->tcp_info_valid) {
+ qw0 |= FIELD_PREP(IRDMAQPC_IPV4, tcp->ipv4) |
+ FIELD_PREP(IRDMAQPC_NONAGLE, tcp->no_nagle) |
+ FIELD_PREP(IRDMAQPC_INSERTVLANTAG,
+ tcp->insert_vlan_tag) |
+ FIELD_PREP(IRDMAQPC_TIMESTAMP, tcp->time_stamp) |
+ FIELD_PREP(IRDMAQPC_LIMIT, tcp->cwnd_inc_limit) |
+ FIELD_PREP(IRDMAQPC_DROPOOOSEG, tcp->drop_ooo_seg) |
+ FIELD_PREP(IRDMAQPC_DUPACK_THRESH, tcp->dup_ack_thresh);
+
+ if ((iw->ecn_en || iw->dctcp_en) && !(tcp->tos & 0x03))
+ tcp->tos |= ECN_CODE_PT_VAL;
+
+ qw3 |= FIELD_PREP(IRDMAQPC_TTL, tcp->ttl) |
+ FIELD_PREP(IRDMAQPC_AVOIDSTRETCHACK, tcp->avoid_stretch_ack) |
+ FIELD_PREP(IRDMAQPC_TOS, tcp->tos) |
+ FIELD_PREP(IRDMAQPC_SRCPORTNUM, tcp->src_port) |
+ FIELD_PREP(IRDMAQPC_DESTPORTNUM, tcp->dst_port);
+ if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
+ qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX, tcp->src_mac_addr_idx);
+
+ qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
+ }
+ set_64bit_val(qp_ctx, 32,
+ FIELD_PREP(IRDMAQPC_DESTIPADDR2, tcp->dest_ip_addr[2]) |
+ FIELD_PREP(IRDMAQPC_DESTIPADDR3, tcp->dest_ip_addr[3]));
+ set_64bit_val(qp_ctx, 40,
+ FIELD_PREP(IRDMAQPC_DESTIPADDR0, tcp->dest_ip_addr[0]) |
+ FIELD_PREP(IRDMAQPC_DESTIPADDR1, tcp->dest_ip_addr[1]));
+ set_64bit_val(qp_ctx, 48,
+ FIELD_PREP(IRDMAQPC_SNDMSS, tcp->snd_mss) |
+ FIELD_PREP(IRDMAQPC_SYN_RST_HANDLING, tcp->syn_rst_handling) |
+ FIELD_PREP(IRDMAQPC_VLANTAG, tcp->vlan_tag) |
+ FIELD_PREP(IRDMAQPC_ARPIDX, tcp->arp_idx));
+ qw7 |= FIELD_PREP(IRDMAQPC_FLOWLABEL, tcp->flow_label) |
+ FIELD_PREP(IRDMAQPC_WSCALE, tcp->wscale) |
+ FIELD_PREP(IRDMAQPC_IGNORE_TCP_OPT,
+ tcp->ignore_tcp_opt) |
+ FIELD_PREP(IRDMAQPC_IGNORE_TCP_UNS_OPT,
+ tcp->ignore_tcp_uns_opt) |
+ FIELD_PREP(IRDMAQPC_TCPSTATE, tcp->tcp_state) |
+ FIELD_PREP(IRDMAQPC_RCVSCALE, tcp->rcv_wscale) |
+ FIELD_PREP(IRDMAQPC_SNDSCALE, tcp->snd_wscale);
+ set_64bit_val(qp_ctx, 72,
+ FIELD_PREP(IRDMAQPC_TIMESTAMP_RECENT, tcp->time_stamp_recent) |
+ FIELD_PREP(IRDMAQPC_TIMESTAMP_AGE, tcp->time_stamp_age));
+ set_64bit_val(qp_ctx, 80,
+ FIELD_PREP(IRDMAQPC_SNDNXT, tcp->snd_nxt) |
+ FIELD_PREP(IRDMAQPC_SNDWND, tcp->snd_wnd));
+ set_64bit_val(qp_ctx, 88,
+ FIELD_PREP(IRDMAQPC_RCVNXT, tcp->rcv_nxt) |
+ FIELD_PREP(IRDMAQPC_RCVWND, tcp->rcv_wnd));
+ set_64bit_val(qp_ctx, 96,
+ FIELD_PREP(IRDMAQPC_SNDMAX, tcp->snd_max) |
+ FIELD_PREP(IRDMAQPC_SNDUNA, tcp->snd_una));
+ set_64bit_val(qp_ctx, 104,
+ FIELD_PREP(IRDMAQPC_SRTT, tcp->srtt) |
+ FIELD_PREP(IRDMAQPC_RTTVAR, tcp->rtt_var));
+ set_64bit_val(qp_ctx, 112,
+ FIELD_PREP(IRDMAQPC_SSTHRESH, tcp->ss_thresh) |
+ FIELD_PREP(IRDMAQPC_CWND, tcp->cwnd));
+ set_64bit_val(qp_ctx, 120,
+ FIELD_PREP(IRDMAQPC_SNDWL1, tcp->snd_wl1) |
+ FIELD_PREP(IRDMAQPC_SNDWL2, tcp->snd_wl2));
+ qw16 |= FIELD_PREP(IRDMAQPC_MAXSNDWND, tcp->max_snd_window) |
+ FIELD_PREP(IRDMAQPC_REXMIT_THRESH, tcp->rexmit_thresh);
+ set_64bit_val(qp_ctx, 184,
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, tcp->local_ipaddr[3]) |
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, tcp->local_ipaddr[2]));
+ set_64bit_val(qp_ctx, 192,
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, tcp->local_ipaddr[1]) |
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, tcp->local_ipaddr[0]));
+ set_64bit_val(qp_ctx, 200,
+ FIELD_PREP(IRDMAQPC_THIGH, iw->t_high) |
+ FIELD_PREP(IRDMAQPC_TLOW, iw->t_low));
+ set_64bit_val(qp_ctx, 208,
+ FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
+ }
+
+ set_64bit_val(qp_ctx, 0, qw0);
+ set_64bit_val(qp_ctx, 24, qw3);
+ set_64bit_val(qp_ctx, 56, qw7);
+ set_64bit_val(qp_ctx, 128, qw16);
+
+ print_hex_dump_debug("WQE: QP_HOST CTX", DUMP_PREFIX_OFFSET, 16, 8,
+ qp_ctx, IRDMA_QP_CTX_SIZE, false);
+}
+
+/**
+ * irdma_sc_alloc_stag - mr stag alloc
+ * @dev: sc device struct
+ * @info: stag info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
+ struct irdma_allocate_stag_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+ enum irdma_page_size page_size;
+
+ if (!info->total_len && !info->all_memory)
+ return -EINVAL;
+
+ if (info->page_size == 0x40000000)
+ page_size = IRDMA_PAGE_SIZE_1G;
+ else if (info->page_size == 0x200000)
+ page_size = IRDMA_PAGE_SIZE_2M;
+ else
+ page_size = IRDMA_PAGE_SIZE_4K;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 8,
+ FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len));
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_PDID_HI, info->pd_id >> 18));
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_STAG_HMCFNIDX, info->hmc_fcn_index));
+
+ if (info->chunk_size)
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_idx));
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, info->remote_access) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_REMOTE_ATOMIC_EN,
+ info->remote_atomics_en) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: ALLOC_STAG WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_mr_reg_non_shared - non-shared mr registration
+ * @dev: sc device struct
+ * @info: mr info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
+ struct irdma_reg_ns_stag_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 fbo;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+ u32 pble_obj_cnt;
+ bool remote_access;
+ u8 addr_type;
+ enum irdma_page_size page_size;
+
+ if (!info->total_len && !info->all_memory)
+ return -EINVAL;
+
+ if (info->page_size == 0x40000000)
+ page_size = IRDMA_PAGE_SIZE_1G;
+ else if (info->page_size == 0x200000)
+ page_size = IRDMA_PAGE_SIZE_2M;
+ else if (info->page_size == 0x1000)
+ page_size = IRDMA_PAGE_SIZE_4K;
+ else
+ return -EINVAL;
+
+ if (info->access_rights & (IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY |
+ IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY))
+ remote_access = true;
+ else
+ remote_access = false;
+
+ pble_obj_cnt = dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+ if (info->chunk_size && info->first_pm_pbl_index >= pble_obj_cnt)
+ return -EINVAL;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
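+ /* fbo is the first byte offset of the VA within its page; it is
+ * used in place of the VA for non VA-based registrations
+ */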
+ fbo = info->va & (info->page_size - 1);
+
+ set_64bit_val(wqe, 0,
+ (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED ?
+ info->va : fbo));
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len) |
+ FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_STAG_KEY, info->stag_key) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_PDID_HI, info->pd_id >> 18) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
+ if (!info->chunk_size) {
+ set_64bit_val(wqe, 32, info->reg_addr_pa);
+ set_64bit_val(wqe, 48, 0);
+ } else {
+ set_64bit_val(wqe, 32, 0);
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_index));
+ }
+ set_64bit_val(wqe, 40, info->hmc_fcn_index);
+ set_64bit_val(wqe, 56, 0);
+
+ addr_type = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ? 1 : 0;
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_REG_MR) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, remote_access) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_VABASEDTO, addr_type) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_REMOTE_ATOMIC_EN,
+ info->remote_atomics_en) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: MR_REG_NS WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_dealloc_stag - deallocate stag
+ * @dev: sc device struct
+ * @info: dealloc stag info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
+ struct irdma_dealloc_stag_info *info,
+ u64 scratch, bool post_sq)
+{
+ u64 hdr;
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 8,
+ FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_PDID_HI, info->pd_id >> 18));
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DEALLOC_STAG) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_MR, info->mr) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: DEALLOC_STAG WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_mw_alloc - mw allocate
+ * @dev: sc device struct
+ * @info: memory window allocation information
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_mw_alloc(struct irdma_sc_dev *dev,
+ struct irdma_mw_alloc_info *info, u64 scratch,
+ bool post_sq)
+{
+ u64 hdr;
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 8,
+ FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->mw_stag_index) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_PDID_HI, info->pd_id >> 18));
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_MWTYPE, info->mw_wide) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY,
+ info->mw1_bind_dont_vldt_key) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: MW_ALLOC WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp
+ * @qp: sc qp struct
+ * @info: fast mr info
+ * @post_sq: flag for cqp db to ring
+ */
+int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
+ struct irdma_fast_reg_stag_info *info,
+ bool post_sq)
+{
+ u64 temp, hdr;
+ __le64 *wqe;
+ u32 wqe_idx;
+ enum irdma_page_size page_size;
+ struct irdma_post_sq_info sq_info = {};
+
+ if (info->page_size == 0x40000000)
+ page_size = IRDMA_PAGE_SIZE_1G;
+ else if (info->page_size == 0x200000)
+ page_size = IRDMA_PAGE_SIZE_2M;
+ else
+ page_size = IRDMA_PAGE_SIZE_4K;
+
+ sq_info.wr_id = info->wr_id;
+ sq_info.signaled = info->signaled;
+
+ wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx,
+ IRDMA_QP_WQE_MIN_QUANTA, 0, &sq_info);
+ if (!wqe)
+ return -ENOMEM;
+
+ irdma_clr_wqes(&qp->qp_uk, wqe_idx);
+
+ ibdev_dbg(to_ibdev(qp->dev),
+ "MR: wr_id[%llxh] wqe_idx[%04d] location[%p]\n",
+ info->wr_id, wqe_idx,
+ &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
+
+ temp = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ?
+ (uintptr_t)info->va : info->fbo;
+ set_64bit_val(wqe, 0, temp);
+
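+ /* the first PM PBL index is split across the WQE: the upper bits
+ * go into quadword 1 and the lower bits into quadword 2
+ */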
+ temp = FIELD_GET(IRDMAQPSQ_FIRSTPMPBLIDXHI,
+ info->first_pm_pbl_index >> 16);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXHI, temp) |
+ FIELD_PREP(IRDMAQPSQ_PBLADDR >> IRDMA_HW_PAGE_SHIFT, info->reg_addr_pa));
+ set_64bit_val(wqe, 16,
+ info->total_len |
+ FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXLO, info->first_pm_pbl_index));
+
+ hdr = FIELD_PREP(IRDMAQPSQ_STAGKEY, info->stag_key) |
+ FIELD_PREP(IRDMAQPSQ_STAGINDEX, info->stag_idx) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_FAST_REGISTER) |
+ FIELD_PREP(IRDMAQPSQ_LPBLSIZE, info->chunk_size) |
+ FIELD_PREP(IRDMAQPSQ_HPAGESIZE, page_size) |
+ FIELD_PREP(IRDMAQPSQ_STAGRIGHTS, info->access_rights) |
+ FIELD_PREP(IRDMAQPSQ_VABASEDTO, info->addr_type) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_REMOTE_ATOMICS_EN, info->remote_atomics_en) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: FAST_REG WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_QP_WQE_MIN_SIZE, false);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(&qp->qp_uk);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_gen_rts_ae - request AE generated after RTS
+ * @qp: sc qp struct
+ */
+static void irdma_sc_gen_rts_ae(struct irdma_sc_qp *qp)
+{
+ __le64 *wqe;
+ u64 hdr;
+ struct irdma_qp_uk *qp_uk;
+
+ qp_uk = &qp->qp_uk;
+
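+ /* slot 0 of the SQ holds the LSMM or RTR WQE built by the caller;
+ * post a local-fence NOP in slot 1 and the RTS AE request in slot 2
+ */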
+ wqe = qp_uk->sq_base[1].elem;
+
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, 1) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+ print_hex_dump_debug("QP: NOP W/LOCAL FENCE WQE", DUMP_PREFIX_OFFSET,
+ 16, 8, wqe, IRDMA_QP_WQE_MIN_SIZE, false);
+
+ wqe = qp_uk->sq_base[2].elem;
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_GEN_RTS_AE) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+ print_hex_dump_debug("QP: CONN EST WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_QP_WQE_MIN_SIZE, false);
+}
+
+/**
+ * irdma_sc_send_lsmm - send last streaming mode message
+ * @qp: sc qp struct
+ * @lsmm_buf: buffer with lsmm message
+ * @size: size of lsmm buffer
+ * @stag: stag of lsmm buffer
+ */
+void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
+ irdma_stag stag)
+{
+ __le64 *wqe;
+ u64 hdr;
+ struct irdma_qp_uk *qp_uk;
+
+ qp_uk = &qp->qp_uk;
+ wqe = qp_uk->sq_base->elem;
+
+ set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
+ if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size) |
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, stag));
+ } else {
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_STAG, stag) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
+ }
+ set_64bit_val(wqe, 16, 0);
+
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) |
+ FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) |
+ FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: SEND_LSMM WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_QP_WQE_MIN_SIZE, false);
+
+ if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
+ irdma_sc_gen_rts_ae(qp);
+}
+
+/**
+ * irdma_sc_send_rtt - send last read0 or write0
+ * @qp: sc qp struct
+ * @read: Do read0 or write0
+ */
+void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read)
+{
+ __le64 *wqe;
+ u64 hdr;
+ struct irdma_qp_uk *qp_uk;
+
+ qp_uk = &qp->qp_uk;
+ wqe = qp_uk->sq_base->elem;
+
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 16, 0);
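+ /* read0 is a zero-length RDMA read using placeholder stag values;
+ * write0 is a zero-length RDMA write
+ */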
+ if (read) {
+ if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, 0xabcd));
+ } else {
+ set_64bit_val(wqe, 8,
+ (u64)0xabcd | FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
+ }
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, 0x1234) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_READ) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
+
+ } else {
+ if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
+ set_64bit_val(wqe, 8, 0);
+ } else {
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
+ }
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_WRITE) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
+ }
+
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: RTR WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe,
+ IRDMA_QP_WQE_MIN_SIZE, false);
+
+ if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
+ irdma_sc_gen_rts_ae(qp);
+}
+
+/**
+ * irdma_iwarp_opcode - determine the rdma layer opcode of the incoming packet
+ * @info: aeq info for the packet
+ * @pkt: packet for error
+ */
+static u32 irdma_iwarp_opcode(struct irdma_aeqe_info *info, u8 *pkt)
+{
+ __be16 *mpa;
+ u32 opcode = 0xffffffff;
+
+ if (info->q2_data_written) {
+ mpa = (__be16 *)pkt;
+ opcode = ntohs(mpa[1]) & 0xf;
+ }
+
+ return opcode;
+}
+
+/**
+ * irdma_locate_mpa - return pointer to mpa in the pkt
+ * @pkt: packet with data
+ */
+static u8 *irdma_locate_mpa(u8 *pkt)
+{
+ /* skip over ethernet header */
+ pkt += IRDMA_MAC_HLEN;
+
+ /* Skip over IP and TCP headers */
+ pkt += 4 * (pkt[0] & 0x0f);
+ pkt += 4 * ((pkt[12] >> 4) & 0x0f);
+
+ return pkt;
+}
+
+/**
+ * irdma_bld_termhdr_ctrl - setup terminate hdr control fields
+ * @qp: sc qp ptr for pkt
+ * @hdr: term hdr
+ * @opcode: flush opcode for termhdr
+ * @layer_etype: error layer + error type
+ * @err: error code in the header
+ */
+static void irdma_bld_termhdr_ctrl(struct irdma_sc_qp *qp,
+ struct irdma_terminate_hdr *hdr,
+ enum irdma_flush_opcode opcode,
+ u8 layer_etype, u8 err)
+{
+ qp->flush_code = opcode;
+ hdr->layer_etype = layer_etype;
+ hdr->error_code = err;
+}
+
+/**
+ * irdma_bld_termhdr_ddp_rdma - setup ddp and rdma hdrs in terminate hdr
+ * @pkt: ptr to mpa in offending pkt
+ * @hdr: term hdr
+ * @copy_len: offending pkt length to be copied to term hdr
+ * @is_tagged: DDP tagged or untagged
+ */
+static void irdma_bld_termhdr_ddp_rdma(u8 *pkt, struct irdma_terminate_hdr *hdr,
+ int *copy_len, u8 *is_tagged)
+{
+ u16 ddp_seg_len;
+
+ ddp_seg_len = ntohs(*(__be16 *)pkt);
+ if (ddp_seg_len) {
+ *copy_len = 2;
+ hdr->hdrct = DDP_LEN_FLAG;
+ if (pkt[2] & 0x80) {
+ *is_tagged = 1;
+ if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
+ *copy_len += TERM_DDP_LEN_TAGGED;
+ hdr->hdrct |= DDP_HDR_FLAG;
+ }
+ } else {
+ if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
+ *copy_len += TERM_DDP_LEN_UNTAGGED;
+ hdr->hdrct |= DDP_HDR_FLAG;
+ }
+ if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN) &&
+ ((pkt[3] & RDMA_OPCODE_M) == RDMA_READ_REQ_OPCODE)) {
+ *copy_len += TERM_RDMA_LEN;
+ hdr->hdrct |= RDMA_HDR_FLAG;
+ }
+ }
+ }
+}
+
+/**
+ * irdma_bld_terminate_hdr - build terminate message header
+ * @qp: qp associated with received terminate AE
+ * @info: the struct containing AE information
+ */
+static int irdma_bld_terminate_hdr(struct irdma_sc_qp *qp,
+ struct irdma_aeqe_info *info)
+{
+ u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
+ int copy_len = 0;
+ u8 is_tagged = 0;
+ u32 opcode;
+ struct irdma_terminate_hdr *termhdr;
+
+ termhdr = (struct irdma_terminate_hdr *)qp->q2_buf;
+ memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);
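+ /* the terminate header is built at the start of the Q2 buffer; the
+ * offending frame, when q2_data_written is set, begins at
+ * Q2_BAD_FRAME_OFFSET
+ */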
+
+ if (info->q2_data_written) {
+ pkt = irdma_locate_mpa(pkt);
+ irdma_bld_termhdr_ddp_rdma(pkt, termhdr, &copy_len, &is_tagged);
+ }
+
+ opcode = irdma_iwarp_opcode(info, pkt);
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ qp->sq_flush_code = info->sq;
+ qp->rq_flush_code = info->rq;
+
+ switch (info->ae_id) {
+ case IRDMA_AE_AMP_UNALLOCATED_STAG:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ if (opcode == IRDMA_OP_TYPE_RDMA_WRITE)
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
+ (LAYER_DDP << 4) | DDP_TAGGED_BUF,
+ DDP_TAGGED_INV_STAG);
+ else
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_INV_STAG);
+ break;
+ case IRDMA_AE_AMP_BOUNDS_VIOLATION:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ if (info->q2_data_written)
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
+ (LAYER_DDP << 4) | DDP_TAGGED_BUF,
+ DDP_TAGGED_BOUNDS);
+ else
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_INV_BOUNDS);
+ break;
+ case IRDMA_AE_AMP_BAD_PD:
+ switch (opcode) {
+ case IRDMA_OP_TYPE_RDMA_WRITE:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
+ (LAYER_DDP << 4) | DDP_TAGGED_BUF,
+ DDP_TAGGED_UNASSOC_STAG);
+ break;
+ case IRDMA_OP_TYPE_SEND_INV:
+ case IRDMA_OP_TYPE_SEND_SOL_INV:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_CANT_INV_STAG);
+ break;
+ default:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_UNASSOC_STAG);
+ }
+ break;
+ case IRDMA_AE_AMP_INVALID_STAG:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_INV_STAG);
+ break;
+ case IRDMA_AE_AMP_BAD_QP:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_QN);
+ break;
+ case IRDMA_AE_AMP_BAD_STAG_KEY:
+ case IRDMA_AE_AMP_BAD_STAG_INDEX:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ switch (opcode) {
+ case IRDMA_OP_TYPE_SEND_INV:
+ case IRDMA_OP_TYPE_SEND_SOL_INV:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
+ RDMAP_CANT_INV_STAG);
+ break;
+ default:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
+ RDMAP_INV_STAG);
+ }
+ break;
+ case IRDMA_AE_AMP_RIGHTS_VIOLATION:
+ case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
+ case IRDMA_AE_PRIV_OPERATION_DENIED:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_ACCESS);
+ break;
+ case IRDMA_AE_AMP_TO_WRAP:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_TO_WRAP);
+ break;
+ case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
+ break;
+ case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR,
+ (LAYER_DDP << 4) | DDP_CATASTROPHIC,
+ DDP_CATASTROPHIC_LOCAL);
+ break;
+ case IRDMA_AE_LCE_QP_CATASTROPHIC:
+ case IRDMA_AE_DDP_NO_L_BIT:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR,
+ (LAYER_DDP << 4) | DDP_CATASTROPHIC,
+ DDP_CATASTROPHIC_LOCAL);
+ break;
+ case IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_MSN_RANGE);
+ break;
+ case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_TOO_LONG);
+ break;
+ case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
+ if (is_tagged)
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_TAGGED_BUF,
+ DDP_TAGGED_INV_DDP_VER);
+ else
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_DDP_VER);
+ break;
+ case IRDMA_AE_DDP_UBE_INVALID_MO:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_MO);
+ break;
+ case IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_MSN_NO_BUF);
+ break;
+ case IRDMA_AE_DDP_UBE_INVALID_QN:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_QN);
+ break;
+ case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
+ RDMAP_INV_RDMAP_VER);
+ break;
+ default:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
+ RDMAP_UNSPECIFIED);
+ break;
+ }
+
+ if (copy_len)
+ memcpy(termhdr + 1, pkt, copy_len);
+
+ return sizeof(struct irdma_terminate_hdr) + copy_len;
+}
+
+/**
+ * irdma_terminate_send_fin() - Send fin for terminate message
+ * @qp: qp associated with received terminate AE
+ */
+void irdma_terminate_send_fin(struct irdma_sc_qp *qp)
+{
+ irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE,
+ IRDMAQP_TERM_SEND_FIN_ONLY, 0);
+}
+
+/**
+ * irdma_terminate_connection() - handle bad AE and send terminate to remote QP
+ * @qp: qp associated with received terminate AE
+ * @info: the struct containing AE information
+ */
+void irdma_terminate_connection(struct irdma_sc_qp *qp,
+ struct irdma_aeqe_info *info)
+{
+ u8 termlen = 0;
+
+ if (qp->term_flags & IRDMA_TERM_SENT)
+ return;
+
+ termlen = irdma_bld_terminate_hdr(qp, info);
+ irdma_terminate_start_timer(qp);
+ qp->term_flags |= IRDMA_TERM_SENT;
+ irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE,
+ IRDMAQP_TERM_SEND_TERM_ONLY, termlen);
+}
+
+/**
+ * irdma_terminate_received - handle terminate received AE
+ * @qp: qp associated with received terminate AE
+ * @info: the struct containing AE information
+ */
+void irdma_terminate_received(struct irdma_sc_qp *qp,
+ struct irdma_aeqe_info *info)
+{
+ u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
+ __be32 *mpa;
+ u8 ddp_ctl;
+ u8 rdma_ctl;
+ u16 aeq_id = 0;
+ struct irdma_terminate_hdr *termhdr;
+
+ mpa = (__be32 *)irdma_locate_mpa(pkt);
+ if (info->q2_data_written) {
+ /* did not validate the frame - do it now */
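+ /* a valid terminate is an untagged, last DDP segment (DDP version 1)
+ * on queue 2 with MSN 1 and MO 0, carried by RDMAP version 1
+ */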
+ ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
+ rdma_ctl = ntohl(mpa[0]) & 0xff;
+ if ((ddp_ctl & 0xc0) != 0x40)
+ aeq_id = IRDMA_AE_LCE_QP_CATASTROPHIC;
+ else if ((ddp_ctl & 0x03) != 1)
+ aeq_id = IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION;
+ else if (ntohl(mpa[2]) != 2)
+ aeq_id = IRDMA_AE_DDP_UBE_INVALID_QN;
+ else if (ntohl(mpa[3]) != 1)
+ aeq_id = IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN;
+ else if (ntohl(mpa[4]) != 0)
+ aeq_id = IRDMA_AE_DDP_UBE_INVALID_MO;
+ else if ((rdma_ctl & 0xc0) != 0x40)
+ aeq_id = IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;
+
+ info->ae_id = aeq_id;
+ if (info->ae_id) {
+ /* Bad terminate recvd - send back a terminate */
+ irdma_terminate_connection(qp, info);
+ return;
+ }
+ }
+
+ qp->term_flags |= IRDMA_TERM_RCVD;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ termhdr = (struct irdma_terminate_hdr *)&mpa[5];
+ if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
+ termhdr->layer_etype == RDMAP_REMOTE_OP) {
+ irdma_terminate_done(qp, 0);
+ } else {
+ irdma_terminate_start_timer(qp);
+ irdma_terminate_send_fin(qp);
+ }
+}
+
+static int irdma_null_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
+{
+ return 0;
+}
+
+static void irdma_null_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri)
+{
+ /* do nothing */
+}
+
+static void irdma_null_ws_reset(struct irdma_sc_vsi *vsi)
+{
+ /* do nothing */
+}
+
+/**
+ * irdma_sc_vsi_init - Init the vsi structure
+ * @vsi: pointer to vsi structure to initialize
+ * @info: the info used to initialize the vsi struct
+ */
+void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
+ struct irdma_vsi_init_info *info)
+{
+ int i;
+
+ vsi->dev = info->dev;
+ vsi->back_vsi = info->back_vsi;
+ vsi->register_qset = info->register_qset;
+ vsi->unregister_qset = info->unregister_qset;
+ vsi->mtu = info->params->mtu;
+ vsi->exception_lan_q = info->exception_lan_q;
+ vsi->vsi_idx = info->pf_data_vsi_num;
+
+ irdma_set_qos_info(vsi, info->params);
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ mutex_init(&vsi->qos[i].qos_mutex);
+ INIT_LIST_HEAD(&vsi->qos[i].qplist);
+ }
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2) {
+ vsi->dev->ws_add = irdma_ws_add;
+ vsi->dev->ws_remove = irdma_ws_remove;
+ vsi->dev->ws_reset = irdma_ws_reset;
+ } else {
+ vsi->dev->ws_add = irdma_null_ws_add;
+ vsi->dev->ws_remove = irdma_null_ws_remove;
+ vsi->dev->ws_reset = irdma_null_ws_reset;
+ }
+}
+
+/**
+ * irdma_get_stats_idx - Return stats index
+ * @vsi: pointer to the vsi
+ */
+static u16 irdma_get_stats_idx(struct irdma_sc_vsi *vsi)
+{
+ struct irdma_stats_inst_info stats_info = {};
+ struct irdma_sc_dev *dev = vsi->dev;
+ u8 i;
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ if (!irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_ALLOCATE,
+ &stats_info))
+ return stats_info.stats_idx;
+ }
+
+ for (i = 0; i < IRDMA_MAX_STATS_COUNT_GEN_1; i++) {
+ if (!dev->stats_idx_array[i]) {
+ dev->stats_idx_array[i] = true;
+ return i;
+ }
+ }
+
+ return IRDMA_INVALID_STATS_IDX;
+}
+
+/**
+ * irdma_hw_stats_init_gen1 - Initialize stat reg table used for gen1
+ * @vsi: vsi structure where hw_regs are set
+ *
+ * Populate the HW stats table
+ */
+static void irdma_hw_stats_init_gen1(struct irdma_sc_vsi *vsi)
+{
+ struct irdma_sc_dev *dev = vsi->dev;
+ const struct irdma_hw_stat_map *map;
+ u64 *stat_reg = vsi->hw_stats_regs;
+ u64 *regs = dev->hw_stats_regs;
+ u16 i, stats_reg_set = vsi->stats_idx;
+
+ map = dev->hw_stats_map;
+
+ /* First 4 stat instances are reserved for port level statistics. */
+ stats_reg_set += vsi->stats_inst_alloc ? IRDMA_FIRST_NON_PF_STAT : 0;
+
+ for (i = 0; i < dev->hw_attrs.max_stat_idx; i++) {
+ if (map[i].bitmask <= IRDMA_MAX_STATS_32)
+ stat_reg[i] = regs[i] + stats_reg_set * sizeof(u32);
+ else
+ stat_reg[i] = regs[i] + stats_reg_set * sizeof(u64);
+ }
+}
+
+/**
+ * irdma_vsi_stats_init - Initialize the vsi statistics
+ * @vsi: pointer to the vsi structure
+ * @info: The info structure used for initialization
+ */
+int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
+ struct irdma_vsi_stats_info *info)
+{
+ struct irdma_dma_mem *stats_buff_mem;
+
+ vsi->pestat = info->pestat;
+ vsi->pestat->hw = vsi->dev->hw;
+ vsi->pestat->vsi = vsi;
+ stats_buff_mem = &vsi->pestat->gather_info.stats_buff_mem;
+ stats_buff_mem->size = ALIGN(IRDMA_GATHER_STATS_BUF_SIZE * 2, 1);
+ stats_buff_mem->va = dma_alloc_coherent(vsi->pestat->hw->device,
+ stats_buff_mem->size,
+ &stats_buff_mem->pa,
+ GFP_KERNEL);
+ if (!stats_buff_mem->va)
+ return -ENOMEM;
+
+ vsi->pestat->gather_info.gather_stats_va = stats_buff_mem->va;
+ vsi->pestat->gather_info.last_gather_stats_va =
+ (void *)((uintptr_t)stats_buff_mem->va +
+ IRDMA_GATHER_STATS_BUF_SIZE);
+
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev < IRDMA_GEN_3)
+ irdma_hw_stats_start_timer(vsi);
+
+ /* when stat allocation is not required default to fcn_id. */
+ vsi->stats_idx = info->fcn_id;
+ if (info->alloc_stats_inst) {
+ u16 stats_idx = irdma_get_stats_idx(vsi);
+
+ if (stats_idx != IRDMA_INVALID_STATS_IDX) {
+ vsi->stats_inst_alloc = true;
+ vsi->stats_idx = stats_idx;
+ vsi->pestat->gather_info.use_stats_inst = true;
+ vsi->pestat->gather_info.stats_inst_index = stats_idx;
+ }
+ }
+
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ irdma_hw_stats_init_gen1(vsi);
+
+ return 0;
+}
+
+/**
+ * irdma_vsi_stats_free - Free the vsi stats
+ * @vsi: pointer to the vsi structure
+ */
+void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi)
+{
+ struct irdma_stats_inst_info stats_info = {};
+ struct irdma_sc_dev *dev = vsi->dev;
+ u16 stats_idx = vsi->stats_idx;
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ if (vsi->stats_inst_alloc) {
+ stats_info.stats_idx = vsi->stats_idx;
+ irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_FREE,
+ &stats_info);
+ }
+ } else {
+ if (vsi->stats_inst_alloc &&
+ stats_idx < vsi->dev->hw_attrs.max_stat_inst)
+ vsi->dev->stats_idx_array[stats_idx] = false;
+ }
+
+ if (!vsi->pestat)
+ return;
+
+ if (dev->hw_attrs.uk_attrs.hw_rev < IRDMA_GEN_3)
+ irdma_hw_stats_stop_timer(vsi);
+ dma_free_coherent(vsi->pestat->hw->device,
+ vsi->pestat->gather_info.stats_buff_mem.size,
+ vsi->pestat->gather_info.stats_buff_mem.va,
+ vsi->pestat->gather_info.stats_buff_mem.pa);
+ vsi->pestat->gather_info.stats_buff_mem.va = NULL;
+}
+
+/**
+ * irdma_get_encoded_wqe_size - given wq size, returns hardware encoded size
+ * @wqsize: size of the wq (sq, rq) to be encoded
+ * @queue_type: queue type selected for the calculation algorithm
+ */
+u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type)
+{
+ u8 encoded_size = 0;
+
+ if (queue_type == IRDMA_QUEUE_TYPE_SRQ) {
+ /* The smallest SRQ size is 256B (8 quanta), which is
+ * encoded as 0.
+ */
+ encoded_size = ilog2(wqsize) - 3;
+
+ return encoded_size;
+ }
+ /* cqp sq's hw coded value starts from 1 for size of 4
+ * while it starts from 0 for a qp's wqs.
+ */
+ if (queue_type == IRDMA_QUEUE_TYPE_CQP)
+ encoded_size = 1;
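+ /* the loop below computes ilog2(wqsize) - 2, e.g. a 512-entry
+ * wq encodes to 7 (8 for the cqp sq)
+ */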
+ wqsize >>= 2;
+ while (wqsize >>= 1)
+ encoded_size++;
+
+ return encoded_size;
+}
+
+/**
+ * irdma_sc_gather_stats - collect the statistics
+ * @cqp: struct for cqp hw
+ * @info: gather stats info structure
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static int irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
+ struct irdma_stats_gather_info *info,
+ u64 scratch)
+{
+ __le64 *wqe;
+ u64 temp;
+
+ if (info->stats_buff_mem.size < IRDMA_GATHER_STATS_BUF_SIZE)
+ return -ENOMEM;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fcn_index));
+ set_64bit_val(wqe, 32, info->stats_buff_mem.pa);
+
+ temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_USE_INST, info->use_stats_inst) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX,
+ info->stats_inst_index) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX,
+ info->use_hmc_fcn_index) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_GATHER_STATS);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, temp);
+
+ print_hex_dump_debug("STATS: GATHER_STATS WQE", DUMP_PREFIX_OFFSET,
+ 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+
+ irdma_sc_cqp_post_sq(cqp);
+ ibdev_dbg(to_ibdev(cqp->dev),
+ "STATS: CQP SQ head 0x%x tail 0x%x size 0x%x\n",
+ cqp->sq_ring.head, cqp->sq_ring.tail, cqp->sq_ring.size);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_manage_stats_inst - allocate or free stats instance
+ * @cqp: struct for cqp hw
+ * @info: stats info structure
+ * @alloc: alloc vs. delete flag
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static int irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
+ struct irdma_stats_inst_info *info,
+ bool alloc, u64 scratch)
+{
+ __le64 *wqe;
+ u64 temp;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fn_id));
+ temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_ALLOC_INST, alloc) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX,
+ info->use_hmc_fcn_index) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX, info->stats_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_MANAGE_STATS);
+
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, temp);
+
+ print_hex_dump_debug("WQE: MANAGE_STATS WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+
+ irdma_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * irdma_sc_set_up_map - set the up map table
+ * @cqp: struct for cqp hw
+ * @info: User priority map info
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static int irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
+ struct irdma_up_info *info, u64 scratch)
+{
+ __le64 *wqe;
+ u64 temp = 0;
+ int i;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
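+ /* pack one map entry per user priority, one byte each, into a single 64-bit word */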
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
+ temp |= (u64)info->map[i] << (i * 8);
+
+ set_64bit_val(wqe, 0, temp);
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_UP_CNPOVERRIDE, info->cnp_up_override) |
+ FIELD_PREP(IRDMA_CQPSQ_UP_HMCFCNIDX, info->hmc_fcn_idx));
+
+ temp = FIELD_PREP(IRDMA_CQPSQ_UP_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_UP_USEVLAN, info->use_vlan) |
+ FIELD_PREP(IRDMA_CQPSQ_UP_USEOVERRIDE,
+ info->use_cnp_up_override) |
+ FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_UP_MAP);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, temp);
+
+ print_hex_dump_debug("WQE: UPMAP WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe,
+ IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_manage_ws_node - create/modify/destroy WS node
+ * @cqp: struct for cqp hw
+ * @info: node info structure
+ * @node_op: 0 for add, 1 for modify, 2 for delete
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static int irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
+ struct irdma_ws_node_info *info,
+ enum irdma_ws_node_op node_op, u64 scratch)
+{
+ __le64 *wqe;
+ u64 temp = 0;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 32,
+ FIELD_PREP(IRDMA_CQPSQ_WS_VSI, info->vsi) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_WEIGHT, info->weight));
+
+ temp = FIELD_PREP(IRDMA_CQPSQ_WS_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_NODEOP, node_op) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_ENABLENODE, info->enable) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_NODETYPE, info->type_leaf) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_PRIOTYPE, info->prio_type) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_TC, info->tc) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_OP, IRDMA_CQP_OP_WORK_SCHED_NODE) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_PARENTID, info->parent_id) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_NODEID, info->id);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, temp);
+
+ print_hex_dump_debug("WQE: MANAGE_WS WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_flush_wqes - flush qp's wqe
+ * @qp: sc qp
+ * @info: flush information
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
+ struct irdma_qp_flush_info *info, u64 scratch,
+ bool post_sq)
+{
+ u64 temp = 0;
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+ bool flush_sq = false, flush_rq = false;
+
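+ /* flush each queue at most once; if both the SQ and RQ were already
+ * flushed, the request is a no-op
+ */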
+ if (info->rq && !qp->flush_rq)
+ flush_rq = true;
+ if (info->sq && !qp->flush_sq)
+ flush_sq = true;
+ qp->flush_sq |= flush_sq;
+ qp->flush_rq |= flush_rq;
+
+ if (!flush_sq && !flush_rq) {
+ ibdev_dbg(to_ibdev(qp->dev),
+ "CQP: Additional flush request ignored for qp %x\n",
+ qp->qp_uk.qp_id);
+ return -EALREADY;
+ }
+
+ cqp = qp->pd->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ if (info->userflushcode) {
+ if (flush_rq)
+ temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMNERR,
+ info->rq_minor_code) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMJERR,
+ info->rq_major_code);
+ if (flush_sq)
+ temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMNERR,
+ info->sq_minor_code) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMJERR,
+ info->sq_major_code);
+ }
+ set_64bit_val(wqe, 16, temp);
+
+ temp = (info->generate_ae) ?
+ info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
+ info->ae_src) : 0;
+ set_64bit_val(wqe, 8, temp);
+ if (cqp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_ERR_SQ_IDX, info->err_sq_idx));
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_ERR_RQ_IDX, info->err_rq_idx));
+ }
+
+ hdr = qp->qp_uk.qp_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_FLUSH_WQES) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, info->generate_ae) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_USERFLCODE, info->userflushcode) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHSQ, flush_sq) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHRQ, flush_rq) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ if (cqp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ hdr |= FIELD_PREP(IRDMA_CQPSQ_FWQE_ERR_SQ_IDX_VALID, info->err_sq_idx_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_ERR_RQ_IDX_VALID, info->err_rq_idx_valid);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: QP_FLUSH WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_gen_ae - generate AE, reusing the flush WQE field layout
+ * @qp: sc qp
+ * @info: gen ae information
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_gen_ae(struct irdma_sc_qp *qp,
+ struct irdma_gen_ae_info *info, u64 scratch,
+ bool post_sq)
+{
+ u64 temp;
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+
+ cqp = qp->pd->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ temp = info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
+ info->ae_src);
+ set_64bit_val(wqe, 8, temp);
+
+ hdr = qp->qp_uk.qp_id | FIELD_PREP(IRDMA_CQPSQ_OPCODE,
+ IRDMA_CQP_OP_GEN_AE) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: GEN_AE WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_upload_context - upload qp's context
+ * @dev: sc device struct
+ * @info: upload context info ptr for return
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
+ struct irdma_upload_context_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 16, info->buf_pa);
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_UCTX_QPID, info->qp_id) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPLOAD_CONTEXT) |
+ FIELD_PREP(IRDMA_CQPSQ_UCTX_QPTYPE, info->qp_type) |
+ FIELD_PREP(IRDMA_CQPSQ_UCTX_RAWFORMAT, info->raw_format) |
+ FIELD_PREP(IRDMA_CQPSQ_UCTX_FREEZEQP, info->freeze_qp) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: QP_UPLOAD_CTX WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_manage_push_page - Handle push page
+ * @cqp: struct for cqp hw
+ * @info: push page info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
+ struct irdma_cqp_manage_push_page_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+
+ if (info->free_page &&
+ info->push_idx >= cqp->dev->hw_attrs.max_hw_device_pages)
+ return -EINVAL;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 16, info->qs_handle);
+ hdr = FIELD_PREP(IRDMA_CQPSQ_MPP_PPIDX, info->push_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_MPP_PPTYPE, info->push_page_type) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_PUSH_PAGES) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_MPP_FREE_PAGE, info->free_page);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: MANAGE_PUSH_PAGES WQE", DUMP_PREFIX_OFFSET,
+ 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_suspend_qp - suspend qp for param change
+ * @cqp: struct for cqp hw
+ * @qp: sc qp struct
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static int irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
+ u64 scratch)
+{
+ u64 hdr;
+ __le64 *wqe;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_SUSPENDQP_QPID, qp->qp_uk.qp_id) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_SUSPEND_QP) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: SUSPEND_QP WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_resume_qp - resume qp after suspend
+ * @cqp: struct for cqp hw
+ * @qp: sc qp struct
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static int irdma_sc_resume_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
+ u64 scratch)
+{
+ u64 hdr;
+ __le64 *wqe;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QSHANDLE, qp->qs_handle));
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QPID, qp->qp_uk.qp_id) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_RESUME_QP) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: RESUME_QP WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_cq_ack - acknowledge completion q
+ * @cq: cq struct
+ */
+static inline void irdma_sc_cq_ack(struct irdma_sc_cq *cq)
+{
+ writel(cq->cq_uk.cq_id, cq->cq_uk.cq_ack_db);
+}
+
+/**
+ * irdma_sc_cq_init - initialize completion q
+ * @cq: cq struct
+ * @info: cq initialization info
+ */
+int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info)
+{
+ u32 pble_obj_cnt;
+
+ pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+ if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
+ return -EINVAL;
+
+ cq->cq_pa = info->cq_base_pa;
+ cq->dev = info->dev;
+ cq->ceq_id = info->ceq_id;
+ info->cq_uk_init_info.cqe_alloc_db = cq->dev->cq_arm_db;
+ info->cq_uk_init_info.cq_ack_db = cq->dev->cq_ack_db;
+ irdma_uk_cq_init(&cq->cq_uk, &info->cq_uk_init_info);
+
+ cq->virtual_map = info->virtual_map;
+ cq->pbl_chunk_size = info->pbl_chunk_size;
+ cq->ceqe_mask = info->ceqe_mask;
+ cq->cq_type = (info->type) ? info->type : IRDMA_CQ_TYPE_IWARP;
+ cq->shadow_area_pa = info->shadow_area_pa;
+ cq->shadow_read_threshold = info->shadow_read_threshold;
+ cq->ceq_id_valid = info->ceq_id_valid;
+ cq->tph_en = info->tph_en;
+ cq->tph_val = info->tph_val;
+ cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+ cq->vsi = info->vsi;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_cq_create - create completion q
+ * @cq: cq struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @check_overflow: flag for overflow check
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch,
+ bool check_overflow, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+ struct irdma_sc_ceq *ceq;
+ int ret_code = 0;
+
+ cqp = cq->dev->cqp;
+ if (cq->cq_uk.cq_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt)
+ return -EINVAL;
+
+ if (cq->ceq_id >= cq->dev->hmc_fpm_misc.max_ceqs)
+ return -EINVAL;
+
+ ceq = cq->dev->ceq[cq->ceq_id];
+ if (ceq && ceq->reg_cq)
+ ret_code = irdma_sc_add_cq_ctx(ceq, cq);
+
+ if (ret_code)
+ return ret_code;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe) {
+ if (ceq && ceq->reg_cq)
+ irdma_sc_remove_cq_ctx(ceq, cq);
+ return -ENOMEM;
+ }
+
+ set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
+ set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
+ set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
+ set_64bit_val(wqe, 40, cq->shadow_area_pa);
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX, (cq->virtual_map ? cq->first_pm_pbl_idx : 0)));
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
+ FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
+
+ hdr = FLD_LS_64(cq->dev, cq->cq_uk.cq_id, IRDMA_CQPSQ_CQ_CQID) |
+ FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
+ IRDMA_CQPSQ_CQ_CEQID) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, check_overflow) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CQID_HIGH, cq->cq_uk.cq_id >> 22) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CEQID_HIGH,
+ (cq->ceq_id_valid ? cq->ceq_id : 0) >> 10) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT,
+ cq->cq_uk.avoid_mem_cflct) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: CQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_cq_destroy - destroy completion q
+ * @cq: cq struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+ struct irdma_sc_ceq *ceq;
+
+ cqp = cq->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ ceq = cq->dev->ceq[cq->ceq_id];
+ if (ceq && ceq->reg_cq)
+ irdma_sc_remove_cq_ctx(ceq, cq);
+
+ set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
+ set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
+ set_64bit_val(wqe, 40, cq->shadow_area_pa);
+ set_64bit_val(wqe, 48,
+ (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
+
+ hdr = cq->cq_uk.cq_id |
+ FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
+ IRDMA_CQPSQ_CQ_CEQID) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, cq->cq_uk.avoid_mem_cflct) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: CQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_cq_resize - set resized cq buffer info
+ * @cq: resized cq
+ * @info: resized cq buffer info
+ */
+void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info)
+{
+ cq->virtual_map = info->virtual_map;
+ cq->cq_pa = info->cq_pa;
+ cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+ cq->pbl_chunk_size = info->pbl_chunk_size;
+ irdma_uk_cq_resize(&cq->cq_uk, info->cq_base, info->cq_size);
+}
+
+/**
+ * irdma_sc_cq_modify - modify a Completion Queue
+ * @cq: cq struct
+ * @info: modification info struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag to post to sq
+ */
+static int irdma_sc_cq_modify(struct irdma_sc_cq *cq,
+ struct irdma_modify_cq_info *info, u64 scratch,
+ bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+ u32 pble_obj_cnt;
+
+ pble_obj_cnt = cq->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+ if (info->cq_resize && info->virtual_map &&
+ info->first_pm_pbl_idx >= pble_obj_cnt)
+ return -EINVAL;
+
+ cqp = cq->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0, info->cq_size);
+ set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, info->shadow_read_threshold));
+ set_64bit_val(wqe, 32, info->cq_pa);
+ set_64bit_val(wqe, 40, cq->shadow_area_pa);
+ set_64bit_val(wqe, 48, info->first_pm_pbl_idx);
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
+ FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
+
+ hdr = cq->cq_uk.cq_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_CQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CQRESIZE, info->cq_resize) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, info->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, info->check_overflow) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, info->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
+ FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT,
+ cq->cq_uk.avoid_mem_cflct) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: CQ_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_get_decoded_ird_size_gen_3 - get decoded IRD size for GEN 3
+ * @ird_enc: IRD encoding
+ * IRD size defaults to a value of 4 in case of invalid input.
+ */
+static u16 irdma_sc_get_decoded_ird_size_gen_3(u8 ird_enc)
+{
+ switch (ird_enc) {
+ case IRDMA_IRD_HW_SIZE_4096_GEN3:
+ return 4096;
+ case IRDMA_IRD_HW_SIZE_2048_GEN3:
+ return 2048;
+ case IRDMA_IRD_HW_SIZE_1024_GEN3:
+ return 1024;
+ case IRDMA_IRD_HW_SIZE_512_GEN3:
+ return 512;
+ case IRDMA_IRD_HW_SIZE_256_GEN3:
+ return 256;
+ case IRDMA_IRD_HW_SIZE_128_GEN3:
+ return 128;
+ case IRDMA_IRD_HW_SIZE_64_GEN3:
+ return 64;
+ case IRDMA_IRD_HW_SIZE_32_GEN3:
+ return 32;
+ case IRDMA_IRD_HW_SIZE_16_GEN3:
+ return 16;
+ case IRDMA_IRD_HW_SIZE_8_GEN3:
+ return 8;
+ case IRDMA_IRD_HW_SIZE_4_GEN3:
+ return 4;
+ default:
+ return 4;
+ }
+}
+
+/**
+ * irdma_check_cqp_progress - check cqp processing progress
+ * @timeout: timeout info struct
+ * @dev: sc device struct
+ */
+void irdma_check_cqp_progress(struct irdma_cqp_timeout *timeout, struct irdma_sc_dev *dev)
+{
+ u64 completed_ops = atomic64_read(&dev->cqp->completed_ops);
+
+ if (timeout->compl_cqp_cmds != completed_ops) {
+ timeout->compl_cqp_cmds = completed_ops;
+ timeout->count = 0;
+ } else if (timeout->compl_cqp_cmds != dev->cqp->requested_ops) {
+ timeout->count++;
+ }
+}
+
+/**
+ * irdma_get_cqp_reg_info - get head and tail for cqp using registers
+ * @cqp: struct for cqp hw
+ * @val: cqp tail register value
+ * @tail: wqtail register value
+ * @error: cqp processing err
+ */
+static inline void irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val,
+ u32 *tail, u32 *error)
+{
+ *val = readl(cqp->dev->hw_regs[IRDMA_CQPTAIL]);
+ *tail = FIELD_GET(IRDMA_CQPTAIL_WQTAIL, *val);
+ *error = FIELD_GET(IRDMA_CQPTAIL_CQP_OP_ERR, *val);
+}
+
+/**
+ * irdma_sc_cqp_def_cmpl_ae_handler - remove completed requests from pending list
+ * @dev: sc device struct
+ * @info: AE entry info
+ * @first: true if this is the first call to this handler for given AEQE
+ * @scratch: (out) scratch entry pointer
+ * @sw_def_info: (in/out) SW ticket value for this AE
+ *
+ * In case of AE_DEF_CMPL event, this function should be called in a loop
+ * until it returns NULL-ptr via scratch.
+ * For each call, it looks for a matching CQP request on pending list,
+ * removes it from the list and returns the pointer to the associated scratch
+ * entry.
+ * If this is the first call to this function for given AEQE, sw_def_info
+ * value is not used to find matching requests. Instead, it is populated
+ * with the value from the first matching cqp_request on the list.
+ * For subsequent calls, ooo_op->sw_def_info needs to match the value passed
+ * by a caller.
+ *
+ * Return: scratch entry pointer for cqp_request to be released or NULL
+ * if no matching request is found.
+ */
+void irdma_sc_cqp_def_cmpl_ae_handler(struct irdma_sc_dev *dev,
+ struct irdma_aeqe_info *info,
+ bool first, u64 *scratch,
+ u32 *sw_def_info)
+{
+ struct irdma_ooo_cqp_op *ooo_op;
+ unsigned long flags;
+
+ *scratch = 0;
+
+ spin_lock_irqsave(&dev->cqp->ooo_list_lock, flags);
+ list_for_each_entry(ooo_op, &dev->cqp->ooo_pnd, list_entry) {
+ if (ooo_op->deferred &&
+ ((first && ooo_op->def_info == info->def_info) ||
+ (!first && ooo_op->sw_def_info == *sw_def_info))) {
+ *sw_def_info = ooo_op->sw_def_info;
+ *scratch = ooo_op->scratch;
+
+ list_move(&ooo_op->list_entry, &dev->cqp->ooo_avail);
+ atomic64_inc(&dev->cqp->completed_ops);
+
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->cqp->ooo_list_lock, flags);
+
+ if (first && !*scratch)
+ ibdev_dbg(to_ibdev(dev),
+ "AEQ: deferred completion with unknown ticket: def_info 0x%x\n",
+ info->def_info);
+}
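+
+/* Illustrative caller sketch (not part of this patch): drain every request
+ * tied to a single AE_DEF_CMPL AEQE, following the contract documented
+ * above. Assumes dev and info are the caller's device and AEQE info.
+ *
+ * u64 scratch;
+ * u32 sw_def_info;
+ * bool first = true;
+ *
+ * do {
+ * irdma_sc_cqp_def_cmpl_ae_handler(dev, info, first, &scratch, &sw_def_info);
+ * first = false;
+ * if (scratch)
+ * ...release the cqp_request identified by scratch...
+ * } while (scratch);
+ */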
+
+/**
+ * irdma_sc_cqp_cleanup_handler - remove requests from pending list
+ * @dev: sc device struct
+ *
+ * This function should be called in a loop from irdma_cleanup_pending_cqp_op.
+ * For each call, it returns first CQP request on pending list, removes it
+ * from the list and returns the pointer to the associated scratch entry.
+ *
+ * Return: scratch entry pointer for cqp_request to be released or NULL
+ * if pending list is empty.
+ */
+u64 irdma_sc_cqp_cleanup_handler(struct irdma_sc_dev *dev)
+{
+ struct irdma_ooo_cqp_op *ooo_op;
+ u64 scratch = 0;
+
+ list_for_each_entry(ooo_op, &dev->cqp->ooo_pnd, list_entry) {
+ scratch = ooo_op->scratch;
+
+ list_del(&ooo_op->list_entry);
+ list_add(&ooo_op->list_entry, &dev->cqp->ooo_avail);
+ atomic64_inc(&dev->cqp->completed_ops);
+
+ break;
+ }
+
+ return scratch;
+}
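+
+/* Illustrative caller sketch (not part of this patch): drain the pending
+ * list until it is empty, as described above. Assumes dev is the caller's
+ * irdma_sc_dev.
+ *
+ * u64 scratch;
+ *
+ * while ((scratch = irdma_sc_cqp_cleanup_handler(dev)))
+ * ...release the cqp_request identified by scratch...
+ */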
+
+/**
+ * irdma_cqp_poll_registers - poll cqp registers
+ * @cqp: struct for cqp hw
+ * @tail: wqtail register value
+ * @count: how many times to try for completion
+ */
+static int irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp, u32 tail,
+ u32 count)
+{
+ u32 i = 0;
+ u32 newtail, error, val;
+
+ while (i++ < count) {
+ irdma_get_cqp_reg_info(cqp, &val, &newtail, &error);
+ if (error) {
+ error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
+ ibdev_dbg(to_ibdev(cqp->dev),
+ "CQP: CQPERRCODES error_code[x%08X]\n",
+ error);
+ return -EIO;
+ }
+ if (newtail != tail) {
+ /* SUCCESS */
+ IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
+ atomic64_inc(&cqp->completed_ops);
+ return 0;
+ }
+ udelay(cqp->dev->hw_attrs.max_sleep_count);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * irdma_sc_decode_fpm_commit - decode a 64 bit value into count and base
+ * @dev: sc device struct
+ * @buf: pointer to commit buffer
+ * @buf_idx: buffer index
+ * @obj_info: object info pointer
+ * @rsrc_idx: index of the memory resource
+ */
+static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 *buf,
+ u32 buf_idx, struct irdma_hmc_obj_info *obj_info,
+ u32 rsrc_idx)
+{
+ u64 temp;
+
+ get_64bit_val(buf, buf_idx, &temp);
+
+ switch (rsrc_idx) {
+ case IRDMA_HMC_IW_QP:
+ obj_info[rsrc_idx].cnt = (u32)FIELD_GET(IRDMA_COMMIT_FPM_QPCNT, temp);
+ break;
+ case IRDMA_HMC_IW_CQ:
+ obj_info[rsrc_idx].cnt = (u32)FLD_RS_64(dev, temp, IRDMA_COMMIT_FPM_CQCNT);
+ break;
+ case IRDMA_HMC_IW_APBVT_ENTRY:
+ if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
+ obj_info[rsrc_idx].cnt = 1;
+ else
+ obj_info[rsrc_idx].cnt = 0;
+ break;
+ default:
+ obj_info[rsrc_idx].cnt = (u32)temp;
+ break;
+ }
+
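+ /* The base field is reported in units of 512 bytes. */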
+ obj_info[rsrc_idx].base = (temp >> IRDMA_COMMIT_FPM_BASE_S) * 512;
+
+ return temp;
+}
+
+/**
+ * irdma_sc_parse_fpm_commit_buf - parse fpm commit buffer
+ * @dev: pointer to dev struct
+ * @buf: ptr to fpm commit buffer
+ * @info: ptr to irdma_hmc_obj_info struct
+ * @sd: (out) number of SDs needed for the HMC objects
+ *
+ * Parses the fpm commit info and copies the base values
+ * of the hmc objects into hmc_info.
+ */
+static void
+irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
+ struct irdma_hmc_obj_info *info, u32 *sd)
+{
+ u64 size;
+ u32 i;
+ u64 max_base = 0;
+ u32 last_hmc_obj = 0;
+
+ irdma_sc_decode_fpm_commit(dev, buf, 0, info,
+ IRDMA_HMC_IW_QP);
+ irdma_sc_decode_fpm_commit(dev, buf, 8, info,
+ IRDMA_HMC_IW_CQ);
+ irdma_sc_decode_fpm_commit(dev, buf, 16, info,
+ IRDMA_HMC_IW_SRQ);
+ irdma_sc_decode_fpm_commit(dev, buf, 24, info,
+ IRDMA_HMC_IW_HTE);
+ irdma_sc_decode_fpm_commit(dev, buf, 32, info,
+ IRDMA_HMC_IW_ARP);
+ irdma_sc_decode_fpm_commit(dev, buf, 40, info,
+ IRDMA_HMC_IW_APBVT_ENTRY);
+ irdma_sc_decode_fpm_commit(dev, buf, 48, info,
+ IRDMA_HMC_IW_MR);
+ irdma_sc_decode_fpm_commit(dev, buf, 56, info,
+ IRDMA_HMC_IW_XF);
+ irdma_sc_decode_fpm_commit(dev, buf, 64, info,
+ IRDMA_HMC_IW_XFFL);
+ irdma_sc_decode_fpm_commit(dev, buf, 72, info,
+ IRDMA_HMC_IW_Q1);
+ irdma_sc_decode_fpm_commit(dev, buf, 80, info,
+ IRDMA_HMC_IW_Q1FL);
+ irdma_sc_decode_fpm_commit(dev, buf, 88, info,
+ IRDMA_HMC_IW_TIMER);
+ irdma_sc_decode_fpm_commit(dev, buf, 112, info,
+ IRDMA_HMC_IW_PBLE);
+ /* skipping RSVD. */
+ if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
+ irdma_sc_decode_fpm_commit(dev, buf, 96, info,
+ IRDMA_HMC_IW_FSIMC);
+ irdma_sc_decode_fpm_commit(dev, buf, 104, info,
+ IRDMA_HMC_IW_FSIAV);
+ irdma_sc_decode_fpm_commit(dev, buf, 128, info,
+ IRDMA_HMC_IW_RRF);
+ irdma_sc_decode_fpm_commit(dev, buf, 136, info,
+ IRDMA_HMC_IW_RRFFL);
+ irdma_sc_decode_fpm_commit(dev, buf, 144, info,
+ IRDMA_HMC_IW_HDR);
+ irdma_sc_decode_fpm_commit(dev, buf, 152, info,
+ IRDMA_HMC_IW_MD);
+ if (dev->cqp->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) {
+ irdma_sc_decode_fpm_commit(dev, buf, 160, info,
+ IRDMA_HMC_IW_OOISC);
+ irdma_sc_decode_fpm_commit(dev, buf, 168, info,
+ IRDMA_HMC_IW_OOISCFFL);
+ }
+ }
+
+ /* searching for the last object in HMC to find the size of the HMC area. */
+ for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) {
+ if (info[i].base > max_base && info[i].cnt) {
+ max_base = info[i].base;
+ last_hmc_obj = i;
+ }
+ }
+
+ size = info[last_hmc_obj].cnt * info[last_hmc_obj].size +
+ info[last_hmc_obj].base;
+
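+ /* Each SD covers 2MB (1 << 21); round up when a partial SD remains. */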
+ if (size & 0x1FFFFF)
+ *sd = (u32)((size >> 21) + 1); /* add 1 for remainder */
+ else
+ *sd = (u32)(size >> 21);
+}
+
+/**
+ * irdma_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
+ * @buf: ptr to fpm query buffer
+ * @buf_idx: index into buf
+ * @obj_info: ptr to irdma_hmc_obj_info struct
+ * @rsrc_idx: resource index into info
+ *
+ * Decode a 64 bit value from fpm query buffer into max count and size
+ */
+static u64 irdma_sc_decode_fpm_query(__le64 *buf, u32 buf_idx,
+ struct irdma_hmc_obj_info *obj_info,
+ u32 rsrc_idx)
+{
+ u64 temp;
+ u32 size;
+
+ get_64bit_val(buf, buf_idx, &temp);
+ obj_info[rsrc_idx].max_cnt = (u32)temp;
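+ /* The upper 32 bits hold the object size as a power-of-two exponent. */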
+ size = (u32)(temp >> 32);
+ obj_info[rsrc_idx].size = BIT_ULL(size);
+
+ return temp;
+}
+
+/**
+ * irdma_sc_parse_fpm_query_buf() - parses fpm query buffer
+ * @dev: ptr to shared code device
+ * @buf: ptr to fpm query buffer
+ * @hmc_info: ptr to irdma_hmc_obj_info struct
+ * @hmc_fpm_misc: ptr to fpm data
+ *
+ * Parses the fpm query buffer and copies the max_cnt and
+ * size values of the hmc objects into hmc_info.
+ */
+static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
+ struct irdma_hmc_info *hmc_info,
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc)
+{
+ struct irdma_hmc_obj_info *obj_info;
+ u8 ird_encoding;
+ u64 temp;
+ u32 size;
+ u16 max_pe_sds;
+
+ obj_info = hmc_info->hmc_obj;
+
+ get_64bit_val(buf, 0, &temp);
+ hmc_info->first_sd_index = (u16)FIELD_GET(IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX, temp);
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ max_pe_sds = (u16)FIELD_GET(IRDMA_QUERY_FPM_MAX_PE_SDS_GEN3, temp);
+ else
+ max_pe_sds = (u16)FIELD_GET(IRDMA_QUERY_FPM_MAX_PE_SDS, temp);
+
+ /* Reduce SD count for unprivileged functions by 1 to account for PBLE
+ * backing page rounding
+ */
+ if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2 &&
+ (hmc_info->hmc_fn_id >= dev->hw_attrs.first_hw_vf_fpm_id ||
+ !dev->privileged))
+ max_pe_sds--;
+
+ hmc_fpm_misc->max_sds = max_pe_sds;
+ hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
+ get_64bit_val(buf, 8, &temp);
+ obj_info[IRDMA_HMC_IW_QP].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_QPS, temp);
+ size = (u32)(temp >> 32);
+ obj_info[IRDMA_HMC_IW_QP].size = BIT_ULL(size);
+
+ get_64bit_val(buf, 16, &temp);
+ obj_info[IRDMA_HMC_IW_CQ].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_CQS, temp);
+ size = (u32)(temp >> 32);
+ obj_info[IRDMA_HMC_IW_CQ].size = BIT_ULL(size);
+
+ irdma_sc_decode_fpm_query(buf, 24, obj_info, IRDMA_HMC_IW_SRQ);
+ irdma_sc_decode_fpm_query(buf, 32, obj_info, IRDMA_HMC_IW_HTE);
+ irdma_sc_decode_fpm_query(buf, 40, obj_info, IRDMA_HMC_IW_ARP);
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 0;
+ obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 0;
+ } else {
+ obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 8192;
+ obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 1;
+ }
+
+ irdma_sc_decode_fpm_query(buf, 48, obj_info, IRDMA_HMC_IW_MR);
+ irdma_sc_decode_fpm_query(buf, 56, obj_info, IRDMA_HMC_IW_XF);
+
+ get_64bit_val(buf, 64, &temp);
+ obj_info[IRDMA_HMC_IW_XFFL].max_cnt = (u32)temp;
+ obj_info[IRDMA_HMC_IW_XFFL].size = 4;
+ hmc_fpm_misc->xf_block_size = FIELD_GET(IRDMA_QUERY_FPM_XFBLOCKSIZE, temp);
+ if (obj_info[IRDMA_HMC_IW_XF].max_cnt && !hmc_fpm_misc->xf_block_size)
+ return -EINVAL;
+
+ irdma_sc_decode_fpm_query(buf, 72, obj_info, IRDMA_HMC_IW_Q1);
+ get_64bit_val(buf, 80, &temp);
+ obj_info[IRDMA_HMC_IW_Q1FL].max_cnt = (u32)temp;
+ obj_info[IRDMA_HMC_IW_Q1FL].size = 4;
+
+ hmc_fpm_misc->q1_block_size = FIELD_GET(IRDMA_QUERY_FPM_Q1BLOCKSIZE, temp);
+ if (!hmc_fpm_misc->q1_block_size)
+ return -EINVAL;
+
+ irdma_sc_decode_fpm_query(buf, 88, obj_info, IRDMA_HMC_IW_TIMER);
+
+ get_64bit_val(buf, 112, &temp);
+ obj_info[IRDMA_HMC_IW_PBLE].max_cnt = (u32)temp;
+ obj_info[IRDMA_HMC_IW_PBLE].size = 8;
+
+ get_64bit_val(buf, 120, &temp);
+ hmc_fpm_misc->max_ceqs = FIELD_GET(IRDMA_QUERY_FPM_MAX_CEQS, temp);
+ hmc_fpm_misc->ht_multiplier = FIELD_GET(IRDMA_QUERY_FPM_HTMULTIPLIER, temp);
+ hmc_fpm_misc->timer_bucket = FIELD_GET(IRDMA_QUERY_FPM_TIMERBUCKET, temp);
+ if (FIELD_GET(IRDMA_MANAGE_RSRC_VER2,
+ dev->feature_info[IRDMA_FTN_FLAGS])) {
+ ird_encoding = (u8)FIELD_GET(IRDMA_QUERY_FPM_MAX_IRD, temp);
+ hmc_fpm_misc->ird =
+ irdma_sc_get_decoded_ird_size_gen_3(ird_encoding) / 2;
+ dev->hw_attrs.max_hw_ird = hmc_fpm_misc->ird;
+ dev->hw_attrs.max_hw_ord = hmc_fpm_misc->ird;
+ }
+ if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ return 0;
+ irdma_sc_decode_fpm_query(buf, 96, obj_info, IRDMA_HMC_IW_FSIMC);
+ irdma_sc_decode_fpm_query(buf, 104, obj_info, IRDMA_HMC_IW_FSIAV);
+ irdma_sc_decode_fpm_query(buf, 128, obj_info, IRDMA_HMC_IW_RRF);
+
+ get_64bit_val(buf, 136, &temp);
+ obj_info[IRDMA_HMC_IW_RRFFL].max_cnt = (u32)temp;
+ obj_info[IRDMA_HMC_IW_RRFFL].size = 4;
+ hmc_fpm_misc->rrf_block_size = FIELD_GET(IRDMA_QUERY_FPM_RRFBLOCKSIZE, temp);
+ if (!hmc_fpm_misc->rrf_block_size &&
+ obj_info[IRDMA_HMC_IW_RRFFL].max_cnt)
+ return -EINVAL;
+
+ irdma_sc_decode_fpm_query(buf, 144, obj_info, IRDMA_HMC_IW_HDR);
+ irdma_sc_decode_fpm_query(buf, 152, obj_info, IRDMA_HMC_IW_MD);
+
+ if (dev->cqp->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) {
+ irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC);
+
+ get_64bit_val(buf, 168, &temp);
+ obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp;
+ obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4;
+ hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
+ if (!hmc_fpm_misc->ooiscf_block_size &&
+ obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
+ return -EINVAL;
+ }
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ get_64bit_val(buf, 176, &temp);
+ hmc_fpm_misc->loc_mem_pages = (u32)FIELD_GET(IRDMA_QUERY_FPM_LOC_MEM_PAGES, temp);
+ if (!hmc_fpm_misc->loc_mem_pages)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_sc_find_reg_cq - find cq ctx index
+ * @ceq: ceq sc structure
+ * @cq: cq sc structure
+ */
+static u32 irdma_sc_find_reg_cq(struct irdma_sc_ceq *ceq,
+ struct irdma_sc_cq *cq)
+{
+ u32 i;
+
+ for (i = 0; i < ceq->reg_cq_size; i++) {
+ if (cq == ceq->reg_cq[i])
+ return i;
+ }
+
+ return IRDMA_INVALID_CQ_IDX;
+}
+
+/**
+ * irdma_sc_add_cq_ctx - add cq ctx tracking for ceq
+ * @ceq: ceq sc structure
+ * @cq: cq sc structure
+ */
+int irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ceq->req_cq_lock, flags);
+
+ if (ceq->reg_cq_size == ceq->elem_cnt) {
+ spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
+ return -ENOMEM;
+ }
+
+ ceq->reg_cq[ceq->reg_cq_size++] = cq;
+
+ spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_remove_cq_ctx - remove cq ctx tracking for ceq
+ * @ceq: ceq sc structure
+ * @cq: cq sc structure
+ */
+void irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
+{
+ unsigned long flags;
+ u32 cq_ctx_idx;
+
+ spin_lock_irqsave(&ceq->req_cq_lock, flags);
+ cq_ctx_idx = irdma_sc_find_reg_cq(ceq, cq);
+ if (cq_ctx_idx == IRDMA_INVALID_CQ_IDX)
+ goto exit;
+
+ ceq->reg_cq_size--;
+ if (cq_ctx_idx != ceq->reg_cq_size)
+ ceq->reg_cq[cq_ctx_idx] = ceq->reg_cq[ceq->reg_cq_size];
+ ceq->reg_cq[ceq->reg_cq_size] = NULL;
+
+exit:
+ spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
+}
+
+/**
+ * irdma_sc_cqp_init - Initialize buffers for a control Queue Pair
+ * @cqp: IWARP control queue pair pointer
+ * @info: IWARP control queue pair init info pointer
+ *
+ * Initializes the object and context buffers for a control Queue Pair.
+ */
+int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
+ struct irdma_cqp_init_info *info)
+{
+ struct irdma_ooo_cqp_op *ooo_op;
+ u32 num_ooo_ops;
+ u8 hw_sq_size;
+
+ if (info->sq_size > IRDMA_CQP_SW_SQSIZE_2048 ||
+ info->sq_size < IRDMA_CQP_SW_SQSIZE_4 ||
+ ((info->sq_size & (info->sq_size - 1))))
+ return -EINVAL;
+
+ hw_sq_size = irdma_get_encoded_wqe_size(info->sq_size,
+ IRDMA_QUEUE_TYPE_CQP);
+ cqp->size = sizeof(*cqp);
+ cqp->sq_size = info->sq_size;
+ cqp->hw_sq_size = hw_sq_size;
+ cqp->sq_base = info->sq;
+ cqp->host_ctx = info->host_ctx;
+ cqp->sq_pa = info->sq_pa;
+ cqp->host_ctx_pa = info->host_ctx_pa;
+ cqp->dev = info->dev;
+ cqp->struct_ver = info->struct_ver;
+ cqp->hw_maj_ver = info->hw_maj_ver;
+ cqp->hw_min_ver = info->hw_min_ver;
+ cqp->scratch_array = info->scratch_array;
+ cqp->polarity = 0;
+ cqp->en_datacenter_tcp = info->en_datacenter_tcp;
+ cqp->ena_vf_count = info->ena_vf_count;
+ cqp->hmc_profile = info->hmc_profile;
+ cqp->ceqs_per_vf = info->ceqs_per_vf;
+ cqp->disable_packed = info->disable_packed;
+ cqp->rocev2_rto_policy = info->rocev2_rto_policy;
+ cqp->protocol_used = info->protocol_used;
+ memcpy(&cqp->dcqcn_params, &info->dcqcn_params, sizeof(cqp->dcqcn_params));
+ if (cqp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ cqp->ooisc_blksize = info->ooisc_blksize;
+ cqp->rrsp_blksize = info->rrsp_blksize;
+ cqp->q1_blksize = info->q1_blksize;
+ cqp->xmit_blksize = info->xmit_blksize;
+ cqp->blksizes_valid = info->blksizes_valid;
+ cqp->ts_shift = info->ts_shift;
+ cqp->ts_override = info->ts_override;
+ cqp->en_fine_grained_timers = info->en_fine_grained_timers;
+ cqp->pe_en_vf_cnt = info->pe_en_vf_cnt;
+ cqp->ooo_op_array = info->ooo_op_array;
+ /* initialize the OOO lists */
+ INIT_LIST_HEAD(&cqp->ooo_avail);
+ INIT_LIST_HEAD(&cqp->ooo_pnd);
+ if (cqp->ooo_op_array) {
+ /* Populate avail list entries */
+ for (num_ooo_ops = 0, ooo_op = info->ooo_op_array;
+ num_ooo_ops < cqp->sq_size;
+ num_ooo_ops++, ooo_op++)
+ list_add(&ooo_op->list_entry, &cqp->ooo_avail);
+ }
+ }
+ info->dev->cqp = cqp;
+
+ IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size);
+ cqp->last_def_cmpl_ticket = 0;
+ cqp->sw_def_cmpl_ticket = 0;
+ cqp->requested_ops = 0;
+ atomic64_set(&cqp->completed_ops, 0);
+ /* for the cqp commands backlog. */
+ INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head);
+
+ writel(0, cqp->dev->hw_regs[IRDMA_CQPTAIL]);
+ if (cqp->dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2) {
+ writel(0, cqp->dev->hw_regs[IRDMA_CQPDB]);
+ writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
+ }
+
+ ibdev_dbg(to_ibdev(cqp->dev),
+ "WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%p] cqp[%p] polarity[x%04x]\n",
+ cqp->sq_size, cqp->hw_sq_size, cqp->sq_base,
+ (u64 *)(uintptr_t)cqp->sq_pa, cqp, cqp->polarity);
+ return 0;
+}
+
+/**
+ * irdma_sc_cqp_create - create cqp during bringup
+ * @cqp: struct for cqp hw
+ * @maj_err: If error, major err number
+ * @min_err: If error, minor err number
+ */
+int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err)
+{
+ u64 temp;
+ u8 hw_rev;
+ u32 cnt = 0, p1, p2, val = 0, err_code;
+ int ret_code;
+
+ hw_rev = cqp->dev->hw_attrs.uk_attrs.hw_rev;
+ cqp->sdbuf.size = ALIGN(IRDMA_UPDATE_SD_BUFF_SIZE * cqp->sq_size,
+ IRDMA_SD_BUF_ALIGNMENT);
+ cqp->sdbuf.va = dma_alloc_coherent(cqp->dev->hw->device,
+ cqp->sdbuf.size, &cqp->sdbuf.pa,
+ GFP_KERNEL);
+ if (!cqp->sdbuf.va)
+ return -ENOMEM;
+
+ spin_lock_init(&cqp->dev->cqp_lock);
+ spin_lock_init(&cqp->ooo_list_lock);
+
+ temp = FIELD_PREP(IRDMA_CQPHC_SQSIZE, cqp->hw_sq_size) |
+ FIELD_PREP(IRDMA_CQPHC_SVER, cqp->struct_ver) |
+ FIELD_PREP(IRDMA_CQPHC_DISABLE_PFPDUS, cqp->disable_packed) |
+ FIELD_PREP(IRDMA_CQPHC_CEQPERVF, cqp->ceqs_per_vf);
+ if (hw_rev >= IRDMA_GEN_2) {
+ temp |= FIELD_PREP(IRDMA_CQPHC_ROCEV2_RTO_POLICY,
+ cqp->rocev2_rto_policy) |
+ FIELD_PREP(IRDMA_CQPHC_PROTOCOL_USED,
+ cqp->protocol_used);
+ }
+ if (hw_rev >= IRDMA_GEN_3)
+ temp |= FIELD_PREP(IRDMA_CQPHC_EN_FINE_GRAINED_TIMERS,
+ cqp->en_fine_grained_timers);
+
+ set_64bit_val(cqp->host_ctx, 0, temp);
+ set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
+
+ temp = FIELD_PREP(IRDMA_CQPHC_ENABLED_VFS, cqp->ena_vf_count) |
+ FIELD_PREP(IRDMA_CQPHC_HMC_PROFILE, cqp->hmc_profile);
+
+ if (hw_rev >= IRDMA_GEN_3)
+ temp |= FIELD_PREP(IRDMA_CQPHC_OOISC_BLKSIZE,
+ cqp->ooisc_blksize) |
+ FIELD_PREP(IRDMA_CQPHC_RRSP_BLKSIZE,
+ cqp->rrsp_blksize) |
+ FIELD_PREP(IRDMA_CQPHC_Q1_BLKSIZE, cqp->q1_blksize) |
+ FIELD_PREP(IRDMA_CQPHC_XMIT_BLKSIZE,
+ cqp->xmit_blksize) |
+ FIELD_PREP(IRDMA_CQPHC_BLKSIZES_VALID,
+ cqp->blksizes_valid) |
+ FIELD_PREP(IRDMA_CQPHC_TIMESTAMP_OVERRIDE,
+ cqp->ts_override) |
+ FIELD_PREP(IRDMA_CQPHC_TS_SHIFT, cqp->ts_shift);
+ set_64bit_val(cqp->host_ctx, 16, temp);
+ set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
+ temp = FIELD_PREP(IRDMA_CQPHC_HW_MAJVER, cqp->hw_maj_ver) |
+ FIELD_PREP(IRDMA_CQPHC_HW_MINVER, cqp->hw_min_ver);
+ if (hw_rev >= IRDMA_GEN_2) {
+ temp |= FIELD_PREP(IRDMA_CQPHC_MIN_RATE, cqp->dcqcn_params.min_rate) |
+ FIELD_PREP(IRDMA_CQPHC_MIN_DEC_FACTOR, cqp->dcqcn_params.min_dec_factor);
+ }
+ set_64bit_val(cqp->host_ctx, 32, temp);
+ set_64bit_val(cqp->host_ctx, 40, 0);
+ temp = 0;
+ if (hw_rev >= IRDMA_GEN_2) {
+ temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_T, cqp->dcqcn_params.dcqcn_t) |
+ FIELD_PREP(IRDMA_CQPHC_RAI_FACTOR, cqp->dcqcn_params.rai_factor) |
+ FIELD_PREP(IRDMA_CQPHC_HAI_FACTOR, cqp->dcqcn_params.hai_factor);
+ }
+ set_64bit_val(cqp->host_ctx, 48, temp);
+ temp = 0;
+ if (hw_rev >= IRDMA_GEN_2) {
+ temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_B, cqp->dcqcn_params.dcqcn_b) |
+ FIELD_PREP(IRDMA_CQPHC_DCQCN_F, cqp->dcqcn_params.dcqcn_f) |
+ FIELD_PREP(IRDMA_CQPHC_CC_CFG_VALID, cqp->dcqcn_params.cc_cfg_valid) |
+ FIELD_PREP(IRDMA_CQPHC_RREDUCE_MPERIOD, cqp->dcqcn_params.rreduce_mperiod);
+ }
+ set_64bit_val(cqp->host_ctx, 56, temp);
+ print_hex_dump_debug("WQE: CQP_HOST_CTX WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, cqp->host_ctx, IRDMA_CQP_CTX_SIZE * 8, false);
+ p1 = cqp->host_ctx_pa >> 32;
+ p2 = (u32)cqp->host_ctx_pa;
+
+ writel(p1, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
+ writel(p2, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
+
+ do {
+ if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
+ ret_code = -ETIMEDOUT;
+ goto err;
+ }
+ udelay(cqp->dev->hw_attrs.max_sleep_count);
+ val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
+ } while (!val);
+
+ if (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_ERR)) {
+ ret_code = -EOPNOTSUPP;
+ goto err;
+ }
+
+ cqp->process_cqp_sds = irdma_update_sds_noccq;
+ return 0;
+
+err:
+ dma_free_coherent(cqp->dev->hw->device, cqp->sdbuf.size,
+ cqp->sdbuf.va, cqp->sdbuf.pa);
+ cqp->sdbuf.va = NULL;
+ err_code = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
+ *min_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MINOR_CODE, err_code);
+ *maj_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MAJOR_CODE, err_code);
+ return ret_code;
+}
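+
+/* Illustrative bring-up sketch (not part of this patch): the CQP is first
+ * initialized from a caller-provided irdma_cqp_init_info, then created.
+ * The init-info fields shown are assumed to be filled in by the caller.
+ *
+ * struct irdma_cqp_init_info info = {};
+ * u16 maj_err, min_err;
+ *
+ * info.dev = dev;
+ * info.sq_size = IRDMA_CQP_SW_SQSIZE_2048;
+ * ...set info.sq, info.sq_pa, info.host_ctx, info.host_ctx_pa,
+ * info.scratch_array and the remaining fields...
+ * if (!irdma_sc_cqp_init(cqp, &info))
+ * irdma_sc_cqp_create(cqp, &maj_err, &min_err);
+ */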
+
+/**
+ * irdma_sc_cqp_post_sq - post to the cqp's sq
+ * @cqp: struct for cqp hw
+ */
+void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp)
+{
+ writel(IRDMA_RING_CURRENT_HEAD(cqp->sq_ring), cqp->dev->cqp_db);
+
+ ibdev_dbg(to_ibdev(cqp->dev),
+ "WQE: CQP SQ head 0x%x tail 0x%x size 0x%x\n",
+ cqp->sq_ring.head, cqp->sq_ring.tail, cqp->sq_ring.size);
+}
+
+/**
+ * irdma_sc_cqp_get_next_send_wqe_idx - get next wqe on cqp sq and pass back the index
+ * @cqp: CQP HW structure
+ * @scratch: private data for CQP WQE
+ * @wqe_idx: WQE index of CQP SQ
+ */
+__le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
+ u32 *wqe_idx)
+{
+ __le64 *wqe = NULL;
+ int ret_code;
+
+ if (IRDMA_RING_FULL_ERR(cqp->sq_ring)) {
+ ibdev_dbg(to_ibdev(cqp->dev),
+ "WQE: CQP SQ is full, head 0x%x tail 0x%x size 0x%x\n",
+ cqp->sq_ring.head, cqp->sq_ring.tail,
+ cqp->sq_ring.size);
+ return NULL;
+ }
+ IRDMA_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
+ if (ret_code)
+ return NULL;
+
+ cqp->requested_ops++;
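+ /* toggle the valid-bit polarity each time the SQ wraps back to index 0 */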
+ if (!*wqe_idx)
+ cqp->polarity = !cqp->polarity;
+ wqe = cqp->sq_base[*wqe_idx].elem;
+ cqp->scratch_array[*wqe_idx] = scratch;
+ IRDMA_CQP_INIT_WQE(wqe);
+
+ return wqe;
+}
+
+/**
+ * irdma_sc_cqp_destroy - destroy cqp during close
+ * @cqp: struct for cqp hw
+ */
+int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp)
+{
+ u32 cnt = 0, val;
+ int ret_code = 0;
+
+ writel(0, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
+ writel(0, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
+ do {
+ if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
+ ret_code = -ETIMEDOUT;
+ break;
+ }
+ udelay(cqp->dev->hw_attrs.max_sleep_count);
+ val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
+ } while (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_DONE));
+
+ dma_free_coherent(cqp->dev->hw->device, cqp->sdbuf.size,
+ cqp->sdbuf.va, cqp->sdbuf.pa);
+ cqp->sdbuf.va = NULL;
+ return ret_code;
+}
+
+/**
+ * irdma_sc_ccq_arm - enable intr for control cq
+ * @ccq: ccq sc struct
+ */
+void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
+{
+ u64 temp_val;
+ u16 sw_cq_sel;
+ u8 arm_next_se;
+ u8 arm_seq_num;
+
+ get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);
+ sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
+ arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
+ arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
+ arm_seq_num++;
+ temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
+ FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, 1);
+ set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);
+
+ dma_wmb(); /* make sure shadow area is updated before arming */
+
+ writel(ccq->cq_uk.cq_id, ccq->dev->cq_arm_db);
+}
+
+/**
+ * irdma_sc_process_def_cmpl - process deferred or pending completion
+ * @cqp: CQP sc struct
+ * @info: CQP CQE info
+ * @wqe_idx: CQP WQE descriptor index
+ * @def_info: deferred op ticket value or out-of-order completion id
+ * @def_cmpl: true for deferred completion, false for pending (RCA)
+ */
+static void irdma_sc_process_def_cmpl(struct irdma_sc_cqp *cqp,
+ struct irdma_ccq_cqe_info *info,
+ u32 wqe_idx, u32 def_info, bool def_cmpl)
+{
+ struct irdma_ooo_cqp_op *ooo_op;
+ unsigned long flags;
+
+ /* Deferred and out-of-order completions share the same list of pending
+ * completions. Since the list can also be accessed from the AE handler,
+ * it must be protected by a lock.
+ */
+ spin_lock_irqsave(&cqp->ooo_list_lock, flags);
+
+ /* For deferred completions bump up SW completion ticket value. */
+ if (def_cmpl) {
+ cqp->last_def_cmpl_ticket = def_info;
+ cqp->sw_def_cmpl_ticket++;
+ }
+ if (!list_empty(&cqp->ooo_avail)) {
+ ooo_op = (struct irdma_ooo_cqp_op *)
+ list_entry(cqp->ooo_avail.next,
+ struct irdma_ooo_cqp_op, list_entry);
+
+ list_del(&ooo_op->list_entry);
+ ooo_op->scratch = info->scratch;
+ ooo_op->def_info = def_info;
+ ooo_op->sw_def_info = cqp->sw_def_cmpl_ticket;
+ ooo_op->deferred = def_cmpl;
+ ooo_op->wqe_idx = wqe_idx;
+ /* Pending completions must be chronologically ordered,
+ * so they are added at the end of the list.
+ */
+ list_add_tail(&ooo_op->list_entry, &cqp->ooo_pnd);
+ }
+ spin_unlock_irqrestore(&cqp->ooo_list_lock, flags);
+
+ info->pending = true;
+}
+
+/**
+ * irdma_sc_process_ooo_cmpl - process out-of-order (final) completion
+ * @cqp: CQP sc struct
+ * @info: CQP CQE info
+ * @def_info: out-of-order completion id
+ */
+static void irdma_sc_process_ooo_cmpl(struct irdma_sc_cqp *cqp,
+ struct irdma_ccq_cqe_info *info,
+ u32 def_info)
+{
+ struct irdma_ooo_cqp_op *ooo_op_tmp;
+ struct irdma_ooo_cqp_op *ooo_op;
+ unsigned long flags;
+
+ info->scratch = 0;
+
+ spin_lock_irqsave(&cqp->ooo_list_lock, flags);
+ list_for_each_entry_safe(ooo_op, ooo_op_tmp, &cqp->ooo_pnd,
+ list_entry) {
+ if (!ooo_op->deferred && ooo_op->def_info == def_info) {
+ list_del(&ooo_op->list_entry);
+ info->scratch = ooo_op->scratch;
+ list_add(&ooo_op->list_entry, &cqp->ooo_avail);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cqp->ooo_list_lock, flags);
+
+ if (!info->scratch)
+ ibdev_dbg(to_ibdev(cqp->dev),
+ "CQP: DEBUG_FW_OOO out-of-order completion with unknown def_info = 0x%x\n",
+ def_info);
+}
+
+/**
+ * irdma_sc_ccq_get_cqe_info - get ccq's cq entry
+ * @ccq: ccq sc struct
+ * @info: completion q entry to return
+ */
+int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
+ struct irdma_ccq_cqe_info *info)
+{
+ u32 def_info;
+ bool def_cmpl = false;
+ bool pend_cmpl = false;
+ bool ooo_final_cmpl = false;
+ u64 qp_ctx, temp, temp1;
+ __le64 *cqe;
+ struct irdma_sc_cqp *cqp;
+ u32 wqe_idx;
+ u32 error;
+ u8 polarity;
+ int ret_code = 0;
+ unsigned long flags;
+
+ if (ccq->cq_uk.avoid_mem_cflct)
+ cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(&ccq->cq_uk);
+ else
+ cqe = IRDMA_GET_CURRENT_CQ_ELEM(&ccq->cq_uk);
+
+ get_64bit_val(cqe, 24, &temp);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, temp);
+ if (polarity != ccq->cq_uk.polarity)
+ return -ENOENT;
+
+ /* Ensure CQE contents are read after valid bit is checked */
+ dma_rmb();
+
+ get_64bit_val(cqe, 8, &qp_ctx);
+ cqp = (struct irdma_sc_cqp *)(unsigned long)qp_ctx;
+ info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, temp);
+ info->maj_err_code = IRDMA_CQPSQ_MAJ_NO_ERROR;
+ info->min_err_code = (u16)FIELD_GET(IRDMA_CQ_MINERR, temp);
+ if (info->error) {
+ info->maj_err_code = (u16)FIELD_GET(IRDMA_CQ_MAJERR, temp);
+ error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
+ ibdev_dbg(to_ibdev(cqp->dev),
+ "CQP: CQPERRCODES error_code[x%08X]\n", error);
+ }
+
+ wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, temp);
+ info->scratch = cqp->scratch_array[wqe_idx];
+
+ get_64bit_val(cqe, 16, &temp1);
+ info->op_ret_val = (u32)FIELD_GET(IRDMA_CCQ_OPRETVAL, temp1);
+ if (cqp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ def_cmpl = info->maj_err_code == IRDMA_CQPSQ_MAJ_NO_ERROR &&
+ info->min_err_code == IRDMA_CQPSQ_MIN_DEF_CMPL;
+ def_info = (u32)FIELD_GET(IRDMA_CCQ_DEFINFO, temp1);
+
+ pend_cmpl = info->maj_err_code == IRDMA_CQPSQ_MAJ_NO_ERROR &&
+ info->min_err_code == IRDMA_CQPSQ_MIN_OOO_CMPL;
+
+ ooo_final_cmpl = (bool)FIELD_GET(IRDMA_OOO_CMPL, temp);
+
+ if (def_cmpl || pend_cmpl || ooo_final_cmpl) {
+ if (ooo_final_cmpl)
+ irdma_sc_process_ooo_cmpl(cqp, info, def_info);
+ else
+ irdma_sc_process_def_cmpl(cqp, info, wqe_idx,
+ def_info, def_cmpl);
+ }
+ }
+
+ get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
+ info->op_code = (u8)FIELD_GET(IRDMA_CQPSQ_OPCODE, temp1);
+ info->cqp = cqp;
+
+ /* move the head for cq */
+ IRDMA_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
+ if (!IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring))
+ ccq->cq_uk.polarity ^= 1;
+
+ /* update cq tail in cq shadow memory also */
+ IRDMA_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
+ set_64bit_val(ccq->cq_uk.shadow_area, 0,
+ IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring));
+
+ dma_wmb(); /* make sure shadow area is updated before moving tail */
+
+ spin_lock_irqsave(&cqp->dev->cqp_lock, flags);
+ if (!ooo_final_cmpl)
+ IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
+ spin_unlock_irqrestore(&cqp->dev->cqp_lock, flags);
+
+ /* Do not increment completed_ops counter on pending or deferred
+ * completions.
+ */
+ if (pend_cmpl || def_cmpl)
+ return ret_code;
+ atomic64_inc(&cqp->completed_ops);
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
+ * @cqp: struct for cqp hw
+ * @op_code: cqp opcode for completion
+ * @compl_info: completion q entry to return
+ */
+int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code,
+ struct irdma_ccq_cqe_info *compl_info)
+{
+ struct irdma_ccq_cqe_info info = {};
+ struct irdma_sc_cq *ccq;
+ int ret_code = 0;
+ u32 cnt = 0;
+
+ ccq = cqp->dev->ccq;
+ while (1) {
+ if (cnt++ > 100 * cqp->dev->hw_attrs.max_done_count)
+ return -ETIMEDOUT;
+
+ if (irdma_sc_ccq_get_cqe_info(ccq, &info)) {
+ udelay(cqp->dev->hw_attrs.max_sleep_count);
+ continue;
+ }
+ if (info.error && info.op_code != IRDMA_CQP_OP_QUERY_STAG) {
+ ret_code = -EIO;
+ break;
+ }
+ /* make sure op code matches */
+ if (op_code == info.op_code)
+ break;
+ ibdev_dbg(to_ibdev(cqp->dev),
+ "WQE: opcode mismatch for my op code 0x%x, returned opcode %x\n",
+ op_code, info.op_code);
+ }
+
+ if (compl_info)
+ memcpy(compl_info, &info, sizeof(*compl_info));
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_manage_hmc_pm_func_table - manage the HMC PM function table
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @info: info for the manage function table operation
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp,
+ struct irdma_hmc_fcn_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 8, 0);
+ set_64bit_val(wqe, 16, 0);
+ set_64bit_val(wqe, 32, 0);
+ set_64bit_val(wqe, 40, 0);
+ set_64bit_val(wqe, 48, 0);
+ set_64bit_val(wqe, 56, 0);
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_MHMC_VFIDX, info->vf_id) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE,
+ IRDMA_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE) |
+ FIELD_PREP(IRDMA_CQPSQ_MHMC_FREEPMFN, info->free_fcn) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: MANAGE_HMC_PM_FUNC_TABLE WQE",
+ DUMP_PREFIX_OFFSET, 16, 8, wqe,
+ IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_commit_fpm_val_done - wait for cqp wqe completion
+ * for fpm commit
+ * @cqp: struct for cqp hw
+ */
+static int irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp)
+{
+ return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_COMMIT_FPM_VAL,
+ NULL);
+}
+
+/**
+ * irdma_sc_commit_fpm_val - cqp wqe for commit fpm values
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @hmc_fn_id: hmc function id
+ * @commit_fpm_mem: Memory for fpm values
+ * @post_sq: flag for cqp db to ring
+ * @wait_type: poll ccq or cqp registers for cqp completion
+ */
+static int irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
+ u8 hmc_fn_id,
+ struct irdma_dma_mem *commit_fpm_mem,
+ bool post_sq, u8 wait_type)
+{
+ __le64 *wqe;
+ u64 hdr;
+ u32 tail, val, error;
+ int ret_code = 0;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 16, hmc_fn_id);
+ set_64bit_val(wqe, 32, commit_fpm_mem->pa);
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_BUFSIZE, IRDMA_COMMIT_FPM_BUF_SIZE) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_COMMIT_FPM_VAL) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: COMMIT_FPM_VAL WQE", DUMP_PREFIX_OFFSET,
+ 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
+
+ if (post_sq) {
+ irdma_sc_cqp_post_sq(cqp);
+ if (wait_type == IRDMA_CQP_WAIT_POLL_REGS)
+ ret_code = irdma_cqp_poll_registers(cqp, tail,
+ cqp->dev->hw_attrs.max_done_count);
+ else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ)
+ ret_code = irdma_sc_commit_fpm_val_done(cqp);
+ }
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_query_fpm_val_done - poll for cqp wqe completion for
+ * query fpm
+ * @cqp: struct for cqp hw
+ */
+static int irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp)
+{
+ return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_QUERY_FPM_VAL,
+ NULL);
+}
+
+/**
+ * irdma_sc_query_fpm_val - cqp wqe query fpm values
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @hmc_fn_id: hmc function id
+ * @query_fpm_mem: memory for return fpm values
+ * @post_sq: flag for cqp db to ring
+ * @wait_type: poll ccq or cqp registers for cqp completion
+ */
+static int irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
+ u8 hmc_fn_id,
+ struct irdma_dma_mem *query_fpm_mem,
+ bool post_sq, u8 wait_type)
+{
+ __le64 *wqe;
+ u64 hdr;
+ u32 tail, val, error;
+ int ret_code = 0;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 16, hmc_fn_id);
+ set_64bit_val(wqe, 32, query_fpm_mem->pa);
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_QUERY_FPM_VAL) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: QUERY_FPM WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
+
+ if (post_sq) {
+ irdma_sc_cqp_post_sq(cqp);
+ if (wait_type == IRDMA_CQP_WAIT_POLL_REGS)
+ ret_code = irdma_cqp_poll_registers(cqp, tail,
+ cqp->dev->hw_attrs.max_done_count);
+ else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ)
+ ret_code = irdma_sc_query_fpm_val_done(cqp);
+ }
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_ceq_init - initialize ceq
+ * @ceq: ceq sc structure
+ * @info: ceq initialization info
+ */
+int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
+ struct irdma_ceq_init_info *info)
+{
+ u32 pble_obj_cnt;
+
+ if (info->elem_cnt < info->dev->hw_attrs.min_hw_ceq_size ||
+ info->elem_cnt > info->dev->hw_attrs.max_hw_ceq_size)
+ return -EINVAL;
+
+ if (info->ceq_id >= info->dev->hmc_fpm_misc.max_ceqs)
+ return -EINVAL;
+ pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+
+ if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
+ return -EINVAL;
+
+ ceq->size = sizeof(*ceq);
+ ceq->ceqe_base = (struct irdma_ceqe *)info->ceqe_base;
+ ceq->ceq_id = info->ceq_id;
+ ceq->dev = info->dev;
+ ceq->elem_cnt = info->elem_cnt;
+ ceq->ceq_elem_pa = info->ceqe_pa;
+ ceq->virtual_map = info->virtual_map;
+ ceq->itr_no_expire = info->itr_no_expire;
+ ceq->reg_cq = info->reg_cq;
+ ceq->reg_cq_size = 0;
+ spin_lock_init(&ceq->req_cq_lock);
+ ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
+ ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
+ ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
+ ceq->tph_en = info->tph_en;
+ ceq->tph_val = info->tph_val;
+ ceq->vsi_idx = info->vsi_idx;
+ ceq->polarity = 1;
+ IRDMA_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
+ ceq->dev->ceq[info->ceq_id] = ceq;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_ceq_create - create ceq wqe
+ * @ceq: ceq sc structure
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch,
+ bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+
+ cqp = ceq->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+ set_64bit_val(wqe, 16, ceq->elem_cnt);
+ set_64bit_val(wqe, 32,
+ (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
+ set_64bit_val(wqe, 48,
+ (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_TPHVAL, ceq->tph_val) |
+ FIELD_PREP(IRDMA_CQPSQ_PASID, ceq->pasid) |
+ FIELD_PREP(IRDMA_CQPSQ_VSIIDX, ceq->vsi_idx));
+ hdr = FIELD_PREP(IRDMA_CQPSQ_CEQ_CEQID, ceq->ceq_id) |
+ FIELD_PREP(IRDMA_CQPSQ_CEQ_CEQID_HIGH, ceq->ceq_id >> 10) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CEQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_CEQ_ITRNOEXPIRE, ceq->itr_no_expire) |
+ FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_PASID_VALID, ceq->pasid_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: CEQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_cceq_create_done - poll for control ceq wqe to complete
+ * @ceq: ceq sc structure
+ */
+static int irdma_sc_cceq_create_done(struct irdma_sc_ceq *ceq)
+{
+ struct irdma_sc_cqp *cqp;
+
+ cqp = ceq->dev->cqp;
+ return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CEQ,
+ NULL);
+}
+
+/**
+ * irdma_sc_cceq_destroy_done - poll for destroy cceq to complete
+ * @ceq: ceq sc structure
+ */
+int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
+{
+ struct irdma_sc_cqp *cqp;
+
+ if (ceq->reg_cq)
+ irdma_sc_remove_cq_ctx(ceq, ceq->dev->ccq);
+
+ cqp = ceq->dev->cqp;
+ cqp->process_cqp_sds = irdma_update_sds_noccq;
+
+ return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_DESTROY_CEQ,
+ NULL);
+}
+
+/**
+ * irdma_sc_cceq_create - create cceq
+ * @ceq: ceq sc structure
+ * @scratch: u64 saved to be used during cqp completion
+ */
+int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
+{
+ int ret_code;
+ struct irdma_sc_dev *dev = ceq->dev;
+
+ dev->ccq->vsi_idx = ceq->vsi_idx;
+ if (ceq->reg_cq) {
+ ret_code = irdma_sc_add_cq_ctx(ceq, ceq->dev->ccq);
+ if (ret_code)
+ return ret_code;
+ }
+
+ ret_code = irdma_sc_ceq_create(ceq, scratch, true);
+ if (!ret_code)
+ return irdma_sc_cceq_create_done(ceq);
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_ceq_destroy - destroy ceq
+ * @ceq: ceq sc structure
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+
+ cqp = ceq->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 16, ceq->elem_cnt);
+ set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_PASID, ceq->pasid));
+ hdr = ceq->ceq_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CEQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_PASID_VALID, ceq->pasid_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: CEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_process_ceq - process ceq
+ * @dev: sc device struct
+ * @ceq: ceq sc structure
+ *
+ * The caller is expected to serialize this function with cleanup_ceqes()
+ * because both functions manipulate the same ceq.
+ */
+void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq)
+{
+ u64 temp;
+ __le64 *ceqe;
+ struct irdma_sc_cq *cq = NULL;
+ struct irdma_sc_cq *temp_cq;
+ u8 polarity;
+ u32 cq_idx;
+ unsigned long flags;
+
+ do {
+ cq_idx = 0;
+ ceqe = IRDMA_GET_CURRENT_CEQ_ELEM(ceq);
+ get_64bit_val(ceqe, 0, &temp);
+ polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
+ if (polarity != ceq->polarity)
+ return NULL;
+
+ temp_cq = (struct irdma_sc_cq *)(unsigned long)(temp << 1);
+ if (!temp_cq) {
+ cq_idx = IRDMA_INVALID_CQ_IDX;
+ IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
+
+ if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
+ ceq->polarity ^= 1;
+ continue;
+ }
+
+ cq = temp_cq;
+ if (ceq->reg_cq) {
+ spin_lock_irqsave(&ceq->req_cq_lock, flags);
+ cq_idx = irdma_sc_find_reg_cq(ceq, cq);
+ spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
+ }
+
+ IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
+ if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
+ ceq->polarity ^= 1;
+ } while (cq_idx == IRDMA_INVALID_CQ_IDX);
+
+ if (cq)
+ irdma_sc_cq_ack(cq);
+ return cq;
+}
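+
+/*
+ * Illustrative sketch, not part of this patch: one way a caller could pair
+ * irdma_sc_process_ceq() with irdma_sc_cleanup_ceqes() while honoring the
+ * serialization requirement above. handle_cq_completions() is a hypothetical
+ * helper.
+ *
+ *	// CEQ interrupt handler: drain every completed CQ on this CEQ
+ *	while ((cq = irdma_sc_process_ceq(dev, ceq)))
+ *		handle_cq_completions(cq);
+ *
+ *	// CQ destroy path, serialized against the handler:
+ *	irdma_sc_cleanup_ceqes(cq, ceq);
+ */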
+
+/**
+ * irdma_sc_cleanup_ceqes - clear the valid ceqes ctx matching the cq
+ * @cq: cq for which the ceqes need to be cleaned up
+ * @ceq: ceq ptr
+ *
+ * The function is called after the cq is destroyed to clean up
+ * its pending ceqe entries. The caller is expected to serialize this
+ * function with process_ceq(), which runs in interrupt context.
+ */
+void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq)
+{
+ struct irdma_sc_cq *next_cq;
+ u8 ceq_polarity = ceq->polarity;
+ __le64 *ceqe;
+ u8 polarity;
+ u64 temp;
+ int next;
+ u32 i;
+
+ next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, 0);
+
+ for (i = 1; i <= IRDMA_RING_SIZE(*ceq); i++) {
+ ceqe = IRDMA_GET_CEQ_ELEM_AT_POS(ceq, next);
+
+ get_64bit_val(ceqe, 0, &temp);
+ polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
+ if (polarity != ceq_polarity)
+ return;
+
+ next_cq = (struct irdma_sc_cq *)(unsigned long)(temp << 1);
+ if (cq == next_cq)
+ set_64bit_val(ceqe, 0, temp & IRDMA_CEQE_VALID);
+
+ next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, i);
+ if (!next)
+ ceq_polarity ^= 1;
+ }
+}
+
+/**
+ * irdma_sc_aeq_init - initialize aeq
+ * @aeq: aeq structure ptr
+ * @info: aeq initialization info
+ */
+int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
+ struct irdma_aeq_init_info *info)
+{
+ u32 pble_obj_cnt;
+
+ if (info->elem_cnt < info->dev->hw_attrs.min_hw_aeq_size ||
+ info->elem_cnt > info->dev->hw_attrs.max_hw_aeq_size)
+ return -EINVAL;
+
+ pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+
+ if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
+ return -EINVAL;
+
+ aeq->size = sizeof(*aeq);
+ aeq->polarity = 1;
+ aeq->aeqe_base = (struct irdma_sc_aeqe *)info->aeqe_base;
+ aeq->dev = info->dev;
+ aeq->elem_cnt = info->elem_cnt;
+ aeq->aeq_elem_pa = info->aeq_elem_pa;
+ IRDMA_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);
+ aeq->virtual_map = info->virtual_map;
+ aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
+ aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
+ aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
+ aeq->msix_idx = info->msix_idx;
+ info->dev->aeq = aeq;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_aeq_create - create aeq
+ * @aeq: aeq structure ptr
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_aeq_create(struct irdma_sc_aeq *aeq, u64 scratch,
+ bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+
+ cqp = aeq->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+ set_64bit_val(wqe, 16, aeq->elem_cnt);
+ set_64bit_val(wqe, 32,
+ (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
+ set_64bit_val(wqe, 48,
+ (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_PASID, aeq->pasid));
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_AEQ) |
+ FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_PASID_VALID, aeq->pasid_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: AEQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_aeq_destroy - destroy aeq during close
+ * @aeq: aeq structure ptr
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch,
+ bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ struct irdma_sc_dev *dev;
+ u64 hdr;
+
+ dev = aeq->dev;
+ if (dev->privileged)
+ writel(0, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+ set_64bit_val(wqe, 16, aeq->elem_cnt);
+ set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_PASID, aeq->pasid));
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_AEQ) |
+ FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_PASID_VALID, aeq->pasid_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: AEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * irdma_sc_get_next_aeqe - get next aeq entry
+ * @aeq: aeq structure ptr
+ * @info: aeqe info to be returned
+ */
+int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
+ struct irdma_aeqe_info *info)
+{
+ u64 temp, compl_ctx;
+ __le64 *aeqe;
+ u8 ae_src;
+ u8 polarity;
+
+ aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq);
+ get_64bit_val(aeqe, 8, &temp);
+ polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp);
+
+ if (aeq->polarity != polarity)
+ return -ENOENT;
+
+ /* Ensure AEQE contents are read after valid bit is checked */
+ dma_rmb();
+
+ get_64bit_val(aeqe, 0, &compl_ctx);
+
+ print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ aeqe, 16, false);
+
+ if (aeq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC_GEN_3, temp);
+ info->wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX_GEN_3,
+ temp);
+ info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_GEN_3, temp);
+ info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE_GEN_3, temp);
+ info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE_GEN_3, compl_ctx);
+ info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE_GEN_3, temp);
+ info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA_GEN_3, compl_ctx);
+ info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW_GEN_3, temp);
+ info->compl_ctx = FIELD_GET(IRDMA_AEQE_CMPL_CTXT, compl_ctx);
+ compl_ctx = FIELD_GET(IRDMA_AEQE_CMPL_CTXT, compl_ctx) << IRDMA_AEQE_CMPL_CTXT_S;
+ } else {
+ ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC, temp);
+ info->wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp);
+ info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_LOW, temp) |
+ ((u32)FIELD_GET(IRDMA_AEQE_QPCQID_HI, temp) << 18);
+ info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE, temp);
+ info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE, temp);
+ info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE, temp);
+ info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA, temp);
+ info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW,
+ temp);
+ }
+
+ info->ae_src = ae_src;
+ switch (info->ae_id) {
+ case IRDMA_AE_SRQ_LIMIT:
+ info->srq = true;
+ /* [63:6] from CMPL_CTXT, [5:0] from WQDESCIDX. */
+ info->compl_ctx = compl_ctx;
+ ae_src = IRDMA_AE_SOURCE_RSVD;
+ break;
+ case IRDMA_AE_PRIV_OPERATION_DENIED:
+ case IRDMA_AE_AMP_INVALIDATE_TYPE1_MW:
+ case IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW:
+ case IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG:
+ case IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ case IRDMA_AE_UDA_XMIT_BAD_PD:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
+ case IRDMA_AE_BAD_CLOSE:
+ case IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO:
+ case IRDMA_AE_STAG_ZERO_INVALID:
+ case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
+ case IRDMA_AE_IB_INVALID_REQUEST:
+ case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
+ case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
+ case IRDMA_AE_IB_REMOTE_OP_ERROR:
+ case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
+ case IRDMA_AE_DDP_UBE_INVALID_MO:
+ case IRDMA_AE_DDP_UBE_INVALID_QN:
+ case IRDMA_AE_DDP_NO_L_BIT:
+ case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
+ case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
+ case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
+ case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
+ case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
+ case IRDMA_AE_INVALID_ARP_ENTRY:
+ case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
+ case IRDMA_AE_STALE_ARP_ENTRY:
+ case IRDMA_AE_INVALID_AH_ENTRY:
+ case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
+ case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+ case IRDMA_AE_LLP_TOO_MANY_RNRS:
+ case IRDMA_AE_REMOTE_QP_CATASTROPHIC:
+ case IRDMA_AE_LOCAL_QP_CATASTROPHIC:
+ case IRDMA_AE_RCE_QP_CATASTROPHIC:
+ case IRDMA_AE_LLP_DOUBT_REACHABILITY:
+ case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
+ case IRDMA_AE_RESET_SENT:
+ case IRDMA_AE_TERMINATE_SENT:
+ case IRDMA_AE_RESET_NOT_SENT:
+ case IRDMA_AE_LCE_QP_CATASTROPHIC:
+ case IRDMA_AE_QP_SUSPEND_COMPLETE:
+ case IRDMA_AE_UDA_L4LEN_INVALID:
+ info->qp = true;
+ info->compl_ctx = compl_ctx;
+ break;
+ case IRDMA_AE_LCE_CQ_CATASTROPHIC:
+ info->cq = true;
+ info->compl_ctx = compl_ctx << 1;
+ ae_src = IRDMA_AE_SOURCE_RSVD;
+ break;
+ case IRDMA_AE_CQP_DEFERRED_COMPLETE:
+ info->def_info = info->wqe_idx;
+ ae_src = IRDMA_AE_SOURCE_RSVD;
+ break;
+ case IRDMA_AE_ROCE_EMPTY_MCG:
+ case IRDMA_AE_ROCE_BAD_MC_IP_ADDR:
+ case IRDMA_AE_ROCE_BAD_MC_QPID:
+ case IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH:
+ fallthrough;
+ case IRDMA_AE_LLP_CONNECTION_RESET:
+ case IRDMA_AE_LLP_SYN_RECEIVED:
+ case IRDMA_AE_LLP_FIN_RECEIVED:
+ case IRDMA_AE_LLP_CLOSE_COMPLETE:
+ case IRDMA_AE_LLP_TERMINATE_RECEIVED:
+ case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
+ ae_src = IRDMA_AE_SOURCE_RSVD;
+ info->qp = true;
+ info->compl_ctx = compl_ctx;
+ break;
+ default:
+ break;
+ }
+
+ switch (ae_src) {
+ case IRDMA_AE_SOURCE_RQ:
+ case IRDMA_AE_SOURCE_RQ_0011:
+ info->qp = true;
+ info->rq = true;
+ info->compl_ctx = compl_ctx;
+ info->err_rq_idx_valid = true;
+ break;
+ case IRDMA_AE_SOURCE_CQ:
+ case IRDMA_AE_SOURCE_CQ_0110:
+ case IRDMA_AE_SOURCE_CQ_1010:
+ case IRDMA_AE_SOURCE_CQ_1110:
+ info->cq = true;
+ info->compl_ctx = compl_ctx << 1;
+ break;
+ case IRDMA_AE_SOURCE_SQ:
+ case IRDMA_AE_SOURCE_SQ_0111:
+ info->qp = true;
+ info->sq = true;
+ info->compl_ctx = compl_ctx;
+ break;
+ case IRDMA_AE_SOURCE_IN_RR_WR:
+ info->qp = true;
+ if (aeq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ info->err_rq_idx_valid = true;
+ info->compl_ctx = compl_ctx;
+ info->in_rdrsp_wr = true;
+ break;
+ case IRDMA_AE_SOURCE_IN_RR_WR_1011:
+ info->qp = true;
+ if (aeq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ info->sq = true;
+ info->err_rq_idx_valid = true;
+ }
+ info->compl_ctx = compl_ctx;
+ info->in_rdrsp_wr = true;
+ break;
+ case IRDMA_AE_SOURCE_OUT_RR:
+ case IRDMA_AE_SOURCE_OUT_RR_1111:
+ info->qp = true;
+ info->compl_ctx = compl_ctx;
+ info->out_rdrsp = true;
+ break;
+ case IRDMA_AE_SOURCE_RSVD:
+ default:
+ break;
+ }
+
+ IRDMA_RING_MOVE_TAIL(aeq->aeq_ring);
+ if (!IRDMA_RING_CURRENT_TAIL(aeq->aeq_ring))
+ aeq->polarity ^= 1;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_repost_aeq_entries - repost completed aeq entries
+ * @dev: sc device struct
+ * @count: number of completed aeq entries to repost
+ */
+void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
+{
+ writel(count, dev->hw_regs[IRDMA_AEQALLOC]);
+}
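+
+/*
+ * Illustrative sketch, not part of this patch: a minimal AEQ drain loop,
+ * assuming the caller owns "dev", "aeq" and a local irdma_aeqe_info.
+ * handle_ae() is a hypothetical per-event handler.
+ *
+ *	struct irdma_aeqe_info info = {};
+ *	u32 processed = 0;
+ *
+ *	while (!irdma_sc_get_next_aeqe(aeq, &info)) {
+ *		handle_ae(&info);
+ *		processed++;
+ *	}
+ *	if (processed)
+ *		irdma_sc_repost_aeq_entries(dev, processed);
+ */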
+
+/**
+ * irdma_sc_ccq_init - initialize control cq
+ * @cq: sc's cq struct
+ * @info: info for control cq initialization
+ */
+int irdma_sc_ccq_init(struct irdma_sc_cq *cq, struct irdma_ccq_init_info *info)
+{
+ u32 pble_obj_cnt;
+
+ if (info->num_elem < info->dev->hw_attrs.uk_attrs.min_hw_cq_size ||
+ info->num_elem > info->dev->hw_attrs.uk_attrs.max_hw_cq_size)
+ return -EINVAL;
+
+ if (info->ceq_id >= info->dev->hmc_fpm_misc.max_ceqs)
+ return -EINVAL;
+
+ pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+
+ if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
+ return -EINVAL;
+
+ cq->cq_pa = info->cq_pa;
+ cq->cq_uk.cq_base = info->cq_base;
+ cq->shadow_area_pa = info->shadow_area_pa;
+ cq->cq_uk.shadow_area = info->shadow_area;
+ cq->shadow_read_threshold = info->shadow_read_threshold;
+ cq->dev = info->dev;
+ cq->ceq_id = info->ceq_id;
+ cq->cq_uk.cq_size = info->num_elem;
+ cq->cq_type = IRDMA_CQ_TYPE_CQP;
+ cq->ceqe_mask = info->ceqe_mask;
+ IRDMA_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);
+ cq->cq_uk.cq_id = 0; /* control cq is id 0 always */
+ cq->ceq_id_valid = info->ceq_id_valid;
+ cq->tph_en = info->tph_en;
+ cq->tph_val = info->tph_val;
+ cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;
+ cq->pbl_list = info->pbl_list;
+ cq->virtual_map = info->virtual_map;
+ cq->pbl_chunk_size = info->pbl_chunk_size;
+ cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+ cq->cq_uk.polarity = true;
+ cq->vsi = info->vsi;
+ cq->cq_uk.cq_ack_db = cq->dev->cq_ack_db;
+
+ /* Only applicable to CQs other than CCQ so initialize to zero */
+ cq->cq_uk.cqe_alloc_db = NULL;
+
+ info->dev->ccq = cq;
+ return 0;
+}
+
+/**
+ * irdma_sc_ccq_create_done - poll cqp for ccq create
+ * @ccq: ccq sc struct
+ */
+static inline int irdma_sc_ccq_create_done(struct irdma_sc_cq *ccq)
+{
+ struct irdma_sc_cqp *cqp;
+
+ cqp = ccq->dev->cqp;
+
+ return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CQ, NULL);
+}
+
+/**
+ * irdma_sc_ccq_create - create control cq
+ * @ccq: ccq sc struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @check_overflow: overflow flag for ccq
+ * @post_sq: flag for cqp db to ring
+ */
+int irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
+ bool check_overflow, bool post_sq)
+{
+ int ret_code;
+
+ ret_code = irdma_sc_cq_create(ccq, scratch, check_overflow, post_sq);
+ if (ret_code)
+ return ret_code;
+
+ if (post_sq) {
+ ret_code = irdma_sc_ccq_create_done(ccq);
+ if (ret_code)
+ return ret_code;
+ }
+ ccq->dev->cqp->process_cqp_sds = irdma_cqp_sds_cmd;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_ccq_destroy - destroy ccq during close
+ * @ccq: ccq sc struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+int irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+ int ret_code = 0;
+ u32 tail, val, error;
+
+ cqp = ccq->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
+ set_64bit_val(wqe, 8, (uintptr_t)ccq >> 1);
+ set_64bit_val(wqe, 40, ccq->shadow_area_pa);
+
+ hdr = ccq->cq_uk.cq_id |
+ FLD_LS_64(ccq->dev, (ccq->ceq_id_valid ? ccq->ceq_id : 0),
+ IRDMA_CQPSQ_CQ_CEQID) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, ccq->ceqe_mask) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, ccq->ceq_id_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_TPHEN, ccq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, ccq->cq_uk.avoid_mem_cflct) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: CCQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
+
+ if (post_sq) {
+ irdma_sc_cqp_post_sq(cqp);
+ ret_code = irdma_cqp_poll_registers(cqp, tail,
+ cqp->dev->hw_attrs.max_done_count);
+ }
+
+ cqp->process_cqp_sds = irdma_update_sds_noccq;
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
+ * @dev: ptr to irdma_dev struct
+ * @hmc_fn_id: hmc function id
+ */
+int irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id)
+{
+ struct irdma_hmc_info *hmc_info;
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc;
+ struct irdma_dma_mem query_fpm_mem;
+ int ret_code = 0;
+ u8 wait_type;
+
+ hmc_info = dev->hmc_info;
+ hmc_fpm_misc = &dev->hmc_fpm_misc;
+ query_fpm_mem.pa = dev->fpm_query_buf_pa;
+ query_fpm_mem.va = dev->fpm_query_buf;
+ hmc_info->hmc_fn_id = hmc_fn_id;
+ wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS;
+
+ ret_code = irdma_sc_query_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
+ &query_fpm_mem, true, wait_type);
+ if (ret_code)
+ return ret_code;
+
+ /* parse the fpm_query_buf and fill hmc obj info */
+ ret_code = irdma_sc_parse_fpm_query_buf(dev, query_fpm_mem.va, hmc_info,
+ hmc_fpm_misc);
+
+ print_hex_dump_debug("HMC: QUERY FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
+ 8, query_fpm_mem.va, IRDMA_QUERY_FPM_BUF_SIZE,
+ false);
+ return ret_code;
+}
+
+/**
+ * irdma_set_loc_mem() - set a local memory bit field
+ * @buf: ptr to a buffer where local memory gets enabled
+ */
+static void irdma_set_loc_mem(__le64 *buf)
+{
+ u64 loc_mem_en = BIT_ULL(ENABLE_LOC_MEM);
+ u32 offset;
+ u64 temp;
+
+ for (offset = 0; offset < IRDMA_COMMIT_FPM_BUF_SIZE;
+ offset += sizeof(__le64)) {
+ if (offset == IRDMA_PBLE_COMMIT_OFFSET)
+ continue;
+ get_64bit_val(buf, offset, &temp);
+ if (temp)
+ set_64bit_val(buf, offset, temp | loc_mem_en);
+ }
+}
+
+/**
+ * irdma_sc_cfg_iw_fpm() - commits hmc obj cnt values using cqp
+ * command and populates fpm base address in hmc_info
+ * @dev: ptr to irdma_dev struct
+ * @hmc_fn_id: hmc function id
+ */
+static int irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, u8 hmc_fn_id)
+{
+ struct irdma_hmc_info *hmc_info;
+ struct irdma_hmc_obj_info *obj_info;
+ __le64 *buf;
+ struct irdma_dma_mem commit_fpm_mem;
+ int ret_code = 0;
+ u8 wait_type;
+
+ hmc_info = dev->hmc_info;
+ obj_info = hmc_info->hmc_obj;
+ buf = dev->fpm_commit_buf;
+
+ set_64bit_val(buf, 0, (u64)obj_info[IRDMA_HMC_IW_QP].cnt);
+ set_64bit_val(buf, 8, (u64)obj_info[IRDMA_HMC_IW_CQ].cnt);
+ set_64bit_val(buf, 16, (u64)obj_info[IRDMA_HMC_IW_SRQ].cnt);
+ set_64bit_val(buf, 24, (u64)obj_info[IRDMA_HMC_IW_HTE].cnt);
+ set_64bit_val(buf, 32, (u64)obj_info[IRDMA_HMC_IW_ARP].cnt);
+ set_64bit_val(buf, 40, (u64)0); /* RSVD */
+ set_64bit_val(buf, 48, (u64)obj_info[IRDMA_HMC_IW_MR].cnt);
+ set_64bit_val(buf, 56, (u64)obj_info[IRDMA_HMC_IW_XF].cnt);
+ set_64bit_val(buf, 64, (u64)obj_info[IRDMA_HMC_IW_XFFL].cnt);
+ set_64bit_val(buf, 72, (u64)obj_info[IRDMA_HMC_IW_Q1].cnt);
+ set_64bit_val(buf, 80, (u64)obj_info[IRDMA_HMC_IW_Q1FL].cnt);
+ set_64bit_val(buf, 88,
+ (u64)obj_info[IRDMA_HMC_IW_TIMER].cnt);
+ set_64bit_val(buf, 96,
+ (u64)obj_info[IRDMA_HMC_IW_FSIMC].cnt);
+ set_64bit_val(buf, 104,
+ (u64)obj_info[IRDMA_HMC_IW_FSIAV].cnt);
+ set_64bit_val(buf, 112,
+ (u64)obj_info[IRDMA_HMC_IW_PBLE].cnt);
+ set_64bit_val(buf, 120, (u64)0); /* RSVD */
+ set_64bit_val(buf, 128, (u64)obj_info[IRDMA_HMC_IW_RRF].cnt);
+ set_64bit_val(buf, 136,
+ (u64)obj_info[IRDMA_HMC_IW_RRFFL].cnt);
+ set_64bit_val(buf, 144, (u64)obj_info[IRDMA_HMC_IW_HDR].cnt);
+ set_64bit_val(buf, 152, (u64)obj_info[IRDMA_HMC_IW_MD].cnt);
+ set_64bit_val(buf, 160,
+ (u64)obj_info[IRDMA_HMC_IW_OOISC].cnt);
+ set_64bit_val(buf, 168,
+ (u64)obj_info[IRDMA_HMC_IW_OOISCFFL].cnt);
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3 &&
+ dev->hmc_fpm_misc.loc_mem_pages)
+ irdma_set_loc_mem(buf);
+ commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
+ commit_fpm_mem.va = dev->fpm_commit_buf;
+
+ wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS;
+ print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
+ 8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE,
+ false);
+ ret_code = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
+ &commit_fpm_mem, true, wait_type);
+ if (!ret_code)
+ irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
+ hmc_info->hmc_obj,
+ &hmc_info->sd_table.sd_cnt);
+ print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
+ 8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE,
+ false);
+
+ return ret_code;
+}
+
+/**
+ * cqp_sds_wqe_fill - fill cqp wqe for sd
+ * @cqp: struct for cqp hw
+ * @info: sd info for wqe
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static int cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp,
+ struct irdma_update_sds_info *info, u64 scratch)
+{
+ u64 data;
+ u64 hdr;
+ __le64 *wqe;
+ int mem_entries, wqe_entries;
+ struct irdma_dma_mem *sdbuf = &cqp->sdbuf;
+ u64 offset = 0;
+ u32 wqe_idx;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
+ if (!wqe)
+ return -ENOMEM;
+
+ wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
+ mem_entries = info->cnt - wqe_entries;
+
+ if (mem_entries) {
+ offset = wqe_idx * IRDMA_UPDATE_SD_BUFF_SIZE;
+ memcpy(((char *)sdbuf->va + offset), &info->entry[3], mem_entries << 4);
+
+ data = (u64)sdbuf->pa + offset;
+ } else {
+ data = 0;
+ }
+ data |= FIELD_PREP(IRDMA_CQPSQ_UPESD_HMCFNID, info->hmc_fn_id);
+ set_64bit_val(wqe, 16, data);
+
+ switch (wqe_entries) {
+ case 3:
+ set_64bit_val(wqe, 48,
+ (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[2].cmd) |
+ FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1)));
+
+ set_64bit_val(wqe, 56, info->entry[2].data);
+ fallthrough;
+ case 2:
+ set_64bit_val(wqe, 32,
+ (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[1].cmd) |
+ FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1)));
+
+ set_64bit_val(wqe, 40, info->entry[1].data);
+ fallthrough;
+ case 1:
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[0].cmd));
+
+ set_64bit_val(wqe, 8, info->entry[0].data);
+ break;
+ default:
+ break;
+ }
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPDATE_PE_SDS) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_COUNT, mem_entries);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (mem_entries)
+ print_hex_dump_debug("WQE: UPDATE_PE_SDS WQE Buffer",
+ DUMP_PREFIX_OFFSET, 16, 8,
+ (char *)sdbuf->va + offset,
+ mem_entries << 4, false);
+
+ print_hex_dump_debug("WQE: UPDATE_PE_SDS WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+
+ return 0;
+}
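+
+/*
+ * Illustrative note, not part of this patch: cqp_sds_wqe_fill() places up to
+ * three SD entries inline in the WQE (at offsets 0/8, 32/40 and 48/56) and
+ * copies any remaining entries into the sdbuf DMA buffer, 16 bytes per entry,
+ * with the buffer address programmed at WQE offset 16. For example, with
+ * info->cnt == 5: wqe_entries = 3, mem_entries = 2, and entries [3] and [4]
+ * land in sdbuf at wqe_idx * IRDMA_UPDATE_SD_BUFF_SIZE.
+ */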
+
+/**
+ * irdma_update_pe_sds - cqp wqe for sd
+ * @dev: ptr to irdma_dev struct
+ * @info: sd info for the sd entries
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static int irdma_update_pe_sds(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info, u64 scratch)
+{
+ struct irdma_sc_cqp *cqp = dev->cqp;
+ int ret_code;
+
+ ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
+ if (!ret_code)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return ret_code;
+}
+
+/**
+ * irdma_update_sds_noccq - update sd before ccq created
+ * @dev: sc device struct
+ * @info: sd info for the sd entries
+ */
+int irdma_update_sds_noccq(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info)
+{
+ u32 error, val, tail;
+ struct irdma_sc_cqp *cqp = dev->cqp;
+ int ret_code;
+
+ ret_code = cqp_sds_wqe_fill(cqp, info, 0);
+ if (ret_code)
+ return ret_code;
+
+ irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
+
+ irdma_sc_cqp_post_sq(cqp);
+ return irdma_cqp_poll_registers(cqp, tail,
+ cqp->dev->hw_attrs.max_done_count);
+}
+
+/**
+ * irdma_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @hmc_fn_id: hmc function id
+ * @post_sq: flag for cqp db to ring
+ * @poll_registers: flag to poll register for cqp completion
+ */
+int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
+ u8 hmc_fn_id, bool post_sq,
+ bool poll_registers)
+{
+ u64 hdr;
+ __le64 *wqe;
+ u32 tail, val, error;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID, hmc_fn_id));
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
+ IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: SHMC_PAGES_ALLOCATED WQE",
+ DUMP_PREFIX_OFFSET, 16, 8, wqe,
+ IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
+
+ if (post_sq) {
+ irdma_sc_cqp_post_sq(cqp);
+ if (poll_registers)
+ /* check for cqp sq tail update */
+ return irdma_cqp_poll_registers(cqp, tail,
+ cqp->dev->hw_attrs.max_done_count);
+ else
+ return irdma_sc_poll_for_cqp_op_done(cqp,
+ IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED,
+ NULL);
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_cqp_ring_full - check if cqp ring is full
+ * @cqp: struct for cqp hw
+ */
+static bool irdma_cqp_ring_full(struct irdma_sc_cqp *cqp)
+{
+ return IRDMA_RING_FULL_ERR(cqp->sq_ring);
+}
+
+/**
+ * irdma_est_sd - returns approximate number of SDs for HMC
+ * @dev: sc device struct
+ * @hmc_info: hmc structure, size and count for HMC objects
+ */
+static u32 irdma_est_sd(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info)
+{
+ struct irdma_hmc_obj_info *pble_info;
+ int i;
+ u64 size = 0;
+ u64 sd;
+
+ for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
+ if (i != IRDMA_HMC_IW_PBLE)
+ size += round_up(hmc_info->hmc_obj[i].cnt *
+ hmc_info->hmc_obj[i].size, 512);
+
+ pble_info = &hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE];
+ if (dev->privileged)
+ size += round_up(pble_info->cnt * pble_info->size, 512);
+ if (size & 0x1FFFFF)
+ sd = (size >> 21) + 1; /* add 1 for remainder */
+ else
+ sd = size >> 21;
+ if (!dev->privileged && !dev->hmc_fpm_misc.loc_mem_pages) {
+ /* 2MB alignment for VF PBLE HMC */
+ size = pble_info->cnt * pble_info->size;
+ if (size & 0x1FFFFF)
+ sd += (size >> 21) + 1; /* add 1 for remainder */
+ else
+ sd += size >> 21;
+ }
+ if (sd > 0xFFFFFFFF) {
+ ibdev_dbg(to_ibdev(dev), "HMC: sd overflow[%lld]\n", sd);
+ sd = 0xFFFFFFFF - 1;
+ }
+
+ return (u32)sd;
+}
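+
+/*
+ * Illustrative note, not part of this patch: irdma_est_sd() counts 2MB
+ * segment descriptors. Each object's footprint is cnt * size rounded up to
+ * 512 bytes; the total is divided by 2MB (size >> 21) and a partial segment
+ * (size & 0x1FFFFF) adds one more SD. For example, a 5MB total needs
+ * (5MB >> 21) + 1 = 3 SDs.
+ */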
+
+/**
+ * irdma_sc_query_rdma_features - query RDMA features and FW ver
+ * @cqp: struct for cqp hw
+ * @buf: buffer to hold query info
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static int irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
+ struct irdma_dma_mem *buf, u64 scratch)
+{
+ u32 tail, val, error;
+ __le64 *wqe;
+ int status;
+ u64 temp;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ temp = buf->pa;
+ set_64bit_val(wqe, 32, temp);
+
+ temp = FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID,
+ cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN, buf->size) |
+ FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_QUERY_RDMA_FEATURES);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, temp);
+
+ print_hex_dump_debug("WQE: QUERY RDMA FEATURES", DUMP_PREFIX_OFFSET,
+ 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
+
+ irdma_sc_cqp_post_sq(cqp);
+ status = irdma_cqp_poll_registers(cqp, tail,
+ cqp->dev->hw_attrs.max_done_count);
+ if (error || status)
+ status = -EINVAL;
+
+ return status;
+}
+
+/**
+ * irdma_get_rdma_features - get RDMA features
+ * @dev: sc device struct
+ */
+int irdma_get_rdma_features(struct irdma_sc_dev *dev)
+{
+ int ret_code;
+ struct irdma_dma_mem feat_buf;
+ u64 temp;
+ u16 byte_idx, feat_type, feat_cnt, feat_idx;
+
+ feat_buf.size = ALIGN(IRDMA_FEATURE_BUF_SIZE,
+ IRDMA_FEATURE_BUF_ALIGNMENT);
+ feat_buf.va = dma_alloc_coherent(dev->hw->device, feat_buf.size,
+ &feat_buf.pa, GFP_KERNEL);
+ if (!feat_buf.va)
+ return -ENOMEM;
+
+ ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
+ if (ret_code)
+ goto exit;
+
+ get_64bit_val(feat_buf.va, 0, &temp);
+ feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
+ if (feat_cnt < 2) {
+ ret_code = -EINVAL;
+ goto exit;
+ } else if (feat_cnt > IRDMA_MAX_FEATURES) {
+ ibdev_dbg(to_ibdev(dev),
+ "DEV: feature buf size insufficient, retrying with larger buffer\n");
+ dma_free_coherent(dev->hw->device, feat_buf.size, feat_buf.va,
+ feat_buf.pa);
+ feat_buf.va = NULL;
+ feat_buf.size = ALIGN(8 * feat_cnt,
+ IRDMA_FEATURE_BUF_ALIGNMENT);
+ feat_buf.va = dma_alloc_coherent(dev->hw->device,
+ feat_buf.size, &feat_buf.pa,
+ GFP_KERNEL);
+ if (!feat_buf.va)
+ return -ENOMEM;
+
+ ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
+ if (ret_code)
+ goto exit;
+
+ get_64bit_val(feat_buf.va, 0, &temp);
+ feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
+ if (feat_cnt < 2) {
+ ret_code = -EINVAL;
+ goto exit;
+ }
+ }
+
+ print_hex_dump_debug("WQE: QUERY RDMA FEATURES", DUMP_PREFIX_OFFSET,
+ 16, 8, feat_buf.va, feat_cnt * 8, false);
+
+ for (byte_idx = 0, feat_idx = 0; feat_idx < min(feat_cnt, (u16)IRDMA_MAX_FEATURES);
+ feat_idx++, byte_idx += 8) {
+ get_64bit_val(feat_buf.va, byte_idx, &temp);
+ feat_type = FIELD_GET(IRDMA_FEATURE_TYPE, temp);
+ if (feat_type >= IRDMA_MAX_FEATURES) {
+ ibdev_dbg(to_ibdev(dev),
+ "DEV: found unrecognized feature type %d\n",
+ feat_type);
+ continue;
+ }
+ dev->feature_info[feat_type] = temp;
+ }
+
+ if (dev->feature_info[IRDMA_FTN_FLAGS] & IRDMA_ATOMICS_ALLOWED_BIT)
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_ATOMIC_OPS;
+
+exit:
+ dma_free_coherent(dev->hw->device, feat_buf.size, feat_buf.va,
+ feat_buf.pa);
+ feat_buf.va = NULL;
+ return ret_code;
+}
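+
+/*
+ * Illustrative sketch, not part of this patch: after irdma_get_rdma_features()
+ * succeeds, a caller could test the derived feature flags, e.g.:
+ *
+ *	if (!irdma_get_rdma_features(dev) &&
+ *	    (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_ATOMIC_OPS))
+ *		; // atomic ops were advertised in the firmware feature buffer
+ */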
+
+static u32 irdma_q1_cnt(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, u32 qpwanted)
+{
+ u32 q1_cnt;
+
+ if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
+ q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted);
+ } else {
+ if (dev->cqp->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
+ q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted + 512);
+ else
+ q1_cnt = dev->hw_attrs.max_hw_ird * 2 * qpwanted;
+ }
+
+ return q1_cnt;
+}
+
+static void cfg_fpm_value_gen_1(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, u32 qpwanted)
+{
+ hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt = roundup_pow_of_two(qpwanted * dev->hw_attrs.max_hw_wqes);
+}
+
+static void cfg_fpm_value_gen_2(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, u32 qpwanted)
+{
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc = &dev->hmc_fpm_misc;
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt =
+ 4 * hmc_fpm_misc->xf_block_size * qpwanted;
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_HDR].cnt = qpwanted;
+
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].max_cnt)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt = 32 * qpwanted;
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].max_cnt)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt /
+ hmc_fpm_misc->rrf_block_size;
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].max_cnt)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt = 32 * qpwanted;
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].max_cnt)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt /
+ hmc_fpm_misc->ooiscf_block_size;
+}
+
+/**
+ * irdma_get_rsrc_mem_config - configure resource memory location (local or host)
+ * @dev: sc device struct
+ * @is_mrte_loc_mem: if true, MRs are placed in local memory because all sds are local pages
+ *
+ * Only the mr object can be configured for host or local memory when qps are
+ * in local memory. If qps are in local memory, then all resource objects are
+ * placed in local memory except the mr, which can be in either host or local
+ * memory. The only other exception is pbles, which are always in host memory.
+ */
+static void irdma_get_rsrc_mem_config(struct irdma_sc_dev *dev, bool is_mrte_loc_mem)
+{
+ struct irdma_hmc_info *hmc_info = dev->hmc_info;
+ int i;
+
+ for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
+ hmc_info->hmc_obj[i].mem_loc = IRDMA_LOC_MEM;
+
+ if (dev->feature_info[IRDMA_OBJ_1] && !is_mrte_loc_mem) {
+ u8 mem_type;
+
+ mem_type = (u8)FIELD_GET(IRDMA_MR_MEM_LOC, dev->feature_info[IRDMA_OBJ_1]);
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].mem_loc =
+ (mem_type & IRDMA_OBJ_LOC_MEM_BIT) ?
+ IRDMA_LOC_MEM : IRDMA_HOST_MEM;
+ } else {
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].mem_loc = IRDMA_LOC_MEM;
+ }
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].mem_loc = IRDMA_HOST_MEM;
+
+ ibdev_dbg(to_ibdev(dev), "HMC: INFO: mrte_mem_loc = %d pble = %d\n",
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].mem_loc,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].mem_loc);
+}
+
+/**
+ * irdma_cfg_sd_mem - allocate sd memory
+ * @dev: sc device struct
+ * @hmc_info: ptr to irdma_hmc_info struct
+ */
+static int irdma_cfg_sd_mem(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info)
+{
+ struct irdma_virt_mem virt_mem;
+ u32 mem_size;
+
+ mem_size = sizeof(struct irdma_hmc_sd_entry) * hmc_info->sd_table.sd_cnt;
+ virt_mem.size = mem_size;
+ virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL);
+ if (!virt_mem.va)
+ return -ENOMEM;
+ hmc_info->sd_table.sd_entry = virt_mem.va;
+
+ return 0;
+}
+
+/**
+ * irdma_get_objs_pages - get number of 2M pages needed
+ * @dev: sc device struct
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @mem_loc: pages for local or host memory
+ */
+static u32 irdma_get_objs_pages(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info,
+ enum irdma_hmc_obj_mem mem_loc)
+{
+ u64 size = 0;
+ int i;
+
+ for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) {
+ if (hmc_info->hmc_obj[i].mem_loc == mem_loc) {
+ size += round_up(hmc_info->hmc_obj[i].cnt *
+ hmc_info->hmc_obj[i].size, 512);
+ }
+ }
+
+ return DIV_ROUND_UP(size, IRDMA_HMC_PAGE_SIZE);
+}
+
+/**
+ * irdma_set_host_hmc_rsrc_gen_3 - calculate host hmc resources for gen 3
+ * @dev: sc device struct
+ */
+static void irdma_set_host_hmc_rsrc_gen_3(struct irdma_sc_dev *dev)
+{
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc;
+ struct irdma_hmc_info *hmc_info;
+ enum irdma_hmc_obj_mem mrte_loc;
+ u32 mrwanted, pblewanted;
+ u32 avail_sds, mr_sds;
+
+ hmc_info = dev->hmc_info;
+ hmc_fpm_misc = &dev->hmc_fpm_misc;
+ avail_sds = hmc_fpm_misc->max_sds;
+ mrte_loc = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].mem_loc;
+ mrwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt;
+ pblewanted = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt;
+
+ if (mrte_loc == IRDMA_HOST_MEM && avail_sds > IRDMA_MIN_PBLE_PAGES) {
+ mr_sds = avail_sds - IRDMA_MIN_PBLE_PAGES;
+ mrwanted = min(mrwanted, mr_sds * MAX_MR_PER_SD);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt = mrwanted;
+ avail_sds -= DIV_ROUND_UP(mrwanted, MAX_MR_PER_SD);
+ }
+
+ if (FIELD_GET(IRDMA_MANAGE_RSRC_VER2, dev->feature_info[IRDMA_FTN_FLAGS]) &&
+ pblewanted > avail_sds * MAX_PBLE_PER_SD)
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: Warn: Resource version 2: pble wanted = 0x%x available = 0x%x\n",
+ pblewanted, avail_sds * MAX_PBLE_PER_SD);
+
+ pblewanted = min(pblewanted, avail_sds * MAX_PBLE_PER_SD);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
+}
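+
+/*
+ * Illustrative note, not part of this patch: with host-memory MRs,
+ * IRDMA_MIN_PBLE_PAGES SDs are reserved for PBLEs first, the MR count is
+ * capped at the remaining SDs * MAX_MR_PER_SD, and the SDs actually consumed
+ * by MRs are subtracted before the PBLE count is sized against
+ * avail_sds * MAX_PBLE_PER_SD.
+ */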
+
+/**
+ * irdma_verify_commit_fpm_gen_3 - verify query fpm values
+ * @dev: sc device struct
+ * @max_pages: max local memory available
+ * @qpwanted: number of qp's wanted
+ */
+static int irdma_verify_commit_fpm_gen_3(struct irdma_sc_dev *dev,
+ u32 max_pages,
+ u32 qpwanted)
+{
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc;
+ u32 rrf_cnt, xf_cnt, timer_cnt, pages_needed;
+ struct irdma_hmc_info *hmc_info;
+ u32 rrffl_cnt = 0;
+ u32 xffl_cnt = 0;
+ u32 q1fl_cnt;
+
+ hmc_info = dev->hmc_info;
+ hmc_fpm_misc = &dev->hmc_fpm_misc;
+
+ rrf_cnt = roundup_pow_of_two(IRDMA_RRF_MULTIPLIER * qpwanted);
+
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].max_cnt)
+ rrffl_cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt /
+ hmc_fpm_misc->rrf_block_size;
+
+ xf_cnt = roundup_pow_of_two(IRDMA_XF_MULTIPLIER * qpwanted);
+
+ if (xf_cnt)
+ xffl_cnt = xf_cnt / hmc_fpm_misc->xf_block_size;
+
+ timer_cnt = (round_up(qpwanted, 512) / 512 + 1) *
+ hmc_fpm_misc->timer_bucket;
+
+ q1fl_cnt = hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
+
+ pages_needed = irdma_get_objs_pages(dev, hmc_info, IRDMA_LOC_MEM);
+ if (pages_needed > max_pages) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: FAIL: SW counts rrf_cnt = %u rrffl_cnt = %u timer_cnt = %u",
+ rrf_cnt, rrffl_cnt, timer_cnt);
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: FAIL: SW counts xf_cnt = %u xffl_cnt = %u q1fl_cnt = %u",
+ xf_cnt, xffl_cnt, q1fl_cnt);
+
+ return -EINVAL;
+ }
+
+ hmc_fpm_misc->max_sds -= pages_needed;
+ hmc_fpm_misc->loc_mem_pages -= pages_needed;
+
+ return 0;
+}
+
+/**
+ * irdma_set_loc_hmc_rsrc_gen_3 - calculate hmc resources for gen 3
+ * @dev: sc device struct
+ * @max_pages: max local memory available
+ * @qpwanted: number of qp's wanted
+ */
+static int irdma_set_loc_hmc_rsrc_gen_3(struct irdma_sc_dev *dev,
+ u32 max_pages,
+ u32 qpwanted)
+{
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc;
+ u32 rrf_cnt, xf_cnt, timer_cnt, pages_needed;
+ struct irdma_hmc_info *hmc_info;
+ u32 ird, ord;
+
+ if (FIELD_GET(IRDMA_MANAGE_RSRC_VER2, dev->feature_info[IRDMA_FTN_FLAGS]))
+ return irdma_verify_commit_fpm_gen_3(dev, max_pages, qpwanted);
+
+ hmc_info = dev->hmc_info;
+ hmc_fpm_misc = &dev->hmc_fpm_misc;
+ ird = dev->hw_attrs.max_hw_ird;
+ ord = dev->hw_attrs.max_hw_ord;
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_HDR].cnt = qpwanted;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt = qpwanted;
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt =
+ min(hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt, qpwanted * 2);
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt =
+ min(qpwanted * 8, hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt);
+
+ rrf_cnt = roundup_pow_of_two(IRDMA_RRF_MULTIPLIER * qpwanted);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt =
+ min(hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].max_cnt, rrf_cnt);
+
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].max_cnt)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt /
+ hmc_fpm_misc->rrf_block_size;
+
+ xf_cnt = roundup_pow_of_two(IRDMA_XF_MULTIPLIER * qpwanted);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt =
+ min(hmc_info->hmc_obj[IRDMA_HMC_IW_XF].max_cnt, xf_cnt);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_XFFL].cnt =
+ xf_cnt / hmc_fpm_misc->xf_block_size;
+
+ timer_cnt = (round_up(qpwanted, 512) / 512 + 1) *
+ hmc_fpm_misc->timer_bucket;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_TIMER].cnt =
+ min(timer_cnt, hmc_info->hmc_obj[IRDMA_HMC_IW_TIMER].cnt);
+
+ do {
+ hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt = roundup_pow_of_two(ird * 2 * qpwanted);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_Q1FL].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
+
+ pages_needed = irdma_get_objs_pages(dev, hmc_info, IRDMA_LOC_MEM);
+ if (pages_needed <= max_pages)
+ break;
+
+ ird /= 2;
+ ord /= 2;
+ } while (ird >= IRDMA_MIN_IRD);
+
+ if (ird < IRDMA_MIN_IRD) {
+ ibdev_dbg(to_ibdev(dev), "HMC: FAIL: IRD=%u Q1 CNT = %u\n",
+ ird, hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt);
+ return -EINVAL;
+ }
+
+ dev->hw_attrs.max_hw_ird = ird;
+ dev->hw_attrs.max_hw_ord = ord;
+ hmc_fpm_misc->max_sds -= pages_needed;
+
+ return 0;
+}
+
+/**
+ * cfg_fpm_value_gen_3 - configure fpm for gen 3
+ * @dev: sc device struct
+ * @hmc_info: ptr to irdma_hmc_info struct
+ * @hmc_fpm_misc: ptr to fpm data
+ */
+static int cfg_fpm_value_gen_3(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info,
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc)
+{
+ enum irdma_hmc_obj_mem mrte_loc;
+ u32 mrwanted, qpwanted;
+ int i, ret_code = 0;
+ u32 loc_mem_pages;
+ bool is_mrte_loc_mem;
+
+ loc_mem_pages = hmc_fpm_misc->loc_mem_pages;
+ is_mrte_loc_mem = hmc_fpm_misc->loc_mem_pages == hmc_fpm_misc->max_sds ?
+ true : false;
+
+ irdma_get_rsrc_mem_config(dev, is_mrte_loc_mem);
+ mrte_loc = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].mem_loc;
+
+ if (is_mrte_loc_mem)
+ loc_mem_pages -= IRDMA_MIN_PBLE_PAGES;
+
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: mrte_loc %d loc_mem %u fpm max sds %u host_obj %d\n",
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].mem_loc,
+ hmc_fpm_misc->loc_mem_pages, hmc_fpm_misc->max_sds,
+ is_mrte_loc_mem);
+
+ mrwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt;
+ qpwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_HDR].cnt = qpwanted;
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].max_cnt = 0;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].max_cnt = 0;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_HTE].max_cnt = 0;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt = 0;
+
+ if (!FIELD_GET(IRDMA_MANAGE_RSRC_VER2, dev->feature_info[IRDMA_FTN_FLAGS]))
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt =
+ min(hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt,
+ (u32)IRDMA_FSIAV_CNT_MAX);
+
+ for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
+ hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
+
+ while (qpwanted >= IRDMA_MIN_QP_CNT) {
+ if (!irdma_set_loc_hmc_rsrc_gen_3(dev, loc_mem_pages, qpwanted))
+ break;
+
+ if (FIELD_GET(IRDMA_MANAGE_RSRC_VER2, dev->feature_info[IRDMA_FTN_FLAGS]))
+ return -EINVAL;
+
+ qpwanted /= 2;
+ if (mrte_loc == IRDMA_LOC_MEM) {
+ mrwanted = qpwanted * IRDMA_MIN_MR_PER_QP;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt =
+ min(hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt, mrwanted);
+ }
+ }
+
+ if (qpwanted < IRDMA_MIN_QP_CNT) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: ERROR: could not allocate fpm resources\n");
+ return -EINVAL;
+ }
+
+ irdma_set_host_hmc_rsrc_gen_3(dev);
+ ret_code = irdma_sc_cfg_iw_fpm(dev, dev->hmc_fn_id);
+ if (ret_code) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: cfg_iw_fpm returned error_code[x%08X]\n",
+ readl(dev->hw_regs[IRDMA_CQPERRCODES]));
+
+ return ret_code;
+ }
+
+ return irdma_cfg_sd_mem(dev, hmc_info);
+}
+
+/**
+ * irdma_cfg_fpm_val - configure HMC objects
+ * @dev: sc device struct
+ * @qp_count: desired qp count
+ */
+int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
+{
+ u32 qpwanted, mrwanted, pblewanted;
+ u32 powerof2, hte, i;
+ u32 sd_needed;
+ u32 sd_diff;
+ u32 loop_count = 0;
+ struct irdma_hmc_info *hmc_info;
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc;
+ int ret_code = 0;
+ u32 max_sds;
+
+ hmc_info = dev->hmc_info;
+ hmc_fpm_misc = &dev->hmc_fpm_misc;
+
+ ret_code = irdma_sc_init_iw_hmc(dev, dev->hmc_fn_id);
+ if (ret_code) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: irdma_sc_init_iw_hmc returned error_code = %d\n",
+ ret_code);
+ return ret_code;
+ }
+
+ max_sds = hmc_fpm_misc->max_sds;
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ return cfg_fpm_value_gen_3(dev, hmc_info, hmc_fpm_misc);
+
+ for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
+ hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
+ sd_needed = irdma_est_sd(dev, hmc_info);
+ ibdev_dbg(to_ibdev(dev), "HMC: sd count %u where max sd is %u\n",
+ hmc_info->sd_table.sd_cnt, max_sds);
+
+ qpwanted = min(qp_count, hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt);
+
+ powerof2 = 1;
+ while (powerof2 <= qpwanted)
+ powerof2 *= 2;
+ powerof2 /= 2;
+ qpwanted = powerof2;
+
+ mrwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt;
+ pblewanted = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt;
+
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: req_qp=%d max_sd=%u, max_qp = %u, max_cq=%u, max_mr=%u, max_pble=%u, mc=%d, av=%u\n",
+ qp_count, max_sds,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt);
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].max_cnt;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_APBVT_ENTRY].cnt = 1;
+
+ while (irdma_q1_cnt(dev, hmc_info, qpwanted) > hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].max_cnt)
+ qpwanted /= 2;
+
+ do {
+ ++loop_count;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt = qpwanted;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt =
+ min(2 * qpwanted, hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_SRQ].cnt = 0; /* Reserved */
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt = mrwanted;
+
+ hte = round_up(qpwanted + hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt, 512);
+ powerof2 = 1;
+ while (powerof2 < hte)
+ powerof2 *= 2;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_HTE].cnt =
+ powerof2 * hmc_fpm_misc->ht_multiplier;
+ if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ cfg_fpm_value_gen_1(dev, hmc_info, qpwanted);
+ else
+ cfg_fpm_value_gen_2(dev, hmc_info, qpwanted);
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt = irdma_q1_cnt(dev, hmc_info, qpwanted);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_XFFL].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_Q1FL].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_TIMER].cnt =
+ (round_up(qpwanted, 512) / 512 + 1) * hmc_fpm_misc->timer_bucket;
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
+ sd_needed = irdma_est_sd(dev, hmc_info);
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: sd_needed = %d, hmc_fpm_misc->max_sds=%d, mrwanted=%d, pblewanted=%d qpwanted=%d\n",
+ sd_needed, hmc_fpm_misc->max_sds, mrwanted,
+ pblewanted, qpwanted);
+
+ /* Do not reduce resources further. All objects fit with max SDs */
+ if (sd_needed <= hmc_fpm_misc->max_sds)
+ break;
+
+ sd_diff = sd_needed - hmc_fpm_misc->max_sds;
+ if (sd_diff > 128) {
+ if (!(loop_count % 2) && qpwanted > 128) {
+ qpwanted /= 2;
+ } else {
+ pblewanted /= 2;
+ mrwanted /= 2;
+ }
+ continue;
+ }
+
+ if (dev->cqp->hmc_profile != IRDMA_HMC_PROFILE_FAVOR_VF &&
+ pblewanted > (512 * FPM_MULTIPLIER * sd_diff)) {
+ pblewanted -= 256 * FPM_MULTIPLIER * sd_diff;
+ continue;
+ } else if (pblewanted > (100 * FPM_MULTIPLIER)) {
+ pblewanted -= 10 * FPM_MULTIPLIER;
+ } else if (pblewanted > FPM_MULTIPLIER) {
+ pblewanted -= FPM_MULTIPLIER;
+ } else if (qpwanted <= 128) {
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt > 256)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt /= 2;
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2;
+ }
+ if (mrwanted > FPM_MULTIPLIER)
+ mrwanted -= FPM_MULTIPLIER;
+ if (!(loop_count % 10) && qpwanted > 128) {
+ qpwanted /= 2;
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2;
+ }
+ } while (loop_count < 2000);
+
+ if (sd_needed > hmc_fpm_misc->max_sds) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: cfg_fpm failed loop_cnt=%u, sd_needed=%u, max sd count %u\n",
+ loop_count, sd_needed, hmc_info->sd_table.sd_cnt);
+ return -EINVAL;
+ }
+
+ if (loop_count > 1 && sd_needed < max_sds) {
+ pblewanted += (max_sds - sd_needed) * 256 * FPM_MULTIPLIER;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
+ sd_needed = irdma_est_sd(dev, hmc_info);
+ }
+
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: loop_cnt=%d, sd_needed=%d, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d, mc=%d, ah=%d, max sd count %d, first sd index %d\n",
+ loop_count, sd_needed,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt,
+ hmc_info->sd_table.sd_cnt, hmc_info->first_sd_index);
+
+ ret_code = irdma_sc_cfg_iw_fpm(dev, dev->hmc_fn_id);
+ if (ret_code) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: cfg_iw_fpm returned error_code[x%08X]\n",
+ readl(dev->hw_regs[IRDMA_CQPERRCODES]));
+ return ret_code;
+ }
+
+ return irdma_cfg_sd_mem(dev, hmc_info);
+}
+
+/**
+ * irdma_exec_cqp_cmd - execute cqp cmd when wqes are available
+ * @dev: rdma device
+ * @pcmdinfo: cqp command info
+ */
+static int irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
+ struct cqp_cmds_info *pcmdinfo)
+{
+ int status;
+ struct irdma_dma_mem val_mem;
+ bool alloc = false;
+
+ dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
+ switch (pcmdinfo->cqp_cmd) {
+ case IRDMA_OP_CEQ_DESTROY:
+ status = irdma_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
+ pcmdinfo->in.u.ceq_destroy.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_AEQ_DESTROY:
+ status = irdma_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
+ pcmdinfo->in.u.aeq_destroy.scratch,
+ pcmdinfo->post_sq);
+
+ break;
+ case IRDMA_OP_CEQ_CREATE:
+ status = irdma_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
+ pcmdinfo->in.u.ceq_create.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_AEQ_CREATE:
+ status = irdma_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
+ pcmdinfo->in.u.aeq_create.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_QP_UPLOAD_CONTEXT:
+ status = irdma_sc_qp_upload_context(pcmdinfo->in.u.qp_upload_context.dev,
+ &pcmdinfo->in.u.qp_upload_context.info,
+ pcmdinfo->in.u.qp_upload_context.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_CQ_CREATE:
+ status = irdma_sc_cq_create(pcmdinfo->in.u.cq_create.cq,
+ pcmdinfo->in.u.cq_create.scratch,
+ pcmdinfo->in.u.cq_create.check_overflow,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_CQ_MODIFY:
+ status = irdma_sc_cq_modify(pcmdinfo->in.u.cq_modify.cq,
+ &pcmdinfo->in.u.cq_modify.info,
+ pcmdinfo->in.u.cq_modify.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_CQ_DESTROY:
+ status = irdma_sc_cq_destroy(pcmdinfo->in.u.cq_destroy.cq,
+ pcmdinfo->in.u.cq_destroy.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_QP_FLUSH_WQES:
+ status = irdma_sc_qp_flush_wqes(pcmdinfo->in.u.qp_flush_wqes.qp,
+ &pcmdinfo->in.u.qp_flush_wqes.info,
+ pcmdinfo->in.u.qp_flush_wqes.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_GEN_AE:
+ status = irdma_sc_gen_ae(pcmdinfo->in.u.gen_ae.qp,
+ &pcmdinfo->in.u.gen_ae.info,
+ pcmdinfo->in.u.gen_ae.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_MANAGE_PUSH_PAGE:
+ status = irdma_sc_manage_push_page(pcmdinfo->in.u.manage_push_page.cqp,
+ &pcmdinfo->in.u.manage_push_page.info,
+ pcmdinfo->in.u.manage_push_page.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_UPDATE_PE_SDS:
+ status = irdma_update_pe_sds(pcmdinfo->in.u.update_pe_sds.dev,
+ &pcmdinfo->in.u.update_pe_sds.info,
+ pcmdinfo->in.u.update_pe_sds.scratch);
+ break;
+ case IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE:
+ /* switch to calling through the call table */
+ status =
+ irdma_sc_manage_hmc_pm_func_table(pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
+ &pcmdinfo->in.u.manage_hmc_pm.info,
+ pcmdinfo->in.u.manage_hmc_pm.scratch,
+ true);
+ break;
+ case IRDMA_OP_SUSPEND:
+ status = irdma_sc_suspend_qp(pcmdinfo->in.u.suspend_resume.cqp,
+ pcmdinfo->in.u.suspend_resume.qp,
+ pcmdinfo->in.u.suspend_resume.scratch);
+ break;
+ case IRDMA_OP_RESUME:
+ status = irdma_sc_resume_qp(pcmdinfo->in.u.suspend_resume.cqp,
+ pcmdinfo->in.u.suspend_resume.qp,
+ pcmdinfo->in.u.suspend_resume.scratch);
+ break;
+ case IRDMA_OP_QUERY_FPM_VAL:
+ val_mem.pa = pcmdinfo->in.u.query_fpm_val.fpm_val_pa;
+ val_mem.va = pcmdinfo->in.u.query_fpm_val.fpm_val_va;
+ status = irdma_sc_query_fpm_val(pcmdinfo->in.u.query_fpm_val.cqp,
+ pcmdinfo->in.u.query_fpm_val.scratch,
+ pcmdinfo->in.u.query_fpm_val.hmc_fn_id,
+ &val_mem, true, IRDMA_CQP_WAIT_EVENT);
+ break;
+ case IRDMA_OP_COMMIT_FPM_VAL:
+ val_mem.pa = pcmdinfo->in.u.commit_fpm_val.fpm_val_pa;
+ val_mem.va = pcmdinfo->in.u.commit_fpm_val.fpm_val_va;
+ status = irdma_sc_commit_fpm_val(pcmdinfo->in.u.commit_fpm_val.cqp,
+ pcmdinfo->in.u.commit_fpm_val.scratch,
+ pcmdinfo->in.u.commit_fpm_val.hmc_fn_id,
+ &val_mem,
+ true,
+ IRDMA_CQP_WAIT_EVENT);
+ break;
+ case IRDMA_OP_STATS_ALLOCATE:
+ alloc = true;
+ fallthrough;
+ case IRDMA_OP_STATS_FREE:
+ status = irdma_sc_manage_stats_inst(pcmdinfo->in.u.stats_manage.cqp,
+ &pcmdinfo->in.u.stats_manage.info,
+ alloc,
+ pcmdinfo->in.u.stats_manage.scratch);
+ break;
+ case IRDMA_OP_STATS_GATHER:
+ status = irdma_sc_gather_stats(pcmdinfo->in.u.stats_gather.cqp,
+ &pcmdinfo->in.u.stats_gather.info,
+ pcmdinfo->in.u.stats_gather.scratch);
+ break;
+ case IRDMA_OP_WS_MODIFY_NODE:
+ status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
+ &pcmdinfo->in.u.ws_node.info,
+ IRDMA_MODIFY_NODE,
+ pcmdinfo->in.u.ws_node.scratch);
+ break;
+ case IRDMA_OP_WS_DELETE_NODE:
+ status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
+ &pcmdinfo->in.u.ws_node.info,
+ IRDMA_DEL_NODE,
+ pcmdinfo->in.u.ws_node.scratch);
+ break;
+ case IRDMA_OP_WS_ADD_NODE:
+ status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
+ &pcmdinfo->in.u.ws_node.info,
+ IRDMA_ADD_NODE,
+ pcmdinfo->in.u.ws_node.scratch);
+ break;
+ case IRDMA_OP_SET_UP_MAP:
+ status = irdma_sc_set_up_map(pcmdinfo->in.u.up_map.cqp,
+ &pcmdinfo->in.u.up_map.info,
+ pcmdinfo->in.u.up_map.scratch);
+ break;
+ case IRDMA_OP_QUERY_RDMA_FEATURES:
+ status = irdma_sc_query_rdma_features(pcmdinfo->in.u.query_rdma.cqp,
+ &pcmdinfo->in.u.query_rdma.query_buff_mem,
+ pcmdinfo->in.u.query_rdma.scratch);
+ break;
+ case IRDMA_OP_DELETE_ARP_CACHE_ENTRY:
+ status = irdma_sc_del_arp_cache_entry(pcmdinfo->in.u.del_arp_cache_entry.cqp,
+ pcmdinfo->in.u.del_arp_cache_entry.scratch,
+ pcmdinfo->in.u.del_arp_cache_entry.arp_index,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_MANAGE_APBVT_ENTRY:
+ status = irdma_sc_manage_apbvt_entry(pcmdinfo->in.u.manage_apbvt_entry.cqp,
+ &pcmdinfo->in.u.manage_apbvt_entry.info,
+ pcmdinfo->in.u.manage_apbvt_entry.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY:
+ status = irdma_sc_manage_qhash_table_entry(pcmdinfo->in.u.manage_qhash_table_entry.cqp,
+ &pcmdinfo->in.u.manage_qhash_table_entry.info,
+ pcmdinfo->in.u.manage_qhash_table_entry.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_QP_MODIFY:
+ status = irdma_sc_qp_modify(pcmdinfo->in.u.qp_modify.qp,
+ &pcmdinfo->in.u.qp_modify.info,
+ pcmdinfo->in.u.qp_modify.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_QP_CREATE:
+ status = irdma_sc_qp_create(pcmdinfo->in.u.qp_create.qp,
+ &pcmdinfo->in.u.qp_create.info,
+ pcmdinfo->in.u.qp_create.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_QP_DESTROY:
+ status = irdma_sc_qp_destroy(pcmdinfo->in.u.qp_destroy.qp,
+ pcmdinfo->in.u.qp_destroy.scratch,
+ pcmdinfo->in.u.qp_destroy.remove_hash_idx,
+ pcmdinfo->in.u.qp_destroy.ignore_mw_bnd,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_ALLOC_STAG:
+ status = irdma_sc_alloc_stag(pcmdinfo->in.u.alloc_stag.dev,
+ &pcmdinfo->in.u.alloc_stag.info,
+ pcmdinfo->in.u.alloc_stag.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_MR_REG_NON_SHARED:
+ status = irdma_sc_mr_reg_non_shared(pcmdinfo->in.u.mr_reg_non_shared.dev,
+ &pcmdinfo->in.u.mr_reg_non_shared.info,
+ pcmdinfo->in.u.mr_reg_non_shared.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_DEALLOC_STAG:
+ status = irdma_sc_dealloc_stag(pcmdinfo->in.u.dealloc_stag.dev,
+ &pcmdinfo->in.u.dealloc_stag.info,
+ pcmdinfo->in.u.dealloc_stag.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_MW_ALLOC:
+ status = irdma_sc_mw_alloc(pcmdinfo->in.u.mw_alloc.dev,
+ &pcmdinfo->in.u.mw_alloc.info,
+ pcmdinfo->in.u.mw_alloc.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_ADD_ARP_CACHE_ENTRY:
+ status = irdma_sc_add_arp_cache_entry(pcmdinfo->in.u.add_arp_cache_entry.cqp,
+ &pcmdinfo->in.u.add_arp_cache_entry.info,
+ pcmdinfo->in.u.add_arp_cache_entry.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY:
+ status = irdma_sc_alloc_local_mac_entry(pcmdinfo->in.u.alloc_local_mac_entry.cqp,
+ pcmdinfo->in.u.alloc_local_mac_entry.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_ADD_LOCAL_MAC_ENTRY:
+ status = irdma_sc_add_local_mac_entry(pcmdinfo->in.u.add_local_mac_entry.cqp,
+ &pcmdinfo->in.u.add_local_mac_entry.info,
+ pcmdinfo->in.u.add_local_mac_entry.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_DELETE_LOCAL_MAC_ENTRY:
+ status = irdma_sc_del_local_mac_entry(pcmdinfo->in.u.del_local_mac_entry.cqp,
+ pcmdinfo->in.u.del_local_mac_entry.scratch,
+ pcmdinfo->in.u.del_local_mac_entry.entry_idx,
+ pcmdinfo->in.u.del_local_mac_entry.ignore_ref_count,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_AH_CREATE:
+ status = irdma_sc_create_ah(pcmdinfo->in.u.ah_create.cqp,
+ &pcmdinfo->in.u.ah_create.info,
+ pcmdinfo->in.u.ah_create.scratch);
+ break;
+ case IRDMA_OP_AH_DESTROY:
+ status = irdma_sc_destroy_ah(pcmdinfo->in.u.ah_destroy.cqp,
+ &pcmdinfo->in.u.ah_destroy.info,
+ pcmdinfo->in.u.ah_destroy.scratch);
+ break;
+ case IRDMA_OP_MC_CREATE:
+ status = irdma_sc_create_mcast_grp(pcmdinfo->in.u.mc_create.cqp,
+ &pcmdinfo->in.u.mc_create.info,
+ pcmdinfo->in.u.mc_create.scratch);
+ break;
+ case IRDMA_OP_MC_DESTROY:
+ status = irdma_sc_destroy_mcast_grp(pcmdinfo->in.u.mc_destroy.cqp,
+ &pcmdinfo->in.u.mc_destroy.info,
+ pcmdinfo->in.u.mc_destroy.scratch);
+ break;
+ case IRDMA_OP_MC_MODIFY:
+ status = irdma_sc_modify_mcast_grp(pcmdinfo->in.u.mc_modify.cqp,
+ &pcmdinfo->in.u.mc_modify.info,
+ pcmdinfo->in.u.mc_modify.scratch);
+ break;
+ case IRDMA_OP_SRQ_CREATE:
+ status = irdma_sc_srq_create(pcmdinfo->in.u.srq_create.srq,
+ pcmdinfo->in.u.srq_create.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_SRQ_MODIFY:
+ status = irdma_sc_srq_modify(pcmdinfo->in.u.srq_modify.srq,
+ &pcmdinfo->in.u.srq_modify.info,
+ pcmdinfo->in.u.srq_modify.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_SRQ_DESTROY:
+ status = irdma_sc_srq_destroy(pcmdinfo->in.u.srq_destroy.srq,
+ pcmdinfo->in.u.srq_destroy.scratch,
+ pcmdinfo->post_sq);
+ break;
+ default:
+ status = -EOPNOTSUPP;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * irdma_process_cqp_cmd - process a cqp command or queue it for later
+ * @dev: sc device struct
+ * @pcmdinfo: cqp command info
+ */
+int irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
+ struct cqp_cmds_info *pcmdinfo)
+{
+ int status = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->cqp_lock, flags);
+ if (list_empty(&dev->cqp_cmd_head) && !irdma_cqp_ring_full(dev->cqp))
+ status = irdma_exec_cqp_cmd(dev, pcmdinfo);
+ else
+ list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
+ spin_unlock_irqrestore(&dev->cqp_lock, flags);
+ return status;
+}
+
+/**
+ * irdma_process_bh - called from tasklet for cqp list
+ * @dev: sc device struct
+ */
+int irdma_process_bh(struct irdma_sc_dev *dev)
+{
+ int status = 0;
+ struct cqp_cmds_info *pcmdinfo;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->cqp_lock, flags);
+ while (!list_empty(&dev->cqp_cmd_head) &&
+ !irdma_cqp_ring_full(dev->cqp)) {
+ pcmdinfo = (struct cqp_cmds_info *)irdma_remove_cqp_head(dev);
+ status = irdma_exec_cqp_cmd(dev, pcmdinfo);
+ if (status)
+ break;
+ }
+ spin_unlock_irqrestore(&dev->cqp_lock, flags);
+ return status;
+}
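/*
 * A minimal caller sketch for the submit/backlog pair above, not taken from
 * the patch itself: irdma_process_cqp_cmd() runs a command right away when
 * the backlog is empty and the CQP SQ has room, otherwise it parks the
 * command on dev->cqp_cmd_head, and irdma_process_bh() later replays the
 * backlog from tasklet context.  The op-selector field name (cqp_cmd) and
 * the struct irdma_ah_info type are assumptions for illustration; only
 * post_sq, in.u.ah_create and cqp_cmd_entry are visible in the code above.
 */
static int example_submit_ah_create(struct irdma_sc_dev *dev,
				    struct cqp_cmds_info *pcmdinfo,
				    struct irdma_sc_cqp *cqp,
				    struct irdma_ah_info *info, u64 scratch)
{
	pcmdinfo->cqp_cmd = IRDMA_OP_AH_CREATE;	/* assumed selector field */
	pcmdinfo->post_sq = 1;
	pcmdinfo->in.u.ah_create.cqp = cqp;
	pcmdinfo->in.u.ah_create.info = *info;	/* payload passed by value */
	pcmdinfo->in.u.ah_create.scratch = scratch;

	/* Executes now, or lands on dev->cqp_cmd_head for irdma_process_bh(). */
	return irdma_process_cqp_cmd(dev, pcmdinfo);
}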
+
+/**
+ * irdma_cfg_aeq - Configure AEQ interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ * @enable: true to enable the AEQ interrupt cause, false to disable it
+ */
+void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable)
+{
+ u32 reg_val;
+
+ reg_val = FIELD_PREP(IRDMA_PFINT_AEQCTL_CAUSE_ENA, enable) |
+ FIELD_PREP(IRDMA_PFINT_AEQCTL_MSIX_INDX, idx) |
+ FIELD_PREP(IRDMA_PFINT_AEQCTL_ITR_INDX, 3);
+ writel(reg_val, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
+}
+
+/**
+ * sc_vsi_update_stats - Update statistics
+ * @vsi: sc_vsi instance to update
+ */
+void sc_vsi_update_stats(struct irdma_sc_vsi *vsi)
+{
+ struct irdma_dev_hw_stats *hw_stats = &vsi->pestat->hw_stats;
+ struct irdma_gather_stats *gather_stats =
+ vsi->pestat->gather_info.gather_stats_va;
+ struct irdma_gather_stats *last_gather_stats =
+ vsi->pestat->gather_info.last_gather_stats_va;
+ const struct irdma_hw_stat_map *map = vsi->dev->hw_stats_map;
+ u16 max_stat_idx = vsi->dev->hw_attrs.max_stat_idx;
+ u16 i;
+
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ for (i = 0; i < max_stat_idx; i++) {
+ u16 idx = map[i].byteoff / sizeof(u64);
+
+ hw_stats->stats_val[i] = gather_stats->val[idx];
+ }
+ return;
+ }
+
+ irdma_update_stats(hw_stats, gather_stats, last_gather_stats,
+ map, max_stat_idx);
+}
+
+/**
+ * irdma_wait_pe_ready - Poll until the protocol engine firmware reports ready
+ * @dev: provides access to registers
+ */
+static int irdma_wait_pe_ready(struct irdma_sc_dev *dev)
+{
+ u32 statuscpu0;
+ u32 statuscpu1;
+ u32 statuscpu2;
+ u32 retrycount = 0;
+
+ do {
+ statuscpu0 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS0]);
+ statuscpu1 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS1]);
+ statuscpu2 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS2]);
+ if (statuscpu0 == 0x80 && statuscpu1 == 0x80 &&
+ statuscpu2 == 0x80)
+ return 0;
+ mdelay(1000);
+ } while (retrycount++ < dev->hw_attrs.max_pe_ready_count);
+ return -1;
+}
+
+static inline void irdma_sc_init_hw(struct irdma_sc_dev *dev)
+{
+ switch (dev->hw_attrs.uk_attrs.hw_rev) {
+ case IRDMA_GEN_1:
+ i40iw_init_hw(dev);
+ break;
+ case IRDMA_GEN_2:
+ icrdma_init_hw(dev);
+ break;
+ case IRDMA_GEN_3:
+ ig3rdma_init_hw(dev);
+ break;
+ }
+}
+
+/**
+ * irdma_sc_dev_init - Initialize control part of device
+ * @ver: version
+ * @dev: Device pointer
+ * @info: Device init info
+ */
+int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
+ struct irdma_device_init_info *info)
+{
+ u32 val;
+ int ret_code = 0;
+ u8 db_size;
+
+ INIT_LIST_HEAD(&dev->cqp_cmd_head); /* for CQP command backlog */
+ mutex_init(&dev->ws_mutex);
+ dev->hmc_fn_id = info->hmc_fn_id;
+ dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
+ dev->fpm_query_buf = info->fpm_query_buf;
+ dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
+ dev->fpm_commit_buf = info->fpm_commit_buf;
+ dev->hw = info->hw;
+ dev->hw->hw_addr = info->bar0;
+ dev->protocol_used = info->protocol_used;
+ /* Setup the hardware limits, hmc may limit further */
+ dev->hw_attrs.min_hw_qp_id = IRDMA_MIN_IW_QP_ID;
+ dev->hw_attrs.min_hw_srq_id = IRDMA_MIN_IW_SRQ_ID;
+ dev->hw_attrs.min_hw_aeq_size = IRDMA_MIN_AEQ_ENTRIES;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ dev->hw_attrs.max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES_GEN_3;
+ else
+ dev->hw_attrs.max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES;
+ dev->hw_attrs.min_hw_ceq_size = IRDMA_MIN_CEQ_ENTRIES;
+ dev->hw_attrs.max_hw_ceq_size = IRDMA_MAX_CEQ_ENTRIES;
+ dev->hw_attrs.uk_attrs.min_hw_cq_size = IRDMA_MIN_CQ_SIZE;
+ dev->hw_attrs.uk_attrs.max_hw_cq_size = IRDMA_MAX_CQ_SIZE;
+ dev->hw_attrs.uk_attrs.max_hw_wq_frags = IRDMA_MAX_WQ_FRAGMENT_COUNT;
+ dev->hw_attrs.uk_attrs.max_hw_read_sges = IRDMA_MAX_SGE_RD;
+ dev->hw_attrs.max_hw_outbound_msg_size = IRDMA_MAX_OUTBOUND_MSG_SIZE;
+ dev->hw_attrs.max_mr_size = IRDMA_MAX_MR_SIZE;
+ dev->hw_attrs.max_hw_inbound_msg_size = IRDMA_MAX_INBOUND_MSG_SIZE;
+ dev->hw_attrs.max_hw_device_pages = IRDMA_MAX_PUSH_PAGE_COUNT;
+ dev->hw_attrs.uk_attrs.max_hw_inline = IRDMA_MAX_INLINE_DATA_SIZE;
+ dev->hw_attrs.max_hw_wqes = IRDMA_MAX_WQ_ENTRIES;
+ dev->hw_attrs.max_qp_wr = IRDMA_MAX_QP_WRS(IRDMA_MAX_QUANTA_PER_WR);
+
+ dev->hw_attrs.uk_attrs.max_hw_rq_quanta = IRDMA_QP_SW_MAX_RQ_QUANTA;
+ dev->hw_attrs.uk_attrs.max_hw_wq_quanta = IRDMA_QP_SW_MAX_WQ_QUANTA;
+ dev->hw_attrs.max_hw_pds = IRDMA_MAX_PDS;
+ dev->hw_attrs.max_hw_ena_vf_count = IRDMA_MAX_PE_ENA_VF_COUNT;
+
+ dev->hw_attrs.max_pe_ready_count = 14;
+ dev->hw_attrs.max_done_count = IRDMA_DONE_COUNT;
+ dev->hw_attrs.max_sleep_count = IRDMA_SLEEP_COUNT;
+ dev->hw_attrs.max_cqp_compl_wait_time_ms = CQP_COMPL_WAIT_TIME_MS;
+
+ if (!dev->privileged) {
+ ret_code = irdma_vchnl_req_get_hmc_fcn(dev);
+ if (ret_code) {
+ ibdev_dbg(to_ibdev(dev),
+ "DEV: Get HMC function ret = %d\n",
+ ret_code);
+
+ return ret_code;
+ }
+ }
+
+ irdma_sc_init_hw(dev);
+
+ if (dev->privileged) {
+ if (irdma_wait_pe_ready(dev))
+ return -ETIMEDOUT;
+
+ val = readl(dev->hw_regs[IRDMA_GLPCI_LBARCTRL]);
+ db_size = (u8)FIELD_GET(IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE, val);
+ if (db_size != IRDMA_PE_DB_SIZE_4M &&
+ db_size != IRDMA_PE_DB_SIZE_8M) {
+ ibdev_dbg(to_ibdev(dev),
+ "DEV: RDMA PE doorbell is not enabled in CSR val 0x%x db_size=%d\n",
+ val, db_size);
+ return -ENODEV;
+ }
+ } else {
+ ret_code = irdma_vchnl_req_get_reg_layout(dev);
+ if (ret_code)
+ ibdev_dbg(to_ibdev(dev),
+ "DEV: Get Register layout failed ret = %d\n",
+ ret_code);
+ }
+
+ return ret_code;
+}
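/*
 * A minimal bring-up sketch for irdma_sc_dev_init() above, not taken from
 * the patch itself: the caller allocates the FPM query/commit buffers and
 * maps BAR0 first, then hands everything over through
 * struct irdma_device_init_info.  Only fields consumed by the function above
 * are set; the parameter types and the reuse of IRDMA_GEN_2 as an
 * enum irdma_vers value are assumptions for illustration.
 */
static int example_sc_dev_init(struct irdma_sc_dev *dev, struct irdma_hw *hw,
			       void __iomem *bar0, void *query_buf,
			       u64 query_pa, void *commit_buf, u64 commit_pa,
			       u8 hmc_fn_id)
{
	struct irdma_device_init_info info = {};

	info.hw = hw;
	info.bar0 = bar0;
	info.hmc_fn_id = hmc_fn_id;
	info.fpm_query_buf = query_buf;
	info.fpm_query_buf_pa = query_pa;
	info.fpm_commit_buf = commit_buf;
	info.fpm_commit_buf_pa = commit_pa;
	info.protocol_used = IRDMA_ROCE_PROTOCOL_ONLY;

	return irdma_sc_dev_init(IRDMA_GEN_2, dev, &info);
}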
+
+/**
+ * irdma_stat_val - Extract HW counter value from statistics buffer
+ * @stats_val: pointer to statistics buffer
+ * @byteoff: byte offset of counter value in the buffer (8B-aligned)
+ * @bitoff: bit offset of counter value within 8B entry
+ * @bitmask: maximum counter value (e.g. 0xffffff for 24-bit counter)
+ */
+static inline u64 irdma_stat_val(const u64 *stats_val, u16 byteoff, u8 bitoff,
+ u64 bitmask)
+{
+ u16 idx = byteoff / sizeof(*stats_val);
+
+ return (stats_val[idx] >> bitoff) & bitmask;
+}
+
+/**
+ * irdma_stat_delta - Calculate counter delta
+ * @new_val: updated counter value
+ * @old_val: last counter value
+ * @max_val: maximum counter value (e.g. 0xffffff for 24-bit counter)
+ */
+static inline u64 irdma_stat_delta(u64 new_val, u64 old_val, u64 max_val)
+{
+ if (new_val >= old_val)
+ return new_val - old_val;
+
+ /* roll-over case */
+ return max_val - old_val + new_val + 1;
+}
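/*
 * Worked example for the roll-over branch above, assuming a 24-bit counter
 * (max_val == 0xffffff): with old_val == 0xfffffe and new_val == 0x1, the
 * counter advanced 0xfffffe -> 0xffffff -> 0x0 -> 0x1, i.e. three ticks,
 * and 0xffffff - 0xfffffe + 0x1 + 1 does evaluate to 3.
 */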
+
+/**
+ * irdma_update_stats - Update statistics
+ * @hw_stats: hw_stats instance to update
+ * @gather_stats: updated stat counters
+ * @last_gather_stats: last stat counters
+ * @map: HW stat map (hw_stats => gather_stats)
+ * @max_stat_idx: number of HW stats
+ */
+void irdma_update_stats(struct irdma_dev_hw_stats *hw_stats,
+ struct irdma_gather_stats *gather_stats,
+ struct irdma_gather_stats *last_gather_stats,
+ const struct irdma_hw_stat_map *map, u16 max_stat_idx)
+{
+ u64 *stats_val = hw_stats->stats_val;
+ u16 i;
+
+ for (i = 0; i < max_stat_idx; i++) {
+ u64 new_val = irdma_stat_val(gather_stats->val, map[i].byteoff,
+ map[i].bitoff, map[i].bitmask);
+ u64 last_val = irdma_stat_val(last_gather_stats->val,
+ map[i].byteoff, map[i].bitoff,
+ map[i].bitmask);
+
+ stats_val[i] +=
+ irdma_stat_delta(new_val, last_val, map[i].bitmask);
+ }
+
+ memcpy(last_gather_stats, gather_stats, sizeof(*last_gather_stats));
+}
diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h
new file mode 100644
index 000000000000..983b22d7ae23
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/defs.h
@@ -0,0 +1,1184 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#ifndef IRDMA_DEFS_H
+#define IRDMA_DEFS_H
+
+#define IRDMA_FIRST_USER_QP_ID 3
+
+#define ECN_CODE_PT_VAL 2
+
+#define IRDMA_PUSH_OFFSET (8 * 1024 * 1024)
+#define IRDMA_PF_FIRST_PUSH_PAGE_INDEX 16
+#define IRDMA_PF_BAR_RSVD (60 * 1024)
+
+#define IRDMA_PE_DB_SIZE_4M 1
+#define IRDMA_PE_DB_SIZE_8M 2
+
+#define IRDMA_IRD_HW_SIZE_4_GEN3 0
+#define IRDMA_IRD_HW_SIZE_8_GEN3 1
+#define IRDMA_IRD_HW_SIZE_16_GEN3 2
+#define IRDMA_IRD_HW_SIZE_32_GEN3 3
+#define IRDMA_IRD_HW_SIZE_64_GEN3 4
+#define IRDMA_IRD_HW_SIZE_128_GEN3 5
+#define IRDMA_IRD_HW_SIZE_256_GEN3 6
+#define IRDMA_IRD_HW_SIZE_512_GEN3 7
+#define IRDMA_IRD_HW_SIZE_1024_GEN3 8
+#define IRDMA_IRD_HW_SIZE_2048_GEN3 9
+#define IRDMA_IRD_HW_SIZE_4096_GEN3 10
+
+#define IRDMA_IRD_HW_SIZE_4 0
+#define IRDMA_IRD_HW_SIZE_16 1
+#define IRDMA_IRD_HW_SIZE_64 2
+#define IRDMA_IRD_HW_SIZE_128 3
+#define IRDMA_IRD_HW_SIZE_256 4
+
+enum irdma_protocol_used {
+ IRDMA_ANY_PROTOCOL = 0,
+ IRDMA_IWARP_PROTOCOL_ONLY = 1,
+ IRDMA_ROCE_PROTOCOL_ONLY = 2,
+};
+
+#define IRDMA_QP_STATE_INVALID 0
+#define IRDMA_QP_STATE_IDLE 1
+#define IRDMA_QP_STATE_RTS 2
+#define IRDMA_QP_STATE_CLOSING 3
+#define IRDMA_QP_STATE_SQD 3
+#define IRDMA_QP_STATE_RTR 4
+#define IRDMA_QP_STATE_TERMINATE 5
+#define IRDMA_QP_STATE_ERROR 6
+
+#define IRDMA_MAX_TRAFFIC_CLASS 8
+#define IRDMA_MAX_STATS_COUNT_GEN_1 12
+#define IRDMA_MAX_USER_PRIORITY 8
+#define IRDMA_MAX_APPS 8
+#define IRDMA_MAX_STATS_COUNT 128
+#define IRDMA_FIRST_NON_PF_STAT 4
+
+#define IRDMA_MIN_MTU_IPV4 576
+#define IRDMA_MIN_MTU_IPV6 1280
+#define IRDMA_MTU_TO_MSS_IPV4 40
+#define IRDMA_MTU_TO_MSS_IPV6 60
+#define IRDMA_DEFAULT_MTU 1500
+
+#define Q2_FPSN_OFFSET 64
+#define TERM_DDP_LEN_TAGGED 14
+#define TERM_DDP_LEN_UNTAGGED 18
+#define TERM_RDMA_LEN 28
+#define RDMA_OPCODE_M 0x0f
+#define RDMA_READ_REQ_OPCODE 1
+#define Q2_BAD_FRAME_OFFSET 72
+#define CQE_MAJOR_DRV 0x8000
+
+#define IRDMA_TERM_SENT 1
+#define IRDMA_TERM_RCVD 2
+#define IRDMA_TERM_DONE 4
+#define IRDMA_MAC_HLEN 14
+
+#define IRDMA_CQP_WAIT_POLL_REGS 1
+#define IRDMA_CQP_WAIT_POLL_CQ 2
+#define IRDMA_CQP_WAIT_EVENT 3
+
+#define IRDMA_AE_SOURCE_RSVD 0x0
+#define IRDMA_AE_SOURCE_RQ 0x1
+#define IRDMA_AE_SOURCE_RQ_0011 0x3
+
+#define IRDMA_AE_SOURCE_CQ 0x2
+#define IRDMA_AE_SOURCE_CQ_0110 0x6
+#define IRDMA_AE_SOURCE_CQ_1010 0xa
+#define IRDMA_AE_SOURCE_CQ_1110 0xe
+
+#define IRDMA_AE_SOURCE_SQ 0x5
+#define IRDMA_AE_SOURCE_SQ_0111 0x7
+
+#define IRDMA_AE_SOURCE_IN_RR_WR 0x9
+#define IRDMA_AE_SOURCE_IN_RR_WR_1011 0xb
+#define IRDMA_AE_SOURCE_OUT_RR 0xd
+#define IRDMA_AE_SOURCE_OUT_RR_1111 0xf
+
+#define IRDMA_TCP_STATE_NON_EXISTENT 0
+#define IRDMA_TCP_STATE_CLOSED 1
+#define IRDMA_TCP_STATE_LISTEN 2
+#define IRDMA_STATE_SYN_SEND 3
+#define IRDMA_TCP_STATE_SYN_RECEIVED 4
+#define IRDMA_TCP_STATE_ESTABLISHED 5
+#define IRDMA_TCP_STATE_CLOSE_WAIT 6
+#define IRDMA_TCP_STATE_FIN_WAIT_1 7
+#define IRDMA_TCP_STATE_CLOSING 8
+#define IRDMA_TCP_STATE_LAST_ACK 9
+#define IRDMA_TCP_STATE_FIN_WAIT_2 10
+#define IRDMA_TCP_STATE_TIME_WAIT 11
+#define IRDMA_TCP_STATE_RESERVED_1 12
+#define IRDMA_TCP_STATE_RESERVED_2 13
+#define IRDMA_TCP_STATE_RESERVED_3 14
+#define IRDMA_TCP_STATE_RESERVED_4 15
+
+#define IRDMA_CQP_SW_SQSIZE_4 4
+#define IRDMA_CQP_SW_SQSIZE_2048 2048
+
+#define IRDMA_CQ_TYPE_IWARP 1
+#define IRDMA_CQ_TYPE_ILQ 2
+#define IRDMA_CQ_TYPE_IEQ 3
+#define IRDMA_CQ_TYPE_CQP 4
+
+#define IRDMA_DONE_COUNT 1000
+#define IRDMA_SLEEP_COUNT 10
+
+#define IRDMA_UPDATE_SD_BUFF_SIZE 128
+#define IRDMA_FEATURE_BUF_SIZE (8 * IRDMA_MAX_FEATURES)
+
+#define ENABLE_LOC_MEM 63
+#define IRDMA_ATOMICS_ALLOWED_BIT 1
+#define MAX_PBLE_PER_SD 0x40000
+#define MAX_PBLE_SD_PER_FCN 0x400
+#define MAX_MR_PER_SD 0x8000
+#define MAX_MR_SD_PER_FCN 0x80
+#define IRDMA_PBLE_COMMIT_OFFSET 112
+#define IRDMA_MAX_QUANTA_PER_WR 8
+
+#define IRDMA_QP_SW_MAX_WQ_QUANTA 32768
+#define IRDMA_QP_SW_MAX_SQ_QUANTA 32768
+#define IRDMA_QP_SW_MAX_RQ_QUANTA 32768
+#define IRDMA_MAX_QP_WRS(max_quanta_per_wr) \
+ ((IRDMA_QP_SW_MAX_WQ_QUANTA - IRDMA_SQ_RSVD) / (max_quanta_per_wr))
+#define IRDMA_SRQ_MIN_QUANTA 8
+#define IRDMA_SRQ_MAX_QUANTA 262144
+#define IRDMA_MAX_SRQ_WRS \
+ ((IRDMA_SRQ_MAX_QUANTA - IRDMA_RQ_RSVD) / IRDMA_MAX_QUANTA_PER_WR)
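/*
 * Worked example for the two limits above, plugging in the values defined in
 * this header (IRDMA_SQ_RSVD = 258, IRDMA_RQ_RSVD = 1,
 * IRDMA_MAX_QUANTA_PER_WR = 8): IRDMA_MAX_QP_WRS(IRDMA_MAX_QUANTA_PER_WR)
 * expands to (32768 - 258) / 8 == 4063 work requests, and IRDMA_MAX_SRQ_WRS
 * to (262144 - 1) / 8 == 32767 (integer division in both cases).
 */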
+
+#define IRDMAQP_TERM_SEND_TERM_AND_FIN 0
+#define IRDMAQP_TERM_SEND_TERM_ONLY 1
+#define IRDMAQP_TERM_SEND_FIN_ONLY 2
+#define IRDMAQP_TERM_DONOT_SEND_TERM_OR_FIN 3
+
+#define IRDMA_QP_TYPE_IWARP 1
+#define IRDMA_QP_TYPE_UDA 2
+#define IRDMA_QP_TYPE_ROCE_RC 3
+#define IRDMA_QP_TYPE_ROCE_UD 4
+
+#define IRDMA_HW_PAGE_SIZE 4096
+#define IRDMA_HW_PAGE_SHIFT 12
+#define IRDMA_CQE_QTYPE_RQ 0
+#define IRDMA_CQE_QTYPE_SQ 1
+
+#define IRDMA_QP_SW_MIN_WQSIZE 8u /* in WRs */
+#define IRDMA_QP_WQE_MIN_SIZE 32
+#define IRDMA_QP_WQE_MAX_SIZE 256
+#define IRDMA_QP_WQE_MIN_QUANTA 1
+#define IRDMA_MAX_RQ_WQE_SHIFT_GEN1 2
+#define IRDMA_MAX_RQ_WQE_SHIFT_GEN2 3
+
+#define IRDMA_SQ_RSVD 258
+#define IRDMA_RQ_RSVD 1
+
+#define IRDMA_FEATURE_RTS_AE BIT_ULL(0)
+#define IRDMA_FEATURE_CQ_RESIZE BIT_ULL(1)
+#define IRDMA_FEATURE_64_BYTE_CQE BIT_ULL(5)
+#define IRDMA_FEATURE_ATOMIC_OPS BIT_ULL(6)
+#define IRDMA_FEATURE_SRQ BIT_ULL(7)
+#define IRDMA_FEATURE_CQE_TIMESTAMPING BIT_ULL(8)
+
+#define IRDMAQP_OP_RDMA_WRITE 0x00
+#define IRDMAQP_OP_RDMA_READ 0x01
+#define IRDMAQP_OP_RDMA_SEND 0x03
+#define IRDMAQP_OP_RDMA_SEND_INV 0x04
+#define IRDMAQP_OP_RDMA_SEND_SOL_EVENT 0x05
+#define IRDMAQP_OP_RDMA_SEND_SOL_EVENT_INV 0x06
+#define IRDMAQP_OP_BIND_MW 0x08
+#define IRDMAQP_OP_FAST_REGISTER 0x09
+#define IRDMAQP_OP_LOCAL_INVALIDATE 0x0a
+#define IRDMAQP_OP_RDMA_READ_LOC_INV 0x0b
+#define IRDMAQP_OP_NOP 0x0c
+#define IRDMAQP_OP_RDMA_WRITE_SOL 0x0d
+#define IRDMAQP_OP_ATOMIC_FETCH_ADD 0x0f
+#define IRDMAQP_OP_ATOMIC_COMPARE_SWAP_ADD 0x11
+#define IRDMAQP_OP_GEN_RTS_AE 0x30
+
+enum irdma_cqp_op_type {
+ IRDMA_OP_CEQ_DESTROY = 1,
+ IRDMA_OP_AEQ_DESTROY = 2,
+ IRDMA_OP_DELETE_ARP_CACHE_ENTRY = 3,
+ IRDMA_OP_MANAGE_APBVT_ENTRY = 4,
+ IRDMA_OP_CEQ_CREATE = 5,
+ IRDMA_OP_AEQ_CREATE = 6,
+ IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY = 7,
+ IRDMA_OP_QP_MODIFY = 8,
+ IRDMA_OP_QP_UPLOAD_CONTEXT = 9,
+ IRDMA_OP_CQ_CREATE = 10,
+ IRDMA_OP_CQ_DESTROY = 11,
+ IRDMA_OP_QP_CREATE = 12,
+ IRDMA_OP_QP_DESTROY = 13,
+ IRDMA_OP_ALLOC_STAG = 14,
+ IRDMA_OP_MR_REG_NON_SHARED = 15,
+ IRDMA_OP_DEALLOC_STAG = 16,
+ IRDMA_OP_MW_ALLOC = 17,
+ IRDMA_OP_QP_FLUSH_WQES = 18,
+ IRDMA_OP_ADD_ARP_CACHE_ENTRY = 19,
+ IRDMA_OP_MANAGE_PUSH_PAGE = 20,
+ IRDMA_OP_UPDATE_PE_SDS = 21,
+ IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE = 22,
+ IRDMA_OP_SUSPEND = 23,
+ IRDMA_OP_RESUME = 24,
+ IRDMA_OP_MANAGE_VF_PBLE_BP = 25,
+ IRDMA_OP_QUERY_FPM_VAL = 26,
+ IRDMA_OP_COMMIT_FPM_VAL = 27,
+ IRDMA_OP_AH_CREATE = 28,
+ IRDMA_OP_AH_MODIFY = 29,
+ IRDMA_OP_AH_DESTROY = 30,
+ IRDMA_OP_MC_CREATE = 31,
+ IRDMA_OP_MC_DESTROY = 32,
+ IRDMA_OP_MC_MODIFY = 33,
+ IRDMA_OP_STATS_ALLOCATE = 34,
+ IRDMA_OP_STATS_FREE = 35,
+ IRDMA_OP_STATS_GATHER = 36,
+ IRDMA_OP_WS_ADD_NODE = 37,
+ IRDMA_OP_WS_MODIFY_NODE = 38,
+ IRDMA_OP_WS_DELETE_NODE = 39,
+ IRDMA_OP_WS_FAILOVER_START = 40,
+ IRDMA_OP_WS_FAILOVER_COMPLETE = 41,
+ IRDMA_OP_SET_UP_MAP = 42,
+ IRDMA_OP_GEN_AE = 43,
+ IRDMA_OP_QUERY_RDMA_FEATURES = 44,
+ IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY = 45,
+ IRDMA_OP_ADD_LOCAL_MAC_ENTRY = 46,
+ IRDMA_OP_DELETE_LOCAL_MAC_ENTRY = 47,
+ IRDMA_OP_CQ_MODIFY = 48,
+ IRDMA_OP_SRQ_CREATE = 49,
+ IRDMA_OP_SRQ_MODIFY = 50,
+ IRDMA_OP_SRQ_DESTROY = 51,
+
+ /* Must be last entry */
+ IRDMA_MAX_CQP_OPS = 52,
+};
+
+/* CQP SQ WQES */
+#define IRDMA_CQP_OP_CREATE_QP 0
+#define IRDMA_CQP_OP_MODIFY_QP 0x1
+#define IRDMA_CQP_OP_DESTROY_QP 0x02
+#define IRDMA_CQP_OP_CREATE_CQ 0x03
+#define IRDMA_CQP_OP_MODIFY_CQ 0x04
+#define IRDMA_CQP_OP_DESTROY_CQ 0x05
+#define IRDMA_CQP_OP_CREATE_SRQ 0x06
+#define IRDMA_CQP_OP_MODIFY_SRQ 0x07
+#define IRDMA_CQP_OP_DESTROY_SRQ 0x08
+#define IRDMA_CQP_OP_ALLOC_STAG 0x09
+#define IRDMA_CQP_OP_REG_MR 0x0a
+#define IRDMA_CQP_OP_QUERY_STAG 0x0b
+#define IRDMA_CQP_OP_REG_SMR 0x0c
+#define IRDMA_CQP_OP_DEALLOC_STAG 0x0d
+#define IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE 0x0e
+#define IRDMA_CQP_OP_MANAGE_ARP 0x0f
+#define IRDMA_CQP_OP_MANAGE_VF_PBLE_BP 0x10
+#define IRDMA_CQP_OP_MANAGE_PUSH_PAGES 0x11
+#define IRDMA_CQP_OP_QUERY_RDMA_FEATURES 0x12
+#define IRDMA_CQP_OP_UPLOAD_CONTEXT 0x13
+#define IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY 0x14
+#define IRDMA_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE 0x15
+#define IRDMA_CQP_OP_CREATE_CEQ 0x16
+#define IRDMA_CQP_OP_DESTROY_CEQ 0x18
+#define IRDMA_CQP_OP_CREATE_AEQ 0x19
+#define IRDMA_CQP_OP_DESTROY_AEQ 0x1b
+#define IRDMA_CQP_OP_CREATE_ADDR_HANDLE 0x1c
+#define IRDMA_CQP_OP_MODIFY_ADDR_HANDLE 0x1d
+#define IRDMA_CQP_OP_DESTROY_ADDR_HANDLE 0x1e
+#define IRDMA_CQP_OP_UPDATE_PE_SDS 0x1f
+#define IRDMA_CQP_OP_QUERY_FPM_VAL 0x20
+#define IRDMA_CQP_OP_COMMIT_FPM_VAL 0x21
+#define IRDMA_CQP_OP_FLUSH_WQES 0x22
+/* IRDMA_CQP_OP_GEN_AE is the same value as IRDMA_CQP_OP_FLUSH_WQES */
+#define IRDMA_CQP_OP_GEN_AE 0x22
+#define IRDMA_CQP_OP_MANAGE_APBVT 0x23
+#define IRDMA_CQP_OP_NOP 0x24
+#define IRDMA_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY 0x25
+#define IRDMA_CQP_OP_CREATE_MCAST_GRP 0x26
+#define IRDMA_CQP_OP_MODIFY_MCAST_GRP 0x27
+#define IRDMA_CQP_OP_DESTROY_MCAST_GRP 0x28
+#define IRDMA_CQP_OP_SUSPEND_QP 0x29
+#define IRDMA_CQP_OP_RESUME_QP 0x2a
+#define IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED 0x2b
+#define IRDMA_CQP_OP_WORK_SCHED_NODE 0x2c
+#define IRDMA_CQP_OP_MANAGE_STATS 0x2d
+#define IRDMA_CQP_OP_GATHER_STATS 0x2e
+#define IRDMA_CQP_OP_UP_MAP 0x2f
+
+#define FLD_LS_64(dev, val, field) \
+ (((u64)(val) << (dev)->hw_shifts[field ## _S]) & (dev)->hw_masks[field ## _M])
+#define FLD_RS_64(dev, val, field) \
+ ((u64)((val) & (dev)->hw_masks[field ## _M]) >> (dev)->hw_shifts[field ## _S])
+#define FLD_LS_32(dev, val, field) \
+ (((val) << (dev)->hw_shifts[field ## _S]) & (dev)->hw_masks[field ## _M])
+#define FLD_RS_32(dev, val, field) \
+ ((u64)((val) & (dev)->hw_masks[field ## _M]) >> (dev)->hw_shifts[field ## _S])
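/*
 * A small sketch, not from the patch itself, of how the FLD_LS_64/FLD_RS_64
 * pair is meant to be used: the shift and mask for a field are looked up in
 * the per-device hw_shifts[]/hw_masks[] tables, so one code path can serve
 * hardware generations that place the field differently.  EXAMPLE_FIELD and
 * its table slot below are hypothetical placeholders.
 */
#define EXAMPLE_FIELD_S 0	/* hypothetical index into dev->hw_shifts[] */
#define EXAMPLE_FIELD_M 0	/* hypothetical index into dev->hw_masks[] */

static u64 example_fld_roundtrip(struct irdma_sc_dev *dev, u64 qword)
{
	/* Pack 0x2a into wherever this generation keeps EXAMPLE_FIELD ... */
	qword |= FLD_LS_64(dev, 0x2a, EXAMPLE_FIELD);
	/* ... and read it straight back: 0x2a, provided the field is wide enough. */
	return FLD_RS_64(dev, qword, EXAMPLE_FIELD);
}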
+
+#define IRDMA_MAX_STATS_24 0xffffffULL
+#define IRDMA_MAX_STATS_32 0xffffffffULL
+#define IRDMA_MAX_STATS_48 0xffffffffffffULL
+#define IRDMA_MAX_STATS_56 0xffffffffffffffULL
+#define IRDMA_MAX_STATS_64 0xffffffffffffffffULL
+
+#define IRDMA_MAX_CQ_READ_THRESH 0x3FFFF
+#define IRDMA_CQPSQ_QHASH_VLANID GENMASK_ULL(43, 32)
+#define IRDMA_CQPSQ_QHASH_QPN GENMASK_ULL(49, 32)
+#define IRDMA_CQPSQ_QHASH_QS_HANDLE GENMASK_ULL(9, 0)
+#define IRDMA_CQPSQ_QHASH_SRC_PORT GENMASK_ULL(31, 16)
+#define IRDMA_CQPSQ_QHASH_DEST_PORT GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_QHASH_ADDR0 GENMASK_ULL(63, 32)
+#define IRDMA_CQPSQ_QHASH_ADDR1 GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_QHASH_ADDR2 GENMASK_ULL(63, 32)
+#define IRDMA_CQPSQ_QHASH_ADDR3 GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_QHASH_WQEVALID BIT_ULL(63)
+#define IRDMA_CQPSQ_QHASH_OPCODE GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_QHASH_MANAGE GENMASK_ULL(62, 61)
+#define IRDMA_CQPSQ_QHASH_IPV4VALID BIT_ULL(60)
+#define IRDMA_CQPSQ_QHASH_VLANVALID BIT_ULL(59)
+#define IRDMA_CQPSQ_QHASH_ENTRYTYPE GENMASK_ULL(44, 42)
+#define IRDMA_CQPSQ_STATS_WQEVALID BIT_ULL(63)
+#define IRDMA_CQPSQ_STATS_ALLOC_INST BIT_ULL(62)
+#define IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX BIT_ULL(60)
+#define IRDMA_CQPSQ_STATS_USE_INST BIT_ULL(61)
+#define IRDMA_CQPSQ_STATS_OP GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_STATS_INST_INDEX GENMASK_ULL(6, 0)
+#define IRDMA_CQPSQ_STATS_HMC_FCN_INDEX GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_WS_WQEVALID BIT_ULL(63)
+#define IRDMA_CQPSQ_WS_NODEOP GENMASK_ULL(55, 52)
+#define IRDMA_SD_MAX GENMASK_ULL(15, 0)
+#define IRDMA_MEM_MAX GENMASK_ULL(15, 0)
+#define IRDMA_QP_MEM_LOC GENMASK_ULL(47, 44)
+#define IRDMA_MR_MEM_LOC GENMASK_ULL(27, 24)
+
+#define IRDMA_CQPSQ_WS_ENABLENODE BIT_ULL(62)
+#define IRDMA_CQPSQ_WS_NODETYPE BIT_ULL(61)
+#define IRDMA_CQPSQ_WS_PRIOTYPE GENMASK_ULL(60, 59)
+#define IRDMA_CQPSQ_WS_TC GENMASK_ULL(58, 56)
+#define IRDMA_CQPSQ_WS_VMVFTYPE GENMASK_ULL(55, 54)
+#define IRDMA_CQPSQ_WS_VMVFNUM GENMASK_ULL(51, 42)
+#define IRDMA_CQPSQ_WS_OP GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_WS_PARENTID GENMASK_ULL(29, 16)
+#define IRDMA_CQPSQ_WS_NODEID GENMASK_ULL(13, 0)
+#define IRDMA_CQPSQ_WS_VSI GENMASK_ULL(63, 48)
+#define IRDMA_CQPSQ_WS_WEIGHT GENMASK_ULL(38, 32)
+
+#define IRDMA_CQPSQ_UP_WQEVALID BIT_ULL(63)
+#define IRDMA_CQPSQ_UP_USEVLAN BIT_ULL(62)
+#define IRDMA_CQPSQ_UP_USEOVERRIDE BIT_ULL(61)
+#define IRDMA_CQPSQ_UP_OP GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_UP_HMCFCNIDX GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_UP_CNPOVERRIDE GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID BIT_ULL(63)
+#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_OP GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_HW_MODEL_USED GENMASK_ULL(47, 32)
+#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_HW_MAJOR_VERSION GENMASK_ULL(23, 16)
+#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_HW_MINOR_VERSION GENMASK_ULL(7, 0)
+#define IRDMA_CQPHC_SQSIZE GENMASK_ULL(11, 8)
+#define IRDMA_CQPHC_DISABLE_PFPDUS BIT_ULL(1)
+#define IRDMA_CQPHC_ROCEV2_RTO_POLICY BIT_ULL(2)
+#define IRDMA_CQPHC_PROTOCOL_USED GENMASK_ULL(4, 3)
+#define IRDMA_CQPHC_MIN_RATE GENMASK_ULL(51, 48)
+#define IRDMA_CQPHC_MIN_DEC_FACTOR GENMASK_ULL(59, 56)
+#define IRDMA_CQPHC_DCQCN_T GENMASK_ULL(15, 0)
+#define IRDMA_CQPHC_HAI_FACTOR GENMASK_ULL(47, 32)
+#define IRDMA_CQPHC_RAI_FACTOR GENMASK_ULL(63, 48)
+#define IRDMA_CQPHC_DCQCN_B GENMASK_ULL(24, 0)
+#define IRDMA_CQPHC_DCQCN_F GENMASK_ULL(27, 25)
+#define IRDMA_CQPHC_CC_CFG_VALID BIT_ULL(31)
+#define IRDMA_CQPHC_RREDUCE_MPERIOD GENMASK_ULL(63, 32)
+#define IRDMA_CQPHC_HW_MINVER GENMASK_ULL(15, 0)
+
+#define IRDMA_CQPHC_HW_MAJVER_GEN_1 0
+#define IRDMA_CQPHC_HW_MAJVER_GEN_2 1
+#define IRDMA_CQPHC_HW_MAJVER_GEN_3 2
+#define IRDMA_CQPHC_HW_MAJVER GENMASK_ULL(31, 16)
+#define IRDMA_CQPHC_CEQPERVF GENMASK_ULL(39, 32)
+
+#define IRDMA_CQPHC_ENABLED_VFS GENMASK_ULL(37, 32)
+
+#define IRDMA_CQPHC_HMC_PROFILE GENMASK_ULL(2, 0)
+#define IRDMA_CQPHC_SVER GENMASK_ULL(31, 24)
+#define IRDMA_CQPHC_SQBASE GENMASK_ULL(63, 9)
+
+#define IRDMA_CQPHC_TIMESTAMP_OVERRIDE BIT_ULL(5)
+#define IRDMA_CQPHC_TS_SHIFT GENMASK_ULL(12, 8)
+#define IRDMA_CQPHC_EN_FINE_GRAINED_TIMERS BIT_ULL(0)
+
+#define IRDMA_CQPHC_OOISC_BLKSIZE GENMASK_ULL(63, 60)
+#define IRDMA_CQPHC_RRSP_BLKSIZE GENMASK_ULL(59, 56)
+#define IRDMA_CQPHC_Q1_BLKSIZE GENMASK_ULL(55, 52)
+#define IRDMA_CQPHC_XMIT_BLKSIZE GENMASK_ULL(51, 48)
+#define IRDMA_CQPHC_BLKSIZES_VALID BIT_ULL(4)
+
+#define IRDMA_CQPHC_QPCTX GENMASK_ULL(63, 0)
+#define IRDMA_QP_DBSA_HW_SQ_TAIL GENMASK_ULL(14, 0)
+#define IRDMA_CQ_DBSA_CQEIDX GENMASK_ULL(19, 0)
+#define IRDMA_CQ_DBSA_SW_CQ_SELECT GENMASK_ULL(13, 0)
+#define IRDMA_CQ_DBSA_ARM_NEXT BIT_ULL(14)
+#define IRDMA_CQ_DBSA_ARM_NEXT_SE BIT_ULL(15)
+#define IRDMA_CQ_DBSA_ARM_SEQ_NUM GENMASK_ULL(17, 16)
+
+/* CQP and iWARP Completion Queue */
+#define IRDMA_CQ_QPCTX IRDMA_CQPHC_QPCTX
+
+#define IRDMA_CCQ_OPRETVAL GENMASK_ULL(31, 0)
+
+#define IRDMA_CCQ_DEFINFO GENMASK_ULL(63, 32)
+
+#define IRDMA_CQ_MINERR GENMASK_ULL(15, 0)
+#define IRDMA_CQ_MAJERR GENMASK_ULL(31, 16)
+#define IRDMA_CQ_WQEIDX GENMASK_ULL(46, 32)
+#define IRDMA_CQ_EXTCQE BIT_ULL(50)
+#define IRDMA_OOO_CMPL BIT_ULL(54)
+#define IRDMA_CQ_ERROR BIT_ULL(55)
+#define IRDMA_CQ_SQ BIT_ULL(62)
+
+#define IRDMA_CQ_SRQ BIT_ULL(52)
+#define IRDMA_CQ_VALID BIT_ULL(63)
+#define IRDMA_CQ_IMMVALID BIT_ULL(62)
+#define IRDMA_CQ_UDSMACVALID BIT_ULL(61)
+#define IRDMA_CQ_UDVLANVALID BIT_ULL(60)
+#define IRDMA_CQ_UDSMAC GENMASK_ULL(47, 0)
+#define IRDMA_CQ_UDVLAN GENMASK_ULL(63, 48)
+
+#define IRDMA_CQ_IMMDATALOW32 GENMASK_ULL(31, 0)
+#define IRDMA_CQ_IMMDATAUP32 GENMASK_ULL(63, 32)
+#define IRDMACQ_PAYLDLEN GENMASK_ULL(31, 0)
+#define IRDMACQ_TCPSEQNUMRTT GENMASK_ULL(63, 32)
+#define IRDMACQ_INVSTAG GENMASK_ULL(31, 0)
+#define IRDMACQ_QPID GENMASK_ULL(55, 32)
+
+#define IRDMACQ_UDSRCQPN GENMASK_ULL(31, 0)
+#define IRDMACQ_PSHDROP BIT_ULL(51)
+#define IRDMACQ_STAG BIT_ULL(53)
+#define IRDMACQ_IPV4 BIT_ULL(53)
+#define IRDMACQ_SOEVENT BIT_ULL(54)
+#define IRDMACQ_OP GENMASK_ULL(61, 56)
+
+#define IRDMA_CEQE_CQCTX GENMASK_ULL(62, 0)
+#define IRDMA_CEQE_VALID BIT_ULL(63)
+
+/* AEQE format */
+#define IRDMA_AEQE_COMPCTX IRDMA_CQPHC_QPCTX
+#define IRDMA_AEQE_QPCQID_LOW GENMASK_ULL(17, 0)
+#define IRDMA_AEQE_QPCQID_HI BIT_ULL(46)
+#define IRDMA_AEQE_WQDESCIDX GENMASK_ULL(32, 18)
+#define IRDMA_AEQE_OVERFLOW BIT_ULL(33)
+#define IRDMA_AEQE_AECODE GENMASK_ULL(45, 34)
+#define IRDMA_AEQE_AESRC GENMASK_ULL(53, 50)
+#define IRDMA_AEQE_IWSTATE GENMASK_ULL(56, 54)
+#define IRDMA_AEQE_TCPSTATE GENMASK_ULL(60, 57)
+#define IRDMA_AEQE_Q2DATA GENMASK_ULL(62, 61)
+#define IRDMA_AEQE_VALID BIT_ULL(63)
+
+#define IRDMA_AEQE_Q2DATA_GEN_3 GENMASK_ULL(5, 4)
+#define IRDMA_AEQE_TCPSTATE_GEN_3 GENMASK_ULL(3, 0)
+#define IRDMA_AEQE_QPCQID_GEN_3 GENMASK_ULL(24, 0)
+#define IRDMA_AEQE_AECODE_GEN_3 GENMASK_ULL(61, 50)
+#define IRDMA_AEQE_OVERFLOW_GEN_3 BIT_ULL(62)
+#define IRDMA_AEQE_WQDESCIDX_GEN_3 GENMASK_ULL(49, 32)
+#define IRDMA_AEQE_IWSTATE_GEN_3 GENMASK_ULL(31, 29)
+#define IRDMA_AEQE_AESRC_GEN_3 GENMASK_ULL(28, 25)
+#define IRDMA_AEQE_CMPL_CTXT_S 6
+#define IRDMA_AEQE_CMPL_CTXT GENMASK_ULL(63, 6)
+
+#define IRDMA_UDA_QPSQ_NEXT_HDR GENMASK_ULL(23, 16)
+#define IRDMA_UDA_QPSQ_OPCODE GENMASK_ULL(37, 32)
+#define IRDMA_UDA_QPSQ_L4LEN GENMASK_ULL(45, 42)
+#define IRDMA_GEN1_UDA_QPSQ_L4LEN GENMASK_ULL(27, 24)
+#define IRDMA_UDA_QPSQ_AHIDX GENMASK_ULL(16, 0)
+#define IRDMA_UDA_QPSQ_VALID BIT_ULL(63)
+#define IRDMA_UDA_QPSQ_SIGCOMPL BIT_ULL(62)
+#define IRDMA_UDA_QPSQ_MACLEN GENMASK_ULL(62, 56)
+#define IRDMA_UDA_QPSQ_IPLEN GENMASK_ULL(54, 48)
+#define IRDMA_UDA_QPSQ_L4T GENMASK_ULL(31, 30)
+#define IRDMA_UDA_QPSQ_IIPT GENMASK_ULL(29, 28)
+#define IRDMA_UDA_PAYLOADLEN GENMASK_ULL(13, 0)
+#define IRDMA_UDA_HDRLEN GENMASK_ULL(24, 16)
+#define IRDMA_VLAN_TAG_VALID BIT_ULL(50)
+#define IRDMA_UDA_L3PROTO GENMASK_ULL(1, 0)
+#define IRDMA_UDA_L4PROTO GENMASK_ULL(17, 16)
+#define IRDMA_UDA_QPSQ_DOLOOPBACK BIT_ULL(44)
+#define IRDMA_CQPSQ_BUFSIZE GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_OPCODE GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_WQEVALID BIT_ULL(63)
+#define IRDMA_CQPSQ_TPHVAL GENMASK_ULL(7, 0)
+
+#define IRDMA_CQPSQ_VSIIDX GENMASK_ULL(23, 8)
+#define IRDMA_CQPSQ_TPHEN BIT_ULL(60)
+
+#define IRDMA_CQPSQ_PBUFADDR IRDMA_CQPHC_QPCTX
+
+#define IRDMA_CQPSQ_PASID GENMASK_ULL(51, 32)
+#define IRDMA_CQPSQ_PASID_VALID BIT_ULL(62)
+
+/* Create/Modify/Destroy QP */
+
+#define IRDMA_CQPSQ_QP_NEWMSS GENMASK_ULL(45, 32)
+#define IRDMA_CQPSQ_QP_TERMLEN GENMASK_ULL(51, 48)
+
+#define IRDMA_CQPSQ_QP_QPCTX IRDMA_CQPHC_QPCTX
+
+#define IRDMA_CQPSQ_QP_QPID_S 0
+#define IRDMA_CQPSQ_QP_QPID_M (0xFFFFFFUL)
+
+#define IRDMA_CQPSQ_QP_OP_S 32
+#define IRDMA_CQPSQ_QP_OP_M IRDMACQ_OP_M
+#define IRDMA_CQPSQ_QP_ORDVALID BIT_ULL(42)
+#define IRDMA_CQPSQ_QP_TOECTXVALID BIT_ULL(43)
+#define IRDMA_CQPSQ_QP_CACHEDVARVALID BIT_ULL(44)
+#define IRDMA_CQPSQ_QP_VQ BIT_ULL(45)
+#define IRDMA_CQPSQ_QP_FORCELOOPBACK BIT_ULL(46)
+#define IRDMA_CQPSQ_QP_CQNUMVALID BIT_ULL(47)
+#define IRDMA_CQPSQ_QP_QPTYPE GENMASK_ULL(50, 48)
+#define IRDMA_CQPSQ_QP_MACVALID BIT_ULL(51)
+#define IRDMA_CQPSQ_QP_MSSCHANGE BIT_ULL(52)
+
+#define IRDMA_CQPSQ_QP_IGNOREMWBOUND BIT_ULL(54)
+#define IRDMA_CQPSQ_QP_REMOVEHASHENTRY BIT_ULL(55)
+#define IRDMA_CQPSQ_QP_TERMACT GENMASK_ULL(57, 56)
+#define IRDMA_CQPSQ_QP_RESETCON BIT_ULL(58)
+#define IRDMA_CQPSQ_QP_ARPTABIDXVALID BIT_ULL(59)
+#define IRDMA_CQPSQ_QP_NEXTIWSTATE GENMASK_ULL(62, 60)
+
+#define IRDMA_CQPSQ_QP_DBSHADOWADDR IRDMA_CQPHC_QPCTX
+
+#define IRDMA_CQPSQ_SRQ_RQSIZE GENMASK_ULL(3, 0)
+#define IRDMA_CQPSQ_SRQ_RQ_WQE_SIZE GENMASK_ULL(5, 4)
+#define IRDMA_CQPSQ_SRQ_SRQ_LIMIT GENMASK_ULL(43, 32)
+#define IRDMA_CQPSQ_SRQ_SRQCTX GENMASK_ULL(63, 6)
+#define IRDMA_CQPSQ_SRQ_PD_ID GENMASK_ULL(39, 16)
+#define IRDMA_CQPSQ_SRQ_SRQ_ID GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_SRQ_OP GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_SRQ_LEAF_PBL_SIZE GENMASK_ULL(45, 44)
+#define IRDMA_CQPSQ_SRQ_VIRTMAP BIT_ULL(47)
+#define IRDMA_CQPSQ_SRQ_TPH_EN BIT_ULL(60)
+#define IRDMA_CQPSQ_SRQ_ARM_LIMIT_EVENT BIT_ULL(61)
+#define IRDMA_CQPSQ_SRQ_FIRST_PM_PBL_IDX GENMASK_ULL(27, 0)
+#define IRDMA_CQPSQ_SRQ_TPH_VALUE GENMASK_ULL(7, 0)
+#define IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR_S 8
+#define IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR GENMASK_ULL(63, 8)
+#define IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR_S 6
+#define IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR GENMASK_ULL(63, 6)
+
+#define IRDMA_CQPSQ_CQ_CQSIZE GENMASK_ULL(20, 0)
+#define IRDMA_CQPSQ_CQ_CQCTX GENMASK_ULL(62, 0)
+#define IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD GENMASK(17, 0)
+
+#define IRDMA_CQPSQ_CQ_CQID_HIGH GENMASK_ULL(52, 50)
+#define IRDMA_CQPSQ_CQ_CEQID_HIGH GENMASK_ULL(59, 54)
+#define IRDMA_CQPSQ_CQ_OP GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_CQ_CQRESIZE BIT_ULL(43)
+#define IRDMA_CQPSQ_CQ_LPBLSIZE GENMASK_ULL(45, 44)
+#define IRDMA_CQPSQ_CQ_CHKOVERFLOW BIT_ULL(46)
+#define IRDMA_CQPSQ_CQ_VIRTMAP BIT_ULL(47)
+#define IRDMA_CQPSQ_CQ_ENCEQEMASK BIT_ULL(48)
+#define IRDMA_CQPSQ_CQ_CEQIDVALID BIT_ULL(49)
+#define IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT BIT_ULL(61)
+#define IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
+
+/* Allocate/Register/Register Shared/Deallocate Stag */
+#define IRDMA_CQPSQ_STAG_VA_FBO IRDMA_CQPHC_QPCTX
+#define IRDMA_CQPSQ_STAG_STAGLEN GENMASK_ULL(45, 0)
+#define IRDMA_CQPSQ_STAG_KEY GENMASK_ULL(7, 0)
+#define IRDMA_CQPSQ_STAG_IDX GENMASK_ULL(31, 8)
+#define IRDMA_CQPSQ_STAG_IDX_S 8
+#define IRDMA_CQPSQ_STAG_PARENTSTAGIDX GENMASK_ULL(55, 32)
+#define IRDMA_CQPSQ_STAG_MR BIT_ULL(43)
+#define IRDMA_CQPSQ_STAG_MWTYPE BIT_ULL(42)
+#define IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY BIT_ULL(58)
+#define IRDMA_CQPSQ_STAG_PDID_HI GENMASK_ULL(59, 54)
+
+#define IRDMA_CQPSQ_STAG_LPBLSIZE IRDMA_CQPSQ_CQ_LPBLSIZE
+#define IRDMA_CQPSQ_STAG_HPAGESIZE GENMASK_ULL(47, 46)
+#define IRDMA_CQPSQ_STAG_ARIGHTS GENMASK_ULL(52, 48)
+#define IRDMA_CQPSQ_STAG_REMACCENABLED BIT_ULL(53)
+#define IRDMA_CQPSQ_STAG_VABASEDTO BIT_ULL(59)
+#define IRDMA_CQPSQ_STAG_USEHMCFNIDX BIT_ULL(60)
+#define IRDMA_CQPSQ_STAG_USEPFRID BIT_ULL(61)
+
+#define IRDMA_CQPSQ_STAG_PBA IRDMA_CQPHC_QPCTX
+#define IRDMA_CQPSQ_STAG_HMCFNIDX GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_STAG_REMOTE_ATOMIC_EN BIT_ULL(61)
+
+#define IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
+#define IRDMA_CQPSQ_QUERYSTAG_IDX IRDMA_CQPSQ_STAG_IDX
+#define IRDMA_CQPSQ_MLM_TABLEIDX GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_MLM_FREEENTRY BIT_ULL(62)
+#define IRDMA_CQPSQ_MLM_IGNORE_REF_CNT BIT_ULL(61)
+#define IRDMA_CQPSQ_MLM_MAC0 GENMASK_ULL(7, 0)
+#define IRDMA_CQPSQ_MLM_MAC1 GENMASK_ULL(15, 8)
+#define IRDMA_CQPSQ_MLM_MAC2 GENMASK_ULL(23, 16)
+#define IRDMA_CQPSQ_MLM_MAC3 GENMASK_ULL(31, 24)
+#define IRDMA_CQPSQ_MLM_MAC4 GENMASK_ULL(39, 32)
+#define IRDMA_CQPSQ_MLM_MAC5 GENMASK_ULL(47, 40)
+#define IRDMA_CQPSQ_MAT_REACHMAX GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_MAT_MACADDR GENMASK_ULL(47, 0)
+#define IRDMA_CQPSQ_MAT_ARPENTRYIDX GENMASK_ULL(11, 0)
+#define IRDMA_CQPSQ_MAT_ENTRYVALID BIT_ULL(42)
+#define IRDMA_CQPSQ_MAT_PERMANENT BIT_ULL(43)
+#define IRDMA_CQPSQ_MAT_QUERY BIT_ULL(44)
+#define IRDMA_CQPSQ_MVPBP_PD_ENTRY_CNT GENMASK_ULL(9, 0)
+#define IRDMA_CQPSQ_MVPBP_FIRST_PD_INX GENMASK_ULL(24, 16)
+#define IRDMA_CQPSQ_MVPBP_SD_INX GENMASK_ULL(43, 32)
+#define IRDMA_CQPSQ_MVPBP_INV_PD_ENT BIT_ULL(62)
+#define IRDMA_CQPSQ_MVPBP_PD_PLPBA GENMASK_ULL(63, 3)
+
+/* Manage Push Page - MPP */
+#define IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1 0xffff
+#define IRDMA_INVALID_PUSH_PAGE_INDEX 0xffffffff
+#define IRDMA_CQPSQ_MPP_PPIDX GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_MPP_PPTYPE GENMASK_ULL(61, 60)
+#define IRDMA_CQPSQ_MPP_FREE_PAGE BIT_ULL(62)
+
+/* Upload Context - UCTX */
+#define IRDMA_CQPSQ_UCTX_QPCTXADDR IRDMA_CQPHC_QPCTX
+#define IRDMA_CQPSQ_UCTX_QPID GENMASK_ULL(23, 0)
+#define IRDMA_CQPSQ_UCTX_QPTYPE GENMASK_ULL(51, 48)
+
+#define IRDMA_CQPSQ_UCTX_RAWFORMAT BIT_ULL(61)
+#define IRDMA_CQPSQ_UCTX_FREEZEQP BIT_ULL(62)
+
+#define IRDMA_CQPSQ_MHMC_VFIDX GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_MHMC_FREEPMFN BIT_ULL(62)
+
+#define IRDMA_CQPSQ_SHMCRP_HMC_PROFILE GENMASK_ULL(2, 0)
+#define IRDMA_CQPSQ_SHMCRP_VFNUM GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_CEQ_CEQSIZE GENMASK_ULL(21, 0)
+#define IRDMA_CQPSQ_CEQ_CEQID GENMASK_ULL(9, 0)
+
+#define IRDMA_CQPSQ_CEQ_CEQID_HIGH GENMASK_ULL(15, 10)
+
+#define IRDMA_CQPSQ_CEQ_LPBLSIZE IRDMA_CQPSQ_CQ_LPBLSIZE
+#define IRDMA_CQPSQ_CEQ_VMAP BIT_ULL(47)
+#define IRDMA_CQPSQ_CEQ_ITRNOEXPIRE BIT_ULL(46)
+#define IRDMA_CQPSQ_CEQ_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
+#define IRDMA_CQPSQ_AEQ_AEQECNT GENMASK_ULL(18, 0)
+#define IRDMA_CQPSQ_AEQ_LPBLSIZE IRDMA_CQPSQ_CQ_LPBLSIZE
+#define IRDMA_CQPSQ_AEQ_VMAP BIT_ULL(47)
+#define IRDMA_CQPSQ_AEQ_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
+
+#define IRDMA_COMMIT_FPM_QPCNT GENMASK_ULL(20, 0)
+#define IRDMA_COMMIT_FPM_BASE_S 32
+#define IRDMA_CQPSQ_CFPM_HMCFNID GENMASK_ULL(15, 0)
+
+#define IRDMA_CQPSQ_FWQE_AECODE GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_FWQE_AESOURCE GENMASK_ULL(19, 16)
+#define IRDMA_CQPSQ_FWQE_RQMNERR GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_FWQE_RQMJERR GENMASK_ULL(31, 16)
+#define IRDMA_CQPSQ_FWQE_SQMNERR GENMASK_ULL(47, 32)
+#define IRDMA_CQPSQ_FWQE_SQMJERR GENMASK_ULL(63, 48)
+#define IRDMA_CQPSQ_FWQE_QPID GENMASK_ULL(23, 0)
+#define IRDMA_CQPSQ_FWQE_GENERATE_AE BIT_ULL(59)
+#define IRDMA_CQPSQ_FWQE_USERFLCODE BIT_ULL(60)
+#define IRDMA_CQPSQ_FWQE_FLUSHSQ BIT_ULL(61)
+#define IRDMA_CQPSQ_FWQE_FLUSHRQ BIT_ULL(62)
+#define IRDMA_CQPSQ_FWQE_ERR_SQ_IDX_VALID BIT_ULL(42)
+#define IRDMA_CQPSQ_FWQE_ERR_SQ_IDX GENMASK_ULL(49, 32)
+#define IRDMA_CQPSQ_FWQE_ERR_RQ_IDX_VALID BIT_ULL(43)
+#define IRDMA_CQPSQ_FWQE_ERR_RQ_IDX GENMASK_ULL(46, 32)
+#define IRDMA_CQPSQ_MAPT_PORT GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_MAPT_ADDPORT BIT_ULL(62)
+#define IRDMA_CQPSQ_UPESD_SDCMD GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_UPESD_SDDATALOW GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_UPESD_SDDATAHI GENMASK_ULL(63, 32)
+#define IRDMA_CQPSQ_UPESD_HMCFNID GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_UPESD_ENTRY_VALID BIT_ULL(63)
+
+#define IRDMA_CQPSQ_UPESD_BM_PF 0
+#define IRDMA_CQPSQ_UPESD_BM_CP_LM 1
+#define IRDMA_CQPSQ_UPESD_BM_AXF 2
+#define IRDMA_CQPSQ_UPESD_BM_LM 4
+#define IRDMA_CQPSQ_UPESD_BM GENMASK_ULL(34, 32)
+#define IRDMA_CQPSQ_UPESD_ENTRY_COUNT GENMASK_ULL(3, 0)
+#define IRDMA_CQPSQ_UPESD_SKIP_ENTRY BIT_ULL(7)
+#define IRDMA_CQPSQ_SUSPENDQP_QPID GENMASK_ULL(23, 0)
+#define IRDMA_CQPSQ_RESUMEQP_QSHANDLE GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_RESUMEQP_QPID GENMASK(23, 0)
+#define IRDMA_MANAGE_RSRC_VER2 BIT_ULL(2)
+
+#define IRDMA_CQPSQ_MIN_STAG_INVALID 0x0001
+#define IRDMA_CQPSQ_MIN_SUSPEND_PND 0x0005
+#define IRDMA_CQPSQ_MIN_DEF_CMPL 0x0006
+#define IRDMA_CQPSQ_MIN_OOO_CMPL 0x0007
+
+#define IRDMA_CQPSQ_MAJ_NO_ERROR 0x0000
+#define IRDMA_CQPSQ_MAJ_OBJCACHE_ERROR 0xF000
+#define IRDMA_CQPSQ_MAJ_CNTXTCACHE_ERROR 0xF001
+#define IRDMA_CQPSQ_MAJ_ERROR 0xFFFF
+#define IRDMAQPC_DDP_VER GENMASK_ULL(1, 0)
+#define IRDMAQPC_IBRDENABLE BIT_ULL(2)
+#define IRDMAQPC_IPV4 BIT_ULL(3)
+#define IRDMAQPC_NONAGLE BIT_ULL(4)
+#define IRDMAQPC_INSERTVLANTAG BIT_ULL(5)
+#define IRDMAQPC_ISQP1 BIT_ULL(6)
+#define IRDMAQPC_TIMESTAMP BIT_ULL(7)
+#define IRDMAQPC_RQWQESIZE GENMASK_ULL(9, 8)
+#define IRDMAQPC_INSERTL2TAG2 BIT_ULL(11)
+#define IRDMAQPC_LIMIT GENMASK_ULL(13, 12)
+
+#define IRDMAQPC_USE_SRQ BIT_ULL(10)
+#define IRDMAQPC_SRQ_ID GENMASK_ULL(15, 0)
+#define IRDMAQPC_PASID GENMASK_ULL(19, 0)
+#define IRDMAQPC_PASID_VALID BIT_ULL(11)
+
+#define IRDMAQPC_ECN_EN BIT_ULL(14)
+#define IRDMAQPC_DROPOOOSEG BIT_ULL(15)
+#define IRDMAQPC_DUPACK_THRESH GENMASK_ULL(18, 16)
+#define IRDMAQPC_ERR_RQ_IDX_VALID BIT_ULL(19)
+#define IRDMAQPC_DIS_VLAN_CHECKS GENMASK_ULL(21, 19)
+#define IRDMAQPC_DC_TCP_EN BIT_ULL(25)
+#define IRDMAQPC_RCVTPHEN BIT_ULL(28)
+#define IRDMAQPC_XMITTPHEN BIT_ULL(29)
+#define IRDMAQPC_RQTPHEN BIT_ULL(30)
+#define IRDMAQPC_SQTPHEN BIT_ULL(31)
+#define IRDMAQPC_PPIDX GENMASK_ULL(41, 32)
+#define IRDMAQPC_PMENA BIT_ULL(47)
+#define IRDMAQPC_RDMAP_VER GENMASK_ULL(63, 62)
+#define IRDMAQPC_ROCE_TVER GENMASK_ULL(63, 60)
+
+#define IRDMAQPC_SQADDR IRDMA_CQPHC_QPCTX
+#define IRDMAQPC_RQADDR IRDMA_CQPHC_QPCTX
+#define IRDMAQPC_TTL GENMASK_ULL(7, 0)
+#define IRDMAQPC_RQSIZE GENMASK_ULL(11, 8)
+#define IRDMAQPC_SQSIZE GENMASK_ULL(15, 12)
+#define IRDMAQPC_GEN1_SRCMACADDRIDX GENMASK(21, 16)
+#define IRDMAQPC_AVOIDSTRETCHACK BIT_ULL(23)
+#define IRDMAQPC_TOS GENMASK_ULL(31, 24)
+#define IRDMAQPC_SRCPORTNUM GENMASK_ULL(47, 32)
+#define IRDMAQPC_DESTPORTNUM GENMASK_ULL(63, 48)
+#define IRDMAQPC_DESTIPADDR0 GENMASK_ULL(63, 32)
+#define IRDMAQPC_DESTIPADDR1 GENMASK_ULL(31, 0)
+#define IRDMAQPC_DESTIPADDR2 GENMASK_ULL(63, 32)
+#define IRDMAQPC_DESTIPADDR3 GENMASK_ULL(31, 0)
+#define IRDMAQPC_SNDMSS GENMASK_ULL(29, 16)
+#define IRDMAQPC_SYN_RST_HANDLING GENMASK_ULL(31, 30)
+#define IRDMAQPC_VLANTAG GENMASK_ULL(47, 32)
+#define IRDMAQPC_ARPIDX GENMASK_ULL(63, 48)
+#define IRDMAQPC_FLOWLABEL GENMASK_ULL(19, 0)
+#define IRDMAQPC_WSCALE BIT_ULL(20)
+#define IRDMAQPC_KEEPALIVE BIT_ULL(21)
+#define IRDMAQPC_IGNORE_TCP_OPT BIT_ULL(22)
+#define IRDMAQPC_IGNORE_TCP_UNS_OPT BIT_ULL(23)
+#define IRDMAQPC_TCPSTATE GENMASK_ULL(31, 28)
+#define IRDMAQPC_RCVSCALE GENMASK_ULL(35, 32)
+#define IRDMAQPC_SNDSCALE GENMASK_ULL(43, 40)
+#define IRDMAQPC_PDIDX GENMASK_ULL(63, 48)
+#define IRDMAQPC_PDIDXHI GENMASK_ULL(21, 20)
+#define IRDMAQPC_PKEY GENMASK_ULL(47, 32)
+#define IRDMAQPC_ACKCREDITS GENMASK_ULL(24, 20)
+#define IRDMAQPC_QKEY GENMASK_ULL(63, 32)
+#define IRDMAQPC_DESTQP GENMASK_ULL(23, 0)
+#define IRDMAQPC_KALIVE_TIMER_MAX_PROBES GENMASK_ULL(23, 16)
+#define IRDMAQPC_KEEPALIVE_INTERVAL GENMASK_ULL(31, 24)
+#define IRDMAQPC_TIMESTAMP_RECENT GENMASK_ULL(31, 0)
+#define IRDMAQPC_TIMESTAMP_AGE GENMASK_ULL(63, 32)
+#define IRDMAQPC_SNDNXT GENMASK_ULL(31, 0)
+#define IRDMAQPC_ISN GENMASK_ULL(55, 32)
+#define IRDMAQPC_PSNNXT GENMASK_ULL(23, 0)
+#define IRDMAQPC_LSN GENMASK_ULL(55, 32)
+#define IRDMAQPC_SNDWND GENMASK_ULL(63, 32)
+#define IRDMAQPC_RCVNXT GENMASK_ULL(31, 0)
+#define IRDMAQPC_EPSN GENMASK_ULL(23, 0)
+#define IRDMAQPC_RCVWND GENMASK_ULL(63, 32)
+#define IRDMAQPC_SNDMAX GENMASK_ULL(31, 0)
+#define IRDMAQPC_SNDUNA GENMASK_ULL(63, 32)
+#define IRDMAQPC_PSNMAX GENMASK_ULL(23, 0)
+#define IRDMAQPC_PSNUNA GENMASK_ULL(55, 32)
+#define IRDMAQPC_SRTT GENMASK_ULL(31, 0)
+#define IRDMAQPC_RTTVAR GENMASK_ULL(63, 32)
+#define IRDMAQPC_SSTHRESH GENMASK_ULL(31, 0)
+#define IRDMAQPC_CWND GENMASK_ULL(63, 32)
+#define IRDMAQPC_CWNDROCE GENMASK_ULL(55, 32)
+#define IRDMAQPC_SNDWL1 GENMASK_ULL(31, 0)
+#define IRDMAQPC_SNDWL2 GENMASK_ULL(63, 32)
+#define IRDMAQPC_MINRNR_TIMER GENMASK_ULL(4, 0)
+#define IRDMAQPC_ERR_RQ_IDX GENMASK_ULL(46, 32)
+#define IRDMAQPC_RTOMIN GENMASK_ULL(63, 57)
+#define IRDMAQPC_MAXSNDWND GENMASK_ULL(31, 0)
+#define IRDMAQPC_REXMIT_THRESH GENMASK_ULL(53, 48)
+#define IRDMAQPC_RNRNAK_THRESH GENMASK_ULL(56, 54)
+#define IRDMAQPC_TXCQNUM GENMASK_ULL(24, 0)
+#define IRDMAQPC_RXCQNUM GENMASK_ULL(56, 32)
+#define IRDMAQPC_STAT_INDEX GENMASK_ULL(6, 0)
+#define IRDMAQPC_Q2ADDR GENMASK_ULL(63, 8)
+#define IRDMAQPC_LASTBYTESENT GENMASK_ULL(7, 0)
+#define IRDMAQPC_MACADDRESS GENMASK_ULL(63, 16)
+#define IRDMAQPC_ORDSIZE GENMASK_ULL(7, 0)
+
+#define IRDMAQPC_LOCALACKTIMEOUT GENMASK_ULL(12, 8)
+#define IRDMAQPC_RNRNAK_TMR GENMASK_ULL(4, 0)
+#define IRDMAQPC_ORDSIZE_GEN3 GENMASK_ULL(10, 0)
+#define IRDMAQPC_REMOTE_ATOMIC_EN BIT_ULL(18)
+#define IRDMAQPC_STAT_INDEX_GEN3 GENMASK_ULL(47, 32)
+#define IRDMAQPC_PKT_LIMIT GENMASK_ULL(55, 48)
+
+#define IRDMAQPC_IRDSIZE GENMASK_ULL(18, 16)
+
+#define IRDMAQPC_IRDSIZE_GEN3 GENMASK_ULL(17, 14)
+
+#define IRDMAQPC_UDPRIVCQENABLE BIT_ULL(19)
+#define IRDMAQPC_WRRDRSPOK BIT_ULL(20)
+#define IRDMAQPC_RDOK BIT_ULL(21)
+#define IRDMAQPC_SNDMARKERS BIT_ULL(22)
+#define IRDMAQPC_DCQCNENABLE BIT_ULL(22)
+#define IRDMAQPC_FW_CC_ENABLE BIT_ULL(28)
+#define IRDMAQPC_RCVNOICRC BIT_ULL(31)
+#define IRDMAQPC_BINDEN BIT_ULL(23)
+#define IRDMAQPC_FASTREGEN BIT_ULL(24)
+#define IRDMAQPC_PRIVEN BIT_ULL(25)
+#define IRDMAQPC_TIMELYENABLE BIT_ULL(27)
+#define IRDMAQPC_THIGH GENMASK_ULL(63, 52)
+#define IRDMAQPC_TLOW GENMASK_ULL(39, 32)
+#define IRDMAQPC_REMENDPOINTIDX GENMASK_ULL(16, 0)
+#define IRDMAQPC_USESTATSINSTANCE BIT_ULL(26)
+#define IRDMAQPC_IWARPMODE BIT_ULL(28)
+#define IRDMAQPC_RCVMARKERS BIT_ULL(29)
+#define IRDMAQPC_ALIGNHDRS BIT_ULL(30)
+#define IRDMAQPC_RCVNOMPACRC BIT_ULL(31)
+#define IRDMAQPC_RCVMARKOFFSET GENMASK_ULL(40, 32)
+#define IRDMAQPC_SNDMARKOFFSET GENMASK_ULL(56, 48)
+
+#define IRDMAQPC_QPCOMPCTX IRDMA_CQPHC_QPCTX
+#define IRDMAQPC_SQTPHVAL GENMASK_ULL(7, 0)
+#define IRDMAQPC_RQTPHVAL GENMASK_ULL(15, 8)
+#define IRDMAQPC_QSHANDLE GENMASK_ULL(25, 16)
+#define IRDMAQPC_EXCEPTION_LAN_QUEUE GENMASK_ULL(43, 32)
+#define IRDMAQPC_LOCAL_IPADDR3 GENMASK_ULL(31, 0)
+#define IRDMAQPC_LOCAL_IPADDR2 GENMASK_ULL(63, 32)
+#define IRDMAQPC_LOCAL_IPADDR1 GENMASK_ULL(31, 0)
+#define IRDMAQPC_LOCAL_IPADDR0 GENMASK_ULL(63, 32)
+#define IRDMA_FW_VER_MINOR GENMASK_ULL(15, 0)
+#define IRDMA_FW_VER_MAJOR GENMASK_ULL(31, 16)
+#define IRDMA_FEATURE_INFO GENMASK_ULL(47, 0)
+#define IRDMA_FEATURE_CNT GENMASK_ULL(47, 32)
+#define IRDMA_FEATURE_TYPE GENMASK_ULL(63, 48)
+#define IRDMA_FEATURE_RSRC_MAX GENMASK_ULL(31, 0)
+
+#define IRDMAQPSQ_OPCODE GENMASK_ULL(37, 32)
+#define IRDMAQPSQ_COPY_HOST_PBL BIT_ULL(43)
+#define IRDMAQPSQ_ADDFRAGCNT GENMASK_ULL(41, 38)
+#define IRDMAQPSQ_PUSHWQE BIT_ULL(56)
+#define IRDMAQPSQ_STREAMMODE BIT_ULL(58)
+#define IRDMAQPSQ_WAITFORRCVPDU BIT_ULL(59)
+#define IRDMAQPSQ_READFENCE BIT_ULL(60)
+#define IRDMAQPSQ_LOCALFENCE BIT_ULL(61)
+#define IRDMAQPSQ_UDPHEADER BIT_ULL(61)
+#define IRDMAQPSQ_L4LEN GENMASK_ULL(45, 42)
+#define IRDMAQPSQ_SIGCOMPL BIT_ULL(62)
+#define IRDMAQPSQ_VALID BIT_ULL(63)
+
+#define IRDMAQPSQ_FRAG_TO IRDMA_CQPHC_QPCTX
+#define IRDMAQPSQ_FRAG_VALID BIT_ULL(63)
+#define IRDMAQPSQ_FRAG_LEN GENMASK_ULL(62, 32)
+#define IRDMAQPSQ_FRAG_STAG GENMASK_ULL(31, 0)
+#define IRDMAQPSQ_GEN1_FRAG_LEN GENMASK_ULL(31, 0)
+#define IRDMAQPSQ_GEN1_FRAG_STAG GENMASK_ULL(63, 32)
+#define IRDMAQPSQ_REMSTAGINV GENMASK_ULL(31, 0)
+#define IRDMAQPSQ_DESTQKEY GENMASK_ULL(31, 0)
+#define IRDMAQPSQ_DESTQPN GENMASK_ULL(55, 32)
+#define IRDMAQPSQ_AHID GENMASK_ULL(24, 0)
+#define IRDMAQPSQ_INLINEDATAFLAG BIT_ULL(57)
+
+#define IRDMA_INLINE_VALID_S 7
+#define IRDMAQPSQ_INLINEDATALEN GENMASK_ULL(55, 48)
+#define IRDMAQPSQ_IMMDATAFLAG BIT_ULL(47)
+#define IRDMAQPSQ_REPORTRTT BIT_ULL(46)
+
+#define IRDMAQPSQ_IMMDATA GENMASK_ULL(63, 0)
+#define IRDMAQPSQ_REMSTAG GENMASK_ULL(31, 0)
+
+#define IRDMAQPSQ_REMTO IRDMA_CQPHC_QPCTX
+
+#define IRDMAQPSQ_STAG GENMASK_ULL(31, 0)
+#define IRDMAQPSQ_REMOTE_STAG GENMASK_ULL(31, 0)
+
+#define IRDMAQPSQ_STAGRIGHTS GENMASK_ULL(52, 48)
+#define IRDMAQPSQ_VABASEDTO BIT_ULL(53)
+#define IRDMAQPSQ_MEMWINDOWTYPE BIT_ULL(54)
+
+#define IRDMAQPSQ_MWLEN IRDMA_CQPHC_QPCTX
+#define IRDMAQPSQ_PARENTMRSTAG GENMASK_ULL(63, 32)
+#define IRDMAQPSQ_MWSTAG GENMASK_ULL(31, 0)
+
+#define IRDMAQPSQ_BASEVA_TO_FBO IRDMA_CQPHC_QPCTX
+
+#define IRDMAQPSQ_REMOTE_ATOMICS_EN BIT_ULL(55)
+
+#define IRDMAQPSQ_LOCSTAG GENMASK_ULL(31, 0)
+
+#define IRDMAQPSQ_STAGKEY GENMASK_ULL(7, 0)
+#define IRDMAQPSQ_STAGINDEX GENMASK_ULL(31, 8)
+#define IRDMAQPSQ_COPYHOSTPBLS BIT_ULL(43)
+#define IRDMAQPSQ_LPBLSIZE GENMASK_ULL(45, 44)
+#define IRDMAQPSQ_HPAGESIZE GENMASK_ULL(47, 46)
+#define IRDMAQPSQ_STAGLEN GENMASK_ULL(40, 0)
+#define IRDMAQPSQ_FIRSTPMPBLIDXLO GENMASK_ULL(63, 48)
+#define IRDMAQPSQ_FIRSTPMPBLIDXHI GENMASK_ULL(11, 0)
+#define IRDMAQPSQ_PBLADDR GENMASK_ULL(63, 12)
+
+/* iwarp QP RQ WQE common fields */
+#define IRDMAQPRQ_ADDFRAGCNT IRDMAQPSQ_ADDFRAGCNT
+#define IRDMAQPRQ_VALID IRDMAQPSQ_VALID
+#define IRDMAQPRQ_COMPLCTX IRDMA_CQPHC_QPCTX
+#define IRDMAQPRQ_FRAG_LEN IRDMAQPSQ_FRAG_LEN
+#define IRDMAQPRQ_STAG IRDMAQPSQ_FRAG_STAG
+#define IRDMAQPRQ_TO IRDMAQPSQ_FRAG_TO
+
+#define IRDMAPFINT_OICR_HMC_ERR_M BIT(26)
+#define IRDMAPFINT_OICR_PE_PUSH_M BIT(27)
+#define IRDMAPFINT_OICR_PE_CRITERR_M BIT(28)
+
+#define IRDMA_QUERY_FPM_LOC_MEM_PAGES GENMASK_ULL(63, 32)
+#define IRDMA_QUERY_FPM_MAX_QPS GENMASK_ULL(31, 0)
+#define IRDMA_QUERY_FPM_MAX_CQS GENMASK_ULL(31, 0)
+#define IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX GENMASK_ULL(13, 0)
+#define IRDMA_QUERY_FPM_MAX_PE_SDS GENMASK_ULL(44, 32)
+#define IRDMA_QUERY_FPM_MAX_PE_SDS_GEN3 GENMASK_ULL(47, 32)
+#define IRDMA_QUERY_FPM_MAX_CEQS GENMASK_ULL(9, 0)
+#define IRDMA_QUERY_FPM_MAX_IRD GENMASK_ULL(53, 50)
+#define IRDMA_QUERY_FPM_XFBLOCKSIZE GENMASK_ULL(63, 32)
+#define IRDMA_QUERY_FPM_Q1BLOCKSIZE GENMASK_ULL(63, 32)
+#define IRDMA_QUERY_FPM_HTMULTIPLIER GENMASK_ULL(19, 16)
+#define IRDMA_QUERY_FPM_TIMERBUCKET GENMASK_ULL(47, 32)
+#define IRDMA_QUERY_FPM_RRFBLOCKSIZE GENMASK_ULL(63, 32)
+#define IRDMA_QUERY_FPM_RRFFLBLOCKSIZE GENMASK_ULL(63, 32)
+#define IRDMA_QUERY_FPM_OOISCFBLOCKSIZE GENMASK_ULL(63, 32)
+#define IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID GENMASK_ULL(5, 0)
+
+#define IRDMA_GET_CURRENT_AEQ_ELEM(_aeq) \
+ ( \
+ (_aeq)->aeqe_base[IRDMA_RING_CURRENT_TAIL((_aeq)->aeq_ring)].buf \
+ )
+
+#define IRDMA_GET_CURRENT_CEQ_ELEM(_ceq) \
+ ( \
+ (_ceq)->ceqe_base[IRDMA_RING_CURRENT_TAIL((_ceq)->ceq_ring)].buf \
+ )
+
+#define IRDMA_GET_CEQ_ELEM_AT_POS(_ceq, _pos) \
+ ( \
+ (_ceq)->ceqe_base[_pos].buf \
+ )
+
+#define IRDMA_RING_GET_NEXT_TAIL(_ring, _idx) \
+ ( \
+ ((_ring).tail + (_idx)) % (_ring).size \
+ )
+
+#define IRDMA_CQP_INIT_WQE(wqe) memset(wqe, 0, 64)
+
+#define IRDMA_GET_CURRENT_CQ_ELEM(_cq) \
+ ( \
+ (_cq)->cq_base[IRDMA_RING_CURRENT_HEAD((_cq)->cq_ring)].buf \
+ )
+#define IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(_cq) \
+ ( \
+ ((struct irdma_extended_cqe *) \
+ ((_cq)->cq_base))[IRDMA_RING_CURRENT_HEAD((_cq)->cq_ring)].buf \
+ )
+
+#define IRDMA_RING_INIT(_ring, _size) \
+ { \
+ (_ring).head = 0; \
+ (_ring).tail = 0; \
+ (_ring).size = (_size); \
+ }
+#define IRDMA_RING_SIZE(_ring) ((_ring).size)
+#define IRDMA_RING_CURRENT_HEAD(_ring) ((_ring).head)
+#define IRDMA_RING_CURRENT_TAIL(_ring) ((_ring).tail)
+
+#define IRDMA_RING_MOVE_HEAD(_ring, _retcode) \
+ { \
+ register u32 size; \
+ size = (_ring).size; \
+ if (!IRDMA_RING_FULL_ERR(_ring)) { \
+ (_ring).head = ((_ring).head + 1) % size; \
+ (_retcode) = 0; \
+ } else { \
+ (_retcode) = -ENOMEM; \
+ } \
+ }
+#define IRDMA_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
+ { \
+ register u32 size; \
+ size = (_ring).size; \
+ if ((IRDMA_RING_USED_QUANTA(_ring) + (_count)) < size) { \
+ (_ring).head = ((_ring).head + (_count)) % size; \
+ (_retcode) = 0; \
+ } else { \
+ (_retcode) = -ENOMEM; \
+ } \
+ }
+#define IRDMA_SQ_RING_MOVE_HEAD(_ring, _retcode) \
+ { \
+ register u32 size; \
+ size = (_ring).size; \
+ if (!IRDMA_SQ_RING_FULL_ERR(_ring)) { \
+ (_ring).head = ((_ring).head + 1) % size; \
+ (_retcode) = 0; \
+ } else { \
+ (_retcode) = -ENOMEM; \
+ } \
+ }
+#define IRDMA_SQ_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
+ { \
+ register u32 size; \
+ size = (_ring).size; \
+ if ((IRDMA_RING_USED_QUANTA(_ring) + (_count)) < (size - 256)) { \
+ (_ring).head = ((_ring).head + (_count)) % size; \
+ (_retcode) = 0; \
+ } else { \
+ (_retcode) = -ENOMEM; \
+ } \
+ }
+#define IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(_ring, _count) \
+ (_ring).head = ((_ring).head + (_count)) % (_ring).size
+
+#define IRDMA_RING_MOVE_TAIL(_ring) \
+ (_ring).tail = ((_ring).tail + 1) % (_ring).size
+
+#define IRDMA_RING_MOVE_HEAD_NOCHECK(_ring) \
+ (_ring).head = ((_ring).head + 1) % (_ring).size
+
+#define IRDMA_RING_MOVE_TAIL_BY_COUNT(_ring, _count) \
+ (_ring).tail = ((_ring).tail + (_count)) % (_ring).size
+
+#define IRDMA_RING_SET_TAIL(_ring, _pos) \
+ (_ring).tail = (_pos) % (_ring).size
+
+#define IRDMA_RING_FULL_ERR(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 1)) \
+ )
+
+#define IRDMA_ERR_RING_FULL2(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 2)) \
+ )
+
+#define IRDMA_ERR_RING_FULL3(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 3)) \
+ )
+
+#define IRDMA_SQ_RING_FULL_ERR(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 257)) \
+ )
+
+#define IRDMA_ERR_SQ_RING_FULL2(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 258)) \
+ )
+#define IRDMA_ERR_SQ_RING_FULL3(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 259)) \
+ )
+#define IRDMA_RING_MORE_WORK(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) != 0) \
+ )
+
+#define IRDMA_RING_USED_QUANTA(_ring) \
+ ( \
+ (((_ring).head + (_ring).size - (_ring).tail) % (_ring).size) \
+ )
+
+#define IRDMA_RING_FREE_QUANTA(_ring) \
+ ( \
+ ((_ring).size - IRDMA_RING_USED_QUANTA(_ring) - 1) \
+ )
+
+#define IRDMA_SQ_RING_FREE_QUANTA(_ring) \
+ ( \
+ ((_ring).size - IRDMA_RING_USED_QUANTA(_ring) - 257) \
+ )
+
+#define IRDMA_ATOMIC_RING_MOVE_HEAD(_ring, index, _retcode) \
+ { \
+ index = IRDMA_RING_CURRENT_HEAD(_ring); \
+ IRDMA_RING_MOVE_HEAD(_ring, _retcode); \
+ }
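/*
 * A throwaway sketch, not from the patch itself, exercising the ring helpers
 * above.  The anonymous struct simply mirrors the head/tail/size members the
 * macros dereference; the driver's real ring type is assumed, not shown here.
 */
static void example_ring_usage(void)
{
	struct { u32 head; u32 tail; u32 size; } ring;
	int ret;

	IRDMA_RING_INIT(ring, 64);		/* head = tail = 0, size = 64 */
	IRDMA_RING_MOVE_HEAD(ring, ret);	/* producer claims a slot */
	IRDMA_RING_MOVE_HEAD(ring, ret);
	/* IRDMA_RING_USED_QUANTA(ring) == 2, IRDMA_RING_FREE_QUANTA(ring) == 61 */
	IRDMA_RING_MOVE_TAIL(ring);		/* consumer retires one slot */
	/* IRDMA_RING_USED_QUANTA(ring) == 1 */
	(void)ret;
}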
+
+enum irdma_qp_wqe_size {
+ IRDMA_WQE_SIZE_32 = 32,
+ IRDMA_WQE_SIZE_64 = 64,
+ IRDMA_WQE_SIZE_96 = 96,
+ IRDMA_WQE_SIZE_128 = 128,
+ IRDMA_WQE_SIZE_256 = 256,
+};
+
+enum irdma_ws_node_op {
+ IRDMA_ADD_NODE = 0,
+ IRDMA_MODIFY_NODE,
+ IRDMA_DEL_NODE,
+};
+
+enum { IRDMA_Q_ALIGNMENT_M = (128 - 1),
+ IRDMA_AEQ_ALIGNMENT_M = (256 - 1),
+ IRDMA_Q2_ALIGNMENT_M = (256 - 1),
+ IRDMA_CEQ_ALIGNMENT_M = (256 - 1),
+ IRDMA_CQ0_ALIGNMENT_M = (256 - 1),
+ IRDMA_HOST_CTX_ALIGNMENT_M = (4 - 1),
+ IRDMA_SHADOWAREA_M = (128 - 1),
+ IRDMA_FPM_QUERY_BUF_ALIGNMENT_M = (4 - 1),
+ IRDMA_FPM_COMMIT_BUF_ALIGNMENT_M = (4 - 1),
+};
+
+enum irdma_alignment {
+ IRDMA_CQP_ALIGNMENT = 0x200,
+ IRDMA_AEQ_ALIGNMENT = 0x100,
+ IRDMA_CEQ_ALIGNMENT = 0x100,
+ IRDMA_CQ0_ALIGNMENT = 0x100,
+ IRDMA_SD_BUF_ALIGNMENT = 0x80,
+ IRDMA_FEATURE_BUF_ALIGNMENT = 0x10,
+};
+
+enum icrdma_protocol_used {
+ ICRDMA_ANY_PROTOCOL = 0,
+ ICRDMA_IWARP_PROTOCOL_ONLY = 1,
+ ICRDMA_ROCE_PROTOCOL_ONLY = 2,
+};
+
+/**
+ * set_64bit_val - set 64 bit value to hw wqe
+ * @wqe_words: wqe addr to write
+ * @byte_index: index in wqe
+ * @val: value to write
+ **/
+static inline void set_64bit_val(__le64 *wqe_words, u32 byte_index, u64 val)
+{
+ wqe_words[byte_index >> 3] = cpu_to_le64(val);
+}
+
+/**
+ * set_32bit_val - set 32 bit value to hw wqe
+ * @wqe_words: wqe addr to write
+ * @byte_index: index in wqe
+ * @val: value to write
+ **/
+static inline void set_32bit_val(__le32 *wqe_words, u32 byte_index, u32 val)
+{
+ wqe_words[byte_index >> 2] = cpu_to_le32(val);
+}
+
+/**
+ * get_64bit_val - read 64 bit value from wqe
+ * @wqe_words: wqe addr
+ * @byte_index: index to read from
+ * @val: return 64 bit value
+ **/
+static inline void get_64bit_val(__le64 *wqe_words, u32 byte_index, u64 *val)
+{
+ *val = le64_to_cpu(wqe_words[byte_index >> 3]);
+}
+
+/**
+ * get_32bit_val - read 32 bit value from wqe
+ * @wqe_words: wqe addr
+ * @byte_index: index to read from
+ * @val: return 32 bit value
+ **/
+static inline void get_32bit_val(__le32 *wqe_words, u32 byte_index, u32 *val)
+{
+ *val = le32_to_cpu(wqe_words[byte_index >> 2]);
+}
+#endif /* IRDMA_DEFS_H */
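/*
 * A minimal sketch, not from the patch itself, of how the accessors above
 * combine with the GENMASK_ULL field definitions to compose one 64-bit WQE
 * word.  FIELD_PREP() is the generic helper from <linux/bitfield.h>; the
 * byte offset 24 and the choice of IRDMAQPSQ_OPCODE/IRDMAQPSQ_VALID here are
 * purely illustrative.
 */
static void example_build_wqe_hdr(__le64 *wqe, u8 opcode, bool valid)
{
	u64 hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, opcode) |
		  FIELD_PREP(IRDMAQPSQ_VALID, valid);

	/* Lands in wqe[24 >> 3], i.e. the fourth 64-bit word, little-endian. */
	set_64bit_val(wqe, 24, hdr);
}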
diff --git a/drivers/infiniband/hw/irdma/hmc.c b/drivers/infiniband/hw/irdma/hmc.c
new file mode 100644
index 000000000000..da18add141da
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/hmc.c
@@ -0,0 +1,709 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "osdep.h"
+#include "hmc.h"
+#include "defs.h"
+#include "type.h"
+#include "protos.h"
+#include "virtchnl.h"
+
+/**
+ * irdma_find_sd_index_limit - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the maximum number of segment descriptors
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by irdma_hmc_rsrc_type.
+ */
+
+static void irdma_find_sd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,
+ u32 idx, u32 cnt, u32 *sd_idx,
+ u32 *sd_limit)
+{
+ u64 fpm_addr, fpm_limit;
+
+ fpm_addr = hmc_info->hmc_obj[type].base +
+ hmc_info->hmc_obj[type].size * idx;
+ fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
+ *sd_idx = (u32)(fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE);
+ *sd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_DIRECT_BP_SIZE);
+ *sd_limit += 1;
+}
+
+/**
+ * irdma_find_pd_index_limit - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_idx: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by irdma_hmc_rsrc_type.
+ */
+
+static void irdma_find_pd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,
+ u32 idx, u32 cnt, u32 *pd_idx,
+ u32 *pd_limit)
+{
+ u64 fpm_adr, fpm_limit;
+
+ fpm_adr = hmc_info->hmc_obj[type].base +
+ hmc_info->hmc_obj[type].size * idx;
+ fpm_limit = fpm_adr + hmc_info->hmc_obj[type].size * cnt;
+ *pd_idx = (u32)(fpm_adr / IRDMA_HMC_PAGED_BP_SIZE);
+ *pd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_PAGED_BP_SIZE);
+ *pd_limit += 1;
+}
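/*
 * Worked example for the two index helpers above, using purely illustrative
 * numbers (the actual IRDMA_HMC_DIRECT_BP_SIZE is defined elsewhere):
 * assuming a 2 MB direct backing-page size, an object size of 256 bytes,
 * idx = 0 and cnt = 16384, then fpm_addr = 0 and fpm_limit = 4 MB, giving
 * sd_idx = 0 and sd_limit = ((4 MB - 1) / 2 MB) + 1 = 2, i.e. the object
 * range spans two segment descriptors.
 */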
+
+/**
+ * irdma_set_sd_entry - setup entry for sd programming
+ * @pa: physical addr
+ * @idx: sd index
+ * @type: paged or direct sd
+ * @entry: sd entry ptr
+ */
+static void irdma_set_sd_entry(u64 pa, u32 idx, enum irdma_sd_entry_type type,
+ struct irdma_update_sd_entry *entry)
+{
+ entry->data = pa |
+ FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT, IRDMA_HMC_MAX_BP_COUNT) |
+ FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE,
+ type == IRDMA_SD_TYPE_PAGED ? 0 : 1) |
+ FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDVALID, 1);
+
+ entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) | BIT(15);
+}
+
+/**
+ * irdma_clr_sd_entry - setup entry for sd clear
+ * @idx: sd index
+ * @type: paged or direct sd
+ * @entry: sd entry ptr
+ */
+static void irdma_clr_sd_entry(u32 idx, enum irdma_sd_entry_type type,
+ struct irdma_update_sd_entry *entry)
+{
+ entry->data = FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT, IRDMA_HMC_MAX_BP_COUNT) |
+ FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE,
+ type == IRDMA_SD_TYPE_PAGED ? 0 : 1);
+
+ entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) | BIT(15);
+}
+
+/**
+ * irdma_invalidate_pf_hmc_pd - Invalidates the pd cache in the hardware for PF
+ * @dev: pointer to our device struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ */
+static inline void irdma_invalidate_pf_hmc_pd(struct irdma_sc_dev *dev, u32 sd_idx,
+ u32 pd_idx)
+{
+ u32 val = FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDIDX, sd_idx) |
+ FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDPARTSEL, 1) |
+ FIELD_PREP(IRDMA_PFHMC_PDINV_PMPDIDX, pd_idx);
+
+ writel(val, dev->hw_regs[IRDMA_PFHMC_PDINV]);
+}
+
+/**
+ * irdma_hmc_sd_one - setup 1 sd entry for cqp
+ * @dev: pointer to the device structure
+ * @hmc_fn_id: hmc's function id
+ * @pa: physical addr
+ * @sd_idx: sd index
+ * @type: paged or direct sd
+ * @setsd: flag to set or clear sd
+ */
+int irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, u64 pa, u32 sd_idx,
+ enum irdma_sd_entry_type type, bool setsd)
+{
+ struct irdma_update_sds_info sdinfo;
+
+ sdinfo.cnt = 1;
+ sdinfo.hmc_fn_id = hmc_fn_id;
+ if (setsd)
+ irdma_set_sd_entry(pa, sd_idx, type, sdinfo.entry);
+ else
+ irdma_clr_sd_entry(sd_idx, type, sdinfo.entry);
+ return dev->cqp->process_cqp_sds(dev, &sdinfo);
+}
+
+/**
+ * irdma_hmc_sd_grp - setup group of sd entries for cqp
+ * @dev: pointer to the device structure
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @sd_index: sd index
+ * @sd_cnt: number of sd entries
+ * @setsd: flag to set or clear sd
+ */
+static int irdma_hmc_sd_grp(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, u32 sd_index,
+ u32 sd_cnt, bool setsd)
+{
+ struct irdma_hmc_sd_entry *sd_entry;
+ struct irdma_update_sds_info sdinfo = {};
+ u64 pa;
+ u32 i;
+ int ret_code = 0;
+
+ sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
+ for (i = sd_index; i < sd_index + sd_cnt; i++) {
+ sd_entry = &hmc_info->sd_table.sd_entry[i];
+ if (!sd_entry || (!sd_entry->valid && setsd) ||
+ (sd_entry->valid && !setsd))
+ continue;
+ if (setsd) {
+ pa = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?
+ sd_entry->u.pd_table.pd_page_addr.pa :
+ sd_entry->u.bp.addr.pa;
+ irdma_set_sd_entry(pa, i, sd_entry->entry_type,
+ &sdinfo.entry[sdinfo.cnt]);
+ } else {
+ irdma_clr_sd_entry(i, sd_entry->entry_type,
+ &sdinfo.entry[sdinfo.cnt]);
+ }
+ sdinfo.cnt++;
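+ /* flush a full batch of SD updates through the CQP before continuing */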
+ if (sdinfo.cnt == IRDMA_MAX_SD_ENTRIES) {
+ ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
+ if (ret_code) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: sd_programming failed err=%d\n",
+ ret_code);
+ return ret_code;
+ }
+
+ sdinfo.cnt = 0;
+ }
+ }
+ if (sdinfo.cnt)
+ ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
+
+ return ret_code;
+}
+
+/**
+ * irdma_hmc_finish_add_sd_reg - program sd entries for objects
+ * @dev: pointer to the device structure
+ * @info: create obj info
+ */
+static int irdma_hmc_finish_add_sd_reg(struct irdma_sc_dev *dev,
+ struct irdma_hmc_create_obj_info *info)
+{
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
+ return -EINVAL;
+
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt)
+ return -EINVAL;
+
+ if (!info->add_sd_cnt)
+ return 0;
+ return irdma_hmc_sd_grp(dev, info->hmc_info,
+ info->hmc_info->sd_indexes[0], info->add_sd_cnt,
+ true);
+}
+
+/**
+ * irdma_sc_create_hmc_obj - allocate backing store for hmc objects
+ * @dev: pointer to the device structure
+ * @info: pointer to irdma_hmc_create_obj_info struct
+ *
+ * This will allocate memory for PDs and backing pages and populate
+ * the sd and pd entries.
+ */
+int irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
+ struct irdma_hmc_create_obj_info *info)
+{
+ struct irdma_hmc_sd_entry *sd_entry;
+ u32 sd_idx, sd_lmt;
+ u32 pd_idx = 0, pd_lmt = 0;
+ u32 pd_idx1 = 0, pd_lmt1 = 0;
+ u32 i, j;
+ bool pd_error = false;
+ int ret_code = 0;
+
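+ /* objects kept in device-local memory need no host backing pages */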
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3 &&
+ dev->hmc_info->hmc_obj[info->rsrc_type].mem_loc == IRDMA_LOC_MEM)
+ return 0;
+
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
+ return -EINVAL;
+
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: error type %u, start = %u, req cnt %u, cnt = %u\n",
+ info->rsrc_type, info->start_idx, info->count,
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt);
+ return -EINVAL;
+ }
+
+ irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &sd_idx,
+ &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ return -EINVAL;
+ }
+
+ irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx,
+ &pd_lmt);
+
+ for (j = sd_idx; j < sd_lmt; j++) {
+ ret_code = irdma_add_sd_table_entry(dev->hw, info->hmc_info, j,
+ info->entry_type,
+ IRDMA_HMC_DIRECT_BP_SIZE);
+ if (ret_code)
+ goto exit_sd_error;
+
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j];
+ if (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED &&
+ (dev->hmc_info == info->hmc_info &&
+ info->rsrc_type != IRDMA_HMC_IW_PBLE)) {
+ pd_idx1 = max(pd_idx, (j * IRDMA_HMC_MAX_BP_COUNT));
+ pd_lmt1 = min(pd_lmt, (j + 1) * IRDMA_HMC_MAX_BP_COUNT);
+ for (i = pd_idx1; i < pd_lmt1; i++) {
+ /* update the pd table entry */
+ ret_code = irdma_add_pd_table_entry(dev,
+ info->hmc_info,
+ i, NULL);
+ if (ret_code) {
+ pd_error = true;
+ break;
+ }
+ }
+ if (pd_error) {
+ while (i && (i > pd_idx1)) {
+ irdma_remove_pd_bp(dev, info->hmc_info,
+ i - 1);
+ i--;
+ }
+ }
+ }
+ if (sd_entry->valid)
+ continue;
+
+ info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;
+ info->add_sd_cnt++;
+ sd_entry->valid = true;
+ }
+ return irdma_hmc_finish_add_sd_reg(dev, info);
+
+exit_sd_error:
+ while (j && (j > sd_idx)) {
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
+ switch (sd_entry->entry_type) {
+ case IRDMA_SD_TYPE_PAGED:
+ pd_idx1 = max(pd_idx, (j - 1) * IRDMA_HMC_MAX_BP_COUNT);
+ pd_lmt1 = min(pd_lmt, (j * IRDMA_HMC_MAX_BP_COUNT));
+ for (i = pd_idx1; i < pd_lmt1; i++)
+ irdma_prep_remove_pd_page(info->hmc_info, i);
+ break;
+ case IRDMA_SD_TYPE_DIRECT:
+ irdma_prep_remove_pd_page(info->hmc_info, (j - 1));
+ break;
+ default:
+ ret_code = -EINVAL;
+ break;
+ }
+ j--;
+ }
+
+ return ret_code;
+}
+
+/**
+ * irdma_finish_del_sd_reg - delete sd entries for objects
+ * @dev: pointer to the device structure
+ * @info: delete obj info
+ * @reset: true if called before reset
+ */
+static int irdma_finish_del_sd_reg(struct irdma_sc_dev *dev,
+ struct irdma_hmc_del_obj_info *info,
+ bool reset)
+{
+ struct irdma_hmc_sd_entry *sd_entry;
+ int ret_code = 0;
+ u32 i, sd_idx;
+ struct irdma_dma_mem *mem;
+
+ if (dev->privileged && !reset)
+ ret_code = irdma_hmc_sd_grp(dev, info->hmc_info,
+ info->hmc_info->sd_indexes[0],
+ info->del_sd_cnt, false);
+
+ if (ret_code)
+ ibdev_dbg(to_ibdev(dev), "HMC: error cqp sd sd_grp\n");
+ for (i = 0; i < info->del_sd_cnt; i++) {
+ sd_idx = info->hmc_info->sd_indexes[i];
+ sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];
+ mem = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?
+ &sd_entry->u.pd_table.pd_page_addr :
+ &sd_entry->u.bp.addr;
+
+ if (!mem || !mem->va) {
+ ibdev_dbg(to_ibdev(dev), "HMC: error cqp sd mem\n");
+ } else {
+ dma_free_coherent(dev->hw->device, mem->size, mem->va,
+ mem->pa);
+ mem->va = NULL;
+ }
+ }
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_del_hmc_obj - remove pe hmc objects
+ * @dev: pointer to the device structure
+ * @info: pointer to irdma_hmc_del_obj_info struct
+ * @reset: true if called before reset
+ *
+ * This will de-populate the SDs and PDs. It frees
+ * the memory for PDs and backing storage. After this function returns,
+ * the caller should deallocate the memory previously allocated for
+ * book-keeping information about PDs and backing storage.
+ */
+int irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
+ struct irdma_hmc_del_obj_info *info, bool reset)
+{
+ struct irdma_hmc_pd_table *pd_table;
+ u32 sd_idx, sd_lmt;
+ u32 pd_idx, pd_lmt, rel_pd_idx;
+ u32 i, j;
+ int ret_code = 0;
+
+ if (dev->hmc_info->hmc_obj[info->rsrc_type].mem_loc == IRDMA_LOC_MEM)
+ return 0;
+
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
+ info->start_idx, info->rsrc_type,
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt);
+ return -EINVAL;
+ }
+
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\n",
+ info->start_idx, info->count, info->rsrc_type,
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt);
+ return -EINVAL;
+ }
+
+ irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx,
+ &pd_lmt);
+
+ for (j = pd_idx; j < pd_lmt; j++) {
+ sd_idx = j / IRDMA_HMC_PD_CNT_IN_SD;
+
+ if (!info->hmc_info->sd_table.sd_entry[sd_idx].valid)
+ continue;
+
+ if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
+ IRDMA_SD_TYPE_PAGED)
+ continue;
+
+ rel_pd_idx = j % IRDMA_HMC_PD_CNT_IN_SD;
+ pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ if (pd_table->pd_entry &&
+ pd_table->pd_entry[rel_pd_idx].valid) {
+ ret_code = irdma_remove_pd_bp(dev, info->hmc_info, j);
+ if (ret_code) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: remove_pd_bp error\n");
+ return ret_code;
+ }
+ }
+ }
+
+ irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &sd_idx,
+ &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ ibdev_dbg(to_ibdev(dev), "HMC: invalid sd_idx\n");
+ return -EINVAL;
+ }
+
+ for (i = sd_idx; i < sd_lmt; i++) {
+ pd_table = &info->hmc_info->sd_table.sd_entry[i].u.pd_table;
+ if (!info->hmc_info->sd_table.sd_entry[i].valid)
+ continue;
+ switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
+ case IRDMA_SD_TYPE_DIRECT:
+ ret_code = irdma_prep_remove_sd_bp(info->hmc_info, i);
+ if (!ret_code) {
+ info->hmc_info->sd_indexes[info->del_sd_cnt] =
+ (u16)i;
+ info->del_sd_cnt++;
+ }
+ break;
+ case IRDMA_SD_TYPE_PAGED:
+ ret_code = irdma_prep_remove_pd_page(info->hmc_info, i);
+ if (ret_code)
+ break;
+ if (dev->hmc_info != info->hmc_info &&
+ info->rsrc_type == IRDMA_HMC_IW_PBLE &&
+ pd_table->pd_entry) {
+ kfree(pd_table->pd_entry_virt_mem.va);
+ pd_table->pd_entry = NULL;
+ }
+ info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
+ info->del_sd_cnt++;
+ break;
+ default:
+ break;
+ }
+ }
+ return irdma_finish_del_sd_reg(dev, info, reset);
+}
+
+/**
+ * irdma_add_sd_table_entry - Adds a segment descriptor to the table
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @sd_index: segment descriptor index to manipulate
+ * @type: what type of segment descriptor we're manipulating
+ * @direct_mode_sz: size to alloc in direct mode
+ */
+int irdma_add_sd_table_entry(struct irdma_hw *hw,
+ struct irdma_hmc_info *hmc_info, u32 sd_index,
+ enum irdma_sd_entry_type type, u64 direct_mode_sz)
+{
+ struct irdma_hmc_sd_entry *sd_entry;
+ struct irdma_dma_mem dma_mem;
+ u64 alloc_len;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
+ if (!sd_entry->valid) {
+ if (type == IRDMA_SD_TYPE_PAGED)
+ alloc_len = IRDMA_HMC_PAGED_BP_SIZE;
+ else
+ alloc_len = direct_mode_sz;
+
+ /* allocate a 4K pd page or 2M backing page */
+ dma_mem.size = ALIGN(alloc_len, IRDMA_HMC_PD_BP_BUF_ALIGNMENT);
+ dma_mem.va = dma_alloc_coherent(hw->device, dma_mem.size,
+ &dma_mem.pa, GFP_KERNEL);
+ if (!dma_mem.va)
+ return -ENOMEM;
+ if (type == IRDMA_SD_TYPE_PAGED) {
+ struct irdma_virt_mem *vmem =
+ &sd_entry->u.pd_table.pd_entry_virt_mem;
+
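+ /* one pd_entry per PD in this SD (512 == IRDMA_HMC_PD_CNT_IN_SD) */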
+ vmem->size = sizeof(struct irdma_hmc_pd_entry) * 512;
+ vmem->va = kzalloc(vmem->size, GFP_KERNEL);
+ if (!vmem->va) {
+ dma_free_coherent(hw->device, dma_mem.size,
+ dma_mem.va, dma_mem.pa);
+ dma_mem.va = NULL;
+ return -ENOMEM;
+ }
+ sd_entry->u.pd_table.pd_entry = vmem->va;
+
+ memcpy(&sd_entry->u.pd_table.pd_page_addr, &dma_mem,
+ sizeof(sd_entry->u.pd_table.pd_page_addr));
+ } else {
+ memcpy(&sd_entry->u.bp.addr, &dma_mem,
+ sizeof(sd_entry->u.bp.addr));
+
+ sd_entry->u.bp.sd_pd_index = sd_index;
+ }
+
+ hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
+ hmc_info->sd_table.use_cnt++;
+ }
+ if (sd_entry->entry_type == IRDMA_SD_TYPE_DIRECT)
+ sd_entry->u.bp.use_cnt++;
+
+ return 0;
+}
+
+/**
+ * irdma_add_pd_table_entry - Adds page descriptor to the specified table
+ * @dev: pointer to our device structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @pd_index: which page descriptor index to manipulate
+ * @rsrc_pg: if not NULL, use the preallocated page instead of allocating a new one
+ *
+ * This function:
+ * 1. Initializes the pd entry
+ * 2. Adds the pd_entry to the pd_table
+ * 3. Marks the entry valid in the irdma_hmc_pd_entry structure
+ * 4. Initializes the pd_entry's ref count to 1
+ * assumptions:
+ * 1. The memory for the pd should be pinned down, physically contiguous,
+ * aligned on a 4K boundary and zeroed.
+ * 2. It should be 4K in size.
+ */
+int irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, u32 pd_index,
+ struct irdma_dma_mem *rsrc_pg)
+{
+ struct irdma_hmc_pd_table *pd_table;
+ struct irdma_hmc_pd_entry *pd_entry;
+ struct irdma_dma_mem mem;
+ struct irdma_dma_mem *page = &mem;
+ u32 sd_idx, rel_pd_idx;
+ u64 *pd_addr;
+ u64 page_desc;
+
+ if (pd_index / IRDMA_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)
+ return -EINVAL;
+
+ sd_idx = (pd_index / IRDMA_HMC_PD_CNT_IN_SD);
+ if (hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
+ IRDMA_SD_TYPE_PAGED)
+ return 0;
+
+ rel_pd_idx = (pd_index % IRDMA_HMC_PD_CNT_IN_SD);
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ if (!pd_entry->valid) {
+ if (rsrc_pg) {
+ pd_entry->rsrc_pg = true;
+ page = rsrc_pg;
+ } else {
+ page->size = ALIGN(IRDMA_HMC_PAGED_BP_SIZE,
+ IRDMA_HMC_PD_BP_BUF_ALIGNMENT);
+ page->va = dma_alloc_coherent(dev->hw->device,
+ page->size, &page->pa,
+ GFP_KERNEL);
+ if (!page->va)
+ return -ENOMEM;
+
+ pd_entry->rsrc_pg = false;
+ }
+
+ memcpy(&pd_entry->bp.addr, page, sizeof(pd_entry->bp.addr));
+ pd_entry->bp.sd_pd_index = pd_index;
+ pd_entry->bp.entry_type = IRDMA_SD_TYPE_PAGED;
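+ /* bit 0 marks the PD entry as valid */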
+ page_desc = page->pa | 0x1;
+ pd_addr = pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+ memcpy(pd_addr, &page_desc, sizeof(*pd_addr));
+ pd_entry->sd_index = sd_idx;
+ pd_entry->valid = true;
+ pd_table->use_cnt++;
+
+ if (hmc_info->hmc_fn_id < dev->hw_attrs.first_hw_vf_fpm_id &&
+ dev->privileged)
+ irdma_invalidate_pf_hmc_pd(dev, sd_idx, rel_pd_idx);
+ }
+ pd_entry->bp.use_cnt++;
+
+ return 0;
+}
+
+/**
+ * irdma_remove_pd_bp - remove a backing page from a page descriptor
+ * @dev: pointer to our device structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ *
+ * This function:
+ * 1. Marks the entry in the pd table (for paged address mode) or in the
+ * sd table (for direct address mode) invalid.
+ * 2. Writes to the PMPDINV register to invalidate the backing page in the FV cache
+ * 3. Decrements the ref count for the pd_entry
+ * assumptions:
+ * 1. The caller can deallocate the memory used by backing storage after this
+ * function returns.
+ */
+int irdma_remove_pd_bp(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, u32 idx)
+{
+ struct irdma_hmc_pd_entry *pd_entry;
+ struct irdma_hmc_pd_table *pd_table;
+ struct irdma_hmc_sd_entry *sd_entry;
+ u32 sd_idx, rel_pd_idx;
+ struct irdma_dma_mem *mem;
+ u64 *pd_addr;
+
+ sd_idx = idx / IRDMA_HMC_PD_CNT_IN_SD;
+ rel_pd_idx = idx % IRDMA_HMC_PD_CNT_IN_SD;
+ if (sd_idx >= hmc_info->sd_table.sd_cnt)
+ return -EINVAL;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+ if (sd_entry->entry_type != IRDMA_SD_TYPE_PAGED)
+ return -EINVAL;
+
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
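+ /* the backing page stays until the last user drops its reference */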
+ if (--pd_entry->bp.use_cnt)
+ return 0;
+
+ pd_entry->valid = false;
+ pd_table->use_cnt--;
+ pd_addr = pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+ memset(pd_addr, 0, sizeof(u64));
+ if (dev->privileged && dev->hmc_fn_id == hmc_info->hmc_fn_id)
+ irdma_invalidate_pf_hmc_pd(dev, sd_idx, idx);
+
+ if (!pd_entry->rsrc_pg) {
+ mem = &pd_entry->bp.addr;
+ if (!mem || !mem->va)
+ return -EINVAL;
+
+ dma_free_coherent(dev->hw->device, mem->size, mem->va,
+ mem->pa);
+ mem->va = NULL;
+ }
+ if (!pd_table->use_cnt)
+ kfree(pd_table->pd_entry_virt_mem.va);
+
+ return 0;
+}
+
+/**
+ * irdma_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ */
+int irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info, u32 idx)
+{
+ struct irdma_hmc_sd_entry *sd_entry;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ if (--sd_entry->u.bp.use_cnt)
+ return -EBUSY;
+
+ hmc_info->sd_table.use_cnt--;
+ sd_entry->valid = false;
+
+ return 0;
+}
+
+/**
+ * irdma_prep_remove_pd_page - Prepares to remove a PD page from a sd entry
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ */
+int irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx)
+{
+ struct irdma_hmc_sd_entry *sd_entry;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+
+ if (sd_entry->u.pd_table.use_cnt)
+ return -EBUSY;
+
+ sd_entry->valid = false;
+ hmc_info->sd_table.use_cnt--;
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/irdma/hmc.h b/drivers/infiniband/hw/irdma/hmc.h
new file mode 100644
index 000000000000..257a5d22aa96
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/hmc.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2015 - 2020 Intel Corporation */
+#ifndef IRDMA_HMC_H
+#define IRDMA_HMC_H
+
+#include "defs.h"
+
+#define IRDMA_HMC_MAX_BP_COUNT 512
+#define IRDMA_MAX_SD_ENTRIES 11
+#define IRDMA_HW_DBG_HMC_INVALID_BP_MARK 0xca
+#define IRDMA_HMC_INFO_SIGNATURE 0x484d5347
+#define IRDMA_HMC_PD_CNT_IN_SD 512
+#define IRDMA_HMC_DIRECT_BP_SIZE 0x200000
+#define IRDMA_HMC_MAX_SD_COUNT 8192
+#define IRDMA_HMC_PAGED_BP_SIZE 4096
+#define IRDMA_HMC_PD_BP_BUF_ALIGNMENT 4096
+#define IRDMA_FIRST_VF_FPM_ID 8
+#define FPM_MULTIPLIER 1024
+#define IRDMA_OBJ_LOC_MEM_BIT 0x4
+#define IRDMA_XF_MULTIPLIER 16
+#define IRDMA_RRF_MULTIPLIER 8
+#define IRDMA_MIN_PBLE_PAGES 3
+#define IRDMA_HMC_PAGE_SIZE 2097152
+#define IRDMA_MIN_MR_PER_QP 4
+#define IRDMA_MIN_QP_CNT 64
+#define IRDMA_FSIAV_CNT_MAX 1048576
+#define IRDMA_MIN_IRD 8
+#define IRDMA_HMC_MIN_RRF 16
+
+enum irdma_hmc_rsrc_type {
+ IRDMA_HMC_IW_QP = 0,
+ IRDMA_HMC_IW_CQ = 1,
+ IRDMA_HMC_IW_SRQ = 2,
+ IRDMA_HMC_IW_HTE = 3,
+ IRDMA_HMC_IW_ARP = 4,
+ IRDMA_HMC_IW_APBVT_ENTRY = 5,
+ IRDMA_HMC_IW_MR = 6,
+ IRDMA_HMC_IW_XF = 7,
+ IRDMA_HMC_IW_XFFL = 8,
+ IRDMA_HMC_IW_Q1 = 9,
+ IRDMA_HMC_IW_Q1FL = 10,
+ IRDMA_HMC_IW_TIMER = 11,
+ IRDMA_HMC_IW_FSIMC = 12,
+ IRDMA_HMC_IW_FSIAV = 13,
+ IRDMA_HMC_IW_PBLE = 14,
+ IRDMA_HMC_IW_RRF = 15,
+ IRDMA_HMC_IW_RRFFL = 16,
+ IRDMA_HMC_IW_HDR = 17,
+ IRDMA_HMC_IW_MD = 18,
+ IRDMA_HMC_IW_OOISC = 19,
+ IRDMA_HMC_IW_OOISCFFL = 20,
+ IRDMA_HMC_IW_MAX, /* Must be last entry */
+};
+
+enum irdma_sd_entry_type {
+ IRDMA_SD_TYPE_INVALID = 0,
+ IRDMA_SD_TYPE_PAGED = 1,
+ IRDMA_SD_TYPE_DIRECT = 2,
+};
+
+enum irdma_hmc_obj_mem {
+ IRDMA_HOST_MEM = 0,
+ IRDMA_LOC_MEM = 1,
+};
+
+struct irdma_hmc_obj_info {
+ u64 base;
+ u32 max_cnt;
+ u32 cnt;
+ u64 size;
+ enum irdma_hmc_obj_mem mem_loc;
+};
+
+struct irdma_hmc_bp {
+ enum irdma_sd_entry_type entry_type;
+ struct irdma_dma_mem addr;
+ u32 sd_pd_index;
+ u32 use_cnt;
+};
+
+struct irdma_hmc_pd_entry {
+ struct irdma_hmc_bp bp;
+ u32 sd_index;
+ bool rsrc_pg:1;
+ bool valid:1;
+};
+
+struct irdma_hmc_pd_table {
+ struct irdma_dma_mem pd_page_addr;
+ struct irdma_hmc_pd_entry *pd_entry;
+ struct irdma_virt_mem pd_entry_virt_mem;
+ u32 use_cnt;
+ u32 sd_index;
+};
+
+struct irdma_hmc_sd_entry {
+ enum irdma_sd_entry_type entry_type;
+ bool valid;
+ union {
+ struct irdma_hmc_pd_table pd_table;
+ struct irdma_hmc_bp bp;
+ } u;
+};
+
+struct irdma_hmc_sd_table {
+ struct irdma_virt_mem addr;
+ u32 sd_cnt;
+ u32 use_cnt;
+ struct irdma_hmc_sd_entry *sd_entry;
+};
+
+struct irdma_hmc_info {
+ u32 signature;
+ u8 hmc_fn_id;
+ u16 first_sd_index;
+ struct irdma_hmc_obj_info *hmc_obj;
+ struct irdma_virt_mem hmc_obj_virt_mem;
+ struct irdma_hmc_sd_table sd_table;
+ u16 sd_indexes[IRDMA_HMC_MAX_SD_COUNT];
+};
+
+struct irdma_update_sd_entry {
+ u64 cmd;
+ u64 data;
+};
+
+struct irdma_update_sds_info {
+ u32 cnt;
+ u8 hmc_fn_id;
+ struct irdma_update_sd_entry entry[IRDMA_MAX_SD_ENTRIES];
+};
+
+struct irdma_ccq_cqe_info;
+struct irdma_hmc_fcn_info {
+ u32 vf_id;
+ u8 protocol_used;
+ u8 free_fcn;
+};
+
+struct irdma_hmc_create_obj_info {
+ struct irdma_hmc_info *hmc_info;
+ struct irdma_virt_mem add_sd_virt_mem;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ u32 add_sd_cnt;
+ enum irdma_sd_entry_type entry_type;
+ bool privileged;
+};
+
+struct irdma_hmc_del_obj_info {
+ struct irdma_hmc_info *hmc_info;
+ struct irdma_virt_mem del_sd_virt_mem;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ u32 del_sd_cnt;
+ bool privileged;
+};
+
+int irdma_copy_dma_mem(struct irdma_hw *hw, void *dest_buf,
+ struct irdma_dma_mem *src_mem, u64 src_offset, u64 size);
+int irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
+ struct irdma_hmc_create_obj_info *info);
+int irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
+ struct irdma_hmc_del_obj_info *info, bool reset);
+int irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, u64 pa, u32 sd_idx,
+ enum irdma_sd_entry_type type,
+ bool setsd);
+int irdma_update_sds_noccq(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info);
+struct irdma_vfdev *irdma_vfdev_from_fpm(struct irdma_sc_dev *dev,
+ u8 hmc_fn_id);
+struct irdma_hmc_info *irdma_vf_hmcinfo_from_fpm(struct irdma_sc_dev *dev,
+ u8 hmc_fn_id);
+int irdma_add_sd_table_entry(struct irdma_hw *hw,
+ struct irdma_hmc_info *hmc_info, u32 sd_index,
+ enum irdma_sd_entry_type type, u64 direct_mode_sz);
+int irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, u32 pd_index,
+ struct irdma_dma_mem *rsrc_pg);
+int irdma_remove_pd_bp(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, u32 idx);
+int irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info, u32 idx);
+int irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx);
+#endif /* IRDMA_HMC_H */
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
new file mode 100644
index 000000000000..7bad0e38786a
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -0,0 +1,2826 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "main.h"
+
+static struct irdma_rsrc_limits rsrc_limits_table[] = {
+ [0] = {
+ .qplimit = SZ_128,
+ },
+ [1] = {
+ .qplimit = SZ_1K,
+ },
+ [2] = {
+ .qplimit = SZ_2K,
+ },
+ [3] = {
+ .qplimit = SZ_4K,
+ },
+ [4] = {
+ .qplimit = SZ_16K,
+ },
+ [5] = {
+ .qplimit = SZ_64K,
+ },
+ [6] = {
+ .qplimit = SZ_128K,
+ },
+ [7] = {
+ .qplimit = SZ_256K,
+ },
+};
+
+/* types of hmc objects */
+static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
+ IRDMA_HMC_IW_QP,
+ IRDMA_HMC_IW_CQ,
+ IRDMA_HMC_IW_SRQ,
+ IRDMA_HMC_IW_HTE,
+ IRDMA_HMC_IW_ARP,
+ IRDMA_HMC_IW_APBVT_ENTRY,
+ IRDMA_HMC_IW_MR,
+ IRDMA_HMC_IW_XF,
+ IRDMA_HMC_IW_XFFL,
+ IRDMA_HMC_IW_Q1,
+ IRDMA_HMC_IW_Q1FL,
+ IRDMA_HMC_IW_PBLE,
+ IRDMA_HMC_IW_TIMER,
+ IRDMA_HMC_IW_FSIMC,
+ IRDMA_HMC_IW_FSIAV,
+ IRDMA_HMC_IW_RRF,
+ IRDMA_HMC_IW_RRFFL,
+ IRDMA_HMC_IW_HDR,
+ IRDMA_HMC_IW_MD,
+ IRDMA_HMC_IW_OOISC,
+ IRDMA_HMC_IW_OOISCFFL,
+};
+
+/**
+ * irdma_iwarp_ce_handler - handle iwarp completions
+ * @iwcq: iwarp cq receiving event
+ */
+static void irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq)
+{
+ struct irdma_cq *cq = iwcq->back_cq;
+
+ if (!cq->user_mode)
+ atomic_set(&cq->armed, 0);
+ if (cq->ibcq.comp_handler)
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+}
+
+/**
+ * irdma_puda_ce_handler - handle puda completion events
+ * @rf: RDMA PCI function
+ * @cq: puda completion q for event
+ */
+static void irdma_puda_ce_handler(struct irdma_pci_f *rf,
+ struct irdma_sc_cq *cq)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ u32 compl_error;
+ int status;
+
+ do {
+ status = irdma_puda_poll_cmpl(dev, cq, &compl_error);
+ if (status == -ENOENT)
+ break;
+ if (status) {
+ ibdev_dbg(to_ibdev(dev), "ERR: puda status = %d\n", status);
+ break;
+ }
+ if (compl_error) {
+ ibdev_dbg(to_ibdev(dev), "ERR: puda compl_err =0x%x\n",
+ compl_error);
+ break;
+ }
+ } while (1);
+
+ irdma_sc_ccq_arm(cq);
+}
+
+/**
+ * irdma_process_ceq - handle ceq for completions
+ * @rf: RDMA PCI function
+ * @ceq: ceq having cq for completion
+ */
+static void irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_sc_ceq *sc_ceq;
+ struct irdma_sc_cq *cq;
+ unsigned long flags;
+
+ sc_ceq = &ceq->sc_ceq;
+ do {
+ spin_lock_irqsave(&ceq->ce_lock, flags);
+ cq = irdma_sc_process_ceq(dev, sc_ceq);
+ if (!cq) {
+ spin_unlock_irqrestore(&ceq->ce_lock, flags);
+ break;
+ }
+
+ if (cq->cq_type == IRDMA_CQ_TYPE_IWARP)
+ irdma_iwarp_ce_handler(cq);
+
+ spin_unlock_irqrestore(&ceq->ce_lock, flags);
+
+ if (cq->cq_type == IRDMA_CQ_TYPE_CQP)
+ queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work);
+ else if (cq->cq_type == IRDMA_CQ_TYPE_ILQ ||
+ cq->cq_type == IRDMA_CQ_TYPE_IEQ)
+ irdma_puda_ce_handler(rf, cq);
+ } while (1);
+}
+
+static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
+ struct irdma_aeqe_info *info)
+{
+ struct qp_err_code qp_err;
+
+ qp->sq_flush_code = info->sq;
+ qp->rq_flush_code = info->rq;
+ if (qp->qp_uk.uk_attrs->hw_rev >= IRDMA_GEN_3) {
+ if (info->sq) {
+ qp->err_sq_idx_valid = true;
+ qp->err_sq_idx = info->wqe_idx;
+ }
+ if (info->rq) {
+ qp->err_rq_idx_valid = true;
+ qp->err_rq_idx = info->wqe_idx;
+ }
+ }
+
+ qp_err = irdma_ae_to_qp_err_code(info->ae_id);
+ qp->flush_code = qp_err.flush_code;
+ qp->event_type = qp_err.event_type;
+}
+
+/**
+ * irdma_complete_cqp_request - perform post-completion cleanup
+ * @cqp: device CQP
+ * @cqp_request: CQP request
+ *
+ * Mark CQP request as done, wake up waiting thread or invoke
+ * callback function and release/free CQP request.
+ */
+static void irdma_complete_cqp_request(struct irdma_cqp *cqp,
+ struct irdma_cqp_request *cqp_request)
+{
+ if (cqp_request->waiting) {
+ WRITE_ONCE(cqp_request->request_done, true);
+ wake_up(&cqp_request->waitq);
+ } else if (cqp_request->callback_fcn) {
+ cqp_request->callback_fcn(cqp_request);
+ }
+ irdma_put_cqp_request(cqp, cqp_request);
+}
+
+/**
+ * irdma_process_ae_def_cmpl - handle IRDMA_AE_CQP_DEFERRED_COMPLETE event
+ * @rf: RDMA PCI function
+ * @info: AEQ entry info
+ */
+static void irdma_process_ae_def_cmpl(struct irdma_pci_f *rf,
+ struct irdma_aeqe_info *info)
+{
+ u32 sw_def_info;
+ u64 scratch;
+
+ irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
+
+ irdma_sc_cqp_def_cmpl_ae_handler(&rf->sc_dev, info, true,
+ &scratch, &sw_def_info);
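+ /* each returned scratch is a pending CQP request; complete them all */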
+ while (scratch) {
+ struct irdma_cqp_request *cqp_request =
+ (struct irdma_cqp_request *)(uintptr_t)scratch;
+
+ irdma_complete_cqp_request(&rf->cqp, cqp_request);
+ irdma_sc_cqp_def_cmpl_ae_handler(&rf->sc_dev, info, false,
+ &scratch, &sw_def_info);
+ }
+}
+
+/**
+ * irdma_process_aeq - handle aeq events
+ * @rf: RDMA PCI function
+ */
+static void irdma_process_aeq(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_aeq *aeq = &rf->aeq;
+ struct irdma_sc_aeq *sc_aeq = &aeq->sc_aeq;
+ struct irdma_aeqe_info aeinfo;
+ struct irdma_aeqe_info *info = &aeinfo;
+ int ret;
+ struct irdma_qp *iwqp = NULL;
+ struct irdma_cq *iwcq = NULL;
+ struct irdma_sc_qp *qp = NULL;
+ struct irdma_qp_host_ctx_info *ctx_info = NULL;
+ struct irdma_device *iwdev = rf->iwdev;
+ struct irdma_sc_srq *srq;
+ unsigned long flags;
+
+ u32 aeqcnt = 0;
+
+ if (!sc_aeq->size)
+ return;
+
+ do {
+ memset(info, 0, sizeof(*info));
+ ret = irdma_sc_get_next_aeqe(sc_aeq, info);
+ if (ret)
+ break;
+
+ if (info->aeqe_overflow) {
+ ibdev_err(&iwdev->ibdev, "AEQ has overflowed\n");
+ rf->reset = true;
+ rf->gen_ops.request_reset(rf);
+ return;
+ }
+
+ aeqcnt++;
+ ibdev_dbg(&iwdev->ibdev,
+ "AEQ: ae_id = 0x%x bool qp=%d qp_id = %d tcp_state=%d iwarp_state=%d ae_src=%d\n",
+ info->ae_id, info->qp, info->qp_cq_id, info->tcp_state,
+ info->iwarp_state, info->ae_src);
+
+ if (info->qp) {
+ spin_lock_irqsave(&rf->qptable_lock, flags);
+ iwqp = rf->qp_table[info->qp_cq_id];
+ if (!iwqp) {
+ spin_unlock_irqrestore(&rf->qptable_lock,
+ flags);
+ if (info->ae_id == IRDMA_AE_QP_SUSPEND_COMPLETE) {
+ atomic_dec(&iwdev->vsi.qp_suspend_reqs);
+ wake_up(&iwdev->suspend_wq);
+ continue;
+ }
+ ibdev_dbg(&iwdev->ibdev, "AEQ: qp_id %d is already freed\n",
+ info->qp_cq_id);
+ continue;
+ }
+ irdma_qp_add_ref(&iwqp->ibqp);
+ spin_unlock_irqrestore(&rf->qptable_lock, flags);
+ qp = &iwqp->sc_qp;
+ spin_lock_irqsave(&iwqp->lock, flags);
+ iwqp->hw_tcp_state = info->tcp_state;
+ iwqp->hw_iwarp_state = info->iwarp_state;
+ if (info->ae_id != IRDMA_AE_QP_SUSPEND_COMPLETE)
+ iwqp->last_aeq = info->ae_id;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ } else if (info->srq) {
+ if (info->ae_id != IRDMA_AE_SRQ_LIMIT)
+ continue;
+ } else {
+ if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR &&
+ info->ae_id != IRDMA_AE_CQP_DEFERRED_COMPLETE)
+ continue;
+ }
+
+ switch (info->ae_id) {
+ struct irdma_cm_node *cm_node;
+ case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
+ cm_node = iwqp->cm_node;
+ if (cm_node->accept_pend) {
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ cm_node->accept_pend = 0;
+ }
+ iwqp->rts_ae_rcvd = 1;
+ wake_up_interruptible(&iwqp->waitq);
+ break;
+ case IRDMA_AE_LLP_FIN_RECEIVED:
+ case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
+ if (qp->term_flags)
+ break;
+ if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
+ iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSE_WAIT;
+ if (iwqp->hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT &&
+ iwqp->ibqp_state == IB_QPS_RTS) {
+ irdma_next_iw_state(iwqp,
+ IRDMA_QP_STATE_CLOSING,
+ 0, 0, 0);
+ irdma_cm_disconn(iwqp);
+ }
+ irdma_schedule_cm_timer(iwqp->cm_node,
+ (struct irdma_puda_buf *)iwqp,
+ IRDMA_TIMER_TYPE_CLOSE,
+ 1, 0);
+ }
+ break;
+ case IRDMA_AE_LLP_CLOSE_COMPLETE:
+ if (qp->term_flags)
+ irdma_terminate_done(qp, 0);
+ else
+ irdma_cm_disconn(iwqp);
+ break;
+ case IRDMA_AE_BAD_CLOSE:
+ case IRDMA_AE_RESET_SENT:
+ irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0,
+ 0);
+ irdma_cm_disconn(iwqp);
+ break;
+ case IRDMA_AE_LLP_CONNECTION_RESET:
+ if (atomic_read(&iwqp->close_timer_started))
+ break;
+ irdma_cm_disconn(iwqp);
+ break;
+ case IRDMA_AE_QP_SUSPEND_COMPLETE:
+ if (iwqp->iwdev->vsi.tc_change_pending) {
+ if (!atomic_dec_return(&qp->vsi->qp_suspend_reqs))
+ wake_up(&iwqp->iwdev->suspend_wq);
+ }
+ if (iwqp->suspend_pending) {
+ iwqp->suspend_pending = false;
+ wake_up(&iwqp->iwdev->suspend_wq);
+ }
+ break;
+ case IRDMA_AE_TERMINATE_SENT:
+ irdma_terminate_send_fin(qp);
+ break;
+ case IRDMA_AE_LLP_TERMINATE_RECEIVED:
+ irdma_terminate_received(qp, info);
+ break;
+ case IRDMA_AE_CQ_OPERATION_ERROR:
+ ibdev_err(&iwdev->ibdev,
+ "Processing an iWARP related AE for CQ misc = 0x%04X\n",
+ info->ae_id);
+
+ spin_lock_irqsave(&rf->cqtable_lock, flags);
+ iwcq = rf->cq_table[info->qp_cq_id];
+ if (!iwcq) {
+ spin_unlock_irqrestore(&rf->cqtable_lock,
+ flags);
+ ibdev_dbg(to_ibdev(dev),
+ "cq_id %d is already freed\n", info->qp_cq_id);
+ continue;
+ }
+ irdma_cq_add_ref(&iwcq->ibcq);
+ spin_unlock_irqrestore(&rf->cqtable_lock, flags);
+
+ if (iwcq->ibcq.event_handler) {
+ struct ib_event ibevent;
+
+ ibevent.device = iwcq->ibcq.device;
+ ibevent.event = IB_EVENT_CQ_ERR;
+ ibevent.element.cq = &iwcq->ibcq;
+ iwcq->ibcq.event_handler(&ibevent,
+ iwcq->ibcq.cq_context);
+ }
+ irdma_cq_rem_ref(&iwcq->ibcq);
+ break;
+ case IRDMA_AE_SRQ_LIMIT:
+ srq = (struct irdma_sc_srq *)(uintptr_t)info->compl_ctx;
+ irdma_srq_event(srq);
+ break;
+ case IRDMA_AE_SRQ_CATASTROPHIC_ERROR:
+ break;
+ case IRDMA_AE_CQP_DEFERRED_COMPLETE:
+ /* Remove completed CQP requests from pending list
+ * and notify about those CQP ops completion.
+ */
+ irdma_process_ae_def_cmpl(rf, info);
+ break;
+ case IRDMA_AE_RESET_NOT_SENT:
+ case IRDMA_AE_LLP_DOUBT_REACHABILITY:
+ case IRDMA_AE_RESOURCE_EXHAUSTION:
+ break;
+ case IRDMA_AE_PRIV_OPERATION_DENIED:
+ case IRDMA_AE_STAG_ZERO_INVALID:
+ case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
+ case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
+ case IRDMA_AE_DDP_UBE_INVALID_MO:
+ case IRDMA_AE_DDP_UBE_INVALID_QN:
+ case IRDMA_AE_DDP_NO_L_BIT:
+ case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
+ case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
+ case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
+ case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
+ case IRDMA_AE_INVALID_ARP_ENTRY:
+ case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
+ case IRDMA_AE_STALE_ARP_ENTRY:
+ case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
+ case IRDMA_AE_LLP_SYN_RECEIVED:
+ case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+ case IRDMA_AE_LCE_QP_CATASTROPHIC:
+ case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
+ case IRDMA_AE_LLP_TOO_MANY_RNRS:
+ case IRDMA_AE_LCE_CQ_CATASTROPHIC:
+ case IRDMA_AE_REMOTE_QP_CATASTROPHIC:
+ case IRDMA_AE_LOCAL_QP_CATASTROPHIC:
+ case IRDMA_AE_RCE_QP_CATASTROPHIC:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ default:
+ ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d, ae_src=%d\n",
+ info->ae_id, info->qp, info->qp_cq_id, info->ae_src);
+ ctx_info = &iwqp->ctx_info;
+ if (rdma_protocol_roce(&iwqp->iwdev->ibdev, 1)) {
+ ctx_info->roce_info->err_rq_idx_valid =
+ ctx_info->srq_valid ? false : info->err_rq_idx_valid;
+ if (ctx_info->roce_info->err_rq_idx_valid) {
+ ctx_info->roce_info->err_rq_idx = info->wqe_idx;
+ irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
+ ctx_info);
+ }
+ irdma_set_flush_fields(qp, info);
+ irdma_cm_disconn(iwqp);
+ break;
+ }
+ ctx_info->iwarp_info->err_rq_idx_valid = info->rq;
+ if (info->rq) {
+ ctx_info->iwarp_info->err_rq_idx = info->wqe_idx;
+ ctx_info->tcp_info_valid = false;
+ ctx_info->iwarp_info_valid = true;
+ irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va,
+ ctx_info);
+ }
+ if (iwqp->hw_iwarp_state != IRDMA_QP_STATE_RTS &&
+ iwqp->hw_iwarp_state != IRDMA_QP_STATE_TERMINATE) {
+ irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0);
+ irdma_cm_disconn(iwqp);
+ } else {
+ irdma_terminate_connection(qp, info);
+ }
+ break;
+ }
+ if (info->qp)
+ irdma_qp_rem_ref(&iwqp->ibqp);
+ } while (1);
+
+ if (aeqcnt)
+ irdma_sc_repost_aeq_entries(dev, aeqcnt);
+}
+
+/**
+ * irdma_ena_intr - set up device interrupts
+ * @dev: hardware control device structure
+ * @msix_id: id of the interrupt to be enabled
+ */
+static void irdma_ena_intr(struct irdma_sc_dev *dev, u32 msix_id)
+{
+ dev->irq_ops->irdma_en_irq(dev, msix_id);
+}
+
+/**
+ * irdma_dpc - tasklet for aeq and ceq 0
+ * @t: tasklet_struct ptr
+ */
+static void irdma_dpc(struct tasklet_struct *t)
+{
+ struct irdma_pci_f *rf = from_tasklet(rf, t, dpc_tasklet);
+
+ if (rf->msix_shared)
+ irdma_process_ceq(rf, rf->ceqlist);
+ irdma_process_aeq(rf);
+ irdma_ena_intr(&rf->sc_dev, rf->iw_msixtbl[0].idx);
+}
+
+/**
+ * irdma_ceq_dpc - dpc handler for CEQ
+ * @t: tasklet_struct ptr
+ */
+static void irdma_ceq_dpc(struct tasklet_struct *t)
+{
+ struct irdma_ceq *iwceq = from_tasklet(iwceq, t, dpc_tasklet);
+ struct irdma_pci_f *rf = iwceq->rf;
+
+ irdma_process_ceq(rf, iwceq);
+ irdma_ena_intr(&rf->sc_dev, iwceq->msix_idx);
+}
+
+/**
+ * irdma_save_msix_info - copy msix vector information to iwarp device
+ * @rf: RDMA PCI function
+ *
+ * Allocate iwdev msix table and copy the msix info to the table
+ * Return 0 if successful, otherwise return error
+ */
+static int irdma_save_msix_info(struct irdma_pci_f *rf)
+{
+ struct irdma_qvlist_info *iw_qvlist;
+ struct irdma_qv_info *iw_qvinfo;
+ struct msix_entry *pmsix;
+ u32 ceq_idx;
+ u32 i;
+ size_t size;
+
+ if (!rf->msix_count)
+ return -EINVAL;
+
+ size = sizeof(struct irdma_msix_vector) * rf->msix_count;
+ size += struct_size(iw_qvlist, qv_info, rf->msix_count);
+ rf->iw_msixtbl = kzalloc(size, GFP_KERNEL);
+ if (!rf->iw_msixtbl)
+ return -ENOMEM;
+
+ rf->iw_qvlist = (struct irdma_qvlist_info *)
+ (&rf->iw_msixtbl[rf->msix_count]);
+ iw_qvlist = rf->iw_qvlist;
+ iw_qvinfo = iw_qvlist->qv_info;
+ iw_qvlist->num_vectors = rf->msix_count;
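+ /* with few vectors available, AEQ and CEQ 0 share MSI-X vector 0 */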
+ if (rf->msix_count <= num_online_cpus())
+ rf->msix_shared = true;
+
+ pmsix = rf->msix_entries;
+ for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) {
+ rf->iw_msixtbl[i].idx = pmsix->entry;
+ rf->iw_msixtbl[i].irq = pmsix->vector;
+ rf->iw_msixtbl[i].cpu_affinity = ceq_idx;
+ if (!i) {
+ iw_qvinfo->aeq_idx = 0;
+ if (rf->msix_shared)
+ iw_qvinfo->ceq_idx = ceq_idx++;
+ else
+ iw_qvinfo->ceq_idx = IRDMA_Q_INVALID_IDX;
+ } else {
+ iw_qvinfo->aeq_idx = IRDMA_Q_INVALID_IDX;
+ iw_qvinfo->ceq_idx = ceq_idx++;
+ }
+ iw_qvinfo->itr_idx = 3;
+ iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx;
+ pmsix++;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_irq_handler - interrupt handler for aeq and ceq0
+ * @irq: Interrupt request number
+ * @data: RDMA PCI function
+ */
+static irqreturn_t irdma_irq_handler(int irq, void *data)
+{
+ struct irdma_pci_f *rf = data;
+
+ tasklet_schedule(&rf->dpc_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * irdma_ceq_handler - interrupt handler for ceq
+ * @irq: interrupt request number
+ * @data: ceq pointer
+ */
+static irqreturn_t irdma_ceq_handler(int irq, void *data)
+{
+ struct irdma_ceq *iwceq = data;
+
+ if (iwceq->irq != irq)
+ ibdev_err(to_ibdev(&iwceq->rf->sc_dev), "expected irq = %d received irq = %d\n",
+ iwceq->irq, irq);
+ tasklet_schedule(&iwceq->dpc_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * irdma_destroy_irq - destroy device interrupts
+ * @rf: RDMA PCI function
+ * @msix_vec: msix vector to disable irq
+ * @dev_id: parameter to pass to free_irq (used during irq setup)
+ *
+ * The function is called when destroying aeq/ceq
+ */
+static void irdma_destroy_irq(struct irdma_pci_f *rf,
+ struct irdma_msix_vector *msix_vec, void *dev_id)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+
+ dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
+ irq_update_affinity_hint(msix_vec->irq, NULL);
+ free_irq(msix_vec->irq, dev_id);
+ if (rf == dev_id) {
+ tasklet_kill(&rf->dpc_tasklet);
+ } else {
+ struct irdma_ceq *iwceq = (struct irdma_ceq *)dev_id;
+
+ tasklet_kill(&iwceq->dpc_tasklet);
+ }
+}
+
+/**
+ * irdma_destroy_cqp - destroy control qp
+ * @rf: RDMA PCI function
+ *
+ * Issue destroy cqp request and
+ * free the resources associated with the cqp
+ */
+static void irdma_destroy_cqp(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_cqp *cqp = &rf->cqp;
+ int status = 0;
+
+ status = irdma_sc_cqp_destroy(dev->cqp);
+ if (status)
+ ibdev_dbg(to_ibdev(dev), "ERR: Destroy CQP failed %d\n", status);
+
+ irdma_cleanup_pending_cqp_op(rf);
+ dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va,
+ cqp->sq.pa);
+ cqp->sq.va = NULL;
+ kfree(cqp->oop_op_array);
+ cqp->oop_op_array = NULL;
+ kfree(cqp->scratch_array);
+ cqp->scratch_array = NULL;
+ kfree(cqp->cqp_requests);
+ cqp->cqp_requests = NULL;
+}
+
+static void irdma_destroy_virt_aeq(struct irdma_pci_f *rf)
+{
+ struct irdma_aeq *aeq = &rf->aeq;
+ u32 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
+ dma_addr_t *pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;
+
+ irdma_unmap_vm_page_list(&rf->hw, pg_arr, pg_cnt);
+ irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
+ vfree(aeq->mem.va);
+}
+
+/**
+ * irdma_destroy_aeq - destroy aeq
+ * @rf: RDMA PCI function
+ *
+ * Issue a destroy aeq request and
+ * free the resources associated with the aeq
+ * The function is called during driver unload
+ */
+static void irdma_destroy_aeq(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_aeq *aeq = &rf->aeq;
+ int status = -EBUSY;
+
+ if (!rf->msix_shared) {
+ if (rf->sc_dev.privileged)
+ rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev,
+ rf->iw_msixtbl->idx, false);
+ irdma_destroy_irq(rf, rf->iw_msixtbl, rf);
+ }
+ if (rf->reset)
+ goto exit;
+
+ aeq->sc_aeq.size = 0;
+ status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_DESTROY);
+ if (status)
+ ibdev_dbg(to_ibdev(dev), "ERR: Destroy AEQ failed %d\n", status);
+
+exit:
+ if (aeq->virtual_map) {
+ irdma_destroy_virt_aeq(rf);
+ } else {
+ dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va,
+ aeq->mem.pa);
+ aeq->mem.va = NULL;
+ }
+}
+
+/**
+ * irdma_destroy_ceq - destroy ceq
+ * @rf: RDMA PCI function
+ * @iwceq: ceq to be destroyed
+ *
+ * Issue a destroy ceq request and
+ * free the resources associated with the ceq
+ */
+static void irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ int status;
+
+ if (rf->reset)
+ goto exit;
+
+ status = irdma_sc_ceq_destroy(&iwceq->sc_ceq, 0, 1);
+ if (status) {
+ ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy command failed %d\n", status);
+ goto exit;
+ }
+
+ status = irdma_sc_cceq_destroy_done(&iwceq->sc_ceq);
+ if (status)
+ ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy completion failed %d\n",
+ status);
+exit:
+ dma_free_coherent(dev->hw->device, iwceq->mem.size, iwceq->mem.va,
+ iwceq->mem.pa);
+ iwceq->mem.va = NULL;
+}
+
+/**
+ * irdma_del_ceq_0 - destroy ceq 0
+ * @rf: RDMA PCI function
+ *
+ * Disable the ceq 0 interrupt and destroy ceq 0
+ */
+static void irdma_del_ceq_0(struct irdma_pci_f *rf)
+{
+ struct irdma_ceq *iwceq = rf->ceqlist;
+ struct irdma_msix_vector *msix_vec;
+
+ if (rf->msix_shared) {
+ msix_vec = &rf->iw_msixtbl[0];
+ if (rf->sc_dev.privileged)
+ rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
+ msix_vec->ceq_id,
+ msix_vec->idx, false);
+ irdma_destroy_irq(rf, msix_vec, rf);
+ } else {
+ msix_vec = &rf->iw_msixtbl[1];
+ irdma_destroy_irq(rf, msix_vec, iwceq);
+ }
+
+ irdma_destroy_ceq(rf, iwceq);
+ rf->sc_dev.ceq_valid = false;
+ rf->ceqs_count = 0;
+}
+
+/**
+ * irdma_del_ceqs - destroy all ceqs except CEQ 0
+ * @rf: RDMA PCI function
+ *
+ * Go through all of the device ceqs, except ceq 0, and for each
+ * ceq disable its interrupt and destroy it
+ */
+static void irdma_del_ceqs(struct irdma_pci_f *rf)
+{
+ struct irdma_ceq *iwceq = &rf->ceqlist[1];
+ struct irdma_msix_vector *msix_vec;
+ u32 i = 0;
+
+ if (rf->msix_shared)
+ msix_vec = &rf->iw_msixtbl[1];
+ else
+ msix_vec = &rf->iw_msixtbl[2];
+
+ for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) {
+ if (rf->sc_dev.privileged)
+ rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
+ msix_vec->ceq_id,
+ msix_vec->idx, false);
+ irdma_destroy_irq(rf, msix_vec, iwceq);
+ irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
+ IRDMA_OP_CEQ_DESTROY);
+ dma_free_coherent(rf->sc_dev.hw->device, iwceq->mem.size,
+ iwceq->mem.va, iwceq->mem.pa);
+ iwceq->mem.va = NULL;
+ }
+ rf->ceqs_count = 1;
+}
+
+/**
+ * irdma_destroy_ccq - destroy control cq
+ * @rf: RDMA PCI function
+ *
+ * Issue destroy ccq request and
+ * free the resources associated with the ccq
+ */
+static void irdma_destroy_ccq(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_ccq *ccq = &rf->ccq;
+ int status = 0;
+
+ if (rf->cqp_cmpl_wq)
+ destroy_workqueue(rf->cqp_cmpl_wq);
+
+ if (!rf->reset)
+ status = irdma_sc_ccq_destroy(dev->ccq, 0, true);
+ if (status)
+ ibdev_dbg(to_ibdev(dev), "ERR: CCQ destroy failed %d\n", status);
+ dma_free_coherent(dev->hw->device, ccq->mem_cq.size, ccq->mem_cq.va,
+ ccq->mem_cq.pa);
+ ccq->mem_cq.va = NULL;
+}
+
+/**
+ * irdma_close_hmc_objects_type - delete hmc objects of a given type
+ * @dev: iwarp device
+ * @obj_type: the hmc object type to be deleted
+ * @hmc_info: host memory info struct
+ * @privileged: permission to close HMC objects
+ * @reset: true if called before reset
+ */
+static void irdma_close_hmc_objects_type(struct irdma_sc_dev *dev,
+ enum irdma_hmc_rsrc_type obj_type,
+ struct irdma_hmc_info *hmc_info,
+ bool privileged, bool reset)
+{
+ struct irdma_hmc_del_obj_info info = {};
+
+ info.hmc_info = hmc_info;
+ info.rsrc_type = obj_type;
+ info.count = hmc_info->hmc_obj[obj_type].cnt;
+ info.privileged = privileged;
+ if (irdma_sc_del_hmc_obj(dev, &info, reset))
+ ibdev_dbg(to_ibdev(dev), "ERR: del HMC obj of type %d failed\n",
+ obj_type);
+}
+
+/**
+ * irdma_del_hmc_objects - remove all device hmc objects
+ * @dev: iwarp device
+ * @hmc_info: hmc_info to free
+ * @privileged: permission to delete HMC objects
+ * @reset: true if called before reset
+ * @vers: hardware version
+ */
+static void irdma_del_hmc_objects(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, bool privileged,
+ bool reset, enum irdma_vers vers)
+{
+ unsigned int i;
+
+ for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
+ if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
+ irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
+ hmc_info, privileged, reset);
+ if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
+ break;
+ }
+}
+
+/**
+ * irdma_create_hmc_obj_type - create hmc object of a given type
+ * @dev: hardware control device structure
+ * @info: information for the hmc object to create
+ */
+static int irdma_create_hmc_obj_type(struct irdma_sc_dev *dev,
+ struct irdma_hmc_create_obj_info *info)
+{
+ return irdma_sc_create_hmc_obj(dev, info);
+}
+
+/**
+ * irdma_create_hmc_objs - create all hmc objects for the device
+ * @rf: RDMA PCI function
+ * @privileged: permission to create HMC objects
+ * @vers: HW version
+ *
+ * Create the device hmc objects and allocate hmc pages
+ * Return 0 if successful, otherwise clean up and return error
+ */
+static int irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged,
+ enum irdma_vers vers)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_hmc_create_obj_info info = {};
+ int i, status = 0;
+
+ info.hmc_info = dev->hmc_info;
+ info.privileged = privileged;
+ info.entry_type = rf->sd_type;
+
+ for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
+ if (iw_hmc_obj_types[i] == IRDMA_HMC_IW_PBLE)
+ continue;
+ if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) {
+ info.rsrc_type = iw_hmc_obj_types[i];
+ info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
+ info.add_sd_cnt = 0;
+ status = irdma_create_hmc_obj_type(dev, &info);
+ if (status) {
+ ibdev_dbg(to_ibdev(dev),
+ "ERR: create obj type %d status = %d\n",
+ iw_hmc_obj_types[i], status);
+ break;
+ }
+ }
+ if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
+ break;
+ }
+
+ if (!status)
+ return irdma_sc_static_hmc_pages_allocated(dev->cqp, 0, dev->hmc_fn_id,
+ true, true);
+
+ while (i) {
+ i--;
+ /* destroy the hmc objects of a given type */
+ if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
+ irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
+ dev->hmc_info, privileged,
+ false);
+ }
+
+ return status;
+}
+
+/**
+ * irdma_obj_aligned_mem - get aligned memory from device allocated memory
+ * @rf: RDMA PCI function
+ * @memptr: points to the memory addresses
+ * @size: size of memory needed
+ * @mask: mask for the aligned memory
+ *
+ * Get aligned memory of the requested size and
+ * update the memptr to point to the new aligned memory
+ * Return 0 if successful, otherwise return no memory error
+ */
+static int irdma_obj_aligned_mem(struct irdma_pci_f *rf,
+ struct irdma_dma_mem *memptr, u32 size,
+ u32 mask)
+{
+ unsigned long va, newva;
+ unsigned long extra;
+
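+ /* mask is alignment - 1; round va up to the next aligned boundary */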
+ va = (unsigned long)rf->obj_next.va;
+ newva = va;
+ if (mask)
+ newva = ALIGN(va, (unsigned long)mask + 1ULL);
+ extra = newva - va;
+ memptr->va = (u8 *)va + extra;
+ memptr->pa = rf->obj_next.pa + extra;
+ memptr->size = size;
+ if (((u8 *)memptr->va + size) > ((u8 *)rf->obj_mem.va + rf->obj_mem.size))
+ return -ENOMEM;
+
+ rf->obj_next.va = (u8 *)memptr->va + size;
+ rf->obj_next.pa = memptr->pa + size;
+
+ return 0;
+}
+
+/**
+ * irdma_create_cqp - create control qp
+ * @rf: RDMA PCI function
+ *
+ * Return 0, if the cqp and all the resources associated with it
+ * are successfully created, otherwise return error
+ */
+static int irdma_create_cqp(struct irdma_pci_f *rf)
+{
+ u32 sqsize = IRDMA_CQP_SW_SQSIZE_2048;
+ struct irdma_dma_mem mem;
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_cqp_init_info cqp_init_info = {};
+ struct irdma_cqp *cqp = &rf->cqp;
+ u16 maj_err, min_err;
+ int i, status;
+
+ cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
+ if (!cqp->cqp_requests)
+ return -ENOMEM;
+
+ cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
+ if (!cqp->scratch_array) {
+ status = -ENOMEM;
+ goto err_scratch;
+ }
+
+ cqp->oop_op_array = kcalloc(sqsize, sizeof(*cqp->oop_op_array),
+ GFP_KERNEL);
+ if (!cqp->oop_op_array) {
+ status = -ENOMEM;
+ goto err_oop;
+ }
+ cqp_init_info.ooo_op_array = cqp->oop_op_array;
+ dev->cqp = &cqp->sc_cqp;
+ dev->cqp->dev = dev;
+ cqp->sq.size = ALIGN(sizeof(struct irdma_cqp_sq_wqe) * sqsize,
+ IRDMA_CQP_ALIGNMENT);
+ cqp->sq.va = dma_alloc_coherent(dev->hw->device, cqp->sq.size,
+ &cqp->sq.pa, GFP_KERNEL);
+ if (!cqp->sq.va) {
+ status = -ENOMEM;
+ goto err_sq;
+ }
+
+ status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx),
+ IRDMA_HOST_CTX_ALIGNMENT_M);
+ if (status)
+ goto err_ctx;
+
+ dev->cqp->host_ctx_pa = mem.pa;
+ dev->cqp->host_ctx = mem.va;
+ /* populate the cqp init info */
+ cqp_init_info.dev = dev;
+ cqp_init_info.sq_size = sqsize;
+ cqp_init_info.sq = cqp->sq.va;
+ cqp_init_info.sq_pa = cqp->sq.pa;
+ cqp_init_info.host_ctx_pa = mem.pa;
+ cqp_init_info.host_ctx = mem.va;
+ cqp_init_info.hmc_profile = rf->rsrc_profile;
+ cqp_init_info.scratch_array = cqp->scratch_array;
+ cqp_init_info.protocol_used = rf->protocol_used;
+
+ switch (rf->rdma_ver) {
+ case IRDMA_GEN_1:
+ cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_1;
+ break;
+ case IRDMA_GEN_2:
+ cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_2;
+ break;
+ case IRDMA_GEN_3:
+ cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_3;
+ cqp_init_info.ts_override = 1;
+ break;
+ }
+ status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info);
+ if (status) {
+ ibdev_dbg(to_ibdev(dev), "ERR: cqp init status %d\n", status);
+ goto err_ctx;
+ }
+
+ spin_lock_init(&cqp->req_lock);
+ spin_lock_init(&cqp->compl_lock);
+
+ status = irdma_sc_cqp_create(dev->cqp, &maj_err, &min_err);
+ if (status) {
+ ibdev_dbg(to_ibdev(dev),
+ "ERR: cqp create failed - status %d maj_err %d min_err %d\n",
+ status, maj_err, min_err);
+ goto err_ctx;
+ }
+
+ INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
+ INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
+
+ /* init the waitqueue of the cqp_requests and add them to the list */
+ for (i = 0; i < sqsize; i++) {
+ init_waitqueue_head(&cqp->cqp_requests[i].waitq);
+ list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
+ }
+ init_waitqueue_head(&cqp->remove_wq);
+ return 0;
+
+err_ctx:
+ dma_free_coherent(dev->hw->device, cqp->sq.size,
+ cqp->sq.va, cqp->sq.pa);
+ cqp->sq.va = NULL;
+err_sq:
+ kfree(cqp->oop_op_array);
+ cqp->oop_op_array = NULL;
+err_oop:
+ kfree(cqp->scratch_array);
+ cqp->scratch_array = NULL;
+err_scratch:
+ kfree(cqp->cqp_requests);
+ cqp->cqp_requests = NULL;
+
+ return status;
+}
+
+/**
+ * irdma_create_ccq - create control cq
+ * @rf: RDMA PCI function
+ *
+ * Return 0, if the ccq and the resources associated with it
+ * are successfully created, otherwise return error
+ */
+static int irdma_create_ccq(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_ccq_init_info info = {};
+ struct irdma_ccq *ccq = &rf->ccq;
+ int ccq_size;
+ int status;
+
+ dev->ccq = &ccq->sc_cq;
+ dev->ccq->dev = dev;
+ info.dev = dev;
+ ccq_size = (rf->rdma_ver >= IRDMA_GEN_3) ? IW_GEN_3_CCQ_SIZE : IW_CCQ_SIZE;
+ ccq->shadow_area.size = sizeof(struct irdma_cq_shadow_area);
+ ccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * ccq_size,
+ IRDMA_CQ0_ALIGNMENT);
+ ccq->mem_cq.va = dma_alloc_coherent(dev->hw->device, ccq->mem_cq.size,
+ &ccq->mem_cq.pa, GFP_KERNEL);
+ if (!ccq->mem_cq.va)
+ return -ENOMEM;
+
+ status = irdma_obj_aligned_mem(rf, &ccq->shadow_area,
+ ccq->shadow_area.size,
+ IRDMA_SHADOWAREA_M);
+ if (status)
+ goto exit;
+
+ ccq->sc_cq.back_cq = ccq;
+ /* populate the ccq init info */
+ info.cq_base = ccq->mem_cq.va;
+ info.cq_pa = ccq->mem_cq.pa;
+ info.num_elem = ccq_size;
+ info.shadow_area = ccq->shadow_area.va;
+ info.shadow_area_pa = ccq->shadow_area.pa;
+ info.ceqe_mask = false;
+ info.ceq_id_valid = true;
+ info.shadow_read_threshold = 16;
+ info.vsi = &rf->default_vsi;
+ status = irdma_sc_ccq_init(dev->ccq, &info);
+ if (!status)
+ status = irdma_sc_ccq_create(dev->ccq, 0, true, true);
+exit:
+ if (status) {
+ dma_free_coherent(dev->hw->device, ccq->mem_cq.size,
+ ccq->mem_cq.va, ccq->mem_cq.pa);
+ ccq->mem_cq.va = NULL;
+ }
+
+ return status;
+}
+
+/**
+ * irdma_alloc_set_mac - set up a mac address table entry
+ * @iwdev: irdma device
+ *
+ * Allocate a mac ip entry and add it to the hw table. Return 0
+ * if successful, otherwise return error
+ */
+static int irdma_alloc_set_mac(struct irdma_device *iwdev)
+{
+ int status;
+
+ status = irdma_alloc_local_mac_entry(iwdev->rf,
+ &iwdev->mac_ip_table_idx);
+ if (!status) {
+ status = irdma_add_local_mac_entry(iwdev->rf,
+ (const u8 *)iwdev->netdev->dev_addr,
+ (u8)iwdev->mac_ip_table_idx);
+ if (status)
+ irdma_del_local_mac_entry(iwdev->rf,
+ (u8)iwdev->mac_ip_table_idx);
+ }
+ return status;
+}
+
+/**
+ * irdma_cfg_ceq_vector - set up the msix interrupt vector for
+ * ceq
+ * @rf: RDMA PCI function
+ * @iwceq: ceq associated with the vector
+ * @ceq_id: the id number of the iwceq
+ * @msix_vec: interrupt vector information
+ *
+ * Allocate interrupt resources and enable irq handling
+ * Return 0 if successful, otherwise return error
+ */
+static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
+ u32 ceq_id, struct irdma_msix_vector *msix_vec)
+{
+ int status;
+
+ if (rf->msix_shared && !ceq_id) {
+ snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
+ "irdma-%s-AEQCEQ-0", dev_name(&rf->pcidev->dev));
+ tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
+ status = request_irq(msix_vec->irq, irdma_irq_handler, 0,
+ msix_vec->name, rf);
+ } else {
+ snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
+ "irdma-%s-CEQ-%d",
+ dev_name(&rf->pcidev->dev), ceq_id);
+ tasklet_setup(&iwceq->dpc_tasklet, irdma_ceq_dpc);
+
+ status = request_irq(msix_vec->irq, irdma_ceq_handler, 0,
+ msix_vec->name, iwceq);
+ }
+ cpumask_clear(&msix_vec->mask);
+ cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
+ irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask);
+ if (status) {
+ ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n");
+ return status;
+ }
+
+ msix_vec->ceq_id = ceq_id;
+ if (rf->sc_dev.privileged)
+ rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id,
+ msix_vec->idx, true);
+ else
+ status = irdma_vchnl_req_ceq_vec_map(&rf->sc_dev, ceq_id,
+ msix_vec->idx);
+ return status;
+}
+
+/**
+ * irdma_cfg_aeq_vector - set up the msix vector for aeq
+ * @rf: RDMA PCI function
+ *
+ * Allocate interrupt resources and enable irq handling
+ * Return 0 if successful, otherwise return error
+ */
+static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
+{
+ struct irdma_msix_vector *msix_vec = rf->iw_msixtbl;
+ int ret = 0;
+
+ if (!rf->msix_shared) {
+ snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
+ "irdma-%s-AEQ", dev_name(&rf->pcidev->dev));
+ tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
+ ret = request_irq(msix_vec->irq, irdma_irq_handler, 0,
+ msix_vec->name, rf);
+ }
+ if (ret) {
+ ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n");
+ return ret;
+ }
+
+ if (rf->sc_dev.privileged)
+ rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx,
+ true);
+ else
+ ret = irdma_vchnl_req_aeq_vec_map(&rf->sc_dev, msix_vec->idx);
+
+ return ret;
+}
+
+/**
+ * irdma_create_ceq - create completion event queue
+ * @rf: RDMA PCI function
+ * @iwceq: pointer to the ceq resources to be created
+ * @ceq_id: the id number of the iwceq
+ * @vsi_idx: vsi idx
+ *
+ * Return 0, if the ceq and the resources associated with it
+ * are successfully created, otherwise return error
+ */
+static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
+ u32 ceq_id, u16 vsi_idx)
+{
+ int status;
+ struct irdma_ceq_init_info info = {};
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ u32 ceq_size;
+
+ info.ceq_id = ceq_id;
+ iwceq->rf = rf;
+ ceq_size = min(rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
+ dev->hw_attrs.max_hw_ceq_size);
+ iwceq->mem.size = ALIGN(sizeof(struct irdma_ceqe) * ceq_size,
+ IRDMA_CEQ_ALIGNMENT);
+ iwceq->mem.va = dma_alloc_coherent(dev->hw->device, iwceq->mem.size,
+ &iwceq->mem.pa, GFP_KERNEL);
+ if (!iwceq->mem.va)
+ return -ENOMEM;
+
+ info.ceq_id = ceq_id;
+ info.ceqe_base = iwceq->mem.va;
+ info.ceqe_pa = iwceq->mem.pa;
+ info.elem_cnt = ceq_size;
+ iwceq->sc_ceq.ceq_id = ceq_id;
+ info.dev = dev;
+ info.vsi_idx = vsi_idx;
+ status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
+ if (!status) {
+ if (dev->ceq_valid)
+ status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
+ IRDMA_OP_CEQ_CREATE);
+ else
+ status = irdma_sc_cceq_create(&iwceq->sc_ceq, 0);
+ }
+
+ if (status) {
+ dma_free_coherent(dev->hw->device, iwceq->mem.size,
+ iwceq->mem.va, iwceq->mem.pa);
+ iwceq->mem.va = NULL;
+ }
+
+ return status;
+}
+
+/**
+ * irdma_setup_ceq_0 - create CEQ 0 and its interrupt resource
+ * @rf: RDMA PCI function
+ *
+ * Allocate a list for all device completion event queues
+ * Create ceq 0 and configure its msix interrupt vector
+ * Return 0, if successfully set up, otherwise return error
+ */
+static int irdma_setup_ceq_0(struct irdma_pci_f *rf)
+{
+ struct irdma_ceq *iwceq;
+ struct irdma_msix_vector *msix_vec;
+ u32 i;
+ int status = 0;
+ u32 num_ceqs;
+
+ num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
+ rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL);
+ if (!rf->ceqlist) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ iwceq = &rf->ceqlist[0];
+ status = irdma_create_ceq(rf, iwceq, 0, rf->default_vsi.vsi_idx);
+ if (status) {
+ ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n",
+ status);
+ goto exit;
+ }
+
+ spin_lock_init(&iwceq->ce_lock);
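+ /* CEQ 0 shares MSI-X vector 0 with the AEQ when msix_shared is set;
+ * otherwise vector 0 is reserved for the AEQ and CEQ 0 uses vector 1.
+ */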
+ i = rf->msix_shared ? 0 : 1;
+ msix_vec = &rf->iw_msixtbl[i];
+ iwceq->irq = msix_vec->irq;
+ iwceq->msix_idx = msix_vec->idx;
+ status = irdma_cfg_ceq_vector(rf, iwceq, 0, msix_vec);
+ if (status) {
+ irdma_destroy_ceq(rf, iwceq);
+ goto exit;
+ }
+
+ irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
+ rf->ceqs_count++;
+
+exit:
+ if (status && !rf->ceqs_count) {
+ kfree(rf->ceqlist);
+ rf->ceqlist = NULL;
+ return status;
+ }
+ rf->sc_dev.ceq_valid = true;
+
+ return 0;
+}
+
+/**
+ * irdma_setup_ceqs - manage the device ceqs and their interrupt resources
+ * @rf: RDMA PCI function
+ * @vsi_idx: vsi_idx for this CEQ
+ *
+ * Allocate a list for all device completion event queues
+ * Create the ceqs and configure their msix interrupt vectors
+ * Return 0, if ceqs are successfully set up, otherwise return error
+ */
+static int irdma_setup_ceqs(struct irdma_pci_f *rf, u16 vsi_idx)
+{
+ u32 i;
+ u32 ceq_id;
+ struct irdma_ceq *iwceq;
+ struct irdma_msix_vector *msix_vec;
+ int status;
+ u32 num_ceqs;
+
+ num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
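+ /* CEQ 0 was created by irdma_setup_ceq_0(); the remaining CEQs start
+ * at MSI-X vector 1 (shared AEQ/CEQ 0) or vector 2 (dedicated vectors).
+ */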
+ i = (rf->msix_shared) ? 1 : 2;
+ for (ceq_id = 1; i < num_ceqs; i++, ceq_id++) {
+ iwceq = &rf->ceqlist[ceq_id];
+ status = irdma_create_ceq(rf, iwceq, ceq_id, vsi_idx);
+ if (status) {
+ ibdev_dbg(&rf->iwdev->ibdev,
+ "ERR: create ceq status = %d\n", status);
+ goto del_ceqs;
+ }
+ spin_lock_init(&iwceq->ce_lock);
+ msix_vec = &rf->iw_msixtbl[i];
+ iwceq->irq = msix_vec->irq;
+ iwceq->msix_idx = msix_vec->idx;
+ status = irdma_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec);
+ if (status) {
+ irdma_destroy_ceq(rf, iwceq);
+ goto del_ceqs;
+ }
+ irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
+ rf->ceqs_count++;
+ }
+
+ return 0;
+
+del_ceqs:
+ irdma_del_ceqs(rf);
+
+ return status;
+}
+
+static int irdma_create_virt_aeq(struct irdma_pci_f *rf, u32 size)
+{
+ struct irdma_aeq *aeq = &rf->aeq;
+ dma_addr_t *pg_arr;
+ u32 pg_cnt;
+ int status;
+
+ if (rf->rdma_ver < IRDMA_GEN_2)
+ return -EOPNOTSUPP;
+
+ aeq->mem.size = sizeof(struct irdma_sc_aeqe) * size;
+ aeq->mem.va = vzalloc(aeq->mem.size);
+
+ if (!aeq->mem.va)
+ return -ENOMEM;
+
+ pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
+ status = irdma_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true);
+ if (status) {
+ vfree(aeq->mem.va);
+ return status;
+ }
+
+ pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;
+ status = irdma_map_vm_page_list(&rf->hw, aeq->mem.va, pg_arr, pg_cnt);
+ if (status) {
+ irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
+ vfree(aeq->mem.va);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_create_aeq - create async event queue
+ * @rf: RDMA PCI function
+ *
+ * Return 0, if the aeq and the resources associated with it
+ * are successfully created, otherwise return error
+ */
+static int irdma_create_aeq(struct irdma_pci_f *rf)
+{
+ struct irdma_aeq_init_info info = {};
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_aeq *aeq = &rf->aeq;
+ struct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info;
+ u32 aeq_size;
+ u8 multiplier = (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? 2 : 1;
+ int status;
+
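+ /* Size the AEQ from the QP and CQ counts; iWARP-only mode doubles the
+ * QP contribution.
+ */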
+ aeq_size = multiplier * hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt +
+ hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
+ aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size);
+ /* GEN_3 does not support virtual AEQ. Cap at max kernel alloc size */
+ if (rf->rdma_ver == IRDMA_GEN_3)
+ aeq_size = min(aeq_size, (u32)((PAGE_SIZE << MAX_PAGE_ORDER) /
+ sizeof(struct irdma_sc_aeqe)));
+ aeq->mem.size = ALIGN(sizeof(struct irdma_sc_aeqe) * aeq_size,
+ IRDMA_AEQ_ALIGNMENT);
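+ /* Prefer a physically contiguous AEQ; __GFP_NOWARN because a failure
+ * falls back to the virtually mapped AEQ below (except on GEN_3).
+ */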
+ aeq->mem.va = dma_alloc_coherent(dev->hw->device, aeq->mem.size,
+ &aeq->mem.pa,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (aeq->mem.va)
+ goto skip_virt_aeq;
+ else if (rf->rdma_ver == IRDMA_GEN_3)
+ return -ENOMEM;
+
+ /* physically mapped AEQ allocation failed; set up a virtual AEQ */
+ status = irdma_create_virt_aeq(rf, aeq_size);
+ if (status)
+ return status;
+
+ info.virtual_map = true;
+ aeq->virtual_map = info.virtual_map;
+ info.pbl_chunk_size = 1;
+ info.first_pm_pbl_idx = aeq->palloc.level1.idx;
+
+skip_virt_aeq:
+ info.aeqe_base = aeq->mem.va;
+ info.aeq_elem_pa = aeq->mem.pa;
+ info.elem_cnt = aeq_size;
+ info.dev = dev;
+ info.msix_idx = rf->iw_msixtbl->idx;
+ status = irdma_sc_aeq_init(&aeq->sc_aeq, &info);
+ if (status)
+ goto err;
+
+ status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_CREATE);
+ if (status)
+ goto err;
+
+ return 0;
+
+err:
+ if (aeq->virtual_map) {
+ irdma_destroy_virt_aeq(rf);
+ } else {
+ dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va,
+ aeq->mem.pa);
+ aeq->mem.va = NULL;
+ }
+
+ return status;
+}
+
+/**
+ * irdma_setup_aeq - set up the device aeq
+ * @rf: RDMA PCI function
+ *
+ * Create the aeq and configure its msix interrupt vector
+ * Return 0 if successful, otherwise return error
+ */
+static int irdma_setup_aeq(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ int status;
+
+ status = irdma_create_aeq(rf);
+ if (status)
+ return status;
+
+ status = irdma_cfg_aeq_vector(rf);
+ if (status) {
+ irdma_destroy_aeq(rf);
+ return status;
+ }
+
+ if (!rf->msix_shared)
+ irdma_ena_intr(dev, rf->iw_msixtbl[0].idx);
+
+ return 0;
+}
+
+/**
+ * irdma_initialize_ilq - create iwarp local queue for cm
+ * @iwdev: irdma device
+ *
+ * Return 0 if successful, otherwise return error
+ */
+static int irdma_initialize_ilq(struct irdma_device *iwdev)
+{
+ struct irdma_puda_rsrc_info info = {};
+ int status;
+
+ info.type = IRDMA_PUDA_RSRC_TYPE_ILQ;
+ info.cq_id = 1;
+ info.qp_id = 1;
+ info.count = 1;
+ info.pd_id = 1;
+ info.abi_ver = IRDMA_ABI_VER;
+ info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
+ info.rq_size = info.sq_size;
+ info.buf_size = 1024;
+ info.tx_buf_cnt = 2 * info.sq_size;
+ info.receive = irdma_receive_ilq;
+ info.xmit_complete = irdma_free_sqbuf;
+ status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
+ if (status)
+ ibdev_dbg(&iwdev->ibdev, "ERR: ilq create fail\n");
+
+ return status;
+}
+
+/**
+ * irdma_initialize_ieq - create iwarp exception queue
+ * @iwdev: irdma device
+ *
+ * Return 0 if successful, otherwise return error
+ */
+static int irdma_initialize_ieq(struct irdma_device *iwdev)
+{
+ struct irdma_puda_rsrc_info info = {};
+ int status;
+
+ info.type = IRDMA_PUDA_RSRC_TYPE_IEQ;
+ info.cq_id = 2;
+ info.qp_id = iwdev->vsi.exception_lan_q;
+ info.count = 1;
+ info.pd_id = 2;
+ info.abi_ver = IRDMA_ABI_VER;
+ info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
+ info.rq_size = info.sq_size;
+ info.buf_size = iwdev->vsi.mtu + IRDMA_IPV4_PAD;
+ info.tx_buf_cnt = 4096;
+ status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
+ if (status)
+ ibdev_dbg(&iwdev->ibdev, "ERR: ieq create fail\n");
+
+ return status;
+}
+
+/**
+ * irdma_reinitialize_ieq - destroy and re-create ieq
+ * @vsi: VSI structure
+ */
+void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi)
+{
+ struct irdma_device *iwdev = vsi->back_vsi;
+ struct irdma_pci_f *rf = iwdev->rf;
+
+ irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false);
+ if (irdma_initialize_ieq(iwdev)) {
+ iwdev->rf->reset = true;
+ rf->gen_ops.request_reset(rf);
+ }
+}
+
+/**
+ * irdma_hmc_setup - create hmc objects for the device
+ * @rf: RDMA PCI function
+ *
+ * Set up the device private memory space for the number and size of
+ * the hmc objects and create the objects
+ * Return 0 if successful, otherwise return error
+ */
+static int irdma_hmc_setup(struct irdma_pci_f *rf)
+{
+ int status;
+ u32 qpcnt;
+
+ qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;
+
+ rf->sd_type = IRDMA_SD_TYPE_DIRECT;
+ status = irdma_cfg_fpm_val(&rf->sc_dev, qpcnt);
+ if (status)
+ return status;
+
+ status = irdma_create_hmc_objs(rf, true, rf->rdma_ver);
+
+ return status;
+}
+
+/**
+ * irdma_del_init_mem - deallocate memory resources
+ * @rf: RDMA PCI function
+ */
+static void irdma_del_init_mem(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+
+ if (!rf->sc_dev.privileged)
+ irdma_vchnl_req_put_hmc_fcn(&rf->sc_dev);
+ kfree(dev->hmc_info->sd_table.sd_entry);
+ dev->hmc_info->sd_table.sd_entry = NULL;
+ vfree(rf->mem_rsrc);
+ rf->mem_rsrc = NULL;
+ dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
+ rf->obj_mem.pa);
+ rf->obj_mem.va = NULL;
+ if (rf->rdma_ver != IRDMA_GEN_1) {
+ bitmap_free(rf->allocated_ws_nodes);
+ rf->allocated_ws_nodes = NULL;
+ }
+ kfree(rf->ceqlist);
+ rf->ceqlist = NULL;
+ kfree(rf->iw_msixtbl);
+ rf->iw_msixtbl = NULL;
+ kfree(rf->hmc_info_mem);
+ rf->hmc_info_mem = NULL;
+}
+
+/**
+ * irdma_initialize_dev - initialize device
+ * @rf: RDMA PCI function
+ *
+ * Allocate memory for the hmc objects and initialize iwdev
+ * Return 0 if successful, otherwise clean up the resources
+ * and return error
+ */
+static int irdma_initialize_dev(struct irdma_pci_f *rf)
+{
+ int status;
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_device_init_info info = {};
+ struct irdma_dma_mem mem;
+ u32 size;
+
+ size = sizeof(struct irdma_hmc_pble_rsrc) +
+ sizeof(struct irdma_hmc_info) +
+ (sizeof(struct irdma_hmc_obj_info) * IRDMA_HMC_IW_MAX);
+
+ rf->hmc_info_mem = kzalloc(size, GFP_KERNEL);
+ if (!rf->hmc_info_mem)
+ return -ENOMEM;
+
+ rf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem;
+ dev->hmc_info = &rf->hw.hmc;
+ dev->hmc_info->hmc_obj = (struct irdma_hmc_obj_info *)
+ (rf->pble_rsrc + 1);
+
+ status = irdma_obj_aligned_mem(rf, &mem, IRDMA_QUERY_FPM_BUF_SIZE,
+ IRDMA_FPM_QUERY_BUF_ALIGNMENT_M);
+ if (status)
+ goto error;
+
+ info.fpm_query_buf_pa = mem.pa;
+ info.fpm_query_buf = mem.va;
+
+ status = irdma_obj_aligned_mem(rf, &mem, IRDMA_COMMIT_FPM_BUF_SIZE,
+ IRDMA_FPM_COMMIT_BUF_ALIGNMENT_M);
+ if (status)
+ goto error;
+
+ info.fpm_commit_buf_pa = mem.pa;
+ info.fpm_commit_buf = mem.va;
+
+ info.bar0 = rf->hw.hw_addr;
+ info.hmc_fn_id = rf->pf_id;
+ info.protocol_used = rf->protocol_used;
+ info.hw = &rf->hw;
+ status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info);
+ if (status)
+ goto error;
+
+ return status;
+error:
+ kfree(rf->hmc_info_mem);
+ rf->hmc_info_mem = NULL;
+
+ return status;
+}
+
+/**
+ * irdma_rt_deinit_hw - clean up the irdma device resources
+ * @iwdev: irdma device
+ *
+ * remove the mac ip entry and ipv4/ipv6 addresses, destroy the
+ * device queues and free the pble and the hmc objects
+ */
+void irdma_rt_deinit_hw(struct irdma_device *iwdev)
+{
+ ibdev_dbg(&iwdev->ibdev, "INIT: state = %d\n", iwdev->init_state);
+
+ switch (iwdev->init_state) {
+ case IP_ADDR_REGISTERED:
+ if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ irdma_del_local_mac_entry(iwdev->rf,
+ (u8)iwdev->mac_ip_table_idx);
+ fallthrough;
+ case IEQ_CREATED:
+ if (!iwdev->roce_mode)
+ irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
+ iwdev->rf->reset);
+ fallthrough;
+ case ILQ_CREATED:
+ if (!iwdev->roce_mode)
+ irdma_puda_dele_rsrc(&iwdev->vsi,
+ IRDMA_PUDA_RSRC_TYPE_ILQ,
+ iwdev->rf->reset);
+ break;
+ default:
+ ibdev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);
+ break;
+ }
+
+ irdma_cleanup_cm_core(&iwdev->cm_core);
+ if (iwdev->vsi.pestat) {
+ irdma_vsi_stats_free(&iwdev->vsi);
+ kfree(iwdev->vsi.pestat);
+ }
+ if (iwdev->cleanup_wq)
+ destroy_workqueue(iwdev->cleanup_wq);
+}
+
+static int irdma_setup_init_state(struct irdma_pci_f *rf)
+{
+ int status;
+
+ status = irdma_save_msix_info(rf);
+ if (status)
+ return status;
+
+ rf->hw.device = &rf->pcidev->dev;
+ rf->obj_mem.size = ALIGN(8192, IRDMA_HW_PAGE_SIZE);
+ rf->obj_mem.va = dma_alloc_coherent(rf->hw.device, rf->obj_mem.size,
+ &rf->obj_mem.pa, GFP_KERNEL);
+ if (!rf->obj_mem.va) {
+ status = -ENOMEM;
+ goto clean_msixtbl;
+ }
+
+ rf->obj_next = rf->obj_mem;
+ status = irdma_initialize_dev(rf);
+ if (status)
+ goto clean_obj_mem;
+
+ return 0;
+
+clean_obj_mem:
+ dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
+ rf->obj_mem.pa);
+ rf->obj_mem.va = NULL;
+clean_msixtbl:
+ kfree(rf->iw_msixtbl);
+ rf->iw_msixtbl = NULL;
+ return status;
+}
+
+/**
+ * irdma_get_used_rsrc - determine resources used internally
+ * @iwdev: irdma device
+ *
+ * Called at the end of open to get all internal allocations
+ */
+static void irdma_get_used_rsrc(struct irdma_device *iwdev)
+{
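+ /* Resources are allocated contiguously from index 0 during init, so
+ * the first clear bit in each bitmap gives the count in use.
+ */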
+ iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds,
+ iwdev->rf->max_pd);
+ iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
+ iwdev->rf->max_qp);
+ iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
+ iwdev->rf->max_cq);
+ iwdev->rf->used_srqs = find_first_zero_bit(iwdev->rf->allocated_srqs,
+ iwdev->rf->max_srq);
+ iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
+ iwdev->rf->max_mr);
+}
+
+void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
+{
+ enum init_completion_state state = rf->init_state;
+
+ rf->init_state = INVALID_STATE;
+
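+ /* Tear down in reverse order of creation; each case falls through to
+ * undo the earlier stages as well.
+ */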
+ switch (state) {
+ case AEQ_CREATED:
+ irdma_destroy_aeq(rf);
+ fallthrough;
+ case PBLE_CHUNK_MEM:
+ irdma_destroy_pble_prm(rf->pble_rsrc);
+ fallthrough;
+ case CEQS_CREATED:
+ irdma_del_ceqs(rf);
+ fallthrough;
+ case CEQ0_CREATED:
+ irdma_del_ceq_0(rf);
+ fallthrough;
+ case CCQ_CREATED:
+ irdma_destroy_ccq(rf);
+ fallthrough;
+ case HW_RSRC_INITIALIZED:
+ case HMC_OBJS_CREATED:
+ irdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true,
+ rf->reset, rf->rdma_ver);
+ fallthrough;
+ case CQP_CREATED:
+ irdma_destroy_cqp(rf);
+ fallthrough;
+ case INITIAL_STATE:
+ irdma_del_init_mem(rf);
+ break;
+ case INVALID_STATE:
+ default:
+ ibdev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", rf->init_state);
+ break;
+ }
+}
+
+/**
+ * irdma_rt_init_hw - Initializes runtime portion of HW
+ * @iwdev: irdma device
+ * @l2params: qos, tc, mtu info from netdev driver
+ *
+ * Create device queues ILQ, IEQ, CEQs and PBLEs. Set up irdma
+ * device resource objects.
+ */
+int irdma_rt_init_hw(struct irdma_device *iwdev,
+ struct irdma_l2params *l2params)
+{
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_vsi_init_info vsi_info = {};
+ struct irdma_vsi_stats_info stats_info = {};
+ int status;
+
+ vsi_info.dev = dev;
+ vsi_info.back_vsi = iwdev;
+ vsi_info.params = l2params;
+ vsi_info.pf_data_vsi_num = iwdev->vsi_num;
+ vsi_info.register_qset = rf->gen_ops.register_qset;
+ vsi_info.unregister_qset = rf->gen_ops.unregister_qset;
+ vsi_info.exception_lan_q = 2;
+ irdma_sc_vsi_init(&iwdev->vsi, &vsi_info);
+
+ status = irdma_setup_cm_core(iwdev, rf->rdma_ver);
+ if (status)
+ return status;
+
+ stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
+ if (!stats_info.pestat) {
+ irdma_cleanup_cm_core(&iwdev->cm_core);
+ return -ENOMEM;
+ }
+ stats_info.fcn_id = dev->hmc_fn_id;
+ status = irdma_vsi_stats_init(&iwdev->vsi, &stats_info);
+ if (status) {
+ irdma_cleanup_cm_core(&iwdev->cm_core);
+ kfree(stats_info.pestat);
+ return status;
+ }
+
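+ /* Single-pass loop: break out on the first failure and fall through
+ * to the teardown path below.
+ */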
+ do {
+ if (!iwdev->roce_mode) {
+ status = irdma_initialize_ilq(iwdev);
+ if (status)
+ break;
+ iwdev->init_state = ILQ_CREATED;
+ status = irdma_initialize_ieq(iwdev);
+ if (status)
+ break;
+ iwdev->init_state = IEQ_CREATED;
+ }
+ if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ irdma_alloc_set_mac(iwdev);
+ irdma_add_ip(iwdev);
+ iwdev->init_state = IP_ADDR_REGISTERED;
+
+ /* handles async cleanup tasks - disconnect CM, free qp,
+ * free cq bufs
+ */
+ iwdev->cleanup_wq = alloc_workqueue("irdma-cleanup-wq",
+ WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
+ if (!iwdev->cleanup_wq)
+ return -ENOMEM;
+ irdma_get_used_rsrc(iwdev);
+ init_waitqueue_head(&iwdev->suspend_wq);
+
+ return 0;
+ } while (0);
+
+ dev_err(&rf->pcidev->dev, "HW runtime init FAIL status = %d last cmpl = %d\n",
+ status, iwdev->init_state);
+ irdma_rt_deinit_hw(iwdev);
+
+ return status;
+}
+
+/**
+ * irdma_ctrl_init_hw - Initializes control portion of HW
+ * @rf: RDMA PCI function
+ *
+ * Create admin queues, HMC objects and RF resource objects
+ */
+int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ int status;
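+
+ /* Staged bring-up: rf->init_state records the last completed step so
+ * irdma_ctrl_deinit_hw() can unwind exactly what was created on failure.
+ */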
+ do {
+ status = irdma_setup_init_state(rf);
+ if (status)
+ break;
+ rf->init_state = INITIAL_STATE;
+
+ status = irdma_create_cqp(rf);
+ if (status)
+ break;
+ rf->init_state = CQP_CREATED;
+
+ dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT;
+ if (rf->rdma_ver != IRDMA_GEN_1) {
+ status = irdma_get_rdma_features(dev);
+ if (status)
+ break;
+ }
+
+ status = irdma_hmc_setup(rf);
+ if (status)
+ break;
+ rf->init_state = HMC_OBJS_CREATED;
+
+ status = irdma_initialize_hw_rsrc(rf);
+ if (status)
+ break;
+ rf->init_state = HW_RSRC_INITIALIZED;
+
+ status = irdma_create_ccq(rf);
+ if (status)
+ break;
+ rf->init_state = CCQ_CREATED;
+
+ status = irdma_setup_ceq_0(rf);
+ if (status)
+ break;
+ rf->init_state = CEQ0_CREATED;
+ /* Handles processing of CQP completions */
+ rf->cqp_cmpl_wq =
+ alloc_ordered_workqueue("cqp_cmpl_wq", WQ_HIGHPRI);
+ if (!rf->cqp_cmpl_wq) {
+ status = -ENOMEM;
+ break;
+ }
+ INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker);
+ irdma_sc_ccq_arm(dev->ccq);
+
+ status = irdma_setup_ceqs(rf, rf->iwdev ? rf->iwdev->vsi_num : 0);
+ if (status)
+ break;
+
+ rf->init_state = CEQS_CREATED;
+
+ status = irdma_hmc_init_pble(&rf->sc_dev,
+ rf->pble_rsrc);
+ if (status)
+ break;
+
+ rf->init_state = PBLE_CHUNK_MEM;
+
+ status = irdma_setup_aeq(rf);
+ if (status)
+ break;
+ rf->init_state = AEQ_CREATED;
+
+ return 0;
+ } while (0);
+
+ dev_err(&rf->pcidev->dev, "IRDMA hardware initialization FAILED init_state=%d status=%d\n",
+ rf->init_state, status);
+ irdma_ctrl_deinit_hw(rf);
+ return status;
+}
+
+/**
+ * irdma_set_hw_rsrc - set hw memory resources.
+ * @rf: RDMA PCI function
+ */
+static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
+{
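+ /* Carve the single mem_rsrc allocation into the ARP table, the
+ * resource bitmaps and the QP/CQ pointer tables, in the same order
+ * used by irdma_calc_mem_rsrc_size().
+ */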
+ rf->allocated_qps = (void *)(rf->mem_rsrc +
+ (sizeof(struct irdma_arp_entry) * rf->arp_table_size));
+ rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)];
+ rf->allocated_srqs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)];
+ rf->allocated_mrs = &rf->allocated_srqs[BITS_TO_LONGS(rf->max_srq)];
+ rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)];
+ rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)];
+ rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)];
+ rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)];
+ rf->qp_table = (struct irdma_qp **)
+ (&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]);
+ rf->cq_table = (struct irdma_cq **)(&rf->qp_table[rf->max_qp]);
+
+ spin_lock_init(&rf->rsrc_lock);
+ spin_lock_init(&rf->arp_lock);
+ spin_lock_init(&rf->qptable_lock);
+ spin_lock_init(&rf->cqtable_lock);
+ spin_lock_init(&rf->qh_list_lock);
+}
+
+/**
+ * irdma_calc_mem_rsrc_size - calculate memory resources size.
+ * @rf: RDMA PCI function
+ */
+static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf)
+{
+ u32 rsrc_size;
+
+ rsrc_size = sizeof(struct irdma_arp_entry) * rf->arp_table_size;
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp);
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr);
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq);
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_srq);
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd);
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size);
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah);
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg);
+ rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp;
+ rsrc_size += sizeof(struct irdma_cq **) * rf->max_cq;
+ rsrc_size += sizeof(struct irdma_srq **) * rf->max_srq;
+
+ return rsrc_size;
+}
+
+/**
+ * irdma_initialize_hw_rsrc - initialize hw resource tracking array
+ * @rf: RDMA PCI function
+ */
+u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
+{
+ u32 rsrc_size;
+ u32 mrdrvbits;
+ u32 ret;
+
+ if (rf->rdma_ver != IRDMA_GEN_1) {
+ rf->allocated_ws_nodes = bitmap_zalloc(IRDMA_MAX_WS_NODES,
+ GFP_KERNEL);
+ if (!rf->allocated_ws_nodes)
+ return -ENOMEM;
+
+ set_bit(0, rf->allocated_ws_nodes);
+ rf->max_ws_node_id = IRDMA_MAX_WS_NODES;
+ }
+ rf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size;
+ rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt;
+ rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt;
+ rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
+ rf->max_srq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_SRQ].cnt;
+ rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds;
+ rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt;
+ rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt;
+ rf->max_mcg = rf->max_qp;
+
+ rsrc_size = irdma_calc_mem_rsrc_size(rf);
+ rf->mem_rsrc = vzalloc(rsrc_size);
+ if (!rf->mem_rsrc) {
+ ret = -ENOMEM;
+ goto mem_rsrc_vzalloc_fail;
+ }
+
+ rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;
+
+ irdma_set_hw_rsrc(rf);
+
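+ /* Reserve index 0 of each resource, plus the QPs, CQs and PDs that
+ * irdma_initialize_ilq()/irdma_initialize_ieq() use (indexes 1 and 2).
+ */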
+ set_bit(0, rf->allocated_mrs);
+ set_bit(0, rf->allocated_qps);
+ set_bit(0, rf->allocated_cqs);
+ set_bit(0, rf->allocated_srqs);
+ set_bit(0, rf->allocated_pds);
+ set_bit(0, rf->allocated_arps);
+ set_bit(0, rf->allocated_ahs);
+ set_bit(0, rf->allocated_mcgs);
+ set_bit(2, rf->allocated_qps); /* qp 2 IEQ */
+ set_bit(1, rf->allocated_qps); /* qp 1 ILQ */
+ set_bit(1, rf->allocated_cqs);
+ set_bit(1, rf->allocated_pds);
+ set_bit(2, rf->allocated_cqs);
+ set_bit(2, rf->allocated_pds);
+
+ INIT_LIST_HEAD(&rf->mc_qht_list.list);
+ /* stag index mask has a minimum of 14 bits */
+ mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14);
+ rf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
+
+ return 0;
+
+mem_rsrc_vzalloc_fail:
+ bitmap_free(rf->allocated_ws_nodes);
+ rf->allocated_ws_nodes = NULL;
+
+ return ret;
+}
+
+/**
+ * irdma_cqp_ce_handler - handle cqp completions
+ * @rf: RDMA PCI function
+ * @cq: cq for cqp completions
+ */
+void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ u32 cqe_count = 0;
+ struct irdma_ccq_cqe_info info;
+ unsigned long flags;
+ int ret;
+
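+ /* Drain all available CCQ completions, then re-arm the CQ below. */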
+ do {
+ memset(&info, 0, sizeof(info));
+ spin_lock_irqsave(&rf->cqp.compl_lock, flags);
+ ret = irdma_sc_ccq_get_cqe_info(cq, &info);
+ spin_unlock_irqrestore(&rf->cqp.compl_lock, flags);
+ if (ret)
+ break;
+
+ cqp_request = (struct irdma_cqp_request *)
+ (unsigned long)info.scratch;
+ if (info.error && irdma_cqp_crit_err(dev, cqp_request->info.cqp_cmd,
+ info.maj_err_code,
+ info.min_err_code))
+ ibdev_err(&rf->iwdev->ibdev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
+ info.op_code, info.maj_err_code, info.min_err_code);
+ if (cqp_request) {
+ cqp_request->compl_info.maj_err_code = info.maj_err_code;
+ cqp_request->compl_info.min_err_code = info.min_err_code;
+ cqp_request->compl_info.op_ret_val = info.op_ret_val;
+ cqp_request->compl_info.error = info.error;
+
+ /*
+ * If this is deferred or pending completion, then mark
+ * CQP request as pending to not block the CQ, but don't
+ * release CQP request, as it is still on the OOO list.
+ */
+ if (info.pending)
+ cqp_request->pending = true;
+ else
+ irdma_complete_cqp_request(&rf->cqp,
+ cqp_request);
+ }
+
+ cqe_count++;
+ } while (1);
+
+ if (cqe_count) {
+ irdma_process_bh(dev);
+ irdma_sc_ccq_arm(cq);
+ }
+}
+
+/**
+ * cqp_compl_worker - Handle cqp completions
+ * @work: Pointer to work structure
+ */
+void cqp_compl_worker(struct work_struct *work)
+{
+ struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f,
+ cqp_cmpl_work);
+ struct irdma_sc_cq *cq = &rf->ccq.sc_cq;
+
+ irdma_cqp_ce_handler(rf, cq);
+}
+
+/**
+ * irdma_lookup_apbvt_entry - look up hash table for an existing apbvt entry corresponding to port
+ * @cm_core: cm's core
+ * @port: port to identify apbvt entry
+ */
+static struct irdma_apbvt_entry *irdma_lookup_apbvt_entry(struct irdma_cm_core *cm_core,
+ u16 port)
+{
+ struct irdma_apbvt_entry *entry;
+
+ hash_for_each_possible(cm_core->apbvt_hash_tbl, entry, hlist, port) {
+ if (entry->port == port) {
+ entry->use_cnt++;
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * irdma_next_iw_state - modify qp state
+ * @iwqp: iwarp qp to modify
+ * @state: next state for qp
+ * @del_hash: del hash
+ * @term: term message
+ * @termlen: length of term message
+ */
+void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,
+ u8 termlen)
+{
+ struct irdma_modify_qp_info info = {};
+
+ info.next_iwarp_state = state;
+ info.remove_hash_idx = del_hash;
+ info.cq_num_valid = true;
+ info.arp_cache_idx_valid = true;
+ info.dont_send_term = true;
+ info.dont_send_fin = true;
+ info.termlen = termlen;
+
+ if (term & IRDMAQP_TERM_SEND_TERM_ONLY)
+ info.dont_send_term = false;
+ if (term & IRDMAQP_TERM_SEND_FIN_ONLY)
+ info.dont_send_fin = false;
+ if (iwqp->sc_qp.term_flags && state == IRDMA_QP_STATE_ERROR)
+ info.reset_tcp_conn = true;
+ iwqp->hw_iwarp_state = state;
+ irdma_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
+ iwqp->iwarp_state = info.next_iwarp_state;
+}
+
+/**
+ * irdma_del_local_mac_entry - remove a mac entry from the hw
+ * table
+ * @rf: RDMA PCI function
+ * @idx: the index of the mac ip address to delete
+ */
+void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx)
+{
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_DELETE_LOCAL_MAC_ENTRY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.del_local_mac_entry.cqp = &iwcqp->sc_cqp;
+ cqp_info->in.u.del_local_mac_entry.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.del_local_mac_entry.entry_idx = idx;
+ cqp_info->in.u.del_local_mac_entry.ignore_ref_count = 0;
+
+ irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(iwcqp, cqp_request);
+}
+
+/**
+ * irdma_add_local_mac_entry - add a mac ip address entry to the
+ * hw table
+ * @rf: RDMA PCI function
+ * @mac_addr: pointer to mac address
+ * @idx: the index of the mac ip address to add
+ */
+int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx)
+{
+ struct irdma_local_mac_entry_info *info;
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->post_sq = 1;
+ info = &cqp_info->in.u.add_local_mac_entry.info;
+ ether_addr_copy(info->mac_addr, mac_addr);
+ info->entry_idx = idx;
+ cqp_info->cqp_cmd = IRDMA_OP_ADD_LOCAL_MAC_ENTRY;
+ cqp_info->in.u.add_local_mac_entry.cqp = &iwcqp->sc_cqp;
+ cqp_info->in.u.add_local_mac_entry.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(iwcqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_alloc_local_mac_entry - allocate a mac entry
+ * @rf: RDMA PCI function
+ * @mac_tbl_idx: the index of the new mac address
+ *
+ * Allocate a mac address entry and update the mac_tbl_idx
+ * to hold the index of the newly created mac address
+ * Return 0 if successful, otherwise return error
+ */
+int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx)
+{
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status = 0;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.alloc_local_mac_entry.cqp = &iwcqp->sc_cqp;
+ cqp_info->in.u.alloc_local_mac_entry.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ if (!status)
+ *mac_tbl_idx = (u16)cqp_request->compl_info.op_ret_val;
+
+ irdma_put_cqp_request(iwcqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_manage_apbvt_cmd - send cqp command manage apbvt
+ * @iwdev: irdma device
+ * @accel_local_port: port for apbvt
+ * @add_port: add or delete port
+ */
+static int irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev,
+ u16 accel_local_port, bool add_port)
+{
+ struct irdma_apbvt_info *info;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.manage_apbvt_entry.info;
+ memset(info, 0, sizeof(*info));
+ info->add = add_port;
+ info->port = accel_local_port;
+ cqp_info->cqp_cmd = IRDMA_OP_MANAGE_APBVT_ENTRY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp;
+ cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
+ ibdev_dbg(&iwdev->ibdev, "DEV: %s: port=0x%04x\n",
+ (!add_port) ? "DELETE" : "ADD", accel_local_port);
+
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_add_apbvt - add tcp port to HW apbvt table
+ * @iwdev: irdma device
+ * @port: port for apbvt
+ */
+struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port)
+{
+ struct irdma_cm_core *cm_core = &iwdev->cm_core;
+ struct irdma_apbvt_entry *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cm_core->apbvt_lock, flags);
+ entry = irdma_lookup_apbvt_entry(cm_core, port);
+ if (entry) {
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+ return entry;
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry) {
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+ return NULL;
+ }
+
+ entry->port = port;
+ entry->use_cnt = 1;
+ hash_add(cm_core->apbvt_hash_tbl, &entry->hlist, entry->port);
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+
+ if (irdma_cqp_manage_apbvt_cmd(iwdev, port, true)) {
+ kfree(entry);
+ return NULL;
+ }
+
+ return entry;
+}
+
+/**
+ * irdma_del_apbvt - delete tcp port from HW apbvt table
+ * @iwdev: irdma device
+ * @entry: apbvt entry object
+ */
+void irdma_del_apbvt(struct irdma_device *iwdev,
+ struct irdma_apbvt_entry *entry)
+{
+ struct irdma_cm_core *cm_core = &iwdev->cm_core;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cm_core->apbvt_lock, flags);
+ if (--entry->use_cnt) {
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+ return;
+ }
+
+ hash_del(&entry->hlist);
+ /* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to
+ * protect against race where add APBVT CQP can race ahead of the delete
+ * APBVT for same port.
+ */
+ irdma_cqp_manage_apbvt_cmd(iwdev, entry->port, false);
+ kfree(entry);
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+}
+
+/**
+ * irdma_manage_arp_cache - manage hw arp cache
+ * @rf: RDMA PCI function
+ * @mac_addr: mac address ptr
+ * @ip_addr: ip addr for arp cache
+ * @ipv4: flag indicating IPv4
+ * @action: add, delete or modify
+ */
+void irdma_manage_arp_cache(struct irdma_pci_f *rf,
+ const unsigned char *mac_addr,
+ u32 *ip_addr, bool ipv4, u32 action)
+{
+ struct irdma_add_arp_cache_entry_info *info;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int arp_index;
+
+ arp_index = irdma_arp_table(rf, ip_addr, ipv4, mac_addr, action);
+ if (arp_index == -1)
+ return;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ if (action == IRDMA_ARP_ADD) {
+ cqp_info->cqp_cmd = IRDMA_OP_ADD_ARP_CACHE_ENTRY;
+ info = &cqp_info->in.u.add_arp_cache_entry.info;
+ memset(info, 0, sizeof(*info));
+ info->arp_index = (u16)arp_index;
+ info->permanent = true;
+ ether_addr_copy(info->mac_addr, mac_addr);
+ cqp_info->in.u.add_arp_cache_entry.scratch =
+ (uintptr_t)cqp_request;
+ cqp_info->in.u.add_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
+ } else {
+ cqp_info->cqp_cmd = IRDMA_OP_DELETE_ARP_CACHE_ENTRY;
+ cqp_info->in.u.del_arp_cache_entry.scratch =
+ (uintptr_t)cqp_request;
+ cqp_info->in.u.del_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
+ cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
+ }
+
+ cqp_info->post_sq = 1;
+ irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+}
+
+/**
+ * irdma_send_syn_cqp_callback - do syn/ack after qhash
+ * @cqp_request: qhash cqp completion
+ */
+static void irdma_send_syn_cqp_callback(struct irdma_cqp_request *cqp_request)
+{
+ struct irdma_cm_node *cm_node = cqp_request->param;
+
+ irdma_send_syn(cm_node, 1);
+ irdma_rem_ref_cm_node(cm_node);
+}
+
+/**
+ * irdma_manage_qhash - add or modify qhash
+ * @iwdev: irdma device
+ * @cminfo: cm info for qhash
+ * @etype: type (syn or quad)
+ * @mtype: type of qhash
+ * @cmnode: cmnode associated with connection
+ * @wait: wait for completion
+ */
+int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
+ enum irdma_quad_entry_type etype,
+ enum irdma_quad_hash_manage_type mtype, void *cmnode,
+ bool wait)
+{
+ struct irdma_qhash_table_info *info;
+ struct irdma_cqp *iwcqp = &iwdev->rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_cm_node *cm_node = cmnode;
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.manage_qhash_table_entry.info;
+ memset(info, 0, sizeof(*info));
+ info->vsi = &iwdev->vsi;
+ info->manage = mtype;
+ info->entry_type = etype;
+ if (cminfo->vlan_id < VLAN_N_VID) {
+ info->vlan_valid = true;
+ info->vlan_id = cminfo->vlan_id;
+ } else {
+ info->vlan_valid = false;
+ }
+ info->ipv4_valid = cminfo->ipv4;
+ info->user_pri = cminfo->user_pri;
+ ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
+ info->qp_num = cminfo->qh_qpid;
+ info->dest_port = cminfo->loc_port;
+ info->dest_ip[0] = cminfo->loc_addr[0];
+ info->dest_ip[1] = cminfo->loc_addr[1];
+ info->dest_ip[2] = cminfo->loc_addr[2];
+ info->dest_ip[3] = cminfo->loc_addr[3];
+ if (etype == IRDMA_QHASH_TYPE_TCP_ESTABLISHED ||
+ etype == IRDMA_QHASH_TYPE_UDP_UNICAST ||
+ etype == IRDMA_QHASH_TYPE_UDP_MCAST ||
+ etype == IRDMA_QHASH_TYPE_ROCE_MCAST ||
+ etype == IRDMA_QHASH_TYPE_ROCEV2_HW) {
+ info->src_port = cminfo->rem_port;
+ info->src_ip[0] = cminfo->rem_addr[0];
+ info->src_ip[1] = cminfo->rem_addr[1];
+ info->src_ip[2] = cminfo->rem_addr[2];
+ info->src_ip[3] = cminfo->rem_addr[3];
+ }
+ if (cmnode) {
+ cqp_request->callback_fcn = irdma_send_syn_cqp_callback;
+ cqp_request->param = cmnode;
+ if (!wait)
+ refcount_inc(&cm_node->refcnt);
+ }
+ if (info->ipv4_valid)
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI4 rem_addr=%pI4 mac=%pM, vlan_id=%d cm_node=%p\n",
+ (!mtype) ? "DELETE" : "ADD",
+ __builtin_return_address(0), info->dest_port,
+ info->src_port, info->dest_ip, info->src_ip,
+ info->mac_addr, cminfo->vlan_id,
+ cmnode ? cmnode : NULL);
+ else
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI6 rem_addr=%pI6 mac=%pM, vlan_id=%d cm_node=%p\n",
+ (!mtype) ? "DELETE" : "ADD",
+ __builtin_return_address(0), info->dest_port,
+ info->src_port, info->dest_ip, info->src_ip,
+ info->mac_addr, cminfo->vlan_id,
+ cmnode ? cmnode : NULL);
+
+ cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp;
+ cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
+ cqp_info->cqp_cmd = IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY;
+ cqp_info->post_sq = 1;
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ if (status && cm_node && !wait)
+ irdma_rem_ref_cm_node(cm_node);
+
+ irdma_put_cqp_request(iwcqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_hw_flush_wqes_callback - Check return code after flush
+ * @cqp_request: flush wqes cqp completion
+ */
+static void irdma_hw_flush_wqes_callback(struct irdma_cqp_request *cqp_request)
+{
+ struct irdma_qp_flush_info *hw_info;
+ struct irdma_sc_qp *qp;
+ struct irdma_qp *iwqp;
+ struct cqp_cmds_info *cqp_info;
+
+ cqp_info = &cqp_request->info;
+ hw_info = &cqp_info->in.u.qp_flush_wqes.info;
+ qp = cqp_info->in.u.qp_flush_wqes.qp;
+ iwqp = qp->qp_uk.back_qp;
+
+ if (cqp_request->compl_info.maj_err_code)
+ return;
+
+ if (hw_info->rq &&
+ (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
+ cqp_request->compl_info.min_err_code == 0)) {
+ /* RQ WQE flush was requested but did not happen */
+ qp->qp_uk.rq_flush_complete = true;
+ }
+ if (hw_info->sq &&
+ (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
+ cqp_request->compl_info.min_err_code == 0)) {
+ if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {
+ ibdev_err(&iwqp->iwdev->ibdev, "Flush QP[%d] failed, SQ has more work",
+ qp->qp_uk.qp_id);
+ irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
+ }
+ qp->qp_uk.sq_flush_complete = true;
+ }
+}
+
+/**
+ * irdma_hw_flush_wqes - flush qp's wqe
+ * @rf: RDMA PCI function
+ * @qp: hardware control qp
+ * @info: info for flush
+ * @wait: flag wait for completion
+ */
+int irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
+ struct irdma_qp_flush_info *info, bool wait)
+{
+ int status;
+ struct irdma_qp_flush_info *hw_info;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_qp *iwqp = qp->qp_uk.back_qp;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ if (!wait)
+ cqp_request->callback_fcn = irdma_hw_flush_wqes_callback;
+ hw_info = &cqp_request->info.in.u.qp_flush_wqes.info;
+ memcpy(hw_info, info, sizeof(*hw_info));
+ cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_flush_wqes.qp = qp;
+ cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ if (status) {
+ qp->qp_uk.sq_flush_complete = true;
+ qp->qp_uk.rq_flush_complete = true;
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ return status;
+ }
+
+ if (!wait || cqp_request->compl_info.maj_err_code)
+ goto put_cqp;
+
+ if (info->rq) {
+ if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
+ cqp_request->compl_info.min_err_code == 0) {
+ /* RQ WQE flush was requested but did not happen */
+ qp->qp_uk.rq_flush_complete = true;
+ }
+ }
+ if (info->sq) {
+ if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
+ cqp_request->compl_info.min_err_code == 0) {
+ /*
+ * Handling case where WQE is posted to empty SQ when
+ * flush has not completed
+ */
+ if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {
+ struct irdma_cqp_request *new_req;
+
+ if (!qp->qp_uk.sq_flush_complete)
+ goto put_cqp;
+ qp->qp_uk.sq_flush_complete = false;
+ qp->flush_sq = false;
+
+ info->rq = false;
+ info->sq = true;
+ new_req = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!new_req) {
+ status = -ENOMEM;
+ goto put_cqp;
+ }
+ cqp_info = &new_req->info;
+ hw_info = &new_req->info.in.u.qp_flush_wqes.info;
+ memcpy(hw_info, info, sizeof(*hw_info));
+ cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_flush_wqes.qp = qp;
+ cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)new_req;
+
+ status = irdma_handle_cqp_op(rf, new_req);
+ if (new_req->compl_info.maj_err_code ||
+ new_req->compl_info.min_err_code != IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
+ status) {
+ ibdev_err(&iwqp->iwdev->ibdev, "fatal QP event: SQ in error but not flushed, qp: %d",
+ iwqp->ibqp.qp_num);
+ qp->qp_uk.sq_flush_complete = false;
+ irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
+ }
+ irdma_put_cqp_request(&rf->cqp, new_req);
+ } else {
+ /* SQ WQE flush was requested but did not happen */
+ qp->qp_uk.sq_flush_complete = true;
+ }
+ } else {
+ if (!IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring))
+ qp->qp_uk.sq_flush_complete = true;
+ }
+ }
+
+ ibdev_dbg(&rf->iwdev->ibdev,
+ "VERBS: qp_id=%d qp_type=%d qpstate=%d ibqpstate=%d last_aeq=%d hw_iw_state=%d maj_err_code=%d min_err_code=%d\n",
+ iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state,
+ iwqp->ibqp_state, iwqp->last_aeq, iwqp->hw_iwarp_state,
+ cqp_request->compl_info.maj_err_code,
+ cqp_request->compl_info.min_err_code);
+put_cqp:
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_gen_ae - generate AE
+ * @rf: RDMA PCI function
+ * @qp: qp associated with AE
+ * @info: info for ae
+ * @wait: wait for completion
+ */
+void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
+ struct irdma_gen_ae_info *info, bool wait)
+{
+ struct irdma_gen_ae_info *ae_info;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ ae_info = &cqp_request->info.in.u.gen_ae.info;
+ memcpy(ae_info, info, sizeof(*ae_info));
+ cqp_info->cqp_cmd = IRDMA_OP_GEN_AE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.gen_ae.qp = qp;
+ cqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request;
+
+ irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+}
+
+void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
+{
+ struct irdma_qp_flush_info info = {};
+ struct irdma_pci_f *rf = iwqp->iwdev->rf;
+ u8 flush_code = iwqp->sc_qp.flush_code;
+
+ if ((!(flush_mask & IRDMA_FLUSH_SQ) &&
+ !(flush_mask & IRDMA_FLUSH_RQ)) ||
+ ((flush_mask & IRDMA_REFLUSH) && rf->rdma_ver >= IRDMA_GEN_3))
+ return;
+
+ /* Set flush info fields */
+ info.sq = flush_mask & IRDMA_FLUSH_SQ;
+ info.rq = flush_mask & IRDMA_FLUSH_RQ;
+
+ /* Generate userflush errors in CQE */
+ info.sq_major_code = IRDMA_FLUSH_MAJOR_ERR;
+ info.sq_minor_code = FLUSH_GENERAL_ERR;
+ info.rq_major_code = IRDMA_FLUSH_MAJOR_ERR;
+ info.rq_minor_code = FLUSH_GENERAL_ERR;
+ info.userflushcode = true;
+ info.err_sq_idx_valid = iwqp->sc_qp.err_sq_idx_valid;
+ info.err_sq_idx = iwqp->sc_qp.err_sq_idx;
+ info.err_rq_idx_valid = iwqp->sc_qp.err_rq_idx_valid;
+ info.err_rq_idx = iwqp->sc_qp.err_rq_idx;
+
+ if (flush_mask & IRDMA_REFLUSH) {
+ if (info.sq)
+ iwqp->sc_qp.flush_sq = false;
+ if (info.rq)
+ iwqp->sc_qp.flush_rq = false;
+ } else {
+ if (flush_code) {
+ if (info.sq && iwqp->sc_qp.sq_flush_code)
+ info.sq_minor_code = flush_code;
+ if (info.rq && iwqp->sc_qp.rq_flush_code)
+ info.rq_minor_code = flush_code;
+ }
+ if (!iwqp->user_mode)
+ queue_delayed_work(iwqp->iwdev->cleanup_wq,
+ &iwqp->dwork_flush,
+ msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
+ }
+
+ /* Issue flush */
+ (void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info,
+ flush_mask & IRDMA_FLUSH_WAIT);
+ iwqp->flush_issued = true;
+}
diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.c b/drivers/infiniband/hw/irdma/i40iw_hw.c
new file mode 100644
index 000000000000..60c1f2b1811d
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/i40iw_hw.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "osdep.h"
+#include "type.h"
+#include "i40iw_hw.h"
+#include "protos.h"
+
+static u32 i40iw_regs[IRDMA_MAX_REGS] = {
+ I40E_PFPE_CQPTAIL,
+ I40E_PFPE_CQPDB,
+ I40E_PFPE_CCQPSTATUS,
+ I40E_PFPE_CCQPHIGH,
+ I40E_PFPE_CCQPLOW,
+ I40E_PFPE_CQARM,
+ I40E_PFPE_CQACK,
+ I40E_PFPE_AEQALLOC,
+ I40E_PFPE_CQPERRCODES,
+ I40E_PFPE_WQEALLOC,
+ I40E_PFINT_DYN_CTLN(0),
+ I40IW_DB_ADDR_OFFSET,
+
+ I40E_GLPCI_LBARCTRL,
+ I40E_GLPE_CPUSTATUS0,
+ I40E_GLPE_CPUSTATUS1,
+ I40E_GLPE_CPUSTATUS2,
+ I40E_PFINT_AEQCTL,
+ I40E_PFINT_CEQCTL(0),
+ I40E_VSIQF_CTL(0),
+ I40E_PFHMC_PDINV,
+ I40E_GLHMC_VFPDINV(0),
+ I40E_GLPE_CRITERR,
+ 0xffffffff /* PFINT_RATEN not used in FPK */
+};
+
+static u32 i40iw_stat_offsets[] = {
+ I40E_GLPES_PFIP4RXDISCARD(0),
+ I40E_GLPES_PFIP4RXTRUNC(0),
+ I40E_GLPES_PFIP4TXNOROUTE(0),
+ I40E_GLPES_PFIP6RXDISCARD(0),
+ I40E_GLPES_PFIP6RXTRUNC(0),
+ I40E_GLPES_PFIP6TXNOROUTE(0),
+ I40E_GLPES_PFTCPRTXSEG(0),
+ I40E_GLPES_PFTCPRXOPTERR(0),
+ I40E_GLPES_PFTCPRXPROTOERR(0),
+ I40E_GLPES_PFRXVLANERR(0),
+
+ I40E_GLPES_PFIP4RXOCTSLO(0),
+ I40E_GLPES_PFIP4RXPKTSLO(0),
+ I40E_GLPES_PFIP4RXFRAGSLO(0),
+ I40E_GLPES_PFIP4RXMCPKTSLO(0),
+ I40E_GLPES_PFIP4TXOCTSLO(0),
+ I40E_GLPES_PFIP4TXPKTSLO(0),
+ I40E_GLPES_PFIP4TXFRAGSLO(0),
+ I40E_GLPES_PFIP4TXMCPKTSLO(0),
+ I40E_GLPES_PFIP6RXOCTSLO(0),
+ I40E_GLPES_PFIP6RXPKTSLO(0),
+ I40E_GLPES_PFIP6RXFRAGSLO(0),
+ I40E_GLPES_PFIP6RXMCPKTSLO(0),
+ I40E_GLPES_PFIP6TXOCTSLO(0),
+ I40E_GLPES_PFIP6TXPKTSLO(0),
+ I40E_GLPES_PFIP6TXFRAGSLO(0),
+ I40E_GLPES_PFIP6TXMCPKTSLO(0),
+ I40E_GLPES_PFTCPRXSEGSLO(0),
+ I40E_GLPES_PFTCPTXSEGLO(0),
+ I40E_GLPES_PFRDMARXRDSLO(0),
+ I40E_GLPES_PFRDMARXSNDSLO(0),
+ I40E_GLPES_PFRDMARXWRSLO(0),
+ I40E_GLPES_PFRDMATXRDSLO(0),
+ I40E_GLPES_PFRDMATXSNDSLO(0),
+ I40E_GLPES_PFRDMATXWRSLO(0),
+ I40E_GLPES_PFRDMAVBNDLO(0),
+ I40E_GLPES_PFRDMAVINVLO(0),
+ I40E_GLPES_PFIP4RXMCOCTSLO(0),
+ I40E_GLPES_PFIP4TXMCOCTSLO(0),
+ I40E_GLPES_PFIP6RXMCOCTSLO(0),
+ I40E_GLPES_PFIP6TXMCOCTSLO(0),
+ I40E_GLPES_PFUDPRXPKTSLO(0),
+ I40E_GLPES_PFUDPTXPKTSLO(0)
+};
+
+static u64 i40iw_masks[IRDMA_MAX_MASKS] = {
+ I40E_PFPE_CCQPSTATUS_CCQP_DONE,
+ I40E_PFPE_CCQPSTATUS_CCQP_ERR,
+ I40E_CQPSQ_STAG_PDID,
+ I40E_CQPSQ_CQ_CEQID,
+ I40E_CQPSQ_CQ_CQID,
+ I40E_COMMIT_FPM_CQCNT,
+ I40E_CQPSQ_UPESD_HMCFNID,
+};
+
+static u64 i40iw_shifts[IRDMA_MAX_SHIFTS] = {
+ I40E_PFPE_CCQPSTATUS_CCQP_DONE_S,
+ I40E_PFPE_CCQPSTATUS_CCQP_ERR_S,
+ I40E_CQPSQ_STAG_PDID_S,
+ I40E_CQPSQ_CQ_CEQID_S,
+ I40E_CQPSQ_CQ_CQID_S,
+ I40E_COMMIT_FPM_CQCNT_S,
+ I40E_CQPSQ_UPESD_HMCFNID_S,
+};
+
+/**
+ * i40iw_config_ceq - Configure CEQ interrupt
+ * @dev: pointer to the device structure
+ * @ceq_id: Completion Event Queue ID
+ * @idx: vector index
+ * @enable: Enable CEQ interrupt when true
+ */
+static void i40iw_config_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
+ bool enable)
+{
+ u32 reg_val;
+
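+ /* Hook this CEQ into the vector's interrupt linked list, then enable
+ * the vector and the CEQ interrupt cause.
+ */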
+ reg_val = FIELD_PREP(I40E_PFINT_LNKLSTN_FIRSTQ_INDX, ceq_id) |
+ FIELD_PREP(I40E_PFINT_LNKLSTN_FIRSTQ_TYPE, QUEUE_TYPE_CEQ);
+ wr32(dev->hw, I40E_PFINT_LNKLSTN(idx - 1), reg_val);
+
+ reg_val = FIELD_PREP(I40E_PFINT_DYN_CTLN_ITR_INDX, 0x3) |
+ FIELD_PREP(I40E_PFINT_DYN_CTLN_INTENA, 0x1);
+ wr32(dev->hw, I40E_PFINT_DYN_CTLN(idx - 1), reg_val);
+
+ reg_val = FIELD_PREP(IRDMA_GLINT_CEQCTL_CAUSE_ENA, enable) |
+ FIELD_PREP(IRDMA_GLINT_CEQCTL_MSIX_INDX, idx) |
+ FIELD_PREP(I40E_PFINT_CEQCTL_NEXTQ_INDX, NULL_QUEUE_INDEX) |
+ FIELD_PREP(IRDMA_GLINT_CEQCTL_ITR_INDX, 0x3);
+
+ wr32(dev->hw, i40iw_regs[IRDMA_GLINT_CEQCTL] + 4 * ceq_id, reg_val);
+}
+
+/**
+ * i40iw_ena_irq - Enable interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ */
+static void i40iw_ena_irq(struct irdma_sc_dev *dev, u32 idx)
+{
+ u32 val;
+
+ val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, 0x1) |
+ FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, 0x1) |
+ FIELD_PREP(IRDMA_GLINT_DYN_CTL_ITR_INDX, 0x3);
+ wr32(dev->hw, i40iw_regs[IRDMA_GLINT_DYN_CTL] + 4 * (idx - 1), val);
+}
+
+/**
+ * i40iw_disable_irq - Disable interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ */
+static void i40iw_disable_irq(struct irdma_sc_dev *dev, u32 idx)
+{
+ wr32(dev->hw, i40iw_regs[IRDMA_GLINT_DYN_CTL] + 4 * (idx - 1), 0);
+}
+
+static const struct irdma_irq_ops i40iw_irq_ops = {
+ .irdma_cfg_aeq = irdma_cfg_aeq,
+ .irdma_cfg_ceq = i40iw_config_ceq,
+ .irdma_dis_irq = i40iw_disable_irq,
+ .irdma_en_irq = i40iw_ena_irq,
+};
+
+static const struct irdma_hw_stat_map i40iw_hw_stat_map[] = {
+ [IRDMA_HW_STAT_INDEX_RXVLANERR] = { 0, 0, IRDMA_MAX_STATS_24 },
+ [IRDMA_HW_STAT_INDEX_IP4RXOCTS] = { 8, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4RXPKTS] = { 16, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = { 24, 0, IRDMA_MAX_STATS_32 },
+ [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = { 32, 0, IRDMA_MAX_STATS_32 },
+ [IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = { 40, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = { 48, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6RXOCTS] = { 56, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6RXPKTS] = { 64, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = { 72, 0, IRDMA_MAX_STATS_32 },
+ [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = { 80, 0, IRDMA_MAX_STATS_32 },
+ [IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = { 88, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = { 96, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4TXOCTS] = { 104, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4TXPKTS] = { 112, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = { 120, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = { 128, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6TXOCTS] = { 136, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6TXPKTS] = { 144, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = { 152, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = { 160, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = { 168, 0, IRDMA_MAX_STATS_24 },
+ [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = { 176, 0, IRDMA_MAX_STATS_24 },
+ [IRDMA_HW_STAT_INDEX_TCPRXSEGS] = { 184, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = { 192, 0, IRDMA_MAX_STATS_24 },
+ [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = { 200, 0, IRDMA_MAX_STATS_24 },
+ [IRDMA_HW_STAT_INDEX_TCPTXSEG] = { 208, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = { 216, 0, IRDMA_MAX_STATS_32 },
+ [IRDMA_HW_STAT_INDEX_RDMARXWRS] = { 224, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMARXRDS] = { 232, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMARXSNDS] = { 240, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMATXWRS] = { 248, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMATXRDS] = { 256, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMATXSNDS] = { 264, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMAVBND] = { 272, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMAVINV] = { 280, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = { 288, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = { 296, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = { 304, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = { 312, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_UDPRXPKTS] = { 320, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_UDPTXPKTS] = { 328, 0, IRDMA_MAX_STATS_48 },
+};
+
+void i40iw_init_hw(struct irdma_sc_dev *dev)
+{
+ int i;
+ u8 __iomem *hw_addr;
+
+ for (i = 0; i < IRDMA_MAX_REGS; ++i) {
+ hw_addr = dev->hw->hw_addr;
+
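+ /* The doorbell entry is stored as a raw offset rather than a mapped
+ * register address, so leave hw_addr out of the sum.
+ */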
+ if (i == IRDMA_DB_ADDR_OFFSET)
+ hw_addr = NULL;
+
+ dev->hw_regs[i] = (u32 __iomem *)(i40iw_regs[i] + hw_addr);
+ }
+
+ for (i = 0; i < IRDMA_HW_STAT_INDEX_MAX_GEN_1; ++i)
+ dev->hw_stats_regs[i] = i40iw_stat_offsets[i];
+
+ dev->hw_attrs.first_hw_vf_fpm_id = I40IW_FIRST_VF_FPM_ID;
+ dev->hw_attrs.max_hw_vf_fpm_id = IRDMA_MAX_VF_FPM_ID;
+
+ for (i = 0; i < IRDMA_MAX_SHIFTS; ++i)
+ dev->hw_shifts[i] = i40iw_shifts[i];
+
+ for (i = 0; i < IRDMA_MAX_MASKS; ++i)
+ dev->hw_masks[i] = i40iw_masks[i];
+
+ dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
+ dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
+ dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
+ dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
+ dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
+ dev->ceq_itr_mask_db = NULL;
+ dev->aeq_itr_mask_db = NULL;
+ dev->irq_ops = &i40iw_irq_ops;
+ dev->hw_stats_map = i40iw_hw_stat_map;
+
+ /* Set up the hardware limits; HMC may limit further */
+ dev->hw_attrs.uk_attrs.max_hw_wq_frags = I40IW_MAX_WQ_FRAGMENT_COUNT;
+ dev->hw_attrs.uk_attrs.max_hw_read_sges = I40IW_MAX_SGE_RD;
+ dev->hw_attrs.max_hw_device_pages = I40IW_MAX_PUSH_PAGE_COUNT;
+ dev->hw_attrs.uk_attrs.max_hw_inline = I40IW_MAX_INLINE_DATA_SIZE;
+ dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M;
+ dev->hw_attrs.max_hw_ird = I40IW_MAX_IRD_SIZE;
+ dev->hw_attrs.max_hw_ord = I40IW_MAX_ORD_SIZE;
+ dev->hw_attrs.max_hw_wqes = I40IW_MAX_WQ_ENTRIES;
+ dev->hw_attrs.uk_attrs.max_hw_rq_quanta = I40IW_QP_SW_MAX_RQ_QUANTA;
+ dev->hw_attrs.uk_attrs.max_hw_wq_quanta = I40IW_QP_SW_MAX_WQ_QUANTA;
+ dev->hw_attrs.uk_attrs.max_hw_sq_chunk = I40IW_MAX_QUANTA_PER_WR;
+ dev->hw_attrs.max_hw_pds = I40IW_MAX_PDS;
+ dev->hw_attrs.max_stat_inst = I40IW_MAX_STATS_COUNT;
+ dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_1;
+ dev->hw_attrs.max_hw_outbound_msg_size = I40IW_MAX_OUTBOUND_MSG_SIZE;
+ dev->hw_attrs.max_hw_inbound_msg_size = I40IW_MAX_INBOUND_MSG_SIZE;
+ dev->hw_attrs.uk_attrs.min_hw_wq_size = I40IW_MIN_WQ_SIZE;
+ dev->hw_attrs.max_qp_wr = I40IW_MAX_QP_WRS;
+}
diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.h b/drivers/infiniband/hw/irdma/i40iw_hw.h
new file mode 100644
index 000000000000..0095b327afcc
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/i40iw_hw.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#ifndef I40IW_HW_H
+#define I40IW_HW_H
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
+
+#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
+
+#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
+#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
+#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
+#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
+#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
+#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
+#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
+#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
+#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
+#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
+#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
+#define I40E_GLPE_CRITERR 0x000B4000 /* Reset: PE_CORER */
+#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */
+
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+
+#define I40IW_DB_ADDR_OFFSET (4 * 1024 * 1024 - 64 * 1024)
+
+#define I40IW_VF_DB_ADDR_OFFSET (64 * 1024)
+
+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX GENMASK(10, 0)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE GENMASK(12, 11)
+
+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */
+#define I40E_PFINT_CEQCTL_MAX_INDEX 511
+
+/* shifts/masks for FLD_[LS/RS]_64 macros used in device table */
+#define I40E_PFINT_CEQCTL_MSIX_INDX_S 0
+#define I40E_PFINT_CEQCTL_MSIX_INDX GENMASK(7, 0)
+#define I40E_PFINT_CEQCTL_ITR_INDX_S 11
+#define I40E_PFINT_CEQCTL_ITR_INDX GENMASK(12, 11)
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_S 13
+#define I40E_PFINT_CEQCTL_MSIX0_INDX GENMASK(15, 13)
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_S 16
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX GENMASK(26, 16)
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_S 27
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE GENMASK(28, 27)
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_S 30
+#define I40E_PFINT_CEQCTL_CAUSE_ENA BIT(30)
+#define I40E_PFINT_CEQCTL_INTEVENT_S 31
+#define I40E_PFINT_CEQCTL_INTEVENT BIT(31)
+#define I40E_CQPSQ_STAG_PDID_S 48
+#define I40E_CQPSQ_STAG_PDID GENMASK_ULL(62, 48)
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_S 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE BIT_ULL(0)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_S 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR BIT_ULL(31)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_S 3
+#define I40E_PFINT_DYN_CTLN_ITR_INDX GENMASK(4, 3)
+#define I40E_PFINT_DYN_CTLN_INTENA_S 0
+#define I40E_PFINT_DYN_CTLN_INTENA BIT(0)
+#define I40E_CQPSQ_CQ_CEQID_S 24
+#define I40E_CQPSQ_CQ_CEQID GENMASK(30, 24)
+#define I40E_CQPSQ_CQ_CQID_S 0
+#define I40E_CQPSQ_CQ_CQID GENMASK_ULL(15, 0)
+#define I40E_COMMIT_FPM_CQCNT_S 0
+#define I40E_COMMIT_FPM_CQCNT GENMASK_ULL(17, 0)
+#define I40E_CQPSQ_UPESD_HMCFNID_S 0
+#define I40E_CQPSQ_UPESD_HMCFNID GENMASK_ULL(5, 0)
+
+#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4))
+
+enum i40iw_device_caps_const {
+ I40IW_MAX_WQ_FRAGMENT_COUNT = 3,
+ I40IW_MAX_SGE_RD = 1,
+ I40IW_MAX_PUSH_PAGE_COUNT = 0,
+ I40IW_MAX_INLINE_DATA_SIZE = 48,
+ I40IW_MAX_IRD_SIZE = 63,
+ I40IW_MAX_ORD_SIZE = 127,
+ I40IW_MAX_WQ_ENTRIES = 2048,
+ I40IW_MAX_WQE_SIZE_RQ = 128,
+ I40IW_MAX_PDS = 32768,
+ I40IW_MAX_STATS_COUNT = 16,
+ I40IW_MAX_CQ_SIZE = 1048575,
+ I40IW_MAX_OUTBOUND_MSG_SIZE = 2147483647,
+ I40IW_MAX_INBOUND_MSG_SIZE = 2147483647,
+ I40IW_MIN_WQ_SIZE = 4 /* WQEs */,
+};
+
+#define I40IW_QP_WQE_MIN_SIZE 32
+#define I40IW_QP_WQE_MAX_SIZE 128
+#define I40IW_MAX_RQ_WQE_SHIFT 2
+#define I40IW_MAX_QUANTA_PER_WR 2
+
+#define I40IW_QP_SW_MAX_SQ_QUANTA 2048
+#define I40IW_QP_SW_MAX_RQ_QUANTA 16384
+#define I40IW_QP_SW_MAX_WQ_QUANTA 2048
+#define I40IW_MAX_QP_WRS ((I40IW_QP_SW_MAX_SQ_QUANTA - IRDMA_SQ_RSVD) / I40IW_MAX_QUANTA_PER_WR)
+#define I40IW_FIRST_VF_FPM_ID 16
+#define QUEUE_TYPE_CEQ 2
+#define NULL_QUEUE_INDEX 0x7FF
+
+void i40iw_init_hw(struct irdma_sc_dev *dev);
+#endif /* I40IW_HW_H */
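A minimal sketch of how the GEN_1 queue sizing constants above combine: I40IW_MAX_QP_WRS divides the software SQ quanta (minus the reserved quanta) by the quanta consumed per work request. IRDMA_SQ_RSVD is defined elsewhere in the driver; the value and helper name below are assumptions used only for illustration.

/*
 * Illustration only -- not part of the patch. Assumes IRDMA_SQ_RSVD == 1;
 * the real constant lives in the shared irdma headers.
 */
#define EXAMPLE_IRDMA_SQ_RSVD 1

static inline u32 i40iw_example_max_qp_wrs(void)
{
	/* (2048 SQ quanta - reserved quanta) / 2 quanta per WR = 1023 WRs */
	return (I40IW_QP_SW_MAX_SQ_QUANTA - EXAMPLE_IRDMA_SQ_RSVD) /
	       I40IW_MAX_QUANTA_PER_WR;
}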
diff --git a/drivers/infiniband/hw/irdma/i40iw_if.c b/drivers/infiniband/hw/irdma/i40iw_if.c
new file mode 100644
index 000000000000..15e036ddaffb
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/i40iw_if.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "main.h"
+#include "i40iw_hw.h"
+#include <linux/net/intel/i40e_client.h>
+
+static struct i40e_client i40iw_client;
+
+/**
+ * i40iw_l2param_change - handle MTU change
+ * @cdev_info: parent lan device information structure with data/ops
+ * @client: client for parameter change
+ * @params: new parameters from L2
+ */
+static void i40iw_l2param_change(struct i40e_info *cdev_info,
+ struct i40e_client *client,
+ struct i40e_params *params)
+{
+ struct irdma_l2params l2params = {};
+ struct irdma_device *iwdev;
+ struct ib_device *ibdev;
+
+ ibdev = ib_device_get_by_netdev(cdev_info->netdev, RDMA_DRIVER_IRDMA);
+ if (!ibdev)
+ return;
+
+ iwdev = to_iwdev(ibdev);
+
+ if (iwdev->vsi.mtu != params->mtu) {
+ l2params.mtu_changed = true;
+ l2params.mtu = params->mtu;
+ }
+ irdma_change_l2params(&iwdev->vsi, &l2params);
+ ib_device_put(ibdev);
+}
+
+/**
+ * i40iw_close - client interface operation close for iwarp/uda device
+ * @cdev_info: parent lan device information structure with data/ops
+ * @client: client to close
+ * @reset: flag to indicate close on reset
+ *
+ * Called by the lan driver during the processing of client unregister.
+ * Destroy and clean up the driver resources.
+ */
+static void i40iw_close(struct i40e_info *cdev_info, struct i40e_client *client,
+ bool reset)
+{
+ struct irdma_device *iwdev;
+ struct ib_device *ibdev;
+
+ ibdev = ib_device_get_by_netdev(cdev_info->netdev, RDMA_DRIVER_IRDMA);
+ if (WARN_ON(!ibdev))
+ return;
+
+ iwdev = to_iwdev(ibdev);
+ if (reset)
+ iwdev->rf->reset = true;
+
+ iwdev->iw_status = 0;
+ irdma_port_ibevent(iwdev);
+ ib_unregister_device_and_put(ibdev);
+ pr_debug("INIT: Gen1 PF[%d] close complete\n", PCI_FUNC(cdev_info->pcidev->devfn));
+}
+
+static void i40iw_request_reset(struct irdma_pci_f *rf)
+{
+ struct i40e_info *cdev_info = rf->cdev;
+
+ cdev_info->ops->request_reset(cdev_info, &i40iw_client, 1);
+}
+
+static void i40iw_fill_device_info(struct irdma_device *iwdev, struct i40e_info *cdev_info)
+{
+ struct irdma_pci_f *rf = iwdev->rf;
+
+ rf->rdma_ver = IRDMA_GEN_1;
+ rf->sc_dev.hw = &rf->hw;
+ rf->sc_dev.hw_attrs.uk_attrs.hw_rev = IRDMA_GEN_1;
+ rf->sc_dev.privileged = true;
+ rf->gen_ops.request_reset = i40iw_request_reset;
+ rf->pcidev = cdev_info->pcidev;
+ rf->pf_id = cdev_info->fid;
+ rf->hw.hw_addr = cdev_info->hw_addr;
+ rf->cdev = cdev_info;
+ rf->msix_count = cdev_info->msix_count;
+ rf->msix_entries = cdev_info->msix_entries;
+ rf->limits_sel = 5;
+ rf->protocol_used = IRDMA_IWARP_PROTOCOL_ONLY;
+ rf->iwdev = iwdev;
+
+ iwdev->init_state = INITIAL_STATE;
+ iwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED;
+ iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
+ iwdev->netdev = cdev_info->netdev;
+ iwdev->vsi_num = 0;
+}
+
+/**
+ * i40iw_open - client interface operation open for iwarp/uda device
+ * @cdev_info: parent lan device information structure with data/ops
+ * @client: iwarp client information, provided during registration
+ *
+ * Called by the lan driver during the processing of client register.
+ * Create device resources, set up queues, pble and hmc objects and
+ * register the device with the ib verbs interface.
+ * Return 0 if successful, otherwise return error.
+ */
+static int i40iw_open(struct i40e_info *cdev_info, struct i40e_client *client)
+{
+ struct irdma_l2params l2params = {};
+ struct irdma_device *iwdev;
+ struct irdma_pci_f *rf;
+ int err = -EIO;
+ int i;
+ u16 qset;
+ u16 last_qset = IRDMA_NO_QSET;
+
+ iwdev = ib_alloc_device(irdma_device, ibdev);
+ if (!iwdev)
+ return -ENOMEM;
+
+ iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL);
+ if (!iwdev->rf) {
+ ib_dealloc_device(&iwdev->ibdev);
+ return -ENOMEM;
+ }
+
+ i40iw_fill_device_info(iwdev, cdev_info);
+ rf = iwdev->rf;
+
+ if (irdma_ctrl_init_hw(rf)) {
+ err = -EIO;
+ goto err_ctrl_init;
+ }
+
+ l2params.mtu = (cdev_info->params.mtu) ? cdev_info->params.mtu : IRDMA_DEFAULT_MTU;
+ for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {
+ qset = cdev_info->params.qos.prio_qos[i].qs_handle;
+ l2params.up2tc[i] = cdev_info->params.qos.prio_qos[i].tc;
+ l2params.qs_handle_list[i] = qset;
+ if (last_qset == IRDMA_NO_QSET)
+ last_qset = qset;
+ else if ((qset != last_qset) && (qset != IRDMA_NO_QSET))
+ iwdev->dcb_vlan_mode = true;
+ }
+
+ if (irdma_rt_init_hw(iwdev, &l2params)) {
+ err = -EIO;
+ goto err_rt_init;
+ }
+
+ err = irdma_ib_register_device(iwdev);
+ if (err)
+ goto err_ibreg;
+
+ ibdev_dbg(&iwdev->ibdev, "INIT: Gen1 PF[%d] open success\n",
+ PCI_FUNC(rf->pcidev->devfn));
+
+ return 0;
+
+err_ibreg:
+ irdma_rt_deinit_hw(iwdev);
+err_rt_init:
+ irdma_ctrl_deinit_hw(rf);
+err_ctrl_init:
+ kfree(iwdev->rf);
+ ib_dealloc_device(&iwdev->ibdev);
+
+ return err;
+}
+
+/* client interface functions */
+static const struct i40e_client_ops i40e_ops = {
+ .open = i40iw_open,
+ .close = i40iw_close,
+ .l2_param_change = i40iw_l2param_change
+};
+
+static struct i40e_client i40iw_client = {
+ .ops = &i40e_ops,
+ .type = I40E_CLIENT_IWARP,
+};
+
+static int i40iw_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id)
+{
+ struct i40e_auxiliary_device *i40e_adev = container_of(aux_dev,
+ struct i40e_auxiliary_device,
+ aux_dev);
+ struct i40e_info *cdev_info = i40e_adev->ldev;
+
+ strscpy_pad(i40iw_client.name, "irdma", I40E_CLIENT_STR_LENGTH);
+ i40e_client_device_register(cdev_info, &i40iw_client);
+
+ return 0;
+}
+
+static void i40iw_remove(struct auxiliary_device *aux_dev)
+{
+ struct i40e_auxiliary_device *i40e_adev = container_of(aux_dev,
+ struct i40e_auxiliary_device,
+ aux_dev);
+ struct i40e_info *cdev_info = i40e_adev->ldev;
+
+ i40e_client_device_unregister(cdev_info);
+}
+
+static const struct auxiliary_device_id i40iw_auxiliary_id_table[] = {
+ {.name = "i40e.iwarp", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, i40iw_auxiliary_id_table);
+
+struct auxiliary_driver i40iw_auxiliary_drv = {
+ .name = "gen_1",
+ .id_table = i40iw_auxiliary_id_table,
+ .probe = i40iw_probe,
+ .remove = i40iw_remove,
+};
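The qset scan in i40iw_open() above turns on DCB/VLAN mode as soon as two user priorities map to different valid qset handles. A minimal standalone sketch of that check (hypothetical helper, shown only to make the rule explicit):

static bool i40iw_example_detect_dcb(const u16 *qs_handle, int count)
{
	u16 last_qset = IRDMA_NO_QSET;
	int i;

	for (i = 0; i < count; i++) {
		if (qs_handle[i] == IRDMA_NO_QSET)
			continue;
		if (last_qset == IRDMA_NO_QSET)
			last_qset = qs_handle[i];	/* first valid qset seen */
		else if (qs_handle[i] != last_qset)
			return true;			/* mixed qsets => DCB/VLAN mode */
	}

	return false;
}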
diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.c b/drivers/infiniband/hw/irdma/icrdma_hw.c
new file mode 100644
index 000000000000..32f26284a788
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/icrdma_hw.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2017 - 2021 Intel Corporation */
+#include "osdep.h"
+#include "type.h"
+#include "icrdma_hw.h"
+
+static u32 icrdma_regs[IRDMA_MAX_REGS] = {
+ PFPE_CQPTAIL,
+ PFPE_CQPDB,
+ PFPE_CCQPSTATUS,
+ PFPE_CCQPHIGH,
+ PFPE_CCQPLOW,
+ PFPE_CQARM,
+ PFPE_CQACK,
+ PFPE_AEQALLOC,
+ PFPE_CQPERRCODES,
+ PFPE_WQEALLOC,
+ GLINT_DYN_CTL(0),
+ ICRDMA_DB_ADDR_OFFSET,
+
+ GLPCI_LBARCTRL,
+ GLPE_CPUSTATUS0,
+ GLPE_CPUSTATUS1,
+ GLPE_CPUSTATUS2,
+ PFINT_AEQCTL,
+ GLINT_CEQCTL(0),
+ VSIQF_PE_CTL1(0),
+ PFHMC_PDINV,
+ GLHMC_VFPDINV(0),
+ GLPE_CRITERR,
+ GLINT_RATE(0),
+};
+
+static u64 icrdma_masks[IRDMA_MAX_MASKS] = {
+ ICRDMA_CCQPSTATUS_CCQP_DONE,
+ ICRDMA_CCQPSTATUS_CCQP_ERR,
+ ICRDMA_CQPSQ_STAG_PDID,
+ ICRDMA_CQPSQ_CQ_CEQID,
+ ICRDMA_CQPSQ_CQ_CQID,
+ ICRDMA_COMMIT_FPM_CQCNT,
+ ICRDMA_CQPSQ_UPESD_HMCFNID,
+};
+
+static u64 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
+ ICRDMA_CCQPSTATUS_CCQP_DONE_S,
+ ICRDMA_CCQPSTATUS_CCQP_ERR_S,
+ ICRDMA_CQPSQ_STAG_PDID_S,
+ ICRDMA_CQPSQ_CQ_CEQID_S,
+ ICRDMA_CQPSQ_CQ_CQID_S,
+ ICRDMA_COMMIT_FPM_CQCNT_S,
+ ICRDMA_CQPSQ_UPESD_HMCFNID_S,
+};
+
+/**
+ * icrdma_ena_irq - Enable interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ */
+static void icrdma_ena_irq(struct irdma_sc_dev *dev, u32 idx)
+{
+ u32 val;
+ u32 interval = 0;
+
+ if (dev->ceq_itr && dev->aeq->msix_idx != idx)
+ interval = dev->ceq_itr >> 1; /* 2 usec units */
+ val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_ITR_INDX, 0) |
+ FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTERVAL, interval) |
+ FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, 1) |
+ FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, 1);
+
+ if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1)
+ writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
+ else
+ writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx - 1));
+}
+
+/**
+ * icrdma_disable_irq - Disable interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ */
+static void icrdma_disable_irq(struct irdma_sc_dev *dev, u32 idx)
+{
+ if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1)
+ writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
+ else
+ writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx - 1));
+}
+
+/**
+ * icrdma_cfg_ceq - Configure CEQ interrupt
+ * @dev: pointer to the device structure
+ * @ceq_id: Completion Event Queue ID
+ * @idx: vector index
+ * @enable: true to enable, false to disable
+ */
+static void icrdma_cfg_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
+ bool enable)
+{
+ u32 reg_val;
+
+ reg_val = FIELD_PREP(IRDMA_GLINT_CEQCTL_CAUSE_ENA, enable) |
+ FIELD_PREP(IRDMA_GLINT_CEQCTL_MSIX_INDX, idx) |
+ FIELD_PREP(IRDMA_GLINT_CEQCTL_ITR_INDX, 3);
+
+ writel(reg_val, dev->hw_regs[IRDMA_GLINT_CEQCTL] + ceq_id);
+}
+
+static const struct irdma_irq_ops icrdma_irq_ops = {
+ .irdma_cfg_aeq = irdma_cfg_aeq,
+ .irdma_cfg_ceq = icrdma_cfg_ceq,
+ .irdma_dis_irq = icrdma_disable_irq,
+ .irdma_en_irq = icrdma_ena_irq,
+};
+
+static const struct irdma_hw_stat_map icrdma_hw_stat_map[] = {
+ [IRDMA_HW_STAT_INDEX_RXVLANERR] = { 0, 32, IRDMA_MAX_STATS_24 },
+ [IRDMA_HW_STAT_INDEX_IP4RXOCTS] = { 8, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4RXPKTS] = { 16, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = { 24, 32, IRDMA_MAX_STATS_32 },
+ [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = { 24, 0, IRDMA_MAX_STATS_32 },
+ [IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = { 32, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = { 40, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = { 48, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6RXOCTS] = { 56, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6RXPKTS] = { 64, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = { 72, 32, IRDMA_MAX_STATS_32 },
+ [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = { 72, 0, IRDMA_MAX_STATS_32 },
+ [IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = { 80, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = { 88, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = { 96, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4TXOCTS] = { 104, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4TXPKTS] = { 112, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = { 120, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = { 128, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = { 136, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6TXOCTS] = { 144, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6TXPKTS] = { 152, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = { 160, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = { 168, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = { 176, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = { 184, 32, IRDMA_MAX_STATS_24 },
+ [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = { 184, 0, IRDMA_MAX_STATS_24 },
+ [IRDMA_HW_STAT_INDEX_TCPRXSEGS] = { 192, 32, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = { 200, 32, IRDMA_MAX_STATS_24 },
+ [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = { 200, 0, IRDMA_MAX_STATS_24 },
+ [IRDMA_HW_STAT_INDEX_TCPTXSEG] = { 208, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = { 216, 32, IRDMA_MAX_STATS_32 },
+ [IRDMA_HW_STAT_INDEX_UDPRXPKTS] = { 224, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_UDPTXPKTS] = { 232, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMARXWRS] = { 240, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMARXRDS] = { 248, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMARXSNDS] = { 256, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMATXWRS] = { 264, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMATXRDS] = { 272, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMATXSNDS] = { 280, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMAVBND] = { 288, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMAVINV] = { 296, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = { 304, 0, IRDMA_MAX_STATS_56 },
+ [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = { 312, 32, IRDMA_MAX_STATS_24 },
+ [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = { 312, 0, IRDMA_MAX_STATS_32 },
+ [IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = { 320, 0, IRDMA_MAX_STATS_32 },
+};
+
+void icrdma_init_hw(struct irdma_sc_dev *dev)
+{
+ int i;
+ u8 __iomem *hw_addr;
+
+ for (i = 0; i < IRDMA_MAX_REGS; ++i) {
+ hw_addr = dev->hw->hw_addr;
+
+ if (i == IRDMA_DB_ADDR_OFFSET)
+ hw_addr = NULL;
+
+ dev->hw_regs[i] = (u32 __iomem *)(hw_addr + icrdma_regs[i]);
+ }
+ dev->hw_attrs.max_hw_vf_fpm_id = IRDMA_MAX_VF_FPM_ID;
+ dev->hw_attrs.first_hw_vf_fpm_id = IRDMA_FIRST_VF_FPM_ID;
+
+ for (i = 0; i < IRDMA_MAX_SHIFTS; ++i)
+ dev->hw_shifts[i] = icrdma_shifts[i];
+
+ for (i = 0; i < IRDMA_MAX_MASKS; ++i)
+ dev->hw_masks[i] = icrdma_masks[i];
+
+ dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
+ dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
+ dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
+ dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
+ dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
+ dev->irq_ops = &icrdma_irq_ops;
+ dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
+ dev->hw_stats_map = icrdma_hw_stat_map;
+ dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE;
+ dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
+ dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
+ dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_2;
+ dev->hw_attrs.max_hw_device_pages = ICRDMA_MAX_PUSH_PAGE_COUNT;
+
+ dev->hw_attrs.uk_attrs.min_hw_wq_size = ICRDMA_MIN_WQ_SIZE;
+ dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE |
+ IRDMA_FEATURE_CQ_RESIZE;
+}
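icrdma_init_hw() above fills dev->hw_regs, dev->hw_shifts and dev->hw_masks from per-generation tables so common code can access registers without knowing the GEN_2 layout. A minimal sketch of how such a table-driven lookup is typically consumed (the helper name is hypothetical and for illustration only):

static bool irdma_example_ccqp_done(struct irdma_sc_dev *dev)
{
	u64 status = readl(dev->hw_regs[IRDMA_CCQPSTATUS]);

	/* mask/shift indices resolve to the generation-specific bit layout */
	return (status & dev->hw_masks[IRDMA_CCQPSTATUS_CCQP_DONE_M]) >>
	       dev->hw_shifts[IRDMA_CCQPSTATUS_CCQP_DONE_S];
}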
diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.h b/drivers/infiniband/hw/irdma/icrdma_hw.h
new file mode 100644
index 000000000000..d97944ab45da
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/icrdma_hw.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2017 - 2021 Intel Corporation */
+#ifndef ICRDMA_HW_H
+#define ICRDMA_HW_H
+
+#include "irdma.h"
+
+#define VFPE_CQPTAIL1 0x0000a000
+#define VFPE_CQPDB1 0x0000bc00
+#define VFPE_CCQPSTATUS1 0x0000b800
+#define VFPE_CCQPHIGH1 0x00009800
+#define VFPE_CCQPLOW1 0x0000ac00
+#define VFPE_CQARM1 0x0000b400
+#define VFPE_CQACK1 0x0000b000
+#define VFPE_AEQALLOC1 0x0000a400
+#define VFPE_CQPERRCODES1 0x00009c00
+#define VFPE_WQEALLOC1 0x0000c000
+#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) /* _i=0...63 */
+
+#define PFPE_CQPTAIL 0x00500880
+#define PFPE_CQPDB 0x00500800
+#define PFPE_CCQPSTATUS 0x0050a000
+#define PFPE_CCQPHIGH 0x0050a100
+#define PFPE_CCQPLOW 0x0050a080
+#define PFPE_CQARM 0x00502c00
+#define PFPE_CQACK 0x00502c80
+#define PFPE_AEQALLOC 0x00502d00
+#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4)) /* _i=0...2047 */
+#define GLPCI_LBARCTRL 0x0009de74
+#define GLPE_CPUSTATUS0 0x0050ba5c
+#define GLPE_CPUSTATUS1 0x0050ba60
+#define GLPE_CPUSTATUS2 0x0050ba64
+#define PFINT_AEQCTL 0x0016cb00
+#define PFPE_CQPERRCODES 0x0050a200
+#define PFPE_WQEALLOC 0x00504400
+#define GLINT_CEQCTL(_INT) (0x0015c000 + ((_INT) * 4)) /* _i=0...2047 */
+#define VSIQF_PE_CTL1(_VSI) (0x00414000 + ((_VSI) * 4)) /* _i=0...767 */
+#define PFHMC_PDINV 0x00520300
+#define GLHMC_VFPDINV(_i) (0x00528300 + ((_i) * 4)) /* _i=0...31 */
+#define GLPE_CRITERR 0x00534000
+#define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
+
+#define ICRDMA_DB_ADDR_OFFSET (8 * 1024 * 1024 - 64 * 1024)
+
+#define ICRDMA_VF_DB_ADDR_OFFSET (64 * 1024)
+
+/* shifts/masks for FLD_[LS/RS]_64 macros used in device table */
+#define ICRDMA_CCQPSTATUS_CCQP_DONE_S 0
+#define ICRDMA_CCQPSTATUS_CCQP_DONE BIT_ULL(0)
+#define ICRDMA_CCQPSTATUS_CCQP_ERR_S 31
+#define ICRDMA_CCQPSTATUS_CCQP_ERR BIT_ULL(31)
+#define ICRDMA_CQPSQ_STAG_PDID_S 46
+#define ICRDMA_CQPSQ_STAG_PDID GENMASK_ULL(63, 46)
+#define ICRDMA_CQPSQ_CQ_CEQID_S 22
+#define ICRDMA_CQPSQ_CQ_CEQID GENMASK_ULL(31, 22)
+#define ICRDMA_CQPSQ_CQ_CQID_S 0
+#define ICRDMA_CQPSQ_CQ_CQID GENMASK_ULL(18, 0)
+#define ICRDMA_COMMIT_FPM_CQCNT_S 0
+#define ICRDMA_COMMIT_FPM_CQCNT GENMASK_ULL(19, 0)
+#define ICRDMA_CQPSQ_UPESD_HMCFNID_S 0
+#define ICRDMA_CQPSQ_UPESD_HMCFNID GENMASK_ULL(5, 0)
+enum icrdma_device_caps_const {
+ ICRDMA_MAX_STATS_COUNT = 128,
+
+ ICRDMA_MAX_IRD_SIZE = 127,
+ ICRDMA_MAX_ORD_SIZE = 255,
+ ICRDMA_MIN_WQ_SIZE = 8 /* WQEs */,
+ ICRDMA_MAX_PUSH_PAGE_COUNT = 256,
+};
+
+void icrdma_init_hw(struct irdma_sc_dev *dev);
+#endif /* ICRDMA_HW_H */
diff --git a/drivers/infiniband/hw/irdma/icrdma_if.c b/drivers/infiniband/hw/irdma/icrdma_if.c
new file mode 100644
index 000000000000..27b191f61caf
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/icrdma_if.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2015 - 2024 Intel Corporation */
+
+#include "main.h"
+#include <linux/net/intel/iidc_rdma_ice.h>
+
+static void icrdma_prep_tc_change(struct irdma_device *iwdev)
+{
+ iwdev->vsi.tc_change_pending = true;
+ irdma_sc_suspend_resume_qps(&iwdev->vsi, IRDMA_OP_SUSPEND);
+
+ /* Wait for all qp's to suspend */
+ wait_event_timeout(iwdev->suspend_wq,
+ !atomic_read(&iwdev->vsi.qp_suspend_reqs),
+ msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS));
+ irdma_ws_reset(&iwdev->vsi);
+}
+
+static void icrdma_fill_qos_info(struct irdma_l2params *l2params,
+ struct iidc_rdma_qos_params *qos_info)
+{
+ int i;
+
+ l2params->num_tc = qos_info->num_tc;
+ l2params->vsi_prio_type = qos_info->vport_priority_type;
+ l2params->vsi_rel_bw = qos_info->vport_relative_bw;
+ for (i = 0; i < l2params->num_tc; i++) {
+ l2params->tc_info[i].egress_virt_up =
+ qos_info->tc_info[i].egress_virt_up;
+ l2params->tc_info[i].ingress_virt_up =
+ qos_info->tc_info[i].ingress_virt_up;
+ l2params->tc_info[i].prio_type = qos_info->tc_info[i].prio_type;
+ l2params->tc_info[i].rel_bw = qos_info->tc_info[i].rel_bw;
+ l2params->tc_info[i].tc_ctx = qos_info->tc_info[i].tc_ctx;
+ }
+ for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
+ l2params->up2tc[i] = qos_info->up2tc[i];
+ if (qos_info->pfc_mode == IIDC_DSCP_PFC_MODE) {
+ l2params->dscp_mode = true;
+ memcpy(l2params->dscp_map, qos_info->dscp_map, sizeof(l2params->dscp_map));
+ }
+}
+
+static void icrdma_iidc_event_handler(struct iidc_rdma_core_dev_info *cdev_info,
+ struct iidc_rdma_event *event)
+{
+ struct irdma_device *iwdev = dev_get_drvdata(&cdev_info->adev->dev);
+ struct irdma_l2params l2params = {};
+
+ if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE)) {
+ ibdev_dbg(&iwdev->ibdev, "CLNT: new MTU = %d\n", iwdev->netdev->mtu);
+ if (iwdev->vsi.mtu != iwdev->netdev->mtu) {
+ l2params.mtu = iwdev->netdev->mtu;
+ l2params.mtu_changed = true;
+ irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev);
+ irdma_change_l2params(&iwdev->vsi, &l2params);
+ }
+ } else if (*event->type & BIT(IIDC_RDMA_EVENT_BEFORE_TC_CHANGE)) {
+ if (iwdev->vsi.tc_change_pending)
+ return;
+
+ icrdma_prep_tc_change(iwdev);
+ } else if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_TC_CHANGE)) {
+ struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
+
+ if (!iwdev->vsi.tc_change_pending)
+ return;
+
+ l2params.tc_changed = true;
+ ibdev_dbg(&iwdev->ibdev, "CLNT: TC Change\n");
+
+ icrdma_fill_qos_info(&l2params, &idc_priv->qos_info);
+ if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
+ iwdev->dcb_vlan_mode =
+ l2params.num_tc > 1 && !l2params.dscp_mode;
+ irdma_change_l2params(&iwdev->vsi, &l2params);
+ } else if (*event->type & BIT(IIDC_RDMA_EVENT_CRIT_ERR)) {
+ ibdev_warn(&iwdev->ibdev, "ICE OICR event notification: oicr = 0x%08x\n",
+ event->reg);
+ if (event->reg & IRDMAPFINT_OICR_PE_CRITERR_M) {
+ u32 pe_criterr;
+
+ pe_criterr = readl(iwdev->rf->sc_dev.hw_regs[IRDMA_GLPE_CRITERR]);
+#define IRDMA_Q1_RESOURCE_ERR 0x0001024d
+ if (pe_criterr != IRDMA_Q1_RESOURCE_ERR) {
+ ibdev_err(&iwdev->ibdev, "critical PE Error, GLPE_CRITERR=0x%08x\n",
+ pe_criterr);
+ iwdev->rf->reset = true;
+ } else {
+ ibdev_warn(&iwdev->ibdev, "Q1 Resource Check\n");
+ }
+ }
+ if (event->reg & IRDMAPFINT_OICR_HMC_ERR_M) {
+ ibdev_err(&iwdev->ibdev, "HMC Error\n");
+ iwdev->rf->reset = true;
+ }
+ if (event->reg & IRDMAPFINT_OICR_PE_PUSH_M) {
+ ibdev_err(&iwdev->ibdev, "PE Push Error\n");
+ iwdev->rf->reset = true;
+ }
+ if (iwdev->rf->reset)
+ iwdev->rf->gen_ops.request_reset(iwdev->rf);
+ }
+}
+
+/**
+ * icrdma_lan_register_qset - Register qset with LAN driver
+ * @vsi: vsi structure
+ * @tc_node: Traffic class node
+ */
+static int icrdma_lan_register_qset(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node)
+{
+ struct irdma_device *iwdev = vsi->back_vsi;
+ struct iidc_rdma_core_dev_info *cdev_info = iwdev->rf->cdev;
+ struct iidc_rdma_qset_params qset = {};
+ int ret;
+
+ qset.qs_handle = tc_node->qs_handle;
+ qset.tc = tc_node->traffic_class;
+ qset.vport_id = vsi->vsi_idx;
+ ret = ice_add_rdma_qset(cdev_info, &qset);
+ if (ret) {
+ ibdev_dbg(&iwdev->ibdev, "WS: LAN alloc_res for rdma qset failed.\n");
+ return ret;
+ }
+
+ tc_node->l2_sched_node_id = qset.teid;
+ vsi->qos[tc_node->user_pri].l2_sched_node_id = qset.teid;
+
+ return 0;
+}
+
+/**
+ * icrdma_lan_unregister_qset - Unregister qset with LAN driver
+ * @vsi: vsi structure
+ * @tc_node: Traffic class node
+ */
+static void icrdma_lan_unregister_qset(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node)
+{
+ struct irdma_device *iwdev = vsi->back_vsi;
+ struct iidc_rdma_core_dev_info *cdev_info = iwdev->rf->cdev;
+ struct iidc_rdma_qset_params qset = {};
+
+ qset.qs_handle = tc_node->qs_handle;
+ qset.tc = tc_node->traffic_class;
+ qset.vport_id = vsi->vsi_idx;
+ qset.teid = tc_node->l2_sched_node_id;
+
+ if (ice_del_rdma_qset(cdev_info, &qset))
+ ibdev_dbg(&iwdev->ibdev, "WS: LAN free_res for rdma qset failed.\n");
+}
+
+/**
+ * icrdma_request_reset - Request a reset
+ * @rf: RDMA PCI function
+ */
+static void icrdma_request_reset(struct irdma_pci_f *rf)
+{
+ ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n");
+ ice_rdma_request_reset(rf->cdev, IIDC_FUNC_RESET);
+}
+
+static int icrdma_init_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev)
+{
+ int i;
+
+ rf->msix_count = num_online_cpus() + IRDMA_NUM_AEQ_MSIX;
+ rf->msix_entries = kcalloc(rf->msix_count, sizeof(*rf->msix_entries),
+ GFP_KERNEL);
+ if (!rf->msix_entries)
+ return -ENOMEM;
+
+ for (i = 0; i < rf->msix_count; i++)
+ if (ice_alloc_rdma_qvector(cdev, &rf->msix_entries[i]))
+ break;
+
+ if (i < IRDMA_MIN_MSIX) {
+ while (--i >= 0)
+ ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);
+
+ kfree(rf->msix_entries);
+ return -ENOMEM;
+ }
+
+ rf->msix_count = i;
+
+ return 0;
+}
+
+static void icrdma_deinit_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev)
+{
+ int i;
+
+ for (i = 0; i < rf->msix_count; i++)
+ ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);
+
+ kfree(rf->msix_entries);
+}
+
+static void icrdma_fill_device_info(struct irdma_device *iwdev,
+ struct iidc_rdma_core_dev_info *cdev_info)
+{
+ struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
+ struct irdma_pci_f *rf = iwdev->rf;
+
+ rf->sc_dev.hw = &rf->hw;
+ rf->iwdev = iwdev;
+ rf->cdev = cdev_info;
+ rf->hw.hw_addr = idc_priv->hw_addr;
+ rf->pcidev = cdev_info->pdev;
+ rf->hw.device = &rf->pcidev->dev;
+ rf->pf_id = idc_priv->pf_id;
+ rf->rdma_ver = IRDMA_GEN_2;
+ rf->sc_dev.hw_attrs.uk_attrs.hw_rev = IRDMA_GEN_2;
+ rf->sc_dev.is_pf = true;
+ rf->sc_dev.privileged = true;
+
+ rf->gen_ops.register_qset = icrdma_lan_register_qset;
+ rf->gen_ops.unregister_qset = icrdma_lan_unregister_qset;
+
+ rf->default_vsi.vsi_idx = idc_priv->vport_id;
+ rf->protocol_used =
+ cdev_info->rdma_protocol == IIDC_RDMA_PROTOCOL_ROCEV2 ?
+ IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY;
+ rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
+ rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
+ rf->gen_ops.request_reset = icrdma_request_reset;
+ rf->limits_sel = 7;
+ mutex_init(&rf->ah_tbl_lock);
+
+ iwdev->netdev = idc_priv->netdev;
+ iwdev->vsi_num = idc_priv->vport_id;
+ iwdev->init_state = INITIAL_STATE;
+ iwdev->roce_cwnd = IRDMA_ROCE_CWND_DEFAULT;
+ iwdev->roce_ackcreds = IRDMA_ROCE_ACKCREDS_DEFAULT;
+ iwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED;
+ iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
+ if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
+ iwdev->roce_mode = true;
+}
+
+static int icrdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id)
+{
+ struct iidc_rdma_core_auxiliary_dev *iidc_adev;
+ struct iidc_rdma_core_dev_info *cdev_info;
+ struct iidc_rdma_priv_dev_info *idc_priv;
+ struct irdma_l2params l2params = {};
+ struct irdma_device *iwdev;
+ struct irdma_pci_f *rf;
+ int err;
+
+ iidc_adev = container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
+ cdev_info = iidc_adev->cdev_info;
+ idc_priv = cdev_info->iidc_priv;
+
+ iwdev = ib_alloc_device(irdma_device, ibdev);
+ if (!iwdev)
+ return -ENOMEM;
+ iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL);
+ if (!iwdev->rf) {
+ ib_dealloc_device(&iwdev->ibdev);
+ return -ENOMEM;
+ }
+
+ icrdma_fill_device_info(iwdev, cdev_info);
+ rf = iwdev->rf;
+
+ err = icrdma_init_interrupts(rf, cdev_info);
+ if (err)
+ goto err_init_interrupts;
+
+ err = irdma_ctrl_init_hw(rf);
+ if (err)
+ goto err_ctrl_init;
+
+ l2params.mtu = iwdev->netdev->mtu;
+ icrdma_fill_qos_info(&l2params, &idc_priv->qos_info);
+ if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
+ iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;
+
+ err = irdma_rt_init_hw(iwdev, &l2params);
+ if (err)
+ goto err_rt_init;
+
+ err = irdma_ib_register_device(iwdev);
+ if (err)
+ goto err_ibreg;
+
+ ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, true);
+
+ ibdev_dbg(&iwdev->ibdev, "INIT: Gen2 PF[%d] device probe success\n", PCI_FUNC(rf->pcidev->devfn));
+ auxiliary_set_drvdata(aux_dev, iwdev);
+
+ return 0;
+
+err_ibreg:
+ irdma_rt_deinit_hw(iwdev);
+err_rt_init:
+ irdma_ctrl_deinit_hw(rf);
+err_ctrl_init:
+ icrdma_deinit_interrupts(rf, cdev_info);
+err_init_interrupts:
+ kfree(iwdev->rf);
+ ib_dealloc_device(&iwdev->ibdev);
+
+ return err;
+}
+
+static void icrdma_remove(struct auxiliary_device *aux_dev)
+{
+ struct iidc_rdma_core_auxiliary_dev *idc_adev =
+ container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
+ struct iidc_rdma_core_dev_info *cdev_info = idc_adev->cdev_info;
+ struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev);
+ u8 rdma_ver = iwdev->rf->rdma_ver;
+
+ ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, false);
+ irdma_ib_unregister_device(iwdev);
+ icrdma_deinit_interrupts(iwdev->rf, cdev_info);
+
+ pr_debug("INIT: Gen[%d] func[%d] device remove success\n",
+ rdma_ver, PCI_FUNC(cdev_info->pdev->devfn));
+}
+
+static const struct auxiliary_device_id icrdma_auxiliary_id_table[] = {
+ {.name = "ice.iwarp", },
+ {.name = "ice.roce", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, icrdma_auxiliary_id_table);
+
+struct iidc_rdma_core_auxiliary_drv icrdma_core_auxiliary_drv = {
+ .adrv = {
+ .name = "gen_2",
+ .id_table = icrdma_auxiliary_id_table,
+ .probe = icrdma_probe,
+ .remove = icrdma_remove,
+ },
+ .event_handler = icrdma_iidc_event_handler,
+};
diff --git a/drivers/infiniband/hw/irdma/ig3rdma_hw.c b/drivers/infiniband/hw/irdma/ig3rdma_hw.c
new file mode 100644
index 000000000000..2e8bb475e22a
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/ig3rdma_hw.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2018 - 2024 Intel Corporation */
+#include "osdep.h"
+#include "type.h"
+#include "protos.h"
+#include "ig3rdma_hw.h"
+
+/**
+ * ig3rdma_ena_irq - Enable interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ */
+static void ig3rdma_ena_irq(struct irdma_sc_dev *dev, u32 idx)
+{
+ u32 val;
+ u32 int_stride = 1; /* one u32 per register */
+
+ if (dev->is_pf)
+ int_stride = 0x400;
+ else
+ idx--; /* VFs use DYN_CTL_N */
+
+ val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, 1) |
+ FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, 1);
+
+ writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx * int_stride));
+}
+
+/**
+ * ig3rdma_disable_irq - Disable interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ */
+static void ig3rdma_disable_irq(struct irdma_sc_dev *dev, u32 idx)
+{
+ u32 int_stride = 1; /* one u32 per register */
+
+ if (dev->is_pf)
+ int_stride = 0x400;
+ else
+ idx--; /* VFs use DYN_CTL_N */
+
+ writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx * int_stride));
+}
+
+static const struct irdma_irq_ops ig3rdma_irq_ops = {
+ .irdma_dis_irq = ig3rdma_disable_irq,
+ .irdma_en_irq = ig3rdma_ena_irq,
+};
+
+static const struct irdma_hw_stat_map ig3rdma_hw_stat_map[] = {
+ [IRDMA_HW_STAT_INDEX_RXVLANERR] = { 0, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXOCTS] = { 8, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXPKTS] = { 16, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = { 24, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = { 32, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = { 40, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = { 48, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = { 56, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXOCTS] = { 64, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXPKTS] = { 72, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = { 80, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = { 88, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = { 96, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = { 104, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = { 112, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4TXOCTS] = { 120, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4TXPKTS] = { 128, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = { 136, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = { 144, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = { 152, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6TXOCTS] = { 160, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6TXPKTS] = { 168, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = { 176, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = { 184, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = { 192, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = { 200, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = { 208, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = { 216, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = { 224, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = { 232, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_TCPTXSEG] = { 240, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_TCPRXSEGS] = { 248, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_UDPRXPKTS] = { 256, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_UDPTXPKTS] = { 264, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMARXWRS] = { 272, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMARXRDS] = { 280, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMARXSNDS] = { 288, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMATXWRS] = { 296, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMATXRDS] = { 304, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMATXSNDS] = { 312, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMAVBND] = { 320, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMAVINV] = { 328, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = { 336, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = { 344, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = { 352, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = { 360, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RNR_SENT] = { 368, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RNR_RCVD] = { 376, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMAORDLMTCNT] = { 384, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMAIRDLMTCNT] = { 392, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMARXATS] = { 408, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMATXATS] = { 416, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_NAKSEQERR] = { 424, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_NAKSEQERR_IMPLIED] = { 432, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RTO] = { 440, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RXOOOPKTS] = { 448, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_ICRCERR] = { 456, 0, 0 },
+};
+
+void ig3rdma_init_hw(struct irdma_sc_dev *dev)
+{
+ dev->irq_ops = &ig3rdma_irq_ops;
+ dev->hw_stats_map = ig3rdma_hw_stat_map;
+
+ dev->hw_attrs.uk_attrs.hw_rev = IRDMA_GEN_3;
+ dev->hw_attrs.uk_attrs.max_hw_wq_frags = IG3RDMA_MAX_WQ_FRAGMENT_COUNT;
+ dev->hw_attrs.uk_attrs.max_hw_read_sges = IG3RDMA_MAX_SGE_RD;
+ dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
+ dev->hw_attrs.first_hw_vf_fpm_id = 0;
+ dev->hw_attrs.max_hw_vf_fpm_id = IG3_MAX_APFS + IG3_MAX_AVFS;
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_64_BYTE_CQE;
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_CQE_TIMESTAMPING;
+
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_SRQ;
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE |
+ IRDMA_FEATURE_CQ_RESIZE;
+ dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
+ dev->hw_attrs.max_hw_ird = IG3RDMA_MAX_IRD_SIZE;
+ dev->hw_attrs.max_hw_ord = IG3RDMA_MAX_ORD_SIZE;
+ dev->hw_attrs.max_stat_inst = IG3RDMA_MAX_STATS_COUNT;
+ dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_3;
+ dev->hw_attrs.uk_attrs.min_hw_wq_size = IG3RDMA_MIN_WQ_SIZE;
+ dev->hw_attrs.uk_attrs.max_hw_srq_quanta = IRDMA_SRQ_MAX_QUANTA;
+ dev->hw_attrs.uk_attrs.max_hw_inline = IG3RDMA_MAX_INLINE_DATA_SIZE;
+ dev->hw_attrs.max_hw_device_pages =
+ dev->is_pf ? IG3RDMA_MAX_PF_PUSH_PAGE_COUNT : IG3RDMA_MAX_VF_PUSH_PAGE_COUNT;
+}
+
+static void __iomem *__ig3rdma_get_reg_addr(struct irdma_mmio_region *region, u64 reg_offset)
+{
+ if (reg_offset >= region->offset &&
+ reg_offset < (region->offset + region->len)) {
+ reg_offset -= region->offset;
+
+ return region->addr + reg_offset;
+ }
+
+ return NULL;
+}
+
+void __iomem *ig3rdma_get_reg_addr(struct irdma_hw *hw, u64 reg_offset)
+{
+ u8 __iomem *reg_addr;
+ int i;
+
+ reg_addr = __ig3rdma_get_reg_addr(&hw->rdma_reg, reg_offset);
+ if (reg_addr)
+ return reg_addr;
+
+ for (i = 0; i < hw->num_io_regions; i++) {
+ reg_addr = __ig3rdma_get_reg_addr(&hw->io_regs[i], reg_offset);
+ if (reg_addr)
+ return reg_addr;
+ }
+
+ WARN_ON_ONCE(1);
+
+ return NULL;
+}
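ig3rdma_get_reg_addr() above resolves a flat register offset to whichever mapped MMIO region contains it, checking the dedicated RDMA region first and then the additional I/O regions. A minimal usage sketch (hypothetical wrapper, illustration only):

static u32 ig3rdma_example_read_reg(struct irdma_hw *hw, u64 reg_offset)
{
	void __iomem *addr = ig3rdma_get_reg_addr(hw, reg_offset);

	/* ig3rdma_get_reg_addr() warns and returns NULL for an unmapped offset */
	return addr ? readl(addr) : 0;
}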
diff --git a/drivers/infiniband/hw/irdma/ig3rdma_hw.h b/drivers/infiniband/hw/irdma/ig3rdma_hw.h
new file mode 100644
index 000000000000..03d5f1188789
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/ig3rdma_hw.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 - 2024 Intel Corporation */
+#ifndef IG3RDMA_HW_H
+#define IG3RDMA_HW_H
+
+#define IG3_MAX_APFS 1
+#define IG3_MAX_AVFS 0
+
+#define IG3_PF_RDMA_REGION_OFFSET 0xBC00000
+#define IG3_PF_RDMA_REGION_LEN 0x401000
+#define IG3_VF_RDMA_REGION_OFFSET 0x8C00
+#define IG3_VF_RDMA_REGION_LEN 0x8400
+
+enum ig3rdma_device_caps_const {
+ IG3RDMA_MAX_WQ_FRAGMENT_COUNT = 14,
+ IG3RDMA_MAX_SGE_RD = 14,
+
+ IG3RDMA_MAX_STATS_COUNT = 128,
+
+ IG3RDMA_MAX_IRD_SIZE = 64,
+ IG3RDMA_MAX_ORD_SIZE = 64,
+ IG3RDMA_MIN_WQ_SIZE = 16 /* WQEs */,
+ IG3RDMA_MAX_INLINE_DATA_SIZE = 216,
+ IG3RDMA_MAX_PF_PUSH_PAGE_COUNT = 8192,
+ IG3RDMA_MAX_VF_PUSH_PAGE_COUNT = 16,
+};
+
+void __iomem *ig3rdma_get_reg_addr(struct irdma_hw *hw, u64 reg_offset);
+int ig3rdma_vchnl_send_sync(struct irdma_sc_dev *dev, u8 *msg, u16 len,
+ u8 *recv_msg, u16 *recv_len);
+
+#endif /* IG3RDMA_HW_H */
diff --git a/drivers/infiniband/hw/irdma/ig3rdma_if.c b/drivers/infiniband/hw/irdma/ig3rdma_if.c
new file mode 100644
index 000000000000..1bb42eb298ba
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/ig3rdma_if.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2023 - 2024 Intel Corporation */
+
+#include "main.h"
+#include <linux/net/intel/iidc_rdma_idpf.h>
+#include "ig3rdma_hw.h"
+
+static void ig3rdma_idc_core_event_handler(struct iidc_rdma_core_dev_info *cdev_info,
+ struct iidc_rdma_event *event)
+{
+ struct irdma_pci_f *rf = auxiliary_get_drvdata(cdev_info->adev);
+
+ if (*event->type & BIT(IIDC_RDMA_EVENT_WARN_RESET)) {
+ rf->reset = true;
+ rf->sc_dev.vchnl_up = false;
+ }
+}
+
+int ig3rdma_vchnl_send_sync(struct irdma_sc_dev *dev, u8 *msg, u16 len,
+ u8 *recv_msg, u16 *recv_len)
+{
+ struct iidc_rdma_core_dev_info *cdev_info = dev_to_rf(dev)->cdev;
+ int ret;
+
+ ret = idpf_idc_rdma_vc_send_sync(cdev_info, msg, len, recv_msg,
+ recv_len);
+ if (ret == -ETIMEDOUT) {
+ ibdev_err(&(dev_to_rf(dev)->iwdev->ibdev),
+ "Virtual channel Req <-> Resp completion timeout\n");
+ dev->vchnl_up = false;
+ }
+
+ return ret;
+}
+
+static int ig3rdma_vchnl_init(struct irdma_pci_f *rf,
+ struct iidc_rdma_core_dev_info *cdev_info,
+ u8 *rdma_ver)
+{
+ struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
+ struct irdma_vchnl_init_info virt_info;
+ u8 gen = rf->rdma_ver;
+ int ret;
+
+ rf->vchnl_wq = alloc_ordered_workqueue("irdma-virtchnl-wq", 0);
+ if (!rf->vchnl_wq)
+ return -ENOMEM;
+
+ mutex_init(&rf->sc_dev.vchnl_mutex);
+
+ virt_info.is_pf = !idc_priv->ftype;
+ virt_info.hw_rev = gen;
+ virt_info.privileged = gen == IRDMA_GEN_2;
+ virt_info.vchnl_wq = rf->vchnl_wq;
+ ret = irdma_sc_vchnl_init(&rf->sc_dev, &virt_info);
+ if (ret) {
+ destroy_workqueue(rf->vchnl_wq);
+ return ret;
+ }
+
+ *rdma_ver = rf->sc_dev.hw_attrs.uk_attrs.hw_rev;
+
+ return 0;
+}
+
+/**
+ * ig3rdma_request_reset - Request a reset
+ * @rf: RDMA PCI function
+ */
+static void ig3rdma_request_reset(struct irdma_pci_f *rf)
+{
+ ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n");
+ idpf_idc_request_reset(rf->cdev, IIDC_FUNC_RESET);
+}
+
+static int ig3rdma_cfg_regions(struct irdma_hw *hw,
+ struct iidc_rdma_core_dev_info *cdev_info)
+{
+ struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
+ struct pci_dev *pdev = cdev_info->pdev;
+ int i;
+
+ switch (idc_priv->ftype) {
+ case IIDC_FUNCTION_TYPE_PF:
+ hw->rdma_reg.len = IG3_PF_RDMA_REGION_LEN;
+ hw->rdma_reg.offset = IG3_PF_RDMA_REGION_OFFSET;
+ break;
+ case IIDC_FUNCTION_TYPE_VF:
+ hw->rdma_reg.len = IG3_VF_RDMA_REGION_LEN;
+ hw->rdma_reg.offset = IG3_VF_RDMA_REGION_OFFSET;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ hw->rdma_reg.addr = ioremap(pci_resource_start(pdev, 0) + hw->rdma_reg.offset,
+ hw->rdma_reg.len);
+
+ if (!hw->rdma_reg.addr)
+ return -ENOMEM;
+
+ hw->num_io_regions = le16_to_cpu(idc_priv->num_memory_regions);
+ hw->io_regs = kcalloc(hw->num_io_regions,
+ sizeof(struct irdma_mmio_region), GFP_KERNEL);
+
+ if (!hw->io_regs) {
+ iounmap(hw->rdma_reg.addr);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < hw->num_io_regions; i++) {
+ hw->io_regs[i].addr =
+ idc_priv->mapped_mem_regions[i].region_addr;
+ hw->io_regs[i].len =
+ le64_to_cpu(idc_priv->mapped_mem_regions[i].size);
+ hw->io_regs[i].offset =
+ le64_to_cpu(idc_priv->mapped_mem_regions[i].start_offset);
+ }
+
+ return 0;
+}
+
+static void ig3rdma_decfg_rf(struct irdma_pci_f *rf)
+{
+ struct irdma_hw *hw = &rf->hw;
+
+ destroy_workqueue(rf->vchnl_wq);
+ kfree(hw->io_regs);
+ iounmap(hw->rdma_reg.addr);
+}
+
+static int ig3rdma_cfg_rf(struct irdma_pci_f *rf,
+ struct iidc_rdma_core_dev_info *cdev_info)
+{
+ struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
+ int err;
+
+ rf->sc_dev.hw = &rf->hw;
+ rf->cdev = cdev_info;
+ rf->pcidev = cdev_info->pdev;
+ rf->hw.device = &rf->pcidev->dev;
+ rf->msix_count = idc_priv->msix_count;
+ rf->msix_entries = idc_priv->msix_entries;
+
+ err = ig3rdma_vchnl_init(rf, cdev_info, &rf->rdma_ver);
+ if (err)
+ return err;
+
+ err = ig3rdma_cfg_regions(&rf->hw, cdev_info);
+ if (err) {
+ destroy_workqueue(rf->vchnl_wq);
+ return err;
+ }
+
+ rf->protocol_used = IRDMA_ROCE_PROTOCOL_ONLY;
+ rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
+ rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
+ rf->gen_ops.request_reset = ig3rdma_request_reset;
+ rf->limits_sel = 7;
+ mutex_init(&rf->ah_tbl_lock);
+
+ return 0;
+}
+
+static int ig3rdma_core_probe(struct auxiliary_device *aux_dev,
+ const struct auxiliary_device_id *id)
+{
+ struct iidc_rdma_core_auxiliary_dev *idc_adev =
+ container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
+ struct iidc_rdma_core_dev_info *cdev_info = idc_adev->cdev_info;
+ struct irdma_pci_f *rf;
+ int err;
+
+ rf = kzalloc(sizeof(*rf), GFP_KERNEL);
+ if (!rf)
+ return -ENOMEM;
+
+ err = ig3rdma_cfg_rf(rf, cdev_info);
+ if (err)
+ goto err_cfg_rf;
+
+ err = irdma_ctrl_init_hw(rf);
+ if (err)
+ goto err_ctrl_init;
+
+ auxiliary_set_drvdata(aux_dev, rf);
+
+ err = idpf_idc_vport_dev_ctrl(cdev_info, true);
+ if (err)
+ goto err_vport_ctrl;
+
+ return 0;
+
+err_vport_ctrl:
+ irdma_ctrl_deinit_hw(rf);
+err_ctrl_init:
+ ig3rdma_decfg_rf(rf);
+err_cfg_rf:
+ kfree(rf);
+
+ return err;
+}
+
+static void ig3rdma_core_remove(struct auxiliary_device *aux_dev)
+{
+ struct iidc_rdma_core_auxiliary_dev *idc_adev =
+ container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
+ struct iidc_rdma_core_dev_info *cdev_info = idc_adev->cdev_info;
+ struct irdma_pci_f *rf = auxiliary_get_drvdata(aux_dev);
+
+ idpf_idc_vport_dev_ctrl(cdev_info, false);
+ irdma_ctrl_deinit_hw(rf);
+ ig3rdma_decfg_rf(rf);
+ kfree(rf);
+}
+
+static const struct auxiliary_device_id ig3rdma_core_auxiliary_id_table[] = {
+ {.name = "idpf.8086.rdma.core", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, ig3rdma_core_auxiliary_id_table);
+
+struct iidc_rdma_core_auxiliary_drv ig3rdma_core_auxiliary_drv = {
+ .adrv = {
+ .name = "core",
+ .id_table = ig3rdma_core_auxiliary_id_table,
+ .probe = ig3rdma_core_probe,
+ .remove = ig3rdma_core_remove,
+ },
+ .event_handler = ig3rdma_idc_core_event_handler,
+};
diff --git a/drivers/infiniband/hw/irdma/irdma.h b/drivers/infiniband/hw/irdma/irdma.h
new file mode 100644
index 000000000000..ff938a01d70c
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/irdma.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2017 - 2021 Intel Corporation */
+#ifndef IRDMA_H
+#define IRDMA_H
+
+#define IRDMA_WQEALLOC_WQE_DESC_INDEX GENMASK(31, 20)
+
+#define IRDMA_CQPTAIL_WQTAIL GENMASK(10, 0)
+#define IRDMA_CQPTAIL_CQP_OP_ERR BIT(31)
+
+#define IRDMA_CQPERRCODES_CQP_MINOR_CODE GENMASK(15, 0)
+#define IRDMA_CQPERRCODES_CQP_MAJOR_CODE GENMASK(31, 16)
+#define IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE GENMASK(5, 4)
+#define IRDMA_GLINT_RATE_INTERVAL GENMASK(5, 0)
+#define IRDMA_GLINT_RATE_INTRL_ENA BIT(6)
+#define IRDMA_GLINT_DYN_CTL_INTENA BIT(0)
+#define IRDMA_GLINT_DYN_CTL_CLEARPBA BIT(1)
+#define IRDMA_GLINT_DYN_CTL_ITR_INDX GENMASK(4, 3)
+#define IRDMA_GLINT_DYN_CTL_INTERVAL GENMASK(16, 5)
+#define IRDMA_GLINT_CEQCTL_ITR_INDX GENMASK(12, 11)
+#define IRDMA_GLINT_CEQCTL_CAUSE_ENA BIT(30)
+#define IRDMA_GLINT_CEQCTL_MSIX_INDX GENMASK(10, 0)
+#define IRDMA_PFINT_AEQCTL_MSIX_INDX GENMASK(10, 0)
+#define IRDMA_PFINT_AEQCTL_ITR_INDX GENMASK(12, 11)
+#define IRDMA_PFINT_AEQCTL_CAUSE_ENA BIT(30)
+#define IRDMA_PFHMC_PDINV_PMSDIDX GENMASK(11, 0)
+#define IRDMA_PFHMC_PDINV_PMSDPARTSEL BIT(15)
+#define IRDMA_PFHMC_PDINV_PMPDIDX GENMASK(24, 16)
+#define IRDMA_PFHMC_SDDATALOW_PMSDVALID BIT(0)
+#define IRDMA_PFHMC_SDDATALOW_PMSDTYPE BIT(1)
+#define IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT GENMASK(11, 2)
+#define IRDMA_PFHMC_SDDATALOW_PMSDDATALOW GENMASK(31, 12)
+#define IRDMA_PFHMC_SDCMD_PMSDWR BIT(31)
+
+#define IRDMA_INVALID_CQ_IDX 0xffffffff
+#define IRDMA_Q_INVALID_IDX 0xffff
+
+enum irdma_dyn_idx_t {
+ IRDMA_IDX_ITR0 = 0,
+ IRDMA_IDX_ITR1 = 1,
+ IRDMA_IDX_ITR2 = 2,
+ IRDMA_IDX_NOITR = 3,
+};
+
+enum irdma_registers {
+ IRDMA_CQPTAIL,
+ IRDMA_CQPDB,
+ IRDMA_CCQPSTATUS,
+ IRDMA_CCQPHIGH,
+ IRDMA_CCQPLOW,
+ IRDMA_CQARM,
+ IRDMA_CQACK,
+ IRDMA_AEQALLOC,
+ IRDMA_CQPERRCODES,
+ IRDMA_WQEALLOC,
+ IRDMA_GLINT_DYN_CTL,
+ IRDMA_DB_ADDR_OFFSET,
+ IRDMA_GLPCI_LBARCTRL,
+ IRDMA_GLPE_CPUSTATUS0,
+ IRDMA_GLPE_CPUSTATUS1,
+ IRDMA_GLPE_CPUSTATUS2,
+ IRDMA_PFINT_AEQCTL,
+ IRDMA_GLINT_CEQCTL,
+ IRDMA_VSIQF_PE_CTL1,
+ IRDMA_PFHMC_PDINV,
+ IRDMA_GLHMC_VFPDINV,
+ IRDMA_GLPE_CRITERR,
+ IRDMA_GLINT_RATE,
+ IRDMA_MAX_REGS, /* Must be last entry */
+};
+
+enum irdma_shifts {
+ IRDMA_CCQPSTATUS_CCQP_DONE_S,
+ IRDMA_CCQPSTATUS_CCQP_ERR_S,
+ IRDMA_CQPSQ_STAG_PDID_S,
+ IRDMA_CQPSQ_CQ_CEQID_S,
+ IRDMA_CQPSQ_CQ_CQID_S,
+ IRDMA_COMMIT_FPM_CQCNT_S,
+ IRDMA_CQPSQ_UPESD_HMCFNID_S,
+ IRDMA_MAX_SHIFTS,
+};
+
+enum irdma_masks {
+ IRDMA_CCQPSTATUS_CCQP_DONE_M,
+ IRDMA_CCQPSTATUS_CCQP_ERR_M,
+ IRDMA_CQPSQ_STAG_PDID_M,
+ IRDMA_CQPSQ_CQ_CEQID_M,
+ IRDMA_CQPSQ_CQ_CQID_M,
+ IRDMA_COMMIT_FPM_CQCNT_M,
+ IRDMA_CQPSQ_UPESD_HMCFNID_M,
+ IRDMA_MAX_MASKS, /* Must be last entry */
+};
+
+#define IRDMA_MAX_MGS_PER_CTX 8
+
+struct irdma_mcast_grp_ctx_entry_info {
+ u32 qp_id;
+ bool valid_entry;
+ u16 dest_port;
+ u32 use_cnt;
+};
+
+struct irdma_mcast_grp_info {
+ u8 dest_mac_addr[ETH_ALEN];
+ u16 vlan_id;
+ u16 hmc_fcn_id;
+ bool ipv4_valid:1;
+ bool vlan_valid:1;
+ u16 mg_id;
+ u32 no_of_mgs;
+ u32 dest_ip_addr[4];
+ u16 qs_handle;
+ struct irdma_dma_mem dma_mem_mc;
+ struct irdma_mcast_grp_ctx_entry_info mg_ctx_info[IRDMA_MAX_MGS_PER_CTX];
+};
+
+enum irdma_vers {
+ IRDMA_GEN_RSVD,
+ IRDMA_GEN_1,
+ IRDMA_GEN_2,
+ IRDMA_GEN_3,
+ IRDMA_GEN_NEXT,
+ IRDMA_GEN_MAX = IRDMA_GEN_NEXT - 1
+};
+
+struct irdma_uk_attrs {
+ u64 feature_flags;
+ u32 max_hw_wq_frags;
+ u32 max_hw_read_sges;
+ u32 max_hw_inline;
+ u32 max_hw_rq_quanta;
+ u32 max_hw_wq_quanta;
+ u32 min_hw_cq_size;
+ u32 max_hw_cq_size;
+ u32 max_hw_srq_quanta;
+ u16 max_hw_sq_chunk;
+ u16 min_hw_wq_size;
+ u8 hw_rev;
+};
+
+struct irdma_hw_attrs {
+ struct irdma_uk_attrs uk_attrs;
+ u64 max_hw_outbound_msg_size;
+ u64 max_hw_inbound_msg_size;
+ u64 max_mr_size;
+ u64 page_size_cap;
+ u32 min_hw_qp_id;
+ u32 min_hw_aeq_size;
+ u32 max_hw_aeq_size;
+ u32 min_hw_ceq_size;
+ u32 max_hw_ceq_size;
+ u32 max_hw_device_pages;
+ u32 max_hw_vf_fpm_id;
+ u32 first_hw_vf_fpm_id;
+ u32 max_hw_ird;
+ u32 max_hw_ord;
+ u32 max_hw_wqes;
+ u32 max_hw_pds;
+ u32 max_hw_ena_vf_count;
+ u32 max_qp_wr;
+ u32 max_pe_ready_count;
+ u32 max_done_count;
+ u32 max_sleep_count;
+ u32 max_cqp_compl_wait_time_ms;
+ u32 min_hw_srq_id;
+ u16 max_stat_inst;
+ u16 max_stat_idx;
+};
+
+void i40iw_init_hw(struct irdma_sc_dev *dev);
+void icrdma_init_hw(struct irdma_sc_dev *dev);
+void ig3rdma_init_hw(struct irdma_sc_dev *dev);
+void __iomem *ig3rdma_get_reg_addr(struct irdma_hw *hw, u64 reg_offset);
+#endif /* IRDMA_H */
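irdma.h above declares one init entry point per hardware generation; the device init path selects among them based on rdma_ver. A minimal dispatch sketch (hypothetical helper; the real selection lives in the common ctrl init code):

static void irdma_example_init_hw_by_gen(struct irdma_sc_dev *dev, u8 rdma_ver)
{
	switch (rdma_ver) {
	case IRDMA_GEN_1:
		i40iw_init_hw(dev);
		break;
	case IRDMA_GEN_2:
		icrdma_init_hw(dev);
		break;
	default:	/* IRDMA_GEN_3 and later */
		ig3rdma_init_hw(dev);
		break;
	}
}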
diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
new file mode 100644
index 000000000000..95957d52883d
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/main.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "main.h"
+#include <linux/net/intel/iidc_rdma_idpf.h>
+
+MODULE_ALIAS("i40iw");
+MODULE_DESCRIPTION("Intel(R) Ethernet Protocol Driver for RDMA");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static struct notifier_block irdma_inetaddr_notifier = {
+ .notifier_call = irdma_inetaddr_event
+};
+
+static struct notifier_block irdma_inetaddr6_notifier = {
+ .notifier_call = irdma_inet6addr_event
+};
+
+static struct notifier_block irdma_net_notifier = {
+ .notifier_call = irdma_net_event
+};
+
+static struct notifier_block irdma_netdevice_notifier = {
+ .notifier_call = irdma_netdevice_event
+};
+
+static void irdma_register_notifiers(void)
+{
+ register_inetaddr_notifier(&irdma_inetaddr_notifier);
+ register_inet6addr_notifier(&irdma_inetaddr6_notifier);
+ register_netevent_notifier(&irdma_net_notifier);
+ register_netdevice_notifier(&irdma_netdevice_notifier);
+}
+
+static void irdma_unregister_notifiers(void)
+{
+ unregister_netevent_notifier(&irdma_net_notifier);
+ unregister_inetaddr_notifier(&irdma_inetaddr_notifier);
+ unregister_inet6addr_notifier(&irdma_inetaddr6_notifier);
+ unregister_netdevice_notifier(&irdma_netdevice_notifier);
+}
+
+void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
+{
+ if (mtu < IRDMA_MIN_MTU_IPV4)
+ ibdev_warn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 576 for IPv4\n", mtu);
+ else if (mtu < IRDMA_MIN_MTU_IPV6)
+ ibdev_warn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\n", mtu);
+}
+
+static void ig3rdma_idc_vport_event_handler(struct iidc_rdma_vport_dev_info *cdev_info,
+ struct iidc_rdma_event *event)
+{
+ struct irdma_device *iwdev = auxiliary_get_drvdata(cdev_info->adev);
+ struct irdma_l2params l2params = {};
+
+ if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE)) {
+ ibdev_dbg(&iwdev->ibdev, "CLNT: new MTU = %d\n", iwdev->netdev->mtu);
+ if (iwdev->vsi.mtu != iwdev->netdev->mtu) {
+ l2params.mtu = iwdev->netdev->mtu;
+ l2params.mtu_changed = true;
+ irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev);
+ irdma_change_l2params(&iwdev->vsi, &l2params);
+ }
+ }
+}
+
+static int ig3rdma_vport_probe(struct auxiliary_device *aux_dev,
+ const struct auxiliary_device_id *id)
+{
+ struct iidc_rdma_vport_auxiliary_dev *idc_adev =
+ container_of(aux_dev, struct iidc_rdma_vport_auxiliary_dev, adev);
+ struct auxiliary_device *aux_core_dev = idc_adev->vdev_info->core_adev;
+ struct irdma_pci_f *rf = auxiliary_get_drvdata(aux_core_dev);
+ struct irdma_l2params l2params = {};
+ struct irdma_device *iwdev;
+ int err;
+
+ if (!rf) {
+ WARN_ON_ONCE(1);
+ return -ENOMEM;
+ }
+ iwdev = ib_alloc_device(irdma_device, ibdev);
+ if (!iwdev)
+ return -ENOMEM;
+
+ /* Fill iwdev info */
+ iwdev->is_vport = true;
+ iwdev->rf = rf;
+ iwdev->vport_id = idc_adev->vdev_info->vport_id;
+ iwdev->netdev = idc_adev->vdev_info->netdev;
+ iwdev->init_state = INITIAL_STATE;
+ iwdev->roce_cwnd = IRDMA_ROCE_CWND_DEFAULT;
+ iwdev->roce_ackcreds = IRDMA_ROCE_ACKCREDS_DEFAULT;
+ iwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED;
+ iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
+ iwdev->roce_mode = true;
+ iwdev->push_mode = false;
+
+ l2params.mtu = iwdev->netdev->mtu;
+
+ err = irdma_rt_init_hw(iwdev, &l2params);
+ if (err)
+ goto err_rt_init;
+
+ err = irdma_ib_register_device(iwdev);
+ if (err)
+ goto err_ibreg;
+
+ auxiliary_set_drvdata(aux_dev, iwdev);
+
+ ibdev_dbg(&iwdev->ibdev,
+ "INIT: Gen[%d] vport[%d] probe success. dev_name = %s, core_dev_name = %s, netdev=%s\n",
+ rf->rdma_ver, idc_adev->vdev_info->vport_id,
+ dev_name(&aux_dev->dev),
+ dev_name(&idc_adev->vdev_info->core_adev->dev),
+ netdev_name(idc_adev->vdev_info->netdev));
+
+ return 0;
+err_ibreg:
+ irdma_rt_deinit_hw(iwdev);
+err_rt_init:
+ ib_dealloc_device(&iwdev->ibdev);
+
+ return err;
+}
+
+static void ig3rdma_vport_remove(struct auxiliary_device *aux_dev)
+{
+ struct iidc_rdma_vport_auxiliary_dev *idc_adev =
+ container_of(aux_dev, struct iidc_rdma_vport_auxiliary_dev, adev);
+ struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev);
+
+ ibdev_dbg(&iwdev->ibdev,
+ "INIT: Gen[%d] dev_name = %s, core_dev_name = %s, netdev=%s\n",
+ iwdev->rf->rdma_ver, dev_name(&aux_dev->dev),
+ dev_name(&idc_adev->vdev_info->core_adev->dev),
+ netdev_name(idc_adev->vdev_info->netdev));
+
+ irdma_ib_unregister_device(iwdev);
+}
+
+static const struct auxiliary_device_id ig3rdma_vport_auxiliary_id_table[] = {
+ {.name = "idpf.8086.rdma.vdev", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, ig3rdma_vport_auxiliary_id_table);
+
+static struct iidc_rdma_vport_auxiliary_drv ig3rdma_vport_auxiliary_drv = {
+ .adrv = {
+ .name = "vdev",
+ .id_table = ig3rdma_vport_auxiliary_id_table,
+ .probe = ig3rdma_vport_probe,
+ .remove = ig3rdma_vport_remove,
+ },
+ .event_handler = ig3rdma_idc_vport_event_handler,
+};
+
+static int __init irdma_init_module(void)
+{
+ int ret;
+
+ ret = auxiliary_driver_register(&i40iw_auxiliary_drv);
+ if (ret) {
+ pr_err("Failed i40iw(gen_1) auxiliary_driver_register() ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ ret = auxiliary_driver_register(&icrdma_core_auxiliary_drv.adrv);
+ if (ret) {
+ auxiliary_driver_unregister(&i40iw_auxiliary_drv);
+ pr_err("Failed icrdma(gen_2) auxiliary_driver_register() ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ ret = auxiliary_driver_register(&ig3rdma_core_auxiliary_drv.adrv);
+ if (ret) {
+ auxiliary_driver_unregister(&icrdma_core_auxiliary_drv.adrv);
+ auxiliary_driver_unregister(&i40iw_auxiliary_drv);
+ pr_err("Failed ig3rdma(gen_3) core auxiliary_driver_register() ret=%d\n",
+ ret);
+
+ return ret;
+ }
+
+ ret = auxiliary_driver_register(&ig3rdma_vport_auxiliary_drv.adrv);
+ if (ret) {
+ auxiliary_driver_unregister(&ig3rdma_core_auxiliary_drv.adrv);
+ auxiliary_driver_unregister(&icrdma_core_auxiliary_drv.adrv);
+ auxiliary_driver_unregister(&i40iw_auxiliary_drv);
+ pr_err("Failed ig3rdma vport auxiliary_driver_register() ret=%d\n",
+ ret);
+
+ return ret;
+ }
+ irdma_register_notifiers();
+
+ return 0;
+}
+
+static void __exit irdma_exit_module(void)
+{
+ irdma_unregister_notifiers();
+ auxiliary_driver_unregister(&ig3rdma_vport_auxiliary_drv.adrv);
+ auxiliary_driver_unregister(&ig3rdma_core_auxiliary_drv.adrv);
+ auxiliary_driver_unregister(&icrdma_core_auxiliary_drv.adrv);
+ auxiliary_driver_unregister(&i40iw_auxiliary_drv);
+}
+
+module_init(irdma_init_module);
+module_exit(irdma_exit_module);
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
new file mode 100644
index 000000000000..886b30da188a
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -0,0 +1,579 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#ifndef IRDMA_MAIN_H
+#define IRDMA_MAIN_H
+
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include <net/addrconf.h>
+#include <net/netevent.h>
+#include <net/tcp.h>
+#include <net/ip6_route.h>
+#include <net/flow.h>
+#include <net/secure_seq.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/crc32c.h>
+#include <linux/kthread.h>
+#ifndef CONFIG_64BIT
+#include <linux/io-64-nonatomic-lo-hi.h>
+#endif
+#include <linux/auxiliary_bus.h>
+#include <linux/net/intel/iidc_rdma.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_pack.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_cache.h>
+#include <rdma/uverbs_ioctl.h>
+#include "osdep.h"
+#include "defs.h"
+#include "hmc.h"
+#include "type.h"
+#include "ws.h"
+#include "protos.h"
+#include "pble.h"
+#include "cm.h"
+#include <rdma/irdma-abi.h>
+#include "verbs.h"
+#include "user.h"
+#include "puda.h"
+
+extern struct auxiliary_driver i40iw_auxiliary_drv;
+extern struct iidc_rdma_core_auxiliary_drv icrdma_core_auxiliary_drv;
+extern struct iidc_rdma_core_auxiliary_drv ig3rdma_core_auxiliary_drv;
+
+#define IRDMA_FW_VER_DEFAULT 2
+#define IRDMA_HW_VER 2
+
+#define IRDMA_ARP_ADD 1
+#define IRDMA_ARP_DELETE 2
+#define IRDMA_ARP_RESOLVE 3
+
+#define IRDMA_MACIP_ADD 1
+#define IRDMA_MACIP_DELETE 2
+
+#define IW_GEN_3_CCQ_SIZE (2 * IRDMA_CQP_SW_SQSIZE_2048 + 2)
+#define IW_CCQ_SIZE (IRDMA_CQP_SW_SQSIZE_2048 + 2)
+#define IW_CEQ_SIZE 2048
+#define IW_AEQ_SIZE 2048
+
+#define RX_BUF_SIZE (1536 + 8)
+#define IW_REG0_SIZE (4 * 1024)
+#define IW_TX_TIMEOUT (6 * HZ)
+#define IW_FIRST_QPN 1
+
+#define IW_SW_CONTEXT_ALIGN 1024
+
+#define MAX_DPC_ITERATIONS 128
+
+#define IRDMA_EVENT_TIMEOUT_MS 5000
+#define IRDMA_VCHNL_EVENT_TIMEOUT 100000
+#define IRDMA_RST_TIMEOUT_HZ 4
+
+#define IRDMA_NO_QSET 0xffff
+
+#define IW_CFG_FPM_QP_COUNT 32768
+#define IRDMA_MAX_PAGES_PER_FMR 262144
+#define IRDMA_MIN_PAGES_PER_FMR 1
+#define IRDMA_CQP_COMPL_RQ_WQE_FLUSHED 2
+#define IRDMA_CQP_COMPL_SQ_WQE_FLUSHED 3
+
+#define IRDMA_Q_TYPE_PE_AEQ 0x80
+#define IRDMA_Q_INVALID_IDX 0xffff
+#define IRDMA_REM_ENDPOINT_TRK_QPID 3
+
+#define IRDMA_DRV_OPT_ENA_MPA_VER_0 0x00000001
+#define IRDMA_DRV_OPT_DISABLE_MPA_CRC 0x00000002
+#define IRDMA_DRV_OPT_DISABLE_FIRST_WRITE 0x00000004
+#define IRDMA_DRV_OPT_DISABLE_INTF 0x00000008
+#define IRDMA_DRV_OPT_ENA_MSI 0x00000010
+#define IRDMA_DRV_OPT_DUAL_LOGICAL_PORT 0x00000020
+#define IRDMA_DRV_OPT_NO_INLINE_DATA 0x00000080
+#define IRDMA_DRV_OPT_DISABLE_INT_MOD 0x00000100
+#define IRDMA_DRV_OPT_DISABLE_VIRT_WQ 0x00000200
+#define IRDMA_DRV_OPT_ENA_PAU 0x00000400
+#define IRDMA_DRV_OPT_MCAST_LOGPORT_MAP 0x00000800
+
+#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
+#define IRDMA_ROCE_CWND_DEFAULT 0x400
+#define IRDMA_ROCE_ACKCREDS_DEFAULT 0x1E
+
+#define IRDMA_FLUSH_SQ BIT(0)
+#define IRDMA_FLUSH_RQ BIT(1)
+#define IRDMA_REFLUSH BIT(2)
+#define IRDMA_FLUSH_WAIT BIT(3)
+
+#define IRDMA_IRQ_NAME_STR_LEN (64)
+
+#define IRDMA_NUM_AEQ_MSIX 1
+#define IRDMA_MIN_MSIX 2
+
+enum init_completion_state {
+ INVALID_STATE = 0,
+ INITIAL_STATE,
+ CQP_CREATED,
+ HMC_OBJS_CREATED,
+ HW_RSRC_INITIALIZED,
+ CCQ_CREATED,
+ CEQ0_CREATED,
+ CEQS_CREATED,
+ PBLE_CHUNK_MEM,
+ AEQ_CREATED,
+ ILQ_CREATED,
+ IEQ_CREATED, /* Last state of probe */
+ IP_ADDR_REGISTERED, /* Last state of open */
+};
+
+struct irdma_rsrc_limits {
+ u32 qplimit;
+ u32 mrlimit;
+ u32 cqlimit;
+};
+
+struct irdma_cqp_err_info {
+ u16 maj;
+ u16 min;
+ const char *desc;
+};
+
+struct irdma_cqp_compl_info {
+ u32 op_ret_val;
+ u16 maj_err_code;
+ u16 min_err_code;
+ bool error;
+ u8 op_code;
+};
+
+struct irdma_cqp_request {
+ struct cqp_cmds_info info;
+ wait_queue_head_t waitq;
+ struct list_head list;
+ refcount_t refcnt;
+ void (*callback_fcn)(struct irdma_cqp_request *cqp_request);
+ void *param;
+ struct irdma_cqp_compl_info compl_info;
+ bool request_done; /* READ/WRITE_ONCE macros operate on it */
+ bool waiting:1;
+ bool dynamic:1;
+ bool pending:1;
+};
+
+struct irdma_cqp {
+ struct irdma_sc_cqp sc_cqp;
+ spinlock_t req_lock; /* protect CQP request list */
+ spinlock_t compl_lock; /* protect CQP completion processing */
+ wait_queue_head_t waitq;
+ wait_queue_head_t remove_wq;
+ struct irdma_dma_mem sq;
+ struct irdma_dma_mem host_ctx;
+ u64 *scratch_array;
+ struct irdma_cqp_request *cqp_requests;
+ struct irdma_ooo_cqp_op *oop_op_array;
+ struct list_head cqp_avail_reqs;
+ struct list_head cqp_pending_reqs;
+};
+
+struct irdma_ccq {
+ struct irdma_sc_cq sc_cq;
+ struct irdma_dma_mem mem_cq;
+ struct irdma_dma_mem shadow_area;
+};
+
+struct irdma_ceq {
+ struct irdma_sc_ceq sc_ceq;
+ struct irdma_dma_mem mem;
+ u32 irq;
+ u32 msix_idx;
+ struct irdma_pci_f *rf;
+ struct tasklet_struct dpc_tasklet;
+ spinlock_t ce_lock; /* sync cq destroy with cq completion event notification */
+};
+
+struct irdma_aeq {
+ struct irdma_sc_aeq sc_aeq;
+ struct irdma_dma_mem mem;
+ struct irdma_pble_alloc palloc;
+ bool virtual_map;
+};
+
+struct irdma_arp_entry {
+ u32 ip_addr[4];
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct irdma_msix_vector {
+ u32 idx;
+ u32 irq;
+ u32 cpu_affinity;
+ u32 ceq_id;
+ cpumask_t mask;
+ char name[IRDMA_IRQ_NAME_STR_LEN];
+};
+
+struct irdma_mc_table_info {
+ u32 mgn;
+ u32 dest_ip[4];
+ bool lan_fwd:1;
+ bool ipv4_valid:1;
+};
+
+struct mc_table_list {
+ struct list_head list;
+ struct irdma_mc_table_info mc_info;
+ struct irdma_mcast_grp_info mc_grp_ctx;
+};
+
+struct irdma_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+struct irdma_qvlist_info {
+ u32 num_vectors;
+ struct irdma_qv_info qv_info[] __counted_by(num_vectors);
+};
+
+struct irdma_gen_ops {
+ void (*request_reset)(struct irdma_pci_f *rf);
+ int (*register_qset)(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node);
+ void (*unregister_qset)(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node);
+};
+
+struct irdma_pci_f {
+ bool reset:1;
+ bool rsrc_created:1;
+ bool msix_shared:1;
+ bool hwqp1_rsvd:1;
+ u8 rsrc_profile;
+ u8 *hmc_info_mem;
+ u8 *mem_rsrc;
+ u8 rdma_ver;
+ u8 rst_to;
+ u8 pf_id;
+ enum irdma_protocol_used protocol_used;
+ u32 sd_type;
+ u32 msix_count;
+ u32 max_mr;
+ u32 max_qp;
+ u32 max_cq;
+ u32 max_srq;
+ u32 next_srq;
+ u32 max_ah;
+ u32 next_ah;
+ u32 max_mcg;
+ u32 next_mcg;
+ u32 max_pd;
+ u32 next_qp;
+ u32 next_cq;
+ u32 next_pd;
+ u32 max_mr_size;
+ u32 max_cqe;
+ u32 mr_stagmask;
+ u32 used_pds;
+ u32 used_cqs;
+ u32 used_srqs;
+ u32 used_mrs;
+ u32 used_qps;
+ u32 arp_table_size;
+ u32 next_arp_index;
+ u32 ceqs_count;
+ u32 next_ws_node_id;
+ u32 max_ws_node_id;
+ u32 limits_sel;
+ unsigned long *allocated_ws_nodes;
+ unsigned long *allocated_qps;
+ unsigned long *allocated_cqs;
+ unsigned long *allocated_srqs;
+ unsigned long *allocated_mrs;
+ unsigned long *allocated_pds;
+ unsigned long *allocated_mcgs;
+ unsigned long *allocated_ahs;
+ unsigned long *allocated_arps;
+ enum init_completion_state init_state;
+ struct irdma_sc_dev sc_dev;
+ struct pci_dev *pcidev;
+ void *cdev;
+ struct irdma_hw hw;
+ struct irdma_cqp cqp;
+ struct irdma_ccq ccq;
+ struct irdma_aeq aeq;
+ struct irdma_ceq *ceqlist;
+ struct irdma_hmc_pble_rsrc *pble_rsrc;
+ struct irdma_arp_entry *arp_table;
+ spinlock_t arp_lock; /* protect ARP table access */
+ spinlock_t rsrc_lock; /* protect HW resource array access */
+ spinlock_t qptable_lock; /* protect QP table access */
+ spinlock_t cqtable_lock; /* protect CQ table access */
+ struct irdma_qp **qp_table;
+ struct irdma_cq **cq_table;
+ spinlock_t qh_list_lock; /* protect mc_qht_list */
+ struct mc_table_list mc_qht_list;
+ struct irdma_msix_vector *iw_msixtbl;
+ struct irdma_qvlist_info *iw_qvlist;
+ struct tasklet_struct dpc_tasklet;
+ struct msix_entry *msix_entries;
+ struct irdma_dma_mem obj_mem;
+ struct irdma_dma_mem obj_next;
+ atomic_t vchnl_msgs;
+ wait_queue_head_t vchnl_waitq;
+ struct workqueue_struct *cqp_cmpl_wq;
+ struct work_struct cqp_cmpl_work;
+ struct workqueue_struct *vchnl_wq;
+ struct irdma_sc_vsi default_vsi;
+ void *back_fcn;
+ struct irdma_gen_ops gen_ops;
+ struct irdma_device *iwdev;
+ DECLARE_HASHTABLE(ah_hash_tbl, 8);
+ struct mutex ah_tbl_lock; /* protect AH hash table access */
+};
+
+struct irdma_device {
+ struct ib_device ibdev;
+ struct irdma_pci_f *rf;
+ struct net_device *netdev;
+ struct workqueue_struct *cleanup_wq;
+ struct irdma_sc_vsi vsi;
+ struct irdma_cm_core cm_core;
+ u32 roce_cwnd;
+ u32 roce_ackcreds;
+ u32 vendor_id;
+ u32 vendor_part_id;
+ u32 push_mode;
+ u32 rcv_wnd;
+ u16 mac_ip_table_idx;
+ u16 vsi_num;
+ u16 vport_id;
+ u8 rcv_wscale;
+ u8 iw_status;
+ bool roce_mode:1;
+ bool roce_dcqcn_en:1;
+ bool dcb_vlan_mode:1;
+ bool iw_ooo:1;
+ bool is_vport:1;
+ enum init_completion_state init_state;
+
+ wait_queue_head_t suspend_wq;
+};
+
+static inline struct irdma_device *to_iwdev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct irdma_device, ibdev);
+}
+
+static inline struct irdma_ucontext *to_ucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct irdma_ucontext, ibucontext);
+}
+
+static inline struct irdma_user_mmap_entry *
+to_irdma_mmap_entry(struct rdma_user_mmap_entry *rdma_entry)
+{
+ return container_of(rdma_entry, struct irdma_user_mmap_entry,
+ rdma_entry);
+}
+
+static inline struct irdma_pd *to_iwpd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct irdma_pd, ibpd);
+}
+
+static inline struct irdma_ah *to_iwah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct irdma_ah, ibah);
+}
+
+static inline struct irdma_mr *to_iwmr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct irdma_mr, ibmr);
+}
+
+static inline struct irdma_mr *to_iwmw(struct ib_mw *ibmw)
+{
+ return container_of(ibmw, struct irdma_mr, ibmw);
+}
+
+static inline struct irdma_cq *to_iwcq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct irdma_cq, ibcq);
+}
+
+static inline struct irdma_qp *to_iwqp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct irdma_qp, ibqp);
+}
+
+static inline struct irdma_pci_f *dev_to_rf(struct irdma_sc_dev *dev)
+{
+ return container_of(dev, struct irdma_pci_f, sc_dev);
+}
+
+static inline struct irdma_srq *to_iwsrq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct irdma_srq, ibsrq);
+}
+
+/**
+ * irdma_alloc_rsrc - allocate a resource
+ * @rf: RDMA PCI function
+ * @rsrc_array: resource bit array
+ * @max_rsrc: maximum resource number
+ * @req_rsrc_num: allocated resource number
+ * @next: next free id
+ **/
+static inline int irdma_alloc_rsrc(struct irdma_pci_f *rf,
+ unsigned long *rsrc_array, u32 max_rsrc,
+ u32 *req_rsrc_num, u32 *next)
+{
+ u32 rsrc_num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rf->rsrc_lock, flags);
+ rsrc_num = find_next_zero_bit(rsrc_array, max_rsrc, *next);
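+ /* no free bit at or after *next; wrap around and rescan from bit 0 */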
+ if (rsrc_num >= max_rsrc) {
+ rsrc_num = find_first_zero_bit(rsrc_array, max_rsrc);
+ if (rsrc_num >= max_rsrc) {
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+ ibdev_dbg(&rf->iwdev->ibdev,
+ "ERR: resource [%d] allocation failed\n",
+ rsrc_num);
+ return -EOVERFLOW;
+ }
+ }
+ __set_bit(rsrc_num, rsrc_array);
+ *next = rsrc_num + 1;
+ if (*next == max_rsrc)
+ *next = 0;
+ *req_rsrc_num = rsrc_num;
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+
+ return 0;
+}
+
+/**
+ * irdma_free_rsrc - free a resource
+ * @rf: RDMA PCI function
+ * @rsrc_array: resource bit array
+ * @rsrc_num: resource number to free
+ **/
+static inline void irdma_free_rsrc(struct irdma_pci_f *rf,
+ unsigned long *rsrc_array, u32 rsrc_num)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rf->rsrc_lock, flags);
+ __clear_bit(rsrc_num, rsrc_array);
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+}
+
+int irdma_ctrl_init_hw(struct irdma_pci_f *rf);
+void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf);
+int irdma_rt_init_hw(struct irdma_device *iwdev,
+ struct irdma_l2params *l2params);
+void irdma_rt_deinit_hw(struct irdma_device *iwdev);
+void irdma_qp_add_ref(struct ib_qp *ibqp);
+void irdma_qp_rem_ref(struct ib_qp *ibqp);
+void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp);
+struct ib_qp *irdma_get_qp(struct ib_device *ibdev, int qpn);
+void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask);
+void irdma_manage_arp_cache(struct irdma_pci_f *rf,
+ const unsigned char *mac_addr,
+ u32 *ip_addr, bool ipv4, u32 action);
+struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port);
+void irdma_del_apbvt(struct irdma_device *iwdev,
+ struct irdma_apbvt_entry *entry);
+struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
+ bool wait);
+void irdma_free_cqp_request(struct irdma_cqp *cqp,
+ struct irdma_cqp_request *cqp_request);
+void irdma_put_cqp_request(struct irdma_cqp *cqp,
+ struct irdma_cqp_request *cqp_request);
+int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx);
+int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx);
+void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx);
+
+u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf);
+void irdma_port_ibevent(struct irdma_device *iwdev);
+void irdma_cm_disconn(struct irdma_qp *qp);
+
+bool irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd,
+ u16 maj_err_code, u16 min_err_code);
+int irdma_handle_cqp_op(struct irdma_pci_f *rf,
+ struct irdma_cqp_request *cqp_request);
+
+int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+ struct ib_udata *udata);
+int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+void irdma_cq_add_ref(struct ib_cq *ibcq);
+void irdma_cq_rem_ref(struct ib_cq *ibcq);
+void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);
+void irdma_srq_event(struct irdma_sc_srq *srq);
+void irdma_srq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_srq *srq);
+void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf);
+int irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp,
+ struct irdma_modify_qp_info *info, bool wait);
+int irdma_qp_suspend_resume(struct irdma_sc_qp *qp, bool suspend);
+int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
+ enum irdma_quad_entry_type etype,
+ enum irdma_quad_hash_manage_type mtype, void *cmnode,
+ bool wait);
+void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf);
+void irdma_free_sqbuf(struct irdma_sc_vsi *vsi, void *bufp);
+void irdma_free_qp_rsrc(struct irdma_qp *iwqp);
+int irdma_setup_cm_core(struct irdma_device *iwdev, u8 ver);
+void irdma_cleanup_cm_core(struct irdma_cm_core *cm_core);
+void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,
+ u8 term_len);
+int irdma_send_syn(struct irdma_cm_node *cm_node, u32 sendack);
+int irdma_send_reset(struct irdma_cm_node *cm_node);
+struct irdma_cm_node *irdma_find_node(struct irdma_cm_core *cm_core,
+ u16 rem_port, u32 *rem_addr, u16 loc_port,
+ u32 *loc_addr, u16 vlan_id);
+int irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
+ struct irdma_qp_flush_info *info, bool wait);
+void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
+ struct irdma_gen_ae_info *info, bool wait);
+void irdma_copy_ip_ntohl(u32 *dst, __be32 *src);
+void irdma_copy_ip_htonl(__be32 *dst, u32 *src);
+u16 irdma_get_vlan_ipv4(u32 *addr);
+void irdma_get_vlan_mac_ipv6(u32 *addr, u16 *vlan_id, u8 *mac);
+struct ib_mr *irdma_reg_phys_mr(struct ib_pd *ib_pd, u64 addr, u64 size,
+ int acc, u64 *iova_start);
+int irdma_upload_qp_context(struct irdma_qp *iwqp, bool freeze, bool raw);
+void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);
+int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
+ bool wait,
+ void (*callback_fcn)(struct irdma_cqp_request *cqp_request),
+ void *cb_param);
+void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request);
+bool irdma_cq_empty(struct irdma_cq *iwcq);
+int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr);
+int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr);
+int irdma_net_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr);
+int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr);
+void irdma_add_ip(struct irdma_device *iwdev);
+void cqp_compl_worker(struct work_struct *work);
+void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev);
+#endif /* IRDMA_MAIN_H */
diff --git a/drivers/infiniband/hw/irdma/osdep.h b/drivers/infiniband/hw/irdma/osdep.h
new file mode 100644
index 000000000000..3f73ceacccb6
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/osdep.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#ifndef IRDMA_OSDEP_H
+#define IRDMA_OSDEP_H
+
+#include <linux/pci.h>
+#include <linux/bitfield.h>
+#include <rdma/ib_verbs.h>
+#include <net/dscp.h>
+
+#define STATS_TIMER_DELAY 60000
+
+struct irdma_dma_info {
+ dma_addr_t *dmaaddrs;
+};
+
+struct irdma_dma_mem {
+ void *va;
+ dma_addr_t pa;
+ u32 size;
+} __packed;
+
+struct irdma_virt_mem {
+ void *va;
+ u32 size;
+} __packed;
+
+struct irdma_sc_vsi;
+struct irdma_sc_dev;
+struct irdma_sc_qp;
+struct irdma_puda_buf;
+struct irdma_puda_cmpl_info;
+struct irdma_update_sds_info;
+struct irdma_hmc_fcn_info;
+struct irdma_manage_vf_pble_info;
+struct irdma_hw;
+struct irdma_pci_f;
+
+struct ib_device *to_ibdev(struct irdma_sc_dev *dev);
+void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
+enum irdma_status_code irdma_vf_wait_vchnl_resp(struct irdma_sc_dev *dev);
+bool irdma_vf_clear_to_send(struct irdma_sc_dev *dev);
+void irdma_add_dev_ref(struct irdma_sc_dev *dev);
+void irdma_put_dev_ref(struct irdma_sc_dev *dev);
+int irdma_ieq_check_mpacrc(const void *addr, u32 len, u32 val);
+struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,
+ struct irdma_puda_buf *buf);
+void irdma_send_ieq_ack(struct irdma_sc_qp *qp);
+void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len,
+ u32 seqnum);
+int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
+ struct irdma_puda_buf *buf);
+int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info);
+int irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
+ struct irdma_hmc_fcn_info *hmcfcninfo,
+ u16 *pmf_idx);
+int irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *mem);
+void *irdma_remove_cqp_head(struct irdma_sc_dev *dev);
+void irdma_term_modify_qp(struct irdma_sc_qp *qp, u8 next_state, u8 term,
+ u8 term_len);
+void irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred);
+void irdma_terminate_start_timer(struct irdma_sc_qp *qp);
+void irdma_terminate_del_timer(struct irdma_sc_qp *qp);
+void irdma_hw_stats_start_timer(struct irdma_sc_vsi *vsi);
+void irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi);
+void wr32(struct irdma_hw *hw, u32 reg, u32 val);
+u32 rd32(struct irdma_hw *hw, u32 reg);
+u64 rd64(struct irdma_hw *hw, u32 reg);
+int irdma_map_vm_page_list(struct irdma_hw *hw, void *va, dma_addr_t *pg_dma,
+ u32 pg_cnt);
+void irdma_unmap_vm_page_list(struct irdma_hw *hw, dma_addr_t *pg_dma, u32 pg_cnt);
+#endif /* IRDMA_OSDEP_H */
diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c
new file mode 100644
index 000000000000..3091f9345f12
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/pble.c
@@ -0,0 +1,517 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "osdep.h"
+#include "hmc.h"
+#include "defs.h"
+#include "type.h"
+#include "protos.h"
+#include "pble.h"
+
+static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);
+
+/**
+ * irdma_destroy_pble_prm - destroy prm during module unload
+ * @pble_rsrc: pble resources
+ */
+void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
+{
+ struct irdma_chunk *chunk;
+ struct irdma_pble_prm *pinfo = &pble_rsrc->pinfo;
+
+ while (!list_empty(&pinfo->clist)) {
+ chunk = (struct irdma_chunk *) pinfo->clist.next;
+ list_del(&chunk->list);
+ if (chunk->type == PBLE_SD_PAGED)
+ irdma_pble_free_paged_mem(chunk);
+ bitmap_free(chunk->bitmapbuf);
+ kfree(chunk->chunkmem.va);
+ }
+}
+
+/**
+ * irdma_hmc_init_pble - Initialize pble resources during module load
+ * @dev: irdma_sc_dev struct
+ * @pble_rsrc: pble resources
+ */
+int irdma_hmc_init_pble(struct irdma_sc_dev *dev,
+ struct irdma_hmc_pble_rsrc *pble_rsrc)
+{
+ struct irdma_hmc_info *hmc_info;
+ u32 fpm_idx = 0;
+ int status = 0;
+
+ hmc_info = dev->hmc_info;
+ pble_rsrc->dev = dev;
+ pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].base;
+ /* Start pbles on a 4k boundary */
+ if (pble_rsrc->fpm_base_addr & 0xfff)
+ fpm_idx = (4096 - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;
+ pble_rsrc->unallocated_pble =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt - fpm_idx;
+ pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);
+ pble_rsrc->pinfo.pble_shift = PBLE_SHIFT;
+
+ mutex_init(&pble_rsrc->pble_mutex_lock);
+
+ spin_lock_init(&pble_rsrc->pinfo.prm_lock);
+ INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
+ if (add_pble_prm(pble_rsrc)) {
+ irdma_destroy_pble_prm(pble_rsrc);
+ status = -ENOMEM;
+ }
+
+ return status;
+}
+
+/**
+ * get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address
+ * @pble_rsrc: structure containing fpm address
+ * @idx: where to return indexes
+ */
+static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct sd_pd_idx *idx)
+{
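+ /* derive the SD index, the absolute PD index and the PD index
+ * relative to its SD from the next free FPM address
+ */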
+ idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
+ idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
+ idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
+}
+
+/**
+ * add_sd_direct - add sd direct for pble
+ * @pble_rsrc: pble resource ptr
+ * @info: page info for sd
+ */
+static int add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_add_page_info *info)
+{
+ struct irdma_sc_dev *dev = pble_rsrc->dev;
+ int ret_code = 0;
+ struct sd_pd_idx *idx = &info->idx;
+ struct irdma_chunk *chunk = info->chunk;
+ struct irdma_hmc_info *hmc_info = info->hmc_info;
+ struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
+ u32 offset = 0;
+
+ if (!sd_entry->valid) {
+ ret_code = irdma_add_sd_table_entry(dev->hw, hmc_info,
+ info->idx.sd_idx,
+ IRDMA_SD_TYPE_DIRECT,
+ IRDMA_HMC_DIRECT_BP_SIZE);
+ if (ret_code)
+ return ret_code;
+
+ chunk->type = PBLE_SD_CONTIGOUS;
+ }
+
+ offset = idx->rel_pd_idx << HMC_PAGED_BP_SHIFT;
+ chunk->size = info->pages << HMC_PAGED_BP_SHIFT;
+ chunk->vaddr = sd_entry->u.bp.addr.va + offset;
+ chunk->fpm_addr = pble_rsrc->next_fpm_addr;
+ ibdev_dbg(to_ibdev(dev),
+ "PBLE: chunk_size[%lld] = 0x%llx vaddr=0x%p fpm_addr = %llx\n",
+ chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
+
+ return 0;
+}
+
+/**
+ * fpm_to_idx - given fpm address, get pble index
+ * @pble_rsrc: pble resource management
+ * @addr: fpm address for index
+ */
+static u32 fpm_to_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, u64 addr)
+{
+ u64 idx;
+
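+ /* each PBLE entry is 8 bytes, so a byte offset from the FPM base
+ * converts to a PBLE index with a shift by 3
+ */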
+ idx = (addr - (pble_rsrc->fpm_base_addr)) >> 3;
+
+ return (u32)idx;
+}
+
+/**
+ * add_bp_pages - add backing pages for sd
+ * @pble_rsrc: pble resource management
+ * @info: page info for sd
+ */
+static int add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_add_page_info *info)
+{
+ struct irdma_sc_dev *dev = pble_rsrc->dev;
+ u8 *addr;
+ struct irdma_dma_mem mem;
+ struct irdma_hmc_pd_entry *pd_entry;
+ struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
+ struct irdma_hmc_info *hmc_info = info->hmc_info;
+ struct irdma_chunk *chunk = info->chunk;
+ int status = 0;
+ u32 rel_pd_idx = info->idx.rel_pd_idx;
+ u32 pd_idx = info->idx.pd_idx;
+ u32 i;
+
+ if (irdma_pble_get_paged_mem(chunk, info->pages))
+ return -ENOMEM;
+
+ status = irdma_add_sd_table_entry(dev->hw, hmc_info, info->idx.sd_idx,
+ IRDMA_SD_TYPE_PAGED,
+ IRDMA_HMC_DIRECT_BP_SIZE);
+ if (status)
+ goto error;
+
+ addr = chunk->vaddr;
+ for (i = 0; i < info->pages; i++) {
+ mem.pa = (u64)chunk->dmainfo.dmaaddrs[i];
+ mem.size = 4096;
+ mem.va = addr;
+ pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
+ if (!pd_entry->valid) {
+ status = irdma_add_pd_table_entry(dev, hmc_info,
+ pd_idx++, &mem);
+ if (status)
+ goto error;
+
+ addr += 4096;
+ }
+ }
+
+ chunk->fpm_addr = pble_rsrc->next_fpm_addr;
+ return 0;
+
+error:
+ irdma_pble_free_paged_mem(chunk);
+
+ return status;
+}
+
+/**
+ * irdma_get_type - get the sd entry type to use for an sd
+ * @dev: irdma_sc_dev struct
+ * @idx: index of sd
+ * @pages: pages in the sd
+ */
+static enum irdma_sd_entry_type irdma_get_type(struct irdma_sc_dev *dev,
+ struct sd_pd_idx *idx, u32 pages)
+{
+ enum irdma_sd_entry_type sd_entry_type;
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ sd_entry_type = (!idx->rel_pd_idx &&
+ pages == IRDMA_HMC_PD_CNT_IN_SD) ?
+ IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
+ else
+ sd_entry_type = (!idx->rel_pd_idx &&
+ pages == IRDMA_HMC_PD_CNT_IN_SD &&
+ dev->privileged) ?
+ IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
+ return sd_entry_type;
+}
+
+/**
+ * add_pble_prm - add an sd entry for the pble resource
+ * @pble_rsrc: pble resource management
+ */
+static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
+{
+ struct irdma_sc_dev *dev = pble_rsrc->dev;
+ struct irdma_hmc_sd_entry *sd_entry;
+ struct irdma_hmc_info *hmc_info;
+ struct irdma_chunk *chunk;
+ struct irdma_add_page_info info;
+ struct sd_pd_idx *idx = &info.idx;
+ int ret_code = 0;
+ enum irdma_sd_entry_type sd_entry_type;
+ u64 sd_reg_val = 0;
+ struct irdma_virt_mem chunkmem;
+ u32 pages;
+
+ if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
+ return -ENOMEM;
+
+ if (pble_rsrc->next_fpm_addr & 0xfff)
+ return -EINVAL;
+
+ chunkmem.size = sizeof(*chunk);
+ chunkmem.va = kzalloc(chunkmem.size, GFP_KERNEL);
+ if (!chunkmem.va)
+ return -ENOMEM;
+
+ chunk = chunkmem.va;
+ chunk->chunkmem = chunkmem;
+ hmc_info = dev->hmc_info;
+ chunk->dev = dev;
+ chunk->fpm_addr = pble_rsrc->next_fpm_addr;
+ get_sd_pd_idx(pble_rsrc, idx);
+ sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
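+ /* take the PDs left in the current SD (or a full SD when aligned),
+ * capped by the number of whole 512-PBLE pages still unallocated
+ */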
+ pages = (idx->rel_pd_idx) ? (IRDMA_HMC_PD_CNT_IN_SD - idx->rel_pd_idx) :
+ IRDMA_HMC_PD_CNT_IN_SD;
+ pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
+ info.chunk = chunk;
+ info.hmc_info = hmc_info;
+ info.pages = pages;
+ info.sd_entry = sd_entry;
+ if (!sd_entry->valid)
+ sd_entry_type = irdma_get_type(dev, idx, pages);
+ else
+ sd_entry_type = sd_entry->entry_type;
+
+ ibdev_dbg(to_ibdev(dev),
+ "PBLE: pages = %d, unallocated_pble[%d] current_fpm_addr = %llx\n",
+ pages, pble_rsrc->unallocated_pble,
+ pble_rsrc->next_fpm_addr);
+ ibdev_dbg(to_ibdev(dev), "PBLE: sd_entry_type = %d\n", sd_entry_type);
+ if (sd_entry_type == IRDMA_SD_TYPE_DIRECT)
+ ret_code = add_sd_direct(pble_rsrc, &info);
+
+ if (ret_code)
+ sd_entry_type = IRDMA_SD_TYPE_PAGED;
+ else
+ pble_rsrc->stats_direct_sds++;
+
+ if (sd_entry_type == IRDMA_SD_TYPE_PAGED) {
+ ret_code = add_bp_pages(pble_rsrc, &info);
+ if (ret_code)
+ goto error;
+ else
+ pble_rsrc->stats_paged_sds++;
+ }
+
+ ret_code = irdma_prm_add_pble_mem(&pble_rsrc->pinfo, chunk);
+ if (ret_code)
+ goto error;
+
+ pble_rsrc->next_fpm_addr += chunk->size;
+ ibdev_dbg(to_ibdev(dev),
+ "PBLE: next_fpm_addr = %llx chunk_size[%llu] = 0x%llx\n",
+ pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
+ pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3);
+ sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
+ sd_entry->u.pd_table.pd_page_addr.pa :
+ sd_entry->u.bp.addr.pa;
+ if ((dev->privileged && !sd_entry->valid) ||
+ dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ ret_code = irdma_hmc_sd_one(dev, hmc_info->hmc_fn_id,
+ sd_reg_val, idx->sd_idx,
+ sd_entry->entry_type, true);
+ if (ret_code)
+ goto error;
+ }
+
+ list_add(&chunk->list, &pble_rsrc->pinfo.clist);
+ sd_entry->valid = true;
+ return 0;
+
+error:
+ bitmap_free(chunk->bitmapbuf);
+ kfree(chunk->chunkmem.va);
+
+ return ret_code;
+}
+
+/**
+ * free_lvl2 - free a level 2 pble allocation
+ * @pble_rsrc: pble resource management
+ * @palloc: level 2 pble allocation
+ */
+static void free_lvl2(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc)
+{
+ u32 i;
+ struct irdma_pble_level2 *lvl2 = &palloc->level2;
+ struct irdma_pble_info *root = &lvl2->root;
+ struct irdma_pble_info *leaf = lvl2->leaf;
+
+ for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
+ if (leaf->addr)
+ irdma_prm_return_pbles(&pble_rsrc->pinfo,
+ &leaf->chunkinfo);
+ else
+ break;
+ }
+
+ if (root->addr)
+ irdma_prm_return_pbles(&pble_rsrc->pinfo, &root->chunkinfo);
+
+ kfree(lvl2->leafmem.va);
+ lvl2->leaf = NULL;
+}
+
+/**
+ * get_lvl2_pble - get level 2 pble resource
+ * @pble_rsrc: pble resource management
+ * @palloc: level 2 pble allocation
+ */
+static int get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc)
+{
+ u32 lf4k, lflast, total, i;
+ u32 pblcnt = PBLE_PER_PAGE;
+ u64 *addr;
+ struct irdma_pble_level2 *lvl2 = &palloc->level2;
+ struct irdma_pble_info *root = &lvl2->root;
+ struct irdma_pble_info *leaf;
+ int ret_code;
+ u64 fpm_addr;
+
+ /* number of full 512-PBLE (4K) leaves */
+ lf4k = palloc->total_cnt >> 9;
+ lflast = palloc->total_cnt % PBLE_PER_PAGE;
+ total = (lflast == 0) ? lf4k : lf4k + 1;
+ lvl2->leaf_cnt = total;
+
+ lvl2->leafmem.size = (sizeof(*leaf) * total);
+ lvl2->leafmem.va = kzalloc(lvl2->leafmem.size, GFP_KERNEL);
+ if (!lvl2->leafmem.va)
+ return -ENOMEM;
+
+ lvl2->leaf = lvl2->leafmem.va;
+ leaf = lvl2->leaf;
+ ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &root->chunkinfo,
+ total << 3, &root->addr, &fpm_addr);
+ if (ret_code) {
+ kfree(lvl2->leafmem.va);
+ lvl2->leaf = NULL;
+ return -ENOMEM;
+ }
+
+ root->idx = fpm_to_idx(pble_rsrc, fpm_addr);
+ root->cnt = total;
+ addr = root->addr;
+ for (i = 0; i < total; i++, leaf++) {
+ pblcnt = (lflast && ((i + 1) == total)) ?
+ lflast : PBLE_PER_PAGE;
+ ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo,
+ &leaf->chunkinfo, pblcnt << 3,
+ &leaf->addr, &fpm_addr);
+ if (ret_code)
+ goto error;
+
+ leaf->idx = fpm_to_idx(pble_rsrc, fpm_addr);
+
+ leaf->cnt = pblcnt;
+ *addr = (u64)leaf->idx;
+ addr++;
+ }
+
+ palloc->level = PBLE_LEVEL_2;
+ pble_rsrc->stats_lvl2++;
+ return 0;
+
+error:
+ free_lvl2(pble_rsrc, palloc);
+
+ return -ENOMEM;
+}
+
+/**
+ * get_lvl1_pble - get level 1 pble resource
+ * @pble_rsrc: pble resource management
+ * @palloc: level 1 pble allocation
+ */
+static int get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc)
+{
+ int ret_code;
+ u64 fpm_addr;
+ struct irdma_pble_info *lvl1 = &palloc->level1;
+
+ ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &lvl1->chunkinfo,
+ palloc->total_cnt << 3, &lvl1->addr,
+ &fpm_addr);
+ if (ret_code)
+ return -ENOMEM;
+
+ palloc->level = PBLE_LEVEL_1;
+ lvl1->idx = fpm_to_idx(pble_rsrc, fpm_addr);
+ lvl1->cnt = palloc->total_cnt;
+ pble_rsrc->stats_lvl1++;
+
+ return 0;
+}
+
+/**
+ * get_lvl1_lvl2_pble - calls the level 1 and level 2 pble routines
+ * @pble_rsrc: pble resources
+ * @palloc: contains all information regarding pble (idx + pble addr)
+ * @lvl: Bitmask for requested pble level
+ */
+static int get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc, u8 lvl)
+{
+ int status = 0;
+
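+ /* prefer a single contiguous level 1 range; fall back to a level 2
+ * (root + leaves) allocation only when the caller allows it and the
+ * request spans more than one page
+ */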
+ status = get_lvl1_pble(pble_rsrc, palloc);
+ if (!status || lvl == PBLE_LEVEL_1 || palloc->total_cnt <= PBLE_PER_PAGE)
+ return status;
+
+ status = get_lvl2_pble(pble_rsrc, palloc);
+
+ return status;
+}
+
+/**
+ * irdma_get_pble - allocate pbles from the prm
+ * @pble_rsrc: pble resources
+ * @palloc: contains all information regarding pble (idx + pble addr)
+ * @pble_cnt: #of pbles requested
+ * @lvl: requested pble level mask
+ */
+int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc, u32 pble_cnt,
+ u8 lvl)
+{
+ int status = 0;
+ int max_sds = 0;
+ int i;
+
+ palloc->total_cnt = pble_cnt;
+ palloc->level = PBLE_LEVEL_0;
+
+ mutex_lock(&pble_rsrc->pble_mutex_lock);
+
+ /* check first to see if we can get pbles without acquiring
+ * additional sds
+ */
+ status = get_lvl1_lvl2_pble(pble_rsrc, palloc, lvl);
+ if (!status)
+ goto exit;
+
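+ /* worst case each added SD backs 1 << 18 PBLEs, so bound the number
+ * of add_pble_prm() attempts, plus one for a partial SD
+ */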
+ max_sds = (palloc->total_cnt >> 18) + 1;
+ for (i = 0; i < max_sds; i++) {
+ status = add_pble_prm(pble_rsrc);
+ if (status)
+ break;
+
+ status = get_lvl1_lvl2_pble(pble_rsrc, palloc, lvl);
+ /* if a specific level was requested, only go through once */
+ if (!status || lvl)
+ break;
+ }
+
+exit:
+ if (!status) {
+ pble_rsrc->allocdpbles += pble_cnt;
+ pble_rsrc->stats_alloc_ok++;
+ } else {
+ pble_rsrc->stats_alloc_fail++;
+ }
+ mutex_unlock(&pble_rsrc->pble_mutex_lock);
+
+ return status;
+}
+
+/**
+ * irdma_free_pble - put pbles back into prm
+ * @pble_rsrc: pble resources
+ * @palloc: contains all information regarding pble resource being freed
+ */
+void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc)
+{
+ pble_rsrc->freedpbles += palloc->total_cnt;
+
+ if (palloc->level == PBLE_LEVEL_2)
+ free_lvl2(pble_rsrc, palloc);
+ else
+ irdma_prm_return_pbles(&pble_rsrc->pinfo,
+ &palloc->level1.chunkinfo);
+ pble_rsrc->stats_alloc_freed++;
+}
diff --git a/drivers/infiniband/hw/irdma/pble.h b/drivers/infiniband/hw/irdma/pble.h
new file mode 100644
index 000000000000..160ad728e9fb
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/pble.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2015 - 2019 Intel Corporation */
+#ifndef IRDMA_PBLE_H
+#define IRDMA_PBLE_H
+
+#define PBLE_SHIFT 6
+#define PBLE_PER_PAGE 512
+#define HMC_PAGED_BP_SHIFT 12
+#define PBLE_512_SHIFT 9
+#define PBLE_INVALID_IDX 0xffffffff
+
+enum irdma_pble_level {
+ PBLE_LEVEL_0 = 0,
+ PBLE_LEVEL_1 = 1,
+ PBLE_LEVEL_2 = 2,
+};
+
+enum irdma_alloc_type {
+ PBLE_NO_ALLOC = 0,
+ PBLE_SD_CONTIGOUS = 1,
+ PBLE_SD_PAGED = 2,
+};
+
+struct irdma_chunk;
+
+struct irdma_pble_chunkinfo {
+ struct irdma_chunk *pchunk;
+ u64 bit_idx;
+ u64 bits_used;
+};
+
+struct irdma_pble_info {
+ u64 *addr;
+ u32 idx;
+ u32 cnt;
+ struct irdma_pble_chunkinfo chunkinfo;
+};
+
+struct irdma_pble_level2 {
+ struct irdma_pble_info root;
+ struct irdma_pble_info *leaf;
+ struct irdma_virt_mem leafmem;
+ u32 leaf_cnt;
+};
+
+struct irdma_pble_alloc {
+ u32 total_cnt;
+ enum irdma_pble_level level;
+ union {
+ struct irdma_pble_info level1;
+ struct irdma_pble_level2 level2;
+ };
+};
+
+struct sd_pd_idx {
+ u32 sd_idx;
+ u32 pd_idx;
+ u32 rel_pd_idx;
+};
+
+struct irdma_add_page_info {
+ struct irdma_chunk *chunk;
+ struct irdma_hmc_sd_entry *sd_entry;
+ struct irdma_hmc_info *hmc_info;
+ struct sd_pd_idx idx;
+ u32 pages;
+};
+
+struct irdma_chunk {
+ struct list_head list;
+ struct irdma_dma_info dmainfo;
+ unsigned long *bitmapbuf;
+
+ u32 sizeofbitmap;
+ u64 size;
+ void *vaddr;
+ u64 fpm_addr;
+ u32 pg_cnt;
+ enum irdma_alloc_type type;
+ struct irdma_sc_dev *dev;
+ struct irdma_virt_mem chunkmem;
+};
+
+struct irdma_pble_prm {
+ struct list_head clist;
+ spinlock_t prm_lock; /* protect prm bitmap */
+ u64 total_pble_alloc;
+ u64 free_pble_cnt;
+ u8 pble_shift;
+};
+
+struct irdma_hmc_pble_rsrc {
+ u32 unallocated_pble;
+ struct mutex pble_mutex_lock; /* protect PBLE resource */
+ struct irdma_sc_dev *dev;
+ u64 fpm_base_addr;
+ u64 next_fpm_addr;
+ struct irdma_pble_prm pinfo;
+ u64 allocdpbles;
+ u64 freedpbles;
+ u32 stats_direct_sds;
+ u32 stats_paged_sds;
+ u64 stats_alloc_ok;
+ u64 stats_alloc_fail;
+ u64 stats_alloc_freed;
+ u64 stats_lvl1;
+ u64 stats_lvl2;
+};
+
+void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);
+int irdma_hmc_init_pble(struct irdma_sc_dev *dev,
+ struct irdma_hmc_pble_rsrc *pble_rsrc);
+void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc);
+int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc, u32 pble_cnt,
+ u8 lvl);
+int irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
+ struct irdma_chunk *pchunk);
+int irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
+ struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size,
+ u64 **vaddr, u64 *fpm_addr);
+void irdma_prm_return_pbles(struct irdma_pble_prm *pprm,
+ struct irdma_pble_chunkinfo *chunkinfo);
+void irdma_pble_acquire_lock(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ unsigned long *flags);
+void irdma_pble_release_lock(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ unsigned long *flags);
+void irdma_pble_free_paged_mem(struct irdma_chunk *chunk);
+int irdma_pble_get_paged_mem(struct irdma_chunk *chunk, u32 pg_cnt);
+void irdma_prm_rem_bitmapmem(struct irdma_hw *hw, struct irdma_chunk *chunk);
+#endif /* IRDMA_PBLE_H */
diff --git a/drivers/infiniband/hw/irdma/protos.h b/drivers/infiniband/hw/irdma/protos.h
new file mode 100644
index 000000000000..324cfbf21764
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/protos.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2016 - 2021 Intel Corporation */
+#ifndef IRDMA_PROTOS_H
+#define IRDMA_PROTOS_H
+
+#define PAUSE_TIMER_VAL 0xffff
+#define REFRESH_THRESHOLD 0x7fff
+#define HIGH_THRESHOLD 0x800
+#define LOW_THRESHOLD 0x200
+#define ALL_TC2PFC 0xff
+#define CQP_COMPL_WAIT_TIME_MS 10
+#define CQP_TIMEOUT_THRESHOLD 500
+#define CQP_DEF_CMPL_TIMEOUT_THRESHOLD 2500
+
+/* init operations */
+int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
+ struct irdma_device_init_info *info);
+void irdma_sc_rt_init(struct irdma_sc_dev *dev);
+void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
+__le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch);
+int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
+ struct irdma_fast_reg_stag_info *info,
+ bool post_sq);
+/* HMC/FPM functions */
+int irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id);
+/* stats misc */
+int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
+ struct irdma_vsi_pestat *pestat, bool wait);
+void irdma_cqp_gather_stats_gen1(struct irdma_sc_dev *dev,
+ struct irdma_vsi_pestat *pestat);
+void irdma_hw_stats_read_all(struct irdma_vsi_pestat *stats,
+ const u64 *hw_stats_regs);
+int irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
+ struct irdma_ws_node_info *node_info);
+int irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq,
+ u8 op);
+int irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_aeq *sc_aeq,
+ u8 op);
+int irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
+ struct irdma_stats_inst_info *stats_info);
+u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev);
+void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id);
+void irdma_update_stats(struct irdma_dev_hw_stats *hw_stats,
+ struct irdma_gather_stats *gather_stats,
+ struct irdma_gather_stats *last_gather_stats,
+ const struct irdma_hw_stat_map *map, u16 max_stat_idx);
+
+/* vsi functions */
+int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
+ struct irdma_vsi_stats_info *info);
+void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi);
+void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
+ struct irdma_vsi_init_info *info);
+int irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq);
+void irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq);
+/* misc L2 param change functions */
+void irdma_change_l2params(struct irdma_sc_vsi *vsi,
+ struct irdma_l2params *l2params);
+void irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 suspend);
+int irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp, u8 cmd);
+void irdma_qp_add_qos(struct irdma_sc_qp *qp);
+void irdma_qp_rem_qos(struct irdma_sc_qp *qp);
+struct irdma_sc_qp *irdma_get_qp_from_list(struct list_head *head,
+ struct irdma_sc_qp *qp);
+void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi);
+/* terminate functions*/
+void irdma_terminate_send_fin(struct irdma_sc_qp *qp);
+
+void irdma_terminate_connection(struct irdma_sc_qp *qp,
+ struct irdma_aeqe_info *info);
+
+void irdma_terminate_received(struct irdma_sc_qp *qp,
+ struct irdma_aeqe_info *info);
+/* dynamic memory allocation */
+/* misc */
+u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type);
+void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp);
+int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
+ u8 hmc_fn_id, bool post_sq,
+ bool poll_registers);
+int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count);
+int irdma_get_rdma_features(struct irdma_sc_dev *dev);
+void free_sd_mem(struct irdma_sc_dev *dev);
+int irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
+ struct cqp_cmds_info *pcmdinfo);
+int irdma_process_bh(struct irdma_sc_dev *dev);
+int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info);
+int irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *mem);
+int irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
+ struct irdma_hmc_fcn_info *hmcfcninfo,
+ u16 *pmf_idx);
+void irdma_add_dev_ref(struct irdma_sc_dev *dev);
+void irdma_put_dev_ref(struct irdma_sc_dev *dev);
+void *irdma_remove_cqp_head(struct irdma_sc_dev *dev);
+#endif /* IRDMA_PROTOS_H */
diff --git a/drivers/infiniband/hw/irdma/puda.c b/drivers/infiniband/hw/irdma/puda.c
new file mode 100644
index 000000000000..694e5a9ed15d
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/puda.c
@@ -0,0 +1,1734 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "osdep.h"
+#include "hmc.h"
+#include "defs.h"
+#include "type.h"
+#include "protos.h"
+#include "puda.h"
+#include "ws.h"
+
+static void irdma_ieq_receive(struct irdma_sc_vsi *vsi,
+ struct irdma_puda_buf *buf);
+static void irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid);
+static void irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
+ struct irdma_puda_buf *buf, u32 wqe_idx);
+/**
+ * irdma_puda_get_listbuf - get buffer from puda list
+ * @list: list to use for buffers (ILQ or IEQ)
+ */
+static struct irdma_puda_buf *irdma_puda_get_listbuf(struct list_head *list)
+{
+ struct irdma_puda_buf *buf = NULL;
+
+ if (!list_empty(list)) {
+ buf = (struct irdma_puda_buf *)list->next;
+ list_del((struct list_head *)&buf->list);
+ }
+
+ return buf;
+}
+
+/**
+ * irdma_puda_get_bufpool - return buffer from resource
+ * @rsrc: resource to use for buffer
+ */
+struct irdma_puda_buf *irdma_puda_get_bufpool(struct irdma_puda_rsrc *rsrc)
+{
+ struct irdma_puda_buf *buf = NULL;
+ struct list_head *list = &rsrc->bufpool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+ buf = irdma_puda_get_listbuf(list);
+ if (buf) {
+ rsrc->avail_buf_count--;
+ buf->vsi = rsrc->vsi;
+ } else {
+ rsrc->stats_buf_alloc_fail++;
+ }
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+
+ return buf;
+}
+
+/**
+ * irdma_puda_ret_bufpool - return buffer to rsrc list
+ * @rsrc: resource to use for buffer
+ * @buf: buffer to return to resource
+ */
+void irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc,
+ struct irdma_puda_buf *buf)
+{
+ unsigned long flags;
+
+ buf->do_lpb = false;
+ spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+ list_add(&buf->list, &rsrc->bufpool);
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+ rsrc->avail_buf_count++;
+}
+
+/**
+ * irdma_puda_post_recvbuf - set wqe for rcv buffer
+ * @rsrc: resource ptr
+ * @wqe_idx: wqe index to use
+ * @buf: puda buffer for rcv q
+ * @initial: flag if during init time
+ */
+static void irdma_puda_post_recvbuf(struct irdma_puda_rsrc *rsrc, u32 wqe_idx,
+ struct irdma_puda_buf *buf, bool initial)
+{
+ __le64 *wqe;
+ struct irdma_sc_qp *qp = &rsrc->qp;
+ u64 offset24 = 0;
+
+ /* Synch buffer for use by device */
+ dma_sync_single_for_device(rsrc->dev->hw->device, buf->mem.pa,
+ buf->mem.size, DMA_BIDIRECTIONAL);
+ qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
+ wqe = qp->qp_uk.rq_base[wqe_idx].elem;
+ if (!initial)
+ get_64bit_val(wqe, 24, &offset24);
+
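+ /* invert the current valid bit so the reused RQ WQE carries the
+ * opposite polarity
+ */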
+ offset24 = (offset24) ? 0 : FIELD_PREP(IRDMAQPSQ_VALID, 1);
+
+ set_64bit_val(wqe, 16, 0);
+ set_64bit_val(wqe, 0, buf->mem.pa);
+ if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, buf->mem.size));
+ } else {
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, buf->mem.size) |
+ offset24);
+ }
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, offset24);
+}
+
+/**
+ * irdma_puda_replenish_rq - post rcv buffers
+ * @rsrc: resource to use for buffer
+ * @initial: flag if during init time
+ */
+static int irdma_puda_replenish_rq(struct irdma_puda_rsrc *rsrc, bool initial)
+{
+ u32 i;
+ u32 invalid_cnt = rsrc->rxq_invalid_cnt;
+ struct irdma_puda_buf *buf = NULL;
+
+ for (i = 0; i < invalid_cnt; i++) {
+ buf = irdma_puda_get_bufpool(rsrc);
+ if (!buf)
+ return -ENOBUFS;
+ irdma_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf, initial);
+ rsrc->rx_wqe_idx = ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
+ rsrc->rxq_invalid_cnt--;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_puda_alloc_buf - allocate mem for buffer
+ * @dev: iwarp device
+ * @len: length of buffer
+ */
+static struct irdma_puda_buf *irdma_puda_alloc_buf(struct irdma_sc_dev *dev,
+ u32 len)
+{
+ struct irdma_puda_buf *buf;
+ struct irdma_virt_mem buf_mem;
+
+ buf_mem.size = sizeof(struct irdma_puda_buf);
+ buf_mem.va = kzalloc(buf_mem.size, GFP_KERNEL);
+ if (!buf_mem.va)
+ return NULL;
+
+ buf = buf_mem.va;
+ buf->mem.size = len;
+ buf->mem.va = kzalloc(buf->mem.size, GFP_KERNEL);
+ if (!buf->mem.va)
+ goto free_virt;
+ buf->mem.pa = dma_map_single(dev->hw->device, buf->mem.va,
+ buf->mem.size, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev->hw->device, buf->mem.pa)) {
+ kfree(buf->mem.va);
+ goto free_virt;
+ }
+
+ buf->buf_mem.va = buf_mem.va;
+ buf->buf_mem.size = buf_mem.size;
+
+ return buf;
+
+free_virt:
+ kfree(buf_mem.va);
+ return NULL;
+}
+
+/**
+ * irdma_puda_dele_buf - delete buffer back to system
+ * @dev: iwarp device
+ * @buf: buffer to free
+ */
+static void irdma_puda_dele_buf(struct irdma_sc_dev *dev,
+ struct irdma_puda_buf *buf)
+{
+ dma_unmap_single(dev->hw->device, buf->mem.pa, buf->mem.size,
+ DMA_BIDIRECTIONAL);
+ kfree(buf->mem.va);
+ kfree(buf->buf_mem.va);
+}
+
+/**
+ * irdma_puda_get_next_send_wqe - return next wqe for processing
+ * @qp: puda qp for wqe
+ * @wqe_idx: wqe index for caller
+ */
+static __le64 *irdma_puda_get_next_send_wqe(struct irdma_qp_uk *qp,
+ u32 *wqe_idx)
+{
+ int ret_code = 0;
+
+ *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
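+ /* every pass through index 0 flips the SQ valid-bit polarity */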
+ if (!*wqe_idx)
+ qp->swqe_polarity = !qp->swqe_polarity;
+ IRDMA_RING_MOVE_HEAD(qp->sq_ring, ret_code);
+ if (ret_code)
+ return NULL;
+
+ return qp->sq_base[*wqe_idx].elem;
+}
+
+/**
+ * irdma_puda_poll_info - poll cq for completion
+ * @cq: cq for poll
+ * @info: info return for successful completion
+ */
+static int irdma_puda_poll_info(struct irdma_sc_cq *cq,
+ struct irdma_puda_cmpl_info *info)
+{
+ struct irdma_cq_uk *cq_uk = &cq->cq_uk;
+ u64 qword0, qword2, qword3, qword6;
+ __le64 *cqe;
+ __le64 *ext_cqe = NULL;
+ u64 qword7 = 0;
+ u64 comp_ctx;
+ bool valid_bit;
+ bool ext_valid = false;
+ u32 major_err, minor_err;
+ u32 peek_head;
+ bool error;
+ u8 polarity;
+
+ cqe = IRDMA_GET_CURRENT_CQ_ELEM(&cq->cq_uk);
+ get_64bit_val(cqe, 24, &qword3);
+ valid_bit = (bool)FIELD_GET(IRDMA_CQ_VALID, qword3);
+ if (valid_bit != cq_uk->polarity)
+ return -ENOENT;
+
+ /* Ensure CQE contents are read after valid bit is checked */
+ dma_rmb();
+
+ if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
+
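+ /* the extended CQE sits in the next ring slot; validate it with the
+ * polarity of that (possibly wrapped) position before using it
+ */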
+ if (ext_valid) {
+ peek_head = (cq_uk->cq_ring.head + 1) % cq_uk->cq_ring.size;
+ ext_cqe = cq_uk->cq_base[peek_head].buf;
+ get_64bit_val(ext_cqe, 24, &qword7);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
+ if (!peek_head)
+ polarity ^= 1;
+ if (polarity != cq_uk->polarity)
+ return -ENOENT;
+
+ /* Ensure ext CQE contents are read after ext valid bit is checked */
+ dma_rmb();
+
+ IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
+ if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
+ cq_uk->polarity = !cq_uk->polarity;
+ /* update cq tail in cq shadow memory also */
+ IRDMA_RING_MOVE_TAIL(cq_uk->cq_ring);
+ }
+
+ print_hex_dump_debug("PUDA: PUDA CQE", DUMP_PREFIX_OFFSET, 16, 8, cqe,
+ 32, false);
+ if (ext_valid)
+ print_hex_dump_debug("PUDA: PUDA EXT-CQE", DUMP_PREFIX_OFFSET,
+ 16, 8, ext_cqe, 32, false);
+
+ error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
+ if (error) {
+ ibdev_dbg(to_ibdev(cq->dev), "PUDA: receive error\n");
+ major_err = (u32)(FIELD_GET(IRDMA_CQ_MAJERR, qword3));
+ minor_err = (u32)(FIELD_GET(IRDMA_CQ_MINERR, qword3));
+ info->compl_error = major_err << 16 | minor_err;
+ return -EIO;
+ }
+
+ get_64bit_val(cqe, 0, &qword0);
+ get_64bit_val(cqe, 16, &qword2);
+
+ info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
+ info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
+ if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
+
+ get_64bit_val(cqe, 8, &comp_ctx);
+ info->qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
+ info->wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
+
+ if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
+ if (ext_valid) {
+ info->vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
+ if (info->vlan_valid) {
+ get_64bit_val(ext_cqe, 16, &qword6);
+ info->vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
+ }
+ info->smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
+ if (info->smac_valid) {
+ get_64bit_val(ext_cqe, 16, &qword6);
+ info->smac[0] = (u8)((qword6 >> 40) & 0xFF);
+ info->smac[1] = (u8)((qword6 >> 32) & 0xFF);
+ info->smac[2] = (u8)((qword6 >> 24) & 0xFF);
+ info->smac[3] = (u8)((qword6 >> 16) & 0xFF);
+ info->smac[4] = (u8)((qword6 >> 8) & 0xFF);
+ info->smac[5] = (u8)(qword6 & 0xFF);
+ }
+ }
+
+ if (cq->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
+ info->vlan_valid = (bool)FIELD_GET(IRDMA_VLAN_TAG_VALID, qword3);
+ info->l4proto = (u8)FIELD_GET(IRDMA_UDA_L4PROTO, qword2);
+ info->l3proto = (u8)FIELD_GET(IRDMA_UDA_L3PROTO, qword2);
+ }
+
+ info->payload_len = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_puda_poll_cmpl - processes completion for cq
+ * @dev: iwarp device
+ * @cq: cq getting interrupt
+ * @compl_err: return any completion err
+ */
+int irdma_puda_poll_cmpl(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq,
+ u32 *compl_err)
+{
+ struct irdma_qp_uk *qp;
+ struct irdma_cq_uk *cq_uk = &cq->cq_uk;
+ struct irdma_puda_cmpl_info info = {};
+ int ret = 0;
+ struct irdma_puda_buf *buf;
+ struct irdma_puda_rsrc *rsrc;
+ u8 cq_type = cq->cq_type;
+ unsigned long flags;
+
+ if (cq_type == IRDMA_CQ_TYPE_ILQ || cq_type == IRDMA_CQ_TYPE_IEQ) {
+ rsrc = (cq_type == IRDMA_CQ_TYPE_ILQ) ? cq->vsi->ilq :
+ cq->vsi->ieq;
+ } else {
+ ibdev_dbg(to_ibdev(dev), "PUDA: qp_type error\n");
+ return -EINVAL;
+ }
+
+ ret = irdma_puda_poll_info(cq, &info);
+ *compl_err = info.compl_error;
+ if (ret == -ENOENT)
+ return ret;
+ if (ret)
+ goto done;
+
+ qp = info.qp;
+ if (!qp || !rsrc) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ if (qp->qp_id != rsrc->qp_id) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ if (info.q_type == IRDMA_CQE_QTYPE_RQ) {
+ buf = (struct irdma_puda_buf *)(uintptr_t)
+ qp->rq_wrid_array[info.wqe_idx];
+
+ /* reusing so synch the buffer for CPU use */
+ dma_sync_single_for_cpu(dev->hw->device, buf->mem.pa,
+ buf->mem.size, DMA_BIDIRECTIONAL);
+ /* Get all the tcpip information in the buf header */
+ ret = irdma_puda_get_tcpip_info(&info, buf);
+ if (ret) {
+ rsrc->stats_rcvd_pkt_err++;
+ if (cq_type == IRDMA_CQ_TYPE_ILQ) {
+ irdma_ilq_putback_rcvbuf(&rsrc->qp, buf,
+ info.wqe_idx);
+ } else {
+ irdma_puda_ret_bufpool(rsrc, buf);
+ irdma_puda_replenish_rq(rsrc, false);
+ }
+ goto done;
+ }
+
+ rsrc->stats_pkt_rcvd++;
+ rsrc->compl_rxwqe_idx = info.wqe_idx;
+ ibdev_dbg(to_ibdev(dev), "PUDA: RQ completion\n");
+ rsrc->receive(rsrc->vsi, buf);
+ if (cq_type == IRDMA_CQ_TYPE_ILQ)
+ irdma_ilq_putback_rcvbuf(&rsrc->qp, buf, info.wqe_idx);
+ else
+ irdma_puda_replenish_rq(rsrc, false);
+
+ } else {
+ ibdev_dbg(to_ibdev(dev), "PUDA: SQ completion\n");
+ buf = (struct irdma_puda_buf *)(uintptr_t)
+ qp->sq_wrtrk_array[info.wqe_idx].wrid;
+
+ /* reusing so synch the buffer for CPU use */
+ dma_sync_single_for_cpu(dev->hw->device, buf->mem.pa,
+ buf->mem.size, DMA_BIDIRECTIONAL);
+ IRDMA_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
+ rsrc->xmit_complete(rsrc->vsi, buf);
+ spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+ rsrc->tx_wqe_avail_cnt++;
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+ if (!list_empty(&rsrc->txpend))
+ irdma_puda_send_buf(rsrc, NULL);
+ }
+
+done:
+ IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
+ if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
+ cq_uk->polarity = !cq_uk->polarity;
+ /* update cq tail in cq shadow memory also */
+ IRDMA_RING_MOVE_TAIL(cq_uk->cq_ring);
+ set_64bit_val(cq_uk->shadow_area, 0,
+ IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring));
+
+ return ret;
+}
+
+/**
+ * irdma_puda_send - complete send wqe for transmit
+ * @qp: puda qp for send
+ * @info: buffer information for transmit
+ */
+int irdma_puda_send(struct irdma_sc_qp *qp, struct irdma_puda_send_info *info)
+{
+ __le64 *wqe;
+ u32 iplen, l4len;
+ u64 hdr[2];
+ u32 wqe_idx;
+ u8 iipt;
+
+ /* L4 (TCP) header length in 32-bit words */
+ l4len = info->tcplen >> 2;
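+ /* IP header length in 32-bit words: 5 (20B) for IPv4, 10 (40B) for IPv6 */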
+ if (info->ipv4) {
+ iipt = 3;
+ iplen = 5;
+ } else {
+ iipt = 1;
+ iplen = 10;
+ }
+
+ wqe = irdma_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
+ if (!wqe)
+ return -ENOMEM;
+
+ qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;
+ /* Third line of WQE descriptor */
+ /* maclen is in words */
+
+ if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ hdr[0] = 0; /* Dest_QPN and Dest_QKey only for UD */
+ hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_L4LEN, l4len) |
+ FIELD_PREP(IRDMAQPSQ_AHID, info->ah_id) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_VALID,
+ qp->qp_uk.swqe_polarity);
+
+ /* Fourth line of WQE descriptor */
+
+ set_64bit_val(wqe, 0, info->paddr);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, info->len) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity));
+ } else {
+ hdr[0] = FIELD_PREP(IRDMA_UDA_QPSQ_MACLEN, info->maclen >> 1) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_IPLEN, iplen) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_L4T, 1) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_IIPT, iipt) |
+ FIELD_PREP(IRDMA_GEN1_UDA_QPSQ_L4LEN, l4len);
+
+ hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_DOLOOPBACK, info->do_lpb) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity);
+
+ /* Fourth line of WQE descriptor */
+
+ set_64bit_val(wqe, 0, info->paddr);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, info->len));
+ }
+
+ set_64bit_val(wqe, 16, hdr[0]);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr[1]);
+
+ print_hex_dump_debug("PUDA: PUDA SEND WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, 32, false);
+ irdma_uk_qp_post_wr(&qp->qp_uk);
+ return 0;
+}
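
irdma_puda_send() packs the two header quadwords of the UDA SQ WQE with FIELD_PREP(), which shifts a value into the bit position described by a mask. The stand-alone sketch below shows that packing with illustrative MODEL_* masks and helper functions; they are assumptions for the example, not the driver's IRDMA_UDA_QPSQ_* definitions or the <linux/bitfield.h> implementation.

#include <stdint.h>
#include <stdio.h>

/* Illustrative, non-overlapping masks; not the driver's real field layout. */
#define MODEL_QPSQ_OPCODE 0x00000000fc000000ULL	/* bits 26..31 */
#define MODEL_QPSQ_L4LEN  0x000000f000000000ULL	/* bits 36..39 */
#define MODEL_QPSQ_VALID  0x8000000000000000ULL	/* bit 63 */

/* Same idea as FIELD_PREP(): shift the value into the mask's position. */
static uint64_t model_field_prep(uint64_t mask, uint64_t val)
{
	return (val << __builtin_ctzll(mask)) & mask;
}

/* Same idea as FIELD_GET(): extract the value back out of a word. */
static uint64_t model_field_get(uint64_t mask, uint64_t word)
{
	return (word & mask) >> __builtin_ctzll(mask);
}

int main(void)
{
	uint64_t l4len = 20 >> 2;	/* 20-byte TCP header -> 5 DWORDs, as in irdma_puda_send() */
	uint64_t hdr;

	hdr = model_field_prep(MODEL_QPSQ_OPCODE, 3) |	/* opcode value is illustrative */
	      model_field_prep(MODEL_QPSQ_L4LEN, l4len) |
	      model_field_prep(MODEL_QPSQ_VALID, 1);	/* swqe polarity/valid bit */

	printf("hdr=0x%016llx l4len=%llu\n", (unsigned long long)hdr,
	       (unsigned long long)model_field_get(MODEL_QPSQ_L4LEN, hdr));
	return 0;
}
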
+
+/**
+ * irdma_puda_send_buf - transmit puda buffer
+ * @rsrc: resource to use for buffer
+ * @buf: puda buffer to transmit
+ */
+void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
+ struct irdma_puda_buf *buf)
+{
+ struct irdma_puda_send_info info;
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+ /* if no WQE is available, or we are not coming from a completion
+ * and there are already pending buffers, queue the new buffer
+ */
+ if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
+ list_add_tail(&buf->list, &rsrc->txpend);
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+ rsrc->stats_sent_pkt_q++;
+ if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
+ ibdev_dbg(to_ibdev(rsrc->dev),
+ "PUDA: adding to txpend\n");
+ return;
+ }
+ rsrc->tx_wqe_avail_cnt--;
+ /* if we are coming from a completion and have pending buffers,
+ * get one from the pending list
+ */
+ if (!buf) {
+ buf = irdma_puda_get_listbuf(&rsrc->txpend);
+ if (!buf)
+ goto done;
+ }
+
+ info.scratch = buf;
+ info.paddr = buf->mem.pa;
+ info.len = buf->totallen;
+ info.tcplen = buf->tcphlen;
+ info.ipv4 = buf->ipv4;
+
+ if (rsrc->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ info.ah_id = buf->ah_id;
+ } else {
+ info.maclen = buf->maclen;
+ info.do_lpb = buf->do_lpb;
+ }
+
+ /* Synch buffer for use by device */
+ dma_sync_single_for_cpu(rsrc->dev->hw->device, buf->mem.pa,
+ buf->mem.size, DMA_BIDIRECTIONAL);
+ ret = irdma_puda_send(&rsrc->qp, &info);
+ if (ret) {
+ rsrc->tx_wqe_avail_cnt++;
+ rsrc->stats_sent_pkt_q++;
+ list_add(&buf->list, &rsrc->txpend);
+ if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
+ ibdev_dbg(to_ibdev(rsrc->dev),
+ "PUDA: adding to puda_send\n");
+ } else {
+ rsrc->stats_pkt_sent++;
+ }
+done:
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+}
+
+/**
+ * irdma_puda_qp_setctx - during init, set qp's context
+ * @rsrc: qp's resource
+ */
+static void irdma_puda_qp_setctx(struct irdma_puda_rsrc *rsrc)
+{
+ struct irdma_sc_qp *qp = &rsrc->qp;
+ __le64 *qp_ctx = qp->hw_host_ctx;
+
+ set_64bit_val(qp_ctx, 8, qp->sq_pa);
+ set_64bit_val(qp_ctx, 16, qp->rq_pa);
+ set_64bit_val(qp_ctx, 24,
+ FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
+ FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size));
+ set_64bit_val(qp_ctx, 48,
+ FIELD_PREP(IRDMAQPC_SNDMSS, rsrc->buf_size));
+ set_64bit_val(qp_ctx, 56, 0);
+ if (qp->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ set_64bit_val(qp_ctx, 64, 1);
+ set_64bit_val(qp_ctx, 136,
+ FIELD_PREP(IRDMAQPC_TXCQNUM, rsrc->cq_id) |
+ FIELD_PREP(IRDMAQPC_RXCQNUM, rsrc->cq_id));
+ set_64bit_val(qp_ctx, 144,
+ FIELD_PREP(IRDMAQPC_STAT_INDEX, rsrc->stats_idx));
+ set_64bit_val(qp_ctx, 160,
+ FIELD_PREP(IRDMAQPC_PRIVEN, 1) |
+ FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, rsrc->stats_idx_valid));
+ set_64bit_val(qp_ctx, 168,
+ FIELD_PREP(IRDMAQPC_QPCOMPCTX, (uintptr_t)qp));
+ set_64bit_val(qp_ctx, 176,
+ FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
+ FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
+ FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
+
+ print_hex_dump_debug("PUDA: PUDA QP CONTEXT", DUMP_PREFIX_OFFSET, 16,
+ 8, qp_ctx, IRDMA_QP_CTX_SIZE, false);
+}
+
+/**
+ * irdma_puda_qp_wqe - setup wqe for qp create
+ * @dev: Device
+ * @qp: Resource qp
+ */
+static int irdma_puda_qp_wqe(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+ struct irdma_ccq_cqe_info compl_info;
+ int status = 0;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+ set_64bit_val(wqe, 40, qp->shadow_area_pa);
+
+ hdr = qp->qp_uk.qp_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, IRDMA_QP_TYPE_UDA) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, 2) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("PUDA: PUDA QP CREATE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, 40, false);
+ irdma_sc_cqp_post_sq(cqp);
+ status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_QP,
+ &compl_info);
+
+ return status;
+}
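
irdma_puda_qp_wqe() follows the usual CQP posting discipline: fill the descriptor payload, issue dma_wmb(), and only then write the quadword that carries the valid bit, so hardware never observes a half-written WQE. A stand-alone sketch of that ordering, using a simplified four-quadword descriptor and __sync_synchronize() as a stand-in barrier (both are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

/* Fill the descriptor payload first, publish the valid bit last. */
static void model_post_wqe(volatile uint64_t *wqe, uint64_t ctx_pa,
			   uint64_t shadow_pa, uint64_t hdr_with_valid)
{
	wqe[0] = 0;
	wqe[1] = shadow_pa;		/* payload quadwords first */
	wqe[2] = ctx_pa;

	__sync_synchronize();		/* stand-in for dma_wmb() */

	wqe[3] = hdr_with_valid;	/* quadword with the valid bit, written last */
}

int main(void)
{
	volatile uint64_t wqe[4] = { 0, 0, 0, 0 };

	model_post_wqe(wqe, 0x1000, 0x2000, (1ULL << 63) | 0x2a);
	printf("hdr=0x%016llx\n", (unsigned long long)wqe[3]);
	return 0;
}
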
+
+/**
+ * irdma_puda_qp_create - create qp for resource
+ * @rsrc: resource to use for buffer
+ */
+static int irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc)
+{
+ struct irdma_sc_qp *qp = &rsrc->qp;
+ struct irdma_qp_uk *ukqp = &qp->qp_uk;
+ int ret = 0;
+ u32 sq_size, rq_size;
+ struct irdma_dma_mem *mem;
+
+ sq_size = rsrc->sq_size * IRDMA_QP_WQE_MIN_SIZE;
+ rq_size = rsrc->rq_size * IRDMA_QP_WQE_MIN_SIZE;
+ rsrc->qpmem.size = ALIGN((sq_size + rq_size + (IRDMA_SHADOW_AREA_SIZE << 3) + IRDMA_QP_CTX_SIZE),
+ IRDMA_HW_PAGE_SIZE);
+ rsrc->qpmem.va = dma_alloc_coherent(rsrc->dev->hw->device,
+ rsrc->qpmem.size, &rsrc->qpmem.pa,
+ GFP_KERNEL);
+ if (!rsrc->qpmem.va)
+ return -ENOMEM;
+
+ mem = &rsrc->qpmem;
+ memset(mem->va, 0, rsrc->qpmem.size);
+ qp->hw_sq_size = irdma_get_encoded_wqe_size(rsrc->sq_size, IRDMA_QUEUE_TYPE_SQ_RQ);
+ qp->hw_rq_size = irdma_get_encoded_wqe_size(rsrc->rq_size, IRDMA_QUEUE_TYPE_SQ_RQ);
+ qp->pd = &rsrc->sc_pd;
+ qp->qp_uk.qp_type = IRDMA_QP_TYPE_UDA;
+ qp->dev = rsrc->dev;
+ qp->qp_uk.back_qp = rsrc;
+ qp->sq_pa = mem->pa;
+ qp->rq_pa = qp->sq_pa + sq_size;
+ qp->vsi = rsrc->vsi;
+ ukqp->sq_base = mem->va;
+ ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
+ ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
+ ukqp->uk_attrs = &qp->dev->hw_attrs.uk_attrs;
+ qp->shadow_area_pa = qp->rq_pa + rq_size;
+ qp->hw_host_ctx = ukqp->shadow_area + IRDMA_SHADOW_AREA_SIZE;
+ qp->hw_host_ctx_pa = qp->shadow_area_pa + (IRDMA_SHADOW_AREA_SIZE << 3);
+ qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
+ ukqp->qp_id = rsrc->qp_id;
+ ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
+ ukqp->rq_wrid_array = rsrc->rq_wrid_array;
+ ukqp->sq_size = rsrc->sq_size;
+ ukqp->rq_size = rsrc->rq_size;
+
+ IRDMA_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
+ IRDMA_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
+ IRDMA_RING_INIT(ukqp->rq_ring, ukqp->rq_size);
+ ukqp->wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
+
+ ret = rsrc->dev->ws_add(qp->vsi, qp->user_pri);
+ if (ret) {
+ dma_free_coherent(rsrc->dev->hw->device, rsrc->qpmem.size,
+ rsrc->qpmem.va, rsrc->qpmem.pa);
+ rsrc->qpmem.va = NULL;
+ return ret;
+ }
+
+ irdma_qp_add_qos(qp);
+ irdma_puda_qp_setctx(rsrc);
+
+ if (rsrc->dev->ceq_valid)
+ ret = irdma_cqp_qp_create_cmd(rsrc->dev, qp);
+ else
+ ret = irdma_puda_qp_wqe(rsrc->dev, qp);
+ if (ret) {
+ irdma_qp_rem_qos(qp);
+ rsrc->dev->ws_remove(qp->vsi, qp->user_pri);
+ dma_free_coherent(rsrc->dev->hw->device, rsrc->qpmem.size,
+ rsrc->qpmem.va, rsrc->qpmem.pa);
+ rsrc->qpmem.va = NULL;
+ }
+
+ return ret;
+}
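
irdma_puda_qp_create() carves one DMA allocation into four consecutive regions, SQ, RQ, shadow area and QP context, in that order, then rounds the total up to a hardware page. The helper below recomputes those offsets for a given ring size; the MODEL_* constants are assumed values chosen for illustration, the real ones come from the driver headers.

#include <stdint.h>
#include <stdio.h>

/* Assumed constants for illustration only. */
#define MODEL_QP_WQE_MIN_SIZE	 32u
#define MODEL_SHADOW_AREA_QWORDS  8u	/* shadow area size in quadwords */
#define MODEL_QP_CTX_SIZE	256u
#define MODEL_HW_PAGE_SIZE	4096u

#define MODEL_ALIGN(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

static void model_puda_qp_layout(uint32_t sq_wqes, uint32_t rq_wqes)
{
	uint64_t sq_bytes = (uint64_t)sq_wqes * MODEL_QP_WQE_MIN_SIZE;
	uint64_t rq_bytes = (uint64_t)rq_wqes * MODEL_QP_WQE_MIN_SIZE;
	uint64_t shadow_bytes = MODEL_SHADOW_AREA_QWORDS << 3;
	uint64_t total = MODEL_ALIGN(sq_bytes + rq_bytes + shadow_bytes +
				     MODEL_QP_CTX_SIZE, MODEL_HW_PAGE_SIZE);

	printf("sq@0x0  rq@0x%llx  shadow@0x%llx  ctx@0x%llx  total=0x%llx\n",
	       (unsigned long long)sq_bytes,
	       (unsigned long long)(sq_bytes + rq_bytes),
	       (unsigned long long)(sq_bytes + rq_bytes + shadow_bytes),
	       (unsigned long long)total);
}

int main(void)
{
	model_puda_qp_layout(128, 1024);	/* example ring sizes */
	return 0;
}
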
+
+/**
+ * irdma_puda_cq_wqe - setup wqe for CQ create
+ * @dev: Device
+ * @cq: resource for cq
+ */
+static int irdma_puda_cq_wqe(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+ struct irdma_ccq_cqe_info compl_info;
+ int status = 0;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
+ set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
+ set_64bit_val(wqe, 32, cq->cq_pa);
+ set_64bit_val(wqe, 40, cq->shadow_area_pa);
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
+ FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
+
+ hdr = cq->cq_uk.cq_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("PUDA: PUDA CREATE CQ", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_sc_cqp_post_sq(dev->cqp);
+ status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_CQ,
+ &compl_info);
+ if (!status) {
+ struct irdma_sc_ceq *ceq = dev->ceq[0];
+
+ if (ceq && ceq->reg_cq)
+ status = irdma_sc_add_cq_ctx(ceq, cq);
+ }
+
+ return status;
+}
+
+/**
+ * irdma_puda_cq_create - create cq for resource
+ * @rsrc: resource for which to create the cq
+ */
+static int irdma_puda_cq_create(struct irdma_puda_rsrc *rsrc)
+{
+ struct irdma_sc_dev *dev = rsrc->dev;
+ struct irdma_sc_cq *cq = &rsrc->cq;
+ int ret = 0;
+ u32 cqsize;
+ struct irdma_dma_mem *mem;
+ struct irdma_cq_init_info info = {};
+ struct irdma_cq_uk_init_info *init_info = &info.cq_uk_init_info;
+
+ cq->vsi = rsrc->vsi;
+ cqsize = rsrc->cq_size * (sizeof(struct irdma_cqe));
+ rsrc->cqmem.size = ALIGN(cqsize + sizeof(struct irdma_cq_shadow_area),
+ IRDMA_CQ0_ALIGNMENT);
+ rsrc->cqmem.va = dma_alloc_coherent(dev->hw->device, rsrc->cqmem.size,
+ &rsrc->cqmem.pa, GFP_KERNEL);
+ if (!rsrc->cqmem.va)
+ return -ENOMEM;
+
+ mem = &rsrc->cqmem;
+ info.dev = dev;
+ info.type = (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ) ?
+ IRDMA_CQ_TYPE_ILQ : IRDMA_CQ_TYPE_IEQ;
+ info.shadow_read_threshold = rsrc->cq_size >> 2;
+ info.cq_base_pa = mem->pa;
+ info.shadow_area_pa = mem->pa + cqsize;
+ init_info->cq_base = mem->va;
+ init_info->shadow_area = (__le64 *)((u8 *)mem->va + cqsize);
+ init_info->cq_size = rsrc->cq_size;
+ init_info->cq_id = rsrc->cq_id;
+ info.ceqe_mask = true;
+ info.ceq_id_valid = true;
+ info.vsi = rsrc->vsi;
+
+ ret = irdma_sc_cq_init(cq, &info);
+ if (ret)
+ goto error;
+
+ if (rsrc->dev->ceq_valid)
+ ret = irdma_cqp_cq_create_cmd(dev, cq);
+ else
+ ret = irdma_puda_cq_wqe(dev, cq);
+error:
+ if (ret) {
+ dma_free_coherent(dev->hw->device, rsrc->cqmem.size,
+ rsrc->cqmem.va, rsrc->cqmem.pa);
+ rsrc->cqmem.va = NULL;
+ }
+
+ return ret;
+}
+
+/**
+ * irdma_puda_free_qp - free qp for resource
+ * @rsrc: resource whose qp is freed
+ */
+static void irdma_puda_free_qp(struct irdma_puda_rsrc *rsrc)
+{
+ int ret;
+ struct irdma_ccq_cqe_info compl_info;
+ struct irdma_sc_dev *dev = rsrc->dev;
+
+ if (rsrc->dev->ceq_valid) {
+ irdma_cqp_qp_destroy_cmd(dev, &rsrc->qp);
+ rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);
+ return;
+ }
+
+ ret = irdma_sc_qp_destroy(&rsrc->qp, 0, false, true, true);
+ if (ret)
+ ibdev_dbg(to_ibdev(dev),
+ "PUDA: error puda qp destroy wqe, status = %d\n",
+ ret);
+ if (!ret) {
+ ret = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_DESTROY_QP,
+ &compl_info);
+ if (ret)
+ ibdev_dbg(to_ibdev(dev),
+ "PUDA: error puda qp destroy failed, status = %d\n",
+ ret);
+ }
+ rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);
+}
+
+/**
+ * irdma_puda_free_cq - free cq for resource
+ * @rsrc: resource whose cq is freed
+ */
+static void irdma_puda_free_cq(struct irdma_puda_rsrc *rsrc)
+{
+ int ret;
+ struct irdma_ccq_cqe_info compl_info;
+ struct irdma_sc_dev *dev = rsrc->dev;
+
+ if (rsrc->dev->ceq_valid) {
+ irdma_cqp_cq_destroy_cmd(dev, &rsrc->cq);
+ return;
+ }
+
+ ret = irdma_sc_cq_destroy(&rsrc->cq, 0, true);
+ if (ret)
+ ibdev_dbg(to_ibdev(dev), "PUDA: error ieq cq destroy\n");
+ if (!ret) {
+ ret = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_DESTROY_CQ,
+ &compl_info);
+ if (ret)
+ ibdev_dbg(to_ibdev(dev),
+ "PUDA: error ieq qp destroy done\n");
+ }
+}
+
+/**
+ * irdma_puda_dele_rsrc - delete all resources during close
+ * @vsi: VSI structure of device
+ * @type: type of resource to delete
+ * @reset: true if reset chip
+ */
+void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
+ bool reset)
+{
+ struct irdma_sc_dev *dev = vsi->dev;
+ struct irdma_puda_rsrc *rsrc;
+ struct irdma_puda_buf *buf = NULL;
+ struct irdma_puda_buf *nextbuf = NULL;
+ struct irdma_virt_mem *vmem;
+ struct irdma_sc_ceq *ceq;
+
+ ceq = vsi->dev->ceq[0];
+ switch (type) {
+ case IRDMA_PUDA_RSRC_TYPE_ILQ:
+ rsrc = vsi->ilq;
+ vmem = &vsi->ilq_mem;
+ vsi->ilq = NULL;
+ if (ceq && ceq->reg_cq)
+ irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
+ break;
+ case IRDMA_PUDA_RSRC_TYPE_IEQ:
+ rsrc = vsi->ieq;
+ vmem = &vsi->ieq_mem;
+ vsi->ieq = NULL;
+ if (ceq && ceq->reg_cq)
+ irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
+ break;
+ default:
+ ibdev_dbg(to_ibdev(dev), "PUDA: error resource type = 0x%x\n",
+ type);
+ return;
+ }
+
+ switch (rsrc->cmpl) {
+ case PUDA_HASH_CRC_COMPLETE:
+ case PUDA_QP_CREATED:
+ irdma_qp_rem_qos(&rsrc->qp);
+
+ if (!reset)
+ irdma_puda_free_qp(rsrc);
+
+ dma_free_coherent(dev->hw->device, rsrc->qpmem.size,
+ rsrc->qpmem.va, rsrc->qpmem.pa);
+ rsrc->qpmem.va = NULL;
+ fallthrough;
+ case PUDA_CQ_CREATED:
+ if (!reset)
+ irdma_puda_free_cq(rsrc);
+
+ dma_free_coherent(dev->hw->device, rsrc->cqmem.size,
+ rsrc->cqmem.va, rsrc->cqmem.pa);
+ rsrc->cqmem.va = NULL;
+ break;
+ default:
+ ibdev_dbg(to_ibdev(rsrc->dev), "PUDA: error no resources\n");
+ break;
+ }
+ /* Free all allocated puda buffers for both tx and rx */
+ buf = rsrc->alloclist;
+ while (buf) {
+ nextbuf = buf->next;
+ irdma_puda_dele_buf(dev, buf);
+ buf = nextbuf;
+ rsrc->alloc_buf_count--;
+ }
+
+ kfree(vmem->va);
+}
+
+/**
+ * irdma_puda_allocbufs - allocate buffers for resource
+ * @rsrc: resource for buffer allocation
+ * @count: number of buffers to create
+ */
+static int irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc, u32 count)
+{
+ u32 i;
+ struct irdma_puda_buf *buf;
+ struct irdma_puda_buf *nextbuf;
+
+ for (i = 0; i < count; i++) {
+ buf = irdma_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
+ if (!buf) {
+ rsrc->stats_buf_alloc_fail++;
+ return -ENOMEM;
+ }
+ irdma_puda_ret_bufpool(rsrc, buf);
+ rsrc->alloc_buf_count++;
+ if (!rsrc->alloclist) {
+ rsrc->alloclist = buf;
+ } else {
+ nextbuf = rsrc->alloclist;
+ rsrc->alloclist = buf;
+ buf->next = nextbuf;
+ }
+ }
+
+ rsrc->avail_buf_count = rsrc->alloc_buf_count;
+
+ return 0;
+}
+
+/**
+ * irdma_puda_create_rsrc - create resource (ilq or ieq)
+ * @vsi: sc VSI struct
+ * @info: resource information
+ */
+int irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
+ struct irdma_puda_rsrc_info *info)
+{
+ struct irdma_sc_dev *dev = vsi->dev;
+ int ret = 0;
+ struct irdma_puda_rsrc *rsrc;
+ u32 pudasize;
+ u32 sqwridsize, rqwridsize;
+ struct irdma_virt_mem *vmem;
+
+ info->count = 1;
+ pudasize = sizeof(struct irdma_puda_rsrc);
+ sqwridsize = info->sq_size * sizeof(struct irdma_sq_uk_wr_trk_info);
+ rqwridsize = info->rq_size * 8;
+ switch (info->type) {
+ case IRDMA_PUDA_RSRC_TYPE_ILQ:
+ vmem = &vsi->ilq_mem;
+ break;
+ case IRDMA_PUDA_RSRC_TYPE_IEQ:
+ vmem = &vsi->ieq_mem;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ vmem->size = pudasize + sqwridsize + rqwridsize;
+ vmem->va = kzalloc(vmem->size, GFP_KERNEL);
+ if (!vmem->va)
+ return -ENOMEM;
+
+ rsrc = vmem->va;
+ spin_lock_init(&rsrc->bufpool_lock);
+ switch (info->type) {
+ case IRDMA_PUDA_RSRC_TYPE_ILQ:
+ vsi->ilq = vmem->va;
+ vsi->ilq_count = info->count;
+ rsrc->receive = info->receive;
+ rsrc->xmit_complete = info->xmit_complete;
+ break;
+ case IRDMA_PUDA_RSRC_TYPE_IEQ:
+ vsi->ieq_count = info->count;
+ vsi->ieq = vmem->va;
+ rsrc->receive = irdma_ieq_receive;
+ rsrc->xmit_complete = irdma_ieq_tx_compl;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ rsrc->type = info->type;
+ rsrc->sq_wrtrk_array = (struct irdma_sq_uk_wr_trk_info *)
+ ((u8 *)vmem->va + pudasize);
+ rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
+ /* Initialize all ieq lists */
+ INIT_LIST_HEAD(&rsrc->bufpool);
+ INIT_LIST_HEAD(&rsrc->txpend);
+
+ rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
+ irdma_sc_pd_init(dev, &rsrc->sc_pd, info->pd_id, info->abi_ver);
+ rsrc->qp_id = info->qp_id;
+ rsrc->cq_id = info->cq_id;
+ rsrc->sq_size = info->sq_size;
+ rsrc->rq_size = info->rq_size;
+ rsrc->cq_size = info->rq_size + info->sq_size;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
+ rsrc->cq_size += info->rq_size;
+ }
+ rsrc->buf_size = info->buf_size;
+ rsrc->dev = dev;
+ rsrc->vsi = vsi;
+ rsrc->stats_idx = info->stats_idx;
+ rsrc->stats_idx_valid = info->stats_idx_valid;
+
+ ret = irdma_puda_cq_create(rsrc);
+ if (!ret) {
+ rsrc->cmpl = PUDA_CQ_CREATED;
+ ret = irdma_puda_qp_create(rsrc);
+ }
+ if (ret) {
+ ibdev_dbg(to_ibdev(dev),
+ "PUDA: error qp_create type=%d, status=%d\n",
+ rsrc->type, ret);
+ goto error;
+ }
+ rsrc->cmpl = PUDA_QP_CREATED;
+
+ ret = irdma_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
+ if (ret) {
+ ibdev_dbg(to_ibdev(dev), "PUDA: error alloc_buf\n");
+ goto error;
+ }
+
+ rsrc->rxq_invalid_cnt = info->rq_size;
+ ret = irdma_puda_replenish_rq(rsrc, true);
+ if (ret)
+ goto error;
+
+ if (info->type == IRDMA_PUDA_RSRC_TYPE_IEQ) {
+ rsrc->check_crc = true;
+ rsrc->cmpl = PUDA_HASH_CRC_COMPLETE;
+ }
+
+ irdma_sc_ccq_arm(&rsrc->cq);
+ return 0;
+
+error:
+ irdma_puda_dele_rsrc(vsi, info->type, false);
+
+ return ret;
+}
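
irdma_puda_create_rsrc() places the rsrc struct, the SQ work-request tracking array and the RQ wrid array in a single kzalloc() and derives the two array pointers by byte offset. A user-space sketch of the same single-allocation carve-up, with simplified stand-in structs rather than the driver's types:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct model_rsrc { uint32_t qp_id; uint32_t cq_id; };	 /* stand-in for irdma_puda_rsrc */
struct model_wr_trk { uint64_t wrid; uint32_t wr_len; }; /* stand-in for irdma_sq_uk_wr_trk_info */

int main(void)
{
	uint32_t sq_size = 128, rq_size = 1024;
	size_t pudasize = sizeof(struct model_rsrc);
	size_t sqwridsize = sq_size * sizeof(struct model_wr_trk);
	size_t rqwridsize = rq_size * sizeof(uint64_t);
	uint8_t *va = calloc(1, pudasize + sqwridsize + rqwridsize);

	if (!va)
		return 1;

	/* carve the three regions out of the one allocation, as the driver does */
	struct model_rsrc *rsrc = (struct model_rsrc *)va;
	struct model_wr_trk *sq_wrtrk = (struct model_wr_trk *)(va + pudasize);
	uint64_t *rq_wrid = (uint64_t *)(va + pudasize + sqwridsize);

	printf("rsrc=%p sq_wrtrk=%p rq_wrid=%p\n",
	       (void *)rsrc, (void *)sq_wrtrk, (void *)rq_wrid);
	free(va);
	return 0;
}
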
+
+/**
+ * irdma_ilq_putback_rcvbuf - put back an ilq receive buffer on the rq
+ * @qp: ilq's qp resource
+ * @buf: puda buffer for rcv q
+ * @wqe_idx: wqe index of completed rcvbuf
+ */
+static void irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
+ struct irdma_puda_buf *buf, u32 wqe_idx)
+{
+ __le64 *wqe;
+ u64 offset8, offset24;
+
+ /* Synch buffer for use by device */
+ dma_sync_single_for_device(qp->dev->hw->device, buf->mem.pa,
+ buf->mem.size, DMA_BIDIRECTIONAL);
+ wqe = qp->qp_uk.rq_base[wqe_idx].elem;
+ get_64bit_val(wqe, 24, &offset24);
+ if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ get_64bit_val(wqe, 8, &offset8);
+ if (offset24)
+ offset8 &= ~FIELD_PREP(IRDMAQPSQ_VALID, 1);
+ else
+ offset8 |= FIELD_PREP(IRDMAQPSQ_VALID, 1);
+ set_64bit_val(wqe, 8, offset8);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+ }
+ if (offset24)
+ offset24 = 0;
+ else
+ offset24 = FIELD_PREP(IRDMAQPSQ_VALID, 1);
+
+ set_64bit_val(wqe, 24, offset24);
+}
+
+/**
+ * irdma_ieq_get_fpdu_len - get length of fpdu with or without marker
+ * @pfpdu: pointer to fpdu
+ * @datap: pointer to data in the buffer
+ * @rcv_seq: seqnum of the data buffer
+ */
+static u16 irdma_ieq_get_fpdu_len(struct irdma_pfpdu *pfpdu, u8 *datap,
+ u32 rcv_seq)
+{
+ u32 marker_seq, end_seq, blk_start;
+ u8 marker_len = pfpdu->marker_len;
+ u16 total_len = 0;
+ u16 fpdu_len;
+
+ blk_start = (pfpdu->rcv_start_seq - rcv_seq) & (IRDMA_MRK_BLK_SZ - 1);
+ if (!blk_start) {
+ total_len = marker_len;
+ marker_seq = rcv_seq + IRDMA_MRK_BLK_SZ;
+ if (marker_len && *(u32 *)datap)
+ return 0;
+ } else {
+ marker_seq = rcv_seq + blk_start;
+ }
+
+ datap += total_len;
+ fpdu_len = ntohs(*(__be16 *)datap);
+ fpdu_len += IRDMA_IEQ_MPA_FRAMING;
+ fpdu_len = (fpdu_len + 3) & 0xfffc;
+
+ if (fpdu_len > pfpdu->max_fpdu_data)
+ return 0;
+
+ total_len += fpdu_len;
+ end_seq = rcv_seq + total_len;
+ while ((int)(marker_seq - end_seq) < 0) {
+ total_len += marker_len;
+ end_seq += marker_len;
+ marker_seq += IRDMA_MRK_BLK_SZ;
+ }
+
+ return total_len;
+}
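
irdma_ieq_get_fpdu_len() turns the 2-byte MPA length field into the on-the-wire FPDU length: add the 6 bytes of MPA framing (length field plus CRC), round up to a 4-byte boundary, then add one marker for every 512-byte block boundary the FPDU crosses. The sketch below recomputes that arithmetic under the simplifying assumption that the next marker lies a full block ahead of the start of the FPDU (the blk_start handling above covers the general case):

#include <stdint.h>
#include <stdio.h>

#define MODEL_IEQ_MPA_FRAMING	6u	/* 2-byte MPA length field + 4-byte CRC */
#define MODEL_MRK_BLK_SZ	512u

/* Wire length of one FPDU starting at rcv_seq, given the MPA ULPDU length
 * field and the (possibly zero) per-block marker length.
 */
static uint32_t model_fpdu_len(uint16_t mpa_len_field, uint32_t rcv_seq,
			       uint32_t marker_len)
{
	uint32_t fpdu_len = mpa_len_field + MODEL_IEQ_MPA_FRAMING;
	uint32_t total, end_seq, marker_seq;

	fpdu_len = (fpdu_len + 3) & ~3u;		/* pad to 4-byte boundary */
	total = fpdu_len;
	end_seq = rcv_seq + total;
	marker_seq = rcv_seq + MODEL_MRK_BLK_SZ;	/* assumed next marker position */
	while (marker_len && (int32_t)(marker_seq - end_seq) < 0) {
		total += marker_len;			/* one marker per block crossed */
		end_seq += marker_len;
		marker_seq += MODEL_MRK_BLK_SZ;
	}
	return total;
}

int main(void)
{
	/* 1500-byte ULPDU with 4-byte markers every 512 bytes */
	printf("fpdu_len=%u\n", model_fpdu_len(1500, 0, 4));
	return 0;
}
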
+
+/**
+ * irdma_ieq_copy_to_txbuf - copy data from rcv buf to tx buf
+ * @buf: rcv buffer with partial
+ * @txbuf: tx buffer for sending back
+ * @buf_offset: rcv buffer offset to copy from
+ * @txbuf_offset: offset in tx buf to copy to
+ * @len: length of data to copy
+ */
+static void irdma_ieq_copy_to_txbuf(struct irdma_puda_buf *buf,
+ struct irdma_puda_buf *txbuf,
+ u16 buf_offset, u32 txbuf_offset, u32 len)
+{
+ void *mem1 = (u8 *)buf->mem.va + buf_offset;
+ void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;
+
+ memcpy(mem2, mem1, len);
+}
+
+/**
+ * irdma_ieq_setup_tx_buf - setup tx buffer for partial handling
+ * @buf: receive buffer with partial
+ * @txbuf: buffer to prepare
+ */
+static void irdma_ieq_setup_tx_buf(struct irdma_puda_buf *buf,
+ struct irdma_puda_buf *txbuf)
+{
+ txbuf->tcphlen = buf->tcphlen;
+ txbuf->ipv4 = buf->ipv4;
+
+ if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ txbuf->hdrlen = txbuf->tcphlen;
+ irdma_ieq_copy_to_txbuf(buf, txbuf, IRDMA_TCP_OFFSET, 0,
+ txbuf->hdrlen);
+ } else {
+ txbuf->maclen = buf->maclen;
+ txbuf->hdrlen = buf->hdrlen;
+ irdma_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);
+ }
+}
+
+/**
+ * irdma_ieq_check_first_buf - check if rcv buffer's seq is in range
+ * @buf: receive exception buffer
+ * @fps: first partial sequence number
+ */
+static void irdma_ieq_check_first_buf(struct irdma_puda_buf *buf, u32 fps)
+{
+ u32 offset;
+
+ if (buf->seqnum < fps) {
+ offset = fps - buf->seqnum;
+ if (offset > buf->datalen)
+ return;
+ buf->data += offset;
+ buf->datalen -= (u16)offset;
+ buf->seqnum = fps;
+ }
+}
+
+/**
+ * irdma_ieq_compl_pfpdu - write txbuf with full fpdu
+ * @ieq: ieq resource
+ * @rxlist: ieq's received buffer list
+ * @pbufl: temporary list for buffers for fpdu
+ * @txbuf: tx buffer for fpdu
+ * @fpdu_len: total length of fpdu
+ */
+static void irdma_ieq_compl_pfpdu(struct irdma_puda_rsrc *ieq,
+ struct list_head *rxlist,
+ struct list_head *pbufl,
+ struct irdma_puda_buf *txbuf, u16 fpdu_len)
+{
+ struct irdma_puda_buf *buf;
+ u32 nextseqnum;
+ u16 txoffset, bufoffset;
+
+ buf = irdma_puda_get_listbuf(pbufl);
+ if (!buf)
+ return;
+
+ nextseqnum = buf->seqnum + fpdu_len;
+ irdma_ieq_setup_tx_buf(buf, txbuf);
+ if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ txoffset = txbuf->hdrlen;
+ txbuf->totallen = txbuf->hdrlen + fpdu_len;
+ txbuf->data = (u8 *)txbuf->mem.va + txoffset;
+ } else {
+ txoffset = buf->hdrlen;
+ txbuf->totallen = buf->hdrlen + fpdu_len;
+ txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
+ }
+ bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
+
+ do {
+ if (buf->datalen >= fpdu_len) {
+ /* copied full fpdu */
+ irdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset,
+ fpdu_len);
+ buf->datalen -= fpdu_len;
+ buf->data += fpdu_len;
+ buf->seqnum = nextseqnum;
+ break;
+ }
+ /* copy partial fpdu */
+ irdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset,
+ buf->datalen);
+ txoffset += buf->datalen;
+ fpdu_len -= buf->datalen;
+ irdma_puda_ret_bufpool(ieq, buf);
+ buf = irdma_puda_get_listbuf(pbufl);
+ if (!buf)
+ return;
+
+ bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
+ } while (1);
+
+ /* last buffer on the list */
+ if (buf->datalen)
+ list_add(&buf->list, rxlist);
+ else
+ irdma_puda_ret_bufpool(ieq, buf);
+}
+
+/**
+ * irdma_ieq_create_pbufl - create buffer list for single fpdu
+ * @pfpdu: pointer to fpdu
+ * @rxlist: resource list for receive ieq buffers
+ * @pbufl: temp. list for buffers for fpdu
+ * @buf: first receive buffer
+ * @fpdu_len: total length of fpdu
+ */
+static int irdma_ieq_create_pbufl(struct irdma_pfpdu *pfpdu,
+ struct list_head *rxlist,
+ struct list_head *pbufl,
+ struct irdma_puda_buf *buf, u16 fpdu_len)
+{
+ int status = 0;
+ struct irdma_puda_buf *nextbuf;
+ u32 nextseqnum;
+ u16 plen = fpdu_len - buf->datalen;
+ bool done = false;
+
+ nextseqnum = buf->seqnum + buf->datalen;
+ do {
+ nextbuf = irdma_puda_get_listbuf(rxlist);
+ if (!nextbuf) {
+ status = -ENOBUFS;
+ break;
+ }
+ list_add_tail(&nextbuf->list, pbufl);
+ if (nextbuf->seqnum != nextseqnum) {
+ pfpdu->bad_seq_num++;
+ status = -ERANGE;
+ break;
+ }
+ if (nextbuf->datalen >= plen) {
+ done = true;
+ } else {
+ plen -= nextbuf->datalen;
+ nextseqnum = nextbuf->seqnum + nextbuf->datalen;
+ }
+
+ } while (!done);
+
+ return status;
+}
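
irdma_ieq_create_pbufl() keeps pulling buffers off the rx list as long as each one starts exactly at the sequence number where the previous one ended, until the outstanding FPDU length is covered; a gap is the bad_seq_num case. The contiguity check reduces to simple arithmetic, sketched stand-alone below with an illustrative buffer struct:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct model_buf {
	uint32_t seqnum;
	uint16_t datalen;
};

/* Return true if the buffers cover at least 'needed' bytes with no sequence gap. */
static bool model_chain_covers(const struct model_buf *bufs, unsigned int n,
			       uint32_t needed)
{
	uint32_t nextseq = bufs[0].seqnum + bufs[0].datalen;
	uint32_t have = bufs[0].datalen;
	unsigned int i;

	for (i = 1; i < n && have < needed; i++) {
		if (bufs[i].seqnum != nextseq)
			return false;		/* gap: bad_seq_num case */
		have += bufs[i].datalen;
		nextseq = bufs[i].seqnum + bufs[i].datalen;
	}
	return have >= needed;
}

int main(void)
{
	struct model_buf bufs[] = {
		{ .seqnum = 1000, .datalen = 400 },
		{ .seqnum = 1400, .datalen = 400 },
		{ .seqnum = 1800, .datalen = 400 },
	};

	printf("covers 1000 bytes: %d\n", model_chain_covers(bufs, 3, 1000));
	return 0;
}
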
+
+/**
+ * irdma_ieq_handle_partial - process partial fpdu buffer
+ * @ieq: ieq resource
+ * @pfpdu: partial management per user qp
+ * @buf: receive buffer
+ * @fpdu_len: fpdu len in the buffer
+ */
+static int irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq,
+ struct irdma_pfpdu *pfpdu,
+ struct irdma_puda_buf *buf, u16 fpdu_len)
+{
+ int status = 0;
+ u8 *crcptr;
+ u32 mpacrc;
+ u32 seqnum = buf->seqnum;
+ struct list_head pbufl; /* partial buffer list */
+ struct irdma_puda_buf *txbuf = NULL;
+ struct list_head *rxlist = &pfpdu->rxlist;
+
+ ieq->partials_handled++;
+
+ INIT_LIST_HEAD(&pbufl);
+ list_add(&buf->list, &pbufl);
+
+ status = irdma_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
+ if (status)
+ goto error;
+
+ txbuf = irdma_puda_get_bufpool(ieq);
+ if (!txbuf) {
+ pfpdu->no_tx_bufs++;
+ status = -ENOBUFS;
+ goto error;
+ }
+
+ irdma_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
+ irdma_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);
+
+ crcptr = txbuf->data + fpdu_len - 4;
+ mpacrc = *(u32 *)crcptr;
+ if (ieq->check_crc) {
+ status = irdma_ieq_check_mpacrc(txbuf->data, fpdu_len - 4,
+ mpacrc);
+ if (status) {
+ ibdev_dbg(to_ibdev(ieq->dev), "IEQ: error bad crc\n");
+ goto error;
+ }
+ }
+
+ print_hex_dump_debug("IEQ: IEQ TX BUFFER", DUMP_PREFIX_OFFSET, 16, 8,
+ txbuf->mem.va, txbuf->totallen, false);
+ if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ txbuf->ah_id = pfpdu->ah->ah_info.ah_idx;
+ txbuf->do_lpb = true;
+ irdma_puda_send_buf(ieq, txbuf);
+ pfpdu->rcv_nxt = seqnum + fpdu_len;
+ return status;
+
+error:
+ while (!list_empty(&pbufl)) {
+ buf = list_last_entry(&pbufl, struct irdma_puda_buf, list);
+ list_move(&buf->list, rxlist);
+ }
+ if (txbuf)
+ irdma_puda_ret_bufpool(ieq, txbuf);
+
+ return status;
+}
+
+/**
+ * irdma_ieq_process_buf - process buffer rcvd for ieq
+ * @ieq: ieq resource
+ * @pfpdu: partial management per user qp
+ * @buf: receive buffer
+ */
+static int irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq,
+ struct irdma_pfpdu *pfpdu,
+ struct irdma_puda_buf *buf)
+{
+ u16 fpdu_len = 0;
+ u16 datalen = buf->datalen;
+ u8 *datap = buf->data;
+ u8 *crcptr;
+ u16 ioffset = 0;
+ u32 mpacrc;
+ u32 seqnum = buf->seqnum;
+ u16 len = 0;
+ u16 full = 0;
+ bool partial = false;
+ struct irdma_puda_buf *txbuf;
+ struct list_head *rxlist = &pfpdu->rxlist;
+ int ret = 0;
+
+ ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
+ while (datalen) {
+ fpdu_len = irdma_ieq_get_fpdu_len(pfpdu, datap, buf->seqnum);
+ if (!fpdu_len) {
+ ibdev_dbg(to_ibdev(ieq->dev),
+ "IEQ: error bad fpdu len\n");
+ list_add(&buf->list, rxlist);
+ return -EINVAL;
+ }
+
+ if (datalen < fpdu_len) {
+ partial = true;
+ break;
+ }
+ crcptr = datap + fpdu_len - 4;
+ mpacrc = *(u32 *)crcptr;
+ if (ieq->check_crc)
+ ret = irdma_ieq_check_mpacrc(datap, fpdu_len - 4,
+ mpacrc);
+ if (ret) {
+ list_add(&buf->list, rxlist);
+ ibdev_dbg(to_ibdev(ieq->dev),
+ "ERR: IRDMA_ERR_MPA_CRC\n");
+ return -EINVAL;
+ }
+ full++;
+ pfpdu->fpdu_processed++;
+ ieq->fpdu_processed++;
+ datap += fpdu_len;
+ len += fpdu_len;
+ datalen -= fpdu_len;
+ }
+ if (full) {
+ /* copy full fpdus into the txbuf and send them out */
+ txbuf = irdma_puda_get_bufpool(ieq);
+ if (!txbuf) {
+ pfpdu->no_tx_bufs++;
+ list_add(&buf->list, rxlist);
+ return -ENOBUFS;
+ }
+ /* modify txbuf's buffer header */
+ irdma_ieq_setup_tx_buf(buf, txbuf);
+ /* copy full fpdu's to new buffer */
+ if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ irdma_ieq_copy_to_txbuf(buf, txbuf, ioffset,
+ txbuf->hdrlen, len);
+ txbuf->totallen = txbuf->hdrlen + len;
+ txbuf->ah_id = pfpdu->ah->ah_info.ah_idx;
+ } else {
+ irdma_ieq_copy_to_txbuf(buf, txbuf, ioffset,
+ buf->hdrlen, len);
+ txbuf->totallen = buf->hdrlen + len;
+ }
+ irdma_ieq_update_tcpip_info(txbuf, len, buf->seqnum);
+ print_hex_dump_debug("IEQ: IEQ TX BUFFER", DUMP_PREFIX_OFFSET,
+ 16, 8, txbuf->mem.va, txbuf->totallen,
+ false);
+ txbuf->do_lpb = true;
+ irdma_puda_send_buf(ieq, txbuf);
+
+ if (!datalen) {
+ pfpdu->rcv_nxt = buf->seqnum + len;
+ irdma_puda_ret_bufpool(ieq, buf);
+ return 0;
+ }
+ buf->data = datap;
+ buf->seqnum = seqnum + len;
+ buf->datalen = datalen;
+ pfpdu->rcv_nxt = buf->seqnum;
+ }
+ if (partial)
+ return irdma_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);
+
+ return 0;
+}
+
+/**
+ * irdma_ieq_process_fpdus - process fpdu buffers on its list
+ * @qp: qp with partial fpdus
+ * @ieq: ieq resource
+ */
+void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp,
+ struct irdma_puda_rsrc *ieq)
+{
+ struct irdma_pfpdu *pfpdu = &qp->pfpdu;
+ struct list_head *rxlist = &pfpdu->rxlist;
+ struct irdma_puda_buf *buf;
+ int status;
+
+ do {
+ if (list_empty(rxlist))
+ break;
+ buf = irdma_puda_get_listbuf(rxlist);
+ if (!buf) {
+ ibdev_dbg(to_ibdev(ieq->dev), "IEQ: error no buf\n");
+ break;
+ }
+ if (buf->seqnum != pfpdu->rcv_nxt) {
+ /* This could be out of order or missing packet */
+ pfpdu->out_of_order++;
+ list_add(&buf->list, rxlist);
+ break;
+ }
+ /* keep processing buffers from the head of the list */
+ status = irdma_ieq_process_buf(ieq, pfpdu, buf);
+ if (status == -EINVAL) {
+ pfpdu->mpa_crc_err = true;
+ while (!list_empty(rxlist)) {
+ buf = irdma_puda_get_listbuf(rxlist);
+ irdma_puda_ret_bufpool(ieq, buf);
+ pfpdu->crc_err++;
+ ieq->crc_err++;
+ }
+ /* create CQP for AE */
+ irdma_ieq_mpa_crc_ae(ieq->dev, qp);
+ }
+ } while (!status);
+}
+
+/**
+ * irdma_ieq_create_ah - create an address handle for IEQ
+ * @qp: qp pointer
+ * @buf: buf received on IEQ used to create AH
+ */
+static int irdma_ieq_create_ah(struct irdma_sc_qp *qp, struct irdma_puda_buf *buf)
+{
+ struct irdma_ah_info ah_info = {};
+
+ qp->pfpdu.ah_buf = buf;
+ irdma_puda_ieq_get_ah_info(qp, &ah_info);
+ return irdma_puda_create_ah(qp->vsi->dev, &ah_info, false,
+ IRDMA_PUDA_RSRC_TYPE_IEQ, qp,
+ &qp->pfpdu.ah);
+}
+
+/**
+ * irdma_ieq_handle_exception - handle qp's exception
+ * @ieq: ieq resource
+ * @qp: qp receiving exception
+ * @buf: receive buffer
+ */
+static void irdma_ieq_handle_exception(struct irdma_puda_rsrc *ieq,
+ struct irdma_sc_qp *qp,
+ struct irdma_puda_buf *buf)
+{
+ struct irdma_pfpdu *pfpdu = &qp->pfpdu;
+ u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
+ u32 rcv_wnd = hw_host_ctx[23];
+ /* first partial seq # in q2 */
+ u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
+ struct list_head *rxlist = &pfpdu->rxlist;
+ unsigned long flags = 0;
+ u8 hw_rev = qp->dev->hw_attrs.uk_attrs.hw_rev;
+
+ print_hex_dump_debug("IEQ: IEQ RX BUFFER", DUMP_PREFIX_OFFSET, 16, 8,
+ buf->mem.va, buf->totallen, false);
+
+ spin_lock_irqsave(&pfpdu->lock, flags);
+ pfpdu->total_ieq_bufs++;
+ if (pfpdu->mpa_crc_err) {
+ pfpdu->crc_err++;
+ goto error;
+ }
+ if (pfpdu->mode && fps != pfpdu->fps) {
+ /* clean up qp as it is a new partial sequence */
+ irdma_ieq_cleanup_qp(ieq, qp);
+ ibdev_dbg(to_ibdev(ieq->dev), "IEQ: restarting new partial\n");
+ pfpdu->mode = false;
+ }
+
+ if (!pfpdu->mode) {
+ print_hex_dump_debug("IEQ: Q2 BUFFER", DUMP_PREFIX_OFFSET, 16,
+ 8, (u64 *)qp->q2_buf, 128, false);
+ /* First_Partial_Sequence_Number check */
+ pfpdu->rcv_nxt = fps;
+ pfpdu->fps = fps;
+ pfpdu->mode = true;
+ pfpdu->max_fpdu_data = (buf->ipv4) ?
+ (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV4) :
+ (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV6);
+ pfpdu->pmode_count++;
+ ieq->pmode_count++;
+ INIT_LIST_HEAD(rxlist);
+ irdma_ieq_check_first_buf(buf, fps);
+ }
+
+ if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) {
+ pfpdu->bad_seq_num++;
+ ieq->bad_seq_num++;
+ goto error;
+ }
+
+ if (!list_empty(rxlist)) {
+ if (buf->seqnum != pfpdu->nextseqnum) {
+ irdma_send_ieq_ack(qp);
+ /* throw away out-of-order, duplicates */
+ goto error;
+ }
+ }
+ /* Insert buf before head */
+ list_add_tail(&buf->list, rxlist);
+ pfpdu->nextseqnum = buf->seqnum + buf->datalen;
+ pfpdu->lastrcv_buf = buf;
+ if (hw_rev >= IRDMA_GEN_2 && !pfpdu->ah) {
+ irdma_ieq_create_ah(qp, buf);
+ if (!pfpdu->ah)
+ goto error;
+ goto exit;
+ }
+ if (hw_rev == IRDMA_GEN_1)
+ irdma_ieq_process_fpdus(qp, ieq);
+ else if (pfpdu->ah && pfpdu->ah->ah_info.ah_valid)
+ irdma_ieq_process_fpdus(qp, ieq);
+exit:
+ spin_unlock_irqrestore(&pfpdu->lock, flags);
+
+ return;
+
+error:
+ irdma_puda_ret_bufpool(ieq, buf);
+ spin_unlock_irqrestore(&pfpdu->lock, flags);
+}
+
+/**
+ * irdma_ieq_receive - received exception buffer
+ * @vsi: VSI of device
+ * @buf: exception buffer received
+ */
+static void irdma_ieq_receive(struct irdma_sc_vsi *vsi,
+ struct irdma_puda_buf *buf)
+{
+ struct irdma_puda_rsrc *ieq = vsi->ieq;
+ struct irdma_sc_qp *qp = NULL;
+ u32 wqe_idx = ieq->compl_rxwqe_idx;
+
+ qp = irdma_ieq_get_qp(vsi->dev, buf);
+ if (!qp) {
+ ieq->stats_bad_qp_id++;
+ irdma_puda_ret_bufpool(ieq, buf);
+ } else {
+ irdma_ieq_handle_exception(ieq, qp, buf);
+ }
+ /*
+ * ieq->rx_wqe_idx tells irdma_puda_replenish_rq()
+ * at which wqe_idx to start replenishing the rq
+ */
+ if (!ieq->rxq_invalid_cnt)
+ ieq->rx_wqe_idx = wqe_idx;
+ ieq->rxq_invalid_cnt++;
+}
+
+/**
+ * irdma_ieq_tx_compl - put back after sending completed exception buffer
+ * @vsi: sc VSI struct
+ * @sqwrid: pointer to puda buffer
+ */
+static void irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid)
+{
+ struct irdma_puda_rsrc *ieq = vsi->ieq;
+ struct irdma_puda_buf *buf = sqwrid;
+
+ irdma_puda_ret_bufpool(ieq, buf);
+}
+
+/**
+ * irdma_ieq_cleanup_qp - clean up pending fpdu buffers as qp is destroyed
+ * @ieq: ieq resource
+ * @qp: qp whose pending fpdu buffers are released
+ */
+void irdma_ieq_cleanup_qp(struct irdma_puda_rsrc *ieq, struct irdma_sc_qp *qp)
+{
+ struct irdma_puda_buf *buf;
+ struct irdma_pfpdu *pfpdu = &qp->pfpdu;
+ struct list_head *rxlist = &pfpdu->rxlist;
+
+ if (qp->pfpdu.ah) {
+ irdma_puda_free_ah(ieq->dev, qp->pfpdu.ah);
+ qp->pfpdu.ah = NULL;
+ qp->pfpdu.ah_buf = NULL;
+ }
+
+ if (!pfpdu->mode)
+ return;
+
+ while (!list_empty(rxlist)) {
+ buf = irdma_puda_get_listbuf(rxlist);
+ irdma_puda_ret_bufpool(ieq, buf);
+ }
+}
diff --git a/drivers/infiniband/hw/irdma/puda.h b/drivers/infiniband/hw/irdma/puda.h
new file mode 100644
index 000000000000..d65041bee667
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/puda.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2015 - 2020 Intel Corporation */
+#ifndef IRDMA_PUDA_H
+#define IRDMA_PUDA_H
+
+#define IRDMA_IEQ_MPA_FRAMING 6
+#define IRDMA_TCP_OFFSET 40
+#define IRDMA_IPV4_PAD 20
+#define IRDMA_MRK_BLK_SZ 512
+
+enum puda_rsrc_type {
+ IRDMA_PUDA_RSRC_TYPE_ILQ = 1,
+ IRDMA_PUDA_RSRC_TYPE_IEQ,
+ IRDMA_PUDA_RSRC_TYPE_MAX, /* Must be last entry */
+};
+
+enum puda_rsrc_complete {
+ PUDA_CQ_CREATED = 1,
+ PUDA_QP_CREATED,
+ PUDA_TX_COMPLETE,
+ PUDA_RX_COMPLETE,
+ PUDA_HASH_CRC_COMPLETE,
+};
+
+struct irdma_sc_dev;
+struct irdma_sc_qp;
+struct irdma_sc_cq;
+
+struct irdma_puda_cmpl_info {
+ struct irdma_qp_uk *qp;
+ u8 q_type;
+ u8 l3proto;
+ u8 l4proto;
+ u16 vlan;
+ u32 payload_len;
+ u32 compl_error; /* No_err=0, else major and minor err code */
+ u32 qp_id;
+ u32 wqe_idx;
+ bool ipv4:1;
+ bool smac_valid:1;
+ bool vlan_valid:1;
+ u8 smac[ETH_ALEN];
+};
+
+struct irdma_puda_send_info {
+ u64 paddr; /* Physical address */
+ u32 len;
+ u32 ah_id;
+ u8 tcplen;
+ u8 maclen;
+ bool ipv4:1;
+ bool do_lpb:1;
+ void *scratch;
+};
+
+struct irdma_puda_buf {
+ struct list_head list; /* MUST be first entry */
+ struct irdma_dma_mem mem; /* DMA memory for the buffer */
+ struct irdma_puda_buf *next; /* for alloclist in rsrc struct */
+ struct irdma_virt_mem buf_mem; /* Buffer memory for this buffer */
+ void *scratch;
+ u8 *iph;
+ u8 *tcph;
+ u8 *data;
+ u16 datalen;
+ u16 vlan_id;
+ u8 tcphlen; /* tcp length in bytes */
+ u8 maclen; /* mac length in bytes */
+ u32 totallen; /* maclen + iphlen + tcphlen + datalen */
+ refcount_t refcount;
+ u8 hdrlen;
+ bool ipv4:1;
+ bool vlan_valid:1;
+ bool do_lpb:1; /* Loopback buffer */
+ bool smac_valid:1;
+ u32 seqnum;
+ u32 ah_id;
+ u8 smac[ETH_ALEN];
+ struct irdma_sc_vsi *vsi;
+};
+
+struct irdma_puda_rsrc_info {
+ void (*receive)(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *buf);
+ void (*xmit_complete)(struct irdma_sc_vsi *vsi, void *sqwrid);
+ enum puda_rsrc_type type; /* ILQ or IEQ */
+ u32 count;
+ u32 pd_id;
+ u32 cq_id;
+ u32 qp_id;
+ u32 sq_size;
+ u32 rq_size;
+ u32 tx_buf_cnt; /* total bufs allocated will be rq_size + tx_buf_cnt */
+ u16 buf_size;
+ u16 stats_idx;
+ bool stats_idx_valid:1;
+ int abi_ver;
+};
+
+struct irdma_puda_rsrc {
+ struct irdma_sc_cq cq;
+ struct irdma_sc_qp qp;
+ struct irdma_sc_pd sc_pd;
+ struct irdma_sc_dev *dev;
+ struct irdma_sc_vsi *vsi;
+ struct irdma_dma_mem cqmem;
+ struct irdma_dma_mem qpmem;
+ struct irdma_virt_mem ilq_mem;
+ enum puda_rsrc_complete cmpl;
+ enum puda_rsrc_type type;
+ u16 buf_size; /* buf must be max datalen + tcpip hdr + mac */
+ u32 cq_id;
+ u32 qp_id;
+ u32 sq_size;
+ u32 rq_size;
+ u32 cq_size;
+ struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
+ u64 *rq_wrid_array;
+ u32 compl_rxwqe_idx;
+ u32 rx_wqe_idx;
+ u32 rxq_invalid_cnt;
+ u32 tx_wqe_avail_cnt;
+ struct list_head txpend;
+ struct list_head bufpool; /* free buffers pool list for recv and xmit */
+ u32 alloc_buf_count;
+ u32 avail_buf_count; /* snapshot of currently available buffers */
+ spinlock_t bufpool_lock;
+ struct irdma_puda_buf *alloclist;
+ void (*receive)(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *buf);
+ void (*xmit_complete)(struct irdma_sc_vsi *vsi, void *sqwrid);
+ /* puda stats */
+ u64 stats_buf_alloc_fail;
+ u64 stats_pkt_rcvd;
+ u64 stats_pkt_sent;
+ u64 stats_rcvd_pkt_err;
+ u64 stats_sent_pkt_q;
+ u64 stats_bad_qp_id;
+ /* IEQ stats */
+ u64 fpdu_processed;
+ u64 bad_seq_num;
+ u64 crc_err;
+ u64 pmode_count;
+ u64 partials_handled;
+ u16 stats_idx;
+ bool check_crc:1;
+ bool stats_idx_valid:1;
+};
+
+struct irdma_puda_buf *irdma_puda_get_bufpool(struct irdma_puda_rsrc *rsrc);
+void irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc,
+ struct irdma_puda_buf *buf);
+void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
+ struct irdma_puda_buf *buf);
+int irdma_puda_send(struct irdma_sc_qp *qp, struct irdma_puda_send_info *info);
+int irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
+ struct irdma_puda_rsrc_info *info);
+void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
+ bool reset);
+int irdma_puda_poll_cmpl(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq,
+ u32 *compl_err);
+
+struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,
+ struct irdma_puda_buf *buf);
+int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
+ struct irdma_puda_buf *buf);
+int irdma_ieq_check_mpacrc(const void *addr, u32 len, u32 val);
+void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
+void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len, u32 seqnum);
+int irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
+int irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq);
+int irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
+void irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq);
+void irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp,
+ struct irdma_ah_info *ah_info);
+int irdma_puda_create_ah(struct irdma_sc_dev *dev,
+ struct irdma_ah_info *ah_info, bool wait,
+ enum puda_rsrc_type type, void *cb_param,
+ struct irdma_sc_ah **ah);
+void irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah);
+void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp,
+ struct irdma_puda_rsrc *ieq);
+void irdma_ieq_cleanup_qp(struct irdma_puda_rsrc *ieq, struct irdma_sc_qp *qp);
+#endif /* IRDMA_PUDA_H */
diff --git a/drivers/infiniband/hw/irdma/trace.c b/drivers/infiniband/hw/irdma/trace.c
new file mode 100644
index 000000000000..fc2f56697741
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/trace.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Intel Corporation */
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+const char *print_ip_addr(struct trace_seq *p, u32 *addr, u16 port, bool ipv4)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ if (ipv4) {
+ __be32 myaddr = htonl(*addr);
+
+ trace_seq_printf(p, "%pI4:%d", &myaddr, htons(port));
+ } else {
+ trace_seq_printf(p, "%pI6:%d", addr, htons(port));
+ }
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
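
print_ip_addr() converts the stored IPv4 address with htonl() before handing it to the %pI4 format specifier, which expects network byte order. The same conversion in plain user-space C, using inet_ntop() in place of the kernel's trace_seq machinery (address and port values are arbitrary examples):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t host_order = 0xc0a80001;	/* 192.168.0.1 held in host byte order */
	struct in_addr in = { .s_addr = htonl(host_order) };
	char str[INET_ADDRSTRLEN];

	inet_ntop(AF_INET, &in, str, sizeof(str));
	printf("%s:%u\n", str, 4321u);		/* arbitrary example port */
	return 0;
}
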
+
+const char *parse_iw_event_type(enum iw_cm_event_type iw_type)
+{
+ switch (iw_type) {
+ case IW_CM_EVENT_CONNECT_REQUEST:
+ return "IwRequest";
+ case IW_CM_EVENT_CONNECT_REPLY:
+ return "IwReply";
+ case IW_CM_EVENT_ESTABLISHED:
+ return "IwEstablished";
+ case IW_CM_EVENT_DISCONNECT:
+ return "IwDisconnect";
+ case IW_CM_EVENT_CLOSE:
+ return "IwClose";
+ }
+
+ return "Unknown";
+}
+
+const char *parse_cm_event_type(enum irdma_cm_event_type cm_type)
+{
+ switch (cm_type) {
+ case IRDMA_CM_EVENT_ESTABLISHED:
+ return "CmEstablished";
+ case IRDMA_CM_EVENT_MPA_REQ:
+ return "CmMPA_REQ";
+ case IRDMA_CM_EVENT_MPA_CONNECT:
+ return "CmMPA_CONNECT";
+ case IRDMA_CM_EVENT_MPA_ACCEPT:
+ return "CmMPA_ACCEPT";
+ case IRDMA_CM_EVENT_MPA_REJECT:
+ return "CmMPA_REJECT";
+ case IRDMA_CM_EVENT_MPA_ESTABLISHED:
+ return "CmMPA_ESTABLISHED";
+ case IRDMA_CM_EVENT_CONNECTED:
+ return "CmConnected";
+ case IRDMA_CM_EVENT_RESET:
+ return "CmReset";
+ case IRDMA_CM_EVENT_ABORTED:
+ return "CmAborted";
+ case IRDMA_CM_EVENT_UNKNOWN:
+ return "none";
+ }
+ return "Unknown";
+}
+
+const char *parse_cm_state(enum irdma_cm_node_state state)
+{
+ switch (state) {
+ case IRDMA_CM_STATE_UNKNOWN:
+ return "UNKNOWN";
+ case IRDMA_CM_STATE_INITED:
+ return "INITED";
+ case IRDMA_CM_STATE_LISTENING:
+ return "LISTENING";
+ case IRDMA_CM_STATE_SYN_RCVD:
+ return "SYN_RCVD";
+ case IRDMA_CM_STATE_SYN_SENT:
+ return "SYN_SENT";
+ case IRDMA_CM_STATE_ONE_SIDE_ESTABLISHED:
+ return "ONE_SIDE_ESTABLISHED";
+ case IRDMA_CM_STATE_ESTABLISHED:
+ return "ESTABLISHED";
+ case IRDMA_CM_STATE_ACCEPTING:
+ return "ACCEPTING";
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ return "MPAREQ_SENT";
+ case IRDMA_CM_STATE_MPAREQ_RCVD:
+ return "MPAREQ_RCVD";
+ case IRDMA_CM_STATE_MPAREJ_RCVD:
+ return "MPAREJ_RECVD";
+ case IRDMA_CM_STATE_OFFLOADED:
+ return "OFFLOADED";
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ return "FIN_WAIT1";
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ return "FIN_WAIT2";
+ case IRDMA_CM_STATE_CLOSE_WAIT:
+ return "CLOSE_WAIT";
+ case IRDMA_CM_STATE_TIME_WAIT:
+ return "TIME_WAIT";
+ case IRDMA_CM_STATE_LAST_ACK:
+ return "LAST_ACK";
+ case IRDMA_CM_STATE_CLOSING:
+ return "CLOSING";
+ case IRDMA_CM_STATE_LISTENER_DESTROYED:
+ return "LISTENER_DESTROYED";
+ case IRDMA_CM_STATE_CLOSED:
+ return "CLOSED";
+ }
+ return ("Bad state");
+}
diff --git a/drivers/infiniband/hw/irdma/trace.h b/drivers/infiniband/hw/irdma/trace.h
new file mode 100644
index 000000000000..b8085a66b9f8
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/trace.h
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Intel Corporation */
+#include "trace_cm.h"
diff --git a/drivers/infiniband/hw/irdma/trace_cm.h b/drivers/infiniband/hw/irdma/trace_cm.h
new file mode 100644
index 000000000000..0d1699b55241
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/trace_cm.h
@@ -0,0 +1,460 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 - 2021 Intel Corporation */
+#if !defined(__TRACE_CM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __TRACE_CM_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "main.h"
+
+const char *print_ip_addr(struct trace_seq *p, u32 *addr, u16 port, bool ipv4);
+const char *parse_iw_event_type(enum iw_cm_event_type iw_type);
+const char *parse_cm_event_type(enum irdma_cm_event_type cm_type);
+const char *parse_cm_state(enum irdma_cm_node_state);
+#define __print_ip_addr(addr, port, ipv4) print_ip_addr(p, addr, port, ipv4)
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM irdma_cm
+
+TRACE_EVENT(irdma_create_listen,
+ TP_PROTO(struct irdma_device *iwdev, struct irdma_cm_info *cm_info),
+ TP_ARGS(iwdev, cm_info),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __dynamic_array(u32, laddr, 4)
+ __field(u16, lport)
+ __field(bool, ipv4)
+ ),
+ TP_fast_assign(__entry->iwdev = iwdev;
+ __entry->lport = cm_info->loc_port;
+ __entry->ipv4 = cm_info->ipv4;
+ memcpy(__get_dynamic_array(laddr),
+ cm_info->loc_addr, 4);
+ ),
+ TP_printk("iwdev=%p loc: %s",
+ __entry->iwdev,
+ __print_ip_addr(__get_dynamic_array(laddr),
+ __entry->lport, __entry->ipv4)
+ )
+);
+
+TRACE_EVENT(irdma_dec_refcnt_listen,
+ TP_PROTO(struct irdma_cm_listener *listener, void *caller),
+ TP_ARGS(listener, caller),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __field(u32, refcnt)
+ __dynamic_array(u32, laddr, 4)
+ __field(u16, lport)
+ __field(bool, ipv4)
+ __field(void *, caller)
+ ),
+ TP_fast_assign(__entry->iwdev = listener->iwdev;
+ __entry->lport = listener->loc_port;
+ __entry->ipv4 = listener->ipv4;
+ memcpy(__get_dynamic_array(laddr),
+ listener->loc_addr, 4);
+ ),
+ TP_printk("iwdev=%p caller=%pS loc: %s",
+ __entry->iwdev,
+ __entry->caller,
+ __print_ip_addr(__get_dynamic_array(laddr),
+ __entry->lport, __entry->ipv4)
+ )
+);
+
+DECLARE_EVENT_CLASS(listener_template,
+ TP_PROTO(struct irdma_cm_listener *listener),
+ TP_ARGS(listener),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __field(u16, lport)
+ __field(u16, vlan_id)
+ __field(bool, ipv4)
+ __field(enum irdma_cm_listener_state,
+ state)
+ __dynamic_array(u32, laddr, 4)
+ ),
+ TP_fast_assign(__entry->iwdev = listener->iwdev;
+ __entry->lport = listener->loc_port;
+ __entry->vlan_id = listener->vlan_id;
+ __entry->ipv4 = listener->ipv4;
+ __entry->state = listener->listener_state;
+ memcpy(__get_dynamic_array(laddr),
+ listener->loc_addr, 4);
+ ),
+ TP_printk("iwdev=%p vlan=%d loc: %s",
+ __entry->iwdev,
+ __entry->vlan_id,
+ __print_ip_addr(__get_dynamic_array(laddr),
+ __entry->lport, __entry->ipv4)
+ )
+);
+
+DEFINE_EVENT(listener_template, irdma_find_listener,
+ TP_PROTO(struct irdma_cm_listener *listener),
+ TP_ARGS(listener));
+
+DEFINE_EVENT(listener_template, irdma_del_multiple_qhash,
+ TP_PROTO(struct irdma_cm_listener *listener),
+ TP_ARGS(listener));
+
+TRACE_EVENT(irdma_negotiate_mpa_v2,
+ TP_PROTO(struct irdma_cm_node *cm_node),
+ TP_ARGS(cm_node),
+ TP_STRUCT__entry(__field(struct irdma_cm_node *, cm_node)
+ __field(u16, ord_size)
+ __field(u16, ird_size)
+ ),
+ TP_fast_assign(__entry->cm_node = cm_node;
+ __entry->ord_size = cm_node->ord_size;
+ __entry->ird_size = cm_node->ird_size;
+ ),
+ TP_printk("MPVA2 Negotiated cm_node=%p ORD:[%d], IRD:[%d]",
+ __entry->cm_node,
+ __entry->ord_size,
+ __entry->ird_size
+ )
+);
+
+DECLARE_EVENT_CLASS(tos_template,
+ TP_PROTO(struct irdma_device *iwdev, u8 tos, u8 user_pri),
+ TP_ARGS(iwdev, tos, user_pri),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __field(u8, tos)
+ __field(u8, user_pri)
+ ),
+ TP_fast_assign(__entry->iwdev = iwdev;
+ __entry->tos = tos;
+ __entry->user_pri = user_pri;
+ ),
+ TP_printk("iwdev=%p TOS:[%d] UP:[%d]",
+ __entry->iwdev,
+ __entry->tos,
+ __entry->user_pri
+ )
+);
+
+DEFINE_EVENT(tos_template, irdma_listener_tos,
+ TP_PROTO(struct irdma_device *iwdev, u8 tos, u8 user_pri),
+ TP_ARGS(iwdev, tos, user_pri));
+
+DEFINE_EVENT(tos_template, irdma_dcb_tos,
+ TP_PROTO(struct irdma_device *iwdev, u8 tos, u8 user_pri),
+ TP_ARGS(iwdev, tos, user_pri));
+
+DECLARE_EVENT_CLASS(qhash_template,
+ TP_PROTO(struct irdma_device *iwdev,
+ struct irdma_cm_listener *listener,
+ const char *dev_addr),
+ TP_ARGS(iwdev, listener, dev_addr),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __field(u16, lport)
+ __field(u16, vlan_id)
+ __field(bool, ipv4)
+ __dynamic_array(u32, laddr, 4)
+ __dynamic_array(u32, mac, ETH_ALEN)
+ ),
+ TP_fast_assign(__entry->iwdev = iwdev;
+ __entry->lport = listener->loc_port;
+ __entry->vlan_id = listener->vlan_id;
+ __entry->ipv4 = listener->ipv4;
+ memcpy(__get_dynamic_array(laddr),
+ listener->loc_addr, 4);
+ ether_addr_copy(__get_dynamic_array(mac),
+ dev_addr);
+ ),
+ TP_printk("iwdev=%p vlan=%d MAC=%6phC loc: %s",
+ __entry->iwdev,
+ __entry->vlan_id,
+ __get_dynamic_array(mac),
+ __print_ip_addr(__get_dynamic_array(laddr),
+ __entry->lport, __entry->ipv4)
+ )
+);
+
+DEFINE_EVENT(qhash_template, irdma_add_mqh_6,
+ TP_PROTO(struct irdma_device *iwdev,
+ struct irdma_cm_listener *listener,
+ const char *dev_addr),
+ TP_ARGS(iwdev, listener, dev_addr));
+
+DEFINE_EVENT(qhash_template, irdma_add_mqh_4,
+ TP_PROTO(struct irdma_device *iwdev,
+ struct irdma_cm_listener *listener,
+ const char *dev_addr),
+ TP_ARGS(iwdev, listener, dev_addr));
+
+TRACE_EVENT(irdma_addr_resolve,
+ TP_PROTO(struct irdma_device *iwdev, char *dev_addr),
+ TP_ARGS(iwdev, dev_addr),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __dynamic_array(u8, mac, ETH_ALEN)
+ ),
+ TP_fast_assign(__entry->iwdev = iwdev;
+ ether_addr_copy(__get_dynamic_array(mac), dev_addr);
+ ),
+ TP_printk("iwdev=%p MAC=%6phC", __entry->iwdev,
+ __get_dynamic_array(mac)
+ )
+);
+
+TRACE_EVENT(irdma_send_cm_event,
+ TP_PROTO(struct irdma_cm_node *cm_node, struct iw_cm_id *cm_id,
+ enum iw_cm_event_type type, int status, void *caller),
+ TP_ARGS(cm_node, cm_id, type, status, caller),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __field(struct irdma_cm_node *, cm_node)
+ __field(struct iw_cm_id *, cm_id)
+ __field(u32, refcount)
+ __field(u16, lport)
+ __field(u16, rport)
+ __field(enum irdma_cm_node_state, state)
+ __field(bool, ipv4)
+ __field(u16, vlan_id)
+ __field(int, accel)
+ __field(enum iw_cm_event_type, type)
+ __field(int, status)
+ __field(void *, caller)
+ __dynamic_array(u32, laddr, 4)
+ __dynamic_array(u32, raddr, 4)
+ ),
+ TP_fast_assign(__entry->iwdev = cm_node->iwdev;
+ __entry->cm_node = cm_node;
+ __entry->cm_id = cm_id;
+ __entry->refcount = refcount_read(&cm_node->refcnt);
+ __entry->state = cm_node->state;
+ __entry->lport = cm_node->loc_port;
+ __entry->rport = cm_node->rem_port;
+ __entry->ipv4 = cm_node->ipv4;
+ __entry->vlan_id = cm_node->vlan_id;
+ __entry->accel = cm_node->accelerated;
+ __entry->type = type;
+ __entry->status = status;
+ __entry->caller = caller;
+ memcpy(__get_dynamic_array(laddr),
+ cm_node->loc_addr, 4);
+ memcpy(__get_dynamic_array(raddr),
+ cm_node->rem_addr, 4);
+ ),
+ TP_printk("iwdev=%p caller=%pS cm_id=%p node=%p refcnt=%d vlan_id=%d accel=%d state=%s event_type=%s status=%d loc: %s rem: %s",
+ __entry->iwdev,
+ __entry->caller,
+ __entry->cm_id,
+ __entry->cm_node,
+ __entry->refcount,
+ __entry->vlan_id,
+ __entry->accel,
+ parse_cm_state(__entry->state),
+ parse_iw_event_type(__entry->type),
+ __entry->status,
+ __print_ip_addr(__get_dynamic_array(laddr),
+ __entry->lport, __entry->ipv4),
+ __print_ip_addr(__get_dynamic_array(raddr),
+ __entry->rport, __entry->ipv4)
+ )
+);
+
+TRACE_EVENT(irdma_send_cm_event_no_node,
+ TP_PROTO(struct iw_cm_id *cm_id, enum iw_cm_event_type type,
+ int status, void *caller),
+ TP_ARGS(cm_id, type, status, caller),
+ TP_STRUCT__entry(__field(struct iw_cm_id *, cm_id)
+ __field(enum iw_cm_event_type, type)
+ __field(int, status)
+ __field(void *, caller)
+ ),
+ TP_fast_assign(__entry->cm_id = cm_id;
+ __entry->type = type;
+ __entry->status = status;
+ __entry->caller = caller;
+ ),
+ TP_printk("cm_id=%p caller=%pS event_type=%s status=%d",
+ __entry->cm_id,
+ __entry->caller,
+ parse_iw_event_type(__entry->type),
+ __entry->status
+ )
+);
+
+DECLARE_EVENT_CLASS(cm_node_template,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __field(struct irdma_cm_node *, cm_node)
+ __field(u32, refcount)
+ __field(u16, lport)
+ __field(u16, rport)
+ __field(enum irdma_cm_node_state, state)
+ __field(bool, ipv4)
+ __field(u16, vlan_id)
+ __field(int, accel)
+ __field(enum irdma_cm_event_type, type)
+ __field(void *, caller)
+ __dynamic_array(u32, laddr, 4)
+ __dynamic_array(u32, raddr, 4)
+ ),
+ TP_fast_assign(__entry->iwdev = cm_node->iwdev;
+ __entry->cm_node = cm_node;
+ __entry->refcount = refcount_read(&cm_node->refcnt);
+ __entry->state = cm_node->state;
+ __entry->lport = cm_node->loc_port;
+ __entry->rport = cm_node->rem_port;
+ __entry->ipv4 = cm_node->ipv4;
+ __entry->vlan_id = cm_node->vlan_id;
+ __entry->accel = cm_node->accelerated;
+ __entry->type = type;
+ __entry->caller = caller;
+ memcpy(__get_dynamic_array(laddr),
+ cm_node->loc_addr, 4);
+ memcpy(__get_dynamic_array(raddr),
+ cm_node->rem_addr, 4);
+ ),
+ TP_printk("iwdev=%p caller=%pS node=%p refcnt=%d vlan_id=%d accel=%d state=%s event_type=%s loc: %s rem: %s",
+ __entry->iwdev,
+ __entry->caller,
+ __entry->cm_node,
+ __entry->refcount,
+ __entry->vlan_id,
+ __entry->accel,
+ parse_cm_state(__entry->state),
+ parse_cm_event_type(__entry->type),
+ __print_ip_addr(__get_dynamic_array(laddr),
+ __entry->lport, __entry->ipv4),
+ __print_ip_addr(__get_dynamic_array(raddr),
+ __entry->rport, __entry->ipv4)
+ )
+);
+
+DEFINE_EVENT(cm_node_template, irdma_create_event,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller));
+
+DEFINE_EVENT(cm_node_template, irdma_accept,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller));
+
+DEFINE_EVENT(cm_node_template, irdma_connect,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller));
+
+DEFINE_EVENT(cm_node_template, irdma_reject,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller));
+
+DEFINE_EVENT(cm_node_template, irdma_find_node,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller));
+
+DEFINE_EVENT(cm_node_template, irdma_send_reset,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller));
+
+DEFINE_EVENT(cm_node_template, irdma_rem_ref_cm_node,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller));
+
+DEFINE_EVENT(cm_node_template, irdma_cm_event_handler,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller));
+
+TRACE_EVENT(open_err_template,
+ TP_PROTO(struct irdma_cm_node *cm_node, bool reset, void *caller),
+ TP_ARGS(cm_node, reset, caller),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __field(struct irdma_cm_node *, cm_node)
+ __field(enum irdma_cm_node_state, state)
+ __field(bool, reset)
+ __field(void *, caller)
+ ),
+ TP_fast_assign(__entry->iwdev = cm_node->iwdev;
+ __entry->cm_node = cm_node;
+ __entry->state = cm_node->state;
+ __entry->reset = reset;
+ __entry->caller = caller;
+ ),
+ TP_printk("iwdev=%p caller=%pS node%p reset=%d state=%s",
+ __entry->iwdev,
+ __entry->caller,
+ __entry->cm_node,
+ __entry->reset,
+ parse_cm_state(__entry->state)
+ )
+);
+
+DEFINE_EVENT(open_err_template, irdma_active_open_err,
+ TP_PROTO(struct irdma_cm_node *cm_node, bool reset, void *caller),
+ TP_ARGS(cm_node, reset, caller));
+
+DEFINE_EVENT(open_err_template, irdma_passive_open_err,
+ TP_PROTO(struct irdma_cm_node *cm_node, bool reset, void *caller),
+ TP_ARGS(cm_node, reset, caller));
+
+DECLARE_EVENT_CLASS(cm_node_ah_template,
+ TP_PROTO(struct irdma_cm_node *cm_node),
+ TP_ARGS(cm_node),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __field(struct irdma_cm_node *, cm_node)
+ __field(struct irdma_sc_ah *, ah)
+ __field(u32, refcount)
+ __field(u16, lport)
+ __field(u16, rport)
+ __field(enum irdma_cm_node_state, state)
+ __field(bool, ipv4)
+ __field(u16, vlan_id)
+ __field(int, accel)
+ __dynamic_array(u32, laddr, 4)
+ __dynamic_array(u32, raddr, 4)
+ ),
+ TP_fast_assign(__entry->iwdev = cm_node->iwdev;
+ __entry->cm_node = cm_node;
+ __entry->ah = cm_node->ah;
+ __entry->refcount = refcount_read(&cm_node->refcnt);
+ __entry->lport = cm_node->loc_port;
+ __entry->rport = cm_node->rem_port;
+ __entry->state = cm_node->state;
+ __entry->ipv4 = cm_node->ipv4;
+ __entry->vlan_id = cm_node->vlan_id;
+ __entry->accel = cm_node->accelerated;
+ memcpy(__get_dynamic_array(laddr),
+ cm_node->loc_addr, 4);
+ memcpy(__get_dynamic_array(raddr),
+ cm_node->rem_addr, 4);
+ ),
+ TP_printk("iwdev=%p node=%p ah=%p refcnt=%d vlan_id=%d accel=%d state=%s loc: %s rem: %s",
+ __entry->iwdev,
+ __entry->cm_node,
+ __entry->ah,
+ __entry->refcount,
+ __entry->vlan_id,
+ __entry->accel,
+ parse_cm_state(__entry->state),
+ __print_ip_addr(__get_dynamic_array(laddr),
+ __entry->lport, __entry->ipv4),
+ __print_ip_addr(__get_dynamic_array(raddr),
+ __entry->rport, __entry->ipv4)
+ )
+);
+
+DEFINE_EVENT(cm_node_ah_template, irdma_cm_free_ah,
+ TP_PROTO(struct irdma_cm_node *cm_node),
+ TP_ARGS(cm_node));
+
+DEFINE_EVENT(cm_node_ah_template, irdma_create_ah,
+ TP_PROTO(struct irdma_cm_node *cm_node),
+ TP_ARGS(cm_node));
+
+#endif /* __TRACE_CM_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_cm
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
new file mode 100644
index 000000000000..4ae77cdde9dc
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -0,0 +1,1679 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#ifndef IRDMA_TYPE_H
+#define IRDMA_TYPE_H
+#include "osdep.h"
+#include "irdma.h"
+#include "user.h"
+#include "hmc.h"
+#include "uda.h"
+#include "ws.h"
+#include "virtchnl.h"
+
+#define IRDMA_DEBUG_ERR "ERR"
+#define IRDMA_DEBUG_INIT "INIT"
+#define IRDMA_DEBUG_DEV "DEV"
+#define IRDMA_DEBUG_CM "CM"
+#define IRDMA_DEBUG_VERBS "VERBS"
+#define IRDMA_DEBUG_PUDA "PUDA"
+#define IRDMA_DEBUG_ILQ "ILQ"
+#define IRDMA_DEBUG_IEQ "IEQ"
+#define IRDMA_DEBUG_QP "QP"
+#define IRDMA_DEBUG_CQ "CQ"
+#define IRDMA_DEBUG_MR "MR"
+#define IRDMA_DEBUG_PBLE "PBLE"
+#define IRDMA_DEBUG_WQE "WQE"
+#define IRDMA_DEBUG_AEQ "AEQ"
+#define IRDMA_DEBUG_CQP "CQP"
+#define IRDMA_DEBUG_HMC "HMC"
+#define IRDMA_DEBUG_USER "USER"
+#define IRDMA_DEBUG_VIRT "VIRT"
+#define IRDMA_DEBUG_DCB "DCB"
+#define IRDMA_DEBUG_CQE "CQE"
+#define IRDMA_DEBUG_CLNT "CLNT"
+#define IRDMA_DEBUG_WS "WS"
+#define IRDMA_DEBUG_STATS "STATS"
+
+enum irdma_page_size {
+ IRDMA_PAGE_SIZE_4K = 0,
+ IRDMA_PAGE_SIZE_2M,
+ IRDMA_PAGE_SIZE_1G,
+};
+
+enum irdma_hdrct_flags {
+ DDP_LEN_FLAG = 0x80,
+ DDP_HDR_FLAG = 0x40,
+ RDMA_HDR_FLAG = 0x20,
+};
+
+enum irdma_term_layers {
+ LAYER_RDMA = 0,
+ LAYER_DDP = 1,
+ LAYER_MPA = 2,
+};
+
+enum irdma_term_error_types {
+ RDMAP_REMOTE_PROT = 1,
+ RDMAP_REMOTE_OP = 2,
+ DDP_CATASTROPHIC = 0,
+ DDP_TAGGED_BUF = 1,
+ DDP_UNTAGGED_BUF = 2,
+ DDP_LLP = 3,
+};
+
+enum irdma_term_rdma_errors {
+ RDMAP_INV_STAG = 0x00,
+ RDMAP_INV_BOUNDS = 0x01,
+ RDMAP_ACCESS = 0x02,
+ RDMAP_UNASSOC_STAG = 0x03,
+ RDMAP_TO_WRAP = 0x04,
+ RDMAP_INV_RDMAP_VER = 0x05,
+ RDMAP_UNEXPECTED_OP = 0x06,
+ RDMAP_CATASTROPHIC_LOCAL = 0x07,
+ RDMAP_CATASTROPHIC_GLOBAL = 0x08,
+ RDMAP_CANT_INV_STAG = 0x09,
+ RDMAP_UNSPECIFIED = 0xff,
+};
+
+enum irdma_term_ddp_errors {
+ DDP_CATASTROPHIC_LOCAL = 0x00,
+ DDP_TAGGED_INV_STAG = 0x00,
+ DDP_TAGGED_BOUNDS = 0x01,
+ DDP_TAGGED_UNASSOC_STAG = 0x02,
+ DDP_TAGGED_TO_WRAP = 0x03,
+ DDP_TAGGED_INV_DDP_VER = 0x04,
+ DDP_UNTAGGED_INV_QN = 0x01,
+ DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
+ DDP_UNTAGGED_INV_MSN_RANGE = 0x03,
+ DDP_UNTAGGED_INV_MO = 0x04,
+ DDP_UNTAGGED_INV_TOO_LONG = 0x05,
+ DDP_UNTAGGED_INV_DDP_VER = 0x06,
+};
+
+enum irdma_term_mpa_errors {
+ MPA_CLOSED = 0x01,
+ MPA_CRC = 0x02,
+ MPA_MARKER = 0x03,
+ MPA_REQ_RSP = 0x04,
+};
+
+enum irdma_hw_stats_index {
+ /* gen1 - 32-bit */
+ IRDMA_HW_STAT_INDEX_IP4RXDISCARD = 0,
+ IRDMA_HW_STAT_INDEX_IP4RXTRUNC = 1,
+ IRDMA_HW_STAT_INDEX_IP4TXNOROUTE = 2,
+ IRDMA_HW_STAT_INDEX_IP6RXDISCARD = 3,
+ IRDMA_HW_STAT_INDEX_IP6RXTRUNC = 4,
+ IRDMA_HW_STAT_INDEX_IP6TXNOROUTE = 5,
+ IRDMA_HW_STAT_INDEX_TCPRTXSEG = 6,
+ IRDMA_HW_STAT_INDEX_TCPRXOPTERR = 7,
+ IRDMA_HW_STAT_INDEX_TCPRXPROTOERR = 8,
+ IRDMA_HW_STAT_INDEX_RXVLANERR = 9,
+ /* gen1 - 64-bit */
+ IRDMA_HW_STAT_INDEX_IP4RXOCTS = 10,
+ IRDMA_HW_STAT_INDEX_IP4RXPKTS = 11,
+ IRDMA_HW_STAT_INDEX_IP4RXFRAGS = 12,
+ IRDMA_HW_STAT_INDEX_IP4RXMCPKTS = 13,
+ IRDMA_HW_STAT_INDEX_IP4TXOCTS = 14,
+ IRDMA_HW_STAT_INDEX_IP4TXPKTS = 15,
+ IRDMA_HW_STAT_INDEX_IP4TXFRAGS = 16,
+ IRDMA_HW_STAT_INDEX_IP4TXMCPKTS = 17,
+ IRDMA_HW_STAT_INDEX_IP6RXOCTS = 18,
+ IRDMA_HW_STAT_INDEX_IP6RXPKTS = 19,
+ IRDMA_HW_STAT_INDEX_IP6RXFRAGS = 20,
+ IRDMA_HW_STAT_INDEX_IP6RXMCPKTS = 21,
+ IRDMA_HW_STAT_INDEX_IP6TXOCTS = 22,
+ IRDMA_HW_STAT_INDEX_IP6TXPKTS = 23,
+ IRDMA_HW_STAT_INDEX_IP6TXFRAGS = 24,
+ IRDMA_HW_STAT_INDEX_IP6TXMCPKTS = 25,
+ IRDMA_HW_STAT_INDEX_TCPRXSEGS = 26,
+ IRDMA_HW_STAT_INDEX_TCPTXSEG = 27,
+ IRDMA_HW_STAT_INDEX_RDMARXRDS = 28,
+ IRDMA_HW_STAT_INDEX_RDMARXSNDS = 29,
+ IRDMA_HW_STAT_INDEX_RDMARXWRS = 30,
+ IRDMA_HW_STAT_INDEX_RDMATXRDS = 31,
+ IRDMA_HW_STAT_INDEX_RDMATXSNDS = 32,
+ IRDMA_HW_STAT_INDEX_RDMATXWRS = 33,
+ IRDMA_HW_STAT_INDEX_RDMAVBND = 34,
+ IRDMA_HW_STAT_INDEX_RDMAVINV = 35,
+ IRDMA_HW_STAT_INDEX_IP4RXMCOCTS = 36,
+ IRDMA_HW_STAT_INDEX_IP4TXMCOCTS = 37,
+ IRDMA_HW_STAT_INDEX_IP6RXMCOCTS = 38,
+ IRDMA_HW_STAT_INDEX_IP6TXMCOCTS = 39,
+ IRDMA_HW_STAT_INDEX_UDPRXPKTS = 40,
+ IRDMA_HW_STAT_INDEX_UDPTXPKTS = 41,
+ IRDMA_HW_STAT_INDEX_MAX_GEN_1 = 42, /* Must be same value as next entry */
+ /* gen2 - 64-bit */
+ IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS = 42,
+ /* gen2 - 32-bit */
+ IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED = 43,
+ IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED = 44,
+ IRDMA_HW_STAT_INDEX_TXNPCNPSENT = 45,
+ IRDMA_HW_STAT_INDEX_MAX_GEN_2 = 46,
+
+ /* gen3 */
+ IRDMA_HW_STAT_INDEX_RNR_SENT = 46,
+ IRDMA_HW_STAT_INDEX_RNR_RCVD = 47,
+ IRDMA_HW_STAT_INDEX_RDMAORDLMTCNT = 48,
+ IRDMA_HW_STAT_INDEX_RDMAIRDLMTCNT = 49,
+ IRDMA_HW_STAT_INDEX_RDMARXATS = 50,
+ IRDMA_HW_STAT_INDEX_RDMATXATS = 51,
+ IRDMA_HW_STAT_INDEX_NAKSEQERR = 52,
+ IRDMA_HW_STAT_INDEX_NAKSEQERR_IMPLIED = 53,
+ IRDMA_HW_STAT_INDEX_RTO = 54,
+ IRDMA_HW_STAT_INDEX_RXOOOPKTS = 55,
+ IRDMA_HW_STAT_INDEX_ICRCERR = 56,
+
+ IRDMA_HW_STAT_INDEX_MAX_GEN_3 = 57,
+};
+
+enum irdma_feature_type {
+ IRDMA_FEATURE_FW_INFO = 0,
+ IRDMA_HW_VERSION_INFO = 1,
+ IRDMA_QP_MAX_INCR = 2,
+ IRDMA_CQ_MAX_INCR = 3,
+ IRDMA_CEQ_MAX_INCR = 4,
+ IRDMA_SD_MAX_INCR = 5,
+ IRDMA_MR_MAX_INCR = 6,
+ IRDMA_Q1_MAX_INCR = 7,
+ IRDMA_AH_MAX_INCR = 8,
+ IRDMA_SRQ_MAX_INCR = 9,
+ IRDMA_TIMER_MAX_INCR = 10,
+ IRDMA_XF_MAX_INCR = 11,
+ IRDMA_RRF_MAX_INCR = 12,
+ IRDMA_PBLE_MAX_INCR = 13,
+ IRDMA_OBJ_1 = 22,
+ IRDMA_OBJ_2 = 23,
+ IRDMA_ENDPT_TRK = 24,
+ IRDMA_FTN_INLINE_MAX = 25,
+ IRDMA_QSETS_MAX = 26,
+ IRDMA_ASO = 27,
+ IRDMA_FTN_FLAGS = 32,
+ IRDMA_FTN_NOP = 33,
+ IRDMA_MAX_FEATURES, /* Must be last entry */
+};
+
+enum irdma_sched_prio_type {
+ IRDMA_PRIO_WEIGHTED_RR = 1,
+ IRDMA_PRIO_STRICT = 2,
+ IRDMA_PRIO_WEIGHTED_STRICT = 3,
+};
+
+enum irdma_vm_vf_type {
+ IRDMA_VF_TYPE = 0,
+ IRDMA_VM_TYPE,
+ IRDMA_PF_TYPE,
+};
+
+enum irdma_cqp_hmc_profile {
+ IRDMA_HMC_PROFILE_DEFAULT = 1,
+ IRDMA_HMC_PROFILE_FAVOR_VF = 2,
+ IRDMA_HMC_PROFILE_EQUAL = 3,
+};
+
+enum irdma_quad_entry_type {
+ IRDMA_QHASH_TYPE_TCP_ESTABLISHED = 1,
+ IRDMA_QHASH_TYPE_TCP_SYN,
+ IRDMA_QHASH_TYPE_UDP_UNICAST,
+ IRDMA_QHASH_TYPE_UDP_MCAST,
+ IRDMA_QHASH_TYPE_ROCE_MCAST,
+ IRDMA_QHASH_TYPE_ROCEV2_HW,
+};
+
+enum irdma_quad_hash_manage_type {
+ IRDMA_QHASH_MANAGE_TYPE_DELETE = 0,
+ IRDMA_QHASH_MANAGE_TYPE_ADD,
+ IRDMA_QHASH_MANAGE_TYPE_MODIFY,
+};
+
+enum irdma_syn_rst_handling {
+ IRDMA_SYN_RST_HANDLING_HW_TCP_SECURE = 0,
+ IRDMA_SYN_RST_HANDLING_HW_TCP,
+ IRDMA_SYN_RST_HANDLING_FW_TCP_SECURE,
+ IRDMA_SYN_RST_HANDLING_FW_TCP,
+};
+
+enum irdma_queue_type {
+ IRDMA_QUEUE_TYPE_SQ_RQ = 0,
+ IRDMA_QUEUE_TYPE_CQP,
+ IRDMA_QUEUE_TYPE_SRQ,
+};
+
+struct irdma_sc_dev;
+struct irdma_vsi_pestat;
+
+struct irdma_dcqcn_cc_params {
+ u8 cc_cfg_valid;
+ u8 min_dec_factor;
+ u8 min_rate;
+ u8 dcqcn_f;
+ u16 rai_factor;
+ u16 hai_factor;
+ u16 dcqcn_t;
+ u32 dcqcn_b;
+ u32 rreduce_mperiod;
+};
+
+struct irdma_cqp_init_info {
+ u64 cqp_compl_ctx;
+ u64 host_ctx_pa;
+ u64 sq_pa;
+ struct irdma_sc_dev *dev;
+ struct irdma_cqp_quanta *sq;
+ struct irdma_dcqcn_cc_params dcqcn_params;
+ __le64 *host_ctx;
+ u64 *scratch_array;
+ u32 sq_size;
+ struct irdma_ooo_cqp_op *ooo_op_array;
+ u32 pe_en_vf_cnt;
+ u16 hw_maj_ver;
+ u16 hw_min_ver;
+ u8 struct_ver;
+ u8 hmc_profile;
+ u8 ena_vf_count;
+ u8 ceqs_per_vf;
+ u8 ooisc_blksize;
+ u8 rrsp_blksize;
+ u8 q1_blksize;
+ u8 xmit_blksize;
+ u8 ts_override;
+ u8 ts_shift;
+ u8 en_fine_grained_timers;
+ u8 blksizes_valid;
+ bool en_datacenter_tcp:1;
+ bool disable_packed:1;
+ bool rocev2_rto_policy:1;
+ enum irdma_protocol_used protocol_used;
+};
+
+struct irdma_terminate_hdr {
+ u8 layer_etype;
+ u8 error_code;
+ u8 hdrct;
+ u8 rsvd;
+};
+
+struct irdma_cqp_sq_wqe {
+ __le64 buf[IRDMA_CQP_WQE_SIZE];
+};
+
+struct irdma_sc_aeqe {
+ __le64 buf[IRDMA_AEQE_SIZE];
+};
+
+struct irdma_ceqe {
+ __le64 buf[IRDMA_CEQE_SIZE];
+};
+
+struct irdma_cqp_ctx {
+ __le64 buf[IRDMA_CQP_CTX_SIZE];
+};
+
+struct irdma_cq_shadow_area {
+ __le64 buf[IRDMA_SHADOW_AREA_SIZE];
+};
+
+struct irdma_dev_hw_stats_offsets {
+ u32 stats_offset[IRDMA_HW_STAT_INDEX_MAX_GEN_1];
+};
+
+struct irdma_dev_hw_stats {
+ u64 stats_val[IRDMA_GATHER_STATS_BUF_SIZE / sizeof(u64)];
+};
+
+struct irdma_gather_stats {
+ u64 val[IRDMA_GATHER_STATS_BUF_SIZE / sizeof(u64)];
+};
+
+struct irdma_hw_stat_map {
+ u16 byteoff;
+ u8 bitoff;
+ u64 bitmask;
+};
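+
+/*
+ * Illustrative decode (an assumption for documentation only, not code from
+ * this patch): a 64-bit statistics word read at @byteoff in the gather
+ * buffer would be unpacked roughly as
+ *   val = (word >> map->bitoff) & map->bitmask;
+ * i.e. @bitoff selects the starting bit and @bitmask the field width.
+ */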
+
+struct irdma_stats_gather_info {
+ bool use_hmc_fcn_index:1;
+ bool use_stats_inst:1;
+ u8 hmc_fcn_index;
+ u8 stats_inst_index;
+ struct irdma_dma_mem stats_buff_mem;
+ void *gather_stats_va;
+ void *last_gather_stats_va;
+};
+
+struct irdma_vsi_pestat {
+ struct irdma_hw *hw;
+ struct irdma_dev_hw_stats hw_stats;
+ struct irdma_stats_gather_info gather_info;
+ struct timer_list stats_timer;
+ struct irdma_sc_vsi *vsi;
+ struct irdma_dev_hw_stats last_hw_stats;
+ spinlock_t lock; /* rdma stats lock */
+};
+
+struct irdma_mmio_region {
+ u8 __iomem *addr;
+ resource_size_t len;
+ resource_size_t offset;
+};
+
+struct irdma_hw {
+ union {
+ u8 __iomem *hw_addr;
+ struct {
+ struct irdma_mmio_region rdma_reg; /* RDMA region */
+ struct irdma_mmio_region *io_regs; /* Non-RDMA MMIO regions */
+ u16 num_io_regions; /* Number of Non-RDMA MMIO regions */
+ };
+ };
+ struct device *device;
+ struct irdma_hmc_info hmc;
+};
+
+struct irdma_pfpdu {
+ struct list_head rxlist;
+ u32 rcv_nxt;
+ u32 fps;
+ u32 max_fpdu_data;
+ u32 nextseqnum;
+ u32 rcv_start_seq;
+ bool mode:1;
+ bool mpa_crc_err:1;
+ u8 marker_len;
+ u64 total_ieq_bufs;
+ u64 fpdu_processed;
+ u64 bad_seq_num;
+ u64 crc_err;
+ u64 no_tx_bufs;
+ u64 tx_err;
+ u64 out_of_order;
+ u64 pmode_count;
+ struct irdma_sc_ah *ah;
+ struct irdma_puda_buf *ah_buf;
+ spinlock_t lock; /* fpdu processing lock */
+ struct irdma_puda_buf *lastrcv_buf;
+};
+
+struct irdma_sc_pd {
+ struct irdma_sc_dev *dev;
+ u32 pd_id;
+ int abi_ver;
+};
+
+struct irdma_cqp_quanta {
+ __le64 elem[IRDMA_CQP_WQE_SIZE];
+};
+
+struct irdma_ooo_cqp_op {
+ struct list_head list_entry;
+ u64 scratch;
+ u32 def_info;
+ u32 sw_def_info;
+ u32 wqe_idx;
+ bool deferred:1;
+};
+
+struct irdma_sc_cqp {
+ spinlock_t ooo_list_lock; /* protects list of pending completions */
+ struct list_head ooo_avail;
+ struct list_head ooo_pnd;
+ u32 last_def_cmpl_ticket;
+ u32 sw_def_cmpl_ticket;
+ u32 size;
+ u64 sq_pa;
+ u64 host_ctx_pa;
+ void *back_cqp;
+ struct irdma_sc_dev *dev;
+ int (*process_cqp_sds)(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info);
+ struct irdma_dma_mem sdbuf;
+ struct irdma_ring sq_ring;
+ struct irdma_cqp_quanta *sq_base;
+ struct irdma_dcqcn_cc_params dcqcn_params;
+ __le64 *host_ctx;
+ u64 *scratch_array;
+ u64 requested_ops;
+ atomic64_t completed_ops;
+ struct irdma_ooo_cqp_op *ooo_op_array;
+ u32 cqp_id;
+ u32 sq_size;
+ u32 pe_en_vf_cnt;
+ u32 hw_sq_size;
+ u16 hw_maj_ver;
+ u16 hw_min_ver;
+ u8 struct_ver;
+ u8 polarity;
+ u8 hmc_profile;
+ u8 ena_vf_count;
+ u8 timeout_count;
+ u8 ceqs_per_vf;
+ u8 ooisc_blksize;
+ u8 rrsp_blksize;
+ u8 q1_blksize;
+ u8 xmit_blksize;
+ u8 ts_override;
+ u8 ts_shift;
+ u8 en_fine_grained_timers;
+ u8 blksizes_valid;
+ bool en_datacenter_tcp:1;
+ bool disable_packed:1;
+ bool rocev2_rto_policy:1;
+ enum irdma_protocol_used protocol_used;
+};
+
+struct irdma_sc_aeq {
+ u32 size;
+ u64 aeq_elem_pa;
+ struct irdma_sc_dev *dev;
+ struct irdma_sc_aeqe *aeqe_base;
+ void *pbl_list;
+ u32 elem_cnt;
+ struct irdma_ring aeq_ring;
+ u8 pbl_chunk_size;
+ u32 first_pm_pbl_idx;
+ u32 msix_idx;
+ u8 polarity;
+ bool virtual_map:1;
+ bool pasid_valid:1;
+ u32 pasid;
+};
+
+struct irdma_sc_ceq {
+ u32 size;
+ u64 ceq_elem_pa;
+ struct irdma_sc_dev *dev;
+ struct irdma_ceqe *ceqe_base;
+ void *pbl_list;
+ u32 ceq_id;
+ u32 elem_cnt;
+ struct irdma_ring ceq_ring;
+ u8 pbl_chunk_size;
+ u8 tph_val;
+ u32 first_pm_pbl_idx;
+ u8 polarity;
+ u16 vsi_idx;
+ struct irdma_sc_cq **reg_cq;
+ u32 reg_cq_size;
+ spinlock_t req_cq_lock; /* protect access to reg_cq array */
+ bool virtual_map:1;
+ bool tph_en:1;
+ bool itr_no_expire:1;
+ bool pasid_valid:1;
+ u32 pasid;
+};
+
+struct irdma_sc_cq {
+ struct irdma_cq_uk cq_uk;
+ u64 cq_pa;
+ u64 shadow_area_pa;
+ struct irdma_sc_dev *dev;
+ u16 vsi_idx;
+ struct irdma_sc_vsi *vsi;
+ void *pbl_list;
+ void *back_cq;
+ u32 ceq_id;
+ u32 shadow_read_threshold;
+ u8 pbl_chunk_size;
+ u8 cq_type;
+ u8 tph_val;
+ u32 first_pm_pbl_idx;
+ bool ceqe_mask:1;
+ bool virtual_map:1;
+ bool check_overflow:1;
+ bool ceq_id_valid:1;
+ bool tph_en;
+};
+
+struct irdma_sc_qp {
+ struct irdma_qp_uk qp_uk;
+ u64 sq_pa;
+ u64 rq_pa;
+ u64 hw_host_ctx_pa;
+ u64 shadow_area_pa;
+ u64 q2_pa;
+ struct irdma_sc_dev *dev;
+ struct irdma_sc_vsi *vsi;
+ struct irdma_sc_pd *pd;
+ __le64 *hw_host_ctx;
+ void *llp_stream_handle;
+ struct irdma_pfpdu pfpdu;
+ u32 ieq_qp;
+ u8 *q2_buf;
+ u64 qp_compl_ctx;
+ u32 push_idx;
+ u16 qs_handle;
+ u16 push_offset;
+ u8 flush_wqes_count;
+ u8 sq_tph_val;
+ u8 rq_tph_val;
+ u8 qp_state;
+ u8 hw_sq_size;
+ u8 hw_rq_size;
+ u8 src_mac_addr_idx;
+ bool on_qoslist:1;
+ bool ieq_pass_thru:1;
+ bool sq_tph_en:1;
+ bool rq_tph_en:1;
+ bool rcv_tph_en:1;
+ bool xmit_tph_en:1;
+ bool virtual_map:1;
+ bool flush_sq:1;
+ bool flush_rq:1;
+ bool err_sq_idx_valid:1;
+ bool err_rq_idx_valid:1;
+ u32 err_sq_idx;
+ u32 err_rq_idx;
+ bool sq_flush_code:1;
+ bool rq_flush_code:1;
+ u32 pkt_limit;
+ enum irdma_flush_opcode flush_code;
+ enum irdma_qp_event_type event_type;
+ u8 term_flags;
+ u8 user_pri;
+ struct list_head list;
+};
+
+struct irdma_stats_inst_info {
+ bool use_hmc_fcn_index;
+ u8 hmc_fn_id;
+ u16 stats_idx;
+};
+
+struct irdma_up_info {
+ u8 map[8];
+ u8 cnp_up_override;
+ u16 hmc_fcn_idx;
+ bool use_vlan:1;
+ bool use_cnp_up_override:1;
+};
+
+#define IRDMA_MAX_WS_NODES 0x3FF
+#define IRDMA_WS_NODE_INVALID 0xFFFF
+
+struct irdma_ws_node_info {
+ u16 id;
+ u16 vsi;
+ u16 parent_id;
+ u16 qs_handle;
+ bool type_leaf:1;
+ bool enable:1;
+ u8 prio_type;
+ u8 tc;
+ u8 weight;
+};
+
+struct irdma_hmc_fpm_misc {
+ u32 max_ceqs;
+ u32 max_sds;
+ u32 loc_mem_pages;
+ u8 ird;
+ u32 xf_block_size;
+ u32 q1_block_size;
+ u32 ht_multiplier;
+ u32 timer_bucket;
+ u32 rrf_block_size;
+ u32 ooiscf_block_size;
+};
+
+#define IRDMA_VCHNL_MAX_MSG_SIZE 512
+#define IRDMA_LEAF_DEFAULT_REL_BW 64
+#define IRDMA_PARENT_DEFAULT_REL_BW 1
+
+struct irdma_qos {
+ struct list_head qplist;
+ struct mutex qos_mutex; /* protect QoS attributes per QoS level */
+ u64 lan_qos_handle;
+ u32 l2_sched_node_id;
+ u16 qs_handle;
+ u8 traffic_class;
+ u8 rel_bw;
+ u8 prio_type;
+ bool valid;
+};
+
+#define IRDMA_INVALID_STATS_IDX 0xff
+struct irdma_sc_vsi {
+ u16 vsi_idx;
+ struct irdma_sc_dev *dev;
+ void *back_vsi;
+ u32 ilq_count;
+ struct irdma_virt_mem ilq_mem;
+ struct irdma_puda_rsrc *ilq;
+ u32 ieq_count;
+ struct irdma_virt_mem ieq_mem;
+ struct irdma_puda_rsrc *ieq;
+ u32 exception_lan_q;
+ u16 mtu;
+ u16 vm_id;
+ enum irdma_vm_vf_type vm_vf_type;
+ bool stats_inst_alloc:1;
+ bool tc_change_pending:1;
+ struct irdma_vsi_pestat *pestat;
+ atomic_t qp_suspend_reqs;
+ int (*register_qset)(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node);
+ void (*unregister_qset)(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node);
+ u8 qos_rel_bw;
+ u8 qos_prio_type;
+ u8 stats_idx;
+ u8 dscp_map[DSCP_MAX];
+ struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
+ u64 hw_stats_regs[IRDMA_HW_STAT_INDEX_MAX_GEN_1];
+ bool dscp_mode:1;
+};
+
+struct irdma_sc_dev {
+ struct list_head cqp_cmd_head; /* head of the CQP command list */
+ spinlock_t cqp_lock; /* protect CQP list access */
+ bool stats_idx_array[IRDMA_MAX_STATS_COUNT_GEN_1];
+ struct irdma_dma_mem vf_fpm_query_buf[IRDMA_MAX_PE_ENA_VF_COUNT];
+ u64 fpm_query_buf_pa;
+ u64 fpm_commit_buf_pa;
+ __le64 *fpm_query_buf;
+ __le64 *fpm_commit_buf;
+ struct irdma_hw *hw;
+ u8 __iomem *db_addr;
+ u32 __iomem *wqe_alloc_db;
+ u32 __iomem *cq_arm_db;
+ u32 __iomem *aeq_alloc_db;
+ u32 __iomem *cqp_db;
+ u32 __iomem *cq_ack_db;
+ u32 __iomem *ceq_itr_mask_db;
+ u32 __iomem *aeq_itr_mask_db;
+ u32 __iomem *hw_regs[IRDMA_MAX_REGS];
+ u32 ceq_itr; /* Interrupt throttle in usecs between interrupts: 0 = disabled, valid range 2 - 8160 */
+ u64 hw_masks[IRDMA_MAX_MASKS];
+ u64 hw_shifts[IRDMA_MAX_SHIFTS];
+ const struct irdma_hw_stat_map *hw_stats_map;
+ u64 hw_stats_regs[IRDMA_HW_STAT_INDEX_MAX_GEN_1];
+ u64 feature_info[IRDMA_MAX_FEATURES];
+ u64 cqp_cmd_stats[IRDMA_MAX_CQP_OPS];
+ struct irdma_hw_attrs hw_attrs;
+ struct irdma_hmc_info *hmc_info;
+ struct irdma_vchnl_rdma_caps vc_caps;
+ u8 vc_recv_buf[IRDMA_VCHNL_MAX_MSG_SIZE];
+ u16 vc_recv_len;
+ struct irdma_sc_cqp *cqp;
+ struct irdma_sc_aeq *aeq;
+ struct irdma_sc_ceq *ceq[IRDMA_CEQ_MAX_COUNT];
+ struct irdma_sc_cq *ccq;
+ const struct irdma_irq_ops *irq_ops;
+ struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
+ struct irdma_hmc_fpm_misc hmc_fpm_misc;
+ struct irdma_ws_node *ws_tree_root;
+ struct mutex ws_mutex; /* ws tree mutex */
+ u32 vchnl_ver;
+ u16 num_vfs;
+ u16 hmc_fn_id;
+ u8 vf_id;
+ bool privileged:1;
+ bool vchnl_up:1;
+ bool ceq_valid:1;
+ bool is_pf:1;
+ u8 protocol_used;
+ struct mutex vchnl_mutex; /* mutex to synchronize RDMA virtual channel messages */
+ u8 pci_rev;
+ int (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
+ void (*ws_remove)(struct irdma_sc_vsi *vsi, u8 user_pri);
+ void (*ws_reset)(struct irdma_sc_vsi *vsi);
+};
+
+struct irdma_modify_cq_info {
+ u64 cq_pa;
+ struct irdma_cqe *cq_base;
+ u32 cq_size;
+ u32 shadow_read_threshold;
+ u8 pbl_chunk_size;
+ u32 first_pm_pbl_idx;
+ bool virtual_map:1;
+ bool check_overflow;
+ bool cq_resize:1;
+};
+
+struct irdma_srq_init_info {
+ struct irdma_sc_pd *pd;
+ struct irdma_sc_vsi *vsi;
+ u64 srq_pa;
+ u64 shadow_area_pa;
+ u32 first_pm_pbl_idx;
+ u32 pasid;
+ u32 srq_size;
+ u16 srq_limit;
+ u8 pasid_valid;
+ u8 wqe_size;
+ u8 leaf_pbl_size;
+ u8 virtual_map;
+ u8 tph_en;
+ u8 arm_limit_event;
+ u8 tph_value;
+ u8 pbl_chunk_size;
+ struct irdma_srq_uk_init_info srq_uk_init_info;
+};
+
+struct irdma_sc_srq {
+ struct irdma_sc_dev *dev;
+ struct irdma_sc_vsi *vsi;
+ struct irdma_sc_pd *pd;
+ struct irdma_srq_uk srq_uk;
+ void *back_srq;
+ u64 srq_pa;
+ u64 shadow_area_pa;
+ u32 first_pm_pbl_idx;
+ u32 pasid;
+ u32 hw_srq_size;
+ u16 srq_limit;
+ u8 pasid_valid;
+ u8 leaf_pbl_size;
+ u8 virtual_map;
+ u8 tph_en;
+ u8 arm_limit_event;
+ u8 tph_val;
+};
+
+struct irdma_modify_srq_info {
+ u16 srq_limit;
+ u8 arm_limit_event;
+};
+
+struct irdma_create_qp_info {
+ bool ord_valid:1;
+ bool tcp_ctx_valid:1;
+ bool cq_num_valid:1;
+ bool arp_cache_idx_valid:1;
+ bool mac_valid:1;
+ bool force_lpb;
+ u8 next_iwarp_state;
+};
+
+struct irdma_modify_qp_info {
+ u64 rx_win0;
+ u64 rx_win1;
+ u16 new_mss;
+ u8 next_iwarp_state;
+ u8 curr_iwarp_state;
+ u8 termlen;
+ bool ord_valid:1;
+ bool tcp_ctx_valid:1;
+ bool udp_ctx_valid:1;
+ bool cq_num_valid:1;
+ bool arp_cache_idx_valid:1;
+ bool reset_tcp_conn:1;
+ bool remove_hash_idx:1;
+ bool dont_send_term:1;
+ bool dont_send_fin:1;
+ bool cached_var_valid:1;
+ bool mss_change:1;
+ bool force_lpb:1;
+ bool mac_valid:1;
+};
+
+struct irdma_ccq_cqe_info {
+ struct irdma_sc_cqp *cqp;
+ u64 scratch;
+ u32 op_ret_val;
+ u16 maj_err_code;
+ u16 min_err_code;
+ u8 op_code;
+ bool error:1;
+ bool pending:1;
+};
+
+struct irdma_dcb_app_info {
+ u8 priority;
+ u8 selector;
+ u16 prot_id;
+};
+
+struct irdma_qos_tc_info {
+ u64 tc_ctx;
+ u8 rel_bw;
+ u8 prio_type;
+ u8 egress_virt_up;
+ u8 ingress_virt_up;
+};
+
+struct irdma_l2params {
+ struct irdma_qos_tc_info tc_info[IRDMA_MAX_USER_PRIORITY];
+ struct irdma_dcb_app_info apps[IRDMA_MAX_APPS];
+ u32 num_apps;
+ u16 qs_handle_list[IRDMA_MAX_USER_PRIORITY];
+ u16 mtu;
+ u8 up2tc[IRDMA_MAX_USER_PRIORITY];
+ u8 dscp_map[DSCP_MAX];
+ u8 num_tc;
+ u8 vsi_rel_bw;
+ u8 vsi_prio_type;
+ bool mtu_changed:1;
+ bool tc_changed:1;
+ bool dscp_mode:1;
+};
+
+struct irdma_vsi_init_info {
+ struct irdma_sc_dev *dev;
+ void *back_vsi;
+ struct irdma_l2params *params;
+ u16 exception_lan_q;
+ u16 pf_data_vsi_num;
+ enum irdma_vm_vf_type vm_vf_type;
+ u16 vm_id;
+ int (*register_qset)(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node);
+ void (*unregister_qset)(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node);
+};
+
+struct irdma_vsi_stats_info {
+ struct irdma_vsi_pestat *pestat;
+ u16 fcn_id;
+ bool alloc_stats_inst;
+};
+
+struct irdma_device_init_info {
+ u64 fpm_query_buf_pa;
+ u64 fpm_commit_buf_pa;
+ __le64 *fpm_query_buf;
+ __le64 *fpm_commit_buf;
+ struct irdma_hw *hw;
+ void __iomem *bar0;
+ enum irdma_protocol_used protocol_used;
+ u16 hmc_fn_id;
+};
+
+struct irdma_ceq_init_info {
+ u64 ceqe_pa;
+ struct irdma_sc_dev *dev;
+ u64 *ceqe_base;
+ void *pbl_list;
+ u32 elem_cnt;
+ u32 ceq_id;
+ bool virtual_map:1;
+ bool tph_en:1;
+ bool itr_no_expire:1;
+ u8 pbl_chunk_size;
+ u8 tph_val;
+ u16 vsi_idx;
+ u32 first_pm_pbl_idx;
+ struct irdma_sc_cq **reg_cq;
+ u32 reg_cq_idx;
+};
+
+struct irdma_aeq_init_info {
+ u64 aeq_elem_pa;
+ struct irdma_sc_dev *dev;
+ u32 *aeqe_base;
+ void *pbl_list;
+ u32 elem_cnt;
+ bool virtual_map;
+ u8 pbl_chunk_size;
+ u32 first_pm_pbl_idx;
+ u32 msix_idx;
+};
+
+struct irdma_ccq_init_info {
+ u64 cq_pa;
+ u64 shadow_area_pa;
+ struct irdma_sc_dev *dev;
+ struct irdma_cqe *cq_base;
+ __le64 *shadow_area;
+ void *pbl_list;
+ u32 num_elem;
+ u32 ceq_id;
+ u32 shadow_read_threshold;
+ bool ceqe_mask:1;
+ bool ceq_id_valid:1;
+ bool avoid_mem_cflct:1;
+ bool virtual_map:1;
+ bool tph_en:1;
+ u8 tph_val;
+ u8 pbl_chunk_size;
+ u32 first_pm_pbl_idx;
+ struct irdma_sc_vsi *vsi;
+};
+
+struct irdma_udp_offload_info {
+ bool ipv4:1;
+ bool insert_vlan_tag:1;
+ u8 ttl;
+ u8 tos;
+ u16 src_port;
+ u16 dst_port;
+ u32 dest_ip_addr[4];
+ u32 snd_mss;
+ u16 vlan_tag;
+ u16 arp_idx;
+ u32 flow_label;
+ u8 udp_state;
+ u32 psn_nxt;
+ u32 lsn;
+ u32 epsn;
+ u32 psn_max;
+ u32 psn_una;
+ u32 local_ipaddr[4];
+ u32 cwnd;
+ u8 rexmit_thresh;
+ u8 rnr_nak_thresh;
+ u8 rnr_nak_tmr;
+ u8 min_rnr_timer;
+};
+
+struct irdma_roce_offload_info {
+ u16 p_key;
+ u16 err_rq_idx;
+ u32 qkey;
+ u32 dest_qp;
+ u8 roce_tver;
+ u8 ack_credits;
+ u8 err_rq_idx_valid;
+ u32 pd_id;
+ u16 ord_size;
+ u16 ird_size;
+ bool is_qp1:1;
+ bool udprivcq_en:1;
+ bool dcqcn_en:1;
+ bool rcv_no_icrc:1;
+ bool wr_rdresp_en:1;
+ bool bind_en:1;
+ bool fast_reg_en:1;
+ bool priv_mode_en:1;
+ bool rd_en:1;
+ bool timely_en:1;
+ bool dctcp_en:1;
+ bool fw_cc_enable:1;
+ bool use_stats_inst:1;
+ u8 local_ack_timeout;
+ u16 t_high;
+ u16 t_low;
+ u8 last_byte_sent;
+ u8 mac_addr[ETH_ALEN];
+ u8 rtomin;
+};
+
+struct irdma_iwarp_offload_info {
+ u16 rcv_mark_offset;
+ u16 snd_mark_offset;
+ u8 ddp_ver;
+ u8 rdmap_ver;
+ u8 iwarp_mode;
+ u16 err_rq_idx;
+ u32 pd_id;
+ u16 ord_size;
+ u16 ird_size;
+ bool ib_rd_en:1;
+ bool align_hdrs:1;
+ bool rcv_no_mpa_crc:1;
+ bool err_rq_idx_valid:1;
+ bool snd_mark_en:1;
+ bool rcv_mark_en:1;
+ bool wr_rdresp_en:1;
+ bool bind_en:1;
+ bool fast_reg_en:1;
+ bool priv_mode_en:1;
+ bool rd_en:1;
+ bool timely_en:1;
+ bool use_stats_inst:1;
+ bool ecn_en:1;
+ bool dctcp_en:1;
+ u16 t_high;
+ u16 t_low;
+ u8 last_byte_sent;
+ u8 mac_addr[ETH_ALEN];
+ u8 rtomin;
+};
+
+struct irdma_tcp_offload_info {
+ bool ipv4:1;
+ bool no_nagle:1;
+ bool insert_vlan_tag:1;
+ bool time_stamp:1;
+ bool drop_ooo_seg:1;
+ bool avoid_stretch_ack:1;
+ bool wscale:1;
+ bool ignore_tcp_opt:1;
+ bool ignore_tcp_uns_opt:1;
+ u8 cwnd_inc_limit;
+ u8 dup_ack_thresh;
+ u8 ttl;
+ u8 src_mac_addr_idx;
+ u8 tos;
+ u16 src_port;
+ u16 dst_port;
+ u32 dest_ip_addr[4];
+ u32 snd_mss;
+ u16 syn_rst_handling;
+ u16 vlan_tag;
+ u16 arp_idx;
+ u32 flow_label;
+ u8 tcp_state;
+ u8 snd_wscale;
+ u8 rcv_wscale;
+ u32 time_stamp_recent;
+ u32 time_stamp_age;
+ u32 snd_nxt;
+ u32 snd_wnd;
+ u32 rcv_nxt;
+ u32 rcv_wnd;
+ u32 snd_max;
+ u32 snd_una;
+ u32 srtt;
+ u32 rtt_var;
+ u32 ss_thresh;
+ u32 cwnd;
+ u32 snd_wl1;
+ u32 snd_wl2;
+ u32 max_snd_window;
+ u8 rexmit_thresh;
+ u32 local_ipaddr[4];
+};
+
+struct irdma_qp_host_ctx_info {
+ u64 qp_compl_ctx;
+ union {
+ struct irdma_tcp_offload_info *tcp_info;
+ struct irdma_udp_offload_info *udp_info;
+ };
+ union {
+ struct irdma_iwarp_offload_info *iwarp_info;
+ struct irdma_roce_offload_info *roce_info;
+ };
+ u32 send_cq_num;
+ u32 rcv_cq_num;
+ u32 srq_id;
+ u32 rem_endpoint_idx;
+ u16 stats_idx;
+ bool remote_atomics_en:1;
+ bool srq_valid:1;
+ bool tcp_info_valid:1;
+ bool iwarp_info_valid:1;
+ bool stats_idx_valid:1;
+ u8 user_pri;
+};
+
+struct irdma_aeqe_info {
+ u64 compl_ctx;
+ u32 qp_cq_id;
+ u32 def_info; /* only valid for DEF_CMPL */
+ u16 ae_id;
+ u16 wqe_idx;
+ u8 tcp_state;
+ u8 iwarp_state;
+ bool qp:1;
+ bool cq:1;
+ bool sq:1;
+ bool rq:1;
+ bool srq:1;
+ bool in_rdrsp_wr:1;
+ bool out_rdrsp:1;
+ bool aeqe_overflow:1;
+ bool err_rq_idx_valid:1;
+ u8 q2_data_written;
+ u8 ae_src;
+};
+
+struct irdma_allocate_stag_info {
+ u64 total_len;
+ u64 first_pm_pbl_idx;
+ u32 chunk_size;
+ u32 stag_idx;
+ u32 page_size;
+ u32 pd_id;
+ u16 access_rights;
+ bool remote_access:1;
+ bool use_hmc_fcn_index:1;
+ bool use_pf_rid:1;
+ bool all_memory:1;
+ bool remote_atomics_en:1;
+ u16 hmc_fcn_index;
+};
+
+struct irdma_mw_alloc_info {
+ u32 mw_stag_index;
+ u32 page_size;
+ u32 pd_id;
+ bool remote_access:1;
+ bool mw_wide:1;
+ bool mw1_bind_dont_vldt_key:1;
+};
+
+struct irdma_reg_ns_stag_info {
+ u64 reg_addr_pa;
+ u64 va;
+ u64 total_len;
+ u32 page_size;
+ u32 chunk_size;
+ u32 first_pm_pbl_index;
+ enum irdma_addressing_type addr_type;
+ irdma_stag_index stag_idx;
+ u16 access_rights;
+ u32 pd_id;
+ irdma_stag_key stag_key;
+ bool use_hmc_fcn_index:1;
+ u8 hmc_fcn_index;
+ bool use_pf_rid:1;
+ bool all_memory:1;
+ bool remote_atomics_en:1;
+};
+
+struct irdma_fast_reg_stag_info {
+ u64 wr_id;
+ u64 reg_addr_pa;
+ u64 fbo;
+ void *va;
+ u64 total_len;
+ u32 page_size;
+ u32 chunk_size;
+ u32 first_pm_pbl_index;
+ enum irdma_addressing_type addr_type;
+ irdma_stag_index stag_idx;
+ u16 access_rights;
+ u32 pd_id;
+ irdma_stag_key stag_key;
+ bool local_fence:1;
+ bool read_fence:1;
+ bool signaled:1;
+ bool use_hmc_fcn_index:1;
+ u8 hmc_fcn_index;
+ bool use_pf_rid:1;
+ bool defer_flag:1;
+ bool remote_atomics_en:1;
+};
+
+struct irdma_dealloc_stag_info {
+ u32 stag_idx;
+ u32 pd_id;
+ bool mr:1;
+ bool dealloc_pbl:1;
+};
+
+struct irdma_register_shared_stag {
+ u64 va;
+ enum irdma_addressing_type addr_type;
+ irdma_stag_index new_stag_idx;
+ irdma_stag_index parent_stag_idx;
+ u32 access_rights;
+ u32 pd_id;
+ u32 page_size;
+ irdma_stag_key new_stag_key;
+};
+
+struct irdma_qp_init_info {
+ struct irdma_qp_uk_init_info qp_uk_init_info;
+ struct irdma_sc_pd *pd;
+ struct irdma_sc_vsi *vsi;
+ __le64 *host_ctx;
+ u8 *q2;
+ u64 sq_pa;
+ u64 rq_pa;
+ u64 host_ctx_pa;
+ u64 q2_pa;
+ u64 shadow_area_pa;
+ u8 sq_tph_val;
+ u8 rq_tph_val;
+ bool sq_tph_en:1;
+ bool rq_tph_en:1;
+ bool rcv_tph_en:1;
+ bool xmit_tph_en:1;
+ bool virtual_map:1;
+};
+
+struct irdma_cq_init_info {
+ struct irdma_sc_dev *dev;
+ u64 cq_base_pa;
+ u64 shadow_area_pa;
+ u32 ceq_id;
+ u32 shadow_read_threshold;
+ u8 pbl_chunk_size;
+ u32 first_pm_pbl_idx;
+ bool virtual_map:1;
+ bool ceqe_mask:1;
+ bool ceq_id_valid:1;
+ bool tph_en:1;
+ u8 tph_val;
+ u8 type;
+ struct irdma_cq_uk_init_info cq_uk_init_info;
+ struct irdma_sc_vsi *vsi;
+};
+
+struct irdma_upload_context_info {
+ u64 buf_pa;
+ u32 qp_id;
+ u8 qp_type;
+ bool freeze_qp:1;
+ bool raw_format:1;
+};
+
+struct irdma_local_mac_entry_info {
+ u8 mac_addr[6];
+ u16 entry_idx;
+};
+
+struct irdma_add_arp_cache_entry_info {
+ u8 mac_addr[ETH_ALEN];
+ u32 reach_max;
+ u16 arp_index;
+ bool permanent;
+};
+
+struct irdma_apbvt_info {
+ u16 port;
+ bool add;
+};
+
+struct irdma_qhash_table_info {
+ struct irdma_sc_vsi *vsi;
+ enum irdma_quad_hash_manage_type manage;
+ enum irdma_quad_entry_type entry_type;
+ bool vlan_valid:1;
+ bool ipv4_valid:1;
+ u8 mac_addr[ETH_ALEN];
+ u16 vlan_id;
+ u8 user_pri;
+ u32 qp_num;
+ u32 dest_ip[4];
+ u32 src_ip[4];
+ u16 dest_port;
+ u16 src_port;
+};
+
+struct irdma_cqp_manage_push_page_info {
+ u32 push_idx;
+ u16 qs_handle;
+ u8 free_page;
+ u8 push_page_type;
+};
+
+struct irdma_qp_flush_info {
+ u32 err_sq_idx;
+ u32 err_rq_idx;
+ u16 sq_minor_code;
+ u16 sq_major_code;
+ u16 rq_minor_code;
+ u16 rq_major_code;
+ u16 ae_code;
+ u8 ae_src;
+ bool sq:1;
+ bool rq:1;
+ bool userflushcode:1;
+ bool generate_ae:1;
+ bool err_sq_idx_valid:1;
+ bool err_rq_idx_valid:1;
+};
+
+struct irdma_gen_ae_info {
+ u16 ae_code;
+ u8 ae_src;
+};
+
+struct irdma_cqp_timeout {
+ u64 compl_cqp_cmds;
+ u32 count;
+};
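+
+/*
+ * Presumably paired with irdma_check_cqp_progress() declared below:
+ * @compl_cqp_cmds snapshots the completed-command count and @count tracks
+ * how many polls have elapsed without progress (editorial note, not taken
+ * from this patch).
+ */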
+
+struct irdma_irq_ops {
+ void (*irdma_cfg_aeq)(struct irdma_sc_dev *dev, u32 idx, bool enable);
+ void (*irdma_cfg_ceq)(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
+ bool enable);
+ void (*irdma_dis_irq)(struct irdma_sc_dev *dev, u32 idx);
+ void (*irdma_en_irq)(struct irdma_sc_dev *dev, u32 idx);
+};
+
+void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq);
+int irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
+ bool check_overflow, bool post_sq);
+int irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq);
+int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
+ struct irdma_ccq_cqe_info *info);
+int irdma_sc_ccq_init(struct irdma_sc_cq *ccq,
+ struct irdma_ccq_init_info *info);
+
+int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch);
+int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq);
+
+int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq);
+int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
+ struct irdma_ceq_init_info *info);
+void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq);
+void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq);
+
+int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
+ struct irdma_aeq_init_info *info);
+int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
+ struct irdma_aeqe_info *info);
+void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);
+
+void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
+ int abi_ver);
+void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable);
+void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout,
+ struct irdma_sc_dev *dev);
+void irdma_sc_cqp_def_cmpl_ae_handler(struct irdma_sc_dev *dev,
+ struct irdma_aeqe_info *info,
+ bool first, u64 *scratch,
+ u32 *sw_def_info);
+u64 irdma_sc_cqp_cleanup_handler(struct irdma_sc_dev *dev);
+int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err);
+int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp);
+int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
+ struct irdma_cqp_init_info *info);
+void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
+int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 opcode,
+ struct irdma_ccq_cqe_info *cmpl_info);
+int irdma_sc_fast_register(struct irdma_sc_qp *qp,
+ struct irdma_fast_reg_stag_info *info, bool post_sq);
+int irdma_sc_qp_create(struct irdma_sc_qp *qp,
+ struct irdma_create_qp_info *info, u64 scratch,
+ bool post_sq);
+int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
+ bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq);
+int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
+ struct irdma_qp_flush_info *info, u64 scratch,
+ bool post_sq);
+int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info);
+int irdma_sc_qp_modify(struct irdma_sc_qp *qp,
+ struct irdma_modify_qp_info *info, u64 scratch,
+ bool post_sq);
+void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
+ irdma_stag stag);
+
+void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read);
+void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
+ struct irdma_qp_host_ctx_info *info);
+void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
+ struct irdma_qp_host_ctx_info *info);
+int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq);
+int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info);
+void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info);
+int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
+ u8 hmc_fn_id, bool post_sq,
+ bool poll_registers);
+int irdma_sc_srq_init(struct irdma_sc_srq *srq,
+ struct irdma_srq_init_info *info);
+
+void sc_vsi_update_stats(struct irdma_sc_vsi *vsi);
+struct cqp_info {
+ union {
+ struct {
+ struct irdma_sc_qp *qp;
+ struct irdma_create_qp_info info;
+ u64 scratch;
+ } qp_create;
+
+ struct {
+ struct irdma_sc_qp *qp;
+ struct irdma_modify_qp_info info;
+ u64 scratch;
+ } qp_modify;
+
+ struct {
+ struct irdma_sc_qp *qp;
+ u64 scratch;
+ bool remove_hash_idx;
+ bool ignore_mw_bnd;
+ } qp_destroy;
+
+ struct {
+ struct irdma_sc_cq *cq;
+ u64 scratch;
+ bool check_overflow;
+ } cq_create;
+
+ struct {
+ struct irdma_sc_cq *cq;
+ struct irdma_modify_cq_info info;
+ u64 scratch;
+ } cq_modify;
+
+ struct {
+ struct irdma_sc_cq *cq;
+ u64 scratch;
+ } cq_destroy;
+
+ struct {
+ struct irdma_sc_dev *dev;
+ struct irdma_allocate_stag_info info;
+ u64 scratch;
+ } alloc_stag;
+
+ struct {
+ struct irdma_sc_dev *dev;
+ struct irdma_mw_alloc_info info;
+ u64 scratch;
+ } mw_alloc;
+
+ struct {
+ struct irdma_sc_dev *dev;
+ struct irdma_reg_ns_stag_info info;
+ u64 scratch;
+ } mr_reg_non_shared;
+
+ struct {
+ struct irdma_sc_dev *dev;
+ struct irdma_dealloc_stag_info info;
+ u64 scratch;
+ } dealloc_stag;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_add_arp_cache_entry_info info;
+ u64 scratch;
+ } add_arp_cache_entry;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ u64 scratch;
+ u16 arp_index;
+ } del_arp_cache_entry;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_local_mac_entry_info info;
+ u64 scratch;
+ } add_local_mac_entry;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ u64 scratch;
+ u8 entry_idx;
+ u8 ignore_ref_count;
+ } del_local_mac_entry;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ u64 scratch;
+ } alloc_local_mac_entry;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_cqp_manage_push_page_info info;
+ u64 scratch;
+ } manage_push_page;
+
+ struct {
+ struct irdma_sc_dev *dev;
+ struct irdma_upload_context_info info;
+ u64 scratch;
+ } qp_upload_context;
+
+ struct {
+ struct irdma_sc_dev *dev;
+ struct irdma_hmc_fcn_info info;
+ u64 scratch;
+ } manage_hmc_pm;
+
+ struct {
+ struct irdma_sc_ceq *ceq;
+ u64 scratch;
+ } ceq_create;
+
+ struct {
+ struct irdma_sc_ceq *ceq;
+ u64 scratch;
+ } ceq_destroy;
+
+ struct {
+ struct irdma_sc_aeq *aeq;
+ u64 scratch;
+ } aeq_create;
+
+ struct {
+ struct irdma_sc_aeq *aeq;
+ u64 scratch;
+ } aeq_destroy;
+
+ struct {
+ struct irdma_sc_qp *qp;
+ struct irdma_qp_flush_info info;
+ u64 scratch;
+ } qp_flush_wqes;
+
+ struct {
+ struct irdma_sc_qp *qp;
+ struct irdma_gen_ae_info info;
+ u64 scratch;
+ } gen_ae;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ void *fpm_val_va;
+ u64 fpm_val_pa;
+ u8 hmc_fn_id;
+ u64 scratch;
+ } query_fpm_val;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ void *fpm_val_va;
+ u64 fpm_val_pa;
+ u8 hmc_fn_id;
+ u64 scratch;
+ } commit_fpm_val;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_apbvt_info info;
+ u64 scratch;
+ } manage_apbvt_entry;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_qhash_table_info info;
+ u64 scratch;
+ } manage_qhash_table_entry;
+
+ struct {
+ struct irdma_sc_dev *dev;
+ struct irdma_update_sds_info info;
+ u64 scratch;
+ } update_pe_sds;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_sc_qp *qp;
+ u64 scratch;
+ } suspend_resume;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_ah_info info;
+ u64 scratch;
+ } ah_create;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_ah_info info;
+ u64 scratch;
+ } ah_destroy;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_mcast_grp_info info;
+ u64 scratch;
+ } mc_create;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_mcast_grp_info info;
+ u64 scratch;
+ } mc_destroy;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_mcast_grp_info info;
+ u64 scratch;
+ } mc_modify;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_stats_inst_info info;
+ u64 scratch;
+ } stats_manage;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_stats_gather_info info;
+ u64 scratch;
+ } stats_gather;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_ws_node_info info;
+ u64 scratch;
+ } ws_node;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_up_info info;
+ u64 scratch;
+ } up_map;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_dma_mem query_buff_mem;
+ u64 scratch;
+ } query_rdma;
+
+ struct {
+ struct irdma_sc_srq *srq;
+ u64 scratch;
+ } srq_create;
+
+ struct {
+ struct irdma_sc_srq *srq;
+ struct irdma_modify_srq_info info;
+ u64 scratch;
+ } srq_modify;
+
+ struct {
+ struct irdma_sc_srq *srq;
+ u64 scratch;
+ } srq_destroy;
+
+ } u;
+};
+
+struct cqp_cmds_info {
+ struct list_head cqp_cmd_entry;
+ u8 cqp_cmd;
+ u8 post_sq;
+ struct cqp_info in;
+};
+
+__le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
+ u32 *wqe_idx);
+
+/**
+ * irdma_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
+ * @cqp: struct for cqp hw
+ * @scratch: private data for CQP WQE
+ */
+static inline __le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch)
+{
+ u32 wqe_idx;
+
+ return irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
+}
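+
+/*
+ * Sketch of the usual CQP WQE sequence built on this helper (see
+ * irdma_sc_access_ah() in uda.c for a concrete instance):
+ *
+ *   wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ *   if (!wqe)
+ *           return -ENOMEM;
+ *   set_64bit_val(wqe, ...);         payload quadwords
+ *   dma_wmb();                       payload before the valid bit
+ *   set_64bit_val(wqe, 24, hdr);     header carries the valid/polarity bit
+ *   irdma_sc_cqp_post_sq(cqp);
+ */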
+#endif /* IRDMA_TYPE_H */
diff --git a/drivers/infiniband/hw/irdma/uda.c b/drivers/infiniband/hw/irdma/uda.c
new file mode 100644
index 000000000000..84051266d948
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/uda.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2016 - 2021 Intel Corporation */
+#include <linux/etherdevice.h>
+
+#include "osdep.h"
+#include "hmc.h"
+#include "defs.h"
+#include "type.h"
+#include "protos.h"
+#include "uda.h"
+#include "uda_d.h"
+
+/**
+ * irdma_sc_access_ah() - Create, modify or delete AH
+ * @cqp: struct for cqp hw
+ * @info: ah information
+ * @op: Operation
+ * @scratch: u64 saved to be used during cqp completion
+ */
+int irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
+ u32 op, u64 scratch)
+{
+ __le64 *wqe;
+ u64 qw1, qw2;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr) << 16);
+ qw1 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXLO, info->pd_idx) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_TC, info->tc_tos) |
+ FIELD_PREP(IRDMA_UDAQPC_VLANTAG, info->vlan_tag);
+
+ qw2 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ARPINDEX, info->dst_arpindex) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_FLOWLABEL, info->flow_label) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_HOPLIMIT, info->hop_ttl) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXHI, info->pd_idx >> 16);
+
+ if (!info->ipv4_valid) {
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
+ set_64bit_val(wqe, 32,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));
+
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->src_ip_addr[0]) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->src_ip_addr[1]));
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->src_ip_addr[2]) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[3]));
+ } else {
+ set_64bit_val(wqe, 32,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));
+
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[0]));
+ }
+
+ set_64bit_val(wqe, 8, qw1);
+ set_64bit_val(wqe, 16, qw2);
+
+ dma_wmb(); /* need write memory block before writing the WQE header */
+
+ set_64bit_val(
+ wqe, 24,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_OPCODE, op) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK, info->do_lpbk) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_IPV4VALID, info->ipv4_valid) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_AVIDX, info->ah_idx) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG, info->insert_vlan_tag));
+
+ print_hex_dump_debug("WQE: MANAGE_AH WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_create_mg_ctx() - create a multicast group context
+ * @info: multicast group context info
+ */
+static void irdma_create_mg_ctx(struct irdma_mcast_grp_info *info)
+{
+ struct irdma_mcast_grp_ctx_entry_info *entry_info = NULL;
+ u8 idx = 0; /* index in the array */
+ u8 ctx_idx = 0; /* index in the MG context */
+
+ memset(info->dma_mem_mc.va, 0, IRDMA_MAX_MGS_PER_CTX * sizeof(u64));
+
+ for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
+ entry_info = &info->mg_ctx_info[idx];
+ if (entry_info->valid_entry) {
+ set_64bit_val((__le64 *)info->dma_mem_mc.va,
+ ctx_idx * sizeof(u64),
+ FIELD_PREP(IRDMA_UDA_MGCTX_DESTPORT, entry_info->dest_port) |
+ FIELD_PREP(IRDMA_UDA_MGCTX_VALIDENT, entry_info->valid_entry) |
+ FIELD_PREP(IRDMA_UDA_MGCTX_QPID, entry_info->qp_id));
+ ctx_idx++;
+ }
+ }
+}
+
+/**
+ * irdma_access_mcast_grp() - Access mcast group based on op
+ * @cqp: Control QP
+ * @info: multicast group context info
+ * @op: operation to perform
+ * @scratch: u64 saved to be used during cqp completion
+ */
+int irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
+ struct irdma_mcast_grp_info *info, u32 op,
+ u64 scratch)
+{
+ __le64 *wqe;
+
+ if (info->mg_id >= IRDMA_UDA_MAX_FSI_MGS) {
+ ibdev_dbg(to_ibdev(cqp->dev), "WQE: mg_id out of range\n");
+ return -EINVAL;
+ }
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe) {
+ ibdev_dbg(to_ibdev(cqp->dev), "WQE: ring full\n");
+ return -ENOMEM;
+ }
+
+ irdma_create_mg_ctx(info);
+
+ set_64bit_val(wqe, 32, info->dma_mem_mc.pa);
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANID, info->vlan_id) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_QS_HANDLE, info->qs_handle));
+ set_64bit_val(wqe, 0, ether_addr_to_u64(info->dest_mac_addr));
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID, info->hmc_fcn_id));
+
+ if (!info->ipv4_valid) {
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));
+ } else {
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));
+ }
+
+ dma_wmb(); /* need write memory block before writing the WQE header. */
+
+ set_64bit_val(wqe, 24,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_OPCODE, op) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_MGIDX, info->mg_id) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANVALID, info->vlan_valid) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_IPV4VALID, info->ipv4_valid));
+
+ print_hex_dump_debug("WQE: MANAGE_MCG WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ print_hex_dump_debug("WQE: MCG_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, info->dma_mem_mc.va,
+ IRDMA_MAX_MGS_PER_CTX * 8, false);
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_compare_mgs - Compares two multicast group structures
+ * @entry1: Multicast group info
+ * @entry2: Multicast group info in context
+ */
+static bool irdma_compare_mgs(struct irdma_mcast_grp_ctx_entry_info *entry1,
+ struct irdma_mcast_grp_ctx_entry_info *entry2)
+{
+ if (entry1->dest_port == entry2->dest_port &&
+ entry1->qp_id == entry2->qp_id)
+ return true;
+
+ return false;
+}
+
+/**
+ * irdma_sc_add_mcast_grp - Allocates mcast group entry in ctx
+ * @ctx: Multicast group context
+ * @mg: Multicast group info
+ */
+int irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
+ struct irdma_mcast_grp_ctx_entry_info *mg)
+{
+ u32 idx;
+ bool free_entry_found = false;
+ u32 free_entry_idx = 0;
+
+ /* find either an identical or a free entry for a multicast group */
+ for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
+ if (ctx->mg_ctx_info[idx].valid_entry) {
+ if (irdma_compare_mgs(&ctx->mg_ctx_info[idx], mg)) {
+ ctx->mg_ctx_info[idx].use_cnt++;
+ return 0;
+ }
+ continue;
+ }
+ if (!free_entry_found) {
+ free_entry_found = true;
+ free_entry_idx = idx;
+ }
+ }
+
+ if (free_entry_found) {
+ ctx->mg_ctx_info[free_entry_idx] = *mg;
+ ctx->mg_ctx_info[free_entry_idx].valid_entry = true;
+ ctx->mg_ctx_info[free_entry_idx].use_cnt = 1;
+ ctx->no_of_mgs++;
+ return 0;
+ }
+
+ return -ENOMEM;
+}
+
+/**
+ * irdma_sc_del_mcast_grp - Delete mcast group
+ * @ctx: Multicast group context
+ * @mg: Multicast group info
+ *
+ * Finds and removes a specific multicast group from context; all
+ * parameters must match for the group to be removed.
+ */
+int irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
+ struct irdma_mcast_grp_ctx_entry_info *mg)
+{
+ u32 idx;
+
+ /* find an entry in multicast group context */
+ for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
+ if (!ctx->mg_ctx_info[idx].valid_entry)
+ continue;
+
+ if (irdma_compare_mgs(mg, &ctx->mg_ctx_info[idx])) {
+ ctx->mg_ctx_info[idx].use_cnt--;
+
+ if (!ctx->mg_ctx_info[idx].use_cnt) {
+ ctx->mg_ctx_info[idx].valid_entry = false;
+ ctx->no_of_mgs--;
+ /* Remove gap if element was not the last */
+ if (idx != ctx->no_of_mgs &&
+ ctx->no_of_mgs > 0) {
+ memcpy(&ctx->mg_ctx_info[idx],
+ &ctx->mg_ctx_info[ctx->no_of_mgs - 1],
+ sizeof(ctx->mg_ctx_info[idx]));
+ ctx->mg_ctx_info[ctx->no_of_mgs - 1].valid_entry = false;
+ }
+ }
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/infiniband/hw/irdma/uda.h b/drivers/infiniband/hw/irdma/uda.h
new file mode 100644
index 000000000000..27b8701cf21b
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/uda.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2016 - 2021 Intel Corporation */
+#ifndef IRDMA_UDA_H
+#define IRDMA_UDA_H
+
+#define IRDMA_UDA_MAX_FSI_MGS 4096
+#define IRDMA_UDA_MAX_PFS 16
+#define IRDMA_UDA_MAX_VFS 128
+
+struct irdma_sc_cqp;
+
+struct irdma_ah_info {
+ struct irdma_sc_vsi *vsi;
+ u32 pd_idx;
+ u32 dst_arpindex;
+ u32 dest_ip_addr[4];
+ u32 src_ip_addr[4];
+ u32 flow_label;
+ u32 ah_idx;
+ u16 vlan_tag;
+ u8 insert_vlan_tag;
+ u8 tc_tos;
+ u8 hop_ttl;
+ u8 mac_addr[ETH_ALEN];
+ bool ah_valid:1;
+ bool ipv4_valid:1;
+ bool do_lpbk:1;
+};
+
+struct irdma_sc_ah {
+ struct irdma_sc_dev *dev;
+ struct irdma_ah_info ah_info;
+};
+
+int irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
+ struct irdma_mcast_grp_ctx_entry_info *mg);
+int irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
+ struct irdma_mcast_grp_ctx_entry_info *mg);
+int irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
+ u32 op, u64 scratch);
+int irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
+ struct irdma_mcast_grp_info *info, u32 op,
+ u64 scratch);
+
+static inline void irdma_sc_init_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah)
+{
+ ah->dev = dev;
+}
+
+static inline int irdma_sc_create_ah(struct irdma_sc_cqp *cqp,
+ struct irdma_ah_info *info, u64 scratch)
+{
+ return irdma_sc_access_ah(cqp, info, IRDMA_CQP_OP_CREATE_ADDR_HANDLE,
+ scratch);
+}
+
+static inline int irdma_sc_destroy_ah(struct irdma_sc_cqp *cqp,
+ struct irdma_ah_info *info, u64 scratch)
+{
+ return irdma_sc_access_ah(cqp, info, IRDMA_CQP_OP_DESTROY_ADDR_HANDLE,
+ scratch);
+}
+
+static inline int irdma_sc_create_mcast_grp(struct irdma_sc_cqp *cqp,
+ struct irdma_mcast_grp_info *info,
+ u64 scratch)
+{
+ return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_CREATE_MCAST_GRP,
+ scratch);
+}
+
+static inline int irdma_sc_modify_mcast_grp(struct irdma_sc_cqp *cqp,
+ struct irdma_mcast_grp_info *info,
+ u64 scratch)
+{
+ return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_MODIFY_MCAST_GRP,
+ scratch);
+}
+
+static inline int irdma_sc_destroy_mcast_grp(struct irdma_sc_cqp *cqp,
+ struct irdma_mcast_grp_info *info,
+ u64 scratch)
+{
+ return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_DESTROY_MCAST_GRP,
+ scratch);
+}
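+
+/*
+ * A plausible caller flow (an assumption, the callers are not part of this
+ * patch): the software context is maintained first with
+ * irdma_sc_add_mcast_grp()/irdma_sc_del_mcast_grp(), and the result is then
+ * pushed to hardware through the CQP with irdma_sc_create_mcast_grp(),
+ * irdma_sc_modify_mcast_grp() or irdma_sc_destroy_mcast_grp().
+ */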
+#endif /* IRDMA_UDA_H */
diff --git a/drivers/infiniband/hw/irdma/uda_d.h b/drivers/infiniband/hw/irdma/uda_d.h
new file mode 100644
index 000000000000..4fb4daa20722
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/uda_d.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2016 - 2021 Intel Corporation */
+#ifndef IRDMA_UDA_D_H
+#define IRDMA_UDA_D_H
+
+/* L4 packet type */
+#define IRDMA_E_UDA_SQ_L4T_UNKNOWN 0
+#define IRDMA_E_UDA_SQ_L4T_TCP 1
+#define IRDMA_E_UDA_SQ_L4T_SCTP 2
+#define IRDMA_E_UDA_SQ_L4T_UDP 3
+
+/* Inner IP header type */
+#define IRDMA_E_UDA_SQ_IIPT_UNKNOWN 0
+#define IRDMA_E_UDA_SQ_IIPT_IPV6 1
+#define IRDMA_E_UDA_SQ_IIPT_IPV4_NO_CSUM 2
+#define IRDMA_E_UDA_SQ_IIPT_IPV4_CSUM 3
+#define IRDMA_UDA_QPSQ_PUSHWQE BIT_ULL(56)
+#define IRDMA_UDA_QPSQ_INLINEDATAFLAG BIT_ULL(57)
+#define IRDMA_UDA_QPSQ_INLINEDATALEN GENMASK_ULL(55, 48)
+#define IRDMA_UDA_QPSQ_ADDFRAGCNT GENMASK_ULL(41, 38)
+#define IRDMA_UDA_QPSQ_IPFRAGFLAGS GENMASK_ULL(43, 42)
+#define IRDMA_UDA_QPSQ_NOCHECKSUM BIT_ULL(45)
+#define IRDMA_UDA_QPSQ_AHIDXVALID BIT_ULL(46)
+#define IRDMA_UDA_QPSQ_LOCAL_FENCE BIT_ULL(61)
+#define IRDMA_UDA_QPSQ_AHIDX GENMASK_ULL(16, 0)
+#define IRDMA_UDA_QPSQ_PROTOCOL GENMASK_ULL(23, 16)
+#define IRDMA_UDA_QPSQ_EXTHDRLEN GENMASK_ULL(40, 32)
+#define IRDMA_UDA_QPSQ_MULTICAST BIT_ULL(63)
+#define IRDMA_UDA_QPSQ_MACLEN GENMASK_ULL(62, 56)
+#define IRDMA_UDA_QPSQ_MACLEN_LINE 2
+#define IRDMA_UDA_QPSQ_IPLEN GENMASK_ULL(54, 48)
+#define IRDMA_UDA_QPSQ_IPLEN_LINE 2
+#define IRDMA_UDA_QPSQ_L4T GENMASK_ULL(31, 30)
+#define IRDMA_UDA_QPSQ_L4T_LINE 2
+#define IRDMA_UDA_QPSQ_IIPT GENMASK_ULL(29, 28)
+#define IRDMA_UDA_QPSQ_IIPT_LINE 2
+
+#define IRDMA_UDA_QPSQ_DO_LPB_LINE 3
+#define IRDMA_UDA_QPSQ_FWD_PROG_CONFIRM BIT_ULL(45)
+#define IRDMA_UDA_QPSQ_FWD_PROG_CONFIRM_LINE 3
+#define IRDMA_UDA_QPSQ_IMMDATA GENMASK_ULL(63, 0)
+
+/* Byte Offset 0 */
+#define IRDMA_UDAQPC_IPV4_M BIT_ULL(3)
+#define IRDMA_UDAQPC_INSERTVLANTAG BIT_ULL(5)
+#define IRDMA_UDAQPC_ISQP1 BIT_ULL(6)
+
+#define IRDMA_UDAQPC_ECNENABLE BIT_ULL(14)
+#define IRDMA_UDAQPC_PDINDEXHI GENMASK_ULL(21, 20)
+#define IRDMA_UDAQPC_DCTCPENABLE BIT_ULL(25)
+
+#define IRDMA_UDAQPC_RCVTPHEN IRDMAQPC_RCVTPHEN
+#define IRDMA_UDAQPC_XMITTPHEN IRDMAQPC_XMITTPHEN
+#define IRDMA_UDAQPC_RQTPHEN IRDMAQPC_RQTPHEN
+#define IRDMA_UDAQPC_SQTPHEN IRDMAQPC_SQTPHEN
+#define IRDMA_UDAQPC_PPIDX IRDMAQPC_PPIDX
+#define IRDMA_UDAQPC_PMENA IRDMAQPC_PMENA
+#define IRDMA_UDAQPC_INSERTTAG2 BIT_ULL(11)
+#define IRDMA_UDAQPC_INSERTTAG3 BIT_ULL(14)
+
+#define IRDMA_UDAQPC_RQSIZE IRDMAQPC_RQSIZE
+#define IRDMA_UDAQPC_SQSIZE IRDMAQPC_SQSIZE
+#define IRDMA_UDAQPC_TXCQNUM IRDMAQPC_TXCQNUM
+#define IRDMA_UDAQPC_RXCQNUM IRDMAQPC_RXCQNUM
+#define IRDMA_UDAQPC_QPCOMPCTX IRDMAQPC_QPCOMPCTX
+#define IRDMA_UDAQPC_SQTPHVAL IRDMAQPC_SQTPHVAL
+#define IRDMA_UDAQPC_RQTPHVAL IRDMAQPC_RQTPHVAL
+#define IRDMA_UDAQPC_QSHANDLE IRDMAQPC_QSHANDLE
+#define IRDMA_UDAQPC_RQHDRRINGBUFSIZE GENMASK_ULL(49, 48)
+#define IRDMA_UDAQPC_SQHDRRINGBUFSIZE GENMASK_ULL(33, 32)
+#define IRDMA_UDAQPC_PRIVILEGEENABLE BIT_ULL(25)
+#define IRDMA_UDAQPC_USE_STATISTICS_INSTANCE BIT_ULL(26)
+#define IRDMA_UDAQPC_STATISTICS_INSTANCE_INDEX GENMASK_ULL(6, 0)
+#define IRDMA_UDAQPC_PRIVHDRGENENABLE BIT_ULL(0)
+#define IRDMA_UDAQPC_RQHDRSPLITENABLE BIT_ULL(3)
+#define IRDMA_UDAQPC_RQHDRRINGBUFENABLE BIT_ULL(2)
+#define IRDMA_UDAQPC_SQHDRRINGBUFENABLE BIT_ULL(1)
+#define IRDMA_UDAQPC_IPID GENMASK_ULL(47, 32)
+#define IRDMA_UDAQPC_SNDMSS GENMASK_ULL(29, 16)
+#define IRDMA_UDAQPC_VLANTAG GENMASK_ULL(15, 0)
+#define IRDMA_UDA_CQPSQ_MAV_PDINDEXHI GENMASK_ULL(27, 20)
+#define IRDMA_UDA_CQPSQ_MAV_PDINDEXLO GENMASK_ULL(63, 48)
+#define IRDMA_UDA_CQPSQ_MAV_SRCMACADDRINDEX GENMASK_ULL(29, 24)
+#define IRDMA_UDA_CQPSQ_MAV_ARPINDEX GENMASK_ULL(63, 48)
+#define IRDMA_UDA_CQPSQ_MAV_TC GENMASK_ULL(39, 32)
+#define IRDMA_UDA_CQPSQ_MAV_HOPLIMIT GENMASK_ULL(39, 32)
+#define IRDMA_UDA_CQPSQ_MAV_FLOWLABEL GENMASK_ULL(19, 0)
+#define IRDMA_UDA_CQPSQ_MAV_ADDR0 GENMASK_ULL(63, 32)
+#define IRDMA_UDA_CQPSQ_MAV_ADDR1 GENMASK_ULL(31, 0)
+#define IRDMA_UDA_CQPSQ_MAV_ADDR2 GENMASK_ULL(63, 32)
+#define IRDMA_UDA_CQPSQ_MAV_ADDR3 GENMASK_ULL(31, 0)
+#define IRDMA_UDA_CQPSQ_MAV_WQEVALID BIT_ULL(63)
+#define IRDMA_UDA_CQPSQ_MAV_OPCODE GENMASK_ULL(37, 32)
+#define IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK BIT_ULL(62)
+#define IRDMA_UDA_CQPSQ_MAV_IPV4VALID BIT_ULL(59)
+#define IRDMA_UDA_CQPSQ_MAV_AVIDX GENMASK_ULL(23, 0)
+#define IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG BIT_ULL(60)
+#define IRDMA_UDA_MGCTX_VFFLAG BIT_ULL(29)
+#define IRDMA_UDA_MGCTX_DESTPORT GENMASK_ULL(47, 32)
+#define IRDMA_UDA_MGCTX_VFID GENMASK_ULL(28, 22)
+#define IRDMA_UDA_MGCTX_VALIDENT BIT_ULL(31)
+#define IRDMA_UDA_MGCTX_PFID GENMASK_ULL(21, 18)
+#define IRDMA_UDA_MGCTX_FLAGIGNOREDPORT BIT_ULL(30)
+#define IRDMA_UDA_MGCTX_QPID GENMASK_ULL(17, 0)
+#define IRDMA_UDA_CQPSQ_MG_WQEVALID BIT_ULL(63)
+#define IRDMA_UDA_CQPSQ_MG_OPCODE GENMASK_ULL(37, 32)
+#define IRDMA_UDA_CQPSQ_MG_MGIDX GENMASK_ULL(12, 0)
+#define IRDMA_UDA_CQPSQ_MG_IPV4VALID BIT_ULL(60)
+#define IRDMA_UDA_CQPSQ_MG_VLANVALID BIT_ULL(59)
+#define IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID GENMASK_ULL(5, 0)
+#define IRDMA_UDA_CQPSQ_MG_VLANID GENMASK_ULL(43, 32)
+#define IRDMA_UDA_CQPSQ_QS_HANDLE GENMASK_ULL(9, 0)
+#define IRDMA_UDA_CQPSQ_QHASH_QPN GENMASK_ULL(49, 32)
+#define IRDMA_UDA_CQPSQ_QHASH_ BIT_ULL(0)
+#define IRDMA_UDA_CQPSQ_QHASH_SRC_PORT GENMASK_ULL(31, 16)
+#define IRDMA_UDA_CQPSQ_QHASH_DEST_PORT GENMASK_ULL(15, 0)
+#define IRDMA_UDA_CQPSQ_QHASH_ADDR0 GENMASK_ULL(63, 32)
+#define IRDMA_UDA_CQPSQ_QHASH_ADDR1 GENMASK_ULL(31, 0)
+#define IRDMA_UDA_CQPSQ_QHASH_ADDR2 GENMASK_ULL(63, 32)
+#define IRDMA_UDA_CQPSQ_QHASH_ADDR3 GENMASK_ULL(31, 0)
+#define IRDMA_UDA_CQPSQ_QHASH_WQEVALID BIT_ULL(63)
+#define IRDMA_UDA_CQPSQ_QHASH_OPCODE GENMASK_ULL(37, 32)
+#define IRDMA_UDA_CQPSQ_QHASH_MANAGE GENMASK_ULL(62, 61)
+#define IRDMA_UDA_CQPSQ_QHASH_IPV4VALID GENMASK_ULL(60, 60)
+#define IRDMA_UDA_CQPSQ_QHASH_LANFWD GENMASK_ULL(59, 59)
+#define IRDMA_UDA_CQPSQ_QHASH_ENTRYTYPE GENMASK_ULL(44, 42)
+#endif /* IRDMA_UDA_D_H */
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
new file mode 100644
index 000000000000..ce1ae10c30fc
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -0,0 +1,1925 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "osdep.h"
+#include "defs.h"
+#include "user.h"
+#include "irdma.h"
+
+/**
+ * irdma_set_fragment - set fragment in wqe
+ * @wqe: wqe for setting fragment
+ * @offset: offset value
+ * @sge: sge length and stag
+ * @valid: wqe valid flag
+ */
+static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge,
+ u8 valid)
+{
+ if (sge) {
+ set_64bit_val(wqe, offset,
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
+ set_64bit_val(wqe, offset + 8,
+ FIELD_PREP(IRDMAQPSQ_VALID, valid) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->length) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->lkey));
+ } else {
+ set_64bit_val(wqe, offset, 0);
+ set_64bit_val(wqe, offset + 8,
+ FIELD_PREP(IRDMAQPSQ_VALID, valid));
+ }
+}
+
+/**
+ * irdma_set_fragment_gen_1 - set fragment in wqe
+ * @wqe: wqe for setting fragment
+ * @offset: offset value
+ * @sge: sge length and stag
+ * @valid: wqe valid flag
+ */
+static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
+ struct ib_sge *sge, u8 valid)
+{
+ if (sge) {
+ set_64bit_val(wqe, offset,
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
+ set_64bit_val(wqe, offset + 8,
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->length) |
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->lkey));
+ } else {
+ set_64bit_val(wqe, offset, 0);
+ set_64bit_val(wqe, offset + 8, 0);
+ }
+}
+
+/**
+ * irdma_nop_1 - insert a NOP wqe
+ * @qp: hw qp ptr
+ */
+static int irdma_nop_1(struct irdma_qp_uk *qp)
+{
+ u64 hdr;
+ __le64 *wqe;
+ u32 wqe_idx;
+ bool signaled = false;
+
+ if (!qp->sq_ring.head)
+ return -EINVAL;
+
+ wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
+ wqe = qp->sq_base[wqe_idx].elem;
+
+ qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;
+
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 8, 0);
+ set_64bit_val(wqe, 16, 0);
+
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ /* make sure WQE is written before valid bit is set */
+ dma_wmb();
+
+ set_64bit_val(wqe, 24, hdr);
+
+ return 0;
+}
+
+/**
+ * irdma_clr_wqes - clear next 128 sq entries
+ * @qp: hw qp ptr
+ * @qp_wqe_idx: wqe_idx
+ */
+void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
+{
+ struct irdma_qp_quanta *sq;
+ u32 wqe_idx;
+
+ if (!(qp_wqe_idx & 0x7F)) {
+ wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
+ sq = qp->sq_base + wqe_idx;
+ if (wqe_idx)
+ memset(sq, qp->swqe_polarity ? 0 : 0xFF,
+ 128 * sizeof(*sq));
+ else
+ memset(sq, qp->swqe_polarity ? 0xFF : 0,
+ 128 * sizeof(*sq));
+ }
+}
+
+/**
+ * irdma_uk_qp_post_wr - ring doorbell
+ * @qp: hw qp ptr
+ */
+void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
+{
+ u64 temp;
+ u32 hw_sq_tail;
+ u32 sw_sq_head;
+
+ /* valid bit is written and loads completed before reading shadow */
+ mb();
+
+ /* read the doorbell shadow area */
+ get_64bit_val(qp->shadow_area, 0, &temp);
+
+ hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
+ sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
+ if (sw_sq_head != qp->initial_ring.head) {
+ if (sw_sq_head != hw_sq_tail) {
+ if (sw_sq_head > qp->initial_ring.head) {
+ if (hw_sq_tail >= qp->initial_ring.head &&
+ hw_sq_tail < sw_sq_head)
+ writel(qp->qp_id, qp->wqe_alloc_db);
+ } else {
+ if (hw_sq_tail >= qp->initial_ring.head ||
+ hw_sq_tail < sw_sq_head)
+ writel(qp->qp_id, qp->wqe_alloc_db);
+ }
+ }
+ }
+
+ qp->initial_ring.head = qp->sq_ring.head;
+}
+
+/**
+ * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
+ * @qp: hw qp ptr
+ * @wqe_idx: return wqe index
+ * @quanta: size of WR in quanta
+ * @total_size: size of WR in bytes
+ * @info: info on WR
+ */
+__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
+ u16 quanta, u32 total_size,
+ struct irdma_post_sq_info *info)
+{
+ __le64 *wqe;
+ __le64 *wqe_0 = NULL;
+ u16 avail_quanta;
+ u16 i;
+
+ avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
+ (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
+ qp->uk_attrs->max_hw_sq_chunk);
+ if (quanta <= avail_quanta) {
+ /* WR fits in current chunk */
+ if (quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
+ return NULL;
+ } else {
+ /* Need to pad with NOP */
+ if (quanta + avail_quanta >
+ IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
+ return NULL;
+
+ for (i = 0; i < avail_quanta; i++) {
+ irdma_nop_1(qp);
+ IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
+ }
+ }
+
+ *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
+ if (!*wqe_idx)
+ qp->swqe_polarity = !qp->swqe_polarity;
+
+ IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta);
+
+ wqe = qp->sq_base[*wqe_idx].elem;
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && quanta == 1 &&
+ (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
+ wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
+ wqe_0[3] = cpu_to_le64(FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity ? 0 : 1));
+ }
+ qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
+ qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
+ qp->sq_wrtrk_array[*wqe_idx].quanta = quanta;
+
+ return wqe;
+}
+
+__le64 *irdma_srq_get_next_recv_wqe(struct irdma_srq_uk *srq, u32 *wqe_idx)
+{
+ int ret_code;
+ __le64 *wqe;
+
+ if (IRDMA_RING_FULL_ERR(srq->srq_ring))
+ return NULL;
+
+ IRDMA_ATOMIC_RING_MOVE_HEAD(srq->srq_ring, *wqe_idx, ret_code);
+ if (ret_code)
+ return NULL;
+
+ if (!*wqe_idx)
+ srq->srwqe_polarity = !srq->srwqe_polarity;
+ /* wqe_size_multiplier is the number of 32-byte quanta in one srq wqe */
+ wqe = srq->srq_base[*wqe_idx * (srq->wqe_size_multiplier)].elem;
+
+ return wqe;
+}
+
+/**
+ * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
+ * @qp: hw qp ptr
+ * @wqe_idx: return wqe index
+ */
+__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
+{
+ __le64 *wqe;
+ int ret_code;
+
+ if (IRDMA_RING_FULL_ERR(qp->rq_ring))
+ return NULL;
+
+ IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
+ if (ret_code)
+ return NULL;
+
+ if (!*wqe_idx)
+ qp->rwqe_polarity = !qp->rwqe_polarity;
+ /* rq_wqe_size_multiplier is the number of 32-byte quanta in one rq wqe */
+ wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;
+
+ return wqe;
+}
+
+/**
+ * irdma_uk_rdma_write - rdma write operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool post_sq)
+{
+ u64 hdr;
+ __le64 *wqe;
+ struct irdma_rdma_write *op_info;
+ u32 i, wqe_idx;
+ u32 total_size = 0, byte_off;
+ int ret_code;
+ u32 frag_cnt, addl_frag_cnt;
+ bool read_fence = false;
+ u16 quanta;
+
+ op_info = &info->op.rdma_write;
+ if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
+ return -EINVAL;
+
+ for (i = 0; i < op_info->num_lo_sges; i++)
+ total_size += op_info->lo_sg_list[i].length;
+
+ read_fence |= info->read_fence;
+
+ if (info->imm_data_valid)
+ frag_cnt = op_info->num_lo_sges + 1;
+ else
+ frag_cnt = op_info->num_lo_sges;
+ addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
+ ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
+ if (ret_code)
+ return ret_code;
+
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return -ENOMEM;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
+
+ if (info->imm_data_valid) {
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
+ i = 0;
+ } else {
+ qp->wqe_ops.iw_set_fragment(wqe, 0,
+ op_info->lo_sg_list,
+ qp->swqe_polarity);
+ i = 1;
+ }
+
+ for (byte_off = 32; i < op_info->num_lo_sges; i++) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off,
+ &op_info->lo_sg_list[i],
+ qp->swqe_polarity);
+ byte_off += 16;
+ }
+
+ /* if not an odd number set valid bit in next fragment */
+ if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
+ frag_cnt) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
+ qp->swqe_polarity);
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
+ ++addl_frag_cnt;
+ }
+
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
+ FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
+ FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
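+
+/*
+ * Minimal usage sketch (illustrative only) for posting a single-SGE RDMA
+ * write through the uk API. It assumes the QP has already been set up with
+ * irdma_uk_qp_init() and that laddr/lkey and raddr/rkey describe registered
+ * local and remote buffers.
+ */
+static int __maybe_unused irdma_example_post_rdma_write(struct irdma_qp_uk *qp,
+ u64 laddr, u32 lkey,
+ u64 raddr, u32 rkey,
+ u32 len, u64 wr_id)
+{
+ struct ib_sge sge = { .addr = laddr, .length = len, .lkey = lkey };
+ struct irdma_post_sq_info info = {};
+
+ info.wr_id = wr_id;
+ info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
+ info.signaled = true;
+ info.op.rdma_write.lo_sg_list = &sge;
+ info.op.rdma_write.num_lo_sges = 1;
+ info.op.rdma_write.rem_addr.addr = raddr;
+ /* the remote stag is carried in rem_addr.lkey */
+ info.op.rdma_write.rem_addr.lkey = rkey;
+
+ /* post_sq = true rings the SQ doorbell via irdma_uk_qp_post_wr() */
+ return irdma_uk_rdma_write(qp, &info, true);
+}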
+
+/**
+ * irdma_uk_atomic_fetch_add - atomic fetch and add operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int irdma_uk_atomic_fetch_add(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
+{
+ struct irdma_atomic_fetch_add *op_info;
+ u32 total_size = 0;
+ u16 quanta = 2;
+ u32 wqe_idx;
+ __le64 *wqe;
+ u64 hdr;
+
+ op_info = &info->op.atomic_fetch_add;
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0, op_info->tagged_offset);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_STAG, op_info->stag));
+ set_64bit_val(wqe, 16, op_info->remote_tagged_offset);
+
+ hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, 1) |
+ FIELD_PREP(IRDMAQPSQ_REMOTE_STAG, op_info->remote_stag) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_ATOMIC_FETCH_ADD) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ set_64bit_val(wqe, 32, op_info->fetch_add_data_bytes);
+ set_64bit_val(wqe, 40, 0);
+ set_64bit_val(wqe, 48, 0);
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity));
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * irdma_uk_atomic_compare_swap - atomic compare and swap operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int irdma_uk_atomic_compare_swap(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
+{
+ struct irdma_atomic_compare_swap *op_info;
+ u32 total_size = 0;
+ u16 quanta = 2;
+ u32 wqe_idx;
+ __le64 *wqe;
+ u64 hdr;
+
+ op_info = &info->op.atomic_compare_swap;
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0, op_info->tagged_offset);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_STAG, op_info->stag));
+ set_64bit_val(wqe, 16, op_info->remote_tagged_offset);
+
+ hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, 1) |
+ FIELD_PREP(IRDMAQPSQ_REMOTE_STAG, op_info->remote_stag) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_ATOMIC_COMPARE_SWAP_ADD) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ set_64bit_val(wqe, 32, op_info->swap_data_bytes);
+ set_64bit_val(wqe, 40, op_info->compare_data_bytes);
+ set_64bit_val(wqe, 48, 0);
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity));
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * irdma_uk_srq_post_receive - post a receive wqe to a shared rq
+ * @srq: shared rq ptr
+ * @info: post rq information
+ */
+int irdma_uk_srq_post_receive(struct irdma_srq_uk *srq,
+ struct irdma_post_rq_info *info)
+{
+ u32 wqe_idx, i, byte_off;
+ u32 addl_frag_cnt;
+ __le64 *wqe;
+ u64 hdr;
+
+ if (srq->max_srq_frag_cnt < info->num_sges)
+ return -EINVAL;
+
+ wqe = irdma_srq_get_next_recv_wqe(srq, &wqe_idx);
+ if (!wqe)
+ return -ENOMEM;
+
+ addl_frag_cnt = info->num_sges > 1 ? info->num_sges - 1 : 0;
+ srq->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
+ srq->srwqe_polarity);
+
+ for (i = 1, byte_off = 32; i < info->num_sges; i++) {
+ srq->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
+ srq->srwqe_polarity);
+ byte_off += 16;
+ }
+
+ /* if not an odd number set valid bit in next fragment */
+ if (srq->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
+ info->num_sges) {
+ srq->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
+ srq->srwqe_polarity);
+ if (srq->uk_attrs->hw_rev == IRDMA_GEN_2)
+ ++addl_frag_cnt;
+ }
+
+ set_64bit_val(wqe, 16, (u64)info->wr_id);
+ hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_VALID, srq->srwqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ set_64bit_val(srq->shadow_area, 0, (wqe_idx + 1) % srq->srq_ring.size);
+
+ return 0;
+}
+
+/**
+ * irdma_uk_rdma_read - rdma read command
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @inv_stag: flag for inv_stag
+ * @post_sq: flag to post sq
+ */
+int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool inv_stag, bool post_sq)
+{
+ struct irdma_rdma_read *op_info;
+ int ret_code;
+ u32 i, byte_off, total_size = 0;
+ bool local_fence = false;
+ u32 addl_frag_cnt;
+ __le64 *wqe;
+ u32 wqe_idx;
+ u16 quanta;
+ u64 hdr;
+
+ op_info = &info->op.rdma_read;
+ if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
+ return -EINVAL;
+
+ for (i = 0; i < op_info->num_lo_sges; i++)
+ total_size += op_info->lo_sg_list[i].length;
+
+ ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
+ if (ret_code)
+ return ret_code;
+
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return -ENOMEM;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ addl_frag_cnt = op_info->num_lo_sges > 1 ?
+ (op_info->num_lo_sges - 1) : 0;
+ local_fence |= info->local_fence;
+
+ qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->lo_sg_list,
+ qp->swqe_polarity);
+ for (i = 1, byte_off = 32; i < op_info->num_lo_sges; ++i) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off,
+ &op_info->lo_sg_list[i],
+ qp->swqe_polarity);
+ byte_off += 16;
+ }
+
+ /* if not an odd number set valid bit in next fragment */
+ if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 &&
+ !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
+ qp->swqe_polarity);
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
+ ++addl_frag_cnt;
+ }
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE,
+ (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * irdma_uk_send - rdma send command
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_post_send *op_info;
+ u64 hdr;
+ u32 i, wqe_idx, total_size = 0, byte_off;
+ int ret_code;
+ u32 frag_cnt, addl_frag_cnt;
+ bool read_fence = false;
+ u16 quanta;
+
+ op_info = &info->op.send;
+ if (qp->max_sq_frag_cnt < op_info->num_sges)
+ return -EINVAL;
+
+ for (i = 0; i < op_info->num_sges; i++)
+ total_size += op_info->sg_list[i].length;
+
+ if (info->imm_data_valid)
+ frag_cnt = op_info->num_sges + 1;
+ else
+ frag_cnt = op_info->num_sges;
+ ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
+ if (ret_code)
+ return ret_code;
+
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return -ENOMEM;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ read_fence |= info->read_fence;
+ addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
+ if (info->imm_data_valid) {
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
+ i = 0;
+ } else {
+ qp->wqe_ops.iw_set_fragment(wqe, 0,
+ frag_cnt ? op_info->sg_list : NULL,
+ qp->swqe_polarity);
+ i = 1;
+ }
+
+ for (byte_off = 32; i < op_info->num_sges; i++) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
+ qp->swqe_polarity);
+ byte_off += 16;
+ }
+
+ /* if not an odd number set valid bit in next fragment */
+ if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
+ frag_cnt) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
+ qp->swqe_polarity);
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
+ ++addl_frag_cnt;
+ }
+
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
+ FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
+ FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
+ FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
+ (info->imm_data_valid ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
+ FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
+ FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
+ * @wqe: wqe for setting mw bind
+ * @op_info: info for setting bind wqe values
+ */
+static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
+ struct irdma_bind_window *op_info)
+{
+ set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) |
+ FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag));
+ set_64bit_val(wqe, 16, op_info->bind_len);
+}
+
+/**
+ * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
+ * @wqe: pointer to wqe
+ * @sge_list: table of sges referencing the inline data
+ * @num_sges: number of SGEs
+ * @polarity: compatibility parameter (unused for GEN_1)
+ */
+static void irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list,
+ u32 num_sges, u8 polarity)
+{
+ u32 quanta_bytes_remaining = 16;
+ int i;
+
+ for (i = 0; i < num_sges; i++) {
+ u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
+ u32 sge_len = sge_list[i].length;
+
+ while (sge_len) {
+ u32 bytes_copied;
+
+ bytes_copied = min(sge_len, quanta_bytes_remaining);
+ memcpy(wqe, cur_sge, bytes_copied);
+ wqe += bytes_copied;
+ cur_sge += bytes_copied;
+ quanta_bytes_remaining -= bytes_copied;
+ sge_len -= bytes_copied;
+
+ if (!quanta_bytes_remaining) {
+ /* Remaining inline bytes reside after hdr */
+ wqe += 16;
+ quanta_bytes_remaining = 32;
+ }
+ }
+ }
+}
+
+/**
+ * irdma_inline_data_size_to_quanta_gen_1 - based on inline data, quanta
+ * @data_size: data size for inline
+ *
+ * Gets the quanta based on inline and immediate data.
+ */
+static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size)
+{
+ return data_size <= 16 ? IRDMA_QP_WQE_MIN_QUANTA : 2;
+}
+
+/**
+ * irdma_set_mw_bind_wqe - set mw bind in wqe
+ * @wqe: wqe for setting mw bind
+ * @op_info: info for setting wqe values
+ */
+static void irdma_set_mw_bind_wqe(__le64 *wqe,
+ struct irdma_bind_window *op_info)
+{
+ set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
+ FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
+ set_64bit_val(wqe, 16, op_info->bind_len);
+}
+
+/**
+ * irdma_copy_inline_data - Copy inline data to wqe
+ * @wqe: pointer to wqe
+ * @sge_list: table of sges referencing the inline data
+ * @num_sges: number of SGEs
+ * @polarity: polarity of wqe valid bit
+ */
+static void irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list,
+ u32 num_sges, u8 polarity)
+{
+ u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
+ u32 quanta_bytes_remaining = 8;
+ bool first_quanta = true;
+ int i;
+
+ wqe += 8;
+
+ for (i = 0; i < num_sges; i++) {
+ u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
+ u32 sge_len = sge_list[i].length;
+
+ while (sge_len) {
+ u32 bytes_copied;
+
+ bytes_copied = min(sge_len, quanta_bytes_remaining);
+ memcpy(wqe, cur_sge, bytes_copied);
+ wqe += bytes_copied;
+ cur_sge += bytes_copied;
+ quanta_bytes_remaining -= bytes_copied;
+ sge_len -= bytes_copied;
+
+ if (!quanta_bytes_remaining) {
+ quanta_bytes_remaining = 31;
+
+ /* Remaining inline bytes reside after hdr */
+ if (first_quanta) {
+ first_quanta = false;
+ wqe += 16;
+ } else {
+ *wqe = inline_valid;
+ wqe++;
+ }
+ }
+ }
+ }
+ if (!first_quanta && quanta_bytes_remaining < 31)
+ *(wqe + quanta_bytes_remaining) = inline_valid;
+}
+
+/**
+ * irdma_inline_data_size_to_quanta - based on inline data, quanta
+ * @data_size: data size for inline
+ *
+ * Gets the quanta based on inline and immediate data.
+ */
+static u16 irdma_inline_data_size_to_quanta(u32 data_size)
+{
+ if (data_size <= 8)
+ return IRDMA_QP_WQE_MIN_QUANTA;
+ else if (data_size <= 39)
+ return 2;
+ else if (data_size <= 70)
+ return 3;
+ else if (data_size <= 101)
+ return 4;
+ else if (data_size <= 132)
+ return 5;
+ else if (data_size <= 163)
+ return 6;
+ else if (data_size <= 194)
+ return 7;
+ else
+ return 8;
+}
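+
+/*
+ * The thresholds above mirror how irdma_copy_inline_data() packs the
+ * payload: the first quantum carries 8 inline bytes and each additional
+ * quantum carries 31, so for data_size > 8 this is equivalent to
+ * 1 + DIV_ROUND_UP(data_size - 8, 31), with data_size capped by the
+ * callers at qp->max_inline_data.
+ */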
+
+/**
+ * irdma_uk_inline_rdma_write - inline rdma write operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_rdma_write *op_info;
+ u64 hdr = 0;
+ u32 wqe_idx;
+ bool read_fence = false;
+ u32 i, total_size = 0;
+ u16 quanta;
+
+ op_info = &info->op.rdma_write;
+
+ if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
+ return -EINVAL;
+
+ for (i = 0; i < op_info->num_lo_sges; i++)
+ total_size += op_info->lo_sg_list[i].length;
+
+ if (unlikely(total_size > qp->max_inline_data))
+ return -EINVAL;
+
+ quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return -ENOMEM;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ read_fence |= info->read_fence;
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
+
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
+ FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ if (info->imm_data_valid)
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
+
+ qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list,
+ op_info->num_lo_sges,
+ qp->swqe_polarity);
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * irdma_uk_inline_send - inline send operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int irdma_uk_inline_send(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_post_send *op_info;
+ u64 hdr;
+ u32 wqe_idx;
+ bool read_fence = false;
+ u32 i, total_size = 0;
+ u16 quanta;
+
+ op_info = &info->op.send;
+
+ if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
+ return -EINVAL;
+
+ for (i = 0; i < op_info->num_sges; i++)
+ total_size += op_info->sg_list[i].length;
+
+ if (unlikely(total_size > qp->max_inline_data))
+ return -EINVAL;
+
+ quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return -ENOMEM;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
+ FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
+
+ read_fence |= info->read_fence;
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
+ FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
+ FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
+ (info->imm_data_valid ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
+ FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ if (info->imm_data_valid)
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
+ qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list,
+ op_info->num_sges, qp->swqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * irdma_uk_stag_local_invalidate - stag invalidate operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info,
+ bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_inv_local_stag *op_info;
+ u64 hdr;
+ u32 wqe_idx;
+ bool local_fence = false;
+ struct ib_sge sge = {};
+
+ op_info = &info->op.inv_local_stag;
+ local_fence = info->local_fence;
+
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
+ 0, info);
+ if (!wqe)
+ return -ENOMEM;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ sge.lkey = op_info->target_stag;
+ qp->wqe_ops.iw_set_fragment(wqe, 0, &sge, 0);
+
+ set_64bit_val(wqe, 16, 0);
+
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * irdma_uk_post_receive - post receive wqe
+ * @qp: hw qp ptr
+ * @info: post rq information
+ */
+int irdma_uk_post_receive(struct irdma_qp_uk *qp,
+ struct irdma_post_rq_info *info)
+{
+ u32 wqe_idx, i, byte_off;
+ u32 addl_frag_cnt;
+ __le64 *wqe;
+ u64 hdr;
+
+ if (qp->max_rq_frag_cnt < info->num_sges)
+ return -EINVAL;
+
+ wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
+ if (!wqe)
+ return -ENOMEM;
+
+ qp->rq_wrid_array[wqe_idx] = info->wr_id;
+ addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
+ qp->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
+ qp->rwqe_polarity);
+
+ for (i = 1, byte_off = 32; i < info->num_sges; i++) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
+ qp->rwqe_polarity);
+ byte_off += 16;
+ }
+
+ /* if not an odd number set valid bit in next fragment */
+ if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
+ info->num_sges) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
+ qp->rwqe_polarity);
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
+ ++addl_frag_cnt;
+ }
+
+ set_64bit_val(wqe, 16, 0);
+ hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ return 0;
+}
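+
+/*
+ * Minimal usage sketch (illustrative only) for replenishing the RQ with one
+ * receive buffer; buf_addr/lkey are assumed to describe a registered MR.
+ */
+static int __maybe_unused irdma_example_post_recv(struct irdma_qp_uk *qp,
+ u64 buf_addr, u32 lkey,
+ u32 len, u64 wr_id)
+{
+ struct ib_sge sge = { .addr = buf_addr, .length = len, .lkey = lkey };
+ struct irdma_post_rq_info info = {};
+
+ info.wr_id = wr_id;
+ info.sg_list = &sge;
+ info.num_sges = 1;
+
+ return irdma_uk_post_receive(qp, &info);
+}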
+
+/**
+ * irdma_uk_cq_resize - reset the cq buffer info
+ * @cq: cq to resize
+ * @cq_base: new cq buffer addr
+ * @cq_size: number of cqes
+ */
+void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
+{
+ cq->cq_base = cq_base;
+ cq->cq_size = cq_size;
+ IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
+ cq->polarity = 1;
+}
+
+/**
+ * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers
+ * @cq: cq to resize
+ * @cq_cnt: the count of the resized cq buffers
+ */
+void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
+{
+ u64 temp_val;
+ u16 sw_cq_sel;
+ u8 arm_next_se;
+ u8 arm_next;
+ u8 arm_seq_num;
+
+ get_64bit_val(cq->shadow_area, 32, &temp_val);
+
+ sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
+ sw_cq_sel += cq_cnt;
+
+ arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
+ arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
+ arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val);
+
+ temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
+ FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);
+
+ set_64bit_val(cq->shadow_area, 32, temp_val);
+}
+
+/**
+ * irdma_uk_cq_request_notification - cq notification request (doorbell)
+ * @cq: hw cq
+ * @cq_notify: notification type
+ */
+void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
+ enum irdma_cmpl_notify cq_notify)
+{
+ u64 temp_val;
+ u16 sw_cq_sel;
+ u8 arm_next_se = 0;
+ u8 arm_next = 0;
+ u8 arm_seq_num;
+
+ get_64bit_val(cq->shadow_area, 32, &temp_val);
+ arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
+ arm_seq_num++;
+ sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
+ arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
+ arm_next_se |= 1;
+ if (cq_notify == IRDMA_CQ_COMPL_EVENT)
+ arm_next = 1;
+ temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
+ FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);
+
+ set_64bit_val(cq->shadow_area, 32, temp_val);
+
+ dma_wmb(); /* make sure shadow area is updated before the CQ doorbell is rung */
+
+ writel(cq->cq_id, cq->cqe_alloc_db);
+}
+
+/**
+ * irdma_uk_cq_poll_cmpl - get cq completion info
+ * @cq: hw cq
+ * @info: cq poll information returned
+ */
+int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
+ struct irdma_cq_poll_info *info)
+{
+ u64 comp_ctx, qword0, qword2, qword3;
+ __le64 *cqe;
+ struct irdma_qp_uk *qp;
+ struct irdma_srq_uk *srq;
+ struct qp_err_code qp_err;
+ u8 is_srq;
+ struct irdma_ring *pring = NULL;
+ u32 wqe_idx;
+ int ret_code;
+ bool move_cq_head = true;
+ u8 polarity;
+ bool ext_valid;
+ __le64 *ext_cqe;
+
+ if (cq->avoid_mem_cflct)
+ cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
+ else
+ cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);
+
+ get_64bit_val(cqe, 24, &qword3);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+ if (polarity != cq->polarity)
+ return -ENOENT;
+
+ /* Ensure CQE contents are read after valid bit is checked */
+ dma_rmb();
+
+ ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
+ if (ext_valid) {
+ u64 qword6, qword7;
+ u32 peek_head;
+
+ if (cq->avoid_mem_cflct) {
+ ext_cqe = (__le64 *)((u8 *)cqe + 32);
+ get_64bit_val(ext_cqe, 24, &qword7);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
+ } else {
+ peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
+ ext_cqe = cq->cq_base[peek_head].buf;
+ get_64bit_val(ext_cqe, 24, &qword7);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
+ if (!peek_head)
+ polarity ^= 1;
+ }
+ if (polarity != cq->polarity)
+ return -ENOENT;
+
+ /* Ensure ext CQE contents are read after ext valid bit is checked */
+ dma_rmb();
+
+ info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7);
+ if (info->imm_valid) {
+ u64 qword4;
+
+ get_64bit_val(ext_cqe, 0, &qword4);
+ info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4);
+ }
+ info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
+ info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
+ if (info->ud_smac_valid || info->ud_vlan_valid) {
+ get_64bit_val(ext_cqe, 16, &qword6);
+ if (info->ud_vlan_valid)
+ info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
+ if (info->ud_smac_valid) {
+ info->ud_smac[5] = qword6 & 0xFF;
+ info->ud_smac[4] = (qword6 >> 8) & 0xFF;
+ info->ud_smac[3] = (qword6 >> 16) & 0xFF;
+ info->ud_smac[2] = (qword6 >> 24) & 0xFF;
+ info->ud_smac[1] = (qword6 >> 32) & 0xFF;
+ info->ud_smac[0] = (qword6 >> 40) & 0xFF;
+ }
+ }
+ } else {
+ info->imm_valid = false;
+ info->ud_smac_valid = false;
+ info->ud_vlan_valid = false;
+ }
+
+ info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
+ is_srq = (u8)FIELD_GET(IRDMA_CQ_SRQ, qword3);
+ info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
+ info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
+ get_64bit_val(cqe, 8, &comp_ctx);
+ if (is_srq)
+ get_64bit_val(cqe, 40, (u64 *)&qp);
+ else
+ qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
+ if (info->error) {
+ info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
+ info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
+ switch (info->major_err) {
+ case IRDMA_SRQFLUSH_RSVD_MAJOR_ERR:
+ qp_err = irdma_ae_to_qp_err_code(info->minor_err);
+ info->minor_err = qp_err.flush_code;
+ fallthrough;
+ case IRDMA_FLUSH_MAJOR_ERR:
+ /* Set the min error to standard flush error code for remaining cqes */
+ if (info->minor_err != FLUSH_GENERAL_ERR) {
+ qword3 &= ~IRDMA_CQ_MINERR;
+ qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
+ set_64bit_val(cqe, 24, qword3);
+ }
+ info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
+ break;
+ default:
+#define IRDMA_CIE_SIGNATURE 0xE
+#define IRDMA_CQMAJERR_HIGH_NIBBLE GENMASK(15, 12)
+ if (info->q_type == IRDMA_CQE_QTYPE_SQ &&
+ qp->qp_type == IRDMA_QP_TYPE_ROCE_UD &&
+ FIELD_GET(IRDMA_CQMAJERR_HIGH_NIBBLE, info->major_err)
+ == IRDMA_CIE_SIGNATURE) {
+ info->error = 0;
+ info->major_err = 0;
+ info->minor_err = 0;
+ info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
+ } else {
+ info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
+ }
+ break;
+ }
+ } else {
+ info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
+ }
+
+ get_64bit_val(cqe, 0, &qword0);
+ get_64bit_val(cqe, 16, &qword2);
+
+ info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
+ info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);
+
+ get_64bit_val(cqe, 8, &comp_ctx);
+
+ info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
+ qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
+ if (!qp || qp->destroy_pending) {
+ ret_code = -EFAULT;
+ goto exit;
+ }
+ wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
+ info->qp_handle = (irdma_qp_handle)(unsigned long)qp;
+ info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
+
+ if (info->q_type == IRDMA_CQE_QTYPE_RQ && is_srq) {
+ srq = qp->srq_uk;
+
+ get_64bit_val(cqe, 8, &info->wr_id);
+ info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
+
+ if (qword3 & IRDMACQ_STAG) {
+ info->stag_invalid_set = true;
+ info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG,
+ qword2);
+ } else {
+ info->stag_invalid_set = false;
+ }
+ IRDMA_RING_MOVE_TAIL(srq->srq_ring);
+ pring = &srq->srq_ring;
+ } else if (info->q_type == IRDMA_CQE_QTYPE_RQ && !is_srq) {
+ u32 array_idx;
+
+ array_idx = wqe_idx / qp->rq_wqe_size_multiplier;
+
+ if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
+ info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
+ if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
+ ret_code = -ENOENT;
+ goto exit;
+ }
+
+ info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
+ array_idx = qp->rq_ring.tail;
+ } else {
+ info->wr_id = qp->rq_wrid_array[array_idx];
+ }
+
+ info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
+
+ if (qword3 & IRDMACQ_STAG) {
+ info->stag_invalid_set = true;
+ info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
+ } else {
+ info->stag_invalid_set = false;
+ }
+ IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
+ if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
+ qp->rq_flush_seen = true;
+ if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
+ qp->rq_flush_complete = true;
+ else
+ move_cq_head = false;
+ }
+ pring = &qp->rq_ring;
+ } else { /* q_type is IRDMA_CQE_QTYPE_SQ */
+ if (qp->first_sq_wq) {
+ if (wqe_idx + 1 >= qp->conn_wqes)
+ qp->first_sq_wq = false;
+
+ if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) {
+ IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
+ IRDMA_RING_MOVE_TAIL(cq->cq_ring);
+ set_64bit_val(cq->shadow_area, 0,
+ IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
+ memset(info, 0,
+ sizeof(struct irdma_cq_poll_info));
+ return irdma_uk_cq_poll_cmpl(cq, info);
+ }
+ }
+ if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
+ info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
+ if (!info->comp_status)
+ info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
+ info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
+ IRDMA_RING_SET_TAIL(qp->sq_ring,
+ wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
+ } else {
+ if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
+ ret_code = -ENOENT;
+ goto exit;
+ }
+
+ do {
+ __le64 *sw_wqe;
+ u64 wqe_qword;
+ u32 tail;
+
+ tail = qp->sq_ring.tail;
+ sw_wqe = qp->sq_base[tail].elem;
+ get_64bit_val(sw_wqe, 24,
+ &wqe_qword);
+ info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE,
+ wqe_qword);
+ IRDMA_RING_SET_TAIL(qp->sq_ring,
+ tail + qp->sq_wrtrk_array[tail].quanta);
+ if (info->op_type != IRDMAQP_OP_NOP) {
+ info->wr_id = qp->sq_wrtrk_array[tail].wrid;
+ info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
+ break;
+ }
+ } while (1);
+ if (info->op_type == IRDMA_OP_TYPE_BIND_MW &&
+ info->minor_err == FLUSH_PROT_ERR)
+ info->minor_err = FLUSH_MW_BIND_ERR;
+ qp->sq_flush_seen = true;
+ if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
+ qp->sq_flush_complete = true;
+ }
+ pring = &qp->sq_ring;
+ }
+
+ ret_code = 0;
+
+exit:
+ if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
+ if (pring && IRDMA_RING_MORE_WORK(*pring))
+ /* Park CQ head during a flush to generate additional CQEs
+ * from SW for all unprocessed WQEs. For GEN3 and beyond
+ * FW will generate/flush these CQEs so move to the next CQE
+ */
+ move_cq_head = qp->uk_attrs->hw_rev <= IRDMA_GEN_2 ?
+ false : true;
+ }
+
+ if (move_cq_head) {
+ IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
+ if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
+ cq->polarity ^= 1;
+
+ if (ext_valid && !cq->avoid_mem_cflct) {
+ IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
+ if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
+ cq->polarity ^= 1;
+ }
+
+ IRDMA_RING_MOVE_TAIL(cq->cq_ring);
+ if (!cq->avoid_mem_cflct && ext_valid)
+ IRDMA_RING_MOVE_TAIL(cq->cq_ring);
+ set_64bit_val(cq->shadow_area, 0,
+ IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
+ } else {
+ qword3 &= ~IRDMA_CQ_WQEIDX;
+ qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
+ set_64bit_val(cqe, 24, qword3);
+ }
+
+ return ret_code;
+}
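+
+/*
+ * Minimal polling sketch (illustrative only): drain the completions that are
+ * currently available, then re-arm the CQ for the next event. A return of
+ * -ENOENT simply means there is no valid CQE to process.
+ */
+static void __maybe_unused irdma_example_drain_cq(struct irdma_cq_uk *cq)
+{
+ struct irdma_cq_poll_info info;
+ int ret;
+
+ do {
+ ret = irdma_uk_cq_poll_cmpl(cq, &info);
+ if (!ret)
+ pr_debug("wr_id 0x%llx comp_status %d bytes %u\n",
+ (unsigned long long)info.wr_id,
+ info.comp_status, info.bytes_xfered);
+ } while (!ret);
+
+ /* request an interrupt for the next completion event */
+ irdma_uk_cq_request_notification(cq, IRDMA_CQ_COMPL_EVENT);
+}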
+
+/**
+ * irdma_round_up_wq - round up qp wq depth to the next power of two
+ * @wqdepth: wq depth in quanta to round up
+ */
+static int irdma_round_up_wq(u32 wqdepth)
+{
+ int scount = 1;
+
+ for (wqdepth--; scount <= 16; scount *= 2)
+ wqdepth |= wqdepth >> scount;
+
+ return ++wqdepth;
+}
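+
+/*
+ * Example: irdma_round_up_wq(200) and irdma_round_up_wq(256) both return
+ * 256; the requested depth is rounded up to the nearest power of two that
+ * is greater than or equal to it.
+ */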
+
+/**
+ * irdma_get_wqe_shift - get shift count for maximum wqe size
+ * @uk_attrs: qp HW attributes
+ * @sge: maximum number of Scatter Gather Elements per wqe
+ * @inline_data: Maximum inline data size
+ * @shift: Returns the shift needed based on sge
+ *
+ * Shift is used to left-shift the 32-byte base wqe size based on the number
+ * of SGEs and the inline data size.
+ * For 1 SGE or inline data <= 8, shift = 0 (wqe size of 32 bytes).
+ * For up to 3 SGEs and inline data <= 39, shift = 1 (wqe size of 64 bytes).
+ * For up to 7 SGEs and inline data <= 101, shift = 2 (wqe size of 128 bytes).
+ * Otherwise shift = 3 (wqe size of 256 bytes).
+ * GEN_1 uses inline thresholds of 16 and 48 bytes and caps the shift at 2.
+ */
+void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
+ u32 inline_data, u8 *shift)
+{
+ *shift = 0;
+ if (uk_attrs->hw_rev >= IRDMA_GEN_2) {
+ if (sge > 1 || inline_data > 8) {
+ if (sge < 4 && inline_data <= 39)
+ *shift = 1;
+ else if (sge < 8 && inline_data <= 101)
+ *shift = 2;
+ else
+ *shift = 3;
+ }
+ } else if (sge > 1 || inline_data > 16) {
+ *shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
+ }
+}
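+
+/*
+ * The returned shift scales the 32-byte base quantum, e.g. (illustrative,
+ * GEN_2 attributes assumed):
+ *
+ * u8 shift;
+ *
+ * irdma_get_wqe_shift(uk_attrs, 2, 0, &shift);
+ * // 2 SGEs, no inline data: shift == 1, so each WQE is 32 << 1 = 64 bytes
+ */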
+
+/*
+ * irdma_get_sqdepth - get SQ depth (quanta)
+ * @uk_attrs: qp HW attributes
+ * @sq_size: SQ size
+ * @shift: shift which determines size of WQE
+ * @sqdepth: depth of SQ
+ *
+ */
+int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
+ u32 *sqdepth)
+{
+ u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
+
+ *sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);
+
+ if (*sqdepth < min_size)
+ *sqdepth = min_size;
+ else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * irdma_get_rqdepth - get RQ depth (quanta)
+ * @uk_attrs: qp HW attributes
+ * @rq_size: RQ size
+ * @shift: shift which determines size of WQE
+ * @rqdepth: depth of RQ
+ */
+int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
+ u32 *rqdepth)
+{
+ u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
+
+ *rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);
+
+ if (*rqdepth < min_size)
+ *rqdepth = min_size;
+ else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * irdma_get_srqdepth - get SRQ depth (quanta)
+ * @uk_attrs: qp HW attributes
+ * @srq_size: SRQ size
+ * @shift: shift which determines size of WQE
+ * @srqdepth: depth of SRQ
+ */
+int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift,
+ u32 *srqdepth)
+{
+ *srqdepth = irdma_round_up_wq((srq_size << shift) + IRDMA_RQ_RSVD);
+
+ if (*srqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
+ *srqdepth = uk_attrs->min_hw_wq_size << shift;
+ else if (*srqdepth > uk_attrs->max_hw_srq_quanta)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
+ .iw_copy_inline_data = irdma_copy_inline_data,
+ .iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
+ .iw_set_fragment = irdma_set_fragment,
+ .iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
+};
+
+static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
+ .iw_copy_inline_data = irdma_copy_inline_data_gen_1,
+ .iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
+ .iw_set_fragment = irdma_set_fragment_gen_1,
+ .iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
+};
+
+/**
+ * irdma_setup_connection_wqes - setup WQEs necessary to complete
+ * connection.
+ * @qp: hw qp (user and kernel)
+ * @info: qp initialization info
+ */
+static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
+ struct irdma_qp_uk_init_info *info)
+{
+ u16 move_cnt = 1;
+
+ if (!info->legacy_mode &&
+ (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE))
+ move_cnt = 3;
+
+ qp->conn_wqes = move_cnt;
+ IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
+ IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
+ IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
+}
+
+/**
+ * irdma_uk_srq_init - initialize shared receive queue
+ * @srq: hw srq (user and kernel)
+ * @info: srq initialization info
+ *
+ * Initializes the vars used in both user and kernel mode.
+ * The size of the wqe depends on the maximum number of fragments
+ * allowed. Then size of wqe * the number of wqes should be the
+ * amount of memory allocated for srq.
+ */
+int irdma_uk_srq_init(struct irdma_srq_uk *srq,
+ struct irdma_srq_uk_init_info *info)
+{
+ u8 rqshift;
+
+ srq->uk_attrs = info->uk_attrs;
+ if (info->max_srq_frag_cnt > srq->uk_attrs->max_hw_wq_frags)
+ return -EINVAL;
+
+ irdma_get_wqe_shift(srq->uk_attrs, info->max_srq_frag_cnt, 0, &rqshift);
+ srq->srq_caps = info->srq_caps;
+ srq->srq_base = info->srq;
+ srq->shadow_area = info->shadow_area;
+ srq->srq_id = info->srq_id;
+ srq->srwqe_polarity = 0;
+ srq->srq_size = info->srq_size;
+ srq->wqe_size = rqshift;
+ srq->max_srq_frag_cnt = min(srq->uk_attrs->max_hw_wq_frags,
+ ((u32)2 << rqshift) - 1);
+ IRDMA_RING_INIT(srq->srq_ring, srq->srq_size);
+ srq->wqe_size_multiplier = 1 << rqshift;
+ srq->wqe_ops = iw_wqe_uk_ops;
+
+ return 0;
+}
+
+/**
+ * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
+ * @ukinfo: qp initialization info
+ * @sq_shift: Returns shift of SQ
+ * @rq_shift: Returns shift of RQ
+ */
+void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
+ u8 *rq_shift)
+{
+ bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2;
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs,
+ imm_support ? ukinfo->max_sq_frag_cnt + 1 :
+ ukinfo->max_sq_frag_cnt,
+ ukinfo->max_inline_data, sq_shift);
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
+ rq_shift);
+
+ if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
+ if (ukinfo->abi_ver > 4)
+ *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
+ }
+}
+
+/**
+ * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size.
+ * @ukinfo: qp initialization info
+ * @sq_depth: Returns depth of SQ
+ * @sq_shift: Returns shift of SQ
+ */
+int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *sq_depth, u8 *sq_shift)
+{
+ bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2;
+ int status;
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs,
+ imm_support ? ukinfo->max_sq_frag_cnt + 1 :
+ ukinfo->max_sq_frag_cnt,
+ ukinfo->max_inline_data, sq_shift);
+ status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size,
+ *sq_shift, sq_depth);
+
+ return status;
+}
+
+/**
+ * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size.
+ * @ukinfo: qp initialization info
+ * @rq_depth: Returns depth of RQ
+ * @rq_shift: Returns shift of RQ
+ */
+int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *rq_depth, u8 *rq_shift)
+{
+ int status;
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
+ rq_shift);
+
+ if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
+ if (ukinfo->abi_ver > 4)
+ *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
+ }
+
+ status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size,
+ *rq_shift, rq_depth);
+
+ return status;
+}
+
+/**
+ * irdma_uk_qp_init - initialize shared qp
+ * @qp: hw qp (user and kernel)
+ * @info: qp initialization info
+ *
+ * Initializes the vars used in both user and kernel mode.
+ * The size of the wqe depends on the maximum number of fragments
+ * allowed. Then size of wqe * the number of wqes should be the
+ * amount of memory allocated for sq and rq.
+ */
+int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
+{
+ int ret_code = 0;
+ u32 sq_ring_size;
+
+ qp->uk_attrs = info->uk_attrs;
+ if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
+ info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
+ return -EINVAL;
+
+ qp->qp_caps = info->qp_caps;
+ qp->sq_base = info->sq;
+ qp->rq_base = info->rq;
+ qp->qp_type = info->type ? info->type : IRDMA_QP_TYPE_IWARP;
+ qp->shadow_area = info->shadow_area;
+ qp->sq_wrtrk_array = info->sq_wrtrk_array;
+
+ qp->rq_wrid_array = info->rq_wrid_array;
+ qp->wqe_alloc_db = info->wqe_alloc_db;
+ qp->qp_id = info->qp_id;
+ qp->sq_size = info->sq_size;
+ qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
+ sq_ring_size = qp->sq_size << info->sq_shift;
+ IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
+ IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
+ if (info->first_sq_wq) {
+ irdma_setup_connection_wqes(qp, info);
+ qp->swqe_polarity = 1;
+ qp->first_sq_wq = true;
+ } else {
+ qp->swqe_polarity = 0;
+ }
+ qp->swqe_polarity_deferred = 1;
+ qp->rwqe_polarity = 0;
+ qp->rq_size = info->rq_size;
+ qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
+ qp->max_inline_data = info->max_inline_data;
+ qp->rq_wqe_size = info->rq_shift;
+ IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
+ qp->rq_wqe_size_multiplier = 1 << info->rq_shift;
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
+ qp->wqe_ops = iw_wqe_uk_ops_gen_1;
+ else
+ qp->wqe_ops = iw_wqe_uk_ops;
+ qp->srq_uk = info->srq_uk;
+ return ret_code;
+}
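+
+/*
+ * Typical call sequence (illustrative only, assuming the caller fills in the
+ * remaining irdma_qp_uk_init_info fields and allocates the queues itself):
+ *
+ * u32 sq_depth, rq_depth;
+ * u8 sq_shift, rq_shift;
+ *
+ * err = irdma_uk_calc_depth_shift_sq(&info, &sq_depth, &sq_shift);
+ * if (!err)
+ *         err = irdma_uk_calc_depth_shift_rq(&info, &rq_depth, &rq_shift);
+ *
+ * // allocate sq/rq/shadow area sized from the returned depths, set
+ * // info.sq_shift and info.rq_shift, then:
+ * if (!err)
+ *         err = irdma_uk_qp_init(qp, &info);
+ */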
+
+/**
+ * irdma_uk_cq_init - initialize shared cq (user and kernel)
+ * @cq: hw cq
+ * @info: hw cq initialization info
+ */
+void irdma_uk_cq_init(struct irdma_cq_uk *cq,
+ struct irdma_cq_uk_init_info *info)
+{
+ cq->cq_base = info->cq_base;
+ cq->cq_id = info->cq_id;
+ cq->cq_size = info->cq_size;
+ cq->cqe_alloc_db = info->cqe_alloc_db;
+ cq->cq_ack_db = info->cq_ack_db;
+ cq->shadow_area = info->shadow_area;
+ cq->avoid_mem_cflct = info->avoid_mem_cflct;
+ IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
+ cq->polarity = 1;
+}
+
+/**
+ * irdma_uk_clean_cq - clean cq entries
+ * @q: completion context
+ * @cq: cq to clean
+ */
+void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
+{
+ __le64 *cqe;
+ u64 qword3, comp_ctx;
+ u32 cq_head;
+ u8 polarity, temp;
+
+ cq_head = cq->cq_ring.head;
+ temp = cq->polarity;
+ do {
+ if (cq->avoid_mem_cflct)
+ cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
+ else
+ cqe = cq->cq_base[cq_head].buf;
+ get_64bit_val(cqe, 24, &qword3);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+
+ if (polarity != temp)
+ break;
+
+ /* Ensure CQE contents are read after valid bit is checked */
+ dma_rmb();
+
+ get_64bit_val(cqe, 8, &comp_ctx);
+ if ((void *)(unsigned long)comp_ctx == q)
+ set_64bit_val(cqe, 8, 0);
+
+ cq_head = (cq_head + 1) % cq->cq_ring.size;
+ if (!cq_head)
+ temp ^= 1;
+ } while (true);
+}
+
+/**
+ * irdma_nop - post a nop
+ * @qp: hw qp ptr
+ * @wr_id: work request id
+ * @signaled: signaled for completion
+ * @post_sq: ring doorbell
+ */
+int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+ u32 wqe_idx;
+ struct irdma_post_sq_info info = {};
+
+ info.wr_id = wr_id;
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
+ 0, &info);
+ if (!wqe)
+ return -ENOMEM;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 8, 0);
+ set_64bit_val(wqe, 16, 0);
+
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
+ * @frag_cnt: number of fragments
+ * @quanta: quanta for frag_cnt
+ */
+int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
+{
+ switch (frag_cnt) {
+ case 0:
+ case 1:
+ *quanta = IRDMA_QP_WQE_MIN_QUANTA;
+ break;
+ case 2:
+ case 3:
+ *quanta = 2;
+ break;
+ case 4:
+ case 5:
+ *quanta = 3;
+ break;
+ case 6:
+ case 7:
+ *quanta = 4;
+ break;
+ case 8:
+ case 9:
+ *quanta = 5;
+ break;
+ case 10:
+ case 11:
+ *quanta = 6;
+ break;
+ case 12:
+ case 13:
+ *quanta = 7;
+ break;
+ case 14:
+ case 15: /* when immediate data is present */
+ *quanta = 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
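+
+/*
+ * The table above follows from the SQ WQE layout: the first 32-byte quantum
+ * holds the header plus one fragment and every additional quantum holds two
+ * 16-byte fragments, so for frag_cnt >= 2 the result is
+ * 1 + DIV_ROUND_UP(frag_cnt - 1, 2).
+ */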
+
+/**
+ * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
+ * @frag_cnt: number of fragments
+ * @wqe_size: size in bytes given frag_cnt
+ */
+int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
+{
+ switch (frag_cnt) {
+ case 0:
+ case 1:
+ *wqe_size = 32;
+ break;
+ case 2:
+ case 3:
+ *wqe_size = 64;
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ *wqe_size = 128;
+ break;
+ case 8:
+ case 9:
+ case 10:
+ case 11:
+ case 12:
+ case 13:
+ case 14:
+ *wqe_size = 256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
new file mode 100644
index 000000000000..ab57f689827a
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/user.h
@@ -0,0 +1,674 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2015 - 2020 Intel Corporation */
+#ifndef IRDMA_USER_H
+#define IRDMA_USER_H
+
+#define irdma_handle void *
+#define irdma_adapter_handle irdma_handle
+#define irdma_qp_handle irdma_handle
+#define irdma_cq_handle irdma_handle
+#define irdma_pd_id irdma_handle
+#define irdma_stag_handle irdma_handle
+#define irdma_stag_index u32
+#define irdma_stag u32
+#define irdma_stag_key u8
+#define irdma_tagged_offset u64
+#define irdma_access_privileges u32
+#define irdma_physical_fragment u64
+#define irdma_address_list u64 *
+
+#define IRDMA_MAX_MR_SIZE 0x200000000000ULL
+
+#define IRDMA_ACCESS_FLAGS_LOCALREAD 0x01
+#define IRDMA_ACCESS_FLAGS_LOCALWRITE 0x02
+#define IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY 0x04
+#define IRDMA_ACCESS_FLAGS_REMOTEREAD 0x05
+#define IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY 0x08
+#define IRDMA_ACCESS_FLAGS_REMOTEWRITE 0x0a
+#define IRDMA_ACCESS_FLAGS_BIND_WINDOW 0x10
+#define IRDMA_ACCESS_FLAGS_ZERO_BASED 0x20
+#define IRDMA_ACCESS_FLAGS_ALL 0x3f
+
+#define IRDMA_OP_TYPE_RDMA_WRITE 0x00
+#define IRDMA_OP_TYPE_RDMA_READ 0x01
+#define IRDMA_OP_TYPE_SEND 0x03
+#define IRDMA_OP_TYPE_SEND_INV 0x04
+#define IRDMA_OP_TYPE_SEND_SOL 0x05
+#define IRDMA_OP_TYPE_SEND_SOL_INV 0x06
+#define IRDMA_OP_TYPE_RDMA_WRITE_SOL 0x0d
+#define IRDMA_OP_TYPE_BIND_MW 0x08
+#define IRDMA_OP_TYPE_FAST_REG_NSMR 0x09
+#define IRDMA_OP_TYPE_INV_STAG 0x0a
+#define IRDMA_OP_TYPE_RDMA_READ_INV_STAG 0x0b
+#define IRDMA_OP_TYPE_NOP 0x0c
+#define IRDMA_OP_TYPE_ATOMIC_FETCH_AND_ADD 0x0f
+#define IRDMA_OP_TYPE_ATOMIC_COMPARE_AND_SWAP 0x11
+#define IRDMA_OP_TYPE_REC 0x3e
+#define IRDMA_OP_TYPE_REC_IMM 0x3f
+
+#define IRDMA_FLUSH_MAJOR_ERR 1
+#define IRDMA_SRQFLUSH_RSVD_MAJOR_ERR 0xfffe
+
+/* Async Events codes */
+#define IRDMA_AE_AMP_UNALLOCATED_STAG 0x0102
+#define IRDMA_AE_AMP_INVALID_STAG 0x0103
+#define IRDMA_AE_AMP_BAD_QP 0x0104
+#define IRDMA_AE_AMP_BAD_PD 0x0105
+#define IRDMA_AE_AMP_BAD_STAG_KEY 0x0106
+#define IRDMA_AE_AMP_BAD_STAG_INDEX 0x0107
+#define IRDMA_AE_AMP_BOUNDS_VIOLATION 0x0108
+#define IRDMA_AE_AMP_RIGHTS_VIOLATION 0x0109
+#define IRDMA_AE_AMP_TO_WRAP 0x010a
+#define IRDMA_AE_AMP_FASTREG_VALID_STAG 0x010c
+#define IRDMA_AE_AMP_FASTREG_MW_STAG 0x010d
+#define IRDMA_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e
+#define IRDMA_AE_AMP_FASTREG_INVALID_LENGTH 0x0110
+#define IRDMA_AE_AMP_INVALIDATE_SHARED 0x0111
+#define IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112
+#define IRDMA_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113
+#define IRDMA_AE_AMP_MWBIND_VALID_STAG 0x0114
+#define IRDMA_AE_AMP_MWBIND_OF_MR_STAG 0x0115
+#define IRDMA_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116
+#define IRDMA_AE_AMP_MWBIND_TO_MW_STAG 0x0117
+#define IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118
+#define IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119
+#define IRDMA_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a
+#define IRDMA_AE_AMP_MWBIND_BIND_DISABLED 0x011b
+#define IRDMA_AE_PRIV_OPERATION_DENIED 0x011c
+#define IRDMA_AE_AMP_INVALIDATE_TYPE1_MW 0x011d
+#define IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW 0x011e
+#define IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG 0x011f
+#define IRDMA_AE_AMP_MWBIND_WRONG_TYPE 0x0120
+#define IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH 0x0121
+#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132
+#define IRDMA_AE_UDA_XMIT_BAD_PD 0x0133
+#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134
+#define IRDMA_AE_UDA_L4LEN_INVALID 0x0135
+#define IRDMA_AE_BAD_CLOSE 0x0201
+#define IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202
+#define IRDMA_AE_CQ_OPERATION_ERROR 0x0203
+#define IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205
+#define IRDMA_AE_STAG_ZERO_INVALID 0x0206
+#define IRDMA_AE_IB_RREQ_AND_Q1_FULL 0x0207
+#define IRDMA_AE_IB_INVALID_REQUEST 0x0208
+#define IRDMA_AE_SRQ_LIMIT 0x0209
+#define IRDMA_AE_WQE_UNEXPECTED_OPCODE 0x020a
+#define IRDMA_AE_WQE_INVALID_PARAMETER 0x020b
+#define IRDMA_AE_WQE_INVALID_FRAG_DATA 0x020c
+#define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d
+#define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e
+#define IRDMA_AE_SRQ_CATASTROPHIC_ERROR 0x020f
+#define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220
+#define IRDMA_AE_ATOMIC_ALIGNMENT 0x0221
+#define IRDMA_AE_ATOMIC_MASK 0x0222
+#define IRDMA_AE_INVALID_REQUEST 0x0223
+#define IRDMA_AE_PCIE_ATOMIC_DISABLE 0x0224
+#define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
+#define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
+#define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
+#define IRDMA_AE_DDP_UBE_INVALID_MO 0x0305
+#define IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306
+#define IRDMA_AE_DDP_UBE_INVALID_QN 0x0307
+#define IRDMA_AE_DDP_NO_L_BIT 0x0308
+#define IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311
+#define IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312
+#define IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313
+#define IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314
+#define IRDMA_AE_ROCE_RSP_LENGTH_ERROR 0x0316
+#define IRDMA_AE_ROCE_EMPTY_MCG 0x0380
+#define IRDMA_AE_ROCE_BAD_MC_IP_ADDR 0x0381
+#define IRDMA_AE_ROCE_BAD_MC_QPID 0x0382
+#define IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH 0x0383
+#define IRDMA_AE_INVALID_ARP_ENTRY 0x0401
+#define IRDMA_AE_INVALID_TCP_OPTION_RCVD 0x0402
+#define IRDMA_AE_STALE_ARP_ENTRY 0x0403
+#define IRDMA_AE_INVALID_AH_ENTRY 0x0406
+#define IRDMA_AE_LLP_CLOSE_COMPLETE 0x0501
+#define IRDMA_AE_LLP_CONNECTION_RESET 0x0502
+#define IRDMA_AE_LLP_FIN_RECEIVED 0x0503
+#define IRDMA_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504
+#define IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505
+#define IRDMA_AE_LLP_SEGMENT_TOO_SMALL 0x0507
+#define IRDMA_AE_LLP_SYN_RECEIVED 0x0508
+#define IRDMA_AE_LLP_TERMINATE_RECEIVED 0x0509
+#define IRDMA_AE_LLP_TOO_MANY_RETRIES 0x050a
+#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
+#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
+#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
+#define IRDMA_AE_LLP_TOO_MANY_RNRS 0x050f
+#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
+#define IRDMA_AE_RESET_SENT 0x0601
+#define IRDMA_AE_TERMINATE_SENT 0x0602
+#define IRDMA_AE_RESET_NOT_SENT 0x0603
+#define IRDMA_AE_LCE_QP_CATASTROPHIC 0x0700
+#define IRDMA_AE_LCE_FUNCTION_CATASTROPHIC 0x0701
+#define IRDMA_AE_LCE_CQ_CATASTROPHIC 0x0702
+#define IRDMA_AE_REMOTE_QP_CATASTROPHIC 0x0703
+#define IRDMA_AE_LOCAL_QP_CATASTROPHIC 0x0704
+#define IRDMA_AE_RCE_QP_CATASTROPHIC 0x0705
+#define IRDMA_AE_QP_SUSPEND_COMPLETE 0x0900
+#define IRDMA_AE_CQP_DEFERRED_COMPLETE 0x0901
+#define IRDMA_AE_ADAPTER_CATASTROPHIC 0x0B0B
+
+enum irdma_device_caps_const {
+ IRDMA_WQE_SIZE = 4,
+ IRDMA_CQP_WQE_SIZE = 8,
+ IRDMA_CQE_SIZE = 4,
+ IRDMA_EXTENDED_CQE_SIZE = 8,
+ IRDMA_AEQE_SIZE = 2,
+ IRDMA_CEQE_SIZE = 1,
+ IRDMA_CQP_CTX_SIZE = 8,
+ IRDMA_SHADOW_AREA_SIZE = 8,
+ IRDMA_QUERY_FPM_BUF_SIZE = 192,
+ IRDMA_COMMIT_FPM_BUF_SIZE = 192,
+ IRDMA_GATHER_STATS_BUF_SIZE = 1024,
+ IRDMA_MIN_IW_QP_ID = 0,
+ IRDMA_MAX_IW_QP_ID = 262143,
+ IRDMA_MIN_IW_SRQ_ID = 0,
+ IRDMA_MIN_CEQID = 0,
+ IRDMA_MAX_CEQID = 1023,
+ IRDMA_CEQ_MAX_COUNT = IRDMA_MAX_CEQID + 1,
+ IRDMA_MIN_CQID = 0,
+ IRDMA_MAX_CQID = 524287,
+ IRDMA_MIN_AEQ_ENTRIES = 1,
+ IRDMA_MAX_AEQ_ENTRIES = 524287,
+ IRDMA_MAX_AEQ_ENTRIES_GEN_3 = 262144,
+ IRDMA_MIN_CEQ_ENTRIES = 1,
+ IRDMA_MAX_CEQ_ENTRIES = 262143,
+ IRDMA_MIN_CQ_SIZE = 1,
+ IRDMA_MAX_CQ_SIZE = 1048575,
+ IRDMA_DB_ID_ZERO = 0,
+ IRDMA_MAX_WQ_FRAGMENT_COUNT = 13,
+ IRDMA_MAX_SGE_RD = 13,
+ IRDMA_MAX_OUTBOUND_MSG_SIZE = 2147483647,
+ IRDMA_MAX_INBOUND_MSG_SIZE = 2147483647,
+ IRDMA_MAX_PUSH_PAGE_COUNT = 1024,
+ IRDMA_MAX_PE_ENA_VF_COUNT = 32,
+ IRDMA_MAX_VF_FPM_ID = 47,
+ IRDMA_MAX_SQ_PAYLOAD_SIZE = 2145386496,
+ IRDMA_MAX_INLINE_DATA_SIZE = 101,
+ IRDMA_MAX_WQ_ENTRIES = 32768,
+ IRDMA_Q2_BUF_SIZE = 256,
+ IRDMA_QP_CTX_SIZE = 256,
+ IRDMA_MAX_PDS = 262144,
+ IRDMA_MIN_WQ_SIZE_GEN2 = 8,
+};
+
+enum irdma_addressing_type {
+ IRDMA_ADDR_TYPE_ZERO_BASED = 0,
+ IRDMA_ADDR_TYPE_VA_BASED = 1,
+};
+
+enum irdma_flush_opcode {
+ FLUSH_INVALID = 0,
+ FLUSH_GENERAL_ERR,
+ FLUSH_PROT_ERR,
+ FLUSH_REM_ACCESS_ERR,
+ FLUSH_LOC_QP_OP_ERR,
+ FLUSH_REM_OP_ERR,
+ FLUSH_LOC_LEN_ERR,
+ FLUSH_FATAL_ERR,
+ FLUSH_RETRY_EXC_ERR,
+ FLUSH_MW_BIND_ERR,
+ FLUSH_REM_INV_REQ_ERR,
+ FLUSH_RNR_RETRY_EXC_ERR,
+};
+
+enum irdma_qp_event_type {
+ IRDMA_QP_EVENT_CATASTROPHIC,
+ IRDMA_QP_EVENT_ACCESS_ERR,
+ IRDMA_QP_EVENT_REQ_ERR,
+};
+
+enum irdma_cmpl_status {
+ IRDMA_COMPL_STATUS_SUCCESS = 0,
+ IRDMA_COMPL_STATUS_FLUSHED,
+ IRDMA_COMPL_STATUS_INVALID_WQE,
+ IRDMA_COMPL_STATUS_QP_CATASTROPHIC,
+ IRDMA_COMPL_STATUS_REMOTE_TERMINATION,
+ IRDMA_COMPL_STATUS_INVALID_STAG,
+ IRDMA_COMPL_STATUS_BASE_BOUND_VIOLATION,
+ IRDMA_COMPL_STATUS_ACCESS_VIOLATION,
+ IRDMA_COMPL_STATUS_INVALID_PD_ID,
+ IRDMA_COMPL_STATUS_WRAP_ERROR,
+ IRDMA_COMPL_STATUS_STAG_INVALID_PDID,
+ IRDMA_COMPL_STATUS_RDMA_READ_ZERO_ORD,
+ IRDMA_COMPL_STATUS_QP_NOT_PRIVLEDGED,
+ IRDMA_COMPL_STATUS_STAG_NOT_INVALID,
+ IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_SIZE,
+ IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_ENTRY,
+ IRDMA_COMPL_STATUS_INVALID_FBO,
+ IRDMA_COMPL_STATUS_INVALID_LEN,
+ IRDMA_COMPL_STATUS_INVALID_ACCESS,
+ IRDMA_COMPL_STATUS_PHYS_BUF_LIST_TOO_LONG,
+ IRDMA_COMPL_STATUS_INVALID_VIRT_ADDRESS,
+ IRDMA_COMPL_STATUS_INVALID_REGION,
+ IRDMA_COMPL_STATUS_INVALID_WINDOW,
+ IRDMA_COMPL_STATUS_INVALID_TOTAL_LEN,
+ IRDMA_COMPL_STATUS_UNKNOWN,
+};
+
+enum irdma_cmpl_notify {
+ IRDMA_CQ_COMPL_EVENT = 0,
+ IRDMA_CQ_COMPL_SOLICITED = 1,
+};
+
+enum irdma_qp_caps {
+ IRDMA_WRITE_WITH_IMM = 1,
+ IRDMA_SEND_WITH_IMM = 2,
+ IRDMA_ROCE = 4,
+ IRDMA_PUSH_MODE = 8,
+};
+
+struct irdma_srq_uk;
+struct irdma_srq_uk_init_info;
+struct irdma_qp_uk;
+struct irdma_cq_uk;
+struct irdma_qp_uk_init_info;
+struct irdma_cq_uk_init_info;
+
+struct irdma_ring {
+ u32 head;
+ u32 tail;
+ u32 size;
+};
+
+struct irdma_cqe {
+ __le64 buf[IRDMA_CQE_SIZE];
+};
+
+struct irdma_extended_cqe {
+ __le64 buf[IRDMA_EXTENDED_CQE_SIZE];
+};
+
+struct irdma_post_send {
+ struct ib_sge *sg_list;
+ u32 num_sges;
+ u32 qkey;
+ u32 dest_qp;
+ u32 ah_id;
+};
+
+struct irdma_post_rq_info {
+ u64 wr_id;
+ struct ib_sge *sg_list;
+ u32 num_sges;
+};
+
+struct irdma_rdma_write {
+ struct ib_sge *lo_sg_list;
+ u32 num_lo_sges;
+ struct ib_sge rem_addr;
+};
+
+struct irdma_rdma_read {
+ struct ib_sge *lo_sg_list;
+ u32 num_lo_sges;
+ struct ib_sge rem_addr;
+};
+
+struct irdma_bind_window {
+ irdma_stag mr_stag;
+ u64 bind_len;
+ void *va;
+ enum irdma_addressing_type addressing_type;
+ bool ena_reads:1;
+ bool ena_writes:1;
+ irdma_stag mw_stag;
+ bool mem_window_type_1:1;
+ bool remote_atomics_en:1;
+};
+
+struct irdma_atomic_fetch_add {
+ u64 tagged_offset;
+ u64 remote_tagged_offset;
+ u64 fetch_add_data_bytes;
+ u32 stag;
+ u32 remote_stag;
+};
+
+struct irdma_atomic_compare_swap {
+ u64 tagged_offset;
+ u64 remote_tagged_offset;
+ u64 swap_data_bytes;
+ u64 compare_data_bytes;
+ u32 stag;
+ u32 remote_stag;
+};
+
+struct irdma_inv_local_stag {
+ irdma_stag target_stag;
+};
+
+struct irdma_post_sq_info {
+ u64 wr_id;
+ u8 op_type;
+ u8 l4len;
+ bool signaled:1;
+ bool read_fence:1;
+ bool local_fence:1;
+ bool inline_data:1;
+ bool imm_data_valid:1;
+ bool report_rtt:1;
+ bool udp_hdr:1;
+ bool defer_flag:1;
+ bool remote_atomic_en:1;
+ u32 imm_data;
+ u32 stag_to_inv;
+ union {
+ struct irdma_post_send send;
+ struct irdma_rdma_write rdma_write;
+ struct irdma_rdma_read rdma_read;
+ struct irdma_bind_window bind_window;
+ struct irdma_inv_local_stag inv_local_stag;
+ struct irdma_atomic_fetch_add atomic_fetch_add;
+ struct irdma_atomic_compare_swap atomic_compare_swap;
+ } op;
+};
+
+struct irdma_cq_poll_info {
+ u64 wr_id;
+ irdma_qp_handle qp_handle;
+ u32 bytes_xfered;
+ u32 tcp_seq_num_rtt;
+ u32 qp_id;
+ u32 ud_src_qpn;
+ u32 imm_data;
+ irdma_stag inv_stag; /* or L_R_Key */
+ enum irdma_cmpl_status comp_status;
+ u16 major_err;
+ u16 minor_err;
+ u16 ud_vlan;
+ u8 ud_smac[6];
+ u8 op_type;
+ u8 q_type;
+ bool stag_invalid_set:1; /* or L_R_Key set */
+ bool error:1;
+ bool solicited_event:1;
+ bool ipv4:1;
+ bool ud_vlan_valid:1;
+ bool ud_smac_valid:1;
+ bool imm_valid:1;
+};
+
+struct qp_err_code {
+ enum irdma_flush_opcode flush_code;
+ enum irdma_qp_event_type event_type;
+};
+
+int irdma_uk_atomic_compare_swap(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq);
+int irdma_uk_atomic_fetch_add(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq);
+int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq);
+int irdma_uk_inline_send(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq);
+int irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled,
+ bool post_sq);
+int irdma_uk_post_receive(struct irdma_qp_uk *qp,
+ struct irdma_post_rq_info *info);
+void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp);
+int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool inv_stag, bool post_sq);
+int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool post_sq);
+int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool post_sq);
+int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info,
+ bool post_sq);
+
+struct irdma_wqe_uk_ops {
+ void (*iw_copy_inline_data)(u8 *dest, struct ib_sge *sge_list,
+ u32 num_sges, u8 polarity);
+ u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
+ void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct ib_sge *sge,
+ u8 valid);
+ void (*iw_set_mw_bind_wqe)(__le64 *wqe,
+ struct irdma_bind_window *op_info);
+};
+
+int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
+ struct irdma_cq_poll_info *info);
+void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
+ enum irdma_cmpl_notify cq_notify);
+void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int size);
+void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *qp, u16 cnt);
+void irdma_uk_cq_init(struct irdma_cq_uk *cq,
+ struct irdma_cq_uk_init_info *info);
+int irdma_uk_qp_init(struct irdma_qp_uk *qp,
+ struct irdma_qp_uk_init_info *info);
+void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
+ u8 *rq_shift);
+int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *sq_depth, u8 *sq_shift);
+int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *rq_depth, u8 *rq_shift);
+int irdma_uk_srq_init(struct irdma_srq_uk *srq,
+ struct irdma_srq_uk_init_info *info);
+int irdma_uk_srq_post_receive(struct irdma_srq_uk *srq,
+ struct irdma_post_rq_info *info);
+
+struct irdma_srq_uk {
+ u32 srq_caps;
+ struct irdma_qp_quanta *srq_base;
+ struct irdma_uk_attrs *uk_attrs;
+ __le64 *shadow_area;
+ struct irdma_ring srq_ring;
+ struct irdma_ring initial_ring;
+ u32 srq_id;
+ u32 srq_size;
+ u32 max_srq_frag_cnt;
+ struct irdma_wqe_uk_ops wqe_ops;
+ u8 srwqe_polarity;
+ u8 wqe_size;
+ u8 wqe_size_multiplier;
+ u8 deferred_flag;
+};
+
+struct irdma_srq_uk_init_info {
+ struct irdma_qp_quanta *srq;
+ struct irdma_uk_attrs *uk_attrs;
+ __le64 *shadow_area;
+ u64 *srq_wrid_array;
+ u32 srq_id;
+ u32 srq_caps;
+ u32 srq_size;
+ u32 max_srq_frag_cnt;
+};
+
+struct irdma_sq_uk_wr_trk_info {
+ u64 wrid;
+ u32 wr_len;
+ u16 quanta;
+ u8 reserved[2];
+};
+
+struct irdma_qp_quanta {
+ __le64 elem[IRDMA_WQE_SIZE];
+};
+
+struct irdma_qp_uk {
+ struct irdma_qp_quanta *sq_base;
+ struct irdma_qp_quanta *rq_base;
+ struct irdma_uk_attrs *uk_attrs;
+ u32 __iomem *wqe_alloc_db;
+ struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
+ u64 *rq_wrid_array;
+ __le64 *shadow_area;
+ struct irdma_ring sq_ring;
+ struct irdma_ring rq_ring;
+ struct irdma_ring initial_ring;
+ u32 qp_id;
+ u32 qp_caps;
+ u32 sq_size;
+ u32 rq_size;
+ u32 max_sq_frag_cnt;
+ u32 max_rq_frag_cnt;
+ u32 max_inline_data;
+ struct irdma_wqe_uk_ops wqe_ops;
+ u16 conn_wqes;
+ u8 qp_type;
+ u8 swqe_polarity;
+ u8 swqe_polarity_deferred;
+ u8 rwqe_polarity;
+ u8 rq_wqe_size;
+ u8 rq_wqe_size_multiplier;
+ bool deferred_flag:1;
+ bool first_sq_wq:1;
+ bool sq_flush_complete:1; /* Indicates flush was seen and SQ was empty after the flush */
+ bool rq_flush_complete:1; /* Indicates flush was seen and RQ was empty after the flush */
+ bool destroy_pending:1; /* Indicates the QP is being destroyed */
+ void *back_qp;
+ u8 dbg_rq_flushed;
+ struct irdma_srq_uk *srq_uk;
+ u8 sq_flush_seen;
+ u8 rq_flush_seen;
+};
+
+struct irdma_cq_uk {
+ struct irdma_cqe *cq_base;
+ u32 __iomem *cqe_alloc_db;
+ u32 __iomem *cq_ack_db;
+ __le64 *shadow_area;
+ u32 cq_id;
+ u32 cq_size;
+ struct irdma_ring cq_ring;
+ u8 polarity;
+ bool avoid_mem_cflct:1;
+};
+
+struct irdma_qp_uk_init_info {
+ struct irdma_qp_quanta *sq;
+ struct irdma_qp_quanta *rq;
+ struct irdma_uk_attrs *uk_attrs;
+ u32 __iomem *wqe_alloc_db;
+ __le64 *shadow_area;
+ struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
+ u64 *rq_wrid_array;
+ u32 qp_id;
+ u32 qp_caps;
+ u32 sq_size;
+ u32 rq_size;
+ u32 max_sq_frag_cnt;
+ u32 max_rq_frag_cnt;
+ u32 max_inline_data;
+ u32 sq_depth;
+ u32 rq_depth;
+ u8 first_sq_wq;
+ u8 type;
+ u8 sq_shift;
+ u8 rq_shift;
+ int abi_ver;
+ bool legacy_mode;
+ struct irdma_srq_uk *srq_uk;
+};
+
+struct irdma_cq_uk_init_info {
+ u32 __iomem *cqe_alloc_db;
+ u32 __iomem *cq_ack_db;
+ struct irdma_cqe *cq_base;
+ __le64 *shadow_area;
+ u32 cq_size;
+ u32 cq_id;
+ bool avoid_mem_cflct;
+};
+
+__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
+ u16 quanta, u32 total_size,
+ struct irdma_post_sq_info *info);
+__le64 *irdma_srq_get_next_recv_wqe(struct irdma_srq_uk *srq, u32 *wqe_idx);
+__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
+void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
+int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq);
+int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta);
+int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);
+void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
+ u32 inline_data, u8 *shift);
+int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
+ u32 *wqdepth);
+int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
+ u32 *wqdepth);
+int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift,
+ u32 *srqdepth);
+void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
+
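+/* irdma_ae_to_qp_err_code - map an asynchronous event (AE) id to the flush
+ * opcode and QP event type used when flushing the QP. Unrecognized AE ids
+ * map to a general flush error with a catastrophic QP event.
+ */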
+static inline struct qp_err_code irdma_ae_to_qp_err_code(u16 ae_id)
+{
+ struct qp_err_code qp_err = {};
+
+ switch (ae_id) {
+ case IRDMA_AE_AMP_BOUNDS_VIOLATION:
+ case IRDMA_AE_AMP_INVALID_STAG:
+ case IRDMA_AE_AMP_RIGHTS_VIOLATION:
+ case IRDMA_AE_AMP_UNALLOCATED_STAG:
+ case IRDMA_AE_AMP_BAD_PD:
+ case IRDMA_AE_AMP_BAD_QP:
+ case IRDMA_AE_AMP_BAD_STAG_KEY:
+ case IRDMA_AE_AMP_BAD_STAG_INDEX:
+ case IRDMA_AE_AMP_TO_WRAP:
+ case IRDMA_AE_PRIV_OPERATION_DENIED:
+ qp_err.flush_code = FLUSH_PROT_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ break;
+ case IRDMA_AE_UDA_XMIT_BAD_PD:
+ case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
+ qp_err.flush_code = FLUSH_LOC_QP_OP_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ case IRDMA_AE_UDA_L4LEN_INVALID:
+ case IRDMA_AE_DDP_UBE_INVALID_MO:
+ case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+ qp_err.flush_code = FLUSH_LOC_LEN_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
+ case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
+ qp_err.flush_code = FLUSH_REM_ACCESS_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ break;
+ case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
+ case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
+ case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
+ case IRDMA_AE_AMP_MWBIND_VALID_STAG:
+ qp_err.flush_code = FLUSH_MW_BIND_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ break;
+ case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+ qp_err.flush_code = FLUSH_RETRY_EXC_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_IB_INVALID_REQUEST:
+ qp_err.flush_code = FLUSH_REM_INV_REQ_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_REQ_ERR;
+ break;
+ case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
+ case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
+ case IRDMA_AE_IB_REMOTE_OP_ERROR:
+ qp_err.flush_code = FLUSH_REM_OP_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_LLP_TOO_MANY_RNRS:
+ qp_err.flush_code = FLUSH_RNR_RETRY_EXC_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_LCE_QP_CATASTROPHIC:
+ case IRDMA_AE_REMOTE_QP_CATASTROPHIC:
+ case IRDMA_AE_LOCAL_QP_CATASTROPHIC:
+ case IRDMA_AE_RCE_QP_CATASTROPHIC:
+ qp_err.flush_code = FLUSH_FATAL_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ default:
+ qp_err.flush_code = FLUSH_GENERAL_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ }
+
+ return qp_err;
+}
+#endif /* IRDMA_USER_H */
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
new file mode 100644
index 000000000000..8b94d87b0192
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -0,0 +1,2528 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "main.h"
+
+/**
+ * irdma_arp_table - manage arp table
+ * @rf: RDMA PCI function
+ * @ip_addr: ip address for device
+ * @ipv4: IPv4 flag
+ * @mac_addr: mac address ptr
+ * @action: modify, delete or add
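+ *
+ * Return: ARP table index on success, -1 on failure.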
+ */
+int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4,
+ const u8 *mac_addr, u32 action)
+{
+ unsigned long flags;
+ int arp_index;
+ u32 ip[4] = {};
+
+ if (ipv4)
+ ip[0] = *ip_addr;
+ else
+ memcpy(ip, ip_addr, sizeof(ip));
+
+ spin_lock_irqsave(&rf->arp_lock, flags);
+ for (arp_index = 0; (u32)arp_index < rf->arp_table_size; arp_index++) {
+ if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip)))
+ break;
+ }
+
+ switch (action) {
+ case IRDMA_ARP_ADD:
+ if (arp_index != rf->arp_table_size) {
+ arp_index = -1;
+ break;
+ }
+
+ arp_index = 0;
+ if (irdma_alloc_rsrc(rf, rf->allocated_arps, rf->arp_table_size,
+ (u32 *)&arp_index, &rf->next_arp_index)) {
+ arp_index = -1;
+ break;
+ }
+
+ memcpy(rf->arp_table[arp_index].ip_addr, ip,
+ sizeof(rf->arp_table[arp_index].ip_addr));
+ ether_addr_copy(rf->arp_table[arp_index].mac_addr, mac_addr);
+ break;
+ case IRDMA_ARP_RESOLVE:
+ if (arp_index == rf->arp_table_size)
+ arp_index = -1;
+ break;
+ case IRDMA_ARP_DELETE:
+ if (arp_index == rf->arp_table_size) {
+ arp_index = -1;
+ break;
+ }
+
+ memset(rf->arp_table[arp_index].ip_addr, 0,
+ sizeof(rf->arp_table[arp_index].ip_addr));
+ eth_zero_addr(rf->arp_table[arp_index].mac_addr);
+ irdma_free_rsrc(rf, rf->allocated_arps, arp_index);
+ break;
+ default:
+ arp_index = -1;
+ break;
+ }
+
+ spin_unlock_irqrestore(&rf->arp_lock, flags);
+ return arp_index;
+}
+
+/**
+ * irdma_add_arp - add a new arp entry if needed
+ * @rf: RDMA function
+ * @ip: IP address
+ * @ipv4: IPv4 flag
+ * @mac: MAC address
+ */
+int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, const u8 *mac)
+{
+ int arpidx;
+
+ arpidx = irdma_arp_table(rf, &ip[0], ipv4, NULL, IRDMA_ARP_RESOLVE);
+ if (arpidx >= 0) {
+ if (ether_addr_equal(rf->arp_table[arpidx].mac_addr, mac))
+ return arpidx;
+
+ irdma_manage_arp_cache(rf, rf->arp_table[arpidx].mac_addr, ip,
+ ipv4, IRDMA_ARP_DELETE);
+ }
+
+ irdma_manage_arp_cache(rf, mac, ip, ipv4, IRDMA_ARP_ADD);
+
+ return irdma_arp_table(rf, ip, ipv4, NULL, IRDMA_ARP_RESOLVE);
+}
+
+/**
+ * wr32 - write 32 bits to hw register
+ * @hw: hardware information including registers
+ * @reg: register offset
+ * @val: value to write to register
+ */
+inline void wr32(struct irdma_hw *hw, u32 reg, u32 val)
+{
+ writel(val, hw->hw_addr + reg);
+}
+
+/**
+ * rd32 - read a 32 bit hw register
+ * @hw: hardware information including registers
+ * @reg: register offset
+ *
+ * Return: the register content
+ */
+inline u32 rd32(struct irdma_hw *hw, u32 reg)
+{
+ return readl(hw->hw_addr + reg);
+}
+
+/**
+ * rd64 - read a 64 bit hw register
+ * @hw: hardware information including registers
+ * @reg: register offset
+ *
+ * Return: the register content
+ */
+inline u64 rd64(struct irdma_hw *hw, u32 reg)
+{
+ return readq(hw->hw_addr + reg);
+}
+
+static void irdma_gid_change_event(struct ib_device *ibdev)
+{
+ struct ib_event ib_event;
+
+ ib_event.event = IB_EVENT_GID_CHANGE;
+ ib_event.device = ibdev;
+ ib_event.element.port_num = 1;
+ ib_dispatch_event(&ib_event);
+}
+
+/**
+ * irdma_inetaddr_event - system notifier for ipv4 addr events
+ * @notifier: not used
+ * @event: event for notifier
+ * @ptr: interface address
+ */
+int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr)
+{
+ struct in_ifaddr *ifa = ptr;
+ struct net_device *real_dev, *netdev = ifa->ifa_dev->dev;
+ struct irdma_device *iwdev;
+ struct ib_device *ibdev;
+ u32 local_ipaddr;
+
+ real_dev = rdma_vlan_dev_real_dev(netdev);
+ if (!real_dev)
+ real_dev = netdev;
+
+ ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
+ if (!ibdev)
+ return NOTIFY_DONE;
+
+ iwdev = to_iwdev(ibdev);
+ local_ipaddr = ntohl(ifa->ifa_address);
+ ibdev_dbg(&iwdev->ibdev,
+ "DEV: netdev %p event %lu local_ip=%pI4 MAC=%pM\n", real_dev,
+ event, &local_ipaddr, real_dev->dev_addr);
+ switch (event) {
+ case NETDEV_DOWN:
+ irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr,
+ &local_ipaddr, true, IRDMA_ARP_DELETE);
+ irdma_if_notify(iwdev, real_dev, &local_ipaddr, true, false);
+ irdma_gid_change_event(&iwdev->ibdev);
+ break;
+ case NETDEV_UP:
+ case NETDEV_CHANGEADDR:
+ irdma_add_arp(iwdev->rf, &local_ipaddr, true, real_dev->dev_addr);
+ irdma_if_notify(iwdev, real_dev, &local_ipaddr, true, true);
+ irdma_gid_change_event(&iwdev->ibdev);
+ break;
+ default:
+ break;
+ }
+
+ ib_device_put(ibdev);
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * irdma_inet6addr_event - system notifier for ipv6 addr events
+ * @notifier: not used
+ * @event: event for notifier
+ * @ptr: interface address
+ */
+int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr)
+{
+ struct inet6_ifaddr *ifa = ptr;
+ struct net_device *real_dev, *netdev = ifa->idev->dev;
+ struct irdma_device *iwdev;
+ struct ib_device *ibdev;
+ u32 local_ipaddr6[4];
+
+ real_dev = rdma_vlan_dev_real_dev(netdev);
+ if (!real_dev)
+ real_dev = netdev;
+
+ ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
+ if (!ibdev)
+ return NOTIFY_DONE;
+
+ iwdev = to_iwdev(ibdev);
+ irdma_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
+ ibdev_dbg(&iwdev->ibdev,
+ "DEV: netdev %p event %lu local_ip=%pI6 MAC=%pM\n", real_dev,
+ event, local_ipaddr6, real_dev->dev_addr);
+ switch (event) {
+ case NETDEV_DOWN:
+ irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr,
+ local_ipaddr6, false, IRDMA_ARP_DELETE);
+ irdma_if_notify(iwdev, real_dev, local_ipaddr6, false, false);
+ irdma_gid_change_event(&iwdev->ibdev);
+ break;
+ case NETDEV_UP:
+ case NETDEV_CHANGEADDR:
+ irdma_add_arp(iwdev->rf, local_ipaddr6, false,
+ real_dev->dev_addr);
+ irdma_if_notify(iwdev, real_dev, local_ipaddr6, false, true);
+ irdma_gid_change_event(&iwdev->ibdev);
+ break;
+ default:
+ break;
+ }
+
+ ib_device_put(ibdev);
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * irdma_net_event - system notifier for net events
+ * @notifier: not used
+ * @event: event for notifier
+ * @ptr: neighbor
+ */
+int irdma_net_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr)
+{
+ struct neighbour *neigh = ptr;
+ struct net_device *real_dev, *netdev = (struct net_device *)neigh->dev;
+ struct irdma_device *iwdev;
+ struct ib_device *ibdev;
+ __be32 *p;
+ u32 local_ipaddr[4] = {};
+ bool ipv4 = true;
+
+ switch (event) {
+ case NETEVENT_NEIGH_UPDATE:
+ real_dev = rdma_vlan_dev_real_dev(netdev);
+ if (!real_dev)
+ real_dev = netdev;
+ ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
+ if (!ibdev)
+ return NOTIFY_DONE;
+
+ iwdev = to_iwdev(ibdev);
+ p = (__be32 *)neigh->primary_key;
+ if (neigh->tbl->family == AF_INET6) {
+ ipv4 = false;
+ irdma_copy_ip_ntohl(local_ipaddr, p);
+ } else {
+ local_ipaddr[0] = ntohl(*p);
+ }
+
+ ibdev_dbg(&iwdev->ibdev,
+ "DEV: netdev %p state %d local_ip=%pI4 MAC=%pM\n",
+ iwdev->netdev, neigh->nud_state, local_ipaddr,
+ neigh->ha);
+
+ if (neigh->nud_state & NUD_VALID)
+ irdma_add_arp(iwdev->rf, local_ipaddr, ipv4, neigh->ha);
+
+ else
+ irdma_manage_arp_cache(iwdev->rf, neigh->ha,
+ local_ipaddr, ipv4,
+ IRDMA_ARP_DELETE);
+ ib_device_put(ibdev);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * irdma_netdevice_event - system notifier for netdev events
+ * @notifier: not used
+ * @event: event for notifier
+ * @ptr: netdev
+ */
+int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr)
+{
+ struct irdma_device *iwdev;
+ struct ib_device *ibdev;
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+
+ ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_IRDMA);
+ if (!ibdev)
+ return NOTIFY_DONE;
+
+ iwdev = to_iwdev(ibdev);
+ iwdev->iw_status = 1;
+ switch (event) {
+ case NETDEV_DOWN:
+ iwdev->iw_status = 0;
+ fallthrough;
+ default:
+ break;
+ }
+ ib_device_put(ibdev);
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * irdma_add_ipv6_addr - add ipv6 address to the hw arp table
+ * @iwdev: irdma device
+ */
+static void irdma_add_ipv6_addr(struct irdma_device *iwdev)
+{
+ struct net_device *ip_dev;
+ struct inet6_dev *idev;
+ struct inet6_ifaddr *ifp, *tmp;
+ u32 local_ipaddr6[4];
+
+ rcu_read_lock();
+ for_each_netdev_rcu (&init_net, ip_dev) {
+ if (((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF &&
+ rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev) ||
+ ip_dev == iwdev->netdev) &&
+ (READ_ONCE(ip_dev->flags) & IFF_UP)) {
+ idev = __in6_dev_get(ip_dev);
+ if (!idev) {
+ ibdev_err(&iwdev->ibdev, "ipv6 inet device not found\n");
+ break;
+ }
+ list_for_each_entry_safe (ifp, tmp, &idev->addr_list,
+ if_list) {
+ ibdev_dbg(&iwdev->ibdev,
+ "INIT: IP=%pI6, vlan_id=%d, MAC=%pM\n",
+ &ifp->addr,
+ rdma_vlan_dev_vlan_id(ip_dev),
+ ip_dev->dev_addr);
+
+ irdma_copy_ip_ntohl(local_ipaddr6,
+ ifp->addr.in6_u.u6_addr32);
+ irdma_manage_arp_cache(iwdev->rf,
+ ip_dev->dev_addr,
+ local_ipaddr6, false,
+ IRDMA_ARP_ADD);
+ }
+ }
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * irdma_add_ipv4_addr - add ipv4 address to the hw arp table
+ * @iwdev: irdma device
+ */
+static void irdma_add_ipv4_addr(struct irdma_device *iwdev)
+{
+ struct net_device *dev;
+ struct in_device *idev;
+ u32 ip_addr;
+
+ rcu_read_lock();
+ for_each_netdev_rcu (&init_net, dev) {
+ if (((rdma_vlan_dev_vlan_id(dev) < 0xFFFF &&
+ rdma_vlan_dev_real_dev(dev) == iwdev->netdev) ||
+ dev == iwdev->netdev) && (READ_ONCE(dev->flags) & IFF_UP)) {
+ const struct in_ifaddr *ifa;
+
+ idev = __in_dev_get_rcu(dev);
+ if (!idev)
+ continue;
+
+ in_dev_for_each_ifa_rcu(ifa, idev) {
+ ibdev_dbg(&iwdev->ibdev, "CM: IP=%pI4, vlan_id=%d, MAC=%pM\n",
+ &ifa->ifa_address, rdma_vlan_dev_vlan_id(dev),
+ dev->dev_addr);
+
+ ip_addr = ntohl(ifa->ifa_address);
+ irdma_manage_arp_cache(iwdev->rf, dev->dev_addr,
+ &ip_addr, true,
+ IRDMA_ARP_ADD);
+ }
+ }
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * irdma_add_ip - add ip addresses
+ * @iwdev: irdma device
+ *
+ * Add ipv4/ipv6 addresses to the arp cache
+ */
+void irdma_add_ip(struct irdma_device *iwdev)
+{
+ irdma_add_ipv4_addr(iwdev);
+ irdma_add_ipv6_addr(iwdev);
+}
+
+/**
+ * irdma_alloc_and_get_cqp_request - get cqp struct
+ * @cqp: device cqp ptr
+ * @wait: cqp to be used in wait mode
+ */
+struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
+ bool wait)
+{
+ struct irdma_cqp_request *cqp_request = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cqp->req_lock, flags);
+ if (!list_empty(&cqp->cqp_avail_reqs)) {
+ cqp_request = list_first_entry(&cqp->cqp_avail_reqs,
+ struct irdma_cqp_request, list);
+ list_del_init(&cqp_request->list);
+ }
+ spin_unlock_irqrestore(&cqp->req_lock, flags);
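+ /* No pre-allocated request was available; fall back to a dynamic
+ * allocation that is freed on the final put.
+ */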
+ if (!cqp_request) {
+ cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC);
+ if (cqp_request) {
+ cqp_request->dynamic = true;
+ if (wait)
+ init_waitqueue_head(&cqp_request->waitq);
+ }
+ }
+ if (!cqp_request) {
+ ibdev_dbg(to_ibdev(cqp->sc_cqp.dev), "ERR: CQP Request Fail: No Memory");
+ return NULL;
+ }
+
+ cqp_request->waiting = wait;
+ refcount_set(&cqp_request->refcnt, 1);
+ memset(&cqp_request->compl_info, 0, sizeof(cqp_request->compl_info));
+
+ return cqp_request;
+}
+
+/**
+ * irdma_get_cqp_request - increase refcount for cqp_request
+ * @cqp_request: pointer to cqp_request instance
+ */
+static inline void irdma_get_cqp_request(struct irdma_cqp_request *cqp_request)
+{
+ refcount_inc(&cqp_request->refcnt);
+}
+
+/**
+ * irdma_free_cqp_request - free cqp request
+ * @cqp: cqp ptr
+ * @cqp_request: to be put back in cqp list
+ */
+void irdma_free_cqp_request(struct irdma_cqp *cqp,
+ struct irdma_cqp_request *cqp_request)
+{
+ unsigned long flags;
+
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ WRITE_ONCE(cqp_request->request_done, false);
+ cqp_request->callback_fcn = NULL;
+ cqp_request->waiting = false;
+ cqp_request->pending = false;
+
+ spin_lock_irqsave(&cqp->req_lock, flags);
+ list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
+ spin_unlock_irqrestore(&cqp->req_lock, flags);
+ }
+ wake_up(&cqp->remove_wq);
+}
+
+/**
+ * irdma_put_cqp_request - dec ref count and free if 0
+ * @cqp: cqp ptr
+ * @cqp_request: to be put back in cqp list
+ */
+void irdma_put_cqp_request(struct irdma_cqp *cqp,
+ struct irdma_cqp_request *cqp_request)
+{
+ if (refcount_dec_and_test(&cqp_request->refcnt))
+ irdma_free_cqp_request(cqp, cqp_request);
+}
+
+/**
+ * irdma_free_pending_cqp_request - free pending cqp request objs
+ * @cqp: cqp ptr
+ * @cqp_request: to be put back in cqp list
+ */
+static void
+irdma_free_pending_cqp_request(struct irdma_cqp *cqp,
+ struct irdma_cqp_request *cqp_request)
+{
+ if (cqp_request->waiting) {
+ cqp_request->compl_info.error = true;
+ WRITE_ONCE(cqp_request->request_done, true);
+ wake_up(&cqp_request->waitq);
+ }
+ wait_event_timeout(cqp->remove_wq,
+ refcount_read(&cqp_request->refcnt) == 1, 1000);
+ irdma_put_cqp_request(cqp, cqp_request);
+}
+
+/**
+ * irdma_cleanup_deferred_cqp_ops - clean up cqp requests with deferred completions
+ * @dev: sc_dev
+ * @cqp: cqp
+ */
+static void irdma_cleanup_deferred_cqp_ops(struct irdma_sc_dev *dev,
+ struct irdma_cqp *cqp)
+{
+ u64 scratch;
+
+ /* process all CQP requests with deferred/pending completions */
+ while ((scratch = irdma_sc_cqp_cleanup_handler(dev)))
+ irdma_free_pending_cqp_request(cqp, (struct irdma_cqp_request *)
+ (uintptr_t)scratch);
+}
+
+/**
+ * irdma_cleanup_pending_cqp_op - clean up cqp requests with no
+ * completions
+ * @rf: RDMA PCI function
+ */
+void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_cqp *cqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request = NULL;
+ struct cqp_cmds_info *pcmdinfo = NULL;
+ u32 i, pending_work, wqe_idx;
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ irdma_cleanup_deferred_cqp_ops(dev, cqp);
+ pending_work = IRDMA_RING_USED_QUANTA(cqp->sc_cqp.sq_ring);
+ wqe_idx = IRDMA_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring);
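+ /* Release requests for WQEs that were posted to the CQP SQ but never
+ * completed, then drain the backlog of commands that were never posted.
+ */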
+ for (i = 0; i < pending_work; i++) {
+ cqp_request = (struct irdma_cqp_request *)(unsigned long)
+ cqp->scratch_array[wqe_idx];
+ if (cqp_request)
+ irdma_free_pending_cqp_request(cqp, cqp_request);
+ wqe_idx = (wqe_idx + 1) % IRDMA_RING_SIZE(cqp->sc_cqp.sq_ring);
+ }
+
+ while (!list_empty(&dev->cqp_cmd_head)) {
+ pcmdinfo = irdma_remove_cqp_head(dev);
+ cqp_request =
+ container_of(pcmdinfo, struct irdma_cqp_request, info);
+ if (cqp_request)
+ irdma_free_pending_cqp_request(cqp, cqp_request);
+ }
+}
+
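+/* Convert the negotiated CQP timeout (seconds) into a count of completion
+ * wait periods; fall back to the default threshold when none was negotiated.
+ */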
+static int irdma_get_timeout_threshold(struct irdma_sc_dev *dev)
+{
+ u16 time_s = dev->vc_caps.cqp_timeout_s;
+
+ if (!time_s)
+ return CQP_TIMEOUT_THRESHOLD;
+
+ return time_s * 1000 / dev->hw_attrs.max_cqp_compl_wait_time_ms;
+}
+
+static int irdma_get_def_timeout_threshold(struct irdma_sc_dev *dev)
+{
+ u16 time_s = dev->vc_caps.cqp_def_timeout_s;
+
+ if (!time_s)
+ return CQP_DEF_CMPL_TIMEOUT_THRESHOLD;
+
+ return time_s * 1000 / dev->hw_attrs.max_cqp_compl_wait_time_ms;
+}
+
+/**
+ * irdma_wait_event - wait for completion
+ * @rf: RDMA PCI function
+ * @cqp_request: cqp request to wait
+ */
+static int irdma_wait_event(struct irdma_pci_f *rf,
+ struct irdma_cqp_request *cqp_request)
+{
+ struct irdma_cqp_timeout cqp_timeout = {};
+ int timeout_threshold = irdma_get_timeout_threshold(&rf->sc_dev);
+ bool cqp_error = false;
+ int err_code = 0;
+
+ cqp_timeout.compl_cqp_cmds = atomic64_read(&rf->sc_dev.cqp->completed_ops);
+ do {
+ irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
+ if (wait_event_timeout(cqp_request->waitq,
+ READ_ONCE(cqp_request->request_done),
+ msecs_to_jiffies(CQP_COMPL_WAIT_TIME_MS)))
+ break;
+
+ if (cqp_request->pending)
+ /* There was a deferred or pending completion
+ * received for this CQP request, so we need
+ * to wait longer than usual.
+ */
+ timeout_threshold =
+ irdma_get_def_timeout_threshold(&rf->sc_dev);
+
+ irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev);
+
+ if (cqp_timeout.count < timeout_threshold)
+ continue;
+
+ if (!rf->reset) {
+ rf->reset = true;
+ rf->gen_ops.request_reset(rf);
+ }
+ return -ETIMEDOUT;
+ } while (1);
+
+ cqp_error = cqp_request->compl_info.error;
+ if (cqp_error) {
+ err_code = -EIO;
+ if (cqp_request->compl_info.maj_err_code == 0xFFFF) {
+ if (cqp_request->compl_info.min_err_code == 0x8002)
+ err_code = -EBUSY;
+ else if (cqp_request->compl_info.min_err_code == 0x8029) {
+ if (!rf->reset) {
+ rf->reset = true;
+ rf->gen_ops.request_reset(rf);
+ }
+ }
+ }
+ }
+
+ return err_code;
+}
+
+static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
+ [IRDMA_OP_CEQ_DESTROY] = "Destroy CEQ Cmd",
+ [IRDMA_OP_AEQ_DESTROY] = "Destroy AEQ Cmd",
+ [IRDMA_OP_DELETE_ARP_CACHE_ENTRY] = "Delete ARP Cache Cmd",
+ [IRDMA_OP_MANAGE_APBVT_ENTRY] = "Manage APBV Table Entry Cmd",
+ [IRDMA_OP_CEQ_CREATE] = "CEQ Create Cmd",
+ [IRDMA_OP_AEQ_CREATE] = "AEQ Create Cmd",
+ [IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY] = "Manage Quad Hash Table Entry Cmd",
+ [IRDMA_OP_QP_MODIFY] = "Modify QP Cmd",
+ [IRDMA_OP_QP_UPLOAD_CONTEXT] = "Upload Context Cmd",
+ [IRDMA_OP_CQ_CREATE] = "Create CQ Cmd",
+ [IRDMA_OP_CQ_DESTROY] = "Destroy CQ Cmd",
+ [IRDMA_OP_QP_CREATE] = "Create QP Cmd",
+ [IRDMA_OP_QP_DESTROY] = "Destroy QP Cmd",
+ [IRDMA_OP_ALLOC_STAG] = "Allocate STag Cmd",
+ [IRDMA_OP_MR_REG_NON_SHARED] = "Register Non-Shared MR Cmd",
+ [IRDMA_OP_DEALLOC_STAG] = "Deallocate STag Cmd",
+ [IRDMA_OP_MW_ALLOC] = "Allocate Memory Window Cmd",
+ [IRDMA_OP_QP_FLUSH_WQES] = "Flush QP Cmd",
+ [IRDMA_OP_ADD_ARP_CACHE_ENTRY] = "Add ARP Cache Cmd",
+ [IRDMA_OP_MANAGE_PUSH_PAGE] = "Manage Push Page Cmd",
+ [IRDMA_OP_UPDATE_PE_SDS] = "Update PE SDs Cmd",
+ [IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE] = "Manage HMC PM Function Table Cmd",
+ [IRDMA_OP_SUSPEND] = "Suspend QP Cmd",
+ [IRDMA_OP_RESUME] = "Resume QP Cmd",
+ [IRDMA_OP_MANAGE_VF_PBLE_BP] = "Manage VF PBLE Backing Pages Cmd",
+ [IRDMA_OP_QUERY_FPM_VAL] = "Query FPM Values Cmd",
+ [IRDMA_OP_COMMIT_FPM_VAL] = "Commit FPM Values Cmd",
+ [IRDMA_OP_AH_CREATE] = "Create Address Handle Cmd",
+ [IRDMA_OP_AH_MODIFY] = "Modify Address Handle Cmd",
+ [IRDMA_OP_AH_DESTROY] = "Destroy Address Handle Cmd",
+ [IRDMA_OP_MC_CREATE] = "Create Multicast Group Cmd",
+ [IRDMA_OP_MC_DESTROY] = "Destroy Multicast Group Cmd",
+ [IRDMA_OP_MC_MODIFY] = "Modify Multicast Group Cmd",
+ [IRDMA_OP_STATS_ALLOCATE] = "Add Statistics Instance Cmd",
+ [IRDMA_OP_STATS_FREE] = "Free Statistics Instance Cmd",
+ [IRDMA_OP_STATS_GATHER] = "Gather Statistics Cmd",
+ [IRDMA_OP_WS_ADD_NODE] = "Add Work Scheduler Node Cmd",
+ [IRDMA_OP_WS_MODIFY_NODE] = "Modify Work Scheduler Node Cmd",
+ [IRDMA_OP_WS_DELETE_NODE] = "Delete Work Scheduler Node Cmd",
+ [IRDMA_OP_SET_UP_MAP] = "Set UP-UP Mapping Cmd",
+ [IRDMA_OP_GEN_AE] = "Generate AE Cmd",
+ [IRDMA_OP_QUERY_RDMA_FEATURES] = "RDMA Get Features Cmd",
+ [IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY] = "Allocate Local MAC Entry Cmd",
+ [IRDMA_OP_ADD_LOCAL_MAC_ENTRY] = "Add Local MAC Entry Cmd",
+ [IRDMA_OP_DELETE_LOCAL_MAC_ENTRY] = "Delete Local MAC Entry Cmd",
+ [IRDMA_OP_CQ_MODIFY] = "CQ Modify Cmd",
+ [IRDMA_OP_SRQ_CREATE] = "Create SRQ Cmd",
+ [IRDMA_OP_SRQ_MODIFY] = "Modify SRQ Cmd",
+ [IRDMA_OP_SRQ_DESTROY] = "Destroy SRQ Cmd",
+};
+
+static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = {
+ {0xffff, 0x8002, "Invalid State"},
+ {0xffff, 0x8006, "Flush No Wqe Pending"},
+ {0xffff, 0x8007, "Modify QP Bad Close"},
+ {0xffff, 0x8009, "LLP Closed"},
+ {0xffff, 0x800a, "Reset Not Sent"}
+};
+
+/**
+ * irdma_cqp_crit_err - check if CQP error is critical
+ * @dev: pointer to dev structure
+ * @cqp_cmd: code for last CQP operation
+ * @maj_err_code: major error code
+ * @min_err_code: minor error code
+ */
+bool irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd,
+ u16 maj_err_code, u16 min_err_code)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(irdma_noncrit_err_list); ++i) {
+ if (maj_err_code == irdma_noncrit_err_list[i].maj &&
+ min_err_code == irdma_noncrit_err_list[i].min) {
+ ibdev_dbg(to_ibdev(dev),
+ "CQP: [%s Error][%s] maj=0x%x min=0x%x\n",
+ irdma_noncrit_err_list[i].desc,
+ irdma_cqp_cmd_names[cqp_cmd], maj_err_code,
+ min_err_code);
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * irdma_handle_cqp_op - process cqp command
+ * @rf: RDMA PCI function
+ * @cqp_request: cqp request to process
+ */
+int irdma_handle_cqp_op(struct irdma_pci_f *rf,
+ struct irdma_cqp_request *cqp_request)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct cqp_cmds_info *info = &cqp_request->info;
+ int status;
+ bool put_cqp_request = true;
+
+ if (rf->reset)
+ return -EBUSY;
+
+ irdma_get_cqp_request(cqp_request);
+ status = irdma_process_cqp_cmd(dev, info);
+ if (status)
+ goto err;
+
+ if (cqp_request->waiting) {
+ put_cqp_request = false;
+ status = irdma_wait_event(rf, cqp_request);
+ if (status)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ if (irdma_cqp_crit_err(dev, info->cqp_cmd,
+ cqp_request->compl_info.maj_err_code,
+ cqp_request->compl_info.min_err_code))
+ ibdev_err(&rf->iwdev->ibdev,
+ "[%s Error][op_code=%d] status=%d waiting=%d completion_err=%d maj=0x%x min=0x%x\n",
+ irdma_cqp_cmd_names[info->cqp_cmd], info->cqp_cmd, status, cqp_request->waiting,
+ cqp_request->compl_info.error, cqp_request->compl_info.maj_err_code,
+ cqp_request->compl_info.min_err_code);
+
+ if (put_cqp_request)
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+void irdma_qp_add_ref(struct ib_qp *ibqp)
+{
+ struct irdma_qp *iwqp = (struct irdma_qp *)ibqp;
+
+ refcount_inc(&iwqp->refcnt);
+}
+
+void irdma_qp_rem_ref(struct ib_qp *ibqp)
+{
+ struct irdma_qp *iwqp = to_iwqp(ibqp);
+ struct irdma_device *iwdev = iwqp->iwdev;
+ u32 qp_num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
+ if (!refcount_dec_and_test(&iwqp->refcnt)) {
+ spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
+ return;
+ }
+
+ qp_num = iwqp->ibqp.qp_num;
+ iwdev->rf->qp_table[qp_num] = NULL;
+ spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
+ complete(&iwqp->free_qp);
+}
+
+void irdma_cq_add_ref(struct ib_cq *ibcq)
+{
+ struct irdma_cq *iwcq = to_iwcq(ibcq);
+
+ refcount_inc(&iwcq->refcnt);
+}
+
+void irdma_cq_rem_ref(struct ib_cq *ibcq)
+{
+ struct ib_device *ibdev = ibcq->device;
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+ struct irdma_cq *iwcq = to_iwcq(ibcq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwdev->rf->cqtable_lock, flags);
+ if (!refcount_dec_and_test(&iwcq->refcnt)) {
+ spin_unlock_irqrestore(&iwdev->rf->cqtable_lock, flags);
+ return;
+ }
+
+ iwdev->rf->cq_table[iwcq->cq_num] = NULL;
+ spin_unlock_irqrestore(&iwdev->rf->cqtable_lock, flags);
+ complete(&iwcq->free_cq);
+}
+
+struct ib_device *to_ibdev(struct irdma_sc_dev *dev)
+{
+ return &(container_of(dev, struct irdma_pci_f, sc_dev))->iwdev->ibdev;
+}
+
+/**
+ * irdma_get_qp - get qp address
+ * @device: iwarp device
+ * @qpn: qp number
+ */
+struct ib_qp *irdma_get_qp(struct ib_device *device, int qpn)
+{
+ struct irdma_device *iwdev = to_iwdev(device);
+
+ if (qpn < IW_FIRST_QPN || qpn >= iwdev->rf->max_qp)
+ return NULL;
+
+ return &iwdev->rf->qp_table[qpn]->ibqp;
+}
+
+/**
+ * irdma_remove_cqp_head - return head entry and remove
+ * @dev: device
+ */
+void *irdma_remove_cqp_head(struct irdma_sc_dev *dev)
+{
+ struct list_head *entry;
+ struct list_head *list = &dev->cqp_cmd_head;
+
+ if (list_empty(list))
+ return NULL;
+
+ entry = list->next;
+ list_del(entry);
+
+ return entry;
+}
+
+/**
+ * irdma_cqp_sds_cmd - create cqp command for sd
+ * @dev: hardware control device structure
+ * @sdinfo: information for sd cqp
+ */
+int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *sdinfo)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo,
+ sizeof(cqp_info->in.u.update_pe_sds.info));
+ cqp_info->cqp_cmd = IRDMA_OP_UPDATE_PE_SDS;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.update_pe_sds.dev = dev;
+ cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_qp_suspend_resume - cqp command for suspend/resume
+ * @qp: hardware control qp
+ * @op: suspend or resume
+ */
+int irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp, u8 op)
+{
+ struct irdma_sc_dev *dev = qp->dev;
+ struct irdma_cqp_request *cqp_request;
+ struct irdma_sc_cqp *cqp = dev->cqp;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = op;
+ cqp_info->in.u.suspend_resume.cqp = cqp;
+ cqp_info->in.u.suspend_resume.qp = qp;
+ cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_term_modify_qp - modify qp for term message
+ * @qp: hardware control qp
+ * @next_state: qp's next state
+ * @term: terminate code
+ * @term_len: length
+ */
+void irdma_term_modify_qp(struct irdma_sc_qp *qp, u8 next_state, u8 term,
+ u8 term_len)
+{
+ struct irdma_qp *iwqp;
+
+ iwqp = qp->qp_uk.back_qp;
+ irdma_next_iw_state(iwqp, next_state, 0, term, term_len);
+};
+
+/**
+ * irdma_terminate_done - after terminate is completed
+ * @qp: hardware control qp
+ * @timeout_occurred: indicates if terminate timer expired
+ */
+void irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred)
+{
+ struct irdma_qp *iwqp;
+ u8 hte = 0;
+ bool first_time;
+ unsigned long flags;
+
+ iwqp = qp->qp_uk.back_qp;
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->hte_added) {
+ iwqp->hte_added = 0;
+ hte = 1;
+ }
+ first_time = !(qp->term_flags & IRDMA_TERM_DONE);
+ qp->term_flags |= IRDMA_TERM_DONE;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (first_time) {
+ if (!timeout_occurred)
+ irdma_terminate_del_timer(qp);
+
+ irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, hte, 0, 0);
+ irdma_cm_disconn(iwqp);
+ }
+}
+
+static void irdma_terminate_timeout(struct timer_list *t)
+{
+ struct irdma_qp *iwqp = timer_container_of(iwqp, t, terminate_timer);
+ struct irdma_sc_qp *qp = &iwqp->sc_qp;
+
+ irdma_terminate_done(qp, 1);
+ irdma_qp_rem_ref(&iwqp->ibqp);
+}
+
+/**
+ * irdma_terminate_start_timer - start terminate timeout
+ * @qp: hardware control qp
+ */
+void irdma_terminate_start_timer(struct irdma_sc_qp *qp)
+{
+ struct irdma_qp *iwqp;
+
+ iwqp = qp->qp_uk.back_qp;
+ irdma_qp_add_ref(&iwqp->ibqp);
+ timer_setup(&iwqp->terminate_timer, irdma_terminate_timeout, 0);
+ iwqp->terminate_timer.expires = jiffies + HZ;
+
+ add_timer(&iwqp->terminate_timer);
+}
+
+/**
+ * irdma_terminate_del_timer - delete terminate timeout
+ * @qp: hardware control qp
+ */
+void irdma_terminate_del_timer(struct irdma_sc_qp *qp)
+{
+ struct irdma_qp *iwqp;
+ int ret;
+
+ iwqp = qp->qp_uk.back_qp;
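+ /* If the timer was still pending, the timeout handler will not run,
+ * so drop the QP reference taken when the timer was armed.
+ */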
+ ret = timer_delete(&iwqp->terminate_timer);
+ if (ret)
+ irdma_qp_rem_ref(&iwqp->ibqp);
+}
+
+/**
+ * irdma_cqp_cq_create_cmd - create a cq for the cqp
+ * @dev: device pointer
+ * @cq: pointer to created cq
+ */
+int irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.cq_create.cq = cq;
+ cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(iwcqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_qp_create_cmd - create a qp for the cqp
+ * @dev: device pointer
+ * @qp: pointer to created qp
+ */
+int irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_create_qp_info *qp_info;
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ qp_info = &cqp_request->info.in.u.qp_create.info;
+ memset(qp_info, 0, sizeof(*qp_info));
+ qp_info->cq_num_valid = true;
+ qp_info->next_iwarp_state = IRDMA_QP_STATE_RTS;
+ cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_create.qp = qp;
+ cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(iwcqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_dealloc_push_page - free a push page for qp
+ * @rf: RDMA PCI function
+ * @qp: hardware control qp
+ */
+static void irdma_dealloc_push_page(struct irdma_pci_f *rf,
+ struct irdma_sc_qp *qp)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+
+ if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX)
+ return;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
+ cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
+ cqp_info->in.u.manage_push_page.info.free_page = 1;
+ cqp_info->in.u.manage_push_page.info.push_page_type = 0;
+ cqp_info->in.u.manage_push_page.cqp = &rf->cqp.sc_cqp;
+ cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ if (!status)
+ qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+}
+
+static void irdma_free_gsi_qp_rsrc(struct irdma_qp *iwqp, u32 qp_num)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_pci_f *rf = iwdev->rf;
+ unsigned long flags;
+
+ if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev < IRDMA_GEN_3)
+ return;
+
+ irdma_vchnl_req_del_vport(&rf->sc_dev, iwdev->vport_id, qp_num);
+
+ if (qp_num == 1) {
+ spin_lock_irqsave(&rf->rsrc_lock, flags);
+ rf->hwqp1_rsvd = false;
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+ } else if (qp_num > 2) {
+ irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
+ }
+}
+
+/**
+ * irdma_free_qp_rsrc - free up memory resources for qp
+ * @iwqp: qp ptr (user or kernel)
+ */
+void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_pci_f *rf = iwdev->rf;
+ u32 qp_num = iwqp->sc_qp.qp_uk.qp_id;
+
+ irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
+ irdma_dealloc_push_page(rf, &iwqp->sc_qp);
+ if (iwqp->sc_qp.vsi) {
+ irdma_qp_rem_qos(&iwqp->sc_qp);
+ iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi,
+ iwqp->sc_qp.user_pri);
+ }
+
+ if (iwqp->ibqp.qp_type == IB_QPT_GSI) {
+ irdma_free_gsi_qp_rsrc(iwqp, qp_num);
+ } else {
+ if (qp_num > 2)
+ irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
+ }
+ dma_free_coherent(rf->sc_dev.hw->device, iwqp->q2_ctx_mem.size,
+ iwqp->q2_ctx_mem.va, iwqp->q2_ctx_mem.pa);
+ iwqp->q2_ctx_mem.va = NULL;
+ dma_free_coherent(rf->sc_dev.hw->device, iwqp->kqp.dma_mem.size,
+ iwqp->kqp.dma_mem.va, iwqp->kqp.dma_mem.pa);
+ iwqp->kqp.dma_mem.va = NULL;
+ kfree(iwqp->kqp.sq_wrid_mem);
+ kfree(iwqp->kqp.rq_wrid_mem);
+}
+
+/**
+ * irdma_srq_wq_destroy - send srq destroy cqp
+ * @rf: RDMA PCI function
+ * @srq: hardware control srq
+ */
+void irdma_srq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_srq *srq)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_SRQ_DESTROY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.srq_destroy.srq = srq;
+ cqp_info->in.u.srq_destroy.scratch = (uintptr_t)cqp_request;
+
+ irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+}
+
+/**
+ * irdma_cq_wq_destroy - send cq destroy cqp
+ * @rf: RDMA PCI function
+ * @cq: hardware control cq
+ */
+void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_CQ_DESTROY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.cq_destroy.cq = cq;
+ cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
+
+ irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+}
+
+/**
+ * irdma_hw_modify_qp_callback - handle state for modify QPs that don't wait
+ * @cqp_request: modify QP completion
+ */
+static void irdma_hw_modify_qp_callback(struct irdma_cqp_request *cqp_request)
+{
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_qp *iwqp;
+
+ cqp_info = &cqp_request->info;
+ iwqp = cqp_info->in.u.qp_modify.qp->qp_uk.back_qp;
+ atomic_dec(&iwqp->hw_mod_qp_pend);
+ wake_up(&iwqp->mod_qp_waitq);
+}
+
+/**
+ * irdma_hw_modify_qp - setup cqp for modify qp
+ * @iwdev: RDMA device
+ * @iwqp: qp ptr (user or kernel)
+ * @info: info for modify qp
+ * @wait: flag to wait or not for modify qp completion
+ */
+int irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp,
+ struct irdma_modify_qp_info *info, bool wait)
+{
+ int status;
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_modify_qp_info *m_info;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ if (!wait) {
+ cqp_request->callback_fcn = irdma_hw_modify_qp_callback;
+ atomic_inc(&iwqp->hw_mod_qp_pend);
+ }
+ cqp_info = &cqp_request->info;
+ m_info = &cqp_info->in.u.qp_modify.info;
+ memcpy(m_info, info, sizeof(*m_info));
+ cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
+ cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ if (status) {
+ if (rdma_protocol_roce(&iwdev->ibdev, 1))
+ return status;
+
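+ /* iWARP only: the modify failed, so force the QP toward the error
+ * state, either by generating a bad-close AE (no-wait path) or by
+ * reissuing the modify with the next state set to ERROR.
+ */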
+ switch (m_info->next_iwarp_state) {
+ struct irdma_gen_ae_info ae_info;
+
+ case IRDMA_QP_STATE_RTS:
+ case IRDMA_QP_STATE_IDLE:
+ case IRDMA_QP_STATE_TERMINATE:
+ case IRDMA_QP_STATE_CLOSING:
+ if (info->curr_iwarp_state == IRDMA_QP_STATE_IDLE)
+ irdma_send_reset(iwqp->cm_node);
+ else
+ iwqp->sc_qp.term_flags = IRDMA_TERM_DONE;
+ if (!wait) {
+ ae_info.ae_code = IRDMA_AE_BAD_CLOSE;
+ ae_info.ae_src = 0;
+ irdma_gen_ae(rf, &iwqp->sc_qp, &ae_info, false);
+ } else {
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp,
+ wait);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ m_info = &cqp_info->in.u.qp_modify.info;
+ memcpy(m_info, info, sizeof(*m_info));
+ cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
+ cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
+ m_info->next_iwarp_state = IRDMA_QP_STATE_ERROR;
+ m_info->reset_tcp_conn = true;
+ irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ }
+ break;
+ case IRDMA_QP_STATE_ERROR:
+ default:
+ break;
+ }
+ }
+
+ return status;
+}
+
+/**
+ * irdma_cqp_cq_destroy_cmd - destroy the cqp cq
+ * @dev: device pointer
+ * @cq: pointer to cq
+ */
+void irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+
+ irdma_cq_wq_destroy(rf, cq);
+}
+
+/**
+ * irdma_cqp_qp_destroy_cmd - destroy the cqp
+ * @dev: device pointer
+ * @qp: pointer to qp
+ */
+int irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ memset(cqp_info, 0, sizeof(*cqp_info));
+ cqp_info->cqp_cmd = IRDMA_OP_QP_DESTROY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_destroy.qp = qp;
+ cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.qp_destroy.remove_hash_idx = true;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_ieq_mpa_crc_ae - generate AE for crc error
+ * @dev: hardware control device structure
+ * @qp: hardware control qp
+ */
+void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
+{
+ struct irdma_gen_ae_info info = {};
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+
+ ibdev_dbg(&rf->iwdev->ibdev, "AEQ: Generate MPA CRC AE\n");
+ info.ae_code = IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR;
+ info.ae_src = IRDMA_AE_SOURCE_RQ;
+ irdma_gen_ae(rf, qp, &info, false);
+}
+
+/**
+ * irdma_ieq_check_mpacrc - check if mpa crc is OK
+ * @addr: address of buffer for crc
+ * @len: length of buffer
+ * @val: value to be compared
+ */
+int irdma_ieq_check_mpacrc(const void *addr, u32 len, u32 val)
+{
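+ /* Compare the computed CRC32C (inverted seed and result, little-endian
+ * on the wire) against the value carried in the MPA CRC field.
+ */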
+ if ((__force u32)cpu_to_le32(~crc32c(~0, addr, len)) != val)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * irdma_ieq_get_qp - get qp based on quad in puda buffer
+ * @dev: hardware control device structure
+ * @buf: receive puda buffer on exception q
+ */
+struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,
+ struct irdma_puda_buf *buf)
+{
+ struct irdma_qp *iwqp;
+ struct irdma_cm_node *cm_node;
+ struct irdma_device *iwdev = buf->vsi->back_vsi;
+ u32 loc_addr[4] = {};
+ u32 rem_addr[4] = {};
+ u16 loc_port, rem_port;
+ struct ipv6hdr *ip6h;
+ struct iphdr *iph = (struct iphdr *)buf->iph;
+ struct tcphdr *tcph = (struct tcphdr *)buf->tcph;
+
+ if (iph->version == 4) {
+ loc_addr[0] = ntohl(iph->daddr);
+ rem_addr[0] = ntohl(iph->saddr);
+ } else {
+ ip6h = (struct ipv6hdr *)buf->iph;
+ irdma_copy_ip_ntohl(loc_addr, ip6h->daddr.in6_u.u6_addr32);
+ irdma_copy_ip_ntohl(rem_addr, ip6h->saddr.in6_u.u6_addr32);
+ }
+ loc_port = ntohs(tcph->dest);
+ rem_port = ntohs(tcph->source);
+ cm_node = irdma_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port,
+ loc_addr, buf->vlan_valid ? buf->vlan_id : 0xFFFF);
+ if (!cm_node)
+ return NULL;
+
+ iwqp = cm_node->iwqp;
+ irdma_rem_ref_cm_node(cm_node);
+
+ return &iwqp->sc_qp;
+}
+
+/**
+ * irdma_send_ieq_ack - ACKs for duplicate or OOO partial FPDUs
+ * @qp: qp ptr
+ */
+void irdma_send_ieq_ack(struct irdma_sc_qp *qp)
+{
+ struct irdma_cm_node *cm_node = ((struct irdma_qp *)qp->qp_uk.back_qp)->cm_node;
+ struct irdma_puda_buf *buf = qp->pfpdu.lastrcv_buf;
+ struct tcphdr *tcph = (struct tcphdr *)buf->tcph;
+
+ cm_node->tcp_cntxt.rcv_nxt = qp->pfpdu.nextseqnum;
+ cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
+
+ irdma_send_ack(cm_node);
+}
+
+/**
+ * irdma_puda_ieq_get_ah_info - get AH info from IEQ buffer
+ * @qp: qp pointer
+ * @ah_info: AH info pointer
+ */
+void irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp,
+ struct irdma_ah_info *ah_info)
+{
+ struct irdma_puda_buf *buf = qp->pfpdu.ah_buf;
+ struct iphdr *iph;
+ struct ipv6hdr *ip6h;
+
+ memset(ah_info, 0, sizeof(*ah_info));
+ ah_info->do_lpbk = true;
+ ah_info->vlan_tag = buf->vlan_id;
+ ah_info->insert_vlan_tag = buf->vlan_valid;
+ ah_info->ipv4_valid = buf->ipv4;
+ ah_info->vsi = qp->vsi;
+
+ if (buf->smac_valid)
+ ether_addr_copy(ah_info->mac_addr, buf->smac);
+
+ if (buf->ipv4) {
+ ah_info->ipv4_valid = true;
+ iph = (struct iphdr *)buf->iph;
+ ah_info->hop_ttl = iph->ttl;
+ ah_info->tc_tos = iph->tos;
+ ah_info->dest_ip_addr[0] = ntohl(iph->daddr);
+ ah_info->src_ip_addr[0] = ntohl(iph->saddr);
+ } else {
+ ip6h = (struct ipv6hdr *)buf->iph;
+ ah_info->hop_ttl = ip6h->hop_limit;
+ ah_info->tc_tos = ip6h->priority;
+ irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
+ ip6h->daddr.in6_u.u6_addr32);
+ irdma_copy_ip_ntohl(ah_info->src_ip_addr,
+ ip6h->saddr.in6_u.u6_addr32);
+ }
+
+ ah_info->dst_arpindex = irdma_arp_table(dev_to_rf(qp->dev),
+ ah_info->dest_ip_addr,
+ ah_info->ipv4_valid,
+ NULL, IRDMA_ARP_RESOLVE);
+}
+
+/**
+ * irdma_gen1_ieq_update_tcpip_info - update tcpip in the buffer
+ * @buf: puda to update
+ * @len: length of buffer
+ * @seqnum: seq number for tcp
+ */
+static void irdma_gen1_ieq_update_tcpip_info(struct irdma_puda_buf *buf,
+ u16 len, u32 seqnum)
+{
+ struct tcphdr *tcph;
+ struct iphdr *iph;
+ u16 iphlen;
+ u16 pktsize;
+ u8 *addr = buf->mem.va;
+
+ iphlen = (buf->ipv4) ? 20 : 40;
+ iph = (struct iphdr *)(addr + buf->maclen);
+ tcph = (struct tcphdr *)(addr + buf->maclen + iphlen);
+ pktsize = len + buf->tcphlen + iphlen;
+ iph->tot_len = htons(pktsize);
+ tcph->seq = htonl(seqnum);
+}
+
+/**
+ * irdma_ieq_update_tcpip_info - update tcpip in the buffer
+ * @buf: puda to update
+ * @len: length of buffer
+ * @seqnum: seq number for tcp
+ */
+void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len,
+ u32 seqnum)
+{
+ struct tcphdr *tcph;
+ u8 *addr;
+
+ if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ return irdma_gen1_ieq_update_tcpip_info(buf, len, seqnum);
+
+ addr = buf->mem.va;
+ tcph = (struct tcphdr *)addr;
+ tcph->seq = htonl(seqnum);
+}
+
+/**
+ * irdma_gen1_puda_get_tcpip_info - get tcpip info from puda buffer
+ * @info: puda completion info to parse
+ * @buf: puda buffer to fill
+ */
+static int irdma_gen1_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
+ struct irdma_puda_buf *buf)
+{
+ struct iphdr *iph;
+ struct ipv6hdr *ip6h;
+ struct tcphdr *tcph;
+ u16 iphlen;
+ u16 pkt_len;
+ u8 *mem = buf->mem.va;
+ struct ethhdr *ethh = buf->mem.va;
+
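+ /* 0x8100 is the 802.1Q VLAN ethertype */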
+ if (ethh->h_proto == htons(0x8100)) {
+ info->vlan_valid = true;
+ buf->vlan_id = ntohs(((struct vlan_ethhdr *)ethh)->h_vlan_TCI) &
+ VLAN_VID_MASK;
+ }
+
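+ /* 14-byte Ethernet header, or 18 bytes with an 802.1Q VLAN tag */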
+ buf->maclen = (info->vlan_valid) ? 18 : 14;
+ iphlen = (info->l3proto) ? 40 : 20;
+ buf->ipv4 = (info->l3proto) ? false : true;
+ buf->iph = mem + buf->maclen;
+ iph = (struct iphdr *)buf->iph;
+ buf->tcph = buf->iph + iphlen;
+ tcph = (struct tcphdr *)buf->tcph;
+
+ if (buf->ipv4) {
+ pkt_len = ntohs(iph->tot_len);
+ } else {
+ ip6h = (struct ipv6hdr *)buf->iph;
+ pkt_len = ntohs(ip6h->payload_len) + iphlen;
+ }
+
+ buf->totallen = pkt_len + buf->maclen;
+
+ if (info->payload_len < buf->totallen) {
+ ibdev_dbg(to_ibdev(buf->vsi->dev),
+ "ERR: payload_len = 0x%x totallen expected0x%x\n",
+ info->payload_len, buf->totallen);
+ return -EINVAL;
+ }
+
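+ /* TCP header length: doff is in 32-bit words */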
+ buf->tcphlen = tcph->doff << 2;
+ buf->datalen = pkt_len - iphlen - buf->tcphlen;
+ buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL;
+ buf->hdrlen = buf->maclen + iphlen + buf->tcphlen;
+ buf->seqnum = ntohl(tcph->seq);
+
+ return 0;
+}
+
+/**
+ * irdma_puda_get_tcpip_info - get tcpip info from puda buffer
+ * @info: puda completion info to parse
+ * @buf: puda buffer to fill
+ */
+int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
+ struct irdma_puda_buf *buf)
+{
+ struct tcphdr *tcph;
+ u32 pkt_len;
+ u8 *mem;
+
+ if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ return irdma_gen1_puda_get_tcpip_info(info, buf);
+
+ mem = buf->mem.va;
+ buf->vlan_valid = info->vlan_valid;
+ if (info->vlan_valid)
+ buf->vlan_id = info->vlan;
+
+ buf->ipv4 = info->ipv4;
+ if (buf->ipv4)
+ buf->iph = mem + IRDMA_IPV4_PAD;
+ else
+ buf->iph = mem;
+
+ buf->tcph = mem + IRDMA_TCP_OFFSET;
+ tcph = (struct tcphdr *)buf->tcph;
+ pkt_len = info->payload_len;
+ buf->totallen = pkt_len;
+ buf->tcphlen = tcph->doff << 2;
+ buf->datalen = pkt_len - IRDMA_TCP_OFFSET - buf->tcphlen;
+ buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL;
+ buf->hdrlen = IRDMA_TCP_OFFSET + buf->tcphlen;
+ buf->seqnum = ntohl(tcph->seq);
+
+ if (info->smac_valid) {
+ ether_addr_copy(buf->smac, info->smac);
+ buf->smac_valid = true;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_hw_stats_timeout - Stats timer-handler which updates all HW stats
+ * @t: timer_list pointer
+ */
+static void irdma_hw_stats_timeout(struct timer_list *t)
+{
+ struct irdma_vsi_pestat *pf_devstat =
+ timer_container_of(pf_devstat, t, stats_timer);
+ struct irdma_sc_vsi *sc_vsi = pf_devstat->vsi;
+
+ if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ irdma_cqp_gather_stats_cmd(sc_vsi->dev, sc_vsi->pestat, false);
+ else
+ irdma_cqp_gather_stats_gen1(sc_vsi->dev, sc_vsi->pestat);
+
+ mod_timer(&pf_devstat->stats_timer,
+ jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
+}
+
+/**
+ * irdma_hw_stats_start_timer - Start periodic stats timer
+ * @vsi: vsi structure pointer
+ */
+void irdma_hw_stats_start_timer(struct irdma_sc_vsi *vsi)
+{
+ struct irdma_vsi_pestat *devstat = vsi->pestat;
+
+ timer_setup(&devstat->stats_timer, irdma_hw_stats_timeout, 0);
+ mod_timer(&devstat->stats_timer,
+ jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
+}
+
+/**
+ * irdma_hw_stats_stop_timer - Delete periodic stats timer
+ * @vsi: pointer to vsi structure
+ */
+void irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi)
+{
+ struct irdma_vsi_pestat *devstat = vsi->pestat;
+
+ timer_delete_sync(&devstat->stats_timer);
+}
+
+/**
+ * irdma_process_stats - Check for wrap and update stats
+ * @pestat: stats structure pointer
+ */
+static inline void irdma_process_stats(struct irdma_vsi_pestat *pestat)
+{
+ sc_vsi_update_stats(pestat->vsi);
+}
+
+/**
+ * irdma_cqp_gather_stats_gen1 - Gather stats
+ * @dev: pointer to device structure
+ * @pestat: statistics structure
+ */
+void irdma_cqp_gather_stats_gen1(struct irdma_sc_dev *dev,
+ struct irdma_vsi_pestat *pestat)
+{
+ struct irdma_gather_stats *gather_stats =
+ pestat->gather_info.gather_stats_va;
+ const struct irdma_hw_stat_map *map = dev->hw_stats_map;
+ u16 max_stats_idx = dev->hw_attrs.max_stat_idx;
+ u32 stats_inst_offset_32;
+ u32 stats_inst_offset_64;
+ u64 new_val;
+ u16 i;
+
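+ /* per-instance register stride: 4 bytes for 32-bit counters, 8 bytes for 64-bit */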
+ stats_inst_offset_32 = (pestat->gather_info.use_stats_inst) ?
+ pestat->gather_info.stats_inst_index :
+ pestat->hw->hmc.hmc_fn_id;
+ stats_inst_offset_32 *= 4;
+ stats_inst_offset_64 = stats_inst_offset_32 * 2;
+
+ for (i = 0; i < max_stats_idx; i++) {
+ if (map[i].bitmask <= IRDMA_MAX_STATS_32)
+ new_val = rd32(dev->hw,
+ dev->hw_stats_regs[i] + stats_inst_offset_32);
+ else
+ new_val = rd64(dev->hw,
+ dev->hw_stats_regs[i] + stats_inst_offset_64);
+ gather_stats->val[map[i].byteoff / sizeof(u64)] = new_val;
+ }
+
+ irdma_process_stats(pestat);
+}
+
+/**
+ * irdma_process_cqp_stats - Check for wrap and update stats
+ * @cqp_request: cqp_request structure pointer
+ */
+static void irdma_process_cqp_stats(struct irdma_cqp_request *cqp_request)
+{
+ struct irdma_vsi_pestat *pestat = cqp_request->param;
+
+ irdma_process_stats(pestat);
+}
+
+/**
+ * irdma_cqp_gather_stats_cmd - Gather stats
+ * @dev: pointer to device structure
+ * @pestat: pointer to stats info
+ * @wait: flag to wait or not wait for stats
+ */
+int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
+ struct irdma_vsi_pestat *pestat, bool wait)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ memset(cqp_info, 0, sizeof(*cqp_info));
+ cqp_info->cqp_cmd = IRDMA_OP_STATS_GATHER;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.stats_gather.info = pestat->gather_info;
+ cqp_info->in.u.stats_gather.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.stats_gather.cqp = &rf->cqp.sc_cqp;
+ cqp_request->param = pestat;
+ if (!wait)
+ cqp_request->callback_fcn = irdma_process_cqp_stats;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ if (wait)
+ irdma_process_stats(pestat);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_stats_inst_cmd - Allocate/free stats instance
+ * @vsi: pointer to vsi structure
+ * @cmd: command to allocate or free
+ * @stats_info: pointer to allocate stats info
+ */
+int irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
+ struct irdma_stats_inst_info *stats_info)
+{
+ struct irdma_pci_f *rf = dev_to_rf(vsi->dev);
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+ bool wait = false;
+
+ if (cmd == IRDMA_OP_STATS_ALLOCATE)
+ wait = true;
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ memset(cqp_info, 0, sizeof(*cqp_info));
+ cqp_info->cqp_cmd = cmd;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.stats_manage.info = *stats_info;
+ cqp_info->in.u.stats_manage.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.stats_manage.cqp = &rf->cqp.sc_cqp;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ if (wait)
+ stats_info->stats_idx = cqp_request->compl_info.op_ret_val;
+ irdma_put_cqp_request(iwcqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_ceq_cmd - Create/Destroy CEQs after CEQ 0
+ * @dev: pointer to device info
+ * @sc_ceq: pointer to ceq structure
+ * @op: Create or Destroy
+ */
+int irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq,
+ u8 op)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->post_sq = 1;
+ cqp_info->cqp_cmd = op;
+ cqp_info->in.u.ceq_create.ceq = sc_ceq;
+ cqp_info->in.u.ceq_create.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_aeq_cmd - Create/Destroy AEQ
+ * @dev: pointer to device info
+ * @sc_aeq: pointer to aeq structure
+ * @op: Create or Destroy
+ */
+int irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_aeq *sc_aeq,
+ u8 op)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->post_sq = 1;
+ cqp_info->cqp_cmd = op;
+ cqp_info->in.u.aeq_create.aeq = sc_aeq;
+ cqp_info->in.u.aeq_create.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_ws_node_cmd - Add/modify/delete ws node
+ * @dev: pointer to device structure
+ * @cmd: Add, modify or delete
+ * @node_info: pointer to ws node info
+ */
+int irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
+ struct irdma_ws_node_info *node_info)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+ bool poll;
+
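+ /* poll for the CQP completion when CEQs are not yet operational */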
+ if (!rf->sc_dev.ceq_valid)
+ poll = true;
+ else
+ poll = false;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, !poll);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ memset(cqp_info, 0, sizeof(*cqp_info));
+ cqp_info->cqp_cmd = cmd;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.ws_node.info = *node_info;
+ cqp_info->in.u.ws_node.cqp = cqp;
+ cqp_info->in.u.ws_node.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ if (status)
+ goto exit;
+
+ if (poll) {
+ struct irdma_ccq_cqe_info compl_info;
+
+ status = irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_WORK_SCHED_NODE,
+ &compl_info);
+ node_info->qs_handle = compl_info.op_ret_val;
+ ibdev_dbg(&rf->iwdev->ibdev, "DCB: opcode=%d, compl_info.retval=%d\n",
+ compl_info.op_code, compl_info.op_ret_val);
+ } else {
+ node_info->qs_handle = cqp_request->compl_info.op_ret_val;
+ }
+
+exit:
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_ah_cqp_op - perform an AH cqp operation
+ * @rf: RDMA PCI function
+ * @sc_ah: address handle
+ * @cmd: AH operation
+ * @wait: wait if true
+ * @callback_fcn: Callback function on CQP op completion
+ * @cb_param: parameter for callback function
+ *
+ * returns errno
+ */
+int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
+ bool wait,
+ void (*callback_fcn)(struct irdma_cqp_request *),
+ void *cb_param)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+
+ if (cmd != IRDMA_OP_AH_CREATE && cmd != IRDMA_OP_AH_DESTROY)
+ return -EINVAL;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = cmd;
+ cqp_info->post_sq = 1;
+ if (cmd == IRDMA_OP_AH_CREATE) {
+ cqp_info->in.u.ah_create.info = sc_ah->ah_info;
+ cqp_info->in.u.ah_create.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.ah_create.cqp = &rf->cqp.sc_cqp;
+ } else if (cmd == IRDMA_OP_AH_DESTROY) {
+ cqp_info->in.u.ah_destroy.info = sc_ah->ah_info;
+ cqp_info->in.u.ah_destroy.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.ah_destroy.cqp = &rf->cqp.sc_cqp;
+ }
+
+ if (!wait) {
+ cqp_request->callback_fcn = callback_fcn;
+ cqp_request->param = cb_param;
+ }
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ if (status)
+ return -ENOMEM;
+
+ if (wait)
+ sc_ah->ah_info.ah_valid = (cmd == IRDMA_OP_AH_CREATE);
+
+ return 0;
+}
+
+/**
+ * irdma_ieq_ah_cb - callback after creation of AH for IEQ
+ * @cqp_request: pointer to cqp_request of create AH
+ */
+static void irdma_ieq_ah_cb(struct irdma_cqp_request *cqp_request)
+{
+ struct irdma_sc_qp *qp = cqp_request->param;
+ struct irdma_sc_ah *sc_ah = qp->pfpdu.ah;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->pfpdu.lock, flags);
+ if (!cqp_request->compl_info.op_ret_val) {
+ sc_ah->ah_info.ah_valid = true;
+ irdma_ieq_process_fpdus(qp, qp->vsi->ieq);
+ } else {
+ sc_ah->ah_info.ah_valid = false;
+ irdma_ieq_cleanup_qp(qp->vsi->ieq, qp);
+ }
+ spin_unlock_irqrestore(&qp->pfpdu.lock, flags);
+}
+
+/**
+ * irdma_ilq_ah_cb - callback after creation of AH for ILQ
+ * @cqp_request: pointer to cqp_request of create AH
+ */
+static void irdma_ilq_ah_cb(struct irdma_cqp_request *cqp_request)
+{
+ struct irdma_cm_node *cm_node = cqp_request->param;
+ struct irdma_sc_ah *sc_ah = cm_node->ah;
+
+ sc_ah->ah_info.ah_valid = !cqp_request->compl_info.op_ret_val;
+ irdma_add_conn_est_qh(cm_node);
+}
+
+/**
+ * irdma_puda_create_ah - create AH for ILQ/IEQ QPs
+ * @dev: device pointer
+ * @ah_info: Address handle info
+ * @wait: When true will wait for operation to complete
+ * @type: ILQ/IEQ
+ * @cb_param: Callback param when not waiting
+ * @ah_ret: Returned pointer to address handle if created
+ */
+int irdma_puda_create_ah(struct irdma_sc_dev *dev,
+ struct irdma_ah_info *ah_info, bool wait,
+ enum puda_rsrc_type type, void *cb_param,
+ struct irdma_sc_ah **ah_ret)
+{
+ struct irdma_sc_ah *ah;
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ int err;
+
+ ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
+ *ah_ret = ah;
+ if (!ah)
+ return -ENOMEM;
+
+ err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah,
+ &ah_info->ah_idx, &rf->next_ah);
+ if (err)
+ goto err_free;
+
+ ah->dev = dev;
+ ah->ah_info = *ah_info;
+
+ if (type == IRDMA_PUDA_RSRC_TYPE_ILQ)
+ err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait,
+ irdma_ilq_ah_cb, cb_param);
+ else
+ err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait,
+ irdma_ieq_ah_cb, cb_param);
+
+ if (err)
+ goto error;
+ return 0;
+
+error:
+ irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx);
+err_free:
+ kfree(ah);
+ *ah_ret = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * irdma_puda_free_ah - free a puda address handle
+ * @dev: device pointer
+ * @ah: The address handle to free
+ */
+void irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+
+ if (!ah)
+ return;
+
+ if (ah->ah_info.ah_valid) {
+ irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_DESTROY, false, NULL, NULL);
+ irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx);
+ }
+
+ kfree(ah);
+}
+
+/**
+ * irdma_gsi_ud_qp_ah_cb - callback after creation of AH for GSI/UD QP
+ * @cqp_request: pointer to cqp_request of create AH
+ */
+void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request)
+{
+ struct irdma_sc_ah *sc_ah = cqp_request->param;
+
+ if (!cqp_request->compl_info.op_ret_val)
+ sc_ah->ah_info.ah_valid = true;
+ else
+ sc_ah->ah_info.ah_valid = false;
+}
+
+/**
+ * irdma_prm_add_pble_mem - add memory to pble resources
+ * @pprm: pble resource manager
+ * @pchunk: chunk of memory to add
+ */
+int irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
+ struct irdma_chunk *pchunk)
+{
+ u64 sizeofbitmap;
+
+ if (pchunk->size & 0xfff)
+ return -EINVAL;
+
+ sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift;
+
+ pchunk->bitmapbuf = bitmap_zalloc(sizeofbitmap, GFP_KERNEL);
+ if (!pchunk->bitmapbuf)
+ return -ENOMEM;
+
+ pchunk->sizeofbitmap = sizeofbitmap;
+ /* each pble is 8 bytes hence shift by 3 */
+ pprm->total_pble_alloc += pchunk->size >> 3;
+ pprm->free_pble_cnt += pchunk->size >> 3;
+
+ return 0;
+}
+
+/**
+ * irdma_prm_get_pbles - get pble's from prm
+ * @pprm: pble resource manager
+ * @chunkinfo: information about chunk where pbles were acquired
+ * @mem_size: size of pble memory needed
+ * @vaddr: returns virtual address of pble memory
+ * @fpm_addr: returns fpm address of pble memory
+ */
+int irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
+ struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size,
+ u64 **vaddr, u64 *fpm_addr)
+{
+ u64 bits_needed;
+ u64 bit_idx = PBLE_INVALID_IDX;
+ struct irdma_chunk *pchunk = NULL;
+ struct list_head *chunk_entry = pprm->clist.next;
+ u32 offset;
+ unsigned long flags;
+
+ *vaddr = NULL;
+ *fpm_addr = 0;
+
+ bits_needed = DIV_ROUND_UP_ULL(mem_size, BIT_ULL(pprm->pble_shift));
+
+ spin_lock_irqsave(&pprm->prm_lock, flags);
+ while (chunk_entry != &pprm->clist) {
+ pchunk = (struct irdma_chunk *)chunk_entry;
+ bit_idx = bitmap_find_next_zero_area(pchunk->bitmapbuf,
+ pchunk->sizeofbitmap, 0,
+ bits_needed, 0);
+ if (bit_idx < pchunk->sizeofbitmap)
+ break;
+
+ /* advance to the next chunk in the list */
+ chunk_entry = pchunk->list.next;
+ }
+
+ if (!pchunk || bit_idx >= pchunk->sizeofbitmap) {
+ spin_unlock_irqrestore(&pprm->prm_lock, flags);
+ return -ENOMEM;
+ }
+
+ bitmap_set(pchunk->bitmapbuf, bit_idx, bits_needed);
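+ /* each bitmap bit covers (1 << pble_shift) bytes of pble memory */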
+ offset = bit_idx << pprm->pble_shift;
+ *vaddr = pchunk->vaddr + offset;
+ *fpm_addr = pchunk->fpm_addr + offset;
+
+ chunkinfo->pchunk = pchunk;
+ chunkinfo->bit_idx = bit_idx;
+ chunkinfo->bits_used = bits_needed;
+ /* each pble is 8 bytes, so shift by (pble_shift - 3) to convert bits to pbles */
+ pprm->free_pble_cnt -= chunkinfo->bits_used << (pprm->pble_shift - 3);
+ spin_unlock_irqrestore(&pprm->prm_lock, flags);
+
+ return 0;
+}
+
+/**
+ * irdma_prm_return_pbles - return pbles back to prm
+ * @pprm: pble resource manager
+ * @chunkinfo: chunk where pble's were acquired and to be freed
+ */
+void irdma_prm_return_pbles(struct irdma_pble_prm *pprm,
+ struct irdma_pble_chunkinfo *chunkinfo)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pprm->prm_lock, flags);
+ pprm->free_pble_cnt += chunkinfo->bits_used << (pprm->pble_shift - 3);
+ bitmap_clear(chunkinfo->pchunk->bitmapbuf, chunkinfo->bit_idx,
+ chunkinfo->bits_used);
+ spin_unlock_irqrestore(&pprm->prm_lock, flags);
+}
+
+int irdma_map_vm_page_list(struct irdma_hw *hw, void *va, dma_addr_t *pg_dma,
+ u32 pg_cnt)
+{
+ struct page *vm_page;
+ int i;
+ u8 *addr;
+
+ addr = (u8 *)(uintptr_t)va;
+ for (i = 0; i < pg_cnt; i++) {
+ vm_page = vmalloc_to_page(addr);
+ if (!vm_page)
+ goto err;
+
+ pg_dma[i] = dma_map_page(hw->device, vm_page, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(hw->device, pg_dma[i]))
+ goto err;
+
+ addr += PAGE_SIZE;
+ }
+
+ return 0;
+
+err:
+ irdma_unmap_vm_page_list(hw, pg_dma, i);
+ return -ENOMEM;
+}
+
+void irdma_unmap_vm_page_list(struct irdma_hw *hw, dma_addr_t *pg_dma, u32 pg_cnt)
+{
+ int i;
+
+ for (i = 0; i < pg_cnt; i++)
+ dma_unmap_page(hw->device, pg_dma[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
+}
+
+/**
+ * irdma_pble_free_paged_mem - free virtual paged memory
+ * @chunk: chunk to free with paged memory
+ */
+void irdma_pble_free_paged_mem(struct irdma_chunk *chunk)
+{
+ if (!chunk->pg_cnt)
+ goto done;
+
+ irdma_unmap_vm_page_list(chunk->dev->hw, chunk->dmainfo.dmaaddrs,
+ chunk->pg_cnt);
+
+done:
+ kfree(chunk->dmainfo.dmaaddrs);
+ chunk->dmainfo.dmaaddrs = NULL;
+ vfree(chunk->vaddr);
+ chunk->vaddr = NULL;
+ chunk->type = 0;
+}
+
+/**
+ * irdma_pble_get_paged_mem - allocate paged memory for pbles
+ * @chunk: chunk to add for paged memory
+ * @pg_cnt: number of pages needed
+ */
+int irdma_pble_get_paged_mem(struct irdma_chunk *chunk, u32 pg_cnt)
+{
+ u32 size;
+ void *va;
+
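+ /* one 8-byte DMA address per page */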
+ chunk->dmainfo.dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);
+ if (!chunk->dmainfo.dmaaddrs)
+ return -ENOMEM;
+
+ size = PAGE_SIZE * pg_cnt;
+ va = vmalloc(size);
+ if (!va)
+ goto err;
+
+ if (irdma_map_vm_page_list(chunk->dev->hw, va, chunk->dmainfo.dmaaddrs,
+ pg_cnt)) {
+ vfree(va);
+ goto err;
+ }
+ chunk->vaddr = va;
+ chunk->size = size;
+ chunk->pg_cnt = pg_cnt;
+ chunk->type = PBLE_SD_PAGED;
+
+ return 0;
+err:
+ kfree(chunk->dmainfo.dmaaddrs);
+ chunk->dmainfo.dmaaddrs = NULL;
+
+ return -ENOMEM;
+}
+
+/**
+ * irdma_alloc_ws_node_id - Allocate a tx scheduler node ID
+ * @dev: device pointer
+ */
+u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ u32 next = 1;
+ u32 node_id;
+
+ if (irdma_alloc_rsrc(rf, rf->allocated_ws_nodes, rf->max_ws_node_id,
+ &node_id, &next))
+ return IRDMA_WS_NODE_INVALID;
+
+ return (u16)node_id;
+}
+
+/**
+ * irdma_free_ws_node_id - Free a tx scheduler node ID
+ * @dev: device pointer
+ * @node_id: Work scheduler node ID
+ */
+void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+
+ irdma_free_rsrc(rf, rf->allocated_ws_nodes, (u32)node_id);
+}
+
+/**
+ * irdma_modify_qp_to_err - Modify a QP to error
+ * @sc_qp: qp structure
+ */
+void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp)
+{
+ struct irdma_qp *qp = sc_qp->qp_uk.back_qp;
+ struct ib_qp_attr attr;
+
+ if (qp->iwdev->rf->reset)
+ return;
+ attr.qp_state = IB_QPS_ERR;
+
+ if (rdma_protocol_roce(qp->ibqp.device, 1))
+ irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
+ else
+ irdma_modify_qp(&qp->ibqp, &attr, IB_QP_STATE, NULL);
+}
+
+void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
+{
+ struct ib_event ibevent;
+
+ if (!iwqp->ibqp.event_handler)
+ return;
+
+ switch (event) {
+ case IRDMA_QP_EVENT_CATASTROPHIC:
+ ibevent.event = IB_EVENT_QP_FATAL;
+ break;
+ case IRDMA_QP_EVENT_ACCESS_ERR:
+ ibevent.event = IB_EVENT_QP_ACCESS_ERR;
+ break;
+ case IRDMA_QP_EVENT_REQ_ERR:
+ ibevent.event = IB_EVENT_QP_REQ_ERR;
+ break;
+ }
+ ibevent.device = iwqp->ibqp.device;
+ ibevent.element.qp = &iwqp->ibqp;
+ iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
+}
+
+bool irdma_cq_empty(struct irdma_cq *iwcq)
+{
+ struct irdma_cq_uk *ukcq;
+ u64 qword3;
+ __le64 *cqe;
+ u8 polarity;
+
+ ukcq = &iwcq->sc_cq.cq_uk;
+ if (ukcq->avoid_mem_cflct)
+ cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(ukcq);
+ else
+ cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
+ get_64bit_val(cqe, 24, &qword3);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+
+ return polarity != ukcq->polarity;
+}
+
+void irdma_remove_cmpls_list(struct irdma_cq *iwcq)
+{
+ struct irdma_cmpl_gen *cmpl_node;
+ struct list_head *tmp_node, *list_node;
+
+ list_for_each_safe (list_node, tmp_node, &iwcq->cmpl_generated) {
+ cmpl_node = list_entry(list_node, struct irdma_cmpl_gen, list);
+ list_del(&cmpl_node->list);
+ kfree(cmpl_node);
+ }
+}
+
+int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info)
+{
+ struct irdma_cmpl_gen *cmpl;
+
+ if (list_empty(&iwcq->cmpl_generated))
+ return -ENOENT;
+ cmpl = list_first_entry_or_null(&iwcq->cmpl_generated, struct irdma_cmpl_gen, list);
+ list_del(&cmpl->list);
+ memcpy(cq_poll_info, &cmpl->cpi, sizeof(*cq_poll_info));
+ kfree(cmpl);
+
+ ibdev_dbg(iwcq->ibcq.device,
+ "VERBS: %s: Poll artificially generated completion for QP 0x%X, op %u, wr_id=0x%llx\n",
+ __func__, cq_poll_info->qp_id, cq_poll_info->op_type,
+ cq_poll_info->wr_id);
+
+ return 0;
+}
+
+/**
+ * irdma_set_cpi_common_values - fill in values for polling info struct
+ * @cpi: resulting structure of cq_poll_info type
+ * @qp: QPair
+ * @qp_num: id of the QP
+ */
+static void irdma_set_cpi_common_values(struct irdma_cq_poll_info *cpi,
+ struct irdma_qp_uk *qp, u32 qp_num)
+{
+ cpi->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
+ cpi->error = true;
+ cpi->major_err = IRDMA_FLUSH_MAJOR_ERR;
+ cpi->minor_err = FLUSH_GENERAL_ERR;
+ cpi->qp_handle = (irdma_qp_handle)(uintptr_t)qp;
+ cpi->qp_id = qp_num;
+}
+
+static inline void irdma_comp_handler(struct irdma_cq *cq)
+{
+ if (!cq->ibcq.comp_handler)
+ return;
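+ /* call the handler only if the CQ is armed; atomically clear the flag */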
+ if (atomic_cmpxchg(&cq->armed, 1, 0))
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+}
+
+void irdma_generate_flush_completions(struct irdma_qp *iwqp)
+{
+ struct irdma_qp_uk *qp = &iwqp->sc_qp.qp_uk;
+ struct irdma_ring *sq_ring = &qp->sq_ring;
+ struct irdma_ring *rq_ring = &qp->rq_ring;
+ struct irdma_cmpl_gen *cmpl;
+ __le64 *sw_wqe;
+ u64 wqe_qword;
+ u32 wqe_idx;
+ bool compl_generated = false;
+ unsigned long flags1;
+
+ spin_lock_irqsave(&iwqp->iwscq->lock, flags1);
+ if (irdma_cq_empty(iwqp->iwscq)) {
+ unsigned long flags2;
+
+ spin_lock_irqsave(&iwqp->lock, flags2);
+ while (IRDMA_RING_MORE_WORK(*sq_ring)) {
+ cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
+ if (!cmpl) {
+ spin_unlock_irqrestore(&iwqp->lock, flags2);
+ spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
+ return;
+ }
+
+ wqe_idx = sq_ring->tail;
+ irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id);
+
+ cmpl->cpi.wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
+ sw_wqe = qp->sq_base[wqe_idx].elem;
+ get_64bit_val(sw_wqe, 24, &wqe_qword);
+ cmpl->cpi.op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
+ cmpl->cpi.q_type = IRDMA_CQE_QTYPE_SQ;
+ /* remove the SQ WR by moving SQ tail */
+ IRDMA_RING_SET_TAIL(*sq_ring,
+ sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
+ if (cmpl->cpi.op_type == IRDMAQP_OP_NOP) {
+ kfree(cmpl);
+ continue;
+ }
+ ibdev_dbg(iwqp->iwscq->ibcq.device,
+ "DEV: %s: adding wr_id = 0x%llx SQ Completion to list qp_id=%d\n",
+ __func__, cmpl->cpi.wr_id, qp->qp_id);
+ list_add_tail(&cmpl->list, &iwqp->iwscq->cmpl_generated);
+ compl_generated = true;
+ }
+ spin_unlock_irqrestore(&iwqp->lock, flags2);
+ spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
+ if (compl_generated)
+ irdma_comp_handler(iwqp->iwscq);
+ } else {
+ spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
+ mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
+ msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
+ }
+
+ spin_lock_irqsave(&iwqp->iwrcq->lock, flags1);
+ if (irdma_cq_empty(iwqp->iwrcq)) {
+ unsigned long flags2;
+
+ spin_lock_irqsave(&iwqp->lock, flags2);
+ while (IRDMA_RING_MORE_WORK(*rq_ring)) {
+ cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
+ if (!cmpl) {
+ spin_unlock_irqrestore(&iwqp->lock, flags2);
+ spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
+ return;
+ }
+
+ wqe_idx = rq_ring->tail;
+ irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id);
+
+ cmpl->cpi.wr_id = qp->rq_wrid_array[wqe_idx];
+ cmpl->cpi.op_type = IRDMA_OP_TYPE_REC;
+ cmpl->cpi.q_type = IRDMA_CQE_QTYPE_RQ;
+ /* remove the RQ WR by moving RQ tail */
+ IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1);
+ ibdev_dbg(iwqp->iwrcq->ibcq.device,
+ "DEV: %s: adding wr_id = 0x%llx RQ Completion to list qp_id=%d, wqe_idx=%d\n",
+ __func__, cmpl->cpi.wr_id, qp->qp_id,
+ wqe_idx);
+ list_add_tail(&cmpl->list, &iwqp->iwrcq->cmpl_generated);
+
+ compl_generated = true;
+ }
+ spin_unlock_irqrestore(&iwqp->lock, flags2);
+ spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
+ if (compl_generated)
+ irdma_comp_handler(iwqp->iwrcq);
+ } else {
+ spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
+ mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
+ msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
+ }
+}
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
new file mode 100644
index 000000000000..76ce6137f2ba
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -0,0 +1,5505 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "main.h"
+
+/**
+ * irdma_query_device - get device attributes
+ * @ibdev: device pointer from stack
+ * @props: returning device attributes
+ * @udata: user data
+ */
+static int irdma_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props,
+ struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct pci_dev *pcidev = iwdev->rf->pcidev;
+ struct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs;
+
+ if (udata->inlen || udata->outlen)
+ return -EINVAL;
+
+ memset(props, 0, sizeof(*props));
+ addrconf_addr_eui48((u8 *)&props->sys_image_guid,
+ iwdev->netdev->dev_addr);
+ props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
+ irdma_fw_minor_ver(&rf->sc_dev);
+ props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
+ IB_DEVICE_MEM_MGT_EXTENSIONS;
+ props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
+ props->vendor_id = pcidev->vendor;
+ props->vendor_part_id = pcidev->device;
+
+ props->hw_ver = rf->pcidev->revision;
+ props->page_size_cap = hw_attrs->page_size_cap;
+ props->max_mr_size = hw_attrs->max_mr_size;
+ props->max_qp = rf->max_qp - rf->used_qps;
+ props->max_qp_wr = hw_attrs->max_qp_wr;
+ props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
+ props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
+ props->max_cq = rf->max_cq - rf->used_cqs;
+ props->max_cqe = rf->max_cqe - 1;
+ props->max_mr = rf->max_mr - rf->used_mrs;
+ if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_3)
+ props->max_mw = props->max_mr;
+ props->max_pd = rf->max_pd - rf->used_pds;
+ props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
+ props->max_qp_rd_atom = hw_attrs->max_hw_ird;
+ props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
+ if (rdma_protocol_roce(ibdev, 1)) {
+ props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN;
+ props->max_pkeys = IRDMA_PKEY_TBL_SZ;
+ }
+
+ props->max_ah = rf->max_ah;
+ props->max_mcast_grp = rf->max_mcg;
+ props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
+ props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
+ props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;
+ props->max_srq = rf->max_srq - rf->used_srqs;
+ props->max_srq_wr = IRDMA_MAX_SRQ_WRS;
+ props->max_srq_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
+ if (hw_attrs->uk_attrs.feature_flags & IRDMA_FEATURE_ATOMIC_OPS)
+ props->atomic_cap = IB_ATOMIC_HCA;
+ else
+ props->atomic_cap = IB_ATOMIC_NONE;
+ props->masked_atomic_cap = props->atomic_cap;
+ if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_3) {
+#define HCA_CORE_CLOCK_KHZ 1000000UL
+ props->timestamp_mask = GENMASK(31, 0);
+ props->hca_core_clock = HCA_CORE_CLOCK_KHZ;
+ }
+ if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_3)
+ props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
+
+ return 0;
+}
+
+/**
+ * irdma_query_port - get port attributes
+ * @ibdev: device pointer from stack
+ * @port: port number for query
+ * @props: returning device attributes
+ */
+static int irdma_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props)
+{
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+ struct net_device *netdev = iwdev->netdev;
+
+ /* no need to zero out props here, done by the caller */
+
+ props->max_mtu = IB_MTU_4096;
+ props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
+ props->lid = 1;
+ props->lmc = 0;
+ props->sm_lid = 0;
+ props->sm_sl = 0;
+ if (netif_carrier_ok(netdev) && netif_running(netdev)) {
+ props->state = IB_PORT_ACTIVE;
+ props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+ } else {
+ props->state = IB_PORT_DOWN;
+ props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+ }
+
+ ib_get_eth_speed(ibdev, port, &props->active_speed,
+ &props->active_width);
+
+ if (rdma_protocol_roce(ibdev, 1)) {
+ props->gid_tbl_len = 32;
+ props->ip_gids = true;
+ props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ;
+ } else {
+ props->gid_tbl_len = 1;
+ }
+ props->qkey_viol_cntr = 0;
+ props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP;
+ props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size;
+
+ return 0;
+}
+
+/**
+ * irdma_disassociate_ucontext - Disassociate user context
+ * @context: ib user context
+ */
+static void irdma_disassociate_ucontext(struct ib_ucontext *context)
+{
+}
+
+static int irdma_mmap_legacy(struct irdma_ucontext *ucontext,
+ struct vm_area_struct *vma)
+{
+ u64 pfn;
+
+ if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
+
+ vma->vm_private_data = ucontext;
+ pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] +
+ pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
+
+ return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot), NULL);
+}
+
+static void irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct irdma_user_mmap_entry *entry = to_irdma_mmap_entry(rdma_entry);
+
+ kfree(entry);
+}
+
+static struct rdma_user_mmap_entry*
+irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
+ enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
+{
+ struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ int ret;
+
+ if (!entry)
+ return NULL;
+
+ entry->bar_offset = bar_offset;
+ entry->mmap_flag = mmap_flag;
+
+ ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext,
+ &entry->rdma_entry, PAGE_SIZE);
+ if (ret) {
+ kfree(entry);
+ return NULL;
+ }
+ *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+ return &entry->rdma_entry;
+}
+
+/**
+ * irdma_mmap - user memory map
+ * @context: context created during alloc
+ * @vma: kernel info for user memory map
+ */
+static int irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct irdma_user_mmap_entry *entry;
+ struct irdma_ucontext *ucontext;
+ u64 pfn;
+ int ret;
+
+ ucontext = to_ucontext(context);
+
+ /* Legacy support for libi40iw with hard-coded mmap key */
+ if (ucontext->legacy_mode)
+ return irdma_mmap_legacy(ucontext, vma);
+
+ rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
+ if (!rdma_entry) {
+ ibdev_dbg(&ucontext->iwdev->ibdev,
+ "VERBS: pgoff[0x%lx] does not have valid entry\n",
+ vma->vm_pgoff);
+ return -EINVAL;
+ }
+
+ entry = to_irdma_mmap_entry(rdma_entry);
+ ibdev_dbg(&ucontext->iwdev->ibdev,
+ "VERBS: bar_offset [0x%llx] mmap_flag [%d]\n",
+ entry->bar_offset, entry->mmap_flag);
+
+ pfn = (entry->bar_offset +
+ pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
+
+ switch (entry->mmap_flag) {
+ case IRDMA_MMAP_IO_NC:
+ ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot),
+ rdma_entry);
+ break;
+ case IRDMA_MMAP_IO_WC:
+ ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
+ pgprot_writecombine(vma->vm_page_prot),
+ rdma_entry);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ ibdev_dbg(&ucontext->iwdev->ibdev,
+ "VERBS: bar_offset [0x%llx] mmap_flag[%d] err[%d]\n",
+ entry->bar_offset, entry->mmap_flag, ret);
+ rdma_user_mmap_entry_put(rdma_entry);
+
+ return ret;
+}
+
+/**
+ * irdma_alloc_push_page - allocate a push page for qp
+ * @iwqp: qp pointer
+ */
+static void irdma_alloc_push_page(struct irdma_qp *iwqp)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_sc_qp *qp = &iwqp->sc_qp;
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.manage_push_page.info.push_idx = 0;
+ cqp_info->in.u.manage_push_page.info.qs_handle =
+ qp->vsi->qos[qp->user_pri].qs_handle;
+ cqp_info->in.u.manage_push_page.info.free_page = 0;
+ cqp_info->in.u.manage_push_page.info.push_page_type = 0;
+ cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;
+ cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ if (!status && cqp_request->compl_info.op_ret_val <
+ iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) {
+ qp->push_idx = cqp_request->compl_info.op_ret_val;
+ qp->push_offset = 0;
+ }
+
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+}
+
+/**
+ * irdma_alloc_ucontext - Allocate the user context data structure
+ * @uctx: uverbs context pointer
+ * @udata: user data
+ *
+ * This keeps track of all objects associated with a particular
+ * user-mode client.
+ */
+static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
+ struct ib_udata *udata)
+{
+#define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
+#define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)
+ struct ib_device *ibdev = uctx->device;
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+ struct irdma_alloc_ucontext_req req = {};
+ struct irdma_alloc_ucontext_resp uresp = {};
+ struct irdma_ucontext *ucontext = to_ucontext(uctx);
+ struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
+
+ if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
+ udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
+ return -EINVAL;
+
+ if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
+ return -EINVAL;
+
+ if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER)
+ goto ver_error;
+
+ ucontext->iwdev = iwdev;
+ ucontext->abi_ver = req.userspace_ver;
+
+ if (!(req.comp_mask & IRDMA_SUPPORT_WQE_FORMAT_V2) &&
+ uk_attrs->hw_rev >= IRDMA_GEN_3)
+ return -EOPNOTSUPP;
+
+ if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
+ ucontext->use_raw_attrs = true;
+
+ /* GEN_1 legacy support with libi40iw */
+ if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
+ if (uk_attrs->hw_rev != IRDMA_GEN_1)
+ return -EOPNOTSUPP;
+
+ ucontext->legacy_mode = true;
+ uresp.max_qps = iwdev->rf->max_qp;
+ uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
+ uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
+ uresp.kernel_ver = req.userspace_ver;
+ if (ib_copy_to_udata(udata, &uresp,
+ min(sizeof(uresp), udata->outlen)))
+ return -EFAULT;
+ } else {
+ u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
+
+ ucontext->db_mmap_entry =
+ irdma_user_mmap_entry_insert(ucontext, bar_off,
+ IRDMA_MMAP_IO_NC,
+ &uresp.db_mmap_key);
+ if (!ucontext->db_mmap_entry)
+ return -ENOMEM;
+
+ uresp.kernel_ver = IRDMA_ABI_VER;
+ uresp.feature_flags = uk_attrs->feature_flags;
+ uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
+ uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges;
+ uresp.max_hw_inline = uk_attrs->max_hw_inline;
+ uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;
+ uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;
+ uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;
+ uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
+ uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
+ uresp.hw_rev = uk_attrs->hw_rev;
+ uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
+ uresp.min_hw_wq_size = uk_attrs->min_hw_wq_size;
+ uresp.comp_mask |= IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE;
+ uresp.max_hw_srq_quanta = uk_attrs->max_hw_srq_quanta;
+ uresp.comp_mask |= IRDMA_ALLOC_UCTX_MAX_HW_SRQ_QUANTA;
+ if (ib_copy_to_udata(udata, &uresp,
+ min(sizeof(uresp), udata->outlen))) {
+ rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
+ return -EFAULT;
+ }
+ }
+
+ INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
+ spin_lock_init(&ucontext->cq_reg_mem_list_lock);
+ INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
+ spin_lock_init(&ucontext->qp_reg_mem_list_lock);
+ INIT_LIST_HEAD(&ucontext->srq_reg_mem_list);
+ spin_lock_init(&ucontext->srq_reg_mem_list_lock);
+
+ return 0;
+
+ver_error:
+ ibdev_err(&iwdev->ibdev,
+ "Invalid userspace driver version detected. Detected version %d, should be %d\n",
+ req.userspace_ver, IRDMA_ABI_VER);
+ return -EINVAL;
+}
+
+/**
+ * irdma_dealloc_ucontext - deallocate the user context data structure
+ * @context: user context created during alloc
+ */
+static void irdma_dealloc_ucontext(struct ib_ucontext *context)
+{
+ struct irdma_ucontext *ucontext = to_ucontext(context);
+
+ rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
+}
+
+/**
+ * irdma_alloc_pd - allocate protection domain
+ * @pd: PD pointer
+ * @udata: user data
+ */
+static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
+{
+#define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
+ struct irdma_pd *iwpd = to_iwpd(pd);
+ struct irdma_device *iwdev = to_iwdev(pd->device);
+ struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_alloc_pd_resp uresp = {};
+ struct irdma_sc_pd *sc_pd;
+ u32 pd_id = 0;
+ int err;
+
+ if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN)
+ return -EINVAL;
+
+ err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
+ &rf->next_pd);
+ if (err)
+ return err;
+
+ sc_pd = &iwpd->sc_pd;
+ if (udata) {
+ struct irdma_ucontext *ucontext =
+ rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+ irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
+ uresp.pd_id = pd_id;
+ if (ib_copy_to_udata(udata, &uresp,
+ min(sizeof(uresp), udata->outlen))) {
+ err = -EFAULT;
+ goto error;
+ }
+ } else {
+ irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
+ }
+
+ return 0;
+error:
+ irdma_free_rsrc(rf, rf->allocated_pds, pd_id);
+
+ return err;
+}
+
+/**
+ * irdma_dealloc_pd - deallocate pd
+ * @ibpd: ptr of pd to be deallocated
+ * @udata: user data
+ */
+static int irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct irdma_pd *iwpd = to_iwpd(ibpd);
+ struct irdma_device *iwdev = to_iwdev(ibpd->device);
+
+ irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
+
+ return 0;
+}
+
+/**
+ * irdma_get_pbl - Retrieve pbl from a list given a virtual
+ * address
+ * @va: user virtual address
+ * @pbl_list: pbl list to search in (QP's or CQ's)
+ */
+static struct irdma_pbl *irdma_get_pbl(unsigned long va,
+ struct list_head *pbl_list)
+{
+ struct irdma_pbl *iwpbl;
+
+ list_for_each_entry (iwpbl, pbl_list, list) {
+ if (iwpbl->user_base == va) {
+ list_del(&iwpbl->list);
+ iwpbl->on_list = false;
+ return iwpbl;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * irdma_clean_cqes - clean cq entries for qp
+ * @iwqp: qp ptr (user or kernel)
+ * @iwcq: cq ptr
+ */
+static void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
+{
+ struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwcq->lock, flags);
+ irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq);
+ spin_unlock_irqrestore(&iwcq->lock, flags);
+}
+
+static void irdma_remove_push_mmap_entries(struct irdma_qp *iwqp)
+{
+ if (iwqp->push_db_mmap_entry) {
+ rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry);
+ iwqp->push_db_mmap_entry = NULL;
+ }
+ if (iwqp->push_wqe_mmap_entry) {
+ rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
+ iwqp->push_wqe_mmap_entry = NULL;
+ }
+}
+
+static int irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext,
+ struct irdma_qp *iwqp,
+ u64 *push_wqe_mmap_key,
+ u64 *push_db_mmap_key)
+{
+ struct irdma_device *iwdev = ucontext->iwdev;
+ u64 rsvd, bar_off;
+
+ rsvd = IRDMA_PF_BAR_RSVD;
+ bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
+ /* skip over db page */
+ bar_off += IRDMA_HW_PAGE_SIZE;
+ /* push wqe page */
+ bar_off += rsvd + iwqp->sc_qp.push_idx * IRDMA_HW_PAGE_SIZE;
+ iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
+ bar_off, IRDMA_MMAP_IO_WC,
+ push_wqe_mmap_key);
+ if (!iwqp->push_wqe_mmap_entry)
+ return -ENOMEM;
+
+ /* push doorbell page */
+ bar_off += IRDMA_HW_PAGE_SIZE;
+ iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
+ bar_off, IRDMA_MMAP_IO_NC,
+ push_db_mmap_key);
+ if (!iwqp->push_db_mmap_entry) {
+ rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_destroy_qp - destroy qp
+ * @ibqp: qp's ib pointer also to get to device's qp address
+ * @udata: user data
+ */
+static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+{
+ struct irdma_qp *iwqp = to_iwqp(ibqp);
+ struct irdma_device *iwdev = iwqp->iwdev;
+
+ iwqp->sc_qp.qp_uk.destroy_pending = true;
+
+ if (iwqp->iwarp_state >= IRDMA_QP_STATE_IDLE)
+ irdma_modify_qp_to_err(&iwqp->sc_qp);
+
+ if (!iwqp->user_mode)
+ cancel_delayed_work_sync(&iwqp->dwork_flush);
+
+ if (!iwqp->user_mode) {
+ if (iwqp->iwscq) {
+ irdma_clean_cqes(iwqp, iwqp->iwscq);
+ if (iwqp->iwrcq != iwqp->iwscq)
+ irdma_clean_cqes(iwqp, iwqp->iwrcq);
+ }
+ }
+
+ irdma_qp_rem_ref(&iwqp->ibqp);
+ wait_for_completion(&iwqp->free_qp);
+ irdma_free_lsmm_rsrc(iwqp);
+ irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
+
+ irdma_remove_push_mmap_entries(iwqp);
+
+ if (iwqp->sc_qp.qp_uk.qp_id == 1)
+ iwdev->rf->hwqp1_rsvd = false;
+ irdma_free_qp_rsrc(iwqp);
+
+ return 0;
+}
+
+/**
+ * irdma_setup_virt_qp - setup for allocation of virtual qp
+ * @iwdev: irdma device
+ * @iwqp: qp ptr
+ * @init_info: initialize info to return
+ */
+static void irdma_setup_virt_qp(struct irdma_device *iwdev,
+ struct irdma_qp *iwqp,
+ struct irdma_qp_init_info *init_info)
+{
+ struct irdma_pbl *iwpbl = iwqp->iwpbl;
+ struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
+
+ iwqp->page = qpmr->sq_page;
+ init_info->shadow_area_pa = qpmr->shadow;
+ if (iwpbl->pbl_allocated) {
+ init_info->virtual_map = true;
+ init_info->sq_pa = qpmr->sq_pbl.idx;
+ /* Need to use contiguous buffer for RQ of QP
+ * in case it is associated with SRQ.
+ */
+ init_info->rq_pa = init_info->qp_uk_init_info.srq_uk ?
+ qpmr->rq_pa : qpmr->rq_pbl.idx;
+ } else {
+ init_info->sq_pa = qpmr->sq_pbl.addr;
+ init_info->rq_pa = qpmr->rq_pbl.addr;
+ }
+}
+
+/**
+ * irdma_setup_umode_qp - setup sq and rq size in user mode qp
+ * @udata: udata
+ * @iwdev: iwarp device
+ * @iwqp: qp ptr (user or kernel)
+ * @info: initialize info to return
+ * @init_attr: Initial QP create attributes
+ */
+static int irdma_setup_umode_qp(struct ib_udata *udata,
+ struct irdma_device *iwdev,
+ struct irdma_qp *iwqp,
+ struct irdma_qp_init_info *info,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata,
+ struct irdma_ucontext, ibucontext);
+ struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
+ struct irdma_create_qp_req req;
+ unsigned long flags;
+ int ret;
+
+ ret = ib_copy_from_udata(&req, udata,
+ min(sizeof(req), udata->inlen));
+ if (ret) {
+ ibdev_dbg(&iwdev->ibdev, "VERBS: ib_copy_from_data fail\n");
+ return ret;
+ }
+
+ iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
+ iwqp->user_mode = 1;
+ if (req.user_wqe_bufs) {
+ info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
+ spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+ iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
+ &ucontext->qp_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+
+ if (!iwqp->iwpbl) {
+ ret = -ENODATA;
+ ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
+ return ret;
+ }
+ }
+
+ if (!ucontext->use_raw_attrs) {
+ /**
+ * Maintain backward compat with older ABI which passes sq and
+ * rq depth in quanta in cap.max_send_wr and cap.max_recv_wr.
+ * There is no way to compute the correct value of
+ * iwqp->max_send_wr/max_recv_wr in the kernel.
+ */
+ iwqp->max_send_wr = init_attr->cap.max_send_wr;
+ iwqp->max_recv_wr = init_attr->cap.max_recv_wr;
+ ukinfo->sq_size = init_attr->cap.max_send_wr;
+ ukinfo->rq_size = init_attr->cap.max_recv_wr;
+ irdma_uk_calc_shift_wq(ukinfo, &ukinfo->sq_shift,
+ &ukinfo->rq_shift);
+ } else {
+ ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
+ &ukinfo->sq_shift);
+ if (ret)
+ return ret;
+
+ ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
+ &ukinfo->rq_shift);
+ if (ret)
+ return ret;
+
+ iwqp->max_send_wr =
+ (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
+ iwqp->max_recv_wr =
+ (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
+ ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
+ ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
+ }
+
+ irdma_setup_virt_qp(iwdev, iwqp, info);
+
+ return 0;
+}
+
+/**
+ * irdma_setup_kmode_qp - setup initialization for kernel mode qp
+ * @iwdev: iwarp device
+ * @iwqp: qp ptr (user or kernel)
+ * @info: initialize info to return
+ * @init_attr: Initial QP create attributes
+ */
+static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
+ struct irdma_qp *iwqp,
+ struct irdma_qp_init_info *info,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
+ u32 size;
+ int status;
+ struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
+
+ status = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
+ &ukinfo->sq_shift);
+ if (status)
+ return status;
+
+ status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
+ &ukinfo->rq_shift);
+ if (status)
+ return status;
+
+ iwqp->kqp.sq_wrid_mem =
+ kcalloc(ukinfo->sq_depth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
+ if (!iwqp->kqp.sq_wrid_mem)
+ return -ENOMEM;
+
+ iwqp->kqp.rq_wrid_mem =
+ kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
+
+ if (!iwqp->kqp.rq_wrid_mem) {
+ kfree(iwqp->kqp.sq_wrid_mem);
+ iwqp->kqp.sq_wrid_mem = NULL;
+ return -ENOMEM;
+ }
+
+ ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
+ ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;
+
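+ /* shadow area size is in 8-byte quanta, hence the shift by 3 */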
+ size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE;
+ size += (IRDMA_SHADOW_AREA_SIZE << 3);
+
+ mem->size = ALIGN(size, 256);
+ mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size,
+ &mem->pa, GFP_KERNEL);
+ if (!mem->va) {
+ kfree(iwqp->kqp.sq_wrid_mem);
+ iwqp->kqp.sq_wrid_mem = NULL;
+ kfree(iwqp->kqp.rq_wrid_mem);
+ iwqp->kqp.rq_wrid_mem = NULL;
+ return -ENOMEM;
+ }
+
+ ukinfo->sq = mem->va;
+ info->sq_pa = mem->pa;
+ ukinfo->rq = &ukinfo->sq[ukinfo->sq_depth];
+ info->rq_pa = info->sq_pa + (ukinfo->sq_depth * IRDMA_QP_WQE_MIN_SIZE);
+ ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem;
+ info->shadow_area_pa =
+ info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
+ ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
+ ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
+ ukinfo->qp_id = info->qp_uk_init_info.qp_id;
+
+ iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
+ iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
+ init_attr->cap.max_send_wr = iwqp->max_send_wr;
+ init_attr->cap.max_recv_wr = iwqp->max_recv_wr;
+
+ return 0;
+}
+
+static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
+{
+ struct irdma_pci_f *rf = iwqp->iwdev->rf;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_create_qp_info *qp_info;
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ qp_info = &cqp_request->info.in.u.qp_create.info;
+ memset(qp_info, 0, sizeof(*qp_info));
+ qp_info->mac_valid = true;
+ qp_info->cq_num_valid = true;
+ qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;
+
+ cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_create.qp = &iwqp->sc_qp;
+ cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
+ struct irdma_qp_host_ctx_info *ctx_info)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
+ struct irdma_roce_offload_info *roce_info;
+ struct irdma_udp_offload_info *udp_info;
+
+ udp_info = &iwqp->udp_info;
+ udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu));
+ udp_info->cwnd = iwdev->roce_cwnd;
+ udp_info->rexmit_thresh = 2;
+ udp_info->rnr_nak_thresh = 2;
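+ /* 0xc000 (49152) is the start of the IANA dynamic/ephemeral port range */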
+ udp_info->src_port = 0xc000;
+ udp_info->dst_port = ROCE_V2_UDP_DPORT;
+ roce_info = &iwqp->roce_info;
+ ether_addr_copy(roce_info->mac_addr, iwdev->netdev->dev_addr);
+
+ if (iwqp->ibqp.qp_type == IB_QPT_GSI && iwqp->ibqp.qp_num != 1)
+ roce_info->is_qp1 = true;
+ roce_info->rd_en = true;
+ roce_info->wr_rdresp_en = true;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ roce_info->bind_en = true;
+ roce_info->dcqcn_en = false;
+ roce_info->rtomin = 5;
+
+ roce_info->ack_credits = iwdev->roce_ackcreds;
+ roce_info->ird_size = dev->hw_attrs.max_hw_ird;
+ roce_info->ord_size = dev->hw_attrs.max_hw_ord;
+
+ if (!iwqp->user_mode) {
+ roce_info->priv_mode_en = true;
+ roce_info->fast_reg_en = true;
+ roce_info->udprivcq_en = true;
+ }
+ roce_info->roce_tver = 0;
+
+ ctx_info->roce_info = &iwqp->roce_info;
+ ctx_info->udp_info = &iwqp->udp_info;
+ irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
+}
+
+static void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
+ struct irdma_qp_host_ctx_info *ctx_info)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
+ struct irdma_iwarp_offload_info *iwarp_info;
+
+ iwarp_info = &iwqp->iwarp_info;
+ ether_addr_copy(iwarp_info->mac_addr, iwdev->netdev->dev_addr);
+ iwarp_info->rd_en = true;
+ iwarp_info->wr_rdresp_en = true;
+ iwarp_info->ecn_en = true;
+ iwarp_info->rtomin = 5;
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ iwarp_info->ib_rd_en = true;
+ if (!iwqp->user_mode) {
+ iwarp_info->priv_mode_en = true;
+ iwarp_info->fast_reg_en = true;
+ }
+ iwarp_info->ddp_ver = 1;
+ iwarp_info->rdmap_ver = 1;
+
+ ctx_info->iwarp_info = &iwqp->iwarp_info;
+ ctx_info->iwarp_info_valid = true;
+ irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
+ ctx_info->iwarp_info_valid = false;
+}
+
+static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
+ struct irdma_device *iwdev)
+{
+ struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
+ struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
+
+ if (init_attr->create_flags)
+ return -EOPNOTSUPP;
+
+ if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
+ init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
+ init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags ||
+ init_attr->cap.max_send_wr > uk_attrs->max_hw_wq_quanta ||
+ init_attr->cap.max_recv_wr > uk_attrs->max_hw_rq_quanta)
+ return -EINVAL;
+
+ if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
+ if (init_attr->qp_type != IB_QPT_RC &&
+ init_attr->qp_type != IB_QPT_UD &&
+ init_attr->qp_type != IB_QPT_GSI)
+ return -EOPNOTSUPP;
+ } else {
+ if (init_attr->qp_type != IB_QPT_RC)
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void irdma_flush_worker(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush);
+
+ irdma_generate_flush_completions(iwqp);
+}
+
+static int irdma_setup_gsi_qp_rsrc(struct irdma_qp *iwqp, u32 *qp_num)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_pci_f *rf = iwdev->rf;
+ unsigned long flags;
+ int ret;
+
+ if (rf->rdma_ver <= IRDMA_GEN_2) {
+ *qp_num = 1;
+ return 0;
+ }
+
+ spin_lock_irqsave(&rf->rsrc_lock, flags);
+ if (!rf->hwqp1_rsvd) {
+ *qp_num = 1;
+ rf->hwqp1_rsvd = true;
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+ } else {
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+ ret = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
+ qp_num, &rf->next_qp);
+ if (ret)
+ return ret;
+ }
+
+ ret = irdma_vchnl_req_add_vport(&rf->sc_dev, iwdev->vport_id, *qp_num,
+ (&iwdev->vsi)->qos);
+ if (ret) {
+ if (*qp_num != 1) {
+ irdma_free_rsrc(rf, rf->allocated_qps, *qp_num);
+ } else {
+ spin_lock_irqsave(&rf->rsrc_lock, flags);
+ rf->hwqp1_rsvd = false;
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+ }
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_create_qp - create qp
+ * @ibqp: ptr of qp
+ * @init_attr: attributes for qp
+ * @udata: user data for create qp
+ */
+static int irdma_create_qp(struct ib_qp *ibqp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+#define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx)
+#define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd)
+ struct ib_pd *ibpd = ibqp->pd;
+ struct irdma_pd *iwpd = to_iwpd(ibpd);
+ struct irdma_device *iwdev = to_iwdev(ibpd->device);
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_qp *iwqp = to_iwqp(ibqp);
+ struct irdma_create_qp_resp uresp = {};
+ u32 qp_num = 0;
+ int err_code;
+ struct irdma_sc_qp *qp;
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
+ struct irdma_qp_init_info init_info = {};
+ struct irdma_qp_host_ctx_info *ctx_info;
+ struct irdma_srq *iwsrq;
+ bool srq_valid = false;
+ u32 srq_id = 0;
+
+ if (init_attr->srq) {
+ iwsrq = to_iwsrq(init_attr->srq);
+ srq_valid = true;
+ srq_id = iwsrq->srq_num;
+ init_attr->cap.max_recv_sge = uk_attrs->max_hw_wq_frags;
+ init_attr->cap.max_recv_wr = 4;
+ init_info.qp_uk_init_info.srq_uk = &iwsrq->sc_srq.srq_uk;
+ }
+
+ err_code = irdma_validate_qp_attrs(init_attr, iwdev);
+ if (err_code)
+ return err_code;
+
+ if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN ||
+ udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
+ return -EINVAL;
+
+ init_info.vsi = &iwdev->vsi;
+ init_info.qp_uk_init_info.uk_attrs = uk_attrs;
+ init_info.qp_uk_init_info.sq_size = init_attr->cap.max_send_wr;
+ init_info.qp_uk_init_info.rq_size = init_attr->cap.max_recv_wr;
+ init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
+ init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
+ init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
+
+ qp = &iwqp->sc_qp;
+ qp->qp_uk.back_qp = iwqp;
+ qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
+
+ iwqp->iwdev = iwdev;
+ iwqp->q2_ctx_mem.size = ALIGN(IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE,
+ 256);
+ iwqp->q2_ctx_mem.va = dma_alloc_coherent(dev->hw->device,
+ iwqp->q2_ctx_mem.size,
+ &iwqp->q2_ctx_mem.pa,
+ GFP_KERNEL);
+ if (!iwqp->q2_ctx_mem.va)
+ return -ENOMEM;
+
+ init_info.q2 = iwqp->q2_ctx_mem.va;
+ init_info.q2_pa = iwqp->q2_ctx_mem.pa;
+ init_info.host_ctx = (__le64 *)(init_info.q2 + IRDMA_Q2_BUF_SIZE);
+ init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE;
+
+ if (init_attr->qp_type == IB_QPT_GSI) {
+ err_code = irdma_setup_gsi_qp_rsrc(iwqp, &qp_num);
+ if (err_code)
+ goto error;
+ iwqp->ibqp.qp_num = 1;
+ } else {
+ err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
+ &qp_num, &rf->next_qp);
+ if (err_code)
+ goto error;
+ iwqp->ibqp.qp_num = qp_num;
+ }
+
+ iwqp->iwpd = iwpd;
+ qp = &iwqp->sc_qp;
+ iwqp->iwscq = to_iwcq(init_attr->send_cq);
+ iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
+ iwqp->host_ctx.va = init_info.host_ctx;
+ iwqp->host_ctx.pa = init_info.host_ctx_pa;
+ iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
+
+ init_info.pd = &iwpd->sc_pd;
+ init_info.qp_uk_init_info.qp_id = qp_num;
+ if (!rdma_protocol_roce(&iwdev->ibdev, 1))
+ init_info.qp_uk_init_info.first_sq_wq = 1;
+ iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
+ init_waitqueue_head(&iwqp->waitq);
+ init_waitqueue_head(&iwqp->mod_qp_waitq);
+
+ if (udata) {
+ init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
+ err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info,
+ init_attr);
+ } else {
+ INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
+ init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
+ err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
+ }
+
+ if (err_code) {
+ ibdev_dbg(&iwdev->ibdev, "VERBS: setup qp failed\n");
+ goto error;
+ }
+
+ if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
+ if (init_attr->qp_type == IB_QPT_RC) {
+ init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_RC;
+ init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
+ IRDMA_WRITE_WITH_IMM |
+ IRDMA_ROCE;
+ } else {
+ init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_UD;
+ init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
+ IRDMA_ROCE;
+ }
+ } else {
+ init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_IWARP;
+ init_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM;
+ }
+
+ if (dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1)
+ init_info.qp_uk_init_info.qp_caps |= IRDMA_PUSH_MODE;
+
+ err_code = irdma_sc_qp_init(qp, &init_info);
+ if (err_code) {
+ ibdev_dbg(&iwdev->ibdev, "VERBS: qp_init fail\n");
+ goto error;
+ }
+
+ ctx_info = &iwqp->ctx_info;
+ ctx_info->srq_valid = srq_valid;
+ ctx_info->srq_id = srq_id;
+ ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
+ ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
+
+ if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
+ if (dev->ws_add(&iwdev->vsi, 0)) {
+ irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
+ err_code = -EINVAL;
+ goto error;
+ }
+ irdma_qp_add_qos(&iwqp->sc_qp);
+ irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info);
+ } else {
+ irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info);
+ }
+
+ err_code = irdma_cqp_create_qp_cmd(iwqp);
+ if (err_code)
+ goto error;
+
+ refcount_set(&iwqp->refcnt, 1);
+ spin_lock_init(&iwqp->lock);
+ spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
+ iwqp->sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
+ rf->qp_table[qp_num] = iwqp;
+
+ if (udata) {
+ /* GEN_1 legacy support with libi40iw does not have expanded uresp struct */
+ if (udata->outlen < sizeof(uresp)) {
+ uresp.lsmm = 1;
+ uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1;
+ } else {
+ if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
+ uresp.lsmm = 1;
+ }
+ uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size;
+ uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size;
+ uresp.qp_id = qp_num;
+ uresp.qp_caps = qp->qp_uk.qp_caps;
+
+ err_code = ib_copy_to_udata(udata, &uresp,
+ min(sizeof(uresp), udata->outlen));
+ if (err_code) {
+ ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n");
+ irdma_destroy_qp(&iwqp->ibqp, udata);
+ return err_code;
+ }
+ }
+
+ init_completion(&iwqp->free_qp);
+ return 0;
+
+error:
+ irdma_free_qp_rsrc(iwqp);
+ return err_code;
+}
+
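+/**
+ * irdma_get_ib_acc_flags - get IB access flags from qp offload info
+ * @iwqp: qp to report
+ */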
+static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
+{
+ int acc_flags = 0;
+
+ if (rdma_protocol_roce(iwqp->ibqp.device, 1)) {
+ if (iwqp->roce_info.wr_rdresp_en) {
+ acc_flags |= IB_ACCESS_LOCAL_WRITE;
+ acc_flags |= IB_ACCESS_REMOTE_WRITE;
+ }
+ if (iwqp->roce_info.rd_en)
+ acc_flags |= IB_ACCESS_REMOTE_READ;
+ if (iwqp->roce_info.bind_en)
+ acc_flags |= IB_ACCESS_MW_BIND;
+ if (iwqp->ctx_info.remote_atomics_en)
+ acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
+ } else {
+ if (iwqp->iwarp_info.wr_rdresp_en) {
+ acc_flags |= IB_ACCESS_LOCAL_WRITE;
+ acc_flags |= IB_ACCESS_REMOTE_WRITE;
+ }
+ if (iwqp->iwarp_info.rd_en)
+ acc_flags |= IB_ACCESS_REMOTE_READ;
+ if (iwqp->ctx_info.remote_atomics_en)
+ acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
+ }
+ return acc_flags;
+}
+
+/**
+ * irdma_query_qp - query qp attributes
+ * @ibqp: qp pointer
+ * @attr: attributes pointer
+ * @attr_mask: Not used
+ * @init_attr: qp attributes to return
+ */
+static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_qp_init_attr *init_attr)
+{
+ struct irdma_qp *iwqp = to_iwqp(ibqp);
+ struct irdma_sc_qp *qp = &iwqp->sc_qp;
+
+ memset(attr, 0, sizeof(*attr));
+ memset(init_attr, 0, sizeof(*init_attr));
+
+ attr->qp_state = iwqp->ibqp_state;
+ attr->cur_qp_state = iwqp->ibqp_state;
+ attr->cap.max_send_wr = iwqp->max_send_wr;
+ attr->cap.max_recv_wr = iwqp->max_recv_wr;
+ attr->cap.max_inline_data = qp->qp_uk.max_inline_data;
+ attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt;
+ attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt;
+ attr->qp_access_flags = irdma_get_ib_acc_flags(iwqp);
+ attr->port_num = 1;
+ if (rdma_protocol_roce(ibqp->device, 1)) {
+ attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss);
+ attr->qkey = iwqp->roce_info.qkey;
+ attr->rq_psn = iwqp->udp_info.epsn;
+ attr->sq_psn = iwqp->udp_info.psn_nxt;
+ attr->dest_qp_num = iwqp->roce_info.dest_qp;
+ attr->pkey_index = iwqp->roce_info.p_key;
+ attr->retry_cnt = iwqp->udp_info.rexmit_thresh;
+ attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh;
+ attr->min_rnr_timer = iwqp->udp_info.min_rnr_timer;
+ attr->max_rd_atomic = iwqp->roce_info.ord_size;
+ attr->max_dest_rd_atomic = iwqp->roce_info.ird_size;
+ }
+
+ init_attr->event_handler = iwqp->ibqp.event_handler;
+ init_attr->qp_context = iwqp->ibqp.qp_context;
+ init_attr->send_cq = iwqp->ibqp.send_cq;
+ init_attr->recv_cq = iwqp->ibqp.recv_cq;
+ init_attr->srq = iwqp->ibqp.srq;
+ init_attr->cap = attr->cap;
+
+ return 0;
+}
+
+/**
+ * irdma_query_pkey - Query partition key
+ * @ibdev: device pointer from stack
+ * @port: port number
+ * @index: index of pkey
+ * @pkey: pointer to store the pkey
+ */
+static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
+ u16 *pkey)
+{
+ if (index >= IRDMA_PKEY_TBL_SZ)
+ return -EINVAL;
+
+ *pkey = IRDMA_DEFAULT_PKEY;
+ return 0;
+}
+
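+/**
+ * irdma_roce_get_vlan_prio - get egress priority for a roce qp
+ * @attr: gid attribute with the associated netdev
+ * @prio: priority to use if no vlan device is attached
+ */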
+static u8 irdma_roce_get_vlan_prio(const struct ib_gid_attr *attr, u8 prio)
+{
+ struct net_device *ndev;
+
+ rcu_read_lock();
+ ndev = rcu_dereference(attr->ndev);
+ if (!ndev)
+ goto exit;
+ if (is_vlan_dev(ndev)) {
+ u16 vlan_qos = vlan_dev_get_egress_qos_mask(ndev, prio);
+
+ prio = (vlan_qos & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ }
+exit:
+ rcu_read_unlock();
+ return prio;
+}
+
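+/**
+ * irdma_wait_for_suspend - wait for qp suspend to complete
+ * @iwqp: qp being suspended
+ */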
+static int irdma_wait_for_suspend(struct irdma_qp *iwqp)
+{
+ if (!wait_event_timeout(iwqp->iwdev->suspend_wq,
+ !iwqp->suspend_pending,
+ msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS))) {
+ iwqp->suspend_pending = false;
+ ibdev_warn(&iwqp->iwdev->ibdev,
+ "modify_qp timed out waiting for suspend. qp_id = %d, last_ae = 0x%x\n",
+ iwqp->ibqp.qp_num, iwqp->last_aeq);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_modify_qp_roce - modify qp request
+ * @ibqp: pointer to the qp being modified
+ * @attr: qp attributes to modify
+ * @attr_mask: mask of attributes to modify
+ * @udata: user data
+ */
+int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
+#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
+ struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
+ struct irdma_qp *iwqp = to_iwqp(ibqp);
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
+ struct irdma_qp_host_ctx_info *ctx_info;
+ struct irdma_roce_offload_info *roce_info;
+ struct irdma_udp_offload_info *udp_info;
+ struct irdma_modify_qp_info info = {};
+ struct irdma_modify_qp_resp uresp = {};
+ struct irdma_modify_qp_req ureq = {};
+ unsigned long flags;
+ u8 issue_modify_qp = 0;
+ int ret = 0;
+
+ ctx_info = &iwqp->ctx_info;
+ roce_info = &iwqp->roce_info;
+ udp_info = &iwqp->udp_info;
+
+ if (udata) {
+ /* udata inlen/outlen can be 0 when supporting legacy libi40iw */
+ if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
+ (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
+ return -EINVAL;
+ }
+
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
+ if (attr_mask & IB_QP_DEST_QPN)
+ roce_info->dest_qp = attr->dest_qp_num;
+
+ if (attr_mask & IB_QP_PKEY_INDEX) {
+ ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index,
+ &roce_info->p_key);
+ if (ret)
+ return ret;
+ }
+
+ if (attr_mask & IB_QP_QKEY)
+ roce_info->qkey = attr->qkey;
+
+ if (attr_mask & IB_QP_PATH_MTU)
+ udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu);
+
+ if (attr_mask & IB_QP_SQ_PSN) {
+ udp_info->psn_nxt = attr->sq_psn;
+ udp_info->lsn = 0xffff;
+ udp_info->psn_una = attr->sq_psn;
+ udp_info->psn_max = attr->sq_psn;
+ }
+
+ if (attr_mask & IB_QP_RQ_PSN)
+ udp_info->epsn = attr->rq_psn;
+
+ if (attr_mask & IB_QP_RNR_RETRY)
+ udp_info->rnr_nak_thresh = attr->rnr_retry;
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER &&
+ dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ udp_info->min_rnr_timer = attr->min_rnr_timer;
+
+ if (attr_mask & IB_QP_RETRY_CNT)
+ udp_info->rexmit_thresh = attr->retry_cnt;
+
+ ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id;
+
+ if (attr_mask & IB_QP_AV) {
+ struct irdma_av *av = &iwqp->roce_ah.av;
+ const struct ib_gid_attr *sgid_attr =
+ attr->ah_attr.grh.sgid_attr;
+ u16 vlan_id = VLAN_N_VID;
+ u32 local_ip[4];
+
+ memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah));
+ if (attr->ah_attr.ah_flags & IB_AH_GRH) {
+ udp_info->ttl = attr->ah_attr.grh.hop_limit;
+ udp_info->flow_label = attr->ah_attr.grh.flow_label;
+ udp_info->tos = attr->ah_attr.grh.traffic_class;
+ udp_info->src_port =
+ rdma_get_udp_sport(udp_info->flow_label,
+ ibqp->qp_num,
+ roce_info->dest_qp);
+ irdma_qp_rem_qos(&iwqp->sc_qp);
+ dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
+ if (iwqp->sc_qp.vsi->dscp_mode)
+ ctx_info->user_pri =
+ iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(udp_info->tos)];
+ else
+ ctx_info->user_pri = rt_tos2priority(udp_info->tos);
+ }
+ ret = rdma_read_gid_l2_fields(sgid_attr, &vlan_id,
+ ctx_info->roce_info->mac_addr);
+ if (ret)
+ return ret;
+ ctx_info->user_pri = irdma_roce_get_vlan_prio(sgid_attr,
+ ctx_info->user_pri);
+ if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
+ return -ENOMEM;
+ iwqp->sc_qp.user_pri = ctx_info->user_pri;
+ irdma_qp_add_qos(&iwqp->sc_qp);
+
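+ /* With DCB VLAN mode and no VLAN on the GID, fall back to
+ * priority-tagged frames (VLAN id 0).
+ */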
+ if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
+ vlan_id = 0;
+ if (vlan_id < VLAN_N_VID) {
+ udp_info->insert_vlan_tag = true;
+ udp_info->vlan_tag = vlan_id |
+ ctx_info->user_pri << VLAN_PRIO_SHIFT;
+ } else {
+ udp_info->insert_vlan_tag = false;
+ }
+
+ av->attrs = attr->ah_attr;
+ rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
+ rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
+ av->net_type = rdma_gid_attr_network_type(sgid_attr);
+ if (av->net_type == RDMA_NETWORK_IPV6) {
+ __be32 *daddr =
+ av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
+ __be32 *saddr =
+ av->sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
+
+ irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr);
+ irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr);
+
+ udp_info->ipv4 = false;
+ irdma_copy_ip_ntohl(local_ip, daddr);
+
+ } else if (av->net_type == RDMA_NETWORK_IPV4) {
+ __be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
+ __be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;
+
+ local_ip[0] = ntohl(daddr);
+
+ udp_info->ipv4 = true;
+ udp_info->dest_ip_addr[0] = 0;
+ udp_info->dest_ip_addr[1] = 0;
+ udp_info->dest_ip_addr[2] = 0;
+ udp_info->dest_ip_addr[3] = local_ip[0];
+
+ udp_info->local_ipaddr[0] = 0;
+ udp_info->local_ipaddr[1] = 0;
+ udp_info->local_ipaddr[2] = 0;
+ udp_info->local_ipaddr[3] = ntohl(saddr);
+ }
+ udp_info->arp_idx =
+ irdma_add_arp(iwdev->rf, local_ip, udp_info->ipv4,
+ attr->ah_attr.roce.dmac);
+ }
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+ if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {
+ ibdev_err(&iwdev->ibdev,
+ "rd_atomic = %d, above max_hw_ord=%d\n",
+ attr->max_rd_atomic,
+ dev->hw_attrs.max_hw_ord);
+ return -EINVAL;
+ }
+ if (attr->max_rd_atomic)
+ roce_info->ord_size = attr->max_rd_atomic;
+ info.ord_valid = true;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+ if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
+ ibdev_err(&iwdev->ibdev,
+ "rd_atomic = %d, above max_hw_ird=%d\n",
+ attr->max_dest_rd_atomic,
+ dev->hw_attrs.max_hw_ird);
+ return -EINVAL;
+ }
+ if (attr->max_dest_rd_atomic)
+ roce_info->ird_size = attr->max_dest_rd_atomic;
+ }
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS) {
+ if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
+ roce_info->wr_rdresp_en = true;
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
+ roce_info->wr_rdresp_en = true;
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
+ roce_info->rd_en = true;
+ if (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_ATOMIC_OPS)
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)
+ ctx_info->remote_atomics_en = true;
+ }
+
+ wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
+
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n",
+ __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
+ iwqp->ibqp_state, iwqp->iwarp_state, attr_mask);
+
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (attr_mask & IB_QP_STATE) {
+ if (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
+ iwqp->ibqp.qp_type, attr_mask)) {
+ ibdev_warn(&iwdev->ibdev, "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
+ iwqp->ibqp.qp_num, iwqp->ibqp_state,
+ attr->qp_state);
+ ret = -EINVAL;
+ goto exit;
+ }
+ info.curr_iwarp_state = iwqp->iwarp_state;
+
+ switch (attr->qp_state) {
+ case IB_QPS_INIT:
+ if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
+ info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
+ issue_modify_qp = 1;
+ }
+ break;
+ case IB_QPS_RTR:
+ if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ info.arp_cache_idx_valid = true;
+ info.cq_num_valid = true;
+ info.next_iwarp_state = IRDMA_QP_STATE_RTR;
+ issue_modify_qp = 1;
+ break;
+ case IB_QPS_RTS:
+ if (iwqp->ibqp_state < IB_QPS_RTR ||
+ iwqp->ibqp_state == IB_QPS_ERR) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ info.arp_cache_idx_valid = true;
+ info.cq_num_valid = true;
+ info.ord_valid = true;
+ info.next_iwarp_state = IRDMA_QP_STATE_RTS;
+ issue_modify_qp = 1;
+ if (iwdev->push_mode && udata &&
+ iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
+ dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ irdma_alloc_push_page(iwqp);
+ spin_lock_irqsave(&iwqp->lock, flags);
+ }
+ break;
+ case IB_QPS_SQD:
+ if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD)
+ goto exit;
+
+ if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ info.next_iwarp_state = IRDMA_QP_STATE_SQD;
+ issue_modify_qp = 1;
+ iwqp->suspend_pending = true;
+ break;
+ case IB_QPS_SQE:
+ case IB_QPS_ERR:
+ case IB_QPS_RESET:
+ if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (udata && udata->inlen) {
+ if (ib_copy_from_udata(&ureq, udata,
+ min(sizeof(ureq), udata->inlen)))
+ return -EINVAL;
+
+ irdma_flush_wqes(iwqp,
+ (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
+ (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
+ IRDMA_REFLUSH);
+ }
+ return 0;
+ }
+
+ info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
+ issue_modify_qp = 1;
+ break;
+ default:
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ iwqp->ibqp_state = attr->qp_state;
+ }
+
+ ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
+ ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
+ irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+
+ if (attr_mask & IB_QP_STATE) {
+ if (issue_modify_qp) {
+ ctx_info->rem_endpoint_idx = udp_info->arp_idx;
+ if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
+ return -EINVAL;
+ if (info.next_iwarp_state == IRDMA_QP_STATE_SQD) {
+ ret = irdma_wait_for_suspend(iwqp);
+ if (ret)
+ return ret;
+ }
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->iwarp_state == info.curr_iwarp_state) {
+ iwqp->iwarp_state = info.next_iwarp_state;
+ iwqp->ibqp_state = attr->qp_state;
+ }
+ if (iwqp->ibqp_state > IB_QPS_RTS &&
+ !iwqp->flush_issued) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
+ IRDMA_FLUSH_RQ |
+ IRDMA_FLUSH_WAIT);
+ iwqp->flush_issued = 1;
+ } else {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ }
+ } else {
+ iwqp->ibqp_state = attr->qp_state;
+ }
+ if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ struct irdma_ucontext *ucontext;
+
+ ucontext = rdma_udata_to_drv_context(udata,
+ struct irdma_ucontext, ibucontext);
+ if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
+ !iwqp->push_wqe_mmap_entry &&
+ !irdma_setup_push_mmap_entries(ucontext, iwqp,
+ &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
+ uresp.push_valid = 1;
+ uresp.push_offset = iwqp->sc_qp.push_offset;
+ }
+ ret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
+ udata->outlen));
+ if (ret) {
+ irdma_remove_push_mmap_entries(iwqp);
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: copy_to_udata failed\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+exit:
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+
+ return ret;
+}
+
+/**
+ * irdma_modify_qp - modify qp request
+ * @ibqp: pointer to the qp being modified
+ * @attr: qp attributes to modify
+ * @attr_mask: mask of attributes to modify
+ * @udata: user data
+ */
+int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+ struct ib_udata *udata)
+{
+#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
+#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
+ struct irdma_qp *iwqp = to_iwqp(ibqp);
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
+ struct irdma_qp_host_ctx_info *ctx_info;
+ struct irdma_tcp_offload_info *tcp_info;
+ struct irdma_iwarp_offload_info *offload_info;
+ struct irdma_modify_qp_info info = {};
+ struct irdma_modify_qp_resp uresp = {};
+ struct irdma_modify_qp_req ureq = {};
+ u8 issue_modify_qp = 0;
+ u8 dont_wait = 0;
+ int err;
+ unsigned long flags;
+
+ if (udata) {
+ /* udata inlen/outlen can be 0 when supporting legacy libi40iw */
+ if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
+ (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
+ return -EINVAL;
+ }
+
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
+ ctx_info = &iwqp->ctx_info;
+ offload_info = &iwqp->iwarp_info;
+ tcp_info = &iwqp->tcp_info;
+ wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d last_aeq=%d hw_tcp_state=%d hw_iwarp_state=%d attr_mask=0x%x\n",
+ __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
+ iwqp->ibqp_state, iwqp->iwarp_state, iwqp->last_aeq,
+ iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask);
+
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (attr_mask & IB_QP_STATE) {
+ info.curr_iwarp_state = iwqp->iwarp_state;
+ switch (attr->qp_state) {
+ case IB_QPS_INIT:
+ case IB_QPS_RTR:
+ if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
+ info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
+ issue_modify_qp = 1;
+ }
+ if (iwdev->push_mode && udata &&
+ iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
+ dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ irdma_alloc_push_page(iwqp);
+ spin_lock_irqsave(&iwqp->lock, flags);
+ }
+ break;
+ case IB_QPS_RTS:
+ if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS ||
+ !iwqp->cm_id) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ issue_modify_qp = 1;
+ iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
+ iwqp->hte_added = 1;
+ info.next_iwarp_state = IRDMA_QP_STATE_RTS;
+ info.tcp_ctx_valid = true;
+ info.ord_valid = true;
+ info.arp_cache_idx_valid = true;
+ info.cq_num_valid = true;
+ break;
+ case IB_QPS_SQD:
+ if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) {
+ err = 0;
+ goto exit;
+ }
+
+ if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING ||
+ iwqp->iwarp_state < IRDMA_QP_STATE_RTS) {
+ err = 0;
+ goto exit;
+ }
+
+ if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ info.next_iwarp_state = IRDMA_QP_STATE_CLOSING;
+ issue_modify_qp = 1;
+ break;
+ case IB_QPS_SQE:
+ if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ info.next_iwarp_state = IRDMA_QP_STATE_TERMINATE;
+ issue_modify_qp = 1;
+ break;
+ case IB_QPS_ERR:
+ case IB_QPS_RESET:
+ if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (udata && udata->inlen) {
+ if (ib_copy_from_udata(&ureq, udata,
+ min(sizeof(ureq), udata->inlen)))
+ return -EINVAL;
+
+ irdma_flush_wqes(iwqp,
+ (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
+ (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
+ IRDMA_REFLUSH);
+ }
+ return 0;
+ }
+
+ if (iwqp->sc_qp.term_flags) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ irdma_terminate_del_timer(&iwqp->sc_qp);
+ spin_lock_irqsave(&iwqp->lock, flags);
+ }
+ info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
+ if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED &&
+ iwdev->iw_status &&
+ iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT)
+ info.reset_tcp_conn = true;
+ else
+ dont_wait = 1;
+
+ issue_modify_qp = 1;
+ info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
+ break;
+ default:
+ err = -EINVAL;
+ goto exit;
+ }
+
+ iwqp->ibqp_state = attr->qp_state;
+ }
+ if (attr_mask & IB_QP_ACCESS_FLAGS) {
+ ctx_info->iwarp_info_valid = true;
+ if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
+ offload_info->wr_rdresp_en = true;
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
+ offload_info->wr_rdresp_en = true;
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
+ offload_info->rd_en = true;
+ }
+
+ if (ctx_info->iwarp_info_valid) {
+ ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
+ ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
+ irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
+ }
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+
+ if (attr_mask & IB_QP_STATE) {
+ if (issue_modify_qp) {
+ ctx_info->rem_endpoint_idx = tcp_info->arp_idx;
+ if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->iwarp_state == info.curr_iwarp_state) {
+ iwqp->iwarp_state = info.next_iwarp_state;
+ iwqp->ibqp_state = attr->qp_state;
+ }
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ }
+
+ if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
+ if (dont_wait) {
+ if (iwqp->hw_tcp_state) {
+ spin_lock_irqsave(&iwqp->lock, flags);
+ iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
+ iwqp->last_aeq = IRDMA_AE_RESET_SENT;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ }
+ irdma_cm_disconn(iwqp);
+ } else {
+ int close_timer_started;
+
+ spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
+
+ if (iwqp->cm_node) {
+ refcount_inc(&iwqp->cm_node->refcnt);
+ spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
+ close_timer_started = atomic_inc_return(&iwqp->close_timer_started);
+ if (iwqp->cm_id && close_timer_started == 1)
+ irdma_schedule_cm_timer(iwqp->cm_node,
+ (struct irdma_puda_buf *)iwqp,
+ IRDMA_TIMER_TYPE_CLOSE, 1, 0);
+
+ irdma_rem_ref_cm_node(iwqp->cm_node);
+ } else {
+ spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
+ }
+ }
+ }
+ if (attr_mask & IB_QP_STATE && udata && udata->outlen &&
+ dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ struct irdma_ucontext *ucontext;
+
+ ucontext = rdma_udata_to_drv_context(udata,
+ struct irdma_ucontext, ibucontext);
+ if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
+ !iwqp->push_wqe_mmap_entry &&
+ !irdma_setup_push_mmap_entries(ucontext, iwqp,
+ &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
+ uresp.push_valid = 1;
+ uresp.push_offset = iwqp->sc_qp.push_offset;
+ }
+
+ err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
+ udata->outlen));
+ if (err) {
+ irdma_remove_push_mmap_entries(iwqp);
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: copy_to_udata failed\n");
+ return err;
+ }
+ }
+
+ return 0;
+exit:
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+
+ return err;
+}
+
+/**
+ * irdma_srq_free_rsrc - free up resources for srq
+ * @rf: RDMA PCI function
+ * @iwsrq: srq ptr
+ */
+static void irdma_srq_free_rsrc(struct irdma_pci_f *rf, struct irdma_srq *iwsrq)
+{
+ struct irdma_sc_srq *srq = &iwsrq->sc_srq;
+
+ if (!iwsrq->user_mode) {
+ dma_free_coherent(rf->sc_dev.hw->device, iwsrq->kmem.size,
+ iwsrq->kmem.va, iwsrq->kmem.pa);
+ iwsrq->kmem.va = NULL;
+ }
+
+ irdma_free_rsrc(rf, rf->allocated_srqs, srq->srq_uk.srq_id);
+}
+
+/**
+ * irdma_cq_free_rsrc - free up resources for cq
+ * @rf: RDMA PCI function
+ * @iwcq: cq ptr
+ */
+static void irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq)
+{
+ struct irdma_sc_cq *cq = &iwcq->sc_cq;
+
+ if (!iwcq->user_mode) {
+ dma_free_coherent(rf->sc_dev.hw->device, iwcq->kmem.size,
+ iwcq->kmem.va, iwcq->kmem.pa);
+ iwcq->kmem.va = NULL;
+ dma_free_coherent(rf->sc_dev.hw->device,
+ iwcq->kmem_shadow.size,
+ iwcq->kmem_shadow.va, iwcq->kmem_shadow.pa);
+ iwcq->kmem_shadow.va = NULL;
+ }
+
+ irdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id);
+}
+
+/**
+ * irdma_free_cqbuf - worker to free a cq buffer
+ * @work: provides access to the cq buffer to free
+ */
+static void irdma_free_cqbuf(struct work_struct *work)
+{
+ struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work);
+
+ dma_free_coherent(cq_buf->hw->device, cq_buf->kmem_buf.size,
+ cq_buf->kmem_buf.va, cq_buf->kmem_buf.pa);
+ cq_buf->kmem_buf.va = NULL;
+ kfree(cq_buf);
+}
+
+/**
+ * irdma_process_resize_list - remove resized cq buffers from the resize_list
+ * @iwcq: cq which owns the resize_list
+ * @iwdev: irdma device
+ * @lcqe_buf: the buffer where the last cqe is received
+ */
+static int irdma_process_resize_list(struct irdma_cq *iwcq,
+ struct irdma_device *iwdev,
+ struct irdma_cq_buf *lcqe_buf)
+{
+ struct list_head *tmp_node, *list_node;
+ struct irdma_cq_buf *cq_buf;
+ int cnt = 0;
+
+ list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
+ cq_buf = list_entry(list_node, struct irdma_cq_buf, list);
+ if (cq_buf == lcqe_buf)
+ return cnt;
+
+ list_del(&cq_buf->list);
+ queue_work(iwdev->cleanup_wq, &cq_buf->work);
+ cnt++;
+ }
+
+ return cnt;
+}
+
+/**
+ * irdma_destroy_srq - destroy srq
+ * @ibsrq: srq pointer
+ * @udata: user data
+ */
+static int irdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(ibsrq->device);
+ struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
+ struct irdma_sc_srq *srq = &iwsrq->sc_srq;
+
+ irdma_srq_wq_destroy(iwdev->rf, srq);
+ irdma_srq_free_rsrc(iwdev->rf, iwsrq);
+ return 0;
+}
+
+/**
+ * irdma_destroy_cq - destroy cq
+ * @ib_cq: cq pointer
+ * @udata: user data
+ */
+static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(ib_cq->device);
+ struct irdma_cq *iwcq = to_iwcq(ib_cq);
+ struct irdma_sc_cq *cq = &iwcq->sc_cq;
+ struct irdma_sc_dev *dev = cq->dev;
+ struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id];
+ struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwcq->lock, flags);
+ if (!list_empty(&iwcq->cmpl_generated))
+ irdma_remove_cmpls_list(iwcq);
+ if (!list_empty(&iwcq->resize_list))
+ irdma_process_resize_list(iwcq, iwdev, NULL);
+ spin_unlock_irqrestore(&iwcq->lock, flags);
+
+ irdma_cq_rem_ref(ib_cq);
+ wait_for_completion(&iwcq->free_cq);
+
+ irdma_cq_wq_destroy(iwdev->rf, cq);
+
+ spin_lock_irqsave(&iwceq->ce_lock, flags);
+ irdma_sc_cleanup_ceqes(cq, ceq);
+ spin_unlock_irqrestore(&iwceq->ce_lock, flags);
+ irdma_cq_free_rsrc(iwdev->rf, iwcq);
+
+ return 0;
+}
+
+/**
+ * irdma_resize_cq - resize cq
+ * @ibcq: cq to be resized
+ * @entries: desired cq size
+ * @udata: user data
+ */
+static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
+ struct ib_udata *udata)
+{
+#define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
+ struct irdma_cq *iwcq = to_iwcq(ibcq);
+ struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_modify_cq_info *m_info;
+ struct irdma_modify_cq_info info = {};
+ struct irdma_dma_mem kmem_buf;
+ struct irdma_cq_mr *cqmr_buf;
+ struct irdma_pbl *iwpbl_buf;
+ struct irdma_device *iwdev;
+ struct irdma_pci_f *rf;
+ struct irdma_cq_buf *cq_buf = NULL;
+ unsigned long flags;
+ int ret;
+
+ iwdev = to_iwdev(ibcq->device);
+ rf = iwdev->rf;
+
+ if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
+ IRDMA_FEATURE_CQ_RESIZE))
+ return -EOPNOTSUPP;
+
+ if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN)
+ return -EINVAL;
+
+ if (entries > rf->max_cqe)
+ return -EINVAL;
+
+ if (!iwcq->user_mode) {
+ entries++;
+
+ if (!iwcq->sc_cq.cq_uk.avoid_mem_cflct &&
+ dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ entries *= 2;
+
+ if (entries & 1)
+ entries += 1; /* cq size must be an even number */
+ }
+
+ info.cq_size = max(entries, 4);
+
+ if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1)
+ return 0;
+
+ if (udata) {
+ struct irdma_resize_cq_req req = {};
+ struct irdma_ucontext *ucontext =
+ rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+
+ /* CQ resize not supported with legacy GEN_1 libi40iw */
+ if (ucontext->legacy_mode)
+ return -EOPNOTSUPP;
+
+ if (ib_copy_from_udata(&req, udata,
+ min(sizeof(req), udata->inlen)))
+ return -EINVAL;
+
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ iwpbl_buf = irdma_get_pbl((unsigned long)req.user_cq_buffer,
+ &ucontext->cq_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+
+ if (!iwpbl_buf)
+ return -ENOMEM;
+
+ cqmr_buf = &iwpbl_buf->cq_mr;
+ if (iwpbl_buf->pbl_allocated) {
+ info.virtual_map = true;
+ info.pbl_chunk_size = 1;
+ info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx;
+ } else {
+ info.cq_pa = cqmr_buf->cq_pbl.addr;
+ }
+ } else {
+ /* Kmode CQ resize */
+ int rsize;
+
+ rsize = info.cq_size * sizeof(struct irdma_cqe);
+ kmem_buf.size = ALIGN(round_up(rsize, 256), 256);
+ kmem_buf.va = dma_alloc_coherent(dev->hw->device,
+ kmem_buf.size, &kmem_buf.pa,
+ GFP_KERNEL);
+ if (!kmem_buf.va)
+ return -ENOMEM;
+
+ info.cq_base = kmem_buf.va;
+ info.cq_pa = kmem_buf.pa;
+ cq_buf = kzalloc(sizeof(*cq_buf), GFP_KERNEL);
+ if (!cq_buf) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ }
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold;
+ info.cq_resize = true;
+
+ cqp_info = &cqp_request->info;
+ m_info = &cqp_info->in.u.cq_modify.info;
+ memcpy(m_info, &info, sizeof(*m_info));
+
+ cqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY;
+ cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;
+ cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;
+ cqp_info->post_sq = 1;
+ ret = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ if (ret)
+ goto error;
+
+ spin_lock_irqsave(&iwcq->lock, flags);
+ if (cq_buf) {
+ cq_buf->kmem_buf = iwcq->kmem;
+ cq_buf->hw = dev->hw;
+ memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk));
+ INIT_WORK(&cq_buf->work, irdma_free_cqbuf);
+ list_add_tail(&cq_buf->list, &iwcq->resize_list);
+ iwcq->kmem = kmem_buf;
+ }
+
+ irdma_sc_cq_resize(&iwcq->sc_cq, &info);
+ ibcq->cqe = info.cq_size - 1;
+ spin_unlock_irqrestore(&iwcq->lock, flags);
+
+ return 0;
+error:
+ if (!udata) {
+ dma_free_coherent(dev->hw->device, kmem_buf.size, kmem_buf.va,
+ kmem_buf.pa);
+ kmem_buf.va = NULL;
+ }
+ kfree(cq_buf);
+
+ return ret;
+}
+
+/**
+ * irdma_srq_event - event notification for srq limit
+ * @srq: shared srq struct
+ */
+void irdma_srq_event(struct irdma_sc_srq *srq)
+{
+ struct irdma_srq *iwsrq = container_of(srq, struct irdma_srq, sc_srq);
+ struct ib_srq *ibsrq = &iwsrq->ibsrq;
+ struct ib_event event;
+
+ srq->srq_limit = 0;
+
+ if (!ibsrq->event_handler)
+ return;
+
+ event.device = ibsrq->device;
+ event.element.port_num = 1;
+ event.element.srq = ibsrq;
+ event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ ibsrq->event_handler(&event, ibsrq->srq_context);
+}
+
+/**
+ * irdma_modify_srq - modify srq request
+ * @ibsrq: pointer to the srq being modified
+ * @attr: srq attributes to modify
+ * @attr_mask: mask of attributes to modify
+ * @udata: user data
+ */
+static int irdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask,
+ struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(ibsrq->device);
+ struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
+ struct irdma_cqp_request *cqp_request;
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_modify_srq_info *info;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+
+ if (attr_mask & IB_SRQ_MAX_WR)
+ return -EINVAL;
+
+ if (!(attr_mask & IB_SRQ_LIMIT))
+ return 0;
+
+ if (attr->srq_limit > iwsrq->sc_srq.srq_uk.srq_size)
+ return -EINVAL;
+
+ /* Execute this cqp op synchronously, so we can update srq_limit
+ * upon successful completion.
+ */
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.srq_modify.info;
+ info->srq_limit = attr->srq_limit;
+ if (info->srq_limit > 0xFFF)
+ info->srq_limit = 0xFFF;
+ info->arm_limit_event = 1;
+
+ cqp_info->cqp_cmd = IRDMA_OP_SRQ_MODIFY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.srq_modify.srq = &iwsrq->sc_srq;
+ cqp_info->in.u.srq_modify.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ if (status)
+ return status;
+
+ iwsrq->sc_srq.srq_limit = info->srq_limit;
+
+ return 0;
+}
+
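+/**
+ * irdma_setup_umode_srq - set up user-mode srq from udata
+ * @iwdev: irdma device
+ * @iwsrq: srq being created
+ * @info: srq init info to fill
+ * @udata: user data with the registered srq buffer
+ */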
+static int irdma_setup_umode_srq(struct irdma_device *iwdev,
+ struct irdma_srq *iwsrq,
+ struct irdma_srq_init_info *info,
+ struct ib_udata *udata)
+{
+#define IRDMA_CREATE_SRQ_MIN_REQ_LEN \
+ offsetofend(struct irdma_create_srq_req, user_shadow_area)
+ struct irdma_create_srq_req req = {};
+ struct irdma_ucontext *ucontext;
+ struct irdma_srq_mr *srqmr;
+ struct irdma_pbl *iwpbl;
+ unsigned long flags;
+
+ iwsrq->user_mode = true;
+ ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+
+ if (udata->inlen < IRDMA_CREATE_SRQ_MIN_REQ_LEN)
+ return -EINVAL;
+
+ if (ib_copy_from_udata(&req, udata,
+ min(sizeof(req), udata->inlen)))
+ return -EFAULT;
+
+ spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags);
+ iwpbl = irdma_get_pbl((unsigned long)req.user_srq_buf,
+ &ucontext->srq_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags);
+ if (!iwpbl)
+ return -EPROTO;
+
+ iwsrq->iwpbl = iwpbl;
+ srqmr = &iwpbl->srq_mr;
+
+ if (iwpbl->pbl_allocated) {
+ info->virtual_map = true;
+ info->pbl_chunk_size = 1;
+ info->first_pm_pbl_idx = srqmr->srq_pbl.idx;
+ info->leaf_pbl_size = 1;
+ } else {
+ info->srq_pa = srqmr->srq_pbl.addr;
+ }
+ info->shadow_area_pa = srqmr->shadow;
+
+ return 0;
+}
+
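+/**
+ * irdma_setup_kmode_srq - allocate kernel-mode srq ring and shadow area
+ * @iwdev: irdma device
+ * @iwsrq: srq being created
+ * @info: srq init info to fill
+ * @depth: srq ring depth
+ * @shift: wqe size shift
+ */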
+static int irdma_setup_kmode_srq(struct irdma_device *iwdev,
+ struct irdma_srq *iwsrq,
+ struct irdma_srq_init_info *info, u32 depth,
+ u8 shift)
+{
+ struct irdma_srq_uk_init_info *ukinfo = &info->srq_uk_init_info;
+ struct irdma_dma_mem *mem = &iwsrq->kmem;
+ u32 size, ring_size;
+
+ ring_size = depth * IRDMA_QP_WQE_MIN_SIZE;
+ size = ring_size + (IRDMA_SHADOW_AREA_SIZE << 3);
+
+ mem->size = ALIGN(size, 256);
+ mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size,
+ &mem->pa, GFP_KERNEL);
+ if (!mem->va)
+ return -ENOMEM;
+
+ ukinfo->srq = mem->va;
+ ukinfo->srq_size = depth >> shift;
+ ukinfo->shadow_area = mem->va + ring_size;
+
+ info->srq_pa = mem->pa;
+ info->shadow_area_pa = info->srq_pa + ring_size;
+
+ return 0;
+}
+
+/**
+ * irdma_create_srq - create srq
+ * @ibsrq: ib's srq pointer
+ * @initattrs: attributes for srq
+ * @udata: user data for create srq
+ */
+static int irdma_create_srq(struct ib_srq *ibsrq,
+ struct ib_srq_init_attr *initattrs,
+ struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(ibsrq->device);
+ struct ib_srq_attr *attr = &initattrs->attr;
+ struct irdma_pd *iwpd = to_iwpd(ibsrq->pd);
+ struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
+ struct irdma_srq_uk_init_info *ukinfo;
+ struct irdma_cqp_request *cqp_request;
+ struct irdma_srq_init_info info = {};
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_uk_attrs *uk_attrs;
+ struct cqp_cmds_info *cqp_info;
+ int err_code = 0;
+ u32 depth;
+ u8 shift;
+
+ uk_attrs = &rf->sc_dev.hw_attrs.uk_attrs;
+ ukinfo = &info.srq_uk_init_info;
+
+ if (initattrs->srq_type != IB_SRQT_BASIC)
+ return -EOPNOTSUPP;
+
+ if (!(uk_attrs->feature_flags & IRDMA_FEATURE_SRQ) ||
+ attr->max_sge > uk_attrs->max_hw_wq_frags)
+ return -EINVAL;
+
+ refcount_set(&iwsrq->refcnt, 1);
+ spin_lock_init(&iwsrq->lock);
+ err_code = irdma_alloc_rsrc(rf, rf->allocated_srqs, rf->max_srq,
+ &iwsrq->srq_num, &rf->next_srq);
+ if (err_code)
+ return err_code;
+
+ ukinfo->max_srq_frag_cnt = attr->max_sge;
+ ukinfo->uk_attrs = uk_attrs;
+ ukinfo->srq_id = iwsrq->srq_num;
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_srq_frag_cnt, 0,
+ &shift);
+
+ err_code = irdma_get_srqdepth(ukinfo->uk_attrs, attr->max_wr,
+ shift, &depth);
+ if (err_code)
+ return err_code;
+
+ /* Actual SRQ size in WRs for ring and HW */
+ ukinfo->srq_size = depth >> shift;
+
+ /* Max postable WRs to SRQ */
+ iwsrq->max_wr = (depth - IRDMA_RQ_RSVD) >> shift;
+ attr->max_wr = iwsrq->max_wr;
+
+ if (udata)
+ err_code = irdma_setup_umode_srq(iwdev, iwsrq, &info, udata);
+ else
+ err_code = irdma_setup_kmode_srq(iwdev, iwsrq, &info, depth,
+ shift);
+
+ if (err_code)
+ goto free_rsrc;
+
+ info.vsi = &iwdev->vsi;
+ info.pd = &iwpd->sc_pd;
+
+ err_code = irdma_sc_srq_init(&iwsrq->sc_srq, &info);
+ if (err_code)
+ goto free_dmem;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request) {
+ err_code = -ENOMEM;
+ goto free_dmem;
+ }
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_SRQ_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.srq_create.srq = &iwsrq->sc_srq;
+ cqp_info->in.u.srq_create.scratch = (uintptr_t)cqp_request;
+ err_code = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ if (err_code)
+ goto free_dmem;
+
+ if (udata) {
+ struct irdma_create_srq_resp resp = {};
+
+ resp.srq_id = iwsrq->srq_num;
+ resp.srq_size = ukinfo->srq_size;
+ if (ib_copy_to_udata(udata, &resp,
+ min(sizeof(resp), udata->outlen))) {
+ err_code = -EPROTO;
+ goto srq_destroy;
+ }
+ }
+
+ return 0;
+
+srq_destroy:
+ irdma_srq_wq_destroy(rf, &iwsrq->sc_srq);
+
+free_dmem:
+ if (!iwsrq->user_mode)
+ dma_free_coherent(rf->hw.device, iwsrq->kmem.size,
+ iwsrq->kmem.va, iwsrq->kmem.pa);
+free_rsrc:
+ irdma_free_rsrc(rf, rf->allocated_srqs, iwsrq->srq_num);
+ return err_code;
+}
+
+/**
+ * irdma_query_srq - get SRQ attributes
+ * @ibsrq: the SRQ to query
+ * @attr: the attributes of the SRQ
+ */
+static int irdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+{
+ struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
+
+ attr->max_wr = iwsrq->max_wr;
+ attr->max_sge = iwsrq->sc_srq.srq_uk.max_srq_frag_cnt;
+ attr->srq_limit = iwsrq->sc_srq.srq_limit;
+
+ return 0;
+}
+
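+/**
+ * cq_validate_flags - validate cq create flags for the hw generation
+ * @flags: cq create flags
+ * @hw_rev: hw revision
+ */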
+static inline int cq_validate_flags(u32 flags, u8 hw_rev)
+{
+ /* GEN1/2 does not support CQ create flags */
+ if (hw_rev <= IRDMA_GEN_2)
+ return flags ? -EOPNOTSUPP : 0;
+
+ return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : 0;
+}
+
+/**
+ * irdma_create_cq - create cq
+ * @ibcq: CQ allocated
+ * @attr: attributes for cq
+ * @attrs: uverbs attribute bundle
+ */
+static int irdma_create_cq(struct ib_cq *ibcq,
+ const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+#define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
+#define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
+ struct ib_udata *udata = &attrs->driver_udata;
+ struct ib_device *ibdev = ibcq->device;
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_cq *iwcq = to_iwcq(ibcq);
+ u32 cq_num = 0;
+ struct irdma_sc_cq *cq;
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_cq_init_info info = {};
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
+ unsigned long flags;
+ int err_code;
+ int entries = attr->cqe;
+ bool cqe_64byte_ena;
+
+ err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
+ if (err_code)
+ return err_code;
+
+ if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
+ udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
+ return -EINVAL;
+
+ err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
+ &rf->next_cq);
+ if (err_code)
+ return err_code;
+
+ cq = &iwcq->sc_cq;
+ cq->back_cq = iwcq;
+ refcount_set(&iwcq->refcnt, 1);
+ spin_lock_init(&iwcq->lock);
+ INIT_LIST_HEAD(&iwcq->resize_list);
+ INIT_LIST_HEAD(&iwcq->cmpl_generated);
+ info.dev = dev;
+ ukinfo->cq_size = max(entries, 4);
+ ukinfo->cq_id = cq_num;
+ cqe_64byte_ena = dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_64_BYTE_CQE ?
+ true : false;
+ ukinfo->avoid_mem_cflct = cqe_64byte_ena;
+ iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
+ if (attr->comp_vector < rf->ceqs_count)
+ info.ceq_id = attr->comp_vector;
+ info.ceq_id_valid = true;
+ info.ceqe_mask = 1;
+ info.type = IRDMA_CQ_TYPE_IWARP;
+ info.vsi = &iwdev->vsi;
+
+ if (udata) {
+ struct irdma_ucontext *ucontext;
+ struct irdma_create_cq_req req = {};
+ struct irdma_cq_mr *cqmr;
+ struct irdma_pbl *iwpbl;
+ struct irdma_pbl *iwpbl_shadow;
+ struct irdma_cq_mr *cqmr_shadow;
+
+ iwcq->user_mode = true;
+ ucontext =
+ rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+ if (ib_copy_from_udata(&req, udata,
+ min(sizeof(req), udata->inlen))) {
+ err_code = -EFAULT;
+ goto cq_free_rsrc;
+ }
+
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ iwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf,
+ &ucontext->cq_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+ if (!iwpbl) {
+ err_code = -EPROTO;
+ goto cq_free_rsrc;
+ }
+
+ cqmr = &iwpbl->cq_mr;
+
+ if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
+ IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) {
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ iwpbl_shadow = irdma_get_pbl(
+ (unsigned long)req.user_shadow_area,
+ &ucontext->cq_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+
+ if (!iwpbl_shadow) {
+ err_code = -EPROTO;
+ goto cq_free_rsrc;
+ }
+ cqmr_shadow = &iwpbl_shadow->cq_mr;
+ info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
+ cqmr->split = true;
+ } else {
+ info.shadow_area_pa = cqmr->shadow;
+ }
+ if (iwpbl->pbl_allocated) {
+ info.virtual_map = true;
+ info.pbl_chunk_size = 1;
+ info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
+ } else {
+ info.cq_base_pa = cqmr->cq_pbl.addr;
+ }
+ } else {
+ /* Kmode allocations */
+ int rsize;
+
+ if (entries < 1 || entries > rf->max_cqe) {
+ err_code = -EINVAL;
+ goto cq_free_rsrc;
+ }
+
+ entries++;
+ if (!cqe_64byte_ena && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ entries *= 2;
+
+ if (entries & 1)
+ entries += 1; /* cq size must be an even number */
+
+ ukinfo->cq_size = entries;
+
+ if (cqe_64byte_ena)
+ rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_extended_cqe);
+ else
+ rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
+ iwcq->kmem.size = ALIGN(round_up(rsize, 256), 256);
+ iwcq->kmem.va = dma_alloc_coherent(dev->hw->device,
+ iwcq->kmem.size,
+ &iwcq->kmem.pa, GFP_KERNEL);
+ if (!iwcq->kmem.va) {
+ err_code = -ENOMEM;
+ goto cq_free_rsrc;
+ }
+
+ iwcq->kmem_shadow.size = ALIGN(IRDMA_SHADOW_AREA_SIZE << 3,
+ 64);
+ iwcq->kmem_shadow.va = dma_alloc_coherent(dev->hw->device,
+ iwcq->kmem_shadow.size,
+ &iwcq->kmem_shadow.pa,
+ GFP_KERNEL);
+ if (!iwcq->kmem_shadow.va) {
+ err_code = -ENOMEM;
+ goto cq_free_rsrc;
+ }
+ info.shadow_area_pa = iwcq->kmem_shadow.pa;
+ ukinfo->shadow_area = iwcq->kmem_shadow.va;
+ ukinfo->cq_base = iwcq->kmem.va;
+ info.cq_base_pa = iwcq->kmem.pa;
+ }
+
+ info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
+ (u32)IRDMA_MAX_CQ_READ_THRESH);
+
+ if (irdma_sc_cq_init(cq, &info)) {
+ ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n");
+ err_code = -EPROTO;
+ goto cq_free_rsrc;
+ }
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request) {
+ err_code = -ENOMEM;
+ goto cq_free_rsrc;
+ }
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.cq_create.cq = cq;
+ cqp_info->in.u.cq_create.check_overflow = true;
+ cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
+ err_code = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ if (err_code)
+ goto cq_free_rsrc;
+
+ if (udata) {
+ struct irdma_create_cq_resp resp = {};
+
+ resp.cq_id = info.cq_uk_init_info.cq_id;
+ resp.cq_size = info.cq_uk_init_info.cq_size;
+ if (ib_copy_to_udata(udata, &resp,
+ min(sizeof(resp), udata->outlen))) {
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: copy to user data\n");
+ err_code = -EPROTO;
+ goto cq_destroy;
+ }
+ }
+ rf->cq_table[cq_num] = iwcq;
+ init_completion(&iwcq->free_cq);
+
+ return 0;
+cq_destroy:
+ irdma_cq_wq_destroy(rf, cq);
+cq_free_rsrc:
+ irdma_cq_free_rsrc(rf, iwcq);
+
+ return err_code;
+}
+
+/**
+ * irdma_get_mr_access - get hw MR access permissions from IB access flags
+ * @access: IB access flags
+ * @hw_rev: Hardware version
+ */
+static inline u16 irdma_get_mr_access(int access, u8 hw_rev)
+{
+ u16 hw_access = 0;
+
+ hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ?
+ IRDMA_ACCESS_FLAGS_LOCALWRITE : 0;
+ hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ?
+ IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;
+ hw_access |= (access & IB_ACCESS_REMOTE_READ) ?
+ IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;
+ if (hw_rev >= IRDMA_GEN_3) {
+ hw_access |= (access & IB_ACCESS_MW_BIND) ?
+ IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
+ }
+ hw_access |= (access & IB_ZERO_BASED) ?
+ IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
+ hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;
+
+ return hw_access;
+}
+
+/**
+ * irdma_free_stag - free stag resource
+ * @iwdev: irdma device
+ * @stag: stag to free
+ */
+static void irdma_free_stag(struct irdma_device *iwdev, u32 stag)
+{
+ u32 stag_idx;
+
+ stag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S;
+ irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx);
+}
+
+/**
+ * irdma_create_stag - create random stag
+ * @iwdev: irdma device
+ */
+static u32 irdma_create_stag(struct irdma_device *iwdev)
+{
+ u32 stag = 0;
+ u32 stag_index = 0;
+ u32 next_stag_index;
+ u32 driver_key;
+ u32 random;
+ u8 consumer_key;
+ int ret;
+
+ get_random_bytes(&random, sizeof(random));
+ consumer_key = (u8)random;
+
+ driver_key = random & ~iwdev->rf->mr_stagmask;
+ next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8;
+ next_stag_index %= iwdev->rf->max_mr;
+
+ ret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs,
+ iwdev->rf->max_mr, &stag_index,
+ &next_stag_index);
+ if (ret)
+ return stag;
+ stag = stag_index << IRDMA_CQPSQ_STAG_IDX_S;
+ stag |= driver_key;
+ stag += (u32)consumer_key;
+
+ return stag;
+}
+
+/**
+ * irdma_next_pbl_addr - Get next pbl address
+ * @pbl: pointer to a pble
+ * @pinfo: info pointer
+ * @idx: index
+ */
+static inline u64 *irdma_next_pbl_addr(u64 *pbl, struct irdma_pble_info **pinfo,
+ u32 *idx)
+{
+ *idx += 1;
+ if (!(*pinfo) || *idx != (*pinfo)->cnt)
+ return ++pbl;
+ *idx = 0;
+ (*pinfo)++;
+
+ return (*pinfo)->addr;
+}
+
+/**
+ * irdma_copy_user_pgaddrs - copy user page addresses into the pbles
+ * @iwmr: iwmr for IB's user page addresses
+ * @pbl: pble pointer to save 1 level or 0 level pble
+ * @level: indicates level 0, 1 or 2
+ */
+static void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
+ enum irdma_pble_level level)
+{
+ struct ib_umem *region = iwmr->region;
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
+ struct irdma_pble_info *pinfo;
+ struct ib_block_iter biter;
+ u32 idx = 0;
+ u32 pbl_cnt = 0;
+
+ pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;
+
+ if (iwmr->type == IRDMA_MEMREG_TYPE_QP)
+ iwpbl->qp_mr.sq_page = sg_page(region->sgt_append.sgt.sgl);
+
+ rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
+ *pbl = rdma_block_iter_dma_address(&biter);
+ if (++pbl_cnt == palloc->total_cnt)
+ break;
+ pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx);
+ }
+}
+
+/**
+ * irdma_check_mem_contiguous - check if pbls stored in arr are contiguous
+ * @arr: lvl1 pbl array
+ * @npages: page count
+ * @pg_size: page size
+ */
+static bool irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
+{
+ u32 pg_idx;
+
+ for (pg_idx = 0; pg_idx < npages; pg_idx++) {
+ if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * irdma_check_mr_contiguous - check if MR is physically contiguous
+ * @palloc: pbl allocation struct
+ * @pg_size: page size
+ */
+static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
+ u32 pg_size)
+{
+ struct irdma_pble_level2 *lvl2 = &palloc->level2;
+ struct irdma_pble_info *leaf = lvl2->leaf;
+ u64 *arr = NULL;
+ u64 *start_addr = NULL;
+ int i;
+ bool ret;
+
+ if (palloc->level == PBLE_LEVEL_1) {
+ arr = palloc->level1.addr;
+ ret = irdma_check_mem_contiguous(arr, palloc->total_cnt,
+ pg_size);
+ return ret;
+ }
+
+ start_addr = leaf->addr;
+
+ for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
+ arr = leaf->addr;
+ if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
+ return false;
+ ret = irdma_check_mem_contiguous(arr, leaf->cnt, pg_size);
+ if (!ret)
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * irdma_setup_pbles - copy user page addresses to pbles
+ * @rf: RDMA PCI function
+ * @iwmr: mr pointer for this memory registration
+ * @lvl: requested pble levels
+ */
+static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
+ u8 lvl)
+{
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
+ struct irdma_pble_info *pinfo;
+ u64 *pbl;
+ int status;
+ enum irdma_pble_level level = PBLE_LEVEL_1;
+
+ if (lvl) {
+ status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
+ lvl);
+ if (status)
+ return status;
+
+ iwpbl->pbl_allocated = true;
+ level = palloc->level;
+ pinfo = (level == PBLE_LEVEL_1) ? &palloc->level1 :
+ palloc->level2.leaf;
+ pbl = pinfo->addr;
+ } else {
+ pbl = iwmr->pgaddrmem;
+ }
+
+ irdma_copy_user_pgaddrs(iwmr, pbl, level);
+
+ if (lvl)
+ iwmr->pgaddrmem[0] = *pbl;
+
+ return 0;
+}
+
+/**
+ * irdma_handle_q_mem - handle memory for qp and cq
+ * @iwdev: irdma device
+ * @req: information for q memory management
+ * @iwpbl: pble struct
+ * @lvl: pble level mask
+ */
+static int irdma_handle_q_mem(struct irdma_device *iwdev,
+ struct irdma_mem_reg_req *req,
+ struct irdma_pbl *iwpbl, u8 lvl)
+{
+ struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
+ struct irdma_mr *iwmr = iwpbl->iwmr;
+ struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
+ struct irdma_cq_mr *cqmr = &iwpbl->cq_mr;
+ struct irdma_srq_mr *srqmr = &iwpbl->srq_mr;
+ struct irdma_hmc_pble *hmc_p;
+ u64 *arr = iwmr->pgaddrmem;
+ u32 pg_size, total;
+ int err = 0;
+ bool ret = true;
+
+ pg_size = iwmr->page_size;
+ err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
+ if (err)
+ return err;
+
+ if (lvl)
+ arr = palloc->level1.addr;
+
+ switch (iwmr->type) {
+ case IRDMA_MEMREG_TYPE_QP:
+ total = req->sq_pages + req->rq_pages;
+ hmc_p = &qpmr->sq_pbl;
+ qpmr->shadow = (dma_addr_t)arr[total];
+ /* Need to use physical address for RQ of QP
+ * in case it is associated with SRQ.
+ */
+ qpmr->rq_pa = (dma_addr_t)arr[req->sq_pages];
+ if (lvl) {
+ ret = irdma_check_mem_contiguous(arr, req->sq_pages,
+ pg_size);
+ if (ret)
+ ret = irdma_check_mem_contiguous(&arr[req->sq_pages],
+ req->rq_pages,
+ pg_size);
+ }
+
+ if (!ret) {
+ hmc_p->idx = palloc->level1.idx;
+ hmc_p = &qpmr->rq_pbl;
+ hmc_p->idx = palloc->level1.idx + req->sq_pages;
+ } else {
+ hmc_p->addr = arr[0];
+ hmc_p = &qpmr->rq_pbl;
+ hmc_p->addr = arr[req->sq_pages];
+ }
+ break;
+ case IRDMA_MEMREG_TYPE_SRQ:
+ hmc_p = &srqmr->srq_pbl;
+ srqmr->shadow = (dma_addr_t)arr[req->rq_pages];
+ if (lvl)
+ ret = irdma_check_mem_contiguous(arr, req->rq_pages,
+ pg_size);
+
+ if (!ret)
+ hmc_p->idx = palloc->level1.idx;
+ else
+ hmc_p->addr = arr[0];
+ break;
+ case IRDMA_MEMREG_TYPE_CQ:
+ hmc_p = &cqmr->cq_pbl;
+
+ if (!cqmr->split)
+ cqmr->shadow = (dma_addr_t)arr[req->cq_pages];
+
+ if (lvl)
+ ret = irdma_check_mem_contiguous(arr, req->cq_pages,
+ pg_size);
+
+ if (!ret)
+ hmc_p->idx = palloc->level1.idx;
+ else
+ hmc_p->addr = arr[0];
+ break;
+ default:
+ ibdev_dbg(&iwdev->ibdev, "VERBS: MR type error\n");
+ err = -EINVAL;
+ }
+
+ if (lvl && ret) {
+ irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
+ iwpbl->pbl_allocated = false;
+ }
+
+ return err;
+}
+
+/**
+ * irdma_hw_alloc_mw - create the hw memory window
+ * @iwdev: irdma device
+ * @iwmr: pointer to memory window info
+ */
+static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
+{
+ struct irdma_mw_alloc_info *info;
+ struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.mw_alloc.info;
+ memset(info, 0, sizeof(*info));
+ if (iwmr->ibmw.type == IB_MW_TYPE_1)
+ info->mw_wide = true;
+
+ info->page_size = PAGE_SIZE;
+ info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
+ info->pd_id = iwpd->sc_pd.pd_id;
+ info->remote_access = true;
+ cqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev;
+ cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_alloc_mw - Allocate memory window
+ * @ibmw: Memory Window
+ * @udata: user data pointer
+ */
+static int irdma_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(ibmw->device);
+ struct irdma_mr *iwmr = to_iwmw(ibmw);
+ int err_code;
+ u32 stag;
+
+ stag = irdma_create_stag(iwdev);
+ if (!stag)
+ return -ENOMEM;
+
+ iwmr->stag = stag;
+ ibmw->rkey = stag;
+
+ err_code = irdma_hw_alloc_mw(iwdev, iwmr);
+ if (err_code) {
+ irdma_free_stag(iwdev, stag);
+ return err_code;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_dealloc_mw - Dealloc memory window
+ * @ibmw: memory window structure.
+ */
+static int irdma_dealloc_mw(struct ib_mw *ibmw)
+{
+ struct ib_pd *ibpd = ibmw->pd;
+ struct irdma_pd *iwpd = to_iwpd(ibpd);
+ struct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw);
+ struct irdma_device *iwdev = to_iwdev(ibmw->device);
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_dealloc_stag_info *info;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.dealloc_stag.info;
+ memset(info, 0, sizeof(*info));
+ info->pd_id = iwpd->sc_pd.pd_id;
+ info->stag_idx = ibmw->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
+ info->mr = false;
+ cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
+ cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
+ irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+ irdma_free_stag(iwdev, iwmr->stag);
+
+ return 0;
+}
+
+/**
+ * irdma_hw_alloc_stag - cqp command to allocate stag
+ * @iwdev: irdma device
+ * @iwmr: irdma mr pointer
+ */
+static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
+ struct irdma_mr *iwmr)
+{
+ struct irdma_allocate_stag_info *info;
+ struct ib_pd *pd = iwmr->ibmr.pd;
+ struct irdma_pd *iwpd = to_iwpd(pd);
+ int status;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.alloc_stag.info;
+ memset(info, 0, sizeof(*info));
+ info->page_size = PAGE_SIZE;
+ info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
+ info->pd_id = iwpd->sc_pd.pd_id;
+ info->total_len = iwmr->len;
+ info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
+ info->remote_access = true;
+ cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev;
+ cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+ if (status)
+ return status;
+
+ iwmr->is_hwreg = 1;
+ return 0;
+}
+
+/**
+ * irdma_alloc_mr - register stag for fast memory registration
+ * @pd: ibpd pointer
+ * @mr_type: type of memory registration
+ * @max_num_sg: max number of pages
+ */
+static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ struct irdma_device *iwdev = to_iwdev(pd->device);
+ struct irdma_pble_alloc *palloc;
+ struct irdma_pbl *iwpbl;
+ struct irdma_mr *iwmr;
+ u32 stag;
+ int err_code;
+
+ iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
+ if (!iwmr)
+ return ERR_PTR(-ENOMEM);
+
+ stag = irdma_create_stag(iwdev);
+ if (!stag) {
+ err_code = -ENOMEM;
+ goto err;
+ }
+
+ iwmr->stag = stag;
+ iwmr->ibmr.rkey = stag;
+ iwmr->ibmr.lkey = stag;
+ iwmr->ibmr.pd = pd;
+ iwmr->ibmr.device = pd->device;
+ iwpbl = &iwmr->iwpbl;
+ iwpbl->iwmr = iwmr;
+ iwmr->type = IRDMA_MEMREG_TYPE_MEM;
+ palloc = &iwpbl->pble_alloc;
+ iwmr->page_cnt = max_num_sg;
+ /* Use system PAGE_SIZE as the sg page sizes are unknown at this point */
+ iwmr->len = max_num_sg * PAGE_SIZE;
+ err_code = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
+ false);
+ if (err_code)
+ goto err_get_pble;
+
+ err_code = irdma_hw_alloc_stag(iwdev, iwmr);
+ if (err_code)
+ goto err_alloc_stag;
+
+ iwpbl->pbl_allocated = true;
+
+ return &iwmr->ibmr;
+err_alloc_stag:
+ irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
+err_get_pble:
+ irdma_free_stag(iwdev, stag);
+err:
+ kfree(iwmr);
+
+ return ERR_PTR(err_code);
+}
+
+/**
+ * irdma_set_page - populate pbl list for fmr
+ * @ibmr: ib mem to access iwarp mr pointer
+ * @addr: page dma address for the pbl list
+ */
+static int irdma_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct irdma_mr *iwmr = to_iwmr(ibmr);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
+ u64 *pbl;
+
+ if (unlikely(iwmr->npages == iwmr->page_cnt))
+ return -ENOMEM;
+
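+ /* For a two-level PBLE tree, npages >> PBLE_512_SHIFT selects the leaf
+ * and the low bits select the slot within it (e.g. assuming 512 entries
+ * per leaf, page 600 lands in leaf 1, slot 88); a level-1 allocation is a
+ * single flat list.
+ */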
+ if (palloc->level == PBLE_LEVEL_2) {
+ struct irdma_pble_info *palloc_info =
+ palloc->level2.leaf + (iwmr->npages >> PBLE_512_SHIFT);
+
+ palloc_info->addr[iwmr->npages & (PBLE_PER_PAGE - 1)] = addr;
+ } else {
+ pbl = palloc->level1.addr;
+ pbl[iwmr->npages] = addr;
+ }
+ iwmr->npages++;
+
+ return 0;
+}
+
+/**
+ * irdma_map_mr_sg - map an sg list for fmr
+ * @ibmr: ib mem to access iwarp mr pointer
+ * @sg: scatter gather list
+ * @sg_nents: number of sg pages
+ * @sg_offset: starting offset into the first sg element
+ */
+static int irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset)
+{
+ struct irdma_mr *iwmr = to_iwmr(ibmr);
+
+ iwmr->npages = 0;
+
+ return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, irdma_set_page);
+}
+
+/**
+ * irdma_hwreg_mr - send cqp command for memory registration
+ * @iwdev: irdma device
+ * @iwmr: irdma mr pointer
+ * @access: access for MR
+ */
+static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
+ u16 access)
+{
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_reg_ns_stag_info *stag_info;
+ struct ib_pd *pd = iwmr->ibmr.pd;
+ struct irdma_pd *iwpd = to_iwpd(pd);
+ struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int ret;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
+ memset(stag_info, 0, sizeof(*stag_info));
+ stag_info->va = iwpbl->user_base;
+ stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
+ stag_info->stag_key = (u8)iwmr->stag;
+ stag_info->total_len = iwmr->len;
+ stag_info->access_rights = irdma_get_mr_access(access,
+ iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev);
+ if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_ATOMIC_OPS)
+ stag_info->remote_atomics_en = (access & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
+ stag_info->pd_id = iwpd->sc_pd.pd_id;
+ stag_info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
+ if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
+ stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
+ else
+ stag_info->addr_type = IRDMA_ADDR_TYPE_VA_BASED;
+ stag_info->page_size = iwmr->page_size;
+
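+ /* chunk_size tells HW how to interpret the PBL: 1 for a level-1 PBLE
+ * list, 3 for a two-level (root + leaf) tree, and the memset default of 0
+ * for a physically contiguous region addressed via reg_addr_pa.
+ */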
+ if (iwpbl->pbl_allocated) {
+ if (palloc->level == PBLE_LEVEL_1) {
+ stag_info->first_pm_pbl_index = palloc->level1.idx;
+ stag_info->chunk_size = 1;
+ } else {
+ stag_info->first_pm_pbl_index = palloc->level2.root.idx;
+ stag_info->chunk_size = 3;
+ }
+ } else {
+ stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
+ }
+
+ cqp_info->cqp_cmd = IRDMA_OP_MR_REG_NON_SHARED;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev;
+ cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
+ ret = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+
+ if (!ret)
+ iwmr->is_hwreg = 1;
+
+ return ret;
+}
+
+static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access,
+ bool create_stag)
+{
+ struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ u32 stag = 0;
+ u8 lvl;
+ int err;
+
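+ /* A single-page MR needs no PBLE; otherwise request either a level-1 or
+ * level-2 allocation and let the PBLE manager choose.
+ */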
+ lvl = iwmr->page_cnt != 1 ? PBLE_LEVEL_1 | PBLE_LEVEL_2 : PBLE_LEVEL_0;
+
+ err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
+ if (err)
+ return err;
+
+ if (lvl) {
+ err = irdma_check_mr_contiguous(&iwpbl->pble_alloc,
+ iwmr->page_size);
+ if (err) {
+ irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
+ iwpbl->pbl_allocated = false;
+ }
+ }
+
+ if (create_stag) {
+ stag = irdma_create_stag(iwdev);
+ if (!stag) {
+ err = -ENOMEM;
+ goto free_pble;
+ }
+
+ iwmr->stag = stag;
+ iwmr->ibmr.rkey = stag;
+ iwmr->ibmr.lkey = stag;
+ }
+
+ err = irdma_hwreg_mr(iwdev, iwmr, access);
+ if (err)
+ goto err_hwreg;
+
+ return 0;
+
+err_hwreg:
+ if (stag)
+ irdma_free_stag(iwdev, stag);
+
+free_pble:
+ if (iwpbl->pble_alloc.level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
+ irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
+
+ return err;
+}
+
+static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
+ struct ib_pd *pd, u64 virt,
+ enum irdma_memreg_type reg_type)
+{
+ struct irdma_device *iwdev = to_iwdev(pd->device);
+ struct irdma_pbl *iwpbl;
+ struct irdma_mr *iwmr;
+ unsigned long pgsz_bitmap;
+
+ iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
+ if (!iwmr)
+ return ERR_PTR(-ENOMEM);
+
+ iwpbl = &iwmr->iwpbl;
+ iwpbl->iwmr = iwmr;
+ iwmr->region = region;
+ iwmr->ibmr.pd = pd;
+ iwmr->ibmr.device = pd->device;
+ iwmr->ibmr.iova = virt;
+ iwmr->type = reg_type;
+
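+ /* Queue (QP/CQ/SRQ) registrations are restricted to 4K pages; regular
+ * memory registrations may use any page size the device advertises.
+ */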
+ pgsz_bitmap = (reg_type == IRDMA_MEMREG_TYPE_MEM) ?
+ iwdev->rf->sc_dev.hw_attrs.page_size_cap : SZ_4K;
+
+ iwmr->page_size = ib_umem_find_best_pgsz(region, pgsz_bitmap, virt);
+ if (unlikely(!iwmr->page_size)) {
+ kfree(iwmr);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ iwmr->len = region->length;
+ iwpbl->user_base = virt;
+ iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
+
+ return iwmr;
+}
+
+static void irdma_free_iwmr(struct irdma_mr *iwmr)
+{
+ kfree(iwmr);
+}
+
+static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
+ struct ib_udata *udata,
+ struct irdma_mr *iwmr)
+{
+ struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_ucontext *ucontext = NULL;
+ unsigned long flags;
+ u32 total;
+ int err;
+ u8 lvl;
+
+ /* iWarp: Catch page not starting on OS page boundary */
+ if (!rdma_protocol_roce(&iwdev->ibdev, 1) &&
+ ib_umem_offset(iwmr->region))
+ return -EINVAL;
+
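+ /* The umem must cover the SQ pages, the RQ pages and one extra page for
+ * the shadow area.
+ */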
+ total = req.sq_pages + req.rq_pages + 1;
+ if (total > iwmr->page_cnt)
+ return -EINVAL;
+
+ total = req.sq_pages + req.rq_pages;
+ lvl = total > 2 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
+ err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
+ if (err)
+ return err;
+
+ ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+ spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+ list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
+ iwpbl->on_list = true;
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+
+ return 0;
+}
+
+static int irdma_reg_user_mr_type_srq(struct irdma_mem_reg_req req,
+ struct ib_udata *udata,
+ struct irdma_mr *iwmr)
+{
+ struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_ucontext *ucontext;
+ unsigned long flags;
+ u32 total;
+ int err;
+ u8 lvl;
+
+ total = req.rq_pages + IRDMA_SHADOW_PGCNT;
+ if (total > iwmr->page_cnt)
+ return -EINVAL;
+
+ lvl = req.rq_pages > 1 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
+ err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
+ if (err)
+ return err;
+
+ ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+ spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags);
+ list_add_tail(&iwpbl->list, &ucontext->srq_reg_mem_list);
+ iwpbl->on_list = true;
+ spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags);
+
+ return 0;
+}
+
+static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
+ struct ib_udata *udata,
+ struct irdma_mr *iwmr)
+{
+ struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_ucontext *ucontext = NULL;
+ u8 shadow_pgcnt = 1;
+ unsigned long flags;
+ u32 total;
+ int err;
+ u8 lvl;
+
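+ /* Devices with the CQ resize capability register the CQ without an extra
+ * shadow page, so do not require one when validating the umem size.
+ */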
+ if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
+ shadow_pgcnt = 0;
+ total = req.cq_pages + shadow_pgcnt;
+ if (total > iwmr->page_cnt)
+ return -EINVAL;
+
+ lvl = req.cq_pages > 1 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
+ err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
+ if (err)
+ return err;
+
+ ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
+ iwpbl->on_list = true;
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+
+ return 0;
+}
+
+/**
+ * irdma_reg_user_mr - Register a user memory region
+ * @pd: ptr of pd
+ * @start: virtual start address
+ * @len: length of mr
+ * @virt: virtual address
+ * @access: access of mr
+ * @dmah: dma handle
+ * @udata: user data
+ */
+static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
+ u64 virt, int access,
+ struct ib_dmah *dmah,
+ struct ib_udata *udata)
+{
+#define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
+ struct irdma_device *iwdev = to_iwdev(pd->device);
+ struct irdma_mem_reg_req req = {};
+ struct ib_umem *region = NULL;
+ struct irdma_mr *iwmr = NULL;
+ int err;
+
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
+ return ERR_PTR(-EINVAL);
+
+ if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN)
+ return ERR_PTR(-EINVAL);
+
+ region = ib_umem_get(pd->device, start, len, access);
+
+ if (IS_ERR(region)) {
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: Failed to create ib_umem region\n");
+ return (struct ib_mr *)region;
+ }
+
+ if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) {
+ ib_umem_release(region);
+ return ERR_PTR(-EFAULT);
+ }
+
+ iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type);
+ if (IS_ERR(iwmr)) {
+ ib_umem_release(region);
+ return (struct ib_mr *)iwmr;
+ }
+
+ switch (req.reg_type) {
+ case IRDMA_MEMREG_TYPE_QP:
+ err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
+ if (err)
+ goto error;
+
+ break;
+ case IRDMA_MEMREG_TYPE_SRQ:
+ err = irdma_reg_user_mr_type_srq(req, udata, iwmr);
+ if (err)
+ goto error;
+
+ break;
+ case IRDMA_MEMREG_TYPE_CQ:
+ err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
+ if (err)
+ goto error;
+ break;
+ case IRDMA_MEMREG_TYPE_MEM:
+ err = irdma_reg_user_mr_type_mem(iwmr, access, true);
+ if (err)
+ goto error;
+
+ break;
+ default:
+ err = -EINVAL;
+ goto error;
+ }
+
+ return &iwmr->ibmr;
+error:
+ ib_umem_release(region);
+ irdma_free_iwmr(iwmr);
+
+ return ERR_PTR(err);
+}
+
+static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
+ u64 len, u64 virt,
+ int fd, int access,
+ struct ib_dmah *dmah,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct irdma_device *iwdev = to_iwdev(pd->device);
+ struct ib_umem_dmabuf *umem_dmabuf;
+ struct irdma_mr *iwmr;
+ int err;
+
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
+ return ERR_PTR(-EINVAL);
+
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(pd->device, start, len, fd, access);
+ if (IS_ERR(umem_dmabuf)) {
+ ibdev_dbg(&iwdev->ibdev, "Failed to get dmabuf umem[%pe]\n",
+ umem_dmabuf);
+ return ERR_CAST(umem_dmabuf);
+ }
+
+ iwmr = irdma_alloc_iwmr(&umem_dmabuf->umem, pd, virt, IRDMA_MEMREG_TYPE_MEM);
+ if (IS_ERR(iwmr)) {
+ err = PTR_ERR(iwmr);
+ goto err_release;
+ }
+
+ err = irdma_reg_user_mr_type_mem(iwmr, access, true);
+ if (err)
+ goto err_iwmr;
+
+ return &iwmr->ibmr;
+
+err_iwmr:
+ irdma_free_iwmr(iwmr);
+
+err_release:
+ ib_umem_release(&umem_dmabuf->umem);
+
+ return ERR_PTR(err);
+}
+
+static int irdma_hwdereg_mr(struct ib_mr *ib_mr)
+{
+ struct irdma_device *iwdev = to_iwdev(ib_mr->device);
+ struct irdma_mr *iwmr = to_iwmr(ib_mr);
+ struct irdma_pd *iwpd = to_iwpd(ib_mr->pd);
+ struct irdma_dealloc_stag_info *info;
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+
+ /* Skip the HW MR de-register when it has already been de-registered
+ * during an MR re-register and the re-registration fails
+ */
+ if (!iwmr->is_hwreg)
+ return 0;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.dealloc_stag.info;
+ memset(info, 0, sizeof(*info));
+ info->pd_id = iwpd->sc_pd.pd_id;
+ info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
+ info->mr = true;
+ if (iwpbl->pbl_allocated)
+ info->dealloc_pbl = true;
+
+ cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
+ cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+ if (status)
+ return status;
+
+ iwmr->is_hwreg = 0;
+ return 0;
+}
+
+/*
+ * irdma_rereg_mr_trans - Re-register a user MR with a translation change.
+ * @iwmr: ptr of iwmr
+ * @start: virtual start address
+ * @len: length of mr
+ * @virt: virtual address
+ *
+ * Re-register a user memory region when a translation change is requested,
+ * registering the new region while reusing the stag from the original registration.
+ */
+static int irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len,
+ u64 virt)
+{
+ struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct ib_pd *pd = iwmr->ibmr.pd;
+ struct ib_umem *region;
+ int err;
+
+ region = ib_umem_get(pd->device, start, len, iwmr->access);
+ if (IS_ERR(region))
+ return PTR_ERR(region);
+
+ iwmr->region = region;
+ iwmr->ibmr.iova = virt;
+ iwmr->ibmr.pd = pd;
+ iwmr->page_size = ib_umem_find_best_pgsz(region,
+ iwdev->rf->sc_dev.hw_attrs.page_size_cap,
+ virt);
+ if (unlikely(!iwmr->page_size)) {
+ err = -EOPNOTSUPP;
+ goto err;
+ }
+
+ iwmr->len = region->length;
+ iwpbl->user_base = virt;
+ iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
+
+ err = irdma_reg_user_mr_type_mem(iwmr, iwmr->access, false);
+ if (err)
+ goto err;
+
+ return 0;
+
+err:
+ ib_umem_release(region);
+ return err;
+}
+
+/*
+ * irdma_rereg_user_mr - Re-register a user memory region (MR)
+ * @ibmr: ib mem to access iwarp mr pointer
+ * @flags: bit mask indicating which MR attributes are being modified
+ * @start: virtual start address
+ * @len: length of mr
+ * @virt: virtual address
+ * @new_access: bit mask of access flags
+ * @new_pd: ptr of pd
+ * @udata: user data
+ *
+ * Return:
+ * NULL - Success, existing MR updated
+ * ERR_PTR - error occurred
+ */
+static struct ib_mr *irdma_rereg_user_mr(struct ib_mr *ib_mr, int flags,
+ u64 start, u64 len, u64 virt,
+ int new_access, struct ib_pd *new_pd,
+ struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(ib_mr->device);
+ struct irdma_mr *iwmr = to_iwmr(ib_mr);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ int ret;
+
+ if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
+ return ERR_PTR(-EINVAL);
+
+ if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS))
+ return ERR_PTR(-EOPNOTSUPP);
+
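+ /* De-register the MR in HW first; the stag is preserved and re-used when
+ * the region is re-registered below with the updated attributes.
+ */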
+ ret = irdma_hwdereg_mr(ib_mr);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (flags & IB_MR_REREG_ACCESS)
+ iwmr->access = new_access;
+
+ if (flags & IB_MR_REREG_PD) {
+ iwmr->ibmr.pd = new_pd;
+ iwmr->ibmr.device = new_pd->device;
+ }
+
+ if (flags & IB_MR_REREG_TRANS) {
+ if (iwpbl->pbl_allocated) {
+ irdma_free_pble(iwdev->rf->pble_rsrc,
+ &iwpbl->pble_alloc);
+ iwpbl->pbl_allocated = false;
+ }
+ if (iwmr->region) {
+ ib_umem_release(iwmr->region);
+ iwmr->region = NULL;
+ }
+
+ ret = irdma_rereg_mr_trans(iwmr, start, len, virt);
+ } else {
+ ret = irdma_hwreg_mr(iwdev, iwmr, iwmr->access);
+ }
+ if (ret)
+ return ERR_PTR(ret);
+
+ return NULL;
+}
+
+/**
+ * irdma_reg_phys_mr - register kernel physical memory
+ * @pd: ibpd pointer
+ * @addr: physical address of memory to register
+ * @size: size of memory to register
+ * @access: Access rights
+ * @iova_start: start of virtual address for physical buffers
+ */
+struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access,
+ u64 *iova_start)
+{
+ struct irdma_device *iwdev = to_iwdev(pd->device);
+ struct irdma_pbl *iwpbl;
+ struct irdma_mr *iwmr;
+ u32 stag;
+ int ret;
+
+ iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
+ if (!iwmr)
+ return ERR_PTR(-ENOMEM);
+
+ iwmr->ibmr.pd = pd;
+ iwmr->ibmr.device = pd->device;
+ iwpbl = &iwmr->iwpbl;
+ iwpbl->iwmr = iwmr;
+ iwmr->type = IRDMA_MEMREG_TYPE_MEM;
+ iwpbl->user_base = *iova_start;
+ stag = irdma_create_stag(iwdev);
+ if (!stag) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ iwmr->stag = stag;
+ iwmr->ibmr.iova = *iova_start;
+ iwmr->ibmr.rkey = stag;
+ iwmr->ibmr.lkey = stag;
+ iwmr->page_cnt = 1;
+ iwmr->pgaddrmem[0] = addr;
+ iwmr->len = size;
+ iwmr->page_size = SZ_4K;
+ ret = irdma_hwreg_mr(iwdev, iwmr, access);
+ if (ret) {
+ irdma_free_stag(iwdev, stag);
+ goto err;
+ }
+
+ return &iwmr->ibmr;
+
+err:
+ kfree(iwmr);
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * irdma_get_dma_mr - register physical mem
+ * @pd: ptr of pd
+ * @acc: access for memory
+ */
+static struct ib_mr *irdma_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ u64 kva = 0;
+
+ return irdma_reg_phys_mr(pd, 0, 0, acc, &kva);
+}
+
+/**
+ * irdma_del_memlist - Delete pbl list entries for CQ/QP/SRQ
+ * @iwmr: iwmr for IB's user page addresses
+ * @ucontext: ptr to user context
+ */
+static void irdma_del_memlist(struct irdma_mr *iwmr,
+ struct irdma_ucontext *ucontext)
+{
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ unsigned long flags;
+
+ switch (iwmr->type) {
+ case IRDMA_MEMREG_TYPE_CQ:
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ if (iwpbl->on_list) {
+ iwpbl->on_list = false;
+ list_del(&iwpbl->list);
+ }
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+ break;
+ case IRDMA_MEMREG_TYPE_QP:
+ spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+ if (iwpbl->on_list) {
+ iwpbl->on_list = false;
+ list_del(&iwpbl->list);
+ }
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+ break;
+ case IRDMA_MEMREG_TYPE_SRQ:
+ spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags);
+ if (iwpbl->on_list) {
+ iwpbl->on_list = false;
+ list_del(&iwpbl->list);
+ }
+ spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * irdma_dereg_mr - deregister mr
+ * @ib_mr: mr ptr for dereg
+ * @udata: user data
+ */
+static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
+{
+ struct irdma_mr *iwmr = to_iwmr(ib_mr);
+ struct irdma_device *iwdev = to_iwdev(ib_mr->device);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ int ret;
+
+ if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
+ if (iwmr->region) {
+ struct irdma_ucontext *ucontext;
+
+ ucontext = rdma_udata_to_drv_context(udata,
+ struct irdma_ucontext,
+ ibucontext);
+ irdma_del_memlist(iwmr, ucontext);
+ }
+ goto done;
+ }
+
+ ret = irdma_hwdereg_mr(ib_mr);
+ if (ret)
+ return ret;
+
+ irdma_free_stag(iwdev, iwmr->stag);
+done:
+ if (iwpbl->pbl_allocated)
+ irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
+
+ if (iwmr->region)
+ ib_umem_release(iwmr->region);
+
+ kfree(iwmr);
+
+ return 0;
+}
+
+/**
+ * irdma_post_send - post send work requests for a kernel application
+ * @ibqp: qp ptr for wr
+ * @ib_wr: work request ptr
+ * @bad_wr: return of bad wr if err
+ */
+static int irdma_post_send(struct ib_qp *ibqp,
+ const struct ib_send_wr *ib_wr,
+ const struct ib_send_wr **bad_wr)
+{
+ struct irdma_qp *iwqp;
+ struct irdma_qp_uk *ukqp;
+ struct irdma_sc_dev *dev;
+ struct irdma_post_sq_info info;
+ int err = 0;
+ unsigned long flags;
+ bool inv_stag;
+ struct irdma_ah *ah;
+
+ iwqp = to_iwqp(ibqp);
+ ukqp = &iwqp->sc_qp.qp_uk;
+ dev = &iwqp->iwdev->rf->sc_dev;
+
+ spin_lock_irqsave(&iwqp->lock, flags);
+ while (ib_wr) {
+ memset(&info, 0, sizeof(info));
+ inv_stag = false;
+ info.wr_id = (ib_wr->wr_id);
+ if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
+ info.signaled = true;
+ if (ib_wr->send_flags & IB_SEND_FENCE)
+ info.read_fence = true;
+ switch (ib_wr->opcode) {
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ if (unlikely(!(dev->hw_attrs.uk_attrs.feature_flags &
+ IRDMA_FEATURE_ATOMIC_OPS))) {
+ err = -EINVAL;
+ break;
+ }
+ info.op_type = IRDMA_OP_TYPE_ATOMIC_COMPARE_AND_SWAP;
+ info.op.atomic_compare_swap.tagged_offset = ib_wr->sg_list[0].addr;
+ info.op.atomic_compare_swap.remote_tagged_offset =
+ atomic_wr(ib_wr)->remote_addr;
+ info.op.atomic_compare_swap.swap_data_bytes = atomic_wr(ib_wr)->swap;
+ info.op.atomic_compare_swap.compare_data_bytes =
+ atomic_wr(ib_wr)->compare_add;
+ info.op.atomic_compare_swap.stag = ib_wr->sg_list[0].lkey;
+ info.op.atomic_compare_swap.remote_stag = atomic_wr(ib_wr)->rkey;
+ err = irdma_uk_atomic_compare_swap(ukqp, &info, false);
+ break;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ if (unlikely(!(dev->hw_attrs.uk_attrs.feature_flags &
+ IRDMA_FEATURE_ATOMIC_OPS))) {
+ err = -EINVAL;
+ break;
+ }
+ info.op_type = IRDMA_OP_TYPE_ATOMIC_FETCH_AND_ADD;
+ info.op.atomic_fetch_add.tagged_offset = ib_wr->sg_list[0].addr;
+ info.op.atomic_fetch_add.remote_tagged_offset =
+ atomic_wr(ib_wr)->remote_addr;
+ info.op.atomic_fetch_add.fetch_add_data_bytes =
+ atomic_wr(ib_wr)->compare_add;
+ info.op.atomic_fetch_add.stag = ib_wr->sg_list[0].lkey;
+ info.op.atomic_fetch_add.remote_stag =
+ atomic_wr(ib_wr)->rkey;
+ err = irdma_uk_atomic_fetch_add(ukqp, &info, false);
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) {
+ info.imm_data_valid = true;
+ info.imm_data = ntohl(ib_wr->ex.imm_data);
+ } else {
+ err = -EINVAL;
+ break;
+ }
+ fallthrough;
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_INV:
+ if (ib_wr->opcode == IB_WR_SEND ||
+ ib_wr->opcode == IB_WR_SEND_WITH_IMM) {
+ if (ib_wr->send_flags & IB_SEND_SOLICITED)
+ info.op_type = IRDMA_OP_TYPE_SEND_SOL;
+ else
+ info.op_type = IRDMA_OP_TYPE_SEND;
+ } else {
+ if (ib_wr->send_flags & IB_SEND_SOLICITED)
+ info.op_type = IRDMA_OP_TYPE_SEND_SOL_INV;
+ else
+ info.op_type = IRDMA_OP_TYPE_SEND_INV;
+ info.stag_to_inv = ib_wr->ex.invalidate_rkey;
+ }
+
+ info.op.send.num_sges = ib_wr->num_sge;
+ info.op.send.sg_list = ib_wr->sg_list;
+ if (iwqp->ibqp.qp_type == IB_QPT_UD ||
+ iwqp->ibqp.qp_type == IB_QPT_GSI) {
+ ah = to_iwah(ud_wr(ib_wr)->ah);
+ info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
+ info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
+ info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
+ }
+
+ if (ib_wr->send_flags & IB_SEND_INLINE)
+ err = irdma_uk_inline_send(ukqp, &info, false);
+ else
+ err = irdma_uk_send(ukqp, &info, false);
+ break;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) {
+ info.imm_data_valid = true;
+ info.imm_data = ntohl(ib_wr->ex.imm_data);
+ } else {
+ err = -EINVAL;
+ break;
+ }
+ fallthrough;
+ case IB_WR_RDMA_WRITE:
+ if (ib_wr->send_flags & IB_SEND_SOLICITED)
+ info.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL;
+ else
+ info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
+
+ info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
+ info.op.rdma_write.lo_sg_list = ib_wr->sg_list;
+ info.op.rdma_write.rem_addr.addr =
+ rdma_wr(ib_wr)->remote_addr;
+ info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
+ if (ib_wr->send_flags & IB_SEND_INLINE)
+ err = irdma_uk_inline_rdma_write(ukqp, &info, false);
+ else
+ err = irdma_uk_rdma_write(ukqp, &info, false);
+ break;
+ case IB_WR_RDMA_READ_WITH_INV:
+ inv_stag = true;
+ fallthrough;
+ case IB_WR_RDMA_READ:
+ if (ib_wr->num_sge >
+ dev->hw_attrs.uk_attrs.max_hw_read_sges) {
+ err = -EINVAL;
+ break;
+ }
+ info.op_type = IRDMA_OP_TYPE_RDMA_READ;
+ info.op.rdma_read.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
+ info.op.rdma_read.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
+ info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
+ info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
+ err = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
+ break;
+ case IB_WR_LOCAL_INV:
+ info.op_type = IRDMA_OP_TYPE_INV_STAG;
+ info.local_fence = info.read_fence;
+ info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
+ err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
+ break;
+ case IB_WR_REG_MR: {
+ struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
+ struct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
+ struct irdma_fast_reg_stag_info stag_info = {};
+
+ stag_info.signaled = info.signaled;
+ stag_info.read_fence = info.read_fence;
+ stag_info.access_rights =
+ irdma_get_mr_access(reg_wr(ib_wr)->access,
+ dev->hw_attrs.uk_attrs.hw_rev);
+ stag_info.stag_key = reg_wr(ib_wr)->key & 0xff;
+ stag_info.stag_idx = reg_wr(ib_wr)->key >> 8;
+ stag_info.page_size = reg_wr(ib_wr)->mr->page_size;
+ stag_info.wr_id = ib_wr->wr_id;
+ stag_info.addr_type = IRDMA_ADDR_TYPE_VA_BASED;
+ stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
+ stag_info.total_len = iwmr->ibmr.length;
+ stag_info.reg_addr_pa = *palloc->level1.addr;
+ stag_info.first_pm_pbl_index = palloc->level1.idx;
+ stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
+ if (iwmr->npages > IRDMA_MIN_PAGES_PER_FMR)
+ stag_info.chunk_size = 1;
+ err = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info,
+ true);
+ break;
+ }
+ default:
+ err = -EINVAL;
+ ibdev_dbg(&iwqp->iwdev->ibdev,
+ "VERBS: upost_send bad opcode = 0x%x\n",
+ ib_wr->opcode);
+ break;
+ }
+
+ if (err)
+ break;
+ ib_wr = ib_wr->next;
+ }
+
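+ /* Only ring the doorbell while the QP can still process work; once a
+ * flush has been issued, kick the delayed flush worker so the posted WRs
+ * are completed in error instead.
+ */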
+ if (!iwqp->flush_issued) {
+ if (iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS)
+ irdma_uk_qp_post_wr(ukqp);
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ } else {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
+ msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
+ }
+
+ if (err)
+ *bad_wr = ib_wr;
+
+ return err;
+}
+
+/**
+ * irdma_post_srq_recv - post receive wr for kernel application
+ * @ibsrq: ib srq pointer
+ * @ib_wr: work request for receive
+ * @bad_wr: bad wr caused an error
+ */
+static int irdma_post_srq_recv(struct ib_srq *ibsrq,
+ const struct ib_recv_wr *ib_wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
+ struct irdma_srq_uk *uksrq = &iwsrq->sc_srq.srq_uk;
+ struct irdma_post_rq_info post_recv = {};
+ unsigned long flags;
+ int err = 0;
+
+ spin_lock_irqsave(&iwsrq->lock, flags);
+ while (ib_wr) {
+ if (ib_wr->num_sge > uksrq->max_srq_frag_cnt) {
+ err = -EINVAL;
+ goto out;
+ }
+ post_recv.num_sges = ib_wr->num_sge;
+ post_recv.wr_id = ib_wr->wr_id;
+ post_recv.sg_list = ib_wr->sg_list;
+ err = irdma_uk_srq_post_receive(uksrq, &post_recv);
+ if (err)
+ goto out;
+
+ ib_wr = ib_wr->next;
+ }
+
+out:
+ spin_unlock_irqrestore(&iwsrq->lock, flags);
+
+ if (err)
+ *bad_wr = ib_wr;
+
+ return err;
+}
+
+/**
+ * irdma_post_recv - post receive wr for kernel application
+ * @ibqp: ib qp pointer
+ * @ib_wr: work request for receive
+ * @bad_wr: bad wr caused an error
+ */
+static int irdma_post_recv(struct ib_qp *ibqp,
+ const struct ib_recv_wr *ib_wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct irdma_qp *iwqp;
+ struct irdma_qp_uk *ukqp;
+ struct irdma_post_rq_info post_recv = {};
+ unsigned long flags;
+ int err = 0;
+
+ iwqp = to_iwqp(ibqp);
+ ukqp = &iwqp->sc_qp.qp_uk;
+
+ if (ukqp->srq_uk) {
+ *bad_wr = ib_wr;
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&iwqp->lock, flags);
+ while (ib_wr) {
+ post_recv.num_sges = ib_wr->num_sge;
+ post_recv.wr_id = ib_wr->wr_id;
+ post_recv.sg_list = ib_wr->sg_list;
+ err = irdma_uk_post_receive(ukqp, &post_recv);
+ if (err) {
+ ibdev_dbg(&iwqp->iwdev->ibdev,
+ "VERBS: post_recv err %d\n", err);
+ goto out;
+ }
+
+ ib_wr = ib_wr->next;
+ }
+
+out:
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (iwqp->flush_issued)
+ mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
+ msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
+
+ if (err)
+ *bad_wr = ib_wr;
+
+ return err;
+}
+
+/**
+ * irdma_flush_err_to_ib_wc_status - convert a flush error code to an IB wc status
+ * @opcode: iwarp flush code
+ */
+static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode)
+{
+ switch (opcode) {
+ case FLUSH_PROT_ERR:
+ return IB_WC_LOC_PROT_ERR;
+ case FLUSH_REM_ACCESS_ERR:
+ return IB_WC_REM_ACCESS_ERR;
+ case FLUSH_LOC_QP_OP_ERR:
+ return IB_WC_LOC_QP_OP_ERR;
+ case FLUSH_REM_OP_ERR:
+ return IB_WC_REM_OP_ERR;
+ case FLUSH_LOC_LEN_ERR:
+ return IB_WC_LOC_LEN_ERR;
+ case FLUSH_GENERAL_ERR:
+ return IB_WC_WR_FLUSH_ERR;
+ case FLUSH_RETRY_EXC_ERR:
+ return IB_WC_RETRY_EXC_ERR;
+ case FLUSH_MW_BIND_ERR:
+ return IB_WC_MW_BIND_ERR;
+ case FLUSH_REM_INV_REQ_ERR:
+ return IB_WC_REM_INV_REQ_ERR;
+ case FLUSH_RNR_RETRY_EXC_ERR:
+ return IB_WC_RNR_RETRY_EXC_ERR;
+ case FLUSH_FATAL_ERR:
+ default:
+ return IB_WC_FATAL_ERR;
+ }
+}
+
+/**
+ * irdma_process_cqe - process cqe info
+ * @entry: processed cqe
+ * @cq_poll_info: cqe info
+ */
+static void irdma_process_cqe(struct ib_wc *entry,
+ struct irdma_cq_poll_info *cq_poll_info)
+{
+ struct irdma_sc_qp *qp;
+
+ entry->wc_flags = 0;
+ entry->pkey_index = 0;
+ entry->wr_id = cq_poll_info->wr_id;
+
+ qp = cq_poll_info->qp_handle;
+ entry->qp = qp->qp_uk.back_qp;
+
+ if (cq_poll_info->error) {
+ entry->status = (cq_poll_info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ?
+ irdma_flush_err_to_ib_wc_status(cq_poll_info->minor_err) : IB_WC_GENERAL_ERR;
+
+ entry->vendor_err = cq_poll_info->major_err << 16 |
+ cq_poll_info->minor_err;
+ } else {
+ entry->status = IB_WC_SUCCESS;
+ if (cq_poll_info->imm_valid) {
+ entry->ex.imm_data = htonl(cq_poll_info->imm_data);
+ entry->wc_flags |= IB_WC_WITH_IMM;
+ }
+ if (cq_poll_info->ud_smac_valid) {
+ ether_addr_copy(entry->smac, cq_poll_info->ud_smac);
+ entry->wc_flags |= IB_WC_WITH_SMAC;
+ }
+
+ if (cq_poll_info->ud_vlan_valid) {
+ u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK;
+
+ entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
+ if (vlan) {
+ entry->vlan_id = vlan;
+ entry->wc_flags |= IB_WC_WITH_VLAN;
+ }
+ } else {
+ entry->sl = 0;
+ }
+ }
+
+ if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) {
+ set_ib_wc_op_sq(cq_poll_info, entry);
+ } else {
+ if (qp->dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
+ set_ib_wc_op_rq(cq_poll_info, entry,
+ qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM ?
+ true : false);
+ else
+ set_ib_wc_op_rq_gen_3(cq_poll_info, entry);
+ if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
+ cq_poll_info->stag_invalid_set) {
+ entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
+ entry->wc_flags |= IB_WC_WITH_INVALIDATE;
+ }
+ }
+
+ if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) {
+ entry->src_qp = cq_poll_info->ud_src_qpn;
+ entry->slid = 0;
+ entry->wc_flags |=
+ (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE);
+ entry->network_hdr_type = cq_poll_info->ipv4 ?
+ RDMA_NETWORK_IPV4 :
+ RDMA_NETWORK_IPV6;
+ } else {
+ entry->src_qp = cq_poll_info->qp_id;
+ }
+
+ entry->byte_len = cq_poll_info->bytes_xfered;
+}
+
+/**
+ * irdma_poll_one - poll one entry of the CQ
+ * @ukcq: ukcq to poll
+ * @cur_cqe: current CQE info to be filled in
+ * @entry: ib_wc object to be filled for non-extended CQ or NULL for extended CQ
+ *
+ * Returns the internal irdma device error code or 0 on success
+ */
+static inline int irdma_poll_one(struct irdma_cq_uk *ukcq,
+ struct irdma_cq_poll_info *cur_cqe,
+ struct ib_wc *entry)
+{
+ int ret = irdma_uk_cq_poll_cmpl(ukcq, cur_cqe);
+
+ if (ret)
+ return ret;
+
+ irdma_process_cqe(entry, cur_cqe);
+
+ return 0;
+}
+
+/**
+ * __irdma_poll_cq - poll cq for completion (kernel apps)
+ * @iwcq: cq to poll
+ * @num_entries: number of entries to poll
+ * @entry: array of ib_wc entries to fill with completions
+ */
+static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry)
+{
+ struct list_head *tmp_node, *list_node;
+ struct irdma_cq_buf *last_buf = NULL;
+ struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe;
+ struct irdma_cq_buf *cq_buf;
+ int ret;
+ struct irdma_device *iwdev;
+ struct irdma_cq_uk *ukcq;
+ bool cq_new_cqe = false;
+ int resized_bufs = 0;
+ int npolled = 0;
+
+ iwdev = to_iwdev(iwcq->ibcq.device);
+ ukcq = &iwcq->sc_cq.cq_uk;
+
+ /* go through the list of previously resized CQ buffers */
+ list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
+ cq_buf = container_of(list_node, struct irdma_cq_buf, list);
+ while (npolled < num_entries) {
+ ret = irdma_poll_one(&cq_buf->cq_uk, cur_cqe, entry + npolled);
+ if (!ret) {
+ ++npolled;
+ cq_new_cqe = true;
+ continue;
+ }
+ if (ret == -ENOENT)
+ break;
+ /* QP using the CQ is destroyed. Skip reporting this CQE */
+ if (ret == -EFAULT) {
+ cq_new_cqe = true;
+ continue;
+ }
+ goto error;
+ }
+
+ /* save the resized CQ buffer which received the last cqe */
+ if (cq_new_cqe)
+ last_buf = cq_buf;
+ cq_new_cqe = false;
+ }
+
+ /* check the current CQ for new cqes */
+ while (npolled < num_entries) {
+ ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled);
+ if (ret == -ENOENT) {
+ ret = irdma_generated_cmpls(iwcq, cur_cqe);
+ if (!ret)
+ irdma_process_cqe(entry + npolled, cur_cqe);
+ }
+ if (!ret) {
+ ++npolled;
+ cq_new_cqe = true;
+ continue;
+ }
+
+ if (ret == -ENOENT)
+ break;
+ /* QP using the CQ is destroyed. Skip reporting this CQE */
+ if (ret == -EFAULT) {
+ cq_new_cqe = true;
+ continue;
+ }
+ goto error;
+ }
+
+ if (cq_new_cqe)
+ /* all previous CQ resizes are complete */
+ resized_bufs = irdma_process_resize_list(iwcq, iwdev, NULL);
+ else if (last_buf)
+ /* only CQ resizes up to the last_buf are complete */
+ resized_bufs = irdma_process_resize_list(iwcq, iwdev, last_buf);
+ if (resized_bufs)
+ /* report to the HW the number of complete CQ resizes */
+ irdma_uk_cq_set_resized_cnt(ukcq, resized_bufs);
+
+ return npolled;
+error:
+ ibdev_dbg(&iwdev->ibdev, "%s: Error polling CQ, irdma_err: %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+/**
+ * irdma_poll_cq - poll cq for completion (kernel apps)
+ * @ibcq: cq to poll
+ * @num_entries: number of entries to poll
+ * @entry: array of ib_wc entries to fill with completions
+ */
+static int irdma_poll_cq(struct ib_cq *ibcq, int num_entries,
+ struct ib_wc *entry)
+{
+ struct irdma_cq *iwcq;
+ unsigned long flags;
+ int ret;
+
+ iwcq = to_iwcq(ibcq);
+
+ spin_lock_irqsave(&iwcq->lock, flags);
+ ret = __irdma_poll_cq(iwcq, num_entries, entry);
+ spin_unlock_irqrestore(&iwcq->lock, flags);
+
+ return ret;
+}
+
+/**
+ * irdma_req_notify_cq - arm the cq for a kernel application
+ * @ibcq: cq to arm
+ * @notify_flags: notification flags
+ */
+static int irdma_req_notify_cq(struct ib_cq *ibcq,
+ enum ib_cq_notify_flags notify_flags)
+{
+ struct irdma_cq *iwcq;
+ struct irdma_cq_uk *ukcq;
+ unsigned long flags;
+ enum irdma_cmpl_notify cq_notify;
+ bool promo_event = false;
+ int ret = 0;
+
+ cq_notify = notify_flags == IB_CQ_SOLICITED ?
+ IRDMA_CQ_COMPL_SOLICITED : IRDMA_CQ_COMPL_EVENT;
+ iwcq = to_iwcq(ibcq);
+ ukcq = &iwcq->sc_cq.cq_uk;
+
+ spin_lock_irqsave(&iwcq->lock, flags);
+ /* Only promote to arm the CQ for any event if the last arm event was solicited. */
+ if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED && notify_flags != IB_CQ_SOLICITED)
+ promo_event = true;
+
+ if (!atomic_cmpxchg(&iwcq->armed, 0, 1) || promo_event) {
+ iwcq->last_notify = cq_notify;
+ irdma_uk_cq_request_notification(ukcq, cq_notify);
+ }
+
+ if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+ (!irdma_cq_empty(iwcq) || !list_empty(&iwcq->cmpl_generated)))
+ ret = 1;
+ spin_unlock_irqrestore(&iwcq->lock, flags);
+
+ return ret;
+}
+
+static const struct rdma_stat_desc irdma_hw_stat_descs[] = {
+ /* gen1 - 32-bit */
+ [IRDMA_HW_STAT_INDEX_IP4RXDISCARD].name = "ip4InDiscards",
+ [IRDMA_HW_STAT_INDEX_IP4RXTRUNC].name = "ip4InTruncatedPkts",
+ [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE].name = "ip4OutNoRoutes",
+ [IRDMA_HW_STAT_INDEX_IP6RXDISCARD].name = "ip6InDiscards",
+ [IRDMA_HW_STAT_INDEX_IP6RXTRUNC].name = "ip6InTruncatedPkts",
+ [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE].name = "ip6OutNoRoutes",
+ [IRDMA_HW_STAT_INDEX_RXVLANERR].name = "rxVlanErrors",
+ /* gen1 - 64-bit */
+ [IRDMA_HW_STAT_INDEX_IP4RXOCTS].name = "ip4InOctets",
+ [IRDMA_HW_STAT_INDEX_IP4RXPKTS].name = "ip4InPkts",
+ [IRDMA_HW_STAT_INDEX_IP4RXFRAGS].name = "ip4InReasmRqd",
+ [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS].name = "ip4InMcastPkts",
+ [IRDMA_HW_STAT_INDEX_IP4TXOCTS].name = "ip4OutOctets",
+ [IRDMA_HW_STAT_INDEX_IP4TXPKTS].name = "ip4OutPkts",
+ [IRDMA_HW_STAT_INDEX_IP4TXFRAGS].name = "ip4OutSegRqd",
+ [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS].name = "ip4OutMcastPkts",
+ [IRDMA_HW_STAT_INDEX_IP6RXOCTS].name = "ip6InOctets",
+ [IRDMA_HW_STAT_INDEX_IP6RXPKTS].name = "ip6InPkts",
+ [IRDMA_HW_STAT_INDEX_IP6RXFRAGS].name = "ip6InReasmRqd",
+ [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS].name = "ip6InMcastPkts",
+ [IRDMA_HW_STAT_INDEX_IP6TXOCTS].name = "ip6OutOctets",
+ [IRDMA_HW_STAT_INDEX_IP6TXPKTS].name = "ip6OutPkts",
+ [IRDMA_HW_STAT_INDEX_IP6TXFRAGS].name = "ip6OutSegRqd",
+ [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS].name = "ip6OutMcastPkts",
+ [IRDMA_HW_STAT_INDEX_RDMARXRDS].name = "InRdmaReads",
+ [IRDMA_HW_STAT_INDEX_RDMARXSNDS].name = "InRdmaSends",
+ [IRDMA_HW_STAT_INDEX_RDMARXWRS].name = "InRdmaWrites",
+ [IRDMA_HW_STAT_INDEX_RDMATXRDS].name = "OutRdmaReads",
+ [IRDMA_HW_STAT_INDEX_RDMATXSNDS].name = "OutRdmaSends",
+ [IRDMA_HW_STAT_INDEX_RDMATXWRS].name = "OutRdmaWrites",
+ [IRDMA_HW_STAT_INDEX_RDMAVBND].name = "RdmaBnd",
+ [IRDMA_HW_STAT_INDEX_RDMAVINV].name = "RdmaInv",
+
+ /* gen2 - 32-bit */
+ [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED].name = "cnpHandled",
+ [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED].name = "cnpIgnored",
+ [IRDMA_HW_STAT_INDEX_TXNPCNPSENT].name = "cnpSent",
+ /* gen2 - 64-bit */
+ [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS].name = "ip4InMcastOctets",
+ [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS].name = "ip4OutMcastOctets",
+ [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS].name = "ip6InMcastOctets",
+ [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS].name = "ip6OutMcastOctets",
+ [IRDMA_HW_STAT_INDEX_UDPRXPKTS].name = "RxUDP",
+ [IRDMA_HW_STAT_INDEX_UDPTXPKTS].name = "TxUDP",
+ [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS].name = "RxECNMrkd",
+ [IRDMA_HW_STAT_INDEX_TCPRTXSEG].name = "RetransSegs",
+ [IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name = "InOptErrors",
+ [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name = "InProtoErrors",
+ [IRDMA_HW_STAT_INDEX_TCPRXSEGS].name = "InSegs",
+ [IRDMA_HW_STAT_INDEX_TCPTXSEG].name = "OutSegs",
+
+ /* gen3 */
+ [IRDMA_HW_STAT_INDEX_RNR_SENT].name = "RNR sent",
+ [IRDMA_HW_STAT_INDEX_RNR_RCVD].name = "RNR received",
+ [IRDMA_HW_STAT_INDEX_RDMAORDLMTCNT].name = "ord limit count",
+ [IRDMA_HW_STAT_INDEX_RDMAIRDLMTCNT].name = "ird limit count",
+ [IRDMA_HW_STAT_INDEX_RDMARXATS].name = "Rx atomics",
+ [IRDMA_HW_STAT_INDEX_RDMATXATS].name = "Tx atomics",
+ [IRDMA_HW_STAT_INDEX_NAKSEQERR].name = "Nak Sequence Error",
+ [IRDMA_HW_STAT_INDEX_NAKSEQERR_IMPLIED].name = "Nak Sequence Error Implied",
+ [IRDMA_HW_STAT_INDEX_RTO].name = "RTO",
+ [IRDMA_HW_STAT_INDEX_RXOOOPKTS].name = "Rcvd Out of order packets",
+ [IRDMA_HW_STAT_INDEX_ICRCERR].name = "CRC errors",
+};
+
+static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+
+ return 0;
+}
+
+static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+
+ return 0;
+}
+
+static void irdma_get_dev_fw_str(struct ib_device *dev, char *str)
+{
+ struct irdma_device *iwdev = to_iwdev(dev);
+
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u",
+ irdma_fw_major_ver(&iwdev->rf->sc_dev),
+ irdma_fw_minor_ver(&iwdev->rf->sc_dev));
+}
+
+/**
+ * irdma_alloc_hw_port_stats - Allocate a hw stats structure
+ * @ibdev: device pointer from stack
+ * @port_num: port number
+ */
+static struct rdma_hw_stats *irdma_alloc_hw_port_stats(struct ib_device *ibdev,
+ u32 port_num)
+{
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+ struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
+
+ int num_counters = dev->hw_attrs.max_stat_idx;
+ unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
+
+ return rdma_alloc_hw_stats_struct(irdma_hw_stat_descs, num_counters,
+ lifespan);
+}
+
+/**
+ * irdma_get_hw_stats - Populates the rdma_hw_stats structure
+ * @ibdev: device pointer from stack
+ * @stats: stats pointer from stack
+ * @port_num: port number
+ * @index: which hw counter the stack is requesting we update
+ */
+static int irdma_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats, u32 port_num,
+ int index)
+{
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+ struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats;
+
+ if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
+ irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true);
+ else
+ irdma_cqp_gather_stats_gen1(&iwdev->rf->sc_dev, iwdev->vsi.pestat);
+
+ memcpy(&stats->value[0], hw_stats, sizeof(u64) * stats->num_counters);
+
+ return stats->num_counters;
+}
+
+/**
+ * irdma_query_gid - Query port GID
+ * @ibdev: device pointer from stack
+ * @port: port number
+ * @index: Entry index
+ * @gid: Global ID
+ */
+static int irdma_query_gid(struct ib_device *ibdev, u32 port, int index,
+ union ib_gid *gid)
+{
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+
+ memset(gid->raw, 0, sizeof(gid->raw));
+ ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
+
+ return 0;
+}
+
+/**
+ * mcast_list_add - Add a new mcast item to list
+ * @rf: RDMA PCI function
+ * @new_elem: pointer to element to add
+ */
+static void mcast_list_add(struct irdma_pci_f *rf,
+ struct mc_table_list *new_elem)
+{
+ list_add(&new_elem->list, &rf->mc_qht_list.list);
+}
+
+/**
+ * mcast_list_del - Remove an mcast item from list
+ * @mc_qht_elem: pointer to mcast table list element
+ */
+static void mcast_list_del(struct mc_table_list *mc_qht_elem)
+{
+ if (mc_qht_elem)
+ list_del(&mc_qht_elem->list);
+}
+
+/**
+ * mcast_list_lookup_ip - Search mcast list for address
+ * @rf: RDMA PCI function
+ * @ip_mcast: pointer to mcast IP address
+ */
+static struct mc_table_list *mcast_list_lookup_ip(struct irdma_pci_f *rf,
+ u32 *ip_mcast)
+{
+ struct mc_table_list *mc_qht_el;
+ struct list_head *pos, *q;
+
+ list_for_each_safe (pos, q, &rf->mc_qht_list.list) {
+ mc_qht_el = list_entry(pos, struct mc_table_list, list);
+ if (!memcmp(mc_qht_el->mc_info.dest_ip, ip_mcast,
+ sizeof(mc_qht_el->mc_info.dest_ip)))
+ return mc_qht_el;
+ }
+
+ return NULL;
+}
+
+/**
+ * irdma_mcast_cqp_op - perform a mcast cqp operation
+ * @iwdev: irdma device
+ * @mc_grp_ctx: mcast group info
+ * @op: operation
+ *
+ * returns error status
+ */
+static int irdma_mcast_cqp_op(struct irdma_device *iwdev,
+ struct irdma_mcast_grp_info *mc_grp_ctx, u8 op)
+{
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_cqp_request *cqp_request;
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_request->info.in.u.mc_create.info = *mc_grp_ctx;
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = op;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp;
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_mcast_mac - Get the multicast MAC for an IP address
+ * @ip_addr: IPv4 or IPv6 address
+ * @mac: pointer to result MAC address
+ * @ipv4: flag indicating IPv4 or IPv6
+ */
+void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4)
+{
+ u8 *ip = (u8 *)ip_addr;
+
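+ /* Standard IP multicast MAC mapping: RFC 1112 for IPv4 (01:00:5e plus
+ * the low 23 bits of the group address) and RFC 2464 for IPv6 (33:33 plus
+ * the low 32 bits). For example, group 239.1.2.3 maps to
+ * 01:00:5e:01:02:03.
+ */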
+ if (ipv4) {
+ unsigned char mac4[ETH_ALEN] = {0x01, 0x00, 0x5E, 0x00,
+ 0x00, 0x00};
+
+ mac4[3] = ip[2] & 0x7F;
+ mac4[4] = ip[1];
+ mac4[5] = ip[0];
+ ether_addr_copy(mac, mac4);
+ } else {
+ unsigned char mac6[ETH_ALEN] = {0x33, 0x33, 0x00, 0x00,
+ 0x00, 0x00};
+
+ mac6[2] = ip[3];
+ mac6[3] = ip[2];
+ mac6[4] = ip[1];
+ mac6[5] = ip[0];
+ ether_addr_copy(mac, mac6);
+ }
+}
+
+/**
+ * irdma_attach_mcast - attach a qp to a multicast group
+ * @ibqp: ptr to qp
+ * @ibgid: pointer to global ID
+ * @lid: local ID
+ *
+ * returns error status
+ */
+static int irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
+{
+ struct irdma_qp *iwqp = to_iwqp(ibqp);
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct mc_table_list *mc_qht_elem;
+ struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
+ unsigned long flags;
+ u32 ip_addr[4] = {};
+ u32 mgn;
+ u32 no_mgs;
+ int ret = 0;
+ bool ipv4;
+ u16 vlan_id;
+ union irdma_sockaddr sgid_addr;
+ unsigned char dmac[ETH_ALEN];
+
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
+
+ if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) {
+ irdma_copy_ip_ntohl(ip_addr,
+ sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
+ irdma_get_vlan_mac_ipv6(ip_addr, &vlan_id, NULL);
+ ipv4 = false;
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: qp_id=%d, IP6address=%pI6\n", ibqp->qp_num,
+ ip_addr);
+ irdma_mcast_mac(ip_addr, dmac, false);
+ } else {
+ ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
+ ipv4 = true;
+ vlan_id = irdma_get_vlan_ipv4(ip_addr);
+ irdma_mcast_mac(ip_addr, dmac, true);
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: qp_id=%d, IP4address=%pI4, MAC=%pM\n",
+ ibqp->qp_num, ip_addr, dmac);
+ }
+
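+ /* Look up (or create) the multicast group context for this address. Each
+ * context tracks up to IRDMA_MAX_MGS_PER_CTX attached QPs and owns a DMA
+ * buffer describing the group that is handed to HW.
+ */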
+ spin_lock_irqsave(&rf->qh_list_lock, flags);
+ mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
+ if (!mc_qht_elem) {
+ struct irdma_dma_mem *dma_mem_mc;
+
+ spin_unlock_irqrestore(&rf->qh_list_lock, flags);
+ mc_qht_elem = kzalloc(sizeof(*mc_qht_elem), GFP_KERNEL);
+ if (!mc_qht_elem)
+ return -ENOMEM;
+
+ mc_qht_elem->mc_info.ipv4_valid = ipv4;
+ memcpy(mc_qht_elem->mc_info.dest_ip, ip_addr,
+ sizeof(mc_qht_elem->mc_info.dest_ip));
+ ret = irdma_alloc_rsrc(rf, rf->allocated_mcgs, rf->max_mcg,
+ &mgn, &rf->next_mcg);
+ if (ret) {
+ kfree(mc_qht_elem);
+ return -ENOMEM;
+ }
+
+ mc_qht_elem->mc_info.mgn = mgn;
+ dma_mem_mc = &mc_qht_elem->mc_grp_ctx.dma_mem_mc;
+ dma_mem_mc->size = ALIGN(sizeof(u64) * IRDMA_MAX_MGS_PER_CTX,
+ IRDMA_HW_PAGE_SIZE);
+ dma_mem_mc->va = dma_alloc_coherent(rf->hw.device,
+ dma_mem_mc->size,
+ &dma_mem_mc->pa,
+ GFP_KERNEL);
+ if (!dma_mem_mc->va) {
+ irdma_free_rsrc(rf, rf->allocated_mcgs, mgn);
+ kfree(mc_qht_elem);
+ return -ENOMEM;
+ }
+
+ mc_qht_elem->mc_grp_ctx.mg_id = (u16)mgn;
+ memcpy(mc_qht_elem->mc_grp_ctx.dest_ip_addr, ip_addr,
+ sizeof(mc_qht_elem->mc_grp_ctx.dest_ip_addr));
+ mc_qht_elem->mc_grp_ctx.ipv4_valid = ipv4;
+ mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id;
+ if (vlan_id < VLAN_N_VID)
+ mc_qht_elem->mc_grp_ctx.vlan_valid = true;
+ mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->rf->sc_dev.hmc_fn_id;
+ mc_qht_elem->mc_grp_ctx.qs_handle =
+ iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle;
+ ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac);
+
+ spin_lock_irqsave(&rf->qh_list_lock, flags);
+ mcast_list_add(rf, mc_qht_elem);
+ } else {
+ if (mc_qht_elem->mc_grp_ctx.no_of_mgs ==
+ IRDMA_MAX_MGS_PER_CTX) {
+ spin_unlock_irqrestore(&rf->qh_list_lock, flags);
+ return -ENOMEM;
+ }
+ }
+
+ mcg_info.qp_id = iwqp->ibqp.qp_num;
+ no_mgs = mc_qht_elem->mc_grp_ctx.no_of_mgs;
+ irdma_sc_add_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
+ spin_unlock_irqrestore(&rf->qh_list_lock, flags);
+
+ /* Only if there is a change do we need to modify or create */
+ if (!no_mgs) {
+ ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
+ IRDMA_OP_MC_CREATE);
+ } else if (no_mgs != mc_qht_elem->mc_grp_ctx.no_of_mgs) {
+ ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
+ IRDMA_OP_MC_MODIFY);
+ } else {
+ return 0;
+ }
+
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
+ if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
+ mcast_list_del(mc_qht_elem);
+ dma_free_coherent(rf->hw.device,
+ mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,
+ mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,
+ mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);
+ mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;
+ irdma_free_rsrc(rf, rf->allocated_mcgs,
+ mc_qht_elem->mc_grp_ctx.mg_id);
+ kfree(mc_qht_elem);
+ }
+
+ return ret;
+}
+
+/**
+ * irdma_detach_mcast - detach a qp from a multicast group
+ * @ibqp: ptr to qp
+ * @ibgid: pointer to global ID
+ * @lid: local ID
+ *
+ * returns error status
+ */
+static int irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
+{
+ struct irdma_qp *iwqp = to_iwqp(ibqp);
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_pci_f *rf = iwdev->rf;
+ u32 ip_addr[4] = {};
+ struct mc_table_list *mc_qht_elem;
+ struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
+ int ret;
+ unsigned long flags;
+ union irdma_sockaddr sgid_addr;
+
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
+ if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid))
+ irdma_copy_ip_ntohl(ip_addr,
+ sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
+ else
+ ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
+
+ spin_lock_irqsave(&rf->qh_list_lock, flags);
+ mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
+ if (!mc_qht_elem) {
+ spin_unlock_irqrestore(&rf->qh_list_lock, flags);
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: address not found MCG\n");
+ return 0;
+ }
+
+ mcg_info.qp_id = iwqp->ibqp.qp_num;
+ irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
+ if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
+ mcast_list_del(mc_qht_elem);
+ spin_unlock_irqrestore(&rf->qh_list_lock, flags);
+ ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
+ IRDMA_OP_MC_DESTROY);
+ if (ret) {
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: failed MC_DESTROY MCG\n");
+ spin_lock_irqsave(&rf->qh_list_lock, flags);
+ mcast_list_add(rf, mc_qht_elem);
+ spin_unlock_irqrestore(&rf->qh_list_lock, flags);
+ return -EAGAIN;
+ }
+
+ dma_free_coherent(rf->hw.device,
+ mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,
+ mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,
+ mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);
+ mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;
+ irdma_free_rsrc(rf, rf->allocated_mcgs,
+ mc_qht_elem->mc_grp_ctx.mg_id);
+ kfree(mc_qht_elem);
+ } else {
+ spin_unlock_irqrestore(&rf->qh_list_lock, flags);
+ ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
+ IRDMA_OP_MC_MODIFY);
+ if (ret) {
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: failed Modify MCG\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int irdma_create_hw_ah(struct irdma_device *iwdev, struct irdma_ah *ah, bool sleep)
+{
+ struct irdma_pci_f *rf = iwdev->rf;
+ int err;
+
+ err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah->sc_ah.ah_info.ah_idx,
+ &rf->next_ah);
+ if (err)
+ return err;
+
+ err = irdma_ah_cqp_op(rf, &ah->sc_ah, IRDMA_OP_AH_CREATE, sleep,
+ irdma_gsi_ud_qp_ah_cb, &ah->sc_ah);
+
+ if (err) {
+ ibdev_dbg(&iwdev->ibdev, "VERBS: CQP-OP Create AH fail");
+ goto err_ah_create;
+ }
+
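+ /* Atomic callers cannot wait on the CQP completion, so poll the CCQ
+ * until the AH is marked valid or the timeout expires.
+ */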
+ if (!sleep) {
+ int cnt = CQP_COMPL_WAIT_TIME_MS * CQP_TIMEOUT_THRESHOLD;
+
+ do {
+ irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
+ mdelay(1);
+ } while (!ah->sc_ah.ah_info.ah_valid && --cnt);
+
+ if (!cnt) {
+ ibdev_dbg(&iwdev->ibdev, "VERBS: CQP create AH timed out");
+ err = -ETIMEDOUT;
+ goto err_ah_create;
+ }
+ }
+ return 0;
+
+err_ah_create:
+ irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah->sc_ah.ah_info.ah_idx);
+
+ return err;
+}
+
+static int irdma_setup_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr)
+{
+ struct irdma_pd *pd = to_iwpd(ibah->pd);
+ struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
+ struct rdma_ah_attr *ah_attr = attr->ah_attr;
+ const struct ib_gid_attr *sgid_attr;
+ struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_sc_ah *sc_ah;
+ struct irdma_ah_info *ah_info;
+ union irdma_sockaddr sgid_addr, dgid_addr;
+ int err;
+ u8 dmac[ETH_ALEN];
+
+ ah->pd = pd;
+ sc_ah = &ah->sc_ah;
+ sc_ah->ah_info.vsi = &iwdev->vsi;
+ irdma_sc_init_ah(&rf->sc_dev, sc_ah);
+ ah->sgid_index = ah_attr->grh.sgid_index;
+ sgid_attr = ah_attr->grh.sgid_attr;
+ memcpy(&ah->dgid, &ah_attr->grh.dgid, sizeof(ah->dgid));
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid);
+ rdma_gid2ip((struct sockaddr *)&dgid_addr, &ah_attr->grh.dgid);
+ ah->av.attrs = *ah_attr;
+ ah->av.net_type = rdma_gid_attr_network_type(sgid_attr);
+ ah_info = &sc_ah->ah_info;
+ ah_info->pd_idx = pd->sc_pd.pd_id;
+ if (ah_attr->ah_flags & IB_AH_GRH) {
+ ah_info->flow_label = ah_attr->grh.flow_label;
+ ah_info->hop_ttl = ah_attr->grh.hop_limit;
+ ah_info->tc_tos = ah_attr->grh.traffic_class;
+ }
+
+ ether_addr_copy(dmac, ah_attr->roce.dmac);
+ if (ah->av.net_type == RDMA_NETWORK_IPV4) {
+ ah_info->ipv4_valid = true;
+ ah_info->dest_ip_addr[0] =
+ ntohl(dgid_addr.saddr_in.sin_addr.s_addr);
+ ah_info->src_ip_addr[0] =
+ ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
+ ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0],
+ ah_info->dest_ip_addr[0]);
+ if (ipv4_is_multicast(dgid_addr.saddr_in.sin_addr.s_addr)) {
+ ah_info->do_lpbk = true;
+ irdma_mcast_mac(ah_info->dest_ip_addr, dmac, true);
+ }
+ } else {
+ irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
+ dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
+ irdma_copy_ip_ntohl(ah_info->src_ip_addr,
+ sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
+ ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr,
+ ah_info->dest_ip_addr);
+ if (rdma_is_multicast_addr(&dgid_addr.saddr_in6.sin6_addr)) {
+ ah_info->do_lpbk = true;
+ irdma_mcast_mac(ah_info->dest_ip_addr, dmac, false);
+ }
+ }
+
+ err = rdma_read_gid_l2_fields(sgid_attr, &ah_info->vlan_tag,
+ ah_info->mac_addr);
+ if (err)
+ return err;
+
+ ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr,
+ ah_info->ipv4_valid, dmac);
+
+ if (ah_info->dst_arpindex == -1)
+ return -EINVAL;
+
+ if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode)
+ ah_info->vlan_tag = 0;
+
+ if (ah_info->vlan_tag < VLAN_N_VID) {
+ u8 prio = rt_tos2priority(ah_info->tc_tos);
+
+ prio = irdma_roce_get_vlan_prio(sgid_attr, prio);
+
+ ah_info->vlan_tag |= (u16)prio << VLAN_PRIO_SHIFT;
+ ah_info->insert_vlan_tag = true;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_ah_exists - Check for existing identical AH
+ * @iwdev: irdma device
+ * @new_ah: AH to check for
+ *
+ * returns true if AH is found, false if not found.
+ */
+static bool irdma_ah_exists(struct irdma_device *iwdev,
+ struct irdma_ah *new_ah)
+{
+ struct irdma_ah *ah;
+ u32 key = new_ah->sc_ah.ah_info.dest_ip_addr[0] ^
+ new_ah->sc_ah.ah_info.dest_ip_addr[1] ^
+ new_ah->sc_ah.ah_info.dest_ip_addr[2] ^
+ new_ah->sc_ah.ah_info.dest_ip_addr[3];
+
+ hash_for_each_possible(iwdev->rf->ah_hash_tbl, ah, list, key) {
+ /* Set ah_idx and ah_valid to the same values so memcmp can work */
+ new_ah->sc_ah.ah_info.ah_idx = ah->sc_ah.ah_info.ah_idx;
+ new_ah->sc_ah.ah_info.ah_valid = ah->sc_ah.ah_info.ah_valid;
+ if (!memcmp(&ah->sc_ah.ah_info, &new_ah->sc_ah.ah_info,
+ sizeof(ah->sc_ah.ah_info))) {
+ refcount_inc(&ah->refcnt);
+ new_ah->parent_ah = ah;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * irdma_destroy_ah - Destroy address handle
+ * @ibah: pointer to address handle
+ * @ah_flags: flags for sleepable
+ */
+static int irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
+{
+ struct irdma_device *iwdev = to_iwdev(ibah->device);
+ struct irdma_ah *ah = to_iwah(ibah);
+
+ if ((ah_flags & RDMA_DESTROY_AH_SLEEPABLE) && ah->parent_ah) {
+ mutex_lock(&iwdev->rf->ah_tbl_lock);
+ if (!refcount_dec_and_test(&ah->parent_ah->refcnt)) {
+ mutex_unlock(&iwdev->rf->ah_tbl_lock);
+ return 0;
+ }
+ hash_del(&ah->parent_ah->list);
+ kfree(ah->parent_ah);
+ mutex_unlock(&iwdev->rf->ah_tbl_lock);
+ }
+
+ irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
+ false, NULL, ah);
+
+ irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
+ ah->sc_ah.ah_info.ah_idx);
+
+ return 0;
+}
+
+/**
+ * irdma_create_user_ah - create user address handle
+ * @ibah: address handle
+ * @attr: address handle attributes
+ * @udata: User data
+ *
+ * returns 0 on success, error otherwise
+ */
+static int irdma_create_user_ah(struct ib_ah *ibah,
+ struct rdma_ah_init_attr *attr,
+ struct ib_udata *udata)
+{
+#define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)
+ struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
+ struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
+ struct irdma_create_ah_resp uresp;
+ struct irdma_ah *parent_ah;
+ int err;
+
+ if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
+ return -EINVAL;
+
+ err = irdma_setup_ah(ibah, attr);
+ if (err)
+ return err;
+ mutex_lock(&iwdev->rf->ah_tbl_lock);
+ if (!irdma_ah_exists(iwdev, ah)) {
+ err = irdma_create_hw_ah(iwdev, ah, true);
+ if (err) {
+ mutex_unlock(&iwdev->rf->ah_tbl_lock);
+ return err;
+ }
+ /* Add new AH to list */
+ parent_ah = kmemdup(ah, sizeof(*ah), GFP_KERNEL);
+ if (parent_ah) {
+ u32 key = parent_ah->sc_ah.ah_info.dest_ip_addr[0] ^
+ parent_ah->sc_ah.ah_info.dest_ip_addr[1] ^
+ parent_ah->sc_ah.ah_info.dest_ip_addr[2] ^
+ parent_ah->sc_ah.ah_info.dest_ip_addr[3];
+
+ ah->parent_ah = parent_ah;
+ hash_add(iwdev->rf->ah_hash_tbl, &parent_ah->list, key);
+ refcount_set(&parent_ah->refcnt, 1);
+ }
+ }
+ mutex_unlock(&iwdev->rf->ah_tbl_lock);
+
+ uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
+ err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen));
+ if (err)
+ irdma_destroy_ah(ibah, attr->flags);
+
+ return err;
+}
+
+/**
+ * irdma_create_ah - create address handle
+ * @ibah: address handle
+ * @attr: address handle attributes
+ * @udata: user data (always NULL for kernel clients)
+ *
+ * returns 0 on success, error otherwise
+ */
+static int irdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
+ struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
+ int err;
+
+ err = irdma_setup_ah(ibah, attr);
+ if (err)
+ return err;
+ err = irdma_create_hw_ah(iwdev, ah, attr->flags & RDMA_CREATE_AH_SLEEPABLE);
+
+ return err;
+}
+
+/**
+ * irdma_query_ah - Query address handle
+ * @ibah: pointer to address handle
+ * @ah_attr: address handle attributes
+ */
+static int irdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
+{
+ struct irdma_ah *ah = to_iwah(ibah);
+
+ memset(ah_attr, 0, sizeof(*ah_attr));
+ if (ah->av.attrs.ah_flags & IB_AH_GRH) {
+ ah_attr->ah_flags = IB_AH_GRH;
+ ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label;
+ ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos;
+ ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl;
+ ah_attr->grh.sgid_index = ah->sgid_index;
+ memcpy(&ah_attr->grh.dgid, &ah->dgid,
+ sizeof(ah_attr->grh.dgid));
+ }
+
+ return 0;
+}
+
+static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
+ u32 port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+static const struct ib_device_ops irdma_gen1_dev_ops = {
+ .dealloc_driver = irdma_ib_dealloc_device,
+};
+
+static const struct ib_device_ops irdma_gen3_dev_ops = {
+ .alloc_mw = irdma_alloc_mw,
+ .create_srq = irdma_create_srq,
+ .dealloc_mw = irdma_dealloc_mw,
+ .destroy_srq = irdma_destroy_srq,
+ .modify_srq = irdma_modify_srq,
+ .post_srq_recv = irdma_post_srq_recv,
+ .query_srq = irdma_query_srq,
+};
+
+static const struct ib_device_ops irdma_roce_dev_ops = {
+ .attach_mcast = irdma_attach_mcast,
+ .create_ah = irdma_create_ah,
+ .create_user_ah = irdma_create_user_ah,
+ .destroy_ah = irdma_destroy_ah,
+ .detach_mcast = irdma_detach_mcast,
+ .get_link_layer = irdma_get_link_layer,
+ .get_port_immutable = irdma_roce_port_immutable,
+ .modify_qp = irdma_modify_qp_roce,
+ .query_ah = irdma_query_ah,
+ .query_pkey = irdma_query_pkey,
+};
+
+static const struct ib_device_ops irdma_iw_dev_ops = {
+ .get_port_immutable = irdma_iw_port_immutable,
+ .iw_accept = irdma_accept,
+ .iw_add_ref = irdma_qp_add_ref,
+ .iw_connect = irdma_connect,
+ .iw_create_listen = irdma_create_listen,
+ .iw_destroy_listen = irdma_destroy_listen,
+ .iw_get_qp = irdma_get_qp,
+ .iw_reject = irdma_reject,
+ .iw_rem_ref = irdma_qp_rem_ref,
+ .modify_qp = irdma_modify_qp,
+ .query_gid = irdma_query_gid,
+};
+
+static const struct ib_device_ops irdma_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_IRDMA,
+ .uverbs_abi_ver = IRDMA_ABI_VER,
+
+ .alloc_hw_port_stats = irdma_alloc_hw_port_stats,
+ .alloc_mr = irdma_alloc_mr,
+ .alloc_pd = irdma_alloc_pd,
+ .alloc_ucontext = irdma_alloc_ucontext,
+ .create_cq = irdma_create_cq,
+ .create_qp = irdma_create_qp,
+ .dealloc_driver = irdma_ib_dealloc_device,
+ .dealloc_mw = irdma_dealloc_mw,
+ .dealloc_pd = irdma_dealloc_pd,
+ .dealloc_ucontext = irdma_dealloc_ucontext,
+ .dereg_mr = irdma_dereg_mr,
+ .destroy_cq = irdma_destroy_cq,
+ .destroy_qp = irdma_destroy_qp,
+ .disassociate_ucontext = irdma_disassociate_ucontext,
+ .get_dev_fw_str = irdma_get_dev_fw_str,
+ .get_dma_mr = irdma_get_dma_mr,
+ .get_hw_stats = irdma_get_hw_stats,
+ .map_mr_sg = irdma_map_mr_sg,
+ .mmap = irdma_mmap,
+ .mmap_free = irdma_mmap_free,
+ .poll_cq = irdma_poll_cq,
+ .post_recv = irdma_post_recv,
+ .post_send = irdma_post_send,
+ .query_device = irdma_query_device,
+ .query_port = irdma_query_port,
+ .query_qp = irdma_query_qp,
+ .reg_user_mr = irdma_reg_user_mr,
+ .reg_user_mr_dmabuf = irdma_reg_user_mr_dmabuf,
+ .rereg_user_mr = irdma_rereg_user_mr,
+ .req_notify_cq = irdma_req_notify_cq,
+ .resize_cq = irdma_resize_cq,
+ INIT_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, irdma_ucontext, ibucontext),
+ INIT_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_mw, irdma_mr, ibmw),
+ INIT_RDMA_OBJ_SIZE(ib_qp, irdma_qp, ibqp),
+ INIT_RDMA_OBJ_SIZE(ib_srq, irdma_srq, ibsrq),
+};
+
+/**
+ * irdma_init_roce_device - initialization of roce rdma device
+ * @iwdev: irdma device
+ */
+static void irdma_init_roce_device(struct irdma_device *iwdev)
+{
+ iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
+ addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
+ iwdev->netdev->dev_addr);
+ ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops);
+}
+
+/**
+ * irdma_init_iw_device - initialization of iwarp rdma device
+ * @iwdev: irdma device
+ */
+static void irdma_init_iw_device(struct irdma_device *iwdev)
+{
+ struct net_device *netdev = iwdev->netdev;
+
+ iwdev->ibdev.node_type = RDMA_NODE_RNIC;
+ addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
+ netdev->dev_addr);
+ memcpy(iwdev->ibdev.iw_ifname, netdev->name,
+ sizeof(iwdev->ibdev.iw_ifname));
+ ib_set_device_ops(&iwdev->ibdev, &irdma_iw_dev_ops);
+}
+
+/**
+ * irdma_init_rdma_device - initialization of rdma device
+ * @iwdev: irdma device
+ */
+static void irdma_init_rdma_device(struct irdma_device *iwdev)
+{
+ struct pci_dev *pcidev = iwdev->rf->pcidev;
+
+ if (iwdev->roce_mode)
+ irdma_init_roce_device(iwdev);
+ else
+ irdma_init_iw_device(iwdev);
+
+ iwdev->ibdev.phys_port_cnt = 1;
+ iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;
+ iwdev->ibdev.dev.parent = &pcidev->dev;
+ ib_set_device_ops(&iwdev->ibdev, &irdma_dev_ops);
+ if (iwdev->rf->rdma_ver == IRDMA_GEN_1)
+ ib_set_device_ops(&iwdev->ibdev, &irdma_gen1_dev_ops);
+ if (iwdev->rf->rdma_ver >= IRDMA_GEN_3)
+ ib_set_device_ops(&iwdev->ibdev, &irdma_gen3_dev_ops);
+}
+
+/**
+ * irdma_port_ibevent - indicate port event
+ * @iwdev: irdma device
+ */
+void irdma_port_ibevent(struct irdma_device *iwdev)
+{
+ struct ib_event event;
+
+ event.device = &iwdev->ibdev;
+ event.element.port_num = 1;
+ event.event =
+ iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+ ib_dispatch_event(&event);
+}
+
+/**
+ * irdma_ib_unregister_device - unregister rdma device from IB
+ * core
+ * @iwdev: irdma device
+ */
+void irdma_ib_unregister_device(struct irdma_device *iwdev)
+{
+ iwdev->iw_status = 0;
+ irdma_port_ibevent(iwdev);
+ ib_unregister_device(&iwdev->ibdev);
+}
+
+/**
+ * irdma_ib_register_device - register irdma device to IB core
+ * @iwdev: irdma device
+ */
+int irdma_ib_register_device(struct irdma_device *iwdev)
+{
+ int ret;
+
+ irdma_init_rdma_device(iwdev);
+
+ ret = ib_device_set_netdev(&iwdev->ibdev, iwdev->netdev, 1);
+ if (ret)
+ goto error;
+ dma_set_max_seg_size(iwdev->rf->hw.device, UINT_MAX);
+ ret = ib_register_device(&iwdev->ibdev, "irdma%d", iwdev->rf->hw.device);
+ if (ret)
+ goto error;
+
+ iwdev->iw_status = 1;
+ irdma_port_ibevent(iwdev);
+
+ return 0;
+
+error:
+ if (ret)
+ ibdev_dbg(&iwdev->ibdev, "VERBS: Register RDMA device fail\n");
+
+ return ret;
+}
+
+/**
+ * irdma_ib_dealloc_device - deallocate device resources
+ * @ibdev: ib device
+ *
+ * Callback from ibdev dealloc_driver to deallocate resources
+ * under the irdma device.
+ */
+void irdma_ib_dealloc_device(struct ib_device *ibdev)
+{
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+
+ irdma_rt_deinit_hw(iwdev);
+ if (!iwdev->is_vport) {
+ irdma_ctrl_deinit_hw(iwdev->rf);
+ if (iwdev->rf->vchnl_wq)
+ destroy_workqueue(iwdev->rf->vchnl_wq);
+ }
+}
diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
new file mode 100644
index 000000000000..ed21c1b56e8e
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/verbs.h
@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#ifndef IRDMA_VERBS_H
+#define IRDMA_VERBS_H
+
+#define IRDMA_MAX_SAVED_PHY_PGADDR 4
+#define IRDMA_FLUSH_DELAY_MS 20
+
+#define IRDMA_PKEY_TBL_SZ 1
+#define IRDMA_DEFAULT_PKEY 0xFFFF
+#define IRDMA_SHADOW_PGCNT 1
+
+struct irdma_ucontext {
+ struct ib_ucontext ibucontext;
+ struct irdma_device *iwdev;
+ struct rdma_user_mmap_entry *db_mmap_entry;
+ struct list_head cq_reg_mem_list;
+ spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
+ struct list_head qp_reg_mem_list;
+ spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
+ struct list_head srq_reg_mem_list;
+ spinlock_t srq_reg_mem_list_lock; /* protect SRQ memory list */
+ int abi_ver;
+ u8 legacy_mode : 1;
+ u8 use_raw_attrs : 1;
+};
+
+struct irdma_pd {
+ struct ib_pd ibpd;
+ struct irdma_sc_pd sc_pd;
+};
+
+union irdma_sockaddr {
+ struct sockaddr_in saddr_in;
+ struct sockaddr_in6 saddr_in6;
+};
+
+struct irdma_av {
+ u8 macaddr[16];
+ struct rdma_ah_attr attrs;
+ union irdma_sockaddr sgid_addr;
+ union irdma_sockaddr dgid_addr;
+ u8 net_type;
+};
+
+struct irdma_ah {
+ struct ib_ah ibah;
+ struct irdma_sc_ah sc_ah;
+ struct irdma_pd *pd;
+ struct irdma_av av;
+ u8 sgid_index;
+ union ib_gid dgid;
+ struct hlist_node list;
+ refcount_t refcnt;
+ struct irdma_ah *parent_ah; /* AH from cached list */
+};
+
+struct irdma_hmc_pble {
+ union {
+ u32 idx;
+ dma_addr_t addr;
+ };
+};
+
+struct irdma_cq_mr {
+ struct irdma_hmc_pble cq_pbl;
+ dma_addr_t shadow;
+ bool split;
+};
+
+struct irdma_srq_mr {
+ struct irdma_hmc_pble srq_pbl;
+ dma_addr_t shadow;
+};
+
+struct irdma_qp_mr {
+ struct irdma_hmc_pble sq_pbl;
+ struct irdma_hmc_pble rq_pbl;
+ dma_addr_t shadow;
+ dma_addr_t rq_pa;
+ struct page *sq_page;
+};
+
+struct irdma_cq_buf {
+ struct irdma_dma_mem kmem_buf;
+ struct irdma_cq_uk cq_uk;
+ struct irdma_hw *hw;
+ struct list_head list;
+ struct work_struct work;
+};
+
+struct irdma_pbl {
+ struct list_head list;
+ union {
+ struct irdma_qp_mr qp_mr;
+ struct irdma_cq_mr cq_mr;
+ struct irdma_srq_mr srq_mr;
+ };
+
+ bool pbl_allocated:1;
+ bool on_list:1;
+ u64 user_base;
+ struct irdma_pble_alloc pble_alloc;
+ struct irdma_mr *iwmr;
+};
+
+struct irdma_mr {
+ union {
+ struct ib_mr ibmr;
+ struct ib_mw ibmw;
+ };
+ struct ib_umem *region;
+ int access;
+ u8 is_hwreg;
+ u16 type;
+ u32 page_cnt;
+ u64 page_size;
+ u32 npages;
+ u32 stag;
+ u64 len;
+ u64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR];
+ struct irdma_pbl iwpbl;
+};
+
+struct irdma_srq {
+ struct ib_srq ibsrq;
+ struct irdma_sc_srq sc_srq __aligned(64);
+ struct irdma_dma_mem kmem;
+ u64 *srq_wrid_mem;
+ refcount_t refcnt;
+ spinlock_t lock; /* for poll srq */
+ struct irdma_pbl *iwpbl;
+ struct irdma_sge *sg_list;
+ u16 srq_head;
+ u32 srq_num;
+ u32 max_wr;
+ bool user_mode:1;
+};
+
+struct irdma_cq {
+ struct ib_cq ibcq;
+ struct irdma_sc_cq sc_cq;
+ u16 cq_num;
+ bool user_mode;
+ atomic_t armed;
+ enum irdma_cmpl_notify last_notify;
+ struct irdma_dma_mem kmem;
+ struct irdma_dma_mem kmem_shadow;
+ struct completion free_cq;
+ refcount_t refcnt;
+ spinlock_t lock; /* for poll cq */
+ struct list_head resize_list;
+ struct irdma_cq_poll_info cur_cqe;
+ struct list_head cmpl_generated;
+};
+
+struct irdma_cmpl_gen {
+ struct list_head list;
+ struct irdma_cq_poll_info cpi;
+};
+
+struct disconn_work {
+ struct work_struct work;
+ struct irdma_qp *iwqp;
+};
+
+struct iw_cm_id;
+
+struct irdma_qp_kmode {
+ struct irdma_dma_mem dma_mem;
+ struct irdma_sq_uk_wr_trk_info *sq_wrid_mem;
+ u64 *rq_wrid_mem;
+};
+
+struct irdma_qp {
+ struct ib_qp ibqp;
+ struct irdma_sc_qp sc_qp;
+ struct irdma_device *iwdev;
+ struct irdma_cq *iwscq;
+ struct irdma_cq *iwrcq;
+ struct irdma_pd *iwpd;
+ struct rdma_user_mmap_entry *push_wqe_mmap_entry;
+ struct rdma_user_mmap_entry *push_db_mmap_entry;
+ struct irdma_qp_host_ctx_info ctx_info;
+ union {
+ struct irdma_iwarp_offload_info iwarp_info;
+ struct irdma_roce_offload_info roce_info;
+ };
+
+ union {
+ struct irdma_tcp_offload_info tcp_info;
+ struct irdma_udp_offload_info udp_info;
+ };
+
+ struct irdma_ah roce_ah;
+ struct list_head teardown_entry;
+ refcount_t refcnt;
+ struct iw_cm_id *cm_id;
+ struct irdma_cm_node *cm_node;
+ struct delayed_work dwork_flush;
+ struct ib_mr *lsmm_mr;
+ atomic_t hw_mod_qp_pend;
+ enum ib_qp_state ibqp_state;
+ u32 qp_mem_size;
+ u32 last_aeq;
+ int max_send_wr;
+ int max_recv_wr;
+ atomic_t close_timer_started;
+ spinlock_t lock; /* serialize posting WRs to SQ/RQ */
+ struct irdma_qp_context *iwqp_context;
+ void *pbl_vbase;
+ dma_addr_t pbl_pbase;
+ struct page *page;
+ u8 active_conn : 1;
+ u8 user_mode : 1;
+ u8 hte_added : 1;
+ u8 flush_issued : 1;
+ u8 sig_all : 1;
+ u8 pau_mode : 1;
+ u8 suspend_pending : 1;
+ u8 rsvd : 1;
+ u8 iwarp_state;
+ u16 term_sq_flush_code;
+ u16 term_rq_flush_code;
+ u8 hw_iwarp_state;
+ u8 hw_tcp_state;
+ struct irdma_qp_kmode kqp;
+ struct irdma_dma_mem host_ctx;
+ struct timer_list terminate_timer;
+ struct irdma_pbl *iwpbl;
+ struct irdma_dma_mem q2_ctx_mem;
+ struct irdma_dma_mem ietf_mem;
+ struct completion free_qp;
+ wait_queue_head_t waitq;
+ wait_queue_head_t mod_qp_waitq;
+ u8 rts_ae_rcvd;
+};
+
+enum irdma_mmap_flag {
+ IRDMA_MMAP_IO_NC,
+ IRDMA_MMAP_IO_WC,
+};
+
+struct irdma_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ u64 bar_offset;
+ u8 mmap_flag;
+};
+
+static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
+{
+ return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
+}
+
+static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
+{
+ return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
+}
+
+static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
+ struct ib_wc *entry)
+{
+ switch (cq_poll_info->op_type) {
+ case IRDMA_OP_TYPE_RDMA_WRITE:
+ case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
+ entry->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
+ case IRDMA_OP_TYPE_RDMA_READ:
+ entry->opcode = IB_WC_RDMA_READ;
+ break;
+ case IRDMA_OP_TYPE_SEND_SOL:
+ case IRDMA_OP_TYPE_SEND_SOL_INV:
+ case IRDMA_OP_TYPE_SEND_INV:
+ case IRDMA_OP_TYPE_SEND:
+ entry->opcode = IB_WC_SEND;
+ break;
+ case IRDMA_OP_TYPE_FAST_REG_NSMR:
+ entry->opcode = IB_WC_REG_MR;
+ break;
+ case IRDMA_OP_TYPE_ATOMIC_COMPARE_AND_SWAP:
+ entry->opcode = IB_WC_COMP_SWAP;
+ break;
+ case IRDMA_OP_TYPE_ATOMIC_FETCH_AND_ADD:
+ entry->opcode = IB_WC_FETCH_ADD;
+ break;
+ case IRDMA_OP_TYPE_INV_STAG:
+ entry->opcode = IB_WC_LOCAL_INV;
+ break;
+ default:
+ entry->status = IB_WC_GENERAL_ERR;
+ }
+}
+
+static inline void set_ib_wc_op_rq_gen_3(struct irdma_cq_poll_info *info,
+ struct ib_wc *entry)
+{
+ switch (info->op_type) {
+ case IRDMA_OP_TYPE_RDMA_WRITE:
+ case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
+ entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ break;
+ default:
+ entry->opcode = IB_WC_RECV;
+ }
+}
+
+static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
+ struct ib_wc *entry, bool send_imm_support)
+{
+ /* iWARP does not support Send with Immediate, so the presence of
+ * immediate data must indicate RDMA Write with Immediate.
+ */
+ if (!send_imm_support) {
+ entry->opcode = cq_poll_info->imm_valid ?
+ IB_WC_RECV_RDMA_WITH_IMM :
+ IB_WC_RECV;
+ return;
+ }
+
+ switch (cq_poll_info->op_type) {
+ case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
+ case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
+ entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ break;
+ default:
+ entry->opcode = IB_WC_RECV;
+ }
+}
+
+void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4);
+int irdma_ib_register_device(struct irdma_device *iwdev);
+void irdma_ib_unregister_device(struct irdma_device *iwdev);
+void irdma_ib_dealloc_device(struct ib_device *ibdev);
+void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
+void irdma_generate_flush_completions(struct irdma_qp *iwqp);
+void irdma_remove_cmpls_list(struct irdma_cq *iwcq);
+int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info);
+#endif /* IRDMA_VERBS_H */
diff --git a/drivers/infiniband/hw/irdma/virtchnl.c b/drivers/infiniband/hw/irdma/virtchnl.c
new file mode 100644
index 000000000000..16ad27247527
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/virtchnl.c
@@ -0,0 +1,618 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2015 - 2024 Intel Corporation */
+
+#include "osdep.h"
+#include "hmc.h"
+#include "defs.h"
+#include "type.h"
+#include "protos.h"
+#include "virtchnl.h"
+#include "ws.h"
+#include "i40iw_hw.h"
+#include "ig3rdma_hw.h"
+
+struct vchnl_reg_map_elem {
+ u16 reg_id;
+ u16 reg_idx;
+ bool pg_rel;
+};
+
+struct vchnl_regfld_map_elem {
+ u16 regfld_id;
+ u16 regfld_idx;
+};
+
+static struct vchnl_reg_map_elem vchnl_reg_map[] = {
+ {IRDMA_VCHNL_REG_ID_CQPTAIL, IRDMA_CQPTAIL, false},
+ {IRDMA_VCHNL_REG_ID_CQPDB, IRDMA_CQPDB, false},
+ {IRDMA_VCHNL_REG_ID_CCQPSTATUS, IRDMA_CCQPSTATUS, false},
+ {IRDMA_VCHNL_REG_ID_CCQPHIGH, IRDMA_CCQPHIGH, false},
+ {IRDMA_VCHNL_REG_ID_CCQPLOW, IRDMA_CCQPLOW, false},
+ {IRDMA_VCHNL_REG_ID_CQARM, IRDMA_CQARM, false},
+ {IRDMA_VCHNL_REG_ID_CQACK, IRDMA_CQACK, false},
+ {IRDMA_VCHNL_REG_ID_AEQALLOC, IRDMA_AEQALLOC, false},
+ {IRDMA_VCHNL_REG_ID_CQPERRCODES, IRDMA_CQPERRCODES, false},
+ {IRDMA_VCHNL_REG_ID_WQEALLOC, IRDMA_WQEALLOC, false},
+ {IRDMA_VCHNL_REG_ID_DB_ADDR_OFFSET, IRDMA_DB_ADDR_OFFSET, false },
+ {IRDMA_VCHNL_REG_ID_DYN_CTL, IRDMA_GLINT_DYN_CTL, false },
+ {IRDMA_VCHNL_REG_INV_ID, IRDMA_VCHNL_REG_INV_ID, false }
+};
+
+static struct vchnl_regfld_map_elem vchnl_regfld_map[] = {
+ {IRDMA_VCHNL_REGFLD_ID_CCQPSTATUS_CQP_OP_ERR, IRDMA_CCQPSTATUS_CCQP_ERR_M},
+ {IRDMA_VCHNL_REGFLD_ID_CCQPSTATUS_CCQP_DONE, IRDMA_CCQPSTATUS_CCQP_DONE_M},
+ {IRDMA_VCHNL_REGFLD_ID_CQPSQ_STAG_PDID, IRDMA_CQPSQ_STAG_PDID_M},
+ {IRDMA_VCHNL_REGFLD_ID_CQPSQ_CQ_CEQID, IRDMA_CQPSQ_CQ_CEQID_M},
+ {IRDMA_VCHNL_REGFLD_ID_CQPSQ_CQ_CQID, IRDMA_CQPSQ_CQ_CQID_M},
+ {IRDMA_VCHNL_REGFLD_ID_COMMIT_FPM_CQCNT, IRDMA_COMMIT_FPM_CQCNT_M},
+ {IRDMA_VCHNL_REGFLD_ID_UPESD_HMCN_ID, IRDMA_CQPSQ_UPESD_HMCFNID_M},
+ {IRDMA_VCHNL_REGFLD_INV_ID, IRDMA_VCHNL_REGFLD_INV_ID}
+};
+
+#define IRDMA_VCHNL_REG_COUNT ARRAY_SIZE(vchnl_reg_map)
+#define IRDMA_VCHNL_REGFLD_COUNT ARRAY_SIZE(vchnl_regfld_map)
+#define IRDMA_VCHNL_REGFLD_BUF_SIZE \
+ (IRDMA_VCHNL_REG_COUNT * sizeof(struct irdma_vchnl_reg_info) + \
+ IRDMA_VCHNL_REGFLD_COUNT * sizeof(struct irdma_vchnl_reg_field_info))
+#define IRDMA_REGMAP_RESP_BUF_SIZE (IRDMA_VCHNL_RESP_MIN_SIZE + IRDMA_VCHNL_REGFLD_BUF_SIZE)
+
+/**
+ * irdma_sc_vchnl_init - Initialize dev virtchannel and get hw_rev
+ * @dev: dev structure to update
+ * @info: virtchannel info parameters to fill into the dev structure
+ */
+int irdma_sc_vchnl_init(struct irdma_sc_dev *dev,
+ struct irdma_vchnl_init_info *info)
+{
+ dev->vchnl_up = true;
+ dev->privileged = info->privileged;
+ dev->is_pf = info->is_pf;
+ dev->hw_attrs.uk_attrs.hw_rev = info->hw_rev;
+
+ if (!dev->privileged) {
+ int ret = irdma_vchnl_req_get_ver(dev, IRDMA_VCHNL_CHNL_VER_MAX,
+ &dev->vchnl_ver);
+
+ ibdev_dbg(to_ibdev(dev),
+ "DEV: Get Channel version ret = %d, version is %u\n",
+ ret, dev->vchnl_ver);
+
+ if (ret)
+ return ret;
+
+ ret = irdma_vchnl_req_get_caps(dev);
+ if (ret)
+ return ret;
+
+ dev->hw_attrs.uk_attrs.hw_rev = dev->vc_caps.hw_rev;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_vchnl_req_verify_resp - Verify requested response size
+ * @vchnl_req: vchnl message requested
+ * @resp_len: response length sent from vchnl peer
+ */
+static int irdma_vchnl_req_verify_resp(struct irdma_vchnl_req *vchnl_req,
+ u16 resp_len)
+{
+ switch (vchnl_req->vchnl_msg->op_code) {
+ case IRDMA_VCHNL_OP_GET_VER:
+ case IRDMA_VCHNL_OP_GET_HMC_FCN:
+ case IRDMA_VCHNL_OP_PUT_HMC_FCN:
+ if (resp_len != vchnl_req->parm_len)
+ return -EBADMSG;
+ break;
+ case IRDMA_VCHNL_OP_GET_RDMA_CAPS:
+ if (resp_len < IRDMA_VCHNL_OP_GET_RDMA_CAPS_MIN_SIZE)
+ return -EBADMSG;
+ break;
+ case IRDMA_VCHNL_OP_GET_REG_LAYOUT:
+ case IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP:
+ case IRDMA_VCHNL_OP_QUEUE_VECTOR_UNMAP:
+ case IRDMA_VCHNL_OP_ADD_VPORT:
+ case IRDMA_VCHNL_OP_DEL_VPORT:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void irdma_free_vchnl_req_msg(struct irdma_vchnl_req *vchnl_req)
+{
+ kfree(vchnl_req->vchnl_msg);
+}
+
+static int irdma_alloc_vchnl_req_msg(struct irdma_vchnl_req *vchnl_req,
+ struct irdma_vchnl_req_init_info *info)
+{
+ struct irdma_vchnl_op_buf *vchnl_msg;
+
+ vchnl_msg = kzalloc(IRDMA_VCHNL_MAX_MSG_SIZE, GFP_KERNEL);
+
+ if (!vchnl_msg)
+ return -ENOMEM;
+
+ vchnl_msg->op_ctx = (uintptr_t)vchnl_req;
+ vchnl_msg->buf_len = sizeof(*vchnl_msg) + info->req_parm_len;
+ if (info->req_parm_len)
+ memcpy(vchnl_msg->buf, info->req_parm, info->req_parm_len);
+ vchnl_msg->op_code = info->op_code;
+ vchnl_msg->op_ver = info->op_ver;
+
+ vchnl_req->vchnl_msg = vchnl_msg;
+ vchnl_req->parm = info->resp_parm;
+ vchnl_req->parm_len = info->resp_parm_len;
+
+ return 0;
+}
+
+static int irdma_vchnl_req_send_sync(struct irdma_sc_dev *dev,
+ struct irdma_vchnl_req_init_info *info)
+{
+ u16 resp_len = sizeof(dev->vc_recv_buf);
+ struct irdma_vchnl_req vchnl_req = {};
+ u16 msg_len;
+ u8 *msg;
+ int ret;
+
+ ret = irdma_alloc_vchnl_req_msg(&vchnl_req, info);
+ if (ret)
+ return ret;
+
+ msg_len = vchnl_req.vchnl_msg->buf_len;
+ msg = (u8 *)vchnl_req.vchnl_msg;
+
+ mutex_lock(&dev->vchnl_mutex);
+ ret = ig3rdma_vchnl_send_sync(dev, msg, msg_len, dev->vc_recv_buf,
+ &resp_len);
+ dev->vc_recv_len = resp_len;
+ if (ret)
+ goto exit;
+
+ ret = irdma_vchnl_req_get_resp(dev, &vchnl_req);
+exit:
+ mutex_unlock(&dev->vchnl_mutex);
+ ibdev_dbg(to_ibdev(dev),
+ "VIRT: virtual channel send %s caller: %pS ret=%d op=%u op_ver=%u req_len=%u parm_len=%u resp_len=%u\n",
+ !ret ? "SUCCEEDS" : "FAILS", __builtin_return_address(0),
+ ret, vchnl_req.vchnl_msg->op_code,
+ vchnl_req.vchnl_msg->op_ver, vchnl_req.vchnl_msg->buf_len,
+ vchnl_req.parm_len, vchnl_req.resp_len);
+ irdma_free_vchnl_req_msg(&vchnl_req);
+
+ return ret;
+}
+
+/**
+ * irdma_vchnl_req_get_reg_layout - Get Register Layout
+ * @dev: RDMA device pointer
+ */
+int irdma_vchnl_req_get_reg_layout(struct irdma_sc_dev *dev)
+{
+ u16 reg_idx, reg_id, tmp_reg_id, regfld_idx, regfld_id, tmp_regfld_id;
+ struct irdma_vchnl_reg_field_info *regfld_array = NULL;
+ u8 resp_buffer[IRDMA_REGMAP_RESP_BUF_SIZE] = {};
+ struct vchnl_regfld_map_elem *regfld_map_array;
+ struct irdma_vchnl_req_init_info info = {};
+ struct vchnl_reg_map_elem *reg_map_array;
+ struct irdma_vchnl_reg_info *reg_array;
+ u8 num_bits, shift_cnt;
+ u16 buf_len = 0;
+ u64 bitmask;
+ u32 rindex;
+ int ret;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_GET_REG_LAYOUT;
+ info.op_ver = IRDMA_VCHNL_OP_GET_REG_LAYOUT_V0;
+ info.resp_parm = resp_buffer;
+ info.resp_parm_len = sizeof(resp_buffer);
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+
+ if (ret)
+ return ret;
+
+ /* Parse the response buffer and update reg info:
+ * walk registers until an invalid ID, then walk
+ * register fields until an invalid ID.
+ */
+ reg_array = (struct irdma_vchnl_reg_info *)resp_buffer;
+ for (rindex = 0; rindex < IRDMA_VCHNL_REG_COUNT; rindex++) {
+ buf_len += sizeof(struct irdma_vchnl_reg_info);
+ if (buf_len >= sizeof(resp_buffer))
+ return -ENOMEM;
+
+ regfld_array =
+ (struct irdma_vchnl_reg_field_info *)&reg_array[rindex + 1];
+ reg_id = reg_array[rindex].reg_id;
+ if (reg_id == IRDMA_VCHNL_REG_INV_ID)
+ break;
+
+ reg_id &= ~IRDMA_VCHNL_REG_PAGE_REL;
+ if (reg_id >= IRDMA_VCHNL_REG_COUNT)
+ return -EINVAL;
+
+ /* search regmap for register index in hw_regs. */
+ reg_map_array = vchnl_reg_map;
+ do {
+ tmp_reg_id = reg_map_array->reg_id;
+ if (tmp_reg_id == reg_id)
+ break;
+
+ reg_map_array++;
+ } while (tmp_reg_id != IRDMA_VCHNL_REG_INV_ID);
+ if (tmp_reg_id != reg_id)
+ continue;
+
+ reg_idx = reg_map_array->reg_idx;
+
+ /* Page-relative registers and the DB offset do not need the BAR offset */
+ if (reg_idx == IRDMA_DB_ADDR_OFFSET ||
+ (reg_array[rindex].reg_id & IRDMA_VCHNL_REG_PAGE_REL)) {
+ dev->hw_regs[reg_idx] =
+ (u32 __iomem *)(uintptr_t)reg_array[rindex].reg_offset;
+ continue;
+ }
+
+ /* Update the local HW struct */
+ dev->hw_regs[reg_idx] = ig3rdma_get_reg_addr(dev->hw,
+ reg_array[rindex].reg_offset);
+ if (!dev->hw_regs[reg_idx])
+ return -EINVAL;
+ }
+
+ if (!regfld_array)
+ return -ENOMEM;
+
+ /* set up doorbell variables using mapped DB page */
+ dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
+ dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
+ dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
+ dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
+ dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
+
+ for (rindex = 0; rindex < IRDMA_VCHNL_REGFLD_COUNT; rindex++) {
+ buf_len += sizeof(struct irdma_vchnl_reg_field_info);
+ if ((buf_len - 1) > sizeof(resp_buffer))
+ break;
+
+ if (regfld_array[rindex].fld_id == IRDMA_VCHNL_REGFLD_INV_ID)
+ break;
+
+ regfld_id = regfld_array[rindex].fld_id;
+ regfld_map_array = vchnl_regfld_map;
+ do {
+ tmp_regfld_id = regfld_map_array->regfld_id;
+ if (tmp_regfld_id == regfld_id)
+ break;
+
+ regfld_map_array++;
+ } while (tmp_regfld_id != IRDMA_VCHNL_REGFLD_INV_ID);
+
+ if (tmp_regfld_id != regfld_id)
+ continue;
+
+ regfld_idx = regfld_map_array->regfld_idx;
+
+ num_bits = regfld_array[rindex].fld_bits;
+ shift_cnt = regfld_array[rindex].fld_shift;
+ if ((num_bits + shift_cnt > 64) || !num_bits) {
+ ibdev_dbg(to_ibdev(dev),
+ "ERR: Invalid field mask id %d bits %d shift %d",
+ regfld_id, num_bits, shift_cnt);
+
+ continue;
+ }
+
+ bitmask = (1ULL << num_bits) - 1;
+ dev->hw_masks[regfld_idx] = bitmask << shift_cnt;
+ dev->hw_shifts[regfld_idx] = shift_cnt;
+ }
+
+ return 0;
+}
+
+int irdma_vchnl_req_add_vport(struct irdma_sc_dev *dev, u16 vport_id,
+ u32 qp1_id, struct irdma_qos *qos)
+{
+ struct irdma_vchnl_resp_vport_info resp_vport = { 0 };
+ struct irdma_vchnl_req_vport_info req_vport = { 0 };
+ struct irdma_vchnl_req_init_info info = { 0 };
+ int ret, i;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_ADD_VPORT;
+ info.op_ver = IRDMA_VCHNL_OP_ADD_VPORT_V0;
+ req_vport.vport_id = vport_id;
+ req_vport.qp1_id = qp1_id;
+ info.req_parm_len = sizeof(req_vport);
+ info.req_parm = &req_vport;
+ info.resp_parm = &resp_vport;
+ info.resp_parm_len = sizeof(resp_vport);
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ qos[i].qs_handle = resp_vport.qs_handle[i];
+ qos[i].valid = true;
+ }
+
+ return 0;
+}
+
+int irdma_vchnl_req_del_vport(struct irdma_sc_dev *dev, u16 vport_id, u32 qp1_id)
+{
+ struct irdma_vchnl_req_init_info info = { 0 };
+ struct irdma_vchnl_req_vport_info req_vport = { 0 };
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_DEL_VPORT;
+ info.op_ver = IRDMA_VCHNL_OP_DEL_VPORT_V0;
+ req_vport.vport_id = vport_id;
+ req_vport.qp1_id = qp1_id;
+ info.req_parm_len = sizeof(req_vport);
+ info.req_parm = &req_vport;
+
+ return irdma_vchnl_req_send_sync(dev, &info);
+}
+
+/**
+ * irdma_vchnl_req_aeq_vec_map - Map AEQ to vector on this function
+ * @dev: RDMA device pointer
+ * @v_idx: vector index
+ */
+int irdma_vchnl_req_aeq_vec_map(struct irdma_sc_dev *dev, u32 v_idx)
+{
+ struct irdma_vchnl_req_init_info info = {};
+ struct irdma_vchnl_qvlist_info *qvl;
+ struct irdma_vchnl_qv_info *qv;
+ u16 qvl_size, num_vectors = 1;
+ int ret;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ qvl_size = struct_size(qvl, qv_info, num_vectors);
+
+ qvl = kzalloc(qvl_size, GFP_KERNEL);
+ if (!qvl)
+ return -ENOMEM;
+
+ qvl->num_vectors = 1;
+ qv = qvl->qv_info;
+
+ qv->ceq_idx = IRDMA_Q_INVALID_IDX;
+ qv->v_idx = v_idx;
+ qv->itr_idx = IRDMA_IDX_ITR0;
+
+ info.op_code = IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP;
+ info.op_ver = IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP_V0;
+ info.req_parm = qvl;
+ info.req_parm_len = qvl_size;
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+ kfree(qvl);
+
+ return ret;
+}
+
+/**
+ * irdma_vchnl_req_ceq_vec_map - Map CEQ to vector on this function
+ * @dev: RDMA device pointer
+ * @ceq_id: CEQ index
+ * @v_idx: vector index
+ */
+int irdma_vchnl_req_ceq_vec_map(struct irdma_sc_dev *dev, u16 ceq_id, u32 v_idx)
+{
+ struct irdma_vchnl_req_init_info info = {};
+ struct irdma_vchnl_qvlist_info *qvl;
+ struct irdma_vchnl_qv_info *qv;
+ u16 qvl_size, num_vectors = 1;
+ int ret;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ qvl_size = struct_size(qvl, qv_info, num_vectors);
+
+ qvl = kzalloc(qvl_size, GFP_KERNEL);
+ if (!qvl)
+ return -ENOMEM;
+
+ qvl->num_vectors = num_vectors;
+ qv = qvl->qv_info;
+
+ qv->aeq_idx = IRDMA_Q_INVALID_IDX;
+ qv->ceq_idx = ceq_id;
+ qv->v_idx = v_idx;
+ qv->itr_idx = IRDMA_IDX_ITR0;
+
+ info.op_code = IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP;
+ info.op_ver = IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP_V0;
+ info.req_parm = qvl;
+ info.req_parm_len = qvl_size;
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+ kfree(qvl);
+
+ return ret;
+}
+
+/**
+ * irdma_vchnl_req_get_ver - Request Channel version
+ * @dev: RDMA device pointer
+ * @ver_req: Virtual channel version requested
+ * @ver_res: Virtual channel version response
+ */
+int irdma_vchnl_req_get_ver(struct irdma_sc_dev *dev, u16 ver_req, u32 *ver_res)
+{
+ struct irdma_vchnl_req_init_info info = {};
+ int ret;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_GET_VER;
+ info.op_ver = ver_req;
+ info.resp_parm = ver_res;
+ info.resp_parm_len = sizeof(*ver_res);
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+ if (ret)
+ return ret;
+
+ if (*ver_res < IRDMA_VCHNL_CHNL_VER_MIN) {
+ ibdev_dbg(to_ibdev(dev),
+ "VIRT: %s unsupported vchnl version 0x%0x\n",
+ __func__, *ver_res);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_vchnl_req_get_hmc_fcn - Request VF HMC Function
+ * @dev: RDMA device pointer
+ */
+int irdma_vchnl_req_get_hmc_fcn(struct irdma_sc_dev *dev)
+{
+ struct irdma_vchnl_req_hmc_info req_hmc = {};
+ struct irdma_vchnl_resp_hmc_info resp_hmc = {};
+ struct irdma_vchnl_req_init_info info = {};
+ int ret;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_GET_HMC_FCN;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ info.op_ver = IRDMA_VCHNL_OP_GET_HMC_FCN_V2;
+ req_hmc.protocol_used = dev->protocol_used;
+ info.req_parm_len = sizeof(req_hmc);
+ info.req_parm = &req_hmc;
+ info.resp_parm = &resp_hmc;
+ info.resp_parm_len = sizeof(resp_hmc);
+ }
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+
+ if (ret)
+ return ret;
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ int i;
+
+ dev->hmc_fn_id = resp_hmc.hmc_func;
+
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ dev->qos[i].qs_handle = resp_hmc.qs_handle[i];
+ dev->qos[i].valid = true;
+ }
+ }
+ return 0;
+}
+
+/**
+ * irdma_vchnl_req_put_hmc_fcn - Free VF HMC Function
+ * @dev: RDMA device pointer
+ */
+int irdma_vchnl_req_put_hmc_fcn(struct irdma_sc_dev *dev)
+{
+ struct irdma_vchnl_req_init_info info = {};
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_PUT_HMC_FCN;
+ info.op_ver = IRDMA_VCHNL_OP_PUT_HMC_FCN_V0;
+
+ return irdma_vchnl_req_send_sync(dev, &info);
+}
+
+/**
+ * irdma_vchnl_req_get_caps - Request RDMA capabilities
+ * @dev: RDMA device pointer
+ */
+int irdma_vchnl_req_get_caps(struct irdma_sc_dev *dev)
+{
+ struct irdma_vchnl_req_init_info info = {};
+ int ret;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_GET_RDMA_CAPS;
+ info.op_ver = IRDMA_VCHNL_OP_GET_RDMA_CAPS_V0;
+ info.resp_parm = &dev->vc_caps;
+ info.resp_parm_len = sizeof(dev->vc_caps);
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+
+ if (ret)
+ return ret;
+
+ if (dev->vc_caps.hw_rev > IRDMA_GEN_MAX ||
+ dev->vc_caps.hw_rev < IRDMA_GEN_2) {
+ ibdev_dbg(to_ibdev(dev),
+ "ERR: %s unsupported hw_rev version 0x%0x\n",
+ __func__, dev->vc_caps.hw_rev);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_vchnl_req_get_resp - Receive the inbound vchnl response.
+ * @dev: Dev pointer
+ * @vchnl_req: Vchannel request
+ */
+int irdma_vchnl_req_get_resp(struct irdma_sc_dev *dev,
+ struct irdma_vchnl_req *vchnl_req)
+{
+ struct irdma_vchnl_resp_buf *vchnl_msg_resp =
+ (struct irdma_vchnl_resp_buf *)dev->vc_recv_buf;
+ u16 resp_len;
+ int ret;
+
+ if ((uintptr_t)vchnl_req != (uintptr_t)vchnl_msg_resp->op_ctx) {
+ ibdev_dbg(to_ibdev(dev),
+ "VIRT: error vchnl context value does not match\n");
+ return -EBADMSG;
+ }
+
+ resp_len = dev->vc_recv_len - sizeof(*vchnl_msg_resp);
+ resp_len = min(resp_len, vchnl_req->parm_len);
+
+ ret = irdma_vchnl_req_verify_resp(vchnl_req, resp_len);
+ if (ret)
+ return ret;
+
+ ret = (int)vchnl_msg_resp->op_ret;
+ if (ret)
+ return ret;
+
+ vchnl_req->resp_len = 0;
+ if (vchnl_req->parm_len && vchnl_req->parm && resp_len) {
+ memcpy(vchnl_req->parm, vchnl_msg_resp->buf, resp_len);
+ vchnl_req->resp_len = resp_len;
+ ibdev_dbg(to_ibdev(dev), "VIRT: Got response, data size %u\n",
+ resp_len);
+ }
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/irdma/virtchnl.h b/drivers/infiniband/hw/irdma/virtchnl.h
new file mode 100644
index 000000000000..aa955a9125bd
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/virtchnl.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2015 - 2024 Intel Corporation */
+#ifndef IRDMA_VIRTCHNL_H
+#define IRDMA_VIRTCHNL_H
+
+#include "hmc.h"
+#include "irdma.h"
+
+/* IRDMA_VCHNL_CHNL_VER_V0 is for legacy hw, no longer supported. */
+#define IRDMA_VCHNL_CHNL_VER_V2 2
+#define IRDMA_VCHNL_CHNL_VER_MIN IRDMA_VCHNL_CHNL_VER_V2
+#define IRDMA_VCHNL_CHNL_VER_MAX IRDMA_VCHNL_CHNL_VER_V2
+#define IRDMA_VCHNL_OP_GET_HMC_FCN_V0 0
+#define IRDMA_VCHNL_OP_GET_HMC_FCN_V1 1
+#define IRDMA_VCHNL_OP_GET_HMC_FCN_V2 2
+#define IRDMA_VCHNL_OP_PUT_HMC_FCN_V0 0
+#define IRDMA_VCHNL_OP_GET_REG_LAYOUT_V0 0
+#define IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP_V0 0
+#define IRDMA_VCHNL_OP_QUEUE_VECTOR_UNMAP_V0 0
+#define IRDMA_VCHNL_OP_ADD_VPORT_V0 0
+#define IRDMA_VCHNL_OP_DEL_VPORT_V0 0
+#define IRDMA_VCHNL_OP_GET_RDMA_CAPS_V0 0
+#define IRDMA_VCHNL_OP_GET_RDMA_CAPS_MIN_SIZE 1
+
+#define IRDMA_VCHNL_REG_ID_CQPTAIL 0
+#define IRDMA_VCHNL_REG_ID_CQPDB 1
+#define IRDMA_VCHNL_REG_ID_CCQPSTATUS 2
+#define IRDMA_VCHNL_REG_ID_CCQPHIGH 3
+#define IRDMA_VCHNL_REG_ID_CCQPLOW 4
+#define IRDMA_VCHNL_REG_ID_CQARM 5
+#define IRDMA_VCHNL_REG_ID_CQACK 6
+#define IRDMA_VCHNL_REG_ID_AEQALLOC 7
+#define IRDMA_VCHNL_REG_ID_CQPERRCODES 8
+#define IRDMA_VCHNL_REG_ID_WQEALLOC 9
+#define IRDMA_VCHNL_REG_ID_IPCONFIG0 10
+#define IRDMA_VCHNL_REG_ID_DB_ADDR_OFFSET 11
+#define IRDMA_VCHNL_REG_ID_DYN_CTL 12
+#define IRDMA_VCHNL_REG_ID_AEQITRMASK 13
+#define IRDMA_VCHNL_REG_ID_CEQITRMASK 14
+#define IRDMA_VCHNL_REG_INV_ID 0xFFFF
+#define IRDMA_VCHNL_REG_PAGE_REL 0x8000
+
+#define IRDMA_VCHNL_REGFLD_ID_CCQPSTATUS_CQP_OP_ERR 2
+#define IRDMA_VCHNL_REGFLD_ID_CCQPSTATUS_CCQP_DONE 5
+#define IRDMA_VCHNL_REGFLD_ID_CQPSQ_STAG_PDID 6
+#define IRDMA_VCHNL_REGFLD_ID_CQPSQ_CQ_CEQID 7
+#define IRDMA_VCHNL_REGFLD_ID_CQPSQ_CQ_CQID 8
+#define IRDMA_VCHNL_REGFLD_ID_COMMIT_FPM_CQCNT 9
+#define IRDMA_VCHNL_REGFLD_ID_UPESD_HMCN_ID 10
+#define IRDMA_VCHNL_REGFLD_INV_ID 0xFFFF
+
+#define IRDMA_VCHNL_RESP_MIN_SIZE (sizeof(struct irdma_vchnl_resp_buf))
+
+enum irdma_vchnl_ops {
+ IRDMA_VCHNL_OP_GET_VER = 0,
+ IRDMA_VCHNL_OP_GET_HMC_FCN = 1,
+ IRDMA_VCHNL_OP_PUT_HMC_FCN = 2,
+ IRDMA_VCHNL_OP_GET_REG_LAYOUT = 11,
+ IRDMA_VCHNL_OP_GET_RDMA_CAPS = 13,
+ IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP = 14,
+ IRDMA_VCHNL_OP_QUEUE_VECTOR_UNMAP = 15,
+ IRDMA_VCHNL_OP_ADD_VPORT = 16,
+ IRDMA_VCHNL_OP_DEL_VPORT = 17,
+};
+
+struct irdma_vchnl_req_hmc_info {
+ u8 protocol_used;
+ u8 disable_qos;
+} __packed;
+
+struct irdma_vchnl_resp_hmc_info {
+ u16 hmc_func;
+ u16 qs_handle[IRDMA_MAX_USER_PRIORITY];
+} __packed;
+
+struct irdma_vchnl_qv_info {
+ u32 v_idx;
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+struct irdma_vchnl_qvlist_info {
+ u32 num_vectors;
+ struct irdma_vchnl_qv_info qv_info[];
+};
+
+struct irdma_vchnl_req_vport_info {
+ u16 vport_id;
+ u32 qp1_id;
+};
+
+struct irdma_vchnl_resp_vport_info {
+ u16 qs_handle[IRDMA_MAX_USER_PRIORITY];
+};
+
+struct irdma_vchnl_op_buf {
+ u16 op_code;
+ u16 op_ver;
+ u16 buf_len;
+ u16 rsvd;
+ u64 op_ctx;
+ u8 buf[];
+} __packed;
+
+struct irdma_vchnl_resp_buf {
+ u64 op_ctx;
+ u16 buf_len;
+ s16 op_ret;
+ u16 rsvd[2];
+ u8 buf[];
+} __packed;
+
+struct irdma_vchnl_rdma_caps {
+ u8 hw_rev;
+ u16 cqp_timeout_s;
+ u16 cqp_def_timeout_s;
+ u16 max_hw_push_len;
+} __packed;
+
+struct irdma_vchnl_init_info {
+ struct workqueue_struct *vchnl_wq;
+ enum irdma_vers hw_rev;
+ bool privileged;
+ bool is_pf;
+};
+
+struct irdma_vchnl_reg_info {
+ u32 reg_offset;
+ u16 field_cnt;
+ u16 reg_id; /* High bit of reg_id: bar or page relative */
+};
+
+struct irdma_vchnl_reg_field_info {
+ u8 fld_shift;
+ u8 fld_bits;
+ u16 fld_id;
+};
+
+struct irdma_vchnl_req {
+ struct irdma_vchnl_op_buf *vchnl_msg;
+ void *parm;
+ u32 vf_id;
+ u16 parm_len;
+ u16 resp_len;
+};
+
+struct irdma_vchnl_req_init_info {
+ void *req_parm;
+ void *resp_parm;
+ u16 req_parm_len;
+ u16 resp_parm_len;
+ u16 op_code;
+ u16 op_ver;
+} __packed;
+
+struct irdma_qos;
+
+int irdma_sc_vchnl_init(struct irdma_sc_dev *dev,
+ struct irdma_vchnl_init_info *info);
+int irdma_vchnl_req_get_ver(struct irdma_sc_dev *dev, u16 ver_req,
+ u32 *ver_res);
+int irdma_vchnl_req_get_hmc_fcn(struct irdma_sc_dev *dev);
+int irdma_vchnl_req_put_hmc_fcn(struct irdma_sc_dev *dev);
+int irdma_vchnl_req_get_caps(struct irdma_sc_dev *dev);
+int irdma_vchnl_req_get_resp(struct irdma_sc_dev *dev,
+ struct irdma_vchnl_req *vc_req);
+int irdma_vchnl_req_get_reg_layout(struct irdma_sc_dev *dev);
+int irdma_vchnl_req_aeq_vec_map(struct irdma_sc_dev *dev, u32 v_idx);
+int irdma_vchnl_req_ceq_vec_map(struct irdma_sc_dev *dev, u16 ceq_id,
+ u32 v_idx);
+int irdma_vchnl_req_add_vport(struct irdma_sc_dev *dev, u16 vport_id,
+ u32 qp1_id, struct irdma_qos *qos);
+int irdma_vchnl_req_del_vport(struct irdma_sc_dev *dev, u16 vport_id,
+ u32 qp1_id);
+#endif /* IRDMA_VIRTCHNL_H */
diff --git a/drivers/infiniband/hw/irdma/ws.c b/drivers/infiniband/hw/irdma/ws.c
new file mode 100644
index 000000000000..542bc0b1bb03
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/ws.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2017 - 2021 Intel Corporation */
+#include "osdep.h"
+#include "hmc.h"
+#include "defs.h"
+#include "type.h"
+#include "protos.h"
+
+#include "ws.h"
+
+/**
+ * irdma_alloc_node - Allocate and initialize a WS node
+ * @vsi: vsi pointer
+ * @user_pri: user priority
+ * @node_type: Type of node, leaf or parent
+ * @parent: parent node pointer
+ */
+static struct irdma_ws_node *irdma_alloc_node(struct irdma_sc_vsi *vsi,
+ u8 user_pri,
+ enum irdma_ws_node_type node_type,
+ struct irdma_ws_node *parent)
+{
+ struct irdma_virt_mem ws_mem;
+ struct irdma_ws_node *node;
+ u16 node_index = 0;
+
+ ws_mem.size = sizeof(struct irdma_ws_node);
+ ws_mem.va = kzalloc(ws_mem.size, GFP_KERNEL);
+ if (!ws_mem.va)
+ return NULL;
+
+ if (parent) {
+ node_index = irdma_alloc_ws_node_id(vsi->dev);
+ if (node_index == IRDMA_WS_NODE_INVALID) {
+ kfree(ws_mem.va);
+ return NULL;
+ }
+ }
+
+ node = ws_mem.va;
+ node->index = node_index;
+ node->vsi_index = vsi->vsi_idx;
+ INIT_LIST_HEAD(&node->child_list_head);
+ if (node_type == WS_NODE_TYPE_LEAF) {
+ node->type_leaf = true;
+ node->traffic_class = vsi->qos[user_pri].traffic_class;
+ node->user_pri = user_pri;
+ node->rel_bw = vsi->qos[user_pri].rel_bw;
+ if (!node->rel_bw)
+ node->rel_bw = 1;
+
+ node->lan_qs_handle = vsi->qos[user_pri].lan_qos_handle;
+ node->prio_type = IRDMA_PRIO_WEIGHTED_RR;
+ } else {
+ node->rel_bw = 1;
+ node->prio_type = IRDMA_PRIO_WEIGHTED_RR;
+ node->enable = true;
+ }
+
+ node->parent = parent;
+
+ return node;
+}
+
+/**
+ * irdma_free_node - Free a WS node
+ * @vsi: VSI structure of device
+ * @node: Pointer to node to free
+ */
+static void irdma_free_node(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *node)
+{
+ struct irdma_virt_mem ws_mem;
+
+ if (node->index)
+ irdma_free_ws_node_id(vsi->dev, node->index);
+
+ ws_mem.va = node;
+ ws_mem.size = sizeof(struct irdma_ws_node);
+ kfree(ws_mem.va);
+}
+
+/**
+ * irdma_ws_cqp_cmd - Post CQP work scheduler node cmd
+ * @vsi: vsi pointer
+ * @node: pointer to node
+ * @cmd: add, remove or modify
+ */
+static int irdma_ws_cqp_cmd(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *node, u8 cmd)
+{
+ struct irdma_ws_node_info node_info = {};
+
+ node_info.id = node->index;
+ node_info.vsi = node->vsi_index;
+ if (node->parent)
+ node_info.parent_id = node->parent->index;
+ else
+ node_info.parent_id = node_info.id;
+
+ node_info.weight = node->rel_bw;
+ node_info.tc = node->traffic_class;
+ node_info.prio_type = node->prio_type;
+ node_info.type_leaf = node->type_leaf;
+ node_info.enable = node->enable;
+ if (irdma_cqp_ws_node_cmd(vsi->dev, cmd, &node_info)) {
+ ibdev_dbg(to_ibdev(vsi->dev), "WS: CQP WS CMD failed\n");
+ return -ENOMEM;
+ }
+
+ if (node->type_leaf && cmd == IRDMA_OP_WS_ADD_NODE) {
+ node->qs_handle = node_info.qs_handle;
+ vsi->qos[node->user_pri].qs_handle = node_info.qs_handle;
+ }
+
+ return 0;
+}
+
+/**
+ * ws_find_node - Find SC WS node based on VSI id or TC
+ * @parent: parent node of First VSI or TC node
+ * @match_val: value to match
+ * @type: match type VSI/TC
+ */
+static struct irdma_ws_node *ws_find_node(struct irdma_ws_node *parent,
+ u16 match_val,
+ enum irdma_ws_match_type type)
+{
+ struct irdma_ws_node *node;
+
+ switch (type) {
+ case WS_MATCH_TYPE_VSI:
+ list_for_each_entry(node, &parent->child_list_head, siblings) {
+ if (node->vsi_index == match_val)
+ return node;
+ }
+ break;
+ case WS_MATCH_TYPE_TC:
+ list_for_each_entry(node, &parent->child_list_head, siblings) {
+ if (node->traffic_class == match_val)
+ return node;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return NULL;
+}
+
+/**
+ * irdma_tc_in_use - Checks to see if a leaf node is in use
+ * @vsi: vsi pointer
+ * @user_pri: user priority
+ */
+static bool irdma_tc_in_use(struct irdma_sc_vsi *vsi, u8 user_pri)
+{
+ int i;
+
+ mutex_lock(&vsi->qos[user_pri].qos_mutex);
+ if (!list_empty(&vsi->qos[user_pri].qplist)) {
+ mutex_unlock(&vsi->qos[user_pri].qos_mutex);
+ return true;
+ }
+
+ /* Check if the traffic class associated with the given user priority
+ * is in use by any other user priority. If so, nothing left to do
+ */
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ if (vsi->qos[i].traffic_class == vsi->qos[user_pri].traffic_class &&
+ !list_empty(&vsi->qos[i].qplist)) {
+ mutex_unlock(&vsi->qos[user_pri].qos_mutex);
+ return true;
+ }
+ }
+ mutex_unlock(&vsi->qos[user_pri].qos_mutex);
+
+ return false;
+}
+
+/**
+ * irdma_remove_leaf - Remove leaf node unconditionally
+ * @vsi: vsi pointer
+ * @user_pri: user priority
+ */
+static void irdma_remove_leaf(struct irdma_sc_vsi *vsi, u8 user_pri)
+{
+ struct irdma_ws_node *ws_tree_root, *vsi_node, *tc_node;
+ int i;
+ u16 traffic_class;
+
+ traffic_class = vsi->qos[user_pri].traffic_class;
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
+ if (vsi->qos[i].traffic_class == traffic_class)
+ vsi->qos[i].valid = false;
+
+ ws_tree_root = vsi->dev->ws_tree_root;
+ if (!ws_tree_root)
+ return;
+
+ vsi_node = ws_find_node(ws_tree_root, vsi->vsi_idx,
+ WS_MATCH_TYPE_VSI);
+ if (!vsi_node)
+ return;
+
+ tc_node = ws_find_node(vsi_node,
+ vsi->qos[user_pri].traffic_class,
+ WS_MATCH_TYPE_TC);
+ if (!tc_node)
+ return;
+
+ irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE);
+ vsi->unregister_qset(vsi, tc_node);
+ list_del(&tc_node->siblings);
+ irdma_free_node(vsi, tc_node);
+ /* Check if VSI node can be freed */
+ if (list_empty(&vsi_node->child_list_head)) {
+ irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE);
+ list_del(&vsi_node->siblings);
+ irdma_free_node(vsi, vsi_node);
+ /* Free the head node if there are no remaining VSI nodes */
+ if (list_empty(&ws_tree_root->child_list_head)) {
+ irdma_ws_cqp_cmd(vsi, ws_tree_root,
+ IRDMA_OP_WS_DELETE_NODE);
+ irdma_free_node(vsi, ws_tree_root);
+ vsi->dev->ws_tree_root = NULL;
+ }
+ }
+}
+
+/**
+ * irdma_ws_add - Build work scheduler tree, set RDMA qs_handle
+ * @vsi: vsi pointer
+ * @user_pri: user priority
+ */
+int irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
+{
+ struct irdma_ws_node *ws_tree_root;
+ struct irdma_ws_node *vsi_node;
+ struct irdma_ws_node *tc_node;
+ u16 traffic_class;
+ int ret = 0;
+ int i;
+
+ mutex_lock(&vsi->dev->ws_mutex);
+ if (vsi->tc_change_pending) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ if (vsi->qos[user_pri].valid)
+ goto exit;
+
+ ws_tree_root = vsi->dev->ws_tree_root;
+ if (!ws_tree_root) {
+ ibdev_dbg(to_ibdev(vsi->dev), "WS: Creating root node\n");
+ ws_tree_root = irdma_alloc_node(vsi, user_pri,
+ WS_NODE_TYPE_PARENT, NULL);
+ if (!ws_tree_root) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ret = irdma_ws_cqp_cmd(vsi, ws_tree_root, IRDMA_OP_WS_ADD_NODE);
+ if (ret) {
+ irdma_free_node(vsi, ws_tree_root);
+ goto exit;
+ }
+
+ vsi->dev->ws_tree_root = ws_tree_root;
+ }
+
+ /* Find a second tier node that matches the VSI */
+ vsi_node = ws_find_node(ws_tree_root, vsi->vsi_idx,
+ WS_MATCH_TYPE_VSI);
+
+ /* If VSI node doesn't exist, add one */
+ if (!vsi_node) {
+ ibdev_dbg(to_ibdev(vsi->dev),
+ "WS: Node not found matching VSI %d\n",
+ vsi->vsi_idx);
+ vsi_node = irdma_alloc_node(vsi, user_pri, WS_NODE_TYPE_PARENT,
+ ws_tree_root);
+ if (!vsi_node) {
+ ret = -ENOMEM;
+ goto vsi_add_err;
+ }
+
+ ret = irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_ADD_NODE);
+ if (ret) {
+ irdma_free_node(vsi, vsi_node);
+ goto vsi_add_err;
+ }
+
+ list_add(&vsi_node->siblings, &ws_tree_root->child_list_head);
+ }
+
+ ibdev_dbg(to_ibdev(vsi->dev),
+ "WS: Using node %d which represents VSI %d\n",
+ vsi_node->index, vsi->vsi_idx);
+ traffic_class = vsi->qos[user_pri].traffic_class;
+ tc_node = ws_find_node(vsi_node, traffic_class,
+ WS_MATCH_TYPE_TC);
+ if (!tc_node) {
+ /* Add leaf node */
+ ibdev_dbg(to_ibdev(vsi->dev),
+ "WS: Node not found matching VSI %d and TC %d\n",
+ vsi->vsi_idx, traffic_class);
+ tc_node = irdma_alloc_node(vsi, user_pri, WS_NODE_TYPE_LEAF,
+ vsi_node);
+ if (!tc_node) {
+ ret = -ENOMEM;
+ goto leaf_add_err;
+ }
+
+ ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_ADD_NODE);
+ if (ret) {
+ irdma_free_node(vsi, tc_node);
+ goto leaf_add_err;
+ }
+
+ list_add(&tc_node->siblings, &vsi_node->child_list_head);
+ /*
+ * callback to LAN to update the LAN tree with our node
+ */
+ ret = vsi->register_qset(vsi, tc_node);
+ if (ret)
+ goto reg_err;
+
+ tc_node->enable = true;
+ ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_MODIFY_NODE);
+ if (ret) {
+ vsi->unregister_qset(vsi, tc_node);
+ goto reg_err;
+ }
+ }
+ ibdev_dbg(to_ibdev(vsi->dev),
+ "WS: Using node %d which represents VSI %d TC %d\n",
+ tc_node->index, vsi->vsi_idx, traffic_class);
+ /*
+ * Iterate through other UPs and update the QS handle if they have
+ * a matching traffic class.
+ */
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ if (vsi->qos[i].traffic_class == traffic_class) {
+ vsi->qos[i].qs_handle = tc_node->qs_handle;
+ vsi->qos[i].lan_qos_handle = tc_node->lan_qs_handle;
+ vsi->qos[i].l2_sched_node_id = tc_node->l2_sched_node_id;
+ vsi->qos[i].valid = true;
+ }
+ }
+ goto exit;
+
+reg_err:
+ irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE);
+ list_del(&tc_node->siblings);
+ irdma_free_node(vsi, tc_node);
+leaf_add_err:
+ if (list_empty(&vsi_node->child_list_head)) {
+ if (irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE))
+ goto exit;
+ list_del(&vsi_node->siblings);
+ irdma_free_node(vsi, vsi_node);
+ }
+
+vsi_add_err:
+ /* Free the head node if there are no remaining VSI nodes */
+ if (list_empty(&ws_tree_root->child_list_head)) {
+ irdma_ws_cqp_cmd(vsi, ws_tree_root, IRDMA_OP_WS_DELETE_NODE);
+ vsi->dev->ws_tree_root = NULL;
+ irdma_free_node(vsi, ws_tree_root);
+ }
+
+exit:
+ mutex_unlock(&vsi->dev->ws_mutex);
+ return ret;
+}
+
+/**
+ * irdma_ws_remove - Free WS scheduler node, update WS tree
+ * @vsi: vsi pointer
+ * @user_pri: user priority
+ */
+void irdma_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri)
+{
+ mutex_lock(&vsi->dev->ws_mutex);
+ if (irdma_tc_in_use(vsi, user_pri))
+ goto exit;
+ irdma_remove_leaf(vsi, user_pri);
+exit:
+ mutex_unlock(&vsi->dev->ws_mutex);
+}
+
+/**
+ * irdma_ws_reset - Reset entire WS tree
+ * @vsi: vsi pointer
+ */
+void irdma_ws_reset(struct irdma_sc_vsi *vsi)
+{
+ u8 i;
+
+ mutex_lock(&vsi->dev->ws_mutex);
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; ++i)
+ irdma_remove_leaf(vsi, i);
+ mutex_unlock(&vsi->dev->ws_mutex);
+}
diff --git a/drivers/infiniband/hw/irdma/ws.h b/drivers/infiniband/hw/irdma/ws.h
new file mode 100644
index 000000000000..45490031a389
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/ws.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2015 - 2020 Intel Corporation */
+#ifndef IRDMA_WS_H
+#define IRDMA_WS_H
+
+#include "osdep.h"
+
+enum irdma_ws_node_type {
+ WS_NODE_TYPE_PARENT,
+ WS_NODE_TYPE_LEAF,
+};
+
+enum irdma_ws_match_type {
+ WS_MATCH_TYPE_VSI,
+ WS_MATCH_TYPE_TC,
+};
+
+struct irdma_ws_node {
+ struct list_head siblings;
+ struct list_head child_list_head;
+ struct irdma_ws_node *parent;
+ u64 lan_qs_handle; /* opaque handle used by LAN */
+ u32 l2_sched_node_id;
+ u16 index;
+ u16 qs_handle;
+ u16 vsi_index;
+ u8 traffic_class;
+ u8 user_pri;
+ u8 rel_bw;
+ u8 abstraction_layer; /* used for splitting a TC */
+ u8 prio_type;
+ bool type_leaf:1;
+ bool enable:1;
+};
+
+struct irdma_sc_vsi;
+int irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri);
+void irdma_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri);
+void irdma_ws_reset(struct irdma_sc_vsi *vsi);
+
+#endif /* IRDMA_WS_H */
diff --git a/drivers/infiniband/hw/mana/Kconfig b/drivers/infiniband/hw/mana/Kconfig
new file mode 100644
index 000000000000..546640657bac
--- /dev/null
+++ b/drivers/infiniband/hw/mana/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config MANA_INFINIBAND
+ tristate "Microsoft Azure Network Adapter support"
+ depends on NETDEVICES && ETHERNET && PCI && MICROSOFT_MANA
+ help
+ This driver provides low-level RDMA support for the Microsoft Azure
+ Network Adapter (MANA). MANA supports RDMA features that can be used
+ by workloads (e.g. DPDK, MPI, etc.) that use RDMA verbs to directly
+ access hardware from user-mode processes in the Microsoft Azure cloud
+ environment.
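
A minimal sketch of a kernel .config fragment that satisfies the dependencies declared above and builds the driver as a module (illustrative only; the exact option set depends on the rest of the kernel configuration):

    # illustrative .config fragment
    CONFIG_PCI=y
    CONFIG_NETDEVICES=y
    CONFIG_ETHERNET=y
    CONFIG_MICROSOFT_MANA=m
    CONFIG_MANA_INFINIBAND=m

Because of the "depends on" line, the MANA_INFINIBAND prompt only becomes selectable once the MANA Ethernet driver (MICROSOFT_MANA) and the PCI/Ethernet infrastructure are enabled.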
diff --git a/drivers/infiniband/hw/mana/Makefile b/drivers/infiniband/hw/mana/Makefile
new file mode 100644
index 000000000000..921c05e08b11
--- /dev/null
+++ b/drivers/infiniband/hw/mana/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MANA_INFINIBAND) += mana_ib.o
+
+mana_ib-y := device.o main.o wq.o qp.o cq.o mr.o ah.o wr.o counters.o
diff --git a/drivers/infiniband/hw/mana/ah.c b/drivers/infiniband/hw/mana/ah.c
new file mode 100644
index 000000000000..f56952eebbaa
--- /dev/null
+++ b/drivers/infiniband/hw/mana/ah.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Microsoft Corporation. All rights reserved.
+ */
+
+#include "mana_ib.h"
+
+int mana_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct mana_ib_dev *mdev = container_of(ibah->device, struct mana_ib_dev, ib_dev);
+ struct mana_ib_ah *ah = container_of(ibah, struct mana_ib_ah, ibah);
+ struct rdma_ah_attr *ah_attr = attr->ah_attr;
+ const struct ib_global_route *grh;
+ enum rdma_network_type ntype;
+
+ if (ah_attr->type != RDMA_AH_ATTR_TYPE_ROCE ||
+ !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
+ return -EINVAL;
+
+ if (udata)
+ return -EINVAL;
+
+ ah->av = dma_pool_zalloc(mdev->av_pool, GFP_ATOMIC, &ah->dma_handle);
+ if (!ah->av)
+ return -ENOMEM;
+
+ grh = rdma_ah_read_grh(ah_attr);
+ ntype = rdma_gid_attr_network_type(grh->sgid_attr);
+
+ copy_in_reverse(ah->av->dest_mac, ah_attr->roce.dmac, ETH_ALEN);
+ ah->av->udp_src_port = rdma_flow_label_to_udp_sport(grh->flow_label);
+ ah->av->hop_limit = grh->hop_limit;
+ ah->av->dscp = (grh->traffic_class >> 2) & 0x3f;
+ ah->av->is_ipv6 = (ntype == RDMA_NETWORK_IPV6);
+
+ if (ah->av->is_ipv6) {
+ copy_in_reverse(ah->av->dest_ip, grh->dgid.raw, 16);
+ copy_in_reverse(ah->av->src_ip, grh->sgid_attr->gid.raw, 16);
+ } else {
+ ah->av->dest_ip[10] = 0xFF;
+ ah->av->dest_ip[11] = 0xFF;
+ copy_in_reverse(&ah->av->dest_ip[12], &grh->dgid.raw[12], 4);
+ copy_in_reverse(&ah->av->src_ip[12], &grh->sgid_attr->gid.raw[12], 4);
+ }
+
+ return 0;
+}
+
+int mana_ib_destroy_ah(struct ib_ah *ibah, u32 flags)
+{
+ struct mana_ib_dev *mdev = container_of(ibah->device, struct mana_ib_dev, ib_dev);
+ struct mana_ib_ah *ah = container_of(ibah, struct mana_ib_ah, ibah);
+
+ dma_pool_free(mdev->av_pool, ah->av, ah->dma_handle);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/mana/counters.c b/drivers/infiniband/hw/mana/counters.c
new file mode 100644
index 000000000000..e964e74be48d
--- /dev/null
+++ b/drivers/infiniband/hw/mana/counters.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Microsoft Corporation. All rights reserved.
+ */
+
+#include "counters.h"
+
+static const struct rdma_stat_desc mana_ib_port_stats_desc[] = {
+ [MANA_IB_REQUESTER_TIMEOUT].name = "requester_timeout",
+ [MANA_IB_REQUESTER_OOS_NAK].name = "requester_oos_nak",
+ [MANA_IB_REQUESTER_RNR_NAK].name = "requester_rnr_nak",
+ [MANA_IB_RESPONDER_RNR_NAK].name = "responder_rnr_nak",
+ [MANA_IB_RESPONDER_OOS].name = "responder_oos",
+ [MANA_IB_RESPONDER_DUP_REQUEST].name = "responder_dup_request",
+ [MANA_IB_REQUESTER_IMPLICIT_NAK].name = "requester_implicit_nak",
+ [MANA_IB_REQUESTER_READRESP_PSN_MISMATCH].name = "requester_readresp_psn_mismatch",
+ [MANA_IB_NAK_INV_REQ].name = "nak_inv_req",
+ [MANA_IB_NAK_ACCESS_ERR].name = "nak_access_error",
+ [MANA_IB_NAK_OPP_ERR].name = "nak_opp_error",
+ [MANA_IB_NAK_INV_READ].name = "nak_inv_read",
+ [MANA_IB_RESPONDER_LOCAL_LEN_ERR].name = "responder_local_len_error",
+ [MANA_IB_REQUESTOR_LOCAL_PROT_ERR].name = "requestor_local_prot_error",
+ [MANA_IB_RESPONDER_REM_ACCESS_ERR].name = "responder_rem_access_error",
+ [MANA_IB_RESPONDER_LOCAL_QP_ERR].name = "responder_local_qp_error",
+ [MANA_IB_RESPONDER_MALFORMED_WQE].name = "responder_malformed_wqe",
+ [MANA_IB_GENERAL_HW_ERR].name = "general_hw_error",
+ [MANA_IB_REQUESTER_RNR_NAK_RETRIES_EXCEEDED].name = "requester_rnr_nak_retries_exceeded",
+ [MANA_IB_REQUESTER_RETRIES_EXCEEDED].name = "requester_retries_exceeded",
+ [MANA_IB_TOTAL_FATAL_ERR].name = "total_fatal_error",
+ [MANA_IB_RECEIVED_CNPS].name = "received_cnps",
+ [MANA_IB_NUM_QPS_CONGESTED].name = "num_qps_congested",
+ [MANA_IB_RATE_INC_EVENTS].name = "rate_inc_events",
+ [MANA_IB_NUM_QPS_RECOVERED].name = "num_qps_recovered",
+ [MANA_IB_CURRENT_RATE].name = "current_rate",
+ [MANA_IB_DUP_RX_REQ].name = "dup_rx_requests",
+ [MANA_IB_TX_BYTES].name = "tx_bytes",
+ [MANA_IB_RX_BYTES].name = "rx_bytes",
+ [MANA_IB_RX_SEND_REQ].name = "rx_send_requests",
+ [MANA_IB_RX_WRITE_REQ].name = "rx_write_requests",
+ [MANA_IB_RX_READ_REQ].name = "rx_read_requests",
+ [MANA_IB_TX_PKT].name = "tx_packets",
+ [MANA_IB_RX_PKT].name = "rx_packets",
+};
+
+static const struct rdma_stat_desc mana_ib_device_stats_desc[] = {
+ [MANA_IB_SENT_CNPS].name = "sent_cnps",
+ [MANA_IB_RECEIVED_ECNS].name = "received_ecns",
+ [MANA_IB_RECEIVED_CNP_COUNT].name = "received_cnp_count",
+ [MANA_IB_QP_CONGESTED_EVENTS].name = "qp_congested_events",
+ [MANA_IB_QP_RECOVERED_EVENTS].name = "qp_recovered_events",
+ [MANA_IB_DEV_RATE_INC_EVENTS].name = "rate_inc_events",
+};
+
+struct rdma_hw_stats *mana_ib_alloc_hw_device_stats(struct ib_device *ibdev)
+{
+ return rdma_alloc_hw_stats_struct(mana_ib_device_stats_desc,
+ ARRAY_SIZE(mana_ib_device_stats_desc),
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev,
+ u32 port_num)
+{
+ return rdma_alloc_hw_stats_struct(mana_ib_port_stats_desc,
+ ARRAY_SIZE(mana_ib_port_stats_desc),
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static int mana_ib_get_hw_device_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats)
+{
+ struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev,
+ ib_dev);
+ struct mana_rnic_query_device_cntrs_resp resp = {};
+ struct mana_rnic_query_device_cntrs_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_QUERY_DEVICE_COUNTERS,
+ sizeof(req), sizeof(resp));
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
+ req.adapter = mdev->adapter_handle;
+
+ err = mana_gd_send_request(mdev_to_gc(mdev), sizeof(req), &req,
+ sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to query device counters err %d",
+ err);
+ return err;
+ }
+
+ stats->value[MANA_IB_SENT_CNPS] = resp.sent_cnps;
+ stats->value[MANA_IB_RECEIVED_ECNS] = resp.received_ecns;
+ stats->value[MANA_IB_RECEIVED_CNP_COUNT] = resp.received_cnp_count;
+ stats->value[MANA_IB_QP_CONGESTED_EVENTS] = resp.qp_congested_events;
+ stats->value[MANA_IB_QP_RECOVERED_EVENTS] = resp.qp_recovered_events;
+ stats->value[MANA_IB_DEV_RATE_INC_EVENTS] = resp.rate_inc_events;
+
+ return ARRAY_SIZE(mana_ib_device_stats_desc);
+}
+
+static int mana_ib_get_hw_port_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u32 port_num)
+{
+ struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev,
+ ib_dev);
+ struct mana_rnic_query_vf_cntrs_resp resp = {};
+ struct mana_rnic_query_vf_cntrs_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_QUERY_VF_COUNTERS,
+ sizeof(req), sizeof(resp));
+ req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
+ req.adapter = mdev->adapter_handle;
+
+ err = mana_gd_send_request(mdev_to_gc(mdev), sizeof(req), &req,
+ sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to query vf counters err %d",
+ err);
+ return err;
+ }
+
+ stats->value[MANA_IB_REQUESTER_TIMEOUT] = resp.requester_timeout;
+ stats->value[MANA_IB_REQUESTER_OOS_NAK] = resp.requester_oos_nak;
+ stats->value[MANA_IB_REQUESTER_RNR_NAK] = resp.requester_rnr_nak;
+ stats->value[MANA_IB_RESPONDER_RNR_NAK] = resp.responder_rnr_nak;
+ stats->value[MANA_IB_RESPONDER_OOS] = resp.responder_oos;
+ stats->value[MANA_IB_RESPONDER_DUP_REQUEST] = resp.responder_dup_request;
+ stats->value[MANA_IB_REQUESTER_IMPLICIT_NAK] =
+ resp.requester_implicit_nak;
+ stats->value[MANA_IB_REQUESTER_READRESP_PSN_MISMATCH] =
+ resp.requester_readresp_psn_mismatch;
+ stats->value[MANA_IB_NAK_INV_REQ] = resp.nak_inv_req;
+ stats->value[MANA_IB_NAK_ACCESS_ERR] = resp.nak_access_err;
+ stats->value[MANA_IB_NAK_OPP_ERR] = resp.nak_opp_err;
+ stats->value[MANA_IB_NAK_INV_READ] = resp.nak_inv_read;
+ stats->value[MANA_IB_RESPONDER_LOCAL_LEN_ERR] =
+ resp.responder_local_len_err;
+ stats->value[MANA_IB_REQUESTOR_LOCAL_PROT_ERR] =
+ resp.requestor_local_prot_err;
+ stats->value[MANA_IB_RESPONDER_REM_ACCESS_ERR] =
+ resp.responder_rem_access_err;
+ stats->value[MANA_IB_RESPONDER_LOCAL_QP_ERR] =
+ resp.responder_local_qp_err;
+ stats->value[MANA_IB_RESPONDER_MALFORMED_WQE] =
+ resp.responder_malformed_wqe;
+ stats->value[MANA_IB_GENERAL_HW_ERR] = resp.general_hw_err;
+ stats->value[MANA_IB_REQUESTER_RNR_NAK_RETRIES_EXCEEDED] =
+ resp.requester_rnr_nak_retries_exceeded;
+ stats->value[MANA_IB_REQUESTER_RETRIES_EXCEEDED] =
+ resp.requester_retries_exceeded;
+ stats->value[MANA_IB_TOTAL_FATAL_ERR] = resp.total_fatal_err;
+
+ stats->value[MANA_IB_RECEIVED_CNPS] = resp.received_cnps;
+ stats->value[MANA_IB_NUM_QPS_CONGESTED] = resp.num_qps_congested;
+ stats->value[MANA_IB_RATE_INC_EVENTS] = resp.rate_inc_events;
+ stats->value[MANA_IB_NUM_QPS_RECOVERED] = resp.num_qps_recovered;
+ stats->value[MANA_IB_CURRENT_RATE] = resp.current_rate;
+
+ stats->value[MANA_IB_DUP_RX_REQ] = resp.dup_rx_req;
+ stats->value[MANA_IB_TX_BYTES] = resp.tx_bytes;
+ stats->value[MANA_IB_RX_BYTES] = resp.rx_bytes;
+ stats->value[MANA_IB_RX_SEND_REQ] = resp.rx_send_req;
+ stats->value[MANA_IB_RX_WRITE_REQ] = resp.rx_write_req;
+ stats->value[MANA_IB_RX_READ_REQ] = resp.rx_read_req;
+ stats->value[MANA_IB_TX_PKT] = resp.tx_pkt;
+ stats->value[MANA_IB_RX_PKT] = resp.rx_pkt;
+
+ return ARRAY_SIZE(mana_ib_port_stats_desc);
+}
+
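+/* port_num == 0 selects the device-wide counters; any other port number
+ * returns the per-port (VF) counters.
+ */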
+int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u32 port_num, int index)
+{
+ if (!port_num)
+ return mana_ib_get_hw_device_stats(ibdev, stats);
+ else
+ return mana_ib_get_hw_port_stats(ibdev, stats, port_num);
+}
diff --git a/drivers/infiniband/hw/mana/counters.h b/drivers/infiniband/hw/mana/counters.h
new file mode 100644
index 000000000000..f68e776bb41d
--- /dev/null
+++ b/drivers/infiniband/hw/mana/counters.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 Microsoft Corporation. All rights reserved.
+ */
+
+#ifndef _COUNTERS_H_
+#define _COUNTERS_H_
+
+#include "mana_ib.h"
+
+enum mana_ib_port_counters {
+ MANA_IB_REQUESTER_TIMEOUT,
+ MANA_IB_REQUESTER_OOS_NAK,
+ MANA_IB_REQUESTER_RNR_NAK,
+ MANA_IB_RESPONDER_RNR_NAK,
+ MANA_IB_RESPONDER_OOS,
+ MANA_IB_RESPONDER_DUP_REQUEST,
+ MANA_IB_REQUESTER_IMPLICIT_NAK,
+ MANA_IB_REQUESTER_READRESP_PSN_MISMATCH,
+ MANA_IB_NAK_INV_REQ,
+ MANA_IB_NAK_ACCESS_ERR,
+ MANA_IB_NAK_OPP_ERR,
+ MANA_IB_NAK_INV_READ,
+ MANA_IB_RESPONDER_LOCAL_LEN_ERR,
+ MANA_IB_REQUESTOR_LOCAL_PROT_ERR,
+ MANA_IB_RESPONDER_REM_ACCESS_ERR,
+ MANA_IB_RESPONDER_LOCAL_QP_ERR,
+ MANA_IB_RESPONDER_MALFORMED_WQE,
+ MANA_IB_GENERAL_HW_ERR,
+ MANA_IB_REQUESTER_RNR_NAK_RETRIES_EXCEEDED,
+ MANA_IB_REQUESTER_RETRIES_EXCEEDED,
+ MANA_IB_TOTAL_FATAL_ERR,
+ MANA_IB_RECEIVED_CNPS,
+ MANA_IB_NUM_QPS_CONGESTED,
+ MANA_IB_RATE_INC_EVENTS,
+ MANA_IB_NUM_QPS_RECOVERED,
+ MANA_IB_CURRENT_RATE,
+ MANA_IB_DUP_RX_REQ,
+ MANA_IB_TX_BYTES,
+ MANA_IB_RX_BYTES,
+ MANA_IB_RX_SEND_REQ,
+ MANA_IB_RX_WRITE_REQ,
+ MANA_IB_RX_READ_REQ,
+ MANA_IB_TX_PKT,
+ MANA_IB_RX_PKT,
+};
+
+enum mana_ib_device_counters {
+ MANA_IB_SENT_CNPS,
+ MANA_IB_RECEIVED_ECNS,
+ MANA_IB_RECEIVED_CNP_COUNT,
+ MANA_IB_QP_CONGESTED_EVENTS,
+ MANA_IB_QP_RECOVERED_EVENTS,
+ MANA_IB_DEV_RATE_INC_EVENTS,
+};
+
+struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev,
+ u32 port_num);
+struct rdma_hw_stats *mana_ib_alloc_hw_device_stats(struct ib_device *ibdev);
+int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u32 port_num, int index);
+#endif /* _COUNTERS_H_ */
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
new file mode 100644
index 000000000000..1becc8779123
--- /dev/null
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
+ */
+
+#include "mana_ib.h"
+
+int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_udata *udata = &attrs->driver_udata;
+ struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
+ struct mana_ib_create_cq_resp resp = {};
+ struct mana_ib_ucontext *mana_ucontext;
+ struct ib_device *ibdev = ibcq->device;
+ struct mana_ib_create_cq ucmd = {};
+ struct mana_ib_dev *mdev;
+ bool is_rnic_cq;
+ u32 doorbell;
+ u32 buf_size;
+ int err;
+
+ mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+
+ cq->comp_vector = attr->comp_vector % ibdev->num_comp_vectors;
+ cq->cq_handle = INVALID_MANA_HANDLE;
+
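+ /* A user CQ maps the buffer supplied through udata; a kernel CQ (no udata)
+ * allocates its own GDMA queue and is always an RNIC CQ.
+ */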
+ if (udata) {
+ if (udata->inlen < offsetof(struct mana_ib_create_cq, flags))
+ return -EINVAL;
+
+ err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
+ if (err) {
+ ibdev_dbg(ibdev, "Failed to copy from udata for create cq, %d\n", err);
+ return err;
+ }
+
+ is_rnic_cq = !!(ucmd.flags & MANA_IB_CREATE_RNIC_CQ);
+
+ if ((!is_rnic_cq && attr->cqe > mdev->adapter_caps.max_qp_wr) ||
+ attr->cqe > U32_MAX / COMP_ENTRY_SIZE) {
+ ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
+ return -EINVAL;
+ }
+
+ cq->cqe = attr->cqe;
+ err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
+ &cq->queue);
+ if (err) {
+ ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
+ return err;
+ }
+
+ mana_ucontext = rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
+ ibucontext);
+ doorbell = mana_ucontext->doorbell;
+ } else {
+ is_rnic_cq = true;
+ buf_size = MANA_PAGE_ALIGN(roundup_pow_of_two(attr->cqe * COMP_ENTRY_SIZE));
+ cq->cqe = buf_size / COMP_ENTRY_SIZE;
+ err = mana_ib_create_kernel_queue(mdev, buf_size, GDMA_CQ, &cq->queue);
+ if (err) {
+ ibdev_dbg(ibdev, "Failed to create kernel queue for create cq, %d\n", err);
+ return err;
+ }
+ doorbell = mdev->gdma_dev->doorbell;
+ }
+
+ if (is_rnic_cq) {
+ err = mana_ib_gd_create_cq(mdev, cq, doorbell);
+ if (err) {
+ ibdev_dbg(ibdev, "Failed to create RNIC cq, %d\n", err);
+ goto err_destroy_queue;
+ }
+
+ err = mana_ib_install_cq_cb(mdev, cq);
+ if (err) {
+ ibdev_dbg(ibdev, "Failed to install cq callback, %d\n", err);
+ goto err_destroy_rnic_cq;
+ }
+ }
+
+ if (udata) {
+ resp.cqid = cq->queue.id;
+ err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
+ if (err) {
+ ibdev_dbg(&mdev->ib_dev, "Failed to copy to udata, %d\n", err);
+ goto err_remove_cq_cb;
+ }
+ }
+
+ spin_lock_init(&cq->cq_lock);
+ INIT_LIST_HEAD(&cq->list_send_qp);
+ INIT_LIST_HEAD(&cq->list_recv_qp);
+
+ return 0;
+
+err_remove_cq_cb:
+ mana_ib_remove_cq_cb(mdev, cq);
+err_destroy_rnic_cq:
+ mana_ib_gd_destroy_cq(mdev, cq);
+err_destroy_queue:
+ mana_ib_destroy_queue(mdev, &cq->queue);
+
+ return err;
+}
+
+int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+{
+ struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
+ struct ib_device *ibdev = ibcq->device;
+ struct mana_ib_dev *mdev;
+
+ mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+
+ mana_ib_remove_cq_cb(mdev, cq);
+
+ /* Ignore return code as there is not much we can do about it.
+ * The error message is printed inside.
+ */
+ mana_ib_gd_destroy_cq(mdev, cq);
+
+ mana_ib_destroy_queue(mdev, &cq->queue);
+
+ return 0;
+}
+
+static void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq)
+{
+ struct mana_ib_cq *cq = ctx;
+
+ if (cq->ibcq.comp_handler)
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+}
+
+int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
+{
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct gdma_queue *gdma_cq;
+
+ if (cq->queue.id >= gc->max_num_cqs)
+ return -EINVAL;
+ /* Create CQ table entry */
+ WARN_ON(gc->cq_table[cq->queue.id]);
+ if (cq->queue.kmem)
+ gdma_cq = cq->queue.kmem;
+ else
+ gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
+ if (!gdma_cq)
+ return -ENOMEM;
+
+ gdma_cq->cq.context = cq;
+ gdma_cq->type = GDMA_CQ;
+ gdma_cq->cq.callback = mana_ib_cq_handler;
+ gdma_cq->id = cq->queue.id;
+ gc->cq_table[cq->queue.id] = gdma_cq;
+ return 0;
+}
+
+void mana_ib_remove_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
+{
+ struct gdma_context *gc = mdev_to_gc(mdev);
+
+ if (cq->queue.id >= gc->max_num_cqs || cq->queue.id == INVALID_QUEUE_ID)
+ return;
+
+ if (cq->queue.kmem)
+ /* Kernel-owned queue memory is cleaned up and removed by the mana core */
+ return;
+
+ kfree(gc->cq_table[cq->queue.id]);
+ gc->cq_table[cq->queue.id] = NULL;
+}
+
+int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
+ struct gdma_queue *gdma_cq = cq->queue.kmem;
+
+ if (!gdma_cq)
+ return -EINVAL;
+
+ mana_gd_ring_cq(gdma_cq, SET_ARM_BIT);
+ return 0;
+}
+
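+/* UD/GSI completions are matched against per-QP shadow queues: the hardware
+ * CQE supplies the vendor error (and, on receive, length and source QPN),
+ * while the consumed WQE size comes from the shadow entry posted earlier.
+ */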
+static inline void handle_ud_sq_cqe(struct mana_ib_qp *qp, struct gdma_comp *cqe)
+{
+ struct mana_rdma_cqe *rdma_cqe = (struct mana_rdma_cqe *)cqe->cqe_data;
+ struct gdma_queue *wq = qp->ud_qp.queues[MANA_UD_SEND_QUEUE].kmem;
+ struct ud_sq_shadow_wqe *shadow_wqe;
+
+ shadow_wqe = shadow_queue_get_next_to_complete(&qp->shadow_sq);
+ if (!shadow_wqe)
+ return;
+
+ shadow_wqe->header.error_code = rdma_cqe->ud_send.vendor_error;
+
+ wq->tail += shadow_wqe->header.posted_wqe_size;
+ shadow_queue_advance_next_to_complete(&qp->shadow_sq);
+}
+
+static inline void handle_ud_rq_cqe(struct mana_ib_qp *qp, struct gdma_comp *cqe)
+{
+ struct mana_rdma_cqe *rdma_cqe = (struct mana_rdma_cqe *)cqe->cqe_data;
+ struct gdma_queue *wq = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].kmem;
+ struct ud_rq_shadow_wqe *shadow_wqe;
+
+ shadow_wqe = shadow_queue_get_next_to_complete(&qp->shadow_rq);
+ if (!shadow_wqe)
+ return;
+
+ shadow_wqe->byte_len = rdma_cqe->ud_recv.msg_len;
+ shadow_wqe->src_qpn = rdma_cqe->ud_recv.src_qpn;
+ shadow_wqe->header.error_code = IB_WC_SUCCESS;
+
+ wq->tail += shadow_wqe->header.posted_wqe_size;
+ shadow_queue_advance_next_to_complete(&qp->shadow_rq);
+}
+
+static void mana_handle_cqe(struct mana_ib_dev *mdev, struct gdma_comp *cqe)
+{
+ struct mana_ib_qp *qp = mana_get_qp_ref(mdev, cqe->wq_num, cqe->is_sq);
+
+ if (!qp)
+ return;
+
+ if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_UD) {
+ if (cqe->is_sq)
+ handle_ud_sq_cqe(qp, cqe);
+ else
+ handle_ud_rq_cqe(qp, cqe);
+ }
+
+ mana_put_qp_ref(qp);
+}
+
+static void fill_verbs_from_shadow_wqe(struct mana_ib_qp *qp, struct ib_wc *wc,
+ const struct shadow_wqe_header *shadow_wqe)
+{
+ const struct ud_rq_shadow_wqe *ud_wqe = (const struct ud_rq_shadow_wqe *)shadow_wqe;
+
+ wc->wr_id = shadow_wqe->wr_id;
+ wc->status = shadow_wqe->error_code;
+ wc->opcode = shadow_wqe->opcode;
+ wc->vendor_err = shadow_wqe->error_code;
+ wc->wc_flags = 0;
+ wc->qp = &qp->ibqp;
+ wc->pkey_index = 0;
+
+ if (shadow_wqe->opcode == IB_WC_RECV) {
+ wc->byte_len = ud_wqe->byte_len;
+ wc->src_qp = ud_wqe->src_qpn;
+ wc->wc_flags |= IB_WC_GRH;
+ }
+}
+
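+/* Move completed shadow WQEs from every QP attached to this CQ into the
+ * caller's ib_wc array, draining send queues first, then receive queues.
+ */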
+static int mana_process_completions(struct mana_ib_cq *cq, int nwc, struct ib_wc *wc)
+{
+ struct shadow_wqe_header *shadow_wqe;
+ struct mana_ib_qp *qp;
+ int wc_index = 0;
+
+ /* process send shadow queue completions */
+ list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
+ while ((shadow_wqe = shadow_queue_get_next_to_consume(&qp->shadow_sq))
+ != NULL) {
+ if (wc_index >= nwc)
+ goto out;
+
+ fill_verbs_from_shadow_wqe(qp, &wc[wc_index], shadow_wqe);
+ shadow_queue_advance_consumer(&qp->shadow_sq);
+ wc_index++;
+ }
+ }
+
+ /* process recv shadow queue completions */
+ list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
+ while ((shadow_wqe = shadow_queue_get_next_to_consume(&qp->shadow_rq))
+ != NULL) {
+ if (wc_index >= nwc)
+ goto out;
+
+ fill_verbs_from_shadow_wqe(qp, &wc[wc_index], shadow_wqe);
+ shadow_queue_advance_consumer(&qp->shadow_rq);
+ wc_index++;
+ }
+ }
+
+out:
+ return wc_index;
+}
+
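+/* On device removal, complete any outstanding GSI send WQEs with a general
+ * error and notify the completion handler so consumers are not left waiting.
+ */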
+void mana_drain_gsi_sqs(struct mana_ib_dev *mdev)
+{
+ struct mana_ib_qp *qp = mana_get_qp_ref(mdev, MANA_GSI_QPN, false);
+ struct ud_sq_shadow_wqe *shadow_wqe;
+ struct mana_ib_cq *cq;
+ unsigned long flags;
+
+ if (!qp)
+ return;
+
+ cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ while ((shadow_wqe = shadow_queue_get_next_to_complete(&qp->shadow_sq))
+ != NULL) {
+ shadow_wqe->header.error_code = IB_WC_GENERAL_ERR;
+ shadow_queue_advance_next_to_complete(&qp->shadow_sq);
+ }
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ if (cq->ibcq.comp_handler)
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+
+ mana_put_qp_ref(qp);
+}
+
+int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
+ struct mana_ib_dev *mdev = container_of(ibcq->device, struct mana_ib_dev, ib_dev);
+ struct gdma_queue *queue = cq->queue.kmem;
+ struct gdma_comp gdma_cqe;
+ unsigned long flags;
+ int num_polled = 0;
+ int comp_read, i;
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ for (i = 0; i < num_entries; i++) {
+ comp_read = mana_gd_poll_cq(queue, &gdma_cqe, 1);
+ if (comp_read < 1)
+ break;
+ mana_handle_cqe(mdev, &gdma_cqe);
+ }
+
+ num_polled = mana_process_completions(cq, num_entries, wc);
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ return num_polled;
+}
diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
new file mode 100644
index 000000000000..bdeddb642b87
--- /dev/null
+++ b/drivers/infiniband/hw/mana/device.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
+ */
+
+#include "mana_ib.h"
+#include <net/mana/mana_auxiliary.h>
+#include <net/addrconf.h>
+
+MODULE_DESCRIPTION("Microsoft Azure Network Adapter IB driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("NET_MANA");
+
+static const struct ib_device_ops mana_ib_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_MANA,
+ .uverbs_abi_ver = MANA_IB_UVERBS_ABI_VERSION,
+
+ .add_gid = mana_ib_gd_add_gid,
+ .alloc_pd = mana_ib_alloc_pd,
+ .alloc_ucontext = mana_ib_alloc_ucontext,
+ .create_ah = mana_ib_create_ah,
+ .create_cq = mana_ib_create_cq,
+ .create_qp = mana_ib_create_qp,
+ .create_rwq_ind_table = mana_ib_create_rwq_ind_table,
+ .create_wq = mana_ib_create_wq,
+ .dealloc_pd = mana_ib_dealloc_pd,
+ .dealloc_ucontext = mana_ib_dealloc_ucontext,
+ .del_gid = mana_ib_gd_del_gid,
+ .dereg_mr = mana_ib_dereg_mr,
+ .destroy_ah = mana_ib_destroy_ah,
+ .destroy_cq = mana_ib_destroy_cq,
+ .destroy_qp = mana_ib_destroy_qp,
+ .destroy_rwq_ind_table = mana_ib_destroy_rwq_ind_table,
+ .destroy_wq = mana_ib_destroy_wq,
+ .disassociate_ucontext = mana_ib_disassociate_ucontext,
+ .get_dma_mr = mana_ib_get_dma_mr,
+ .get_link_layer = mana_ib_get_link_layer,
+ .get_port_immutable = mana_ib_get_port_immutable,
+ .mmap = mana_ib_mmap,
+ .modify_qp = mana_ib_modify_qp,
+ .modify_wq = mana_ib_modify_wq,
+ .poll_cq = mana_ib_poll_cq,
+ .post_recv = mana_ib_post_recv,
+ .post_send = mana_ib_post_send,
+ .query_device = mana_ib_query_device,
+ .query_gid = mana_ib_query_gid,
+ .query_pkey = mana_ib_query_pkey,
+ .query_port = mana_ib_query_port,
+ .reg_user_mr = mana_ib_reg_user_mr,
+ .reg_user_mr_dmabuf = mana_ib_reg_user_mr_dmabuf,
+ .req_notify_cq = mana_ib_arm_cq,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, mana_ib_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_cq, mana_ib_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_pd, mana_ib_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, mana_ib_qp, ibqp),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, mana_ib_ucontext, ibucontext),
+ INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mana_ib_rwq_ind_table,
+ ib_ind_table),
+};
+
+static const struct ib_device_ops mana_ib_stats_ops = {
+ .alloc_hw_port_stats = mana_ib_alloc_hw_port_stats,
+ .get_hw_stats = mana_ib_get_hw_stats,
+};
+
+static const struct ib_device_ops mana_ib_device_stats_ops = {
+ .alloc_hw_device_stats = mana_ib_alloc_hw_device_stats,
+};
+
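+/* Refresh the IB device's netdev binding when the primary netdev of one of
+ * the adapter's ports changes (NETDEV_CHANGEUPPER, e.g. enslaving to a bond).
+ */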
+static int mana_ib_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct mana_ib_dev *dev = container_of(this, struct mana_ib_dev, nb);
+ struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+ struct gdma_context *gc = dev->gdma_dev->gdma_context;
+ struct mana_context *mc = gc->mana.driver_data;
+ struct net_device *ndev;
+ int i;
+
+ /* Only process events from our parent device */
+ for (i = 0; i < dev->ib_dev.phys_port_cnt; i++)
+ if (event_dev == mc->ports[i]) {
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ ndev = mana_get_primary_netdev(mc, i, &dev->dev_tracker);
+ /*
+ * The RDMA core will set up the GID based on the updated netdev.
+ * It's not possible to race with the core as the rtnl lock is
+ * held.
+ */
+ ib_device_set_netdev(&dev->ib_dev, ndev, i + 1);
+
+ /* mana_get_primary_netdev() returns ndev with refcount held */
+ if (ndev)
+ netdev_put(ndev, &dev->dev_tracker);
+
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
+ }
+ }
+ return NOTIFY_DONE;
+}
+
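+/* The auxiliary device is either the RNIC function ("mana.rdma") or the plain
+ * Ethernet function ("mana.eth"); only the RNIC path creates EQs, the RNIC
+ * adapter and the netdev notifier.
+ */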
+static int mana_ib_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct mana_adev *madev = container_of(adev, struct mana_adev, adev);
+ struct gdma_context *gc = madev->mdev->gdma_context;
+ struct mana_context *mc = gc->mana.driver_data;
+ struct gdma_dev *mdev = madev->mdev;
+ struct net_device *ndev;
+ struct mana_ib_dev *dev;
+ u8 mac_addr[ETH_ALEN];
+ int ret, i;
+
+ dev = ib_alloc_device(mana_ib_dev, ib_dev);
+ if (!dev)
+ return -ENOMEM;
+
+ ib_set_device_ops(&dev->ib_dev, &mana_ib_dev_ops);
+ dev->ib_dev.node_type = RDMA_NODE_IB_CA;
+ dev->ib_dev.num_comp_vectors = gc->max_num_queues;
+ dev->ib_dev.dev.parent = gc->dev;
+ dev->gdma_dev = mdev;
+ xa_init_flags(&dev->qp_table_wq, XA_FLAGS_LOCK_IRQ);
+
+ if (mana_ib_is_rnic(dev)) {
+ dev->ib_dev.phys_port_cnt = 1;
+ addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, mc->ports[0]->dev_addr);
+ ret = mana_ib_gd_query_adapter_caps(dev);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d", ret);
+ goto free_ib_device;
+ }
+
+ ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops);
+ if (dev->adapter_caps.feature_flags & MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT)
+ ib_set_device_ops(&dev->ib_dev, &mana_ib_device_stats_ops);
+
+ ret = mana_ib_create_eqs(dev);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
+ goto free_ib_device;
+ }
+
+ ret = mana_ib_gd_create_rnic_adapter(dev);
+ if (ret)
+ goto destroy_eqs;
+
+ if (dev->adapter_caps.feature_flags & MANA_IB_FEATURE_MULTI_PORTS_SUPPORT)
+ dev->ib_dev.phys_port_cnt = mc->num_ports;
+
+ for (i = 0; i < dev->ib_dev.phys_port_cnt; i++) {
+ ndev = mana_get_primary_netdev(mc, i, &dev->dev_tracker);
+ if (!ndev) {
+ ret = -ENODEV;
+ ibdev_err(&dev->ib_dev,
+ "Failed to get netdev for IB port %d", i + 1);
+ goto destroy_rnic;
+ }
+ ether_addr_copy(mac_addr, ndev->dev_addr);
+ ret = ib_device_set_netdev(&dev->ib_dev, ndev, i + 1);
+ /* mana_get_primary_netdev() returns ndev with refcount held */
+ netdev_put(ndev, &dev->dev_tracker);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
+ goto destroy_rnic;
+ }
+ ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d", ret);
+ goto destroy_rnic;
+ }
+ }
+ dev->nb.notifier_call = mana_ib_netdev_event;
+ ret = register_netdevice_notifier(&dev->nb);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d", ret);
+ goto destroy_rnic;
+ }
+ } else {
+ dev->ib_dev.phys_port_cnt = mc->num_ports;
+ ret = mana_eth_query_adapter_caps(dev);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to query ETH device caps, ret %d", ret);
+ goto free_ib_device;
+ }
+ }
+
+ dev->av_pool = dma_pool_create("mana_ib_av", gc->dev, MANA_AV_BUFFER_SIZE,
+ MANA_AV_BUFFER_SIZE, 0);
+ if (!dev->av_pool) {
+ ret = -ENOMEM;
+ goto deregister_net_notifier;
+ }
+
+ ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
+ mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt);
+
+ ret = ib_register_device(&dev->ib_dev, mana_ib_is_rnic(dev) ? "mana_%d" : "manae_%d",
+ gc->dev);
+ if (ret)
+ goto deallocate_pool;
+
+ dev_set_drvdata(&adev->dev, dev);
+
+ return 0;
+
+deallocate_pool:
+ dma_pool_destroy(dev->av_pool);
+deregister_net_notifier:
+ if (mana_ib_is_rnic(dev))
+ unregister_netdevice_notifier(&dev->nb);
+destroy_rnic:
+ if (mana_ib_is_rnic(dev))
+ mana_ib_gd_destroy_rnic_adapter(dev);
+destroy_eqs:
+ if (mana_ib_is_rnic(dev))
+ mana_ib_destroy_eqs(dev);
+free_ib_device:
+ xa_destroy(&dev->qp_table_wq);
+ ib_dealloc_device(&dev->ib_dev);
+ return ret;
+}
+
+static void mana_ib_remove(struct auxiliary_device *adev)
+{
+ struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev);
+
+ if (mana_ib_is_rnic(dev))
+ mana_drain_gsi_sqs(dev);
+
+ ib_unregister_device(&dev->ib_dev);
+ dma_pool_destroy(dev->av_pool);
+ if (mana_ib_is_rnic(dev)) {
+ unregister_netdevice_notifier(&dev->nb);
+ mana_ib_gd_destroy_rnic_adapter(dev);
+ mana_ib_destroy_eqs(dev);
+ }
+ xa_destroy(&dev->qp_table_wq);
+ ib_dealloc_device(&dev->ib_dev);
+}
+
+static const struct auxiliary_device_id mana_id_table[] = {
+ { .name = "mana.rdma", },
+ { .name = "mana.eth", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, mana_id_table);
+
+static struct auxiliary_driver mana_driver = {
+ .probe = mana_ib_probe,
+ .remove = mana_ib_remove,
+ .id_table = mana_id_table,
+};
+
+module_auxiliary_driver(mana_driver);
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
new file mode 100644
index 000000000000..fac159f7128d
--- /dev/null
+++ b/drivers/infiniband/hw/mana/main.c
@@ -0,0 +1,1134 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
+ */
+
+#include "mana_ib.h"
+#include <linux/pci.h>
+
+void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
+ u32 port)
+{
+ struct mana_port_context *mpc;
+ struct net_device *ndev;
+
+ ndev = mana_ib_get_netdev(&dev->ib_dev, port);
+ mpc = netdev_priv(ndev);
+
+ mutex_lock(&pd->vport_mutex);
+
+ pd->vport_use_count--;
+ WARN_ON(pd->vport_use_count < 0);
+
+ if (!pd->vport_use_count)
+ mana_uncfg_vport(mpc);
+
+ mutex_unlock(&pd->vport_mutex);
+}
+
+int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port, struct mana_ib_pd *pd,
+ u32 doorbell_id)
+{
+ struct mana_port_context *mpc;
+ struct net_device *ndev;
+ int err;
+
+ ndev = mana_ib_get_netdev(&dev->ib_dev, port);
+ mpc = netdev_priv(ndev);
+
+ mutex_lock(&pd->vport_mutex);
+
+ pd->vport_use_count++;
+ if (pd->vport_use_count > 1) {
+ ibdev_dbg(&dev->ib_dev,
+ "Skip as the vport is already configured for this PD\n");
+ mutex_unlock(&pd->vport_mutex);
+ return 0;
+ }
+
+ err = mana_cfg_vport(mpc, pd->pdn, doorbell_id);
+ if (err) {
+ pd->vport_use_count--;
+ mutex_unlock(&pd->vport_mutex);
+
+ ibdev_dbg(&dev->ib_dev, "Failed to configure vPort %d\n", err);
+ return err;
+ }
+
+ mutex_unlock(&pd->vport_mutex);
+
+ pd->tx_shortform_allowed = mpc->tx_shortform_allowed;
+ pd->tx_vp_offset = mpc->tx_vp_offset;
+
+ ibdev_dbg(&dev->ib_dev, "vport handle %llx pdid %x doorbell_id %x\n",
+ mpc->port_handle, pd->pdn, doorbell_id);
+
+ return 0;
+}
+
+int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
+ struct ib_device *ibdev = ibpd->device;
+ struct gdma_create_pd_resp resp = {};
+ struct gdma_create_pd_req req = {};
+ enum gdma_pd_flags flags = 0;
+ struct mana_ib_dev *dev;
+ struct gdma_context *gc;
+ int err;
+
+ dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ gc = mdev_to_gc(dev);
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_PD, sizeof(req),
+ sizeof(resp));
+
+ if (!udata)
+ flags |= GDMA_PD_FLAG_ALLOW_GPA_MR;
+
+ req.flags = flags;
+ err = mana_gd_send_request(gc, sizeof(req), &req,
+ sizeof(resp), &resp);
+
+ if (err || resp.hdr.status) {
+ ibdev_dbg(&dev->ib_dev,
+ "Failed to get pd_id err %d status %u\n", err,
+ resp.hdr.status);
+ if (!err)
+ err = -EPROTO;
+
+ return err;
+ }
+
+ pd->pd_handle = resp.pd_handle;
+ pd->pdn = resp.pd_id;
+ ibdev_dbg(&dev->ib_dev, "pd_handle 0x%llx pd_id %d\n",
+ pd->pd_handle, pd->pdn);
+
+ mutex_init(&pd->vport_mutex);
+ pd->vport_use_count = 0;
+ return 0;
+}
+
+int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
+ struct ib_device *ibdev = ibpd->device;
+ struct gdma_destory_pd_resp resp = {};
+ struct gdma_destroy_pd_req req = {};
+ struct mana_ib_dev *dev;
+ struct gdma_context *gc;
+ int err;
+
+ dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ gc = mdev_to_gc(dev);
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_PD, sizeof(req),
+ sizeof(resp));
+
+ req.pd_handle = pd->pd_handle;
+ err = mana_gd_send_request(gc, sizeof(req), &req,
+ sizeof(resp), &resp);
+
+ if (err || resp.hdr.status) {
+ ibdev_dbg(&dev->ib_dev,
+ "Failed to destroy pd_handle 0x%llx err %d status %u",
+ pd->pd_handle, err, resp.hdr.status);
+ if (!err)
+ err = -EPROTO;
+ }
+
+ return err;
+}
+
+static int mana_gd_destroy_doorbell_page(struct gdma_context *gc,
+ int doorbell_page)
+{
+ struct gdma_destroy_resource_range_req req = {};
+ struct gdma_resp_hdr resp = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_RESOURCE_RANGE,
+ sizeof(req), sizeof(resp));
+
+ req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
+ req.num_resources = 1;
+ req.allocated_resources = doorbell_page;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err || resp.status) {
+ dev_err(gc->dev,
+ "Failed to destroy doorbell page: ret %d, 0x%x\n",
+ err, resp.status);
+ return err ?: -EPROTO;
+ }
+
+ return 0;
+}
+
+static int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
+ int *doorbell_page)
+{
+ struct gdma_allocate_resource_range_req req = {};
+ struct gdma_allocate_resource_range_resp resp = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_ALLOCATE_RESOURCE_RANGE,
+ sizeof(req), sizeof(resp));
+
+ req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
+ req.num_resources = 1;
+ req.alignment = PAGE_SIZE / MANA_PAGE_SIZE;
+
+ /* Have GDMA start searching from 0 */
+ req.allocated_resources = 0;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err || resp.hdr.status) {
+ dev_err(gc->dev,
+ "Failed to allocate doorbell page: ret %d, 0x%x\n",
+ err, resp.hdr.status);
+ return err ?: -EPROTO;
+ }
+
+ *doorbell_page = resp.allocated_resources;
+
+ return 0;
+}
+
+int mana_ib_alloc_ucontext(struct ib_ucontext *ibcontext,
+ struct ib_udata *udata)
+{
+ struct mana_ib_ucontext *ucontext =
+ container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
+ struct ib_device *ibdev = ibcontext->device;
+ struct mana_ib_dev *mdev;
+ struct gdma_context *gc;
+ int doorbell_page;
+ int ret;
+
+ mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ gc = mdev_to_gc(mdev);
+
+ /* Allocate a doorbell page index */
+ ret = mana_gd_allocate_doorbell_page(gc, &doorbell_page);
+ if (ret) {
+ ibdev_dbg(ibdev, "Failed to allocate doorbell page %d\n", ret);
+ return ret;
+ }
+
+ ibdev_dbg(ibdev, "Doorbell page allocated %d\n", doorbell_page);
+
+ ucontext->doorbell = doorbell_page;
+
+ return 0;
+}
+
+void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
+{
+ struct mana_ib_ucontext *mana_ucontext =
+ container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
+ struct ib_device *ibdev = ibcontext->device;
+ struct mana_ib_dev *mdev;
+ struct gdma_context *gc;
+ int ret;
+
+ mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ gc = mdev_to_gc(mdev);
+
+ ret = mana_gd_destroy_doorbell_page(gc, mana_ucontext->doorbell);
+ if (ret)
+ ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
+}
+
+int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_queue_type type,
+ struct mana_ib_queue *queue)
+{
+ struct gdma_queue_spec spec = {};
+ int err;
+
+ queue->id = INVALID_QUEUE_ID;
+ queue->gdma_region = GDMA_INVALID_DMA_REGION;
+ spec.type = type;
+ spec.monitor_avl_buf = false;
+ spec.queue_size = size;
+ err = mana_gd_create_mana_wq_cq(mdev->gdma_dev, &spec, &queue->kmem);
+ if (err)
+ return err;
+ /* mana_ib takes ownership of the DMA region from the mana core */
+ queue->gdma_region = queue->kmem->mem_info.dma_region_handle;
+ queue->kmem->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
+ return 0;
+}
+
+int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
+ struct mana_ib_queue *queue)
+{
+ struct ib_umem *umem;
+ int err;
+
+ queue->umem = NULL;
+ queue->id = INVALID_QUEUE_ID;
+ queue->gdma_region = GDMA_INVALID_DMA_REGION;
+
+ umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(umem)) {
+ ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %pe\n", umem);
+ return PTR_ERR(umem);
+ }
+
+ err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
+ if (err) {
+ ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n", err);
+ goto free_umem;
+ }
+ queue->umem = umem;
+
+ ibdev_dbg(&mdev->ib_dev, "created dma region 0x%llx\n", queue->gdma_region);
+
+ return 0;
+free_umem:
+ ib_umem_release(umem);
+ return err;
+}
+
+void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue)
+{
+ /* Ignore return code as there is not much we can do about it.
+ * The error message is printed inside.
+ */
+ mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
+ ib_umem_release(queue->umem);
+ if (queue->kmem)
+ mana_gd_destroy_queue(mdev_to_gc(mdev), queue->kmem);
+}
+
+static int
+mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
+ struct gdma_context *gc,
+ struct gdma_create_dma_region_req *create_req,
+ size_t num_pages, mana_handle_t *gdma_region,
+ u32 expected_status)
+{
+ struct gdma_create_dma_region_resp create_resp = {};
+ unsigned int create_req_msg_size;
+ int err;
+
+ create_req_msg_size =
+ struct_size(create_req, page_addr_list, num_pages);
+ create_req->page_addr_list_len = num_pages;
+
+ err = mana_gd_send_request(gc, create_req_msg_size, create_req,
+ sizeof(create_resp), &create_resp);
+ if (err || create_resp.hdr.status != expected_status) {
+ ibdev_dbg(&dev->ib_dev,
+ "Failed to create DMA region: %d, 0x%x\n",
+ err, create_resp.hdr.status);
+ if (!err)
+ err = -EPROTO;
+
+ return err;
+ }
+
+ *gdma_region = create_resp.dma_region_handle;
+ ibdev_dbg(&dev->ib_dev, "Created DMA region handle 0x%llx\n",
+ *gdma_region);
+
+ return 0;
+}
+
+static int
+mana_ib_gd_add_dma_region(struct mana_ib_dev *dev, struct gdma_context *gc,
+ struct gdma_dma_region_add_pages_req *add_req,
+ unsigned int num_pages, u32 expected_status)
+{
+ unsigned int add_req_msg_size =
+ struct_size(add_req, page_addr_list, num_pages);
+ struct gdma_general_resp add_resp = {};
+ int err;
+
+ mana_gd_init_req_hdr(&add_req->hdr, GDMA_DMA_REGION_ADD_PAGES,
+ add_req_msg_size, sizeof(add_resp));
+ add_req->page_addr_list_len = num_pages;
+
+ err = mana_gd_send_request(gc, add_req_msg_size, add_req,
+ sizeof(add_resp), &add_resp);
+ if (err || add_resp.hdr.status != expected_status) {
+ ibdev_dbg(&dev->ib_dev,
+ "Failed to create DMA region: %d, 0x%x\n",
+ err, add_resp.hdr.status);
+
+ if (!err)
+ err = -EPROTO;
+
+ return err;
+ }
+
+ return 0;
+}
+
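+/* Page addresses are sent to the HW channel in chunks bounded by the HWC
+ * maximum request size: the first chunk goes in a GDMA_CREATE_DMA_REGION
+ * request, subsequent chunks in GDMA_DMA_REGION_ADD_PAGES requests against
+ * the returned region handle.
+ */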
+static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region, unsigned long page_sz)
+{
+ struct gdma_dma_region_add_pages_req *add_req = NULL;
+ size_t num_pages_processed = 0, num_pages_to_handle;
+ struct gdma_create_dma_region_req *create_req;
+ unsigned int create_req_msg_size;
+ struct hw_channel_context *hwc;
+ struct ib_block_iter biter;
+ size_t max_pgs_add_cmd = 0;
+ size_t max_pgs_create_cmd;
+ struct gdma_context *gc;
+ size_t num_pages_total;
+ unsigned int tail = 0;
+ u64 *page_addr_list;
+ void *request_buf;
+ int err = 0;
+
+ gc = mdev_to_gc(dev);
+ hwc = gc->hwc.driver_data;
+
+ num_pages_total = ib_umem_num_dma_blocks(umem, page_sz);
+
+ max_pgs_create_cmd =
+ (hwc->max_req_msg_size - sizeof(*create_req)) / sizeof(u64);
+ num_pages_to_handle =
+ min_t(size_t, num_pages_total, max_pgs_create_cmd);
+ create_req_msg_size =
+ struct_size(create_req, page_addr_list, num_pages_to_handle);
+
+ request_buf = kzalloc(hwc->max_req_msg_size, GFP_KERNEL);
+ if (!request_buf)
+ return -ENOMEM;
+
+ create_req = request_buf;
+ mana_gd_init_req_hdr(&create_req->hdr, GDMA_CREATE_DMA_REGION,
+ create_req_msg_size,
+ sizeof(struct gdma_create_dma_region_resp));
+
+ create_req->length = umem->length;
+ create_req->offset_in_page = ib_umem_dma_offset(umem, page_sz);
+ create_req->gdma_page_type = order_base_2(page_sz) - MANA_PAGE_SHIFT;
+ create_req->page_count = num_pages_total;
+
+ ibdev_dbg(&dev->ib_dev, "size_dma_region %lu num_pages_total %lu\n",
+ umem->length, num_pages_total);
+
+ ibdev_dbg(&dev->ib_dev, "page_sz %lu offset_in_page %u\n",
+ page_sz, create_req->offset_in_page);
+
+ ibdev_dbg(&dev->ib_dev, "num_pages_to_handle %lu, gdma_page_type %u",
+ num_pages_to_handle, create_req->gdma_page_type);
+
+ page_addr_list = create_req->page_addr_list;
+ rdma_umem_for_each_dma_block(umem, &biter, page_sz) {
+ u32 expected_status = 0;
+
+ page_addr_list[tail++] = rdma_block_iter_dma_address(&biter);
+ if (tail < num_pages_to_handle)
+ continue;
+
+ if (num_pages_processed + num_pages_to_handle <
+ num_pages_total)
+ expected_status = GDMA_STATUS_MORE_ENTRIES;
+
+ if (!num_pages_processed) {
+ /* First create message */
+ err = mana_ib_gd_first_dma_region(dev, gc, create_req,
+ tail, gdma_region,
+ expected_status);
+ if (err)
+ goto out;
+
+ max_pgs_add_cmd = (hwc->max_req_msg_size -
+ sizeof(*add_req)) / sizeof(u64);
+
+ add_req = request_buf;
+ add_req->dma_region_handle = *gdma_region;
+ add_req->reserved3 = 0;
+ page_addr_list = add_req->page_addr_list;
+ } else {
+ /* Subsequent add-pages messages */
+ err = mana_ib_gd_add_dma_region(dev, gc, add_req, tail,
+ expected_status);
+ if (err)
+ break;
+ }
+
+ num_pages_processed += tail;
+ tail = 0;
+
+ /* The remaining pages to create */
+ num_pages_to_handle =
+ min_t(size_t,
+ num_pages_total - num_pages_processed,
+ max_pgs_add_cmd);
+ }
+
+ if (err)
+ mana_ib_gd_destroy_dma_region(dev, *gdma_region);
+
+out:
+ kfree(request_buf);
+ return err;
+}
+
+int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region, u64 virt)
+{
+ unsigned long page_sz;
+
+ page_sz = ib_umem_find_best_pgsz(umem, dev->adapter_caps.page_size_cap, virt);
+ if (!page_sz) {
+ ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
+ return -EINVAL;
+ }
+
+ return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
+}
+
+int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region)
+{
+ unsigned long page_sz;
+
+ /* Hardware requires the DMA region to be aligned to the chosen page size */
+ page_sz = ib_umem_find_best_pgoff(umem, dev->adapter_caps.page_size_cap, 0);
+ if (!page_sz) {
+ ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
+ return -EINVAL;
+ }
+
+ return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
+}
+
+int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, u64 gdma_region)
+{
+ struct gdma_context *gc = mdev_to_gc(dev);
+
+ ibdev_dbg(&dev->ib_dev, "destroy dma region 0x%llx\n", gdma_region);
+
+ return mana_gd_destroy_dma_region(gc, gdma_region);
+}
+
+int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
+{
+ struct mana_ib_ucontext *mana_ucontext =
+ container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
+ struct ib_device *ibdev = ibcontext->device;
+ struct mana_ib_dev *mdev;
+ struct gdma_context *gc;
+ phys_addr_t pfn;
+ pgprot_t prot;
+ int ret;
+
+ mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ gc = mdev_to_gc(mdev);
+
+ if (vma->vm_pgoff != 0) {
+ ibdev_dbg(ibdev, "Unexpected vm_pgoff %lu\n", vma->vm_pgoff);
+ return -EINVAL;
+ }
+
+ /* Map to the page indexed by ucontext->doorbell */
+ pfn = (gc->phys_db_page_base +
+ gc->db_page_size * mana_ucontext->doorbell) >>
+ PAGE_SHIFT;
+ prot = pgprot_writecombine(vma->vm_page_prot);
+
+ ret = rdma_user_mmap_io(ibcontext, vma, pfn, PAGE_SIZE, prot,
+ NULL);
+ if (ret)
+ ibdev_dbg(ibdev, "can't rdma_user_mmap_io ret %d\n", ret);
+ else
+ ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %lu, ret %d\n",
+ pfn, PAGE_SIZE, ret);
+
+ return ret;
+}
+
+int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ struct ib_port_attr attr;
+ int err;
+
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+
+ if (mana_ib_is_rnic(dev)) {
+ if (port_num == 1) {
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ } else {
+ immutable->core_cap_flags = RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP
+ | RDMA_CORE_CAP_ETH_AH;
+ immutable->max_mad_size = 0;
+ }
+ } else {
+ immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
+ }
+
+ return 0;
+}
+
+int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
+ struct ib_udata *uhw)
+{
+ struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ struct pci_dev *pdev = to_pci_dev(mdev_to_gc(dev)->dev);
+
+ memset(props, 0, sizeof(*props));
+ props->vendor_id = pdev->vendor;
+ props->vendor_part_id = dev->gdma_dev->dev_id.type;
+ props->max_mr_size = MANA_IB_MAX_MR_SIZE;
+ props->page_size_cap = dev->adapter_caps.page_size_cap;
+ props->max_qp = dev->adapter_caps.max_qp_count;
+ props->max_qp_wr = dev->adapter_caps.max_qp_wr;
+ props->device_cap_flags = IB_DEVICE_RC_RNR_NAK_GEN;
+ props->max_send_sge = dev->adapter_caps.max_send_sge_count;
+ props->max_recv_sge = dev->adapter_caps.max_recv_sge_count;
+ props->max_sge_rd = dev->adapter_caps.max_recv_sge_count;
+ props->max_cq = dev->adapter_caps.max_cq_count;
+ props->max_cqe = dev->adapter_caps.max_qp_wr;
+ props->max_mr = dev->adapter_caps.max_mr_count;
+ props->max_pd = dev->adapter_caps.max_pd_count;
+ props->max_qp_rd_atom = dev->adapter_caps.max_inbound_read_limit;
+ props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
+ props->max_qp_init_rd_atom = dev->adapter_caps.max_outbound_read_limit;
+ props->atomic_cap = IB_ATOMIC_NONE;
+ props->masked_atomic_cap = IB_ATOMIC_NONE;
+ props->max_ah = INT_MAX;
+ props->max_pkeys = 1;
+ props->local_ca_ack_delay = MANA_CA_ACK_DELAY;
+ if (!mana_ib_is_rnic(dev))
+ props->raw_packet_caps = IB_RAW_PACKET_CAP_IP_CSUM;
+
+ return 0;
+}
+
+int mana_ib_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props)
+{
+ struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ struct net_device *ndev = mana_ib_get_netdev(ibdev, port);
+
+ if (!ndev)
+ return -EINVAL;
+
+ memset(props, 0, sizeof(*props));
+ props->max_mtu = IB_MTU_4096;
+ props->active_mtu = ib_mtu_int_to_enum(ndev->mtu);
+
+ if (netif_carrier_ok(ndev) && netif_running(ndev)) {
+ props->state = IB_PORT_ACTIVE;
+ props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+ } else {
+ props->state = IB_PORT_DOWN;
+ props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+ }
+
+ props->active_width = IB_WIDTH_4X;
+ props->active_speed = IB_SPEED_EDR;
+ props->pkey_tbl_len = 1;
+ if (mana_ib_is_rnic(dev)) {
+ props->gid_tbl_len = 16;
+ props->ip_gids = true;
+ if (port == 1)
+ props->port_cap_flags = IB_PORT_CM_SUP;
+ }
+
+ return 0;
+}
+
+enum rdma_link_layer mana_ib_get_link_layer(struct ib_device *device, u32 port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+int mana_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
+{
+ if (index != 0)
+ return -EINVAL;
+ *pkey = IB_DEFAULT_PKEY_FULL;
+ return 0;
+}
+
+int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
+ union ib_gid *gid)
+{
+ /* This version doesn't return GID properties */
+ return 0;
+}
+
+void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
+{
+}
+
+int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
+{
+ struct mana_ib_adapter_caps *caps = &dev->adapter_caps;
+ struct mana_ib_query_adapter_caps_resp resp = {};
+ struct mana_ib_query_adapter_caps_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_GET_ADAPTER_CAP, sizeof(req),
+ sizeof(resp));
+ req.hdr.resp.msg_version = GDMA_MESSAGE_V4;
+ req.hdr.dev_id = dev->gdma_dev->dev_id;
+
+ err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req),
+ &req, sizeof(resp), &resp);
+
+ if (err) {
+ ibdev_err(&dev->ib_dev,
+ "Failed to query adapter caps err %d", err);
+ return err;
+ }
+
+ caps->max_sq_id = resp.max_sq_id;
+ caps->max_rq_id = resp.max_rq_id;
+ caps->max_cq_id = resp.max_cq_id;
+ caps->max_qp_count = resp.max_qp_count;
+ caps->max_cq_count = resp.max_cq_count;
+ caps->max_mr_count = resp.max_mr_count;
+ caps->max_pd_count = resp.max_pd_count;
+ caps->max_inbound_read_limit = resp.max_inbound_read_limit;
+ caps->max_outbound_read_limit = resp.max_outbound_read_limit;
+ caps->mw_count = resp.mw_count;
+ caps->max_srq_count = resp.max_srq_count;
+ caps->max_qp_wr = min_t(u32,
+ resp.max_requester_sq_size / GDMA_MAX_SQE_SIZE,
+ resp.max_requester_rq_size / GDMA_MAX_RQE_SIZE);
+ caps->max_inline_data_size = resp.max_inline_data_size;
+ caps->max_send_sge_count = resp.max_send_sge_count;
+ caps->max_recv_sge_count = resp.max_recv_sge_count;
+ caps->feature_flags = resp.feature_flags;
+
+ caps->page_size_cap = PAGE_SZ_BM;
+ if (mdev_to_gc(dev)->pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB)
+ caps->page_size_cap |= (SZ_4M | SZ_1G | SZ_2G);
+
+ return 0;
+}
+
+int mana_eth_query_adapter_caps(struct mana_ib_dev *dev)
+{
+ struct mana_ib_adapter_caps *caps = &dev->adapter_caps;
+ struct gdma_query_max_resources_resp resp = {};
+ struct gdma_general_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
+ sizeof(req), sizeof(resp));
+
+ err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&dev->ib_dev,
+ "Failed to query adapter caps err %d", err);
+ return err;
+ }
+
+ caps->max_qp_count = min_t(u32, resp.max_sq, resp.max_rq);
+ caps->max_cq_count = resp.max_cq;
+ caps->max_mr_count = resp.max_mst;
+ caps->max_pd_count = 0x6000;
+ caps->max_qp_wr = min_t(u32,
+ 0x100000 / GDMA_MAX_SQE_SIZE,
+ 0x100000 / GDMA_MAX_RQE_SIZE);
+ caps->max_send_sge_count = 30;
+ caps->max_recv_sge_count = 15;
+ caps->page_size_cap = PAGE_SZ_BM;
+
+ return 0;
+}
+
+static void
+mana_ib_event_handler(void *ctx, struct gdma_queue *q, struct gdma_event *event)
+{
+ struct mana_ib_dev *mdev = (struct mana_ib_dev *)ctx;
+ struct mana_ib_qp *qp;
+ struct ib_event ev;
+ u32 qpn;
+
+ switch (event->type) {
+ case GDMA_EQE_RNIC_QP_FATAL:
+ qpn = event->details[0];
+ qp = mana_get_qp_ref(mdev, qpn, false);
+ if (!qp)
+ break;
+ if (qp->ibqp.event_handler) {
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_QP_FATAL;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+ mana_put_qp_ref(qp);
+ break;
+ default:
+ break;
+ }
+}
+
+int mana_ib_create_eqs(struct mana_ib_dev *mdev)
+{
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct gdma_queue_spec spec = {};
+ int err, i;
+
+ spec.type = GDMA_EQ;
+ spec.monitor_avl_buf = false;
+ spec.queue_size = EQ_SIZE;
+ spec.eq.callback = mana_ib_event_handler;
+ spec.eq.context = mdev;
+ spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
+ spec.eq.msix_index = 0;
+
+ err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->fatal_err_eq);
+ if (err)
+ return err;
+
+ mdev->eqs = kcalloc(mdev->ib_dev.num_comp_vectors, sizeof(struct gdma_queue *),
+ GFP_KERNEL);
+ if (!mdev->eqs) {
+ err = -ENOMEM;
+ goto destroy_fatal_eq;
+ }
+ spec.eq.callback = NULL;
+ for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++) {
+ spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
+ err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->eqs[i]);
+ if (err)
+ goto destroy_eqs;
+ }
+
+ return 0;
+
+destroy_eqs:
+ while (i-- > 0)
+ mana_gd_destroy_queue(gc, mdev->eqs[i]);
+ kfree(mdev->eqs);
+destroy_fatal_eq:
+ mana_gd_destroy_queue(gc, mdev->fatal_err_eq);
+ return err;
+}
+
+void mana_ib_destroy_eqs(struct mana_ib_dev *mdev)
+{
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ int i;
+
+ mana_gd_destroy_queue(gc, mdev->fatal_err_eq);
+
+ for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++)
+ mana_gd_destroy_queue(gc, mdev->eqs[i]);
+
+ kfree(mdev->eqs);
+}
+
+int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev)
+{
+ struct mana_rnic_create_adapter_resp resp = {};
+ struct mana_rnic_create_adapter_req req = {};
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_ADAPTER, sizeof(req), sizeof(resp));
+ req.hdr.req.msg_version = GDMA_MESSAGE_V2;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
+ req.notify_eq_id = mdev->fatal_err_eq->id;
+
+ if (mdev->adapter_caps.feature_flags & MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT)
+ req.feature_flags |= MANA_IB_FEATURE_CLIENT_ERROR_CQE_REQUEST;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to create RNIC adapter err %d", err);
+ return err;
+ }
+ mdev->adapter_handle = resp.adapter;
+
+ return 0;
+}
+
+int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev)
+{
+ struct mana_rnic_destroy_adapter_resp resp = {};
+ struct mana_rnic_destroy_adapter_req req = {};
+ struct gdma_context *gc;
+ int err;
+
+ gc = mdev_to_gc(mdev);
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_ADAPTER, sizeof(req), sizeof(resp));
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
+ req.adapter = mdev->adapter_handle;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to destroy RNIC adapter err %d", err);
+ return err;
+ }
+
+ return 0;
+}
+
+int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context)
+{
+ struct mana_ib_dev *mdev = container_of(attr->device, struct mana_ib_dev, ib_dev);
+ enum rdma_network_type ntype = rdma_gid_attr_network_type(attr);
+ struct mana_rnic_config_addr_resp resp = {};
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct mana_rnic_config_addr_req req = {};
+ int err;
+
+ if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
+ ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d", ntype);
+ return -EINVAL;
+ }
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
+ req.adapter = mdev->adapter_handle;
+ req.op = ADDR_OP_ADD;
+ req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
+ copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context)
+{
+ struct mana_ib_dev *mdev = container_of(attr->device, struct mana_ib_dev, ib_dev);
+ enum rdma_network_type ntype = rdma_gid_attr_network_type(attr);
+ struct mana_rnic_config_addr_resp resp = {};
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct mana_rnic_config_addr_req req = {};
+ int err;
+
+ if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
+ ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d", ntype);
+ return -EINVAL;
+ }
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
+ req.adapter = mdev->adapter_handle;
+ req.op = ADDR_OP_REMOVE;
+ req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
+ copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8 *mac)
+{
+ struct mana_rnic_config_mac_addr_resp resp = {};
+ struct mana_rnic_config_mac_addr_req req = {};
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_MAC_ADDR, sizeof(req), sizeof(resp));
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
+ req.adapter = mdev->adapter_handle;
+ req.op = op;
+ copy_in_reverse(req.mac_addr, mac, ETH_ALEN);
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to config Mac addr err %d", err);
+ return err;
+ }
+
+ return 0;
+}
+
+int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 doorbell)
+{
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct mana_rnic_create_cq_resp resp = {};
+ struct mana_rnic_create_cq_req req = {};
+ int err;
+
+ if (!mdev->eqs)
+ return -EINVAL;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_CQ, sizeof(req), sizeof(resp));
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
+ req.adapter = mdev->adapter_handle;
+ req.gdma_region = cq->queue.gdma_region;
+ req.eq_id = mdev->eqs[cq->comp_vector]->id;
+ req.doorbell_page = doorbell;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to create cq err %d", err);
+ return err;
+ }
+
+ cq->queue.id = resp.cq_id;
+ cq->cq_handle = resp.cq_handle;
+ /* The GDMA region is now owned by the CQ handle */
+ cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
+
+ return 0;
+}
+
+int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
+{
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct mana_rnic_destroy_cq_resp resp = {};
+ struct mana_rnic_destroy_cq_req req = {};
+ int err;
+
+ if (cq->cq_handle == INVALID_MANA_HANDLE)
+ return 0;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_CQ, sizeof(req), sizeof(resp));
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
+ req.adapter = mdev->adapter_handle;
+ req.cq_handle = cq->cq_handle;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to destroy cq err %d", err);
+ return err;
+ }
+
+ return 0;
+}
+
+int mana_ib_gd_create_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
+ struct ib_qp_init_attr *attr, u32 doorbell, u64 flags)
+{
+ struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
+ struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
+ struct mana_ib_pd *pd = container_of(qp->ibqp.pd, struct mana_ib_pd, ibpd);
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct mana_rnic_create_qp_resp resp = {};
+ struct mana_rnic_create_qp_req req = {};
+ int err, i;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_RC_QP, sizeof(req), sizeof(resp));
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
+ req.adapter = mdev->adapter_handle;
+ req.pd_handle = pd->pd_handle;
+ req.send_cq_handle = send_cq->cq_handle;
+ req.recv_cq_handle = recv_cq->cq_handle;
+ for (i = 0; i < MANA_RC_QUEUE_TYPE_MAX; i++)
+ req.dma_region[i] = qp->rc_qp.queues[i].gdma_region;
+ req.doorbell_page = doorbell;
+ req.max_send_wr = attr->cap.max_send_wr;
+ req.max_recv_wr = attr->cap.max_recv_wr;
+ req.max_send_sge = attr->cap.max_send_sge;
+ req.max_recv_sge = attr->cap.max_recv_sge;
+ req.flags = flags;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to create rc qp err %d", err);
+ return err;
+ }
+ qp->qp_handle = resp.rc_qp_handle;
+ for (i = 0; i < MANA_RC_QUEUE_TYPE_MAX; i++) {
+ qp->rc_qp.queues[i].id = resp.queue_ids[i];
+ /* The GDMA regions are now owned by the RNIC QP handle */
+ qp->rc_qp.queues[i].gdma_region = GDMA_INVALID_DMA_REGION;
+ }
+ return 0;
+}
+
+int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
+{
+ struct mana_rnic_destroy_rc_qp_resp resp = {0};
+ struct mana_rnic_destroy_rc_qp_req req = {0};
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_RC_QP, sizeof(req), sizeof(resp));
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
+ req.adapter = mdev->adapter_handle;
+ req.rc_qp_handle = qp->qp_handle;
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to destroy rc qp err %d", err);
+ return err;
+ }
+ return 0;
+}
+
+int mana_ib_gd_create_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
+ struct ib_qp_init_attr *attr, u32 doorbell, u32 type)
+{
+ struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
+ struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
+ struct mana_ib_pd *pd = container_of(qp->ibqp.pd, struct mana_ib_pd, ibpd);
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct mana_rnic_create_udqp_resp resp = {};
+ struct mana_rnic_create_udqp_req req = {};
+ int err, i;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_UD_QP, sizeof(req), sizeof(resp));
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
+ req.adapter = mdev->adapter_handle;
+ req.pd_handle = pd->pd_handle;
+ req.send_cq_handle = send_cq->cq_handle;
+ req.recv_cq_handle = recv_cq->cq_handle;
+ for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; i++)
+ req.dma_region[i] = qp->ud_qp.queues[i].gdma_region;
+ req.doorbell_page = doorbell;
+ req.max_send_wr = attr->cap.max_send_wr;
+ req.max_recv_wr = attr->cap.max_recv_wr;
+ req.max_send_sge = attr->cap.max_send_sge;
+ req.max_recv_sge = attr->cap.max_recv_sge;
+ req.qp_type = type;
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to create ud qp err %d", err);
+ return err;
+ }
+ qp->qp_handle = resp.qp_handle;
+ for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; i++) {
+ qp->ud_qp.queues[i].id = resp.queue_ids[i];
+ /* The GDMA regions are now owned by the RNIC QP handle */
+ qp->ud_qp.queues[i].gdma_region = GDMA_INVALID_DMA_REGION;
+ }
+ return 0;
+}
+
+int mana_ib_gd_destroy_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
+{
+ struct mana_rnic_destroy_udqp_resp resp = {0};
+ struct mana_rnic_destroy_udqp_req req = {0};
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_UD_QP, sizeof(req), sizeof(resp));
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
+ req.adapter = mdev->adapter_handle;
+ req.qp_handle = qp->qp_handle;
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to destroy ud qp err %d", err);
+ return err;
+ }
+ return 0;
+}
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
new file mode 100644
index 000000000000..9d36232ed880
--- /dev/null
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -0,0 +1,738 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Microsoft Corporation. All rights reserved.
+ */
+
+#ifndef _MANA_IB_H_
+#define _MANA_IB_H_
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_mad.h>
+#include <rdma/ib_umem.h>
+#include <rdma/mana-abi.h>
+#include <rdma/uverbs_ioctl.h>
+#include <linux/dmapool.h>
+
+#include <net/mana/mana.h>
+#include "shadow_queue.h"
+#include "counters.h"
+
+#define PAGE_SZ_BM \
+ (SZ_4K | SZ_8K | SZ_16K | SZ_32K | SZ_64K | SZ_128K | SZ_256K | \
+ SZ_512K | SZ_1M | SZ_2M)
+
+/* MANA doesn't have any limit for MR size */
+#define MANA_IB_MAX_MR_SIZE U64_MAX
+
+/* Send queue ID mask */
+#define MANA_SENDQ_MASK BIT(31)
+
+/*
+ * The hardware limit on the number of MRs is greater than the maximum
+ * number of MRs that can possibly be represented in 24 bits
+ */
+#define MANA_IB_MAX_MR 0xFFFFFFu
+
+/*
+ * The CA timeout is approx. 260ms (4us * 2^(DELAY))
+ */
+#define MANA_CA_ACK_DELAY 16
+
+/*
+ * The buffer used for writing AV
+ */
+#define MANA_AV_BUFFER_SIZE 64
+
+#define MANA_GSI_QPN (1)
+
+struct mana_ib_adapter_caps {
+ u32 max_sq_id;
+ u32 max_rq_id;
+ u32 max_cq_id;
+ u32 max_qp_count;
+ u32 max_cq_count;
+ u32 max_mr_count;
+ u32 max_pd_count;
+ u32 max_inbound_read_limit;
+ u32 max_outbound_read_limit;
+ u32 mw_count;
+ u32 max_srq_count;
+ u32 max_qp_wr;
+ u32 max_send_sge_count;
+ u32 max_recv_sge_count;
+ u32 max_inline_data_size;
+ u64 feature_flags;
+ u64 page_size_cap;
+};
+
+struct mana_ib_queue {
+ struct ib_umem *umem;
+ struct gdma_queue *kmem;
+ u64 gdma_region;
+ u64 id;
+};
+
+struct mana_ib_dev {
+ struct ib_device ib_dev;
+ struct gdma_dev *gdma_dev;
+ mana_handle_t adapter_handle;
+ struct gdma_queue *fatal_err_eq;
+ struct gdma_queue **eqs;
+ struct xarray qp_table_wq;
+ struct mana_ib_adapter_caps adapter_caps;
+ struct dma_pool *av_pool;
+ netdevice_tracker dev_tracker;
+ struct notifier_block nb;
+};
+
+struct mana_ib_wq {
+ struct ib_wq ibwq;
+ struct mana_ib_queue queue;
+ int wqe;
+ u32 wq_buf_size;
+ mana_handle_t rx_object;
+};
+
+struct mana_ib_pd {
+ struct ib_pd ibpd;
+ u32 pdn;
+ mana_handle_t pd_handle;
+
+ /* Mutex for sharing access to vport_use_count */
+ struct mutex vport_mutex;
+ int vport_use_count;
+
+ bool tx_shortform_allowed;
+ u32 tx_vp_offset;
+};
+
+struct mana_ib_av {
+ u8 dest_ip[16];
+ u8 dest_mac[ETH_ALEN];
+ u16 udp_src_port;
+ u8 src_ip[16];
+ u32 hop_limit : 8;
+ u32 reserved1 : 12;
+ u32 dscp : 6;
+ u32 reserved2 : 5;
+ u32 is_ipv6 : 1;
+ u32 reserved3 : 32;
+};
+
+struct mana_ib_ah {
+ struct ib_ah ibah;
+ struct mana_ib_av *av;
+ dma_addr_t dma_handle;
+};
+
+struct mana_ib_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+ mana_handle_t mr_handle;
+};
+
+struct mana_ib_cq {
+ struct ib_cq ibcq;
+ struct mana_ib_queue queue;
+ /* protects CQ polling */
+ spinlock_t cq_lock;
+ struct list_head list_send_qp;
+ struct list_head list_recv_qp;
+ int cqe;
+ u32 comp_vector;
+ mana_handle_t cq_handle;
+};
+
+enum mana_rc_queue_type {
+ MANA_RC_SEND_QUEUE_REQUESTER = 0,
+ MANA_RC_SEND_QUEUE_RESPONDER,
+ MANA_RC_SEND_QUEUE_FMR,
+ MANA_RC_RECV_QUEUE_REQUESTER,
+ MANA_RC_RECV_QUEUE_RESPONDER,
+ MANA_RC_QUEUE_TYPE_MAX,
+};
+
+struct mana_ib_rc_qp {
+ struct mana_ib_queue queues[MANA_RC_QUEUE_TYPE_MAX];
+};
+
+enum mana_ud_queue_type {
+ MANA_UD_SEND_QUEUE = 0,
+ MANA_UD_RECV_QUEUE,
+ MANA_UD_QUEUE_TYPE_MAX,
+};
+
+struct mana_ib_ud_qp {
+ struct mana_ib_queue queues[MANA_UD_QUEUE_TYPE_MAX];
+ u32 sq_psn;
+};
+
+struct mana_ib_qp {
+ struct ib_qp ibqp;
+
+ mana_handle_t qp_handle;
+ union {
+ struct mana_ib_queue raw_sq;
+ struct mana_ib_rc_qp rc_qp;
+ struct mana_ib_ud_qp ud_qp;
+ };
+
+ /* The port on the IB device, starting with 1 */
+ u32 port;
+
+ struct list_head cq_send_list;
+ struct list_head cq_recv_list;
+ struct shadow_queue shadow_rq;
+ struct shadow_queue shadow_sq;
+
+ refcount_t refcount;
+ struct completion free;
+};
+
+struct mana_ib_ucontext {
+ struct ib_ucontext ibucontext;
+ u32 doorbell;
+};
+
+struct mana_ib_rwq_ind_table {
+ struct ib_rwq_ind_table ib_ind_table;
+};
+
+enum mana_ib_command_code {
+ MANA_IB_GET_ADAPTER_CAP = 0x30001,
+ MANA_IB_CREATE_ADAPTER = 0x30002,
+ MANA_IB_DESTROY_ADAPTER = 0x30003,
+ MANA_IB_CONFIG_IP_ADDR = 0x30004,
+ MANA_IB_CONFIG_MAC_ADDR = 0x30005,
+ MANA_IB_CREATE_UD_QP = 0x30006,
+ MANA_IB_DESTROY_UD_QP = 0x30007,
+ MANA_IB_CREATE_CQ = 0x30008,
+ MANA_IB_DESTROY_CQ = 0x30009,
+ MANA_IB_CREATE_RC_QP = 0x3000a,
+ MANA_IB_DESTROY_RC_QP = 0x3000b,
+ MANA_IB_SET_QP_STATE = 0x3000d,
+ MANA_IB_QUERY_VF_COUNTERS = 0x30022,
+ MANA_IB_QUERY_DEVICE_COUNTERS = 0x30023,
+};
+
+struct mana_ib_query_adapter_caps_req {
+ struct gdma_req_hdr hdr;
+}; /* HW Data */
+
+enum mana_ib_adapter_features {
+ MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT = BIT(4),
+ MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT = BIT(5),
+ MANA_IB_FEATURE_MULTI_PORTS_SUPPORT = BIT(6),
+};
+
+struct mana_ib_query_adapter_caps_resp {
+ struct gdma_resp_hdr hdr;
+ u32 max_sq_id;
+ u32 max_rq_id;
+ u32 max_cq_id;
+ u32 max_qp_count;
+ u32 max_cq_count;
+ u32 max_mr_count;
+ u32 max_pd_count;
+ u32 max_inbound_read_limit;
+ u32 max_outbound_read_limit;
+ u32 mw_count;
+ u32 max_srq_count;
+ u32 max_requester_sq_size;
+ u32 max_responder_sq_size;
+ u32 max_requester_rq_size;
+ u32 max_responder_rq_size;
+ u32 max_send_sge_count;
+ u32 max_recv_sge_count;
+ u32 max_inline_data_size;
+ u64 feature_flags;
+}; /* HW Data */
+
+enum mana_ib_adapter_features_request {
+ MANA_IB_FEATURE_CLIENT_ERROR_CQE_REQUEST = BIT(1),
+}; /* HW Data */
+
+struct mana_rnic_create_adapter_req {
+ struct gdma_req_hdr hdr;
+ u32 notify_eq_id;
+ u32 reserved;
+ u64 feature_flags;
+}; /* HW Data */
+
+struct mana_rnic_create_adapter_resp {
+ struct gdma_resp_hdr hdr;
+ mana_handle_t adapter;
+}; /* HW Data */
+
+struct mana_rnic_destroy_adapter_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+}; /* HW Data */
+
+struct mana_rnic_destroy_adapter_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW Data */
+
+enum mana_ib_addr_op {
+ ADDR_OP_ADD = 1,
+ ADDR_OP_REMOVE = 2,
+};
+
+enum sgid_entry_type {
+ SGID_TYPE_IPV4 = 1,
+ SGID_TYPE_IPV6 = 2,
+};
+
+struct mana_rnic_config_addr_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+ enum mana_ib_addr_op op;
+ enum sgid_entry_type sgid_type;
+ u8 ip_addr[16];
+}; /* HW Data */
+
+struct mana_rnic_config_addr_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW Data */
+
+struct mana_rnic_config_mac_addr_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+ enum mana_ib_addr_op op;
+ u8 mac_addr[ETH_ALEN];
+ u8 reserved[6];
+}; /* HW Data */
+
+struct mana_rnic_config_mac_addr_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW Data */
+
+struct mana_rnic_create_cq_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+ u64 gdma_region;
+ u32 eq_id;
+ u32 doorbell_page;
+}; /* HW Data */
+
+struct mana_rnic_create_cq_resp {
+ struct gdma_resp_hdr hdr;
+ mana_handle_t cq_handle;
+ u32 cq_id;
+ u32 reserved;
+}; /* HW Data */
+
+struct mana_rnic_destroy_cq_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+ mana_handle_t cq_handle;
+}; /* HW Data */
+
+struct mana_rnic_destroy_cq_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW Data */
+
+enum mana_rnic_create_rc_flags {
+ MANA_RC_FLAG_NO_FMR = 2,
+};
+
+struct mana_rnic_create_qp_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+ mana_handle_t pd_handle;
+ mana_handle_t send_cq_handle;
+ mana_handle_t recv_cq_handle;
+ u64 dma_region[MANA_RC_QUEUE_TYPE_MAX];
+ u64 deprecated[2];
+ u64 flags;
+ u32 doorbell_page;
+ u32 max_send_wr;
+ u32 max_recv_wr;
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 reserved;
+}; /* HW Data */
+
+struct mana_rnic_create_qp_resp {
+ struct gdma_resp_hdr hdr;
+ mana_handle_t rc_qp_handle;
+ u32 queue_ids[MANA_RC_QUEUE_TYPE_MAX];
+ u32 reserved;
+}; /* HW Data */
+
+struct mana_rnic_destroy_rc_qp_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+ mana_handle_t rc_qp_handle;
+}; /* HW Data */
+
+struct mana_rnic_destroy_rc_qp_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW Data */
+
+struct mana_rnic_create_udqp_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+ mana_handle_t pd_handle;
+ mana_handle_t send_cq_handle;
+ mana_handle_t recv_cq_handle;
+ u64 dma_region[MANA_UD_QUEUE_TYPE_MAX];
+ u32 qp_type;
+ u32 doorbell_page;
+ u32 max_send_wr;
+ u32 max_recv_wr;
+ u32 max_send_sge;
+ u32 max_recv_sge;
+}; /* HW Data */
+
+struct mana_rnic_create_udqp_resp {
+ struct gdma_resp_hdr hdr;
+ mana_handle_t qp_handle;
+ u32 queue_ids[MANA_UD_QUEUE_TYPE_MAX];
+}; /* HW Data */
+
+struct mana_rnic_destroy_udqp_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+ mana_handle_t qp_handle;
+}; /* HW Data */
+
+struct mana_rnic_destroy_udqp_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW Data */
+
+struct mana_ib_ah_attr {
+ u8 src_addr[16];
+ u8 dest_addr[16];
+ u8 src_mac[ETH_ALEN];
+ u8 dest_mac[ETH_ALEN];
+ u8 src_addr_type;
+ u8 dest_addr_type;
+ u8 hop_limit;
+ u8 traffic_class;
+ u16 src_port;
+ u16 dest_port;
+ u32 flow_label;
+};
+
+struct mana_rnic_set_qp_state_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+ mana_handle_t qp_handle;
+ u64 attr_mask;
+ u32 qp_state;
+ u32 path_mtu;
+ u32 rq_psn;
+ u32 sq_psn;
+ u32 dest_qpn;
+ u32 max_dest_rd_atomic;
+ u32 retry_cnt;
+ u32 rnr_retry;
+ u32 min_rnr_timer;
+ u32 rate_limit;
+ struct mana_ib_ah_attr ah_attr;
+ u64 reserved1;
+ u32 qkey;
+ u32 qp_access_flags;
+ u8 local_ack_timeout;
+ u8 max_rd_atomic;
+ u16 reserved2;
+ u32 reserved3;
+}; /* HW Data */
+
+struct mana_rnic_set_qp_state_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW Data */
+
+enum WQE_OPCODE_TYPES {
+ WQE_TYPE_UD_SEND = 0,
+ WQE_TYPE_UD_RECV = 8,
+}; /* HW DATA */
+
+struct rdma_send_oob {
+ u32 wqe_type : 5;
+ u32 fence : 1;
+ u32 signaled : 1;
+ u32 solicited : 1;
+ u32 psn : 24;
+
+ u32 ssn_or_rqpn : 24;
+ u32 reserved1 : 8;
+ union {
+ struct {
+ u32 remote_qkey;
+ u32 immediate;
+ u32 reserved1;
+ u32 reserved2;
+ } ud_send;
+ };
+}; /* HW DATA */
+
+struct mana_rdma_cqe {
+ union {
+ struct {
+ u8 cqe_type;
+ u8 data[GDMA_COMP_DATA_SIZE - 1];
+ };
+ struct {
+ u32 cqe_type : 8;
+ u32 vendor_error : 9;
+ u32 reserved1 : 15;
+ u32 sge_offset : 5;
+ u32 tx_wqe_offset : 27;
+ } ud_send;
+ struct {
+ u32 cqe_type : 8;
+ u32 reserved1 : 24;
+ u32 msg_len;
+ u32 src_qpn : 24;
+ u32 reserved2 : 8;
+ u32 imm_data;
+ u32 rx_wqe_offset;
+ } ud_recv;
+ };
+}; /* HW DATA */
+
+struct mana_rnic_query_vf_cntrs_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+}; /* HW Data */
+
+struct mana_rnic_query_vf_cntrs_resp {
+ struct gdma_resp_hdr hdr;
+ u64 requester_timeout;
+ u64 requester_oos_nak;
+ u64 requester_rnr_nak;
+ u64 responder_rnr_nak;
+ u64 responder_oos;
+ u64 responder_dup_request;
+ u64 requester_implicit_nak;
+ u64 requester_readresp_psn_mismatch;
+ u64 nak_inv_req;
+ u64 nak_access_err;
+ u64 nak_opp_err;
+ u64 nak_inv_read;
+ u64 responder_local_len_err;
+ u64 requestor_local_prot_err;
+ u64 responder_rem_access_err;
+ u64 responder_local_qp_err;
+ u64 responder_malformed_wqe;
+ u64 general_hw_err;
+ u64 requester_rnr_nak_retries_exceeded;
+ u64 requester_retries_exceeded;
+ u64 total_fatal_err;
+ u64 received_cnps;
+ u64 num_qps_congested;
+ u64 rate_inc_events;
+ u64 num_qps_recovered;
+ u64 current_rate;
+ u64 dup_rx_req;
+ u64 tx_bytes;
+ u64 rx_bytes;
+ u64 rx_send_req;
+ u64 rx_write_req;
+ u64 rx_read_req;
+ u64 tx_pkt;
+ u64 rx_pkt;
+}; /* HW Data */
+
+struct mana_rnic_query_device_cntrs_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+}; /* HW Data */
+
+struct mana_rnic_query_device_cntrs_resp {
+ struct gdma_resp_hdr hdr;
+ u32 sent_cnps;
+ u32 received_ecns;
+ u32 reserved1;
+ u32 received_cnp_count;
+ u32 qp_congested_events;
+ u32 qp_recovered_events;
+ u32 rate_inc_events;
+ u32 reserved2;
+}; /* HW Data */
+
+static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
+{
+ return mdev->gdma_dev->gdma_context;
+}
+
+static inline struct mana_ib_qp *mana_get_qp_ref(struct mana_ib_dev *mdev,
+ u32 qid, bool is_sq)
+{
+ struct mana_ib_qp *qp;
+ unsigned long flag;
+
+ if (is_sq)
+ qid |= MANA_SENDQ_MASK;
+
+ xa_lock_irqsave(&mdev->qp_table_wq, flag);
+ qp = xa_load(&mdev->qp_table_wq, qid);
+ if (qp)
+ refcount_inc(&qp->refcount);
+ xa_unlock_irqrestore(&mdev->qp_table_wq, flag);
+ return qp;
+}
+
+static inline void mana_put_qp_ref(struct mana_ib_qp *qp)
+{
+ if (refcount_dec_and_test(&qp->refcount))
+ complete(&qp->free);
+}
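
The QP table is keyed by hardware queue id, with MANA_SENDQ_MASK folded into send-queue ids so send and receive queues cannot collide. A minimal, illustrative lookup path (not the driver's actual completion handler) would pair the two helpers above like this:

	/* Illustrative sketch only: resolve a send-queue completion back to
	 * its QP and drop the reference taken by the lookup.
	 */
	static void handle_send_completion(struct mana_ib_dev *mdev, u32 wq_id)
	{
		struct mana_ib_qp *qp;

		qp = mana_get_qp_ref(mdev, wq_id, true);	/* true: send queue */
		if (!qp)
			return;		/* QP already gone or id unknown */

		/* ... process completions belonging to qp ... */

		mana_put_qp_ref(qp);
	}
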
+
+static inline bool mana_ib_is_rnic(struct mana_ib_dev *mdev)
+{
+ return mdev->gdma_dev->dev_id.type == GDMA_DEVICE_MANA_IB;
+}
+
+static inline struct net_device *mana_ib_get_netdev(struct ib_device *ibdev, u32 port)
+{
+ struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct mana_context *mc = gc->mana.driver_data;
+
+ if (port < 1 || port > mc->num_ports)
+ return NULL;
+ return mc->ports[port - 1];
+}
+
+static inline void copy_in_reverse(u8 *dst, const u8 *src, u32 size)
+{
+ u32 i;
+
+ for (i = 0; i < size; i++)
+ dst[size - 1 - i] = src[i];
+}
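
copy_in_reverse mirrors the byte order of GIDs and MAC addresses before they are handed to the device, presumably because the hardware consumes these fields in the opposite byte order. An illustrative use (the values are made up):

	u8 src[4] = { 0x01, 0x02, 0x03, 0x04 };
	u8 dst[4];

	copy_in_reverse(dst, src, sizeof(src));
	/* dst now holds { 0x04, 0x03, 0x02, 0x01 } */
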
+
+int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);
+void mana_ib_remove_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);
+
+int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region);
+
+int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region, u64 virt);
+
+int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
+ mana_handle_t gdma_region);
+
+int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_queue_type type,
+ struct mana_ib_queue *queue);
+int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
+ struct mana_ib_queue *queue);
+void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue);
+
+struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr,
+ struct ib_udata *udata);
+
+int mana_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
+ u32 wq_attr_mask, struct ib_udata *udata);
+
+int mana_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata);
+
+int mana_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ struct ib_udata *udata);
+
+int mana_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl);
+
+struct ib_mr *mana_ib_get_dma_mr(struct ib_pd *ibpd, int access_flags);
+
+struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 iova, int access_flags,
+ struct ib_dmah *dmah,
+ struct ib_udata *udata);
+
+int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
+
+int mana_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
+ struct ib_udata *udata);
+
+int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+
+int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
+
+int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port_id,
+ struct mana_ib_pd *pd, u32 doorbell_id);
+void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
+ u32 port);
+
+int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+
+int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+
+int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+
+int mana_ib_alloc_ucontext(struct ib_ucontext *ibcontext,
+ struct ib_udata *udata);
+void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext);
+
+int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma);
+
+int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable);
+int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
+ struct ib_udata *uhw);
+int mana_ib_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props);
+int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
+ union ib_gid *gid);
+
+void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext);
+
+int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *mdev);
+int mana_eth_query_adapter_caps(struct mana_ib_dev *mdev);
+
+int mana_ib_create_eqs(struct mana_ib_dev *mdev);
+
+void mana_ib_destroy_eqs(struct mana_ib_dev *mdev);
+
+int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev);
+
+int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev);
+
+int mana_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey);
+
+enum rdma_link_layer mana_ib_get_link_layer(struct ib_device *device, u32 port_num);
+
+int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context);
+
+int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context);
+
+int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8 *mac);
+
+int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 doorbell);
+
+int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);
+
+int mana_ib_gd_create_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
+ struct ib_qp_init_attr *attr, u32 doorbell, u64 flags);
+int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp);
+
+int mana_ib_gd_create_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
+ struct ib_qp_init_attr *attr, u32 doorbell, u32 type);
+int mana_ib_gd_destroy_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp);
+
+int mana_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
+int mana_ib_destroy_ah(struct ib_ah *ah, u32 flags);
+
+int mana_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+int mana_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+
+void mana_drain_gsi_sqs(struct mana_ib_dev *mdev);
+int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+
+struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length,
+ u64 iova, int fd, int mr_access_flags,
+ struct ib_dmah *dmah,
+ struct uverbs_attr_bundle *attrs);
+#endif
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
new file mode 100644
index 000000000000..3d0245a4c1ed
--- /dev/null
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
+ */
+
+#include "mana_ib.h"
+
+#define VALID_MR_FLAGS (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |\
+ IB_ACCESS_REMOTE_ATOMIC | IB_ZERO_BASED)
+
+#define VALID_DMA_MR_FLAGS (IB_ACCESS_LOCAL_WRITE)
+
+static enum gdma_mr_access_flags
+mana_ib_verbs_to_gdma_access_flags(int access_flags)
+{
+ enum gdma_mr_access_flags flags = GDMA_ACCESS_FLAG_LOCAL_READ;
+
+ if (access_flags & IB_ACCESS_LOCAL_WRITE)
+ flags |= GDMA_ACCESS_FLAG_LOCAL_WRITE;
+
+ if (access_flags & IB_ACCESS_REMOTE_WRITE)
+ flags |= GDMA_ACCESS_FLAG_REMOTE_WRITE;
+
+ if (access_flags & IB_ACCESS_REMOTE_READ)
+ flags |= GDMA_ACCESS_FLAG_REMOTE_READ;
+
+ if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
+ flags |= GDMA_ACCESS_FLAG_REMOTE_ATOMIC;
+
+ return flags;
+}
+
+static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
+ struct gdma_create_mr_params *mr_params)
+{
+ struct gdma_create_mr_response resp = {};
+ struct gdma_create_mr_request req = {};
+ struct gdma_context *gc = mdev_to_gc(dev);
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
+ sizeof(resp));
+ req.pd_handle = mr_params->pd_handle;
+ req.mr_type = mr_params->mr_type;
+
+ switch (mr_params->mr_type) {
+ case GDMA_MR_TYPE_GPA:
+ break;
+ case GDMA_MR_TYPE_GVA:
+ req.gva.dma_region_handle = mr_params->gva.dma_region_handle;
+ req.gva.virtual_address = mr_params->gva.virtual_address;
+ req.gva.access_flags = mr_params->gva.access_flags;
+ break;
+ case GDMA_MR_TYPE_ZBVA:
+ req.zbva.dma_region_handle = mr_params->zbva.dma_region_handle;
+ req.zbva.access_flags = mr_params->zbva.access_flags;
+ break;
+ default:
+ ibdev_dbg(&dev->ib_dev,
+ "invalid param (GDMA_MR_TYPE) passed, type %d\n",
+ req.mr_type);
+ return -EINVAL;
+ }
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+
+ if (err || resp.hdr.status) {
+ ibdev_dbg(&dev->ib_dev, "Failed to create mr %d, %u", err,
+ resp.hdr.status);
+ if (!err)
+ err = -EPROTO;
+
+ return err;
+ }
+
+ mr->ibmr.lkey = resp.lkey;
+ mr->ibmr.rkey = resp.rkey;
+ mr->mr_handle = resp.mr_handle;
+
+ return 0;
+}
+
+static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
+{
+ struct gdma_destroy_mr_response resp = {};
+ struct gdma_destroy_mr_request req = {};
+ struct gdma_context *gc = mdev_to_gc(dev);
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_MR, sizeof(req),
+ sizeof(resp));
+
+ req.mr_handle = mr_handle;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err || resp.hdr.status) {
+ dev_err(gc->dev, "Failed to destroy MR: %d, 0x%x\n", err,
+ resp.hdr.status);
+ if (!err)
+ err = -EPROTO;
+ return err;
+ }
+
+ return 0;
+}
+
+struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
+ u64 iova, int access_flags,
+ struct ib_dmah *dmah,
+ struct ib_udata *udata)
+{
+ struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
+ struct gdma_create_mr_params mr_params = {};
+ struct ib_device *ibdev = ibpd->device;
+ struct mana_ib_dev *dev;
+ struct mana_ib_mr *mr;
+ u64 dma_region_handle;
+ int err;
+
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+
+ ibdev_dbg(ibdev,
+ "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x",
+ start, iova, length, access_flags);
+
+ access_flags &= ~IB_ACCESS_OPTIONAL;
+ if (access_flags & ~VALID_MR_FLAGS)
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->umem = ib_umem_get(ibdev, start, length, access_flags);
+ if (IS_ERR(mr->umem)) {
+ err = PTR_ERR(mr->umem);
+ ibdev_dbg(ibdev,
+ "Failed to get umem for register user-mr, %pe\n",
+ mr->umem);
+ goto err_free;
+ }
+
+ err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
+ if (err) {
+ ibdev_dbg(ibdev, "Failed to create dma region for user-mr, %d\n",
+ err);
+ goto err_umem;
+ }
+
+ ibdev_dbg(ibdev,
+ "created dma region for user-mr 0x%llx\n",
+ dma_region_handle);
+
+ mr_params.pd_handle = pd->pd_handle;
+ if (access_flags & IB_ZERO_BASED) {
+ mr_params.mr_type = GDMA_MR_TYPE_ZBVA;
+ mr_params.zbva.dma_region_handle = dma_region_handle;
+ mr_params.zbva.access_flags =
+ mana_ib_verbs_to_gdma_access_flags(access_flags);
+ } else {
+ mr_params.mr_type = GDMA_MR_TYPE_GVA;
+ mr_params.gva.dma_region_handle = dma_region_handle;
+ mr_params.gva.virtual_address = iova;
+ mr_params.gva.access_flags =
+ mana_ib_verbs_to_gdma_access_flags(access_flags);
+ }
+
+ err = mana_ib_gd_create_mr(dev, mr, &mr_params);
+ if (err)
+ goto err_dma_region;
+
+ /*
+ * There is no need to keep track of dma_region_handle after MR is
+ * successfully created. The dma_region_handle is tracked in the PF
+ * as part of the lifecycle of this MR.
+ */
+
+ return &mr->ibmr;
+
+err_dma_region:
+ mana_gd_destroy_dma_region(mdev_to_gc(dev), dma_region_handle);
+
+err_umem:
+ ib_umem_release(mr->umem);
+
+err_free:
+ kfree(mr);
+ return ERR_PTR(err);
+}
+
+struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length,
+ u64 iova, int fd, int access_flags,
+ struct ib_dmah *dmah,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
+ struct gdma_create_mr_params mr_params = {};
+ struct ib_device *ibdev = ibpd->device;
+ struct ib_umem_dmabuf *umem_dmabuf;
+ struct mana_ib_dev *dev;
+ struct mana_ib_mr *mr;
+ u64 dma_region_handle;
+ int err;
+
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+
+ access_flags &= ~IB_ACCESS_OPTIONAL;
+ if (access_flags & ~VALID_MR_FLAGS)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, start, length, fd, access_flags);
+ if (IS_ERR(umem_dmabuf)) {
+ err = PTR_ERR(umem_dmabuf);
+ ibdev_dbg(ibdev, "Failed to get dmabuf umem, %pe\n",
+ umem_dmabuf);
+ goto err_free;
+ }
+
+ mr->umem = &umem_dmabuf->umem;
+
+ err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
+ if (err) {
+ ibdev_dbg(ibdev, "Failed to create dma region for user-mr, %d\n",
+ err);
+ goto err_umem;
+ }
+
+ mr_params.pd_handle = pd->pd_handle;
+ mr_params.mr_type = GDMA_MR_TYPE_GVA;
+ mr_params.gva.dma_region_handle = dma_region_handle;
+ mr_params.gva.virtual_address = iova;
+ mr_params.gva.access_flags =
+ mana_ib_verbs_to_gdma_access_flags(access_flags);
+
+ err = mana_ib_gd_create_mr(dev, mr, &mr_params);
+ if (err)
+ goto err_dma_region;
+
+ /*
+ * There is no need to keep track of dma_region_handle after MR is
+ * successfully created. The dma_region_handle is tracked in the PF
+ * as part of the lifecycle of this MR.
+ */
+
+ return &mr->ibmr;
+
+err_dma_region:
+ mana_gd_destroy_dma_region(mdev_to_gc(dev), dma_region_handle);
+
+err_umem:
+ ib_umem_release(mr->umem);
+
+err_free:
+ kfree(mr);
+ return ERR_PTR(err);
+}
+
+struct ib_mr *mana_ib_get_dma_mr(struct ib_pd *ibpd, int access_flags)
+{
+ struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
+ struct gdma_create_mr_params mr_params = {};
+ struct ib_device *ibdev = ibpd->device;
+ struct mana_ib_dev *dev;
+ struct mana_ib_mr *mr;
+ int err;
+
+ dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+
+ if (access_flags & ~VALID_DMA_MR_FLAGS)
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr_params.pd_handle = pd->pd_handle;
+ mr_params.mr_type = GDMA_MR_TYPE_GPA;
+
+ err = mana_ib_gd_create_mr(dev, mr, &mr_params);
+ if (err)
+ goto err_free;
+
+ return &mr->ibmr;
+
+err_free:
+ kfree(mr);
+ return ERR_PTR(err);
+}
+
+int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+{
+ struct mana_ib_mr *mr = container_of(ibmr, struct mana_ib_mr, ibmr);
+ struct ib_device *ibdev = ibmr->device;
+ struct mana_ib_dev *dev;
+ int err;
+
+ dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+
+ err = mana_ib_gd_destroy_mr(dev, mr->mr_handle);
+ if (err)
+ return err;
+
+ if (mr->umem)
+ ib_umem_release(mr->umem);
+
+ kfree(mr);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
new file mode 100644
index 000000000000..48c1f4977f21
--- /dev/null
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -0,0 +1,921 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
+ */
+
+#include "mana_ib.h"
+
+static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
+ struct net_device *ndev,
+ mana_handle_t default_rxobj,
+ mana_handle_t ind_table[],
+ u32 log_ind_tbl_size, u32 rx_hash_key_len,
+ u8 *rx_hash_key)
+{
+ struct mana_port_context *mpc = netdev_priv(ndev);
+ struct mana_cfg_rx_steer_req_v2 *req;
+ struct mana_cfg_rx_steer_resp resp = {};
+ struct gdma_context *gc;
+ u32 req_buf_size;
+ int i, err;
+
+ gc = mdev_to_gc(dev);
+
+ req_buf_size = struct_size(req, indir_tab, MANA_INDIRECT_TABLE_DEF_SIZE);
+ req = kzalloc(req_buf_size, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
+ sizeof(resp));
+
+ req->hdr.req.msg_version = GDMA_MESSAGE_V2;
+
+ req->vport = mpc->port_handle;
+ req->rx_enable = 1;
+ req->update_default_rxobj = 1;
+ req->default_rxobj = default_rxobj;
+ req->hdr.dev_id = gc->mana.dev_id;
+
+ /* If there is more than one entry in the indirection table, enable RSS */
+ if (log_ind_tbl_size)
+ req->rss_enable = true;
+
+ req->num_indir_entries = MANA_INDIRECT_TABLE_DEF_SIZE;
+ req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
+ indir_tab);
+ req->update_indir_tab = true;
+ req->cqe_coalescing_enable = 1;
+
+ /* The indirection table passed to the hardware must have
+ * MANA_INDIRECT_TABLE_DEF_SIZE entries. Replicate the verbs
+ * ind_table across it if required.
+ */
+ ibdev_dbg(&dev->ib_dev, "ind table size %u\n", 1 << log_ind_tbl_size);
+ for (i = 0; i < MANA_INDIRECT_TABLE_DEF_SIZE; i++) {
+ req->indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)];
+ ibdev_dbg(&dev->ib_dev, "index %u handle 0x%llx\n", i,
+ req->indir_tab[i]);
+ }
+
+ req->update_hashkey = true;
+ if (rx_hash_key_len)
+ memcpy(req->hashkey, rx_hash_key, rx_hash_key_len);
+ else
+ netdev_rss_key_fill(req->hashkey, MANA_HASH_KEY_SIZE);
+
+ ibdev_dbg(&dev->ib_dev, "vport handle %llu default_rxobj 0x%llx\n",
+ req->vport, default_rxobj);
+
+ err = mana_gd_send_request(gc, req_buf_size, req, sizeof(resp), &resp);
+ if (err) {
+ netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
+ goto out;
+ }
+
+ if (resp.hdr.status) {
+ netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
+ resp.hdr.status);
+ err = -EPROTO;
+ goto out;
+ }
+
+ netdev_info(ndev, "Configured steering vPort %llu log_entries %u\n",
+ mpc->port_handle, log_ind_tbl_size);
+
+out:
+ kfree(req);
+ return err;
+}
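
The modulo in the fill loop above tiles the caller's (smaller) verbs indirection table across the full hardware table. A worked illustration, assuming MANA_INDIRECT_TABLE_DEF_SIZE is 64 (the MANA headers are authoritative for the real value):

	/* log_ind_tbl_size == 2 gives 4 RX objects; indir_tab[i] = ind_table[i % 4]:
	 *   indir_tab[0..3]   = ind_table[0..3]
	 *   indir_tab[4..7]   = ind_table[0..3]
	 *   ...
	 *   indir_tab[60..63] = ind_table[0..3]
	 */
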
+
+static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
+ struct ib_qp_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
+ struct mana_ib_dev *mdev =
+ container_of(pd->device, struct mana_ib_dev, ib_dev);
+ struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
+ struct mana_ib_create_qp_rss_resp resp = {};
+ struct mana_ib_create_qp_rss ucmd = {};
+ mana_handle_t *mana_ind_table;
+ struct mana_port_context *mpc;
+ unsigned int ind_tbl_size;
+ struct net_device *ndev;
+ struct mana_ib_cq *cq;
+ struct mana_ib_wq *wq;
+ struct mana_eq *eq;
+ struct ib_cq *ibcq;
+ struct ib_wq *ibwq;
+ int i = 0;
+ u32 port;
+ int ret;
+
+ if (!udata || udata->inlen < sizeof(ucmd))
+ return -EINVAL;
+
+ ret = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
+ if (ret) {
+ ibdev_dbg(&mdev->ib_dev,
+ "Failed to copy from udata for create rss-qp, err %d\n",
+ ret);
+ return ret;
+ }
+
+ if (attr->cap.max_recv_wr > mdev->adapter_caps.max_qp_wr) {
+ ibdev_dbg(&mdev->ib_dev,
+ "Requested max_recv_wr %d exceeding limit\n",
+ attr->cap.max_recv_wr);
+ return -EINVAL;
+ }
+
+ if (attr->cap.max_recv_sge > MAX_RX_WQE_SGL_ENTRIES) {
+ ibdev_dbg(&mdev->ib_dev,
+ "Requested max_recv_sge %d exceeding limit\n",
+ attr->cap.max_recv_sge);
+ return -EINVAL;
+ }
+
+ ind_tbl_size = 1 << ind_tbl->log_ind_tbl_size;
+ if (ind_tbl_size > MANA_INDIRECT_TABLE_DEF_SIZE) {
+ ibdev_dbg(&mdev->ib_dev,
+ "Indirect table size %d exceeding limit\n",
+ ind_tbl_size);
+ return -EINVAL;
+ }
+
+ if (ucmd.rx_hash_function != MANA_IB_RX_HASH_FUNC_TOEPLITZ) {
+ ibdev_dbg(&mdev->ib_dev,
+ "RX Hash function is not supported, %d\n",
+ ucmd.rx_hash_function);
+ return -EINVAL;
+ }
+
+ /* IB ports start at 1, MANA ports start at 0 */
+ port = ucmd.port;
+ ndev = mana_ib_get_netdev(pd->device, port);
+ if (!ndev) {
+ ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
+ port);
+ return -EINVAL;
+ }
+ mpc = netdev_priv(ndev);
+
+ ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n",
+ ucmd.rx_hash_function, port);
+
+ mana_ind_table = kcalloc(ind_tbl_size, sizeof(mana_handle_t),
+ GFP_KERNEL);
+ if (!mana_ind_table) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ qp->port = port;
+
+ for (i = 0; i < ind_tbl_size; i++) {
+ struct mana_obj_spec wq_spec = {};
+ struct mana_obj_spec cq_spec = {};
+
+ ibwq = ind_tbl->ind_tbl[i];
+ wq = container_of(ibwq, struct mana_ib_wq, ibwq);
+
+ ibcq = ibwq->cq;
+ cq = container_of(ibcq, struct mana_ib_cq, ibcq);
+
+ wq_spec.gdma_region = wq->queue.gdma_region;
+ wq_spec.queue_size = wq->wq_buf_size;
+
+ cq_spec.gdma_region = cq->queue.gdma_region;
+ cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
+ cq_spec.modr_ctx_id = 0;
+ eq = &mpc->ac->eqs[cq->comp_vector];
+ cq_spec.attached_eq = eq->eq->id;
+
+ ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
+ &wq_spec, &cq_spec, &wq->rx_object);
+ if (ret) {
+ /* Do cleanup starting with index i-1 */
+ i--;
+ goto fail;
+ }
+
+ /* The GDMA regions are now owned by the WQ object */
+ wq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
+ cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
+
+ wq->queue.id = wq_spec.queue_index;
+ cq->queue.id = cq_spec.queue_index;
+
+ ibdev_dbg(&mdev->ib_dev,
+ "rx_object 0x%llx wq id %llu cq id %llu\n",
+ wq->rx_object, wq->queue.id, cq->queue.id);
+
+ resp.entries[i].cqid = cq->queue.id;
+ resp.entries[i].wqid = wq->queue.id;
+
+ mana_ind_table[i] = wq->rx_object;
+
+ /* Create CQ table entry */
+ ret = mana_ib_install_cq_cb(mdev, cq);
+ if (ret)
+ goto fail;
+ }
+ resp.num_entries = i;
+
+ ret = mana_ib_cfg_vport_steering(mdev, ndev, wq->rx_object,
+ mana_ind_table,
+ ind_tbl->log_ind_tbl_size,
+ ucmd.rx_hash_key_len,
+ ucmd.rx_hash_key);
+ if (ret)
+ goto fail;
+
+ ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (ret) {
+ ibdev_dbg(&mdev->ib_dev,
+ "Failed to copy to udata create rss-qp, %d\n",
+ ret);
+ goto fail;
+ }
+
+ kfree(mana_ind_table);
+
+ return 0;
+
+fail:
+ while (i-- > 0) {
+ ibwq = ind_tbl->ind_tbl[i];
+ ibcq = ibwq->cq;
+ wq = container_of(ibwq, struct mana_ib_wq, ibwq);
+ cq = container_of(ibcq, struct mana_ib_cq, ibcq);
+
+ mana_ib_remove_cq_cb(mdev, cq);
+ mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
+ }
+
+ kfree(mana_ind_table);
+
+ return ret;
+}
+
+static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
+ struct ib_qp_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
+ struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
+ struct mana_ib_dev *mdev =
+ container_of(ibpd->device, struct mana_ib_dev, ib_dev);
+ struct mana_ib_cq *send_cq =
+ container_of(attr->send_cq, struct mana_ib_cq, ibcq);
+ struct mana_ib_ucontext *mana_ucontext =
+ rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
+ ibucontext);
+ struct mana_ib_create_qp_resp resp = {};
+ struct mana_ib_create_qp ucmd = {};
+ struct mana_obj_spec wq_spec = {};
+ struct mana_obj_spec cq_spec = {};
+ struct mana_port_context *mpc;
+ struct net_device *ndev;
+ struct mana_eq *eq;
+ int eq_vec;
+ u32 port;
+ int err;
+
+ if (!mana_ucontext || udata->inlen < sizeof(ucmd))
+ return -EINVAL;
+
+ err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
+ if (err) {
+ ibdev_dbg(&mdev->ib_dev,
+ "Failed to copy from udata create qp-raw, %d\n", err);
+ return err;
+ }
+
+ if (attr->cap.max_send_wr > mdev->adapter_caps.max_qp_wr) {
+ ibdev_dbg(&mdev->ib_dev,
+ "Requested max_send_wr %d exceeding limit\n",
+ attr->cap.max_send_wr);
+ return -EINVAL;
+ }
+
+ if (attr->cap.max_send_sge > MAX_TX_WQE_SGL_ENTRIES) {
+ ibdev_dbg(&mdev->ib_dev,
+ "Requested max_send_sge %d exceeding limit\n",
+ attr->cap.max_send_sge);
+ return -EINVAL;
+ }
+
+ port = ucmd.port;
+ ndev = mana_ib_get_netdev(ibpd->device, port);
+ if (!ndev) {
+ ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
+ port);
+ return -EINVAL;
+ }
+ mpc = netdev_priv(ndev);
+ ibdev_dbg(&mdev->ib_dev, "port %u ndev %p mpc %p\n", port, ndev, mpc);
+
+ err = mana_ib_cfg_vport(mdev, port, pd, mana_ucontext->doorbell);
+ if (err)
+ return -ENODEV;
+
+ qp->port = port;
+
+ ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
+ ucmd.sq_buf_addr, ucmd.port);
+
+ err = mana_ib_create_queue(mdev, ucmd.sq_buf_addr, ucmd.sq_buf_size, &qp->raw_sq);
+ if (err) {
+ ibdev_dbg(&mdev->ib_dev,
+ "Failed to create queue for create qp-raw, err %d\n", err);
+ goto err_free_vport;
+ }
+
+ /* Create a WQ on the same port handle used by the Ethernet */
+ wq_spec.gdma_region = qp->raw_sq.gdma_region;
+ wq_spec.queue_size = ucmd.sq_buf_size;
+
+ cq_spec.gdma_region = send_cq->queue.gdma_region;
+ cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
+ cq_spec.modr_ctx_id = 0;
+ eq_vec = send_cq->comp_vector;
+ eq = &mpc->ac->eqs[eq_vec];
+ cq_spec.attached_eq = eq->eq->id;
+
+ err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
+ &cq_spec, &qp->qp_handle);
+ if (err) {
+ ibdev_dbg(&mdev->ib_dev,
+ "Failed to create wq for create raw-qp, err %d\n",
+ err);
+ goto err_destroy_queue;
+ }
+
+ /* The GDMA regions are now owned by the WQ object */
+ qp->raw_sq.gdma_region = GDMA_INVALID_DMA_REGION;
+ send_cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
+
+ qp->raw_sq.id = wq_spec.queue_index;
+ send_cq->queue.id = cq_spec.queue_index;
+
+ /* Create CQ table entry */
+ err = mana_ib_install_cq_cb(mdev, send_cq);
+ if (err)
+ goto err_destroy_wq_obj;
+
+ ibdev_dbg(&mdev->ib_dev,
+ "qp->qp_handle 0x%llx sq id %llu cq id %llu\n",
+ qp->qp_handle, qp->raw_sq.id, send_cq->queue.id);
+
+ resp.sqid = qp->raw_sq.id;
+ resp.cqid = send_cq->queue.id;
+ resp.tx_vp_offset = pd->tx_vp_offset;
+
+ err = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (err) {
+ ibdev_dbg(&mdev->ib_dev,
+ "Failed to copy to udata for create qp-raw, %d\n",
+ err);
+ goto err_remove_cq_cb;
+ }
+
+ return 0;
+
+err_remove_cq_cb:
+ mana_ib_remove_cq_cb(mdev, send_cq);
+
+err_destroy_wq_obj:
+ mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);
+
+err_destroy_queue:
+ mana_ib_destroy_queue(mdev, &qp->raw_sq);
+
+err_free_vport:
+ mana_ib_uncfg_vport(mdev, pd, port);
+
+ return err;
+}
+
+static u32 mana_ib_wqe_size(u32 sge, u32 oob_size)
+{
+ u32 wqe_size = sge * sizeof(struct gdma_sge) + sizeof(struct gdma_wqe) + oob_size;
+
+ return ALIGN(wqe_size, GDMA_WQE_BU_SIZE);
+}
+
+static u32 mana_ib_queue_size(struct ib_qp_init_attr *attr, u32 queue_type)
+{
+ u32 queue_size;
+
+ switch (attr->qp_type) {
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ if (queue_type == MANA_UD_SEND_QUEUE)
+ queue_size = attr->cap.max_send_wr *
+ mana_ib_wqe_size(attr->cap.max_send_sge, INLINE_OOB_LARGE_SIZE);
+ else
+ queue_size = attr->cap.max_recv_wr *
+ mana_ib_wqe_size(attr->cap.max_recv_sge, INLINE_OOB_SMALL_SIZE);
+ break;
+ default:
+ return 0;
+ }
+
+ return MANA_PAGE_ALIGN(roundup_pow_of_two(queue_size));
+}
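
mana_ib_queue_size turns the verbs capabilities into a page-aligned, power-of-two ring size. A worked example with assumed constants (sizeof(struct gdma_sge) = 16, sizeof(struct gdma_wqe) = 8, INLINE_OOB_LARGE_SIZE = 24, GDMA_WQE_BU_SIZE = 32; the GDMA headers are authoritative):

	/* max_send_sge = 2, max_send_wr = 100 on the UD send queue:
	 *   wqe_size   = ALIGN(2 * 16 + 8 + 24, 32) = ALIGN(64, 32) = 64
	 *   queue_size = 100 * 64                   = 6400
	 *   returned   = MANA_PAGE_ALIGN(roundup_pow_of_two(6400)) = 8192
	 */
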
+
+static enum gdma_queue_type mana_ib_queue_type(struct ib_qp_init_attr *attr, u32 queue_type)
+{
+ enum gdma_queue_type type;
+
+ switch (attr->qp_type) {
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ if (queue_type == MANA_UD_SEND_QUEUE)
+ type = GDMA_SQ;
+ else
+ type = GDMA_RQ;
+ break;
+ default:
+ type = GDMA_INVALID_QUEUE;
+ }
+ return type;
+}
+
+static int mana_table_store_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
+{
+ return xa_insert_irq(&mdev->qp_table_wq, qp->ibqp.qp_num, qp,
+ GFP_KERNEL);
+}
+
+static void mana_table_remove_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
+{
+ xa_erase_irq(&mdev->qp_table_wq, qp->ibqp.qp_num);
+}
+
+static int mana_table_store_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
+{
+ u32 qids = qp->ud_qp.queues[MANA_UD_SEND_QUEUE].id | MANA_SENDQ_MASK;
+ u32 qidr = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].id;
+ int err;
+
+ err = xa_insert_irq(&mdev->qp_table_wq, qids, qp, GFP_KERNEL);
+ if (err)
+ return err;
+
+ err = xa_insert_irq(&mdev->qp_table_wq, qidr, qp, GFP_KERNEL);
+ if (err)
+ goto remove_sq;
+
+ return 0;
+
+remove_sq:
+ xa_erase_irq(&mdev->qp_table_wq, qids);
+ return err;
+}
+
+static void mana_table_remove_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
+{
+ u32 qids = qp->ud_qp.queues[MANA_UD_SEND_QUEUE].id | MANA_SENDQ_MASK;
+ u32 qidr = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].id;
+
+ xa_erase_irq(&mdev->qp_table_wq, qids);
+ xa_erase_irq(&mdev->qp_table_wq, qidr);
+}
+
+static int mana_table_store_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
+{
+ refcount_set(&qp->refcount, 1);
+ init_completion(&qp->free);
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_RC:
+ return mana_table_store_rc_qp(mdev, qp);
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ return mana_table_store_ud_qp(mdev, qp);
+ default:
+ ibdev_dbg(&mdev->ib_dev, "Unknown QP type for storing in mana table, %d\n",
+ qp->ibqp.qp_type);
+ }
+
+ return -EINVAL;
+}
+
+static void mana_table_remove_qp(struct mana_ib_dev *mdev,
+ struct mana_ib_qp *qp)
+{
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_RC:
+ mana_table_remove_rc_qp(mdev, qp);
+ break;
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ mana_table_remove_ud_qp(mdev, qp);
+ break;
+ default:
+ ibdev_dbg(&mdev->ib_dev, "Unknown QP type for removing from mana table, %d\n",
+ qp->ibqp.qp_type);
+ return;
+ }
+ mana_put_qp_ref(qp);
+ wait_for_completion(&qp->free);
+}
+
+static int mana_ib_create_rc_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
+ struct ib_qp_init_attr *attr, struct ib_udata *udata)
+{
+ struct mana_ib_dev *mdev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
+ struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
+ struct mana_ib_create_rc_qp_resp resp = {};
+ struct mana_ib_ucontext *mana_ucontext;
+ struct mana_ib_create_rc_qp ucmd = {};
+ int i, err, j;
+ u64 flags = 0;
+ u32 doorbell;
+
+ if (!udata || udata->inlen < sizeof(ucmd))
+ return -EINVAL;
+
+ mana_ucontext = rdma_udata_to_drv_context(udata, struct mana_ib_ucontext, ibucontext);
+ doorbell = mana_ucontext->doorbell;
+ flags = MANA_RC_FLAG_NO_FMR;
+ err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
+ if (err) {
+ ibdev_dbg(&mdev->ib_dev, "Failed to copy from udata, %d\n", err);
+ return err;
+ }
+
+ for (i = 0, j = 0; i < MANA_RC_QUEUE_TYPE_MAX; ++i) {
+ /* skip FMR for user-level RC QPs */
+ if (i == MANA_RC_SEND_QUEUE_FMR) {
+ qp->rc_qp.queues[i].id = INVALID_QUEUE_ID;
+ qp->rc_qp.queues[i].gdma_region = GDMA_INVALID_DMA_REGION;
+ continue;
+ }
+ err = mana_ib_create_queue(mdev, ucmd.queue_buf[j], ucmd.queue_size[j],
+ &qp->rc_qp.queues[i]);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to create queue %d, err %d\n", i, err);
+ goto destroy_queues;
+ }
+ j++;
+ }
+
+ err = mana_ib_gd_create_rc_qp(mdev, qp, attr, doorbell, flags);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to create rc qp %d\n", err);
+ goto destroy_queues;
+ }
+ qp->ibqp.qp_num = qp->rc_qp.queues[MANA_RC_RECV_QUEUE_RESPONDER].id;
+ qp->port = attr->port_num;
+
+ if (udata) {
+ for (i = 0, j = 0; i < MANA_RC_QUEUE_TYPE_MAX; ++i) {
+ if (i == MANA_RC_SEND_QUEUE_FMR)
+ continue;
+ resp.queue_id[j] = qp->rc_qp.queues[i].id;
+ j++;
+ }
+ err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
+ if (err) {
+ ibdev_dbg(&mdev->ib_dev, "Failed to copy to udata, %d\n", err);
+ goto destroy_qp;
+ }
+ }
+
+ err = mana_table_store_qp(mdev, qp);
+ if (err)
+ goto destroy_qp;
+
+ return 0;
+
+destroy_qp:
+ mana_ib_gd_destroy_rc_qp(mdev, qp);
+destroy_queues:
+ while (i-- > 0)
+ mana_ib_destroy_queue(mdev, &qp->rc_qp.queues[i]);
+ return err;
+}
+
+static void mana_add_qp_to_cqs(struct mana_ib_qp *qp)
+{
+ struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
+ struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&send_cq->cq_lock, flags);
+ list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
+ spin_unlock_irqrestore(&send_cq->cq_lock, flags);
+
+ spin_lock_irqsave(&recv_cq->cq_lock, flags);
+ list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
+ spin_unlock_irqrestore(&recv_cq->cq_lock, flags);
+}
+
+static void mana_remove_qp_from_cqs(struct mana_ib_qp *qp)
+{
+ struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
+ struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&send_cq->cq_lock, flags);
+ list_del(&qp->cq_send_list);
+ spin_unlock_irqrestore(&send_cq->cq_lock, flags);
+
+ spin_lock_irqsave(&recv_cq->cq_lock, flags);
+ list_del(&qp->cq_recv_list);
+ spin_unlock_irqrestore(&recv_cq->cq_lock, flags);
+}
+
+static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
+ struct ib_qp_init_attr *attr, struct ib_udata *udata)
+{
+ struct mana_ib_dev *mdev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
+ struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
+ u32 doorbell, queue_size;
+ int i, err;
+
+ if (udata) {
+ ibdev_dbg(&mdev->ib_dev, "User-level UD QPs are not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; ++i) {
+ queue_size = mana_ib_queue_size(attr, i);
+ err = mana_ib_create_kernel_queue(mdev, queue_size, mana_ib_queue_type(attr, i),
+ &qp->ud_qp.queues[i]);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to create queue %d, err %d\n",
+ i, err);
+ goto destroy_queues;
+ }
+ }
+ doorbell = mdev->gdma_dev->doorbell;
+
+ err = create_shadow_queue(&qp->shadow_rq, attr->cap.max_recv_wr,
+ sizeof(struct ud_rq_shadow_wqe));
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to create shadow rq err %d\n", err);
+ goto destroy_queues;
+ }
+ err = create_shadow_queue(&qp->shadow_sq, attr->cap.max_send_wr,
+ sizeof(struct ud_sq_shadow_wqe));
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to create shadow sq err %d\n", err);
+ goto destroy_shadow_queues;
+ }
+
+ err = mana_ib_gd_create_ud_qp(mdev, qp, attr, doorbell, attr->qp_type);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to create ud qp %d\n", err);
+ goto destroy_shadow_queues;
+ }
+ qp->ibqp.qp_num = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].id;
+ qp->port = attr->port_num;
+
+ for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; ++i)
+ qp->ud_qp.queues[i].kmem->id = qp->ud_qp.queues[i].id;
+
+ err = mana_table_store_qp(mdev, qp);
+ if (err)
+ goto destroy_qp;
+
+ mana_add_qp_to_cqs(qp);
+
+ return 0;
+
+destroy_qp:
+ mana_ib_gd_destroy_ud_qp(mdev, qp);
+destroy_shadow_queues:
+ destroy_shadow_queue(&qp->shadow_rq);
+ destroy_shadow_queue(&qp->shadow_sq);
+destroy_queues:
+ while (i-- > 0)
+ mana_ib_destroy_queue(mdev, &qp->ud_qp.queues[i]);
+ return err;
+}
+
+int mana_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
+ struct ib_udata *udata)
+{
+ switch (attr->qp_type) {
+ case IB_QPT_RAW_PACKET:
+ /* When rwq_ind_tbl is used, it's for creating WQs for RSS */
+ if (attr->rwq_ind_tbl)
+ return mana_ib_create_qp_rss(ibqp, ibqp->pd, attr,
+ udata);
+
+ return mana_ib_create_qp_raw(ibqp, ibqp->pd, attr, udata);
+ case IB_QPT_RC:
+ return mana_ib_create_rc_qp(ibqp, ibqp->pd, attr, udata);
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ return mana_ib_create_ud_qp(ibqp, ibqp->pd, attr, udata);
+ default:
+ ibdev_dbg(ibqp->device, "Creating QP type %u not supported\n",
+ attr->qp_type);
+ }
+
+ return -EINVAL;
+}
+
+static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct mana_ib_dev *mdev = container_of(ibqp->device, struct mana_ib_dev, ib_dev);
+ struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
+ struct mana_rnic_set_qp_state_resp resp = {};
+ struct mana_rnic_set_qp_state_req req = {};
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct mana_port_context *mpc;
+ struct net_device *ndev;
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_SET_QP_STATE, sizeof(req), sizeof(resp));
+
+ req.hdr.req.msg_version = GDMA_MESSAGE_V3;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
+ req.adapter = mdev->adapter_handle;
+ req.qp_handle = qp->qp_handle;
+ req.qp_state = attr->qp_state;
+ req.attr_mask = attr_mask;
+ req.path_mtu = attr->path_mtu;
+ req.rq_psn = attr->rq_psn;
+ req.sq_psn = attr->sq_psn;
+ req.dest_qpn = attr->dest_qp_num;
+ req.max_dest_rd_atomic = attr->max_dest_rd_atomic;
+ req.retry_cnt = attr->retry_cnt;
+ req.rnr_retry = attr->rnr_retry;
+ req.min_rnr_timer = attr->min_rnr_timer;
+ req.rate_limit = attr->rate_limit;
+ req.qkey = attr->qkey;
+ req.local_ack_timeout = attr->timeout;
+ req.qp_access_flags = attr->qp_access_flags;
+ req.max_rd_atomic = attr->max_rd_atomic;
+
+ if (attr_mask & IB_QP_AV) {
+ ndev = mana_ib_get_netdev(&mdev->ib_dev, ibqp->port);
+ if (!ndev) {
+ ibdev_dbg(&mdev->ib_dev, "Invalid port %u in QP %u\n",
+ ibqp->port, ibqp->qp_num);
+ return -EINVAL;
+ }
+ mpc = netdev_priv(ndev);
+ copy_in_reverse(req.ah_attr.src_mac, mpc->mac_addr, ETH_ALEN);
+ copy_in_reverse(req.ah_attr.dest_mac, attr->ah_attr.roce.dmac, ETH_ALEN);
+ copy_in_reverse(req.ah_attr.src_addr, attr->ah_attr.grh.sgid_attr->gid.raw,
+ sizeof(union ib_gid));
+ copy_in_reverse(req.ah_attr.dest_addr, attr->ah_attr.grh.dgid.raw,
+ sizeof(union ib_gid));
+ if (rdma_gid_attr_network_type(attr->ah_attr.grh.sgid_attr) == RDMA_NETWORK_IPV4) {
+ req.ah_attr.src_addr_type = SGID_TYPE_IPV4;
+ req.ah_attr.dest_addr_type = SGID_TYPE_IPV4;
+ } else {
+ req.ah_attr.src_addr_type = SGID_TYPE_IPV6;
+ req.ah_attr.dest_addr_type = SGID_TYPE_IPV6;
+ }
+ req.ah_attr.dest_port = ROCE_V2_UDP_DPORT;
+ req.ah_attr.src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
+ ibqp->qp_num, attr->dest_qp_num);
+ req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class >> 2;
+ req.ah_attr.hop_limit = attr->ah_attr.grh.hop_limit;
+ req.ah_attr.flow_label = attr->ah_attr.grh.flow_label;
+ }
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to modify qp, err %d", err);
+ return err;
+ }
+
+ return 0;
+}
+
+int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ switch (ibqp->qp_type) {
+ case IB_QPT_RC:
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ return mana_ib_gd_modify_qp(ibqp, attr, attr_mask, udata);
+ default:
+ ibdev_dbg(ibqp->device, "Modify QP type %u not supported", ibqp->qp_type);
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp,
+ struct ib_rwq_ind_table *ind_tbl,
+ struct ib_udata *udata)
+{
+ struct mana_ib_dev *mdev =
+ container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
+ struct mana_port_context *mpc;
+ struct net_device *ndev;
+ struct mana_ib_wq *wq;
+ struct ib_wq *ibwq;
+ int i;
+
+ ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
+ mpc = netdev_priv(ndev);
+
+ for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
+ ibwq = ind_tbl->ind_tbl[i];
+ wq = container_of(ibwq, struct mana_ib_wq, ibwq);
+ ibdev_dbg(&mdev->ib_dev, "destroying wq->rx_object %llu\n",
+ wq->rx_object);
+ mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
+ }
+
+ return 0;
+}
+
+static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
+{
+ struct mana_ib_dev *mdev =
+ container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
+ struct ib_pd *ibpd = qp->ibqp.pd;
+ struct mana_port_context *mpc;
+ struct net_device *ndev;
+ struct mana_ib_pd *pd;
+
+ ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
+ mpc = netdev_priv(ndev);
+ pd = container_of(ibpd, struct mana_ib_pd, ibpd);
+
+ mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);
+
+ mana_ib_destroy_queue(mdev, &qp->raw_sq);
+
+ mana_ib_uncfg_vport(mdev, pd, qp->port);
+
+ return 0;
+}
+
+static int mana_ib_destroy_rc_qp(struct mana_ib_qp *qp, struct ib_udata *udata)
+{
+ struct mana_ib_dev *mdev =
+ container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
+ int i;
+
+ mana_table_remove_qp(mdev, qp);
+
+ /* Ignore return code as there is not much we can do about it.
+ * The error message is printed inside.
+ */
+ mana_ib_gd_destroy_rc_qp(mdev, qp);
+ for (i = 0; i < MANA_RC_QUEUE_TYPE_MAX; ++i)
+ mana_ib_destroy_queue(mdev, &qp->rc_qp.queues[i]);
+
+ return 0;
+}
+
+static int mana_ib_destroy_ud_qp(struct mana_ib_qp *qp, struct ib_udata *udata)
+{
+ struct mana_ib_dev *mdev =
+ container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
+ int i;
+
+ mana_remove_qp_from_cqs(qp);
+ mana_table_remove_qp(mdev, qp);
+
+ destroy_shadow_queue(&qp->shadow_rq);
+ destroy_shadow_queue(&qp->shadow_sq);
+
+ /* Ignore return code as there is not much we can do about it.
+ * The error message is printed inside.
+ */
+ mana_ib_gd_destroy_ud_qp(mdev, qp);
+ for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; ++i)
+ mana_ib_destroy_queue(mdev, &qp->ud_qp.queues[i]);
+
+ return 0;
+}
+
+int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+{
+ struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
+
+ switch (ibqp->qp_type) {
+ case IB_QPT_RAW_PACKET:
+ if (ibqp->rwq_ind_tbl)
+ return mana_ib_destroy_qp_rss(qp, ibqp->rwq_ind_tbl,
+ udata);
+
+ return mana_ib_destroy_qp_raw(qp, udata);
+ case IB_QPT_RC:
+ return mana_ib_destroy_rc_qp(qp, udata);
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ return mana_ib_destroy_ud_qp(qp, udata);
+ default:
+ ibdev_dbg(ibqp->device, "Unexpected QP type %u\n",
+ ibqp->qp_type);
+ }
+
+ return -ENOENT;
+}
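
In the modify-QP path above, the GRH traffic class is handed to hardware shifted right by two, i.e. only the 6-bit DSCP portion of the 8-bit traffic-class field is kept and the two ECN bits are dropped. A minimal stand-alone illustration of that conversion; the names below are hypothetical, not driver definitions:

/* Extracting the DSCP from an 8-bit traffic class, mirroring
 * "req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class >> 2".
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t tclass_to_dscp(uint8_t tclass)
{
        return tclass >> 2;     /* drop the two low ECN bits */
}

int main(void)
{
        uint8_t tclass = 0xb8;  /* DSCP 46 (EF) with ECN bits clear */

        printf("tclass 0x%02x -> dscp %u\n", tclass, tclass_to_dscp(tclass));
        return 0;
}
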
diff --git a/drivers/infiniband/hw/mana/shadow_queue.h b/drivers/infiniband/hw/mana/shadow_queue.h
new file mode 100644
index 000000000000..a4b3818f9c39
--- /dev/null
+++ b/drivers/infiniband/hw/mana/shadow_queue.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2024, Microsoft Corporation. All rights reserved.
+ */
+
+#ifndef _MANA_SHADOW_QUEUE_H_
+#define _MANA_SHADOW_QUEUE_H_
+
+struct shadow_wqe_header {
+ u16 opcode;
+ u16 error_code;
+ u32 posted_wqe_size;
+ u64 wr_id;
+};
+
+struct ud_rq_shadow_wqe {
+ struct shadow_wqe_header header;
+ u32 byte_len;
+ u32 src_qpn;
+};
+
+struct ud_sq_shadow_wqe {
+ struct shadow_wqe_header header;
+};
+
+struct shadow_queue {
+ /* Unmasked producer index, incremented on wqe posting */
+ u64 prod_idx;
+ /* Unmasked consumer index, incremented on cq polling */
+ u64 cons_idx;
+ /* Unmasked index of next-to-complete (from HW) shadow WQE */
+ u64 next_to_complete_idx;
+ /* queue size in wqes */
+ u32 length;
+ /* distance between elements in bytes */
+ u32 stride;
+ /* ring buffer holding wqes */
+ void *buffer;
+};
+
+static inline int create_shadow_queue(struct shadow_queue *queue, uint32_t length, uint32_t stride)
+{
+ queue->buffer = kvmalloc_array(length, stride, GFP_KERNEL);
+ if (!queue->buffer)
+ return -ENOMEM;
+
+ queue->length = length;
+ queue->stride = stride;
+
+ return 0;
+}
+
+static inline void destroy_shadow_queue(struct shadow_queue *queue)
+{
+ kvfree(queue->buffer);
+}
+
+static inline bool shadow_queue_full(struct shadow_queue *queue)
+{
+ return (queue->prod_idx - queue->cons_idx) >= queue->length;
+}
+
+static inline bool shadow_queue_empty(struct shadow_queue *queue)
+{
+ return queue->prod_idx == queue->cons_idx;
+}
+
+static inline void *
+shadow_queue_get_element(const struct shadow_queue *queue, u64 unmasked_index)
+{
+ u32 index = unmasked_index % queue->length;
+
+ return ((u8 *)queue->buffer + index * queue->stride);
+}
+
+static inline void *
+shadow_queue_producer_entry(struct shadow_queue *queue)
+{
+ return shadow_queue_get_element(queue, queue->prod_idx);
+}
+
+static inline void *
+shadow_queue_get_next_to_consume(const struct shadow_queue *queue)
+{
+ if (queue->cons_idx == queue->next_to_complete_idx)
+ return NULL;
+
+ return shadow_queue_get_element(queue, queue->cons_idx);
+}
+
+static inline void *
+shadow_queue_get_next_to_complete(struct shadow_queue *queue)
+{
+ if (queue->next_to_complete_idx == queue->prod_idx)
+ return NULL;
+
+ return shadow_queue_get_element(queue, queue->next_to_complete_idx);
+}
+
+static inline void shadow_queue_advance_producer(struct shadow_queue *queue)
+{
+ queue->prod_idx++;
+}
+
+static inline void shadow_queue_advance_consumer(struct shadow_queue *queue)
+{
+ queue->cons_idx++;
+}
+
+static inline void shadow_queue_advance_next_to_complete(struct shadow_queue *queue)
+{
+ queue->next_to_complete_idx++;
+}
+
+#endif
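
The shadow queue above keeps three monotonically increasing 64-bit indices and reduces them modulo the ring length only when an element is actually addressed, so the full/empty tests are plain subtractions and no slot is wasted. A minimal user-space sketch of the same index arithmetic, using calloc() in place of kvmalloc_array() and hypothetical toy_* names:

/* Illustrative model of the unmasked-index scheme used by
 * struct shadow_queue: indices only ever increase and are masked
 * (modulo length) when a slot is addressed.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_queue {
        uint64_t prod_idx;      /* advanced when a WQE is posted */
        uint64_t cons_idx;      /* advanced when a completion is polled */
        uint32_t length;        /* number of slots */
        uint32_t stride;        /* bytes per slot */
        void *buffer;
};

static int toy_create(struct toy_queue *q, uint32_t length, uint32_t stride)
{
        q->buffer = calloc(length, stride);
        if (!q->buffer)
                return -1;
        q->prod_idx = q->cons_idx = 0;
        q->length = length;
        q->stride = stride;
        return 0;
}

static int toy_full(const struct toy_queue *q)
{
        /* unsigned subtraction stays correct even if the 64-bit indices
         * ever wrapped, because their distance never exceeds length */
        return (q->prod_idx - q->cons_idx) >= q->length;
}

static void *toy_slot(const struct toy_queue *q, uint64_t unmasked)
{
        return (uint8_t *)q->buffer + (unmasked % q->length) * q->stride;
}

int main(void)
{
        struct toy_queue q;

        if (toy_create(&q, 4, 16))
                return 1;

        while (!toy_full(&q)) {         /* post until the ring is full */
                memset(toy_slot(&q, q.prod_idx), 0, q.stride);
                q.prod_idx++;
        }
        printf("posted %llu entries, full=%d\n",
               (unsigned long long)(q.prod_idx - q.cons_idx), toy_full(&q));

        q.cons_idx++;                   /* one completion frees one slot */
        printf("after one completion, full=%d\n", toy_full(&q));

        free(q.buffer);
        return 0;
}
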
diff --git a/drivers/infiniband/hw/mana/wq.c b/drivers/infiniband/hw/mana/wq.c
new file mode 100644
index 000000000000..f959f4b9244f
--- /dev/null
+++ b/drivers/infiniband/hw/mana/wq.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
+ */
+
+#include "mana_ib.h"
+
+struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct mana_ib_dev *mdev =
+ container_of(pd->device, struct mana_ib_dev, ib_dev);
+ struct mana_ib_create_wq ucmd = {};
+ struct mana_ib_wq *wq;
+ int err;
+
+ if (udata->inlen < sizeof(ucmd))
+ return ERR_PTR(-EINVAL);
+
+ err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
+ if (err) {
+ ibdev_dbg(&mdev->ib_dev,
+ "Failed to copy from udata for create wq, %d\n", err);
+ return ERR_PTR(err);
+ }
+
+ wq = kzalloc(sizeof(*wq), GFP_KERNEL);
+ if (!wq)
+ return ERR_PTR(-ENOMEM);
+
+ ibdev_dbg(&mdev->ib_dev, "ucmd wq_buf_addr 0x%llx\n", ucmd.wq_buf_addr);
+
+ err = mana_ib_create_queue(mdev, ucmd.wq_buf_addr, ucmd.wq_buf_size, &wq->queue);
+ if (err) {
+ ibdev_dbg(&mdev->ib_dev,
+ "Failed to create queue for create wq, %d\n", err);
+ goto err_free_wq;
+ }
+
+ wq->wqe = init_attr->max_wr;
+ wq->wq_buf_size = ucmd.wq_buf_size;
+ wq->rx_object = INVALID_MANA_HANDLE;
+ return &wq->ibwq;
+
+err_free_wq:
+ kfree(wq);
+
+ return ERR_PTR(err);
+}
+
+int mana_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
+ u32 wq_attr_mask, struct ib_udata *udata)
+{
+ /* modify_wq is not supported by this version of the driver */
+ return -EOPNOTSUPP;
+}
+
+int mana_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
+{
+ struct mana_ib_wq *wq = container_of(ibwq, struct mana_ib_wq, ibwq);
+ struct ib_device *ib_dev = ibwq->device;
+ struct mana_ib_dev *mdev;
+
+ mdev = container_of(ib_dev, struct mana_ib_dev, ib_dev);
+
+ mana_ib_destroy_queue(mdev, &wq->queue);
+
+ kfree(wq);
+
+ return 0;
+}
+
+int mana_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ /*
+ * There is no additional data in ind_table to be maintained by this
+ * driver, do nothing
+ */
+ return 0;
+}
+
+int mana_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
+{
+ /*
+ * There is no additional data in ind_table to be maintained by this
+ * driver, do nothing
+ */
+ return 0;
+}
diff --git a/drivers/infiniband/hw/mana/wr.c b/drivers/infiniband/hw/mana/wr.c
new file mode 100644
index 000000000000..1813567d3b16
--- /dev/null
+++ b/drivers/infiniband/hw/mana/wr.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Microsoft Corporation. All rights reserved.
+ */
+
+#include "mana_ib.h"
+
+#define MAX_WR_SGL_NUM (2)
+
+static int mana_ib_post_recv_ud(struct mana_ib_qp *qp, const struct ib_recv_wr *wr)
+{
+ struct mana_ib_dev *mdev = container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
+ struct gdma_queue *queue = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].kmem;
+ struct gdma_posted_wqe_info wqe_info = {0};
+ struct gdma_sge gdma_sgl[MAX_WR_SGL_NUM];
+ struct gdma_wqe_request wqe_req = {0};
+ struct ud_rq_shadow_wqe *shadow_wqe;
+ int err, i;
+
+ if (shadow_queue_full(&qp->shadow_rq))
+ return -EINVAL;
+
+ if (wr->num_sge > MAX_WR_SGL_NUM)
+ return -EINVAL;
+
+ for (i = 0; i < wr->num_sge; ++i) {
+ gdma_sgl[i].address = wr->sg_list[i].addr;
+ gdma_sgl[i].mem_key = wr->sg_list[i].lkey;
+ gdma_sgl[i].size = wr->sg_list[i].length;
+ }
+ wqe_req.num_sge = wr->num_sge;
+ wqe_req.sgl = gdma_sgl;
+
+ err = mana_gd_post_work_request(queue, &wqe_req, &wqe_info);
+ if (err)
+ return err;
+
+ shadow_wqe = shadow_queue_producer_entry(&qp->shadow_rq);
+ memset(shadow_wqe, 0, sizeof(*shadow_wqe));
+ shadow_wqe->header.opcode = IB_WC_RECV;
+ shadow_wqe->header.wr_id = wr->wr_id;
+ shadow_wqe->header.posted_wqe_size = wqe_info.wqe_size_in_bu;
+ shadow_queue_advance_producer(&qp->shadow_rq);
+
+ mana_gd_wq_ring_doorbell(mdev_to_gc(mdev), queue);
+ return 0;
+}
+
+int mana_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
+ int err = 0;
+
+ for (; wr; wr = wr->next) {
+ switch (ibqp->qp_type) {
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ err = mana_ib_post_recv_ud(qp, wr);
+ if (unlikely(err)) {
+ *bad_wr = wr;
+ return err;
+ }
+ break;
+ default:
+ ibdev_dbg(ibqp->device, "Posting recv wr on qp type %u is not supported\n",
+ ibqp->qp_type);
+ return -EINVAL;
+ }
+ }
+
+ return err;
+}
+
+static int mana_ib_post_send_ud(struct mana_ib_qp *qp, const struct ib_ud_wr *wr)
+{
+ struct mana_ib_dev *mdev = container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
+ struct mana_ib_ah *ah = container_of(wr->ah, struct mana_ib_ah, ibah);
+ struct net_device *ndev = mana_ib_get_netdev(&mdev->ib_dev, qp->port);
+ struct gdma_queue *queue = qp->ud_qp.queues[MANA_UD_SEND_QUEUE].kmem;
+ struct gdma_sge gdma_sgl[MAX_WR_SGL_NUM + 1];
+ struct gdma_posted_wqe_info wqe_info = {0};
+ struct gdma_wqe_request wqe_req = {0};
+ struct rdma_send_oob send_oob = {0};
+ struct ud_sq_shadow_wqe *shadow_wqe;
+ int err, i;
+
+ if (!ndev) {
+ ibdev_dbg(&mdev->ib_dev, "Invalid port %u in QP %u\n",
+ qp->port, qp->ibqp.qp_num);
+ return -EINVAL;
+ }
+
+ if (wr->wr.opcode != IB_WR_SEND)
+ return -EINVAL;
+
+ if (shadow_queue_full(&qp->shadow_sq))
+ return -EINVAL;
+
+ if (wr->wr.num_sge > MAX_WR_SGL_NUM)
+ return -EINVAL;
+
+ gdma_sgl[0].address = ah->dma_handle;
+ gdma_sgl[0].mem_key = qp->ibqp.pd->local_dma_lkey;
+ gdma_sgl[0].size = sizeof(struct mana_ib_av);
+ for (i = 0; i < wr->wr.num_sge; ++i) {
+ gdma_sgl[i + 1].address = wr->wr.sg_list[i].addr;
+ gdma_sgl[i + 1].mem_key = wr->wr.sg_list[i].lkey;
+ gdma_sgl[i + 1].size = wr->wr.sg_list[i].length;
+ }
+
+ wqe_req.num_sge = wr->wr.num_sge + 1;
+ wqe_req.sgl = gdma_sgl;
+ wqe_req.inline_oob_size = sizeof(struct rdma_send_oob);
+ wqe_req.inline_oob_data = &send_oob;
+ wqe_req.flags = GDMA_WR_OOB_IN_SGL;
+ wqe_req.client_data_unit = ib_mtu_enum_to_int(ib_mtu_int_to_enum(ndev->mtu));
+
+ send_oob.wqe_type = WQE_TYPE_UD_SEND;
+ send_oob.fence = !!(wr->wr.send_flags & IB_SEND_FENCE);
+ send_oob.signaled = !!(wr->wr.send_flags & IB_SEND_SIGNALED);
+ send_oob.solicited = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
+ send_oob.psn = qp->ud_qp.sq_psn;
+ send_oob.ssn_or_rqpn = wr->remote_qpn;
+ send_oob.ud_send.remote_qkey =
+ qp->ibqp.qp_type == IB_QPT_GSI ? IB_QP1_QKEY : wr->remote_qkey;
+
+ err = mana_gd_post_work_request(queue, &wqe_req, &wqe_info);
+ if (err)
+ return err;
+
+ qp->ud_qp.sq_psn++;
+ shadow_wqe = shadow_queue_producer_entry(&qp->shadow_sq);
+ memset(shadow_wqe, 0, sizeof(*shadow_wqe));
+ shadow_wqe->header.opcode = IB_WC_SEND;
+ shadow_wqe->header.wr_id = wr->wr.wr_id;
+ shadow_wqe->header.posted_wqe_size = wqe_info.wqe_size_in_bu;
+ shadow_queue_advance_producer(&qp->shadow_sq);
+
+ mana_gd_wq_ring_doorbell(mdev_to_gc(mdev), queue);
+ return 0;
+}
+
+int mana_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ int err;
+ struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
+
+ for (; wr; wr = wr->next) {
+ switch (ibqp->qp_type) {
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ err = mana_ib_post_send_ud(qp, ud_wr(wr));
+ if (unlikely(err)) {
+ *bad_wr = wr;
+ return err;
+ }
+ break;
+ default:
+ ibdev_dbg(ibqp->device, "Posting send wr on qp type %u is not supported\n",
+ ibqp->qp_type);
+ return -EINVAL;
+ }
+ }
+
+ return err;
+}
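
In mana_ib_post_send_ud() above, gdma_sgl[0] always carries the DMA-mapped address vector and the caller's scatter/gather entries are copied in shifted by one, which is why the local array is sized MAX_WR_SGL_NUM + 1 and the WQE reports num_sge + 1. A small stand-alone sketch of that layout, using hypothetical toy_* names rather than the GDMA structures:

/* Illustrative SGE layout for a UD send: slot 0 is the driver-owned
 * address vector, the caller's SGEs follow shifted by one.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_WR_SGL_NUM 2

struct toy_sge {
        uint64_t addr;
        uint32_t lkey;
        uint32_t length;
};

static int build_ud_sgl(struct toy_sge *out, uint64_t av_dma, uint32_t av_lkey,
                        uint32_t av_len, const struct toy_sge *user, int num_sge)
{
        int i;

        if (num_sge > MAX_WR_SGL_NUM)
                return -1;

        out[0].addr = av_dma;           /* address vector always first */
        out[0].lkey = av_lkey;
        out[0].length = av_len;
        for (i = 0; i < num_sge; i++)
                out[i + 1] = user[i];   /* caller data shifted by one */

        return num_sge + 1;             /* what the WQE actually carries */
}

int main(void)
{
        struct toy_sge user[2] = { { 0x1000, 7, 64 }, { 0x2000, 7, 128 } };
        struct toy_sge wqe[MAX_WR_SGL_NUM + 1];
        int n = build_ud_sgl(wqe, 0xdead0000, 1, 32, user, 2);

        printf("hardware SGE count: %d (user supplied 2)\n", n);
        return 0;
}
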
diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig
index fc01deac1d3c..f30ce9dd080a 100644
--- a/drivers/infiniband/hw/mlx4/Kconfig
+++ b/drivers/infiniband/hw/mlx4/Kconfig
@@ -1,9 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
config MLX4_INFINIBAND
tristate "Mellanox ConnectX HCA support"
depends on NETDEVICES && ETHERNET && PCI && INET
select NET_VENDOR_MELLANOX
select MLX4_CORE
- ---help---
+ help
This driver provides low-level InfiniBand support for
Mellanox ConnectX PCI Express host channel adapters (HCAs).
This is required to use InfiniBand protocols such as
diff --git a/drivers/infiniband/hw/mlx4/Makefile b/drivers/infiniband/hw/mlx4/Makefile
index f4213b3a8fe1..7b6757b02857 100644
--- a/drivers/infiniband/hw/mlx4/Makefile
+++ b/drivers/infiniband/hw/mlx4/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MLX4_INFINIBAND) += mlx4_ib.o
mlx4_ib-y := ah.o cq.o doorbell.o mad.o main.o mr.o qp.o srq.o mcg.o cm.o alias_GUID.o sysfs.o
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 1688a17de4fe..7321d6ab5fe1 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -40,143 +40,195 @@
#include "mlx4_ib.h"
-static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
- struct mlx4_ib_ah *ah)
+static void create_ib_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
- struct mlx4_dev *dev = to_mdev(pd->device)->dev;
+ struct mlx4_ib_ah *ah = to_mah(ib_ah);
+ struct mlx4_dev *dev = to_mdev(ib_ah->device)->dev;
+
+ ah->av.ib.port_pd = cpu_to_be32(to_mpd(ib_ah->pd)->pdn |
+ (rdma_ah_get_port_num(ah_attr) << 24));
+ ah->av.ib.g_slid = rdma_ah_get_path_bits(ah_attr);
+ ah->av.ib.sl_tclass_flowlabel =
+ cpu_to_be32(rdma_ah_get_sl(ah_attr) << 28);
+ if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
- ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
- ah->av.ib.g_slid = ah_attr->src_path_bits;
- if (ah_attr->ah_flags & IB_AH_GRH) {
ah->av.ib.g_slid |= 0x80;
- ah->av.ib.gid_index = ah_attr->grh.sgid_index;
- ah->av.ib.hop_limit = ah_attr->grh.hop_limit;
+ ah->av.ib.gid_index = grh->sgid_index;
+ ah->av.ib.hop_limit = grh->hop_limit;
ah->av.ib.sl_tclass_flowlabel |=
- cpu_to_be32((ah_attr->grh.traffic_class << 20) |
- ah_attr->grh.flow_label);
- memcpy(ah->av.ib.dgid, ah_attr->grh.dgid.raw, 16);
+ cpu_to_be32((grh->traffic_class << 20) |
+ grh->flow_label);
+ memcpy(ah->av.ib.dgid, grh->dgid.raw, 16);
}
- ah->av.ib.dlid = cpu_to_be16(ah_attr->dlid);
- if (ah_attr->static_rate) {
- ah->av.ib.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
- while (ah->av.ib.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
- !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
- --ah->av.ib.stat_rate;
- }
- ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
+ ah->av.ib.dlid = cpu_to_be16(rdma_ah_get_dlid(ah_attr));
+ if (rdma_ah_get_static_rate(ah_attr)) {
+ u8 static_rate = rdma_ah_get_static_rate(ah_attr) +
+ MLX4_STAT_RATE_OFFSET;
- return &ah->ibah;
+ while (static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
+ !(1 << static_rate & dev->caps.stat_rate_support))
+ --static_rate;
+ ah->av.ib.stat_rate = static_rate;
+ }
}
-static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
- struct mlx4_ib_ah *ah)
+static int create_iboe_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
- struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
+ struct mlx4_ib_dev *ibdev = to_mdev(ib_ah->device);
+ struct mlx4_ib_ah *ah = to_mah(ib_ah);
+ const struct ib_gid_attr *gid_attr;
struct mlx4_dev *dev = ibdev->dev;
int is_mcast = 0;
struct in6_addr in6;
- u16 vlan_tag;
+ u16 vlan_tag = 0xffff;
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+ int ret;
- memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
- if (rdma_is_multicast_addr(&in6)) {
+ memcpy(&in6, grh->dgid.raw, sizeof(in6));
+ if (rdma_is_multicast_addr(&in6))
is_mcast = 1;
- rdma_get_mcast_mac(&in6, ah->av.eth.mac);
+
+ memcpy(ah->av.eth.mac, ah_attr->roce.dmac, ETH_ALEN);
+ eth_zero_addr(ah->av.eth.s_mac);
+
+ /*
+ * If sgid_attr is NULL we are being called by mlx4_ib_create_ah_slave
+ * and we are directly creating an AV for a slave's gid_index.
+ */
+ gid_attr = ah_attr->grh.sgid_attr;
+ if (gid_attr) {
+ ret = rdma_read_gid_l2_fields(gid_attr, &vlan_tag,
+ &ah->av.eth.s_mac[0]);
+ if (ret)
+ return ret;
+
+ ret = mlx4_ib_gid_index_to_real_index(ibdev, gid_attr);
+ if (ret < 0)
+ return ret;
+ ah->av.eth.gid_index = ret;
} else {
- memcpy(ah->av.eth.mac, ah_attr->dmac, ETH_ALEN);
+ /* mlx4_ib_create_ah_slave fills in the s_mac and the vlan */
+ ah->av.eth.gid_index = ah_attr->grh.sgid_index;
}
- vlan_tag = ah_attr->vlan_id;
+
if (vlan_tag < 0x1000)
- vlan_tag |= (ah_attr->sl & 7) << 13;
- ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
- ah->av.eth.gid_index = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
+ vlan_tag |= (rdma_ah_get_sl(ah_attr) & 7) << 13;
+ ah->av.eth.port_pd = cpu_to_be32(to_mpd(ib_ah->pd)->pdn |
+ (rdma_ah_get_port_num(ah_attr) << 24));
ah->av.eth.vlan = cpu_to_be16(vlan_tag);
- if (ah_attr->static_rate) {
- ah->av.eth.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
+ ah->av.eth.hop_limit = grh->hop_limit;
+ if (rdma_ah_get_static_rate(ah_attr)) {
+ ah->av.eth.stat_rate = rdma_ah_get_static_rate(ah_attr) +
+ MLX4_STAT_RATE_OFFSET;
while (ah->av.eth.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
!(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
--ah->av.eth.stat_rate;
}
-
+ ah->av.eth.sl_tclass_flowlabel |=
+ cpu_to_be32((grh->traffic_class << 20) |
+ grh->flow_label);
/*
* HW requires multicast LID so we just choose one.
*/
if (is_mcast)
ah->av.ib.dlid = cpu_to_be16(0xc000);
- memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
- ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29);
-
- return &ah->ibah;
+ memcpy(ah->av.eth.dgid, grh->dgid.raw, 16);
+ ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(rdma_ah_get_sl(ah_attr)
+ << 29);
+ return 0;
}
-struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+int mlx4_ib_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
- struct mlx4_ib_ah *ah;
- struct ib_ah *ret;
-
- ah = kzalloc(sizeof *ah, GFP_ATOMIC);
- if (!ah)
- return ERR_PTR(-ENOMEM);
-
- if (rdma_port_get_link_layer(pd->device, ah_attr->port_num) == IB_LINK_LAYER_ETHERNET) {
- if (!(ah_attr->ah_flags & IB_AH_GRH)) {
- ret = ERR_PTR(-EINVAL);
- } else {
- /*
- * TBD: need to handle the case when we get
- * called in an atomic context and there we
- * might sleep. We don't expect this
- * currently since we're working with link
- * local addresses which we can translate
- * without going to sleep.
- */
- ret = create_iboe_ah(pd, ah_attr, ah);
- }
-
- if (IS_ERR(ret))
- kfree(ah);
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
+
+ if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
+ if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
+ return -EINVAL;
+ /*
+ * TBD: need to handle the case when we get
+ * called in an atomic context and there we
+ * might sleep. We don't expect this
+ * currently since we're working with link
+ * local addresses which we can translate
+ * without going to sleep.
+ */
+ return create_iboe_ah(ib_ah, ah_attr);
+ }
+
+ create_ib_ah(ib_ah, ah_attr);
+ return 0;
+}
+int mlx4_ib_create_ah_slave(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
+ int slave_sgid_index, u8 *s_mac, u16 vlan_tag)
+{
+ struct rdma_ah_attr slave_attr = *ah_attr;
+ struct rdma_ah_init_attr init_attr = {};
+ struct mlx4_ib_ah *mah = to_mah(ah);
+ int ret;
+
+ slave_attr.grh.sgid_attr = NULL;
+ slave_attr.grh.sgid_index = slave_sgid_index;
+ init_attr.ah_attr = &slave_attr;
+ ret = mlx4_ib_create_ah(ah, &init_attr, NULL);
+ if (ret)
return ret;
- } else
- return create_ib_ah(pd, ah_attr, ah); /* never fails */
+
+ ah->type = ah_attr->type;
+
+ /* get rid of force-loopback bit */
+ mah->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
+
+ if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE)
+ memcpy(mah->av.eth.s_mac, s_mac, 6);
+
+ if (vlan_tag < 0x1000)
+ vlan_tag |= (rdma_ah_get_sl(ah_attr) & 7) << 13;
+ mah->av.eth.vlan = cpu_to_be16(vlan_tag);
+
+ return 0;
}
-int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
{
struct mlx4_ib_ah *ah = to_mah(ibah);
- enum rdma_link_layer ll;
+ int port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24;
memset(ah_attr, 0, sizeof *ah_attr);
- ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24;
- ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num);
- if (ll == IB_LINK_LAYER_ETHERNET)
- ah_attr->sl = be32_to_cpu(ah->av.eth.sl_tclass_flowlabel) >> 29;
- else
- ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
-
- ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ? be16_to_cpu(ah->av.ib.dlid) : 0;
- if (ah->av.ib.stat_rate)
- ah_attr->static_rate = ah->av.ib.stat_rate - MLX4_STAT_RATE_OFFSET;
- ah_attr->src_path_bits = ah->av.ib.g_slid & 0x7F;
+ ah_attr->type = ibah->type;
- if (mlx4_ib_ah_grh_present(ah)) {
- ah_attr->ah_flags = IB_AH_GRH;
-
- ah_attr->grh.traffic_class =
- be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20;
- ah_attr->grh.flow_label =
- be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) & 0xfffff;
- ah_attr->grh.hop_limit = ah->av.ib.hop_limit;
- ah_attr->grh.sgid_index = ah->av.ib.gid_index;
- memcpy(ah_attr->grh.dgid.raw, ah->av.ib.dgid, 16);
+ if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
+ rdma_ah_set_dlid(ah_attr, 0);
+ rdma_ah_set_sl(ah_attr,
+ be32_to_cpu(ah->av.eth.sl_tclass_flowlabel)
+ >> 29);
+ } else {
+ rdma_ah_set_dlid(ah_attr, be16_to_cpu(ah->av.ib.dlid));
+ rdma_ah_set_sl(ah_attr,
+ be32_to_cpu(ah->av.ib.sl_tclass_flowlabel)
+ >> 28);
}
- return 0;
-}
+ rdma_ah_set_port_num(ah_attr, port_num);
+ if (ah->av.ib.stat_rate)
+ rdma_ah_set_static_rate(ah_attr,
+ ah->av.ib.stat_rate -
+ MLX4_STAT_RATE_OFFSET);
+ rdma_ah_set_path_bits(ah_attr, ah->av.ib.g_slid & 0x7F);
+ if (mlx4_ib_ah_grh_present(ah)) {
+ u32 tc_fl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel);
+
+ rdma_ah_set_grh(ah_attr, NULL,
+ tc_fl & 0xfffff, ah->av.ib.gid_index,
+ ah->av.ib.hop_limit,
+ tc_fl >> 20);
+ rdma_ah_set_dgid_raw(ah_attr, ah->av.ib.dgid);
+ }
-int mlx4_ib_destroy_ah(struct ib_ah *ah)
-{
- kfree(to_mah(ah));
return 0;
}
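
Both create_ib_ah() and create_iboe_ah() above clamp the requested static rate by stepping it down until the corresponding bit is set in the device's stat_rate_support mask, never going below IB_RATE_2_5_GBPS plus the hardware offset. A small user-space model of that loop, with stand-in constants rather than the mlx4 definitions:

/* Illustrative clamp of a requested static rate against a support bitmap. */
#include <stdint.h>
#include <stdio.h>

#define TOY_STAT_RATE_OFFSET 5
#define TOY_RATE_2_5_GBPS    2  /* lowest rate the loop may reach */

static uint8_t clamp_stat_rate(uint8_t requested, uint32_t support_mask)
{
        uint8_t rate = requested + TOY_STAT_RATE_OFFSET;

        while (rate > TOY_RATE_2_5_GBPS + TOY_STAT_RATE_OFFSET &&
               !((1u << rate) & support_mask))
                --rate;
        return rate;
}

int main(void)
{
        /* device supports only (offset-adjusted) rates 7 and 9 */
        uint32_t support = (1u << 7) | (1u << 9);

        printf("requested 6 -> %u\n", clamp_stat_rate(6, support)); /* 11 -> 9 */
        printf("requested 3 -> %u\n", clamp_stat_rate(3, support)); /*  8 -> 7 */
        return 0;
}
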
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 21cb41a60fe8..d7327735b8d0 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -38,7 +38,6 @@
#include <rdma/ib_sa.h>
#include <rdma/ib_pack.h>
#include <linux/mlx4/cmd.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <rdma/ib_user_verbs.h>
@@ -73,12 +72,12 @@ static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
int *resched_delay_sec);
void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
- u8 port_num, u8 *p_data)
+ u32 port_num, u8 *p_data)
{
int i;
u64 guid_indexes;
int slave_id;
- int port_index = port_num - 1;
+ u32 port_index = port_num - 1;
if (!mlx4_is_master(dev->dev))
return;
@@ -86,7 +85,7 @@ void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
ports_guid[port_num - 1].
all_rec_per_port[block_num].guid_indexes);
- pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes);
+ pr_debug("port: %u, guid_indexes: 0x%llx\n", port_num, guid_indexes);
for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
/* The location of the specific index starts from bit number 4
@@ -184,7 +183,7 @@ unlock:
* port_number - 1 or 2
*/
void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
- int block_num, u8 port_num,
+ int block_num, u32 port_num,
u8 *p_data)
{
int i;
@@ -206,7 +205,7 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
ports_guid[port_num - 1].
all_rec_per_port[block_num].guid_indexes);
- pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes);
+ pr_debug("port: %u, guid_indexes: 0x%llx\n", port_num, guid_indexes);
/*calculate the slaves and notify them*/
for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
@@ -260,11 +259,11 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
new_state = set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID,
&gen_event);
- pr_debug("slave: %d, port: %d prev_port_state: %d,"
+ pr_debug("slave: %d, port: %u prev_port_state: %d,"
" new_port_state: %d, gen_event: %d\n",
slave_id, port_num, prev_state, new_state, gen_event);
if (gen_event == SLAVE_PORT_GEN_EVENT_UP) {
- pr_debug("sending PORT_UP event to slave: %d, port: %d\n",
+ pr_debug("sending PORT_UP event to slave: %d, port: %u\n",
slave_id, port_num);
mlx4_gen_port_state_change_eqe(dev->dev, slave_id,
port_num, MLX4_PORT_CHANGE_SUBTYPE_ACTIVE);
@@ -274,7 +273,7 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
&gen_event);
if (gen_event == SLAVE_PORT_GEN_EVENT_DOWN) {
- pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
+ pr_debug("sending PORT DOWN event to slave: %d, port: %u\n",
slave_id, port_num);
mlx4_gen_port_state_change_eqe(dev->dev,
slave_id,
@@ -310,7 +309,7 @@ static void aliasguid_query_handler(int status,
if (status) {
pr_debug("(port: %d) failed: status = %d\n",
cb_ctx->port, status);
- rec->time_to_run = ktime_get_real_ns() + 1 * NSEC_PER_SEC;
+ rec->time_to_run = ktime_get_boottime_ns() + 1 * NSEC_PER_SEC;
goto out;
}
@@ -416,7 +415,7 @@ next_entry:
be64_to_cpu((__force __be64)rec->guid_indexes),
be64_to_cpu((__force __be64)applied_guid_indexes),
be64_to_cpu((__force __be64)declined_guid_indexes));
- rec->time_to_run = ktime_get_real_ns() +
+ rec->time_to_run = ktime_get_boottime_ns() +
resched_delay_sec * NSEC_PER_SEC;
} else {
rec->status = MLX4_GUID_INFO_STATUS_SET;
@@ -499,6 +498,7 @@ static int set_guid_rec(struct ib_device *ibdev,
struct list_head *head =
&dev->sriov.alias_guid.ports_guid[port - 1].cb_list;
+ memset(&attr, 0, sizeof(attr));
err = __mlx4_ib_query_port(ibdev, port, &attr, 1);
if (err) {
pr_debug("mlx4_ib_query_port failed (err: %d), port: %d\n",
@@ -527,7 +527,7 @@ static int set_guid_rec(struct ib_device *ibdev,
memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));
- guid_info_rec.lid = cpu_to_be16(attr.lid);
+ guid_info_rec.lid = ib_lid_be16(attr.lid);
guid_info_rec.block_num = index;
memcpy(guid_info_rec.guid_info_list, rec_det->all_recs,
@@ -708,7 +708,7 @@ static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
}
}
if (resched_delay_sec) {
- u64 curr_time = ktime_get_real_ns();
+ u64 curr_time = ktime_get_boottime_ns();
*resched_delay_sec = (low_record_time < curr_time) ? 0 :
div_u64((low_record_time - curr_time), NSEC_PER_SEC);
@@ -755,10 +755,8 @@ static void alias_guid_work(struct work_struct *work)
struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov);
rec = kzalloc(sizeof *rec, GFP_KERNEL);
- if (!rec) {
- pr_err("alias_guid_work: No Memory\n");
+ if (!rec)
return;
- }
pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1);
ret = get_next_record_to_update(dev, sriov_alias_port->port, rec);
@@ -782,7 +780,7 @@ void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
if (!dev->sriov.is_going_down) {
- /* If there is pending one should cancell then run, otherwise
+ /* If there is pending one should cancel then run, otherwise
* won't run till previous one is ended as same work
* struct is used.
*/
@@ -805,8 +803,8 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
unsigned long flags;
for (i = 0 ; i < dev->num_ports; i++) {
- cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
det = &sriov->alias_guid.ports_guid[i];
+ cancel_delayed_work_sync(&det->alias_guid_work);
spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
while (!list_empty(&det->cb_list)) {
cb_ctx = list_entry(det->cb_list.next,
@@ -823,17 +821,14 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
}
spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
}
- for (i = 0 ; i < dev->num_ports; i++) {
- flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
+ for (i = 0 ; i < dev->num_ports; i++)
destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
- }
ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
kfree(dev->sriov.alias_guid.sa_client);
}
int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
{
- char alias_wq_name[15];
int ret = 0;
int i, j;
union ib_gid gid;
@@ -850,7 +845,7 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
spin_lock_init(&dev->sriov.alias_guid.ag_work_lock);
for (i = 1; i <= dev->num_ports; ++i) {
- if (dev->ib_dev.query_gid(&dev->ib_dev , i, 0, &gid)) {
+ if (dev->ib_dev.ops.query_gid(&dev->ib_dev, i, 0, &gid)) {
ret = -EFAULT;
goto err_unregister;
}
@@ -879,9 +874,8 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
dev->sriov.alias_guid.ports_guid[i].port = i;
- snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
dev->sriov.alias_guid.ports_guid[i].wq =
- create_singlethread_workqueue(alias_wq_name);
+ alloc_ordered_workqueue("alias_guid%d", WQ_MEM_RECLAIM, i);
if (!dev->sriov.alias_guid.ports_guid[i].wq) {
ret = -ENOMEM;
goto err_thread;
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 39a488889fc7..12b481d138cf 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -39,7 +39,7 @@
#include "mlx4_ib.h"
-#define CM_CLEANUP_CACHE_TIMEOUT (5 * HZ)
+#define CM_CLEANUP_CACHE_TIMEOUT (30 * HZ)
struct id_map_entry {
struct rb_node node;
@@ -54,11 +54,20 @@ struct id_map_entry {
struct delayed_work timeout;
};
+struct rej_tmout_entry {
+ int slave;
+ u32 rem_pv_cm_id;
+ struct delayed_work timeout;
+ struct xarray *xa_rej_tmout;
+};
+
struct cm_generic_msg {
struct ib_mad_hdr hdr;
__be32 local_comm_id;
__be32 remote_comm_id;
+ unsigned char unused[2];
+ __be16 rej_reason;
};
struct cm_sidr_generic_msg {
@@ -71,6 +80,7 @@ struct cm_req_msg {
union ib_gid primary_path_sgid;
};
+static struct workqueue_struct *cm_wq;
static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
{
@@ -168,20 +178,17 @@ static void id_map_ent_timeout(struct work_struct *work)
{
struct delayed_work *delay = to_delayed_work(work);
struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
- struct id_map_entry *db_ent, *found_ent;
+ struct id_map_entry *found_ent;
struct mlx4_ib_dev *dev = ent->dev;
struct mlx4_ib_sriov *sriov = &dev->sriov;
struct rb_root *sl_id_map = &sriov->sl_id_map;
- int pv_id = (int) ent->pv_cm_id;
spin_lock(&sriov->id_map_lock);
- db_ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_id);
- if (!db_ent)
+ if (!xa_erase(&sriov->pv_id_table, ent->pv_cm_id))
goto out;
found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
if (found_ent && found_ent == ent)
rb_erase(&found_ent->node, sl_id_map);
- idr_remove(&sriov->pv_id_table, pv_id);
out:
list_del(&ent->list);
@@ -189,24 +196,6 @@ out:
kfree(ent);
}
-static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id)
-{
- struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
- struct rb_root *sl_id_map = &sriov->sl_id_map;
- struct id_map_entry *ent, *found_ent;
-
- spin_lock(&sriov->id_map_lock);
- ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_cm_id);
- if (!ent)
- goto out;
- found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
- if (found_ent && found_ent == ent)
- rb_erase(&found_ent->node, sl_id_map);
- idr_remove(&sriov->pv_id_table, pv_cm_id);
-out:
- spin_unlock(&sriov->id_map_lock);
-}
-
static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
{
struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
@@ -247,10 +236,8 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
ent = kmalloc(sizeof (struct id_map_entry), GFP_KERNEL);
- if (!ent) {
- mlx4_ib_warn(ibdev, "Couldn't allocate id cache entry - out of memory\n");
+ if (!ent)
return ERR_PTR(-ENOMEM);
- }
ent->sl_cm_id = sl_cm_id;
ent->slave_id = slave_id;
@@ -258,41 +245,35 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
ent->dev = to_mdev(ibdev);
INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);
- idr_preload(GFP_KERNEL);
- spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
-
- ret = idr_alloc_cyclic(&sriov->pv_id_table, ent, 0, 0, GFP_NOWAIT);
+ ret = xa_alloc_cyclic(&sriov->pv_id_table, &ent->pv_cm_id, ent,
+ xa_limit_32b, &sriov->pv_id_next, GFP_KERNEL);
if (ret >= 0) {
- ent->pv_cm_id = (u32)ret;
+ spin_lock(&sriov->id_map_lock);
sl_id_map_add(ibdev, ent);
list_add_tail(&ent->list, &sriov->cm_list);
- }
-
- spin_unlock(&sriov->id_map_lock);
- idr_preload_end();
-
- if (ret >= 0)
+ spin_unlock(&sriov->id_map_lock);
return ent;
+ }
/*error flow*/
kfree(ent);
- mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret);
+ mlx4_ib_warn(ibdev, "Allocation failed (err:0x%x)\n", ret);
return ERR_PTR(-ENOMEM);
}
static struct id_map_entry *
-id_map_get(struct ib_device *ibdev, int *pv_cm_id, int sl_cm_id, int slave_id)
+id_map_get(struct ib_device *ibdev, int *pv_cm_id, int slave_id, int sl_cm_id)
{
struct id_map_entry *ent;
struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
spin_lock(&sriov->id_map_lock);
if (*pv_cm_id == -1) {
- ent = id_map_find_by_sl_id(ibdev, sl_cm_id, slave_id);
+ ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
if (ent)
*pv_cm_id = (int) ent->pv_cm_id;
} else
- ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, *pv_cm_id);
+ ent = xa_load(&sriov->pv_id_table, *pv_cm_id);
spin_unlock(&sriov->id_map_lock);
return ent;
@@ -306,14 +287,18 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
spin_lock(&sriov->id_map_lock);
spin_lock_irqsave(&sriov->going_down_lock, flags);
/*make sure that there is no schedule inside the scheduled work.*/
- if (!sriov->is_going_down) {
+ if (!sriov->is_going_down && !id->scheduled_delete) {
id->scheduled_delete = 1;
- schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+ queue_delayed_work(cm_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+ } else if (id->scheduled_delete) {
+ /* Adjust timeout if already scheduled */
+ mod_delayed_work(cm_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
}
spin_unlock_irqrestore(&sriov->going_down_lock, flags);
spin_unlock(&sriov->id_map_lock);
}
+#define REJ_REASON(m) be16_to_cpu(((struct cm_generic_msg *)(m))->rej_reason)
int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
struct ib_mad *mad)
{
@@ -322,9 +307,14 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
int pv_cm_id = -1;
if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
- mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
- mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
+ mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
+ mad->mad_hdr.attr_id == CM_MRA_ATTR_ID ||
+ mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID ||
+ (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID && REJ_REASON(mad) == IB_CM_REJ_TIMEOUT)) {
sl_cm_id = get_local_comm_id(mad);
+ id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
+ if (id)
+ goto cont;
id = id_map_alloc(ibdev, slave_id, sl_cm_id);
if (IS_ERR(id)) {
mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
@@ -340,26 +330,107 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
}
if (!id) {
- pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL!\n",
- slave_id, sl_cm_id);
+ pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL! attr_id: 0x%x\n",
+ slave_id, sl_cm_id, be16_to_cpu(mad->mad_hdr.attr_id));
return -EINVAL;
}
+cont:
set_local_comm_id(mad, id->pv_cm_id);
if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
schedule_delayed(ibdev, id);
- else if (mad->mad_hdr.attr_id == CM_DREP_ATTR_ID)
- id_map_find_del(ibdev, pv_cm_id);
+ return 0;
+}
+
+static void rej_tmout_timeout(struct work_struct *work)
+{
+ struct delayed_work *delay = to_delayed_work(work);
+ struct rej_tmout_entry *item = container_of(delay, struct rej_tmout_entry, timeout);
+ struct rej_tmout_entry *deleted;
+
+ deleted = xa_cmpxchg(item->xa_rej_tmout, item->rem_pv_cm_id, item, NULL, 0);
+
+ if (deleted != item)
+ pr_debug("deleted(%p) != item(%p)\n", deleted, item);
+
+ kfree(item);
+}
+
+static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int slave)
+{
+ struct rej_tmout_entry *item;
+ struct rej_tmout_entry *old;
+ int ret = 0;
+
+ xa_lock(&sriov->xa_rej_tmout);
+ item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);
+
+ if (item) {
+ if (xa_err(item))
+ ret = xa_err(item);
+ else
+ /* If a retry, adjust delayed work */
+ mod_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+ goto err_or_exists;
+ }
+ xa_unlock(&sriov->xa_rej_tmout);
+
+ item = kmalloc(sizeof(*item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&item->timeout, rej_tmout_timeout);
+ item->slave = slave;
+ item->rem_pv_cm_id = rem_pv_cm_id;
+ item->xa_rej_tmout = &sriov->xa_rej_tmout;
+
+ old = xa_cmpxchg(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id, NULL, item, GFP_KERNEL);
+ if (old) {
+ pr_debug(
+ "Non-null old entry (%p) or error (%d) when inserting\n",
+ old, xa_err(old));
+ kfree(item);
+ return xa_err(old);
+ }
+
+ queue_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
return 0;
+
+err_or_exists:
+ xa_unlock(&sriov->xa_rej_tmout);
+ return ret;
+}
+
+static int lookup_rej_tmout_slave(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id)
+{
+ struct rej_tmout_entry *item;
+ int slave;
+
+ xa_lock(&sriov->xa_rej_tmout);
+ item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);
+
+ if (!item || xa_err(item)) {
+ pr_debug("Could not find slave. rem_pv_cm_id 0x%x error: %d\n",
+ rem_pv_cm_id, xa_err(item));
+ slave = !item ? -ENOENT : xa_err(item);
+ } else {
+ slave = item->slave;
+ }
+ xa_unlock(&sriov->xa_rej_tmout);
+
+ return slave;
}
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
struct ib_mad *mad)
{
+ struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
+ u32 rem_pv_cm_id = get_local_comm_id(mad);
u32 pv_cm_id;
struct id_map_entry *id;
+ int sts;
if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
@@ -375,6 +446,13 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
be64_to_cpu(gid.global.interface_id));
return -ENOENT;
}
+
+ sts = alloc_rej_tmout(sriov, rem_pv_cm_id, *slave);
+ if (sts)
+ /* Even if this fails, we pass on the REQ to the slave */
+ pr_debug("Could not allocate rej_tmout entry. rem_pv_cm_id 0x%x slave %d status %d\n",
+ rem_pv_cm_id, *slave, sts);
+
return 0;
}
@@ -382,7 +460,14 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1);
if (!id) {
- pr_debug("Couldn't find an entry for pv_cm_id 0x%x\n", pv_cm_id);
+ if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID &&
+ REJ_REASON(mad) == IB_CM_REJ_TIMEOUT && slave) {
+ *slave = lookup_rej_tmout_slave(sriov, rem_pv_cm_id);
+
+ return (*slave < 0) ? *slave : 0;
+ }
+ pr_debug("Couldn't find an entry for pv_cm_id 0x%x, attr_id 0x%x\n",
+ pv_cm_id, be16_to_cpu(mad->mad_hdr.attr_id));
return -ENOENT;
}
@@ -390,12 +475,9 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
*slave = id->slave_id;
set_remote_comm_id(mad, id->sl_cm_id);
- if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
+ if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID ||
+ mad->mad_hdr.attr_id == CM_REJ_ATTR_ID)
schedule_delayed(ibdev, id);
- else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
- mad->mad_hdr.attr_id == CM_DREP_ATTR_ID) {
- id_map_find_del(ibdev, (int) pv_cm_id);
- }
return 0;
}
@@ -405,7 +487,35 @@ void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
spin_lock_init(&dev->sriov.id_map_lock);
INIT_LIST_HEAD(&dev->sriov.cm_list);
dev->sriov.sl_id_map = RB_ROOT;
- idr_init(&dev->sriov.pv_id_table);
+ xa_init_flags(&dev->sriov.pv_id_table, XA_FLAGS_ALLOC);
+ xa_init(&dev->sriov.xa_rej_tmout);
+}
+
+static void rej_tmout_xa_cleanup(struct mlx4_ib_sriov *sriov, int slave)
+{
+ struct rej_tmout_entry *item;
+ bool flush_needed = false;
+ unsigned long id;
+ int cnt = 0;
+
+ xa_lock(&sriov->xa_rej_tmout);
+ xa_for_each(&sriov->xa_rej_tmout, id, item) {
+ if (slave < 0 || slave == item->slave) {
+ mod_delayed_work(cm_wq, &item->timeout, 0);
+ flush_needed = true;
+ ++cnt;
+ }
+ }
+ xa_unlock(&sriov->xa_rej_tmout);
+
+ if (flush_needed) {
+ flush_workqueue(cm_wq);
+ pr_debug("Deleted %d entries in xarray for slave %d during cleanup\n",
+ cnt, slave);
+ }
+
+ if (slave < 0)
+ WARN_ON(!xa_empty(&sriov->xa_rej_tmout));
}
/* slave = -1 ==> all slaves */
@@ -416,7 +526,7 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
struct rb_root *sl_id_map = &sriov->sl_id_map;
struct list_head lh;
struct rb_node *nd;
- int need_flush = 1;
+ int need_flush = 0;
struct id_map_entry *map, *tmp_map;
/* cancel all delayed work queue entries */
INIT_LIST_HEAD(&lh);
@@ -424,14 +534,14 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
if (slave < 0 || slave == map->slave_id) {
if (map->scheduled_delete)
- need_flush &= !!cancel_delayed_work(&map->timeout);
+ need_flush |= !cancel_delayed_work(&map->timeout);
}
}
spin_unlock(&sriov->id_map_lock);
- if (!need_flush)
- flush_scheduled_work(); /* make sure all timers were flushed */
+ if (need_flush)
+ flush_workqueue(cm_wq); /* make sure all timers were flushed */
/* now, remove all leftover entries from databases*/
spin_lock(&sriov->id_map_lock);
@@ -442,7 +552,7 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
struct id_map_entry, node);
rb_erase(&ent->node, sl_id_map);
- idr_remove(&sriov->pv_id_table, (int) ent->pv_cm_id);
+ xa_erase(&sriov->pv_id_table, ent->pv_cm_id);
}
list_splice_init(&dev->sriov.cm_list, &lh);
} else {
@@ -458,7 +568,7 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
/* remove those nodes from databases */
list_for_each_entry_safe(map, tmp_map, &lh, list) {
rb_erase(&map->node, sl_id_map);
- idr_remove(&sriov->pv_id_table, (int) map->pv_cm_id);
+ xa_erase(&sriov->pv_id_table, map->pv_cm_id);
}
/* add remaining nodes from cm_list */
@@ -475,4 +585,20 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
list_del(&map->list);
kfree(map);
}
+
+ rej_tmout_xa_cleanup(sriov, slave);
+}
+
+int mlx4_ib_cm_init(void)
+{
+ cm_wq = alloc_workqueue("mlx4_ib_cm", 0, 0);
+ if (!cm_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mlx4_ib_cm_destroy(void)
+{
+ destroy_workqueue(cm_wq);
}
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 5fd49f9435f9..c592374f4a58 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -37,7 +37,8 @@
#include <linux/slab.h>
#include "mlx4_ib.h"
-#include "user.h"
+#include <rdma/mlx4-abi.h>
+#include <rdma/uverbs_ioctl.h>
static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
@@ -102,7 +103,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
int err;
err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
- PAGE_SIZE * 2, &buf->buf, GFP_KERNEL);
+ PAGE_SIZE * 2, &buf->buf);
if (err)
goto out;
@@ -113,7 +114,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
if (err)
goto err_buf;
- err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf, GFP_KERNEL);
+ err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
if (err)
goto err_mtt;
@@ -134,20 +135,27 @@ static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}
-static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
- struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
- u64 buf_addr, int cqe)
+static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev,
+ struct mlx4_ib_cq_buf *buf,
+ struct ib_umem **umem, u64 buf_addr, int cqe)
{
int err;
int cqe_size = dev->dev->caps.cqe_size;
+ int shift;
+ int n;
- *umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
- IB_ACCESS_LOCAL_WRITE, 1);
+ *umem = ib_umem_get(&dev->ib_dev, buf_addr, cqe * cqe_size,
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
- err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
- ilog2((*umem)->page_size), &buf->mtt);
+ shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
+ if (shift < 0) {
+ err = shift;
+ goto err_buf;
+ }
+
+ err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
if (err)
goto err_buf;
@@ -166,28 +174,27 @@ err_buf:
return err;
}
-#define CQ_CREATE_FLAGS_SUPPORTED IB_CQ_FLAGS_TIMESTAMP_COMPLETION
-struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+#define CQ_CREATE_FLAGS_SUPPORTED IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION
+int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
{
+ struct ib_udata *udata = &attrs->driver_udata;
+ struct ib_device *ibdev = ibcq->device;
int entries = attr->cqe;
int vector = attr->comp_vector;
struct mlx4_ib_dev *dev = to_mdev(ibdev);
- struct mlx4_ib_cq *cq;
+ struct mlx4_ib_cq *cq = to_mcq(ibcq);
struct mlx4_uar *uar;
+ void *buf_addr;
int err;
+ struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx4_ib_ucontext, ibucontext);
if (entries < 1 || entries > dev->dev->caps.max_cqes)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (attr->flags & ~CQ_CREATE_FLAGS_SUPPORTED)
- return ERR_PTR(-EINVAL);
-
- cq = kmalloc(sizeof *cq, GFP_KERNEL);
- if (!cq)
- return ERR_PTR(-ENOMEM);
+ return -EINVAL;
entries = roundup_pow_of_two(entries + 1);
cq->ibcq.cqe = entries - 1;
@@ -199,7 +206,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
INIT_LIST_HEAD(&cq->send_qp_list);
INIT_LIST_HEAD(&cq->recv_qp_list);
- if (context) {
+ if (udata) {
struct mlx4_ib_create_cq ucmd;
if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
@@ -207,19 +214,20 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
goto err_cq;
}
- err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
+ buf_addr = (void *)(unsigned long)ucmd.buf_addr;
+ err = mlx4_ib_get_cq_umem(dev, &cq->buf, &cq->umem,
ucmd.buf_addr, entries);
if (err)
goto err_cq;
- err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
- &cq->db);
+ err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &cq->db);
if (err)
goto err_mtt;
- uar = &to_mucontext(context)->uar;
+ uar = &context->uar;
+ cq->mcq.usage = MLX4_RES_USAGE_USER_VERBS;
} else {
- err = mlx4_db_alloc(dev->dev, &cq->db, 1, GFP_KERNEL);
+ err = mlx4_db_alloc(dev->dev, &cq->db, 1);
if (err)
goto err_cq;
@@ -232,52 +240,56 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
if (err)
goto err_db;
+ buf_addr = &cq->buf.buf;
+
uar = &dev->priv_uar;
+ cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
}
if (dev->eq_table)
vector = dev->eq_table[vector % ibdev->num_comp_vectors];
- err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
- cq->db.dma, &cq->mcq, vector, 0,
- !!(cq->create_flags & IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
+ err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, cq->db.dma,
+ &cq->mcq, vector, 0,
+ !!(cq->create_flags &
+ IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION),
+ buf_addr, !!udata);
if (err)
goto err_dbmap;
- if (context)
+ if (udata)
cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
else
cq->mcq.comp = mlx4_ib_cq_comp;
cq->mcq.event = mlx4_ib_cq_event;
- if (context)
+ if (udata)
if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
err = -EFAULT;
- goto err_dbmap;
+ goto err_cq_free;
}
- return &cq->ibcq;
+ return 0;
+
+err_cq_free:
+ mlx4_cq_free(dev->dev, &cq->mcq);
err_dbmap:
- if (context)
- mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);
+ if (udata)
+ mlx4_ib_db_unmap_user(context, &cq->db);
err_mtt:
mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);
- if (context)
- ib_umem_release(cq->umem);
- else
+ ib_umem_release(cq->umem);
+ if (!udata)
mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
err_db:
- if (!context)
+ if (!udata)
mlx4_db_free(dev->dev, &cq->db);
-
err_cq:
- kfree(cq);
-
- return ERR_PTR(err);
+ return err;
}
static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
@@ -288,7 +300,7 @@ static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
if (cq->resize_buf)
return -EBUSY;
- cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+ cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
if (!cq->resize_buf)
return -ENOMEM;
@@ -316,12 +328,12 @@ static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq
if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
return -EFAULT;
- cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+ cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
if (!cq->resize_buf)
return -ENOMEM;
- err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
- &cq->resize_umem, ucmd.buf_addr, entries);
+ err = mlx4_ib_get_cq_umem(dev, &cq->resize_buf->buf, &cq->resize_umem,
+ ucmd.buf_addr, entries);
if (err) {
kfree(cq->resize_buf);
cq->resize_buf = NULL;
@@ -459,18 +471,15 @@ err_buf:
kfree(cq->resize_buf);
cq->resize_buf = NULL;
- if (cq->resize_umem) {
- ib_umem_release(cq->resize_umem);
- cq->resize_umem = NULL;
- }
-
+ ib_umem_release(cq->resize_umem);
+ cq->resize_umem = NULL;
out:
mutex_unlock(&cq->resize_mutex);
return err;
}
-int mlx4_ib_destroy_cq(struct ib_cq *cq)
+int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
struct mlx4_ib_dev *dev = to_mdev(cq->device);
struct mlx4_ib_cq *mcq = to_mcq(cq);
@@ -478,16 +487,18 @@ int mlx4_ib_destroy_cq(struct ib_cq *cq)
mlx4_cq_free(dev->dev, &mcq->mcq);
mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);
- if (cq->uobject) {
- mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
- ib_umem_release(mcq->umem);
+ if (udata) {
+ mlx4_ib_db_unmap_user(
+ rdma_udata_to_drv_context(
+ udata,
+ struct mlx4_ib_ucontext,
+ ibucontext),
+ &mcq->db);
} else {
mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
mlx4_db_free(dev->dev, &mcq->db);
}
-
- kfree(mcq);
-
+ ib_umem_release(mcq->umem);
return 0;
}
@@ -562,22 +573,17 @@ static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
wc->vendor_err = cqe->vendor_err_syndrome;
}
-static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
+static int mlx4_ib_ipoib_csum_ok(__be16 status, u8 badfcs_enc, __be16 checksum)
{
- return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
- MLX4_CQE_STATUS_IPV4F |
- MLX4_CQE_STATUS_IPV4OPT |
- MLX4_CQE_STATUS_IPV6 |
- MLX4_CQE_STATUS_IPOK)) ==
- cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
- MLX4_CQE_STATUS_IPOK)) &&
- (status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
- MLX4_CQE_STATUS_TCP)) &&
- checksum == cpu_to_be16(0xffff);
+ return ((badfcs_enc & MLX4_CQE_STATUS_L4_CSUM) ||
+ ((status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
+ (status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
+ MLX4_CQE_STATUS_UDP)) &&
+ (checksum == cpu_to_be16(0xffff))));
}
-static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
- unsigned tail, struct mlx4_cqe *cqe, int is_eth)
+static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
+ unsigned tail, struct mlx4_cqe *cqe, int is_eth)
{
struct mlx4_ib_proxy_sqp_hdr *hdr;
@@ -592,6 +598,7 @@ static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
wc->dlid_path_bits = 0;
if (is_eth) {
+ wc->slid = 0;
wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
@@ -600,8 +607,6 @@ static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
}
-
- return 0;
}
static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
@@ -634,7 +639,7 @@ static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
struct mlx4_ib_qp *qp;
*npolled = 0;
- /* Find uncompleted WQEs belonging to that cq and retrun
+ /* Find uncompleted WQEs belonging to that cq and return
* simulated FLUSH_ERR completions
*/
list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
@@ -689,12 +694,6 @@ repoll:
is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
MLX4_CQE_OPCODE_ERROR;
- if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
- is_send)) {
- pr_warn("Completion for NOP opcode detected!\n");
- return -EINVAL;
- }
-
/* Resize CQ in progress */
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
if (cq->resize_buf) {
@@ -720,12 +719,6 @@ repoll:
*/
mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
be32_to_cpu(cqe->vlan_my_qpn));
- if (unlikely(!mqp)) {
- pr_warn("CQ %06x with entry for unknown QPN %06x\n",
- cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
- return -EINVAL;
- }
-
*cur_qp = to_mibqp(mqp);
}
@@ -738,11 +731,6 @@ repoll:
/* SRQ is also in the radix tree */
msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
srq_num);
- if (unlikely(!msrq)) {
- pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
- cq->mcq.cqn, srq_num);
- return -EINVAL;
- }
}
if (is_send) {
@@ -782,11 +770,13 @@ repoll:
switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
case MLX4_OPCODE_RDMA_WRITE_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
+ fallthrough;
case MLX4_OPCODE_RDMA_WRITE:
wc->opcode = IB_WC_RDMA_WRITE;
break;
case MLX4_OPCODE_SEND_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
+ fallthrough;
case MLX4_OPCODE_SEND:
case MLX4_OPCODE_SEND_INVAL:
wc->opcode = IB_WC_SEND;
@@ -811,14 +801,11 @@ repoll:
wc->opcode = IB_WC_MASKED_FETCH_ADD;
wc->byte_len = 8;
break;
- case MLX4_OPCODE_BIND_MW:
- wc->opcode = IB_WC_BIND_MW;
- break;
case MLX4_OPCODE_LSO:
wc->opcode = IB_WC_LSO;
break;
case MLX4_OPCODE_FMR:
- wc->opcode = IB_WC_FAST_REG_MR;
+ wc->opcode = IB_WC_REG_MR;
break;
case MLX4_OPCODE_LOCAL_INVAL:
wc->opcode = IB_WC_LOCAL_INV;
@@ -855,20 +842,23 @@ repoll:
if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
if ((*cur_qp)->mlx4_ib_qp_type &
(MLX4_IB_QPT_PROXY_SMI_OWNER |
- MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
- return use_tunnel_data(*cur_qp, cq, wc, tail,
- cqe, is_eth);
+ MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
+ use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
+ is_eth);
+ return 0;
+ }
}
- wc->slid = be16_to_cpu(cqe->rlid);
g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
wc->src_qp = g_mlpath_rqpn & 0xffffff;
wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
+ cqe->badfcs_enc,
cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
if (is_eth) {
+ wc->slid = 0;
wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
if (be32_to_cpu(cqe->vlan_my_qpn) &
MLX4_CQE_CVLAN_PRESENT_MASK) {
@@ -880,6 +870,7 @@ repoll:
memcpy(wc->smac, cqe->smac, ETH_ALEN);
wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
} else {
+ wc->slid = be16_to_cpu(cqe->rlid);
wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
wc->vlan_id = 0xffff;
}
@@ -894,7 +885,6 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
struct mlx4_ib_qp *cur_qp = NULL;
unsigned long flags;
int npolled;
- int err = 0;
struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);
spin_lock_irqsave(&cq->lock, flags);
@@ -904,8 +894,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
}
for (npolled = 0; npolled < num_entries; ++npolled) {
- err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
- if (err)
+ if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled))
break;
}
@@ -914,10 +903,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
out:
spin_unlock_irqrestore(&cq->lock, flags);
- if (err == 0 || err == -EAGAIN)
- return npolled;
- else
- return err;
+ return npolled;
}
int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index c51740986367..9bbd695a9fd5 100644
--- a/drivers/infiniband/hw/mlx4/doorbell.c
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -31,6 +31,7 @@
*/
#include <linux/slab.h>
+#include <rdma/uverbs_ioctl.h>
#include "mlx4_ib.h"
@@ -41,11 +42,13 @@ struct mlx4_ib_user_db_page {
int refcnt;
};
-int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
+int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
struct mlx4_db *db)
{
struct mlx4_ib_user_db_page *page;
int err = 0;
+ struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx4_ib_ucontext, ibucontext);
mutex_lock(&context->db_page_mutex);
@@ -61,8 +64,8 @@ int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
page->user_virt = (virt & PAGE_MASK);
page->refcnt = 0;
- page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
- PAGE_SIZE, 0, 0);
+ page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK,
+ PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
err = PTR_ERR(page->umem);
kfree(page);
@@ -72,7 +75,8 @@ int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
list_add(&page->list, &context->db_page_list);
found:
- db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
+ db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
+ (virt & ~PAGE_MASK);
db->u.user_page = page;
++page->refcnt;
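
In the doorbell.c hunk above, the DMA address handed back to the caller is the mapped page's scatterlist DMA address plus the in-page offset of the user virtual address, while the pages themselves are deduplicated by their page-aligned address and refcounted. A small sketch of that address split, assuming 4 KiB pages (plain C, no kernel APIs):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
        uint64_t virt     = 0x7f32a1c04c8ULL;   /* arbitrary user address */
        uint64_t page_key = virt & PAGE_MASK;   /* lookup key for the refcounted page */
        uint64_t offset   = virt & ~PAGE_MASK;  /* added to the page's DMA address */

        printf("page %#llx, offset %#llx\n",
               (unsigned long long)page_key, (unsigned long long)offset);
        return 0;
}
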
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 1cd75ff02251..91c714f72099 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -39,7 +39,10 @@
#include <linux/mlx4/cmd.h>
#include <linux/gfp.h>
#include <rdma/ib_pma.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <linux/mlx4/driver.h>
#include "mlx4_ib.h"
enum {
@@ -85,15 +88,15 @@ struct mlx4_rcv_tunnel_mad {
struct ib_mad mad;
} __packed;
-static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);
-static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num);
+static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u32 port_num);
+static void handle_lid_change_event(struct mlx4_ib_dev *dev, u32 port_num);
static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
int block, u32 change_bitmap);
__be64 mlx4_ib_gen_node_guid(void)
{
#define NODE_GUID_HI ((u64) (((u64)IB_OPENIB_OUI) << 40))
- return cpu_to_be64(NODE_GUID_HI | prandom_u32());
+ return cpu_to_be64(NODE_GUID_HI | get_random_u32());
}
__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
@@ -166,7 +169,7 @@ int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
op_modifier |= 0x4;
- in_modifier |= in_wc->slid << 16;
+ in_modifier |= ib_lid_cpu16(in_wc->slid) << 16;
}
err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
@@ -183,28 +186,29 @@ int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
return err;
}
-static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
+static void update_sm_ah(struct mlx4_ib_dev *dev, u32 port_num, u16 lid, u8 sl)
{
struct ib_ah *new_ah;
- struct ib_ah_attr ah_attr;
+ struct rdma_ah_attr ah_attr;
unsigned long flags;
if (!dev->send_agent[port_num - 1][0])
return;
memset(&ah_attr, 0, sizeof ah_attr);
- ah_attr.dlid = lid;
- ah_attr.sl = sl;
- ah_attr.port_num = port_num;
+ ah_attr.type = rdma_ah_find_type(&dev->ib_dev, port_num);
+ rdma_ah_set_dlid(&ah_attr, lid);
+ rdma_ah_set_sl(&ah_attr, sl);
+ rdma_ah_set_port_num(&ah_attr, port_num);
- new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
- &ah_attr);
+ new_ah = rdma_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
+ &ah_attr, 0);
if (IS_ERR(new_ah))
return;
spin_lock_irqsave(&dev->sm_lock, flags);
if (dev->sm_ah[port_num - 1])
- ib_destroy_ah(dev->sm_ah[port_num - 1]);
+ rdma_destroy_ah(dev->sm_ah[port_num - 1], 0);
dev->sm_ah[port_num - 1] = new_ah;
spin_unlock_irqrestore(&dev->sm_lock, flags);
}
@@ -213,8 +217,8 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
* Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
* synthesize LID change, Client-Rereg, GID change, and P_Key change events.
*/
-static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad *mad,
- u16 prev_lid)
+static void smp_snoop(struct ib_device *ibdev, u32 port_num,
+ const struct ib_mad *mad, u16 prev_lid)
{
struct ib_port_info *pinfo;
u16 lid;
@@ -229,6 +233,8 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad
mad->mad_hdr.method == IB_MGMT_METHOD_SET)
switch (mad->mad_hdr.attr_id) {
case IB_SMP_ATTR_PORT_INFO:
+ if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
+ return;
pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
lid = be16_to_cpu(pinfo->lid);
@@ -244,6 +250,8 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad
break;
case IB_SMP_ATTR_PKEY_TABLE:
+ if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
+ return;
if (!mlx4_is_mfunc(dev->dev)) {
mlx4_ib_dispatch_event(dev, port_num,
IB_EVENT_PKEY_CHANGE);
@@ -266,7 +274,7 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad
be16_to_cpu(base[i]);
}
}
- pr_debug("PKEY Change event: port=%d, "
+ pr_debug("PKEY Change event: port=%u, "
"block=0x%x, change_bitmap=0x%x\n",
port_num, bn, pkey_change_bitmap);
@@ -280,6 +288,8 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad
break;
case IB_SMP_ATTR_GUID_INFO:
+ if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
+ return;
/* paravirtualized master's guid is guid 0 -- does not change */
if (!mlx4_is_master(dev->dev))
mlx4_ib_dispatch_event(dev, port_num,
@@ -295,6 +305,26 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad
}
break;
+ case IB_SMP_ATTR_SL_TO_VL_TABLE:
+ /* cache sl to vl mapping changes for use in
+ * filling QP1 LRH VL field when sending packets
+ */
+ if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV &&
+ dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)
+ return;
+ if (!mlx4_is_slave(dev->dev)) {
+ union sl2vl_tbl_to_u64 sl2vl64;
+ int jj;
+
+ for (jj = 0; jj < 8; jj++) {
+ sl2vl64.sl8[jj] = ((struct ib_smp *)mad)->data[jj];
+ pr_debug("port %u, sl2vl[%d] = %02x\n",
+ port_num, jj, sl2vl64.sl8[jj]);
+ }
+ atomic64_set(&dev->sl2vl[port_num - 1], sl2vl64.sl64);
+ }
+ break;
+
default:
break;
}
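
Both the IB_SMP_ATTR_SL_TO_VL_TABLE case added above and the port-management-change handler further down publish the eight SL-to-VL entries as a single 64-bit value, so the QP1 send path can read a consistent table without taking a lock. A minimal sketch of that packing, with C11 atomics standing in for the kernel's atomic64_set()/atomic64_read():

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

union sl2vl_u64 {
        uint8_t  sl8[8];                /* VL for SL 0..7 */
        uint64_t sl64;                  /* the same eight bytes as one word */
};

static _Atomic uint64_t cached_sl2vl;   /* the driver keeps one per port */

static void publish_sl2vl(const uint8_t *tbl)
{
        union sl2vl_u64 v;
        int i;

        for (i = 0; i < 8; i++)
                v.sl8[i] = tbl[i];
        atomic_store(&cached_sl2vl, v.sl64);    /* one atomic publish */
}

int main(void)
{
        uint8_t tbl[8] = { 0, 1, 2, 3, 0, 1, 2, 3 };

        publish_sl2vl(tbl);
        printf("sl2vl word: %#llx\n",
               (unsigned long long)atomic_load(&cached_sl2vl));
        return 0;
}
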
@@ -344,12 +374,14 @@ static void node_desc_override(struct ib_device *dev,
mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
- memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
+ memcpy(((struct ib_smp *) mad)->data, dev->node_desc,
+ IB_DEVICE_NODE_DESC_MAX);
spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
}
}
-static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, const struct ib_mad *mad)
+static void forward_trap(struct mlx4_ib_dev *dev, u32 port_num,
+ const struct ib_mad *mad)
{
int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
struct ib_mad_send_buf *send_buf;
@@ -398,7 +430,7 @@ static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave
return ret;
}
-int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
+int mlx4_ib_find_real_gid(struct ib_device *ibdev, u32 port, __be64 guid)
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
int i;
@@ -412,7 +444,7 @@ int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
- u8 port, u16 pkey, u16 *ix)
+ u32 port, u16 pkey, u16 *ix)
{
int i, ret;
u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF;
@@ -452,16 +484,41 @@ static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
return -EINVAL;
}
-int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
+static int get_gids_from_l3_hdr(struct ib_grh *grh, union ib_gid *sgid,
+ union ib_gid *dgid)
+{
+ int version = ib_get_rdma_header_version((const union rdma_network_hdr *)grh);
+ enum rdma_network_type net_type;
+
+ if (version == 4)
+ net_type = RDMA_NETWORK_IPV4;
+ else if (version == 6)
+ net_type = RDMA_NETWORK_IPV6;
+ else
+ return -EINVAL;
+
+ return ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
+ sgid, dgid);
+}
+
+static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
+{
+ int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave;
+
+ return (qpn >= proxy_start && qpn <= proxy_start + 1);
+}
+
+int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u32 port,
enum ib_qp_type dest_qpt, struct ib_wc *wc,
struct ib_grh *grh, struct ib_mad *mad)
{
struct ib_sge list;
- struct ib_send_wr wr, *bad_wr;
+ struct ib_ud_wr wr;
+ const struct ib_send_wr *bad_wr;
struct mlx4_ib_demux_pv_ctx *tun_ctx;
struct mlx4_ib_demux_pv_qp *tun_qp;
struct mlx4_rcv_tunnel_mad *tun_mad;
- struct ib_ah_attr attr;
+ struct rdma_ah_attr attr;
struct ib_ah *ah;
struct ib_qp *src_qp = NULL;
unsigned tun_tx_ix = 0;
@@ -471,8 +528,10 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
u16 cached_pkey;
u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
- if (dest_qpt > IB_QPT_GSI)
+ if (dest_qpt > IB_QPT_GSI) {
+ pr_debug("dest_qpt (%d) > IB_QPT_GSI\n", dest_qpt);
return -EINVAL;
+ }
tun_ctx = dev->sriov.demux[port-1].tun[slave];
@@ -489,12 +548,20 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
if (dest_qpt) {
u16 pkey_ix;
ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
- if (ret)
+ if (ret) {
+ pr_debug("unable to get %s cached pkey for index %d, ret %d\n",
+ is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
+ wc->pkey_index, ret);
return -EINVAL;
+ }
ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix);
- if (ret)
+ if (ret) {
+ pr_debug("unable to get %s pkey ix for pkey 0x%x, ret %d\n",
+ is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
+ cached_pkey, ret);
return -EINVAL;
+ }
tun_pkey_ix = pkey_ix;
} else
tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
@@ -507,12 +574,18 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
/* create ah. Just need an empty one with the port num for the post send.
* The driver will set the force loopback bit in post_send */
memset(&attr, 0, sizeof attr);
- attr.port_num = port;
+ attr.type = rdma_ah_find_type(&dev->ib_dev, port);
+
+ rdma_ah_set_port_num(&attr, port);
if (is_eth) {
- memcpy(&attr.grh.dgid.raw[0], &grh->dgid.raw[0], 16);
- attr.ah_flags = IB_AH_GRH;
+ union ib_gid sgid;
+ union ib_gid dgid;
+
+ if (get_gids_from_l3_hdr(grh, &sgid, &dgid))
+ return -EINVAL;
+ rdma_ah_set_grh(&attr, &dgid, 0, 0, 0, 0);
}
- ah = ib_create_ah(tun_ctx->pd, &attr);
+ ah = rdma_create_ah(tun_ctx->pd, &attr, 0);
if (IS_ERR(ah))
return -ENOMEM;
@@ -525,11 +598,11 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
spin_unlock(&tun_qp->tx_lock);
if (ret)
- goto out;
+ goto end;
tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
if (tun_qp->tx_ring[tun_tx_ix].ah)
- ib_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
+ rdma_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah, 0);
tun_qp->tx_ring[tun_tx_ix].ah = ah;
ib_dma_sync_single_for_cpu(&dev->ib_dev,
tun_qp->tx_ring[tun_tx_ix].buf.map,
@@ -570,7 +643,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2);
} else {
tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
- tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
+ tun_mad->hdr.slid_mac_47_32 = ib_lid_be16(wc->slid);
}
ib_dma_sync_single_for_device(&dev->ib_dev,
@@ -582,31 +655,37 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
list.length = sizeof (struct mlx4_rcv_tunnel_mad);
list.lkey = tun_ctx->pd->local_dma_lkey;
- wr.wr.ud.ah = ah;
- wr.wr.ud.port_num = port;
- wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
- wr.wr.ud.remote_qpn = dqpn;
- wr.next = NULL;
- wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
- wr.sg_list = &list;
- wr.num_sge = 1;
- wr.opcode = IB_WR_SEND;
- wr.send_flags = IB_SEND_SIGNALED;
-
- ret = ib_post_send(src_qp, &wr, &bad_wr);
-out:
- if (ret)
- ib_destroy_ah(ah);
+ wr.ah = ah;
+ wr.port_num = port;
+ wr.remote_qkey = IB_QP_SET_QKEY;
+ wr.remote_qpn = dqpn;
+ wr.wr.next = NULL;
+ wr.wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
+ wr.wr.sg_list = &list;
+ wr.wr.num_sge = 1;
+ wr.wr.opcode = IB_WR_SEND;
+ wr.wr.send_flags = IB_SEND_SIGNALED;
+
+ ret = ib_post_send(src_qp, &wr.wr, &bad_wr);
+ if (!ret)
+ return 0;
+ out:
+ spin_lock(&tun_qp->tx_lock);
+ tun_qp->tx_ix_tail++;
+ spin_unlock(&tun_qp->tx_lock);
+ tun_qp->tx_ring[tun_tx_ix].ah = NULL;
+end:
+ rdma_destroy_ah(ah, 0);
return ret;
}
-static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
+static int mlx4_ib_demux_mad(struct ib_device *ibdev, u32 port,
struct ib_wc *wc, struct ib_grh *grh,
struct ib_mad *mad)
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
- int err;
- int slave;
+ int err, other_port;
+ int slave = -1;
u8 *slave_id;
int is_eth = 0;
@@ -616,6 +695,11 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
is_eth = 1;
if (is_eth) {
+ union ib_gid dgid;
+ union ib_gid sgid;
+
+ if (get_gids_from_l3_hdr(grh, &sgid, &dgid))
+ return -EINVAL;
if (!(wc->wc_flags & IB_WC_GRH)) {
mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
return -EINVAL;
@@ -624,7 +708,17 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
return -EINVAL;
}
- if (mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave)) {
+ err = mlx4_get_slave_from_roce_gid(dev->dev, port, dgid.raw, &slave);
+ if (err && mlx4_is_mf_bonded(dev->dev)) {
+ other_port = (port == 1) ? 2 : 1;
+ err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, dgid.raw, &slave);
+ if (!err) {
+ port = other_port;
+ pr_debug("resolved slave %d from gid %pI6 wire port %d other %d\n",
+ slave, grh->dgid.raw, port, other_port);
+ }
+ }
+ if (err) {
mlx4_ib_warn(ibdev, "failed matching grh\n");
return -ENOENT;
}
@@ -639,7 +733,8 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
if (err)
- pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
+ pr_debug("failed sending %s to slave %d via tunnel qp (%d)\n",
+ is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
slave, err);
return 0;
}
@@ -657,10 +752,18 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
/* If a grh is present, we demux according to it */
if (wc->wc_flags & IB_WC_GRH) {
- slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
- if (slave < 0) {
- mlx4_ib_warn(ibdev, "failed matching grh\n");
- return -ENOENT;
+ if (grh->dgid.global.interface_id ==
+ cpu_to_be64(IB_SA_WELL_KNOWN_GUID) &&
+ grh->dgid.global.subnet_prefix == cpu_to_be64(
+ atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix))) {
+ slave = 0;
+ } else {
+ slave = mlx4_ib_find_real_gid(ibdev, port,
+ grh->dgid.global.interface_id);
+ if (slave < 0) {
+ mlx4_ib_warn(ibdev, "failed matching grh\n");
+ return -ENOENT;
+ }
}
}
/* Class-specific handling */
@@ -710,12 +813,13 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
if (err)
- pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
+ pr_debug("failed sending %s to slave %d via tunnel qp (%d)\n",
+ is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
slave, err);
return 0;
}
-static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
@@ -723,26 +827,7 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
int err;
struct ib_port_attr pattr;
- if (in_wc && in_wc->qp->qp_num) {
- pr_debug("received MAD: slid:%d sqpn:%d "
- "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
- in_wc->slid, in_wc->src_qp,
- in_wc->dlid_path_bits,
- in_wc->qp->qp_num,
- in_wc->wc_flags,
- in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
- be16_to_cpu(in_mad->mad_hdr.attr_id));
- if (in_wc->wc_flags & IB_WC_GRH) {
- pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
- be64_to_cpu(in_grh->sgid.global.subnet_prefix),
- be64_to_cpu(in_grh->sgid.global.interface_id));
- pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
- be64_to_cpu(in_grh->dgid.global.subnet_prefix),
- be64_to_cpu(in_grh->dgid.global.interface_id));
- }
- }
-
- slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
+ slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
forward_trap(to_mdev(ibdev), port_num, in_mad);
@@ -776,7 +861,7 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
!ib_query_port(ibdev, port_num, &pattr))
- prev_lid = pattr.lid;
+ prev_lid = ib_lid_cpu16(pattr.lid);
err = mlx4_MAD_IFC(to_mdev(ibdev),
(mad_flags & IB_MAD_IGNORE_MKEY ? MLX4_MAD_IFC_IGNORE_MKEY : 0) |
@@ -787,8 +872,7 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
return IB_MAD_RESULT_FAILURE;
if (!out_mad->mad_hdr.status) {
- if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
- smp_snoop(ibdev, port_num, in_mad, prev_lid);
+ smp_snoop(ibdev, port_num, in_mad, prev_lid);
/* slaves get node desc from FW */
if (!mlx4_is_slave(to_mdev(ibdev)->dev))
node_desc_override(ibdev, out_mad);
@@ -805,42 +889,88 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
-static void edit_counter(struct mlx4_counter *cnt,
- struct ib_pma_portcounters *pma_cnt)
+static void edit_counter(struct mlx4_counter *cnt, void *counters,
+ __be16 attr_id)
+{
+ switch (attr_id) {
+ case IB_PMA_PORT_COUNTERS:
+ {
+ struct ib_pma_portcounters *pma_cnt =
+ (struct ib_pma_portcounters *)counters;
+
+ ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
+ (be64_to_cpu(cnt->tx_bytes) >> 2));
+ ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
+ (be64_to_cpu(cnt->rx_bytes) >> 2));
+ ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
+ be64_to_cpu(cnt->tx_frames));
+ ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
+ be64_to_cpu(cnt->rx_frames));
+ break;
+ }
+ case IB_PMA_PORT_COUNTERS_EXT:
+ {
+ struct ib_pma_portcounters_ext *pma_cnt_ext =
+ (struct ib_pma_portcounters_ext *)counters;
+
+ pma_cnt_ext->port_xmit_data =
+ cpu_to_be64(be64_to_cpu(cnt->tx_bytes) >> 2);
+ pma_cnt_ext->port_rcv_data =
+ cpu_to_be64(be64_to_cpu(cnt->rx_bytes) >> 2);
+ pma_cnt_ext->port_xmit_packets = cnt->tx_frames;
+ pma_cnt_ext->port_rcv_packets = cnt->rx_frames;
+ break;
+ }
+ }
+}
+
+static int iboe_process_mad_port_info(void *out_mad)
{
- ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
- (be64_to_cpu(cnt->tx_bytes) >> 2));
- ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
- (be64_to_cpu(cnt->rx_bytes) >> 2));
- ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
- be64_to_cpu(cnt->tx_frames));
- ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
- be64_to_cpu(cnt->rx_frames));
+ struct ib_class_port_info cpi = {};
+
+ cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+ memcpy(out_mad, &cpi, sizeof(cpi));
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
-static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
- const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad *in_mad, struct ib_mad *out_mad)
+static int iboe_process_mad(struct ib_device *ibdev, int mad_flags,
+ u32 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh,
+ const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
struct mlx4_counter counter_stats;
struct mlx4_ib_dev *dev = to_mdev(ibdev);
- int err;
+ struct counter_index *tmp_counter;
+ int err = IB_MAD_RESULT_FAILURE, stats_avail = 0;
if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
return -EINVAL;
+ if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)
+ return iboe_process_mad_port_info((void *)(out_mad->data + 40));
+
memset(&counter_stats, 0, sizeof(counter_stats));
- err = mlx4_get_counter_stats(dev->dev,
- dev->counters[port_num - 1].index,
- &counter_stats, 0);
- if (err)
- err = IB_MAD_RESULT_FAILURE;
- else {
- memset(out_mad->data, 0, sizeof out_mad->data);
+ mutex_lock(&dev->counters_table[port_num - 1].mutex);
+ list_for_each_entry(tmp_counter,
+ &dev->counters_table[port_num - 1].counters_list,
+ list) {
+ err = mlx4_get_counter_stats(dev->dev,
+ tmp_counter->index,
+ &counter_stats, 0);
+ if (err) {
+ err = IB_MAD_RESULT_FAILURE;
+ stats_avail = 0;
+ break;
+ }
+ stats_avail = 1;
+ }
+ mutex_unlock(&dev->counters_table[port_num - 1].mutex);
+ if (stats_avail) {
switch (counter_stats.counter_mode & 0xf) {
case 0:
edit_counter(&counter_stats,
- (void *)(out_mad->data + 40));
+ (void *)(out_mad->data + 40),
+ in_mad->mad_hdr.attr_id);
err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
break;
default:
@@ -851,38 +981,33 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
return err;
}
-int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
-
/* iboe_process_mad() which uses the HCA flow-counters to implement IB PMA
* queries, should be called only by VFs and for that specific purpose
*/
if (link == IB_LINK_LAYER_INFINIBAND) {
if (mlx4_is_slave(dev->dev) &&
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
- in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS)
- return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
- in_grh, in_mad, out_mad);
+ (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+ (in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
+ in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
+ in->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
+ return iboe_process_mad(ibdev, mad_flags, port_num,
+ in_wc, in_grh, in, out);
- return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
- in_grh, in_mad, out_mad);
+ return ib_process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
+ in, out);
}
if (link == IB_LINK_LAYER_ETHERNET)
return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
- in_grh, in_mad, out_mad);
+ in_grh, in, out);
return -EINVAL;
}
@@ -891,7 +1016,7 @@ static void send_handler(struct ib_mad_agent *agent,
struct ib_mad_send_wc *mad_send_wc)
{
if (mad_send_wc->send_buf->context[0])
- ib_destroy_ah(mad_send_wc->send_buf->context[0]);
+ rdma_destroy_ah(mad_send_wc->send_buf->context[0], 0);
ib_free_send_mad(mad_send_wc->send_buf);
}
@@ -946,11 +1071,11 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
}
if (dev->sm_ah[p])
- ib_destroy_ah(dev->sm_ah[p]);
+ rdma_destroy_ah(dev->sm_ah[p], 0);
}
}
-static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
+static void handle_lid_change_event(struct mlx4_ib_dev *dev, u32 port_num)
{
mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE);
@@ -959,7 +1084,7 @@ static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
MLX4_EQ_PORT_INFO_LID_CHANGE_MASK);
}
-static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
+static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u32 port_num)
{
/* re-configure the alias-guid and mcg's */
if (mlx4_is_master(dev->dev)) {
@@ -971,6 +1096,23 @@ static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK);
}
}
+
+ /* Update the sl to vl table from inside client rereg
+ * only if in secure-host mode (snooping is not possible)
+ * and the sl-to-vl change event is not generated by FW.
+ */
+ if (!mlx4_is_slave(dev->dev) &&
+ dev->dev->flags & MLX4_FLAG_SECURE_HOST &&
+ !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)) {
+ if (mlx4_is_master(dev->dev))
+ /* already in work queue from mlx4_ib_event queueing
+ * mlx4_handle_port_mgmt_change_event, which calls
+ * this procedure. Therefore, call sl2vl_update directly.
+ */
+ mlx4_ib_sl2vl_update(dev, port_num);
+ else
+ mlx4_sched_ib_sl2vl_update_work(dev, port_num);
+ }
mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
}
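
The comment in the hunk above explains why the master calls mlx4_ib_sl2vl_update() directly instead of scheduling it: the client-rereg handler is already running from the work queue that would service the deferred update, so re-queueing would only add latency. A trivial sketch of that dispatch choice (hypothetical names, not driver code):

#include <stdbool.h>
#include <stdio.h>

static void sl2vl_update(int port)
{
        printf("updating sl2vl for port %d now\n", port);
}

static void schedule_sl2vl_work(int port)
{
        printf("queueing sl2vl update for port %d\n", port);
}

/* in_worker: already executing on the queue that would run the update,
 * so calling the handler inline avoids a pointless round trip. */
static void request_sl2vl_update(int port, bool in_worker)
{
        if (in_worker)
                sl2vl_update(port);
        else
                schedule_sl2vl_work(port);
}

int main(void)
{
        request_sl2vl_update(1, true);
        request_sl2vl_update(2, false);
        return 0;
}
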
@@ -981,7 +1123,7 @@ static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
GET_MASK_FROM_EQE(eqe));
}
-static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
+static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u32 port_num,
u32 guid_tbl_blk_num, u32 change_bitmap)
{
struct ib_smp *in_mad = NULL;
@@ -993,10 +1135,8 @@ static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
- if (!in_mad || !out_mad) {
- mlx4_ib_warn(&dev->ib_dev, "failed to allocate memory for guid info mads\n");
+ if (!in_mad || !out_mad)
goto out;
- }
guid_tbl_blk_num *= 4;
@@ -1039,7 +1179,7 @@ void handle_port_mgmt_change_event(struct work_struct *work)
struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
struct mlx4_ib_dev *dev = ew->ib_dev;
struct mlx4_eqe *eqe = &(ew->ib_eqe);
- u8 port = eqe->event.port_mgmt_change.port;
+ u32 port = eqe->event.port_mgmt_change.port;
u32 changed_attr;
u32 tbl_block;
u32 change_bitmap;
@@ -1062,6 +1202,27 @@ void handle_port_mgmt_change_event(struct work_struct *work)
/* Generate GUID changed event */
if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
+ if (mlx4_is_master(dev->dev)) {
+ union ib_gid gid;
+ int err = 0;
+
+ if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix)
+ err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1);
+ else
+ gid.global.subnet_prefix =
+ eqe->event.port_mgmt_change.params.port_info.gid_prefix;
+ if (err) {
+ pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n",
+ port, err);
+ } else {
+ pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. new=0x%llx\n",
+ port,
+ (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix),
+ be64_to_cpu(gid.global.subnet_prefix));
+ atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix,
+ be64_to_cpu(gid.global.subnet_prefix));
+ }
+ }
mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
/*if master, notify all slaves*/
if (mlx4_is_master(dev->dev))
@@ -1089,6 +1250,24 @@ void handle_port_mgmt_change_event(struct work_struct *work)
handle_slaves_guid_change(dev, port, tbl_block, change_bitmap);
}
break;
+
+ case MLX4_DEV_PMC_SUBTYPE_SL_TO_VL_MAP:
+ /* cache sl to vl mapping changes for use in
+ * filling QP1 LRH VL field when sending packets
+ */
+ if (!mlx4_is_slave(dev->dev)) {
+ union sl2vl_tbl_to_u64 sl2vl64;
+ int jj;
+
+ for (jj = 0; jj < 8; jj++) {
+ sl2vl64.sl8[jj] =
+ eqe->event.port_mgmt_change.params.sl2vl_tbl_change_info.sl2vl_table[jj];
+ pr_debug("port %u, sl2vl[%d] = %02x\n",
+ port, jj, sl2vl64.sl8[jj]);
+ }
+ atomic64_set(&dev->sl2vl[port - 1], sl2vl64.sl64);
+ }
+ break;
default:
pr_warn("Unsupported subtype 0x%x for "
"Port Management Change event\n", eqe->subtype);
@@ -1097,7 +1276,7 @@ void handle_port_mgmt_change_event(struct work_struct *work)
kfree(ew);
}
-void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
+void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u32 port_num,
enum ib_event_type type)
{
struct ib_event event;
@@ -1120,12 +1299,25 @@ static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
+static void mlx4_ib_wire_comp_handler(struct ib_cq *cq, void *arg)
+{
+ unsigned long flags;
+ struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
+ struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
+
+ spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
+ if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
+ queue_work(ctx->wi_wq, &ctx->work);
+ spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
+}
+
static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
struct mlx4_ib_demux_pv_qp *tun_qp,
int index)
{
struct ib_sge sg_list;
- struct ib_recv_wr recv_wr, *bad_recv_wr;
+ struct ib_recv_wr recv_wr;
+ const struct ib_recv_wr *bad_recv_wr;
int size;
size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
@@ -1161,32 +1353,23 @@ static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
return ret;
}
-static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
-{
- int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave;
-
- return (qpn >= proxy_start && qpn <= proxy_start + 1);
-}
-
-
-int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
+int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u32 port,
enum ib_qp_type dest_qpt, u16 pkey_index,
- u32 remote_qpn, u32 qkey, struct ib_ah_attr *attr,
- u8 *s_mac, struct ib_mad *mad)
+ u32 remote_qpn, u32 qkey, struct rdma_ah_attr *attr,
+ u8 *s_mac, u16 vlan_id, struct ib_mad *mad)
{
struct ib_sge list;
- struct ib_send_wr wr, *bad_wr;
+ struct ib_ud_wr wr;
+ const struct ib_send_wr *bad_wr;
struct mlx4_ib_demux_pv_ctx *sqp_ctx;
struct mlx4_ib_demux_pv_qp *sqp;
struct mlx4_mad_snd_buf *sqp_mad;
struct ib_ah *ah;
struct ib_qp *send_qp = NULL;
unsigned wire_tx_ix = 0;
- int ret = 0;
u16 wire_pkey_ix;
int src_qpnum;
- u8 sgid_index;
-
+ int ret;
sqp_ctx = dev->sriov.sqps[port-1];
@@ -1206,29 +1389,32 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
send_qp = sqp->qp;
- /* create ah */
- sgid_index = attr->grh.sgid_index;
- attr->grh.sgid_index = 0;
- ah = ib_create_ah(sqp_ctx->pd, attr);
- if (IS_ERR(ah))
+ ah = rdma_zalloc_drv_obj(sqp_ctx->pd->device, ib_ah);
+ if (!ah)
return -ENOMEM;
- attr->grh.sgid_index = sgid_index;
- to_mah(ah)->av.ib.gid_index = sgid_index;
- /* get rid of force-loopback bit */
- to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
+
+ ah->device = sqp_ctx->pd->device;
+ ah->pd = sqp_ctx->pd;
+
+ /* create ah */
+ ret = mlx4_ib_create_ah_slave(ah, attr,
+ rdma_ah_retrieve_grh(attr)->sgid_index,
+ s_mac, vlan_id);
+ if (ret)
+ goto out;
+
spin_lock(&sqp->tx_lock);
if (sqp->tx_ix_head - sqp->tx_ix_tail >=
- (MLX4_NUM_TUNNEL_BUFS - 1))
+ (MLX4_NUM_WIRE_BUFS - 1))
ret = -EAGAIN;
else
- wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
+ wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_WIRE_BUFS - 1);
spin_unlock(&sqp->tx_lock);
if (ret)
goto out;
sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
- if (sqp->tx_ring[wire_tx_ix].ah)
- ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah);
+ kfree(sqp->tx_ring[wire_tx_ix].ah);
sqp->tx_ring[wire_tx_ix].ah = ah;
ib_dma_sync_single_for_cpu(&dev->ib_dev,
sqp->tx_ring[wire_tx_ix].buf.map,
@@ -1246,25 +1432,28 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
list.length = sizeof (struct mlx4_mad_snd_buf);
list.lkey = sqp_ctx->pd->local_dma_lkey;
- wr.wr.ud.ah = ah;
- wr.wr.ud.port_num = port;
- wr.wr.ud.pkey_index = wire_pkey_ix;
- wr.wr.ud.remote_qkey = qkey;
- wr.wr.ud.remote_qpn = remote_qpn;
- wr.next = NULL;
- wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
- wr.sg_list = &list;
- wr.num_sge = 1;
- wr.opcode = IB_WR_SEND;
- wr.send_flags = IB_SEND_SIGNALED;
- if (s_mac)
- memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6);
-
-
- ret = ib_post_send(send_qp, &wr, &bad_wr);
+ wr.ah = ah;
+ wr.port_num = port;
+ wr.pkey_index = wire_pkey_ix;
+ wr.remote_qkey = qkey;
+ wr.remote_qpn = remote_qpn;
+ wr.wr.next = NULL;
+ wr.wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
+ wr.wr.sg_list = &list;
+ wr.wr.num_sge = 1;
+ wr.wr.opcode = IB_WR_SEND;
+ wr.wr.send_flags = IB_SEND_SIGNALED;
+
+ ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
+ if (!ret)
+ return 0;
+
+ spin_lock(&sqp->tx_lock);
+ sqp->tx_ix_tail++;
+ spin_unlock(&sqp->tx_lock);
+ sqp->tx_ring[wire_tx_ix].ah = NULL;
out:
- if (ret)
- ib_destroy_ah(ah);
+ kfree(ah);
return ret;
}
@@ -1276,12 +1465,13 @@ static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port)
}
static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port,
- struct ib_ah_attr *ah_attr)
+ struct rdma_ah_attr *ah_attr)
{
+ struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
- ah_attr->grh.sgid_index = slave;
+ grh->sgid_index = slave;
else
- ah_attr->grh.sgid_index += get_slave_base_gid_ix(dev, slave, port);
+ grh->sgid_index += get_slave_base_gid_ix(dev, slave, port);
}
static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
@@ -1291,10 +1481,14 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1);
struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
struct mlx4_ib_ah ah;
- struct ib_ah_attr ah_attr;
+ struct rdma_ah_attr ah_attr;
u8 *slave_id;
int slave;
int port;
+ u16 vlan_id;
+ u8 qos;
+ u8 *dmac;
+ int sts;
/* Get slave that sent this packet */
if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
@@ -1331,6 +1525,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
return;
} else
*slave_id = slave;
+ break;
default:
/* nothing */;
}
@@ -1377,24 +1572,31 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
if (port < 0)
return;
ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));
+ ah.ibah.type = rdma_ah_find_type(&dev->ib_dev, port);
mlx4_ib_query_ah(&ah.ibah, &ah_attr);
- if (ah_attr.ah_flags & IB_AH_GRH)
+ if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH)
fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);
-
- memcpy(ah_attr.dmac, tunnel->hdr.mac, 6);
- ah_attr.vlan_id = be16_to_cpu(tunnel->hdr.vlan);
+ dmac = rdma_ah_retrieve_dmac(&ah_attr);
+ if (dmac)
+ memcpy(dmac, tunnel->hdr.mac, ETH_ALEN);
+ vlan_id = be16_to_cpu(tunnel->hdr.vlan);
/* if slave have default vlan use it */
- mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
- &ah_attr.vlan_id, &ah_attr.sl);
-
- mlx4_ib_send_to_wire(dev, slave, ctx->port,
- is_proxy_qp0(dev, wc->src_qp, slave) ?
- IB_QPT_SMI : IB_QPT_GSI,
- be16_to_cpu(tunnel->hdr.pkey_index),
- be32_to_cpu(tunnel->hdr.remote_qpn),
- be32_to_cpu(tunnel->hdr.qkey),
- &ah_attr, wc->smac, &tunnel->mad);
+ if (mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
+ &vlan_id, &qos))
+ rdma_ah_set_sl(&ah_attr, qos);
+
+ sts = mlx4_ib_send_to_wire(dev, slave, ctx->port,
+ is_proxy_qp0(dev, wc->src_qp, slave) ?
+ IB_QPT_SMI : IB_QPT_GSI,
+ be16_to_cpu(tunnel->hdr.pkey_index),
+ be32_to_cpu(tunnel->hdr.remote_qpn),
+ be32_to_cpu(tunnel->hdr.qkey),
+ &ah_attr, wc->smac, vlan_id, &tunnel->mad);
+ if (sts)
+ pr_debug("failed sending %s to wire on behalf of slave %d (%d)\n",
+ is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
+ slave, sts);
}
static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
@@ -1403,18 +1605,20 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
int i;
struct mlx4_ib_demux_pv_qp *tun_qp;
int rx_buf_size, tx_buf_size;
+ const int nmbr_bufs = is_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS;
if (qp_type > IB_QPT_GSI)
return -EINVAL;
tun_qp = &ctx->qp[qp_type];
- tun_qp->ring = kzalloc(sizeof (struct mlx4_ib_buf) * MLX4_NUM_TUNNEL_BUFS,
+ tun_qp->ring = kcalloc(nmbr_bufs,
+ sizeof(struct mlx4_ib_buf),
GFP_KERNEL);
if (!tun_qp->ring)
return -ENOMEM;
- tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
+ tun_qp->tx_ring = kcalloc(nmbr_bufs,
sizeof (struct mlx4_ib_tun_tx_buf),
GFP_KERNEL);
if (!tun_qp->tx_ring) {
@@ -1431,7 +1635,7 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
}
- for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
+ for (i = 0; i < nmbr_bufs; i++) {
tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
if (!tun_qp->ring[i].addr)
goto err;
@@ -1445,7 +1649,7 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
}
}
- for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
+ for (i = 0; i < nmbr_bufs; i++) {
tun_qp->tx_ring[i].buf.addr =
kmalloc(tx_buf_size, GFP_KERNEL);
if (!tun_qp->tx_ring[i].buf.addr)
@@ -1476,9 +1680,7 @@ tx_err:
tx_buf_size, DMA_TO_DEVICE);
kfree(tun_qp->tx_ring[i].buf.addr);
}
- kfree(tun_qp->tx_ring);
- tun_qp->tx_ring = NULL;
- i = MLX4_NUM_TUNNEL_BUFS;
+ i = nmbr_bufs;
err:
while (i > 0) {
--i;
@@ -1486,6 +1688,8 @@ err:
rx_buf_size, DMA_FROM_DEVICE);
kfree(tun_qp->ring[i].addr);
}
+ kfree(tun_qp->tx_ring);
+ tun_qp->tx_ring = NULL;
kfree(tun_qp->ring);
tun_qp->ring = NULL;
return -ENOMEM;
@@ -1497,6 +1701,7 @@ static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
int i;
struct mlx4_ib_demux_pv_qp *tun_qp;
int rx_buf_size, tx_buf_size;
+ const int nmbr_bufs = is_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS;
if (qp_type > IB_QPT_GSI)
return;
@@ -1511,18 +1716,18 @@ static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
}
- for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
+ for (i = 0; i < nmbr_bufs; i++) {
ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
rx_buf_size, DMA_FROM_DEVICE);
kfree(tun_qp->ring[i].addr);
}
- for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
+ for (i = 0; i < nmbr_bufs; i++) {
ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
tx_buf_size, DMA_TO_DEVICE);
kfree(tun_qp->tx_ring[i].buf.addr);
if (tun_qp->tx_ring[i].ah)
- ib_destroy_ah(tun_qp->tx_ring[i].ah);
+ rdma_destroy_ah(tun_qp->tx_ring[i].ah, 0);
}
kfree(tun_qp->tx_ring);
kfree(tun_qp->ring);
@@ -1551,11 +1756,8 @@ static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
"buf:%lld\n", wc.wr_id);
break;
case IB_WC_SEND:
- pr_debug("received tunnel send completion:"
- "wrid=0x%llx, status=0x%x\n",
- wc.wr_id, wc.status);
- ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
- (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
+ rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
+ (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
= NULL;
spin_lock(&tun_qp->tx_lock);
@@ -1571,8 +1773,8 @@ static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
" status = %d, wrid = 0x%llx\n",
ctx->slave, wc.status, wc.wr_id);
if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
- ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
- (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
+ rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
+ (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
= NULL;
spin_lock(&tun_qp->tx_lock);
@@ -1600,6 +1802,7 @@ static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
struct mlx4_ib_qp_tunnel_init_attr qp_init_attr;
struct ib_qp_attr attr;
int qp_attr_mask_INIT;
+ const int nmbr_bufs = create_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS;
if (qp_type > IB_QPT_GSI)
return -EINVAL;
@@ -1610,8 +1813,8 @@ static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
qp_init_attr.init_attr.send_cq = ctx->cq;
qp_init_attr.init_attr.recv_cq = ctx->cq;
qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
- qp_init_attr.init_attr.cap.max_send_wr = MLX4_NUM_TUNNEL_BUFS;
- qp_init_attr.init_attr.cap.max_recv_wr = MLX4_NUM_TUNNEL_BUFS;
+ qp_init_attr.init_attr.cap.max_send_wr = nmbr_bufs;
+ qp_init_attr.init_attr.cap.max_recv_wr = nmbr_bufs;
qp_init_attr.init_attr.cap.max_send_sge = 1;
qp_init_attr.init_attr.cap.max_recv_sge = 1;
if (create_tun) {
@@ -1633,9 +1836,9 @@ static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
if (IS_ERR(tun_qp->qp)) {
ret = PTR_ERR(tun_qp->qp);
+ pr_err("Couldn't create %s QP (%pe)\n",
+ create_tun ? "tunnel" : "special", tun_qp->qp);
tun_qp->qp = NULL;
- pr_err("Couldn't create %s QP (%d)\n",
- create_tun ? "tunnel" : "special", ret);
return ret;
}
@@ -1673,7 +1876,7 @@ static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
goto err_qp;
}
- for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
+ for (i = 0; i < nmbr_bufs; i++) {
ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
if (ret) {
pr_err(" mlx4_ib_post_pv_buf error"
@@ -1708,9 +1911,9 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
if (wc.status == IB_WC_SUCCESS) {
switch (wc.opcode) {
case IB_WC_SEND:
- ib_destroy_ah(sqp->tx_ring[wc.wr_id &
- (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
- sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
+ kfree(sqp->tx_ring[wc.wr_id &
+ (MLX4_NUM_WIRE_BUFS - 1)].ah);
+ sqp->tx_ring[wc.wr_id & (MLX4_NUM_WIRE_BUFS - 1)].ah
= NULL;
spin_lock(&sqp->tx_lock);
sqp->tx_ix_tail++;
@@ -1719,18 +1922,17 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
case IB_WC_RECV:
mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *)
(sqp->ring[wc.wr_id &
- (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->payload);
+ (MLX4_NUM_WIRE_BUFS - 1)].addr))->payload);
grh = &(((struct mlx4_mad_rcv_buf *)
(sqp->ring[wc.wr_id &
- (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->grh);
+ (MLX4_NUM_WIRE_BUFS - 1)].addr))->grh);
mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);
if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
- (MLX4_NUM_TUNNEL_BUFS - 1)))
+ (MLX4_NUM_WIRE_BUFS - 1)))
pr_err("Failed reposting SQP "
"buf:%lld\n", wc.wr_id);
break;
default:
- BUG_ON(1);
break;
}
} else {
@@ -1738,9 +1940,9 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
" status = %d, wrid = 0x%llx\n",
ctx->slave, wc.status, wc.wr_id);
if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
- ib_destroy_ah(sqp->tx_ring[wc.wr_id &
- (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
- sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
+ kfree(sqp->tx_ring[wc.wr_id &
+ (MLX4_NUM_WIRE_BUFS - 1)].ah);
+ sqp->tx_ring[wc.wr_id & (MLX4_NUM_WIRE_BUFS - 1)].ah
= NULL;
spin_lock(&sqp->tx_lock);
sqp->tx_ix_tail++;
@@ -1757,11 +1959,8 @@ static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
*ret_ctx = NULL;
ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
- if (!ctx) {
- pr_err("failed allocating pv resource context "
- "for port %d, slave %d\n", port, slave);
+ if (!ctx)
return -ENOMEM;
- }
ctx->ib_dev = &dev->ib_dev;
ctx->port = port;
@@ -1783,6 +1982,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
{
int ret, cq_size;
struct ib_cq_init_attr cq_attr = {};
+ const int nmbr_bufs = create_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS;
if (ctx->state != DEMUX_PV_STATE_DOWN)
return -EEXIST;
@@ -1807,23 +2007,24 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
goto err_out_qp0;
}
- cq_size = 2 * MLX4_NUM_TUNNEL_BUFS;
+ cq_size = 2 * nmbr_bufs;
if (ctx->has_smi)
cq_size *= 2;
cq_attr.cqe = cq_size;
- ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
+ ctx->cq = ib_create_cq(ctx->ib_dev,
+ create_tun ? mlx4_ib_tunnel_comp_handler : mlx4_ib_wire_comp_handler,
NULL, ctx, &cq_attr);
if (IS_ERR(ctx->cq)) {
ret = PTR_ERR(ctx->cq);
- pr_err("Couldn't create tunnel CQ (%d)\n", ret);
+ pr_err("Couldn't create tunnel CQ (%pe)\n", ctx->cq);
goto err_buf;
}
- ctx->pd = ib_alloc_pd(ctx->ib_dev);
+ ctx->pd = ib_alloc_pd(ctx->ib_dev, 0);
if (IS_ERR(ctx->pd)) {
ret = PTR_ERR(ctx->pd);
- pr_err("Couldn't create tunnel PD (%d)\n", ret);
+ pr_err("Couldn't create tunnel PD (%pe)\n", ctx->pd);
goto err_cq;
}
@@ -1849,6 +2050,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
+ ctx->wi_wq = to_mdev(ibdev)->sriov.demux[port - 1].wi_wq;
ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
if (ret) {
@@ -1956,7 +2158,6 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
struct mlx4_ib_demux_ctx *ctx,
int port)
{
- char name[12];
int ret = 0;
int i;
@@ -1992,16 +2193,21 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
goto err_mcg;
}
- snprintf(name, sizeof name, "mlx4_ibt%d", port);
- ctx->wq = create_singlethread_workqueue(name);
+ ctx->wq = alloc_ordered_workqueue("mlx4_ibt%d", WQ_MEM_RECLAIM, port);
if (!ctx->wq) {
pr_err("Failed to create tunnelling WQ for port %d\n", port);
ret = -ENOMEM;
goto err_wq;
}
- snprintf(name, sizeof name, "mlx4_ibud%d", port);
- ctx->ud_wq = create_singlethread_workqueue(name);
+ ctx->wi_wq = alloc_ordered_workqueue("mlx4_ibwi%d", WQ_MEM_RECLAIM, port);
+ if (!ctx->wi_wq) {
+ pr_err("Failed to create wire WQ for port %d\n", port);
+ ret = -ENOMEM;
+ goto err_wiwq;
+ }
+
+ ctx->ud_wq = alloc_ordered_workqueue("mlx4_ibud%d", WQ_MEM_RECLAIM, port);
if (!ctx->ud_wq) {
pr_err("Failed to create up/down WQ for port %d\n", port);
ret = -ENOMEM;
@@ -2011,6 +2217,10 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
return 0;
err_udwq:
+ destroy_workqueue(ctx->wi_wq);
+ ctx->wi_wq = NULL;
+
+err_wiwq:
destroy_workqueue(ctx->wq);
ctx->wq = NULL;
@@ -2058,12 +2268,14 @@ static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
}
flush_workqueue(ctx->wq);
+ flush_workqueue(ctx->wi_wq);
for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
free_pv_object(dev, i, ctx->port);
}
kfree(ctx->tun);
destroy_workqueue(ctx->ud_wq);
+ destroy_workqueue(ctx->wi_wq);
destroy_workqueue(ctx->wq);
}
}
@@ -2125,6 +2337,8 @@ int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
if (err)
goto demux_err;
dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
+ atomic64_set(&dev->sriov.demux[i].subnet_prefix,
+ be64_to_cpu(gid.global.subnet_prefix));
err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
&dev->sriov.sqps[i]);
if (err)
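
The reworked edit_counter() earlier in this mad.c diff fills both PMA attributes from the same HCA flow counters: data counters are reported in units of 32-bit words (hence the >> 2 applied to the byte totals), the classic IB_PMA_PORT_COUNTERS fields are kept within 32 bits by ASSIGN_32BIT_COUNTER, and the _EXT attribute carries full 64-bit values. A small sketch of that conversion (plain C, hypothetical helper names):

#include <stdint.h>
#include <stdio.h>

/* classic PMA data counter: 32 bits wide, saturates instead of wrapping */
static uint32_t pma_data_32(uint64_t bytes)
{
        uint64_t words = bytes >> 2;            /* octets -> 32-bit words */

        return words > UINT32_MAX ? UINT32_MAX : (uint32_t)words;
}

/* extended PMA data counter: full 64-bit value, same unit conversion */
static uint64_t pma_data_ext(uint64_t bytes)
{
        return bytes >> 2;
}

int main(void)
{
        printf("8 GiB  -> %u / %llu\n", pma_data_32(8ULL << 30),
               (unsigned long long)pma_data_ext(8ULL << 30));
        printf("64 GiB -> %u / %llu\n", pma_data_32(64ULL << 30),
               (unsigned long long)pma_data_ext(64ULL << 30));
        return 0;
}
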
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index efecdf0216d8..dd35e03402ab 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -39,8 +39,12 @@
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/task.h>
+
#include <net/ipv6.h>
#include <net/addrconf.h>
+#include <net/devlink.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
@@ -54,11 +58,10 @@
#include <linux/mlx4/qp.h>
#include "mlx4_ib.h"
-#include "user.h"
+#include <rdma/mlx4-abi.h>
#define DRV_NAME MLX4_IB_DRV_NAME
-#define DRV_VERSION "2.2-1"
-#define DRV_RELDATE "Feb 2014"
+#define DRV_VERSION "4.0-0"
#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
@@ -67,7 +70,6 @@
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
@@ -75,20 +77,16 @@ MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_ass
static const char mlx4_ib_version[] =
DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
- DRV_VERSION " (" DRV_RELDATE ")\n";
+ DRV_VERSION "\n";
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
+static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
+ u32 port_num);
+static int mlx4_ib_event(struct notifier_block *this, unsigned long event,
+ void *param);
static struct workqueue_struct *wq;
-static void init_query_mad(struct ib_smp *mad)
-{
- mad->base_version = 1;
- mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- mad->class_version = 1;
- mad->method = IB_MGMT_METHOD_GET;
-}
-
static int check_flow_steering_support(struct mlx4_dev *dev)
{
int eth_num_ports = 0;
@@ -125,17 +123,20 @@ static int num_ib_ports(struct mlx4_dev *dev)
return ib_ports;
}
-static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
+static struct net_device *mlx4_ib_get_netdev(struct ib_device *device,
+ u32 port_num)
{
struct mlx4_ib_dev *ibdev = to_mdev(device);
- struct net_device *dev;
+ struct net_device *dev, *ret = NULL;
rcu_read_lock();
- dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);
+ for_each_netdev_rcu(&init_net, dev) {
+ if (dev->dev.parent != ibdev->ib_dev.dev.parent ||
+ dev->dev_port + 1 != port_num)
+ continue;
- if (dev) {
if (mlx4_is_bonded(ibdev->dev)) {
- struct net_device *upper = NULL;
+ struct net_device *upper;
upper = netdev_master_upper_dev_get_rcu(dev);
if (upper) {
@@ -146,17 +147,19 @@ static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_n
dev = active;
}
}
- }
- if (dev)
+
dev_hold(dev);
+ ret = dev;
+ break;
+ }
rcu_read_unlock();
- return dev;
+ return ret;
}
-static int mlx4_ib_update_gids(struct gid_entry *gids,
- struct mlx4_ib_dev *ibdev,
- u8 port_num)
+static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
+ struct mlx4_ib_dev *ibdev,
+ u32 port_num)
{
struct mlx4_cmd_mailbox *mailbox;
int err;
@@ -187,39 +190,104 @@ static int mlx4_ib_update_gids(struct gid_entry *gids,
return err;
}
-static int mlx4_ib_add_gid(struct ib_device *device,
- u8 port_num,
- unsigned int index,
- const union ib_gid *gid,
- const struct ib_gid_attr *attr,
- void **context)
+static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
+ struct mlx4_ib_dev *ibdev,
+ u32 port_num)
{
- struct mlx4_ib_dev *ibdev = to_mdev(device);
+ struct mlx4_cmd_mailbox *mailbox;
+ int err;
+ struct mlx4_dev *dev = ibdev->dev;
+ int i;
+ struct {
+ union ib_gid gid;
+ __be32 rsrvd1[2];
+ __be16 rsrvd2;
+ u8 type;
+ u8 version;
+ __be32 rsrvd3;
+ } *gid_tbl;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return -ENOMEM;
+
+ gid_tbl = mailbox->buf;
+ for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
+ memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
+ if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
+ gid_tbl[i].version = 2;
+ if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
+ gid_tbl[i].type = 1;
+ }
+ }
+
+ err = mlx4_cmd(dev, mailbox->dma,
+ MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
+ 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
+ if (mlx4_is_bonded(dev))
+ err += mlx4_cmd(dev, mailbox->dma,
+ MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
+ 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+static int mlx4_ib_update_gids(struct gid_entry *gids,
+ struct mlx4_ib_dev *ibdev,
+ u32 port_num)
+{
+ if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
+ return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);
+
+ return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
+}
+
+static void free_gid_entry(struct gid_entry *entry)
+{
+ memset(&entry->gid, 0, sizeof(entry->gid));
+ kfree(entry->ctx);
+ entry->ctx = NULL;
+}
+
+static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
+{
+ struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
struct mlx4_ib_iboe *iboe = &ibdev->iboe;
struct mlx4_port_gid_table *port_gid_table;
int free = -1, found = -1;
int ret = 0;
int hw_update = 0;
int i;
- struct gid_entry *gids = NULL;
+ struct gid_entry *gids;
+ u16 vlan_id = 0xffff;
+ u8 mac[ETH_ALEN];
- if (!rdma_cap_roce_gid_table(device, port_num))
+ if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
return -EINVAL;
- if (port_num > MLX4_MAX_PORTS)
+ if (attr->port_num > MLX4_MAX_PORTS)
return -EINVAL;
if (!context)
return -EINVAL;
- port_gid_table = &iboe->gids[port_num - 1];
+ ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
+ if (ret)
+ return ret;
+ port_gid_table = &iboe->gids[attr->port_num - 1];
spin_lock_bh(&iboe->lock);
for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
- if (!memcmp(&port_gid_table->gids[i].gid, gid, sizeof(*gid))) {
+ if (!memcmp(&port_gid_table->gids[i].gid,
+ &attr->gid, sizeof(attr->gid)) &&
+ port_gid_table->gids[i].gid_type == attr->gid_type &&
+ port_gid_table->gids[i].vlan_id == vlan_id) {
found = i;
break;
}
- if (free < 0 && !memcmp(&port_gid_table->gids[i].gid, &zgid, sizeof(*gid)))
+ if (free < 0 && rdma_is_zero_gid(&port_gid_table->gids[i].gid))
free = i; /* HW has space */
}
@@ -232,7 +300,9 @@ static int mlx4_ib_add_gid(struct ib_device *device,
ret = -ENOMEM;
} else {
*context = port_gid_table->gids[free].ctx;
- memcpy(&port_gid_table->gids[free].gid, gid, sizeof(*gid));
+ port_gid_table->gids[free].gid = attr->gid;
+ port_gid_table->gids[free].gid_type = attr->gid_type;
+ port_gid_table->gids[free].vlan_id = vlan_id;
port_gid_table->gids[free].ctx->real_index = free;
port_gid_table->gids[free].ctx->refcount = 1;
hw_update = 1;
@@ -244,87 +314,98 @@ static int mlx4_ib_add_gid(struct ib_device *device,
ctx->refcount++;
}
if (!ret && hw_update) {
- gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
+ gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
+ GFP_ATOMIC);
if (!gids) {
ret = -ENOMEM;
+ *context = NULL;
+ free_gid_entry(&port_gid_table->gids[free]);
} else {
- for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
+ for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
+ gids[i].gid_type = port_gid_table->gids[i].gid_type;
+ }
}
}
spin_unlock_bh(&iboe->lock);
if (!ret && hw_update) {
- ret = mlx4_ib_update_gids(gids, ibdev, port_num);
+ ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
+ if (ret) {
+ spin_lock_bh(&iboe->lock);
+ *context = NULL;
+ free_gid_entry(&port_gid_table->gids[free]);
+ spin_unlock_bh(&iboe->lock);
+ }
kfree(gids);
}
return ret;
}
-static int mlx4_ib_del_gid(struct ib_device *device,
- u8 port_num,
- unsigned int index,
- void **context)
+static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
{
struct gid_cache_context *ctx = *context;
- struct mlx4_ib_dev *ibdev = to_mdev(device);
+ struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
struct mlx4_ib_iboe *iboe = &ibdev->iboe;
struct mlx4_port_gid_table *port_gid_table;
int ret = 0;
int hw_update = 0;
struct gid_entry *gids = NULL;
- if (!rdma_cap_roce_gid_table(device, port_num))
+ if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
return -EINVAL;
- if (port_num > MLX4_MAX_PORTS)
+ if (attr->port_num > MLX4_MAX_PORTS)
return -EINVAL;
- port_gid_table = &iboe->gids[port_num - 1];
+ port_gid_table = &iboe->gids[attr->port_num - 1];
spin_lock_bh(&iboe->lock);
if (ctx) {
ctx->refcount--;
if (!ctx->refcount) {
unsigned int real_index = ctx->real_index;
- memcpy(&port_gid_table->gids[real_index].gid, &zgid, sizeof(zgid));
- kfree(port_gid_table->gids[real_index].ctx);
- port_gid_table->gids[real_index].ctx = NULL;
+ free_gid_entry(&port_gid_table->gids[real_index]);
hw_update = 1;
}
}
if (!ret && hw_update) {
int i;
- gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
+ gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
+ GFP_ATOMIC);
if (!gids) {
ret = -ENOMEM;
} else {
- for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
- memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
+ for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
+ memcpy(&gids[i].gid,
+ &port_gid_table->gids[i].gid,
+ sizeof(union ib_gid));
+ gids[i].gid_type =
+ port_gid_table->gids[i].gid_type;
+ }
}
}
spin_unlock_bh(&iboe->lock);
- if (!ret && hw_update) {
- ret = mlx4_ib_update_gids(gids, ibdev, port_num);
- kfree(gids);
- }
+ if (gids)
+ ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
+
+ kfree(gids);
return ret;
}
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
- u8 port_num, int index)
+ const struct ib_gid_attr *attr)
{
struct mlx4_ib_iboe *iboe = &ibdev->iboe;
struct gid_cache_context *ctx = NULL;
- union ib_gid gid;
struct mlx4_port_gid_table *port_gid_table;
int real_index = -EINVAL;
int i;
- int ret;
unsigned long flags;
+ u32 port_num = attr->port_num;
if (port_num > MLX4_MAX_PORTS)
return -EINVAL;
@@ -333,20 +414,15 @@ int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
port_num = 1;
if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
- return index;
-
- ret = ib_get_cached_gid(&ibdev->ib_dev, port_num, index, &gid);
- if (ret)
- return ret;
-
- if (!memcmp(&gid, &zgid, sizeof(gid)))
- return -EINVAL;
+ return attr->index;
spin_lock_irqsave(&iboe->lock, flags);
port_gid_table = &iboe->gids[port_num - 1];
for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
- if (!memcmp(&port_gid_table->gids[i].gid, &gid, sizeof(gid))) {
+ if (!memcmp(&port_gid_table->gids[i].gid,
+ &attr->gid, sizeof(attr->gid)) &&
+ attr->gid_type == port_gid_table->gids[i].gid_type) {
ctx = port_gid_table->gids[i].ctx;
break;
}
@@ -361,12 +437,12 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
struct ib_udata *uhw)
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
- int err = -ENOMEM;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
+ int err;
int have_ib_ports;
struct mlx4_uverbs_ex_query_device cmd;
- struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
+ struct mlx4_uverbs_ex_query_device_resp resp = {};
struct mlx4_clock_params clock_params;
if (uhw->inlen) {
@@ -388,10 +464,11 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
sizeof(resp.response_length);
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ err = -ENOMEM;
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
@@ -407,8 +484,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
IB_DEVICE_PORT_ACTIVE_EVENT |
IB_DEVICE_SYS_IMAGE_GUID |
- IB_DEVICE_RC_RNR_NAK_GEN |
- IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
+ IB_DEVICE_RC_RNR_NAK_GEN;
+ props->kernel_cap_flags = IBK_BLOCK_MULTICAST_LOOPBACK;
if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
@@ -422,9 +499,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
if (dev->dev->caps.max_gso_sz &&
(dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
- props->device_cap_flags |= IB_DEVICE_UD_TSO;
+ props->kernel_cap_flags |= IBK_UD_TSO;
if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
- props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
+ props->kernel_cap_flags |= IBK_LOCAL_DMA_LKEY;
if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
(dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
(dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
@@ -438,9 +515,11 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
else
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
- if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
- props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
}
+ if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
+ props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
+
+ props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
0xffffff;
@@ -452,9 +531,11 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->page_size_cap = dev->dev->caps.page_size_cap;
props->max_qp = dev->dev->quotas.qp;
props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
- props->max_sge = min(dev->dev->caps.max_sq_sg,
- dev->dev->caps.max_rq_sg);
- props->max_sge_rd = props->max_sge;
+ props->max_send_sge =
+ min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
+ props->max_recv_sge =
+ min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
+ props->max_sge_rd = MLX4_MAX_SGE_RD;
props->max_cq = dev->dev->quotas.cq;
props->max_cqe = dev->dev->caps.max_cqes;
props->max_mr = dev->dev->quotas.mpt;
@@ -475,21 +556,81 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
props->max_mcast_grp;
- props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
props->timestamp_mask = 0xFFFFFFFFFFFFULL;
+ props->max_ah = INT_MAX;
+
+ if (mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET ||
+ mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET) {
+ if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) {
+ props->rss_caps.max_rwq_indirection_tables =
+ props->max_qp;
+ props->rss_caps.max_rwq_indirection_table_size =
+ dev->dev->caps.max_rss_tbl_sz;
+ props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
+ props->max_wq_type_rq = props->max_qp;
+ }
+
+ if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
+ props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
+ }
- if (!mlx4_is_slave(dev->dev))
- err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
+ props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
+ props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;
if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
resp.response_length += sizeof(resp.hca_core_clock_offset);
- if (!err && !mlx4_is_slave(dev->dev)) {
- resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
+ if (!mlx4_get_internal_clock_params(dev->dev, &clock_params)) {
+ resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
}
}
+ if (uhw->outlen >= resp.response_length +
+ sizeof(resp.max_inl_recv_sz)) {
+ resp.response_length += sizeof(resp.max_inl_recv_sz);
+ resp.max_inl_recv_sz = dev->dev->caps.max_rq_sg *
+ sizeof(struct mlx4_wqe_data_seg);
+ }
+
+ if (offsetofend(typeof(resp), rss_caps) <= uhw->outlen) {
+ if (props->rss_caps.supported_qpts) {
+ resp.rss_caps.rx_hash_function =
+ MLX4_IB_RX_HASH_FUNC_TOEPLITZ;
+
+ resp.rss_caps.rx_hash_fields_mask =
+ MLX4_IB_RX_HASH_SRC_IPV4 |
+ MLX4_IB_RX_HASH_DST_IPV4 |
+ MLX4_IB_RX_HASH_SRC_IPV6 |
+ MLX4_IB_RX_HASH_DST_IPV6 |
+ MLX4_IB_RX_HASH_SRC_PORT_TCP |
+ MLX4_IB_RX_HASH_DST_PORT_TCP |
+ MLX4_IB_RX_HASH_SRC_PORT_UDP |
+ MLX4_IB_RX_HASH_DST_PORT_UDP;
+
+ if (dev->dev->caps.tunnel_offload_mode ==
+ MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ resp.rss_caps.rx_hash_fields_mask |=
+ MLX4_IB_RX_HASH_INNER;
+ }
+ resp.response_length = offsetof(typeof(resp), rss_caps) +
+ sizeof(resp.rss_caps);
+ }
+
+ if (offsetofend(typeof(resp), tso_caps) <= uhw->outlen) {
+ if (dev->dev->caps.max_gso_sz &&
+ ((mlx4_ib_port_link_layer(ibdev, 1) ==
+ IB_LINK_LAYER_ETHERNET) ||
+ (mlx4_ib_port_link_layer(ibdev, 2) ==
+ IB_LINK_LAYER_ETHERNET))) {
+ resp.tso_caps.max_tso = dev->dev->caps.max_gso_sz;
+ resp.tso_caps.supported_qpts |=
+ 1 << IB_QPT_RAW_PACKET;
+ }
+ resp.response_length = offsetof(typeof(resp), tso_caps) +
+ sizeof(resp.tso_caps);
+ }
+
if (uhw->outlen) {
err = ib_copy_to_udata(uhw, &resp, resp.response_length);
if (err)
@@ -503,7 +644,7 @@ out:
}
static enum rdma_link_layer
-mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
+mlx4_ib_port_link_layer(struct ib_device *device, u32 port_num)
{
struct mlx4_dev *dev = to_mdev(device)->dev;
@@ -511,11 +652,11 @@ mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}
-static int ib_link_query_port(struct ib_device *ibdev, u8 port,
+static int ib_link_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props, int netw_view)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int ext_active_speed;
int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
int err = -ENOMEM;
@@ -525,7 +666,7 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -577,7 +718,7 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port,
/* If reported active speed is QDR, check if is FDR-10 */
if (props->active_speed == IB_SPEED_QDR) {
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -603,11 +744,12 @@ out:
static u8 state_to_phys_state(enum ib_port_state state)
{
- return state == IB_PORT_ACTIVE ? 5 : 3;
+ return state == IB_PORT_ACTIVE ?
+ IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
}
-static int eth_link_query_port(struct ib_device *ibdev, u8 port,
- struct ib_port_attr *props, int netw_view)
+static int eth_link_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props)
{
struct mlx4_ib_dev *mdev = to_mdev(ibdev);
@@ -628,13 +770,17 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
if (err)
goto out;
- props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
- IB_WIDTH_4X : IB_WIDTH_1X;
- props->active_speed = IB_SPEED_QDR;
- props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
+ props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ||
+ (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
+ IB_WIDTH_4X : IB_WIDTH_1X;
+ props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
+ IB_SPEED_FDR : IB_SPEED_QDR;
+ props->port_cap_flags = IB_PORT_CM_SUP;
+ props->ip_gids = true;
props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
props->max_msg_sz = mdev->dev->caps.max_msg_sz;
- props->pkey_tbl_len = 1;
+ if (mdev->dev->caps.pkey_table_len[port])
+ props->pkey_tbl_len = 1;
props->max_mtu = IB_MTU_4096;
props->max_vl_num = 2;
props->state = IB_PORT_DOWN;
@@ -663,32 +809,32 @@ out:
return err;
}
-int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
+int __mlx4_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props, int netw_view)
{
int err;
- memset(props, 0, sizeof *props);
+ /* props is zeroed by the caller; avoid zeroing it here */
err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
ib_link_query_port(ibdev, port, props, netw_view) :
- eth_link_query_port(ibdev, port, props, netw_view);
+ eth_link_query_port(ibdev, port, props);
return err;
}
-static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
+static int mlx4_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
/* returns host view */
return __mlx4_ib_query_port(ibdev, port, props, 0);
}
-int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid, int netw_view)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
struct mlx4_ib_dev *dev = to_mdev(ibdev);
int clear = 0;
@@ -699,7 +845,7 @@ int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -721,7 +867,7 @@ int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
}
}
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
in_mad->attr_mod = cpu_to_be32(index / 8);
@@ -740,34 +886,80 @@ out:
return err;
}
-static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+static int mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid)
{
- int ret;
-
if (rdma_protocol_ib(ibdev, port))
return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
+ return 0;
+}
- if (!rdma_protocol_roce(ibdev, port))
- return -ENODEV;
-
- if (!rdma_cap_roce_gid_table(ibdev, port))
- return -ENODEV;
+static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u32 port,
+ u64 *sl2vl_tbl)
+{
+ union sl2vl_tbl_to_u64 sl2vl64;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
+ int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
+ int err = -ENOMEM;
+ int jj;
- ret = ib_get_cached_gid(ibdev, port, index, gid);
- if (ret == -EAGAIN) {
- memcpy(gid, &zgid, sizeof(*gid));
+ if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
+ *sl2vl_tbl = 0;
return 0;
}
- return ret;
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ ib_init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_SL_TO_VL_TABLE;
+ in_mad->attr_mod = 0;
+
+ if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
+ mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
+
+ err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
+ in_mad, out_mad);
+ if (err)
+ goto out;
+
+ for (jj = 0; jj < 8; jj++)
+ sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj];
+ *sl2vl_tbl = sl2vl64.sl64;
+
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
}
-int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
+{
+ u64 sl2vl;
+ int i;
+ int err;
+
+ for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
+ if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
+ continue;
+ err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
+ if (err) {
+ pr_err("Unable to get default sl to vl mapping for port %d. Using all zeroes (%d)\n",
+ i, err);
+ sl2vl = 0;
+ }
+ atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
+ }
+}
+
+int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey, int netw_view)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
int err = -ENOMEM;
@@ -776,7 +968,7 @@ int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
in_mad->attr_mod = cpu_to_be32(index / 32);
@@ -796,7 +988,8 @@ out:
return err;
}
-static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+static int mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
+ u16 *pkey)
{
return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}
@@ -817,7 +1010,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
return -EOPNOTSUPP;
spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
- memcpy(ibdev->node_desc, props->node_desc, 64);
+ memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
/*
@@ -828,7 +1021,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
if (IS_ERR(mailbox))
return 0;
- memcpy(mailbox->buf, props->node_desc, 64);
+ memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
@@ -837,8 +1030,8 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
return 0;
}
-static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
- u32 cap_mask)
+static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u32 port,
+ int reset_qkey_viols, u32 cap_mask)
{
struct mlx4_cmd_mailbox *mailbox;
int err;
@@ -863,7 +1056,7 @@ static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_vio
return err;
}
-static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
+static int mlx4_ib_modify_port(struct ib_device *ibdev, u32 port, int mask,
struct ib_port_modify *props)
{
struct mlx4_ib_dev *mdev = to_mdev(ibdev);
@@ -881,7 +1074,7 @@ static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
mutex_lock(&mdev->cap_mask_mutex);
- err = mlx4_ib_query_port(ibdev, port, &attr);
+ err = ib_query_port(ibdev, port, &attr);
if (err)
goto out;
@@ -897,19 +1090,21 @@ out:
return err;
}
-static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
+static int mlx4_ib_alloc_ucontext(struct ib_ucontext *uctx,
+ struct ib_udata *udata)
{
+ struct ib_device *ibdev = uctx->device;
struct mlx4_ib_dev *dev = to_mdev(ibdev);
- struct mlx4_ib_ucontext *context;
+ struct mlx4_ib_ucontext *context = to_mucontext(uctx);
struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
struct mlx4_ib_alloc_ucontext_resp resp;
int err;
if (!dev->ib_active)
- return ERR_PTR(-EAGAIN);
+ return -EAGAIN;
- if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
+ if (ibdev->ops.uverbs_abi_ver ==
+ MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
resp_v3.qp_tab_size = dev->dev->caps.num_qps;
resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
@@ -921,306 +1116,149 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
resp.cqe_size = dev->dev->caps.cqe_size;
}
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return ERR_PTR(-ENOMEM);
-
err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
- if (err) {
- kfree(context);
- return ERR_PTR(err);
- }
+ if (err)
+ return err;
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
- if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
+ INIT_LIST_HEAD(&context->wqn_ranges_list);
+ mutex_init(&context->wqn_ranges_mutex);
+
+ if (ibdev->ops.uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
else
err = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (err) {
mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
- kfree(context);
- return ERR_PTR(-EFAULT);
+ return -EFAULT;
}
- return &context->ibucontext;
+ return err;
}
-static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
+static void mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
- kfree(context);
-
- return 0;
-}
-
-static void mlx4_ib_vma_open(struct vm_area_struct *area)
-{
- /* vma_open is called when a new VMA is created on top of our VMA.
- * This is done through either mremap flow or split_vma (usually due
- * to mlock, madvise, munmap, etc.). We do not support a clone of the
- * vma, as this VMA is strongly hardware related. Therefore we set the
- * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
- * calling us again and trying to do incorrect actions. We assume that
- * the original vma size is exactly a single page that there will be no
- * "splitting" operations on.
- */
- area->vm_ops = NULL;
-}
-
-static void mlx4_ib_vma_close(struct vm_area_struct *area)
-{
- struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;
-
- /* It's guaranteed that all VMAs opened on a FD are closed before the
- * file itself is closed, therefore no sync is needed with the regular
- * closing flow. (e.g. mlx4_ib_dealloc_ucontext) However need a sync
- * with accessing the vma as part of mlx4_ib_disassociate_ucontext.
- * The close operation is usually called under mm->mmap_sem except when
- * process is exiting. The exiting case is handled explicitly as part
- * of mlx4_ib_disassociate_ucontext.
- */
- mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
- area->vm_private_data;
-
- /* set the vma context pointer to null in the mlx4_ib driver's private
- * data to protect against a race condition in mlx4_ib_dissassociate_ucontext().
- */
- mlx4_ib_vma_priv_data->vma = NULL;
}
-static const struct vm_operations_struct mlx4_ib_vm_ops = {
- .open = mlx4_ib_vma_open,
- .close = mlx4_ib_vma_close
-};
-
static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
- int i;
- int ret = 0;
- struct vm_area_struct *vma;
- struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
- struct task_struct *owning_process = NULL;
- struct mm_struct *owning_mm = NULL;
-
- owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
- if (!owning_process)
- return;
-
- owning_mm = get_task_mm(owning_process);
- if (!owning_mm) {
- pr_info("no mm, disassociate ucontext is pending task termination\n");
- while (1) {
- /* make sure that task is dead before returning, it may
- * prevent a rare case of module down in parallel to a
- * call to mlx4_ib_vma_close.
- */
- put_task_struct(owning_process);
- msleep(1);
- owning_process = get_pid_task(ibcontext->tgid,
- PIDTYPE_PID);
- if (!owning_process ||
- owning_process->state == TASK_DEAD) {
- pr_info("disassociate ucontext done, task was terminated\n");
- /* in case task was dead need to release the task struct */
- if (owning_process)
- put_task_struct(owning_process);
- return;
- }
- }
- }
-
- /* need to protect from a race on closing the vma as part of
- * mlx4_ib_vma_close().
- */
- down_read(&owning_mm->mmap_sem);
- for (i = 0; i < HW_BAR_COUNT; i++) {
- vma = context->hw_bar_info[i].vma;
- if (!vma)
- continue;
-
- ret = zap_vma_ptes(context->hw_bar_info[i].vma,
- context->hw_bar_info[i].vma->vm_start,
- PAGE_SIZE);
- if (ret) {
- pr_err("Error: zap_vma_ptes failed for index=%d, ret=%d\n", i, ret);
- BUG_ON(1);
- }
-
- /* context going to be destroyed, should not access ops any more */
- context->hw_bar_info[i].vma->vm_ops = NULL;
- }
-
- up_read(&owning_mm->mmap_sem);
- mmput(owning_mm);
- put_task_struct(owning_process);
-}
-
-static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
- struct mlx4_ib_vma_private_data *vma_private_data)
-{
- vma_private_data->vma = vma;
- vma->vm_private_data = vma_private_data;
- vma->vm_ops = &mlx4_ib_vm_ops;
}
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
struct mlx4_ib_dev *dev = to_mdev(context->device);
- struct mlx4_ib_ucontext *mucontext = to_mucontext(context);
- if (vma->vm_end - vma->vm_start != PAGE_SIZE)
- return -EINVAL;
-
- if (vma->vm_pgoff == 0) {
- /* We prevent double mmaping on same context */
- if (mucontext->hw_bar_info[HW_BAR_DB].vma)
- return -EINVAL;
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ switch (vma->vm_pgoff) {
+ case 0:
+ return rdma_user_mmap_io(context, vma,
+ to_mucontext(context)->uar.pfn,
+ PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot),
+ NULL);
- if (io_remap_pfn_range(vma, vma->vm_start,
- to_mucontext(context)->uar.pfn,
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
-
- mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);
-
- } else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
- /* We prevent double mmaping on same context */
- if (mucontext->hw_bar_info[HW_BAR_BF].vma)
+ case 1:
+ if (dev->dev->caps.bf_reg_size == 0)
return -EINVAL;
-
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
- if (io_remap_pfn_range(vma, vma->vm_start,
- to_mucontext(context)->uar.pfn +
- dev->dev->caps.num_uars,
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
-
- mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);
-
- } else if (vma->vm_pgoff == 3) {
+ return rdma_user_mmap_io(
+ context, vma,
+ to_mucontext(context)->uar.pfn +
+ dev->dev->caps.num_uars,
+ PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot),
+ NULL);
+
+ case 3: {
struct mlx4_clock_params params;
int ret;
- /* We prevent double mmaping on same context */
- if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
- return -EINVAL;
-
ret = mlx4_get_internal_clock_params(dev->dev, &params);
-
if (ret)
return ret;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (io_remap_pfn_range(vma, vma->vm_start,
- (pci_resource_start(dev->dev->persist->pdev,
- params.bar) +
- params.offset)
- >> PAGE_SHIFT,
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
-
- mlx4_ib_set_vma_data(vma,
- &mucontext->hw_bar_info[HW_BAR_CLOCK]);
- } else {
- return -EINVAL;
+ return rdma_user_mmap_io(
+ context, vma,
+ (pci_resource_start(dev->dev->persist->pdev,
+ params.bar) +
+ params.offset) >>
+ PAGE_SHIFT,
+ PAGE_SIZE, pgprot_noncached(vma->vm_page_prot),
+ NULL);
}
- return 0;
+ default:
+ return -EINVAL;
+ }
}
-static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
- struct mlx4_ib_pd *pd;
+ struct mlx4_ib_pd *pd = to_mpd(ibpd);
+ struct ib_device *ibdev = ibpd->device;
int err;
- pd = kmalloc(sizeof *pd, GFP_KERNEL);
- if (!pd)
- return ERR_PTR(-ENOMEM);
-
err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
- if (err) {
- kfree(pd);
- return ERR_PTR(err);
- }
-
- if (context)
- if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
- mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
- kfree(pd);
- return ERR_PTR(-EFAULT);
- }
+ if (err)
+ return err;
- return &pd->ibpd;
+ if (udata && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
+ mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
+ return -EFAULT;
+ }
+ return 0;
}
-static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
+static int mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
- kfree(pd);
-
return 0;
}
-static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+static int mlx4_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
{
- struct mlx4_ib_xrcd *xrcd;
+ struct mlx4_ib_dev *dev = to_mdev(ibxrcd->device);
+ struct mlx4_ib_xrcd *xrcd = to_mxrcd(ibxrcd);
struct ib_cq_init_attr cq_attr = {};
int err;
- if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
- return ERR_PTR(-ENOSYS);
-
- xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
- if (!xrcd)
- return ERR_PTR(-ENOMEM);
+ if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
+ return -EOPNOTSUPP;
- err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
+ err = mlx4_xrcd_alloc(dev->dev, &xrcd->xrcdn);
if (err)
- goto err1;
+ return err;
- xrcd->pd = ib_alloc_pd(ibdev);
+ xrcd->pd = ib_alloc_pd(ibxrcd->device, 0);
if (IS_ERR(xrcd->pd)) {
err = PTR_ERR(xrcd->pd);
goto err2;
}
cq_attr.cqe = 1;
- xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
+ xrcd->cq = ib_create_cq(ibxrcd->device, NULL, NULL, xrcd, &cq_attr);
if (IS_ERR(xrcd->cq)) {
err = PTR_ERR(xrcd->cq);
goto err3;
}
- return &xrcd->ibxrcd;
+ return 0;
err3:
ib_dealloc_pd(xrcd->pd);
err2:
- mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
-err1:
- kfree(xrcd);
- return ERR_PTR(err);
+ mlx4_xrcd_free(dev->dev, xrcd->xrcdn);
+ return err;
}
-static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
+static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
{
ib_destroy_cq(to_mxrcd(xrcd)->cq);
ib_dealloc_pd(to_mxrcd(xrcd)->pd);
mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
- kfree(xrcd);
-
return 0;
}
@@ -1247,6 +1285,22 @@ static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
return 0;
}
+static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
+ struct mlx4_ib_counters *ctr_table)
+{
+ struct counter_index *counter, *tmp_count;
+
+ mutex_lock(&ctr_table->mutex);
+ list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
+ list) {
+ if (counter->allocated)
+ mlx4_counter_free(ibdev->dev, counter->index);
+ list_del(&counter->list);
+ kfree(counter);
+ }
+ mutex_unlock(&ctr_table->mutex);
+}
+
int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
union ib_gid *gid)
{
@@ -1258,8 +1312,7 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
spin_lock_bh(&mdev->iboe.lock);
ndev = mdev->iboe.netdevs[mqp->port - 1];
- if (ndev)
- dev_hold(ndev);
+ dev_hold(ndev);
spin_unlock_bh(&mdev->iboe.lock);
if (ndev) {
@@ -1276,6 +1329,19 @@ struct mlx4_ib_steering {
union ib_gid gid;
};
+#define LAST_ETH_FIELD vlan_tag
+#define LAST_IB_FIELD sl
+#define LAST_IPV4_FIELD dst_ip
+#define LAST_TCP_UDP_FIELD src_port
+
+/* Field is the last supported field */
+#define FIELDS_NOT_SUPPORTED(filter, field)\
+ memchr_inv((void *)&filter.field +\
+ sizeof(filter.field), 0,\
+ sizeof(filter) -\
+ offsetof(typeof(filter), field) -\
+ sizeof(filter.field))
+
static int parse_flow_attr(struct mlx4_dev *dev,
u32 qp_num,
union ib_flow_spec *ib_spec,
@@ -1285,6 +1351,9 @@ static int parse_flow_attr(struct mlx4_dev *dev,
switch (ib_spec->type) {
case IB_FLOW_SPEC_ETH:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
+ return -ENOTSUPP;
+
type = MLX4_NET_TRANS_RULE_ID_ETH;
memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
ETH_ALEN);
@@ -1294,6 +1363,9 @@ static int parse_flow_attr(struct mlx4_dev *dev,
mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
break;
case IB_FLOW_SPEC_IB:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD))
+ return -ENOTSUPP;
+
type = MLX4_NET_TRANS_RULE_ID_IB;
mlx4_spec->ib.l3_qpn =
cpu_to_be32(qp_num);
@@ -1303,6 +1375,9 @@ static int parse_flow_attr(struct mlx4_dev *dev,
case IB_FLOW_SPEC_IPV4:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
+ return -ENOTSUPP;
+
type = MLX4_NET_TRANS_RULE_ID_IPV4;
mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
@@ -1312,6 +1387,9 @@ static int parse_flow_attr(struct mlx4_dev *dev,
case IB_FLOW_SPEC_TCP:
case IB_FLOW_SPEC_UDP:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD))
+ return -ENOTSUPP;
+
type = ib_spec->type == IB_FLOW_SPEC_TCP ?
MLX4_NET_TRANS_RULE_ID_TCP :
MLX4_NET_TRANS_RULE_ID_UDP;
@@ -1410,8 +1488,9 @@ static int __mlx4_ib_create_default_rules(
int i;
for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
+ union ib_flow_spec ib_spec = {};
int ret;
- union ib_flow_spec ib_spec;
+
switch (pdefault_rules->rules_create_list[i]) {
case 0:
/* no rule */
@@ -1452,23 +1531,11 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
struct mlx4_net_trans_rule_hw_ctrl *ctrl;
int default_flow;
- static const u16 __mlx4_domain[] = {
- [IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
- [IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
- [IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
- [IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
- };
-
if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
pr_err("Invalid priority value %d\n", flow_attr->priority);
return -EINVAL;
}
- if (domain >= IB_FLOW_DOMAIN_NUM) {
- pr_err("Invalid domain value %d\n", domain);
- return -EINVAL;
- }
-
if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
return -EINVAL;
@@ -1477,8 +1544,7 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
return PTR_ERR(mailbox);
ctrl = mailbox->buf;
- ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
- flow_attr->priority);
+ ctrl->prio = cpu_to_be16(domain | flow_attr->priority);
ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
ctrl->port = flow_attr->port;
ctrl->qpn = cpu_to_be32(qp->qp_num);
@@ -1508,15 +1574,25 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
size += ret;
}
+ if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
+ flow_attr->num_of_specs == 1) {
+ struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
+ enum ib_flow_spec_type header_spec =
+ ((union ib_flow_spec *)(flow_attr + 1))->type;
+
+ if (header_spec == IB_FLOW_SPEC_ETH)
+ mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
+ }
+
ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
- MLX4_CMD_WRAPPED);
+ MLX4_CMD_NATIVE);
if (ret == -ENOMEM)
pr_err("mcg table is full. Fail to register network rule.\n");
else if (ret == -ENXIO)
pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
else if (ret)
- pr_err("Invalid argumant. Fail to register network rule.\n");
+ pr_err("Invalid argument. Fail to register network rule.\n");
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
return ret;
@@ -1527,7 +1603,7 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
int err;
err = mlx4_cmd(dev, reg_id, 0, 0,
MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
- MLX4_CMD_WRAPPED);
+ MLX4_CMD_NATIVE);
if (err)
pr_err("Fail to detach network rule. registration id = 0x%llx\n",
reg_id);
@@ -1559,9 +1635,59 @@ static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_
return err;
}
+static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
+ struct ib_flow_attr *flow_attr,
+ enum mlx4_net_trans_promisc_mode *type)
+{
+ int err = 0;
+
+ if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
+ (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
+ (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_attr->num_of_specs == 0) {
+ type[0] = MLX4_FS_MC_SNIFFER;
+ type[1] = MLX4_FS_UC_SNIFFER;
+ } else {
+ union ib_flow_spec *ib_spec;
+
+ ib_spec = (union ib_flow_spec *)(flow_attr + 1);
+ if (ib_spec->type != IB_FLOW_SPEC_ETH)
+ return -EINVAL;
+
+ /* if all is zero then MC and UC */
+ if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
+ type[0] = MLX4_FS_MC_SNIFFER;
+ type[1] = MLX4_FS_UC_SNIFFER;
+ } else {
+ u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
+ ib_spec->eth.mask.dst_mac[1],
+ ib_spec->eth.mask.dst_mac[2],
+ ib_spec->eth.mask.dst_mac[3],
+ ib_spec->eth.mask.dst_mac[4],
+ ib_spec->eth.mask.dst_mac[5]};
+
+ /* The xor above was only on the MC bit; a non-empty mask is valid
+ * only if this bit is set and the rest are zero.
+ */
+ if (!is_zero_ether_addr(&mac[0]))
+ return -EINVAL;
+
+ if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
+ type[0] = MLX4_FS_MC_SNIFFER;
+ else
+ type[0] = MLX4_FS_UC_SNIFFER;
+ }
+ }
+
+ return err;
+}
+
static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
- struct ib_flow_attr *flow_attr,
- int domain)
+ struct ib_flow_attr *flow_attr,
+ struct ib_udata *udata)
{
int err = 0, i = 0, j = 0;
struct mlx4_ib_flow *mflow;
@@ -1569,6 +1695,17 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
int is_bonded = mlx4_is_bonded(dev);
+ if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
+ (flow_attr->type != IB_FLOW_ATTR_NORMAL))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (udata &&
+ udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen))
+ return ERR_PTR(-EOPNOTSUPP);
+
memset(type, 0, sizeof(type));
mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
@@ -1579,7 +1716,19 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
switch (flow_attr->type) {
case IB_FLOW_ATTR_NORMAL:
- type[0] = MLX4_FS_REGULAR;
+ /* If the don't-trap flag (continue match) is set, under specific
+ * conditions traffic is replicated to the given qp
+ * without stealing it
+ */
+ if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
+ err = mlx4_ib_add_dont_trap_rule(dev,
+ flow_attr,
+ type);
+ if (err)
+ goto err_free;
+ } else {
+ type[0] = MLX4_FS_REGULAR;
+ }
break;
case IB_FLOW_ATTR_ALL_DEFAULT:
@@ -1591,8 +1740,8 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
break;
case IB_FLOW_ATTR_SNIFFER:
- type[0] = MLX4_FS_UC_SNIFFER;
- type[1] = MLX4_FS_MC_SNIFFER;
+ type[0] = MLX4_FS_MIRROR_RX_PORT;
+ type[1] = MLX4_FS_MIRROR_SX_PORT;
break;
default:
@@ -1601,8 +1750,8 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
}
while (i < ARRAY_SIZE(type) && type[i]) {
- err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
- &mflow->reg_id[i].id);
+ err = __mlx4_ib_create_flow(qp, flow_attr, MLX4_DOMAIN_UVERBS,
+ type[i], &mflow->reg_id[i].id);
if (err)
goto err_create_flow;
if (is_bonded) {
@@ -1611,7 +1760,7 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
*/
flow_attr->port = 2;
err = __mlx4_ib_create_flow(qp, flow_attr,
- domain, type[j],
+ MLX4_DOMAIN_UVERBS, type[j],
&mflow->reg_id[j].mirror);
flow_attr->port = 1;
if (err)
@@ -1810,11 +1959,9 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
if (ge) {
spin_lock_bh(&mdev->iboe.lock);
ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
- if (ndev)
- dev_hold(ndev);
+ dev_hold(ndev);
spin_unlock_bh(&mdev->iboe.lock);
- if (ndev)
- dev_put(ndev);
+ dev_put(ndev);
list_del(&ge->list);
kfree(ge);
} else
@@ -1827,8 +1974,8 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
static int init_node_data(struct mlx4_ib_dev *dev)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
int err = -ENOMEM;
@@ -1837,7 +1984,7 @@ static int init_node_data(struct mlx4_ib_dev *dev)
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
if (mlx4_is_master(dev->dev))
mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
@@ -1846,7 +1993,7 @@ static int init_node_data(struct mlx4_ib_dev *dev)
if (err)
goto out;
- memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
+ memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
@@ -1863,53 +2010,266 @@ out:
return err;
}
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx4_ib_dev *dev =
- container_of(device, struct mlx4_ib_dev, ib_dev.dev);
- return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
+ rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
+
+ return sysfs_emit(buf, "MT%d\n", dev->dev->persist->pdev->device);
}
+static DEVICE_ATTR_RO(hca_type);
-static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx4_ib_dev *dev =
- container_of(device, struct mlx4_ib_dev, ib_dev.dev);
- return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
- (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
- (int) dev->dev->caps.fw_ver & 0xffff);
+ rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
+
+ return sysfs_emit(buf, "%x\n", dev->dev->rev_id);
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx4_ib_dev *dev =
- container_of(device, struct mlx4_ib_dev, ib_dev.dev);
- return sprintf(buf, "%x\n", dev->dev->rev_id);
+ rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
+
+ return sysfs_emit(buf, "%.*s\n", MLX4_BOARD_ID_LEN, dev->dev->board_id);
}
+static DEVICE_ATTR_RO(board_id);
+
+static struct attribute *mlx4_class_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL
+};
+
+static const struct attribute_group mlx4_attr_group = {
+ .attrs = mlx4_class_attributes,
+};
+
+struct diag_counter {
+ const char *name;
+ u32 offset;
+};
-static ssize_t show_board(struct device *device, struct device_attribute *attr,
- char *buf)
+#define DIAG_COUNTER(_name, _offset) \
+ { .name = #_name, .offset = _offset }
+
+static const struct diag_counter diag_basic[] = {
+ DIAG_COUNTER(rq_num_lle, 0x00),
+ DIAG_COUNTER(sq_num_lle, 0x04),
+ DIAG_COUNTER(rq_num_lqpoe, 0x08),
+ DIAG_COUNTER(sq_num_lqpoe, 0x0C),
+ DIAG_COUNTER(rq_num_lpe, 0x18),
+ DIAG_COUNTER(sq_num_lpe, 0x1C),
+ DIAG_COUNTER(rq_num_wrfe, 0x20),
+ DIAG_COUNTER(sq_num_wrfe, 0x24),
+ DIAG_COUNTER(sq_num_mwbe, 0x2C),
+ DIAG_COUNTER(sq_num_bre, 0x34),
+ DIAG_COUNTER(sq_num_rire, 0x44),
+ DIAG_COUNTER(rq_num_rire, 0x48),
+ DIAG_COUNTER(sq_num_rae, 0x4C),
+ DIAG_COUNTER(rq_num_rae, 0x50),
+ DIAG_COUNTER(sq_num_roe, 0x54),
+ DIAG_COUNTER(sq_num_tree, 0x5C),
+ DIAG_COUNTER(sq_num_rree, 0x64),
+ DIAG_COUNTER(rq_num_rnr, 0x68),
+ DIAG_COUNTER(sq_num_rnr, 0x6C),
+ DIAG_COUNTER(rq_num_oos, 0x100),
+ DIAG_COUNTER(sq_num_oos, 0x104),
+};
+
+static const struct diag_counter diag_ext[] = {
+ DIAG_COUNTER(rq_num_dup, 0x130),
+ DIAG_COUNTER(sq_num_to, 0x134),
+};
+
+static const struct diag_counter diag_device_only[] = {
+ DIAG_COUNTER(num_cqovf, 0x1A0),
+ DIAG_COUNTER(rq_num_udsdprd, 0x118),
+};
+
+static struct rdma_hw_stats *
+mlx4_ib_alloc_hw_device_stats(struct ib_device *ibdev)
{
- struct mlx4_ib_dev *dev =
- container_of(device, struct mlx4_ib_dev, ib_dev.dev);
- return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
- dev->dev->board_id);
+ struct mlx4_ib_dev *dev = to_mdev(ibdev);
+ struct mlx4_ib_diag_counters *diag = dev->diag_counters;
+
+ if (!diag[0].descs)
+ return NULL;
+
+ return rdma_alloc_hw_stats_struct(diag[0].descs, diag[0].num_counters,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static struct rdma_hw_stats *
+mlx4_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num)
+{
+ struct mlx4_ib_dev *dev = to_mdev(ibdev);
+ struct mlx4_ib_diag_counters *diag = dev->diag_counters;
+
+ if (!diag[1].descs)
+ return NULL;
+
+ return rdma_alloc_hw_stats_struct(diag[1].descs, diag[1].num_counters,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u32 port, int index)
+{
+ struct mlx4_ib_dev *dev = to_mdev(ibdev);
+ struct mlx4_ib_diag_counters *diag = dev->diag_counters;
+ u32 hw_value[ARRAY_SIZE(diag_device_only) +
+ ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
+ int ret;
+ int i;
+
+ ret = mlx4_query_diag_counters(dev->dev,
+ MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
+ diag[!!port].offset, hw_value,
+ diag[!!port].num_counters, port);
+
+ if (ret)
+ return ret;
+
+ for (i = 0; i < diag[!!port].num_counters; i++)
+ stats->value[i] = hw_value[i];
+
+ return diag[!!port].num_counters;
+}
+
+static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
+ struct rdma_stat_desc **pdescs,
+ u32 **offset, u32 *num, bool port)
+{
+ u32 num_counters;
+
+ num_counters = ARRAY_SIZE(diag_basic);
+
+ if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
+ num_counters += ARRAY_SIZE(diag_ext);
+
+ if (!port)
+ num_counters += ARRAY_SIZE(diag_device_only);
+
+ *pdescs = kcalloc(num_counters, sizeof(struct rdma_stat_desc),
+ GFP_KERNEL);
+ if (!*pdescs)
+ return -ENOMEM;
+
+ *offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
+ if (!*offset)
+ goto err;
+
+ *num = num_counters;
+
+ return 0;
+
+err:
+ kfree(*pdescs);
+ return -ENOMEM;
+}
+
+static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
+ struct rdma_stat_desc *descs,
+ u32 *offset, bool port)
+{
+ int i;
+ int j;
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
+ descs[i].name = diag_basic[i].name;
+ offset[i] = diag_basic[i].offset;
+ }
+
+ if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
+ for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
+ descs[j].name = diag_ext[i].name;
+ offset[j] = diag_ext[i].offset;
+ }
+ }
+
+ if (!port) {
+ for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
+ descs[j].name = diag_device_only[i].name;
+ offset[j] = diag_device_only[i].offset;
+ }
+ }
}
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+static const struct ib_device_ops mlx4_ib_hw_stats_ops = {
+ .alloc_hw_device_stats = mlx4_ib_alloc_hw_device_stats,
+ .alloc_hw_port_stats = mlx4_ib_alloc_hw_port_stats,
+ .get_hw_stats = mlx4_ib_get_hw_stats,
+};
-static struct device_attribute *mlx4_class_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_fw_ver,
- &dev_attr_hca_type,
- &dev_attr_board_id
+static const struct ib_device_ops mlx4_ib_hw_stats_ops1 = {
+ .alloc_hw_device_stats = mlx4_ib_alloc_hw_device_stats,
+ .get_hw_stats = mlx4_ib_get_hw_stats,
};
+static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
+{
+ struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
+ int i;
+ int ret;
+ bool per_port = !!(ibdev->dev->caps.flags2 &
+ MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
+
+ if (mlx4_is_slave(ibdev->dev))
+ return 0;
+
+ for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
+ /*
+ * i == 1 means we are building port counters; set a different
+ * stats ops without the port stats callback.
+ */
+ if (i && !per_port) {
+ ib_set_device_ops(&ibdev->ib_dev,
+ &mlx4_ib_hw_stats_ops1);
+
+ return 0;
+ }
+
+ ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].descs,
+ &diag[i].offset,
+ &diag[i].num_counters, i);
+ if (ret)
+ goto err_alloc;
+
+ mlx4_ib_fill_diag_counters(ibdev, diag[i].descs,
+ diag[i].offset, i);
+ }
+
+ ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_hw_stats_ops);
+
+ return 0;
+
+err_alloc:
+ if (i) {
+ kfree(diag[i - 1].descs);
+ kfree(diag[i - 1].offset);
+ }
+
+ return ret;
+}
+
+static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
+{
+ int i;
+
+ for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
+ kfree(ibdev->diag_counters[i].offset);
+ kfree(ibdev->diag_counters[i].descs);
+ }
+}
+
#define MLX4_IB_INVALID_MAC ((u64)-1)
static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
struct net_device *dev,
@@ -1919,10 +2279,7 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
u64 release_mac = MLX4_IB_INVALID_MAC;
struct mlx4_ib_qp *qp;
- read_lock(&dev_base_lock);
- new_smac = mlx4_mac_to_u64(dev->dev_addr);
- read_unlock(&dev_base_lock);
-
+ new_smac = ether_addr_to_u64(dev->dev_addr);
atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
/* no need for update QP1 and mac registration in non-SRIOV */
@@ -1968,35 +2325,54 @@ unlock:
mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
}
-static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
- struct net_device *dev,
- unsigned long event)
+static void mlx4_ib_scan_netdev(struct mlx4_ib_dev *ibdev,
+ struct net_device *dev,
+ unsigned long event)
{
- struct mlx4_ib_iboe *iboe;
- int update_qps_port = -1;
- int port;
+ struct mlx4_ib_iboe *iboe = &ibdev->iboe;
ASSERT_RTNL();
- iboe = &ibdev->iboe;
+ if (dev->dev.parent != ibdev->ib_dev.dev.parent)
+ return;
spin_lock_bh(&iboe->lock);
- mlx4_foreach_ib_transport_port(port, ibdev->dev) {
- iboe->netdevs[port - 1] =
- mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
+ iboe->netdevs[dev->dev_port] = event != NETDEV_UNREGISTER ? dev : NULL;
- if (dev == iboe->netdevs[port - 1] &&
- (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
- event == NETDEV_UP || event == NETDEV_CHANGE))
- update_qps_port = port;
+ spin_unlock_bh(&iboe->lock);
+
+ if (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER)
+ mlx4_ib_update_qps(ibdev, dev, dev->dev_port + 1);
+}
+
+static void mlx4_ib_port_event(struct ib_device *ibdev, struct net_device *ndev,
+ unsigned long event)
+{
+ struct mlx4_ib_dev *mlx4_ibdev =
+ container_of(ibdev, struct mlx4_ib_dev, ib_dev);
+ struct mlx4_ib_iboe *iboe = &mlx4_ibdev->iboe;
+
+ if (!net_eq(dev_net(ndev), &init_net))
+ return;
+
+ ASSERT_RTNL();
+
+ if (ndev->dev.parent != mlx4_ibdev->ib_dev.dev.parent)
+ return;
+
+ spin_lock_bh(&iboe->lock);
+
+ iboe->netdevs[ndev->dev_port] = event != NETDEV_UNREGISTER ? ndev : NULL;
+
+ if (event == NETDEV_UP || event == NETDEV_DOWN)
+ ib_dispatch_port_state_event(&mlx4_ibdev->ib_dev, ndev);
- }
spin_unlock_bh(&iboe->lock);
- if (update_qps_port > 0)
- mlx4_ib_update_qps(ibdev, dev, update_qps_port);
+ if (event == NETDEV_UP || event == NETDEV_CHANGE)
+ mlx4_ib_update_qps(mlx4_ibdev, ndev, ndev->dev_port + 1);
}
static int mlx4_ib_netdev_event(struct notifier_block *this,
@@ -2009,7 +2385,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
- mlx4_ib_scan_netdevs(ibdev, dev, event);
+ mlx4_ib_scan_netdev(ibdev, dev, event);
return NOTIFY_DONE;
}
@@ -2097,31 +2473,148 @@ static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
ibdev->eq_table = NULL;
}
-static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int mlx4_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
+ struct mlx4_ib_dev *mdev = to_mdev(ibdev);
int err;
- err = mlx4_ib_query_port(ibdev, port_num, &attr);
+ if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ } else {
+ if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
+ RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET;
+ if (immutable->core_cap_flags & (RDMA_CORE_PORT_IBA_ROCE |
+ RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP))
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ }
+
+ err = ib_query_port(ibdev, port_num, &attr);
if (err)
return err;
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
- if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND)
- immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
- else
- immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
-
- immutable->max_mad_size = IB_MGMT_MAD_SIZE;
-
return 0;
}
-static void *mlx4_ib_add(struct mlx4_dev *dev)
+static void get_fw_ver_str(struct ib_device *device, char *str)
{
+ struct mlx4_ib_dev *dev =
+ container_of(device, struct mlx4_ib_dev, ib_dev);
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
+ (int) (dev->dev->caps.fw_ver >> 32),
+ (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
+ (int) dev->dev->caps.fw_ver & 0xffff);
+}
+
+static const struct ib_device_ops mlx4_ib_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_MLX4,
+ .uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION,
+
+ .add_gid = mlx4_ib_add_gid,
+ .alloc_mr = mlx4_ib_alloc_mr,
+ .alloc_pd = mlx4_ib_alloc_pd,
+ .alloc_ucontext = mlx4_ib_alloc_ucontext,
+ .attach_mcast = mlx4_ib_mcg_attach,
+ .create_ah = mlx4_ib_create_ah,
+ .create_cq = mlx4_ib_create_cq,
+ .create_qp = mlx4_ib_create_qp,
+ .create_srq = mlx4_ib_create_srq,
+ .dealloc_pd = mlx4_ib_dealloc_pd,
+ .dealloc_ucontext = mlx4_ib_dealloc_ucontext,
+ .del_gid = mlx4_ib_del_gid,
+ .dereg_mr = mlx4_ib_dereg_mr,
+ .destroy_ah = mlx4_ib_destroy_ah,
+ .destroy_cq = mlx4_ib_destroy_cq,
+ .destroy_qp = mlx4_ib_destroy_qp,
+ .destroy_srq = mlx4_ib_destroy_srq,
+ .detach_mcast = mlx4_ib_mcg_detach,
+ .device_group = &mlx4_attr_group,
+ .disassociate_ucontext = mlx4_ib_disassociate_ucontext,
+ .drain_rq = mlx4_ib_drain_rq,
+ .drain_sq = mlx4_ib_drain_sq,
+ .get_dev_fw_str = get_fw_ver_str,
+ .get_dma_mr = mlx4_ib_get_dma_mr,
+ .get_link_layer = mlx4_ib_port_link_layer,
+ .get_netdev = mlx4_ib_get_netdev,
+ .get_port_immutable = mlx4_port_immutable,
+ .map_mr_sg = mlx4_ib_map_mr_sg,
+ .mmap = mlx4_ib_mmap,
+ .modify_cq = mlx4_ib_modify_cq,
+ .modify_device = mlx4_ib_modify_device,
+ .modify_port = mlx4_ib_modify_port,
+ .modify_qp = mlx4_ib_modify_qp,
+ .modify_srq = mlx4_ib_modify_srq,
+ .poll_cq = mlx4_ib_poll_cq,
+ .post_recv = mlx4_ib_post_recv,
+ .post_send = mlx4_ib_post_send,
+ .post_srq_recv = mlx4_ib_post_srq_recv,
+ .process_mad = mlx4_ib_process_mad,
+ .query_ah = mlx4_ib_query_ah,
+ .query_device = mlx4_ib_query_device,
+ .query_gid = mlx4_ib_query_gid,
+ .query_pkey = mlx4_ib_query_pkey,
+ .query_port = mlx4_ib_query_port,
+ .query_qp = mlx4_ib_query_qp,
+ .query_srq = mlx4_ib_query_srq,
+ .reg_user_mr = mlx4_ib_reg_user_mr,
+ .req_notify_cq = mlx4_ib_arm_cq,
+ .rereg_user_mr = mlx4_ib_rereg_user_mr,
+ .resize_cq = mlx4_ib_resize_cq,
+ .report_port_event = mlx4_ib_port_event,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, mlx4_ib_qp, ibqp),
+ INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
+};
+
+static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
+ .create_rwq_ind_table = mlx4_ib_create_rwq_ind_table,
+ .create_wq = mlx4_ib_create_wq,
+ .destroy_rwq_ind_table = mlx4_ib_destroy_rwq_ind_table,
+ .destroy_wq = mlx4_ib_destroy_wq,
+ .modify_wq = mlx4_ib_modify_wq,
+
+ INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx4_ib_rwq_ind_table,
+ ib_rwq_ind_tbl),
+};
+
+static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
+ .alloc_mw = mlx4_ib_alloc_mw,
+ .dealloc_mw = mlx4_ib_dealloc_mw,
+
+ INIT_RDMA_OBJ_SIZE(ib_mw, mlx4_ib_mw, ibmw),
+};
+
+static const struct ib_device_ops mlx4_ib_dev_xrc_ops = {
+ .alloc_xrcd = mlx4_ib_alloc_xrcd,
+ .dealloc_xrcd = mlx4_ib_dealloc_xrcd,
+
+ INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx4_ib_xrcd, ibxrcd),
+};
+
+static const struct ib_device_ops mlx4_ib_dev_fs_ops = {
+ .create_flow = mlx4_ib_create_flow,
+ .destroy_flow = mlx4_ib_destroy_flow,
+};
+
+static int mlx4_ib_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct mlx4_adev *madev = container_of(adev, struct mlx4_adev, adev);
+ struct mlx4_dev *dev = madev->mdev;
struct mlx4_ib_dev *ibdev;
int num_ports = 0;
int i, j;
@@ -2131,6 +2624,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
int num_req_counters;
int allocated;
u32 counter_index;
+ struct counter_index *new_counter_index;
pr_info_once("%s", mlx4_ib_version);
@@ -2140,167 +2634,84 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
/* No point in registering a device with no ports... */
if (num_ports == 0)
- return NULL;
+ return -ENODEV;
- ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
+ ibdev = ib_alloc_device(mlx4_ib_dev, ib_dev);
if (!ibdev) {
dev_err(&dev->persist->pdev->dev,
"Device struct alloc failed\n");
- return NULL;
+ return -ENOMEM;
}
iboe = &ibdev->iboe;
- if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
+ err = mlx4_pd_alloc(dev, &ibdev->priv_pdn);
+ if (err)
goto err_dealloc;
- if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
+ err = mlx4_uar_alloc(dev, &ibdev->priv_uar);
+ if (err)
goto err_pd;
ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
PAGE_SIZE);
- if (!ibdev->uar_map)
+ if (!ibdev->uar_map) {
+ err = -ENOMEM;
goto err_uar;
+ }
MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
ibdev->dev = dev;
ibdev->bond_next_port = 0;
- strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
- ibdev->ib_dev.owner = THIS_MODULE;
ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
ibdev->num_ports = num_ports;
ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ?
1 : ibdev->num_ports;
ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
- ibdev->ib_dev.dma_device = &dev->persist->pdev->dev;
- ibdev->ib_dev.get_netdev = mlx4_ib_get_netdev;
- ibdev->ib_dev.add_gid = mlx4_ib_add_gid;
- ibdev->ib_dev.del_gid = mlx4_ib_del_gid;
+ ibdev->ib_dev.dev.parent = &dev->persist->pdev->dev;
- if (dev->caps.userspace_caps)
- ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
- else
- ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
-
- ibdev->ib_dev.uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_REREG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
- (1ull << IB_USER_VERBS_CMD_OPEN_QP);
-
- ibdev->ib_dev.query_device = mlx4_ib_query_device;
- ibdev->ib_dev.query_port = mlx4_ib_query_port;
- ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer;
- ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
- ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
- ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
- ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
- ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
- ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
- ibdev->ib_dev.mmap = mlx4_ib_mmap;
- ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
- ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd;
- ibdev->ib_dev.create_ah = mlx4_ib_create_ah;
- ibdev->ib_dev.query_ah = mlx4_ib_query_ah;
- ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah;
- ibdev->ib_dev.create_srq = mlx4_ib_create_srq;
- ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq;
- ibdev->ib_dev.query_srq = mlx4_ib_query_srq;
- ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq;
- ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv;
- ibdev->ib_dev.create_qp = mlx4_ib_create_qp;
- ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
- ibdev->ib_dev.query_qp = mlx4_ib_query_qp;
- ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
- ibdev->ib_dev.post_send = mlx4_ib_post_send;
- ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
- ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
- ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq;
- ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq;
- ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
- ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
- ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
- ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
- ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
- ibdev->ib_dev.rereg_user_mr = mlx4_ib_rereg_user_mr;
- ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
- ibdev->ib_dev.alloc_mr = mlx4_ib_alloc_mr;
- ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
- ibdev->ib_dev.free_fast_reg_page_list = mlx4_ib_free_fast_reg_page_list;
- ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
- ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
- ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
- ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
- ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;
-
- if (!mlx4_is_slave(ibdev->dev)) {
- ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
- ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
- ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
- ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;
- }
+ ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops);
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
- dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
- ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
- ibdev->ib_dev.bind_mw = mlx4_ib_bind_mw;
- ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;
+ if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
+ ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
+ IB_LINK_LAYER_ETHERNET) ||
+ (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
+ IB_LINK_LAYER_ETHERNET)))
+ ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops);
- ibdev->ib_dev.uverbs_cmd_mask |=
- (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
- }
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
+ dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
+ ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_mw_ops);
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
- ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
- ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
- ibdev->ib_dev.uverbs_cmd_mask |=
- (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
- (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
+ ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_xrc_ops);
}
if (check_flow_steering_support(dev)) {
ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
- ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
- ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
-
- ibdev->ib_dev.uverbs_ex_cmd_mask |=
- (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
- (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
+ ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fs_ops);
}
- ibdev->ib_dev.uverbs_ex_cmd_mask |=
- (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ);
+ if (!dev->caps.userspace_caps)
+ ibdev->ib_dev.ops.uverbs_abi_ver =
+ MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
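The removed per-field assignments above are replaced by const struct ib_device_ops tables applied with ib_set_device_ops(); optional feature sets (WQs, memory windows, XRC, flow steering) each get their own small table that is layered on only when the corresponding capability is present. A minimal sketch of that pattern, not part of the patch, with illustrative table names (the real mlx4_ib_dev_ops is defined earlier in main.c):

static const struct ib_device_ops example_base_ops = {
	.owner		= THIS_MODULE,
	.uverbs_abi_ver	= MLX4_IB_UVERBS_ABI_VERSION,
	.query_device	= mlx4_ib_query_device,
	.query_port	= mlx4_ib_query_port,
	.create_qp	= mlx4_ib_create_qp,
	/* ... one member per verb the driver implements ... */
};

static const struct ib_device_ops example_mw_ops = {
	.alloc_mw	= mlx4_ib_alloc_mw,
	.dealloc_mw	= mlx4_ib_dealloc_mw,
};

static void example_set_ops(struct mlx4_ib_dev *ibdev, struct mlx4_dev *dev)
{
	ib_set_device_ops(&ibdev->ib_dev, &example_base_ops);

	/* feature tables extend or override the base table when supported */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
		ib_set_device_ops(&ibdev->ib_dev, &example_mw_ops);
}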
mlx4_ib_alloc_eqs(dev, ibdev);
spin_lock_init(&iboe->lock);
- if (init_node_data(ibdev))
+ err = init_node_data(ibdev);
+ if (err)
goto err_map;
+ mlx4_init_sl2vl_tbl(ibdev);
+
+ for (i = 0; i < ibdev->num_ports; ++i) {
+ mutex_init(&ibdev->counters_table[i].mutex);
+ INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
+ iboe->last_port_state[i] = IB_PORT_DOWN;
+ }
num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
for (i = 0; i < num_req_counters; ++i) {
@@ -2308,7 +2719,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
allocated = 0;
if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
IB_LINK_LAYER_ETHERNET) {
- err = mlx4_counter_alloc(ibdev->dev, &counter_index);
+ err = mlx4_counter_alloc(ibdev->dev, &counter_index,
+ MLX4_RES_USAGE_DRIVER);
/* if failed to allocate a new counter, use default */
if (err)
counter_index =
@@ -2320,15 +2732,37 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
counter_index = mlx4_get_default_counter_index(dev,
i + 1);
}
- ibdev->counters[i].index = counter_index;
- ibdev->counters[i].allocated = allocated;
+ new_counter_index = kmalloc(sizeof(*new_counter_index),
+ GFP_KERNEL);
+ if (!new_counter_index) {
+ err = -ENOMEM;
+ if (allocated)
+ mlx4_counter_free(ibdev->dev, counter_index);
+ goto err_counter;
+ }
+ new_counter_index->index = counter_index;
+ new_counter_index->allocated = allocated;
+ list_add_tail(&new_counter_index->list,
+ &ibdev->counters_table[i].counters_list);
+ ibdev->counters_table[i].default_counter = counter_index;
pr_info("counter index %d for port %d allocated %d\n",
counter_index, i + 1, allocated);
}
if (mlx4_is_bonded(dev))
for (i = 1; i < ibdev->num_ports ; ++i) {
- ibdev->counters[i].index = ibdev->counters[0].index;
- ibdev->counters[i].allocated = 0;
+ new_counter_index =
+ kmalloc(sizeof(struct counter_index),
+ GFP_KERNEL);
+ if (!new_counter_index) {
+ err = -ENOMEM;
+ goto err_counter;
+ }
+ new_counter_index->index = counter_index;
+ new_counter_index->allocated = 0;
+ list_add_tail(&new_counter_index->list,
+ &ibdev->counters_table[i].counters_list);
+ ibdev->counters_table[i].default_counter =
+ counter_index;
}
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
@@ -2344,60 +2778,71 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
MLX4_IB_UC_STEER_QPN_ALIGN,
- &ibdev->steer_qpn_base, 0);
+ &ibdev->steer_qpn_base, 0,
+ MLX4_RES_USAGE_DRIVER);
if (err)
goto err_counter;
- ibdev->ib_uc_qpns_bitmap =
- kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
- sizeof(long),
- GFP_KERNEL);
+ ibdev->ib_uc_qpns_bitmap = bitmap_alloc(ibdev->steer_qpn_count,
+ GFP_KERNEL);
if (!ibdev->ib_uc_qpns_bitmap) {
- dev_err(&dev->persist->pdev->dev,
- "bit map alloc failed\n");
+ err = -ENOMEM;
goto err_steer_qp_release;
}
- bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
-
- err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
- dev, ibdev->steer_qpn_base,
- ibdev->steer_qpn_base +
- ibdev->steer_qpn_count - 1);
- if (err)
- goto err_steer_free_bitmap;
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
+ bitmap_zero(ibdev->ib_uc_qpns_bitmap,
+ ibdev->steer_qpn_count);
+ err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
+ dev, ibdev->steer_qpn_base,
+ ibdev->steer_qpn_base +
+ ibdev->steer_qpn_count - 1);
+ if (err)
+ goto err_steer_free_bitmap;
+ } else {
+ bitmap_fill(ibdev->ib_uc_qpns_bitmap,
+ ibdev->steer_qpn_count);
+ }
}
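For reference, bitmap_alloc()/bitmap_free() used in the steering-QP hunk above replace the open-coded kmalloc(BITS_TO_LONGS(n) * sizeof(long)) allocation that was removed. A short sketch of the <linux/bitmap.h> idiom, with illustrative names:

static int example_alloc_qpn_map(unsigned int nbits, unsigned long **out)
{
	unsigned long *map;

	map = bitmap_alloc(nbits, GFP_KERNEL);	/* bitmap_zalloc() would also zero it */
	if (!map)
		return -ENOMEM;
	bitmap_zero(map, nbits);		/* or bitmap_fill(), as in the non-DMFS branch above */
	*out = map;
	return 0;
}

/* teardown or error path: bitmap_free(map); */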
for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
- if (ib_register_device(&ibdev->ib_dev, NULL))
+ err = mlx4_ib_alloc_diag_counters(ibdev);
+ if (err)
goto err_steer_free_bitmap;
- if (mlx4_ib_mad_init(ibdev))
+ err = ib_register_device(&ibdev->ib_dev, "mlx4_%d",
+ &dev->persist->pdev->dev);
+ if (err)
+ goto err_diag_counters;
+
+ err = mlx4_ib_mad_init(ibdev);
+ if (err)
goto err_reg;
- if (mlx4_ib_init_sriov(ibdev))
+ err = mlx4_ib_init_sriov(ibdev);
+ if (err)
goto err_mad;
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) {
- if (!iboe->nb.notifier_call) {
- iboe->nb.notifier_call = mlx4_ib_netdev_event;
- err = register_netdevice_notifier(&iboe->nb);
- if (err) {
- iboe->nb.notifier_call = NULL;
- goto err_notif;
- }
+ if (!iboe->nb.notifier_call) {
+ iboe->nb.notifier_call = mlx4_ib_netdev_event;
+ err = register_netdevice_notifier(&iboe->nb);
+ if (err) {
+ iboe->nb.notifier_call = NULL;
+ goto err_notif;
}
}
-
- for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
- if (device_create_file(&ibdev->ib_dev.dev,
- mlx4_class_attributes[j]))
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
+ err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
+ if (err)
goto err_notif;
}
ibdev->ib_active = true;
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
+ &ibdev->ib_dev);
if (mlx4_is_mfunc(ibdev->dev))
init_pkeys(ibdev);
@@ -2411,7 +2856,14 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
do_slave_init(ibdev, j, 1);
}
}
- return ibdev;
+
+ /* register mlx4 core notifier */
+ ibdev->mlx_nb.notifier_call = mlx4_ib_event;
+ err = mlx4_register_event_notifier(dev, &ibdev->mlx_nb);
+ WARN(err, "failed to register mlx4 event notifier (%d)", err);
+
+ auxiliary_set_drvdata(adev, ibdev);
+ return 0;
err_notif:
if (ibdev->iboe.nb.notifier_call) {
@@ -2429,21 +2881,21 @@ err_mad:
err_reg:
ib_unregister_device(&ibdev->ib_dev);
+err_diag_counters:
+ mlx4_ib_diag_cleanup(ibdev);
+
err_steer_free_bitmap:
- kfree(ibdev->ib_uc_qpns_bitmap);
+ bitmap_free(ibdev->ib_uc_qpns_bitmap);
err_steer_qp_release:
- if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
- mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
- ibdev->steer_qpn_count);
+ mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+ ibdev->steer_qpn_count);
err_counter:
- for (i = 0; i < ibdev->num_ports; ++i) {
- if (ibdev->counters[i].index != -1 &&
- ibdev->counters[i].allocated)
- mlx4_counter_free(ibdev->dev,
- ibdev->counters[i].index);
- }
+ for (i = 0; i < ibdev->num_ports; ++i)
+ mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
+
err_map:
+ mlx4_ib_free_eqs(dev, ibdev);
iounmap(ibdev->uar_map);
err_uar:
@@ -2455,7 +2907,7 @@ err_pd:
err_dealloc:
ib_dealloc_device(&ibdev->ib_dev);
- return NULL;
+ return err;
}
int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
@@ -2480,7 +2932,10 @@ void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
return;
- BUG_ON(qpn < dev->steer_qpn_base);
+ if (WARN(qpn < dev->steer_qpn_base, "qpn = %u, steer_qpn_base = %u\n",
+ qpn, dev->steer_qpn_base))
+ /* not supposed to be here */
+ return;
bitmap_release_region(dev->ib_uc_qpns_bitmap,
qpn - dev->steer_qpn_base,
@@ -2492,7 +2947,7 @@ int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
{
int err;
size_t flow_size;
- struct ib_flow_attr *flow = NULL;
+ struct ib_flow_attr *flow;
struct ib_flow_spec_ib *ib_spec;
if (is_attach) {
@@ -2510,45 +2965,49 @@ int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
/* Add an empty rule for IB L2 */
memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
- err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
- IB_FLOW_DOMAIN_NIC,
- MLX4_FS_REGULAR,
- &mqp->reg_id);
- } else {
- err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
+ err = __mlx4_ib_create_flow(&mqp->ibqp, flow, MLX4_DOMAIN_NIC,
+ MLX4_FS_REGULAR, &mqp->reg_id);
+ kfree(flow);
+ return err;
}
- kfree(flow);
- return err;
+
+ return __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
}
-static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
+static void mlx4_ib_remove(struct auxiliary_device *adev)
{
- struct mlx4_ib_dev *ibdev = ibdev_ptr;
+ struct mlx4_adev *madev = container_of(adev, struct mlx4_adev, adev);
+ struct mlx4_dev *dev = madev->mdev;
+ struct mlx4_ib_dev *ibdev = auxiliary_get_drvdata(adev);
int p;
+ int i;
+ mlx4_unregister_event_notifier(dev, &ibdev->mlx_nb);
+
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
ibdev->ib_active = false;
flush_workqueue(wq);
- mlx4_ib_close_sriov(ibdev);
- mlx4_ib_mad_cleanup(ibdev);
- ib_unregister_device(&ibdev->ib_dev);
if (ibdev->iboe.nb.notifier_call) {
if (unregister_netdevice_notifier(&ibdev->iboe.nb))
pr_warn("failure unregistering notifier\n");
ibdev->iboe.nb.notifier_call = NULL;
}
- if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
- mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
- ibdev->steer_qpn_count);
- kfree(ibdev->ib_uc_qpns_bitmap);
- }
+ mlx4_ib_close_sriov(ibdev);
+ mlx4_ib_mad_cleanup(ibdev);
+ ib_unregister_device(&ibdev->ib_dev);
+ mlx4_ib_diag_cleanup(ibdev);
+
+ mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+ ibdev->steer_qpn_count);
+ bitmap_free(ibdev->ib_uc_qpns_bitmap);
iounmap(ibdev->uar_map);
for (p = 0; p < ibdev->num_ports; ++p)
- if (ibdev->counters[p].index != -1 &&
- ibdev->counters[p].allocated)
- mlx4_counter_free(ibdev->dev, ibdev->counters[p].index);
+ mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);
+
mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
mlx4_CLOSE_PORT(dev, p);
@@ -2561,7 +3020,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
{
- struct mlx4_ib_demux_work **dm = NULL;
+ struct mlx4_ib_demux_work **dm;
struct mlx4_dev *dev = ibdev->dev;
int i;
unsigned long flags;
@@ -2577,15 +3036,12 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
- if (!dm) {
- pr_err("failed to allocate memory for tunneling qp update\n");
+ if (!dm)
return;
- }
for (i = 0; i < ports; i++) {
dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
if (!dm[i]) {
- pr_err("failed to allocate memory for tunneling qp update work struct\n");
while (--i >= 0)
kfree(dm[i]);
goto out;
@@ -2709,11 +3165,52 @@ static void handle_bonded_port_state_event(struct work_struct *work)
ib_dispatch_event(&ibev);
}
-static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
- enum mlx4_dev_event event, unsigned long param)
+void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
+{
+ u64 sl2vl;
+ int err;
+
+ err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
+ if (err) {
+ pr_err("Unable to get current sl to vl mapping for port %d. Using all zeroes (%d)\n",
+ port, err);
+ sl2vl = 0;
+ }
+ atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
+}
+
+static void ib_sl2vl_update_work(struct work_struct *work)
+{
+ struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
+ struct mlx4_ib_dev *mdev = ew->ib_dev;
+ int port = ew->port;
+
+ mlx4_ib_sl2vl_update(mdev, port);
+
+ kfree(ew);
+}
+
+void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
+ int port)
+{
+ struct ib_event_work *ew;
+
+ ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
+ if (ew) {
+ INIT_WORK(&ew->work, ib_sl2vl_update_work);
+ ew->port = port;
+ ew->ib_dev = ibdev;
+ queue_work(wq, &ew->work);
+ }
+}
+
+static int mlx4_ib_event(struct notifier_block *this, unsigned long event,
+ void *param)
{
+ struct mlx4_ib_dev *ibdev =
+ container_of(this, struct mlx4_ib_dev, mlx_nb);
+ struct mlx4_dev *dev = ibdev->dev;
struct ib_event ibev;
- struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
struct mlx4_eqe *eqe = NULL;
struct ib_event_work *ew;
int p = 0;
@@ -2723,33 +3220,43 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
(event == MLX4_DEV_EVENT_PORT_DOWN))) {
ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
if (!ew)
- return;
+ return NOTIFY_DONE;
INIT_WORK(&ew->work, handle_bonded_port_state_event);
ew->ib_dev = ibdev;
queue_work(wq, &ew->work);
- return;
+ return NOTIFY_DONE;
}
- if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
+ switch (event) {
+ case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
+ break;
+ case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
eqe = (struct mlx4_eqe *)param;
- else
- p = (int) param;
+ break;
+ default:
+ p = *(int *)param;
+ break;
+ }
switch (event) {
case MLX4_DEV_EVENT_PORT_UP:
if (p > ibdev->num_ports)
- return;
- if (mlx4_is_master(dev) &&
+ return NOTIFY_DONE;
+ if (!mlx4_is_slave(dev) &&
rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
IB_LINK_LAYER_INFINIBAND) {
- mlx4_ib_invalidate_all_guid_record(ibdev, p);
+ if (mlx4_is_master(dev))
+ mlx4_ib_invalidate_all_guid_record(ibdev, p);
+ if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
+ !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
+ mlx4_sched_ib_sl2vl_update_work(ibdev, p);
}
ibev.event = IB_EVENT_PORT_ACTIVE;
break;
case MLX4_DEV_EVENT_PORT_DOWN:
if (p > ibdev->num_ports)
- return;
+ return NOTIFY_DONE;
ibev.event = IB_EVENT_PORT_ERR;
break;
@@ -2761,10 +3268,8 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
ew = kmalloc(sizeof *ew, GFP_ATOMIC);
- if (!ew) {
- pr_err("failed to allocate memory for events work\n");
- break;
- }
+ if (!ew)
+ return NOTIFY_DONE;
INIT_WORK(&ew->work, handle_port_mgmt_change_event);
memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
@@ -2774,7 +3279,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
queue_work(wq, &ew->work);
else
handle_port_mgmt_change_event(&ew->work);
- return;
+ return NOTIFY_DONE;
case MLX4_DEV_EVENT_SLAVE_INIT:
/* here, p is the slave id */
@@ -2790,7 +3295,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
1);
}
}
- return;
+ return NOTIFY_DONE;
case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
if (mlx4_is_master(dev)) {
@@ -2806,22 +3311,33 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
}
/* here, p is the slave id */
do_slave_init(ibdev, p, 0);
- return;
+ return NOTIFY_DONE;
default:
- return;
+ return NOTIFY_DONE;
}
- ibev.device = ibdev_ptr;
+ ibev.device = &ibdev->ib_dev;
ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
ib_dispatch_event(&ibev);
+ return NOTIFY_DONE;
}
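The event hook is no longer a mlx4_interface callback taking an opaque device pointer; it is a standard notifier_block callback that receives the event code plus a payload pointer and returns a NOTIFY_* value. A stripped-down sketch of that shape, with illustrative struct and field names:

struct example_ib_dev {
	struct ib_device	ib_dev;
	struct notifier_block	nb;
};

static int example_event(struct notifier_block *nb, unsigned long event,
			 void *param)
{
	struct example_ib_dev *edev = container_of(nb, struct example_ib_dev, nb);
	struct ib_event ibev = { .device = &edev->ib_dev };

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		ibev.event = IB_EVENT_PORT_ACTIVE;
		ibev.element.port_num = *(int *)param;
		break;
	default:
		return NOTIFY_DONE;	/* nothing to translate for this event */
	}

	ib_dispatch_event(&ibev);
	return NOTIFY_DONE;
}

/* registered from the probe path, as above: mlx4_register_event_notifier(dev, &edev->nb); */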
-static struct mlx4_interface mlx4_ib_interface = {
- .add = mlx4_ib_add,
- .remove = mlx4_ib_remove,
- .event = mlx4_ib_event,
+static const struct auxiliary_device_id mlx4_ib_id_table[] = {
+ { .name = MLX4_ADEV_NAME ".ib" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, mlx4_ib_id_table);
+
+static struct mlx4_adrv mlx4_ib_adrv = {
+ .adrv = {
+ .name = "ib",
+ .probe = mlx4_ib_probe,
+ .remove = mlx4_ib_remove,
+ .id_table = mlx4_ib_id_table,
+ },
.protocol = MLX4_PROT_IB_IPV6,
.flags = MLX4_INTFF_BONDING
};
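struct mlx4_adrv wraps a plain auxiliary_driver together with the protocol and flags that used to live in struct mlx4_interface. The generic auxiliary-bus skeleton underneath looks roughly like the sketch below; illustrative only, since mlx4_register_auxiliary_driver() adds mlx4-specific handling on top of the core API:

#include <linux/auxiliary_bus.h>
#include <linux/module.h>
#include <linux/slab.h>

static int example_probe(struct auxiliary_device *adev,
			 const struct auxiliary_device_id *id)
{
	void *priv = kzalloc(16, GFP_KERNEL);	/* driver-private state */

	if (!priv)
		return -ENOMEM;
	auxiliary_set_drvdata(adev, priv);
	return 0;
}

static void example_remove(struct auxiliary_device *adev)
{
	kfree(auxiliary_get_drvdata(adev));
}

static const struct auxiliary_device_id example_id_table[] = {
	{ .name = MLX4_ADEV_NAME ".ib" },	/* "<parent module>.<device name>" matching */
	{}
};
MODULE_DEVICE_TABLE(auxiliary, example_id_table);

static struct auxiliary_driver example_adrv = {
	.name		= "ib",
	.probe		= example_probe,
	.remove		= example_remove,
	.id_table	= example_id_table,
};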
@@ -2830,15 +3346,23 @@ static int __init mlx4_ib_init(void)
{
int err;
- wq = create_singlethread_workqueue("mlx4_ib");
+ wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
if (!wq)
return -ENOMEM;
- err = mlx4_ib_mcg_init();
+ err = mlx4_ib_qp_event_init();
+ if (err)
+ goto clean_qp_event;
+
+ err = mlx4_ib_cm_init();
if (err)
goto clean_wq;
- err = mlx4_register_interface(&mlx4_ib_interface);
+ err = mlx4_ib_mcg_init();
+ if (err)
+ goto clean_cm;
+
+ err = mlx4_register_auxiliary_driver(&mlx4_ib_adrv);
if (err)
goto clean_mcg;
@@ -2847,15 +3371,23 @@ static int __init mlx4_ib_init(void)
clean_mcg:
mlx4_ib_mcg_destroy();
+clean_cm:
+ mlx4_ib_cm_destroy();
+
clean_wq:
+ mlx4_ib_qp_event_cleanup();
+
+clean_qp_event:
destroy_workqueue(wq);
return err;
}
static void __exit mlx4_ib_cleanup(void)
{
- mlx4_unregister_interface(&mlx4_ib_interface);
+ mlx4_unregister_auxiliary_driver(&mlx4_ib_adrv);
mlx4_ib_mcg_destroy();
+ mlx4_ib_cm_destroy();
+ mlx4_ib_qp_event_cleanup();
destroy_workqueue(wq);
}
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 2d5bccd71fc6..e279e69b9a51 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -43,7 +43,7 @@
#define MAX_VFS 80
#define MAX_PEND_REQS_PER_FUNC 4
-#define MAD_TIMEOUT_MS 2000
+#define MAD_TIMEOUT_SEC 2
#define mcg_warn(fmt, arg...) pr_warn("MCG WARNING: " fmt, ##arg)
#define mcg_error(fmt, arg...) pr_err(fmt, ##arg)
@@ -96,7 +96,7 @@ struct ib_sa_mcmember_data {
u8 scope_join_state;
u8 proxy_join;
u8 reserved[2];
-};
+} __packed __aligned(4);
struct mcast_group {
struct ib_sa_mcmember_data rec;
@@ -209,7 +209,7 @@ static struct mcast_group *mcast_insert(struct mlx4_ib_demux_ctx *ctx,
static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
{
struct mlx4_ib_dev *dev = ctx->dev;
- struct ib_ah_attr ah_attr;
+ struct rdma_ah_attr ah_attr;
unsigned long flags;
spin_lock_irqsave(&dev->sm_lock, flags);
@@ -222,7 +222,7 @@ static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
spin_unlock_irqrestore(&dev->sm_lock, flags);
return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev),
ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY,
- &ah_attr, NULL, mad);
+ &ah_attr, NULL, 0xffff, mad);
}
static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx,
@@ -231,20 +231,20 @@ static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx,
struct mlx4_ib_dev *dev = ctx->dev;
struct ib_mad_agent *agent = dev->send_agent[ctx->port - 1][1];
struct ib_wc wc;
- struct ib_ah_attr ah_attr;
+ struct rdma_ah_attr ah_attr;
/* Our agent might not yet be registered when mads start to arrive */
if (!agent)
return -EAGAIN;
- ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
+ rdma_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
if (ib_find_cached_pkey(&dev->ib_dev, ctx->port, IB_DEFAULT_PKEY_FULL, &wc.pkey_index))
return -EINVAL;
wc.sl = 0;
wc.dlid_path_bits = 0;
wc.port_num = ctx->port;
- wc.slid = ah_attr.dlid; /* opensm lid */
+ wc.slid = rdma_ah_get_dlid(&ah_attr); /* opensm lid */
wc.src_qp = 1;
return mlx4_ib_send_to_slave(dev, slave, ctx->port, IB_QPT_GSI, &wc, NULL, mad);
}
@@ -270,7 +270,7 @@ static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad
if (!ret) {
/* calls mlx4_ib_mcg_timeout_handler */
queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
- msecs_to_jiffies(MAD_TIMEOUT_MS));
+ secs_to_jiffies(MAD_TIMEOUT_SEC));
}
return ret;
@@ -309,7 +309,7 @@ static int send_leave_to_wire(struct mcast_group *group, u8 join_state)
if (!ret) {
/* calls mlx4_ib_mcg_timeout_handler */
queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
- msecs_to_jiffies(MAD_TIMEOUT_MS));
+ secs_to_jiffies(MAD_TIMEOUT_SEC));
}
return ret;
@@ -489,7 +489,7 @@ static u8 get_leave_state(struct mcast_group *group)
if (!group->members[i])
leave_state |= (1 << i);
- return leave_state & (group->rec.scope_join_state & 7);
+ return leave_state & (group->rec.scope_join_state & 0xf);
}
static int join_group(struct mcast_group *group, int slave, u8 join_mask)
@@ -564,8 +564,8 @@ static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
} else
mcg_warn_group(group, "DRIVER BUG\n");
} else if (group->state == MCAST_LEAVE_SENT) {
- if (group->rec.scope_join_state & 7)
- group->rec.scope_join_state &= 0xf8;
+ if (group->rec.scope_join_state & 0xf)
+ group->rec.scope_join_state &= 0xf0;
group->state = MCAST_IDLE;
mutex_unlock(&group->lock);
if (release_group(group, 1))
@@ -605,7 +605,7 @@ static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
static int handle_join_req(struct mcast_group *group, u8 join_mask,
struct mcast_req *req)
{
- u8 group_join_state = group->rec.scope_join_state & 7;
+ u8 group_join_state = group->rec.scope_join_state & 0xf;
int ref = 0;
u16 status;
struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
@@ -673,7 +673,7 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work)
if (!list_empty(&group->pending_list))
req = list_first_entry(&group->pending_list,
struct mcast_req, group_list);
- if ((method == IB_MGMT_METHOD_GET_RESP)) {
+ if (method == IB_MGMT_METHOD_GET_RESP) {
if (req) {
send_reply_to_slave(req->func, group, &req->sa_mad, status);
--group->func[req->func].num_pend_reqs;
@@ -690,8 +690,8 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work)
u8 cur_join_state;
resp_join_state = ((struct ib_sa_mcmember_data *)
- group->response_sa_mad.data)->scope_join_state & 7;
- cur_join_state = group->rec.scope_join_state & 7;
+ group->response_sa_mad.data)->scope_join_state & 0xf;
+ cur_join_state = group->rec.scope_join_state & 0xf;
if (method == IB_MGMT_METHOD_GET_RESP) {
			/* successful join */
@@ -710,7 +710,7 @@ process_requests:
req = list_first_entry(&group->pending_list, struct mcast_req,
group_list);
sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
- req_join_state = sa_data->scope_join_state & 0x7;
+ req_join_state = sa_data->scope_join_state & 0xf;
/* For a leave request, we will immediately answer the VF, and
* update our internal counters. The actual leave will be sent
@@ -747,14 +747,11 @@ static struct mcast_group *search_relocate_mgid0_group(struct mlx4_ib_demux_ctx
__be64 tid,
union ib_gid *new_mgid)
{
- struct mcast_group *group = NULL, *cur_group;
+ struct mcast_group *group = NULL, *cur_group, *n;
struct mcast_req *req;
- struct list_head *pos;
- struct list_head *n;
mutex_lock(&ctx->mcg_table_lock);
- list_for_each_safe(pos, n, &ctx->mcg_mgid0_list) {
- group = list_entry(pos, struct mcast_group, mgid0_list);
+ list_for_each_entry_safe(group, n, &ctx->mcg_mgid0_list, mgid0_list) {
mutex_lock(&group->lock);
if (group->last_req_tid == tid) {
if (memcmp(new_mgid, &mgid0, sizeof mgid0)) {
@@ -811,8 +808,7 @@ static ssize_t sysfs_show_group(struct device *dev,
struct device_attribute *attr, char *buf);
static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
- union ib_gid *mgid, int create,
- gfp_t gfp_mask)
+ union ib_gid *mgid, int create)
{
struct mcast_group *group, *cur_group;
int is_mgid0;
@@ -828,7 +824,7 @@ static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
if (!create)
return ERR_PTR(-ENOENT);
- group = kzalloc(sizeof *group, gfp_mask);
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
if (!group)
return ERR_PTR(-ENOMEM);
@@ -895,7 +891,7 @@ int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
case IB_MGMT_METHOD_GET_RESP:
case IB_SA_METHOD_DELETE_RESP:
mutex_lock(&ctx->mcg_table_lock);
- group = acquire_group(ctx, &rec->mgid, 0, GFP_KERNEL);
+ group = acquire_group(ctx, &rec->mgid, 0);
mutex_unlock(&ctx->mcg_table_lock);
if (IS_ERR(group)) {
if (mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP) {
@@ -948,6 +944,7 @@ int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
switch (sa_mad->mad_hdr.method) {
case IB_MGMT_METHOD_SET:
may_create = 1;
+ fallthrough;
case IB_SA_METHOD_DELETE:
req = kzalloc(sizeof *req, GFP_KERNEL);
if (!req)
@@ -957,7 +954,7 @@ int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
req->sa_mad = *sa_mad;
mutex_lock(&ctx->mcg_table_lock);
- group = acquire_group(ctx, &rec->mgid, may_create, GFP_KERNEL);
+ group = acquire_group(ctx, &rec->mgid, may_create);
mutex_unlock(&ctx->mcg_table_lock);
if (IS_ERR(group)) {
kfree(req);
@@ -991,53 +988,63 @@ int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
}
static ssize_t sysfs_show_group(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct mcast_group *group =
container_of(attr, struct mcast_group, dentry);
struct mcast_req *req = NULL;
- char pending_str[40];
char state_str[40];
- ssize_t len = 0;
- int f;
+ char pending_str[40];
+ int len;
+ int i;
+ u32 hoplimit;
if (group->state == MCAST_IDLE)
- sprintf(state_str, "%s", get_state_string(group->state));
+ scnprintf(state_str, sizeof(state_str), "%s",
+ get_state_string(group->state));
else
- sprintf(state_str, "%s(TID=0x%llx)",
- get_state_string(group->state),
- be64_to_cpu(group->last_req_tid));
+ scnprintf(state_str, sizeof(state_str), "%s(TID=0x%llx)",
+ get_state_string(group->state),
+ be64_to_cpu(group->last_req_tid));
+
if (list_empty(&group->pending_list)) {
- sprintf(pending_str, "No");
+ scnprintf(pending_str, sizeof(pending_str), "No");
} else {
- req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
- sprintf(pending_str, "Yes(TID=0x%llx)",
- be64_to_cpu(req->sa_mad.mad_hdr.tid));
+ req = list_first_entry(&group->pending_list, struct mcast_req,
+ group_list);
+ scnprintf(pending_str, sizeof(pending_str), "Yes(TID=0x%llx)",
+ be64_to_cpu(req->sa_mad.mad_hdr.tid));
+ }
+
+ len = sysfs_emit(buf, "%1d [%02d,%02d,%02d] %4d %4s %5s ",
+ group->rec.scope_join_state & 0xf,
+ group->members[2],
+ group->members[1],
+ group->members[0],
+ atomic_read(&group->refcount),
+ pending_str,
+ state_str);
+
+ for (i = 0; i < MAX_VFS; i++) {
+ if (group->func[i].state == MCAST_MEMBER)
+ len += sysfs_emit_at(buf, len, "%d[%1x] ", i,
+ group->func[i].join_state);
}
- len += sprintf(buf + len, "%1d [%02d,%02d,%02d] %4d %4s %5s ",
- group->rec.scope_join_state & 0xf,
- group->members[2], group->members[1], group->members[0],
- atomic_read(&group->refcount),
- pending_str,
- state_str);
- for (f = 0; f < MAX_VFS; ++f)
- if (group->func[f].state == MCAST_MEMBER)
- len += sprintf(buf + len, "%d[%1x] ",
- f, group->func[f].join_state);
-
- len += sprintf(buf + len, "\t\t(%4hx %4x %2x %2x %2x %2x %2x "
- "%4x %4x %2x %2x)\n",
- be16_to_cpu(group->rec.pkey),
- be32_to_cpu(group->rec.qkey),
- (group->rec.mtusel_mtu & 0xc0) >> 6,
- group->rec.mtusel_mtu & 0x3f,
- group->rec.tclass,
- (group->rec.ratesel_rate & 0xc0) >> 6,
- group->rec.ratesel_rate & 0x3f,
- (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0xf0000000) >> 28,
- (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x0fffff00) >> 8,
- be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x000000ff,
- group->rec.proxy_join);
+
+ hoplimit = be32_to_cpu(group->rec.sl_flowlabel_hoplimit);
+ len += sysfs_emit_at(buf, len,
+ "\t\t(%4hx %4x %2x %2x %2x %2x %2x %4x %4x %2x %2x)\n",
+ be16_to_cpu(group->rec.pkey),
+ be32_to_cpu(group->rec.qkey),
+ (group->rec.mtusel_mtu & 0xc0) >> 6,
+ (group->rec.mtusel_mtu & 0x3f),
+ group->rec.tclass,
+ (group->rec.ratesel_rate & 0xc0) >> 6,
+ (group->rec.ratesel_rate & 0x3f),
+ (hoplimit & 0xf0000000) >> 28,
+ (hoplimit & 0x0fffff00) >> 8,
+ (hoplimit & 0x000000ff),
+ group->rec.proxy_join);
return len;
}
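sysfs_emit() and sysfs_emit_at() bound every write to the PAGE_SIZE sysfs buffer, unlike the raw sprintf() calls they replace above. The general shape of a show routine built on them, with illustrative attribute data:

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	int i, len;

	len = sysfs_emit(buf, "members:");		/* always writes at offset 0 */
	for (i = 0; i < 3; i++)
		len += sysfs_emit_at(buf, len, " %d", i);	/* appends, never past PAGE_SIZE */
	len += sysfs_emit_at(buf, len, "\n");
	return len;
}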
@@ -1048,7 +1055,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
atomic_set(&ctx->tid, 0);
sprintf(name, "mlx4_ib_mcg%d", ctx->port);
- ctx->mcg_wq = create_singlethread_workqueue(name);
+ ctx->mcg_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (!ctx->mcg_wq)
return -ENOMEM;
@@ -1084,7 +1091,7 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
for (i = 0; i < MAX_VFS; ++i)
clean_vf_mcast(ctx, i);
- end = jiffies + msecs_to_jiffies(MAD_TIMEOUT_MS + 3000);
+ end = jiffies + secs_to_jiffies(MAD_TIMEOUT_SEC + 3);
do {
count = 0;
mutex_lock(&ctx->mcg_table_lock);
@@ -1094,7 +1101,7 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
if (!count)
break;
- msleep(1);
+ usleep_range(1000, 2000);
} while (time_after(end, jiffies));
flush_workqueue(ctx->mcg_wq);
@@ -1105,7 +1112,8 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
while ((p = rb_first(&ctx->mcg_table)) != NULL) {
group = rb_entry(p, struct mcast_group, node);
if (atomic_read(&group->refcount))
- mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n", atomic_read(&group->refcount), group);
+ mcg_debug_group(group, "group refcount %d!!! (pointer %p)\n",
+ atomic_read(&group->refcount), group);
force_clean_group(group);
}
@@ -1145,7 +1153,6 @@ void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
work = kmalloc(sizeof *work, GFP_KERNEL);
if (!work) {
ctx->flushing = 0;
- mcg_warn("failed allocating work for cleanup\n");
return;
}
@@ -1205,10 +1212,8 @@ static int push_deleteing_req(struct mcast_group *group, int slave)
return 0;
req = kzalloc(sizeof *req, GFP_KERNEL);
- if (!req) {
- mcg_warn_group(group, "failed allocation - may leave stall groups\n");
+ if (!req)
return -ENOMEM;
- }
if (!list_empty(&group->func[slave].pending)) {
pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list);
@@ -1249,7 +1254,7 @@ void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave)
int mlx4_ib_mcg_init(void)
{
- clean_wq = create_singlethread_workqueue("mlx4_ib_mcg");
+ clean_wq = alloc_ordered_workqueue("mlx4_ib_mcg", WQ_MEM_RECLAIM);
if (!clean_wq)
return -ENOMEM;
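One more idiom from this file: in the search_relocate_mgid0_group() hunk above, the open-coded list_for_each_safe() plus list_entry() pair becomes list_for_each_entry_safe(), which iterates typed entries while caching the next node so the current one can be unlinked mid-loop. A small self-contained sketch with illustrative types:

struct example_item {
	struct list_head	node;
	u64			tid;
};

static void example_drop_tid(struct list_head *head, u64 wanted_tid)
{
	struct example_item *it, *n;

	list_for_each_entry_safe(it, n, head, node) {
		if (it->tid != wanted_tid)
			continue;
		list_del(&it->node);	/* safe: 'n' was read before unlinking 'it' */
		kfree(it);
	}
}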
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 1e7b23bb2eb0..5df5b955114e 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -38,6 +38,7 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/idr.h>
+#include <linux/notifier.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
@@ -46,6 +47,8 @@
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
+#include <linux/mlx4/qp.h>
+#include <linux/mlx4/cq.h>
#define MLX4_IB_DRV_NAME "mlx4_ib"
@@ -55,7 +58,7 @@
#define pr_fmt(fmt) "<" MLX4_IB_DRV_NAME "> %s: " fmt, __func__
#define mlx4_ib_warn(ibdev, format, arg...) \
- dev_warn((ibdev)->dma_device, MLX4_IB_DRV_NAME ": " format, ## arg)
+ dev_warn((ibdev)->dev.parent, MLX4_IB_DRV_NAME ": " format, ## arg)
enum {
MLX4_IB_SQ_MIN_WQE_SHIFT = 6,
@@ -78,16 +81,13 @@ enum hw_bar_type {
HW_BAR_COUNT
};
-struct mlx4_ib_vma_private_data {
- struct vm_area_struct *vma;
-};
-
struct mlx4_ib_ucontext {
struct ib_ucontext ibucontext;
struct mlx4_uar uar;
struct list_head db_page_list;
struct mutex db_page_mutex;
- struct mlx4_ib_vma_private_data hw_bar_info[HW_BAR_COUNT];
+ struct list_head wqn_ranges_list;
+ struct mutex wqn_ranges_mutex; /* protect wqn_ranges_list */
};
struct mlx4_ib_pd {
@@ -129,10 +129,17 @@ struct mlx4_ib_cq {
struct list_head recv_qp_list;
};
+#define MLX4_MR_PAGES_ALIGN 0x40
+
struct mlx4_ib_mr {
struct ib_mr ibmr;
+ __be64 *pages;
+ dma_addr_t page_map;
+ u32 npages;
+ u32 max_pages;
struct mlx4_mr mmr;
struct ib_umem *umem;
+ size_t page_map_size;
};
struct mlx4_ib_mw {
@@ -140,17 +147,6 @@ struct mlx4_ib_mw {
struct mlx4_mw mmw;
};
-struct mlx4_ib_fast_reg_page_list {
- struct ib_fast_reg_page_list ibfrpl;
- __be64 *mapped_page_list;
- dma_addr_t map;
-};
-
-struct mlx4_ib_fmr {
- struct ib_fmr ibfmr;
- struct mlx4_fmr mfmr;
-};
-
#define MAX_REGS_PER_FLOW 2
struct mlx4_flow_reg_id {
@@ -176,11 +172,18 @@ struct mlx4_ib_wq {
unsigned tail;
};
+enum {
+ MLX4_IB_QP_CREATE_ROCE_V2_GSI = IB_QP_CREATE_RESERVED_START
+};
+
enum mlx4_ib_qp_flags {
MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP,
- MLX4_IB_QP_CREATE_USE_GFP_NOIO = IB_QP_CREATE_USE_GFP_NOIO,
+ MLX4_IB_QP_SCATTER_FCS = IB_QP_CREATE_SCATTER_FCS,
+
+ /* Mellanox specific flags start from IB_QP_CREATE_RESERVED_START */
+ MLX4_IB_ROCE_V2_GSI_QP = MLX4_IB_QP_CREATE_ROCE_V2_GSI,
MLX4_IB_SRIOV_TUNNEL_QP = 1 << 30,
MLX4_IB_SRIOV_SQP = 1 << 31,
};
@@ -231,7 +234,8 @@ enum mlx4_ib_mad_ifc_flags {
};
enum {
- MLX4_NUM_TUNNEL_BUFS = 256,
+ MLX4_NUM_TUNNEL_BUFS = 512,
+ MLX4_NUM_WIRE_BUFS = 2048,
};
struct mlx4_ib_tunnel_header {
@@ -282,8 +286,45 @@ struct mlx4_roce_smac_vlan_info {
int update_vid;
};
+struct mlx4_wqn_range {
+ int base_wqn;
+ int size;
+ int refcount;
+ bool dirty;
+ struct list_head list;
+};
+
+struct mlx4_ib_rss {
+ unsigned int base_qpn_tbl_sz;
+ u8 flags;
+ u8 rss_key[MLX4_EN_RSS_KEY_SIZE];
+};
+
+enum {
+ /*
+ * Largest possible UD header: send with GRH and immediate
+ * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
+ * tag. (LRH would only use 8 bytes, so Ethernet is the
+ * biggest case)
+ */
+ MLX4_IB_UD_HEADER_SIZE = 82,
+ MLX4_IB_LSO_HEADER_SPARE = 128,
+};
+
+struct mlx4_ib_sqp {
+ int pkey_index;
+ u32 qkey;
+ u32 send_psn;
+ struct ib_ud_header ud_header;
+ u8 header_buf[MLX4_IB_UD_HEADER_SIZE];
+ struct ib_qp *roce_v2_gsi;
+};
+
struct mlx4_ib_qp {
- struct ib_qp ibqp;
+ union {
+ struct ib_qp ibqp;
+ struct ib_wq ibwq;
+ };
struct mlx4_qp mqp;
struct mlx4_buf buf;
@@ -293,7 +334,6 @@ struct mlx4_ib_qp {
u32 doorbell_qpn;
__be32 sq_signal_bits;
unsigned sq_next_wqe;
- int sq_max_wqes_per_wr;
int sq_spare_wqes;
struct mlx4_ib_wq sq;
@@ -311,6 +351,7 @@ struct mlx4_ib_qp {
u8 sq_no_prefetch;
u8 state;
int mlx_type;
+ u32 inl_recv_sz;
struct list_head gid_list;
struct list_head steering_rules;
struct mlx4_ib_buf *sqp_proxy_rcv;
@@ -320,6 +361,14 @@ struct mlx4_ib_qp {
struct list_head qps_list;
struct list_head cq_recv_list;
struct list_head cq_send_list;
+ struct counter_index *counter_index;
+ struct mlx4_wqn_range *wqn_range;
+ /* Number of RSS QP parents that uses this WQ */
+ u32 rss_usecnt;
+ union {
+ struct mlx4_ib_rss *rss_ctx;
+ struct mlx4_ib_sqp *sqp;
+ };
};
struct mlx4_ib_srq {
@@ -342,6 +391,10 @@ struct mlx4_ib_ah {
union mlx4_ext_av av;
};
+struct mlx4_ib_rwq_ind_table {
+ struct ib_rwq_ind_table ib_rwq_ind_tbl;
+};
+
/****************************************/
/* alias guid support */
/****************************************/
@@ -377,7 +430,7 @@ struct mlx4_sriov_alias_guid_port_rec_det {
struct mlx4_sriov_alias_guid_info_rec_det all_rec_per_port[NUM_ALIAS_GUID_REC_IN_PORT];
struct workqueue_struct *wq;
struct delayed_work alias_guid_work;
- u8 port;
+ u32 port;
u32 state_flags;
struct mlx4_sriov_alias_guid *parent;
struct list_head cb_list;
@@ -430,6 +483,7 @@ struct mlx4_ib_demux_pv_ctx {
struct ib_pd *pd;
struct work_struct work;
struct workqueue_struct *wq;
+ struct workqueue_struct *wi_wq;
struct mlx4_ib_demux_pv_qp qp[2];
};
@@ -437,9 +491,10 @@ struct mlx4_ib_demux_ctx {
struct ib_device *ib_dev;
int port;
struct workqueue_struct *wq;
+ struct workqueue_struct *wi_wq;
struct workqueue_struct *ud_wq;
spinlock_t ud_lock;
- __be64 subnet_prefix;
+ atomic64_t subnet_prefix;
__be64 guid_cache[128];
struct mlx4_ib_dev *dev;
/* the following lock protects both mcg_table and mcg_mgid0_list */
@@ -463,10 +518,12 @@ struct mlx4_ib_sriov {
struct mlx4_sriov_alias_guid alias_guid;
/* CM paravirtualization fields */
- struct list_head cm_list;
+ struct xarray pv_id_table;
+ u32 pv_id_next;
spinlock_t id_map_lock;
struct rb_root sl_id_map;
- struct idr pv_id_table;
+ struct list_head cm_list;
+ struct xarray xa_rej_tmout;
};
struct gid_cache_context {
@@ -476,7 +533,9 @@ struct gid_cache_context {
struct gid_entry {
union ib_gid gid;
+ enum ib_gid_type gid_type;
struct gid_cache_context *ctx;
+ u16 vlan_id;
};
struct mlx4_port_gid_table {
@@ -489,6 +548,7 @@ struct mlx4_ib_iboe {
atomic64_t mac[MLX4_MAX_PORTS];
struct notifier_block nb;
struct mlx4_port_gid_table gids[MLX4_MAX_PORTS];
+ enum ib_port_state last_port_state[MLX4_MAX_PORTS];
};
struct pkey_mgt {
@@ -528,10 +588,25 @@ struct mlx4_ib_iov_port {
};
struct counter_index {
+ struct list_head list;
u32 index;
u8 allocated;
};
+struct mlx4_ib_counters {
+ struct list_head counters_list;
+ struct mutex mutex; /* mutex for accessing counters list */
+ u32 default_counter;
+};
+
+#define MLX4_DIAG_COUNTERS_TYPES 2
+
+struct mlx4_ib_diag_counters {
+ struct rdma_stat_desc *descs;
+ u32 *offset;
+ u32 num_counters;
+};
+
struct mlx4_ib_dev {
struct ib_device ib_dev;
struct mlx4_dev *dev;
@@ -545,12 +620,13 @@ struct mlx4_ib_dev {
struct ib_mad_agent *send_agent[MLX4_MAX_PORTS][2];
struct ib_ah *sm_ah[MLX4_MAX_PORTS];
spinlock_t sm_lock;
+ atomic64_t sl2vl[MLX4_MAX_PORTS];
struct mlx4_ib_sriov sriov;
struct mutex cap_mask_mutex;
bool ib_active;
struct mlx4_ib_iboe iboe;
- struct counter_index counters[MLX4_MAX_PORTS];
+ struct mlx4_ib_counters counters_table[MLX4_MAX_PORTS];
int *eq_table;
struct kobject *iov_parent;
struct kobject *ports_parent;
@@ -568,19 +644,22 @@ struct mlx4_ib_dev {
/* protect resources needed as part of reset flow */
spinlock_t reset_flow_resource_lock;
struct list_head qp_list;
+ struct mlx4_ib_diag_counters diag_counters[MLX4_DIAG_COUNTERS_TYPES];
+ struct notifier_block mlx_nb;
};
struct ib_event_work {
struct work_struct work;
struct mlx4_ib_dev *ib_dev;
struct mlx4_eqe ib_eqe;
+ int port;
};
struct mlx4_ib_qp_tunnel_init_attr {
struct ib_qp_init_attr init_attr;
int slave;
enum ib_qp_type proxy_qp_type;
- u8 port;
+ u32 port;
};
struct mlx4_uverbs_ex_query_device {
@@ -588,15 +667,8 @@ struct mlx4_uverbs_ex_query_device {
__u32 reserved;
};
-enum query_device_resp_mask {
- QUERY_DEVICE_RESP_MASK_TIMESTAMP = 1UL << 0,
-};
-
-struct mlx4_uverbs_ex_query_device_resp {
- __u32 comp_mask;
- __u32 response_length;
- __u64 hca_core_clock_offset;
-};
+/* 4k - 4G */
+#define MLX4_PAGE_SIZE_SUPPORTED ((unsigned long)GENMASK_ULL(31, 12))
static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
{
@@ -638,16 +710,6 @@ static inline struct mlx4_ib_mw *to_mmw(struct ib_mw *ibmw)
return container_of(ibmw, struct mlx4_ib_mw, ibmw);
}
-static inline struct mlx4_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
-{
- return container_of(ibfrpl, struct mlx4_ib_fast_reg_page_list, ibfrpl);
-}
-
-static inline struct mlx4_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
-{
- return container_of(ibfmr, struct mlx4_ib_fmr, ibfmr);
-}
-
static inline struct mlx4_ib_flow *to_mflow(struct ib_flow *ibflow)
{
return container_of(ibflow, struct mlx4_ib_flow, ibflow);
@@ -688,7 +750,7 @@ static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev)
int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);
-int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
+int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
struct mlx4_db *db);
void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);
@@ -697,87 +759,80 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
struct ib_umem *umem);
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
-int mlx4_ib_dereg_mr(struct ib_mr *mr);
-struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
-int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
- struct ib_mw_bind *mw_bind);
+int mlx4_ib_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
+int mlx4_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int mlx4_ib_dealloc_mw(struct ib_mw *mw);
-struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
+struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg);
-struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
- int page_list_len);
-void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
-
+int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
-struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata);
-int mlx4_ib_destroy_cq(struct ib_cq *cq);
+int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
-struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
-int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
-int mlx4_ib_destroy_ah(struct ib_ah *ah);
+int mlx4_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
+int mlx4_ib_create_ah_slave(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
+ int slave_sgid_index, u8 *s_mac, u16 vlan_tag);
+int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
+static inline int mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags)
+{
+ return 0;
+}
-struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *init_attr,
- struct ib_udata *udata);
+int mlx4_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata);
int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
-int mlx4_ib_destroy_srq(struct ib_srq *srq);
+int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
-int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
-
-struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata);
-int mlx4_ib_destroy_qp(struct ib_qp *qp);
+int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+
+int mlx4_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
+void mlx4_ib_drain_sq(struct ib_qp *qp);
+void mlx4_ib_drain_rq(struct ib_qp *qp);
int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr);
-int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const void *in_mad, void *response_mad);
-int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);
-struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr);
-int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
- u64 iova);
-int mlx4_ib_unmap_fmr(struct list_head *fmr_list);
-int mlx4_ib_fmr_dealloc(struct ib_fmr *fmr);
-int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
+int __mlx4_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props, int netw_view);
-int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey, int netw_view);
-int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid, int netw_view);
static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
{
- u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;
+ u32 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;
if (rdma_port_get_link_layer(ah->ibah.device, port) == IB_LINK_LAYER_ETHERNET)
return true;
@@ -791,7 +846,7 @@ void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave);
int mlx4_ib_mcg_init(void);
void mlx4_ib_mcg_destroy(void);
-int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid);
+int mlx4_ib_find_real_gid(struct ib_device *ibdev, u32 port, __be64 guid);
int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port, int slave,
struct ib_sa_mad *sa_mad);
@@ -801,19 +856,19 @@ int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
union ib_gid *gid);
-void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
+void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u32 port_num,
enum ib_event_type type);
void mlx4_ib_tunnels_update_work(struct work_struct *work);
-int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
+int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u32 port,
enum ib_qp_type qpt, struct ib_wc *wc,
struct ib_grh *grh, struct ib_mad *mad);
-int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
+int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u32 port,
enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
- u32 qkey, struct ib_ah_attr *attr, u8 *s_mac,
- struct ib_mad *mad);
+ u32 qkey, struct rdma_ah_attr *attr, u8 *s_mac,
+ u16 vlan_id, struct ib_mad *mad);
__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx);
@@ -834,10 +889,10 @@ void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port);
void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
int block_num,
- u8 port_num, u8 *p_data);
+ u32 port_num, u8 *p_data);
void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev,
- int block_num, u8 port_num,
+ int block_num, u32 port_num,
u8 *p_data);
int add_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
@@ -858,11 +913,51 @@ int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn);
void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count);
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
int is_attach);
-int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
- u64 start, u64 length, u64 virt_addr,
- int mr_access_flags, struct ib_pd *pd,
- struct ib_udata *udata);
+struct ib_mr *mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
+ u64 length, u64 virt_addr,
+ int mr_access_flags, struct ib_pd *pd,
+ struct ib_udata *udata);
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
- u8 port_num, int index);
+ const struct ib_gid_attr *attr);
+
+void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
+ int port);
+
+void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port);
+
+struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr,
+ struct ib_udata *udata);
+int mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
+int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
+ u32 wq_attr_mask, struct ib_udata *udata);
+
+int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ struct ib_udata *udata);
+static inline int
+mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table)
+{
+ return 0;
+}
+static inline int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem,
+ u64 start,
+ int *num_of_mtts)
+{
+ unsigned long pg_sz;
+
+ pg_sz = ib_umem_find_best_pgsz(umem, MLX4_PAGE_SIZE_SUPPORTED, start);
+ if (!pg_sz)
+ return -EOPNOTSUPP;
+
+ *num_of_mtts = ib_umem_num_dma_blocks(umem, pg_sz);
+ return order_base_2(pg_sz);
+}
+
+int mlx4_ib_cm_init(void);
+void mlx4_ib_cm_destroy(void);
+
+int mlx4_ib_qp_event_init(void);
+void mlx4_ib_qp_event_cleanup(void);
#endif /* MLX4_IB_H */
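The new mlx4_ib_umem_calc_optimal_mtt_size() helper above and the rewritten MTT code in mr.c below lean on three umem primitives: ib_umem_find_best_pgsz() picks the largest HCA-supported block size, ib_umem_num_dma_blocks() counts blocks at that size, and rdma_umem_for_each_dma_block() walks their DMA addresses. A condensed sketch of how they combine, with an illustrative function name:

static int example_calc_mtt(struct ib_umem *umem, u64 virt_addr, int *num_mtts)
{
	struct ib_block_iter biter;
	unsigned long pg_sz;

	pg_sz = ib_umem_find_best_pgsz(umem, MLX4_PAGE_SIZE_SUPPORTED, virt_addr);
	if (!pg_sz)
		return -EOPNOTSUPP;		/* nothing in the supported 4K..4G range fits */

	*num_mtts = ib_umem_num_dma_blocks(umem, pg_sz);	/* one MTT entry per aligned block */

	rdma_umem_for_each_dma_block(umem, &biter, pg_sz) {
		dma_addr_t dma = rdma_block_iter_dma_address(&biter);

		pr_debug("block at %pad\n", &dma);	/* program one pg_sz block at 'dma' */
	}

	return order_base_2(pg_sz);		/* page shift handed to mlx4_mr_alloc() */
}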
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 2542fd3c1a49..94464f1694d9 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -32,6 +32,7 @@
*/
#include <linux/slab.h>
+#include <rdma/ib_user_verbs.h>
#include "mlx4_ib.h"
@@ -59,7 +60,7 @@ struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
struct mlx4_ib_mr *mr;
int err;
- mr = kmalloc(sizeof *mr, GFP_KERNEL);
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
@@ -89,49 +90,56 @@ err_free:
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
struct ib_umem *umem)
{
- u64 *pages;
- int i, k, entry;
- int n;
- int len;
- int err = 0;
- struct scatterlist *sg;
+ struct ib_block_iter biter;
+ int err, i = 0;
+ u64 addr;
- pages = (u64 *) __get_free_page(GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
+ rdma_umem_for_each_dma_block(umem, &biter, BIT(mtt->page_shift)) {
+ addr = rdma_block_iter_dma_address(&biter);
+ err = mlx4_write_mtt(dev->dev, mtt, i++, 1, &addr);
+ if (err)
+ return err;
+ }
+ return 0;
+}
- i = n = 0;
-
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- len = sg_dma_len(sg) >> mtt->page_shift;
- for (k = 0; k < len; ++k) {
- pages[i++] = sg_dma_address(sg) +
- umem->page_size * k;
- /*
- * Be friendly to mlx4_write_mtt() and
- * pass it chunks of appropriate size.
- */
- if (i == PAGE_SIZE / sizeof (u64)) {
- err = mlx4_write_mtt(dev->dev, mtt, n,
- i, pages);
- if (err)
- goto out;
- n += i;
- i = 0;
- }
+static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
+ u64 length, int access_flags)
+{
+ /*
+ * Force registering the memory as writable if the underlying pages
+ * are writable. This is so rereg can change the access permissions
+ * from readable to writable without having to run through ib_umem_get
+ * again
+ */
+ if (!ib_access_writable(access_flags)) {
+ unsigned long untagged_start = untagged_addr(start);
+ struct vm_area_struct *vma;
+
+ mmap_read_lock(current->mm);
+ /*
+ * FIXME: Ideally this would iterate over all the vmas that
+ * cover the memory, but for now it requires a single vma to
+ * entirely cover the MR to support RO mappings.
+ */
+ vma = find_vma(current->mm, untagged_start);
+ if (vma && vma->vm_end >= untagged_start + length &&
+ vma->vm_start <= untagged_start) {
+ if (vma->vm_flags & VM_WRITE)
+ access_flags |= IB_ACCESS_LOCAL_WRITE;
+ } else {
+ access_flags |= IB_ACCESS_LOCAL_WRITE;
}
- }
- if (i)
- err = mlx4_write_mtt(dev->dev, mtt, n, i, pages);
+ mmap_read_unlock(current->mm);
+ }
-out:
- free_page((unsigned long) pages);
- return err;
+ return ib_umem_get(device, start, length, access_flags);
}
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
struct mlx4_ib_dev *dev = to_mdev(pd->device);
@@ -140,21 +148,24 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int err;
int n;
- mr = kmalloc(sizeof *mr, GFP_KERNEL);
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
- /* Force registering the memory as writable. */
- /* Used for memory re-registeration. HCA protects the access */
- mr->umem = ib_umem_get(pd->uobject->context, start, length,
- access_flags | IB_ACCESS_LOCAL_WRITE, 0);
+ mr->umem = mlx4_get_umem_mr(pd->device, start, length, access_flags);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
goto err_free;
}
- n = ib_umem_page_count(mr->umem);
- shift = ilog2(mr->umem->page_size);
+ shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);
+ if (shift < 0) {
+ err = shift;
+ goto err_umem;
+ }
err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
convert_access(access_flags), n, shift, &mr->mmr);
@@ -170,6 +181,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
goto err_mr;
mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
+ mr->ibmr.page_size = 1U << shift;
return &mr->ibmr;
@@ -185,10 +197,10 @@ err_free:
return ERR_PTR(err);
}
-int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
- u64 start, u64 length, u64 virt_addr,
- int mr_access_flags, struct ib_pd *pd,
- struct ib_udata *udata)
+struct ib_mr *mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
+ u64 length, u64 virt_addr,
+ int mr_access_flags, struct ib_pd *pd,
+ struct ib_udata *udata)
{
struct mlx4_ib_dev *dev = to_mdev(mr->device);
struct mlx4_ib_mr *mmr = to_mmr(mr);
@@ -201,9 +213,8 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
* race exists.
*/
err = mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry);
-
if (err)
- return err;
+ return ERR_PTR(err);
if (flags & IB_MR_REREG_PD) {
err = mlx4_mr_hw_change_pd(dev->dev, *pmpt_entry,
@@ -214,6 +225,12 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
}
if (flags & IB_MR_REREG_ACCESS) {
+ if (ib_access_writable(mr_access_flags) &&
+ !mmr->umem->writable) {
+ err = -EPERM;
+ goto release_mpt_entry;
+ }
+
err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
convert_access(mr_access_flags));
@@ -227,18 +244,16 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
ib_umem_release(mmr->umem);
- mmr->umem = ib_umem_get(mr->uobject->context, start, length,
- mr_access_flags |
- IB_ACCESS_LOCAL_WRITE,
- 0);
+ mmr->umem = mlx4_get_umem_mr(mr->device, start, length,
+ mr_access_flags);
if (IS_ERR(mmr->umem)) {
err = PTR_ERR(mmr->umem);
/* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
mmr->umem = NULL;
goto release_mpt_entry;
}
- n = ib_umem_page_count(mmr->umem);
- shift = ilog2(mmr->umem->page_size);
+ n = ib_umem_num_dma_blocks(mmr->umem, PAGE_SIZE);
+ shift = PAGE_SHIFT;
err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
virt_addr, length, n, shift,
@@ -267,15 +282,66 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
release_mpt_entry:
mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);
+ if (err)
+ return ERR_PTR(err);
+ return NULL;
+}
- return err;
+static int
+mlx4_alloc_priv_pages(struct ib_device *device,
+ struct mlx4_ib_mr *mr,
+ int max_pages)
+{
+ int ret;
+
+ /* Ensure that size is aligned to DMA cacheline
+ * requirements.
+ * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
+ * so page_map_size will never cross PAGE_SIZE.
+ */
+ mr->page_map_size = roundup(max_pages * sizeof(u64),
+ MLX4_MR_PAGES_ALIGN);
+
+ /* Prevent cross page boundary allocation. */
+ mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
+ if (!mr->pages)
+ return -ENOMEM;
+
+ mr->page_map = dma_map_single(device->dev.parent, mr->pages,
+ mr->page_map_size, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(device->dev.parent, mr->page_map)) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ free_page((unsigned long)mr->pages);
+ return ret;
+}
+
+static void
+mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
+{
+ if (mr->pages) {
+ struct ib_device *device = mr->ibmr.device;
+
+ dma_unmap_single(device->dev.parent, mr->page_map,
+ mr->page_map_size, DMA_TO_DEVICE);
+ free_page((unsigned long)mr->pages);
+ mr->pages = NULL;
+ }
}
-int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
+int mlx4_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct mlx4_ib_mr *mr = to_mmr(ibmr);
int ret;
+ mlx4_free_priv_pages(mr);
+
ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
if (ret)
return ret;
@@ -286,58 +352,27 @@ int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
return 0;
}
-struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
+int mlx4_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
- struct mlx4_ib_dev *dev = to_mdev(pd->device);
- struct mlx4_ib_mw *mw;
+ struct mlx4_ib_dev *dev = to_mdev(ibmw->device);
+ struct mlx4_ib_mw *mw = to_mmw(ibmw);
int err;
- mw = kmalloc(sizeof(*mw), GFP_KERNEL);
- if (!mw)
- return ERR_PTR(-ENOMEM);
-
- err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
- to_mlx4_type(type), &mw->mmw);
+ err = mlx4_mw_alloc(dev->dev, to_mpd(ibmw->pd)->pdn,
+ to_mlx4_type(ibmw->type), &mw->mmw);
if (err)
- goto err_free;
+ return err;
err = mlx4_mw_enable(dev->dev, &mw->mmw);
if (err)
goto err_mw;
- mw->ibmw.rkey = mw->mmw.key;
-
- return &mw->ibmw;
+ ibmw->rkey = mw->mmw.key;
+ return 0;
err_mw:
mlx4_mw_free(dev->dev, &mw->mmw);
-
-err_free:
- kfree(mw);
-
- return ERR_PTR(err);
-}
-
-int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
- struct ib_mw_bind *mw_bind)
-{
- struct ib_send_wr wr;
- struct ib_send_wr *bad_wr;
- int ret;
-
- memset(&wr, 0, sizeof(wr));
- wr.opcode = IB_WR_BIND_MW;
- wr.wr_id = mw_bind->wr_id;
- wr.send_flags = mw_bind->send_flags;
- wr.wr.bind_mw.mw = mw;
- wr.wr.bind_mw.bind_info = mw_bind->bind_info;
- wr.wr.bind_mw.rkey = ib_inc_rkey(mw->rkey);
-
- ret = mlx4_ib_post_send(qp, &wr, &bad_wr);
- if (!ret)
- mw->rkey = wr.wr.bind_mw.rkey;
-
- return ret;
+ return err;
}
int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
@@ -345,13 +380,10 @@ int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
struct mlx4_ib_mw *mw = to_mmw(ibmw);
mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
- kfree(mw);
-
return 0;
}
-struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
+struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg)
{
struct mlx4_ib_dev *dev = to_mdev(pd->device);
@@ -362,7 +394,7 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
max_num_sg > MLX4_MAX_FAST_REG_PAGES)
return ERR_PTR(-EINVAL);
- mr = kmalloc(sizeof *mr, GFP_KERNEL);
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
@@ -371,160 +403,57 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
if (err)
goto err_free;
+ err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg);
+ if (err)
+ goto err_free_mr;
+
+ mr->max_pages = max_num_sg;
err = mlx4_mr_enable(dev->dev, &mr->mmr);
if (err)
- goto err_mr;
+ goto err_free_pl;
mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
mr->umem = NULL;
return &mr->ibmr;
-err_mr:
+err_free_pl:
+ mr->ibmr.device = pd->device;
+ mlx4_free_priv_pages(mr);
+err_free_mr:
(void) mlx4_mr_free(dev->dev, &mr->mmr);
-
err_free:
kfree(mr);
return ERR_PTR(err);
}
-struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
- int page_list_len)
-{
- struct mlx4_ib_dev *dev = to_mdev(ibdev);
- struct mlx4_ib_fast_reg_page_list *mfrpl;
- int size = page_list_len * sizeof (u64);
-
- if (page_list_len > MLX4_MAX_FAST_REG_PAGES)
- return ERR_PTR(-EINVAL);
-
- mfrpl = kmalloc(sizeof *mfrpl, GFP_KERNEL);
- if (!mfrpl)
- return ERR_PTR(-ENOMEM);
-
- mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
- if (!mfrpl->ibfrpl.page_list)
- goto err_free;
-
- mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->persist->
- pdev->dev,
- size, &mfrpl->map,
- GFP_KERNEL);
- if (!mfrpl->mapped_page_list)
- goto err_free;
-
- WARN_ON(mfrpl->map & 0x3f);
-
- return &mfrpl->ibfrpl;
-
-err_free:
- kfree(mfrpl->ibfrpl.page_list);
- kfree(mfrpl);
- return ERR_PTR(-ENOMEM);
-}
-
-void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
-{
- struct mlx4_ib_dev *dev = to_mdev(page_list->device);
- struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
- int size = page_list->max_page_list_len * sizeof (u64);
-
- dma_free_coherent(&dev->dev->persist->pdev->dev, size,
- mfrpl->mapped_page_list,
- mfrpl->map);
- kfree(mfrpl->ibfrpl.page_list);
- kfree(mfrpl);
-}
-
-struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
- struct ib_fmr_attr *fmr_attr)
+static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
{
- struct mlx4_ib_dev *dev = to_mdev(pd->device);
- struct mlx4_ib_fmr *fmr;
- int err = -ENOMEM;
-
- fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
- if (!fmr)
- return ERR_PTR(-ENOMEM);
-
- err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
- fmr_attr->max_pages, fmr_attr->max_maps,
- fmr_attr->page_shift, &fmr->mfmr);
- if (err)
- goto err_free;
-
- err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
- if (err)
- goto err_mr;
-
- fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;
-
- return &fmr->ibfmr;
-
-err_mr:
- (void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);
-
-err_free:
- kfree(fmr);
+ struct mlx4_ib_mr *mr = to_mmr(ibmr);
- return ERR_PTR(err);
-}
+ if (unlikely(mr->npages == mr->max_pages))
+ return -ENOMEM;
-int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int npages, u64 iova)
-{
- struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
- struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);
+ mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT);
- return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
- &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
+ return 0;
}
-int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
+int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
{
- struct ib_fmr *ibfmr;
- int err;
- struct mlx4_dev *mdev = NULL;
-
- list_for_each_entry(ibfmr, fmr_list, list) {
- if (mdev && to_mdev(ibfmr->device)->dev != mdev)
- return -EINVAL;
- mdev = to_mdev(ibfmr->device)->dev;
- }
-
- if (!mdev)
- return 0;
-
- list_for_each_entry(ibfmr, fmr_list, list) {
- struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
-
- mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
- }
-
- /*
- * Make sure all MPT status updates are visible before issuing
- * SYNC_TPT firmware command.
- */
- wmb();
-
- err = mlx4_SYNC_TPT(mdev);
- if (err)
- pr_warn("SYNC_TPT error %d when "
- "unmapping FMRs\n", err);
+ struct mlx4_ib_mr *mr = to_mmr(ibmr);
+ int rc;
- return 0;
-}
+ mr->npages = 0;
-int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
-{
- struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
- struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
- int err;
+ ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
+ mr->page_map_size, DMA_TO_DEVICE);
- err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);
+ rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);
- if (!err)
- kfree(ifmr);
+ ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
+ mr->page_map_size, DMA_TO_DEVICE);
- return err;
+ return rc;
}
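/*
 * Editorial sketch, not part of this patch: roughly how an upper-layer
 * protocol drives the ib_map_mr_sg()/IB_WR_REG_MR path that replaces the
 * FMR and fast-reg page-list verbs removed above. The helper name and
 * error handling are illustrative; "mr" is assumed to come from
 * ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, max_sg).
 */
#include <rdma/ib_verbs.h>

static int example_reg_mr(struct ib_qp *qp, struct ib_mr *mr,
			  struct scatterlist *sg, int sg_nents)
{
	struct ib_reg_wr reg_wr = {};
	int n;

	/* Calls back into the driver's set_page (mlx4_set_page above). */
	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n < 0)
		return n;
	if (n < sg_nents)
		return -EINVAL;

	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	/* Registration takes effect when this WR completes on the send queue. */
	return ib_post_send(qp, &reg_wr.wr, NULL);
}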
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 4ad9be3ad61c..f2887ae6390e 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -32,6 +32,8 @@
*/
#include <linux/log2.h>
+#include <linux/etherdevice.h>
+#include <net/ip.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
@@ -39,17 +41,20 @@
#include <rdma/ib_pack.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
+#include <rdma/uverbs_ioctl.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/qp.h>
#include "mlx4_ib.h"
-#include "user.h"
+#include <rdma/mlx4-abi.h>
static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
struct mlx4_ib_cq *recv_cq);
static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
struct mlx4_ib_cq *recv_cq);
+static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state,
+ struct ib_udata *udata);
enum {
MLX4_IB_ACK_REQ_FREQ = 8,
@@ -63,30 +68,6 @@ enum {
};
enum {
- /*
- * Largest possible UD header: send with GRH and immediate
- * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
- * tag. (LRH would only use 8 bytes, so Ethernet is the
- * biggest case)
- */
- MLX4_IB_UD_HEADER_SIZE = 82,
- MLX4_IB_LSO_HEADER_SPARE = 128,
-};
-
-enum {
- MLX4_IB_IBOE_ETHERTYPE = 0x8915
-};
-
-struct mlx4_ib_sqp {
- struct mlx4_ib_qp qp;
- int pkey_index;
- u32 qkey;
- u32 send_psn;
- struct ib_ud_header ud_header;
- u8 header_buf[MLX4_IB_UD_HEADER_SIZE];
-};
-
-enum {
MLX4_IB_MIN_SQ_STRIDE = 6,
MLX4_IB_CACHE_LINE_SIZE = 64,
};
@@ -111,16 +92,23 @@ static const __be32 mlx4_ib_opcode[] = {
[IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
[IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
[IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
- [IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR),
+ [IB_WR_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR),
[IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
- [IB_WR_BIND_MW] = cpu_to_be32(MLX4_OPCODE_BIND_MW),
};
-static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
-{
- return container_of(mqp, struct mlx4_ib_sqp, qp);
-}
+enum mlx4_ib_source_type {
+ MLX4_IB_QP_SRC = 0,
+ MLX4_IB_RWQ_SRC = 1,
+};
+
+struct mlx4_ib_qp_event_work {
+ struct work_struct work;
+ struct mlx4_qp *qp;
+ enum mlx4_event type;
+};
+
+static struct workqueue_struct *mlx4_ib_qp_event_wq;
static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
@@ -146,14 +134,17 @@ static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
/* VF or PF -- proxy SQP */
if (mlx4_is_mfunc(dev->dev)) {
for (i = 0; i < dev->dev->caps.num_ports; i++) {
- if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] ||
- qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) {
+ if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy ||
+ qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp1_proxy) {
proxy_sqp = 1;
break;
}
}
}
- return proxy_sqp;
+ if (proxy_sqp)
+ return 1;
+
+ return !!(qp->flags & MLX4_IB_ROCE_V2_GSI_QP);
}
/* used for INIT/CLOSE port logic */
@@ -171,7 +162,7 @@ static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
/* VF or PF -- proxy QP0 */
if (mlx4_is_mfunc(dev->dev)) {
for (i = 0; i < dev->dev->caps.num_ports; i++) {
- if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) {
+ if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy) {
proxy_qp0 = 1;
break;
}
@@ -197,135 +188,103 @@ static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
/*
* Stamp a SQ WQE so that it is invalid if prefetched by marking the
- * first four bytes of every 64 byte chunk with
- * 0x7FFFFFF | (invalid_ownership_value << 31).
- *
- * When the max work request size is less than or equal to the WQE
- * basic block size, as an optimization, we can stamp all WQEs with
- * 0xffffffff, and skip the very first chunk of each WQE.
+ * first four bytes of every 64 byte chunk with 0xffffffff, except for
+ * the very first chunk of the WQE.
*/
-static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
+static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n)
{
__be32 *wqe;
int i;
int s;
- int ind;
void *buf;
- __be32 stamp;
struct mlx4_wqe_ctrl_seg *ctrl;
- if (qp->sq_max_wqes_per_wr > 1) {
- s = roundup(size, 1U << qp->sq.wqe_shift);
- for (i = 0; i < s; i += 64) {
- ind = (i >> qp->sq.wqe_shift) + n;
- stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) :
- cpu_to_be32(0xffffffff);
- buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
- wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));
- *wqe = stamp;
- }
- } else {
- ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
- s = (ctrl->fence_size & 0x3f) << 4;
- for (i = 64; i < s; i += 64) {
- wqe = buf + i;
- *wqe = cpu_to_be32(0xffffffff);
- }
+ buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
+ ctrl = (struct mlx4_wqe_ctrl_seg *)buf;
+ s = (ctrl->qpn_vlan.fence_size & 0x3f) << 4;
+ for (i = 64; i < s; i += 64) {
+ wqe = buf + i;
+ *wqe = cpu_to_be32(0xffffffff);
}
}
-static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size)
+static void mlx4_ib_handle_qp_event(struct work_struct *_work)
{
- struct mlx4_wqe_ctrl_seg *ctrl;
- struct mlx4_wqe_inline_seg *inl;
- void *wqe;
- int s;
-
- ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
- s = sizeof(struct mlx4_wqe_ctrl_seg);
+ struct mlx4_ib_qp_event_work *qpe_work =
+ container_of(_work, struct mlx4_ib_qp_event_work, work);
+ struct ib_qp *ibqp = &to_mibqp(qpe_work->qp)->ibqp;
+ struct ib_event event = {};
- if (qp->ibqp.qp_type == IB_QPT_UD) {
- struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;
- struct mlx4_av *av = (struct mlx4_av *)dgram->av;
- memset(dgram, 0, sizeof *dgram);
- av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn);
- s += sizeof(struct mlx4_wqe_datagram_seg);
- }
+ event.device = ibqp->device;
+ event.element.qp = ibqp;
- /* Pad the remainder of the WQE with an inline data segment. */
- if (size > s) {
- inl = wqe + s;
- inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl));
+ switch (qpe_work->type) {
+ case MLX4_EVENT_TYPE_PATH_MIG:
+ event.event = IB_EVENT_PATH_MIG;
+ break;
+ case MLX4_EVENT_TYPE_COMM_EST:
+ event.event = IB_EVENT_COMM_EST;
+ break;
+ case MLX4_EVENT_TYPE_SQ_DRAINED:
+ event.event = IB_EVENT_SQ_DRAINED;
+ break;
+ case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
+ event.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ break;
+ case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
+ event.event = IB_EVENT_QP_FATAL;
+ break;
+ case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
+ event.event = IB_EVENT_PATH_MIG_ERR;
+ break;
+ case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ event.event = IB_EVENT_QP_REQ_ERR;
+ break;
+ case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ break;
+ default:
+ pr_warn("Unexpected event type %d on QP %06x\n",
+ qpe_work->type, qpe_work->qp->qpn);
+ goto out;
}
- ctrl->srcrb_flags = 0;
- ctrl->fence_size = size / 16;
- /*
- * Make sure descriptor is fully written before setting ownership bit
- * (because HW can start executing as soon as we do).
- */
- wmb();
-
- ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) |
- (n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);
- stamp_send_wqe(qp, n + qp->sq_spare_wqes, size);
-}
+ ibqp->event_handler(&event, ibqp->qp_context);
-/* Post NOP WQE to prevent wrap-around in the middle of WR */
-static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind)
-{
- unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1));
- if (unlikely(s < qp->sq_max_wqes_per_wr)) {
- post_nop_wqe(qp, ind, s << qp->sq.wqe_shift);
- ind += s;
- }
- return ind;
+out:
+ mlx4_put_qp(qpe_work->qp);
+ kfree(qpe_work);
}
static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
{
- struct ib_event event;
struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
+ struct mlx4_ib_qp_event_work *qpe_work;
if (type == MLX4_EVENT_TYPE_PATH_MIG)
to_mibqp(qp)->port = to_mibqp(qp)->alt_port;
- if (ibqp->event_handler) {
- event.device = ibqp->device;
- event.element.qp = ibqp;
- switch (type) {
- case MLX4_EVENT_TYPE_PATH_MIG:
- event.event = IB_EVENT_PATH_MIG;
- break;
- case MLX4_EVENT_TYPE_COMM_EST:
- event.event = IB_EVENT_COMM_EST;
- break;
- case MLX4_EVENT_TYPE_SQ_DRAINED:
- event.event = IB_EVENT_SQ_DRAINED;
- break;
- case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
- event.event = IB_EVENT_QP_LAST_WQE_REACHED;
- break;
- case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
- event.event = IB_EVENT_QP_FATAL;
- break;
- case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
- event.event = IB_EVENT_PATH_MIG_ERR;
- break;
- case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
- event.event = IB_EVENT_QP_REQ_ERR;
- break;
- case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
- event.event = IB_EVENT_QP_ACCESS_ERR;
- break;
- default:
- pr_warn("Unexpected event type %d "
- "on QP %06x\n", type, qp->qpn);
- return;
- }
+ if (!ibqp->event_handler)
+ goto out_no_handler;
- ibqp->event_handler(&event, ibqp->qp_context);
- }
+ qpe_work = kzalloc(sizeof(*qpe_work), GFP_ATOMIC);
+ if (!qpe_work)
+ goto out_no_handler;
+
+ qpe_work->qp = qp;
+ qpe_work->type = type;
+ INIT_WORK(&qpe_work->work, mlx4_ib_handle_qp_event);
+ queue_work(mlx4_ib_qp_event_wq, &qpe_work->work);
+ return;
+
+out_no_handler:
+ mlx4_put_qp(qp);
+}
+
+static void mlx4_ib_wq_event(struct mlx4_qp *qp, enum mlx4_event type)
+{
+ pr_warn_ratelimited("Unexpected event type %d on WQ 0x%06x. Events are not supported for WQs\n",
+ type, qp->qpn);
}
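/*
 * Editorial sketch, not part of this patch: the event workqueue declared
 * above (mlx4_ib_qp_event_wq) needs a matching create/destroy lifecycle,
 * presumably provided elsewhere in the full patch. Names below are
 * illustrative only.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *example_qp_event_wq;

static int example_qp_event_init(void)
{
	example_qp_event_wq = alloc_ordered_workqueue("example_qp_event_wq",
						      WQ_MEM_RECLAIM);
	return example_qp_event_wq ? 0 : -ENOMEM;
}

static void example_qp_event_cleanup(void)
{
	/* Flushes queued QP event work items before freeing the queue. */
	destroy_workqueue(example_qp_event_wq);
}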
static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
@@ -356,7 +315,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
sizeof (struct mlx4_wqe_raddr_seg);
case MLX4_IB_QPT_RC:
return sizeof (struct mlx4_wqe_ctrl_seg) +
- sizeof (struct mlx4_wqe_atomic_seg) +
+ sizeof (struct mlx4_wqe_masked_atomic_seg) +
sizeof (struct mlx4_wqe_raddr_seg);
case MLX4_IB_QPT_SMI:
case MLX4_IB_QPT_GSI:
@@ -375,7 +334,8 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
}
static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
- int is_user, int has_rq, struct mlx4_ib_qp *qp)
+ bool is_user, bool has_rq, struct mlx4_ib_qp *qp,
+ u32 inl_recv_sz)
{
/* Sanity check RQ size before proceeding */
if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
@@ -383,18 +343,24 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
return -EINVAL;
if (!has_rq) {
- if (cap->max_recv_wr)
+ if (cap->max_recv_wr || inl_recv_sz)
return -EINVAL;
qp->rq.wqe_cnt = qp->rq.max_gs = 0;
} else {
+ u32 max_inl_recv_sz = dev->dev->caps.max_rq_sg *
+ sizeof(struct mlx4_wqe_data_seg);
+ u32 wqe_size;
+
/* HW requires >= 1 RQ entry with >= 1 gather entry */
- if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge))
+ if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge ||
+ inl_recv_sz > max_inl_recv_sz))
return -EINVAL;
qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr));
qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge));
- qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
+ wqe_size = qp->rq.max_gs * sizeof(struct mlx4_wqe_data_seg);
+ qp->rq.wqe_shift = ilog2(max_t(u32, wqe_size, inl_recv_sz));
}
/* leave userspace return values as they were, so as not to break ABI */
@@ -440,70 +406,20 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
if (s > dev->dev->caps.max_sq_desc_sz)
return -EINVAL;
+ qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));
+
/*
- * Hermon supports shrinking WQEs, such that a single work
- * request can include multiple units of 1 << wqe_shift. This
- * way, work requests can differ in size, and do not have to
- * be a power of 2 in size, saving memory and speeding up send
- * WR posting. Unfortunately, if we do this then the
- * wqe_index field in CQEs can't be used to look up the WR ID
- * anymore, so we do this only if selective signaling is off.
- *
- * Further, on 32-bit platforms, we can't use vmap() to make
- * the QP buffer virtually contiguous. Thus we have to use
- * constant-sized WRs to make sure a WR is always fully within
- * a single page-sized chunk.
- *
- * Finally, we use NOP work requests to pad the end of the
- * work queue, to avoid wrap-around in the middle of WR. We
- * set NEC bit to avoid getting completions with error for
- * these NOP WRs, but since NEC is only supported starting
- * with firmware 2.2.232, we use constant-sized WRs for older
- * firmware.
- *
- * And, since MLX QPs only support SEND, we use constant-sized
- * WRs in this case.
- *
- * We look for the smallest value of wqe_shift such that the
- * resulting number of wqes does not exceed device
- * capabilities.
- *
- * We set WQE size to at least 64 bytes, this way stamping
- * invalidates each WQE.
+ * We need to leave 2 KB + 1 WR of headroom in the SQ to
+ * allow HW to prefetch.
*/
- if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
- qp->sq_signal_bits && BITS_PER_LONG == 64 &&
- type != MLX4_IB_QPT_SMI && type != MLX4_IB_QPT_GSI &&
- !(type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI |
- MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER)))
- qp->sq.wqe_shift = ilog2(64);
- else
- qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));
-
- for (;;) {
- qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);
-
- /*
- * We need to leave 2 KB + 1 WR of headroom in the SQ to
- * allow HW to prefetch.
- */
- qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr;
- qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr *
- qp->sq_max_wqes_per_wr +
- qp->sq_spare_wqes);
-
- if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes)
- break;
-
- if (qp->sq_max_wqes_per_wr <= 1)
- return -EINVAL;
-
- ++qp->sq.wqe_shift;
- }
-
- qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz,
- (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) -
- send_wqe_overhead(type, qp->flags)) /
+ qp->sq_spare_wqes = MLX4_IB_SQ_HEADROOM(qp->sq.wqe_shift);
+ qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr +
+ qp->sq_spare_wqes);
+
+ qp->sq.max_gs =
+ (min(dev->dev->caps.max_sq_desc_sz,
+ (1 << qp->sq.wqe_shift)) -
+ send_wqe_overhead(type, qp->flags)) /
sizeof (struct mlx4_wqe_data_seg);
qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
@@ -517,7 +433,7 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
}
cap->max_send_wr = qp->sq.max_post =
- (qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr;
+ qp->sq.wqe_cnt - qp->sq_spare_wqes;
cap->max_send_sge = min(qp->sq.max_gs,
min(dev->dev->caps.max_sq_sg,
dev->dev->caps.max_rq_sg));
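/*
 * Editorial sketch, not part of this patch: a worked example of the
 * headroom sizing above, assuming MLX4_IB_SQ_HEADROOM(shift) expands to
 * roughly (2048 >> (shift)) + 1, matching the "2 KB + 1 WR" comment.
 * For 64-byte WQEs (wqe_shift = 6): spare = 2048/64 + 1 = 33, so a request
 * for 64 send WRs gives wqe_cnt = roundup_pow_of_two(64 + 33) = 128 and
 * max_post = 128 - 33 = 95.
 */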
@@ -531,9 +447,13 @@ static int set_user_sq_size(struct mlx4_ib_dev *dev,
struct mlx4_ib_qp *qp,
struct mlx4_ib_create_qp *ucmd)
{
+ u32 cnt;
+
/* Sanity check SQ size before proceeding */
- if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes ||
- ucmd->log_sq_stride >
+ if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
+ cnt > dev->dev->caps.max_wqes)
+ return -EINVAL;
+ if (ucmd->log_sq_stride >
ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
return -EINVAL;
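/*
 * Editorial sketch, not part of this patch: why the open-coded
 * "1 << ucmd->log_sq_bb_count" test was replaced. A user-controlled shift
 * of 32 or more is undefined for a 32-bit value, so the old bounds check
 * could be evaded; check_shl_overflow() from <linux/overflow.h> reports
 * the overflow explicitly.
 */
#include <linux/overflow.h>

static int example_validate_log_count(u8 log_cnt, u32 max_wqes)
{
	u32 cnt;

	/* check_shl_overflow() returns true when (1 << log_cnt) does not fit. */
	if (check_shl_overflow(1, log_cnt, &cnt) || cnt > max_wqes)
		return -EINVAL;
	return 0;
}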
@@ -552,8 +472,8 @@ static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
int i;
qp->sqp_proxy_rcv =
- kmalloc(sizeof (struct mlx4_ib_buf) * qp->rq.wqe_cnt,
- GFP_KERNEL);
+ kmalloc_array(qp->rq.wqe_cnt, sizeof(struct mlx4_ib_buf),
+ GFP_KERNEL);
if (!qp->sqp_proxy_rcv)
return -ENOMEM;
for (i = 0; i < qp->rq.wqe_cnt; i++) {
@@ -599,10 +519,10 @@ static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
kfree(qp->sqp_proxy_rcv);
}
-static int qp_has_rq(struct ib_qp_init_attr *attr)
+static bool qp_has_rq(struct ib_qp_init_attr *attr)
{
if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
- return 0;
+ return false;
return !attr->srq;
}
@@ -611,21 +531,476 @@ static int qp0_enabled_vf(struct mlx4_dev *dev, int qpn)
{
int i;
for (i = 0; i < dev->caps.num_ports; i++) {
- if (qpn == dev->caps.qp0_proxy[i])
- return !!dev->caps.qp0_qkey[i];
+ if (qpn == dev->caps.spec_qps[i].qp0_proxy)
+ return !!dev->caps.spec_qps[i].qp0_qkey;
+ }
+ return 0;
+}
+
+static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev,
+ struct mlx4_ib_qp *qp)
+{
+ mutex_lock(&dev->counters_table[qp->port - 1].mutex);
+ mlx4_counter_free(dev->dev, qp->counter_index->index);
+ list_del(&qp->counter_index->list);
+ mutex_unlock(&dev->counters_table[qp->port - 1].mutex);
+
+ kfree(qp->counter_index);
+ qp->counter_index = NULL;
+}
+
+static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
+ struct ib_qp_init_attr *init_attr,
+ struct mlx4_ib_create_qp_rss *ucmd)
+{
+ rss_ctx->base_qpn_tbl_sz = init_attr->rwq_ind_tbl->ind_tbl[0]->wq_num |
+ (init_attr->rwq_ind_tbl->log_ind_tbl_size << 24);
+
+ if ((ucmd->rx_hash_function == MLX4_IB_RX_HASH_FUNC_TOEPLITZ) &&
+ (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) {
+ memcpy(rss_ctx->rss_key, ucmd->rx_hash_key,
+ MLX4_EN_RSS_KEY_SIZE);
+ } else {
+ pr_debug("RX Hash function is not supported\n");
+ return (-EOPNOTSUPP);
+ }
+
+ if (ucmd->rx_hash_fields_mask & ~(u64)(MLX4_IB_RX_HASH_SRC_IPV4 |
+ MLX4_IB_RX_HASH_DST_IPV4 |
+ MLX4_IB_RX_HASH_SRC_IPV6 |
+ MLX4_IB_RX_HASH_DST_IPV6 |
+ MLX4_IB_RX_HASH_SRC_PORT_TCP |
+ MLX4_IB_RX_HASH_DST_PORT_TCP |
+ MLX4_IB_RX_HASH_SRC_PORT_UDP |
+ MLX4_IB_RX_HASH_DST_PORT_UDP |
+ MLX4_IB_RX_HASH_INNER)) {
+ pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
+ ucmd->rx_hash_fields_mask);
+ return (-EOPNOTSUPP);
+ }
+
+ if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) &&
+ (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
+ rss_ctx->flags = MLX4_RSS_IPV4;
+ } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) ||
+ (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
+ pr_debug("RX Hash fields_mask is not supported - both IPv4 SRC and DST must be set\n");
+ return (-EOPNOTSUPP);
+ }
+
+ if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV6) &&
+ (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV6)) {
+ rss_ctx->flags |= MLX4_RSS_IPV6;
+ } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV6) ||
+ (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV6)) {
+ pr_debug("RX Hash fields_mask is not supported - both IPv6 SRC and DST must be set\n");
+ return (-EOPNOTSUPP);
}
+
+ if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_UDP) &&
+ (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_UDP)) {
+ if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UDP_RSS)) {
+ pr_debug("RX Hash fields_mask for UDP is not supported\n");
+ return (-EOPNOTSUPP);
+ }
+
+ if (rss_ctx->flags & MLX4_RSS_IPV4)
+ rss_ctx->flags |= MLX4_RSS_UDP_IPV4;
+ if (rss_ctx->flags & MLX4_RSS_IPV6)
+ rss_ctx->flags |= MLX4_RSS_UDP_IPV6;
+ if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
+ pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n");
+ return (-EOPNOTSUPP);
+ }
+ } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_UDP) ||
+ (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_UDP)) {
+ pr_debug("RX Hash fields_mask is not supported - both UDP SRC and DST must be set\n");
+ return (-EOPNOTSUPP);
+ }
+
+ if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) &&
+ (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
+ if (rss_ctx->flags & MLX4_RSS_IPV4)
+ rss_ctx->flags |= MLX4_RSS_TCP_IPV4;
+ if (rss_ctx->flags & MLX4_RSS_IPV6)
+ rss_ctx->flags |= MLX4_RSS_TCP_IPV6;
+ if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
+ pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n");
+ return (-EOPNOTSUPP);
+ }
+ } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) ||
+ (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
+ pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n");
+ return (-EOPNOTSUPP);
+ }
+
+ if (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_INNER) {
+ if (dev->dev->caps.tunnel_offload_mode ==
+ MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ /*
+ * Hash according to inner headers if they exist; otherwise,
+ * hash according to outer headers.
+ */
+ rss_ctx->flags |= MLX4_RSS_BY_INNER_HEADERS_IPONLY;
+ } else {
+ pr_debug("RSS Hash for inner headers isn't supported\n");
+ return (-EOPNOTSUPP);
+ }
+ }
+
return 0;
}
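/*
 * Editorial sketch, not part of this patch: per the validation above, hash
 * fields must be requested in SRC/DST pairs and an L4 pair needs a matching
 * L3 pair. A mask that passes for TCP/IPv4 RSS is shown below; setting only
 * MLX4_IB_RX_HASH_SRC_IPV4 (without the DST bit) is rejected.
 */
#include <rdma/mlx4-abi.h>

static const u64 example_rx_hash_fields_mask =
	MLX4_IB_RX_HASH_SRC_IPV4 | MLX4_IB_RX_HASH_DST_IPV4 |
	MLX4_IB_RX_HASH_SRC_PORT_TCP | MLX4_IB_RX_HASH_DST_PORT_TCP;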
-static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp,
- gfp_t gfp)
+static int create_qp_rss(struct mlx4_ib_dev *dev,
+ struct ib_qp_init_attr *init_attr,
+ struct mlx4_ib_create_qp_rss *ucmd,
+ struct mlx4_ib_qp *qp)
{
int qpn;
int err;
- struct mlx4_ib_sqp *sqp;
- struct mlx4_ib_qp *qp;
+
+ qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
+
+ err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn, 0, qp->mqp.usage);
+ if (err)
+ return err;
+
+ err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
+ if (err)
+ goto err_qpn;
+
+ INIT_LIST_HEAD(&qp->gid_list);
+ INIT_LIST_HEAD(&qp->steering_rules);
+
+ qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;
+ qp->state = IB_QPS_RESET;
+
+ /* Set dummy send resources to be compatible with HV and PRM */
+ qp->sq_no_prefetch = 1;
+ qp->sq.wqe_cnt = 1;
+ qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
+ qp->buf_size = qp->sq.wqe_cnt << MLX4_IB_MIN_SQ_STRIDE;
+ qp->mtt = (to_mqp(
+ (struct ib_qp *)init_attr->rwq_ind_tbl->ind_tbl[0]))->mtt;
+
+ qp->rss_ctx = kzalloc(sizeof(*qp->rss_ctx), GFP_KERNEL);
+ if (!qp->rss_ctx) {
+ err = -ENOMEM;
+ goto err_qp_alloc;
+ }
+
+ err = set_qp_rss(dev, qp->rss_ctx, init_attr, ucmd);
+ if (err)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(qp->rss_ctx);
+
+err_qp_alloc:
+ mlx4_qp_remove(dev->dev, &qp->mqp);
+ mlx4_qp_free(dev->dev, &qp->mqp);
+
+err_qpn:
+ mlx4_qp_release_range(dev->dev, qpn, 1);
+ return err;
+}
+
+static int _mlx4_ib_create_qp_rss(struct ib_pd *pd, struct mlx4_ib_qp *qp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct mlx4_ib_create_qp_rss ucmd = {};
+ size_t required_cmd_sz;
+ int err;
+
+ if (!udata) {
+ pr_debug("RSS QP with NULL udata\n");
+ return -EINVAL;
+ }
+
+ if (udata->outlen)
+ return -EOPNOTSUPP;
+
+ required_cmd_sz = offsetof(typeof(ucmd), reserved1) +
+ sizeof(ucmd.reserved1);
+ if (udata->inlen < required_cmd_sz) {
+ pr_debug("invalid inlen\n");
+ return -EINVAL;
+ }
+
+ if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
+ pr_debug("copy failed\n");
+ return -EFAULT;
+ }
+
+ if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)))
+ return -EOPNOTSUPP;
+
+ if (ucmd.comp_mask || ucmd.reserved1)
+ return -EOPNOTSUPP;
+
+ if (udata->inlen > sizeof(ucmd) &&
+ !ib_is_udata_cleared(udata, sizeof(ucmd),
+ udata->inlen - sizeof(ucmd))) {
+ pr_debug("inlen is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
+ pr_debug("RSS QP with unsupported QP type %d\n",
+ init_attr->qp_type);
+ return -EOPNOTSUPP;
+ }
+
+ if (init_attr->create_flags) {
+ pr_debug("RSS QP doesn't support create flags\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (init_attr->send_cq || init_attr->cap.max_send_wr) {
+ pr_debug("RSS QP with unsupported send attributes\n");
+ return -EOPNOTSUPP;
+ }
+
+ qp->pri.vid = 0xFFFF;
+ qp->alt.vid = 0xFFFF;
+
+ err = create_qp_rss(to_mdev(pd->device), init_attr, &ucmd, qp);
+ if (err)
+ return err;
+
+ qp->ibqp.qp_num = qp->mqp.qpn;
+ return 0;
+}
+
+/*
+ * This function allocates a WQN from a range that is consecutive and aligned
+ * to its size. If the range is full, it creates a new range and allocates a
+ * WQN from it. The new range will be used for subsequent allocations.
+ */
+static int mlx4_ib_alloc_wqn(struct mlx4_ib_ucontext *context,
+ struct mlx4_ib_qp *qp, int range_size, int *wqn)
+{
+ struct mlx4_ib_dev *dev = to_mdev(context->ibucontext.device);
+ struct mlx4_wqn_range *range;
+ int err = 0;
+
+ mutex_lock(&context->wqn_ranges_mutex);
+
+ range = list_first_entry_or_null(&context->wqn_ranges_list,
+ struct mlx4_wqn_range, list);
+
+ if (!range || (range->refcount == range->size) || range->dirty) {
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+ if (!range) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = mlx4_qp_reserve_range(dev->dev, range_size,
+ range_size, &range->base_wqn, 0,
+ qp->mqp.usage);
+ if (err) {
+ kfree(range);
+ goto out;
+ }
+
+ range->size = range_size;
+ list_add(&range->list, &context->wqn_ranges_list);
+ } else if (range_size != 1) {
+ /*
+ * Requesting a new range (>1) while the last range is still open
+ * is not valid.
+ */
+ err = -EINVAL;
+ goto out;
+ }
+
+ qp->wqn_range = range;
+
+ *wqn = range->base_wqn + range->refcount;
+
+ range->refcount++;
+
+out:
+ mutex_unlock(&context->wqn_ranges_mutex);
+
+ return err;
+}
+
+static void mlx4_ib_release_wqn(struct mlx4_ib_ucontext *context,
+ struct mlx4_ib_qp *qp, bool dirty_release)
+{
+ struct mlx4_ib_dev *dev = to_mdev(context->ibucontext.device);
+ struct mlx4_wqn_range *range;
+
+ mutex_lock(&context->wqn_ranges_mutex);
+
+ range = qp->wqn_range;
+
+ range->refcount--;
+ if (!range->refcount) {
+ mlx4_qp_release_range(dev->dev, range->base_wqn,
+ range->size);
+ list_del(&range->list);
+ kfree(range);
+ } else if (dirty_release) {
+ /*
+ * A range from which a WQN has been destroyed cannot be reused
+ * for further WQN allocations.
+ * The next created WQ will allocate a new range.
+ */
+ range->dirty = true;
+ }
+
+ mutex_unlock(&context->wqn_ranges_mutex);
+}
+
+static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata, struct mlx4_ib_qp *qp)
+{
+ struct mlx4_ib_dev *dev = to_mdev(pd->device);
+ int qpn;
+ int err;
+ struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx4_ib_ucontext, ibucontext);
+ struct mlx4_ib_cq *mcq;
+ unsigned long flags;
+ int range_size;
+ struct mlx4_ib_create_wq wq;
+ size_t copy_len;
+ int shift;
+ int n;
+
+ qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;
+
+ spin_lock_init(&qp->sq.lock);
+ spin_lock_init(&qp->rq.lock);
+ INIT_LIST_HEAD(&qp->gid_list);
+ INIT_LIST_HEAD(&qp->steering_rules);
+
+ qp->state = IB_QPS_RESET;
+
+ copy_len = min(sizeof(struct mlx4_ib_create_wq), udata->inlen);
+
+ if (ib_copy_from_udata(&wq, udata, copy_len)) {
+ err = -EFAULT;
+ goto err;
+ }
+
+ if (wq.comp_mask || wq.reserved[0] || wq.reserved[1] ||
+ wq.reserved[2]) {
+ pr_debug("user command isn't supported\n");
+ err = -EOPNOTSUPP;
+ goto err;
+ }
+
+ if (wq.log_range_size > ilog2(dev->dev->caps.max_rss_tbl_sz)) {
+ pr_debug("WQN range size must be equal or smaller than %d\n",
+ dev->dev->caps.max_rss_tbl_sz);
+ err = -EOPNOTSUPP;
+ goto err;
+ }
+ range_size = 1 << wq.log_range_size;
+
+ if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS)
+ qp->flags |= MLX4_IB_QP_SCATTER_FCS;
+
+ err = set_rq_size(dev, &init_attr->cap, true, true, qp, qp->inl_recv_sz);
+ if (err)
+ goto err;
+
+ qp->sq_no_prefetch = 1;
+ qp->sq.wqe_cnt = 1;
+ qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
+ qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
+ (qp->sq.wqe_cnt << qp->sq.wqe_shift);
+
+ qp->umem = ib_umem_get(pd->device, wq.buf_addr, qp->buf_size, 0);
+ if (IS_ERR(qp->umem)) {
+ err = PTR_ERR(qp->umem);
+ goto err;
+ }
+
+ shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
+ if (shift < 0) {
+ err = shift;
+ goto err_buf;
+ }
+
+ err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
+ if (err)
+ goto err_buf;
+
+ err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
+ if (err)
+ goto err_mtt;
+
+ err = mlx4_ib_db_map_user(udata, wq.db_addr, &qp->db);
+ if (err)
+ goto err_mtt;
+ qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
+
+ err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn);
+ if (err)
+ goto err_wrid;
+
+ err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
+ if (err)
+ goto err_qpn;
+
+ /*
+ * Hardware wants QPN written in big-endian order (after
+ * shifting) for send doorbell. Precompute this value to save
+ * a little bit when posting sends.
+ */
+ qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
+
+ qp->mqp.event = mlx4_ib_wq_event;
+
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
+ to_mcq(init_attr->recv_cq));
+ /* Maintain device to QPs access, needed for further handling
+ * via reset flow
+ */
+ list_add_tail(&qp->qps_list, &dev->qp_list);
+ /* Maintain CQ to QPs access, needed for further handling
+ * via reset flow
+ */
+ mcq = to_mcq(init_attr->send_cq);
+ list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
+ mcq = to_mcq(init_attr->recv_cq);
+ list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
+ mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
+ to_mcq(init_attr->recv_cq));
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+ return 0;
+
+err_qpn:
+ mlx4_ib_release_wqn(context, qp, 0);
+err_wrid:
+ mlx4_ib_db_unmap_user(context, &qp->db);
+
+err_mtt:
+ mlx4_mtt_cleanup(dev->dev, &qp->mtt);
+err_buf:
+ ib_umem_release(qp->umem);
+err:
+ return err;
+}
+
+static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata, int sqpn,
+ struct mlx4_ib_qp *qp)
+{
+ struct mlx4_ib_dev *dev = to_mdev(pd->device);
+ int qpn;
+ int err;
+ struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx4_ib_ucontext, ibucontext);
enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
struct mlx4_ib_cq *mcq;
unsigned long flags;
@@ -671,65 +1046,78 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
sqpn = qpn;
}
- if (!*caller_qp) {
- if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
- (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
- MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
- sqp = kzalloc(sizeof (struct mlx4_ib_sqp), gfp);
- if (!sqp)
- return -ENOMEM;
- qp = &sqp->qp;
- qp->pri.vid = 0xFFFF;
- qp->alt.vid = 0xFFFF;
- } else {
- qp = kzalloc(sizeof (struct mlx4_ib_qp), gfp);
- if (!qp)
- return -ENOMEM;
- qp->pri.vid = 0xFFFF;
- qp->alt.vid = 0xFFFF;
- }
- } else
- qp = *caller_qp;
+ if (init_attr->qp_type == IB_QPT_SMI ||
+ init_attr->qp_type == IB_QPT_GSI || qp_type == MLX4_IB_QPT_SMI ||
+ qp_type == MLX4_IB_QPT_GSI ||
+ (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
+ MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
+ qp->sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL);
+ if (!qp->sqp)
+ return -ENOMEM;
+ }
qp->mlx4_ib_qp_type = qp_type;
- mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
INIT_LIST_HEAD(&qp->gid_list);
INIT_LIST_HEAD(&qp->steering_rules);
- qp->state = IB_QPS_RESET;
+ qp->state = IB_QPS_RESET;
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
- err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, qp_has_rq(init_attr), qp);
- if (err)
- goto err;
-
- if (pd->uobject) {
+ if (udata) {
struct mlx4_ib_create_qp ucmd;
+ size_t copy_len;
+ int shift;
+ int n;
- if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
+ copy_len = sizeof(struct mlx4_ib_create_qp);
+
+ if (ib_copy_from_udata(&ucmd, udata, copy_len)) {
err = -EFAULT;
goto err;
}
+ qp->inl_recv_sz = ucmd.inl_recv_sz;
+
+ if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
+ if (!(dev->dev->caps.flags &
+ MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
+ pr_debug("scatter FCS is unsupported\n");
+ err = -EOPNOTSUPP;
+ goto err;
+ }
+
+ qp->flags |= MLX4_IB_QP_SCATTER_FCS;
+ }
+
+ err = set_rq_size(dev, &init_attr->cap, udata,
+ qp_has_rq(init_attr), qp, qp->inl_recv_sz);
+ if (err)
+ goto err;
+
qp->sq_no_prefetch = ucmd.sq_no_prefetch;
err = set_user_sq_size(dev, qp, &ucmd);
if (err)
goto err;
- qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
- qp->buf_size, 0, 0);
+ qp->umem =
+ ib_umem_get(pd->device, ucmd.buf_addr, qp->buf_size, 0);
if (IS_ERR(qp->umem)) {
err = PTR_ERR(qp->umem);
goto err;
}
- err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
- ilog2(qp->umem->page_size), &qp->mtt);
+ shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
+ if (shift < 0) {
+ err = shift;
+ goto err_buf;
+ }
+
+ err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
if (err)
goto err_buf;
@@ -738,16 +1126,18 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err_mtt;
if (qp_has_rq(init_attr)) {
- err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
- ucmd.db_addr, &qp->db);
+ err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &qp->db);
if (err)
goto err_mtt;
}
+ qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
} else {
- qp->sq_no_prefetch = 0;
+ err = set_rq_size(dev, &init_attr->cap, udata,
+ qp_has_rq(init_attr), qp, 0);
+ if (err)
+ goto err;
- if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
- qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
+ qp->sq_no_prefetch = 0;
if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
qp->flags |= MLX4_IB_QP_LSO;
@@ -756,8 +1146,10 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (dev->steering_support ==
MLX4_STEERING_MODE_DEVICE_MANAGED)
qp->flags |= MLX4_IB_QP_NETIF;
- else
+ else {
+ err = -EINVAL;
goto err;
+ }
}
err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
@@ -765,14 +1157,15 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err;
if (qp_has_rq(init_attr)) {
- err = mlx4_db_alloc(dev->dev, &qp->db, 0, gfp);
+ err = mlx4_db_alloc(dev->dev, &qp->db, 0);
if (err)
goto err;
*qp->db.db = 0;
}
- if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf, gfp)) {
+ if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2,
+ &qp->buf)) {
err = -ENOMEM;
goto err_db;
}
@@ -782,16 +1175,19 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (err)
goto err_buf;
- err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, gfp);
+ err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
if (err)
goto err_mtt;
- qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof (u64), gfp);
- qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof (u64), gfp);
+ qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt,
+ sizeof(u64), GFP_KERNEL);
+ qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
+ sizeof(u64), GFP_KERNEL);
if (!qp->sq.wrid || !qp->rq.wrid) {
err = -ENOMEM;
goto err_wrid;
}
+ qp->mqp.usage = MLX4_RES_USAGE_DRIVER;
}
if (sqpn) {
@@ -811,18 +1207,22 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
(init_attr->cap.max_send_wr ?
MLX4_RESERVE_ETH_BF_QP : 0) |
(init_attr->cap.max_recv_wr ?
- MLX4_RESERVE_A0_QP : 0));
+ MLX4_RESERVE_A0_QP : 0),
+ qp->mqp.usage);
else
if (qp->flags & MLX4_IB_QP_NETIF)
err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn);
else
err = mlx4_qp_reserve_range(dev->dev, 1, 1,
- &qpn, 0);
+ &qpn, 0, qp->mqp.usage);
if (err)
goto err_proxy;
}
- err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp);
+ if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
+ qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
+
+ err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
if (err)
goto err_qpn;
@@ -837,8 +1237,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
qp->mqp.event = mlx4_ib_qp_event;
- if (!*caller_qp)
- *caller_qp = qp;
spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
@@ -870,30 +1268,28 @@ err_proxy:
if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
free_proxy_bufs(pd->device, qp);
err_wrid:
- if (pd->uobject) {
+ if (udata) {
if (qp_has_rq(init_attr))
- mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
+ mlx4_ib_db_unmap_user(context, &qp->db);
} else {
- kfree(qp->sq.wrid);
- kfree(qp->rq.wrid);
+ kvfree(qp->sq.wrid);
+ kvfree(qp->rq.wrid);
}
err_mtt:
mlx4_mtt_cleanup(dev->dev, &qp->mtt);
err_buf:
- if (pd->uobject)
- ib_umem_release(qp->umem);
- else
+ if (!qp->umem)
mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
+ ib_umem_release(qp->umem);
err_db:
- if (!pd->uobject && qp_has_rq(init_attr))
+ if (!udata && qp_has_rq(init_attr))
mlx4_db_free(dev->dev, &qp->db);
err:
- if (!*caller_qp)
- kfree(qp);
+ kfree(qp->sqp);
return err;
}
@@ -959,7 +1355,7 @@ static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp)
return to_mpd(qp->ibqp.pd);
}
-static void get_cqs(struct mlx4_ib_qp *qp,
+static void get_cqs(struct mlx4_ib_qp *qp, enum mlx4_ib_source_type src,
struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
{
switch (qp->ibqp.qp_type) {
@@ -972,14 +1368,46 @@ static void get_cqs(struct mlx4_ib_qp *qp,
*recv_cq = *send_cq;
break;
default:
- *send_cq = to_mcq(qp->ibqp.send_cq);
- *recv_cq = to_mcq(qp->ibqp.recv_cq);
+ *recv_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.recv_cq) :
+ to_mcq(qp->ibwq.cq);
+ *send_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.send_cq) :
+ *recv_cq;
break;
}
}
+static void destroy_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
+{
+ if (qp->state != IB_QPS_RESET) {
+ int i;
+
+ for (i = 0; i < (1 << qp->ibqp.rwq_ind_tbl->log_ind_tbl_size);
+ i++) {
+ struct ib_wq *ibwq = qp->ibqp.rwq_ind_tbl->ind_tbl[i];
+ struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);
+
+ mutex_lock(&wq->mutex);
+
+ wq->rss_usecnt--;
+
+ mutex_unlock(&wq->mutex);
+ }
+
+ if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
+ MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
+ pr_warn("modify QP %06x to RESET failed.\n",
+ qp->mqp.qpn);
+ }
+
+ mlx4_qp_remove(dev->dev, &qp->mqp);
+ mlx4_qp_free(dev->dev, &qp->mqp);
+ mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
+ del_gid_entries(qp);
+}
+
static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
- int is_user)
+ enum mlx4_ib_source_type src,
+ struct ib_udata *udata)
{
struct mlx4_ib_cq *send_cq, *recv_cq;
unsigned long flags;
@@ -1012,7 +1440,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
}
}
- get_cqs(qp, &send_cq, &recv_cq);
+ get_cqs(qp, src, &send_cq, &recv_cq);
spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
mlx4_ib_lock_cqs(send_cq, recv_cq);
@@ -1021,7 +1449,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
list_del(&qp->qps_list);
list_del(&qp->cq_send_list);
list_del(&qp->cq_recv_list);
- if (!is_user) {
+ if (!udata) {
__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
if (send_cq != recv_cq)
@@ -1038,20 +1466,32 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) {
if (qp->flags & MLX4_IB_QP_NETIF)
mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1);
+ else if (src == MLX4_IB_RWQ_SRC)
+ mlx4_ib_release_wqn(
+ rdma_udata_to_drv_context(
+ udata,
+ struct mlx4_ib_ucontext,
+ ibucontext),
+ qp, 1);
else
mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
}
mlx4_mtt_cleanup(dev->dev, &qp->mtt);
- if (is_user) {
- if (qp->rq.wqe_cnt)
- mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
- &qp->db);
- ib_umem_release(qp->umem);
+ if (udata) {
+ if (qp->rq.wqe_cnt) {
+ struct mlx4_ib_ucontext *mcontext =
+ rdma_udata_to_drv_context(
+ udata,
+ struct mlx4_ib_ucontext,
+ ibucontext);
+
+ mlx4_ib_db_unmap_user(mcontext, &qp->db);
+ }
} else {
- kfree(qp->sq.wrid);
- kfree(qp->rq.wrid);
+ kvfree(qp->sq.wrid);
+ kvfree(qp->rq.wrid);
if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
free_proxy_bufs(&dev->ib_dev, qp);
@@ -1059,6 +1499,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
if (qp->rq.wqe_cnt)
mlx4_db_free(dev->dev, &qp->db);
}
+ ib_umem_release(qp->umem);
del_gid_entries(qp);
}
@@ -1075,22 +1516,22 @@ static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr)
}
/* PF or VF -- creating proxies */
if (attr->qp_type == IB_QPT_SMI)
- return dev->dev->caps.qp0_proxy[attr->port_num - 1];
+ return dev->dev->caps.spec_qps[attr->port_num - 1].qp0_proxy;
else
- return dev->dev->caps.qp1_proxy[attr->port_num - 1];
+ return dev->dev->caps.spec_qps[attr->port_num - 1].qp1_proxy;
}
-struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
+static int _mlx4_ib_create_qp(struct ib_pd *pd, struct mlx4_ib_qp *qp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
{
- struct mlx4_ib_qp *qp = NULL;
int err;
+ int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
u16 xrcdn = 0;
- gfp_t gfp;
- gfp = (init_attr->create_flags & MLX4_IB_QP_CREATE_USE_GFP_NOIO) ?
- GFP_NOIO : GFP_KERNEL;
+ if (init_attr->rwq_ind_tbl)
+ return _mlx4_ib_create_qp_rss(pd, qp, init_attr, udata);
+
/*
* We only support LSO, vendor flag1, and multicast loopback blocking,
* and only for kernel UD QPs.
@@ -1100,106 +1541,173 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
MLX4_IB_SRIOV_TUNNEL_QP |
MLX4_IB_SRIOV_SQP |
MLX4_IB_QP_NETIF |
- MLX4_IB_QP_CREATE_USE_GFP_NOIO))
- return ERR_PTR(-EINVAL);
+ MLX4_IB_QP_CREATE_ROCE_V2_GSI))
+ return -EOPNOTSUPP;
if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
if (init_attr->qp_type != IB_QPT_UD)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
- if (init_attr->create_flags &&
- (udata ||
- ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP | MLX4_IB_QP_CREATE_USE_GFP_NOIO)) &&
- init_attr->qp_type != IB_QPT_UD) ||
- ((init_attr->create_flags & MLX4_IB_SRIOV_SQP) &&
- init_attr->qp_type > IB_QPT_GSI)))
- return ERR_PTR(-EINVAL);
+ if (init_attr->create_flags) {
+ if (udata && init_attr->create_flags & ~(sup_u_create_flags))
+ return -EINVAL;
+
+ if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP |
+ MLX4_IB_QP_CREATE_ROCE_V2_GSI |
+ MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) &&
+ init_attr->qp_type != IB_QPT_UD) ||
+ (init_attr->create_flags & MLX4_IB_SRIOV_SQP &&
+ init_attr->qp_type > IB_QPT_GSI) ||
+ (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI &&
+ init_attr->qp_type != IB_QPT_GSI))
+ return -EINVAL;
+ }
switch (init_attr->qp_type) {
case IB_QPT_XRC_TGT:
pd = to_mxrcd(init_attr->xrcd)->pd;
xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq;
- /* fall through */
+ fallthrough;
case IB_QPT_XRC_INI:
if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
- return ERR_PTR(-ENOSYS);
+ return -ENOSYS;
init_attr->recv_cq = init_attr->send_cq;
- /* fall through */
+ fallthrough;
case IB_QPT_RC:
case IB_QPT_UC:
case IB_QPT_RAW_PACKET:
- qp = kzalloc(sizeof *qp, gfp);
- if (!qp)
- return ERR_PTR(-ENOMEM);
+ case IB_QPT_UD:
qp->pri.vid = 0xFFFF;
qp->alt.vid = 0xFFFF;
- /* fall through */
- case IB_QPT_UD:
- {
- err = create_qp_common(to_mdev(pd->device), pd, init_attr,
- udata, 0, &qp, gfp);
+ err = create_qp_common(pd, init_attr, udata, 0, qp);
if (err)
- return ERR_PTR(err);
+ return err;
qp->ibqp.qp_num = qp->mqp.qpn;
qp->xrcdn = xrcdn;
-
break;
- }
case IB_QPT_SMI:
case IB_QPT_GSI:
{
- /* Userspace is not allowed to create special QPs: */
- if (udata)
- return ERR_PTR(-EINVAL);
+ int sqpn;
+
+ if (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI) {
+ int res = mlx4_qp_reserve_range(to_mdev(pd->device)->dev,
+ 1, 1, &sqpn, 0,
+ MLX4_RES_USAGE_DRIVER);
+
+ if (res)
+ return res;
+ } else {
+ sqpn = get_sqp_num(to_mdev(pd->device), init_attr);
+ }
- err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,
- get_sqp_num(to_mdev(pd->device), init_attr),
- &qp, gfp);
+ qp->pri.vid = 0xFFFF;
+ qp->alt.vid = 0xFFFF;
+ err = create_qp_common(pd, init_attr, udata, sqpn, qp);
if (err)
- return ERR_PTR(err);
+ return err;
- qp->port = init_attr->port_num;
- qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;
+ if (init_attr->create_flags &
+ (MLX4_IB_SRIOV_SQP | MLX4_IB_SRIOV_TUNNEL_QP))
+ /* Internal QP created with ib_create_qp */
+ rdma_restrack_no_track(&qp->ibqp.res);
+ qp->port = init_attr->port_num;
+ qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 :
+ init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI ? sqpn : 1;
break;
}
default:
/* Don't support raw QPs */
- return ERR_PTR(-EINVAL);
+ return -EOPNOTSUPP;
}
+ return 0;
+}
+
+int mlx4_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct ib_device *device = ibqp->device;
+ struct mlx4_ib_dev *dev = to_mdev(device);
+ struct mlx4_ib_qp *qp = to_mqp(ibqp);
+ struct ib_pd *pd = ibqp->pd;
+ int ret;
+
+ mutex_init(&qp->mutex);
+ ret = _mlx4_ib_create_qp(pd, qp, init_attr, udata);
+ if (ret)
+ return ret;
+
+ if (init_attr->qp_type == IB_QPT_GSI &&
+ !(init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI)) {
+ struct mlx4_ib_sqp *sqp = qp->sqp;
+ int is_eth = rdma_cap_eth_ah(&dev->ib_dev, init_attr->port_num);
+
+ if (is_eth &&
+ dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
+ init_attr->create_flags |= MLX4_IB_QP_CREATE_ROCE_V2_GSI;
+ sqp->roce_v2_gsi = ib_create_qp(pd, init_attr);
+
+ if (IS_ERR(sqp->roce_v2_gsi)) {
+ pr_err("Failed to create GSI QP for RoCEv2 (%pe)\n",
+ sqp->roce_v2_gsi);
+ sqp->roce_v2_gsi = NULL;
+ } else {
+ to_mqp(sqp->roce_v2_gsi)->flags |=
+ MLX4_IB_ROCE_V2_GSI_QP;
+ }
- return &qp->ibqp;
+ init_attr->create_flags &= ~MLX4_IB_QP_CREATE_ROCE_V2_GSI;
+ }
+ }
+ return 0;
}
-int mlx4_ib_destroy_qp(struct ib_qp *qp)
+static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
struct mlx4_ib_dev *dev = to_mdev(qp->device);
struct mlx4_ib_qp *mqp = to_mqp(qp);
- struct mlx4_ib_pd *pd;
if (is_qp0(dev, mqp))
mlx4_CLOSE_PORT(dev->dev, mqp->port);
- if (dev->qp1_proxy[mqp->port - 1] == mqp) {
+ if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI &&
+ dev->qp1_proxy[mqp->port - 1] == mqp) {
mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
dev->qp1_proxy[mqp->port - 1] = NULL;
mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
}
- pd = get_pd(mqp);
- destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);
+ if (mqp->counter_index)
+ mlx4_ib_free_qp_counter(dev, mqp);
- if (is_sqp(dev, mqp))
- kfree(to_msqp(mqp));
- else
- kfree(mqp);
+ if (qp->rwq_ind_tbl) {
+ destroy_qp_rss(dev, mqp);
+ } else {
+ destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, udata);
+ }
+ kfree(mqp->sqp);
return 0;
}
+int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
+{
+ struct mlx4_ib_qp *mqp = to_mqp(qp);
+
+ if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
+ struct mlx4_ib_sqp *sqp = mqp->sqp;
+
+ if (sqp->roce_v2_gsi)
+ ib_destroy_qp(sqp->roce_v2_gsi);
+ }
+
+ return _mlx4_ib_destroy_qp(qp, udata);
+}
+
static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
{
switch (type) {
@@ -1270,32 +1778,33 @@ static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
}
-static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
+static int _mlx4_set_path(struct mlx4_ib_dev *dev,
+ const struct rdma_ah_attr *ah,
u64 smac, u16 vlan_tag, struct mlx4_qp_path *path,
struct mlx4_roce_smac_vlan_info *smac_info, u8 port)
{
- int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
- IB_LINK_LAYER_ETHERNET;
int vidx;
int smac_index;
int err;
-
- path->grh_mylmc = ah->src_path_bits & 0x7f;
- path->rlid = cpu_to_be16(ah->dlid);
- if (ah->static_rate) {
- path->static_rate = ah->static_rate + MLX4_STAT_RATE_OFFSET;
+ path->grh_mylmc = rdma_ah_get_path_bits(ah) & 0x7f;
+ path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah));
+ if (rdma_ah_get_static_rate(ah)) {
+ path->static_rate = rdma_ah_get_static_rate(ah) +
+ MLX4_STAT_RATE_OFFSET;
while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
!(1 << path->static_rate & dev->dev->caps.stat_rate_support))
--path->static_rate;
} else
path->static_rate = 0;
- if (ah->ah_flags & IB_AH_GRH) {
- int real_sgid_index = mlx4_ib_gid_index_to_real_index(dev,
- port,
- ah->grh.sgid_index);
+ if (rdma_ah_get_ah_flags(ah) & IB_AH_GRH) {
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah);
+ int real_sgid_index =
+ mlx4_ib_gid_index_to_real_index(dev, grh->sgid_attr);
+ if (real_sgid_index < 0)
+ return real_sgid_index;
if (real_sgid_index >= dev->dev->caps.gid_table_len[port]) {
pr_err("sgid_index (%u) too large. max is %d\n",
real_sgid_index, dev->dev->caps.gid_table_len[port] - 1);
@@ -1304,19 +1813,19 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
path->grh_mylmc |= 1 << 7;
path->mgid_index = real_sgid_index;
- path->hop_limit = ah->grh.hop_limit;
+ path->hop_limit = grh->hop_limit;
path->tclass_flowlabel =
- cpu_to_be32((ah->grh.traffic_class << 20) |
- (ah->grh.flow_label));
- memcpy(path->rgid, ah->grh.dgid.raw, 16);
+ cpu_to_be32((grh->traffic_class << 20) |
+ (grh->flow_label));
+ memcpy(path->rgid, grh->dgid.raw, 16);
}
- if (is_eth) {
- if (!(ah->ah_flags & IB_AH_GRH))
+ if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) {
+ if (!(rdma_ah_get_ah_flags(ah) & IB_AH_GRH))
return -1;
path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
- ((port - 1) << 6) | ((ah->sl & 7) << 3);
+ ((port - 1) << 6) | ((rdma_ah_get_sl(ah) & 7) << 3);
path->feup |= MLX4_FEUP_FORCE_ETH_UP;
if (vlan_tag < 0x1000) {
@@ -1375,14 +1884,13 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
} else {
smac_index = smac_info->smac_index;
}
-
- memcpy(path->dmac, ah->dmac, 6);
+ memcpy(path->dmac, ah->roce.dmac, 6);
path->ackto = MLX4_IB_LINK_TYPE_ETH;
/* put MAC table smac index for IBoE */
path->grh_mylmc = (u8) (smac_index) | 0x80;
} else {
path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
- ((port - 1) << 6) | ((ah->sl & 0xf) << 2);
+ ((port - 1) << 6) | ((rdma_ah_get_sl(ah) & 0xf) << 2);
}
return 0;
@@ -1391,11 +1899,12 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
enum ib_qp_attr_mask qp_attr_mask,
struct mlx4_ib_qp *mqp,
- struct mlx4_qp_path *path, u8 port)
+ struct mlx4_qp_path *path, u8 port,
+ u16 vlan_id, u8 *smac)
{
return _mlx4_set_path(dev, &qp->ah_attr,
- mlx4_mac_to_u64((u8 *)qp->smac),
- (qp_attr_mask & IB_QP_VID) ? qp->vlan_id : 0xffff,
+ ether_addr_to_u64(smac),
+ vlan_id,
path, &mqp->pri, port);
}
@@ -1406,9 +1915,8 @@ static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
struct mlx4_qp_path *path, u8 port)
{
return _mlx4_set_path(dev, &qp->alt_ah_attr,
- mlx4_mac_to_u64((u8 *)qp->alt_smac),
- (qp_attr_mask & IB_QP_ALT_VID) ?
- qp->alt_vlan_id : 0xffff,
+ 0,
+ 0xffff,
path, &mqp->alt, port);
}
@@ -1424,7 +1932,8 @@ static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
}
}
-static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac,
+static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev,
+ struct mlx4_ib_qp *qp,
struct mlx4_qp_context *context)
{
u64 u64_mac;
@@ -1447,19 +1956,208 @@ static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *
return 0;
}
-static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
+static int create_qp_lb_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
+{
+ struct counter_index *new_counter_index;
+ int err;
+ u32 tmp_idx;
+
+ if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) !=
+ IB_LINK_LAYER_ETHERNET ||
+ !(qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) ||
+ !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK))
+ return 0;
+
+ err = mlx4_counter_alloc(dev->dev, &tmp_idx, MLX4_RES_USAGE_DRIVER);
+ if (err)
+ return err;
+
+ new_counter_index = kmalloc(sizeof(*new_counter_index), GFP_KERNEL);
+ if (!new_counter_index) {
+ mlx4_counter_free(dev->dev, tmp_idx);
+ return -ENOMEM;
+ }
+
+ new_counter_index->index = tmp_idx;
+ new_counter_index->allocated = 1;
+ qp->counter_index = new_counter_index;
+
+ mutex_lock(&dev->counters_table[qp->port - 1].mutex);
+ list_add_tail(&new_counter_index->list,
+ &dev->counters_table[qp->port - 1].counters_list);
+ mutex_unlock(&dev->counters_table[qp->port - 1].mutex);
+
+ return 0;
+}
+
+enum {
+ MLX4_QPC_ROCE_MODE_1 = 0,
+ MLX4_QPC_ROCE_MODE_2 = 2,
+ MLX4_QPC_ROCE_MODE_UNDEFINED = 0xff
+};
+
+static u8 gid_type_to_qpc(enum ib_gid_type gid_type)
+{
+ switch (gid_type) {
+ case IB_GID_TYPE_ROCE:
+ return MLX4_QPC_ROCE_MODE_1;
+ case IB_GID_TYPE_ROCE_UDP_ENCAP:
+ return MLX4_QPC_ROCE_MODE_2;
+ default:
+ return MLX4_QPC_ROCE_MODE_UNDEFINED;
+ }
+}
+
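The QPC RoCE mode returned by gid_type_to_qpc() is packed into bits 6-7 of the
QP context's rlkey_roce_mode byte, the same field whose bit 4 is set for
kernel-owned QPs further down. A condensed sketch of that packing, mirroring
the call sites below; the helper name itself is hypothetical:

static inline int qpc_apply_roce_mode(struct mlx4_qp_context *context,
				      enum ib_gid_type gid_type)
{
	u8 qpc_roce_mode = gid_type_to_qpc(gid_type);

	if (qpc_roce_mode == MLX4_QPC_ROCE_MODE_UNDEFINED)
		return -EINVAL;

	/* the RoCE mode occupies bits 6-7 of rlkey_roce_mode */
	context->rlkey_roce_mode |= (qpc_roce_mode << 6);
	return 0;
}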
+/*
+ * Go over all of the RSS QP's child WQs and apply their HW state according to
+ * their logical state if the RSS QP is the first RSS QP associated with the WQ.
+ */
+static int bringup_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, u8 port_num,
+ struct ib_udata *udata)
+{
+ int err = 0;
+ int i;
+
+ for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
+ struct ib_wq *ibwq = ind_tbl->ind_tbl[i];
+ struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);
+
+ mutex_lock(&wq->mutex);
+
+ /* mlx4_ib restriction:
+ * A WQ is associated with a port according to the RSS QP it is
+ * associated with.
+ * If the WQ is already associated with a different port by another
+ * RSS QP, return a failure.
+ */
+ if ((wq->rss_usecnt > 0) && (wq->port != port_num)) {
+ err = -EINVAL;
+ mutex_unlock(&wq->mutex);
+ break;
+ }
+ wq->port = port_num;
+ if ((wq->rss_usecnt == 0) && (ibwq->state == IB_WQS_RDY)) {
+ err = _mlx4_ib_modify_wq(ibwq, IB_WQS_RDY, udata);
+ if (err) {
+ mutex_unlock(&wq->mutex);
+ break;
+ }
+ }
+ wq->rss_usecnt++;
+
+ mutex_unlock(&wq->mutex);
+ }
+
+ if (i && err) {
+ int j;
+
+ for (j = (i - 1); j >= 0; j--) {
+ struct ib_wq *ibwq = ind_tbl->ind_tbl[j];
+ struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);
+
+ mutex_lock(&wq->mutex);
+
+ if ((wq->rss_usecnt == 1) &&
+ (ibwq->state == IB_WQS_RDY))
+ if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET,
+ udata))
+ pr_warn("failed to reverse WQN=0x%06x\n",
+ ibwq->wq_num);
+ wq->rss_usecnt--;
+
+ mutex_unlock(&wq->mutex);
+ }
+ }
+
+ return err;
+}
+
+static void bring_down_rss_rwqs(struct ib_rwq_ind_table *ind_tbl,
+ struct ib_udata *udata)
+{
+ int i;
+
+ for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
+ struct ib_wq *ibwq = ind_tbl->ind_tbl[i];
+ struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);
+
+ mutex_lock(&wq->mutex);
+
+ if ((wq->rss_usecnt == 1) && (ibwq->state == IB_WQS_RDY))
+ if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET, udata))
+ pr_warn("failed to reverse WQN=%x\n",
+ ibwq->wq_num);
+ wq->rss_usecnt--;
+
+ mutex_unlock(&wq->mutex);
+ }
+}
+
+static void fill_qp_rss_context(struct mlx4_qp_context *context,
+ struct mlx4_ib_qp *qp)
+{
+ struct mlx4_rss_context *rss_context;
+
+ rss_context = (void *)context + offsetof(struct mlx4_qp_context,
+ pri_path) + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
+
+ rss_context->base_qpn = cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz);
+ rss_context->default_qpn =
+ cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz & 0xffffff);
+ if (qp->rss_ctx->flags & (MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6))
+ rss_context->base_qpn_udp = rss_context->default_qpn;
+ rss_context->flags = qp->rss_ctx->flags;
+ /* Currently supports just the Toeplitz hash */
+ rss_context->hash_fn = MLX4_RSS_HASH_TOP;
+
+ memcpy(rss_context->rss_key, qp->rss_ctx->rss_key,
+ MLX4_EN_RSS_KEY_SIZE);
+}
+
+static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
const struct ib_qp_attr *attr, int attr_mask,
- enum ib_qp_state cur_state, enum ib_qp_state new_state)
+ enum ib_qp_state cur_state,
+ enum ib_qp_state new_state,
+ struct ib_udata *udata)
{
- struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
- struct mlx4_ib_qp *qp = to_mqp(ibqp);
+ struct ib_srq *ibsrq;
+ const struct ib_gid_attr *gid_attr = NULL;
+ struct ib_rwq_ind_table *rwq_ind_tbl;
+ enum ib_qp_type qp_type;
+ struct mlx4_ib_dev *dev;
+ struct mlx4_ib_qp *qp;
struct mlx4_ib_pd *pd;
struct mlx4_ib_cq *send_cq, *recv_cq;
+ struct mlx4_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx4_ib_ucontext, ibucontext);
struct mlx4_qp_context *context;
enum mlx4_qp_optpar optpar = 0;
int sqd_event;
int steer_qp = 0;
int err = -EINVAL;
+ int counter_index;
+
+ if (src_type == MLX4_IB_RWQ_SRC) {
+ struct ib_wq *ibwq;
+
+ ibwq = (struct ib_wq *)src;
+ ibsrq = NULL;
+ rwq_ind_tbl = NULL;
+ qp_type = IB_QPT_RAW_PACKET;
+ qp = to_mqp((struct ib_qp *)ibwq);
+ dev = to_mdev(ibwq->device);
+ pd = to_mpd(ibwq->pd);
+ } else {
+ struct ib_qp *ibqp;
+
+ ibqp = (struct ib_qp *)src;
+ ibsrq = ibqp->srq;
+ rwq_ind_tbl = ibqp->rwq_ind_tbl;
+ qp_type = ibqp->qp_type;
+ qp = to_mqp(ibqp);
+ dev = to_mdev(ibqp->device);
+ pd = get_pd(qp);
+ }
/* APM is not supported under RoCE */
if (attr_mask & IB_QP_ALT_PATH &&
@@ -1491,16 +2189,22 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
}
- if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
+ if (qp->inl_recv_sz)
+ context->param3 |= cpu_to_be32(1 << 25);
+
+ if (qp->flags & MLX4_IB_QP_SCATTER_FCS)
+ context->param3 |= cpu_to_be32(1 << 29);
+
+ if (qp_type == IB_QPT_GSI || qp_type == IB_QPT_SMI)
context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
- else if (ibqp->qp_type == IB_QPT_RAW_PACKET)
+ else if (qp_type == IB_QPT_RAW_PACKET)
context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX;
- else if (ibqp->qp_type == IB_QPT_UD) {
+ else if (qp_type == IB_QPT_UD) {
if (qp->flags & MLX4_IB_QP_LSO)
context->mtu_msgmax = (IB_MTU_4096 << 5) |
ilog2(dev->dev->caps.max_gso_sz);
else
- context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
+ context->mtu_msgmax = (IB_MTU_4096 << 5) | 13;
} else if (attr_mask & IB_QP_PATH_MTU) {
if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
pr_err("path MTU (%u) is invalid\n",
@@ -1511,25 +2215,32 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
ilog2(dev->dev->caps.max_msg_sz);
}
- if (qp->rq.wqe_cnt)
- context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
- context->rq_size_stride |= qp->rq.wqe_shift - 4;
+ if (!rwq_ind_tbl) { /* PRM RSS receive side should be left zeros */
+ if (qp->rq.wqe_cnt)
+ context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
+ context->rq_size_stride |= qp->rq.wqe_shift - 4;
+ }
if (qp->sq.wqe_cnt)
context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
context->sq_size_stride |= qp->sq.wqe_shift - 4;
+ if (new_state == IB_QPS_RESET && qp->counter_index)
+ mlx4_ib_free_qp_counter(dev, qp);
+
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
context->xrcd = cpu_to_be32((u32) qp->xrcdn);
- if (ibqp->qp_type == IB_QPT_RAW_PACKET)
+ if (qp_type == IB_QPT_RAW_PACKET)
context->param3 |= cpu_to_be32(1 << 30);
}
- if (qp->ibqp.uobject)
- context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index);
+ if (ucontext)
+ context->usr_page = cpu_to_be32(
+ mlx4_to_hw_uar_index(dev->dev, ucontext->uar.index));
else
- context->usr_page = cpu_to_be32(dev->priv_uar.index);
+ context->usr_page = cpu_to_be32(
+ mlx4_to_hw_uar_index(dev->dev, dev->priv_uar.index));
if (attr_mask & IB_QP_DEST_QPN)
context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
@@ -1543,10 +2254,24 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
- if (dev->counters[qp->port - 1].index != -1) {
- context->pri_path.counter_index =
- dev->counters[qp->port - 1].index;
+ err = create_qp_lb_counter(dev, qp);
+ if (err)
+ goto out;
+
+ counter_index =
+ dev->counters_table[qp->port - 1].default_counter;
+ if (qp->counter_index)
+ counter_index = qp->counter_index->index;
+
+ if (counter_index != -1) {
+ context->pri_path.counter_index = counter_index;
optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
+ if (qp->counter_index) {
+ context->pri_path.fl |=
+ MLX4_FL_ETH_SRC_CHECK_MC_LB;
+ context->pri_path.vlan_control |=
+ MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
+ }
} else
context->pri_path.counter_index =
MLX4_SINK_COUNTER_INDEX(dev->dev);
@@ -1555,6 +2280,14 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
mlx4_ib_steer_qp_reg(dev, qp, 1);
steer_qp = 1;
}
+
+ if (qp_type == IB_QPT_GSI) {
+ enum ib_gid_type gid_type = qp->flags & MLX4_IB_ROCE_V2_GSI_QP ?
+ IB_GID_TYPE_ROCE_UDP_ENCAP : IB_GID_TYPE_ROCE;
+ u8 qpc_roce_mode = gid_type_to_qpc(gid_type);
+
+ context->rlkey_roce_mode |= (qpc_roce_mode << 6);
+ }
}
if (attr_mask & IB_QP_PKEY_INDEX) {
@@ -1565,13 +2298,40 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
if (attr_mask & IB_QP_AV) {
+ u8 port_num = mlx4_is_bonded(dev->dev) ? 1 :
+ attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+ u16 vlan = 0xffff;
+ u8 smac[ETH_ALEN];
+ int is_eth =
+ rdma_cap_eth_ah(&dev->ib_dev, port_num) &&
+ rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
+
+ if (is_eth) {
+ gid_attr = attr->ah_attr.grh.sgid_attr;
+ err = rdma_read_gid_l2_fields(gid_attr, &vlan,
+ &smac[0]);
+ if (err)
+ goto out;
+ }
+
if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path,
- attr_mask & IB_QP_PORT ?
- attr->port_num : qp->port))
+ port_num, vlan, smac))
goto out;
optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH |
MLX4_QP_OPTPAR_SCHED_QUEUE);
+
+ if (is_eth &&
+ (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR)) {
+ u8 qpc_roce_mode = gid_type_to_qpc(gid_attr->gid_type);
+
+ if (qpc_roce_mode == MLX4_QPC_ROCE_MODE_UNDEFINED) {
+ err = -EINVAL;
+ goto out;
+ }
+ context->rlkey_roce_mode |= (qpc_roce_mode << 6);
+ }
+
}
if (attr_mask & IB_QP_TIMEOUT) {
@@ -1598,15 +2358,20 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
}
- pd = get_pd(qp);
- get_cqs(qp, &send_cq, &recv_cq);
- context->pd = cpu_to_be32(pd->pdn);
+ context->pd = cpu_to_be32(pd->pdn);
+
+ if (!rwq_ind_tbl) {
+ context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);
+ get_cqs(qp, src_type, &send_cq, &recv_cq);
+ } else { /* Set dummy CQs to be compatible with HV and PRM */
+ send_cq = to_mcq(rwq_ind_tbl->ind_tbl[0]->cq);
+ recv_cq = send_cq;
+ }
context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);
context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
- context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);
/* Set "fast registration enabled" for all kernel QPs */
- if (!qp->ibqp.uobject)
+ if (!ucontext)
context->params1 |= cpu_to_be32(1 << 11);
if (attr_mask & IB_QP_RNR_RETRY) {
@@ -1641,7 +2406,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE;
}
- if (ibqp->srq)
+ if (ibsrq)
context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC);
if (attr_mask & IB_QP_MIN_RNR_TIMER) {
@@ -1672,17 +2437,19 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
optpar |= MLX4_QP_OPTPAR_Q_KEY;
}
- if (ibqp->srq)
- context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);
+ if (ibsrq)
+ context->srqn = cpu_to_be32(1 << 24 |
+ to_msrq(ibsrq)->msrq.srqn);
- if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+ if (qp->rq.wqe_cnt &&
+ cur_state == IB_QPS_RESET &&
+ new_state == IB_QPS_INIT)
context->db_rec_addr = cpu_to_be64(qp->db.dma);
if (cur_state == IB_QPS_INIT &&
new_state == IB_QPS_RTR &&
- (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
- ibqp->qp_type == IB_QPT_UD ||
- ibqp->qp_type == IB_QPT_RAW_PACKET)) {
+ (qp_type == IB_QPT_GSI || qp_type == IB_QPT_SMI ||
+ qp_type == IB_QPT_UD || qp_type == IB_QPT_RAW_PACKET)) {
context->pri_path.sched_queue = (qp->port - 1) << 6;
if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
qp->mlx4_ib_qp_type &
@@ -1704,7 +2471,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD ||
qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
- err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
+ err = handle_eth_ud_smac_index(dev, qp, context);
if (err) {
err = -EINVAL;
goto out;
@@ -1715,17 +2482,17 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
}
- if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
+ if (qp_type == IB_QPT_RAW_PACKET) {
context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
MLX4_IB_LINK_TYPE_ETH;
if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
/* set QP to receive both tunneled & non-tunneled packets */
- if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET)))
+ if (!rwq_ind_tbl)
context->srqn = cpu_to_be32(7 << 28);
}
}
- if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
+ if (qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
int is_eth = rdma_port_get_link_layer(
&dev->ib_dev, qp->port) ==
IB_LINK_LAYER_ETHERNET;
@@ -1735,15 +2502,16 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
}
-
if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
sqd_event = 1;
else
sqd_event = 0;
- if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
- context->rlkey |= (1 << 4);
+ if (!ucontext &&
+ cur_state == IB_QPS_RESET &&
+ new_state == IB_QPS_INIT)
+ context->rlkey_roce_mode |= (1 << 4);
/*
* Before passing a kernel QP to the HW, make sure that the
@@ -1751,20 +2519,28 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
* headroom is stamped so that the hardware doesn't start
* processing stale work requests.
*/
- if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ if (!ucontext &&
+ cur_state == IB_QPS_RESET &&
+ new_state == IB_QPS_INIT) {
struct mlx4_wqe_ctrl_seg *ctrl;
int i;
for (i = 0; i < qp->sq.wqe_cnt; ++i) {
ctrl = get_send_wqe(qp, i);
ctrl->owner_opcode = cpu_to_be32(1 << 31);
- if (qp->sq_max_wqes_per_wr == 1)
- ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4);
-
- stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift);
+ ctrl->qpn_vlan.fence_size =
+ 1 << (qp->sq.wqe_shift - 4);
+ stamp_send_wqe(qp, i);
}
}
+ if (rwq_ind_tbl &&
+ cur_state == IB_QPS_RESET &&
+ new_state == IB_QPS_INIT) {
+ fill_qp_rss_context(context, qp);
+ context->flags |= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET);
+ }
+
err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
to_mlx4_state(new_state), context, optpar,
sqd_event, &qp->mqp);
@@ -1785,7 +2561,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
qp->alt_port = attr->alt_port_num;
if (is_sqp(dev, qp))
- store_sqp_attrs(to_msqp(qp), attr, attr_mask);
+ store_sqp_attrs(qp->sqp, attr, attr_mask);
/*
* If we moved QP0 to RTR, bring the IB link up; if we moved
@@ -1807,9 +2583,9 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
* entries and reinitialize the QP.
*/
if (new_state == IB_QPS_RESET) {
- if (!ibqp->uobject) {
+ if (!ucontext) {
mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
- ibqp->srq ? to_msrq(ibqp->srq) : NULL);
+ ibsrq ? to_msrq(ibsrq) : NULL);
if (send_cq != recv_cq)
mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
@@ -1848,6 +2624,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
}
out:
+ if (err && qp->counter_index)
+ mlx4_ib_free_qp_counter(dev, qp);
if (err && steer_qp)
mlx4_ib_steer_qp_reg(dev, qp, 0);
kfree(context);
@@ -1918,28 +2696,25 @@ out:
return err;
}
-int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata)
+enum {
+ MLX4_IB_MODIFY_QP_RSS_SUP_ATTR_MSK = (IB_QP_STATE |
+ IB_QP_PORT),
+};
+
+static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
{
struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
struct mlx4_ib_qp *qp = to_mqp(ibqp);
enum ib_qp_state cur_state, new_state;
int err = -EINVAL;
- int ll;
mutex_lock(&qp->mutex);
cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
- if (cur_state == new_state && cur_state == IB_QPS_RESET) {
- ll = IB_LINK_LAYER_UNSPECIFIED;
- } else {
- int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
- ll = rdma_port_get_link_layer(&dev->ib_dev, port);
- }
-
if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
- attr_mask, ll)) {
+ attr_mask)) {
pr_debug("qpn 0x%x: invalid attribute mask specified "
"for transition %d to %d. qp_type %d,"
" attr_mask 0x%x\n",
@@ -1948,6 +2723,27 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto out;
}
+ if (ibqp->rwq_ind_tbl) {
+ if (!(((cur_state == IB_QPS_RESET) &&
+ (new_state == IB_QPS_INIT)) ||
+ ((cur_state == IB_QPS_INIT) &&
+ (new_state == IB_QPS_RTR)))) {
+ pr_debug("qpn 0x%x: RSS QP unsupported transition %d to %d\n",
+ ibqp->qp_num, cur_state, new_state);
+
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (attr_mask & ~MLX4_IB_MODIFY_QP_RSS_SUP_ATTR_MSK) {
+ pr_debug("qpn 0x%x: RSS QP unsupported attribute mask 0x%x for transition %d to %d\n",
+ ibqp->qp_num, attr_mask, cur_state, new_state);
+
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ }
+
if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) {
if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
if ((ibqp->qp_type == IB_QPT_RC) ||
@@ -2012,7 +2808,18 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto out;
}
- err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
+ if (ibqp->rwq_ind_tbl && (new_state == IB_QPS_INIT)) {
+ err = bringup_rss_rwqs(ibqp->rwq_ind_tbl, attr->port_num,
+ udata);
+ if (err)
+ goto out;
+ }
+
+ err = __mlx4_ib_modify_qp(ibqp, MLX4_IB_QP_SRC, attr, attr_mask,
+ cur_state, new_state, udata);
+
+ if (ibqp->rwq_ind_tbl && err)
+ bring_down_rss_rwqs(ibqp->rwq_ind_tbl, udata);
if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT))
attr->port_num = 1;
@@ -2022,51 +2829,77 @@ out:
return err;
}
+int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct mlx4_ib_qp *mqp = to_mqp(ibqp);
+ int ret;
+
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
+ ret = _mlx4_ib_modify_qp(ibqp, attr, attr_mask, udata);
+
+ if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
+ struct mlx4_ib_sqp *sqp = mqp->sqp;
+ int err = 0;
+
+ if (sqp->roce_v2_gsi)
+ err = ib_modify_qp(sqp->roce_v2_gsi, attr, attr_mask);
+ if (err)
+ pr_err("Failed to modify GSI QP for RoCEv2 (%d)\n",
+ err);
+ }
+ return ret;
+}
+
static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey)
{
int i;
for (i = 0; i < dev->caps.num_ports; i++) {
- if (qpn == dev->caps.qp0_proxy[i] ||
- qpn == dev->caps.qp0_tunnel[i]) {
- *qkey = dev->caps.qp0_qkey[i];
+ if (qpn == dev->caps.spec_qps[i].qp0_proxy ||
+ qpn == dev->caps.spec_qps[i].qp0_tunnel) {
+ *qkey = dev->caps.spec_qps[i].qp0_qkey;
return 0;
}
}
return -EINVAL;
}
-static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
- struct ib_send_wr *wr,
+static int build_sriov_qp0_header(struct mlx4_ib_qp *qp,
+ const struct ib_ud_wr *wr,
void *wqe, unsigned *mlx_seg_len)
{
- struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
- struct ib_device *ib_dev = &mdev->ib_dev;
+ struct mlx4_ib_dev *mdev = to_mdev(qp->ibqp.device);
+ struct mlx4_ib_sqp *sqp = qp->sqp;
+ struct ib_device *ib_dev = qp->ibqp.device;
struct mlx4_wqe_mlx_seg *mlx = wqe;
struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
- struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+ struct mlx4_ib_ah *ah = to_mah(wr->ah);
u16 pkey;
u32 qkey;
int send_size;
int header_size;
int spc;
+ int err;
int i;
- if (wr->opcode != IB_WR_SEND)
+ if (wr->wr.opcode != IB_WR_SEND)
return -EINVAL;
send_size = 0;
- for (i = 0; i < wr->num_sge; ++i)
- send_size += wr->sg_list[i].length;
+ for (i = 0; i < wr->wr.num_sge; ++i)
+ send_size += wr->wr.sg_list[i].length;
/* for proxy-qp0 sends, need to add in size of tunnel header */
/* for tunnel-qp0 sends, tunnel header is already in s/g list */
- if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER)
+ if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER)
send_size += sizeof (struct mlx4_ib_tunnel_header);
- ib_ud_header_init(send_size, 1, 0, 0, 0, 0, &sqp->ud_header);
+ ib_ud_header_init(send_size, 1, 0, 0, 0, 0, 0, 0, &sqp->ud_header);
- if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) {
+ if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) {
sqp->ud_header.lrh.service_level =
be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
sqp->ud_header.lrh.destination_lid =
@@ -2082,25 +2915,27 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
mlx->rlid = sqp->ud_header.lrh.destination_lid;
sqp->ud_header.lrh.virtual_lane = 0;
- sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
- ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
+ sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
+ err = ib_get_cached_pkey(ib_dev, qp->port, 0, &pkey);
+ if (err)
+ return err;
sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
- if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
- sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
+ if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
+ sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
else
sqp->ud_header.bth.destination_qpn =
- cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]);
+ cpu_to_be32(mdev->dev->caps.spec_qps[qp->port - 1].qp0_tunnel);
sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
if (mlx4_is_master(mdev->dev)) {
- if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
+ if (mlx4_get_parav_qkey(mdev->dev, qp->mqp.qpn, &qkey))
return -EINVAL;
} else {
- if (vf_get_qp0_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
+ if (vf_get_qp0_qkey(mdev->dev, qp->mqp.qpn, &qkey))
return -EINVAL;
}
sqp->ud_header.deth.qkey = cpu_to_be32(qkey);
- sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn);
+ sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->mqp.qpn);
sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
sqp->ud_header.immediate_present = 0;
@@ -2148,24 +2983,52 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
return 0;
}
-static void mlx4_u64_to_smac(u8 *dst_mac, u64 src_mac)
+static u8 sl_to_vl(struct mlx4_ib_dev *dev, u8 sl, int port_num)
{
- int i;
+ union sl2vl_tbl_to_u64 tmp_vltab;
+ u8 vl;
+
+ if (sl > 15)
+ return 0xf;
+ tmp_vltab.sl64 = atomic64_read(&dev->sl2vl[port_num - 1]);
+ vl = tmp_vltab.sl8[sl >> 1];
+ if (sl & 1)
+ vl &= 0x0f;
+ else
+ vl >>= 4;
+ return vl;
+}
- for (i = ETH_ALEN; i; i--) {
- dst_mac[i - 1] = src_mac & 0xff;
- src_mac >>= 8;
- }
+static int fill_gid_by_hw_index(struct mlx4_ib_dev *ibdev, u8 port_num,
+ int index, union ib_gid *gid,
+ enum ib_gid_type *gid_type)
+{
+ struct mlx4_ib_iboe *iboe = &ibdev->iboe;
+ struct mlx4_port_gid_table *port_gid_table;
+ unsigned long flags;
+
+ port_gid_table = &iboe->gids[port_num - 1];
+ spin_lock_irqsave(&iboe->lock, flags);
+ memcpy(gid, &port_gid_table->gids[index].gid, sizeof(*gid));
+ *gid_type = port_gid_table->gids[index].gid_type;
+ spin_unlock_irqrestore(&iboe->lock, flags);
+ if (rdma_is_zero_gid(gid))
+ return -ENOENT;
+
+ return 0;
}
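The sl_to_vl() helper above treats the per-port SL-to-VL table as a 64-bit
word holding sixteen 4-bit entries, two per byte: even SLs sit in the high
nibble, odd SLs in the low nibble. A minimal sketch of a writer that follows
the same packing (sl2vl_set_entry() is a hypothetical name, shown only to
illustrate the layout):

static inline void sl2vl_set_entry(union sl2vl_tbl_to_u64 *tbl, u8 sl, u8 vl)
{
	u8 *entry = &tbl->sl8[sl >> 1];

	if (sl & 1)
		*entry = (*entry & 0xf0) | (vl & 0x0f);	/* odd SL: low nibble */
	else
		*entry = (*entry & 0x0f) | (vl << 4);	/* even SL: high nibble */
}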
-static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
+#define MLX4_ROCEV2_QP1_SPORT 0xC000
+static int build_mlx_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr,
void *wqe, unsigned *mlx_seg_len)
{
- struct ib_device *ib_dev = sqp->qp.ibqp.device;
+ struct mlx4_ib_sqp *sqp = qp->sqp;
+ struct ib_device *ib_dev = qp->ibqp.device;
+ struct mlx4_ib_dev *ibdev = to_mdev(ib_dev);
struct mlx4_wqe_mlx_seg *mlx = wqe;
struct mlx4_wqe_ctrl_seg *ctrl = wqe;
struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
- struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+ struct mlx4_ib_ah *ah = to_mah(wr->ah);
union ib_gid sgid;
u16 pkey;
int send_size;
@@ -2177,14 +3040,17 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
bool is_eth;
bool is_vlan = false;
bool is_grh;
+ bool is_udp = false;
+ int ip_version = 0;
send_size = 0;
- for (i = 0; i < wr->num_sge; ++i)
- send_size += wr->sg_list[i].length;
+ for (i = 0; i < wr->wr.num_sge; ++i)
+ send_size += wr->wr.sg_list[i].length;
- is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
+ is_eth = rdma_port_get_link_layer(qp->ibqp.device, qp->port) == IB_LINK_LAYER_ETHERNET;
is_grh = mlx4_ib_ah_grh_present(ah);
if (is_eth) {
+ enum ib_gid_type gid_type;
if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
/* When multi-function is enabled, the ib_core gid
* indexes don't necessarily match the hw ones, so
@@ -2195,19 +3061,31 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
if (err)
return err;
} else {
- err = ib_get_cached_gid(ib_dev,
- be32_to_cpu(ah->av.ib.port_pd) >> 24,
- ah->av.ib.gid_index, &sgid);
- if (err)
+ err = fill_gid_by_hw_index(ibdev, qp->port,
+ ah->av.ib.gid_index, &sgid,
+ &gid_type);
+ if (!err) {
+ is_udp = gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
+ if (is_udp) {
+ if (ipv6_addr_v4mapped((struct in6_addr *)&sgid))
+ ip_version = 4;
+ else
+ ip_version = 6;
+ is_grh = false;
+ }
+ } else {
return err;
+ }
}
-
if (ah->av.eth.vlan != cpu_to_be16(0xffff)) {
vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
- is_vlan = 1;
+ is_vlan = true;
}
}
- ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header);
+ err = ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh,
+ ip_version, is_udp, 0, &sqp->ud_header);
+ if (err)
+ return err;
if (!is_eth) {
sqp->ud_header.lrh.service_level =
@@ -2216,48 +3094,76 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f);
}
- if (is_grh) {
+ if (is_grh || (ip_version == 6)) {
sqp->ud_header.grh.traffic_class =
(be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
sqp->ud_header.grh.flow_label =
ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit;
- if (is_eth)
+ if (is_eth) {
memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
- else {
- if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
- /* When multi-function is enabled, the ib_core gid
- * indexes don't necessarily match the hw ones, so
- * we must use our own cache */
- sqp->ud_header.grh.source_gid.global.subnet_prefix =
- to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
- subnet_prefix;
- sqp->ud_header.grh.source_gid.global.interface_id =
- to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
- guid_cache[ah->av.ib.gid_index];
- } else
- ib_get_cached_gid(ib_dev,
- be32_to_cpu(ah->av.ib.port_pd) >> 24,
- ah->av.ib.gid_index,
- &sqp->ud_header.grh.source_gid);
+ } else {
+ if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
+ /* When multi-function is enabled, the ib_core gid
+ * indexes don't necessarily match the hw ones, so
+ * we must use our own cache
+ */
+ sqp->ud_header.grh.source_gid.global
+ .subnet_prefix =
+ cpu_to_be64(atomic64_read(
+ &(to_mdev(ib_dev)
+ ->sriov
+ .demux[qp->port - 1]
+ .subnet_prefix)));
+ sqp->ud_header.grh.source_gid.global
+ .interface_id =
+ to_mdev(ib_dev)
+ ->sriov.demux[qp->port - 1]
+ .guid_cache[ah->av.ib.gid_index];
+ } else {
+ sqp->ud_header.grh.source_gid =
+ ah->ibah.sgid_attr->gid;
+ }
}
memcpy(sqp->ud_header.grh.destination_gid.raw,
ah->av.ib.dgid, 16);
}
+ if (ip_version == 4) {
+ sqp->ud_header.ip4.tos =
+ (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
+ sqp->ud_header.ip4.id = 0;
+ sqp->ud_header.ip4.frag_off = htons(IP_DF);
+ sqp->ud_header.ip4.ttl = ah->av.eth.hop_limit;
+
+ memcpy(&sqp->ud_header.ip4.saddr,
+ sgid.raw + 12, 4);
+ memcpy(&sqp->ud_header.ip4.daddr, ah->av.ib.dgid + 12, 4);
+ sqp->ud_header.ip4.check = ib_ud_ip4_csum(&sqp->ud_header);
+ }
+
+ if (is_udp) {
+ sqp->ud_header.udp.dport = htons(ROCE_V2_UDP_DPORT);
+ sqp->ud_header.udp.sport = htons(MLX4_ROCEV2_QP1_SPORT);
+ sqp->ud_header.udp.csum = 0;
+ }
+
mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
if (!is_eth) {
- mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
- (sqp->ud_header.lrh.destination_lid ==
- IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
- (sqp->ud_header.lrh.service_level << 8));
+ mlx->flags |=
+ cpu_to_be32((!qp->ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
+ (sqp->ud_header.lrh.destination_lid ==
+ IB_LID_PERMISSIVE ?
+ MLX4_WQE_MLX_SLR :
+ 0) |
+ (sqp->ud_header.lrh.service_level << 8));
if (ah->av.ib.port_pd & cpu_to_be32(0x80000000))
mlx->flags |= cpu_to_be32(0x1); /* force loopback */
mlx->rlid = sqp->ud_header.lrh.destination_lid;
}
- switch (wr->opcode) {
+ switch (wr->wr.opcode) {
case IB_WR_SEND:
sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
sqp->ud_header.immediate_present = 0;
@@ -2265,60 +3171,62 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
case IB_WR_SEND_WITH_IMM:
sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
sqp->ud_header.immediate_present = 1;
- sqp->ud_header.immediate_data = wr->ex.imm_data;
+ sqp->ud_header.immediate_data = wr->wr.ex.imm_data;
break;
default:
return -EINVAL;
}
if (is_eth) {
- struct in6_addr in6;
-
+ u16 ether_type;
u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
+ ether_type = (!is_udp) ? ETH_P_IBOE:
+ (ip_version == 4 ? ETH_P_IP : ETH_P_IPV6);
+
mlx->sched_prio = cpu_to_be16(pcp);
- memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
- /* FIXME: cache smac value? */
+ ether_addr_copy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac);
+ ether_addr_copy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac);
memcpy(&ctrl->srcrb_flags16[0], ah->av.eth.mac, 2);
memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
- memcpy(&in6, sgid.raw, sizeof(in6));
-
- if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
- u64 mac = atomic64_read(&to_mdev(ib_dev)->iboe.mac[sqp->qp.port - 1]);
- u8 smac[ETH_ALEN];
-
- mlx4_u64_to_smac(smac, mac);
- memcpy(sqp->ud_header.eth.smac_h, smac, ETH_ALEN);
- } else {
- /* use the src mac of the tunnel */
- memcpy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac, ETH_ALEN);
- }
if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
if (!is_vlan) {
- sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
+ sqp->ud_header.eth.type = cpu_to_be16(ether_type);
} else {
- sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
+ sqp->ud_header.vlan.type = cpu_to_be16(ether_type);
sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
}
} else {
- sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
+ sqp->ud_header.lrh.virtual_lane =
+ !qp->ibqp.qp_num ?
+ 15 :
+ sl_to_vl(to_mdev(ib_dev),
+ sqp->ud_header.lrh.service_level,
+ qp->port);
+ if (qp->ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15)
+ return -EINVAL;
if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
}
- sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
- if (!sqp->qp.ibqp.qp_num)
- ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
+ sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
+ if (!qp->ibqp.qp_num)
+ err = ib_get_cached_pkey(ib_dev, qp->port, sqp->pkey_index,
+ &pkey);
else
- ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey);
+ err = ib_get_cached_pkey(ib_dev, qp->port, wr->pkey_index,
+ &pkey);
+ if (err)
+ return err;
+
sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
- sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
+ sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
- sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
- sqp->qkey : wr->wr.ud.remote_qkey);
- sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
+ sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
+ sqp->qkey : wr->remote_qkey);
+ sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num);
header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
@@ -2405,45 +3313,22 @@ static __be32 convert_access(int acc)
cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
}
-static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)
+static void set_reg_seg(struct mlx4_wqe_fmr_seg *fseg,
+ const struct ib_reg_wr *wr)
{
- struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
- int i;
-
- for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i)
- mfrpl->mapped_page_list[i] =
- cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] |
- MLX4_MTT_FLAG_PRESENT);
+ struct mlx4_ib_mr *mr = to_mmr(wr->mr);
- fseg->flags = convert_access(wr->wr.fast_reg.access_flags);
- fseg->mem_key = cpu_to_be32(wr->wr.fast_reg.rkey);
- fseg->buf_list = cpu_to_be64(mfrpl->map);
- fseg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
- fseg->reg_len = cpu_to_be64(wr->wr.fast_reg.length);
+ fseg->flags = convert_access(wr->access);
+ fseg->mem_key = cpu_to_be32(wr->key);
+ fseg->buf_list = cpu_to_be64(mr->page_map);
+ fseg->start_addr = cpu_to_be64(mr->ibmr.iova);
+ fseg->reg_len = cpu_to_be64(mr->ibmr.length);
fseg->offset = 0; /* XXX -- is this just for ZBVA? */
- fseg->page_size = cpu_to_be32(wr->wr.fast_reg.page_shift);
+ fseg->page_size = cpu_to_be32(ilog2(mr->ibmr.page_size));
fseg->reserved[0] = 0;
fseg->reserved[1] = 0;
}
-static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg, struct ib_send_wr *wr)
-{
- bseg->flags1 =
- convert_access(wr->wr.bind_mw.bind_info.mw_access_flags) &
- cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ |
- MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE |
- MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC);
- bseg->flags2 = 0;
- if (wr->wr.bind_mw.mw->type == IB_MW_TYPE_2)
- bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_TYPE_2);
- if (wr->wr.bind_mw.bind_info.mw_access_flags & IB_ZERO_BASED)
- bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_ZERO_BASED);
- bseg->new_rkey = cpu_to_be32(wr->wr.bind_mw.rkey);
- bseg->lkey = cpu_to_be32(wr->wr.bind_mw.bind_info.mr->lkey);
- bseg->addr = cpu_to_be64(wr->wr.bind_mw.bind_info.addr);
- bseg->length = cpu_to_be64(wr->wr.bind_mw.bind_info.length);
-}
-
static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
{
memset(iseg, 0, sizeof(*iseg));
@@ -2458,46 +3343,47 @@ static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
rseg->reserved = 0;
}
-static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
+static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg,
+ const struct ib_atomic_wr *wr)
{
- if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
- aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
- aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
- } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
- aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
- aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
+ if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+ aseg->swap_add = cpu_to_be64(wr->swap);
+ aseg->compare = cpu_to_be64(wr->compare_add);
+ } else if (wr->wr.opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
+ aseg->swap_add = cpu_to_be64(wr->compare_add);
+ aseg->compare = cpu_to_be64(wr->compare_add_mask);
} else {
- aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
+ aseg->swap_add = cpu_to_be64(wr->compare_add);
aseg->compare = 0;
}
}
static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
- struct ib_send_wr *wr)
+ const struct ib_atomic_wr *wr)
{
- aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
- aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
- aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
- aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
+ aseg->swap_add = cpu_to_be64(wr->swap);
+ aseg->swap_add_mask = cpu_to_be64(wr->swap_mask);
+ aseg->compare = cpu_to_be64(wr->compare_add);
+ aseg->compare_mask = cpu_to_be64(wr->compare_add_mask);
}
static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
- struct ib_send_wr *wr)
+ const struct ib_ud_wr *wr)
{
- memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
- dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
- dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
- dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan;
- memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6);
+ memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av));
+ dseg->dqpn = cpu_to_be32(wr->remote_qpn);
+ dseg->qkey = cpu_to_be32(wr->remote_qkey);
+ dseg->vlan = to_mah(wr->ah)->av.eth.vlan;
+ memcpy(dseg->mac, to_mah(wr->ah)->av.eth.mac, 6);
}
static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
struct mlx4_wqe_datagram_seg *dseg,
- struct ib_send_wr *wr,
+ const struct ib_ud_wr *wr,
enum mlx4_ib_qp_type qpt)
{
- union mlx4_ext_av *av = &to_mah(wr->wr.ud.ah)->av;
+ union mlx4_ext_av *av = &to_mah(wr->ah)->av;
struct mlx4_av sqp_av = {0};
int port = *((u8 *) &av->ib.port_pd) & 0x3;
@@ -2509,25 +3395,26 @@ static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av));
if (qpt == MLX4_IB_QPT_PROXY_GSI)
- dseg->dqpn = cpu_to_be32(dev->dev->caps.qp1_tunnel[port - 1]);
+ dseg->dqpn = cpu_to_be32(dev->dev->caps.spec_qps[port - 1].qp1_tunnel);
else
- dseg->dqpn = cpu_to_be32(dev->dev->caps.qp0_tunnel[port - 1]);
+ dseg->dqpn = cpu_to_be32(dev->dev->caps.spec_qps[port - 1].qp0_tunnel);
/* Use QKEY from the QP context, which is set by master */
dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
}
-static void build_tunnel_header(struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len)
+static void build_tunnel_header(const struct ib_ud_wr *wr, void *wqe,
+ unsigned *mlx_seg_len)
{
struct mlx4_wqe_inline_seg *inl = wqe;
struct mlx4_ib_tunnel_header hdr;
- struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+ struct mlx4_ib_ah *ah = to_mah(wr->ah);
int spc;
int i;
memcpy(&hdr.av, &ah->av, sizeof hdr.av);
- hdr.remote_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
- hdr.pkey_index = cpu_to_be16(wr->wr.ud.pkey_index);
- hdr.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+ hdr.remote_qpn = cpu_to_be32(wr->remote_qpn);
+ hdr.pkey_index = cpu_to_be16(wr->pkey_index);
+ hdr.qkey = cpu_to_be32(wr->remote_qkey);
memcpy(hdr.mac, ah->av.eth.mac, 6);
hdr.vlan = ah->av.eth.vlan;
@@ -2599,27 +3486,27 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
dseg->addr = cpu_to_be64(sg->addr);
}
-static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
- struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
- __be32 *lso_hdr_sz, __be32 *blh)
+static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe,
+ const struct ib_ud_wr *wr, struct mlx4_ib_qp *qp,
+ unsigned *lso_seg_len, __be32 *lso_hdr_sz, __be32 *blh)
{
- unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);
+ unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16);
if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
*blh = cpu_to_be32(1 << 6);
if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
- wr->num_sge > qp->sq.max_gs - (halign >> 4)))
+ wr->wr.num_sge > qp->sq.max_gs - (halign >> 4)))
return -EINVAL;
- memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
+ memcpy(wqe->header, wr->header, wr->hlen);
- *lso_hdr_sz = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen);
+ *lso_hdr_sz = cpu_to_be32(wr->mss << 16 | wr->hlen);
*lso_seg_len = halign;
return 0;
}
-static __be32 send_ieth(struct ib_send_wr *wr)
+static __be32 send_ieth(const struct ib_send_wr *wr)
{
switch (wr->opcode) {
case IB_WR_SEND_WITH_IMM:
@@ -2641,8 +3528,8 @@ static void add_zero_len_inline(void *wqe)
inl->byte_count = cpu_to_be32(1 << 31);
}
-int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+static int _mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr, bool drain)
{
struct mlx4_ib_qp *qp = to_mqp(ibqp);
void *wqe;
@@ -2652,18 +3539,37 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
int nreq;
int err = 0;
unsigned ind;
- int uninitialized_var(stamp);
- int uninitialized_var(size);
- unsigned uninitialized_var(seglen);
+ int size;
+ unsigned seglen;
__be32 dummy;
__be32 *lso_wqe;
- __be32 uninitialized_var(lso_hdr_sz);
+ __be32 lso_hdr_sz;
__be32 blh;
int i;
struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+ if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
+ struct mlx4_ib_sqp *sqp = qp->sqp;
+
+ if (sqp->roce_v2_gsi) {
+ struct mlx4_ib_ah *ah = to_mah(ud_wr(wr)->ah);
+ enum ib_gid_type gid_type;
+ union ib_gid gid;
+
+ if (!fill_gid_by_hw_index(mdev, qp->port,
+ ah->av.ib.gid_index,
+ &gid, &gid_type))
+ qp = (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ?
+ to_mqp(sqp->roce_v2_gsi) : qp;
+ else
+ pr_err("Failed to get gid at index %d. RoCEv2 will not work properly\n",
+ ah->av.ib.gid_index);
+ }
+ }
+
spin_lock_irqsave(&qp->sq.lock, flags);
- if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR &&
+ !drain) {
err = -EIO;
*bad_wr = wr;
nreq = 0;
@@ -2713,11 +3619,11 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
- set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
- wr->wr.atomic.rkey);
+ set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
+ atomic_wr(wr)->rkey);
wqe += sizeof (struct mlx4_wqe_raddr_seg);
- set_atomic_seg(wqe, wr);
+ set_atomic_seg(wqe, atomic_wr(wr));
wqe += sizeof (struct mlx4_wqe_atomic_seg);
size += (sizeof (struct mlx4_wqe_raddr_seg) +
@@ -2726,11 +3632,11 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
- set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
- wr->wr.atomic.rkey);
+ set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
+ atomic_wr(wr)->rkey);
wqe += sizeof (struct mlx4_wqe_raddr_seg);
- set_masked_atomic_seg(wqe, wr);
+ set_masked_atomic_seg(wqe, atomic_wr(wr));
wqe += sizeof (struct mlx4_wqe_masked_atomic_seg);
size += (sizeof (struct mlx4_wqe_raddr_seg) +
@@ -2741,8 +3647,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_RDMA_READ:
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
- set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
- wr->wr.rdma.rkey);
+ set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+ rdma_wr(wr)->rkey);
wqe += sizeof (struct mlx4_wqe_raddr_seg);
size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
break;
@@ -2755,21 +3661,14 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
break;
- case IB_WR_FAST_REG_MR:
+ case IB_WR_REG_MR:
ctrl->srcrb_flags |=
cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
- set_fmr_seg(wqe, wr);
- wqe += sizeof (struct mlx4_wqe_fmr_seg);
- size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
+ set_reg_seg(wqe, reg_wr(wr));
+ wqe += sizeof(struct mlx4_wqe_fmr_seg);
+ size += sizeof(struct mlx4_wqe_fmr_seg) / 16;
break;
- case IB_WR_BIND_MW:
- ctrl->srcrb_flags |=
- cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
- set_bind_seg(wqe, wr);
- wqe += sizeof(struct mlx4_wqe_bind_seg);
- size += sizeof(struct mlx4_wqe_bind_seg) / 16;
- break;
default:
/* No extra segments required for sends */
break;
@@ -2777,7 +3676,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
case MLX4_IB_QPT_TUN_SMI_OWNER:
- err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen);
+ err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl,
+ &seglen);
if (unlikely(err)) {
*bad_wr = wr;
goto out;
@@ -2788,19 +3688,20 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case MLX4_IB_QPT_TUN_SMI:
case MLX4_IB_QPT_TUN_GSI:
/* this is a UD qp used in MAD responses to slaves. */
- set_datagram_seg(wqe, wr);
+ set_datagram_seg(wqe, ud_wr(wr));
/* set the forced-loopback bit in the data seg av */
*(__be32 *) wqe |= cpu_to_be32(0x80000000);
wqe += sizeof (struct mlx4_wqe_datagram_seg);
size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
break;
case MLX4_IB_QPT_UD:
- set_datagram_seg(wqe, wr);
+ set_datagram_seg(wqe, ud_wr(wr));
wqe += sizeof (struct mlx4_wqe_datagram_seg);
size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
if (wr->opcode == IB_WR_LSO) {
- err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh);
+ err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen,
+ &lso_hdr_sz, &blh);
if (unlikely(err)) {
*bad_wr = wr;
goto out;
@@ -2812,7 +3713,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
case MLX4_IB_QPT_PROXY_SMI_OWNER:
- err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen);
+ err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl,
+ &seglen);
if (unlikely(err)) {
*bad_wr = wr;
goto out;
@@ -2823,7 +3725,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
add_zero_len_inline(wqe);
wqe += 16;
size++;
- build_tunnel_header(wr, wqe, &seglen);
+ build_tunnel_header(ud_wr(wr), wqe, &seglen);
wqe += seglen;
size += seglen / 16;
break;
@@ -2833,18 +3735,19 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
* In this case we first add a UD segment targeting
* the tunnel qp, and then add a header with address
* information */
- set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, wr,
+ set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe,
+ ud_wr(wr),
qp->mlx4_ib_qp_type);
wqe += sizeof (struct mlx4_wqe_datagram_seg);
size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
- build_tunnel_header(wr, wqe, &seglen);
+ build_tunnel_header(ud_wr(wr), wqe, &seglen);
wqe += seglen;
size += seglen / 16;
break;
case MLX4_IB_QPT_SMI:
case MLX4_IB_QPT_GSI:
- err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen);
+ err = build_mlx_header(qp, ud_wr(wr), ctrl, &seglen);
if (unlikely(err)) {
*bad_wr = wr;
goto out;
@@ -2888,8 +3791,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
wmb();
*lso_wqe = lso_hdr_sz;
- ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
- MLX4_WQE_CTRL_FENCE : 0) | size;
+ ctrl->qpn_vlan.fence_size = (wr->send_flags & IB_SEND_FENCE ?
+ MLX4_WQE_CTRL_FENCE : 0) | size;
/*
* Make sure descriptor is fully written before
@@ -2907,22 +3810,14 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
(ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;
- stamp = ind + qp->sq_spare_wqes;
- ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);
-
/*
* We can improve latency by not stamping the last
* send queue WQE until after ringing the doorbell, so
* only stamp here if there are still more WQEs to post.
- *
- * Same optimization applies to padding with NOP wqe
- * in case of WQE shrinking (used to prevent wrap-around
- * in the middle of WR).
*/
- if (wr->next) {
- stamp_send_wqe(qp, stamp, size * 16);
- ind = pad_wraparound(qp, ind);
- }
+ if (wr->next)
+ stamp_send_wqe(qp, ind + qp->sq_spare_wqes);
+ ind++;
}
out:
@@ -2935,18 +3830,11 @@ out:
*/
wmb();
- writel(qp->doorbell_qpn,
- to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);
-
- /*
- * Make sure doorbells don't leak out of SQ spinlock
- * and reach the HCA out of order.
- */
- mmiowb();
+ writel_relaxed(qp->doorbell_qpn,
+ to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);
- stamp_send_wqe(qp, stamp, size * 16);
+ stamp_send_wqe(qp, ind + qp->sq_spare_wqes - 1);
- ind = pad_wraparound(qp, ind);
qp->sq_next_wqe = ind;
}
@@ -2955,8 +3843,14 @@ out:
return err;
}
-int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ return _mlx4_ib_post_send(ibqp, wr, bad_wr, false);
+}
+
+static int _mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr, bool drain)
{
struct mlx4_ib_qp *qp = to_mqp(ibqp);
struct mlx4_wqe_data_seg *scat;
@@ -2971,7 +3865,8 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
max_gs = qp->rq.max_gs;
spin_lock_irqsave(&qp->rq.lock, flags);
- if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR &&
+ !drain) {
err = -EIO;
*bad_wr = wr;
nreq = 0;
@@ -3042,6 +3937,12 @@ out:
return err;
}
+int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ return _mlx4_ib_post_recv(ibqp, wr, bad_wr, false);
+}
+
static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
{
switch (mlx4_state) {
@@ -3081,39 +3982,37 @@ static int to_ib_qp_access_flags(int mlx4_flags)
return ib_flags;
}
-static void to_ib_ah_attr(struct mlx4_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
- struct mlx4_qp_path *path)
+static void to_rdma_ah_attr(struct mlx4_ib_dev *ibdev,
+ struct rdma_ah_attr *ah_attr,
+ struct mlx4_qp_path *path)
{
struct mlx4_dev *dev = ibdev->dev;
- int is_eth;
-
- memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
- ib_ah_attr->port_num = path->sched_queue & 0x40 ? 2 : 1;
+ u8 port_num = path->sched_queue & 0x40 ? 2 : 1;
- if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
+ memset(ah_attr, 0, sizeof(*ah_attr));
+ if (port_num == 0 || port_num > dev->caps.num_ports)
return;
+ ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port_num);
- is_eth = rdma_port_get_link_layer(&ibdev->ib_dev, ib_ah_attr->port_num) ==
- IB_LINK_LAYER_ETHERNET;
- if (is_eth)
- ib_ah_attr->sl = ((path->sched_queue >> 3) & 0x7) |
- ((path->sched_queue & 4) << 1);
+ if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE)
+ rdma_ah_set_sl(ah_attr, ((path->sched_queue >> 3) & 0x7) |
+ ((path->sched_queue & 4) << 1));
else
- ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf;
-
- ib_ah_attr->dlid = be16_to_cpu(path->rlid);
- ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
- ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
- ib_ah_attr->ah_flags = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
- if (ib_ah_attr->ah_flags) {
- ib_ah_attr->grh.sgid_index = path->mgid_index;
- ib_ah_attr->grh.hop_limit = path->hop_limit;
- ib_ah_attr->grh.traffic_class =
- (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
- ib_ah_attr->grh.flow_label =
- be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
- memcpy(ib_ah_attr->grh.dgid.raw,
- path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
+ rdma_ah_set_sl(ah_attr, (path->sched_queue >> 2) & 0xf);
+ rdma_ah_set_port_num(ah_attr, port_num);
+
+ rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid));
+ rdma_ah_set_path_bits(ah_attr, path->grh_mylmc & 0x7f);
+ rdma_ah_set_static_rate(ah_attr,
+ path->static_rate ? path->static_rate - 5 : 0);
+ if (path->grh_mylmc & (1 << 7)) {
+ rdma_ah_set_grh(ah_attr, NULL,
+ be32_to_cpu(path->tclass_flowlabel) & 0xfffff,
+ path->mgid_index,
+ path->hop_limit,
+ (be32_to_cpu(path->tclass_flowlabel)
+ >> 20) & 0xff);
+ rdma_ah_set_dgid_raw(ah_attr, path->rgid);
}
}
@@ -3126,6 +4025,9 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
int mlx4_state;
int err = 0;
+ if (ibqp->rwq_ind_tbl)
+ return -EOPNOTSUPP;
+
mutex_lock(&qp->mutex);
if (qp->state == IB_QPS_RESET) {
@@ -3153,11 +4055,14 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
qp_attr->qp_access_flags =
to_ib_qp_access_flags(be32_to_cpu(context.params2));
- if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
- to_ib_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
- to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
+ if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC ||
+ qp->ibqp.qp_type == IB_QPT_XRC_INI ||
+ qp->ibqp.qp_type == IB_QPT_XRC_TGT) {
+ to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
+ to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
- qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
+ qp_attr->alt_port_num =
+ rdma_ah_get_port_num(&qp_attr->alt_ah_attr);
}
qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
@@ -3220,3 +4125,408 @@ out:
return err;
}
+struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct mlx4_dev *dev = to_mdev(pd->device)->dev;
+ struct ib_qp_init_attr ib_qp_init_attr = {};
+ struct mlx4_ib_qp *qp;
+ struct mlx4_ib_create_wq ucmd;
+ int err, required_cmd_sz;
+
+ if (!udata)
+ return ERR_PTR(-EINVAL);
+
+ required_cmd_sz = offsetof(typeof(ucmd), comp_mask) +
+ sizeof(ucmd.comp_mask);
+ if (udata->inlen < required_cmd_sz) {
+ pr_debug("invalid inlen\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (udata->inlen > sizeof(ucmd) &&
+ !ib_is_udata_cleared(udata, sizeof(ucmd),
+ udata->inlen - sizeof(ucmd))) {
+ pr_debug("inlen is not supported\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ if (udata->outlen)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (init_attr->wq_type != IB_WQT_RQ) {
+ pr_debug("unsupported wq type %d\n", init_attr->wq_type);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ if (init_attr->create_flags & ~IB_WQ_FLAGS_SCATTER_FCS ||
+ !(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
+ pr_debug("unsupported create_flags %u\n",
+ init_attr->create_flags);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&qp->mutex);
+ qp->pri.vid = 0xFFFF;
+ qp->alt.vid = 0xFFFF;
+
+ ib_qp_init_attr.qp_context = init_attr->wq_context;
+ ib_qp_init_attr.qp_type = IB_QPT_RAW_PACKET;
+ ib_qp_init_attr.cap.max_recv_wr = init_attr->max_wr;
+ ib_qp_init_attr.cap.max_recv_sge = init_attr->max_sge;
+ ib_qp_init_attr.recv_cq = init_attr->cq;
+ ib_qp_init_attr.send_cq = ib_qp_init_attr.recv_cq; /* Dummy CQ */
+
+ if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS)
+ ib_qp_init_attr.create_flags |= IB_QP_CREATE_SCATTER_FCS;
+
+ err = create_rq(pd, &ib_qp_init_attr, udata, qp);
+ if (err) {
+ kfree(qp);
+ return ERR_PTR(err);
+ }
+
+ qp->ibwq.event_handler = init_attr->event_handler;
+ qp->ibwq.wq_num = qp->mqp.qpn;
+ qp->ibwq.state = IB_WQS_RESET;
+
+ return &qp->ibwq;
+}
+
+static int ib_wq2qp_state(enum ib_wq_state state)
+{
+ switch (state) {
+ case IB_WQS_RESET:
+ return IB_QPS_RESET;
+ case IB_WQS_RDY:
+ return IB_QPS_RTR;
+ default:
+ return IB_QPS_ERR;
+ }
+}
+
+static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state,
+ struct ib_udata *udata)
+{
+ struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
+ enum ib_qp_state qp_cur_state;
+ enum ib_qp_state qp_new_state;
+ int attr_mask;
+ int err;
+
+ /* ib_qp.state represents the WQ HW state while ib_wq.state represents
+ * the WQ logical state.
+ */
+ qp_cur_state = qp->state;
+ qp_new_state = ib_wq2qp_state(new_state);
+
+ if (ib_wq2qp_state(new_state) == qp_cur_state)
+ return 0;
+
+ if (new_state == IB_WQS_RDY) {
+ struct ib_qp_attr attr = {};
+
+ attr.port_num = qp->port;
+ attr_mask = IB_QP_PORT;
+
+ err = __mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, &attr,
+ attr_mask, IB_QPS_RESET, IB_QPS_INIT,
+ udata);
+ if (err) {
+ pr_debug("WQN=0x%06x failed to apply RST->INIT on the HW QP\n",
+ ibwq->wq_num);
+ return err;
+ }
+
+ qp_cur_state = IB_QPS_INIT;
+ }
+
+ attr_mask = 0;
+ err = __mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, NULL, attr_mask,
+ qp_cur_state, qp_new_state, udata);
+
+ if (err && (qp_cur_state == IB_QPS_INIT)) {
+ qp_new_state = IB_QPS_RESET;
+ if (__mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, NULL,
+ attr_mask, IB_QPS_INIT, IB_QPS_RESET,
+ udata)) {
+ pr_warn("WQN=0x%06x failed with reverting HW's resources failure\n",
+ ibwq->wq_num);
+ qp_new_state = IB_QPS_INIT;
+ }
+ }
+
+ qp->state = qp_new_state;
+
+ return err;
+}
+
+int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr,
+ u32 wq_attr_mask, struct ib_udata *udata)
+{
+ struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
+ struct mlx4_ib_modify_wq ucmd = {};
+ size_t required_cmd_sz;
+ enum ib_wq_state cur_state, new_state;
+ int err = 0;
+
+ required_cmd_sz = offsetof(typeof(ucmd), reserved) +
+ sizeof(ucmd.reserved);
+ if (udata->inlen < required_cmd_sz)
+ return -EINVAL;
+
+ if (udata->inlen > sizeof(ucmd) &&
+ !ib_is_udata_cleared(udata, sizeof(ucmd),
+ udata->inlen - sizeof(ucmd)))
+ return -EOPNOTSUPP;
+
+ if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
+ return -EFAULT;
+
+ if (ucmd.comp_mask || ucmd.reserved)
+ return -EOPNOTSUPP;
+
+ if (wq_attr_mask & IB_WQ_FLAGS)
+ return -EOPNOTSUPP;
+
+ cur_state = wq_attr->curr_wq_state;
+ new_state = wq_attr->wq_state;
+
+ if ((new_state == IB_WQS_RDY) && (cur_state == IB_WQS_ERR))
+ return -EINVAL;
+
+ if ((new_state == IB_WQS_ERR) && (cur_state == IB_WQS_RESET))
+ return -EINVAL;
+
+ /* Need to protect against the parent RSS which also may modify WQ
+ * state.
+ */
+ mutex_lock(&qp->mutex);
+
+ /* Can update HW state only if an RSS QP has already been associated
+ * with this WQ, so we can apply its port to the WQ.
+ */
+ if (qp->rss_usecnt)
+ err = _mlx4_ib_modify_wq(ibwq, new_state, udata);
+
+ if (!err)
+ ibwq->state = new_state;
+
+ mutex_unlock(&qp->mutex);
+
+ return err;
+}
+
+int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
+{
+ struct mlx4_ib_dev *dev = to_mdev(ibwq->device);
+ struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
+
+ if (qp->counter_index)
+ mlx4_ib_free_qp_counter(dev, qp);
+
+ destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata);
+
+ kfree(qp);
+ return 0;
+}
+
+int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct mlx4_ib_create_rwq_ind_tbl_resp resp = {};
+ unsigned int ind_tbl_size = 1 << init_attr->log_ind_tbl_size;
+ struct ib_device *device = rwq_ind_table->device;
+ unsigned int base_wqn;
+ size_t min_resp_len;
+ int i, err = 0;
+
+ if (udata->inlen > 0 &&
+ !ib_is_udata_cleared(udata, 0,
+ udata->inlen))
+ return -EOPNOTSUPP;
+
+ min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
+ if (udata->outlen && udata->outlen < min_resp_len)
+ return -EINVAL;
+
+ if (ind_tbl_size >
+ device->attrs.rss_caps.max_rwq_indirection_table_size) {
+ pr_debug("log_ind_tbl_size = %d is bigger than supported = %d\n",
+ ind_tbl_size,
+ device->attrs.rss_caps.max_rwq_indirection_table_size);
+ return -EINVAL;
+ }
+
+ base_wqn = init_attr->ind_tbl[0]->wq_num;
+
+ if (base_wqn % ind_tbl_size) {
+ pr_debug("WQN=0x%x isn't aligned with indirection table size\n",
+ base_wqn);
+ return -EINVAL;
+ }
+
+ for (i = 1; i < ind_tbl_size; i++) {
+ if (++base_wqn != init_attr->ind_tbl[i]->wq_num) {
+ pr_debug("indirection table's WQNs aren't consecutive\n");
+ return -EINVAL;
+ }
+ }
+
+ if (udata->outlen) {
+ resp.response_length = offsetof(typeof(resp), response_length) +
+ sizeof(resp.response_length);
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ }
+
+ return err;
+}
+
+struct mlx4_ib_drain_cqe {
+ struct ib_cqe cqe;
+ struct completion done;
+};
+
+static void mlx4_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct mlx4_ib_drain_cqe *cqe = container_of(wc->wr_cqe,
+ struct mlx4_ib_drain_cqe,
+ cqe);
+
+ complete(&cqe->done);
+}
+
+/* This function returns only once the drain WR has completed */
+static void handle_drain_completion(struct ib_cq *cq,
+ struct mlx4_ib_drain_cqe *sdrain,
+ struct mlx4_ib_dev *dev)
+{
+ struct mlx4_dev *mdev = dev->dev;
+
+ if (cq->poll_ctx == IB_POLL_DIRECT) {
+ while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0)
+ ib_process_cq_direct(cq, -1);
+ return;
+ }
+
+ if (mdev->persist->state == MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ struct mlx4_ib_cq *mcq = to_mcq(cq);
+ bool triggered = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ /* Make sure that the CQ handler won't run if it hasn't run yet */
+ if (!mcq->mcq.reset_notify_added)
+ mcq->mcq.reset_notify_added = 1;
+ else
+ triggered = true;
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+ if (triggered) {
+ /* Wait for any scheduled/running task to finish */
+ switch (cq->poll_ctx) {
+ case IB_POLL_SOFTIRQ:
+ irq_poll_disable(&cq->iop);
+ irq_poll_enable(&cq->iop);
+ break;
+ case IB_POLL_WORKQUEUE:
+ cancel_work_sync(&cq->work);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ }
+
+ /* Run the CQ handler - this makes sure that the drain WR will
+ * be processed if it wasn't processed yet.
+ */
+ mcq->mcq.comp(&mcq->mcq);
+ }
+
+ wait_for_completion(&sdrain->done);
+}
+
+void mlx4_ib_drain_sq(struct ib_qp *qp)
+{
+ struct ib_cq *cq = qp->send_cq;
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct mlx4_ib_drain_cqe sdrain;
+ const struct ib_send_wr *bad_swr;
+ struct ib_rdma_wr swr = {
+ .wr = {
+ .next = NULL,
+ { .wr_cqe = &sdrain.cqe, },
+ .opcode = IB_WR_RDMA_WRITE,
+ },
+ };
+ int ret;
+ struct mlx4_ib_dev *dev = to_mdev(qp->device);
+ struct mlx4_dev *mdev = dev->dev;
+
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret && mdev->persist->state != MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ return;
+ }
+
+ sdrain.cqe.done = mlx4_ib_drain_qp_done;
+ init_completion(&sdrain.done);
+
+ ret = _mlx4_ib_post_send(qp, &swr.wr, &bad_swr, true);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ return;
+ }
+
+ handle_drain_completion(cq, &sdrain, dev);
+}
+
+void mlx4_ib_drain_rq(struct ib_qp *qp)
+{
+ struct ib_cq *cq = qp->recv_cq;
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct mlx4_ib_drain_cqe rdrain;
+ struct ib_recv_wr rwr = {};
+ const struct ib_recv_wr *bad_rwr;
+ int ret;
+ struct mlx4_ib_dev *dev = to_mdev(qp->device);
+ struct mlx4_dev *mdev = dev->dev;
+
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret && mdev->persist->state != MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+ return;
+ }
+
+ rwr.wr_cqe = &rdrain.cqe;
+ rdrain.cqe.done = mlx4_ib_drain_qp_done;
+ init_completion(&rdrain.done);
+
+ ret = _mlx4_ib_post_recv(qp, &rwr, &bad_rwr, true);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+ return;
+ }
+
+ handle_drain_completion(cq, &rdrain, dev);
+}
+
+int mlx4_ib_qp_event_init(void)
+{
+ mlx4_ib_qp_event_wq = alloc_ordered_workqueue("mlx4_ib_qp_event_wq", 0);
+ if (!mlx4_ib_qp_event_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mlx4_ib_qp_event_cleanup(void)
+{
+ destroy_workqueue(mlx4_ib_qp_event_wq);
+}
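The RSS plumbing added to qp.c above requires the WQs placed in an indirection table to carry consecutive WQ numbers, with the first WQN aligned to the power-of-two table size. Below is a minimal sketch of that layout rule using only the verbs fields the patch itself touches (log_ind_tbl_size and ind_tbl[]); check_ind_tbl_layout() is a hypothetical helper for illustration, not part of the patch.

static int check_ind_tbl_layout(const struct ib_rwq_ind_table_init_attr *attr)
{
	unsigned int size = 1U << attr->log_ind_tbl_size;
	unsigned int base = attr->ind_tbl[0]->wq_num;
	unsigned int i;

	/* the first WQN must be aligned to the table size */
	if (base % size)
		return -EINVAL;

	/* the remaining WQNs must follow consecutively */
	for (i = 1; i < size; i++)
		if (attr->ind_tbl[i]->wq_num != base + i)
			return -EINVAL;

	return 0;
}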
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index dce5dfe3a70e..c4cf91235eee 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -36,7 +36,8 @@
#include <linux/slab.h>
#include "mlx4_ib.h"
-#include "user.h"
+#include <rdma/mlx4-abi.h>
+#include <rdma/uverbs_ioctl.h>
static void *get_wqe(struct mlx4_ib_srq *srq, int n)
{
@@ -68,12 +69,14 @@ static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
}
}
-struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *init_attr,
- struct ib_udata *udata)
+int mlx4_ib_create_srq(struct ib_srq *ib_srq,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
{
- struct mlx4_ib_dev *dev = to_mdev(pd->device);
- struct mlx4_ib_srq *srq;
+ struct mlx4_ib_dev *dev = to_mdev(ib_srq->device);
+ struct mlx4_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx4_ib_ucontext, ibucontext);
+ struct mlx4_ib_srq *srq = to_msrq(ib_srq);
struct mlx4_wqe_srq_next_seg *next;
struct mlx4_wqe_data_seg *scatter;
u32 cqn;
@@ -83,14 +86,14 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
int err;
int i;
+ if (init_attr->srq_type != IB_SRQT_BASIC &&
+ init_attr->srq_type != IB_SRQT_XRC)
+ return -EOPNOTSUPP;
+
/* Sanity check SRQ size before proceeding */
if (init_attr->attr.max_wr >= dev->dev->caps.max_srq_wqes ||
init_attr->attr.max_sge > dev->dev->caps.max_srq_sge)
- return ERR_PTR(-EINVAL);
-
- srq = kmalloc(sizeof *srq, GFP_KERNEL);
- if (!srq)
- return ERR_PTR(-ENOMEM);
+ return -EINVAL;
mutex_init(&srq->mutex);
spin_lock_init(&srq->lock);
@@ -105,23 +108,20 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
buf_size = srq->msrq.max * desc_size;
- if (pd->uobject) {
+ if (udata) {
struct mlx4_ib_create_srq ucmd;
- if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
- err = -EFAULT;
- goto err_srq;
- }
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
+ return -EFAULT;
- srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
- buf_size, 0, 0);
- if (IS_ERR(srq->umem)) {
- err = PTR_ERR(srq->umem);
- goto err_srq;
- }
+ srq->umem =
+ ib_umem_get(ib_srq->device, ucmd.buf_addr, buf_size, 0);
+ if (IS_ERR(srq->umem))
+ return PTR_ERR(srq->umem);
- err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
- ilog2(srq->umem->page_size), &srq->mtt);
+ err = mlx4_mtt_init(
+ dev->dev, ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE),
+ PAGE_SHIFT, &srq->mtt);
if (err)
goto err_buf;
@@ -129,19 +129,18 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
if (err)
goto err_mtt;
- err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
- ucmd.db_addr, &srq->db);
+ err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &srq->db);
if (err)
goto err_mtt;
} else {
- err = mlx4_db_alloc(dev->dev, &srq->db, 0, GFP_KERNEL);
+ err = mlx4_db_alloc(dev->dev, &srq->db, 0);
if (err)
- goto err_srq;
+ return err;
*srq->db.db = 0;
- if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf,
- GFP_KERNEL)) {
+ if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2,
+ &srq->buf)) {
err = -ENOMEM;
goto err_db;
}
@@ -166,31 +165,32 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
if (err)
goto err_buf;
- err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf, GFP_KERNEL);
+ err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
if (err)
goto err_mtt;
- srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL);
+ srq->wrid = kvmalloc_array(srq->msrq.max,
+ sizeof(u64), GFP_KERNEL);
if (!srq->wrid) {
err = -ENOMEM;
goto err_mtt;
}
}
- cqn = (init_attr->srq_type == IB_SRQT_XRC) ?
- to_mcq(init_attr->ext.xrc.cq)->mcq.cqn : 0;
+ cqn = ib_srq_has_cq(init_attr->srq_type) ?
+ to_mcq(init_attr->ext.cq)->mcq.cqn : 0;
xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ?
to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn :
(u16) dev->dev->caps.reserved_xrcds;
- err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, cqn, xrcdn, &srq->mtt,
- srq->db.dma, &srq->msrq);
+ err = mlx4_srq_alloc(dev->dev, to_mpd(ib_srq->pd)->pdn, cqn, xrcdn,
+ &srq->mtt, srq->db.dma, &srq->msrq);
if (err)
goto err_wrid;
srq->msrq.event = mlx4_ib_srq_event;
srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;
- if (pd->uobject)
+ if (udata)
if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {
err = -EFAULT;
goto err_wrid;
@@ -198,31 +198,27 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
init_attr->attr.max_wr = srq->msrq.max - 1;
- return &srq->ibsrq;
+ return 0;
err_wrid:
- if (pd->uobject)
- mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
+ if (udata)
+ mlx4_ib_db_unmap_user(ucontext, &srq->db);
else
- kfree(srq->wrid);
+ kvfree(srq->wrid);
err_mtt:
mlx4_mtt_cleanup(dev->dev, &srq->mtt);
err_buf:
- if (pd->uobject)
- ib_umem_release(srq->umem);
- else
+ if (!srq->umem)
mlx4_buf_free(dev->dev, buf_size, &srq->buf);
+ ib_umem_release(srq->umem);
err_db:
- if (!pd->uobject)
+ if (!udata)
mlx4_db_free(dev->dev, &srq->db);
-err_srq:
- kfree(srq);
-
- return ERR_PTR(err);
+ return err;
}
int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
@@ -269,7 +265,7 @@ int mlx4_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
return 0;
}
-int mlx4_ib_destroy_srq(struct ib_srq *srq)
+int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
struct mlx4_ib_dev *dev = to_mdev(srq->device);
struct mlx4_ib_srq *msrq = to_msrq(srq);
@@ -277,18 +273,20 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq)
mlx4_srq_free(dev->dev, &msrq->msrq);
mlx4_mtt_cleanup(dev->dev, &msrq->mtt);
- if (srq->uobject) {
- mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
- ib_umem_release(msrq->umem);
+ if (udata) {
+ mlx4_ib_db_unmap_user(
+ rdma_udata_to_drv_context(
+ udata,
+ struct mlx4_ib_ucontext,
+ ibucontext),
+ &msrq->db);
} else {
- kfree(msrq->wrid);
+ kvfree(msrq->wrid);
mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
&msrq->buf);
mlx4_db_free(dev->dev, &msrq->db);
}
-
- kfree(msrq);
-
+ ib_umem_release(msrq->umem);
return 0;
}
@@ -306,8 +304,8 @@ void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index)
spin_unlock(&srq->lock);
}
-int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mlx4_ib_srq *srq = to_msrq(ibsrq);
struct mlx4_wqe_srq_next_seg *next;
@@ -322,7 +320,6 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
err = -EIO;
*bad_wr = wr;
- nreq = 0;
goto out;
}
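The srq.c conversion above replaces the old per-page umem helpers with ib_umem_num_dma_blocks() and a fixed PAGE_SHIFT when sizing the MTT. A small, hypothetical sketch of that pattern in isolation follows; map_user_buf() and its parameters are made up, while the calls mirror the ones in the hunk.

static int map_user_buf(struct mlx4_ib_dev *dev, u64 addr, size_t len,
			struct ib_umem **umem, struct mlx4_mtt *mtt)
{
	int err;

	/* pin the user buffer */
	*umem = ib_umem_get(&dev->ib_dev, addr, len, 0);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	/* size the MTT by the number of PAGE_SIZE DMA blocks */
	err = mlx4_mtt_init(dev->dev, ib_umem_num_dma_blocks(*umem, PAGE_SIZE),
			    PAGE_SHIFT, mtt);
	if (err)
		ib_umem_release(*umem);
	return err;
}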
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index 69fb5ba94d0f..88f534cf690e 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -56,7 +56,7 @@ static ssize_t show_admin_alias_guid(struct device *dev,
mlx4_ib_iov_dentry->entry_num,
port->num);
- return sprintf(buf, "%llx\n", be64_to_cpu(sysadmin_ag_val));
+ return sysfs_emit(buf, "%llx\n", be64_to_cpu(sysadmin_ag_val));
}
/* store_admin_alias_guid stores the (new) administratively assigned value of that GUID.
@@ -117,22 +117,24 @@ static ssize_t show_port_gid(struct device *dev,
struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx;
struct mlx4_ib_dev *mdev = port->dev;
union ib_gid gid;
- ssize_t ret;
+ int ret;
+ __be16 *raw;
ret = __mlx4_ib_query_gid(&mdev->ib_dev, port->num,
mlx4_ib_iov_dentry->entry_num, &gid, 1);
if (ret)
return ret;
- ret = sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
- be16_to_cpu(((__be16 *) gid.raw)[0]),
- be16_to_cpu(((__be16 *) gid.raw)[1]),
- be16_to_cpu(((__be16 *) gid.raw)[2]),
- be16_to_cpu(((__be16 *) gid.raw)[3]),
- be16_to_cpu(((__be16 *) gid.raw)[4]),
- be16_to_cpu(((__be16 *) gid.raw)[5]),
- be16_to_cpu(((__be16 *) gid.raw)[6]),
- be16_to_cpu(((__be16 *) gid.raw)[7]));
- return ret;
+
+ raw = (__be16 *)gid.raw;
+ return sysfs_emit(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+ be16_to_cpu(raw[0]),
+ be16_to_cpu(raw[1]),
+ be16_to_cpu(raw[2]),
+ be16_to_cpu(raw[3]),
+ be16_to_cpu(raw[4]),
+ be16_to_cpu(raw[5]),
+ be16_to_cpu(raw[6]),
+ be16_to_cpu(raw[7]));
}
static ssize_t show_phys_port_pkey(struct device *dev,
@@ -151,7 +153,7 @@ static ssize_t show_phys_port_pkey(struct device *dev,
if (ret)
return ret;
- return sprintf(buf, "0x%04x\n", pkey);
+ return sysfs_emit(buf, "0x%04x\n", pkey);
}
#define DENTRY_REMOVE(_dentry) \
@@ -221,11 +223,12 @@ void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
static int add_port_entries(struct mlx4_ib_dev *device, int port_num)
{
int i;
- char buff[10];
+ char buff[12];
struct mlx4_ib_iov_port *port = NULL;
int ret = 0 ;
struct ib_port_attr attr;
+ memset(&attr, 0, sizeof(attr));
/* get the physical gid and pkey table sizes.*/
ret = __mlx4_ib_query_port(&device->ib_dev, port_num, &attr, 1);
if (ret)
@@ -352,16 +355,12 @@ err:
static void get_name(struct mlx4_ib_dev *dev, char *name, int i, int max)
{
- char base_name[9];
-
- /* pci_name format is: bus:dev:func -> xxxx:yy:zz.n */
- strlcpy(name, pci_name(dev->dev->persist->pdev), max);
- strncpy(base_name, name, 8); /*till xxxx:yy:*/
- base_name[8] = '\0';
- /* with no ARI only 3 last bits are used so when the fn is higher than 8
+ /* pci_name format is: bus:dev:func -> xxxx:yy:zz.n
+ * with no ARI only 3 last bits are used so when the fn is higher than 8
* need to add it to the dev num, so count in the last number will be
* modulo 8 */
- sprintf(name, "%s%.2d.%d", base_name, (i/8), (i%8));
+ snprintf(name, max, "%.8s%.2d.%d", pci_name(dev->dev->persist->pdev),
+ i / 8, i % 8);
}
struct mlx4_port {
@@ -444,16 +443,12 @@ static ssize_t show_port_pkey(struct mlx4_port *p, struct port_attribute *attr,
{
struct port_table_attribute *tab_attr =
container_of(attr, struct port_table_attribute, attr);
- ssize_t ret = -ENODEV;
-
- if (p->dev->pkeys.virt2phys_pkey[p->slave][p->port_num - 1][tab_attr->index] >=
- (p->dev->dev->caps.pkey_table_len[p->port_num]))
- ret = sprintf(buf, "none\n");
- else
- ret = sprintf(buf, "%d\n",
- p->dev->pkeys.virt2phys_pkey[p->slave]
- [p->port_num - 1][tab_attr->index]);
- return ret;
+ struct pkey_mgt *m = &p->dev->pkeys;
+ u8 key = m->virt2phys_pkey[p->slave][p->port_num - 1][tab_attr->index];
+
+ if (key >= p->dev->dev->caps.pkey_table_len[p->port_num])
+ return sysfs_emit(buf, "none\n");
+ return sysfs_emit(buf, "%d\n", key);
}
static ssize_t store_port_pkey(struct mlx4_port *p, struct port_attribute *attr,
@@ -491,7 +486,7 @@ static ssize_t store_port_pkey(struct mlx4_port *p, struct port_attribute *attr,
static ssize_t show_port_gid_idx(struct mlx4_port *p,
struct port_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", p->slave);
+ return sysfs_emit(buf, "%d\n", p->slave);
}
static struct attribute **
@@ -545,14 +540,10 @@ static ssize_t sysfs_show_smi_enabled(struct device *dev,
{
struct mlx4_port *p =
container_of(attr, struct mlx4_port, smi_enabled);
- ssize_t len = 0;
-
- if (mlx4_vf_smi_enabled(p->dev->dev, p->slave, p->port_num))
- len = sprintf(buf, "%d\n", 1);
- else
- len = sprintf(buf, "%d\n", 0);
- return len;
+ return sysfs_emit(buf, "%d\n",
+ !!mlx4_vf_smi_enabled(p->dev->dev, p->slave,
+ p->port_num));
}
static ssize_t sysfs_show_enable_smi_admin(struct device *dev,
@@ -561,14 +552,10 @@ static ssize_t sysfs_show_enable_smi_admin(struct device *dev,
{
struct mlx4_port *p =
container_of(attr, struct mlx4_port, enable_smi_admin);
- ssize_t len = 0;
- if (mlx4_vf_get_enable_smi_admin(p->dev->dev, p->slave, p->port_num))
- len = sprintf(buf, "%d\n", 1);
- else
- len = sprintf(buf, "%d\n", 0);
-
- return len;
+ return sysfs_emit(buf, "%d\n",
+ !!mlx4_vf_get_enable_smi_admin(p->dev->dev, p->slave,
+ p->port_num));
}
static ssize_t sysfs_store_enable_smi_admin(struct device *dev,
@@ -811,15 +798,13 @@ static void unregister_pkey_tree(struct mlx4_ib_dev *device)
int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *dev)
{
- int i;
+ unsigned int i;
int ret = 0;
if (!mlx4_is_master(dev->dev))
return 0;
- dev->iov_parent =
- kobject_create_and_add("iov",
- kobject_get(dev->ib_dev.ports_parent->parent));
+ dev->iov_parent = kobject_create_and_add("iov", &dev->ib_dev.dev.kobj);
if (!dev->iov_parent) {
ret = -ENOMEM;
goto err;
@@ -832,7 +817,7 @@ int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *dev)
goto err_ports;
}
- for (i = 1; i <= dev->ib_dev.phys_port_cnt; ++i) {
+ rdma_for_each_port(&dev->ib_dev, i) {
ret = add_port_entries(dev, i);
if (ret)
goto err_add_entries;
@@ -849,7 +834,6 @@ err_add_entries:
err_ports:
kobject_put(dev->iov_parent);
err:
- kobject_put(dev->ib_dev.ports_parent->parent);
pr_err("mlx4_ib_device_register_sysfs error (%d)\n", ret);
return ret;
}
@@ -885,5 +869,4 @@ void mlx4_ib_device_unregister_sysfs(struct mlx4_ib_dev *device)
kobject_put(device->ports_parent);
kobject_put(device->iov_parent);
kobject_put(device->iov_parent);
- kobject_put(device->ib_dev.ports_parent->parent);
}
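The sysfs.c hunks above switch the show() callbacks from sprintf() to sysfs_emit(), which bounds the output to the sysfs page buffer and warns on misuse. For reference, a minimal show() callback in the same style; example_value_show and its attribute are hypothetical and not part of the patch.

static ssize_t example_value_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	/* sysfs_emit() guarantees the result fits in the PAGE_SIZE buffer */
	return sysfs_emit(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(example_value);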
diff --git a/drivers/infiniband/hw/mlx4/user.h b/drivers/infiniband/hw/mlx4/user.h
deleted file mode 100644
index 07e6769ef43b..000000000000
--- a/drivers/infiniband/hw/mlx4/user.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
- * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX4_IB_USER_H
-#define MLX4_IB_USER_H
-
-#include <linux/types.h>
-
-/*
- * Increment this value if any changes that break userspace ABI
- * compatibility are made.
- */
-
-#define MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION 3
-#define MLX4_IB_UVERBS_ABI_VERSION 4
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
- */
-
-struct mlx4_ib_alloc_ucontext_resp_v3 {
- __u32 qp_tab_size;
- __u16 bf_reg_size;
- __u16 bf_regs_per_page;
-};
-
-struct mlx4_ib_alloc_ucontext_resp {
- __u32 dev_caps;
- __u32 qp_tab_size;
- __u16 bf_reg_size;
- __u16 bf_regs_per_page;
- __u32 cqe_size;
-};
-
-struct mlx4_ib_alloc_pd_resp {
- __u32 pdn;
- __u32 reserved;
-};
-
-struct mlx4_ib_create_cq {
- __u64 buf_addr;
- __u64 db_addr;
-};
-
-struct mlx4_ib_create_cq_resp {
- __u32 cqn;
- __u32 reserved;
-};
-
-struct mlx4_ib_resize_cq {
- __u64 buf_addr;
-};
-
-struct mlx4_ib_create_srq {
- __u64 buf_addr;
- __u64 db_addr;
-};
-
-struct mlx4_ib_create_srq_resp {
- __u32 srqn;
- __u32 reserved;
-};
-
-struct mlx4_ib_create_qp {
- __u64 buf_addr;
- __u64 db_addr;
- __u8 log_sq_bb_count;
- __u8 log_sq_stride;
- __u8 sq_no_prefetch;
- __u8 reserved[5];
-};
-
-#endif /* MLX4_IB_USER_H */
diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig
index bce263b92821..ef1ff42eaec5 100644
--- a/drivers/infiniband/hw/mlx5/Kconfig
+++ b/drivers/infiniband/hw/mlx5/Kconfig
@@ -1,7 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
config MLX5_INFINIBAND
- tristate "Mellanox Connect-IB HCA support"
+ tristate "Mellanox 5th generation network adapters (ConnectX series) support"
depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
- ---help---
+ help
This driver provides low-level InfiniBand support for
Mellanox Connect-IB PCI Express host channel adapters (HCAs).
This is required to use InfiniBand protocols such as
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index 27a70159e2ea..dd7bb377f491 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -1,4 +1,33 @@
-obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
+
+mlx5_ib-y := ah.o \
+ cmd.o \
+ cong.o \
+ counters.o \
+ cq.o \
+ data_direct.o \
+ dm.o \
+ dmah.o \
+ doorbell.o \
+ fs.o \
+ gsi.o \
+ ib_virt.o \
+ mad.o \
+ main.o \
+ mem.o \
+ mr.o \
+ qp.o \
+ qpc.o \
+ restrack.o \
+ srq.o \
+ srq_cmd.o \
+ umr.o \
+ wr.o
-mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o
mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
+mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o
+mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o \
+ qos.o \
+ std_types.o
+mlx5_ib-$(CONFIG_MLX5_MACSEC) += macsec.o
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index 66080580e24d..531a57f9ee7e 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -32,61 +32,123 @@
#include "mlx5_ib.h"
-struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr,
- struct mlx5_ib_ah *ah)
+static __be16 mlx5_ah_get_udp_sport(const struct mlx5_ib_dev *dev,
+ const struct rdma_ah_attr *ah_attr)
{
- if (ah_attr->ah_flags & IB_AH_GRH) {
- memcpy(ah->av.rgid, &ah_attr->grh.dgid, 16);
- ah->av.grh_gid_fl = cpu_to_be32(ah_attr->grh.flow_label |
+ enum ib_gid_type gid_type = ah_attr->grh.sgid_attr->gid_type;
+ __be16 sport;
+
+ if ((gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) &&
+ (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) &&
+ (ah_attr->grh.flow_label & IB_GRH_FLOWLABEL_MASK))
+ sport = cpu_to_be16(
+ rdma_flow_label_to_udp_sport(ah_attr->grh.flow_label));
+ else
+ sport = mlx5_get_roce_udp_sport_min(dev,
+ ah_attr->grh.sgid_attr);
+
+ return sport;
+}
+
+static int create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
+ struct rdma_ah_init_attr *init_attr)
+{
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
+ enum ib_gid_type gid_type;
+ int rate_val;
+
+ if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+
+ memcpy(ah->av.rgid, &grh->dgid, 16);
+ ah->av.grh_gid_fl = cpu_to_be32(grh->flow_label |
(1 << 30) |
- ah_attr->grh.sgid_index << 20);
- ah->av.hop_limit = ah_attr->grh.hop_limit;
- ah->av.tclass = ah_attr->grh.traffic_class;
+ grh->sgid_index << 20);
+ ah->av.hop_limit = grh->hop_limit;
+ ah->av.tclass = grh->traffic_class;
}
- ah->av.rlid = cpu_to_be16(ah_attr->dlid);
- ah->av.fl_mlid = ah_attr->src_path_bits & 0x7f;
- ah->av.stat_rate_sl = (ah_attr->static_rate << 4) | (ah_attr->sl & 0xf);
+ rate_val = mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah_attr));
+ if (rate_val < 0)
+ return rate_val;
+ ah->av.stat_rate_sl = rate_val << 4;
+
+ if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
+ if (init_attr->xmit_slave)
+ ah->xmit_port =
+ mlx5_lag_get_slave_port(dev->mdev,
+ init_attr->xmit_slave);
+ gid_type = ah_attr->grh.sgid_attr->gid_type;
- return &ah->ibah;
+ memcpy(ah->av.rmac, ah_attr->roce.dmac,
+ sizeof(ah_attr->roce.dmac));
+ ah->av.udp_sport = mlx5_ah_get_udp_sport(dev, ah_attr);
+ ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0x7) << 1;
+ if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+#define MLX5_ECN_ENABLED BIT(1)
+ ah->av.tclass |= MLX5_ECN_ENABLED;
+ } else {
+ ah->av.rlid = cpu_to_be16(rdma_ah_get_dlid(ah_attr));
+ ah->av.fl_mlid = rdma_ah_get_path_bits(ah_attr) & 0x7f;
+ ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0xf);
+ }
+
+ return 0;
}
-struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
+
{
- struct mlx5_ib_ah *ah;
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
+ struct mlx5_ib_ah *ah = to_mah(ibah);
+ struct mlx5_ib_dev *dev = to_mdev(ibah->device);
+ enum rdma_ah_attr_type ah_type = ah_attr->type;
+
+ if ((ah_type == RDMA_AH_ATTR_TYPE_ROCE) &&
+ !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
+ return -EINVAL;
- ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
- if (!ah)
- return ERR_PTR(-ENOMEM);
+ if (ah_type == RDMA_AH_ATTR_TYPE_ROCE && udata) {
+ int err;
+ struct mlx5_ib_create_ah_resp resp = {};
+ u32 min_resp_len =
+ offsetofend(struct mlx5_ib_create_ah_resp, dmac);
- return create_ib_ah(ah_attr, ah); /* never fails */
+ if (udata->outlen < min_resp_len)
+ return -EINVAL;
+
+ resp.response_length = min_resp_len;
+
+ memcpy(resp.dmac, ah_attr->roce.dmac, ETH_ALEN);
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err)
+ return err;
+ }
+
+ return create_ib_ah(dev, ah, init_attr);
}
-int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
{
struct mlx5_ib_ah *ah = to_mah(ibah);
u32 tmp;
memset(ah_attr, 0, sizeof(*ah_attr));
+ ah_attr->type = ibah->type;
tmp = be32_to_cpu(ah->av.grh_gid_fl);
if (tmp & (1 << 30)) {
- ah_attr->ah_flags = IB_AH_GRH;
- ah_attr->grh.sgid_index = (tmp >> 20) & 0xff;
- ah_attr->grh.flow_label = tmp & 0xfffff;
- memcpy(&ah_attr->grh.dgid, ah->av.rgid, 16);
- ah_attr->grh.hop_limit = ah->av.hop_limit;
- ah_attr->grh.traffic_class = ah->av.tclass;
+ rdma_ah_set_grh(ah_attr, NULL,
+ tmp & 0xfffff,
+ (tmp >> 20) & 0xff,
+ ah->av.hop_limit,
+ ah->av.tclass);
+ rdma_ah_set_dgid_raw(ah_attr, ah->av.rgid);
}
- ah_attr->dlid = be16_to_cpu(ah->av.rlid);
- ah_attr->static_rate = ah->av.stat_rate_sl >> 4;
- ah_attr->sl = ah->av.stat_rate_sl & 0xf;
+ rdma_ah_set_dlid(ah_attr, be16_to_cpu(ah->av.rlid));
+ rdma_ah_set_static_rate(ah_attr, ah->av.stat_rate_sl >> 4);
+ rdma_ah_set_sl(ah_attr, ah->av.stat_rate_sl & 0xf);
return 0;
}
-
-int mlx5_ib_destroy_ah(struct ib_ah *ah)
-{
- kfree(to_mah(ah));
- return 0;
-}
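The reworked mlx5 AH code above reads and writes address handle fields only through the rdma_ah_* accessors instead of touching struct members directly. A hedged sketch of how a caller might fill an IB-type rdma_ah_attr with those same helpers; fill_ah_attr_example and its argument values are assumptions for illustration only.

static void fill_ah_attr_example(struct rdma_ah_attr *attr, u32 port,
				 u16 dlid, u8 sl, union ib_gid *dgid)
{
	memset(attr, 0, sizeof(*attr));
	attr->type = RDMA_AH_ATTR_TYPE_IB;
	rdma_ah_set_port_num(attr, port);
	rdma_ah_set_dlid(attr, dlid);
	rdma_ah_set_sl(attr, sl);
	rdma_ah_set_static_rate(attr, 0);	/* no static rate limit */
	/* flow label 0, sgid index 0, hop limit 64, traffic class 0 */
	rdma_ah_set_grh(attr, dgid, 0, 0, 64, 0);
}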
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
new file mode 100644
index 000000000000..7c08e3008927
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2017-2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include "cmd.h"
+
+int mlx5r_cmd_query_special_mkeys(struct mlx5_ib_dev *dev)
+{
+ u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
+ bool is_terminate, is_dump, is_null;
+ int err;
+
+ is_terminate = MLX5_CAP_GEN(dev->mdev, terminate_scatter_list_mkey);
+ is_dump = MLX5_CAP_GEN(dev->mdev, dump_fill_mkey);
+ is_null = MLX5_CAP_GEN(dev->mdev, null_mkey);
+
+ dev->mkeys.terminate_scatter_list_mkey = MLX5_TERMINATE_SCATTER_LIST_LKEY;
+ if (!is_terminate && !is_dump && !is_null)
+ return 0;
+
+ MLX5_SET(query_special_contexts_in, in, opcode,
+ MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
+ err = mlx5_cmd_exec_inout(dev->mdev, query_special_contexts, in, out);
+ if (err)
+ return err;
+
+ if (is_dump)
+ dev->mkeys.dump_fill_mkey = MLX5_GET(query_special_contexts_out,
+ out, dump_fill_mkey);
+
+ if (is_null)
+ dev->mkeys.null_mkey = cpu_to_be32(
+ MLX5_GET(query_special_contexts_out, out, null_mkey));
+
+ if (is_terminate)
+ dev->mkeys.terminate_scatter_list_mkey =
+ cpu_to_be32(MLX5_GET(query_special_contexts_out, out,
+ terminate_scatter_list_mkey));
+
+ return 0;
+}
+
+int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
+ void *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = {};
+
+ MLX5_SET(query_cong_params_in, in, opcode,
+ MLX5_CMD_OP_QUERY_CONG_PARAMS);
+ MLX5_SET(query_cong_params_in, in, cong_protocol, cong_point);
+
+ return mlx5_cmd_exec_inout(dev, query_cong_params, in, out);
+}
+
+void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
+
+ MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
+ MLX5_SET(destroy_tir_in, in, tirn, tirn);
+ MLX5_SET(destroy_tir_in, in, uid, uid);
+ mlx5_cmd_exec_in(dev, destroy_tir, in);
+}
+
+void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};
+
+ MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ MLX5_SET(destroy_tis_in, in, tisn, tisn);
+ MLX5_SET(destroy_tis_in, in, uid, uid);
+ mlx5_cmd_exec_in(dev, destroy_tis, in);
+}
+
+int mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
+
+ MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
+ MLX5_SET(destroy_rqt_in, in, uid, uid);
+ return mlx5_cmd_exec_in(dev, destroy_rqt, in);
+}
+
+int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
+ u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
+ int err;
+
+ MLX5_SET(alloc_transport_domain_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
+ MLX5_SET(alloc_transport_domain_in, in, uid, uid);
+
+ err = mlx5_cmd_exec_inout(dev, alloc_transport_domain, in, out);
+ if (!err)
+ *tdn = MLX5_GET(alloc_transport_domain_out, out,
+ transport_domain);
+
+ return err;
+}
+
+void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn,
+ u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};
+
+ MLX5_SET(dealloc_transport_domain_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
+ MLX5_SET(dealloc_transport_domain_in, in, uid, uid);
+ MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
+ mlx5_cmd_exec_in(dev, dealloc_transport_domain, in);
+}
+
+int mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
+
+ MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
+ MLX5_SET(dealloc_pd_in, in, pd, pdn);
+ MLX5_SET(dealloc_pd_in, in, uid, uid);
+ return mlx5_cmd_exec_in(dev, dealloc_pd, in);
+}
+
+int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
+ u32 qpn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {};
+ void *gid;
+
+ MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
+ MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
+ MLX5_SET(attach_to_mcg_in, in, uid, uid);
+ gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
+ memcpy(gid, mgid, sizeof(*mgid));
+ return mlx5_cmd_exec_in(dev, attach_to_mcg, in);
+}
+
+int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
+ u32 qpn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {};
+ void *gid;
+
+ MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
+ MLX5_SET(detach_from_mcg_in, in, qpn, qpn);
+ MLX5_SET(detach_from_mcg_in, in, uid, uid);
+ gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
+ memcpy(gid, mgid, sizeof(*mgid));
+ return mlx5_cmd_exec_in(dev, detach_from_mcg, in);
+}
+
+int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
+ int err;
+
+ MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
+ MLX5_SET(alloc_xrcd_in, in, uid, uid);
+ err = mlx5_cmd_exec_inout(dev, alloc_xrcd, in, out);
+ if (!err)
+ *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
+ return err;
+}
+
+int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};
+
+ MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
+ MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
+ MLX5_SET(dealloc_xrcd_in, in, uid, uid);
+ return mlx5_cmd_exec_in(dev, dealloc_xrcd, in);
+}
+
+int mlx5_cmd_mad_ifc(struct mlx5_ib_dev *dev, const void *inb, void *outb,
+ u16 opmod, u8 port)
+{
+ int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out);
+ int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in);
+ int err = -ENOMEM;
+ void *data;
+ void *resp;
+ u32 *out;
+ u32 *in;
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!in || !out)
+ goto out;
+
+ MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC);
+ MLX5_SET(mad_ifc_in, in, op_mod, opmod);
+ if (dev->ib_dev.type == RDMA_DEVICE_TYPE_SMI) {
+ MLX5_SET(mad_ifc_in, in, plane_index, port);
+ MLX5_SET(mad_ifc_in, in, port,
+ smi_to_native_portnum(dev, port));
+ } else {
+ MLX5_SET(mad_ifc_in, in, port, port);
+ }
+
+ data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
+ memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));
+
+ err = mlx5_cmd_exec_inout(dev->mdev, mad_ifc, in, out);
+ if (err)
+ goto out;
+
+ resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet);
+ memcpy(outb, resp,
+ MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet));
+
+out:
+ kfree(out);
+ kfree(in);
+ return err;
+}
+
+int mlx5_cmd_uar_alloc(struct mlx5_core_dev *dev, u32 *uarn, u16 uid)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {};
+ int err;
+
+ MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
+ MLX5_SET(alloc_uar_in, in, uid, uid);
+ err = mlx5_cmd_exec_inout(dev, alloc_uar, in, out);
+ if (err)
+ return err;
+
+ *uarn = MLX5_GET(alloc_uar_out, out, uar);
+ return 0;
+}
+
+int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {};
+
+ MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
+ MLX5_SET(dealloc_uar_in, in, uar, uarn);
+ MLX5_SET(dealloc_uar_in, in, uid, uid);
+ return mlx5_cmd_exec_in(dev, dealloc_uar, in);
+}
+
+int mlx5_cmd_query_vuid(struct mlx5_core_dev *dev, bool data_direct,
+ char *out_vuid)
+{
+ u8 out[MLX5_ST_SZ_BYTES(query_vuid_out) +
+ MLX5_ST_SZ_BYTES(array1024_auto)] = {};
+ u8 in[MLX5_ST_SZ_BYTES(query_vuid_in)] = {};
+ char *vuid;
+ int err;
+
+ MLX5_SET(query_vuid_in, in, opcode, MLX5_CMD_OPCODE_QUERY_VUID);
+ MLX5_SET(query_vuid_in, in, vhca_id, MLX5_CAP_GEN(dev, vhca_id));
+ MLX5_SET(query_vuid_in, in, data_direct, data_direct);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ vuid = MLX5_ADDR_OF(query_vuid_out, out, vuid);
+ memcpy(out_vuid, vuid, MLX5_ST_SZ_BYTES(array1024_auto));
+ return 0;
+}
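All of the helpers in the new cmd.c follow the same MLX5_SET()/mlx5_cmd_exec_*() pattern against stack-allocated in/out mailboxes. A hypothetical caller-side sketch for the UAR pair declared above; the function name, the uid argument and the omitted mapping step are made up for illustration.

static int example_uar_roundtrip(struct mlx5_core_dev *mdev, u16 uid)
{
	u32 uarn;
	int err;

	err = mlx5_cmd_uar_alloc(mdev, &uarn, uid);	/* ALLOC_UAR */
	if (err)
		return err;

	/* ... map the UAR page and ring doorbells through it ... */

	return mlx5_cmd_uar_dealloc(mdev, uarn, uid);	/* DEALLOC_UAR */
}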
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
new file mode 100644
index 000000000000..e6c88b6ebd0d
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_IB_CMD_H
+#define MLX5_IB_CMD_H
+
+#include "mlx5_ib.h"
+#include <linux/kernel.h>
+#include <linux/mlx5/driver.h>
+
+int mlx5r_cmd_query_special_mkeys(struct mlx5_ib_dev *dev);
+int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
+ void *out);
+int mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid);
+void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid);
+void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid);
+int mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid);
+int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
+ u16 uid);
+void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn,
+ u16 uid);
+int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
+ u32 qpn, u16 uid);
+int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
+ u32 qpn, u16 uid);
+int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid);
+int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid);
+int mlx5_cmd_mad_ifc(struct mlx5_ib_dev *dev, const void *inb, void *outb,
+ u16 opmod, u8 port);
+int mlx5_cmd_uar_alloc(struct mlx5_core_dev *dev, u32 *uarn, u16 uid);
+int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid);
+int mlx5_cmd_query_vuid(struct mlx5_core_dev *dev, bool data_direct,
+ char *out_vuid);
+#endif /* MLX5_IB_CMD_H */
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
new file mode 100644
index 000000000000..a78a067e3ce7
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -0,0 +1,491 @@
+/*
+ * Copyright (c) 2013-2017, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+
+#include "mlx5_ib.h"
+#include "cmd.h"
+
+enum mlx5_ib_cong_node_type {
+ MLX5_IB_RROCE_ECN_RP = 1,
+ MLX5_IB_RROCE_ECN_NP = 2,
+ MLX5_IB_RROCE_GENERAL = 3,
+};
+
+static const char * const mlx5_ib_dbg_cc_name[] = {
+ "rp_clamp_tgt_rate",
+ "rp_clamp_tgt_rate_ati",
+ "rp_time_reset",
+ "rp_byte_reset",
+ "rp_threshold",
+ "rp_ai_rate",
+ "rp_max_rate",
+ "rp_hai_rate",
+ "rp_min_dec_fac",
+ "rp_min_rate",
+ "rp_rate_to_set_on_first_cnp",
+ "rp_dce_tcp_g",
+ "rp_dce_tcp_rtt",
+ "rp_rate_reduce_monitor_period",
+ "rp_initial_alpha_value",
+ "rp_gd",
+ "np_min_time_between_cnps",
+ "np_cnp_dscp",
+ "np_cnp_prio_mode",
+ "np_cnp_prio",
+ "rtt_resp_dscp_valid",
+ "rtt_resp_dscp",
+};
+
+#define MLX5_IB_RP_CLAMP_TGT_RATE_ATTR BIT(1)
+#define MLX5_IB_RP_CLAMP_TGT_RATE_ATI_ATTR BIT(2)
+#define MLX5_IB_RP_TIME_RESET_ATTR BIT(3)
+#define MLX5_IB_RP_BYTE_RESET_ATTR BIT(4)
+#define MLX5_IB_RP_THRESHOLD_ATTR BIT(5)
+#define MLX5_IB_RP_MAX_RATE_ATTR BIT(6)
+#define MLX5_IB_RP_AI_RATE_ATTR BIT(7)
+#define MLX5_IB_RP_HAI_RATE_ATTR BIT(8)
+#define MLX5_IB_RP_MIN_DEC_FAC_ATTR BIT(9)
+#define MLX5_IB_RP_MIN_RATE_ATTR BIT(10)
+#define MLX5_IB_RP_RATE_TO_SET_ON_FIRST_CNP_ATTR BIT(11)
+#define MLX5_IB_RP_DCE_TCP_G_ATTR BIT(12)
+#define MLX5_IB_RP_DCE_TCP_RTT_ATTR BIT(13)
+#define MLX5_IB_RP_RATE_REDUCE_MONITOR_PERIOD_ATTR BIT(14)
+#define MLX5_IB_RP_INITIAL_ALPHA_VALUE_ATTR BIT(15)
+#define MLX5_IB_RP_GD_ATTR BIT(16)
+
+#define MLX5_IB_NP_MIN_TIME_BETWEEN_CNPS_ATTR BIT(2)
+#define MLX5_IB_NP_CNP_DSCP_ATTR BIT(3)
+#define MLX5_IB_NP_CNP_PRIO_MODE_ATTR BIT(4)
+
+#define MLX5_IB_GENERAL_RTT_RESP_DSCP_ATTR BIT(0)
+
+static enum mlx5_ib_cong_node_type
+mlx5_ib_param_to_node(enum mlx5_ib_dbg_cc_types param_offset)
+{
+ if (param_offset <= MLX5_IB_DBG_CC_RP_GD)
+ return MLX5_IB_RROCE_ECN_RP;
+
+ if (param_offset <= MLX5_IB_DBG_CC_NP_CNP_PRIO)
+ return MLX5_IB_RROCE_ECN_NP;
+
+ return MLX5_IB_RROCE_GENERAL;
+}
+
+static u32 mlx5_get_cc_param_val(void *field, int offset)
+{
+ switch (offset) {
+ case MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ clamp_tgt_rate);
+ case MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ clamp_tgt_rate_after_time_inc);
+ case MLX5_IB_DBG_CC_RP_TIME_RESET:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_time_reset);
+ case MLX5_IB_DBG_CC_RP_BYTE_RESET:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_byte_reset);
+ case MLX5_IB_DBG_CC_RP_THRESHOLD:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_threshold);
+ case MLX5_IB_DBG_CC_RP_AI_RATE:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_ai_rate);
+ case MLX5_IB_DBG_CC_RP_MAX_RATE:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_max_rate);
+ case MLX5_IB_DBG_CC_RP_HAI_RATE:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_hai_rate);
+ case MLX5_IB_DBG_CC_RP_MIN_DEC_FAC:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_min_dec_fac);
+ case MLX5_IB_DBG_CC_RP_MIN_RATE:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_min_rate);
+ case MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rate_to_set_on_first_cnp);
+ case MLX5_IB_DBG_CC_RP_DCE_TCP_G:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ dce_tcp_g);
+ case MLX5_IB_DBG_CC_RP_DCE_TCP_RTT:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ dce_tcp_rtt);
+ case MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rate_reduce_monitor_period);
+ case MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ initial_alpha_value);
+ case MLX5_IB_DBG_CC_RP_GD:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_gd);
+ case MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS:
+ return MLX5_GET(cong_control_r_roce_ecn_np, field,
+ min_time_between_cnps);
+ case MLX5_IB_DBG_CC_NP_CNP_DSCP:
+ return MLX5_GET(cong_control_r_roce_ecn_np, field,
+ cnp_dscp);
+ case MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE:
+ return MLX5_GET(cong_control_r_roce_ecn_np, field,
+ cnp_prio_mode);
+ case MLX5_IB_DBG_CC_NP_CNP_PRIO:
+ return MLX5_GET(cong_control_r_roce_ecn_np, field,
+ cnp_802p_prio);
+ case MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP_VALID:
+ return MLX5_GET(cong_control_r_roce_general, field,
+ rtt_resp_dscp_valid);
+ case MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP:
+ return MLX5_GET(cong_control_r_roce_general, field,
+ rtt_resp_dscp);
+ default:
+ return 0;
+ }
+}
+
+static void mlx5_ib_set_cc_param_mask_val(void *field, int offset,
+ u32 var, u32 *attr_mask)
+{
+ switch (offset) {
+ case MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE:
+ *attr_mask |= MLX5_IB_RP_CLAMP_TGT_RATE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ clamp_tgt_rate, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI:
+ *attr_mask |= MLX5_IB_RP_CLAMP_TGT_RATE_ATI_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ clamp_tgt_rate_after_time_inc, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_TIME_RESET:
+ *attr_mask |= MLX5_IB_RP_TIME_RESET_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_time_reset, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_BYTE_RESET:
+ *attr_mask |= MLX5_IB_RP_BYTE_RESET_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_byte_reset, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_THRESHOLD:
+ *attr_mask |= MLX5_IB_RP_THRESHOLD_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_threshold, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_AI_RATE:
+ *attr_mask |= MLX5_IB_RP_AI_RATE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_ai_rate, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_MAX_RATE:
+ *attr_mask |= MLX5_IB_RP_MAX_RATE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_max_rate, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_HAI_RATE:
+ *attr_mask |= MLX5_IB_RP_HAI_RATE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_hai_rate, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_MIN_DEC_FAC:
+ *attr_mask |= MLX5_IB_RP_MIN_DEC_FAC_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_min_dec_fac, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_MIN_RATE:
+ *attr_mask |= MLX5_IB_RP_MIN_RATE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_min_rate, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP:
+ *attr_mask |= MLX5_IB_RP_RATE_TO_SET_ON_FIRST_CNP_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rate_to_set_on_first_cnp, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_DCE_TCP_G:
+ *attr_mask |= MLX5_IB_RP_DCE_TCP_G_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ dce_tcp_g, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_DCE_TCP_RTT:
+ *attr_mask |= MLX5_IB_RP_DCE_TCP_RTT_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ dce_tcp_rtt, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD:
+ *attr_mask |= MLX5_IB_RP_RATE_REDUCE_MONITOR_PERIOD_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rate_reduce_monitor_period, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE:
+ *attr_mask |= MLX5_IB_RP_INITIAL_ALPHA_VALUE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ initial_alpha_value, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_GD:
+ *attr_mask |= MLX5_IB_RP_GD_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_gd, var);
+ break;
+ case MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS:
+ *attr_mask |= MLX5_IB_NP_MIN_TIME_BETWEEN_CNPS_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_np, field,
+ min_time_between_cnps, var);
+ break;
+ case MLX5_IB_DBG_CC_NP_CNP_DSCP:
+ *attr_mask |= MLX5_IB_NP_CNP_DSCP_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_np, field, cnp_dscp, var);
+ break;
+ case MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE:
+ *attr_mask |= MLX5_IB_NP_CNP_PRIO_MODE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_np, field, cnp_prio_mode, var);
+ break;
+ case MLX5_IB_DBG_CC_NP_CNP_PRIO:
+ *attr_mask |= MLX5_IB_NP_CNP_PRIO_MODE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_np, field, cnp_prio_mode, 0);
+ MLX5_SET(cong_control_r_roce_ecn_np, field, cnp_802p_prio, var);
+ break;
+ case MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP_VALID:
+ *attr_mask |= MLX5_IB_GENERAL_RTT_RESP_DSCP_ATTR;
+ MLX5_SET(cong_control_r_roce_general, field, rtt_resp_dscp_valid, var);
+ break;
+ case MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP:
+ *attr_mask |= MLX5_IB_GENERAL_RTT_RESP_DSCP_ATTR;
+ MLX5_SET(cong_control_r_roce_general, field, rtt_resp_dscp_valid, 1);
+ MLX5_SET(cong_control_r_roce_general, field, rtt_resp_dscp, var);
+ break;
+ }
+}
+
+static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, u32 port_num,
+ int offset, u32 *var)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_cong_params_out);
+ void *out;
+ void *field;
+ int err;
+ enum mlx5_ib_cong_node_type node;
+ struct mlx5_core_dev *mdev;
+
+ /* Takes a 1-based port number */
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num + 1, NULL);
+ if (!mdev)
+ return -ENODEV;
+
+ out = kvzalloc(outlen, GFP_KERNEL);
+ if (!out) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+
+ node = mlx5_ib_param_to_node(offset);
+
+ err = mlx5_cmd_query_cong_params(mdev, node, out);
+ if (err)
+ goto free;
+
+ field = MLX5_ADDR_OF(query_cong_params_out, out, congestion_parameters);
+ *var = mlx5_get_cc_param_val(field, offset);
+
+free:
+ kvfree(out);
+alloc_err:
+ mlx5_ib_put_native_port_mdev(dev, port_num + 1);
+ return err;
+}
+
+static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, u32 port_num,
+ int offset, u32 var)
+{
+ int inlen = MLX5_ST_SZ_BYTES(modify_cong_params_in);
+ void *in;
+ void *field;
+ enum mlx5_ib_cong_node_type node;
+ struct mlx5_core_dev *mdev;
+ u32 attr_mask = 0;
+ int err;
+
+ /* Takes a 1-based port number */
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num + 1, NULL);
+ if (!mdev)
+ return -ENODEV;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+
+ MLX5_SET(modify_cong_params_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_CONG_PARAMS);
+
+ node = mlx5_ib_param_to_node(offset);
+ MLX5_SET(modify_cong_params_in, in, cong_protocol, node);
+
+ field = MLX5_ADDR_OF(modify_cong_params_in, in, congestion_parameters);
+ mlx5_ib_set_cc_param_mask_val(field, offset, var, &attr_mask);
+
+ field = MLX5_ADDR_OF(modify_cong_params_in, in, field_select);
+ MLX5_SET(field_select_r_roce_rp, field, field_select_r_roce_rp,
+ attr_mask);
+
+ err = mlx5_cmd_exec_in(dev->mdev, modify_cong_params, in);
+ kvfree(in);
+alloc_err:
+ mlx5_ib_put_native_port_mdev(dev, port_num + 1);
+ return err;
+}
+
+static ssize_t set_param(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct mlx5_ib_dbg_param *param = filp->private_data;
+ int offset = param->offset;
+ char lbuf[11] = { };
+ u32 var;
+ int ret;
+
+ if (count > sizeof(lbuf))
+ return -EINVAL;
+
+ if (copy_from_user(lbuf, buf, count))
+ return -EFAULT;
+
+ lbuf[sizeof(lbuf) - 1] = '\0';
+
+ if (kstrtou32(lbuf, 0, &var))
+ return -EINVAL;
+
+ ret = mlx5_ib_set_cc_params(param->dev, param->port_num, offset, var);
+ return ret ? ret : count;
+}
+
+static ssize_t get_param(struct file *filp, char __user *buf, size_t count,
+ loff_t *pos)
+{
+ struct mlx5_ib_dbg_param *param = filp->private_data;
+ int offset = param->offset;
+ u32 var = 0;
+ int ret;
+ char lbuf[11];
+
+ ret = mlx5_ib_get_cc_params(param->dev, param->port_num, offset, &var);
+ if (ret)
+ return ret;
+
+ ret = snprintf(lbuf, sizeof(lbuf), "%d\n", var);
+ if (ret < 0)
+ return ret;
+
+ return simple_read_from_buffer(buf, count, pos, lbuf, ret);
+}
+
+static const struct file_operations dbg_cc_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = set_param,
+ .read = get_param,
+};
+
+void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num)
+{
+ if (!mlx5_debugfs_root ||
+ !dev->port[port_num].dbg_cc_params ||
+ !dev->port[port_num].dbg_cc_params->root)
+ return;
+
+ debugfs_remove_recursive(dev->port[port_num].dbg_cc_params->root);
+ kfree(dev->port[port_num].dbg_cc_params);
+ dev->port[port_num].dbg_cc_params = NULL;
+}
+
+void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num)
+{
+ struct mlx5_ib_dbg_cc_params *dbg_cc_params;
+ struct mlx5_core_dev *mdev;
+ int i;
+
+ if (!mlx5_debugfs_root)
+ return;
+
+ /* Takes a 1-based port number */
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num + 1, NULL);
+ if (!mdev)
+ return;
+
+ if (!MLX5_CAP_GEN(mdev, cc_query_allowed) ||
+ !MLX5_CAP_GEN(mdev, cc_modify_allowed))
+ goto put_mdev;
+
+ dbg_cc_params = kzalloc(sizeof(*dbg_cc_params), GFP_KERNEL);
+ if (!dbg_cc_params)
+ goto err;
+
+ dev->port[port_num].dbg_cc_params = dbg_cc_params;
+
+ dbg_cc_params->root = debugfs_create_dir("cc_params", mlx5_debugfs_get_dev_root(mdev));
+
+ for (i = 0; i < MLX5_IB_DBG_CC_MAX; i++) {
+ if ((i == MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP_VALID ||
+ i == MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP))
+ if (!MLX5_CAP_GEN(mdev, roce) ||
+ !MLX5_CAP_ROCE(mdev, roce_cc_general))
+ continue;
+
+ dbg_cc_params->params[i].offset = i;
+ dbg_cc_params->params[i].dev = dev;
+ dbg_cc_params->params[i].port_num = port_num;
+ dbg_cc_params->params[i].dentry =
+ debugfs_create_file(mlx5_ib_dbg_cc_name[i],
+ 0600, dbg_cc_params->root,
+ &dbg_cc_params->params[i],
+ &dbg_cc_fops);
+ }
+
+put_mdev:
+ mlx5_ib_put_native_port_mdev(dev, port_num + 1);
+ return;
+
+err:
+ mlx5_ib_warn(dev, "cong debugfs failure\n");
+ mlx5_ib_cleanup_cong_debugfs(dev, port_num);
+ mlx5_ib_put_native_port_mdev(dev, port_num + 1);
+
+ /*
+ * We don't want to fail the driver if debugfs fails to initialize,
+ * so the error is not forwarded to the caller.
+ */
+ return;
+}
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
new file mode 100644
index 000000000000..e042e0719ead
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -0,0 +1,1279 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include "mlx5_ib.h"
+#include <linux/mlx5/eswitch.h>
+#include <linux/mlx5/vport.h>
+#include "counters.h"
+#include "ib_rep.h"
+#include "qp.h"
+
+struct mlx5_ib_counter {
+ const char *name;
+ size_t offset;
+ u32 type;
+};
+
+struct mlx5_rdma_counter {
+ struct rdma_counter rdma_counter;
+
+ struct mlx5_fc *fc[MLX5_IB_OPCOUNTER_MAX];
+ struct xarray qpn_opfc_xa;
+};
+
+static struct mlx5_rdma_counter *to_mcounter(struct rdma_counter *counter)
+{
+ return container_of(counter, struct mlx5_rdma_counter, rdma_counter);
+}
+
+#define INIT_Q_COUNTER(_name) \
+ { .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)}
+
+#define INIT_VPORT_Q_COUNTER(_name) \
+ { .name = "vport_" #_name, .offset = \
+ MLX5_BYTE_OFF(query_q_counter_out, _name)}
+
+static const struct mlx5_ib_counter basic_q_cnts[] = {
+ INIT_Q_COUNTER(rx_write_requests),
+ INIT_Q_COUNTER(rx_read_requests),
+ INIT_Q_COUNTER(rx_atomic_requests),
+ INIT_Q_COUNTER(rx_dct_connect),
+ INIT_Q_COUNTER(out_of_buffer),
+};
+
+static const struct mlx5_ib_counter out_of_seq_q_cnts[] = {
+ INIT_Q_COUNTER(out_of_sequence),
+};
+
+static const struct mlx5_ib_counter retrans_q_cnts[] = {
+ INIT_Q_COUNTER(duplicate_request),
+ INIT_Q_COUNTER(rnr_nak_retry_err),
+ INIT_Q_COUNTER(packet_seq_err),
+ INIT_Q_COUNTER(implied_nak_seq_err),
+ INIT_Q_COUNTER(local_ack_timeout_err),
+};
+
+static const struct mlx5_ib_counter vport_basic_q_cnts[] = {
+ INIT_VPORT_Q_COUNTER(rx_write_requests),
+ INIT_VPORT_Q_COUNTER(rx_read_requests),
+ INIT_VPORT_Q_COUNTER(rx_atomic_requests),
+ INIT_VPORT_Q_COUNTER(rx_dct_connect),
+ INIT_VPORT_Q_COUNTER(out_of_buffer),
+};
+
+static const struct mlx5_ib_counter vport_out_of_seq_q_cnts[] = {
+ INIT_VPORT_Q_COUNTER(out_of_sequence),
+};
+
+static const struct mlx5_ib_counter vport_retrans_q_cnts[] = {
+ INIT_VPORT_Q_COUNTER(duplicate_request),
+ INIT_VPORT_Q_COUNTER(rnr_nak_retry_err),
+ INIT_VPORT_Q_COUNTER(packet_seq_err),
+ INIT_VPORT_Q_COUNTER(implied_nak_seq_err),
+ INIT_VPORT_Q_COUNTER(local_ack_timeout_err),
+};
+
+#define INIT_CONG_COUNTER(_name) \
+ { .name = #_name, .offset = \
+ MLX5_BYTE_OFF(query_cong_statistics_out, _name ## _high)}
+
+static const struct mlx5_ib_counter cong_cnts[] = {
+ INIT_CONG_COUNTER(rp_cnp_ignored),
+ INIT_CONG_COUNTER(rp_cnp_handled),
+ INIT_CONG_COUNTER(np_ecn_marked_roce_packets),
+ INIT_CONG_COUNTER(np_cnp_sent),
+};
+
+static const struct mlx5_ib_counter extended_err_cnts[] = {
+ INIT_Q_COUNTER(resp_local_length_error),
+ INIT_Q_COUNTER(resp_cqe_error),
+ INIT_Q_COUNTER(req_cqe_error),
+ INIT_Q_COUNTER(req_remote_invalid_request),
+ INIT_Q_COUNTER(req_remote_access_errors),
+ INIT_Q_COUNTER(resp_remote_access_errors),
+ INIT_Q_COUNTER(resp_cqe_flush_error),
+ INIT_Q_COUNTER(req_cqe_flush_error),
+ INIT_Q_COUNTER(req_transport_retries_exceeded),
+ INIT_Q_COUNTER(req_rnr_retries_exceeded),
+};
+
+static const struct mlx5_ib_counter roce_accl_cnts[] = {
+ INIT_Q_COUNTER(roce_adp_retrans),
+ INIT_Q_COUNTER(roce_adp_retrans_to),
+ INIT_Q_COUNTER(roce_slow_restart),
+ INIT_Q_COUNTER(roce_slow_restart_cnps),
+ INIT_Q_COUNTER(roce_slow_restart_trans),
+};
+
+static const struct mlx5_ib_counter vport_extended_err_cnts[] = {
+ INIT_VPORT_Q_COUNTER(resp_local_length_error),
+ INIT_VPORT_Q_COUNTER(resp_cqe_error),
+ INIT_VPORT_Q_COUNTER(req_cqe_error),
+ INIT_VPORT_Q_COUNTER(req_remote_invalid_request),
+ INIT_VPORT_Q_COUNTER(req_remote_access_errors),
+ INIT_VPORT_Q_COUNTER(resp_remote_access_errors),
+ INIT_VPORT_Q_COUNTER(resp_cqe_flush_error),
+ INIT_VPORT_Q_COUNTER(req_cqe_flush_error),
+ INIT_VPORT_Q_COUNTER(req_transport_retries_exceeded),
+ INIT_VPORT_Q_COUNTER(req_rnr_retries_exceeded),
+};
+
+static const struct mlx5_ib_counter vport_roce_accl_cnts[] = {
+ INIT_VPORT_Q_COUNTER(roce_adp_retrans),
+ INIT_VPORT_Q_COUNTER(roce_adp_retrans_to),
+ INIT_VPORT_Q_COUNTER(roce_slow_restart),
+ INIT_VPORT_Q_COUNTER(roce_slow_restart_cnps),
+ INIT_VPORT_Q_COUNTER(roce_slow_restart_trans),
+};
+
+#define INIT_EXT_PPCNT_COUNTER(_name) \
+ { .name = #_name, .offset = \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_extended_cntrs_grp_data_layout._name##_high)}
+
+static const struct mlx5_ib_counter ext_ppcnt_cnts[] = {
+ INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated),
+};
+
+#define INIT_OP_COUNTER(_name, _type) \
+ { .name = #_name, .type = MLX5_IB_OPCOUNTER_##_type}
+
+static const struct mlx5_ib_counter basic_op_cnts[] = {
+ INIT_OP_COUNTER(cc_rx_ce_pkts, CC_RX_CE_PKTS),
+};
+
+static const struct mlx5_ib_counter rdmarx_cnp_op_cnts[] = {
+ INIT_OP_COUNTER(cc_rx_cnp_pkts, CC_RX_CNP_PKTS),
+};
+
+static const struct mlx5_ib_counter rdmatx_cnp_op_cnts[] = {
+ INIT_OP_COUNTER(cc_tx_cnp_pkts, CC_TX_CNP_PKTS),
+};
+
+static const struct mlx5_ib_counter packets_op_cnts[] = {
+ INIT_OP_COUNTER(rdma_tx_packets, RDMA_TX_PACKETS),
+ INIT_OP_COUNTER(rdma_tx_bytes, RDMA_TX_BYTES),
+ INIT_OP_COUNTER(rdma_rx_packets, RDMA_RX_PACKETS),
+ INIT_OP_COUNTER(rdma_rx_bytes, RDMA_RX_BYTES),
+};
+
+static int mlx5_ib_read_counters(struct ib_counters *counters,
+ struct ib_counters_read_attr *read_attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
+ struct mlx5_read_counters_attr mread_attr = {};
+ struct mlx5_ib_flow_counters_desc *desc;
+ int ret, i;
+
+ mutex_lock(&mcounters->mcntrs_mutex);
+ if (mcounters->cntrs_max_index > read_attr->ncounters) {
+ ret = -EINVAL;
+ goto err_bound;
+ }
+
+ mread_attr.out = kcalloc(mcounters->counters_num, sizeof(u64),
+ GFP_KERNEL);
+ if (!mread_attr.out) {
+ ret = -ENOMEM;
+ goto err_bound;
+ }
+
+ mread_attr.hw_cntrs_hndl = mcounters->hw_cntrs_hndl;
+ mread_attr.flags = read_attr->flags;
+ ret = mcounters->read_counters(counters->device, &mread_attr);
+ if (ret)
+ goto err_read;
+
+ /* walk the counters data array and accumulate each value into the
+ * output slot given by its description/index pair
+ */
+ desc = mcounters->counters_data;
+ for (i = 0; i < mcounters->ncounters; i++)
+ read_attr->counters_buff[desc[i].index] += mread_attr.out[desc[i].description];
+
+err_read:
+ kfree(mread_attr.out);
+err_bound:
+ mutex_unlock(&mcounters->mcntrs_mutex);
+ return ret;
+}
+
+static int mlx5_ib_destroy_counters(struct ib_counters *counters)
+{
+ struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
+
+ mlx5_ib_counters_clear_description(counters);
+ if (mcounters->hw_cntrs_hndl)
+ mlx5_fc_destroy(to_mdev(counters->device)->mdev,
+ mcounters->hw_cntrs_hndl);
+ return 0;
+}
+
+static int mlx5_ib_create_counters(struct ib_counters *counters,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
+
+ mutex_init(&mcounters->mcntrs_mutex);
+ return 0;
+}
+
+static bool vport_qcounters_supported(struct mlx5_ib_dev *dev)
+{
+ return MLX5_CAP_GEN(dev->mdev, q_counter_other_vport) &&
+ MLX5_CAP_GEN(dev->mdev, q_counter_aggregation);
+}
+
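+/*
+ * port_num here is 1-based; 0 selects the device counters in port[0].
+ * In switchdev mode port[0] holds the device Q_counters and, when vport
+ * Q-counters are supported, port[1] holds the helper used for vport queries.
+ */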
+static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev,
+ u32 port_num)
+{
+ if ((is_mdev_switchdev_mode(dev->mdev) &&
+ !vport_qcounters_supported(dev)) || !port_num)
+ return &dev->port[0].cnts;
+
+ return is_mdev_switchdev_mode(dev->mdev) ?
+ &dev->port[1].cnts : &dev->port[port_num - 1].cnts;
+}
+
+/**
+ * mlx5_ib_get_counters_id - Returns counters id to use for device+port
+ * @dev: Pointer to mlx5 IB device
+ * @port_num: Zero based port number
+ *
+ * mlx5_ib_get_counters_id() returns the counter set id to use for the
+ * given device and port combination, in both switchdev and non-switchdev
+ * modes of the parent device.
+ */
+u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u32 port_num)
+{
+ const struct mlx5_ib_counters *cnts = get_counters(dev, port_num + 1);
+
+ return cnts->set_id;
+}
+
+static struct rdma_hw_stats *do_alloc_stats(const struct mlx5_ib_counters *cnts)
+{
+ struct rdma_hw_stats *stats;
+ u32 num_hw_counters;
+ int i;
+
+ num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters +
+ cnts->num_ext_ppcnt_counters;
+ stats = rdma_alloc_hw_stats_struct(cnts->descs,
+ num_hw_counters +
+ cnts->num_op_counters,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+ if (!stats)
+ return NULL;
+
+ for (i = 0; i < cnts->num_op_counters; i++)
+ set_bit(num_hw_counters + i, stats->is_disabled);
+
+ return stats;
+}
+
+static struct rdma_hw_stats *
+mlx5_ib_alloc_hw_device_stats(struct ib_device *ibdev)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ const struct mlx5_ib_counters *cnts = &dev->port[0].cnts;
+
+ return do_alloc_stats(cnts);
+}
+
+static struct rdma_hw_stats *
+mlx5_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ const struct mlx5_ib_counters *cnts = get_counters(dev, port_num);
+
+ return do_alloc_stats(cnts);
+}
+
+static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
+ const struct mlx5_ib_counters *cnts,
+ struct rdma_hw_stats *stats,
+ u16 set_id)
+{
+ u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
+ __be32 val;
+ int ret, i;
+
+ MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
+ MLX5_SET(query_q_counter_in, in, counter_set_id, set_id);
+ ret = mlx5_cmd_exec_inout(mdev, query_q_counter, in, out);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < cnts->num_q_counters; i++) {
+ val = *(__be32 *)((void *)out + cnts->offsets[i]);
+ stats->value[i] = (u64)be32_to_cpu(val);
+ }
+
+ return 0;
+}
+
+static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
+ const struct mlx5_ib_counters *cnts,
+ struct rdma_hw_stats *stats)
+{
+ int offset = cnts->num_q_counters + cnts->num_cong_counters;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ int ret, i;
+ void *out;
+
+ out = kvzalloc(sz, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
+ ret = mlx5_core_access_reg(dev->mdev, in, sz, out, sz, MLX5_REG_PPCNT,
+ 0, 0);
+ if (ret)
+ goto free;
+
+ for (i = 0; i < cnts->num_ext_ppcnt_counters; i++)
+ stats->value[i + offset] =
+ be64_to_cpup((__be64 *)(out +
+ cnts->offsets[i + offset]));
+free:
+ kvfree(out);
+ return ret;
+}
+
+static int mlx5_ib_query_q_counters_vport(struct mlx5_ib_dev *dev,
+ u32 port_num,
+ const struct mlx5_ib_counters *cnts,
+ struct rdma_hw_stats *stats)
+
+{
+ u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
+ struct mlx5_core_dev *mdev;
+ __be32 val;
+ int ret, i;
+
+ if (!dev->port[port_num].rep ||
+ dev->port[port_num].rep->vport == MLX5_VPORT_UPLINK)
+ return 0;
+
+ mdev = mlx5_eswitch_get_core_dev(dev->port[port_num].rep->esw);
+ if (!mdev)
+ return -EOPNOTSUPP;
+
+ MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
+ MLX5_SET(query_q_counter_in, in, other_vport, 1);
+ MLX5_SET(query_q_counter_in, in, vport_number,
+ dev->port[port_num].rep->vport);
+ MLX5_SET(query_q_counter_in, in, aggregate, 1);
+ ret = mlx5_cmd_exec_inout(mdev, query_q_counter, in, out);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < cnts->num_q_counters; i++) {
+ val = *(__be32 *)((void *)out + cnts->offsets[i]);
+ stats->value[i] = (u64)be32_to_cpu(val);
+ }
+
+ return 0;
+}
+
+static int do_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u32 port_num, int index)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ const struct mlx5_ib_counters *cnts = get_counters(dev, port_num);
+ struct mlx5_core_dev *mdev;
+ int ret, num_counters;
+
+ if (!stats)
+ return -EINVAL;
+
+ num_counters = cnts->num_q_counters +
+ cnts->num_cong_counters +
+ cnts->num_ext_ppcnt_counters;
+
+ if (is_mdev_switchdev_mode(dev->mdev) && dev->is_rep && port_num != 0)
+ ret = mlx5_ib_query_q_counters_vport(dev, port_num - 1, cnts,
+ stats);
+ else
+ ret = mlx5_ib_query_q_counters(dev->mdev, cnts, stats,
+ cnts->set_id);
+ if (ret)
+ return ret;
+
+ /* We don't expose device counters over Vports */
+ if (is_mdev_switchdev_mode(dev->mdev) && dev->is_rep && port_num != 0)
+ goto done;
+
+ if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
+ ret = mlx5_ib_query_ext_ppcnt_counters(dev, cnts, stats);
+ if (ret)
+ return ret;
+ }
+
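+ /*
+ * Congestion counters are read from the native port's mdev and
+ * appended after the Q counters in the stats buffer.
+ */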
+ if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
+ if (!port_num)
+ port_num = 1;
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, NULL);
+ if (!mdev) {
+ /* If the port is not affiliated yet, it is in the down
+ * state and has no counters, so they would read as
+ * zero; no need to query the HCA.
+ */
+ goto done;
+ }
+ ret = mlx5_lag_query_cong_counters(mdev,
+ stats->value +
+ cnts->num_q_counters,
+ cnts->num_cong_counters,
+ cnts->offsets +
+ cnts->num_q_counters);
+
+ mlx5_ib_put_native_port_mdev(dev, port_num);
+ if (ret)
+ return ret;
+ }
+
+done:
+ return num_counters;
+}
+
+static bool is_rdma_bytes_counter(u32 type)
+{
+ if (type == MLX5_IB_OPCOUNTER_RDMA_TX_BYTES ||
+ type == MLX5_IB_OPCOUNTER_RDMA_RX_BYTES ||
+ type == MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP ||
+ type == MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP)
+ return true;
+
+ return false;
+}
+
+static int do_per_qp_get_op_stat(struct rdma_counter *counter)
+{
+ struct mlx5_ib_dev *dev = to_mdev(counter->device);
+ const struct mlx5_ib_counters *cnts = get_counters(dev, counter->port);
+ struct mlx5_rdma_counter *mcounter = to_mcounter(counter);
+ int i, ret, index, num_hw_counters;
+ u64 packets = 0, bytes = 0;
+
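+ /*
+ * Per-QP op-counters are laid out after the hw (q/cong/ext_ppcnt)
+ * counters in the stats array; read each bound flow counter and
+ * report bytes or packets as appropriate.
+ */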
+ for (i = MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP;
+ i <= MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP; i++) {
+ if (!mcounter->fc[i])
+ continue;
+
+ ret = mlx5_fc_query(dev->mdev, mcounter->fc[i],
+ &packets, &bytes);
+ if (ret)
+ return ret;
+
+ num_hw_counters = cnts->num_q_counters +
+ cnts->num_cong_counters +
+ cnts->num_ext_ppcnt_counters;
+
+ index = i - MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP +
+ num_hw_counters;
+
+ if (is_rdma_bytes_counter(i))
+ counter->stats->value[index] = bytes;
+ else
+ counter->stats->value[index] = packets;
+
+ clear_bit(index, counter->stats->is_disabled);
+ }
+ return 0;
+}
+
+static int do_get_op_stat(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u32 port_num, int index)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ const struct mlx5_ib_counters *cnts;
+ const struct mlx5_ib_op_fc *opfcs;
+ u64 packets, bytes;
+ u32 type;
+ int ret;
+
+ cnts = get_counters(dev, port_num);
+
+ opfcs = cnts->opfcs;
+ type = *(u32 *)cnts->descs[index].priv;
+ if (type >= MLX5_IB_OPCOUNTER_MAX)
+ return -EINVAL;
+
+ if (!opfcs[type].fc)
+ goto out;
+
+ ret = mlx5_fc_query(dev->mdev, opfcs[type].fc,
+ &packets, &bytes);
+ if (ret)
+ return ret;
+
+ if (is_rdma_bytes_counter(type))
+ stats->value[index] = bytes;
+ else
+ stats->value[index] = packets;
+out:
+ return index;
+}
+
+static int do_get_op_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u32 port_num)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ const struct mlx5_ib_counters *cnts;
+ int index, ret, num_hw_counters;
+
+ cnts = get_counters(dev, port_num);
+ num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters +
+ cnts->num_ext_ppcnt_counters;
+ for (index = num_hw_counters;
+ index < (num_hw_counters + cnts->num_op_counters); index++) {
+ ret = do_get_op_stat(ibdev, stats, port_num, index);
+ if (ret != index)
+ return ret;
+ }
+
+ return cnts->num_op_counters;
+}
+
+static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u32 port_num, int index)
+{
+ int num_counters, num_hw_counters, num_op_counters;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ const struct mlx5_ib_counters *cnts;
+
+ cnts = get_counters(dev, port_num);
+ num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters +
+ cnts->num_ext_ppcnt_counters;
+ num_counters = num_hw_counters + cnts->num_op_counters;
+
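+ /*
+ * An index inside the hw-counter range refreshes the hw counters;
+ * an op-counter index reads just that flow counter; index 0 (or
+ * num_counters) falls through and refreshes both sets.
+ */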
+ if (index < 0 || index > num_counters)
+ return -EINVAL;
+ else if (index > 0 && index < num_hw_counters)
+ return do_get_hw_stats(ibdev, stats, port_num, index);
+ else if (index >= num_hw_counters && index < num_counters)
+ return do_get_op_stat(ibdev, stats, port_num, index);
+
+ num_hw_counters = do_get_hw_stats(ibdev, stats, port_num, index);
+ if (num_hw_counters < 0)
+ return num_hw_counters;
+
+ num_op_counters = do_get_op_stats(ibdev, stats, port_num);
+ if (num_op_counters < 0)
+ return num_op_counters;
+
+ return num_hw_counters + num_op_counters;
+}
+
+static struct rdma_hw_stats *
+mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
+{
+ struct mlx5_ib_dev *dev = to_mdev(counter->device);
+ const struct mlx5_ib_counters *cnts = get_counters(dev, counter->port);
+
+ return do_alloc_stats(cnts);
+}
+
+static int mlx5_ib_counter_update_stats(struct rdma_counter *counter)
+{
+ struct mlx5_ib_dev *dev = to_mdev(counter->device);
+ const struct mlx5_ib_counters *cnts = get_counters(dev, counter->port);
+ int ret;
+
+ ret = mlx5_ib_query_q_counters(dev->mdev, cnts, counter->stats,
+ counter->id);
+ if (ret)
+ return ret;
+
+ if (!counter->mode.bind_opcnt)
+ return 0;
+
+ return do_per_qp_get_op_stat(counter);
+}
+
+static int mlx5_ib_counter_dealloc(struct rdma_counter *counter)
+{
+ struct mlx5_rdma_counter *mcounter = to_mcounter(counter);
+ struct mlx5_ib_dev *dev = to_mdev(counter->device);
+ u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
+
+ if (!counter->id)
+ return 0;
+
+ WARN_ON(!xa_empty(&mcounter->qpn_opfc_xa));
+ mlx5r_fs_destroy_fcs(dev, mcounter->fc);
+ MLX5_SET(dealloc_q_counter_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+ MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter->id);
+ return mlx5_cmd_exec_in(dev->mdev, dealloc_q_counter, in);
+}
+
+static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
+ struct ib_qp *qp, u32 port)
+{
+ struct mlx5_rdma_counter *mcounter = to_mcounter(counter);
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ bool new = false;
+ int err;
+
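+ /* Lazily allocate a Q counter the first time a QP is bound to this rdma_counter */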
+ if (!counter->id) {
+ u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
+
+ MLX5_SET(alloc_q_counter_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_Q_COUNTER);
+ MLX5_SET(alloc_q_counter_in, in, uid, MLX5_SHARED_RESOURCE_UID);
+ err = mlx5_cmd_exec_inout(dev->mdev, alloc_q_counter, in, out);
+ if (err)
+ return err;
+ counter->id =
+ MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+ new = true;
+ }
+
+ err = mlx5_ib_qp_set_counter(qp, counter);
+ if (err)
+ goto fail_set_counter;
+
+ if (!counter->mode.bind_opcnt)
+ return 0;
+
+ err = mlx5r_fs_bind_op_fc(qp, mcounter->fc, &mcounter->qpn_opfc_xa,
+ port);
+ if (err)
+ goto fail_bind_op_fc;
+
+ return 0;
+
+fail_bind_op_fc:
+ mlx5_ib_qp_set_counter(qp, NULL);
+fail_set_counter:
+ if (new) {
+ mlx5_ib_counter_dealloc(counter);
+ counter->id = 0;
+ }
+
+ return err;
+}
+
+static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp, u32 port)
+{
+ struct rdma_counter *counter = qp->counter;
+ struct mlx5_rdma_counter *mcounter;
+ int err;
+
+ mcounter = to_mcounter(counter);
+
+ mlx5r_fs_unbind_op_fc(qp, &mcounter->qpn_opfc_xa);
+
+ err = mlx5_ib_qp_set_counter(qp, NULL);
+ if (err)
+ goto fail_set_counter;
+
+ return 0;
+
+fail_set_counter:
+ if (counter->mode.bind_opcnt)
+ mlx5r_fs_bind_op_fc(qp, mcounter->fc,
+ &mcounter->qpn_opfc_xa, port);
+ return err;
+}
+
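+/*
+ * Fill the stat descriptors and byte offsets in the same order used by
+ * __mlx5_ib_alloc_counters() to size the descs/offsets arrays.
+ */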
+static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
+ struct rdma_stat_desc *descs, size_t *offsets,
+ u32 port_num)
+{
+ bool is_vport = is_mdev_switchdev_mode(dev->mdev) &&
+ port_num != MLX5_VPORT_PF;
+ const struct mlx5_ib_counter *names;
+ int j = 0, i, size;
+
+ names = is_vport ? vport_basic_q_cnts : basic_q_cnts;
+ size = is_vport ? ARRAY_SIZE(vport_basic_q_cnts) :
+ ARRAY_SIZE(basic_q_cnts);
+ for (i = 0; i < size; i++, j++) {
+ descs[j].name = names[i].name;
+ offsets[j] = names[i].offset;
+ }
+
+ names = is_vport ? vport_out_of_seq_q_cnts : out_of_seq_q_cnts;
+ size = is_vport ? ARRAY_SIZE(vport_out_of_seq_q_cnts) :
+ ARRAY_SIZE(out_of_seq_q_cnts);
+ if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
+ for (i = 0; i < size; i++, j++) {
+ descs[j].name = names[i].name;
+ offsets[j] = names[i].offset;
+ }
+ }
+
+ names = is_vport ? vport_retrans_q_cnts : retrans_q_cnts;
+ size = is_vport ? ARRAY_SIZE(vport_retrans_q_cnts) :
+ ARRAY_SIZE(retrans_q_cnts);
+ if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
+ for (i = 0; i < size; i++, j++) {
+ descs[j].name = names[i].name;
+ offsets[j] = names[i].offset;
+ }
+ }
+
+ names = is_vport ? vport_extended_err_cnts : extended_err_cnts;
+ size = is_vport ? ARRAY_SIZE(vport_extended_err_cnts) :
+ ARRAY_SIZE(extended_err_cnts);
+ if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {
+ for (i = 0; i < size; i++, j++) {
+ descs[j].name = names[i].name;
+ offsets[j] = names[i].offset;
+ }
+ }
+
+ names = is_vport ? vport_roce_accl_cnts : roce_accl_cnts;
+ size = is_vport ? ARRAY_SIZE(vport_roce_accl_cnts) :
+ ARRAY_SIZE(roce_accl_cnts);
+ if (MLX5_CAP_GEN(dev->mdev, roce_accl)) {
+ for (i = 0; i < size; i++, j++) {
+ descs[j].name = names[i].name;
+ offsets[j] = names[i].offset;
+ }
+ }
+
+ if (is_vport)
+ return;
+
+ if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
+ for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) {
+ descs[j].name = cong_cnts[i].name;
+ offsets[j] = cong_cnts[i].offset;
+ }
+ }
+
+ if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
+ for (i = 0; i < ARRAY_SIZE(ext_ppcnt_cnts); i++, j++) {
+ descs[j].name = ext_ppcnt_cnts[i].name;
+ offsets[j] = ext_ppcnt_cnts[i].offset;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(basic_op_cnts); i++, j++) {
+ descs[j].name = basic_op_cnts[i].name;
+ descs[j].flags |= IB_STAT_FLAG_OPTIONAL;
+ descs[j].priv = &basic_op_cnts[i].type;
+ }
+
+ if (MLX5_CAP_FLOWTABLE(dev->mdev,
+ ft_field_support_2_nic_receive_rdma.bth_opcode)) {
+ for (i = 0; i < ARRAY_SIZE(rdmarx_cnp_op_cnts); i++, j++) {
+ descs[j].name = rdmarx_cnp_op_cnts[i].name;
+ descs[j].flags |= IB_STAT_FLAG_OPTIONAL;
+ descs[j].priv = &rdmarx_cnp_op_cnts[i].type;
+ }
+ }
+
+ if (MLX5_CAP_FLOWTABLE(dev->mdev,
+ ft_field_support_2_nic_transmit_rdma.bth_opcode)) {
+ for (i = 0; i < ARRAY_SIZE(rdmatx_cnp_op_cnts); i++, j++) {
+ descs[j].name = rdmatx_cnp_op_cnts[i].name;
+ descs[j].flags |= IB_STAT_FLAG_OPTIONAL;
+ descs[j].priv = &rdmatx_cnp_op_cnts[i].type;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(packets_op_cnts); i++, j++) {
+ descs[j].name = packets_op_cnts[i].name;
+ descs[j].flags |= IB_STAT_FLAG_OPTIONAL;
+ descs[j].priv = &packets_op_cnts[i].type;
+ }
+}
+
+
+static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_counters *cnts, u32 port_num)
+{
+ bool is_vport = is_mdev_switchdev_mode(dev->mdev) &&
+ port_num != MLX5_VPORT_PF;
+ u32 num_counters, num_op_counters = 0, size;
+
+ size = is_vport ? ARRAY_SIZE(vport_basic_q_cnts) :
+ ARRAY_SIZE(basic_q_cnts);
+ num_counters = size;
+
+ size = is_vport ? ARRAY_SIZE(vport_out_of_seq_q_cnts) :
+ ARRAY_SIZE(out_of_seq_q_cnts);
+ if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt))
+ num_counters += size;
+
+ size = is_vport ? ARRAY_SIZE(vport_retrans_q_cnts) :
+ ARRAY_SIZE(retrans_q_cnts);
+ if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
+ num_counters += size;
+
+ size = is_vport ? ARRAY_SIZE(vport_extended_err_cnts) :
+ ARRAY_SIZE(extended_err_cnts);
+ if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters))
+ num_counters += size;
+
+ size = is_vport ? ARRAY_SIZE(vport_roce_accl_cnts) :
+ ARRAY_SIZE(roce_accl_cnts);
+ if (MLX5_CAP_GEN(dev->mdev, roce_accl))
+ num_counters += size;
+
+ cnts->num_q_counters = num_counters;
+
+ if (is_vport)
+ goto skip_non_qcounters;
+
+ if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
+ cnts->num_cong_counters = ARRAY_SIZE(cong_cnts);
+ num_counters += ARRAY_SIZE(cong_cnts);
+ }
+ if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
+ cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts);
+ num_counters += ARRAY_SIZE(ext_ppcnt_cnts);
+ }
+
+ num_op_counters = ARRAY_SIZE(basic_op_cnts);
+
+ num_op_counters += ARRAY_SIZE(packets_op_cnts);
+
+ if (MLX5_CAP_FLOWTABLE(dev->mdev,
+ ft_field_support_2_nic_receive_rdma.bth_opcode))
+ num_op_counters += ARRAY_SIZE(rdmarx_cnp_op_cnts);
+
+ if (MLX5_CAP_FLOWTABLE(dev->mdev,
+ ft_field_support_2_nic_transmit_rdma.bth_opcode))
+ num_op_counters += ARRAY_SIZE(rdmatx_cnp_op_cnts);
+
+skip_non_qcounters:
+ cnts->num_op_counters = num_op_counters;
+ num_counters += num_op_counters;
+ cnts->descs = kcalloc(num_counters,
+ sizeof(struct rdma_stat_desc), GFP_KERNEL);
+ if (!cnts->descs)
+ return -ENOMEM;
+
+ cnts->offsets = kcalloc(num_counters,
+ sizeof(*cnts->offsets), GFP_KERNEL);
+ if (!cnts->offsets)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(cnts->descs);
+ cnts->descs = NULL;
+ return -ENOMEM;
+}
+
+/*
+ * Checks whether the given flow counter type shares its flow counter with
+ * another type and, if so, whether that other type's flow counter has already
+ * been created. If both conditions are met, returns true and sets *opfc to the
+ * shared counter; otherwise returns false.
+ */
+bool mlx5r_is_opfc_shared_and_in_use(struct mlx5_ib_op_fc *opfcs, u32 type,
+ struct mlx5_ib_op_fc **opfc)
+{
+ u32 shared_fc_type;
+
+ switch (type) {
+ case MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS:
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_TX_BYTES;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_TX_BYTES:
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS:
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_RX_BYTES;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_BYTES:
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS_PER_QP:
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP:
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS_PER_QP:
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP:
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS_PER_QP;
+ break;
+ default:
+ return false;
+ }
+
+ *opfc = &opfcs[shared_fc_type];
+ if (!(*opfc)->fc)
+ return false;
+
+ return true;
+}
+
+static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
+ int num_cnt_ports = dev->num_ports;
+ struct mlx5_ib_op_fc *in_use_opfc;
+ int i, j;
+
+ if (is_mdev_switchdev_mode(dev->mdev))
+ num_cnt_ports = min(2, num_cnt_ports);
+
+ MLX5_SET(dealloc_q_counter_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+
+ for (i = 0; i < num_cnt_ports; i++) {
+ if (dev->port[i].cnts.set_id) {
+ MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
+ dev->port[i].cnts.set_id);
+ mlx5_cmd_exec_in(dev->mdev, dealloc_q_counter, in);
+ }
+ kfree(dev->port[i].cnts.descs);
+ kfree(dev->port[i].cnts.offsets);
+
+ for (j = 0; j < MLX5_IB_OPCOUNTER_MAX; j++) {
+ if (!dev->port[i].cnts.opfcs[j].fc)
+ continue;
+
+ if (mlx5r_is_opfc_shared_and_in_use(
+ dev->port[i].cnts.opfcs, j, &in_use_opfc))
+ goto skip;
+
+ mlx5_ib_fs_remove_op_fc(dev,
+ &dev->port[i].cnts.opfcs[j], j);
+ mlx5_fc_destroy(dev->mdev,
+ dev->port[i].cnts.opfcs[j].fc);
+skip:
+ dev->port[i].cnts.opfcs[j].fc = NULL;
+ }
+ }
+}
+
+static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
+ int num_cnt_ports = dev->num_ports;
+ int err = 0;
+ int i;
+ bool is_shared;
+
+ MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
+ is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0;
+
+ /*
+ * In switchdev mode we need to allocate two counter ports: one holds
+ * the real Q_counters of this device, while the other serves as a
+ * helper that lets the PF query all other vports.
+ */
+ if (is_mdev_switchdev_mode(dev->mdev))
+ num_cnt_ports = min(2, num_cnt_ports);
+
+ for (i = 0; i < num_cnt_ports; i++) {
+ err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts, i);
+ if (err)
+ goto err_alloc;
+
+ mlx5_ib_fill_counters(dev, dev->port[i].cnts.descs,
+ dev->port[i].cnts.offsets, i);
+
+ MLX5_SET(alloc_q_counter_in, in, uid,
+ is_shared ? MLX5_SHARED_RESOURCE_UID : 0);
+
+ err = mlx5_cmd_exec_inout(dev->mdev, alloc_q_counter, in, out);
+ if (err) {
+ mlx5_ib_warn(dev,
+ "couldn't allocate queue counter for port %d, err %d\n",
+ i + 1, err);
+ goto err_alloc;
+ }
+
+ dev->port[i].cnts.set_id =
+ MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+ }
+ return 0;
+
+err_alloc:
+ mlx5_ib_dealloc_counters(dev);
+ return err;
+}
+
+static int read_flow_counters(struct ib_device *ibdev,
+ struct mlx5_read_counters_attr *read_attr)
+{
+ struct mlx5_fc *fc = read_attr->hw_cntrs_hndl;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+
+ return mlx5_fc_query(dev->mdev, fc,
+ &read_attr->out[IB_COUNTER_PACKETS],
+ &read_attr->out[IB_COUNTER_BYTES]);
+}
+
+/* flow counters currently expose two values: packets and bytes */
+#define FLOW_COUNTERS_NUM 2
+static int counters_set_description(
+ struct ib_counters *counters, enum mlx5_ib_counters_type counters_type,
+ struct mlx5_ib_flow_counters_desc *desc_data, u32 ncounters)
+{
+ struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
+ u32 cntrs_max_index = 0;
+ int i;
+
+ if (counters_type != MLX5_IB_COUNTERS_FLOW)
+ return -EINVAL;
+
+ /* init the fields for the object */
+ mcounters->type = counters_type;
+ mcounters->read_counters = read_flow_counters;
+ mcounters->counters_num = FLOW_COUNTERS_NUM;
+ mcounters->ncounters = ncounters;
+ /* each counter entry has both a description and an index */
+ for (i = 0; i < ncounters; i++) {
+ if (desc_data[i].description > IB_COUNTER_BYTES)
+ return -EINVAL;
+
+ if (cntrs_max_index <= desc_data[i].index)
+ cntrs_max_index = desc_data[i].index + 1;
+ }
+
+ mutex_lock(&mcounters->mcntrs_mutex);
+ mcounters->counters_data = desc_data;
+ mcounters->cntrs_max_index = cntrs_max_index;
+ mutex_unlock(&mcounters->mcntrs_mutex);
+
+ return 0;
+}
+
+#define MAX_COUNTERS_NUM (USHRT_MAX / (sizeof(u32) * 2))
+int mlx5_ib_flow_counters_set_data(struct ib_counters *ibcounters,
+ struct mlx5_ib_create_flow *ucmd)
+{
+ struct mlx5_ib_mcounters *mcounters = to_mcounters(ibcounters);
+ struct mlx5_ib_flow_counters_data *cntrs_data = NULL;
+ struct mlx5_ib_flow_counters_desc *desc_data = NULL;
+ bool hw_hndl = false;
+ int ret = 0;
+
+ if (ucmd && ucmd->ncounters_data != 0) {
+ cntrs_data = ucmd->data;
+ if (cntrs_data->ncounters > MAX_COUNTERS_NUM)
+ return -EINVAL;
+
+ desc_data = kcalloc(cntrs_data->ncounters,
+ sizeof(*desc_data),
+ GFP_KERNEL);
+ if (!desc_data)
+ return -ENOMEM;
+
+ if (copy_from_user(desc_data,
+ u64_to_user_ptr(cntrs_data->counters_data),
+ sizeof(*desc_data) * cntrs_data->ncounters)) {
+ ret = -EFAULT;
+ goto free;
+ }
+ }
+
+ if (!mcounters->hw_cntrs_hndl) {
+ mcounters->hw_cntrs_hndl = mlx5_fc_create(
+ to_mdev(ibcounters->device)->mdev, false);
+ if (IS_ERR(mcounters->hw_cntrs_hndl)) {
+ ret = PTR_ERR(mcounters->hw_cntrs_hndl);
+ goto free;
+ }
+ hw_hndl = true;
+ }
+
+ if (desc_data) {
+ /* counters already bound to at least one flow */
+ if (mcounters->cntrs_max_index) {
+ ret = -EINVAL;
+ goto free_hndl;
+ }
+
+ ret = counters_set_description(ibcounters,
+ MLX5_IB_COUNTERS_FLOW,
+ desc_data,
+ cntrs_data->ncounters);
+ if (ret)
+ goto free_hndl;
+
+ } else if (!mcounters->cntrs_max_index) {
+ /* counters not bound yet, must have udata passed */
+ ret = -EINVAL;
+ goto free_hndl;
+ }
+
+ return 0;
+
+free_hndl:
+ if (hw_hndl) {
+ mlx5_fc_destroy(to_mdev(ibcounters->device)->mdev,
+ mcounters->hw_cntrs_hndl);
+ mcounters->hw_cntrs_hndl = NULL;
+ }
+free:
+ kfree(desc_data);
+ return ret;
+}
+
+void mlx5_ib_counters_clear_description(struct ib_counters *counters)
+{
+ struct mlx5_ib_mcounters *mcounters;
+
+ if (!counters || atomic_read(&counters->usecnt) != 1)
+ return;
+
+ mcounters = to_mcounters(counters);
+
+ mutex_lock(&mcounters->mcntrs_mutex);
+ kfree(mcounters->counters_data);
+ mcounters->counters_data = NULL;
+ mcounters->cntrs_max_index = 0;
+ mutex_unlock(&mcounters->mcntrs_mutex);
+}
+
+static int mlx5_ib_modify_stat(struct ib_device *device, u32 port,
+ unsigned int index, bool enable)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_ib_op_fc *opfc, *in_use_opfc;
+ struct mlx5_ib_counters *cnts;
+ u32 num_hw_counters, type;
+ int ret;
+
+ cnts = &dev->port[port - 1].cnts;
+ num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters +
+ cnts->num_ext_ppcnt_counters;
+ if (index < num_hw_counters ||
+ index >= (num_hw_counters + cnts->num_op_counters))
+ return -EINVAL;
+
+ if (!(cnts->descs[index].flags & IB_STAT_FLAG_OPTIONAL))
+ return -EINVAL;
+
+ type = *(u32 *)cnts->descs[index].priv;
+ if (type >= MLX5_IB_OPCOUNTER_MAX)
+ return -EINVAL;
+
+ opfc = &cnts->opfcs[type];
+
+ if (enable) {
+ if (opfc->fc)
+ return -EEXIST;
+
+ if (mlx5r_is_opfc_shared_and_in_use(cnts->opfcs, type,
+ &in_use_opfc)) {
+ opfc->fc = in_use_opfc->fc;
+ opfc->rule[0] = in_use_opfc->rule[0];
+ return 0;
+ }
+
+ opfc->fc = mlx5_fc_create(dev->mdev, false);
+ if (IS_ERR(opfc->fc))
+ return PTR_ERR(opfc->fc);
+
+ ret = mlx5_ib_fs_add_op_fc(dev, port, opfc, type);
+ if (ret) {
+ mlx5_fc_destroy(dev->mdev, opfc->fc);
+ opfc->fc = NULL;
+ }
+ return ret;
+ }
+
+ if (!opfc->fc)
+ return -EINVAL;
+
+ if (mlx5r_is_opfc_shared_and_in_use(cnts->opfcs, type, &in_use_opfc))
+ goto out;
+
+ mlx5_ib_fs_remove_op_fc(dev, opfc, type);
+ mlx5_fc_destroy(dev->mdev, opfc->fc);
+out:
+ opfc->fc = NULL;
+ return 0;
+}
+
+static void mlx5_ib_counter_init(struct rdma_counter *counter)
+{
+ struct mlx5_rdma_counter *mcounter = to_mcounter(counter);
+
+ xa_init(&mcounter->qpn_opfc_xa);
+}
+
+static const struct ib_device_ops hw_stats_ops = {
+ .alloc_hw_port_stats = mlx5_ib_alloc_hw_port_stats,
+ .get_hw_stats = mlx5_ib_get_hw_stats,
+ .counter_bind_qp = mlx5_ib_counter_bind_qp,
+ .counter_unbind_qp = mlx5_ib_counter_unbind_qp,
+ .counter_dealloc = mlx5_ib_counter_dealloc,
+ .counter_alloc_stats = mlx5_ib_counter_alloc_stats,
+ .counter_update_stats = mlx5_ib_counter_update_stats,
+ .modify_hw_stat = mlx5_ib_modify_stat,
+ .counter_init = mlx5_ib_counter_init,
+
+ INIT_RDMA_OBJ_SIZE(rdma_counter, mlx5_rdma_counter, rdma_counter),
+};
+
+static const struct ib_device_ops hw_switchdev_vport_op = {
+ .alloc_hw_port_stats = mlx5_ib_alloc_hw_port_stats,
+};
+
+static const struct ib_device_ops hw_switchdev_stats_ops = {
+ .alloc_hw_device_stats = mlx5_ib_alloc_hw_device_stats,
+ .get_hw_stats = mlx5_ib_get_hw_stats,
+ .counter_bind_qp = mlx5_ib_counter_bind_qp,
+ .counter_unbind_qp = mlx5_ib_counter_unbind_qp,
+ .counter_dealloc = mlx5_ib_counter_dealloc,
+ .counter_alloc_stats = mlx5_ib_counter_alloc_stats,
+ .counter_update_stats = mlx5_ib_counter_update_stats,
+ .counter_init = mlx5_ib_counter_init,
+
+ INIT_RDMA_OBJ_SIZE(rdma_counter, mlx5_rdma_counter, rdma_counter),
+};
+
+static const struct ib_device_ops counters_ops = {
+ .create_counters = mlx5_ib_create_counters,
+ .destroy_counters = mlx5_ib_destroy_counters,
+ .read_counters = mlx5_ib_read_counters,
+
+ INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs),
+};
+
+int mlx5_ib_counters_init(struct mlx5_ib_dev *dev)
+{
+ ib_set_device_ops(&dev->ib_dev, &counters_ops);
+
+ if (!MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
+ return 0;
+
+ if (is_mdev_switchdev_mode(dev->mdev)) {
+ ib_set_device_ops(&dev->ib_dev, &hw_switchdev_stats_ops);
+ if (vport_qcounters_supported(dev))
+ ib_set_device_ops(&dev->ib_dev, &hw_switchdev_vport_op);
+ } else
+ ib_set_device_ops(&dev->ib_dev, &hw_stats_ops);
+ return mlx5_ib_alloc_counters(dev);
+}
+
+void mlx5_ib_counters_cleanup(struct mlx5_ib_dev *dev)
+{
+ if (!MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
+ return;
+
+ mlx5_ib_dealloc_counters(dev);
+}
diff --git a/drivers/infiniband/hw/mlx5/counters.h b/drivers/infiniband/hw/mlx5/counters.h
new file mode 100644
index 000000000000..a04e7dd59455
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/counters.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#ifndef _MLX5_IB_COUNTERS_H
+#define _MLX5_IB_COUNTERS_H
+
+#include "mlx5_ib.h"
+
+int mlx5_ib_counters_init(struct mlx5_ib_dev *dev);
+void mlx5_ib_counters_cleanup(struct mlx5_ib_dev *dev);
+void mlx5_ib_counters_clear_description(struct ib_counters *counters);
+int mlx5_ib_flow_counters_set_data(struct ib_counters *ibcounters,
+ struct mlx5_ib_create_flow *ucmd);
+u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u32 port_num);
+bool mlx5r_is_opfc_shared_and_in_use(struct mlx5_ib_op_fc *opfcs, u32 type,
+ struct mlx5_ib_op_fc **opfc);
+#endif /* _MLX5_IB_COUNTERS_H */
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 2d0dbbf38ceb..a23b364e24ff 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -35,9 +35,13 @@
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"
-#include "user.h"
+#include "srq.h"
+#include "qp.h"
-static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
+static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
{
struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
@@ -65,14 +69,9 @@ static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
}
}
-static void *get_cqe_from_buf(struct mlx5_ib_cq_buf *buf, int n, int size)
-{
- return mlx5_buf_offset(&buf->buf, n * size);
-}
-
static void *get_cqe(struct mlx5_ib_cq *cq, int n)
{
- return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
+ return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n);
}
static u8 sw_ownership_bit(int n, int nent)
@@ -87,7 +86,7 @@ static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
- if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) &&
+ if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
!((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
return cqe;
} else {
@@ -109,8 +108,8 @@ static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
case IB_WR_LOCAL_INV:
return IB_WC_LOCAL_INV;
- case IB_WR_FAST_REG_MR:
- return IB_WC_FAST_REG_MR;
+ case IB_WR_REG_MR:
+ return IB_WC_REG_MR;
default:
pr_warn("unknown completion status\n");
@@ -125,11 +124,13 @@ static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
case MLX5_OPCODE_RDMA_WRITE_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
+ fallthrough;
case MLX5_OPCODE_RDMA_WRITE:
wc->opcode = IB_WC_RDMA_WRITE;
break;
case MLX5_OPCODE_SEND_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
+ fallthrough;
case MLX5_OPCODE_SEND:
case MLX5_OPCODE_SEND_INVAL:
wc->opcode = IB_WC_SEND;
@@ -154,9 +155,6 @@ static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
wc->opcode = IB_WC_MASKED_FETCH_ADD;
wc->byte_len = 8;
break;
- case MLX5_OPCODE_BIND_MW:
- wc->opcode = IB_WC_BIND_MW;
- break;
case MLX5_OPCODE_UMR:
wc->opcode = get_umr_comp(wq, idx);
break;
@@ -171,19 +169,22 @@ enum {
static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
struct mlx5_ib_qp *qp)
{
+ enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
- struct mlx5_ib_srq *srq;
+ struct mlx5_ib_srq *srq = NULL;
struct mlx5_ib_wq *wq;
u16 wqe_ctr;
+ u8 roce_packet_type;
+ bool vlan_present;
u8 g;
if (qp->ibqp.srq || qp->ibqp.xrcd) {
struct mlx5_core_srq *msrq = NULL;
if (qp->ibqp.xrcd) {
- msrq = mlx5_core_get_srq(dev->mdev,
- be32_to_cpu(cqe->srqn));
- srq = to_mibsrq(msrq);
+ msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
+ if (msrq)
+ srq = to_mibsrq(msrq);
} else {
srq = to_msrq(qp->ibqp.srq);
}
@@ -191,8 +192,8 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
wqe_ctr = be16_to_cpu(cqe->wqe_counter);
wc->wr_id = srq->wrid[wqe_ctr];
mlx5_ib_free_srq_wqe(srq, wqe_ctr);
- if (msrq && atomic_dec_and_test(&msrq->refcount))
- complete(&msrq->free);
+ if (msrq)
+ mlx5_core_res_put(&msrq->common);
}
} else {
wq = &qp->rq;
@@ -201,60 +202,88 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
}
wc->byte_len = be32_to_cpu(cqe->byte_cnt);
- switch (cqe->op_own >> 4) {
+ switch (get_cqe_opcode(cqe)) {
case MLX5_CQE_RESP_WR_IMM:
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = cqe->imm_inval_pkey;
+ wc->ex.imm_data = cqe->immediate;
break;
case MLX5_CQE_RESP_SEND:
wc->opcode = IB_WC_RECV;
- wc->wc_flags = 0;
+ wc->wc_flags = IB_WC_IP_CSUM_OK;
+ if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
+ (cqe->hds_ip_ext & CQE_L4_OK))))
+ wc->wc_flags = 0;
break;
case MLX5_CQE_RESP_SEND_IMM:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = cqe->imm_inval_pkey;
+ wc->ex.imm_data = cqe->immediate;
break;
case MLX5_CQE_RESP_SEND_INV:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_INVALIDATE;
- wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
+ wc->ex.invalidate_rkey = be32_to_cpu(cqe->inval_rkey);
break;
}
- wc->slid = be16_to_cpu(cqe->slid);
- wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
wc->dlid_path_bits = cqe->ml_path;
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
wc->wc_flags |= g ? IB_WC_GRH : 0;
- if (unlikely(is_qp1(qp->ibqp.qp_type))) {
- u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;
+ if (is_qp1(qp->type)) {
+ u16 pkey = be32_to_cpu(cqe->pkey) & 0xffff;
ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
&wc->pkey_index);
} else {
wc->pkey_index = 0;
}
+
+ if (ll != IB_LINK_LAYER_ETHERNET) {
+ wc->slid = be16_to_cpu(cqe->slid);
+ wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
+ return;
+ }
+
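+ /*
+ * RoCE: slid is meaningless; take sl/vlan from the CQE vlan_info
+ * and report the L3 header type to the consumer.
+ */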
+ wc->slid = 0;
+ vlan_present = cqe->l4_l3_hdr_type & 0x1;
+ roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
+ if (vlan_present) {
+ wc->vlan_id = (be16_to_cpu(cqe->vlan_info)) & 0xfff;
+ wc->sl = (be16_to_cpu(cqe->vlan_info) >> 13) & 0x7;
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ } else {
+ wc->sl = 0;
+ }
+
+ switch (roce_packet_type) {
+ case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
+ wc->network_hdr_type = RDMA_NETWORK_ROCE_V1;
+ break;
+ case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6:
+ wc->network_hdr_type = RDMA_NETWORK_IPV6;
+ break;
+ case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4:
+ wc->network_hdr_type = RDMA_NETWORK_IPV4;
+ break;
+ }
+ wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
}
-static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
+static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe,
+ struct ib_wc *wc, const char *level)
{
- __be32 *p = (__be32 *)cqe;
- int i;
-
- mlx5_ib_warn(dev, "dump error cqe\n");
- for (i = 0; i < sizeof(*cqe) / 16; i++, p += 4)
- pr_info("%08x %08x %08x %08x\n", be32_to_cpu(p[0]),
- be32_to_cpu(p[1]), be32_to_cpu(p[2]),
- be32_to_cpu(p[3]));
+ mlx5_ib_log(level, dev, "WC error: %d, Message: %s\n", wc->status,
+ ib_wc_status_msg(wc->status));
+ print_hex_dump(level, "cqe_dump: ", DUMP_PREFIX_OFFSET, 16, 1,
+ cqe, sizeof(*cqe), false);
}
static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
struct mlx5_err_cqe *cqe,
struct ib_wc *wc)
{
- int dump = 1;
+ const char *dump = KERN_WARNING;
switch (cqe->syndrome) {
case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
@@ -264,10 +293,11 @@ static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
wc->status = IB_WC_LOC_QP_OP_ERR;
break;
case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
+ dump = KERN_DEBUG;
wc->status = IB_WC_LOC_PROT_ERR;
break;
case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
- dump = 0;
+ dump = NULL;
wc->status = IB_WC_WR_FLUSH_ERR;
break;
case MLX5_CQE_SYNDROME_MW_BIND_ERR:
@@ -283,18 +313,20 @@ static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
wc->status = IB_WC_REM_INV_REQ_ERR;
break;
case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
+ dump = KERN_DEBUG;
wc->status = IB_WC_REM_ACCESS_ERR;
break;
case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
+ dump = KERN_DEBUG;
wc->status = IB_WC_REM_OP_ERR;
break;
case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
+ dump = NULL;
wc->status = IB_WC_RETRY_EXC_ERR;
- dump = 0;
break;
case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
+ dump = NULL;
wc->status = IB_WC_RNR_RETRY_EXC_ERR;
- dump = 0;
break;
case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
wc->status = IB_WC_REM_ABORT_ERR;
@@ -306,51 +338,7 @@ static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
wc->vendor_err = cqe->vendor_err_synd;
if (dump)
- dump_cqe(dev, cqe);
-}
-
-static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx)
-{
- /* TBD: waiting decision
- */
- return 0;
-}
-
-static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx)
-{
- struct mlx5_wqe_data_seg *dpseg;
- void *addr;
-
- dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) +
- sizeof(struct mlx5_wqe_raddr_seg) +
- sizeof(struct mlx5_wqe_atomic_seg);
- addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr);
- return addr;
-}
-
-static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
- uint16_t idx)
-{
- void *addr;
- int byte_count;
- int i;
-
- if (!is_atomic_response(qp, idx))
- return;
-
- byte_count = be32_to_cpu(cqe64->byte_cnt);
- addr = mlx5_get_atomic_laddr(qp, idx);
-
- if (byte_count == 4) {
- *(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr));
- } else {
- for (i = 0; i < byte_count; i += 8) {
- *(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr));
- addr += 8;
- }
- }
-
- return;
+ dump_cqe(dev, cqe, wc, dump);
}
static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
@@ -360,7 +348,6 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
do {
idx = tail & (qp->sq.wqe_cnt - 1);
- handle_atomic(qp, cqe64, idx);
if (idx == head)
break;
@@ -372,7 +359,7 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
- mlx5_buf_free(dev->mdev, &buf->buf);
+ mlx5_frag_buf_free(dev->mdev, &buf->frag_buf);
}
static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
@@ -407,6 +394,59 @@ static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
item->key = be32_to_cpu(cqe->mkey);
}
+static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
+ int *npolled, bool is_send)
+{
+ struct mlx5_ib_wq *wq;
+ unsigned int cur;
+ int np;
+ int i;
+
+ wq = (is_send) ? &qp->sq : &qp->rq;
+ cur = wq->head - wq->tail;
+ np = *npolled;
+
+ if (cur == 0)
+ return;
+
+ for (i = 0; i < cur && np < num_entries; i++) {
+ unsigned int idx;
+
+ idx = (is_send) ? wq->last_poll : wq->tail;
+ idx &= (wq->wqe_cnt - 1);
+ wc->wr_id = wq->wrid[idx];
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
+ wq->tail++;
+ if (is_send)
+ wq->last_poll = wq->w_list[idx].next;
+ np++;
+ wc->qp = &qp->ibqp;
+ wc++;
+ }
+ *npolled = np;
+}
+
+static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
+ struct ib_wc *wc, int *npolled)
+{
+ struct mlx5_ib_qp *qp;
+
+ *npolled = 0;
+ /* Find uncompleted WQEs belonging to that cq and return mimicked flush completions for them */
+ list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
+ sw_comp(qp, num_entries, wc + *npolled, npolled, true);
+ if (*npolled >= num_entries)
+ return;
+ }
+
+ list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
+ sw_comp(qp, num_entries, wc + *npolled, npolled, false);
+ if (*npolled >= num_entries)
+ return;
+ }
+}
+
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
struct mlx5_ib_qp **cur_qp,
struct ib_wc *wc)
@@ -416,9 +456,6 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
struct mlx5_cqe64 *cqe64;
struct mlx5_core_qp *mqp;
struct mlx5_ib_wq *wq;
- struct mlx5_sig_err_cqe *sig_err_cqe;
- struct mlx5_core_mr *mmr;
- struct mlx5_ib_mr *mr;
uint8_t opcode;
uint32_t qpn;
u16 wqe_ctr;
@@ -439,7 +476,7 @@ repoll:
*/
rmb();
- opcode = cqe64->op_own >> 4;
+ opcode = get_cqe_opcode(cqe64);
if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
if (likely(cq->resize_buf)) {
free_cq_buf(dev, &cq->buf);
@@ -453,18 +490,12 @@ repoll:
}
qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
- if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
+ if (!*cur_qp || (qpn != (*cur_qp)->trans_qp.base.mqp.qpn)) {
/* We do not have to take the QP table lock here,
* because CQs will be locked while QPs are removed
* from the table.
*/
- mqp = __mlx5_qp_lookup(dev->mdev, qpn);
- if (unlikely(!mqp)) {
- mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
- cq->mcq.cqn, qpn);
- return -EINVAL;
- }
-
+ mqp = radix_tree_lookup(&dev->qp_table.tree, qpn);
*cur_qp = to_mibqp(mqp);
}
@@ -498,6 +529,10 @@ repoll:
"Requestor" : "Responder", cq->mcq.cqn);
mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
err_cqe->syndrome, err_cqe->vendor_err_synd);
+ if (wc->status != IB_WC_WR_FLUSH_ERR &&
+ (*cur_qp)->type == MLX5_IB_QPT_REG_UMR)
+ dev->umrc.state = MLX5_UMR_STATE_RECOVER;
+
if (opcode == MLX5_CQE_REQ_ERR) {
wq = &(*cur_qp)->sq;
wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
@@ -519,177 +554,331 @@ repoll:
}
}
break;
- case MLX5_CQE_SIG_ERR:
- sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
-
- read_lock(&dev->mdev->priv.mr_table.lock);
- mmr = __mlx5_mr_lookup(dev->mdev,
- mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
- if (unlikely(!mmr)) {
- read_unlock(&dev->mdev->priv.mr_table.lock);
- mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
- cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
- return -EINVAL;
- }
-
- mr = to_mibmr(mmr);
- get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
- mr->sig->sig_err_exists = true;
- mr->sig->sigerr_count++;
+ case MLX5_CQE_SIG_ERR: {
+ struct mlx5_sig_err_cqe *sig_err_cqe =
+ (struct mlx5_sig_err_cqe *)cqe64;
+ struct mlx5_core_sig_ctx *sig;
+
+ xa_lock(&dev->sig_mrs);
+ sig = xa_load(&dev->sig_mrs,
+ mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
+ get_sig_err_item(sig_err_cqe, &sig->err_item);
+ sig->sig_err_exists = true;
+ sig->sigerr_count++;
mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
- cq->mcq.cqn, mr->sig->err_item.key,
- mr->sig->err_item.err_type,
- mr->sig->err_item.sig_err_offset,
- mr->sig->err_item.expected,
- mr->sig->err_item.actual);
+ cq->mcq.cqn, sig->err_item.key,
+ sig->err_item.err_type,
+ sig->err_item.sig_err_offset,
+ sig->err_item.expected,
+ sig->err_item.actual);
- read_unlock(&dev->mdev->priv.mr_table.lock);
+ xa_unlock(&dev->sig_mrs);
goto repoll;
}
+ }
return 0;
}
+static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
+ struct ib_wc *wc, bool is_fatal_err)
+{
+ struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
+ struct mlx5_ib_wc *soft_wc, *next;
+ int npolled = 0;
+
+ list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
+ if (npolled >= num_entries)
+ break;
+
+ mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
+ cq->mcq.cqn);
+
+ if (unlikely(is_fatal_err)) {
+ soft_wc->wc.status = IB_WC_WR_FLUSH_ERR;
+ soft_wc->wc.vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
+ }
+ wc[npolled++] = soft_wc->wc;
+ list_del(&soft_wc->list);
+ kfree(soft_wc);
+ }
+
+ return npolled;
+}
+
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
struct mlx5_ib_cq *cq = to_mcq(ibcq);
struct mlx5_ib_qp *cur_qp = NULL;
+ struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
+ struct mlx5_core_dev *mdev = dev->mdev;
unsigned long flags;
+ int soft_polled = 0;
int npolled;
- int err = 0;
spin_lock_irqsave(&cq->lock, flags);
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ /* make sure no soft WQEs are waiting */
+ if (unlikely(!list_empty(&cq->wc_list)))
+ soft_polled = poll_soft_wc(cq, num_entries, wc, true);
+
+ mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled,
+ wc + soft_polled, &npolled);
+ goto out;
+ }
- for (npolled = 0; npolled < num_entries; npolled++) {
- err = mlx5_poll_one(cq, &cur_qp, wc + npolled);
- if (err)
+ if (unlikely(!list_empty(&cq->wc_list)))
+ soft_polled = poll_soft_wc(cq, num_entries, wc, false);
+
+ for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
+ if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
break;
}
if (npolled)
mlx5_cq_set_ci(&cq->mcq);
-
+out:
spin_unlock_irqrestore(&cq->lock, flags);
- if (err == 0 || err == -EAGAIN)
- return npolled;
- else
- return err;
+ return soft_polled + npolled;
}
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
- void __iomem *uar_page = mdev->priv.uuari.uars[0].map;
+ struct mlx5_ib_cq *cq = to_mcq(ibcq);
+ void __iomem *uar_page = mdev->priv.bfreg.up->map;
+ unsigned long irq_flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&cq->lock, irq_flags);
+ if (cq->notify_flags != IB_CQ_NEXT_COMP)
+ cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;
- mlx5_cq_arm(&to_mcq(ibcq)->mcq,
+ if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
+ ret = 1;
+ spin_unlock_irqrestore(&cq->lock, irq_flags);
+
+ mlx5_cq_arm(&cq->mcq,
(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
- uar_page,
- MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock),
- to_mcq(ibcq)->mcq.cons_index);
+ uar_page, to_mcq(ibcq)->mcq.cons_index);
- return 0;
+ return ret;
}
-static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
- int nent, int cqe_size)
+static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_cq_buf *buf,
+ int nent,
+ int cqe_size)
{
+ struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
+ u8 log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0);
+ u8 log_wq_sz = ilog2(cqe_size);
int err;
- err = mlx5_buf_alloc(dev->mdev, nent * cqe_size, &buf->buf);
+ err = mlx5_frag_buf_alloc_node(dev->mdev,
+ nent * cqe_size,
+ frag_buf,
+ dev->mdev->priv.numa_node);
if (err)
return err;
+ mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);
+
buf->cqe_size = cqe_size;
buf->nent = nent;
return 0;
}
+enum {
+ MLX5_CQE_RES_FORMAT_HASH = 0,
+ MLX5_CQE_RES_FORMAT_CSUM = 1,
+ MLX5_CQE_RES_FORMAT_CSUM_STRIDX = 3,
+};
+
+static int mini_cqe_res_format_to_hw(struct mlx5_ib_dev *dev, u8 format)
+{
+ switch (format) {
+ case MLX5_IB_CQE_RES_FORMAT_HASH:
+ return MLX5_CQE_RES_FORMAT_HASH;
+ case MLX5_IB_CQE_RES_FORMAT_CSUM:
+ return MLX5_CQE_RES_FORMAT_CSUM;
+ case MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX:
+ if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
+ return MLX5_CQE_RES_FORMAT_CSUM_STRIDX;
+ return -EOPNOTSUPP;
+ default:
+ return -EINVAL;
+ }
+}
+
static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
- struct ib_ucontext *context, struct mlx5_ib_cq *cq,
- int entries, struct mlx5_create_cq_mbox_in **cqb,
- int *cqe_size, int *index, int *inlen)
+ struct mlx5_ib_cq *cq, int entries, u32 **cqb,
+ int *cqe_size, int *index, int *inlen,
+ struct uverbs_attr_bundle *attrs)
{
- struct mlx5_ib_create_cq ucmd;
+ struct mlx5_ib_create_cq ucmd = {};
+ unsigned long page_size;
+ unsigned int page_offset_quantized;
size_t ucmdlen;
- int page_shift;
- int npages;
+ __be64 *pas;
int ncont;
+ void *cqc;
int err;
+ struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
- ucmdlen =
- (udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
- sizeof(ucmd)) ? (sizeof(ucmd) -
- sizeof(ucmd.reserved)) : sizeof(ucmd);
+ ucmdlen = min(udata->inlen, sizeof(ucmd));
+ if (ucmdlen < offsetof(struct mlx5_ib_create_cq, flags))
+ return -EINVAL;
if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
return -EFAULT;
- if (ucmdlen == sizeof(ucmd) &&
- ucmd.reserved != 0)
+ if ((ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD |
+ MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX |
+ MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS)))
return -EINVAL;
- if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
+ if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) ||
+ ucmd.reserved0 || ucmd.reserved1)
return -EINVAL;
*cqe_size = ucmd.cqe_size;
- cq->buf.umem = ib_umem_get(context, ucmd.buf_addr,
- entries * ucmd.cqe_size,
- IB_ACCESS_LOCAL_WRITE, 1);
+ cq->buf.umem =
+ ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
+ entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->buf.umem)) {
err = PTR_ERR(cq->buf.umem);
return err;
}
- err = mlx5_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
- &cq->db);
+ page_size = mlx5_umem_find_best_cq_quantized_pgoff(
+ cq->buf.umem, cqc, log_page_size, MLX5_ADAPTER_PAGE_SHIFT,
+ page_offset, 64, &page_offset_quantized);
+ if (!page_size) {
+ err = -EINVAL;
+ goto err_umem;
+ }
+
+ err = mlx5_ib_db_map_user(context, ucmd.db_addr, &cq->db);
if (err)
goto err_umem;
- mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift,
- &ncont, NULL);
- mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
- ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);
+ ncont = ib_umem_num_dma_blocks(cq->buf.umem, page_size);
+ mlx5_ib_dbg(
+ dev,
+ "addr 0x%llx, size %u, npages %zu, page_size %lu, ncont %d\n",
+ ucmd.buf_addr, entries * ucmd.cqe_size,
+ ib_umem_num_pages(cq->buf.umem), page_size, ncont);
- *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * ncont;
- *cqb = mlx5_vzalloc(*inlen);
+ *inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
+ *cqb = kvzalloc(*inlen, GFP_KERNEL);
if (!*cqb) {
err = -ENOMEM;
goto err_db;
}
- mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0);
- (*cqb)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
- *index = to_mucontext(context)->uuari.uars[0].index;
+ pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
+ mlx5_ib_populate_pas(cq->buf.umem, page_size, pas, 0);
+
+ cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
+ MLX5_SET(cqc, cqc, log_page_size,
+ order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(cqc, cqc, page_offset, page_offset_quantized);
+
+ if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_CREATE_CQ_UAR_INDEX)) {
+ err = uverbs_copy_from(index, attrs, MLX5_IB_ATTR_CREATE_CQ_UAR_INDEX);
+ if (err)
+ goto err_cqb;
+ } else if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX) {
+ *index = ucmd.uar_page_index;
+ } else if (context->bfregi.lib_uar_dyn) {
+ err = -EINVAL;
+ goto err_cqb;
+ } else {
+ *index = context->bfregi.sys_pages[0];
+ }
+
+ if (ucmd.cqe_comp_en == 1) {
+ int mini_cqe_format;
+
+ if (!((*cqe_size == 128 &&
+ MLX5_CAP_GEN(dev->mdev, cqe_compression_128)) ||
+ (*cqe_size == 64 &&
+ MLX5_CAP_GEN(dev->mdev, cqe_compression)))) {
+ err = -EOPNOTSUPP;
+ mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
+ *cqe_size);
+ goto err_cqb;
+ }
+
+ mini_cqe_format =
+ mini_cqe_res_format_to_hw(dev,
+ ucmd.cqe_comp_res_format);
+ if (mini_cqe_format < 0) {
+ err = mini_cqe_format;
+ mlx5_ib_dbg(dev, "CQE compression res format %d error: %d\n",
+ ucmd.cqe_comp_res_format, err);
+ goto err_cqb;
+ }
+
+ MLX5_SET(cqc, cqc, cqe_comp_en, 1);
+ MLX5_SET(cqc, cqc, mini_cqe_res_format, mini_cqe_format);
+ }
+
+ if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD) {
+ if (*cqe_size != 128 ||
+ !MLX5_CAP_GEN(dev->mdev, cqe_128_always)) {
+ err = -EOPNOTSUPP;
+ mlx5_ib_warn(dev,
+ "CQE padding is not supported for CQE size of %dB!\n",
+ *cqe_size);
+ goto err_cqb;
+ }
+
+ cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
+ }
+
+ if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS)
+ cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS;
+ MLX5_SET(create_cq_in, *cqb, uid, context->devx_uid);
return 0;
+err_cqb:
+ kvfree(*cqb);
+
err_db:
- mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
+ mlx5_ib_db_unmap_user(context, &cq->db);
err_umem:
ib_umem_release(cq->buf.umem);
return err;
}
-static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
+static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
{
- mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
+ struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+
+ mlx5_ib_db_unmap_user(context, &cq->db);
ib_umem_release(cq->buf.umem);
}
-static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
+static void init_cq_frag_buf(struct mlx5_ib_cq_buf *buf)
{
int i;
void *cqe;
struct mlx5_cqe64 *cqe64;
for (i = 0; i < buf->nent; i++) {
- cqe = get_cqe_from_buf(buf, i, buf->cqe_size);
+ cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
cqe64->op_own = MLX5_CQE_INVALID << 4;
}
@@ -697,9 +886,10 @@ static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
int entries, int cqe_size,
- struct mlx5_create_cq_mbox_in **cqb,
- int *index, int *inlen)
+ u32 **cqb, int *index, int *inlen)
{
+ __be64 *pas;
+ void *cqc;
int err;
err = mlx5_db_alloc(dev->mdev, &cq->db);
@@ -710,22 +900,30 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
cq->mcq.arm_db = cq->db.db + 1;
cq->mcq.cqe_sz = cqe_size;
- err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size);
+ err = alloc_cq_frag_buf(dev, &cq->buf, entries, cqe_size);
if (err)
goto err_db;
- init_cq_buf(cq, &cq->buf);
+ init_cq_frag_buf(&cq->buf);
- *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages;
- *cqb = mlx5_vzalloc(*inlen);
+ *inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
+ cq->buf.frag_buf.npages;
+ *cqb = kvzalloc(*inlen, GFP_KERNEL);
if (!*cqb) {
err = -ENOMEM;
goto err_buf;
}
- mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);
- (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
- *index = dev->mdev->priv.uuari.uars[0].index;
+ pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
+ mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas);
+
+ cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
+ MLX5_SET(cqc, cqc, log_page_size,
+ cq->buf.frag_buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+
+ *index = dev->mdev->priv.bfreg.up->index;
return 0;
@@ -743,77 +941,99 @@ static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
mlx5_db_free(dev->mdev, &cq->db);
}
-struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+static void notify_soft_wc_handler(struct work_struct *work)
{
+ struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
+ notify_work);
+
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+}
+
+int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_udata *udata = &attrs->driver_udata;
+ struct ib_device *ibdev = ibcq->device;
int entries = attr->cqe;
int vector = attr->comp_vector;
- struct mlx5_create_cq_mbox_in *cqb = NULL;
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_ib_cq *cq;
- int uninitialized_var(index);
- int uninitialized_var(inlen);
+ struct mlx5_ib_cq *cq = to_mcq(ibcq);
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ int index;
+ int inlen;
+ u32 *cqb = NULL;
+ void *cqc;
int cqe_size;
- int irqn;
int eqn;
int err;
- if (attr->flags)
- return ERR_PTR(-EINVAL);
+ if (entries < 0 ||
+ (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
+ return -EINVAL;
- if (entries < 0)
- return ERR_PTR(-EINVAL);
+ if (check_cq_create_flags(attr->flags))
+ return -EOPNOTSUPP;
entries = roundup_pow_of_two(entries + 1);
if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
- return ERR_PTR(-EINVAL);
-
- cq = kzalloc(sizeof(*cq), GFP_KERNEL);
- if (!cq)
- return ERR_PTR(-ENOMEM);
+ return -EINVAL;
cq->ibcq.cqe = entries - 1;
mutex_init(&cq->resize_mutex);
spin_lock_init(&cq->lock);
cq->resize_buf = NULL;
cq->resize_umem = NULL;
+ cq->create_flags = attr->flags;
+ INIT_LIST_HEAD(&cq->list_send_qp);
+ INIT_LIST_HEAD(&cq->list_recv_qp);
- if (context) {
- err = create_cq_user(dev, udata, context, cq, entries,
- &cqb, &cqe_size, &index, &inlen);
+ if (udata) {
+ err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size,
+ &index, &inlen, attrs);
if (err)
- goto err_create;
+ return err;
} else {
- /* for now choose 64 bytes till we have a proper interface */
- cqe_size = 64;
+ cqe_size = cache_line_size() == 128 ? 128 : 64;
err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
&index, &inlen);
if (err)
- goto err_create;
+ return err;
+
+ INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
}
- cq->cqe_size = cqe_size;
- cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
- cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index);
- err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
+ err = mlx5_comp_eqn_get(dev->mdev, vector, &eqn);
if (err)
goto err_cqb;
- cqb->ctx.c_eqn = cpu_to_be16(eqn);
- cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);
+ cq->cqe_size = cqe_size;
- err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
+ cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
+ MLX5_SET(cqc, cqc, cqe_sz,
+ cqe_sz_to_mlx_sz(cqe_size,
+ cq->private_flags &
+ MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
+ MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
+ MLX5_SET(cqc, cqc, uar_page, index);
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
+ if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
+ MLX5_SET(cqc, cqc, oi, 1);
+
+ err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
if (err)
goto err_cqb;
mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
- cq->mcq.irqn = irqn;
- cq->mcq.comp = mlx5_ib_cq_comp;
+ if (udata)
+ cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
+ else
+ cq->mcq.comp = mlx5_ib_cq_comp;
cq->mcq.event = mlx5_ib_cq_event;
- if (context)
+ INIT_LIST_HEAD(&cq->wc_list);
+
+ if (udata)
if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
err = -EFAULT;
goto err_cmd;
@@ -821,42 +1041,45 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
kvfree(cqb);
- return &cq->ibcq;
+ return 0;
err_cmd:
mlx5_core_destroy_cq(dev->mdev, &cq->mcq);
err_cqb:
kvfree(cqb);
- if (context)
- destroy_cq_user(cq, context);
+ if (udata)
+ destroy_cq_user(cq, udata);
else
destroy_cq_kernel(dev, cq);
-
-err_create:
- kfree(cq);
-
- return ERR_PTR(err);
+ return err;
}
-
-int mlx5_ib_destroy_cq(struct ib_cq *cq)
+int mlx5_ib_pre_destroy_cq(struct ib_cq *cq)
{
struct mlx5_ib_dev *dev = to_mdev(cq->device);
struct mlx5_ib_cq *mcq = to_mcq(cq);
- struct ib_ucontext *context = NULL;
- if (cq->uobject)
- context = cq->uobject->context;
+ return mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
+}
- mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
- if (context)
- destroy_cq_user(mcq, context);
- else
- destroy_cq_kernel(dev, mcq);
+void mlx5_ib_post_destroy_cq(struct ib_cq *cq)
+{
+ destroy_cq_kernel(to_mdev(cq->device), to_mcq(cq));
+}
+
+int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+{
+ int ret;
- kfree(mcq);
+ ret = mlx5_ib_pre_destroy_cq(cq);
+ if (ret)
+ return ret;
+ if (udata)
+ destroy_cq_user(to_mcq(cq), udata);
+ else
+ mlx5_ib_post_destroy_cq(cq);
return 0;
}
@@ -928,27 +1151,18 @@ void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
- struct mlx5_modify_cq_mbox_in *in;
struct mlx5_ib_dev *dev = to_mdev(cq->device);
struct mlx5_ib_cq *mcq = to_mcq(cq);
int err;
- u32 fsel;
if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
- return -ENOSYS;
+ return -EOPNOTSUPP;
- in = kzalloc(sizeof(*in), GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- in->cqn = cpu_to_be32(mcq->mcq.cqn);
- fsel = (MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
- in->ctx.cq_period = cpu_to_be16(cq_period);
- in->ctx.cq_max_count = cpu_to_be16(cq_count);
- in->field_select = cpu_to_be32(fsel);
- err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in));
- kfree(in);
+ if (cq_period > MLX5_MAX_CQ_PERIOD)
+ return -EINVAL;
+ err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
+ cq_period, cq_count);
if (err)
mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);
@@ -956,14 +1170,12 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
}
static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
- int entries, struct ib_udata *udata, int *npas,
- int *page_shift, int *cqe_size)
+ int entries, struct ib_udata *udata,
+ int *cqe_size)
{
struct mlx5_ib_resize_cq ucmd;
struct ib_umem *umem;
int err;
- int npages;
- struct ib_ucontext *context = cq->buf.umem->context;
err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
if (err)
@@ -972,27 +1184,24 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
if (ucmd.reserved0 || ucmd.reserved1)
return -EINVAL;
- umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
- IB_ACCESS_LOCAL_WRITE, 1);
+ /* check multiplication overflow */
+ if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
+ return -EINVAL;
+
+ umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
+ (size_t)ucmd.cqe_size * entries,
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem)) {
err = PTR_ERR(umem);
return err;
}
- mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift,
- npas, NULL);
-
cq->resize_umem = umem;
*cqe_size = ucmd.cqe_size;
return 0;
}
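
For reference, the multiplication-overflow guard added in resize_user() relies on the identity that a * b fits in size_t exactly when a <= SIZE_MAX / b (for b != 0); dividing first can never overflow. A minimal standalone sketch of the same idiom follows (the helper name is illustrative only, not part of the patch):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* True if a * b would not fit in size_t; the division itself cannot overflow. */
static bool size_mul_overflows(size_t a, size_t b)
{
	return b != 0 && a > SIZE_MAX / b;
}

The driver writes the same test as SIZE_MAX / ucmd.cqe_size <= entries - 1, which for integer division is equivalent to entries * cqe_size exceeding SIZE_MAX.
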
-static void un_resize_user(struct mlx5_ib_cq *cq)
-{
- ib_umem_release(cq->resize_umem);
-}
-
static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
int entries, int cqe_size)
{
@@ -1002,11 +1211,11 @@ static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
if (!cq->resize_buf)
return -ENOMEM;
- err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size);
+ err = alloc_cq_frag_buf(dev, cq->resize_buf, entries, cqe_size);
if (err)
goto ex;
- init_cq_buf(cq, cq->resize_buf);
+ init_cq_frag_buf(cq->resize_buf);
return 0;
@@ -1015,12 +1224,6 @@ ex:
return err;
}
-static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
-{
- free_cq_buf(dev, cq->resize_buf);
- cq->resize_buf = NULL;
-}
-
static int copy_resize_cqes(struct mlx5_ib_cq *cq)
{
struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
@@ -1050,10 +1253,9 @@ static int copy_resize_cqes(struct mlx5_ib_cq *cq)
return -EINVAL;
}
- while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
- dcqe = get_cqe_from_buf(cq->resize_buf,
- (i + 1) & (cq->resize_buf->nent),
- dsize);
+ while (get_cqe_opcode(scqe64) != MLX5_CQE_RESIZE_CQ) {
+ dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc,
+ (i + 1) & cq->resize_buf->nent);
dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
memcpy(dcqe, scqe, dsize);
@@ -1081,12 +1283,15 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
struct mlx5_ib_cq *cq = to_mcq(ibcq);
- struct mlx5_modify_cq_mbox_in *in;
+ void *cqc;
+ u32 *in;
int err;
int npas;
- int page_shift;
+ __be64 *pas;
+ unsigned int page_offset_quantized = 0;
+ unsigned int page_shift;
int inlen;
- int uninitialized_var(cqe_size);
+ int cqe_size;
unsigned long flags;
if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
@@ -1094,11 +1299,16 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
return -ENOSYS;
}
- if (entries < 1)
+ if (entries < 1 ||
+ entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
+ mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
+ entries,
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
return -EINVAL;
+ }
entries = roundup_pow_of_two(entries + 1);
- if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
+ if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
return -EINVAL;
if (entries == ibcq->cqe + 1)
@@ -1106,42 +1316,69 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
mutex_lock(&cq->resize_mutex);
if (udata) {
- err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
- &cqe_size);
+ unsigned long page_size;
+
+ err = resize_user(dev, cq, entries, udata, &cqe_size);
+ if (err)
+ goto ex;
+
+ page_size = mlx5_umem_find_best_cq_quantized_pgoff(
+ cq->resize_umem, cqc, log_page_size,
+ MLX5_ADAPTER_PAGE_SHIFT, page_offset, 64,
+ &page_offset_quantized);
+ if (!page_size) {
+ err = -EINVAL;
+ goto ex_resize;
+ }
+ npas = ib_umem_num_dma_blocks(cq->resize_umem, page_size);
+ page_shift = order_base_2(page_size);
} else {
+ struct mlx5_frag_buf *frag_buf;
+
cqe_size = 64;
err = resize_kernel(dev, cq, entries, cqe_size);
- if (!err) {
- npas = cq->resize_buf->buf.npages;
- page_shift = cq->resize_buf->buf.page_shift;
- }
+ if (err)
+ goto ex;
+ frag_buf = &cq->resize_buf->frag_buf;
+ npas = frag_buf->npages;
+ page_shift = frag_buf->page_shift;
}
- if (err)
- goto ex;
+ inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
+ MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;
- inlen = sizeof(*in) + npas * sizeof(in->pas[0]);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto ex_resize;
}
+ pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
if (udata)
- mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
- in->pas, 0);
+ mlx5_ib_populate_pas(cq->resize_umem, 1UL << page_shift, pas,
+ 0);
else
- mlx5_fill_page_array(&cq->resize_buf->buf, in->pas);
-
- in->field_select = cpu_to_be32(MLX5_MODIFY_CQ_MASK_LOG_SIZE |
- MLX5_MODIFY_CQ_MASK_PG_OFFSET |
- MLX5_MODIFY_CQ_MASK_PG_SIZE);
- in->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
- in->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
- in->ctx.page_offset = 0;
- in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(entries) << 24);
- in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
- in->cqn = cpu_to_be32(cq->mcq.cqn);
+ mlx5_fill_page_frag_array(&cq->resize_buf->frag_buf, pas);
+
+ MLX5_SET(modify_cq_in, in,
+ modify_field_select_resize_field_select.resize_field_select.resize_field_select,
+ MLX5_MODIFY_CQ_MASK_LOG_SIZE |
+ MLX5_MODIFY_CQ_MASK_PG_OFFSET |
+ MLX5_MODIFY_CQ_MASK_PG_SIZE);
+
+ cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
+
+ MLX5_SET(cqc, cqc, log_page_size,
+ page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(cqc, cqc, page_offset, page_offset_quantized);
+ MLX5_SET(cqc, cqc, cqe_sz,
+ cqe_sz_to_mlx_sz(cqe_size,
+ cq->private_flags &
+ MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
+ MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
+
+ MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
+ MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);
err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
if (err)
@@ -1181,16 +1418,17 @@ ex_alloc:
kvfree(in);
ex_resize:
- if (udata)
- un_resize_user(cq);
- else
- un_resize_kernel(dev, cq);
+ ib_umem_release(cq->resize_umem);
+ if (!udata) {
+ free_cq_buf(dev, cq->resize_buf);
+ cq->resize_buf = NULL;
+ }
ex:
mutex_unlock(&cq->resize_mutex);
return err;
}
-int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
+int mlx5_ib_get_cqe_size(struct ib_cq *ibcq)
{
struct mlx5_ib_cq *cq;
@@ -1200,3 +1438,41 @@ int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
cq = to_mcq(ibcq);
return cq->cqe_size;
}
+
+/* Called from atomic context */
+int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
+{
+ struct mlx5_ib_wc *soft_wc;
+ struct mlx5_ib_cq *cq = to_mcq(ibcq);
+ unsigned long flags;
+
+ soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
+ if (!soft_wc)
+ return -ENOMEM;
+
+ soft_wc->wc = *wc;
+ spin_lock_irqsave(&cq->lock, flags);
+ list_add_tail(&soft_wc->list, &cq->wc_list);
+ if (cq->notify_flags == IB_CQ_NEXT_COMP ||
+ wc->status != IB_WC_SUCCESS) {
+ cq->notify_flags = 0;
+ schedule_work(&cq->notify_work);
+ }
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ return 0;
+}
+
+ADD_UVERBS_ATTRIBUTES_SIMPLE(
+ mlx5_ib_cq_create,
+ UVERBS_OBJECT_CQ,
+ UVERBS_METHOD_CQ_CREATE,
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_CREATE_CQ_UAR_INDEX,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL));
+
+const struct uapi_definition mlx5_ib_create_cq_defs[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_CQ, &mlx5_ib_cq_create),
+ {},
+};
diff --git a/drivers/infiniband/hw/mlx5/data_direct.c b/drivers/infiniband/hw/mlx5/data_direct.c
new file mode 100644
index 000000000000..b81ac5709b56
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/data_direct.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#include "mlx5_ib.h"
+#include "data_direct.h"
+
+static LIST_HEAD(mlx5_data_direct_dev_list);
+static LIST_HEAD(mlx5_data_direct_reg_list);
+
+/*
+ * This mutex should be held when accessing either of the above lists
+ */
+static DEFINE_MUTEX(mlx5_data_direct_mutex);
+
+struct mlx5_data_direct_registration {
+ struct mlx5_ib_dev *ibdev;
+ char vuid[MLX5_ST_SZ_BYTES(array1024_auto) + 1];
+ struct list_head list;
+};
+
+static const struct pci_device_id mlx5_data_direct_pci_table[] = {
+ { PCI_VDEVICE(MELLANOX, 0x2100) }, /* ConnectX-8 Data Direct */
+ { 0, }
+};
+
+static int mlx5_data_direct_vpd_get_vuid(struct mlx5_data_direct_dev *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+ unsigned int vpd_size, kw_len;
+ u8 *vpd_data;
+ int start;
+ int ret;
+
+ vpd_data = pci_vpd_alloc(pdev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_err(pdev, "Unable to read VPD, err=%pe\n", vpd_data);
+ return PTR_ERR(vpd_data);
+ }
+
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, "VU", &kw_len);
+ if (start < 0) {
+ ret = start;
+ pci_err(pdev, "VU keyword not found, err=%d\n", ret);
+ goto end;
+ }
+
+ dev->vuid = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);
+ ret = dev->vuid ? 0 : -ENOMEM;
+
+end:
+ kfree(vpd_data);
+ return ret;
+}
+
+static void mlx5_data_direct_shutdown(struct pci_dev *pdev)
+{
+ pci_disable_device(pdev);
+}
+
+static int mlx5_data_direct_set_dma_caps(struct pci_dev *pdev)
+{
+ int err;
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_warn(&pdev->dev,
+ "Warning: couldn't set 64-bit PCI DMA mask, err=%d\n", err);
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "Can't set PCI DMA mask, err=%d\n", err);
+ return err;
+ }
+ }
+
+ dma_set_max_seg_size(&pdev->dev, SZ_2G);
+ return 0;
+}
+
+int mlx5_data_direct_ib_reg(struct mlx5_ib_dev *ibdev, char *vuid)
+{
+ struct mlx5_data_direct_registration *reg;
+ struct mlx5_data_direct_dev *dev;
+
+ reg = kzalloc(sizeof(*reg), GFP_KERNEL);
+ if (!reg)
+ return -ENOMEM;
+
+ reg->ibdev = ibdev;
+ strcpy(reg->vuid, vuid);
+
+ mutex_lock(&mlx5_data_direct_mutex);
+ list_for_each_entry(dev, &mlx5_data_direct_dev_list, list) {
+ if (strcmp(dev->vuid, vuid) == 0) {
+ mlx5_ib_data_direct_bind(ibdev, dev);
+ break;
+ }
+ }
+
+ /* Add the registration to its global list, to be used upon bind/unbind
+ * of its affiliated data direct device
+ */
+ list_add_tail(&reg->list, &mlx5_data_direct_reg_list);
+ mutex_unlock(&mlx5_data_direct_mutex);
+ return 0;
+}
+
+void mlx5_data_direct_ib_unreg(struct mlx5_ib_dev *ibdev)
+{
+ struct mlx5_data_direct_registration *reg;
+
+ mutex_lock(&mlx5_data_direct_mutex);
+ list_for_each_entry(reg, &mlx5_data_direct_reg_list, list) {
+ if (reg->ibdev == ibdev) {
+ list_del(&reg->list);
+ kfree(reg);
+ goto end;
+ }
+ }
+
+ WARN_ON(true);
+end:
+ mutex_unlock(&mlx5_data_direct_mutex);
+}
+
+static void mlx5_data_direct_dev_reg(struct mlx5_data_direct_dev *dev)
+{
+ struct mlx5_data_direct_registration *reg;
+
+ mutex_lock(&mlx5_data_direct_mutex);
+ list_for_each_entry(reg, &mlx5_data_direct_reg_list, list) {
+ if (strcmp(dev->vuid, reg->vuid) == 0)
+ mlx5_ib_data_direct_bind(reg->ibdev, dev);
+ }
+
+	/* Add the data direct device to the global list; further IB devices may
+ * use it later as well
+ */
+ list_add_tail(&dev->list, &mlx5_data_direct_dev_list);
+ mutex_unlock(&mlx5_data_direct_mutex);
+}
+
+static void mlx5_data_direct_dev_unreg(struct mlx5_data_direct_dev *dev)
+{
+ struct mlx5_data_direct_registration *reg;
+
+ mutex_lock(&mlx5_data_direct_mutex);
+ /* Prevent any further affiliations */
+ list_del(&dev->list);
+ list_for_each_entry(reg, &mlx5_data_direct_reg_list, list) {
+ if (strcmp(dev->vuid, reg->vuid) == 0)
+ mlx5_ib_data_direct_unbind(reg->ibdev);
+ }
+ mutex_unlock(&mlx5_data_direct_mutex);
+}
+
+static int mlx5_data_direct_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct mlx5_data_direct_dev *dev;
+ int err;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->device = &pdev->dev;
+ dev->pdev = pdev;
+
+ pci_set_drvdata(dev->pdev, dev);
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev->device, "Cannot enable PCI device, err=%d\n", err);
+ goto err;
+ }
+
+ pci_set_master(pdev);
+ err = mlx5_data_direct_set_dma_caps(pdev);
+ if (err)
+ goto err_disable;
+
+ if (pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP32) &&
+ pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64) &&
+ pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP128))
+ dev_dbg(dev->device, "Enabling pci atomics failed\n");
+
+ err = mlx5_data_direct_vpd_get_vuid(dev);
+ if (err)
+ goto err_disable;
+
+ mlx5_data_direct_dev_reg(dev);
+ return 0;
+
+err_disable:
+ pci_disable_device(pdev);
+err:
+ kfree(dev);
+ return err;
+}
+
+static void mlx5_data_direct_remove(struct pci_dev *pdev)
+{
+ struct mlx5_data_direct_dev *dev = pci_get_drvdata(pdev);
+
+ mlx5_data_direct_dev_unreg(dev);
+ pci_disable_device(pdev);
+ kfree(dev->vuid);
+ kfree(dev);
+}
+
+static struct pci_driver mlx5_data_direct_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mlx5_data_direct_pci_table,
+ .probe = mlx5_data_direct_probe,
+ .remove = mlx5_data_direct_remove,
+ .shutdown = mlx5_data_direct_shutdown,
+};
+
+int mlx5_data_direct_driver_register(void)
+{
+ return pci_register_driver(&mlx5_data_direct_driver);
+}
+
+void mlx5_data_direct_driver_unregister(void)
+{
+ pci_unregister_driver(&mlx5_data_direct_driver);
+}
diff --git a/drivers/infiniband/hw/mlx5/data_direct.h b/drivers/infiniband/hw/mlx5/data_direct.h
new file mode 100644
index 000000000000..2fd2bdbe8f69
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/data_direct.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#ifndef _MLX5_IB_DATA_DIRECT_H
+#define _MLX5_IB_DATA_DIRECT_H
+
+struct mlx5_ib_dev;
+
+struct mlx5_data_direct_dev {
+ struct device *device;
+ struct pci_dev *pdev;
+ char *vuid;
+ struct list_head list;
+};
+
+int mlx5_data_direct_ib_reg(struct mlx5_ib_dev *ibdev, char *vuid);
+void mlx5_data_direct_ib_unreg(struct mlx5_ib_dev *ibdev);
+int mlx5_data_direct_driver_register(void);
+void mlx5_data_direct_driver_unregister(void);
+
+#endif
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
new file mode 100644
index 000000000000..8b506417ad2f
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -0,0 +1,3228 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/uverbs_types.h>
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/mlx5_user_ioctl_verbs.h>
+#include <rdma/ib_umem.h>
+#include <rdma/uverbs_std_types.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/fs.h>
+#include <rdma/ib_ucaps.h>
+#include "mlx5_ib.h"
+#include "devx.h"
+#include "qp.h"
+#include <linux/xarray.h>
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
+static void dispatch_event_fd(struct list_head *fd_list, const void *data);
+
+enum devx_obj_flags {
+ DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
+ DEVX_OBJ_FLAGS_DCT = 1 << 1,
+ DEVX_OBJ_FLAGS_CQ = 1 << 2,
+ DEVX_OBJ_FLAGS_HW_FREED = 1 << 3,
+};
+
+#define MAX_ASYNC_CMDS 8
+
+struct mlx5_async_cmd {
+ struct ib_uobject *uobject;
+ void *in;
+ int in_size;
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ int err;
+ struct mlx5_async_work cb_work;
+ struct completion comp;
+};
+
+struct devx_async_data {
+ struct mlx5_ib_dev *mdev;
+ struct list_head list;
+ struct devx_async_cmd_event_file *ev_file;
+ struct mlx5_async_work cb_work;
+ u16 cmd_out_len;
+ /* must be last field in this structure */
+ struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
+};
+
+struct devx_async_event_data {
+ struct list_head list; /* headed in ev_file->event_list */
+ struct mlx5_ib_uapi_devx_async_event_hdr hdr;
+};
+
+/* first level XA value data structure */
+struct devx_event {
+ struct xarray object_ids; /* second XA level, Key = object id */
+ struct list_head unaffiliated_list;
+};
+
+/* second level XA value data structure */
+struct devx_obj_event {
+ struct rcu_head rcu;
+ struct list_head obj_sub_list;
+};
+
+struct devx_event_subscription {
+ struct list_head file_list; /* headed in ev_file->
+ * subscribed_events_list
+ */
+ struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
+ * devx_obj_event->obj_sub_list
+ */
+ struct list_head obj_list; /* headed in devx_object */
+ struct list_head event_list; /* headed in ev_file->event_list or in
+ * temp list via subscription
+ */
+
+ u8 is_cleaned:1;
+ u32 xa_key_level1;
+ u32 xa_key_level2;
+ struct rcu_head rcu;
+ u64 cookie;
+ struct devx_async_event_file *ev_file;
+ struct eventfd_ctx *eventfd;
+};
+
+struct devx_async_event_file {
+ struct ib_uobject uobj;
+ /* Head of events that are subscribed to this FD */
+ struct list_head subscribed_events_list;
+ spinlock_t lock;
+ wait_queue_head_t poll_wait;
+ struct list_head event_list;
+ struct mlx5_ib_dev *dev;
+ u8 omit_data:1;
+ u8 is_overflow_err:1;
+ u8 is_destroyed:1;
+};
+
+struct devx_umem {
+ struct mlx5_core_dev *mdev;
+ struct ib_umem *umem;
+ u32 dinlen;
+ u32 dinbox[MLX5_ST_SZ_DW(destroy_umem_in)];
+};
+
+struct devx_umem_reg_cmd {
+ void *in;
+ u32 inlen;
+ u32 out[MLX5_ST_SZ_DW(create_umem_out)];
+};
+
+static struct mlx5_ib_ucontext *
+devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
+{
+ return to_mucontext(ib_uverbs_get_ucontext(attrs));
+}
+
+static int set_uctx_ucaps(struct mlx5_ib_dev *dev, u64 req_ucaps, u32 *cap)
+{
+ if (UCAP_ENABLED(req_ucaps, RDMA_UCAP_MLX5_CTRL_LOCAL)) {
+ if (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RDMA_CTRL)
+ *cap |= MLX5_UCTX_CAP_RDMA_CTRL;
+ else
+ return -EOPNOTSUPP;
+ }
+
+ if (UCAP_ENABLED(req_ucaps, RDMA_UCAP_MLX5_CTRL_OTHER_VHCA)) {
+ if (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
+ MLX5_UCTX_CAP_RDMA_CTRL_OTHER_VHCA)
+ *cap |= MLX5_UCTX_CAP_RDMA_CTRL_OTHER_VHCA;
+ else
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user, u64 req_ucaps)
+{
+ u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
+ void *uctx;
+ int err;
+ u16 uid;
+ u32 cap = 0;
+
+ /* 0 means not supported */
+ if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
+ return -EINVAL;
+
+ uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
+ if (is_user &&
+ (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX) &&
+ rdma_dev_has_raw_cap(&dev->ib_dev))
+ cap |= MLX5_UCTX_CAP_RAW_TX;
+ if (is_user &&
+ (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
+ MLX5_UCTX_CAP_INTERNAL_DEV_RES) &&
+ capable(CAP_SYS_RAWIO))
+ cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES;
+
+ if (req_ucaps) {
+ err = set_uctx_ucaps(dev, req_ucaps, &cap);
+ if (err)
+ return err;
+ }
+
+ MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
+ MLX5_SET(uctx, uctx, cap, cap);
+
+ err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ uid = MLX5_GET(create_uctx_out, out, uid);
+ return uid;
+}
+
+void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};
+
+ MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
+ MLX5_SET(destroy_uctx_in, in, uid, uid);
+
+ mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static bool is_legacy_unaffiliated_event_num(u16 event_num)
+{
+ switch (event_num) {
+ case MLX5_EVENT_TYPE_PORT_CHANGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_legacy_obj_event_num(u16 event_num)
+{
+ switch (event_num) {
+ case MLX5_EVENT_TYPE_PATH_MIG:
+ case MLX5_EVENT_TYPE_COMM_EST:
+ case MLX5_EVENT_TYPE_SQ_DRAINED:
+ case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
+ case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
+ case MLX5_EVENT_TYPE_CQ_ERROR:
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
+ case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+ case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
+ case MLX5_EVENT_TYPE_DCT_DRAINED:
+ case MLX5_EVENT_TYPE_COMP:
+ case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
+ case MLX5_EVENT_TYPE_XRQ_ERROR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static u16 get_legacy_obj_type(u16 opcode)
+{
+ switch (opcode) {
+ case MLX5_CMD_OP_CREATE_RQ:
+ case MLX5_CMD_OP_CREATE_RMP:
+ return MLX5_EVENT_QUEUE_TYPE_RQ;
+ case MLX5_CMD_OP_CREATE_QP:
+ return MLX5_EVENT_QUEUE_TYPE_QP;
+ case MLX5_CMD_OP_CREATE_SQ:
+ return MLX5_EVENT_QUEUE_TYPE_SQ;
+ case MLX5_CMD_OP_CREATE_DCT:
+ return MLX5_EVENT_QUEUE_TYPE_DCT;
+ default:
+ return 0;
+ }
+}
+
+static u16 get_dec_obj_type(struct devx_obj *obj, u16 event_num)
+{
+ u16 opcode;
+
+ opcode = (obj->obj_id >> 32) & 0xffff;
+
+ if (is_legacy_obj_event_num(event_num))
+ return get_legacy_obj_type(opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
+ return (obj->obj_id >> 48);
+ case MLX5_CMD_OP_CREATE_RQ:
+ return MLX5_OBJ_TYPE_RQ;
+ case MLX5_CMD_OP_CREATE_QP:
+ return MLX5_OBJ_TYPE_QP;
+ case MLX5_CMD_OP_CREATE_SQ:
+ return MLX5_OBJ_TYPE_SQ;
+ case MLX5_CMD_OP_CREATE_DCT:
+ return MLX5_OBJ_TYPE_DCT;
+ case MLX5_CMD_OP_CREATE_TIR:
+ return MLX5_OBJ_TYPE_TIR;
+ case MLX5_CMD_OP_CREATE_TIS:
+ return MLX5_OBJ_TYPE_TIS;
+ case MLX5_CMD_OP_CREATE_PSV:
+ return MLX5_OBJ_TYPE_PSV;
+ case MLX5_OBJ_TYPE_MKEY:
+ return MLX5_OBJ_TYPE_MKEY;
+ case MLX5_CMD_OP_CREATE_RMP:
+ return MLX5_OBJ_TYPE_RMP;
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ return MLX5_OBJ_TYPE_XRC_SRQ;
+ case MLX5_CMD_OP_CREATE_XRQ:
+ return MLX5_OBJ_TYPE_XRQ;
+ case MLX5_CMD_OP_CREATE_RQT:
+ return MLX5_OBJ_TYPE_RQT;
+ case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
+ return MLX5_OBJ_TYPE_FLOW_COUNTER;
+ case MLX5_CMD_OP_CREATE_CQ:
+ return MLX5_OBJ_TYPE_CQ;
+ default:
+ return 0;
+ }
+}
+
+static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
+{
+ switch (event_type) {
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+ case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
+ case MLX5_EVENT_TYPE_PATH_MIG:
+ case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
+ case MLX5_EVENT_TYPE_COMM_EST:
+ case MLX5_EVENT_TYPE_SQ_DRAINED:
+ case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
+ case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
+ return eqe->data.qp_srq.type;
+ case MLX5_EVENT_TYPE_CQ_ERROR:
+ case MLX5_EVENT_TYPE_XRQ_ERROR:
+ return 0;
+ case MLX5_EVENT_TYPE_DCT_DRAINED:
+ case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
+ return MLX5_EVENT_QUEUE_TYPE_DCT;
+ default:
+ return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
+ }
+}
+
+static u32 get_dec_obj_id(u64 obj_id)
+{
+ return (obj_id & 0xffffffff);
+}
+
+/*
+ * As the obj_id in the firmware is not globally unique, the object type
+ * must be considered when checking for a valid object id.
+ * For that purpose, the opcode of the creator command is encoded as part of
+ * the obj_id.
+ */
+static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
+{
+ return ((u64)opcode << 32) | obj_id;
+}
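
As an aside, the encoding above is simply "creator opcode in the high bits": the opcode lands in bits 32..47 (with the general-object type OR'd into bits 48..63 when applicable) and the firmware object id occupies the low 32 bits. A minimal standalone sketch of the round trip, with illustrative values and names only (not part of the patch):

#include <assert.h>
#include <stdint.h>

/* Illustrative userspace mirror of get_enc_obj_id()/get_dec_obj_id(). */
static uint64_t enc_obj_id(uint32_t opcode, uint32_t obj_id)
{
	return ((uint64_t)opcode << 32) | obj_id;
}

int main(void)
{
	/* 0x400 stands in for a creator opcode such as CREATE_CQ; object id 0x12. */
	uint64_t id = enc_obj_id(0x400, 0x12);

	assert((uint32_t)(id & 0xffffffff) == 0x12); /* as in get_dec_obj_id() */
	assert(((id >> 32) & 0xffff) == 0x400);      /* creator opcode         */
	return 0;
}
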
+
+static u32 devx_get_created_obj_id(const void *in, const void *out, u16 opcode)
+{
+ switch (opcode) {
+ case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
+ return MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ case MLX5_CMD_OP_CREATE_UMEM:
+ return MLX5_GET(create_umem_out, out, umem_id);
+ case MLX5_CMD_OP_CREATE_MKEY:
+ return MLX5_GET(create_mkey_out, out, mkey_index);
+ case MLX5_CMD_OP_CREATE_CQ:
+ return MLX5_GET(create_cq_out, out, cqn);
+ case MLX5_CMD_OP_ALLOC_PD:
+ return MLX5_GET(alloc_pd_out, out, pd);
+ case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
+ return MLX5_GET(alloc_transport_domain_out, out,
+ transport_domain);
+ case MLX5_CMD_OP_CREATE_RMP:
+ return MLX5_GET(create_rmp_out, out, rmpn);
+ case MLX5_CMD_OP_CREATE_SQ:
+ return MLX5_GET(create_sq_out, out, sqn);
+ case MLX5_CMD_OP_CREATE_RQ:
+ return MLX5_GET(create_rq_out, out, rqn);
+ case MLX5_CMD_OP_CREATE_RQT:
+ return MLX5_GET(create_rqt_out, out, rqtn);
+ case MLX5_CMD_OP_CREATE_TIR:
+ return MLX5_GET(create_tir_out, out, tirn);
+ case MLX5_CMD_OP_CREATE_TIS:
+ return MLX5_GET(create_tis_out, out, tisn);
+ case MLX5_CMD_OP_ALLOC_Q_COUNTER:
+ return MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+ case MLX5_CMD_OP_CREATE_FLOW_TABLE:
+ return MLX5_GET(create_flow_table_out, out, table_id);
+ case MLX5_CMD_OP_CREATE_FLOW_GROUP:
+ return MLX5_GET(create_flow_group_out, out, group_id);
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ return MLX5_GET(set_fte_in, in, flow_index);
+ case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
+ return MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
+ case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
+ return MLX5_GET(alloc_packet_reformat_context_out, out,
+ packet_reformat_id);
+ case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
+ return MLX5_GET(alloc_modify_header_context_out, out,
+ modify_header_id);
+ case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+ return MLX5_GET(create_scheduling_element_out, out,
+ scheduling_element_id);
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ return MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ return MLX5_GET(set_l2_table_entry_in, in, table_index);
+ case MLX5_CMD_OP_CREATE_QP:
+ return MLX5_GET(create_qp_out, out, qpn);
+ case MLX5_CMD_OP_CREATE_SRQ:
+ return MLX5_GET(create_srq_out, out, srqn);
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ return MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
+ case MLX5_CMD_OP_CREATE_DCT:
+ return MLX5_GET(create_dct_out, out, dctn);
+ case MLX5_CMD_OP_CREATE_XRQ:
+ return MLX5_GET(create_xrq_out, out, xrqn);
+ case MLX5_CMD_OP_ATTACH_TO_MCG:
+ return MLX5_GET(attach_to_mcg_in, in, qpn);
+ case MLX5_CMD_OP_ALLOC_XRCD:
+ return MLX5_GET(alloc_xrcd_out, out, xrcd);
+ case MLX5_CMD_OP_CREATE_PSV:
+ return MLX5_GET(create_psv_out, out, psv0_index);
+ default:
+		/* The entry must match one of the commands handled by devx_is_obj_create_cmd() */
+ WARN_ON(true);
+ return 0;
+ }
+}
+
+static u64 devx_get_obj_id(const void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+ u64 obj_id;
+
+ switch (opcode) {
+ case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT |
+ MLX5_GET(general_obj_in_cmd_hdr, in,
+ obj_type) << 16,
+ MLX5_GET(general_obj_in_cmd_hdr, in,
+ obj_id));
+ break;
+ case MLX5_CMD_OP_QUERY_MKEY:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
+ MLX5_GET(query_mkey_in, in,
+ mkey_index));
+ break;
+ case MLX5_CMD_OP_QUERY_CQ:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
+ MLX5_GET(query_cq_in, in, cqn));
+ break;
+ case MLX5_CMD_OP_MODIFY_CQ:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
+ MLX5_GET(modify_cq_in, in, cqn));
+ break;
+ case MLX5_CMD_OP_QUERY_SQ:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
+ MLX5_GET(query_sq_in, in, sqn));
+ break;
+ case MLX5_CMD_OP_MODIFY_SQ:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
+ MLX5_GET(modify_sq_in, in, sqn));
+ break;
+ case MLX5_CMD_OP_QUERY_RQ:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
+ MLX5_GET(query_rq_in, in, rqn));
+ break;
+ case MLX5_CMD_OP_MODIFY_RQ:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
+ MLX5_GET(modify_rq_in, in, rqn));
+ break;
+ case MLX5_CMD_OP_QUERY_RMP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
+ MLX5_GET(query_rmp_in, in, rmpn));
+ break;
+ case MLX5_CMD_OP_MODIFY_RMP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
+ MLX5_GET(modify_rmp_in, in, rmpn));
+ break;
+ case MLX5_CMD_OP_QUERY_RQT:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
+ MLX5_GET(query_rqt_in, in, rqtn));
+ break;
+ case MLX5_CMD_OP_MODIFY_RQT:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
+ MLX5_GET(modify_rqt_in, in, rqtn));
+ break;
+ case MLX5_CMD_OP_QUERY_TIR:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
+ MLX5_GET(query_tir_in, in, tirn));
+ break;
+ case MLX5_CMD_OP_MODIFY_TIR:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
+ MLX5_GET(modify_tir_in, in, tirn));
+ break;
+ case MLX5_CMD_OP_QUERY_TIS:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
+ MLX5_GET(query_tis_in, in, tisn));
+ break;
+ case MLX5_CMD_OP_MODIFY_TIS:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
+ MLX5_GET(modify_tis_in, in, tisn));
+ break;
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
+ MLX5_GET(query_flow_table_in, in,
+ table_id));
+ break;
+ case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
+ MLX5_GET(modify_flow_table_in, in,
+ table_id));
+ break;
+ case MLX5_CMD_OP_QUERY_FLOW_GROUP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
+ MLX5_GET(query_flow_group_in, in,
+ group_id));
+ break;
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
+ MLX5_GET(query_fte_in, in,
+ flow_index));
+ break;
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
+ MLX5_GET(set_fte_in, in, flow_index));
+ break;
+ case MLX5_CMD_OP_QUERY_Q_COUNTER:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
+ MLX5_GET(query_q_counter_in, in,
+ counter_set_id));
+ break;
+ case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
+ MLX5_GET(query_flow_counter_in, in,
+ flow_counter_id));
+ break;
+ case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
+ MLX5_GET(query_modify_header_context_in,
+ in, modify_header_id));
+ break;
+ case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
+ MLX5_GET(query_scheduling_element_in,
+ in, scheduling_element_id));
+ break;
+ case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
+ MLX5_GET(modify_scheduling_element_in,
+ in, scheduling_element_id));
+ break;
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
+ MLX5_GET(add_vxlan_udp_dport_in, in,
+ vxlan_udp_port));
+ break;
+ case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
+ MLX5_GET(query_l2_table_entry_in, in,
+ table_index));
+ break;
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
+ MLX5_GET(set_l2_table_entry_in, in,
+ table_index));
+ break;
+ case MLX5_CMD_OP_QUERY_QP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(query_qp_in, in, qpn));
+ break;
+ case MLX5_CMD_OP_RST2INIT_QP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(rst2init_qp_in, in, qpn));
+ break;
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(init2init_qp_in, in, qpn));
+ break;
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(init2rtr_qp_in, in, qpn));
+ break;
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(rtr2rts_qp_in, in, qpn));
+ break;
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(rts2rts_qp_in, in, qpn));
+ break;
+ case MLX5_CMD_OP_SQERR2RTS_QP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(sqerr2rts_qp_in, in, qpn));
+ break;
+ case MLX5_CMD_OP_2ERR_QP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(qp_2err_in, in, qpn));
+ break;
+ case MLX5_CMD_OP_2RST_QP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(qp_2rst_in, in, qpn));
+ break;
+ case MLX5_CMD_OP_QUERY_DCT:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
+ MLX5_GET(query_dct_in, in, dctn));
+ break;
+ case MLX5_CMD_OP_QUERY_XRQ:
+ case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
+ case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
+ MLX5_GET(query_xrq_in, in, xrqn));
+ break;
+ case MLX5_CMD_OP_QUERY_XRC_SRQ:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
+ MLX5_GET(query_xrc_srq_in, in,
+ xrc_srqn));
+ break;
+ case MLX5_CMD_OP_ARM_XRC_SRQ:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
+ MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
+ break;
+ case MLX5_CMD_OP_QUERY_SRQ:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
+ MLX5_GET(query_srq_in, in, srqn));
+ break;
+ case MLX5_CMD_OP_ARM_RQ:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
+ MLX5_GET(arm_rq_in, in, srq_number));
+ break;
+ case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
+ MLX5_GET(drain_dct_in, in, dctn));
+ break;
+ case MLX5_CMD_OP_ARM_XRQ:
+ case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
+ case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
+ case MLX5_CMD_OP_MODIFY_XRQ:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
+ MLX5_GET(arm_xrq_in, in, xrqn));
+ break;
+ case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
+ obj_id = get_enc_obj_id
+ (MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
+ MLX5_GET(query_packet_reformat_context_in,
+ in, packet_reformat_id));
+ break;
+ default:
+ obj_id = 0;
+ }
+
+ return obj_id;
+}
+
+static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
+ struct ib_uobject *uobj, const void *in)
+{
+ struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
+ u64 obj_id = devx_get_obj_id(in);
+
+ if (!obj_id)
+ return false;
+
+ switch (uobj_get_object_id(uobj)) {
+ case UVERBS_OBJECT_CQ:
+ return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
+ to_mcq(uobj->object)->mcq.cqn) ==
+ obj_id;
+
+ case UVERBS_OBJECT_SRQ:
+ {
+ struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
+ u16 opcode;
+
+ switch (srq->common.res) {
+ case MLX5_RES_XSRQ:
+ opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
+ break;
+ case MLX5_RES_XRQ:
+ opcode = MLX5_CMD_OP_CREATE_XRQ;
+ break;
+ default:
+ if (!dev->mdev->issi)
+ opcode = MLX5_CMD_OP_CREATE_SRQ;
+ else
+ opcode = MLX5_CMD_OP_CREATE_RMP;
+ }
+
+ return get_enc_obj_id(opcode,
+ to_msrq(uobj->object)->msrq.srqn) ==
+ obj_id;
+ }
+
+ case UVERBS_OBJECT_QP:
+ {
+ struct mlx5_ib_qp *qp = to_mqp(uobj->object);
+
+ if (qp->type == IB_QPT_RAW_PACKET ||
+ (qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
+ struct mlx5_ib_raw_packet_qp *raw_packet_qp =
+ &qp->raw_packet_qp;
+ struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
+ struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
+
+ return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
+ rq->base.mqp.qpn) == obj_id ||
+ get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
+ sq->base.mqp.qpn) == obj_id ||
+ get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
+ rq->tirn) == obj_id ||
+ get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
+ sq->tisn) == obj_id);
+ }
+
+ if (qp->type == MLX5_IB_QPT_DCT)
+ return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
+ qp->dct.mdct.mqp.qpn) == obj_id;
+ return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ qp->ibqp.qp_num) == obj_id;
+ }
+
+ case UVERBS_OBJECT_WQ:
+ return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
+ to_mrwq(uobj->object)->core_qp.qpn) ==
+ obj_id;
+
+ case UVERBS_OBJECT_RWQ_IND_TBL:
+ return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
+ to_mrwq_ind_table(uobj->object)->rqtn) ==
+ obj_id;
+
+ case MLX5_IB_OBJECT_DEVX_OBJ:
+ {
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+ struct devx_obj *devx_uobj = uobj->object;
+
+ if (opcode == MLX5_CMD_OP_QUERY_FLOW_COUNTER &&
+ devx_uobj->flow_counter_bulk_size) {
+ u64 end;
+
+ end = devx_uobj->obj_id +
+ devx_uobj->flow_counter_bulk_size;
+ return devx_uobj->obj_id <= obj_id && end > obj_id;
+ }
+
+ return devx_uobj->obj_id == obj_id;
+ }
+
+ default:
+ return false;
+ }
+}
+
+static void devx_set_umem_valid(const void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_CREATE_MKEY:
+ MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
+ break;
+ case MLX5_CMD_OP_CREATE_CQ:
+ {
+ void *cqc;
+
+ MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
+ break;
+ }
+ case MLX5_CMD_OP_CREATE_QP:
+ {
+ void *qpc;
+
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
+ MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_CREATE_RQ:
+ {
+ void *rqc, *wq;
+
+ rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ MLX5_SET(wq, wq, dbr_umem_valid, 1);
+ MLX5_SET(wq, wq, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_CREATE_SQ:
+ {
+ void *sqc, *wq;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+ MLX5_SET(wq, wq, dbr_umem_valid, 1);
+ MLX5_SET(wq, wq, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_MODIFY_CQ:
+ MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
+ break;
+
+ case MLX5_CMD_OP_CREATE_RMP:
+ {
+ void *rmpc, *wq;
+
+ rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
+ wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+ MLX5_SET(wq, wq, dbr_umem_valid, 1);
+ MLX5_SET(wq, wq, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_CREATE_XRQ:
+ {
+ void *xrqc, *wq;
+
+ xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
+ wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
+ MLX5_SET(wq, wq, dbr_umem_valid, 1);
+ MLX5_SET(wq, wq, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ {
+ void *xrc_srqc;
+
+ MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
+ xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
+ xrc_srq_context_entry);
+ MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
+ break;
+ }
+
+ default:
+ return;
+ }
+}
+
+static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
+{
+ *opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (*opcode) {
+ case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
+ case MLX5_CMD_OP_CREATE_MKEY:
+ case MLX5_CMD_OP_CREATE_CQ:
+ case MLX5_CMD_OP_ALLOC_PD:
+ case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
+ case MLX5_CMD_OP_CREATE_RMP:
+ case MLX5_CMD_OP_CREATE_SQ:
+ case MLX5_CMD_OP_CREATE_RQ:
+ case MLX5_CMD_OP_CREATE_RQT:
+ case MLX5_CMD_OP_CREATE_TIR:
+ case MLX5_CMD_OP_CREATE_TIS:
+ case MLX5_CMD_OP_ALLOC_Q_COUNTER:
+ case MLX5_CMD_OP_CREATE_FLOW_TABLE:
+ case MLX5_CMD_OP_CREATE_FLOW_GROUP:
+ case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
+ case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
+ case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
+ case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_CREATE_QP:
+ case MLX5_CMD_OP_CREATE_SRQ:
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ case MLX5_CMD_OP_CREATE_DCT:
+ case MLX5_CMD_OP_CREATE_XRQ:
+ case MLX5_CMD_OP_ATTACH_TO_MCG:
+ case MLX5_CMD_OP_ALLOC_XRCD:
+ return true;
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ {
+ u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
+ if (op_mod == 0)
+ return true;
+ return false;
+ }
+ case MLX5_CMD_OP_CREATE_PSV:
+ {
+ u8 num_psv = MLX5_GET(create_psv_in, in, num_psv);
+
+ if (num_psv == 1)
+ return true;
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+static bool devx_is_obj_modify_cmd(const void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_MODIFY_CQ:
+ case MLX5_CMD_OP_MODIFY_RMP:
+ case MLX5_CMD_OP_MODIFY_SQ:
+ case MLX5_CMD_OP_MODIFY_RQ:
+ case MLX5_CMD_OP_MODIFY_RQT:
+ case MLX5_CMD_OP_MODIFY_TIR:
+ case MLX5_CMD_OP_MODIFY_TIS:
+ case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
+ case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_RST2INIT_QP:
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ case MLX5_CMD_OP_SQERR2RTS_QP:
+ case MLX5_CMD_OP_2ERR_QP:
+ case MLX5_CMD_OP_2RST_QP:
+ case MLX5_CMD_OP_ARM_XRC_SRQ:
+ case MLX5_CMD_OP_ARM_RQ:
+ case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
+ case MLX5_CMD_OP_ARM_XRQ:
+ case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
+ case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
+ case MLX5_CMD_OP_MODIFY_XRQ:
+ return true;
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ {
+ u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
+
+ if (op_mod == 1)
+ return true;
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+static bool devx_is_obj_query_cmd(const void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_QUERY_MKEY:
+ case MLX5_CMD_OP_QUERY_CQ:
+ case MLX5_CMD_OP_QUERY_RMP:
+ case MLX5_CMD_OP_QUERY_SQ:
+ case MLX5_CMD_OP_QUERY_RQ:
+ case MLX5_CMD_OP_QUERY_RQT:
+ case MLX5_CMD_OP_QUERY_TIR:
+ case MLX5_CMD_OP_QUERY_TIS:
+ case MLX5_CMD_OP_QUERY_Q_COUNTER:
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE:
+ case MLX5_CMD_OP_QUERY_FLOW_GROUP:
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
+ case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
+ case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_QUERY_QP:
+ case MLX5_CMD_OP_QUERY_SRQ:
+ case MLX5_CMD_OP_QUERY_XRC_SRQ:
+ case MLX5_CMD_OP_QUERY_DCT:
+ case MLX5_CMD_OP_QUERY_XRQ:
+ case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
+ case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
+ case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool devx_is_whitelist_cmd(void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_QUERY_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
+{
+ if (devx_is_whitelist_cmd(cmd_in)) {
+ struct mlx5_ib_dev *dev;
+
+ if (c->devx_uid)
+ return c->devx_uid;
+
+ dev = to_mdev(c->ibucontext.device);
+ if (dev->devx_whitelist_uid)
+ return dev->devx_whitelist_uid;
+
+ return -EOPNOTSUPP;
+ }
+
+ if (!c->devx_uid)
+ return -EINVAL;
+
+ return c->devx_uid;
+}
+
+static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ /* Pass all cmds for vhca_tunnel as general, tracking is done in FW */
+ if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) &&
+ MLX5_GET(general_obj_in_cmd_hdr, in, vhca_tunnel_id)) ||
+ (opcode >= MLX5_CMD_OP_GENERAL_START &&
+ opcode < MLX5_CMD_OP_GENERAL_END))
+ return true;
+
+ switch (opcode) {
+ case MLX5_CMD_OP_QUERY_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_VPORT_STATE:
+ case MLX5_CMD_OP_QUERY_ADAPTER:
+ case MLX5_CMD_OP_QUERY_ISSI:
+ case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
+ case MLX5_CMD_OP_QUERY_VNIC_ENV:
+ case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
+ case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
+ case MLX5_CMD_OP_NOP:
+ case MLX5_CMD_OP_QUERY_CONG_STATUS:
+ case MLX5_CMD_OP_QUERY_CONG_PARAMS:
+ case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
+ case MLX5_CMD_OP_QUERY_LAG:
+ case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_dev *dev;
+ int user_vector;
+ int dev_eqn;
+ int err;
+
+ if (uverbs_copy_from(&user_vector, attrs,
+ MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
+ return -EFAULT;
+
+ c = devx_ufile2uctx(attrs);
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ dev = to_mdev(c->ibucontext.device);
+
+ err = mlx5_comp_eqn_get(dev->mdev, user_vector, &dev_eqn);
+ if (err < 0)
+ return err;
+
+ if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
+ &dev_eqn, sizeof(dev_eqn)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * Security note:
+ * The hardware protection mechanism works like this: each device object that
+ * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
+ * the device specification manual) upon its creation. Then, upon a doorbell,
+ * hardware fetches the object context for which the doorbell was rung, and
+ * validates that the UAR through which the DB was rung matches the UAR ID
+ * of the object.
+ * If there is no match, the doorbell is silently ignored by the hardware. Of
+ * course, the user cannot ring a doorbell on a UAR that was not mapped to it.
+ * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
+ * mailboxes (except tagging them with UID), we expose the user's own UAR ID
+ * to it, so it can embed that ID in these objects in the expected
+ * specification format. So the only thing the user can do is hurt itself by
+ * creating a QP/SQ/CQ with a UAR ID other than its own, in which case other
+ * users may ring a doorbell on its objects.
+ * The consequence of that is that another user can schedule a QP/SQ of the
+ * buggy user for execution (just insert it into the hardware schedule queue
+ * or arm its CQ for event generation); no further harm is expected.
+ */
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_dev *dev;
+ u32 user_idx;
+ s32 dev_idx;
+
+ c = devx_ufile2uctx(attrs);
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ dev = to_mdev(c->ibucontext.device);
+
+ if (uverbs_copy_from(&user_idx, attrs,
+ MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
+ return -EFAULT;
+
+ dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
+ if (dev_idx < 0)
+ return dev_idx;
+
+ if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
+ &dev_idx, sizeof(dev_idx)))
+ return -EFAULT;
+
+ return 0;
+}
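
A minimal userspace sketch of the doorbell-validation model described in the security note above, assuming a toy object layout (struct toy_obj, toy_doorbell are invented for the demo) rather than the device's real context format; it only illustrates why a doorbell rung through a UAR other than the object's own uar_page is dropped.

#include <stdbool.h>
#include <stdio.h>

/* Toy model: each doorbell-capable object records the UAR ID it was
 * created with (uar_page in the real device context).
 */
struct toy_obj {
	unsigned int uar_page;
};

/* Model of the hardware check: the doorbell is honored only when it
 * arrives through the same UAR the object was created with; otherwise
 * it is silently dropped.
 */
static bool toy_doorbell(const struct toy_obj *obj, unsigned int ringing_uar)
{
	return obj->uar_page == ringing_uar;
}

int main(void)
{
	struct toy_obj qp = { .uar_page = 7 };

	printf("own UAR:   %s\n", toy_doorbell(&qp, 7) ? "accepted" : "ignored");
	printf("other UAR: %s\n", toy_doorbell(&qp, 9) ? "accepted" : "ignored");
	return 0;
}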
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_dev *dev;
+ void *cmd_in = uverbs_attr_get_alloced_ptr(
+ attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
+ int cmd_out_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
+ void *cmd_out;
+ int err, err2;
+ int uid;
+
+ c = devx_ufile2uctx(attrs);
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ dev = to_mdev(c->ibucontext.device);
+
+ uid = devx_get_uid(c, cmd_in);
+ if (uid < 0)
+ return uid;
+
+ /* Only a whitelist of general HCA commands is allowed for this method. */
+ if (!devx_is_general_cmd(cmd_in, dev))
+ return -EINVAL;
+
+ cmd_out = uverbs_zalloc(attrs, cmd_out_len);
+ if (IS_ERR(cmd_out))
+ return PTR_ERR(cmd_out);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
+ err = mlx5_cmd_do(dev->mdev, cmd_in,
+ uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
+ cmd_out, cmd_out_len);
+ if (err && err != -EREMOTEIO)
+ return err;
+
+ err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
+ cmd_out_len);
+
+ return err2 ?: err;
+}
+
+static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
+ u32 *dinlen,
+ u32 *obj_id)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+ u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);
+
+ *obj_id = devx_get_created_obj_id(in, out, opcode);
+ *dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);
+ MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, obj_type,
+ MLX5_GET(general_obj_in_cmd_hdr, in, obj_type));
+ break;
+
+ case MLX5_CMD_OP_CREATE_UMEM:
+ MLX5_SET(destroy_umem_in, din, opcode,
+ MLX5_CMD_OP_DESTROY_UMEM);
+ MLX5_SET(destroy_umem_in, din, umem_id, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_MKEY:
+ MLX5_SET(destroy_mkey_in, din, opcode,
+ MLX5_CMD_OP_DESTROY_MKEY);
+ MLX5_SET(destroy_mkey_in, din, mkey_index, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_CQ:
+ MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
+ MLX5_SET(destroy_cq_in, din, cqn, *obj_id);
+ break;
+ case MLX5_CMD_OP_ALLOC_PD:
+ MLX5_SET(dealloc_pd_in, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
+ MLX5_SET(dealloc_pd_in, din, pd, *obj_id);
+ break;
+ case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
+ MLX5_SET(dealloc_transport_domain_in, din, opcode,
+ MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
+ MLX5_SET(dealloc_transport_domain_in, din, transport_domain,
+ *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_RMP:
+ MLX5_SET(destroy_rmp_in, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
+ MLX5_SET(destroy_rmp_in, din, rmpn, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_SQ:
+ MLX5_SET(destroy_sq_in, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
+ MLX5_SET(destroy_sq_in, din, sqn, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_RQ:
+ MLX5_SET(destroy_rq_in, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
+ MLX5_SET(destroy_rq_in, din, rqn, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_RQT:
+ MLX5_SET(destroy_rqt_in, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ MLX5_SET(destroy_rqt_in, din, rqtn, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_TIR:
+ MLX5_SET(destroy_tir_in, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
+ MLX5_SET(destroy_tir_in, din, tirn, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_TIS:
+ MLX5_SET(destroy_tis_in, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ MLX5_SET(destroy_tis_in, din, tisn, *obj_id);
+ break;
+ case MLX5_CMD_OP_ALLOC_Q_COUNTER:
+ MLX5_SET(dealloc_q_counter_in, din, opcode,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+ MLX5_SET(dealloc_q_counter_in, din, counter_set_id, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_FLOW_TABLE:
+ *dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
+ MLX5_SET(destroy_flow_table_in, din, other_vport,
+ MLX5_GET(create_flow_table_in, in, other_vport));
+ MLX5_SET(destroy_flow_table_in, din, vport_number,
+ MLX5_GET(create_flow_table_in, in, vport_number));
+ MLX5_SET(destroy_flow_table_in, din, table_type,
+ MLX5_GET(create_flow_table_in, in, table_type));
+ MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
+ MLX5_SET(destroy_flow_table_in, din, opcode,
+ MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+ break;
+ case MLX5_CMD_OP_CREATE_FLOW_GROUP:
+ *dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
+ MLX5_SET(destroy_flow_group_in, din, other_vport,
+ MLX5_GET(create_flow_group_in, in, other_vport));
+ MLX5_SET(destroy_flow_group_in, din, vport_number,
+ MLX5_GET(create_flow_group_in, in, vport_number));
+ MLX5_SET(destroy_flow_group_in, din, table_type,
+ MLX5_GET(create_flow_group_in, in, table_type));
+ MLX5_SET(destroy_flow_group_in, din, table_id,
+ MLX5_GET(create_flow_group_in, in, table_id));
+ MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
+ MLX5_SET(destroy_flow_group_in, din, opcode,
+ MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+ break;
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ *dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
+ MLX5_SET(delete_fte_in, din, other_vport,
+ MLX5_GET(set_fte_in, in, other_vport));
+ MLX5_SET(delete_fte_in, din, vport_number,
+ MLX5_GET(set_fte_in, in, vport_number));
+ MLX5_SET(delete_fte_in, din, table_type,
+ MLX5_GET(set_fte_in, in, table_type));
+ MLX5_SET(delete_fte_in, din, table_id,
+ MLX5_GET(set_fte_in, in, table_id));
+ MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
+ MLX5_SET(delete_fte_in, din, opcode,
+ MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+ break;
+ case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
+ MLX5_SET(dealloc_flow_counter_in, din, opcode,
+ MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
+ MLX5_SET(dealloc_flow_counter_in, din, flow_counter_id,
+ *obj_id);
+ break;
+ case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
+ MLX5_SET(dealloc_packet_reformat_context_in, din, opcode,
+ MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
+ MLX5_SET(dealloc_packet_reformat_context_in, din,
+ packet_reformat_id, *obj_id);
+ break;
+ case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
+ MLX5_SET(dealloc_modify_header_context_in, din, opcode,
+ MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
+ MLX5_SET(dealloc_modify_header_context_in, din,
+ modify_header_id, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+ *dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
+ MLX5_SET(destroy_scheduling_element_in, din,
+ scheduling_hierarchy,
+ MLX5_GET(create_scheduling_element_in, in,
+ scheduling_hierarchy));
+ MLX5_SET(destroy_scheduling_element_in, din,
+ scheduling_element_id, *obj_id);
+ MLX5_SET(destroy_scheduling_element_in, din, opcode,
+ MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
+ break;
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ *dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
+ MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
+ MLX5_SET(delete_vxlan_udp_dport_in, din, opcode,
+ MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
+ break;
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ *dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
+ MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
+ MLX5_SET(delete_l2_table_entry_in, din, opcode,
+ MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
+ break;
+ case MLX5_CMD_OP_CREATE_QP:
+ MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, din, qpn, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_SRQ:
+ MLX5_SET(destroy_srq_in, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
+ MLX5_SET(destroy_srq_in, din, srqn, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ MLX5_SET(destroy_xrc_srq_in, din, opcode,
+ MLX5_CMD_OP_DESTROY_XRC_SRQ);
+ MLX5_SET(destroy_xrc_srq_in, din, xrc_srqn, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_DCT:
+ MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
+ MLX5_SET(destroy_dct_in, din, dctn, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_XRQ:
+ MLX5_SET(destroy_xrq_in, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
+ MLX5_SET(destroy_xrq_in, din, xrqn, *obj_id);
+ break;
+ case MLX5_CMD_OP_ATTACH_TO_MCG:
+ *dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
+ MLX5_SET(detach_from_mcg_in, din, qpn,
+ MLX5_GET(attach_to_mcg_in, in, qpn));
+ memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
+ MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
+ MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
+ MLX5_SET(detach_from_mcg_in, din, opcode,
+ MLX5_CMD_OP_DETACH_FROM_MCG);
+ MLX5_SET(detach_from_mcg_in, din, qpn, *obj_id);
+ break;
+ case MLX5_CMD_OP_ALLOC_XRCD:
+ MLX5_SET(dealloc_xrcd_in, din, opcode,
+ MLX5_CMD_OP_DEALLOC_XRCD);
+ MLX5_SET(dealloc_xrcd_in, din, xrcd, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_PSV:
+ MLX5_SET(destroy_psv_in, din, opcode,
+ MLX5_CMD_OP_DESTROY_PSV);
+ MLX5_SET(destroy_psv_in, din, psvn, *obj_id);
+ break;
+ default:
+ /* The opcode must match one of the cases handled by devx_is_obj_create_cmd() */
+ WARN_ON(true);
+ break;
+ }
+}
+
+static int devx_handle_mkey_indirect(struct devx_obj *obj,
+ struct mlx5_ib_dev *dev,
+ void *in, void *out)
+{
+ struct mlx5_ib_mkey *mkey = &obj->mkey;
+ void *mkc;
+ u8 key;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ key = MLX5_GET(mkc, mkc, mkey_7_0);
+ mkey->key = mlx5_idx_to_mkey(
+ MLX5_GET(create_mkey_out, out, mkey_index)) | key;
+ mkey->type = MLX5_MKEY_INDIRECT_DEVX;
+ mkey->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
+ init_waitqueue_head(&mkey->wait);
+
+ return mlx5r_store_odp_mkey(dev, mkey);
+}
+
+static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
+ struct devx_obj *obj,
+ void *in, int in_len)
+{
+ int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
+ MLX5_FLD_SZ_BYTES(create_mkey_in,
+ memory_key_mkey_entry);
+ void *mkc;
+ u8 access_mode;
+
+ if (in_len < min_len)
+ return -EINVAL;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
+ access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;
+
+ if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
+ access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+ obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
+ return 0;
+ }
+
+ MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
+ /* TPH is not allowed to bypass the regular kernel's verbs flow */
+ MLX5_SET(mkc, mkc, pcie_tph_en, 0);
+ MLX5_SET(mkc, mkc, pcie_tph_steering_tag_index,
+ MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX);
+ return 0;
+}
+
+static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
+ struct devx_event_subscription *sub)
+{
+ struct devx_event *event;
+ struct devx_obj_event *xa_val_level2;
+
+ if (sub->is_cleaned)
+ return;
+
+ sub->is_cleaned = 1;
+ list_del_rcu(&sub->xa_list);
+
+ if (list_empty(&sub->obj_list))
+ return;
+
+ list_del_rcu(&sub->obj_list);
+ /* check whether key level 1 for this obj_sub_list is empty */
+ event = xa_load(&dev->devx_event_table.event_xa,
+ sub->xa_key_level1);
+ WARN_ON(!event);
+
+ xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2);
+ if (list_empty(&xa_val_level2->obj_sub_list)) {
+ xa_erase(&event->object_ids,
+ sub->xa_key_level2);
+ kfree_rcu(xa_val_level2, rcu);
+ }
+}
+
+static int devx_obj_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ struct mlx5_devx_event_table *devx_event_table;
+ struct devx_obj *obj = uobject->object;
+ struct devx_event_subscription *sub_entry, *tmp;
+ struct mlx5_ib_dev *dev;
+ int ret;
+
+ dev = mlx5_udata_to_mdev(&attrs->driver_udata);
+ if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY &&
+ xa_erase(&obj->ib_dev->odp_mkeys,
+ mlx5_base_mkey(obj->mkey.key)))
+ /*
+ * pagefault_single_data_segment() issues commands against the
+ * mmkey; we must wait for that to stop before freeing the mkey,
+ * as another allocation could get the same mkey number.
+ */
+ mlx5r_deref_wait_odp_mkey(&obj->mkey);
+
+ if (obj->flags & DEVX_OBJ_FLAGS_HW_FREED)
+ ret = 0;
+ else if (obj->flags & DEVX_OBJ_FLAGS_DCT)
+ ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
+ else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
+ ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
+ else
+ ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
+ obj->dinlen, out, sizeof(out));
+ if (ret)
+ return ret;
+
+ devx_event_table = &dev->devx_event_table;
+
+ mutex_lock(&devx_event_table->event_xa_lock);
+ list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list)
+ devx_cleanup_subscription(dev, sub_entry);
+ mutex_unlock(&devx_event_table->event_xa_lock);
+
+ kfree(obj);
+ return ret;
+}
+
+static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
+{
+ struct devx_obj *obj = container_of(mcq, struct devx_obj, core_cq);
+ struct mlx5_devx_event_table *table;
+ struct devx_event *event;
+ struct devx_obj_event *obj_event;
+ u32 obj_id = mcq->cqn;
+
+ table = &obj->ib_dev->devx_event_table;
+ rcu_read_lock();
+ event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP);
+ if (!event)
+ goto out;
+
+ obj_event = xa_load(&event->object_ids, obj_id);
+ if (!obj_event)
+ goto out;
+
+ dispatch_event_fd(&obj_event->obj_sub_list, eqe);
+out:
+ rcu_read_unlock();
+}
+
+static bool is_apu_cq(struct mlx5_ib_dev *dev, const void *in)
+{
+ if (!MLX5_CAP_GEN(dev->mdev, apu) ||
+ !MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), apu_cq))
+ return false;
+
+ return true;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
+ int cmd_out_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
+ int cmd_in_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
+ void *cmd_out;
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
+ struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ struct devx_obj *obj;
+ u16 obj_type = 0;
+ int err, err2 = 0;
+ int uid;
+ u32 obj_id;
+ u16 opcode;
+
+ if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
+ return -EINVAL;
+
+ uid = devx_get_uid(c, cmd_in);
+ if (uid < 0)
+ return uid;
+
+ if (!devx_is_obj_create_cmd(cmd_in, &opcode))
+ return -EINVAL;
+
+ cmd_out = uverbs_zalloc(attrs, cmd_out_len);
+ if (IS_ERR(cmd_out))
+ return PTR_ERR(cmd_out);
+
+ obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
+ if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
+ err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
+ if (err)
+ goto obj_free;
+ } else {
+ devx_set_umem_valid(cmd_in);
+ }
+
+ if (opcode == MLX5_CMD_OP_CREATE_DCT) {
+ obj->flags |= DEVX_OBJ_FLAGS_DCT;
+ err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in,
+ cmd_in_len, cmd_out, cmd_out_len);
+ } else if (opcode == MLX5_CMD_OP_CREATE_CQ &&
+ !is_apu_cq(dev, cmd_in)) {
+ obj->flags |= DEVX_OBJ_FLAGS_CQ;
+ obj->core_cq.comp = devx_cq_comp;
+ err = mlx5_create_cq(dev->mdev, &obj->core_cq,
+ cmd_in, cmd_in_len, cmd_out,
+ cmd_out_len);
+ } else {
+ err = mlx5_cmd_do(dev->mdev, cmd_in, cmd_in_len,
+ cmd_out, cmd_out_len);
+ }
+
+ if (err == -EREMOTEIO)
+ err2 = uverbs_copy_to(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
+ cmd_out, cmd_out_len);
+ if (err)
+ goto obj_free;
+
+ if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) {
+ u32 bulk = MLX5_GET(alloc_flow_counter_in,
+ cmd_in,
+ flow_counter_bulk_log_size);
+
+ if (bulk)
+ bulk = 1 << bulk;
+ else
+ bulk = 128UL * MLX5_GET(alloc_flow_counter_in,
+ cmd_in,
+ flow_counter_bulk);
+ obj->flow_counter_bulk_size = bulk;
+ }
+
+ uobj->object = obj;
+ INIT_LIST_HEAD(&obj->event_sub);
+ obj->ib_dev = dev;
+ devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
+ &obj_id);
+ WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
+ if (err)
+ goto obj_destroy;
+
+ if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
+ obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
+ obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);
+
+ if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
+ err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
+ if (err)
+ goto obj_destroy;
+ }
+ return 0;
+
+obj_destroy:
+ if (obj->flags & DEVX_OBJ_FLAGS_DCT)
+ mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
+ else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
+ mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
+ else
+ mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out,
+ sizeof(out));
+obj_free:
+ kfree(obj);
+ return err2 ?: err;
+}
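
The ALLOC_FLOW_COUNTER branch above picks the bulk size from either the log-based field (2^log) or the legacy field counted in units of 128 counters. A small standalone sketch of that arithmetic, with the field names shortened and the demo values made up:

#include <stdio.h>

/* Mirror of the bulk-size arithmetic in the ALLOC_FLOW_COUNTER branch:
 * a non-zero log_size wins and means 2^log_size counters, otherwise the
 * legacy field counts bulks of 128 counters.
 */
static unsigned long bulk_size(unsigned int log_size, unsigned int legacy_bulk)
{
	if (log_size)
		return 1UL << log_size;
	return 128UL * legacy_bulk;
}

int main(void)
{
	printf("log_size=10           -> %lu counters\n", bulk_size(10, 0));
	printf("log_size=0, legacy=4  -> %lu counters\n", bulk_size(0, 4));
	printf("log_size=0, legacy=0  -> %lu counters\n", bulk_size(0, 0));
	return 0;
}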
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
+ int cmd_out_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
+ struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
+ void *cmd_out;
+ int err, err2;
+ int uid;
+
+ if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
+ return -EINVAL;
+
+ uid = devx_get_uid(c, cmd_in);
+ if (uid < 0)
+ return uid;
+
+ if (!devx_is_obj_modify_cmd(cmd_in))
+ return -EINVAL;
+
+ if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
+ return -EINVAL;
+
+ cmd_out = uverbs_zalloc(attrs, cmd_out_len);
+ if (IS_ERR(cmd_out))
+ return PTR_ERR(cmd_out);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
+ devx_set_umem_valid(cmd_in);
+
+ err = mlx5_cmd_do(mdev->mdev, cmd_in,
+ uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
+ cmd_out, cmd_out_len);
+ if (err && err != -EREMOTEIO)
+ return err;
+
+ err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
+ cmd_out, cmd_out_len);
+
+ return err2 ?: err;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
+ int cmd_out_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
+ struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ void *cmd_out;
+ int err, err2;
+ int uid;
+ struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
+
+ if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
+ return -EINVAL;
+
+ uid = devx_get_uid(c, cmd_in);
+ if (uid < 0)
+ return uid;
+
+ if (!devx_is_obj_query_cmd(cmd_in))
+ return -EINVAL;
+
+ if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
+ return -EINVAL;
+
+ cmd_out = uverbs_zalloc(attrs, cmd_out_len);
+ if (IS_ERR(cmd_out))
+ return PTR_ERR(cmd_out);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
+ err = mlx5_cmd_do(mdev->mdev, cmd_in,
+ uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
+ cmd_out, cmd_out_len);
+ if (err && err != -EREMOTEIO)
+ return err;
+
+ err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
+ cmd_out, cmd_out_len);
+
+ return err2 ?: err;
+}
+
+struct devx_async_event_queue {
+ spinlock_t lock;
+ wait_queue_head_t poll_wait;
+ struct list_head event_list;
+ atomic_t bytes_in_use;
+ u8 is_destroyed:1;
+};
+
+struct devx_async_cmd_event_file {
+ struct ib_uobject uobj;
+ struct devx_async_event_queue ev_queue;
+ struct mlx5_async_ctx async_ctx;
+};
+
+static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
+{
+ spin_lock_init(&ev_queue->lock);
+ INIT_LIST_HEAD(&ev_queue->event_list);
+ init_waitqueue_head(&ev_queue->poll_wait);
+ atomic_set(&ev_queue->bytes_in_use, 0);
+ ev_queue->is_destroyed = 0;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct devx_async_cmd_event_file *ev_file;
+
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
+ struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
+
+ ev_file = container_of(uobj, struct devx_async_cmd_event_file,
+ uobj);
+ devx_init_event_queue(&ev_file->ev_queue);
+ mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
+ return 0;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE);
+ struct devx_async_event_file *ev_file;
+ struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+ u32 flags;
+ int err;
+
+ err = uverbs_get_flags32(&flags, attrs,
+ MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
+ MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA);
+
+ if (err)
+ return err;
+
+ ev_file = container_of(uobj, struct devx_async_event_file,
+ uobj);
+ spin_lock_init(&ev_file->lock);
+ INIT_LIST_HEAD(&ev_file->event_list);
+ init_waitqueue_head(&ev_file->poll_wait);
+ if (flags & MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA)
+ ev_file->omit_data = 1;
+ INIT_LIST_HEAD(&ev_file->subscribed_events_list);
+ ev_file->dev = dev;
+ get_device(&dev->ib_dev.dev);
+ return 0;
+}
+
+static void devx_query_callback(int status, struct mlx5_async_work *context)
+{
+ struct devx_async_data *async_data =
+ container_of(context, struct devx_async_data, cb_work);
+ struct devx_async_cmd_event_file *ev_file = async_data->ev_file;
+ struct devx_async_event_queue *ev_queue = &ev_file->ev_queue;
+ unsigned long flags;
+
+ /*
+ * Note that if the struct devx_async_cmd_event_file uobj begins to be
+ * destroyed, it will block at mlx5_cmd_cleanup_async_ctx() until this
+ * routine returns, ensuring that it always remains valid here.
+ */
+ spin_lock_irqsave(&ev_queue->lock, flags);
+ list_add_tail(&async_data->list, &ev_queue->event_list);
+ spin_unlock_irqrestore(&ev_queue->lock, flags);
+
+ wake_up_interruptible(&ev_queue->poll_wait);
+}
+
+#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
+ u16 cmd_out_len;
+ struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ struct ib_uobject *fd_uobj;
+ int err;
+ int uid;
+ struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
+ struct devx_async_cmd_event_file *ev_file;
+ struct devx_async_data *async_data;
+
+ if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
+ return -EINVAL;
+
+ uid = devx_get_uid(c, cmd_in);
+ if (uid < 0)
+ return uid;
+
+ if (!devx_is_obj_query_cmd(cmd_in))
+ return -EINVAL;
+
+ err = uverbs_get_const(&cmd_out_len, attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
+ if (err)
+ return err;
+
+ if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
+ return -EINVAL;
+
+ fd_uobj = uverbs_attr_get_uobject(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
+ if (IS_ERR(fd_uobj))
+ return PTR_ERR(fd_uobj);
+
+ ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
+ uobj);
+
+ if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
+ MAX_ASYNC_BYTES_IN_USE) {
+ atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
+ return -EAGAIN;
+ }
+
+ async_data = kvzalloc(struct_size(async_data, hdr.out_data,
+ cmd_out_len), GFP_KERNEL);
+ if (!async_data) {
+ err = -ENOMEM;
+ goto sub_bytes;
+ }
+
+ err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
+ if (err)
+ goto free_async;
+
+ async_data->cmd_out_len = cmd_out_len;
+ async_data->mdev = mdev;
+ async_data->ev_file = ev_file;
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
+ err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
+ uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
+ async_data->hdr.out_data,
+ async_data->cmd_out_len,
+ devx_query_callback, &async_data->cb_work);
+
+ if (err)
+ goto free_async;
+
+ return 0;
+
+free_async:
+ kvfree(async_data);
+sub_bytes:
+ atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
+ return err;
+}
+
+static void
+subscribe_event_xa_dealloc(struct mlx5_devx_event_table *devx_event_table,
+ u32 key_level1,
+ bool is_level2,
+ u32 key_level2)
+{
+ struct devx_event *event;
+ struct devx_obj_event *xa_val_level2;
+
+ /* Level 1 is valid for future use, no need to free */
+ if (!is_level2)
+ return;
+
+ event = xa_load(&devx_event_table->event_xa, key_level1);
+ WARN_ON(!event);
+
+ xa_val_level2 = xa_load(&event->object_ids,
+ key_level2);
+ if (list_empty(&xa_val_level2->obj_sub_list)) {
+ xa_erase(&event->object_ids,
+ key_level2);
+ kfree_rcu(xa_val_level2, rcu);
+ }
+}
+
+static int
+subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
+ u32 key_level1,
+ bool is_level2,
+ u32 key_level2)
+{
+ struct devx_obj_event *obj_event;
+ struct devx_event *event;
+ int err;
+
+ event = xa_load(&devx_event_table->event_xa, key_level1);
+ if (!event) {
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&event->unaffiliated_list);
+ xa_init(&event->object_ids);
+
+ err = xa_insert(&devx_event_table->event_xa,
+ key_level1,
+ event,
+ GFP_KERNEL);
+ if (err) {
+ kfree(event);
+ return err;
+ }
+ }
+
+ if (!is_level2)
+ return 0;
+
+ obj_event = xa_load(&event->object_ids, key_level2);
+ if (!obj_event) {
+ obj_event = kzalloc(sizeof(*obj_event), GFP_KERNEL);
+ if (!obj_event)
+ /* Level 1 is valid for future use, no need to free */
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&obj_event->obj_sub_list);
+ err = xa_insert(&event->object_ids,
+ key_level2,
+ obj_event,
+ GFP_KERNEL);
+ if (err) {
+ kfree(obj_event);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static bool is_valid_events_legacy(int num_events, u16 *event_type_num_list,
+ struct devx_obj *obj)
+{
+ int i;
+
+ for (i = 0; i < num_events; i++) {
+ if (obj) {
+ if (!is_legacy_obj_event_num(event_type_num_list[i]))
+ return false;
+ } else if (!is_legacy_unaffiliated_event_num(
+ event_type_num_list[i])) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+#define MAX_SUPP_EVENT_NUM 255
+static bool is_valid_events(struct mlx5_core_dev *dev,
+ int num_events, u16 *event_type_num_list,
+ struct devx_obj *obj)
+{
+ __be64 *aff_events;
+ __be64 *unaff_events;
+ int mask_entry;
+ int mask_bit;
+ int i;
+
+ if (MLX5_CAP_GEN(dev, event_cap)) {
+ aff_events = MLX5_CAP_DEV_EVENT(dev,
+ user_affiliated_events);
+ unaff_events = MLX5_CAP_DEV_EVENT(dev,
+ user_unaffiliated_events);
+ } else {
+ return is_valid_events_legacy(num_events, event_type_num_list,
+ obj);
+ }
+
+ for (i = 0; i < num_events; i++) {
+ if (event_type_num_list[i] > MAX_SUPP_EVENT_NUM)
+ return false;
+
+ mask_entry = event_type_num_list[i] / 64;
+ mask_bit = event_type_num_list[i] % 64;
+
+ if (obj) {
+ /* CQ completion */
+ if (event_type_num_list[i] == 0)
+ continue;
+
+ if (!(be64_to_cpu(aff_events[mask_entry]) &
+ (1ull << mask_bit)))
+ return false;
+
+ continue;
+ }
+
+ if (!(be64_to_cpu(unaff_events[mask_entry]) &
+ (1ull << mask_bit)))
+ return false;
+ }
+
+ return true;
+}
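
is_valid_events() indexes the firmware event capability as an array of 64-bit words: word = event / 64, bit = event % 64. A self-contained sketch of that lookup, using host-endian words for simplicity where the driver applies be64_to_cpu() per word (names and the demo mask are made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_EVENT_NUM 255

/* Same indexing as the driver: word = event / 64, bit = event % 64. */
static bool event_allowed(const uint64_t *mask, unsigned int event)
{
	if (event > MAX_EVENT_NUM)
		return false;
	return mask[event / 64] & (1ULL << (event % 64));
}

int main(void)
{
	uint64_t mask[4] = { 0 };

	mask[1] |= 1ULL << 5;	/* allow event number 69 (64 + 5) */

	printf("event 69: %s\n", event_allowed(mask, 69) ? "allowed" : "denied");
	printf("event 70: %s\n", event_allowed(mask, 70) ? "allowed" : "denied");
	return 0;
}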
+
+#define MAX_NUM_EVENTS 16
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *devx_uobj = uverbs_attr_get_uobject(
+ attrs,
+ MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE);
+ struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+ struct ib_uobject *fd_uobj;
+ struct devx_obj *obj = NULL;
+ struct devx_async_event_file *ev_file;
+ struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table;
+ u16 *event_type_num_list;
+ struct devx_event_subscription *event_sub, *tmp_sub;
+ struct list_head sub_list;
+ int redirect_fd;
+ bool use_eventfd = false;
+ int num_events;
+ u16 obj_type = 0;
+ u64 cookie = 0;
+ u32 obj_id = 0;
+ int err;
+ int i;
+
+ if (!c->devx_uid)
+ return -EINVAL;
+
+ if (!IS_ERR(devx_uobj)) {
+ obj = (struct devx_obj *)devx_uobj->object;
+ if (obj)
+ obj_id = get_dec_obj_id(obj->obj_id);
+ }
+
+ fd_uobj = uverbs_attr_get_uobject(attrs,
+ MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE);
+ if (IS_ERR(fd_uobj))
+ return PTR_ERR(fd_uobj);
+
+ ev_file = container_of(fd_uobj, struct devx_async_event_file,
+ uobj);
+
+ if (uverbs_attr_is_valid(attrs,
+ MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM)) {
+ err = uverbs_copy_from(&redirect_fd, attrs,
+ MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM);
+ if (err)
+ return err;
+
+ use_eventfd = true;
+ }
+
+ if (uverbs_attr_is_valid(attrs,
+ MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE)) {
+ if (use_eventfd)
+ return -EINVAL;
+
+ err = uverbs_copy_from(&cookie, attrs,
+ MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE);
+ if (err)
+ return err;
+ }
+
+ num_events = uverbs_attr_ptr_get_array_size(
+ attrs, MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
+ sizeof(u16));
+
+ if (num_events < 0)
+ return num_events;
+
+ if (num_events > MAX_NUM_EVENTS)
+ return -EINVAL;
+
+ event_type_num_list = uverbs_attr_get_alloced_ptr(attrs,
+ MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST);
+
+ if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&sub_list);
+
+ /* Protect against concurrent subscriptions to the same XA entries so
+ * that both can succeed.
+ */
+ mutex_lock(&devx_event_table->event_xa_lock);
+ for (i = 0; i < num_events; i++) {
+ u32 key_level1;
+
+ if (obj)
+ obj_type = get_dec_obj_type(obj,
+ event_type_num_list[i]);
+ key_level1 = event_type_num_list[i] | obj_type << 16;
+
+ err = subscribe_event_xa_alloc(devx_event_table,
+ key_level1,
+ obj,
+ obj_id);
+ if (err)
+ goto err;
+
+ event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
+ if (!event_sub) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ list_add_tail(&event_sub->event_list, &sub_list);
+ uverbs_uobject_get(&ev_file->uobj);
+ if (use_eventfd) {
+ event_sub->eventfd =
+ eventfd_ctx_fdget(redirect_fd);
+
+ if (IS_ERR(event_sub->eventfd)) {
+ err = PTR_ERR(event_sub->eventfd);
+ event_sub->eventfd = NULL;
+ goto err;
+ }
+ }
+
+ event_sub->cookie = cookie;
+ event_sub->ev_file = ev_file;
+ /* May be needed upon cleanup of the devx object/subscription */
+ event_sub->xa_key_level1 = key_level1;
+ event_sub->xa_key_level2 = obj_id;
+ INIT_LIST_HEAD(&event_sub->obj_list);
+ }
+
+ /* Once all the allocations and the XA data insertions are done, we
+ * can go ahead and add all the subscriptions to the relevant lists
+ * without concern of a failure.
+ */
+ list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
+ struct devx_event *event;
+ struct devx_obj_event *obj_event;
+
+ list_del_init(&event_sub->event_list);
+
+ spin_lock_irq(&ev_file->lock);
+ list_add_tail_rcu(&event_sub->file_list,
+ &ev_file->subscribed_events_list);
+ spin_unlock_irq(&ev_file->lock);
+
+ event = xa_load(&devx_event_table->event_xa,
+ event_sub->xa_key_level1);
+ WARN_ON(!event);
+
+ if (!obj) {
+ list_add_tail_rcu(&event_sub->xa_list,
+ &event->unaffiliated_list);
+ continue;
+ }
+
+ obj_event = xa_load(&event->object_ids, obj_id);
+ WARN_ON(!obj_event);
+ list_add_tail_rcu(&event_sub->xa_list,
+ &obj_event->obj_sub_list);
+ list_add_tail_rcu(&event_sub->obj_list,
+ &obj->event_sub);
+ }
+
+ mutex_unlock(&devx_event_table->event_xa_lock);
+ return 0;
+
+err:
+ list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
+ list_del(&event_sub->event_list);
+
+ subscribe_event_xa_dealloc(devx_event_table,
+ event_sub->xa_key_level1,
+ obj,
+ obj_id);
+
+ if (event_sub->eventfd)
+ eventfd_ctx_put(event_sub->eventfd);
+ uverbs_uobject_put(&event_sub->ev_file->uobj);
+ kfree(event_sub);
+ }
+
+ mutex_unlock(&devx_event_table->event_xa_lock);
+ return err;
+}
+
+static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
+ struct uverbs_attr_bundle *attrs,
+ struct devx_umem *obj, u32 access_flags)
+{
+ u64 addr;
+ size_t size;
+ int err;
+
+ if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
+ uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
+ return -EFAULT;
+
+ err = ib_check_mr_access(&dev->ib_dev, access_flags);
+ if (err)
+ return err;
+
+ if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD)) {
+ struct ib_umem_dmabuf *umem_dmabuf;
+ int dmabuf_fd;
+
+ err = uverbs_get_raw_fd(&dmabuf_fd, attrs,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD);
+ if (err)
+ return -EFAULT;
+
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(
+ &dev->ib_dev, addr, size, dmabuf_fd, access_flags);
+ if (IS_ERR(umem_dmabuf))
+ return PTR_ERR(umem_dmabuf);
+ obj->umem = &umem_dmabuf->umem;
+ } else {
+ obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access_flags);
+ if (IS_ERR(obj->umem))
+ return PTR_ERR(obj->umem);
+ }
+ return 0;
+}
+
+static unsigned int devx_umem_find_best_pgsize(struct ib_umem *umem,
+ unsigned long pgsz_bitmap)
+{
+ unsigned long page_size;
+
+ /* Don't bother checking larger page sizes as offset must be zero and
+ * total DEVX umem length must be equal to total umem length.
+ */
+ pgsz_bitmap &= GENMASK_ULL(max_t(u64, order_base_2(umem->length),
+ PAGE_SHIFT),
+ MLX5_ADAPTER_PAGE_SHIFT);
+ if (!pgsz_bitmap)
+ return 0;
+
+ page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, U64_MAX);
+ if (!page_size)
+ return 0;
+
+ /* If the page_size is less than the CPU page size then we can use the
+ * offset and create a umem which is a subset of the page list.
+ * For larger page sizes we can't be sure the DMA list reflects the
+ * VA so we must ensure that the umem extent is exactly equal to the
+ * page list. Reduce the page size until one of these cases is true.
+ */
+ while ((ib_umem_dma_offset(umem, page_size) != 0 ||
+ (umem->length % page_size) != 0) &&
+ page_size > PAGE_SIZE)
+ page_size /= 2;
+
+ return page_size;
+}
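
The tail of devx_umem_find_best_pgsize() halves the candidate page size until the region starts on a page boundary and spans a whole number of pages (or the CPU page size is reached). A standalone sketch of that reduction, approximating the DMA offset as addr % page_size and using a hard-coded 4 KiB floor (TOY_PAGE_SIZE is a stand-in for PAGE_SIZE):

#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SIZE 4096UL	/* stands in for the CPU PAGE_SIZE */

/* Halve the candidate page size until the region is page-aligned and a
 * whole multiple of it, mirroring the reduction loop in the driver.
 */
static unsigned long reduce_page_size(uint64_t addr, uint64_t length,
				      unsigned long page_size)
{
	while ((addr % page_size != 0 || length % page_size != 0) &&
	       page_size > TOY_PAGE_SIZE)
		page_size /= 2;
	return page_size;
}

int main(void)
{
	/* 2 MiB candidate, but the buffer is only 64 KiB aligned and sized */
	printf("best: %lu\n", reduce_page_size(0x10000, 0x10000, 1UL << 21));
	/* already 2 MiB aligned and sized, so the candidate survives */
	printf("best: %lu\n", reduce_page_size(0x200000, 0x400000, 1UL << 21));
	return 0;
}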
+
+static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
+ struct uverbs_attr_bundle *attrs,
+ struct devx_umem *obj,
+ struct devx_umem_reg_cmd *cmd,
+ int access)
+{
+ unsigned long pgsz_bitmap;
+ unsigned int page_size;
+ __be64 *mtt;
+ void *umem;
+ int ret;
+
+ /*
+ * If the user does not pass in pgsz_bitmap then the user promises not
+ * to use umem_offset!=0 in any commands that allocate on top of the
+ * umem.
+ *
+ * If the user wants to use a umem_offset then it must pass in
+ * pgsz_bitmap which guides the maximum page size and thus maximum
+ * object alignment inside the umem. See the PRM.
+ *
+ * Users are not allowed to use IOVA here, mkeys are not supported on
+ * umem.
+ */
+ ret = uverbs_get_const_default(&pgsz_bitmap, attrs,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
+ GENMASK_ULL(63,
+ min(PAGE_SHIFT, MLX5_ADAPTER_PAGE_SHIFT)));
+ if (ret)
+ return ret;
+
+ page_size = devx_umem_find_best_pgsize(obj->umem, pgsz_bitmap);
+ if (!page_size)
+ return -EINVAL;
+
+ cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
+ (MLX5_ST_SZ_BYTES(mtt) *
+ ib_umem_num_dma_blocks(obj->umem, page_size));
+ cmd->in = uverbs_zalloc(attrs, cmd->inlen);
+ if (IS_ERR(cmd->in))
+ return PTR_ERR(cmd->in);
+
+ umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
+ mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);
+
+ MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
+ MLX5_SET64(umem, umem, num_of_mtt,
+ ib_umem_num_dma_blocks(obj->umem, page_size));
+ MLX5_SET(umem, umem, log_page_size,
+ order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(umem, umem, page_offset,
+ ib_umem_dma_offset(obj->umem, page_size));
+
+ if (mlx5_umem_needs_ats(dev, obj->umem, access))
+ MLX5_SET(umem, umem, ats, 1);
+
+ mlx5_ib_populate_pas(obj->umem, page_size, mtt,
+ (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
+ MLX5_IB_MTT_READ);
+ return 0;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct devx_umem_reg_cmd cmd;
+ struct devx_umem *obj;
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
+ u32 obj_id;
+ struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+ int access_flags;
+ int err;
+
+ if (!c->devx_uid)
+ return -EINVAL;
+
+ err = uverbs_get_flags32(&access_flags, attrs,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
+ IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_RELAXED_ORDERING);
+ if (err)
+ return err;
+
+ obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ err = devx_umem_get(dev, &c->ibucontext, attrs, obj, access_flags);
+ if (err)
+ goto err_obj_free;
+
+ err = devx_umem_reg_cmd_alloc(dev, attrs, obj, &cmd, access_flags);
+ if (err)
+ goto err_umem_release;
+
+ MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
+ err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
+ sizeof(cmd.out));
+ if (err)
+ goto err_umem_release;
+
+ obj->mdev = dev->mdev;
+ uobj->object = obj;
+ devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
+ uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id,
+ sizeof(obj_id));
+ return err;
+
+err_umem_release:
+ ib_umem_release(obj->umem);
+err_obj_free:
+ kfree(obj);
+ return err;
+}
+
+static int devx_umem_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct devx_umem *obj = uobject->object;
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ int err;
+
+ err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
+ if (err)
+ return err;
+
+ ib_umem_release(obj->umem);
+ kfree(obj);
+ return 0;
+}
+
+static bool is_unaffiliated_event(struct mlx5_core_dev *dev,
+ unsigned long event_type)
+{
+ __be64 *unaff_events;
+ int mask_entry;
+ int mask_bit;
+
+ if (!MLX5_CAP_GEN(dev, event_cap))
+ return is_legacy_unaffiliated_event_num(event_type);
+
+ unaff_events = MLX5_CAP_DEV_EVENT(dev,
+ user_unaffiliated_events);
+ WARN_ON(event_type > MAX_SUPP_EVENT_NUM);
+
+ mask_entry = event_type / 64;
+ mask_bit = event_type % 64;
+
+ if (!(be64_to_cpu(unaff_events[mask_entry]) & (1ull << mask_bit)))
+ return false;
+
+ return true;
+}
+
+static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
+{
+ struct mlx5_eqe *eqe = data;
+ u32 obj_id = 0;
+
+ switch (event_type) {
+ case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
+ case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
+ case MLX5_EVENT_TYPE_PATH_MIG:
+ case MLX5_EVENT_TYPE_COMM_EST:
+ case MLX5_EVENT_TYPE_SQ_DRAINED:
+ case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
+ case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+ obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
+ break;
+ case MLX5_EVENT_TYPE_XRQ_ERROR:
+ obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff;
+ break;
+ case MLX5_EVENT_TYPE_DCT_DRAINED:
+ case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
+ obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
+ break;
+ case MLX5_EVENT_TYPE_CQ_ERROR:
+ obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
+ break;
+ default:
+ obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id);
+ break;
+ }
+
+ return obj_id;
+}
+
+static int deliver_event(struct devx_event_subscription *event_sub,
+ const void *data)
+{
+ struct devx_async_event_file *ev_file;
+ struct devx_async_event_data *event_data;
+ unsigned long flags;
+
+ ev_file = event_sub->ev_file;
+
+ if (ev_file->omit_data) {
+ spin_lock_irqsave(&ev_file->lock, flags);
+ if (!list_empty(&event_sub->event_list) ||
+ ev_file->is_destroyed) {
+ spin_unlock_irqrestore(&ev_file->lock, flags);
+ return 0;
+ }
+
+ list_add_tail(&event_sub->event_list, &ev_file->event_list);
+ spin_unlock_irqrestore(&ev_file->lock, flags);
+ wake_up_interruptible(&ev_file->poll_wait);
+ return 0;
+ }
+
+ event_data = kzalloc(sizeof(*event_data) + sizeof(struct mlx5_eqe),
+ GFP_ATOMIC);
+ if (!event_data) {
+ spin_lock_irqsave(&ev_file->lock, flags);
+ ev_file->is_overflow_err = 1;
+ spin_unlock_irqrestore(&ev_file->lock, flags);
+ return -ENOMEM;
+ }
+
+ event_data->hdr.cookie = event_sub->cookie;
+ memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));
+
+ spin_lock_irqsave(&ev_file->lock, flags);
+ if (!ev_file->is_destroyed)
+ list_add_tail(&event_data->list, &ev_file->event_list);
+ else
+ kfree(event_data);
+ spin_unlock_irqrestore(&ev_file->lock, flags);
+ wake_up_interruptible(&ev_file->poll_wait);
+
+ return 0;
+}
+
+static void dispatch_event_fd(struct list_head *fd_list,
+ const void *data)
+{
+ struct devx_event_subscription *item;
+
+ list_for_each_entry_rcu(item, fd_list, xa_list) {
+ if (item->eventfd)
+ eventfd_signal(item->eventfd);
+ else
+ deliver_event(item, data);
+ }
+}
+
+static int devx_event_notifier(struct notifier_block *nb,
+ unsigned long event_type, void *data)
+{
+ struct mlx5_devx_event_table *table;
+ struct mlx5_ib_dev *dev;
+ struct devx_event *event;
+ struct devx_obj_event *obj_event;
+ u16 obj_type = 0;
+ bool is_unaffiliated;
+ u32 obj_id;
+
+ /* Explicitly filter out kernel events which may occur frequently */
+ if (event_type == MLX5_EVENT_TYPE_CMD ||
+ event_type == MLX5_EVENT_TYPE_PAGE_REQUEST)
+ return NOTIFY_OK;
+
+ table = container_of(nb, struct mlx5_devx_event_table, devx_nb.nb);
+ dev = container_of(table, struct mlx5_ib_dev, devx_event_table);
+ is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type);
+
+ if (!is_unaffiliated)
+ obj_type = get_event_obj_type(event_type, data);
+
+ rcu_read_lock();
+ event = xa_load(&table->event_xa, event_type | (obj_type << 16));
+ if (!event) {
+ rcu_read_unlock();
+ return NOTIFY_DONE;
+ }
+
+ if (is_unaffiliated) {
+ dispatch_event_fd(&event->unaffiliated_list, data);
+ rcu_read_unlock();
+ return NOTIFY_OK;
+ }
+
+ obj_id = devx_get_obj_id_from_event(event_type, data);
+ obj_event = xa_load(&event->object_ids, obj_id);
+ if (!obj_event) {
+ rcu_read_unlock();
+ return NOTIFY_DONE;
+ }
+
+ dispatch_event_fd(&obj_event->obj_sub_list, data);
+
+ rcu_read_unlock();
+ return NOTIFY_OK;
+}
+
+int mlx5_ib_devx_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_devx_event_table *table = &dev->devx_event_table;
+ int uid;
+
+ uid = mlx5_ib_devx_create(dev, false, 0);
+ if (uid > 0) {
+ dev->devx_whitelist_uid = uid;
+ xa_init(&table->event_xa);
+ mutex_init(&table->event_xa_lock);
+ MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
+ mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
+ }
+
+ return 0;
+}
+
+void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_devx_event_table *table = &dev->devx_event_table;
+ struct devx_event_subscription *sub, *tmp;
+ struct devx_event *event;
+ void *entry;
+ unsigned long id;
+
+ if (dev->devx_whitelist_uid) {
+ mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
+ mutex_lock(&dev->devx_event_table.event_xa_lock);
+ xa_for_each(&table->event_xa, id, entry) {
+ event = entry;
+ list_for_each_entry_safe(
+ sub, tmp, &event->unaffiliated_list, xa_list)
+ devx_cleanup_subscription(dev, sub);
+ kfree(entry);
+ }
+ mutex_unlock(&dev->devx_event_table.event_xa_lock);
+ xa_destroy(&table->event_xa);
+
+ mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
+ }
+}
+
+static void devx_async_destroy_cb(int status, struct mlx5_async_work *context)
+{
+ struct mlx5_async_cmd *devx_out = container_of(context,
+ struct mlx5_async_cmd, cb_work);
+ struct devx_obj *obj = devx_out->uobject->object;
+
+ if (!status)
+ obj->flags |= DEVX_OBJ_FLAGS_HW_FREED;
+
+ complete(&devx_out->comp);
+}
+
+static void devx_async_destroy(struct mlx5_ib_dev *dev,
+ struct mlx5_async_cmd *cmd)
+{
+ init_completion(&cmd->comp);
+ cmd->err = mlx5_cmd_exec_cb(&dev->async_ctx, cmd->in, cmd->in_size,
+ &cmd->out, sizeof(cmd->out),
+ devx_async_destroy_cb, &cmd->cb_work);
+}
+
+static void devx_wait_async_destroy(struct mlx5_async_cmd *cmd)
+{
+ if (!cmd->err)
+ wait_for_completion(&cmd->comp);
+ atomic_set(&cmd->uobject->usecnt, 0);
+}
+
+void mlx5_ib_ufile_hw_cleanup(struct ib_uverbs_file *ufile)
+{
+ struct mlx5_async_cmd *async_cmd;
+ struct ib_ucontext *ucontext = ufile->ucontext;
+ struct ib_device *device = ucontext->device;
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct ib_uobject *uobject;
+ struct devx_obj *obj;
+ int head = 0;
+ int tail = 0;
+
+ async_cmd = kcalloc(MAX_ASYNC_CMDS, sizeof(*async_cmd), GFP_KERNEL);
+ if (!async_cmd)
+ return;
+
+ list_for_each_entry(uobject, &ufile->uobjects, list) {
+ WARN_ON(uverbs_try_lock_object(uobject, UVERBS_LOOKUP_WRITE));
+
+ /*
+ * Currently we only support QP destruction; if other objects
+ * are to be destroyed, type synchronization needs to be added
+ * to the cleanup algorithm and pre/post FW cleanup handled for
+ * the new types if needed.
+ */
+ if (uobj_get_object_id(uobject) != MLX5_IB_OBJECT_DEVX_OBJ ||
+ (get_dec_obj_type(uobject->object, MLX5_EVENT_TYPE_MAX) !=
+ MLX5_OBJ_TYPE_QP)) {
+ atomic_set(&uobject->usecnt, 0);
+ continue;
+ }
+
+ obj = uobject->object;
+
+ async_cmd[tail % MAX_ASYNC_CMDS].in = obj->dinbox;
+ async_cmd[tail % MAX_ASYNC_CMDS].in_size = obj->dinlen;
+ async_cmd[tail % MAX_ASYNC_CMDS].uobject = uobject;
+
+ devx_async_destroy(dev, &async_cmd[tail % MAX_ASYNC_CMDS]);
+ tail++;
+
+ if (tail - head == MAX_ASYNC_CMDS) {
+ devx_wait_async_destroy(&async_cmd[head % MAX_ASYNC_CMDS]);
+ head++;
+ }
+ }
+
+ while (head != tail) {
+ devx_wait_async_destroy(&async_cmd[head % MAX_ASYNC_CMDS]);
+ head++;
+ }
+
+ kfree(async_cmd);
+}
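
mlx5_ib_ufile_hw_cleanup() bounds the number of in-flight destroy commands by indexing a fixed array with head/tail counters taken modulo the array size: it waits for the oldest command once the window is full and drains the rest at the end. A generic sketch of that windowing pattern with dummy issue/wait steps; TOY_MAX_ASYNC is a made-up stand-in for MAX_ASYNC_CMDS:

#include <stdio.h>

#define TOY_MAX_ASYNC 4		/* stands in for MAX_ASYNC_CMDS */

static void issue(int slot, int item)
{
	printf("issue item %d in slot %d\n", item, slot);
}

static void wait_done(int slot)
{
	printf("wait for slot %d\n", slot);
}

int main(void)
{
	int head = 0, tail = 0;

	/* Submit 10 items, never keeping more than TOY_MAX_ASYNC in flight. */
	for (int item = 0; item < 10; item++) {
		issue(tail % TOY_MAX_ASYNC, item);
		tail++;

		if (tail - head == TOY_MAX_ASYNC) {
			wait_done(head % TOY_MAX_ASYNC);
			head++;
		}
	}

	/* Drain whatever is still outstanding. */
	while (head != tail) {
		wait_done(head % TOY_MAX_ASYNC);
		head++;
	}
	return 0;
}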
+
+static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
+ struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
+ struct devx_async_data *event;
+ int ret = 0;
+ size_t eventsz;
+
+ spin_lock_irq(&ev_queue->lock);
+
+ while (list_empty(&ev_queue->event_list)) {
+ spin_unlock_irq(&ev_queue->lock);
+
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ if (wait_event_interruptible(
+ ev_queue->poll_wait,
+ (!list_empty(&ev_queue->event_list) ||
+ ev_queue->is_destroyed))) {
+ return -ERESTARTSYS;
+ }
+
+ spin_lock_irq(&ev_queue->lock);
+ if (ev_queue->is_destroyed) {
+ spin_unlock_irq(&ev_queue->lock);
+ return -EIO;
+ }
+ }
+
+ event = list_entry(ev_queue->event_list.next,
+ struct devx_async_data, list);
+ eventsz = event->cmd_out_len +
+ sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);
+
+ if (eventsz > count) {
+ spin_unlock_irq(&ev_queue->lock);
+ return -ENOSPC;
+ }
+
+ list_del(ev_queue->event_list.next);
+ spin_unlock_irq(&ev_queue->lock);
+
+ if (copy_to_user(buf, &event->hdr, eventsz))
+ ret = -EFAULT;
+ else
+ ret = eventsz;
+
+ atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
+ kvfree(event);
+ return ret;
+}
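
devx_async_cmd_event_read() is a classic blocking consumer: sleep while the queue is empty, bail out with -EAGAIN for non-blocking callers, and fail with -EIO once the queue is torn down. A hedged userspace analogy of that shape, using a pthread mutex and condition variable in place of the kernel's spinlock and wait queue (toy_queue and the toy_* helpers are invented for the demo):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_queue {
	pthread_mutex_t lock;
	pthread_cond_t nonempty;
	int pending;		/* stands in for the event list */
	bool destroyed;
};

/* Block until an event is pending, unless non-blocking behaviour is
 * requested or the queue has been destroyed.
 */
static int toy_read(struct toy_queue *q, bool nonblock)
{
	pthread_mutex_lock(&q->lock);
	while (q->pending == 0 && !q->destroyed) {
		if (nonblock) {
			pthread_mutex_unlock(&q->lock);
			return -EAGAIN;
		}
		pthread_cond_wait(&q->nonempty, &q->lock);
	}
	if (q->destroyed) {
		pthread_mutex_unlock(&q->lock);
		return -EIO;
	}
	q->pending--;
	pthread_mutex_unlock(&q->lock);
	return 1;		/* one event consumed */
}

static void toy_post(struct toy_queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->pending++;
	pthread_cond_signal(&q->nonempty);
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct toy_queue q = { .pending = 0, .destroyed = false };

	pthread_mutex_init(&q.lock, NULL);
	pthread_cond_init(&q.nonempty, NULL);

	printf("empty, non-blocking read -> %d\n", toy_read(&q, true));
	toy_post(&q);
	printf("after post, read         -> %d\n", toy_read(&q, true));
	return 0;
}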
+
+static __poll_t devx_async_cmd_event_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
+ struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
+ __poll_t pollflags = 0;
+
+ poll_wait(filp, &ev_queue->poll_wait, wait);
+
+ spin_lock_irq(&ev_queue->lock);
+ if (ev_queue->is_destroyed)
+ pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
+ else if (!list_empty(&ev_queue->event_list))
+ pollflags = EPOLLIN | EPOLLRDNORM;
+ spin_unlock_irq(&ev_queue->lock);
+
+ return pollflags;
+}
+
+static const struct file_operations devx_async_cmd_event_fops = {
+ .owner = THIS_MODULE,
+ .read = devx_async_cmd_event_read,
+ .poll = devx_async_cmd_event_poll,
+ .release = uverbs_uobject_fd_release,
+};
+
+static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct devx_async_event_file *ev_file = filp->private_data;
+ struct devx_event_subscription *event_sub;
+ struct devx_async_event_data *event;
+ int ret = 0;
+ size_t eventsz;
+ bool omit_data;
+ void *event_data;
+
+ omit_data = ev_file->omit_data;
+
+ spin_lock_irq(&ev_file->lock);
+
+ if (ev_file->is_overflow_err) {
+ ev_file->is_overflow_err = 0;
+ spin_unlock_irq(&ev_file->lock);
+ return -EOVERFLOW;
+ }
+
+ while (list_empty(&ev_file->event_list)) {
+ spin_unlock_irq(&ev_file->lock);
+
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ if (wait_event_interruptible(ev_file->poll_wait,
+ (!list_empty(&ev_file->event_list) ||
+ ev_file->is_destroyed))) {
+ return -ERESTARTSYS;
+ }
+
+ spin_lock_irq(&ev_file->lock);
+ if (ev_file->is_destroyed) {
+ spin_unlock_irq(&ev_file->lock);
+ return -EIO;
+ }
+ }
+
+ if (omit_data) {
+ event_sub = list_first_entry(&ev_file->event_list,
+ struct devx_event_subscription,
+ event_list);
+ eventsz = sizeof(event_sub->cookie);
+ event_data = &event_sub->cookie;
+ } else {
+ event = list_first_entry(&ev_file->event_list,
+ struct devx_async_event_data, list);
+ eventsz = sizeof(struct mlx5_eqe) +
+ sizeof(struct mlx5_ib_uapi_devx_async_event_hdr);
+ event_data = &event->hdr;
+ }
+
+ if (eventsz > count) {
+ spin_unlock_irq(&ev_file->lock);
+ return -EINVAL;
+ }
+
+ if (omit_data)
+ list_del_init(&event_sub->event_list);
+ else
+ list_del(&event->list);
+
+ spin_unlock_irq(&ev_file->lock);
+
+ if (copy_to_user(buf, event_data, eventsz))
+ /* This points to an application issue, not a kernel concern */
+ ret = -EFAULT;
+ else
+ ret = eventsz;
+
+ if (!omit_data)
+ kfree(event);
+ return ret;
+}
+
+static __poll_t devx_async_event_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct devx_async_event_file *ev_file = filp->private_data;
+ __poll_t pollflags = 0;
+
+ poll_wait(filp, &ev_file->poll_wait, wait);
+
+ spin_lock_irq(&ev_file->lock);
+ if (ev_file->is_destroyed)
+ pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
+ else if (!list_empty(&ev_file->event_list))
+ pollflags = EPOLLIN | EPOLLRDNORM;
+ spin_unlock_irq(&ev_file->lock);
+
+ return pollflags;
+}
+
+static void devx_free_subscription(struct rcu_head *rcu)
+{
+ struct devx_event_subscription *event_sub =
+ container_of(rcu, struct devx_event_subscription, rcu);
+
+ if (event_sub->eventfd)
+ eventfd_ctx_put(event_sub->eventfd);
+ uverbs_uobject_put(&event_sub->ev_file->uobj);
+ kfree(event_sub);
+}
+
+static const struct file_operations devx_async_event_fops = {
+ .owner = THIS_MODULE,
+ .read = devx_async_event_read,
+ .poll = devx_async_event_poll,
+ .release = uverbs_uobject_fd_release,
+};
+
+static void devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
+ enum rdma_remove_reason why)
+{
+ struct devx_async_cmd_event_file *comp_ev_file =
+ container_of(uobj, struct devx_async_cmd_event_file,
+ uobj);
+ struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
+ struct devx_async_data *entry, *tmp;
+
+ spin_lock_irq(&ev_queue->lock);
+ ev_queue->is_destroyed = 1;
+ spin_unlock_irq(&ev_queue->lock);
+ wake_up_interruptible(&ev_queue->poll_wait);
+
+ mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
+
+ spin_lock_irq(&comp_ev_file->ev_queue.lock);
+ list_for_each_entry_safe(entry, tmp,
+ &comp_ev_file->ev_queue.event_list, list) {
+ list_del(&entry->list);
+ kvfree(entry);
+ }
+ spin_unlock_irq(&comp_ev_file->ev_queue.lock);
+}
+
+static void devx_async_event_destroy_uobj(struct ib_uobject *uobj,
+ enum rdma_remove_reason why)
+{
+ struct devx_async_event_file *ev_file =
+ container_of(uobj, struct devx_async_event_file,
+ uobj);
+ struct devx_event_subscription *event_sub, *event_sub_tmp;
+ struct mlx5_ib_dev *dev = ev_file->dev;
+
+ spin_lock_irq(&ev_file->lock);
+ ev_file->is_destroyed = 1;
+
+ /* free the pending events allocation */
+ if (ev_file->omit_data) {
+ struct devx_event_subscription *event_sub, *tmp;
+
+ list_for_each_entry_safe(event_sub, tmp, &ev_file->event_list,
+ event_list)
+ list_del_init(&event_sub->event_list);
+
+ } else {
+ struct devx_async_event_data *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &ev_file->event_list,
+ list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ }
+
+ spin_unlock_irq(&ev_file->lock);
+ wake_up_interruptible(&ev_file->poll_wait);
+
+ mutex_lock(&dev->devx_event_table.event_xa_lock);
+ /* delete the subscriptions which are related to this FD */
+ list_for_each_entry_safe(event_sub, event_sub_tmp,
+ &ev_file->subscribed_events_list, file_list) {
+ devx_cleanup_subscription(dev, event_sub);
+ list_del_rcu(&event_sub->file_list);
+ /* subscription may not be used by the read API any more */
+ call_rcu(&event_sub->rcu, devx_free_subscription);
+ }
+ mutex_unlock(&dev->devx_event_table.event_xa_lock);
+
+ put_device(&dev->ib_dev.dev);
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_UMEM_REG,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
+ MLX5_IB_OBJECT_DEVX_UMEM,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_RAW_FD(MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD,
+ UA_OPTIONAL),
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
+ enum ib_access_flags),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
+ u64),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_DEVX_UMEM_DEREG,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
+ MLX5_IB_OBJECT_DEVX_UMEM,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_QUERY_EQN,
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_QUERY_UAR,
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_OTHER,
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_OBJ_CREATE,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
+ UVERBS_IDR_ANY_OBJECT,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_OBJ_QUERY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
+ UVERBS_IDR_ANY_OBJECT,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
+ UVERBS_IDR_ANY_OBJECT,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
+ u16, UA_MANDATORY),
+ UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
+ MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT,
+ UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE,
+ MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
+ UVERBS_ATTR_MIN_SIZE(sizeof(u16)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE,
+ UVERBS_ATTR_TYPE(u64),
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL));
+
+DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
+ UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
+ UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
+ MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
+ UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
+ devx_async_cmd_event_destroy_uobj,
+ &devx_async_cmd_event_fops, "[devx_async_cmd]",
+ O_RDONLY),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC,
+ UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE,
+ MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
+ enum mlx5_ib_uapi_devx_create_event_channel_flags,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
+ UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
+ devx_async_event_destroy_uobj,
+ &devx_async_event_fops, "[devx_async_event]",
+ O_RDONLY),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));
+
+static bool devx_is_supported(struct ib_device *device)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+
+ return MLX5_CAP_GEN(dev->mdev, log_max_uctx);
+}
+
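+/*
+ * All DEVX uapi objects below are exposed only when the firmware
+ * reports a non-zero log_max_uctx capability, i.e. when DEVX user
+ * contexts are supported.
+ */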
+const struct uapi_definition mlx5_ib_devx_defs[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ MLX5_IB_OBJECT_DEVX,
+ UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ MLX5_IB_OBJECT_DEVX_UMEM,
+ UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
+ UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
+ UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
+ {},
+};
diff --git a/drivers/infiniband/hw/mlx5/devx.h b/drivers/infiniband/hw/mlx5/devx.h
new file mode 100644
index 000000000000..ee9e7d3af93f
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/devx.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2019-2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#ifndef _MLX5_IB_DEVX_H
+#define _MLX5_IB_DEVX_H
+
+#include "mlx5_ib.h"
+
+#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
+struct devx_obj {
+ struct mlx5_ib_dev *ib_dev;
+ u64 obj_id;
+ u32 dinlen; /* destroy inbox length */
+ u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
+ u32 flags;
+ union {
+ struct mlx5_ib_mkey mkey;
+ struct mlx5_core_dct core_dct;
+ struct mlx5_core_cq core_cq;
+ u32 flow_counter_bulk_size;
+ };
+ struct list_head event_sub; /* holds devx_event_subscription entries */
+};
+#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user, u64 req_ucaps);
+void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid);
+int mlx5_ib_devx_init(struct mlx5_ib_dev *dev);
+void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev);
+void mlx5_ib_ufile_hw_cleanup(struct ib_uverbs_file *ufile);
+#else
+static inline int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user,
+ u64 req_ucaps)
+{
+ return -EOPNOTSUPP;
+}
+static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid) {}
+static inline int mlx5_ib_devx_init(struct mlx5_ib_dev *dev)
+{
+ return 0;
+}
+static inline void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev)
+{
+}
+static inline void mlx5_ib_ufile_hw_cleanup(struct ib_uverbs_file *ufile)
+{
+}
+#endif
+#endif /* _MLX5_IB_DEVX_H */
diff --git a/drivers/infiniband/hw/mlx5/dm.c b/drivers/infiniband/hw/mlx5/dm.c
new file mode 100644
index 000000000000..9ded2b7c1e31
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/dm.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2021, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include "dm.h"
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
+static int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
+ u64 length, u32 alignment)
+{
+ struct mlx5_core_dev *dev = dm->dev;
+ u64 num_memic_hw_pages = MLX5_CAP_DEV_MEM(dev, memic_bar_size)
+ >> PAGE_SHIFT;
+ u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
+ u32 max_alignment = MLX5_CAP_DEV_MEM(dev, log_max_memic_addr_alignment);
+ u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
+ u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
+ u32 mlx5_alignment;
+ u64 page_idx = 0;
+ int ret = 0;
+
+ if (!length || (length & MLX5_MEMIC_ALLOC_SIZE_MASK))
+ return -EINVAL;
+
+ /* The mlx5 device expresses alignment as 64 * 2^value, so the
+ * requested log2 alignment must be normalized against that base
+ * before being passed to firmware.
+ */
+ mlx5_alignment = (alignment < MLX5_MEMIC_BASE_ALIGN) ? 0 :
+ alignment - MLX5_MEMIC_BASE_ALIGN;
+ if (mlx5_alignment > max_alignment)
+ return -EINVAL;
+
+ MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
+ MLX5_SET(alloc_memic_in, in, range_size, num_pages * PAGE_SIZE);
+ MLX5_SET(alloc_memic_in, in, memic_size, length);
+ MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment,
+ mlx5_alignment);
+
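+ /*
+ * Scan the driver-side MEMIC page bitmap for a free run of
+ * num_pages and reserve it under dm->lock, then ask firmware to
+ * allocate at the corresponding address. On -EAGAIN the
+ * reservation is released and the search resumes from the next
+ * page; any other failure is fatal.
+ */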
+ while (page_idx < num_memic_hw_pages) {
+ spin_lock(&dm->lock);
+ page_idx = bitmap_find_next_zero_area(dm->memic_alloc_pages,
+ num_memic_hw_pages,
+ page_idx,
+ num_pages, 0);
+
+ if (page_idx < num_memic_hw_pages)
+ bitmap_set(dm->memic_alloc_pages,
+ page_idx, num_pages);
+
+ spin_unlock(&dm->lock);
+
+ if (page_idx >= num_memic_hw_pages)
+ break;
+
+ MLX5_SET64(alloc_memic_in, in, range_start_addr,
+ hw_start_addr + (page_idx * PAGE_SIZE));
+
+ ret = mlx5_cmd_exec_inout(dev, alloc_memic, in, out);
+ if (ret) {
+ spin_lock(&dm->lock);
+ bitmap_clear(dm->memic_alloc_pages,
+ page_idx, num_pages);
+ spin_unlock(&dm->lock);
+
+ if (ret == -EAGAIN) {
+ page_idx++;
+ continue;
+ }
+
+ return ret;
+ }
+
+ *addr = dev->bar_addr +
+ MLX5_GET64(alloc_memic_out, out, memic_start_addr);
+
+ return 0;
+ }
+
+ return -ENOMEM;
+}
+
+void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr,
+ u64 length)
+{
+ struct mlx5_core_dev *dev = dm->dev;
+ u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
+ u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
+ u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {};
+ u64 start_page_idx;
+ int err;
+
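+ /*
+ * Convert the BAR-relative address back into a MEMIC page index;
+ * the bitmap reservation is released only after the firmware
+ * dealloc command succeeds.
+ */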
+ addr -= dev->bar_addr;
+ start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;
+
+ MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
+ MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
+ MLX5_SET(dealloc_memic_in, in, memic_size, length);
+
+ err = mlx5_cmd_exec_in(dev, dealloc_memic, in);
+ if (err)
+ return;
+
+ spin_lock(&dm->lock);
+ bitmap_clear(dm->memic_alloc_pages,
+ start_page_idx, num_pages);
+ spin_unlock(&dm->lock);
+}
+
+void mlx5_cmd_dealloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
+ u8 operation)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_memic_in)] = {};
+ struct mlx5_core_dev *dev = dm->dev;
+
+ MLX5_SET(modify_memic_in, in, opcode, MLX5_CMD_OP_MODIFY_MEMIC);
+ MLX5_SET(modify_memic_in, in, op_mod, MLX5_MODIFY_MEMIC_OP_MOD_DEALLOC);
+ MLX5_SET(modify_memic_in, in, memic_operation_type, operation);
+ MLX5_SET64(modify_memic_in, in, memic_start_addr, addr - dev->bar_addr);
+
+ mlx5_cmd_exec_in(dev, modify_memic, in);
+}
+
+static int mlx5_cmd_alloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
+ u8 operation, phys_addr_t *op_addr)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_memic_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(modify_memic_in)] = {};
+ struct mlx5_core_dev *dev = dm->dev;
+ int err;
+
+ MLX5_SET(modify_memic_in, in, opcode, MLX5_CMD_OP_MODIFY_MEMIC);
+ MLX5_SET(modify_memic_in, in, op_mod, MLX5_MODIFY_MEMIC_OP_MOD_ALLOC);
+ MLX5_SET(modify_memic_in, in, memic_operation_type, operation);
+ MLX5_SET64(modify_memic_in, in, memic_start_addr, addr - dev->bar_addr);
+
+ err = mlx5_cmd_exec_inout(dev, modify_memic, in, out);
+ if (err)
+ return err;
+
+ *op_addr = dev->bar_addr +
+ MLX5_GET64(modify_memic_out, out, memic_operation_addr);
+ return 0;
+}
+
+static int add_dm_mmap_entry(struct ib_ucontext *context,
+ struct mlx5_user_mmap_entry *mentry, u8 mmap_flag,
+ size_t size, u64 address)
+{
+ mentry->mmap_flag = mmap_flag;
+ mentry->address = address;
+
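+ /*
+ * Reserve a pgoff inside the MLX5_IB_MMAP_DEVICE_MEM command
+ * range; the low 16 bits of the resulting start_pgoff are later
+ * reported to userspace as the mmap page index.
+ */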
+ return rdma_user_mmap_entry_insert_range(
+ context, &mentry->rdma_entry, size,
+ MLX5_IB_MMAP_DEVICE_MEM << 16,
+ (MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1);
+}
+
+static void mlx5_ib_dm_memic_free(struct kref *kref)
+{
+ struct mlx5_ib_dm_memic *dm =
+ container_of(kref, struct mlx5_ib_dm_memic, ref);
+ struct mlx5_ib_dev *dev = to_mdev(dm->base.ibdm.device);
+
+ mlx5_cmd_dealloc_memic(&dev->dm, dm->base.dev_addr, dm->base.size);
+ kfree(dm);
+}
+
+static int copy_op_to_user(struct mlx5_ib_dm_op_entry *op_entry,
+ struct uverbs_attr_bundle *attrs)
+{
+ u64 start_offset;
+ u16 page_idx;
+ int err;
+
+ page_idx = op_entry->mentry.rdma_entry.start_pgoff & 0xFFFF;
+ start_offset = op_entry->op_addr & ~PAGE_MASK;
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
+ &page_idx, sizeof(page_idx));
+ if (err)
+ return err;
+
+ return uverbs_copy_to(attrs,
+ MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
+ &start_offset, sizeof(start_offset));
+}
+
+static int map_existing_op(struct mlx5_ib_dm_memic *dm, u8 op,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_dm_op_entry *op_entry;
+
+ op_entry = xa_load(&dm->ops, op);
+ if (!op_entry)
+ return -ENOENT;
+
+ return copy_op_to_user(op_entry, attrs);
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_MAP_OP_ADDR)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE);
+ struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
+ struct ib_dm *ibdm = uobj->object;
+ struct mlx5_ib_dm_memic *dm = to_memic(ibdm);
+ struct mlx5_ib_dm_op_entry *op_entry;
+ int err;
+ u8 op;
+
+ err = uverbs_copy_from(&op, attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP);
+ if (err)
+ return err;
+
+ if (op >= BITS_PER_TYPE(u32))
+ return -EOPNOTSUPP;
+
+ if (!(MLX5_CAP_DEV_MEM(dev->mdev, memic_operations) & BIT(op)))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&dm->ops_xa_lock);
+ err = map_existing_op(dm, op, attrs);
+ if (!err || err != -ENOENT)
+ goto err_unlock;
+
+ op_entry = kzalloc(sizeof(*op_entry), GFP_KERNEL);
+ if (!op_entry)
+ goto err_unlock;
+
+ err = mlx5_cmd_alloc_memic_op(&dev->dm, dm->base.dev_addr, op,
+ &op_entry->op_addr);
+ if (err) {
+ kfree(op_entry);
+ goto err_unlock;
+ }
+ op_entry->op = op;
+ op_entry->dm = dm;
+
+ err = add_dm_mmap_entry(uobj->context, &op_entry->mentry,
+ MLX5_IB_MMAP_TYPE_MEMIC_OP, dm->base.size,
+ op_entry->op_addr & PAGE_MASK);
+ if (err) {
+ mlx5_cmd_dealloc_memic_op(&dev->dm, dm->base.dev_addr, op);
+ kfree(op_entry);
+ goto err_unlock;
+ }
+ /* From this point, entry will be freed by mmap_free */
+ kref_get(&dm->ref);
+
+ err = copy_op_to_user(op_entry, attrs);
+ if (err)
+ goto err_remove;
+
+ err = xa_insert(&dm->ops, op, op_entry, GFP_KERNEL);
+ if (err)
+ goto err_remove;
+ mutex_unlock(&dm->ops_xa_lock);
+
+ return 0;
+
+err_remove:
+ rdma_user_mmap_entry_remove(&op_entry->mentry.rdma_entry);
+err_unlock:
+ mutex_unlock(&dm->ops_xa_lock);
+
+ return err;
+}
+
+static struct ib_dm *handle_alloc_dm_memic(struct ib_ucontext *ctx,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
+ struct mlx5_ib_dm_memic *dm;
+ u64 start_offset;
+ u16 page_idx;
+ int err;
+ u64 address;
+
+ if (!dm_db || !MLX5_CAP_DEV_MEM(dm_db->dev, memic))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+ if (!dm)
+ return ERR_PTR(-ENOMEM);
+
+ dm->base.type = MLX5_IB_UAPI_DM_TYPE_MEMIC;
+ dm->base.size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
+ dm->base.ibdm.device = ctx->device;
+
+ kref_init(&dm->ref);
+ xa_init(&dm->ops);
+ mutex_init(&dm->ops_xa_lock);
+ dm->req_length = attr->length;
+
+ err = mlx5_cmd_alloc_memic(dm_db, &dm->base.dev_addr,
+ dm->base.size, attr->alignment);
+ if (err) {
+ kfree(dm);
+ return ERR_PTR(err);
+ }
+
+ address = dm->base.dev_addr & PAGE_MASK;
+ err = add_dm_mmap_entry(ctx, &dm->mentry, MLX5_IB_MMAP_TYPE_MEMIC,
+ dm->base.size, address);
+ if (err) {
+ mlx5_cmd_dealloc_memic(dm_db, dm->base.dev_addr, dm->base.size);
+ kfree(dm);
+ return ERR_PTR(err);
+ }
+
+ page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF;
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+ &page_idx, sizeof(page_idx));
+ if (err)
+ goto err_copy;
+
+ start_offset = dm->base.dev_addr & ~PAGE_MASK;
+ err = uverbs_copy_to(attrs,
+ MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
+ &start_offset, sizeof(start_offset));
+ if (err)
+ goto err_copy;
+
+ return &dm->base.ibdm;
+
+err_copy:
+ rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
+ return ERR_PTR(err);
+}
+
+static enum mlx5_sw_icm_type get_icm_type(int uapi_type)
+{
+ switch (uapi_type) {
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ return MLX5_SW_ICM_TYPE_HEADER_MODIFY;
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
+ return MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN;
+ case MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM:
+ return MLX5_SW_ICM_TYPE_SW_ENCAP;
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ default:
+ return MLX5_SW_ICM_TYPE_STEERING;
+ }
+}
+
+static struct ib_dm *handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs,
+ int type)
+{
+ struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
+ enum mlx5_sw_icm_type icm_type;
+ struct mlx5_ib_dm_icm *dm;
+ u64 act_size;
+ int err;
+
+ if (!capable(CAP_SYS_RAWIO) || !capable(CAP_NET_RAW))
+ return ERR_PTR(-EPERM);
+
+ switch (type) {
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM:
+ if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner) ||
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner) ||
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) ||
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2)))
+ return ERR_PTR(-EOPNOTSUPP);
+ break;
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
+ if (!MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) ||
+ !MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2))
+ return ERR_PTR(-EOPNOTSUPP);
+ break;
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+ if (!dm)
+ return ERR_PTR(-ENOMEM);
+
+ dm->base.type = type;
+ dm->base.ibdm.device = ctx->device;
+
+ /* The allocation size must be a multiple of the basic block size
+ * and a power of 2.
+ */
+ act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
+ act_size = roundup_pow_of_two(act_size);
+
+ dm->base.size = act_size;
+ icm_type = get_icm_type(type);
+
+ err = mlx5_dm_sw_icm_alloc(dev, icm_type, act_size, attr->alignment,
+ to_mucontext(ctx)->devx_uid,
+ &dm->base.dev_addr, &dm->obj_id);
+ if (err)
+ goto free;
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
+ &dm->base.dev_addr, sizeof(dm->base.dev_addr));
+ if (err) {
+ mlx5_dm_sw_icm_dealloc(dev, icm_type, dm->base.size,
+ to_mucontext(ctx)->devx_uid,
+ dm->base.dev_addr, dm->obj_id);
+ goto free;
+ }
+ return &dm->base.ibdm;
+free:
+ kfree(dm);
+ return ERR_PTR(err);
+}
+
+struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ enum mlx5_ib_uapi_dm_type type;
+ int err;
+
+ err = uverbs_get_const_default(&type, attrs,
+ MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
+ MLX5_IB_UAPI_DM_TYPE_MEMIC);
+ if (err)
+ return ERR_PTR(err);
+
+ mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
+ type, attr->length, attr->alignment);
+
+ switch (type) {
+ case MLX5_IB_UAPI_DM_TYPE_MEMIC:
+ return handle_alloc_dm_memic(context, attr, attrs);
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM:
+ return handle_alloc_dm_sw_icm(context, attr, attrs, type);
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+}
+
+static void dm_memic_remove_ops(struct mlx5_ib_dm_memic *dm)
+{
+ struct mlx5_ib_dm_op_entry *entry;
+ unsigned long idx;
+
+ mutex_lock(&dm->ops_xa_lock);
+ xa_for_each(&dm->ops, idx, entry) {
+ xa_erase(&dm->ops, idx);
+ rdma_user_mmap_entry_remove(&entry->mentry.rdma_entry);
+ }
+ mutex_unlock(&dm->ops_xa_lock);
+}
+
+static void mlx5_dm_memic_dealloc(struct mlx5_ib_dm_memic *dm)
+{
+ dm_memic_remove_ops(dm);
+ rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
+}
+
+static int mlx5_dm_icm_dealloc(struct mlx5_ib_ucontext *ctx,
+ struct mlx5_ib_dm_icm *dm)
+{
+ enum mlx5_sw_icm_type type = get_icm_type(dm->base.type);
+ struct mlx5_core_dev *dev = to_mdev(dm->base.ibdm.device)->mdev;
+ int err;
+
+ err = mlx5_dm_sw_icm_dealloc(dev, type, dm->base.size, ctx->devx_uid,
+ dm->base.dev_addr, dm->obj_id);
+ if (!err)
+ kfree(dm);
+ return err;
+}
+
+static int mlx5_ib_dealloc_dm(struct ib_dm *ibdm,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_ib_dm *dm = to_mdm(ibdm);
+
+ switch (dm->type) {
+ case MLX5_IB_UAPI_DM_TYPE_MEMIC:
+ mlx5_dm_memic_dealloc(to_memic(ibdm));
+ return 0;
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM:
+ return mlx5_dm_icm_dealloc(ctx, to_icm(ibdm));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_QUERY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_dm *ibdm =
+ uverbs_attr_get_obj(attrs, MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE);
+ struct mlx5_ib_dm *dm = to_mdm(ibdm);
+ struct mlx5_ib_dm_memic *memic;
+ u64 start_offset;
+ u16 page_idx;
+ int err;
+
+ if (dm->type != MLX5_IB_UAPI_DM_TYPE_MEMIC)
+ return -EOPNOTSUPP;
+
+ memic = to_memic(ibdm);
+ page_idx = memic->mentry.rdma_entry.start_pgoff & 0xFFFF;
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
+ &page_idx, sizeof(page_idx));
+ if (err)
+ return err;
+
+ start_offset = memic->base.dev_addr & ~PAGE_MASK;
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
+ &start_offset, sizeof(start_offset));
+ if (err)
+ return err;
+
+ return uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
+ &memic->req_length,
+ sizeof(memic->req_length));
+}
+
+void mlx5_ib_dm_mmap_free(struct mlx5_ib_dev *dev,
+ struct mlx5_user_mmap_entry *mentry)
+{
+ struct mlx5_ib_dm_op_entry *op_entry;
+ struct mlx5_ib_dm_memic *mdm;
+
+ switch (mentry->mmap_flag) {
+ case MLX5_IB_MMAP_TYPE_MEMIC:
+ mdm = container_of(mentry, struct mlx5_ib_dm_memic, mentry);
+ kref_put(&mdm->ref, mlx5_ib_dm_memic_free);
+ break;
+ case MLX5_IB_MMAP_TYPE_MEMIC_OP:
+ op_entry = container_of(mentry, struct mlx5_ib_dm_op_entry,
+ mentry);
+ mdm = op_entry->dm;
+ mlx5_cmd_dealloc_memic_op(&dev->dm, mdm->base.dev_addr,
+ op_entry->op);
+ kfree(op_entry);
+ kref_put(&mdm->ref, mlx5_ib_dm_memic_free);
+ break;
+ default:
+ WARN_ON(true);
+ }
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DM_QUERY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE, UVERBS_OBJECT_DM,
+ UVERBS_ACCESS_READ, UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
+ UVERBS_ATTR_TYPE(u64), UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
+ UVERBS_ATTR_TYPE(u16), UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
+ UVERBS_ATTR_TYPE(u64), UA_MANDATORY));
+
+ADD_UVERBS_ATTRIBUTES_SIMPLE(
+ mlx5_ib_dm, UVERBS_OBJECT_DM, UVERBS_METHOD_DM_ALLOC,
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
+ UVERBS_ATTR_TYPE(u64), UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+ UVERBS_ATTR_TYPE(u16), UA_OPTIONAL),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
+ enum mlx5_ib_uapi_dm_type, UA_OPTIONAL));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DM_MAP_OP_ADDR,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE,
+ UVERBS_OBJECT_DM,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP,
+ UVERBS_ATTR_TYPE(u8),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
+ UVERBS_ATTR_TYPE(u16),
+ UA_OPTIONAL));
+
+DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DM,
+ &UVERBS_METHOD(MLX5_IB_METHOD_DM_MAP_OP_ADDR),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DM_QUERY));
+
+const struct uapi_definition mlx5_ib_dm_defs[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DM),
+ {},
+};
+
+const struct ib_device_ops mlx5_ib_dev_dm_ops = {
+ .alloc_dm = mlx5_ib_alloc_dm,
+ .dealloc_dm = mlx5_ib_dealloc_dm,
+ .reg_dm_mr = mlx5_ib_reg_dm_mr,
+};
diff --git a/drivers/infiniband/hw/mlx5/dm.h b/drivers/infiniband/hw/mlx5/dm.h
new file mode 100644
index 000000000000..9674a80d8d70
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/dm.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2021, Mellanox Technologies inc. All rights reserved.
+ */
+
+#ifndef _MLX5_IB_DM_H
+#define _MLX5_IB_DM_H
+
+#include "mlx5_ib.h"
+
+extern const struct ib_device_ops mlx5_ib_dev_dm_ops;
+extern const struct uapi_definition mlx5_ib_dm_defs[];
+
+struct mlx5_ib_dm {
+ struct ib_dm ibdm;
+ u32 type;
+ phys_addr_t dev_addr;
+ size_t size;
+};
+
+struct mlx5_ib_dm_op_entry {
+ struct mlx5_user_mmap_entry mentry;
+ phys_addr_t op_addr;
+ struct mlx5_ib_dm_memic *dm;
+ u8 op;
+};
+
+struct mlx5_ib_dm_memic {
+ struct mlx5_ib_dm base;
+ struct mlx5_user_mmap_entry mentry;
+ struct xarray ops;
+ struct mutex ops_xa_lock;
+ struct kref ref;
+ size_t req_length;
+};
+
+struct mlx5_ib_dm_icm {
+ struct mlx5_ib_dm base;
+ u32 obj_id;
+};
+
+static inline struct mlx5_ib_dm *to_mdm(struct ib_dm *ibdm)
+{
+ return container_of(ibdm, struct mlx5_ib_dm, ibdm);
+}
+
+static inline struct mlx5_ib_dm_memic *to_memic(struct ib_dm *ibdm)
+{
+ return container_of(ibdm, struct mlx5_ib_dm_memic, base.ibdm);
+}
+
+static inline struct mlx5_ib_dm_icm *to_icm(struct ib_dm *ibdm)
+{
+ return container_of(ibdm, struct mlx5_ib_dm_icm, base.ibdm);
+}
+
+struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+void mlx5_ib_dm_mmap_free(struct mlx5_ib_dev *dev,
+ struct mlx5_user_mmap_entry *mentry);
+void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr,
+ u64 length);
+void mlx5_cmd_dealloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
+ u8 operation);
+
+#endif /* _MLX5_IB_DM_H */
diff --git a/drivers/infiniband/hw/mlx5/dmah.c b/drivers/infiniband/hw/mlx5/dmah.c
new file mode 100644
index 000000000000..362a88992ffa
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/dmah.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include <linux/pci-tph.h>
+#include "dmah.h"
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
+static int mlx5_ib_alloc_dmah(struct ib_dmah *ibdmah,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_core_dev *mdev = to_mdev(ibdmah->device)->mdev;
+ struct mlx5_ib_dmah *dmah = to_mdmah(ibdmah);
+ u16 st_bits = BIT(IB_DMAH_CPU_ID_EXISTS) |
+ BIT(IB_DMAH_MEM_TYPE_EXISTS);
+ int err;
+
+ /* A processing hint (PH) is mandatory for TPH, per PCIe spec 6.2-1.0 */
+ if (!(ibdmah->valid_fields & BIT(IB_DMAH_PH_EXISTS)))
+ return -EINVAL;
+
+ /* ST is optional; however, partial data for it is not allowed */
+ if (ibdmah->valid_fields & st_bits) {
+ if ((ibdmah->valid_fields & st_bits) != st_bits)
+ return -EINVAL;
+ err = mlx5_st_alloc_index(mdev, ibdmah->mem_type,
+ ibdmah->cpu_id, &dmah->st_index);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlx5_ib_dealloc_dmah(struct ib_dmah *ibdmah,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_dmah *dmah = to_mdmah(ibdmah);
+ struct mlx5_core_dev *mdev = to_mdev(ibdmah->device)->mdev;
+
+ if (ibdmah->valid_fields & BIT(IB_DMAH_CPU_ID_EXISTS))
+ return mlx5_st_dealloc_index(mdev, dmah->st_index);
+
+ return 0;
+}
+
+const struct ib_device_ops mlx5_ib_dev_dmah_ops = {
+ .alloc_dmah = mlx5_ib_alloc_dmah,
+ .dealloc_dmah = mlx5_ib_dealloc_dmah,
+};
diff --git a/drivers/infiniband/hw/mlx5/dmah.h b/drivers/infiniband/hw/mlx5/dmah.h
new file mode 100644
index 000000000000..68de72b4744a
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/dmah.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#ifndef _MLX5_IB_DMAH_H
+#define _MLX5_IB_DMAH_H
+
+#include "mlx5_ib.h"
+
+extern const struct ib_device_ops mlx5_ib_dev_dmah_ops;
+
+struct mlx5_ib_dmah {
+ struct ib_dmah ibdmah;
+ u16 st_index;
+};
+
+static inline struct mlx5_ib_dmah *to_mdmah(struct ib_dmah *ibdmah)
+{
+ return container_of(ibdmah, struct mlx5_ib_dmah, ibdmah);
+}
+
+#endif /* _MLX5_IB_DMAH_H */
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
index a0e4e6ddb71a..e32111117a5e 100644
--- a/drivers/infiniband/hw/mlx5/doorbell.c
+++ b/drivers/infiniband/hw/mlx5/doorbell.c
@@ -32,6 +32,7 @@
#include <linux/kref.h>
#include <linux/slab.h>
+#include <linux/sched/mm.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"
@@ -41,6 +42,7 @@ struct mlx5_ib_user_db_page {
struct ib_umem *umem;
unsigned long user_virt;
int refcnt;
+ struct mm_struct *mm;
};
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
@@ -52,7 +54,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
mutex_lock(&context->db_page_mutex);
list_for_each_entry(page, &context->db_page_list, list)
- if (page->user_virt == (virt & PAGE_MASK))
+ if ((current->mm == page->mm) &&
+ (page->user_virt == (virt & PAGE_MASK)))
goto found;
page = kmalloc(sizeof(*page), GFP_KERNEL);
@@ -63,18 +66,21 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
page->user_virt = (virt & PAGE_MASK);
page->refcnt = 0;
- page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
- PAGE_SIZE, 0, 0);
+ page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK,
+ PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
err = PTR_ERR(page->umem);
kfree(page);
goto out;
}
+ mmgrab(current->mm);
+ page->mm = current->mm;
list_add(&page->list, &context->db_page_list);
found:
- db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
+ db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
+ (virt & ~PAGE_MASK);
db->u.user_page = page;
++page->refcnt;
@@ -90,6 +96,7 @@ void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
if (!--db->u.user_page->refcnt) {
list_del(&db->u.user_page->list);
+ mmdrop(db->u.user_page->mm);
ib_umem_release(db->u.user_page->umem);
kfree(db->u.user_page);
}
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
new file mode 100644
index 000000000000..b0f7663c24c1
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -0,0 +1,3501 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/uverbs_types.h>
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/uverbs_std_types.h>
+#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/mlx5_user_ioctl_verbs.h>
+#include <rdma/ib_hdrs.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_ucaps.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/fs.h>
+#include <linux/mlx5/fs_helpers.h>
+#include <linux/mlx5/eswitch.h>
+#include <net/inet_ecn.h>
+#include "mlx5_ib.h"
+#include "counters.h"
+#include "devx.h"
+#include "fs.h"
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
+enum {
+ MATCH_CRITERIA_ENABLE_OUTER_BIT,
+ MATCH_CRITERIA_ENABLE_MISC_BIT,
+ MATCH_CRITERIA_ENABLE_INNER_BIT,
+ MATCH_CRITERIA_ENABLE_MISC2_BIT
+};
+
+struct mlx5_per_qp_opfc {
+ struct mlx5_ib_op_fc opfcs[MLX5_IB_OPCOUNTER_MAX];
+};
+
+#define HEADER_IS_ZERO(match_criteria, headers) \
+ !(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
+ 0, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
+
+static u8 get_match_criteria_enable(u32 *match_criteria)
+{
+ u8 match_criteria_enable;
+
+ match_criteria_enable =
+ (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
+ MATCH_CRITERIA_ENABLE_OUTER_BIT;
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
+ MATCH_CRITERIA_ENABLE_MISC_BIT;
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
+ MATCH_CRITERIA_ENABLE_INNER_BIT;
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
+ MATCH_CRITERIA_ENABLE_MISC2_BIT;
+
+ return match_criteria_enable;
+}
+
+static int set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
+{
+ u8 entry_mask;
+ u8 entry_val;
+ int err = 0;
+
+ if (!mask)
+ goto out;
+
+ entry_mask = MLX5_GET(fte_match_set_lyr_2_4, outer_c,
+ ip_protocol);
+ entry_val = MLX5_GET(fte_match_set_lyr_2_4, outer_v,
+ ip_protocol);
+ if (!entry_mask) {
+ MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
+ goto out;
+ }
+ /* Don't override existing ip protocol */
+ if (mask != entry_mask || val != entry_val)
+ err = -EINVAL;
+out:
+ return err;
+}
+
+static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
+ bool inner)
+{
+ if (inner) {
+ MLX5_SET(fte_match_set_misc,
+ misc_c, inner_ipv6_flow_label, mask);
+ MLX5_SET(fte_match_set_misc,
+ misc_v, inner_ipv6_flow_label, val);
+ } else {
+ MLX5_SET(fte_match_set_misc,
+ misc_c, outer_ipv6_flow_label, mask);
+ MLX5_SET(fte_match_set_misc,
+ misc_v, outer_ipv6_flow_label, val);
+ }
+}
+
+static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
+{
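+ /* The TOS / traffic-class byte carries ECN in bits 0-1 and DSCP in
+ * bits 2-7, so the mask and value are split accordingly.
+ */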
+ MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
+}
+
+static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask)
+{
+ if (MLX5_GET(fte_match_mpls, set_mask, mpls_label) &&
+ !(field_support & MLX5_FIELD_SUPPORT_MPLS_LABEL))
+ return -EOPNOTSUPP;
+
+ if (MLX5_GET(fte_match_mpls, set_mask, mpls_exp) &&
+ !(field_support & MLX5_FIELD_SUPPORT_MPLS_EXP))
+ return -EOPNOTSUPP;
+
+ if (MLX5_GET(fte_match_mpls, set_mask, mpls_s_bos) &&
+ !(field_support & MLX5_FIELD_SUPPORT_MPLS_S_BOS))
+ return -EOPNOTSUPP;
+
+ if (MLX5_GET(fte_match_mpls, set_mask, mpls_ttl) &&
+ !(field_support & MLX5_FIELD_SUPPORT_MPLS_TTL))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+#define LAST_ETH_FIELD vlan_tag
+#define LAST_IPV4_FIELD tos
+#define LAST_IPV6_FIELD traffic_class
+#define LAST_TCP_UDP_FIELD src_port
+#define LAST_TUNNEL_FIELD tunnel_id
+#define LAST_FLOW_TAG_FIELD tag_id
+#define LAST_DROP_FIELD size
+#define LAST_COUNTERS_FIELD counters
+
+/* Evaluates to true if any field beyond the last supported @field
+ * is set in @filter.
+ */
+#define FIELDS_NOT_SUPPORTED(filter, field) \
+ memchr_inv((void *)&filter.field + sizeof(filter.field), 0, \
+ sizeof(filter) - offsetofend(typeof(filter), field))
+
+int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
+ bool is_egress,
+ struct mlx5_flow_act *action)
+{
+
+ switch (maction->ib_action.type) {
+ case IB_FLOW_ACTION_UNSPECIFIED:
+ if (maction->flow_action_raw.sub_type ==
+ MLX5_IB_FLOW_ACTION_MODIFY_HEADER) {
+ if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ return -EINVAL;
+ action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ action->modify_hdr =
+ maction->flow_action_raw.modify_hdr;
+ return 0;
+ }
+ if (maction->flow_action_raw.sub_type ==
+ MLX5_IB_FLOW_ACTION_DECAP) {
+ if (action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
+ return -EINVAL;
+ action->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ return 0;
+ }
+ if (maction->flow_action_raw.sub_type ==
+ MLX5_IB_FLOW_ACTION_PACKET_REFORMAT) {
+ if (action->action &
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
+ return -EINVAL;
+ action->action |=
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ action->pkt_reformat =
+ maction->flow_action_raw.pkt_reformat;
+ return 0;
+ }
+ fallthrough;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int parse_flow_attr(struct mlx5_core_dev *mdev,
+ struct mlx5_flow_spec *spec,
+ const union ib_flow_spec *ib_spec,
+ const struct ib_flow_attr *flow_attr,
+ struct mlx5_flow_act *action, u32 prev_type)
+{
+ struct mlx5_flow_context *flow_context = &spec->flow_context;
+ u32 *match_c = spec->match_criteria;
+ u32 *match_v = spec->match_value;
+ void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ misc_parameters);
+ void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ misc_parameters);
+ void *misc_params2_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ misc_parameters_2);
+ void *misc_params2_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ misc_parameters_2);
+ void *headers_c;
+ void *headers_v;
+ int match_ipv;
+ int ret;
+
+ if (ib_spec->type & IB_FLOW_SPEC_INNER) {
+ headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ inner_headers);
+ match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.inner_ip_version);
+ } else {
+ headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ outer_headers);
+ match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_ip_version);
+ }
+
+ switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
+ case IB_FLOW_SPEC_ETH:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
+ return -EOPNOTSUPP;
+
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dmac_47_16),
+ ib_spec->eth.mask.dst_mac);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dmac_47_16),
+ ib_spec->eth.val.dst_mac);
+
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ smac_47_16),
+ ib_spec->eth.mask.src_mac);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ smac_47_16),
+ ib_spec->eth.val.src_mac);
+
+ if (ib_spec->eth.mask.vlan_tag) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ cvlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ cvlan_tag, 1);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ first_vid, ntohs(ib_spec->eth.val.vlan_tag));
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ first_cfi,
+ ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ first_cfi,
+ ntohs(ib_spec->eth.val.vlan_tag) >> 12);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ first_prio,
+ ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ first_prio,
+ ntohs(ib_spec->eth.val.vlan_tag) >> 13);
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ ethertype, ntohs(ib_spec->eth.mask.ether_type));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ ethertype, ntohs(ib_spec->eth.val.ether_type));
+ break;
+ case IB_FLOW_SPEC_IPV4:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
+ return -EOPNOTSUPP;
+
+ if (match_ipv) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ ip_version, 0xf);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ ip_version, MLX5_FS_IPV4_VERSION);
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ ethertype, 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ ethertype, ETH_P_IP);
+ }
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &ib_spec->ipv4.mask.src_ip,
+ sizeof(ib_spec->ipv4.mask.src_ip));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &ib_spec->ipv4.val.src_ip,
+ sizeof(ib_spec->ipv4.val.src_ip));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &ib_spec->ipv4.mask.dst_ip,
+ sizeof(ib_spec->ipv4.mask.dst_ip));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &ib_spec->ipv4.val.dst_ip,
+ sizeof(ib_spec->ipv4.val.dst_ip));
+
+ set_tos(headers_c, headers_v,
+ ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
+
+ if (set_proto(headers_c, headers_v,
+ ib_spec->ipv4.mask.proto,
+ ib_spec->ipv4.val.proto))
+ return -EINVAL;
+ break;
+ case IB_FLOW_SPEC_IPV6:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
+ return -EOPNOTSUPP;
+
+ if (match_ipv) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ ip_version, 0xf);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ ip_version, MLX5_FS_IPV6_VERSION);
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ ethertype, 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ ethertype, ETH_P_IPV6);
+ }
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &ib_spec->ipv6.mask.src_ip,
+ sizeof(ib_spec->ipv6.mask.src_ip));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &ib_spec->ipv6.val.src_ip,
+ sizeof(ib_spec->ipv6.val.src_ip));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &ib_spec->ipv6.mask.dst_ip,
+ sizeof(ib_spec->ipv6.mask.dst_ip));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &ib_spec->ipv6.val.dst_ip,
+ sizeof(ib_spec->ipv6.val.dst_ip));
+
+ set_tos(headers_c, headers_v,
+ ib_spec->ipv6.mask.traffic_class,
+ ib_spec->ipv6.val.traffic_class);
+
+ if (set_proto(headers_c, headers_v,
+ ib_spec->ipv6.mask.next_hdr,
+ ib_spec->ipv6.val.next_hdr))
+ return -EINVAL;
+
+ set_flow_label(misc_params_c, misc_params_v,
+ ntohl(ib_spec->ipv6.mask.flow_label),
+ ntohl(ib_spec->ipv6.val.flow_label),
+ ib_spec->type & IB_FLOW_SPEC_INNER);
+ break;
+ case IB_FLOW_SPEC_ESP:
+ return -EOPNOTSUPP;
+ case IB_FLOW_SPEC_TCP:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
+ LAST_TCP_UDP_FIELD))
+ return -EOPNOTSUPP;
+
+ if (set_proto(headers_c, headers_v, 0xff, IPPROTO_TCP))
+ return -EINVAL;
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
+ ntohs(ib_spec->tcp_udp.mask.src_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
+ ntohs(ib_spec->tcp_udp.val.src_port));
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
+ ntohs(ib_spec->tcp_udp.mask.dst_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
+ ntohs(ib_spec->tcp_udp.val.dst_port));
+ break;
+ case IB_FLOW_SPEC_UDP:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
+ LAST_TCP_UDP_FIELD))
+ return -EOPNOTSUPP;
+
+ if (set_proto(headers_c, headers_v, 0xff, IPPROTO_UDP))
+ return -EINVAL;
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
+ ntohs(ib_spec->tcp_udp.mask.src_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
+ ntohs(ib_spec->tcp_udp.val.src_port));
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
+ ntohs(ib_spec->tcp_udp.mask.dst_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ ntohs(ib_spec->tcp_udp.val.dst_port));
+ break;
+ case IB_FLOW_SPEC_GRE:
+ if (ib_spec->gre.mask.c_ks_res0_ver)
+ return -EOPNOTSUPP;
+
+ if (set_proto(headers_c, headers_v, 0xff, IPPROTO_GRE))
+ return -EINVAL;
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
+ 0xff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+ IPPROTO_GRE);
+
+ MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol,
+ ntohs(ib_spec->gre.mask.protocol));
+ MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol,
+ ntohs(ib_spec->gre.val.protocol));
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
+ gre_key.nvgre.hi),
+ &ib_spec->gre.mask.key,
+ sizeof(ib_spec->gre.mask.key));
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v,
+ gre_key.nvgre.hi),
+ &ib_spec->gre.val.key,
+ sizeof(ib_spec->gre.val.key));
+ break;
+ case IB_FLOW_SPEC_MPLS:
+ switch (prev_type) {
+ case IB_FLOW_SPEC_UDP:
+ if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_first_mpls_over_udp),
+ &ib_spec->mpls.mask.tag))
+ return -EOPNOTSUPP;
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
+ outer_first_mpls_over_udp),
+ &ib_spec->mpls.val.tag,
+ sizeof(ib_spec->mpls.val.tag));
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
+ outer_first_mpls_over_udp),
+ &ib_spec->mpls.mask.tag,
+ sizeof(ib_spec->mpls.mask.tag));
+ break;
+ case IB_FLOW_SPEC_GRE:
+ if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_first_mpls_over_gre),
+ &ib_spec->mpls.mask.tag))
+ return -EOPNOTSUPP;
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
+ outer_first_mpls_over_gre),
+ &ib_spec->mpls.val.tag,
+ sizeof(ib_spec->mpls.val.tag));
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
+ outer_first_mpls_over_gre),
+ &ib_spec->mpls.mask.tag,
+ sizeof(ib_spec->mpls.mask.tag));
+ break;
+ default:
+ if (ib_spec->type & IB_FLOW_SPEC_INNER) {
+ if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.inner_first_mpls),
+ &ib_spec->mpls.mask.tag))
+ return -EOPNOTSUPP;
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
+ inner_first_mpls),
+ &ib_spec->mpls.val.tag,
+ sizeof(ib_spec->mpls.val.tag));
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
+ inner_first_mpls),
+ &ib_spec->mpls.mask.tag,
+ sizeof(ib_spec->mpls.mask.tag));
+ } else {
+ if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_first_mpls),
+ &ib_spec->mpls.mask.tag))
+ return -EOPNOTSUPP;
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
+ outer_first_mpls),
+ &ib_spec->mpls.val.tag,
+ sizeof(ib_spec->mpls.val.tag));
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
+ outer_first_mpls),
+ &ib_spec->mpls.mask.tag,
+ sizeof(ib_spec->mpls.mask.tag));
+ }
+ }
+ break;
+ case IB_FLOW_SPEC_VXLAN_TUNNEL:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
+ LAST_TUNNEL_FIELD))
+ return -EOPNOTSUPP;
+
+ MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
+ ntohl(ib_spec->tunnel.mask.tunnel_id));
+ MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
+ ntohl(ib_spec->tunnel.val.tunnel_id));
+ break;
+ case IB_FLOW_SPEC_ACTION_TAG:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag,
+ LAST_FLOW_TAG_FIELD))
+ return -EOPNOTSUPP;
+ if (ib_spec->flow_tag.tag_id >= BIT(24))
+ return -EINVAL;
+
+ flow_context->flow_tag = ib_spec->flow_tag.tag_id;
+ flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
+ break;
+ case IB_FLOW_SPEC_ACTION_DROP:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
+ LAST_DROP_FIELD))
+ return -EOPNOTSUPP;
+ action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+ break;
+ case IB_FLOW_SPEC_ACTION_HANDLE:
+ ret = parse_flow_flow_action(to_mflow_act(ib_spec->action.act),
+ flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS, action);
+ if (ret)
+ return ret;
+ break;
+ case IB_FLOW_SPEC_ACTION_COUNT:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count,
+ LAST_COUNTERS_FIELD))
+ return -EOPNOTSUPP;
+
+ /* for now support only one counters spec per flow */
+ if (action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
+ return -EINVAL;
+
+ action->counters = ib_spec->flow_count.counters;
+ action->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* If a flow could catch both multicast and unicast packets,
+ * it won't fall into the multicast flow steering table and this rule
+ * could steal other multicast packets.
+ */
+static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
+{
+ union ib_flow_spec *flow_spec;
+
+ if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
+ ib_attr->num_of_specs < 1)
+ return false;
+
+ flow_spec = (union ib_flow_spec *)(ib_attr + 1);
+ if (flow_spec->type == IB_FLOW_SPEC_IPV4) {
+ struct ib_flow_spec_ipv4 *ipv4_spec;
+
+ ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec;
+ if (ipv4_is_multicast(ipv4_spec->val.dst_ip))
+ return true;
+
+ return false;
+ }
+
+ if (flow_spec->type == IB_FLOW_SPEC_ETH) {
+ struct ib_flow_spec_eth *eth_spec;
+
+ eth_spec = (struct ib_flow_spec_eth *)flow_spec;
+ return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
+ is_multicast_ether_addr(eth_spec->val.dst_mac);
+ }
+
+ return false;
+}
+
+static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
+ const struct ib_flow_attr *flow_attr,
+ bool check_inner)
+{
+ union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
+ int match_ipv = check_inner ?
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.inner_ip_version) :
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_ip_version);
+ int inner_bit = check_inner ? IB_FLOW_SPEC_INNER : 0;
+ bool ipv4_spec_valid, ipv6_spec_valid;
+ unsigned int ip_spec_type = 0;
+ bool has_ethertype = false;
+ unsigned int spec_index;
+ bool mask_valid = true;
+ u16 eth_type = 0;
+ bool type_valid;
+
+ /* Validate that ethertype is correct */
+ for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
+ if ((ib_spec->type == (IB_FLOW_SPEC_ETH | inner_bit)) &&
+ ib_spec->eth.mask.ether_type) {
+ mask_valid = (ib_spec->eth.mask.ether_type ==
+ htons(0xffff));
+ has_ethertype = true;
+ eth_type = ntohs(ib_spec->eth.val.ether_type);
+ } else if ((ib_spec->type == (IB_FLOW_SPEC_IPV4 | inner_bit)) ||
+ (ib_spec->type == (IB_FLOW_SPEC_IPV6 | inner_bit))) {
+ ip_spec_type = ib_spec->type;
+ }
+ ib_spec = (void *)ib_spec + ib_spec->size;
+ }
+
+ type_valid = (!has_ethertype) || (!ip_spec_type);
+ if (!type_valid && mask_valid) {
+ ipv4_spec_valid = (eth_type == ETH_P_IP) &&
+ (ip_spec_type == (IB_FLOW_SPEC_IPV4 | inner_bit));
+ ipv6_spec_valid = (eth_type == ETH_P_IPV6) &&
+ (ip_spec_type == (IB_FLOW_SPEC_IPV6 | inner_bit));
+
+ type_valid = (ipv4_spec_valid) || (ipv6_spec_valid) ||
+ (((eth_type == ETH_P_MPLS_UC) ||
+ (eth_type == ETH_P_MPLS_MC)) && match_ipv);
+ }
+
+ return type_valid;
+}
+
+static bool is_valid_attr(struct mlx5_core_dev *mdev,
+ const struct ib_flow_attr *flow_attr)
+{
+ return is_valid_ethertype(mdev, flow_attr, false) &&
+ is_valid_ethertype(mdev, flow_attr, true);
+}
+
+static void put_flow_table(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *prio, bool ft_added)
+{
+ prio->refcount -= !!ft_added;
+ if (!prio->refcount) {
+ mlx5_destroy_flow_table(prio->flow_table);
+ prio->flow_table = NULL;
+ }
+}
+
+static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
+{
+ struct mlx5_ib_flow_handler *handler = container_of(flow_id,
+ struct mlx5_ib_flow_handler,
+ ibflow);
+ struct mlx5_ib_flow_handler *iter, *tmp;
+ struct mlx5_ib_dev *dev = handler->dev;
+
+ mutex_lock(&dev->flow_db->lock);
+
+ list_for_each_entry_safe(iter, tmp, &handler->list, list) {
+ mlx5_del_flow_rules(iter->rule);
+ put_flow_table(dev, iter->prio, true);
+ list_del(&iter->list);
+ kfree(iter);
+ }
+
+ mlx5_del_flow_rules(handler->rule);
+ put_flow_table(dev, handler->prio, true);
+ mlx5_ib_counters_clear_description(handler->ibcounters);
+ mutex_unlock(&dev->flow_db->lock);
+ if (handler->flow_matcher)
+ atomic_dec(&handler->flow_matcher->usecnt);
+ kfree(handler);
+
+ return 0;
+}
+
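+/*
+ * Each user-visible flow priority maps to a pair of core priorities:
+ * don't-trap rules use 2 * priority and regular rules 2 * priority + 1.
+ */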
+static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
+{
+ priority *= 2;
+ if (!dont_trap)
+ priority++;
+ return priority;
+}
+
+enum flow_table_type {
+ MLX5_IB_FT_RX,
+ MLX5_IB_FT_TX
+};
+
+#define MLX5_FS_MAX_TYPES 6
+#define MLX5_FS_MAX_ENTRIES BIT(16)
+
+static bool __maybe_unused mlx5_ib_shared_ft_allowed(struct ib_device *device)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+
+ return MLX5_CAP_GEN(dev->mdev, shared_object_to_user_object_allowed);
+}
+
+static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_ib_dev *dev,
+ struct mlx5_flow_namespace *ns,
+ struct mlx5_ib_flow_prio *prio,
+ int priority,
+ int num_entries, int num_groups,
+ u32 flags, u16 vport)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_table *ft;
+
+ ft_attr.prio = priority;
+ ft_attr.max_fte = num_entries;
+ ft_attr.flags = flags;
+ ft_attr.vport = vport;
+ ft_attr.autogroup.max_num_groups = num_groups;
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft))
+ return ERR_CAST(ft);
+
+ prio->flow_table = ft;
+ prio->refcount = 0;
+ return prio;
+}
+
+static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
+ struct ib_flow_attr *flow_attr,
+ enum flow_table_type ft_type)
+{
+ bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
+ struct mlx5_flow_namespace *ns = NULL;
+ enum mlx5_flow_namespace_type fn_type;
+ struct mlx5_ib_flow_prio *prio;
+ struct mlx5_flow_table *ft;
+ int max_table_size;
+ int num_entries;
+ int num_groups;
+ bool esw_encap;
+ u32 flags = 0;
+ int priority;
+
+ max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ log_max_ft_size));
+ esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
+ DEVLINK_ESWITCH_ENCAP_MODE_NONE;
+ switch (flow_attr->type) {
+ case IB_FLOW_ATTR_NORMAL:
+ if (flow_is_multicast_only(flow_attr) && !dont_trap)
+ priority = MLX5_IB_FLOW_MCAST_PRIO;
+ else
+ priority = ib_prio_to_core_prio(flow_attr->priority,
+ dont_trap);
+ if (ft_type == MLX5_IB_FT_RX) {
+ fn_type = MLX5_FLOW_NAMESPACE_BYPASS;
+ prio = &dev->flow_db->prios[priority];
+ if (!dev->is_rep && !esw_encap &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
+ if (!dev->is_rep && !esw_encap &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ reformat_l3_tunnel_to_l2))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ } else {
+ max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_TX(
+ dev->mdev, log_max_ft_size));
+ fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
+ prio = &dev->flow_db->egress_prios[priority];
+ if (!dev->is_rep && !esw_encap &&
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ }
+ ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
+ num_entries = MLX5_FS_MAX_ENTRIES;
+ num_groups = MLX5_FS_MAX_TYPES;
+ break;
+ case IB_FLOW_ATTR_ALL_DEFAULT:
+ case IB_FLOW_ATTR_MC_DEFAULT:
+ ns = mlx5_get_flow_namespace(dev->mdev,
+ MLX5_FLOW_NAMESPACE_LEFTOVERS);
+ build_leftovers_ft_param(&priority, &num_entries, &num_groups);
+ prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
+ break;
+ case IB_FLOW_ATTR_SNIFFER:
+ if (!MLX5_CAP_FLOWTABLE(dev->mdev,
+ allow_sniffer_and_nic_rx_shared_tir))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ ns = mlx5_get_flow_namespace(
+ dev->mdev, ft_type == MLX5_IB_FT_RX ?
+ MLX5_FLOW_NAMESPACE_SNIFFER_RX :
+ MLX5_FLOW_NAMESPACE_SNIFFER_TX);
+
+ prio = &dev->flow_db->sniffer[ft_type];
+ priority = 0;
+ num_entries = 1;
+ num_groups = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (!ns)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ max_table_size = min_t(int, num_entries, max_table_size);
+
+ ft = prio->flow_table;
+ if (!ft)
+ return _get_prio(dev, ns, prio, priority, max_table_size,
+ num_groups, flags, 0);
+
+ return prio;
+}
+
+enum {
+ RDMA_RX_ECN_OPCOUNTER_PER_QP_PRIO,
+ RDMA_RX_CNP_OPCOUNTER_PER_QP_PRIO,
+ RDMA_RX_PKTS_BYTES_OPCOUNTER_PER_QP_PRIO,
+ RDMA_RX_ECN_OPCOUNTER_PRIO,
+ RDMA_RX_CNP_OPCOUNTER_PRIO,
+ RDMA_RX_PKTS_BYTES_OPCOUNTER_PRIO,
+};
+
+enum {
+ RDMA_TX_CNP_OPCOUNTER_PER_QP_PRIO,
+ RDMA_TX_PKTS_BYTES_OPCOUNTER_PER_QP_PRIO,
+ RDMA_TX_CNP_OPCOUNTER_PRIO,
+ RDMA_TX_PKTS_BYTES_OPCOUNTER_PRIO,
+};
+
+static int set_vhca_port_spec(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_flow_spec *spec)
+{
+ if (!MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev,
+ ft_field_support.source_vhca_port) ||
+ !MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev,
+ ft_field_support.source_vhca_port))
+ return -EOPNOTSUPP;
+
+ MLX5_SET_TO_ONES(fte_match_param, &spec->match_criteria,
+ misc_parameters.source_vhca_port);
+ MLX5_SET(fte_match_param, &spec->match_value,
+ misc_parameters.source_vhca_port, port_num);
+
+ return 0;
+}
+
+static int set_ecn_ce_spec(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_flow_spec *spec, int ipv)
+{
+ if (!MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev,
+ ft_field_support.outer_ip_version))
+ return -EOPNOTSUPP;
+
+ if (mlx5_core_mp_enabled(dev->mdev) &&
+ set_vhca_port_spec(dev, port_num, spec))
+ return -EOPNOTSUPP;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.ip_ecn);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_ecn,
+ INET_ECN_CE);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.ip_version);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version,
+ ipv);
+
+ spec->match_criteria_enable =
+ get_match_criteria_enable(spec->match_criteria);
+
+ return 0;
+}
+
+static int set_cnp_spec(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_flow_spec *spec)
+{
+ if (mlx5_core_mp_enabled(dev->mdev) &&
+ set_vhca_port_spec(dev, port_num, spec))
+ return -EOPNOTSUPP;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters.bth_opcode);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters.bth_opcode,
+ IB_BTH_OPCODE_CNP);
+
+ spec->match_criteria_enable =
+ get_match_criteria_enable(spec->match_criteria);
+
+ return 0;
+}
+
+/* Return the prio to use for the given optional counter type. Bytes
+ * counter types reuse the prio of the corresponding packets type,
+ * since both share the same flow table resources.
+ */
+static struct mlx5_ib_flow_prio *get_opfc_prio(struct mlx5_ib_dev *dev,
+ u32 type)
+{
+ u32 prio_type;
+
+ switch (type) {
+ case MLX5_IB_OPCOUNTER_RDMA_TX_BYTES:
+ prio_type = MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_BYTES:
+ prio_type = MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP:
+ prio_type = MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP:
+ prio_type = MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS_PER_QP;
+ break;
+ default:
+ prio_type = type;
+ }
+
+ return &dev->flow_db->opfcs[prio_type];
+}
+
+static void put_per_qp_prio(struct mlx5_ib_dev *dev,
+ enum mlx5_ib_optional_counter_type type)
+{
+ enum mlx5_ib_optional_counter_type per_qp_type;
+ struct mlx5_ib_flow_prio *prio;
+
+ switch (type) {
+ case MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS:
+ per_qp_type = MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS:
+ per_qp_type = MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS:
+ per_qp_type = MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS:
+ per_qp_type = MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_TX_BYTES:
+ per_qp_type = MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS:
+ per_qp_type = MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_BYTES:
+ per_qp_type = MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP;
+ break;
+ default:
+ return;
+ }
+
+ prio = get_opfc_prio(dev, per_qp_type);
+ put_flow_table(dev, prio, true);
+}
+
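+/* Make sure the flow table of the per-QP variant of @type exists, creating
+ * it with an initial reference on first use.
+ */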
+static int get_per_qp_prio(struct mlx5_ib_dev *dev,
+ enum mlx5_ib_optional_counter_type type)
+{
+ enum mlx5_ib_optional_counter_type per_qp_type;
+ enum mlx5_flow_namespace_type fn_type;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_ib_flow_prio *prio;
+ int priority;
+
+ switch (type) {
+ case MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS:
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS;
+ priority = RDMA_RX_ECN_OPCOUNTER_PER_QP_PRIO;
+ per_qp_type = MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS:
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS;
+ priority = RDMA_RX_CNP_OPCOUNTER_PER_QP_PRIO;
+ per_qp_type = MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS:
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS;
+ priority = RDMA_TX_CNP_OPCOUNTER_PER_QP_PRIO;
+ per_qp_type = MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS:
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS;
+ priority = RDMA_TX_PKTS_BYTES_OPCOUNTER_PER_QP_PRIO;
+ per_qp_type = MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_TX_BYTES:
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS;
+ priority = RDMA_TX_PKTS_BYTES_OPCOUNTER_PER_QP_PRIO;
+ per_qp_type = MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS:
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS;
+ priority = RDMA_RX_PKTS_BYTES_OPCOUNTER_PER_QP_PRIO;
+ per_qp_type = MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_BYTES:
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS;
+ priority = RDMA_RX_PKTS_BYTES_OPCOUNTER_PER_QP_PRIO;
+ per_qp_type = MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
+ if (!ns)
+ return -EOPNOTSUPP;
+
+ prio = get_opfc_prio(dev, per_qp_type);
+ if (prio->flow_table)
+ return 0;
+
+ prio = _get_prio(dev, ns, prio, priority, MLX5_FS_MAX_POOL_SIZE, 1, 0, 0);
+ if (IS_ERR(prio))
+ return PTR_ERR(prio);
+
+ prio->refcount = 1;
+
+ return 0;
+}
+
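+/* Return the per-QP optional-counter context of @qp_num, allocating a new
+ * one when none is stored in @qpn_opfc_xa yet; *new tells the caller
+ * whether the returned context still has to be inserted into the xarray.
+ */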
+static struct mlx5_per_qp_opfc *get_per_qp_opfc(struct xarray *qpn_opfc_xa,
+ u32 qp_num, bool *new)
+{
+ struct mlx5_per_qp_opfc *per_qp_opfc;
+
+ *new = false;
+
+ per_qp_opfc = xa_load(qpn_opfc_xa, qp_num);
+ if (per_qp_opfc)
+ return per_qp_opfc;
+ per_qp_opfc = kzalloc(sizeof(*per_qp_opfc), GFP_KERNEL);
+
+ if (!per_qp_opfc)
+ return NULL;
+
+ *new = true;
+ return per_qp_opfc;
+}
+
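+/* Install the flow rules that bind the per-QP optional counter of @type to
+ * @qp_num: RX types match on bth_dst_qp, TX types on source_sqn, and a
+ * packets/bytes sibling that is already in use shares its counter and rule.
+ */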
+static int add_op_fc_rules(struct mlx5_ib_dev *dev,
+ struct mlx5_fc *fc_arr[MLX5_IB_OPCOUNTER_MAX],
+ struct xarray *qpn_opfc_xa,
+ struct mlx5_per_qp_opfc *per_qp_opfc,
+ struct mlx5_ib_flow_prio *prio,
+ enum mlx5_ib_optional_counter_type type,
+ u32 qp_num, u32 port_num)
+{
+ struct mlx5_ib_op_fc *opfc = &per_qp_opfc->opfcs[type], *in_use_opfc;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_destination dst;
+ struct mlx5_flow_spec *spec;
+ int i, err, spec_num;
+ bool is_tx;
+
+ if (opfc->fc)
+ return -EEXIST;
+
+ if (mlx5r_is_opfc_shared_and_in_use(per_qp_opfc->opfcs, type,
+ &in_use_opfc)) {
+ opfc->fc = in_use_opfc->fc;
+ opfc->rule[0] = in_use_opfc->rule[0];
+ return 0;
+ }
+
+ opfc->fc = fc_arr[type];
+
+ spec = kcalloc(MAX_OPFC_RULES, sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ err = -ENOMEM;
+ goto null_fc;
+ }
+
+ switch (type) {
+ case MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP:
+ if (set_ecn_ce_spec(dev, port_num, &spec[0],
+ MLX5_FS_IPV4_VERSION) ||
+ set_ecn_ce_spec(dev, port_num, &spec[1],
+ MLX5_FS_IPV6_VERSION)) {
+ err = -EOPNOTSUPP;
+ goto free_spec;
+ }
+ spec_num = 2;
+ is_tx = false;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec[1].match_criteria,
+ misc_parameters.bth_dst_qp);
+ MLX5_SET(fte_match_param, spec[1].match_value,
+ misc_parameters.bth_dst_qp, qp_num);
+ spec[1].match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+ break;
+ case MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS_PER_QP:
+ if (!MLX5_CAP_FLOWTABLE(
+ dev->mdev,
+ ft_field_support_2_nic_receive_rdma.bth_opcode) ||
+ set_cnp_spec(dev, port_num, &spec[0])) {
+ err = -EOPNOTSUPP;
+ goto free_spec;
+ }
+ spec_num = 1;
+ is_tx = false;
+ break;
+ case MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS_PER_QP:
+ if (!MLX5_CAP_FLOWTABLE(
+ dev->mdev,
+ ft_field_support_2_nic_transmit_rdma.bth_opcode) ||
+ set_cnp_spec(dev, port_num, &spec[0])) {
+ err = -EOPNOTSUPP;
+ goto free_spec;
+ }
+ spec_num = 1;
+ is_tx = true;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS_PER_QP:
+ case MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP:
+ spec_num = 1;
+ is_tx = true;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS_PER_QP:
+ case MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP:
+ spec_num = 1;
+ is_tx = false;
+ break;
+ default:
+ err = -EINVAL;
+ goto free_spec;
+ }
+
+ if (is_tx) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters.source_sqn);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters.source_sqn, qp_num);
+ } else {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters.bth_dst_qp);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters.bth_dst_qp, qp_num);
+ }
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dst.counter = opfc->fc;
+
+ flow_act.action =
+ MLX5_FLOW_CONTEXT_ACTION_COUNT | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+
+ for (i = 0; i < spec_num; i++) {
+ opfc->rule[i] = mlx5_add_flow_rules(prio->flow_table, &spec[i],
+ &flow_act, &dst, 1);
+ if (IS_ERR(opfc->rule[i])) {
+ err = PTR_ERR(opfc->rule[i]);
+ goto del_rules;
+ }
+ }
+ prio->refcount += spec_num;
+
+ err = xa_err(xa_store(qpn_opfc_xa, qp_num, per_qp_opfc, GFP_KERNEL));
+ if (err)
+ goto del_rules;
+
+ kfree(spec);
+
+ return 0;
+
+del_rules:
+ while (i--)
+ mlx5_del_flow_rules(opfc->rule[i]);
+ put_flow_table(dev, prio, false);
+free_spec:
+ kfree(spec);
+null_fc:
+ opfc->fc = NULL;
+ return err;
+}
+
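+/* Per-QP packets and bytes counters of the same direction share a single
+ * mlx5_fc; return the counter already allocated for the sibling type, if
+ * any.
+ */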
+static bool
+is_fc_shared_and_in_use(struct mlx5_fc *fc_arr[MLX5_IB_OPCOUNTER_MAX], u32 type,
+ struct mlx5_fc **fc)
+{
+ u32 shared_fc_type;
+
+ switch (type) {
+ case MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS_PER_QP:
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP:
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS_PER_QP:
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP:
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS_PER_QP;
+ break;
+ default:
+ return false;
+ }
+
+ *fc = fc_arr[shared_fc_type];
+ if (!(*fc))
+ return false;
+
+ return true;
+}
+
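+/* Release all per-QP flow counters in @fc_arr, making sure each shared
+ * packets/bytes counter is destroyed only once.
+ */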
+void mlx5r_fs_destroy_fcs(struct mlx5_ib_dev *dev,
+ struct mlx5_fc *fc_arr[MLX5_IB_OPCOUNTER_MAX])
+{
+ struct mlx5_fc *in_use_fc;
+ int i;
+
+ for (i = MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP;
+ i <= MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP; i++) {
+ if (!fc_arr[i])
+ continue;
+
+ if (is_fc_shared_and_in_use(fc_arr, i, &in_use_fc)) {
+ fc_arr[i] = NULL;
+ continue;
+ }
+
+ mlx5_fc_destroy(dev->mdev, fc_arr[i]);
+ fc_arr[i] = NULL;
+ }
+}
+
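+/* Create the flow rules backing the port-wide optional counter @opfc of
+ * @type, creating its flow table and the per-QP companion table on first
+ * use.
+ */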
+int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_ib_op_fc *opfc,
+ enum mlx5_ib_optional_counter_type type)
+{
+ enum mlx5_flow_namespace_type fn_type;
+ int priority, i, err, spec_num;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_destination dst;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_ib_flow_prio *prio;
+ struct mlx5_flow_spec *spec;
+
+ spec = kcalloc(MAX_OPFC_RULES, sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ switch (type) {
+ case MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS:
+ if (set_ecn_ce_spec(dev, port_num, &spec[0],
+ MLX5_FS_IPV4_VERSION) ||
+ set_ecn_ce_spec(dev, port_num, &spec[1],
+ MLX5_FS_IPV6_VERSION)) {
+ err = -EOPNOTSUPP;
+ goto free;
+ }
+ spec_num = 2;
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS;
+ priority = RDMA_RX_ECN_OPCOUNTER_PRIO;
+ break;
+
+ case MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS:
+ if (!MLX5_CAP_FLOWTABLE(dev->mdev,
+ ft_field_support_2_nic_receive_rdma.bth_opcode) ||
+ set_cnp_spec(dev, port_num, &spec[0])) {
+ err = -EOPNOTSUPP;
+ goto free;
+ }
+ spec_num = 1;
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS;
+ priority = RDMA_RX_CNP_OPCOUNTER_PRIO;
+ break;
+
+ case MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS:
+ if (!MLX5_CAP_FLOWTABLE(dev->mdev,
+ ft_field_support_2_nic_transmit_rdma.bth_opcode) ||
+ set_cnp_spec(dev, port_num, &spec[0])) {
+ err = -EOPNOTSUPP;
+ goto free;
+ }
+ spec_num = 1;
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS;
+ priority = RDMA_TX_CNP_OPCOUNTER_PRIO;
+ break;
+
+ case MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS:
+ case MLX5_IB_OPCOUNTER_RDMA_TX_BYTES:
+ spec_num = 1;
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS;
+ priority = RDMA_TX_PKTS_BYTES_OPCOUNTER_PRIO;
+ break;
+
+ case MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS:
+ case MLX5_IB_OPCOUNTER_RDMA_RX_BYTES:
+ spec_num = 1;
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS;
+ priority = RDMA_RX_PKTS_BYTES_OPCOUNTER_PRIO;
+ break;
+
+ default:
+ err = -EOPNOTSUPP;
+ goto free;
+ }
+
+ ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
+ if (!ns) {
+ err = -EOPNOTSUPP;
+ goto free;
+ }
+
+ prio = get_opfc_prio(dev, type);
+ if (!prio->flow_table) {
+ err = get_per_qp_prio(dev, type);
+ if (err)
+ goto free;
+
+ prio = _get_prio(dev, ns, prio, priority,
+ dev->num_ports * MAX_OPFC_RULES, 1, 0, 0);
+ if (IS_ERR(prio)) {
+ err = PTR_ERR(prio);
+ goto put_prio;
+ }
+ }
+
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dst.counter = opfc->fc;
+
+ flow_act.action =
+ MLX5_FLOW_CONTEXT_ACTION_COUNT | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+
+ for (i = 0; i < spec_num; i++) {
+ opfc->rule[i] = mlx5_add_flow_rules(prio->flow_table, &spec[i],
+ &flow_act, &dst, 1);
+ if (IS_ERR(opfc->rule[i])) {
+ err = PTR_ERR(opfc->rule[i]);
+ goto del_rules;
+ }
+ }
+ prio->refcount += spec_num;
+ kfree(spec);
+
+ return 0;
+
+del_rules:
+ for (i -= 1; i >= 0; i--)
+ mlx5_del_flow_rules(opfc->rule[i]);
+ put_flow_table(dev, prio, false);
+put_prio:
+ put_per_qp_prio(dev, type);
+free:
+ kfree(spec);
+ return err;
+}
+
+void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_op_fc *opfc,
+ enum mlx5_ib_optional_counter_type type)
+{
+ struct mlx5_ib_flow_prio *prio;
+ int i;
+
+ prio = get_opfc_prio(dev, type);
+
+ for (i = 0; i < MAX_OPFC_RULES && opfc->rule[i]; i++) {
+ mlx5_del_flow_rules(opfc->rule[i]);
+ put_flow_table(dev, prio, true);
+ }
+
+ put_per_qp_prio(dev, type);
+}
+
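+/* Detach all per-QP optional counters from @qp: delete its flow rules, drop
+ * the flow table references and erase the QP's entry from @qpn_opfc_xa.
+ */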
+void mlx5r_fs_unbind_op_fc(struct ib_qp *qp, struct xarray *qpn_opfc_xa)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_per_qp_opfc *per_qp_opfc;
+ struct mlx5_ib_op_fc *in_use_opfc;
+ struct mlx5_ib_flow_prio *prio;
+ int i, j;
+
+ per_qp_opfc = xa_load(qpn_opfc_xa, qp->qp_num);
+ if (!per_qp_opfc)
+ return;
+
+ for (i = MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP;
+ i <= MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP; i++) {
+ if (!per_qp_opfc->opfcs[i].fc)
+ continue;
+
+ if (mlx5r_is_opfc_shared_and_in_use(per_qp_opfc->opfcs, i,
+ &in_use_opfc)) {
+ per_qp_opfc->opfcs[i].fc = NULL;
+ continue;
+ }
+
+ for (j = 0; j < MAX_OPFC_RULES; j++) {
+ if (!per_qp_opfc->opfcs[i].rule[j])
+ continue;
+ mlx5_del_flow_rules(per_qp_opfc->opfcs[i].rule[j]);
+ prio = get_opfc_prio(dev, i);
+ put_flow_table(dev, prio, true);
+ }
+ per_qp_opfc->opfcs[i].fc = NULL;
+ }
+
+ kfree(per_qp_opfc);
+ xa_erase(qpn_opfc_xa, qp->qp_num);
+}
+
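+/* Bind @qp to every optional counter currently enabled on @port, allocating
+ * per-QP flow counters as needed and installing the matching flow rules.
+ */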
+int mlx5r_fs_bind_op_fc(struct ib_qp *qp,
+ struct mlx5_fc *fc_arr[MLX5_IB_OPCOUNTER_MAX],
+ struct xarray *qpn_opfc_xa, u32 port)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_per_qp_opfc *per_qp_opfc;
+ struct mlx5_ib_flow_prio *prio;
+ struct mlx5_ib_counters *cnts;
+ struct mlx5_ib_op_fc *opfc;
+ struct mlx5_fc *in_use_fc;
+ int i, err, per_qp_type;
+ bool new;
+
+ cnts = &dev->port[port - 1].cnts;
+
+ for (i = 0; i <= MLX5_IB_OPCOUNTER_RDMA_RX_BYTES; i++) {
+ opfc = &cnts->opfcs[i];
+ if (!opfc->fc)
+ continue;
+
+ per_qp_type = i + MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP;
+ prio = get_opfc_prio(dev, per_qp_type);
+ WARN_ON(!prio->flow_table);
+
+ if (is_fc_shared_and_in_use(fc_arr, per_qp_type, &in_use_fc))
+ fc_arr[per_qp_type] = in_use_fc;
+
+ if (!fc_arr[per_qp_type]) {
+ fc_arr[per_qp_type] = mlx5_fc_create(dev->mdev, false);
+ if (IS_ERR(fc_arr[per_qp_type]))
+ return PTR_ERR(fc_arr[per_qp_type]);
+ }
+
+ per_qp_opfc = get_per_qp_opfc(qpn_opfc_xa, qp->qp_num, &new);
+ if (!per_qp_opfc) {
+ err = -ENOMEM;
+ goto free_fc;
+ }
+ err = add_op_fc_rules(dev, fc_arr, qpn_opfc_xa, per_qp_opfc,
+ prio, per_qp_type, qp->qp_num, port);
+ if (err)
+ goto del_rules;
+ }
+
+ return 0;
+
+del_rules:
+ mlx5r_fs_unbind_op_fc(qp, qpn_opfc_xa);
+ if (new)
+ kfree(per_qp_opfc);
+free_fc:
+ if (xa_empty(qpn_opfc_xa))
+ mlx5r_fs_destroy_fcs(dev, fc_arr);
+ return err;
+}
+
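+/* When an underlay QPN is given, also match on it as the BTH destination
+ * QP, provided the device supports that match field.
+ */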
+static void set_underlay_qp(struct mlx5_ib_dev *dev,
+ struct mlx5_flow_spec *spec,
+ u32 underlay_qpn)
+{
+ void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
+ spec->match_criteria,
+ misc_parameters);
+ void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+
+ if (underlay_qpn &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ ft_field_support.bth_dst_qp)) {
+ MLX5_SET(fte_match_set_misc,
+ misc_params_v, bth_dst_qp, underlay_qpn);
+ MLX5_SET(fte_match_set_misc,
+ misc_params_c, bth_dst_qp, 0xffffff);
+ }
+}
+
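+/* For representor devices, restrict the rule to traffic from the
+ * representor's vport, using register C0 metadata when vport match metadata
+ * is enabled and the source_port field otherwise.
+ */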
+static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
+ void *misc;
+
+ if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters_2);
+
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_for_match(rep->esw,
+ rep->vport));
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters_2);
+
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
+ } else {
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+
+ MLX5_SET(fte_match_set_misc, misc, source_port, rep->vport);
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters);
+
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+ }
+}
+
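+/* Translate a verbs flow attribute into an mlx5 flow spec and install the
+ * resulting rule in @ft_prio's flow table, steering matching traffic to
+ * @dst and attaching any counters the user requested.
+ */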
+static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ const struct ib_flow_attr *flow_attr,
+ struct mlx5_flow_destination *dst,
+ u32 underlay_qpn,
+ struct mlx5_ib_create_flow *ucmd)
+{
+ struct mlx5_flow_table *ft = ft_prio->flow_table;
+ struct mlx5_ib_flow_handler *handler;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_destination dest_arr[2] = {};
+ struct mlx5_flow_destination *rule_dst = dest_arr;
+ const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
+ unsigned int spec_index;
+ u32 prev_type = 0;
+ int err = 0;
+ int dest_num = 0;
+ bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
+
+ if (!is_valid_attr(dev->mdev, flow_attr))
+ return ERR_PTR(-EINVAL);
+
+ if (dev->is_rep && is_egress)
+ return ERR_PTR(-EINVAL);
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ handler = kzalloc(sizeof(*handler), GFP_KERNEL);
+ if (!handler || !spec) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ INIT_LIST_HEAD(&handler->list);
+
+ for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
+ err = parse_flow_attr(dev->mdev, spec,
+ ib_flow, flow_attr, &flow_act,
+ prev_type);
+ if (err < 0)
+ goto free;
+
+ prev_type = ((union ib_flow_spec *)ib_flow)->type;
+ ib_flow += ((union ib_flow_spec *)ib_flow)->size;
+ }
+
+ if (dst && !(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP)) {
+ memcpy(&dest_arr[0], dst, sizeof(*dst));
+ dest_num++;
+ }
+
+ if (!flow_is_multicast_only(flow_attr))
+ set_underlay_qp(dev, spec, underlay_qpn);
+
+ if (dev->is_rep && flow_attr->type != IB_FLOW_ATTR_SNIFFER) {
+ struct mlx5_eswitch_rep *rep;
+
+ rep = dev->port[flow_attr->port - 1].rep;
+ if (!rep) {
+ err = -EINVAL;
+ goto free;
+ }
+
+ mlx5_ib_set_rule_source_port(dev, spec, rep);
+ }
+
+ spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
+
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ struct mlx5_ib_mcounters *mcounters;
+
+ err = mlx5_ib_flow_counters_set_data(flow_act.counters, ucmd);
+ if (err)
+ goto free;
+
+ mcounters = to_mcounters(flow_act.counters);
+ handler->ibcounters = flow_act.counters;
+ dest_arr[dest_num].type =
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest_arr[dest_num].counter =
+ mcounters->hw_cntrs_hndl;
+ dest_num++;
+ }
+
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+ if (!dest_num)
+ rule_dst = NULL;
+ } else {
+ if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)
+ flow_act.action |=
+ MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
+ if (is_egress)
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ else if (dest_num)
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ }
+
+ if ((spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG) &&
+ (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
+ flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
+ mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n",
+ spec->flow_context.flow_tag, flow_attr->type);
+ err = -EINVAL;
+ goto free;
+ }
+ handler->rule = mlx5_add_flow_rules(ft, spec,
+ &flow_act,
+ rule_dst, dest_num);
+
+ if (IS_ERR(handler->rule)) {
+ err = PTR_ERR(handler->rule);
+ goto free;
+ }
+
+ ft_prio->refcount++;
+ handler->prio = ft_prio;
+ handler->dev = dev;
+
+ ft_prio->flow_table = ft;
+free:
+ if (err && handler) {
+ mlx5_ib_counters_clear_description(handler->ibcounters);
+ kfree(handler);
+ }
+ kvfree(spec);
+ return err ? ERR_PTR(err) : handler;
+}
+
+static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ const struct ib_flow_attr *flow_attr,
+ struct mlx5_flow_destination *dst)
+{
+ return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
+}
+
+static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ struct ib_flow_attr *flow_attr,
+ struct mlx5_flow_destination *dst)
+{
+ struct mlx5_ib_flow_handler *handler_ucast = NULL;
+ struct mlx5_ib_flow_handler *handler = NULL;
+
+ static struct {
+ struct ib_flow_spec_eth eth_flow;
+ struct ib_flow_attr flow_attr;
+ } leftovers_wc = { .flow_attr = { .num_of_specs = 1,
+ .size = sizeof(leftovers_wc) },
+ .eth_flow = {
+ .type = IB_FLOW_SPEC_ETH,
+ .size = sizeof(struct ib_flow_spec_eth),
+ .mask = { .dst_mac = { 0x1 } },
+ .val = { .dst_mac = { 0x1 } } } };
+
+ static struct {
+ struct ib_flow_spec_eth eth_flow;
+ struct ib_flow_attr flow_attr;
+ } leftovers_uc = { .flow_attr = { .num_of_specs = 1,
+ .size = sizeof(leftovers_uc) },
+ .eth_flow = {
+ .type = IB_FLOW_SPEC_ETH,
+ .size = sizeof(struct ib_flow_spec_eth),
+ .mask = { .dst_mac = { 0x1 } },
+ .val = { .dst_mac = {} } } };
+
+ handler = create_flow_rule(dev, ft_prio, &leftovers_wc.flow_attr, dst);
+ if (!IS_ERR(handler) &&
+ flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
+ handler_ucast = create_flow_rule(dev, ft_prio,
+ &leftovers_uc.flow_attr, dst);
+ if (IS_ERR(handler_ucast)) {
+ mlx5_del_flow_rules(handler->rule);
+ ft_prio->refcount--;
+ kfree(handler);
+ handler = handler_ucast;
+ } else {
+ list_add(&handler_ucast->list, &handler->list);
+ }
+ }
+
+ return handler;
+}
+
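+/* A sniffer flow is a pair of catch-all rules, one in the RX sniffer table
+ * and one in the TX sniffer table, both steering to @dst.
+ */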
+static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_rx,
+ struct mlx5_ib_flow_prio *ft_tx,
+ struct mlx5_flow_destination *dst)
+{
+ struct mlx5_ib_flow_handler *handler_rx;
+ struct mlx5_ib_flow_handler *handler_tx;
+ int err;
+ static const struct ib_flow_attr flow_attr = {
+ .num_of_specs = 0,
+ .type = IB_FLOW_ATTR_SNIFFER,
+ .size = sizeof(flow_attr)
+ };
+
+ handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
+ if (IS_ERR(handler_rx)) {
+ err = PTR_ERR(handler_rx);
+ goto err;
+ }
+
+ handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
+ if (IS_ERR(handler_tx)) {
+ err = PTR_ERR(handler_tx);
+ goto err_tx;
+ }
+
+ list_add(&handler_tx->list, &handler_rx->list);
+
+ return handler_rx;
+
+err_tx:
+ mlx5_del_flow_rules(handler_rx->rule);
+ ft_rx->refcount--;
+ kfree(handler_rx);
+err:
+ return ERR_PTR(err);
+}
+
+static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
+ struct ib_flow_attr *flow_attr,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+ struct mlx5_ib_flow_handler *handler = NULL;
+ struct mlx5_flow_destination *dst = NULL;
+ struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
+ struct mlx5_ib_flow_prio *ft_prio;
+ bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
+ struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr;
+ size_t min_ucmd_sz, required_ucmd_sz;
+ int err;
+ int underlay_qpn;
+
+ if (udata && udata->inlen) {
+ min_ucmd_sz = offsetofend(struct mlx5_ib_create_flow, reserved);
+ if (udata->inlen < min_ucmd_sz)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ err = ib_copy_from_udata(&ucmd_hdr, udata, min_ucmd_sz);
+ if (err)
+ return ERR_PTR(err);
+
+ /* Currently only a single counters data entry is supported */
+ if (ucmd_hdr.ncounters_data > 1)
+ return ERR_PTR(-EINVAL);
+
+ required_ucmd_sz = min_ucmd_sz +
+ sizeof(struct mlx5_ib_flow_counters_data) *
+ ucmd_hdr.ncounters_data;
+ if (udata->inlen > required_ucmd_sz &&
+ !ib_is_udata_cleared(udata, required_ucmd_sz,
+ udata->inlen - required_ucmd_sz))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL);
+ if (!ucmd)
+ return ERR_PTR(-ENOMEM);
+
+ err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
+ if (err)
+ goto free_ucmd;
+ }
+
+ if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
+ err = -ENOMEM;
+ goto free_ucmd;
+ }
+
+ if (flow_attr->flags &
+ ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS)) {
+ err = -EINVAL;
+ goto free_ucmd;
+ }
+
+ if (is_egress &&
+ (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
+ flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
+ err = -EINVAL;
+ goto free_ucmd;
+ }
+
+ dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+ if (!dst) {
+ err = -ENOMEM;
+ goto free_ucmd;
+ }
+
+ mutex_lock(&dev->flow_db->lock);
+
+ ft_prio = get_flow_table(dev, flow_attr,
+ is_egress ? MLX5_IB_FT_TX : MLX5_IB_FT_RX);
+ if (IS_ERR(ft_prio)) {
+ err = PTR_ERR(ft_prio);
+ goto unlock;
+ }
+ if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
+ ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
+ if (IS_ERR(ft_prio_tx)) {
+ err = PTR_ERR(ft_prio_tx);
+ ft_prio_tx = NULL;
+ goto destroy_ft;
+ }
+ }
+
+ if (is_egress) {
+ dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
+ } else {
+ dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ if (mqp->is_rss)
+ dst->tir_num = mqp->rss_qp.tirn;
+ else
+ dst->tir_num = mqp->raw_packet_qp.rq.tirn;
+ }
+
+ switch (flow_attr->type) {
+ case IB_FLOW_ATTR_NORMAL:
+ underlay_qpn = (mqp->flags & IB_QP_CREATE_SOURCE_QPN) ?
+ mqp->underlay_qpn :
+ 0;
+ handler = _create_flow_rule(dev, ft_prio, flow_attr, dst,
+ underlay_qpn, ucmd);
+ break;
+ case IB_FLOW_ATTR_ALL_DEFAULT:
+ case IB_FLOW_ATTR_MC_DEFAULT:
+ handler = create_leftovers_rule(dev, ft_prio, flow_attr, dst);
+ break;
+ case IB_FLOW_ATTR_SNIFFER:
+ handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
+ break;
+ default:
+ err = -EINVAL;
+ goto destroy_ft;
+ }
+
+ if (IS_ERR(handler)) {
+ err = PTR_ERR(handler);
+ handler = NULL;
+ goto destroy_ft;
+ }
+
+ mutex_unlock(&dev->flow_db->lock);
+ kfree(dst);
+ kfree(ucmd);
+
+ return &handler->ibflow;
+
+destroy_ft:
+ put_flow_table(dev, ft_prio, false);
+ if (ft_prio_tx)
+ put_flow_table(dev, ft_prio_tx, false);
+unlock:
+ mutex_unlock(&dev->flow_db->lock);
+ kfree(dst);
+free_ucmd:
+ kfree(ucmd);
+ return ERR_PTR(err);
+}
+
+static int mlx5_ib_fill_transport_ns_info(struct mlx5_ib_dev *dev,
+ enum mlx5_flow_namespace_type type,
+ u32 *flags, u16 *vport_idx,
+ u16 *vport,
+ struct mlx5_core_dev **ft_mdev,
+ u32 ib_port)
+{
+ struct mlx5_core_dev *esw_mdev;
+
+ if (!is_mdev_switchdev_mode(dev->mdev))
+ return 0;
+
+ if (!MLX5_CAP_ADV_RDMA(dev->mdev, rdma_transport_manager))
+ return -EOPNOTSUPP;
+
+ if (!dev->port[ib_port - 1].rep)
+ return -EINVAL;
+
+ esw_mdev = mlx5_eswitch_get_core_dev(dev->port[ib_port - 1].rep->esw);
+ if (esw_mdev != dev->mdev)
+ return -EOPNOTSUPP;
+
+ *flags |= MLX5_FLOW_TABLE_OTHER_VPORT;
+ *ft_mdev = esw_mdev;
+ *vport = dev->port[ib_port - 1].rep->vport;
+ *vport_idx = dev->port[ib_port - 1].rep->vport_index;
+
+ return 0;
+}
+
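+/* Resolve the flow table priority for a matcher based (raw) flow: pick the
+ * namespace and its size/flag limits, then create the priority's flow table
+ * on first use.
+ */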
+static struct mlx5_ib_flow_prio *
+_get_flow_table(struct mlx5_ib_dev *dev, u16 user_priority,
+ enum mlx5_flow_namespace_type ns_type,
+ bool mcast, u32 ib_port)
+{
+ struct mlx5_core_dev *ft_mdev = dev->mdev;
+ struct mlx5_flow_namespace *ns = NULL;
+ struct mlx5_ib_flow_prio *prio = NULL;
+ int max_table_size = 0;
+ u16 vport_idx = 0;
+ bool esw_encap;
+ u32 flags = 0;
+ u16 vport = 0;
+ int priority;
+ int ret;
+
+ if (mcast)
+ priority = MLX5_IB_FLOW_MCAST_PRIO;
+ else
+ priority = ib_prio_to_core_prio(user_priority, false);
+
+ esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
+ DEVLINK_ESWITCH_ENCAP_MODE_NONE;
+ switch (ns_type) {
+ case MLX5_FLOW_NAMESPACE_BYPASS:
+ max_table_size = BIT(
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, log_max_ft_size));
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap) && !esw_encap)
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ reformat_l3_tunnel_to_l2) &&
+ !esw_encap)
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ break;
+ case MLX5_FLOW_NAMESPACE_EGRESS:
+ max_table_size = BIT(
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size));
+ if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat) &&
+ !esw_encap)
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ break;
+ case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
+ max_table_size = BIT(
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size));
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, decap) && esw_encap)
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev,
+ reformat_l3_tunnel_to_l2) &&
+ esw_encap)
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ priority = user_priority;
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_RX:
+ max_table_size = BIT(
+ MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev, log_max_ft_size));
+ priority = user_priority;
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TX:
+ max_table_size = BIT(
+ MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev, log_max_ft_size));
+ priority = user_priority;
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX:
+ case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX:
+ if (ib_port == 0 ||
+ user_priority >= MLX5_RDMA_TRANSPORT_BYPASS_PRIO)
+ return ERR_PTR(-EINVAL);
+ ret = mlx5_ib_fill_transport_ns_info(dev, ns_type, &flags,
+ &vport_idx, &vport,
+ &ft_mdev, ib_port);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX)
+ max_table_size =
+ BIT(MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_RX(
+ ft_mdev, log_max_ft_size));
+ else
+ max_table_size =
+ BIT(MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_TX(
+ ft_mdev, log_max_ft_size));
+ priority = user_priority;
+ break;
+ default:
+ break;
+ }
+
+ max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES);
+
+ if (ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX ||
+ ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX)
+ ns = mlx5_get_flow_vport_namespace(ft_mdev, ns_type, vport_idx);
+ else
+ ns = mlx5_get_flow_namespace(ft_mdev, ns_type);
+
+ if (!ns)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ switch (ns_type) {
+ case MLX5_FLOW_NAMESPACE_BYPASS:
+ prio = &dev->flow_db->prios[priority];
+ break;
+ case MLX5_FLOW_NAMESPACE_EGRESS:
+ prio = &dev->flow_db->egress_prios[priority];
+ break;
+ case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
+ prio = &dev->flow_db->fdb[priority];
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_RX:
+ prio = &dev->flow_db->rdma_rx[priority];
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TX:
+ prio = &dev->flow_db->rdma_tx[priority];
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX:
+ prio = &dev->flow_db->rdma_transport_rx[priority][ib_port - 1];
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX:
+ prio = &dev->flow_db->rdma_transport_tx[priority][ib_port - 1];
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!prio)
+ return ERR_PTR(-EINVAL);
+
+ if (prio->flow_table)
+ return prio;
+
+ return _get_prio(dev, ns, prio, priority, max_table_size,
+ MLX5_FS_MAX_TYPES, flags, vport);
+}
+
+static struct mlx5_ib_flow_handler *
+_create_raw_flow_rule(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ struct mlx5_flow_destination *dst,
+ struct mlx5_ib_flow_matcher *fs_matcher,
+ struct mlx5_flow_context *flow_context,
+ struct mlx5_flow_act *flow_act,
+ void *cmd_in, int inlen,
+ int dst_num)
+{
+ struct mlx5_ib_flow_handler *handler;
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_table *ft = ft_prio->flow_table;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ handler = kzalloc(sizeof(*handler), GFP_KERNEL);
+ if (!handler || !spec) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ INIT_LIST_HEAD(&handler->list);
+
+ memcpy(spec->match_value, cmd_in, inlen);
+ memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params,
+ fs_matcher->mask_len);
+ spec->match_criteria_enable = fs_matcher->match_criteria_enable;
+ spec->flow_context = *flow_context;
+
+ handler->rule = mlx5_add_flow_rules(ft, spec,
+ flow_act, dst, dst_num);
+
+ if (IS_ERR(handler->rule)) {
+ err = PTR_ERR(handler->rule);
+ goto free;
+ }
+
+ ft_prio->refcount++;
+ handler->prio = ft_prio;
+ handler->dev = dev;
+ ft_prio->flow_table = ft;
+
+free:
+ if (err)
+ kfree(handler);
+ kvfree(spec);
+ return err ? ERR_PTR(err) : handler;
+}
+
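+/* A raw flow is treated as multicast when both the mask and the value of
+ * either the destination MAC or the destination IPv4 address are multicast.
+ */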
+static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
+ void *match_v)
+{
+ void *match_c;
+ void *match_v_set_lyr_2_4, *match_c_set_lyr_2_4;
+ void *dmac, *dmac_mask;
+ void *ipv4, *ipv4_mask;
+
+ if (!(fs_matcher->match_criteria_enable &
+ (1 << MATCH_CRITERIA_ENABLE_OUTER_BIT)))
+ return false;
+
+ match_c = fs_matcher->matcher_mask.match_params;
+ match_v_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_v,
+ outer_headers);
+ match_c_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_c,
+ outer_headers);
+
+ dmac = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
+ dmac_47_16);
+ dmac_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
+ dmac_47_16);
+
+ if (is_multicast_ether_addr(dmac) &&
+ is_multicast_ether_addr(dmac_mask))
+ return true;
+
+ ipv4 = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+
+ ipv4_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+
+ if (ipv4_is_multicast(*(__be32 *)(ipv4)) &&
+ ipv4_is_multicast(*(__be32 *)(ipv4_mask)))
+ return true;
+
+ return false;
+}
+
+static struct mlx5_ib_flow_handler *raw_fs_rule_add(
+ struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
+ struct mlx5_flow_context *flow_context, struct mlx5_flow_act *flow_act,
+ struct mlx5_fc *counter, void *cmd_in, int inlen, int dest_id, int dest_type)
+{
+ struct mlx5_flow_destination *dst;
+ struct mlx5_ib_flow_prio *ft_prio;
+ struct mlx5_ib_flow_handler *handler;
+ int dst_num = 0;
+ bool mcast;
+ int err;
+
+ if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO)
+ return ERR_PTR(-ENOMEM);
+
+ dst = kcalloc(2, sizeof(*dst), GFP_KERNEL);
+ if (!dst)
+ return ERR_PTR(-ENOMEM);
+
+ mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
+ mutex_lock(&dev->flow_db->lock);
+
+ ft_prio = _get_flow_table(dev, fs_matcher->priority,
+ fs_matcher->ns_type, mcast,
+ fs_matcher->ib_port);
+ if (IS_ERR(ft_prio)) {
+ err = PTR_ERR(ft_prio);
+ goto unlock;
+ }
+
+ switch (dest_type) {
+ case MLX5_FLOW_DESTINATION_TYPE_TIR:
+ dst[dst_num].type = dest_type;
+ dst[dst_num++].tir_num = dest_id;
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
+ dst[dst_num++].ft_num = dest_id;
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_PORT:
+ dst[dst_num++].type = MLX5_FLOW_DESTINATION_TYPE_PORT;
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ break;
+ default:
+ break;
+ }
+
+ if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ if (WARN_ON(!counter)) {
+ err = -EINVAL;
+ goto unlock;
+ }
+ dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dst[dst_num].counter = counter;
+ dst_num++;
+ }
+
+ handler = _create_raw_flow_rule(dev, ft_prio, dst_num ? dst : NULL,
+ fs_matcher, flow_context, flow_act,
+ cmd_in, inlen, dst_num);
+
+ if (IS_ERR(handler)) {
+ err = PTR_ERR(handler);
+ goto destroy_ft;
+ }
+
+ mutex_unlock(&dev->flow_db->lock);
+ atomic_inc(&fs_matcher->usecnt);
+ handler->flow_matcher = fs_matcher;
+
+ kfree(dst);
+
+ return handler;
+
+destroy_ft:
+ put_flow_table(dev, ft_prio, false);
+unlock:
+ mutex_unlock(&dev->flow_db->lock);
+ kfree(dst);
+
+ return ERR_PTR(err);
+}
+
+static void destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
+{
+ switch (maction->flow_action_raw.sub_type) {
+ case MLX5_IB_FLOW_ACTION_MODIFY_HEADER:
+ mlx5_modify_header_dealloc(maction->flow_action_raw.dev->mdev,
+ maction->flow_action_raw.modify_hdr);
+ break;
+ case MLX5_IB_FLOW_ACTION_PACKET_REFORMAT:
+ mlx5_packet_reformat_dealloc(maction->flow_action_raw.dev->mdev,
+ maction->flow_action_raw.pkt_reformat);
+ break;
+ case MLX5_IB_FLOW_ACTION_DECAP:
+ break;
+ default:
+ break;
+ }
+}
+
+static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
+{
+ struct mlx5_ib_flow_action *maction = to_mflow_act(action);
+
+ switch (action->type) {
+ case IB_FLOW_ACTION_UNSPECIFIED:
+ destroy_flow_action_raw(maction);
+ break;
+ default:
+ WARN_ON(true);
+ break;
+ }
+
+ kfree(maction);
+ return 0;
+}
+
+static int
+mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type,
+ enum mlx5_flow_namespace_type *namespace)
+{
+ switch (table_type) {
+ case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX:
+ *namespace = MLX5_FLOW_NAMESPACE_BYPASS;
+ break;
+ case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX:
+ *namespace = MLX5_FLOW_NAMESPACE_EGRESS;
+ break;
+ case MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB:
+ *namespace = MLX5_FLOW_NAMESPACE_FDB_BYPASS;
+ break;
+ case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX:
+ *namespace = MLX5_FLOW_NAMESPACE_RDMA_RX;
+ break;
+ case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_TX:
+ *namespace = MLX5_FLOW_NAMESPACE_RDMA_TX;
+ break;
+ case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_TRANSPORT_RX:
+ *namespace = MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX;
+ break;
+ case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_TRANSPORT_TX:
+ *namespace = MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
+ [MLX5_IB_FLOW_TYPE_NORMAL] = {
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ .u.ptr = {
+ .len = sizeof(u16), /* data is priority */
+ .min_len = sizeof(u16),
+ }
+ },
+ [MLX5_IB_FLOW_TYPE_SNIFFER] = {
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_NO_DATA(),
+ },
+ [MLX5_IB_FLOW_TYPE_ALL_DEFAULT] = {
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_NO_DATA(),
+ },
+ [MLX5_IB_FLOW_TYPE_MC_DEFAULT] = {
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_NO_DATA(),
+ },
+};
+
+static bool is_flow_dest(void *obj, int *dest_id, int *dest_type)
+{
+ struct devx_obj *devx_obj = obj;
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_DESTROY_TIR:
+ *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ *dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
+ obj_id);
+ return true;
+
+ case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
+ *dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ *dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
+ table_id);
+ return true;
+ default:
+ return false;
+ }
+}
+
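+/* Parse and validate the user supplied destination attributes: a DEVX
+ * object, a QP, the default-miss/drop flags, or an implicit port
+ * destination for the TX namespaces.
+ */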
+static int get_dests(struct uverbs_attr_bundle *attrs,
+ struct mlx5_ib_flow_matcher *fs_matcher, int *dest_id,
+ int *dest_type, struct ib_qp **qp, u32 *flags)
+{
+ bool dest_devx, dest_qp;
+ void *devx_obj;
+ int err;
+
+ dest_devx = uverbs_attr_is_valid(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
+ dest_qp = uverbs_attr_is_valid(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
+
+ *flags = 0;
+ err = uverbs_get_flags32(flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_FLAGS,
+ MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS |
+ MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP);
+ if (err)
+ return err;
+
+ /* The two flags are mutually exclusive */
+ if (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS &&
+ *flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)
+ return -EINVAL;
+
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
+ if (dest_devx && (dest_qp || *flags))
+ return -EINVAL;
+ else if (dest_qp && *flags)
+ return -EINVAL;
+ }
+
+ /* For FDB, allow only a DEVX object or drop as the destination */
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB_BYPASS &&
+ !(dest_devx || (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)))
+ return -EINVAL;
+
+ /* Allow only DEVX object or QP as dest when inserting to RDMA_RX */
+ if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX ||
+ fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX) &&
+ ((!dest_devx && !dest_qp) || (dest_devx && dest_qp)))
+ return -EINVAL;
+
+ *qp = NULL;
+ if (dest_devx) {
+ devx_obj =
+ uverbs_attr_get_obj(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
+
+ /* Verify that the given DEVX object is a flow
+ * steering destination.
+ */
+ if (!is_flow_dest(devx_obj, dest_id, dest_type))
+ return -EINVAL;
+ /* Allow only flow table as dest when inserting to FDB or RDMA_RX */
+ if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB_BYPASS ||
+ fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX ||
+ fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX) &&
+ *dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+ return -EINVAL;
+ } else if (dest_qp) {
+ struct mlx5_ib_qp *mqp;
+
+ *qp = uverbs_attr_get_obj(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
+ if (IS_ERR(*qp))
+ return PTR_ERR(*qp);
+
+ if ((*qp)->qp_type != IB_QPT_RAW_PACKET)
+ return -EINVAL;
+
+ mqp = to_mqp(*qp);
+ if (mqp->is_rss)
+ *dest_id = mqp->rss_qp.tirn;
+ else
+ *dest_id = mqp->raw_packet_qp.rq.tirn;
+ *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ } else if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
+ fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX ||
+ fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX) &&
+ !(*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)) {
+ *dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
+ }
+
+ if (*dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
+ (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
+ fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX ||
+ fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX))
+ return -EINVAL;
+
+ return 0;
+}
+
+static bool
+is_flow_counter(void *obj, u32 offset, u32 *counter_id, u32 *fc_bulk_size)
+{
+ struct devx_obj *devx_obj = obj;
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
+
+ if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {
+ if (offset && offset >= devx_obj->flow_counter_bulk_size)
+ return false;
+
+ *fc_bulk_size = devx_obj->flow_counter_bulk_size;
+ *counter_id = MLX5_GET(dealloc_flow_counter_in,
+ devx_obj->dinbox,
+ flow_counter_id);
+ *counter_id += offset;
+ return true;
+ }
+
+ return false;
+}
+
+#define MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS 2
+static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_flow_context flow_context = {.flow_tag =
+ MLX5_FS_DEFAULT_FLOW_TAG};
+ int dest_id, dest_type = -1, inlen, len, ret, i;
+ struct mlx5_ib_flow_handler *flow_handler;
+ struct mlx5_ib_flow_matcher *fs_matcher;
+ struct ib_uobject **arr_flow_actions;
+ struct ib_uflow_resources *uflow_res;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_fc *counter = NULL;
+ struct ib_qp *qp = NULL;
+ void *devx_obj, *cmd_in;
+ struct ib_uobject *uobj;
+ struct mlx5_ib_dev *dev;
+ u32 flags;
+
+ if (!rdma_uattrs_has_raw_cap(attrs))
+ return -EPERM;
+
+ fs_matcher = uverbs_attr_get_obj(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
+ uobj = uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE);
+ dev = mlx5_udata_to_mdev(&attrs->driver_udata);
+
+ if (get_dests(attrs, fs_matcher, &dest_id, &dest_type, &qp, &flags))
+ return -EINVAL;
+
+ if (flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS)
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
+
+ if (flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ len = uverbs_attr_get_uobjs_arr(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX, &arr_flow_actions);
+ if (len) {
+ u32 *offset_attr, fc_bulk_size, offset = 0, counter_id = 0;
+
+ devx_obj = arr_flow_actions[0]->object;
+
+ if (uverbs_attr_is_valid(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET)) {
+ int num_offsets = uverbs_attr_ptr_get_array_size(
+ attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
+ sizeof(u32));
+
+ if (num_offsets != 1)
+ return -EINVAL;
+
+ offset_attr = uverbs_attr_get_alloced_ptr(
+ attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET);
+ offset = *offset_attr;
+ }
+
+ if (!is_flow_counter(devx_obj, offset, &counter_id, &fc_bulk_size))
+ return -EINVAL;
+ counter = mlx5_fc_local_create(counter_id, offset, fc_bulk_size);
+ if (IS_ERR(counter))
+ return PTR_ERR(counter);
+
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ }
+
+ cmd_in = uverbs_attr_get_alloced_ptr(
+ attrs, MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
+ inlen = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
+
+ uflow_res = flow_resources_alloc(MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS);
+ if (!uflow_res) {
+ ret = -ENOMEM;
+ goto destroy_counter;
+ }
+
+ len = uverbs_attr_get_uobjs_arr(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS, &arr_flow_actions);
+ for (i = 0; i < len; i++) {
+ struct mlx5_ib_flow_action *maction =
+ to_mflow_act(arr_flow_actions[i]->object);
+
+ ret = parse_flow_flow_action(maction, false, &flow_act);
+ if (ret)
+ goto err_out;
+ flow_resources_add(uflow_res, IB_FLOW_SPEC_ACTION_HANDLE,
+ arr_flow_actions[i]->object);
+ }
+
+ ret = uverbs_copy_from(&flow_context.flow_tag, attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_TAG);
+ if (!ret) {
+ if (flow_context.flow_tag >= BIT(24)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+ flow_context.flags |= FLOW_CONTEXT_HAS_TAG;
+ }
+
+ flow_handler =
+ raw_fs_rule_add(dev, fs_matcher, &flow_context, &flow_act,
+ counter, cmd_in, inlen, dest_id, dest_type);
+ if (IS_ERR(flow_handler)) {
+ ret = PTR_ERR(flow_handler);
+ goto err_out;
+ }
+
+ ib_set_flow(uobj, &flow_handler->ibflow, qp, &dev->ib_dev, uflow_res);
+
+ return 0;
+err_out:
+ ib_uverbs_flow_resources_free(uflow_res);
+destroy_counter:
+ if (counter)
+ mlx5_fc_local_destroy(counter);
+ return ret;
+}
+
+static int flow_matcher_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_flow_matcher *obj = uobject->object;
+
+ if (atomic_read(&obj->usecnt))
+ return -EBUSY;
+
+ kfree(obj);
+ return 0;
+}
+
+static int steering_anchor_create_ft(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ enum mlx5_flow_namespace_type ns_type)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *ft;
+
+ if (ft_prio->anchor.ft)
+ return 0;
+
+ ns = mlx5_get_flow_namespace(dev->mdev, ns_type);
+ if (!ns)
+ return -EOPNOTSUPP;
+
+ ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
+ ft_attr.uid = MLX5_SHARED_RESOURCE_UID;
+ ft_attr.prio = 0;
+ ft_attr.max_fte = 2;
+ ft_attr.level = 1;
+
+ ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft))
+ return PTR_ERR(ft);
+
+ ft_prio->anchor.ft = ft;
+
+ return 0;
+}
+
+static void steering_anchor_destroy_ft(struct mlx5_ib_flow_prio *ft_prio)
+{
+ if (ft_prio->anchor.ft) {
+ mlx5_destroy_flow_table(ft_prio->anchor.ft);
+ ft_prio->anchor.ft = NULL;
+ }
+}
+
+static int
+steering_anchor_create_fg_drop(struct mlx5_ib_flow_prio *ft_prio)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ void *flow_group_in;
+ int err = 0;
+
+ if (ft_prio->anchor.fg_drop)
+ return 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+
+ fg = mlx5_create_flow_group(ft_prio->anchor.ft, flow_group_in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ goto out;
+ }
+
+ ft_prio->anchor.fg_drop = fg;
+
+out:
+ kvfree(flow_group_in);
+
+ return err;
+}
+
+static void
+steering_anchor_destroy_fg_drop(struct mlx5_ib_flow_prio *ft_prio)
+{
+ if (ft_prio->anchor.fg_drop) {
+ mlx5_destroy_flow_group(ft_prio->anchor.fg_drop);
+ ft_prio->anchor.fg_drop = NULL;
+ }
+}
+
+static int
+steering_anchor_create_fg_goto_table(struct mlx5_ib_flow_prio *ft_prio)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ void *flow_group_in;
+ int err = 0;
+
+ if (ft_prio->anchor.fg_goto_table)
+ return 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ fg = mlx5_create_flow_group(ft_prio->anchor.ft, flow_group_in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ goto out;
+ }
+ ft_prio->anchor.fg_goto_table = fg;
+
+out:
+ kvfree(flow_group_in);
+
+ return err;
+}
+
+static void
+steering_anchor_destroy_fg_goto_table(struct mlx5_ib_flow_prio *ft_prio)
+{
+ if (ft_prio->anchor.fg_goto_table) {
+ mlx5_destroy_flow_group(ft_prio->anchor.fg_goto_table);
+ ft_prio->anchor.fg_goto_table = NULL;
+ }
+}
+
+static int
+steering_anchor_create_rule_drop(struct mlx5_ib_flow_prio *ft_prio)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *handle;
+
+ if (ft_prio->anchor.rule_drop)
+ return 0;
+
+ flow_act.fg = ft_prio->anchor.fg_drop;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ handle = mlx5_add_flow_rules(ft_prio->anchor.ft, NULL, &flow_act,
+ NULL, 0);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ ft_prio->anchor.rule_drop = handle;
+
+ return 0;
+}
+
+static void steering_anchor_destroy_rule_drop(struct mlx5_ib_flow_prio *ft_prio)
+{
+ if (ft_prio->anchor.rule_drop) {
+ mlx5_del_flow_rules(ft_prio->anchor.rule_drop);
+ ft_prio->anchor.rule_drop = NULL;
+ }
+}
+
+static int
+steering_anchor_create_rule_goto_table(struct mlx5_ib_flow_prio *ft_prio)
+{
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *handle;
+
+ if (ft_prio->anchor.rule_goto_table)
+ return 0;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ flow_act.fg = ft_prio->anchor.fg_goto_table;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = ft_prio->flow_table;
+
+ handle = mlx5_add_flow_rules(ft_prio->anchor.ft, NULL, &flow_act,
+ &dest, 1);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ ft_prio->anchor.rule_goto_table = handle;
+
+ return 0;
+}
+
+static void
+steering_anchor_destroy_rule_goto_table(struct mlx5_ib_flow_prio *ft_prio)
+{
+ if (ft_prio->anchor.rule_goto_table) {
+ mlx5_del_flow_rules(ft_prio->anchor.rule_goto_table);
+ ft_prio->anchor.rule_goto_table = NULL;
+ }
+}
+
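+/* Create all resources backing a steering anchor: an unmanaged flow table
+ * with a group and rule that forward to the priority's flow table, plus a
+ * drop group and rule.
+ */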
+static int steering_anchor_create_res(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ enum mlx5_flow_namespace_type ns_type)
+{
+ int err;
+
+ err = steering_anchor_create_ft(dev, ft_prio, ns_type);
+ if (err)
+ return err;
+
+ err = steering_anchor_create_fg_drop(ft_prio);
+ if (err)
+ goto destroy_ft;
+
+ err = steering_anchor_create_fg_goto_table(ft_prio);
+ if (err)
+ goto destroy_fg_drop;
+
+ err = steering_anchor_create_rule_drop(ft_prio);
+ if (err)
+ goto destroy_fg_goto_table;
+
+ err = steering_anchor_create_rule_goto_table(ft_prio);
+ if (err)
+ goto destroy_rule_drop;
+
+ return 0;
+
+destroy_rule_drop:
+ steering_anchor_destroy_rule_drop(ft_prio);
+destroy_fg_goto_table:
+ steering_anchor_destroy_fg_goto_table(ft_prio);
+destroy_fg_drop:
+ steering_anchor_destroy_fg_drop(ft_prio);
+destroy_ft:
+ steering_anchor_destroy_ft(ft_prio);
+
+ return err;
+}
+
+static void mlx5_steering_anchor_destroy_res(struct mlx5_ib_flow_prio *ft_prio)
+{
+ steering_anchor_destroy_rule_goto_table(ft_prio);
+ steering_anchor_destroy_rule_drop(ft_prio);
+ steering_anchor_destroy_fg_goto_table(ft_prio);
+ steering_anchor_destroy_fg_drop(ft_prio);
+ steering_anchor_destroy_ft(ft_prio);
+}
+
+static int steering_anchor_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_steering_anchor *obj = uobject->object;
+
+ if (atomic_read(&obj->usecnt))
+ return -EBUSY;
+
+ mutex_lock(&obj->dev->flow_db->lock);
+ if (!--obj->ft_prio->anchor.rule_goto_table_ref)
+ steering_anchor_destroy_rule_goto_table(obj->ft_prio);
+
+ put_flow_table(obj->dev, obj->ft_prio, true);
+ mutex_unlock(&obj->dev->flow_db->lock);
+
+ kfree(obj);
+ return 0;
+}
+
+static void fs_cleanup_anchor(struct mlx5_ib_flow_prio *prio,
+ int count)
+{
+ while (count--)
+ mlx5_steering_anchor_destroy_res(&prio[count]);
+}
+
+void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev)
+{
+ fs_cleanup_anchor(dev->flow_db->prios, MLX5_IB_NUM_FLOW_FT);
+ fs_cleanup_anchor(dev->flow_db->egress_prios, MLX5_IB_NUM_FLOW_FT);
+ fs_cleanup_anchor(dev->flow_db->sniffer, MLX5_IB_NUM_SNIFFER_FTS);
+ fs_cleanup_anchor(dev->flow_db->egress, MLX5_IB_NUM_EGRESS_FTS);
+ fs_cleanup_anchor(dev->flow_db->fdb, MLX5_IB_NUM_FDB_FTS);
+ fs_cleanup_anchor(dev->flow_db->rdma_rx, MLX5_IB_NUM_FLOW_FT);
+ fs_cleanup_anchor(dev->flow_db->rdma_tx, MLX5_IB_NUM_FLOW_FT);
+}
+
+static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs,
+ struct mlx5_ib_flow_matcher *obj)
+{
+ enum mlx5_ib_uapi_flow_table_type ft_type =
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX;
+ u32 flags;
+ int err;
+
+ /* New users should use MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE; the older
+ * MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS attribute is kept only to avoid
+ * breaking existing userspace.
+ */
+ if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE) &&
+ uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS))
+ return -EINVAL;
+
+ if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE)) {
+ err = uverbs_get_const(&ft_type, attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE);
+ if (err)
+ return err;
+
+ err = mlx5_ib_ft_type_to_namespace(ft_type, &obj->ns_type);
+ if (err)
+ return err;
+
+ return 0;
+ }
+
+ if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS)) {
+ err = uverbs_get_flags32(&flags, attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
+ IB_FLOW_ATTR_FLAGS_EGRESS);
+ if (err)
+ return err;
+
+ if (flags)
+ return mlx5_ib_ft_type_to_namespace(
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX,
+ &obj->ns_type);
+ }
+
+ obj->ns_type = MLX5_FLOW_NAMESPACE_BYPASS;
+
+ return 0;
+}
+
+static bool verify_context_caps(struct mlx5_ib_dev *dev, u64 enabled_caps)
+{
+ if (is_mdev_switchdev_mode(dev->mdev))
+ return UCAP_ENABLED(enabled_caps,
+ RDMA_UCAP_MLX5_CTRL_OTHER_VHCA);
+
+ return UCAP_ENABLED(enabled_caps, RDMA_UCAP_MLX5_CTRL_LOCAL);
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE);
+ struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
+ struct mlx5_ib_flow_matcher *obj;
+ int err;
+
+ obj = kzalloc(sizeof(struct mlx5_ib_flow_matcher), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ obj->mask_len = uverbs_attr_get_len(
+ attrs, MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
+ err = uverbs_copy_from(&obj->matcher_mask,
+ attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
+ if (err)
+ goto end;
+
+ obj->flow_type = uverbs_attr_get_enum_id(
+ attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE);
+
+ if (obj->flow_type == MLX5_IB_FLOW_TYPE_NORMAL) {
+ err = uverbs_copy_from(&obj->priority,
+ attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE);
+ if (err)
+ goto end;
+ }
+
+ err = uverbs_copy_from(&obj->match_criteria_enable,
+ attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA);
+ if (err)
+ goto end;
+
+ err = mlx5_ib_matcher_ns(attrs, obj);
+ if (err)
+ goto end;
+
+ if (obj->ns_type == MLX5_FLOW_NAMESPACE_FDB_BYPASS &&
+ mlx5_eswitch_mode(dev->mdev) != MLX5_ESWITCH_OFFLOADS) {
+ err = -EINVAL;
+ goto end;
+ }
+
+ if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_IB_PORT)) {
+ err = uverbs_copy_from(&obj->ib_port, attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_IB_PORT);
+ if (err)
+ goto end;
+ if (!rdma_is_port_valid(&dev->ib_dev, obj->ib_port)) {
+ err = -EINVAL;
+ goto end;
+ }
+ if (obj->ns_type != MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX &&
+ obj->ns_type != MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX) {
+ err = -EINVAL;
+ goto end;
+ }
+ if (!verify_context_caps(dev, uobj->context->enabled_caps)) {
+ err = -EOPNOTSUPP;
+ goto end;
+ }
+ }
+
+ uobj->object = obj;
+ obj->mdev = dev->mdev;
+ atomic_set(&obj->usecnt, 0);
+ return 0;
+
+end:
+ kfree(obj);
+ return err;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_STEERING_ANCHOR_CREATE_HANDLE);
+ struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
+ enum mlx5_ib_uapi_flow_table_type ib_uapi_ft_type;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5_ib_steering_anchor *obj;
+ struct mlx5_ib_flow_prio *ft_prio;
+ u16 priority;
+ u32 ft_id;
+ int err;
+
+ if (!rdma_dev_has_raw_cap(&dev->ib_dev))
+ return -EPERM;
+
+ err = uverbs_get_const(&ib_uapi_ft_type, attrs,
+ MLX5_IB_ATTR_STEERING_ANCHOR_FT_TYPE);
+ if (err)
+ return err;
+
+ err = mlx5_ib_ft_type_to_namespace(ib_uapi_ft_type, &ns_type);
+ if (err)
+ return err;
+
+ err = uverbs_copy_from(&priority, attrs,
+ MLX5_IB_ATTR_STEERING_ANCHOR_PRIORITY);
+ if (err)
+ return err;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ mutex_lock(&dev->flow_db->lock);
+
+ ft_prio = _get_flow_table(dev, priority, ns_type, 0, 0);
+ if (IS_ERR(ft_prio)) {
+ err = PTR_ERR(ft_prio);
+ goto free_obj;
+ }
+
+ ft_prio->refcount++;
+
+ if (!ft_prio->anchor.rule_goto_table_ref) {
+ err = steering_anchor_create_res(dev, ft_prio, ns_type);
+ if (err)
+ goto put_flow_table;
+ }
+
+ ft_prio->anchor.rule_goto_table_ref++;
+
+ ft_id = mlx5_flow_table_id(ft_prio->anchor.ft);
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID,
+ &ft_id, sizeof(ft_id));
+ if (err)
+ goto destroy_res;
+
+ mutex_unlock(&dev->flow_db->lock);
+
+ uobj->object = obj;
+ obj->dev = dev;
+ obj->ft_prio = ft_prio;
+ atomic_set(&obj->usecnt, 0);
+
+ return 0;
+
+destroy_res:
+ --ft_prio->anchor.rule_goto_table_ref;
+ mlx5_steering_anchor_destroy_res(ft_prio);
+put_flow_table:
+ put_flow_table(dev, ft_prio, true);
+free_obj:
+ mutex_unlock(&dev->flow_db->lock);
+ kfree(obj);
+
+ return err;
+}
+
+static struct ib_flow_action *
+mlx5_ib_create_modify_header(struct mlx5_ib_dev *dev,
+ enum mlx5_ib_uapi_flow_table_type ft_type,
+ u8 num_actions, void *in)
+{
+ enum mlx5_flow_namespace_type namespace;
+ struct mlx5_ib_flow_action *maction;
+ int ret;
+
+ ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
+ if (ret)
+ return ERR_PTR(-EINVAL);
+
+ maction = kzalloc(sizeof(*maction), GFP_KERNEL);
+ if (!maction)
+ return ERR_PTR(-ENOMEM);
+
+ maction->flow_action_raw.modify_hdr =
+ mlx5_modify_header_alloc(dev->mdev, namespace, num_actions, in);
+
+ if (IS_ERR(maction->flow_action_raw.modify_hdr)) {
+ ret = PTR_ERR(maction->flow_action_raw.modify_hdr);
+ kfree(maction);
+ return ERR_PTR(ret);
+ }
+ maction->flow_action_raw.sub_type =
+ MLX5_IB_FLOW_ACTION_MODIFY_HEADER;
+ maction->flow_action_raw.dev = dev;
+
+ return &maction->ib_action;
+}
+
+static bool mlx5_ib_modify_header_supported(struct mlx5_ib_dev *dev)
+{
+ return MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ max_modify_header_actions) ||
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
+ max_modify_header_actions) ||
+ MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev,
+ max_modify_header_actions);
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE);
+ struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
+ enum mlx5_ib_uapi_flow_table_type ft_type;
+ struct ib_flow_action *action;
+ int num_actions;
+ void *in;
+ int ret;
+
+ if (!mlx5_ib_modify_header_supported(mdev))
+ return -EOPNOTSUPP;
+
+ in = uverbs_attr_get_alloced_ptr(attrs,
+ MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM);
+
+ num_actions = uverbs_attr_ptr_get_array_size(
+ attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
+ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto));
+ if (num_actions < 0)
+ return num_actions;
+
+ ret = uverbs_get_const(&ft_type, attrs,
+ MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE);
+ if (ret)
+ return ret;
+ action = mlx5_ib_create_modify_header(mdev, ft_type, num_actions, in);
+ if (IS_ERR(action))
+ return PTR_ERR(action);
+
+ uverbs_flow_action_fill_action(action, uobj, &mdev->ib_dev,
+ IB_FLOW_ACTION_UNSPECIFIED);
+
+ return 0;
+}
+
+static bool mlx5_ib_flow_action_packet_reformat_valid(struct mlx5_ib_dev *ibdev,
+ u8 packet_reformat_type,
+ u8 ft_type)
+{
+ switch (packet_reformat_type) {
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX)
+ return MLX5_CAP_FLOWTABLE(ibdev->mdev,
+ encap_general_header);
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+ if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX)
+ return MLX5_CAP_FLOWTABLE_NIC_TX(ibdev->mdev,
+ reformat_l2_to_l3_tunnel);
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+ if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX)
+ return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev,
+ reformat_l3_tunnel_to_l2);
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2:
+ if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX)
+ return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev, decap);
+ break;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static int mlx5_ib_dv_to_prm_packet_reforamt_type(u8 dv_prt, u8 *prm_prt)
+{
+ switch (dv_prt) {
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ *prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+ *prm_prt = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+ *prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mlx5_ib_flow_action_create_packet_reformat_ctx(
+ struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_action *maction,
+ u8 ft_type, u8 dv_prt,
+ void *in, size_t len)
+{
+ struct mlx5_pkt_reformat_params reformat_params;
+ enum mlx5_flow_namespace_type namespace;
+ u8 prm_prt;
+ int ret;
+
+ ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
+ if (ret)
+ return ret;
+
+ ret = mlx5_ib_dv_to_prm_packet_reforamt_type(dv_prt, &prm_prt);
+ if (ret)
+ return ret;
+
+ memset(&reformat_params, 0, sizeof(reformat_params));
+ reformat_params.type = prm_prt;
+ reformat_params.size = len;
+ reformat_params.data = in;
+ maction->flow_action_raw.pkt_reformat =
+ mlx5_packet_reformat_alloc(dev->mdev, &reformat_params,
+ namespace);
+ if (IS_ERR(maction->flow_action_raw.pkt_reformat)) {
+ ret = PTR_ERR(maction->flow_action_raw.pkt_reformat);
+ return ret;
+ }
+
+ maction->flow_action_raw.sub_type =
+ MLX5_IB_FLOW_ACTION_PACKET_REFORMAT;
+ maction->flow_action_raw.dev = dev;
+
+ return 0;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE);
+ struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
+ enum mlx5_ib_uapi_flow_action_packet_reformat_type dv_prt;
+ enum mlx5_ib_uapi_flow_table_type ft_type;
+ struct mlx5_ib_flow_action *maction;
+ int ret;
+
+ ret = uverbs_get_const(&ft_type, attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE);
+ if (ret)
+ return ret;
+
+ ret = uverbs_get_const(&dv_prt, attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE);
+ if (ret)
+ return ret;
+
+ if (!mlx5_ib_flow_action_packet_reformat_valid(mdev, dv_prt, ft_type))
+ return -EOPNOTSUPP;
+
+ maction = kzalloc(sizeof(*maction), GFP_KERNEL);
+ if (!maction)
+ return -ENOMEM;
+
+ if (dv_prt ==
+ MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2) {
+ maction->flow_action_raw.sub_type =
+ MLX5_IB_FLOW_ACTION_DECAP;
+ maction->flow_action_raw.dev = mdev;
+ } else {
+ void *in;
+ int len;
+
+ in = uverbs_attr_get_alloced_ptr(attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF);
+ if (IS_ERR(in)) {
+ ret = PTR_ERR(in);
+ goto free_maction;
+ }
+
+ len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF);
+
+ ret = mlx5_ib_flow_action_create_packet_reformat_ctx(mdev,
+ maction, ft_type, dv_prt, in, len);
+ if (ret)
+ goto free_maction;
+ }
+
+ uverbs_flow_action_fill_action(&maction->ib_action, uobj, &mdev->ib_dev,
+ IB_FLOW_ACTION_UNSPECIFIED);
+ return 0;
+
+free_maction:
+ kfree(maction);
+ return ret;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_CREATE_FLOW,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
+ UVERBS_OBJECT_FLOW,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE,
+ UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_MATCHER,
+ MLX5_IB_OBJECT_FLOW_MATCHER,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_QP,
+ UVERBS_OBJECT_QP,
+ UVERBS_ACCESS_READ),
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_READ),
+ UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_ACCESS_READ, 1,
+ MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_TAG,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_READ, 1, 1,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
+ UVERBS_ATTR_MIN_SIZE(sizeof(u32)),
+ UA_OPTIONAL,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_FLAGS,
+ enum mlx5_ib_create_flow_flags,
+ UA_OPTIONAL));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_DESTROY_FLOW,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
+ UVERBS_OBJECT_FLOW,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+ADD_UVERBS_METHODS(mlx5_ib_fs,
+ UVERBS_OBJECT_FLOW,
+ &UVERBS_METHOD(MLX5_IB_METHOD_CREATE_FLOW),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DESTROY_FLOW));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
+ UVERBS_ATTR_MIN_SIZE(MLX5_UN_SZ_BYTES(
+ set_add_copy_action_in_auto)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE,
+ enum mlx5_ib_uapi_flow_table_type,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF,
+ UVERBS_ATTR_MIN_SIZE(1),
+ UA_ALLOC_AND_COPY,
+ UA_OPTIONAL),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE,
+ enum mlx5_ib_uapi_flow_action_packet_reformat_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE,
+ enum mlx5_ib_uapi_flow_table_type,
+ UA_MANDATORY));
+
+ADD_UVERBS_METHODS(
+ mlx5_ib_flow_actions,
+ UVERBS_OBJECT_FLOW_ACTION,
+ &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER),
+ &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_FLOW_MATCHER_CREATE,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE,
+ MLX5_IB_OBJECT_FLOW_MATCHER,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK,
+ UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
+ UA_MANDATORY),
+ UVERBS_ATTR_ENUM_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE,
+ mlx5_ib_flow_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA,
+ UVERBS_ATTR_TYPE(u8),
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
+ enum ib_flow_flags,
+ UA_OPTIONAL),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE,
+ enum mlx5_ib_uapi_flow_table_type,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_FLOW_MATCHER_IB_PORT,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_FLOW_MATCHER_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_FLOW_MATCHER,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER,
+ UVERBS_TYPE_ALLOC_IDR(flow_matcher_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_CREATE),
+ &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_DESTROY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_STEERING_ANCHOR_CREATE,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_STEERING_ANCHOR_CREATE_HANDLE,
+ MLX5_IB_OBJECT_STEERING_ANCHOR,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_STEERING_ANCHOR_FT_TYPE,
+ enum mlx5_ib_uapi_flow_table_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_STEERING_ANCHOR_PRIORITY,
+ UVERBS_ATTR_TYPE(u16),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_STEERING_ANCHOR_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_STEERING_ANCHOR_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_STEERING_ANCHOR,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ MLX5_IB_OBJECT_STEERING_ANCHOR,
+ UVERBS_TYPE_ALLOC_IDR(steering_anchor_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE),
+ &UVERBS_METHOD(MLX5_IB_METHOD_STEERING_ANCHOR_DESTROY));
+
+const struct uapi_definition mlx5_ib_flow_defs[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ MLX5_IB_OBJECT_FLOW_MATCHER),
+ UAPI_DEF_CHAIN_OBJ_TREE(
+ UVERBS_OBJECT_FLOW,
+ &mlx5_ib_fs),
+ UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
+ &mlx5_ib_flow_actions),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ MLX5_IB_OBJECT_STEERING_ANCHOR,
+ UAPI_DEF_IS_OBJ_SUPPORTED(mlx5_ib_shared_ft_allowed)),
+ {},
+};
+
+static const struct ib_device_ops flow_ops = {
+ .create_flow = mlx5_ib_create_flow,
+ .destroy_flow = mlx5_ib_destroy_flow,
+ .destroy_flow_action = mlx5_ib_destroy_flow_action,
+};
+
+int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
+{
+ int i, j;
+
+ dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
+
+ if (!dev->flow_db)
+ return -ENOMEM;
+
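+ /* RDMA_TRANSPORT priorities are tracked per (priority, port), so
+ * each priority gets its own per-port array.
+ */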
+ for (i = 0; i < MLX5_RDMA_TRANSPORT_BYPASS_PRIO; i++) {
+ dev->flow_db->rdma_transport_rx[i] =
+ kcalloc(dev->num_ports,
+ sizeof(struct mlx5_ib_flow_prio), GFP_KERNEL);
+ if (!dev->flow_db->rdma_transport_rx[i])
+ goto free_rdma_transport_rx;
+ }
+
+ for (j = 0; j < MLX5_RDMA_TRANSPORT_BYPASS_PRIO; j++) {
+ dev->flow_db->rdma_transport_tx[j] =
+ kcalloc(dev->num_ports,
+ sizeof(struct mlx5_ib_flow_prio), GFP_KERNEL);
+ if (!dev->flow_db->rdma_transport_tx[j])
+ goto free_rdma_transport_tx;
+ }
+
+ mutex_init(&dev->flow_db->lock);
+
+ ib_set_device_ops(&dev->ib_dev, &flow_ops);
+ return 0;
+
+free_rdma_transport_tx:
+ while (j--)
+ kfree(dev->flow_db->rdma_transport_tx[j]);
+free_rdma_transport_rx:
+ while (i--)
+ kfree(dev->flow_db->rdma_transport_rx[i]);
+ kfree(dev->flow_db);
+ return -ENOMEM;
+}
diff --git a/drivers/infiniband/hw/mlx5/fs.h b/drivers/infiniband/hw/mlx5/fs.h
new file mode 100644
index 000000000000..7abba0e2837c
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/fs.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#ifndef _MLX5_IB_FS_H
+#define _MLX5_IB_FS_H
+
+#include "mlx5_ib.h"
+
+int mlx5_ib_fs_init(struct mlx5_ib_dev *dev);
+void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev);
+
+static inline void mlx5_ib_fs_cleanup(struct mlx5_ib_dev *dev)
+{
+ int i;
+
+ /* When a steering anchor is created, a special flow table is also
+ * created for the user to reference. Since the user can reference it,
+ * the kernel cannot trust that when the user destroys the steering
+ * anchor, they no longer reference the flow table.
+ *
+ * To address this issue, when a user destroys a steering anchor, only
+ * the flow steering rule in the table is destroyed, but the table
+ * itself is kept to deal with the above scenario. The remaining
+ * resources are only removed when the RDMA device is destroyed, at
+ * which point it is safe to assume that all references are gone.
+ */
+ mlx5_ib_fs_cleanup_anchor(dev);
+ for (i = 0; i < MLX5_RDMA_TRANSPORT_BYPASS_PRIO; i++)
+ kfree(dev->flow_db->rdma_transport_tx[i]);
+ for (i = 0; i < MLX5_RDMA_TRANSPORT_BYPASS_PRIO; i++)
+ kfree(dev->flow_db->rdma_transport_rx[i]);
+ kfree(dev->flow_db);
+}
+#endif /* _MLX5_IB_FS_H */
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
new file mode 100644
index 000000000000..d5487834ed25
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -0,0 +1,492 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "mlx5_ib.h"
+
+struct mlx5_ib_gsi_wr {
+ struct ib_cqe cqe;
+ struct ib_wc wc;
+ bool completed:1;
+};
+
+static bool mlx5_ib_deth_sqpn_cap(struct mlx5_ib_dev *dev)
+{
+ return MLX5_CAP_GEN(dev->mdev, set_deth_sqpn);
+}
+
+/* Call with gsi->lock locked */
+static void generate_completions(struct mlx5_ib_qp *mqp)
+{
+ struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
+ struct ib_cq *gsi_cq = mqp->ibqp.send_cq;
+ struct mlx5_ib_gsi_wr *wr;
+ u32 index;
+
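+ /* Flush completions to the GSI send CQ in submission order, stopping
+ * at the first outstanding WR that has not completed yet.
+ */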
+ for (index = gsi->outstanding_ci; index != gsi->outstanding_pi;
+ index++) {
+ wr = &gsi->outstanding_wrs[index % gsi->cap.max_send_wr];
+
+ if (!wr->completed)
+ break;
+
+ WARN_ON_ONCE(mlx5_ib_generate_wc(gsi_cq, &wr->wc));
+ wr->completed = false;
+ }
+
+ gsi->outstanding_ci = index;
+}
+
+static void handle_single_completion(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct mlx5_ib_gsi_qp *gsi = cq->cq_context;
+ struct mlx5_ib_gsi_wr *wr =
+ container_of(wc->wr_cqe, struct mlx5_ib_gsi_wr, cqe);
+ struct mlx5_ib_qp *mqp = container_of(gsi, struct mlx5_ib_qp, gsi);
+ u64 wr_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gsi->lock, flags);
+ wr->completed = true;
+ wr_id = wr->wc.wr_id;
+ wr->wc = *wc;
+ wr->wc.wr_id = wr_id;
+ wr->wc.qp = &mqp->ibqp;
+
+ generate_completions(mqp);
+ spin_unlock_irqrestore(&gsi->lock, flags);
+}
+
+int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
+ struct ib_qp_init_attr *attr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_gsi_qp *gsi;
+ struct ib_qp_init_attr hw_init_attr = *attr;
+ const u8 port_num = attr->port_num;
+ int num_qps = 0;
+ int ret;
+
+ if (mlx5_ib_deth_sqpn_cap(dev)) {
+ if (MLX5_CAP_GEN(dev->mdev,
+ port_type) == MLX5_CAP_PORT_TYPE_IB)
+ num_qps = pd->device->attrs.max_pkeys;
+ else if (dev->lag_active)
+ num_qps = dev->lag_ports;
+ }
+
+ gsi = &mqp->gsi;
+ gsi->tx_qps = kcalloc(num_qps, sizeof(*gsi->tx_qps), GFP_KERNEL);
+ if (!gsi->tx_qps)
+ return -ENOMEM;
+
+ gsi->outstanding_wrs =
+ kcalloc(attr->cap.max_send_wr, sizeof(*gsi->outstanding_wrs),
+ GFP_KERNEL);
+ if (!gsi->outstanding_wrs) {
+ ret = -ENOMEM;
+ goto err_free_tx;
+ }
+
+ if (dev->devr.ports[port_num - 1].gsi) {
+ mlx5_ib_warn(dev, "GSI QP already exists on port %d\n",
+ port_num);
+ ret = -EBUSY;
+ goto err_free_wrs;
+ }
+ gsi->num_qps = num_qps;
+ spin_lock_init(&gsi->lock);
+
+ gsi->cap = attr->cap;
+ gsi->port_num = port_num;
+
+ gsi->cq = ib_alloc_cq(pd->device, gsi, attr->cap.max_send_wr, 0,
+ IB_POLL_SOFTIRQ);
+ if (IS_ERR(gsi->cq)) {
+ mlx5_ib_warn(dev,
+ "unable to create send CQ for GSI QP. error %pe\n",
+ gsi->cq);
+ ret = PTR_ERR(gsi->cq);
+ goto err_free_wrs;
+ }
+
+ hw_init_attr.qp_type = MLX5_IB_QPT_HW_GSI;
+ hw_init_attr.send_cq = gsi->cq;
+ if (num_qps) {
+ hw_init_attr.cap.max_send_wr = 0;
+ hw_init_attr.cap.max_send_sge = 0;
+ hw_init_attr.cap.max_inline_data = 0;
+ }
+
+ gsi->rx_qp = ib_create_qp(pd, &hw_init_attr);
+ if (IS_ERR(gsi->rx_qp)) {
+ mlx5_ib_warn(dev,
+ "unable to create hardware GSI QP. error %pe\n",
+ gsi->rx_qp);
+ ret = PTR_ERR(gsi->rx_qp);
+ goto err_destroy_cq;
+ }
+
+ dev->devr.ports[attr->port_num - 1].gsi = gsi;
+ return 0;
+
+err_destroy_cq:
+ ib_free_cq(gsi->cq);
+err_free_wrs:
+ kfree(gsi->outstanding_wrs);
+err_free_tx:
+ kfree(gsi->tx_qps);
+ return ret;
+}
+
+int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp)
+{
+ struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device);
+ struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
+ const int port_num = gsi->port_num;
+ int qp_index;
+ int ret;
+
+ ret = ib_destroy_qp(gsi->rx_qp);
+ if (ret) {
+ mlx5_ib_warn(dev, "unable to destroy hardware GSI QP. error %d\n",
+ ret);
+ return ret;
+ }
+ dev->devr.ports[port_num - 1].gsi = NULL;
+ gsi->rx_qp = NULL;
+
+ for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index) {
+ if (!gsi->tx_qps[qp_index])
+ continue;
+ WARN_ON_ONCE(ib_destroy_qp(gsi->tx_qps[qp_index]));
+ gsi->tx_qps[qp_index] = NULL;
+ }
+
+ ib_free_cq(gsi->cq);
+
+ kfree(gsi->outstanding_wrs);
+ kfree(gsi->tx_qps);
+ return 0;
+}
+
+static struct ib_qp *create_gsi_ud_qp(struct mlx5_ib_gsi_qp *gsi)
+{
+ struct ib_pd *pd = gsi->rx_qp->pd;
+ struct ib_qp_init_attr init_attr = {
+ .event_handler = gsi->rx_qp->event_handler,
+ .qp_context = gsi->rx_qp->qp_context,
+ .send_cq = gsi->cq,
+ .recv_cq = gsi->rx_qp->recv_cq,
+ .cap = {
+ .max_send_wr = gsi->cap.max_send_wr,
+ .max_send_sge = gsi->cap.max_send_sge,
+ .max_inline_data = gsi->cap.max_inline_data,
+ },
+ .qp_type = IB_QPT_UD,
+ .create_flags = MLX5_IB_QP_CREATE_SQPN_QP1,
+ };
+
+ return ib_create_qp(pd, &init_attr);
+}
+
+static int modify_to_rts(struct mlx5_ib_gsi_qp *gsi, struct ib_qp *qp,
+ u16 pkey_index)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct ib_qp_attr attr;
+ int mask;
+ int ret;
+
+ mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY | IB_QP_PORT;
+ attr.qp_state = IB_QPS_INIT;
+ attr.pkey_index = pkey_index;
+ attr.qkey = IB_QP1_QKEY;
+ attr.port_num = gsi->port_num;
+ ret = ib_modify_qp(qp, &attr, mask);
+ if (ret) {
+ mlx5_ib_err(dev, "could not change QP%d state to INIT: %d\n",
+ qp->qp_num, ret);
+ return ret;
+ }
+
+ attr.qp_state = IB_QPS_RTR;
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret) {
+ mlx5_ib_err(dev, "could not change QP%d state to RTR: %d\n",
+ qp->qp_num, ret);
+ return ret;
+ }
+
+ attr.qp_state = IB_QPS_RTS;
+ attr.sq_psn = 0;
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
+ if (ret) {
+ mlx5_ib_err(dev, "could not change QP%d state to RTS: %d\n",
+ qp->qp_num, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index)
+{
+ struct ib_device *device = gsi->rx_qp->device;
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ int pkey_index = qp_index;
+ struct mlx5_ib_qp *mqp;
+ struct ib_qp *qp;
+ unsigned long flags;
+ u16 pkey;
+ int ret;
+
+ if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
+ pkey_index = 0;
+
+ ret = ib_query_pkey(device, gsi->port_num, pkey_index, &pkey);
+ if (ret) {
+ mlx5_ib_warn(dev, "unable to read P_Key at port %d, index %d\n",
+ gsi->port_num, qp_index);
+ return;
+ }
+
+ if (!pkey) {
+ mlx5_ib_dbg(dev, "invalid P_Key at port %d, index %d. Skipping.\n",
+ gsi->port_num, qp_index);
+ return;
+ }
+
+ spin_lock_irqsave(&gsi->lock, flags);
+ qp = gsi->tx_qps[qp_index];
+ spin_unlock_irqrestore(&gsi->lock, flags);
+ if (qp) {
+ mlx5_ib_dbg(dev, "already existing GSI TX QP at port %d, index %d. Skipping\n",
+ gsi->port_num, qp_index);
+ return;
+ }
+
+ qp = create_gsi_ud_qp(gsi);
+ if (IS_ERR(qp)) {
+ mlx5_ib_warn(dev,
+ "unable to create hardware UD QP for GSI: %pe\n",
+ qp);
+ return;
+ }
+
+ mqp = to_mqp(qp);
+ if (dev->lag_active)
+ mqp->gsi_lag_port = qp_index + 1;
+ ret = modify_to_rts(gsi, qp, pkey_index);
+ if (ret)
+ goto err_destroy_qp;
+
+ spin_lock_irqsave(&gsi->lock, flags);
+ WARN_ON_ONCE(gsi->tx_qps[qp_index]);
+ gsi->tx_qps[qp_index] = qp;
+ spin_unlock_irqrestore(&gsi->lock, flags);
+
+ return;
+
+err_destroy_qp:
+ WARN_ON_ONCE(qp);
+}
+
+int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+ struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
+ u16 qp_index;
+ int ret;
+
+ mlx5_ib_dbg(dev, "modifying GSI QP to state %d\n", attr->qp_state);
+
+ ret = ib_modify_qp(gsi->rx_qp, attr, attr_mask);
+ if (ret) {
+ mlx5_ib_warn(dev, "unable to modify GSI rx QP: %d\n", ret);
+ return ret;
+ }
+
+ if (to_mqp(gsi->rx_qp)->state != IB_QPS_RTS)
+ return 0;
+
+ for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index)
+ setup_qp(gsi, qp_index);
+ return 0;
+}
+
+int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+ struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
+ int ret;
+
+ ret = ib_query_qp(gsi->rx_qp, qp_attr, qp_attr_mask, qp_init_attr);
+ qp_init_attr->cap = gsi->cap;
+ return ret;
+}
+
+/* Call with gsi->lock locked */
+static int mlx5_ib_add_outstanding_wr(struct mlx5_ib_qp *mqp,
+ struct ib_ud_wr *wr, struct ib_wc *wc)
+{
+ struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
+ struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
+ struct mlx5_ib_gsi_wr *gsi_wr;
+
+ if (gsi->outstanding_pi == gsi->outstanding_ci + gsi->cap.max_send_wr) {
+ mlx5_ib_warn(dev, "no available GSI work request.\n");
+ return -ENOMEM;
+ }
+
+ gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi %
+ gsi->cap.max_send_wr];
+ gsi->outstanding_pi++;
+
+ if (!wc) {
+ memset(&gsi_wr->wc, 0, sizeof(gsi_wr->wc));
+ gsi_wr->wc.pkey_index = wr->pkey_index;
+ gsi_wr->wc.wr_id = wr->wr.wr_id;
+ } else {
+ gsi_wr->wc = *wc;
+ gsi_wr->completed = true;
+ }
+
+ gsi_wr->cqe.done = &handle_single_completion;
+ wr->wr.wr_cqe = &gsi_wr->cqe;
+
+ return 0;
+}
+
+/* Call with gsi->lock locked */
+static int mlx5_ib_gsi_silent_drop(struct mlx5_ib_qp *mqp, struct ib_ud_wr *wr)
+{
+ struct ib_wc wc = {
+ { .wr_id = wr->wr.wr_id },
+ .status = IB_WC_SUCCESS,
+ .opcode = IB_WC_SEND,
+ .qp = &mqp->ibqp,
+ };
+ int ret;
+
+ ret = mlx5_ib_add_outstanding_wr(mqp, wr, &wc);
+ if (ret)
+ return ret;
+
+ generate_completions(mqp);
+
+ return 0;
+}
+
+/* Call with gsi->lock locked */
+static struct ib_qp *get_tx_qp(struct mlx5_ib_gsi_qp *gsi, struct ib_ud_wr *wr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
+ struct mlx5_ib_ah *ah = to_mah(wr->ah);
+ int qp_index = wr->pkey_index;
+
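+ /* Pick the TX QP by P_Key index, or by the AH transmit port when
+ * LAG is active; with no TX QPs the hardware GSI QP is used, and a
+ * NULL return makes the caller silently drop the WR.
+ */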
+ if (!gsi->num_qps)
+ return gsi->rx_qp;
+
+ if (dev->lag_active && ah->xmit_port)
+ qp_index = ah->xmit_port - 1;
+
+ if (qp_index >= gsi->num_qps)
+ return NULL;
+
+ return gsi->tx_qps[qp_index];
+}
+
+int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+ struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
+ struct ib_qp *tx_qp;
+ unsigned long flags;
+ int ret;
+
+ for (; wr; wr = wr->next) {
+ struct ib_ud_wr cur_wr = *ud_wr(wr);
+
+ cur_wr.wr.next = NULL;
+
+ spin_lock_irqsave(&gsi->lock, flags);
+ tx_qp = get_tx_qp(gsi, &cur_wr);
+ if (!tx_qp) {
+ ret = mlx5_ib_gsi_silent_drop(mqp, &cur_wr);
+ if (ret)
+ goto err;
+ spin_unlock_irqrestore(&gsi->lock, flags);
+ continue;
+ }
+
+ ret = mlx5_ib_add_outstanding_wr(mqp, &cur_wr, NULL);
+ if (ret)
+ goto err;
+
+ ret = ib_post_send(tx_qp, &cur_wr.wr, bad_wr);
+ if (ret) {
+ /* Undo the effect of adding the outstanding wr */
+ gsi->outstanding_pi--;
+ goto err;
+ }
+ spin_unlock_irqrestore(&gsi->lock, flags);
+ }
+
+ return 0;
+
+err:
+ spin_unlock_irqrestore(&gsi->lock, flags);
+ *bad_wr = wr;
+ return ret;
+}
+
+int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+ struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
+
+ return ib_post_recv(gsi->rx_qp, wr, bad_wr);
+}
+
+void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi)
+{
+ u16 qp_index;
+
+ for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index)
+ setup_qp(gsi, qp_index);
+}
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
new file mode 100644
index 000000000000..cc8859d3c2f5
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ */
+
+#include <linux/mlx5/vport.h>
+#include "ib_rep.h"
+#include "srq.h"
+
+static int
+mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev,
+ struct mlx5_eswitch_rep *rep,
+ int vport_index)
+{
+ struct mlx5_ib_dev *ibdev;
+ struct net_device *ndev;
+
+ ibdev = mlx5_eswitch_uplink_get_proto_dev(dev->priv.eswitch, REP_IB);
+ if (!ibdev)
+ return -EINVAL;
+
+ ibdev->port[vport_index].rep = rep;
+ rep->rep_data[REP_IB].priv = ibdev;
+ ndev = mlx5_ib_get_rep_netdev(rep->esw, rep->vport);
+
+ return ib_device_set_netdev(&ibdev->ib_dev, ndev, vport_index + 1);
+}
+
+static void mlx5_ib_register_peer_vport_reps(struct mlx5_core_dev *mdev);
+
+static void mlx5_ib_num_ports_update(struct mlx5_core_dev *dev, u32 *num_ports)
+{
+ struct mlx5_core_dev *peer_dev;
+ int i;
+
+ mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) {
+ u32 peer_num_ports = mlx5_eswitch_get_total_vports(peer_dev);
+
+ if (mlx5_lag_is_mpesw(peer_dev))
+ *num_ports += peer_num_ports;
+ else
+ /* Only 1 ib port is the representor for all uplinks */
+ *num_ports += peer_num_ports - 1;
+ }
+}
+
+static int
+mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+{
+ u32 num_ports = mlx5_eswitch_get_total_vports(dev);
+ struct mlx5_core_dev *lag_master = dev;
+ const struct mlx5_ib_profile *profile;
+ struct mlx5_core_dev *peer_dev;
+ struct mlx5_ib_dev *ibdev;
+ int new_uplink = false;
+ int vport_index;
+ int ret;
+ int i;
+
+ vport_index = rep->vport_index;
+
+ if (mlx5_lag_is_shared_fdb(dev)) {
+ if (mlx5_lag_is_master(dev)) {
+ mlx5_ib_num_ports_update(dev, &num_ports);
+ } else {
+ if (rep->vport == MLX5_VPORT_UPLINK) {
+ if (!mlx5_lag_is_mpesw(dev))
+ return 0;
+ new_uplink = true;
+ }
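+ /* On a shared-FDB slave, offset the vport index by the ports
+ * owned by lower-indexed peer devices so it maps onto the LAG
+ * master's port array.
+ */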
+ mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) {
+ u32 peer_n_ports = mlx5_eswitch_get_total_vports(peer_dev);
+
+ if (mlx5_lag_is_master(peer_dev))
+ lag_master = peer_dev;
+ else if (!mlx5_lag_is_mpesw(dev))
+ /* Only 1 ib port is the representor for all uplinks */
+ peer_n_ports--;
+
+ if (mlx5_get_dev_index(peer_dev) < mlx5_get_dev_index(dev))
+ vport_index += peer_n_ports;
+ }
+ }
+ }
+
+ if (rep->vport == MLX5_VPORT_UPLINK && !new_uplink)
+ profile = &raw_eth_profile;
+ else
+ return mlx5_ib_set_vport_rep(lag_master, rep, vport_index);
+
+ ibdev = ib_alloc_device_with_net(mlx5_ib_dev, ib_dev,
+ mlx5_core_net(lag_master));
+ if (!ibdev)
+ return -ENOMEM;
+
+ ibdev->port = kcalloc(num_ports, sizeof(*ibdev->port),
+ GFP_KERNEL);
+ if (!ibdev->port) {
+ ret = -ENOMEM;
+ goto fail_port;
+ }
+
+ ibdev->is_rep = true;
+ vport_index = rep->vport_index;
+ ibdev->port[vport_index].rep = rep;
+ ibdev->mdev = lag_master;
+ ibdev->num_ports = num_ports;
+ ibdev->ib_dev.phys_port_cnt = num_ports;
+ ret = ib_device_set_netdev(&ibdev->ib_dev,
+ mlx5_ib_get_rep_netdev(lag_master->priv.eswitch,
+ rep->vport),
+ vport_index + 1);
+ if (ret)
+ goto fail_add;
+
+ ret = __mlx5_ib_add(ibdev, profile);
+ if (ret)
+ goto fail_add;
+
+ rep->rep_data[REP_IB].priv = ibdev;
+ if (mlx5_lag_is_shared_fdb(lag_master))
+ mlx5_ib_register_peer_vport_reps(lag_master);
+
+ return 0;
+
+fail_add:
+ kfree(ibdev->port);
+fail_port:
+ ib_dealloc_device(&ibdev->ib_dev);
+ return ret;
+}
+
+static void *mlx5_ib_rep_to_dev(struct mlx5_eswitch_rep *rep)
+{
+ return rep->rep_data[REP_IB].priv;
+}
+
+static void
+mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5_core_dev *mdev = mlx5_eswitch_get_core_dev(rep->esw);
+ struct mlx5_ib_dev *dev = mlx5_ib_rep_to_dev(rep);
+ int vport_index = rep->vport_index;
+ struct mlx5_ib_port *port;
+ int i;
+
+ if (WARN_ON(!mdev))
+ return;
+
+ if (!dev)
+ return;
+
+ if (mlx5_lag_is_shared_fdb(mdev) &&
+ !mlx5_lag_is_master(mdev)) {
+ if (rep->vport == MLX5_VPORT_UPLINK && !mlx5_lag_is_mpesw(mdev))
+ return;
+ for (i = 0; i < dev->num_ports; i++) {
+ if (dev->port[i].rep == rep)
+ break;
+ }
+ if (WARN_ON(i == dev->num_ports))
+ return;
+ vport_index = i;
+ }
+
+ port = &dev->port[vport_index];
+
+ ib_device_set_netdev(&dev->ib_dev, NULL, vport_index + 1);
+ rep->rep_data[REP_IB].priv = NULL;
+ port->rep = NULL;
+
+ if (rep->vport == MLX5_VPORT_UPLINK) {
+
+ if (mlx5_lag_is_shared_fdb(mdev) && !mlx5_lag_is_master(mdev))
+ return;
+
+ if (mlx5_lag_is_shared_fdb(mdev)) {
+ struct mlx5_core_dev *peer_mdev;
+ struct mlx5_eswitch *esw;
+
+ mlx5_lag_for_each_peer_mdev(mdev, peer_mdev, i) {
+ esw = peer_mdev->priv.eswitch;
+ mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
+ }
+ }
+ __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
+ }
+}
+
+static const struct mlx5_eswitch_rep_ops rep_ops = {
+ .load = mlx5_ib_vport_rep_load,
+ .unload = mlx5_ib_vport_rep_unload,
+ .get_proto_dev = mlx5_ib_rep_to_dev,
+};
+
+static void mlx5_ib_register_peer_vport_reps(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_core_dev *peer_mdev;
+ struct mlx5_eswitch *esw;
+ int i;
+
+ mlx5_lag_for_each_peer_mdev(mdev, peer_mdev, i) {
+ esw = peer_mdev->priv.eswitch;
+ mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
+ }
+}
+
+struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
+ u16 vport_num)
+{
+ return mlx5_eswitch_get_proto_dev(esw, vport_num, REP_ETH);
+}
+
+struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_sq *sq,
+ u32 port)
+{
+ struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
+ struct mlx5_eswitch_rep *rep;
+
+ if (!dev->is_rep || !port)
+ return NULL;
+
+ if (!dev->port[port - 1].rep)
+ return ERR_PTR(-EINVAL);
+
+ rep = dev->port[port - 1].rep;
+
+ return mlx5_eswitch_add_send_to_vport_rule(esw, esw, rep, sq->base.mqp.qpn);
+}
+
+static int mlx5r_rep_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = idev->mdev;
+ struct mlx5_eswitch *esw;
+
+ esw = mdev->priv.eswitch;
+ mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
+ return 0;
+}
+
+static void mlx5r_rep_remove(struct auxiliary_device *adev)
+{
+ struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = idev->mdev;
+ struct mlx5_eswitch *esw;
+
+ esw = mdev->priv.eswitch;
+ mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
+}
+
+static const struct auxiliary_device_id mlx5r_rep_id_table[] = {
+ { .name = MLX5_ADEV_NAME ".rdma-rep", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, mlx5r_rep_id_table);
+
+static struct auxiliary_driver mlx5r_rep_driver = {
+ .name = "rep",
+ .probe = mlx5r_rep_probe,
+ .remove = mlx5r_rep_remove,
+ .id_table = mlx5r_rep_id_table,
+};
+
+int mlx5r_rep_init(void)
+{
+ return auxiliary_driver_register(&mlx5r_rep_driver);
+}
+
+void mlx5r_rep_cleanup(void)
+{
+ auxiliary_driver_unregister(&mlx5r_rep_driver);
+}
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.h b/drivers/infiniband/hw/mlx5/ib_rep.h
new file mode 100644
index 000000000000..9c55e5c528b4
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/ib_rep.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef __MLX5_IB_REP_H__
+#define __MLX5_IB_REP_H__
+
+#include <linux/mlx5/eswitch.h>
+#include "mlx5_ib.h"
+
+extern const struct mlx5_ib_profile raw_eth_profile;
+
+#ifdef CONFIG_MLX5_ESWITCH
+int mlx5r_rep_init(void);
+void mlx5r_rep_cleanup(void);
+struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_sq *sq,
+ u32 port);
+struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
+ u16 vport_num);
+#else /* CONFIG_MLX5_ESWITCH */
+static inline int mlx5r_rep_init(void) { return 0; }
+static inline void mlx5r_rep_cleanup(void) {}
+static inline
+struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_sq *sq,
+ u32 port)
+{
+ return NULL;
+}
+
+static inline
+struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
+ u16 vport_num)
+{
+ return NULL;
+}
+#endif
+#endif /* __MLX5_IB_REP_H__ */
diff --git a/drivers/infiniband/hw/mlx5/ib_virt.c b/drivers/infiniband/hw/mlx5/ib_virt.c
new file mode 100644
index 000000000000..afeb5e53254f
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/ib_virt.c
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/vport.h>
+#include "mlx5_ib.h"
+
+static inline u32 mlx_to_net_policy(enum port_state_policy mlx_policy)
+{
+ switch (mlx_policy) {
+ case MLX5_POLICY_DOWN:
+ return IFLA_VF_LINK_STATE_DISABLE;
+ case MLX5_POLICY_UP:
+ return IFLA_VF_LINK_STATE_ENABLE;
+ case MLX5_POLICY_FOLLOW:
+ return IFLA_VF_LINK_STATE_AUTO;
+ default:
+ return __IFLA_VF_LINK_STATE_MAX;
+ }
+}
+
+int mlx5_ib_get_vf_config(struct ib_device *device, int vf, u32 port,
+ struct ifla_vf_info *info)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_hca_vport_context *rep;
+ int err;
+
+ rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(mdev, 1, 1, vf + 1, rep);
+ if (err) {
+ mlx5_ib_warn(dev, "failed to query port policy for vf %d (%d)\n",
+ vf, err);
+ goto free;
+ }
+ memset(info, 0, sizeof(*info));
+ info->linkstate = mlx_to_net_policy(rep->policy);
+ if (info->linkstate == __IFLA_VF_LINK_STATE_MAX)
+ err = -EINVAL;
+
+free:
+ kfree(rep);
+ return err;
+}
+
+static inline enum port_state_policy net_to_mlx_policy(int policy)
+{
+ switch (policy) {
+ case IFLA_VF_LINK_STATE_DISABLE:
+ return MLX5_POLICY_DOWN;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ return MLX5_POLICY_UP;
+ case IFLA_VF_LINK_STATE_AUTO:
+ return MLX5_POLICY_FOLLOW;
+ default:
+ return MLX5_POLICY_INVALID;
+ }
+}
+
+int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
+ u32 port, int state)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_hca_vport_context *in;
+ struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
+ int err;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ in->policy = net_to_mlx_policy(state);
+ if (in->policy == MLX5_POLICY_INVALID) {
+ err = -EINVAL;
+ goto out;
+ }
+ in->field_select = MLX5_HCA_VPORT_SEL_STATE_POLICY;
+ err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
+ if (!err)
+ vfs_ctx[vf].policy = in->policy;
+
+out:
+ kfree(in);
+ return err;
+}
+
+int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
+ u32 port, struct ifla_vf_stats *stats)
+{
+ int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+ struct mlx5_core_dev *mdev;
+ struct mlx5_ib_dev *dev;
+ void *out;
+ int err;
+
+ dev = to_mdev(device);
+ mdev = dev->mdev;
+
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_core_query_vport_counter(mdev, true, vf, port, out);
+ if (err)
+ goto ex;
+
+ stats->rx_packets = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_unicast.packets);
+ stats->tx_packets = MLX5_GET64_PR(query_vport_counter_out, out, transmitted_ib_unicast.packets);
+ stats->rx_bytes = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_unicast.octets);
+ stats->tx_bytes = MLX5_GET64_PR(query_vport_counter_out, out, transmitted_ib_unicast.octets);
+ stats->multicast = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_multicast.packets);
+
+ex:
+ kfree(out);
+ return err;
+}
+
+static int set_vf_node_guid(struct ib_device *device, int vf, u32 port,
+ u64 guid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_hca_vport_context *in;
+ struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
+ int err;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ in->field_select = MLX5_HCA_VPORT_SEL_NODE_GUID;
+ in->node_guid = guid;
+ err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
+ if (!err) {
+ vfs_ctx[vf].node_guid = guid;
+ vfs_ctx[vf].node_guid_valid = 1;
+ }
+ kfree(in);
+ return err;
+}
+
+static int set_vf_port_guid(struct ib_device *device, int vf, u32 port,
+ u64 guid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_hca_vport_context *in;
+ struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
+ int err;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ in->field_select = MLX5_HCA_VPORT_SEL_PORT_GUID;
+ in->port_guid = guid;
+ err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
+ if (!err) {
+ vfs_ctx[vf].port_guid = guid;
+ vfs_ctx[vf].port_guid_valid = 1;
+ }
+ kfree(in);
+ return err;
+}
+
+int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u32 port,
+ u64 guid, int type)
+{
+ if (type == IFLA_VF_IB_NODE_GUID)
+ return set_vf_node_guid(device, vf, port, guid);
+ else if (type == IFLA_VF_IB_PORT_GUID)
+ return set_vf_port_guid(device, vf, port, guid);
+
+ return -EINVAL;
+}
+
+int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
+
+ node_guid->guid =
+ vfs_ctx[vf].node_guid_valid ? vfs_ctx[vf].node_guid : 0;
+ port_guid->guid =
+ vfs_ctx[vf].port_guid_valid ? vfs_ctx[vf].port_guid : 0;
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/mlx5/macsec.c b/drivers/infiniband/hw/mlx5/macsec.c
new file mode 100644
index 000000000000..3c56eb5eddf3
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/macsec.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */
+
+#include "macsec.h"
+#include <linux/mlx5/macsec.h>
+
+struct mlx5_reserved_gids {
+ int macsec_index;
+ const struct ib_gid_attr *physical_gid;
+};
+
+struct mlx5_roce_gids {
+ struct list_head roce_gid_list_entry;
+ u16 gid_idx;
+ union {
+ struct sockaddr_in sockaddr_in;
+ struct sockaddr_in6 sockaddr_in6;
+ } addr;
+};
+
+struct mlx5_macsec_device {
+ struct list_head macsec_devices_list_entry;
+ void *macdev;
+ struct list_head macsec_roce_gids;
+ struct list_head tx_rules_list;
+ struct list_head rx_rules_list;
+};
+
+static void cleanup_macsec_device(struct mlx5_macsec_device *macsec_device)
+{
+ if (!list_empty(&macsec_device->tx_rules_list) ||
+ !list_empty(&macsec_device->rx_rules_list) ||
+ !list_empty(&macsec_device->macsec_roce_gids))
+ return;
+
+ list_del(&macsec_device->macsec_devices_list_entry);
+ kfree(macsec_device);
+}
+
+static struct mlx5_macsec_device *get_macsec_device(void *macdev,
+ struct list_head *macsec_devices_list)
+{
+ struct mlx5_macsec_device *iter, *macsec_device = NULL;
+
+ list_for_each_entry(iter, macsec_devices_list, macsec_devices_list_entry) {
+ if (iter->macdev == macdev) {
+ macsec_device = iter;
+ break;
+ }
+ }
+
+ if (macsec_device)
+ return macsec_device;
+
+ macsec_device = kzalloc(sizeof(*macsec_device), GFP_KERNEL);
+ if (!macsec_device)
+ return NULL;
+
+ macsec_device->macdev = macdev;
+ INIT_LIST_HEAD(&macsec_device->tx_rules_list);
+ INIT_LIST_HEAD(&macsec_device->rx_rules_list);
+ INIT_LIST_HEAD(&macsec_device->macsec_roce_gids);
+ list_add(&macsec_device->macsec_devices_list_entry, macsec_devices_list);
+
+ return macsec_device;
+}
+
+static void mlx5_macsec_del_roce_gid(struct mlx5_macsec_device *macsec_device, u16 gid_idx)
+{
+ struct mlx5_roce_gids *current_gid, *next_gid;
+
+ list_for_each_entry_safe(current_gid, next_gid, &macsec_device->macsec_roce_gids,
+ roce_gid_list_entry)
+ if (current_gid->gid_idx == gid_idx) {
+ list_del(&current_gid->roce_gid_list_entry);
+ kfree(current_gid);
+ }
+}
+
+static void mlx5_macsec_save_roce_gid(struct mlx5_macsec_device *macsec_device,
+ const struct sockaddr *addr, u16 gid_idx)
+{
+ struct mlx5_roce_gids *roce_gids;
+
+ roce_gids = kzalloc(sizeof(*roce_gids), GFP_KERNEL);
+ if (!roce_gids)
+ return;
+
+ roce_gids->gid_idx = gid_idx;
+ if (addr->sa_family == AF_INET)
+ memcpy(&roce_gids->addr.sockaddr_in, addr, sizeof(roce_gids->addr.sockaddr_in));
+ else
+ memcpy(&roce_gids->addr.sockaddr_in6, addr, sizeof(roce_gids->addr.sockaddr_in6));
+
+ list_add_tail(&roce_gids->roce_gid_list_entry, &macsec_device->macsec_roce_gids);
+}
+
+static void handle_macsec_gids(struct list_head *macsec_devices_list,
+ struct mlx5_macsec_event_data *data)
+{
+ struct mlx5_macsec_device *macsec_device;
+ struct mlx5_roce_gids *gid;
+
+ macsec_device = get_macsec_device(data->macdev, macsec_devices_list);
+ if (!macsec_device)
+ return;
+
+ list_for_each_entry(gid, &macsec_device->macsec_roce_gids, roce_gid_list_entry) {
+ mlx5_macsec_add_roce_sa_rules(data->fs_id, (struct sockaddr *)&gid->addr,
+ gid->gid_idx, &macsec_device->tx_rules_list,
+ &macsec_device->rx_rules_list, data->macsec_fs,
+ data->is_tx);
+ }
+}
+
+static void del_sa_roce_rule(struct list_head *macsec_devices_list,
+ struct mlx5_macsec_event_data *data)
+{
+ struct mlx5_macsec_device *macsec_device;
+
+ macsec_device = get_macsec_device(data->macdev, macsec_devices_list);
+ WARN_ON(!macsec_device);
+
+ mlx5_macsec_del_roce_sa_rules(data->fs_id, data->macsec_fs,
+ &macsec_device->tx_rules_list,
+ &macsec_device->rx_rules_list, data->is_tx);
+}
+
+static int macsec_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct mlx5_macsec *macsec = container_of(nb, struct mlx5_macsec, blocking_events_nb);
+
+ mutex_lock(&macsec->lock);
+ switch (event) {
+ case MLX5_DRIVER_EVENT_MACSEC_SA_ADDED:
+ handle_macsec_gids(&macsec->macsec_devices_list, data);
+ break;
+ case MLX5_DRIVER_EVENT_MACSEC_SA_DELETED:
+ del_sa_roce_rule(&macsec->macsec_devices_list, data);
+ break;
+ default:
+ mutex_unlock(&macsec->lock);
+ return NOTIFY_DONE;
+ }
+ mutex_unlock(&macsec->lock);
+ return NOTIFY_OK;
+}
+
+void mlx5r_macsec_event_register(struct mlx5_ib_dev *dev)
+{
+ if (!mlx5_is_macsec_roce_supported(dev->mdev)) {
+ mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
+ return;
+ }
+
+ dev->macsec.blocking_events_nb.notifier_call = macsec_event;
+ blocking_notifier_chain_register(&dev->mdev->macsec_nh,
+ &dev->macsec.blocking_events_nb);
+}
+
+void mlx5r_macsec_event_unregister(struct mlx5_ib_dev *dev)
+{
+ if (!mlx5_is_macsec_roce_supported(dev->mdev)) {
+ mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
+ return;
+ }
+
+ blocking_notifier_chain_unregister(&dev->mdev->macsec_nh,
+ &dev->macsec.blocking_events_nb);
+}
+
+int mlx5r_macsec_init_gids_and_devlist(struct mlx5_ib_dev *dev)
+{
+ int i, j, max_gids;
+
+ if (!mlx5_is_macsec_roce_supported(dev->mdev)) {
+ mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
+ return 0;
+ }
+
+ max_gids = MLX5_CAP_ROCE(dev->mdev, roce_address_table_size);
+ for (i = 0; i < dev->num_ports; i++) {
+ dev->port[i].reserved_gids = kcalloc(max_gids,
+ sizeof(*dev->port[i].reserved_gids),
+ GFP_KERNEL);
+ if (!dev->port[i].reserved_gids)
+ goto err;
+
+ for (j = 0; j < max_gids; j++)
+ dev->port[i].reserved_gids[j].macsec_index = -1;
+ }
+
+ INIT_LIST_HEAD(&dev->macsec.macsec_devices_list);
+ mutex_init(&dev->macsec.lock);
+
+ return 0;
+err:
+ while (i >= 0) {
+ kfree(dev->port[i].reserved_gids);
+ i--;
+ }
+ return -ENOMEM;
+}
+
+void mlx5r_macsec_dealloc_gids(struct mlx5_ib_dev *dev)
+{
+ int i;
+
+ if (!mlx5_is_macsec_roce_supported(dev->mdev))
+ mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
+
+ for (i = 0; i < dev->num_ports; i++)
+ kfree(dev->port[i].reserved_gids);
+
+ mutex_destroy(&dev->macsec.lock);
+}
+
+int mlx5r_add_gid_macsec_operations(const struct ib_gid_attr *attr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(attr->device);
+ struct mlx5_macsec_device *macsec_device;
+ const struct ib_gid_attr *physical_gid;
+ struct mlx5_reserved_gids *mgids;
+ struct net_device *ndev;
+ int ret = 0;
+ union {
+ struct sockaddr_in sockaddr_in;
+ struct sockaddr_in6 sockaddr_in6;
+ } addr;
+
+ if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
+ return 0;
+
+ if (!mlx5_is_macsec_roce_supported(dev->mdev)) {
+ mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
+ return 0;
+ }
+
+ rcu_read_lock();
+ ndev = rcu_dereference(attr->ndev);
+ if (!ndev) {
+ rcu_read_unlock();
+ return -ENODEV;
+ }
+
+ if (!netif_is_macsec(ndev) || !macsec_netdev_is_offloaded(ndev)) {
+ rcu_read_unlock();
+ return 0;
+ }
+ dev_hold(ndev);
+ rcu_read_unlock();
+
+ mutex_lock(&dev->macsec.lock);
+ macsec_device = get_macsec_device(ndev, &dev->macsec.macsec_devices_list);
+ if (!macsec_device) {
+ ret = -ENOMEM;
+ goto dev_err;
+ }
+
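+ /* If the same GID already exists on the physical netdev, clear it
+ * from HW and remember it so it can be restored once the MACsec GID
+ * is deleted.
+ */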
+ physical_gid = rdma_find_gid(attr->device, &attr->gid,
+ attr->gid_type, NULL);
+ if (!IS_ERR(physical_gid)) {
+ ret = set_roce_addr(to_mdev(physical_gid->device),
+ physical_gid->port_num,
+ physical_gid->index, NULL,
+ physical_gid);
+ if (ret)
+ goto gid_err;
+
+ mgids = &dev->port[attr->port_num - 1].reserved_gids[physical_gid->index];
+ mgids->macsec_index = attr->index;
+ mgids->physical_gid = physical_gid;
+ }
+
+ /* Proceed with adding steering rules, regardless of whether there was gid ambiguity. */
+ rdma_gid2ip((struct sockaddr *)&addr, &attr->gid);
+ ret = mlx5_macsec_add_roce_rule(ndev, (struct sockaddr *)&addr, attr->index,
+ &macsec_device->tx_rules_list,
+ &macsec_device->rx_rules_list, dev->mdev->macsec_fs);
+ if (ret && !IS_ERR(physical_gid))
+ goto rule_err;
+
+ mlx5_macsec_save_roce_gid(macsec_device, (struct sockaddr *)&addr, attr->index);
+
+ dev_put(ndev);
+ mutex_unlock(&dev->macsec.lock);
+ return ret;
+
+rule_err:
+ set_roce_addr(to_mdev(physical_gid->device), physical_gid->port_num,
+ physical_gid->index, &physical_gid->gid, physical_gid);
+ mgids->macsec_index = -1;
+gid_err:
+ rdma_put_gid_attr(physical_gid);
+ cleanup_macsec_device(macsec_device);
+dev_err:
+ dev_put(ndev);
+ mutex_unlock(&dev->macsec.lock);
+ return ret;
+}
+
+void mlx5r_del_gid_macsec_operations(const struct ib_gid_attr *attr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(attr->device);
+ struct mlx5_macsec_device *macsec_device;
+ struct mlx5_reserved_gids *mgids;
+ struct net_device *ndev;
+ int i, max_gids;
+
+ if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
+ return;
+
+ if (!mlx5_is_macsec_roce_supported(dev->mdev)) {
+ mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
+ return;
+ }
+
+ mgids = &dev->port[attr->port_num - 1].reserved_gids[attr->index];
+ if (mgids->macsec_index != -1) { /* Checking if physical gid has ambiguous IP */
+ rdma_put_gid_attr(mgids->physical_gid);
+ mgids->macsec_index = -1;
+ return;
+ }
+
+ rcu_read_lock();
+ ndev = rcu_dereference(attr->ndev);
+ if (!ndev) {
+ rcu_read_unlock();
+ return;
+ }
+
+ if (!netif_is_macsec(ndev) || !macsec_netdev_is_offloaded(ndev)) {
+ rcu_read_unlock();
+ return;
+ }
+ dev_hold(ndev);
+ rcu_read_unlock();
+
+ mutex_lock(&dev->macsec.lock);
+ max_gids = MLX5_CAP_ROCE(dev->mdev, roce_address_table_size);
+ for (i = 0; i < max_gids; i++) { /* Checking if macsec gid has ambiguous IP */
+ mgids = &dev->port[attr->port_num - 1].reserved_gids[i];
+ if (mgids->macsec_index == attr->index) {
+ const struct ib_gid_attr *physical_gid = mgids->physical_gid;
+
+ set_roce_addr(to_mdev(physical_gid->device),
+ physical_gid->port_num,
+ physical_gid->index,
+ &physical_gid->gid, physical_gid);
+
+ rdma_put_gid_attr(physical_gid);
+ mgids->macsec_index = -1;
+ break;
+ }
+ }
+ macsec_device = get_macsec_device(ndev, &dev->macsec.macsec_devices_list);
+ mlx5_macsec_del_roce_rule(attr->index, dev->mdev->macsec_fs,
+ &macsec_device->tx_rules_list, &macsec_device->rx_rules_list);
+ mlx5_macsec_del_roce_gid(macsec_device, attr->index);
+ cleanup_macsec_device(macsec_device);
+
+ dev_put(ndev);
+ mutex_unlock(&dev->macsec.lock);
+}
diff --git a/drivers/infiniband/hw/mlx5/macsec.h b/drivers/infiniband/hw/mlx5/macsec.h
new file mode 100644
index 000000000000..9b77ba90f0f4
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/macsec.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __MLX5_MACSEC_H__
+#define __MLX5_MACSEC_H__
+
+#include <net/macsec.h>
+#include <rdma/ib_cache.h>
+#include <rdma/ib_addr.h>
+#include "mlx5_ib.h"
+
+#ifdef CONFIG_MLX5_MACSEC
+struct mlx5_reserved_gids;
+
+int mlx5r_add_gid_macsec_operations(const struct ib_gid_attr *attr);
+void mlx5r_del_gid_macsec_operations(const struct ib_gid_attr *attr);
+int mlx5r_macsec_init_gids_and_devlist(struct mlx5_ib_dev *dev);
+void mlx5r_macsec_dealloc_gids(struct mlx5_ib_dev *dev);
+void mlx5r_macsec_event_register(struct mlx5_ib_dev *dev);
+void mlx5r_macsec_event_unregister(struct mlx5_ib_dev *dev);
+#else
+static inline int mlx5r_add_gid_macsec_operations(const struct ib_gid_attr *attr) { return 0; }
+static inline void mlx5r_del_gid_macsec_operations(const struct ib_gid_attr *attr) {}
+static inline int mlx5r_macsec_init_gids_and_devlist(struct mlx5_ib_dev *dev) { return 0; }
+static inline void mlx5r_macsec_dealloc_gids(struct mlx5_ib_dev *dev) {}
+static inline void mlx5r_macsec_event_register(struct mlx5_ib_dev *dev) {}
+static inline void mlx5r_macsec_event_unregister(struct mlx5_ib_dev *dev) {}
+#endif
+#endif /* __MLX5_MACSEC_H__ */
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index b84d13a487cc..2453ae4384a7 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -30,22 +30,37 @@
* SOFTWARE.
*/

-#include <linux/mlx5/cmd.h>
+#include <linux/mlx5/vport.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
+#include <rdma/ib_pma.h>
#include "mlx5_ib.h"
+#include "cmd.h"
enum {
MLX5_IB_VENDOR_CLASS1 = 0x9,
MLX5_IB_VENDOR_CLASS2 = 0xa
};

-int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
- u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const void *in_mad, void *response_mad)
+static bool can_do_mad_ifc(struct mlx5_ib_dev *dev, u32 port_num,
+ struct ib_mad *in_mad)
+{
+ if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED &&
+ in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ return true;
+ return dev->port_caps[port_num - 1].has_smi;
+}
+
+static int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey,
+ int ignore_bkey, u32 port, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh, const void *in_mad,
+ void *response_mad)
{
u8 op_modifier = 0;

+ if (!can_do_mad_ifc(dev, port, (struct ib_mad *)in_mad))
+ return -EPERM;
+
/* Key check traps can't be generated unless we have in_wc to
* tell us where to send the trap.
*/
@@ -54,73 +69,301 @@ int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
if (ignore_bkey || !in_wc)
op_modifier |= 0x2;

- return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port);
+ return mlx5_cmd_mad_ifc(dev, in_mad, response_mad, op_modifier,
+ port);
+}
+
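+/* IB PortCountersExtended data fields count 32-bit words, hence the
+ * octet counters below are shifted right by two.
+ */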
+static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
+ void *out)
+{
+#define MLX5_SUM_CNT(p, cntr1, cntr2) \
+ (MLX5_GET64(query_vport_counter_out, p, cntr1) + \
+ MLX5_GET64(query_vport_counter_out, p, cntr2))
+
+ pma_cnt_ext->port_xmit_data =
+ cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
+ transmitted_ib_multicast.octets) >> 2);
+ pma_cnt_ext->port_rcv_data =
+ cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
+ received_ib_multicast.octets) >> 2);
+ pma_cnt_ext->port_xmit_packets =
+ cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.packets,
+ transmitted_ib_multicast.packets));
+ pma_cnt_ext->port_rcv_packets =
+ cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.packets,
+ received_ib_multicast.packets));
+ pma_cnt_ext->port_unicast_xmit_packets =
+ MLX5_GET64_BE(query_vport_counter_out,
+ out, transmitted_ib_unicast.packets);
+ pma_cnt_ext->port_unicast_rcv_packets =
+ MLX5_GET64_BE(query_vport_counter_out,
+ out, received_ib_unicast.packets);
+ pma_cnt_ext->port_multicast_xmit_packets =
+ MLX5_GET64_BE(query_vport_counter_out,
+ out, transmitted_ib_multicast.packets);
+ pma_cnt_ext->port_multicast_rcv_packets =
+ MLX5_GET64_BE(query_vport_counter_out,
+ out, received_ib_multicast.packets);
+}
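The right shift by 2 above follows the IB PMA convention that PortXmitData and PortRcvData count 32-bit words rather than octets. A minimal standalone sketch of the same conversion, using made-up counter values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical hardware octet counters */
	uint64_t unicast_octets   = 1000000;
	uint64_t multicast_octets =  200000;

	/* same conversion as pma_cnt_ext_assign(): sum, then octets -> 32-bit words */
	uint64_t port_xmit_data = (unicast_octets + multicast_octets) >> 2;

	printf("PortXmitData (32-bit words): %llu\n",
	       (unsigned long long)port_xmit_data);
	return 0;
}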
+
+static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
+ void *out)
+{
+ /* Traffic counters will be reported in
+	 * their 64-bit form via ib_pma_portcounters_ext by default.
+ */
+ void *out_pma = MLX5_ADDR_OF(ppcnt_reg, out,
+ counter_set);
+
+#define MLX5_ASSIGN_PMA_CNTR(counter_var, counter_name) { \
+ counter_var = MLX5_GET_BE(typeof(counter_var), \
+ ib_port_cntrs_grp_data_layout, \
+ out_pma, counter_name); \
+ }
+
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->symbol_error_counter,
+ symbol_error_counter);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_error_recovery_counter,
+ link_error_recovery_counter);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_downed_counter,
+ link_downed_counter);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_errors,
+ port_rcv_errors);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_remphys_errors,
+ port_rcv_remote_physical_errors);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_switch_relay_errors,
+ port_rcv_switch_relay_errors);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_discards,
+ port_xmit_discards);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_constraint_errors,
+ port_xmit_constraint_errors);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_wait,
+ port_xmit_wait);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_constraint_errors,
+ port_rcv_constraint_errors);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_overrun_errors,
+ link_overrun_errors);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->vl15_dropped,
+ vl_15_dropped);
+}
+
+static void pma_cnt_ext_assign_ppcnt(struct ib_pma_portcounters_ext *cnt_ext,
+ void *out)
+{
+ void *out_pma = MLX5_ADDR_OF(ppcnt_reg, out,
+ counter_set);
+
+#define MLX5_GET_EXT_CNTR(counter_name) \
+ MLX5_GET64(ib_ext_port_cntrs_grp_data_layout, \
+ out_pma, counter_name##_high)
+
+ cnt_ext->port_xmit_data =
+ cpu_to_be64(MLX5_GET_EXT_CNTR(port_xmit_data) >> 2);
+ cnt_ext->port_rcv_data =
+ cpu_to_be64(MLX5_GET_EXT_CNTR(port_rcv_data) >> 2);
+
+ cnt_ext->port_xmit_packets =
+ cpu_to_be64(MLX5_GET_EXT_CNTR(port_xmit_pkts));
+ cnt_ext->port_rcv_packets =
+ cpu_to_be64(MLX5_GET_EXT_CNTR(port_rcv_pkts));
+
+ cnt_ext->port_unicast_xmit_packets =
+ cpu_to_be64(MLX5_GET_EXT_CNTR(port_unicast_xmit_pkts));
+ cnt_ext->port_unicast_rcv_packets =
+ cpu_to_be64(MLX5_GET_EXT_CNTR(port_unicast_rcv_pkts));
+
+ cnt_ext->port_multicast_xmit_packets =
+ cpu_to_be64(MLX5_GET_EXT_CNTR(port_multicast_xmit_pkts));
+ cnt_ext->port_multicast_rcv_packets =
+ cpu_to_be64(MLX5_GET_EXT_CNTR(port_multicast_rcv_pkts));
+}
+
+static int query_ib_ppcnt(struct mlx5_core_dev *dev, u8 port_num, u8 plane_num,
+ void *out, size_t sz, bool ext)
+{
+ u32 *in;
+ int err;
+
+ in = kvzalloc(sz, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ return err;
+ }
+
+ MLX5_SET(ppcnt_reg, in, local_port, port_num);
+ MLX5_SET(ppcnt_reg, in, plane_ind, plane_num);
+
+ if (ext)
+ MLX5_SET(ppcnt_reg, in, grp,
+ MLX5_INFINIBAND_EXTENDED_PORT_COUNTERS_GROUP);
+ else
+ MLX5_SET(ppcnt_reg, in, grp,
+ MLX5_INFINIBAND_PORT_COUNTERS_GROUP);
+ err = mlx5_core_access_reg(dev, in, sz, out,
+ sz, MLX5_REG_PPCNT, 0, 0);
+
+ kvfree(in);
+ return err;
}
-int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
+ const struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+ struct mlx5_core_dev *mdev;
+ bool native_port = true;
+ u32 mdev_port_num;
+ void *out_cnt;
+ int err;
+
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
+ if (!mdev) {
+		/* Failed to get the native port, likely because the 2nd port
+		 * is still unaffiliated. In such a case, default to the 1st
+		 * port and the attached PF device.
+ */
+ native_port = false;
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ }
+ if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1 &&
+ !mlx5_core_mp_enabled(mdev) &&
+ dev->ib_dev.type != RDMA_DEVICE_TYPE_SMI) {
+ /* set local port to one for Function-Per-Port HCA. */
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ }
+
+ /* Declaring support of extended counters */
+ if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
+ struct ib_class_port_info cpi = {};
+
+ cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+ memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
+ err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+ goto done;
+ }
+
+ if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) {
+ struct ib_pma_portcounters_ext *pma_cnt_ext =
+ (struct ib_pma_portcounters_ext *)(out_mad->data + 40);
+ int sz = max(MLX5_ST_SZ_BYTES(query_vport_counter_out),
+ MLX5_ST_SZ_BYTES(ppcnt_reg));
+
+ out_cnt = kvzalloc(sz, GFP_KERNEL);
+ if (!out_cnt) {
+ err = IB_MAD_RESULT_FAILURE;
+ goto done;
+ }
+
+ if (dev->ib_dev.type == RDMA_DEVICE_TYPE_SMI) {
+ err = query_ib_ppcnt(mdev, mdev_port_num,
+ port_num, out_cnt, sz, 1);
+ if (!err)
+ pma_cnt_ext_assign_ppcnt(pma_cnt_ext, out_cnt);
+ } else {
+ err = mlx5_core_query_vport_counter(mdev, 0, 0,
+ mdev_port_num,
+ out_cnt);
+ if (!err)
+ pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
+ }
+ } else {
+ struct ib_pma_portcounters *pma_cnt =
+ (struct ib_pma_portcounters *)(out_mad->data + 40);
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+ out_cnt = kvzalloc(sz, GFP_KERNEL);
+ if (!out_cnt) {
+ err = IB_MAD_RESULT_FAILURE;
+ goto done;
+ }
+
+ if (dev->ib_dev.type == RDMA_DEVICE_TYPE_SMI)
+ err = query_ib_ppcnt(mdev, mdev_port_num, port_num,
+ out_cnt, sz, 0);
+ else
+ err = query_ib_ppcnt(mdev, mdev_port_num, 0,
+ out_cnt, sz, 0);
+
+ if (!err)
+ pma_cnt_assign(pma_cnt, out_cnt);
+ }
+ kvfree(out_cnt);
+ err = err ? IB_MAD_RESULT_FAILURE :
+ IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+done:
+ if (native_port)
+ mlx5_ib_put_native_port_mdev(dev, port_num);
+ return err;
+}
+
+int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ u8 mgmt_class = in->mad_hdr.mgmt_class;
+ u8 method = in->mad_hdr.method;
u16 slid;
int err;
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
+ slid = in_wc ? ib_lid_cpu16(in_wc->slid) :
+ be16_to_cpu(IB_LID_PERMISSIVE);
- slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
-
- if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0)
+ if (method == IB_MGMT_METHOD_TRAP && !slid)
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
- if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
+ switch (mgmt_class) {
+ case IB_MGMT_CLASS_SUBN_LID_ROUTED:
+ case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: {
+ if (method != IB_MGMT_METHOD_GET &&
+ method != IB_MGMT_METHOD_SET &&
+ method != IB_MGMT_METHOD_TRAP_REPRESS)
return IB_MAD_RESULT_SUCCESS;
/* Don't process SMInfo queries -- the SMA can't handle them.
*/
- if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
+ if (in->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
return IB_MAD_RESULT_SUCCESS;
- } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
- in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS1 ||
- in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS2 ||
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
- if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
+ } break;
+ case IB_MGMT_CLASS_PERF_MGMT:
+ if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
+ method == IB_MGMT_METHOD_GET)
+ return process_pma_cmd(dev, port_num, in, out);
+ fallthrough;
+ case MLX5_IB_VENDOR_CLASS1:
+ case MLX5_IB_VENDOR_CLASS2:
+ case IB_MGMT_CLASS_CONG_MGMT: {
+ if (method != IB_MGMT_METHOD_GET &&
+ method != IB_MGMT_METHOD_SET)
return IB_MAD_RESULT_SUCCESS;
- } else {
+ } break;
+ default:
return IB_MAD_RESULT_SUCCESS;
}
- err = mlx5_MAD_IFC(to_mdev(ibdev),
- mad_flags & IB_MAD_IGNORE_MKEY,
- mad_flags & IB_MAD_IGNORE_BKEY,
- port_num, in_wc, in_grh, in_mad, out_mad);
+ err = mlx5_MAD_IFC(to_mdev(ibdev), mad_flags & IB_MAD_IGNORE_MKEY,
+ mad_flags & IB_MAD_IGNORE_BKEY, port_num, in_wc,
+ in_grh, in, out);
if (err)
return IB_MAD_RESULT_FAILURE;
/* set return bit in status of directed route responses */
- if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);
+ if (mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ out->mad_hdr.status |= cpu_to_be16(1 << 15);
- if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
+ if (method == IB_MGMT_METHOD_TRAP_REPRESS)
/* no response for trap repress */
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
-int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
+int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
u16 packet_error;
@@ -129,7 +372,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -137,7 +380,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
packet_error = be16_to_cpu(out_mad->status);
- dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
+ dev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;
out:
@@ -146,17 +389,17 @@ out:
return err;
}
-int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
- struct ib_smp *out_mad)
+static int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
+ struct ib_smp *out_mad)
{
- struct ib_smp *in_mad = NULL;
- int err = -ENOMEM;
+ struct ib_smp *in_mad;
+ int err;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
if (!in_mad)
return -ENOMEM;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
@@ -169,8 +412,8 @@ int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
__be64 *sys_image_guid)
{
- struct ib_smp *out_mad = NULL;
- int err = -ENOMEM;
+ struct ib_smp *out_mad;
+ int err;
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!out_mad)
@@ -191,8 +434,8 @@ out:
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
u16 *max_pkeys)
{
- struct ib_smp *out_mad = NULL;
- int err = -ENOMEM;
+ struct ib_smp *out_mad;
+ int err;
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!out_mad)
@@ -213,8 +456,8 @@ out:
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
u32 *vendor_id)
{
- struct ib_smp *out_mad = NULL;
- int err = -ENOMEM;
+ struct ib_smp *out_mad;
+ int err;
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!out_mad)
@@ -234,8 +477,8 @@ out:
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
@@ -243,14 +486,14 @@ int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
- memcpy(node_desc, out_mad->data, 64);
+ memcpy(node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
out:
kfree(in_mad);
kfree(out_mad);
@@ -259,8 +502,8 @@ out:
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
@@ -268,7 +511,7 @@ int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
@@ -282,11 +525,11 @@ out:
return err;
}
-int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
+int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
@@ -294,7 +537,7 @@ int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
in_mad->attr_mod = cpu_to_be32(index / 32);
@@ -311,11 +554,11 @@ out:
return err;
}
-int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
+int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
@@ -323,7 +566,7 @@ int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -334,7 +577,7 @@ int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
memcpy(gid->raw, out_mad->data + 8, 8);
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
in_mad->attr_mod = cpu_to_be32(index / 8);
@@ -351,29 +594,24 @@ out:
return err;
}
-int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
+int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int ext_active_speed;
int err = -ENOMEM;
- if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) {
- mlx5_ib_warn(dev, "invalid port number %d\n", port);
- return -EINVAL;
- }
-
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!in_mad || !out_mad)
goto out;
- memset(props, 0, sizeof(*props));
+ /* props being zeroed by the caller, avoid zeroing it here */
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -392,7 +630,7 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
props->gid_tbl_len = out_mad->data[50];
props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
- props->pkey_tbl_len = mdev->port_caps[port - 1].pkey_table_len;
+ props->pkey_tbl_len = dev->pkey_table_len;
props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
props->active_width = out_mad->data[31] & 0xf;
@@ -403,6 +641,14 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
props->max_vl_num = out_mad->data[37] >> 4;
props->init_type_reply = out_mad->data[41] >> 4;
+ if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP) {
+ props->port_cap_flags2 =
+ be16_to_cpup((__be16 *)(out_mad->data + 60));
+
+ if (props->port_cap_flags2 & IB_PORT_LINK_WIDTH_2X_SUP)
+ props->active_width = out_mad->data[31] & 0x1f;
+ }
+
/* Check if extended speeds (EDR/FDR/...) are supported */
if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
ext_active_speed = out_mad->data[62] >> 4;
@@ -414,14 +660,37 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
case 2:
props->active_speed = 32; /* EDR */
break;
+ case 4:
+ if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP &&
+ props->port_cap_flags2 & IB_PORT_LINK_SPEED_HDR_SUP)
+ props->active_speed = IB_SPEED_HDR;
+ break;
+ case 8:
+ if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP &&
+ props->port_cap_flags2 & IB_PORT_LINK_SPEED_NDR_SUP)
+ props->active_speed = IB_SPEED_NDR;
+ break;
+ }
+ }
+
+ /* Check if extended speeds 2 (XDR/...) are supported */
+ if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP &&
+ props->port_cap_flags2 & IB_PORT_EXTENDED_SPEEDS2_SUP) {
+ ext_active_speed = (out_mad->data[56] >> 4) & 0x6;
+
+ switch (ext_active_speed) {
+ case 2:
+ if (props->port_cap_flags2 & IB_PORT_LINK_SPEED_XDR_SUP)
+ props->active_speed = IB_SPEED_XDR;
+ break;
}
}
/* If reported active speed is QDR, check if is FDR-10 */
if (props->active_speed == 4) {
- if (mdev->port_caps[port - 1].ext_port_cap &
+ if (dev->port_caps[port - 1].ext_port_cap &
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 41d6911e244e..fc1e86f6c409 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1,74 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2020, Intel Corporation. All rights reserved.
*/
-#include <asm-generic/kmap_types.h>
+#include <linux/debugfs.h>
+#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
-#include <linux/io-mapping.h>
+#include <linux/bitmap.h>
+#include <linux/log2.h>
#include <linux/sched.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/task.h>
+#include <linux/delay.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
+#include <linux/mlx5/fs.h>
+#include <linux/mlx5/eswitch.h>
+#include <linux/mlx5/driver.h>
+#include <linux/list.h>
#include <rdma/ib_smi.h>
-#include <rdma/ib_umem.h>
-#include "user.h"
+#include <rdma/ib_umem_odp.h>
+#include <rdma/lag.h>
+#include <linux/in.h>
+#include <linux/etherdevice.h>
#include "mlx5_ib.h"
-
-#define DRIVER_NAME "mlx5_ib"
-#define DRIVER_VERSION "2.2-1"
-#define DRIVER_RELDATE "Feb 2014"
+#include "ib_rep.h"
+#include "cmd.h"
+#include "devx.h"
+#include "dm.h"
+#include "fs.h"
+#include "srq.h"
+#include "qp.h"
+#include "wr.h"
+#include "restrack.h"
+#include "counters.h"
+#include "umr.h"
+#include <rdma/uverbs_std_types.h>
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/mlx5_user_ioctl_verbs.h>
+#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/ib_ucaps.h>
+#include "macsec.h"
+#include "data_direct.h"
+#include "dmah.h"
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
-MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
+MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) IB driver");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRIVER_VERSION);
-static int deprecated_prof_sel = 2;
-module_param_named(prof_sel, deprecated_prof_sel, int, 0444);
-MODULE_PARM_DESC(prof_sel, "profile selector. Deprecated here. Moved to module mlx5_core");
+struct mlx5_ib_event_work {
+ struct work_struct work;
+ union {
+ struct mlx5_ib_dev *dev;
+ struct mlx5_ib_multiport_info *mpi;
+ };
+ bool is_slave;
+ unsigned int event;
+ void *param;
+};
-static char mlx5_version[] =
- DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
- DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
+enum {
+ MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
+};
-static enum rdma_link_layer
-mlx5_ib_port_link_layer(struct ib_device *device)
+static struct workqueue_struct *mlx5_ib_event_wq;
+static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
+static LIST_HEAD(mlx5_ib_dev_list);
+/*
+ * This mutex should be held when accessing either of the above lists
+ */
+static DEFINE_MUTEX(mlx5_ib_multiport_mutex);
+
+struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
{
- struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_ib_dev *dev;
+
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ dev = mpi->ibdev;
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+ return dev;
+}
- switch (MLX5_CAP_GEN(dev->mdev, port_type)) {
+static enum rdma_link_layer
+mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
+{
+ switch (port_type_cap) {
case MLX5_CAP_PORT_TYPE_IB:
return IB_LINK_LAYER_INFINIBAND;
case MLX5_CAP_PORT_TYPE_ETH:
@@ -78,9 +106,606 @@ mlx5_ib_port_link_layer(struct ib_device *device)
}
}
+static enum rdma_link_layer
+mlx5_ib_port_link_layer(struct ib_device *device, u32 port_num)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
+
+ return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+}
+
+static int get_port_state(struct ib_device *ibdev,
+ u32 port_num,
+ enum ib_port_state *state)
+{
+ struct ib_port_attr attr;
+ int ret;
+
+ memset(&attr, 0, sizeof(attr));
+ ret = ibdev->ops.query_port(ibdev, port_num, &attr);
+ if (!ret)
+ *state = attr.state;
+ return ret;
+}
+
+static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
+ struct net_device *ndev,
+ struct net_device *upper,
+ u32 *port_num)
+{
+ struct net_device *rep_ndev;
+ struct mlx5_ib_port *port;
+ int i;
+
+ for (i = 0; i < dev->num_ports; i++) {
+ port = &dev->port[i];
+ if (!port->rep)
+ continue;
+
+ if (upper == ndev && port->rep->vport == MLX5_VPORT_UPLINK) {
+ *port_num = i + 1;
+ return &port->roce;
+ }
+
+ if (upper && port->rep->vport == MLX5_VPORT_UPLINK)
+ continue;
+ rep_ndev = ib_device_get_netdev(&dev->ib_dev, i + 1);
+ if (rep_ndev && rep_ndev == ndev) {
+ dev_put(rep_ndev);
+ *port_num = i + 1;
+ return &port->roce;
+ }
+
+ dev_put(rep_ndev);
+ }
+
+ return NULL;
+}
+
+static bool mlx5_netdev_send_event(struct mlx5_ib_dev *dev,
+ struct net_device *ndev,
+ struct net_device *upper,
+ struct net_device *ib_ndev)
+{
+ if (!dev->ib_active)
+ return false;
+
+ /* Event is about our upper device */
+ if (upper == ndev)
+ return true;
+
+ /* RDMA device is not in lag and not in switchdev */
+ if (!dev->is_rep && !upper && ndev == ib_ndev)
+ return true;
+
+	/* RDMA device is in switchdev */
+ if (dev->is_rep && ndev == ib_ndev)
+ return true;
+
+ return false;
+}
+
+static struct net_device *mlx5_ib_get_rep_uplink_netdev(struct mlx5_ib_dev *ibdev)
+{
+ struct mlx5_ib_port *port;
+ int i;
+
+ for (i = 0; i < ibdev->num_ports; i++) {
+ port = &ibdev->port[i];
+ if (port->rep && port->rep->vport == MLX5_VPORT_UPLINK) {
+ return ib_device_get_netdev(&ibdev->ib_dev, i + 1);
+ }
+ }
+
+ return NULL;
+}
+
+static int mlx5_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ u32 port_num = roce->native_port_num;
+ struct net_device *ib_ndev = NULL;
+ struct mlx5_core_dev *mdev;
+ struct mlx5_ib_dev *ibdev;
+
+ ibdev = roce->dev;
+ mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
+ if (!mdev)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ /* Should already be registered during the load */
+ if (ibdev->is_rep)
+ break;
+
+ ib_ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num);
+ /* Exit if already registered */
+ if (ib_ndev)
+ goto put_ndev;
+
+ if (ndev->dev.parent == mdev->device)
+ ib_device_set_netdev(&ibdev->ib_dev, ndev, port_num);
+ break;
+
+ case NETDEV_UNREGISTER:
+ /* In case of reps, ib device goes away before the netdevs */
+ if (ibdev->is_rep)
+ break;
+ ib_ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num);
+ if (ib_ndev == ndev)
+ ib_device_set_netdev(&ibdev->ib_dev, NULL, port_num);
+ goto put_ndev;
+
+ case NETDEV_CHANGE:
+ case NETDEV_UP:
+ case NETDEV_DOWN: {
+ struct net_device *upper = NULL;
+
+ if (!netif_is_lag_master(ndev) && !netif_is_lag_port(ndev) &&
+ !mlx5_core_mp_enabled(mdev))
+ return NOTIFY_DONE;
+
+ if (mlx5_lag_is_roce(mdev) || mlx5_lag_is_sriov(mdev)) {
+ struct net_device *lag_ndev;
+
+			if (mlx5_lag_is_roce(mdev))
+ lag_ndev = ib_device_get_netdev(&ibdev->ib_dev, 1);
+ else /* sriov lag */
+ lag_ndev = mlx5_ib_get_rep_uplink_netdev(ibdev);
+
+ if (lag_ndev) {
+ upper = netdev_master_upper_dev_get(lag_ndev);
+ dev_put(lag_ndev);
+ } else {
+ goto done;
+ }
+ }
+
+ if (ibdev->is_rep)
+ roce = mlx5_get_rep_roce(ibdev, ndev, upper, &port_num);
+ if (!roce)
+ return NOTIFY_DONE;
+
+ ib_ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num);
+
+ if (mlx5_netdev_send_event(ibdev, ndev, upper, ib_ndev)) {
+ struct ib_event ibev = { };
+ enum ib_port_state port_state;
+
+ if (get_port_state(&ibdev->ib_dev, port_num,
+ &port_state))
+ goto put_ndev;
+
+ if (roce->last_port_state == port_state)
+ goto put_ndev;
+
+ roce->last_port_state = port_state;
+ ibev.device = &ibdev->ib_dev;
+ if (port_state == IB_PORT_DOWN)
+ ibev.event = IB_EVENT_PORT_ERR;
+ else if (port_state == IB_PORT_ACTIVE)
+ ibev.event = IB_EVENT_PORT_ACTIVE;
+ else
+ goto put_ndev;
+
+ ibev.element.port_num = port_num;
+ ib_dispatch_event(&ibev);
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+put_ndev:
+ dev_put(ib_ndev);
+done:
+ mlx5_ib_put_native_port_mdev(ibdev, port_num);
+ return NOTIFY_DONE;
+}
+
+struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
+ u32 ib_port_num,
+ u32 *native_port_num)
+{
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
+ ib_port_num);
+ struct mlx5_core_dev *mdev = NULL;
+ struct mlx5_ib_multiport_info *mpi;
+ struct mlx5_ib_port *port;
+
+ if (ibdev->ib_dev.type == RDMA_DEVICE_TYPE_SMI) {
+ if (native_port_num)
+ *native_port_num = smi_to_native_portnum(ibdev,
+ ib_port_num);
+ return ibdev->mdev;
+
+ }
+
+ if (!mlx5_core_mp_enabled(ibdev->mdev) ||
+ ll != IB_LINK_LAYER_ETHERNET) {
+ if (native_port_num)
+ *native_port_num = ib_port_num;
+ return ibdev->mdev;
+ }
+
+ if (native_port_num)
+ *native_port_num = 1;
+
+ port = &ibdev->port[ib_port_num - 1];
+ spin_lock(&port->mp.mpi_lock);
+ mpi = ibdev->port[ib_port_num - 1].mp.mpi;
+ if (mpi && !mpi->unaffiliate) {
+ mdev = mpi->mdev;
+		/* If it's the master, no need to refcount; it'll exist
+ * as long as the ib_dev exists.
+ */
+ if (!mpi->is_master)
+ mpi->mdev_refcnt++;
+ }
+ spin_unlock(&port->mp.mpi_lock);
+
+ return mdev;
+}
+
+void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u32 port_num)
+{
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
+ port_num);
+ struct mlx5_ib_multiport_info *mpi;
+ struct mlx5_ib_port *port;
+
+ if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
+ return;
+
+ port = &ibdev->port[port_num - 1];
+
+ spin_lock(&port->mp.mpi_lock);
+ mpi = ibdev->port[port_num - 1].mp.mpi;
+ if (mpi->is_master)
+ goto out;
+
+ mpi->mdev_refcnt--;
+ if (mpi->unaffiliate)
+ complete(&mpi->unref_comp);
+out:
+ spin_unlock(&port->mp.mpi_lock);
+}
+
+static int translate_eth_legacy_proto_oper(u32 eth_proto_oper,
+ u16 *active_speed, u8 *active_width)
+{
+ switch (eth_proto_oper) {
+ case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
+ case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
+ case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
+ case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_SDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
+ case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
+ case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
+ case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
+ case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
+ case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
+ case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_QDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
+ case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
+ case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_EDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
+ case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
+ case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
+ case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_QDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
+ case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
+ case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_HDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_FDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
+ case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
+ case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
+ case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_EDR;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
+ u8 *active_width)
+{
+ switch (eth_proto_oper) {
+ case MLX5E_PROT_MASK(MLX5E_SGMII_100M):
+ case MLX5E_PROT_MASK(MLX5E_1000BASE_X_SGMII):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_SDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_5GBASE_R):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_DDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_10GBASE_XFI_XAUI_1):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_QDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_40GBASE_XLAUI_4_XLPPI_4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_QDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_25GAUI_1_25GBASE_CR_KR):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_EDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
+ *active_width = IB_WIDTH_2X;
+ *active_speed = IB_SPEED_EDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_HDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_EDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
+ *active_width = IB_WIDTH_2X;
+ *active_speed = IB_SPEED_HDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_100GAUI_1_100GBASE_CR_KR):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_NDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_HDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_200GAUI_2_200GBASE_CR2_KR2):
+ *active_width = IB_WIDTH_2X;
+ *active_speed = IB_SPEED_NDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_200GAUI_1_200GBASE_CR1_KR1):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_XDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_400GAUI_8_400GBASE_CR8):
+ *active_width = IB_WIDTH_8X;
+ *active_speed = IB_SPEED_HDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_400GAUI_4_400GBASE_CR4_KR4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_NDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_400GAUI_2_400GBASE_CR2_KR2):
+ *active_width = IB_WIDTH_2X;
+ *active_speed = IB_SPEED_XDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_800GAUI_8_800GBASE_CR8_KR8):
+ *active_width = IB_WIDTH_8X;
+ *active_speed = IB_SPEED_NDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_800GAUI_4_800GBASE_CR4_KR4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_XDR;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int translate_eth_proto_oper(u32 eth_proto_oper, u16 *active_speed,
+ u8 *active_width, bool ext)
+{
+ return ext ?
+ translate_eth_ext_proto_oper(eth_proto_oper, active_speed,
+ active_width) :
+ translate_eth_legacy_proto_oper(eth_proto_oper, active_speed,
+ active_width);
+}
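These helpers pick the IB (width, speed) pair whose product matches the Ethernet link rate, e.g. 100GBASE-CR4 becomes 4X at EDR because four 25 Gb/s lanes carry 100 Gb/s. A small illustrative calculation (per-lane rates are the nominal IB values, not read from hardware):

#include <stdio.h>

int main(void)
{
	/* nominal per-lane signalling rates in Gb/s */
	double edr = 25.0, hdr = 50.0;

	printf("100GBASE-CR4 ~ 4X x EDR = %.0f Gb/s\n", 4 * edr);
	printf("200GBASE-CR4 ~ 4X x HDR = %.0f Gb/s\n", 4 * hdr);
	return 0;
}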
+
+static int mlx5_query_port_roce(struct ib_device *device, u32 port_num,
+ struct ib_port_attr *props)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
+ struct mlx5_core_dev *mdev;
+ struct net_device *ndev, *upper;
+ enum ib_mtu ndev_ib_mtu;
+ bool put_mdev = true;
+ u32 eth_prot_oper;
+ u32 mdev_port_num;
+ bool ext;
+ int err;
+
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
+ if (!mdev) {
+ /* This means the port isn't affiliated yet. Get the
+ * info for the master port instead.
+ */
+ put_mdev = false;
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ port_num = 1;
+ }
+
+	/* Possible bad flows are checked before filling out props, so in case
+	 * of an error it will still be zeroed out.
+	 * Use the native port in the case of reps.
+ */
+ if (dev->is_rep)
+ err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
+ 1, 0);
+ else
+ err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
+ mdev_port_num, 0);
+ if (err)
+ goto out;
+ ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
+ eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
+
+ props->active_width = IB_WIDTH_4X;
+ props->active_speed = IB_SPEED_QDR;
+
+ translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
+ &props->active_width, ext);
+
+ if (!dev->is_rep && dev->mdev->roce.roce_en) {
+ u16 qkey_viol_cntr;
+
+ props->port_cap_flags |= IB_PORT_CM_SUP;
+ props->ip_gids = true;
+ props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
+ roce_address_table_size);
+ mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
+ props->qkey_viol_cntr = qkey_viol_cntr;
+ }
+ props->max_mtu = IB_MTU_4096;
+ props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
+ props->pkey_tbl_len = 1;
+ props->state = IB_PORT_DOWN;
+ props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+
+	/* If this is a stub query for an unaffiliated port, stop here */
+ if (!put_mdev)
+ goto out;
+
+ ndev = ib_device_get_netdev(device, port_num);
+ if (!ndev)
+ goto out;
+
+ if (mlx5_lag_is_roce(mdev) || mlx5_lag_is_sriov(mdev)) {
+ rcu_read_lock();
+ upper = netdev_master_upper_dev_get_rcu(ndev);
+ if (upper) {
+ dev_put(ndev);
+ ndev = upper;
+ dev_hold(ndev);
+ }
+ rcu_read_unlock();
+ }
+
+ if (netif_running(ndev) && netif_carrier_ok(ndev)) {
+ props->state = IB_PORT_ACTIVE;
+ props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+ }
+
+ ndev_ib_mtu = iboe_get_mtu(ndev->mtu);
+
+ dev_put(ndev);
+
+ props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
+out:
+ if (put_mdev)
+ mlx5_ib_put_native_port_mdev(dev, port_num);
+ return err;
+}
+
+int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
+ unsigned int index, const union ib_gid *gid,
+ const struct ib_gid_attr *attr)
+{
+ enum ib_gid_type gid_type;
+ u16 vlan_id = 0xffff;
+ u8 roce_version = 0;
+ u8 roce_l3_type = 0;
+ u8 mac[ETH_ALEN];
+ int ret;
+
+ gid_type = attr->gid_type;
+ if (gid) {
+ ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
+ if (ret)
+ return ret;
+ }
+
+ switch (gid_type) {
+ case IB_GID_TYPE_ROCE:
+ roce_version = MLX5_ROCE_VERSION_1;
+ break;
+ case IB_GID_TYPE_ROCE_UDP_ENCAP:
+ roce_version = MLX5_ROCE_VERSION_2;
+ if (gid && ipv6_addr_v4mapped((void *)gid))
+ roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
+ else
+ roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
+ break;
+
+ default:
+ mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
+ }
+
+ return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
+ roce_l3_type, gid->raw, mac,
+ vlan_id < VLAN_CFI_MASK, vlan_id,
+ port_num);
+}
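set_roce_addr() selects MLX5_ROCE_L3_TYPE_IPV4 for a RoCEv2 GID that holds an IPv4-mapped IPv6 address. A tiny userspace sketch of the same check (illustrative only; the address is arbitrary):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

int main(void)
{
	struct in6_addr a;

	/* ::ffff:192.0.2.1 is an IPv4-mapped IPv6 address */
	if (inet_pton(AF_INET6, "::ffff:192.0.2.1", &a) != 1)
		return 1;

	printf("v4-mapped: %d -> RoCE L3 type would be IPv4\n",
	       !!IN6_IS_ADDR_V4MAPPED(&a));
	return 0;
}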
+
+static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
+ __always_unused void **context)
+{
+ int ret;
+
+ ret = mlx5r_add_gid_macsec_operations(attr);
+ if (ret)
+ return ret;
+
+ return set_roce_addr(to_mdev(attr->device), attr->port_num,
+ attr->index, &attr->gid, attr);
+}
+
+static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
+ __always_unused void **context)
+{
+ int ret;
+
+ ret = set_roce_addr(to_mdev(attr->device), attr->port_num,
+ attr->index, NULL, attr);
+ if (ret)
+ return ret;
+
+ mlx5r_del_gid_macsec_operations(attr);
+ return 0;
+}
+
+__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
+ const struct ib_gid_attr *attr)
+{
+ if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
+ return 0;
+
+ return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
+}
+
static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
- return !dev->mdev->issi;
+ if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
+ return !MLX5_CAP_GEN(dev->mdev, ib_virt);
+ return 0;
}
enum {
@@ -94,13 +719,43 @@ static int mlx5_get_vport_access_method(struct ib_device *ibdev)
if (mlx5_use_mad_ifc(to_mdev(ibdev)))
return MLX5_VPORT_ACCESS_METHOD_MAD;
- if (mlx5_ib_port_link_layer(ibdev) ==
+ if (mlx5_ib_port_link_layer(ibdev, 1) ==
IB_LINK_LAYER_ETHERNET)
return MLX5_VPORT_ACCESS_METHOD_NIC;
return MLX5_VPORT_ACCESS_METHOD_HCA;
}
+static void get_atomic_caps(struct mlx5_ib_dev *dev,
+ u8 atomic_size_qp,
+ struct ib_device_attr *props)
+{
+ u8 tmp;
+ u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
+ u8 atomic_req_8B_endianness_mode =
+ MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);
+
+	/* Check if HW supports 8-byte standard atomic operations and is
+	 * capable of responding in host endianness.
+ */
+ tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
+ if (((atomic_operations & tmp) == tmp) &&
+ (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
+ (atomic_req_8B_endianness_mode)) {
+ props->atomic_cap = IB_ATOMIC_HCA;
+ } else {
+ props->atomic_cap = IB_ATOMIC_NONE;
+ }
+}
+
+static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
+ struct ib_device_attr *props)
+{
+ u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
+
+ get_atomic_caps(dev, atomic_size_qp, props);
+}
+
static int mlx5_query_system_image_guid(struct ib_device *ibdev,
__be64 *sys_image_guid)
{
@@ -116,13 +771,21 @@ static int mlx5_query_system_image_guid(struct ib_device *ibdev,
case MLX5_VPORT_ACCESS_METHOD_HCA:
err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
- if (!err)
- *sys_image_guid = cpu_to_be64(tmp);
- return err;
+ break;
+
+ case MLX5_VPORT_ACCESS_METHOD_NIC:
+ err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
+ break;
default:
return -EINVAL;
}
+
+ if (!err)
+ *sys_image_guid = cpu_to_be64(tmp);
+
+ return err;
+
}
static int mlx5_query_max_pkeys(struct ib_device *ibdev,
@@ -176,17 +839,24 @@ static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
case MLX5_VPORT_ACCESS_METHOD_HCA:
err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
- if (!err)
- *node_guid = cpu_to_be64(tmp);
- return err;
+ break;
+
+ case MLX5_VPORT_ACCESS_METHOD_NIC:
+ err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
+ break;
default:
return -EINVAL;
}
+
+ if (!err)
+ *node_guid = cpu_to_be64(tmp);
+
+ return err;
}
struct mlx5_reg_node_desc {
- u8 desc[64];
+ u8 desc[IB_DEVICE_NODE_DESC_MAX];
};
static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
@@ -203,18 +873,86 @@ static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
MLX5_REG_NODE_DESC, 0, 0);
}
+static void fill_esw_mgr_reg_c0(struct mlx5_core_dev *mdev,
+ struct mlx5_ib_query_device_resp *resp)
+{
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ u16 vport = mlx5_eswitch_manager_vport(mdev);
+
+ resp->reg_c0.value = mlx5_eswitch_get_vport_metadata_for_match(esw,
+ vport);
+ resp->reg_c0.mask = mlx5_eswitch_get_vport_metadata_mask();
+}
+
+/*
+ * Calculate maximum SQ overhead across all QP types.
+ * Other QP types (REG_UMR, UC, RC, UD/SMI/GSI, XRC_TGT)
+ * have smaller overhead than the types calculated below,
+ * so they are implicitly included.
+ */
+static u32 mlx5_ib_calc_max_sq_overhead(void)
+{
+ u32 max_overhead_xrc, overhead_ud_lso, a, b;
+
+ /* XRC_INI */
+ max_overhead_xrc = sizeof(struct mlx5_wqe_xrc_seg);
+ max_overhead_xrc += sizeof(struct mlx5_wqe_ctrl_seg);
+ a = sizeof(struct mlx5_wqe_atomic_seg) +
+ sizeof(struct mlx5_wqe_raddr_seg);
+ b = sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+ sizeof(struct mlx5_mkey_seg) +
+ MLX5_IB_SQ_UMR_INLINE_THRESHOLD / MLX5_IB_UMR_OCTOWORD;
+ max_overhead_xrc += max(a, b);
+
+ /* UD with LSO */
+ overhead_ud_lso = sizeof(struct mlx5_wqe_ctrl_seg);
+ overhead_ud_lso += sizeof(struct mlx5_wqe_eth_pad);
+ overhead_ud_lso += sizeof(struct mlx5_wqe_eth_seg);
+ overhead_ud_lso += sizeof(struct mlx5_wqe_datagram_seg);
+
+ return max(max_overhead_xrc, overhead_ud_lso);
+}
+
+static u32 mlx5_ib_calc_max_qp_wr(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ u32 max_wqe_bb_units = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+ u32 max_wqe_size;
+ /* max QP overhead + 1 SGE, no inline, no special features */
+ max_wqe_size = mlx5_ib_calc_max_sq_overhead() +
+ sizeof(struct mlx5_wqe_data_seg);
+
+ max_wqe_size = roundup_pow_of_two(max_wqe_size);
+
+ max_wqe_size = ALIGN(max_wqe_size, MLX5_SEND_WQE_BB);
+
+ return (max_wqe_bb_units * MLX5_SEND_WQE_BB) / max_wqe_size;
+}
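mlx5_ib_calc_max_qp_wr() divides the total WQE basic-block budget by the rounded-up worst-case WQE size. The same arithmetic with hypothetical numbers (MLX5_SEND_WQE_BB is 64 bytes in the driver; the overhead and capability values below are made up):

#include <stdio.h>

#define SEND_WQE_BB 64u	/* send WQE basic-block size */

static unsigned int roundup_pow_of_two(unsigned int x)
{
	unsigned int r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int log_max_qp_sz = 15;		/* hypothetical HCA cap */
	unsigned int max_wqe_bb_units = 1u << log_max_qp_sz;
	unsigned int overhead = 96, data_seg = 16;	/* hypothetical sizes */
	unsigned int max_wqe_size = overhead + data_seg;

	max_wqe_size = roundup_pow_of_two(max_wqe_size);	/* 112 -> 128 */
	max_wqe_size = (max_wqe_size + SEND_WQE_BB - 1) & ~(SEND_WQE_BB - 1);

	printf("max_qp_wr = %u\n",
	       (max_wqe_bb_units * SEND_WQE_BB) / max_wqe_size);
	return 0;
}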
+
static int mlx5_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *uhw)
{
+ size_t uhw_outlen = (uhw) ? uhw->outlen : 0;
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
int err = -ENOMEM;
+ int max_sq_desc;
int max_rq_sg;
int max_sq_sg;
u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
+ bool raw_support = !mlx5_core_mp_enabled(mdev);
+ struct mlx5_ib_query_device_resp resp = {};
+ size_t resp_len;
+ u64 max_tso;
- if (uhw->inlen || uhw->outlen)
+ resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
+ if (uhw_outlen && uhw_outlen < resp_len)
+ return -EINVAL;
+
+ resp.response_length = resp_len;
+
+ if (uhw && uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
return -EINVAL;
memset(props, 0, sizeof(*props));
@@ -223,9 +961,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (err)
return err;
- err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
- if (err)
- return err;
+ props->max_pkeys = dev->pkey_table_len;
err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
if (err)
@@ -245,12 +981,20 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
if (MLX5_CAP_GEN(mdev, apm))
props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
- props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
if (MLX5_CAP_GEN(mdev, xrc))
props->device_cap_flags |= IB_DEVICE_XRC;
- props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
+ if (MLX5_CAP_GEN(mdev, imaicl)) {
+ props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
+ IB_DEVICE_MEM_WINDOW_TYPE_2B;
+ props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
+ /* We support 'Gappy' memory registration too */
+ props->kernel_cap_flags |= IBK_SG_GAPS_REG;
+ }
+ /* IB_WR_REG_MR always requires changing the entity size with UMR */
+ if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
+ props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
if (MLX5_CAP_GEN(mdev, sho)) {
- props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
+ props->kernel_cap_flags |= IBK_INTEGRITY_HANDOVER;
/* At this stage no support for signature handover */
props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
IB_PROT_T10DIF_TYPE_2 |
@@ -259,7 +1003,83 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
IB_GUARD_T10DIF_CSUM;
}
if (MLX5_CAP_GEN(mdev, block_lb_mc))
- props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
+ props->kernel_cap_flags |= IBK_BLOCK_MULTICAST_LOOPBACK;
+
+ if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
+ if (MLX5_CAP_ETH(mdev, csum_cap)) {
+ /* Legacy bit to support old userspace libraries */
+ props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
+ props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
+ }
+
+ if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
+ props->raw_packet_caps |=
+ IB_RAW_PACKET_CAP_CVLAN_STRIPPING;
+
+ if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen) {
+ max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
+ if (max_tso) {
+ resp.tso_caps.max_tso = 1 << max_tso;
+ resp.tso_caps.supported_qpts |=
+ 1 << IB_QPT_RAW_PACKET;
+ resp.response_length += sizeof(resp.tso_caps);
+ }
+ }
+
+ if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen) {
+ resp.rss_caps.rx_hash_function =
+ MLX5_RX_HASH_FUNC_TOEPLITZ;
+ resp.rss_caps.rx_hash_fields_mask =
+ MLX5_RX_HASH_SRC_IPV4 |
+ MLX5_RX_HASH_DST_IPV4 |
+ MLX5_RX_HASH_SRC_IPV6 |
+ MLX5_RX_HASH_DST_IPV6 |
+ MLX5_RX_HASH_SRC_PORT_TCP |
+ MLX5_RX_HASH_DST_PORT_TCP |
+ MLX5_RX_HASH_SRC_PORT_UDP |
+ MLX5_RX_HASH_DST_PORT_UDP |
+ MLX5_RX_HASH_INNER;
+ resp.response_length += sizeof(resp.rss_caps);
+ }
+ } else {
+ if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen)
+ resp.response_length += sizeof(resp.tso_caps);
+ if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen)
+ resp.response_length += sizeof(resp.rss_caps);
+ }
+
+ if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
+ props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
+ props->kernel_cap_flags |= IBK_UD_TSO;
+ }
+
+ if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
+ MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
+ raw_support)
+ props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;
+
+ if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
+ MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
+ props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
+
+ if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
+ MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
+ raw_support) {
+ /* Legacy bit to support old userspace libraries */
+ props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
+ props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
+ }
+
+ if (MLX5_CAP_DEV_MEM(mdev, memic)) {
+ props->max_dm_size =
+ MLX5_CAP_DEV_MEM(mdev, max_memic_size);
+ }
+
+ if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
+ props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
+
+ if (MLX5_CAP_GEN(mdev, end_pad))
+ props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;
props->vendor_part_id = mdev->pdev->device;
props->hw_ver = mdev->pdev->revision;
@@ -267,16 +1087,18 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_mr_size = ~0ull;
props->page_size_cap = ~(min_page_size - 1);
props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
- props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+ props->max_qp_wr = mlx5_ib_calc_max_qp_wr(dev);
max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
sizeof(struct mlx5_wqe_data_seg);
- max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
- sizeof(struct mlx5_wqe_ctrl_seg)) /
- sizeof(struct mlx5_wqe_data_seg);
- props->max_sge = min(max_rq_sg, max_sq_sg);
- props->max_sge_rd = props->max_sge;
+ max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
+ max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
+ sizeof(struct mlx5_wqe_raddr_seg)) /
+ sizeof(struct mlx5_wqe_data_seg);
+ props->max_send_sge = max_sq_sg;
+ props->max_recv_sge = max_rq_sg;
+ props->max_sge_rd = MLX5_MAX_SGE_RD;
props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
- props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_eq_sz)) - 1;
+ props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
@@ -286,57 +1108,271 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
props->max_srq_sge = max_rq_sg - 1;
- props->max_fast_reg_page_list_len = (unsigned int)-1;
- props->atomic_cap = IB_ATOMIC_NONE;
+ props->max_fast_reg_page_list_len =
+ 1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
+ props->max_pi_fast_reg_page_list_len =
+ props->max_fast_reg_page_list_len / 2;
+ props->max_sgl_rd =
+ MLX5_CAP_GEN(mdev, max_sgl_for_optimized_performance);
+ get_atomic_caps_qp(dev, props);
props->masked_atomic_cap = IB_ATOMIC_NONE;
props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
props->max_mcast_grp;
- props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
+ props->max_ah = INT_MAX;
+ props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
+ props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
+
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+ if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
+ props->kernel_cap_flags |= IBK_ON_DEMAND_PAGING;
+ props->odp_caps = dev->odp_caps;
+ if (!uhw) {
+ /* ODP for kernel QPs is not implemented for receive
+ * WQEs and SRQ WQEs
+ */
+ props->odp_caps.per_transport_caps.rc_odp_caps &=
+ ~(IB_ODP_SUPPORT_READ |
+ IB_ODP_SUPPORT_SRQ_RECV);
+ props->odp_caps.per_transport_caps.uc_odp_caps &=
+ ~(IB_ODP_SUPPORT_READ |
+ IB_ODP_SUPPORT_SRQ_RECV);
+ props->odp_caps.per_transport_caps.ud_odp_caps &=
+ ~(IB_ODP_SUPPORT_READ |
+ IB_ODP_SUPPORT_SRQ_RECV);
+ props->odp_caps.per_transport_caps.xrc_odp_caps &=
+ ~(IB_ODP_SUPPORT_READ |
+ IB_ODP_SUPPORT_SRQ_RECV);
+ }
+ }
+
+ if (mlx5_core_is_vf(mdev))
+ props->kernel_cap_flags |= IBK_VIRTUAL_FUNCTION;
+
+ if (mlx5_ib_port_link_layer(ibdev, 1) ==
+ IB_LINK_LAYER_ETHERNET && raw_support) {
+ props->rss_caps.max_rwq_indirection_tables =
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
+ props->rss_caps.max_rwq_indirection_table_size =
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
+ props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
+ props->max_wq_type_rq =
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
+ }
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (MLX5_CAP_GEN(mdev, pg))
- props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
- props->odp_caps = dev->odp_caps;
-#endif
+ if (MLX5_CAP_GEN(mdev, tag_matching)) {
+ props->tm_caps.max_num_tags =
+ (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
+ props->tm_caps.max_ops =
+ 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+ props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
+ }
+
+ if (MLX5_CAP_GEN(mdev, tag_matching) &&
+ MLX5_CAP_GEN(mdev, rndv_offload_rc)) {
+ props->tm_caps.flags = IB_TM_CAP_RNDV_RC;
+ props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
+ }
+
+ if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
+ props->cq_caps.max_cq_moderation_count =
+ MLX5_MAX_CQ_COUNT;
+ props->cq_caps.max_cq_moderation_period =
+ MLX5_MAX_CQ_PERIOD;
+ }
+
+ if (offsetofend(typeof(resp), cqe_comp_caps) <= uhw_outlen) {
+ resp.response_length += sizeof(resp.cqe_comp_caps);
+
+ if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
+ resp.cqe_comp_caps.max_num =
+ MLX5_CAP_GEN(dev->mdev,
+ cqe_compression_max_num);
+
+ resp.cqe_comp_caps.supported_format =
+ MLX5_IB_CQE_RES_FORMAT_HASH |
+ MLX5_IB_CQE_RES_FORMAT_CSUM;
+
+ if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
+ resp.cqe_comp_caps.supported_format |=
+ MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX;
+ }
+ }
+
+ if (offsetofend(typeof(resp), packet_pacing_caps) <= uhw_outlen &&
+ raw_support) {
+ if (MLX5_CAP_QOS(mdev, packet_pacing) &&
+ MLX5_CAP_GEN(mdev, qos)) {
+ resp.packet_pacing_caps.qp_rate_limit_max =
+ MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
+ resp.packet_pacing_caps.qp_rate_limit_min =
+ MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
+ resp.packet_pacing_caps.supported_qpts |=
+ 1 << IB_QPT_RAW_PACKET;
+ if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
+ MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
+ resp.packet_pacing_caps.cap_flags |=
+ MLX5_IB_PP_SUPPORT_BURST;
+ }
+ resp.response_length += sizeof(resp.packet_pacing_caps);
+ }
+
+ if (offsetofend(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes) <=
+ uhw_outlen) {
+ if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
+ resp.mlx5_ib_support_multi_pkt_send_wqes =
+ MLX5_IB_ALLOW_MPW;
+
+ if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
+ resp.mlx5_ib_support_multi_pkt_send_wqes |=
+ MLX5_IB_SUPPORT_EMPW;
+
+ resp.response_length +=
+ sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
+ }
+
+ if (offsetofend(typeof(resp), flags) <= uhw_outlen) {
+ resp.response_length += sizeof(resp.flags);
+
+ if (MLX5_CAP_GEN(mdev, cqe_compression_128))
+ resp.flags |=
+ MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;
+
+ if (MLX5_CAP_GEN(mdev, cqe_128_always))
+ resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
+ if (MLX5_CAP_GEN(mdev, qp_packet_based))
+ resp.flags |=
+ MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;
+
+ resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
+
+ if (MLX5_CAP_GEN_2(mdev, dp_ordering_force) &&
+ (MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_xrc) ||
+ MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_dc) ||
+ MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_rc) ||
+ MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_ud) ||
+ MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_uc)))
+ resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_OOO_DP;
+ }
+
+ if (offsetofend(typeof(resp), sw_parsing_caps) <= uhw_outlen) {
+ resp.response_length += sizeof(resp.sw_parsing_caps);
+ if (MLX5_CAP_ETH(mdev, swp)) {
+ resp.sw_parsing_caps.sw_parsing_offloads |=
+ MLX5_IB_SW_PARSING;
+
+ if (MLX5_CAP_ETH(mdev, swp_csum))
+ resp.sw_parsing_caps.sw_parsing_offloads |=
+ MLX5_IB_SW_PARSING_CSUM;
+
+ if (MLX5_CAP_ETH(mdev, swp_lso))
+ resp.sw_parsing_caps.sw_parsing_offloads |=
+ MLX5_IB_SW_PARSING_LSO;
+
+ if (resp.sw_parsing_caps.sw_parsing_offloads)
+ resp.sw_parsing_caps.supported_qpts =
+ BIT(IB_QPT_RAW_PACKET);
+ }
+ }
+
+ if (offsetofend(typeof(resp), striding_rq_caps) <= uhw_outlen &&
+ raw_support) {
+ resp.response_length += sizeof(resp.striding_rq_caps);
+ if (MLX5_CAP_GEN(mdev, striding_rq)) {
+ resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
+ MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
+ resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
+ MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
+ if (MLX5_CAP_GEN(dev->mdev, ext_stride_num_range))
+ resp.striding_rq_caps
+ .min_single_wqe_log_num_of_strides =
+ MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
+ else
+ resp.striding_rq_caps
+ .min_single_wqe_log_num_of_strides =
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
+ resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
+ MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
+ resp.striding_rq_caps.supported_qpts =
+ BIT(IB_QPT_RAW_PACKET);
+ }
+ }
+
+ if (offsetofend(typeof(resp), tunnel_offloads_caps) <= uhw_outlen) {
+ resp.response_length += sizeof(resp.tunnel_offloads_caps);
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_GRE;
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
+ }
+
+ if (offsetofend(typeof(resp), dci_streams_caps) <= uhw_outlen) {
+ resp.response_length += sizeof(resp.dci_streams_caps);
+
+ resp.dci_streams_caps.max_log_num_concurent =
+ MLX5_CAP_GEN(mdev, log_max_dci_stream_channels);
+
+ resp.dci_streams_caps.max_log_num_errored =
+ MLX5_CAP_GEN(mdev, log_max_dci_errored_streams);
+ }
+
+ if (offsetofend(typeof(resp), reserved) <= uhw_outlen)
+ resp.response_length += sizeof(resp.reserved);
+
+ if (offsetofend(typeof(resp), reg_c0) <= uhw_outlen) {
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+
+ resp.response_length += sizeof(resp.reg_c0);
+
+ if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS &&
+ mlx5_eswitch_vport_match_metadata_enabled(esw))
+ fill_esw_mgr_reg_c0(mdev, &resp);
+ }
+
+ if (uhw_outlen) {
+ err = ib_copy_to_udata(uhw, &resp, resp.response_length);
+
+ if (err)
+ return err;
+ }
return 0;
}
-enum mlx5_ib_width {
- MLX5_IB_WIDTH_1X = 1 << 0,
- MLX5_IB_WIDTH_2X = 1 << 1,
- MLX5_IB_WIDTH_4X = 1 << 2,
- MLX5_IB_WIDTH_8X = 1 << 3,
- MLX5_IB_WIDTH_12X = 1 << 4
-};
-
-static int translate_active_width(struct ib_device *ibdev, u8 active_width,
- u8 *ib_width)
+static void translate_active_width(struct ib_device *ibdev, u16 active_width,
+ u8 *ib_width)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- int err = 0;
- if (active_width & MLX5_IB_WIDTH_1X) {
+ if (active_width & MLX5_PTYS_WIDTH_1X)
*ib_width = IB_WIDTH_1X;
- } else if (active_width & MLX5_IB_WIDTH_2X) {
- mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
- (int)active_width);
- err = -EINVAL;
- } else if (active_width & MLX5_IB_WIDTH_4X) {
+ else if (active_width & MLX5_PTYS_WIDTH_2X)
+ *ib_width = IB_WIDTH_2X;
+ else if (active_width & MLX5_PTYS_WIDTH_4X)
*ib_width = IB_WIDTH_4X;
- } else if (active_width & MLX5_IB_WIDTH_8X) {
+ else if (active_width & MLX5_PTYS_WIDTH_8X)
*ib_width = IB_WIDTH_8X;
- } else if (active_width & MLX5_IB_WIDTH_12X) {
+ else if (active_width & MLX5_PTYS_WIDTH_12X)
*ib_width = IB_WIDTH_12X;
- } else {
- mlx5_ib_dbg(dev, "Invalid active_width %d\n",
- (int)active_width);
- err = -EINVAL;
+ else {
+ mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
+ active_width);
+ *ib_width = IB_WIDTH_4X;
}
- return err;
+ return;
}
static int mlx5_mtu_to_ib_mtu(int mtu)
@@ -400,17 +1436,17 @@ static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
return 0;
}
-static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
+static int mlx5_query_hca_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_hca_vport_context *rep;
- int max_mtu;
- int oper_mtu;
+ u8 vl_hw_cap, plane_index = 0;
+ u16 max_mtu;
+ u16 oper_mtu;
int err;
- u8 ib_link_width_oper;
- u8 vl_hw_cap;
+ u16 ib_link_width_oper;
rep = kzalloc(sizeof(*rep), GFP_KERNEL);
if (!rep) {
@@ -418,7 +1454,12 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
goto out;
}
- memset(props, 0, sizeof(*props));
+	/* props is zeroed by the caller; avoid zeroing it here */
+
+ if (ibdev->type == RDMA_DEVICE_TYPE_SMI) {
+ plane_index = port;
+ port = smi_to_native_portnum(dev, port);
+ }
err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
if (err)
@@ -430,7 +1471,14 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
props->sm_sl = rep->sm_sl;
props->state = rep->vport_state;
props->phys_state = rep->port_physical_state;
- props->port_cap_flags = rep->cap_mask1;
+
+ props->port_cap_flags = rep->cap_mask1;
+ if (dev->num_plane) {
+ props->port_cap_flags |= IB_PORT_SM_DISABLED;
+ props->port_cap_flags &= ~IB_PORT_SM;
+ } else if (ibdev->type == RDMA_DEVICE_TYPE_SMI)
+ props->port_cap_flags &= ~IB_PORT_CM_SUP;
+
props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
@@ -439,19 +1487,16 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
props->subnet_timeout = rep->subnet_timeout;
props->init_type_reply = rep->init_type_reply;
- err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
- if (err)
- goto out;
+ if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP)
+ props->port_cap_flags2 = rep->cap_mask2;
- err = translate_active_width(ibdev, ib_link_width_oper,
- &props->active_width);
- if (err)
- goto out;
- err = mlx5_query_port_proto_oper(mdev, &props->active_speed, MLX5_PTYS_IB,
- port);
+ err = mlx5_query_ib_port_oper(mdev, &ib_link_width_oper,
+ &props->active_speed, port, plane_index);
if (err)
goto out;
+ translate_active_width(ibdev, ib_link_width_oper, &props->active_width);
+
mlx5_query_port_max_mtu(mdev, &max_mtu, port);
props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);
@@ -471,22 +1516,68 @@ out:
return err;
}
-int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
+int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
+ unsigned int count;
+ int ret;
+
switch (mlx5_get_vport_access_method(ibdev)) {
case MLX5_VPORT_ACCESS_METHOD_MAD:
- return mlx5_query_mad_ifc_port(ibdev, port, props);
+ ret = mlx5_query_mad_ifc_port(ibdev, port, props);
+ break;
case MLX5_VPORT_ACCESS_METHOD_HCA:
- return mlx5_query_hca_port(ibdev, port, props);
+ ret = mlx5_query_hca_port(ibdev, port, props);
+ break;
+
+ case MLX5_VPORT_ACCESS_METHOD_NIC:
+ ret = mlx5_query_port_roce(ibdev, port, props);
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
}
+
+ if (!ret && props) {
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev;
+ bool put_mdev = true;
+
+ mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
+ if (!mdev) {
+			/* If the port isn't affiliated yet, query the master.
+			 * The master and slave will have the same values.
+			 */
+ mdev = dev->mdev;
+ port = 1;
+ put_mdev = false;
+ }
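+		/* Entries kept reserved by mlx5 core are excluded from the
+		 * GID table length reported to consumers.
+		 */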
+ count = mlx5_core_reserved_gids_count(mdev);
+ if (put_mdev)
+ mlx5_ib_put_native_port_mdev(dev, port);
+ props->gid_tbl_len -= count;
+ }
+ return ret;
+}
+
+static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props)
+{
+ return mlx5_query_port_roce(ibdev, port, props);
}
-static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
+ u16 *pkey)
+{
+ /* Default special Pkey for representor device port as per the
+ * IB specification 1.3 section 10.9.1.2.
+ */
+ *pkey = 0xffff;
+ return 0;
+}
+
+static int mlx5_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
@@ -505,20 +1596,43 @@ static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
}
-static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
- u16 *pkey)
+static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u32 port,
+ u16 index, u16 *pkey)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_core_dev *mdev;
+ bool put_mdev = true;
+ u32 mdev_port_num;
+ int err;
+
+ mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
+ if (!mdev) {
+		/* The port isn't affiliated yet, so get the PKey from the
+		 * master port. For RoCE the PKey tables will be the same.
+		 */
+ put_mdev = false;
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ }
+
+ err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
+ index, pkey);
+ if (put_mdev)
+ mlx5_ib_put_native_port_mdev(dev, port);
+ return err;
+}
+
+static int mlx5_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
+ u16 *pkey)
+{
switch (mlx5_get_vport_access_method(ibdev)) {
case MLX5_VPORT_ACCESS_METHOD_MAD:
return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
case MLX5_VPORT_ACCESS_METHOD_HCA:
case MLX5_VPORT_ACCESS_METHOD_NIC:
- return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index,
- pkey);
+ return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
default:
return -EINVAL;
}
@@ -542,28 +1656,78 @@ static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
* If possible, pass node desc to FW, so it can generate
* a 144 trap. If cmd fails, just ignore.
*/
- memcpy(&in, props->node_desc, 64);
+ memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
if (err)
return err;
- memcpy(ibdev->node_desc, props->node_desc, 64);
+ memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
return err;
}
-static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
+static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u32 port_num, u32 mask,
+ u32 value)
+{
+ struct mlx5_hca_vport_context ctx = {};
+ struct mlx5_core_dev *mdev;
+ u32 mdev_port_num;
+ int err;
+
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
+ if (!mdev)
+ return -ENODEV;
+
+ err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
+ if (err)
+ goto out;
+
+ if (~ctx.cap_mask1_perm & mask) {
+ mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
+ mask, ctx.cap_mask1_perm);
+ err = -EINVAL;
+ goto out;
+ }
+
+ ctx.cap_mask1 = value;
+ ctx.cap_mask1_perm = mask;
+ err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
+ 0, &ctx);
+
+out:
+ mlx5_ib_put_native_port_mdev(dev, port_num);
+
+ return err;
+}
+
+static int mlx5_ib_modify_port(struct ib_device *ibdev, u32 port, int mask,
struct ib_port_modify *props)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct ib_port_attr attr;
u32 tmp;
int err;
+ u32 change_mask;
+ u32 value;
+ bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
+ IB_LINK_LAYER_INFINIBAND);
+
+	/* The CM layer calls ib_modify_port() regardless of the link layer. For
+	 * Ethernet ports, qkey violations and port capabilities are meaningless.
+	 */
+ if (!is_ib)
+ return 0;
+
+ if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
+ change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
+ value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
+ return set_port_caps_atomic(dev, port, change_mask, value);
+ }
mutex_lock(&dev->cap_mask_mutex);
- err = mlx5_ib_query_port(ibdev, port, &attr);
+ err = ib_query_port(ibdev, port, &attr);
if (err)
goto out;
@@ -577,164 +1741,518 @@ out:
return err;
}
-static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
+static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
+{
+ mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
+ caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
+}
+
+static u16 calc_dynamic_bfregs(int uars_per_sys_page)
+{
+	/* A large system page without 4K UAR support might limit the dynamic size */
+ if (uars_per_sys_page == 1 && PAGE_SIZE > 4096)
+ return MLX5_MIN_DYN_BFREGS;
+
+ return MLX5_MAX_DYN_BFREGS;
+}
+
+static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
+ struct mlx5_ib_alloc_ucontext_req_v2 *req,
+ struct mlx5_bfreg_info *bfregi)
+{
+ int uars_per_sys_page;
+ int bfregs_per_sys_page;
+ int ref_bfregs = req->total_num_bfregs;
+
+ if (req->total_num_bfregs == 0)
+ return -EINVAL;
+
+ BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
+ BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);
+
+ if (req->total_num_bfregs > MLX5_MAX_BFREGS)
+ return -ENOMEM;
+
+ uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
+ bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
+ /* This holds the required static allocation asked by the user */
+ req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
+ if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
+ return -EINVAL;
+
+ bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
+ bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
+ bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
+ bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;
+
+ mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
+ MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
+ lib_uar_4k ? "yes" : "no", ref_bfregs,
+ req->total_num_bfregs, bfregi->total_num_bfregs,
+ bfregi->num_sys_pages);
+
+ return 0;
+}
+
+static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
+{
+ struct mlx5_bfreg_info *bfregi;
+ int err;
+ int i;
+
+ bfregi = &context->bfregi;
+ for (i = 0; i < bfregi->num_static_sys_pages; i++) {
+ err = mlx5_cmd_uar_alloc(dev->mdev, &bfregi->sys_pages[i],
+ context->devx_uid);
+ if (err)
+ goto error;
+
+ mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
+ }
+
+ for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
+ bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;
+
+ return 0;
+
+error:
+ for (--i; i >= 0; i--)
+ if (mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
+ context->devx_uid))
+ mlx5_ib_warn(dev, "failed to free uar %d\n", i);
+
+ return err;
+}
+
+static void deallocate_uars(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_ucontext *context)
+{
+ struct mlx5_bfreg_info *bfregi;
+ int i;
+
+ bfregi = &context->bfregi;
+ for (i = 0; i < bfregi->num_sys_pages; i++)
+ if (i < bfregi->num_static_sys_pages ||
+ bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
+ mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
+ context->devx_uid);
+}
+
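+/* Force local loopback on both the master and the slave core devices of a
+ * multi-port pair; force_enable turns the refcounted mlx5_ib_enable_lb() /
+ * mlx5_ib_disable_lb() paths below into no-ops while the pair is bound.
+ */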
+static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master,
+ struct mlx5_core_dev *slave,
+ struct mlx5_ib_lb_state *lb_state)
+{
+ int err;
+
+ err = mlx5_nic_vport_update_local_lb(master, true);
+ if (err)
+ return err;
+
+ err = mlx5_nic_vport_update_local_lb(slave, true);
+ if (err)
+ goto out;
+
+ lb_state->force_enable = true;
+ return 0;
+
+out:
+ mlx5_nic_vport_update_local_lb(master, false);
+ return err;
+}
+
+static void mlx5_ib_disable_lb_mp(struct mlx5_core_dev *master,
+ struct mlx5_core_dev *slave,
+ struct mlx5_ib_lb_state *lb_state)
+{
+ mlx5_nic_vport_update_local_lb(slave, false);
+ mlx5_nic_vport_update_local_lb(master, false);
+
+ lb_state->force_enable = false;
+}
+
+int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
+{
+ int err = 0;
+
+ if (dev->lb.force_enable)
+ return 0;
+
+ mutex_lock(&dev->lb.mutex);
+ if (td)
+ dev->lb.user_td++;
+ if (qp)
+ dev->lb.qps++;
+
+ if (dev->lb.user_td == 2 ||
+ dev->lb.qps == 1) {
+ if (!dev->lb.enabled) {
+ err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
+ dev->lb.enabled = true;
+ }
+ }
+
+ mutex_unlock(&dev->lb.mutex);
+
+ return err;
+}
+
+void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
{
+ if (dev->lb.force_enable)
+ return;
+
+ mutex_lock(&dev->lb.mutex);
+ if (td)
+ dev->lb.user_td--;
+ if (qp)
+ dev->lb.qps--;
+
+ if (dev->lb.user_td == 1 &&
+ dev->lb.qps == 0) {
+ if (dev->lb.enabled) {
+ mlx5_nic_vport_update_local_lb(dev->mdev, false);
+ dev->lb.enabled = false;
+ }
+ }
+
+ mutex_unlock(&dev->lb.mutex);
+}
+
+static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn,
+ u16 uid)
+{
+ int err;
+
+ if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
+ return 0;
+
+ err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
+ if (err)
+ return err;
+
+ if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
+ (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
+ !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
+ return err;
+
+ return mlx5_ib_enable_lb(dev, true, false);
+}
+
+static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
+ u16 uid)
+{
+ if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
+ return;
+
+ mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);
+
+ if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
+ (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
+ !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
+ return;
+
+ mlx5_ib_disable_lb(dev, true, false);
+}
+
+static int set_ucontext_resp(struct ib_ucontext *uctx,
+ struct mlx5_ib_alloc_ucontext_resp *resp)
+{
+ struct ib_device *ibdev = uctx->device;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_ib_ucontext *context = to_mucontext(uctx);
+ struct mlx5_bfreg_info *bfregi = &context->bfregi;
+
+ if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
+ resp->dump_fill_mkey = dev->mkeys.dump_fill_mkey;
+ resp->comp_mask |=
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
+ }
+
+ resp->qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
+ if (mlx5_wc_support_get(dev->mdev))
+ resp->bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev,
+ log_bf_reg_size);
+ resp->cache_line_size = cache_line_size();
+ resp->max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
+ resp->max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
+ resp->max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
+ resp->max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
+ resp->max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
+ resp->cqe_version = context->cqe_version;
+ resp->log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
+ MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
+ resp->num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
+ MLX5_CAP_GEN(dev->mdev,
+ num_of_uars_per_page) : 1;
+ resp->tot_bfregs = bfregi->lib_uar_dyn ? 0 :
+ bfregi->total_num_bfregs - bfregi->num_dyn_bfregs;
+ resp->num_ports = dev->num_ports;
+ resp->cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
+ MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
+
+ if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
+ mlx5_query_min_inline(dev->mdev, &resp->eth_min_inline);
+ resp->eth_min_inline++;
+ }
+
+ if (dev->mdev->clock_info)
+ resp->clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
+
+ /*
+ * We don't want to expose information from the PCI bar that is located
+ * after 4096 bytes, so if the arch only supports larger pages, let's
+ * pretend we don't support reading the HCA's core clock. This is also
+	 * enforced by the mmap function.
+ */
+ if (PAGE_SIZE <= 4096) {
+ resp->comp_mask |=
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
+ resp->hca_core_clock_offset =
+ offsetof(struct mlx5_init_seg,
+ internal_timer_h) % PAGE_SIZE;
+ }
+
+ if (MLX5_CAP_GEN(dev->mdev, ece_support))
+ resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE;
+
+ if (rt_supported(MLX5_CAP_GEN(dev->mdev, sq_ts_format)) &&
+ rt_supported(MLX5_CAP_GEN(dev->mdev, rq_ts_format)) &&
+ rt_supported(MLX5_CAP_ROCE(dev->mdev, qp_ts_format)))
+ resp->comp_mask |=
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_REAL_TIME_TS;
+
+ resp->num_dyn_bfregs = bfregi->num_dyn_bfregs;
+
+ if (MLX5_CAP_GEN(dev->mdev, drain_sigerr))
+ resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS;
+
+ resp->comp_mask |=
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_MKEY_UPDATE_TAG;
+
+ return 0;
+}
+
+static bool uctx_rdma_ctrl_is_enabled(u64 enabled_caps)
+{
+ return UCAP_ENABLED(enabled_caps, RDMA_UCAP_MLX5_CTRL_LOCAL) ||
+ UCAP_ENABLED(enabled_caps, RDMA_UCAP_MLX5_CTRL_OTHER_VHCA);
+}
+
+static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
+ struct ib_udata *udata)
+{
+ struct ib_device *ibdev = uctx->device;
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_ib_alloc_ucontext_req_v2 req;
- struct mlx5_ib_alloc_ucontext_resp resp;
- struct mlx5_ib_ucontext *context;
- struct mlx5_uuar_info *uuari;
- struct mlx5_uar *uars;
- int gross_uuars;
- int num_uars;
+ struct mlx5_ib_alloc_ucontext_req_v2 req = {};
+ struct mlx5_ib_alloc_ucontext_resp resp = {};
+ struct mlx5_ib_ucontext *context = to_mucontext(uctx);
+ struct mlx5_bfreg_info *bfregi;
int ver;
- int uuarn;
int err;
- int i;
- size_t reqlen;
+ size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
+ max_cqe_version);
+ bool lib_uar_4k;
+ bool lib_uar_dyn;
if (!dev->ib_active)
- return ERR_PTR(-EAGAIN);
+ return -EAGAIN;
- memset(&req, 0, sizeof(req));
- reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
- if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
+ if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
ver = 0;
- else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
+ else if (udata->inlen >= min_req_v2)
ver = 2;
else
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
- err = ib_copy_from_udata(&req, udata, reqlen);
+ err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
if (err)
- return ERR_PTR(err);
+ return err;
- if (req.flags || req.reserved)
- return ERR_PTR(-EINVAL);
+ if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
+ return -EOPNOTSUPP;
- if (req.total_num_uuars > MLX5_MAX_UUARS)
- return ERR_PTR(-ENOMEM);
+ if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
+ return -EOPNOTSUPP;
- if (req.total_num_uuars == 0)
- return ERR_PTR(-EINVAL);
-
- req.total_num_uuars = ALIGN(req.total_num_uuars,
- MLX5_NON_FP_BF_REGS_PER_PAGE);
- if (req.num_low_latency_uuars > req.total_num_uuars - 1)
- return ERR_PTR(-EINVAL);
-
- num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
- gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
- resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
- resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
- resp.cache_line_size = L1_CACHE_BYTES;
- resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
- resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
- resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
- resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
- resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
-
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return ERR_PTR(-ENOMEM);
+ req.total_num_bfregs = ALIGN(req.total_num_bfregs,
+ MLX5_NON_FP_BFREGS_PER_UAR);
+ if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
+ return -EINVAL;
- uuari = &context->uuari;
- mutex_init(&uuari->lock);
- uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL);
- if (!uars) {
- err = -ENOMEM;
- goto out_ctx;
+ if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
+ err = mlx5_ib_devx_create(dev, true, uctx->enabled_caps);
+ if (err < 0)
+ goto out_ctx;
+ context->devx_uid = err;
+
+ if (uctx_rdma_ctrl_is_enabled(uctx->enabled_caps)) {
+ err = mlx5_cmd_add_privileged_uid(dev->mdev,
+ context->devx_uid);
+ if (err)
+ goto out_devx;
+ }
+ }
+
+ lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
+ lib_uar_dyn = req.lib_caps & MLX5_LIB_CAP_DYN_UAR;
+ bfregi = &context->bfregi;
+
+ if (lib_uar_dyn) {
+ bfregi->lib_uar_dyn = lib_uar_dyn;
+ goto uar_done;
}
- uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
- sizeof(*uuari->bitmap),
+ /* updates req->total_num_bfregs */
+ err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
+ if (err)
+ goto out_ucap;
+
+ mutex_init(&bfregi->lock);
+ bfregi->lib_uar_4k = lib_uar_4k;
+ bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
GFP_KERNEL);
- if (!uuari->bitmap) {
+ if (!bfregi->count) {
err = -ENOMEM;
- goto out_uar_ctx;
- }
- /*
- * clear all fast path uuars
- */
- for (i = 0; i < gross_uuars; i++) {
- uuarn = i & 3;
- if (uuarn == 2 || uuarn == 3)
- set_bit(i, uuari->bitmap);
+ goto out_ucap;
}
- uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
- if (!uuari->count) {
+ bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
+ sizeof(*bfregi->sys_pages),
+ GFP_KERNEL);
+ if (!bfregi->sys_pages) {
err = -ENOMEM;
- goto out_bitmap;
+ goto out_count;
}
- for (i = 0; i < num_uars; i++) {
- err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index);
- if (err)
- goto out_count;
- }
+ err = allocate_uars(dev, context);
+ if (err)
+ goto out_sys_pages;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
-#endif
+uar_done:
+ err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
+ context->devx_uid);
+ if (err)
+ goto out_uars;
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
- resp.tot_uuars = req.total_num_uuars;
- resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
- err = ib_copy_to_udata(udata, &resp,
- sizeof(resp) - sizeof(resp.reserved));
+ context->cqe_version = min_t(__u8,
+ (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
+ req.max_cqe_version);
+
+ err = set_ucontext_resp(uctx, &resp);
if (err)
- goto out_uars;
+ goto out_mdev;
+
+ resp.response_length = min(udata->outlen, sizeof(resp));
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err)
+ goto out_mdev;
+
+ bfregi->ver = ver;
+ bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
+ context->lib_caps = req.lib_caps;
+ print_lib_caps(dev, context->lib_caps);
+
+ if (mlx5_ib_lag_should_assign_affinity(dev)) {
+ u32 port = mlx5_core_native_port_num(dev->mdev) - 1;
+
+ atomic_set(&context->tx_port_affinity,
+ atomic_add_return(
+ 1, &dev->port[port].roce.tx_port_affinity));
+ }
+
+ return 0;
- uuari->ver = ver;
- uuari->num_low_latency_uuars = req.num_low_latency_uuars;
- uuari->uars = uars;
- uuari->num_uars = num_uars;
- return &context->ibucontext;
+out_mdev:
+ mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
out_uars:
- for (i--; i >= 0; i--)
- mlx5_cmd_free_uar(dev->mdev, uars[i].index);
+ deallocate_uars(dev, context);
+
+out_sys_pages:
+ kfree(bfregi->sys_pages);
+
out_count:
- kfree(uuari->count);
+ kfree(bfregi->count);
-out_bitmap:
- kfree(uuari->bitmap);
+out_ucap:
+ if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX &&
+ uctx_rdma_ctrl_is_enabled(uctx->enabled_caps))
+ mlx5_cmd_remove_privileged_uid(dev->mdev, context->devx_uid);
-out_uar_ctx:
- kfree(uars);
+out_devx:
+ if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
+ mlx5_ib_devx_destroy(dev, context->devx_uid);
out_ctx:
- kfree(context);
- return ERR_PTR(err);
+ return err;
}
-static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
+static int mlx5_ib_query_ucontext(struct ib_ucontext *ibcontext,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_alloc_ucontext_resp uctx_resp = {};
+ int ret;
+
+ ret = set_ucontext_resp(ibcontext, &uctx_resp);
+ if (ret)
+ return ret;
+
+ uctx_resp.response_length =
+ min_t(size_t,
+ uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX),
+ sizeof(uctx_resp));
+
+ ret = uverbs_copy_to_struct_or_zero(attrs,
+ MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX,
+ &uctx_resp,
+ sizeof(uctx_resp));
+ return ret;
+}
+
+static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
- struct mlx5_uuar_info *uuari = &context->uuari;
- int i;
+ struct mlx5_bfreg_info *bfregi;
- for (i = 0; i < uuari->num_uars; i++) {
- if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index))
- mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
+ bfregi = &context->bfregi;
+ mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
+
+ deallocate_uars(dev, context);
+ kfree(bfregi->sys_pages);
+ kfree(bfregi->count);
+
+ if (context->devx_uid) {
+ if (uctx_rdma_ctrl_is_enabled(ibcontext->enabled_caps))
+ mlx5_cmd_remove_privileged_uid(dev->mdev,
+ context->devx_uid);
+ mlx5_ib_devx_destroy(dev, context->devx_uid);
}
+}
- kfree(uuari->count);
- kfree(uuari->bitmap);
- kfree(uuari->uars);
- kfree(context);
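+/* With 4K UARs the firmware packs several UAR indices into a single system
+ * page, so the pfn is the BAR base plus the UAR index divided by the number
+ * of firmware UARs per page.
+ */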
+static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
+ int uar_idx)
+{
+ int fw_uars_per_page;
- return 0;
+ fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
+
+ return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
}
-static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
+static u64 uar_index2paddress(struct mlx5_ib_dev *dev,
+ int uar_idx)
{
- return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index;
+ unsigned int fw_uars_per_page;
+
+ fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
+ MLX5_UARS_IN_PAGE : 1;
+
+ return (dev->mdev->bar_addr + (uar_idx / fw_uars_per_page) * PAGE_SIZE);
}
static int get_command(unsigned long offset)
@@ -752,153 +2270,349 @@ static int get_index(unsigned long offset)
return get_arg(offset);
}
-static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
+/* Index resides in an extra byte to enable larger values than 255 */
+static int get_extended_index(unsigned long offset)
{
- struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
- struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
- struct mlx5_uuar_info *uuari = &context->uuari;
- unsigned long command;
- unsigned long idx;
- phys_addr_t pfn;
+ return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
+}
- command = get_command(vma->vm_pgoff);
- switch (command) {
+
+static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
+{
+}
+
+static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
+{
+ switch (cmd) {
+ case MLX5_IB_MMAP_WC_PAGE:
+ return "WC";
case MLX5_IB_MMAP_REGULAR_PAGE:
- if (vma->vm_end - vma->vm_start != PAGE_SIZE)
- return -EINVAL;
+ return "best effort WC";
+ case MLX5_IB_MMAP_NC_PAGE:
+ return "NC";
+ case MLX5_IB_MMAP_DEVICE_MEM:
+ return "Device Memory";
+ default:
+ return "Unknown";
+ }
+}
- idx = get_index(vma->vm_pgoff);
- if (idx >= uuari->num_uars)
- return -EINVAL;
+static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
+ struct vm_area_struct *vma,
+ struct mlx5_ib_ucontext *context)
+{
+ if ((vma->vm_end - vma->vm_start != PAGE_SIZE) ||
+ !(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
- pfn = uar_index2pfn(dev, uuari->uars[idx].index);
- mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn 0x%llx\n", idx,
- (unsigned long long)pfn);
+ if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
+ return -EOPNOTSUPP;
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- if (io_remap_pfn_range(vma, vma->vm_start, pfn,
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
+ if (vma->vm_flags & (VM_WRITE | VM_EXEC))
+ return -EPERM;
+ vm_flags_clear(vma, VM_MAYWRITE);
- mlx5_ib_dbg(dev, "mapped WC at 0x%lx, PA 0x%llx\n",
- vma->vm_start,
- (unsigned long long)pfn << PAGE_SHIFT);
- break;
+ if (!dev->mdev->clock_info)
+ return -EOPNOTSUPP;
- case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
- return -ENOSYS;
+ return vm_insert_page(vma, vma->vm_start,
+ virt_to_page(dev->mdev->clock_info));
+}
+static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
+{
+ struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
+ struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
+ struct mlx5_var_table *var_table = &dev->var_table;
+ struct mlx5_ib_ucontext *context = to_mucontext(entry->ucontext);
+
+ switch (mentry->mmap_flag) {
+ case MLX5_IB_MMAP_TYPE_MEMIC:
+ case MLX5_IB_MMAP_TYPE_MEMIC_OP:
+ mlx5_ib_dm_mmap_free(dev, mentry);
+ break;
+ case MLX5_IB_MMAP_TYPE_VAR:
+ mutex_lock(&var_table->bitmap_lock);
+ clear_bit(mentry->page_idx, var_table->bitmap);
+ mutex_unlock(&var_table->bitmap_lock);
+ kfree(mentry);
+ break;
+ case MLX5_IB_MMAP_TYPE_UAR_WC:
+ case MLX5_IB_MMAP_TYPE_UAR_NC:
+ mlx5_cmd_uar_dealloc(dev->mdev, mentry->page_idx,
+ context->devx_uid);
+ kfree(mentry);
+ break;
default:
- return -EINVAL;
+ WARN_ON(true);
}
-
- return 0;
}
-static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn)
+static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
+ struct vm_area_struct *vma,
+ struct mlx5_ib_ucontext *context)
{
- struct mlx5_create_mkey_mbox_in *in;
- struct mlx5_mkey_seg *seg;
- struct mlx5_core_mr mr;
+ struct mlx5_bfreg_info *bfregi = &context->bfregi;
int err;
+ unsigned long idx;
+ phys_addr_t pfn;
+ pgprot_t prot;
+ u32 bfreg_dyn_idx = 0;
+ u32 uar_index;
+ int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC);
+ int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
+ bfregi->num_static_sys_pages;
+
+ if (bfregi->lib_uar_dyn)
+ return -EINVAL;
- in = kzalloc(sizeof(*in), GFP_KERNEL);
- if (!in)
- return -ENOMEM;
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
- seg = &in->seg;
- seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA;
- seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
- seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
- seg->start_addr = 0;
+ if (dyn_uar)
+ idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages;
+ else
+ idx = get_index(vma->vm_pgoff);
- err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in),
- NULL, NULL, NULL);
- if (err) {
- mlx5_ib_warn(dev, "failed to create mkey, %d\n", err);
- goto err_in;
+ if (idx >= max_valid_idx) {
+ mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n",
+ idx, max_valid_idx);
+ return -EINVAL;
+ }
+
+ switch (cmd) {
+ case MLX5_IB_MMAP_WC_PAGE:
+ case MLX5_IB_MMAP_ALLOC_WC:
+ case MLX5_IB_MMAP_REGULAR_PAGE:
+		/* For MLX5_IB_MMAP_REGULAR_PAGE, make a best effort to get WC */
+ prot = pgprot_writecombine(vma->vm_page_prot);
+ break;
+ case MLX5_IB_MMAP_NC_PAGE:
+ prot = pgprot_noncached(vma->vm_page_prot);
+ break;
+ default:
+ return -EINVAL;
}
- kfree(in);
- *key = mr.key;
+ if (dyn_uar) {
+ int uars_per_page;
+
+ uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
+ bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR);
+ if (bfreg_dyn_idx >= bfregi->total_num_bfregs) {
+ mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n",
+ bfreg_dyn_idx, bfregi->total_num_bfregs);
+ return -EINVAL;
+ }
+
+ mutex_lock(&bfregi->lock);
+		/* Fail if the UAR is already allocated; the first bfreg index
+		 * of each page holds its count.
+		 */
+ if (bfregi->count[bfreg_dyn_idx]) {
+ mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx);
+ mutex_unlock(&bfregi->lock);
+ return -EINVAL;
+ }
+
+ bfregi->count[bfreg_dyn_idx]++;
+ mutex_unlock(&bfregi->lock);
+
+ err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index,
+ context->devx_uid);
+ if (err) {
+ mlx5_ib_warn(dev, "UAR alloc failed\n");
+ goto free_bfreg;
+ }
+ } else {
+ uar_index = bfregi->sys_pages[idx];
+ }
+ pfn = uar_index2pfn(dev, uar_index);
+ mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
+
+ err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
+ prot, NULL);
+ if (err) {
+ mlx5_ib_err(dev,
+ "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
+ err, mmap_cmd2str(cmd));
+ goto err;
+ }
+
+ if (dyn_uar)
+ bfregi->sys_pages[idx] = uar_index;
return 0;
-err_in:
- kfree(in);
+err:
+ if (!dyn_uar)
+ return err;
+
+ mlx5_cmd_uar_dealloc(dev->mdev, idx, context->devx_uid);
+
+free_bfreg:
+ mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
return err;
}
-static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key)
+static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma)
{
- struct mlx5_core_mr mr;
- int err;
+ unsigned long idx;
+ u8 command;
- memset(&mr, 0, sizeof(mr));
- mr.key = key;
- err = mlx5_core_destroy_mkey(dev->mdev, &mr);
- if (err)
- mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key);
+ command = get_command(vma->vm_pgoff);
+ idx = get_extended_index(vma->vm_pgoff);
+
+ return (command << 16 | idx);
}
-static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
+ struct vm_area_struct *vma,
+ struct ib_ucontext *ucontext)
{
- struct mlx5_ib_alloc_pd_resp resp;
- struct mlx5_ib_pd *pd;
- int err;
+ struct mlx5_user_mmap_entry *mentry;
+ struct rdma_user_mmap_entry *entry;
+ unsigned long pgoff;
+ pgprot_t prot;
+ phys_addr_t pfn;
+ int ret;
- pd = kmalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return ERR_PTR(-ENOMEM);
+ pgoff = mlx5_vma_to_pgoff(vma);
+ entry = rdma_user_mmap_entry_get_pgoff(ucontext, pgoff);
+ if (!entry)
+ return -EINVAL;
- err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
- if (err) {
- kfree(pd);
- return ERR_PTR(err);
+ mentry = to_mmmap(entry);
+ pfn = (mentry->address >> PAGE_SHIFT);
+ if (mentry->mmap_flag == MLX5_IB_MMAP_TYPE_VAR ||
+ mentry->mmap_flag == MLX5_IB_MMAP_TYPE_UAR_NC)
+ prot = pgprot_noncached(vma->vm_page_prot);
+ else
+ prot = pgprot_writecombine(vma->vm_page_prot);
+ ret = rdma_user_mmap_io(ucontext, vma, pfn,
+ entry->npages * PAGE_SIZE,
+ prot,
+ entry);
+ rdma_user_mmap_entry_put(&mentry->rdma_entry);
+ return ret;
+}
+
+static u64 mlx5_entry_to_mmap_offset(struct mlx5_user_mmap_entry *entry)
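+/* Rebuild the user-visible mmap offset from the rdma entry's pgoff: the
+ * command goes above MLX5_IB_MMAP_CMD_SHIFT and the 16-bit index is split
+ * into a low byte plus an extended high byte, mirroring get_command() and
+ * get_extended_index() on the decode side.
+ */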
+{
+ u64 cmd = (entry->rdma_entry.start_pgoff >> 16) & 0xFFFF;
+ u64 index = entry->rdma_entry.start_pgoff & 0xFFFF;
+
+ return (((index >> 8) << 16) | (cmd << MLX5_IB_MMAP_CMD_SHIFT) |
+ (index & 0xFF)) << PAGE_SHIFT;
+}
+
+static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
+{
+ struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
+ struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
+ unsigned long command;
+ phys_addr_t pfn;
+
+ command = get_command(vma->vm_pgoff);
+ switch (command) {
+ case MLX5_IB_MMAP_WC_PAGE:
+ case MLX5_IB_MMAP_ALLOC_WC:
+ if (!mlx5_wc_support_get(dev->mdev))
+ return -EPERM;
+ fallthrough;
+ case MLX5_IB_MMAP_NC_PAGE:
+ case MLX5_IB_MMAP_REGULAR_PAGE:
+ return uar_mmap(dev, command, vma, context);
+
+ case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
+ return -ENOSYS;
+
+ case MLX5_IB_MMAP_CORE_CLOCK:
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+ vm_flags_clear(vma, VM_MAYWRITE);
+
+ /* Don't expose to user-space information it shouldn't have */
+ if (PAGE_SIZE > 4096)
+ return -EOPNOTSUPP;
+
+ pfn = (dev->mdev->iseg_base +
+ offsetof(struct mlx5_init_seg, internal_timer_h)) >>
+ PAGE_SHIFT;
+ return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
+ PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot),
+ NULL);
+ case MLX5_IB_MMAP_CLOCK_INFO:
+ return mlx5_ib_mmap_clock_info_page(dev, vma, context);
+
+ default:
+ return mlx5_ib_mmap_offset(dev, vma, ibcontext);
}
- if (context) {
+ return 0;
+}
+
+static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct mlx5_ib_pd *pd = to_mpd(ibpd);
+ struct ib_device *ibdev = ibpd->device;
+ struct mlx5_ib_alloc_pd_resp resp;
+ int err;
+ u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
+ u16 uid = 0;
+ struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+
+ uid = context ? context->devx_uid : 0;
+ MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
+ MLX5_SET(alloc_pd_in, in, uid, uid);
+ err = mlx5_cmd_exec_inout(to_mdev(ibdev)->mdev, alloc_pd, in, out);
+ if (err)
+ return err;
+
+ pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
+ pd->uid = uid;
+ if (udata) {
resp.pdn = pd->pdn;
if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
- mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
- kfree(pd);
- return ERR_PTR(-EFAULT);
- }
- } else {
- err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn);
- if (err) {
- mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
- kfree(pd);
- return ERR_PTR(err);
+ mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
+ return -EFAULT;
}
}
- return &pd->ibpd;
+ return 0;
}
-static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
+static int mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
struct mlx5_ib_dev *mdev = to_mdev(pd->device);
struct mlx5_ib_pd *mpd = to_mpd(pd);
- if (!pd->uobject)
- free_pa_mkey(mdev, mpd->pa_lkey);
-
- mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
- kfree(mpd);
-
- return 0;
+ return mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
}
static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_ib_qp *mqp = to_mqp(ibqp);
int err;
+ u16 uid;
+
+ uid = ibqp->pd ?
+ to_mpd(ibqp->pd)->uid : 0;
- err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
+ if (mqp->flags & IB_QP_CREATE_SOURCE_QPN) {
+ mlx5_ib_dbg(dev, "Attaching a multi cast group to underlay QP is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
if (err)
mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
ibqp->qp_num, gid->raw);
@@ -910,8 +2624,11 @@ static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
int err;
+ u16 uid;
- err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
+ uid = ibqp->pd ?
+ to_mpd(ibqp->pd)->uid : 0;
+ err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
if (err)
mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
ibqp->qp_num, gid->raw);
@@ -932,674 +2649,2528 @@ static int init_node_data(struct mlx5_ib_dev *dev)
return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
}
-static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t fw_pages_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
- container_of(device, struct mlx5_ib_dev, ib_dev.dev);
+ rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
- return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
+ return sysfs_emit(buf, "%d\n", dev->mdev->priv.fw_pages);
}
+static DEVICE_ATTR_RO(fw_pages);
-static ssize_t show_reg_pages(struct device *device,
+static ssize_t reg_pages_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
- container_of(device, struct mlx5_ib_dev, ib_dev.dev);
+ rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
- return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
+ return sysfs_emit(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
}
+static DEVICE_ATTR_RO(reg_pages);
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
- container_of(device, struct mlx5_ib_dev, ib_dev.dev);
- return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
+ rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
+
+ return sysfs_emit(buf, "MT%d\n", dev->mdev->pdev->device);
}
+static DEVICE_ATTR_RO(hca_type);
-static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
- container_of(device, struct mlx5_ib_dev, ib_dev.dev);
- return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev),
- fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
+ rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
+
+ return sysfs_emit(buf, "%x\n", dev->mdev->rev_id);
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
- container_of(device, struct mlx5_ib_dev, ib_dev.dev);
- return sprintf(buf, "%x\n", dev->mdev->rev_id);
+ rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
+
+ return sysfs_emit(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
+ dev->mdev->board_id);
}
+static DEVICE_ATTR_RO(board_id);
+
+static struct attribute *mlx5_class_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ &dev_attr_fw_pages.attr,
+ &dev_attr_reg_pages.attr,
+ NULL,
+};
-static ssize_t show_board(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct mlx5_ib_dev *dev =
- container_of(device, struct mlx5_ib_dev, ib_dev.dev);
- return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
- dev->mdev->board_id);
-}
-
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
-static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL);
-static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);
-
-static struct device_attribute *mlx5_class_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_fw_ver,
- &dev_attr_hca_type,
- &dev_attr_board_id,
- &dev_attr_fw_pages,
- &dev_attr_reg_pages,
+static const struct attribute_group mlx5_attr_group = {
+ .attrs = mlx5_class_attributes,
};
-static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
- enum mlx5_dev_event event, unsigned long param)
+static void pkey_change_handler(struct work_struct *work)
{
- struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
- struct ib_event ibev;
+ struct mlx5_ib_port_resources *ports =
+ container_of(work, struct mlx5_ib_port_resources,
+ pkey_change_work);
+
+ if (!ports->gsi)
+ /*
+		 * We got this event before the device was fully configured
+		 * and the MAD registration code wasn't called/finished yet.
+ */
+ return;
- u8 port = 0;
+ mlx5_ib_gsi_pkey_change(ports->gsi);
+}
- switch (event) {
- case MLX5_DEV_EVENT_SYS_ERROR:
- ibdev->ib_active = false;
- ibev.event = IB_EVENT_DEVICE_FATAL;
- break;
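+/* On a fatal device error, walk every QP on this ibdev and invoke the
+ * completion handler of each CQ that still has outstanding work, so
+ * consumers notice the failure and can flush their queues.
+ */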
+static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
+{
+ struct mlx5_ib_qp *mqp;
+ struct mlx5_ib_cq *send_mcq, *recv_mcq;
+ struct mlx5_core_cq *mcq;
+ struct list_head cq_armed_list;
+ unsigned long flags_qp;
+ unsigned long flags_cq;
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&cq_armed_list);
+
+	/* Go over the QP list residing on this ibdev, synced with QP create/destroy. */
+ spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
+ list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
+ spin_lock_irqsave(&mqp->sq.lock, flags_qp);
+ if (mqp->sq.tail != mqp->sq.head) {
+ send_mcq = to_mcq(mqp->ibqp.send_cq);
+ spin_lock_irqsave(&send_mcq->lock, flags_cq);
+ if (send_mcq->mcq.comp &&
+ mqp->ibqp.send_cq->comp_handler) {
+ if (!send_mcq->mcq.reset_notify_added) {
+ send_mcq->mcq.reset_notify_added = 1;
+ list_add_tail(&send_mcq->mcq.reset_notify,
+ &cq_armed_list);
+ }
+ }
+ spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
+ }
+ spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
+ spin_lock_irqsave(&mqp->rq.lock, flags_qp);
+ /* no handling is needed for SRQ */
+ if (!mqp->ibqp.srq) {
+ if (mqp->rq.tail != mqp->rq.head) {
+ recv_mcq = to_mcq(mqp->ibqp.recv_cq);
+ spin_lock_irqsave(&recv_mcq->lock, flags_cq);
+ if (recv_mcq->mcq.comp &&
+ mqp->ibqp.recv_cq->comp_handler) {
+ if (!recv_mcq->mcq.reset_notify_added) {
+ recv_mcq->mcq.reset_notify_added = 1;
+ list_add_tail(&recv_mcq->mcq.reset_notify,
+ &cq_armed_list);
+ }
+ }
+ spin_unlock_irqrestore(&recv_mcq->lock,
+ flags_cq);
+ }
+ }
+ spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
+ }
+	/* At this point all in-flight post_send requests have been observed,
+	 * thanks to the lock/unlock cycles above; now arm all involved CQs.
+	 */
+ list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
+ mcq->comp(mcq, NULL);
+ }
+ spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
+}
- case MLX5_DEV_EVENT_PORT_UP:
- ibev.event = IB_EVENT_PORT_ACTIVE;
- port = (u8)param;
+static void delay_drop_handler(struct work_struct *work)
+{
+ int err;
+ struct mlx5_ib_delay_drop *delay_drop =
+ container_of(work, struct mlx5_ib_delay_drop,
+ delay_drop_work);
+
+ atomic_inc(&delay_drop->events_cnt);
+
+ mutex_lock(&delay_drop->lock);
+ err = mlx5_core_set_delay_drop(delay_drop->dev, delay_drop->timeout);
+ if (err) {
+ mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
+ delay_drop->timeout);
+ delay_drop->activate = false;
+ }
+ mutex_unlock(&delay_drop->lock);
+}
+
+static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
+ struct ib_event *ibev)
+{
+ u32 port = (eqe->data.port.port >> 4) & 0xf;
+
+ switch (eqe->sub_type) {
+ case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
+ if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
+ IB_LINK_LAYER_ETHERNET)
+ schedule_work(&ibdev->delay_drop.delay_drop_work);
break;
+ default: /* do nothing */
+ return;
+ }
+}
+
+static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
+ struct ib_event *ibev)
+{
+ u32 port = (eqe->data.port.port >> 4) & 0xf;
+
+ ibev->element.port_num = port;
+
+ switch (eqe->sub_type) {
+ case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
+ case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
+ case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
+ /* In RoCE, port up/down events are handled in
+ * mlx5_netdev_event().
+ */
+ if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
+ IB_LINK_LAYER_ETHERNET)
+ return -EINVAL;
- case MLX5_DEV_EVENT_PORT_DOWN:
- ibev.event = IB_EVENT_PORT_ERR;
- port = (u8)param;
+ ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ?
+ IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
break;
- case MLX5_DEV_EVENT_PORT_INITIALIZED:
- /* not used by ULPs */
- return;
+ case MLX5_PORT_CHANGE_SUBTYPE_LID:
+ ibev->event = IB_EVENT_LID_CHANGE;
+ break;
- case MLX5_DEV_EVENT_LID_CHANGE:
- ibev.event = IB_EVENT_LID_CHANGE;
- port = (u8)param;
+ case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
+ ibev->event = IB_EVENT_PKEY_CHANGE;
+ schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
break;
- case MLX5_DEV_EVENT_PKEY_CHANGE:
- ibev.event = IB_EVENT_PKEY_CHANGE;
- port = (u8)param;
+ case MLX5_PORT_CHANGE_SUBTYPE_GUID:
+ ibev->event = IB_EVENT_GID_CHANGE;
break;
- case MLX5_DEV_EVENT_GUID_CHANGE:
- ibev.event = IB_EVENT_GID_CHANGE;
- port = (u8)param;
+ case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
+ ibev->event = IB_EVENT_CLIENT_REREGISTER;
break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
- case MLX5_DEV_EVENT_CLIENT_REREG:
- ibev.event = IB_EVENT_CLIENT_REREGISTER;
- port = (u8)param;
+static void mlx5_ib_handle_event(struct work_struct *_work)
+{
+ struct mlx5_ib_event_work *work =
+ container_of(_work, struct mlx5_ib_event_work, work);
+ struct mlx5_ib_dev *ibdev;
+ struct ib_event ibev;
+ bool fatal = false;
+
+ if (work->is_slave) {
+ ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi);
+ if (!ibdev)
+ goto out;
+ } else {
+ ibdev = work->dev;
+ }
+
+ switch (work->event) {
+ case MLX5_DEV_EVENT_SYS_ERROR:
+ ibev.event = IB_EVENT_DEVICE_FATAL;
+ mlx5_ib_handle_internal_error(ibdev);
+ ibev.element.port_num = (u8)(unsigned long)work->param;
+ fatal = true;
break;
+ case MLX5_EVENT_TYPE_PORT_CHANGE:
+ if (handle_port_change(ibdev, work->param, &ibev))
+ goto out;
+ break;
+ case MLX5_EVENT_TYPE_GENERAL_EVENT:
+ handle_general_event(ibdev, work->param, &ibev);
+ fallthrough;
+ default:
+ goto out;
}
- ibev.device = &ibdev->ib_dev;
- ibev.element.port_num = port;
+ ibev.device = &ibdev->ib_dev;
- if (port < 1 || port > ibdev->num_ports) {
- mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
- return;
+ if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
+ mlx5_ib_warn(ibdev, "warning: event on port %d\n", ibev.element.port_num);
+ goto out;
}
if (ibdev->ib_active)
ib_dispatch_event(&ibev);
+
+ if (fatal)
+ ibdev->ib_active = false;
+out:
+ kfree(work);
}
-static void get_ext_port_caps(struct mlx5_ib_dev *dev)
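+/* The notifier may run in atomic context, hence the GFP_ATOMIC allocation;
+ * the actual event handling is deferred to mlx5_ib_event_wq.
+ */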
+static int mlx5_ib_event(struct notifier_block *nb,
+ unsigned long event, void *param)
{
- int port;
+ struct mlx5_ib_event_work *work;
- for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
- mlx5_query_ext_port_caps(dev, port);
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return NOTIFY_DONE;
+
+ INIT_WORK(&work->work, mlx5_ib_handle_event);
+ work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events);
+ work->is_slave = false;
+ work->param = param;
+ work->event = event;
+
+ queue_work(mlx5_ib_event_wq, &work->work);
+
+ return NOTIFY_OK;
}
-static int get_port_caps(struct mlx5_ib_dev *dev)
+static int mlx5_ib_event_slave_port(struct notifier_block *nb,
+ unsigned long event, void *param)
{
- struct ib_device_attr *dprops = NULL;
- struct ib_port_attr *pprops = NULL;
- int err = -ENOMEM;
- int port;
- struct ib_udata uhw = {.inlen = 0, .outlen = 0};
+ struct mlx5_ib_event_work *work;
- pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
- if (!pprops)
- goto out;
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return NOTIFY_DONE;
- dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
- if (!dprops)
- goto out;
+ INIT_WORK(&work->work, mlx5_ib_handle_event);
+ work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events);
+ work->is_slave = true;
+ work->param = param;
+ work->event = event;
+ queue_work(mlx5_ib_event_wq, &work->work);
- err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
- if (err) {
- mlx5_ib_warn(dev, "query_device failed %d\n", err);
- goto out;
- }
+ return NOTIFY_OK;
+}
- for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
- err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
- if (err) {
- mlx5_ib_warn(dev, "query_port %d failed %d\n",
- port, err);
- break;
- }
- dev->mdev->port_caps[port - 1].pkey_table_len =
- dprops->max_pkeys;
- dev->mdev->port_caps[port - 1].gid_table_len =
- pprops->gid_tbl_len;
- mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
- dprops->max_pkeys, pprops->gid_tbl_len);
- }
+static int mlx5_ib_get_plane_num(struct mlx5_core_dev *mdev, u8 *num_plane)
+{
+ struct mlx5_hca_vport_context vport_ctx;
+ int err;
-out:
- kfree(pprops);
- kfree(dprops);
+ *num_plane = 0;
+ if (!MLX5_CAP_GEN(mdev, ib_virt) || !MLX5_CAP_GEN_2(mdev, multiplane))
+ return 0;
- return err;
+ err = mlx5_query_hca_vport_context(mdev, 0, 1, 0, &vport_ctx);
+ if (err)
+ return err;
+
+ *num_plane = vport_ctx.num_plane;
+ return 0;
}
-static void destroy_umrc_res(struct mlx5_ib_dev *dev)
+static int set_has_smi_cap(struct mlx5_ib_dev *dev)
{
+ struct mlx5_hca_vport_context vport_ctx;
int err;
+ int port;
- err = mlx5_mr_cache_cleanup(dev);
- if (err)
- mlx5_ib_warn(dev, "mr cache cleanup failed\n");
+ if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
+ return 0;
+
+ for (port = 1; port <= dev->num_ports; port++) {
+ if (dev->num_plane) {
+ dev->port_caps[port - 1].has_smi = false;
+ continue;
+ } else if (!MLX5_CAP_GEN(dev->mdev, ib_virt) ||
+ dev->ib_dev.type == RDMA_DEVICE_TYPE_SMI) {
+ dev->port_caps[port - 1].has_smi = true;
+ continue;
+ }
+
+ err = mlx5_query_hca_vport_context(dev->mdev, 0, port, 0,
+ &vport_ctx);
+ if (err) {
+ mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
+ port, err);
+ return err;
+ }
+ dev->port_caps[port - 1].has_smi = vport_ctx.has_smi;
+ }
+
+ return 0;
+}
+
+static void get_ext_port_caps(struct mlx5_ib_dev *dev)
+{
+ unsigned int port;
- mlx5_ib_destroy_qp(dev->umrc.qp);
- ib_destroy_cq(dev->umrc.cq);
- ib_dealloc_pd(dev->umrc.pd);
+ rdma_for_each_port (&dev->ib_dev, port)
+ mlx5_query_ext_port_caps(dev, port);
}
-enum {
- MAX_UMR_WR = 128,
-};
+static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
+{
+ switch (umr_fence_cap) {
+ case MLX5_CAP_UMR_FENCE_NONE:
+ return MLX5_FENCE_MODE_NONE;
+ case MLX5_CAP_UMR_FENCE_SMALL:
+ return MLX5_FENCE_MODE_INITIATOR_SMALL;
+ default:
+ return MLX5_FENCE_MODE_STRONG_ORDERING;
+ }
+}
-static int create_umr_res(struct mlx5_ib_dev *dev)
+int mlx5_ib_dev_res_cq_init(struct mlx5_ib_dev *dev)
{
- struct ib_qp_init_attr *init_attr = NULL;
- struct ib_qp_attr *attr = NULL;
+ struct mlx5_ib_resources *devr = &dev->devr;
+ struct ib_cq_init_attr cq_attr = {.cqe = 1};
+ struct ib_device *ibdev;
struct ib_pd *pd;
struct ib_cq *cq;
- struct ib_qp *qp;
- struct ib_cq_init_attr cq_attr = {};
- int ret;
+ int ret = 0;
- attr = kzalloc(sizeof(*attr), GFP_KERNEL);
- init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
- if (!attr || !init_attr) {
- ret = -ENOMEM;
- goto error_0;
- }
- pd = ib_alloc_pd(&dev->ib_dev);
+ /*
+ * devr->c0 is set once, never changed until device unload.
+ * Avoid taking the mutex if initialization is already done.
+ */
+ if (devr->c0)
+ return 0;
+
+ mutex_lock(&devr->cq_lock);
+ if (devr->c0)
+ goto unlock;
+
+ ibdev = &dev->ib_dev;
+ pd = ib_alloc_pd(ibdev, 0);
if (IS_ERR(pd)) {
- mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
ret = PTR_ERR(pd);
- goto error_0;
+ mlx5_ib_err(dev, "Couldn't allocate PD for res init, err=%pe\n",
+ pd);
+ goto unlock;
}
- cq_attr.cqe = 128;
- cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL,
- &cq_attr);
+ cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
if (IS_ERR(cq)) {
- mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
ret = PTR_ERR(cq);
- goto error_2;
- }
- ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
-
- init_attr->send_cq = cq;
- init_attr->recv_cq = cq;
- init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
- init_attr->cap.max_send_wr = MAX_UMR_WR;
- init_attr->cap.max_send_sge = 1;
- init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
- init_attr->port_num = 1;
- qp = mlx5_ib_create_qp(pd, init_attr, NULL);
- if (IS_ERR(qp)) {
- mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
- ret = PTR_ERR(qp);
- goto error_3;
- }
- qp->device = &dev->ib_dev;
- qp->real_qp = qp;
- qp->uobject = NULL;
- qp->qp_type = MLX5_IB_QPT_REG_UMR;
-
- attr->qp_state = IB_QPS_INIT;
- attr->port_num = 1;
- ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
- IB_QP_PORT, NULL);
- if (ret) {
- mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
- goto error_4;
+ mlx5_ib_err(dev, "Couldn't create CQ for res init, err=%pe\n",
+ cq);
+ ib_dealloc_pd(pd);
+ goto unlock;
}
- memset(attr, 0, sizeof(*attr));
- attr->qp_state = IB_QPS_RTR;
- attr->path_mtu = IB_MTU_256;
+ devr->p0 = pd;
+ devr->c0 = cq;
- ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
- if (ret) {
- mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
- goto error_4;
- }
+unlock:
+ mutex_unlock(&devr->cq_lock);
+ return ret;
+}
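/*
 * The lazy-init pattern above (and in mlx5_ib_dev_res_srq_init() below) is
 * plain double-checked locking: a lock-free fast path tests devr->c0, and the
 * test is repeated under devr->cq_lock before the PD/CQ pair is actually
 * created, so concurrent callers allocate the resources at most once and
 * later callers skip the mutex entirely.
 */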
- memset(attr, 0, sizeof(*attr));
- attr->qp_state = IB_QPS_RTS;
- ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
- if (ret) {
- mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
- goto error_4;
- }
+int mlx5_ib_dev_res_srq_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_ib_resources *devr = &dev->devr;
+ struct ib_srq_init_attr attr;
+ struct ib_srq *s0, *s1;
+ int ret = 0;
- dev->umrc.qp = qp;
- dev->umrc.cq = cq;
- dev->umrc.pd = pd;
+ /*
+ * devr->s1 is set once, never changed until device unload.
+ * Avoid taking the mutex if initialization is already done.
+ */
+ if (devr->s1)
+ return 0;
- sema_init(&dev->umrc.sem, MAX_UMR_WR);
- ret = mlx5_mr_cache_init(dev);
- if (ret) {
- mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
- goto error_4;
- }
+ mutex_lock(&devr->srq_lock);
+ if (devr->s1)
+ goto unlock;
- kfree(attr);
- kfree(init_attr);
+ ret = mlx5_ib_dev_res_cq_init(dev);
+ if (ret)
+ goto unlock;
- return 0;
+ memset(&attr, 0, sizeof(attr));
+ attr.attr.max_sge = 1;
+ attr.attr.max_wr = 1;
+ attr.srq_type = IB_SRQT_XRC;
+ attr.ext.cq = devr->c0;
+
+ s0 = ib_create_srq(devr->p0, &attr);
+ if (IS_ERR(s0)) {
+ ret = PTR_ERR(s0);
+ mlx5_ib_err(dev,
+ "Couldn't create SRQ 0 for res init, err=%pe\n",
+ s0);
+ goto unlock;
+ }
-error_4:
- mlx5_ib_destroy_qp(qp);
+ memset(&attr, 0, sizeof(attr));
+ attr.attr.max_sge = 1;
+ attr.attr.max_wr = 1;
+ attr.srq_type = IB_SRQT_BASIC;
-error_3:
- ib_destroy_cq(cq);
+ s1 = ib_create_srq(devr->p0, &attr);
+ if (IS_ERR(s1)) {
+ ret = PTR_ERR(s1);
+ mlx5_ib_err(dev,
+ "Couldn't create SRQ 1 for res init, err=%pe\n",
+ s1);
+ ib_destroy_srq(s0);
+ goto unlock;
+ }
-error_2:
- ib_dealloc_pd(pd);
+ devr->s0 = s0;
+ devr->s1 = s1;
-error_0:
- kfree(attr);
- kfree(init_attr);
+unlock:
+ mutex_unlock(&devr->srq_lock);
return ret;
}
-static int create_dev_resources(struct mlx5_ib_resources *devr)
+static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
{
- struct ib_srq_init_attr attr;
- struct mlx5_ib_dev *dev;
- struct ib_cq_init_attr cq_attr = {.cqe = 1};
- u32 rsvd_lkey;
- int ret = 0;
+ struct mlx5_ib_resources *devr = &dev->devr;
+ int ret;
- dev = container_of(devr, struct mlx5_ib_dev, devr);
+ if (!MLX5_CAP_GEN(dev->mdev, xrc))
+ return -EOPNOTSUPP;
+
+ ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0);
+ if (ret)
+ return ret;
- ret = mlx5_core_query_special_context(dev->mdev, &rsvd_lkey);
+ ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn1, 0);
if (ret) {
- pr_err("Failed to query special context %d\n", ret);
+ mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
return ret;
}
- dev->ib_dev.local_dma_lkey = rsvd_lkey;
-
- devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
- if (IS_ERR(devr->p0)) {
- ret = PTR_ERR(devr->p0);
- goto error0;
- }
- devr->p0->device = &dev->ib_dev;
- devr->p0->uobject = NULL;
- atomic_set(&devr->p0->usecnt, 0);
-
- devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
- if (IS_ERR(devr->c0)) {
- ret = PTR_ERR(devr->c0);
- goto error1;
- }
- devr->c0->device = &dev->ib_dev;
- devr->c0->uobject = NULL;
- devr->c0->comp_handler = NULL;
- devr->c0->event_handler = NULL;
- devr->c0->cq_context = NULL;
- atomic_set(&devr->c0->usecnt, 0);
-
- devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
- if (IS_ERR(devr->x0)) {
- ret = PTR_ERR(devr->x0);
- goto error2;
- }
- devr->x0->device = &dev->ib_dev;
- devr->x0->inode = NULL;
- atomic_set(&devr->x0->usecnt, 0);
- mutex_init(&devr->x0->tgt_qp_mutex);
- INIT_LIST_HEAD(&devr->x0->tgt_qp_list);
-
- devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
- if (IS_ERR(devr->x1)) {
- ret = PTR_ERR(devr->x1);
- goto error3;
- }
- devr->x1->device = &dev->ib_dev;
- devr->x1->inode = NULL;
- atomic_set(&devr->x1->usecnt, 0);
- mutex_init(&devr->x1->tgt_qp_mutex);
- INIT_LIST_HEAD(&devr->x1->tgt_qp_list);
- memset(&attr, 0, sizeof(attr));
- attr.attr.max_sge = 1;
- attr.attr.max_wr = 1;
- attr.srq_type = IB_SRQT_XRC;
- attr.ext.xrc.cq = devr->c0;
- attr.ext.xrc.xrcd = devr->x0;
-
- devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
- if (IS_ERR(devr->s0)) {
- ret = PTR_ERR(devr->s0);
- goto error4;
- }
- devr->s0->device = &dev->ib_dev;
- devr->s0->pd = devr->p0;
- devr->s0->uobject = NULL;
- devr->s0->event_handler = NULL;
- devr->s0->srq_context = NULL;
- devr->s0->srq_type = IB_SRQT_XRC;
- devr->s0->ext.xrc.xrcd = devr->x0;
- devr->s0->ext.xrc.cq = devr->c0;
- atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
- atomic_inc(&devr->s0->ext.xrc.cq->usecnt);
- atomic_inc(&devr->p0->usecnt);
- atomic_set(&devr->s0->usecnt, 0);
+ mutex_init(&devr->cq_lock);
+ mutex_init(&devr->srq_lock);
- memset(&attr, 0, sizeof(attr));
- attr.attr.max_sge = 1;
- attr.attr.max_wr = 1;
- attr.srq_type = IB_SRQT_BASIC;
- devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
- if (IS_ERR(devr->s1)) {
- ret = PTR_ERR(devr->s1);
- goto error5;
- }
- devr->s1->device = &dev->ib_dev;
- devr->s1->pd = devr->p0;
- devr->s1->uobject = NULL;
- devr->s1->event_handler = NULL;
- devr->s1->srq_context = NULL;
- devr->s1->srq_type = IB_SRQT_BASIC;
- devr->s1->ext.xrc.cq = devr->c0;
- atomic_inc(&devr->p0->usecnt);
- atomic_set(&devr->s0->usecnt, 0);
+ return 0;
+}
+
+static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_ib_resources *devr = &dev->devr;
+
+ /* After s0/s1 init, they are not unset during the device lifetime. */
+ if (devr->s1) {
+ ib_destroy_srq(devr->s1);
+ ib_destroy_srq(devr->s0);
+ }
+ mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
+ mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
+ /* After p0/c0 init, they are not unset during the device lifetime. */
+ if (devr->c0) {
+ ib_destroy_cq(devr->c0);
+ ib_dealloc_pd(devr->p0);
+ }
+ mutex_destroy(&devr->cq_lock);
+ mutex_destroy(&devr->srq_lock);
+}
+
+static int
+mlx5_ib_create_data_direct_resources(struct mlx5_ib_dev *dev)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ bool ro_supp = false;
+ void *mkc;
+ u32 mkey;
+ u32 pdn;
+ u32 *in;
+ int err;
+
+ err = mlx5_core_alloc_pd(mdev, &pdn);
+ if (err)
+ return err;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ MLX5_SET(create_mkey_in, in, data_direct, 1);
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, rw, 1);
+ MLX5_SET(mkc, mkc, rr, 1);
+ MLX5_SET(mkc, mkc, a, 1);
+ MLX5_SET(mkc, mkc, pd, pdn);
+ MLX5_SET(mkc, mkc, length64, 1);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ err = mlx5_core_create_mkey(mdev, &mkey, in, inlen);
+ if (err)
+ goto err_mkey;
+
+ dev->ddr.mkey = mkey;
+ dev->ddr.pdn = pdn;
+
+ /* create another mkey with RO support */
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write)) {
+ MLX5_SET(mkc, mkc, relaxed_ordering_write, 1);
+ ro_supp = true;
+ }
+
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read)) {
+ MLX5_SET(mkc, mkc, relaxed_ordering_read, 1);
+ ro_supp = true;
+ }
+ if (ro_supp) {
+ err = mlx5_core_create_mkey(mdev, &mkey, in, inlen);
+ /* RO is defined as best effort */
+ if (!err) {
+ dev->ddr.mkey_ro = mkey;
+ dev->ddr.mkey_ro_valid = true;
+ }
+ }
+
+ kvfree(in);
return 0;
-error5:
- mlx5_ib_destroy_srq(devr->s0);
-error4:
- mlx5_ib_dealloc_xrcd(devr->x1);
-error3:
- mlx5_ib_dealloc_xrcd(devr->x0);
-error2:
- mlx5_ib_destroy_cq(devr->c0);
-error1:
- mlx5_ib_dealloc_pd(devr->p0);
-error0:
- return ret;
+err_mkey:
+ kvfree(in);
+err:
+ mlx5_core_dealloc_pd(mdev, pdn);
+ return err;
}
-static void destroy_dev_resources(struct mlx5_ib_resources *devr)
+static void
+mlx5_ib_free_data_direct_resources(struct mlx5_ib_dev *dev)
{
- mlx5_ib_destroy_srq(devr->s1);
- mlx5_ib_destroy_srq(devr->s0);
- mlx5_ib_dealloc_xrcd(devr->x0);
- mlx5_ib_dealloc_xrcd(devr->x1);
- mlx5_ib_destroy_cq(devr->c0);
- mlx5_ib_dealloc_pd(devr->p0);
+
+ if (dev->ddr.mkey_ro_valid)
+ mlx5_core_destroy_mkey(dev->mdev, dev->ddr.mkey_ro);
+
+ mlx5_core_destroy_mkey(dev->mdev, dev->ddr.mkey);
+ mlx5_core_dealloc_pd(dev->mdev, dev->ddr.pdn);
+}
+
+static u32 get_core_cap_flags(struct ib_device *ibdev,
+ struct mlx5_hca_vport_context *rep)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
+ u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
+ u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
+ bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
+ u32 ret = 0;
+
+ if (rep->grh_required)
+ ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED;
+
+ if (dev->num_plane)
+ return ret | RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_IB_MAD |
+ RDMA_CORE_CAP_IB_CM | RDMA_CORE_CAP_IB_SA |
+ RDMA_CORE_CAP_AF_IB;
+ else if (ibdev->type == RDMA_DEVICE_TYPE_SMI)
+ return ret | RDMA_CORE_CAP_IB_MAD | RDMA_CORE_CAP_IB_SMI;
+
+ if (ll == IB_LINK_LAYER_INFINIBAND)
+ return ret | RDMA_CORE_PORT_IBA_IB;
+
+ if (raw_support)
+ ret |= RDMA_CORE_PORT_RAW_PACKET;
+
+ if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
+ return ret;
+
+ if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
+ return ret;
+
+ if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
+ ret |= RDMA_CORE_PORT_IBA_ROCE;
+
+ if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
+ ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+
+ return ret;
}
-static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int mlx5_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
+ struct mlx5_hca_vport_context rep = {0};
int err;
- err = mlx5_ib_query_port(ibdev, port_num, &attr);
+ err = ib_query_port(ibdev, port_num, &attr);
if (err)
return err;
+ if (ll == IB_LINK_LAYER_INFINIBAND) {
+ if (ibdev->type == RDMA_DEVICE_TYPE_SMI)
+ port_num = smi_to_native_portnum(dev, port_num);
+
+ err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
+ &rep);
+ if (err)
+ return err;
+ }
+
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
+ immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
immutable->max_mad_size = IB_MGMT_MAD_SIZE;
return 0;
}
-static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
+static int mlx5_port_rep_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable)
{
- struct mlx5_ib_dev *dev;
+ struct ib_port_attr attr;
int err;
+
+ immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
+
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+ immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
+
+ return 0;
+}
+
+static void get_dev_fw_str(struct ib_device *ibdev, char *str)
+{
+ struct mlx5_ib_dev *dev =
+ container_of(ibdev, struct mlx5_ib_dev, ib_dev);
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d",
+ fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
+ fw_rev_sub(dev->mdev));
+}
+
+static int lag_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct mlx5_ib_dev *dev = container_of(nb, struct mlx5_ib_dev,
+ lag_events);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct ib_device *ibdev = &dev->ib_dev;
+ struct net_device *old_ndev = NULL;
+ struct mlx5_ib_port *port;
+ struct net_device *ndev;
+ u32 portnum = 0;
+ int ret = 0;
int i;
- /* don't create IB instance over Eth ports, no RoCE yet! */
- if (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
- return NULL;
+ switch (event) {
+ case MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE:
+ ndev = data;
+ if (ndev) {
+ if (!mlx5_lag_is_roce(mdev)) {
+ // sriov lag
+ for (i = 0; i < dev->num_ports; i++) {
+ port = &dev->port[i];
+ if (port->rep && port->rep->vport ==
+ MLX5_VPORT_UPLINK) {
+ portnum = i;
+ break;
+ }
+ }
+ }
+ old_ndev = ib_device_get_netdev(ibdev, portnum + 1);
+ ret = ib_device_set_netdev(ibdev, ndev, portnum + 1);
+ if (ret)
+ goto out;
+
+ if (old_ndev)
+ roce_del_all_netdev_gids(ibdev, portnum + 1,
+ old_ndev);
+ rdma_roce_rescan_port(ibdev, portnum + 1);
+ }
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
- printk_once(KERN_INFO "%s", mlx5_version);
+out:
+ dev_put(old_ndev);
+ return notifier_from_errno(ret);
+}
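/*
 * notifier_from_errno() encodes a non-zero ret into a NOTIFY_STOP_MASK return
 * so the LAG notifier chain stops and sees the failure, while ret == 0 maps to
 * NOTIFY_OK. The dev_put() on the way out is also safe on the early-exit path,
 * since dev_put(NULL) is a no-op.
 */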
- dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
- if (!dev)
- return NULL;
+static void mlx5e_lag_event_register(struct mlx5_ib_dev *dev)
+{
+ dev->lag_events.notifier_call = lag_event;
+ blocking_notifier_chain_register(&dev->mdev->priv.lag_nh,
+ &dev->lag_events);
+}
- dev->mdev = mdev;
+static void mlx5e_lag_event_unregister(struct mlx5_ib_dev *dev)
+{
+ blocking_notifier_chain_unregister(&dev->mdev->priv.lag_nh,
+ &dev->lag_events);
+}
+
+static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
+ MLX5_FLOW_NAMESPACE_LAG);
+ struct mlx5_flow_table *ft;
+ int err;
- err = get_port_caps(dev);
+ if (!ns || !mlx5_lag_is_active(mdev))
+ return 0;
+
+ err = mlx5_cmd_create_vport_lag(mdev);
if (err)
- goto err_dealloc;
+ return err;
- if (mlx5_use_mad_ifc(dev))
- get_ext_port_caps(dev);
+ ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_destroy_vport_lag;
+ }
+
+ mlx5e_lag_event_register(dev);
+ dev->flow_db->lag_demux_ft = ft;
+ dev->lag_ports = mlx5_lag_get_num_ports(mdev);
+ dev->lag_active = true;
+ return 0;
+
+err_destroy_vport_lag:
+ mlx5_cmd_destroy_vport_lag(mdev);
+ return err;
+}
+
+static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ if (dev->lag_active) {
+ dev->lag_active = false;
- MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
-
- strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
- dev->ib_dev.owner = THIS_MODULE;
- dev->ib_dev.node_type = RDMA_NODE_IB_CA;
- dev->num_ports = MLX5_CAP_GEN(mdev, num_ports);
- dev->ib_dev.phys_port_cnt = dev->num_ports;
- dev->ib_dev.num_comp_vectors =
- dev->mdev->priv.eq_table.num_comp_vectors;
- dev->ib_dev.dma_device = &mdev->pdev->dev;
-
- dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION;
- dev->ib_dev.uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
- (1ull << IB_USER_VERBS_CMD_OPEN_QP);
- dev->ib_dev.uverbs_ex_cmd_mask =
- (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
-
- dev->ib_dev.query_device = mlx5_ib_query_device;
- dev->ib_dev.query_port = mlx5_ib_query_port;
- dev->ib_dev.query_gid = mlx5_ib_query_gid;
- dev->ib_dev.query_pkey = mlx5_ib_query_pkey;
- dev->ib_dev.modify_device = mlx5_ib_modify_device;
- dev->ib_dev.modify_port = mlx5_ib_modify_port;
- dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext;
- dev->ib_dev.dealloc_ucontext = mlx5_ib_dealloc_ucontext;
- dev->ib_dev.mmap = mlx5_ib_mmap;
- dev->ib_dev.alloc_pd = mlx5_ib_alloc_pd;
- dev->ib_dev.dealloc_pd = mlx5_ib_dealloc_pd;
- dev->ib_dev.create_ah = mlx5_ib_create_ah;
- dev->ib_dev.query_ah = mlx5_ib_query_ah;
- dev->ib_dev.destroy_ah = mlx5_ib_destroy_ah;
- dev->ib_dev.create_srq = mlx5_ib_create_srq;
- dev->ib_dev.modify_srq = mlx5_ib_modify_srq;
- dev->ib_dev.query_srq = mlx5_ib_query_srq;
- dev->ib_dev.destroy_srq = mlx5_ib_destroy_srq;
- dev->ib_dev.post_srq_recv = mlx5_ib_post_srq_recv;
- dev->ib_dev.create_qp = mlx5_ib_create_qp;
- dev->ib_dev.modify_qp = mlx5_ib_modify_qp;
- dev->ib_dev.query_qp = mlx5_ib_query_qp;
- dev->ib_dev.destroy_qp = mlx5_ib_destroy_qp;
- dev->ib_dev.post_send = mlx5_ib_post_send;
- dev->ib_dev.post_recv = mlx5_ib_post_recv;
- dev->ib_dev.create_cq = mlx5_ib_create_cq;
- dev->ib_dev.modify_cq = mlx5_ib_modify_cq;
- dev->ib_dev.resize_cq = mlx5_ib_resize_cq;
- dev->ib_dev.destroy_cq = mlx5_ib_destroy_cq;
- dev->ib_dev.poll_cq = mlx5_ib_poll_cq;
- dev->ib_dev.req_notify_cq = mlx5_ib_arm_cq;
- dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr;
- dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr;
- dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr;
- dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach;
- dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach;
- dev->ib_dev.process_mad = mlx5_ib_process_mad;
- dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr;
- dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list;
- dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list;
- dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
- dev->ib_dev.get_port_immutable = mlx5_port_immutable;
-
- mlx5_ib_internal_fill_odp_caps(dev);
-
- if (MLX5_CAP_GEN(mdev, xrc)) {
- dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
- dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
- dev->ib_dev.uverbs_cmd_mask |=
- (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
- (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
+ mlx5e_lag_event_unregister(dev);
+ mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
+ dev->flow_db->lag_demux_ft = NULL;
+
+ mlx5_cmd_destroy_vport_lag(mdev);
}
+}
- err = init_node_data(dev);
+static void mlx5_netdev_notifier_register(struct mlx5_roce *roce,
+ struct net_device *netdev)
+{
+ int err;
+
+ if (roce->tracking_netdev)
+ return;
+ roce->tracking_netdev = netdev;
+ roce->nb.notifier_call = mlx5_netdev_event;
+ err = register_netdevice_notifier_dev_net(netdev, &roce->nb, &roce->nn);
+ WARN_ON(err);
+}
+
+static void mlx5_netdev_notifier_unregister(struct mlx5_roce *roce)
+{
+ if (!roce->tracking_netdev)
+ return;
+ unregister_netdevice_notifier_dev_net(roce->tracking_netdev, &roce->nb,
+ &roce->nn);
+ roce->tracking_netdev = NULL;
+}
+
+static int mlx5e_mdev_notifier_event(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct mlx5_roce *roce = container_of(nb, struct mlx5_roce, mdev_nb);
+ struct net_device *netdev = data;
+
+ switch (event) {
+ case MLX5_DRIVER_EVENT_UPLINK_NETDEV:
+ if (netdev)
+ mlx5_netdev_notifier_register(roce, netdev);
+ else
+ mlx5_netdev_notifier_unregister(roce);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static void mlx5_mdev_netdev_track(struct mlx5_ib_dev *dev, u32 port_num)
+{
+ struct mlx5_roce *roce = &dev->port[port_num].roce;
+
+ roce->mdev_nb.notifier_call = mlx5e_mdev_notifier_event;
+ mlx5_blocking_notifier_register(dev->mdev, &roce->mdev_nb);
+ mlx5_core_uplink_netdev_event_replay(dev->mdev);
+}
+
+static void mlx5_mdev_netdev_untrack(struct mlx5_ib_dev *dev, u32 port_num)
+{
+ struct mlx5_roce *roce = &dev->port[port_num].roce;
+
+ mlx5_blocking_notifier_unregister(dev->mdev, &roce->mdev_nb);
+ mlx5_netdev_notifier_unregister(roce);
+}
+
+static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
+{
+ int err;
+
+ if (!dev->is_rep && dev->profile != &raw_eth_profile) {
+ err = mlx5_nic_vport_enable_roce(dev->mdev);
+ if (err)
+ return err;
+ }
+
+ err = mlx5_eth_lag_init(dev);
if (err)
- goto err_dealloc;
+ goto err_disable_roce;
- mutex_init(&dev->cap_mask_mutex);
+ return 0;
+
+err_disable_roce:
+ if (!dev->is_rep && dev->profile != &raw_eth_profile)
+ mlx5_nic_vport_disable_roce(dev->mdev);
+
+ return err;
+}
+
+static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
+{
+ mlx5_eth_lag_cleanup(dev);
+ if (!dev->is_rep && dev->profile != &raw_eth_profile)
+ mlx5_nic_vport_disable_roce(dev->mdev);
+}
+
+static int mlx5_ib_rn_get_params(struct ib_device *device, u32 port_num,
+ enum rdma_netdev_t type,
+ struct rdma_netdev_alloc_params *params)
+{
+ if (type != RDMA_NETDEV_IPOIB)
+ return -EOPNOTSUPP;
+
+ return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params);
+}
+
+static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
+ char lbuf[20];
+ int len;
+
+ len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
+ return simple_read_from_buffer(buf, count, pos, lbuf, len);
+}
+
+static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
+ u32 timeout;
+ u32 var;
+
+ if (kstrtouint_from_user(buf, count, 0, &var))
+ return -EFAULT;
+
+ timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
+ 1000);
+ if (timeout != var)
+ mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
+ timeout);
+
+ delay_drop->timeout = timeout;
+
+ return count;
+}
+
+static const struct file_operations fops_delay_drop_timeout = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = delay_drop_timeout_write,
+ .read = delay_drop_timeout_read,
+};
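/*
 * The resulting debugfs file takes a timeout in microseconds, rounded up to a
 * multiple of 100 and capped at MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000. A
 * typical way to poke it from user space (exact path layout may vary by
 * kernel and device):
 *
 *   echo 50000 > /sys/kernel/debug/mlx5/<pci-dev>/delay_drop/timeout
 *   cat /sys/kernel/debug/mlx5/<pci-dev>/delay_drop/timeout
 */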
+
+static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
+ struct mlx5_ib_multiport_info *mpi)
+{
+ u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
+ struct mlx5_ib_port *port = &ibdev->port[port_num];
+ int comps;
+ int err;
+ int i;
+
+ lockdep_assert_held(&mlx5_ib_multiport_mutex);
- err = create_dev_resources(&dev->devr);
+ mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev, &ibdev->lb);
+
+ mlx5_core_mp_event_replay(ibdev->mdev,
+ MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
+ NULL);
+ mlx5_core_mp_event_replay(mpi->mdev,
+ MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
+ NULL);
+
+ mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
+
+ spin_lock(&port->mp.mpi_lock);
+ if (!mpi->ibdev) {
+ spin_unlock(&port->mp.mpi_lock);
+ return;
+ }
+
+ mpi->ibdev = NULL;
+
+ spin_unlock(&port->mp.mpi_lock);
+ if (mpi->mdev_events.notifier_call)
+ mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
+ mpi->mdev_events.notifier_call = NULL;
+ mlx5_mdev_netdev_untrack(ibdev, port_num);
+ spin_lock(&port->mp.mpi_lock);
+
+ comps = mpi->mdev_refcnt;
+ if (comps) {
+ mpi->unaffiliate = true;
+ init_completion(&mpi->unref_comp);
+ spin_unlock(&port->mp.mpi_lock);
+
+ for (i = 0; i < comps; i++)
+ wait_for_completion(&mpi->unref_comp);
+
+ spin_lock(&port->mp.mpi_lock);
+ mpi->unaffiliate = false;
+ }
+
+ port->mp.mpi = NULL;
+
+ spin_unlock(&port->mp.mpi_lock);
+
+ err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
+
+ mlx5_ib_dbg(ibdev, "unaffiliated port %u\n", port_num + 1);
+ /* Only log an error; the pointers still need to be cleaned up and the
+ * mpi added back to the unaffiliated list.
+ */
if (err)
- goto err_dealloc;
+ mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
+ port_num + 1);
+
+ ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
+}
+
+static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
+ struct mlx5_ib_multiport_info *mpi)
+{
+ u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
+ u64 key;
+ int err;
+
+ lockdep_assert_held(&mlx5_ib_multiport_mutex);
+
+ spin_lock(&ibdev->port[port_num].mp.mpi_lock);
+ if (ibdev->port[port_num].mp.mpi) {
+ mlx5_ib_dbg(ibdev, "port %u already affiliated.\n",
+ port_num + 1);
+ spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
+ return false;
+ }
- err = mlx5_ib_odp_init_one(dev);
+ ibdev->port[port_num].mp.mpi = mpi;
+ mpi->ibdev = ibdev;
+ mpi->mdev_events.notifier_call = NULL;
+ spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
+
+ err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
if (err)
- goto err_rsrc;
+ goto unbind;
+
+ mlx5_mdev_netdev_track(ibdev, port_num);
+
+ mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port;
+ mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);
- err = ib_register_device(&dev->ib_dev, NULL);
+ mlx5_ib_init_cong_debugfs(ibdev, port_num);
+
+ key = mpi->mdev->priv.adev_idx;
+ mlx5_core_mp_event_replay(mpi->mdev,
+ MLX5_DRIVER_EVENT_AFFILIATION_DONE,
+ &key);
+ mlx5_core_mp_event_replay(ibdev->mdev,
+ MLX5_DRIVER_EVENT_AFFILIATION_DONE,
+ &key);
+
+ err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev, &ibdev->lb);
if (err)
- goto err_odp;
+ goto unbind;
+
+ return true;
+
+unbind:
+ mlx5_ib_unbind_slave_port(ibdev, mpi);
+ return false;
+}
+
+static int mlx5_ib_data_direct_init(struct mlx5_ib_dev *dev)
+{
+ char vuid[MLX5_ST_SZ_BYTES(array1024_auto) + 1] = {};
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev->mdev, data_direct) ||
+ !MLX5_CAP_GEN_2(dev->mdev, query_vuid))
+ return 0;
+
+ ret = mlx5_cmd_query_vuid(dev->mdev, true, vuid);
+ if (ret)
+ return ret;
+
+ ret = mlx5_ib_create_data_direct_resources(dev);
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&dev->data_direct_mr_list);
+ ret = mlx5_data_direct_ib_reg(dev, vuid);
+ if (ret)
+ mlx5_ib_free_data_direct_resources(dev);
+
+ return ret;
+}
+
+static void mlx5_ib_data_direct_cleanup(struct mlx5_ib_dev *dev)
+{
+ if (!MLX5_CAP_GEN(dev->mdev, data_direct) ||
+ !MLX5_CAP_GEN_2(dev->mdev, query_vuid))
+ return;
- err = create_umr_res(dev);
+ mlx5_data_direct_ib_unreg(dev);
+ mlx5_ib_free_data_direct_resources(dev);
+}
+
+static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
+{
+ u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
+ port_num + 1);
+ struct mlx5_ib_multiport_info *mpi;
+ int err;
+ u32 i;
+
+ if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
+ return 0;
+
+ err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
+ &dev->sys_image_guid);
if (err)
- goto err_dev;
+ return err;
- for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
- err = device_create_file(&dev->ib_dev.dev,
- mlx5_class_attributes[i]);
- if (err)
- goto err_umrc;
+ err = mlx5_nic_vport_enable_roce(dev->mdev);
+ if (err)
+ return err;
+
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ for (i = 0; i < dev->num_ports; i++) {
+ bool bound = false;
+
+ /* build a stub multiport info struct for the native port. */
+ if (i == port_num) {
+ mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
+ if (!mpi) {
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+ mlx5_nic_vport_disable_roce(dev->mdev);
+ return -ENOMEM;
+ }
+
+ mpi->is_master = true;
+ mpi->mdev = dev->mdev;
+ mpi->sys_image_guid = dev->sys_image_guid;
+ dev->port[i].mp.mpi = mpi;
+ mpi->ibdev = dev;
+ mpi = NULL;
+ continue;
+ }
+
+ list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
+ list) {
+ if (dev->sys_image_guid == mpi->sys_image_guid &&
+ (mlx5_core_native_port_num(mpi->mdev) - 1) == i &&
+ mlx5_core_same_coredev_type(dev->mdev, mpi->mdev)) {
+ bound = mlx5_ib_bind_slave_port(dev, mpi);
+ }
+
+ if (bound) {
+ dev_dbg(mpi->mdev->device,
+ "removing port from unaffiliated list.\n");
+ mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
+ list_del(&mpi->list);
+ break;
+ }
+ }
+ if (!bound)
+ mlx5_ib_dbg(dev, "no free port found for port %d\n",
+ i + 1);
}
- dev->ib_active = true;
+ list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+ return err;
+}
- return dev;
+static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
+{
+ u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
+ port_num + 1);
+ u32 i;
-err_umrc:
- destroy_umrc_res(dev);
+ if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
+ return;
-err_dev:
- ib_unregister_device(&dev->ib_dev);
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ for (i = 0; i < dev->num_ports; i++) {
+ if (dev->port[i].mp.mpi) {
+ /* Destroy the native port stub */
+ if (i == port_num) {
+ kfree(dev->port[i].mp.mpi);
+ dev->port[i].mp.mpi = NULL;
+ } else {
+ mlx5_ib_dbg(dev, "unbinding port_num: %u\n",
+ i + 1);
+ list_add_tail(&dev->port[i].mp.mpi->list,
+ &mlx5_ib_unaffiliated_port_list);
+ mlx5_ib_unbind_slave_port(dev,
+ dev->port[i].mp.mpi);
+ }
+ }
+ }
-err_odp:
- mlx5_ib_odp_remove_one(dev);
+ mlx5_ib_dbg(dev, "removing from devlist\n");
+ list_del(&dev->ib_dev_list);
+ mutex_unlock(&mlx5_ib_multiport_mutex);
-err_rsrc:
- destroy_dev_resources(&dev->devr);
+ mlx5_nic_vport_disable_roce(dev->mdev);
+}
-err_dealloc:
- ib_dealloc_device((struct ib_device *)dev);
+static int mmap_obj_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_user_mmap_entry *obj = uobject->object;
- return NULL;
+ rdma_user_mmap_entry_remove(&obj->rdma_entry);
+ return 0;
}
-static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
+static int mlx5_rdma_user_mmap_entry_insert(struct mlx5_ib_ucontext *c,
+ struct mlx5_user_mmap_entry *entry,
+ size_t length)
{
- struct mlx5_ib_dev *dev = context;
+ return rdma_user_mmap_entry_insert_range(
+ &c->ibucontext, &entry->rdma_entry, length,
+ (MLX5_IB_MMAP_OFFSET_START << 16),
+ ((MLX5_IB_MMAP_OFFSET_END << 16) + (1UL << 16) - 1));
+}
- ib_unregister_device(&dev->ib_dev);
- destroy_umrc_res(dev);
- mlx5_ib_odp_remove_one(dev);
- destroy_dev_resources(&dev->devr);
- ib_dealloc_device(&dev->ib_dev);
+static struct mlx5_user_mmap_entry *
+alloc_var_entry(struct mlx5_ib_ucontext *c)
+{
+ struct mlx5_user_mmap_entry *entry;
+ struct mlx5_var_table *var_table;
+ u32 page_idx;
+ int err;
+
+ var_table = &to_mdev(c->ibucontext.device)->var_table;
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&var_table->bitmap_lock);
+ page_idx = find_first_zero_bit(var_table->bitmap,
+ var_table->num_var_hw_entries);
+ if (page_idx >= var_table->num_var_hw_entries) {
+ err = -ENOSPC;
+ mutex_unlock(&var_table->bitmap_lock);
+ goto end;
+ }
+
+ set_bit(page_idx, var_table->bitmap);
+ mutex_unlock(&var_table->bitmap_lock);
+
+ entry->address = var_table->hw_start_addr +
+ (page_idx * var_table->stride_size);
+ entry->page_idx = page_idx;
+ entry->mmap_flag = MLX5_IB_MMAP_TYPE_VAR;
+
+ err = mlx5_rdma_user_mmap_entry_insert(c, entry,
+ var_table->stride_size);
+ if (err)
+ goto err_insert;
+
+ return entry;
+
+err_insert:
+ mutex_lock(&var_table->bitmap_lock);
+ clear_bit(page_idx, var_table->bitmap);
+ mutex_unlock(&var_table->bitmap_lock);
+end:
+ kfree(entry);
+ return ERR_PTR(err);
}
-static struct mlx5_interface mlx5_ib_interface = {
- .add = mlx5_ib_add,
- .remove = mlx5_ib_remove,
- .event = mlx5_ib_event,
- .protocol = MLX5_INTERFACE_PROTOCOL_IB,
-};
+static int UVERBS_HANDLER(MLX5_IB_METHOD_VAR_OBJ_ALLOC)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_user_mmap_entry *entry;
+ u64 mmap_offset;
+ u32 length;
+ int err;
-static int __init mlx5_ib_init(void)
+ c = to_mucontext(ib_uverbs_get_ucontext(attrs));
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+
+ entry = alloc_var_entry(c);
+ if (IS_ERR(entry))
+ return PTR_ERR(entry);
+
+ mmap_offset = mlx5_entry_to_mmap_offset(entry);
+ length = entry->rdma_entry.npages * PAGE_SIZE;
+ uobj->object = entry;
+ uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
+ &mmap_offset, sizeof(mmap_offset));
+ if (err)
+ return err;
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
+ &entry->page_idx, sizeof(entry->page_idx));
+ if (err)
+ return err;
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
+ &length, sizeof(length));
+ return err;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_VAR_OBJ_ALLOC,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE,
+ MLX5_IB_OBJECT_VAR,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_VAR_OBJ_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_VAR,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_VAR,
+ UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_ALLOC),
+ &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_DESTROY));
+
+static bool var_is_supported(struct ib_device *device)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+
+ return (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q);
+}
+
+static struct mlx5_user_mmap_entry *
+alloc_uar_entry(struct mlx5_ib_ucontext *c,
+ enum mlx5_ib_uapi_uar_alloc_type alloc_type)
+{
+ struct mlx5_user_mmap_entry *entry;
+ struct mlx5_ib_dev *dev;
+ u32 uar_index;
+ int err;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+
+ dev = to_mdev(c->ibucontext.device);
+ err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index, c->devx_uid);
+ if (err)
+ goto end;
+
+ entry->page_idx = uar_index;
+ entry->address = uar_index2paddress(dev, uar_index);
+ if (alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF)
+ entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_WC;
+ else
+ entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_NC;
+
+ err = mlx5_rdma_user_mmap_entry_insert(c, entry, PAGE_SIZE);
+ if (err)
+ goto err_insert;
+
+ return entry;
+
+err_insert:
+ mlx5_cmd_uar_dealloc(dev->mdev, uar_index, c->devx_uid);
+end:
+ kfree(entry);
+ return ERR_PTR(err);
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_UAR_OBJ_ALLOC)(
+ struct uverbs_attr_bundle *attrs)
{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE);
+ enum mlx5_ib_uapi_uar_alloc_type alloc_type;
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_user_mmap_entry *entry;
+ u64 mmap_offset;
+ u32 length;
int err;
- if (deprecated_prof_sel != 2)
- pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n");
+ c = to_mucontext(ib_uverbs_get_ucontext(attrs));
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+
+ err = uverbs_get_const(&alloc_type, attrs,
+ MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE);
+ if (err)
+ return err;
+
+ if (alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF &&
+ alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC)
+ return -EOPNOTSUPP;
+
+ if (!mlx5_wc_support_get(to_mdev(c->ibucontext.device)->mdev) &&
+ alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF)
+ return -EOPNOTSUPP;
+
+ entry = alloc_uar_entry(c, alloc_type);
+ if (IS_ERR(entry))
+ return PTR_ERR(entry);
- err = mlx5_ib_odp_init();
+ mmap_offset = mlx5_entry_to_mmap_offset(entry);
+ length = entry->rdma_entry.npages * PAGE_SIZE;
+ uobj->object = entry;
+ uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE);
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
+ &mmap_offset, sizeof(mmap_offset));
if (err)
return err;
- err = mlx5_register_interface(&mlx5_ib_interface);
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
+ &entry->page_idx, sizeof(entry->page_idx));
if (err)
- goto clean_odp;
+ return err;
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
+ &length, sizeof(length));
return err;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_UAR_OBJ_ALLOC,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE,
+ MLX5_IB_OBJECT_UAR,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE,
+ enum mlx5_ib_uapi_uar_alloc_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_UAR_OBJ_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_UAR,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_UAR,
+ UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_ALLOC),
+ &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY));
+
+ADD_UVERBS_ATTRIBUTES_SIMPLE(
+ mlx5_ib_query_context,
+ UVERBS_OBJECT_DEVICE,
+ UVERBS_METHOD_QUERY_CONTEXT,
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX,
+ UVERBS_ATTR_STRUCT(struct mlx5_ib_alloc_ucontext_resp,
+ dump_fill_mkey),
+ UA_MANDATORY));
+
+ADD_UVERBS_ATTRIBUTES_SIMPLE(
+ mlx5_ib_reg_dmabuf_mr,
+ UVERBS_OBJECT_MR,
+ UVERBS_METHOD_REG_DMABUF_MR,
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_REG_DMABUF_MR_ACCESS_FLAGS,
+ enum mlx5_ib_uapi_reg_dmabuf_flags,
+ UA_OPTIONAL));
+
+static const struct uapi_definition mlx5_ib_defs[] = {
+ UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
+ UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
+ UAPI_DEF_CHAIN(mlx5_ib_qos_defs),
+ UAPI_DEF_CHAIN(mlx5_ib_std_types_defs),
+ UAPI_DEF_CHAIN(mlx5_ib_dm_defs),
+ UAPI_DEF_CHAIN(mlx5_ib_create_cq_defs),
+
+ UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context),
+ UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_MR, &mlx5_ib_reg_dmabuf_mr),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
+ UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_UAR),
+ {}
+};
+
+static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_ib_data_direct_cleanup(dev);
+ mlx5_ib_cleanup_multiport_master(dev);
+ WARN_ON(!xa_empty(&dev->odp_mkeys));
+ mutex_destroy(&dev->cap_mask_mutex);
+ WARN_ON(!xa_empty(&dev->sig_mrs));
+ WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
+ mlx5r_macsec_dealloc_gids(dev);
+}
+
+static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ int err, i;
+
+ dev->ib_dev.node_type = RDMA_NODE_IB_CA;
+ dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
+ dev->ib_dev.dev.parent = mdev->device;
+ dev->ib_dev.lag_flags = RDMA_LAG_FLAGS_HASH_ALL_SLAVES;
+
+ for (i = 0; i < dev->num_ports; i++) {
+ spin_lock_init(&dev->port[i].mp.mpi_lock);
+ dev->port[i].roce.dev = dev;
+ dev->port[i].roce.native_port_num = i + 1;
+ dev->port[i].roce.last_port_state = IB_PORT_DOWN;
+ }
+
+ err = mlx5r_cmd_query_special_mkeys(dev);
+ if (err)
+ return err;
+
+ err = mlx5r_macsec_init_gids_and_devlist(dev);
+ if (err)
+ return err;
+
+ err = mlx5_ib_init_multiport_master(dev);
+ if (err)
+ goto err;
+
+ err = set_has_smi_cap(dev);
+ if (err)
+ goto err_mp;
+
+ err = mlx5_query_max_pkeys(&dev->ib_dev, &dev->pkey_table_len);
+ if (err)
+ goto err_mp;
+
+ if (mlx5_use_mad_ifc(dev))
+ get_ext_port_caps(dev);
+
+ dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_max(mdev);
+
+ mutex_init(&dev->cap_mask_mutex);
+ mutex_init(&dev->data_direct_lock);
+ INIT_LIST_HEAD(&dev->qp_list);
+ spin_lock_init(&dev->reset_flow_resource_lock);
+ xa_init(&dev->odp_mkeys);
+ xa_init(&dev->sig_mrs);
+ atomic_set(&dev->mkey_var, 0);
+
+ spin_lock_init(&dev->dm.lock);
+ dev->dm.dev = mdev;
+ err = mlx5_ib_data_direct_init(dev);
+ if (err)
+ goto err_mp;
-clean_odp:
- mlx5_ib_odp_cleanup();
+ return 0;
+err_mp:
+ mlx5_ib_cleanup_multiport_master(dev);
+err:
+ mlx5r_macsec_dealloc_gids(dev);
return err;
}
+static struct ib_device *mlx5_ib_add_sub_dev(struct ib_device *parent,
+ enum rdma_nl_dev_type type,
+ const char *name);
+static void mlx5_ib_del_sub_dev(struct ib_device *sub_dev);
+
+static const struct ib_device_ops mlx5_ib_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_MLX5,
+ .uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION,
+
+ .add_gid = mlx5_ib_add_gid,
+ .add_sub_dev = mlx5_ib_add_sub_dev,
+ .alloc_mr = mlx5_ib_alloc_mr,
+ .alloc_mr_integrity = mlx5_ib_alloc_mr_integrity,
+ .alloc_pd = mlx5_ib_alloc_pd,
+ .alloc_ucontext = mlx5_ib_alloc_ucontext,
+ .attach_mcast = mlx5_ib_mcg_attach,
+ .check_mr_status = mlx5_ib_check_mr_status,
+ .create_ah = mlx5_ib_create_ah,
+ .create_cq = mlx5_ib_create_cq,
+ .create_qp = mlx5_ib_create_qp,
+ .create_srq = mlx5_ib_create_srq,
+ .create_user_ah = mlx5_ib_create_ah,
+ .dealloc_pd = mlx5_ib_dealloc_pd,
+ .dealloc_ucontext = mlx5_ib_dealloc_ucontext,
+ .del_gid = mlx5_ib_del_gid,
+ .del_sub_dev = mlx5_ib_del_sub_dev,
+ .dereg_mr = mlx5_ib_dereg_mr,
+ .destroy_ah = mlx5_ib_destroy_ah,
+ .destroy_cq = mlx5_ib_destroy_cq,
+ .destroy_qp = mlx5_ib_destroy_qp,
+ .destroy_srq = mlx5_ib_destroy_srq,
+ .detach_mcast = mlx5_ib_mcg_detach,
+ .disassociate_ucontext = mlx5_ib_disassociate_ucontext,
+ .drain_rq = mlx5_ib_drain_rq,
+ .drain_sq = mlx5_ib_drain_sq,
+ .device_group = &mlx5_attr_group,
+ .get_dev_fw_str = get_dev_fw_str,
+ .get_dma_mr = mlx5_ib_get_dma_mr,
+ .get_link_layer = mlx5_ib_port_link_layer,
+ .map_mr_sg = mlx5_ib_map_mr_sg,
+ .map_mr_sg_pi = mlx5_ib_map_mr_sg_pi,
+ .mmap = mlx5_ib_mmap,
+ .mmap_free = mlx5_ib_mmap_free,
+ .modify_cq = mlx5_ib_modify_cq,
+ .modify_device = mlx5_ib_modify_device,
+ .modify_port = mlx5_ib_modify_port,
+ .modify_qp = mlx5_ib_modify_qp,
+ .modify_srq = mlx5_ib_modify_srq,
+ .pre_destroy_cq = mlx5_ib_pre_destroy_cq,
+ .poll_cq = mlx5_ib_poll_cq,
+ .post_destroy_cq = mlx5_ib_post_destroy_cq,
+ .post_recv = mlx5_ib_post_recv_nodrain,
+ .post_send = mlx5_ib_post_send_nodrain,
+ .post_srq_recv = mlx5_ib_post_srq_recv,
+ .process_mad = mlx5_ib_process_mad,
+ .query_ah = mlx5_ib_query_ah,
+ .query_device = mlx5_ib_query_device,
+ .query_gid = mlx5_ib_query_gid,
+ .query_pkey = mlx5_ib_query_pkey,
+ .query_qp = mlx5_ib_query_qp,
+ .query_srq = mlx5_ib_query_srq,
+ .query_ucontext = mlx5_ib_query_ucontext,
+ .reg_user_mr = mlx5_ib_reg_user_mr,
+ .reg_user_mr_dmabuf = mlx5_ib_reg_user_mr_dmabuf,
+ .req_notify_cq = mlx5_ib_arm_cq,
+ .rereg_user_mr = mlx5_ib_rereg_user_mr,
+ .resize_cq = mlx5_ib_resize_cq,
+ .ufile_hw_cleanup = mlx5_ib_ufile_hw_cleanup,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs),
+ INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_dmah, mlx5_ib_dmah, ibdmah),
+ INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, mlx5_ib_qp, ibqp),
+ INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
+};
+
+static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
+ .rdma_netdev_get_params = mlx5_ib_rn_get_params,
+};
+
+static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
+ .get_vf_config = mlx5_ib_get_vf_config,
+ .get_vf_guid = mlx5_ib_get_vf_guid,
+ .get_vf_stats = mlx5_ib_get_vf_stats,
+ .set_vf_guid = mlx5_ib_set_vf_guid,
+ .set_vf_link_state = mlx5_ib_set_vf_link_state,
+};
+
+static const struct ib_device_ops mlx5_ib_dev_mw_ops = {
+ .alloc_mw = mlx5_ib_alloc_mw,
+ .dealloc_mw = mlx5_ib_dealloc_mw,
+
+ INIT_RDMA_OBJ_SIZE(ib_mw, mlx5_ib_mw, ibmw),
+};
+
+static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
+ .alloc_xrcd = mlx5_ib_alloc_xrcd,
+ .dealloc_xrcd = mlx5_ib_dealloc_xrcd,
+
+ INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx5_ib_xrcd, ibxrcd),
+};
+
+static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_var_table *var_table = &dev->var_table;
+ u8 log_doorbell_bar_size;
+ u8 log_doorbell_stride;
+ u64 bar_size;
+
+ log_doorbell_bar_size = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
+ log_doorbell_bar_size);
+ log_doorbell_stride = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
+ log_doorbell_stride);
+ var_table->hw_start_addr = dev->mdev->bar_addr +
+ MLX5_CAP64_DEV_VDPA_EMULATION(mdev,
+ doorbell_bar_offset);
+ bar_size = (1ULL << log_doorbell_bar_size) * 4096;
+ var_table->stride_size = 1ULL << log_doorbell_stride;
+ var_table->num_var_hw_entries = div_u64(bar_size,
+ var_table->stride_size);
+ mutex_init(&var_table->bitmap_lock);
+ var_table->bitmap = bitmap_zalloc(var_table->num_var_hw_entries,
+ GFP_KERNEL);
+ return (var_table->bitmap) ? 0 : -ENOMEM;
+}
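/*
 * Worked example (illustrative numbers only): with log_doorbell_bar_size = 8
 * and log_doorbell_stride = 12, bar_size = 2^8 * 4096 = 1 MiB, stride_size =
 * 2^12 = 4096, and num_var_hw_entries = 1 MiB / 4 KiB = 256 bitmap-tracked
 * entries starting at bar_addr + doorbell_bar_offset.
 */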
+
+static void mlx5_ib_cleanup_ucaps(struct mlx5_ib_dev *dev)
+{
+ if (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RDMA_CTRL)
+ ib_remove_ucap(RDMA_UCAP_MLX5_CTRL_LOCAL);
+
+ if (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
+ MLX5_UCTX_CAP_RDMA_CTRL_OTHER_VHCA)
+ ib_remove_ucap(RDMA_UCAP_MLX5_CTRL_OTHER_VHCA);
+}
+
+static int mlx5_ib_init_ucaps(struct mlx5_ib_dev *dev)
+{
+ int ret;
+
+ if (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RDMA_CTRL) {
+ ret = ib_create_ucap(RDMA_UCAP_MLX5_CTRL_LOCAL);
+ if (ret)
+ return ret;
+ }
+
+ if (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
+ MLX5_UCTX_CAP_RDMA_CTRL_OTHER_VHCA) {
+ ret = ib_create_ucap(RDMA_UCAP_MLX5_CTRL_OTHER_VHCA);
+ if (ret)
+ goto remove_local;
+ }
+
+ return 0;
+
+remove_local:
+ if (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RDMA_CTRL)
+ ib_remove_ucap(RDMA_UCAP_MLX5_CTRL_LOCAL);
+ return ret;
+}
+
+static void mlx5_ib_stage_caps_cleanup(struct mlx5_ib_dev *dev)
+{
+ if (MLX5_CAP_GEN_2_64(dev->mdev, general_obj_types_127_64) &
+ MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_RDMA_CTRL)
+ mlx5_ib_cleanup_ucaps(dev);
+
+ bitmap_free(dev->var_table.bitmap);
+}
+
+static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ int err;
+
+ if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
+ IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
+ ib_set_device_ops(&dev->ib_dev,
+ &mlx5_ib_dev_ipoib_enhanced_ops);
+
+ if (mlx5_core_is_pf(mdev))
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops);
+
+ dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
+
+ if (MLX5_CAP_GEN(mdev, imaicl))
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops);
+
+ if (MLX5_CAP_GEN(mdev, xrc))
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);
+
+ if (MLX5_CAP_DEV_MEM(mdev, memic) ||
+ MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM)
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);
+
+ if (mdev->st)
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dmah_ops);
+
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);
+
+ if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
+ dev->ib_dev.driver_def = mlx5_ib_defs;
+
+ err = init_node_data(dev);
+ if (err)
+ return err;
+
+ if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
+ (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
+ MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
+ mutex_init(&dev->lb.mutex);
+
+ if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
+ err = mlx5_ib_init_var_table(dev);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN_2_64(dev->mdev, general_obj_types_127_64) &
+ MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_RDMA_CTRL) {
+ err = mlx5_ib_init_ucaps(dev);
+ if (err)
+ return err;
+ }
+
+ dev->ib_dev.use_cq_dim = true;
+
+ return 0;
+}
+
+static const struct ib_device_ops mlx5_ib_dev_port_ops = {
+ .get_port_immutable = mlx5_port_immutable,
+ .query_port = mlx5_ib_query_port,
+};
+
+static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
+{
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops);
+ return 0;
+}
+
+static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
+ .get_port_immutable = mlx5_port_rep_immutable,
+ .query_port = mlx5_ib_rep_query_port,
+ .query_pkey = mlx5_ib_rep_query_pkey,
+};
+
+static int mlx5_ib_stage_raw_eth_non_default_cb(struct mlx5_ib_dev *dev)
+{
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
+ return 0;
+}
+
+static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
+ .create_rwq_ind_table = mlx5_ib_create_rwq_ind_table,
+ .create_wq = mlx5_ib_create_wq,
+ .destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table,
+ .destroy_wq = mlx5_ib_destroy_wq,
+ .modify_wq = mlx5_ib_modify_wq,
+
+ INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx5_ib_rwq_ind_table,
+ ib_rwq_ind_tbl),
+};
+
+static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ enum rdma_link_layer ll;
+ int port_type_cap;
+ u32 port_num = 0;
+ int err;
+
+ port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+ ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+
+ if (ll == IB_LINK_LAYER_ETHERNET) {
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops);
+
+ port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+
+ /* Register only for native ports */
+ mlx5_mdev_netdev_track(dev, port_num);
+
+ err = mlx5_enable_eth(dev);
+ if (err)
+ goto cleanup;
+ }
+
+ return 0;
+cleanup:
+ mlx5_mdev_netdev_untrack(dev, port_num);
+ return err;
+}
+
+static void mlx5_ib_roce_cleanup(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ enum rdma_link_layer ll;
+ int port_type_cap;
+ u32 port_num;
+
+ port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+ ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+
+ if (ll == IB_LINK_LAYER_ETHERNET) {
+ mlx5_disable_eth(dev);
+
+ port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ mlx5_mdev_netdev_untrack(dev, port_num);
+ }
+}
+
+static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
+{
+ mlx5_ib_init_cong_debugfs(dev,
+ mlx5_core_native_port_num(dev->mdev) - 1);
+ return 0;
+}
+
+static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_ib_cleanup_cong_debugfs(dev,
+ mlx5_core_native_port_num(dev->mdev) - 1);
+}
+
+static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
+{
+ int err;
+
+ err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
+ if (err)
+ return err;
+
+ err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
+ if (err)
+ mlx5_free_bfreg(dev->mdev, &dev->bfreg);
+
+ return err;
+}
+
+static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+ mlx5_free_bfreg(dev->mdev, &dev->bfreg);
+}
+
+static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
+{
+ const char *name;
+
+ if (dev->sub_dev_name) {
+ name = dev->sub_dev_name;
+ ib_mark_name_assigned_by_user(&dev->ib_dev);
+ } else if (!mlx5_lag_is_active(dev->mdev))
+ name = "mlx5_%d";
+ else
+ name = "mlx5_bond_%d";
+ return ib_register_device(&dev->ib_dev, name, &dev->mdev->pdev->dev);
+}
+
+static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_mkey_cache_cleanup(dev);
+ mlx5r_umr_resource_cleanup(dev);
+ mlx5r_umr_cleanup(dev);
+}
+
+static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
+{
+ ib_unregister_device(&dev->ib_dev);
+}
+
+static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
+{
+ int ret;
+
+ ret = mlx5r_umr_init(dev);
+ if (ret)
+ return ret;
+
+ ret = mlx5_mkey_cache_init(dev);
+ if (ret)
+ mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
+ return ret;
+}
+
+static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
+{
+ struct dentry *root;
+
+ if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
+ return 0;
+
+ mutex_init(&dev->delay_drop.lock);
+ dev->delay_drop.dev = dev;
+ dev->delay_drop.activate = false;
+ dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
+ INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
+ atomic_set(&dev->delay_drop.rqs_cnt, 0);
+ atomic_set(&dev->delay_drop.events_cnt, 0);
+
+ if (!mlx5_debugfs_root)
+ return 0;
+
+ root = debugfs_create_dir("delay_drop", mlx5_debugfs_get_dev_root(dev->mdev));
+ dev->delay_drop.dir_debugfs = root;
+
+ debugfs_create_atomic_t("num_timeout_events", 0400, root,
+ &dev->delay_drop.events_cnt);
+ debugfs_create_atomic_t("num_rqs", 0400, root,
+ &dev->delay_drop.rqs_cnt);
+ debugfs_create_file("timeout", 0600, root, &dev->delay_drop,
+ &fops_delay_drop_timeout);
+ return 0;
+}
+
+static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
+{
+ if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
+ return;
+
+ cancel_work_sync(&dev->delay_drop.delay_drop_work);
+ if (!dev->delay_drop.dir_debugfs)
+ return;
+
+ debugfs_remove_recursive(dev->delay_drop.dir_debugfs);
+ dev->delay_drop.dir_debugfs = NULL;
+}
+
+static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_ib_resources *devr = &dev->devr;
+ int port;
+
+ for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
+ INIT_WORK(&devr->ports[port].pkey_change_work,
+ pkey_change_handler);
+
+ dev->mdev_events.notifier_call = mlx5_ib_event;
+ mlx5_notifier_register(dev->mdev, &dev->mdev_events);
+
+ mlx5r_macsec_event_register(dev);
+
+ return 0;
+}
+
+static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_ib_resources *devr = &dev->devr;
+ int port;
+
+ mlx5r_macsec_event_unregister(dev);
+ mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
+
+ for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
+ cancel_work_sync(&devr->ports[port].pkey_change_work);
+}
+
+void mlx5_ib_data_direct_bind(struct mlx5_ib_dev *ibdev,
+ struct mlx5_data_direct_dev *dev)
+{
+ mutex_lock(&ibdev->data_direct_lock);
+ ibdev->data_direct_dev = dev;
+ mutex_unlock(&ibdev->data_direct_lock);
+}
+
+void mlx5_ib_data_direct_unbind(struct mlx5_ib_dev *ibdev)
+{
+ mutex_lock(&ibdev->data_direct_lock);
+ mlx5_ib_revoke_data_direct_mrs(ibdev);
+ ibdev->data_direct_dev = NULL;
+ mutex_unlock(&ibdev->data_direct_lock);
+}
+
+void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
+ const struct mlx5_ib_profile *profile,
+ int stage)
+{
+ dev->ib_active = false;
+
+ /* Number of stages to cleanup */
+ while (stage) {
+ stage--;
+ if (profile->stage[stage].cleanup)
+ profile->stage[stage].cleanup(dev);
+ }
+
+ kfree(dev->port);
+ ib_dealloc_device(&dev->ib_dev);
+}
+
+int __mlx5_ib_add(struct mlx5_ib_dev *dev,
+ const struct mlx5_ib_profile *profile)
+{
+ int err;
+ int i;
+
+ dev->profile = profile;
+
+ for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
+ if (profile->stage[i].init) {
+ err = profile->stage[i].init(dev);
+ if (err)
+ goto err_out;
+ }
+ }
+
+ dev->ib_active = true;
+ return 0;
+
+err_out:
+ /* Clean up stages which were initialized */
+ while (i) {
+ i--;
+ if (profile->stage[i].cleanup)
+ profile->stage[i].cleanup(dev);
+ }
+ return -ENOMEM;
+}
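/*
 * The profile machinery is a staged constructor: __mlx5_ib_add() runs each
 * stage's init callback in array order and, on failure, unwinds only the
 * stages already initialized; __mlx5_ib_remove() mirrors that by invoking the
 * cleanup callbacks in reverse order starting from the given stage.
 */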
+
+static const struct mlx5_ib_profile pf_profile = {
+ STAGE_CREATE(MLX5_IB_STAGE_INIT,
+ mlx5_ib_stage_init_init,
+ mlx5_ib_stage_init_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_FS,
+ mlx5_ib_fs_init,
+ mlx5_ib_fs_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CAPS,
+ mlx5_ib_stage_caps_init,
+ mlx5_ib_stage_caps_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
+ mlx5_ib_stage_non_default_cb,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_ROCE,
+ mlx5_ib_roce_init,
+ mlx5_ib_roce_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_QP,
+ mlx5_init_qp_table,
+ mlx5_cleanup_qp_table),
+ STAGE_CREATE(MLX5_IB_STAGE_SRQ,
+ mlx5_init_srq_table,
+ mlx5_cleanup_srq_table),
+ STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
+ mlx5_ib_dev_res_init,
+ mlx5_ib_dev_res_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_ODP,
+ mlx5_ib_odp_init_one,
+ mlx5_ib_odp_cleanup_one),
+ STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
+ mlx5_ib_counters_init,
+ mlx5_ib_counters_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
+ mlx5_ib_stage_cong_debugfs_init,
+ mlx5_ib_stage_cong_debugfs_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_BFREG,
+ mlx5_ib_stage_bfrag_init,
+ mlx5_ib_stage_bfrag_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
+ NULL,
+ mlx5_ib_stage_pre_ib_reg_umr_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
+ mlx5_ib_devx_init,
+ mlx5_ib_devx_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
+ mlx5_ib_stage_ib_reg_init,
+ mlx5_ib_stage_ib_reg_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
+ mlx5_ib_stage_dev_notifier_init,
+ mlx5_ib_stage_dev_notifier_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
+ mlx5_ib_stage_post_ib_reg_umr_init,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
+ mlx5_ib_stage_delay_drop_init,
+ mlx5_ib_stage_delay_drop_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_RESTRACK,
+ mlx5_ib_restrack_init,
+ NULL),
+};
+
+const struct mlx5_ib_profile raw_eth_profile = {
+ STAGE_CREATE(MLX5_IB_STAGE_INIT,
+ mlx5_ib_stage_init_init,
+ mlx5_ib_stage_init_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_FS,
+ mlx5_ib_fs_init,
+ mlx5_ib_fs_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CAPS,
+ mlx5_ib_stage_caps_init,
+ mlx5_ib_stage_caps_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
+ mlx5_ib_stage_raw_eth_non_default_cb,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_ROCE,
+ mlx5_ib_roce_init,
+ mlx5_ib_roce_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_QP,
+ mlx5_init_qp_table,
+ mlx5_cleanup_qp_table),
+ STAGE_CREATE(MLX5_IB_STAGE_SRQ,
+ mlx5_init_srq_table,
+ mlx5_cleanup_srq_table),
+ STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
+ mlx5_ib_dev_res_init,
+ mlx5_ib_dev_res_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
+ mlx5_ib_counters_init,
+ mlx5_ib_counters_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
+ mlx5_ib_stage_cong_debugfs_init,
+ mlx5_ib_stage_cong_debugfs_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_BFREG,
+ mlx5_ib_stage_bfrag_init,
+ mlx5_ib_stage_bfrag_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
+ NULL,
+ mlx5_ib_stage_pre_ib_reg_umr_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
+ mlx5_ib_devx_init,
+ mlx5_ib_devx_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
+ mlx5_ib_stage_ib_reg_init,
+ mlx5_ib_stage_ib_reg_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
+ mlx5_ib_stage_dev_notifier_init,
+ mlx5_ib_stage_dev_notifier_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
+ mlx5_ib_stage_post_ib_reg_umr_init,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
+ mlx5_ib_stage_delay_drop_init,
+ mlx5_ib_stage_delay_drop_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_RESTRACK,
+ mlx5_ib_restrack_init,
+ NULL),
+};
+
+static const struct mlx5_ib_profile plane_profile = {
+ STAGE_CREATE(MLX5_IB_STAGE_INIT,
+ mlx5_ib_stage_init_init,
+ mlx5_ib_stage_init_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CAPS,
+ mlx5_ib_stage_caps_init,
+ mlx5_ib_stage_caps_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
+ mlx5_ib_stage_non_default_cb,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_QP,
+ mlx5_init_qp_table,
+ mlx5_cleanup_qp_table),
+ STAGE_CREATE(MLX5_IB_STAGE_SRQ,
+ mlx5_init_srq_table,
+ mlx5_cleanup_srq_table),
+ STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
+ mlx5_ib_dev_res_init,
+ mlx5_ib_dev_res_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_BFREG,
+ mlx5_ib_stage_bfrag_init,
+ mlx5_ib_stage_bfrag_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
+ mlx5_ib_stage_ib_reg_init,
+ mlx5_ib_stage_ib_reg_cleanup),
+};
+
+static struct ib_device *mlx5_ib_add_sub_dev(struct ib_device *parent,
+ enum rdma_nl_dev_type type,
+ const char *name)
+{
+ struct mlx5_ib_dev *mparent = to_mdev(parent), *mplane;
+ enum rdma_link_layer ll;
+ int ret;
+
+ if (mparent->smi_dev)
+ return ERR_PTR(-EEXIST);
+
+ ll = mlx5_port_type_cap_to_rdma_ll(MLX5_CAP_GEN(mparent->mdev,
+ port_type));
+ if (type != RDMA_DEVICE_TYPE_SMI || !mparent->num_plane ||
+ ll != IB_LINK_LAYER_INFINIBAND ||
+ !MLX5_CAP_GEN_2(mparent->mdev, multiplane_qp_ud))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mplane = ib_alloc_device_with_net(mlx5_ib_dev, ib_dev,
+ mlx5_core_net(mparent->mdev));
+ if (!mplane)
+ return ERR_PTR(-ENOMEM);
+
+ mplane->port = kcalloc(mparent->num_plane * mparent->num_ports,
+ sizeof(*mplane->port), GFP_KERNEL);
+ if (!mplane->port) {
+ ret = -ENOMEM;
+ goto fail_kcalloc;
+ }
+
+ mplane->ib_dev.type = type;
+ mplane->mdev = mparent->mdev;
+ mplane->num_ports = mparent->num_plane;
+ mplane->sub_dev_name = name;
+ mplane->ib_dev.phys_port_cnt = mplane->num_ports;
+
+ ret = __mlx5_ib_add(mplane, &plane_profile);
+ if (ret)
+ goto fail_ib_add;
+
+ mparent->smi_dev = mplane;
+ return &mplane->ib_dev;
+
+fail_ib_add:
+ kfree(mplane->port);
+fail_kcalloc:
+ ib_dealloc_device(&mplane->ib_dev);
+ return ERR_PTR(ret);
+}
+
+static void mlx5_ib_del_sub_dev(struct ib_device *sub_dev)
+{
+ struct mlx5_ib_dev *mdev = to_mdev(sub_dev);
+
+ to_mdev(sub_dev->parent)->smi_dev = NULL;
+ __mlx5_ib_remove(mdev, mdev->profile, MLX5_IB_STAGE_MAX);
+}
+
+static int mlx5r_mp_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = idev->mdev;
+ struct mlx5_ib_multiport_info *mpi;
+ struct mlx5_ib_dev *dev;
+ bool bound = false;
+ int err;
+
+ mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
+ if (!mpi)
+ return -ENOMEM;
+
+ mpi->mdev = mdev;
+ err = mlx5_query_nic_vport_system_image_guid(mdev,
+ &mpi->sys_image_guid);
+ if (err) {
+ kfree(mpi);
+ return err;
+ }
+
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
+ if (dev->sys_image_guid == mpi->sys_image_guid &&
+ mlx5_core_same_coredev_type(dev->mdev, mpi->mdev))
+ bound = mlx5_ib_bind_slave_port(dev, mpi);
+
+ if (bound) {
+ rdma_roce_rescan_device(&dev->ib_dev);
+ mpi->ibdev->ib_active = true;
+ break;
+ }
+ }
+
+ if (!bound) {
+ list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
+ dev_dbg(mdev->device,
+ "no suitable IB device found to bind to, added to unaffiliated list.\n");
+ }
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+
+ auxiliary_set_drvdata(adev, mpi);
+ return 0;
+}
+
+static void mlx5r_mp_remove(struct auxiliary_device *adev)
+{
+ struct mlx5_ib_multiport_info *mpi;
+
+ mpi = auxiliary_get_drvdata(adev);
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ if (mpi->ibdev)
+ mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
+ else
+ list_del(&mpi->list);
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+ kfree(mpi);
+}
+
+static int mlx5r_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = idev->mdev;
+ const struct mlx5_ib_profile *profile;
+ int port_type_cap, num_ports, ret;
+ enum rdma_link_layer ll;
+ struct mlx5_ib_dev *dev;
+
+ port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+ ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+
+ num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
+ MLX5_CAP_GEN(mdev, num_vhca_ports));
+ dev = ib_alloc_device_with_net(mlx5_ib_dev, ib_dev,
+ mlx5_core_net(mdev));
+ if (!dev)
+ return -ENOMEM;
+
+ if (ll == IB_LINK_LAYER_INFINIBAND) {
+ ret = mlx5_ib_get_plane_num(mdev, &dev->num_plane);
+ if (ret)
+ goto fail;
+ }
+
+ dev->port = kcalloc(num_ports, sizeof(*dev->port),
+ GFP_KERNEL);
+ if (!dev->port) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ dev->mdev = mdev;
+ dev->num_ports = num_ports;
+ dev->ib_dev.phys_port_cnt = num_ports;
+
+ if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_get_roce_state(mdev))
+ profile = &raw_eth_profile;
+ else
+ profile = &pf_profile;
+
+ ret = __mlx5_ib_add(dev, profile);
+ if (ret)
+ goto fail_ib_add;
+
+ auxiliary_set_drvdata(adev, dev);
+ return 0;
+
+fail_ib_add:
+ kfree(dev->port);
+fail:
+ ib_dealloc_device(&dev->ib_dev);
+ return ret;
+}
+
+static void mlx5r_remove(struct auxiliary_device *adev)
+{
+ struct mlx5_ib_dev *dev;
+
+ dev = auxiliary_get_drvdata(adev);
+ __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
+}
+
+static const struct auxiliary_device_id mlx5r_mp_id_table[] = {
+ { .name = MLX5_ADEV_NAME ".multiport", },
+ {},
+};
+
+static const struct auxiliary_device_id mlx5r_id_table[] = {
+ { .name = MLX5_ADEV_NAME ".rdma", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, mlx5r_mp_id_table);
+MODULE_DEVICE_TABLE(auxiliary, mlx5r_id_table);
+
+static struct auxiliary_driver mlx5r_mp_driver = {
+ .name = "multiport",
+ .probe = mlx5r_mp_probe,
+ .remove = mlx5r_mp_remove,
+ .id_table = mlx5r_mp_id_table,
+};
+
+static struct auxiliary_driver mlx5r_driver = {
+ .name = "rdma",
+ .probe = mlx5r_probe,
+ .remove = mlx5r_remove,
+ .id_table = mlx5r_id_table,
+};
+
+static int __init mlx5_ib_init(void)
+{
+ int ret;
+
+ xlt_emergency_page = (void *)__get_free_page(GFP_KERNEL);
+ if (!xlt_emergency_page)
+ return -ENOMEM;
+
+ mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
+ if (!mlx5_ib_event_wq) {
+ free_page((unsigned long)xlt_emergency_page);
+ return -ENOMEM;
+ }
+
+ ret = mlx5_ib_qp_event_init();
+ if (ret)
+ goto qp_event_err;
+
+ mlx5_ib_odp_init();
+ ret = mlx5r_rep_init();
+ if (ret)
+ goto rep_err;
+ ret = mlx5_data_direct_driver_register();
+ if (ret)
+ goto dd_err;
+ ret = auxiliary_driver_register(&mlx5r_mp_driver);
+ if (ret)
+ goto mp_err;
+ ret = auxiliary_driver_register(&mlx5r_driver);
+ if (ret)
+ goto drv_err;
+
+ return 0;
+
+drv_err:
+ auxiliary_driver_unregister(&mlx5r_mp_driver);
+mp_err:
+ mlx5_data_direct_driver_unregister();
+dd_err:
+ mlx5r_rep_cleanup();
+rep_err:
+ mlx5_ib_qp_event_cleanup();
+qp_event_err:
+ destroy_workqueue(mlx5_ib_event_wq);
+ free_page((unsigned long)xlt_emergency_page);
+ return ret;
+}
+
static void __exit mlx5_ib_cleanup(void)
{
- mlx5_unregister_interface(&mlx5_ib_interface);
- mlx5_ib_odp_cleanup();
+ mlx5_data_direct_driver_unregister();
+ auxiliary_driver_unregister(&mlx5r_driver);
+ auxiliary_driver_unregister(&mlx5r_mp_driver);
+ mlx5r_rep_cleanup();
+
+ mlx5_ib_qp_event_cleanup();
+ destroy_workqueue(mlx5_ib_event_wq);
+ free_page((unsigned long)xlt_emergency_page);
}
module_init(mlx5_ib_init);
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 40df2cca0609..af321f6ef7f5 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -30,196 +30,66 @@
* SOFTWARE.
*/
-#include <linux/module.h>
-#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
-/* @umem: umem object to scan
- * @addr: ib virtual address requested by the user
- * @count: number of PAGE_SIZE pages covered by umem
- * @shift: page shift for the compound pages found in the region
- * @ncont: number of compund pages
- * @order: log2 of the number of compound pages
+/*
+ * Fill in a physical address list. ib_umem_num_dma_blocks() entries will be
+ * filled in the pas array.
*/
-void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
- int *ncont, int *order)
+void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
+ u64 access_flags)
{
- unsigned long tmp;
- unsigned long m;
- int i, k;
- u64 base = 0;
- int p = 0;
- int skip;
- int mask;
- u64 len;
- u64 pfn;
- struct scatterlist *sg;
- int entry;
- unsigned long page_shift = ilog2(umem->page_size);
-
- /* With ODP we must always match OS page size. */
- if (umem->odp_data) {
- *count = ib_umem_page_count(umem);
- *shift = PAGE_SHIFT;
- *ncont = *count;
- if (order)
- *order = ilog2(roundup_pow_of_two(*count));
-
- return;
- }
+ struct ib_block_iter biter;
- addr = addr >> page_shift;
- tmp = (unsigned long)addr;
- m = find_first_bit(&tmp, sizeof(tmp));
- skip = 1 << m;
- mask = skip - 1;
- i = 0;
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- len = sg_dma_len(sg) >> page_shift;
- pfn = sg_dma_address(sg) >> page_shift;
- for (k = 0; k < len; k++) {
- if (!(i & mask)) {
- tmp = (unsigned long)pfn;
- m = min_t(unsigned long, m, find_first_bit(&tmp, sizeof(tmp)));
- skip = 1 << m;
- mask = skip - 1;
- base = pfn;
- p = 0;
- } else {
- if (base + p != pfn) {
- tmp = (unsigned long)p;
- m = find_first_bit(&tmp, sizeof(tmp));
- skip = 1 << m;
- mask = skip - 1;
- base = pfn;
- p = 0;
- }
- }
- p++;
- i++;
- }
+ rdma_umem_for_each_dma_block (umem, &biter, page_size) {
+ *pas = cpu_to_be64(rdma_block_iter_dma_address(&biter) |
+ access_flags);
+ pas++;
}
-
- if (i) {
- m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m);
-
- if (order)
- *order = ilog2(roundup_pow_of_two(i) >> m);
-
- *ncont = DIV_ROUND_UP(i, (1 << m));
- } else {
- m = 0;
-
- if (order)
- *order = 0;
-
- *ncont = 0;
- }
- *shift = page_shift + m;
- *count = i;
-}
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
-{
- u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
-
- if (umem_dma & ODP_READ_ALLOWED_BIT)
- mtt_entry |= MLX5_IB_MTT_READ;
- if (umem_dma & ODP_WRITE_ALLOWED_BIT)
- mtt_entry |= MLX5_IB_MTT_WRITE;
-
- return mtt_entry;
}
-#endif
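As a usage note for the rewritten helper above: the caller is expected to size the pas array with ib_umem_num_dma_blocks() for the same page_size it passes in. The sketch below shows that shape only (kernel context assumed, with the headers already pulled in by mem.c); demo_fill_pas is a hypothetical caller, not a function in this patch.

/*
 * Hypothetical caller of mlx5_ib_populate_pas(); sketch only.
 * Assumes kernel context; kvcalloc()/kvfree() and ib_umem_num_dma_blocks()
 * come from the usual mm and ib_umem headers.
 */
static int demo_fill_pas(struct ib_umem *umem, unsigned long page_size,
			 u64 access_flags)
{
	size_t nblocks = ib_umem_num_dma_blocks(umem, page_size);
	__be64 *pas;

	pas = kvcalloc(nblocks, sizeof(*pas), GFP_KERNEL);
	if (!pas)
		return -ENOMEM;

	/* One big-endian block address, OR'ed with the access bits, per entry. */
	mlx5_ib_populate_pas(umem, page_size, pas, access_flags);

	/* ... copy "pas" into the firmware mailbox here ... */

	kvfree(pas);
	return 0;
}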
/*
- * Populate the given array with bus addresses from the umem.
- *
- * dev - mlx5_ib device
- * umem - umem to use to fill the pages
- * page_shift - determines the page size used in the resulting array
- * offset - offset into the umem to start from,
- * only implemented for ODP umems
- * num_pages - total number of pages to fill
- * pas - bus addresses array to fill
- * access_flags - access flags to set on all present pages.
- use enum mlx5_ib_mtt_access_flags for this.
+ * Compute the page shift and page_offset for mailboxes that use a quantized
+ * page_offset. The granularity of the page offset scales according to page
+ * size.
*/
-void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
- int page_shift, size_t offset, size_t num_pages,
- __be64 *pas, int access_flags)
+unsigned long __mlx5_umem_find_best_quantized_pgoff(
+ struct ib_umem *umem, unsigned long pgsz_bitmap,
+ unsigned int page_offset_bits, u64 pgoff_bitmask, unsigned int scale,
+ unsigned int *page_offset_quantized)
{
- unsigned long umem_page_shift = ilog2(umem->page_size);
- int shift = page_shift - umem_page_shift;
- int mask = (1 << shift) - 1;
- int i, k;
- u64 cur = 0;
- u64 base;
- int len;
- struct scatterlist *sg;
- int entry;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- const bool odp = umem->odp_data != NULL;
-
- if (odp) {
- WARN_ON(shift != 0);
- WARN_ON(access_flags != (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE));
-
- for (i = 0; i < num_pages; ++i) {
- dma_addr_t pa = umem->odp_data->dma_list[offset + i];
-
- pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
- }
- return;
- }
-#endif
-
- i = 0;
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- len = sg_dma_len(sg) >> umem_page_shift;
- base = sg_dma_address(sg);
- for (k = 0; k < len; k++) {
- if (!(i & mask)) {
- cur = base + (k << umem_page_shift);
- cur |= access_flags;
-
- pas[i >> shift] = cpu_to_be64(cur);
- mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
- i >> shift, be64_to_cpu(pas[i >> shift]));
- } else
- mlx5_ib_dbg(dev, "=====> 0x%llx\n",
- base + (k << umem_page_shift));
- i++;
- }
+ const u64 page_offset_mask = (1UL << page_offset_bits) - 1;
+ unsigned long page_size;
+ u64 page_offset;
+
+ page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, pgoff_bitmask);
+ if (!page_size)
+ return 0;
+
+ /*
+ * page size is the largest possible page size.
+ *
+ * Reduce the page_size, and thus the page_offset and quanta, until the
+ * page_offset fits into the mailbox field. Once page_size < scale this
+ * loop is guaranteed to terminate.
+ */
+ page_offset = ib_umem_dma_offset(umem, page_size);
+ while (page_offset & ~(u64)(page_offset_mask * (page_size / scale))) {
+ page_size /= 2;
+ page_offset = ib_umem_dma_offset(umem, page_size);
}
-}
-
-void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
- int page_shift, __be64 *pas, int access_flags)
-{
- return __mlx5_ib_populate_pas(dev, umem, page_shift, 0,
- ib_umem_num_pages(umem), pas,
- access_flags);
-}
-int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
-{
- u64 page_size;
- u64 page_mask;
- u64 off_size;
- u64 off_mask;
- u64 buf_off;
-
- page_size = (u64)1 << page_shift;
- page_mask = page_size - 1;
- buf_off = addr & page_mask;
- off_size = page_size >> 6;
- off_mask = off_size - 1;
-
- if (buf_off & off_mask)
- return -EINVAL;
- *offset = buf_off >> ilog2(off_size);
- return 0;
+ /*
+ * The address is not aligned, or otherwise cannot be represented by the
+ * page_offset.
+ */
+ if (!(pgsz_bitmap & page_size))
+ return 0;
+
+ *page_offset_quantized =
+ (unsigned long)page_offset / (page_size / scale);
+ if (WARN_ON(*page_offset_quantized > page_offset_mask))
+ return 0;
+ return page_size;
}
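To make the reduction loop above concrete, here is a stand-alone user-space sketch of the same quantization step with made-up numbers. It deliberately omits the pgsz_bitmap recheck and the WARN_ON, and demo_quantize() is illustrative, not a function in this patch.

#include <stdint.h>
#include <stdio.h>

/* Shrink page_size until page_offset is a multiple of page_size/scale that
 * fits the quantized field (simplified model of the function above).
 */
static unsigned long demo_quantize(uint64_t addr, unsigned long page_size,
				   unsigned int page_offset_bits,
				   unsigned int scale,
				   unsigned int *page_offset_quantized)
{
	const uint64_t page_offset_mask = (1ULL << page_offset_bits) - 1;
	uint64_t page_offset = addr & (page_size - 1);

	while (page_offset & ~(page_offset_mask * (page_size / scale))) {
		page_size /= 2;
		page_offset = addr & (page_size - 1);
	}

	*page_offset_quantized = (unsigned int)(page_offset / (page_size / scale));
	return page_size;
}

int main(void)
{
	unsigned int q;
	/* Buffer at 0x40003000, candidate page size 2 MiB, a 6-bit field,
	 * scale 64: the loop settles on 256 KiB pages with a quantized
	 * offset of 3, since 3 * (256 KiB / 64) = 0x3000.
	 */
	unsigned long ps = demo_quantize(0x40003000ULL, 2UL << 20, 6, 64, &q);

	printf("page_size=%lu page_offset_quantized=%u\n", ps, q);
	return 0;
}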
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index bb8cda79e881..09d82d5f95e3 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2020, Intel Corporation. All rights reserved.
*/
#ifndef MLX5_IB_H
@@ -36,35 +10,110 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
+#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
+#include <linux/mlx5/fs.h>
#include <linux/mlx5/qp.h>
-#include <linux/mlx5/srq.h>
#include <linux/types.h>
+#include <linux/mlx5/transobj.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/mlx5-abi.h>
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/mlx5_user_ioctl_verbs.h>
+
+#include "srq.h"
+#include "qp.h"
+#include "macsec.h"
+
+#define mlx5_ib_dbg(_dev, format, arg...) \
+ dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
+ __LINE__, current->pid, ##arg)
+
+#define mlx5_ib_err(_dev, format, arg...) \
+ dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
+ __LINE__, current->pid, ##arg)
+
+#define mlx5_ib_warn(_dev, format, arg...) \
+ dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
+ __LINE__, current->pid, ##arg)
+
+#define mlx5_ib_log(lvl, _dev, format, arg...) \
+ dev_printk(lvl, &(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, ##arg)
+
+#define MLX5_IB_DEFAULT_UIDX 0xffffff
+#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)
+
+static __always_inline unsigned long
+__mlx5_log_page_size_to_bitmap(unsigned int log_pgsz_bits,
+ unsigned int pgsz_shift)
+{
+ unsigned int largest_pg_shift =
+ min_t(unsigned long, (1ULL << log_pgsz_bits) - 1 + pgsz_shift,
+ BITS_PER_LONG - 1);
+
+ /*
+ * Despite a command allowing it, the device does not support lower than
+ * 4k page size.
+ */
+ pgsz_shift = max_t(unsigned int, MLX5_ADAPTER_PAGE_SHIFT, pgsz_shift);
+ return GENMASK(largest_pg_shift, pgsz_shift);
+}
+
+static __always_inline unsigned long
+__mlx5_page_offset_to_bitmask(unsigned int page_offset_bits,
+ unsigned int offset_shift)
+{
+ unsigned int largest_offset_shift =
+ min_t(unsigned long, page_offset_bits - 1 + offset_shift,
+ BITS_PER_LONG - 1);
-#define mlx5_ib_dbg(dev, format, arg...) \
-pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
- __LINE__, current->pid, ##arg)
+ return GENMASK(largest_offset_shift, offset_shift);
+}
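A small stand-alone check of the page-size bitmap helper above, with assumed inputs (a 5-bit log_page_size field and pgsz_shift 0) and with GENMASK() plus the 4 KiB MLX5_ADAPTER_PAGE_SHIFT floor re-created locally so it compiles outside the kernel.

#include <stdio.h>

#define DEMO_BITS_PER_LONG 64			/* assumes a 64-bit long */
#define DEMO_GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (DEMO_BITS_PER_LONG - 1 - (h))))

int main(void)
{
	unsigned int log_pgsz_bits = 5, pgsz_shift = 0;
	unsigned int largest = (1U << log_pgsz_bits) - 1 + pgsz_shift;	/* 31 */

	if (largest > DEMO_BITS_PER_LONG - 1)
		largest = DEMO_BITS_PER_LONG - 1;
	if (pgsz_shift < 12)		/* device floor: 4 KiB (shift 12) */
		pgsz_shift = 12;

	/* Prints 0xfffff000: bits 12..31 set, i.e. every power-of-two page
	 * size from 4 KiB up to 2 GiB is a candidate.
	 */
	printf("pgsz bitmap = 0x%lx\n", DEMO_GENMASK(largest, pgsz_shift));
	return 0;
}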
-#define mlx5_ib_err(dev, format, arg...) \
-pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
- __LINE__, current->pid, ##arg)
+/*
+ * QP/CQ/WQ/etc type commands take a page offset that satisfies:
+ * page_offset_quantized * (page_size/scale) = page_offset
+ * Which restricts allowed page sizes to ones that satisfy the above.
+ */
+unsigned long __mlx5_umem_find_best_quantized_pgoff(
+ struct ib_umem *umem, unsigned long pgsz_bitmap,
+ unsigned int page_offset_bits, u64 pgoff_bitmask, unsigned int scale,
+ unsigned int *page_offset_quantized);
+#define mlx5_umem_find_best_quantized_pgoff(umem, typ, log_pgsz_fld, \
+ pgsz_shift, page_offset_fld, \
+ scale, page_offset_quantized) \
+ __mlx5_umem_find_best_quantized_pgoff( \
+ umem, \
+ __mlx5_log_page_size_to_bitmap( \
+ __mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift), \
+ __mlx5_bit_sz(typ, page_offset_fld), \
+ GENMASK(31, order_base_2(scale)), scale, \
+ page_offset_quantized)
+
+#define mlx5_umem_find_best_cq_quantized_pgoff(umem, typ, log_pgsz_fld, \
+ pgsz_shift, page_offset_fld, \
+ scale, page_offset_quantized) \
+ __mlx5_umem_find_best_quantized_pgoff( \
+ umem, \
+ __mlx5_log_page_size_to_bitmap( \
+ __mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift), \
+ __mlx5_bit_sz(typ, page_offset_fld), 0, scale, \
+ page_offset_quantized)
-#define mlx5_ib_warn(dev, format, arg...) \
-pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
- __LINE__, current->pid, ##arg)
+enum {
+ MLX5_IB_MMAP_OFFSET_START = 9,
+ MLX5_IB_MMAP_OFFSET_END = 255,
+};
enum {
MLX5_IB_MMAP_CMD_SHIFT = 8,
MLX5_IB_MMAP_CMD_MASK = 0xff,
};
-enum mlx5_ib_mmap_cmd {
- MLX5_IB_MMAP_REGULAR_PAGE = 0,
- MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1, /* always last */
-};
-
enum {
MLX5_RES_SCAT_DATA32_CQE = 0x1,
MLX5_RES_SCAT_DATA64_CQE = 0x2,
@@ -72,19 +121,67 @@ enum {
MLX5_REQ_SCAT_DATA64_CQE = 0x22,
};
-enum mlx5_ib_latency_class {
- MLX5_IB_LATENCY_CLASS_LOW,
- MLX5_IB_LATENCY_CLASS_MEDIUM,
- MLX5_IB_LATENCY_CLASS_HIGH,
- MLX5_IB_LATENCY_CLASS_FAST_PATH
-};
-
enum mlx5_ib_mad_ifc_flags {
MLX5_MAD_IFC_IGNORE_MKEY = 1,
MLX5_MAD_IFC_IGNORE_BKEY = 2,
MLX5_MAD_IFC_NET_VIEW = 4,
};
+enum {
+ MLX5_CROSS_CHANNEL_BFREG = 0,
+};
+
+enum {
+ MLX5_CQE_VERSION_V0,
+ MLX5_CQE_VERSION_V1,
+};
+
+enum {
+ MLX5_TM_MAX_RNDV_MSG_SIZE = 64,
+ MLX5_TM_MAX_SGE = 1,
+};
+
+enum {
+ MLX5_IB_INVALID_UAR_INDEX = BIT(31),
+ MLX5_IB_INVALID_BFREG = BIT(31),
+};
+
+enum {
+ MLX5_MAX_MEMIC_PAGES = 0x100,
+ MLX5_MEMIC_ALLOC_SIZE_MASK = 0x3f,
+};
+
+enum {
+ MLX5_MEMIC_BASE_ALIGN = 6,
+ MLX5_MEMIC_BASE_SIZE = 1 << MLX5_MEMIC_BASE_ALIGN,
+};
+
+enum mlx5_ib_mmap_type {
+ MLX5_IB_MMAP_TYPE_MEMIC = 1,
+ MLX5_IB_MMAP_TYPE_VAR = 2,
+ MLX5_IB_MMAP_TYPE_UAR_WC = 3,
+ MLX5_IB_MMAP_TYPE_UAR_NC = 4,
+ MLX5_IB_MMAP_TYPE_MEMIC_OP = 5,
+};
+
+struct mlx5_bfreg_info {
+ u32 *sys_pages;
+ int num_low_latency_bfregs;
+ unsigned int *count;
+
+ /*
+ * protect bfreg allocation data structs
+ */
+ struct mutex lock;
+ u32 ver;
+ u8 lib_uar_4k : 1;
+ u8 lib_uar_dyn : 1;
+ u32 num_sys_pages;
+ u32 num_static_sys_pages;
+ u32 total_num_bfregs;
+ u32 num_dyn_bfregs;
+};
+
struct mlx5_ib_ucontext {
struct ib_ucontext ibucontext;
struct list_head db_page_list;
@@ -92,7 +189,15 @@ struct mlx5_ib_ucontext {
/* protect doorbell record alloc/free
*/
struct mutex db_page_mutex;
- struct mlx5_uuar_info uuari;
+ struct mlx5_bfreg_info bfregi;
+ u8 cqe_version;
+ /* Transport Domain number */
+ u32 tdn;
+
+ u64 lib_caps;
+ u16 devx_uid;
+ /* For RoCE LAG TX affinity */
+ atomic_t tx_port_affinity;
};
static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
@@ -103,25 +208,158 @@ static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibuconte
struct mlx5_ib_pd {
struct ib_pd ibpd;
u32 pdn;
- u32 pa_lkey;
+ u16 uid;
+};
+
+enum {
+ MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
+ MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
+ MLX5_IB_FLOW_ACTION_DECAP,
+};
+
+#define MLX5_IB_FLOW_MCAST_PRIO (MLX5_BY_PASS_NUM_PRIOS - 1)
+#define MLX5_IB_FLOW_LAST_PRIO (MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
+#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
+#error "Invalid number of bypass priorities"
+#endif
+#define MLX5_IB_FLOW_LEFTOVERS_PRIO (MLX5_IB_FLOW_MCAST_PRIO + 1)
+
+#define MLX5_IB_NUM_FLOW_FT (MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
+#define MLX5_IB_NUM_SNIFFER_FTS 2
+#define MLX5_IB_NUM_EGRESS_FTS 1
+#define MLX5_IB_NUM_FDB_FTS MLX5_BY_PASS_NUM_REGULAR_PRIOS
+
+struct mlx5_ib_anchor {
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg_goto_table;
+ struct mlx5_flow_group *fg_drop;
+ struct mlx5_flow_handle *rule_goto_table;
+ struct mlx5_flow_handle *rule_drop;
+ unsigned int rule_goto_table_ref;
+};
+
+struct mlx5_ib_flow_prio {
+ struct mlx5_flow_table *flow_table;
+ struct mlx5_ib_anchor anchor;
+ unsigned int refcount;
+};
+
+struct mlx5_ib_flow_handler {
+ struct list_head list;
+ struct ib_flow ibflow;
+ struct mlx5_ib_flow_prio *prio;
+ struct mlx5_flow_handle *rule;
+ struct ib_counters *ibcounters;
+ struct mlx5_ib_dev *dev;
+ struct mlx5_ib_flow_matcher *flow_matcher;
+};
+
+struct mlx5_ib_flow_matcher {
+ struct mlx5_ib_match_params matcher_mask;
+ int mask_len;
+ enum mlx5_ib_flow_type flow_type;
+ enum mlx5_flow_namespace_type ns_type;
+ u16 priority;
+ struct mlx5_core_dev *mdev;
+ atomic_t usecnt;
+ u8 match_criteria_enable;
+ u32 ib_port;
+};
+
+struct mlx5_ib_steering_anchor {
+ struct mlx5_ib_flow_prio *ft_prio;
+ struct mlx5_ib_dev *dev;
+ atomic_t usecnt;
+};
+
+struct mlx5_ib_pp {
+ u16 index;
+ struct mlx5_core_dev *mdev;
+};
+
+enum mlx5_ib_optional_counter_type {
+ MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS,
+ MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS,
+ MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS,
+ MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS,
+ MLX5_IB_OPCOUNTER_RDMA_TX_BYTES,
+ MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS,
+ MLX5_IB_OPCOUNTER_RDMA_RX_BYTES,
+
+ MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP,
+ MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS_PER_QP,
+ MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS_PER_QP,
+ MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS_PER_QP,
+ MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP,
+ MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS_PER_QP,
+ MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP,
+
+ MLX5_IB_OPCOUNTER_MAX,
+};
+
+struct mlx5_ib_flow_db {
+ struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
+ struct mlx5_ib_flow_prio egress_prios[MLX5_IB_NUM_FLOW_FT];
+ struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS];
+ struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS];
+ struct mlx5_ib_flow_prio fdb[MLX5_IB_NUM_FDB_FTS];
+ struct mlx5_ib_flow_prio rdma_rx[MLX5_IB_NUM_FLOW_FT];
+ struct mlx5_ib_flow_prio rdma_tx[MLX5_IB_NUM_FLOW_FT];
+ struct mlx5_ib_flow_prio opfcs[MLX5_IB_OPCOUNTER_MAX];
+ struct mlx5_flow_table *lag_demux_ft;
+ struct mlx5_ib_flow_prio *rdma_transport_rx[MLX5_RDMA_TRANSPORT_BYPASS_PRIO];
+ struct mlx5_ib_flow_prio *rdma_transport_tx[MLX5_RDMA_TRANSPORT_BYPASS_PRIO];
+ /* Protect flow steering bypass flow tables
+ * when adding/deleting flow rules; only a single add or removal of a
+ * flow steering rule can be done at a time.
+ */
+ struct mutex lock;
};
 /* Use macros here so that we don't have to duplicate
- * enum ib_send_flags and enum ib_qp_type for low-level driver
+ * enum ib_qp_type for low-level driver
*/
-#define MLX5_IB_SEND_UMR_UNREG IB_SEND_RESERVED_START
-#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1)
-#define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_QPT_REG_UMR IB_QPT_RESERVED1
+/*
+ * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
+ * creates the actual hardware QP.
+ */
+#define MLX5_IB_QPT_HW_GSI IB_QPT_RESERVED2
+#define MLX5_IB_QPT_DCI IB_QPT_RESERVED3
+#define MLX5_IB_QPT_DCT IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR IB_WR_RESERVED1
+#define MLX5_IB_UPD_XLT_ZAP BIT(0)
+#define MLX5_IB_UPD_XLT_ENABLE BIT(1)
+#define MLX5_IB_UPD_XLT_ATOMIC BIT(2)
+#define MLX5_IB_UPD_XLT_ADDR BIT(3)
+#define MLX5_IB_UPD_XLT_PD BIT(4)
+#define MLX5_IB_UPD_XLT_ACCESS BIT(5)
+#define MLX5_IB_UPD_XLT_INDIRECT BIT(6)
+#define MLX5_IB_UPD_XLT_DOWNGRADE BIT(7)
+#define MLX5_IB_UPD_XLT_KEEP_PGSZ BIT(8)
+
+/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
+ *
+ * These flags are intended for internal use by the mlx5_ib driver, and they
+ * rely on the range reserved for that use in the ib_qp_create_flags enum.
+ */
+#define MLX5_IB_QP_CREATE_SQPN_QP1 IB_QP_CREATE_RESERVED_START
+
struct wr_list {
u16 opcode;
u16 next;
};
+enum mlx5_ib_rq_flags {
+ MLX5_IB_RQ_CVLAN_STRIPPING = 1 << 0,
+ MLX5_IB_RQ_PCI_WRITE_END_PADDING = 1 << 1,
+};
+
struct mlx5_ib_wq {
+ struct mlx5_frag_buf_ctrl fbc;
u64 *wrid;
u32 *wr_data;
struct wr_list *w_list;
@@ -140,128 +378,189 @@ struct mlx5_ib_wq {
unsigned tail;
u16 cur_post;
u16 last_poll;
- void *qend;
+ void *cur_edge;
};
-enum {
- MLX5_QP_USER,
- MLX5_QP_KERNEL,
- MLX5_QP_EMPTY
+enum mlx5_ib_wq_flags {
+ MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
+ MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2,
};
-/*
- * Connect-IB can trigger up to four concurrent pagefaults
- * per-QP.
- */
-enum mlx5_ib_pagefault_context {
- MLX5_IB_PAGEFAULT_RESPONDER_READ,
- MLX5_IB_PAGEFAULT_REQUESTOR_READ,
- MLX5_IB_PAGEFAULT_RESPONDER_WRITE,
- MLX5_IB_PAGEFAULT_REQUESTOR_WRITE,
- MLX5_IB_PAGEFAULT_CONTEXTS
+#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
+#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
+#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
+#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
+#define MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES 3
+
+struct mlx5_ib_rwq {
+ struct ib_wq ibwq;
+ struct mlx5_core_qp core_qp;
+ u32 rq_num_pas;
+ u32 log_rq_stride;
+ u32 log_rq_size;
+ u32 rq_page_offset;
+ u32 log_page_size;
+ u32 log_num_strides;
+ u32 two_byte_shift_en;
+ u32 single_stride_log_num_of_bytes;
+ struct ib_umem *umem;
+ size_t buf_size;
+ unsigned int page_shift;
+ struct mlx5_db db;
+ u32 user_index;
+ u32 wqe_count;
+ u32 wqe_shift;
+ int wq_sig;
+ u32 create_flags; /* Use enum mlx5_ib_wq_flags */
};
-static inline enum mlx5_ib_pagefault_context
- mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault)
-{
- return pagefault->flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE);
-}
+struct mlx5_ib_rwq_ind_table {
+ struct ib_rwq_ind_table ib_rwq_ind_tbl;
+ u32 rqtn;
+ u16 uid;
+};
+
+struct mlx5_ib_ubuffer {
+ struct ib_umem *umem;
+ int buf_size;
+ u64 buf_addr;
+};
-struct mlx5_ib_pfault {
- struct work_struct work;
- struct mlx5_pagefault mpfault;
+struct mlx5_ib_qp_base {
+ struct mlx5_ib_qp *container_mibqp;
+ struct mlx5_core_qp mqp;
+ struct mlx5_ib_ubuffer ubuffer;
+};
+
+struct mlx5_ib_qp_trans {
+ struct mlx5_ib_qp_base base;
+ u16 xrcdn;
+ u32 alt_port;
+ u8 atomic_rd_en;
+ u8 resp_depth;
+};
+
+struct mlx5_ib_rss_qp {
+ u32 tirn;
+};
+
+struct mlx5_ib_rq {
+ struct mlx5_ib_qp_base base;
+ struct mlx5_ib_wq *rq;
+ struct mlx5_ib_ubuffer ubuffer;
+ struct mlx5_db *doorbell;
+ u32 tirn;
+ u8 state;
+ u32 flags;
+};
+
+struct mlx5_ib_sq {
+ struct mlx5_ib_qp_base base;
+ struct mlx5_ib_wq *sq;
+ struct mlx5_ib_ubuffer ubuffer;
+ struct mlx5_db *doorbell;
+ struct mlx5_flow_handle *flow_rule;
+ u32 tisn;
+ u8 state;
+};
+
+struct mlx5_ib_raw_packet_qp {
+ struct mlx5_ib_sq sq;
+ struct mlx5_ib_rq rq;
+};
+
+struct mlx5_bf {
+ int buf_size;
+ unsigned long offset;
+ struct mlx5_sq_bfreg *bfreg;
+};
+
+struct mlx5_ib_dct {
+ struct mlx5_core_dct mdct;
+ u32 *in;
+};
+
+struct mlx5_ib_gsi_qp {
+ struct ib_qp *rx_qp;
+ u32 port_num;
+ struct ib_qp_cap cap;
+ struct ib_cq *cq;
+ struct mlx5_ib_gsi_wr *outstanding_wrs;
+ u32 outstanding_pi, outstanding_ci;
+ int num_qps;
+ /* Protects access to the tx_qps. Post send operations synchronize
+ * with tx_qp creation in setup_qp(). Also protects the
+ * outstanding_wrs array and indices.
+ */
+ spinlock_t lock;
+ struct ib_qp **tx_qps;
};
struct mlx5_ib_qp {
struct ib_qp ibqp;
- struct mlx5_core_qp mqp;
- struct mlx5_buf buf;
+ union {
+ struct mlx5_ib_qp_trans trans_qp;
+ struct mlx5_ib_raw_packet_qp raw_packet_qp;
+ struct mlx5_ib_rss_qp rss_qp;
+ struct mlx5_ib_dct dct;
+ struct mlx5_ib_gsi_qp gsi;
+ };
+ struct mlx5_frag_buf buf;
struct mlx5_db db;
struct mlx5_ib_wq rq;
- u32 doorbell_qpn;
u8 sq_signal_bits;
- u8 fm_cache;
- int sq_max_wqes_per_wr;
- int sq_spare_wqes;
+ u8 next_fence;
struct mlx5_ib_wq sq;
- struct ib_umem *umem;
- int buf_size;
-
/* serialize qp state modifications
*/
struct mutex mutex;
- u16 xrcdn;
+ /* cached variant of create_flags from struct ib_qp_init_attr */
u32 flags;
- u8 port;
- u8 alt_port;
- u8 atomic_rd_en;
- u8 resp_depth;
+ u32 port;
u8 state;
- int mlx_type;
- int wq_sig;
- int scat_cqe;
int max_inline_data;
- struct mlx5_bf *bf;
- int has_rq;
+ struct mlx5_bf bf;
+ u8 has_rq:1;
+ u8 is_rss:1;
+ u8 is_ooo_rq:1;
/* only for user space QPs. For kernel
* we have it from the bf object
*/
- int uuarn;
-
- int create_type;
- u32 pa_lkey;
-
- /* Store signature errors */
- bool signature_en;
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ int bfregn;
+
+ struct list_head qps_list;
+ struct list_head cq_recv_list;
+ struct list_head cq_send_list;
+ struct mlx5_rate_limit rl;
+ u32 underlay_qpn;
+ u32 flags_en;
/*
- * A flag that is true for QP's that are in a state that doesn't
- * allow page faults, and shouldn't schedule any more faults.
+ * IB/core doesn't store low-level QP types, so
+ * store both MLX and IBTA types in the field below.
*/
- int disable_page_faults;
- /*
- * The disable_page_faults_lock protects a QP's disable_page_faults
- * field, allowing for a thread to atomically check whether the QP
- * allows page faults, and if so schedule a page fault.
+ enum ib_qp_type type;
+ /* A flag to indicate that a new counter is configured
+ * but has not yet taken effect
*/
- spinlock_t disable_page_faults_lock;
- struct mlx5_ib_pfault pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
-#endif
+ u32 counter_pending;
+ u16 gsi_lag_port;
};
struct mlx5_ib_cq_buf {
- struct mlx5_buf buf;
+ struct mlx5_frag_buf_ctrl fbc;
+ struct mlx5_frag_buf frag_buf;
struct ib_umem *umem;
int cqe_size;
int nent;
};
-enum mlx5_ib_qp_flags {
- MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK = 1 << 0,
- MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 1,
-};
-
-struct mlx5_umr_wr {
- union {
- u64 virt_addr;
- u64 offset;
- } target;
- struct ib_pd *pd;
- unsigned int page_shift;
- unsigned int npages;
- u32 length;
- int access_flags;
- u32 mkey;
-};
-
-struct mlx5_shared_mr_info {
- int mr_id;
- struct ib_umem *umem;
+enum mlx5_ib_cq_pr_flags {
+ MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD = 1 << 0,
+ MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS = 1 << 1,
};
struct mlx5_ib_cq {
@@ -280,13 +579,26 @@ struct mlx5_ib_cq {
struct mlx5_ib_cq_buf *resize_buf;
struct ib_umem *resize_umem;
int cqe_size;
+ struct list_head list_send_qp;
+ struct list_head list_recv_qp;
+ u32 create_flags;
+ struct list_head wc_list;
+ enum ib_cq_notify_flags notify_flags;
+ struct work_struct notify_work;
+ u16 private_flags; /* Use mlx5_ib_cq_pr_flags */
+};
+
+struct mlx5_ib_wc {
+ struct ib_wc wc;
+ struct list_head list;
};
struct mlx5_ib_srq {
struct ib_srq ibsrq;
struct mlx5_core_srq msrq;
- struct mlx5_buf buf;
+ struct mlx5_frag_buf buf;
struct mlx5_db db;
+ struct mlx5_frag_buf_ctrl fbc;
u64 *wrid;
 /* protect SRQ handling
*/
@@ -311,136 +623,612 @@ enum mlx5_ib_mtt_access_flags {
MLX5_IB_MTT_WRITE = (1 << 1),
};
+struct mlx5_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ u8 mmap_flag;
+ u64 address;
+ u32 page_idx;
+};
+
+enum mlx5_mkey_type {
+ MLX5_MKEY_MR = 1,
+ MLX5_MKEY_MW,
+ MLX5_MKEY_INDIRECT_DEVX,
+ MLX5_MKEY_NULL,
+ MLX5_MKEY_IMPLICIT_CHILD,
+};
+
+/* Used for non-existent ph value */
+#define MLX5_IB_NO_PH 0xff
+
+struct mlx5r_cache_rb_key {
+ u8 ats:1;
+ u8 ph;
+ u16 st_index;
+ unsigned int access_mode;
+ unsigned int access_flags;
+ unsigned int ndescs;
+};
+
+struct mlx5_ib_mkey {
+ u32 key;
+ enum mlx5_mkey_type type;
+ unsigned int ndescs;
+ struct wait_queue_head wait;
+ refcount_t usecount;
+ /* Cacheable user Mkey must hold either a rb_key or a cache_ent. */
+ struct mlx5r_cache_rb_key rb_key;
+ struct mlx5_cache_ent *cache_ent;
+ u8 cacheable : 1;
+};
+
#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
+#define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\
+ IB_ACCESS_REMOTE_WRITE |\
+ IB_ACCESS_REMOTE_READ |\
+ IB_ACCESS_REMOTE_ATOMIC |\
+ IB_ZERO_BASED)
+
+#define MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\
+ IB_ACCESS_REMOTE_WRITE |\
+ IB_ACCESS_REMOTE_READ |\
+ IB_ZERO_BASED)
+
+#define mlx5_update_odp_stats(mr, counter_name, value) \
+ atomic64_add(value, &((mr)->odp_stats.counter_name))
+
+#define mlx5_update_odp_stats_with_handled(mr, counter_name, value) \
+ do { \
+ mlx5_update_odp_stats(mr, counter_name, value); \
+ atomic64_add(1, &((mr)->odp_stats.counter_name##_handled)); \
+ } while (0)
+
struct mlx5_ib_mr {
- struct ib_mr ibmr;
- struct mlx5_core_mr mmr;
- struct ib_umem *umem;
- struct mlx5_shared_mr_info *smr_info;
- struct list_head list;
- int order;
- int umred;
- int npages;
- struct mlx5_ib_dev *dev;
- struct mlx5_create_mkey_mbox_out out;
- struct mlx5_core_sig_ctx *sig;
- int live;
+ struct ib_mr ibmr;
+ struct mlx5_ib_mkey mmkey;
+
+ struct ib_umem *umem;
+ /* The mr is data direct related */
+ u8 data_direct :1;
+
+ union {
+ /* Used only by kernel MRs (umem == NULL) */
+ struct {
+ void *descs;
+ void *descs_alloc;
+ dma_addr_t desc_map;
+ int max_descs;
+ int desc_size;
+ int access_mode;
+
+ /* For Kernel IB_MR_TYPE_INTEGRITY */
+ struct mlx5_core_sig_ctx *sig;
+ struct mlx5_ib_mr *pi_mr;
+ struct mlx5_ib_mr *klm_mr;
+ struct mlx5_ib_mr *mtt_mr;
+ u64 data_iova;
+ u64 pi_iova;
+ int meta_ndescs;
+ int meta_length;
+ int data_length;
+ };
+
+ /* Used only by User MRs (umem != NULL) */
+ struct {
+ unsigned int page_shift;
+ /* Current access_flags */
+ int access_flags;
+
+ /* For User ODP */
+ struct mlx5_ib_mr *parent;
+ struct xarray implicit_children;
+ union {
+ struct work_struct work;
+ } odp_destroy;
+ struct ib_odp_counters odp_stats;
+ bool is_odp_implicit;
+ /* The affiliated data direct crossed mr */
+ struct mlx5_ib_mr *dd_crossed_mr;
+ struct list_head dd_node;
+ u8 revoked :1;
+ /* Indicates previous dmabuf page fault occurred */
+ u8 dmabuf_faulted:1;
+ struct mlx5_ib_mkey null_mmkey;
+ };
+ };
};
-struct mlx5_ib_fast_reg_page_list {
- struct ib_fast_reg_page_list ibfrpl;
- __be64 *mapped_page_list;
- dma_addr_t map;
+static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
+{
+ return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
+ mr->umem->is_odp;
+}
+
+static inline bool is_dmabuf_mr(struct mlx5_ib_mr *mr)
+{
+ return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
+ mr->umem->is_dmabuf;
+}
+
+struct mlx5_ib_mw {
+ struct ib_mw ibmw;
+ struct mlx5_ib_mkey mmkey;
};
struct mlx5_ib_umr_context {
+ struct ib_cqe cqe;
enum ib_wc_status status;
struct completion done;
};
-static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
-{
- context->status = -1;
- init_completion(&context->done);
-}
+enum {
+ MLX5_UMR_STATE_UNINIT,
+ MLX5_UMR_STATE_ACTIVE,
+ MLX5_UMR_STATE_RECOVER,
+ MLX5_UMR_STATE_ERR,
+};
struct umr_common {
struct ib_pd *pd;
struct ib_cq *cq;
struct ib_qp *qp;
- /* control access to UMR QP
+ /* Protects from UMR QP overflow
*/
struct semaphore sem;
+ /* Protects from using UMR while the UMR is not active
+ */
+ struct mutex lock;
+ unsigned int state;
+ /* Protects from repeat UMR QP creation */
+ struct mutex init_lock;
};
-enum {
- MLX5_FMR_INVALID,
- MLX5_FMR_VALID,
- MLX5_FMR_BUSY,
+#define NUM_MKEYS_PER_PAGE \
+ ((PAGE_SIZE - sizeof(struct list_head)) / sizeof(u32))
+
+struct mlx5_mkeys_page {
+ u32 mkeys[NUM_MKEYS_PER_PAGE];
+ struct list_head list;
};
+static_assert(sizeof(struct mlx5_mkeys_page) == PAGE_SIZE);
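For the static_assert above, a quick arithmetic sketch under the common assumptions of a 4 KiB page and a two-pointer (16-byte) list_head on 64-bit; the numbers below are illustrative, not taken from this patch.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const size_t page_size = 4096;				/* assumed PAGE_SIZE */
	const size_t list_head_size = 2 * sizeof(void *);	/* 16 on 64-bit */
	const size_t num_mkeys = (page_size - list_head_size) / sizeof(uint32_t);

	/* 1020 mkeys, and 1020 * 4 + 16 fills the page exactly, which is
	 * what the static_assert in the header enforces.
	 */
	assert(num_mkeys * sizeof(uint32_t) + list_head_size == page_size);
	printf("mkeys per page = %zu\n", num_mkeys);
	return 0;
}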
-struct mlx5_ib_fmr {
- struct ib_fmr ibfmr;
- struct mlx5_core_mr mr;
- int access_flags;
- int state;
- /* protect fmr state
- */
- spinlock_t lock;
- u64 wrid;
- struct ib_send_wr wr[2];
- u8 page_shift;
- struct ib_fast_reg_page_list page_list;
+struct mlx5_mkeys_queue {
+ struct list_head pages_list;
+ u32 num_pages;
+ unsigned long ci;
+ spinlock_t lock; /* sync list ops */
};
struct mlx5_cache_ent {
- struct list_head head;
- /* sync access to the cahce entry
- */
- spinlock_t lock;
-
+ struct mlx5_mkeys_queue mkeys_queue;
+ u32 pending;
- struct dentry *dir;
char name[4];
- u32 order;
- u32 size;
- u32 cur;
- u32 miss;
- u32 limit;
- struct dentry *fsize;
- struct dentry *fcur;
- struct dentry *fmiss;
- struct dentry *flimit;
+ struct rb_node node;
+ struct mlx5r_cache_rb_key rb_key;
+
+ u8 is_tmp:1;
+ u8 disabled:1;
+ u8 fill_to_high_water:1;
+ u8 tmp_cleanup_scheduled:1;
+
+ /*
+ * - limit is the low water mark for stored mkeys, 2* limit is the
+ * upper water mark.
+ */
+ u32 in_use;
+ u32 limit;
+
+ /* Statistics */
+ u32 miss;
struct mlx5_ib_dev *dev;
- struct work_struct work;
struct delayed_work dwork;
- int pending;
};
-struct mlx5_mr_cache {
+struct mlx5r_async_create_mkey {
+ union {
+ u32 in[MLX5_ST_SZ_BYTES(create_mkey_in)];
+ u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
+ };
+ struct mlx5_async_work cb_work;
+ struct mlx5_cache_ent *ent;
+ u32 mkey;
+};
+
+struct mlx5_mkey_cache {
struct workqueue_struct *wq;
- struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];
- int stopped;
- struct dentry *root;
+ struct rb_root rb_root;
+ struct mutex rb_lock;
+ struct dentry *fs_root;
unsigned long last_add;
};
+struct mlx5_ib_port_resources {
+ struct mlx5_ib_gsi_qp *gsi;
+ struct work_struct pkey_change_work;
+};
+
+struct mlx5_data_direct_resources {
+ u32 pdn;
+ u32 mkey;
+ u32 mkey_ro;
+ u8 mkey_ro_valid :1;
+};
+
struct mlx5_ib_resources {
struct ib_cq *c0;
- struct ib_xrcd *x0;
- struct ib_xrcd *x1;
+ struct mutex cq_lock;
+ u32 xrcdn0;
+ u32 xrcdn1;
struct ib_pd *p0;
struct ib_srq *s0;
struct ib_srq *s1;
+ struct mutex srq_lock;
+ struct mlx5_ib_port_resources ports[2];
+};
+
+#define MAX_OPFC_RULES 2
+
+struct mlx5_ib_op_fc {
+ struct mlx5_fc *fc;
+ struct mlx5_flow_handle *rule[MAX_OPFC_RULES];
+};
+
+struct mlx5_ib_counters {
+ struct rdma_stat_desc *descs;
+ size_t *offsets;
+ u32 num_q_counters;
+ u32 num_cong_counters;
+ u32 num_ext_ppcnt_counters;
+ u32 num_op_counters;
+ u16 set_id;
+ struct mlx5_ib_op_fc opfcs[MLX5_IB_OPCOUNTER_MAX];
+};
+
+int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_ib_op_fc *opfc,
+ enum mlx5_ib_optional_counter_type type);
+
+void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_op_fc *opfc,
+ enum mlx5_ib_optional_counter_type type);
+
+int mlx5r_fs_bind_op_fc(struct ib_qp *qp,
+ struct mlx5_fc *fc_arr[MLX5_IB_OPCOUNTER_MAX],
+ struct xarray *qpn_opfc_xa, u32 port);
+
+void mlx5r_fs_unbind_op_fc(struct ib_qp *qp, struct xarray *qpn_opfc_xa);
+
+void mlx5r_fs_destroy_fcs(struct mlx5_ib_dev *dev,
+ struct mlx5_fc *fc_arr[MLX5_IB_OPCOUNTER_MAX]);
+
+struct mlx5_ib_multiport_info;
+
+struct mlx5_ib_multiport {
+ struct mlx5_ib_multiport_info *mpi;
+ /* To be held when accessing the multiport info */
+ spinlock_t mpi_lock;
+};
+
+struct mlx5_roce {
+ /* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
+ * netdev pointer
+ */
+ struct notifier_block nb;
+ struct netdev_net_notifier nn;
+ struct notifier_block mdev_nb;
+ struct net_device *tracking_netdev;
+ atomic_t tx_port_affinity;
+ enum ib_port_state last_port_state;
+ struct mlx5_ib_dev *dev;
+ u32 native_port_num;
+};
+
+struct mlx5_ib_port {
+ struct mlx5_ib_counters cnts;
+ struct mlx5_ib_multiport mp;
+ struct mlx5_ib_dbg_cc_params *dbg_cc_params;
+ struct mlx5_roce roce;
+ struct mlx5_eswitch_rep *rep;
+#ifdef CONFIG_MLX5_MACSEC
+ struct mlx5_reserved_gids *reserved_gids;
+#endif
+};
+
+struct mlx5_ib_dbg_param {
+ int offset;
+ struct mlx5_ib_dev *dev;
+ struct dentry *dentry;
+ u32 port_num;
+};
+
+enum mlx5_ib_dbg_cc_types {
+ MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
+ MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
+ MLX5_IB_DBG_CC_RP_TIME_RESET,
+ MLX5_IB_DBG_CC_RP_BYTE_RESET,
+ MLX5_IB_DBG_CC_RP_THRESHOLD,
+ MLX5_IB_DBG_CC_RP_AI_RATE,
+ MLX5_IB_DBG_CC_RP_MAX_RATE,
+ MLX5_IB_DBG_CC_RP_HAI_RATE,
+ MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
+ MLX5_IB_DBG_CC_RP_MIN_RATE,
+ MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
+ MLX5_IB_DBG_CC_RP_DCE_TCP_G,
+ MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
+ MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
+ MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
+ MLX5_IB_DBG_CC_RP_GD,
+ MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS,
+ MLX5_IB_DBG_CC_NP_CNP_DSCP,
+ MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
+ MLX5_IB_DBG_CC_NP_CNP_PRIO,
+ MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP_VALID,
+ MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP,
+ MLX5_IB_DBG_CC_MAX,
+};
+
+struct mlx5_ib_dbg_cc_params {
+ struct dentry *root;
+ struct mlx5_ib_dbg_param params[MLX5_IB_DBG_CC_MAX];
+};
+
+enum {
+ MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
+};
+
+struct mlx5_ib_delay_drop {
+ struct mlx5_ib_dev *dev;
+ struct work_struct delay_drop_work;
+ /* serialize setting of delay drop */
+ struct mutex lock;
+ u32 timeout;
+ bool activate;
+ atomic_t events_cnt;
+ atomic_t rqs_cnt;
+ struct dentry *dir_debugfs;
+};
+
+enum mlx5_ib_stages {
+ MLX5_IB_STAGE_INIT,
+ MLX5_IB_STAGE_FS,
+ MLX5_IB_STAGE_CAPS,
+ MLX5_IB_STAGE_NON_DEFAULT_CB,
+ MLX5_IB_STAGE_ROCE,
+ MLX5_IB_STAGE_QP,
+ MLX5_IB_STAGE_SRQ,
+ MLX5_IB_STAGE_DEVICE_RESOURCES,
+ MLX5_IB_STAGE_ODP,
+ MLX5_IB_STAGE_COUNTERS,
+ MLX5_IB_STAGE_CONG_DEBUGFS,
+ MLX5_IB_STAGE_BFREG,
+ MLX5_IB_STAGE_PRE_IB_REG_UMR,
+ MLX5_IB_STAGE_WHITELIST_UID,
+ MLX5_IB_STAGE_IB_REG,
+ MLX5_IB_STAGE_DEVICE_NOTIFIER,
+ MLX5_IB_STAGE_POST_IB_REG_UMR,
+ MLX5_IB_STAGE_DELAY_DROP,
+ MLX5_IB_STAGE_RESTRACK,
+ MLX5_IB_STAGE_MAX,
+};
+
+struct mlx5_ib_stage {
+ int (*init)(struct mlx5_ib_dev *dev);
+ void (*cleanup)(struct mlx5_ib_dev *dev);
+};
+
+#define STAGE_CREATE(_stage, _init, _cleanup) \
+ .stage[_stage] = {.init = _init, .cleanup = _cleanup}
+
+struct mlx5_ib_profile {
+ struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
+};
+
+struct mlx5_ib_multiport_info {
+ struct list_head list;
+ struct mlx5_ib_dev *ibdev;
+ struct mlx5_core_dev *mdev;
+ struct notifier_block mdev_events;
+ struct completion unref_comp;
+ u64 sys_image_guid;
+ u32 mdev_refcnt;
+ bool is_master;
+ bool unaffiliate;
+};
+
+struct mlx5_ib_flow_action {
+ struct ib_flow_action ib_action;
+ union {
+ struct {
+ u64 ib_flags;
+ struct mlx5_accel_esp_xfrm *ctx;
+ } esp_aes_gcm;
+ struct {
+ struct mlx5_ib_dev *dev;
+ u32 sub_type;
+ union {
+ struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5_pkt_reformat *pkt_reformat;
+ };
+ } flow_action_raw;
+ };
+};
+
+struct mlx5_dm {
+ struct mlx5_core_dev *dev;
+ /* This lock is used to protect the access to the shared
+ * allocation map when concurrent requests by different
+ * processes are handled.
+ */
+ spinlock_t lock;
+ DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
+};
+
+struct mlx5_read_counters_attr {
+ struct mlx5_fc *hw_cntrs_hndl;
+ u64 *out;
+ u32 flags;
+};
+
+enum mlx5_ib_counters_type {
+ MLX5_IB_COUNTERS_FLOW,
+};
+
+struct mlx5_ib_mcounters {
+ struct ib_counters ibcntrs;
+ enum mlx5_ib_counters_type type;
+ /* number of counters supported for this counters type */
+ u32 counters_num;
+ struct mlx5_fc *hw_cntrs_hndl;
+ /* read function for this counters type */
+ int (*read_counters)(struct ib_device *ibdev,
+ struct mlx5_read_counters_attr *read_attr);
+ /* max index set as part of create_flow */
+ u32 cntrs_max_index;
+ /* number of counters data entries (<description,index> pair) */
+ u32 ncounters;
+ /* counters data array for descriptions and indexes */
+ struct mlx5_ib_flow_counters_desc *counters_data;
+ /* protects access to mcounters internal data */
+ struct mutex mcntrs_mutex;
+};
+
+static inline struct mlx5_ib_mcounters *
+to_mcounters(struct ib_counters *ibcntrs)
+{
+ return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
+}
+
+int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
+ bool is_egress,
+ struct mlx5_flow_act *action);
+struct mlx5_ib_lb_state {
+ /* protect the user_td */
+ struct mutex mutex;
+ u32 user_td;
+ int qps;
+ bool enabled;
+ bool force_enable;
+};
+
+struct mlx5_ib_pf_eq {
+ struct notifier_block irq_nb;
+ struct mlx5_ib_dev *dev;
+ struct mlx5_eq *core;
+ struct work_struct work;
+ spinlock_t lock; /* Pagefaults spinlock */
+ struct workqueue_struct *wq;
+ mempool_t *pool;
+};
+
+struct mlx5_devx_event_table {
+ struct mlx5_nb devx_nb;
+ /* serialize updating the event_xa */
+ struct mutex event_xa_lock;
+ struct xarray event_xa;
+};
+
+struct mlx5_var_table {
+ /* serialize updating the bitmap */
+ struct mutex bitmap_lock;
+ unsigned long *bitmap;
+ u64 hw_start_addr;
+ u32 stride_size;
+ u64 num_var_hw_entries;
+};
+
+struct mlx5_port_caps {
+ bool has_smi;
+ u8 ext_port_cap;
+};
+
+
+struct mlx5_special_mkeys {
+ u32 dump_fill_mkey;
+ __be32 null_mkey;
+ __be32 terminate_scatter_list_mkey;
+};
+
+struct mlx5_macsec {
+ struct mutex lock; /* Protects mlx5_macsec internal contexts */
+ struct list_head macsec_devices_list;
+ struct notifier_block blocking_events_nb;
};
struct mlx5_ib_dev {
struct ib_device ib_dev;
struct mlx5_core_dev *mdev;
- MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
+ struct mlx5_data_direct_dev *data_direct_dev;
+ /* protect accessing data_direct_dev */
+ struct mutex data_direct_lock;
+ struct notifier_block mdev_events;
+ struct notifier_block lag_events;
int num_ports;
/* serialize update of capability mask
*/
struct mutex cap_mask_mutex;
- bool ib_active;
+ u8 ib_active:1;
+ u8 is_rep:1;
+ u8 lag_active:1;
+ u8 fill_delay;
struct umr_common umrc;
/* sync used page count stats
*/
struct mlx5_ib_resources devr;
- struct mlx5_mr_cache cache;
+
+ atomic_t mkey_var;
+ struct mlx5_mkey_cache cache;
struct timer_list delay_timer;
- int fill_delay;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ /* Prevents soft lock on massive reg MRs */
+ struct mutex slow_path_mutex;
struct ib_odp_caps odp_caps;
- /*
- * Sleepable RCU that prevents destruction of MRs while they are still
- * being used by a page fault handler.
- */
- struct srcu_struct mr_srcu;
+ u64 odp_max_size;
+ struct mutex odp_eq_mutex;
+ struct mlx5_ib_pf_eq odp_pf_eq;
+
+ struct xarray odp_mkeys;
+
+ struct mlx5_ib_flow_db *flow_db;
+ /* protect resources needed as part of reset flow */
+ spinlock_t reset_flow_resource_lock;
+ struct list_head qp_list;
+ struct list_head data_direct_mr_list;
+ /* Array with num_ports elements */
+ struct mlx5_ib_port *port;
+ struct mlx5_sq_bfreg bfreg;
+ struct mlx5_sq_bfreg fp_bfreg;
+ struct mlx5_ib_delay_drop delay_drop;
+ const struct mlx5_ib_profile *profile;
+
+ struct mlx5_ib_lb_state lb;
+ u8 umr_fence;
+ struct list_head ib_dev_list;
+ u64 sys_image_guid;
+ struct mlx5_dm dm;
+ u16 devx_whitelist_uid;
+ struct mlx5_srq_table srq_table;
+ struct mlx5_qp_table qp_table;
+ struct mlx5_async_ctx async_ctx;
+ struct mlx5_devx_event_table devx_event_table;
+ struct mlx5_var_table var_table;
+
+ struct xarray sig_mrs;
+ struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
+ u16 pkey_table_len;
+ u8 lag_ports;
+ struct mlx5_special_mkeys mkeys;
+ struct mlx5_data_direct_resources ddr;
+
+#ifdef CONFIG_MLX5_MACSEC
+ struct mlx5_macsec macsec;
#endif
+
+ u8 num_plane;
+ struct mlx5_ib_dev *smi_dev;
+ const char *sub_dev_name;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -458,9 +1246,17 @@ static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}
-static inline struct mlx5_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
+static inline struct mlx5_ib_dev *mr_to_mdev(struct mlx5_ib_mr *mr)
{
- return container_of(ibfmr, struct mlx5_ib_fmr, ibfmr);
+ return to_mdev(mr->ibmr.device);
+}
+
+static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata)
+{
+ struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+
+ return to_mdev(context->ibucontext.device);
}
static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
@@ -470,12 +1266,12 @@ static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
- return container_of(mqp, struct mlx5_ib_qp, mqp);
+ return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}
-static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mr *mmr)
+static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
- return container_of(mmr, struct mlx5_ib_mr, mmr);
+ return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}
static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
@@ -493,6 +1289,16 @@ static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}
+static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
+{
+ return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
+}
+
+static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
+{
+ return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
+}
+
static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
return container_of(msrq, struct mlx5_ib_srq, msrq);
@@ -503,100 +1309,116 @@ static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}
-static inline struct mlx5_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
+static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
- return container_of(ibfrpl, struct mlx5_ib_fast_reg_page_list, ibfrpl);
+ return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}
-struct mlx5_ib_ah {
- struct ib_ah ibah;
- struct mlx5_av av;
-};
+static inline struct mlx5_ib_flow_action *
+to_mflow_act(struct ib_flow_action *ibact)
+{
+ return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
+}
-static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
+static inline struct mlx5_user_mmap_entry *
+to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
{
- return container_of(ibah, struct mlx5_ib_ah, ibah);
+ return container_of(rdma_entry,
+ struct mlx5_user_mmap_entry, rdma_entry);
}
+int mlx5_ib_dev_res_cq_init(struct mlx5_ib_dev *dev);
+int mlx5_ib_dev_res_srq_init(struct mlx5_ib_dev *dev);
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
-int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
- u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const void *in_mad, void *response_mad);
-struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr,
- struct mlx5_ib_ah *ah);
-struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
-int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
-int mlx5_ib_destroy_ah(struct ib_ah *ah);
-struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *init_attr,
- struct ib_udata *udata);
+int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
+int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
+static inline int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags)
+{
+ return 0;
+}
+int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
-int mlx5_ib_destroy_srq(struct ib_srq *srq);
-int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
-struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata);
+int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
+int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
+void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
+int mlx5_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr);
-int mlx5_ib_destroy_qp(struct ib_qp *qp);
-int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
-void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
-int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
- void *buffer, u32 length);
-struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata);
-int mlx5_ib_destroy_cq(struct ib_cq *cq);
+int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
+void mlx5_ib_drain_sq(struct ib_qp *qp);
+void mlx5_ib_drain_rq(struct ib_qp *qp);
+int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc);
+int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc);
+int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc);
+int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int mlx5_ib_pre_destroy_cq(struct ib_cq *cq);
+void mlx5_ib_post_destroy_cq(struct ib_cq *cq);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
-int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
- int npages, int zap);
-int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
-struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
+struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
+ u64 length, u64 virt_addr,
+ int fd, int access_flags,
+ struct ib_dmah *dmah,
+ struct uverbs_attr_bundle *attrs);
+int mlx5_ib_advise_mr(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 flags,
+ struct ib_sge *sg_list,
+ u32 num_sge,
+ struct uverbs_attr_bundle *attrs);
+int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
+int mlx5_ib_dealloc_mw(struct ib_mw *mw);
+struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
+ int access_flags);
+void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr);
+struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
+ u64 length, u64 virt_addr, int access_flags,
+ struct ib_pd *pd, struct ib_udata *udata);
+int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
+struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg);
-struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
- int page_list_len);
-void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
-struct ib_fmr *mlx5_ib_fmr_alloc(struct ib_pd *pd, int acc,
- struct ib_fmr_attr *fmr_attr);
-int mlx5_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int npages, u64 iova);
-int mlx5_ib_unmap_fmr(struct list_head *fmr_list);
-int mlx5_ib_fmr_dealloc(struct ib_fmr *ibfmr);
-int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
+ u32 max_num_sg,
+ u32 max_num_meta_sg);
+int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
+int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
+ int data_sg_nents, unsigned int *data_sg_offset,
+ struct scatterlist *meta_sg, int meta_sg_nents,
+ unsigned int *meta_sg_offset);
+int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
-struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata);
-int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
-int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
-int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
-int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
- struct ib_smp *out_mad);
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
+int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
+int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
+int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
__be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
@@ -605,87 +1427,399 @@ int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
-int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
+int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey);
-int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
+int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid);
-int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
+int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props);
-int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
+int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props);
-int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
-void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
-void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
- int *ncont, int *order);
-void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
- int page_shift, size_t offset, size_t num_pages,
- __be64 *pas, int access_flags);
-void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
- int page_shift, __be64 *pas, int access_flags);
-void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
-int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
-int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
-int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
-int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
-void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
+void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
+ u64 access_flags);
+int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
+int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev);
+void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev);
+struct mlx5_cache_ent *
+mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev,
+ struct mlx5r_cache_rb_key rb_key,
+ bool persistent_entry);
+
+struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
+ int access_flags, int access_mode,
+ int ndescs);
+
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
struct ib_mr_status *mr_status);
+struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr,
+ struct ib_udata *udata);
+int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
+int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
+ u32 wq_attr_mask, struct ib_udata *udata);
+int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ struct ib_udata *udata);
+int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
+struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
+ struct ib_dm_mr_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+void mlx5_ib_data_direct_bind(struct mlx5_ib_dev *ibdev,
+ struct mlx5_data_direct_dev *dev);
+void mlx5_ib_data_direct_unbind(struct mlx5_ib_dev *ibdev);
+void mlx5_ib_revoke_data_direct_mrs(struct mlx5_ib_dev *dev);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-extern struct workqueue_struct *mlx5_ib_page_fault_wq;
-
-void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
-void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
- struct mlx5_ib_pfault *pfault);
-void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
-void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
+int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq);
+void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
-void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
-void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
-void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
- unsigned long end);
-
+int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev);
+int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags);
+
+int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 flags, struct ib_sge *sg_list, u32 num_sge);
+int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr);
+int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
-static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
+static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
+static inline int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_pf_eq *eq)
{
- return;
+ return 0;
}
-
-static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {}
-static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
-static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
+static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
-static inline void mlx5_ib_odp_cleanup(void) {}
-static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {}
-static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}
+static inline void mlx5_ib_odp_cleanup(void) {}
+static inline int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev)
+{
+ return 0;
+}
+static inline int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags)
+{
+ return -EOPNOTSUPP;
+}
+static inline int
+mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice, u32 flags,
+ struct ib_sge *sg_list, u32 num_sge)
+{
+ return -EOPNOTSUPP;
+}
+static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
+{
+ return -EOPNOTSUPP;
+}
+static inline int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
-static inline void init_query_mad(struct ib_smp *mad)
+extern const struct mmu_interval_notifier_ops mlx5_mn_ops;
+
+/* Needed for rep profile */
+void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
+ const struct mlx5_ib_profile *profile,
+ int stage);
+int __mlx5_ib_add(struct mlx5_ib_dev *dev,
+ const struct mlx5_ib_profile *profile);
+
+int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
+ u32 port, struct ifla_vf_info *info);
+int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
+ u32 port, int state);
+int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
+ u32 port, struct ifla_vf_stats *stats);
+int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid);
+int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u32 port,
+ u64 guid, int type);
+
+__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
+ const struct ib_gid_attr *attr);
+
+void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);
+void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);
+
+/* GSI QP helper functions */
+int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
+ struct ib_qp_init_attr *attr);
+int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp);
+int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask);
+int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr);
+int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);
+
+int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
+
+void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
+ int bfregn);
+struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
+struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
+ u32 ib_port_num,
+ u32 *native_port_num);
+void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
+ u32 port_num);
+
+extern const struct uapi_definition mlx5_ib_devx_defs[];
+extern const struct uapi_definition mlx5_ib_flow_defs[];
+extern const struct uapi_definition mlx5_ib_qos_defs[];
+extern const struct uapi_definition mlx5_ib_std_types_defs[];
+extern const struct uapi_definition mlx5_ib_create_cq_defs[];
+
+static inline int is_qp1(enum ib_qp_type qp_type)
{
- mad->base_version = 1;
- mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- mad->class_version = 1;
- mad->method = IB_MGMT_METHOD_GET;
+ return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI;
}
-static inline u8 convert_access(int acc)
+static inline u32 check_cq_create_flags(u32 flags)
{
- return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
- (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
- (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
- (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
- MLX5_PERM_LOCAL_READ;
+ /*
+	 * It returns a non-zero value for unsupported CQ
+	 * create flags and zero otherwise.
+ */
+ return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
+ IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}
-static inline int is_qp1(enum ib_qp_type qp_type)
+static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
+ u32 *user_index)
+{
+ if (cqe_version) {
+ if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
+ (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
+ return -EINVAL;
+ *user_index = cmd_uidx;
+ } else {
+ *user_index = MLX5_IB_DEFAULT_UIDX;
+ }
+
+ return 0;
+}
+
+static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
+ struct mlx5_ib_create_qp *ucmd,
+ int inlen,
+ u32 *user_index)
+{
+ u8 cqe_version = ucontext->cqe_version;
+
+ if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
+ (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
+ return 0;
+
+ if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
+ return -EINVAL;
+
+ return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
+}
+
+static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
+ struct mlx5_ib_create_srq *ucmd,
+ int inlen,
+ u32 *user_index)
+{
+ u8 cqe_version = ucontext->cqe_version;
+
+ if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
+ (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
+ return 0;
+
+ if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
+ return -EINVAL;
+
+ return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
+}
+
+static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
+{
+ return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
+ MLX5_UARS_IN_PAGE : 1;
+}
+
+extern void *xlt_emergency_page;
+
+int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi, u32 bfregn,
+ bool dyn_bfreg);
+
+static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_mkey *mmkey)
+{
+ refcount_set(&mmkey->usecount, 1);
+
+ return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mmkey->key),
+ mmkey, GFP_KERNEL));
+}
+
+/* deref an mkey that can participate in ODP flow */
+static inline void mlx5r_deref_odp_mkey(struct mlx5_ib_mkey *mmkey)
+{
+ if (refcount_dec_and_test(&mmkey->usecount))
+ wake_up(&mmkey->wait);
+}
+
+/* deref an mkey that can participate in ODP flow and wait for release */
+static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_ib_mkey *mmkey)
+{
+ mlx5r_deref_odp_mkey(mmkey);
+ wait_event(mmkey->wait, refcount_read(&mmkey->usecount) == 0);
+}
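
Purely as an illustration (editorial note, not part of the patch): a minimal sketch of how these helpers are meant to pair up over an ODP-capable mkey's lifetime. The function names below are hypothetical; only mlx5r_store_odp_mkey(), mlx5r_deref_wait_odp_mkey(), mlx5_base_mkey() and the odp_mkeys xarray are taken from the driver.

static int example_publish_odp_mkey(struct mlx5_ib_dev *dev,
				    struct mlx5_ib_mkey *mmkey)
{
	/* usecount starts at 1; page-fault handlers can now look it up */
	return mlx5r_store_odp_mkey(dev, mmkey);
}

static void example_retire_odp_mkey(struct mlx5_ib_dev *dev,
				    struct mlx5_ib_mkey *mmkey)
{
	/* hide the mkey so no new page-fault handler can find it */
	xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmkey->key));
	/* drop the initial reference and wait for in-flight users to finish */
	mlx5r_deref_wait_odp_mkey(mmkey);
}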
+
+static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
+{
+ /*
+	 * If the driver is in hash mode and the port_select_flow_table_bypass
+	 * cap is supported, the driver no longer needs to assign port affinity
+	 * by default. If a user wants to set the port affinity explicitly,
+	 * there is a dedicated API for that, so no default assignment is needed.
+ */
+ if (dev->lag_active &&
+ mlx5_lag_mode_is_hash(dev->mdev) &&
+ MLX5_CAP_PORT_SELECTION(dev->mdev, port_select_flow_table_bypass))
+ return 0;
+
+ if (mlx5_lag_is_lacp_owner(dev->mdev) && !dev->lag_active)
+ return 0;
+
+ return dev->lag_active ||
+ (MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
+ MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
+}
+
+static inline bool rt_supported(int ts_cap)
+{
+ return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME ||
+ ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
+}
+
+/*
+ * PCI Peer to Peer is a trainwreck. If no switch is present then things
+ * sometimes work, depending on the pci_distance_p2p logic for excluding broken
+ * root complexes. However if a switch is present in the path, then things get
+ * really ugly depending on how the switch is setup. This table assumes that the
+ * root complex is strict and is validating that all req/reps are matched
+ * perfectly - so any scenario where it sees only half the transaction is a
+ * failure.
+ *
+ * CR/RR/DT ATS RO P2P
+ * 00X X X OK
+ * 010 X X fails (request is routed to root but root never sees comp)
+ * 011 0 X fails (request is routed to root but root never sees comp)
+ * 011 1 X OK
+ * 10X X 1 OK
+ * 101 X 0 fails (completion is routed to root but root didn't see req)
+ * 110 X 0 SLOW
+ * 111 0 0 SLOW
+ * 111 1 0 fails (completion is routed to root but root didn't see req)
+ * 111 1 1 OK
+ *
+ * Unfortunately we cannot reliably know if a switch is present or what the
+ * CR/RR/DT ACS settings are, since in a VM all of that is hidden. Assume that
+ * CR/RR/DT is 111 if the ATS cap is enabled and follow the last three rows.
+ *
+ * For now assume if the umem is a dma_buf then it is P2P.
+ */
+static inline bool mlx5_umem_needs_ats(struct mlx5_ib_dev *dev,
+ struct ib_umem *umem, int access_flags)
+{
+ if (!MLX5_CAP_GEN(dev->mdev, ats) || !umem->is_dmabuf)
+ return false;
+ return access_flags & IB_ACCESS_RELAXED_ORDERING;
+}
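
For illustration only (editorial note, not part of the patch): the ATS decision is a per-mkey property, and elsewhere in this patch the cache path programs it into the mkey context from ent->rb_key.ats via ma_translation_mode. A hypothetical registration helper could do the same directly:

static void example_program_ats(struct mlx5_ib_dev *dev, struct ib_umem *umem,
				int access_flags, void *mkc)
{
	bool ats = mlx5_umem_needs_ats(dev, umem, access_flags);

	/* request address translation services for P2P/dma-buf mappings */
	MLX5_SET(mkc, mkc, ma_translation_mode, !!ats);
}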
+
+int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
+ unsigned int index, const union ib_gid *gid,
+ const struct ib_gid_attr *attr);
+
+static inline u32 smi_to_native_portnum(struct mlx5_ib_dev *dev, u32 port)
+{
+ return (port - 1) / dev->num_ports + 1;
+}
+
+static inline unsigned int get_max_log_entity_size_cap(struct mlx5_ib_dev *dev,
+ int access_mode)
+{
+ int max_log_size = 0;
+
+ if (access_mode == MLX5_MKC_ACCESS_MODE_MTT)
+ max_log_size =
+ MLX5_CAP_GEN_2(dev->mdev, max_mkey_log_entity_size_mtt);
+ else if (access_mode == MLX5_MKC_ACCESS_MODE_KSM)
+ max_log_size = MLX5_CAP_GEN_2(
+ dev->mdev, max_mkey_log_entity_size_fixed_buffer);
+
+ if (!max_log_size ||
+ (max_log_size > 31 &&
+ !MLX5_CAP_GEN_2(dev->mdev, umr_log_entity_size_5)))
+ max_log_size = 31;
+
+ return max_log_size;
+}
+
+static inline unsigned int get_min_log_entity_size_cap(struct mlx5_ib_dev *dev,
+ int access_mode)
{
- return qp_type == IB_QPT_GSI;
+ int min_log_size = 0;
+
+ if (access_mode == MLX5_MKC_ACCESS_MODE_KSM &&
+ MLX5_CAP_GEN_2(dev->mdev,
+ min_mkey_log_entity_size_fixed_buffer_valid))
+ min_log_size = MLX5_CAP_GEN_2(
+ dev->mdev, min_mkey_log_entity_size_fixed_buffer);
+ else
+ min_log_size =
+ MLX5_CAP_GEN_2(dev->mdev, log_min_mkey_entity_size);
+
+ min_log_size = max(min_log_size, MLX5_ADAPTER_PAGE_SHIFT);
+ return min_log_size;
}
-#define MLX5_MAX_UMR_SHIFT 16
-#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
+/*
+ * For mkc users, instead of a page_offset the command has a start_iova which
+ * specifies both the page_offset and the on-the-wire IOVA
+ */
+static __always_inline unsigned long
+mlx5_umem_mkc_find_best_pgsz(struct mlx5_ib_dev *dev, struct ib_umem *umem,
+ u64 iova, int access_mode)
+{
+ unsigned int max_log_entity_size_cap, min_log_entity_size_cap;
+ unsigned long bitmap;
+
+ max_log_entity_size_cap = get_max_log_entity_size_cap(dev, access_mode);
+ min_log_entity_size_cap = get_min_log_entity_size_cap(dev, access_mode);
+
+ bitmap = GENMASK_ULL(max_log_entity_size_cap, min_log_entity_size_cap);
+
+	/* In KSM mode, HW requires the IOVA to be aligned to the mkey page size */
+ if (access_mode == MLX5_MKC_ACCESS_MODE_KSM && iova)
+ bitmap &= GENMASK_ULL(__ffs64(iova), 0);
+
+ return ib_umem_find_best_pgsz(umem, bitmap, iova);
+}
+
+static inline unsigned long
+mlx5_umem_dmabuf_find_best_pgsz(struct ib_umem_dmabuf *umem_dmabuf,
+ int access_mode)
+{
+ return mlx5_umem_mkc_find_best_pgsz(to_mdev(umem_dmabuf->umem.ibdev),
+ &umem_dmabuf->umem,
+ umem_dmabuf->umem.iova,
+ access_mode);
+}
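
For illustration only (editorial note, not part of the patch): a sketch of how a registration path might consume the chosen page size. example_prepare_mkc() is a hypothetical helper; MLX5_MKC_ACCESS_MODE_MTT, order_base_2() and the mkc fields are existing symbols, and the driver programs log_page_size from the selected size in the same way.

static int example_prepare_mkc(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			       u64 iova, void *mkc)
{
	unsigned long page_size;

	page_size = mlx5_umem_mkc_find_best_pgsz(dev, umem, iova,
						 MLX5_MKC_ACCESS_MODE_MTT);
	if (WARN_ON(!page_size))
		return -EINVAL;

	/* start_iova carries both the page offset and the on-the-wire IOVA */
	MLX5_SET(mkc, mkc, log_page_size, order_base_2(page_size));
	MLX5_SET64(mkc, mkc, start_addr, iova);
	return 0;
}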
#endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 54a15b5d336d..325fa04cbe8a 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2020, Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -36,179 +37,346 @@
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
-#include <rdma/ib_umem.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-resv.h>
#include <rdma/ib_umem_odp.h>
-#include <rdma/ib_verbs.h>
+#include "dm.h"
#include "mlx5_ib.h"
+#include "umr.h"
+#include "data_direct.h"
+#include "dmah.h"
enum {
MAX_PENDING_REG_MR = 8,
};
+#define MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS 4
#define MLX5_UMR_ALIGN 2048
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-static __be64 mlx5_ib_update_mtt_emergency_buffer[
- MLX5_UMR_MTT_MIN_CHUNK_SIZE/sizeof(__be64)]
- __aligned(MLX5_UMR_ALIGN);
-static DEFINE_MUTEX(mlx5_ib_update_mtt_emergency_buffer_mutex);
-#endif
-static int clean_mr(struct mlx5_ib_mr *mr);
+static void
+create_mkey_callback(int status, struct mlx5_async_work *context);
+static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
+ u64 iova, int access_flags,
+ unsigned long page_size, bool populate,
+ int access_mode, u16 st_index, u8 ph);
+static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr);
+
+static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
+ struct ib_pd *pd)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+
+ MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
+ MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
+ MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
+ MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
+ MLX5_SET(mkc, mkc, lr, 1);
+
+ if (acc & IB_ACCESS_RELAXED_ORDERING) {
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
+ MLX5_SET(mkc, mkc, relaxed_ordering_write, 1);
+
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) ||
+ (MLX5_CAP_GEN(dev->mdev,
+ relaxed_ordering_read_pci_enabled) &&
+ pcie_relaxed_ordering_enabled(dev->mdev->pdev)))
+ MLX5_SET(mkc, mkc, relaxed_ordering_read, 1);
+ }
+
+ MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET64(mkc, mkc, start_addr, start_addr);
+}
+
+static void assign_mkey_variant(struct mlx5_ib_dev *dev, u32 *mkey, u32 *in)
+{
+ u8 key = atomic_inc_return(&dev->mkey_var);
+ void *mkc;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, mkey_7_0, key);
+ *mkey = key;
+}
+
+static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_mkey *mkey, u32 *in, int inlen)
+{
+ int ret;
+
+ assign_mkey_variant(dev, &mkey->key, in);
+ ret = mlx5_core_create_mkey(dev->mdev, &mkey->key, in, inlen);
+ if (!ret)
+ init_waitqueue_head(&mkey->wait);
+
+ return ret;
+}
+
+static int mlx5_ib_create_mkey_cb(struct mlx5r_async_create_mkey *async_create)
+{
+ struct mlx5_ib_dev *dev = async_create->ent->dev;
+ size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ size_t outlen = MLX5_ST_SZ_BYTES(create_mkey_out);
+
+ MLX5_SET(create_mkey_in, async_create->in, opcode,
+ MLX5_CMD_OP_CREATE_MKEY);
+ assign_mkey_variant(dev, &async_create->mkey, async_create->in);
+ return mlx5_cmd_exec_cb(&dev->async_ctx, async_create->in, inlen,
+ async_create->out, outlen, create_mkey_callback,
+ &async_create->cb_work);
+}
+
+static int mkey_cache_max_order(struct mlx5_ib_dev *dev);
+static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
- int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
+ WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- /* Wait until all page fault handlers using the mr complete. */
- synchronize_srcu(&dev->mr_srcu);
-#endif
+ return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
+}
- return err;
+static void create_mkey_warn(struct mlx5_ib_dev *dev, int status, void *out)
+{
+ if (status == -ENXIO) /* core driver is not available */
+ return;
+
+ mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
+ if (status != -EREMOTEIO) /* driver specific failure */
+ return;
+
+ /* Failed in FW, print cmd out failure details */
+ mlx5_cmd_out_err(dev->mdev, MLX5_CMD_OP_CREATE_MKEY, 0, out);
}
-static int order2idx(struct mlx5_ib_dev *dev, int order)
+static int push_mkey_locked(struct mlx5_cache_ent *ent, u32 mkey)
{
- struct mlx5_mr_cache *cache = &dev->cache;
+ unsigned long tmp = ent->mkeys_queue.ci % NUM_MKEYS_PER_PAGE;
+ struct mlx5_mkeys_page *page;
+
+ lockdep_assert_held(&ent->mkeys_queue.lock);
+ if (ent->mkeys_queue.ci >=
+ ent->mkeys_queue.num_pages * NUM_MKEYS_PER_PAGE) {
+ page = kzalloc(sizeof(*page), GFP_ATOMIC);
+ if (!page)
+ return -ENOMEM;
+ ent->mkeys_queue.num_pages++;
+ list_add_tail(&page->list, &ent->mkeys_queue.pages_list);
+ } else {
+ page = list_last_entry(&ent->mkeys_queue.pages_list,
+ struct mlx5_mkeys_page, list);
+ }
- if (order < cache->ent[0].order)
- return 0;
- else
- return order - cache->ent[0].order;
+ page->mkeys[tmp] = mkey;
+ ent->mkeys_queue.ci++;
+ return 0;
}
-static void reg_mr_callback(int status, void *context)
+static int pop_mkey_locked(struct mlx5_cache_ent *ent)
{
- struct mlx5_ib_mr *mr = context;
- struct mlx5_ib_dev *dev = mr->dev;
- struct mlx5_mr_cache *cache = &dev->cache;
- int c = order2idx(dev, mr->order);
- struct mlx5_cache_ent *ent = &cache->ent[c];
- u8 key;
+ unsigned long tmp = (ent->mkeys_queue.ci - 1) % NUM_MKEYS_PER_PAGE;
+ struct mlx5_mkeys_page *last_page;
+ u32 mkey;
+
+ lockdep_assert_held(&ent->mkeys_queue.lock);
+ last_page = list_last_entry(&ent->mkeys_queue.pages_list,
+ struct mlx5_mkeys_page, list);
+ mkey = last_page->mkeys[tmp];
+ last_page->mkeys[tmp] = 0;
+ ent->mkeys_queue.ci--;
+ if (ent->mkeys_queue.num_pages > 1 && !tmp) {
+ list_del(&last_page->list);
+ ent->mkeys_queue.num_pages--;
+ kfree(last_page);
+ }
+ return mkey;
+}
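
For explanation only (editorial note, not part of the patch): the mkeys are kept in a paged stack, pushed and popped at the tail of pages_list. A queue index maps to a page and a slot as sketched below, assuming NUM_MKEYS_PER_PAGE as defined for struct mlx5_mkeys_page; these helpers are hypothetical, the driver walks pages_list rather than indexing pages.

/* which page of the queue a given index falls into */
static inline unsigned long example_mkeys_page(unsigned long ci)
{
	return ci / NUM_MKEYS_PER_PAGE;
}

/* slot within that page; push_mkey_locked() writes at ci, pop reads at ci - 1 */
static inline unsigned long example_mkeys_slot(unsigned long ci)
{
	return ci % NUM_MKEYS_PER_PAGE;
}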
+
+static void create_mkey_callback(int status, struct mlx5_async_work *context)
+{
+ struct mlx5r_async_create_mkey *mkey_out =
+ container_of(context, struct mlx5r_async_create_mkey, cb_work);
+ struct mlx5_cache_ent *ent = mkey_out->ent;
+ struct mlx5_ib_dev *dev = ent->dev;
unsigned long flags;
- struct mlx5_mr_table *table = &dev->mdev->priv.mr_table;
- int err;
- spin_lock_irqsave(&ent->lock, flags);
- ent->pending--;
- spin_unlock_irqrestore(&ent->lock, flags);
if (status) {
- mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
- kfree(mr);
- dev->fill_delay = 1;
+ create_mkey_warn(dev, status, mkey_out->out);
+ kfree(mkey_out);
+ spin_lock_irqsave(&ent->mkeys_queue.lock, flags);
+ ent->pending--;
+ WRITE_ONCE(dev->fill_delay, 1);
+ spin_unlock_irqrestore(&ent->mkeys_queue.lock, flags);
mod_timer(&dev->delay_timer, jiffies + HZ);
return;
}
- if (mr->out.hdr.status) {
- mlx5_ib_warn(dev, "failed - status %d, syndorme 0x%x\n",
- mr->out.hdr.status,
- be32_to_cpu(mr->out.hdr.syndrome));
- kfree(mr);
- dev->fill_delay = 1;
- mod_timer(&dev->delay_timer, jiffies + HZ);
- return;
- }
+ mkey_out->mkey |= mlx5_idx_to_mkey(
+ MLX5_GET(create_mkey_out, mkey_out->out, mkey_index));
+ WRITE_ONCE(dev->cache.last_add, jiffies);
- spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
- key = dev->mdev->priv.mkey_key++;
- spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
- mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;
+ spin_lock_irqsave(&ent->mkeys_queue.lock, flags);
+ push_mkey_locked(ent, mkey_out->mkey);
+ ent->pending--;
+ /* If we are doing fill_to_high_water then keep going. */
+ queue_adjust_cache_locked(ent);
+ spin_unlock_irqrestore(&ent->mkeys_queue.lock, flags);
+ kfree(mkey_out);
+}
- cache->last_add = jiffies;
+static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
+{
+ int ret = 0;
- spin_lock_irqsave(&ent->lock, flags);
- list_add_tail(&mr->list, &ent->head);
- ent->cur++;
- ent->size++;
- spin_unlock_irqrestore(&ent->lock, flags);
+ switch (access_mode) {
+ case MLX5_MKC_ACCESS_MODE_MTT:
+ ret = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD /
+ sizeof(struct mlx5_mtt));
+ break;
+ case MLX5_MKC_ACCESS_MODE_KSM:
+ ret = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD /
+ sizeof(struct mlx5_klm));
+ break;
+ default:
+ WARN_ON(1);
+ }
+ return ret;
+}
- write_lock_irqsave(&table->lock, flags);
- err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmr.key),
- &mr->mmr);
- if (err)
- pr_err("Error inserting to mr tree. 0x%x\n", -err);
- write_unlock_irqrestore(&table->lock, flags);
+static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
+{
+ set_mkc_access_pd_addr_fields(mkc, ent->rb_key.access_flags, 0,
+ ent->dev->umrc.pd);
+ MLX5_SET(mkc, mkc, free, 1);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+ MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3);
+ MLX5_SET(mkc, mkc, access_mode_4_2,
+ (ent->rb_key.access_mode >> 2) & 0x7);
+ MLX5_SET(mkc, mkc, ma_translation_mode, !!ent->rb_key.ats);
+
+ MLX5_SET(mkc, mkc, translations_octword_size,
+ get_mkc_octo_size(ent->rb_key.access_mode,
+ ent->rb_key.ndescs));
+ MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
+
+ if (ent->rb_key.ph != MLX5_IB_NO_PH) {
+ MLX5_SET(mkc, mkc, pcie_tph_en, 1);
+ MLX5_SET(mkc, mkc, pcie_tph_ph, ent->rb_key.ph);
+ if (ent->rb_key.st_index != MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX)
+ MLX5_SET(mkc, mkc, pcie_tph_steering_tag_index,
+ ent->rb_key.st_index);
+ }
}
-static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
+/* Asynchronously schedule new MRs to be populated in the cache. */
+static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
{
- struct mlx5_mr_cache *cache = &dev->cache;
- struct mlx5_cache_ent *ent = &cache->ent[c];
- struct mlx5_create_mkey_mbox_in *in;
- struct mlx5_ib_mr *mr;
- int npages = 1 << ent->order;
+ struct mlx5r_async_create_mkey *async_create;
+ void *mkc;
int err = 0;
int i;
- in = kzalloc(sizeof(*in), GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
for (i = 0; i < num; i++) {
+ async_create = kzalloc(sizeof(struct mlx5r_async_create_mkey),
+ GFP_KERNEL);
+ if (!async_create)
+ return -ENOMEM;
+ mkc = MLX5_ADDR_OF(create_mkey_in, async_create->in,
+ memory_key_mkey_entry);
+ set_cache_mkc(ent, mkc);
+ async_create->ent = ent;
+
+ spin_lock_irq(&ent->mkeys_queue.lock);
if (ent->pending >= MAX_PENDING_REG_MR) {
err = -EAGAIN;
- break;
- }
-
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr) {
- err = -ENOMEM;
- break;
+ goto free_async_create;
}
- mr->order = ent->order;
- mr->umred = 1;
- mr->dev = dev;
- in->seg.status = MLX5_MKEY_STATUS_FREE;
- in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
- in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
- in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
- in->seg.log2_page_size = 12;
-
- spin_lock_irq(&ent->lock);
ent->pending++;
- spin_unlock_irq(&ent->lock);
- err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in,
- sizeof(*in), reg_mr_callback,
- mr, &mr->out);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
+
+ err = mlx5_ib_create_mkey_cb(async_create);
if (err) {
- spin_lock_irq(&ent->lock);
- ent->pending--;
- spin_unlock_irq(&ent->lock);
- mlx5_ib_warn(dev, "create mkey failed %d\n", err);
- kfree(mr);
- break;
+ mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err);
+ goto err_create_mkey;
}
}
+ return 0;
+
+err_create_mkey:
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ ent->pending--;
+free_async_create:
+ spin_unlock_irq(&ent->mkeys_queue.lock);
+ kfree(async_create);
+ return err;
+}
+
+/* Synchronously create an MR in the cache */
+static int create_cache_mkey(struct mlx5_cache_ent *ent, u32 *mkey)
+{
+ size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ void *mkc;
+ u32 *in;
+ int err;
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ set_cache_mkc(ent, mkc);
+
+ err = mlx5_core_create_mkey(ent->dev->mdev, mkey, in, inlen);
+ if (err)
+ goto free_in;
+
+ WRITE_ONCE(ent->dev->cache.last_add, jiffies);
+free_in:
kfree(in);
return err;
}
-static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
+static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
+{
+ u32 mkey;
+
+ lockdep_assert_held(&ent->mkeys_queue.lock);
+ if (!ent->mkeys_queue.ci)
+ return;
+ mkey = pop_mkey_locked(ent);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
+ mlx5_core_destroy_mkey(ent->dev->mdev, mkey);
+ spin_lock_irq(&ent->mkeys_queue.lock);
+}
+
+static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target,
+ bool limit_fill)
+ __acquires(&ent->mkeys_queue.lock) __releases(&ent->mkeys_queue.lock)
{
- struct mlx5_mr_cache *cache = &dev->cache;
- struct mlx5_cache_ent *ent = &cache->ent[c];
- struct mlx5_ib_mr *mr;
int err;
- int i;
- for (i = 0; i < num; i++) {
- spin_lock_irq(&ent->lock);
- if (list_empty(&ent->head)) {
- spin_unlock_irq(&ent->lock);
- return;
+ lockdep_assert_held(&ent->mkeys_queue.lock);
+
+ while (true) {
+ if (limit_fill)
+ target = ent->limit * 2;
+ if (target == ent->pending + ent->mkeys_queue.ci)
+ return 0;
+ if (target > ent->pending + ent->mkeys_queue.ci) {
+ u32 todo = target - (ent->pending + ent->mkeys_queue.ci);
+
+ spin_unlock_irq(&ent->mkeys_queue.lock);
+ err = add_keys(ent, todo);
+ if (err == -EAGAIN)
+ usleep_range(3000, 5000);
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ if (err) {
+ if (err != -EAGAIN)
+ return err;
+ } else
+ return 0;
+ } else {
+ remove_cache_mr_locked(ent);
}
- mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
- list_del(&mr->list);
- ent->cur--;
- ent->size--;
- spin_unlock_irq(&ent->lock);
- err = destroy_mkey(dev, mr);
- if (err)
- mlx5_ib_warn(dev, "failed destroy mkey\n");
- else
- kfree(mr);
}
}
@@ -216,37 +384,38 @@ static ssize_t size_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
struct mlx5_cache_ent *ent = filp->private_data;
- struct mlx5_ib_dev *dev = ent->dev;
- char lbuf[20];
- u32 var;
+ u32 target;
int err;
- int c;
-
- if (copy_from_user(lbuf, buf, sizeof(lbuf)))
- return -EFAULT;
-
- c = order2idx(dev, ent->order);
- lbuf[sizeof(lbuf) - 1] = 0;
-
- if (sscanf(lbuf, "%u", &var) != 1)
- return -EINVAL;
-
- if (var < ent->limit)
- return -EINVAL;
- if (var > ent->size) {
- do {
- err = add_keys(dev, c, var - ent->size);
- if (err && err != -EAGAIN)
- return err;
+ err = kstrtou32_from_user(buf, count, 0, &target);
+ if (err)
+ return err;
- usleep_range(3000, 5000);
- } while (err);
- } else if (var < ent->size) {
- remove_keys(dev, c, ent->size - var);
+ /*
+	 * Target is the new value of total_mrs the user requests; however, we
+	 * cannot free MRs that are in use. Compute the target value for stored
+ * mkeys.
+ */
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ if (target < ent->in_use) {
+ err = -EINVAL;
+ goto err_unlock;
}
+ target = target - ent->in_use;
+ if (target < ent->limit || target > ent->limit*2) {
+ err = -EINVAL;
+ goto err_unlock;
+ }
+ err = resize_available_mrs(ent, target, false);
+ if (err)
+ goto err_unlock;
+ spin_unlock_irq(&ent->mkeys_queue.lock);
return count;
+
+err_unlock:
+ spin_unlock_irq(&ent->mkeys_queue.lock);
+ return err;
}
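
Worked example (editorial note, not part of the patch): with ent->in_use = 10 and ent->limit = 16, writing 40 to the file yields a stored-mkey target of 40 - 10 = 30, which lies within [limit, 2 * limit] = [16, 32] and is accepted; writing 60 would give a target of 50 and be rejected with -EINVAL.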
static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
@@ -256,19 +425,12 @@ static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
char lbuf[20];
int err;
- if (*pos)
- return 0;
-
- err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
+ err = snprintf(lbuf, sizeof(lbuf), "%ld\n",
+ ent->mkeys_queue.ci + ent->in_use);
if (err < 0)
return err;
- if (copy_to_user(buf, lbuf, err))
- return -EFAULT;
-
- *pos += err;
-
- return err;
+ return simple_read_from_buffer(buf, count, pos, lbuf, err);
}
static const struct file_operations size_fops = {
@@ -282,32 +444,23 @@ static ssize_t limit_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
struct mlx5_cache_ent *ent = filp->private_data;
- struct mlx5_ib_dev *dev = ent->dev;
- char lbuf[20];
u32 var;
int err;
- int c;
-
- if (copy_from_user(lbuf, buf, sizeof(lbuf)))
- return -EFAULT;
-
- c = order2idx(dev, ent->order);
- lbuf[sizeof(lbuf) - 1] = 0;
- if (sscanf(lbuf, "%u", &var) != 1)
- return -EINVAL;
-
- if (var > ent->size)
- return -EINVAL;
+ err = kstrtou32_from_user(buf, count, 0, &var);
+ if (err)
+ return err;
+ /*
+	 * Upon set, we immediately fill the cache to the high water mark implied by
+ * the limit.
+ */
+ spin_lock_irq(&ent->mkeys_queue.lock);
ent->limit = var;
-
- if (ent->cur < ent->limit) {
- err = add_keys(dev, c, 2 * ent->limit - ent->cur);
- if (err)
- return err;
- }
-
+ err = resize_available_mrs(ent, 0, true);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
+ if (err)
+ return err;
return count;
}
@@ -318,19 +471,11 @@ static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
char lbuf[20];
int err;
- if (*pos)
- return 0;
-
err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
if (err < 0)
return err;
- if (copy_to_user(buf, lbuf, err))
- return -EFAULT;
-
- *pos += err;
-
- return err;
+ return simple_read_from_buffer(buf, count, pos, lbuf, err);
}
static const struct file_operations limit_fops = {
@@ -340,56 +485,140 @@ static const struct file_operations limit_fops = {
.read = limit_read,
};
-static int someone_adding(struct mlx5_mr_cache *cache)
+static bool someone_adding(struct mlx5_mkey_cache *cache)
{
- int i;
+ struct mlx5_cache_ent *ent;
+ struct rb_node *node;
+ bool ret;
+
+ mutex_lock(&cache->rb_lock);
+ for (node = rb_first(&cache->rb_root); node; node = rb_next(node)) {
+ ent = rb_entry(node, struct mlx5_cache_ent, node);
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ ret = ent->mkeys_queue.ci < ent->limit;
+ spin_unlock_irq(&ent->mkeys_queue.lock);
+ if (ret) {
+ mutex_unlock(&cache->rb_lock);
+ return true;
+ }
+ }
+ mutex_unlock(&cache->rb_lock);
+ return false;
+}
+
+/*
+ * Check if the bucket is outside the high/low water mark and schedule an async
+ * update. The cache refill has hysteresis: once the low water mark is hit, it
+ * is refilled up to the high mark.
+ */
+static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
+{
+ lockdep_assert_held(&ent->mkeys_queue.lock);
- for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
- if (cache->ent[i].cur < cache->ent[i].limit)
- return 1;
+ if (ent->disabled || READ_ONCE(ent->dev->fill_delay) || ent->is_tmp)
+ return;
+ if (ent->mkeys_queue.ci < ent->limit) {
+ ent->fill_to_high_water = true;
+ mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
+ } else if (ent->fill_to_high_water &&
+ ent->mkeys_queue.ci + ent->pending < 2 * ent->limit) {
+ /*
+ * Once we start populating due to hitting a low water mark
+ * continue until we pass the high water mark.
+ */
+ mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
+ } else if (ent->mkeys_queue.ci == 2 * ent->limit) {
+ ent->fill_to_high_water = false;
+ } else if (ent->mkeys_queue.ci > 2 * ent->limit) {
+ /* Queue deletion of excess entries */
+ ent->fill_to_high_water = false;
+ if (ent->pending)
+ queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
+ secs_to_jiffies(1));
+ else
+ mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
}
+}
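
For illustration only (editorial note, not part of the patch): the same refill hysteresis expressed with plain integers, ignoring pending work and the disabled/tmp checks. "stored" stands for mkeys_queue.ci and the high water mark is 2 * limit; the names are hypothetical.

enum example_cache_action { EXAMPLE_FILL, EXAMPLE_TRIM, EXAMPLE_IDLE };

static enum example_cache_action
example_adjust(unsigned long stored, u32 limit, bool *fill_to_high_water)
{
	if (stored < limit) {		/* dropped below the low water mark */
		*fill_to_high_water = true;
		return EXAMPLE_FILL;
	}
	if (*fill_to_high_water && stored < 2 * limit)
		return EXAMPLE_FILL;	/* keep refilling up to the high mark */
	*fill_to_high_water = false;	/* reached or passed the high mark */
	if (stored > 2 * limit)
		return EXAMPLE_TRIM;	/* queue deletion of excess entries */
	return EXAMPLE_IDLE;
}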
- return 0;
+static void clean_keys(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent)
+{
+ u32 mkey;
+
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ while (ent->mkeys_queue.ci) {
+ mkey = pop_mkey_locked(ent);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
+ mlx5_core_destroy_mkey(dev->mdev, mkey);
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ }
+ ent->tmp_cleanup_scheduled = false;
+ spin_unlock_irq(&ent->mkeys_queue.lock);
}
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
struct mlx5_ib_dev *dev = ent->dev;
- struct mlx5_mr_cache *cache = &dev->cache;
- int i = order2idx(dev, ent->order);
+ struct mlx5_mkey_cache *cache = &dev->cache;
int err;
- if (cache->stopped)
- return;
-
- ent = &dev->cache.ent[i];
- if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
- err = add_keys(dev, i, 1);
- if (ent->cur < 2 * ent->limit) {
- if (err == -EAGAIN) {
- mlx5_ib_dbg(dev, "returned eagain, order %d\n",
- i + 2);
- queue_delayed_work(cache->wq, &ent->dwork,
- msecs_to_jiffies(3));
- } else if (err) {
- mlx5_ib_warn(dev, "command failed order %d, err %d\n",
- i + 2, err);
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ if (ent->disabled)
+ goto out;
+
+ if (ent->fill_to_high_water &&
+ ent->mkeys_queue.ci + ent->pending < 2 * ent->limit &&
+ !READ_ONCE(dev->fill_delay)) {
+ spin_unlock_irq(&ent->mkeys_queue.lock);
+ err = add_keys(ent, 1);
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ if (ent->disabled)
+ goto out;
+ if (err) {
+ /*
+ * EAGAIN only happens if there are pending MRs, so we
+ * will be rescheduled when storing them. The only
+ * failure path here is ENOMEM.
+ */
+ if (err != -EAGAIN) {
+ mlx5_ib_warn(
+ dev,
+ "add keys command failed, err %d\n",
+ err);
queue_delayed_work(cache->wq, &ent->dwork,
- msecs_to_jiffies(1000));
- } else {
- queue_work(cache->wq, &ent->work);
+ secs_to_jiffies(1));
}
}
- } else if (ent->cur > 2 * ent->limit) {
- if (!someone_adding(cache) &&
- time_after(jiffies, cache->last_add + 300 * HZ)) {
- remove_keys(dev, i, 1);
- if (ent->cur > ent->limit)
- queue_work(cache->wq, &ent->work);
- } else {
+ } else if (ent->mkeys_queue.ci > 2 * ent->limit) {
+ bool need_delay;
+
+ /*
+		 * The remove_cache_mr_locked() logic is performed as a garbage
+		 * collection task. Such a task is intended to run when no other
+		 * active processes are running.
+		 *
+		 * need_resched() returns true if there are user tasks to be
+		 * activated in the near future.
+		 *
+		 * In that case we don't execute remove_cache_mr_locked() and
+		 * postpone the garbage collection work to the next cycle, in
+		 * order to free CPU resources for other tasks.
+ */
+ spin_unlock_irq(&ent->mkeys_queue.lock);
+ need_delay = need_resched() || someone_adding(cache) ||
+ !time_after(jiffies,
+ READ_ONCE(cache->last_add) + 300 * HZ);
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ if (ent->disabled)
+ goto out;
+ if (need_delay) {
queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
+ goto out;
}
+ remove_cache_mr_locked(ent);
+ queue_adjust_cache_locked(ent);
}
+out:
+ spin_unlock_irq(&ent->mkeys_queue.lock);
}
static void delayed_cache_work_func(struct work_struct *work)
@@ -397,262 +626,465 @@ static void delayed_cache_work_func(struct work_struct *work)
struct mlx5_cache_ent *ent;
ent = container_of(work, struct mlx5_cache_ent, dwork.work);
- __cache_work_func(ent);
+ /* temp entries are never filled, only cleaned */
+ if (ent->is_tmp)
+ clean_keys(ent->dev, ent);
+ else
+ __cache_work_func(ent);
}
-static void cache_work_func(struct work_struct *work)
+static int cache_ent_key_cmp(struct mlx5r_cache_rb_key key1,
+ struct mlx5r_cache_rb_key key2)
{
- struct mlx5_cache_ent *ent;
-
- ent = container_of(work, struct mlx5_cache_ent, work);
- __cache_work_func(ent);
+ int res;
+
+ res = key1.ats - key2.ats;
+ if (res)
+ return res;
+
+ res = key1.access_mode - key2.access_mode;
+ if (res)
+ return res;
+
+ res = key1.access_flags - key2.access_flags;
+ if (res)
+ return res;
+
+ res = key1.st_index - key2.st_index;
+ if (res)
+ return res;
+
+ res = key1.ph - key2.ph;
+ if (res)
+ return res;
+
+ /*
+	 * Keep ndescs last in the compare order, since the find function
+	 * searches for an exact match on all other properties and only the
+	 * closest match in size.
+ */
+ return key1.ndescs - key2.ndescs;
}
-static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
+static int mlx5_cache_ent_insert(struct mlx5_mkey_cache *cache,
+ struct mlx5_cache_ent *ent)
{
- struct mlx5_mr_cache *cache = &dev->cache;
- struct mlx5_ib_mr *mr = NULL;
- struct mlx5_cache_ent *ent;
- int c;
- int i;
-
- c = order2idx(dev, order);
- if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
- mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
- return NULL;
+ struct rb_node **new = &cache->rb_root.rb_node, *parent = NULL;
+ struct mlx5_cache_ent *cur;
+ int cmp;
+
+ /* Figure out where to put new node */
+ while (*new) {
+ cur = rb_entry(*new, struct mlx5_cache_ent, node);
+ parent = *new;
+ cmp = cache_ent_key_cmp(cur->rb_key, ent->rb_key);
+ if (cmp > 0)
+ new = &((*new)->rb_left);
+ if (cmp < 0)
+ new = &((*new)->rb_right);
+ if (cmp == 0)
+ return -EEXIST;
}
- for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) {
- ent = &cache->ent[i];
+ /* Add new node and rebalance tree. */
+ rb_link_node(&ent->node, parent, new);
+ rb_insert_color(&ent->node, &cache->rb_root);
- mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);
+ return 0;
+}
- spin_lock_irq(&ent->lock);
- if (!list_empty(&ent->head)) {
- mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
- list);
- list_del(&mr->list);
- ent->cur--;
- spin_unlock_irq(&ent->lock);
- if (ent->cur < ent->limit)
- queue_work(cache->wq, &ent->work);
- break;
+static struct mlx5_cache_ent *
+mkey_cache_ent_from_rb_key(struct mlx5_ib_dev *dev,
+ struct mlx5r_cache_rb_key rb_key)
+{
+ struct rb_node *node = dev->cache.rb_root.rb_node;
+ struct mlx5_cache_ent *cur, *smallest = NULL;
+ u64 ndescs_limit;
+ int cmp;
+
+ /*
+	 * Find the smallest entry whose key compares >= the requested rb_key.
+ */
+ while (node) {
+ cur = rb_entry(node, struct mlx5_cache_ent, node);
+ cmp = cache_ent_key_cmp(cur->rb_key, rb_key);
+ if (cmp > 0) {
+ smallest = cur;
+ node = node->rb_left;
}
- spin_unlock_irq(&ent->lock);
-
- queue_work(cache->wq, &ent->work);
+ if (cmp < 0)
+ node = node->rb_right;
+ if (cmp == 0)
+ return cur;
}
+ /*
+	 * Limit the use of mkeys larger than twice the required size, while
+	 * still allowing the smallest cache entry to serve small MRs.
+ */
+ ndescs_limit = max_t(u64, rb_key.ndescs * 2,
+ MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS);
+
+ return (smallest &&
+ smallest->rb_key.access_mode == rb_key.access_mode &&
+ smallest->rb_key.access_flags == rb_key.access_flags &&
+ smallest->rb_key.ats == rb_key.ats &&
+ smallest->rb_key.st_index == rb_key.st_index &&
+ smallest->rb_key.ph == rb_key.ph &&
+ smallest->rb_key.ndescs <= ndescs_limit) ?
+ smallest :
+ NULL;
+}
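
For explanation only (editorial note, not part of the patch): what the lookup above ends up accepting for a request of n descriptors, assuming every other field of the key matches exactly. The helper is hypothetical and only restates the acceptance condition; MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS is the constant introduced earlier in this file.

static bool example_entry_acceptable(unsigned int entry_ndescs, unsigned int n)
{
	u64 cap = max_t(u64, 2 * (u64)n,
			MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS);

	/* large enough to hold the MR, but not wastefully oversized */
	return entry_ndescs >= n && entry_ndescs <= cap;
}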
+
+static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
+ struct mlx5_cache_ent *ent)
+{
+ struct mlx5_ib_mr *mr;
+ int err;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
- cache->ent[c].miss++;
+ return ERR_PTR(-ENOMEM);
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ ent->in_use++;
+
+ if (!ent->mkeys_queue.ci) {
+ queue_adjust_cache_locked(ent);
+ ent->miss++;
+ spin_unlock_irq(&ent->mkeys_queue.lock);
+ err = create_cache_mkey(ent, &mr->mmkey.key);
+ if (err) {
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ ent->in_use--;
+ spin_unlock_irq(&ent->mkeys_queue.lock);
+ kfree(mr);
+ return ERR_PTR(err);
+ }
+ } else {
+ mr->mmkey.key = pop_mkey_locked(ent);
+ queue_adjust_cache_locked(ent);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
+ }
+ mr->mmkey.cache_ent = ent;
+ mr->mmkey.type = MLX5_MKEY_MR;
+ mr->mmkey.rb_key = ent->rb_key;
+ mr->mmkey.cacheable = true;
+ init_waitqueue_head(&mr->mmkey.wait);
return mr;
}
-static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+static int get_unchangeable_access_flags(struct mlx5_ib_dev *dev,
+ int access_flags)
{
- struct mlx5_mr_cache *cache = &dev->cache;
- struct mlx5_cache_ent *ent;
- int shrink = 0;
- int c;
+ int ret = 0;
- c = order2idx(dev, mr->order);
- if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
- mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
- return;
- }
- ent = &cache->ent[c];
- spin_lock_irq(&ent->lock);
- list_add_tail(&mr->list, &ent->head);
- ent->cur++;
- if (ent->cur > 2 * ent->limit)
- shrink = 1;
- spin_unlock_irq(&ent->lock);
+ if ((access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
+ MLX5_CAP_GEN(dev->mdev, atomic) &&
+ MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
+ ret |= IB_ACCESS_REMOTE_ATOMIC;
+
+ if ((access_flags & IB_ACCESS_RELAXED_ORDERING) &&
+ MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
+ !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
+ ret |= IB_ACCESS_RELAXED_ORDERING;
- if (shrink)
- queue_work(cache->wq, &ent->work);
+ if ((access_flags & IB_ACCESS_RELAXED_ORDERING) &&
+ (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) ||
+ MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_pci_enabled)) &&
+ !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
+ ret |= IB_ACCESS_RELAXED_ORDERING;
+
+ return ret;
}
-static void clean_keys(struct mlx5_ib_dev *dev, int c)
+struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
+ int access_flags, int access_mode,
+ int ndescs)
{
- struct mlx5_mr_cache *cache = &dev->cache;
- struct mlx5_cache_ent *ent = &cache->ent[c];
- struct mlx5_ib_mr *mr;
- int err;
+ struct mlx5r_cache_rb_key rb_key = {
+ .ndescs = ndescs,
+ .access_mode = access_mode,
+ .access_flags = get_unchangeable_access_flags(dev, access_flags),
+ .ph = MLX5_IB_NO_PH,
+ };
+ struct mlx5_cache_ent *ent = mkey_cache_ent_from_rb_key(dev, rb_key);
+
+ if (!ent)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return _mlx5_mr_cache_alloc(dev, ent);
+}
- cancel_delayed_work(&ent->dwork);
- while (1) {
- spin_lock_irq(&ent->lock);
- if (list_empty(&ent->head)) {
- spin_unlock_irq(&ent->lock);
- return;
- }
- mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
- list_del(&mr->list);
- ent->cur--;
- ent->size--;
- spin_unlock_irq(&ent->lock);
- err = destroy_mkey(dev, mr);
- if (err)
- mlx5_ib_warn(dev, "failed destroy mkey\n");
- else
- kfree(mr);
- }
+static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
+{
+ if (!mlx5_debugfs_root || dev->is_rep)
+ return;
+
+ debugfs_remove_recursive(dev->cache.fs_root);
+ dev->cache.fs_root = NULL;
}
-static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
+static void mlx5_mkey_cache_debugfs_add_ent(struct mlx5_ib_dev *dev,
+ struct mlx5_cache_ent *ent)
{
- struct mlx5_mr_cache *cache = &dev->cache;
- struct mlx5_cache_ent *ent;
- int i;
+ int order = order_base_2(ent->rb_key.ndescs);
+ struct dentry *dir;
- if (!mlx5_debugfs_root)
- return 0;
+ if (!mlx5_debugfs_root || dev->is_rep)
+ return;
- cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
- if (!cache->root)
- return -ENOMEM;
+ if (ent->rb_key.access_mode == MLX5_MKC_ACCESS_MODE_KSM)
+ order = MLX5_IMR_KSM_CACHE_ENTRY + 2;
- for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
- ent = &cache->ent[i];
- sprintf(ent->name, "%d", ent->order);
- ent->dir = debugfs_create_dir(ent->name, cache->root);
- if (!ent->dir)
- return -ENOMEM;
+ sprintf(ent->name, "%d", order);
+ dir = debugfs_create_dir(ent->name, dev->cache.fs_root);
+ debugfs_create_file("size", 0600, dir, ent, &size_fops);
+ debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
+ debugfs_create_ulong("cur", 0400, dir, &ent->mkeys_queue.ci);
+ debugfs_create_u32("miss", 0600, dir, &ent->miss);
+}
- ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
- &size_fops);
- if (!ent->fsize)
- return -ENOMEM;
+static void mlx5_mkey_cache_debugfs_init(struct mlx5_ib_dev *dev)
+{
+ struct dentry *dbg_root = mlx5_debugfs_get_dev_root(dev->mdev);
+ struct mlx5_mkey_cache *cache = &dev->cache;
- ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
- &limit_fops);
- if (!ent->flimit)
- return -ENOMEM;
+ if (!mlx5_debugfs_root || dev->is_rep)
+ return;
- ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
- &ent->cur);
- if (!ent->fcur)
- return -ENOMEM;
+ cache->fs_root = debugfs_create_dir("mr_cache", dbg_root);
+}
- ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
- &ent->miss);
- if (!ent->fmiss)
- return -ENOMEM;
- }
+static void delay_time_func(struct timer_list *t)
+{
+ struct mlx5_ib_dev *dev = timer_container_of(dev, t, delay_timer);
+
+ WRITE_ONCE(dev->fill_delay, 0);
+}
+static int mlx5r_mkeys_init(struct mlx5_cache_ent *ent)
+{
+ struct mlx5_mkeys_page *page;
+
+ page = kzalloc(sizeof(*page), GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&ent->mkeys_queue.pages_list);
+ spin_lock_init(&ent->mkeys_queue.lock);
+ list_add_tail(&page->list, &ent->mkeys_queue.pages_list);
+ ent->mkeys_queue.num_pages++;
return 0;
}
-static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5r_mkeys_uninit(struct mlx5_cache_ent *ent)
{
- if (!mlx5_debugfs_root)
- return;
+ struct mlx5_mkeys_page *page;
- debugfs_remove_recursive(dev->cache.root);
+ WARN_ON(ent->mkeys_queue.ci || ent->mkeys_queue.num_pages > 1);
+ page = list_last_entry(&ent->mkeys_queue.pages_list,
+ struct mlx5_mkeys_page, list);
+ list_del(&page->list);
+ kfree(page);
}
-static void delay_time_func(unsigned long ctx)
+struct mlx5_cache_ent *
+mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev,
+ struct mlx5r_cache_rb_key rb_key,
+ bool persistent_entry)
{
- struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;
+ struct mlx5_cache_ent *ent;
+ int order;
+ int ret;
- dev->fill_delay = 0;
+ ent = kzalloc(sizeof(*ent), GFP_KERNEL);
+ if (!ent)
+ return ERR_PTR(-ENOMEM);
+
+ ret = mlx5r_mkeys_init(ent);
+ if (ret)
+ goto mkeys_err;
+ ent->rb_key = rb_key;
+ ent->dev = dev;
+ ent->is_tmp = !persistent_entry;
+
+ INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
+
+ ret = mlx5_cache_ent_insert(&dev->cache, ent);
+ if (ret)
+ goto ent_insert_err;
+
+ if (persistent_entry) {
+ if (rb_key.access_mode == MLX5_MKC_ACCESS_MODE_KSM)
+ order = MLX5_IMR_KSM_CACHE_ENTRY;
+ else
+ order = order_base_2(rb_key.ndescs) - 2;
+
+ if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) &&
+ !dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
+ mlx5r_umr_can_load_pas(dev, 0))
+ ent->limit = dev->mdev->profile.mr_cache[order].limit;
+ else
+ ent->limit = 0;
+
+ mlx5_mkey_cache_debugfs_add_ent(dev, ent);
+ }
+
+ return ent;
+ent_insert_err:
+ mlx5r_mkeys_uninit(ent);
+mkeys_err:
+ kfree(ent);
+ return ERR_PTR(ret);
}
-int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
+static void mlx5r_destroy_cache_entries(struct mlx5_ib_dev *dev)
{
- struct mlx5_mr_cache *cache = &dev->cache;
+ struct rb_root *root = &dev->cache.rb_root;
struct mlx5_cache_ent *ent;
- int limit;
- int err;
+ struct rb_node *node;
+
+ mutex_lock(&dev->cache.rb_lock);
+ node = rb_first(root);
+ while (node) {
+ ent = rb_entry(node, struct mlx5_cache_ent, node);
+ node = rb_next(node);
+ clean_keys(dev, ent);
+ rb_erase(&ent->node, root);
+ mlx5r_mkeys_uninit(ent);
+ kfree(ent);
+ }
+ mutex_unlock(&dev->cache.rb_lock);
+}
+
+int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_mkey_cache *cache = &dev->cache;
+ struct rb_root *root = &dev->cache.rb_root;
+ struct mlx5r_cache_rb_key rb_key = {
+ .access_mode = MLX5_MKC_ACCESS_MODE_MTT,
+ .ph = MLX5_IB_NO_PH,
+ };
+ struct mlx5_cache_ent *ent;
+ struct rb_node *node;
+ int ret;
int i;
- cache->wq = create_singlethread_workqueue("mkey_cache");
+ mutex_init(&dev->slow_path_mutex);
+ mutex_init(&dev->cache.rb_lock);
+ dev->cache.rb_root = RB_ROOT;
+ cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
if (!cache->wq) {
mlx5_ib_warn(dev, "failed to create work queue\n");
return -ENOMEM;
}
- setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
- for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
- INIT_LIST_HEAD(&cache->ent[i].head);
- spin_lock_init(&cache->ent[i].lock);
-
- ent = &cache->ent[i];
- INIT_LIST_HEAD(&ent->head);
- spin_lock_init(&ent->lock);
- ent->order = i + 2;
- ent->dev = dev;
+ mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
+ timer_setup(&dev->delay_timer, delay_time_func, 0);
+ mlx5_mkey_cache_debugfs_init(dev);
+ mutex_lock(&cache->rb_lock);
+ for (i = 0; i <= mkey_cache_max_order(dev); i++) {
+ rb_key.ndescs = MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS << i;
+ ent = mlx5r_cache_create_ent_locked(dev, rb_key, true);
+ if (IS_ERR(ent)) {
+ ret = PTR_ERR(ent);
+ goto err;
+ }
+ }
- if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
- limit = dev->mdev->profile->mr_cache[i].limit;
- else
- limit = 0;
+ ret = mlx5_odp_init_mkey_cache(dev);
+ if (ret)
+ goto err;
- INIT_WORK(&ent->work, cache_work_func);
- INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
- ent->limit = limit;
- queue_work(cache->wq, &ent->work);
+ mutex_unlock(&cache->rb_lock);
+ for (node = rb_first(root); node; node = rb_next(node)) {
+ ent = rb_entry(node, struct mlx5_cache_ent, node);
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ queue_adjust_cache_locked(ent);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
}
- err = mlx5_mr_cache_debugfs_init(dev);
- if (err)
- mlx5_ib_warn(dev, "cache debugfs failure\n");
-
return 0;
+
+err:
+ mutex_unlock(&cache->rb_lock);
+ mlx5_mkey_cache_debugfs_cleanup(dev);
+ mlx5r_destroy_cache_entries(dev);
+ destroy_workqueue(cache->wq);
+ mlx5_ib_warn(dev, "failed to create mkey cache entry\n");
+ return ret;
}
-int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
+void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
{
- int i;
+ struct rb_root *root = &dev->cache.rb_root;
+ struct mlx5_cache_ent *ent;
+ struct rb_node *node;
- dev->cache.stopped = 1;
+ if (!dev->cache.wq)
+ return;
+
+ mutex_lock(&dev->cache.rb_lock);
+ for (node = rb_first(root); node; node = rb_next(node)) {
+ ent = rb_entry(node, struct mlx5_cache_ent, node);
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ ent->disabled = true;
+ spin_unlock_irq(&ent->mkeys_queue.lock);
+ cancel_delayed_work(&ent->dwork);
+ }
+ mutex_unlock(&dev->cache.rb_lock);
+
+ /*
+ * After all entries are disabled and will not reschedule on WQ,
+ * flush it and all async commands.
+ */
flush_workqueue(dev->cache.wq);
- mlx5_mr_cache_debugfs_cleanup(dev);
+ mlx5_mkey_cache_debugfs_cleanup(dev);
+ mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
- for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
- clean_keys(dev, i);
+ /* At this point all entries are disabled and have no concurrent work. */
+ mlx5r_destroy_cache_entries(dev);
destroy_workqueue(dev->cache.wq);
- del_timer_sync(&dev->delay_timer);
-
- return 0;
+ timer_delete_sync(&dev->delay_timer);
}
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_core_dev *mdev = dev->mdev;
- struct mlx5_create_mkey_mbox_in *in;
- struct mlx5_mkey_seg *seg;
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
struct mlx5_ib_mr *mr;
+ void *mkc;
+ u32 *in;
int err;
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
- in = kzalloc(sizeof(*in), GFP_KERNEL);
+ in = kzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_free;
}
- seg = &in->seg;
- seg->flags = convert_access(acc) | MLX5_ACCESS_MODE_PA;
- seg->flags_pd = cpu_to_be32(to_mpd(pd)->pdn | MLX5_MKEY_LEN64);
- seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
- seg->start_addr = 0;
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
+ MLX5_SET(mkc, mkc, length64, 1);
+ set_mkc_access_pd_addr_fields(mkc, acc | IB_ACCESS_RELAXED_ORDERING, 0,
+ pd);
+ MLX5_SET(mkc, mkc, ma_translation_mode, MLX5_CAP_GEN(dev->mdev, ats));
- err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in), NULL, NULL,
- NULL);
+ err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
if (err)
goto err_in;
kfree(in);
- mr->ibmr.lkey = mr->mmr.key;
- mr->ibmr.rkey = mr->mmr.key;
+ mr->mmkey.type = MLX5_MKEY_MR;
+ mr->ibmr.lkey = mr->mmkey.key;
+ mr->ibmr.rkey = mr->mmkey.key;
mr->umem = NULL;
return &mr->ibmr;
@@ -666,657 +1098,1423 @@ err_free:
return ERR_PTR(err);
}
-static int get_octo_len(u64 addr, u64 len, int page_size)
+static int get_octo_len(u64 addr, u64 len, int page_shift)
{
+ u64 page_size = 1ULL << page_shift;
u64 offset;
int npages;
offset = addr & (page_size - 1);
- npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
+ npages = ALIGN(len + offset, page_size) >> page_shift;
return (npages + 1) / 2;
}
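
(For reference only, not part of the patch: a minimal userspace sketch of the octo-length computation above. It assumes, as the divide-by-two in get_octo_len() suggests, that each translation octword covers two page-translation entries; the ALIGN() macro here simply mirrors the kernel's power-of-two round-up.)

#include <stdio.h>
#include <stdint.h>

/* Illustrative re-implementation of get_octo_len() for experimentation. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

static int get_octo_len(uint64_t addr, uint64_t len, int page_shift)
{
	uint64_t page_size = 1ULL << page_shift;
	uint64_t offset = addr & (page_size - 1);
	uint64_t npages = ALIGN(len + offset, page_size) >> page_shift;

	/* Two translation entries per octword, hence the round-up halving. */
	return (int)((npages + 1) / 2);
}

int main(void)
{
	/* 1 MiB region starting 0x200 into a 4 KiB page: 257 pages -> 129 octwords. */
	printf("%d\n", get_octo_len(0x200, 1 << 20, 12));
	return 0;
}
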
-static int use_umr(int order)
+static int mkey_cache_max_order(struct mlx5_ib_dev *dev)
{
- return order <= MLX5_MAX_UMR_SHIFT;
+ if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
+ return MKEY_CACHE_LAST_STD_ENTRY;
+ return MLX5_MAX_UMR_SHIFT;
}
-static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
- struct ib_sge *sg, u64 dma, int n, u32 key,
- int page_shift, u64 virt_addr, u64 len,
- int access_flags)
+static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
+ u64 length, int access_flags, u64 iova)
{
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
+ mr->ibmr.lkey = mr->mmkey.key;
+ mr->ibmr.rkey = mr->mmkey.key;
+ mr->ibmr.length = length;
+ mr->ibmr.device = &dev->ib_dev;
+ mr->ibmr.iova = iova;
+ mr->access_flags = access_flags;
+}
- sg->addr = dma;
- sg->length = ALIGN(sizeof(u64) * n, 64);
- sg->lkey = dev->umrc.pd->local_dma_lkey;
+static unsigned int mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem,
+ u64 iova)
+{
+ /*
+ * The alignment of iova has already been checked upon entering
+ * UVERBS_METHOD_REG_DMABUF_MR
+ */
+ umem->iova = iova;
+ return PAGE_SIZE;
+}
- wr->next = NULL;
- wr->send_flags = 0;
- wr->sg_list = sg;
- if (n)
- wr->num_sge = 1;
- else
- wr->num_sge = 0;
+static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
+ struct ib_umem *umem, u64 iova,
+ int access_flags, int access_mode,
+ u16 st_index, u8 ph)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5r_cache_rb_key rb_key = {};
+ struct mlx5_cache_ent *ent;
+ struct mlx5_ib_mr *mr;
+ unsigned long page_size;
- wr->opcode = MLX5_IB_WR_UMR;
+ if (umem->is_dmabuf)
+ page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
+ else
+ page_size = mlx5_umem_mkc_find_best_pgsz(dev, umem, iova,
+ access_mode);
+ if (WARN_ON(!page_size))
+ return ERR_PTR(-EINVAL);
+
+ rb_key.access_mode = access_mode;
+ rb_key.ndescs = ib_umem_num_dma_blocks(umem, page_size);
+ rb_key.ats = mlx5_umem_needs_ats(dev, umem, access_flags);
+ rb_key.access_flags = get_unchangeable_access_flags(dev, access_flags);
+ rb_key.st_index = st_index;
+ rb_key.ph = ph;
+ ent = mkey_cache_ent_from_rb_key(dev, rb_key);
+ /*
+ * If the MR can't come from the cache then synchronously create an uncached
+ * one.
+ */
+ if (!ent) {
+ mutex_lock(&dev->slow_path_mutex);
+ mr = reg_create(pd, umem, iova, access_flags, page_size, false, access_mode,
+ st_index, ph);
+ mutex_unlock(&dev->slow_path_mutex);
+ if (IS_ERR(mr))
+ return mr;
+ mr->mmkey.rb_key = rb_key;
+ mr->mmkey.cacheable = true;
+ return mr;
+ }
- umrwr->npages = n;
- umrwr->page_shift = page_shift;
- umrwr->mkey = key;
- umrwr->target.virt_addr = virt_addr;
- umrwr->length = len;
- umrwr->access_flags = access_flags;
- umrwr->pd = pd;
-}
+ mr = _mlx5_mr_cache_alloc(dev, ent);
+ if (IS_ERR(mr))
+ return mr;
-static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
- struct ib_send_wr *wr, u32 key)
-{
- struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
+ mr->ibmr.pd = pd;
+ mr->umem = umem;
+ mr->page_shift = order_base_2(page_size);
+ set_mr_fields(dev, mr, umem->length, access_flags, iova);
- wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
- wr->opcode = MLX5_IB_WR_UMR;
- umrwr->mkey = key;
+ return mr;
}
-void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
+static struct ib_mr *
+reg_create_crossing_vhca_mr(struct ib_pd *pd, u64 iova, u64 length, int access_flags,
+ u32 crossed_lkey)
{
- struct mlx5_ib_umr_context *context;
- struct ib_wc wc;
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ int access_mode = MLX5_MKC_ACCESS_MODE_CROSSING;
+ struct mlx5_ib_mr *mr;
+ void *mkc;
+ int inlen;
+ u32 *in;
int err;
- while (1) {
- err = ib_poll_cq(cq, 1, &wc);
- if (err < 0) {
- pr_warn("poll cq error %d\n", err);
- return;
- }
- if (err == 0)
- break;
+ if (!MLX5_CAP_GEN(dev->mdev, crossing_vhca_mkey))
+ return ERR_PTR(-EOPNOTSUPP);
- context = (struct mlx5_ib_umr_context *) (unsigned long) wc.wr_id;
- context->status = wc.status;
- complete(&context->done);
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_1;
}
- ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, crossing_target_vhca_id,
+ MLX5_CAP_GEN(dev->mdev, vhca_id));
+ MLX5_SET(mkc, mkc, translations_octword_size, crossed_lkey);
+ MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
+ MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
+
+ /* for this crossing mkey IOVA should be 0 and len should be IOVA + len */
+ set_mkc_access_pd_addr_fields(mkc, access_flags, 0, pd);
+ MLX5_SET64(mkc, mkc, len, iova + length);
+
+ MLX5_SET(mkc, mkc, free, 0);
+ MLX5_SET(mkc, mkc, umr_en, 0);
+ err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
+ if (err)
+ goto err_2;
+
+ mr->mmkey.type = MLX5_MKEY_MR;
+ set_mr_fields(dev, mr, length, access_flags, iova);
+ mr->ibmr.pd = pd;
+ kvfree(in);
+ mlx5_ib_dbg(dev, "crossing mkey = 0x%x\n", mr->mmkey.key);
+
+ return &mr->ibmr;
+err_2:
+ kvfree(in);
+err_1:
+ kfree(mr);
+ return ERR_PTR(err);
}
-static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
- u64 virt_addr, u64 len, int npages,
- int page_shift, int order, int access_flags)
+/*
+ * If ibmr is NULL it will be allocated by reg_create.
+ * Else, the given ibmr will be used.
+ */
+static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
+ u64 iova, int access_flags,
+ unsigned long page_size, bool populate,
+ int access_mode, u16 st_index, u8 ph)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct device *ddev = dev->ib_dev.dma_device;
- struct umr_common *umrc = &dev->umrc;
- struct mlx5_ib_umr_context umr_context;
- struct ib_send_wr wr, *bad;
struct mlx5_ib_mr *mr;
- struct ib_sge sg;
- int size;
- __be64 *mr_pas;
__be64 *pas;
- dma_addr_t dma;
- int err = 0;
- int i;
+ void *mkc;
+ int inlen;
+ u32 *in;
+ int err;
+ bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg)) &&
+ (access_mode == MLX5_MKC_ACCESS_MODE_MTT) &&
+ (ph == MLX5_IB_NO_PH);
+ bool ksm_mode = (access_mode == MLX5_MKC_ACCESS_MODE_KSM);
- for (i = 0; i < 1; i++) {
- mr = alloc_cached_mr(dev, order);
- if (mr)
- break;
+ if (!page_size)
+ return ERR_PTR(-EINVAL);
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
- err = add_keys(dev, order2idx(dev, order), 1);
- if (err && err != -EAGAIN) {
- mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
- break;
+ mr->ibmr.pd = pd;
+ mr->access_flags = access_flags;
+ mr->page_shift = order_base_2(page_size);
+
+ inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ if (populate)
+ inlen += sizeof(*pas) *
+ roundup(ib_umem_num_dma_blocks(umem, page_size), 2);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_1;
+ }
+ pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ if (populate) {
+ if (WARN_ON(access_flags & IB_ACCESS_ON_DEMAND || ksm_mode)) {
+ err = -EINVAL;
+ goto err_2;
}
+ mlx5_ib_populate_pas(umem, 1UL << mr->page_shift, pas,
+ pg_cap ? MLX5_IB_MTT_PRESENT : 0);
}
- if (!mr)
- return ERR_PTR(-EAGAIN);
-
- /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
- * To avoid copying garbage after the pas array, we allocate
- * a little more. */
- size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
- mr_pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
- if (!mr_pas) {
- err = -ENOMEM;
- goto free_mr;
+ /* The pg_access bit allows setting the access flags
+ * in the page list submitted with the command.
+ */
+ MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ set_mkc_access_pd_addr_fields(mkc, access_flags, iova,
+ populate ? pd : dev->umrc.pd);
+ /* In the case of a data direct flow, overwrite the pdn field with the internal kernel PD */
+ if (umem->is_dmabuf && ksm_mode)
+ MLX5_SET(mkc, mkc, pd, dev->ddr.pdn);
+
+ MLX5_SET(mkc, mkc, free, !populate);
+ MLX5_SET(mkc, mkc, access_mode_1_0, access_mode);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+
+ MLX5_SET64(mkc, mkc, len, umem->length);
+ MLX5_SET(mkc, mkc, bsf_octword_size, 0);
+ if (ksm_mode)
+ MLX5_SET(mkc, mkc, translations_octword_size,
+ get_octo_len(iova, umem->length, mr->page_shift) * 2);
+ else
+ MLX5_SET(mkc, mkc, translations_octword_size,
+ get_octo_len(iova, umem->length, mr->page_shift));
+ MLX5_SET(mkc, mkc, log_page_size, mr->page_shift);
+ if (mlx5_umem_needs_ats(dev, umem, access_flags))
+ MLX5_SET(mkc, mkc, ma_translation_mode, 1);
+ if (populate) {
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+ get_octo_len(iova, umem->length, mr->page_shift));
}
- pas = PTR_ALIGN(mr_pas, MLX5_UMR_ALIGN);
- mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
- /* Clear padding after the actual pages. */
- memset(pas + npages, 0, size - npages * sizeof(u64));
+ if (ph != MLX5_IB_NO_PH) {
+ MLX5_SET(mkc, mkc, pcie_tph_en, 1);
+ MLX5_SET(mkc, mkc, pcie_tph_ph, ph);
+ if (st_index != MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX)
+ MLX5_SET(mkc, mkc, pcie_tph_steering_tag_index, st_index);
+ }
- dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
- if (dma_mapping_error(ddev, dma)) {
- err = -ENOMEM;
- goto free_pas;
+ err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
+ if (err) {
+ mlx5_ib_warn(dev, "create mkey failed\n");
+ goto err_2;
}
+ mr->mmkey.type = MLX5_MKEY_MR;
+ mr->mmkey.ndescs = get_octo_len(iova, umem->length, mr->page_shift);
+ mr->umem = umem;
+ set_mr_fields(dev, mr, umem->length, access_flags, iova);
+ kvfree(in);
- memset(&wr, 0, sizeof(wr));
- wr.wr_id = (u64)(unsigned long)&umr_context;
- prep_umr_reg_wqe(pd, &wr, &sg, dma, npages, mr->mmr.key, page_shift,
- virt_addr, len, access_flags);
+ mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
- mlx5_ib_init_umr_context(&umr_context);
- down(&umrc->sem);
- err = ib_post_send(umrc->qp, &wr, &bad);
- if (err) {
- mlx5_ib_warn(dev, "post send failed, err %d\n", err);
- goto unmap_dma;
- } else {
- wait_for_completion(&umr_context.done);
- if (umr_context.status != IB_WC_SUCCESS) {
- mlx5_ib_warn(dev, "reg umr failed\n");
- err = -EFAULT;
- }
+ return mr;
+
+err_2:
+ kvfree(in);
+err_1:
+ kfree(mr);
+ return ERR_PTR(err);
+}
+
+static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
+ u64 length, int acc, int mode)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ struct mlx5_ib_mr *mr;
+ void *mkc;
+ u32 *in;
+ int err;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_free;
}
- mr->mmr.iova = virt_addr;
- mr->mmr.size = len;
- mr->mmr.pd = to_mpd(pd)->pdn;
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
- mr->live = 1;
+ MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
+ MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
+ MLX5_SET64(mkc, mkc, len, length);
+ set_mkc_access_pd_addr_fields(mkc, acc, start_addr, pd);
-unmap_dma:
- up(&umrc->sem);
- dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
+ err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
+ if (err)
+ goto err_in;
-free_pas:
- kfree(mr_pas);
+ kfree(in);
-free_mr:
- if (err) {
- free_cached_mr(dev, mr);
- return ERR_PTR(err);
+ set_mr_fields(dev, mr, length, acc, start_addr);
+
+ return &mr->ibmr;
+
+err_in:
+ kfree(in);
+
+err_free:
+ kfree(mr);
+
+ return ERR_PTR(err);
+}
+
+int mlx5_ib_advise_mr(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 flags,
+ struct ib_sge *sg_list,
+ u32 num_sge,
+ struct uverbs_attr_bundle *attrs)
+{
+ if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH &&
+ advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
+ advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
+ return -EOPNOTSUPP;
+
+ return mlx5_ib_advise_mr_prefetch(pd, advice, flags,
+ sg_list, num_sge);
+}
+
+struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
+ struct ib_dm_mr_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_dm *mdm = to_mdm(dm);
+ struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev;
+ u64 start_addr = mdm->dev_addr + attr->offset;
+ int mode;
+
+ switch (mdm->type) {
+ case MLX5_IB_UAPI_DM_TYPE_MEMIC:
+ if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS)
+ return ERR_PTR(-EINVAL);
+
+ mode = MLX5_MKC_ACCESS_MODE_MEMIC;
+ start_addr -= pci_resource_start(dev->pdev, 0);
+ break;
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM:
+ if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS)
+ return ERR_PTR(-EINVAL);
+
+ mode = MLX5_MKC_ACCESS_MODE_SW_ICM;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
}
- return mr;
+ return mlx5_ib_get_dm_mr(pd, start_addr, attr->length,
+ attr->access_flags, mode);
}
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
- int zap)
+static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
+ u64 iova, int access_flags,
+ struct ib_dmah *dmah)
{
- struct mlx5_ib_dev *dev = mr->dev;
- struct device *ddev = dev->ib_dev.dma_device;
- struct umr_common *umrc = &dev->umrc;
- struct mlx5_ib_umr_context umr_context;
- struct ib_umem *umem = mr->umem;
- int size;
- __be64 *pas;
- dma_addr_t dma;
- struct ib_send_wr wr, *bad;
- struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr.wr.fast_reg;
- struct ib_sge sg;
- int err = 0;
- const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
- const int page_index_mask = page_index_alignment - 1;
- size_t pages_mapped = 0;
- size_t pages_to_map = 0;
- size_t pages_iter = 0;
- int use_emergency_buf = 0;
-
- /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
- * so we need to align the offset and length accordingly */
- if (start_page_index & page_index_mask) {
- npages += start_page_index & page_index_mask;
- start_page_index &= ~page_index_mask;
- }
-
- pages_to_map = ALIGN(npages, page_index_alignment);
-
- if (start_page_index + pages_to_map > MLX5_MAX_UMR_PAGES)
- return -EINVAL;
-
- size = sizeof(u64) * pages_to_map;
- size = min_t(int, PAGE_SIZE, size);
- /* We allocate with GFP_ATOMIC to avoid recursion into page-reclaim
- * code, when we are called from an invalidation. The pas buffer must
- * be 2k-aligned for Connect-IB. */
- pas = (__be64 *)get_zeroed_page(GFP_ATOMIC);
- if (!pas) {
- mlx5_ib_warn(dev, "unable to allocate memory during MTT update, falling back to slower chunked mechanism.\n");
- pas = mlx5_ib_update_mtt_emergency_buffer;
- size = MLX5_UMR_MTT_MIN_CHUNK_SIZE;
- use_emergency_buf = 1;
- mutex_lock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
- memset(pas, 0, size);
- }
- pages_iter = size / sizeof(u64);
- dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
- if (dma_mapping_error(ddev, dma)) {
- mlx5_ib_err(dev, "unable to map DMA during MTT update.\n");
- err = -ENOMEM;
- goto free_pas;
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_mr *mr = NULL;
+ bool xlt_with_umr;
+ u16 st_index = MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX;
+ u8 ph = MLX5_IB_NO_PH;
+ int err;
+
+ if (dmah) {
+ struct mlx5_ib_dmah *mdmah = to_mdmah(dmah);
+
+ ph = dmah->ph;
+ if (dmah->valid_fields & BIT(IB_DMAH_CPU_ID_EXISTS))
+ st_index = mdmah->st_index;
}
- for (pages_mapped = 0;
- pages_mapped < pages_to_map && !err;
- pages_mapped += pages_iter, start_page_index += pages_iter) {
- dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
+ xlt_with_umr = mlx5r_umr_can_load_pas(dev, umem->length);
+ if (xlt_with_umr) {
+ mr = alloc_cacheable_mr(pd, umem, iova, access_flags,
+ MLX5_MKC_ACCESS_MODE_MTT,
+ st_index, ph);
+ } else {
+ unsigned long page_size = mlx5_umem_mkc_find_best_pgsz(
+ dev, umem, iova, MLX5_MKC_ACCESS_MODE_MTT);
+
+ mutex_lock(&dev->slow_path_mutex);
+ mr = reg_create(pd, umem, iova, access_flags, page_size,
+ true, MLX5_MKC_ACCESS_MODE_MTT,
+ st_index, ph);
+ mutex_unlock(&dev->slow_path_mutex);
+ }
+ if (IS_ERR(mr)) {
+ ib_umem_release(umem);
+ return ERR_CAST(mr);
+ }
- npages = min_t(size_t,
- pages_iter,
- ib_umem_num_pages(umem) - start_page_index);
+ mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
- if (!zap) {
- __mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT,
- start_page_index, npages, pas,
- MLX5_IB_MTT_PRESENT);
- /* Clear padding after the pages brought from the
- * umem. */
- memset(pas + npages, 0, size - npages * sizeof(u64));
- }
+ atomic_add(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);
- dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
-
- memset(&wr, 0, sizeof(wr));
- wr.wr_id = (u64)(unsigned long)&umr_context;
-
- sg.addr = dma;
- sg.length = ALIGN(npages * sizeof(u64),
- MLX5_UMR_MTT_ALIGNMENT);
- sg.lkey = dev->umrc.pd->local_dma_lkey;
-
- wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
- MLX5_IB_SEND_UMR_UPDATE_MTT;
- wr.sg_list = &sg;
- wr.num_sge = 1;
- wr.opcode = MLX5_IB_WR_UMR;
- umrwr->npages = sg.length / sizeof(u64);
- umrwr->page_shift = PAGE_SHIFT;
- umrwr->mkey = mr->mmr.key;
- umrwr->target.offset = start_page_index;
-
- mlx5_ib_init_umr_context(&umr_context);
- down(&umrc->sem);
- err = ib_post_send(umrc->qp, &wr, &bad);
+ if (xlt_with_umr) {
+ /*
+ * If the MR was created with reg_create then it will be
+ * configured properly but left disabled. It is safe to go ahead
+ * and configure it again via UMR while enabling it.
+ */
+ err = mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE);
if (err) {
- mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);
- } else {
- wait_for_completion(&umr_context.done);
- if (umr_context.status != IB_WC_SUCCESS) {
- mlx5_ib_err(dev, "UMR completion failed, code %d\n",
- umr_context.status);
- err = -EFAULT;
- }
+ mlx5_ib_dereg_mr(&mr->ibmr, NULL);
+ return ERR_PTR(err);
}
- up(&umrc->sem);
}
- dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
-
-free_pas:
- if (!use_emergency_buf)
- free_page((unsigned long)pas);
- else
- mutex_unlock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
-
- return err;
+ return &mr->ibmr;
}
-#endif
-static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
- u64 length, struct ib_umem *umem,
- int npages, int page_shift,
- int access_flags)
+static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 iova, int access_flags,
+ struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_create_mkey_mbox_in *in;
+ struct ib_umem_odp *odp;
struct mlx5_ib_mr *mr;
- int inlen;
int err;
- bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr)
- return ERR_PTR(-ENOMEM);
+ if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+ return ERR_PTR(-EOPNOTSUPP);
- inlen = sizeof(*in) + sizeof(*in->pas) * ((npages + 1) / 2) * 2;
- in = mlx5_vzalloc(inlen);
- if (!in) {
- err = -ENOMEM;
- goto err_1;
- }
- mlx5_ib_populate_pas(dev, umem, page_shift, in->pas,
- pg_cap ? MLX5_IB_MTT_PRESENT : 0);
-
- /* The MLX5_MKEY_INBOX_PG_ACCESS bit allows setting the access flags
- * in the page list submitted with the command. */
- in->flags = pg_cap ? cpu_to_be32(MLX5_MKEY_INBOX_PG_ACCESS) : 0;
- in->seg.flags = convert_access(access_flags) |
- MLX5_ACCESS_MODE_MTT;
- in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
- in->seg.start_addr = cpu_to_be64(virt_addr);
- in->seg.len = cpu_to_be64(length);
- in->seg.bsfs_octo_size = 0;
- in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
- in->seg.log2_page_size = page_shift;
- in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
- in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
- 1 << page_shift));
- err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, inlen, NULL,
- NULL, NULL);
- if (err) {
- mlx5_ib_warn(dev, "create mkey failed\n");
- goto err_2;
+ err = mlx5r_odp_create_eq(dev, &dev->odp_pf_eq);
+ if (err)
+ return ERR_PTR(err);
+ if (!start && length == U64_MAX) {
+ if (iova != 0)
+ return ERR_PTR(-EINVAL);
+ if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
+ return ERR_PTR(-EINVAL);
+
+ mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
+ if (IS_ERR(mr))
+ return ERR_CAST(mr);
+ return &mr->ibmr;
}
- mr->umem = umem;
- mr->dev = dev;
- mr->live = 1;
- kvfree(in);
- mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key);
+ /* ODP requires xlt update via umr to work. */
+ if (!mlx5r_umr_can_load_pas(dev, length))
+ return ERR_PTR(-EINVAL);
- return mr;
+ odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags,
+ &mlx5_mn_ops);
+ if (IS_ERR(odp))
+ return ERR_CAST(odp);
-err_2:
- kvfree(in);
+ mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags,
+ MLX5_MKC_ACCESS_MODE_MTT,
+ MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX,
+ MLX5_IB_NO_PH);
+ if (IS_ERR(mr)) {
+ ib_umem_release(&odp->umem);
+ return ERR_CAST(mr);
+ }
+ xa_init(&mr->implicit_children);
-err_1:
- kfree(mr);
+ odp->private = mr;
+ err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
+ if (err)
+ goto err_dereg_mr;
+
+ err = mlx5_ib_init_odp_mr(mr);
+ if (err)
+ goto err_dereg_mr;
+ return &mr->ibmr;
+err_dereg_mr:
+ mlx5_ib_dereg_mr(&mr->ibmr, NULL);
return ERR_PTR(err);
}
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt_addr, int access_flags,
+ u64 iova, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_ib_mr *mr = NULL;
struct ib_umem *umem;
- int page_shift;
- int npages;
- int ncont;
- int order;
int err;
- mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
- start, virt_addr, length, access_flags);
- umem = ib_umem_get(pd->uobject->context, start, length, access_flags,
- 0);
- if (IS_ERR(umem)) {
- mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
- return (void *)umem;
+ if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) ||
+ ((access_flags & IB_ACCESS_ON_DEMAND) && dmah))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mlx5_ib_dbg(dev, "start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n",
+ start, iova, length, access_flags);
+
+ err = mlx5r_umr_resource_init(dev);
+ if (err)
+ return ERR_PTR(err);
+
+ if (access_flags & IB_ACCESS_ON_DEMAND)
+ return create_user_odp_mr(pd, start, length, iova, access_flags,
+ udata);
+ umem = ib_umem_get(&dev->ib_dev, start, length, access_flags);
+ if (IS_ERR(umem))
+ return ERR_CAST(umem);
+ return create_real_mr(pd, umem, iova, access_flags, dmah);
+}
+
+static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach)
+{
+ struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
+ struct mlx5_ib_mr *mr = umem_dmabuf->private;
+
+ dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
+
+ if (!umem_dmabuf->sgt || !mr)
+ return;
+
+ mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP);
+ ib_umem_dmabuf_unmap_pages(umem_dmabuf);
+}
+
+static struct dma_buf_attach_ops mlx5_ib_dmabuf_attach_ops = {
+ .allow_peer2peer = 1,
+ .move_notify = mlx5_ib_dmabuf_invalidate_cb,
+};
+
+static struct ib_mr *
+reg_user_mr_dmabuf(struct ib_pd *pd, struct device *dma_device,
+ u64 offset, u64 length, u64 virt_addr,
+ int fd, int access_flags, int access_mode,
+ struct ib_dmah *dmah)
+{
+ bool pinned_mode = (access_mode == MLX5_MKC_ACCESS_MODE_KSM);
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_mr *mr = NULL;
+ struct ib_umem_dmabuf *umem_dmabuf;
+ u16 st_index = MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX;
+ u8 ph = MLX5_IB_NO_PH;
+ int err;
+
+ err = mlx5r_umr_resource_init(dev);
+ if (err)
+ return ERR_PTR(err);
+
+ if (!pinned_mode)
+ umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev,
+ offset, length, fd,
+ access_flags,
+ &mlx5_ib_dmabuf_attach_ops);
+ else
+ umem_dmabuf = ib_umem_dmabuf_get_pinned_with_dma_device(&dev->ib_dev,
+ dma_device, offset, length,
+ fd, access_flags);
+
+ if (IS_ERR(umem_dmabuf)) {
+ mlx5_ib_dbg(dev, "umem_dmabuf get failed (%pe)\n", umem_dmabuf);
+ return ERR_CAST(umem_dmabuf);
}
- mlx5_ib_cont_pages(umem, start, &npages, &page_shift, &ncont, &order);
- if (!npages) {
- mlx5_ib_warn(dev, "avoid zero region\n");
- err = -EINVAL;
- goto error;
+ if (dmah) {
+ struct mlx5_ib_dmah *mdmah = to_mdmah(dmah);
+
+ ph = dmah->ph;
+ if (dmah->valid_fields & BIT(IB_DMAH_CPU_ID_EXISTS))
+ st_index = mdmah->st_index;
}
- mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
- npages, ncont, order, page_shift);
+ mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr,
+ access_flags, access_mode,
+ st_index, ph);
+ if (IS_ERR(mr)) {
+ ib_umem_release(&umem_dmabuf->umem);
+ return ERR_CAST(mr);
+ }
- if (use_umr(order)) {
- mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
- order, access_flags);
- if (PTR_ERR(mr) == -EAGAIN) {
- mlx5_ib_dbg(dev, "cache empty for order %d", order);
- mr = NULL;
- }
- } else if (access_flags & IB_ACCESS_ON_DEMAND) {
- err = -EINVAL;
- pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
- goto error;
+ mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
+
+ atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages);
+ umem_dmabuf->private = mr;
+ if (!pinned_mode) {
+ err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
+ if (err)
+ goto err_dereg_mr;
+ } else {
+ mr->data_direct = true;
}
- if (!mr)
- mr = reg_create(pd, virt_addr, length, umem, ncont, page_shift,
- access_flags);
+ err = mlx5_ib_init_dmabuf_mr(mr);
+ if (err)
+ goto err_dereg_mr;
+ return &mr->ibmr;
- if (IS_ERR(mr)) {
- err = PTR_ERR(mr);
- goto error;
+err_dereg_mr:
+ __mlx5_ib_dereg_mr(&mr->ibmr);
+ return ERR_PTR(err);
+}
+
+static struct ib_mr *
+reg_user_mr_dmabuf_by_data_direct(struct ib_pd *pd, u64 offset,
+ u64 length, u64 virt_addr,
+ int fd, int access_flags)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_data_direct_dev *data_direct_dev;
+ struct ib_mr *crossing_mr;
+ struct ib_mr *crossed_mr;
+ int ret = 0;
+
+ /* Per HW behaviour, the IOVA must be page aligned in KSM mode */
+ if (!PAGE_ALIGNED(virt_addr) || (access_flags & IB_ACCESS_ON_DEMAND))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mutex_lock(&dev->data_direct_lock);
+ data_direct_dev = dev->data_direct_dev;
+ if (!data_direct_dev) {
+ ret = -EINVAL;
+ goto end;
}
- mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmr.key);
+ /* If no device's 'data direct mkey' with RO flags exists
+ * mask it out accordingly.
+ */
+ if (!dev->ddr.mkey_ro_valid)
+ access_flags &= ~IB_ACCESS_RELAXED_ORDERING;
+ crossed_mr = reg_user_mr_dmabuf(pd, &data_direct_dev->pdev->dev,
+ offset, length, virt_addr, fd,
+ access_flags, MLX5_MKC_ACCESS_MODE_KSM,
+ NULL);
+ if (IS_ERR(crossed_mr)) {
+ ret = PTR_ERR(crossed_mr);
+ goto end;
+ }
- mr->umem = umem;
- mr->npages = npages;
- atomic_add(npages, &dev->mdev->priv.reg_pages);
- mr->ibmr.lkey = mr->mmr.key;
- mr->ibmr.rkey = mr->mmr.key;
+ mutex_lock(&dev->slow_path_mutex);
+ crossing_mr = reg_create_crossing_vhca_mr(pd, virt_addr, length, access_flags,
+ crossed_mr->lkey);
+ mutex_unlock(&dev->slow_path_mutex);
+ if (IS_ERR(crossing_mr)) {
+ __mlx5_ib_dereg_mr(crossed_mr);
+ ret = PTR_ERR(crossing_mr);
+ goto end;
+ }
+
+ list_add_tail(&to_mmr(crossed_mr)->dd_node, &dev->data_direct_mr_list);
+ to_mmr(crossing_mr)->dd_crossed_mr = to_mmr(crossed_mr);
+ to_mmr(crossing_mr)->data_direct = true;
+end:
+ mutex_unlock(&dev->data_direct_lock);
+ return ret ? ERR_PTR(ret) : crossing_mr;
+}
+
+struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
+ u64 length, u64 virt_addr,
+ int fd, int access_flags,
+ struct ib_dmah *dmah,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ int mlx5_access_flags = 0;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) ||
+ !IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_REG_DMABUF_MR_ACCESS_FLAGS)) {
+ err = uverbs_get_flags32(&mlx5_access_flags, attrs,
+ MLX5_IB_ATTR_REG_DMABUF_MR_ACCESS_FLAGS,
+ MLX5_IB_UAPI_REG_DMABUF_ACCESS_DATA_DIRECT);
+ if (err)
+ return ERR_PTR(err);
+ }
+
+ mlx5_ib_dbg(dev,
+ "offset 0x%llx, virt_addr 0x%llx, length 0x%llx, fd %d, access_flags 0x%x, mlx5_access_flags 0x%x\n",
+ offset, virt_addr, length, fd, access_flags, mlx5_access_flags);
+
+ /* dmabuf requires xlt update via umr to work. */
+ if (!mlx5r_umr_can_load_pas(dev, length))
+ return ERR_PTR(-EINVAL);
+
+ if (mlx5_access_flags & MLX5_IB_UAPI_REG_DMABUF_ACCESS_DATA_DIRECT)
+ return reg_user_mr_dmabuf_by_data_direct(pd, offset, length, virt_addr,
+ fd, access_flags);
+
+ return reg_user_mr_dmabuf(pd, pd->device->dma_device,
+ offset, length, virt_addr,
+ fd, access_flags, MLX5_MKC_ACCESS_MODE_MTT,
+ dmah);
+}
+
+/*
+ * True if the change in access flags can be done via UMR; only some access
+ * flags can be updated.
+ */
+static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev,
+ unsigned int current_access_flags,
+ unsigned int target_access_flags)
+{
+ unsigned int diffs = current_access_flags ^ target_access_flags;
+
+ if (diffs & ~(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING |
+ IB_ACCESS_REMOTE_ATOMIC))
+ return false;
+ return mlx5r_umr_can_reconfig(dev, current_access_flags,
+ target_access_flags);
+}
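
(For reference only, not part of the patch: the XOR-based check above permits rereg via UMR only when every differing access bit lies within the updatable set. The standalone sketch below uses placeholder flag values rather than the real IB_ACCESS_* enum and omits the mlx5r_umr_can_reconfig() capability check.)

#include <stdbool.h>
#include <stdio.h>

/* Placeholder bit values for illustration only; the real IB_ACCESS_* flags
 * live in include/rdma/ib_verbs.h and may use different bits. */
#define ACC_LOCAL_WRITE      (1u << 0)
#define ACC_REMOTE_WRITE     (1u << 1)
#define ACC_REMOTE_READ      (1u << 2)
#define ACC_REMOTE_ATOMIC    (1u << 3)
#define ACC_RELAXED_ORDERING (1u << 4)
#define ACC_UPDATABLE (ACC_LOCAL_WRITE | ACC_REMOTE_WRITE | ACC_REMOTE_READ | \
		       ACC_REMOTE_ATOMIC | ACC_RELAXED_ORDERING)

/* True only when every differing access bit is one UMR can rewrite in place. */
static bool flags_diff_is_updatable(unsigned int cur, unsigned int target)
{
	unsigned int diffs = cur ^ target;

	return (diffs & ~ACC_UPDATABLE) == 0;
}

int main(void)
{
	printf("%d\n", flags_diff_is_updatable(ACC_LOCAL_WRITE,
					       ACC_LOCAL_WRITE | ACC_REMOTE_READ)); /* 1 */
	printf("%d\n", flags_diff_is_updatable(ACC_LOCAL_WRITE, 1u << 10));          /* 0 */
	return 0;
}
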
+
+static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
+ struct ib_umem *new_umem,
+ int new_access_flags, u64 iova,
+ unsigned long *page_size)
+{
+ struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+
+ /* We only track the allocated sizes of MRs from the cache */
+ if (!mr->mmkey.cache_ent)
+ return false;
+ if (!mlx5r_umr_can_load_pas(dev, new_umem->length))
+ return false;
+
+ *page_size = mlx5_umem_mkc_find_best_pgsz(
+ dev, new_umem, iova, mr->mmkey.cache_ent->rb_key.access_mode);
+ if (WARN_ON(!*page_size))
+ return false;
+ return (mr->mmkey.cache_ent->rb_key.ndescs) >=
+ ib_umem_num_dma_blocks(new_umem, *page_size);
+}
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (umem->odp_data) {
+static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
+ int access_flags, int flags, struct ib_umem *new_umem,
+ u64 iova, unsigned long page_size)
+{
+ struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+ int upd_flags = MLX5_IB_UPD_XLT_ADDR | MLX5_IB_UPD_XLT_ENABLE;
+ struct ib_umem *old_umem = mr->umem;
+ int err;
+
+ /*
+ * To keep everything simple the MR is revoked before we start to mess
+ * with it. This ensures the change is atomic relative to any use of the
+ * MR.
+ */
+ err = mlx5r_umr_revoke_mr(mr);
+ if (err)
+ return err;
+
+ if (flags & IB_MR_REREG_PD) {
+ mr->ibmr.pd = pd;
+ upd_flags |= MLX5_IB_UPD_XLT_PD;
+ }
+ if (flags & IB_MR_REREG_ACCESS) {
+ mr->access_flags = access_flags;
+ upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
+ }
+
+ mr->ibmr.iova = iova;
+ mr->ibmr.length = new_umem->length;
+ mr->page_shift = order_base_2(page_size);
+ mr->umem = new_umem;
+ err = mlx5r_umr_update_mr_pas(mr, upd_flags);
+ if (err) {
/*
- * This barrier prevents the compiler from moving the
- * setting of umem->odp_data->private to point to our
- * MR, before reg_umr finished, to ensure that the MR
- * initialization have finished before starting to
- * handle invalidations.
+ * The MR is revoked at this point, so it is safe to free
+ * new_umem.
*/
- smp_wmb();
- mr->umem->odp_data->private = mr;
+ mr->umem = old_umem;
+ return err;
+ }
+
+ atomic_sub(ib_umem_num_pages(old_umem), &dev->mdev->priv.reg_pages);
+ ib_umem_release(old_umem);
+ atomic_add(ib_umem_num_pages(new_umem), &dev->mdev->priv.reg_pages);
+ return 0;
+}
+
+struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
+ u64 length, u64 iova, int new_access_flags,
+ struct ib_pd *new_pd,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
+ struct mlx5_ib_mr *mr = to_mmr(ib_mr);
+ int err;
+
+ if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) || mr->data_direct ||
+ mr->mmkey.rb_key.ph != MLX5_IB_NO_PH)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mlx5_ib_dbg(
+ dev,
+ "start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n",
+ start, iova, length, new_access_flags);
+
+ if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (!(flags & IB_MR_REREG_ACCESS))
+ new_access_flags = mr->access_flags;
+ if (!(flags & IB_MR_REREG_PD))
+ new_pd = ib_mr->pd;
+
+ if (!(flags & IB_MR_REREG_TRANS)) {
+ struct ib_umem *umem;
+
+ /* Fast path for PD/access change */
+ if (can_use_umr_rereg_access(dev, mr->access_flags,
+ new_access_flags)) {
+ err = mlx5r_umr_rereg_pd_access(mr, new_pd,
+ new_access_flags);
+ if (err)
+ return ERR_PTR(err);
+ return NULL;
+ }
+ /* DM or ODP MRs don't have a normal umem, so we can't re-use it */
+ if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
+ goto recreate;
+
/*
- * Make sure we will see the new
- * umem->odp_data->private value in the invalidation
- * routines, before we can get page faults on the
- * MR. Page faults can happen once we put the MR in
- * the tree, below this line. Without the barrier,
- * there can be a fault handling and an invalidation
- * before umem->odp_data->private == mr is visible to
- * the invalidation handler.
+ * Only one active MR can refer to a umem at one time; revoke
+ * the old MR before assigning the umem to the new one.
*/
- smp_wmb();
+ err = mlx5r_umr_revoke_mr(mr);
+ if (err)
+ return ERR_PTR(err);
+ umem = mr->umem;
+ mr->umem = NULL;
+ atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);
+
+ return create_real_mr(new_pd, umem, mr->ibmr.iova,
+ new_access_flags, NULL);
}
-#endif
- return &mr->ibmr;
+ /*
+ * DM doesn't have a PAS list, so we can't re-use it; odp/dmabuf does,
+ * but the logic around releasing the umem is different.
+ */
+ if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
+ goto recreate;
+
+ if (!(new_access_flags & IB_ACCESS_ON_DEMAND) &&
+ can_use_umr_rereg_access(dev, mr->access_flags, new_access_flags)) {
+ struct ib_umem *new_umem;
+ unsigned long page_size;
+
+ new_umem = ib_umem_get(&dev->ib_dev, start, length,
+ new_access_flags);
+ if (IS_ERR(new_umem))
+ return ERR_CAST(new_umem);
+
+ /* Fast path for PAS change */
+ if (can_use_umr_rereg_pas(mr, new_umem, new_access_flags, iova,
+ &page_size)) {
+ err = umr_rereg_pas(mr, new_pd, new_access_flags, flags,
+ new_umem, iova, page_size);
+ if (err) {
+ ib_umem_release(new_umem);
+ return ERR_PTR(err);
+ }
+ return NULL;
+ }
+ return create_real_mr(new_pd, new_umem, iova, new_access_flags, NULL);
+ }
-error:
- ib_umem_release(umem);
- return ERR_PTR(err);
+ /*
+ * Everything else has no state we can preserve, just create a new MR
+ * from scratch
+ */
+recreate:
+ return mlx5_ib_reg_user_mr(new_pd, start, length, iova,
+ new_access_flags, NULL, udata);
}
-static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+static int
+mlx5_alloc_priv_descs(struct ib_device *device,
+ struct mlx5_ib_mr *mr,
+ int ndescs,
+ int desc_size)
{
- struct umr_common *umrc = &dev->umrc;
- struct mlx5_ib_umr_context umr_context;
- struct ib_send_wr wr, *bad;
- int err;
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct device *ddev = &dev->mdev->pdev->dev;
+ int size = ndescs * desc_size;
+ int add_size;
+ int ret;
- memset(&wr, 0, sizeof(wr));
- wr.wr_id = (u64)(unsigned long)&umr_context;
- prep_umr_unreg_wqe(dev, &wr, mr->mmr.key);
+ add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
+ if (is_power_of_2(MLX5_UMR_ALIGN) && add_size) {
+ int end = max_t(int, MLX5_UMR_ALIGN, roundup_pow_of_two(size));
- mlx5_ib_init_umr_context(&umr_context);
- down(&umrc->sem);
- err = ib_post_send(umrc->qp, &wr, &bad);
- if (err) {
- up(&umrc->sem);
- mlx5_ib_dbg(dev, "err %d\n", err);
- goto error;
- } else {
- wait_for_completion(&umr_context.done);
- up(&umrc->sem);
+ add_size = min_t(int, end - size, add_size);
}
- if (umr_context.status != IB_WC_SUCCESS) {
- mlx5_ib_warn(dev, "unreg umr failed\n");
- err = -EFAULT;
- goto error;
+
+ mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
+ if (!mr->descs_alloc)
+ return -ENOMEM;
+
+ mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
+
+ mr->desc_map = dma_map_single(ddev, mr->descs, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(ddev, mr->desc_map)) {
+ ret = -ENOMEM;
+ goto err;
}
+
return 0;
+err:
+ kfree(mr->descs_alloc);
-error:
- return err;
+ return ret;
}
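
(For reference only, not part of the patch: mlx5_alloc_priv_descs() over-allocates and then aligns the descriptor pointer with PTR_ALIGN() so the descriptors meet MLX5_UMR_ALIGN without needing an aligned allocator; the kernel version trims the padding by the allocator's guaranteed minimum alignment, while the userspace sketch below simply over-allocates by align - 1. The 2048-byte alignment is an assumed stand-in for MLX5_UMR_ALIGN.)

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DESC_ALIGN 2048u /* stand-in for MLX5_UMR_ALIGN; value assumed for the demo */

/* Round a pointer up to the next multiple of 'align' (a power of two),
 * mirroring the kernel's PTR_ALIGN(). */
static void *ptr_align(void *p, uintptr_t align)
{
	return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
}

int main(void)
{
	size_t size = 64 * 16; /* e.g. 64 descriptors of 16 bytes each */
	/* Over-allocate by align - 1 so an aligned block of 'size' always fits. */
	void *raw = calloc(1, size + DESC_ALIGN - 1);
	void *descs;

	if (!raw)
		return 1;
	descs = ptr_align(raw, DESC_ALIGN);
	printf("raw=%p aligned=%p\n", raw, descs);
	free(raw); /* always free the original pointer, not the aligned one */
	return 0;
}
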
-static int clean_mr(struct mlx5_ib_mr *mr)
+static void
+mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
+{
+ if (!mr->umem && !mr->data_direct &&
+ mr->ibmr.type != IB_MR_TYPE_DM && mr->descs) {
+ struct ib_device *device = mr->ibmr.device;
+ int size = mr->max_descs * mr->desc_size;
+ struct mlx5_ib_dev *dev = to_mdev(device);
+
+ dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
+ DMA_TO_DEVICE);
+ kfree(mr->descs_alloc);
+ mr->descs = NULL;
+ }
+}
+
+static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_mr *mr)
+{
+ struct mlx5_mkey_cache *cache = &dev->cache;
+ struct mlx5_cache_ent *ent;
+ int ret;
+
+ if (mr->mmkey.cache_ent) {
+ spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock);
+ goto end;
+ }
+
+ mutex_lock(&cache->rb_lock);
+ ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key);
+ if (ent) {
+ if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) {
+ if (ent->disabled) {
+ mutex_unlock(&cache->rb_lock);
+ return -EOPNOTSUPP;
+ }
+ mr->mmkey.cache_ent = ent;
+ spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock);
+ mutex_unlock(&cache->rb_lock);
+ goto end;
+ }
+ }
+
+ ent = mlx5r_cache_create_ent_locked(dev, mr->mmkey.rb_key, false);
+ mutex_unlock(&cache->rb_lock);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ mr->mmkey.cache_ent = ent;
+ spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock);
+
+end:
+ ret = push_mkey_locked(mr->mmkey.cache_ent, mr->mmkey.key);
+ spin_unlock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock);
+ return ret;
+}
+
+static int mlx5_ib_revoke_data_direct_mr(struct mlx5_ib_mr *mr)
{
struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
- int umred = mr->umred;
+ struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
int err;
- if (mr->sig) {
+ lockdep_assert_held(&dev->data_direct_lock);
+ mr->revoked = true;
+ err = mlx5r_umr_revoke_mr(mr);
+ if (WARN_ON(err))
+ return err;
+
+ ib_umem_dmabuf_revoke(umem_dmabuf);
+ return 0;
+}
+
+void mlx5_ib_revoke_data_direct_mrs(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_ib_mr *mr, *next;
+
+ lockdep_assert_held(&dev->data_direct_lock);
+
+ list_for_each_entry_safe(mr, next, &dev->data_direct_mr_list, dd_node) {
+ list_del(&mr->dd_node);
+ mlx5_ib_revoke_data_direct_mr(mr);
+ }
+}
+
+static int mlx5_umr_revoke_mr_with_lock(struct mlx5_ib_mr *mr)
+{
+ bool is_odp_dma_buf = is_dmabuf_mr(mr) &&
+ !to_ib_umem_dmabuf(mr->umem)->pinned;
+ bool is_odp = is_odp_mr(mr);
+ int ret;
+
+ if (is_odp)
+ mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex);
+
+ if (is_odp_dma_buf)
+ dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv,
+ NULL);
+
+ ret = mlx5r_umr_revoke_mr(mr);
+
+ if (is_odp) {
+ if (!ret)
+ to_ib_umem_odp(mr->umem)->private = NULL;
+ mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex);
+ }
+
+ if (is_odp_dma_buf) {
+ if (!ret)
+ to_ib_umem_dmabuf(mr->umem)->private = NULL;
+ dma_resv_unlock(
+ to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv);
+ }
+
+ return ret;
+}
+
+static int mlx5r_handle_mkey_cleanup(struct mlx5_ib_mr *mr)
+{
+ bool is_odp_dma_buf = is_dmabuf_mr(mr) &&
+ !to_ib_umem_dmabuf(mr->umem)->pinned;
+ struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+ struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
+ bool is_odp = is_odp_mr(mr);
+ bool from_cache = !!ent;
+ int ret;
+
+ if (mr->mmkey.cacheable && !mlx5_umr_revoke_mr_with_lock(mr) &&
+ !cache_ent_find_and_store(dev, mr)) {
+ ent = mr->mmkey.cache_ent;
+ /* upon storing to a clean temp entry - schedule its cleanup */
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ if (from_cache)
+ ent->in_use--;
+ if (ent->is_tmp && !ent->tmp_cleanup_scheduled) {
+ mod_delayed_work(ent->dev->cache.wq, &ent->dwork,
+ secs_to_jiffies(30));
+ ent->tmp_cleanup_scheduled = true;
+ }
+ spin_unlock_irq(&ent->mkeys_queue.lock);
+ return 0;
+ }
+
+ if (ent) {
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ ent->in_use--;
+ mr->mmkey.cache_ent = NULL;
+ spin_unlock_irq(&ent->mkeys_queue.lock);
+ }
+
+ if (is_odp)
+ mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex);
+
+ if (is_odp_dma_buf)
+ dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv,
+ NULL);
+ ret = destroy_mkey(dev, mr);
+ if (is_odp) {
+ if (!ret)
+ to_ib_umem_odp(mr->umem)->private = NULL;
+ mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex);
+ }
+
+ if (is_odp_dma_buf) {
+ if (!ret)
+ to_ib_umem_dmabuf(mr->umem)->private = NULL;
+ dma_resv_unlock(
+ to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv);
+ }
+ return ret;
+}
+
+static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
+ int rc;
+
+ /*
+ * Any async use of the mr must hold the refcount; once the refcount
+ * goes to zero, no other thread (such as ODP page faults, prefetch, or
+ * any UMR activity) can touch the mkey, so it is safe to destroy it.
+ */
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
+ refcount_read(&mr->mmkey.usecount) != 0 &&
+ xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)))
+ mlx5r_deref_wait_odp_mkey(&mr->mmkey);
+
+ if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
+ xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
+ mr->sig, NULL, GFP_KERNEL);
+
+ if (mr->mtt_mr) {
+ rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
+ if (rc)
+ return rc;
+ mr->mtt_mr = NULL;
+ }
+ if (mr->klm_mr) {
+ rc = mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
+ if (rc)
+ return rc;
+ mr->klm_mr = NULL;
+ }
+
if (mlx5_core_destroy_psv(dev->mdev,
mr->sig->psv_memory.psv_idx))
mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
mr->sig->psv_memory.psv_idx);
- if (mlx5_core_destroy_psv(dev->mdev,
- mr->sig->psv_wire.psv_idx))
+ if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
mr->sig->psv_wire.psv_idx);
kfree(mr->sig);
mr->sig = NULL;
}
- if (!umred) {
- err = destroy_mkey(dev, mr);
- if (err) {
- mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
- mr->mmr.key, err);
- return err;
- }
- } else {
- err = unreg_umr(dev, mr);
- if (err) {
- mlx5_ib_warn(dev, "failed unregister\n");
- return err;
- }
- free_cached_mr(dev, mr);
+ /* Stop DMA */
+ rc = mlx5r_handle_mkey_cleanup(mr);
+ if (rc)
+ return rc;
+
+ if (mr->umem) {
+ bool is_odp = is_odp_mr(mr);
+
+ if (!is_odp)
+ atomic_sub(ib_umem_num_pages(mr->umem),
+ &dev->mdev->priv.reg_pages);
+ ib_umem_release(mr->umem);
+ if (is_odp)
+ mlx5_ib_free_odp_mr(mr);
}
- if (!umred)
- kfree(mr);
+ if (!mr->mmkey.cache_ent)
+ mlx5_free_priv_descs(mr);
+ kfree(mr);
return 0;
}
-int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+static int dereg_crossing_data_direct_mr(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_mr *mr)
+{
+ struct mlx5_ib_mr *dd_crossed_mr = mr->dd_crossed_mr;
+ int ret;
+
+ ret = __mlx5_ib_dereg_mr(&mr->ibmr);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dev->data_direct_lock);
+ if (!dd_crossed_mr->revoked)
+ list_del(&dd_crossed_mr->dd_node);
+
+ ret = __mlx5_ib_dereg_mr(&dd_crossed_mr->ibmr);
+ mutex_unlock(&dev->data_direct_lock);
+ return ret;
+}
+
+int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
- struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
struct mlx5_ib_mr *mr = to_mmr(ibmr);
- int npages = mr->npages;
- struct ib_umem *umem = mr->umem;
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (umem && umem->odp_data) {
- /* Prevent new page faults from succeeding */
- mr->live = 0;
- /* Wait for all running page-fault handlers to finish. */
- synchronize_srcu(&dev->mr_srcu);
- /* Destroy all page mappings */
- mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
- ib_umem_end(umem));
- /*
- * We kill the umem before the MR for ODP,
- * so that there will not be any invalidations in
- * flight, looking at the *mr struct.
- */
- ib_umem_release(umem);
- atomic_sub(npages, &dev->mdev->priv.reg_pages);
+ struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
+
+ if (mr->data_direct)
+ return dereg_crossing_data_direct_mr(dev, mr);
+
+ return __mlx5_ib_dereg_mr(ibmr);
+}
+
+static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
+ int access_mode, int page_shift)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ void *mkc;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ /* This is only used from the kernel, so setting the PD is OK. */
+ set_mkc_access_pd_addr_fields(mkc, IB_ACCESS_RELAXED_ORDERING, 0, pd);
+ MLX5_SET(mkc, mkc, free, 1);
+ MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
+ MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
+ MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+ MLX5_SET(mkc, mkc, log_page_size, page_shift);
+ if (access_mode == MLX5_MKC_ACCESS_MODE_PA ||
+ access_mode == MLX5_MKC_ACCESS_MODE_MTT)
+ MLX5_SET(mkc, mkc, ma_translation_mode, MLX5_CAP_GEN(dev->mdev, ats));
+}
+
+static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
+ int ndescs, int desc_size, int page_shift,
+ int access_mode, u32 *in, int inlen)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ int err;
+
+ mr->access_mode = access_mode;
+ mr->desc_size = desc_size;
+ mr->max_descs = ndescs;
+
+ err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size);
+ if (err)
+ return err;
+
+ mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift);
+
+ err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
+ if (err)
+ goto err_free_descs;
+
+ mr->mmkey.type = MLX5_MKEY_MR;
+ mr->ibmr.lkey = mr->mmkey.key;
+ mr->ibmr.rkey = mr->mmkey.key;
+
+ return 0;
+
+err_free_descs:
+ mlx5_free_priv_descs(mr);
+ return err;
+}
- /* Avoid double-freeing the umem. */
- umem = NULL;
+static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
+ u32 max_num_sg, u32 max_num_meta_sg,
+ int desc_size, int access_mode)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ int ndescs = ALIGN(max_num_sg + max_num_meta_sg, 4);
+ int page_shift = 0;
+ struct mlx5_ib_mr *mr;
+ u32 *in;
+ int err;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->ibmr.pd = pd;
+ mr->ibmr.device = pd->device;
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_free;
}
-#endif
- clean_mr(mr);
+ if (access_mode == MLX5_MKC_ACCESS_MODE_MTT)
+ page_shift = PAGE_SHIFT;
- if (umem) {
- ib_umem_release(umem);
- atomic_sub(npages, &dev->mdev->priv.reg_pages);
+ err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift,
+ access_mode, in, inlen);
+ if (err)
+ goto err_free_in;
+
+ mr->umem = NULL;
+ kfree(in);
+
+ return mr;
+
+err_free_in:
+ kfree(in);
+err_free:
+ kfree(mr);
+ return ERR_PTR(err);
+}
+
+static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
+ int ndescs, u32 *in, int inlen)
+{
+ return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt),
+ PAGE_SHIFT, MLX5_MKC_ACCESS_MODE_MTT, in,
+ inlen);
+}
+
+static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
+ int ndescs, u32 *in, int inlen)
+{
+ return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm),
+ 0, MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
+}
+
+static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
+ int max_num_sg, int max_num_meta_sg,
+ u32 *in, int inlen)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ u32 psv_index[2];
+ void *mkc;
+ int err;
+
+ mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
+ if (!mr->sig)
+ return -ENOMEM;
+
+ /* create mem & wire PSVs */
+ err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index);
+ if (err)
+ goto err_free_sig;
+
+ mr->sig->psv_memory.psv_idx = psv_index[0];
+ mr->sig->psv_wire.psv_idx = psv_index[1];
+
+ mr->sig->sig_status_checked = true;
+ mr->sig->sig_err_exists = false;
+ /* Next UMR, Arm SIGERR */
+ ++mr->sig->sigerr_count;
+ mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
+ sizeof(struct mlx5_klm),
+ MLX5_MKC_ACCESS_MODE_KLMS);
+ if (IS_ERR(mr->klm_mr)) {
+ err = PTR_ERR(mr->klm_mr);
+ goto err_destroy_psv;
+ }
+ mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
+ sizeof(struct mlx5_mtt),
+ MLX5_MKC_ACCESS_MODE_MTT);
+ if (IS_ERR(mr->mtt_mr)) {
+ err = PTR_ERR(mr->mtt_mr);
+ goto err_free_klm_mr;
}
+ /* Set bsf descriptors for mkey */
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, bsf_en, 1);
+ MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
+
+ err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0,
+ MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
+ if (err)
+ goto err_free_mtt_mr;
+
+ err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
+ mr->sig, GFP_KERNEL));
+ if (err)
+ goto err_free_descs;
return 0;
+
+err_free_descs:
+ destroy_mkey(dev, mr);
+ mlx5_free_priv_descs(mr);
+err_free_mtt_mr:
+ mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
+ mr->mtt_mr = NULL;
+err_free_klm_mr:
+ mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
+ mr->klm_mr = NULL;
+err_destroy_psv:
+ if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
+ mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
+ mr->sig->psv_memory.psv_idx);
+ if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
+ mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
+ mr->sig->psv_wire.psv_idx);
+err_free_sig:
+ kfree(mr->sig);
+
+ return err;
}
-struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_num_sg)
+static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
+ enum ib_mr_type mr_type, u32 max_num_sg,
+ u32 max_num_meta_sg)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_create_mkey_mbox_in *in;
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ int ndescs = ALIGN(max_num_sg, 4);
struct mlx5_ib_mr *mr;
- int access_mode, err;
- int ndescs = roundup(max_num_sg, 4);
+ u32 *in;
+ int err;
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
- in = kzalloc(sizeof(*in), GFP_KERNEL);
+ in = kzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_free;
}
- in->seg.status = MLX5_MKEY_STATUS_FREE;
- in->seg.xlt_oct_size = cpu_to_be32(ndescs);
- in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
- in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
-
- if (mr_type == IB_MR_TYPE_MEM_REG) {
- access_mode = MLX5_ACCESS_MODE_MTT;
- in->seg.log2_page_size = PAGE_SHIFT;
- } else if (mr_type == IB_MR_TYPE_SIGNATURE) {
- u32 psv_index[2];
-
- in->seg.flags_pd = cpu_to_be32(be32_to_cpu(in->seg.flags_pd) |
- MLX5_MKEY_BSF_EN);
- in->seg.bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
- mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
- if (!mr->sig) {
- err = -ENOMEM;
- goto err_free_in;
- }
-
- /* create mem & wire PSVs */
- err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
- 2, psv_index);
- if (err)
- goto err_free_sig;
-
- access_mode = MLX5_ACCESS_MODE_KLM;
- mr->sig->psv_memory.psv_idx = psv_index[0];
- mr->sig->psv_wire.psv_idx = psv_index[1];
+ mr->ibmr.device = pd->device;
+ mr->umem = NULL;
- mr->sig->sig_status_checked = true;
- mr->sig->sig_err_exists = false;
- /* Next UMR, Arm SIGERR */
- ++mr->sig->sigerr_count;
- } else {
+ switch (mr_type) {
+ case IB_MR_TYPE_MEM_REG:
+ err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen);
+ break;
+ case IB_MR_TYPE_SG_GAPS:
+ err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen);
+ break;
+ case IB_MR_TYPE_INTEGRITY:
+ err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg,
+ max_num_meta_sg, in, inlen);
+ break;
+ default:
mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
err = -EINVAL;
- goto err_free_in;
}
- in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
- err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in),
- NULL, NULL, NULL);
if (err)
- goto err_destroy_psv;
+ goto err_free_in;
- mr->ibmr.lkey = mr->mmr.key;
- mr->ibmr.rkey = mr->mmr.key;
- mr->umem = NULL;
kfree(in);
return &mr->ibmr;
-err_destroy_psv:
- if (mr->sig) {
- if (mlx5_core_destroy_psv(dev->mdev,
- mr->sig->psv_memory.psv_idx))
- mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
- mr->sig->psv_memory.psv_idx);
- if (mlx5_core_destroy_psv(dev->mdev,
- mr->sig->psv_wire.psv_idx))
- mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
- mr->sig->psv_wire.psv_idx);
- }
-err_free_sig:
- kfree(mr->sig);
err_free_in:
kfree(in);
err_free:
@@ -1324,46 +2522,109 @@ err_free:
return ERR_PTR(err);
}
-struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
- int page_list_len)
+struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ return __mlx5_ib_alloc_mr(pd, mr_type, max_num_sg, 0);
+}
+
+struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
+ u32 max_num_sg, u32 max_num_meta_sg)
{
- struct mlx5_ib_fast_reg_page_list *mfrpl;
- int size = page_list_len * sizeof(u64);
+ return __mlx5_ib_alloc_mr(pd, IB_MR_TYPE_INTEGRITY, max_num_sg,
+ max_num_meta_sg);
+}
- mfrpl = kmalloc(sizeof(*mfrpl), GFP_KERNEL);
- if (!mfrpl)
- return ERR_PTR(-ENOMEM);
+int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibmw->device);
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ struct mlx5_ib_mw *mw = to_mmw(ibmw);
+ unsigned int ndescs;
+ u32 *in = NULL;
+ void *mkc;
+ int err;
+ struct mlx5_ib_alloc_mw req = {};
+ struct {
+ __u32 comp_mask;
+ __u32 response_length;
+ } resp = {};
- mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
- if (!mfrpl->ibfrpl.page_list)
- goto err_free;
+ err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
+ if (err)
+ return err;
- mfrpl->mapped_page_list = dma_alloc_coherent(ibdev->dma_device,
- size, &mfrpl->map,
- GFP_KERNEL);
- if (!mfrpl->mapped_page_list)
- goto err_free;
+ if (req.comp_mask || req.reserved1 || req.reserved2)
+ return -EOPNOTSUPP;
- WARN_ON(mfrpl->map & 0x3f);
+ if (udata->inlen > sizeof(req) &&
+ !ib_is_udata_cleared(udata, sizeof(req),
+ udata->inlen - sizeof(req)))
+ return -EOPNOTSUPP;
- return &mfrpl->ibfrpl;
+ ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
-err_free:
- kfree(mfrpl->ibfrpl.page_list);
- kfree(mfrpl);
- return ERR_PTR(-ENOMEM);
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ MLX5_SET(mkc, mkc, free, 1);
+ MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
+ MLX5_SET(mkc, mkc, pd, to_mpd(ibmw->pd)->pdn);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
+ MLX5_SET(mkc, mkc, en_rinval, !!((ibmw->type == IB_MW_TYPE_2)));
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+
+ err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen);
+ if (err)
+ goto free;
+
+ mw->mmkey.type = MLX5_MKEY_MW;
+ ibmw->rkey = mw->mmkey.key;
+ mw->mmkey.ndescs = ndescs;
+
+ resp.response_length =
+ min(offsetofend(typeof(resp), response_length), udata->outlen);
+ if (resp.response_length) {
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err)
+ goto free_mkey;
+ }
+
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+ err = mlx5r_store_odp_mkey(dev, &mw->mmkey);
+ if (err)
+ goto free_mkey;
+ }
+
+ kfree(in);
+ return 0;
+
+free_mkey:
+ mlx5_core_destroy_mkey(dev->mdev, mw->mmkey.key);
+free:
+ kfree(in);
+ return err;
}
-void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
+int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
- struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
- struct mlx5_ib_dev *dev = to_mdev(page_list->device);
- int size = page_list->max_page_list_len * sizeof(u64);
+ struct mlx5_ib_dev *dev = to_mdev(mw->device);
+ struct mlx5_ib_mw *mmw = to_mmw(mw);
- dma_free_coherent(&dev->mdev->pdev->dev, size, mfrpl->mapped_page_list,
- mfrpl->map);
- kfree(mfrpl->ibfrpl.page_list);
- kfree(mfrpl);
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
+ xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key)))
+ /*
+ * pagefault_single_data_segment() may be accessing mmw
+ * if the user bound an ODP MR to this MW.
+ */
+ mlx5r_deref_wait_odp_mkey(&mmw->mmkey);
+
+ return mlx5_core_destroy_mkey(dev->mdev, mmw->mmkey.key);
}
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
@@ -1406,3 +2667,312 @@ int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
done:
return ret;
}
+
+static int
+mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
+ int data_sg_nents, unsigned int *data_sg_offset,
+ struct scatterlist *meta_sg, int meta_sg_nents,
+ unsigned int *meta_sg_offset)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ unsigned int sg_offset = 0;
+ int n = 0;
+
+ mr->meta_length = 0;
+ if (data_sg_nents == 1) {
+ n++;
+ mr->mmkey.ndescs = 1;
+ if (data_sg_offset)
+ sg_offset = *data_sg_offset;
+ mr->data_length = sg_dma_len(data_sg) - sg_offset;
+ mr->data_iova = sg_dma_address(data_sg) + sg_offset;
+ if (meta_sg_nents == 1) {
+ n++;
+ mr->meta_ndescs = 1;
+ if (meta_sg_offset)
+ sg_offset = *meta_sg_offset;
+ else
+ sg_offset = 0;
+ mr->meta_length = sg_dma_len(meta_sg) - sg_offset;
+ mr->pi_iova = sg_dma_address(meta_sg) + sg_offset;
+ }
+ ibmr->length = mr->data_length + mr->meta_length;
+ }
+
+ return n;
+}
+
+static int
+mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
+ struct scatterlist *sgl,
+ unsigned short sg_nents,
+ unsigned int *sg_offset_p,
+ struct scatterlist *meta_sgl,
+ unsigned short meta_sg_nents,
+ unsigned int *meta_sg_offset_p)
+{
+ struct scatterlist *sg = sgl;
+ struct mlx5_klm *klms = mr->descs;
+ unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
+ u32 lkey = mr->ibmr.pd->local_dma_lkey;
+ int i, j = 0;
+
+ mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
+ mr->ibmr.length = 0;
+
+ for_each_sg(sgl, sg, sg_nents, i) {
+ if (unlikely(i >= mr->max_descs))
+ break;
+ klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
+ klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
+ klms[i].key = cpu_to_be32(lkey);
+ mr->ibmr.length += sg_dma_len(sg) - sg_offset;
+
+ sg_offset = 0;
+ }
+
+ if (sg_offset_p)
+ *sg_offset_p = sg_offset;
+
+ mr->mmkey.ndescs = i;
+ mr->data_length = mr->ibmr.length;
+
+ if (meta_sg_nents) {
+ sg = meta_sgl;
+ sg_offset = meta_sg_offset_p ? *meta_sg_offset_p : 0;
+ for_each_sg(meta_sgl, sg, meta_sg_nents, j) {
+ if (unlikely(i + j >= mr->max_descs))
+ break;
+ klms[i + j].va = cpu_to_be64(sg_dma_address(sg) +
+ sg_offset);
+ klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) -
+ sg_offset);
+ klms[i + j].key = cpu_to_be32(lkey);
+ mr->ibmr.length += sg_dma_len(sg) - sg_offset;
+
+ sg_offset = 0;
+ }
+ if (meta_sg_offset_p)
+ *meta_sg_offset_p = sg_offset;
+
+ mr->meta_ndescs = j;
+ mr->meta_length = mr->ibmr.length - mr->data_length;
+ }
+
+ return i + j;
+}
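
As a hedged illustration of the byte-order handling in the KLM loop above, here is a standalone userspace sketch that packs one KLM-style descriptor (va/bcount/key stored big-endian). The struct name, the htobe*() helpers and the values are assumptions for the example, not the kernel API.

/* Illustrative sketch only; not part of the patch above. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct demo_klm {
	uint32_t bcount;	/* byte count, big-endian */
	uint32_t key;		/* lkey, big-endian */
	uint64_t va;		/* virtual address, big-endian */
};

static void demo_fill_klm(struct demo_klm *klm, uint64_t dma_addr,
			  uint32_t dma_len, uint32_t lkey, uint32_t sg_offset)
{
	klm->va = htobe64(dma_addr + sg_offset);
	klm->bcount = htobe32(dma_len - sg_offset);
	klm->key = htobe32(lkey);
}

int main(void)
{
	struct demo_klm klm;

	/* Example values only. */
	demo_fill_klm(&klm, 0x7f0000001000ULL, 8192, 0x1234, 512);
	printf("va=%#llx bcount=%u\n",
	       (unsigned long long)be64toh(klm.va),
	       (unsigned)be32toh(klm.bcount));
	return 0;
}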
+
+static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ __be64 *descs;
+
+ if (unlikely(mr->mmkey.ndescs == mr->max_descs))
+ return -ENOMEM;
+
+ descs = mr->descs;
+ descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
+
+ return 0;
+}
+
+static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ __be64 *descs;
+
+ if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs))
+ return -ENOMEM;
+
+ descs = mr->descs;
+ descs[mr->mmkey.ndescs + mr->meta_ndescs++] =
+ cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
+
+ return 0;
+}
+
+static int
+mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
+ int data_sg_nents, unsigned int *data_sg_offset,
+ struct scatterlist *meta_sg, int meta_sg_nents,
+ unsigned int *meta_sg_offset)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
+ int n;
+
+ pi_mr->mmkey.ndescs = 0;
+ pi_mr->meta_ndescs = 0;
+ pi_mr->meta_length = 0;
+
+ ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
+ pi_mr->desc_size * pi_mr->max_descs,
+ DMA_TO_DEVICE);
+
+ pi_mr->ibmr.page_size = ibmr->page_size;
+ n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset,
+ mlx5_set_page);
+ if (n != data_sg_nents)
+ return n;
+
+ pi_mr->data_iova = pi_mr->ibmr.iova;
+ pi_mr->data_length = pi_mr->ibmr.length;
+ pi_mr->ibmr.length = pi_mr->data_length;
+ ibmr->length = pi_mr->data_length;
+
+ if (meta_sg_nents) {
+ u64 page_mask = ~((u64)ibmr->page_size - 1);
+ u64 iova = pi_mr->data_iova;
+
+ n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents,
+ meta_sg_offset, mlx5_set_page_pi);
+
+ pi_mr->meta_length = pi_mr->ibmr.length;
+ /*
+ * PI address for the HW is the offset of the metadata address
+ * relative to the first data page address.
+ * It equals the first data page address + the size of the data pages +
+ * the metadata offset within the first metadata page.
+ */
+ pi_mr->pi_iova = (iova & page_mask) +
+ pi_mr->mmkey.ndescs * ibmr->page_size +
+ (pi_mr->ibmr.iova & ~page_mask);
+ /*
+ * In order to use one MTT MR for data and metadata, we also register
+ * the gaps between the end of the data and the start of the metadata
+ * (the sig MR will verify that the HW accesses the right addresses).
+ * This mapping is safe because we use an internal mkey for the
+ * registration.
+ */
+ pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova;
+ pi_mr->ibmr.iova = iova;
+ ibmr->length += pi_mr->meta_length;
+ }
+
+ ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
+ pi_mr->desc_size * pi_mr->max_descs,
+ DMA_TO_DEVICE);
+
+ return n;
+}
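
The pi_iova formula described in the comment above (first data page address + size of the data pages + metadata offset within its first page) is easier to follow with concrete numbers. This is a minimal userspace sketch; the page size, addresses and descriptor count are made-up example values.

/* Illustrative sketch only; not part of the patch above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t page_size = 4096;
	uint64_t page_mask = ~(page_size - 1);
	uint64_t data_iova = 0x10000234;	/* start of the data buffer */
	uint64_t meta_iova = 0x20000078;	/* start of the metadata buffer */
	uint32_t data_ndescs = 16;		/* data pages mapped so far */

	/* offset of the metadata relative to the first data page */
	uint64_t pi_iova = (data_iova & page_mask) +
			   (uint64_t)data_ndescs * page_size +
			   (meta_iova & ~page_mask);

	printf("pi_iova = %#llx\n", (unsigned long long)pi_iova);
	return 0;
}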
+
+static int
+mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
+ int data_sg_nents, unsigned int *data_sg_offset,
+ struct scatterlist *meta_sg, int meta_sg_nents,
+ unsigned int *meta_sg_offset)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct mlx5_ib_mr *pi_mr = mr->klm_mr;
+ int n;
+
+ pi_mr->mmkey.ndescs = 0;
+ pi_mr->meta_ndescs = 0;
+ pi_mr->meta_length = 0;
+
+ ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
+ pi_mr->desc_size * pi_mr->max_descs,
+ DMA_TO_DEVICE);
+
+ n = mlx5_ib_sg_to_klms(pi_mr, data_sg, data_sg_nents, data_sg_offset,
+ meta_sg, meta_sg_nents, meta_sg_offset);
+
+ ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
+ pi_mr->desc_size * pi_mr->max_descs,
+ DMA_TO_DEVICE);
+
+ /* This is a zero-based memory region */
+ pi_mr->data_iova = 0;
+ pi_mr->ibmr.iova = 0;
+ pi_mr->pi_iova = pi_mr->data_length;
+ ibmr->length = pi_mr->ibmr.length;
+
+ return n;
+}
+
+int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
+ int data_sg_nents, unsigned int *data_sg_offset,
+ struct scatterlist *meta_sg, int meta_sg_nents,
+ unsigned int *meta_sg_offset)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct mlx5_ib_mr *pi_mr = NULL;
+ int n;
+
+ WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY);
+
+ mr->mmkey.ndescs = 0;
+ mr->data_length = 0;
+ mr->data_iova = 0;
+ mr->meta_ndescs = 0;
+ mr->pi_iova = 0;
+ /*
+ * As a performance optimization, if possible, there is no need to
+ * perform a UMR operation to register the data/metadata buffers.
+ * First try to map the sg lists to PA descriptors with local_dma_lkey.
+ * Fall back to UMR only in case of a failure.
+ */
+ n = mlx5_ib_map_pa_mr_sg_pi(ibmr, data_sg, data_sg_nents,
+ data_sg_offset, meta_sg, meta_sg_nents,
+ meta_sg_offset);
+ if (n == data_sg_nents + meta_sg_nents)
+ goto out;
+ /*
+ * As a performance optimization, if possible, there is no need to map
+ * the sg lists to KLM descriptors. First try to map the sg lists to MTT
+ * descriptors and fall back to KLM only in case of a failure.
+ * It's more efficient for the HW to work with MTT descriptors
+ * (especially under high load).
+ * Use KLM (indirect access) only if it's mandatory.
+ */
+ pi_mr = mr->mtt_mr;
+ n = mlx5_ib_map_mtt_mr_sg_pi(ibmr, data_sg, data_sg_nents,
+ data_sg_offset, meta_sg, meta_sg_nents,
+ meta_sg_offset);
+ if (n == data_sg_nents + meta_sg_nents)
+ goto out;
+
+ pi_mr = mr->klm_mr;
+ n = mlx5_ib_map_klm_mr_sg_pi(ibmr, data_sg, data_sg_nents,
+ data_sg_offset, meta_sg, meta_sg_nents,
+ meta_sg_offset);
+ if (unlikely(n != data_sg_nents + meta_sg_nents))
+ return -ENOMEM;
+
+out:
+ /* This is a zero-based memory region */
+ ibmr->iova = 0;
+ mr->pi_mr = pi_mr;
+ if (pi_mr)
+ ibmr->sig_attrs->meta_length = pi_mr->meta_length;
+ else
+ ibmr->sig_attrs->meta_length = mr->meta_length;
+
+ return 0;
+}
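
A minimal sketch of the mapping cascade implemented above: try the cheapest descriptor type first (PA), then MTT, and only use indirect KLMs as the last resort. The stub mappers and their success criteria below are assumptions chosen purely to show the control flow.

/* Illustrative sketch only; not part of the patch above. */
#include <stdio.h>

static int map_pa(int nents)  { return nents == 1 ? 1 : 0; }	/* only a single contiguous entry */
static int map_mtt(int nents) { return nents <= 8 ? nents : 4; }	/* page-aligned layouts only */
static int map_klm(int nents) { return nents; }			/* always works, most expensive */

static int map_sg(int nents)
{
	int n;

	n = map_pa(nents);
	if (n == nents)
		return n;	/* no UMR needed at all */
	n = map_mtt(nents);
	if (n == nents)
		return n;	/* HW-friendly MTT descriptors */
	return map_klm(nents);	/* indirect KLMs as the last resort */
}

int main(void)
{
	printf("1 entry   -> %d mapped\n", map_sg(1));
	printf("12 entries -> %d mapped\n", map_sg(12));
	return 0;
}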
+
+int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ int n;
+
+ mr->mmkey.ndescs = 0;
+
+ ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
+ mr->desc_size * mr->max_descs,
+ DMA_TO_DEVICE);
+
+ if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
+ n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0,
+ NULL);
+ else
+ n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
+ mlx5_set_page);
+
+ ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
+ mr->desc_size * mr->max_descs,
+ DMA_TO_DEVICE);
+
+ return n;
+}
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index aa8391e75385..0e8ae85af5a6 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -30,10 +30,66 @@
* SOFTWARE.
*/
-#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
+#include <linux/kernel.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-resv.h>
+#include <linux/hmm.h>
+#include <linux/hmm-dma.h>
+#include <linux/pci-p2pdma.h>
#include "mlx5_ib.h"
+#include "cmd.h"
+#include "umr.h"
+#include "qp.h"
+
+#include <linux/mlx5/eq.h>
+
+/* Contains the details of a pagefault. */
+struct mlx5_pagefault {
+ u32 bytes_committed;
+ u64 token;
+ u8 event_subtype;
+ u8 type;
+ union {
+ /* Initiator or send message responder pagefault details. */
+ struct {
+ /* Received packet size, only valid for responders. */
+ u32 packet_size;
+ /*
+ * Number of resource holding WQE, depends on type.
+ */
+ u32 wq_num;
+ /*
+ * WQE index. Refers to either the send queue or
+ * receive queue, according to event_subtype.
+ */
+ u16 wqe_index;
+ } wqe;
+ /* RDMA responder pagefault details */
+ struct {
+ u32 r_key;
+ /*
+ * Received packet size, minimal size page fault
+ * resolution required for forward progress.
+ */
+ u32 packet_size;
+ u32 rdma_op_len;
+ u64 rdma_va;
+ } rdma;
+ struct {
+ u64 va;
+ u32 mkey;
+ u32 fault_byte_count;
+ u32 prefetch_before_byte_count;
+ u32 prefetch_after_byte_count;
+ u8 flags;
+ } memory;
+ };
+
+ struct mlx5_ib_pf_eq *eq;
+ struct work_struct work;
+};
#define MAX_PREFETCH_LEN (4*1024*1024U)
@@ -41,29 +97,209 @@
* a pagefault. */
#define MMU_NOTIFIER_TIMEOUT 1000
-struct workqueue_struct *mlx5_ib_page_fault_wq;
+#define MLX5_IMR_MTT_BITS (30 - PAGE_SHIFT)
+#define MLX5_IMR_MTT_SHIFT (MLX5_IMR_MTT_BITS + PAGE_SHIFT)
+#define MLX5_IMR_MTT_ENTRIES BIT_ULL(MLX5_IMR_MTT_BITS)
+#define MLX5_IMR_MTT_SIZE BIT_ULL(MLX5_IMR_MTT_SHIFT)
+#define MLX5_IMR_MTT_MASK (~(MLX5_IMR_MTT_SIZE - 1))
+
+#define MLX5_KSM_PAGE_SHIFT MLX5_IMR_MTT_SHIFT
+
+static u64 mlx5_imr_ksm_entries;
-void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
- unsigned long end)
+static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *imr, int flags)
{
+ struct mlx5_core_dev *dev = mr_to_mdev(imr)->mdev;
+ struct mlx5_klm *end = pklm + nentries;
+ int step = MLX5_CAP_ODP(dev, mem_page_fault) ? MLX5_IMR_MTT_SIZE : 0;
+ __be32 key = MLX5_CAP_ODP(dev, mem_page_fault) ?
+ cpu_to_be32(imr->null_mmkey.key) :
+ mr_to_mdev(imr)->mkeys.null_mkey;
+ u64 va =
+ MLX5_CAP_ODP(dev, mem_page_fault) ? idx * MLX5_IMR_MTT_SIZE : 0;
+
+ if (flags & MLX5_IB_UPD_XLT_ZAP) {
+ for (; pklm != end; pklm++, idx++, va += step) {
+ pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
+ pklm->key = key;
+ pklm->va = cpu_to_be64(va);
+ }
+ return;
+ }
+
+ /*
+ * The locking here is pretty subtle. Ideally the implicit_children
+ * xarray would be protected by the umem_mutex; however, that is not
+ * possible. Instead, this uses a weaker update-then-lock pattern:
+ *
+ * xa_store()
+ * mutex_lock(umem_mutex)
+ * mlx5r_umr_update_xlt()
+ * mutex_unlock(umem_mutex)
+ * destroy lkey
+ *
+ * i.e. any change to the xarray must be followed by the locked update_xlt
+ * before destroying.
+ *
+ * The umem_mutex provides the acquire/release semantic needed to make
+ * the xa_store() visible to a racing thread.
+ */
+ lockdep_assert_held(&to_ib_umem_odp(imr->umem)->umem_mutex);
+
+ for (; pklm != end; pklm++, idx++, va += step) {
+ struct mlx5_ib_mr *mtt = xa_load(&imr->implicit_children, idx);
+
+ pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
+ if (mtt) {
+ pklm->key = cpu_to_be32(mtt->ibmr.lkey);
+ pklm->va = cpu_to_be64(idx * MLX5_IMR_MTT_SIZE);
+ } else {
+ pklm->key = key;
+ pklm->va = cpu_to_be64(va);
+ }
+ }
+}
+
+static int populate_mtt(__be64 *pas, size_t start, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags)
+{
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+ bool downgrade = flags & MLX5_IB_UPD_XLT_DOWNGRADE;
+ struct pci_p2pdma_map_state p2pdma_state = {};
+ struct ib_device *dev = odp->umem.ibdev;
+ size_t i;
+
+ if (flags & MLX5_IB_UPD_XLT_ZAP)
+ return 0;
+
+ for (i = 0; i < nentries; i++) {
+ unsigned long pfn = odp->map.pfn_list[start + i];
+ dma_addr_t dma_addr;
+
+ pfn = odp->map.pfn_list[start + i];
+ if (!(pfn & HMM_PFN_VALID))
+ /* ODP initialization */
+ continue;
+
+ dma_addr = hmm_dma_map_pfn(dev->dma_device, &odp->map,
+ start + i, &p2pdma_state);
+ if (ib_dma_mapping_error(dev, dma_addr))
+ return -EFAULT;
+
+ dma_addr |= MLX5_IB_MTT_READ;
+ if ((pfn & HMM_PFN_WRITE) && !downgrade)
+ dma_addr |= MLX5_IB_MTT_WRITE;
+
+ pas[i] = cpu_to_be64(dma_addr);
+ odp->npages++;
+ }
+ return 0;
+}
+
+int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags)
+{
+ if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
+ populate_klm(xlt, idx, nentries, mr, flags);
+ return 0;
+ } else {
+ return populate_mtt(xlt, idx, nentries, mr, flags);
+ }
+}
+
+/*
+ * This must be called after the mr has been removed from implicit_children.
+ * NOTE: The MR does not necessarily have to be empty here; parallel
+ * page faults could have raced with the free process and added pages
+ * to it.
+ */
+static void free_implicit_child_mr_work(struct work_struct *work)
+{
+ struct mlx5_ib_mr *mr =
+ container_of(work, struct mlx5_ib_mr, odp_destroy.work);
+ struct mlx5_ib_mr *imr = mr->parent;
+ struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+
+ mlx5r_deref_wait_odp_mkey(&mr->mmkey);
+
+ mutex_lock(&odp_imr->umem_mutex);
+ mlx5r_umr_update_xlt(mr->parent,
+ ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT, 1, 0,
+ MLX5_IB_UPD_XLT_INDIRECT | MLX5_IB_UPD_XLT_ATOMIC);
+ mutex_unlock(&odp_imr->umem_mutex);
+ mlx5_ib_dereg_mr(&mr->ibmr, NULL);
+
+ mlx5r_deref_odp_mkey(&imr->mmkey);
+}
+
+static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
+{
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+ unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
+ struct mlx5_ib_mr *imr = mr->parent;
+
+ /*
+ * If userspace is racing to free the parent implicit ODP MR then we can
+ * lose the race with parent destruction. In this case
+ * mlx5_ib_free_odp_mr() will free everything in the implicit_children
+ * xarray, so a NOP is fine. This child MR cannot be destroyed here because
+ * we are under its umem_mutex.
+ */
+ if (!refcount_inc_not_zero(&imr->mmkey.usecount))
+ return;
+
+ xa_lock(&imr->implicit_children);
+ if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_KERNEL) !=
+ mr) {
+ xa_unlock(&imr->implicit_children);
+ mlx5r_deref_odp_mkey(&imr->mmkey);
+ return;
+ }
+
+ if (MLX5_CAP_ODP(mr_to_mdev(mr)->mdev, mem_page_fault))
+ xa_erase(&mr_to_mdev(mr)->odp_mkeys,
+ mlx5_base_mkey(mr->mmkey.key));
+ xa_unlock(&imr->implicit_children);
+
+ /* Freeing a MR is a sleeping operation, so bounce to a work queue */
+ INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
+ queue_work(system_unbound_wq, &mr->odp_destroy.work);
+}
+
+static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct ib_umem_odp *umem_odp =
+ container_of(mni, struct ib_umem_odp, notifier);
struct mlx5_ib_mr *mr;
- const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT / sizeof(u64)) - 1;
+ const u64 umr_block_mask = MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT - 1;
u64 idx = 0, blk_start_idx = 0;
+ u64 invalidations = 0;
+ unsigned long start;
+ unsigned long end;
int in_block = 0;
u64 addr;
- if (!umem || !umem->odp_data) {
- pr_err("invalidation called on NULL umem or non-ODP umem\n");
- return;
- }
+ if (!mmu_notifier_range_blockable(range))
+ return false;
- mr = umem->odp_data->private;
-
- if (!mr || !mr->ibmr.pd)
- return;
+ mutex_lock(&umem_odp->umem_mutex);
+ mmu_interval_set_seq(mni, cur_seq);
+ /*
+ * If npages is zero then umem_odp->private may not be set up yet. This
+ * does not complete until after the first page is mapped for DMA.
+ */
+ if (!umem_odp->npages)
+ goto out;
+ mr = umem_odp->private;
+ if (!mr)
+ goto out;
- start = max_t(u64, ib_umem_start(umem), start);
- end = min_t(u64, ib_umem_end(umem), end);
+ start = max_t(u64, ib_umem_start(umem_odp), range->start);
+ end = min_t(u64, ib_umem_end(umem_odp), range->end);
/*
* Iteration one - zap the HW's MTTs. The notifiers_count ensures that
@@ -71,17 +307,15 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
* overwrite the same MTTs. Concurrent invalidations might race us,
* but they will write 0s as well, so no difference in the end result.
*/
-
- for (addr = start; addr < end; addr += (u64)umem->page_size) {
- idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
+ for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
+ idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
/*
* Strive to write the MTTs in chunks, but avoid overwriting
* non-existent MTTs. The heuristic here can be improved to
* estimate the cost of another UMR vs. the cost of bigger
* UMR.
*/
- if (umem->odp_data->dma_list[idx] &
- (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
+ if (umem_odp->map.pfn_list[idx] & HMM_PFN_VALID) {
if (!in_block) {
blk_start_idx = idx;
in_block = 1;
@@ -90,15 +324,26 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
u64 umr_offset = idx & umr_block_mask;
if (in_block && umr_offset == 0) {
- mlx5_ib_update_mtt(mr, blk_start_idx,
- idx - blk_start_idx, 1);
+ mlx5r_umr_update_xlt(mr, blk_start_idx,
+ idx - blk_start_idx, 0,
+ MLX5_IB_UPD_XLT_ZAP |
+ MLX5_IB_UPD_XLT_ATOMIC);
in_block = 0;
+ /* Count page invalidations */
+ invalidations += idx - blk_start_idx + 1;
}
}
}
- if (in_block)
- mlx5_ib_update_mtt(mr, blk_start_idx, idx - blk_start_idx + 1,
- 1);
+ if (in_block) {
+ mlx5r_umr_update_xlt(mr, blk_start_idx,
+ idx - blk_start_idx + 1, 0,
+ MLX5_IB_UPD_XLT_ZAP |
+ MLX5_IB_UPD_XLT_ATOMIC);
+ /* Count page invalidations */
+ invalidations += idx - blk_start_idx + 1;
+ }
+
+ mlx5_update_odp_stats_with_handled(mr, invalidations, invalidations);
/*
* We are now sure that the device will not access the
@@ -106,223 +351,834 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
* needed.
*/
- ib_umem_odp_unmap_dma_pages(umem, start, end);
+ ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
+
+ if (unlikely(!umem_odp->npages && mr->parent))
+ destroy_unused_implicit_child_mr(mr);
+out:
+ mutex_unlock(&umem_odp->umem_mutex);
+ return true;
}
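
A minimal sketch of the block-based zap heuristic used in the invalidation loop above: contiguous runs of valid entries are flushed only at block-aligned boundaries, with one trailing flush after the loop. The block size and the validity bitmap are assumptions; BLOCK stands in for MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT.

/* Illustrative sketch only; not part of the patch above. */
#include <stdio.h>

#define BLOCK 4	/* stand-in for MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT */

int main(void)
{
	int valid[16] = { 1,1,1,1, 1,1,0,0, 0,1,1,1, 1,0,0,0 };
	unsigned long block_mask = BLOCK - 1;
	int in_block = 0, blk_start = 0, idx;

	for (idx = 0; idx < 16; idx++) {
		if (valid[idx]) {
			if (!in_block) {
				blk_start = idx;
				in_block = 1;
			}
		} else if (in_block && (idx & block_mask) == 0) {
			/* flush the accumulated run at a block boundary */
			printf("zap [%d, %d)\n", blk_start, idx);
			in_block = 0;
		}
	}
	if (in_block)	/* flush whatever run is still open */
		printf("zap [%d, %d)\n", blk_start, idx);
	return 0;
}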
-void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
+const struct mmu_interval_notifier_ops mlx5_mn_ops = {
+ .invalidate = mlx5_ib_invalidate_range,
+};
+
+static void internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
struct ib_odp_caps *caps = &dev->odp_caps;
memset(caps, 0, sizeof(*caps));
- if (!MLX5_CAP_GEN(dev->mdev, pg))
+ if (!MLX5_CAP_GEN(dev->mdev, pg) || !mlx5r_umr_can_load_pas(dev, 0))
return;
caps->general_caps = IB_ODP_SUPPORT;
- if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
+ if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
+ dev->odp_max_size = U64_MAX;
+ else
+ dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);
+
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, ud_odp_caps.send))
caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
- if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, ud_odp_caps.srq_receive))
+ caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
+
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.send))
caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
- if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.receive))
caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
- if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.write))
caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
- if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.read))
caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
- return;
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.atomic))
+ caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
+
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.srq_receive))
+ caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
+
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.send))
+ caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SEND;
+
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.receive))
+ caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_RECV;
+
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.write))
+ caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_WRITE;
+
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.read))
+ caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_READ;
+
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.atomic))
+ caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
+
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.srq_receive))
+ caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
+
+ if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
+ MLX5_CAP_GEN(dev->mdev, null_mkey) &&
+ MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
+ !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
+ caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
}
-static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev,
- u32 key)
+static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
+ struct mlx5_pagefault *pfault,
+ int error)
{
- u32 base_key = mlx5_base_mkey(key);
- struct mlx5_core_mr *mmr = __mlx5_mr_lookup(dev->mdev, base_key);
- struct mlx5_ib_mr *mr = container_of(mmr, struct mlx5_ib_mr, mmr);
-
- if (!mmr || mmr->key != key || !mr->live)
- return NULL;
+ int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
+ pfault->wqe.wq_num : pfault->token;
+ u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {};
+ void *info;
+ int err;
+
+ MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME);
+
+ if (pfault->event_subtype == MLX5_PFAULT_SUBTYPE_MEMORY) {
+ info = MLX5_ADDR_OF(page_fault_resume_in, in,
+ page_fault_info.mem_page_fault_info);
+ MLX5_SET(mem_page_fault_info, info, fault_token_31_0,
+ pfault->token & 0xffffffff);
+ MLX5_SET(mem_page_fault_info, info, fault_token_47_32,
+ (pfault->token >> 32) & 0xffff);
+ MLX5_SET(mem_page_fault_info, info, error, !!error);
+ } else {
+ info = MLX5_ADDR_OF(page_fault_resume_in, in,
+ page_fault_info.trans_page_fault_info);
+ MLX5_SET(trans_page_fault_info, info, page_fault_type,
+ pfault->type);
+ MLX5_SET(trans_page_fault_info, info, fault_token,
+ pfault->token);
+ MLX5_SET(trans_page_fault_info, info, wq_number, wq_num);
+ MLX5_SET(trans_page_fault_info, info, error, !!error);
+ }
- return container_of(mmr, struct mlx5_ib_mr, mmr);
+ err = mlx5_cmd_exec_in(dev->mdev, page_fault_resume, in);
+ if (err)
+ mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x err %d\n",
+ wq_num, err);
}
-static void mlx5_ib_page_fault_resume(struct mlx5_ib_qp *qp,
- struct mlx5_ib_pfault *pfault,
- int error) {
- struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
- int ret = mlx5_core_page_fault_resume(dev->mdev, qp->mqp.qpn,
- pfault->mpfault.flags,
- error);
- if (ret)
- pr_err("Failed to resolve the page fault on QP 0x%x\n",
- qp->mqp.qpn);
+static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
+ unsigned long idx)
+{
+ struct mlx5_ib_dev *dev = mr_to_mdev(imr);
+ struct ib_umem_odp *odp;
+ struct mlx5_ib_mr *mr;
+ struct mlx5_ib_mr *ret;
+ int err;
+
+ odp = ib_umem_odp_alloc_child(to_ib_umem_odp(imr->umem),
+ idx * MLX5_IMR_MTT_SIZE,
+ MLX5_IMR_MTT_SIZE, &mlx5_mn_ops);
+ if (IS_ERR(odp))
+ return ERR_CAST(odp);
+
+ mr = mlx5_mr_cache_alloc(dev, imr->access_flags,
+ MLX5_MKC_ACCESS_MODE_MTT,
+ MLX5_IMR_MTT_ENTRIES);
+ if (IS_ERR(mr)) {
+ ib_umem_odp_release(odp);
+ return mr;
+ }
+
+ mr->access_flags = imr->access_flags;
+ mr->ibmr.pd = imr->ibmr.pd;
+ mr->ibmr.device = &mr_to_mdev(imr)->ib_dev;
+ mr->umem = &odp->umem;
+ mr->ibmr.lkey = mr->mmkey.key;
+ mr->ibmr.rkey = mr->mmkey.key;
+ mr->ibmr.iova = idx * MLX5_IMR_MTT_SIZE;
+ mr->parent = imr;
+ odp->private = mr;
+
+ /*
+ * The first refcount is owned by the xarray and the second refcount
+ * is returned to the caller.
+ */
+ refcount_set(&mr->mmkey.usecount, 2);
+
+ err = mlx5r_umr_update_xlt(mr, 0,
+ MLX5_IMR_MTT_ENTRIES,
+ PAGE_SHIFT,
+ MLX5_IB_UPD_XLT_ZAP |
+ MLX5_IB_UPD_XLT_ENABLE);
+ if (err) {
+ ret = ERR_PTR(err);
+ goto out_mr;
+ }
+
+ xa_lock(&imr->implicit_children);
+ ret = __xa_cmpxchg(&imr->implicit_children, idx, NULL, mr,
+ GFP_KERNEL);
+ if (unlikely(ret)) {
+ if (xa_is_err(ret)) {
+ ret = ERR_PTR(xa_err(ret));
+ goto out_lock;
+ }
+ /*
+ * Another thread beat us to creating the child mr, use
+ * theirs.
+ */
+ refcount_inc(&ret->mmkey.usecount);
+ goto out_lock;
+ }
+
+ if (MLX5_CAP_ODP(dev->mdev, mem_page_fault)) {
+ ret = xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
+ &mr->mmkey, GFP_KERNEL);
+ if (xa_is_err(ret)) {
+ ret = ERR_PTR(xa_err(ret));
+ __xa_erase(&imr->implicit_children, idx);
+ goto out_lock;
+ }
+ mr->mmkey.type = MLX5_MKEY_IMPLICIT_CHILD;
+ }
+ xa_unlock(&imr->implicit_children);
+ mlx5_ib_dbg(mr_to_mdev(imr), "key %x mr %p\n", mr->mmkey.key, mr);
+ return mr;
+
+out_lock:
+ xa_unlock(&imr->implicit_children);
+out_mr:
+ mlx5_ib_dereg_mr(&mr->ibmr, NULL);
+ return ret;
}
/*
- * Handle a single data segment in a page-fault WQE.
- *
- * Returns number of pages retrieved on success. The caller will continue to
- * the next data segment.
- * Can return the following error codes:
- * -EAGAIN to designate a temporary error. The caller will abort handling the
- * page fault and resolve it.
- * -EFAULT when there's an error mapping the requested pages. The caller will
- * abort the page fault handling and possibly move the QP to an error state.
- * On other errors the QP should also be closed with an error.
+ * When using memory-scheme ODP, implicit MRs can't use the reserved null
+ * mkey; each implicit MR needs its own private null mkey to receive page
+ * faults on.
+ * The null mkey is created with properties that trigger a page fault on
+ * every access and with all relevant access flags set.
*/
-static int pagefault_single_data_segment(struct mlx5_ib_qp *qp,
- struct mlx5_ib_pfault *pfault,
- u32 key, u64 io_virt, size_t bcnt,
- u32 *bytes_mapped)
+static int alloc_implicit_mr_null_mkey(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_mr *imr,
+ struct mlx5_ib_pd *pd)
+{
+ size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + 64;
+ void *mkc;
+ u32 *in;
+ int err;
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size, 4);
+ MLX5_SET(create_mkey_in, in, pg_access, 1);
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, a, 1);
+ MLX5_SET(mkc, mkc, rw, 1);
+ MLX5_SET(mkc, mkc, rr, 1);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, free, 0);
+ MLX5_SET(mkc, mkc, umr_en, 0);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+
+ MLX5_SET(mkc, mkc, translations_octword_size, 4);
+ MLX5_SET(mkc, mkc, log_page_size, 61);
+ MLX5_SET(mkc, mkc, length64, 1);
+ MLX5_SET(mkc, mkc, pd, pd->pdn);
+ MLX5_SET64(mkc, mkc, start_addr, 0);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+
+ err = mlx5_core_create_mkey(dev->mdev, &imr->null_mmkey.key, in, inlen);
+ if (err)
+ goto free_in;
+
+ imr->null_mmkey.type = MLX5_MKEY_NULL;
+
+free_in:
+ kfree(in);
+ return err;
+}
+
+struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
+ int access_flags)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
+ struct ib_umem_odp *umem_odp;
+ struct mlx5_ib_mr *imr;
+ int err;
+
+ if (!mlx5r_umr_can_load_pas(dev, MLX5_IMR_MTT_ENTRIES * PAGE_SIZE))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ umem_odp = ib_umem_odp_alloc_implicit(&dev->ib_dev, access_flags);
+ if (IS_ERR(umem_odp))
+ return ERR_CAST(umem_odp);
+
+ imr = mlx5_mr_cache_alloc(dev, access_flags, MLX5_MKC_ACCESS_MODE_KSM,
+ mlx5_imr_ksm_entries);
+ if (IS_ERR(imr)) {
+ ib_umem_odp_release(umem_odp);
+ return imr;
+ }
+
+ imr->access_flags = access_flags;
+ imr->ibmr.pd = &pd->ibpd;
+ imr->ibmr.iova = 0;
+ imr->umem = &umem_odp->umem;
+ imr->ibmr.lkey = imr->mmkey.key;
+ imr->ibmr.rkey = imr->mmkey.key;
+ imr->ibmr.device = &dev->ib_dev;
+ imr->is_odp_implicit = true;
+ xa_init(&imr->implicit_children);
+
+ if (MLX5_CAP_ODP(dev->mdev, mem_page_fault)) {
+ err = alloc_implicit_mr_null_mkey(dev, imr, pd);
+ if (err)
+ goto out_mr;
+
+ err = mlx5r_store_odp_mkey(dev, &imr->null_mmkey);
+ if (err)
+ goto out_mr;
+ }
+
+ err = mlx5r_umr_update_xlt(imr, 0,
+ mlx5_imr_ksm_entries,
+ MLX5_KSM_PAGE_SHIFT,
+ MLX5_IB_UPD_XLT_INDIRECT |
+ MLX5_IB_UPD_XLT_ZAP |
+ MLX5_IB_UPD_XLT_ENABLE);
+ if (err)
+ goto out_mr;
+
+ err = mlx5r_store_odp_mkey(dev, &imr->mmkey);
+ if (err)
+ goto out_mr;
+
+ mlx5_ib_dbg(dev, "key %x mr %p\n", imr->mmkey.key, imr);
+ return imr;
+out_mr:
+ mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
+ mlx5_ib_dereg_mr(&imr->ibmr, NULL);
+ return ERR_PTR(err);
+}
+
+void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr)
{
- struct mlx5_ib_dev *mib_dev = to_mdev(qp->ibqp.pd->device);
- int srcu_key;
- unsigned int current_seq;
+ struct mlx5_ib_mr *mtt;
+ unsigned long idx;
+
+ /*
+ * If this is an implicit MR it is already invalidated, so we can just
+ * delete the child mkeys.
+ */
+ xa_for_each(&mr->implicit_children, idx, mtt) {
+ xa_erase(&mr->implicit_children, idx);
+ mlx5_ib_dereg_mr(&mtt->ibmr, NULL);
+ }
+
+ if (mr->null_mmkey.key) {
+ xa_erase(&mr_to_mdev(mr)->odp_mkeys,
+ mlx5_base_mkey(mr->null_mmkey.key));
+
+ mlx5_core_destroy_mkey(mr_to_mdev(mr)->mdev,
+ mr->null_mmkey.key);
+ }
+}
+
+#define MLX5_PF_FLAGS_DOWNGRADE BIT(1)
+#define MLX5_PF_FLAGS_SNAPSHOT BIT(2)
+#define MLX5_PF_FLAGS_ENABLE BIT(3)
+static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
+ u64 user_va, size_t bcnt, u32 *bytes_mapped,
+ u32 flags)
+{
+ int page_shift, ret, np;
+ bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
+ u64 access_mask = 0;
u64 start_idx;
- int npages = 0, ret = 0;
- struct mlx5_ib_mr *mr;
- u64 access_mask = ODP_READ_ALLOWED_BIT;
+ bool fault = !(flags & MLX5_PF_FLAGS_SNAPSHOT);
+ u32 xlt_flags = MLX5_IB_UPD_XLT_ATOMIC;
+
+ if (flags & MLX5_PF_FLAGS_ENABLE)
+ xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
+
+ if (flags & MLX5_PF_FLAGS_DOWNGRADE)
+ xlt_flags |= MLX5_IB_UPD_XLT_DOWNGRADE;
+
+ page_shift = odp->page_shift;
+ start_idx = (user_va - ib_umem_start(odp)) >> page_shift;
+
+ if (odp->umem.writable && !downgrade)
+ access_mask |= HMM_PFN_WRITE;
+
+ np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask, fault);
+ if (np < 0)
+ return np;
- srcu_key = srcu_read_lock(&mib_dev->mr_srcu);
- mr = mlx5_ib_odp_find_mr_lkey(mib_dev, key);
/*
- * If we didn't find the MR, it means the MR was closed while we were
- * handling the ODP event. In this case we return -EFAULT so that the
- * QP will be closed.
+ * No need to check whether the MTTs really belong to this MR, since
+ * ib_umem_odp_map_dma_and_lock already checks this.
*/
- if (!mr || !mr->ibmr.pd) {
- pr_err("Failed to find relevant mr for lkey=0x%06x, probably the MR was destroyed\n",
- key);
- ret = -EFAULT;
- goto srcu_unlock;
- }
- if (!mr->umem->odp_data) {
- pr_debug("skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
- key);
- if (bytes_mapped)
- *bytes_mapped +=
- (bcnt - pfault->mpfault.bytes_committed);
- goto srcu_unlock;
- }
- if (mr->ibmr.pd != qp->ibqp.pd) {
- pr_err("Page-fault with different PDs for QP and MR.\n");
- ret = -EFAULT;
- goto srcu_unlock;
+ ret = mlx5r_umr_update_xlt(mr, start_idx, np, page_shift, xlt_flags);
+ mutex_unlock(&odp->umem_mutex);
+
+ if (ret < 0) {
+ if (ret != -EAGAIN)
+ mlx5_ib_err(mr_to_mdev(mr),
+ "Failed to update mkey page tables\n");
+ goto out;
+ }
+
+ if (bytes_mapped) {
+ u32 new_mappings = (np << page_shift) -
+ (user_va - round_down(user_va, 1 << page_shift));
+
+ *bytes_mapped += min_t(u32, new_mappings, bcnt);
}
- current_seq = ACCESS_ONCE(mr->umem->odp_data->notifiers_seq);
+ return np << (page_shift - PAGE_SHIFT);
+
+out:
+ return ret;
+}
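
The bytes_mapped computation above (pages just mapped, minus the bytes before the faulting address, clamped to the requested byte count) worked with example numbers in a standalone sketch; page_shift, user_va, np and bcnt are assumptions.

/* Illustrative sketch only; not part of the patch above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12;			/* 4 KiB pages */
	uint64_t user_va = 0x10000234;			/* faulting address */
	uint64_t page_start = user_va & ~((1ULL << page_shift) - 1);
	int np = 3;					/* pages just mapped */
	uint32_t bcnt = 9000;				/* bytes requested */
	uint32_t new_mappings = (np << page_shift) - (user_va - page_start);
	uint32_t bytes_mapped = new_mappings < bcnt ? new_mappings : bcnt;

	/* 3 pages (12288 bytes) minus the 564 bytes before user_va = 11724,
	 * clamped to bcnt = 9000. */
	printf("new_mappings=%u bytes_mapped=%u\n",
	       (unsigned)new_mappings, (unsigned)bytes_mapped);
	return 0;
}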
+
+static int pagefault_implicit_mr(struct mlx5_ib_mr *imr,
+ struct ib_umem_odp *odp_imr, u64 user_va,
+ size_t bcnt, u32 *bytes_mapped, u32 flags)
+{
+ unsigned long end_idx = (user_va + bcnt - 1) >> MLX5_IMR_MTT_SHIFT;
+ unsigned long upd_start_idx = end_idx + 1;
+ unsigned long upd_len = 0;
+ unsigned long npages = 0;
+ int err;
+ int ret;
+
+ if (unlikely(user_va >= mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE ||
+ mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE - user_va < bcnt))
+ return -EFAULT;
+
+ /* Fault each child mr that intersects with our interval. */
+ while (bcnt) {
+ unsigned long idx = user_va >> MLX5_IMR_MTT_SHIFT;
+ struct ib_umem_odp *umem_odp;
+ struct mlx5_ib_mr *mtt;
+ u64 len;
+
+ xa_lock(&imr->implicit_children);
+ mtt = xa_load(&imr->implicit_children, idx);
+ if (unlikely(!mtt)) {
+ xa_unlock(&imr->implicit_children);
+ mtt = implicit_get_child_mr(imr, idx);
+ if (IS_ERR(mtt)) {
+ ret = PTR_ERR(mtt);
+ goto out;
+ }
+ upd_start_idx = min(upd_start_idx, idx);
+ upd_len = idx - upd_start_idx + 1;
+ } else {
+ refcount_inc(&mtt->mmkey.usecount);
+ xa_unlock(&imr->implicit_children);
+ }
+
+ umem_odp = to_ib_umem_odp(mtt->umem);
+ len = min_t(u64, user_va + bcnt, ib_umem_end(umem_odp)) -
+ user_va;
+
+ ret = pagefault_real_mr(mtt, umem_odp, user_va, len,
+ bytes_mapped, flags);
+
+ mlx5r_deref_odp_mkey(&mtt->mmkey);
+
+ if (ret < 0)
+ goto out;
+ user_va += len;
+ bcnt -= len;
+ npages += ret;
+ }
+
+ ret = npages;
+
/*
- * Ensure the sequence number is valid for some time before we call
- * gup.
+ * Any time the implicit_children xarray is changed we must update the
+ * XLT before exiting to ensure the HW and the implicit_children xarray
+ * remain synchronized.
*/
- smp_rmb();
+out:
+ if (likely(!upd_len))
+ return ret;
/*
- * Avoid branches - this code will perform correctly
- * in all iterations (in iteration 2 and above,
- * bytes_committed == 0).
+ * Note that the ordering here is not strict: the KSM is updated after
+ * the implicit_children xarray, so a parallel page fault could see an
+ * MR that is not yet visible in the KSM. This is similar to a parallel
+ * page fault seeing an MR that is being concurrently removed from the
+ * KSM. Both of these improbable situations are resolved safely by
+ * resuming the HW and then taking another page fault. The next page
+ * fault handler will see the new information.
*/
- io_virt += pfault->mpfault.bytes_committed;
- bcnt -= pfault->mpfault.bytes_committed;
+ mutex_lock(&odp_imr->umem_mutex);
+ err = mlx5r_umr_update_xlt(imr, upd_start_idx, upd_len, 0,
+ MLX5_IB_UPD_XLT_INDIRECT |
+ MLX5_IB_UPD_XLT_ATOMIC);
+ mutex_unlock(&odp_imr->umem_mutex);
+ if (err) {
+ mlx5_ib_err(mr_to_mdev(imr), "Failed to update PAS\n");
+ return err;
+ }
+ return ret;
+}
- start_idx = (io_virt - (mr->mmr.iova & PAGE_MASK)) >> PAGE_SHIFT;
+static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
+ u32 *bytes_mapped, u32 flags)
+{
+ struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
+ int access_mode = mr->data_direct ? MLX5_MKC_ACCESS_MODE_KSM :
+ MLX5_MKC_ACCESS_MODE_MTT;
+ unsigned int old_page_shift = mr->page_shift;
+ unsigned int page_shift;
+ unsigned long page_size;
+ u32 xlt_flags = 0;
+ int err;
+
+ if (flags & MLX5_PF_FLAGS_ENABLE)
+ xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
+
+ dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
+ err = ib_umem_dmabuf_map_pages(umem_dmabuf);
+ if (err) {
+ dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
+ return err;
+ }
+
+ page_size = mlx5_umem_dmabuf_find_best_pgsz(umem_dmabuf, access_mode);
+ if (!page_size) {
+ ib_umem_dmabuf_unmap_pages(umem_dmabuf);
+ err = -EINVAL;
+ } else {
+ page_shift = order_base_2(page_size);
+ if (page_shift != mr->page_shift && mr->dmabuf_faulted) {
+ err = mlx5r_umr_dmabuf_update_pgsz(mr, xlt_flags,
+ page_shift);
+ } else {
+ mr->page_shift = page_shift;
+ if (mr->data_direct)
+ err = mlx5r_umr_update_data_direct_ksm_pas(
+ mr, xlt_flags);
+ else
+ err = mlx5r_umr_update_mr_pas(mr,
+ xlt_flags);
+ }
+ }
+ dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
- if (mr->umem->writable)
- access_mask |= ODP_WRITE_ALLOWED_BIT;
- npages = ib_umem_odp_map_dma_pages(mr->umem, io_virt, bcnt,
- access_mask, current_seq);
- if (npages < 0) {
- ret = npages;
- goto srcu_unlock;
+ if (err) {
+ mr->page_shift = old_page_shift;
+ return err;
}
- if (npages > 0) {
- mutex_lock(&mr->umem->odp_data->umem_mutex);
- if (!ib_umem_mmu_notifier_retry(mr->umem, current_seq)) {
+ mr->dmabuf_faulted = 1;
+
+ if (bytes_mapped)
+ *bytes_mapped += bcnt;
+
+ return ib_umem_num_pages(mr->umem);
+}
+
+/*
+ * Returns:
+ * -EFAULT: The requested io_virt/bcnt range is not within the MR, covers
+ * pages that are not accessible, or the MR is no longer valid.
+ * -EAGAIN/-ENOMEM: The operation should be retried
+ *
+ * -EINVAL/others: General internal malfunction
+ * >0: Number of pages mapped
+ */
+static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
+ u32 *bytes_mapped, u32 flags, bool permissive_fault)
+{
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+
+ if (unlikely(io_virt < mr->ibmr.iova) && !permissive_fault)
+ return -EFAULT;
+
+ if (mr->umem->is_dmabuf)
+ return pagefault_dmabuf_mr(mr, bcnt, bytes_mapped, flags);
+
+ if (!odp->is_implicit_odp) {
+ u64 offset = io_virt < mr->ibmr.iova ? 0 : io_virt - mr->ibmr.iova;
+ u64 user_va;
+
+ if (check_add_overflow(offset, (u64)odp->umem.address,
+ &user_va))
+ return -EFAULT;
+
+ if (permissive_fault) {
+ if (user_va < ib_umem_start(odp))
+ user_va = ib_umem_start(odp);
+ if ((user_va + bcnt) > ib_umem_end(odp))
+ bcnt = ib_umem_end(odp) - user_va;
+ } else if (unlikely(user_va >= ib_umem_end(odp) ||
+ ib_umem_end(odp) - user_va < bcnt))
+ return -EFAULT;
+ return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped,
+ flags);
+ }
+ return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped,
+ flags);
+}
+
+int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
+{
+ int ret;
+
+ ret = pagefault_real_mr(mr, to_ib_umem_odp(mr->umem), mr->umem->address,
+ mr->umem->length, NULL,
+ MLX5_PF_FLAGS_SNAPSHOT | MLX5_PF_FLAGS_ENABLE);
+ return ret >= 0 ? 0 : ret;
+}
+
+int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
+{
+ int ret;
+
+ ret = pagefault_dmabuf_mr(mr, mr->umem->length, NULL,
+ MLX5_PF_FLAGS_ENABLE);
+
+ return ret >= 0 ? 0 : ret;
+}
+
+struct pf_frame {
+ struct pf_frame *next;
+ u32 key;
+ u64 io_virt;
+ size_t bcnt;
+ int depth;
+};
+
+static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key)
+{
+ if (!mmkey)
+ return false;
+ if (mmkey->type == MLX5_MKEY_MW ||
+ mmkey->type == MLX5_MKEY_INDIRECT_DEVX)
+ return mlx5_base_mkey(mmkey->key) == mlx5_base_mkey(key);
+ return mmkey->key == key;
+}
+
+static struct mlx5_ib_mkey *find_odp_mkey(struct mlx5_ib_dev *dev, u32 key)
+{
+ struct mlx5_ib_mkey *mmkey;
+
+ xa_lock(&dev->odp_mkeys);
+ mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key));
+ if (!mmkey) {
+ mmkey = ERR_PTR(-ENOENT);
+ goto out;
+ }
+ if (!mkey_is_eq(mmkey, key)) {
+ mmkey = ERR_PTR(-EFAULT);
+ goto out;
+ }
+ refcount_inc(&mmkey->usecount);
+out:
+ xa_unlock(&dev->odp_mkeys);
+
+ return mmkey;
+}
+
+/*
+ * Handle a single data segment in a page-fault WQE or RDMA region.
+ *
+ * Returns zero on success. The caller may continue to the next data segment.
+ * Can return the following error codes:
+ * -EAGAIN to designate a temporary error. The caller will abort handling the
+ * page fault and resolve it.
+ * -EFAULT when there's an error mapping the requested pages. The caller will
+ * abort the page fault handling.
+ */
+static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
+ struct ib_pd *pd, u32 key,
+ u64 io_virt, size_t bcnt,
+ u32 *bytes_committed,
+ u32 *bytes_mapped)
+{
+ int ret, i, outlen, cur_outlen = 0, depth = 0, pages_in_range;
+ struct pf_frame *head = NULL, *frame;
+ struct mlx5_ib_mkey *mmkey;
+ struct mlx5_ib_mr *mr;
+ struct mlx5_klm *pklm;
+ u32 *out = NULL;
+ size_t offset;
+
+ io_virt += *bytes_committed;
+ bcnt -= *bytes_committed;
+next_mr:
+ mmkey = find_odp_mkey(dev, key);
+ if (IS_ERR(mmkey)) {
+ ret = PTR_ERR(mmkey);
+ if (ret == -ENOENT) {
+ mlx5_ib_dbg(
+ dev,
+ "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
+ key);
+ if (bytes_mapped)
+ *bytes_mapped += bcnt;
/*
- * No need to check whether the MTTs really belong to
- * this MR, since ib_umem_odp_map_dma_pages already
- * checks this.
+ * The user could specify a SGL with multiple lkeys and
+ * only some of them are ODP. Treat the non-ODP ones as
+ * fully faulted.
*/
- ret = mlx5_ib_update_mtt(mr, start_idx, npages, 0);
- } else {
- ret = -EAGAIN;
+ ret = 0;
}
- mutex_unlock(&mr->umem->odp_data->umem_mutex);
- if (ret < 0) {
- if (ret != -EAGAIN)
- pr_err("Failed to update mkey page tables\n");
- goto srcu_unlock;
+ goto end;
+ }
+
+ switch (mmkey->type) {
+ case MLX5_MKEY_MR:
+ mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+
+ pages_in_range = (ALIGN(io_virt + bcnt, PAGE_SIZE) -
+ (io_virt & PAGE_MASK)) >>
+ PAGE_SHIFT;
+ ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0, false);
+ if (ret < 0)
+ goto end;
+
+ mlx5_update_odp_stats_with_handled(mr, faults, ret);
+
+ if (ret < pages_in_range) {
+ ret = -EFAULT;
+ goto end;
}
- if (bytes_mapped) {
- u32 new_mappings = npages * PAGE_SIZE -
- (io_virt - round_down(io_virt, PAGE_SIZE));
- *bytes_mapped += min_t(u32, new_mappings, bcnt);
+ ret = 0;
+ break;
+
+ case MLX5_MKEY_MW:
+ case MLX5_MKEY_INDIRECT_DEVX:
+ if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
+ mlx5_ib_dbg(dev, "indirection level exceeded\n");
+ ret = -EFAULT;
+ goto end;
}
- }
-srcu_unlock:
- if (ret == -EAGAIN) {
- if (!mr->umem->odp_data->dying) {
- struct ib_umem_odp *odp_data = mr->umem->odp_data;
- unsigned long timeout =
- msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
-
- if (!wait_for_completion_timeout(
- &odp_data->notifier_completion,
- timeout)) {
- pr_warn("timeout waiting for mmu notifier completion\n");
+ outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
+ sizeof(*pklm) * (mmkey->ndescs - 2);
+
+ if (outlen > cur_outlen) {
+ kfree(out);
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!out) {
+ ret = -ENOMEM;
+ goto end;
}
- } else {
- /* The MR is being killed, kill the QP as well. */
- ret = -EFAULT;
+ cur_outlen = outlen;
}
+
+ pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out,
+ bsf0_klm0_pas_mtt0_1);
+
+ ret = mlx5_core_query_mkey(dev->mdev, mmkey->key, out, outlen);
+ if (ret)
+ goto end;
+
+ offset = io_virt - MLX5_GET64(query_mkey_out, out,
+ memory_key_mkey_entry.start_addr);
+
+ for (i = 0; bcnt && i < mmkey->ndescs; i++, pklm++) {
+ if (offset >= be32_to_cpu(pklm->bcount)) {
+ offset -= be32_to_cpu(pklm->bcount);
+ continue;
+ }
+
+ frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+ if (!frame) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ frame->key = be32_to_cpu(pklm->key);
+ frame->io_virt = be64_to_cpu(pklm->va) + offset;
+ frame->bcnt = min_t(size_t, bcnt,
+ be32_to_cpu(pklm->bcount) - offset);
+ frame->depth = depth + 1;
+ frame->next = head;
+ head = frame;
+
+ bcnt -= frame->bcnt;
+ offset = 0;
+ }
+ break;
+
+ default:
+ mlx5_ib_dbg(dev, "wrong mkey type %d\n", mmkey->type);
+ ret = -EFAULT;
+ goto end;
+ }
+
+ if (head) {
+ frame = head;
+ head = frame->next;
+
+ key = frame->key;
+ io_virt = frame->io_virt;
+ bcnt = frame->bcnt;
+ depth = frame->depth;
+ kfree(frame);
+
+ mlx5r_deref_odp_mkey(mmkey);
+ goto next_mr;
+ }
+
+end:
+ if (!IS_ERR(mmkey))
+ mlx5r_deref_odp_mkey(mmkey);
+ while (head) {
+ frame = head;
+ head = frame->next;
+ kfree(frame);
}
- srcu_read_unlock(&mib_dev->mr_srcu, srcu_key);
- pfault->mpfault.bytes_committed = 0;
- return ret ? ret : npages;
+ kfree(out);
+
+ *bytes_committed = 0;
+ return ret;
}
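
A minimal sketch of the pf_frame pattern used above, which replaces recursion over indirect mkeys with an explicit stack of frames that are pushed when an indirect entry is expanded and popped until the stack drains. The two-level key layout is a made-up example and the names are assumptions.

/* Illustrative sketch only; not part of the patch above. */
#include <stdio.h>
#include <stdlib.h>

struct frame {
	struct frame *next;
	unsigned int key;
	int depth;
};

static void push(struct frame **head, unsigned int key, int depth)
{
	struct frame *f = calloc(1, sizeof(*f));

	if (!f)
		exit(1);
	f->key = key;
	f->depth = depth;
	f->next = *head;
	*head = f;
}

int main(void)
{
	struct frame *head = NULL, *f;

	/* "Indirect" key 1 references keys 2 and 3; fault key 1 first. */
	push(&head, 1, 0);
	while (head) {
		f = head;
		head = f->next;
		printf("faulting key %u at depth %d\n", f->key, f->depth);
		if (f->key == 1) {	/* expand the indirect entry */
			push(&head, 2, f->depth + 1);
			push(&head, 3, f->depth + 1);
		}
		free(f);
	}
	return 0;
}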
-/**
+/*
* Parse a series of data segments for page fault handling.
*
- * @qp the QP on which the fault occurred.
- * @pfault contains page fault information.
- * @wqe points at the first data segment in the WQE.
- * @wqe_end points after the end of the WQE.
- * @bytes_mapped receives the number of bytes that the function was able to
- * map. This allows the caller to decide intelligently whether
- * enough memory was mapped to resolve the page fault
- * successfully (e.g. enough for the next MTU, or the entire
- * WQE).
- * @total_wqe_bytes receives the total data size of this WQE in bytes (minus
- * the committed bytes).
+ * @dev: Pointer to mlx5 IB device
+ * @pfault: contains page fault information.
+ * @wqe: points at the first data segment in the WQE.
+ * @wqe_end: points after the end of the WQE.
+ * @bytes_mapped: receives the number of bytes that the function was able to
+ * map. This allows the caller to decide intelligently whether
+ * enough memory was mapped to resolve the page fault
+ * successfully (e.g. enough for the next MTU, or the entire
+ * WQE).
+ * @total_wqe_bytes: receives the total data size of this WQE in bytes (minus
+ * the committed bytes).
+ * @receive_queue: true when parsing a receive (responder) WQE
*
- * Returns the number of pages loaded if positive, zero for an empty WQE, or a
- * negative error code.
+ * Returns zero for success or a negative error code.
*/
-static int pagefault_data_segments(struct mlx5_ib_qp *qp,
- struct mlx5_ib_pfault *pfault, void *wqe,
+static int pagefault_data_segments(struct mlx5_ib_dev *dev,
+ struct mlx5_pagefault *pfault,
+ void *wqe,
void *wqe_end, u32 *bytes_mapped,
- u32 *total_wqe_bytes, int receive_queue)
+ u32 *total_wqe_bytes, bool receive_queue)
{
- int ret = 0, npages = 0;
+ int ret = 0;
u64 io_virt;
- u32 key;
+ __be32 key;
u32 byte_count;
size_t bcnt;
int inline_segment;
- /* Skip SRQ next-WQE segment. */
- if (receive_queue && qp->ibqp.srq)
- wqe += sizeof(struct mlx5_wqe_srq_next_seg);
-
if (bytes_mapped)
*bytes_mapped = 0;
if (total_wqe_bytes)
@@ -332,7 +1188,7 @@ static int pagefault_data_segments(struct mlx5_ib_qp *qp,
struct mlx5_wqe_data_seg *dseg = wqe;
io_virt = be64_to_cpu(dseg->addr);
- key = be32_to_cpu(dseg->lkey);
+ key = dseg->lkey;
byte_count = be32_to_cpu(dseg->byte_count);
inline_segment = !!(byte_count & MLX5_INLINE_SEG);
bcnt = byte_count & ~MLX5_INLINE_SEG;
@@ -346,34 +1202,36 @@ static int pagefault_data_segments(struct mlx5_ib_qp *qp,
}
/* receive WQE end of sg list. */
- if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY &&
+ if (receive_queue && bcnt == 0 &&
+ key == dev->mkeys.terminate_scatter_list_mkey &&
io_virt == 0)
break;
if (!inline_segment && total_wqe_bytes) {
*total_wqe_bytes += bcnt - min_t(size_t, bcnt,
- pfault->mpfault.bytes_committed);
+ pfault->bytes_committed);
}
/* A zero length data segment designates a length of 2GB. */
if (bcnt == 0)
bcnt = 1U << 31;
- if (inline_segment || bcnt <= pfault->mpfault.bytes_committed) {
- pfault->mpfault.bytes_committed -=
+ if (inline_segment || bcnt <= pfault->bytes_committed) {
+ pfault->bytes_committed -=
min_t(size_t, bcnt,
- pfault->mpfault.bytes_committed);
+ pfault->bytes_committed);
continue;
}
- ret = pagefault_single_data_segment(qp, pfault, key, io_virt,
- bcnt, bytes_mapped);
+ ret = pagefault_single_data_segment(dev, NULL, be32_to_cpu(key),
+ io_virt, bcnt,
+ &pfault->bytes_committed,
+ bytes_mapped);
if (ret < 0)
break;
- npages += ret;
}
- return ret < 0 ? ret : npages;
+ return ret;
}
/*
@@ -381,16 +1239,14 @@ static int pagefault_data_segments(struct mlx5_ib_qp *qp,
* scatter-gather list, and set wqe_end to the end of the WQE.
*/
static int mlx5_ib_mr_initiator_pfault_handler(
- struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault,
- void **wqe, void **wqe_end, int wqe_length)
+ struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
+ struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
{
- struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
- u16 wqe_index = pfault->mpfault.wqe.wqe_index;
+ u16 wqe_index = pfault->wqe.wqe_index;
+ struct mlx5_base_av *av;
unsigned ds, opcode;
-#if defined(DEBUG)
- u32 ctrl_wqe_index, ctrl_qpn;
-#endif
+ u32 qpn = qp->trans_qp.base.mqp.qpn;
ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
@@ -401,104 +1257,73 @@ static int mlx5_ib_mr_initiator_pfault_handler(
if (ds == 0) {
mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",
- wqe_index, qp->mqp.qpn);
- return -EFAULT;
- }
-
-#if defined(DEBUG)
- ctrl_wqe_index = (be32_to_cpu(ctrl->opmod_idx_opcode) &
- MLX5_WQE_CTRL_WQE_INDEX_MASK) >>
- MLX5_WQE_CTRL_WQE_INDEX_SHIFT;
- if (wqe_index != ctrl_wqe_index) {
- mlx5_ib_err(dev, "Got WQE with invalid wqe_index. wqe_index=0x%x, qpn=0x%x ctrl->wqe_index=0x%x\n",
- wqe_index, qp->mqp.qpn,
- ctrl_wqe_index);
- return -EFAULT;
- }
-
- ctrl_qpn = (be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_QPN_MASK) >>
- MLX5_WQE_CTRL_QPN_SHIFT;
- if (qp->mqp.qpn != ctrl_qpn) {
- mlx5_ib_err(dev, "Got WQE with incorrect QP number. wqe_index=0x%x, qpn=0x%x ctrl->qpn=0x%x\n",
- wqe_index, qp->mqp.qpn,
- ctrl_qpn);
+ wqe_index, qpn);
return -EFAULT;
}
-#endif /* DEBUG */
*wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
*wqe += sizeof(*ctrl);
opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
MLX5_WQE_CTRL_OPCODE_MASK;
- switch (qp->ibqp.qp_type) {
- case IB_QPT_RC:
- switch (opcode) {
- case MLX5_OPCODE_SEND:
- case MLX5_OPCODE_SEND_IMM:
- case MLX5_OPCODE_SEND_INVAL:
- if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
- IB_ODP_SUPPORT_SEND))
- goto invalid_transport_or_opcode;
- break;
- case MLX5_OPCODE_RDMA_WRITE:
- case MLX5_OPCODE_RDMA_WRITE_IMM:
- if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
- IB_ODP_SUPPORT_WRITE))
- goto invalid_transport_or_opcode;
- *wqe += sizeof(struct mlx5_wqe_raddr_seg);
- break;
- case MLX5_OPCODE_RDMA_READ:
- if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
- IB_ODP_SUPPORT_READ))
- goto invalid_transport_or_opcode;
- *wqe += sizeof(struct mlx5_wqe_raddr_seg);
- break;
- default:
- goto invalid_transport_or_opcode;
- }
+
+ if (qp->type == IB_QPT_XRC_INI)
+ *wqe += sizeof(struct mlx5_wqe_xrc_seg);
+
+ if (qp->type == IB_QPT_UD || qp->type == MLX5_IB_QPT_DCI) {
+ av = *wqe;
+ if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
+ *wqe += sizeof(struct mlx5_av);
+ else
+ *wqe += sizeof(struct mlx5_base_av);
+ }
+
+ switch (opcode) {
+ case MLX5_OPCODE_RDMA_WRITE:
+ case MLX5_OPCODE_RDMA_WRITE_IMM:
+ case MLX5_OPCODE_RDMA_READ:
+ *wqe += sizeof(struct mlx5_wqe_raddr_seg);
break;
- case IB_QPT_UD:
- switch (opcode) {
- case MLX5_OPCODE_SEND:
- case MLX5_OPCODE_SEND_IMM:
- if (!(dev->odp_caps.per_transport_caps.ud_odp_caps &
- IB_ODP_SUPPORT_SEND))
- goto invalid_transport_or_opcode;
- *wqe += sizeof(struct mlx5_wqe_datagram_seg);
- break;
- default:
- goto invalid_transport_or_opcode;
- }
+ case MLX5_OPCODE_ATOMIC_CS:
+ case MLX5_OPCODE_ATOMIC_FA:
+ *wqe += sizeof(struct mlx5_wqe_raddr_seg);
+ *wqe += sizeof(struct mlx5_wqe_atomic_seg);
break;
- default:
-invalid_transport_or_opcode:
- mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode or transport. transport: 0x%x opcode: 0x%x.\n",
- qp->ibqp.qp_type, opcode);
- return -EFAULT;
}
return 0;
}
/*
- * Parse responder WQE. Advances the wqe pointer to point at the
- * scatter-gather list, and set wqe_end to the end of the WQE.
+ * Parse responder WQE and set wqe_end to the end of the WQE.
*/
-static int mlx5_ib_mr_responder_pfault_handler(
- struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault,
- void **wqe, void **wqe_end, int wqe_length)
+static int mlx5_ib_mr_responder_pfault_handler_srq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_srq *srq,
+ void **wqe, void **wqe_end,
+ int wqe_length)
{
- struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
- struct mlx5_ib_wq *wq = &qp->rq;
- int wqe_size = 1 << wq->wqe_shift;
+ int wqe_size = 1 << srq->msrq.wqe_shift;
- if (qp->ibqp.srq) {
- mlx5_ib_err(dev, "ODP fault on SRQ is not supported\n");
+ if (wqe_size > wqe_length) {
+ mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
return -EFAULT;
}
- if (qp->wq_sig) {
+ *wqe_end = *wqe + wqe_size;
+ *wqe += sizeof(struct mlx5_wqe_srq_next_seg);
+
+ return 0;
+}
+
+static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp,
+ void *wqe, void **wqe_end,
+ int wqe_length)
+{
+ struct mlx5_ib_wq *wq = &qp->rq;
+ int wqe_size = 1 << wq->wqe_shift;
+
+ if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) {
mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
return -EFAULT;
}
@@ -508,104 +1333,148 @@ static int mlx5_ib_mr_responder_pfault_handler(
return -EFAULT;
}
- switch (qp->ibqp.qp_type) {
- case IB_QPT_RC:
- if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
- IB_ODP_SUPPORT_RECV))
- goto invalid_transport_or_opcode;
+ *wqe_end = wqe + wqe_size;
+
+ return 0;
+}
+
+static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev,
+ u32 wq_num, int pf_type)
+{
+ struct mlx5_core_rsc_common *common = NULL;
+ struct mlx5_core_srq *srq;
+
+ switch (pf_type) {
+ case MLX5_WQE_PF_TYPE_RMP:
+ srq = mlx5_cmd_get_srq(dev, wq_num);
+ if (srq)
+ common = &srq->common;
+ break;
+ case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE:
+ case MLX5_WQE_PF_TYPE_RESP:
+ case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC:
+ common = mlx5_core_res_hold(dev, wq_num, MLX5_RES_QP);
break;
default:
-invalid_transport_or_opcode:
- mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport. transport: 0x%x\n",
- qp->ibqp.qp_type);
- return -EFAULT;
+ break;
}
- *wqe_end = *wqe + wqe_size;
+ return common;
+}
- return 0;
+static inline struct mlx5_ib_qp *res_to_qp(struct mlx5_core_rsc_common *res)
+{
+ struct mlx5_core_qp *mqp = (struct mlx5_core_qp *)res;
+
+ return to_mibqp(mqp);
}
-static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_qp *qp,
- struct mlx5_ib_pfault *pfault)
+static inline struct mlx5_ib_srq *res_to_srq(struct mlx5_core_rsc_common *res)
{
- struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
- int ret;
- void *wqe, *wqe_end;
- u32 bytes_mapped, total_wqe_bytes;
- char *buffer = NULL;
- int resume_with_error = 0;
- u16 wqe_index = pfault->mpfault.wqe.wqe_index;
- int requestor = pfault->mpfault.flags & MLX5_PFAULT_REQUESTOR;
+ struct mlx5_core_srq *msrq =
+ container_of(res, struct mlx5_core_srq, common);
- buffer = (char *)__get_free_page(GFP_KERNEL);
- if (!buffer) {
- mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
- resume_with_error = 1;
- goto resolve_page_fault;
+ return to_mibsrq(msrq);
+}
+
+static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
+ struct mlx5_pagefault *pfault)
+{
+ bool sq = pfault->type & MLX5_PFAULT_REQUESTOR;
+ u16 wqe_index = pfault->wqe.wqe_index;
+ void *wqe, *wqe_start = NULL, *wqe_end = NULL;
+ u32 bytes_mapped, total_wqe_bytes;
+ struct mlx5_core_rsc_common *res;
+ int resume_with_error = 1;
+ struct mlx5_ib_qp *qp;
+ size_t bytes_copied;
+ int ret = 0;
+
+ res = odp_get_rsc(dev, pfault->wqe.wq_num, pfault->type);
+ if (!res) {
+ mlx5_ib_dbg(dev, "wqe page fault for missing resource %d\n", pfault->wqe.wq_num);
+ return;
}
- ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer,
- PAGE_SIZE);
- if (ret < 0) {
- mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%x, wqe_index=%x, qpn=%x\n",
- -ret, wqe_index, qp->mqp.qpn);
- resume_with_error = 1;
+ if (res->res != MLX5_RES_QP && res->res != MLX5_RES_SRQ &&
+ res->res != MLX5_RES_XSRQ) {
+ mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n",
+ pfault->type);
goto resolve_page_fault;
}
- wqe = buffer;
- if (requestor)
- ret = mlx5_ib_mr_initiator_pfault_handler(qp, pfault, &wqe,
- &wqe_end, ret);
- else
- ret = mlx5_ib_mr_responder_pfault_handler(qp, pfault, &wqe,
- &wqe_end, ret);
- if (ret < 0) {
- resume_with_error = 1;
+ wqe_start = (void *)__get_free_page(GFP_KERNEL);
+ if (!wqe_start) {
+ mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
goto resolve_page_fault;
}
- if (wqe >= wqe_end) {
- mlx5_ib_err(dev, "ODP fault on invalid WQE.\n");
- resume_with_error = 1;
- goto resolve_page_fault;
+ wqe = wqe_start;
+ qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL;
+ if (qp && sq) {
+ ret = mlx5_ib_read_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
+ &bytes_copied);
+ if (ret)
+ goto read_user;
+ ret = mlx5_ib_mr_initiator_pfault_handler(
+ dev, pfault, qp, &wqe, &wqe_end, bytes_copied);
+ } else if (qp && !sq) {
+ ret = mlx5_ib_read_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,
+ &bytes_copied);
+ if (ret)
+ goto read_user;
+ ret = mlx5_ib_mr_responder_pfault_handler_rq(
+ dev, qp, wqe, &wqe_end, bytes_copied);
+ } else if (!qp) {
+ struct mlx5_ib_srq *srq = res_to_srq(res);
+
+ ret = mlx5_ib_read_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,
+ &bytes_copied);
+ if (ret)
+ goto read_user;
+ ret = mlx5_ib_mr_responder_pfault_handler_srq(
+ dev, srq, &wqe, &wqe_end, bytes_copied);
}
- ret = pagefault_data_segments(qp, pfault, wqe, wqe_end, &bytes_mapped,
- &total_wqe_bytes, !requestor);
- if (ret == -EAGAIN) {
+ if (ret < 0 || wqe >= wqe_end)
goto resolve_page_fault;
- } else if (ret < 0 || total_wqe_bytes > bytes_mapped) {
- mlx5_ib_err(dev, "Error getting user pages for page fault. Error: 0x%x\n",
- -ret);
- resume_with_error = 1;
+
+ ret = pagefault_data_segments(dev, pfault, wqe, wqe_end, &bytes_mapped,
+ &total_wqe_bytes, !sq);
+ if (ret == -EAGAIN)
+ goto out;
+
+ if (ret < 0 || total_wqe_bytes > bytes_mapped)
goto resolve_page_fault;
- }
-resolve_page_fault:
- mlx5_ib_page_fault_resume(qp, pfault, resume_with_error);
- mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, flags: 0x%x\n",
- qp->mqp.qpn, resume_with_error, pfault->mpfault.flags);
+out:
+ ret = 0;
+ resume_with_error = 0;
- free_page((unsigned long)buffer);
-}
+read_user:
+ if (ret)
+ mlx5_ib_err(
+ dev,
+ "Failed reading a WQE following page fault, error %d, wqe_index %x, qpn %llx\n",
+ ret, wqe_index, pfault->token);
-static int pages_in_range(u64 address, u32 length)
-{
- return (ALIGN(address + length, PAGE_SIZE) -
- (address & PAGE_MASK)) >> PAGE_SHIFT;
+resolve_page_fault:
+ mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
+ mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
+ pfault->wqe.wq_num, resume_with_error,
+ pfault->type);
+ mlx5_core_res_put(res);
+ free_page((unsigned long)wqe_start);
}
-static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp,
- struct mlx5_ib_pfault *pfault)
+static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
+ struct mlx5_pagefault *pfault)
{
- struct mlx5_pagefault *mpfault = &pfault->mpfault;
u64 address;
u32 length;
- u32 prefetch_len = mpfault->bytes_committed;
+ u32 prefetch_len = pfault->bytes_committed;
int prefetch_activated = 0;
- u32 rkey = mpfault->rdma.r_key;
+ u32 rkey = pfault->rdma.r_key;
int ret;
/* The RDMA responder handler handles the page fault in two parts.
@@ -614,38 +1483,40 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp,
* prefetches more pages. The second operation cannot use the pfault
* context and therefore uses the dummy_pfault context allocated on
* the stack */
- struct mlx5_ib_pfault dummy_pfault = {};
-
- dummy_pfault.mpfault.bytes_committed = 0;
-
- mpfault->rdma.rdma_va += mpfault->bytes_committed;
- mpfault->rdma.rdma_op_len -= min(mpfault->bytes_committed,
- mpfault->rdma.rdma_op_len);
- mpfault->bytes_committed = 0;
+ pfault->rdma.rdma_va += pfault->bytes_committed;
+ pfault->rdma.rdma_op_len -= min(pfault->bytes_committed,
+ pfault->rdma.rdma_op_len);
+ pfault->bytes_committed = 0;
- address = mpfault->rdma.rdma_va;
- length = mpfault->rdma.rdma_op_len;
+ address = pfault->rdma.rdma_va;
+ length = pfault->rdma.rdma_op_len;
/* For some operations, the hardware cannot tell the exact message
* length, and in those cases it reports zero. Use prefetch
* logic. */
if (length == 0) {
prefetch_activated = 1;
- length = mpfault->rdma.packet_size;
+ length = pfault->rdma.packet_size;
prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
}
- ret = pagefault_single_data_segment(qp, pfault, rkey, address, length,
- NULL);
+ ret = pagefault_single_data_segment(dev, NULL, rkey, address, length,
+ &pfault->bytes_committed, NULL);
if (ret == -EAGAIN) {
/* We're racing with an invalidation, don't prefetch */
prefetch_activated = 0;
- } else if (ret < 0 || pages_in_range(address, length) > ret) {
- mlx5_ib_page_fault_resume(qp, pfault, 1);
+ } else if (ret < 0) {
+ mlx5_ib_page_fault_resume(dev, pfault, 1);
+ if (ret != -ENOENT)
+ mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%llx, type: 0x%x\n",
+ ret, pfault->token, pfault->type);
return;
}
- mlx5_ib_page_fault_resume(qp, pfault, 0);
+ mlx5_ib_page_fault_resume(dev, pfault, 0);
+ mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%llx, type: 0x%x, prefetch_activated: %d\n",
+ pfault->token, pfault->type,
+ prefetch_activated);
/* At this point, there might be a new pagefault already arriving in
* the eq, switch to the dummy pagefault for the rest of the
@@ -653,139 +1524,575 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp,
* work-queue is being fenced. */
if (prefetch_activated) {
- ret = pagefault_single_data_segment(qp, &dummy_pfault, rkey,
- address,
+ u32 bytes_committed = 0;
+
+ ret = pagefault_single_data_segment(dev, NULL, rkey, address,
prefetch_len,
- NULL);
- if (ret < 0) {
- pr_warn("Prefetch failed (ret = %d, prefetch_activated = %d) for QPN %d, address: 0x%.16llx, length = 0x%.16x\n",
- ret, prefetch_activated,
- qp->ibqp.qp_num, address, prefetch_len);
+ &bytes_committed, NULL);
+ if (ret < 0 && ret != -EAGAIN) {
+ mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%llx, address: 0x%.16llx, length = 0x%.16x\n",
+ ret, pfault->token, address, prefetch_len);
}
}
}
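A rough worked illustration of the zero-length fallback above, with made-up numbers; only the flow is taken from the code:

/*
 * Illustration only, with made-up values:
 *   the HW reports rdma_op_len == 0 but packet_size == 1024, and
 *   bytes_committed was 0x20000 when the fault arrived, so:
 *     length       = 1024                       (fault just one packet)
 *     prefetch_len = min(MAX_PREFETCH_LEN, 0x20000)
 *   The demanded 1024 bytes are faulted and the fault is resumed first;
 *   the larger prefetch is then issued as a second, best-effort
 *   pagefault_single_data_segment() call.
 */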
-void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
- struct mlx5_ib_pfault *pfault)
+#define MLX5_MEMORY_PAGE_FAULT_FLAGS_LAST BIT(7)
+static void mlx5_ib_mr_memory_pfault_handler(struct mlx5_ib_dev *dev,
+ struct mlx5_pagefault *pfault)
{
- u8 event_subtype = pfault->mpfault.event_subtype;
+ u64 prefetch_va =
+ pfault->memory.va - pfault->memory.prefetch_before_byte_count;
+ size_t prefetch_size = pfault->memory.prefetch_before_byte_count +
+ pfault->memory.fault_byte_count +
+ pfault->memory.prefetch_after_byte_count;
+ struct mlx5_ib_mkey *mmkey;
+ struct mlx5_ib_mr *mr, *child_mr;
+ int ret = 0;
+
+ mmkey = find_odp_mkey(dev, pfault->memory.mkey);
+ if (IS_ERR(mmkey))
+ goto err;
+
+ switch (mmkey->type) {
+ case MLX5_MKEY_IMPLICIT_CHILD:
+ child_mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+ mr = child_mr->parent;
+ break;
+ case MLX5_MKEY_NULL:
+ mr = container_of(mmkey, struct mlx5_ib_mr, null_mmkey);
+ break;
+ default:
+ mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+ break;
+ }
+
+ /* If prefetch fails, handle only demanded page fault */
+ ret = pagefault_mr(mr, prefetch_va, prefetch_size, NULL, 0, true);
+ if (ret < 0) {
+ ret = pagefault_mr(mr, pfault->memory.va,
+ pfault->memory.fault_byte_count, NULL, 0,
+ true);
+ if (ret < 0)
+ goto err;
+ }
+
+ mlx5_update_odp_stats_with_handled(mr, faults, ret);
+ mlx5r_deref_odp_mkey(mmkey);
+
+ if (pfault->memory.flags & MLX5_MEMORY_PAGE_FAULT_FLAGS_LAST)
+ mlx5_ib_page_fault_resume(dev, pfault, 0);
+
+ mlx5_ib_dbg(
+ dev,
+ "PAGE FAULT completed %s. token 0x%llx, mkey: 0x%x, va: 0x%llx, byte_count: 0x%x\n",
+ pfault->memory.flags & MLX5_MEMORY_PAGE_FAULT_FLAGS_LAST ?
+ "" :
+ "without resume cmd",
+ pfault->token, pfault->memory.mkey, pfault->memory.va,
+ pfault->memory.fault_byte_count);
+
+ return;
+
+err:
+ if (!IS_ERR(mmkey))
+ mlx5r_deref_odp_mkey(mmkey);
+ mlx5_ib_page_fault_resume(dev, pfault, 1);
+ mlx5_ib_dbg(
+ dev,
+ "PAGE FAULT error. token 0x%llx, mkey: 0x%x, va: 0x%llx, byte_count: 0x%x, err: %d\n",
+ pfault->token, pfault->memory.mkey, pfault->memory.va,
+ pfault->memory.fault_byte_count, ret);
+}
+
+static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault)
+{
+ u8 event_subtype = pfault->event_subtype;
switch (event_subtype) {
case MLX5_PFAULT_SUBTYPE_WQE:
- mlx5_ib_mr_wqe_pfault_handler(qp, pfault);
+ mlx5_ib_mr_wqe_pfault_handler(dev, pfault);
break;
case MLX5_PFAULT_SUBTYPE_RDMA:
- mlx5_ib_mr_rdma_pfault_handler(qp, pfault);
+ mlx5_ib_mr_rdma_pfault_handler(dev, pfault);
break;
- default:
- pr_warn("Invalid page fault event subtype: 0x%x\n",
- event_subtype);
- mlx5_ib_page_fault_resume(qp, pfault, 1);
+ case MLX5_PFAULT_SUBTYPE_MEMORY:
+ mlx5_ib_mr_memory_pfault_handler(dev, pfault);
break;
+ default:
+ mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n",
+ event_subtype);
+ mlx5_ib_page_fault_resume(dev, pfault, 1);
}
}
-static void mlx5_ib_qp_pfault_action(struct work_struct *work)
+static void mlx5_ib_eqe_pf_action(struct work_struct *work)
{
- struct mlx5_ib_pfault *pfault = container_of(work,
- struct mlx5_ib_pfault,
+ struct mlx5_pagefault *pfault = container_of(work,
+ struct mlx5_pagefault,
work);
- enum mlx5_ib_pagefault_context context =
- mlx5_ib_get_pagefault_context(&pfault->mpfault);
- struct mlx5_ib_qp *qp = container_of(pfault, struct mlx5_ib_qp,
- pagefaults[context]);
- mlx5_ib_mr_pfault_handler(qp, pfault);
+ struct mlx5_ib_pf_eq *eq = pfault->eq;
+
+ mlx5_ib_pfault(eq->dev, pfault);
+ mempool_free(pfault, eq->pool);
}
-void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp)
+#define MEMORY_SCHEME_PAGE_FAULT_GRANULARITY 4096
+static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
{
- unsigned long flags;
+ struct mlx5_eqe_page_fault *pf_eqe;
+ struct mlx5_pagefault *pfault;
+ struct mlx5_eqe *eqe;
+ int cc = 0;
+
+ while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) {
+ pfault = mempool_alloc(eq->pool, GFP_ATOMIC);
+ if (!pfault) {
+ schedule_work(&eq->work);
+ break;
+ }
- spin_lock_irqsave(&qp->disable_page_faults_lock, flags);
- qp->disable_page_faults = 1;
- spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags);
+ pf_eqe = &eqe->data.page_fault;
+ pfault->event_subtype = eqe->sub_type;
+
+ switch (eqe->sub_type) {
+ case MLX5_PFAULT_SUBTYPE_RDMA:
+ /* RDMA based event */
+ pfault->bytes_committed =
+ be32_to_cpu(pf_eqe->rdma.bytes_committed);
+ pfault->type =
+ be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
+ pfault->token =
+ be32_to_cpu(pf_eqe->rdma.pftype_token) &
+ MLX5_24BIT_MASK;
+ pfault->rdma.r_key =
+ be32_to_cpu(pf_eqe->rdma.r_key);
+ pfault->rdma.packet_size =
+ be16_to_cpu(pf_eqe->rdma.packet_length);
+ pfault->rdma.rdma_op_len =
+ be32_to_cpu(pf_eqe->rdma.rdma_op_len);
+ pfault->rdma.rdma_va =
+ be64_to_cpu(pf_eqe->rdma.rdma_va);
+ mlx5_ib_dbg(
+ eq->dev,
+ "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x, type:0x%x, token: 0x%06llx, r_key: 0x%08x\n",
+ eqe->sub_type, pfault->bytes_committed,
+ pfault->type, pfault->token,
+ pfault->rdma.r_key);
+ mlx5_ib_dbg(eq->dev,
+ "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
+ pfault->rdma.rdma_op_len,
+ pfault->rdma.rdma_va);
+ break;
- /*
- * Note that at this point, we are guarenteed that no more
- * work queue elements will be posted to the work queue with
- * the QP we are closing.
- */
- flush_workqueue(mlx5_ib_page_fault_wq);
+ case MLX5_PFAULT_SUBTYPE_WQE:
+ /* WQE based event */
+ pfault->bytes_committed =
+ be32_to_cpu(pf_eqe->wqe.bytes_committed);
+ pfault->type =
+ (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
+ pfault->token =
+ be32_to_cpu(pf_eqe->wqe.token);
+ pfault->wqe.wq_num =
+ be32_to_cpu(pf_eqe->wqe.pftype_wq) &
+ MLX5_24BIT_MASK;
+ pfault->wqe.wqe_index =
+ be16_to_cpu(pf_eqe->wqe.wqe_index);
+ pfault->wqe.packet_size =
+ be16_to_cpu(pf_eqe->wqe.packet_length);
+ mlx5_ib_dbg(
+ eq->dev,
+ "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x, type:0x%x, token: 0x%06llx, wq_num: 0x%06x, wqe_index: 0x%04x\n",
+ eqe->sub_type, pfault->bytes_committed,
+ pfault->type, pfault->token, pfault->wqe.wq_num,
+ pfault->wqe.wqe_index);
+ break;
+
+ case MLX5_PFAULT_SUBTYPE_MEMORY:
+ /* Memory based event */
+ pfault->bytes_committed = 0;
+ pfault->token =
+ be32_to_cpu(pf_eqe->memory.token31_0) |
+ ((u64)be16_to_cpu(pf_eqe->memory.token47_32)
+ << 32);
+ pfault->memory.va = be64_to_cpu(pf_eqe->memory.va);
+ pfault->memory.mkey = be32_to_cpu(pf_eqe->memory.mkey);
+ pfault->memory.fault_byte_count = (be32_to_cpu(
+ pf_eqe->memory.demand_fault_pages) >> 12) *
+ MEMORY_SCHEME_PAGE_FAULT_GRANULARITY;
+ pfault->memory.prefetch_before_byte_count =
+ be16_to_cpu(
+ pf_eqe->memory.pre_demand_fault_pages) *
+ MEMORY_SCHEME_PAGE_FAULT_GRANULARITY;
+ pfault->memory.prefetch_after_byte_count =
+ be16_to_cpu(
+ pf_eqe->memory.post_demand_fault_pages) *
+ MEMORY_SCHEME_PAGE_FAULT_GRANULARITY;
+ pfault->memory.flags = pf_eqe->memory.flags;
+ mlx5_ib_dbg(
+ eq->dev,
+ "PAGE_FAULT: subtype: 0x%02x, token: 0x%06llx, mkey: 0x%06x, fault_byte_count: 0x%06x, va: 0x%016llx, flags: 0x%02x\n",
+ eqe->sub_type, pfault->token,
+ pfault->memory.mkey,
+ pfault->memory.fault_byte_count,
+ pfault->memory.va, pfault->memory.flags);
+ mlx5_ib_dbg(
+ eq->dev,
+ "PAGE_FAULT: prefetch size: before: 0x%06x, after 0x%06x\n",
+ pfault->memory.prefetch_before_byte_count,
+ pfault->memory.prefetch_after_byte_count);
+ break;
+
+ default:
+ mlx5_ib_warn(eq->dev,
+ "Unsupported page fault event sub-type: 0x%02hhx\n",
+ eqe->sub_type);
+ /* Unsupported page faults should still be
+ * resolved by the page fault handler
+ */
+ }
+
+ pfault->eq = eq;
+ INIT_WORK(&pfault->work, mlx5_ib_eqe_pf_action);
+ queue_work(eq->wq, &pfault->work);
+
+ cc = mlx5_eq_update_cc(eq->core, ++cc);
+ }
+
+ mlx5_eq_update_ci(eq->core, cc, 1);
}
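A rough worked example of the memory-scheme decoding above, using made-up EQE field values:

/*
 * Made-up EQE field values, only to illustrate the arithmetic:
 *   demand_fault_pages      = 0x00003000  ->  >> 12  = 3 pages
 *   pre_demand_fault_pages  = 2
 *   post_demand_fault_pages = 1
 * With MEMORY_SCHEME_PAGE_FAULT_GRANULARITY == 4096:
 *   fault_byte_count           = 3 * 4096 = 12288
 *   prefetch_before_byte_count = 2 * 4096 =  8192
 *   prefetch_after_byte_count  = 1 * 4096 =  4096
 * mlx5_ib_mr_memory_pfault_handler() then tries to fault the whole
 * window (va - 8192, 24576 bytes) and falls back to only the demanded
 * 12288 bytes at va if that fails.
 */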
-void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp)
+static int mlx5_ib_eq_pf_int(struct notifier_block *nb, unsigned long type,
+ void *data)
{
+ struct mlx5_ib_pf_eq *eq =
+ container_of(nb, struct mlx5_ib_pf_eq, irq_nb);
unsigned long flags;
- spin_lock_irqsave(&qp->disable_page_faults_lock, flags);
- qp->disable_page_faults = 0;
- spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags);
+ if (spin_trylock_irqsave(&eq->lock, flags)) {
+ mlx5_ib_eq_pf_process(eq);
+ spin_unlock_irqrestore(&eq->lock, flags);
+ } else {
+ schedule_work(&eq->work);
+ }
+
+ return IRQ_HANDLED;
}
-static void mlx5_ib_pfault_handler(struct mlx5_core_qp *qp,
- struct mlx5_pagefault *pfault)
+/* mempool_refill() was proposed but unfortunately wasn't accepted
+ * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
+ * Cheap workaround.
+ */
+static void mempool_refill(mempool_t *pool)
{
- /*
- * Note that we will only get one fault event per QP per context
- * (responder/initiator, read/write), until we resolve the page fault
- * with the mlx5_ib_page_fault_resume command. Since this function is
- * called from within the work element, there is no risk of missing
- * events.
- */
- struct mlx5_ib_qp *mibqp = to_mibqp(qp);
- enum mlx5_ib_pagefault_context context =
- mlx5_ib_get_pagefault_context(pfault);
- struct mlx5_ib_pfault *qp_pfault = &mibqp->pagefaults[context];
+ while (pool->curr_nr < pool->min_nr)
+ mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
+}
+
+static void mlx5_ib_eq_pf_action(struct work_struct *work)
+{
+ struct mlx5_ib_pf_eq *eq =
+ container_of(work, struct mlx5_ib_pf_eq, work);
- qp_pfault->mpfault = *pfault;
+ mempool_refill(eq->pool);
- /* No need to stop interrupts here since we are in an interrupt */
- spin_lock(&mibqp->disable_page_faults_lock);
- if (!mibqp->disable_page_faults)
- queue_work(mlx5_ib_page_fault_wq, &qp_pfault->work);
- spin_unlock(&mibqp->disable_page_faults_lock);
+ spin_lock_irq(&eq->lock);
+ mlx5_ib_eq_pf_process(eq);
+ spin_unlock_irq(&eq->lock);
}
-void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp)
+enum {
+ MLX5_IB_NUM_PF_EQE = 0x1000,
+ MLX5_IB_NUM_PF_DRAIN = 64,
+};
+
+int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
{
- int i;
+ struct mlx5_eq_param param = {};
+ int err = 0;
+
+ mutex_lock(&dev->odp_eq_mutex);
+ if (eq->core)
+ goto unlock;
+ INIT_WORK(&eq->work, mlx5_ib_eq_pf_action);
+ spin_lock_init(&eq->lock);
+ eq->dev = dev;
+
+ eq->pool = mempool_create_kmalloc_pool(MLX5_IB_NUM_PF_DRAIN,
+ sizeof(struct mlx5_pagefault));
+ if (!eq->pool) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ eq->wq = alloc_workqueue("mlx5_ib_page_fault",
+ WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM,
+ MLX5_NUM_CMD_EQE);
+ if (!eq->wq) {
+ err = -ENOMEM;
+ goto err_mempool;
+ }
- qp->disable_page_faults = 1;
- spin_lock_init(&qp->disable_page_faults_lock);
+ eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
+ param = (struct mlx5_eq_param) {
+ .nent = MLX5_IB_NUM_PF_EQE,
+ };
+ param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
+ eq->core = mlx5_eq_create_generic(dev->mdev, &param);
+ if (IS_ERR(eq->core)) {
+ err = PTR_ERR(eq->core);
+ goto err_wq;
+ }
+ err = mlx5_eq_enable(dev->mdev, eq->core, &eq->irq_nb);
+ if (err) {
+ mlx5_ib_err(dev, "failed to enable odp EQ %d\n", err);
+ goto err_eq;
+ }
+
+ mutex_unlock(&dev->odp_eq_mutex);
+ return 0;
+err_eq:
+ mlx5_eq_destroy_generic(dev->mdev, eq->core);
+err_wq:
+ eq->core = NULL;
+ destroy_workqueue(eq->wq);
+err_mempool:
+ mempool_destroy(eq->pool);
+unlock:
+ mutex_unlock(&dev->odp_eq_mutex);
+ return err;
+}
+
+static int
+mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
+{
+ int err;
- qp->mqp.pfault_handler = mlx5_ib_pfault_handler;
+ if (!eq->core)
+ return 0;
+ mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb);
+ err = mlx5_eq_destroy_generic(dev->mdev, eq->core);
+ cancel_work_sync(&eq->work);
+ destroy_workqueue(eq->wq);
+ mempool_destroy(eq->pool);
- for (i = 0; i < MLX5_IB_PAGEFAULT_CONTEXTS; ++i)
- INIT_WORK(&qp->pagefaults[i].work, mlx5_ib_qp_pfault_action);
+ return err;
}
-int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev)
+int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev)
{
- int ret;
+ struct mlx5r_cache_rb_key rb_key = {
+ .access_mode = MLX5_MKC_ACCESS_MODE_KSM,
+ .ndescs = mlx5_imr_ksm_entries,
+ .ph = MLX5_IB_NO_PH,
+ };
+ struct mlx5_cache_ent *ent;
- ret = init_srcu_struct(&ibdev->mr_srcu);
- if (ret)
- return ret;
+ if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
+ return 0;
+
+ ent = mlx5r_cache_create_ent_locked(dev, rb_key, true);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ return 0;
+}
+static const struct ib_device_ops mlx5_ib_dev_odp_ops = {
+ .advise_mr = mlx5_ib_advise_mr,
+};
+
+int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
+{
+ internal_fill_odp_caps(dev);
+
+ if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
+ return 0;
+
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
+
+ mutex_init(&dev->odp_eq_mutex);
return 0;
}
-void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev)
+void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
+{
+ if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
+ return;
+
+ mlx5_ib_odp_destroy_eq(dev, &dev->odp_pf_eq);
+}
+
+int mlx5_ib_odp_init(void)
{
- cleanup_srcu_struct(&ibdev->mr_srcu);
+ mlx5_imr_ksm_entries = BIT_ULL(get_order(TASK_SIZE) -
+ MLX5_IMR_MTT_BITS);
+
+ return 0;
}
-int __init mlx5_ib_odp_init(void)
+struct prefetch_mr_work {
+ struct work_struct work;
+ u32 pf_flags;
+ u32 num_sge;
+ struct {
+ u64 io_virt;
+ struct mlx5_ib_mr *mr;
+ size_t length;
+ } frags[];
+};
+
+static void destroy_prefetch_work(struct prefetch_mr_work *work)
{
- mlx5_ib_page_fault_wq =
- create_singlethread_workqueue("mlx5_ib_page_faults");
- if (!mlx5_ib_page_fault_wq)
- return -ENOMEM;
+ u32 i;
+
+ for (i = 0; i < work->num_sge; ++i)
+ mlx5r_deref_odp_mkey(&work->frags[i].mr->mmkey);
+
+ kvfree(work);
+}
+
+static struct mlx5_ib_mr *
+get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
+ u32 lkey)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_mr *mr = NULL;
+ struct mlx5_ib_mkey *mmkey;
+
+ xa_lock(&dev->odp_mkeys);
+ mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
+ if (!mmkey || mmkey->key != lkey) {
+ mr = ERR_PTR(-ENOENT);
+ goto end;
+ }
+ if (mmkey->type != MLX5_MKEY_MR) {
+ mr = ERR_PTR(-EINVAL);
+ goto end;
+ }
+
+ mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+
+ if (mr->ibmr.pd != pd) {
+ mr = ERR_PTR(-EPERM);
+ goto end;
+ }
+
+ /* prefetch with write-access must be supported by the MR */
+ if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
+ !mr->umem->writable) {
+ mr = ERR_PTR(-EPERM);
+ goto end;
+ }
+
+ refcount_inc(&mmkey->usecount);
+end:
+ xa_unlock(&dev->odp_mkeys);
+ return mr;
+}
+
+static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
+{
+ struct prefetch_mr_work *work =
+ container_of(w, struct prefetch_mr_work, work);
+ u32 bytes_mapped = 0;
+ int ret;
+ u32 i;
+
+ /* We rely on IB/core to execute the work only when num_sge != 0. */

+ WARN_ON(!work->num_sge);
+ for (i = 0; i < work->num_sge; ++i) {
+ ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
+ work->frags[i].length, &bytes_mapped,
+ work->pf_flags, false);
+ if (ret <= 0)
+ continue;
+ mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret);
+ }
+
+ destroy_prefetch_work(work);
+}
+
+static int init_prefetch_work(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 pf_flags, struct prefetch_mr_work *work,
+ struct ib_sge *sg_list, u32 num_sge)
+{
+ u32 i;
+
+ INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work);
+ work->pf_flags = pf_flags;
+
+ for (i = 0; i < num_sge; ++i) {
+ struct mlx5_ib_mr *mr;
+
+ mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
+ if (IS_ERR(mr)) {
+ work->num_sge = i;
+ return PTR_ERR(mr);
+ }
+ work->frags[i].io_virt = sg_list[i].addr;
+ work->frags[i].length = sg_list[i].length;
+ work->frags[i].mr = mr;
+ }
+ work->num_sge = num_sge;
+ return 0;
+}
+
+static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 pf_flags, struct ib_sge *sg_list,
+ u32 num_sge)
+{
+ u32 bytes_mapped = 0;
+ int ret = 0;
+ u32 i;
+
+ for (i = 0; i < num_sge; ++i) {
+ struct mlx5_ib_mr *mr;
+
+ mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
+ if (IS_ERR(mr))
+ return PTR_ERR(mr);
+ ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
+ &bytes_mapped, pf_flags, false);
+ if (ret < 0) {
+ mlx5r_deref_odp_mkey(&mr->mmkey);
+ return ret;
+ }
+ mlx5_update_odp_stats(mr, prefetch, ret);
+ mlx5r_deref_odp_mkey(&mr->mmkey);
+ }
return 0;
}
-void mlx5_ib_odp_cleanup(void)
+int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 flags, struct ib_sge *sg_list, u32 num_sge)
{
- destroy_workqueue(mlx5_ib_page_fault_wq);
+ u32 pf_flags = 0;
+ struct prefetch_mr_work *work;
+ int rc;
+
+ if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
+ pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
+
+ if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
+ pf_flags |= MLX5_PF_FLAGS_SNAPSHOT;
+
+ if (flags & IB_UVERBS_ADVISE_MR_FLAG_FLUSH)
+ return mlx5_ib_prefetch_sg_list(pd, advice, pf_flags, sg_list,
+ num_sge);
+
+ work = kvzalloc(struct_size(work, frags, num_sge), GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
+
+ rc = init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge);
+ if (rc) {
+ destroy_prefetch_work(work);
+ return rc;
+ }
+ queue_work(system_unbound_wq, &work->work);
+ return 0;
}
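For context, this path is reached from userspace through rdma-core's ibv_advise_mr(); below is a minimal sketch, assuming an ODP-registered, writable MR and its PD already exist (the prefetch_range() helper and its arguments are purely illustrative):

#include <infiniband/verbs.h>

/*
 * Illustrative helper: pre-fault one range of an ODP MR.
 * IBV_ADVISE_MR_FLAG_FLUSH makes the call synchronous, which lands in
 * mlx5_ib_prefetch_sg_list() above; without it the request is queued
 * to system_unbound_wq via init_prefetch_work().
 */
static int prefetch_range(struct ibv_pd *pd, struct ibv_mr *mr,
			  uint64_t addr, uint32_t len)
{
	struct ibv_sge sge = {
		.addr   = addr,
		.length = len,
		.lkey   = mr->lkey,
	};

	return ibv_advise_mr(pd, IBV_ADVISE_MR_ADVICE_PREFETCH_WRITE,
			     IBV_ADVISE_MR_FLAG_FLUSH, &sge, 1);
}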
diff --git a/drivers/infiniband/hw/mlx5/qos.c b/drivers/infiniband/hw/mlx5/qos.c
new file mode 100644
index 000000000000..dce92554142a
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/qos.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/mlx5_user_ioctl_verbs.h>
+#include <linux/mlx5/driver.h>
+#include "mlx5_ib.h"
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
+static bool pp_is_supported(struct ib_device *device)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+
+ return (MLX5_CAP_GEN(dev->mdev, qos) &&
+ MLX5_CAP_QOS(dev->mdev, packet_pacing) &&
+ MLX5_CAP_QOS(dev->mdev, packet_pacing_uid));
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_PP_OBJ_ALLOC)(
+ struct uverbs_attr_bundle *attrs)
+{
+ u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE);
+ struct mlx5_ib_dev *dev;
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_pp *pp_entry;
+ void *in_ctx;
+ u16 uid;
+ int inlen;
+ u32 flags;
+ int err;
+
+ c = to_mucontext(ib_uverbs_get_ucontext(attrs));
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+
+ /* The allocated entry can be used only by a DEVX context */
+ if (!c->devx_uid)
+ return -EINVAL;
+
+ dev = to_mdev(c->ibucontext.device);
+ pp_entry = kzalloc(sizeof(*pp_entry), GFP_KERNEL);
+ if (!pp_entry)
+ return -ENOMEM;
+
+ in_ctx = uverbs_attr_get_alloced_ptr(attrs,
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX);
+ inlen = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX);
+ memcpy(rl_raw, in_ctx, inlen);
+ err = uverbs_get_flags32(&flags, attrs,
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_FLAGS,
+ MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX);
+ if (err)
+ goto err;
+
+ uid = (flags & MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX) ?
+ c->devx_uid : MLX5_SHARED_RESOURCE_UID;
+
+ err = mlx5_rl_add_rate_raw(dev->mdev, rl_raw, uid,
+ (flags & MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX),
+ &pp_entry->index);
+ if (err)
+ goto err;
+
+ pp_entry->mdev = dev->mdev;
+ uobj->object = pp_entry;
+ uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE);
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
+ &pp_entry->index, sizeof(pp_entry->index));
+ return err;
+
+err:
+ kfree(pp_entry);
+ return err;
+}
+
+static int pp_obj_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_pp *pp_entry = uobject->object;
+
+ mlx5_rl_remove_rate_raw(pp_entry->mdev, pp_entry->index);
+ kfree(pp_entry);
+ return 0;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_PP_OBJ_ALLOC,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE,
+ MLX5_IB_OBJECT_PP,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX,
+ UVERBS_ATTR_SIZE(1,
+ MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_PP_OBJ_ALLOC_FLAGS,
+ enum mlx5_ib_uapi_pp_alloc_flags,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
+ UVERBS_ATTR_TYPE(u16),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_PP_OBJ_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_PP_OBJ_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_PP,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_PP,
+ UVERBS_TYPE_ALLOC_IDR(pp_obj_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_PP_OBJ_ALLOC),
+ &UVERBS_METHOD(MLX5_IB_METHOD_PP_OBJ_DESTROY));
+
+
+const struct uapi_definition mlx5_ib_qos_defs[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ MLX5_IB_OBJECT_PP,
+ UAPI_DEF_IS_OBJ_SUPPORTED(pp_is_supported)),
+ {},
+};
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index c745c6c5e10d..88724d15705d 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -30,13 +30,19 @@
* SOFTWARE.
*/
-#include <linux/module.h>
+#include <linux/etherdevice.h>
#include <rdma/ib_umem.h>
+#include <rdma/ib_cache.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/rdma_counter.h>
+#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
-#include "user.h"
-
-/* not supported currently */
-static int wq_signature;
+#include "ib_rep.h"
+#include "counters.h"
+#include "cmd.h"
+#include "umr.h"
+#include "qp.h"
+#include "wr.h"
enum {
MLX5_IB_ACK_REQ_FREQ = 8,
@@ -49,27 +55,37 @@ enum {
MLX5_IB_LINK_TYPE_ETH = 1
};
+enum raw_qp_set_mask_map {
+ MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID = 1UL << 0,
+ MLX5_RAW_QP_RATE_LIMIT = 1UL << 1,
+};
+
enum {
- MLX5_IB_SQ_STRIDE = 6,
- MLX5_IB_CACHE_LINE_SIZE = 64,
+ MLX5_QP_RM_GO_BACK_N = 0x1,
+};
+
+struct mlx5_modify_raw_qp_param {
+ u16 operation;
+
+ u32 set_mask; /* raw_qp_set_mask_map */
+
+ struct mlx5_rate_limit rl;
+
+ u8 rq_q_ctr_id;
+ u32 port;
};
-static const u32 mlx5_ib_opcode[] = {
- [IB_WR_SEND] = MLX5_OPCODE_SEND,
- [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM,
- [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE,
- [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM,
- [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ,
- [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS,
- [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA,
- [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL,
- [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR,
- [IB_WR_FAST_REG_MR] = MLX5_OPCODE_UMR,
- [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS,
- [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA,
- [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
+struct mlx5_ib_qp_event_work {
+ struct work_struct work;
+ struct mlx5_core_qp *qp;
+ int type;
};
+static struct workqueue_struct *mlx5_ib_qp_event_wq;
+
+static void get_cqs(enum ib_qp_type qp_type,
+ struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
+ struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);
static int is_qp0(enum ib_qp_type qp_type)
{
@@ -81,135 +97,337 @@ static int is_sqp(enum ib_qp_type qp_type)
return is_qp0(qp_type) || is_qp1(qp_type);
}
-static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
+/**
+ * mlx5_ib_read_user_wqe_common() - Copy a WQE (or part of) from user WQ
+ * to kernel buffer
+ *
+ * @umem: User space memory where the WQ is
+ * @buffer: buffer to copy to
+ * @buflen: buffer length
+ * @wqe_index: index of WQE to copy from
+ * @wq_offset: offset to start of WQ
+ * @wq_wqe_cnt: number of WQEs in WQ
+ * @wq_wqe_shift: log2 of WQE size
+ * @bcnt: number of bytes to copy
+ * @bytes_copied: number of bytes actually copied (return value)
+ *
+ * Copies bcnt bytes or fewer, starting from the beginning of the WQE.
+ * Does not guarantee to copy the entire WQE.
+ *
+ * Return: zero on success, or an error code.
+ */
+static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem, void *buffer,
+ size_t buflen, int wqe_index,
+ int wq_offset, int wq_wqe_cnt,
+ int wq_wqe_shift, int bcnt,
+ size_t *bytes_copied)
{
- return mlx5_buf_offset(&qp->buf, offset);
-}
+ size_t offset = wq_offset + ((wqe_index % wq_wqe_cnt) << wq_wqe_shift);
+ size_t wq_end = wq_offset + (wq_wqe_cnt << wq_wqe_shift);
+ size_t copy_length;
+ int ret;
-static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
-{
- return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
+ /* don't copy more than requested, more than the buffer length, or
+ * beyond the WQ end
+ */
+ copy_length = min_t(u32, buflen, wq_end - offset);
+ copy_length = min_t(u32, copy_length, bcnt);
+
+ ret = ib_umem_copy_from(buffer, umem, offset, copy_length);
+ if (ret)
+ return ret;
+
+ if (!ret && bytes_copied)
+ *bytes_copied = copy_length;
+
+ return 0;
}
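A rough worked example of the clamping above, with made-up queue parameters:

/*
 * Made-up parameters, only to illustrate the clamping above:
 *   wq_offset = 0, wq_wqe_cnt = 64, wq_wqe_shift = 6 (64-byte strides)
 *   wqe_index = 63, buflen = 4096, bcnt = 4096
 *
 *   offset      = 0 + ((63 % 64) << 6)     = 4032
 *   wq_end      = 0 + (64 << 6)            = 4096
 *   copy_length = min(4096, 4096 - 4032)   = 64, then min(64, 4096) = 64
 *
 * Only the last 64-byte stride is returned; a send WQE that wraps past
 * the end of the WQ is completed by the second call in
 * mlx5_ib_read_user_wqe_sq(), which restarts from wqe_index 0.
 */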
-void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
+static int mlx5_ib_read_kernel_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
+ void *buffer, size_t buflen, size_t *bc)
{
- return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
+ struct mlx5_wqe_ctrl_seg *ctrl;
+ size_t bytes_copied = 0;
+ size_t wqe_length;
+ void *p;
+ int ds;
+
+ wqe_index = wqe_index & qp->sq.fbc.sz_m1;
+
+ /* read the control segment first */
+ p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
+ ctrl = p;
+ ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
+ wqe_length = ds * MLX5_WQE_DS_UNITS;
+
+ /* read rest of WQE if it spreads over more than one stride */
+ while (bytes_copied < wqe_length) {
+ size_t copy_length =
+ min_t(size_t, buflen - bytes_copied, MLX5_SEND_WQE_BB);
+
+ if (!copy_length)
+ break;
+
+ memcpy(buffer + bytes_copied, p, copy_length);
+ bytes_copied += copy_length;
+
+ wqe_index = (wqe_index + 1) & qp->sq.fbc.sz_m1;
+ p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
+ }
+ *bc = bytes_copied;
+ return 0;
}
-/**
- * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
- *
- * @qp: QP to copy from.
- * @send: copy from the send queue when non-zero, use the receive queue
- * otherwise.
- * @wqe_index: index to start copying from. For send work queues, the
- * wqe_index is in units of MLX5_SEND_WQE_BB.
- * For receive work queue, it is the number of work queue
- * element in the queue.
- * @buffer: destination buffer.
- * @length: maximum number of bytes to copy.
- *
- * Copies at least a single WQE, but may copy more data.
- *
- * Return: the number of bytes copied, or an error code.
- */
-int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
- void *buffer, u32 length)
-{
- struct ib_device *ibdev = qp->ibqp.device;
- struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
- size_t offset;
- size_t wq_end;
- struct ib_umem *umem = qp->umem;
- u32 first_copy_length;
- int wqe_length;
+static int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
+ void *buffer, size_t buflen, size_t *bc)
+{
+ struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
+ struct ib_umem *umem = base->ubuffer.umem;
+ struct mlx5_ib_wq *wq = &qp->sq;
+ struct mlx5_wqe_ctrl_seg *ctrl;
+ size_t bytes_copied;
+ size_t bytes_copied2;
+ size_t wqe_length;
int ret;
+ int ds;
- if (wq->wqe_cnt == 0) {
- mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n",
- qp->ibqp.qp_type);
+ /* at first read as much as possible */
+ ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
+ wq->offset, wq->wqe_cnt,
+ wq->wqe_shift, buflen,
+ &bytes_copied);
+ if (ret)
+ return ret;
+
+ /* we need at least control segment size to proceed */
+ if (bytes_copied < sizeof(*ctrl))
return -EINVAL;
+
+ ctrl = buffer;
+ ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
+ wqe_length = ds * MLX5_WQE_DS_UNITS;
+
+ /* if we copied enough then we are done */
+ if (bytes_copied >= wqe_length) {
+ *bc = bytes_copied;
+ return 0;
}
- offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift);
- wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift);
+ /* otherwise this is a wrapped-around wqe
+ * so read the remaining bytes starting
+ * from wqe_index 0
+ */
+ ret = mlx5_ib_read_user_wqe_common(umem, buffer + bytes_copied,
+ buflen - bytes_copied, 0, wq->offset,
+ wq->wqe_cnt, wq->wqe_shift,
+ wqe_length - bytes_copied,
+ &bytes_copied2);
- if (send && length < sizeof(struct mlx5_wqe_ctrl_seg))
- return -EINVAL;
+ if (ret)
+ return ret;
+ *bc = bytes_copied + bytes_copied2;
+ return 0;
+}
- if (offset > umem->length ||
- (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))
+int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc)
+{
+ struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
+ struct ib_umem *umem = base->ubuffer.umem;
+
+ if (buflen < sizeof(struct mlx5_wqe_ctrl_seg))
return -EINVAL;
- first_copy_length = min_t(u32, offset + length, wq_end) - offset;
- ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);
+ if (!umem)
+ return mlx5_ib_read_kernel_wqe_sq(qp, wqe_index, buffer,
+ buflen, bc);
+
+ return mlx5_ib_read_user_wqe_sq(qp, wqe_index, buffer, buflen, bc);
+}
+
+static int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index,
+ void *buffer, size_t buflen, size_t *bc)
+{
+ struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
+ struct ib_umem *umem = base->ubuffer.umem;
+ struct mlx5_ib_wq *wq = &qp->rq;
+ size_t bytes_copied;
+ int ret;
+
+ ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
+ wq->offset, wq->wqe_cnt,
+ wq->wqe_shift, buflen,
+ &bytes_copied);
+
if (ret)
return ret;
+ *bc = bytes_copied;
+ return 0;
+}
- if (send) {
- struct mlx5_wqe_ctrl_seg *ctrl = buffer;
- int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
+int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc)
+{
+ struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
+ struct ib_umem *umem = base->ubuffer.umem;
+ struct mlx5_ib_wq *wq = &qp->rq;
+ size_t wqe_size = 1 << wq->wqe_shift;
- wqe_length = ds * MLX5_WQE_DS_UNITS;
- } else {
- wqe_length = 1 << wq->wqe_shift;
- }
+ if (buflen < wqe_size)
+ return -EINVAL;
- if (wqe_length <= first_copy_length)
- return first_copy_length;
+ if (!umem)
+ return -EOPNOTSUPP;
+
+ return mlx5_ib_read_user_wqe_rq(qp, wqe_index, buffer, buflen, bc);
+}
+
+static int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
+ void *buffer, size_t buflen, size_t *bc)
+{
+ struct ib_umem *umem = srq->umem;
+ size_t bytes_copied;
+ int ret;
+
+ ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index, 0,
+ srq->msrq.max, srq->msrq.wqe_shift,
+ buflen, &bytes_copied);
- ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,
- wqe_length - first_copy_length);
if (ret)
return ret;
+ *bc = bytes_copied;
+ return 0;
+}
+
+int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc)
+{
+ struct ib_umem *umem = srq->umem;
+ size_t wqe_size = 1 << srq->msrq.wqe_shift;
+
+ if (buflen < wqe_size)
+ return -EINVAL;
+
+ if (!umem)
+ return -EOPNOTSUPP;
+
+ return mlx5_ib_read_user_wqe_srq(srq, wqe_index, buffer, buflen, bc);
+}
+
+static void mlx5_ib_qp_err_syndrome(struct ib_qp *ibqp)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ void *pas_ext_union, *err_syn;
+ u32 *outb;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev->mdev, qpc_extension) ||
+ !MLX5_CAP_GEN(dev->mdev, qp_error_syndrome))
+ return;
+
+ outb = kzalloc(outlen, GFP_KERNEL);
+ if (!outb)
+ return;
+
+ err = mlx5_core_qp_query(dev, &qp->trans_qp.base.mqp, outb, outlen,
+ true);
+ if (err)
+ goto out;
- return wqe_length;
+ pas_ext_union =
+ MLX5_ADDR_OF(query_qp_out, outb, qp_pas_or_qpc_ext_and_pas);
+ err_syn = MLX5_ADDR_OF(qpc_extension_and_pas_list_in, pas_ext_union,
+ qpc_data_extension.error_syndrome);
+
+ pr_err("%s/%d: QP %d error: %s (0x%x 0x%x 0x%x)\n",
+ ibqp->device->name, ibqp->port, ibqp->qp_num,
+ ib_wc_status_msg(
+ MLX5_GET(cqe_error_syndrome, err_syn, syndrome)),
+ MLX5_GET(cqe_error_syndrome, err_syn, vendor_error_syndrome),
+ MLX5_GET(cqe_error_syndrome, err_syn, hw_syndrome_type),
+ MLX5_GET(cqe_error_syndrome, err_syn, hw_error_syndrome));
+out:
+ kfree(outb);
+}
+
+static void mlx5_ib_handle_qp_event(struct work_struct *_work)
+{
+ struct mlx5_ib_qp_event_work *qpe_work =
+ container_of(_work, struct mlx5_ib_qp_event_work, work);
+ struct ib_qp *ibqp = &to_mibqp(qpe_work->qp)->ibqp;
+ struct ib_event event = {};
+
+ event.device = ibqp->device;
+ event.element.qp = ibqp;
+ switch (qpe_work->type) {
+ case MLX5_EVENT_TYPE_PATH_MIG:
+ event.event = IB_EVENT_PATH_MIG;
+ break;
+ case MLX5_EVENT_TYPE_COMM_EST:
+ event.event = IB_EVENT_COMM_EST;
+ break;
+ case MLX5_EVENT_TYPE_SQ_DRAINED:
+ event.event = IB_EVENT_SQ_DRAINED;
+ break;
+ case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
+ event.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ break;
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ event.event = IB_EVENT_QP_FATAL;
+ break;
+ case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
+ event.event = IB_EVENT_PATH_MIG_ERR;
+ break;
+ case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ event.event = IB_EVENT_QP_REQ_ERR;
+ break;
+ case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ break;
+ default:
+ pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n",
+ qpe_work->type, qpe_work->qp->qpn);
+ goto out;
+ }
+
+ if ((event.event == IB_EVENT_QP_FATAL) ||
+ (event.event == IB_EVENT_QP_ACCESS_ERR))
+ mlx5_ib_qp_err_syndrome(ibqp);
+
+ ibqp->event_handler(&event, ibqp->qp_context);
+
+out:
+ mlx5_core_res_put(&qpe_work->qp->common);
+ kfree(qpe_work);
}
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
- struct ib_event event;
+ struct mlx5_ib_qp_event_work *qpe_work;
- if (type == MLX5_EVENT_TYPE_PATH_MIG)
- to_mibqp(qp)->port = to_mibqp(qp)->alt_port;
+ if (type == MLX5_EVENT_TYPE_PATH_MIG) {
+ /* This event is only valid for trans_qps */
+ to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port;
+ }
- if (ibqp->event_handler) {
- event.device = ibqp->device;
- event.element.qp = ibqp;
- switch (type) {
- case MLX5_EVENT_TYPE_PATH_MIG:
- event.event = IB_EVENT_PATH_MIG;
- break;
- case MLX5_EVENT_TYPE_COMM_EST:
- event.event = IB_EVENT_COMM_EST;
- break;
- case MLX5_EVENT_TYPE_SQ_DRAINED:
- event.event = IB_EVENT_SQ_DRAINED;
- break;
- case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
- event.event = IB_EVENT_QP_LAST_WQE_REACHED;
- break;
- case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
- event.event = IB_EVENT_QP_FATAL;
- break;
- case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
- event.event = IB_EVENT_PATH_MIG_ERR;
- break;
- case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
- event.event = IB_EVENT_QP_REQ_ERR;
- break;
- case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
- event.event = IB_EVENT_QP_ACCESS_ERR;
- break;
- default:
- pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
- return;
- }
+ if (!ibqp->event_handler)
+ goto out_no_handler;
- ibqp->event_handler(&event, ibqp->qp_context);
- }
+ qpe_work = kzalloc(sizeof(*qpe_work), GFP_ATOMIC);
+ if (!qpe_work)
+ goto out_no_handler;
+
+ qpe_work->qp = qp;
+ qpe_work->type = type;
+ INIT_WORK(&qpe_work->work, mlx5_ib_handle_qp_event);
+ queue_work(mlx5_ib_qp_event_wq, &qpe_work->work);
+ return;
+
+out_no_handler:
+ mlx5_core_res_put(&qp->common);
}
static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
@@ -226,14 +444,29 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
qp->rq.max_gs = 0;
qp->rq.wqe_cnt = 0;
qp->rq.wqe_shift = 0;
+ cap->max_recv_wr = 0;
+ cap->max_recv_sge = 0;
} else {
+ int wq_sig = !!(qp->flags_en & MLX5_QP_FLAG_SIGNATURE);
+
if (ucmd) {
qp->rq.wqe_cnt = ucmd->rq_wqe_count;
+ if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
+ return -EINVAL;
qp->rq.wqe_shift = ucmd->rq_wqe_shift;
- qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
+ if ((1 << qp->rq.wqe_shift) /
+ sizeof(struct mlx5_wqe_data_seg) <
+ wq_sig)
+ return -EINVAL;
+ qp->rq.max_gs =
+ (1 << qp->rq.wqe_shift) /
+ sizeof(struct mlx5_wqe_data_seg) -
+ wq_sig;
qp->rq.max_post = qp->rq.wqe_cnt;
} else {
- wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
+ wqe_size =
+ wq_sig ? sizeof(struct mlx5_wqe_signature_seg) :
+ 0;
wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
wqe_size = roundup_pow_of_two(wqe_size);
wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
@@ -247,7 +480,10 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
return -EINVAL;
}
qp->rq.wqe_shift = ilog2(wqe_size);
- qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
+ qp->rq.max_gs =
+ (1 << qp->rq.wqe_shift) /
+ sizeof(struct mlx5_wqe_data_seg) -
+ wq_sig;
qp->rq.max_post = qp->rq.wqe_cnt;
}
}
@@ -255,18 +491,22 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
return 0;
}
-static int sq_overhead(enum ib_qp_type qp_type)
+static int sq_overhead(struct ib_qp_init_attr *attr)
{
int size = 0;
- switch (qp_type) {
+ switch (attr->qp_type) {
case IB_QPT_XRC_INI:
size += sizeof(struct mlx5_wqe_xrc_seg);
- /* fall through */
+ fallthrough;
case IB_QPT_RC:
size += sizeof(struct mlx5_wqe_ctrl_seg) +
- sizeof(struct mlx5_wqe_atomic_seg) +
- sizeof(struct mlx5_wqe_raddr_seg);
+ max(sizeof(struct mlx5_wqe_atomic_seg) +
+ sizeof(struct mlx5_wqe_raddr_seg),
+ sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+ sizeof(struct mlx5_mkey_seg) +
+ MLX5_IB_SQ_UMR_INLINE_THRESHOLD /
+ MLX5_IB_UMR_OCTOWORD);
break;
case IB_QPT_XRC_TGT:
@@ -274,14 +514,18 @@ static int sq_overhead(enum ib_qp_type qp_type)
case IB_QPT_UC:
size += sizeof(struct mlx5_wqe_ctrl_seg) +
- sizeof(struct mlx5_wqe_raddr_seg) +
- sizeof(struct mlx5_wqe_umr_ctrl_seg) +
- sizeof(struct mlx5_mkey_seg);
+ max(sizeof(struct mlx5_wqe_raddr_seg),
+ sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+ sizeof(struct mlx5_mkey_seg));
break;
case IB_QPT_UD:
+ if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
+ size += sizeof(struct mlx5_wqe_eth_pad) +
+ sizeof(struct mlx5_wqe_eth_seg);
+ fallthrough;
case IB_QPT_SMI:
- case IB_QPT_GSI:
+ case MLX5_IB_QPT_HW_GSI:
size += sizeof(struct mlx5_wqe_ctrl_seg) +
sizeof(struct mlx5_wqe_datagram_seg);
break;
@@ -304,7 +548,7 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
int inl_size = 0;
int size;
- size = sq_overhead(attr->qp_type);
+ size = sq_overhead(attr);
if (size < 0)
return size;
@@ -314,13 +558,36 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
}
size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
- if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN &&
+ if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN &&
ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
- return MLX5_SIG_WQE_SIZE;
+ return MLX5_SIG_WQE_SIZE;
else
return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}
+static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size)
+{
+ int max_sge;
+
+ if (attr->qp_type == IB_QPT_RC)
+ max_sge = (min_t(int, wqe_size, 512) -
+ sizeof(struct mlx5_wqe_ctrl_seg) -
+ sizeof(struct mlx5_wqe_raddr_seg)) /
+ sizeof(struct mlx5_wqe_data_seg);
+ else if (attr->qp_type == IB_QPT_XRC_INI)
+ max_sge = (min_t(int, wqe_size, 512) -
+ sizeof(struct mlx5_wqe_ctrl_seg) -
+ sizeof(struct mlx5_wqe_xrc_seg) -
+ sizeof(struct mlx5_wqe_raddr_seg)) /
+ sizeof(struct mlx5_wqe_data_seg);
+ else
+ max_sge = (wqe_size - sq_overhead(attr)) /
+ sizeof(struct mlx5_wqe_data_seg);
+
+ return min_t(int, max_sge, wqe_size - sq_overhead(attr) /
+ sizeof(struct mlx5_wqe_data_seg));
+}
+
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
struct mlx5_ib_qp *qp)
{
@@ -341,23 +608,25 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
return -EINVAL;
}
- qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
- sizeof(struct mlx5_wqe_inline_seg);
+ qp->max_inline_data = wqe_size - sq_overhead(attr) -
+ sizeof(struct mlx5_wqe_inline_seg);
attr->cap.max_inline_data = qp->max_inline_data;
- if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
- qp->signature_en = true;
-
wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
- mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
+ mlx5_ib_dbg(dev, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n",
+ attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB,
qp->sq.wqe_cnt,
1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
return -ENOMEM;
}
qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
- qp->sq.max_gs = attr->cap.max_send_sge;
+ qp->sq.max_gs = get_send_sge(attr, wqe_size);
+ if (qp->sq.max_gs < attr->cap.max_send_sge)
+ return -ENOMEM;
+
+ attr->cap.max_send_sge = qp->sq.max_gs;
qp->sq.max_post = wq_size / wqe_size;
attr->cap.max_send_wr = qp->sq.max_post;
@@ -366,7 +635,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
static int set_user_buf_size(struct mlx5_ib_dev *dev,
struct mlx5_ib_qp *qp,
- struct mlx5_ib_create_qp *ucmd)
+ struct mlx5_ib_create_qp *ucmd,
+ struct mlx5_ib_qp_base *base,
+ struct ib_qp_init_attr *attr)
{
int desc_sz = 1 << qp->sq.wqe_shift;
@@ -376,9 +647,9 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
return -EINVAL;
}
- if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
- mlx5_ib_warn(dev, "sq_wqe_count %d, sq_wqe_count %d\n",
- ucmd->sq_wqe_count, ucmd->sq_wqe_count);
+ if (ucmd->sq_wqe_count && !is_power_of_2(ucmd->sq_wqe_count)) {
+ mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
+ ucmd->sq_wqe_count);
return -EINVAL;
}
@@ -391,8 +662,14 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
return -EINVAL;
}
- qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
- (qp->sq.wqe_cnt << 6);
+ if (attr->qp_type == IB_QPT_RAW_PACKET ||
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
+ base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
+ qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6;
+ } else {
+ base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
+ (qp->sq.wqe_cnt << 6);
+ }
return 0;
}
@@ -408,60 +685,55 @@ static int qp_has_rq(struct ib_qp_init_attr *attr)
return 1;
}
-static int first_med_uuar(void)
-{
- return 1;
-}
+enum {
+ /* this is the first blue flame register in the array of bfregs assigned
+ * to a process. Since we do not use it for blue flame but rather
+ * regular 64 bit doorbells, we do not need a lock for maintaining
+ * "odd/even" order
+ */
+ NUM_NON_BLUE_FLAME_BFREGS = 1,
+};
-static int next_uuar(int n)
+static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi)
{
- n++;
-
- while (((n % 4) & 2))
- n++;
-
- return n;
+ return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
+ bfregi->num_static_sys_pages * MLX5_NON_FP_BFREGS_PER_UAR;
}
-static int num_med_uuar(struct mlx5_uuar_info *uuari)
+static int num_med_bfreg(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi)
{
int n;
- n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
- uuari->num_low_latency_uuars - 1;
+ n = max_bfregs(dev, bfregi) - bfregi->num_low_latency_bfregs -
+ NUM_NON_BLUE_FLAME_BFREGS;
return n >= 0 ? n : 0;
}
-static int max_uuari(struct mlx5_uuar_info *uuari)
+static int first_med_bfreg(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi)
{
- return uuari->num_uars * 4;
+ return num_med_bfreg(dev, bfregi) ? 1 : -ENOMEM;
}
-static int first_hi_uuar(struct mlx5_uuar_info *uuari)
+static int first_hi_bfreg(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi)
{
int med;
- int i;
- int t;
- med = num_med_uuar(uuari);
- for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
- t++;
- if (t == med)
- return next_uuar(i);
- }
-
- return 0;
+ med = num_med_bfreg(dev, bfregi);
+ return ++med;
}
-static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
+static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi)
{
int i;
- for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
- if (!test_bit(i, uuari->bitmap)) {
- set_bit(i, uuari->bitmap);
- uuari->count[i]++;
+ for (i = first_hi_bfreg(dev, bfregi); i < max_bfregs(dev, bfregi); i++) {
+ if (!bfregi->count[i]) {
+ bfregi->count[i]++;
return i;
}
}
@@ -469,87 +741,56 @@ static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
return -ENOMEM;
}
-static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
+static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi)
{
- int minidx = first_med_uuar();
+ int minidx = first_med_bfreg(dev, bfregi);
int i;
- for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
- if (uuari->count[i] < uuari->count[minidx])
+ if (minidx < 0)
+ return minidx;
+
+ for (i = minidx; i < first_hi_bfreg(dev, bfregi); i++) {
+ if (bfregi->count[i] < bfregi->count[minidx])
minidx = i;
+ if (!bfregi->count[minidx])
+ break;
}
- uuari->count[minidx]++;
+ bfregi->count[minidx]++;
return minidx;
}
-static int alloc_uuar(struct mlx5_uuar_info *uuari,
- enum mlx5_ib_latency_class lat)
+static int alloc_bfreg(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi)
{
- int uuarn = -EINVAL;
-
- mutex_lock(&uuari->lock);
- switch (lat) {
- case MLX5_IB_LATENCY_CLASS_LOW:
- uuarn = 0;
- uuari->count[uuarn]++;
- break;
+ int bfregn = -ENOMEM;
- case MLX5_IB_LATENCY_CLASS_MEDIUM:
- if (uuari->ver < 2)
- uuarn = -ENOMEM;
- else
- uuarn = alloc_med_class_uuar(uuari);
- break;
-
- case MLX5_IB_LATENCY_CLASS_HIGH:
- if (uuari->ver < 2)
- uuarn = -ENOMEM;
- else
- uuarn = alloc_high_class_uuar(uuari);
- break;
+ if (bfregi->lib_uar_dyn)
+ return -EINVAL;
- case MLX5_IB_LATENCY_CLASS_FAST_PATH:
- uuarn = 2;
- break;
+ mutex_lock(&bfregi->lock);
+ if (bfregi->ver >= 2) {
+ bfregn = alloc_high_class_bfreg(dev, bfregi);
+ if (bfregn < 0)
+ bfregn = alloc_med_class_bfreg(dev, bfregi);
}
- mutex_unlock(&uuari->lock);
- return uuarn;
-}
+ if (bfregn < 0) {
+ BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1);
+ bfregn = 0;
+ bfregi->count[bfregn]++;
+ }
+ mutex_unlock(&bfregi->lock);
-static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
-{
- clear_bit(uuarn, uuari->bitmap);
- --uuari->count[uuarn];
+ return bfregn;
}
-static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
+void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
{
- clear_bit(uuarn, uuari->bitmap);
- --uuari->count[uuarn];
-}
-
-static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
-{
- int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
- int high_uuar = nuuars - uuari->num_low_latency_uuars;
-
- mutex_lock(&uuari->lock);
- if (uuarn == 0) {
- --uuari->count[uuarn];
- goto out;
- }
-
- if (uuarn < high_uuar) {
- free_med_class_uuar(uuari, uuarn);
- goto out;
- }
-
- free_high_class_uuar(uuari, uuarn);
-
-out:
- mutex_unlock(&uuari->lock);
+ mutex_lock(&bfregi->lock);
+ bfregi->count[bfregn]--;
+ mutex_unlock(&bfregi->lock);
}
static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
@@ -576,211 +817,361 @@ static int to_mlx5_st(enum ib_qp_type type)
case IB_QPT_XRC_INI:
case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC;
case IB_QPT_SMI: return MLX5_QP_ST_QP0;
- case IB_QPT_GSI: return MLX5_QP_ST_QP1;
- case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6;
- case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE;
- case IB_QPT_RAW_PACKET:
- case IB_QPT_MAX:
+ case MLX5_IB_QPT_HW_GSI: return MLX5_QP_ST_QP1;
+ case MLX5_IB_QPT_DCI: return MLX5_QP_ST_DCI;
+ case IB_QPT_RAW_PACKET: return MLX5_QP_ST_RAW_ETHERTYPE;
default: return -EINVAL;
}
}
-static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
+static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
+ struct mlx5_ib_cq *recv_cq);
+static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
+ struct mlx5_ib_cq *recv_cq);
+
+int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi, u32 bfregn,
+ bool dyn_bfreg)
{
- return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
+ unsigned int bfregs_per_sys_page;
+ u32 index_of_sys_page;
+ u32 offset;
+
+ if (bfregi->lib_uar_dyn)
+ return -EINVAL;
+
+ bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
+ MLX5_NON_FP_BFREGS_PER_UAR;
+ index_of_sys_page = bfregn / bfregs_per_sys_page;
+
+ if (dyn_bfreg) {
+ index_of_sys_page += bfregi->num_static_sys_pages;
+
+ if (index_of_sys_page >= bfregi->num_sys_pages)
+ return -EINVAL;
+
+ if (bfregn > bfregi->num_dyn_bfregs ||
+ bfregi->sys_pages[index_of_sys_page] == MLX5_IB_INVALID_UAR_INDEX) {
+ mlx5_ib_dbg(dev, "Invalid dynamic uar index\n");
+ return -EINVAL;
+ }
+ }
+
+ offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;
+ return bfregi->sys_pages[index_of_sys_page] + offset;
}
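
bfregn_to_uar_index() first works out how many non-fast-path bfregs a single system page carries and then indexes the sys_pages[] table; dynamic bfregs additionally skip the statically mapped pages via num_static_sys_pages. A worked example, assuming a 4 KiB system page (one UAR per page) and the usual two non-fast-path bfregs per UAR:

/* Illustrative arithmetic only; the constants are the typical values:
 *   bfregs_per_sys_page = 1 UAR/page * 2 non-FP bfregs/UAR = 2
 *   bfregn = 5:
 *     index_of_sys_page = 5 / 2 = 2
 *     offset            = (5 % 2) / 2 = 0
 *   returned UAR index  = bfregi->sys_pages[2] + 0
 */
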
-static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
- struct mlx5_ib_qp *qp, struct ib_udata *udata,
- struct mlx5_create_qp_mbox_in **in,
- struct mlx5_ib_create_qp_resp *resp, int *inlen)
+static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_rwq *rwq, struct ib_udata *udata)
{
- struct mlx5_ib_ucontext *context;
- struct mlx5_ib_create_qp ucmd;
- int page_shift = 0;
- int uar_index;
- int npages;
+ struct mlx5_ib_ucontext *context =
+ rdma_udata_to_drv_context(
+ udata,
+ struct mlx5_ib_ucontext,
+ ibucontext);
+
+ if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP)
+ atomic_dec(&dev->delay_drop.rqs_cnt);
+
+ mlx5_ib_db_unmap_user(context, &rwq->db);
+ ib_umem_release(rwq->umem);
+}
+
+static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct ib_udata *udata, struct mlx5_ib_rwq *rwq,
+ struct mlx5_ib_create_wq *ucmd)
+{
+ struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+ unsigned long page_size = 0;
u32 offset = 0;
- int uuarn;
- int ncont = 0;
int err;
- err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
- if (err) {
- mlx5_ib_dbg(dev, "copy failed\n");
+ if (!ucmd->buf_addr)
+ return -EINVAL;
+
+ rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0);
+ if (IS_ERR(rwq->umem)) {
+ mlx5_ib_dbg(dev, "umem_get failed\n");
+ err = PTR_ERR(rwq->umem);
return err;
}
- context = to_mucontext(pd->uobject->context);
- /*
- * TBD: should come from the verbs when we have the API
- */
- uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
- if (uuarn < 0) {
- mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
- mlx5_ib_dbg(dev, "reverting to medium latency\n");
- uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
- if (uuarn < 0) {
- mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
- mlx5_ib_dbg(dev, "reverting to high latency\n");
- uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
- if (uuarn < 0) {
- mlx5_ib_warn(dev, "uuar allocation failed\n");
- return uuarn;
- }
- }
+ page_size = mlx5_umem_find_best_quantized_pgoff(
+ rwq->umem, wq, log_wq_pg_sz, MLX5_ADAPTER_PAGE_SHIFT,
+ page_offset, 64, &rwq->rq_page_offset);
+ if (!page_size) {
+ mlx5_ib_warn(dev, "bad offset\n");
+ err = -EINVAL;
+ goto err_umem;
+ }
+
+ rwq->rq_num_pas = ib_umem_num_dma_blocks(rwq->umem, page_size);
+ rwq->page_shift = order_base_2(page_size);
+ rwq->log_page_size = rwq->page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+ rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);
+
+ mlx5_ib_dbg(
+ dev,
+ "addr 0x%llx, size %zd, npages %zu, page_size %ld, ncont %d, offset %d\n",
+ (unsigned long long)ucmd->buf_addr, rwq->buf_size,
+ ib_umem_num_pages(rwq->umem), page_size, rwq->rq_num_pas,
+ offset);
+
+ err = mlx5_ib_db_map_user(ucontext, ucmd->db_addr, &rwq->db);
+ if (err) {
+ mlx5_ib_dbg(dev, "map failed\n");
+ goto err_umem;
}
- uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
- mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);
+ return 0;
+
+err_umem:
+ ib_umem_release(rwq->umem);
+ return err;
+}
+
+static int adjust_bfregn(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi, int bfregn)
+{
+ return bfregn / MLX5_NON_FP_BFREGS_PER_UAR * MLX5_BFREGS_PER_UAR +
+ bfregn % MLX5_NON_FP_BFREGS_PER_UAR;
+}
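
adjust_bfregn() converts the driver's dense numbering, which counts only non-fast-path bfregs, back into the user-visible numbering in which every UAR exposes MLX5_BFREGS_PER_UAR slots including the fast-path ones. With the usual values of 2 non-fast-path and 4 total bfregs per UAR:

/* Illustrative: driver bfregn 5 -> user-visible index
 *   5 / 2 * 4 + 5 % 2 = 2 * 4 + 1 = 9
 * i.e. the same slot once the two fast-path bfregs of each of the two
 * preceding UARs are counted back in.
 */
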
+
+static int _create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp, struct ib_udata *udata,
+ struct ib_qp_init_attr *attr, u32 **in,
+ struct mlx5_ib_create_qp_resp *resp, int *inlen,
+ struct mlx5_ib_qp_base *base,
+ struct mlx5_ib_create_qp *ucmd)
+{
+ struct mlx5_ib_ucontext *context;
+ struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
+ unsigned int page_offset_quantized = 0;
+ unsigned long page_size = 0;
+ int uar_index = 0;
+ int bfregn;
+ int ncont = 0;
+ __be64 *pas;
+ void *qpc;
+ int err;
+ u16 uid;
+ u32 uar_flags;
+
+ context = rdma_udata_to_drv_context(udata, struct mlx5_ib_ucontext,
+ ibucontext);
+ uar_flags = qp->flags_en &
+ (MLX5_QP_FLAG_UAR_PAGE_INDEX | MLX5_QP_FLAG_BFREG_INDEX);
+ switch (uar_flags) {
+ case MLX5_QP_FLAG_UAR_PAGE_INDEX:
+ uar_index = ucmd->bfreg_index;
+ bfregn = MLX5_IB_INVALID_BFREG;
+ break;
+ case MLX5_QP_FLAG_BFREG_INDEX:
+ uar_index = bfregn_to_uar_index(dev, &context->bfregi,
+ ucmd->bfreg_index, true);
+ if (uar_index < 0)
+ return uar_index;
+ bfregn = MLX5_IB_INVALID_BFREG;
+ break;
+ case 0:
+ if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
+ return -EINVAL;
+ bfregn = alloc_bfreg(dev, &context->bfregi);
+ if (bfregn < 0)
+ return bfregn;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);
+ if (bfregn != MLX5_IB_INVALID_BFREG)
+ uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn,
+ false);
qp->rq.offset = 0;
qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
- err = set_user_buf_size(dev, qp, &ucmd);
+ err = set_user_buf_size(dev, qp, ucmd, base, attr);
if (err)
- goto err_uuar;
-
- if (ucmd.buf_addr && qp->buf_size) {
- qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
- qp->buf_size, 0, 0);
- if (IS_ERR(qp->umem)) {
- mlx5_ib_dbg(dev, "umem_get failed\n");
- err = PTR_ERR(qp->umem);
- goto err_uuar;
+ goto err_bfreg;
+
+ if (ucmd->buf_addr && ubuffer->buf_size) {
+ ubuffer->buf_addr = ucmd->buf_addr;
+ ubuffer->umem = ib_umem_get(&dev->ib_dev, ubuffer->buf_addr,
+ ubuffer->buf_size, 0);
+ if (IS_ERR(ubuffer->umem)) {
+ err = PTR_ERR(ubuffer->umem);
+ goto err_bfreg;
}
- } else {
- qp->umem = NULL;
- }
-
- if (qp->umem) {
- mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
- &ncont, NULL);
- err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
- if (err) {
- mlx5_ib_warn(dev, "bad offset\n");
+ page_size = mlx5_umem_find_best_quantized_pgoff(
+ ubuffer->umem, qpc, log_page_size,
+ MLX5_ADAPTER_PAGE_SHIFT, page_offset, 64,
+ &page_offset_quantized);
+ if (!page_size) {
+ err = -EINVAL;
goto err_umem;
}
- mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
- ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
+ ncont = ib_umem_num_dma_blocks(ubuffer->umem, page_size);
+ } else {
+ ubuffer->umem = NULL;
}
- *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
- *in = mlx5_vzalloc(*inlen);
+ *inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
+ MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont;
+ *in = kvzalloc(*inlen, GFP_KERNEL);
if (!*in) {
err = -ENOMEM;
goto err_umem;
}
- if (qp->umem)
- mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
- (*in)->ctx.log_pg_sz_remote_qpn =
- cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
- (*in)->ctx.params2 = cpu_to_be32(offset << 6);
- (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
- resp->uuar_index = uuarn;
- qp->uuarn = uuarn;
+ uid = (attr->qp_type != IB_QPT_XRC_INI) ? to_mpd(pd)->uid : 0;
+ MLX5_SET(create_qp_in, *in, uid, uid);
+ qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
+ pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
+ if (ubuffer->umem) {
+ mlx5_ib_populate_pas(ubuffer->umem, page_size, pas, 0);
+ MLX5_SET(qpc, qpc, log_page_size,
+ order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(qpc, qpc, page_offset, page_offset_quantized);
+ }
+ MLX5_SET(qpc, qpc, uar_page, uar_index);
+ if (bfregn != MLX5_IB_INVALID_BFREG)
+ resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
+ else
+ resp->bfreg_index = MLX5_IB_INVALID_BFREG;
+ qp->bfregn = bfregn;
- err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
+ err = mlx5_ib_db_map_user(context, ucmd->db_addr, &qp->db);
if (err) {
mlx5_ib_dbg(dev, "map failed\n");
goto err_free;
}
- err = ib_copy_to_udata(udata, resp, sizeof(*resp));
- if (err) {
- mlx5_ib_dbg(dev, "copy failed\n");
- goto err_unmap;
- }
- qp->create_type = MLX5_QP_USER;
-
return 0;
-err_unmap:
- mlx5_ib_db_unmap_user(context, &qp->db);
-
err_free:
kvfree(*in);
err_umem:
- if (qp->umem)
- ib_umem_release(qp->umem);
+ ib_umem_release(ubuffer->umem);
-err_uuar:
- free_uuar(&context->uuari, uuarn);
+err_bfreg:
+ if (bfregn != MLX5_IB_INVALID_BFREG)
+ mlx5_ib_free_bfreg(dev, &context->bfregi, bfregn);
return err;
}
-static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
+static void destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct mlx5_ib_qp_base *base, struct ib_udata *udata)
{
- struct mlx5_ib_ucontext *context;
+ struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+
+ if (udata) {
+ /* User QP */
+ mlx5_ib_db_unmap_user(context, &qp->db);
+ ib_umem_release(base->ubuffer.umem);
+
+ /*
+ * Free only the BFREGs which are handled by the kernel.
+ * BFREGs of UARs allocated dynamically are handled by user.
+ */
+ if (qp->bfregn != MLX5_IB_INVALID_BFREG)
+ mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn);
+ return;
+ }
- context = to_mucontext(pd->uobject->context);
- mlx5_ib_db_unmap_user(context, &qp->db);
- if (qp->umem)
- ib_umem_release(qp->umem);
- free_uuar(&context->uuari, qp->uuarn);
+ /* Kernel QP */
+ kvfree(qp->sq.wqe_head);
+ kvfree(qp->sq.w_list);
+ kvfree(qp->sq.wrid);
+ kvfree(qp->sq.wr_data);
+ kvfree(qp->rq.wrid);
+ if (qp->db.db)
+ mlx5_db_free(dev->mdev, &qp->db);
+ if (qp->buf.frags)
+ mlx5_frag_buf_free(dev->mdev, &qp->buf);
}
-static int create_kernel_qp(struct mlx5_ib_dev *dev,
- struct ib_qp_init_attr *init_attr,
- struct mlx5_ib_qp *qp,
- struct mlx5_create_qp_mbox_in **in, int *inlen)
+static int _create_kernel_qp(struct mlx5_ib_dev *dev,
+ struct ib_qp_init_attr *init_attr,
+ struct mlx5_ib_qp *qp, u32 **in, int *inlen,
+ struct mlx5_ib_qp_base *base)
{
- enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
- struct mlx5_uuar_info *uuari;
int uar_index;
- int uuarn;
+ void *qpc;
int err;
- uuari = &dev->mdev->priv.uuari;
- if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
- return -EINVAL;
-
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
- lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
-
- uuarn = alloc_uuar(uuari, lc);
- if (uuarn < 0) {
- mlx5_ib_dbg(dev, "\n");
- return -ENOMEM;
- }
+ qp->bf.bfreg = &dev->fp_bfreg;
+ else
+ qp->bf.bfreg = &dev->bfreg;
- qp->bf = &uuari->bfs[uuarn];
- uar_index = qp->bf->uar->index;
+ /* We need to divide by two since each register is comprised of
+ * two buffers of identical size, namely odd and even
+ */
+ qp->bf.buf_size = (1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size)) / 2;
+ uar_index = qp->bf.bfreg->index;
err = calc_sq_size(dev, init_attr, qp);
if (err < 0) {
mlx5_ib_dbg(dev, "err %d\n", err);
- goto err_uuar;
+ return err;
}
qp->rq.offset = 0;
qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
- qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
+ base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
- err = mlx5_buf_alloc(dev->mdev, qp->buf_size, &qp->buf);
+ err = mlx5_frag_buf_alloc_node(dev->mdev, base->ubuffer.buf_size,
+ &qp->buf, dev->mdev->priv.numa_node);
if (err) {
mlx5_ib_dbg(dev, "err %d\n", err);
- goto err_uuar;
+ return err;
}
- qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
- *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
- *in = mlx5_vzalloc(*inlen);
+ if (qp->rq.wqe_cnt)
+ mlx5_init_fbc(qp->buf.frags, qp->rq.wqe_shift,
+ ilog2(qp->rq.wqe_cnt), &qp->rq.fbc);
+
+ if (qp->sq.wqe_cnt) {
+ int sq_strides_offset = (qp->sq.offset & (PAGE_SIZE - 1)) /
+ MLX5_SEND_WQE_BB;
+ mlx5_init_fbc_offset(qp->buf.frags +
+ (qp->sq.offset / PAGE_SIZE),
+ ilog2(MLX5_SEND_WQE_BB),
+ ilog2(qp->sq.wqe_cnt),
+ sq_strides_offset, &qp->sq.fbc);
+
+ qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
+ }
+
+ *inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
+ MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
+ *in = kvzalloc(*inlen, GFP_KERNEL);
if (!*in) {
err = -ENOMEM;
goto err_buf;
}
- (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
- (*in)->ctx.log_pg_sz_remote_qpn =
- cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
+
+ qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
+ MLX5_SET(qpc, qpc, uar_page, uar_index);
+ MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev));
+ MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+
/* Set "fast registration enabled" for all kernel QPs */
- (*in)->ctx.params1 |= cpu_to_be32(1 << 11);
- (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);
+ MLX5_SET(qpc, qpc, fre, 1);
+ MLX5_SET(qpc, qpc, rlky, 1);
+
+ if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
+ MLX5_SET(qpc, qpc, deth_sqpn, 1);
- mlx5_fill_page_array(&qp->buf, (*in)->pas);
+ mlx5_fill_page_frag_array(&qp->buf,
+ (__be64 *)MLX5_ADDR_OF(create_qp_in,
+ *in, pas));
err = mlx5_db_alloc(dev->mdev, &qp->db);
if (err) {
@@ -788,262 +1179,1388 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
goto err_free;
}
- qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
- qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
- qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
- qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
- qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);
+ qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt,
+ sizeof(*qp->sq.wrid), GFP_KERNEL);
+ qp->sq.wr_data = kvmalloc_array(qp->sq.wqe_cnt,
+ sizeof(*qp->sq.wr_data), GFP_KERNEL);
+ qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
+ sizeof(*qp->rq.wrid), GFP_KERNEL);
+ qp->sq.w_list = kvmalloc_array(qp->sq.wqe_cnt,
+ sizeof(*qp->sq.w_list), GFP_KERNEL);
+ qp->sq.wqe_head = kvmalloc_array(qp->sq.wqe_cnt,
+ sizeof(*qp->sq.wqe_head), GFP_KERNEL);
if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
!qp->sq.w_list || !qp->sq.wqe_head) {
err = -ENOMEM;
goto err_wrid;
}
- qp->create_type = MLX5_QP_KERNEL;
return 0;
err_wrid:
+ kvfree(qp->sq.wqe_head);
+ kvfree(qp->sq.w_list);
+ kvfree(qp->sq.wrid);
+ kvfree(qp->sq.wr_data);
+ kvfree(qp->rq.wrid);
mlx5_db_free(dev->mdev, &qp->db);
- kfree(qp->sq.wqe_head);
- kfree(qp->sq.w_list);
- kfree(qp->sq.wrid);
- kfree(qp->sq.wr_data);
- kfree(qp->rq.wrid);
err_free:
kvfree(*in);
err_buf:
- mlx5_buf_free(dev->mdev, &qp->buf);
+ mlx5_frag_buf_free(dev->mdev, &qp->buf);
+ return err;
+}
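
For kernel QPs the blue-flame buffer size now comes straight from the log_bf_reg_size capability, halved because each register is split into identical odd and even halves that are written alternately:

/* Illustrative, with an assumed example capability value:
 *   MLX5_CAP_GEN(mdev, log_bf_reg_size) = 9
 *   register size   = 1 << 9 = 512 bytes
 *   qp->bf.buf_size = 512 / 2 = 256 bytes per odd/even half
 */
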
+
+static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
+{
+ if (attr->srq || (qp->type == IB_QPT_XRC_TGT) ||
+ (qp->type == MLX5_IB_QPT_DCI) || (qp->type == IB_QPT_XRC_INI))
+ return MLX5_SRQ_RQ;
+ else if (!qp->has_rq)
+ return MLX5_ZERO_LEN_RQ;
+
+ return MLX5_NON_ZERO_RQ;
+}
+
+static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_ib_sq *sq, u32 tdn,
+ struct ib_pd *pd)
+{
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
+ void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+ MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid);
+ MLX5_SET(tisc, tisc, transport_domain, tdn);
+ if (!mlx5_ib_lag_should_assign_affinity(dev) &&
+ mlx5_lag_is_lacp_owner(dev->mdev))
+ MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
+ if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
+ MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);
+
+ return mlx5_core_create_tis(dev->mdev, in, &sq->tisn);
+}
+
+static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_sq *sq, struct ib_pd *pd)
+{
+ mlx5_cmd_destroy_tis(dev->mdev, sq->tisn, to_mpd(pd)->uid);
+}
+
+static void destroy_flow_rule_vport_sq(struct mlx5_ib_sq *sq)
+{
+ if (sq->flow_rule)
+ mlx5_del_flow_rules(sq->flow_rule);
+ sq->flow_rule = NULL;
+}
+
+static bool fr_supported(int ts_cap)
+{
+ return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
+ ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
+}
+
+static int get_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
+ bool fr_sup, bool rt_sup)
+{
+ if (cq->private_flags & MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS) {
+ if (!rt_sup) {
+ mlx5_ib_dbg(dev,
+ "Real time TS format is not supported\n");
+ return -EOPNOTSUPP;
+ }
+ return MLX5_TIMESTAMP_FORMAT_REAL_TIME;
+ }
+ if (cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) {
+ if (!fr_sup) {
+ mlx5_ib_dbg(dev,
+ "Free running TS format is not supported\n");
+ return -EOPNOTSUPP;
+ }
+ return MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
+ }
+ return fr_sup ? MLX5_TIMESTAMP_FORMAT_FREE_RUNNING :
+ MLX5_TIMESTAMP_FORMAT_DEFAULT;
+}
+
+static int get_rq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *recv_cq)
+{
+ u8 ts_cap = MLX5_CAP_GEN(dev->mdev, rq_ts_format);
+
+ return get_ts_format(dev, recv_cq, fr_supported(ts_cap),
+ rt_supported(ts_cap));
+}
+
+static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
+{
+ u8 ts_cap = MLX5_CAP_GEN(dev->mdev, sq_ts_format);
+
+ return get_ts_format(dev, send_cq, fr_supported(ts_cap),
+ rt_supported(ts_cap));
+}
+
+static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq,
+ struct mlx5_ib_cq *recv_cq)
+{
+ u8 ts_cap = MLX5_CAP_ROCE(dev->mdev, qp_ts_format);
+ bool fr_sup = fr_supported(ts_cap);
+ bool rt_sup = rt_supported(ts_cap);
+ u8 default_ts = fr_sup ? MLX5_TIMESTAMP_FORMAT_FREE_RUNNING :
+ MLX5_TIMESTAMP_FORMAT_DEFAULT;
+ int send_ts_format =
+ send_cq ? get_ts_format(dev, send_cq, fr_sup, rt_sup) :
+ default_ts;
+ int recv_ts_format =
+ recv_cq ? get_ts_format(dev, recv_cq, fr_sup, rt_sup) :
+ default_ts;
+
+ if (send_ts_format < 0 || recv_ts_format < 0)
+ return -EOPNOTSUPP;
+
+ if (send_ts_format != MLX5_TIMESTAMP_FORMAT_DEFAULT &&
+ recv_ts_format != MLX5_TIMESTAMP_FORMAT_DEFAULT &&
+ send_ts_format != recv_ts_format) {
+ mlx5_ib_dbg(
+ dev,
+ "The send ts_format does not match the receive ts_format\n");
+ return -EOPNOTSUPP;
+ }
+
+ return send_ts_format == default_ts ? recv_ts_format : send_ts_format;
+}
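
get_qp_ts_format() lets either CQ stay on the default timestamp format, but two explicitly chosen formats must agree, and the resolved format prefers whichever side made an explicit choice. A condensed sketch of just that decision, mirroring the checks above with the per-CQ capability handling omitted:

/* Illustrative decision helper (same logic as above, details omitted). */
static int sketch_agree_ts_format(int send_ts, int recv_ts, int default_ts)
{
	if (send_ts < 0 || recv_ts < 0)
		return -EOPNOTSUPP;	/* a CQ asked for an unsupported format */
	if (send_ts != MLX5_TIMESTAMP_FORMAT_DEFAULT &&
	    recv_ts != MLX5_TIMESTAMP_FORMAT_DEFAULT &&
	    send_ts != recv_ts)
		return -EOPNOTSUPP;	/* two explicit but different choices */
	return send_ts == default_ts ? recv_ts : send_ts;
}
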
+
+static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
+ struct ib_udata *udata,
+ struct mlx5_ib_sq *sq, void *qpin,
+ struct ib_pd *pd, struct mlx5_ib_cq *cq)
+{
+ struct mlx5_ib_ubuffer *ubuffer = &sq->ubuffer;
+ __be64 *pas;
+ void *in;
+ void *sqc;
+ void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
+ void *wq;
+ int inlen;
+ int err;
+ unsigned int page_offset_quantized;
+ unsigned long page_size;
+ int ts_format;
+
+ ts_format = get_sq_ts_format(dev, cq);
+ if (ts_format < 0)
+ return ts_format;
+
+ sq->ubuffer.umem = ib_umem_get(&dev->ib_dev, ubuffer->buf_addr,
+ ubuffer->buf_size, 0);
+ if (IS_ERR(sq->ubuffer.umem))
+ return PTR_ERR(sq->ubuffer.umem);
+ page_size = mlx5_umem_find_best_quantized_pgoff(
+ ubuffer->umem, wq, log_wq_pg_sz, MLX5_ADAPTER_PAGE_SHIFT,
+ page_offset, 64, &page_offset_quantized);
+ if (!page_size) {
+ err = -EINVAL;
+ goto err_umem;
+ }
+
+ inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+ sizeof(u64) *
+ ib_umem_num_dma_blocks(sq->ubuffer.umem, page_size);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_umem;
+ }
+
+ MLX5_SET(create_sq_in, in, uid, to_mpd(pd)->uid);
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+ if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
+ MLX5_SET(sqc, sqc, allow_multi_pkt_send_wqe, 1);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, ts_format, ts_format);
+ MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index));
+ MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd));
+ MLX5_SET(sqc, sqc, tis_lst_sz, 1);
+ MLX5_SET(sqc, sqc, tis_num_0, sq->tisn);
+ if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
+ MLX5_CAP_ETH(dev->mdev, swp))
+ MLX5_SET(sqc, sqc, allow_swp, 1);
+
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
+ MLX5_SET(wq, wq, uar_page, MLX5_GET(qpc, qpc, uar_page));
+ MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_sq_size));
+ MLX5_SET(wq, wq, log_wq_pg_sz,
+ order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(wq, wq, page_offset, page_offset_quantized);
+
+ pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
+ mlx5_ib_populate_pas(sq->ubuffer.umem, page_size, pas, 0);
+
+ err = mlx5_core_create_sq_tracked(dev, in, inlen, &sq->base.mqp);
+
+ kvfree(in);
+
+ if (err)
+ goto err_umem;
+
+ return 0;
+
+err_umem:
+ ib_umem_release(sq->ubuffer.umem);
+ sq->ubuffer.umem = NULL;
-err_uuar:
- free_uuar(&dev->mdev->priv.uuari, uuarn);
return err;
}
-static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
+static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_sq *sq)
{
- mlx5_db_free(dev->mdev, &qp->db);
- kfree(qp->sq.wqe_head);
- kfree(qp->sq.w_list);
- kfree(qp->sq.wrid);
- kfree(qp->sq.wr_data);
- kfree(qp->rq.wrid);
- mlx5_buf_free(dev->mdev, &qp->buf);
- free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
+ destroy_flow_rule_vport_sq(sq);
+ mlx5_core_destroy_sq_tracked(dev, &sq->base.mqp);
+ ib_umem_release(sq->ubuffer.umem);
}
-static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
+static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_rq *rq, void *qpin,
+ struct ib_pd *pd, struct mlx5_ib_cq *cq)
{
- if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
- (attr->qp_type == IB_QPT_XRC_INI))
- return cpu_to_be32(MLX5_SRQ_RQ);
- else if (!qp->has_rq)
- return cpu_to_be32(MLX5_ZERO_LEN_RQ);
- else
- return cpu_to_be32(MLX5_NON_ZERO_RQ);
+ struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
+ __be64 *pas;
+ void *in;
+ void *rqc;
+ void *wq;
+ void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
+ struct ib_umem *umem = rq->base.ubuffer.umem;
+ unsigned int page_offset_quantized;
+ unsigned long page_size = 0;
+ int ts_format;
+ size_t inlen;
+ int err;
+
+ ts_format = get_rq_ts_format(dev, cq);
+ if (ts_format < 0)
+ return ts_format;
+
+ page_size = mlx5_umem_find_best_quantized_pgoff(umem, wq, log_wq_pg_sz,
+ MLX5_ADAPTER_PAGE_SHIFT,
+ page_offset, 64,
+ &page_offset_quantized);
+ if (!page_size)
+ return -EINVAL;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
+ sizeof(u64) * ib_umem_num_dma_blocks(umem, page_size);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid);
+ rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+ if (!(rq->flags & MLX5_IB_RQ_CVLAN_STRIPPING))
+ MLX5_SET(rqc, rqc, vsd, 1);
+ MLX5_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+ MLX5_SET(rqc, rqc, ts_format, ts_format);
+ MLX5_SET(rqc, rqc, flush_in_error_en, 1);
+ MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
+ MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));
+
+ if (mqp->flags & IB_QP_CREATE_SCATTER_FCS)
+ MLX5_SET(rqc, rqc, scatter_fcs, 1);
+
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ if (rq->flags & MLX5_IB_RQ_PCI_WRITE_END_PADDING)
+ MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+ MLX5_SET(wq, wq, page_offset, page_offset_quantized);
+ MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
+ MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
+ MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(qpc, qpc, log_rq_stride) + 4);
+ MLX5_SET(wq, wq, log_wq_pg_sz,
+ order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_rq_size));
+
+ pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
+ mlx5_ib_populate_pas(umem, page_size, pas, 0);
+
+ err = mlx5_core_create_rq_tracked(dev, in, inlen, &rq->base.mqp);
+
+ kvfree(in);
+
+ return err;
}
-static int is_connected(enum ib_qp_type qp_type)
+static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_rq *rq)
{
- if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
- return 1;
+ mlx5_core_destroy_rq_tracked(dev, &rq->base.mqp);
+}
+
+static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_rq *rq,
+ u32 qp_flags_en,
+ struct ib_pd *pd)
+{
+ if (qp_flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
+ mlx5_ib_disable_lb(dev, false, true);
+ mlx5_cmd_destroy_tir(dev->mdev, rq->tirn, to_mpd(pd)->uid);
+}
+
+static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_rq *rq, u32 tdn,
+ u32 *qp_flags_en, struct ib_pd *pd,
+ u32 *out)
+{
+ u8 lb_flag = 0;
+ u32 *in;
+ void *tirc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
+ MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
+ MLX5_SET(tirc, tirc, transport_domain, tdn);
+ if (*qp_flags_en & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
+ MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
+
+ if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC)
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+
+ if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
+
+ if (dev->is_rep) {
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+ *qp_flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
+ }
+
+ MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
+ MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+ err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out);
+ rq->tirn = MLX5_GET(create_tir_out, out, tirn);
+ if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
+ err = mlx5_ib_enable_lb(dev, false, true);
+
+ if (err)
+ destroy_raw_packet_qp_tir(dev, rq, 0, pd);
+ }
+ kvfree(in);
+
+ return err;
+}
+
+static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ u32 *in, size_t inlen, struct ib_pd *pd,
+ struct ib_udata *udata,
+ struct mlx5_ib_create_qp_resp *resp,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
+ struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
+ struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
+ struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+ int err;
+ u32 tdn = mucontext->tdn;
+ u16 uid = to_mpd(pd)->uid;
+ u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
+
+ if (!qp->sq.wqe_cnt && !qp->rq.wqe_cnt)
+ return -EINVAL;
+ if (qp->sq.wqe_cnt) {
+ err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd);
+ if (err)
+ return err;
+
+ err = create_raw_packet_qp_sq(dev, udata, sq, in, pd,
+ to_mcq(init_attr->send_cq));
+ if (err)
+ goto err_destroy_tis;
+
+ if (uid) {
+ resp->tisn = sq->tisn;
+ resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TISN;
+ resp->sqn = sq->base.mqp.qpn;
+ resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_SQN;
+ }
+
+ sq->base.container_mibqp = qp;
+ sq->base.mqp.event = mlx5_ib_qp_event;
+ }
+
+ if (qp->rq.wqe_cnt) {
+ rq->base.container_mibqp = qp;
+
+ if (qp->flags & IB_QP_CREATE_CVLAN_STRIPPING)
+ rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
+ if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING)
+ rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
+ err = create_raw_packet_qp_rq(dev, rq, in, pd,
+ to_mcq(init_attr->recv_cq));
+ if (err)
+ goto err_destroy_sq;
+ err = create_raw_packet_qp_tir(dev, rq, tdn, &qp->flags_en, pd,
+ out);
+ if (err)
+ goto err_destroy_rq;
+
+ if (uid) {
+ resp->rqn = rq->base.mqp.qpn;
+ resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_RQN;
+ resp->tirn = rq->tirn;
+ resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner_v2)) {
+ resp->tir_icm_addr = MLX5_GET(
+ create_tir_out, out, icm_address_31_0);
+ resp->tir_icm_addr |=
+ (u64)MLX5_GET(create_tir_out, out,
+ icm_address_39_32)
+ << 32;
+ resp->tir_icm_addr |=
+ (u64)MLX5_GET(create_tir_out, out,
+ icm_address_63_40)
+ << 40;
+ resp->comp_mask |=
+ MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
+ }
+ }
+ }
+
+ qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn :
+ rq->base.mqp.qpn;
return 0;
+
+err_destroy_rq:
+ destroy_raw_packet_qp_rq(dev, rq);
+err_destroy_sq:
+ if (!qp->sq.wqe_cnt)
+ return err;
+ destroy_raw_packet_qp_sq(dev, sq);
+err_destroy_tis:
+ destroy_raw_packet_qp_tis(dev, sq, pd);
+
+ return err;
}
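
create_raw_packet_qp() splits the verbs QP into separate send- and receive-side hardware objects instead of a single QPC; the resulting object graph and the qpn choice can be summarised as:

/*
 * Raw packet QP decomposition (illustrative summary of the code above):
 *
 *   mlx5_ib_raw_packet_qp
 *     |-- send side: TIS -> SQ    (created only if sq.wqe_cnt)
 *     `-- recv side: RQ  -> TIR   (created only if rq.wqe_cnt)
 *
 *   qp->trans_qp.base.mqp.qpn = SQ number when a send side exists,
 *                               otherwise the RQ number.
 */
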
-static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata, struct mlx5_ib_qp *qp)
+static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp)
{
- struct mlx5_ib_resources *devr = &dev->devr;
- struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
+ struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
+ struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
+
+ if (qp->rq.wqe_cnt) {
+ destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, qp->ibqp.pd);
+ destroy_raw_packet_qp_rq(dev, rq);
+ }
+
+ if (qp->sq.wqe_cnt) {
+ destroy_raw_packet_qp_sq(dev, sq);
+ destroy_raw_packet_qp_tis(dev, sq, qp->ibqp.pd);
+ }
+}
+
+static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
+ struct mlx5_ib_raw_packet_qp *raw_packet_qp)
+{
+ struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
+ struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
+
+ sq->sq = &qp->sq;
+ rq->rq = &qp->rq;
+ sq->doorbell = &qp->db;
+ rq->doorbell = &qp->db;
+}
+
+static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
+{
+ if (qp->flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
+ mlx5_ib_disable_lb(dev, false, true);
+ mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
+ to_mpd(qp->ibqp.pd)->uid);
+}
+
+struct mlx5_create_qp_params {
+ struct ib_udata *udata;
+ size_t inlen;
+ size_t outlen;
+ size_t ucmd_size;
+ void *ucmd;
+ u8 is_rss_raw : 1;
+ struct ib_qp_init_attr *attr;
+ u32 uidx;
struct mlx5_ib_create_qp_resp resp;
- struct mlx5_create_qp_mbox_in *in;
- struct mlx5_ib_create_qp ucmd;
- int inlen = sizeof(*in);
+};
+
+static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ struct ib_qp_init_attr *init_attr = params->attr;
+ struct mlx5_ib_create_qp_rss *ucmd = params->ucmd;
+ struct ib_udata *udata = params->udata;
+ struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+ int inlen;
+ int outlen;
int err;
+ u32 *in;
+ u32 *out;
+ void *tirc;
+ void *hfso;
+ u32 selected_fields = 0;
+ u32 outer_l4;
+ u32 tdn = mucontext->tdn;
+ u8 lb_flag = 0;
+
+ if (ucmd->comp_mask) {
+ mlx5_ib_dbg(dev, "invalid comp mask\n");
+ return -EOPNOTSUPP;
+ }
- mlx5_ib_odp_create_qp(qp);
+ if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER &&
+ !(ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) {
+ mlx5_ib_dbg(dev, "Tunnel offloads must be set for inner RSS\n");
+ return -EOPNOTSUPP;
+ }
- mutex_init(&qp->mutex);
- spin_lock_init(&qp->sq.lock);
- spin_lock_init(&qp->rq.lock);
+ if (dev->is_rep)
+ qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
- if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
- if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
- mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
- return -EINVAL;
- } else {
- qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
+ if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC)
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+
+ if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
+
+ inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ outlen = MLX5_ST_SZ_BYTES(create_tir_out);
+ in = kvzalloc(inlen + outlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ out = in + MLX5_ST_SZ_DW(create_tir_in);
+ MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ MLX5_SET(tirc, tirc, disp_type,
+ MLX5_TIRC_DISP_TYPE_INDIRECT);
+ MLX5_SET(tirc, tirc, indirect_table,
+ init_attr->rwq_ind_tbl->ind_tbl_num);
+ MLX5_SET(tirc, tirc, transport_domain, tdn);
+
+ hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+ if (ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
+ MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
+
+ MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
+
+ if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER)
+ hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner);
+ else
+ hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+ switch (ucmd->rx_hash_function) {
+ case MLX5_RX_HASH_FUNC_TOEPLITZ:
+ {
+ void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
+ size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);
+
+ if (len != ucmd->rx_key_len) {
+ err = -EINVAL;
+ goto err;
}
+
+ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
+ memcpy(rss_key, ucmd->rx_hash_key, len);
+ break;
+ }
+ default:
+ err = -EOPNOTSUPP;
+ goto err;
}
- if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
- qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
+ if (!ucmd->rx_hash_fields_mask) {
+ /* special case when this TIR serves as steering entry without hashing */
+ if (!init_attr->rwq_ind_tbl->log_ind_tbl_size)
+ goto create_tir;
+ err = -EINVAL;
+ goto err;
+ }
- if (pd && pd->uobject) {
- if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
- mlx5_ib_dbg(dev, "copy failed\n");
- return -EFAULT;
+ if (((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) &&
+ ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ /* If none of IPV4 & IPV6 SRC/DST was set - this bit field is ignored */
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4))
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+
+ outer_l4 = ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
+ << 0 |
+ ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
+ << 1 |
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) << 2;
+
+ /* Check that only one l4 protocol is set */
+ if (outer_l4 & (outer_l4 - 1)) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ /* If none of TCP & UDP SRC/DST was set - this bit field is ignored */
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6))
+ selected_fields |= MLX5_HASH_FIELD_SEL_SRC_IP;
+
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
+ selected_fields |= MLX5_HASH_FIELD_SEL_DST_IP;
+
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP))
+ selected_fields |= MLX5_HASH_FIELD_SEL_L4_SPORT;
+
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
+ selected_fields |= MLX5_HASH_FIELD_SEL_L4_DPORT;
+
+ if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI)
+ selected_fields |= MLX5_HASH_FIELD_SEL_IPSEC_SPI;
+
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);
+
+create_tir:
+ MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+ err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out);
+
+ qp->rss_qp.tirn = MLX5_GET(create_tir_out, out, tirn);
+ if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
+ err = mlx5_ib_enable_lb(dev, false, true);
+
+ if (err)
+ mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
+ to_mpd(pd)->uid);
+ }
+
+ if (err)
+ goto err;
+
+ if (mucontext->devx_uid) {
+ params->resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
+ params->resp.tirn = qp->rss_qp.tirn;
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner_v2)) {
+ params->resp.tir_icm_addr =
+ MLX5_GET(create_tir_out, out, icm_address_31_0);
+ params->resp.tir_icm_addr |=
+ (u64)MLX5_GET(create_tir_out, out,
+ icm_address_39_32)
+ << 32;
+ params->resp.tir_icm_addr |=
+ (u64)MLX5_GET(create_tir_out, out,
+ icm_address_63_40)
+ << 40;
+ params->resp.comp_mask |=
+ MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
}
+ }
- qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
- qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
- } else {
- qp->wq_sig = !!wq_signature;
+ kvfree(in);
+ /* qpn is reserved for that QP */
+ qp->trans_qp.base.mqp.qpn = 0;
+ qp->is_rss = true;
+ return 0;
+
+err:
+ kvfree(in);
+ return err;
+}
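
The outer_l4 bitmap packs "TCP ports requested", "UDP ports requested" and "IPsec SPI requested" into bits 0-2, and the outer_l4 & (outer_l4 - 1) test rejects any selection with more than one bit set, i.e. more than one L4 hash protocol. For example, requesting both TCP and UDP ports gives 0b011, and 0b011 & 0b010 = 0b010, which is non-zero, so the combination fails with -EINVAL:

/* Illustrative: the classic "more than one bit set" test. */
static bool more_than_one_bit(unsigned int mask)
{
	return mask & (mask - 1);	/* TCP|UDP = 0b011 -> 0b010 -> true */
}
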
+
+static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp,
+ struct ib_qp_init_attr *init_attr,
+ void *qpc)
+{
+ int scqe_sz;
+ bool allow_scat_cqe = false;
+
+ allow_scat_cqe = qp->flags_en & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
+
+ if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
+ return;
+
+ scqe_sz = mlx5_ib_get_cqe_size(init_attr->send_cq);
+ if (scqe_sz == 128) {
+ MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
+ return;
+ }
+
+ if (init_attr->qp_type != MLX5_IB_QPT_DCI ||
+ MLX5_CAP_GEN(dev->mdev, dc_req_scat_data_cqe))
+ MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
+}
+
+static int atomic_size_to_mode(int size_mask)
+{
+ /* driver does not support atomic_size > 256B
+ * and does not know how to translate bigger sizes
+ */
+ int supported_size_mask = size_mask & 0x1ff;
+ int log_max_size;
+
+ if (!supported_size_mask)
+ return -EOPNOTSUPP;
+
+ log_max_size = __fls(supported_size_mask);
+
+ if (log_max_size > 3)
+ return log_max_size;
+
+ return MLX5_ATOMIC_MODE_8B;
+}
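
In the atomic size capability mask, bit n means that 2^n-byte atomics are supported, and masking with 0x1ff truncates everything above 256 bytes. Because the extended atomic mode encoding follows log2 of the operand size, __fls() of the mask can be returned directly once it exceeds the 8-byte case:

/* Worked example (illustrative mask values):
 *   size_mask = 0x48  -> 8-byte and 64-byte atomics supported
 *   supported_size_mask = 0x48 & 0x1ff = 0x48
 *   __fls(0x48) = 6   -> returned as the 64-byte atomic mode
 *   size_mask = 0x08  -> __fls = 3 -> falls back to MLX5_ATOMIC_MODE_8B
 */
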
+
+static int get_atomic_mode(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp)
+{
+ u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
+ u8 atomic = MLX5_CAP_GEN(dev->mdev, atomic);
+ int atomic_mode = -EOPNOTSUPP;
+ int atomic_size_mask;
+
+ if (!atomic)
+ return -EOPNOTSUPP;
+
+ if (qp->type == MLX5_IB_QPT_DCT)
+ atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
+ else
+ atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
+
+ if ((atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP) ||
+ (atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD))
+ atomic_mode = atomic_size_to_mode(atomic_size_mask);
+
+ if (atomic_mode <= 0 &&
+ (atomic_operations & MLX5_ATOMIC_OPS_CMP_SWAP &&
+ atomic_operations & MLX5_ATOMIC_OPS_FETCH_ADD))
+ atomic_mode = MLX5_ATOMIC_MODE_IB_COMP;
+
+ /* OOO DP QPs do not support larger than 8-Bytes atomic operations */
+ if (atomic_mode > MLX5_ATOMIC_MODE_8B && qp->is_ooo_rq)
+ atomic_mode = MLX5_ATOMIC_MODE_8B;
+
+ return atomic_mode;
+}
+
+static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ struct ib_qp_init_attr *attr = params->attr;
+ u32 uidx = params->uidx;
+ struct mlx5_ib_resources *devr = &dev->devr;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+ int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_qp_base *base;
+ unsigned long flags;
+ void *qpc;
+ u32 *in;
+ int err;
+
+ if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+
+ MLX5_SET(qpc, qpc, st, MLX5_QP_ST_XRC);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, to_mpd(devr->p0)->pdn);
+
+ if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
+ MLX5_SET(qpc, qpc, block_lb_mc, 1);
+ if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
+ MLX5_SET(qpc, qpc, cd_master, 1);
+ if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
+ MLX5_SET(qpc, qpc, cd_slave_send, 1);
+ if (qp->flags & IB_QP_CREATE_MANAGED_RECV)
+ MLX5_SET(qpc, qpc, cd_slave_receive, 1);
+
+ MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev));
+ MLX5_SET(qpc, qpc, rq_type, MLX5_SRQ_RQ);
+ MLX5_SET(qpc, qpc, no_sq, 1);
+ MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
+ MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
+ MLX5_SET(qpc, qpc, xrcd, to_mxrcd(attr->xrcd)->xrcdn);
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
+
+ /* 0xffffff means we ask to work with cqe version 0 */
+ if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
+ MLX5_SET(qpc, qpc, user_index, uidx);
+
+ if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
+ MLX5_SET(qpc, qpc, end_padding_mode,
+ MLX5_WQ_END_PAD_MODE_ALIGN);
+ /* Special case to clean flag */
+ qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING;
}
+ base = &qp->trans_qp.base;
+ err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
+ kvfree(in);
+ if (err)
+ return err;
+
+ base->container_mibqp = qp;
+ base->mqp.event = mlx5_ib_qp_event;
+ if (MLX5_CAP_GEN(mdev, ece_support))
+ params->resp.ece_options = MLX5_GET(create_qp_out, out, ece);
+
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ list_add_tail(&qp->qps_list, &dev->qp_list);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+ qp->trans_qp.xrcdn = to_mxrcd(attr->xrcd)->xrcdn;
+ return 0;
+}
+
+static int create_dci(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ struct ib_qp_init_attr *init_attr = params->attr;
+ struct mlx5_ib_create_qp *ucmd = params->ucmd;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+ struct ib_udata *udata = params->udata;
+ u32 uidx = params->uidx;
+ struct mlx5_ib_resources *devr = &dev->devr;
+ int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_cq *send_cq;
+ struct mlx5_ib_cq *recv_cq;
+ unsigned long flags;
+ struct mlx5_ib_qp_base *base;
+ int ts_format;
+ int mlx5_st;
+ void *qpc;
+ u32 *in;
+ int err;
+
+ spin_lock_init(&qp->sq.lock);
+ spin_lock_init(&qp->rq.lock);
+
+ mlx5_st = to_mlx5_st(qp->type);
+ if (mlx5_st < 0)
+ return -EINVAL;
+
+ if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
+
+ base = &qp->trans_qp.base;
+
qp->has_rq = qp_has_rq(init_attr);
- err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
- qp, (pd && pd->uobject) ? &ucmd : NULL);
+ err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd);
if (err) {
mlx5_ib_dbg(dev, "err %d\n", err);
return err;
}
- if (pd) {
- if (pd->uobject) {
- __u32 max_wqes =
- 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
- mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
- if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
- ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
- mlx5_ib_dbg(dev, "invalid rq params\n");
- return -EINVAL;
- }
- if (ucmd.sq_wqe_count > max_wqes) {
- mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
- ucmd.sq_wqe_count, max_wqes);
- return -EINVAL;
- }
- err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
- if (err)
- mlx5_ib_dbg(dev, "err %d\n", err);
- } else {
- err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
- if (err)
- mlx5_ib_dbg(dev, "err %d\n", err);
- else
- qp->pa_lkey = to_mpd(pd)->pa_lkey;
- }
+ if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
+ ucmd->rq_wqe_count != qp->rq.wqe_cnt)
+ return -EINVAL;
- if (err)
- return err;
+ if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
+ return -EINVAL;
+
+ ts_format = get_qp_ts_format(dev, to_mcq(init_attr->send_cq),
+ to_mcq(init_attr->recv_cq));
+
+ if (ts_format < 0)
+ return ts_format;
+
+ err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, &params->resp,
+ &inlen, base, ucmd);
+ if (err)
+ return err;
+
+ if (MLX5_CAP_GEN(mdev, ece_support))
+ MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+
+ MLX5_SET(qpc, qpc, st, mlx5_st);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, to_mpd(pd)->pdn);
+
+ if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
+ MLX5_SET(qpc, qpc, wq_signature, 1);
+
+ if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
+ MLX5_SET(qpc, qpc, cd_master, 1);
+ if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
+ MLX5_SET(qpc, qpc, cd_slave_send, 1);
+ if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE)
+ configure_requester_scat_cqe(dev, qp, init_attr, qpc);
+
+ if (qp->rq.wqe_cnt) {
+ MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
+ MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
+ }
+
+ if (qp->flags_en & MLX5_QP_FLAG_DCI_STREAM) {
+ MLX5_SET(qpc, qpc, log_num_dci_stream_channels,
+ ucmd->dci_streams.log_num_concurent);
+ MLX5_SET(qpc, qpc, log_num_dci_errored_streams,
+ ucmd->dci_streams.log_num_errored);
+ }
+
+ MLX5_SET(qpc, qpc, ts_format, ts_format);
+ MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));
+
+ MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
+
+ /* Set default resources */
+ if (init_attr->srq) {
+ MLX5_SET(qpc, qpc, xrcd, devr->xrcdn0);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
+ to_msrq(init_attr->srq)->msrq.srqn);
} else {
- in = mlx5_vzalloc(sizeof(*in));
- if (!in)
- return -ENOMEM;
+ MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
+ to_msrq(devr->s1)->msrq.srqn);
+ }
+
+ if (init_attr->send_cq)
+ MLX5_SET(qpc, qpc, cqn_snd,
+ to_mcq(init_attr->send_cq)->mcq.cqn);
+
+ if (init_attr->recv_cq)
+ MLX5_SET(qpc, qpc, cqn_rcv,
+ to_mcq(init_attr->recv_cq)->mcq.cqn);
+
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
- qp->create_type = MLX5_QP_EMPTY;
+ /* 0xffffff means we ask to work with cqe version 0 */
+ if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
+ MLX5_SET(qpc, qpc, user_index, uidx);
+
+ if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
+ MLX5_SET(qpc, qpc, end_padding_mode,
+ MLX5_WQ_END_PAD_MODE_ALIGN);
+ /* Special case to clean flag */
+ qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING;
}
- if (is_sqp(init_attr->qp_type))
- qp->port = init_attr->port_num;
+ err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
- in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
- MLX5_QP_PM_MIGRATED << 11);
+ kvfree(in);
+ if (err)
+ goto err_create;
- if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
- in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
- else
- in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);
+ base->container_mibqp = qp;
+ base->mqp.event = mlx5_ib_qp_event;
+ if (MLX5_CAP_GEN(mdev, ece_support))
+ params->resp.ece_options = MLX5_GET(create_qp_out, out, ece);
+
+ get_cqs(qp->type, init_attr->send_cq, init_attr->recv_cq,
+ &send_cq, &recv_cq);
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ mlx5_ib_lock_cqs(send_cq, recv_cq);
+ /* Maintain device to QPs access, needed for further handling via reset
+ * flow
+ */
+ list_add_tail(&qp->qps_list, &dev->qp_list);
+ /* Maintain CQ to QPs access, needed for further handling via reset flow
+ */
+ if (send_cq)
+ list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
+ if (recv_cq)
+ list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
+ mlx5_ib_unlock_cqs(send_cq, recv_cq);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
- if (qp->wq_sig)
- in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);
+ return 0;
- if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
- in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST);
+err_create:
+ destroy_qp(dev, qp, base, udata);
+ return err;
+}
- if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
- int rcqe_sz;
- int scqe_sz;
+static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ struct ib_qp_init_attr *init_attr = params->attr;
+ struct mlx5_ib_create_qp *ucmd = params->ucmd;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+ struct ib_udata *udata = params->udata;
+ u32 uidx = params->uidx;
+ struct mlx5_ib_resources *devr = &dev->devr;
+ int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_cq *send_cq;
+ struct mlx5_ib_cq *recv_cq;
+ unsigned long flags;
+ struct mlx5_ib_qp_base *base;
+ int ts_format;
+ int mlx5_st;
+ void *qpc;
+ u32 *in;
+ int err;
- rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
- scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);
+ spin_lock_init(&qp->sq.lock);
+ spin_lock_init(&qp->rq.lock);
- if (rcqe_sz == 128)
- in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
- else
- in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;
+ mlx5_st = to_mlx5_st(qp->type);
+ if (mlx5_st < 0)
+ return -EINVAL;
- if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
- if (scqe_sz == 128)
- in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
- else
- in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
- }
+ if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
+
+ if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
+ qp->underlay_qpn = init_attr->source_qpn;
+
+ base = (init_attr->qp_type == IB_QPT_RAW_PACKET ||
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
+ &qp->raw_packet_qp.rq.base :
+ &qp->trans_qp.base;
+
+ qp->has_rq = qp_has_rq(init_attr);
+ err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd);
+ if (err) {
+ mlx5_ib_dbg(dev, "err %d\n", err);
+ return err;
}
+ if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
+ ucmd->rq_wqe_count != qp->rq.wqe_cnt)
+ return -EINVAL;
+
+ if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
+ return -EINVAL;
+
+ if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
+ ts_format = get_qp_ts_format(dev, to_mcq(init_attr->send_cq),
+ to_mcq(init_attr->recv_cq));
+ if (ts_format < 0)
+ return ts_format;
+ }
+
+ err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, &params->resp,
+ &inlen, base, ucmd);
+ if (err)
+ return err;
+
+ if (is_sqp(init_attr->qp_type))
+ qp->port = init_attr->port_num;
+
+ if (MLX5_CAP_GEN(mdev, ece_support))
+ MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+
+ MLX5_SET(qpc, qpc, st, mlx5_st);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, to_mpd(pd)->pdn);
+
+ if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
+ MLX5_SET(qpc, qpc, wq_signature, 1);
+
+ if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
+ MLX5_SET(qpc, qpc, block_lb_mc, 1);
+
+ if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
+ MLX5_SET(qpc, qpc, cd_master, 1);
+ if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
+ MLX5_SET(qpc, qpc, cd_slave_send, 1);
+ if (qp->flags & IB_QP_CREATE_MANAGED_RECV)
+ MLX5_SET(qpc, qpc, cd_slave_receive, 1);
+ if (qp->flags_en & MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE)
+ MLX5_SET(qpc, qpc, req_e2e_credit_mode, 1);
+ if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
+ (init_attr->qp_type == IB_QPT_RC ||
+ init_attr->qp_type == IB_QPT_UC)) {
+ int rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
+
+ MLX5_SET(qpc, qpc, cs_res,
+ rcqe_sz == 128 ? MLX5_RES_SCAT_DATA64_CQE :
+ MLX5_RES_SCAT_DATA32_CQE);
+ }
+ if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
+ (qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC))
+ configure_requester_scat_cqe(dev, qp, init_attr, qpc);
+
if (qp->rq.wqe_cnt) {
- in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
- in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
+ MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
+ MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
}
- in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);
+ if (init_attr->qp_type != IB_QPT_RAW_PACKET)
+ MLX5_SET(qpc, qpc, ts_format, ts_format);
- if (qp->sq.wqe_cnt)
- in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
- else
- in->ctx.sq_crq_size |= cpu_to_be16(0x8000);
+ MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));
+
+ if (qp->sq.wqe_cnt) {
+ MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
+ } else {
+ MLX5_SET(qpc, qpc, no_sq, 1);
+ if (init_attr->srq &&
+ init_attr->srq->srq_type == IB_SRQT_TM)
+ MLX5_SET(qpc, qpc, offload_type,
+ MLX5_QPC_OFFLOAD_TYPE_RNDV);
+ }
/* Set default resources */
switch (init_attr->qp_type) {
- case IB_QPT_XRC_TGT:
- in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
- in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
- in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
- in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
- break;
case IB_QPT_XRC_INI:
- in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
- in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
- in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
+ MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
+ MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
break;
default:
if (init_attr->srq) {
- in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
- in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
+ MLX5_SET(qpc, qpc, xrcd, devr->xrcdn0);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn);
} else {
- in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
- in->ctx.rq_type_srqn |=
- cpu_to_be32(to_msrq(devr->s1)->msrq.srqn);
+ MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn);
}
}
if (init_attr->send_cq)
- in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);
+ MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn);
if (init_attr->recv_cq)
- in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);
+ MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn);
- in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
- err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen);
- if (err) {
- mlx5_ib_dbg(dev, "create qp failed\n");
- goto err_create;
+ /* 0xffffff means we ask to work with cqe version 0 */
+ if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
+ MLX5_SET(qpc, qpc, user_index, uidx);
+
+ if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING &&
+ init_attr->qp_type != IB_QPT_RAW_PACKET) {
+ MLX5_SET(qpc, qpc, end_padding_mode,
+ MLX5_WQ_END_PAD_MODE_ALIGN);
+ /* Special case to clean flag */
+ qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING;
}
+ if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
+ qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd->sq_buf_addr;
+ raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
+ err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
+ &params->resp, init_attr);
+ } else
+ err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
+
kvfree(in);
- /* Hardware wants QPN written in big-endian order (after
- * shifting) for send doorbell. Precompute this value to save
- * a little bit when posting sends.
- */
- qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
+ if (err)
+ goto err_create;
- qp->mqp.event = mlx5_ib_qp_event;
+ base->container_mibqp = qp;
+ base->mqp.event = mlx5_ib_qp_event;
+ if (MLX5_CAP_GEN(mdev, ece_support))
+ params->resp.ece_options = MLX5_GET(create_qp_out, out, ece);
+
+ get_cqs(qp->type, init_attr->send_cq, init_attr->recv_cq,
+ &send_cq, &recv_cq);
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ mlx5_ib_lock_cqs(send_cq, recv_cq);
+ /* Maintain device to QPs access, needed for further handling via reset
+ * flow
+ */
+ list_add_tail(&qp->qps_list, &dev->qp_list);
+ /* Maintain CQ to QPs access, needed for further handling via reset flow
+ */
+ if (send_cq)
+ list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
+ if (recv_cq)
+ list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
+ mlx5_ib_unlock_cqs(send_cq, recv_cq);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
return 0;
err_create:
- if (qp->create_type == MLX5_QP_USER)
- destroy_qp_user(pd, qp);
- else if (qp->create_type == MLX5_QP_KERNEL)
- destroy_qp_kernel(dev, qp);
+ destroy_qp(dev, qp, base, udata);
+ return err;
+}
+
+static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ struct ib_qp_init_attr *attr = params->attr;
+ u32 uidx = params->uidx;
+ struct mlx5_ib_resources *devr = &dev->devr;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+ int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_cq *send_cq;
+ struct mlx5_ib_cq *recv_cq;
+ unsigned long flags;
+ struct mlx5_ib_qp_base *base;
+ int mlx5_st;
+ void *qpc;
+ u32 *in;
+ int err;
+
+ spin_lock_init(&qp->sq.lock);
+ spin_lock_init(&qp->rq.lock);
+
+ mlx5_st = to_mlx5_st(qp->type);
+ if (mlx5_st < 0)
+ return -EINVAL;
+
+ if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
+
+ base = &qp->trans_qp.base;
+
+ qp->has_rq = qp_has_rq(attr);
+ err = set_rq_size(dev, &attr->cap, qp->has_rq, qp, NULL);
+ if (err) {
+ mlx5_ib_dbg(dev, "err %d\n", err);
+ return err;
+ }
+ err = _create_kernel_qp(dev, attr, qp, &in, &inlen, base);
+ if (err)
+ return err;
+
+ if (is_sqp(attr->qp_type))
+ qp->port = attr->port_num;
+
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+
+ MLX5_SET(qpc, qpc, st, mlx5_st);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+
+ if (attr->qp_type != MLX5_IB_QPT_REG_UMR)
+ MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
+ else
+ MLX5_SET(qpc, qpc, latency_sensitive, 1);
+
+
+ if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
+ MLX5_SET(qpc, qpc, block_lb_mc, 1);
+
+ if (qp->rq.wqe_cnt) {
+ MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
+ MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
+ }
+
+ MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, attr));
+
+ if (qp->sq.wqe_cnt)
+ MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
+ else
+ MLX5_SET(qpc, qpc, no_sq, 1);
+
+ if (attr->srq) {
+ MLX5_SET(qpc, qpc, xrcd, devr->xrcdn0);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
+ to_msrq(attr->srq)->msrq.srqn);
+ } else {
+ MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
+ to_msrq(devr->s1)->msrq.srqn);
+ }
+
+ if (attr->send_cq)
+ MLX5_SET(qpc, qpc, cqn_snd, to_mcq(attr->send_cq)->mcq.cqn);
+
+ if (attr->recv_cq)
+ MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(attr->recv_cq)->mcq.cqn);
+
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
+
+ /* A uidx of 0xffffff means we ask to work with cqe version 0 */
+ if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
+ MLX5_SET(qpc, qpc, user_index, uidx);
+
+ /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicate an IPoIB QP */
+ if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO)
+ MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
+
+ if (qp->flags & IB_QP_CREATE_INTEGRITY_EN &&
+ MLX5_CAP_GEN(mdev, go_back_n))
+ MLX5_SET(qpc, qpc, retry_mode, MLX5_QP_RM_GO_BACK_N);
+
+ err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
kvfree(in);
+ if (err)
+ goto err_create;
+
+ base->container_mibqp = qp;
+ base->mqp.event = mlx5_ib_qp_event;
+
+ get_cqs(qp->type, attr->send_cq, attr->recv_cq,
+ &send_cq, &recv_cq);
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ mlx5_ib_lock_cqs(send_cq, recv_cq);
+ /* Maintain device to QPs access, needed for further handling via reset
+ * flow
+ */
+ list_add_tail(&qp->qps_list, &dev->qp_list);
+ /* Maintain CQ to QPs access, needed for further handling via reset flow
+ */
+ if (send_cq)
+ list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
+ if (recv_cq)
+ list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
+ mlx5_ib_unlock_cqs(send_cq, recv_cq);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+ return 0;
+
+err_create:
+ destroy_qp(dev, qp, base, NULL);
return err;
}
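
Both create paths above now fill the firmware command through MLX5_SET()/MLX5_ADDR_OF() accessors instead of hand-packing big-endian struct fields (the removed in->ctx.* assignments). Below is a minimal standalone sketch of that idea, writing a value into a field defined only by a bit offset and width; set_field() is a hypothetical helper, not the kernel's MLX5_SET macro.

/* Illustrative sketch only: fields are counted from the most-significant bit
 * of each 32-bit word (simplified; no cross-word fields).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void set_field(uint32_t *buf, unsigned int bit_off,
                      unsigned int width, uint32_t val)
{
        unsigned int word = bit_off / 32;
        unsigned int shift = 32 - (bit_off % 32) - width;
        uint32_t mask = (width == 32) ? 0xffffffffu : ((1u << width) - 1) << shift;

        buf[word] = (buf[word] & ~mask) | ((val << shift) & mask);
}

int main(void)
{
        uint32_t qpc[4];

        memset(qpc, 0, sizeof(qpc));
        set_field(qpc, 0, 8, 0x1);      /* e.g. a "state"-like field in the first byte */
        set_field(qpc, 8, 24, 0x1234);  /* e.g. a 24-bit QPN-like field */
        printf("word0 = 0x%08x\n", qpc[0]);
        return 0;
}
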
@@ -1053,23 +2570,23 @@ static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv
if (send_cq) {
if (recv_cq) {
if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
- spin_lock_irq(&send_cq->lock);
+ spin_lock(&send_cq->lock);
spin_lock_nested(&recv_cq->lock,
SINGLE_DEPTH_NESTING);
} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
- spin_lock_irq(&send_cq->lock);
+ spin_lock(&send_cq->lock);
__acquire(&recv_cq->lock);
} else {
- spin_lock_irq(&recv_cq->lock);
+ spin_lock(&recv_cq->lock);
spin_lock_nested(&send_cq->lock,
SINGLE_DEPTH_NESTING);
}
} else {
- spin_lock_irq(&send_cq->lock);
+ spin_lock(&send_cq->lock);
__acquire(&recv_cq->lock);
}
} else if (recv_cq) {
- spin_lock_irq(&recv_cq->lock);
+ spin_lock(&recv_cq->lock);
__acquire(&send_cq->lock);
} else {
__acquire(&send_cq->lock);
@@ -1084,59 +2601,51 @@ static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *re
if (recv_cq) {
if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
spin_unlock(&recv_cq->lock);
- spin_unlock_irq(&send_cq->lock);
+ spin_unlock(&send_cq->lock);
} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
__release(&recv_cq->lock);
- spin_unlock_irq(&send_cq->lock);
+ spin_unlock(&send_cq->lock);
} else {
spin_unlock(&send_cq->lock);
- spin_unlock_irq(&recv_cq->lock);
+ spin_unlock(&recv_cq->lock);
}
} else {
__release(&recv_cq->lock);
- spin_unlock_irq(&send_cq->lock);
+ spin_unlock(&send_cq->lock);
}
} else if (recv_cq) {
__release(&send_cq->lock);
- spin_unlock_irq(&recv_cq->lock);
+ spin_unlock(&recv_cq->lock);
} else {
__release(&recv_cq->lock);
__release(&send_cq->lock);
}
}
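
mlx5_ib_lock_cqs()/mlx5_ib_unlock_cqs() avoid an ABBA deadlock by always taking the two CQ locks in CQN order, and the hunks above drop the _irq lock variants because the callers shown earlier now take reset_flow_resource_lock with spin_lock_irqsave() first. A minimal pthread sketch of the ordering idea, with hypothetical names:

/* Illustrative only: acquire two locks in a canonical order chosen by a
 * comparable id (the CQN in the driver) so no ABBA deadlock is possible.
 */
#include <pthread.h>
#include <stdio.h>

struct cq {
        unsigned int cqn;
        pthread_mutex_t lock;
};

static void lock_pair(struct cq *a, struct cq *b)
{
        if (a == b) {
                pthread_mutex_lock(&a->lock);
        } else if (a->cqn < b->cqn) {
                pthread_mutex_lock(&a->lock);
                pthread_mutex_lock(&b->lock);
        } else {
                pthread_mutex_lock(&b->lock);
                pthread_mutex_lock(&a->lock);
        }
}

static void unlock_pair(struct cq *a, struct cq *b)
{
        if (a != b)
                pthread_mutex_unlock(&b->lock);
        pthread_mutex_unlock(&a->lock);
}

int main(void)
{
        struct cq c1 = { .cqn = 7, .lock = PTHREAD_MUTEX_INITIALIZER };
        struct cq c2 = { .cqn = 3, .lock = PTHREAD_MUTEX_INITIALIZER };

        lock_pair(&c1, &c2);    /* always takes cqn 3 before cqn 7 */
        printf("both CQ locks held\n");
        unlock_pair(&c1, &c2);
        return 0;
}
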
-static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
-{
- return to_mpd(qp->ibqp.pd);
-}
-
-static void get_cqs(struct mlx5_ib_qp *qp,
+static void get_cqs(enum ib_qp_type qp_type,
+ struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{
- switch (qp->ibqp.qp_type) {
+ switch (qp_type) {
case IB_QPT_XRC_TGT:
*send_cq = NULL;
*recv_cq = NULL;
break;
case MLX5_IB_QPT_REG_UMR:
case IB_QPT_XRC_INI:
- *send_cq = to_mcq(qp->ibqp.send_cq);
+ *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
*recv_cq = NULL;
break;
case IB_QPT_SMI:
- case IB_QPT_GSI:
+ case MLX5_IB_QPT_HW_GSI:
case IB_QPT_RC:
case IB_QPT_UC:
case IB_QPT_UD:
- case IB_QPT_RAW_IPV6:
- case IB_QPT_RAW_ETHERTYPE:
- *send_cq = to_mcq(qp->ibqp.send_cq);
- *recv_cq = to_mcq(qp->ibqp.recv_cq);
- break;
-
case IB_QPT_RAW_PACKET:
- case IB_QPT_MAX:
+ *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
+ *recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL;
+ break;
default:
*send_cq = NULL;
*recv_cq = NULL;
@@ -1144,200 +2653,755 @@ static void get_cqs(struct mlx5_ib_qp *qp,
}
}
-static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
+static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ const struct mlx5_modify_raw_qp_param *raw_qp_param,
+ u8 lag_tx_affinity);
+
+static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct ib_udata *udata)
{
struct mlx5_ib_cq *send_cq, *recv_cq;
- struct mlx5_modify_qp_mbox_in *in;
+ struct mlx5_ib_qp_base *base;
+ unsigned long flags;
int err;
- in = kzalloc(sizeof(*in), GFP_KERNEL);
- if (!in)
+ if (qp->is_rss) {
+ destroy_rss_raw_qp_tir(dev, qp);
return;
+ }
+
+ base = (qp->type == IB_QPT_RAW_PACKET ||
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
+ &qp->raw_packet_qp.rq.base :
+ &qp->trans_qp.base;
if (qp->state != IB_QPS_RESET) {
- mlx5_ib_qp_disable_pagefaults(qp);
- if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
- MLX5_QP_STATE_RST, in, 0, &qp->mqp))
- mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
- qp->mqp.qpn);
+ if (qp->type != IB_QPT_RAW_PACKET &&
+ !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
+ err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0,
+ NULL, &base->mqp, NULL);
+ } else {
+ struct mlx5_modify_raw_qp_param raw_qp_param = {
+ .operation = MLX5_CMD_OP_2RST_QP
+ };
+
+ err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0);
+ }
+ if (err)
+ mlx5_ib_warn(dev, "mlx5_ib: modify QP 0x%06x to RESET failed\n",
+ base->mqp.qpn);
}
- get_cqs(qp, &send_cq, &recv_cq);
+ get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq, &send_cq,
+ &recv_cq);
- if (qp->create_type == MLX5_QP_KERNEL) {
- mlx5_ib_lock_cqs(send_cq, recv_cq);
- __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ mlx5_ib_lock_cqs(send_cq, recv_cq);
+ /* del from lists under both locks above to protect reset flow paths */
+ list_del(&qp->qps_list);
+ if (send_cq)
+ list_del(&qp->cq_send_list);
+
+ if (recv_cq)
+ list_del(&qp->cq_recv_list);
+
+ if (!udata) {
+ __mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
if (send_cq != recv_cq)
- __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
- mlx5_ib_unlock_cqs(send_cq, recv_cq);
+ __mlx5_ib_cq_clean(send_cq, base->mqp.qpn,
+ NULL);
}
+ mlx5_ib_unlock_cqs(send_cq, recv_cq);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
- err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp);
- if (err)
- mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
- kfree(in);
+ if (qp->type == IB_QPT_RAW_PACKET ||
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
+ destroy_raw_packet_qp(dev, qp);
+ } else {
+ err = mlx5_core_destroy_qp(dev, &base->mqp);
+ if (err)
+ mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n",
+ base->mqp.qpn);
+ }
+ destroy_qp(dev, qp, base, udata);
+}
- if (qp->create_type == MLX5_QP_KERNEL)
- destroy_qp_kernel(dev, qp);
- else if (qp->create_type == MLX5_QP_USER)
- destroy_qp_user(&get_pd(qp)->ibpd, qp);
+static int create_dct(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ struct ib_qp_init_attr *attr = params->attr;
+ struct mlx5_ib_create_qp *ucmd = params->ucmd;
+ u32 uidx = params->uidx;
+ void *dctc;
+
+ if (mlx5_lag_is_active(dev->mdev) && !MLX5_CAP_GEN(dev->mdev, lag_dct))
+ return -EOPNOTSUPP;
+
+ qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);
+ if (!qp->dct.in)
+ return -ENOMEM;
+
+ MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid);
+ dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
+ MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
+ MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);
+ MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
+ MLX5_SET(dctc, dctc, user_index, uidx);
+ if (MLX5_CAP_GEN(dev->mdev, ece_support))
+ MLX5_SET(dctc, dctc, ece, ucmd->ece_options);
+
+ if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) {
+ int rcqe_sz = mlx5_ib_get_cqe_size(attr->recv_cq);
+
+ if (rcqe_sz == 128)
+ MLX5_SET(dctc, dctc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
+ }
+
+ qp->state = IB_QPS_RESET;
+ return 0;
}
-static const char *ib_qp_type_str(enum ib_qp_type type)
+static int check_qp_type(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
+ enum ib_qp_type *type)
{
- switch (type) {
- case IB_QPT_SMI:
- return "IB_QPT_SMI";
- case IB_QPT_GSI:
- return "IB_QPT_GSI";
+ if (attr->qp_type == IB_QPT_DRIVER && !MLX5_CAP_GEN(dev->mdev, dct))
+ goto out;
+
+ switch (attr->qp_type) {
+ case IB_QPT_XRC_TGT:
+ case IB_QPT_XRC_INI:
+ if (!MLX5_CAP_GEN(dev->mdev, xrc))
+ goto out;
+ fallthrough;
case IB_QPT_RC:
- return "IB_QPT_RC";
case IB_QPT_UC:
- return "IB_QPT_UC";
- case IB_QPT_UD:
- return "IB_QPT_UD";
- case IB_QPT_RAW_IPV6:
- return "IB_QPT_RAW_IPV6";
- case IB_QPT_RAW_ETHERTYPE:
- return "IB_QPT_RAW_ETHERTYPE";
- case IB_QPT_XRC_INI:
- return "IB_QPT_XRC_INI";
- case IB_QPT_XRC_TGT:
- return "IB_QPT_XRC_TGT";
+ case IB_QPT_SMI:
+ case MLX5_IB_QPT_HW_GSI:
+ case IB_QPT_DRIVER:
+ case IB_QPT_GSI:
case IB_QPT_RAW_PACKET:
- return "IB_QPT_RAW_PACKET";
+ case IB_QPT_UD:
case MLX5_IB_QPT_REG_UMR:
- return "MLX5_IB_QPT_REG_UMR";
- case IB_QPT_MAX:
+ break;
default:
- return "Invalid QP type";
+ goto out;
}
+
+ *type = attr->qp_type;
+ return 0;
+
+out:
+ mlx5_ib_dbg(dev, "Unsupported QP type %d\n", attr->qp_type);
+ return -EOPNOTSUPP;
}
-struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
+static int check_valid_flow(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct ib_qp_init_attr *attr,
+ struct ib_udata *udata)
{
- struct mlx5_ib_dev *dev;
- struct mlx5_ib_qp *qp;
- u16 xrcdn = 0;
- int err;
-
- if (pd) {
- dev = to_mdev(pd->device);
- } else {
- /* being cautious here */
- if (init_attr->qp_type != IB_QPT_XRC_TGT &&
- init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
- pr_warn("%s: no PD for transport %s\n", __func__,
- ib_qp_type_str(init_attr->qp_type));
- return ERR_PTR(-EINVAL);
+ struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+
+ if (!udata) {
+ /* Kernel create_qp callers */
+ if (attr->rwq_ind_tbl)
+ return -EOPNOTSUPP;
+
+ switch (attr->qp_type) {
+ case IB_QPT_RAW_PACKET:
+ case IB_QPT_DRIVER:
+ return -EOPNOTSUPP;
+ default:
+ return 0;
}
- dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
}
- switch (init_attr->qp_type) {
- case IB_QPT_XRC_TGT:
- case IB_QPT_XRC_INI:
- if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
- mlx5_ib_dbg(dev, "XRC not supported\n");
- return ERR_PTR(-ENOSYS);
- }
- init_attr->recv_cq = NULL;
- if (init_attr->qp_type == IB_QPT_XRC_TGT) {
- xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
- init_attr->send_cq = NULL;
- }
+ /* Userspace create_qp callers */
+ if (attr->qp_type == IB_QPT_RAW_PACKET && !ucontext->cqe_version) {
+ mlx5_ib_dbg(dev,
+ "Raw Packet QP is only supported for CQE version > 0\n");
+ return -EINVAL;
+ }
- /* fall through */
+ if (attr->qp_type != IB_QPT_RAW_PACKET && attr->rwq_ind_tbl) {
+ mlx5_ib_dbg(dev,
+ "Wrong QP type %d for the RWQ indirect table\n",
+ attr->qp_type);
+ return -EINVAL;
+ }
+
+ /*
+ * We don't need to see this warning; it means that kernel code
+ * is missing an ib_pd. Placed here to catch developer mistakes.
+ */
+ WARN_ONCE(!pd && attr->qp_type != IB_QPT_XRC_TGT,
+ "There is a missing PD pointer assignment\n");
+ return 0;
+}
+
+static bool get_dp_ooo_cap(struct mlx5_core_dev *mdev, enum ib_qp_type qp_type)
+{
+ if (!MLX5_CAP_GEN_2(mdev, dp_ordering_force))
+ return false;
+
+ switch (qp_type) {
case IB_QPT_RC:
+ return MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_rc);
+ case IB_QPT_XRC_INI:
+ case IB_QPT_XRC_TGT:
+ return MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_xrc);
case IB_QPT_UC:
+ return MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_uc);
case IB_QPT_UD:
- case IB_QPT_SMI:
+ return MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_ud);
+ case MLX5_IB_QPT_DCI:
+ case MLX5_IB_QPT_DCT:
+ return MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_dc);
+ default:
+ return false;
+ }
+}
+
+static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
+ bool cond, struct mlx5_ib_qp *qp)
+{
+ if (!(*flags & flag))
+ return;
+
+ if (cond) {
+ qp->flags_en |= flag;
+ *flags &= ~flag;
+ return;
+ }
+
+ switch (flag) {
+ case MLX5_QP_FLAG_SCATTER_CQE:
+ case MLX5_QP_FLAG_ALLOW_SCATTER_CQE:
+ /*
+ * We don't return an error if these flags were provided
+ * but mlx5 doesn't have the right capability.
+ */
+ *flags &= ~(MLX5_QP_FLAG_SCATTER_CQE |
+ MLX5_QP_FLAG_ALLOW_SCATTER_CQE);
+ return;
+ default:
+ break;
+ }
+ mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag);
+}
+
+static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ void *ucmd, struct ib_qp_init_attr *attr)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ bool cond;
+ int flags;
+
+ if (attr->rwq_ind_tbl)
+ flags = ((struct mlx5_ib_create_qp_rss *)ucmd)->flags;
+ else
+ flags = ((struct mlx5_ib_create_qp *)ucmd)->flags;
+
+ switch (flags & (MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI)) {
+ case MLX5_QP_FLAG_TYPE_DCI:
+ qp->type = MLX5_IB_QPT_DCI;
+ break;
+ case MLX5_QP_FLAG_TYPE_DCT:
+ qp->type = MLX5_IB_QPT_DCT;
+ break;
+ default:
+ if (qp->type != IB_QPT_DRIVER)
+ break;
+ /*
+ * It is IB_QPT_DRIVER and either no subtype or a
+ * wrong subtype was provided.
+ */
+ return -EINVAL;
+ }
+
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCI, true, qp);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCT, true, qp);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_DCI_STREAM,
+ MLX5_CAP_GEN(mdev, log_max_dci_stream_channels),
+ qp);
+
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE,
+ MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_ALLOW_SCATTER_CQE,
+ MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
+
+ if (qp->type == IB_QPT_RAW_PACKET) {
+ cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) ||
+ MLX5_CAP_ETH(mdev, tunnel_stateless_gre) ||
+ MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TUNNEL_OFFLOADS,
+ cond, qp);
+ process_vendor_flag(dev, &flags,
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC, true,
+ qp);
+ process_vendor_flag(dev, &flags,
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC, true,
+ qp);
+ }
+
+ if (qp->type == IB_QPT_RC)
+ process_vendor_flag(dev, &flags,
+ MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE,
+ MLX5_CAP_GEN(mdev, qp_packet_based), qp);
+
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_BFREG_INDEX, true, qp);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_UAR_PAGE_INDEX, true, qp);
+
+ cond = qp->flags_en & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC);
+ if (attr->rwq_ind_tbl && cond) {
+ mlx5_ib_dbg(dev, "RSS RAW QP has unsupported flags 0x%X\n",
+ cond);
+ return -EINVAL;
+ }
+
+ if (flags)
+ mlx5_ib_dbg(dev, "udata has unsupported flags 0x%X\n", flags);
+
+ return (flags) ? -EINVAL : 0;
+ }
+
+static void process_create_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
+ bool cond, struct mlx5_ib_qp *qp)
+{
+ if (!(*flags & flag))
+ return;
+
+ if (cond) {
+ qp->flags |= flag;
+ *flags &= ~flag;
+ return;
+ }
+
+ mlx5_ib_dbg(dev, "Verbs create QP flag 0x%X is not supported\n", flag);
+}
+
+static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct ib_qp_init_attr *attr)
+{
+ enum ib_qp_type qp_type = qp->type;
+ struct mlx5_core_dev *mdev = dev->mdev;
+ int create_flags = attr->create_flags;
+ bool cond;
+
+ if (qp_type == MLX5_IB_QPT_DCT)
+ return (create_flags) ? -EINVAL : 0;
+
+ if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl)
+ return (create_flags) ? -EINVAL : 0;
+
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_NETIF_QP,
+ mlx5_get_flow_namespace(dev->mdev,
+ MLX5_FLOW_NAMESPACE_BYPASS),
+ qp);
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_INTEGRITY_EN,
+ MLX5_CAP_GEN(mdev, sho), qp);
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
+ MLX5_CAP_GEN(mdev, block_lb_mc), qp);
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_CROSS_CHANNEL,
+ MLX5_CAP_GEN(mdev, cd), qp);
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_MANAGED_SEND,
+ MLX5_CAP_GEN(mdev, cd), qp);
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_MANAGED_RECV,
+ MLX5_CAP_GEN(mdev, cd), qp);
+
+ if (qp_type == IB_QPT_UD) {
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_IPOIB_UD_LSO,
+ MLX5_CAP_GEN(mdev, ipoib_basic_offloads),
+ qp);
+ cond = MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_IB;
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_SOURCE_QPN,
+ cond, qp);
+ }
+
+ if (qp_type == IB_QPT_RAW_PACKET) {
+ cond = MLX5_CAP_GEN(mdev, eth_net_offloads) &&
+ MLX5_CAP_ETH(mdev, scatter_fcs);
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_SCATTER_FCS, cond, qp);
+
+ cond = MLX5_CAP_GEN(mdev, eth_net_offloads) &&
+ MLX5_CAP_ETH(mdev, vlan_cap);
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_CVLAN_STRIPPING, cond, qp);
+ }
+
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_PCI_WRITE_END_PADDING,
+ MLX5_CAP_GEN(mdev, end_pad), qp);
+
+ process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_SQPN_QP1,
+ true, qp);
+
+ if (create_flags) {
+ mlx5_ib_dbg(dev, "Create QP has unsupported flags 0x%X\n",
+ create_flags);
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
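
process_vendor_flag() and process_create_flag() above share one pattern: a requested flag is consumed (moved into qp->flags_en or qp->flags) only when the device capability allows it, otherwise it is left in the request so the caller can reject it as a whole. A small standalone sketch of that pattern, with invented flag and struct names:

/* Illustrative only; not driver code. */
#include <stdbool.h>
#include <stdio.h>

#define FLAG_A (1u << 0)
#define FLAG_B (1u << 1)

struct qp_state {
        unsigned int flags;     /* flags actually enabled on the QP */
};

static void process_flag(unsigned int *requested, unsigned int flag,
                         bool cap_supported, struct qp_state *qp)
{
        if (!(*requested & flag))
                return;
        if (cap_supported) {
                qp->flags |= flag;
                *requested &= ~flag;    /* consumed */
        }
        /* else: leave the bit set so the caller sees an unsupported leftover */
}

int main(void)
{
        struct qp_state qp = { 0 };
        unsigned int req = FLAG_A | FLAG_B;

        process_flag(&req, FLAG_A, true, &qp);
        process_flag(&req, FLAG_B, false, &qp); /* capability missing */

        if (req)
                printf("unsupported flags left: 0x%x -> reject\n", req);
        return 0;
}
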
+
+static int process_udata_size(struct mlx5_ib_dev *dev,
+ struct mlx5_create_qp_params *params)
+{
+ size_t ucmd = sizeof(struct mlx5_ib_create_qp);
+ struct ib_udata *udata = params->udata;
+ size_t outlen = udata->outlen;
+ size_t inlen = udata->inlen;
+
+ params->outlen = min(outlen, sizeof(struct mlx5_ib_create_qp_resp));
+ params->ucmd_size = ucmd;
+ if (!params->is_rss_raw) {
+ /* User has old rdma-core, which doesn't support ECE */
+ size_t min_inlen =
+ offsetof(struct mlx5_ib_create_qp, ece_options);
+
+ /*
+ * We will check in check_ucmd_data() that the user
+ * cleared everything after inlen.
+ */
+ params->inlen = (inlen < min_inlen) ? 0 : min(inlen, ucmd);
+ goto out;
+ }
+
+ /* RSS RAW QP */
+ if (inlen < offsetofend(struct mlx5_ib_create_qp_rss, flags))
+ return -EINVAL;
+
+ if (outlen < offsetofend(struct mlx5_ib_create_qp_resp, bfreg_index))
+ return -EINVAL;
+
+ ucmd = sizeof(struct mlx5_ib_create_qp_rss);
+ params->ucmd_size = ucmd;
+ if (inlen > ucmd && !ib_is_udata_cleared(udata, ucmd, inlen - ucmd))
+ return -EINVAL;
+
+ params->inlen = min(ucmd, inlen);
+out:
+ if (!params->inlen)
+ mlx5_ib_dbg(dev, "udata is too small\n");
+
+ return (params->inlen) ? 0 : -EINVAL;
+}
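
process_udata_size() clamps the lengths so that old and new rdma-core binaries interoperate: at most the kernel's command-struct size is copied in, and the response is truncated to what userspace can accept. A rough standalone illustration of the clamping, using an invented command struct:

/* Illustrative only: the struct and lengths below are made up. */
#include <stddef.h>
#include <stdio.h>

struct create_cmd {                     /* hypothetical uapi command */
        unsigned long long buf_addr;
        unsigned int flags;
        unsigned int ece_options;       /* newer field */
};

#define offsetofend(T, m)       (offsetof(T, m) + sizeof(((T *)0)->m))
#define min(a, b)               ((a) < (b) ? (a) : (b))

int main(void)
{
        size_t inlen_old_user = offsetofend(struct create_cmd, flags); /* old library */
        size_t inlen_new_user = sizeof(struct create_cmd) + 32;        /* future library */

        printf("copy %zu bytes\n", min(inlen_old_user, sizeof(struct create_cmd)));
        printf("copy %zu bytes\n", min(inlen_new_user, sizeof(struct create_cmd)));
        return 0;
}
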
+
+static int create_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ int err;
+
+ if (params->is_rss_raw) {
+ err = create_rss_raw_qp_tir(dev, pd, qp, params);
+ goto out;
+ }
+
+ switch (qp->type) {
+ case MLX5_IB_QPT_DCT:
+ err = create_dct(dev, pd, qp, params);
+ break;
+ case MLX5_IB_QPT_DCI:
+ err = create_dci(dev, pd, qp, params);
+ break;
+ case IB_QPT_XRC_TGT:
+ err = create_xrc_tgt_qp(dev, qp, params);
+ break;
case IB_QPT_GSI:
+ err = mlx5_ib_create_gsi(pd, qp, params->attr);
+ break;
+ case MLX5_IB_QPT_HW_GSI:
+ rdma_restrack_no_track(&qp->ibqp.res);
+ fallthrough;
case MLX5_IB_QPT_REG_UMR:
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return ERR_PTR(-ENOMEM);
+ default:
+ if (params->udata)
+ err = create_user_qp(dev, pd, qp, params);
+ else
+ err = create_kernel_qp(dev, pd, qp, params);
+ }
- err = create_qp_common(dev, pd, init_attr, udata, qp);
+out:
+ if (err) {
+ mlx5_ib_err(dev, "Create QP type %d failed\n", qp->type);
+ return err;
+ }
+
+ if (is_qp0(qp->type))
+ qp->ibqp.qp_num = 0;
+ else if (is_qp1(qp->type))
+ qp->ibqp.qp_num = 1;
+ else
+ qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;
+
+ mlx5_ib_dbg(dev,
+ "QP type %d, ib qpn 0x%X, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x, ece 0x%x\n",
+ qp->type, qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
+ params->attr->recv_cq ? to_mcq(params->attr->recv_cq)->mcq.cqn :
+ -1,
+ params->attr->send_cq ? to_mcq(params->attr->send_cq)->mcq.cqn :
+ -1,
+ params->resp.ece_options);
+
+ return 0;
+}
+
+static int check_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct ib_qp_init_attr *attr)
+{
+ int ret = 0;
+
+ switch (qp->type) {
+ case MLX5_IB_QPT_DCT:
+ ret = (!attr->srq || !attr->recv_cq) ? -EINVAL : 0;
+ break;
+ case MLX5_IB_QPT_DCI:
+ ret = (attr->cap.max_recv_wr || attr->cap.max_recv_sge) ?
+ -EINVAL :
+ 0;
+ break;
+ case IB_QPT_RAW_PACKET:
+ ret = (attr->rwq_ind_tbl && attr->send_cq) ? -EINVAL : 0;
+ break;
+ default:
+ break;
+ }
+
+ if (ret)
+ mlx5_ib_dbg(dev, "QP type %d has wrong attributes\n", qp->type);
+
+ return ret;
+}
+
+static int get_qp_uidx(struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ struct mlx5_ib_create_qp *ucmd = params->ucmd;
+ struct ib_udata *udata = params->udata;
+ struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+
+ if (params->is_rss_raw)
+ return 0;
+
+ return get_qp_user_index(ucontext, ucmd, sizeof(*ucmd), &params->uidx);
+}
+
+static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
+{
+ struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device);
+
+ if (mqp->state == IB_QPS_RTR) {
+ int err;
+
+ err = mlx5_core_destroy_dct(dev, &mqp->dct.mdct);
if (err) {
- mlx5_ib_dbg(dev, "create_qp_common failed\n");
- kfree(qp);
- return ERR_PTR(err);
+ mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err);
+ return err;
}
+ }
- if (is_qp0(init_attr->qp_type))
- qp->ibqp.qp_num = 0;
- else if (is_qp1(init_attr->qp_type))
- qp->ibqp.qp_num = 1;
- else
- qp->ibqp.qp_num = qp->mqp.qpn;
+ kfree(mqp->dct.in);
+ return 0;
+}
- mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
- qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn,
- to_mcq(init_attr->send_cq)->mcq.cqn);
+static int check_ucmd_data(struct mlx5_ib_dev *dev,
+ struct mlx5_create_qp_params *params)
+{
+ struct ib_udata *udata = params->udata;
+ size_t size, last;
+ int ret;
- qp->xrcdn = xrcdn;
+ if (params->is_rss_raw)
+ /*
+ * These QPs don't have a "reserved" field in their
+ * create_qp input struct, so their data is always valid.
+ */
+ last = sizeof(struct mlx5_ib_create_qp_rss);
+ else
+ last = offsetof(struct mlx5_ib_create_qp, reserved);
- break;
+ if (udata->inlen <= last)
+ return 0;
- case IB_QPT_RAW_IPV6:
- case IB_QPT_RAW_ETHERTYPE:
- case IB_QPT_RAW_PACKET:
- case IB_QPT_MAX:
+ /*
+ * Userspace provides different create_qp structures depending on
+ * the flow, and we need to know whether it cleared the memory
+ * after our create_qp struct ends.
+ */
+ size = udata->inlen - last;
+ ret = ib_is_udata_cleared(params->udata, last, size);
+ if (!ret)
+ mlx5_ib_dbg(
+ dev,
+ "udata is not cleared, inlen = %zu, ucmd = %zu, last = %zu, size = %zu\n",
+ udata->inlen, params->ucmd_size, last, size);
+ return ret ? 0 : -EINVAL;
+}
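
check_ucmd_data() enforces the usual extensibility rule: if userspace passes more bytes than the kernel's command struct covers, the unknown tail must be all zeros. A standalone sketch of that check (not the actual ib_is_udata_cleared() implementation):

/* Illustrative only. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static int tail_is_cleared(const void *buf, size_t known, size_t total)
{
        const unsigned char *p = buf;
        size_t i;

        for (i = known; i < total; i++)
                if (p[i])
                        return 0;
        return 1;
}

int main(void)
{
        unsigned char ucmd[16];

        memset(ucmd, 0, sizeof(ucmd));
        ucmd[4] = 0xaa;         /* bytes the kernel understands */
        printf("ok=%d\n", tail_is_cleared(ucmd, 8, sizeof(ucmd)));      /* 1 */
        ucmd[12] = 0x01;        /* garbage in the unknown tail */
        printf("ok=%d\n", tail_is_cleared(ucmd, 8, sizeof(ucmd)));      /* 0 */
        return 0;
}
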
+
+int mlx5_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct mlx5_create_qp_params params = {};
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct ib_pd *pd = ibqp->pd;
+ enum ib_qp_type type;
+ int err;
+
+ err = mlx5_ib_dev_res_srq_init(dev);
+ if (err)
+ return err;
+
+ err = check_qp_type(dev, attr, &type);
+ if (err)
+ return err;
+
+ err = check_valid_flow(dev, pd, attr, udata);
+ if (err)
+ return err;
+
+ params.udata = udata;
+ params.uidx = MLX5_IB_DEFAULT_UIDX;
+ params.attr = attr;
+ params.is_rss_raw = !!attr->rwq_ind_tbl;
+
+ if (udata) {
+ err = process_udata_size(dev, &params);
+ if (err)
+ return err;
+
+ err = check_ucmd_data(dev, &params);
+ if (err)
+ return err;
+
+ params.ucmd = kzalloc(params.ucmd_size, GFP_KERNEL);
+ if (!params.ucmd)
+ return -ENOMEM;
+
+ err = ib_copy_from_udata(params.ucmd, udata, params.inlen);
+ if (err)
+ goto free_ucmd;
+ }
+
+ mutex_init(&qp->mutex);
+ qp->type = type;
+ if (udata) {
+ err = process_vendor_flags(dev, qp, params.ucmd, attr);
+ if (err)
+ goto free_ucmd;
+
+ err = get_qp_uidx(qp, &params);
+ if (err)
+ goto free_ucmd;
+ }
+ err = process_create_flags(dev, qp, attr);
+ if (err)
+ goto free_ucmd;
+
+ err = check_qp_attr(dev, qp, attr);
+ if (err)
+ goto free_ucmd;
+
+ err = create_qp(dev, pd, qp, &params);
+ if (err)
+ goto free_ucmd;
+
+ kfree(params.ucmd);
+ params.ucmd = NULL;
+
+ if (udata)
+ /*
+ * It is safe to copy the response for all user create QP flows,
+ * including MLX5_IB_QPT_DCT, which doesn't need it.
+ * In that case, resp will be filled with zeros.
+ */
+ err = ib_copy_to_udata(udata, &params.resp, params.outlen);
+ if (err)
+ goto destroy_qp;
+
+ return 0;
+
+destroy_qp:
+ switch (qp->type) {
+ case MLX5_IB_QPT_DCT:
+ mlx5_ib_destroy_dct(qp);
+ break;
+ case IB_QPT_GSI:
+ mlx5_ib_destroy_gsi(qp);
+ break;
default:
- mlx5_ib_dbg(dev, "unsupported qp type %d\n",
- init_attr->qp_type);
- /* Don't support raw QPs */
- return ERR_PTR(-EINVAL);
+ destroy_qp_common(dev, qp, udata);
}
- return &qp->ibqp;
+free_ucmd:
+ kfree(params.ucmd);
+ return err;
}
-int mlx5_ib_destroy_qp(struct ib_qp *qp)
+int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct mlx5_ib_qp *mqp = to_mqp(qp);
- destroy_qp_common(dev, mqp);
+ if (mqp->type == IB_QPT_GSI)
+ return mlx5_ib_destroy_gsi(mqp);
- kfree(mqp);
+ if (mqp->type == MLX5_IB_QPT_DCT)
+ return mlx5_ib_destroy_dct(mqp);
+ destroy_qp_common(dev, mqp, udata);
return 0;
}
-static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
- int attr_mask)
+static int set_qpc_atomic_flags(struct mlx5_ib_qp *qp,
+ const struct ib_qp_attr *attr, int attr_mask,
+ void *qpc)
{
- u32 hw_access_flags = 0;
+ struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
u8 dest_rd_atomic;
u32 access_flags;
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
dest_rd_atomic = attr->max_dest_rd_atomic;
else
- dest_rd_atomic = qp->resp_depth;
+ dest_rd_atomic = qp->trans_qp.resp_depth;
if (attr_mask & IB_QP_ACCESS_FLAGS)
access_flags = attr->qp_access_flags;
else
- access_flags = qp->atomic_rd_en;
+ access_flags = qp->trans_qp.atomic_rd_en;
if (!dest_rd_atomic)
access_flags &= IB_ACCESS_REMOTE_WRITE;
- if (access_flags & IB_ACCESS_REMOTE_READ)
- hw_access_flags |= MLX5_QP_BIT_RRE;
- if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
- hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
- if (access_flags & IB_ACCESS_REMOTE_WRITE)
- hw_access_flags |= MLX5_QP_BIT_RWE;
+ MLX5_SET(qpc, qpc, rre, !!(access_flags & IB_ACCESS_REMOTE_READ));
+
+ if (access_flags & IB_ACCESS_REMOTE_ATOMIC) {
+ int atomic_mode;
+
+ atomic_mode = get_atomic_mode(dev, qp);
+ if (atomic_mode < 0)
+ return -EOPNOTSUPP;
- return cpu_to_be32(hw_access_flags);
+ MLX5_SET(qpc, qpc, rae, 1);
+ MLX5_SET(qpc, qpc, atomic_mode, atomic_mode);
+ }
+
+ MLX5_SET(qpc, qpc, rwe, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
+ return 0;
}
enum {
@@ -1346,64 +3410,206 @@ enum {
MLX5_PATH_FLAG_COUNTER = 1 << 2,
};
-static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
+static int mlx5_to_ib_rate_map(u8 rate)
{
- if (rate == IB_RATE_PORT_CURRENT) {
+ static const int rates[] = { IB_RATE_PORT_CURRENT, IB_RATE_56_GBPS,
+ IB_RATE_25_GBPS, IB_RATE_100_GBPS,
+ IB_RATE_200_GBPS, IB_RATE_50_GBPS,
+ IB_RATE_400_GBPS };
+
+ if (rate < ARRAY_SIZE(rates))
+ return rates[rate];
+
+ return rate - MLX5_STAT_RATE_OFFSET;
+}
+
+static int ib_to_mlx5_rate_map(u8 rate)
+{
+ switch (rate) {
+ case IB_RATE_PORT_CURRENT:
return 0;
- } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
- return -EINVAL;
- } else {
- while (rate != IB_RATE_2_5_GBPS &&
- !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
- MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
- --rate;
+ case IB_RATE_56_GBPS:
+ return 1;
+ case IB_RATE_25_GBPS:
+ return 2;
+ case IB_RATE_100_GBPS:
+ return 3;
+ case IB_RATE_200_GBPS:
+ return 4;
+ case IB_RATE_50_GBPS:
+ return 5;
+ case IB_RATE_400_GBPS:
+ return 6;
+ default:
+ return rate + MLX5_STAT_RATE_OFFSET;
}
- return rate + MLX5_STAT_RATE_OFFSET;
+ return 0;
+}
+
+int mlx5r_ib_rate(struct mlx5_ib_dev *dev, u8 rate)
+{
+ u32 stat_rate_support;
+
+ if (rate == IB_RATE_PORT_CURRENT || rate == IB_RATE_800_GBPS)
+ return 0;
+
+ if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_800_GBPS)
+ return -EINVAL;
+
+ stat_rate_support = MLX5_CAP_GEN(dev->mdev, stat_rate_support);
+ while (rate != IB_RATE_PORT_CURRENT &&
+ !(1 << ib_to_mlx5_rate_map(rate) & stat_rate_support))
+ --rate;
+
+ return ib_to_mlx5_rate_map(rate);
+}
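
mlx5r_ib_rate() walks the requested static rate downwards until the firmware's stat_rate_support bitmask advertises a usable value. The sketch below shows the same fallback loop with a made-up rate table and support mask:

/* Illustrative only; rates and mask are invented. */
#include <stdio.h>

static const char *const rate_name[] = { "2.5G", "5G", "10G", "20G", "40G" };

static int pick_rate(int requested, unsigned int support_mask)
{
        while (requested > 0 && !(support_mask & (1u << requested)))
                requested--;    /* fall back to the next lower rate */
        return requested;
}

int main(void)
{
        unsigned int support = (1u << 0) | (1u << 2);   /* only 2.5G and 10G supported */
        int idx = pick_rate(4, support);                /* ask for 40G */

        printf("using %s\n", rate_name[idx]);           /* prints "using 10G" */
        return 0;
}
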
+
+static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
+ struct mlx5_ib_sq *sq, u8 sl,
+ struct ib_pd *pd)
+{
+ void *in;
+ void *tisc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_tis_in, in, bitmask.prio, 1);
+ MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid);
+
+ tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
+ MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1));
+
+ err = mlx5_core_modify_tis(dev, sq->tisn, in);
+
+ kvfree(in);
+
+ return err;
}
-static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
- struct mlx5_qp_path *path, u8 port, int attr_mask,
- u32 path_flags, const struct ib_qp_attr *attr)
+static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
+ struct mlx5_ib_sq *sq, u8 tx_affinity,
+ struct ib_pd *pd)
{
+ void *in;
+ void *tisc;
+ int inlen;
int err;
- path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
- path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;
+ inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_tis_in, in, bitmask.lag_tx_port_affinity, 1);
+ MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid);
+
+ tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
+ MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity);
+
+ err = mlx5_core_modify_tis(dev, sq->tisn, in);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5_set_path_udp_sport(void *path, const struct rdma_ah_attr *ah,
+ u32 lqpn, u32 rqpn)
+
+{
+ u32 fl = ah->grh.flow_label;
+
+ if (!fl)
+ fl = rdma_calc_flow_label(lqpn, rqpn);
+
+ MLX5_SET(ads, path, udp_sport, rdma_flow_label_to_udp_sport(fl));
+}
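
For RoCE UDP-encapsulated GIDs, mlx5_set_path_udp_sport() derives the UDP source port from the flow label, computing one from the local/remote QPNs when none is supplied, so ECMP hashing spreads connections across paths. The sketch below only illustrates the concept; the hash and port mapping are invented, not the rdma-core formulas:

/* Illustrative only. */
#include <stdint.h>
#include <stdio.h>

static uint32_t calc_flow_label(uint32_t lqpn, uint32_t rqpn)
{
        uint32_t v = lqpn ^ (rqpn * 2654435761u);       /* simple mixing, 20-bit label */

        return v & 0xfffff;
}

static uint16_t flow_label_to_udp_sport(uint32_t fl)
{
        /* map into the dynamic/ephemeral port range 49152..65535 */
        return (uint16_t)(49152 + (fl % 16384));
}

int main(void)
{
        uint32_t fl = calc_flow_label(0x112, 0x2a7);

        printf("flow label 0x%05x -> udp sport %u\n", fl, flow_label_to_udp_sport(fl));
        return 0;
}
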
+
+static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ const struct rdma_ah_attr *ah, void *path, u8 port,
+ int attr_mask, u32 path_flags,
+ const struct ib_qp_attr *attr, bool alt)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah);
+ int err;
+ enum ib_gid_type gid_type;
+ u8 ah_flags = rdma_ah_get_ah_flags(ah);
+ u8 sl = rdma_ah_get_sl(ah);
if (attr_mask & IB_QP_PKEY_INDEX)
- path->pkey_index = attr->pkey_index;
+ MLX5_SET(ads, path, pkey_index,
+ alt ? attr->alt_pkey_index : attr->pkey_index);
- path->grh_mlid = ah->src_path_bits & 0x7f;
- path->rlid = cpu_to_be16(ah->dlid);
+ if (ah_flags & IB_AH_GRH) {
+ const struct ib_port_immutable *immutable;
- if (ah->ah_flags & IB_AH_GRH) {
- if (ah->grh.sgid_index >=
- dev->mdev->port_caps[port - 1].gid_table_len) {
+ immutable = ib_port_immutable_read(&dev->ib_dev, port);
+ if (grh->sgid_index >= immutable->gid_tbl_len) {
pr_err("sgid_index (%u) too large. max is %d\n",
- ah->grh.sgid_index,
- dev->mdev->port_caps[port - 1].gid_table_len);
+ grh->sgid_index,
+ immutable->gid_tbl_len);
return -EINVAL;
}
- path->grh_mlid |= 1 << 7;
- path->mgid_index = ah->grh.sgid_index;
- path->hop_limit = ah->grh.hop_limit;
- path->tclass_flowlabel =
- cpu_to_be32((ah->grh.traffic_class << 20) |
- (ah->grh.flow_label));
- memcpy(path->rgid, ah->grh.dgid.raw, 16);
}
- err = ib_rate_to_mlx5(dev, ah->static_rate);
+ if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) {
+ if (!(ah_flags & IB_AH_GRH))
+ return -EINVAL;
+
+ ether_addr_copy(MLX5_ADDR_OF(ads, path, rmac_47_32),
+ ah->roce.dmac);
+ if ((qp->type == IB_QPT_RC ||
+ qp->type == IB_QPT_UC ||
+ qp->type == IB_QPT_XRC_INI ||
+ qp->type == IB_QPT_XRC_TGT) &&
+ (grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) &&
+ (attr_mask & IB_QP_DEST_QPN))
+ mlx5_set_path_udp_sport(path, ah,
+ qp->ibqp.qp_num,
+ attr->dest_qp_num);
+ MLX5_SET(ads, path, eth_prio, sl & 0x7);
+ gid_type = ah->grh.sgid_attr->gid_type;
+ if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+ MLX5_SET(ads, path, dscp, grh->traffic_class >> 2);
+ } else {
+ MLX5_SET(ads, path, fl, !!(path_flags & MLX5_PATH_FLAG_FL));
+ MLX5_SET(ads, path, free_ar,
+ !!(path_flags & MLX5_PATH_FLAG_FREE_AR));
+ MLX5_SET(ads, path, rlid, rdma_ah_get_dlid(ah));
+ MLX5_SET(ads, path, mlid, rdma_ah_get_path_bits(ah));
+ MLX5_SET(ads, path, grh, !!(ah_flags & IB_AH_GRH));
+ MLX5_SET(ads, path, sl, sl);
+ }
+
+ if (ah_flags & IB_AH_GRH) {
+ MLX5_SET(ads, path, src_addr_index, grh->sgid_index);
+ MLX5_SET(ads, path, hop_limit, grh->hop_limit);
+ MLX5_SET(ads, path, tclass, grh->traffic_class);
+ MLX5_SET(ads, path, flow_label, grh->flow_label);
+ memcpy(MLX5_ADDR_OF(ads, path, rgid_rip), grh->dgid.raw,
+ sizeof(grh->dgid.raw));
+ }
+
+ err = mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah));
if (err < 0)
return err;
- path->static_rate = err;
- path->port = port;
+ MLX5_SET(ads, path, stat_rate, err);
+ MLX5_SET(ads, path, vhca_port_num, port);
if (attr_mask & IB_QP_TIMEOUT)
- path->ackto_lt = attr->timeout << 3;
+ MLX5_SET(ads, path, ack_timeout,
+ alt ? attr->alt_timeout : attr->timeout);
- path->sl = ah->sl & 0xf;
+ if ((qp->type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
+ return modify_raw_packet_eth_prio(dev->mdev,
+ &qp->raw_packet_qp.sq,
+ sl & 0xf, qp->ibqp.pd);
return 0;
}
@@ -1415,23 +3621,33 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_PKEY_INDEX |
- MLX5_QP_OPTPAR_PRI_PORT,
+ MLX5_QP_OPTPAR_PRI_PORT |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_PKEY_INDEX |
- MLX5_QP_OPTPAR_PRI_PORT,
+ MLX5_QP_OPTPAR_PRI_PORT |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
MLX5_QP_OPTPAR_Q_KEY |
MLX5_QP_OPTPAR_PRI_PORT,
+ [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_PRI_PORT |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
},
[MLX5_QP_STATE_RTR] = {
[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
MLX5_QP_OPTPAR_RRE |
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RWE |
- MLX5_QP_OPTPAR_PKEY_INDEX,
+ MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
MLX5_QP_OPTPAR_RWE |
- MLX5_QP_OPTPAR_PKEY_INDEX,
+ MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
MLX5_QP_OPTPAR_Q_KEY,
[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
@@ -1440,7 +3656,8 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
MLX5_QP_OPTPAR_RRE |
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RWE |
- MLX5_QP_OPTPAR_PKEY_INDEX,
+ MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
},
},
[MLX5_QP_STATE_RTR] = {
@@ -1455,6 +3672,12 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_PM_STATE,
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+ MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PM_STATE |
+ MLX5_QP_OPTPAR_RNR_TIMEOUT,
},
},
[MLX5_QP_STATE_RTS] = {
@@ -1471,6 +3694,12 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY |
MLX5_QP_OPTPAR_SRQN |
MLX5_QP_OPTPAR_CQN_RCV,
+ [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_RNR_TIMEOUT |
+ MLX5_QP_OPTPAR_PM_STATE |
+ MLX5_QP_OPTPAR_ALT_ADDR_PATH,
},
},
[MLX5_QP_STATE_SQER] = {
@@ -1482,6 +3711,21 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RRE,
+ [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RRE,
+ },
+ },
+ [MLX5_QP_STATE_SQD] = {
+ [MLX5_QP_STATE_RTS] = {
+ [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
+ [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RRE,
},
},
};
@@ -1551,54 +3795,432 @@ static int ib_mask_to_mlx5_opt(int ib_mask)
return result;
}
+static int modify_raw_packet_qp_rq(
+ struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq, int new_state,
+ const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
+{
+ void *in;
+ void *rqc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_rq_in, in, rq_state, rq->state);
+ MLX5_SET(modify_rq_in, in, uid, to_mpd(pd)->uid);
+
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+ MLX5_SET(rqc, rqc, state, new_state);
+
+ if (raw_qp_param->set_mask & MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID) {
+ if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
+ MLX5_SET64(modify_rq_in, in, modify_bitmask,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
+ MLX5_SET(rqc, rqc, counter_set_id, raw_qp_param->rq_q_ctr_id);
+ } else
+ dev_info_once(
+ &dev->ib_dev.dev,
+ "RAW PACKET QP counters are not supported on current FW\n");
+ }
+
+ err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in);
+ if (err)
+ goto out;
+
+ rq->state = new_state;
+
+out:
+ kvfree(in);
+ return err;
+}
+
+static int modify_raw_packet_qp_sq(
+ struct mlx5_core_dev *dev, struct mlx5_ib_sq *sq, int new_state,
+ const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
+{
+ struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
+ struct mlx5_rate_limit old_rl = ibqp->rl;
+ struct mlx5_rate_limit new_rl = old_rl;
+ bool new_rate_added = false;
+ u16 rl_index = 0;
+ void *in;
+ void *sqc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_sq_in, in, uid, to_mpd(pd)->uid);
+ MLX5_SET(modify_sq_in, in, sq_state, sq->state);
+
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+ MLX5_SET(sqc, sqc, state, new_state);
+
+ if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) {
+ if (new_state != MLX5_SQC_STATE_RDY)
+ pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
+ __func__);
+ else
+ new_rl = raw_qp_param->rl;
+ }
+
+ if (!mlx5_rl_are_equal(&old_rl, &new_rl)) {
+ if (new_rl.rate) {
+ err = mlx5_rl_add_rate(dev, &rl_index, &new_rl);
+ if (err) {
+ pr_err("Failed configuring rate limit(err %d): \
+ rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
+ err, new_rl.rate, new_rl.max_burst_sz,
+ new_rl.typical_pkt_sz);
+
+ goto out;
+ }
+ new_rate_added = true;
+ }
+
+ MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
+ /* index 0 means no limit */
+ MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
+ }
+
+ err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in);
+ if (err) {
+ /* Remove new rate from table if failed */
+ if (new_rate_added)
+ mlx5_rl_remove_rate(dev, &new_rl);
+ goto out;
+ }
+
+ /* Only remove the old rate after new rate was set */
+ if ((old_rl.rate && !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
+ (new_state != MLX5_SQC_STATE_RDY)) {
+ mlx5_rl_remove_rate(dev, &old_rl);
+ if (new_state != MLX5_SQC_STATE_RDY)
+ memset(&new_rl, 0, sizeof(new_rl));
+ }
+
+ ibqp->rl = new_rl;
+ sq->state = new_state;
+
+out:
+ kvfree(in);
+ return err;
+}
+
+static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ const struct mlx5_modify_raw_qp_param *raw_qp_param,
+ u8 tx_affinity)
+{
+ struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
+ struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
+ struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
+ int modify_rq = !!qp->rq.wqe_cnt;
+ int modify_sq = !!qp->sq.wqe_cnt;
+ int rq_state;
+ int sq_state;
+ int err;
+
+ switch (raw_qp_param->operation) {
+ case MLX5_CMD_OP_RST2INIT_QP:
+ rq_state = MLX5_RQC_STATE_RDY;
+ sq_state = MLX5_SQC_STATE_RST;
+ break;
+ case MLX5_CMD_OP_2ERR_QP:
+ rq_state = MLX5_RQC_STATE_ERR;
+ sq_state = MLX5_SQC_STATE_ERR;
+ break;
+ case MLX5_CMD_OP_2RST_QP:
+ rq_state = MLX5_RQC_STATE_RST;
+ sq_state = MLX5_SQC_STATE_RST;
+ break;
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ if (raw_qp_param->set_mask & ~MLX5_RAW_QP_RATE_LIMIT)
+ return -EINVAL;
+
+ modify_rq = 0;
+ sq_state = MLX5_SQC_STATE_RDY;
+ break;
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ if (raw_qp_param->set_mask)
+ return -EINVAL;
+ else
+ return 0;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (modify_rq) {
+ err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param,
+ qp->ibqp.pd);
+ if (err)
+ return err;
+ }
+
+ if (modify_sq) {
+ struct mlx5_flow_handle *flow_rule;
+
+ if (tx_affinity) {
+ err = modify_raw_packet_tx_affinity(dev->mdev, sq,
+ tx_affinity,
+ qp->ibqp.pd);
+ if (err)
+ return err;
+ }
+
+ flow_rule = create_flow_rule_vport_sq(dev, sq,
+ raw_qp_param->port);
+ if (IS_ERR(flow_rule))
+ return PTR_ERR(flow_rule);
+
+ err = modify_raw_packet_qp_sq(dev->mdev, sq, sq_state,
+ raw_qp_param, qp->ibqp.pd);
+ if (err) {
+ if (flow_rule)
+ mlx5_del_flow_rules(flow_rule);
+ return err;
+ }
+
+ if (flow_rule) {
+ destroy_flow_rule_vport_sq(sq);
+ sq->flow_rule = flow_rule;
+ }
+
+ return err;
+ }
+
+ return 0;
+}
+
+static unsigned int get_tx_affinity_rr(struct mlx5_ib_dev *dev,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+ u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ atomic_t *tx_port_affinity;
+
+ if (ucontext)
+ tx_port_affinity = &ucontext->tx_port_affinity;
+ else
+ tx_port_affinity = &dev->port[port_num].roce.tx_port_affinity;
+
+ return (unsigned int)atomic_add_return(1, tx_port_affinity) %
+ (dev->lag_active ? dev->lag_ports : MLX5_CAP_GEN(dev->mdev, num_lag_ports)) + 1;
+}
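
get_tx_affinity_rr() spreads QPs across LAG ports by incrementing an atomic counter and reducing it modulo the port count, giving a 1-based affinity. A tiny C11 sketch of the same round-robin idea:

/* Illustrative only. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint tx_port_affinity;

static unsigned int next_tx_port(unsigned int num_ports)
{
        return atomic_fetch_add(&tx_port_affinity, 1) % num_ports + 1;
}

int main(void)
{
        for (int i = 0; i < 5; i++)
                printf("QP %d -> port %u\n", i, next_tx_port(2));       /* 1,2,1,2,1 */
        return 0;
}
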
+
+static bool qp_supports_affinity(struct mlx5_ib_qp *qp)
+{
+ if ((qp->type == IB_QPT_RC) || (qp->type == IB_QPT_UD) ||
+ (qp->type == IB_QPT_UC) || (qp->type == IB_QPT_RAW_PACKET) ||
+ (qp->type == IB_QPT_XRC_INI) || (qp->type == IB_QPT_XRC_TGT) ||
+ (qp->type == MLX5_IB_QPT_DCI))
+ return true;
+ return false;
+}
+
+static unsigned int get_tx_affinity(struct ib_qp *qp,
+ const struct ib_qp_attr *attr,
+ int attr_mask, u8 init,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+ struct mlx5_ib_qp_base *qp_base;
+ unsigned int tx_affinity;
+
+ if (!(mlx5_ib_lag_should_assign_affinity(dev) &&
+ qp_supports_affinity(mqp)))
+ return 0;
+
+ if (mqp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
+ tx_affinity = mqp->gsi_lag_port;
+ else if (init)
+ tx_affinity = get_tx_affinity_rr(dev, udata);
+ else if ((attr_mask & IB_QP_AV) && attr->xmit_slave)
+ tx_affinity =
+ mlx5_lag_get_slave_port(dev->mdev, attr->xmit_slave);
+ else
+ return 0;
+
+ qp_base = &mqp->trans_qp.base;
+ if (ucontext)
+ mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x ucontext %p\n",
+ tx_affinity, qp_base->mqp.qpn, ucontext);
+ else
+ mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x\n",
+ tx_affinity, qp_base->mqp.qpn);
+ return tx_affinity;
+}
+
+static int __mlx5_ib_qp_set_raw_qp_counter(struct mlx5_ib_qp *qp, u32 set_id,
+ struct mlx5_core_dev *mdev)
+{
+ struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
+ struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
+ u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {};
+ void *rqc;
+
+ if (!qp->rq.wqe_cnt)
+ return 0;
+
+ MLX5_SET(modify_rq_in, in, rq_state, rq->state);
+ MLX5_SET(modify_rq_in, in, uid, to_mpd(qp->ibqp.pd)->uid);
+
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
+
+ MLX5_SET64(modify_rq_in, in, modify_bitmask,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
+ MLX5_SET(rqc, rqc, counter_set_id, set_id);
+
+ return mlx5_core_modify_rq(mdev, rq->base.mqp.qpn, in);
+}
+
+static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
+ struct rdma_counter *counter)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ u32 in[MLX5_ST_SZ_DW(rts2rts_qp_in)] = {};
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+ struct mlx5_ib_qp_base *base;
+ u32 set_id;
+ u32 *qpc;
+
+ if (counter)
+ set_id = counter->id;
+ else
+ set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1);
+
+ if (mqp->type == IB_QPT_RAW_PACKET)
+ return __mlx5_ib_qp_set_raw_qp_counter(mqp, set_id, dev->mdev);
+
+ base = &mqp->trans_qp.base;
+ MLX5_SET(rts2rts_qp_in, in, opcode, MLX5_CMD_OP_RTS2RTS_QP);
+ MLX5_SET(rts2rts_qp_in, in, qpn, base->mqp.qpn);
+ MLX5_SET(rts2rts_qp_in, in, uid, base->mqp.uid);
+ MLX5_SET(rts2rts_qp_in, in, opt_param_mask,
+ MLX5_QP_OPTPAR_COUNTER_SET_ID);
+
+ qpc = MLX5_ADDR_OF(rts2rts_qp_in, in, qpc);
+ MLX5_SET(qpc, qpc, counter_set_id, set_id);
+ return mlx5_cmd_exec_in(dev->mdev, rts2rts_qp, in);
+}
+
static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
- enum ib_qp_state cur_state, enum ib_qp_state new_state)
+ enum ib_qp_state cur_state,
+ enum ib_qp_state new_state,
+ const struct mlx5_ib_modify_qp *ucmd,
+ struct mlx5_ib_modify_qp_resp *resp,
+ struct ib_udata *udata)
{
+ static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
+ [MLX5_QP_STATE_RST] = {
+ [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
+ [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
+ [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_RST2INIT_QP,
+ },
+ [MLX5_QP_STATE_INIT] = {
+ [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
+ [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
+ [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_INIT2INIT_QP,
+ [MLX5_QP_STATE_RTR] = MLX5_CMD_OP_INIT2RTR_QP,
+ },
+ [MLX5_QP_STATE_RTR] = {
+ [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
+ [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
+ [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTR2RTS_QP,
+ },
+ [MLX5_QP_STATE_RTS] = {
+ [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
+ [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
+ [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP,
+ },
+ [MLX5_QP_STATE_SQD] = {
+ [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
+ [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
+ [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQD_RTS_QP,
+ },
+ [MLX5_QP_STATE_SQER] = {
+ [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
+ [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
+ [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQERR2RTS_QP,
+ },
+ [MLX5_QP_STATE_ERR] = {
+ [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
+ [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
+ }
+ };
+
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
struct mlx5_ib_cq *send_cq, *recv_cq;
- struct mlx5_qp_context *context;
- struct mlx5_modify_qp_mbox_in *in;
struct mlx5_ib_pd *pd;
enum mlx5_qp_state mlx5_cur, mlx5_new;
- enum mlx5_qp_optpar optpar;
- int sqd_event;
+ void *qpc, *pri_path, *alt_path;
+ enum mlx5_qp_optpar optpar = 0;
+ u32 set_id = 0;
int mlx5_st;
int err;
+ u16 op;
+ u8 tx_affinity = 0;
- in = kzalloc(sizeof(*in), GFP_KERNEL);
- if (!in)
- return -ENOMEM;
+ mlx5_st = to_mlx5_st(qp->type);
+ if (mlx5_st < 0)
+ return -EINVAL;
- context = &in->ctx;
- err = to_mlx5_st(ibqp->qp_type);
- if (err < 0)
- goto out;
+ qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
+ if (!qpc)
+ return -ENOMEM;
- context->flags = cpu_to_be32(err << 16);
+ pd = to_mpd(qp->ibqp.pd);
+ MLX5_SET(qpc, qpc, st, mlx5_st);
if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
- context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
} else {
switch (attr->path_mig_state) {
case IB_MIG_MIGRATED:
- context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
break;
case IB_MIG_REARM:
- context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_REARM);
break;
case IB_MIG_ARMED:
- context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_ARMED);
break;
}
}
- if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
- context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
- } else if (ibqp->qp_type == IB_QPT_UD ||
- ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
- context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
+ tx_affinity = get_tx_affinity(ibqp, attr, attr_mask,
+ cur_state == IB_QPS_RESET &&
+ new_state == IB_QPS_INIT, udata);
+
+ MLX5_SET(qpc, qpc, lag_tx_port_affinity, tx_affinity);
+ if (tx_affinity && new_state == IB_QPS_RTR &&
+ MLX5_CAP_GEN(dev->mdev, init2_lag_tx_port_affinity))
+ optpar |= MLX5_QP_OPTPAR_LAG_TX_AFF;
+
+ if (is_sqp(qp->type)) {
+ MLX5_SET(qpc, qpc, mtu, IB_MTU_256);
+ MLX5_SET(qpc, qpc, log_msg_max, 8);
+ } else if ((qp->type == IB_QPT_UD &&
+ !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) ||
+ qp->type == MLX5_IB_QPT_REG_UMR) {
+ MLX5_SET(qpc, qpc, mtu, IB_MTU_4096);
+ MLX5_SET(qpc, qpc, log_msg_max, 12);
} else if (attr_mask & IB_QP_PATH_MTU) {
if (attr->path_mtu < IB_MTU_256 ||
attr->path_mtu > IB_MTU_4096) {
@@ -1606,198 +4228,552 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
err = -EINVAL;
goto out;
}
- context->mtu_msgmax = (attr->path_mtu << 5) |
- (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
+ MLX5_SET(qpc, qpc, mtu, attr->path_mtu);
+ MLX5_SET(qpc, qpc, log_msg_max,
+ MLX5_CAP_GEN(dev->mdev, log_max_msg));
}
if (attr_mask & IB_QP_DEST_QPN)
- context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
+ MLX5_SET(qpc, qpc, remote_qpn, attr->dest_qp_num);
+
+ pri_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
+ alt_path = MLX5_ADDR_OF(qpc, qpc, secondary_address_path);
if (attr_mask & IB_QP_PKEY_INDEX)
- context->pri_path.pkey_index = attr->pkey_index;
+ MLX5_SET(ads, pri_path, pkey_index, attr->pkey_index);
/* todo implement counter_index functionality */
- if (is_sqp(ibqp->qp_type))
- context->pri_path.port = qp->port;
+ if (dev->ib_dev.type == RDMA_DEVICE_TYPE_SMI && is_qp0(qp->type)) {
+ MLX5_SET(ads, pri_path, vhca_port_num,
+ smi_to_native_portnum(dev, qp->port));
+ if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR)
+ MLX5_SET(ads, pri_path, plane_index, qp->port);
+ } else if (is_sqp(qp->type))
+ MLX5_SET(ads, pri_path, vhca_port_num, qp->port);
if (attr_mask & IB_QP_PORT)
- context->pri_path.port = attr->port_num;
+ MLX5_SET(ads, pri_path, vhca_port_num, attr->port_num);
if (attr_mask & IB_QP_AV) {
- err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path,
- attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
- attr_mask, 0, attr);
+ err = mlx5_set_path(dev, qp, &attr->ah_attr, pri_path,
+ attr_mask & IB_QP_PORT ? attr->port_num :
+ qp->port,
+ attr_mask, 0, attr, false);
if (err)
goto out;
}
if (attr_mask & IB_QP_TIMEOUT)
- context->pri_path.ackto_lt |= attr->timeout << 3;
+ MLX5_SET(ads, pri_path, ack_timeout, attr->timeout);
if (attr_mask & IB_QP_ALT_PATH) {
- err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
- attr->alt_port_num, attr_mask, 0, attr);
+ err = mlx5_set_path(dev, qp, &attr->alt_ah_attr, alt_path,
+ attr->alt_port_num,
+ attr_mask | IB_QP_PKEY_INDEX |
+ IB_QP_TIMEOUT,
+ 0, attr, true);
if (err)
goto out;
}
- pd = get_pd(qp);
- get_cqs(qp, &send_cq, &recv_cq);
+ get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
+ &send_cq, &recv_cq);
+
+ MLX5_SET(qpc, qpc, pd, pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
+ if (send_cq)
+ MLX5_SET(qpc, qpc, cqn_snd, send_cq->mcq.cqn);
+ if (recv_cq)
+ MLX5_SET(qpc, qpc, cqn_rcv, recv_cq->mcq.cqn);
- context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
- context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
- context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
- context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);
+ MLX5_SET(qpc, qpc, log_ack_req_freq, MLX5_IB_ACK_REQ_FREQ);
if (attr_mask & IB_QP_RNR_RETRY)
- context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
+ MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry);
if (attr_mask & IB_QP_RETRY_CNT)
- context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
+ MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
- if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
- if (attr->max_rd_atomic)
- context->params1 |=
- cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
- }
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic)
+ MLX5_SET(qpc, qpc, log_sra_max, fls(attr->max_rd_atomic - 1));
if (attr_mask & IB_QP_SQ_PSN)
- context->next_send_psn = cpu_to_be32(attr->sq_psn);
+ MLX5_SET(qpc, qpc, next_send_psn, attr->sq_psn);
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
- if (attr->max_dest_rd_atomic)
- context->params2 |=
- cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
- }
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic)
+ MLX5_SET(qpc, qpc, log_rra_max,
+ fls(attr->max_dest_rd_atomic - 1));
- if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
- context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);
+ if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
+ err = set_qpc_atomic_flags(qp, attr, attr_mask, qpc);
+ if (err)
+ goto out;
+ }
if (attr_mask & IB_QP_MIN_RNR_TIMER)
- context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
+ MLX5_SET(qpc, qpc, min_rnr_nak, attr->min_rnr_timer);
if (attr_mask & IB_QP_RQ_PSN)
- context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
+ MLX5_SET(qpc, qpc, next_rcv_psn, attr->rq_psn);
if (attr_mask & IB_QP_QKEY)
- context->qkey = cpu_to_be32(attr->qkey);
+ MLX5_SET(qpc, qpc, q_key, attr->qkey);
if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
- context->db_rec_addr = cpu_to_be64(qp->db.dma);
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
- if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
- attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
- sqd_event = 1;
- else
- sqd_event = 0;
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
+ qp->port) - 1;
+
+ /* Underlay port should be used - index 0 function per port */
+ if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
+ port_num = 0;
+
+ if (ibqp->counter)
+ set_id = ibqp->counter->id;
+ else
+ set_id = mlx5_ib_get_counters_id(dev, port_num);
+ MLX5_SET(qpc, qpc, counter_set_id, set_id);
+ }
if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
- context->sq_crq_size |= cpu_to_be16(1 << 4);
+ MLX5_SET(qpc, qpc, rlky, 1);
+
+ if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
+ MLX5_SET(qpc, qpc, deth_sqpn, 1);
+ if (qp->is_ooo_rq && cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+ MLX5_SET(qpc, qpc, dp_ordering_1, 1);
+ MLX5_SET(qpc, qpc, dp_ordering_force, 1);
+ }
mlx5_cur = to_mlx5_state(cur_state);
mlx5_new = to_mlx5_state(new_state);
- mlx5_st = to_mlx5_st(ibqp->qp_type);
- if (mlx5_st < 0)
- goto out;
- /* If moving to a reset or error state, we must disable page faults on
- * this QP and flush all current page faults. Otherwise a stale page
- * fault may attempt to work on this QP after it is reset and moved
- * again to RTS, and may cause the driver and the device to get out of
- * sync. */
- if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
- (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
- mlx5_ib_qp_disable_pagefaults(qp);
+ if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
+ !optab[mlx5_cur][mlx5_new]) {
+ err = -EINVAL;
+ goto out;
+ }
- optpar = ib_mask_to_mlx5_opt(attr_mask);
+ op = optab[mlx5_cur][mlx5_new];
+ optpar |= ib_mask_to_mlx5_opt(attr_mask);
optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
- in->optparam = cpu_to_be32(optpar);
- err = mlx5_core_qp_modify(dev->mdev, to_mlx5_state(cur_state),
- to_mlx5_state(new_state), in, sqd_event,
- &qp->mqp);
+
+ if (qp->type == IB_QPT_RAW_PACKET ||
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
+ struct mlx5_modify_raw_qp_param raw_qp_param = {};
+
+ raw_qp_param.operation = op;
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ raw_qp_param.rq_q_ctr_id = set_id;
+ raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
+ }
+
+ if (attr_mask & IB_QP_PORT)
+ raw_qp_param.port = attr->port_num;
+
+ if (attr_mask & IB_QP_RATE_LIMIT) {
+ raw_qp_param.rl.rate = attr->rate_limit;
+
+ if (ucmd->burst_info.max_burst_sz) {
+ if (attr->rate_limit &&
+ MLX5_CAP_QOS(dev->mdev, packet_pacing_burst_bound)) {
+ raw_qp_param.rl.max_burst_sz =
+ ucmd->burst_info.max_burst_sz;
+ } else {
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (ucmd->burst_info.typical_pkt_sz) {
+ if (attr->rate_limit &&
+ MLX5_CAP_QOS(dev->mdev, packet_pacing_typical_size)) {
+ raw_qp_param.rl.typical_pkt_sz =
+ ucmd->burst_info.typical_pkt_sz;
+ } else {
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ raw_qp_param.set_mask |= MLX5_RAW_QP_RATE_LIMIT;
+ }
+
+ err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
+ } else {
+ if (udata) {
+ /* For the kernel flows, the resp will stay zero */
+ resp->ece_options =
+ MLX5_CAP_GEN(dev->mdev, ece_support) ?
+ ucmd->ece_options : 0;
+ resp->response_length = sizeof(*resp);
+ }
+ err = mlx5_core_qp_modify(dev, op, optpar, qpc, &base->mqp,
+ &resp->ece_options);
+ }
+
if (err)
goto out;
- if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
- mlx5_ib_qp_enable_pagefaults(qp);
-
qp->state = new_state;
if (attr_mask & IB_QP_ACCESS_FLAGS)
- qp->atomic_rd_en = attr->qp_access_flags;
+ qp->trans_qp.atomic_rd_en = attr->qp_access_flags;
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
- qp->resp_depth = attr->max_dest_rd_atomic;
+ qp->trans_qp.resp_depth = attr->max_dest_rd_atomic;
if (attr_mask & IB_QP_PORT)
qp->port = attr->port_num;
if (attr_mask & IB_QP_ALT_PATH)
- qp->alt_port = attr->alt_port_num;
+ qp->trans_qp.alt_port = attr->alt_port_num;
/*
* If we moved a kernel QP to RESET, clean up all old CQ
* entries and reinitialize the QP.
*/
- if (new_state == IB_QPS_RESET && !ibqp->uobject) {
- mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
+ if (new_state == IB_QPS_RESET &&
+ !ibqp->uobject && qp->type != IB_QPT_XRC_TGT) {
+ mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
ibqp->srq ? to_msrq(ibqp->srq) : NULL);
if (send_cq != recv_cq)
- mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
+ mlx5_ib_cq_clean(send_cq, base->mqp.qpn, NULL);
qp->rq.head = 0;
qp->rq.tail = 0;
qp->sq.head = 0;
qp->sq.tail = 0;
qp->sq.cur_post = 0;
+ if (qp->sq.wqe_cnt)
+ qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
qp->sq.last_poll = 0;
qp->db.db[MLX5_RCV_DBR] = 0;
qp->db.db[MLX5_SND_DBR] = 0;
}
+ if ((new_state == IB_QPS_RTS) && qp->counter_pending) {
+ err = __mlx5_ib_qp_set_counter(ibqp, ibqp->counter);
+ if (!err)
+ qp->counter_pending = 0;
+ }
+
out:
- kfree(in);
+ kfree(qpc);
return err;
}
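
The rewritten transition handling above drops the old sqd_event/pagefault bookkeeping and instead looks the firmware command up in optab[mlx5_cur][mlx5_new], rejecting any pair with no entry before masking the optional parameters with opt_mask. A minimal standalone sketch of that table-driven lookup, using hypothetical state names and opcode values rather than the mlx5 definitions:

#include <stdio.h>

enum state { ST_RESET, ST_INIT, ST_RTR, ST_RTS, ST_NUM };

/* 0 means "no firmware command exists for this transition" */
static const int optab[ST_NUM][ST_NUM] = {
	[ST_RESET] = { [ST_INIT] = 0x10 },			/* hypothetical opcodes */
	[ST_INIT]  = { [ST_INIT] = 0x11, [ST_RTR] = 0x12 },
	[ST_RTR]   = { [ST_RTS]  = 0x13 },
	[ST_RTS]   = { [ST_RTS]  = 0x14 },
};

static int lookup_op(enum state cur, enum state new)
{
	if (cur >= ST_NUM || new >= ST_NUM || !optab[cur][new])
		return -1;				/* -EINVAL in the driver */
	return optab[cur][new];
}

int main(void)
{
	printf("INIT->RTR:  op 0x%x\n", lookup_op(ST_INIT, ST_RTR));
	printf("RESET->RTS: %d (rejected, no entry)\n", lookup_op(ST_RESET, ST_RTS));
	return 0;
}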
+static inline bool is_valid_mask(int mask, int req, int opt)
+{
+ if ((mask & req) != req)
+ return false;
+
+ if (mask & ~(req | opt))
+ return false;
+
+ return true;
+}
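
is_valid_mask() above is the usual required/optional bitmask check: every required attribute must be present and nothing outside required|optional may be set. A short self-contained demonstration with made-up attribute bits:

#include <stdbool.h>
#include <stdio.h>

#define ATTR_STATE  (1 << 0)
#define ATTR_PKEY   (1 << 1)
#define ATTR_PORT   (1 << 2)
#define ATTR_AV     (1 << 3)

static bool is_valid_mask(int mask, int req, int opt)
{
	if ((mask & req) != req)
		return false;		/* a required attribute is missing */
	if (mask & ~(req | opt))
		return false;		/* an attribute outside req|opt was passed */
	return true;
}

int main(void)
{
	int req = ATTR_STATE | ATTR_PKEY | ATTR_PORT;

	printf("%d\n", is_valid_mask(req, req, ATTR_AV));		/* 1: exactly the required set */
	printf("%d\n", is_valid_mask(ATTR_STATE | ATTR_PKEY, req, ATTR_AV)); /* 0: PORT missing */
	printf("%d\n", is_valid_mask(req | (1 << 7), req, ATTR_AV));	/* 0: unknown bit set */
	return 0;
}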
+
+/* check valid transition for driver QP types
+ * for now the only QP type that this function supports is DCI
+ */
+static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new_state,
+ enum ib_qp_attr_mask attr_mask)
+{
+ int req = IB_QP_STATE;
+ int opt = 0;
+
+ if (new_state == IB_QPS_RESET) {
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ req |= IB_QP_PKEY_INDEX | IB_QP_PORT;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
+ opt = IB_QP_PKEY_INDEX | IB_QP_PORT;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+ req |= IB_QP_PATH_MTU;
+ opt = IB_QP_PKEY_INDEX | IB_QP_AV;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
+ req |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
+ IB_QP_MAX_QP_RD_ATOMIC | IB_QP_SQ_PSN;
+ opt = IB_QP_MIN_RNR_TIMER;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) {
+ opt = IB_QP_MIN_RNR_TIMER;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state != IB_QPS_RESET && new_state == IB_QPS_ERR) {
+ return is_valid_mask(attr_mask, req, opt);
+ }
+ return false;
+}
+
+/* mlx5_ib_modify_dct: modify a DCT QP
+ * valid transitions are:
+ * RESET to INIT: must set access_flags, pkey_index and port
+ * INIT to RTR : must set min_rnr_timer, tclass, flow_label,
+ * mtu, gid_index and hop_limit
+ * Other transitions and attributes are illegal
+ */
+static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct mlx5_ib_modify_qp *ucmd,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ enum ib_qp_state cur_state, new_state;
+ int required = IB_QP_STATE;
+ void *dctc;
+ int err;
+
+ if (!(attr_mask & IB_QP_STATE))
+ return -EINVAL;
+
+ cur_state = qp->state;
+ new_state = attr->qp_state;
+
+ dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
+ if (MLX5_CAP_GEN(dev->mdev, ece_support) && ucmd->ece_options)
+ /*
+ * DCT doesn't initialize QP till modify command is executed,
+ * so we need to overwrite previously set ECE field if user
+ * provided any value except zero, which means not set/not
+ * valid.
+ */
+ MLX5_SET(dctc, dctc, ece, ucmd->ece_options);
+
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ u16 set_id;
+
+ required |= IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
+ if (!is_valid_mask(attr_mask, required, 0))
+ return -EINVAL;
+
+ if (attr->port_num == 0 ||
+ attr->port_num > dev->num_ports) {
+ mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
+ attr->port_num, dev->num_ports);
+ return -EINVAL;
+ }
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
+ MLX5_SET(dctc, dctc, rre, 1);
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
+ MLX5_SET(dctc, dctc, rwe, 1);
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) {
+ int atomic_mode;
+
+ atomic_mode = get_atomic_mode(dev, qp);
+ if (atomic_mode < 0)
+ return -EOPNOTSUPP;
+
+ MLX5_SET(dctc, dctc, atomic_mode, atomic_mode);
+ MLX5_SET(dctc, dctc, rae, 1);
+ }
+ MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
+ if (mlx5_lag_is_active(dev->mdev))
+ MLX5_SET(dctc, dctc, port,
+ get_tx_affinity_rr(dev, udata));
+ else
+ MLX5_SET(dctc, dctc, port, attr->port_num);
+
+ set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
+ MLX5_SET(dctc, dctc, counter_set_id, set_id);
+
+ qp->port = attr->port_num;
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+ struct mlx5_ib_modify_qp_resp resp = {};
+ u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {};
+ u32 min_resp_len = offsetofend(typeof(resp), dctn);
+
+ if (udata->outlen < min_resp_len)
+ return -EINVAL;
+ /*
+ * If we don't have enough space for the ECE options,
+ * simply indicate it with resp.response_length.
+ */
+ resp.response_length = (udata->outlen < sizeof(resp)) ?
+ min_resp_len :
+ sizeof(resp);
+
+ required |= IB_QP_MIN_RNR_TIMER | IB_QP_AV | IB_QP_PATH_MTU;
+ if (!is_valid_mask(attr_mask, required, 0))
+ return -EINVAL;
+ MLX5_SET(dctc, dctc, min_rnr_nak, attr->min_rnr_timer);
+ MLX5_SET(dctc, dctc, tclass, attr->ah_attr.grh.traffic_class);
+ MLX5_SET(dctc, dctc, flow_label, attr->ah_attr.grh.flow_label);
+ MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
+ MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
+ MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
+ if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
+ MLX5_SET(dctc, dctc, eth_prio, attr->ah_attr.sl & 0x7);
+ if (qp->is_ooo_rq) {
+ MLX5_SET(dctc, dctc, dp_ordering_1, 1);
+ MLX5_SET(dctc, dctc, dp_ordering_force, 1);
+ }
+
+ err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
+ MLX5_ST_SZ_BYTES(create_dct_in), out,
+ sizeof(out));
+ err = mlx5_cmd_check(dev->mdev, err, qp->dct.in, out);
+ if (err)
+ return err;
+ resp.dctn = qp->dct.mdct.mqp.qpn;
+ if (MLX5_CAP_GEN(dev->mdev, ece_support))
+ resp.ece_options = MLX5_GET(create_dct_out, out, ece);
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err) {
+ mlx5_core_destroy_dct(dev, &qp->dct.mdct);
+ return err;
+ }
+ } else {
+ mlx5_ib_warn(dev, "Modify DCT: Invalid transition from %d to %d\n", cur_state, new_state);
+ return -EINVAL;
+ }
+
+ qp->state = new_state;
+ return 0;
+}
+
+static bool mlx5_ib_modify_qp_allowed(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp)
+{
+ if (dev->profile != &raw_eth_profile)
+ return true;
+
+ if (qp->type == IB_QPT_RAW_PACKET || qp->type == MLX5_IB_QPT_REG_UMR)
+ return true;
+
+ return false;
+}
+
+static int validate_rd_atomic(struct mlx5_ib_dev *dev, struct ib_qp_attr *attr,
+ int attr_mask, enum ib_qp_type qp_type)
+{
+ int log_max_ra_res;
+ int log_max_ra_req;
+
+ if (qp_type == MLX5_IB_QPT_DCI) {
+ log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
+ log_max_ra_res_dc);
+ log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
+ log_max_ra_req_dc);
+ } else {
+ log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
+ log_max_ra_res_qp);
+ log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
+ log_max_ra_req_qp);
+ }
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+ attr->max_rd_atomic > log_max_ra_res) {
+ mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
+ attr->max_rd_atomic);
+ return false;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+ attr->max_dest_rd_atomic > log_max_ra_req) {
+ mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
+ attr->max_dest_rd_atomic);
+ return false;
+ }
+ return true;
+}
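
Both the log_sra_max/log_rra_max fields set earlier via fls(attr->max_rd_atomic - 1) and the limits checked in validate_rd_atomic() treat these values as base-2 logarithms; fls(x - 1) is the kernel idiom for ceil(log2(x)) when x >= 1. A standalone sketch of that encoding using the compiler builtin in place of the kernel's fls():

#include <stdio.h>

/* ceil(log2(x)) for x >= 1, mirroring the kernel's fls(x - 1) idiom */
static int log2_ceil(unsigned int x)
{
	if (x <= 1)
		return 0;
	return 32 - __builtin_clz(x - 1);
}

int main(void)
{
	unsigned int vals[] = { 1, 2, 3, 4, 15, 16, 17 };

	/* e.g. max_rd_atomic = 16 outstanding reads -> log value 4 */
	for (unsigned int i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		printf("max_rd_atomic=%2u -> log_sra_max=%d\n",
		       vals[i], log2_ceil(vals[i]));
	return 0;
}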
+
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_ib_modify_qp_resp resp = {};
struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_ib_modify_qp ucmd = {};
+ enum ib_qp_type qp_type;
enum ib_qp_state cur_state, new_state;
int err = -EINVAL;
- int port;
+
+ if (!mlx5_ib_modify_qp_allowed(dev, qp))
+ return -EOPNOTSUPP;
+
+ if (attr_mask & ~(IB_QP_ATTR_STANDARD_BITS | IB_QP_RATE_LIMIT))
+ return -EOPNOTSUPP;
+
+ if (ibqp->rwq_ind_tbl)
+ return -ENOSYS;
+
+ if (udata && udata->inlen) {
+ if (udata->inlen < offsetofend(typeof(ucmd), ece_options))
+ return -EINVAL;
+
+ if (udata->inlen > sizeof(ucmd) &&
+ !ib_is_udata_cleared(udata, sizeof(ucmd),
+ udata->inlen - sizeof(ucmd)))
+ return -EOPNOTSUPP;
+
+ if (ib_copy_from_udata(&ucmd, udata,
+ min(udata->inlen, sizeof(ucmd))))
+ return -EFAULT;
+
+ if (ucmd.comp_mask & ~MLX5_IB_MODIFY_QP_OOO_DP ||
+ memchr_inv(&ucmd.burst_info.reserved, 0,
+ sizeof(ucmd.burst_info.reserved)))
+ return -EOPNOTSUPP;
+
+ if (ucmd.comp_mask & MLX5_IB_MODIFY_QP_OOO_DP) {
+ if (!get_dp_ooo_cap(dev->mdev, qp->type))
+ return -EOPNOTSUPP;
+ qp->is_ooo_rq = 1;
+ }
+ }
+
+ if (qp->type == IB_QPT_GSI)
+ return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
+
+ qp_type = (qp->type == MLX5_IB_QPT_HW_GSI) ? IB_QPT_GSI : qp->type;
+
+ if (qp_type == MLX5_IB_QPT_DCT)
+ return mlx5_ib_modify_dct(ibqp, attr, attr_mask, &ucmd, udata);
mutex_lock(&qp->mutex);
cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
- if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
- !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
- IB_LINK_LAYER_UNSPECIFIED))
+ if (qp->flags & IB_QP_CREATE_SOURCE_QPN) {
+ if (attr_mask & ~(IB_QP_STATE | IB_QP_CUR_STATE)) {
+ mlx5_ib_dbg(dev, "invalid attr_mask 0x%x when underlay QP is used\n",
+ attr_mask);
+ goto out;
+ }
+ } else if (qp_type != MLX5_IB_QPT_REG_UMR &&
+ qp_type != MLX5_IB_QPT_DCI &&
+ !ib_modify_qp_is_ok(cur_state, new_state, qp_type,
+ attr_mask)) {
+ mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
+ cur_state, new_state, qp->type, attr_mask);
goto out;
+ } else if (qp_type == MLX5_IB_QPT_DCI &&
+ !modify_dci_qp_is_ok(cur_state, new_state, attr_mask)) {
+ mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
+ cur_state, new_state, qp_type, attr_mask);
+ goto out;
+ }
if ((attr_mask & IB_QP_PORT) &&
(attr->port_num == 0 ||
- attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)))
+ attr->port_num > dev->num_ports)) {
+ mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
+ attr->port_num, dev->num_ports);
goto out;
-
- if (attr_mask & IB_QP_PKEY_INDEX) {
- port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
- if (attr->pkey_index >=
- dev->mdev->port_caps[port - 1].pkey_table_len)
- goto out;
}
- if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
- attr->max_rd_atomic >
- (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp)))
+ if ((attr_mask & IB_QP_PKEY_INDEX) &&
+ attr->pkey_index >= dev->pkey_table_len) {
+ mlx5_ib_dbg(dev, "invalid pkey index %d\n", attr->pkey_index);
goto out;
+ }
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
- attr->max_dest_rd_atomic >
- (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp)))
+ if (!validate_rd_atomic(dev, attr, attr_mask, qp_type))
goto out;
if (cur_state == new_state && cur_state == IB_QPS_RESET) {
@@ -1805,1358 +4781,1106 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto out;
}
- err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
+ err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state,
+ new_state, &ucmd, &resp, udata);
+
+ /* resp.response_length is set in ECE supported flows only */
+ if (!err && resp.response_length &&
+ udata->outlen >= resp.response_length)
+ /* Return -EFAULT to the user and expect him to destroy QP. */
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
out:
mutex_unlock(&qp->mutex);
return err;
}
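
The ucmd handling at the top of mlx5_ib_modify_qp() follows the standard extensible-ABI pattern: reject an input shorter than the oldest supported layout, accept a longer one only if the extra bytes are zeroed, and copy no more than min(inlen, sizeof(ucmd)). A minimal sketch of the same checks over plain buffers, with a hypothetical struct standing in for mlx5_ib_modify_qp:

#include <stddef.h>
#include <string.h>

struct modify_cmd {			/* hypothetical stand-in, not the real layout */
	unsigned int comp_mask;
	unsigned int ece_options;	/* end of the oldest (v0) layout */
	unsigned int later_field;	/* added in a newer ABI revision */
};

#define MODIFY_CMD_V0_SIZE offsetof(struct modify_cmd, later_field)

static int copy_user_cmd(struct modify_cmd *cmd, const void *ubuf, size_t inlen)
{
	memset(cmd, 0, sizeof(*cmd));

	if (inlen < MODIFY_CMD_V0_SIZE)
		return -1;			/* -EINVAL: shorter than the oldest ABI */

	if (inlen > sizeof(*cmd)) {
		/* bytes the kernel does not know about must be zero */
		const unsigned char *tail = (const unsigned char *)ubuf + sizeof(*cmd);
		for (size_t i = 0; i < inlen - sizeof(*cmd); i++)
			if (tail[i])
				return -2;	/* -EOPNOTSUPP: unknown extension */
	}

	memcpy(cmd, ubuf, inlen < sizeof(*cmd) ? inlen : sizeof(*cmd));
	return 0;
}

int main(void)
{
	struct modify_cmd cmd;
	unsigned char newer[sizeof(struct modify_cmd) + 8] = { 0 };	/* zero-padded newer layout */

	return copy_user_cmd(&cmd, newer, sizeof(newer));		/* accepted: returns 0 */
}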
-static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
-{
- struct mlx5_ib_cq *cq;
- unsigned cur;
-
- cur = wq->head - wq->tail;
- if (likely(cur + nreq < wq->max_post))
- return 0;
-
- cq = to_mcq(ib_cq);
- spin_lock(&cq->lock);
- cur = wq->head - wq->tail;
- spin_unlock(&cq->lock);
-
- return cur + nreq >= wq->max_post;
-}
-
-static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
- u64 remote_addr, u32 rkey)
-{
- rseg->raddr = cpu_to_be64(remote_addr);
- rseg->rkey = cpu_to_be32(rkey);
- rseg->reserved = 0;
-}
-
-static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
- struct ib_send_wr *wr)
-{
- memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av));
- dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV);
- dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
-}
-
-static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
+static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
{
- dseg->byte_count = cpu_to_be32(sg->length);
- dseg->lkey = cpu_to_be32(sg->lkey);
- dseg->addr = cpu_to_be64(sg->addr);
+ switch (mlx5_state) {
+ case MLX5_QP_STATE_RST: return IB_QPS_RESET;
+ case MLX5_QP_STATE_INIT: return IB_QPS_INIT;
+ case MLX5_QP_STATE_RTR: return IB_QPS_RTR;
+ case MLX5_QP_STATE_RTS: return IB_QPS_RTS;
+ case MLX5_QP_STATE_SQ_DRAINING:
+ case MLX5_QP_STATE_SQD: return IB_QPS_SQD;
+ case MLX5_QP_STATE_SQER: return IB_QPS_SQE;
+ case MLX5_QP_STATE_ERR: return IB_QPS_ERR;
+ default: return -1;
+ }
}
-static __be16 get_klm_octo(int npages)
+static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
{
- return cpu_to_be16(ALIGN(npages, 8) / 2);
+ switch (mlx5_mig_state) {
+ case MLX5_QP_PM_ARMED: return IB_MIG_ARMED;
+ case MLX5_QP_PM_REARM: return IB_MIG_REARM;
+ case MLX5_QP_PM_MIGRATED: return IB_MIG_MIGRATED;
+ default: return -1;
+ }
}
-static __be64 frwr_mkey_mask(void)
+static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
+ struct rdma_ah_attr *ah_attr, void *path)
{
- u64 result;
-
- result = MLX5_MKEY_MASK_LEN |
- MLX5_MKEY_MASK_PAGE_SIZE |
- MLX5_MKEY_MASK_START_ADDR |
- MLX5_MKEY_MASK_EN_RINVAL |
- MLX5_MKEY_MASK_KEY |
- MLX5_MKEY_MASK_LR |
- MLX5_MKEY_MASK_LW |
- MLX5_MKEY_MASK_RR |
- MLX5_MKEY_MASK_RW |
- MLX5_MKEY_MASK_A |
- MLX5_MKEY_MASK_SMALL_FENCE |
- MLX5_MKEY_MASK_FREE;
+ int port = MLX5_GET(ads, path, vhca_port_num);
+ int static_rate;
- return cpu_to_be64(result);
-}
+ memset(ah_attr, 0, sizeof(*ah_attr));
-static __be64 sig_mkey_mask(void)
-{
- u64 result;
+ if (!port || port > ibdev->num_ports)
+ return;
- result = MLX5_MKEY_MASK_LEN |
- MLX5_MKEY_MASK_PAGE_SIZE |
- MLX5_MKEY_MASK_START_ADDR |
- MLX5_MKEY_MASK_EN_SIGERR |
- MLX5_MKEY_MASK_EN_RINVAL |
- MLX5_MKEY_MASK_KEY |
- MLX5_MKEY_MASK_LR |
- MLX5_MKEY_MASK_LW |
- MLX5_MKEY_MASK_RR |
- MLX5_MKEY_MASK_RW |
- MLX5_MKEY_MASK_SMALL_FENCE |
- MLX5_MKEY_MASK_FREE |
- MLX5_MKEY_MASK_BSF_EN;
+ ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port);
- return cpu_to_be64(result);
-}
+ rdma_ah_set_port_num(ah_attr, port);
+ rdma_ah_set_sl(ah_attr, MLX5_GET(ads, path, sl));
-static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
- struct ib_send_wr *wr, int li)
-{
- memset(umr, 0, sizeof(*umr));
+ rdma_ah_set_dlid(ah_attr, MLX5_GET(ads, path, rlid));
+ rdma_ah_set_path_bits(ah_attr, MLX5_GET(ads, path, mlid));
- if (li) {
- umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
- umr->flags = 1 << 7;
- return;
+ static_rate = MLX5_GET(ads, path, stat_rate);
+ rdma_ah_set_static_rate(ah_attr, mlx5_to_ib_rate_map(static_rate));
+ if (MLX5_GET(ads, path, grh) ||
+ ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
+ rdma_ah_set_grh(ah_attr, NULL, MLX5_GET(ads, path, flow_label),
+ MLX5_GET(ads, path, src_addr_index),
+ MLX5_GET(ads, path, hop_limit),
+ MLX5_GET(ads, path, tclass));
+ rdma_ah_set_dgid_raw(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip));
}
-
- umr->flags = (1 << 5); /* fail if not free */
- umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len);
- umr->mkey_mask = frwr_mkey_mask();
}
-static __be64 get_umr_reg_mr_mask(void)
+static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_sq *sq,
+ u8 *sq_state)
{
- u64 result;
+ int err;
- result = MLX5_MKEY_MASK_LEN |
- MLX5_MKEY_MASK_PAGE_SIZE |
- MLX5_MKEY_MASK_START_ADDR |
- MLX5_MKEY_MASK_PD |
- MLX5_MKEY_MASK_LR |
- MLX5_MKEY_MASK_LW |
- MLX5_MKEY_MASK_KEY |
- MLX5_MKEY_MASK_RR |
- MLX5_MKEY_MASK_RW |
- MLX5_MKEY_MASK_A |
- MLX5_MKEY_MASK_FREE;
+ err = mlx5_core_query_sq_state(dev->mdev, sq->base.mqp.qpn, sq_state);
+ if (err)
+ goto out;
+ sq->state = *sq_state;
- return cpu_to_be64(result);
+out:
+ return err;
}
-static __be64 get_umr_unreg_mr_mask(void)
+static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_rq *rq,
+ u8 *rq_state)
{
- u64 result;
-
- result = MLX5_MKEY_MASK_FREE;
+ void *out;
+ void *rqc;
+ int inlen;
+ int err;
- return cpu_to_be64(result);
-}
+ inlen = MLX5_ST_SZ_BYTES(query_rq_out);
+ out = kvzalloc(inlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
-static __be64 get_umr_update_mtt_mask(void)
-{
- u64 result;
+ err = mlx5_core_query_rq(dev->mdev, rq->base.mqp.qpn, out);
+ if (err)
+ goto out;
- result = MLX5_MKEY_MASK_FREE;
+ rqc = MLX5_ADDR_OF(query_rq_out, out, rq_context);
+ *rq_state = MLX5_GET(rqc, rqc, state);
+ rq->state = *rq_state;
- return cpu_to_be64(result);
+out:
+ kvfree(out);
+ return err;
}
-static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
- struct ib_send_wr *wr)
+static int sqrq_state_to_qp_state(u8 sq_state, u8 rq_state,
+ struct mlx5_ib_qp *qp, u8 *qp_state)
{
- struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
+ static const u8 sqrq_trans[MLX5_RQ_NUM_STATE][MLX5_SQ_NUM_STATE] = {
+ [MLX5_RQC_STATE_RST] = {
+ [MLX5_SQC_STATE_RST] = IB_QPS_RESET,
+ [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE_BAD,
+ [MLX5_SQC_STATE_ERR] = MLX5_QP_STATE_BAD,
+ [MLX5_SQ_STATE_NA] = IB_QPS_RESET,
+ },
+ [MLX5_RQC_STATE_RDY] = {
+ [MLX5_SQC_STATE_RST] = MLX5_QP_STATE,
+ [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE,
+ [MLX5_SQC_STATE_ERR] = IB_QPS_SQE,
+ [MLX5_SQ_STATE_NA] = MLX5_QP_STATE,
+ },
+ [MLX5_RQC_STATE_ERR] = {
+ [MLX5_SQC_STATE_RST] = MLX5_QP_STATE_BAD,
+ [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE_BAD,
+ [MLX5_SQC_STATE_ERR] = IB_QPS_ERR,
+ [MLX5_SQ_STATE_NA] = IB_QPS_ERR,
+ },
+ [MLX5_RQ_STATE_NA] = {
+ [MLX5_SQC_STATE_RST] = MLX5_QP_STATE,
+ [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE,
+ [MLX5_SQC_STATE_ERR] = MLX5_QP_STATE,
+ [MLX5_SQ_STATE_NA] = MLX5_QP_STATE_BAD,
+ },
+ };
- memset(umr, 0, sizeof(*umr));
+ *qp_state = sqrq_trans[rq_state][sq_state];
- if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
- umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
- else
- umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
-
- if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
- umr->klm_octowords = get_klm_octo(umrwr->npages);
- if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) {
- umr->mkey_mask = get_umr_update_mtt_mask();
- umr->bsf_octowords = get_klm_octo(umrwr->target.offset);
- umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
- } else {
- umr->mkey_mask = get_umr_reg_mr_mask();
- }
- } else {
- umr->mkey_mask = get_umr_unreg_mr_mask();
+ if (*qp_state == MLX5_QP_STATE_BAD) {
+ WARN(1, "Buggy Raw Packet QP state, SQ 0x%x state: 0x%x, RQ 0x%x state: 0x%x",
+ qp->raw_packet_qp.sq.base.mqp.qpn, sq_state,
+ qp->raw_packet_qp.rq.base.mqp.qpn, rq_state);
+ return -EINVAL;
}
- if (!wr->num_sge)
- umr->flags |= MLX5_UMR_INLINE;
-}
+ if (*qp_state == MLX5_QP_STATE)
+ *qp_state = qp->state;
-static u8 get_umr_flags(int acc)
-{
- return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
- (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
- (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
- (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
- MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
+ return 0;
}
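
sqrq_state_to_qp_state() merges the separately queried SQ and RQ states through a table whose entries are either a concrete QP state, a "keep the cached software state" sentinel, or a "bad combination" sentinel that triggers the WARN. A compact sketch of that fallback scheme with illustrative names, not the mlx5 enums:

#include <stdio.h>

enum { RST, RDY, ERR, NA, NSTATES };
enum { KEEP_SW_STATE = 100, BAD_COMBO = 101 };	/* like MLX5_QP_STATE / MLX5_QP_STATE_BAD */

static const int combine[NSTATES][NSTATES] = {
	/*         sq=RST          sq=RDY          sq=ERR          sq=NA */
	[RST] = { RST,            BAD_COMBO,      BAD_COMBO,      RST },
	[RDY] = { KEEP_SW_STATE,  KEEP_SW_STATE,  ERR,            KEEP_SW_STATE },
	[ERR] = { BAD_COMBO,      BAD_COMBO,      ERR,            ERR },
	[NA]  = { KEEP_SW_STATE,  KEEP_SW_STATE,  KEEP_SW_STATE,  BAD_COMBO },
};

static int qp_state(int rq, int sq, int cached_sw_state)
{
	int s = combine[rq][sq];

	if (s == BAD_COMBO)
		return -1;			/* WARN + -EINVAL in the driver */
	if (s == KEEP_SW_STATE)
		return cached_sw_state;		/* hardware gives no better answer */
	return s;
}

int main(void)
{
	printf("%d\n", qp_state(RDY, ERR, RST));	/* 2: SQ in error dominates */
	printf("%d\n", qp_state(NA, NA, RDY));		/* -1: inconsistent combination */
	return 0;
}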
-static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
- int li, int *writ)
+static int query_raw_packet_qp_state(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp,
+ u8 *raw_packet_qp_state)
{
- memset(seg, 0, sizeof(*seg));
- if (li) {
- seg->status = MLX5_MKEY_STATUS_FREE;
- return;
+ struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
+ struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
+ struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
+ int err;
+ u8 sq_state = MLX5_SQ_STATE_NA;
+ u8 rq_state = MLX5_RQ_STATE_NA;
+
+ if (qp->sq.wqe_cnt) {
+ err = query_raw_packet_qp_sq_state(dev, sq, &sq_state);
+ if (err)
+ return err;
+ }
+
+ if (qp->rq.wqe_cnt) {
+ err = query_raw_packet_qp_rq_state(dev, rq, &rq_state);
+ if (err)
+ return err;
}
- seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags) |
- MLX5_ACCESS_MODE_MTT;
- *writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
- seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00);
- seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
- seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
- seg->len = cpu_to_be64(wr->wr.fast_reg.length);
- seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2);
- seg->log2_page_size = wr->wr.fast_reg.page_shift;
+ return sqrq_state_to_qp_state(sq_state, rq_state, qp,
+ raw_packet_qp_state);
}
-static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
+static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct ib_qp_attr *qp_attr)
{
- struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
+ int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
+ void *qpc, *pri_path, *alt_path;
+ u32 *outb;
+ int err;
- memset(seg, 0, sizeof(*seg));
- if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
- seg->status = MLX5_MKEY_STATUS_FREE;
- return;
- }
+ outb = kzalloc(outlen, GFP_KERNEL);
+ if (!outb)
+ return -ENOMEM;
- seg->flags = convert_access(umrwr->access_flags);
- if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) {
- seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
- seg->start_addr = cpu_to_be64(umrwr->target.virt_addr);
+ err = mlx5_core_qp_query(dev, &qp->trans_qp.base.mqp, outb, outlen,
+ false);
+ if (err)
+ goto out;
+
+ qpc = MLX5_ADDR_OF(query_qp_out, outb, qpc);
+
+ qp->state = to_ib_qp_state(MLX5_GET(qpc, qpc, state));
+ if (MLX5_GET(qpc, qpc, state) == MLX5_QP_STATE_SQ_DRAINING)
+ qp_attr->sq_draining = 1;
+
+ qp_attr->path_mtu = MLX5_GET(qpc, qpc, mtu);
+ qp_attr->path_mig_state = to_ib_mig_state(MLX5_GET(qpc, qpc, pm_state));
+ qp_attr->qkey = MLX5_GET(qpc, qpc, q_key);
+ qp_attr->rq_psn = MLX5_GET(qpc, qpc, next_rcv_psn);
+ qp_attr->sq_psn = MLX5_GET(qpc, qpc, next_send_psn);
+ qp_attr->dest_qp_num = MLX5_GET(qpc, qpc, remote_qpn);
+
+ if (MLX5_GET(qpc, qpc, rre))
+ qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ;
+ if (MLX5_GET(qpc, qpc, rwe))
+ qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_WRITE;
+ if (MLX5_GET(qpc, qpc, rae))
+ qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_ATOMIC;
+
+ qp_attr->max_rd_atomic = 1 << MLX5_GET(qpc, qpc, log_sra_max);
+ qp_attr->max_dest_rd_atomic = 1 << MLX5_GET(qpc, qpc, log_rra_max);
+ qp_attr->min_rnr_timer = MLX5_GET(qpc, qpc, min_rnr_nak);
+ qp_attr->retry_cnt = MLX5_GET(qpc, qpc, retry_count);
+ qp_attr->rnr_retry = MLX5_GET(qpc, qpc, rnr_retry);
+
+ pri_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
+ alt_path = MLX5_ADDR_OF(qpc, qpc, secondary_address_path);
+
+ if (qp->type == IB_QPT_RC || qp->type == IB_QPT_UC ||
+ qp->type == IB_QPT_XRC_INI || qp->type == IB_QPT_XRC_TGT) {
+ to_rdma_ah_attr(dev, &qp_attr->ah_attr, pri_path);
+ to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, alt_path);
+ qp_attr->alt_pkey_index = MLX5_GET(ads, alt_path, pkey_index);
+ qp_attr->alt_port_num = MLX5_GET(ads, alt_path, vhca_port_num);
}
- seg->len = cpu_to_be64(umrwr->length);
- seg->log2_page_size = umrwr->page_shift;
- seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
- mlx5_mkey_variant(umrwr->mkey));
-}
-static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
- struct ib_send_wr *wr,
- struct mlx5_core_dev *mdev,
- struct mlx5_ib_pd *pd,
- int writ)
-{
- struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
- u64 *page_list = wr->wr.fast_reg.page_list->page_list;
- u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0);
- int i;
+ qp_attr->pkey_index = MLX5_GET(ads, pri_path, pkey_index);
+ qp_attr->port_num = MLX5_GET(ads, pri_path, vhca_port_num);
+ qp_attr->timeout = MLX5_GET(ads, pri_path, ack_timeout);
+ qp_attr->alt_timeout = MLX5_GET(ads, alt_path, ack_timeout);
- for (i = 0; i < wr->wr.fast_reg.page_list_len; i++)
- mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
- dseg->addr = cpu_to_be64(mfrpl->map);
- dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
- dseg->lkey = cpu_to_be32(pd->pa_lkey);
+out:
+ kfree(outb);
+ return err;
}
-static __be32 send_ieth(struct ib_send_wr *wr)
+static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
+ struct ib_qp_attr *qp_attr, int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
{
- switch (wr->opcode) {
- case IB_WR_SEND_WITH_IMM:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- return wr->ex.imm_data;
+ struct mlx5_core_dct *dct = &mqp->dct.mdct;
+ u32 *out;
+ u32 access_flags = 0;
+ int outlen = MLX5_ST_SZ_BYTES(query_dct_out);
+ void *dctc;
+ int err;
+ int supported_mask = IB_QP_STATE |
+ IB_QP_ACCESS_FLAGS |
+ IB_QP_PORT |
+ IB_QP_MIN_RNR_TIMER |
+ IB_QP_AV |
+ IB_QP_PATH_MTU |
+ IB_QP_PKEY_INDEX;
+
+ if (qp_attr_mask & ~supported_mask)
+ return -EINVAL;
+ if (mqp->state != IB_QPS_RTR)
+ return -EINVAL;
- case IB_WR_SEND_WITH_INV:
- return cpu_to_be32(wr->ex.invalidate_rkey);
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
- default:
- return 0;
- }
-}
+ err = mlx5_core_dct_query(dev, dct, out, outlen);
+ if (err)
+ goto out;
-static u8 calc_sig(void *wqe, int size)
-{
- u8 *p = wqe;
- u8 res = 0;
- int i;
+ dctc = MLX5_ADDR_OF(query_dct_out, out, dct_context_entry);
- for (i = 0; i < size; i++)
- res ^= p[i];
+ if (qp_attr_mask & IB_QP_STATE)
+ qp_attr->qp_state = IB_QPS_RTR;
- return ~res;
-}
+ if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
+ if (MLX5_GET(dctc, dctc, rre))
+ access_flags |= IB_ACCESS_REMOTE_READ;
+ if (MLX5_GET(dctc, dctc, rwe))
+ access_flags |= IB_ACCESS_REMOTE_WRITE;
+ if (MLX5_GET(dctc, dctc, rae))
+ access_flags |= IB_ACCESS_REMOTE_ATOMIC;
+ qp_attr->qp_access_flags = access_flags;
+ }
-static u8 wq_sig(void *wqe)
-{
- return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
+ if (qp_attr_mask & IB_QP_PORT)
+ qp_attr->port_num = mqp->port;
+ if (qp_attr_mask & IB_QP_MIN_RNR_TIMER)
+ qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak);
+ if (qp_attr_mask & IB_QP_AV) {
+ qp_attr->ah_attr.grh.traffic_class = MLX5_GET(dctc, dctc, tclass);
+ qp_attr->ah_attr.grh.flow_label = MLX5_GET(dctc, dctc, flow_label);
+ qp_attr->ah_attr.grh.sgid_index = MLX5_GET(dctc, dctc, my_addr_index);
+ qp_attr->ah_attr.grh.hop_limit = MLX5_GET(dctc, dctc, hop_limit);
+ }
+ if (qp_attr_mask & IB_QP_PATH_MTU)
+ qp_attr->path_mtu = MLX5_GET(dctc, dctc, mtu);
+ if (qp_attr_mask & IB_QP_PKEY_INDEX)
+ qp_attr->pkey_index = MLX5_GET(dctc, dctc, pkey_index);
+out:
+ kfree(out);
+ return err;
}
-static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
- void *wqe, int *sz)
+int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
- struct mlx5_wqe_inline_seg *seg;
- void *qend = qp->sq.qend;
- void *addr;
- int inl = 0;
- int copy;
- int len;
- int i;
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ int err = 0;
+ u8 raw_packet_qp_state;
- seg = wqe;
- wqe += sizeof(*seg);
- for (i = 0; i < wr->num_sge; i++) {
- addr = (void *)(unsigned long)(wr->sg_list[i].addr);
- len = wr->sg_list[i].length;
- inl += len;
+ if (ibqp->rwq_ind_tbl)
+ return -ENOSYS;
- if (unlikely(inl > qp->max_inline_data))
- return -ENOMEM;
+ if (qp->type == IB_QPT_GSI)
+ return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
+ qp_init_attr);
- if (unlikely(wqe + len > qend)) {
- copy = qend - wqe;
- memcpy(wqe, addr, copy);
- addr += copy;
- len -= copy;
- wqe = mlx5_get_send_wqe(qp, 0);
- }
- memcpy(wqe, addr, len);
- wqe += len;
- }
+ /* Not all of output fields are applicable, make sure to zero them */
+ memset(qp_init_attr, 0, sizeof(*qp_init_attr));
+ memset(qp_attr, 0, sizeof(*qp_attr));
- seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
+ if (unlikely(qp->type == MLX5_IB_QPT_DCT))
+ return mlx5_ib_dct_query_qp(dev, qp, qp_attr,
+ qp_attr_mask, qp_init_attr);
- *sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
-
- return 0;
-}
+ mutex_lock(&qp->mutex);
-static u16 prot_field_size(enum ib_signature_type type)
-{
- switch (type) {
- case IB_SIG_TYPE_T10_DIF:
- return MLX5_DIF_SIZE;
- default:
- return 0;
+ if (qp->type == IB_QPT_RAW_PACKET ||
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
+ err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state);
+ if (err)
+ goto out;
+ qp->state = raw_packet_qp_state;
+ qp_attr->port_num = 1;
+ } else {
+ err = query_qp_attr(dev, qp, qp_attr);
+ if (err)
+ goto out;
}
-}
-static u8 bs_selector(int block_size)
-{
- switch (block_size) {
- case 512: return 0x1;
- case 520: return 0x2;
- case 4096: return 0x3;
- case 4160: return 0x4;
- case 1073741824: return 0x5;
- default: return 0;
+ qp_attr->qp_state = qp->state;
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+ qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
+ qp_attr->cap.max_recv_sge = qp->rq.max_gs;
+
+ if (!ibqp->uobject) {
+ qp_attr->cap.max_send_wr = qp->sq.max_post;
+ qp_attr->cap.max_send_sge = qp->sq.max_gs;
+ qp_init_attr->qp_context = ibqp->qp_context;
+ } else {
+ qp_attr->cap.max_send_wr = 0;
+ qp_attr->cap.max_send_sge = 0;
}
-}
-static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
- struct mlx5_bsf_inl *inl)
-{
- /* Valid inline section and allow BSF refresh */
- inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
- MLX5_BSF_REFRESH_DIF);
- inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
- inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
- /* repeating block */
- inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
- inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
- MLX5_DIF_CRC : MLX5_DIF_IPCS;
+ qp_init_attr->qp_type = qp->type;
+ qp_init_attr->recv_cq = ibqp->recv_cq;
+ qp_init_attr->send_cq = ibqp->send_cq;
+ qp_init_attr->srq = ibqp->srq;
+ qp_attr->cap.max_inline_data = qp->max_inline_data;
+
+ qp_init_attr->cap = qp_attr->cap;
- if (domain->sig.dif.ref_remap)
- inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;
+ qp_init_attr->create_flags = qp->flags;
- if (domain->sig.dif.app_escape) {
- if (domain->sig.dif.ref_escape)
- inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
- else
- inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
- }
+ qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
+ IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
- inl->dif_app_bitmask_check =
- cpu_to_be16(domain->sig.dif.apptag_check_mask);
+out:
+ mutex_unlock(&qp->mutex);
+ return err;
}
-static int mlx5_set_bsf(struct ib_mr *sig_mr,
- struct ib_sig_attrs *sig_attrs,
- struct mlx5_bsf *bsf, u32 data_size)
+int mlx5_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
{
- struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
- struct mlx5_bsf_basic *basic = &bsf->basic;
- struct ib_sig_domain *mem = &sig_attrs->mem;
- struct ib_sig_domain *wire = &sig_attrs->wire;
-
- memset(bsf, 0, sizeof(*bsf));
+ struct mlx5_ib_dev *dev = to_mdev(ibxrcd->device);
+ struct mlx5_ib_xrcd *xrcd = to_mxrcd(ibxrcd);
- /* Basic + Extended + Inline */
- basic->bsf_size_sbs = 1 << 7;
- /* Input domain check byte mask */
- basic->check_byte_mask = sig_attrs->check_mask;
- basic->raw_data_size = cpu_to_be32(data_size);
-
- /* Memory domain */
- switch (sig_attrs->mem.sig_type) {
- case IB_SIG_TYPE_NONE:
- break;
- case IB_SIG_TYPE_T10_DIF:
- basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
- basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
- mlx5_fill_inl_bsf(mem, &bsf->m_inl);
- break;
- default:
- return -EINVAL;
- }
+ if (!MLX5_CAP_GEN(dev->mdev, xrc))
+ return -EOPNOTSUPP;
- /* Wire domain */
- switch (sig_attrs->wire.sig_type) {
- case IB_SIG_TYPE_NONE:
- break;
- case IB_SIG_TYPE_T10_DIF:
- if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
- mem->sig_type == wire->sig_type) {
- /* Same block structure */
- basic->bsf_size_sbs |= 1 << 4;
- if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
- basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
- if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
- basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
- if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
- basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
- } else
- basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);
+ return mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0);
+}
- basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
- mlx5_fill_inl_bsf(wire, &bsf->w_inl);
- break;
- default:
- return -EINVAL;
- }
+int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
+ u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
- return 0;
+ return mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
}
-static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
- void **seg, int *size)
+static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
{
- struct ib_sig_attrs *sig_attrs = wr->wr.sig_handover.sig_attrs;
- struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
- struct mlx5_bsf *bsf;
- u32 data_len = wr->sg_list->length;
- u32 data_key = wr->sg_list->lkey;
- u64 data_va = wr->sg_list->addr;
- int ret;
- int wqe_size;
+ struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
+ struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device);
+ struct ib_event event;
- if (!wr->wr.sig_handover.prot ||
- (data_key == wr->wr.sig_handover.prot->lkey &&
- data_va == wr->wr.sig_handover.prot->addr &&
- data_len == wr->wr.sig_handover.prot->length)) {
- /**
- * Source domain doesn't contain signature information
- * or data and protection are interleaved in memory.
- * So need construct:
- * ------------------
- * | data_klm |
- * ------------------
- * | BSF |
- * ------------------
- **/
- struct mlx5_klm *data_klm = *seg;
-
- data_klm->bcount = cpu_to_be32(data_len);
- data_klm->key = cpu_to_be32(data_key);
- data_klm->va = cpu_to_be64(data_va);
- wqe_size = ALIGN(sizeof(*data_klm), 64);
- } else {
- /**
- * Source domain contains signature information
- * So need construct a strided block format:
- * ---------------------------
- * | stride_block_ctrl |
- * ---------------------------
- * | data_klm |
- * ---------------------------
- * | prot_klm |
- * ---------------------------
- * | BSF |
- * ---------------------------
- **/
- struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
- struct mlx5_stride_block_entry *data_sentry;
- struct mlx5_stride_block_entry *prot_sentry;
- u32 prot_key = wr->wr.sig_handover.prot->lkey;
- u64 prot_va = wr->wr.sig_handover.prot->addr;
- u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
- int prot_size;
-
- sblock_ctrl = *seg;
- data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
- prot_sentry = (void *)data_sentry + sizeof(*data_sentry);
-
- prot_size = prot_field_size(sig_attrs->mem.sig_type);
- if (!prot_size) {
- pr_err("Bad block size given: %u\n", block_size);
- return -EINVAL;
+ if (rwq->ibwq.event_handler) {
+ event.device = rwq->ibwq.device;
+ event.element.wq = &rwq->ibwq;
+ switch (type) {
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ event.event = IB_EVENT_WQ_FATAL;
+ break;
+ default:
+ mlx5_ib_warn(dev, "Unexpected event type %d on WQ %06x\n", type, core_qp->qpn);
+ return;
}
- sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
- prot_size);
- sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
- sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
- sblock_ctrl->num_entries = cpu_to_be16(2);
-
- data_sentry->bcount = cpu_to_be16(block_size);
- data_sentry->key = cpu_to_be32(data_key);
- data_sentry->va = cpu_to_be64(data_va);
- data_sentry->stride = cpu_to_be16(block_size);
- prot_sentry->bcount = cpu_to_be16(prot_size);
- prot_sentry->key = cpu_to_be32(prot_key);
- prot_sentry->va = cpu_to_be64(prot_va);
- prot_sentry->stride = cpu_to_be16(prot_size);
-
- wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
- sizeof(*prot_sentry), 64);
+ rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context);
}
+}
- *seg += wqe_size;
- *size += wqe_size / 16;
- if (unlikely((*seg == qp->sq.qend)))
- *seg = mlx5_get_send_wqe(qp, 0);
+static int set_delay_drop(struct mlx5_ib_dev *dev)
+{
+ int err = 0;
- bsf = *seg;
- ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
- if (ret)
- return -EINVAL;
+ mutex_lock(&dev->delay_drop.lock);
+ if (dev->delay_drop.activate)
+ goto out;
+
+ err = mlx5_core_set_delay_drop(dev, dev->delay_drop.timeout);
+ if (err)
+ goto out;
- *seg += sizeof(*bsf);
- *size += sizeof(*bsf) / 16;
- if (unlikely((*seg == qp->sq.qend)))
- *seg = mlx5_get_send_wqe(qp, 0);
+ dev->delay_drop.activate = true;
+out:
+ mutex_unlock(&dev->delay_drop.lock);
- return 0;
+ if (!err)
+ atomic_inc(&dev->delay_drop.rqs_cnt);
+ return err;
}
-static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
- struct ib_send_wr *wr, u32 nelements,
- u32 length, u32 pdn)
+static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr)
{
- struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
- u32 sig_key = sig_mr->rkey;
- u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
+ struct mlx5_ib_dev *dev;
+ int has_net_offloads;
+ __be64 *rq_pas0;
+ int ts_format;
+ void *in;
+ void *rqc;
+ void *wq;
+ int inlen;
+ int err;
- memset(seg, 0, sizeof(*seg));
+ dev = to_mdev(pd->device);
- seg->flags = get_umr_flags(wr->wr.sig_handover.access_flags) |
- MLX5_ACCESS_MODE_KLM;
- seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
- seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
- MLX5_MKEY_BSF_EN | pdn);
- seg->len = cpu_to_be64(length);
- seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements)));
- seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
-}
+ ts_format = get_rq_ts_format(dev, to_mcq(init_attr->cq));
+ if (ts_format < 0)
+ return ts_format;
-static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
- struct ib_send_wr *wr, u32 nelements)
-{
- memset(umr, 0, sizeof(*umr));
+ inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
- umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
- umr->klm_octowords = get_klm_octo(nelements);
- umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
- umr->mkey_mask = sig_mkey_mask();
+ MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid);
+ rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+ MLX5_SET(rqc, rqc, mem_rq_type,
+ MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
+ MLX5_SET(rqc, rqc, ts_format, ts_format);
+ MLX5_SET(rqc, rqc, user_index, rwq->user_index);
+ MLX5_SET(rqc, rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+ MLX5_SET(rqc, rqc, flush_in_error_en, 1);
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ MLX5_SET(wq, wq, wq_type,
+ rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ ?
+ MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ : MLX5_WQ_TYPE_CYCLIC);
+ if (init_attr->create_flags & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
+ if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
+ mlx5_ib_dbg(dev, "Scatter end padding is not supported\n");
+ err = -EOPNOTSUPP;
+ goto out;
+ } else {
+ MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+ }
+ }
+ MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
+ if (rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ) {
+ /*
+ * In Firmware number of strides in each WQE is:
+ * "512 * 2^single_wqe_log_num_of_strides"
+ * Values 3 to 8 are accepted as 10 to 15, 9 to 18 are
+ * accepted as 0 to 9
+ */
+ static const u8 fw_map[] = { 10, 11, 12, 13, 14, 15, 0, 1,
+ 2, 3, 4, 5, 6, 7, 8, 9 };
+ MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en);
+ MLX5_SET(wq, wq, log_wqe_stride_size,
+ rwq->single_stride_log_num_of_bytes -
+ MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES);
+ MLX5_SET(wq, wq, log_wqe_num_of_strides,
+ fw_map[rwq->log_num_strides -
+ MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES]);
+ }
+ MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
+ MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
+ MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
+ MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
+ MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
+ MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
+ has_net_offloads = MLX5_CAP_GEN(dev->mdev, eth_net_offloads);
+ if (init_attr->create_flags & IB_WQ_FLAGS_CVLAN_STRIPPING) {
+ if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
+ mlx5_ib_dbg(dev, "VLAN offloads are not supported\n");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ } else {
+ MLX5_SET(rqc, rqc, vsd, 1);
+ }
+ if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS) {
+ if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, scatter_fcs))) {
+ mlx5_ib_dbg(dev, "Scatter FCS is not supported\n");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ MLX5_SET(rqc, rqc, scatter_fcs, 1);
+ }
+ if (init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
+ if (!(dev->ib_dev.attrs.raw_packet_caps &
+ IB_RAW_PACKET_CAP_DELAY_DROP)) {
+ mlx5_ib_dbg(dev, "Delay drop is not supported\n");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ MLX5_SET(rqc, rqc, delay_drop_en, 1);
+ }
+ rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
+ mlx5_ib_populate_pas(rwq->umem, 1UL << rwq->page_shift, rq_pas0, 0);
+ err = mlx5_core_create_rq_tracked(dev, in, inlen, &rwq->core_qp);
+ if (!err && init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
+ err = set_delay_drop(dev);
+ if (err) {
+ mlx5_ib_warn(dev, "Failed to enable delay drop err=%d\n",
+ err);
+ mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
+ } else {
+ rwq->create_flags |= MLX5_IB_WQ_FLAGS_DELAY_DROP;
+ }
+ }
+out:
+ kvfree(in);
+ return err;
}
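
The striding-RQ branch of create_rq() converts the driver's log_num_strides into the firmware encoding through fw_map[]; per the in-code comment, values 3..8 become 10..15 and 9..18 become 0..9. A tiny standalone check of that translation, assuming an index bias of 3 (the extended minimum implied by the mapping):

#include <stdio.h>

#define EXT_MIN_LOG_NUM_STRIDES 3	/* assumed minimum used as the fw_map index bias */

static const unsigned char fw_map[] = { 10, 11, 12, 13, 14, 15, 0, 1,
					2, 3, 4, 5, 6, 7, 8, 9 };

int main(void)
{
	for (int log_num_strides = 3; log_num_strides <= 18; log_num_strides++)
		printf("log_num_strides=%2d -> fw encoding %u\n",
		       log_num_strides,
		       fw_map[log_num_strides - EXT_MIN_LOG_NUM_STRIDES]);
	return 0;
}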
-
-static int set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
- void **seg, int *size)
+static int set_user_rq_size(struct mlx5_ib_dev *dev,
+ struct ib_wq_init_attr *wq_init_attr,
+ struct mlx5_ib_create_wq *ucmd,
+ struct mlx5_ib_rwq *rwq)
{
- struct mlx5_ib_mr *sig_mr = to_mmr(wr->wr.sig_handover.sig_mr);
- u32 pdn = get_pd(qp)->pdn;
- u32 klm_oct_size;
- int region_len, ret;
+ /* Sanity check RQ size before proceeding */
+ if (wq_init_attr->max_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_wq_sz)))
+ return -EINVAL;
- if (unlikely(wr->num_sge != 1) ||
- unlikely(wr->wr.sig_handover.access_flags &
- IB_ACCESS_REMOTE_ATOMIC) ||
- unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
- unlikely(!sig_mr->sig->sig_status_checked))
+ if (!ucmd->rq_wqe_count)
return -EINVAL;
- /* length of the protected region, data + protection */
- region_len = wr->sg_list->length;
- if (wr->wr.sig_handover.prot &&
- (wr->wr.sig_handover.prot->lkey != wr->sg_list->lkey ||
- wr->wr.sig_handover.prot->addr != wr->sg_list->addr ||
- wr->wr.sig_handover.prot->length != wr->sg_list->length))
- region_len += wr->wr.sig_handover.prot->length;
-
- /**
- * KLM octoword size - if protection was provided
- * then we use strided block format (3 octowords),
- * else we use single KLM (1 octoword)
- **/
- klm_oct_size = wr->wr.sig_handover.prot ? 3 : 1;
-
- set_sig_umr_segment(*seg, wr, klm_oct_size);
- *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
- *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
- if (unlikely((*seg == qp->sq.qend)))
- *seg = mlx5_get_send_wqe(qp, 0);
-
- set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn);
- *seg += sizeof(struct mlx5_mkey_seg);
- *size += sizeof(struct mlx5_mkey_seg) / 16;
- if (unlikely((*seg == qp->sq.qend)))
- *seg = mlx5_get_send_wqe(qp, 0);
-
- ret = set_sig_data_segment(wr, qp, seg, size);
- if (ret)
- return ret;
+ rwq->wqe_count = ucmd->rq_wqe_count;
+ rwq->wqe_shift = ucmd->rq_wqe_shift;
+ if (check_shl_overflow(rwq->wqe_count, rwq->wqe_shift, &rwq->buf_size))
+ return -EINVAL;
- sig_mr->sig->sig_status_checked = false;
+ rwq->log_rq_stride = rwq->wqe_shift;
+ rwq->log_rq_size = ilog2(rwq->wqe_count);
return 0;
}
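
set_user_rq_size() derives buf_size = wqe_count << wqe_shift through check_shl_overflow() so an oversized user request fails with -EINVAL instead of silently wrapping. A standalone equivalent of that overflow-checked shift (not the kernel macro, just the same idea):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* true on overflow, false if *res holds count << shift exactly */
static bool shl_overflows(uint32_t count, unsigned int shift, uint32_t *res)
{
	*res = 0;
	if (shift >= 32)
		return count != 0;
	if (shift && (count >> (32 - shift)))
		return true;			/* high bits would be shifted out */
	*res = count << shift;
	return false;
}

int main(void)
{
	uint32_t buf_size;

	if (!shl_overflows(1024, 6, &buf_size))		/* 1024 WQEs of 64 bytes */
		printf("buf_size = %u\n", buf_size);
	if (shl_overflows(1u << 20, 16, &buf_size))	/* too large for 32 bits */
		printf("rejected oversized RQ\n");
	return 0;
}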
-static int set_psv_wr(struct ib_sig_domain *domain,
- u32 psv_idx, void **seg, int *size)
+static bool log_of_strides_valid(struct mlx5_ib_dev *dev, u32 log_num_strides)
{
- struct mlx5_seg_set_psv *psv_seg = *seg;
+ if ((log_num_strides > MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) ||
+ (log_num_strides < MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES))
+ return false;
- memset(psv_seg, 0, sizeof(*psv_seg));
- psv_seg->psv_num = cpu_to_be32(psv_idx);
- switch (domain->sig_type) {
- case IB_SIG_TYPE_NONE:
- break;
- case IB_SIG_TYPE_T10_DIF:
- psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
- domain->sig.dif.app_tag);
- psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
- break;
- default:
- pr_err("Bad signature type given.\n");
- return 1;
- }
+ if (!MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) &&
+ (log_num_strides < MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES))
+ return false;
- *seg += sizeof(*psv_seg);
- *size += sizeof(*psv_seg) / 16;
-
- return 0;
+ return true;
}
-static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
- struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp)
+static int prepare_user_rq(struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr,
+ struct ib_udata *udata,
+ struct mlx5_ib_rwq *rwq)
{
- int writ = 0;
- int li;
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_create_wq ucmd = {};
+ int err;
+ size_t required_cmd_sz;
- li = wr->opcode == IB_WR_LOCAL_INV ? 1 : 0;
- if (unlikely(wr->send_flags & IB_SEND_INLINE))
+ required_cmd_sz = offsetofend(struct mlx5_ib_create_wq,
+ single_stride_log_num_of_bytes);
+ if (udata->inlen < required_cmd_sz) {
+ mlx5_ib_dbg(dev, "invalid inlen\n");
return -EINVAL;
+ }
- set_frwr_umr_segment(*seg, wr, li);
- *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
- *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
- if (unlikely((*seg == qp->sq.qend)))
- *seg = mlx5_get_send_wqe(qp, 0);
- set_mkey_segment(*seg, wr, li, &writ);
- *seg += sizeof(struct mlx5_mkey_seg);
- *size += sizeof(struct mlx5_mkey_seg) / 16;
- if (unlikely((*seg == qp->sq.qend)))
- *seg = mlx5_get_send_wqe(qp, 0);
- if (!li) {
- if (unlikely(wr->wr.fast_reg.page_list_len >
- wr->wr.fast_reg.page_list->max_page_list_len))
- return -ENOMEM;
-
- set_frwr_pages(*seg, wr, mdev, pd, writ);
- *seg += sizeof(struct mlx5_wqe_data_seg);
- *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
+ if (udata->inlen > sizeof(ucmd) &&
+ !ib_is_udata_cleared(udata, sizeof(ucmd),
+ udata->inlen - sizeof(ucmd))) {
+ mlx5_ib_dbg(dev, "inlen is not supported\n");
+ return -EOPNOTSUPP;
}
- return 0;
-}
-static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
-{
- __be32 *p = NULL;
- int tidx = idx;
- int i, j;
+ if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
+ mlx5_ib_dbg(dev, "copy failed\n");
+ return -EFAULT;
+ }
- pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
- for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
- if ((i & 0xf) == 0) {
- void *buf = mlx5_get_send_wqe(qp, tidx);
- tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
- p = buf;
- j = 0;
+ if (ucmd.comp_mask & (~MLX5_IB_CREATE_WQ_STRIDING_RQ)) {
+ mlx5_ib_dbg(dev, "invalid comp mask\n");
+ return -EOPNOTSUPP;
+ } else if (ucmd.comp_mask & MLX5_IB_CREATE_WQ_STRIDING_RQ) {
+ if (!MLX5_CAP_GEN(dev->mdev, striding_rq)) {
+ mlx5_ib_dbg(dev, "Striding RQ is not supported\n");
+ return -EOPNOTSUPP;
}
- pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
- be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
- be32_to_cpu(p[j + 3]));
+ if ((ucmd.single_stride_log_num_of_bytes <
+ MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES) ||
+ (ucmd.single_stride_log_num_of_bytes >
+ MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES)) {
+ mlx5_ib_dbg(dev, "Invalid log stride size (%u. Range is %u - %u)\n",
+ ucmd.single_stride_log_num_of_bytes,
+ MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES,
+ MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES);
+ return -EINVAL;
+ }
+ if (!log_of_strides_valid(dev,
+ ucmd.single_wqe_log_num_of_strides)) {
+ mlx5_ib_dbg(
+ dev,
+ "Invalid log num strides (%u. Range is %u - %u)\n",
+ ucmd.single_wqe_log_num_of_strides,
+ MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) ?
+ MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES :
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES,
+ MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES);
+ return -EINVAL;
+ }
+ rwq->single_stride_log_num_of_bytes =
+ ucmd.single_stride_log_num_of_bytes;
+ rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides;
+ rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en;
+ rwq->create_flags |= MLX5_IB_WQ_FLAGS_STRIDING_RQ;
}
-}
-static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
- unsigned bytecnt, struct mlx5_ib_qp *qp)
-{
- while (bytecnt > 0) {
- __iowrite64_copy(dst++, src++, 8);
- __iowrite64_copy(dst++, src++, 8);
- __iowrite64_copy(dst++, src++, 8);
- __iowrite64_copy(dst++, src++, 8);
- __iowrite64_copy(dst++, src++, 8);
- __iowrite64_copy(dst++, src++, 8);
- __iowrite64_copy(dst++, src++, 8);
- __iowrite64_copy(dst++, src++, 8);
- bytecnt -= 64;
- if (unlikely(src == qp->sq.qend))
- src = mlx5_get_send_wqe(qp, 0);
+ err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
+ if (err) {
+ mlx5_ib_dbg(dev, "err %d\n", err);
+ return err;
}
+
+ err = create_user_rq(dev, pd, udata, rwq, &ucmd);
+ if (err) {
+ mlx5_ib_dbg(dev, "err %d\n", err);
+ return err;
+ }
+
+ rwq->user_index = ucmd.user_index;
+ return 0;
}
-static u8 get_fence(u8 fence, struct ib_send_wr *wr)
+struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr,
+ struct ib_udata *udata)
{
- if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
- wr->send_flags & IB_SEND_FENCE))
- return MLX5_FENCE_MODE_STRONG_ORDERING;
+ struct mlx5_ib_dev *dev;
+ struct mlx5_ib_rwq *rwq;
+ struct mlx5_ib_create_wq_resp resp = {};
+ size_t min_resp_len;
+ int err;
- if (unlikely(fence)) {
- if (wr->send_flags & IB_SEND_FENCE)
- return MLX5_FENCE_MODE_SMALL_AND_FENCE;
- else
- return fence;
+ if (!udata)
+ return ERR_PTR(-ENOSYS);
- } else {
- return 0;
- }
-}
+ min_resp_len = offsetofend(struct mlx5_ib_create_wq_resp, reserved);
+ if (udata->outlen && udata->outlen < min_resp_len)
+ return ERR_PTR(-EINVAL);
-static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
- struct mlx5_wqe_ctrl_seg **ctrl,
- struct ib_send_wr *wr, unsigned *idx,
- int *size, int nreq)
-{
- int err = 0;
+ if (!capable(CAP_SYS_RAWIO) &&
+ init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP)
+ return ERR_PTR(-EPERM);
- if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
- err = -ENOMEM;
- return err;
+ dev = to_mdev(pd->device);
+ switch (init_attr->wq_type) {
+ case IB_WQT_RQ:
+ rwq = kzalloc(sizeof(*rwq), GFP_KERNEL);
+ if (!rwq)
+ return ERR_PTR(-ENOMEM);
+ err = prepare_user_rq(pd, init_attr, udata, rwq);
+ if (err)
+ goto err;
+ err = create_rq(rwq, pd, init_attr);
+ if (err)
+ goto err_user_rq;
+ break;
+ default:
+ mlx5_ib_dbg(dev, "unsupported wq type %d\n",
+ init_attr->wq_type);
+ return ERR_PTR(-EINVAL);
}
- *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
- *seg = mlx5_get_send_wqe(qp, *idx);
- *ctrl = *seg;
- *(uint32_t *)(*seg + 8) = 0;
- (*ctrl)->imm = send_ieth(wr);
- (*ctrl)->fm_ce_se = qp->sq_signal_bits |
- (wr->send_flags & IB_SEND_SIGNALED ?
- MLX5_WQE_CTRL_CQ_UPDATE : 0) |
- (wr->send_flags & IB_SEND_SOLICITED ?
- MLX5_WQE_CTRL_SOLICITED : 0);
-
- *seg += sizeof(**ctrl);
- *size = sizeof(**ctrl) / 16;
+ rwq->ibwq.wq_num = rwq->core_qp.qpn;
+ rwq->ibwq.state = IB_WQS_RESET;
+ if (udata->outlen) {
+ resp.response_length = offsetofend(
+ struct mlx5_ib_create_wq_resp, response_length);
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err)
+ goto err_copy;
+ }
- return err;
+ rwq->core_qp.event = mlx5_ib_wq_event;
+ rwq->ibwq.event_handler = init_attr->event_handler;
+ return &rwq->ibwq;
+
+err_copy:
+ mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
+err_user_rq:
+ destroy_user_rq(dev, pd, rwq, udata);
+err:
+ kfree(rwq);
+ return ERR_PTR(err);
}
-static void finish_wqe(struct mlx5_ib_qp *qp,
- struct mlx5_wqe_ctrl_seg *ctrl,
- u8 size, unsigned idx, u64 wr_id,
- int nreq, u8 fence, u8 next_fence,
- u32 mlx5_opcode)
+int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
{
- u8 opmod = 0;
-
- ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
- mlx5_opcode | ((u32)opmod << 24));
- ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
- ctrl->fm_ce_se |= fence;
- qp->fm_cache = next_fence;
- if (unlikely(qp->wq_sig))
- ctrl->signature = wq_sig(ctrl);
+ struct mlx5_ib_dev *dev = to_mdev(wq->device);
+ struct mlx5_ib_rwq *rwq = to_mrwq(wq);
+ int ret;
- qp->sq.wrid[idx] = wr_id;
- qp->sq.w_list[idx].opcode = mlx5_opcode;
- qp->sq.wqe_head[idx] = qp->sq.head + nreq;
- qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
- qp->sq.w_list[idx].next = qp->sq.cur_post;
+ ret = mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
+ if (ret)
+ return ret;
+ destroy_user_rq(dev, wq->pd, rwq, udata);
+ kfree(rwq);
+ return 0;
}
-
-int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ struct ib_udata *udata)
{
- struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
- struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
- struct mlx5_core_dev *mdev = dev->mdev;
- struct mlx5_ib_qp *qp = to_mqp(ibqp);
- struct mlx5_ib_mr *mr;
- struct mlx5_wqe_data_seg *dpseg;
- struct mlx5_wqe_xrc_seg *xrc;
- struct mlx5_bf *bf = qp->bf;
- int uninitialized_var(size);
- void *qend = qp->sq.qend;
- unsigned long flags;
- unsigned idx;
- int err = 0;
- int inl = 0;
- int num_sge;
- void *seg;
- int nreq;
+ struct mlx5_ib_rwq_ind_table *rwq_ind_tbl =
+ to_mrwq_ind_table(ib_rwq_ind_table);
+ struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_table->device);
+ int sz = 1 << init_attr->log_ind_tbl_size;
+ struct mlx5_ib_create_rwq_ind_tbl_resp resp = {};
+ size_t min_resp_len;
+ int inlen;
+ int err;
int i;
- u8 next_fence = 0;
- u8 fence;
-
- spin_lock_irqsave(&qp->sq.lock, flags);
+ u32 *in;
+ void *rqtc;
+
+ if (udata->inlen > 0 &&
+ !ib_is_udata_cleared(udata, 0,
+ udata->inlen))
+ return -EOPNOTSUPP;
+
+ if (init_attr->log_ind_tbl_size >
+ MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
+ mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
+ init_attr->log_ind_tbl_size,
+ MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
+ return -EINVAL;
+ }
- for (nreq = 0; wr; nreq++, wr = wr->next) {
- if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
- mlx5_ib_warn(dev, "\n");
- err = -EINVAL;
- *bad_wr = wr;
- goto out;
- }
+ min_resp_len =
+ offsetofend(struct mlx5_ib_create_rwq_ind_tbl_resp, reserved);
+ if (udata->outlen && udata->outlen < min_resp_len)
+ return -EINVAL;
- fence = qp->fm_cache;
- num_sge = wr->num_sge;
- if (unlikely(num_sge > qp->sq.max_gs)) {
- mlx5_ib_warn(dev, "\n");
- err = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
- err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- err = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
+ rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
- switch (ibqp->qp_type) {
- case IB_QPT_XRC_INI:
- xrc = seg;
- xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num);
- seg += sizeof(*xrc);
- size += sizeof(*xrc) / 16;
- /* fall through */
- case IB_QPT_RC:
- switch (wr->opcode) {
- case IB_WR_RDMA_READ:
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- set_raddr_seg(seg, wr->wr.rdma.remote_addr,
- wr->wr.rdma.rkey);
- seg += sizeof(struct mlx5_wqe_raddr_seg);
- size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
- break;
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
- case IB_WR_ATOMIC_CMP_AND_SWP:
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
- mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
- err = -ENOSYS;
- *bad_wr = wr;
- goto out;
+ for (i = 0; i < sz; i++)
+ MLX5_SET(rqtc, rqtc, rq_num[i], init_attr->ind_tbl[i]->wq_num);
- case IB_WR_LOCAL_INV:
- next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
- qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
- ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
- err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- *bad_wr = wr;
- goto out;
- }
- num_sge = 0;
- break;
+ rwq_ind_tbl->uid = to_mpd(init_attr->ind_tbl[0]->pd)->uid;
+ MLX5_SET(create_rqt_in, in, uid, rwq_ind_tbl->uid);
- case IB_WR_FAST_REG_MR:
- next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
- qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR;
- ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
- err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- *bad_wr = wr;
- goto out;
- }
- num_sge = 0;
- break;
+ err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
+ kvfree(in);
+ if (err)
+ return err;
- case IB_WR_REG_SIG_MR:
- qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
- mr = to_mmr(wr->wr.sig_handover.sig_mr);
+ rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn;
+ if (udata->outlen) {
+ resp.response_length =
+ offsetofend(struct mlx5_ib_create_rwq_ind_tbl_resp,
+ response_length);
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err)
+ goto err_copy;
+ }
- ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
- err = set_sig_umr_wr(wr, qp, &seg, &size);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- *bad_wr = wr;
- goto out;
- }
+ return 0;
- finish_wqe(qp, ctrl, size, idx, wr->wr_id,
- nreq, get_fence(fence, wr),
- next_fence, MLX5_OPCODE_UMR);
- /*
- * SET_PSV WQEs are not signaled and solicited
- * on error
- */
- wr->send_flags &= ~IB_SEND_SIGNALED;
- wr->send_flags |= IB_SEND_SOLICITED;
- err = begin_wqe(qp, &seg, &ctrl, wr,
- &idx, &size, nreq);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- err = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
+err_copy:
+ mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
+ return err;
+}
- err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->mem,
- mr->sig->psv_memory.psv_idx, &seg,
- &size);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- *bad_wr = wr;
- goto out;
- }
+int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
+{
+ struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
+ struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);
- finish_wqe(qp, ctrl, size, idx, wr->wr_id,
- nreq, get_fence(fence, wr),
- next_fence, MLX5_OPCODE_SET_PSV);
- err = begin_wqe(qp, &seg, &ctrl, wr,
- &idx, &size, nreq);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- err = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
+ return mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
+}
- next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
- err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->wire,
- mr->sig->psv_wire.psv_idx, &seg,
- &size);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- *bad_wr = wr;
- goto out;
- }
+int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
+ u32 wq_attr_mask, struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(wq->device);
+ struct mlx5_ib_rwq *rwq = to_mrwq(wq);
+ struct mlx5_ib_modify_wq ucmd = {};
+ size_t required_cmd_sz;
+ int curr_wq_state;
+ int wq_state;
+ int inlen;
+ int err;
+ void *rqc;
+ void *in;
- finish_wqe(qp, ctrl, size, idx, wr->wr_id,
- nreq, get_fence(fence, wr),
- next_fence, MLX5_OPCODE_SET_PSV);
- num_sge = 0;
- goto skip_psv;
+ required_cmd_sz = offsetofend(struct mlx5_ib_modify_wq, reserved);
+ if (udata->inlen < required_cmd_sz)
+ return -EINVAL;
- default:
- break;
- }
- break;
+ if (udata->inlen > sizeof(ucmd) &&
+ !ib_is_udata_cleared(udata, sizeof(ucmd),
+ udata->inlen - sizeof(ucmd)))
+ return -EOPNOTSUPP;
- case IB_QPT_UC:
- switch (wr->opcode) {
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- set_raddr_seg(seg, wr->wr.rdma.remote_addr,
- wr->wr.rdma.rkey);
- seg += sizeof(struct mlx5_wqe_raddr_seg);
- size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
- break;
+ if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
+ return -EFAULT;
- default:
- break;
- }
- break;
+ if (ucmd.comp_mask || ucmd.reserved)
+ return -EOPNOTSUPP;
- case IB_QPT_UD:
- case IB_QPT_SMI:
- case IB_QPT_GSI:
- set_datagram_seg(seg, wr);
- seg += sizeof(struct mlx5_wqe_datagram_seg);
- size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
- if (unlikely((seg == qend)))
- seg = mlx5_get_send_wqe(qp, 0);
- break;
+ inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
- case MLX5_IB_QPT_REG_UMR:
- if (wr->opcode != MLX5_IB_WR_UMR) {
- err = -EINVAL;
- mlx5_ib_warn(dev, "bad opcode\n");
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+ curr_wq_state = wq_attr->curr_wq_state;
+ wq_state = wq_attr->wq_state;
+ if (curr_wq_state == IB_WQS_ERR)
+ curr_wq_state = MLX5_RQC_STATE_ERR;
+ if (wq_state == IB_WQS_ERR)
+ wq_state = MLX5_RQC_STATE_ERR;
+ MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
+ MLX5_SET(modify_rq_in, in, uid, to_mpd(wq->pd)->uid);
+ MLX5_SET(rqc, rqc, state, wq_state);
+
+ if (wq_attr_mask & IB_WQ_FLAGS) {
+ if (wq_attr->flags_mask & IB_WQ_FLAGS_CVLAN_STRIPPING) {
+ if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
+ MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
+ mlx5_ib_dbg(dev, "VLAN offloads are not supported\n");
+ err = -EOPNOTSUPP;
goto out;
}
- qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
- ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
- set_reg_umr_segment(seg, wr);
- seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
- size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
- if (unlikely((seg == qend)))
- seg = mlx5_get_send_wqe(qp, 0);
- set_reg_mkey_segment(seg, wr);
- seg += sizeof(struct mlx5_mkey_seg);
- size += sizeof(struct mlx5_mkey_seg) / 16;
- if (unlikely((seg == qend)))
- seg = mlx5_get_send_wqe(qp, 0);
- break;
-
- default:
- break;
+ MLX5_SET64(modify_rq_in, in, modify_bitmask,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
+ MLX5_SET(rqc, rqc, vsd,
+ (wq_attr->flags & IB_WQ_FLAGS_CVLAN_STRIPPING) ? 0 : 1);
}
- if (wr->send_flags & IB_SEND_INLINE && num_sge) {
- int uninitialized_var(sz);
-
- err = set_data_inl_seg(qp, wr, seg, &sz);
- if (unlikely(err)) {
- mlx5_ib_warn(dev, "\n");
- *bad_wr = wr;
- goto out;
- }
- inl = 1;
- size += sz;
- } else {
- dpseg = seg;
- for (i = 0; i < num_sge; i++) {
- if (unlikely(dpseg == qend)) {
- seg = mlx5_get_send_wqe(qp, 0);
- dpseg = seg;
- }
- if (likely(wr->sg_list[i].length)) {
- set_data_ptr_seg(dpseg, wr->sg_list + i);
- size += sizeof(struct mlx5_wqe_data_seg) / 16;
- dpseg++;
- }
- }
+ if (wq_attr->flags_mask & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
+ mlx5_ib_dbg(dev, "Modifying scatter end padding is not supported\n");
+ err = -EOPNOTSUPP;
+ goto out;
}
-
- finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
- get_fence(fence, wr), next_fence,
- mlx5_ib_opcode[wr->opcode]);
-skip_psv:
- if (0)
- dump_wqe(qp, idx, size);
}
-out:
- if (likely(nreq)) {
- qp->sq.head += nreq;
-
- /* Make sure that descriptors are written before
- * updating doorbell record and ringing the doorbell
- */
- wmb();
-
- qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
-
- /* Make sure doorbell record is visible to the HCA before
- * we hit doorbell */
- wmb();
+ if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) {
+ u16 set_id;
- if (bf->need_lock)
- spin_lock(&bf->lock);
- else
- __acquire(&bf->lock);
-
- /* TBD enable WC */
- if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
- mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
- /* wc_wmb(); */
- } else {
- mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset,
- MLX5_GET_DOORBELL_LOCK(&bf->lock32));
- /* Make sure doorbells don't leak out of SQ spinlock
- * and reach the HCA out of order.
- */
- mmiowb();
- }
- bf->offset ^= bf->buf_size;
- if (bf->need_lock)
- spin_unlock(&bf->lock);
- else
- __release(&bf->lock);
+ set_id = mlx5_ib_get_counters_id(dev, 0);
+ if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
+ MLX5_SET64(modify_rq_in, in, modify_bitmask,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
+ MLX5_SET(rqc, rqc, counter_set_id, set_id);
+ } else
+ dev_info_once(
+ &dev->ib_dev.dev,
+ "Receive WQ counters are not supported on current FW\n");
}
- spin_unlock_irqrestore(&qp->sq.lock, flags);
+ err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in);
+ if (!err)
+ rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;
+out:
+ kvfree(in);
return err;
}
-static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
-{
- sig->signature = calc_sig(sig, size);
-}
+struct mlx5_ib_drain_cqe {
+ struct ib_cqe cqe;
+ struct completion done;
+};
-int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+static void mlx5_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct mlx5_ib_qp *qp = to_mqp(ibqp);
- struct mlx5_wqe_data_seg *scat;
- struct mlx5_rwqe_sig *sig;
- unsigned long flags;
- int err = 0;
- int nreq;
- int ind;
- int i;
-
- spin_lock_irqsave(&qp->rq.lock, flags);
-
- ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
+ struct mlx5_ib_drain_cqe *cqe = container_of(wc->wr_cqe,
+ struct mlx5_ib_drain_cqe,
+ cqe);
- for (nreq = 0; wr; nreq++, wr = wr->next) {
- if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
- err = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
-
- if (unlikely(wr->num_sge > qp->rq.max_gs)) {
- err = -EINVAL;
- *bad_wr = wr;
- goto out;
- }
+ complete(&cqe->done);
+}
- scat = get_recv_wqe(qp, ind);
- if (qp->wq_sig)
- scat++;
+/* This function returns only once the drained WR has completed */
+static void handle_drain_completion(struct ib_cq *cq,
+ struct mlx5_ib_drain_cqe *sdrain,
+ struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
- for (i = 0; i < wr->num_sge; i++)
- set_data_ptr_seg(scat + i, wr->sg_list + i);
+ if (cq->poll_ctx == IB_POLL_DIRECT) {
+ while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0)
+ ib_process_cq_direct(cq, -1);
+ return;
+ }
- if (i < qp->rq.max_gs) {
- scat[i].byte_count = 0;
- scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
- scat[i].addr = 0;
- }
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ struct mlx5_ib_cq *mcq = to_mcq(cq);
+ bool triggered = false;
+ unsigned long flags;
- if (qp->wq_sig) {
- sig = (struct mlx5_rwqe_sig *)scat;
- set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ /* Make sure that the CQ handler won't run if it wasn't run yet */
+ if (!mcq->mcq.reset_notify_added)
+ mcq->mcq.reset_notify_added = 1;
+ else
+ triggered = true;
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+ if (triggered) {
+ /* Wait for any scheduled/running task to be ended */
+ switch (cq->poll_ctx) {
+ case IB_POLL_SOFTIRQ:
+ irq_poll_disable(&cq->iop);
+ irq_poll_enable(&cq->iop);
+ break;
+ case IB_POLL_WORKQUEUE:
+ cancel_work_sync(&cq->work);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
}
- qp->rq.wrid[ind] = wr->wr_id;
-
- ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
- }
-
-out:
- if (likely(nreq)) {
- qp->rq.head += nreq;
-
- /* Make sure that descriptors are written before
- * doorbell record.
+ /* Run the CQ handler - this makes sure that the drain WR will
+ * be processed if it wasn't processed yet.
*/
- wmb();
-
- *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
+ mcq->mcq.comp(&mcq->mcq, NULL);
}
- spin_unlock_irqrestore(&qp->rq.lock, flags);
-
- return err;
+ wait_for_completion(&sdrain->done);
}
-static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
+void mlx5_ib_drain_sq(struct ib_qp *qp)
{
- switch (mlx5_state) {
- case MLX5_QP_STATE_RST: return IB_QPS_RESET;
- case MLX5_QP_STATE_INIT: return IB_QPS_INIT;
- case MLX5_QP_STATE_RTR: return IB_QPS_RTR;
- case MLX5_QP_STATE_RTS: return IB_QPS_RTS;
- case MLX5_QP_STATE_SQ_DRAINING:
- case MLX5_QP_STATE_SQD: return IB_QPS_SQD;
- case MLX5_QP_STATE_SQER: return IB_QPS_SQE;
- case MLX5_QP_STATE_ERR: return IB_QPS_ERR;
- default: return -1;
- }
-}
+ struct ib_cq *cq = qp->send_cq;
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct mlx5_ib_drain_cqe sdrain;
+ const struct ib_send_wr *bad_swr;
+ struct ib_rdma_wr swr = {
+ .wr = {
+ .next = NULL,
+ { .wr_cqe = &sdrain.cqe, },
+ .opcode = IB_WR_RDMA_WRITE,
+ },
+ };
+ int ret;
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
-static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
-{
- switch (mlx5_mig_state) {
- case MLX5_QP_PM_ARMED: return IB_MIG_ARMED;
- case MLX5_QP_PM_REARM: return IB_MIG_REARM;
- case MLX5_QP_PM_MIGRATED: return IB_MIG_MIGRATED;
- default: return -1;
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ return;
}
-}
-static int to_ib_qp_access_flags(int mlx5_flags)
-{
- int ib_flags = 0;
+ sdrain.cqe.done = mlx5_ib_drain_qp_done;
+ init_completion(&sdrain.done);
- if (mlx5_flags & MLX5_QP_BIT_RRE)
- ib_flags |= IB_ACCESS_REMOTE_READ;
- if (mlx5_flags & MLX5_QP_BIT_RWE)
- ib_flags |= IB_ACCESS_REMOTE_WRITE;
- if (mlx5_flags & MLX5_QP_BIT_RAE)
- ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
+ ret = mlx5_ib_post_send_drain(qp, &swr.wr, &bad_swr);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ return;
+ }
- return ib_flags;
+ handle_drain_completion(cq, &sdrain, dev);
}
-static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
- struct mlx5_qp_path *path)
+void mlx5_ib_drain_rq(struct ib_qp *qp)
{
- struct mlx5_core_dev *dev = ibdev->mdev;
-
- memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
- ib_ah_attr->port_num = path->port;
+ struct ib_cq *cq = qp->recv_cq;
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct mlx5_ib_drain_cqe rdrain;
+ struct ib_recv_wr rwr = {};
+ const struct ib_recv_wr *bad_rwr;
+ int ret;
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
- if (ib_ah_attr->port_num == 0 ||
- ib_ah_attr->port_num > MLX5_CAP_GEN(dev, num_ports))
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
return;
+ }
- ib_ah_attr->sl = path->sl & 0xf;
+ rwr.wr_cqe = &rdrain.cqe;
+ rdrain.cqe.done = mlx5_ib_drain_qp_done;
+ init_completion(&rdrain.done);
- ib_ah_attr->dlid = be16_to_cpu(path->rlid);
- ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
- ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
- ib_ah_attr->ah_flags = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;
- if (ib_ah_attr->ah_flags) {
- ib_ah_attr->grh.sgid_index = path->mgid_index;
- ib_ah_attr->grh.hop_limit = path->hop_limit;
- ib_ah_attr->grh.traffic_class =
- (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
- ib_ah_attr->grh.flow_label =
- be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
- memcpy(ib_ah_attr->grh.dgid.raw,
- path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
+ ret = mlx5_ib_post_recv_drain(qp, &rwr, &bad_rwr);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+ return;
}
+
+ handle_drain_completion(cq, &rdrain, dev);
}
-int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
- struct ib_qp_init_attr *qp_init_attr)
+/*
+ * Bind a qp to a counter. If @counter is NULL then bind the qp to
+ * the default counter
+ */
+int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
{
- struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
- struct mlx5_ib_qp *qp = to_mqp(ibqp);
- struct mlx5_query_qp_mbox_out *outb;
- struct mlx5_qp_context *context;
- int mlx5_state;
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
int err = 0;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- /*
- * Wait for any outstanding page faults, in case the user frees memory
- * based upon this query's result.
- */
- flush_workqueue(mlx5_ib_page_fault_wq);
-#endif
-
- mutex_lock(&qp->mutex);
- outb = kzalloc(sizeof(*outb), GFP_KERNEL);
- if (!outb) {
- err = -ENOMEM;
+ mutex_lock(&mqp->mutex);
+ if (mqp->state == IB_QPS_RESET) {
+ qp->counter = counter;
goto out;
}
- context = &outb->ctx;
- err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb));
- if (err)
- goto out_free;
- mlx5_state = be32_to_cpu(context->flags) >> 28;
-
- qp->state = to_ib_qp_state(mlx5_state);
- qp_attr->qp_state = qp->state;
- qp_attr->path_mtu = context->mtu_msgmax >> 5;
- qp_attr->path_mig_state =
- to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
- qp_attr->qkey = be32_to_cpu(context->qkey);
- qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
- qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff;
- qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
- qp_attr->qp_access_flags =
- to_ib_qp_access_flags(be32_to_cpu(context->params2));
-
- if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
- to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
- to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
- qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
- qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
- }
-
- qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
- qp_attr->port_num = context->pri_path.port;
-
- /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
- qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;
-
- qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);
-
- qp_attr->max_dest_rd_atomic =
- 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
- qp_attr->min_rnr_timer =
- (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
- qp_attr->timeout = context->pri_path.ackto_lt >> 3;
- qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
- qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7;
- qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3;
- qp_attr->cur_qp_state = qp_attr->qp_state;
- qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
- qp_attr->cap.max_recv_sge = qp->rq.max_gs;
-
- if (!ibqp->uobject) {
- qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
- qp_attr->cap.max_send_sge = qp->sq.max_gs;
- } else {
- qp_attr->cap.max_send_wr = 0;
- qp_attr->cap.max_send_sge = 0;
+ if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) {
+ err = -EOPNOTSUPP;
+ goto out;
}
- /* We don't support inline sends for kernel QPs (yet), and we
- * don't know what userspace's value should be.
- */
- qp_attr->cap.max_inline_data = 0;
+ if (mqp->state == IB_QPS_RTS) {
+ err = __mlx5_ib_qp_set_counter(qp, counter);
+ if (!err)
+ qp->counter = counter;
- qp_init_attr->cap = qp_attr->cap;
-
- qp_init_attr->create_flags = 0;
- if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
- qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
-
- qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
- IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
+ goto out;
+ }
-out_free:
- kfree(outb);
+ mqp->counter_pending = 1;
+ qp->counter = counter;
out:
- mutex_unlock(&qp->mutex);
+ mutex_unlock(&mqp->mutex);
return err;
}
-struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+int mlx5_ib_qp_event_init(void)
{
- struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_ib_xrcd *xrcd;
- int err;
-
- if (!MLX5_CAP_GEN(dev->mdev, xrc))
- return ERR_PTR(-ENOSYS);
-
- xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
- if (!xrcd)
- return ERR_PTR(-ENOMEM);
-
- err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
- if (err) {
- kfree(xrcd);
- return ERR_PTR(-ENOMEM);
- }
+ mlx5_ib_qp_event_wq = alloc_ordered_workqueue("mlx5_ib_qp_event_wq", 0);
+ if (!mlx5_ib_qp_event_wq)
+ return -ENOMEM;
- return &xrcd->ibxrcd;
+ return 0;
}
-int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
+void mlx5_ib_qp_event_cleanup(void)
{
- struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
- u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
- int err;
-
- err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
- if (err) {
- mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
- return err;
- }
-
- kfree(xrcd);
-
- return 0;
+ destroy_workqueue(mlx5_ib_qp_event_wq);
}
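
The drain helpers above (mlx5_ib_drain_sq() and mlx5_ib_drain_rq()) rely on a common idiom: post a marker work request whose ib_cqe completion callback signals a struct completion, then block until the marker has flushed through the queue. A minimal, self-contained sketch of that idiom follows; it is illustrative only and not part of this patch, and the helper names drain_marker_done() and post_drain_marker() are made up for the example.

#include <linux/completion.h>
#include <rdma/ib_verbs.h>

/* Marker carried through the receive queue; container_of() recovers it
 * from the ib_cqe embedded in the work completion.
 */
struct drain_marker {
	struct ib_cqe cqe;
	struct completion done;
};

static void drain_marker_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct drain_marker *m =
		container_of(wc->wr_cqe, struct drain_marker, cqe);

	complete(&m->done);
}

/* Post the marker and wait until it completes (typically with a flush
 * error once the QP has been moved to the error state).
 */
static int post_drain_marker(struct ib_qp *qp)
{
	const struct ib_recv_wr *bad_wr;
	struct ib_recv_wr wr = {};
	struct drain_marker m;
	int ret;

	m.cqe.done = drain_marker_done;
	init_completion(&m.done);
	wr.wr_cqe = &m.cqe;

	ret = ib_post_recv(qp, &wr, &bad_wr);
	if (ret)
		return ret;

	wait_for_completion(&m.done);
	return 0;
}

The real helpers differ in the details: the send-side marker is posted as a zero-length RDMA_WRITE, and handle_drain_completion() above adds special handling when the device is in internal-error state.
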
diff --git a/drivers/infiniband/hw/mlx5/qp.h b/drivers/infiniband/hw/mlx5/qp.h
new file mode 100644
index 000000000000..2530e7730635
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/qp.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#ifndef _MLX5_IB_QP_H
+#define _MLX5_IB_QP_H
+
+struct mlx5_ib_dev;
+
+struct mlx5_qp_table {
+ struct notifier_block nb;
+ struct xarray dct_xa;
+
+ /* protect radix tree */
+ spinlock_t lock;
+ struct radix_tree_root tree;
+};
+
+int mlx5_init_qp_table(struct mlx5_ib_dev *dev);
+void mlx5_cleanup_qp_table(struct mlx5_ib_dev *dev);
+
+int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *qp,
+ u32 *in, int inlen, u32 *out, int outlen);
+int mlx5_qpc_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
+ u32 *in, int inlen, u32 *out);
+int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask,
+ void *qpc, struct mlx5_core_qp *qp, u32 *ece);
+int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp);
+int mlx5_core_destroy_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct);
+int mlx5_core_qp_query(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
+ u32 *out, int outlen, bool qpc_ext);
+int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
+ u32 *out, int outlen);
+
+int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev, u32 timeout_usec);
+
+int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *rq);
+int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
+ struct mlx5_core_qp *sq);
+void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *sq);
+
+int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
+ struct mlx5_core_qp *rq);
+
+struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_ib_dev *dev,
+ int res_num,
+ enum mlx5_res_type res_type);
+void mlx5_core_res_put(struct mlx5_core_rsc_common *res);
+
+int mlx5_core_xrcd_alloc(struct mlx5_ib_dev *dev, u32 *xrcdn);
+int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn);
+int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
+int mlx5_ib_qp_event_init(void);
+void mlx5_ib_qp_event_cleanup(void);
+int mlx5r_ib_rate(struct mlx5_ib_dev *dev, u8 rate);
+#endif /* _MLX5_IB_QP_H */
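
The radix tree declared in mlx5_qp_table above is keyed by a combined resource number ("rsn"): the QP/RQ/SQ number in the low bits and the resource type shifted above it, as used by create_resource_common() and rsc_event_notifier() in qpc.c below. A small sketch of that encoding, assuming MLX5_USER_INDEX_LEN is 24 bits as in the upstream mlx5 headers:

#include <linux/types.h>

/* Sketch of the rsn packing used for the qp-table radix tree. The
 * 24-bit width is an assumption matching MLX5_USER_INDEX_LEN in the
 * upstream headers; only the packing scheme itself matters here.
 */
#define SKETCH_USER_INDEX_LEN 24

static inline u32 sketch_make_rsn(u32 res_num, u32 res_type)
{
	return res_num | (res_type << SKETCH_USER_INDEX_LEN);
}

static inline u32 sketch_rsn_to_num(u32 rsn)
{
	return rsn & ((1U << SKETCH_USER_INDEX_LEN) - 1);
}

static inline u32 sketch_rsn_to_type(u32 rsn)
{
	return rsn >> SKETCH_USER_INDEX_LEN;
}
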
diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c
new file mode 100644
index 000000000000..146d03ae40bd
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/qpc.c
@@ -0,0 +1,697 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <linux/gfp.h>
+#include <linux/mlx5/qp.h>
+#include <linux/mlx5/driver.h>
+#include "mlx5_ib.h"
+#include "qp.h"
+
+static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev,
+ struct mlx5_core_dct *dct);
+
+static struct mlx5_core_rsc_common *
+mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
+{
+ struct mlx5_core_rsc_common *common;
+ unsigned long flags;
+
+ spin_lock_irqsave(&table->lock, flags);
+
+ common = radix_tree_lookup(&table->tree, rsn);
+ if (common && !common->invalid)
+ refcount_inc(&common->refcount);
+ else
+ common = NULL;
+
+ spin_unlock_irqrestore(&table->lock, flags);
+
+ return common;
+}
+
+void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
+{
+ if (refcount_dec_and_test(&common->refcount))
+ complete(&common->free);
+}
+
+static u64 qp_allowed_event_types(void)
+{
+ u64 mask;
+
+ mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
+ BIT(MLX5_EVENT_TYPE_COMM_EST) |
+ BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
+ BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
+ BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
+ BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
+ BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
+ BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);
+
+ return mask;
+}
+
+static u64 rq_allowed_event_types(void)
+{
+ u64 mask;
+
+ mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
+ BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
+
+ return mask;
+}
+
+static u64 sq_allowed_event_types(void)
+{
+ return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
+}
+
+static u64 dct_allowed_event_types(void)
+{
+ return BIT(MLX5_EVENT_TYPE_DCT_DRAINED);
+}
+
+static bool is_event_type_allowed(int rsc_type, int event_type)
+{
+ switch (rsc_type) {
+ case MLX5_EVENT_QUEUE_TYPE_QP:
+ return BIT(event_type) & qp_allowed_event_types();
+ case MLX5_EVENT_QUEUE_TYPE_RQ:
+ return BIT(event_type) & rq_allowed_event_types();
+ case MLX5_EVENT_QUEUE_TYPE_SQ:
+ return BIT(event_type) & sq_allowed_event_types();
+ case MLX5_EVENT_QUEUE_TYPE_DCT:
+ return BIT(event_type) & dct_allowed_event_types();
+ default:
+ WARN(1, "Event arrived for unknown resource type");
+ return false;
+ }
+}
+
+static int dct_event_notifier(struct mlx5_ib_dev *dev, struct mlx5_eqe *eqe)
+{
+ struct mlx5_core_dct *dct;
+ unsigned long flags;
+ u32 qpn;
+
+ qpn = be32_to_cpu(eqe->data.dct.dctn) & 0xFFFFFF;
+ xa_lock_irqsave(&dev->qp_table.dct_xa, flags);
+ dct = xa_load(&dev->qp_table.dct_xa, qpn);
+ if (dct)
+ complete(&dct->drained);
+ xa_unlock_irqrestore(&dev->qp_table.dct_xa, flags);
+ return NOTIFY_OK;
+}
+
+static int rsc_event_notifier(struct notifier_block *nb,
+ unsigned long type, void *data)
+{
+ struct mlx5_ib_dev *dev =
+ container_of(nb, struct mlx5_ib_dev, qp_table.nb);
+ struct mlx5_core_rsc_common *common;
+ struct mlx5_eqe *eqe = data;
+ u8 event_type = (u8)type;
+ struct mlx5_core_qp *qp;
+ u32 rsn;
+
+ switch (event_type) {
+ case MLX5_EVENT_TYPE_DCT_DRAINED:
+ return dct_event_notifier(dev, eqe);
+ case MLX5_EVENT_TYPE_PATH_MIG:
+ case MLX5_EVENT_TYPE_COMM_EST:
+ case MLX5_EVENT_TYPE_SQ_DRAINED:
+ case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
+ case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+ rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
+ rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ common = mlx5_get_rsc(&dev->qp_table, rsn);
+ if (!common)
+ return NOTIFY_OK;
+
+ if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type))
+ goto out;
+
+ switch (common->res) {
+ case MLX5_RES_QP:
+ case MLX5_RES_RQ:
+ case MLX5_RES_SQ:
+ qp = (struct mlx5_core_qp *)common;
+ qp->event(qp, event_type);
+ /* The resource reference is put in the event handler */
+ return NOTIFY_OK;
+ default:
+ break;
+ }
+out:
+ mlx5_core_put_rsc(common);
+
+ return NOTIFY_OK;
+}
+
+static int create_resource_common(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *qp, int rsc_type)
+{
+ struct mlx5_qp_table *table = &dev->qp_table;
+ int err;
+
+ qp->common.res = rsc_type;
+ spin_lock_irq(&table->lock);
+ err = radix_tree_insert(&table->tree,
+ qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
+ qp);
+ spin_unlock_irq(&table->lock);
+ if (err)
+ return err;
+
+ refcount_set(&qp->common.refcount, 1);
+ init_completion(&qp->common.free);
+ qp->pid = current->pid;
+
+ return 0;
+}
+
+static void modify_resource_common_state(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *qp,
+ bool invalid)
+{
+ struct mlx5_qp_table *table = &dev->qp_table;
+ unsigned long flags;
+
+ spin_lock_irqsave(&table->lock, flags);
+ qp->common.invalid = invalid;
+ spin_unlock_irqrestore(&table->lock, flags);
+}
+
+static void destroy_resource_common(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *qp)
+{
+ struct mlx5_qp_table *table = &dev->qp_table;
+ unsigned long flags;
+
+ spin_lock_irqsave(&table->lock, flags);
+ radix_tree_delete(&table->tree,
+ qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
+ spin_unlock_irqrestore(&table->lock, flags);
+ mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
+ wait_for_completion(&qp->common.free);
+}
+
+static int _mlx5_core_destroy_dct(struct mlx5_ib_dev *dev,
+ struct mlx5_core_dct *dct)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {};
+ struct mlx5_core_qp *qp = &dct->mqp;
+
+ MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
+ MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
+ MLX5_SET(destroy_dct_in, in, uid, qp->uid);
+ return mlx5_cmd_exec_in(dev->mdev, destroy_dct, in);
+}
+
+int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
+ u32 *in, int inlen, u32 *out, int outlen)
+{
+ struct mlx5_core_qp *qp = &dct->mqp;
+ int err;
+
+ init_completion(&dct->drained);
+ MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
+
+ err = mlx5_cmd_do(dev->mdev, in, inlen, out, outlen);
+ if (err)
+ return err;
+
+ qp->qpn = MLX5_GET(create_dct_out, out, dctn);
+ qp->uid = MLX5_GET(create_dct_in, in, uid);
+ err = xa_err(xa_store_irq(&dev->qp_table.dct_xa, qp->qpn, dct, GFP_KERNEL));
+ if (err)
+ goto err_cmd;
+
+ return 0;
+err_cmd:
+ _mlx5_core_destroy_dct(dev, dct);
+ return err;
+}
+
+int mlx5_qpc_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
+ u32 *in, int inlen, u32 *out)
+{
+ u32 din[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
+ int err;
+
+ MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
+
+ err = mlx5_cmd_exec(dev->mdev, in, inlen, out,
+ MLX5_ST_SZ_BYTES(create_qp_out));
+ if (err)
+ return err;
+
+ qp->uid = MLX5_GET(create_qp_in, in, uid);
+ qp->qpn = MLX5_GET(create_qp_out, out, qpn);
+
+ err = create_resource_common(dev, qp, MLX5_RES_QP);
+ if (err)
+ goto err_cmd;
+
+ if (dev->ib_dev.type != RDMA_DEVICE_TYPE_SMI)
+ mlx5_debug_qp_add(dev->mdev, qp);
+
+ return 0;
+
+err_cmd:
+ MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
+ MLX5_SET(destroy_qp_in, din, uid, qp->uid);
+ mlx5_cmd_exec_in(dev->mdev, destroy_qp, din);
+ return err;
+}
+
+static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev,
+ struct mlx5_core_dct *dct)
+{
+ u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {};
+ struct mlx5_core_qp *qp = &dct->mqp;
+
+ MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
+ MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
+ MLX5_SET(drain_dct_in, in, uid, qp->uid);
+ return mlx5_cmd_exec_in(dev->mdev, drain_dct, in);
+}
+
+int mlx5_core_destroy_dct(struct mlx5_ib_dev *dev,
+ struct mlx5_core_dct *dct)
+{
+ struct mlx5_qp_table *table = &dev->qp_table;
+ struct mlx5_core_dct *tmp;
+ int err;
+
+ err = mlx5_core_drain_dct(dev, dct);
+ if (err) {
+ if (dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ goto destroy;
+
+ return err;
+ }
+ wait_for_completion(&dct->drained);
+
+destroy:
+ tmp = xa_cmpxchg_irq(&table->dct_xa, dct->mqp.qpn, dct, XA_ZERO_ENTRY, GFP_KERNEL);
+ if (WARN_ON(tmp != dct))
+ return xa_err(tmp) ?: -EINVAL;
+
+ err = _mlx5_core_destroy_dct(dev, dct);
+ if (err) {
+ xa_cmpxchg_irq(&table->dct_xa, dct->mqp.qpn, XA_ZERO_ENTRY, dct, 0);
+ return err;
+ }
+ xa_erase_irq(&table->dct_xa, dct->mqp.qpn);
+ return 0;
+}
+
+int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
+
+ if (dev->ib_dev.type != RDMA_DEVICE_TYPE_SMI)
+ mlx5_debug_qp_remove(dev->mdev, qp);
+
+ destroy_resource_common(dev, qp);
+
+ MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+ MLX5_SET(destroy_qp_in, in, uid, qp->uid);
+ return mlx5_cmd_exec_in(dev->mdev, destroy_qp, in);
+}
+
+int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev,
+ u32 timeout_usec)
+{
+ u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {};
+
+ MLX5_SET(set_delay_drop_params_in, in, opcode,
+ MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
+ MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout,
+ timeout_usec / 100);
+ return mlx5_cmd_exec_in(dev->mdev, set_delay_drop_params, in);
+}
+
+struct mbox_info {
+ u32 *in;
+ u32 *out;
+ int inlen;
+ int outlen;
+};
+
+static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
+{
+ mbox->inlen = inlen;
+ mbox->outlen = outlen;
+ mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
+ mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
+ if (!mbox->in || !mbox->out) {
+ kfree(mbox->in);
+ kfree(mbox->out);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mbox_free(struct mbox_info *mbox)
+{
+ kfree(mbox->in);
+ kfree(mbox->out);
+}
+
+static int get_ece_from_mbox(void *out, u16 opcode)
+{
+ int ece = 0;
+
+ switch (opcode) {
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ ece = MLX5_GET(init2init_qp_out, out, ece);
+ break;
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ ece = MLX5_GET(init2rtr_qp_out, out, ece);
+ break;
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ ece = MLX5_GET(rtr2rts_qp_out, out, ece);
+ break;
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ ece = MLX5_GET(rts2rts_qp_out, out, ece);
+ break;
+ case MLX5_CMD_OP_RST2INIT_QP:
+ ece = MLX5_GET(rst2init_qp_out, out, ece);
+ break;
+ default:
+ break;
+ }
+
+ return ece;
+}
+
+static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
+ u32 opt_param_mask, void *qpc,
+ struct mbox_info *mbox, u16 uid, u32 ece)
+{
+ mbox->out = NULL;
+ mbox->in = NULL;
+
+#define MBOX_ALLOC(mbox, typ) \
+ mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))
+
+#define MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid) \
+ do { \
+ MLX5_SET(typ##_in, in, opcode, _opcode); \
+ MLX5_SET(typ##_in, in, qpn, _qpn); \
+ MLX5_SET(typ##_in, in, uid, _uid); \
+ } while (0)
+
+#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc, _uid) \
+ do { \
+ MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid); \
+ MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
+ memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, \
+ MLX5_ST_SZ_BYTES(qpc)); \
+ } while (0)
+
+ switch (opcode) {
+ /* 2RST & 2ERR */
+ case MLX5_CMD_OP_2RST_QP:
+ if (MBOX_ALLOC(mbox, qp_2rst))
+ return -ENOMEM;
+ MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn, uid);
+ break;
+ case MLX5_CMD_OP_2ERR_QP:
+ if (MBOX_ALLOC(mbox, qp_2err))
+ return -ENOMEM;
+ MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn, uid);
+ break;
+
+ /* MODIFY with QPC */
+ case MLX5_CMD_OP_RST2INIT_QP:
+ if (MBOX_ALLOC(mbox, rst2init_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc, uid);
+ MLX5_SET(rst2init_qp_in, mbox->in, ece, ece);
+ break;
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ if (MBOX_ALLOC(mbox, init2rtr_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc, uid);
+ MLX5_SET(init2rtr_qp_in, mbox->in, ece, ece);
+ break;
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ if (MBOX_ALLOC(mbox, rtr2rts_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc, uid);
+ MLX5_SET(rtr2rts_qp_in, mbox->in, ece, ece);
+ break;
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ if (MBOX_ALLOC(mbox, rts2rts_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc, uid);
+ MLX5_SET(rts2rts_qp_in, mbox->in, ece, ece);
+ break;
+ case MLX5_CMD_OP_SQERR2RTS_QP:
+ if (MBOX_ALLOC(mbox, sqerr2rts_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc, uid);
+ break;
+ case MLX5_CMD_OP_SQD_RTS_QP:
+ if (MBOX_ALLOC(mbox, sqd2rts_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(sqd2rts_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc, uid);
+ break;
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ if (MBOX_ALLOC(mbox, init2init_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc, uid);
+ MLX5_SET(init2init_qp_in, mbox->in, ece, ece);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask,
+ void *qpc, struct mlx5_core_qp *qp, u32 *ece)
+{
+ struct mbox_info mbox;
+ int err;
+
+ err = modify_qp_mbox_alloc(dev->mdev, opcode, qp->qpn, opt_param_mask,
+ qpc, &mbox, qp->uid, (ece) ? *ece : 0);
+ if (err)
+ return err;
+
+ err = mlx5_cmd_exec(dev->mdev, mbox.in, mbox.inlen, mbox.out,
+ mbox.outlen);
+
+ if (ece)
+ *ece = get_ece_from_mbox(mbox.out, opcode);
+
+ mbox_free(&mbox);
+ return err;
+}
+
+int mlx5_init_qp_table(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_qp_table *table = &dev->qp_table;
+
+ spin_lock_init(&table->lock);
+ INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
+ xa_init(&table->dct_xa);
+
+ if (dev->ib_dev.type != RDMA_DEVICE_TYPE_SMI)
+ mlx5_qp_debugfs_init(dev->mdev);
+
+ table->nb.notifier_call = rsc_event_notifier;
+ mlx5_notifier_register(dev->mdev, &table->nb);
+
+ return 0;
+}
+
+void mlx5_cleanup_qp_table(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_qp_table *table = &dev->qp_table;
+
+ mlx5_notifier_unregister(dev->mdev, &table->nb);
+ if (dev->ib_dev.type != RDMA_DEVICE_TYPE_SMI)
+ mlx5_qp_debugfs_cleanup(dev->mdev);
+}
+
+int mlx5_core_qp_query(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
+ u32 *out, int outlen, bool qpc_ext)
+{
+ u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {};
+
+ MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
+ MLX5_SET(query_qp_in, in, qpn, qp->qpn);
+ MLX5_SET(query_qp_in, in, qpc_ext, qpc_ext);
+
+ return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
+ u32 *out, int outlen)
+{
+ u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {};
+ struct mlx5_core_qp *qp = &dct->mqp;
+
+ MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
+ MLX5_SET(query_dct_in, in, dctn, qp->qpn);
+
+ return mlx5_cmd_exec(dev->mdev, (void *)&in, sizeof(in), (void *)out,
+ outlen);
+}
+
+int mlx5_core_xrcd_alloc(struct mlx5_ib_dev *dev, u32 *xrcdn)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
+ int err;
+
+ MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
+ err = mlx5_cmd_exec_inout(dev->mdev, alloc_xrcd, in, out);
+ if (!err)
+ *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
+ return err;
+}
+
+int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};
+
+ MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
+ MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
+ return mlx5_cmd_exec_in(dev->mdev, dealloc_xrcd, in);
+}
+
+static int destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
+
+ MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
+ MLX5_SET(destroy_rq_in, in, rqn, rqn);
+ MLX5_SET(destroy_rq_in, in, uid, uid);
+ return mlx5_cmd_exec_in(dev->mdev, destroy_rq, in);
+}
+
+int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
+ struct mlx5_core_qp *rq)
+{
+ int err;
+ u32 rqn;
+
+ err = mlx5_core_create_rq(dev->mdev, in, inlen, &rqn);
+ if (err)
+ return err;
+
+ rq->uid = MLX5_GET(create_rq_in, in, uid);
+ rq->qpn = rqn;
+ err = create_resource_common(dev, rq, MLX5_RES_RQ);
+ if (err)
+ goto err_destroy_rq;
+
+ return 0;
+
+err_destroy_rq:
+ destroy_rq_tracked(dev, rq->qpn, rq->uid);
+
+ return err;
+}
+
+int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *rq)
+{
+ int ret;
+
+ /* The rq destruction can be called again if it fails, so we mark the
+ * common resource as invalid and only destroy it once the FW
+ * destruction has completed successfully.
+ */
+ modify_resource_common_state(dev, rq, true);
+ ret = destroy_rq_tracked(dev, rq->qpn, rq->uid);
+ if (ret) {
+ modify_resource_common_state(dev, rq, false);
+ return ret;
+ }
+ destroy_resource_common(dev, rq);
+ return 0;
+}
+
+static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};
+
+ MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
+ MLX5_SET(destroy_sq_in, in, sqn, sqn);
+ MLX5_SET(destroy_sq_in, in, uid, uid);
+ mlx5_cmd_exec_in(dev->mdev, destroy_sq, in);
+}
+
+int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
+ struct mlx5_core_qp *sq)
+{
+ u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {};
+ int err;
+
+ MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
+ err = mlx5_cmd_exec(dev->mdev, in, inlen, out, sizeof(out));
+ if (err)
+ return err;
+
+ sq->qpn = MLX5_GET(create_sq_out, out, sqn);
+ sq->uid = MLX5_GET(create_sq_in, in, uid);
+ err = create_resource_common(dev, sq, MLX5_RES_SQ);
+ if (err)
+ goto err_destroy_sq;
+
+ return 0;
+
+err_destroy_sq:
+ destroy_sq_tracked(dev, sq->qpn, sq->uid);
+
+ return err;
+}
+
+void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *sq)
+{
+ destroy_resource_common(dev, sq);
+ destroy_sq_tracked(dev, sq->qpn, sq->uid);
+}
+
+struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_ib_dev *dev,
+ int res_num,
+ enum mlx5_res_type res_type)
+{
+ u32 rsn = res_num | (res_type << MLX5_USER_INDEX_LEN);
+ struct mlx5_qp_table *table = &dev->qp_table;
+
+ return mlx5_get_rsc(table, rsn);
+}
+
+void mlx5_core_res_put(struct mlx5_core_rsc_common *res)
+{
+ mlx5_core_put_rsc(res);
+}
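
The MOD_QP_IN_SET_QPC() macro in modify_qp_mbox_alloc() above is dense; hand-expanding it for the RTR2RTS case shows what actually lands in the mailbox. The expansion below is written out for illustration only and is not additional patch content:

/* Hand-expanded (for illustration) form of:
 *   MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
 *                     opt_param_mask, qpc, uid);
 */
MLX5_SET(rtr2rts_qp_in, mbox->in, opcode, opcode);	/* from MOD_QP_IN_SET */
MLX5_SET(rtr2rts_qp_in, mbox->in, qpn, qpn);
MLX5_SET(rtr2rts_qp_in, mbox->in, uid, uid);
MLX5_SET(rtr2rts_qp_in, mbox->in, opt_param_mask, opt_param_mask);
memcpy(MLX5_ADDR_OF(rtr2rts_qp_in, mbox->in, qpc), qpc,	/* copy the new QP context */
       MLX5_ST_SZ_BYTES(qpc));
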
diff --git a/drivers/infiniband/hw/mlx5/restrack.c b/drivers/infiniband/hw/mlx5/restrack.c
new file mode 100644
index 000000000000..67841922c7b8
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/restrack.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2019-2020, Mellanox Technologies Ltd. All rights reserved.
+ */
+
+#include <uapi/rdma/rdma_netlink.h>
+#include <linux/mlx5/rsc_dump.h>
+#include <rdma/ib_umem_odp.h>
+#include <rdma/restrack.h>
+#include "mlx5_ib.h"
+#include "restrack.h"
+
+#define MAX_DUMP_SIZE 1024
+
+static int dump_rsc(struct mlx5_core_dev *dev, enum mlx5_sgmt_type type,
+ int index, void *data, int *data_len)
+{
+ struct mlx5_core_dev *mdev = dev;
+ struct mlx5_rsc_dump_cmd *cmd;
+ struct mlx5_rsc_key key = {};
+ struct page *page;
+ int offset = 0;
+ int err = 0;
+ int cmd_err;
+ int size;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ key.size = PAGE_SIZE;
+ key.rsc = type;
+ key.index1 = index;
+ key.num_of_obj1 = 1;
+
+ cmd = mlx5_rsc_dump_cmd_create(mdev, &key);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto free_page;
+ }
+
+ do {
+ cmd_err = mlx5_rsc_dump_next(mdev, cmd, page, &size);
+ if (cmd_err < 0 || size + offset > MAX_DUMP_SIZE) {
+ err = cmd_err;
+ goto destroy_cmd;
+ }
+ memcpy(data + offset, page_address(page), size);
+ offset += size;
+ } while (cmd_err > 0);
+ *data_len = offset;
+
+destroy_cmd:
+ mlx5_rsc_dump_cmd_destroy(cmd);
+free_page:
+ __free_page(page);
+ return err;
+}
+
+static int fill_res_raw(struct sk_buff *msg, struct mlx5_ib_dev *dev,
+ enum mlx5_sgmt_type type, u32 key)
+{
+ int len = 0;
+ void *data;
+ int err;
+
+ data = kzalloc(MAX_DUMP_SIZE, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ err = dump_rsc(dev->mdev, type, key, data, &len);
+ if (err)
+ goto out;
+
+ err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data);
+out:
+ kfree(data);
+ return err;
+}
+
+static int fill_stat_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct nlattr *table_attr;
+
+ if (!(mr->access_flags & IB_ACCESS_ON_DEMAND))
+ return 0;
+
+ table_attr = nla_nest_start(msg,
+ RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
+
+ if (!table_attr)
+ goto err;
+
+ if (rdma_nl_stat_hwcounter_entry(msg, "page_faults",
+ atomic64_read(&mr->odp_stats.faults)))
+ goto err_table;
+ if (rdma_nl_stat_hwcounter_entry(
+ msg, "page_faults_handled",
+ atomic64_read(&mr->odp_stats.faults_handled)))
+ goto err_table;
+ if (rdma_nl_stat_hwcounter_entry(
+ msg, "page_invalidations",
+ atomic64_read(&mr->odp_stats.invalidations)))
+ goto err_table;
+ if (rdma_nl_stat_hwcounter_entry(
+ msg, "page_invalidations_handled",
+ atomic64_read(&mr->odp_stats.invalidations_handled)))
+ goto err_table;
+
+ if (rdma_nl_stat_hwcounter_entry(msg, "page_prefetch",
+ atomic64_read(&mr->odp_stats.prefetch)))
+ goto err_table;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err_table:
+ nla_nest_cancel(msg, table_attr);
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ibmr)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+
+ return fill_res_raw(msg, mr_to_mdev(mr), MLX5_SGMT_TYPE_PRM_QUERY_MKEY,
+ mlx5_mkey_to_idx(mr->mmkey.key));
+}
+
+static int fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct nlattr *table_attr;
+
+ if (!(mr->access_flags & IB_ACCESS_ON_DEMAND))
+ return 0;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ goto err;
+
+ if (mr->is_odp_implicit) {
+ if (rdma_nl_put_driver_string(msg, "odp", "implicit"))
+ goto err;
+ } else {
+ if (rdma_nl_put_driver_string(msg, "odp", "explicit"))
+ goto err;
+ }
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+ return -EMSGSIZE;
+}
+
+static int fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ibcq)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
+ struct mlx5_ib_cq *cq = to_mcq(ibcq);
+
+ return fill_res_raw(msg, dev, MLX5_SGMT_TYPE_PRM_QUERY_CQ, cq->mcq.cqn);
+}
+
+static int fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ibqp)
+{
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ int ret;
+
+ if (qp->type < IB_QPT_DRIVER)
+ return 0;
+
+ switch (qp->type) {
+ case MLX5_IB_QPT_REG_UMR:
+ ret = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUBTYPE,
+ "REG_UMR");
+ break;
+ case MLX5_IB_QPT_DCT:
+ ret = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUBTYPE, "DCT");
+ break;
+ case MLX5_IB_QPT_DCI:
+ ret = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUBTYPE, "DCI");
+ break;
+ default:
+ return 0;
+ }
+ if (ret)
+ return ret;
+
+ return nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, IB_QPT_DRIVER);
+}
+
+static int fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ibqp)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+
+ return fill_res_raw(msg, dev, MLX5_SGMT_TYPE_PRM_QUERY_QP,
+ ibqp->qp_num);
+}
+
+static const struct ib_device_ops restrack_ops = {
+ .fill_res_cq_entry_raw = fill_res_cq_entry_raw,
+ .fill_res_mr_entry = fill_res_mr_entry,
+ .fill_res_mr_entry_raw = fill_res_mr_entry_raw,
+ .fill_res_qp_entry = fill_res_qp_entry,
+ .fill_res_qp_entry_raw = fill_res_qp_entry_raw,
+ .fill_stat_mr_entry = fill_stat_mr_entry,
+};
+
+int mlx5_ib_restrack_init(struct mlx5_ib_dev *dev)
+{
+ ib_set_device_ops(&dev->ib_dev, &restrack_ops);
+ return 0;
+}
diff --git a/drivers/infiniband/hw/mlx5/restrack.h b/drivers/infiniband/hw/mlx5/restrack.h
new file mode 100644
index 000000000000..e8d81270f1b6
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/restrack.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2013-2020, Mellanox Technologies Ltd. All rights reserved.
+ */
+
+#ifndef _MLX5_IB_RESTRACK_H
+#define _MLX5_IB_RESTRACK_H
+
+#include "mlx5_ib.h"
+
+int mlx5_ib_restrack_init(struct mlx5_ib_dev *dev);
+
+#endif /* _MLX5_IB_RESTRACK_H */
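
The fill_res_*() callbacks in restrack.c above all follow the same netlink nesting discipline: open a nest, add attributes, and either close the nest or cancel it so partially built attributes never reach user space. A minimal sketch of that pattern follows; "example_key" is a made-up driver attribute for illustration.

#include <net/netlink.h>
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>

/* Sketch of the nest/cancel pattern used by the restrack callbacks. */
static int sketch_fill_driver_attrs(struct sk_buff *msg)
{
	struct nlattr *nest;

	nest = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!nest)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32(msg, "example_key", 42))
		goto err;

	nla_nest_end(msg, nest);	/* commit the nested attributes */
	return 0;

err:
	nla_nest_cancel(msg, nest);	/* roll back the partial nest */
	return -EMSGSIZE;
}
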
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index e008505e96e9..bcb6b324af50 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -1,51 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2013-2018, Mellanox Technologies inc. All rights reserved.
*/
-#include <linux/module.h>
#include <linux/mlx5/qp.h>
-#include <linux/mlx5/srq.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
-
#include "mlx5_ib.h"
-#include "user.h"
-
-/* not supported currently */
-static int srq_signature;
+#include "srq.h"
static void *get_wqe(struct mlx5_ib_srq *srq, int n)
{
- return mlx5_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
+ return mlx5_frag_buf_get_wqe(&srq->fbc, n);
}
static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
@@ -74,75 +41,61 @@ static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
}
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
- struct mlx5_create_srq_mbox_in **in,
- struct ib_udata *udata, int buf_size, int *inlen)
+ struct mlx5_srq_attr *in,
+ struct ib_udata *udata, int buf_size)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_ib_create_srq ucmd;
+ struct mlx5_ib_create_srq ucmd = {};
+ struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
size_t ucmdlen;
int err;
- int npages;
- int page_shift;
- int ncont;
- u32 offset;
+ u32 uidx = MLX5_IB_DEFAULT_UIDX;
- ucmdlen =
- (udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
- sizeof(ucmd)) ? (sizeof(ucmd) -
- sizeof(ucmd.reserved)) : sizeof(ucmd);
+ ucmdlen = min(udata->inlen, sizeof(ucmd));
if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
mlx5_ib_dbg(dev, "failed copy udata\n");
return -EFAULT;
}
- if (ucmdlen == sizeof(ucmd) &&
- ucmd.reserved != 0)
+ if (ucmd.reserved0 || ucmd.reserved1)
+ return -EINVAL;
+
+ if (udata->inlen > sizeof(ucmd) &&
+ !ib_is_udata_cleared(udata, sizeof(ucmd),
+ udata->inlen - sizeof(ucmd)))
return -EINVAL;
+ if (in->type != IB_SRQT_BASIC) {
+ err = get_srq_user_index(ucontext, &ucmd, udata->inlen, &uidx);
+ if (err)
+ return err;
+ }
+
srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
- srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
- 0, 0);
+ srq->umem = ib_umem_get(pd->device, ucmd.buf_addr, buf_size, 0);
if (IS_ERR(srq->umem)) {
mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
err = PTR_ERR(srq->umem);
return err;
}
+ in->umem = srq->umem;
- mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
- &page_shift, &ncont, NULL);
- err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
- &offset);
- if (err) {
- mlx5_ib_warn(dev, "bad offset\n");
- goto err_umem;
- }
-
- *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
- *in = mlx5_vzalloc(*inlen);
- if (!(*in)) {
- err = -ENOMEM;
- goto err_umem;
- }
-
- mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0);
-
- err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
- ucmd.db_addr, &srq->db);
+ err = mlx5_ib_db_map_user(ucontext, ucmd.db_addr, &srq->db);
if (err) {
mlx5_ib_dbg(dev, "map doorbell failed\n");
- goto err_in;
+ goto err_umem;
}
- (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
- (*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);
+ in->uid = (in->type != IB_SRQT_XRC) ? to_mpd(pd)->uid : 0;
+ if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
+ in->type != IB_SRQT_BASIC)
+ in->user_index = uidx;
return 0;
-err_in:
- kvfree(*in);
-
err_umem:
ib_umem_release(srq->umem);
@@ -150,14 +103,11 @@ err_umem:
}
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
- struct mlx5_create_srq_mbox_in **in, int buf_size,
- int *inlen)
+ struct mlx5_srq_attr *in, int buf_size)
{
int err;
int i;
struct mlx5_wqe_srq_next_seg *next;
- int page_shift;
- int npages;
err = mlx5_db_alloc(dev->mdev, &srq->db);
if (err) {
@@ -165,12 +115,15 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
return err;
}
- if (mlx5_buf_alloc(dev->mdev, buf_size, &srq->buf)) {
+ if (mlx5_frag_buf_alloc_node(dev->mdev, buf_size, &srq->buf,
+ dev->mdev->priv.numa_node)) {
mlx5_ib_dbg(dev, "buf alloc failed\n");
err = -ENOMEM;
goto err_db;
}
- page_shift = srq->buf.page_shift;
+
+ mlx5_init_fbc(srq->buf.frags, srq->msrq.wqe_shift, ilog2(srq->msrq.max),
+ &srq->fbc);
srq->head = 0;
srq->tail = srq->msrq.max - 1;
@@ -182,81 +135,90 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
cpu_to_be16((i + 1) & (srq->msrq.max - 1));
}
- npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT));
- mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n",
- buf_size, page_shift, srq->buf.npages, npages);
- *inlen = sizeof(**in) + sizeof(*(*in)->pas) * npages;
- *in = mlx5_vzalloc(*inlen);
- if (!*in) {
+ mlx5_ib_dbg(dev, "srq->buf.page_shift = %d\n", srq->buf.page_shift);
+ in->pas = kvcalloc(srq->buf.npages, sizeof(*in->pas), GFP_KERNEL);
+ if (!in->pas) {
err = -ENOMEM;
goto err_buf;
}
- mlx5_fill_page_array(&srq->buf, (*in)->pas);
+ mlx5_fill_page_frag_array(&srq->buf, in->pas);
- srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
+ srq->wrid = kvmalloc_array(srq->msrq.max, sizeof(u64), GFP_KERNEL);
if (!srq->wrid) {
- mlx5_ib_dbg(dev, "kmalloc failed %lu\n",
- (unsigned long)(srq->msrq.max * sizeof(u64)));
err = -ENOMEM;
goto err_in;
}
- srq->wq_sig = !!srq_signature;
+ srq->wq_sig = 0;
- (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+ in->log_page_size = srq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+ if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
+ in->type != IB_SRQT_BASIC)
+ in->user_index = MLX5_IB_DEFAULT_UIDX;
return 0;
err_in:
- kvfree(*in);
+ kvfree(in->pas);
err_buf:
- mlx5_buf_free(dev->mdev, &srq->buf);
+ mlx5_frag_buf_free(dev->mdev, &srq->buf);
err_db:
mlx5_db_free(dev->mdev, &srq->db);
return err;
}
-static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq)
+static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
+ struct ib_udata *udata)
{
- mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
+ mlx5_ib_db_unmap_user(
+ rdma_udata_to_drv_context(
+ udata,
+ struct mlx5_ib_ucontext,
+ ibucontext),
+ &srq->db);
ib_umem_release(srq->umem);
}
static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
{
- kfree(srq->wrid);
- mlx5_buf_free(dev->mdev, &srq->buf);
+ kvfree(srq->wrid);
+ mlx5_frag_buf_free(dev->mdev, &srq->buf);
mlx5_db_free(dev->mdev, &srq->db);
}
-struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *init_attr,
- struct ib_udata *udata)
+int mlx5_ib_create_srq(struct ib_srq *ib_srq,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
{
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_ib_srq *srq;
- int desc_size;
- int buf_size;
+ struct mlx5_ib_dev *dev = to_mdev(ib_srq->device);
+ struct mlx5_ib_srq *srq = to_msrq(ib_srq);
+ size_t desc_size;
+ size_t buf_size;
int err;
- struct mlx5_create_srq_mbox_in *uninitialized_var(in);
- int uninitialized_var(inlen);
- int is_xrc;
- u32 flgs, xrcdn;
+ struct mlx5_srq_attr in = {};
__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
-
- /* Sanity check SRQ size before proceeding */
- if (init_attr->attr.max_wr >= max_srq_wqes) {
- mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
- init_attr->attr.max_wr,
- max_srq_wqes);
- return ERR_PTR(-EINVAL);
+ __u32 max_sge_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq) /
+ sizeof(struct mlx5_wqe_data_seg);
+
+ if (init_attr->srq_type != IB_SRQT_BASIC &&
+ init_attr->srq_type != IB_SRQT_XRC &&
+ init_attr->srq_type != IB_SRQT_TM)
+ return -EOPNOTSUPP;
+
+ /* Sanity check SRQ and sge size before proceeding */
+ if (init_attr->attr.max_wr >= max_srq_wqes ||
+ init_attr->attr.max_sge > max_sge_sz) {
+ mlx5_ib_dbg(dev, "max_wr %d,wr_cap %d,max_sge %d, sge_cap:%d\n",
+ init_attr->attr.max_wr, max_srq_wqes,
+ init_attr->attr.max_sge, max_sge_sz);
+ return -EINVAL;
}
- srq = kmalloc(sizeof(*srq), GFP_KERNEL);
- if (!srq)
- return ERR_PTR(-ENOMEM);
+ err = mlx5_ib_dev_res_cq_init(dev);
+ if (err)
+ return err;
mutex_init(&srq->mutex);
spin_lock_init(&srq->lock);
@@ -265,45 +227,65 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
+ if (desc_size == 0 || srq->msrq.max_gs > desc_size)
+ return -EINVAL;
+
desc_size = roundup_pow_of_two(desc_size);
- desc_size = max_t(int, 32, desc_size);
+ desc_size = max_t(size_t, 32, desc_size);
+ if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
+ return -EINVAL;
+
srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
sizeof(struct mlx5_wqe_data_seg);
srq->msrq.wqe_shift = ilog2(desc_size);
buf_size = srq->msrq.max * desc_size;
- mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
- desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
- srq->msrq.max_avail_gather);
+ if (buf_size < desc_size)
+ return -EINVAL;
- if (pd->uobject)
- err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen);
+ in.type = init_attr->srq_type;
+
+ if (udata)
+ err = create_srq_user(ib_srq->pd, srq, &in, udata, buf_size);
else
- err = create_srq_kernel(dev, srq, &in, buf_size, &inlen);
+ err = create_srq_kernel(dev, srq, &in, buf_size);
if (err) {
mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
- pd->uobject ? "user" : "kernel", err);
- goto err_srq;
+ udata ? "user" : "kernel", err);
+ return err;
}
- is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
- in->ctx.state_log_sz = ilog2(srq->msrq.max);
- flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24;
- xrcdn = 0;
- if (is_xrc) {
- xrcdn = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
- in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(init_attr->ext.xrc.cq)->mcq.cqn);
- } else if (init_attr->srq_type == IB_SRQT_BASIC) {
- xrcdn = to_mxrcd(dev->devr.x0)->xrcdn;
- in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(dev->devr.c0)->mcq.cqn);
+ in.log_size = ilog2(srq->msrq.max);
+ in.wqe_shift = srq->msrq.wqe_shift - 4;
+ if (srq->wq_sig)
+ in.flags |= MLX5_SRQ_FLAG_WQ_SIG;
+
+ if (init_attr->srq_type == IB_SRQT_XRC && init_attr->ext.xrc.xrcd)
+ in.xrcd = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
+ else
+ in.xrcd = dev->devr.xrcdn0;
+
+ if (init_attr->srq_type == IB_SRQT_TM) {
+ in.tm_log_list_size =
+ ilog2(init_attr->ext.tag_matching.max_num_tags) + 1;
+ if (in.tm_log_list_size >
+ MLX5_CAP_GEN(dev->mdev, log_tag_matching_list_sz)) {
+ mlx5_ib_dbg(dev, "TM SRQ max_num_tags exceeding limit\n");
+ err = -EINVAL;
+ goto err_usr_kern_srq;
+ }
+ in.flags |= MLX5_SRQ_FLAG_RNDV;
}
- in->ctx.flags_xrcd = cpu_to_be32((flgs & 0xFF000000) | (xrcdn & 0xFFFFFF));
+ if (ib_srq_has_cq(init_attr->srq_type))
+ in.cqn = to_mcq(init_attr->ext.cq)->mcq.cqn;
+ else
+ in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;
- in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
- in->ctx.db_record = cpu_to_be64(srq->db.dma);
- err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen, is_xrc);
- kvfree(in);
+ in.pd = to_mpd(ib_srq->pd)->pdn;
+ in.db_record = srq->db.dma;
+ err = mlx5_cmd_create_srq(dev, &srq->msrq, &in);
+ kvfree(in.pas);
if (err) {
mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
goto err_usr_kern_srq;
@@ -314,30 +296,33 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
srq->msrq.event = mlx5_ib_srq_event;
srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;
- if (pd->uobject)
- if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
+ if (udata) {
+ struct mlx5_ib_create_srq_resp resp = {
+ .srqn = srq->msrq.srqn,
+ };
+
+ if (ib_copy_to_udata(udata, &resp, min(udata->outlen,
+ sizeof(resp)))) {
mlx5_ib_dbg(dev, "copy to user failed\n");
err = -EFAULT;
goto err_core;
}
+ }
init_attr->attr.max_wr = srq->msrq.max - 1;
- return &srq->ibsrq;
+ return 0;
err_core:
- mlx5_core_destroy_srq(dev->mdev, &srq->msrq);
+ mlx5_cmd_destroy_srq(dev, &srq->msrq);
err_usr_kern_srq:
- if (pd->uobject)
- destroy_srq_user(pd, srq);
+ if (udata)
+ destroy_srq_user(ib_srq->pd, srq, udata);
else
destroy_srq_kernel(dev, srq);
-err_srq:
- kfree(srq);
-
- return ERR_PTR(err);
+ return err;
}
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
@@ -356,7 +341,7 @@ int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
return -EINVAL;
mutex_lock(&srq->mutex);
- ret = mlx5_core_arm_srq(dev->mdev, &srq->msrq, attr->srq_limit, 1);
+ ret = mlx5_cmd_arm_srq(dev, &srq->msrq, attr->srq_limit, 1);
mutex_unlock(&srq->mutex);
if (ret)
@@ -371,17 +356,17 @@ int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
struct mlx5_ib_srq *srq = to_msrq(ibsrq);
int ret;
- struct mlx5_query_srq_mbox_out *out;
+ struct mlx5_srq_attr *out;
out = kzalloc(sizeof(*out), GFP_KERNEL);
if (!out)
return -ENOMEM;
- ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out);
+ ret = mlx5_cmd_query_srq(dev, &srq->msrq, out);
if (ret)
goto out_box;
- srq_attr->srq_limit = be16_to_cpu(out->ctx.lwm);
+ srq_attr->srq_limit = out->lwm;
srq_attr->max_wr = srq->msrq.max - 1;
srq_attr->max_sge = srq->msrq.max_gs;
@@ -390,21 +375,20 @@ out_box:
return ret;
}
-int mlx5_ib_destroy_srq(struct ib_srq *srq)
+int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(srq->device);
struct mlx5_ib_srq *msrq = to_msrq(srq);
+ int ret;
- mlx5_core_destroy_srq(dev->mdev, &msrq->msrq);
+ ret = mlx5_cmd_destroy_srq(dev, &msrq->msrq);
+ if (ret)
+ return ret;
- if (srq->uobject) {
- mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
- ib_umem_release(msrq->umem);
- } else {
+ if (udata)
+ destroy_srq_user(srq->pd, msrq, udata);
+ else
destroy_srq_kernel(dev, msrq);
- }
-
- kfree(srq);
return 0;
}
@@ -422,12 +406,14 @@ void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
spin_unlock(&srq->lock);
}
-int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mlx5_ib_srq *srq = to_msrq(ibsrq);
struct mlx5_wqe_srq_next_seg *next;
struct mlx5_wqe_data_seg *scat;
+ struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
unsigned long flags;
int err = 0;
int nreq;
@@ -435,6 +421,12 @@ int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
spin_lock_irqsave(&srq->lock, flags);
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ err = -EIO;
+ *bad_wr = wr;
+ goto out;
+ }
+
for (nreq = 0; wr; nreq++, wr = wr->next) {
if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
err = -EINVAL;
@@ -462,7 +454,7 @@ int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
if (i < srq->msrq.max_avail_gather) {
scat[i].byte_count = 0;
- scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
+ scat[i].lkey = dev->mkeys.terminate_scatter_list_mkey;
scat[i].addr = 0;
}
}
@@ -477,7 +469,7 @@ int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
*srq->db.db = cpu_to_be32(srq->wqe_ctr);
}
-
+out:
spin_unlock_irqrestore(&srq->lock, flags);
return err;
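
Note on the hunks above: srq.c drops the mlx5_create_srq_mbox_in mailbox layout in favour of the driver-local struct mlx5_srq_attr, and all firmware access now goes through the mlx5_cmd_* wrappers added in srq_cmd.c below. A condensed sketch of the kernel-owned create path as it reads after this change, with capability checks and error unwinding trimmed; the helper name example_create_srq is illustrative and not part of the driver:

/* Condensed sketch of the reworked create path (error handling trimmed). */
static int example_create_srq(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
			      struct ib_pd *pd, size_t buf_size)
{
	struct mlx5_srq_attr in = {};	/* replaces mlx5_create_srq_mbox_in */
	int err;

	err = create_srq_kernel(dev, srq, &in, buf_size);	/* fills in.pas */
	if (err)
		return err;

	in.log_size = ilog2(srq->msrq.max);
	in.wqe_shift = srq->msrq.wqe_shift - 4;
	in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;
	in.pd = to_mpd(pd)->pdn;
	in.db_record = srq->db.dma;

	err = mlx5_cmd_create_srq(dev, &srq->msrq, &in);	/* was mlx5_core_create_srq() */
	kvfree(in.pas);
	return err;
}
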
diff --git a/drivers/infiniband/hw/mlx5/srq.h b/drivers/infiniband/hw/mlx5/srq.h
new file mode 100644
index 000000000000..a7e3dc5564ac
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/srq.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2013-2018, Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef MLX5_IB_SRQ_H
+#define MLX5_IB_SRQ_H
+
+enum {
+ MLX5_SRQ_FLAG_ERR = (1 << 0),
+ MLX5_SRQ_FLAG_WQ_SIG = (1 << 1),
+ MLX5_SRQ_FLAG_RNDV = (1 << 2),
+};
+
+struct mlx5_srq_attr {
+ u32 type;
+ u32 flags;
+ u32 log_size;
+ u32 wqe_shift;
+ u32 log_page_size;
+ u32 wqe_cnt;
+ u32 srqn;
+ u32 xrcd;
+ u32 page_offset;
+ u32 cqn;
+ u32 pd;
+ u32 lwm;
+ u32 user_index;
+ u64 db_record;
+ __be64 *pas;
+ struct ib_umem *umem;
+ u32 tm_log_list_size;
+ u32 tm_next_tag;
+ u32 tm_hw_phase_cnt;
+ u32 tm_sw_phase_cnt;
+ u16 uid;
+};
+
+struct mlx5_ib_dev;
+
+struct mlx5_core_srq {
+ struct mlx5_core_rsc_common common; /* must be first */
+ u32 srqn;
+ int max;
+ size_t max_gs;
+ size_t max_avail_gather;
+ int wqe_shift;
+ void (*event)(struct mlx5_core_srq *srq, enum mlx5_event e);
+
+ u16 uid;
+};
+
+struct mlx5_srq_table {
+ struct notifier_block nb;
+ struct xarray array;
+};
+
+int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *in);
+int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq);
+int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *out);
+int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ u16 lwm, int is_srq);
+struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn);
+
+int mlx5_init_srq_table(struct mlx5_ib_dev *dev);
+void mlx5_cleanup_srq_table(struct mlx5_ib_dev *dev);
+#endif /* MLX5_IB_SRQ_H */
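
The header above keys the SRQ table by srqn in an xarray; mlx5_cmd_get_srq() returns the entry with an elevated refcount that the caller drops with mlx5_core_res_put() (both appear in srq_cmd.c below). A minimal sketch of the expected consumer pattern; example_handle_srq_event is an illustrative name only:

/* Illustrative consumer of the lookup helper declared above; the caller
 * releases the reference taken by mlx5_cmd_get_srq() with
 * mlx5_core_res_put().
 */
static void example_handle_srq_event(struct mlx5_ib_dev *dev, u32 srqn,
				     enum mlx5_event type)
{
	struct mlx5_core_srq *srq;

	srq = mlx5_cmd_get_srq(dev, srqn);	/* xa_load() + refcount_inc() */
	if (!srq)
		return;

	srq->event(srq, type);
	mlx5_core_res_put(&srq->common);	/* pairs with the refcount_inc() */
}
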
diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c
new file mode 100644
index 000000000000..8b3385396599
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/srq_cmd.c
@@ -0,0 +1,774 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2013-2018, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mlx5/driver.h>
+#include "mlx5_ib.h"
+#include "srq.h"
+#include "qp.h"
+
+static int get_pas_size(struct mlx5_srq_attr *in)
+{
+ u32 log_page_size = in->log_page_size + 12;
+ u32 log_srq_size = in->log_size;
+ u32 log_rq_stride = in->wqe_shift;
+ u32 page_offset = in->page_offset;
+ u32 po_quanta = 1 << (log_page_size - 6);
+ u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride);
+ u32 page_size = 1 << log_page_size;
+ u32 rq_sz_po = rq_sz + (page_offset * po_quanta);
+ u32 rq_num_pas = DIV_ROUND_UP(rq_sz_po, page_size);
+
+ return rq_num_pas * sizeof(u64);
+}
+
+static void set_wq(void *wq, struct mlx5_srq_attr *in)
+{
+ MLX5_SET(wq, wq, wq_signature, !!(in->flags
+ & MLX5_SRQ_FLAG_WQ_SIG));
+ MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size);
+ MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4);
+ MLX5_SET(wq, wq, log_wq_sz, in->log_size);
+ MLX5_SET(wq, wq, page_offset, in->page_offset);
+ MLX5_SET(wq, wq, lwm, in->lwm);
+ MLX5_SET(wq, wq, pd, in->pd);
+ MLX5_SET64(wq, wq, dbr_addr, in->db_record);
+}
+
+static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
+{
+ MLX5_SET(srqc, srqc, wq_signature, !!(in->flags
+ & MLX5_SRQ_FLAG_WQ_SIG));
+ MLX5_SET(srqc, srqc, log_page_size, in->log_page_size);
+ MLX5_SET(srqc, srqc, log_rq_stride, in->wqe_shift);
+ MLX5_SET(srqc, srqc, log_srq_size, in->log_size);
+ MLX5_SET(srqc, srqc, page_offset, in->page_offset);
+ MLX5_SET(srqc, srqc, lwm, in->lwm);
+ MLX5_SET(srqc, srqc, pd, in->pd);
+ MLX5_SET64(srqc, srqc, dbr_addr, in->db_record);
+ MLX5_SET(srqc, srqc, xrcd, in->xrcd);
+ MLX5_SET(srqc, srqc, cqn, in->cqn);
+}
+
+static void get_wq(void *wq, struct mlx5_srq_attr *in)
+{
+ if (MLX5_GET(wq, wq, wq_signature))
+ in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
+ in->log_page_size = MLX5_GET(wq, wq, log_wq_pg_sz);
+ in->wqe_shift = MLX5_GET(wq, wq, log_wq_stride) - 4;
+ in->log_size = MLX5_GET(wq, wq, log_wq_sz);
+ in->page_offset = MLX5_GET(wq, wq, page_offset);
+ in->lwm = MLX5_GET(wq, wq, lwm);
+ in->pd = MLX5_GET(wq, wq, pd);
+ in->db_record = MLX5_GET64(wq, wq, dbr_addr);
+}
+
+static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
+{
+ if (MLX5_GET(srqc, srqc, wq_signature))
+ in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
+ in->log_page_size = MLX5_GET(srqc, srqc, log_page_size);
+ in->wqe_shift = MLX5_GET(srqc, srqc, log_rq_stride);
+ in->log_size = MLX5_GET(srqc, srqc, log_srq_size);
+ in->page_offset = MLX5_GET(srqc, srqc, page_offset);
+ in->lwm = MLX5_GET(srqc, srqc, lwm);
+ in->pd = MLX5_GET(srqc, srqc, pd);
+ in->db_record = MLX5_GET64(srqc, srqc, dbr_addr);
+}
+
+struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
+{
+ struct mlx5_srq_table *table = &dev->srq_table;
+ struct mlx5_core_srq *srq;
+
+ xa_lock_irq(&table->array);
+ srq = xa_load(&table->array, srqn);
+ if (srq)
+ refcount_inc(&srq->common.refcount);
+ xa_unlock_irq(&table->array);
+
+ return srq;
+}
+
+static int __set_srq_page_size(struct mlx5_srq_attr *in,
+ unsigned long page_size)
+{
+ if (!page_size)
+ return -EINVAL;
+ in->log_page_size = order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT;
+
+ if (WARN_ON(get_pas_size(in) !=
+ ib_umem_num_dma_blocks(in->umem, page_size) * sizeof(u64)))
+ return -EINVAL;
+ return 0;
+}
+
+#define set_srq_page_size(in, typ, log_pgsz_fld) \
+ __set_srq_page_size(in, mlx5_umem_find_best_quantized_pgoff( \
+ (in)->umem, typ, log_pgsz_fld, \
+ MLX5_ADAPTER_PAGE_SHIFT, page_offset, \
+ 64, &(in)->page_offset))
+
+static int create_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *in)
+{
+ u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
+ void *create_in;
+ void *srqc;
+ void *pas;
+ int pas_size;
+ int inlen;
+ int err;
+
+ if (in->umem) {
+ err = set_srq_page_size(in, srqc, log_page_size);
+ if (err)
+ return err;
+ }
+
+ pas_size = get_pas_size(in);
+ inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
+ create_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!create_in)
+ return -ENOMEM;
+
+ MLX5_SET(create_srq_in, create_in, uid, in->uid);
+ srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
+ pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);
+
+ set_srqc(srqc, in);
+ if (in->umem)
+ mlx5_ib_populate_pas(
+ in->umem,
+ 1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
+ pas, 0);
+ else
+ memcpy(pas, in->pas, pas_size);
+
+ MLX5_SET(create_srq_in, create_in, opcode,
+ MLX5_CMD_OP_CREATE_SRQ);
+
+ err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
+ sizeof(create_out));
+ kvfree(create_in);
+ if (!err) {
+ srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
+ srq->uid = in->uid;
+ }
+
+ return err;
+}
+
+static int destroy_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_srq_in)] = {};
+
+ MLX5_SET(destroy_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_SRQ);
+ MLX5_SET(destroy_srq_in, in, srqn, srq->srqn);
+ MLX5_SET(destroy_srq_in, in, uid, srq->uid);
+
+ return mlx5_cmd_exec_in(dev->mdev, destroy_srq, in);
+}
+
+static int arm_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ u16 lwm, int is_srq)
+{
+ u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};
+
+ MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
+ MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
+ MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
+ MLX5_SET(arm_rq_in, in, lwm, lwm);
+ MLX5_SET(arm_rq_in, in, uid, srq->uid);
+
+ return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
+}
+
+static int query_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_srq_in)] = {};
+ u32 *srq_out;
+ void *srqc;
+ int err;
+
+ srq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_srq_out), GFP_KERNEL);
+ if (!srq_out)
+ return -ENOMEM;
+
+ MLX5_SET(query_srq_in, in, opcode, MLX5_CMD_OP_QUERY_SRQ);
+ MLX5_SET(query_srq_in, in, srqn, srq->srqn);
+ err = mlx5_cmd_exec_inout(dev->mdev, query_srq, in, srq_out);
+ if (err)
+ goto out;
+
+ srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
+ get_srqc(srqc, out);
+ if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
+ out->flags |= MLX5_SRQ_FLAG_ERR;
+out:
+ kvfree(srq_out);
+ return err;
+}
+
+static int create_xrc_srq_cmd(struct mlx5_ib_dev *dev,
+ struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *in)
+{
+ u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
+ void *create_in;
+ void *xrc_srqc;
+ void *pas;
+ int pas_size;
+ int inlen;
+ int err;
+
+ if (in->umem) {
+ err = set_srq_page_size(in, xrc_srqc, log_page_size);
+ if (err)
+ return err;
+ }
+
+ pas_size = get_pas_size(in);
+ inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
+ create_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!create_in)
+ return -ENOMEM;
+
+ MLX5_SET(create_xrc_srq_in, create_in, uid, in->uid);
+ xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
+ xrc_srq_context_entry);
+ pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
+
+ set_srqc(xrc_srqc, in);
+ MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
+ if (in->umem)
+ mlx5_ib_populate_pas(
+ in->umem,
+ 1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
+ pas, 0);
+ else
+ memcpy(pas, in->pas, pas_size);
+ MLX5_SET(create_xrc_srq_in, create_in, opcode,
+ MLX5_CMD_OP_CREATE_XRC_SRQ);
+
+ memset(create_out, 0, sizeof(create_out));
+ err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
+ sizeof(create_out));
+ if (err)
+ goto out;
+
+ srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
+ srq->uid = in->uid;
+out:
+ kvfree(create_in);
+ return err;
+}
+
+static int destroy_xrc_srq_cmd(struct mlx5_ib_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {};
+
+ MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
+ MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, srq->srqn);
+ MLX5_SET(destroy_xrc_srq_in, in, uid, srq->uid);
+
+ return mlx5_cmd_exec_in(dev->mdev, destroy_xrc_srq, in);
+}
+
+static int arm_xrc_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ u16 lwm)
+{
+ u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {};
+
+ MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, in, op_mod,
+ MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, srq->srqn);
+ MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
+ MLX5_SET(arm_xrc_srq_in, in, uid, srq->uid);
+
+ return mlx5_cmd_exec_in(dev->mdev, arm_xrc_srq, in);
+}
+
+static int query_xrc_srq_cmd(struct mlx5_ib_dev *dev,
+ struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {};
+ u32 *xrcsrq_out;
+ void *xrc_srqc;
+ int err;
+
+ xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL);
+ if (!xrcsrq_out)
+ return -ENOMEM;
+
+ MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
+ MLX5_SET(query_xrc_srq_in, in, xrc_srqn, srq->srqn);
+
+ err = mlx5_cmd_exec_inout(dev->mdev, query_xrc_srq, in, xrcsrq_out);
+ if (err)
+ goto out;
+
+ xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
+ xrc_srq_context_entry);
+ get_srqc(xrc_srqc, out);
+ if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
+ out->flags |= MLX5_SRQ_FLAG_ERR;
+
+out:
+ kvfree(xrcsrq_out);
+ return err;
+}
+
+static int create_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *in)
+{
+ void *create_out = NULL;
+ void *create_in = NULL;
+ void *rmpc;
+ void *wq;
+ void *pas;
+ int pas_size;
+ int outlen;
+ int inlen;
+ int err;
+
+ if (in->umem) {
+ err = set_srq_page_size(in, wq, log_wq_pg_sz);
+ if (err)
+ return err;
+ }
+
+ pas_size = get_pas_size(in);
+ inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
+ outlen = MLX5_ST_SZ_BYTES(create_rmp_out);
+ create_in = kvzalloc(inlen, GFP_KERNEL);
+ create_out = kvzalloc(outlen, GFP_KERNEL);
+ if (!create_in || !create_out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
+ wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+ MLX5_SET(create_rmp_in, create_in, uid, in->uid);
+ pas = MLX5_ADDR_OF(rmpc, rmpc, wq.pas);
+
+ set_wq(wq, in);
+ if (in->umem)
+ mlx5_ib_populate_pas(
+ in->umem,
+ 1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
+ pas, 0);
+ else
+ memcpy(pas, in->pas, pas_size);
+
+ MLX5_SET(create_rmp_in, create_in, opcode, MLX5_CMD_OP_CREATE_RMP);
+ err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out, outlen);
+ if (!err) {
+ srq->srqn = MLX5_GET(create_rmp_out, create_out, rmpn);
+ srq->uid = in->uid;
+ }
+
+out:
+ kvfree(create_in);
+ kvfree(create_out);
+ return err;
+}
+
+static int destroy_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {};
+
+ MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
+ MLX5_SET(destroy_rmp_in, in, rmpn, srq->srqn);
+ MLX5_SET(destroy_rmp_in, in, uid, srq->uid);
+ return mlx5_cmd_exec_in(dev->mdev, destroy_rmp, in);
+}
+
+static int arm_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ u16 lwm)
+{
+ void *out = NULL;
+ void *in = NULL;
+ void *rmpc;
+ void *wq;
+ void *bitmask;
+ int outlen;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_rmp_in);
+ outlen = MLX5_ST_SZ_BYTES(modify_rmp_out);
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ out = kvzalloc(outlen, GFP_KERNEL);
+ if (!in || !out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
+ bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
+ wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+
+ MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
+ MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn);
+ MLX5_SET(modify_rmp_in, in, uid, srq->uid);
+ MLX5_SET(wq, wq, lwm, lwm);
+ MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+ MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
+
+ err = mlx5_cmd_exec_inout(dev->mdev, modify_rmp, in, out);
+
+out:
+ kvfree(in);
+ kvfree(out);
+ return err;
+}
+
+static int query_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *out)
+{
+ u32 *rmp_out = NULL;
+ u32 *rmp_in = NULL;
+ void *rmpc;
+ int outlen;
+ int inlen;
+ int err;
+
+ outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
+ inlen = MLX5_ST_SZ_BYTES(query_rmp_in);
+
+ rmp_out = kvzalloc(outlen, GFP_KERNEL);
+ rmp_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!rmp_out || !rmp_in) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(query_rmp_in, rmp_in, opcode, MLX5_CMD_OP_QUERY_RMP);
+ MLX5_SET(query_rmp_in, rmp_in, rmpn, srq->srqn);
+ err = mlx5_cmd_exec_inout(dev->mdev, query_rmp, rmp_in, rmp_out);
+ if (err)
+ goto out;
+
+ rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
+ get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
+ if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
+ out->flags |= MLX5_SRQ_FLAG_ERR;
+
+out:
+ kvfree(rmp_out);
+ kvfree(rmp_in);
+ return err;
+}
+
+static int create_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *in)
+{
+ u32 create_out[MLX5_ST_SZ_DW(create_xrq_out)] = {0};
+ void *create_in;
+ void *xrqc;
+ void *wq;
+ void *pas;
+ int pas_size;
+ int inlen;
+ int err;
+
+ if (in->umem) {
+ err = set_srq_page_size(in, wq, log_wq_pg_sz);
+ if (err)
+ return err;
+ }
+
+ pas_size = get_pas_size(in);
+ inlen = MLX5_ST_SZ_BYTES(create_xrq_in) + pas_size;
+ create_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!create_in)
+ return -ENOMEM;
+
+ xrqc = MLX5_ADDR_OF(create_xrq_in, create_in, xrq_context);
+ wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
+ pas = MLX5_ADDR_OF(xrqc, xrqc, wq.pas);
+
+ set_wq(wq, in);
+ if (in->umem)
+ mlx5_ib_populate_pas(
+ in->umem,
+ 1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
+ pas, 0);
+ else
+ memcpy(pas, in->pas, pas_size);
+
+ if (in->type == IB_SRQT_TM) {
+ MLX5_SET(xrqc, xrqc, topology, MLX5_XRQC_TOPOLOGY_TAG_MATCHING);
+ if (in->flags & MLX5_SRQ_FLAG_RNDV)
+ MLX5_SET(xrqc, xrqc, offload, MLX5_XRQC_OFFLOAD_RNDV);
+ MLX5_SET(xrqc, xrqc,
+ tag_matching_topology_context.log_matching_list_sz,
+ in->tm_log_list_size);
+ }
+ MLX5_SET(xrqc, xrqc, user_index, in->user_index);
+ MLX5_SET(xrqc, xrqc, cqn, in->cqn);
+ MLX5_SET(create_xrq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRQ);
+ MLX5_SET(create_xrq_in, create_in, uid, in->uid);
+ err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
+ sizeof(create_out));
+ kvfree(create_in);
+ if (!err) {
+ srq->srqn = MLX5_GET(create_xrq_out, create_out, xrqn);
+ srq->uid = in->uid;
+ }
+
+ return err;
+}
+
+static int destroy_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {};
+
+ MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ);
+ MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
+ MLX5_SET(destroy_xrq_in, in, uid, srq->uid);
+
+ return mlx5_cmd_exec_in(dev->mdev, destroy_xrq, in);
+}
+
+static int arm_xrq_cmd(struct mlx5_ib_dev *dev,
+ struct mlx5_core_srq *srq,
+ u16 lwm)
+{
+ u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};
+
+ MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
+ MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
+ MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
+ MLX5_SET(arm_rq_in, in, lwm, lwm);
+ MLX5_SET(arm_rq_in, in, uid, srq->uid);
+
+ return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
+}
+
+static int query_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {};
+ u32 *xrq_out;
+ int outlen = MLX5_ST_SZ_BYTES(query_xrq_out);
+ void *xrqc;
+ int err;
+
+ xrq_out = kvzalloc(outlen, GFP_KERNEL);
+ if (!xrq_out)
+ return -ENOMEM;
+
+ MLX5_SET(query_xrq_in, in, opcode, MLX5_CMD_OP_QUERY_XRQ);
+ MLX5_SET(query_xrq_in, in, xrqn, srq->srqn);
+
+ err = mlx5_cmd_exec_inout(dev->mdev, query_xrq, in, xrq_out);
+ if (err)
+ goto out;
+
+ xrqc = MLX5_ADDR_OF(query_xrq_out, xrq_out, xrq_context);
+ get_wq(MLX5_ADDR_OF(xrqc, xrqc, wq), out);
+ if (MLX5_GET(xrqc, xrqc, state) != MLX5_XRQC_STATE_GOOD)
+ out->flags |= MLX5_SRQ_FLAG_ERR;
+ out->tm_next_tag =
+ MLX5_GET(xrqc, xrqc,
+ tag_matching_topology_context.append_next_index);
+ out->tm_hw_phase_cnt =
+ MLX5_GET(xrqc, xrqc,
+ tag_matching_topology_context.hw_phase_cnt);
+ out->tm_sw_phase_cnt =
+ MLX5_GET(xrqc, xrqc,
+ tag_matching_topology_context.sw_phase_cnt);
+
+out:
+ kvfree(xrq_out);
+ return err;
+}
+
+static int create_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *in)
+{
+ if (!dev->mdev->issi)
+ return create_srq_cmd(dev, srq, in);
+ switch (srq->common.res) {
+ case MLX5_RES_XSRQ:
+ return create_xrc_srq_cmd(dev, srq, in);
+ case MLX5_RES_XRQ:
+ return create_xrq_cmd(dev, srq, in);
+ default:
+ return create_rmp_cmd(dev, srq, in);
+ }
+}
+
+static int destroy_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
+{
+ if (!dev->mdev->issi)
+ return destroy_srq_cmd(dev, srq);
+ switch (srq->common.res) {
+ case MLX5_RES_XSRQ:
+ return destroy_xrc_srq_cmd(dev, srq);
+ case MLX5_RES_XRQ:
+ return destroy_xrq_cmd(dev, srq);
+ default:
+ return destroy_rmp_cmd(dev, srq);
+ }
+}
+
+int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *in)
+{
+ struct mlx5_srq_table *table = &dev->srq_table;
+ int err;
+
+ switch (in->type) {
+ case IB_SRQT_XRC:
+ srq->common.res = MLX5_RES_XSRQ;
+ break;
+ case IB_SRQT_TM:
+ srq->common.res = MLX5_RES_XRQ;
+ break;
+ default:
+ srq->common.res = MLX5_RES_SRQ;
+ }
+
+ err = create_srq_split(dev, srq, in);
+ if (err)
+ return err;
+
+ refcount_set(&srq->common.refcount, 1);
+ init_completion(&srq->common.free);
+
+ err = xa_err(xa_store_irq(&table->array, srq->srqn, srq, GFP_KERNEL));
+ if (err)
+ goto err_destroy_srq_split;
+
+ return 0;
+
+err_destroy_srq_split:
+ destroy_srq_split(dev, srq);
+
+ return err;
+}
+
+int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
+{
+ struct mlx5_srq_table *table = &dev->srq_table;
+ struct mlx5_core_srq *tmp;
+ int err;
+
+ /* Delete entry, but leave index occupied */
+ tmp = xa_cmpxchg_irq(&table->array, srq->srqn, srq, XA_ZERO_ENTRY, 0);
+ if (WARN_ON(tmp != srq))
+ return xa_err(tmp) ?: -EINVAL;
+
+ err = destroy_srq_split(dev, srq);
+ if (err) {
+ /*
+ * We don't need to check the returned result for an error,
+ * because we are storing the entry back into its pre-allocated
+ * xarray slot, which can't fail at this stage.
+ */
+ xa_cmpxchg_irq(&table->array, srq->srqn, XA_ZERO_ENTRY, srq, 0);
+ return err;
+ }
+ xa_erase_irq(&table->array, srq->srqn);
+
+ mlx5_core_res_put(&srq->common);
+ wait_for_completion(&srq->common.free);
+ return 0;
+}
+
+int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *out)
+{
+ if (!dev->mdev->issi)
+ return query_srq_cmd(dev, srq, out);
+ switch (srq->common.res) {
+ case MLX5_RES_XSRQ:
+ return query_xrc_srq_cmd(dev, srq, out);
+ case MLX5_RES_XRQ:
+ return query_xrq_cmd(dev, srq, out);
+ default:
+ return query_rmp_cmd(dev, srq, out);
+ }
+}
+
+int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ u16 lwm, int is_srq)
+{
+ if (!dev->mdev->issi)
+ return arm_srq_cmd(dev, srq, lwm, is_srq);
+ switch (srq->common.res) {
+ case MLX5_RES_XSRQ:
+ return arm_xrc_srq_cmd(dev, srq, lwm);
+ case MLX5_RES_XRQ:
+ return arm_xrq_cmd(dev, srq, lwm);
+ default:
+ return arm_rmp_cmd(dev, srq, lwm);
+ }
+}
+
+static int srq_event_notifier(struct notifier_block *nb,
+ unsigned long type, void *data)
+{
+ struct mlx5_srq_table *table;
+ struct mlx5_core_srq *srq;
+ struct mlx5_eqe *eqe;
+ u32 srqn;
+
+ if (type != MLX5_EVENT_TYPE_SRQ_CATAS_ERROR &&
+ type != MLX5_EVENT_TYPE_SRQ_RQ_LIMIT)
+ return NOTIFY_DONE;
+
+ table = container_of(nb, struct mlx5_srq_table, nb);
+
+ eqe = data;
+ srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
+
+ xa_lock(&table->array);
+ srq = xa_load(&table->array, srqn);
+ if (srq)
+ refcount_inc(&srq->common.refcount);
+ xa_unlock(&table->array);
+
+ if (!srq)
+ return NOTIFY_OK;
+
+ srq->event(srq, eqe->type);
+
+ mlx5_core_res_put(&srq->common);
+
+ return NOTIFY_OK;
+}
+
+int mlx5_init_srq_table(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_srq_table *table = &dev->srq_table;
+
+ memset(table, 0, sizeof(*table));
+ xa_init_flags(&table->array, XA_FLAGS_LOCK_IRQ);
+
+ table->nb.notifier_call = srq_event_notifier;
+ mlx5_notifier_register(dev->mdev, &table->nb);
+
+ return 0;
+}
+
+void mlx5_cleanup_srq_table(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_srq_table *table = &dev->srq_table;
+
+ mlx5_notifier_unregister(dev->mdev, &table->nb);
+}
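
All of the helpers above follow the same shape: pick the firmware object from dev->mdev->issi plus srq->common.res, build the command with MLX5_SET()/MLX5_ADDR_OF() over a zeroed layout, and issue it with mlx5_cmd_exec_in()/mlx5_cmd_exec_inout(). A hedged summary of the dispatch only; the helper name below is illustrative and does not exist in the file:

/* Summarizes how the *_split() helpers choose the firmware object; with
 * issi == 0 the legacy SRQ command set is used regardless of type.
 */
static const char *example_srq_object_name(struct mlx5_core_dev *mdev,
					   struct mlx5_core_srq *srq)
{
	if (!mdev->issi)
		return "SRQ (legacy command set)";

	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return "XRC SRQ";
	case MLX5_RES_XRQ:
		return "XRQ (tag matching)";
	default:
		return "RMP";
	}
}
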
diff --git a/drivers/infiniband/hw/mlx5/std_types.c b/drivers/infiniband/hw/mlx5/std_types.c
new file mode 100644
index 000000000000..2fcf553044e1
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/std_types.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/mlx5_user_ioctl_verbs.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/eswitch.h>
+#include <linux/mlx5/vport.h>
+#include "mlx5_ib.h"
+#include "data_direct.h"
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_PD_QUERY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_pd *pd =
+ uverbs_attr_get_obj(attrs, MLX5_IB_ATTR_QUERY_PD_HANDLE);
+ struct mlx5_ib_pd *mpd = to_mpd(pd);
+
+ return uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_PD_RESP_PDN,
+ &mpd->pdn, sizeof(mpd->pdn));
+}
+
+static int fill_vport_icm_addr(struct mlx5_core_dev *mdev, u16 vport,
+ struct mlx5_ib_uapi_query_port *info)
+{
+ u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
+ bool sw_owner_supp;
+ u64 icm_rx;
+ u64 icm_tx;
+ int err;
+
+ sw_owner_supp = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner) ||
+ MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2);
+
+ if (vport == MLX5_VPORT_UPLINK) {
+ icm_rx = MLX5_CAP64_ESW_FLOWTABLE(mdev,
+ sw_steering_uplink_icm_address_rx);
+ icm_tx = MLX5_CAP64_ESW_FLOWTABLE(mdev,
+ sw_steering_uplink_icm_address_tx);
+ } else {
+ MLX5_SET(query_esw_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
+ MLX5_SET(query_esw_vport_context_in, in, vport_number, vport);
+ MLX5_SET(query_esw_vport_context_in, in, other_vport, true);
+
+ err = mlx5_cmd_exec_inout(mdev, query_esw_vport_context, in,
+ out);
+
+ if (err)
+ return err;
+
+ icm_rx = MLX5_GET64(
+ query_esw_vport_context_out, out,
+ esw_vport_context.sw_steering_vport_icm_address_rx);
+
+ icm_tx = MLX5_GET64(
+ query_esw_vport_context_out, out,
+ esw_vport_context.sw_steering_vport_icm_address_tx);
+ }
+
+ if (sw_owner_supp && icm_rx) {
+ info->vport_steering_icm_rx = icm_rx;
+ info->flags |=
+ MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_RX;
+ }
+
+ if (sw_owner_supp && icm_tx) {
+ info->vport_steering_icm_tx = icm_tx;
+ info->flags |=
+ MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_TX;
+ }
+
+ return 0;
+}
+
+static int fill_vport_vhca_id(struct mlx5_core_dev *mdev, u16 vport,
+ struct mlx5_ib_uapi_query_port *info)
+{
+ int err = mlx5_vport_get_vhca_id(mdev, vport, &info->vport_vhca_id);
+
+ if (err)
+ return err;
+
+ info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID;
+
+ return 0;
+}
+
+static int fill_multiport_info(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_ib_uapi_query_port *info)
+{
+ struct mlx5_core_dev *mdev;
+
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, NULL);
+ if (!mdev)
+ return -EINVAL;
+
+ info->vport_vhca_id = MLX5_CAP_GEN(mdev, vhca_id);
+ info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID;
+
+ mlx5_ib_put_native_port_mdev(dev, port_num);
+
+ return 0;
+}
+
+static int fill_switchdev_info(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_ib_uapi_query_port *info)
+{
+ struct mlx5_eswitch_rep *rep;
+ struct mlx5_core_dev *mdev;
+ int err;
+
+ rep = dev->port[port_num - 1].rep;
+ if (!rep)
+ return -EOPNOTSUPP;
+
+ mdev = mlx5_eswitch_get_core_dev(rep->esw);
+ if (!mdev)
+ return -EINVAL;
+
+ info->vport = rep->vport;
+ info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT;
+
+ if (rep->vport != MLX5_VPORT_UPLINK) {
+ err = fill_vport_vhca_id(mdev, rep->vport, info);
+ if (err)
+ return err;
+ }
+
+ info->esw_owner_vhca_id = MLX5_CAP_GEN(mdev, vhca_id);
+ info->flags |= MLX5_IB_UAPI_QUERY_PORT_ESW_OWNER_VHCA_ID;
+
+ err = fill_vport_icm_addr(mdev, rep->vport, info);
+ if (err)
+ return err;
+
+ if (mlx5_eswitch_vport_match_metadata_enabled(rep->esw)) {
+ info->reg_c0.value = mlx5_eswitch_get_vport_metadata_for_match(
+ rep->esw, rep->vport);
+ info->reg_c0.mask = mlx5_eswitch_get_vport_metadata_mask();
+ info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_REG_C0;
+ }
+
+ return 0;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_QUERY_PORT)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_uapi_query_port info = {};
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_dev *dev;
+ u32 port_num;
+ int ret;
+
+ if (uverbs_copy_from(&port_num, attrs,
+ MLX5_IB_ATTR_QUERY_PORT_PORT_NUM))
+ return -EFAULT;
+
+ c = to_mucontext(ib_uverbs_get_ucontext(attrs));
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ dev = to_mdev(c->ibucontext.device);
+
+ if (!rdma_is_port_valid(&dev->ib_dev, port_num))
+ return -EINVAL;
+
+ if (mlx5_eswitch_mode(dev->mdev) == MLX5_ESWITCH_OFFLOADS) {
+ ret = fill_switchdev_info(dev, port_num, &info);
+ if (ret)
+ return ret;
+ } else if (mlx5_core_mp_enabled(dev->mdev)) {
+ ret = fill_multiport_info(dev, port_num, &info);
+ if (ret)
+ return ret;
+ }
+
+ return uverbs_copy_to_struct_or_zero(attrs, MLX5_IB_ATTR_QUERY_PORT, &info,
+ sizeof(info));
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_GET_DATA_DIRECT_SYSFS_PATH)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_data_direct_dev *data_direct_dev;
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_dev *dev;
+ int out_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_GET_DATA_DIRECT_SYSFS_PATH);
+ u32 dev_path_len;
+ char *dev_path;
+ int ret;
+
+ c = to_mucontext(ib_uverbs_get_ucontext(attrs));
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ dev = to_mdev(c->ibucontext.device);
+ mutex_lock(&dev->data_direct_lock);
+ data_direct_dev = dev->data_direct_dev;
+ if (!data_direct_dev) {
+ ret = -ENODEV;
+ goto end;
+ }
+
+ dev_path = kobject_get_path(&data_direct_dev->device->kobj, GFP_KERNEL);
+ if (!dev_path) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ dev_path_len = strlen(dev_path) + 1;
+ if (dev_path_len > out_len) {
+ ret = -ENOSPC;
+ goto end;
+ }
+
+ ret = uverbs_copy_to(attrs, MLX5_IB_ATTR_GET_DATA_DIRECT_SYSFS_PATH, dev_path,
+ dev_path_len);
+ kfree(dev_path);
+
+end:
+ mutex_unlock(&dev->data_direct_lock);
+ return ret;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_QUERY_PORT,
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_QUERY_PORT_PORT_NUM,
+ UVERBS_ATTR_TYPE(u32), UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_QUERY_PORT,
+ UVERBS_ATTR_STRUCT(struct mlx5_ib_uapi_query_port,
+ reg_c0),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_GET_DATA_DIRECT_SYSFS_PATH,
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_GET_DATA_DIRECT_SYSFS_PATH,
+ UVERBS_ATTR_MIN_SIZE(0),
+ UA_MANDATORY));
+
+ADD_UVERBS_METHODS(mlx5_ib_device,
+ UVERBS_OBJECT_DEVICE,
+ &UVERBS_METHOD(MLX5_IB_METHOD_QUERY_PORT),
+ &UVERBS_METHOD(MLX5_IB_METHOD_GET_DATA_DIRECT_SYSFS_PATH));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_PD_QUERY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_QUERY_PD_HANDLE,
+ UVERBS_OBJECT_PD,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_PD_RESP_PDN,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+ADD_UVERBS_METHODS(mlx5_ib_pd,
+ UVERBS_OBJECT_PD,
+ &UVERBS_METHOD(MLX5_IB_METHOD_PD_QUERY));
+
+const struct uapi_definition mlx5_ib_std_types_defs[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE(
+ UVERBS_OBJECT_PD,
+ &mlx5_ib_pd),
+ UAPI_DEF_CHAIN_OBJ_TREE(
+ UVERBS_OBJECT_DEVICE,
+ &mlx5_ib_device),
+ {},
+};
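
The std_types.c methods above are wired in through DECLARE_UVERBS_NAMED_METHOD(), ADD_UVERBS_METHODS() and UAPI_DEF_CHAIN_OBJ_TREE(). A sketch of how a further method would be declared with the same macros; MLX5_IB_METHOD_EXAMPLE_QUERY and its attribute IDs are hypothetical and would need matching entries in rdma/mlx5_user_ioctl_cmds.h:

/* Hypothetical method using the same declaration pattern as above. */
static int UVERBS_HANDLER(MLX5_IB_METHOD_EXAMPLE_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	u32 in;
	u64 resp = 0;

	if (uverbs_copy_from(&in, attrs, MLX5_IB_ATTR_EXAMPLE_QUERY_INPUT))
		return -EFAULT;

	/* ... fill resp from driver state keyed by 'in' ... */
	return uverbs_copy_to(attrs, MLX5_IB_ATTR_EXAMPLE_QUERY_RESP,
			      &resp, sizeof(resp));
}

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_EXAMPLE_QUERY,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_EXAMPLE_QUERY_INPUT,
			   UVERBS_ATTR_TYPE(u32), UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_EXAMPLE_QUERY_RESP,
			    UVERBS_ATTR_TYPE(u64), UA_MANDATORY));
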
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
new file mode 100644
index 000000000000..4e562e0dd9e1
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -0,0 +1,1129 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */
+
+#include <rdma/ib_umem_odp.h>
+#include "mlx5_ib.h"
+#include "umr.h"
+#include "wr.h"
+
+/*
+ * We can't use an array for xlt_emergency_page because dma_map_single doesn't
+ * work on kernel modules memory
+ */
+void *xlt_emergency_page;
+static DEFINE_MUTEX(xlt_emergency_page_mutex);
+
+static __be64 get_umr_enable_mr_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_disable_mr_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_update_translation_mask(struct mlx5_ib_dev *dev)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LEN |
+ MLX5_MKEY_MASK_PAGE_SIZE |
+ MLX5_MKEY_MASK_START_ADDR;
+ if (MLX5_CAP_GEN_2(dev->mdev, umr_log_entity_size_5))
+ result |= MLX5_MKEY_MASK_PAGE_SIZE_5;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_update_access_mask(struct mlx5_ib_dev *dev)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LR |
+ MLX5_MKEY_MASK_LW |
+ MLX5_MKEY_MASK_RR |
+ MLX5_MKEY_MASK_RW;
+
+ if (MLX5_CAP_GEN(dev->mdev, atomic))
+ result |= MLX5_MKEY_MASK_A;
+
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
+ result |= MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE;
+
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
+ result |= MLX5_MKEY_MASK_RELAXED_ORDERING_READ;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_update_pd_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_PD;
+
+ return cpu_to_be64(result);
+}
+
+static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
+{
+ if (mask & MLX5_MKEY_MASK_PAGE_SIZE &&
+ MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
+ return -EPERM;
+
+ if (mask & MLX5_MKEY_MASK_A &&
+ MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
+ return -EPERM;
+
+ if (mask & MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE &&
+ !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
+ return -EPERM;
+
+ if (mask & MLX5_MKEY_MASK_RELAXED_ORDERING_READ &&
+ !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
+ return -EPERM;
+
+ return 0;
+}
+
+enum {
+ MAX_UMR_WR = 128,
+};
+
+static int mlx5r_umr_qp_rst2rts(struct mlx5_ib_dev *dev, struct ib_qp *qp)
+{
+ struct ib_qp_attr attr = {};
+ int ret;
+
+ attr.qp_state = IB_QPS_INIT;
+ attr.port_num = 1;
+ ret = ib_modify_qp(qp, &attr,
+ IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT);
+ if (ret) {
+ mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
+ return ret;
+ }
+
+ memset(&attr, 0, sizeof(attr));
+ attr.qp_state = IB_QPS_RTR;
+
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret) {
+ mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
+ return ret;
+ }
+
+ memset(&attr, 0, sizeof(attr));
+ attr.qp_state = IB_QPS_RTS;
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret) {
+ mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev)
+{
+ struct ib_qp_init_attr init_attr = {};
+ struct ib_cq *cq;
+ struct ib_qp *qp;
+ int ret = 0;
+
+
+ /*
+ * UMR qp is set once, never changed until device unload.
+ * Avoid taking the mutex if initialization is already done.
+ */
+ if (dev->umrc.qp)
+ return 0;
+
+ mutex_lock(&dev->umrc.init_lock);
+ /* First user allocates the UMR resources. Skip if already allocated. */
+ if (dev->umrc.qp)
+ goto unlock;
+
+ cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
+ if (IS_ERR(cq)) {
+ mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
+ ret = PTR_ERR(cq);
+ goto unlock;
+ }
+
+ init_attr.send_cq = cq;
+ init_attr.recv_cq = cq;
+ init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
+ init_attr.cap.max_send_wr = MAX_UMR_WR;
+ init_attr.cap.max_send_sge = 1;
+ init_attr.qp_type = MLX5_IB_QPT_REG_UMR;
+ init_attr.port_num = 1;
+ qp = ib_create_qp(dev->umrc.pd, &init_attr);
+ if (IS_ERR(qp)) {
+ mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
+ ret = PTR_ERR(qp);
+ goto destroy_cq;
+ }
+
+ ret = mlx5r_umr_qp_rst2rts(dev, qp);
+ if (ret)
+ goto destroy_qp;
+
+ dev->umrc.cq = cq;
+
+ sema_init(&dev->umrc.sem, MAX_UMR_WR);
+ mutex_init(&dev->umrc.lock);
+ dev->umrc.state = MLX5_UMR_STATE_ACTIVE;
+ dev->umrc.qp = qp;
+
+ mutex_unlock(&dev->umrc.init_lock);
+ return 0;
+
+destroy_qp:
+ ib_destroy_qp(qp);
+destroy_cq:
+ ib_free_cq(cq);
+unlock:
+ mutex_unlock(&dev->umrc.init_lock);
+ return ret;
+}
+
+void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev)
+{
+ if (dev->umrc.state == MLX5_UMR_STATE_UNINIT)
+ return;
+ mutex_destroy(&dev->umrc.lock);
+ /* After device init, UMR cq/qp are not unset during the lifetime. */
+ ib_destroy_qp(dev->umrc.qp);
+ ib_free_cq(dev->umrc.cq);
+}
+
+int mlx5r_umr_init(struct mlx5_ib_dev *dev)
+{
+ struct ib_pd *pd;
+
+ pd = ib_alloc_pd(&dev->ib_dev, 0);
+ if (IS_ERR(pd)) {
+ mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
+ return PTR_ERR(pd);
+ }
+ dev->umrc.pd = pd;
+
+ mutex_init(&dev->umrc.init_lock);
+
+ return 0;
+}
+
+void mlx5r_umr_cleanup(struct mlx5_ib_dev *dev)
+{
+ if (!dev->umrc.pd)
+ return;
+
+ mutex_destroy(&dev->umrc.init_lock);
+ ib_dealloc_pd(dev->umrc.pd);
+}
+
+
+static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
+ struct mlx5r_umr_wqe *wqe, bool with_data)
+{
+ unsigned int wqe_size =
+ with_data ? sizeof(struct mlx5r_umr_wqe) :
+ sizeof(struct mlx5r_umr_wqe) -
+ sizeof(struct mlx5_wqe_data_seg);
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_wqe_ctrl_seg *ctrl;
+ union {
+ struct ib_cqe *ib_cqe;
+ u64 wr_id;
+ } id;
+ void *cur_edge, *seg;
+ unsigned long flags;
+ unsigned int idx;
+ int size, err;
+
+ if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
+ return -EIO;
+
+ spin_lock_irqsave(&qp->sq.lock, flags);
+
+ err = mlx5r_begin_wqe(qp, &seg, &ctrl, &idx, &size, &cur_edge, 0,
+ cpu_to_be32(mkey), false, false);
+ if (WARN_ON(err))
+ goto out;
+
+ qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
+
+ mlx5r_memcpy_send_wqe(&qp->sq, &cur_edge, &seg, &size, wqe, wqe_size);
+
+ id.ib_cqe = cqe;
+ mlx5r_finish_wqe(qp, ctrl, seg, size, cur_edge, idx, id.wr_id, 0,
+ MLX5_FENCE_MODE_INITIATOR_SMALL, MLX5_OPCODE_UMR);
+
+ mlx5r_ring_db(qp, 1, ctrl);
+
+out:
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+
+ return err;
+}
+
+static int mlx5r_umr_recover(struct mlx5_ib_dev *dev, u32 mkey,
+ struct mlx5r_umr_context *umr_context,
+ struct mlx5r_umr_wqe *wqe, bool with_data)
+{
+ struct umr_common *umrc = &dev->umrc;
+ struct ib_qp_attr attr;
+ int err;
+
+ mutex_lock(&umrc->lock);
+ /* Preventing any further WRs to be sent now */
+ if (umrc->state != MLX5_UMR_STATE_RECOVER) {
+ mlx5_ib_warn(dev, "UMR recovery encountered an unexpected state=%d\n",
+ umrc->state);
+ umrc->state = MLX5_UMR_STATE_RECOVER;
+ }
+ mutex_unlock(&umrc->lock);
+
+ /* Send a final/barrier WR (the failed one) and wait for its completion.
+ * This ensures that all the previous WRs get a completion before we
+ * set the QP state to RESET.
+ */
+ err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context->cqe, wqe,
+ with_data);
+ if (err) {
+ mlx5_ib_warn(dev, "UMR recovery post send failed, err %d\n", err);
+ goto err;
+ }
+
+ /* Since the QP is in an error state, it will only receive
+ * IB_WC_WR_FLUSH_ERR. However, as it serves only as a barrier
+ * we don't care about its status.
+ */
+ wait_for_completion(&umr_context->done);
+
+ attr.qp_state = IB_QPS_RESET;
+ err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE);
+ if (err) {
+ mlx5_ib_warn(dev, "Couldn't modify UMR QP to RESET, err=%d\n", err);
+ goto err;
+ }
+
+ err = mlx5r_umr_qp_rst2rts(dev, umrc->qp);
+ if (err) {
+ mlx5_ib_warn(dev, "Couldn't modify UMR QP to RTS, err=%d\n", err);
+ goto err;
+ }
+
+ umrc->state = MLX5_UMR_STATE_ACTIVE;
+ return 0;
+
+err:
+ umrc->state = MLX5_UMR_STATE_ERR;
+ return err;
+}
+
+static void mlx5r_umr_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct mlx5_ib_umr_context *context =
+ container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);
+
+ context->status = wc->status;
+ complete(&context->done);
+}
+
+static inline void mlx5r_umr_init_context(struct mlx5r_umr_context *context)
+{
+ context->cqe.done = mlx5r_umr_done;
+ init_completion(&context->done);
+}
+
+static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey,
+ struct mlx5r_umr_wqe *wqe, bool with_data)
+{
+ struct umr_common *umrc = &dev->umrc;
+ struct mlx5r_umr_context umr_context;
+ int err;
+
+ err = umr_check_mkey_mask(dev, be64_to_cpu(wqe->ctrl_seg.mkey_mask));
+ if (WARN_ON(err))
+ return err;
+
+ mlx5r_umr_init_context(&umr_context);
+
+ down(&umrc->sem);
+ while (true) {
+ mutex_lock(&umrc->lock);
+ if (umrc->state == MLX5_UMR_STATE_ERR) {
+ mutex_unlock(&umrc->lock);
+ err = -EFAULT;
+ break;
+ }
+
+ if (umrc->state == MLX5_UMR_STATE_RECOVER) {
+ mutex_unlock(&umrc->lock);
+ usleep_range(3000, 5000);
+ continue;
+ }
+
+ err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context.cqe, wqe,
+ with_data);
+ mutex_unlock(&umrc->lock);
+ if (err) {
+ mlx5_ib_warn(dev, "UMR post send failed, err %d\n",
+ err);
+ break;
+ }
+
+ wait_for_completion(&umr_context.done);
+
+ if (umr_context.status == IB_WC_SUCCESS)
+ break;
+
+ if (umr_context.status == IB_WC_WR_FLUSH_ERR)
+ continue;
+
+ WARN_ON_ONCE(1);
+ mlx5_ib_warn(dev,
+ "reg umr failed (%u). Trying to recover and resubmit the flushed WQEs, mkey = %u\n",
+ umr_context.status, mkey);
+ err = mlx5r_umr_recover(dev, mkey, &umr_context, wqe, with_data);
+ if (err)
+ mlx5_ib_warn(dev, "couldn't recover UMR, err %d\n",
+ err);
+ err = -EFAULT;
+ break;
+ }
+ up(&umrc->sem);
+ return err;
+}
+
+/**
+ * mlx5r_umr_revoke_mr - Fence all DMA on the MR
+ * @mr: The MR to fence
+ *
+ * Upon return the NIC will not be doing any DMA to the pages under the MR,
+ * and any DMA in progress will be completed. Failure of this function
+ * indicates the HW has failed catastrophically.
+ */
+int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr)
+{
+ struct mlx5_ib_dev *dev = mr_to_mdev(mr);
+ struct mlx5r_umr_wqe wqe = {};
+
+ if (dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ return 0;
+
+ wqe.ctrl_seg.mkey_mask |= get_umr_update_pd_mask();
+ wqe.ctrl_seg.mkey_mask |= get_umr_disable_mr_mask();
+ wqe.ctrl_seg.flags |= MLX5_UMR_INLINE;
+
+ MLX5_SET(mkc, &wqe.mkey_seg, free, 1);
+ MLX5_SET(mkc, &wqe.mkey_seg, pd, to_mpd(dev->umrc.pd)->pdn);
+ MLX5_SET(mkc, &wqe.mkey_seg, qpn, 0xffffff);
+ MLX5_SET(mkc, &wqe.mkey_seg, mkey_7_0,
+ mlx5_mkey_variant(mr->mmkey.key));
+
+ return mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, false);
+}
+
+static void mlx5r_umr_set_access_flags(struct mlx5_ib_dev *dev,
+ struct mlx5_mkey_seg *seg,
+ unsigned int access_flags)
+{
+ bool ro_read = (access_flags & IB_ACCESS_RELAXED_ORDERING) &&
+ (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) ||
+ pcie_relaxed_ordering_enabled(dev->mdev->pdev));
+
+ MLX5_SET(mkc, seg, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
+ MLX5_SET(mkc, seg, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
+ MLX5_SET(mkc, seg, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
+ MLX5_SET(mkc, seg, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
+ MLX5_SET(mkc, seg, lr, 1);
+ MLX5_SET(mkc, seg, relaxed_ordering_write,
+ !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
+ MLX5_SET(mkc, seg, relaxed_ordering_read, ro_read);
+}
+
+int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
+ int access_flags)
+{
+ struct mlx5_ib_dev *dev = mr_to_mdev(mr);
+ struct mlx5r_umr_wqe wqe = {};
+ int err;
+
+ wqe.ctrl_seg.mkey_mask = get_umr_update_access_mask(dev);
+ wqe.ctrl_seg.mkey_mask |= get_umr_update_pd_mask();
+ wqe.ctrl_seg.flags = MLX5_UMR_CHECK_FREE;
+ wqe.ctrl_seg.flags |= MLX5_UMR_INLINE;
+
+ mlx5r_umr_set_access_flags(dev, &wqe.mkey_seg, access_flags);
+ MLX5_SET(mkc, &wqe.mkey_seg, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, &wqe.mkey_seg, qpn, 0xffffff);
+ MLX5_SET(mkc, &wqe.mkey_seg, mkey_7_0,
+ mlx5_mkey_variant(mr->mmkey.key));
+
+ err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, false);
+ if (err)
+ return err;
+
+ mr->access_flags = access_flags;
+ return 0;
+}
+
+#define MLX5_MAX_UMR_CHUNK \
+ ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - MLX5_UMR_FLEX_ALIGNMENT)
+#define MLX5_SPARE_UMR_CHUNK 0x10000
+
+/*
+ * Allocate a temporary buffer to hold the per-page information to transfer to
+ * HW. For efficiency this should be as large as it can be, but buffer
+ * allocation failure is not allowed, so try smaller sizes.
+ */
+static void *mlx5r_umr_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
+{
+ const size_t xlt_chunk_align = MLX5_UMR_FLEX_ALIGNMENT / ent_size;
+ size_t size;
+ void *res = NULL;
+
+ static_assert(PAGE_SIZE % MLX5_UMR_FLEX_ALIGNMENT == 0);
+
+ /*
+ * MLX5_IB_UPD_XLT_ATOMIC doesn't signal an atomic context, just that the
+ * allocation can't trigger any kind of reclaim.
+ */
+ might_sleep();
+
+ gfp_mask |= __GFP_ZERO | __GFP_NORETRY;
+
+ /*
+ * If the system already has a suitable high order page then just use
+ * that, but don't try hard to create one. This max is about 1M, so a
+ * free x86 huge page will satisfy it.
+ */
+ size = min_t(size_t, ent_size * ALIGN(*nents, xlt_chunk_align),
+ MLX5_MAX_UMR_CHUNK);
+ *nents = size / ent_size;
+ res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
+ get_order(size));
+ if (res)
+ return res;
+
+ if (size > MLX5_SPARE_UMR_CHUNK) {
+ size = MLX5_SPARE_UMR_CHUNK;
+ *nents = size / ent_size;
+ res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
+ get_order(size));
+ if (res)
+ return res;
+ }
+
+ *nents = PAGE_SIZE / ent_size;
+ res = (void *)__get_free_page(gfp_mask);
+ if (res)
+ return res;
+
+ mutex_lock(&xlt_emergency_page_mutex);
+ memset(xlt_emergency_page, 0, PAGE_SIZE);
+ return xlt_emergency_page;
+}
+
+static void mlx5r_umr_free_xlt(void *xlt, size_t length)
+{
+ if (xlt == xlt_emergency_page) {
+ mutex_unlock(&xlt_emergency_page_mutex);
+ return;
+ }
+
+ free_pages((unsigned long)xlt, get_order(length));
+}
+
+static void mlx5r_umr_unmap_free_xlt(struct mlx5_ib_dev *dev, void *xlt,
+ struct ib_sge *sg)
+{
+ struct device *ddev = &dev->mdev->pdev->dev;
+
+ dma_unmap_single(ddev, sg->addr, sg->length, DMA_TO_DEVICE);
+ mlx5r_umr_free_xlt(xlt, sg->length);
+}
+
+/*
+ * Create an XLT buffer ready for submission.
+ */
+static void *mlx5r_umr_create_xlt(struct mlx5_ib_dev *dev, struct ib_sge *sg,
+ size_t nents, size_t ent_size,
+ unsigned int flags)
+{
+ struct device *ddev = &dev->mdev->pdev->dev;
+ dma_addr_t dma;
+ void *xlt;
+
+ xlt = mlx5r_umr_alloc_xlt(&nents, ent_size,
+ flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC :
+ GFP_KERNEL);
+ sg->length = nents * ent_size;
+ dma = dma_map_single(ddev, xlt, sg->length, DMA_TO_DEVICE);
+ if (dma_mapping_error(ddev, dma)) {
+ mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
+ mlx5r_umr_free_xlt(xlt, sg->length);
+ return NULL;
+ }
+ sg->addr = dma;
+ sg->lkey = dev->umrc.pd->local_dma_lkey;
+
+ return xlt;
+}
+
+static void
+mlx5r_umr_set_update_xlt_ctrl_seg(struct mlx5_wqe_umr_ctrl_seg *ctrl_seg,
+ unsigned int flags, struct ib_sge *sg)
+{
+ if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
+ /* fail if free */
+ ctrl_seg->flags = MLX5_UMR_CHECK_FREE;
+ else
+ /* fail if not free */
+ ctrl_seg->flags = MLX5_UMR_CHECK_NOT_FREE;
+ ctrl_seg->xlt_octowords =
+ cpu_to_be16(mlx5r_umr_get_xlt_octo(sg->length));
+}
+
+static void mlx5r_umr_set_update_xlt_mkey_seg(struct mlx5_ib_dev *dev,
+ struct mlx5_mkey_seg *mkey_seg,
+ struct mlx5_ib_mr *mr,
+ unsigned int page_shift)
+{
+ mlx5r_umr_set_access_flags(dev, mkey_seg, mr->access_flags);
+ MLX5_SET(mkc, mkey_seg, pd, to_mpd(mr->ibmr.pd)->pdn);
+ MLX5_SET64(mkc, mkey_seg, start_addr, mr->ibmr.iova);
+ MLX5_SET64(mkc, mkey_seg, len, mr->ibmr.length);
+ MLX5_SET(mkc, mkey_seg, log_page_size, page_shift);
+ MLX5_SET(mkc, mkey_seg, qpn, 0xffffff);
+ MLX5_SET(mkc, mkey_seg, mkey_7_0, mlx5_mkey_variant(mr->mmkey.key));
+}
+
+static void
+mlx5r_umr_set_update_xlt_data_seg(struct mlx5_wqe_data_seg *data_seg,
+ struct ib_sge *sg)
+{
+ data_seg->byte_count = cpu_to_be32(sg->length);
+ data_seg->lkey = cpu_to_be32(sg->lkey);
+ data_seg->addr = cpu_to_be64(sg->addr);
+}
+
+static void mlx5r_umr_update_offset(struct mlx5_wqe_umr_ctrl_seg *ctrl_seg,
+ u64 offset)
+{
+ u64 octo_offset = mlx5r_umr_get_xlt_octo(offset);
+
+ ctrl_seg->xlt_offset = cpu_to_be16(octo_offset & 0xffff);
+ ctrl_seg->xlt_offset_47_16 = cpu_to_be32(octo_offset >> 16);
+ ctrl_seg->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
+}
+
+static void mlx5r_umr_final_update_xlt(struct mlx5_ib_dev *dev,
+ struct mlx5r_umr_wqe *wqe,
+ struct mlx5_ib_mr *mr, struct ib_sge *sg,
+ unsigned int flags)
+{
+ bool update_pd_access, update_translation;
+
+ if (flags & MLX5_IB_UPD_XLT_ENABLE)
+ wqe->ctrl_seg.mkey_mask |= get_umr_enable_mr_mask();
+
+ update_pd_access = flags & MLX5_IB_UPD_XLT_ENABLE ||
+ flags & MLX5_IB_UPD_XLT_PD ||
+ flags & MLX5_IB_UPD_XLT_ACCESS;
+
+ if (update_pd_access) {
+ wqe->ctrl_seg.mkey_mask |= get_umr_update_access_mask(dev);
+ wqe->ctrl_seg.mkey_mask |= get_umr_update_pd_mask();
+ }
+
+ update_translation =
+ flags & MLX5_IB_UPD_XLT_ENABLE || flags & MLX5_IB_UPD_XLT_ADDR;
+
+ if (update_translation) {
+ wqe->ctrl_seg.mkey_mask |= get_umr_update_translation_mask(dev);
+ if (!mr->ibmr.length)
+ MLX5_SET(mkc, &wqe->mkey_seg, length64, 1);
+ if (flags & MLX5_IB_UPD_XLT_KEEP_PGSZ)
+ wqe->ctrl_seg.mkey_mask &=
+ cpu_to_be64(~MLX5_MKEY_MASK_PAGE_SIZE);
+ }
+
+ wqe->ctrl_seg.xlt_octowords =
+ cpu_to_be16(mlx5r_umr_get_xlt_octo(sg->length));
+ wqe->data_seg.byte_count = cpu_to_be32(sg->length);
+}
+
+static void
+_mlx5r_umr_init_wqe(struct mlx5_ib_mr *mr, struct mlx5r_umr_wqe *wqe,
+ struct ib_sge *sg, unsigned int flags,
+ unsigned int page_shift, bool dd)
+{
+ struct mlx5_ib_dev *dev = mr_to_mdev(mr);
+
+ mlx5r_umr_set_update_xlt_ctrl_seg(&wqe->ctrl_seg, flags, sg);
+ mlx5r_umr_set_update_xlt_mkey_seg(dev, &wqe->mkey_seg, mr, page_shift);
+ if (dd) /* Use the data direct internal kernel PD */
+ MLX5_SET(mkc, &wqe->mkey_seg, pd, dev->ddr.pdn);
+ mlx5r_umr_set_update_xlt_data_seg(&wqe->data_seg, sg);
+}
+
+static int
+_mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags, bool dd,
+ size_t start_block, size_t nblocks)
+{
+ size_t ent_size = dd ? sizeof(struct mlx5_ksm) : sizeof(struct mlx5_mtt);
+ struct mlx5_ib_dev *dev = mr_to_mdev(mr);
+ struct device *ddev = &dev->mdev->pdev->dev;
+ struct mlx5r_umr_wqe wqe = {};
+ size_t processed_blocks = 0;
+ struct ib_block_iter biter;
+ size_t cur_block_idx = 0;
+ struct mlx5_ksm *cur_ksm;
+ struct mlx5_mtt *cur_mtt;
+ size_t orig_sg_length;
+ size_t total_blocks;
+ size_t final_size;
+ void *curr_entry;
+ struct ib_sge sg;
+ void *entry;
+ u64 offset;
+ int err = 0;
+
+ total_blocks = ib_umem_num_dma_blocks(mr->umem, 1UL << mr->page_shift);
+ if (start_block > total_blocks)
+ return -EINVAL;
+
+ /* nblocks 0 means update all blocks starting from start_block */
+ if (nblocks)
+ total_blocks = nblocks;
+
+ entry = mlx5r_umr_create_xlt(dev, &sg, total_blocks, ent_size, flags);
+ if (!entry)
+ return -ENOMEM;
+
+ orig_sg_length = sg.length;
+
+ _mlx5r_umr_init_wqe(mr, &wqe, &sg, flags, mr->page_shift, dd);
+
+ /* Set initial translation offset to start_block */
+ offset = (u64)start_block * ent_size;
+ mlx5r_umr_update_offset(&wqe.ctrl_seg, offset);
+
+ if (dd)
+ cur_ksm = entry;
+ else
+ cur_mtt = entry;
+
+ curr_entry = entry;
+
+ rdma_umem_for_each_dma_block(mr->umem, &biter, BIT(mr->page_shift)) {
+ if (cur_block_idx < start_block) {
+ cur_block_idx++;
+ continue;
+ }
+
+ if (nblocks && processed_blocks >= nblocks)
+ break;
+
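+		/* The staging buffer is full: sync it for the device, post the
+		 * accumulated entries, advance the XLT offset by the amount
+		 * just posted and restart filling from the top of the buffer.
+		 */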
+ if (curr_entry == entry + sg.length) {
+ dma_sync_single_for_device(ddev, sg.addr, sg.length,
+ DMA_TO_DEVICE);
+
+ err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe,
+ true);
+ if (err)
+ goto err;
+ dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
+ DMA_TO_DEVICE);
+ offset += sg.length;
+ mlx5r_umr_update_offset(&wqe.ctrl_seg, offset);
+ if (dd)
+ cur_ksm = entry;
+ else
+ cur_mtt = entry;
+ }
+
+ if (dd) {
+ cur_ksm->va = cpu_to_be64(rdma_block_iter_dma_address(&biter));
+ if (mr->access_flags & IB_ACCESS_RELAXED_ORDERING &&
+ dev->ddr.mkey_ro_valid)
+ cur_ksm->key = cpu_to_be32(dev->ddr.mkey_ro);
+ else
+ cur_ksm->key = cpu_to_be32(dev->ddr.mkey);
+ if (mr->umem->is_dmabuf &&
+ (flags & MLX5_IB_UPD_XLT_ZAP)) {
+ cur_ksm->va = 0;
+ cur_ksm->key = 0;
+ }
+ cur_ksm++;
+ curr_entry = cur_ksm;
+ } else {
+ cur_mtt->ptag =
+ cpu_to_be64(rdma_block_iter_dma_address(&biter) |
+ MLX5_IB_MTT_PRESENT);
+ if (mr->umem->is_dmabuf && (flags & MLX5_IB_UPD_XLT_ZAP))
+ cur_mtt->ptag = 0;
+ cur_mtt++;
+ curr_entry = cur_mtt;
+ }
+
+ processed_blocks++;
+ }
+
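+	/* Post the final, possibly partial, chunk; zero-pad the tail up to the
+	 * UMR flex alignment before posting.
+	 */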
+ final_size = curr_entry - entry;
+ sg.length = ALIGN(final_size, MLX5_UMR_FLEX_ALIGNMENT);
+ memset(curr_entry, 0, sg.length - final_size);
+ mlx5r_umr_final_update_xlt(dev, &wqe, mr, &sg, flags);
+
+ dma_sync_single_for_device(ddev, sg.addr, sg.length, DMA_TO_DEVICE);
+ err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, true);
+
+err:
+ sg.length = orig_sg_length;
+ mlx5r_umr_unmap_free_xlt(dev, entry, &sg);
+ return err;
+}
+
+int mlx5r_umr_update_data_direct_ksm_pas_range(struct mlx5_ib_mr *mr,
+ unsigned int flags,
+ size_t start_block,
+ size_t nblocks)
+{
+ /* No invalidation flow is expected */
+ if (WARN_ON(!mr->umem->is_dmabuf) || ((flags & MLX5_IB_UPD_XLT_ZAP) &&
+ !(flags & MLX5_IB_UPD_XLT_KEEP_PGSZ)))
+ return -EINVAL;
+
+ return _mlx5r_umr_update_mr_pas(mr, flags, true, start_block, nblocks);
+}
+
+int mlx5r_umr_update_data_direct_ksm_pas(struct mlx5_ib_mr *mr,
+ unsigned int flags)
+{
+ return mlx5r_umr_update_data_direct_ksm_pas_range(mr, flags, 0, 0);
+}
+
+int mlx5r_umr_update_mr_pas_range(struct mlx5_ib_mr *mr, unsigned int flags,
+ size_t start_block, size_t nblocks)
+{
+ if (WARN_ON(mr->umem->is_odp))
+ return -EINVAL;
+
+ return _mlx5r_umr_update_mr_pas(mr, flags, false, start_block, nblocks);
+}
+
+/*
+ * Send the DMA list to the HW for a normal MR using UMR.
+ * Dmabuf MR is handled in a similar way, except that the MLX5_IB_UPD_XLT_ZAP
+ * flag may be used.
+ */
+int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
+{
+ return mlx5r_umr_update_mr_pas_range(mr, flags, 0, 0);
+}
+
+static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
+{
+ return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
+}
+
+int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
+ int page_shift, int flags)
+{
+ int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
+ ? sizeof(struct mlx5_klm)
+ : sizeof(struct mlx5_mtt);
+ const int page_align = MLX5_UMR_FLEX_ALIGNMENT / desc_size;
+ struct mlx5_ib_dev *dev = mr_to_mdev(mr);
+ struct device *ddev = &dev->mdev->pdev->dev;
+ const int page_mask = page_align - 1;
+ struct mlx5r_umr_wqe wqe = {};
+ size_t pages_mapped = 0;
+ size_t pages_to_map = 0;
+ size_t size_to_map = 0;
+ size_t orig_sg_length;
+ size_t pages_iter;
+ struct ib_sge sg;
+ int err = 0;
+ void *xlt;
+
+ if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
+ !umr_can_use_indirect_mkey(dev))
+ return -EPERM;
+
+ if (WARN_ON(!mr->umem->is_odp))
+ return -EINVAL;
+
+ /* UMR copies MTTs in units of MLX5_UMR_FLEX_ALIGNMENT bytes,
+ * so we need to align the offset and length accordingly
+ */
+ if (idx & page_mask) {
+ npages += idx & page_mask;
+ idx &= ~page_mask;
+ }
+ pages_to_map = ALIGN(npages, page_align);
+
+ xlt = mlx5r_umr_create_xlt(dev, &sg, npages, desc_size, flags);
+ if (!xlt)
+ return -ENOMEM;
+
+ pages_iter = sg.length / desc_size;
+ orig_sg_length = sg.length;
+
+ if (!(flags & MLX5_IB_UPD_XLT_INDIRECT)) {
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+ size_t max_pages = ib_umem_odp_num_pages(odp) - idx;
+
+ pages_to_map = min_t(size_t, pages_to_map, max_pages);
+ }
+
+ mlx5r_umr_set_update_xlt_ctrl_seg(&wqe.ctrl_seg, flags, &sg);
+ mlx5r_umr_set_update_xlt_mkey_seg(dev, &wqe.mkey_seg, mr, page_shift);
+ mlx5r_umr_set_update_xlt_data_seg(&wqe.data_seg, &sg);
+
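+	/* Post the XLT in chunks of at most pages_iter entries, repopulating
+	 * the staging buffer before each post.
+	 */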
+ for (pages_mapped = 0;
+ pages_mapped < pages_to_map && !err;
+ pages_mapped += pages_iter, idx += pages_iter) {
+ npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
+ size_to_map = npages * desc_size;
+ dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
+ DMA_TO_DEVICE);
+ /*
+ * npages is the maximum number of pages to map, but we
+ * can't guarantee that all pages are actually mapped.
+ *
+		 * For example, if a page is a p2p page of a type that is not
+		 * supported for mapping, the number of pages mapped will be
+		 * less than requested.
+ */
+ err = mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
+ if (err)
+ return err;
+ dma_sync_single_for_device(ddev, sg.addr, sg.length,
+ DMA_TO_DEVICE);
+ sg.length = ALIGN(size_to_map, MLX5_UMR_FLEX_ALIGNMENT);
+
+ if (pages_mapped + pages_iter >= pages_to_map)
+ mlx5r_umr_final_update_xlt(dev, &wqe, mr, &sg, flags);
+ mlx5r_umr_update_offset(&wqe.ctrl_seg, idx * desc_size);
+ err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, true);
+ }
+ sg.length = orig_sg_length;
+ mlx5r_umr_unmap_free_xlt(dev, xlt, &sg);
+ return err;
+}
+
+/*
+ * Update only the page-size (log_page_size) field of an existing memory key
+ * using UMR. This is useful when the MR's physical layout stays the same
+ * but the optimal page shift has changed (e.g. dmabuf after pages are
+ * pinned and the HW can switch from 4K to huge-page alignment).
+ */
+int mlx5r_umr_update_mr_page_shift(struct mlx5_ib_mr *mr,
+ unsigned int page_shift,
+ bool dd)
+{
+ struct mlx5_ib_dev *dev = mr_to_mdev(mr);
+ struct mlx5r_umr_wqe wqe = {};
+ int err;
+
+ /* Build UMR wqe: we touch only PAGE_SIZE, so use the dedicated mask */
+ wqe.ctrl_seg.mkey_mask = get_umr_update_translation_mask(dev);
+
+ /* MR must be free while page size is modified */
+ wqe.ctrl_seg.flags = MLX5_UMR_CHECK_FREE | MLX5_UMR_INLINE;
+
+ /* Fill mkey segment with the new page size, keep the rest unchanged */
+ MLX5_SET(mkc, &wqe.mkey_seg, log_page_size, page_shift);
+
+ if (dd)
+ MLX5_SET(mkc, &wqe.mkey_seg, pd, dev->ddr.pdn);
+ else
+ MLX5_SET(mkc, &wqe.mkey_seg, pd, to_mpd(mr->ibmr.pd)->pdn);
+
+ MLX5_SET64(mkc, &wqe.mkey_seg, start_addr, mr->ibmr.iova);
+ MLX5_SET64(mkc, &wqe.mkey_seg, len, mr->ibmr.length);
+ MLX5_SET(mkc, &wqe.mkey_seg, qpn, 0xffffff);
+ MLX5_SET(mkc, &wqe.mkey_seg, mkey_7_0,
+ mlx5_mkey_variant(mr->mmkey.key));
+
+ err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, false);
+ if (!err)
+ mr->page_shift = page_shift;
+
+ return err;
+}
+
+static inline int
+_mlx5r_dmabuf_umr_update_pas(struct mlx5_ib_mr *mr, unsigned int flags,
+ size_t start_block, size_t nblocks, bool dd)
+{
+ if (dd)
+ return mlx5r_umr_update_data_direct_ksm_pas_range(mr, flags,
+ start_block,
+ nblocks);
+ else
+ return mlx5r_umr_update_mr_pas_range(mr, flags, start_block,
+ nblocks);
+}
+
+/**
+ * This function makes an mkey non-present by zapping (zeroing out) the first
+ * N translation entries of the mkey, where N is determined by the largest
+ * page size supported by the device and the MR length.
+ * It then updates the mkey's page size to the largest possible value, ensuring
+ * the MR is completely non-present and safe for further updates.
+ * This is useful when updating the page size of a dmabuf MR on a page fault.
+ *
+ * Return: 0 on success and a negative error code on failure. The number of
+ * zapped entries is returned in @nblocks (0 means all entries were zapped).
+ */
+static int _mlx5r_umr_zap_mkey(struct mlx5_ib_mr *mr,
+ unsigned int flags,
+ unsigned int page_shift,
+ size_t *nblocks,
+ bool dd)
+{
+ unsigned int old_page_shift = mr->page_shift;
+ struct mlx5_ib_dev *dev = mr_to_mdev(mr);
+ unsigned int max_page_shift;
+ size_t page_shift_nblocks;
+ unsigned int max_log_size;
+ int access_mode;
+ int err;
+
+ access_mode = dd ? MLX5_MKC_ACCESS_MODE_KSM : MLX5_MKC_ACCESS_MODE_MTT;
+ flags |= MLX5_IB_UPD_XLT_KEEP_PGSZ | MLX5_IB_UPD_XLT_ZAP |
+ MLX5_IB_UPD_XLT_ATOMIC;
+ max_log_size = get_max_log_entity_size_cap(dev, access_mode);
+ max_page_shift = order_base_2(mr->ibmr.length);
+ max_page_shift = min(max(max_page_shift, page_shift), max_log_size);
+	/* Count blocks in units of max_page_shift; we will zap exactly this
+	 * many to make the whole MR non-present.
+	 * The block count is aligned so that the zapped size is a multiple of
+	 * MLX5_UMR_FLEX_ALIGNMENT, since it may be used as an offset into the
+	 * XLT later on.
+ */
+ *nblocks = ib_umem_num_dma_blocks(mr->umem, 1UL << max_page_shift);
+ if (dd)
+ *nblocks = ALIGN(*nblocks, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT);
+ else
+ *nblocks = ALIGN(*nblocks, MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT);
+ page_shift_nblocks = ib_umem_num_dma_blocks(mr->umem,
+ 1UL << page_shift);
+	/* If the number of blocks at the max possible page shift is greater
+	 * than or equal to the number of blocks at the new page size, just go
+	 * over all of the mkey's entries.
+ */
+ if (*nblocks >= page_shift_nblocks)
+ *nblocks = 0;
+
+ /* Make the first nblocks entries non-present without changing
+ * page size yet.
+ */
+ if (*nblocks)
+ mr->page_shift = max_page_shift;
+ err = _mlx5r_dmabuf_umr_update_pas(mr, flags, 0, *nblocks, dd);
+ if (err) {
+ mr->page_shift = old_page_shift;
+ return err;
+ }
+
+ /* Change page size to the max page size now that the MR is completely
+ * non-present.
+ */
+ if (*nblocks) {
+ err = mlx5r_umr_update_mr_page_shift(mr, max_page_shift, dd);
+ if (err) {
+ mr->page_shift = old_page_shift;
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * mlx5r_umr_dmabuf_update_pgsz - Safely update DMABUF MR page size and its
+ * entries accordingly
+ * @mr: The memory region to update
+ * @xlt_flags: Translation table update flags
+ * @page_shift: The new (optimized) page shift to use
+ *
+ * This function updates the page size and mkey translation entries for a DMABUF
+ * MR in a safe, multi-step process to avoid exposing partially updated mappings.
+ * The update is performed in 5 steps:
+ * 1. Make the first X entries non-present, where X is the minimal number of
+ *    entries needed to cover the MR length at a large page shift that the
+ *    device supports.
+ * 2. Update the page size to the large supported page size
+ * 3. Load the remaining N-X entries according to the (optimized) page_shift
+ * 4. Update the page size according to the (optimized) page_shift
+ * 5. Load the first X entries with the correct translations
+ *
+ * This ensures that at no point is the MR accessible with a partially updated
+ * translation table, maintaining correctness and preventing access to stale or
+ * inconsistent mappings.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int mlx5r_umr_dmabuf_update_pgsz(struct mlx5_ib_mr *mr, u32 xlt_flags,
+ unsigned int page_shift)
+{
+ unsigned int old_page_shift = mr->page_shift;
+ size_t zapped_blocks;
+ size_t total_blocks;
+ int err;
+
+ err = _mlx5r_umr_zap_mkey(mr, xlt_flags, page_shift, &zapped_blocks,
+ mr->data_direct);
+ if (err)
+ return err;
+
+ /* _mlx5r_umr_zap_mkey already enables the mkey */
+ xlt_flags &= ~MLX5_IB_UPD_XLT_ENABLE;
+ mr->page_shift = page_shift;
+ total_blocks = ib_umem_num_dma_blocks(mr->umem, 1UL << mr->page_shift);
+ if (zapped_blocks && zapped_blocks < total_blocks) {
+ /* Update PAS according to the new page size but don't update
+ * the page size in the mkey yet.
+ */
+ err = _mlx5r_dmabuf_umr_update_pas(
+ mr,
+ xlt_flags | MLX5_IB_UPD_XLT_KEEP_PGSZ,
+ zapped_blocks,
+ total_blocks - zapped_blocks,
+ mr->data_direct);
+ if (err)
+ goto err;
+ }
+
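+	/* Step 4: switch the mkey to the optimized page size. */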
+ err = mlx5r_umr_update_mr_page_shift(mr, mr->page_shift,
+ mr->data_direct);
+ if (err)
+ goto err;
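+	/* Step 5: load the first zapped_blocks entries (all entries when
+	 * zapped_blocks is 0) with their correct translations.
+	 */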
+ err = _mlx5r_dmabuf_umr_update_pas(mr, xlt_flags, 0, zapped_blocks,
+ mr->data_direct);
+ if (err)
+ goto err;
+
+ return 0;
+err:
+ mr->page_shift = old_page_shift;
+ return err;
+}
diff --git a/drivers/infiniband/hw/mlx5/umr.h b/drivers/infiniband/hw/mlx5/umr.h
new file mode 100644
index 000000000000..e9361f0140e7
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/umr.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef _MLX5_IB_UMR_H
+#define _MLX5_IB_UMR_H
+
+#include "mlx5_ib.h"
+
+
+#define MLX5_MAX_UMR_SHIFT 16
+#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
+
+#define MLX5_IB_UMR_OCTOWORD 16
+#define MLX5_IB_UMR_XLT_ALIGNMENT 64
+
+int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev);
+void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev);
+
+int mlx5r_umr_init(struct mlx5_ib_dev *dev);
+void mlx5r_umr_cleanup(struct mlx5_ib_dev *dev);
+
+static inline bool mlx5r_umr_can_load_pas(struct mlx5_ib_dev *dev,
+ size_t length)
+{
+ /*
+ * umr_check_mkey_mask() rejects MLX5_MKEY_MASK_PAGE_SIZE which is
+ * always set if MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (aka
+ * MLX5_IB_UPD_XLT_ADDR and MLX5_IB_UPD_XLT_ENABLE) is set. Thus, a mkey
+ * can never be enabled without this capability. Simplify this weird
+ * quirky hardware by just saying it can't use PAS lists with UMR at
+ * all.
+ */
+ if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
+ return false;
+
+ /*
+ * length is the size of the MR in bytes when mlx5_ib_update_xlt() is
+ * used.
+ */
+ if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
+ length >= MLX5_MAX_UMR_PAGES * PAGE_SIZE)
+ return false;
+ return true;
+}
+
+/*
+ * true if an existing MR can be reconfigured to new access_flags using UMR.
+ * Older HW cannot use UMR to update certain elements of the MKC. See
+ * get_umr_update_access_mask() and umr_check_mkey_mask()
+ */
+static inline bool mlx5r_umr_can_reconfig(struct mlx5_ib_dev *dev,
+ unsigned int current_access_flags,
+ unsigned int target_access_flags)
+{
+ unsigned int diffs = current_access_flags ^ target_access_flags;
+
+ if ((diffs & IB_ACCESS_REMOTE_ATOMIC) &&
+ MLX5_CAP_GEN(dev->mdev, atomic) &&
+ MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
+ return false;
+
+ if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
+ MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
+ !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
+ return false;
+
+ if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
+ (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) ||
+ MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_pci_enabled)) &&
+ !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
+ return false;
+
+ return true;
+}
+
+static inline u64 mlx5r_umr_get_xlt_octo(u64 bytes)
+{
+ return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
+ MLX5_IB_UMR_OCTOWORD;
+}
+
+struct mlx5r_umr_context {
+ struct ib_cqe cqe;
+ enum ib_wc_status status;
+ struct completion done;
+};
+
+struct mlx5r_umr_wqe {
+ struct mlx5_wqe_umr_ctrl_seg ctrl_seg;
+ struct mlx5_mkey_seg mkey_seg;
+ struct mlx5_wqe_data_seg data_seg;
+};
+
+int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr);
+int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
+ int access_flags);
+int mlx5r_umr_update_data_direct_ksm_pas_range(struct mlx5_ib_mr *mr,
+ unsigned int flags,
+ size_t start_block,
+ size_t nblocks);
+int mlx5r_umr_update_data_direct_ksm_pas(struct mlx5_ib_mr *mr, unsigned int flags);
+int mlx5r_umr_update_mr_pas_range(struct mlx5_ib_mr *mr, unsigned int flags,
+ size_t start_block, size_t nblocks);
+int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags);
+int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
+ int page_shift, int flags);
+int mlx5r_umr_update_mr_page_shift(struct mlx5_ib_mr *mr,
+ unsigned int page_shift,
+ bool dd);
+int mlx5r_umr_dmabuf_update_pgsz(struct mlx5_ib_mr *mr, u32 xlt_flags,
+ unsigned int page_shift);
+
+#endif /* _MLX5_IB_UMR_H */
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
deleted file mode 100644
index 76fb7b927d37..000000000000
--- a/drivers/infiniband/hw/mlx5/user.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX5_IB_USER_H
-#define MLX5_IB_USER_H
-
-#include <linux/types.h>
-
-enum {
- MLX5_QP_FLAG_SIGNATURE = 1 << 0,
- MLX5_QP_FLAG_SCATTER_CQE = 1 << 1,
-};
-
-enum {
- MLX5_SRQ_FLAG_SIGNATURE = 1 << 0,
-};
-
-
-/* Increment this value if any changes that break userspace ABI
- * compatibility are made.
- */
-#define MLX5_IB_UVERBS_ABI_VERSION 1
-
-/* Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
- */
-
-struct mlx5_ib_alloc_ucontext_req {
- __u32 total_num_uuars;
- __u32 num_low_latency_uuars;
-};
-
-struct mlx5_ib_alloc_ucontext_req_v2 {
- __u32 total_num_uuars;
- __u32 num_low_latency_uuars;
- __u32 flags;
- __u32 reserved;
-};
-
-struct mlx5_ib_alloc_ucontext_resp {
- __u32 qp_tab_size;
- __u32 bf_reg_size;
- __u32 tot_uuars;
- __u32 cache_line_size;
- __u16 max_sq_desc_sz;
- __u16 max_rq_desc_sz;
- __u32 max_send_wqebb;
- __u32 max_recv_wr;
- __u32 max_srq_recv_wr;
- __u16 num_ports;
- __u16 reserved;
-};
-
-struct mlx5_ib_alloc_pd_resp {
- __u32 pdn;
-};
-
-struct mlx5_ib_create_cq {
- __u64 buf_addr;
- __u64 db_addr;
- __u32 cqe_size;
- __u32 reserved; /* explicit padding (optional on i386) */
-};
-
-struct mlx5_ib_create_cq_resp {
- __u32 cqn;
- __u32 reserved;
-};
-
-struct mlx5_ib_resize_cq {
- __u64 buf_addr;
- __u16 cqe_size;
- __u16 reserved0;
- __u32 reserved1;
-};
-
-struct mlx5_ib_create_srq {
- __u64 buf_addr;
- __u64 db_addr;
- __u32 flags;
- __u32 reserved; /* explicit padding (optional on i386) */
-};
-
-struct mlx5_ib_create_srq_resp {
- __u32 srqn;
- __u32 reserved;
-};
-
-struct mlx5_ib_create_qp {
- __u64 buf_addr;
- __u64 db_addr;
- __u32 sq_wqe_count;
- __u32 rq_wqe_count;
- __u32 rq_wqe_shift;
- __u32 flags;
-};
-
-struct mlx5_ib_create_qp_resp {
- __u32 uuar_index;
-};
-#endif /* MLX5_IB_USER_H */
diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
new file mode 100644
index 000000000000..9947feb7fb8a
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/wr.c
@@ -0,0 +1,1284 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <linux/gfp.h>
+#include <linux/mlx5/qp.h>
+#include <linux/mlx5/driver.h>
+#include "wr.h"
+#include "umr.h"
+
+static const u32 mlx5_ib_opcode[] = {
+ [IB_WR_SEND] = MLX5_OPCODE_SEND,
+ [IB_WR_LSO] = MLX5_OPCODE_LSO,
+ [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM,
+ [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE,
+ [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM,
+ [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ,
+ [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS,
+ [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA,
+ [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL,
+ [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR,
+ [IB_WR_REG_MR] = MLX5_OPCODE_UMR,
+ [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS,
+ [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA,
+ [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
+};
+
+int mlx5r_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
+{
+ struct mlx5_ib_cq *cq;
+ unsigned int cur;
+
+ cur = wq->head - wq->tail;
+ if (likely(cur + nreq < wq->max_post))
+ return 0;
+
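+	/* Possible overflow: re-read the indices under the CQ lock to get a
+	 * stable view before deciding.
+	 */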
+ cq = to_mcq(ib_cq);
+ spin_lock(&cq->lock);
+ cur = wq->head - wq->tail;
+ spin_unlock(&cq->lock);
+
+ return cur + nreq >= wq->max_post;
+}
+
+static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
+ u64 remote_addr, u32 rkey)
+{
+ rseg->raddr = cpu_to_be64(remote_addr);
+ rseg->rkey = cpu_to_be32(rkey);
+ rseg->reserved = 0;
+}
+
+static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
+ void **seg, int *size, void **cur_edge)
+{
+ struct mlx5_wqe_eth_seg *eseg = *seg;
+
+ memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg));
+
+ if (wr->send_flags & IB_SEND_IP_CSUM)
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
+ MLX5_ETH_WQE_L4_CSUM;
+
+ if (wr->opcode == IB_WR_LSO) {
+ struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
+ size_t left, copysz;
+ void *pdata = ud_wr->header;
+ size_t stride;
+
+ left = ud_wr->hlen;
+ eseg->mss = cpu_to_be16(ud_wr->mss);
+ eseg->inline_hdr.sz = cpu_to_be16(left);
+
+		/* mlx5r_memcpy_send_wqe should get a 16B aligned address. Hence,
+ * we first copy up to the current edge and then, if needed,
+ * continue to mlx5r_memcpy_send_wqe.
+ */
+ copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
+ left);
+ memcpy(eseg->inline_hdr.data, pdata, copysz);
+ stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
+ sizeof(eseg->inline_hdr.start) + copysz, 16);
+ *size += stride / 16;
+ *seg += stride;
+
+ if (copysz < left) {
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+ left -= copysz;
+ pdata += copysz;
+ mlx5r_memcpy_send_wqe(&qp->sq, cur_edge, seg, size,
+ pdata, left);
+ }
+
+ return;
+ }
+
+ *seg += sizeof(struct mlx5_wqe_eth_seg);
+ *size += sizeof(struct mlx5_wqe_eth_seg) / 16;
+}
+
+static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
+ const struct ib_send_wr *wr)
+{
+ memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
+ dseg->av.dqp_dct =
+ cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
+ dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
+}
+
+static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
+{
+ dseg->byte_count = cpu_to_be32(sg->length);
+ dseg->lkey = cpu_to_be32(sg->lkey);
+ dseg->addr = cpu_to_be64(sg->addr);
+}
+
+static __be64 frwr_mkey_mask(bool atomic)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LEN |
+ MLX5_MKEY_MASK_PAGE_SIZE |
+ MLX5_MKEY_MASK_START_ADDR |
+ MLX5_MKEY_MASK_EN_RINVAL |
+ MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_LR |
+ MLX5_MKEY_MASK_LW |
+ MLX5_MKEY_MASK_RR |
+ MLX5_MKEY_MASK_RW |
+ MLX5_MKEY_MASK_SMALL_FENCE |
+ MLX5_MKEY_MASK_FREE;
+
+ if (atomic)
+ result |= MLX5_MKEY_MASK_A;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 sig_mkey_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LEN |
+ MLX5_MKEY_MASK_PAGE_SIZE |
+ MLX5_MKEY_MASK_START_ADDR |
+ MLX5_MKEY_MASK_EN_SIGERR |
+ MLX5_MKEY_MASK_EN_RINVAL |
+ MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_LR |
+ MLX5_MKEY_MASK_LW |
+ MLX5_MKEY_MASK_RR |
+ MLX5_MKEY_MASK_RW |
+ MLX5_MKEY_MASK_SMALL_FENCE |
+ MLX5_MKEY_MASK_FREE |
+ MLX5_MKEY_MASK_BSF_EN;
+
+ return cpu_to_be64(result);
+}
+
+static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
+ struct mlx5_ib_mr *mr, u8 flags, bool atomic)
+{
+ int size = (mr->mmkey.ndescs + mr->meta_ndescs) * mr->desc_size;
+
+ memset(umr, 0, sizeof(*umr));
+
+ umr->flags = flags;
+ umr->xlt_octowords = cpu_to_be16(mlx5r_umr_get_xlt_octo(size));
+ umr->mkey_mask = frwr_mkey_mask(atomic);
+}
+
+static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
+{
+ memset(umr, 0, sizeof(*umr));
+ umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+ umr->flags = MLX5_UMR_INLINE;
+}
+
+static u8 get_umr_flags(int acc)
+{
+ return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
+ (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
+ (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
+ (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
+ MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
+}
+
+static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
+ struct mlx5_ib_mr *mr,
+ u32 key, int access)
+{
+ int ndescs = ALIGN(mr->mmkey.ndescs + mr->meta_ndescs, 8) >> 1;
+
+ memset(seg, 0, sizeof(*seg));
+
+ if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT)
+ seg->log2_page_size = ilog2(mr->ibmr.page_size);
+ else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
+ /* KLMs take twice the size of MTTs */
+ ndescs *= 2;
+
+ seg->flags = get_umr_flags(access) | mr->access_mode;
+ seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
+ seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
+ seg->start_addr = cpu_to_be64(mr->ibmr.iova);
+ seg->len = cpu_to_be64(mr->ibmr.length);
+ seg->xlt_oct_size = cpu_to_be32(ndescs);
+}
+
+static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
+{
+ memset(seg, 0, sizeof(*seg));
+ seg->status = MLX5_MKEY_STATUS_FREE;
+}
+
+static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
+ struct mlx5_ib_mr *mr,
+ struct mlx5_ib_pd *pd)
+{
+ int bcount = mr->desc_size * (mr->mmkey.ndescs + mr->meta_ndescs);
+
+ dseg->addr = cpu_to_be64(mr->desc_map);
+ dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
+ dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
+}
+
+static __be32 send_ieth(const struct ib_send_wr *wr)
+{
+ switch (wr->opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ return wr->ex.imm_data;
+
+ case IB_WR_SEND_WITH_INV:
+ return cpu_to_be32(wr->ex.invalidate_rkey);
+
+ default:
+ return 0;
+ }
+}
+
+static u8 calc_sig(void *wqe, int size)
+{
+ u8 *p = wqe;
+ u8 res = 0;
+ int i;
+
+ for (i = 0; i < size; i++)
+ res ^= p[i];
+
+ return ~res;
+}
+
+static u8 wq_sig(void *wqe)
+{
+ return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
+}
+
+static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
+ void **wqe, int *wqe_sz, void **cur_edge)
+{
+ struct mlx5_wqe_inline_seg *seg;
+ size_t offset;
+ int inl = 0;
+ int i;
+
+ seg = *wqe;
+ *wqe += sizeof(*seg);
+ offset = sizeof(*seg);
+
+ for (i = 0; i < wr->num_sge; i++) {
+ size_t len = wr->sg_list[i].length;
+ void *addr = (void *)(unsigned long)(wr->sg_list[i].addr);
+
+ inl += len;
+
+ if (unlikely(inl > qp->max_inline_data))
+ return -ENOMEM;
+
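+		/* Copy in pieces, re-resolving the SQ edge whenever the inline
+		 * data crosses a WQE fragment boundary.
+		 */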
+ while (likely(len)) {
+ size_t leftlen;
+ size_t copysz;
+
+ handle_post_send_edge(&qp->sq, wqe,
+ *wqe_sz + (offset >> 4),
+ cur_edge);
+
+ leftlen = *cur_edge - *wqe;
+ copysz = min_t(size_t, leftlen, len);
+
+ memcpy(*wqe, addr, copysz);
+ len -= copysz;
+ addr += copysz;
+ *wqe += copysz;
+ offset += copysz;
+ }
+ }
+
+ seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
+
+ *wqe_sz += ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
+
+ return 0;
+}
+
+static u16 prot_field_size(enum ib_signature_type type)
+{
+ switch (type) {
+ case IB_SIG_TYPE_T10_DIF:
+ return MLX5_DIF_SIZE;
+ default:
+ return 0;
+ }
+}
+
+static u8 bs_selector(int block_size)
+{
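+	/* Map the logical block size to the device's block-size selector
+	 * encoding; 0 means the size is not supported.
+	 */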
+ switch (block_size) {
+ case 512: return 0x1;
+ case 520: return 0x2;
+ case 4096: return 0x3;
+ case 4160: return 0x4;
+ case 1073741824: return 0x5;
+ default: return 0;
+ }
+}
+
+static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
+ struct mlx5_bsf_inl *inl)
+{
+ /* Valid inline section and allow BSF refresh */
+ inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
+ MLX5_BSF_REFRESH_DIF);
+ inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
+ inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
+ /* repeating block */
+ inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
+ inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
+ MLX5_DIF_CRC : MLX5_DIF_IPCS;
+
+ if (domain->sig.dif.ref_remap)
+ inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;
+
+ if (domain->sig.dif.app_escape) {
+ if (domain->sig.dif.ref_escape)
+ inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
+ else
+ inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
+ }
+
+ inl->dif_app_bitmask_check =
+ cpu_to_be16(domain->sig.dif.apptag_check_mask);
+}
+
+static int mlx5_set_bsf(struct ib_mr *sig_mr,
+ struct ib_sig_attrs *sig_attrs,
+ struct mlx5_bsf *bsf, u32 data_size)
+{
+ struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
+ struct mlx5_bsf_basic *basic = &bsf->basic;
+ struct ib_sig_domain *mem = &sig_attrs->mem;
+ struct ib_sig_domain *wire = &sig_attrs->wire;
+
+ memset(bsf, 0, sizeof(*bsf));
+
+ /* Basic + Extended + Inline */
+ basic->bsf_size_sbs = 1 << 7;
+ /* Input domain check byte mask */
+ basic->check_byte_mask = sig_attrs->check_mask;
+ basic->raw_data_size = cpu_to_be32(data_size);
+
+ /* Memory domain */
+ switch (sig_attrs->mem.sig_type) {
+ case IB_SIG_TYPE_NONE:
+ break;
+ case IB_SIG_TYPE_T10_DIF:
+ basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
+ basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
+ mlx5_fill_inl_bsf(mem, &bsf->m_inl);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Wire domain */
+ switch (sig_attrs->wire.sig_type) {
+ case IB_SIG_TYPE_NONE:
+ break;
+ case IB_SIG_TYPE_T10_DIF:
+ if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
+ mem->sig_type == wire->sig_type) {
+ /* Same block structure */
+ basic->bsf_size_sbs |= 1 << 4;
+ if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
+ basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
+ if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
+ basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
+ if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
+ basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
+ } else
+ basic->wire.bs_selector =
+ bs_selector(wire->sig.dif.pi_interval);
+
+ basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
+ mlx5_fill_inl_bsf(wire, &bsf->w_inl);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+static int set_sig_data_segment(const struct ib_send_wr *send_wr,
+ struct ib_mr *sig_mr,
+ struct ib_sig_attrs *sig_attrs,
+ struct mlx5_ib_qp *qp, void **seg, int *size,
+ void **cur_edge)
+{
+ struct mlx5_bsf *bsf;
+ u32 data_len;
+ u32 data_key;
+ u64 data_va;
+ u32 prot_len = 0;
+ u32 prot_key = 0;
+ u64 prot_va = 0;
+ bool prot = false;
+ int ret;
+ int wqe_size;
+ struct mlx5_ib_mr *mr = to_mmr(sig_mr);
+ struct mlx5_ib_mr *pi_mr = mr->pi_mr;
+
+ data_len = pi_mr->data_length;
+ data_key = pi_mr->ibmr.lkey;
+ data_va = pi_mr->data_iova;
+ if (pi_mr->meta_ndescs) {
+ prot_len = pi_mr->meta_length;
+ prot_key = pi_mr->ibmr.lkey;
+ prot_va = pi_mr->pi_iova;
+ prot = true;
+ }
+
+ if (!prot || (data_key == prot_key && data_va == prot_va &&
+ data_len == prot_len)) {
+ /**
+			 * The source domain doesn't contain signature information,
+			 * or data and protection are interleaved in memory.
+			 * So we need to construct:
+ * ------------------
+ * | data_klm |
+ * ------------------
+ * | BSF |
+ * ------------------
+ **/
+ struct mlx5_klm *data_klm = *seg;
+
+ data_klm->bcount = cpu_to_be32(data_len);
+ data_klm->key = cpu_to_be32(data_key);
+ data_klm->va = cpu_to_be64(data_va);
+ wqe_size = ALIGN(sizeof(*data_klm), 64);
+ } else {
+ /**
+			 * The source domain contains signature information,
+			 * so we need to construct a strided block format:
+ * ---------------------------
+ * | stride_block_ctrl |
+ * ---------------------------
+ * | data_klm |
+ * ---------------------------
+ * | prot_klm |
+ * ---------------------------
+ * | BSF |
+ * ---------------------------
+ **/
+ struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
+ struct mlx5_stride_block_entry *data_sentry;
+ struct mlx5_stride_block_entry *prot_sentry;
+ u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
+ int prot_size;
+
+ sblock_ctrl = *seg;
+ data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
+ prot_sentry = (void *)data_sentry + sizeof(*data_sentry);
+
+ prot_size = prot_field_size(sig_attrs->mem.sig_type);
+ if (!prot_size) {
+ pr_err("Bad block size given: %u\n", block_size);
+ return -EINVAL;
+ }
+ sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
+ prot_size);
+ sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
+ sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
+ sblock_ctrl->num_entries = cpu_to_be16(2);
+
+ data_sentry->bcount = cpu_to_be16(block_size);
+ data_sentry->key = cpu_to_be32(data_key);
+ data_sentry->va = cpu_to_be64(data_va);
+ data_sentry->stride = cpu_to_be16(block_size);
+
+ prot_sentry->bcount = cpu_to_be16(prot_size);
+ prot_sentry->key = cpu_to_be32(prot_key);
+ prot_sentry->va = cpu_to_be64(prot_va);
+ prot_sentry->stride = cpu_to_be16(prot_size);
+
+ wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
+ sizeof(*prot_sentry), 64);
+ }
+
+ *seg += wqe_size;
+ *size += wqe_size / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ bsf = *seg;
+ ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
+ if (ret)
+ return -EINVAL;
+
+ *seg += sizeof(*bsf);
+ *size += sizeof(*bsf) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ return 0;
+}
+
+static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
+ struct ib_mr *sig_mr, int access_flags,
+ u32 size, u32 length, u32 pdn)
+{
+ u32 sig_key = sig_mr->rkey;
+ u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
+
+ memset(seg, 0, sizeof(*seg));
+
+ seg->flags = get_umr_flags(access_flags) | MLX5_MKC_ACCESS_MODE_KLMS;
+ seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
+ seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
+ MLX5_MKEY_BSF_EN | pdn);
+ seg->len = cpu_to_be64(length);
+ seg->xlt_oct_size = cpu_to_be32(mlx5r_umr_get_xlt_octo(size));
+ seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
+}
+
+static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
+ u32 size)
+{
+ memset(umr, 0, sizeof(*umr));
+
+ umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
+ umr->xlt_octowords = cpu_to_be16(mlx5r_umr_get_xlt_octo(size));
+ umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
+ umr->mkey_mask = sig_mkey_mask();
+}
+
+static int set_pi_umr_wr(const struct ib_send_wr *send_wr,
+ struct mlx5_ib_qp *qp, void **seg, int *size,
+ void **cur_edge)
+{
+ const struct ib_reg_wr *wr = reg_wr(send_wr);
+ struct mlx5_ib_mr *sig_mr = to_mmr(wr->mr);
+ struct mlx5_ib_mr *pi_mr = sig_mr->pi_mr;
+ struct ib_sig_attrs *sig_attrs = sig_mr->ibmr.sig_attrs;
+ u32 pdn = to_mpd(qp->ibqp.pd)->pdn;
+ u32 xlt_size;
+ int region_len, ret;
+
+ if (unlikely(send_wr->num_sge != 0) ||
+ unlikely(wr->access & IB_ACCESS_REMOTE_ATOMIC) ||
+ unlikely(!sig_mr->sig) || unlikely(!qp->ibqp.integrity_en) ||
+ unlikely(!sig_mr->sig->sig_status_checked))
+ return -EINVAL;
+
+ /* length of the protected region, data + protection */
+ region_len = pi_mr->ibmr.length;
+
+ /**
+	 * KLM octoword size - if protection was provided
+	 * then we use the strided block format (3 octowords),
+	 * else we use a single KLM (1 octoword)
+ **/
+ if (sig_attrs->mem.sig_type != IB_SIG_TYPE_NONE)
+ xlt_size = 0x30;
+ else
+ xlt_size = sizeof(struct mlx5_klm);
+
+ set_sig_umr_segment(*seg, xlt_size);
+ *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
+ *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ set_sig_mkey_segment(*seg, wr->mr, wr->access, xlt_size, region_len,
+ pdn);
+ *seg += sizeof(struct mlx5_mkey_seg);
+ *size += sizeof(struct mlx5_mkey_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ ret = set_sig_data_segment(send_wr, wr->mr, sig_attrs, qp, seg, size,
+ cur_edge);
+ if (ret)
+ return ret;
+
+ sig_mr->sig->sig_status_checked = false;
+ return 0;
+}
+
+static int set_psv_wr(struct ib_sig_domain *domain,
+ u32 psv_idx, void **seg, int *size)
+{
+ struct mlx5_seg_set_psv *psv_seg = *seg;
+
+ memset(psv_seg, 0, sizeof(*psv_seg));
+ psv_seg->psv_num = cpu_to_be32(psv_idx);
+ switch (domain->sig_type) {
+ case IB_SIG_TYPE_NONE:
+ break;
+ case IB_SIG_TYPE_T10_DIF:
+ psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
+ domain->sig.dif.app_tag);
+ psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
+ break;
+ default:
+ pr_err("Bad signature type (%d) is given.\n",
+ domain->sig_type);
+ return -EINVAL;
+ }
+
+ *seg += sizeof(*psv_seg);
+ *size += sizeof(*psv_seg) / 16;
+
+ return 0;
+}
+
+static int set_reg_wr(struct mlx5_ib_qp *qp,
+ const struct ib_reg_wr *wr,
+ void **seg, int *size, void **cur_edge,
+ bool check_not_free)
+{
+ struct mlx5_ib_mr *mr = to_mmr(wr->mr);
+ struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
+ struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
+ int mr_list_size = (mr->mmkey.ndescs + mr->meta_ndescs) * mr->desc_size;
+ bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
+ bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
+ u8 flags = 0;
+
+ /* Matches access in mlx5_set_umr_free_mkey().
+ * Relaxed Ordering is set implicitly in mlx5_set_umr_free_mkey() and
+ * kernel ULPs are not aware of it, so we don't set it here.
+ */
+ if (!mlx5r_umr_can_reconfig(dev, 0, wr->access)) {
+ mlx5_ib_warn(
+ to_mdev(qp->ibqp.device),
+ "Fast update for MR access flags is not possible\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
+ mlx5_ib_warn(to_mdev(qp->ibqp.device),
+ "Invalid IB_SEND_INLINE send flag\n");
+ return -EINVAL;
+ }
+
+ if (check_not_free)
+ flags |= MLX5_UMR_CHECK_NOT_FREE;
+ if (umr_inline)
+ flags |= MLX5_UMR_INLINE;
+
+ set_reg_umr_seg(*seg, mr, flags, atomic);
+ *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
+ *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
+ *seg += sizeof(struct mlx5_mkey_seg);
+ *size += sizeof(struct mlx5_mkey_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ if (umr_inline) {
+ mlx5r_memcpy_send_wqe(&qp->sq, cur_edge, seg, size, mr->descs,
+ mr_list_size);
+ *size = ALIGN(*size, MLX5_SEND_WQE_BB >> 4);
+ } else {
+ set_reg_data_seg(*seg, mr, pd);
+ *seg += sizeof(struct mlx5_wqe_data_seg);
+ *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
+ }
+ return 0;
+}
+
+static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size,
+ void **cur_edge)
+{
+ set_linv_umr_seg(*seg);
+ *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
+ *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+ set_linv_mkey_seg(*seg);
+ *seg += sizeof(struct mlx5_mkey_seg);
+ *size += sizeof(struct mlx5_mkey_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+}
+
+static void dump_wqe(struct mlx5_ib_qp *qp, u32 idx, int size_16)
+{
+ __be32 *p = NULL;
+ int i, j;
+
+ pr_debug("dump WQE index %u:\n", idx);
+ for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
+ if ((i & 0xf) == 0) {
+ p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
+ pr_debug("WQBB at %p:\n", (void *)p);
+ j = 0;
+ idx = (idx + 1) & (qp->sq.wqe_cnt - 1);
+ }
+ pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
+ be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
+ be32_to_cpu(p[j + 3]));
+ }
+}
+
+int mlx5r_begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+ struct mlx5_wqe_ctrl_seg **ctrl, unsigned int *idx,
+ int *size, void **cur_edge, int nreq, __be32 general_id,
+ bool send_signaled, bool solicited)
+{
+ if (unlikely(mlx5r_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
+ return -ENOMEM;
+
+ *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
+ *seg = mlx5_frag_buf_get_wqe(&qp->sq.fbc, *idx);
+ *ctrl = *seg;
+ *(uint32_t *)(*seg + 8) = 0;
+ (*ctrl)->general_id = general_id;
+ (*ctrl)->fm_ce_se = qp->sq_signal_bits |
+ (send_signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0) |
+ (solicited ? MLX5_WQE_CTRL_SOLICITED : 0);
+
+ *seg += sizeof(**ctrl);
+ *size = sizeof(**ctrl) / 16;
+ *cur_edge = qp->sq.cur_edge;
+
+ return 0;
+}
+
+static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+ struct mlx5_wqe_ctrl_seg **ctrl,
+ const struct ib_send_wr *wr, unsigned int *idx, int *size,
+ void **cur_edge, int nreq)
+{
+ return mlx5r_begin_wqe(qp, seg, ctrl, idx, size, cur_edge, nreq,
+ send_ieth(wr), wr->send_flags & IB_SEND_SIGNALED,
+ wr->send_flags & IB_SEND_SOLICITED);
+}
+
+void mlx5r_finish_wqe(struct mlx5_ib_qp *qp, struct mlx5_wqe_ctrl_seg *ctrl,
+ void *seg, u8 size, void *cur_edge, unsigned int idx,
+ u64 wr_id, int nreq, u8 fence, u32 mlx5_opcode)
+{
+ u8 opmod = 0;
+
+ ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
+ mlx5_opcode | ((u32)opmod << 24));
+ ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
+ ctrl->fm_ce_se |= fence;
+ if (unlikely(qp->flags_en & MLX5_QP_FLAG_SIGNATURE))
+ ctrl->signature = wq_sig(ctrl);
+
+ qp->sq.wrid[idx] = wr_id;
+ qp->sq.w_list[idx].opcode = mlx5_opcode;
+ qp->sq.wqe_head[idx] = qp->sq.head + nreq;
+ qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
+ qp->sq.w_list[idx].next = qp->sq.cur_post;
+
+ /* We save the edge which was possibly updated during the WQE
+ * construction, into SQ's cache.
+ */
+ seg = PTR_ALIGN(seg, MLX5_SEND_WQE_BB);
+ qp->sq.cur_edge = (unlikely(seg == cur_edge)) ?
+ get_sq_edge(&qp->sq, qp->sq.cur_post &
+ (qp->sq.wqe_cnt - 1)) :
+ cur_edge;
+}
+
+static void handle_rdma_op(const struct ib_send_wr *wr, void **seg, int *size)
+{
+ set_raddr_seg(*seg, rdma_wr(wr)->remote_addr, rdma_wr(wr)->rkey);
+ *seg += sizeof(struct mlx5_wqe_raddr_seg);
+ *size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
+}
+
+static void handle_local_inv(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg,
+ int *size, void **cur_edge, unsigned int idx)
+{
+ qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
+ (*ctrl)->imm = cpu_to_be32(wr->ex.invalidate_rkey);
+ set_linv_wr(qp, seg, size, cur_edge);
+}
+
+static int handle_reg_mr(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size,
+ void **cur_edge, unsigned int idx)
+{
+ qp->sq.wr_data[idx] = IB_WR_REG_MR;
+ (*ctrl)->imm = cpu_to_be32(reg_wr(wr)->key);
+ return set_reg_wr(qp, reg_wr(wr), seg, size, cur_edge, true);
+}
+
+static int handle_psv(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size,
+ void **cur_edge, unsigned int *idx, int nreq,
+ struct ib_sig_domain *domain, u32 psv_index,
+ u8 next_fence)
+{
+ int err;
+
+ /*
+ * SET_PSV WQEs are not signaled and solicited on error.
+ */
+ err = mlx5r_begin_wqe(qp, seg, ctrl, idx, size, cur_edge, nreq,
+ send_ieth(wr), false, true);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ err = -ENOMEM;
+ goto out;
+ }
+ err = set_psv_wr(domain, psv_index, seg, size);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ goto out;
+ }
+ mlx5r_finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id,
+ nreq, next_fence, MLX5_OPCODE_SET_PSV);
+
+out:
+ return err;
+}
+
+static int handle_reg_mr_integrity(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp,
+ const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg,
+ int *size, void **cur_edge,
+ unsigned int *idx, int nreq, u8 fence,
+ u8 next_fence)
+{
+ struct mlx5_ib_mr *mr;
+ struct mlx5_ib_mr *pi_mr;
+ struct mlx5_ib_mr pa_pi_mr;
+ struct ib_sig_attrs *sig_attrs;
+ struct ib_reg_wr reg_pi_wr;
+ int err;
+
+ qp->sq.wr_data[*idx] = IB_WR_REG_MR_INTEGRITY;
+
+ mr = to_mmr(reg_wr(wr)->mr);
+ pi_mr = mr->pi_mr;
+
+ if (pi_mr) {
+ memset(&reg_pi_wr, 0,
+ sizeof(struct ib_reg_wr));
+
+ reg_pi_wr.mr = &pi_mr->ibmr;
+ reg_pi_wr.access = reg_wr(wr)->access;
+ reg_pi_wr.key = pi_mr->ibmr.rkey;
+
+ (*ctrl)->imm = cpu_to_be32(reg_pi_wr.key);
+ /* UMR for data + prot registration */
+ err = set_reg_wr(qp, &reg_pi_wr, seg, size, cur_edge, false);
+ if (unlikely(err))
+ goto out;
+
+ mlx5r_finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx,
+ wr->wr_id, nreq, fence, MLX5_OPCODE_UMR);
+
+ err = begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ err = -ENOMEM;
+ goto out;
+ }
+ } else {
+ memset(&pa_pi_mr, 0, sizeof(struct mlx5_ib_mr));
+ /* No UMR, use local_dma_lkey */
+ pa_pi_mr.ibmr.lkey = mr->ibmr.pd->local_dma_lkey;
+ pa_pi_mr.mmkey.ndescs = mr->mmkey.ndescs;
+ pa_pi_mr.data_length = mr->data_length;
+ pa_pi_mr.data_iova = mr->data_iova;
+ if (mr->meta_ndescs) {
+ pa_pi_mr.meta_ndescs = mr->meta_ndescs;
+ pa_pi_mr.meta_length = mr->meta_length;
+ pa_pi_mr.pi_iova = mr->pi_iova;
+ }
+
+ pa_pi_mr.ibmr.length = mr->ibmr.length;
+ mr->pi_mr = &pa_pi_mr;
+ }
+ (*ctrl)->imm = cpu_to_be32(mr->ibmr.rkey);
+ /* UMR for sig MR */
+ err = set_pi_umr_wr(wr, qp, seg, size, cur_edge);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ goto out;
+ }
+ mlx5r_finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id,
+ nreq, fence, MLX5_OPCODE_UMR);
+
+ sig_attrs = mr->ibmr.sig_attrs;
+ err = handle_psv(dev, qp, wr, ctrl, seg, size, cur_edge, idx, nreq,
+ &sig_attrs->mem, mr->sig->psv_memory.psv_idx,
+ next_fence);
+ if (unlikely(err))
+ goto out;
+
+ err = handle_psv(dev, qp, wr, ctrl, seg, size, cur_edge, idx, nreq,
+ &sig_attrs->wire, mr->sig->psv_wire.psv_idx,
+ next_fence);
+ if (unlikely(err))
+ goto out;
+
+ qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+
+out:
+ return err;
+}
+
+static int handle_qpt_rc(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size,
+ void **cur_edge, unsigned int *idx, int nreq, u8 fence,
+ u8 next_fence, int *num_sge)
+{
+ int err = 0;
+
+ switch (wr->opcode) {
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ handle_rdma_op(wr, seg, size);
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+ mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
+ err = -EOPNOTSUPP;
+ goto out;
+
+ case IB_WR_LOCAL_INV:
+ handle_local_inv(qp, wr, ctrl, seg, size, cur_edge, *idx);
+ *num_sge = 0;
+ break;
+
+ case IB_WR_REG_MR:
+ err = handle_reg_mr(qp, wr, ctrl, seg, size, cur_edge, *idx);
+ if (unlikely(err))
+ goto out;
+ *num_sge = 0;
+ break;
+
+ case IB_WR_REG_MR_INTEGRITY:
+ err = handle_reg_mr_integrity(dev, qp, wr, ctrl, seg, size,
+ cur_edge, idx, nreq, fence,
+ next_fence);
+ if (unlikely(err))
+ goto out;
+ *num_sge = 0;
+ break;
+
+ default:
+ break;
+ }
+
+out:
+ return err;
+}
+
+static void handle_qpt_uc(const struct ib_send_wr *wr, void **seg, int *size)
+{
+ switch (wr->opcode) {
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ handle_rdma_op(wr, seg, size);
+ break;
+ default:
+ break;
+ }
+}
+
+static void handle_qpt_hw_gsi(struct mlx5_ib_qp *qp,
+ const struct ib_send_wr *wr, void **seg,
+ int *size, void **cur_edge)
+{
+ set_datagram_seg(*seg, wr);
+ *seg += sizeof(struct mlx5_wqe_datagram_seg);
+ *size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+}
+
+static void handle_qpt_ud(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
+ void **seg, int *size, void **cur_edge)
+{
+ set_datagram_seg(*seg, wr);
+ *seg += sizeof(struct mlx5_wqe_datagram_seg);
+ *size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ /* handle qp that supports ud offload */
+ if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) {
+ struct mlx5_wqe_eth_pad *pad;
+
+ pad = *seg;
+ memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad));
+ *seg += sizeof(struct mlx5_wqe_eth_pad);
+ *size += sizeof(struct mlx5_wqe_eth_pad) / 16;
+ set_eth_seg(wr, qp, seg, size, cur_edge);
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+ }
+}
+
+void mlx5r_ring_db(struct mlx5_ib_qp *qp, unsigned int nreq,
+ struct mlx5_wqe_ctrl_seg *ctrl)
+{
+ struct mlx5_bf *bf = &qp->bf;
+
+ qp->sq.head += nreq;
+
+ /* Make sure that descriptors are written before
+ * updating doorbell record and ringing the doorbell
+ */
+ wmb();
+
+ qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
+
+ /* Make sure doorbell record is visible to the HCA before
+ * we hit doorbell.
+ */
+ wmb();
+
+ mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset);
+ /* Make sure doorbells don't leak out of SQ spinlock
+ * and reach the HCA out of order.
+ */
+ bf->offset ^= bf->buf_size;
+}
+
+int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr, bool drain)
+{
+ struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_wqe_xrc_seg *xrc;
+ void *cur_edge;
+ int size;
+ unsigned long flags;
+ unsigned int idx;
+ int err = 0;
+ int num_sge;
+ void *seg;
+ int nreq;
+ int i;
+ u8 next_fence = 0;
+ u8 fence;
+
+ if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
+ !drain)) {
+ *bad_wr = wr;
+ return -EIO;
+ }
+
+ if (qp->type == IB_QPT_GSI)
+ return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
+
+ spin_lock_irqsave(&qp->sq.lock, flags);
+
+ for (nreq = 0; wr; nreq++, wr = wr->next) {
+ if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
+ mlx5_ib_warn(dev, "\n");
+ err = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ num_sge = wr->num_sge;
+ if (unlikely(num_sge > qp->sq.max_gs)) {
+ mlx5_ib_warn(dev, "\n");
+ err = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, &cur_edge,
+ nreq);
+ if (err) {
+ mlx5_ib_warn(dev, "\n");
+ err = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
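+		/* Choose the fence for this WQE: MR registration work requests
+		 * use the device's UMR fence and force a small initiator fence
+		 * on the following WQE; otherwise honour IB_SEND_FENCE and any
+		 * fence carried over from the previous WQE.
+		 */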
+ if (wr->opcode == IB_WR_REG_MR ||
+ wr->opcode == IB_WR_REG_MR_INTEGRITY) {
+ fence = dev->umr_fence;
+ next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+ } else {
+ if (wr->send_flags & IB_SEND_FENCE) {
+ if (qp->next_fence)
+ fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
+ else
+ fence = MLX5_FENCE_MODE_FENCE;
+ } else {
+ fence = qp->next_fence;
+ }
+ }
+
+ switch (qp->type) {
+ case IB_QPT_XRC_INI:
+ xrc = seg;
+ seg += sizeof(*xrc);
+ size += sizeof(*xrc) / 16;
+ fallthrough;
+ case IB_QPT_RC:
+ err = handle_qpt_rc(dev, qp, wr, &ctrl, &seg, &size,
+ &cur_edge, &idx, nreq, fence,
+ next_fence, &num_sge);
+ if (unlikely(err)) {
+ *bad_wr = wr;
+ goto out;
+ } else if (wr->opcode == IB_WR_REG_MR_INTEGRITY) {
+ goto skip_psv;
+ }
+ break;
+
+ case IB_QPT_UC:
+ handle_qpt_uc(wr, &seg, &size);
+ break;
+ case IB_QPT_SMI:
+ if (unlikely(!dev->port_caps[qp->port - 1].has_smi)) {
+ mlx5_ib_warn(dev, "Send SMP MADs is not allowed\n");
+ err = -EPERM;
+ *bad_wr = wr;
+ goto out;
+ }
+ fallthrough;
+ case MLX5_IB_QPT_HW_GSI:
+ handle_qpt_hw_gsi(qp, wr, &seg, &size, &cur_edge);
+ break;
+ case IB_QPT_UD:
+ handle_qpt_ud(qp, wr, &seg, &size, &cur_edge);
+ break;
+
+ default:
+ break;
+ }
+
+ if (wr->send_flags & IB_SEND_INLINE && num_sge) {
+ err = set_data_inl_seg(qp, wr, &seg, &size, &cur_edge);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ *bad_wr = wr;
+ goto out;
+ }
+ } else {
+ for (i = 0; i < num_sge; i++) {
+ handle_post_send_edge(&qp->sq, &seg, size,
+ &cur_edge);
+ if (unlikely(!wr->sg_list[i].length))
+ continue;
+
+ set_data_ptr_seg(
+ (struct mlx5_wqe_data_seg *)seg,
+ wr->sg_list + i);
+ size += sizeof(struct mlx5_wqe_data_seg) / 16;
+ seg += sizeof(struct mlx5_wqe_data_seg);
+ }
+ }
+
+ qp->next_fence = next_fence;
+ mlx5r_finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id,
+ nreq, fence, mlx5_ib_opcode[wr->opcode]);
+skip_psv:
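+		/* Debug aid: flip the condition below to dump each WQE as it
+		 * is posted.
+		 */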
+ if (0)
+ dump_wqe(qp, idx, size);
+ }
+
+out:
+ if (likely(nreq))
+ mlx5r_ring_db(qp, nreq, ctrl);
+
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+
+ return err;
+}
+
+static void set_sig_seg(struct mlx5_rwqe_sig *sig, int max_gs)
+{
+ sig->signature = calc_sig(sig, (max_gs + 1) << 2);
+}
+
+int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr, bool drain)
+{
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_wqe_data_seg *scat;
+ struct mlx5_rwqe_sig *sig;
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ unsigned long flags;
+ int err = 0;
+ int nreq;
+ int ind;
+ int i;
+
+ if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
+ !drain)) {
+ *bad_wr = wr;
+ return -EIO;
+ }
+
+ if (qp->type == IB_QPT_GSI)
+ return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);
+
+ spin_lock_irqsave(&qp->rq.lock, flags);
+
+ ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
+
+ for (nreq = 0; wr; nreq++, wr = wr->next) {
+ if (mlx5r_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
+ err = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ if (unlikely(wr->num_sge > qp->rq.max_gs)) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ scat = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ind);
+ if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
+ scat++;
+
+ for (i = 0; i < wr->num_sge; i++)
+ set_data_ptr_seg(scat + i, wr->sg_list + i);
+
+ if (i < qp->rq.max_gs) {
+ scat[i].byte_count = 0;
+ scat[i].lkey = dev->mkeys.terminate_scatter_list_mkey;
+ scat[i].addr = 0;
+ }
+
+ if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) {
+ sig = (struct mlx5_rwqe_sig *)scat;
+ set_sig_seg(sig, qp->rq.max_gs);
+ }
+
+ qp->rq.wrid[ind] = wr->wr_id;
+
+ ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
+ }
+
+out:
+ if (likely(nreq)) {
+ qp->rq.head += nreq;
+
+ /* Make sure that descriptors are written before
+ * doorbell record.
+ */
+ wmb();
+
+ *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
+ }
+
+ spin_unlock_irqrestore(&qp->rq.lock, flags);
+
+ return err;
+}
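The receive side is driven the same way; a minimal sketch under the same assumptions (example_post_one_recv and its parameters are invented for illustration):

#include <rdma/ib_verbs.h>

/* Illustrative only: post one receive buffer through ib_post_recv(),
 * which lands in mlx5_ib_post_recv() via the nodrain wrapper.
 */
static int example_post_one_recv(struct ib_qp *qp, u64 dma_addr,
				 u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr   = dma_addr,
		.length = len,
		.lkey   = lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id   = 2,
		.sg_list = &sge,
		.num_sge = 1,
	};
	const struct ib_recv_wr *bad_wr;

	return ib_post_recv(qp, &wr, &bad_wr);
}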
diff --git a/drivers/infiniband/hw/mlx5/wr.h b/drivers/infiniband/hw/mlx5/wr.h
new file mode 100644
index 000000000000..2dc89438000d
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/wr.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#ifndef _MLX5_IB_WR_H
+#define _MLX5_IB_WR_H
+
+#include "mlx5_ib.h"
+
+enum {
+ MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
+};
+
+struct mlx5_wqe_eth_pad {
+ u8 rsvd0[16];
+};
+
+
+/* get_sq_edge - Get the next nearby edge.
+ *
+ * An 'edge' is defined as the first address past the end of the
+ * current fragment or of the SQ itself. WQE construction repeatedly
+ * advances the write pointer, so it only needs to check whether that
+ * pointer has reached an edge.
+ *
+ * @sq - SQ buffer.
+ * @idx - Stride index in the SQ buffer.
+ *
+ * Return:
+ * The new edge.
+ */
+static inline void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
+{
+ void *fragment_end;
+
+ fragment_end = mlx5_frag_buf_get_wqe
+ (&sq->fbc,
+ mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));
+
+ return fragment_end + MLX5_SEND_WQE_BB;
+}
+
+/* handle_post_send_edge - Check whether the WQE pointer has reached the SQ
+ * edge. If so, advance to the next edge and refresh the address translation
+ * for the current WQE position.
+ * @sq: SQ buffer.
+ * @seg: Current WQE position (16B aligned).
+ * @wqe_sz: Total current WQE size [16B].
+ * @cur_edge: Updated current edge.
+ */
+static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
+ u32 wqe_sz, void **cur_edge)
+{
+ u32 idx;
+
+ if (likely(*seg != *cur_edge))
+ return;
+
+ idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1);
+ *cur_edge = get_sq_edge(sq, idx);
+
+ *seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
+}
+
+/* mlx5r_memcpy_send_wqe - copy data from src to the WQE and update the
+ * relevant WQ's pointers. On return, @seg is aligned to 16B regardless of
+ * the copied size.
+ * @sq: SQ buffer.
+ * @cur_edge: Updated current edge.
+ * @seg: Current WQE position (16B aligned).
+ * @wqe_sz: Total current WQE size [16B].
+ * @src: Pointer to copy from.
+ * @n: Number of bytes to copy.
+ */
+static inline void mlx5r_memcpy_send_wqe(struct mlx5_ib_wq *sq, void **cur_edge,
+ void **seg, u32 *wqe_sz,
+ const void *src, size_t n)
+{
+ while (likely(n)) {
+ size_t leftlen = *cur_edge - *seg;
+ size_t copysz = min_t(size_t, leftlen, n);
+ size_t stride;
+
+ memcpy(*seg, src, copysz);
+
+ n -= copysz;
+ src += copysz;
+ stride = !n ? ALIGN(copysz, 16) : copysz;
+ *seg += stride;
+ *wqe_sz += stride >> 4;
+ handle_post_send_edge(sq, seg, *wqe_sz, cur_edge);
+ }
+}
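To make the edge handling above concrete, here is a standalone toy model of the same copy strategy. It uses invented names (example_frag, example_copy_across_edges) and plain byte arrays rather than the driver's frag_buf structures, and it assumes fragment sizes are multiples of 16 bytes, as the SQ's are. It is a sketch of the chunk/align/wrap pattern, not driver code.

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_frag {
	u8	*base;
	size_t	size;
};

/* Copy @n bytes into a fragmented buffer, switching fragments at each
 * 'edge' and padding the final chunk up to 16 bytes, mirroring the
 * stride/ALIGN logic of mlx5r_memcpy_send_wqe() above.
 */
static void example_copy_across_edges(struct example_frag *frags,
				      unsigned int nfrags,
				      const void *src, size_t n)
{
	const u8 *p = src;
	unsigned int f = 0;
	u8 *seg = frags[0].base;
	u8 *edge = frags[0].base + frags[0].size;

	while (n) {
		size_t chunk = min_t(size_t, edge - seg, n);

		memcpy(seg, p, chunk);
		p += chunk;
		n -= chunk;
		/* pad the final chunk up to a 16-byte boundary */
		seg += n ? chunk : ALIGN(chunk, 16);

		if (seg == edge && n) {	/* hit the edge: next fragment */
			f = (f + 1) % nfrags;
			seg = frags[f].base;
			edge = frags[f].base + frags[f].size;
		}
	}
}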
+
+int mlx5r_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq);
+int mlx5r_begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+ struct mlx5_wqe_ctrl_seg **ctrl, unsigned int *idx,
+ int *size, void **cur_edge, int nreq, __be32 general_id,
+ bool send_signaled, bool solicited);
+void mlx5r_finish_wqe(struct mlx5_ib_qp *qp, struct mlx5_wqe_ctrl_seg *ctrl,
+ void *seg, u8 size, void *cur_edge, unsigned int idx,
+ u64 wr_id, int nreq, u8 fence, u32 mlx5_opcode);
+void mlx5r_ring_db(struct mlx5_ib_qp *qp, unsigned int nreq,
+ struct mlx5_wqe_ctrl_seg *ctrl);
+int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr, bool drain);
+int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr, bool drain);
+
+static inline int mlx5_ib_post_send_nodrain(struct ib_qp *ibqp,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ return mlx5_ib_post_send(ibqp, wr, bad_wr, false);
+}
+
+static inline int mlx5_ib_post_send_drain(struct ib_qp *ibqp,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ return mlx5_ib_post_send(ibqp, wr, bad_wr, true);
+}
+
+static inline int mlx5_ib_post_recv_nodrain(struct ib_qp *ibqp,
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ return mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
+}
+
+static inline int mlx5_ib_post_recv_drain(struct ib_qp *ibqp,
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ return mlx5_ib_post_recv(ibqp, wr, bad_wr, true);
+}
+#endif /* _MLX5_IB_WR_H */
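The nodrain wrappers exist so that the fixed-signature ib_device_ops callbacks and the internal drain path can share one implementation: the ops table cannot pass a drain flag, while the drain code must still post WQEs when the device is in internal error. The wiring typically looks like the abbreviated sketch below; the ops table shown is illustrative, and the real mlx5 ops definition lives elsewhere in the driver.

static const struct ib_device_ops example_dev_ops = {
	/* fixed-signature verbs callbacks map to the nodrain variants */
	.post_send = mlx5_ib_post_send_nodrain,
	.post_recv = mlx5_ib_post_recv_nodrain,
	/* ... remaining ops omitted ... */
};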
diff --git a/drivers/infiniband/hw/mthca/Kconfig b/drivers/infiniband/hw/mthca/Kconfig
index da314c3fec23..faa7381f7d63 100644
--- a/drivers/infiniband/hw/mthca/Kconfig
+++ b/drivers/infiniband/hw/mthca/Kconfig
@@ -1,7 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_MTHCA
tristate "Mellanox HCA support"
depends on PCI
- ---help---
+ help
This is a low-level driver for Mellanox InfiniHost host
channel adapters (HCAs), including the MT23108 PCI-X HCA
("Tavor") and the MT25208 PCI Express HCA ("Arbel").
@@ -10,7 +11,7 @@ config INFINIBAND_MTHCA_DEBUG
bool "Verbose debugging output" if EXPERT
depends on INFINIBAND_MTHCA
default y
- ---help---
+ help
This option causes debugging code to be compiled into the
mthca driver. The output can be turned on via the
debug_level module parameter (which can also be set after
diff --git a/drivers/infiniband/hw/mthca/Makefile b/drivers/infiniband/hw/mthca/Makefile
index e388d95d0cf1..3a09e9ffd634 100644
--- a/drivers/infiniband/hw/mthca/Makefile
+++ b/drivers/infiniband/hw/mthca/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INFINIBAND_MTHCA) += ib_mthca.o
ib_mthca-y := mthca_main.o mthca_cmd.o mthca_profile.o mthca_reset.o \
diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c
index b4e0cf4e95cd..9f0f79d02d3c 100644
--- a/drivers/infiniband/hw/mthca/mthca_allocator.c
+++ b/drivers/infiniband/hw/mthca/mthca_allocator.c
@@ -51,7 +51,7 @@ u32 mthca_alloc(struct mthca_alloc *alloc)
}
if (obj < alloc->max) {
- set_bit(obj, alloc->table);
+ __set_bit(obj, alloc->table);
obj |= alloc->top;
} else
obj = -1;
@@ -69,7 +69,7 @@ void mthca_free(struct mthca_alloc *alloc, u32 obj)
spin_lock_irqsave(&alloc->lock, flags);
- clear_bit(obj, alloc->table);
+ __clear_bit(obj, alloc->table);
alloc->last = min(alloc->last, obj);
alloc->top = (alloc->top + alloc->max) & alloc->mask;
@@ -79,8 +79,6 @@ void mthca_free(struct mthca_alloc *alloc, u32 obj)
int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask,
u32 reserved)
{
- int i;
-
/* num must be a power of 2 */
if (num != 1 << (ffs(num) - 1))
return -EINVAL;
@@ -90,21 +88,18 @@ int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask,
alloc->max = num;
alloc->mask = mask;
spin_lock_init(&alloc->lock);
- alloc->table = kmalloc(BITS_TO_LONGS(num) * sizeof (long),
- GFP_KERNEL);
+ alloc->table = bitmap_zalloc(num, GFP_KERNEL);
if (!alloc->table)
return -ENOMEM;
- bitmap_zero(alloc->table, num);
- for (i = 0; i < reserved; ++i)
- set_bit(i, alloc->table);
+ bitmap_set(alloc->table, 0, reserved);
return 0;
}
void mthca_alloc_cleanup(struct mthca_alloc *alloc)
{
- kfree(alloc->table);
+ bitmap_free(alloc->table);
}
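The hunk above replaces the open-coded kmalloc()/bitmap_zero()/set_bit() sequence with the dedicated bitmap helpers. A minimal sketch of the resulting pattern, using invented names (example_make_id_map, nids, reserved):

#include <linux/bitmap.h>
#include <linux/gfp.h>

static unsigned long *example_make_id_map(unsigned int nids,
					  unsigned int reserved)
{
	unsigned long *map = bitmap_zalloc(nids, GFP_KERNEL);

	if (!map)
		return NULL;

	bitmap_set(map, 0, reserved);	/* mark reserved IDs as in use */
	return map;			/* release later with bitmap_free(map) */
}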
/*
@@ -162,7 +157,8 @@ int mthca_array_init(struct mthca_array *array, int nent)
int npage = (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE;
int i;
- array->page_list = kmalloc(npage * sizeof *array->page_list, GFP_KERNEL);
+ array->page_list = kmalloc_array(npage, sizeof(*array->page_list),
+ GFP_KERNEL);
if (!array->page_list)
return -ENOMEM;
@@ -213,14 +209,13 @@ int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
dma_unmap_addr_set(&buf->direct, mapping, t);
- memset(buf->direct.buf, 0, size);
-
while (t & ((1 << shift) - 1)) {
--shift;
npages *= 2;
}
- dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
+ dma_list = kmalloc_array(npages, sizeof(*dma_list),
+ GFP_KERNEL);
if (!dma_list)
goto err_free;
@@ -231,12 +226,14 @@ int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
shift = PAGE_SHIFT;
- dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
+ dma_list = kmalloc_array(npages, sizeof(*dma_list),
+ GFP_KERNEL);
if (!dma_list)
return -ENOMEM;
- buf->page_list = kmalloc(npages * sizeof *buf->page_list,
- GFP_KERNEL);
+ buf->page_list = kmalloc_array(npages,
+ sizeof(*buf->page_list),
+ GFP_KERNEL);
if (!buf->page_list)
goto err_out;
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index 32f6c6315454..3df1f5ff7932 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -91,7 +91,7 @@ static enum ib_rate tavor_rate_to_ib(u8 mthca_rate, u8 port_rate)
}
}
-enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port)
+enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u32 port)
{
if (mthca_is_memfree(dev)) {
/* Handle old Arbel FW */
@@ -115,7 +115,7 @@ static u8 ib_rate_to_memfree(u8 req_rate, u8 cur_rate)
switch ((cur_rate - 1) / req_rate) {
case 0: return MTHCA_RATE_MEMFREE_FULL;
case 1: return MTHCA_RATE_MEMFREE_HALF;
- case 2: /* fall through */
+ case 2:
case 3: return MTHCA_RATE_MEMFREE_QUARTER;
default: return MTHCA_RATE_MEMFREE_EIGHTH;
}
@@ -131,7 +131,7 @@ static u8 ib_rate_to_tavor(u8 static_rate)
}
}
-u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u8 port)
+u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u32 port)
{
u8 rate;
@@ -152,7 +152,7 @@ u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u8 port)
int mthca_create_ah(struct mthca_dev *dev,
struct mthca_pd *pd,
- struct ib_ah_attr *ah_attr,
+ struct rdma_ah_attr *ah_attr,
struct mthca_ah *ah)
{
u32 index = -1;
@@ -186,8 +186,8 @@ int mthca_create_ah(struct mthca_dev *dev,
on_hca_fail:
if (ah->type == MTHCA_AH_PCI_POOL) {
- ah->av = pci_pool_alloc(dev->av_table.pool,
- GFP_ATOMIC, &ah->avdma);
+ ah->av = dma_pool_zalloc(dev->av_table.pool,
+ GFP_ATOMIC, &ah->avdma);
if (!ah->av)
return -ENOMEM;
@@ -196,23 +196,26 @@ on_hca_fail:
ah->key = pd->ntmr.ibmr.lkey;
- memset(av, 0, MTHCA_AV_SIZE);
-
- av->port_pd = cpu_to_be32(pd->pd_num | (ah_attr->port_num << 24));
- av->g_slid = ah_attr->src_path_bits;
- av->dlid = cpu_to_be16(ah_attr->dlid);
+ av->port_pd = cpu_to_be32(pd->pd_num |
+ (rdma_ah_get_port_num(ah_attr) << 24));
+ av->g_slid = rdma_ah_get_path_bits(ah_attr);
+ av->dlid = cpu_to_be16(rdma_ah_get_dlid(ah_attr));
av->msg_sr = (3 << 4) | /* 2K message */
- mthca_get_rate(dev, ah_attr->static_rate, ah_attr->port_num);
- av->sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
- if (ah_attr->ah_flags & IB_AH_GRH) {
+ mthca_get_rate(dev, rdma_ah_get_static_rate(ah_attr),
+ rdma_ah_get_port_num(ah_attr));
+ av->sl_tclass_flowlabel = cpu_to_be32(rdma_ah_get_sl(ah_attr) << 28);
+ if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+
av->g_slid |= 0x80;
- av->gid_index = (ah_attr->port_num - 1) * dev->limits.gid_table_len +
- ah_attr->grh.sgid_index;
- av->hop_limit = ah_attr->grh.hop_limit;
+ av->gid_index = (rdma_ah_get_port_num(ah_attr) - 1) *
+ dev->limits.gid_table_len +
+ grh->sgid_index;
+ av->hop_limit = grh->hop_limit;
av->sl_tclass_flowlabel |=
- cpu_to_be32((ah_attr->grh.traffic_class << 20) |
- ah_attr->grh.flow_label);
- memcpy(av->dgid, ah_attr->grh.dgid.raw, 16);
+ cpu_to_be32((grh->traffic_class << 20) |
+ grh->flow_label);
+ memcpy(av->dgid, grh->dgid.raw, 16);
} else {
/* Arbel workaround -- low byte of GID must be 2 */
av->dgid[3] = cpu_to_be32(2);
@@ -247,7 +250,7 @@ int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah)
break;
case MTHCA_AH_PCI_POOL:
- pci_pool_free(dev->av_table.pool, ah->av, ah->avdma);
+ dma_pool_free(dev->av_table.pool, ah->av, ah->avdma);
break;
case MTHCA_AH_KMALLOC:
@@ -278,10 +281,7 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
header->grh.flow_label =
ah->av->sl_tclass_flowlabel & cpu_to_be32(0xfffff);
header->grh.hop_limit = ah->av->hop_limit;
- ib_get_cached_gid(&dev->ib_dev,
- be32_to_cpu(ah->av->port_pd) >> 24,
- ah->av->gid_index % dev->limits.gid_table_len,
- &header->grh.source_gid);
+ header->grh.source_gid = ah->ibah.sgid_attr->gid;
memcpy(header->grh.destination_gid.raw,
ah->av->dgid, 16);
}
@@ -289,33 +289,35 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
return 0;
}
-int mthca_ah_query(struct ib_ah *ibah, struct ib_ah_attr *attr)
+int mthca_ah_query(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
struct mthca_ah *ah = to_mah(ibah);
struct mthca_dev *dev = to_mdev(ibah->device);
+ u32 port_num = be32_to_cpu(ah->av->port_pd) >> 24;
/* Only implement for MAD and memfree ah for now. */
if (ah->type == MTHCA_AH_ON_HCA)
return -ENOSYS;
memset(attr, 0, sizeof *attr);
- attr->dlid = be16_to_cpu(ah->av->dlid);
- attr->sl = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28;
- attr->port_num = be32_to_cpu(ah->av->port_pd) >> 24;
- attr->static_rate = mthca_rate_to_ib(dev, ah->av->msg_sr & 0x7,
- attr->port_num);
- attr->src_path_bits = ah->av->g_slid & 0x7F;
- attr->ah_flags = mthca_ah_grh_present(ah) ? IB_AH_GRH : 0;
-
- if (attr->ah_flags) {
- attr->grh.traffic_class =
- be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 20;
- attr->grh.flow_label =
- be32_to_cpu(ah->av->sl_tclass_flowlabel) & 0xfffff;
- attr->grh.hop_limit = ah->av->hop_limit;
- attr->grh.sgid_index = ah->av->gid_index &
- (dev->limits.gid_table_len - 1);
- memcpy(attr->grh.dgid.raw, ah->av->dgid, 16);
+ attr->type = ibah->type;
+ rdma_ah_set_dlid(attr, be16_to_cpu(ah->av->dlid));
+ rdma_ah_set_sl(attr, be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28);
+ rdma_ah_set_port_num(attr, port_num);
+ rdma_ah_set_static_rate(attr,
+ mthca_rate_to_ib(dev, ah->av->msg_sr & 0x7,
+ port_num));
+ rdma_ah_set_path_bits(attr, ah->av->g_slid & 0x7F);
+ if (mthca_ah_grh_present(ah)) {
+ u32 tc_fl = be32_to_cpu(ah->av->sl_tclass_flowlabel);
+
+ rdma_ah_set_grh(attr, NULL,
+ tc_fl & 0xfffff,
+ ah->av->gid_index &
+ (dev->limits.gid_table_len - 1),
+ ah->av->hop_limit,
+ (tc_fl >> 20) & 0xff);
+ rdma_ah_set_dgid_raw(attr, ah->av->dgid);
}
return 0;
@@ -335,7 +337,7 @@ int mthca_init_av_table(struct mthca_dev *dev)
if (err)
return err;
- dev->av_table.pool = pci_pool_create("mthca_av", dev->pdev,
+ dev->av_table.pool = dma_pool_create("mthca_av", &dev->pdev->dev,
MTHCA_AV_SIZE,
MTHCA_AV_SIZE, 0);
if (!dev->av_table.pool)
@@ -355,7 +357,7 @@ int mthca_init_av_table(struct mthca_dev *dev)
return 0;
out_free_pool:
- pci_pool_destroy(dev->av_table.pool);
+ dma_pool_destroy(dev->av_table.pool);
out_free_alloc:
mthca_alloc_cleanup(&dev->av_table.alloc);
@@ -369,6 +371,6 @@ void mthca_cleanup_av_table(struct mthca_dev *dev)
if (dev->av_table.av_map)
iounmap(dev->av_table.av_map);
- pci_pool_destroy(dev->av_table.pool);
+ dma_pool_destroy(dev->av_table.pool);
mthca_alloc_cleanup(&dev->av_table.alloc);
}
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index 712d2a30fbe5..f1d79968c985 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -130,9 +130,9 @@ static void handle_catas(struct mthca_dev *dev)
spin_unlock_irqrestore(&catas_lock, flags);
}
-static void poll_catas(unsigned long dev_ptr)
+static void poll_catas(struct timer_list *t)
{
- struct mthca_dev *dev = (struct mthca_dev *) dev_ptr;
+ struct mthca_dev *dev = timer_container_of(dev, t, catas_err.timer);
int i;
for (i = 0; i < dev->catas_err.size; ++i)
@@ -149,7 +149,7 @@ void mthca_start_catas_poll(struct mthca_dev *dev)
{
phys_addr_t addr;
- init_timer(&dev->catas_err.timer);
+ timer_setup(&dev->catas_err.timer, poll_catas, 0);
dev->catas_err.map = NULL;
addr = pci_resource_start(dev->pdev, 0) +
@@ -164,8 +164,6 @@ void mthca_start_catas_poll(struct mthca_dev *dev)
return;
}
- dev->catas_err.timer.data = (unsigned long) dev;
- dev->catas_err.timer.function = poll_catas;
dev->catas_err.timer.expires = jiffies + MTHCA_CATAS_POLL_INTERVAL;
INIT_LIST_HEAD(&dev->catas_err.list);
add_timer(&dev->catas_err.timer);
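This hunk also converts the catastrophic-error poller from init_timer() plus a data pointer to timer_setup() with the container_of-style accessor used above. A minimal sketch of that pattern with invented names (example_dev, example_poll, example_start); the real driver hangs the timer off dev->catas_err.timer:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_dev {
	struct timer_list poll_timer;
	/* ... */
};

static void example_poll(struct timer_list *t)
{
	struct example_dev *edev = timer_container_of(edev, t, poll_timer);

	/* do the periodic work, then re-arm */
	mod_timer(&edev->poll_timer, jiffies + HZ);
}

static void example_start(struct example_dev *edev)
{
	timer_setup(&edev->poll_timer, example_poll, 0);
	mod_timer(&edev->poll_timer, jiffies + HZ);
}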
@@ -173,7 +171,7 @@ void mthca_start_catas_poll(struct mthca_dev *dev)
void mthca_stop_catas_poll(struct mthca_dev *dev)
{
- del_timer_sync(&dev->catas_err.timer);
+ timer_delete_sync(&dev->catas_err.timer);
if (dev->catas_err.map)
iounmap(dev->catas_err.map);
@@ -187,7 +185,7 @@ int __init mthca_catas_init(void)
{
INIT_WORK(&catas_work, catas_reset);
- catas_wq = create_singlethread_workqueue("mthca_catas");
+ catas_wq = alloc_ordered_workqueue("mthca_catas", WQ_MEM_RECLAIM);
if (!catas_wq)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index c7f49bbb0c72..8fe0cef7e2be 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -292,12 +292,6 @@ static int mthca_cmd_post(struct mthca_dev *dev,
err = mthca_cmd_post_hcr(dev, in_param, out_param, in_modifier,
op_modifier, op, token, event);
- /*
- * Make sure that our HCR writes don't get mixed in with
- * writes from another CPU starting a FW command.
- */
- mmiowb();
-
mutex_unlock(&dev->cmd.hcr_mutex);
return err;
}
@@ -367,12 +361,16 @@ static int mthca_cmd_poll(struct mthca_dev *dev,
goto out;
}
- if (out_is_imm)
+ if (out_is_imm && out_param) {
*out_param =
(u64) be32_to_cpu((__force __be32)
__raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
(u64) be32_to_cpu((__force __be32)
__raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET + 4));
+ } else if (out_is_imm) {
+ err = -EINVAL;
+ goto out;
+ }
status = be32_to_cpu((__force __be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24;
if (status) {
@@ -450,8 +448,12 @@ static int mthca_cmd_wait(struct mthca_dev *dev,
err = mthca_status_to_errno(context->status);
}
- if (out_is_imm)
+ if (out_is_imm && out_param) {
*out_param = context->out_param;
+ } else if (out_is_imm) {
+ err = -EINVAL;
+ goto out;
+ }
out:
spin_lock(&dev->cmd.context_lock);
@@ -530,7 +532,7 @@ int mthca_cmd_init(struct mthca_dev *dev)
return -ENOMEM;
}
- dev->cmd.pool = pci_pool_create("mthca_cmd", dev->pdev,
+ dev->cmd.pool = dma_pool_create("mthca_cmd", &dev->pdev->dev,
MTHCA_MAILBOX_SIZE,
MTHCA_MAILBOX_SIZE, 0);
if (!dev->cmd.pool) {
@@ -543,7 +545,7 @@ int mthca_cmd_init(struct mthca_dev *dev)
void mthca_cmd_cleanup(struct mthca_dev *dev)
{
- pci_pool_destroy(dev->cmd.pool);
+ dma_pool_destroy(dev->cmd.pool);
iounmap(dev->hcr);
if (dev->cmd.flags & MTHCA_CMD_POST_DOORBELLS)
iounmap(dev->cmd.dbell_map);
@@ -557,9 +559,9 @@ int mthca_cmd_use_events(struct mthca_dev *dev)
{
int i;
- dev->cmd.context = kmalloc(dev->cmd.max_cmds *
- sizeof (struct mthca_cmd_context),
- GFP_KERNEL);
+ dev->cmd.context = kmalloc_array(dev->cmd.max_cmds,
+ sizeof(struct mthca_cmd_context),
+ GFP_KERNEL);
if (!dev->cmd.context)
return -ENOMEM;
@@ -613,7 +615,7 @@ struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
if (!mailbox)
return ERR_PTR(-ENOMEM);
- mailbox->buf = pci_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma);
+ mailbox->buf = dma_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma);
if (!mailbox->buf) {
kfree(mailbox);
return ERR_PTR(-ENOMEM);
@@ -627,13 +629,13 @@ void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox)
if (!mailbox)
return;
- pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
+ dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
kfree(mailbox);
}
int mthca_SYS_EN(struct mthca_dev *dev)
{
- u64 out;
+ u64 out = 0;
int ret;
ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, CMD_TIME_CLASS_D);
@@ -690,7 +692,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
for (i = 0; i < mthca_icm_size(&iter) >> lg; ++i) {
if (virt != -1) {
pages[nent * 2] = cpu_to_be64(virt);
- virt += 1 << lg;
+ virt += 1ULL << lg;
}
pages[nent * 2 + 1] =
@@ -1250,7 +1252,7 @@ static void get_board_id(void *vsd, char *board_id)
if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
- strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MTHCA_BOARD_ID_LEN);
+ strscpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MTHCA_BOARD_ID_LEN);
} else {
/*
* The board ID is a string but the firmware byte
@@ -1913,7 +1915,7 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
MTHCA_PUT(inbox, val, MAD_IFC_G_PATH_OFFSET);
- MTHCA_PUT(inbox, in_wc->slid, MAD_IFC_RLID_OFFSET);
+ MTHCA_PUT(inbox, ib_lid_cpu16(in_wc->slid), MAD_IFC_RLID_OFFSET);
MTHCA_PUT(inbox, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET);
if (in_grh)
@@ -1921,7 +1923,7 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
op_modifier |= 0x4;
- in_modifier |= in_wc->slid << 16;
+ in_modifier |= ib_lid_cpu16(in_wc->slid) << 16;
}
err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma,
@@ -1953,7 +1955,7 @@ int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
u16 *hash)
{
- u64 imm;
+ u64 imm = 0;
int err;
err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH,
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 40ba83338155..26c3408dcaca 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -77,7 +77,7 @@ struct mthca_cq_context {
__be32 ci_db; /* Arbel only */
__be32 state_db; /* Arbel only */
u32 reserved;
-} __attribute__((packed));
+} __packed;
#define MTHCA_CQ_STATUS_OK ( 0 << 28)
#define MTHCA_CQ_STATUS_OVERFLOW ( 9 << 28)
@@ -211,11 +211,6 @@ static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
mthca_write64(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn, incr - 1,
dev->kar + MTHCA_CQ_DOORBELL,
MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
- /*
- * Make sure doorbells don't leak out of CQ spinlock
- * and reach the HCA out of order:
- */
- mmiowb();
}
}
@@ -608,11 +603,8 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
entry->opcode = IB_WC_FETCH_ADD;
entry->byte_len = MTHCA_ATOMIC_BYTE_LEN;
break;
- case MTHCA_OPCODE_BIND_MW:
- entry->opcode = IB_WC_BIND_MW;
- break;
default:
- entry->opcode = MTHCA_OPCODE_INVALID;
+ entry->opcode = 0xFF;
break;
}
} else {
@@ -811,8 +803,10 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
}
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
- if (IS_ERR(mailbox))
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
goto err_out_arm;
+ }
cq_context = mailbox->buf;
@@ -854,9 +848,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
}
spin_lock_irq(&dev->cq_table.lock);
- if (mthca_array_set(&dev->cq_table.cq,
- cq->cqn & (dev->limits.num_cqs - 1),
- cq)) {
+ err = mthca_array_set(&dev->cq_table.cq,
+ cq->cqn & (dev->limits.num_cqs - 1), cq);
+ if (err) {
spin_unlock_irq(&dev->cq_table.lock);
goto err_out_free_mr;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 4393a022867b..a4a9d871d00e 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -105,7 +105,6 @@ enum {
MTHCA_OPCODE_ATOMIC_CS = 0x11,
MTHCA_OPCODE_ATOMIC_FA = 0x12,
MTHCA_OPCODE_BIND_MW = 0x18,
- MTHCA_OPCODE_INVALID = 0xff
};
enum {
@@ -118,7 +117,7 @@ enum {
};
struct mthca_cmd {
- struct pci_pool *pool;
+ struct dma_pool *pool;
struct mutex hcr_mutex;
struct semaphore poll_sem;
struct semaphore event_sem;
@@ -263,7 +262,7 @@ struct mthca_qp_table {
};
struct mthca_av_table {
- struct pci_pool *pool;
+ struct dma_pool *pool;
int num_ddr_avs;
u64 ddr_av_base;
void __iomem *av_map;
@@ -478,16 +477,6 @@ int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
u32 access, struct mthca_mr *mr);
void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr);
-int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
- u32 access, struct mthca_fmr *fmr);
-int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova);
-void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr);
-int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova);
-void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr);
-int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr);
-
int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt);
void mthca_unmap_eq_icm(struct mthca_dev *dev);
@@ -510,7 +499,8 @@ int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent
void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe);
int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
- struct ib_srq_attr *attr, struct mthca_srq *srq);
+ struct ib_srq_attr *attr, struct mthca_srq *srq,
+ struct ib_udata *udata);
void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
@@ -519,10 +509,10 @@ int mthca_max_srq_sge(struct mthca_dev *dev);
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
enum ib_event_type event_type);
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
-int mthca_tavor_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
-int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int mthca_tavor_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+int mthca_arbel_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
enum ib_event_type event_type);
@@ -530,14 +520,14 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
struct ib_qp_init_attr *qp_init_attr);
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
struct ib_udata *udata);
-int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
-int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int mthca_tavor_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int mthca_tavor_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+int mthca_arbel_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int mthca_arbel_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
int index, int *dbd, __be32 *new_wqe);
int mthca_alloc_qp(struct mthca_dev *dev,
@@ -547,7 +537,8 @@ int mthca_alloc_qp(struct mthca_dev *dev,
enum ib_qp_type type,
enum ib_sig_type send_policy,
struct ib_qp_cap *cap,
- struct mthca_qp *qp);
+ struct mthca_qp *qp,
+ struct ib_udata *udata);
int mthca_alloc_sqp(struct mthca_dev *dev,
struct mthca_pd *pd,
struct mthca_cq *send_cq,
@@ -555,32 +546,29 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
enum ib_sig_type send_policy,
struct ib_qp_cap *cap,
int qpn,
- int port,
- struct mthca_sqp *sqp);
+ u32 port,
+ struct mthca_qp *qp,
+ struct ib_udata *udata);
void mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp);
int mthca_create_ah(struct mthca_dev *dev,
struct mthca_pd *pd,
- struct ib_ah_attr *ah_attr,
+ struct rdma_ah_attr *ah_attr,
struct mthca_ah *ah);
int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah);
int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
struct ib_ud_header *header);
-int mthca_ah_query(struct ib_ah *ibah, struct ib_ah_attr *attr);
+int mthca_ah_query(struct ib_ah *ibah, struct rdma_ah_attr *attr);
int mthca_ah_grh_present(struct mthca_ah *ah);
-u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u8 port);
-enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port);
+u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u32 port);
+enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u32 port);
int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
-int mthca_process_mad(struct ib_device *ibdev,
- int mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
int mthca_create_agents(struct mthca_dev *dev);
void mthca_free_agents(struct mthca_dev *dev);
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 690201738993..97287c544da8 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -63,7 +63,7 @@ struct mthca_eq_context {
__be32 consumer_index;
__be32 producer_index;
u32 reserved3[4];
-} __attribute__((packed));
+} __packed;
#define MTHCA_EQ_STATUS_OK ( 0 << 28)
#define MTHCA_EQ_STATUS_OVERFLOW ( 9 << 28)
@@ -130,7 +130,7 @@ struct mthca_eqe {
u32 raw[6];
struct {
__be32 cqn;
- } __attribute__((packed)) comp;
+ } __packed comp;
struct {
u16 reserved1;
__be16 token;
@@ -138,27 +138,27 @@ struct mthca_eqe {
u8 reserved3[3];
u8 status;
__be64 out_param;
- } __attribute__((packed)) cmd;
+ } __packed cmd;
struct {
__be32 qpn;
- } __attribute__((packed)) qp;
+ } __packed qp;
struct {
__be32 srqn;
- } __attribute__((packed)) srq;
+ } __packed srq;
struct {
__be32 cqn;
u32 reserved1;
u8 reserved2[3];
u8 syndrome;
- } __attribute__((packed)) cq_err;
+ } __packed cq_err;
struct {
u32 reserved1[2];
__be32 port;
- } __attribute__((packed)) port_change;
+ } __packed port_change;
} event;
u8 reserved3[3];
u8 owner;
-} __attribute__((packed));
+} __packed;
#define MTHCA_EQ_ENTRY_OWNER_SW (0 << 7)
#define MTHCA_EQ_ENTRY_OWNER_HW (1 << 7)
@@ -479,15 +479,15 @@ static int mthca_create_eq(struct mthca_dev *dev,
eq->nent = roundup_pow_of_two(max(nent, 2));
npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;
- eq->page_list = kmalloc(npages * sizeof *eq->page_list,
- GFP_KERNEL);
+ eq->page_list = kmalloc_array(npages, sizeof(*eq->page_list),
+ GFP_KERNEL);
if (!eq->page_list)
goto err_out;
for (i = 0; i < npages; ++i)
eq->page_list[i].buf = NULL;
- dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
+ dma_list = kmalloc_array(npages, sizeof(*dma_list), GFP_KERNEL);
if (!dma_list)
goto err_out_free;
@@ -617,9 +617,9 @@ static void mthca_free_eq(struct mthca_dev *dev,
mthca_free_mr(dev, &eq->mr);
for (i = 0; i < npages; ++i)
- pci_free_consistent(dev->pdev, PAGE_SIZE,
- eq->page_list[i].buf,
- dma_unmap_addr(&eq->page_list[i], mapping));
+ dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ eq->page_list[i].buf,
+ dma_unmap_addr(&eq->page_list[i], mapping));
kfree(eq->page_list);
mthca_free_mailbox(dev, mailbox);
@@ -739,17 +739,18 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
if (!dev->eq_table.icm_page)
return -ENOMEM;
- dev->eq_table.icm_dma = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) {
+ dev->eq_table.icm_dma =
+ dma_map_page(&dev->pdev->dev, dev->eq_table.icm_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&dev->pdev->dev, dev->eq_table.icm_dma)) {
__free_page(dev->eq_table.icm_page);
return -ENOMEM;
}
ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt);
if (ret) {
- pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(&dev->pdev->dev, dev->eq_table.icm_dma,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(dev->eq_table.icm_page);
}
@@ -759,8 +760,8 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
void mthca_unmap_eq_icm(struct mthca_dev *dev)
{
mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1);
- pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(&dev->pdev->dev, dev->eq_table.icm_dma, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
__free_page(dev->eq_table.icm_page);
}
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 7c3f2fb44ba5..04252700790e 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -58,8 +58,9 @@ static int mthca_update_rate(struct mthca_dev *dev, u8 port_num)
ret = ib_query_port(&dev->ib_dev, port_num, tprops);
if (ret) {
- printk(KERN_WARNING "ib_query_port failed (%d) for %s port %d\n",
- ret, dev->ib_dev.name, port_num);
+ dev_warn(&dev->ib_dev.dev,
+ "ib_query_port failed (%d) forport %d\n", ret,
+ port_num);
goto out;
}
@@ -75,25 +76,26 @@ static void update_sm_ah(struct mthca_dev *dev,
u8 port_num, u16 lid, u8 sl)
{
struct ib_ah *new_ah;
- struct ib_ah_attr ah_attr;
+ struct rdma_ah_attr ah_attr;
unsigned long flags;
if (!dev->send_agent[port_num - 1][0])
return;
memset(&ah_attr, 0, sizeof ah_attr);
- ah_attr.dlid = lid;
- ah_attr.sl = sl;
- ah_attr.port_num = port_num;
+ ah_attr.type = rdma_ah_find_type(&dev->ib_dev, port_num);
+ rdma_ah_set_dlid(&ah_attr, lid);
+ rdma_ah_set_sl(&ah_attr, sl);
+ rdma_ah_set_port_num(&ah_attr, port_num);
- new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
- &ah_attr);
+ new_ah = rdma_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
+ &ah_attr, 0);
if (IS_ERR(new_ah))
return;
spin_lock_irqsave(&dev->sm_lock, flags);
if (dev->sm_ah[port_num - 1])
- ib_destroy_ah(dev->sm_ah[port_num - 1]);
+ rdma_destroy_ah(dev->sm_ah[port_num - 1], 0);
dev->sm_ah[port_num - 1] = new_ah;
spin_unlock_irqrestore(&dev->sm_lock, flags);
}
@@ -153,13 +155,14 @@ static void node_desc_override(struct ib_device *dev,
mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
mutex_lock(&to_mdev(dev)->cap_mask_mutex);
- memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
+ memcpy(((struct ib_smp *) mad)->data, dev->node_desc,
+ IB_DEVICE_NODE_DESC_MAX);
mutex_unlock(&to_mdev(dev)->cap_mask_mutex);
}
}
static void forward_trap(struct mthca_dev *dev,
- u8 port_num,
+ u32 port_num,
const struct ib_mad *mad)
{
int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
@@ -193,30 +196,19 @@ static void forward_trap(struct mthca_dev *dev,
}
}
-int mthca_process_mad(struct ib_device *ibdev,
- int mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
{
int err;
- u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
+ u16 slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
u16 prev_lid = 0;
struct ib_port_attr pattr;
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
-
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
/* Forward locally generated traps to the SM */
- if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
- slid == 0) {
- forward_trap(to_mdev(ibdev), port_num, in_mad);
+ if (in->mad_hdr.method == IB_MGMT_METHOD_TRAP && !slid) {
+ forward_trap(to_mdev(ibdev), port_num, in);
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
}
@@ -226,40 +218,39 @@ int mthca_process_mad(struct ib_device *ibdev,
* Only handle PMA and Mellanox vendor-specific class gets and
* sets for other classes.
*/
- if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
+ if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+ in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+ if (in->mad_hdr.method != IB_MGMT_METHOD_GET &&
+ in->mad_hdr.method != IB_MGMT_METHOD_SET &&
+ in->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
return IB_MAD_RESULT_SUCCESS;
/*
* Don't process SMInfo queries or vendor-specific
* MADs -- the SMA can't handle them.
*/
- if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
- ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
+ if (in->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
+ ((in->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
IB_SMP_ATTR_VENDOR_MASK))
return IB_MAD_RESULT_SUCCESS;
- } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
- in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1 ||
- in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) {
- if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
+ } else if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
+ in->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1 ||
+ in->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) {
+ if (in->mad_hdr.method != IB_MGMT_METHOD_GET &&
+ in->mad_hdr.method != IB_MGMT_METHOD_SET)
return IB_MAD_RESULT_SUCCESS;
} else
return IB_MAD_RESULT_SUCCESS;
- if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
- in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
- in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
+ if ((in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+ in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
+ in->mad_hdr.method == IB_MGMT_METHOD_SET &&
+ in->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
!ib_query_port(ibdev, port_num, &pattr))
- prev_lid = pattr.lid;
+ prev_lid = ib_lid_cpu16(pattr.lid);
- err = mthca_MAD_IFC(to_mdev(ibdev),
- mad_flags & IB_MAD_IGNORE_MKEY,
- mad_flags & IB_MAD_IGNORE_BKEY,
- port_num, in_wc, in_grh, in_mad, out_mad);
+ err = mthca_MAD_IFC(to_mdev(ibdev), mad_flags & IB_MAD_IGNORE_MKEY,
+ mad_flags & IB_MAD_IGNORE_BKEY, port_num, in_wc,
+ in_grh, in, out);
if (err == -EBADMSG)
return IB_MAD_RESULT_SUCCESS;
else if (err) {
@@ -267,16 +258,16 @@ int mthca_process_mad(struct ib_device *ibdev,
return IB_MAD_RESULT_FAILURE;
}
- if (!out_mad->mad_hdr.status) {
- smp_snoop(ibdev, port_num, in_mad, prev_lid);
- node_desc_override(ibdev, out_mad);
+ if (!out->mad_hdr.status) {
+ smp_snoop(ibdev, port_num, in, prev_lid);
+ node_desc_override(ibdev, out);
}
/* set return bit in status of directed route responses */
- if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);
+ if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ out->mad_hdr.status |= cpu_to_be16(1 << 15);
- if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
+ if (in->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
/* no response for trap repress */
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
@@ -344,6 +335,7 @@ void mthca_free_agents(struct mthca_dev *dev)
}
if (dev->sm_ah[p])
- ib_destroy_ah(dev->sm_ah[p]);
+ rdma_destroy_ah(dev->sm_ah[p],
+ RDMA_DESTROY_AH_SLEEPABLE);
}
}
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index ded76c101dde..1ab268b77096 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -49,7 +49,6 @@
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox InfiniBand HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
#ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
@@ -383,7 +382,7 @@ static int mthca_init_icm(struct mthca_dev *mdev,
struct mthca_init_hca_param *init_hca,
u64 icm_size)
{
- u64 aux_pages;
+ u64 aux_pages = 0;
int err;
err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages);
@@ -474,11 +473,11 @@ static int mthca_init_icm(struct mthca_dev *mdev,
goto err_unmap_eqp;
}
- mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
- dev_lim->cqc_entry_sz,
- mdev->limits.num_cqs,
- mdev->limits.reserved_cqs,
- 0, 0);
+ mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
+ dev_lim->cqc_entry_sz,
+ mdev->limits.num_cqs,
+ mdev->limits.reserved_cqs,
+ 0, 0);
if (!mdev->cq_table.table) {
mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
err = -ENOMEM;
@@ -851,20 +850,18 @@ err_uar_table_free:
static int mthca_enable_msi_x(struct mthca_dev *mdev)
{
- struct msix_entry entries[3];
int err;
- entries[0].entry = 0;
- entries[1].entry = 1;
- entries[2].entry = 2;
-
- err = pci_enable_msix_exact(mdev->pdev, entries, ARRAY_SIZE(entries));
- if (err)
+ err = pci_alloc_irq_vectors(mdev->pdev, 3, 3, PCI_IRQ_MSIX);
+ if (err < 0)
return err;
- mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector = entries[0].vector;
- mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector = entries[1].vector;
- mdev->eq_table.eq[MTHCA_EQ_CMD ].msi_x_vector = entries[2].vector;
+ mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector =
+ pci_irq_vector(mdev->pdev, 0);
+ mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector =
+ pci_irq_vector(mdev->pdev, 1);
+ mdev->eq_table.eq[MTHCA_EQ_CMD ].msi_x_vector =
+ pci_irq_vector(mdev->pdev, 2);
return 0;
}
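This conversion drops the struct msix_entry array in favour of pci_alloc_irq_vectors(), which allocates the requested MSI-X vectors and hands back Linux IRQ numbers on demand. A minimal sketch with an invented helper name (example_enable_vectors):

#include <linux/pci.h>

static int example_enable_vectors(struct pci_dev *pdev, unsigned int nvec)
{
	int ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);

	if (ret < 0)
		return ret;

	/* The i-th interrupt is then pci_irq_vector(pdev, i); everything
	 * is released again with pci_free_irq_vectors(pdev).
	 */
	return 0;
}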
@@ -940,31 +937,16 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
pci_set_master(pdev);
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err) {
- dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
- goto err_free_res;
- }
- }
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
- "consistent PCI DMA mask.\n");
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
- "aborting.\n");
- goto err_free_res;
- }
+ dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
+ goto err_free_res;
}
/* We can handle large RDMA requests, so allow larger segments. */
dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
- mdev = (struct mthca_dev *) ib_alloc_device(sizeof *mdev);
+ mdev = ib_alloc_device(mthca_dev, ib_dev);
if (!mdev) {
dev_err(&pdev->dev, "Device struct alloc failed, "
"aborting.\n");
@@ -989,7 +971,8 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
goto err_free_dev;
}
- if (mthca_cmd_init(mdev)) {
+ err = mthca_cmd_init(mdev);
+ if (err) {
mthca_err(mdev, "Failed to init command interface, aborting.\n");
goto err_free_dev;
}
@@ -1017,8 +1000,7 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
err = mthca_setup_hca(mdev);
if (err == -EBUSY && (mdev->mthca_flags & MTHCA_FLAG_MSI_X)) {
- if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
- pci_disable_msix(pdev);
+ pci_free_irq_vectors(pdev);
mdev->mthca_flags &= ~MTHCA_FLAG_MSI_X;
err = mthca_setup_hca(mdev);
@@ -1062,7 +1044,7 @@ err_cleanup:
err_close:
if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
- pci_disable_msix(pdev);
+ pci_free_irq_vectors(pdev);
mthca_close_hca(mdev);
@@ -1113,7 +1095,7 @@ static void __mthca_remove_one(struct pci_dev *pdev)
mthca_cmd_cleanup(mdev);
if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
- pci_disable_msix(pdev);
+ pci_free_irq_vectors(pdev);
ib_dealloc_device(&mdev->ib_dev);
pci_release_regions(pdev);
@@ -1164,7 +1146,7 @@ static void mthca_remove_one(struct pci_dev *pdev)
mutex_unlock(&mthca_device_mutex);
}
-static struct pci_device_id mthca_pci_table[] = {
+static const struct pci_device_id mthca_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR),
.driver_data = TAVOR },
{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_TAVOR),
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 7d2e42dd6926..f2734a5c5f26 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -58,7 +58,7 @@ struct mthca_user_db_table {
u64 uvirt;
struct scatterlist mem;
int refcount;
- } page[0];
+ } page[];
};
static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
@@ -66,8 +66,8 @@ static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *
int i;
if (chunk->nsg > 0)
- pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_sg(&dev->pdev->dev, chunk->mem, chunk->npages,
+ DMA_BIDIRECTIONAL);
for (i = 0; i < chunk->npages; ++i)
__free_pages(sg_page(&chunk->mem[i]),
@@ -184,9 +184,10 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
if (coherent)
++chunk->nsg;
else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) {
- chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
- chunk->npages,
- PCI_DMA_BIDIRECTIONAL);
+ chunk->nsg =
+ dma_map_sg(&dev->pdev->dev, chunk->mem,
+ chunk->npages,
+ DMA_BIDIRECTIONAL);
if (chunk->nsg <= 0)
goto fail;
@@ -204,9 +205,8 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
}
if (!coherent && chunk) {
- chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
- chunk->npages,
- PCI_DMA_BIDIRECTIONAL);
+ chunk->nsg = dma_map_sg(&dev->pdev->dev, chunk->mem,
+ chunk->npages, DMA_BIDIRECTIONAL);
if (chunk->nsg <= 0)
goto fail;
@@ -367,7 +367,7 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size;
num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);
- table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
+ table = kmalloc(struct_size(table, icm, num_icm), GFP_KERNEL);
if (!table)
return NULL;
@@ -472,25 +472,27 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
goto out;
}
- ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
- pages, NULL);
+ ret = pin_user_pages_fast(uaddr & PAGE_MASK, 1,
+ FOLL_WRITE | FOLL_LONGTERM, pages);
if (ret < 0)
goto out;
sg_set_page(&db_tab->page[i].mem, pages[0], MTHCA_ICM_PAGE_SIZE,
uaddr & ~PAGE_MASK);
- ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
+ ret = dma_map_sg(&dev->pdev->dev, &db_tab->page[i].mem, 1,
+ DMA_TO_DEVICE);
if (ret < 0) {
- put_page(pages[0]);
+ unpin_user_page(pages[0]);
goto out;
}
ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
mthca_uarc_virt(dev, uar, i));
if (ret) {
- pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
- put_page(sg_page(&db_tab->page[i].mem));
+ dma_unmap_sg(&dev->pdev->dev, &db_tab->page[i].mem, 1,
+ DMA_TO_DEVICE);
+ unpin_user_page(sg_page(&db_tab->page[i].mem));
goto out;
}
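The hunk above moves from get_user_pages() with put_page() to pin_user_pages_fast() with unpin_user_page(), the required pairing for long-term DMA pins. A minimal sketch of the pin/map/unpin lifecycle with an invented helper (example_pin_one_page); the scatterlist element is assumed to have been initialised by the caller:

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

static int example_pin_one_page(struct device *dma_dev, unsigned long uaddr,
				struct scatterlist *sg)
{
	struct page *page;
	int ret;

	ret = pin_user_pages_fast(uaddr & PAGE_MASK, 1,
				  FOLL_WRITE | FOLL_LONGTERM, &page);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	sg_set_page(sg, page, PAGE_SIZE, uaddr & ~PAGE_MASK);

	if (dma_map_sg(dma_dev, sg, 1, DMA_TO_DEVICE) <= 0) {
		unpin_user_page(page);	/* undo the long-term pin on failure */
		return -ENOMEM;
	}
	return 0;
}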
@@ -530,7 +532,7 @@ struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
return NULL;
npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
- db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL);
+ db_tab = kmalloc(struct_size(db_tab, page, npages), GFP_KERNEL);
if (!db_tab)
return ERR_PTR(-ENOMEM);
@@ -555,8 +557,9 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
if (db_tab->page[i].uvirt) {
mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1);
- pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
- put_page(sg_page(&db_tab->page[i].mem));
+ dma_unmap_sg(&dev->pdev->dev, &db_tab->page[i].mem, 1,
+ DMA_TO_DEVICE);
+ unpin_user_page(sg_page(&db_tab->page[i].mem));
}
}
@@ -624,13 +627,13 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
page = dev->db_tab->page + end;
alloc:
- page->db_rec = dma_alloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
- &page->mapping, GFP_KERNEL);
+ page->db_rec = dma_alloc_coherent(&dev->pdev->dev,
+ MTHCA_ICM_PAGE_SIZE, &page->mapping,
+ GFP_KERNEL);
if (!page->db_rec) {
ret = -ENOMEM;
goto out;
}
- memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);
ret = mthca_MAP_ICM_page(dev, page->mapping,
mthca_uarc_virt(dev, &dev->driver_uar, i));
@@ -714,9 +717,9 @@ int mthca_init_db_tab(struct mthca_dev *dev)
dev->db_tab->max_group1 = 0;
dev->db_tab->min_group2 = dev->db_tab->npages - 1;
- dev->db_tab->page = kmalloc(dev->db_tab->npages *
- sizeof *dev->db_tab->page,
- GFP_KERNEL);
+ dev->db_tab->page = kmalloc_array(dev->db_tab->npages,
+ sizeof(*dev->db_tab->page),
+ GFP_KERNEL);
if (!dev->db_tab->page) {
kfree(dev->db_tab);
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.h b/drivers/infiniband/hw/mthca/mthca_memfree.h
index da9b8f9b884f..61d5bbba293a 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.h
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.h
@@ -68,7 +68,7 @@ struct mthca_icm_table {
int lowmem;
int coherent;
struct mutex mutex;
- struct mthca_icm *icm[0];
+ struct mthca_icm *icm[] __counted_by(num_icm);
};
struct mthca_icm_iter {
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index ed9a989e501b..dacb8ceeebe0 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -60,7 +60,7 @@ struct mthca_mpt_entry {
__be64 mtt_seg;
__be32 mtt_sz; /* Arbel only */
u32 reserved[2];
-} __attribute__((packed));
+} __packed;
#define MTHCA_MPT_FLAG_SW_OWNS (0xfUL << 28)
#define MTHCA_MPT_FLAG_MIO (1 << 17)
@@ -101,13 +101,13 @@ static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
return -1;
found:
- clear_bit(seg, buddy->bits[o]);
+ __clear_bit(seg, buddy->bits[o]);
--buddy->num_free[o];
while (o > order) {
--o;
seg <<= 1;
- set_bit(seg ^ 1, buddy->bits[o]);
+ __set_bit(seg ^ 1, buddy->bits[o]);
++buddy->num_free[o];
}
@@ -125,13 +125,13 @@ static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
spin_lock(&buddy->lock);
while (test_bit(seg ^ 1, buddy->bits[order])) {
- clear_bit(seg ^ 1, buddy->bits[order]);
+ __clear_bit(seg ^ 1, buddy->bits[order]);
--buddy->num_free[order];
seg >>= 1;
++order;
}
- set_bit(seg, buddy->bits[order]);
+ __set_bit(seg, buddy->bits[order]);
++buddy->num_free[order];
spin_unlock(&buddy->lock);
@@ -139,12 +139,12 @@ static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
{
- int i, s;
+ int i;
buddy->max_order = max_order;
spin_lock_init(&buddy->lock);
- buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
+ buddy->bits = kcalloc(buddy->max_order + 1, sizeof(*buddy->bits),
GFP_KERNEL);
buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
GFP_KERNEL);
@@ -152,22 +152,20 @@ static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
goto err_out;
for (i = 0; i <= buddy->max_order; ++i) {
- s = BITS_TO_LONGS(1 << (buddy->max_order - i));
- buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
+ buddy->bits[i] = bitmap_zalloc(1 << (buddy->max_order - i),
+ GFP_KERNEL);
if (!buddy->bits[i])
goto err_out_free;
- bitmap_zero(buddy->bits[i],
- 1 << (buddy->max_order - i));
}
- set_bit(0, buddy->bits[buddy->max_order]);
+ __set_bit(0, buddy->bits[buddy->max_order]);
buddy->num_free[buddy->max_order] = 1;
return 0;
err_out_free:
for (i = 0; i <= buddy->max_order; ++i)
- kfree(buddy->bits[i]);
+ bitmap_free(buddy->bits[i]);
err_out:
kfree(buddy->bits);
@@ -181,7 +179,7 @@ static void mthca_buddy_cleanup(struct mthca_buddy *buddy)
int i;
for (i = 0; i <= buddy->max_order; ++i)
- kfree(buddy->bits[i]);
+ bitmap_free(buddy->bits[i]);
kfree(buddy->bits);
kfree(buddy->num_free);
@@ -469,8 +467,7 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
mpt_entry->start = cpu_to_be64(iova);
mpt_entry->length = cpu_to_be64(total_size);
- memset(&mpt_entry->lkey, 0,
- sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));
+ memset_startat(mpt_entry, 0, lkey);
if (mr->mtt)
mpt_entry->mtt_seg =
@@ -541,7 +538,7 @@ int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
return err;
}
-/* Free mr or fmr */
+/* Free mr */
static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
{
mthca_table_put(dev, dev->mr_table.mpt_table,
@@ -564,266 +561,6 @@ void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
mthca_free_mtt(dev, mr->mtt);
}
-int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
- u32 access, struct mthca_fmr *mr)
-{
- struct mthca_mpt_entry *mpt_entry;
- struct mthca_mailbox *mailbox;
- u64 mtt_seg;
- u32 key, idx;
- int list_len = mr->attr.max_pages;
- int err = -ENOMEM;
- int i;
-
- if (mr->attr.page_shift < 12 || mr->attr.page_shift >= 32)
- return -EINVAL;
-
- /* For Arbel, all MTTs must fit in the same page. */
- if (mthca_is_memfree(dev) &&
- mr->attr.max_pages * sizeof *mr->mem.arbel.mtts > PAGE_SIZE)
- return -EINVAL;
-
- mr->maps = 0;
-
- key = mthca_alloc(&dev->mr_table.mpt_alloc);
- if (key == -1)
- return -ENOMEM;
- key = adjust_key(dev, key);
-
- idx = key & (dev->limits.num_mpts - 1);
- mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
-
- if (mthca_is_memfree(dev)) {
- err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
- if (err)
- goto err_out_mpt_free;
-
- mr->mem.arbel.mpt = mthca_table_find(dev->mr_table.mpt_table, key, NULL);
- BUG_ON(!mr->mem.arbel.mpt);
- } else
- mr->mem.tavor.mpt = dev->mr_table.tavor_fmr.mpt_base +
- sizeof *(mr->mem.tavor.mpt) * idx;
-
- mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
- if (IS_ERR(mr->mtt)) {
- err = PTR_ERR(mr->mtt);
- goto err_out_table;
- }
-
- mtt_seg = mr->mtt->first_seg * dev->limits.mtt_seg_size;
-
- if (mthca_is_memfree(dev)) {
- mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
- mr->mtt->first_seg,
- &mr->mem.arbel.dma_handle);
- BUG_ON(!mr->mem.arbel.mtts);
- } else
- mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;
-
- mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
- if (IS_ERR(mailbox)) {
- err = PTR_ERR(mailbox);
- goto err_out_free_mtt;
- }
-
- mpt_entry = mailbox->buf;
-
- mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS |
- MTHCA_MPT_FLAG_MIO |
- MTHCA_MPT_FLAG_REGION |
- access);
-
- mpt_entry->page_size = cpu_to_be32(mr->attr.page_shift - 12);
- mpt_entry->key = cpu_to_be32(key);
- mpt_entry->pd = cpu_to_be32(pd);
- memset(&mpt_entry->start, 0,
- sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, start));
- mpt_entry->mtt_seg = cpu_to_be64(dev->mr_table.mtt_base + mtt_seg);
-
- if (0) {
- mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
- for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
- if (i % 4 == 0)
- printk("[%02x] ", i * 4);
- printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
- if ((i + 1) % 4 == 0)
- printk("\n");
- }
- }
-
- err = mthca_SW2HW_MPT(dev, mailbox,
- key & (dev->limits.num_mpts - 1));
- if (err) {
- mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
- goto err_out_mailbox_free;
- }
-
- mthca_free_mailbox(dev, mailbox);
- return 0;
-
-err_out_mailbox_free:
- mthca_free_mailbox(dev, mailbox);
-
-err_out_free_mtt:
- mthca_free_mtt(dev, mr->mtt);
-
-err_out_table:
- mthca_table_put(dev, dev->mr_table.mpt_table, key);
-
-err_out_mpt_free:
- mthca_free(&dev->mr_table.mpt_alloc, key);
- return err;
-}
-
-int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr)
-{
- if (fmr->maps)
- return -EBUSY;
-
- mthca_free_region(dev, fmr->ibmr.lkey);
- mthca_free_mtt(dev, fmr->mtt);
-
- return 0;
-}
-
-static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list,
- int list_len, u64 iova)
-{
- int i, page_mask;
-
- if (list_len > fmr->attr.max_pages)
- return -EINVAL;
-
- page_mask = (1 << fmr->attr.page_shift) - 1;
-
- /* We are getting page lists, so va must be page aligned. */
- if (iova & page_mask)
- return -EINVAL;
-
- /* Trust the user not to pass misaligned data in page_list */
- if (0)
- for (i = 0; i < list_len; ++i) {
- if (page_list[i] & ~page_mask)
- return -EINVAL;
- }
-
- if (fmr->maps >= fmr->attr.max_maps)
- return -EINVAL;
-
- return 0;
-}
-
-
-int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova)
-{
- struct mthca_fmr *fmr = to_mfmr(ibfmr);
- struct mthca_dev *dev = to_mdev(ibfmr->device);
- struct mthca_mpt_entry mpt_entry;
- u32 key;
- int i, err;
-
- err = mthca_check_fmr(fmr, page_list, list_len, iova);
- if (err)
- return err;
-
- ++fmr->maps;
-
- key = tavor_key_to_hw_index(fmr->ibmr.lkey);
- key += dev->limits.num_mpts;
- fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);
-
- writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
-
- for (i = 0; i < list_len; ++i) {
- __be64 mtt_entry = cpu_to_be64(page_list[i] |
- MTHCA_MTT_FLAG_PRESENT);
- mthca_write64_raw(mtt_entry, fmr->mem.tavor.mtts + i);
- }
-
- mpt_entry.lkey = cpu_to_be32(key);
- mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
- mpt_entry.start = cpu_to_be64(iova);
-
- __raw_writel((__force u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
- memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start,
- offsetof(struct mthca_mpt_entry, window_count) -
- offsetof(struct mthca_mpt_entry, start));
-
- writeb(MTHCA_MPT_STATUS_HW, fmr->mem.tavor.mpt);
-
- return 0;
-}
-
-int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova)
-{
- struct mthca_fmr *fmr = to_mfmr(ibfmr);
- struct mthca_dev *dev = to_mdev(ibfmr->device);
- u32 key;
- int i, err;
-
- err = mthca_check_fmr(fmr, page_list, list_len, iova);
- if (err)
- return err;
-
- ++fmr->maps;
-
- key = arbel_key_to_hw_index(fmr->ibmr.lkey);
- if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
- key += SINAI_FMR_KEY_INC;
- else
- key += dev->limits.num_mpts;
- fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);
-
- *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
-
- wmb();
-
- dma_sync_single_for_cpu(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
- list_len * sizeof(u64), DMA_TO_DEVICE);
-
- for (i = 0; i < list_len; ++i)
- fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |
- MTHCA_MTT_FLAG_PRESENT);
-
- dma_sync_single_for_device(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
- list_len * sizeof(u64), DMA_TO_DEVICE);
-
- fmr->mem.arbel.mpt->key = cpu_to_be32(key);
- fmr->mem.arbel.mpt->lkey = cpu_to_be32(key);
- fmr->mem.arbel.mpt->length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
- fmr->mem.arbel.mpt->start = cpu_to_be64(iova);
-
- wmb();
-
- *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_HW;
-
- wmb();
-
- return 0;
-}
-
-void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
-{
- if (!fmr->maps)
- return;
-
- fmr->maps = 0;
-
- writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
-}
-
-void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
-{
- if (!fmr->maps)
- return;
-
- fmr->maps = 0;
-
- *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
-}
-
int mthca_init_mr_table(struct mthca_dev *dev)
{
phys_addr_t addr;
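The mthca_mr.c hunks above replace kfree() of bitmap storage with bitmap_free() (its matching bitmap_zalloc() allocation presumably sits outside the quoted hunks) and swap an offsetof()-based memset() for memset_startat(), which clears a struct from a named member to its end. A minimal self-contained sketch of both helpers follows; the struct and function names are hypothetical, not taken from the driver.

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_entry {
        __be32 flags;
        __be32 lkey;            /* cleared from here to the end of the struct */
        __be64 start;
        __be64 length;
};

static int demo_init(struct demo_entry *e, unsigned int nbits)
{
        unsigned long *bits;

        /* bitmap_zalloc()/bitmap_free() replace open-coded kzalloc()/kfree() of longs */
        bits = bitmap_zalloc(nbits, GFP_KERNEL);
        if (!bits)
                return -ENOMEM;

        /* zero everything from ->lkey onward, leaving ->flags untouched */
        memset_startat(e, 0, lkey);

        bitmap_free(bits);
        return 0;
}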
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.c b/drivers/infiniband/hw/mthca/mthca_profile.c
index 15d064479ef6..69af65f1b332 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.c
+++ b/drivers/infiniband/hw/mthca/mthca_profile.c
@@ -31,8 +31,6 @@
* SOFTWARE.
*/
-#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/slab.h>
@@ -79,7 +77,7 @@ s64 mthca_make_profile(struct mthca_dev *dev,
struct mthca_resource *profile;
int i, j;
- profile = kzalloc(MTHCA_RES_NUM * sizeof *profile, GFP_KERNEL);
+ profile = kcalloc(MTHCA_RES_NUM, sizeof(*profile), GFP_KERNEL);
if (!profile)
return -ENOMEM;
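The mthca_profile.c hunk swaps an open-coded `MTHCA_RES_NUM * sizeof *profile` kzalloc() for kcalloc(). A minimal sketch of the idiom, using a hypothetical resource struct:

#include <linux/slab.h>
#include <linux/types.h>

struct demo_resource {
        u64 size;
        u64 start;
        int type;
};

/*
 * kcalloc(n, size, flags) zeroes the array like the kzalloc() it replaces,
 * but returns NULL instead of wrapping if n * size would overflow.
 */
static struct demo_resource *demo_alloc_profile(size_t n)
{
        return kcalloc(n, sizeof(struct demo_resource), GFP_KERNEL);
}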
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index dc2d48c59e62..dd572d76866c 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -37,6 +37,7 @@
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/uverbs_ioctl.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -46,22 +47,14 @@
#include "mthca_dev.h"
#include "mthca_cmd.h"
-#include "mthca_user.h"
+#include <rdma/mthca-abi.h>
#include "mthca_memfree.h"
-static void init_query_mad(struct ib_smp *mad)
-{
- mad->base_version = 1;
- mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- mad->class_version = 1;
- mad->method = IB_MGMT_METHOD_GET;
-}
-
static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
struct ib_udata *uhw)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
struct mthca_dev *mdev = to_mdev(ibdev);
@@ -77,7 +70,7 @@ static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *pr
props->fw_ver = mdev->fw_ver;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mthca_MAD_IFC(mdev, 1, 1,
@@ -96,8 +89,9 @@ static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *pr
props->page_size_cap = mdev->limits.page_size_cap;
props->max_qp = mdev->limits.num_qps - mdev->limits.reserved_qps;
props->max_qp_wr = mdev->limits.max_wqes;
- props->max_sge = mdev->limits.max_sg;
- props->max_sge_rd = props->max_sge;
+ props->max_send_sge = mdev->limits.max_sg;
+ props->max_recv_sge = mdev->limits.max_sg;
+ props->max_sge_rd = mdev->limits.max_sg;
props->max_cq = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
props->max_cqe = mdev->limits.max_cqes;
props->max_mr = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
@@ -116,16 +110,6 @@ static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *pr
props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
props->max_mcast_grp;
- /*
- * If Sinai memory key optimization is being used, then only
- * the 8-bit key portion will change. For other HCAs, the
- * unused index bits will also be used for FMR remapping.
- */
- if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
- props->max_map_per_fmr = 255;
- else
- props->max_map_per_fmr =
- (1 << (32 - ilog2(mdev->limits.num_mpts))) - 1;
err = 0;
out:
@@ -135,10 +119,10 @@ static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *pr
}
static int mthca_query_port(struct ib_device *ibdev,
- u8 port, struct ib_port_attr *props)
+ u32 port, struct ib_port_attr *props)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
@@ -146,9 +130,9 @@ static int mthca_query_port(struct ib_device *ibdev,
if (!in_mad || !out_mad)
goto out;
- memset(props, 0, sizeof *props);
+ /* props being zeroed by the caller, avoid zeroing it here */
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -193,7 +177,8 @@ static int mthca_modify_device(struct ib_device *ibdev,
if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
return -ERESTARTSYS;
- memcpy(ibdev->node_desc, props->node_desc, 64);
+ memcpy(ibdev->node_desc, props->node_desc,
+ IB_DEVICE_NODE_DESC_MAX);
mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
}
@@ -201,7 +186,7 @@ static int mthca_modify_device(struct ib_device *ibdev,
}
static int mthca_modify_port(struct ib_device *ibdev,
- u8 port, int port_modify_mask,
+ u32 port, int port_modify_mask,
struct ib_port_modify *props)
{
struct mthca_set_ib_param set_ib;
@@ -211,7 +196,7 @@ static int mthca_modify_port(struct ib_device *ibdev,
if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
return -ERESTARTSYS;
- err = mthca_query_port(ibdev, port, &attr);
+ err = ib_query_port(ibdev, port, &attr);
if (err)
goto out;
@@ -230,10 +215,10 @@ out:
}
static int mthca_query_pkey(struct ib_device *ibdev,
- u8 port, u16 index, u16 *pkey)
+ u32 port, u16 index, u16 *pkey)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
@@ -241,7 +226,7 @@ static int mthca_query_pkey(struct ib_device *ibdev,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
in_mad->attr_mod = cpu_to_be32(index / 32);
@@ -258,11 +243,11 @@ static int mthca_query_pkey(struct ib_device *ibdev,
return err;
}
-static int mthca_query_gid(struct ib_device *ibdev, u8 port,
+static int mthca_query_gid(struct ib_device *ibdev, u32 port,
int index, union ib_gid *gid)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
@@ -270,7 +255,7 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -281,7 +266,7 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
memcpy(gid->raw, out_mad->data + 8, 8);
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
in_mad->attr_mod = cpu_to_be32(index / 8);
@@ -298,17 +283,16 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
return err;
}
-static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
+static int mthca_alloc_ucontext(struct ib_ucontext *uctx,
+ struct ib_udata *udata)
{
- struct mthca_alloc_ucontext_resp uresp;
- struct mthca_ucontext *context;
+ struct ib_device *ibdev = uctx->device;
+ struct mthca_alloc_ucontext_resp uresp = {};
+ struct mthca_ucontext *context = to_mucontext(uctx);
int err;
if (!(to_mdev(ibdev)->active))
- return ERR_PTR(-EAGAIN);
-
- memset(&uresp, 0, sizeof uresp);
+ return -EAGAIN;
uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
if (mthca_is_memfree(to_mdev(ibdev)))
@@ -316,44 +300,33 @@ static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
else
uresp.uarc_size = 0;
- context = kmalloc(sizeof *context, GFP_KERNEL);
- if (!context)
- return ERR_PTR(-ENOMEM);
-
err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
- if (err) {
- kfree(context);
- return ERR_PTR(err);
- }
+ if (err)
+ return err;
context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
if (IS_ERR(context->db_tab)) {
err = PTR_ERR(context->db_tab);
mthca_uar_free(to_mdev(ibdev), &context->uar);
- kfree(context);
- return ERR_PTR(err);
+ return err;
}
- if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
+ if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
mthca_uar_free(to_mdev(ibdev), &context->uar);
- kfree(context);
- return ERR_PTR(-EFAULT);
+ return -EFAULT;
}
context->reg_mr_warned = 0;
- return &context->ibucontext;
+ return 0;
}
-static int mthca_dealloc_ucontext(struct ib_ucontext *context)
+static void mthca_dealloc_ucontext(struct ib_ucontext *context)
{
mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
to_mucontext(context)->db_tab);
mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
- kfree(to_mucontext(context));
-
- return 0;
}
static int mthca_mmap_uar(struct ib_ucontext *context,
@@ -372,193 +345,151 @@ static int mthca_mmap_uar(struct ib_ucontext *context,
return 0;
}
-static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
- struct mthca_pd *pd;
+ struct ib_device *ibdev = ibpd->device;
+ struct mthca_pd *pd = to_mpd(ibpd);
int err;
- pd = kmalloc(sizeof *pd, GFP_KERNEL);
- if (!pd)
- return ERR_PTR(-ENOMEM);
-
- err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
- if (err) {
- kfree(pd);
- return ERR_PTR(err);
- }
+ err = mthca_pd_alloc(to_mdev(ibdev), !udata, pd);
+ if (err)
+ return err;
- if (context) {
+ if (udata) {
if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
mthca_pd_free(to_mdev(ibdev), pd);
- kfree(pd);
- return ERR_PTR(-EFAULT);
+ return -EFAULT;
}
}
- return &pd->ibpd;
+ return 0;
}
-static int mthca_dealloc_pd(struct ib_pd *pd)
+static int mthca_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
- kfree(pd);
-
return 0;
}
-static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
- struct ib_ah_attr *ah_attr)
-{
- int err;
- struct mthca_ah *ah;
-
- ah = kmalloc(sizeof *ah, GFP_ATOMIC);
- if (!ah)
- return ERR_PTR(-ENOMEM);
+static int mthca_ah_create(struct ib_ah *ibah,
+ struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
- err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah);
- if (err) {
- kfree(ah);
- return ERR_PTR(err);
- }
+{
+ struct mthca_ah *ah = to_mah(ibah);
- return &ah->ibah;
+ return mthca_create_ah(to_mdev(ibah->device), to_mpd(ibah->pd),
+ init_attr->ah_attr, ah);
}
-static int mthca_ah_destroy(struct ib_ah *ah)
+static int mthca_ah_destroy(struct ib_ah *ah, u32 flags)
{
mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
- kfree(ah);
-
return 0;
}
-static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *init_attr,
- struct ib_udata *udata)
+static int mthca_create_srq(struct ib_srq *ibsrq,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
{
struct mthca_create_srq ucmd;
- struct mthca_ucontext *context = NULL;
- struct mthca_srq *srq;
+ struct mthca_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mthca_ucontext, ibucontext);
+ struct mthca_srq *srq = to_msrq(ibsrq);
int err;
if (init_attr->srq_type != IB_SRQT_BASIC)
- return ERR_PTR(-ENOSYS);
-
- srq = kmalloc(sizeof *srq, GFP_KERNEL);
- if (!srq)
- return ERR_PTR(-ENOMEM);
-
- if (pd->uobject) {
- context = to_mucontext(pd->uobject->context);
+ return -EOPNOTSUPP;
- if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
- err = -EFAULT;
- goto err_free;
- }
+ if (udata) {
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
+ return -EFAULT;
- err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
+ err = mthca_map_user_db(to_mdev(ibsrq->device), &context->uar,
context->db_tab, ucmd.db_index,
ucmd.db_page);
if (err)
- goto err_free;
+ return err;
srq->mr.ibmr.lkey = ucmd.lkey;
srq->db_index = ucmd.db_index;
}
- err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
- &init_attr->attr, srq);
+ err = mthca_alloc_srq(to_mdev(ibsrq->device), to_mpd(ibsrq->pd),
+ &init_attr->attr, srq, udata);
- if (err && pd->uobject)
- mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
+ if (err && udata)
+ mthca_unmap_user_db(to_mdev(ibsrq->device), &context->uar,
context->db_tab, ucmd.db_index);
if (err)
- goto err_free;
+ return err;
- if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
- mthca_free_srq(to_mdev(pd->device), srq);
- err = -EFAULT;
- goto err_free;
+ if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) {
+ mthca_free_srq(to_mdev(ibsrq->device), srq);
+ return -EFAULT;
}
- return &srq->ibsrq;
-
-err_free:
- kfree(srq);
-
- return ERR_PTR(err);
+ return 0;
}
-static int mthca_destroy_srq(struct ib_srq *srq)
+static int mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
- struct mthca_ucontext *context;
-
- if (srq->uobject) {
- context = to_mucontext(srq->uobject->context);
+ if (udata) {
+ struct mthca_ucontext *context =
+ rdma_udata_to_drv_context(
+ udata,
+ struct mthca_ucontext,
+ ibucontext);
mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
context->db_tab, to_msrq(srq)->db_index);
}
mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
- kfree(srq);
-
return 0;
}
-static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
+static int mthca_create_qp(struct ib_qp *ibqp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
{
+ struct mthca_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mthca_ucontext, ibucontext);
struct mthca_create_qp ucmd;
- struct mthca_qp *qp;
+ struct mthca_qp *qp = to_mqp(ibqp);
+ struct mthca_dev *dev = to_mdev(ibqp->device);
int err;
if (init_attr->create_flags)
- return ERR_PTR(-EINVAL);
+ return -EOPNOTSUPP;
switch (init_attr->qp_type) {
case IB_QPT_RC:
case IB_QPT_UC:
case IB_QPT_UD:
{
- struct mthca_ucontext *context;
+ if (udata) {
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
+ return -EFAULT;
- qp = kmalloc(sizeof *qp, GFP_KERNEL);
- if (!qp)
- return ERR_PTR(-ENOMEM);
-
- if (pd->uobject) {
- context = to_mucontext(pd->uobject->context);
-
- if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
- kfree(qp);
- return ERR_PTR(-EFAULT);
- }
-
- err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
+ err = mthca_map_user_db(dev, &context->uar,
context->db_tab,
- ucmd.sq_db_index, ucmd.sq_db_page);
- if (err) {
- kfree(qp);
- return ERR_PTR(err);
- }
+ ucmd.sq_db_index,
+ ucmd.sq_db_page);
+ if (err)
+ return err;
- err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
+ err = mthca_map_user_db(dev, &context->uar,
context->db_tab,
- ucmd.rq_db_index, ucmd.rq_db_page);
+ ucmd.rq_db_index,
+ ucmd.rq_db_page);
if (err) {
- mthca_unmap_user_db(to_mdev(pd->device),
- &context->uar,
+ mthca_unmap_user_db(dev, &context->uar,
context->db_tab,
ucmd.sq_db_index);
- kfree(qp);
- return ERR_PTR(err);
+ return err;
}
qp->mr.ibmr.lkey = ucmd.lkey;
@@ -566,22 +497,16 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
qp->rq.db_index = ucmd.rq_db_index;
}
- err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
+ err = mthca_alloc_qp(dev, to_mpd(ibqp->pd),
to_mcq(init_attr->send_cq),
to_mcq(init_attr->recv_cq),
init_attr->qp_type, init_attr->sq_sig_type,
- &init_attr->cap, qp);
+ &init_attr->cap, qp, udata);
- if (err && pd->uobject) {
- context = to_mucontext(pd->uobject->context);
-
- mthca_unmap_user_db(to_mdev(pd->device),
- &context->uar,
- context->db_tab,
+ if (err && udata) {
+ mthca_unmap_user_db(dev, &context->uar, context->db_tab,
ucmd.sq_db_index);
- mthca_unmap_user_db(to_mdev(pd->device),
- &context->uar,
- context->db_tab,
+ mthca_unmap_user_db(dev, &context->uar, context->db_tab,
ucmd.rq_db_index);
}
@@ -591,32 +516,28 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
case IB_QPT_SMI:
case IB_QPT_GSI:
{
- /* Don't allow userspace to create special QPs */
- if (pd->uobject)
- return ERR_PTR(-EINVAL);
-
- qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
- if (!qp)
- return ERR_PTR(-ENOMEM);
+ qp->sqp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
+ if (!qp->sqp)
+ return -ENOMEM;
qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;
- err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
+ err = mthca_alloc_sqp(dev, to_mpd(ibqp->pd),
to_mcq(init_attr->send_cq),
to_mcq(init_attr->recv_cq),
init_attr->sq_sig_type, &init_attr->cap,
- qp->ibqp.qp_num, init_attr->port_num,
- to_msqp(qp));
+ qp->ibqp.qp_num, init_attr->port_num, qp,
+ udata);
break;
}
default:
/* Don't support raw QPs */
- return ERR_PTR(-ENOSYS);
+ return -EOPNOTSUPP;
}
if (err) {
- kfree(qp);
- return ERR_PTR(err);
+ kfree(qp->sqp);
+ return err;
}
init_attr->cap.max_send_wr = qp->sq.max;
@@ -625,67 +546,72 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
init_attr->cap.max_recv_sge = qp->rq.max_gs;
init_attr->cap.max_inline_data = qp->max_inline_data;
- return &qp->ibqp;
+ return 0;
}
-static int mthca_destroy_qp(struct ib_qp *qp)
+static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
- if (qp->uobject) {
+ if (udata) {
+ struct mthca_ucontext *context =
+ rdma_udata_to_drv_context(
+ udata,
+ struct mthca_ucontext,
+ ibucontext);
+
mthca_unmap_user_db(to_mdev(qp->device),
- &to_mucontext(qp->uobject->context)->uar,
- to_mucontext(qp->uobject->context)->db_tab,
+ &context->uar,
+ context->db_tab,
to_mqp(qp)->sq.db_index);
mthca_unmap_user_db(to_mdev(qp->device),
- &to_mucontext(qp->uobject->context)->uar,
- to_mucontext(qp->uobject->context)->db_tab,
+ &context->uar,
+ context->db_tab,
to_mqp(qp)->rq.db_index);
}
mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
- kfree(qp);
+ kfree(to_mqp(qp)->sqp);
return 0;
}
-static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+static int mthca_create_cq(struct ib_cq *ibcq,
+ const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
{
+ struct ib_udata *udata = &attrs->driver_udata;
+ struct ib_device *ibdev = ibcq->device;
int entries = attr->cqe;
struct mthca_create_cq ucmd;
struct mthca_cq *cq;
int nent;
int err;
+ struct mthca_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mthca_ucontext, ibucontext);
if (attr->flags)
- return ERR_PTR(-EINVAL);
+ return -EOPNOTSUPP;
if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
- if (context) {
- if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
- return ERR_PTR(-EFAULT);
+ if (udata) {
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
+ return -EFAULT;
- err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
- to_mucontext(context)->db_tab,
- ucmd.set_db_index, ucmd.set_db_page);
+ err = mthca_map_user_db(to_mdev(ibdev), &context->uar,
+ context->db_tab, ucmd.set_db_index,
+ ucmd.set_db_page);
if (err)
- return ERR_PTR(err);
+ return err;
- err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
- to_mucontext(context)->db_tab,
- ucmd.arm_db_index, ucmd.arm_db_page);
+ err = mthca_map_user_db(to_mdev(ibdev), &context->uar,
+ context->db_tab, ucmd.arm_db_index,
+ ucmd.arm_db_page);
if (err)
goto err_unmap_set;
}
- cq = kmalloc(sizeof *cq, GFP_KERNEL);
- if (!cq) {
- err = -ENOMEM;
- goto err_unmap_arm;
- }
+ cq = to_mcq(ibcq);
- if (context) {
+ if (udata) {
cq->buf.mr.ibmr.lkey = ucmd.lkey;
cq->set_ci_db_index = ucmd.set_db_index;
cq->arm_db_index = ucmd.arm_db_index;
@@ -694,37 +620,33 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
for (nent = 1; nent <= entries; nent <<= 1)
; /* nothing */
- err = mthca_init_cq(to_mdev(ibdev), nent,
- context ? to_mucontext(context) : NULL,
- context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
+ err = mthca_init_cq(to_mdev(ibdev), nent, context,
+ udata ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
cq);
if (err)
- goto err_free;
+ goto err_unmap_arm;
- if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
+ if (udata && ib_copy_to_udata(udata, &cq->cqn, sizeof(__u32))) {
mthca_free_cq(to_mdev(ibdev), cq);
err = -EFAULT;
- goto err_free;
+ goto err_unmap_arm;
}
cq->resize_buf = NULL;
- return &cq->ibcq;
-
-err_free:
- kfree(cq);
+ return 0;
err_unmap_arm:
- if (context)
- mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
- to_mucontext(context)->db_tab, ucmd.arm_db_index);
+ if (udata)
+ mthca_unmap_user_db(to_mdev(ibdev), &context->uar,
+ context->db_tab, ucmd.arm_db_index);
err_unmap_set:
- if (context)
- mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
- to_mucontext(context)->db_tab, ucmd.set_db_index);
+ if (udata)
+ mthca_unmap_user_db(to_mdev(ibdev), &context->uar,
+ context->db_tab, ucmd.set_db_index);
- return ERR_PTR(err);
+ return err;
}
static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq,
@@ -848,21 +770,25 @@ out:
return ret;
}
-static int mthca_destroy_cq(struct ib_cq *cq)
+static int mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
- if (cq->uobject) {
+ if (udata) {
+ struct mthca_ucontext *context =
+ rdma_udata_to_drv_context(
+ udata,
+ struct mthca_ucontext,
+ ibucontext);
+
mthca_unmap_user_db(to_mdev(cq->device),
- &to_mucontext(cq->uobject->context)->uar,
- to_mucontext(cq->uobject->context)->db_tab,
+ &context->uar,
+ context->db_tab,
to_mcq(cq)->arm_db_index);
mthca_unmap_user_db(to_mdev(cq->device),
- &to_mucontext(cq->uobject->context)->uar,
- to_mucontext(cq->uobject->context)->db_tab,
+ &context->uar,
+ context->db_tab,
to_mcq(cq)->set_ci_db_index);
}
mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
- kfree(cq);
-
return 0;
}
@@ -898,109 +824,31 @@ static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
return &mr->ibmr;
}
-static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
- struct ib_phys_buf *buffer_list,
- int num_phys_buf,
- int acc,
- u64 *iova_start)
-{
- struct mthca_mr *mr;
- u64 *page_list;
- u64 total_size;
- unsigned long mask;
- int shift;
- int npages;
- int err;
- int i, j, n;
-
- mask = buffer_list[0].addr ^ *iova_start;
- total_size = 0;
- for (i = 0; i < num_phys_buf; ++i) {
- if (i != 0)
- mask |= buffer_list[i].addr;
- if (i != num_phys_buf - 1)
- mask |= buffer_list[i].addr + buffer_list[i].size;
-
- total_size += buffer_list[i].size;
- }
-
- if (mask & ~PAGE_MASK)
- return ERR_PTR(-EINVAL);
-
- shift = __ffs(mask | 1 << 31);
-
- buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
- buffer_list[0].addr &= ~0ull << shift;
-
- mr = kmalloc(sizeof *mr, GFP_KERNEL);
- if (!mr)
- return ERR_PTR(-ENOMEM);
-
- npages = 0;
- for (i = 0; i < num_phys_buf; ++i)
- npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
-
- if (!npages)
- return &mr->ibmr;
-
- page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL);
- if (!page_list) {
- kfree(mr);
- return ERR_PTR(-ENOMEM);
- }
-
- n = 0;
- for (i = 0; i < num_phys_buf; ++i)
- for (j = 0;
- j < (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
- ++j)
- page_list[n++] = buffer_list[i].addr + ((u64) j << shift);
-
- mthca_dbg(to_mdev(pd->device), "Registering memory at %llx (iova %llx) "
- "in PD %x; shift %d, npages %d.\n",
- (unsigned long long) buffer_list[0].addr,
- (unsigned long long) *iova_start,
- to_mpd(pd)->pd_num,
- shift, npages);
-
- err = mthca_mr_alloc_phys(to_mdev(pd->device),
- to_mpd(pd)->pd_num,
- page_list, shift, npages,
- *iova_start, total_size,
- convert_access(acc), mr);
-
- if (err) {
- kfree(page_list);
- kfree(mr);
- return ERR_PTR(err);
- }
-
- kfree(page_list);
- mr->umem = NULL;
-
- return &mr->ibmr;
-}
-
static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt, int acc, struct ib_udata *udata)
+ u64 virt, int acc, struct ib_dmah *dmah,
+ struct ib_udata *udata)
{
struct mthca_dev *dev = to_mdev(pd->device);
- struct scatterlist *sg;
+ struct ib_block_iter biter;
+ struct mthca_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mthca_ucontext, ibucontext);
struct mthca_mr *mr;
struct mthca_reg_mr ucmd;
u64 *pages;
- int shift, n, len;
- int i, k, entry;
+ int n, i;
int err = 0;
int write_mtt_size;
- if (udata->inlen - sizeof (struct ib_uverbs_cmd_hdr) < sizeof ucmd) {
- if (!to_mucontext(pd->uobject->context)->reg_mr_warned) {
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (udata->inlen < sizeof ucmd) {
+ if (!context->reg_mr_warned) {
mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
current->comm);
mthca_warn(dev, " Update libmthca to fix this.\n");
}
- ++to_mucontext(pd->uobject->context)->reg_mr_warned;
+ ++context->reg_mr_warned;
ucmd.mr_attrs = 0;
} else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
return ERR_PTR(-EFAULT);
@@ -1009,16 +857,13 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,
- ucmd.mr_attrs & MTHCA_MR_DMASYNC);
-
+ mr->umem = ib_umem_get(pd->device, start, length, acc);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
goto err;
}
- shift = ffs(mr->umem->page_size) - 1;
- n = mr->umem->nmap;
+ n = ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE);
mr->mtt = mthca_alloc_mtt(dev, n);
if (IS_ERR(mr->mtt)) {
@@ -1036,22 +881,19 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));
- for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
- len = sg_dma_len(sg) >> shift;
- for (k = 0; k < len; ++k) {
- pages[i++] = sg_dma_address(sg) +
- mr->umem->page_size * k;
- /*
- * Be friendly to write_mtt and pass it chunks
- * of appropriate size.
- */
- if (i == write_mtt_size) {
- err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
- if (err)
- goto mtt_done;
- n += i;
- i = 0;
- }
+ rdma_umem_for_each_dma_block(mr->umem, &biter, PAGE_SIZE) {
+ pages[i++] = rdma_block_iter_dma_address(&biter);
+
+ /*
+ * Be friendly to write_mtt and pass it chunks
+ * of appropriate size.
+ */
+ if (i == write_mtt_size) {
+ err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
+ if (err)
+ goto mtt_done;
+ n += i;
+ i = 0;
}
}
@@ -1062,7 +904,7 @@ mtt_done:
if (err)
goto err_mtt;
- err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, virt, length,
+ err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, PAGE_SHIFT, virt, length,
convert_access(acc), mr);
if (err)
@@ -1081,143 +923,79 @@ err:
return ERR_PTR(err);
}
-static int mthca_dereg_mr(struct ib_mr *mr)
+static int mthca_dereg_mr(struct ib_mr *mr, struct ib_udata *udata)
{
struct mthca_mr *mmr = to_mmr(mr);
mthca_free_mr(to_mdev(mr->device), mmr);
- if (mmr->umem)
- ib_umem_release(mmr->umem);
+ ib_umem_release(mmr->umem);
kfree(mmr);
return 0;
}
-static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr)
-{
- struct mthca_fmr *fmr;
- int err;
-
- fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
- if (!fmr)
- return ERR_PTR(-ENOMEM);
-
- memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
- err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
- convert_access(mr_access_flags), fmr);
-
- if (err) {
- kfree(fmr);
- return ERR_PTR(err);
- }
-
- return &fmr->ibmr;
-}
-
-static int mthca_dealloc_fmr(struct ib_fmr *fmr)
+static ssize_t hw_rev_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
- struct mthca_fmr *mfmr = to_mfmr(fmr);
- int err;
-
- err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
- if (err)
- return err;
+ struct mthca_dev *dev =
+ rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);
- kfree(mfmr);
- return 0;
+ return sysfs_emit(buf, "%x\n", dev->rev_id);
}
+static DEVICE_ATTR_RO(hw_rev);
-static int mthca_unmap_fmr(struct list_head *fmr_list)
+static const char *hca_type_string(int hca_type)
{
- struct ib_fmr *fmr;
- int err;
- struct mthca_dev *mdev = NULL;
-
- list_for_each_entry(fmr, fmr_list, list) {
- if (mdev && to_mdev(fmr->device) != mdev)
- return -EINVAL;
- mdev = to_mdev(fmr->device);
+ switch (hca_type) {
+ case PCI_DEVICE_ID_MELLANOX_TAVOR:
+ return "MT23108";
+ case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
+ return "MT25208 (MT23108 compat mode)";
+ case PCI_DEVICE_ID_MELLANOX_ARBEL:
+ return "MT25208";
+ case PCI_DEVICE_ID_MELLANOX_SINAI:
+ case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
+ return "MT25204";
}
- if (!mdev)
- return 0;
-
- if (mthca_is_memfree(mdev)) {
- list_for_each_entry(fmr, fmr_list, list)
- mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));
-
- wmb();
- } else
- list_for_each_entry(fmr, fmr_list, list)
- mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));
-
- err = mthca_SYNC_TPT(mdev);
- return err;
+ return "unknown";
}
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mthca_dev *dev =
- container_of(device, struct mthca_dev, ib_dev.dev);
- return sprintf(buf, "%x\n", dev->rev_id);
-}
+ rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);
-static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct mthca_dev *dev =
- container_of(device, struct mthca_dev, ib_dev.dev);
- return sprintf(buf, "%d.%d.%d\n", (int) (dev->fw_ver >> 32),
- (int) (dev->fw_ver >> 16) & 0xffff,
- (int) dev->fw_ver & 0xffff);
+ return sysfs_emit(buf, "%s\n", hca_type_string(dev->pdev->device));
}
+static DEVICE_ATTR_RO(hca_type);
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mthca_dev *dev =
- container_of(device, struct mthca_dev, ib_dev.dev);
- switch (dev->pdev->device) {
- case PCI_DEVICE_ID_MELLANOX_TAVOR:
- return sprintf(buf, "MT23108\n");
- case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
- return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
- case PCI_DEVICE_ID_MELLANOX_ARBEL:
- return sprintf(buf, "MT25208\n");
- case PCI_DEVICE_ID_MELLANOX_SINAI:
- case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
- return sprintf(buf, "MT25204\n");
- default:
- return sprintf(buf, "unknown\n");
- }
-}
+ rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);
-static ssize_t show_board(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct mthca_dev *dev =
- container_of(device, struct mthca_dev, ib_dev.dev);
- return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
+ return sysfs_emit(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
}
+static DEVICE_ATTR_RO(board_id);
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+static struct attribute *mthca_dev_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL
+};
-static struct device_attribute *mthca_dev_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_fw_ver,
- &dev_attr_hca_type,
- &dev_attr_board_id
+static const struct attribute_group mthca_attr_group = {
+ .attrs = mthca_dev_attributes,
};
static int mthca_init_node_data(struct mthca_dev *dev)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
@@ -1225,7 +1003,7 @@ static int mthca_init_node_data(struct mthca_dev *dev)
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
err = mthca_MAD_IFC(dev, 1, 1,
@@ -1233,7 +1011,7 @@ static int mthca_init_node_data(struct mthca_dev *dev)
if (err)
goto out;
- memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
+ memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
@@ -1252,144 +1030,147 @@ out:
return err;
}
-static int mthca_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int mthca_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
int err;
- err = mthca_query_port(ibdev, port_num, &attr);
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
+
+ err = ib_query_port(ibdev, port_num, &attr);
if (err)
return err;
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
immutable->max_mad_size = IB_MGMT_MAD_SIZE;
return 0;
}
+static void get_dev_fw_str(struct ib_device *device, char *str)
+{
+ struct mthca_dev *dev =
+ container_of(device, struct mthca_dev, ib_dev);
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
+ (int) (dev->fw_ver >> 32),
+ (int) (dev->fw_ver >> 16) & 0xffff,
+ (int) dev->fw_ver & 0xffff);
+}
+
+static const struct ib_device_ops mthca_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_MTHCA,
+ .uverbs_abi_ver = MTHCA_UVERBS_ABI_VERSION,
+ .uverbs_no_driver_id_binding = 1,
+
+ .alloc_pd = mthca_alloc_pd,
+ .alloc_ucontext = mthca_alloc_ucontext,
+ .attach_mcast = mthca_multicast_attach,
+ .create_ah = mthca_ah_create,
+ .create_cq = mthca_create_cq,
+ .create_qp = mthca_create_qp,
+ .dealloc_pd = mthca_dealloc_pd,
+ .dealloc_ucontext = mthca_dealloc_ucontext,
+ .dereg_mr = mthca_dereg_mr,
+ .destroy_ah = mthca_ah_destroy,
+ .destroy_cq = mthca_destroy_cq,
+ .destroy_qp = mthca_destroy_qp,
+ .detach_mcast = mthca_multicast_detach,
+ .device_group = &mthca_attr_group,
+ .get_dev_fw_str = get_dev_fw_str,
+ .get_dma_mr = mthca_get_dma_mr,
+ .get_port_immutable = mthca_port_immutable,
+ .mmap = mthca_mmap_uar,
+ .modify_device = mthca_modify_device,
+ .modify_port = mthca_modify_port,
+ .modify_qp = mthca_modify_qp,
+ .poll_cq = mthca_poll_cq,
+ .process_mad = mthca_process_mad,
+ .query_ah = mthca_ah_query,
+ .query_device = mthca_query_device,
+ .query_gid = mthca_query_gid,
+ .query_pkey = mthca_query_pkey,
+ .query_port = mthca_query_port,
+ .query_qp = mthca_query_qp,
+ .reg_user_mr = mthca_reg_user_mr,
+ .resize_cq = mthca_resize_cq,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, mthca_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_cq, mthca_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_pd, mthca_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, mthca_qp, ibqp),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, mthca_ucontext, ibucontext),
+};
+
+static const struct ib_device_ops mthca_dev_arbel_srq_ops = {
+ .create_srq = mthca_create_srq,
+ .destroy_srq = mthca_destroy_srq,
+ .modify_srq = mthca_modify_srq,
+ .post_srq_recv = mthca_arbel_post_srq_recv,
+ .query_srq = mthca_query_srq,
+
+ INIT_RDMA_OBJ_SIZE(ib_srq, mthca_srq, ibsrq),
+};
+
+static const struct ib_device_ops mthca_dev_tavor_srq_ops = {
+ .create_srq = mthca_create_srq,
+ .destroy_srq = mthca_destroy_srq,
+ .modify_srq = mthca_modify_srq,
+ .post_srq_recv = mthca_tavor_post_srq_recv,
+ .query_srq = mthca_query_srq,
+
+ INIT_RDMA_OBJ_SIZE(ib_srq, mthca_srq, ibsrq),
+};
+
+static const struct ib_device_ops mthca_dev_arbel_ops = {
+ .post_recv = mthca_arbel_post_receive,
+ .post_send = mthca_arbel_post_send,
+ .req_notify_cq = mthca_arbel_arm_cq,
+};
+
+static const struct ib_device_ops mthca_dev_tavor_ops = {
+ .post_recv = mthca_tavor_post_receive,
+ .post_send = mthca_tavor_post_send,
+ .req_notify_cq = mthca_tavor_arm_cq,
+};
+
int mthca_register_device(struct mthca_dev *dev)
{
int ret;
- int i;
ret = mthca_init_node_data(dev);
if (ret)
return ret;
- strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
- dev->ib_dev.owner = THIS_MODULE;
-
- dev->ib_dev.uverbs_abi_ver = MTHCA_UVERBS_ABI_VERSION;
- dev->ib_dev.uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
dev->ib_dev.num_comp_vectors = 1;
- dev->ib_dev.dma_device = &dev->pdev->dev;
- dev->ib_dev.query_device = mthca_query_device;
- dev->ib_dev.query_port = mthca_query_port;
- dev->ib_dev.modify_device = mthca_modify_device;
- dev->ib_dev.modify_port = mthca_modify_port;
- dev->ib_dev.query_pkey = mthca_query_pkey;
- dev->ib_dev.query_gid = mthca_query_gid;
- dev->ib_dev.alloc_ucontext = mthca_alloc_ucontext;
- dev->ib_dev.dealloc_ucontext = mthca_dealloc_ucontext;
- dev->ib_dev.mmap = mthca_mmap_uar;
- dev->ib_dev.alloc_pd = mthca_alloc_pd;
- dev->ib_dev.dealloc_pd = mthca_dealloc_pd;
- dev->ib_dev.create_ah = mthca_ah_create;
- dev->ib_dev.query_ah = mthca_ah_query;
- dev->ib_dev.destroy_ah = mthca_ah_destroy;
+ dev->ib_dev.dev.parent = &dev->pdev->dev;
if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
- dev->ib_dev.create_srq = mthca_create_srq;
- dev->ib_dev.modify_srq = mthca_modify_srq;
- dev->ib_dev.query_srq = mthca_query_srq;
- dev->ib_dev.destroy_srq = mthca_destroy_srq;
- dev->ib_dev.uverbs_cmd_mask |=
- (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
-
if (mthca_is_memfree(dev))
- dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
+ ib_set_device_ops(&dev->ib_dev,
+ &mthca_dev_arbel_srq_ops);
else
- dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv;
+ ib_set_device_ops(&dev->ib_dev,
+ &mthca_dev_tavor_srq_ops);
}
- dev->ib_dev.create_qp = mthca_create_qp;
- dev->ib_dev.modify_qp = mthca_modify_qp;
- dev->ib_dev.query_qp = mthca_query_qp;
- dev->ib_dev.destroy_qp = mthca_destroy_qp;
- dev->ib_dev.create_cq = mthca_create_cq;
- dev->ib_dev.resize_cq = mthca_resize_cq;
- dev->ib_dev.destroy_cq = mthca_destroy_cq;
- dev->ib_dev.poll_cq = mthca_poll_cq;
- dev->ib_dev.get_dma_mr = mthca_get_dma_mr;
- dev->ib_dev.reg_phys_mr = mthca_reg_phys_mr;
- dev->ib_dev.reg_user_mr = mthca_reg_user_mr;
- dev->ib_dev.dereg_mr = mthca_dereg_mr;
- dev->ib_dev.get_port_immutable = mthca_port_immutable;
-
- if (dev->mthca_flags & MTHCA_FLAG_FMR) {
- dev->ib_dev.alloc_fmr = mthca_alloc_fmr;
- dev->ib_dev.unmap_fmr = mthca_unmap_fmr;
- dev->ib_dev.dealloc_fmr = mthca_dealloc_fmr;
- if (mthca_is_memfree(dev))
- dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr;
- else
- dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr;
- }
+ ib_set_device_ops(&dev->ib_dev, &mthca_dev_ops);
- dev->ib_dev.attach_mcast = mthca_multicast_attach;
- dev->ib_dev.detach_mcast = mthca_multicast_detach;
- dev->ib_dev.process_mad = mthca_process_mad;
-
- if (mthca_is_memfree(dev)) {
- dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq;
- dev->ib_dev.post_send = mthca_arbel_post_send;
- dev->ib_dev.post_recv = mthca_arbel_post_receive;
- } else {
- dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq;
- dev->ib_dev.post_send = mthca_tavor_post_send;
- dev->ib_dev.post_recv = mthca_tavor_post_receive;
- }
+ if (mthca_is_memfree(dev))
+ ib_set_device_ops(&dev->ib_dev, &mthca_dev_arbel_ops);
+ else
+ ib_set_device_ops(&dev->ib_dev, &mthca_dev_tavor_ops);
mutex_init(&dev->cap_mask_mutex);
- ret = ib_register_device(&dev->ib_dev, NULL);
+ ret = ib_register_device(&dev->ib_dev, "mthca%d", &dev->pdev->dev);
if (ret)
return ret;
- for (i = 0; i < ARRAY_SIZE(mthca_dev_attributes); ++i) {
- ret = device_create_file(&dev->ib_dev.dev,
- mthca_dev_attributes[i]);
- if (ret) {
- ib_unregister_device(&dev->ib_dev);
- return ret;
- }
- }
-
mthca_start_catas_poll(dev);
return 0;
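The sysfs hunks in mthca_provider.c above convert hand-rolled DEVICE_ATTR()/show functions into the DEVICE_ATTR_RO() + attribute_group pattern and switch sprintf() to sysfs_emit(). A minimal self-contained sketch of that pattern follows; it uses a hypothetical demo_dev wrapper and plain container_of() where the driver uses rdma_device_to_drv_device().

#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/types.h>

/* hypothetical device wrapper standing in for struct mthca_dev */
struct demo_dev {
        struct device dev;
        u32 rev_id;
};

static ssize_t hw_rev_show(struct device *device,
                           struct device_attribute *attr, char *buf)
{
        struct demo_dev *d = container_of(device, struct demo_dev, dev);

        /* sysfs_emit() is PAGE_SIZE-bounded, unlike the old sprintf() */
        return sysfs_emit(buf, "%x\n", d->rev_id);
}
static DEVICE_ATTR_RO(hw_rev); /* generates dev_attr_hw_rev with .show = hw_rev_show */

static struct attribute *demo_attrs[] = {
        &dev_attr_hw_rev.attr,
        NULL,
};

static const struct attribute_group demo_attr_group = {
        .attrs = demo_attrs,
};

The group is then wired up once (here via the .device_group member of ib_device_ops) instead of registering each attribute by hand after ib_register_device().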
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 596acc45569b..8a77483bb33c 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -76,24 +76,6 @@ struct mthca_mr {
struct mthca_mtt *mtt;
};
-struct mthca_fmr {
- struct ib_fmr ibmr;
- struct ib_fmr_attr attr;
- struct mthca_mtt *mtt;
- int maps;
- union {
- struct {
- struct mthca_mpt_entry __iomem *mpt;
- u64 __iomem *mtts;
- } tavor;
- struct {
- struct mthca_mpt_entry *mpt;
- __be64 *mtts;
- dma_addr_t dma_handle;
- } arbel;
- } mem;
-};
-
struct mthca_pd {
struct ib_pd ibpd;
u32 pd_num;
@@ -258,6 +240,16 @@ struct mthca_wq {
__be32 *db;
};
+struct mthca_sqp {
+ int pkey_index;
+ u32 qkey;
+ u32 send_psn;
+ struct ib_ud_header ud_header;
+ int header_buf_size;
+ void *header_buf;
+ dma_addr_t header_dma;
+};
+
struct mthca_qp {
struct ib_qp ibqp;
int refcount;
@@ -283,17 +275,7 @@ struct mthca_qp {
wait_queue_head_t wait;
struct mutex mutex;
-};
-
-struct mthca_sqp {
- struct mthca_qp qp;
- int pkey_index;
- u32 qkey;
- u32 send_psn;
- struct ib_ud_header ud_header;
- int header_buf_size;
- void *header_buf;
- dma_addr_t header_dma;
+ struct mthca_sqp *sqp;
};
static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
@@ -301,11 +283,6 @@ static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext
return container_of(ibucontext, struct mthca_ucontext, ibucontext);
}
-static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr)
-{
- return container_of(ibmr, struct mthca_fmr, ibmr);
-}
-
static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)
{
return container_of(ibmr, struct mthca_mr, ibmr);
@@ -336,9 +313,4 @@ static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
return container_of(ibqp, struct mthca_qp, ibqp);
}
-static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp)
-{
- return container_of(qp, struct mthca_sqp, qp);
-}
-
#endif /* MTHCA_PROVIDER_H */
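Several of the converted verbs above (destroy_cq, destroy_qp, destroy_srq, __mthca_modify_qp) recover the driver's ucontext from the ib_udata argument with rdma_udata_to_drv_context() instead of dereferencing ->uobject->context. A minimal sketch of the pattern, with a hypothetical demo_ucontext and handler:

#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>

/* hypothetical driver context embedding the core object, as mthca_ucontext does */
struct demo_ucontext {
        struct ib_ucontext ibucontext;
        u32 uar_index;
};

static int demo_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
        /*
         * A non-NULL udata means a userspace verb: the macro walks back from
         * udata to the driver's ucontext.  For in-kernel callers udata is
         * NULL and the macro yields NULL, replacing the old cq->uobject checks.
         */
        struct demo_ucontext *ctx = rdma_udata_to_drv_context(
                udata, struct demo_ucontext, ibucontext);

        if (ctx) {
                /* undo per-context doorbell mappings here */
        }

        return 0;
}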
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index e354b2f04ad9..53f43649f7d0 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -42,6 +42,7 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>
+#include <rdma/uverbs_ioctl.h>
#include "mthca_dev.h"
#include "mthca_cmd.h"
@@ -114,7 +115,7 @@ struct mthca_qp_path {
u8 hop_limit;
__be32 sl_tclass_flowlabel;
u8 rgid[16];
-} __attribute__((packed));
+} __packed;
struct mthca_qp_context {
__be32 flags;
@@ -153,14 +154,14 @@ struct mthca_qp_context {
__be16 rq_wqe_counter; /* reserved on Tavor */
__be16 sq_wqe_counter; /* reserved on Tavor */
u32 reserved3[18];
-} __attribute__((packed));
+} __packed;
struct mthca_qp_param {
__be32 opt_param_mask;
u32 reserved1;
struct mthca_qp_context context;
u32 reserved2[62];
-} __attribute__((packed));
+} __packed;
enum {
MTHCA_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
@@ -393,31 +394,36 @@ static int to_ib_qp_access_flags(int mthca_flags)
return ib_flags;
}
-static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
- struct mthca_qp_path *path)
+static void to_rdma_ah_attr(struct mthca_dev *dev,
+ struct rdma_ah_attr *ah_attr,
+ struct mthca_qp_path *path)
{
- memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
- ib_ah_attr->port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;
+ u8 port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;
- if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports)
- return;
+ memset(ah_attr, 0, sizeof(*ah_attr));
- ib_ah_attr->dlid = be16_to_cpu(path->rlid);
- ib_ah_attr->sl = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
- ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
- ib_ah_attr->static_rate = mthca_rate_to_ib(dev,
- path->static_rate & 0xf,
- ib_ah_attr->port_num);
- ib_ah_attr->ah_flags = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
- if (ib_ah_attr->ah_flags) {
- ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
- ib_ah_attr->grh.hop_limit = path->hop_limit;
- ib_ah_attr->grh.traffic_class =
- (be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff;
- ib_ah_attr->grh.flow_label =
- be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff;
- memcpy(ib_ah_attr->grh.dgid.raw,
- path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
+ if (port_num == 0 || port_num > dev->limits.num_ports)
+ return;
+ ah_attr->type = rdma_ah_find_type(&dev->ib_dev, port_num);
+ rdma_ah_set_port_num(ah_attr, port_num);
+
+ rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid));
+ rdma_ah_set_sl(ah_attr, be32_to_cpu(path->sl_tclass_flowlabel) >> 28);
+ rdma_ah_set_path_bits(ah_attr, path->g_mylmc & 0x7f);
+ rdma_ah_set_static_rate(ah_attr,
+ mthca_rate_to_ib(dev,
+ path->static_rate & 0xf,
+ port_num));
+ if (path->g_mylmc & (1 << 7)) {
+ u32 tc_fl = be32_to_cpu(path->sl_tclass_flowlabel);
+
+ rdma_ah_set_grh(ah_attr, NULL,
+ tc_fl & 0xfffff,
+ path->mgid_index &
+ (dev->limits.gid_table_len - 1),
+ path->hop_limit,
+ (tc_fl >> 20) & 0xff);
+ rdma_ah_set_dgid_raw(ah_attr, path->rgid);
}
}
@@ -468,11 +474,12 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
to_ib_qp_access_flags(be32_to_cpu(context->params2));
if (qp->transport == RC || qp->transport == UC) {
- to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
- to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
+ to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
+ to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
qp_attr->alt_pkey_index =
be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
- qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
+ qp_attr->alt_port_num =
+ rdma_ah_get_port_num(&qp_attr->alt_ah_attr);
}
qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
@@ -512,40 +519,50 @@ out:
return err;
}
-static int mthca_path_set(struct mthca_dev *dev, const struct ib_ah_attr *ah,
+static int mthca_path_set(struct mthca_dev *dev, const struct rdma_ah_attr *ah,
struct mthca_qp_path *path, u8 port)
{
- path->g_mylmc = ah->src_path_bits & 0x7f;
- path->rlid = cpu_to_be16(ah->dlid);
- path->static_rate = mthca_get_rate(dev, ah->static_rate, port);
+ path->g_mylmc = rdma_ah_get_path_bits(ah) & 0x7f;
+ path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah));
+ path->static_rate = mthca_get_rate(dev, rdma_ah_get_static_rate(ah),
+ port);
+
+ if (rdma_ah_get_ah_flags(ah) & IB_AH_GRH) {
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah);
- if (ah->ah_flags & IB_AH_GRH) {
- if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
+ if (grh->sgid_index >= dev->limits.gid_table_len) {
mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n",
- ah->grh.sgid_index, dev->limits.gid_table_len-1);
+ grh->sgid_index,
+ dev->limits.gid_table_len - 1);
return -1;
}
path->g_mylmc |= 1 << 7;
- path->mgid_index = ah->grh.sgid_index;
- path->hop_limit = ah->grh.hop_limit;
+ path->mgid_index = grh->sgid_index;
+ path->hop_limit = grh->hop_limit;
path->sl_tclass_flowlabel =
- cpu_to_be32((ah->sl << 28) |
- (ah->grh.traffic_class << 20) |
- (ah->grh.flow_label));
- memcpy(path->rgid, ah->grh.dgid.raw, 16);
- } else
- path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);
+ cpu_to_be32((rdma_ah_get_sl(ah) << 28) |
+ (grh->traffic_class << 20) |
+ (grh->flow_label));
+ memcpy(path->rgid, grh->dgid.raw, 16);
+ } else {
+ path->sl_tclass_flowlabel = cpu_to_be32(rdma_ah_get_sl(ah) <<
+ 28);
+ }
return 0;
}
static int __mthca_modify_qp(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
- enum ib_qp_state cur_state, enum ib_qp_state new_state)
+ enum ib_qp_state cur_state,
+ enum ib_qp_state new_state,
+ struct ib_udata *udata)
{
struct mthca_dev *dev = to_mdev(ibqp->device);
struct mthca_qp *qp = to_mqp(ibqp);
+ struct mthca_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mthca_ucontext, ibucontext);
struct mthca_mailbox *mailbox;
struct mthca_qp_param *qp_param;
struct mthca_qp_context *qp_context;
@@ -607,8 +624,7 @@ static int __mthca_modify_qp(struct ib_qp *ibqp,
/* leave arbel_sched_queue as 0 */
if (qp->ibqp.uobject)
- qp_context->usr_page =
- cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
+ qp_context->usr_page = cpu_to_be32(context->uar.index);
else
qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
qp_context->local_qpn = cpu_to_be32(qp->qpn);
@@ -680,7 +696,7 @@ static int __mthca_modify_qp(struct ib_qp *ibqp,
}
if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
- attr->alt_ah_attr.port_num))
+ rdma_ah_get_port_num(&attr->alt_ah_attr)))
goto out_mailbox;
qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
@@ -793,7 +809,7 @@ static int __mthca_modify_qp(struct ib_qp *ibqp,
qp->alt_port = attr->alt_port_num;
if (is_sqp(dev, qp))
- store_attrs(to_msqp(qp), attr, attr_mask);
+ store_attrs(qp->sqp, attr, attr_mask);
/*
* If we moved QP0 to RTR, bring the IB link up; if we moved
@@ -847,6 +863,9 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
enum ib_qp_state cur_state, new_state;
int err = -EINVAL;
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
mutex_lock(&qp->mutex);
if (attr_mask & IB_QP_CUR_STATE) {
cur_state = attr->cur_qp_state;
@@ -860,8 +879,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
- if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
- IB_LINK_LAYER_UNSPECIFIED)) {
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
+ attr_mask)) {
mthca_dbg(dev, "Bad QP transition (transport %d) "
"%d->%d with attr 0x%08x\n",
qp->transport, cur_state, new_state,
@@ -901,7 +920,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
goto out;
}
- err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
+ err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state,
+ udata);
out:
mutex_unlock(&qp->mutex);
@@ -969,7 +989,8 @@ static void mthca_adjust_qp_caps(struct mthca_dev *dev,
*/
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
struct mthca_pd *pd,
- struct mthca_qp *qp)
+ struct mthca_qp *qp,
+ struct ib_udata *udata)
{
int size;
int err = -ENOMEM;
@@ -1036,14 +1057,14 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
* allocate anything. All we need is to calculate the WQE
* sizes and the send_wqe_offset, so we're done now.
*/
- if (pd->ibpd.uobject)
+ if (udata)
return 0;
size = PAGE_ALIGN(qp->send_wqe_offset +
(qp->sq.max << qp->sq.wqe_shift));
- qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
- GFP_KERNEL);
+ qp->wrid = kmalloc_array(qp->rq.max + qp->sq.max, sizeof(u64),
+ GFP_KERNEL);
if (!qp->wrid)
goto err_out;
@@ -1143,7 +1164,8 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
struct mthca_cq *send_cq,
struct mthca_cq *recv_cq,
enum ib_sig_type send_policy,
- struct mthca_qp *qp)
+ struct mthca_qp *qp,
+ struct ib_udata *udata)
{
int ret;
int i;
@@ -1166,7 +1188,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
if (ret)
return ret;
- ret = mthca_alloc_wqe_buf(dev, pd, qp);
+ ret = mthca_alloc_wqe_buf(dev, pd, qp, udata);
if (ret) {
mthca_unmap_memfree(dev, qp);
return ret;
@@ -1179,7 +1201,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
* will be allocated and buffers will be initialized in
* userspace.
*/
- if (pd->ibpd.uobject)
+ if (udata)
return 0;
ret = mthca_alloc_memfree(dev, qp);
@@ -1273,7 +1295,8 @@ int mthca_alloc_qp(struct mthca_dev *dev,
enum ib_qp_type type,
enum ib_sig_type send_policy,
struct ib_qp_cap *cap,
- struct mthca_qp *qp)
+ struct mthca_qp *qp,
+ struct ib_udata *udata)
{
int err;
@@ -1296,7 +1319,7 @@ int mthca_alloc_qp(struct mthca_dev *dev,
qp->port = 0;
err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
- send_policy, qp);
+ send_policy, qp, udata);
if (err) {
mthca_free(&dev->qp_table.alloc, qp->qpn);
return err;
@@ -1347,39 +1370,41 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
enum ib_sig_type send_policy,
struct ib_qp_cap *cap,
int qpn,
- int port,
- struct mthca_sqp *sqp)
+ u32 port,
+ struct mthca_qp *qp,
+ struct ib_udata *udata)
{
u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
int err;
- sqp->qp.transport = MLX;
- err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
+ qp->transport = MLX;
+ err = mthca_set_qp_size(dev, cap, pd, qp);
if (err)
return err;
- sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
- sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
- &sqp->header_dma, GFP_KERNEL);
- if (!sqp->header_buf)
+ qp->sqp->header_buf_size = qp->sq.max * MTHCA_UD_HEADER_SIZE;
+ qp->sqp->header_buf =
+ dma_alloc_coherent(&dev->pdev->dev, qp->sqp->header_buf_size,
+ &qp->sqp->header_dma, GFP_KERNEL);
+ if (!qp->sqp->header_buf)
return -ENOMEM;
spin_lock_irq(&dev->qp_table.lock);
if (mthca_array_get(&dev->qp_table.qp, mqpn))
err = -EBUSY;
else
- mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
+ mthca_array_set(&dev->qp_table.qp, mqpn, qp);
spin_unlock_irq(&dev->qp_table.lock);
if (err)
goto err_out;
- sqp->qp.port = port;
- sqp->qp.qpn = mqpn;
- sqp->qp.transport = MLX;
+ qp->port = port;
+ qp->qpn = mqpn;
+ qp->transport = MLX;
err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
- send_policy, &sqp->qp);
+ send_policy, qp, udata);
if (err)
goto err_out_free;
@@ -1400,10 +1425,9 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
mthca_unlock_cqs(send_cq, recv_cq);
- err_out:
- dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
- sqp->header_buf, sqp->header_dma);
-
+err_out:
+ dma_free_coherent(&dev->pdev->dev, qp->sqp->header_buf_size,
+ qp->sqp->header_buf, qp->sqp->header_dma);
return err;
}
@@ -1466,40 +1490,39 @@ void mthca_free_qp(struct mthca_dev *dev,
if (is_sqp(dev, qp)) {
atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
- dma_free_coherent(&dev->pdev->dev,
- to_msqp(qp)->header_buf_size,
- to_msqp(qp)->header_buf,
- to_msqp(qp)->header_dma);
+ dma_free_coherent(&dev->pdev->dev, qp->sqp->header_buf_size,
+ qp->sqp->header_buf, qp->sqp->header_dma);
} else
mthca_free(&dev->qp_table.alloc, qp->qpn);
}
/* Create UD header for an MLX send and build a data segment for it */
-static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
- int ind, struct ib_send_wr *wr,
+static int build_mlx_header(struct mthca_dev *dev, struct mthca_qp *qp, int ind,
+ const struct ib_ud_wr *wr,
struct mthca_mlx_seg *mlx,
struct mthca_data_seg *data)
{
+ struct mthca_sqp *sqp = qp->sqp;
int header_size;
int err;
u16 pkey;
ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0,
- mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0,
+ mthca_ah_grh_present(to_mah(wr->ah)), 0, 0, 0,
&sqp->ud_header);
- err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
+ err = mthca_read_ah(dev, to_mah(wr->ah), &sqp->ud_header);
if (err)
return err;
mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
- mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
+ mlx->flags |= cpu_to_be32((!qp->ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
(sqp->ud_header.lrh.destination_lid ==
IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
(sqp->ud_header.lrh.service_level << 8));
mlx->rlid = sqp->ud_header.lrh.destination_lid;
mlx->vcrc = 0;
- switch (wr->opcode) {
+ switch (wr->wr.opcode) {
case IB_WR_SEND:
sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
sqp->ud_header.immediate_present = 0;
@@ -1507,35 +1530,35 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
case IB_WR_SEND_WITH_IMM:
sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
sqp->ud_header.immediate_present = 1;
- sqp->ud_header.immediate_data = wr->ex.imm_data;
+ sqp->ud_header.immediate_data = wr->wr.ex.imm_data;
break;
default:
return -EINVAL;
}
- sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
+ sqp->ud_header.lrh.virtual_lane = !qp->ibqp.qp_num ? 15 : 0;
if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
- sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
- if (!sqp->qp.ibqp.qp_num)
- ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
- sqp->pkey_index, &pkey);
+ sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
+ if (!qp->ibqp.qp_num)
+ ib_get_cached_pkey(&dev->ib_dev, qp->port, sqp->pkey_index,
+ &pkey);
else
- ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
- wr->wr.ud.pkey_index, &pkey);
+ ib_get_cached_pkey(&dev->ib_dev, qp->port, wr->pkey_index,
+ &pkey);
sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
- sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
+ sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
- sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
- sqp->qkey : wr->wr.ud.remote_qkey);
- sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
+ sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
+ sqp->qkey : wr->remote_qkey);
+ sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num);
header_size = ib_ud_header_pack(&sqp->ud_header,
sqp->header_buf +
ind * MTHCA_UD_HEADER_SIZE);
data->byte_count = cpu_to_be32(header_size);
- data->lkey = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
+ data->lkey = cpu_to_be32(to_mpd(qp->ibqp.pd)->ntmr.ibmr.lkey);
data->addr = cpu_to_be64(sqp->header_dma +
ind * MTHCA_UD_HEADER_SIZE);
@@ -1569,38 +1592,38 @@ static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
}
static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
- struct ib_send_wr *wr)
+ const struct ib_atomic_wr *wr)
{
- if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
- aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
- aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
+ if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+ aseg->swap_add = cpu_to_be64(wr->swap);
+ aseg->compare = cpu_to_be64(wr->compare_add);
} else {
- aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
+ aseg->swap_add = cpu_to_be64(wr->compare_add);
aseg->compare = 0;
}
}
static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
- struct ib_send_wr *wr)
+ const struct ib_ud_wr *wr)
{
- useg->lkey = cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
- useg->av_addr = cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
- useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
- useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+ useg->lkey = cpu_to_be32(to_mah(wr->ah)->key);
+ useg->av_addr = cpu_to_be64(to_mah(wr->ah)->avdma);
+ useg->dqpn = cpu_to_be32(wr->remote_qpn);
+ useg->qkey = cpu_to_be32(wr->remote_qkey);
}
static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
- struct ib_send_wr *wr)
+ const struct ib_ud_wr *wr)
{
- memcpy(useg->av, to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
- useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
- useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+ memcpy(useg->av, to_mah(wr->ah)->av, MTHCA_AV_SIZE);
+ useg->dqpn = cpu_to_be32(wr->remote_qpn);
+ useg->qkey = cpu_to_be32(wr->remote_qkey);
}
-int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int mthca_tavor_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct mthca_dev *dev = to_mdev(ibqp->device);
struct mthca_qp *qp = to_mqp(ibqp);
@@ -1618,8 +1641,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
* without initializing f0 and size0, and they are in fact
* never used uninitialized.
*/
- int uninitialized_var(size0);
- u32 uninitialized_var(f0);
+ int size0;
+ u32 f0;
int ind;
u8 op0 = 0;
@@ -1664,11 +1687,11 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
- set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
- wr->wr.atomic.rkey);
+ set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
+ atomic_wr(wr)->rkey);
wqe += sizeof (struct mthca_raddr_seg);
- set_atomic_seg(wqe, wr);
+ set_atomic_seg(wqe, atomic_wr(wr));
wqe += sizeof (struct mthca_atomic_seg);
size += (sizeof (struct mthca_raddr_seg) +
sizeof (struct mthca_atomic_seg)) / 16;
@@ -1677,8 +1700,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
case IB_WR_RDMA_READ:
- set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
- wr->wr.rdma.rkey);
+ set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+ rdma_wr(wr)->rkey);
wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
break;
@@ -1694,8 +1717,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
- set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
- wr->wr.rdma.rkey);
+ set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+ rdma_wr(wr)->rkey);
wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
break;
@@ -1708,15 +1731,15 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
case UD:
- set_tavor_ud_seg(wqe, wr);
+ set_tavor_ud_seg(wqe, ud_wr(wr));
wqe += sizeof (struct mthca_tavor_ud_seg);
size += sizeof (struct mthca_tavor_ud_seg) / 16;
break;
case MLX:
- err = build_mlx_header(dev, to_msqp(qp), ind, wr,
- wqe - sizeof (struct mthca_next_seg),
- wqe);
+ err = build_mlx_header(
+ dev, qp, ind, ud_wr(wr),
+ wqe - sizeof(struct mthca_next_seg), wqe);
if (err) {
*bad_wr = wr;
goto out;
@@ -1788,11 +1811,6 @@ out:
(qp->qpn << 8) | size0,
dev->kar + MTHCA_SEND_DOORBELL,
MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
- /*
- * Make sure doorbells don't leak out of SQ spinlock
- * and reach the HCA out of order:
- */
- mmiowb();
}
qp->sq.next_ind = ind;
@@ -1802,8 +1820,8 @@ out:
return err;
}
-int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mthca_tavor_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mthca_dev *dev = to_mdev(ibqp->device);
struct mthca_qp *qp = to_mqp(ibqp);
@@ -1819,7 +1837,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
* without initializing size0, and it is in fact never used
* uninitialized.
*/
- int uninitialized_var(size0);
+ int size0;
int ind;
void *wqe;
void *prev_wqe;
@@ -1903,18 +1921,12 @@ out:
qp->rq.next_ind = ind;
qp->rq.head += nreq;
- /*
- * Make sure doorbells don't leak out of RQ spinlock and reach
- * the HCA out of order:
- */
- mmiowb();
-
spin_unlock_irqrestore(&qp->rq.lock, flags);
return err;
}
-int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int mthca_arbel_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct mthca_dev *dev = to_mdev(ibqp->device);
struct mthca_qp *qp = to_mqp(ibqp);
@@ -1933,8 +1945,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
* without initializing f0 and size0, and they are in fact
* never used uninitialized.
*/
- int uninitialized_var(size0);
- u32 uninitialized_var(f0);
+ int size0;
+ u32 f0;
int ind;
u8 op0 = 0;
@@ -2005,11 +2017,11 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
- set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
- wr->wr.atomic.rkey);
+ set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
+ atomic_wr(wr)->rkey);
wqe += sizeof (struct mthca_raddr_seg);
- set_atomic_seg(wqe, wr);
+ set_atomic_seg(wqe, atomic_wr(wr));
wqe += sizeof (struct mthca_atomic_seg);
size += (sizeof (struct mthca_raddr_seg) +
sizeof (struct mthca_atomic_seg)) / 16;
@@ -2018,8 +2030,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_RDMA_READ:
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
- set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
- wr->wr.rdma.rkey);
+ set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+ rdma_wr(wr)->rkey);
wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
break;
@@ -2035,8 +2047,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
- set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
- wr->wr.rdma.rkey);
+ set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+ rdma_wr(wr)->rkey);
wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
break;
@@ -2049,15 +2061,15 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
case UD:
- set_arbel_ud_seg(wqe, wr);
+ set_arbel_ud_seg(wqe, ud_wr(wr));
wqe += sizeof (struct mthca_arbel_ud_seg);
size += sizeof (struct mthca_arbel_ud_seg) / 16;
break;
case MLX:
- err = build_mlx_header(dev, to_msqp(qp), ind, wr,
- wqe - sizeof (struct mthca_next_seg),
- wqe);
+ err = build_mlx_header(
+ dev, qp, ind, ud_wr(wr),
+ wqe - sizeof(struct mthca_next_seg), wqe);
if (err) {
*bad_wr = wr;
goto out;
@@ -2143,18 +2155,12 @@ out:
MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}
- /*
- * Make sure doorbells don't leak out of SQ spinlock and reach
- * the HCA out of order:
- */
- mmiowb();
-
spin_unlock_irqrestore(&qp->sq.lock, flags);
return err;
}
-int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mthca_arbel_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mthca_dev *dev = to_mdev(ibqp->device);
struct mthca_qp *qp = to_mqp(ibqp);
diff --git a/drivers/infiniband/hw/mthca/mthca_reset.c b/drivers/infiniband/hw/mthca/mthca_reset.c
index 74c6a9426047..2a6979e4ae1c 100644
--- a/drivers/infiniband/hw/mthca/mthca_reset.c
+++ b/drivers/infiniband/hw/mthca/mthca_reset.c
@@ -96,9 +96,7 @@ int mthca_reset(struct mthca_dev *mdev)
hca_header = kmalloc(256, GFP_KERNEL);
if (!hca_header) {
err = -ENOMEM;
- mthca_err(mdev, "Couldn't allocate memory to save HCA "
- "PCI header, aborting.\n");
- goto out;
+ goto put_dev;
}
for (i = 0; i < 64; ++i) {
@@ -108,7 +106,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "Couldn't save HCA "
"PCI header, aborting.\n");
- goto out;
+ goto free_hca;
}
}
@@ -119,9 +117,7 @@ int mthca_reset(struct mthca_dev *mdev)
bridge_header = kmalloc(256, GFP_KERNEL);
if (!bridge_header) {
err = -ENOMEM;
- mthca_err(mdev, "Couldn't allocate memory to save HCA "
- "bridge PCI header, aborting.\n");
- goto out;
+ goto free_hca;
}
for (i = 0; i < 64; ++i) {
@@ -131,7 +127,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "Couldn't save HCA bridge "
"PCI header, aborting.\n");
- goto out;
+ goto free_bh;
}
}
bridge_pcix_cap = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
@@ -139,7 +135,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "Couldn't locate HCA bridge "
"PCI-X capability, aborting.\n");
- goto out;
+ goto free_bh;
}
}
@@ -152,7 +148,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENOMEM;
mthca_err(mdev, "Couldn't map HCA reset register, "
"aborting.\n");
- goto out;
+ goto free_bh;
}
writel(MTHCA_RESET_VALUE, reset);
@@ -172,7 +168,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "Couldn't access HCA after reset, "
"aborting.\n");
- goto out;
+ goto free_bh;
}
if (v != 0xffffffff)
@@ -184,7 +180,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "PCI device did not come back after reset, "
"aborting.\n");
- goto out;
+ goto free_bh;
}
good:
@@ -195,14 +191,14 @@ good:
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA bridge Upstream "
"split transaction control, aborting.\n");
- goto out;
+ goto free_bh;
}
if (pci_write_config_dword(bridge, bridge_pcix_cap + 0xc,
bridge_header[(bridge_pcix_cap + 0xc) / 4])) {
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA bridge Downstream "
"split transaction control, aborting.\n");
- goto out;
+ goto free_bh;
}
/*
* Bridge control register is at 0x3e, so we'll
@@ -216,7 +212,7 @@ good:
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA bridge reg %x, "
"aborting.\n", i);
- goto out;
+ goto free_bh;
}
}
@@ -225,7 +221,7 @@ good:
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA bridge COMMAND, "
"aborting.\n");
- goto out;
+ goto free_bh;
}
}
@@ -235,7 +231,7 @@ good:
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA PCI-X "
"command register, aborting.\n");
- goto out;
+ goto free_bh;
}
}
@@ -246,7 +242,7 @@ good:
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA PCI Express "
"Device Control register, aborting.\n");
- goto out;
+ goto free_bh;
}
linkctl = hca_header[(hca_pcie_cap + PCI_EXP_LNKCTL) / 4];
if (pcie_capability_write_word(mdev->pdev, PCI_EXP_LNKCTL,
@@ -254,7 +250,7 @@ good:
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA PCI Express "
"Link control register, aborting.\n");
- goto out;
+ goto free_bh;
}
}
@@ -266,7 +262,7 @@ good:
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA reg %x, "
"aborting.\n", i);
- goto out;
+ goto free_bh;
}
}
@@ -275,14 +271,12 @@ good:
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA COMMAND, "
"aborting.\n");
- goto out;
}
-
-out:
- if (bridge)
- pci_dev_put(bridge);
+free_bh:
kfree(bridge_header);
+free_hca:
kfree(hca_header);
-
+put_dev:
+ pci_dev_put(bridge);
return err;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index d22f970480c0..a85935ccce88 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -36,6 +36,8 @@
#include <asm/io.h>
+#include <rdma/uverbs_ioctl.h>
+
#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
@@ -95,17 +97,20 @@ static inline int *wqe_to_link(void *wqe)
static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
struct mthca_pd *pd,
struct mthca_srq *srq,
- struct mthca_tavor_srq_context *context)
+ struct mthca_tavor_srq_context *context,
+ struct ib_udata *udata)
{
+ struct mthca_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mthca_ucontext, ibucontext);
+
memset(context, 0, sizeof *context);
context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
context->state_pd = cpu_to_be32(pd->pd_num);
context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
- if (pd->ibpd.uobject)
- context->uar =
- cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
+ if (udata)
+ context->uar = cpu_to_be32(ucontext->uar.index);
else
context->uar = cpu_to_be32(dev->driver_uar.index);
}
@@ -113,8 +118,11 @@ static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
struct mthca_pd *pd,
struct mthca_srq *srq,
- struct mthca_arbel_srq_context *context)
+ struct mthca_arbel_srq_context *context,
+ struct ib_udata *udata)
{
+ struct mthca_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mthca_ucontext, ibucontext);
int logsize, max;
memset(context, 0, sizeof *context);
@@ -129,9 +137,8 @@ static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
context->db_index = cpu_to_be32(srq->db_index);
context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
- if (pd->ibpd.uobject)
- context->logstride_usrpage |=
- cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
+ if (udata)
+ context->logstride_usrpage |= cpu_to_be32(ucontext->uar.index);
else
context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
@@ -145,17 +152,17 @@ static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
}
static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
- struct mthca_srq *srq)
+ struct mthca_srq *srq, struct ib_udata *udata)
{
struct mthca_data_seg *scatter;
void *wqe;
int err;
int i;
- if (pd->ibpd.uobject)
+ if (udata)
return 0;
- srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
+ srq->wrid = kmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
if (!srq->wrid)
return -ENOMEM;
@@ -197,7 +204,8 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
}
int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
- struct ib_srq_attr *attr, struct mthca_srq *srq)
+ struct ib_srq_attr *attr, struct mthca_srq *srq,
+ struct ib_udata *udata)
{
struct mthca_mailbox *mailbox;
int ds;
@@ -235,7 +243,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
if (err)
goto err_out;
- if (!pd->ibpd.uobject) {
+ if (!udata) {
srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
srq->srqn, &srq->db);
if (srq->db_index < 0) {
@@ -251,7 +259,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
goto err_out_db;
}
- err = mthca_alloc_srq_buf(dev, pd, srq);
+ err = mthca_alloc_srq_buf(dev, pd, srq, udata);
if (err)
goto err_out_mailbox;
@@ -261,9 +269,9 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
mutex_init(&srq->mutex);
if (mthca_is_memfree(dev))
- mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
+ mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf, udata);
else
- mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);
+ mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf, udata);
err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn);
@@ -297,14 +305,14 @@ err_out_free_srq:
mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
err_out_free_buf:
- if (!pd->ibpd.uobject)
+ if (!udata)
mthca_free_srq_buf(dev, srq);
err_out_mailbox:
mthca_free_mailbox(dev, mailbox);
err_out_db:
- if (!pd->ibpd.uobject && mthca_is_memfree(dev))
+ if (!udata && mthca_is_memfree(dev))
mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
err_out_icm:
@@ -472,8 +480,8 @@ void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
spin_unlock(&srq->lock);
}
-int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mthca_dev *dev = to_mdev(ibsrq->device);
struct mthca_srq *srq = to_msrq(ibsrq);
@@ -562,18 +570,12 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}
- /*
- * Make sure doorbells don't leak out of SRQ spinlock and
- * reach the HCA out of order:
- */
- mmiowb();
-
spin_unlock_irqrestore(&srq->lock, flags);
return err;
}
-int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mthca_dev *dev = to_mdev(ibsrq->device);
struct mthca_srq *srq = to_msrq(ibsrq);
diff --git a/drivers/infiniband/hw/mthca/mthca_user.h b/drivers/infiniband/hw/mthca/mthca_user.h
deleted file mode 100644
index 5fe56e810739..000000000000
--- a/drivers/infiniband/hw/mthca/mthca_user.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MTHCA_USER_H
-#define MTHCA_USER_H
-
-#include <linux/types.h>
-
-/*
- * Increment this value if any changes that break userspace ABI
- * compatibility are made.
- */
-#define MTHCA_UVERBS_ABI_VERSION 1
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
- */
-
-struct mthca_alloc_ucontext_resp {
- __u32 qp_tab_size;
- __u32 uarc_size;
-};
-
-struct mthca_alloc_pd_resp {
- __u32 pdn;
- __u32 reserved;
-};
-
-struct mthca_reg_mr {
-/*
- * Mark the memory region with a DMA attribute that causes
- * in-flight DMA to be flushed when the region is written to:
- */
-#define MTHCA_MR_DMASYNC 0x1
- __u32 mr_attrs;
- __u32 reserved;
-};
-
-struct mthca_create_cq {
- __u32 lkey;
- __u32 pdn;
- __u64 arm_db_page;
- __u64 set_db_page;
- __u32 arm_db_index;
- __u32 set_db_index;
-};
-
-struct mthca_create_cq_resp {
- __u32 cqn;
- __u32 reserved;
-};
-
-struct mthca_resize_cq {
- __u32 lkey;
- __u32 reserved;
-};
-
-struct mthca_create_srq {
- __u32 lkey;
- __u32 db_index;
- __u64 db_page;
-};
-
-struct mthca_create_srq_resp {
- __u32 srqn;
- __u32 reserved;
-};
-
-struct mthca_create_qp {
- __u32 lkey;
- __u32 reserved;
- __u64 sq_db_page;
- __u64 rq_db_page;
- __u32 sq_db_index;
- __u32 rq_db_index;
-};
-
-#endif /* MTHCA_USER_H */
diff --git a/drivers/infiniband/hw/nes/Kconfig b/drivers/infiniband/hw/nes/Kconfig
deleted file mode 100644
index 846dc97cf260..000000000000
--- a/drivers/infiniband/hw/nes/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-config INFINIBAND_NES
- tristate "NetEffect RNIC Driver"
- depends on PCI && INET && INFINIBAND
- select LIBCRC32C
- select INET_LRO
- ---help---
- This is the RDMA Network Interface Card (RNIC) driver for
- NetEffect Ethernet Cluster Server Adapters.
-
-config INFINIBAND_NES_DEBUG
- bool "Verbose debugging output"
- depends on INFINIBAND_NES
- default n
- ---help---
- This option enables debug messages from the NetEffect RNIC
- driver. Select this if you are diagnosing a problem.
diff --git a/drivers/infiniband/hw/nes/Makefile b/drivers/infiniband/hw/nes/Makefile
deleted file mode 100644
index 97820c23ecef..000000000000
--- a/drivers/infiniband/hw/nes/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_INFINIBAND_NES) += iw_nes.o
-
-iw_nes-objs := nes.o nes_hw.o nes_nic.o nes_utils.o nes_verbs.o nes_cm.o nes_mgt.o
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
deleted file mode 100644
index 9f9d5c563a61..000000000000
--- a/drivers/infiniband/hw/nes/nes.c
+++ /dev/null
@@ -1,1270 +0,0 @@
-/*
- * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/if_vlan.h>
-#include <linux/crc32.h>
-#include <linux/in.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/if_arp.h>
-#include <linux/highmem.h>
-#include <linux/slab.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-#include <rdma/ib_smi.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_pack.h>
-#include <rdma/iw_cm.h>
-
-#include "nes.h"
-
-#include <net/netevent.h>
-#include <net/neighbour.h>
-#include <linux/route.h>
-#include <net/ip_fib.h>
-
-MODULE_AUTHOR("NetEffect");
-MODULE_DESCRIPTION("NetEffect RNIC Low-level iWARP Driver");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
-
-int max_mtu = 9000;
-int interrupt_mod_interval = 0;
-
-/* Interoperability */
-int mpa_version = 1;
-module_param(mpa_version, int, 0644);
-MODULE_PARM_DESC(mpa_version, "MPA version to be used int MPA Req/Resp (0 or 1)");
-
-/* Interoperability */
-int disable_mpa_crc = 0;
-module_param(disable_mpa_crc, int, 0644);
-MODULE_PARM_DESC(disable_mpa_crc, "Disable checking of MPA CRC");
-
-unsigned int nes_drv_opt = NES_DRV_OPT_DISABLE_INT_MOD | NES_DRV_OPT_ENABLE_PAU;
-module_param(nes_drv_opt, int, 0644);
-MODULE_PARM_DESC(nes_drv_opt, "Driver option parameters");
-
-unsigned int nes_debug_level = 0;
-module_param_named(debug_level, nes_debug_level, uint, 0644);
-MODULE_PARM_DESC(debug_level, "Enable debug output level");
-
-unsigned int wqm_quanta = 0x10000;
-module_param(wqm_quanta, int, 0644);
-MODULE_PARM_DESC(wqm_quanta, "WQM quanta");
-
-static bool limit_maxrdreqsz;
-module_param(limit_maxrdreqsz, bool, 0644);
-MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
-
-LIST_HEAD(nes_adapter_list);
-static LIST_HEAD(nes_dev_list);
-
-atomic_t qps_destroyed;
-
-static unsigned int ee_flsh_adapter;
-static unsigned int sysfs_nonidx_addr;
-static unsigned int sysfs_idx_addr;
-
-static struct pci_device_id nes_pci_table[] = {
- { PCI_VDEVICE(NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020), },
- { PCI_VDEVICE(NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020_KR), },
- {0}
-};
-
-MODULE_DEVICE_TABLE(pci, nes_pci_table);
-
-/* registered nes netlink callbacks */
-static struct ibnl_client_cbs nes_nl_cb_table[] = {
- [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
- [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
- [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
- [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
- [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
- [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
- [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
-};
-
-static int nes_inetaddr_event(struct notifier_block *, unsigned long, void *);
-static int nes_net_event(struct notifier_block *, unsigned long, void *);
-static int nes_notifiers_registered;
-
-
-static struct notifier_block nes_inetaddr_notifier = {
- .notifier_call = nes_inetaddr_event
-};
-
-static struct notifier_block nes_net_notifier = {
- .notifier_call = nes_net_event
-};
-
-/**
- * nes_inetaddr_event
- */
-static int nes_inetaddr_event(struct notifier_block *notifier,
- unsigned long event, void *ptr)
-{
- struct in_ifaddr *ifa = ptr;
- struct net_device *event_netdev = ifa->ifa_dev->dev;
- struct nes_device *nesdev;
- struct net_device *netdev;
- struct net_device *upper_dev;
- struct nes_vnic *nesvnic;
- unsigned int is_bonded;
-
- nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %pI4, netmask %pI4.\n",
- &ifa->ifa_address, &ifa->ifa_mask);
- list_for_each_entry(nesdev, &nes_dev_list, list) {
- nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p. (%s)\n",
- nesdev, nesdev->netdev[0]->name);
- netdev = nesdev->netdev[0];
- nesvnic = netdev_priv(netdev);
- upper_dev = netdev_master_upper_dev_get(netdev);
- is_bonded = netif_is_bond_slave(netdev) &&
- (upper_dev == event_netdev);
- if ((netdev == event_netdev) || is_bonded) {
- if (nesvnic->rdma_enabled == 0) {
- nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since"
- " RDMA is not enabled.\n",
- netdev->name);
- return NOTIFY_OK;
- }
- /* we have ifa->ifa_address/mask here if we need it */
- switch (event) {
- case NETDEV_DOWN:
- nes_debug(NES_DBG_NETDEV, "event:DOWN\n");
- nes_write_indexed(nesdev,
- NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)), 0);
-
- nes_manage_arp_cache(netdev, netdev->dev_addr,
- ntohl(nesvnic->local_ipaddr), NES_ARP_DELETE);
- nesvnic->local_ipaddr = 0;
- if (is_bonded)
- continue;
- else
- return NOTIFY_OK;
- break;
- case NETDEV_UP:
- nes_debug(NES_DBG_NETDEV, "event:UP\n");
-
- if (nesvnic->local_ipaddr != 0) {
- nes_debug(NES_DBG_NETDEV, "Interface already has local_ipaddr\n");
- return NOTIFY_OK;
- }
- /* fall through */
- case NETDEV_CHANGEADDR:
- /* Add the address to the IP table */
- if (upper_dev)
- nesvnic->local_ipaddr =
- ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address;
- else
- nesvnic->local_ipaddr = ifa->ifa_address;
-
- nes_write_indexed(nesdev,
- NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)),
- ntohl(nesvnic->local_ipaddr));
- nes_manage_arp_cache(netdev, netdev->dev_addr,
- ntohl(nesvnic->local_ipaddr), NES_ARP_ADD);
- if (is_bonded)
- continue;
- else
- return NOTIFY_OK;
- break;
- default:
- break;
- }
- }
- }
-
- return NOTIFY_DONE;
-}
-
-
-/**
- * nes_net_event
- */
-static int nes_net_event(struct notifier_block *notifier,
- unsigned long event, void *ptr)
-{
- struct neighbour *neigh = ptr;
- struct nes_device *nesdev;
- struct net_device *netdev;
- struct nes_vnic *nesvnic;
-
- switch (event) {
- case NETEVENT_NEIGH_UPDATE:
- list_for_each_entry(nesdev, &nes_dev_list, list) {
- /* nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p.\n", nesdev); */
- netdev = nesdev->netdev[0];
- nesvnic = netdev_priv(netdev);
- if (netdev == neigh->dev) {
- if (nesvnic->rdma_enabled == 0) {
- nes_debug(NES_DBG_NETDEV, "Skipping device %s since no RDMA\n",
- netdev->name);
- } else {
- if (neigh->nud_state & NUD_VALID) {
- nes_manage_arp_cache(neigh->dev, neigh->ha,
- ntohl(*(__be32 *)neigh->primary_key), NES_ARP_ADD);
- } else {
- nes_manage_arp_cache(neigh->dev, neigh->ha,
- ntohl(*(__be32 *)neigh->primary_key), NES_ARP_DELETE);
- }
- }
- return NOTIFY_OK;
- }
- }
- break;
- default:
- nes_debug(NES_DBG_NETDEV, "NETEVENT_ %lu undefined\n", event);
- break;
- }
-
- return NOTIFY_DONE;
-}
-
-
-/**
- * nes_add_ref
- */
-void nes_add_ref(struct ib_qp *ibqp)
-{
- struct nes_qp *nesqp;
-
- nesqp = to_nesqp(ibqp);
- nes_debug(NES_DBG_QP, "Bumping refcount for QP%u. Pre-inc value = %u\n",
- ibqp->qp_num, atomic_read(&nesqp->refcount));
- atomic_inc(&nesqp->refcount);
-}
-
-static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_request *cqp_request)
-{
- unsigned long flags;
- struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
-
- atomic_inc(&qps_destroyed);
-
- /* Free the control structures */
-
- if (nesqp->pbl_vbase) {
- pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size,
- nesqp->hwqp.q2_vbase, nesqp->hwqp.q2_pbase);
- spin_lock_irqsave(&nesadapter->pbl_lock, flags);
- nesadapter->free_256pbl++;
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- pci_free_consistent(nesdev->pcidev, 256, nesqp->pbl_vbase, nesqp->pbl_pbase);
- nesqp->pbl_vbase = NULL;
-
- } else {
- pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size,
- nesqp->hwqp.sq_vbase, nesqp->hwqp.sq_pbase);
- }
- nes_free_resource(nesadapter, nesadapter->allocated_qps, nesqp->hwqp.qp_id);
-
- nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = NULL;
- kfree(nesqp->allocated_buffer);
-
-}
-
-/**
- * nes_rem_ref
- */
-void nes_rem_ref(struct ib_qp *ibqp)
-{
- u64 u64temp;
- struct nes_qp *nesqp;
- struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_hw_cqp_wqe *cqp_wqe;
- struct nes_cqp_request *cqp_request;
- u32 opcode;
-
- nesqp = to_nesqp(ibqp);
-
- if (atomic_read(&nesqp->refcount) == 0) {
- printk(KERN_INFO PFX "%s: Reference count already 0 for QP%d, last aeq = 0x%04X.\n",
- __func__, ibqp->qp_num, nesqp->last_aeq);
- BUG();
- }
-
- if (atomic_dec_and_test(&nesqp->refcount)) {
- if (nesqp->pau_mode)
- nes_destroy_pau_qp(nesdev, nesqp);
-
- /* Destroy the QP */
- cqp_request = nes_get_cqp_request(nesdev);
- if (cqp_request == NULL) {
- nes_debug(NES_DBG_QP, "Failed to get a cqp_request.\n");
- return;
- }
- cqp_request->waiting = 0;
- cqp_request->callback = 1;
- cqp_request->cqp_callback = nes_cqp_rem_ref_callback;
- cqp_request->cqp_callback_pointer = nesqp;
- cqp_wqe = &cqp_request->cqp_wqe;
-
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
- opcode = NES_CQP_DESTROY_QP | NES_CQP_QP_TYPE_IWARP;
-
- if (nesqp->hte_added) {
- opcode |= NES_CQP_QP_DEL_HTE;
- nesqp->hte_added = 0;
- }
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
- u64temp = (u64)nesqp->nesqp_context_pbase;
- set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);
- nes_post_cqp_request(nesdev, cqp_request);
- }
-}
-
-
-/**
- * nes_get_qp
- */
-struct ib_qp *nes_get_qp(struct ib_device *device, int qpn)
-{
- struct nes_vnic *nesvnic = to_nesvnic(device);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
-
- if ((qpn < NES_FIRST_QPN) || (qpn >= (NES_FIRST_QPN + nesadapter->max_qp)))
- return NULL;
-
- return &nesadapter->qp_table[qpn - NES_FIRST_QPN]->ibqp;
-}
-
-
-/**
- * nes_print_macaddr
- */
-static void nes_print_macaddr(struct net_device *netdev)
-{
- nes_debug(NES_DBG_INIT, "%s: %pM, IRQ %u\n",
- netdev->name, netdev->dev_addr, netdev->irq);
-}
-
-/**
- * nes_interrupt - handle interrupts
- */
-static irqreturn_t nes_interrupt(int irq, void *dev_id)
-{
- struct nes_device *nesdev = (struct nes_device *)dev_id;
- int handled = 0;
- u32 int_mask;
- u32 int_req;
- u32 int_stat;
- u32 intf_int_stat;
- u32 timer_stat;
-
- if (nesdev->msi_enabled) {
- /* No need to read the interrupt pending register if msi is enabled */
- handled = 1;
- } else {
- if (unlikely(nesdev->nesadapter->hw_rev == NE020_REV)) {
- /* Master interrupt enable provides synchronization for kicking off bottom half
- when interrupt sharing is going on */
- int_mask = nes_read32(nesdev->regs + NES_INT_MASK);
- if (int_mask & 0x80000000) {
- /* Check interrupt status to see if this might be ours */
- int_stat = nes_read32(nesdev->regs + NES_INT_STAT);
- int_req = nesdev->int_req;
- if (int_stat&int_req) {
- /* if interesting CEQ or AEQ is pending, claim the interrupt */
- if ((int_stat&int_req) & (~(NES_INT_TIMER|NES_INT_INTF))) {
- handled = 1;
- } else {
- if (((int_stat & int_req) & NES_INT_TIMER) == NES_INT_TIMER) {
- /* Timer might be running but might be for another function */
- timer_stat = nes_read32(nesdev->regs + NES_TIMER_STAT);
- if ((timer_stat & nesdev->timer_int_req) != 0) {
- handled = 1;
- }
- }
- if ((((int_stat & int_req) & NES_INT_INTF) == NES_INT_INTF) &&
- (handled == 0)) {
- intf_int_stat = nes_read32(nesdev->regs+NES_INTF_INT_STAT);
- if ((intf_int_stat & nesdev->intf_int_req) != 0) {
- handled = 1;
- }
- }
- }
- if (handled) {
- nes_write32(nesdev->regs+NES_INT_MASK, int_mask & (~0x80000000));
- int_mask = nes_read32(nesdev->regs+NES_INT_MASK);
- /* Save off the status to save an additional read */
- nesdev->int_stat = int_stat;
- nesdev->napi_isr_ran = 1;
- }
- }
- }
- } else {
- handled = nes_read32(nesdev->regs+NES_INT_PENDING);
- }
- }
-
- if (handled) {
-
- if (nes_napi_isr(nesdev) == 0) {
- tasklet_schedule(&nesdev->dpc_tasklet);
-
- }
- return IRQ_HANDLED;
- } else {
- return IRQ_NONE;
- }
-}
-
-
-/**
- * nes_probe - Device initialization
- */
-static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
-{
- struct net_device *netdev = NULL;
- struct nes_device *nesdev = NULL;
- int ret = 0;
- void __iomem *mmio_regs = NULL;
- u8 hw_rev;
-
- assert(pcidev != NULL);
- assert(ent != NULL);
-
- printk(KERN_INFO PFX "NetEffect RNIC driver v%s loading. (%s)\n",
- DRV_VERSION, pci_name(pcidev));
-
- ret = pci_enable_device(pcidev);
- if (ret) {
- printk(KERN_ERR PFX "Unable to enable PCI device. (%s)\n", pci_name(pcidev));
- goto bail0;
- }
-
- nes_debug(NES_DBG_INIT, "BAR0 (@0x%08lX) size = 0x%lX bytes\n",
- (long unsigned int)pci_resource_start(pcidev, BAR_0),
- (long unsigned int)pci_resource_len(pcidev, BAR_0));
- nes_debug(NES_DBG_INIT, "BAR1 (@0x%08lX) size = 0x%lX bytes\n",
- (long unsigned int)pci_resource_start(pcidev, BAR_1),
- (long unsigned int)pci_resource_len(pcidev, BAR_1));
-
- /* Make sure PCI base addr are MMIO */
- if (!(pci_resource_flags(pcidev, BAR_0) & IORESOURCE_MEM) ||
- !(pci_resource_flags(pcidev, BAR_1) & IORESOURCE_MEM)) {
- printk(KERN_ERR PFX "PCI regions not an MMIO resource\n");
- ret = -ENODEV;
- goto bail1;
- }
-
- /* Reserve PCI I/O and memory resources */
- ret = pci_request_regions(pcidev, DRV_NAME);
- if (ret) {
- printk(KERN_ERR PFX "Unable to request regions. (%s)\n", pci_name(pcidev));
- goto bail1;
- }
-
- if ((sizeof(dma_addr_t) > 4)) {
- ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
- if (ret < 0) {
- printk(KERN_ERR PFX "64b DMA mask configuration failed\n");
- goto bail2;
- }
- ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
- if (ret) {
- printk(KERN_ERR PFX "64b DMA consistent mask configuration failed\n");
- goto bail2;
- }
- } else {
- ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
- if (ret < 0) {
- printk(KERN_ERR PFX "32b DMA mask configuration failed\n");
- goto bail2;
- }
- ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
- if (ret) {
- printk(KERN_ERR PFX "32b DMA consistent mask configuration failed\n");
- goto bail2;
- }
- }
-
- pci_set_master(pcidev);
-
- /* Allocate hardware structure */
- nesdev = kzalloc(sizeof(struct nes_device), GFP_KERNEL);
- if (!nesdev) {
- printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n", pci_name(pcidev));
- ret = -ENOMEM;
- goto bail2;
- }
-
- nes_debug(NES_DBG_INIT, "Allocated nes device at %p\n", nesdev);
- nesdev->pcidev = pcidev;
- pci_set_drvdata(pcidev, nesdev);
-
- pci_read_config_byte(pcidev, 0x0008, &hw_rev);
- nes_debug(NES_DBG_INIT, "hw_rev=%u\n", hw_rev);
-
- spin_lock_init(&nesdev->indexed_regs_lock);
-
- /* Remap the PCI registers in adapter BAR0 to kernel VA space */
- mmio_regs = ioremap_nocache(pci_resource_start(pcidev, BAR_0),
- pci_resource_len(pcidev, BAR_0));
- if (mmio_regs == NULL) {
- printk(KERN_ERR PFX "Unable to remap BAR0\n");
- ret = -EIO;
- goto bail3;
- }
- nesdev->regs = mmio_regs;
- nesdev->index_reg = 0x50 + (PCI_FUNC(pcidev->devfn)*8) + mmio_regs;
-
- /* Ensure interrupts are disabled */
- nes_write32(nesdev->regs+NES_INT_MASK, 0x7fffffff);
-
- if (nes_drv_opt & NES_DRV_OPT_ENABLE_MSI) {
- if (!pci_enable_msi(nesdev->pcidev)) {
- nesdev->msi_enabled = 1;
- nes_debug(NES_DBG_INIT, "MSI is enabled for device %s\n",
- pci_name(pcidev));
- } else {
- nes_debug(NES_DBG_INIT, "MSI is disabled by linux for device %s\n",
- pci_name(pcidev));
- }
- } else {
- nes_debug(NES_DBG_INIT, "MSI not requested due to driver options for device %s\n",
- pci_name(pcidev));
- }
-
- nesdev->csr_start = pci_resource_start(nesdev->pcidev, BAR_0);
- nesdev->doorbell_region = pci_resource_start(nesdev->pcidev, BAR_1);
-
- /* Init the adapter */
- nesdev->nesadapter = nes_init_adapter(nesdev, hw_rev);
- if (!nesdev->nesadapter) {
- printk(KERN_ERR PFX "Unable to initialize adapter.\n");
- ret = -ENOMEM;
- goto bail5;
- }
- nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval;
- nesdev->nesadapter->wqm_quanta = wqm_quanta;
-
- /* nesdev->base_doorbell_index =
- nesdev->nesadapter->pd_config_base[PCI_FUNC(nesdev->pcidev->devfn)]; */
- nesdev->base_doorbell_index = 1;
- nesdev->doorbell_start = nesdev->nesadapter->doorbell_start;
- if (nesdev->nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) {
- switch (PCI_FUNC(nesdev->pcidev->devfn) %
- nesdev->nesadapter->port_count) {
- case 1:
- nesdev->mac_index = 2;
- break;
- case 2:
- nesdev->mac_index = 1;
- break;
- case 3:
- nesdev->mac_index = 3;
- break;
- case 0:
- default:
- nesdev->mac_index = 0;
- }
- } else {
- nesdev->mac_index = PCI_FUNC(nesdev->pcidev->devfn) %
- nesdev->nesadapter->port_count;
- }
-
- if ((limit_maxrdreqsz ||
- ((nesdev->nesadapter->phy_type[0] == NES_PHY_TYPE_GLADIUS) &&
- (hw_rev == NE020_REV1))) &&
- (pcie_get_readrq(pcidev) > 256)) {
- if (pcie_set_readrq(pcidev, 256))
- printk(KERN_ERR PFX "Unable to set max read request"
- " to 256 bytes\n");
- else
- nes_debug(NES_DBG_INIT, "Max read request size set"
- " to 256 bytes\n");
- }
-
- tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
-
- /* bring up the Control QP */
- if (nes_init_cqp(nesdev)) {
- ret = -ENODEV;
- goto bail6;
- }
-
- /* Arm the CCQ */
- nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
- PCI_FUNC(nesdev->pcidev->devfn));
- nes_read32(nesdev->regs+NES_CQE_ALLOC);
-
- /* Enable the interrupts */
- nesdev->int_req = (0x101 << PCI_FUNC(nesdev->pcidev->devfn)) |
- (1 << (PCI_FUNC(nesdev->pcidev->devfn)+16));
- if (PCI_FUNC(nesdev->pcidev->devfn) < 4) {
- nesdev->int_req |= (1 << (PCI_FUNC(nesdev->mac_index)+24));
- }
-
- /* TODO: This really should be the first driver to load, not function 0 */
- if (PCI_FUNC(nesdev->pcidev->devfn) == 0) {
- /* pick up PCI and critical errors if the first driver to load */
- nesdev->intf_int_req = NES_INTF_INT_PCIERR | NES_INTF_INT_CRITERR;
- nesdev->int_req |= NES_INT_INTF;
- } else {
- nesdev->intf_int_req = 0;
- }
- nesdev->intf_int_req |= (1 << (PCI_FUNC(nesdev->pcidev->devfn)+16));
- nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS0, 0);
- nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS1, 0);
- nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS2, 0x00001265);
- nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS4, 0x18021804);
-
- nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS3, 0x17801790);
-
- /* deal with both periodic and one_shot */
- nesdev->timer_int_req = 0x101 << PCI_FUNC(nesdev->pcidev->devfn);
- nesdev->nesadapter->timer_int_req |= nesdev->timer_int_req;
- nes_debug(NES_DBG_INIT, "setting int_req for function %u, nesdev = 0x%04X, adapter = 0x%04X\n",
- PCI_FUNC(nesdev->pcidev->devfn),
- nesdev->timer_int_req, nesdev->nesadapter->timer_int_req);
-
- nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req));
-
- list_add_tail(&nesdev->list, &nes_dev_list);
-
- /* Request an interrupt line for the driver */
- ret = request_irq(pcidev->irq, nes_interrupt, IRQF_SHARED, DRV_NAME, nesdev);
- if (ret) {
- printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n",
- pci_name(pcidev), pcidev->irq);
- goto bail65;
- }
-
- nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
-
- if (nes_notifiers_registered == 0) {
- register_inetaddr_notifier(&nes_inetaddr_notifier);
- register_netevent_notifier(&nes_net_notifier);
- }
- nes_notifiers_registered++;
-
- if (ibnl_add_client(RDMA_NL_NES, RDMA_NL_IWPM_NUM_OPS, nes_nl_cb_table))
- printk(KERN_ERR PFX "%s[%u]: Failed to add netlink callback\n",
- __func__, __LINE__);
-
- ret = iwpm_init(RDMA_NL_NES);
- if (ret) {
- printk(KERN_ERR PFX "%s: port mapper initialization failed\n",
- pci_name(pcidev));
- goto bail7;
- }
-
- INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status);
-
- /* Initialize network devices */
- netdev = nes_netdev_init(nesdev, mmio_regs);
- if (netdev == NULL) {
- ret = -ENOMEM;
- goto bail7;
- }
-
- /* Register network device */
- ret = register_netdev(netdev);
- if (ret) {
- printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n", ret);
- nes_netdev_destroy(netdev);
- goto bail7;
- }
-
- nes_print_macaddr(netdev);
-
- nesdev->netdev_count++;
- nesdev->nesadapter->netdev_count++;
-
- printk(KERN_INFO PFX "%s: NetEffect RNIC driver successfully loaded.\n",
- pci_name(pcidev));
- return 0;
-
- bail7:
- printk(KERN_ERR PFX "bail7\n");
- while (nesdev->netdev_count > 0) {
- nesdev->netdev_count--;
- nesdev->nesadapter->netdev_count--;
-
- unregister_netdev(nesdev->netdev[nesdev->netdev_count]);
- nes_netdev_destroy(nesdev->netdev[nesdev->netdev_count]);
- }
-
- nes_debug(NES_DBG_INIT, "netdev_count=%d, nesadapter->netdev_count=%d\n",
- nesdev->netdev_count, nesdev->nesadapter->netdev_count);
- ibnl_remove_client(RDMA_NL_NES);
-
- nes_notifiers_registered--;
- if (nes_notifiers_registered == 0) {
- unregister_netevent_notifier(&nes_net_notifier);
- unregister_inetaddr_notifier(&nes_inetaddr_notifier);
- }
-
- list_del(&nesdev->list);
- nes_destroy_cqp(nesdev);
-
- bail65:
- printk(KERN_ERR PFX "bail65\n");
- free_irq(pcidev->irq, nesdev);
- if (nesdev->msi_enabled) {
- pci_disable_msi(pcidev);
- }
- bail6:
- printk(KERN_ERR PFX "bail6\n");
- tasklet_kill(&nesdev->dpc_tasklet);
- /* Deallocate the Adapter Structure */
- nes_destroy_adapter(nesdev->nesadapter);
-
- bail5:
- printk(KERN_ERR PFX "bail5\n");
- iounmap(nesdev->regs);
-
- bail3:
- printk(KERN_ERR PFX "bail3\n");
- kfree(nesdev);
-
- bail2:
- pci_release_regions(pcidev);
-
- bail1:
- pci_disable_device(pcidev);
-
- bail0:
- return ret;
-}
-
-
-/**
- * nes_remove - unload from kernel
- */
-static void nes_remove(struct pci_dev *pcidev)
-{
- struct nes_device *nesdev = pci_get_drvdata(pcidev);
- struct net_device *netdev;
- int netdev_index = 0;
- unsigned long flags;
-
- if (nesdev->netdev_count) {
- netdev = nesdev->netdev[netdev_index];
- if (netdev) {
- netif_stop_queue(netdev);
- unregister_netdev(netdev);
- nes_netdev_destroy(netdev);
-
- nesdev->netdev[netdev_index] = NULL;
- nesdev->netdev_count--;
- nesdev->nesadapter->netdev_count--;
- }
- }
- ibnl_remove_client(RDMA_NL_NES);
- iwpm_exit(RDMA_NL_NES);
-
- nes_notifiers_registered--;
- if (nes_notifiers_registered == 0) {
- unregister_netevent_notifier(&nes_net_notifier);
- unregister_inetaddr_notifier(&nes_inetaddr_notifier);
- }
-
- list_del(&nesdev->list);
- nes_destroy_cqp(nesdev);
-
- free_irq(pcidev->irq, nesdev);
- tasklet_kill(&nesdev->dpc_tasklet);
-
- spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
- if (nesdev->link_recheck) {
- spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
- cancel_delayed_work_sync(&nesdev->work);
- } else {
- spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
- }
-
- /* Deallocate the Adapter Structure */
- nes_destroy_adapter(nesdev->nesadapter);
-
- if (nesdev->msi_enabled) {
- pci_disable_msi(pcidev);
- }
-
- iounmap(nesdev->regs);
- kfree(nesdev);
-
- /* nes_debug(NES_DBG_SHUTDOWN, "calling pci_release_regions.\n"); */
- pci_release_regions(pcidev);
- pci_disable_device(pcidev);
- pci_set_drvdata(pcidev, NULL);
-}
-
-
-static struct pci_driver nes_pci_driver = {
- .name = DRV_NAME,
- .id_table = nes_pci_table,
- .probe = nes_probe,
- .remove = nes_remove,
-};
-
-static ssize_t nes_show_adapter(struct device_driver *ddp, char *buf)
-{
- unsigned int devfn = 0xffffffff;
- unsigned char bus_number = 0xff;
- unsigned int i = 0;
- struct nes_device *nesdev;
-
- list_for_each_entry(nesdev, &nes_dev_list, list) {
- if (i == ee_flsh_adapter) {
- devfn = nesdev->pcidev->devfn;
- bus_number = nesdev->pcidev->bus->number;
- break;
- }
- i++;
- }
-
- return snprintf(buf, PAGE_SIZE, "%x:%x\n", bus_number, devfn);
-}
-
-static ssize_t nes_store_adapter(struct device_driver *ddp,
- const char *buf, size_t count)
-{
- char *p = (char *)buf;
-
- ee_flsh_adapter = simple_strtoul(p, &p, 10);
- return strnlen(buf, count);
-}
-
-static ssize_t nes_show_ee_cmd(struct device_driver *ddp, char *buf)
-{
- u32 eeprom_cmd = 0xdead;
- u32 i = 0;
- struct nes_device *nesdev;
-
- list_for_each_entry(nesdev, &nes_dev_list, list) {
- if (i == ee_flsh_adapter) {
- eeprom_cmd = nes_read32(nesdev->regs + NES_EEPROM_COMMAND);
- break;
- }
- i++;
- }
- return snprintf(buf, PAGE_SIZE, "0x%x\n", eeprom_cmd);
-}
-
-static ssize_t nes_store_ee_cmd(struct device_driver *ddp,
- const char *buf, size_t count)
-{
- char *p = (char *)buf;
- u32 val;
- u32 i = 0;
- struct nes_device *nesdev;
-
- if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
- val = simple_strtoul(p, &p, 16);
- list_for_each_entry(nesdev, &nes_dev_list, list) {
- if (i == ee_flsh_adapter) {
- nes_write32(nesdev->regs + NES_EEPROM_COMMAND, val);
- break;
- }
- i++;
- }
- }
- return strnlen(buf, count);
-}
-
-static ssize_t nes_show_ee_data(struct device_driver *ddp, char *buf)
-{
- u32 eeprom_data = 0xdead;
- u32 i = 0;
- struct nes_device *nesdev;
-
- list_for_each_entry(nesdev, &nes_dev_list, list) {
- if (i == ee_flsh_adapter) {
- eeprom_data = nes_read32(nesdev->regs + NES_EEPROM_DATA);
- break;
- }
- i++;
- }
-
- return snprintf(buf, PAGE_SIZE, "0x%x\n", eeprom_data);
-}
-
-static ssize_t nes_store_ee_data(struct device_driver *ddp,
- const char *buf, size_t count)
-{
- char *p = (char *)buf;
- u32 val;
- u32 i = 0;
- struct nes_device *nesdev;
-
- if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
- val = simple_strtoul(p, &p, 16);
- list_for_each_entry(nesdev, &nes_dev_list, list) {
- if (i == ee_flsh_adapter) {
- nes_write32(nesdev->regs + NES_EEPROM_DATA, val);
- break;
- }
- i++;
- }
- }
- return strnlen(buf, count);
-}
-
-static ssize_t nes_show_flash_cmd(struct device_driver *ddp, char *buf)
-{
- u32 flash_cmd = 0xdead;
- u32 i = 0;
- struct nes_device *nesdev;
-
- list_for_each_entry(nesdev, &nes_dev_list, list) {
- if (i == ee_flsh_adapter) {
- flash_cmd = nes_read32(nesdev->regs + NES_FLASH_COMMAND);
- break;
- }
- i++;
- }
-
- return snprintf(buf, PAGE_SIZE, "0x%x\n", flash_cmd);
-}
-
-static ssize_t nes_store_flash_cmd(struct device_driver *ddp,
- const char *buf, size_t count)
-{
- char *p = (char *)buf;
- u32 val;
- u32 i = 0;
- struct nes_device *nesdev;
-
- if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
- val = simple_strtoul(p, &p, 16);
- list_for_each_entry(nesdev, &nes_dev_list, list) {
- if (i == ee_flsh_adapter) {
- nes_write32(nesdev->regs + NES_FLASH_COMMAND, val);
- break;
- }
- i++;
- }
- }
- return strnlen(buf, count);
-}
-
-static ssize_t nes_show_flash_data(struct device_driver *ddp, char *buf)
-{
- u32 flash_data = 0xdead;
- u32 i = 0;
- struct nes_device *nesdev;
-
- list_for_each_entry(nesdev, &nes_dev_list, list) {
- if (i == ee_flsh_adapter) {
- flash_data = nes_read32(nesdev->regs + NES_FLASH_DATA);
- break;
- }
- i++;
- }
-
- return snprintf(buf, PAGE_SIZE, "0x%x\n", flash_data);
-}
-
-static ssize_t nes_store_flash_data(struct device_driver *ddp,
- const char *buf, size_t count)
-{
- char *p = (char *)buf;
- u32 val;
- u32 i = 0;
- struct nes_device *nesdev;
-
- if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
- val = simple_strtoul(p, &p, 16);
- list_for_each_entry(nesdev, &nes_dev_list, list) {
- if (i == ee_flsh_adapter) {
- nes_write32(nesdev->regs + NES_FLASH_DATA, val);
- break;
- }
- i++;
- }
- }
- return strnlen(buf, count);
-}
-
-static ssize_t nes_show_nonidx_addr(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "0x%x\n", sysfs_nonidx_addr);
-}
-
-static ssize_t nes_store_nonidx_addr(struct device_driver *ddp,
- const char *buf, size_t count)
-{
- char *p = (char *)buf;
-
- if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X')
- sysfs_nonidx_addr = simple_strtoul(p, &p, 16);
-
- return strnlen(buf, count);
-}
-
-static ssize_t nes_show_nonidx_data(struct device_driver *ddp, char *buf)
-{
- u32 nonidx_data = 0xdead;
- u32 i = 0;
- struct nes_device *nesdev;
-
- list_for_each_entry(nesdev, &nes_dev_list, list) {
- if (i == ee_flsh_adapter) {
- nonidx_data = nes_read32(nesdev->regs + sysfs_nonidx_addr);
- break;
- }
- i++;
- }
-
- return snprintf(buf, PAGE_SIZE, "0x%x\n", nonidx_data);
-}
-
-static ssize_t nes_store_nonidx_data(struct device_driver *ddp,
- const char *buf, size_t count)
-{
- char *p = (char *)buf;
- u32 val;
- u32 i = 0;
- struct nes_device *nesdev;
-
- if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
- val = simple_strtoul(p, &p, 16);
- list_for_each_entry(nesdev, &nes_dev_list, list) {
- if (i == ee_flsh_adapter) {
- nes_write32(nesdev->regs + sysfs_nonidx_addr, val);
- break;
- }
- i++;
- }
- }
- return strnlen(buf, count);
-}
-
-static ssize_t nes_show_idx_addr(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "0x%x\n", sysfs_idx_addr);
-}
-
-static ssize_t nes_store_idx_addr(struct device_driver *ddp,
- const char *buf, size_t count)
-{
- char *p = (char *)buf;
-
- if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X')
- sysfs_idx_addr = simple_strtoul(p, &p, 16);
-
- return strnlen(buf, count);
-}
-
-static ssize_t nes_show_idx_data(struct device_driver *ddp, char *buf)
-{
- u32 idx_data = 0xdead;
- u32 i = 0;
- struct nes_device *nesdev;
-
- list_for_each_entry(nesdev, &nes_dev_list, list) {
- if (i == ee_flsh_adapter) {
- idx_data = nes_read_indexed(nesdev, sysfs_idx_addr);
- break;
- }
- i++;
- }
-
- return snprintf(buf, PAGE_SIZE, "0x%x\n", idx_data);
-}
-
-static ssize_t nes_store_idx_data(struct device_driver *ddp,
- const char *buf, size_t count)
-{
- char *p = (char *)buf;
- u32 val;
- u32 i = 0;
- struct nes_device *nesdev;
-
- if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
- val = simple_strtoul(p, &p, 16);
- list_for_each_entry(nesdev, &nes_dev_list, list) {
- if (i == ee_flsh_adapter) {
- nes_write_indexed(nesdev, sysfs_idx_addr, val);
- break;
- }
- i++;
- }
- }
- return strnlen(buf, count);
-}
-
-
-/**
- * nes_show_wqm_quanta - report the WQM quanta of the selected adapter
- */
-static ssize_t nes_show_wqm_quanta(struct device_driver *ddp, char *buf)
-{
- u32 wqm_quanta_value = 0xdead;
- u32 i = 0;
- struct nes_device *nesdev;
-
- list_for_each_entry(nesdev, &nes_dev_list, list) {
- if (i == ee_flsh_adapter) {
- wqm_quanta_value = nesdev->nesadapter->wqm_quanta;
- break;
- }
- i++;
- }
-
- return snprintf(buf, PAGE_SIZE, "0x%X\n", wqm_quanta_value);
-}
-
-
-/**
- * nes_store_wqm_quanta - set the WQM quanta of the selected adapter and update NES_IDX_WQM_CONFIG1
- */
-static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
- const char *buf, size_t count)
-{
- unsigned long wqm_quanta_value;
- u32 wqm_config1;
- u32 i = 0;
- struct nes_device *nesdev;
-
- if (kstrtoul(buf, 0, &wqm_quanta_value) < 0)
- return -EINVAL;
-
- list_for_each_entry(nesdev, &nes_dev_list, list) {
- if (i == ee_flsh_adapter) {
- nesdev->nesadapter->wqm_quanta = wqm_quanta_value;
- wqm_config1 = nes_read_indexed(nesdev,
- NES_IDX_WQM_CONFIG1);
- nes_write_indexed(nesdev, NES_IDX_WQM_CONFIG1,
- ((wqm_quanta_value << 1) |
- (wqm_config1 & 0x00000001)));
- break;
- }
- i++;
- }
- return strnlen(buf, count);
-}
-
-static DRIVER_ATTR(adapter, S_IRUSR | S_IWUSR,
- nes_show_adapter, nes_store_adapter);
-static DRIVER_ATTR(eeprom_cmd, S_IRUSR | S_IWUSR,
- nes_show_ee_cmd, nes_store_ee_cmd);
-static DRIVER_ATTR(eeprom_data, S_IRUSR | S_IWUSR,
- nes_show_ee_data, nes_store_ee_data);
-static DRIVER_ATTR(flash_cmd, S_IRUSR | S_IWUSR,
- nes_show_flash_cmd, nes_store_flash_cmd);
-static DRIVER_ATTR(flash_data, S_IRUSR | S_IWUSR,
- nes_show_flash_data, nes_store_flash_data);
-static DRIVER_ATTR(nonidx_addr, S_IRUSR | S_IWUSR,
- nes_show_nonidx_addr, nes_store_nonidx_addr);
-static DRIVER_ATTR(nonidx_data, S_IRUSR | S_IWUSR,
- nes_show_nonidx_data, nes_store_nonidx_data);
-static DRIVER_ATTR(idx_addr, S_IRUSR | S_IWUSR,
- nes_show_idx_addr, nes_store_idx_addr);
-static DRIVER_ATTR(idx_data, S_IRUSR | S_IWUSR,
- nes_show_idx_data, nes_store_idx_data);
-static DRIVER_ATTR(wqm_quanta, S_IRUSR | S_IWUSR,
- nes_show_wqm_quanta, nes_store_wqm_quanta);
-
-static int nes_create_driver_sysfs(struct pci_driver *drv)
-{
- int error;
- error = driver_create_file(&drv->driver, &driver_attr_adapter);
- error |= driver_create_file(&drv->driver, &driver_attr_eeprom_cmd);
- error |= driver_create_file(&drv->driver, &driver_attr_eeprom_data);
- error |= driver_create_file(&drv->driver, &driver_attr_flash_cmd);
- error |= driver_create_file(&drv->driver, &driver_attr_flash_data);
- error |= driver_create_file(&drv->driver, &driver_attr_nonidx_addr);
- error |= driver_create_file(&drv->driver, &driver_attr_nonidx_data);
- error |= driver_create_file(&drv->driver, &driver_attr_idx_addr);
- error |= driver_create_file(&drv->driver, &driver_attr_idx_data);
- error |= driver_create_file(&drv->driver, &driver_attr_wqm_quanta);
- return error;
-}
-
-static void nes_remove_driver_sysfs(struct pci_driver *drv)
-{
- driver_remove_file(&drv->driver, &driver_attr_adapter);
- driver_remove_file(&drv->driver, &driver_attr_eeprom_cmd);
- driver_remove_file(&drv->driver, &driver_attr_eeprom_data);
- driver_remove_file(&drv->driver, &driver_attr_flash_cmd);
- driver_remove_file(&drv->driver, &driver_attr_flash_data);
- driver_remove_file(&drv->driver, &driver_attr_nonidx_addr);
- driver_remove_file(&drv->driver, &driver_attr_nonidx_data);
- driver_remove_file(&drv->driver, &driver_attr_idx_addr);
- driver_remove_file(&drv->driver, &driver_attr_idx_data);
- driver_remove_file(&drv->driver, &driver_attr_wqm_quanta);
-}
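/*
 * Editorial aside, not part of the original patch: a usage note for the
 * driver attributes registered above.  Assuming nes_pci_driver is registered
 * under DRV_NAME ("iw_nes", defined in nes.h further down in this diff),
 * these files should appear under /sys/bus/pci/drivers/iw_nes/.  Most of the
 * *_store handlers only act on values written with an "x"/"0x" hex prefix
 * (wqm_quanta uses kstrtoul with base 0 and also takes plain decimal), and
 * the adapter attribute presumably selects, via ee_flsh_adapter, which
 * device in nes_dev_list the other attributes operate on, e.g.:
 *
 *     echo 0x<indexed-register-offset> > /sys/bus/pci/drivers/iw_nes/idx_addr
 *     cat /sys/bus/pci/drivers/iw_nes/idx_data
 */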
-
-/**
- * nes_init_module - module initialization entry point
- */
-static int __init nes_init_module(void)
-{
- int retval;
- int retval1;
-
- retval = nes_cm_start();
- if (retval) {
- printk(KERN_ERR PFX "Unable to start NetEffect iWARP CM.\n");
- return retval;
- }
- retval = pci_register_driver(&nes_pci_driver);
- if (retval >= 0) {
- retval1 = nes_create_driver_sysfs(&nes_pci_driver);
- if (retval1 < 0)
- printk(KERN_ERR PFX "Unable to create NetEffect sys files.\n");
- }
- return retval;
-}
-
-
-/**
- * nes_exit_module - module unload entry point
- */
-static void __exit nes_exit_module(void)
-{
- nes_cm_stop();
- nes_remove_driver_sysfs(&nes_pci_driver);
-
- pci_unregister_driver(&nes_pci_driver);
-}
-
-
-module_init(nes_init_module);
-module_exit(nes_exit_module);
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
deleted file mode 100644
index bd9d132f11c7..000000000000
--- a/drivers/infiniband/hw/nes/nes.h
+++ /dev/null
@@ -1,582 +0,0 @@
-/*
- * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef __NES_H
-#define __NES_H
-
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/workqueue.h>
-#include <linux/slab.h>
-#include <asm/io.h>
-#include <linux/crc32c.h>
-
-#include <rdma/ib_smi.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_pack.h>
-#include <rdma/rdma_cm.h>
-#include <rdma/iw_cm.h>
-#include <rdma/rdma_netlink.h>
-#include <rdma/iw_portmap.h>
-
-#define NES_SEND_FIRST_WRITE
-
-#define QUEUE_DISCONNECTS
-
-#define DRV_NAME "iw_nes"
-#define DRV_VERSION "1.5.0.1"
-#define PFX DRV_NAME ": "
-
-/*
- * NetEffect PCI vendor id and NE020 PCI device ids.
- */
-#ifndef PCI_VENDOR_ID_NETEFFECT /* not in pci.ids yet */
-#define PCI_VENDOR_ID_NETEFFECT 0x1678
-#define PCI_DEVICE_ID_NETEFFECT_NE020 0x0100
-#define PCI_DEVICE_ID_NETEFFECT_NE020_KR 0x0110
-#endif
-
-#define NE020_REV 4
-#define NE020_REV1 5
-
-#define BAR_0 0
-#define BAR_1 2
-
-#define RX_BUF_SIZE (1536 + 8)
-#define NES_REG0_SIZE (4 * 1024)
-#define NES_TX_TIMEOUT (6*HZ)
-#define NES_FIRST_QPN 64
-#define NES_SW_CONTEXT_ALIGN 1024
-
-#define NES_NIC_MAX_NICS 16
-#define NES_MAX_ARP_TABLE_SIZE 4096
-
-#define NES_NIC_CEQ_SIZE 8
-/* NICs will be on a separate CQ */
-#define NES_CCEQ_SIZE ((nesadapter->max_cq / nesadapter->port_count) - 32)
-
-#define NES_MAX_PORT_COUNT 4
-
-#define MAX_DPC_ITERATIONS 128
-
-#define NES_DRV_OPT_ENABLE_MPA_VER_0 0x00000001
-#define NES_DRV_OPT_DISABLE_MPA_CRC 0x00000002
-#define NES_DRV_OPT_DISABLE_FIRST_WRITE 0x00000004
-#define NES_DRV_OPT_DISABLE_INTF 0x00000008
-#define NES_DRV_OPT_ENABLE_MSI 0x00000010
-#define NES_DRV_OPT_DUAL_LOGICAL_PORT 0x00000020
-#define NES_DRV_OPT_SUPRESS_OPTION_BC 0x00000040
-#define NES_DRV_OPT_NO_INLINE_DATA 0x00000080
-#define NES_DRV_OPT_DISABLE_INT_MOD 0x00000100
-#define NES_DRV_OPT_DISABLE_VIRT_WQ 0x00000200
-#define NES_DRV_OPT_ENABLE_PAU 0x00000400
-
-#define NES_AEQ_EVENT_TIMEOUT 2500
-#define NES_DISCONNECT_EVENT_TIMEOUT 2000
-
-/* debug levels */
-/* must match userspace */
-#define NES_DBG_HW 0x00000001
-#define NES_DBG_INIT 0x00000002
-#define NES_DBG_ISR 0x00000004
-#define NES_DBG_PHY 0x00000008
-#define NES_DBG_NETDEV 0x00000010
-#define NES_DBG_CM 0x00000020
-#define NES_DBG_CM1 0x00000040
-#define NES_DBG_NIC_RX 0x00000080
-#define NES_DBG_NIC_TX 0x00000100
-#define NES_DBG_CQP 0x00000200
-#define NES_DBG_MMAP 0x00000400
-#define NES_DBG_MR 0x00000800
-#define NES_DBG_PD 0x00001000
-#define NES_DBG_CQ 0x00002000
-#define NES_DBG_QP 0x00004000
-#define NES_DBG_MOD_QP 0x00008000
-#define NES_DBG_AEQ 0x00010000
-#define NES_DBG_IW_RX 0x00020000
-#define NES_DBG_IW_TX 0x00040000
-#define NES_DBG_SHUTDOWN 0x00080000
-#define NES_DBG_PAU 0x00100000
-#define NES_DBG_NLMSG 0x00200000
-#define NES_DBG_RSVD1 0x10000000
-#define NES_DBG_RSVD2 0x20000000
-#define NES_DBG_RSVD3 0x40000000
-#define NES_DBG_RSVD4 0x80000000
-#define NES_DBG_ALL 0xffffffff
-
-#ifdef CONFIG_INFINIBAND_NES_DEBUG
-#define nes_debug(level, fmt, args...) \
-do { \
- if (level & nes_debug_level) \
- printk(KERN_ERR PFX "%s[%u]: " fmt, __func__, __LINE__, ##args); \
-} while (0)
-
-#define assert(expr) \
-do { \
- if (!(expr)) { \
- printk(KERN_ERR PFX "Assertion failed! %s, %s, %s, line %d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- } \
-} while (0)
-
-#define NES_EVENT_TIMEOUT 1200000
-#else
-#define nes_debug(level, fmt, args...)
-#define assert(expr) do {} while (0)
-
-#define NES_EVENT_TIMEOUT 100000
-#endif
-
-#include "nes_hw.h"
-#include "nes_verbs.h"
-#include "nes_context.h"
-#include "nes_user.h"
-#include "nes_cm.h"
-#include "nes_mgt.h"
-
-extern int max_mtu;
-#define max_frame_len (max_mtu+ETH_HLEN)
-extern int interrupt_mod_interval;
-extern int nes_if_count;
-extern int mpa_version;
-extern int disable_mpa_crc;
-extern unsigned int nes_drv_opt;
-extern unsigned int nes_debug_level;
-extern unsigned int wqm_quanta;
-extern struct list_head nes_adapter_list;
-
-extern atomic_t cm_connects;
-extern atomic_t cm_accepts;
-extern atomic_t cm_disconnects;
-extern atomic_t cm_closes;
-extern atomic_t cm_connecteds;
-extern atomic_t cm_connect_reqs;
-extern atomic_t cm_rejects;
-extern atomic_t mod_qp_timouts;
-extern atomic_t qps_created;
-extern atomic_t qps_destroyed;
-extern atomic_t sw_qps_destroyed;
-extern u32 mh_detected;
-extern u32 mh_pauses_sent;
-extern u32 cm_packets_sent;
-extern u32 cm_packets_bounced;
-extern u32 cm_packets_created;
-extern u32 cm_packets_received;
-extern u32 cm_packets_dropped;
-extern u32 cm_packets_retrans;
-extern atomic_t cm_listens_created;
-extern atomic_t cm_listens_destroyed;
-extern u32 cm_backlog_drops;
-extern atomic_t cm_loopbacks;
-extern atomic_t cm_nodes_created;
-extern atomic_t cm_nodes_destroyed;
-extern atomic_t cm_accel_dropped_pkts;
-extern atomic_t cm_resets_recvd;
-extern atomic_t pau_qps_created;
-extern atomic_t pau_qps_destroyed;
-
-extern u32 int_mod_timer_init;
-extern u32 int_mod_cq_depth_256;
-extern u32 int_mod_cq_depth_128;
-extern u32 int_mod_cq_depth_32;
-extern u32 int_mod_cq_depth_24;
-extern u32 int_mod_cq_depth_16;
-extern u32 int_mod_cq_depth_4;
-extern u32 int_mod_cq_depth_1;
-
-struct nes_device {
- struct nes_adapter *nesadapter;
- void __iomem *regs;
- void __iomem *index_reg;
- struct pci_dev *pcidev;
- struct net_device *netdev[NES_NIC_MAX_NICS];
- u64 link_status_interrupts;
- struct tasklet_struct dpc_tasklet;
- spinlock_t indexed_regs_lock;
- unsigned long csr_start;
- unsigned long doorbell_region;
- unsigned long doorbell_start;
- unsigned long mac_tx_errors;
- unsigned long mac_pause_frames_sent;
- unsigned long mac_pause_frames_received;
- unsigned long mac_rx_errors;
- unsigned long mac_rx_crc_errors;
- unsigned long mac_rx_symbol_err_frames;
- unsigned long mac_rx_jabber_frames;
- unsigned long mac_rx_oversized_frames;
- unsigned long mac_rx_short_frames;
- unsigned long port_rx_discards;
- unsigned long port_tx_discards;
- unsigned int mac_index;
- unsigned int nes_stack_start;
-
- /* Control Structures */
- void *cqp_vbase;
- dma_addr_t cqp_pbase;
- u32 cqp_mem_size;
- u8 ceq_index;
- u8 nic_ceq_index;
- struct nes_hw_cqp cqp;
- struct nes_hw_cq ccq;
- struct list_head cqp_avail_reqs;
- struct list_head cqp_pending_reqs;
- struct nes_cqp_request *nes_cqp_requests;
-
- u32 int_req;
- u32 int_stat;
- u32 timer_int_req;
- u32 timer_only_int_count;
- u32 intf_int_req;
- u32 last_mac_tx_pauses;
- u32 last_used_chunks_tx;
- struct list_head list;
-
- u16 base_doorbell_index;
- u16 currcq_count;
- u16 deepcq_count;
- u8 iw_status;
- u8 msi_enabled;
- u8 netdev_count;
- u8 napi_isr_ran;
- u8 disable_rx_flow_control;
- u8 disable_tx_flow_control;
-
- struct delayed_work work;
- u8 link_recheck;
-};
-
-/* Receive skb private area - must fit in skb->cb area */
-struct nes_rskb_cb {
- u64 busaddr;
- u32 maplen;
- u32 seqnum;
- u8 *data_start;
- struct nes_qp *nesqp;
-};
-
-static inline __le32 get_crc_value(struct nes_v4_quad *nes_quad)
-{
- u32 crc_value;
- crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad));
-
- /*
- * With commit ef19454b ("[LIB] crc32c: Keep intermediate crc
- * state in cpu order"), behavior of crc32c changes on
- * big-endian platforms. Our algorithm expects the previous
- * behavior; otherwise we have RDMA connection establishment
- * issue on big-endian.
- */
- return cpu_to_le32(crc_value);
-}
-
-static inline void
-set_wqe_64bit_value(__le32 *wqe_words, u32 index, u64 value)
-{
- wqe_words[index] = cpu_to_le32((u32) value);
- wqe_words[index + 1] = cpu_to_le32(upper_32_bits(value));
-}
-
-static inline void
-set_wqe_32bit_value(__le32 *wqe_words, u32 index, u32 value)
-{
- wqe_words[index] = cpu_to_le32(value);
-}
-
-static inline void
-nes_fill_init_cqp_wqe(struct nes_hw_cqp_wqe *cqp_wqe, struct nes_device *nesdev)
-{
- cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_LOW_IDX] = 0;
- cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_HIGH_IDX] = 0;
- cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX] = 0;
- cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX] = 0;
- cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX] = 0;
- cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_LEN_IDX] = 0;
- cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_LOW_IDX] = 0;
- cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PA_LOW_IDX] = 0;
- cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PA_HIGH_IDX] = 0;
-}
-
-static inline void
-nes_fill_init_qp_wqe(struct nes_hw_qp_wqe *wqe, struct nes_qp *nesqp, u32 head)
-{
- u32 value;
- value = ((u32)((unsigned long) nesqp)) | head;
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_HIGH_IDX,
- (u32)(upper_32_bits((unsigned long)(nesqp))));
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, value);
-}
-
-/* Read from memory-mapped device */
-static inline u32 nes_read_indexed(struct nes_device *nesdev, u32 reg_index)
-{
- unsigned long flags;
- void __iomem *addr = nesdev->index_reg;
- u32 value;
-
- spin_lock_irqsave(&nesdev->indexed_regs_lock, flags);
-
- writel(reg_index, addr);
- value = readl((void __iomem *)addr + 4);
-
- spin_unlock_irqrestore(&nesdev->indexed_regs_lock, flags);
- return value;
-}
-
-static inline u32 nes_read32(const void __iomem *addr)
-{
- return readl(addr);
-}
-
-static inline u16 nes_read16(const void __iomem *addr)
-{
- return readw(addr);
-}
-
-static inline u8 nes_read8(const void __iomem *addr)
-{
- return readb(addr);
-}
-
-/* Write to memory-mapped device */
-static inline void nes_write_indexed(struct nes_device *nesdev, u32 reg_index, u32 val)
-{
- unsigned long flags;
- void __iomem *addr = nesdev->index_reg;
-
- spin_lock_irqsave(&nesdev->indexed_regs_lock, flags);
-
- writel(reg_index, addr);
- writel(val, (void __iomem *)addr + 4);
-
- spin_unlock_irqrestore(&nesdev->indexed_regs_lock, flags);
-}
-
-static inline void nes_write32(void __iomem *addr, u32 val)
-{
- writel(val, addr);
-}
-
-static inline void nes_write16(void __iomem *addr, u16 val)
-{
- writew(val, addr);
-}
-
-static inline void nes_write8(void __iomem *addr, u8 val)
-{
- writeb(val, addr);
-}
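/*
 * Editorial aside, not part of the original patch: a minimal sketch of the
 * read-modify-write pattern these indexed-register helpers are used for
 * (compare nes_store_wqm_quanta earlier in this diff).  The helper name
 * below is hypothetical.  Note that each accessor takes indexed_regs_lock
 * for a single access only, so the read and the write here are two
 * separately locked operations, not one atomic RMW.
 */
static inline void nes_example_set_indexed_bits(struct nes_device *nesdev,
						u32 reg_index, u32 mask)
{
	u32 val;

	val = nes_read_indexed(nesdev, reg_index);	  /* latch current value */
	nes_write_indexed(nesdev, reg_index, val | mask); /* write back with bits set */
}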
-
-enum nes_resource {
- NES_RESOURCE_MW = 1,
- NES_RESOURCE_FAST_MR,
- NES_RESOURCE_PHYS_MR,
- NES_RESOURCE_USER_MR,
- NES_RESOURCE_PD,
- NES_RESOURCE_QP,
- NES_RESOURCE_CQ,
- NES_RESOURCE_ARP
-};
-
-static inline int nes_alloc_resource(struct nes_adapter *nesadapter,
- unsigned long *resource_array, u32 max_resources,
- u32 *req_resource_num, u32 *next, enum nes_resource resource_type)
-{
- unsigned long flags;
- u32 resource_num;
-
- spin_lock_irqsave(&nesadapter->resource_lock, flags);
-
- resource_num = find_next_zero_bit(resource_array, max_resources, *next);
- if (resource_num >= max_resources) {
- resource_num = find_first_zero_bit(resource_array, max_resources);
- if (resource_num >= max_resources) {
- printk(KERN_ERR PFX "%s: No available resources [type=%u].\n", __func__, resource_type);
- spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
- return -EMFILE;
- }
- }
- set_bit(resource_num, resource_array);
- *next = resource_num+1;
- if (*next == max_resources) {
- *next = 0;
- }
- spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
- *req_resource_num = resource_num;
-
- return 0;
-}
-
-static inline int nes_is_resource_allocated(struct nes_adapter *nesadapter,
- unsigned long *resource_array, u32 resource_num)
-{
- unsigned long flags;
- int bit_is_set;
-
- spin_lock_irqsave(&nesadapter->resource_lock, flags);
-
- bit_is_set = test_bit(resource_num, resource_array);
- nes_debug(NES_DBG_HW, "resource_num %u is%s allocated.\n",
- resource_num, (bit_is_set ? "": " not"));
- spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
-
- return bit_is_set;
-}
-
-static inline void nes_free_resource(struct nes_adapter *nesadapter,
- unsigned long *resource_array, u32 resource_num)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&nesadapter->resource_lock, flags);
- clear_bit(resource_num, resource_array);
- spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
-}
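/*
 * Editorial aside, not part of the original patch: a minimal sketch of how
 * the bitmap allocator above is meant to be used.  The bitmap, size and
 * rotating "next" hint parameters here are hypothetical stand-ins for the
 * per-resource arrays that struct nes_adapter presumably keeps; real callers
 * pass those together with the matching enum nes_resource value.
 */
static inline int nes_example_get_and_put_qp_num(struct nes_adapter *nesadapter,
						 unsigned long *qp_bitmap,
						 u32 max_qps, u32 *next_qp_hint,
						 u32 *qp_num)
{
	int ret;

	/* claim the next free bit, wrapping around via *next_qp_hint */
	ret = nes_alloc_resource(nesadapter, qp_bitmap, max_qps,
				 qp_num, next_qp_hint, NES_RESOURCE_QP);
	if (ret)
		return ret;	/* -EMFILE: no free bit left */

	/* ... use *qp_num ..., then release it again when done: */
	nes_free_resource(nesadapter, qp_bitmap, *qp_num);
	return 0;
}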
-
-static inline struct nes_vnic *to_nesvnic(struct ib_device *ibdev)
-{
- return container_of(ibdev, struct nes_ib_device, ibdev)->nesvnic;
-}
-
-static inline struct nes_pd *to_nespd(struct ib_pd *ibpd)
-{
- return container_of(ibpd, struct nes_pd, ibpd);
-}
-
-static inline struct nes_ucontext *to_nesucontext(struct ib_ucontext *ibucontext)
-{
- return container_of(ibucontext, struct nes_ucontext, ibucontext);
-}
-
-static inline struct nes_mr *to_nesmr(struct ib_mr *ibmr)
-{
- return container_of(ibmr, struct nes_mr, ibmr);
-}
-
-static inline struct nes_mr *to_nesmr_from_ibfmr(struct ib_fmr *ibfmr)
-{
- return container_of(ibfmr, struct nes_mr, ibfmr);
-}
-
-static inline struct nes_mr *to_nesmw(struct ib_mw *ibmw)
-{
- return container_of(ibmw, struct nes_mr, ibmw);
-}
-
-static inline struct nes_fmr *to_nesfmr(struct nes_mr *nesmr)
-{
- return container_of(nesmr, struct nes_fmr, nesmr);
-}
-
-static inline struct nes_cq *to_nescq(struct ib_cq *ibcq)
-{
- return container_of(ibcq, struct nes_cq, ibcq);
-}
-
-static inline struct nes_qp *to_nesqp(struct ib_qp *ibqp)
-{
- return container_of(ibqp, struct nes_qp, ibqp);
-}
-
-
-
-/* nes.c */
-void nes_add_ref(struct ib_qp *);
-void nes_rem_ref(struct ib_qp *);
-struct ib_qp *nes_get_qp(struct ib_device *, int);
-
-
-/* nes_hw.c */
-struct nes_adapter *nes_init_adapter(struct nes_device *, u8);
-void nes_nic_init_timer_defaults(struct nes_device *, u8);
-void nes_destroy_adapter(struct nes_adapter *);
-int nes_init_cqp(struct nes_device *);
-int nes_init_phy(struct nes_device *);
-int nes_init_nic_qp(struct nes_device *, struct net_device *);
-void nes_destroy_nic_qp(struct nes_vnic *);
-int nes_napi_isr(struct nes_device *);
-void nes_dpc(unsigned long);
-void nes_nic_ce_handler(struct nes_device *, struct nes_hw_nic_cq *);
-void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
-int nes_destroy_cqp(struct nes_device *);
-int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
-void nes_recheck_link_status(struct work_struct *work);
-void nes_terminate_timeout(unsigned long context);
-
-/* nes_nic.c */
-struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
-void nes_netdev_destroy(struct net_device *);
-int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
-
-/* nes_cm.c */
-void *nes_cm_create(struct net_device *);
-int nes_cm_recv(struct sk_buff *, struct net_device *);
-void nes_update_arp(unsigned char *, u32, u32, u16, u16);
-void nes_manage_arp_cache(struct net_device *, unsigned char *, u32, u32);
-void nes_sock_release(struct nes_qp *, unsigned long *);
-void flush_wqes(struct nes_device *nesdev, struct nes_qp *, u32, u32);
-int nes_manage_apbvt(struct nes_vnic *, u32, u32, u32);
-int nes_cm_disconn(struct nes_qp *);
-void nes_cm_disconn_worker(void *);
-
-/* nes_verbs.c */
-int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32, u32);
-int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
-struct nes_ib_device *nes_init_ofa_device(struct net_device *);
-void nes_port_ibevent(struct nes_vnic *nesvnic);
-void nes_destroy_ofa_device(struct nes_ib_device *);
-int nes_register_ofa_device(struct nes_ib_device *);
-
-/* nes_util.c */
-int nes_read_eeprom_values(struct nes_device *, struct nes_adapter *);
-void nes_write_1G_phy_reg(struct nes_device *, u8, u8, u16);
-void nes_read_1G_phy_reg(struct nes_device *, u8, u8, u16 *);
-void nes_write_10G_phy_reg(struct nes_device *, u16, u8, u16, u16);
-void nes_read_10G_phy_reg(struct nes_device *, u8, u8, u16);
-struct nes_cqp_request *nes_get_cqp_request(struct nes_device *);
-void nes_free_cqp_request(struct nes_device *nesdev,
- struct nes_cqp_request *cqp_request);
-void nes_put_cqp_request(struct nes_device *nesdev,
- struct nes_cqp_request *cqp_request);
-void nes_post_cqp_request(struct nes_device *, struct nes_cqp_request *);
-int nes_arp_table(struct nes_device *, u32, u8 *, u32);
-void nes_mh_fix(unsigned long);
-void nes_clc(unsigned long);
-void nes_dump_mem(unsigned int, void *, int);
-u32 nes_crc32(u32, u32, u32, u32, u8 *, u32, u32, u32);
-
-#endif /* __NES_H */
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
deleted file mode 100644
index 8a3ad170d790..000000000000
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ /dev/null
@@ -1,4192 +0,0 @@
-/*
- * Copyright (c) 2006 - 2014 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-
-#define TCPOPT_TIMESTAMP 8
-
-#include <linux/atomic.h>
-#include <linux/skbuff.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/init.h>
-#include <linux/if_arp.h>
-#include <linux/if_vlan.h>
-#include <linux/notifier.h>
-#include <linux/net.h>
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/time.h>
-#include <linux/delay.h>
-#include <linux/etherdevice.h>
-#include <linux/netdevice.h>
-#include <linux/random.h>
-#include <linux/list.h>
-#include <linux/threads.h>
-#include <linux/highmem.h>
-#include <linux/slab.h>
-#include <net/arp.h>
-#include <net/neighbour.h>
-#include <net/route.h>
-#include <net/ip_fib.h>
-#include <net/tcp.h>
-#include <linux/fcntl.h>
-
-#include "nes.h"
-
-u32 cm_packets_sent;
-u32 cm_packets_bounced;
-u32 cm_packets_dropped;
-u32 cm_packets_retrans;
-u32 cm_packets_created;
-u32 cm_packets_received;
-atomic_t cm_listens_created;
-atomic_t cm_listens_destroyed;
-u32 cm_backlog_drops;
-atomic_t cm_loopbacks;
-atomic_t cm_nodes_created;
-atomic_t cm_nodes_destroyed;
-atomic_t cm_accel_dropped_pkts;
-atomic_t cm_resets_recvd;
-
-static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
-static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
-static int mini_cm_del_listen(struct nes_cm_core *, struct nes_cm_listener *);
-static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *, struct nes_vnic *, u16, void *, struct nes_cm_info *);
-static int mini_cm_close(struct nes_cm_core *, struct nes_cm_node *);
-static int mini_cm_accept(struct nes_cm_core *, struct nes_cm_node *);
-static int mini_cm_reject(struct nes_cm_core *, struct nes_cm_node *);
-static int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *, struct sk_buff *);
-static int mini_cm_dealloc_core(struct nes_cm_core *);
-static int mini_cm_get(struct nes_cm_core *);
-static int mini_cm_set(struct nes_cm_core *, u32, u32);
-
-static void form_cm_frame(struct sk_buff *, struct nes_cm_node *, void *, u32, void *, u32, u8);
-static int add_ref_cm_node(struct nes_cm_node *);
-static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *);
-
-static int nes_cm_disconn_true(struct nes_qp *);
-static int nes_cm_post_event(struct nes_cm_event *event);
-static int nes_disconnect(struct nes_qp *nesqp, int abrupt);
-static void nes_disconnect_worker(struct work_struct *work);
-
-static int send_mpa_request(struct nes_cm_node *, struct sk_buff *);
-static int send_mpa_reject(struct nes_cm_node *);
-static int send_syn(struct nes_cm_node *, u32, struct sk_buff *);
-static int send_reset(struct nes_cm_node *, struct sk_buff *);
-static int send_ack(struct nes_cm_node *cm_node, struct sk_buff *skb);
-static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb);
-static void process_packet(struct nes_cm_node *, struct sk_buff *, struct nes_cm_core *);
-
-static void active_open_err(struct nes_cm_node *, struct sk_buff *, int);
-static void passive_open_err(struct nes_cm_node *, struct sk_buff *, int);
-static void cleanup_retrans_entry(struct nes_cm_node *);
-static void handle_rcv_mpa(struct nes_cm_node *, struct sk_buff *);
-static void free_retrans_entry(struct nes_cm_node *cm_node);
-static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph, struct sk_buff *skb, int optionsize, int passive);
-
-/* CM event handler functions */
-static void cm_event_connected(struct nes_cm_event *);
-static void cm_event_connect_error(struct nes_cm_event *);
-static void cm_event_reset(struct nes_cm_event *);
-static void cm_event_mpa_req(struct nes_cm_event *);
-static void cm_event_mpa_reject(struct nes_cm_event *);
-static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node);
-
-/* MPA build functions */
-static int cm_build_mpa_frame(struct nes_cm_node *, u8 **, u16 *, u8 *, u8);
-static void build_mpa_v2(struct nes_cm_node *, void *, u8);
-static void build_mpa_v1(struct nes_cm_node *, void *, u8);
-static void build_rdma0_msg(struct nes_cm_node *, struct nes_qp **);
-
-static void print_core(struct nes_cm_core *core);
-static void record_ird_ord(struct nes_cm_node *, u16, u16);
-
-/* External CM API Interface */
-/* instance of function pointers for client API */
-/* set address of this instance to cm_core->cm_ops at cm_core alloc */
-static struct nes_cm_ops nes_cm_api = {
- mini_cm_accelerated,
- mini_cm_listen,
- mini_cm_del_listen,
- mini_cm_connect,
- mini_cm_close,
- mini_cm_accept,
- mini_cm_reject,
- mini_cm_recv_pkt,
- mini_cm_dealloc_core,
- mini_cm_get,
- mini_cm_set
-};
-
-static struct nes_cm_core *g_cm_core;
-
-atomic_t cm_connects;
-atomic_t cm_accepts;
-atomic_t cm_disconnects;
-atomic_t cm_closes;
-atomic_t cm_connecteds;
-atomic_t cm_connect_reqs;
-atomic_t cm_rejects;
-
-int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
-{
- return add_ref_cm_node(cm_node);
-}
-
-int nes_rem_ref_cm_node(struct nes_cm_node *cm_node)
-{
- return rem_ref_cm_node(cm_node->cm_core, cm_node);
-}
-/**
- * create_event - allocate a CM event for cm_node and post it to the event handler
- */
-static struct nes_cm_event *create_event(struct nes_cm_node * cm_node,
- enum nes_cm_event_type type)
-{
- struct nes_cm_event *event;
-
- if (!cm_node->cm_id)
- return NULL;
-
- /* allocate an empty event */
- event = kzalloc(sizeof(*event), GFP_ATOMIC);
-
- if (!event)
- return NULL;
-
- event->type = type;
- event->cm_node = cm_node;
- event->cm_info.rem_addr = cm_node->rem_addr;
- event->cm_info.loc_addr = cm_node->loc_addr;
- event->cm_info.rem_port = cm_node->rem_port;
- event->cm_info.loc_port = cm_node->loc_port;
- event->cm_info.cm_id = cm_node->cm_id;
-
- nes_debug(NES_DBG_CM, "cm_node=%p Created event=%p, type=%u, "
- "dst_addr=%08x[%x], src_addr=%08x[%x]\n",
- cm_node, event, type, event->cm_info.loc_addr,
- event->cm_info.loc_port, event->cm_info.rem_addr,
- event->cm_info.rem_port);
-
- nes_cm_post_event(event);
- return event;
-}
-
-
-/**
- * send_mpa_request - build an MPA Request frame and schedule it for transmission on cm_node
- */
-static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb)
-{
- u8 start_addr = 0;
- u8 *start_ptr = &start_addr;
- u8 **start_buff = &start_ptr;
- u16 buff_len = 0;
-
- if (!skb) {
- nes_debug(NES_DBG_CM, "skb set to NULL\n");
- return -1;
- }
-
- /* send an MPA Request frame */
- cm_build_mpa_frame(cm_node, start_buff, &buff_len, NULL, MPA_KEY_REQUEST);
- form_cm_frame(skb, cm_node, NULL, 0, *start_buff, buff_len, SET_ACK);
-
- return schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
-}
-
-
-
-static int send_mpa_reject(struct nes_cm_node *cm_node)
-{
- struct sk_buff *skb = NULL;
- u8 start_addr = 0;
- u8 *start_ptr = &start_addr;
- u8 **start_buff = &start_ptr;
- u16 buff_len = 0;
- struct ietf_mpa_v1 *mpa_frame;
-
- skb = dev_alloc_skb(MAX_CM_BUFFER);
- if (!skb) {
- nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
- return -ENOMEM;
- }
-
- /* send an MPA reject frame */
- cm_build_mpa_frame(cm_node, start_buff, &buff_len, NULL, MPA_KEY_REPLY);
- mpa_frame = (struct ietf_mpa_v1 *)*start_buff;
- mpa_frame->flags |= IETF_MPA_FLAGS_REJECT;
- form_cm_frame(skb, cm_node, NULL, 0, *start_buff, buff_len, SET_ACK | SET_FIN);
-
- cm_node->state = NES_CM_STATE_FIN_WAIT1;
- return schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
-}
-
-
-/**
- * parse_mpa - process a received TCP pkt; we expect an
- * IETF MPA frame
- */
-static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
- u32 len)
-{
- struct ietf_mpa_v1 *mpa_frame;
- struct ietf_mpa_v2 *mpa_v2_frame;
- struct ietf_rtr_msg *rtr_msg;
- int mpa_hdr_len;
- int priv_data_len;
-
- *type = NES_MPA_REQUEST_ACCEPT;
-
- /* assume req frame is in tcp data payload */
- if (len < sizeof(struct ietf_mpa_v1)) {
- nes_debug(NES_DBG_CM, "The received ietf buffer was too small (%x)\n", len);
- return -EINVAL;
- }
-
- /* points to the beginning of the frame, which could be MPA V1 or V2 */
- mpa_frame = (struct ietf_mpa_v1 *)buffer;
- mpa_hdr_len = sizeof(struct ietf_mpa_v1);
- priv_data_len = ntohs(mpa_frame->priv_data_len);
-
- /* make sure mpa private data len is less than 512 bytes */
- if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) {
- nes_debug(NES_DBG_CM, "The received Length of Private"
- " Data field exceeds 512 octets\n");
- return -EINVAL;
- }
- /*
- * make sure the MPA receiver can interoperate with the
- * received MPA version and MPA key information
- */
- if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) {
- nes_debug(NES_DBG_CM, "The received mpa version"
- " is not supported\n");
- return -EINVAL;
- }
- /*
- * backwards compatibility only
- */
- if (mpa_frame->rev > cm_node->mpa_frame_rev) {
- nes_debug(NES_DBG_CM, "The received mpa version"
- " can not be interoperated\n");
- return -EINVAL;
- } else {
- cm_node->mpa_frame_rev = mpa_frame->rev;
- }
-
- if (cm_node->state != NES_CM_STATE_MPAREQ_SENT) {
- if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) {
- nes_debug(NES_DBG_CM, "Unexpected MPA Key received \n");
- return -EINVAL;
- }
- } else {
- if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE)) {
- nes_debug(NES_DBG_CM, "Unexpected MPA Key received \n");
- return -EINVAL;
- }
- }
-
- if (priv_data_len + mpa_hdr_len != len) {
- nes_debug(NES_DBG_CM, "The received ietf buffer was not right"
- " complete (%x + %x != %x)\n",
- priv_data_len, mpa_hdr_len, len);
- return -EINVAL;
- }
- /* make sure it does not exceed the max size */
- if (len > MAX_CM_BUFFER) {
- nes_debug(NES_DBG_CM, "The received ietf buffer was too large"
- " (%x + %x != %x)\n",
- priv_data_len, mpa_hdr_len, len);
- return -EINVAL;
- }
-
- cm_node->mpa_frame_size = priv_data_len;
-
- switch (mpa_frame->rev) {
- case IETF_MPA_V2: {
- u16 ird_size;
- u16 ord_size;
- u16 rtr_ctrl_ird;
- u16 rtr_ctrl_ord;
-
- mpa_v2_frame = (struct ietf_mpa_v2 *)buffer;
- mpa_hdr_len += IETF_RTR_MSG_SIZE;
- cm_node->mpa_frame_size -= IETF_RTR_MSG_SIZE;
- rtr_msg = &mpa_v2_frame->rtr_msg;
-
- /* parse rtr message */
- rtr_ctrl_ird = ntohs(rtr_msg->ctrl_ird);
- rtr_ctrl_ord = ntohs(rtr_msg->ctrl_ord);
- ird_size = rtr_ctrl_ird & IETF_NO_IRD_ORD;
- ord_size = rtr_ctrl_ord & IETF_NO_IRD_ORD;
-
- if (!(rtr_ctrl_ird & IETF_PEER_TO_PEER)) {
- /* send reset */
- return -EINVAL;
- }
- if (ird_size == IETF_NO_IRD_ORD || ord_size == IETF_NO_IRD_ORD)
- cm_node->mpav2_ird_ord = IETF_NO_IRD_ORD;
-
- if (cm_node->mpav2_ird_ord != IETF_NO_IRD_ORD) {
- /* responder */
- if (cm_node->state != NES_CM_STATE_MPAREQ_SENT) {
- /* we are still negotiating */
- if (ord_size > NES_MAX_IRD) {
- cm_node->ird_size = NES_MAX_IRD;
- } else {
- cm_node->ird_size = ord_size;
- if (ord_size == 0 &&
- (rtr_ctrl_ord & IETF_RDMA0_READ)) {
- cm_node->ird_size = 1;
- nes_debug(NES_DBG_CM,
- "%s: Remote peer doesn't support RDMA0_READ (ord=%u)\n",
- __func__, ord_size);
- }
- }
- if (ird_size > NES_MAX_ORD)
- cm_node->ord_size = NES_MAX_ORD;
- else
- cm_node->ord_size = ird_size;
- } else { /* initiator */
- if (ord_size > NES_MAX_IRD) {
- nes_debug(NES_DBG_CM,
- "%s: Unable to support the requested (ord =%u)\n",
- __func__, ord_size);
- return -EINVAL;
- }
- cm_node->ird_size = ord_size;
-
- if (ird_size > NES_MAX_ORD) {
- cm_node->ord_size = NES_MAX_ORD;
- } else {
- if (ird_size == 0 &&
- (rtr_ctrl_ord & IETF_RDMA0_READ)) {
- nes_debug(NES_DBG_CM,
- "%s: Remote peer doesn't support RDMA0_READ (ird=%u)\n",
- __func__, ird_size);
- return -EINVAL;
- } else {
- cm_node->ord_size = ird_size;
- }
- }
- }
- }
-
- if (rtr_ctrl_ord & IETF_RDMA0_READ) {
- cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
-
- } else if (rtr_ctrl_ord & IETF_RDMA0_WRITE) {
- cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
- } else { /* Not supported RDMA0 operation */
- return -EINVAL;
- }
- break;
- }
- case IETF_MPA_V1:
- default:
- break;
- }
-
- /* copy entire MPA frame to our cm_node's frame */
- memcpy(cm_node->mpa_frame_buf, buffer + mpa_hdr_len, cm_node->mpa_frame_size);
-
- if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT)
- *type = NES_MPA_REQUEST_REJECT;
- return 0;
-}
-
-
-/**
- * form_cm_frame - get a free packet and build an empty frame.  Use
- * node info to build it.
- */
-static void form_cm_frame(struct sk_buff *skb,
- struct nes_cm_node *cm_node, void *options, u32 optionsize,
- void *data, u32 datasize, u8 flags)
-{
- struct tcphdr *tcph;
- struct iphdr *iph;
- struct ethhdr *ethh;
- u8 *buf;
- u16 packetsize = sizeof(*iph);
-
- packetsize += sizeof(*tcph);
- packetsize += optionsize + datasize;
-
- skb_trim(skb, 0);
- memset(skb->data, 0x00, ETH_HLEN + sizeof(*iph) + sizeof(*tcph));
-
- buf = skb_put(skb, packetsize + ETH_HLEN);
-
- ethh = (struct ethhdr *)buf;
- buf += ETH_HLEN;
-
- iph = (struct iphdr *)buf;
- buf += sizeof(*iph);
- tcph = (struct tcphdr *)buf;
- skb_reset_mac_header(skb);
- skb_set_network_header(skb, ETH_HLEN);
- skb_set_transport_header(skb, ETH_HLEN + sizeof(*iph));
- buf += sizeof(*tcph);
-
- skb->ip_summed = CHECKSUM_PARTIAL;
- if (!(cm_node->netdev->features & NETIF_F_IP_CSUM))
- skb->ip_summed = CHECKSUM_NONE;
- skb->protocol = htons(0x800);
- skb->data_len = 0;
- skb->mac_len = ETH_HLEN;
-
- memcpy(ethh->h_dest, cm_node->rem_mac, ETH_ALEN);
- memcpy(ethh->h_source, cm_node->loc_mac, ETH_ALEN);
- ethh->h_proto = htons(0x0800);
-
- iph->version = IPVERSION;
- iph->ihl = 5; /* 5 * 4-byte words, IP header len */
- iph->tos = 0;
- iph->tot_len = htons(packetsize);
- iph->id = htons(++cm_node->tcp_cntxt.loc_id);
-
- iph->frag_off = htons(0x4000);
- iph->ttl = 0x40;
- iph->protocol = 0x06; /* IPPROTO_TCP */
-
- iph->saddr = htonl(cm_node->mapped_loc_addr);
- iph->daddr = htonl(cm_node->mapped_rem_addr);
-
- tcph->source = htons(cm_node->mapped_loc_port);
- tcph->dest = htons(cm_node->mapped_rem_port);
- tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
-
- if (flags & SET_ACK) {
- cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
- tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
- tcph->ack = 1;
- } else {
- tcph->ack_seq = 0;
- }
-
- if (flags & SET_SYN) {
- cm_node->tcp_cntxt.loc_seq_num++;
- tcph->syn = 1;
- } else {
- cm_node->tcp_cntxt.loc_seq_num += datasize;
- }
-
- if (flags & SET_FIN) {
- cm_node->tcp_cntxt.loc_seq_num++;
- tcph->fin = 1;
- }
-
- if (flags & SET_RST)
- tcph->rst = 1;
-
- tcph->doff = (u16)((sizeof(*tcph) + optionsize + 3) >> 2);
- tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
- tcph->urg_ptr = 0;
- if (optionsize)
- memcpy(buf, options, optionsize);
- buf += optionsize;
- if (datasize)
- memcpy(buf, data, datasize);
-
- skb_shinfo(skb)->nr_frags = 0;
- cm_packets_created++;
-}
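/*
 * Editorial aside, not part of the original patch: a worked example of the
 * data-offset arithmetic in form_cm_frame() above.  With a 20-byte TCP
 * header and, say, 8 bytes of options, doff = (20 + 8 + 3) >> 2 = 7, i.e. a
 * 28-byte header expressed in 32-bit words; the "+ 3" rounds an option
 * length that is not a multiple of 4 up to the next word, which is also why
 * send_syn() below pads its option list ("we need the size to be a multiple
 * of 4").
 */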
-
-/*
- * nes_create_sockaddr - Record ip addr and tcp port in a sockaddr struct
- */
-static void nes_create_sockaddr(__be32 ip_addr, __be16 port,
- struct sockaddr_storage *addr)
-{
- struct sockaddr_in *nes_sockaddr = (struct sockaddr_in *)addr;
- nes_sockaddr->sin_family = AF_INET;
- memcpy(&nes_sockaddr->sin_addr.s_addr, &ip_addr, sizeof(__be32));
- nes_sockaddr->sin_port = port;
-}
-
-/*
- * nes_create_mapinfo - Create a mapinfo object in the port mapper data base
- */
-static int nes_create_mapinfo(struct nes_cm_info *cm_info)
-{
- struct sockaddr_storage local_sockaddr;
- struct sockaddr_storage mapped_sockaddr;
-
- nes_create_sockaddr(htonl(cm_info->loc_addr), htons(cm_info->loc_port),
- &local_sockaddr);
- nes_create_sockaddr(htonl(cm_info->mapped_loc_addr),
- htons(cm_info->mapped_loc_port), &mapped_sockaddr);
-
- return iwpm_create_mapinfo(&local_sockaddr,
- &mapped_sockaddr, RDMA_NL_NES);
-}
-
-/*
- * nes_remove_mapinfo - Remove a mapinfo object from the port mapper data base
- * and send a remove mapping op message to
- * the userspace port mapper
- */
-static int nes_remove_mapinfo(u32 loc_addr, u16 loc_port,
- u32 mapped_loc_addr, u16 mapped_loc_port)
-{
- struct sockaddr_storage local_sockaddr;
- struct sockaddr_storage mapped_sockaddr;
-
- nes_create_sockaddr(htonl(loc_addr), htons(loc_port), &local_sockaddr);
- nes_create_sockaddr(htonl(mapped_loc_addr), htons(mapped_loc_port),
- &mapped_sockaddr);
-
- iwpm_remove_mapinfo(&local_sockaddr, &mapped_sockaddr);
- return iwpm_remove_mapping(&local_sockaddr, RDMA_NL_NES);
-}
-
-/*
- * nes_form_pm_msg - Form a port mapper message with mapping info
- */
-static void nes_form_pm_msg(struct nes_cm_info *cm_info,
- struct iwpm_sa_data *pm_msg)
-{
- nes_create_sockaddr(htonl(cm_info->loc_addr), htons(cm_info->loc_port),
- &pm_msg->loc_addr);
- nes_create_sockaddr(htonl(cm_info->rem_addr), htons(cm_info->rem_port),
- &pm_msg->rem_addr);
-}
-
-/*
- * nes_form_reg_msg - Form a port mapper message with dev info
- */
-static void nes_form_reg_msg(struct nes_vnic *nesvnic,
- struct iwpm_dev_data *pm_msg)
-{
- memcpy(pm_msg->dev_name, nesvnic->nesibdev->ibdev.name,
- IWPM_DEVNAME_SIZE);
- memcpy(pm_msg->if_name, nesvnic->netdev->name, IWPM_IFNAME_SIZE);
-}
-
-static void record_sockaddr_info(struct sockaddr_storage *addr_info,
- nes_addr_t *ip_addr, u16 *port_num)
-{
- struct sockaddr_in *in_addr = (struct sockaddr_in *)addr_info;
-
- if (in_addr->sin_family == AF_INET) {
- *ip_addr = ntohl(in_addr->sin_addr.s_addr);
- *port_num = ntohs(in_addr->sin_port);
- }
-}
-
-/*
- * nes_record_pm_msg - Save the received mapping info
- */
-static void nes_record_pm_msg(struct nes_cm_info *cm_info,
- struct iwpm_sa_data *pm_msg)
-{
- record_sockaddr_info(&pm_msg->mapped_loc_addr,
- &cm_info->mapped_loc_addr, &cm_info->mapped_loc_port);
-
- record_sockaddr_info(&pm_msg->mapped_rem_addr,
- &cm_info->mapped_rem_addr, &cm_info->mapped_rem_port);
-}
-
-/*
- * nes_get_reminfo - Get the address info of the remote connecting peer
- */
-static int nes_get_remote_addr(struct nes_cm_node *cm_node)
-{
- struct sockaddr_storage mapped_loc_addr, mapped_rem_addr;
- struct sockaddr_storage remote_addr;
- int ret;
-
- nes_create_sockaddr(htonl(cm_node->mapped_loc_addr),
- htons(cm_node->mapped_loc_port), &mapped_loc_addr);
- nes_create_sockaddr(htonl(cm_node->mapped_rem_addr),
- htons(cm_node->mapped_rem_port), &mapped_rem_addr);
-
- ret = iwpm_get_remote_info(&mapped_loc_addr, &mapped_rem_addr,
- &remote_addr, RDMA_NL_NES);
- if (ret)
- nes_debug(NES_DBG_CM, "Unable to find remote peer address info\n");
- else
- record_sockaddr_info(&remote_addr, &cm_node->rem_addr,
- &cm_node->rem_port);
- return ret;
-}
-
-/**
- * print_core - dump a cm core
- */
-static void print_core(struct nes_cm_core *core)
-{
- nes_debug(NES_DBG_CM, "---------------------------------------------\n");
- nes_debug(NES_DBG_CM, "CM Core -- (core = %p )\n", core);
- if (!core)
- return;
- nes_debug(NES_DBG_CM, "---------------------------------------------\n");
-
- nes_debug(NES_DBG_CM, "State : %u \n", core->state);
-
- nes_debug(NES_DBG_CM, "Listen Nodes : %u \n", atomic_read(&core->listen_node_cnt));
- nes_debug(NES_DBG_CM, "Active Nodes : %u \n", atomic_read(&core->node_cnt));
-
- nes_debug(NES_DBG_CM, "core : %p \n", core);
-
- nes_debug(NES_DBG_CM, "-------------- end core ---------------\n");
-}
-
-static void record_ird_ord(struct nes_cm_node *cm_node,
- u16 conn_ird, u16 conn_ord)
-{
- if (conn_ird > NES_MAX_IRD)
- conn_ird = NES_MAX_IRD;
-
- if (conn_ord > NES_MAX_ORD)
- conn_ord = NES_MAX_ORD;
-
- cm_node->ird_size = conn_ird;
- cm_node->ord_size = conn_ord;
-}
-
-/**
- * cm_build_mpa_frame - build an MPA V1 or MPA V2 frame
- */
-static int cm_build_mpa_frame(struct nes_cm_node *cm_node, u8 **start_buff,
- u16 *buff_len, u8 *pci_mem, u8 mpa_key)
-{
- int ret = 0;
-
- *start_buff = (pci_mem) ? pci_mem : &cm_node->mpa_frame_buf[0];
-
- switch (cm_node->mpa_frame_rev) {
- case IETF_MPA_V1:
- *start_buff = (u8 *)*start_buff + sizeof(struct ietf_rtr_msg);
- *buff_len = sizeof(struct ietf_mpa_v1) + cm_node->mpa_frame_size;
- build_mpa_v1(cm_node, *start_buff, mpa_key);
- break;
- case IETF_MPA_V2:
- *buff_len = sizeof(struct ietf_mpa_v2) + cm_node->mpa_frame_size;
- build_mpa_v2(cm_node, *start_buff, mpa_key);
- break;
- default:
- ret = -EINVAL;
- }
- return ret;
-}
-
-/**
- * build_mpa_v2 - build an MPA V2 frame
- */
-static void build_mpa_v2(struct nes_cm_node *cm_node,
- void *start_addr, u8 mpa_key)
-{
- struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr;
- struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
- u16 ctrl_ird;
- u16 ctrl_ord;
-
- /* initialize the upper 5 bytes of the frame */
- build_mpa_v1(cm_node, start_addr, mpa_key);
- mpa_frame->flags |= IETF_MPA_V2_FLAG; /* set a bit to indicate MPA V2 */
- mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE);
-
- /* initialize RTR msg */
- if (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) {
- ctrl_ird = IETF_NO_IRD_ORD;
- ctrl_ord = IETF_NO_IRD_ORD;
- } else {
- ctrl_ird = cm_node->ird_size & IETF_NO_IRD_ORD;
- ctrl_ord = cm_node->ord_size & IETF_NO_IRD_ORD;
- }
- ctrl_ird |= IETF_PEER_TO_PEER;
- ctrl_ird |= IETF_FLPDU_ZERO_LEN;
-
- switch (mpa_key) {
- case MPA_KEY_REQUEST:
- ctrl_ord |= IETF_RDMA0_WRITE;
- ctrl_ord |= IETF_RDMA0_READ;
- break;
- case MPA_KEY_REPLY:
- switch (cm_node->send_rdma0_op) {
- case SEND_RDMA_WRITE_ZERO:
- ctrl_ord |= IETF_RDMA0_WRITE;
- break;
- case SEND_RDMA_READ_ZERO:
- ctrl_ord |= IETF_RDMA0_READ;
- break;
- }
- }
- rtr_msg->ctrl_ird = htons(ctrl_ird);
- rtr_msg->ctrl_ord = htons(ctrl_ord);
-}
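/*
 * Editorial aside, not part of the original patch: the two 16-bit RTR
 * control words built above are packed as <flags | value>.  The bits covered
 * by IETF_NO_IRD_ORD carry the IRD (ctrl_ird) respectively ORD (ctrl_ord)
 * value, the remaining bits carry the control flags -- IETF_PEER_TO_PEER and
 * IETF_FLPDU_ZERO_LEN in ctrl_ird, IETF_RDMA0_WRITE / IETF_RDMA0_READ in
 * ctrl_ord -- and both words go on the wire big-endian (htons), which is why
 * parse_mpa() converts them back with ntohs() before testing the flags.
 */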
-
-/**
- * build_mpa_v1 - build an MPA V1 frame
- */
-static void build_mpa_v1(struct nes_cm_node *cm_node, void *start_addr, u8 mpa_key)
-{
- struct ietf_mpa_v1 *mpa_frame = (struct ietf_mpa_v1 *)start_addr;
-
- switch (mpa_key) {
- case MPA_KEY_REQUEST:
- memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
- break;
- case MPA_KEY_REPLY:
- memcpy(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
- break;
- }
- mpa_frame->flags = IETF_MPA_FLAGS_CRC;
- mpa_frame->rev = cm_node->mpa_frame_rev;
- mpa_frame->priv_data_len = htons(cm_node->mpa_frame_size);
-}
-
-static void build_rdma0_msg(struct nes_cm_node *cm_node, struct nes_qp **nesqp_addr)
-{
- u64 u64temp;
- struct nes_qp *nesqp = *nesqp_addr;
- struct nes_hw_qp_wqe *wqe = &nesqp->hwqp.sq_vbase[0];
-
- u64temp = (unsigned long)nesqp->nesuqp_addr;
- u64temp |= NES_SW_CONTEXT_ALIGN >> 1;
- set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, u64temp);
-
- wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = 0;
- wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = 0;
-
- switch (cm_node->send_rdma0_op) {
- case SEND_RDMA_WRITE_ZERO:
- nes_debug(NES_DBG_CM, "Sending first write.\n");
- wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =
- cpu_to_le32(NES_IWARP_SQ_OP_RDMAW);
- wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = 0;
- wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0;
- wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
- break;
-
- case SEND_RDMA_READ_ZERO:
- default:
- if (cm_node->send_rdma0_op != SEND_RDMA_READ_ZERO)
- WARN(1, "Unsupported RDMA0 len operation=%u\n",
- cm_node->send_rdma0_op);
- nes_debug(NES_DBG_CM, "Sending first rdma operation.\n");
- wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =
- cpu_to_le32(NES_IWARP_SQ_OP_RDMAR);
- wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX] = 1;
- wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_TO_HIGH_IDX] = 0;
- wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX] = 0;
- wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_STAG_IDX] = 1;
- wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 1;
- break;
- }
-
- if (nesqp->sq_kmapped) {
- nesqp->sq_kmapped = 0;
- kunmap(nesqp->page);
- }
-
- /* use the reserved spot on the WQ for the extra first WQE */
- nesqp->nesqp_context->ird_ord_sizes &= cpu_to_le32(~(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
- NES_QPCONTEXT_ORDIRD_WRPDU |
- NES_QPCONTEXT_ORDIRD_ALSMM));
- nesqp->skip_lsmm = 1;
- nesqp->hwqp.sq_tail = 0;
-}
-
-/**
- * schedule_nes_timer
- * note - cm_node needs to be protected before calling this, i.e. the
- * caller must hold a reference: add_ref_cm_node(cm_node) before the call,
- * rem_ref_cm_node(cm_core, cm_node) when the node is no longer needed.
- */
-int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
- enum nes_timer_type type, int send_retrans,
- int close_when_complete)
-{
- unsigned long flags;
- struct nes_cm_core *cm_core = cm_node->cm_core;
- struct nes_timer_entry *new_send;
- int ret = 0;
-
- new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
- if (!new_send)
- return -ENOMEM;
-
- /* new_send->timetosend = currenttime */
- new_send->retrycount = NES_DEFAULT_RETRYS;
- new_send->retranscount = NES_DEFAULT_RETRANS;
- new_send->skb = skb;
- new_send->timetosend = jiffies;
- new_send->type = type;
- new_send->netdev = cm_node->netdev;
- new_send->send_retrans = send_retrans;
- new_send->close_when_complete = close_when_complete;
-
- if (type == NES_TIMER_TYPE_CLOSE) {
- new_send->timetosend += (HZ / 10);
- if (cm_node->recv_entry) {
- kfree(new_send);
- WARN_ON(1);
- return -EINVAL;
- }
- cm_node->recv_entry = new_send;
- }
-
- if (type == NES_TIMER_TYPE_SEND) {
- new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
- atomic_inc(&new_send->skb->users);
- spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
- cm_node->send_entry = new_send;
- add_ref_cm_node(cm_node);
- spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
- new_send->timetosend = jiffies + NES_RETRY_TIMEOUT;
-
- ret = nes_nic_cm_xmit(new_send->skb, cm_node->netdev);
- if (ret != NETDEV_TX_OK) {
- nes_debug(NES_DBG_CM, "Error sending packet %p "
- "(jiffies = %lu)\n", new_send, jiffies);
- new_send->timetosend = jiffies;
- ret = NETDEV_TX_OK;
- } else {
- cm_packets_sent++;
- if (!send_retrans) {
- cleanup_retrans_entry(cm_node);
- if (close_when_complete)
- rem_ref_cm_node(cm_core, cm_node);
- return ret;
- }
- }
- }
-
- if (!timer_pending(&cm_core->tcp_timer))
- mod_timer(&cm_core->tcp_timer, new_send->timetosend);
-
- return ret;
-}
-
-static void nes_retrans_expired(struct nes_cm_node *cm_node)
-{
- struct iw_cm_id *cm_id = cm_node->cm_id;
- enum nes_cm_node_state state = cm_node->state;
- cm_node->state = NES_CM_STATE_CLOSED;
-
- switch (state) {
- case NES_CM_STATE_SYN_RCVD:
- case NES_CM_STATE_CLOSING:
- rem_ref_cm_node(cm_node->cm_core, cm_node);
- break;
- case NES_CM_STATE_LAST_ACK:
- case NES_CM_STATE_FIN_WAIT1:
- if (cm_node->cm_id)
- cm_id->rem_ref(cm_id);
- send_reset(cm_node, NULL);
- break;
- default:
- add_ref_cm_node(cm_node);
- send_reset(cm_node, NULL);
- create_event(cm_node, NES_CM_EVENT_ABORTED);
- }
-}
-
-static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node)
-{
- struct nes_timer_entry *recv_entry = cm_node->recv_entry;
- struct iw_cm_id *cm_id = cm_node->cm_id;
- struct nes_qp *nesqp;
- unsigned long qplockflags;
-
- if (!recv_entry)
- return;
- nesqp = (struct nes_qp *)recv_entry->skb;
- if (nesqp) {
- spin_lock_irqsave(&nesqp->lock, qplockflags);
- if (nesqp->cm_id) {
- nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, "
- "refcount = %d: HIT A "
- "NES_TIMER_TYPE_CLOSE with something "
- "to do!!!\n", nesqp->hwqp.qp_id, cm_id,
- atomic_read(&nesqp->refcount));
- nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
- nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
- nesqp->ibqp_state = IB_QPS_ERR;
- spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- nes_cm_disconn(nesqp);
- } else {
- spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, "
- "refcount = %d: HIT A "
- "NES_TIMER_TYPE_CLOSE with nothing "
- "to do!!!\n", nesqp->hwqp.qp_id, cm_id,
- atomic_read(&nesqp->refcount));
- }
- } else if (rem_node) {
- /* TIME_WAIT state */
- rem_ref_cm_node(cm_node->cm_core, cm_node);
- }
- if (cm_node->cm_id)
- cm_id->rem_ref(cm_id);
- kfree(recv_entry);
- cm_node->recv_entry = NULL;
-}
-
-/**
- * nes_cm_timer_tick - handle receive-entry (close) timeouts and retransmit pending CM packets
- */
-static void nes_cm_timer_tick(unsigned long pass)
-{
- unsigned long flags;
- unsigned long nexttimeout = jiffies + NES_LONG_TIME;
- struct nes_cm_node *cm_node;
- struct nes_timer_entry *send_entry, *recv_entry;
- struct list_head *list_core_temp;
- struct list_head *list_node;
- struct nes_cm_core *cm_core = g_cm_core;
- u32 settimer = 0;
- unsigned long timetosend;
- int ret = NETDEV_TX_OK;
-
- struct list_head timer_list;
-
- INIT_LIST_HEAD(&timer_list);
- spin_lock_irqsave(&cm_core->ht_lock, flags);
-
- list_for_each_safe(list_node, list_core_temp,
- &cm_core->connected_nodes) {
- cm_node = container_of(list_node, struct nes_cm_node, list);
- if ((cm_node->recv_entry) || (cm_node->send_entry)) {
- add_ref_cm_node(cm_node);
- list_add(&cm_node->timer_entry, &timer_list);
- }
- }
- spin_unlock_irqrestore(&cm_core->ht_lock, flags);
-
- list_for_each_safe(list_node, list_core_temp, &timer_list) {
- cm_node = container_of(list_node, struct nes_cm_node,
- timer_entry);
- recv_entry = cm_node->recv_entry;
-
- if (recv_entry) {
- if (time_after(recv_entry->timetosend, jiffies)) {
- if (nexttimeout > recv_entry->timetosend ||
- !settimer) {
- nexttimeout = recv_entry->timetosend;
- settimer = 1;
- }
- } else {
- handle_recv_entry(cm_node, 1);
- }
- }
-
- spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
- do {
- send_entry = cm_node->send_entry;
- if (!send_entry)
- break;
- if (time_after(send_entry->timetosend, jiffies)) {
- if (cm_node->state != NES_CM_STATE_TSA) {
- if ((nexttimeout >
- send_entry->timetosend) ||
- !settimer) {
- nexttimeout =
- send_entry->timetosend;
- settimer = 1;
- }
- } else {
- free_retrans_entry(cm_node);
- }
- break;
- }
-
- if ((cm_node->state == NES_CM_STATE_TSA) ||
- (cm_node->state == NES_CM_STATE_CLOSED)) {
- free_retrans_entry(cm_node);
- break;
- }
-
- if (!send_entry->retranscount ||
- !send_entry->retrycount) {
- cm_packets_dropped++;
- free_retrans_entry(cm_node);
-
- spin_unlock_irqrestore(
- &cm_node->retrans_list_lock, flags);
- nes_retrans_expired(cm_node);
- cm_node->state = NES_CM_STATE_CLOSED;
- spin_lock_irqsave(&cm_node->retrans_list_lock,
- flags);
- break;
- }
- atomic_inc(&send_entry->skb->users);
- cm_packets_retrans++;
- nes_debug(NES_DBG_CM, "Retransmitting send_entry %p "
- "for node %p, jiffies = %lu, time to send = "
- "%lu, retranscount = %u, send_entry->seq_num = "
- "0x%08X, cm_node->tcp_cntxt.rem_ack_num = "
- "0x%08X\n", send_entry, cm_node, jiffies,
- send_entry->timetosend,
- send_entry->retranscount,
- send_entry->seq_num,
- cm_node->tcp_cntxt.rem_ack_num);
-
- spin_unlock_irqrestore(&cm_node->retrans_list_lock,
- flags);
- ret = nes_nic_cm_xmit(send_entry->skb, cm_node->netdev);
- spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
- if (ret != NETDEV_TX_OK) {
- nes_debug(NES_DBG_CM, "rexmit failed for "
- "node=%p\n", cm_node);
- cm_packets_bounced++;
- send_entry->retrycount--;
- nexttimeout = jiffies + NES_SHORT_TIME;
- settimer = 1;
- break;
- } else {
- cm_packets_sent++;
- }
- nes_debug(NES_DBG_CM, "Packet Sent: retrans count = "
- "%u, retry count = %u.\n",
- send_entry->retranscount,
- send_entry->retrycount);
- if (send_entry->send_retrans) {
- send_entry->retranscount--;
- timetosend = (NES_RETRY_TIMEOUT <<
- (NES_DEFAULT_RETRANS - send_entry->retranscount));
-
- send_entry->timetosend = jiffies +
- min(timetosend, NES_MAX_TIMEOUT);
- if (nexttimeout > send_entry->timetosend ||
- !settimer) {
- nexttimeout = send_entry->timetosend;
- settimer = 1;
- }
- } else {
- int close_when_complete;
- close_when_complete =
- send_entry->close_when_complete;
- nes_debug(NES_DBG_CM, "cm_node=%p state=%d\n",
- cm_node, cm_node->state);
- free_retrans_entry(cm_node);
- if (close_when_complete)
- rem_ref_cm_node(cm_node->cm_core,
- cm_node);
- }
- } while (0);
-
- spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
- rem_ref_cm_node(cm_node->cm_core, cm_node);
- }
-
- if (settimer) {
- if (!timer_pending(&cm_core->tcp_timer))
- mod_timer(&cm_core->tcp_timer, nexttimeout);
- }
-}
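/*
 * Editorial aside, not part of the original patch: the retransmission
 * backoff above is exponential.  retranscount starts at NES_DEFAULT_RETRANS
 * and is decremented on every retransmission, so the shift
 * (NES_DEFAULT_RETRANS - retranscount) grows by one each time and the next
 * timeout becomes NES_RETRY_TIMEOUT << 1, << 2, << 3, ... jiffies, capped at
 * NES_MAX_TIMEOUT.  Once retranscount or retrycount reaches zero the entry
 * is dropped and nes_retrans_expired() tears the connection down with a
 * reset.
 */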
-
-
-/**
- * send_syn - build the TCP option list and transmit a SYN (or SYN-ACK) for cm_node
- */
-static int send_syn(struct nes_cm_node *cm_node, u32 sendack,
- struct sk_buff *skb)
-{
- int ret;
- int flags = SET_SYN;
- char optionsbuffer[sizeof(struct option_mss) +
- sizeof(struct option_windowscale) + sizeof(struct option_base) +
- TCP_OPTIONS_PADDING];
-
- int optionssize = 0;
- /* Sending MSS option */
- union all_known_options *options;
-
- if (!cm_node)
- return -EINVAL;
-
- options = (union all_known_options *)&optionsbuffer[optionssize];
- options->as_mss.optionnum = OPTION_NUMBER_MSS;
- options->as_mss.length = sizeof(struct option_mss);
- options->as_mss.mss = htons(cm_node->tcp_cntxt.mss);
- optionssize += sizeof(struct option_mss);
-
- options = (union all_known_options *)&optionsbuffer[optionssize];
- options->as_windowscale.optionnum = OPTION_NUMBER_WINDOW_SCALE;
- options->as_windowscale.length = sizeof(struct option_windowscale);
- options->as_windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale;
- optionssize += sizeof(struct option_windowscale);
-
- if (sendack && !(NES_DRV_OPT_SUPRESS_OPTION_BC & nes_drv_opt)) {
- options = (union all_known_options *)&optionsbuffer[optionssize];
- options->as_base.optionnum = OPTION_NUMBER_WRITE0;
- options->as_base.length = sizeof(struct option_base);
- optionssize += sizeof(struct option_base);
- /* we need the size to be a multiple of 4 */
- options = (union all_known_options *)&optionsbuffer[optionssize];
- options->as_end = 1;
- optionssize += 1;
- options = (union all_known_options *)&optionsbuffer[optionssize];
- options->as_end = 1;
- optionssize += 1;
- }
-
- options = (union all_known_options *)&optionsbuffer[optionssize];
- options->as_end = OPTION_NUMBER_END;
- optionssize += 1;
-
- if (!skb)
- skb = dev_alloc_skb(MAX_CM_BUFFER);
- if (!skb) {
- nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
- return -1;
- }
-
- if (sendack)
- flags |= SET_ACK;
-
- form_cm_frame(skb, cm_node, optionsbuffer, optionssize, NULL, 0, flags);
- ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
-
- return ret;
-}
-
-
-/**
- * send_reset - build and send an RST/ACK frame for this cm node
- */
-static int send_reset(struct nes_cm_node *cm_node, struct sk_buff *skb)
-{
- int ret;
- int flags = SET_RST | SET_ACK;
-
- if (!skb)
- skb = dev_alloc_skb(MAX_CM_BUFFER);
- if (!skb) {
- nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
- return -ENOMEM;
- }
-
- form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, flags);
- ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 0, 1);
-
- return ret;
-}
-
-
-/**
- * send_ack - build and send a bare ACK frame for this cm node
- */
-static int send_ack(struct nes_cm_node *cm_node, struct sk_buff *skb)
-{
- int ret;
-
- if (!skb)
- skb = dev_alloc_skb(MAX_CM_BUFFER);
-
- if (!skb) {
- nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
- return -1;
- }
-
- form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, SET_ACK);
- ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 0, 0);
-
- return ret;
-}
-
-
-/**
- * send_fin - build and send a FIN/ACK frame for this cm node
- */
-static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb)
-{
- int ret;
-
- /* if we didn't get a frame get one */
- if (!skb)
- skb = dev_alloc_skb(MAX_CM_BUFFER);
-
- if (!skb) {
- nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
- return -1;
- }
-
- form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, SET_ACK | SET_FIN);
- ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
-
- return ret;
-}
-
-
-/**
- * find_node - find a cm node that matches the reference cm node
- */
-static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
- u16 rem_port, nes_addr_t rem_addr, u16 loc_port, nes_addr_t loc_addr)
-{
- unsigned long flags;
- struct list_head *hte;
- struct nes_cm_node *cm_node;
-
- /* get a handle on the hte */
- hte = &cm_core->connected_nodes;
-
- /* walk list and find cm_node associated with this session ID */
- spin_lock_irqsave(&cm_core->ht_lock, flags);
- list_for_each_entry(cm_node, hte, list) {
- /* compare quad, return node handle if a match */
- nes_debug(NES_DBG_CM, "finding node %x:%x =? %x:%x ^ %x:%x =? %x:%x\n",
- cm_node->loc_addr, cm_node->loc_port,
- loc_addr, loc_port,
- cm_node->rem_addr, cm_node->rem_port,
- rem_addr, rem_port);
- if ((cm_node->mapped_loc_addr == loc_addr) &&
- (cm_node->mapped_loc_port == loc_port) &&
- (cm_node->mapped_rem_addr == rem_addr) &&
- (cm_node->mapped_rem_port == rem_port)) {
-
- add_ref_cm_node(cm_node);
- spin_unlock_irqrestore(&cm_core->ht_lock, flags);
- return cm_node;
- }
- }
- spin_unlock_irqrestore(&cm_core->ht_lock, flags);
-
- /* no owner node */
- return NULL;
-}
-
-
-/**
- * find_listener - find a cm node listening on this addr-port pair
- */
-static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
- nes_addr_t dst_addr, u16 dst_port,
- enum nes_cm_listener_state listener_state, int local)
-{
- unsigned long flags;
- struct nes_cm_listener *listen_node;
- nes_addr_t listen_addr;
- u16 listen_port;
-
- /* walk list and find cm_node associated with this session ID */
- spin_lock_irqsave(&cm_core->listen_list_lock, flags);
- list_for_each_entry(listen_node, &cm_core->listen_list.list, list) {
- if (local) {
- listen_addr = listen_node->loc_addr;
- listen_port = listen_node->loc_port;
- } else {
- listen_addr = listen_node->mapped_loc_addr;
- listen_port = listen_node->mapped_loc_port;
- }
- /* compare node pair, return node handle if a match */
- if (((listen_addr == dst_addr) ||
- listen_addr == 0x00000000) &&
- (listen_port == dst_port) &&
- (listener_state & listen_node->listener_state)) {
- atomic_inc(&listen_node->ref_count);
- spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
- return listen_node;
- }
- }
- spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
-
- /* no listener */
- return NULL;
-}
-
-/**
- * add_hte_node - add a cm node to the hash table
- */
-static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
-{
- unsigned long flags;
- struct list_head *hte;
-
- if (!cm_node || !cm_core)
- return -EINVAL;
-
- nes_debug(NES_DBG_CM, "Adding Node %p to Active Connection HT\n",
- cm_node);
-
- spin_lock_irqsave(&cm_core->ht_lock, flags);
-
- /* get a handle on the hash table element (list head for this slot) */
- hte = &cm_core->connected_nodes;
- list_add_tail(&cm_node->list, hte);
- atomic_inc(&cm_core->ht_node_cnt);
-
- spin_unlock_irqrestore(&cm_core->ht_lock, flags);
-
- return 0;
-}
-
-
-/**
- * mini_cm_dec_refcnt_listen - drop a reference on a listener, optionally cleaning up
- * its non-accelerated child nodes, and destroy it on the last reference
- */
-static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
- struct nes_cm_listener *listener, int free_hanging_nodes)
-{
- int ret = -EINVAL;
- int err = 0;
- unsigned long flags;
- struct list_head *list_pos = NULL;
- struct list_head *list_temp = NULL;
- struct nes_cm_node *cm_node = NULL;
- struct list_head reset_list;
-
- nes_debug(NES_DBG_CM, "attempting listener= %p free_nodes= %d, "
- "refcnt=%d\n", listener, free_hanging_nodes,
- atomic_read(&listener->ref_count));
- /* free non-accelerated child nodes for this listener */
- INIT_LIST_HEAD(&reset_list);
- if (free_hanging_nodes) {
- spin_lock_irqsave(&cm_core->ht_lock, flags);
- list_for_each_safe(list_pos, list_temp,
- &g_cm_core->connected_nodes) {
- cm_node = container_of(list_pos, struct nes_cm_node,
- list);
- if ((cm_node->listener == listener) &&
- (!cm_node->accelerated)) {
- add_ref_cm_node(cm_node);
- list_add(&cm_node->reset_entry, &reset_list);
- }
- }
- spin_unlock_irqrestore(&cm_core->ht_lock, flags);
- }
-
- list_for_each_safe(list_pos, list_temp, &reset_list) {
- cm_node = container_of(list_pos, struct nes_cm_node,
- reset_entry);
- {
- struct nes_cm_node *loopback = cm_node->loopbackpartner;
- enum nes_cm_node_state old_state;
- if (NES_CM_STATE_FIN_WAIT1 <= cm_node->state) {
- rem_ref_cm_node(cm_node->cm_core, cm_node);
- } else {
- if (!loopback) {
- cleanup_retrans_entry(cm_node);
- err = send_reset(cm_node, NULL);
- if (err) {
- cm_node->state =
- NES_CM_STATE_CLOSED;
- WARN_ON(1);
- } else {
- old_state = cm_node->state;
- cm_node->state = NES_CM_STATE_LISTENER_DESTROYED;
- if (old_state != NES_CM_STATE_MPAREQ_RCVD)
- rem_ref_cm_node(
- cm_node->cm_core,
- cm_node);
- }
- } else {
- struct nes_cm_event event;
-
- event.cm_node = loopback;
- event.cm_info.rem_addr =
- loopback->rem_addr;
- event.cm_info.loc_addr =
- loopback->loc_addr;
- event.cm_info.rem_port =
- loopback->rem_port;
- event.cm_info.loc_port =
- loopback->loc_port;
- event.cm_info.cm_id = loopback->cm_id;
- add_ref_cm_node(loopback);
- loopback->state = NES_CM_STATE_CLOSED;
- cm_event_connect_error(&event);
- cm_node->state = NES_CM_STATE_LISTENER_DESTROYED;
-
- rem_ref_cm_node(cm_node->cm_core,
- cm_node);
-
- }
- }
- }
- }
-
- spin_lock_irqsave(&cm_core->listen_list_lock, flags);
- if (!atomic_dec_return(&listener->ref_count)) {
- list_del(&listener->list);
-
- /* decrement our listen node count */
- atomic_dec(&cm_core->listen_node_cnt);
-
- spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
-
- if (listener->nesvnic) {
- nes_manage_apbvt(listener->nesvnic,
- listener->mapped_loc_port,
- PCI_FUNC(listener->nesvnic->nesdev->pcidev->devfn),
- NES_MANAGE_APBVT_DEL);
-
- nes_remove_mapinfo(listener->loc_addr,
- listener->loc_port,
- listener->mapped_loc_addr,
- listener->mapped_loc_port);
- nes_debug(NES_DBG_NLMSG,
- "Delete APBVT mapped_loc_port = %04X\n",
- listener->mapped_loc_port);
- }
-
- nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener);
-
- kfree(listener);
- listener = NULL;
- ret = 0;
- atomic_inc(&cm_listens_destroyed);
- } else {
- spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
- }
- if (listener) {
- if (atomic_read(&listener->pend_accepts_cnt) > 0)
- nes_debug(NES_DBG_CM, "destroying listener (%p)"
- " with non-zero pending accepts=%u\n",
- listener, atomic_read(&listener->pend_accepts_cnt));
- }
-
- return ret;
-}
-
-
-/**
- * mini_cm_del_listen - mark a listener as going away and drop the reference taken at listen time
- */
-static int mini_cm_del_listen(struct nes_cm_core *cm_core,
- struct nes_cm_listener *listener)
-{
- listener->listener_state = NES_CM_LISTENER_PASSIVE_STATE;
- listener->cm_id = NULL; /* going to be destroyed pretty soon */
- return mini_cm_dec_refcnt_listen(cm_core, listener, 1);
-}
-
-
-/**
- * mini_cm_accelerated - mark a cm node as hardware accelerated and re-arm the TCP timer
- */
-static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
- struct nes_cm_node *cm_node)
-{
- cm_node->accelerated = 1;
-
- if (cm_node->accept_pend) {
- BUG_ON(!cm_node->listener);
- atomic_dec(&cm_node->listener->pend_accepts_cnt);
- cm_node->accept_pend = 0;
- BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
- }
-
- if (!timer_pending(&cm_core->tcp_timer))
- mod_timer(&cm_core->tcp_timer, (jiffies + NES_SHORT_TIME));
-
- return 0;
-}
-
-
-/**
- * nes_addr_resolve_neigh
- */
-static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpindex)
-{
- struct rtable *rt;
- struct neighbour *neigh;
- int rc = arpindex;
- struct net_device *netdev;
- struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
- __be32 dst_ipaddr = htonl(dst_ip);
-
- rt = ip_route_output(&init_net, dst_ipaddr, nesvnic->local_ipaddr, 0, 0);
- if (IS_ERR(rt)) {
- printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n",
- __func__, dst_ip);
- return rc;
- }
-
- if (netif_is_bond_slave(nesvnic->netdev))
- netdev = netdev_master_upper_dev_get(nesvnic->netdev);
- else
- netdev = nesvnic->netdev;
-
- neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
-
- rcu_read_lock();
- if (neigh) {
- if (neigh->nud_state & NUD_VALID) {
- nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
- " is %pM, Gateway is 0x%08X \n", dst_ip,
- neigh->ha, ntohl(rt->rt_gateway));
-
- if (arpindex >= 0) {
- if (ether_addr_equal(nesadapter->arp_table[arpindex].mac_addr, neigh->ha)) {
- /* Mac address same as in nes_arp_table */
- goto out;
- }
-
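-				/* the cached MAC is stale: delete the old ARP entry before adding the new one */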
- nes_manage_arp_cache(nesvnic->netdev,
- nesadapter->arp_table[arpindex].mac_addr,
- dst_ip, NES_ARP_DELETE);
- }
-
- nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
- dst_ip, NES_ARP_ADD);
- rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
- NES_ARP_RESOLVE);
- } else {
- neigh_event_send(neigh, NULL);
- }
- }
-out:
- rcu_read_unlock();
-
- if (neigh)
- neigh_release(neigh);
-
- ip_rt_put(rt);
- return rc;
-}
-
-/**
- * make_cm_node - create a new instance of a cm node
- */
-static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
- struct nes_vnic *nesvnic, struct nes_cm_info *cm_info,
- struct nes_cm_listener *listener)
-{
- struct nes_cm_node *cm_node;
- struct timespec ts;
- int oldarpindex = 0;
- int arpindex = 0;
- struct nes_device *nesdev;
- struct nes_adapter *nesadapter;
-
- /* create an hte and cm_node for this instance */
- cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
- if (!cm_node)
- return NULL;
-
- /* set our node specific transport info */
- if (listener) {
- cm_node->loc_addr = listener->loc_addr;
- cm_node->loc_port = listener->loc_port;
- } else {
- cm_node->loc_addr = cm_info->loc_addr;
- cm_node->loc_port = cm_info->loc_port;
- }
- cm_node->rem_addr = cm_info->rem_addr;
- cm_node->rem_port = cm_info->rem_port;
-
- cm_node->mapped_loc_addr = cm_info->mapped_loc_addr;
- cm_node->mapped_rem_addr = cm_info->mapped_rem_addr;
- cm_node->mapped_loc_port = cm_info->mapped_loc_port;
- cm_node->mapped_rem_port = cm_info->mapped_rem_port;
-
- cm_node->mpa_frame_rev = mpa_version;
- cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
- cm_node->mpav2_ird_ord = 0;
- cm_node->ird_size = 0;
- cm_node->ord_size = 0;
-
- nes_debug(NES_DBG_CM, "Make node addresses : loc = %pI4:%x, rem = %pI4:%x\n",
- &cm_node->loc_addr, cm_node->loc_port,
- &cm_node->rem_addr, cm_node->rem_port);
- cm_node->listener = listener;
- if (listener)
- cm_node->tos = listener->tos;
- cm_node->netdev = nesvnic->netdev;
- cm_node->cm_id = cm_info->cm_id;
- memcpy(cm_node->loc_mac, nesvnic->netdev->dev_addr, ETH_ALEN);
-
- nes_debug(NES_DBG_CM, "listener=%p, cm_id=%p\n", cm_node->listener,
- cm_node->cm_id);
-
- spin_lock_init(&cm_node->retrans_list_lock);
-
- cm_node->loopbackpartner = NULL;
- atomic_set(&cm_node->ref_count, 1);
- /* associate our parent CM core */
- cm_node->cm_core = cm_core;
- cm_node->tcp_cntxt.loc_id = NES_CM_DEF_LOCAL_ID;
- cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE;
- cm_node->tcp_cntxt.rcv_wnd = NES_CM_DEFAULT_RCV_WND_SCALED >>
- NES_CM_DEFAULT_RCV_WND_SCALE;
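-	/* use the current time (nanoseconds) as a pseudo-random initial sequence number */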
- ts = current_kernel_time();
- cm_node->tcp_cntxt.loc_seq_num = htonl(ts.tv_nsec);
- cm_node->tcp_cntxt.mss = nesvnic->max_frame_size - sizeof(struct iphdr) -
- sizeof(struct tcphdr) - ETH_HLEN - VLAN_HLEN;
- cm_node->tcp_cntxt.rcv_nxt = 0;
-	/* get a unique session ID; add thread_id to an up-counter to handle races */
- atomic_inc(&cm_core->node_cnt);
- cm_node->conn_type = cm_info->conn_type;
- cm_node->apbvt_set = 0;
- cm_node->accept_pend = 0;
-
- cm_node->nesvnic = nesvnic;
- /* get some device handles, for arp lookup */
- nesdev = nesvnic->nesdev;
- nesadapter = nesdev->nesadapter;
-
- cm_node->loopbackpartner = NULL;
-
- /* get the mac addr for the remote node */
- oldarpindex = nes_arp_table(nesdev, cm_node->mapped_rem_addr,
- NULL, NES_ARP_RESOLVE);
- arpindex = nes_addr_resolve_neigh(nesvnic,
- cm_node->mapped_rem_addr, oldarpindex);
- if (arpindex < 0) {
- kfree(cm_node);
- return NULL;
- }
-
- /* copy the mac addr to node context */
- memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
- nes_debug(NES_DBG_CM, "Remote mac addr from arp table: %pM\n",
- cm_node->rem_mac);
-
- add_hte_node(cm_core, cm_node);
- atomic_inc(&cm_nodes_created);
-
- return cm_node;
-}
-
-
-/**
- * add_ref_cm_node - take a reference on a cm node
- */
-static int add_ref_cm_node(struct nes_cm_node *cm_node)
-{
- atomic_inc(&cm_node->ref_count);
- return 0;
-}
-
-
-/**
- * rem_ref_cm_node - drop a reference on a cm node, destroying it when the last reference is released
- */
-static int rem_ref_cm_node(struct nes_cm_core *cm_core,
- struct nes_cm_node *cm_node)
-{
- unsigned long flags;
- struct nes_qp *nesqp;
-
- if (!cm_node)
- return -EINVAL;
-
- spin_lock_irqsave(&cm_node->cm_core->ht_lock, flags);
- if (atomic_dec_return(&cm_node->ref_count)) {
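-		/* other references remain; nothing to tear down yet */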
- spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
- return 0;
- }
- list_del(&cm_node->list);
- atomic_dec(&cm_core->ht_node_cnt);
- spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
-
- /* if the node is destroyed before connection was accelerated */
- if (!cm_node->accelerated && cm_node->accept_pend) {
- BUG_ON(!cm_node->listener);
- atomic_dec(&cm_node->listener->pend_accepts_cnt);
- BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
- }
- WARN_ON(cm_node->send_entry);
- if (cm_node->recv_entry)
- handle_recv_entry(cm_node, 0);
- if (cm_node->listener) {
- mini_cm_dec_refcnt_listen(cm_core, cm_node->listener, 0);
- } else {
- if (cm_node->apbvt_set && cm_node->nesvnic) {
- nes_manage_apbvt(cm_node->nesvnic, cm_node->mapped_loc_port,
- PCI_FUNC(cm_node->nesvnic->nesdev->pcidev->devfn),
- NES_MANAGE_APBVT_DEL);
- }
- nes_debug(NES_DBG_NLMSG, "Delete APBVT mapped_loc_port = %04X\n",
- cm_node->mapped_loc_port);
- nes_remove_mapinfo(cm_node->loc_addr, cm_node->loc_port,
- cm_node->mapped_loc_addr, cm_node->mapped_loc_port);
- }
-
- atomic_dec(&cm_core->node_cnt);
- atomic_inc(&cm_nodes_destroyed);
- nesqp = cm_node->nesqp;
- if (nesqp) {
- nesqp->cm_node = NULL;
- nes_rem_ref(&nesqp->ibqp);
- cm_node->nesqp = NULL;
- }
-
- kfree(cm_node);
- return 0;
-}
-
-/**
- * process_options - parse the TCP options (MSS, window scale) of a received segment
- */
-static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
- u32 optionsize, u32 syn_packet)
-{
- u32 tmp;
- u32 offset = 0;
- union all_known_options *all_options;
- char got_mss_option = 0;
-
- while (offset < optionsize) {
- all_options = (union all_known_options *)(optionsloc + offset);
- switch (all_options->as_base.optionnum) {
- case OPTION_NUMBER_END:
- offset = optionsize;
- break;
- case OPTION_NUMBER_NONE:
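-			/* NOP option: a single byte of padding */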
- offset += 1;
- continue;
- case OPTION_NUMBER_MSS:
- nes_debug(NES_DBG_CM, "%s: MSS Length: %d Offset: %d "
- "Size: %d\n", __func__,
- all_options->as_mss.length, offset, optionsize);
- got_mss_option = 1;
- if (all_options->as_mss.length != 4) {
- return 1;
- } else {
- tmp = ntohs(all_options->as_mss.mss);
- if (tmp > 0 && tmp <
- cm_node->tcp_cntxt.mss)
- cm_node->tcp_cntxt.mss = tmp;
- }
- break;
- case OPTION_NUMBER_WINDOW_SCALE:
- cm_node->tcp_cntxt.snd_wscale =
- all_options->as_windowscale.shiftcount;
- break;
- default:
- nes_debug(NES_DBG_CM, "TCP Option not understood: %x\n",
- all_options->as_base.optionnum);
- break;
- }
- offset += all_options->as_base.length;
- }
- if ((!got_mss_option) && (syn_packet))
- cm_node->tcp_cntxt.mss = NES_CM_DEFAULT_MSS;
- return 0;
-}
-
-static void drop_packet(struct sk_buff *skb)
-{
- atomic_inc(&cm_accel_dropped_pkts);
- dev_kfree_skb_any(skb);
-}
-
-static void handle_fin_pkt(struct nes_cm_node *cm_node)
-{
- nes_debug(NES_DBG_CM, "Received FIN, cm_node = %p, state = %u. "
- "refcnt=%d\n", cm_node, cm_node->state,
- atomic_read(&cm_node->ref_count));
- switch (cm_node->state) {
- case NES_CM_STATE_SYN_RCVD:
- case NES_CM_STATE_SYN_SENT:
- case NES_CM_STATE_ESTABLISHED:
- case NES_CM_STATE_MPAREJ_RCVD:
- cm_node->tcp_cntxt.rcv_nxt++;
- cleanup_retrans_entry(cm_node);
- cm_node->state = NES_CM_STATE_LAST_ACK;
- send_fin(cm_node, NULL);
- break;
- case NES_CM_STATE_MPAREQ_SENT:
- create_event(cm_node, NES_CM_EVENT_ABORTED);
- cm_node->tcp_cntxt.rcv_nxt++;
- cleanup_retrans_entry(cm_node);
- cm_node->state = NES_CM_STATE_CLOSED;
- add_ref_cm_node(cm_node);
- send_reset(cm_node, NULL);
- break;
- case NES_CM_STATE_FIN_WAIT1:
- cm_node->tcp_cntxt.rcv_nxt++;
- cleanup_retrans_entry(cm_node);
- cm_node->state = NES_CM_STATE_CLOSING;
- send_ack(cm_node, NULL);
-		/* Simultaneous close: wait for the ACK. Once it arrives,
-		 * send nothing further and simply remove the node. */
- break;
- case NES_CM_STATE_FIN_WAIT2:
- cm_node->tcp_cntxt.rcv_nxt++;
- cleanup_retrans_entry(cm_node);
- cm_node->state = NES_CM_STATE_TIME_WAIT;
- send_ack(cm_node, NULL);
- schedule_nes_timer(cm_node, NULL, NES_TIMER_TYPE_CLOSE, 1, 0);
- break;
- case NES_CM_STATE_TIME_WAIT:
- cm_node->tcp_cntxt.rcv_nxt++;
- cleanup_retrans_entry(cm_node);
- cm_node->state = NES_CM_STATE_CLOSED;
- rem_ref_cm_node(cm_node->cm_core, cm_node);
- break;
- case NES_CM_STATE_TSA:
- default:
- nes_debug(NES_DBG_CM, "Error Rcvd FIN for node-%p state = %d\n",
- cm_node, cm_node->state);
- break;
- }
-}
-
-
-static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
- struct tcphdr *tcph)
-{
-
- int reset = 0; /* whether to send reset in case of err.. */
- atomic_inc(&cm_resets_recvd);
- nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
- " refcnt=%d\n", cm_node, cm_node->state,
- atomic_read(&cm_node->ref_count));
- cleanup_retrans_entry(cm_node);
- switch (cm_node->state) {
- case NES_CM_STATE_SYN_SENT:
- case NES_CM_STATE_MPAREQ_SENT:
- nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p "
- "listener=%p state=%d\n", __func__, __LINE__, cm_node,
- cm_node->listener, cm_node->state);
- switch (cm_node->mpa_frame_rev) {
- case IETF_MPA_V2:
- cm_node->mpa_frame_rev = IETF_MPA_V1;
- /* send a syn and goto syn sent state */
- cm_node->state = NES_CM_STATE_SYN_SENT;
- if (send_syn(cm_node, 0, NULL)) {
- active_open_err(cm_node, skb, reset);
- }
- break;
- case IETF_MPA_V1:
- default:
- active_open_err(cm_node, skb, reset);
- break;
- }
- break;
- case NES_CM_STATE_MPAREQ_RCVD:
- atomic_inc(&cm_node->passive_state);
- dev_kfree_skb_any(skb);
- break;
- case NES_CM_STATE_ESTABLISHED:
- case NES_CM_STATE_SYN_RCVD:
- case NES_CM_STATE_LISTENING:
- nes_debug(NES_DBG_CM, "Bad state %s[%u]\n", __func__, __LINE__);
- passive_open_err(cm_node, skb, reset);
- break;
- case NES_CM_STATE_TSA:
- active_open_err(cm_node, skb, reset);
- break;
- case NES_CM_STATE_CLOSED:
- drop_packet(skb);
- break;
- case NES_CM_STATE_FIN_WAIT2:
- case NES_CM_STATE_FIN_WAIT1:
- case NES_CM_STATE_LAST_ACK:
- cm_node->cm_id->rem_ref(cm_node->cm_id);
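-		/* fall through */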
- case NES_CM_STATE_TIME_WAIT:
- cm_node->state = NES_CM_STATE_CLOSED;
- rem_ref_cm_node(cm_node->cm_core, cm_node);
- drop_packet(skb);
- break;
- default:
- drop_packet(skb);
- break;
- }
-}
-
-
-static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb)
-{
- int ret = 0;
- int datasize = skb->len;
- u8 *dataloc = skb->data;
-
- enum nes_cm_event_type type = NES_CM_EVENT_UNKNOWN;
- u32 res_type;
-
- ret = parse_mpa(cm_node, dataloc, &res_type, datasize);
- if (ret) {
- nes_debug(NES_DBG_CM, "didn't like MPA Request\n");
- if (cm_node->state == NES_CM_STATE_MPAREQ_SENT) {
- nes_debug(NES_DBG_CM, "%s[%u] create abort for "
- "cm_node=%p listener=%p state=%d\n", __func__,
- __LINE__, cm_node, cm_node->listener,
- cm_node->state);
- active_open_err(cm_node, skb, 1);
- } else {
- passive_open_err(cm_node, skb, 1);
- }
- return;
- }
-
- switch (cm_node->state) {
- case NES_CM_STATE_ESTABLISHED:
- if (res_type == NES_MPA_REQUEST_REJECT)
-			/* An MPA reject must never arrive here: this is a
-			 * passive open, and a reject can only be received
-			 * on an active open. */
- WARN_ON(1);
- cm_node->state = NES_CM_STATE_MPAREQ_RCVD;
- type = NES_CM_EVENT_MPA_REQ;
- atomic_set(&cm_node->passive_state,
- NES_PASSIVE_STATE_INDICATED);
- break;
- case NES_CM_STATE_MPAREQ_SENT:
- cleanup_retrans_entry(cm_node);
- if (res_type == NES_MPA_REQUEST_REJECT) {
- type = NES_CM_EVENT_MPA_REJECT;
- cm_node->state = NES_CM_STATE_MPAREJ_RCVD;
- } else {
- type = NES_CM_EVENT_CONNECTED;
- cm_node->state = NES_CM_STATE_TSA;
- }
-
- break;
- default:
- WARN_ON(1);
- break;
- }
- dev_kfree_skb_any(skb);
- create_event(cm_node, type);
-}
-
-static void indicate_pkt_err(struct nes_cm_node *cm_node, struct sk_buff *skb)
-{
- switch (cm_node->state) {
- case NES_CM_STATE_SYN_SENT:
- case NES_CM_STATE_MPAREQ_SENT:
- nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p "
- "listener=%p state=%d\n", __func__, __LINE__, cm_node,
- cm_node->listener, cm_node->state);
- active_open_err(cm_node, skb, 1);
- break;
- case NES_CM_STATE_ESTABLISHED:
- case NES_CM_STATE_SYN_RCVD:
- passive_open_err(cm_node, skb, 1);
- break;
- case NES_CM_STATE_TSA:
- default:
- drop_packet(skb);
- }
-}
-
-static int check_syn(struct nes_cm_node *cm_node, struct tcphdr *tcph,
- struct sk_buff *skb)
-{
- int err;
-
- err = ((ntohl(tcph->ack_seq) == cm_node->tcp_cntxt.loc_seq_num)) ? 0 : 1;
- if (err)
- active_open_err(cm_node, skb, 1);
-
- return err;
-}
-
-static int check_seq(struct nes_cm_node *cm_node, struct tcphdr *tcph,
- struct sk_buff *skb)
-{
- int err = 0;
- u32 seq;
- u32 ack_seq;
- u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num;
- u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
- u32 rcv_wnd;
-
- seq = ntohl(tcph->seq);
- ack_seq = ntohl(tcph->ack_seq);
- rcv_wnd = cm_node->tcp_cntxt.rcv_wnd;
- if (ack_seq != loc_seq_num)
- err = 1;
- else if (!between(seq, rcv_nxt, (rcv_nxt + rcv_wnd)))
- err = 1;
- if (err) {
- nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p "
- "listener=%p state=%d\n", __func__, __LINE__, cm_node,
- cm_node->listener, cm_node->state);
- indicate_pkt_err(cm_node, skb);
- nes_debug(NES_DBG_CM, "seq ERROR cm_node =%p seq=0x%08X "
- "rcv_nxt=0x%08X rcv_wnd=0x%x\n", cm_node, seq, rcv_nxt,
- rcv_wnd);
- }
- return err;
-}
-
-/*
- * handle_syn_pkt() is for the passive node. The SYN packet is received when a
- * node is created with a listener, or it may come in as a retransmitted packet,
- * in which case it is simply dropped.
- */
-static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
- struct tcphdr *tcph)
-{
- int ret;
- u32 inc_sequence;
- int optionsize;
-
- optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
- skb_trim(skb, 0);
- inc_sequence = ntohl(tcph->seq);
-
- switch (cm_node->state) {
- case NES_CM_STATE_SYN_SENT:
- case NES_CM_STATE_MPAREQ_SENT:
- /* Rcvd syn on active open connection*/
- active_open_err(cm_node, skb, 1);
- break;
- case NES_CM_STATE_LISTENING:
- /* Passive OPEN */
- if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
- cm_node->listener->backlog) {
- nes_debug(NES_DBG_CM, "drop syn due to backlog "
- "pressure \n");
- cm_backlog_drops++;
- passive_open_err(cm_node, skb, 0);
- break;
- }
- ret = handle_tcp_options(cm_node, tcph, skb, optionsize,
- 1);
- if (ret) {
- passive_open_err(cm_node, skb, 0);
- /* drop pkt */
- break;
- }
- cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
- BUG_ON(cm_node->send_entry);
- cm_node->accept_pend = 1;
- atomic_inc(&cm_node->listener->pend_accepts_cnt);
-
- cm_node->state = NES_CM_STATE_SYN_RCVD;
- send_syn(cm_node, 1, skb);
- break;
- case NES_CM_STATE_CLOSED:
- cleanup_retrans_entry(cm_node);
- add_ref_cm_node(cm_node);
- send_reset(cm_node, skb);
- break;
- case NES_CM_STATE_TSA:
- case NES_CM_STATE_ESTABLISHED:
- case NES_CM_STATE_FIN_WAIT1:
- case NES_CM_STATE_FIN_WAIT2:
- case NES_CM_STATE_MPAREQ_RCVD:
- case NES_CM_STATE_LAST_ACK:
- case NES_CM_STATE_CLOSING:
- case NES_CM_STATE_UNKNOWN:
- default:
- drop_packet(skb);
- break;
- }
-}
-
-static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
- struct tcphdr *tcph)
-{
- int ret;
- u32 inc_sequence;
- int optionsize;
-
- optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
- skb_trim(skb, 0);
- inc_sequence = ntohl(tcph->seq);
- switch (cm_node->state) {
- case NES_CM_STATE_SYN_SENT:
- cleanup_retrans_entry(cm_node);
- /* active open */
- if (check_syn(cm_node, tcph, skb))
- return;
- cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
- /* setup options */
- ret = handle_tcp_options(cm_node, tcph, skb, optionsize, 0);
- if (ret) {
- nes_debug(NES_DBG_CM, "cm_node=%p tcp_options failed\n",
- cm_node);
- break;
- }
- cleanup_retrans_entry(cm_node);
- cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
- send_mpa_request(cm_node, skb);
- cm_node->state = NES_CM_STATE_MPAREQ_SENT;
- break;
- case NES_CM_STATE_MPAREQ_RCVD:
- /* passive open, so should not be here */
- passive_open_err(cm_node, skb, 1);
- break;
- case NES_CM_STATE_LISTENING:
- cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
- cleanup_retrans_entry(cm_node);
- cm_node->state = NES_CM_STATE_CLOSED;
- send_reset(cm_node, skb);
- break;
- case NES_CM_STATE_CLOSED:
- cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
- cleanup_retrans_entry(cm_node);
- add_ref_cm_node(cm_node);
- send_reset(cm_node, skb);
- break;
- case NES_CM_STATE_ESTABLISHED:
- case NES_CM_STATE_FIN_WAIT1:
- case NES_CM_STATE_FIN_WAIT2:
- case NES_CM_STATE_LAST_ACK:
- case NES_CM_STATE_TSA:
- case NES_CM_STATE_CLOSING:
- case NES_CM_STATE_UNKNOWN:
- case NES_CM_STATE_MPAREQ_SENT:
- default:
- drop_packet(skb);
- break;
- }
-}
-
-static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
- struct tcphdr *tcph)
-{
- int datasize = 0;
- u32 inc_sequence;
- int ret = 0;
- int optionsize;
-
- optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
-
- if (check_seq(cm_node, tcph, skb))
- return -EINVAL;
-
- skb_pull(skb, tcph->doff << 2);
- inc_sequence = ntohl(tcph->seq);
- datasize = skb->len;
- switch (cm_node->state) {
- case NES_CM_STATE_SYN_RCVD:
- /* Passive OPEN */
- cleanup_retrans_entry(cm_node);
- ret = handle_tcp_options(cm_node, tcph, skb, optionsize, 1);
- if (ret)
- break;
- cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
- cm_node->state = NES_CM_STATE_ESTABLISHED;
- if (datasize) {
- cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
- nes_get_remote_addr(cm_node);
- handle_rcv_mpa(cm_node, skb);
- } else { /* rcvd ACK only */
- dev_kfree_skb_any(skb);
- }
- break;
- case NES_CM_STATE_ESTABLISHED:
- /* Passive OPEN */
- cleanup_retrans_entry(cm_node);
- if (datasize) {
- cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
- handle_rcv_mpa(cm_node, skb);
- } else {
- drop_packet(skb);
- }
- break;
- case NES_CM_STATE_MPAREQ_SENT:
- cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
- if (datasize) {
- cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
- handle_rcv_mpa(cm_node, skb);
- } else { /* Could be just an ack pkt.. */
- dev_kfree_skb_any(skb);
- }
- break;
- case NES_CM_STATE_LISTENING:
- cleanup_retrans_entry(cm_node);
- cm_node->state = NES_CM_STATE_CLOSED;
- send_reset(cm_node, skb);
- break;
- case NES_CM_STATE_CLOSED:
- cleanup_retrans_entry(cm_node);
- add_ref_cm_node(cm_node);
- send_reset(cm_node, skb);
- break;
- case NES_CM_STATE_LAST_ACK:
- case NES_CM_STATE_CLOSING:
- cleanup_retrans_entry(cm_node);
- cm_node->state = NES_CM_STATE_CLOSED;
- cm_node->cm_id->rem_ref(cm_node->cm_id);
- rem_ref_cm_node(cm_node->cm_core, cm_node);
- drop_packet(skb);
- break;
- case NES_CM_STATE_FIN_WAIT1:
- cleanup_retrans_entry(cm_node);
- drop_packet(skb);
- cm_node->state = NES_CM_STATE_FIN_WAIT2;
- break;
- case NES_CM_STATE_SYN_SENT:
- case NES_CM_STATE_FIN_WAIT2:
- case NES_CM_STATE_TSA:
- case NES_CM_STATE_MPAREQ_RCVD:
- case NES_CM_STATE_UNKNOWN:
- default:
- cleanup_retrans_entry(cm_node);
- drop_packet(skb);
- break;
- }
- return ret;
-}
-
-
-
-static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph,
- struct sk_buff *skb, int optionsize, int passive)
-{
- u8 *optionsloc = (u8 *)&tcph[1];
-
- if (optionsize) {
- if (process_options(cm_node, optionsloc, optionsize,
- (u32)tcph->syn)) {
- nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n",
- __func__, cm_node);
- if (passive)
- passive_open_err(cm_node, skb, 1);
- else
- active_open_err(cm_node, skb, 1);
- return 1;
- }
- }
-
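-	/* scale the peer's advertised window by its window-scale shift */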
- cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) <<
- cm_node->tcp_cntxt.snd_wscale;
-
- if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd)
- cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;
- return 0;
-}
-
-/*
- * active_open_err() sends a reset if the reset flag is set and always
- * generates an ABORTED event.
- */
-static void active_open_err(struct nes_cm_node *cm_node, struct sk_buff *skb,
- int reset)
-{
- cleanup_retrans_entry(cm_node);
- if (reset) {
- nes_debug(NES_DBG_CM, "ERROR active err called for cm_node=%p, "
- "state=%d\n", cm_node, cm_node->state);
- add_ref_cm_node(cm_node);
- send_reset(cm_node, skb);
- } else {
- dev_kfree_skb_any(skb);
- }
-
- cm_node->state = NES_CM_STATE_CLOSED;
- create_event(cm_node, NES_CM_EVENT_ABORTED);
-}
-
-/*
- * passive_open_err() will either do a reset() or will free up the skb and
- * remove the cm_node.
- */
-static void passive_open_err(struct nes_cm_node *cm_node, struct sk_buff *skb,
- int reset)
-{
- cleanup_retrans_entry(cm_node);
- cm_node->state = NES_CM_STATE_CLOSED;
- if (reset) {
- nes_debug(NES_DBG_CM, "passive_open_err sending RST for "
- "cm_node=%p state =%d\n", cm_node, cm_node->state);
- send_reset(cm_node, skb);
- } else {
- dev_kfree_skb_any(skb);
- rem_ref_cm_node(cm_node->cm_core, cm_node);
- }
-}
-
-/*
- * free_retrans_entry() assumes that the retrans_list_lock has been
- * acquired before it is called.
- */
-static void free_retrans_entry(struct nes_cm_node *cm_node)
-{
- struct nes_timer_entry *send_entry;
-
- send_entry = cm_node->send_entry;
- if (send_entry) {
- cm_node->send_entry = NULL;
- dev_kfree_skb_any(send_entry->skb);
- kfree(send_entry);
- rem_ref_cm_node(cm_node->cm_core, cm_node);
- }
-}
-
-static void cleanup_retrans_entry(struct nes_cm_node *cm_node)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
- free_retrans_entry(cm_node);
- spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
-}
-
-/**
- * process_packet - classify the TCP segment and dispatch the skb to the
- * state-specific handler; the handler consumes (reuses or frees) the skb.
- */
-static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
- struct nes_cm_core *cm_core)
-{
- enum nes_tcpip_pkt_type pkt_type = NES_PKT_TYPE_UNKNOWN;
- struct tcphdr *tcph = tcp_hdr(skb);
- u32 fin_set = 0;
- int ret = 0;
-
- skb_pull(skb, ip_hdr(skb)->ihl << 2);
-
- nes_debug(NES_DBG_CM, "process_packet: cm_node=%p state =%d syn=%d "
- "ack=%d rst=%d fin=%d\n", cm_node, cm_node->state, tcph->syn,
- tcph->ack, tcph->rst, tcph->fin);
-
- if (tcph->rst) {
- pkt_type = NES_PKT_TYPE_RST;
- } else if (tcph->syn) {
- pkt_type = NES_PKT_TYPE_SYN;
- if (tcph->ack)
- pkt_type = NES_PKT_TYPE_SYNACK;
- } else if (tcph->ack) {
- pkt_type = NES_PKT_TYPE_ACK;
- }
- if (tcph->fin)
- fin_set = 1;
-
- switch (pkt_type) {
- case NES_PKT_TYPE_SYN:
- handle_syn_pkt(cm_node, skb, tcph);
- break;
- case NES_PKT_TYPE_SYNACK:
- handle_synack_pkt(cm_node, skb, tcph);
- break;
- case NES_PKT_TYPE_ACK:
- ret = handle_ack_pkt(cm_node, skb, tcph);
- if (fin_set && !ret)
- handle_fin_pkt(cm_node);
- break;
- case NES_PKT_TYPE_RST:
- handle_rst_pkt(cm_node, skb, tcph);
- break;
- default:
- if ((fin_set) && (!check_seq(cm_node, tcph, skb)))
- handle_fin_pkt(cm_node);
- drop_packet(skb);
- break;
- }
-}
-
-/**
- * mini_cm_listen - create a listen node with params
- */
-static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
- struct nes_vnic *nesvnic, struct nes_cm_info *cm_info)
-{
- struct nes_cm_listener *listener;
- struct iwpm_dev_data pm_reg_msg;
- struct iwpm_sa_data pm_msg;
- unsigned long flags;
- int iwpm_err = 0;
-
- nes_debug(NES_DBG_CM, "Search for 0x%08x : 0x%04x\n",
- cm_info->loc_addr, cm_info->loc_port);
-
- /* cannot have multiple matching listeners */
- listener = find_listener(cm_core, cm_info->loc_addr, cm_info->loc_port,
- NES_CM_LISTENER_EITHER_STATE, 1);
-
- if (listener && listener->listener_state == NES_CM_LISTENER_ACTIVE_STATE) {
-		/* find_listener already incremented the ref count; drop it here */
- atomic_dec(&listener->ref_count);
- nes_debug(NES_DBG_CM, "Not creating listener since it already exists\n");
- return NULL;
- }
-
- if (!listener) {
- nes_form_reg_msg(nesvnic, &pm_reg_msg);
- iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_NES);
- if (iwpm_err) {
- nes_debug(NES_DBG_NLMSG,
- "Port Mapper reg pid fail (err = %d).\n", iwpm_err);
- }
- if (iwpm_valid_pid() && !iwpm_err) {
- nes_form_pm_msg(cm_info, &pm_msg);
- iwpm_err = iwpm_add_mapping(&pm_msg, RDMA_NL_NES);
- if (iwpm_err)
- nes_debug(NES_DBG_NLMSG,
- "Port Mapper query fail (err = %d).\n", iwpm_err);
- else
- nes_record_pm_msg(cm_info, &pm_msg);
- }
-
- /* create a CM listen node (1/2 node to compare incoming traffic to) */
- listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
- if (!listener) {
- nes_debug(NES_DBG_CM, "Not creating listener memory allocation failed\n");
- return NULL;
- }
-
- listener->loc_addr = cm_info->loc_addr;
- listener->loc_port = cm_info->loc_port;
- listener->mapped_loc_addr = cm_info->mapped_loc_addr;
- listener->mapped_loc_port = cm_info->mapped_loc_port;
- listener->reused_node = 0;
-
- atomic_set(&listener->ref_count, 1);
- }
-	/* passive case: find_listener already incremented the ref count */
- else {
- listener->reused_node = 1;
- }
-
- listener->cm_id = cm_info->cm_id;
- atomic_set(&listener->pend_accepts_cnt, 0);
- listener->cm_core = cm_core;
- listener->nesvnic = nesvnic;
- atomic_inc(&cm_core->node_cnt);
-
- listener->conn_type = cm_info->conn_type;
- listener->backlog = cm_info->backlog;
- listener->listener_state = NES_CM_LISTENER_ACTIVE_STATE;
-
- if (!listener->reused_node) {
- spin_lock_irqsave(&cm_core->listen_list_lock, flags);
- list_add(&listener->list, &cm_core->listen_list.list);
- spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
- atomic_inc(&cm_core->listen_node_cnt);
- }
-
- nes_debug(NES_DBG_CM, "Api - listen(): addr=0x%08X, port=0x%04x,"
- " listener = %p, backlog = %d, cm_id = %p.\n",
- cm_info->loc_addr, cm_info->loc_port,
- listener, listener->backlog, listener->cm_id);
-
- return listener;
-}
-
-
-/**
- * mini_cm_connect - make a connection node with params
- */
-static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
- struct nes_vnic *nesvnic, u16 private_data_len,
- void *private_data, struct nes_cm_info *cm_info)
-{
- int ret = 0;
- struct nes_cm_node *cm_node;
- struct nes_cm_listener *loopbackremotelistener;
- struct nes_cm_node *loopbackremotenode;
- struct nes_cm_info loopback_cm_info;
- u8 *start_buff;
-
- /* create a CM connection node */
- cm_node = make_cm_node(cm_core, nesvnic, cm_info, NULL);
- if (!cm_node)
- return NULL;
-
- /* set our node side to client (active) side */
- cm_node->tcp_cntxt.client = 1;
- cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE;
-
- if (cm_info->loc_addr == cm_info->rem_addr) {
- loopbackremotelistener = find_listener(cm_core,
- cm_node->mapped_loc_addr, cm_node->mapped_rem_port,
- NES_CM_LISTENER_ACTIVE_STATE, 0);
- if (loopbackremotelistener == NULL) {
- create_event(cm_node, NES_CM_EVENT_ABORTED);
- } else {
- loopback_cm_info = *cm_info;
- loopback_cm_info.loc_port = cm_info->rem_port;
- loopback_cm_info.rem_port = cm_info->loc_port;
- loopback_cm_info.mapped_loc_port =
- cm_info->mapped_rem_port;
- loopback_cm_info.mapped_rem_port =
- cm_info->mapped_loc_port;
- loopback_cm_info.cm_id = loopbackremotelistener->cm_id;
- loopbackremotenode = make_cm_node(cm_core, nesvnic,
- &loopback_cm_info, loopbackremotelistener);
- if (!loopbackremotenode) {
- rem_ref_cm_node(cm_node->cm_core, cm_node);
- return NULL;
- }
- atomic_inc(&cm_loopbacks);
- loopbackremotenode->loopbackpartner = cm_node;
- loopbackremotenode->tcp_cntxt.rcv_wscale =
- NES_CM_DEFAULT_RCV_WND_SCALE;
- cm_node->loopbackpartner = loopbackremotenode;
- memcpy(loopbackremotenode->mpa_frame_buf, private_data,
- private_data_len);
- loopbackremotenode->mpa_frame_size = private_data_len;
-
- /* we are done handling this state. */
- /* set node to a TSA state */
- cm_node->state = NES_CM_STATE_TSA;
- cm_node->tcp_cntxt.rcv_nxt =
- loopbackremotenode->tcp_cntxt.loc_seq_num;
- loopbackremotenode->tcp_cntxt.rcv_nxt =
- cm_node->tcp_cntxt.loc_seq_num;
- cm_node->tcp_cntxt.max_snd_wnd =
- loopbackremotenode->tcp_cntxt.rcv_wnd;
- loopbackremotenode->tcp_cntxt.max_snd_wnd =
- cm_node->tcp_cntxt.rcv_wnd;
- cm_node->tcp_cntxt.snd_wnd =
- loopbackremotenode->tcp_cntxt.rcv_wnd;
- loopbackremotenode->tcp_cntxt.snd_wnd =
- cm_node->tcp_cntxt.rcv_wnd;
- cm_node->tcp_cntxt.snd_wscale =
- loopbackremotenode->tcp_cntxt.rcv_wscale;
- loopbackremotenode->tcp_cntxt.snd_wscale =
- cm_node->tcp_cntxt.rcv_wscale;
- loopbackremotenode->state = NES_CM_STATE_MPAREQ_RCVD;
- create_event(loopbackremotenode, NES_CM_EVENT_MPA_REQ);
- }
- return cm_node;
- }
-
- start_buff = &cm_node->mpa_frame_buf[0] + sizeof(struct ietf_mpa_v2);
- cm_node->mpa_frame_size = private_data_len;
-
- memcpy(start_buff, private_data, private_data_len);
-
- /* send a syn and goto syn sent state */
- cm_node->state = NES_CM_STATE_SYN_SENT;
- ret = send_syn(cm_node, 0, NULL);
-
- if (ret) {
-		/* error sending the SYN; free up the cm_node struct */
- nes_debug(NES_DBG_CM, "Api - connect() FAILED: dest "
- "addr=0x%08X, port=0x%04x, cm_node=%p, cm_id = %p.\n",
- cm_node->rem_addr, cm_node->rem_port, cm_node,
- cm_node->cm_id);
- rem_ref_cm_node(cm_node->cm_core, cm_node);
- cm_node = NULL;
- }
-
- if (cm_node) {
- nes_debug(NES_DBG_CM, "Api - connect(): dest addr=0x%08X,"
- "port=0x%04x, cm_node=%p, cm_id = %p.\n",
- cm_node->rem_addr, cm_node->rem_port, cm_node,
- cm_node->cm_id);
- }
-
- return cm_node;
-}
-
-
-/**
- * mini_cm_accept - accept a connection
- * This function is never called
- */
-static int mini_cm_accept(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
-{
- return 0;
-}
-
-
-/**
- * mini_cm_reject - reject and teardown a connection
- */
-static int mini_cm_reject(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
-{
- int ret = 0;
- int err = 0;
- int passive_state;
- struct nes_cm_event event;
- struct iw_cm_id *cm_id = cm_node->cm_id;
- struct nes_cm_node *loopback = cm_node->loopbackpartner;
-
- nes_debug(NES_DBG_CM, "%s cm_node=%p type=%d state=%d\n",
- __func__, cm_node, cm_node->tcp_cntxt.client, cm_node->state);
-
- if (cm_node->tcp_cntxt.client)
- return ret;
- cleanup_retrans_entry(cm_node);
-
- if (!loopback) {
- passive_state = atomic_add_return(1, &cm_node->passive_state);
- if (passive_state == NES_SEND_RESET_EVENT) {
- cm_node->state = NES_CM_STATE_CLOSED;
- rem_ref_cm_node(cm_core, cm_node);
- } else {
- if (cm_node->state == NES_CM_STATE_LISTENER_DESTROYED) {
- rem_ref_cm_node(cm_core, cm_node);
- } else {
- ret = send_mpa_reject(cm_node);
- if (ret) {
- cm_node->state = NES_CM_STATE_CLOSED;
- err = send_reset(cm_node, NULL);
- if (err)
- WARN_ON(1);
- } else {
- cm_id->add_ref(cm_id);
- }
- }
- }
- } else {
- cm_node->cm_id = NULL;
- if (cm_node->state == NES_CM_STATE_LISTENER_DESTROYED) {
- rem_ref_cm_node(cm_core, cm_node);
- rem_ref_cm_node(cm_core, loopback);
- } else {
- event.cm_node = loopback;
- event.cm_info.rem_addr = loopback->rem_addr;
- event.cm_info.loc_addr = loopback->loc_addr;
- event.cm_info.rem_port = loopback->rem_port;
- event.cm_info.loc_port = loopback->loc_port;
- event.cm_info.cm_id = loopback->cm_id;
- cm_event_mpa_reject(&event);
- rem_ref_cm_node(cm_core, cm_node);
- loopback->state = NES_CM_STATE_CLOSING;
-
- cm_id = loopback->cm_id;
- rem_ref_cm_node(cm_core, loopback);
- cm_id->rem_ref(cm_id);
- }
- }
-
- return ret;
-}
-
-
-/**
- * mini_cm_close - close a cm node, taking the action appropriate to its current state
- */
-static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
-{
- int ret = 0;
-
- if (!cm_core || !cm_node)
- return -EINVAL;
-
- switch (cm_node->state) {
- case NES_CM_STATE_SYN_RCVD:
- case NES_CM_STATE_SYN_SENT:
- case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
- case NES_CM_STATE_ESTABLISHED:
- case NES_CM_STATE_ACCEPTING:
- case NES_CM_STATE_MPAREQ_SENT:
- case NES_CM_STATE_MPAREQ_RCVD:
- cleanup_retrans_entry(cm_node);
- send_reset(cm_node, NULL);
- break;
- case NES_CM_STATE_CLOSE_WAIT:
- cm_node->state = NES_CM_STATE_LAST_ACK;
- send_fin(cm_node, NULL);
- break;
- case NES_CM_STATE_FIN_WAIT1:
- case NES_CM_STATE_FIN_WAIT2:
- case NES_CM_STATE_LAST_ACK:
- case NES_CM_STATE_TIME_WAIT:
- case NES_CM_STATE_CLOSING:
- ret = -1;
- break;
- case NES_CM_STATE_LISTENING:
- cleanup_retrans_entry(cm_node);
- send_reset(cm_node, NULL);
- break;
- case NES_CM_STATE_MPAREJ_RCVD:
- case NES_CM_STATE_UNKNOWN:
- case NES_CM_STATE_INITED:
- case NES_CM_STATE_CLOSED:
- case NES_CM_STATE_LISTENER_DESTROYED:
- ret = rem_ref_cm_node(cm_core, cm_node);
- break;
- case NES_CM_STATE_TSA:
- if (cm_node->send_entry)
- printk(KERN_ERR "ERROR Close got called from STATE_TSA "
- "send_entry=%p\n", cm_node->send_entry);
- ret = rem_ref_cm_node(cm_core, cm_node);
- break;
- }
- return ret;
-}
-
-
-/**
- * mini_cm_recv_pkt - receive an Ethernet packet and run it through the
- * CM node state machine
- */
-static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
- struct nes_vnic *nesvnic, struct sk_buff *skb)
-{
- struct nes_cm_node *cm_node = NULL;
- struct nes_cm_listener *listener = NULL;
- struct iphdr *iph;
- struct tcphdr *tcph;
- struct nes_cm_info nfo;
- int skb_handled = 1;
- __be32 tmp_daddr, tmp_saddr;
-
- if (!skb)
- return 0;
- if (skb->len < sizeof(struct iphdr) + sizeof(struct tcphdr))
- return 0;
-
- iph = (struct iphdr *)skb->data;
- tcph = (struct tcphdr *)(skb->data + sizeof(struct iphdr));
-
- nfo.loc_addr = ntohl(iph->daddr);
- nfo.loc_port = ntohs(tcph->dest);
- nfo.rem_addr = ntohl(iph->saddr);
- nfo.rem_port = ntohs(tcph->source);
-
- /* If port mapper is available these should be mapped address info */
- nfo.mapped_loc_addr = ntohl(iph->daddr);
- nfo.mapped_loc_port = ntohs(tcph->dest);
- nfo.mapped_rem_addr = ntohl(iph->saddr);
- nfo.mapped_rem_port = ntohs(tcph->source);
-
- tmp_daddr = cpu_to_be32(iph->daddr);
- tmp_saddr = cpu_to_be32(iph->saddr);
-
- nes_debug(NES_DBG_CM, "Received packet: dest=%pI4:0x%04X src=%pI4:0x%04X\n",
- &tmp_daddr, tcph->dest, &tmp_saddr, tcph->source);
-
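-	/* single-pass loop so the lookup path can bail out early with break */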
- do {
- cm_node = find_node(cm_core,
- nfo.mapped_rem_port, nfo.mapped_rem_addr,
- nfo.mapped_loc_port, nfo.mapped_loc_addr);
-
- if (!cm_node) {
-			/* The only packet accepted without an existing node
-			 * is one for a passive open (SYN only). */
- if ((!tcph->syn) || (tcph->ack)) {
- skb_handled = 0;
- break;
- }
- listener = find_listener(cm_core, nfo.mapped_loc_addr,
- nfo.mapped_loc_port,
- NES_CM_LISTENER_ACTIVE_STATE, 0);
- if (!listener) {
- nfo.cm_id = NULL;
- nfo.conn_type = 0;
- nes_debug(NES_DBG_CM, "Unable to find listener for the pkt\n");
- skb_handled = 0;
- break;
- }
- nfo.cm_id = listener->cm_id;
- nfo.conn_type = listener->conn_type;
- cm_node = make_cm_node(cm_core, nesvnic, &nfo,
- listener);
- if (!cm_node) {
- nes_debug(NES_DBG_CM, "Unable to allocate "
- "node\n");
- cm_packets_dropped++;
- atomic_dec(&listener->ref_count);
- dev_kfree_skb_any(skb);
- break;
- }
- if (!tcph->rst && !tcph->fin) {
- cm_node->state = NES_CM_STATE_LISTENING;
- } else {
- cm_packets_dropped++;
- rem_ref_cm_node(cm_core, cm_node);
- dev_kfree_skb_any(skb);
- break;
- }
- add_ref_cm_node(cm_node);
- } else if (cm_node->state == NES_CM_STATE_TSA) {
- if (cm_node->nesqp->pau_mode)
- nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
- else {
- rem_ref_cm_node(cm_core, cm_node);
- atomic_inc(&cm_accel_dropped_pkts);
- dev_kfree_skb_any(skb);
- }
- break;
- }
- skb_reset_network_header(skb);
- skb_set_transport_header(skb, sizeof(*tcph));
- skb->len = ntohs(iph->tot_len);
- process_packet(cm_node, skb, cm_core);
- rem_ref_cm_node(cm_core, cm_node);
- } while (0);
- return skb_handled;
-}
-
-
-/**
- * nes_cm_alloc_core - allocate a top level instance of a cm core
- */
-static struct nes_cm_core *nes_cm_alloc_core(void)
-{
- struct nes_cm_core *cm_core;
-
- /* setup the CM core */
- /* alloc top level core control structure */
- cm_core = kzalloc(sizeof(*cm_core), GFP_KERNEL);
- if (!cm_core)
- return NULL;
-
- INIT_LIST_HEAD(&cm_core->connected_nodes);
- init_timer(&cm_core->tcp_timer);
- cm_core->tcp_timer.function = nes_cm_timer_tick;
-
- cm_core->mtu = NES_CM_DEFAULT_MTU;
- cm_core->state = NES_CM_STATE_INITED;
- cm_core->free_tx_pkt_max = NES_CM_DEFAULT_FREE_PKTS;
-
- atomic_set(&cm_core->events_posted, 0);
-
- cm_core->api = &nes_cm_api;
-
- spin_lock_init(&cm_core->ht_lock);
- spin_lock_init(&cm_core->listen_list_lock);
-
- INIT_LIST_HEAD(&cm_core->listen_list.list);
-
- nes_debug(NES_DBG_CM, "Init CM Core completed -- cm_core=%p\n", cm_core);
-
- nes_debug(NES_DBG_CM, "Enable QUEUE EVENTS\n");
- cm_core->event_wq = create_singlethread_workqueue("nesewq");
- cm_core->post_event = nes_cm_post_event;
- nes_debug(NES_DBG_CM, "Enable QUEUE DISCONNECTS\n");
- cm_core->disconn_wq = create_singlethread_workqueue("nesdwq");
-
- print_core(cm_core);
- return cm_core;
-}
-
-
-/**
- * mini_cm_dealloc_core - deallocate a top level instance of a cm core
- */
-static int mini_cm_dealloc_core(struct nes_cm_core *cm_core)
-{
- nes_debug(NES_DBG_CM, "De-Alloc CM Core (%p)\n", cm_core);
-
- if (!cm_core)
- return -EINVAL;
-
- barrier();
-
- if (timer_pending(&cm_core->tcp_timer))
- del_timer(&cm_core->tcp_timer);
-
- destroy_workqueue(cm_core->event_wq);
- destroy_workqueue(cm_core->disconn_wq);
- nes_debug(NES_DBG_CM, "\n");
- kfree(cm_core);
-
- return 0;
-}
-
-
-/**
- * mini_cm_get - return the current state of the cm core
- */
-static int mini_cm_get(struct nes_cm_core *cm_core)
-{
- return cm_core->state;
-}
-
-
-/**
- * mini_cm_set - set a cm core tunable (packet size or free packet queue size)
- */
-static int mini_cm_set(struct nes_cm_core *cm_core, u32 type, u32 value)
-{
- int ret = 0;
-
- switch (type) {
- case NES_CM_SET_PKT_SIZE:
- cm_core->mtu = value;
- break;
- case NES_CM_SET_FREE_PKT_Q_SIZE:
- cm_core->free_tx_pkt_max = value;
- break;
- default:
- /* unknown set option */
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-
-/**
- * nes_cm_init_tsa_conn - set up the HW QP context; MPA frames must have been
- * successfully exchanged before this is called
- */
-static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_node)
-{
- int ret = 0;
-
- if (!nesqp)
- return -EINVAL;
-
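-	/* program the hardware QP context from the negotiated TCP connection state */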
- nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_IPV4 |
- NES_QPCONTEXT_MISC_NO_NAGLE | NES_QPCONTEXT_MISC_DO_NOT_FRAG |
- NES_QPCONTEXT_MISC_DROS);
-
- if (cm_node->tcp_cntxt.snd_wscale || cm_node->tcp_cntxt.rcv_wscale)
- nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_WSCALE);
-
- nesqp->nesqp_context->misc2 |= cpu_to_le32(64 << NES_QPCONTEXT_MISC2_TTL_SHIFT);
-
- nesqp->nesqp_context->misc2 |= cpu_to_le32(
- cm_node->tos << NES_QPCONTEXT_MISC2_TOS_SHIFT);
-
- nesqp->nesqp_context->mss |= cpu_to_le32(((u32)cm_node->tcp_cntxt.mss) << 16);
-
- nesqp->nesqp_context->tcp_state_flow_label |= cpu_to_le32(
- (u32)NES_QPCONTEXT_TCPSTATE_EST << NES_QPCONTEXT_TCPFLOW_TCP_STATE_SHIFT);
-
- nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32(
- (cm_node->tcp_cntxt.snd_wscale << NES_QPCONTEXT_PDWSCALE_SND_WSCALE_SHIFT) &
- NES_QPCONTEXT_PDWSCALE_SND_WSCALE_MASK);
-
- nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32(
- (cm_node->tcp_cntxt.rcv_wscale << NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_SHIFT) &
- NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_MASK);
-
- nesqp->nesqp_context->keepalive = cpu_to_le32(0x80);
- nesqp->nesqp_context->ts_recent = 0;
- nesqp->nesqp_context->ts_age = 0;
- nesqp->nesqp_context->snd_nxt = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
- nesqp->nesqp_context->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd);
- nesqp->nesqp_context->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
- nesqp->nesqp_context->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd <<
- cm_node->tcp_cntxt.rcv_wscale);
- nesqp->nesqp_context->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
- nesqp->nesqp_context->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
- nesqp->nesqp_context->srtt = 0;
- nesqp->nesqp_context->rttvar = cpu_to_le32(0x6);
- nesqp->nesqp_context->ssthresh = cpu_to_le32(0x3FFFC000);
- nesqp->nesqp_context->cwnd = cpu_to_le32(2 * cm_node->tcp_cntxt.mss);
- nesqp->nesqp_context->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
- nesqp->nesqp_context->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
- nesqp->nesqp_context->max_snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd);
-
- nes_debug(NES_DBG_CM, "QP%u: rcv_nxt = 0x%08X, snd_nxt = 0x%08X,"
- " Setting MSS to %u, PDWscale = 0x%08X, rcv_wnd = %u, context misc = 0x%08X.\n",
- nesqp->hwqp.qp_id, le32_to_cpu(nesqp->nesqp_context->rcv_nxt),
- le32_to_cpu(nesqp->nesqp_context->snd_nxt),
- cm_node->tcp_cntxt.mss, le32_to_cpu(nesqp->nesqp_context->pd_index_wscale),
- le32_to_cpu(nesqp->nesqp_context->rcv_wnd),
- le32_to_cpu(nesqp->nesqp_context->misc));
- nes_debug(NES_DBG_CM, " snd_wnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->snd_wnd));
- nes_debug(NES_DBG_CM, " snd_cwnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->cwnd));
- nes_debug(NES_DBG_CM, " max_swnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->max_snd_wnd));
-
- nes_debug(NES_DBG_CM, "Change cm_node state to TSA\n");
- cm_node->state = NES_CM_STATE_TSA;
-
- return ret;
-}
-
-
-/**
- * nes_cm_disconn - queue disconnect processing for a QP on the disconnect workqueue
- */
-int nes_cm_disconn(struct nes_qp *nesqp)
-{
- struct disconn_work *work;
-
- work = kzalloc(sizeof *work, GFP_ATOMIC);
- if (!work)
- return -ENOMEM; /* Timer will clean up */
-
- nes_add_ref(&nesqp->ibqp);
- work->nesqp = nesqp;
- INIT_WORK(&work->work, nes_disconnect_worker);
- queue_work(g_cm_core->disconn_wq, &work->work);
- return 0;
-}
-
-
-/**
- * nes_disconnect_worker - workqueue handler that performs the deferred disconnect
- */
-static void nes_disconnect_worker(struct work_struct *work)
-{
- struct disconn_work *dwork = container_of(work, struct disconn_work, work);
- struct nes_qp *nesqp = dwork->nesqp;
-
- kfree(dwork);
- nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n",
- nesqp->last_aeq, nesqp->hwqp.qp_id);
- nes_cm_disconn_true(nesqp);
- nes_rem_ref(&nesqp->ibqp);
-}
-
-
-/**
- * nes_cm_disconn_true
- */
-static int nes_cm_disconn_true(struct nes_qp *nesqp)
-{
- unsigned long flags;
- int ret = 0;
- struct iw_cm_id *cm_id;
- struct iw_cm_event cm_event;
- struct nes_vnic *nesvnic;
- u16 last_ae;
- u8 original_hw_tcp_state;
- u8 original_ibqp_state;
- int disconn_status = 0;
- int issue_disconn = 0;
- int issue_close = 0;
- int issue_flush = 0;
- u32 flush_q = NES_CQP_FLUSH_RQ;
- struct ib_event ibevent;
-
- if (!nesqp) {
- nes_debug(NES_DBG_CM, "disconnect_worker nesqp is NULL\n");
- return -1;
- }
-
- spin_lock_irqsave(&nesqp->lock, flags);
- cm_id = nesqp->cm_id;
-	/* make sure we haven't already closed this connection */
- if (!cm_id) {
- nes_debug(NES_DBG_CM, "QP%u disconnect_worker cmid is NULL\n",
- nesqp->hwqp.qp_id);
- spin_unlock_irqrestore(&nesqp->lock, flags);
- return -1;
- }
-
- nesvnic = to_nesvnic(nesqp->ibqp.device);
- nes_debug(NES_DBG_CM, "Disconnecting QP%u\n", nesqp->hwqp.qp_id);
-
- original_hw_tcp_state = nesqp->hw_tcp_state;
- original_ibqp_state = nesqp->ibqp_state;
- last_ae = nesqp->last_aeq;
-
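-	/* a terminate in progress requires both a disconnect and a close event */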
- if (nesqp->term_flags) {
- issue_disconn = 1;
- issue_close = 1;
- nesqp->cm_id = NULL;
- del_timer(&nesqp->terminate_timer);
- if (nesqp->flush_issued == 0) {
- nesqp->flush_issued = 1;
- issue_flush = 1;
- }
- } else if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
- ((original_ibqp_state == IB_QPS_RTS) &&
- (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
- issue_disconn = 1;
- if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET)
- disconn_status = -ECONNRESET;
- }
-
- if (((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
- (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) ||
- (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) ||
- (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
- issue_close = 1;
- nesqp->cm_id = NULL;
- if (nesqp->flush_issued == 0) {
- nesqp->flush_issued = 1;
- issue_flush = 1;
- }
- }
-
- spin_unlock_irqrestore(&nesqp->lock, flags);
-
- if ((issue_flush) && (nesqp->destroyed == 0)) {
- /* Flush the queue(s) */
- if (nesqp->hw_iwarp_state >= NES_AEQE_IWARP_STATE_TERMINATE)
- flush_q |= NES_CQP_FLUSH_SQ;
- flush_wqes(nesvnic->nesdev, nesqp, flush_q, 1);
-
- if (nesqp->term_flags) {
- ibevent.device = nesqp->ibqp.device;
- ibevent.event = nesqp->terminate_eventtype;
- ibevent.element.qp = &nesqp->ibqp;
- if (nesqp->ibqp.event_handler)
- nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
- }
- }
-
- if ((cm_id) && (cm_id->event_handler)) {
- if (issue_disconn) {
- atomic_inc(&cm_disconnects);
- cm_event.event = IW_CM_EVENT_DISCONNECT;
- cm_event.status = disconn_status;
- cm_event.local_addr = cm_id->local_addr;
- cm_event.remote_addr = cm_id->remote_addr;
- cm_event.private_data = NULL;
- cm_event.private_data_len = 0;
-
- nes_debug(NES_DBG_CM, "Generating a CM Disconnect Event"
- " for QP%u, SQ Head = %u, SQ Tail = %u. "
- "cm_id = %p, refcount = %u.\n",
- nesqp->hwqp.qp_id, nesqp->hwqp.sq_head,
- nesqp->hwqp.sq_tail, cm_id,
- atomic_read(&nesqp->refcount));
-
- ret = cm_id->event_handler(cm_id, &cm_event);
- if (ret)
- nes_debug(NES_DBG_CM, "OFA CM event_handler "
- "returned, ret=%d\n", ret);
- }
-
- if (issue_close) {
- atomic_inc(&cm_closes);
- nes_disconnect(nesqp, 1);
-
- cm_id->provider_data = nesqp;
- /* Send up the close complete event */
- cm_event.event = IW_CM_EVENT_CLOSE;
- cm_event.status = 0;
- cm_event.provider_data = cm_id->provider_data;
- cm_event.local_addr = cm_id->local_addr;
- cm_event.remote_addr = cm_id->remote_addr;
- cm_event.private_data = NULL;
- cm_event.private_data_len = 0;
-
- ret = cm_id->event_handler(cm_id, &cm_event);
- if (ret)
- nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
-
- cm_id->rem_ref(cm_id);
- }
- }
-
- return 0;
-}
-
-
-/**
- * nes_disconnect
- */
-static int nes_disconnect(struct nes_qp *nesqp, int abrupt)
-{
- int ret = 0;
- struct nes_vnic *nesvnic;
- struct nes_device *nesdev;
- struct nes_ib_device *nesibdev;
-
- nesvnic = to_nesvnic(nesqp->ibqp.device);
- if (!nesvnic)
- return -EINVAL;
-
- nesdev = nesvnic->nesdev;
- nesibdev = nesvnic->nesibdev;
-
- nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
- netdev_refcnt_read(nesvnic->netdev));
-
- if (nesqp->active_conn) {
-
- /* indicate this connection is NOT active */
- nesqp->active_conn = 0;
- } else {
- /* Need to free the Last Streaming Mode Message */
- if (nesqp->ietf_frame) {
- if (nesqp->lsmm_mr)
- nesibdev->ibdev.dereg_mr(nesqp->lsmm_mr);
- pci_free_consistent(nesdev->pcidev,
- nesqp->private_data_len + nesqp->ietf_frame_size,
- nesqp->ietf_frame, nesqp->ietf_frame_pbase);
- }
- }
-
- /* close the CM node down if it is still active */
- if (nesqp->cm_node) {
- nes_debug(NES_DBG_CM, "Call close API\n");
-
- g_cm_core->api->close(g_cm_core, nesqp->cm_node);
- }
-
- return ret;
-}
-
-
-/**
- * nes_accept
- */
-int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
-{
- u64 u64temp;
- struct ib_qp *ibqp;
- struct nes_qp *nesqp;
- struct nes_vnic *nesvnic;
- struct nes_device *nesdev;
- struct nes_cm_node *cm_node;
- struct nes_adapter *adapter;
- struct ib_qp_attr attr;
- struct iw_cm_event cm_event;
- struct nes_hw_qp_wqe *wqe;
- struct nes_v4_quad nes_quad;
- u32 crc_value;
- int ret;
- int passive_state;
- struct nes_ib_device *nesibdev;
- struct ib_mr *ibmr = NULL;
- struct ib_phys_buf ibphysbuf;
- struct nes_pd *nespd;
- u64 tagged_offset;
- u8 mpa_frame_offset = 0;
- struct ietf_mpa_v2 *mpa_v2_frame;
- u8 start_addr = 0;
- u8 *start_ptr = &start_addr;
- u8 **start_buff = &start_ptr;
- u16 buff_len = 0;
- struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
-
- ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
- if (!ibqp)
- return -EINVAL;
-
- /* get all our handles */
- nesqp = to_nesqp(ibqp);
- nesvnic = to_nesvnic(nesqp->ibqp.device);
- nesdev = nesvnic->nesdev;
- adapter = nesdev->nesadapter;
-
- cm_node = (struct nes_cm_node *)cm_id->provider_data;
-	nes_debug(NES_DBG_CM, "nes_accept: cm_node=%p nesvnic=%p, netdev=%p, "
- "%s\n", cm_node, nesvnic, nesvnic->netdev,
- nesvnic->netdev->name);
-
- if (NES_CM_STATE_LISTENER_DESTROYED == cm_node->state) {
- if (cm_node->loopbackpartner)
- rem_ref_cm_node(cm_node->cm_core, cm_node->loopbackpartner);
- rem_ref_cm_node(cm_node->cm_core, cm_node);
- return -EINVAL;
- }
-
- passive_state = atomic_add_return(1, &cm_node->passive_state);
- if (passive_state == NES_SEND_RESET_EVENT) {
- rem_ref_cm_node(cm_node->cm_core, cm_node);
- return -ECONNRESET;
- }
- /* associate the node with the QP */
- nesqp->cm_node = (void *)cm_node;
- cm_node->nesqp = nesqp;
-
-
- nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
- nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
- atomic_inc(&cm_accepts);
-
- nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
- netdev_refcnt_read(nesvnic->netdev));
-
- nesqp->ietf_frame_size = sizeof(struct ietf_mpa_v2);
- /* allocate the ietf frame and space for private data */
- nesqp->ietf_frame = pci_alloc_consistent(nesdev->pcidev,
- nesqp->ietf_frame_size + conn_param->private_data_len,
- &nesqp->ietf_frame_pbase);
-
- if (!nesqp->ietf_frame) {
- nes_debug(NES_DBG_CM, "Unable to allocate memory for private data\n");
- return -ENOMEM;
- }
- mpa_v2_frame = (struct ietf_mpa_v2 *)nesqp->ietf_frame;
-
- if (cm_node->mpa_frame_rev == IETF_MPA_V1)
- mpa_frame_offset = 4;
-
- if (cm_node->mpa_frame_rev == IETF_MPA_V1 ||
- cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) {
- record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
- }
-
- memcpy(mpa_v2_frame->priv_data, conn_param->private_data,
- conn_param->private_data_len);
-
- cm_build_mpa_frame(cm_node, start_buff, &buff_len, nesqp->ietf_frame, MPA_KEY_REPLY);
- nesqp->private_data_len = conn_param->private_data_len;
-
- /* setup our first outgoing iWarp send WQE (the IETF frame response) */
- wqe = &nesqp->hwqp.sq_vbase[0];
-
- if (raddr->sin_addr.s_addr != laddr->sin_addr.s_addr) {
- u64temp = (unsigned long)nesqp;
- nesibdev = nesvnic->nesibdev;
- nespd = nesqp->nespd;
- ibphysbuf.addr = nesqp->ietf_frame_pbase + mpa_frame_offset;
- ibphysbuf.size = buff_len;
- tagged_offset = (u64)(unsigned long)*start_buff;
- ibmr = nesibdev->ibdev.reg_phys_mr((struct ib_pd *)nespd,
- &ibphysbuf, 1,
- IB_ACCESS_LOCAL_WRITE,
- &tagged_offset);
- if (!ibmr) {
-			nes_debug(NES_DBG_CM, "Unable to register memory region "
- "for lSMM for cm_node = %p \n",
- cm_node);
- pci_free_consistent(nesdev->pcidev,
- nesqp->private_data_len + nesqp->ietf_frame_size,
- nesqp->ietf_frame, nesqp->ietf_frame_pbase);
- return -ENOMEM;
- }
-
- ibmr->pd = &nespd->ibpd;
- ibmr->device = nespd->ibpd.device;
- nesqp->lsmm_mr = ibmr;
-
- u64temp |= NES_SW_CONTEXT_ALIGN >> 1;
- set_wqe_64bit_value(wqe->wqe_words,
- NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX,
- u64temp);
- wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =
- cpu_to_le32(NES_IWARP_SQ_WQE_STREAMING |
- NES_IWARP_SQ_WQE_WRPDU);
- wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] =
- cpu_to_le32(buff_len);
- set_wqe_64bit_value(wqe->wqe_words,
- NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
- (u64)(unsigned long)(*start_buff));
- wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] =
- cpu_to_le32(buff_len);
- wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = ibmr->lkey;
- if (nesqp->sq_kmapped) {
- nesqp->sq_kmapped = 0;
- kunmap(nesqp->page);
- }
-
- nesqp->nesqp_context->ird_ord_sizes |=
- cpu_to_le32(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
- NES_QPCONTEXT_ORDIRD_WRPDU);
- } else {
- nesqp->nesqp_context->ird_ord_sizes |=
- cpu_to_le32(NES_QPCONTEXT_ORDIRD_WRPDU);
- }
- nesqp->skip_lsmm = 1;
-
- /* Cache the cm_id in the qp */
- nesqp->cm_id = cm_id;
- cm_node->cm_id = cm_id;
-
- /* nesqp->cm_node = (void *)cm_id->provider_data; */
- cm_id->provider_data = nesqp;
- nesqp->active_conn = 0;
-
- if (cm_node->state == NES_CM_STATE_TSA)
- nes_debug(NES_DBG_CM, "Already state = TSA for cm_node=%p\n",
- cm_node);
-
- nes_cm_init_tsa_conn(nesqp, cm_node);
-
- nesqp->nesqp_context->tcpPorts[0] =
- cpu_to_le16(cm_node->mapped_loc_port);
- nesqp->nesqp_context->tcpPorts[1] =
- cpu_to_le16(cm_node->mapped_rem_port);
-
- nesqp->nesqp_context->ip0 = cpu_to_le32(cm_node->mapped_rem_addr);
-
- nesqp->nesqp_context->misc2 |= cpu_to_le32(
- (u32)PCI_FUNC(nesdev->pcidev->devfn) <<
- NES_QPCONTEXT_MISC2_SRC_IP_SHIFT);
-
- nesqp->nesqp_context->arp_index_vlan |=
- cpu_to_le32(nes_arp_table(nesdev,
- le32_to_cpu(nesqp->nesqp_context->ip0), NULL,
- NES_ARP_RESOLVE) << 16);
-
- nesqp->nesqp_context->ts_val_delta = cpu_to_le32(
- jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW));
-
- nesqp->nesqp_context->ird_index = cpu_to_le32(nesqp->hwqp.qp_id);
-
- nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(
- ((u32)1 << NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT));
- nesqp->nesqp_context->ird_ord_sizes |=
- cpu_to_le32((u32)cm_node->ord_size);
-
- memset(&nes_quad, 0, sizeof(nes_quad));
- nes_quad.DstIpAdrIndex =
- cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
- nes_quad.SrcIpadr = htonl(cm_node->mapped_rem_addr);
- nes_quad.TcpPorts[0] = htons(cm_node->mapped_rem_port);
- nes_quad.TcpPorts[1] = htons(cm_node->mapped_loc_port);
-
- /* Produce hash key */
- crc_value = get_crc_value(&nes_quad);
- nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff);
- nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n",
- nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask);
-
- nesqp->hte_index &= adapter->hte_index_mask;
- nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index);
-
- cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node);
-
- nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = "
- "0x%08X:0x%04X, rcv_nxt=0x%08X, snd_nxt=0x%08X, mpa + "
- "private data length=%u.\n", nesqp->hwqp.qp_id,
- ntohl(raddr->sin_addr.s_addr), ntohs(raddr->sin_port),
- ntohl(laddr->sin_addr.s_addr), ntohs(laddr->sin_port),
- le32_to_cpu(nesqp->nesqp_context->rcv_nxt),
- le32_to_cpu(nesqp->nesqp_context->snd_nxt),
- buff_len);
-
- /* notify OF layer that accept event was successful */
- cm_id->add_ref(cm_id);
- nes_add_ref(&nesqp->ibqp);
-
- cm_event.event = IW_CM_EVENT_ESTABLISHED;
- cm_event.status = 0;
- cm_event.provider_data = (void *)nesqp;
- cm_event.local_addr = cm_id->local_addr;
- cm_event.remote_addr = cm_id->remote_addr;
- cm_event.private_data = NULL;
- cm_event.private_data_len = 0;
- cm_event.ird = cm_node->ird_size;
- cm_event.ord = cm_node->ord_size;
-
- ret = cm_id->event_handler(cm_id, &cm_event);
- attr.qp_state = IB_QPS_RTS;
- nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);
- if (cm_node->loopbackpartner) {
- cm_node->loopbackpartner->mpa_frame_size =
- nesqp->private_data_len;
- /* copy entire MPA frame to our cm_node's frame */
- memcpy(cm_node->loopbackpartner->mpa_frame_buf,
- conn_param->private_data, conn_param->private_data_len);
- create_event(cm_node->loopbackpartner, NES_CM_EVENT_CONNECTED);
- }
- if (ret)
- printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
- "ret=%d\n", __func__, __LINE__, ret);
-
- return 0;
-}
-
-
-/**
- * nes_reject
- */
-int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
-{
- struct nes_cm_node *cm_node;
- struct nes_cm_node *loopback;
- struct nes_cm_core *cm_core;
- u8 *start_buff;
-
- atomic_inc(&cm_rejects);
- cm_node = (struct nes_cm_node *)cm_id->provider_data;
- loopback = cm_node->loopbackpartner;
- cm_core = cm_node->cm_core;
- cm_node->cm_id = cm_id;
-
- if (pdata_len + sizeof(struct ietf_mpa_v2) > MAX_CM_BUFFER)
- return -EINVAL;
-
- if (loopback) {
- memcpy(&loopback->mpa_frame.priv_data, pdata, pdata_len);
- loopback->mpa_frame.priv_data_len = pdata_len;
- loopback->mpa_frame_size = pdata_len;
- } else {
- start_buff = &cm_node->mpa_frame_buf[0] + sizeof(struct ietf_mpa_v2);
- cm_node->mpa_frame_size = pdata_len;
- memcpy(start_buff, pdata, pdata_len);
- }
- return cm_core->api->reject(cm_core, cm_node);
-}
-
-
-/**
- * nes_connect
- * setup and launch cm connect node
- */
-int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
-{
- struct ib_qp *ibqp;
- struct nes_qp *nesqp;
- struct nes_vnic *nesvnic;
- struct nes_device *nesdev;
- struct nes_cm_node *cm_node;
- struct nes_cm_info cm_info;
- int apbvt_set = 0;
- struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
- struct iwpm_dev_data pm_reg_msg;
- struct iwpm_sa_data pm_msg;
- int iwpm_err = 0;
-
- if (cm_id->remote_addr.ss_family != AF_INET)
- return -ENOSYS;
- ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
- if (!ibqp)
- return -EINVAL;
- nesqp = to_nesqp(ibqp);
- if (!nesqp)
- return -EINVAL;
- nesvnic = to_nesvnic(nesqp->ibqp.device);
- if (!nesvnic)
- return -EINVAL;
- nesdev = nesvnic->nesdev;
- if (!nesdev)
- return -EINVAL;
-
- if (!laddr->sin_port || !raddr->sin_port)
- return -EINVAL;
-
- nes_debug(NES_DBG_CM, "QP%u, current IP = 0x%08X, Destination IP = "
- "0x%08X:0x%04X, local = 0x%08X:0x%04X.\n", nesqp->hwqp.qp_id,
- ntohl(nesvnic->local_ipaddr), ntohl(raddr->sin_addr.s_addr),
- ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
- ntohs(laddr->sin_port));
-
- atomic_inc(&cm_connects);
- nesqp->active_conn = 1;
-
- /* cache the cm_id in the qp */
- nesqp->cm_id = cm_id;
- cm_id->provider_data = nesqp;
- nesqp->private_data_len = conn_param->private_data_len;
-
- nes_debug(NES_DBG_CM, "requested ord = 0x%08X.\n", (u32)conn_param->ord);
- nes_debug(NES_DBG_CM, "mpa private data len =%u\n",
- conn_param->private_data_len);
-
- /* set up the connection params for the node */
- cm_info.loc_addr = ntohl(laddr->sin_addr.s_addr);
- cm_info.loc_port = ntohs(laddr->sin_port);
- cm_info.rem_addr = ntohl(raddr->sin_addr.s_addr);
- cm_info.rem_port = ntohs(raddr->sin_port);
- cm_info.cm_id = cm_id;
- cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;
-
- /* No port mapper available, go with the specified peer information */
- cm_info.mapped_loc_addr = cm_info.loc_addr;
- cm_info.mapped_loc_port = cm_info.loc_port;
- cm_info.mapped_rem_addr = cm_info.rem_addr;
- cm_info.mapped_rem_port = cm_info.rem_port;
-
- nes_form_reg_msg(nesvnic, &pm_reg_msg);
- iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_NES);
- if (iwpm_err) {
- nes_debug(NES_DBG_NLMSG,
- "Port Mapper reg pid fail (err = %d).\n", iwpm_err);
- }
- if (iwpm_valid_pid() && !iwpm_err) {
- nes_form_pm_msg(&cm_info, &pm_msg);
- iwpm_err = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_NES);
- if (iwpm_err)
- nes_debug(NES_DBG_NLMSG,
- "Port Mapper query fail (err = %d).\n", iwpm_err);
- else
- nes_record_pm_msg(&cm_info, &pm_msg);
- }
-
- if (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr) {
- nes_manage_apbvt(nesvnic, cm_info.mapped_loc_port,
- PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD);
- apbvt_set = 1;
- }
-
- if (nes_create_mapinfo(&cm_info))
- return -ENOMEM;
-
- cm_id->add_ref(cm_id);
-
- /* create a connect CM node connection */
- cm_node = g_cm_core->api->connect(g_cm_core, nesvnic,
- conn_param->private_data_len, (void *)conn_param->private_data,
- &cm_info);
- if (!cm_node) {
- if (apbvt_set)
- nes_manage_apbvt(nesvnic, cm_info.mapped_loc_port,
- PCI_FUNC(nesdev->pcidev->devfn),
- NES_MANAGE_APBVT_DEL);
-
- nes_debug(NES_DBG_NLMSG, "Delete mapped_loc_port = %04X\n",
- cm_info.mapped_loc_port);
- nes_remove_mapinfo(cm_info.loc_addr, cm_info.loc_port,
- cm_info.mapped_loc_addr, cm_info.mapped_loc_port);
- cm_id->rem_ref(cm_id);
- return -ENOMEM;
- }
-
- record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
- if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
- cm_node->ord_size == 0)
- cm_node->ord_size = 1;
-
- cm_node->apbvt_set = apbvt_set;
- cm_node->tos = cm_id->tos;
- nesqp->cm_node = cm_node;
- cm_node->nesqp = nesqp;
- nes_add_ref(&nesqp->ibqp);
-
- return 0;
-}
-
-
-/**
- * nes_create_listen
- */
-int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
-{
- struct nes_vnic *nesvnic;
- struct nes_cm_listener *cm_node;
- struct nes_cm_info cm_info;
- int err;
- struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
-
- nes_debug(NES_DBG_CM, "cm_id = %p, local port = 0x%04X.\n",
- cm_id, ntohs(laddr->sin_port));
-
- if (cm_id->local_addr.ss_family != AF_INET)
- return -ENOSYS;
- nesvnic = to_nesvnic(cm_id->device);
- if (!nesvnic)
- return -EINVAL;
-
- nes_debug(NES_DBG_CM, "nesvnic=%p, netdev=%p, %s\n",
- nesvnic, nesvnic->netdev, nesvnic->netdev->name);
-
- nes_debug(NES_DBG_CM, "nesvnic->local_ipaddr=0x%08x, sin_addr.s_addr=0x%08x\n",
- nesvnic->local_ipaddr, laddr->sin_addr.s_addr);
-
- /* setup listen params in our api call struct */
- cm_info.loc_addr = ntohl(nesvnic->local_ipaddr);
- cm_info.loc_port = ntohs(laddr->sin_port);
- cm_info.backlog = backlog;
- cm_info.cm_id = cm_id;
-
- cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;
-
- /* No port mapper available, go with the specified info */
- cm_info.mapped_loc_addr = cm_info.loc_addr;
- cm_info.mapped_loc_port = cm_info.loc_port;
-
- cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info);
- if (!cm_node) {
- printk(KERN_ERR "%s[%u] Error returned from listen API call\n",
- __func__, __LINE__);
- return -ENOMEM;
- }
-
- cm_id->provider_data = cm_node;
- cm_node->tos = cm_id->tos;
-
- if (!cm_node->reused_node) {
- if (nes_create_mapinfo(&cm_info))
- return -ENOMEM;
-
- err = nes_manage_apbvt(nesvnic, cm_node->mapped_loc_port,
- PCI_FUNC(nesvnic->nesdev->pcidev->devfn),
- NES_MANAGE_APBVT_ADD);
- if (err) {
- printk(KERN_ERR "nes_manage_apbvt call returned %d.\n",
- err);
- g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
- return err;
- }
- atomic_inc(&cm_listens_created);
- }
-
- cm_id->add_ref(cm_id);
- cm_id->provider_data = (void *)cm_node;
-
-
- return 0;
-}
-
-
-/**
- * nes_destroy_listen
- */
-int nes_destroy_listen(struct iw_cm_id *cm_id)
-{
- if (cm_id->provider_data)
- g_cm_core->api->stop_listener(g_cm_core, cm_id->provider_data);
- else
- nes_debug(NES_DBG_CM, "cm_id->provider_data was NULL\n");
-
- cm_id->rem_ref(cm_id);
-
- return 0;
-}
-
-
-/**
- * nes_cm_recv
- */
-int nes_cm_recv(struct sk_buff *skb, struct net_device *netdevice)
-{
- int rc = 0;
-
- cm_packets_received++;
- if ((g_cm_core) && (g_cm_core->api))
- rc = g_cm_core->api->recv_pkt(g_cm_core, netdev_priv(netdevice), skb);
- else
- nes_debug(NES_DBG_CM, "Unable to process packet for CM,"
- " cm is not setup properly.\n");
-
- return rc;
-}
-
-
-/**
- * nes_cm_start
- * Start and init a cm core module
- */
-int nes_cm_start(void)
-{
- nes_debug(NES_DBG_CM, "\n");
- /* create the primary CM core, pass this handle to subsequent core inits */
- g_cm_core = nes_cm_alloc_core();
- if (g_cm_core)
- return 0;
- else
- return -ENOMEM;
-}
-
-
-/**
- * nes_cm_stop
- * stop and dealloc all cm core instances
- */
-int nes_cm_stop(void)
-{
- g_cm_core->api->destroy_cm_core(g_cm_core);
- return 0;
-}
-
-
-/**
- * cm_event_connected
- * handle a connected event, setup QPs and HW
- */
-static void cm_event_connected(struct nes_cm_event *event)
-{
- struct nes_qp *nesqp;
- struct nes_vnic *nesvnic;
- struct nes_device *nesdev;
- struct nes_cm_node *cm_node;
- struct nes_adapter *nesadapter;
- struct ib_qp_attr attr;
- struct iw_cm_id *cm_id;
- struct iw_cm_event cm_event;
- struct nes_v4_quad nes_quad;
- u32 crc_value;
- int ret;
- struct sockaddr_in *laddr;
- struct sockaddr_in *raddr;
- struct sockaddr_in *cm_event_laddr;
-
- /* get all our handles */
- cm_node = event->cm_node;
- cm_id = cm_node->cm_id;
- nes_debug(NES_DBG_CM, "cm_event_connected - %p - cm_id = %p\n", cm_node, cm_id);
- nesqp = (struct nes_qp *)cm_id->provider_data;
- nesvnic = to_nesvnic(nesqp->ibqp.device);
- nesdev = nesvnic->nesdev;
- nesadapter = nesdev->nesadapter;
- laddr = (struct sockaddr_in *)&cm_id->local_addr;
- raddr = (struct sockaddr_in *)&cm_id->remote_addr;
- cm_event_laddr = (struct sockaddr_in *)&cm_event.local_addr;
-
- if (nesqp->destroyed)
- return;
- atomic_inc(&cm_connecteds);
- nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
- " local port 0x%04X. jiffies = %lu.\n",
- nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
- ntohs(raddr->sin_port), ntohs(laddr->sin_port), jiffies);
-
- nes_cm_init_tsa_conn(nesqp, cm_node);
-
- /* set the QP tsa context */
- nesqp->nesqp_context->tcpPorts[0] =
- cpu_to_le16(cm_node->mapped_loc_port);
- nesqp->nesqp_context->tcpPorts[1] =
- cpu_to_le16(cm_node->mapped_rem_port);
- nesqp->nesqp_context->ip0 = cpu_to_le32(cm_node->mapped_rem_addr);
-
- nesqp->nesqp_context->misc2 |= cpu_to_le32(
- (u32)PCI_FUNC(nesdev->pcidev->devfn) <<
- NES_QPCONTEXT_MISC2_SRC_IP_SHIFT);
- nesqp->nesqp_context->arp_index_vlan |= cpu_to_le32(
- nes_arp_table(nesdev,
- le32_to_cpu(nesqp->nesqp_context->ip0),
- NULL, NES_ARP_RESOLVE) << 16);
- nesqp->nesqp_context->ts_val_delta = cpu_to_le32(
- jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW));
- nesqp->nesqp_context->ird_index = cpu_to_le32(nesqp->hwqp.qp_id);
- nesqp->nesqp_context->ird_ord_sizes |=
- cpu_to_le32((u32)1 <<
- NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT);
- nesqp->nesqp_context->ird_ord_sizes |=
- cpu_to_le32((u32)cm_node->ord_size);
-
- /* Adjust tail for not having a LSMM */
- /*nesqp->hwqp.sq_tail = 1;*/
-
- build_rdma0_msg(cm_node, &nesqp);
-
- nes_write32(nesdev->regs + NES_WQE_ALLOC,
- (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id);
-
- memset(&nes_quad, 0, sizeof(nes_quad));
-
- nes_quad.DstIpAdrIndex =
- cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
- nes_quad.SrcIpadr = htonl(cm_node->mapped_rem_addr);
- nes_quad.TcpPorts[0] = htons(cm_node->mapped_rem_port);
- nes_quad.TcpPorts[1] = htons(cm_node->mapped_loc_port);
-
- /* Produce hash key */
- crc_value = get_crc_value(&nes_quad);
- nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff);
- nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, After CRC = 0x%08X\n",
- nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask);
-
- nesqp->hte_index &= nesadapter->hte_index_mask;
- nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index);
-
- nesqp->ietf_frame = &cm_node->mpa_frame;
- nesqp->private_data_len = (u8)cm_node->mpa_frame_size;
- cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node);
-
- /* notify OF layer we successfully created the requested connection */
- cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
- cm_event.status = 0;
- cm_event.provider_data = cm_id->provider_data;
- cm_event_laddr->sin_family = AF_INET;
- cm_event_laddr->sin_port = laddr->sin_port;
- cm_event.remote_addr = cm_id->remote_addr;
-
- cm_event.private_data = (void *)event->cm_node->mpa_frame_buf;
- cm_event.private_data_len = (u8)event->cm_node->mpa_frame_size;
- cm_event.ird = cm_node->ird_size;
- cm_event.ord = cm_node->ord_size;
-
- cm_event_laddr->sin_addr.s_addr = htonl(event->cm_info.rem_addr);
- ret = cm_id->event_handler(cm_id, &cm_event);
- nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
-
- if (ret)
- printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
- "ret=%d\n", __func__, __LINE__, ret);
- attr.qp_state = IB_QPS_RTS;
- nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);
-
- nes_debug(NES_DBG_CM, "Exiting connect thread for QP%u. jiffies = "
- "%lu\n", nesqp->hwqp.qp_id, jiffies);
-
- return;
-}
-
-
-/**
- * cm_event_connect_error
- */
-static void cm_event_connect_error(struct nes_cm_event *event)
-{
- struct nes_qp *nesqp;
- struct iw_cm_id *cm_id;
- struct iw_cm_event cm_event;
- /* struct nes_cm_info cm_info; */
- int ret;
-
- if (!event->cm_node)
- return;
-
- cm_id = event->cm_node->cm_id;
- if (!cm_id)
- return;
-
- nes_debug(NES_DBG_CM, "cm_node=%p, cm_id=%p\n", event->cm_node, cm_id);
- nesqp = cm_id->provider_data;
-
- if (!nesqp)
- return;
-
- /* notify OF layer about this connection error event */
- /* cm_id->rem_ref(cm_id); */
- nesqp->cm_id = NULL;
- cm_id->provider_data = NULL;
- cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
- cm_event.status = -ECONNRESET;
- cm_event.provider_data = cm_id->provider_data;
- cm_event.local_addr = cm_id->local_addr;
- cm_event.remote_addr = cm_id->remote_addr;
- cm_event.private_data = NULL;
- cm_event.private_data_len = 0;
-
-#ifdef CONFIG_INFINIBAND_NES_DEBUG
- {
- struct sockaddr_in *cm_event_laddr = (struct sockaddr_in *)
- &cm_event.local_addr;
- struct sockaddr_in *cm_event_raddr = (struct sockaddr_in *)
- &cm_event.remote_addr;
- nes_debug(NES_DBG_CM, "call CM_EVENT REJECTED, local_addr=%08x, remote_addr=%08x\n",
- cm_event_laddr->sin_addr.s_addr, cm_event_raddr->sin_addr.s_addr);
- }
-#endif
-
- ret = cm_id->event_handler(cm_id, &cm_event);
- nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
- if (ret)
- printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
- "ret=%d\n", __func__, __LINE__, ret);
- cm_id->rem_ref(cm_id);
-
- rem_ref_cm_node(event->cm_node->cm_core, event->cm_node);
- return;
-}
-
-
-/**
- * cm_event_reset
- */
-static void cm_event_reset(struct nes_cm_event *event)
-{
- struct nes_qp *nesqp;
- struct iw_cm_id *cm_id;
- struct iw_cm_event cm_event;
- /* struct nes_cm_info cm_info; */
- int ret;
-
- if (!event->cm_node)
- return;
-
- if (!event->cm_node->cm_id)
- return;
-
- cm_id = event->cm_node->cm_id;
-
- nes_debug(NES_DBG_CM, "%p - cm_id = %p\n", event->cm_node, cm_id);
- nesqp = cm_id->provider_data;
- if (!nesqp)
- return;
-
- nesqp->cm_id = NULL;
- /* cm_id->provider_data = NULL; */
- cm_event.event = IW_CM_EVENT_DISCONNECT;
- cm_event.status = -ECONNRESET;
- cm_event.provider_data = cm_id->provider_data;
- cm_event.local_addr = cm_id->local_addr;
- cm_event.remote_addr = cm_id->remote_addr;
- cm_event.private_data = NULL;
- cm_event.private_data_len = 0;
-
- cm_id->add_ref(cm_id);
- ret = cm_id->event_handler(cm_id, &cm_event);
- atomic_inc(&cm_closes);
- cm_event.event = IW_CM_EVENT_CLOSE;
- cm_event.status = 0;
- cm_event.provider_data = cm_id->provider_data;
- cm_event.local_addr = cm_id->local_addr;
- cm_event.remote_addr = cm_id->remote_addr;
- cm_event.private_data = NULL;
- cm_event.private_data_len = 0;
- nes_debug(NES_DBG_CM, "NODE %p Generating CLOSE\n", event->cm_node);
- ret = cm_id->event_handler(cm_id, &cm_event);
-
- nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
-
-
- /* notify OF layer about this connection error event */
- cm_id->rem_ref(cm_id);
-
- return;
-}
-
-
-/**
- * cm_event_mpa_req
- */
-static void cm_event_mpa_req(struct nes_cm_event *event)
-{
- struct iw_cm_id *cm_id;
- struct iw_cm_event cm_event;
- int ret;
- struct nes_cm_node *cm_node;
- struct sockaddr_in *cm_event_laddr = (struct sockaddr_in *)
- &cm_event.local_addr;
- struct sockaddr_in *cm_event_raddr = (struct sockaddr_in *)
- &cm_event.remote_addr;
-
- cm_node = event->cm_node;
- if (!cm_node)
- return;
- cm_id = cm_node->cm_id;
-
- atomic_inc(&cm_connect_reqs);
- nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
- cm_node, cm_id, jiffies);
-
- cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
- cm_event.status = 0;
- cm_event.provider_data = (void *)cm_node;
-
- cm_event_laddr->sin_family = AF_INET;
- cm_event_laddr->sin_port = htons(event->cm_info.loc_port);
- cm_event_laddr->sin_addr.s_addr = htonl(event->cm_info.loc_addr);
-
- cm_event_raddr->sin_family = AF_INET;
- cm_event_raddr->sin_port = htons(event->cm_info.rem_port);
- cm_event_raddr->sin_addr.s_addr = htonl(event->cm_info.rem_addr);
- cm_event.private_data = cm_node->mpa_frame_buf;
- cm_event.private_data_len = (u8)cm_node->mpa_frame_size;
- if (cm_node->mpa_frame_rev == IETF_MPA_V1) {
- cm_event.ird = NES_MAX_IRD;
- cm_event.ord = NES_MAX_ORD;
- } else {
- cm_event.ird = cm_node->ird_size;
- cm_event.ord = cm_node->ord_size;
- }
-
- ret = cm_id->event_handler(cm_id, &cm_event);
- if (ret)
- printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n",
- __func__, __LINE__, ret);
- return;
-}
-
-
-static void cm_event_mpa_reject(struct nes_cm_event *event)
-{
- struct iw_cm_id *cm_id;
- struct iw_cm_event cm_event;
- struct nes_cm_node *cm_node;
- int ret;
- struct sockaddr_in *cm_event_laddr = (struct sockaddr_in *)
- &cm_event.local_addr;
- struct sockaddr_in *cm_event_raddr = (struct sockaddr_in *)
- &cm_event.remote_addr;
-
- cm_node = event->cm_node;
- if (!cm_node)
- return;
- cm_id = cm_node->cm_id;
-
- atomic_inc(&cm_connect_reqs);
- nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
- cm_node, cm_id, jiffies);
-
- cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
- cm_event.status = -ECONNREFUSED;
- cm_event.provider_data = cm_id->provider_data;
-
- cm_event_laddr->sin_family = AF_INET;
- cm_event_laddr->sin_port = htons(event->cm_info.loc_port);
- cm_event_laddr->sin_addr.s_addr = htonl(event->cm_info.loc_addr);
-
- cm_event_raddr->sin_family = AF_INET;
- cm_event_raddr->sin_port = htons(event->cm_info.rem_port);
- cm_event_raddr->sin_addr.s_addr = htonl(event->cm_info.rem_addr);
-
- cm_event.private_data = cm_node->mpa_frame_buf;
- cm_event.private_data_len = (u8)cm_node->mpa_frame_size;
-
- nes_debug(NES_DBG_CM, "call CM_EVENT_MPA_REJECTED, local_addr=%08x, "
-		"remote_addr=%08x\n",
- cm_event_laddr->sin_addr.s_addr,
- cm_event_raddr->sin_addr.s_addr);
-
- ret = cm_id->event_handler(cm_id, &cm_event);
- if (ret)
- printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n",
- __func__, __LINE__, ret);
-
- return;
-}
-
-
-static void nes_cm_event_handler(struct work_struct *);
-
-/**
- * nes_cm_post_event
- * post an event to the cm event handler
- */
-static int nes_cm_post_event(struct nes_cm_event *event)
-{
- atomic_inc(&event->cm_node->cm_core->events_posted);
- add_ref_cm_node(event->cm_node);
- event->cm_info.cm_id->add_ref(event->cm_info.cm_id);
- INIT_WORK(&event->event_work, nes_cm_event_handler);
- nes_debug(NES_DBG_CM, "cm_node=%p queue_work, event=%p\n",
- event->cm_node, event);
-
- queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
-
- nes_debug(NES_DBG_CM, "Exit\n");
- return 0;
-}
-
-
-/**
- * nes_cm_event_handler
- * worker function to handle cm events
- * will free instance of nes_cm_event
- */
-static void nes_cm_event_handler(struct work_struct *work)
-{
- struct nes_cm_event *event = container_of(work, struct nes_cm_event,
- event_work);
- struct nes_cm_core *cm_core;
-
- if ((!event) || (!event->cm_node) || (!event->cm_node->cm_core))
- return;
-
- cm_core = event->cm_node->cm_core;
- nes_debug(NES_DBG_CM, "event=%p, event->type=%u, events posted=%u\n",
- event, event->type, atomic_read(&cm_core->events_posted));
-
- switch (event->type) {
- case NES_CM_EVENT_MPA_REQ:
- cm_event_mpa_req(event);
- nes_debug(NES_DBG_CM, "cm_node=%p CM Event: MPA REQUEST\n",
- event->cm_node);
- break;
- case NES_CM_EVENT_RESET:
- nes_debug(NES_DBG_CM, "cm_node = %p CM Event: RESET\n",
- event->cm_node);
- cm_event_reset(event);
- break;
- case NES_CM_EVENT_CONNECTED:
- if ((!event->cm_node->cm_id) ||
- (event->cm_node->state != NES_CM_STATE_TSA))
- break;
- cm_event_connected(event);
- nes_debug(NES_DBG_CM, "CM Event: CONNECTED\n");
- break;
- case NES_CM_EVENT_MPA_REJECT:
- if ((!event->cm_node->cm_id) ||
- (event->cm_node->state == NES_CM_STATE_TSA))
- break;
- cm_event_mpa_reject(event);
- nes_debug(NES_DBG_CM, "CM Event: REJECT\n");
- break;
-
- case NES_CM_EVENT_ABORTED:
- if ((!event->cm_node->cm_id) ||
- (event->cm_node->state == NES_CM_STATE_TSA))
- break;
- cm_event_connect_error(event);
- nes_debug(NES_DBG_CM, "CM Event: ABORTED\n");
- break;
- case NES_CM_EVENT_DROPPED_PKT:
- nes_debug(NES_DBG_CM, "CM Event: DROPPED PKT\n");
- break;
- default:
- nes_debug(NES_DBG_CM, "CM Event: UNKNOWN EVENT TYPE\n");
- break;
- }
-
- atomic_dec(&cm_core->events_posted);
- event->cm_info.cm_id->rem_ref(event->cm_info.cm_id);
- rem_ref_cm_node(cm_core, event->cm_node);
- kfree(event);
-
- return;
-}
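
The two routines above implement a classic defer-to-workqueue pattern: nes_cm_post_event() pins the cm_node and cm_id with extra references, embeds the dispatch in a work item, and queues it on the core's event_wq, while nes_cm_event_handler() recovers the event via container_of(), switches on event->type, drops the references, and frees the event. A minimal sketch of the same pattern, using hypothetical my_event/my_post_event/my_event_worker names rather than the driver's types:

#include <linux/workqueue.h>
#include <linux/slab.h>

/* Reduced stand-in for struct nes_cm_event: just a type code plus the
 * embedded work item the workqueue machinery needs. */
struct my_event {
	int type;
	struct work_struct work;
};

static struct workqueue_struct *my_event_wq;

static void my_event_worker(struct work_struct *work)
{
	/* Recover the containing event from its embedded work_struct. */
	struct my_event *event = container_of(work, struct my_event, work);

	switch (event->type) {
	default:
		break;		/* per-type handlers would be dispatched here */
	}
	kfree(event);		/* the worker owns and frees the event */
}

static int my_post_event(struct my_event *event)
{
	INIT_WORK(&event->work, my_event_worker);
	return queue_work(my_event_wq, &event->work) ? 0 : -EBUSY;
}

Deferring to a worker keeps the upper-layer CM callbacks out of the packet-processing path and lets them run in a context that may sleep.
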
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
deleted file mode 100644
index 32a6420c2940..000000000000
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ /dev/null
@@ -1,478 +0,0 @@
-/*
- * Copyright (c) 2006 - 2014 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef NES_CM_H
-#define NES_CM_H
-
-#define QUEUE_EVENTS
-
-#define NES_MANAGE_APBVT_DEL 0
-#define NES_MANAGE_APBVT_ADD 1
-
-#define NES_MPA_REQUEST_ACCEPT 1
-#define NES_MPA_REQUEST_REJECT 2
-
-/* IETF MPA -- defines, enums, structs */
-#define IEFT_MPA_KEY_REQ "MPA ID Req Frame"
-#define IEFT_MPA_KEY_REP "MPA ID Rep Frame"
-#define IETF_MPA_KEY_SIZE 16
-#define IETF_MPA_VERSION 1
-#define IETF_MAX_PRIV_DATA_LEN 512
-#define IETF_MPA_FRAME_SIZE 20
-#define IETF_RTR_MSG_SIZE 4
-#define IETF_MPA_V2_FLAG 0x10
-
-/* IETF RTR MSG Fields */
-#define IETF_PEER_TO_PEER 0x8000
-#define IETF_FLPDU_ZERO_LEN 0x4000
-#define IETF_RDMA0_WRITE 0x8000
-#define IETF_RDMA0_READ 0x4000
-#define IETF_NO_IRD_ORD 0x3FFF
-#define NES_MAX_IRD 0x40
-#define NES_MAX_ORD 0x7F
-
-enum ietf_mpa_flags {
- IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */
-	IETF_MPA_FLAGS_CRC	 = 0x40,	/* receive CRC */
- IETF_MPA_FLAGS_REJECT = 0x20, /* Reject */
-};
-
-struct ietf_mpa_v1 {
- u8 key[IETF_MPA_KEY_SIZE];
- u8 flags;
- u8 rev;
- __be16 priv_data_len;
- u8 priv_data[0];
-};
-
-#define ietf_mpa_req_resp_frame ietf_mpa_frame
-
-struct ietf_rtr_msg {
- __be16 ctrl_ird;
- __be16 ctrl_ord;
-};
-
-struct ietf_mpa_v2 {
- u8 key[IETF_MPA_KEY_SIZE];
- u8 flags;
- u8 rev;
- __be16 priv_data_len;
- struct ietf_rtr_msg rtr_msg;
- u8 priv_data[0];
-};
-
-struct nes_v4_quad {
- u32 rsvd0;
- __le32 DstIpAdrIndex; /* Only most significant 5 bits are valid */
- __be32 SrcIpadr;
- __be16 TcpPorts[2]; /* src is low, dest is high */
-};
-
-struct nes_cm_node;
-enum nes_timer_type {
- NES_TIMER_TYPE_SEND,
- NES_TIMER_TYPE_RECV,
- NES_TIMER_NODE_CLEANUP,
- NES_TIMER_TYPE_CLOSE,
-};
-
-#define NES_PASSIVE_STATE_INDICATED 0
-#define NES_DO_NOT_SEND_RESET_EVENT 1
-#define NES_SEND_RESET_EVENT 2
-
-#define MAX_NES_IFS 4
-
-#define SET_ACK 1
-#define SET_SYN 2
-#define SET_FIN 4
-#define SET_RST 8
-
-#define TCP_OPTIONS_PADDING 3
-
-struct option_base {
- u8 optionnum;
- u8 length;
-};
-
-enum option_numbers {
- OPTION_NUMBER_END,
- OPTION_NUMBER_NONE,
- OPTION_NUMBER_MSS,
- OPTION_NUMBER_WINDOW_SCALE,
- OPTION_NUMBER_SACK_PERM,
- OPTION_NUMBER_SACK,
- OPTION_NUMBER_WRITE0 = 0xbc
-};
-
-struct option_mss {
- u8 optionnum;
- u8 length;
- __be16 mss;
-};
-
-struct option_windowscale {
- u8 optionnum;
- u8 length;
- u8 shiftcount;
-};
-
-union all_known_options {
- char as_end;
- struct option_base as_base;
- struct option_mss as_mss;
- struct option_windowscale as_windowscale;
-};
-
-struct nes_timer_entry {
- struct list_head list;
- unsigned long timetosend; /* jiffies */
- struct sk_buff *skb;
- u32 type;
- u32 retrycount;
- u32 retranscount;
- u32 context;
- u32 seq_num;
- u32 send_retrans;
- int close_when_complete;
- struct net_device *netdev;
-};
-
-#define NES_DEFAULT_RETRYS 64
-#define NES_DEFAULT_RETRANS 8
-#ifdef CONFIG_INFINIBAND_NES_DEBUG
-#define NES_RETRY_TIMEOUT (1000*HZ/1000)
-#else
-#define NES_RETRY_TIMEOUT (3000*HZ/1000)
-#endif
-#define NES_SHORT_TIME (10)
-#define NES_LONG_TIME (2000*HZ/1000)
-#define NES_MAX_TIMEOUT ((unsigned long) (12*HZ))
-
-#define NES_CM_HASHTABLE_SIZE 1024
-#define NES_CM_TCP_TIMER_INTERVAL 3000
-#define NES_CM_DEFAULT_MTU 1540
-#define NES_CM_DEFAULT_FRAME_CNT 10
-#define NES_CM_THREAD_STACK_SIZE 256
-#define NES_CM_DEFAULT_RCV_WND 64240 // before we know that window scaling is allowed
-#define NES_CM_DEFAULT_RCV_WND_SCALED 256960 // after we know that window scaling is allowed
-#define NES_CM_DEFAULT_RCV_WND_SCALE 2
-#define NES_CM_DEFAULT_FREE_PKTS 0x000A
-#define NES_CM_FREE_PKT_LO_WATERMARK 2
-
-#define NES_CM_DEFAULT_MSS 536
-
-#define NES_CM_DEF_SEQ 0x159bf75f
-#define NES_CM_DEF_LOCAL_ID 0x3b47
-
-#define NES_CM_DEF_SEQ2 0x18ed5740
-#define NES_CM_DEF_LOCAL_ID2 0xb807
-#define MAX_CM_BUFFER (IETF_MPA_FRAME_SIZE + IETF_RTR_MSG_SIZE + IETF_MAX_PRIV_DATA_LEN)
-
-typedef u32 nes_addr_t;
-
-#define nes_cm_tsa_context nes_qp_context
-
-struct nes_qp;
-
-/* cm node transition states */
-enum nes_cm_node_state {
- NES_CM_STATE_UNKNOWN,
- NES_CM_STATE_INITED,
- NES_CM_STATE_LISTENING,
- NES_CM_STATE_SYN_RCVD,
- NES_CM_STATE_SYN_SENT,
- NES_CM_STATE_ONE_SIDE_ESTABLISHED,
- NES_CM_STATE_ESTABLISHED,
- NES_CM_STATE_ACCEPTING,
- NES_CM_STATE_MPAREQ_SENT,
- NES_CM_STATE_MPAREQ_RCVD,
- NES_CM_STATE_MPAREJ_RCVD,
- NES_CM_STATE_TSA,
- NES_CM_STATE_FIN_WAIT1,
- NES_CM_STATE_FIN_WAIT2,
- NES_CM_STATE_CLOSE_WAIT,
- NES_CM_STATE_TIME_WAIT,
- NES_CM_STATE_LAST_ACK,
- NES_CM_STATE_CLOSING,
- NES_CM_STATE_LISTENER_DESTROYED,
- NES_CM_STATE_CLOSED
-};
-
-enum mpa_frame_version {
- IETF_MPA_V1 = 1,
- IETF_MPA_V2 = 2
-};
-
-enum mpa_frame_key {
- MPA_KEY_REQUEST,
- MPA_KEY_REPLY
-};
-
-enum send_rdma0 {
- SEND_RDMA_READ_ZERO = 1,
- SEND_RDMA_WRITE_ZERO = 2
-};
-
-enum nes_tcpip_pkt_type {
- NES_PKT_TYPE_UNKNOWN,
- NES_PKT_TYPE_SYN,
- NES_PKT_TYPE_SYNACK,
- NES_PKT_TYPE_ACK,
- NES_PKT_TYPE_FIN,
- NES_PKT_TYPE_RST
-};
-
-
-/* type of nes connection */
-enum nes_cm_conn_type {
- NES_CM_IWARP_CONN_TYPE,
-};
-
-/* CM context params */
-struct nes_cm_tcp_context {
- u8 client;
-
- u32 loc_seq_num;
- u32 loc_ack_num;
- u32 rem_ack_num;
- u32 rcv_nxt;
-
- u32 loc_id;
- u32 rem_id;
-
- u32 snd_wnd;
- u32 max_snd_wnd;
-
- u32 rcv_wnd;
- u32 mss;
- u8 snd_wscale;
- u8 rcv_wscale;
-
- struct nes_cm_tsa_context tsa_cntxt;
- struct timeval sent_ts;
-};
-
-
-enum nes_cm_listener_state {
- NES_CM_LISTENER_PASSIVE_STATE = 1,
- NES_CM_LISTENER_ACTIVE_STATE = 2,
- NES_CM_LISTENER_EITHER_STATE = 3
-};
-
-struct nes_cm_listener {
- struct list_head list;
- struct nes_cm_core *cm_core;
- u8 loc_mac[ETH_ALEN];
- nes_addr_t loc_addr, mapped_loc_addr;
- u16 loc_port, mapped_loc_port;
- struct iw_cm_id *cm_id;
- enum nes_cm_conn_type conn_type;
- atomic_t ref_count;
- struct nes_vnic *nesvnic;
- atomic_t pend_accepts_cnt;
- int backlog;
- enum nes_cm_listener_state listener_state;
- u32 reused_node;
- u8 tos;
-};
-
-/* per connection node and node state information */
-struct nes_cm_node {
- nes_addr_t loc_addr, rem_addr;
- nes_addr_t mapped_loc_addr, mapped_rem_addr;
- u16 loc_port, rem_port;
- u16 mapped_loc_port, mapped_rem_port;
-
- u8 loc_mac[ETH_ALEN];
- u8 rem_mac[ETH_ALEN];
-
- enum nes_cm_node_state state;
- struct nes_cm_tcp_context tcp_cntxt;
- struct nes_cm_core *cm_core;
- struct sk_buff_head resend_list;
- atomic_t ref_count;
- struct net_device *netdev;
-
- struct nes_cm_node *loopbackpartner;
-
- struct nes_timer_entry *send_entry;
- struct nes_timer_entry *recv_entry;
- spinlock_t retrans_list_lock;
- enum send_rdma0 send_rdma0_op;
-
- union {
- struct ietf_mpa_v1 mpa_frame;
- struct ietf_mpa_v2 mpa_v2_frame;
- u8 mpa_frame_buf[MAX_CM_BUFFER];
- };
- enum mpa_frame_version mpa_frame_rev;
- u16 ird_size;
- u16 ord_size;
- u16 mpav2_ird_ord;
-
- u16 mpa_frame_size;
- struct iw_cm_id *cm_id;
- struct list_head list;
- int accelerated;
- struct nes_cm_listener *listener;
- enum nes_cm_conn_type conn_type;
- struct nes_vnic *nesvnic;
- int apbvt_set;
- int accept_pend;
- struct list_head timer_entry;
- struct list_head reset_entry;
- struct nes_qp *nesqp;
- atomic_t passive_state;
- u8 tos;
-};
-
-/* structure for client or CM to fill when making CM api calls. */
-/* - only need to set relevant data, based on op. */
-struct nes_cm_info {
- union {
- struct iw_cm_id *cm_id;
- struct net_device *netdev;
- };
-
- u16 loc_port;
- u16 rem_port;
- nes_addr_t loc_addr;
- nes_addr_t rem_addr;
- u16 mapped_loc_port;
- u16 mapped_rem_port;
- nes_addr_t mapped_loc_addr;
- nes_addr_t mapped_rem_addr;
-
- enum nes_cm_conn_type conn_type;
- int backlog;
-};
-
-/* CM event codes */
-enum nes_cm_event_type {
- NES_CM_EVENT_UNKNOWN,
- NES_CM_EVENT_ESTABLISHED,
- NES_CM_EVENT_MPA_REQ,
- NES_CM_EVENT_MPA_CONNECT,
- NES_CM_EVENT_MPA_ACCEPT,
- NES_CM_EVENT_MPA_REJECT,
- NES_CM_EVENT_MPA_ESTABLISHED,
- NES_CM_EVENT_CONNECTED,
- NES_CM_EVENT_CLOSED,
- NES_CM_EVENT_RESET,
- NES_CM_EVENT_DROPPED_PKT,
- NES_CM_EVENT_CLOSE_IMMED,
- NES_CM_EVENT_CLOSE_HARD,
- NES_CM_EVENT_CLOSE_CLEAN,
- NES_CM_EVENT_ABORTED,
- NES_CM_EVENT_SEND_FIRST
-};
-
-/* event to post to CM event handler */
-struct nes_cm_event {
- enum nes_cm_event_type type;
-
- struct nes_cm_info cm_info;
- struct work_struct event_work;
- struct nes_cm_node *cm_node;
-};
-
-struct nes_cm_core {
- enum nes_cm_node_state state;
-
- atomic_t listen_node_cnt;
- struct nes_cm_node listen_list;
- spinlock_t listen_list_lock;
-
- u32 mtu;
- u32 free_tx_pkt_max;
- u32 rx_pkt_posted;
- atomic_t ht_node_cnt;
- struct list_head connected_nodes;
- /* struct list_head hashtable[NES_CM_HASHTABLE_SIZE]; */
- spinlock_t ht_lock;
-
- struct timer_list tcp_timer;
-
- struct nes_cm_ops *api;
-
- int (*post_event)(struct nes_cm_event *event);
- atomic_t events_posted;
- struct workqueue_struct *event_wq;
- struct workqueue_struct *disconn_wq;
-
- atomic_t node_cnt;
- u64 aborted_connects;
- u32 options;
-
- struct nes_cm_node *current_listen_node;
-};
-
-
-#define NES_CM_SET_PKT_SIZE (1 << 1)
-#define NES_CM_SET_FREE_PKT_Q_SIZE (1 << 2)
-
-/* CM ops/API for client interface */
-struct nes_cm_ops {
- int (*accelerated)(struct nes_cm_core *, struct nes_cm_node *);
- struct nes_cm_listener * (*listen)(struct nes_cm_core *, struct nes_vnic *,
- struct nes_cm_info *);
- int (*stop_listener)(struct nes_cm_core *, struct nes_cm_listener *);
- struct nes_cm_node * (*connect)(struct nes_cm_core *,
- struct nes_vnic *, u16, void *,
- struct nes_cm_info *);
- int (*close)(struct nes_cm_core *, struct nes_cm_node *);
- int (*accept)(struct nes_cm_core *, struct nes_cm_node *);
- int (*reject)(struct nes_cm_core *, struct nes_cm_node *);
- int (*recv_pkt)(struct nes_cm_core *, struct nes_vnic *,
- struct sk_buff *);
- int (*destroy_cm_core)(struct nes_cm_core *);
- int (*get)(struct nes_cm_core *);
- int (*set)(struct nes_cm_core *, u32, u32);
-};
-
-int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *,
- enum nes_timer_type, int, int);
-
-int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *);
-int nes_reject(struct iw_cm_id *, const void *, u8);
-int nes_connect(struct iw_cm_id *, struct iw_cm_conn_param *);
-int nes_create_listen(struct iw_cm_id *, int);
-int nes_destroy_listen(struct iw_cm_id *);
-
-int nes_cm_recv(struct sk_buff *, struct net_device *);
-int nes_cm_start(void);
-int nes_cm_stop(void);
-int nes_add_ref_cm_node(struct nes_cm_node *cm_node);
-int nes_rem_ref_cm_node(struct nes_cm_node *cm_node);
-
-#endif /* NES_CM_H */
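
struct nes_v4_quad above is the key used by the accept and connect-completion paths to pick a host hash-table-entry (HTE) slot: the quad of PCI-function index, remote IP, and the two TCP ports is run through get_crc_value(), and the result is inverted and masked with the adapter's hte_index_mask. The sketch below only mirrors the shape of that computation; it assumes a plain CRC-32 via crc32_le(), whereas the driver's real get_crc_value() helper is defined elsewhere in nes_cm.c and must match whatever polynomial/seed the HTE hardware expects.

#include <linux/crc32.h>
#include <linux/types.h>

struct quad {			/* mirrors struct nes_v4_quad above */
	u32    rsvd0;
	__le32 dst_ip_index;	/* only the most significant bits are valid */
	__be32 src_ipaddr;
	__be16 tcp_ports[2];	/* src is low, dest is high */
};

/* Hypothetical stand-in for get_crc_value(). */
static u32 quad_hash_index(const struct quad *q, u32 hte_index_mask)
{
	u32 crc = crc32_le(0xffffffff, (const u8 *)q, sizeof(*q));

	/* same shape as the accept/connected paths: invert, then mask */
	return (crc ^ 0xffffffff) & hte_index_mask;
}
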
diff --git a/drivers/infiniband/hw/nes/nes_context.h b/drivers/infiniband/hw/nes/nes_context.h
deleted file mode 100644
index a69eef16d72d..000000000000
--- a/drivers/infiniband/hw/nes/nes_context.h
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef NES_CONTEXT_H
-#define NES_CONTEXT_H
-
-struct nes_qp_context {
- __le32 misc;
- __le32 cqs;
- __le32 sq_addr_low;
- __le32 sq_addr_high;
- __le32 rq_addr_low;
- __le32 rq_addr_high;
- __le32 misc2;
- __le16 tcpPorts[2];
- __le32 ip0;
- __le32 ip1;
- __le32 ip2;
- __le32 ip3;
- __le32 mss;
- __le32 arp_index_vlan;
- __le32 tcp_state_flow_label;
- __le32 pd_index_wscale;
- __le32 keepalive;
- u32 ts_recent;
- u32 ts_age;
- __le32 snd_nxt;
- __le32 snd_wnd;
- __le32 rcv_nxt;
- __le32 rcv_wnd;
- __le32 snd_max;
- __le32 snd_una;
- u32 srtt;
- __le32 rttvar;
- __le32 ssthresh;
- __le32 cwnd;
- __le32 snd_wl1;
- __le32 snd_wl2;
- __le32 max_snd_wnd;
- __le32 ts_val_delta;
- u32 retransmit;
- u32 probe_cnt;
- u32 hte_index;
- __le32 q2_addr_low;
- __le32 q2_addr_high;
- __le32 ird_index;
- u32 Rsvd3;
- __le32 ird_ord_sizes;
- u32 mrkr_offset;
- __le32 aeq_token_low;
- __le32 aeq_token_high;
-};
-
-/* QP Context Misc Field */
-
-#define NES_QPCONTEXT_MISC_IWARP_VER_MASK 0x00000003
-#define NES_QPCONTEXT_MISC_IWARP_VER_SHIFT 0
-#define NES_QPCONTEXT_MISC_EFB_SIZE_MASK 0x000000C0
-#define NES_QPCONTEXT_MISC_EFB_SIZE_SHIFT 6
-#define NES_QPCONTEXT_MISC_RQ_SIZE_MASK 0x00000300
-#define NES_QPCONTEXT_MISC_RQ_SIZE_SHIFT 8
-#define NES_QPCONTEXT_MISC_SQ_SIZE_MASK 0x00000c00
-#define NES_QPCONTEXT_MISC_SQ_SIZE_SHIFT 10
-#define NES_QPCONTEXT_MISC_PCI_FCN_MASK 0x00007000
-#define NES_QPCONTEXT_MISC_PCI_FCN_SHIFT 12
-#define NES_QPCONTEXT_MISC_DUP_ACKS_MASK 0x00070000
-#define NES_QPCONTEXT_MISC_DUP_ACKS_SHIFT 16
-
-enum nes_qp_context_misc_bits {
- NES_QPCONTEXT_MISC_RX_WQE_SIZE = 0x00000004,
- NES_QPCONTEXT_MISC_IPV4 = 0x00000008,
- NES_QPCONTEXT_MISC_DO_NOT_FRAG = 0x00000010,
- NES_QPCONTEXT_MISC_INSERT_VLAN = 0x00000020,
- NES_QPCONTEXT_MISC_DROS = 0x00008000,
- NES_QPCONTEXT_MISC_WSCALE = 0x00080000,
- NES_QPCONTEXT_MISC_KEEPALIVE = 0x00100000,
- NES_QPCONTEXT_MISC_TIMESTAMP = 0x00200000,
- NES_QPCONTEXT_MISC_SACK = 0x00400000,
- NES_QPCONTEXT_MISC_RDMA_WRITE_EN = 0x00800000,
- NES_QPCONTEXT_MISC_RDMA_READ_EN = 0x01000000,
- NES_QPCONTEXT_MISC_WBIND_EN = 0x10000000,
- NES_QPCONTEXT_MISC_FAST_REGISTER_EN = 0x20000000,
- NES_QPCONTEXT_MISC_PRIV_EN = 0x40000000,
- NES_QPCONTEXT_MISC_NO_NAGLE = 0x80000000
-};
-
-enum nes_qp_acc_wq_sizes {
- HCONTEXT_TSA_WQ_SIZE_4 = 0,
- HCONTEXT_TSA_WQ_SIZE_32 = 1,
- HCONTEXT_TSA_WQ_SIZE_128 = 2,
- HCONTEXT_TSA_WQ_SIZE_512 = 3
-};
-
-/* QP Context Misc2 Fields */
-#define NES_QPCONTEXT_MISC2_TTL_MASK 0x000000ff
-#define NES_QPCONTEXT_MISC2_TTL_SHIFT 0
-#define NES_QPCONTEXT_MISC2_HOP_LIMIT_MASK 0x000000ff
-#define NES_QPCONTEXT_MISC2_HOP_LIMIT_SHIFT 0
-#define NES_QPCONTEXT_MISC2_LIMIT_MASK 0x00000300
-#define NES_QPCONTEXT_MISC2_LIMIT_SHIFT 8
-#define NES_QPCONTEXT_MISC2_NIC_INDEX_MASK 0x0000fc00
-#define NES_QPCONTEXT_MISC2_NIC_INDEX_SHIFT 10
-#define NES_QPCONTEXT_MISC2_SRC_IP_MASK 0x001f0000
-#define NES_QPCONTEXT_MISC2_SRC_IP_SHIFT 16
-#define NES_QPCONTEXT_MISC2_TOS_MASK 0xff000000
-#define NES_QPCONTEXT_MISC2_TOS_SHIFT 24
-#define NES_QPCONTEXT_MISC2_TRAFFIC_CLASS_MASK 0xff000000
-#define NES_QPCONTEXT_MISC2_TRAFFIC_CLASS_SHIFT 24
-
-/* QP Context Tcp State/Flow Label Fields */
-#define NES_QPCONTEXT_TCPFLOW_FLOW_LABEL_MASK 0x000fffff
-#define NES_QPCONTEXT_TCPFLOW_FLOW_LABEL_SHIFT 0
-#define NES_QPCONTEXT_TCPFLOW_TCP_STATE_MASK 0xf0000000
-#define NES_QPCONTEXT_TCPFLOW_TCP_STATE_SHIFT 28
-
-enum nes_qp_tcp_state {
- NES_QPCONTEXT_TCPSTATE_CLOSED = 1,
- NES_QPCONTEXT_TCPSTATE_EST = 5,
- NES_QPCONTEXT_TCPSTATE_TIME_WAIT = 11,
-};
-
-/* QP Context PD Index/wscale Fields */
-#define NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_MASK 0x0000000f
-#define NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_SHIFT 0
-#define NES_QPCONTEXT_PDWSCALE_SND_WSCALE_MASK 0x00000f00
-#define NES_QPCONTEXT_PDWSCALE_SND_WSCALE_SHIFT 8
-#define NES_QPCONTEXT_PDWSCALE_PDINDEX_MASK 0xffff0000
-#define NES_QPCONTEXT_PDWSCALE_PDINDEX_SHIFT 16
-
-/* QP Context Keepalive Fields */
-#define NES_QPCONTEXT_KEEPALIVE_DELTA_MASK 0x0000ffff
-#define NES_QPCONTEXT_KEEPALIVE_DELTA_SHIFT 0
-#define NES_QPCONTEXT_KEEPALIVE_PROBE_CNT_MASK 0x00ff0000
-#define NES_QPCONTEXT_KEEPALIVE_PROBE_CNT_SHIFT 16
-#define NES_QPCONTEXT_KEEPALIVE_INTV_MASK 0xff000000
-#define NES_QPCONTEXT_KEEPALIVE_INTV_SHIFT 24
-
-/* QP Context ORD/IRD Fields */
-#define NES_QPCONTEXT_ORDIRD_ORDSIZE_MASK 0x0000007f
-#define NES_QPCONTEXT_ORDIRD_ORDSIZE_SHIFT 0
-#define NES_QPCONTEXT_ORDIRD_IRDSIZE_MASK 0x00030000
-#define NES_QPCONTEXT_ORDIRD_IRDSIZE_SHIFT 16
-#define NES_QPCONTEXT_ORDIRD_IWARP_MODE_MASK 0x30000000
-#define NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT 28
-
-enum nes_ord_ird_bits {
- NES_QPCONTEXT_ORDIRD_WRPDU = 0x02000000,
- NES_QPCONTEXT_ORDIRD_LSMM_PRESENT = 0x04000000,
- NES_QPCONTEXT_ORDIRD_ALSMM = 0x08000000,
- NES_QPCONTEXT_ORDIRD_AAH = 0x40000000,
- NES_QPCONTEXT_ORDIRD_RNMC = 0x80000000
-};
-
-enum nes_iwarp_qp_state {
- NES_QPCONTEXT_IWARP_STATE_NONEXIST = 0,
- NES_QPCONTEXT_IWARP_STATE_IDLE = 1,
- NES_QPCONTEXT_IWARP_STATE_RTS = 2,
- NES_QPCONTEXT_IWARP_STATE_CLOSING = 3,
- NES_QPCONTEXT_IWARP_STATE_TERMINATE = 5,
- NES_QPCONTEXT_IWARP_STATE_ERROR = 6
-};
-
-
-#endif /* NES_CONTEXT_H */
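
The *_MASK/*_SHIFT pairs above describe how the little-endian QP context words are packed. The callers in nes_cm.c simply OR shifted values into freshly zeroed context words (for example, misc2 |= cpu_to_le32(PCI_FUNC(...) << NES_QPCONTEXT_MISC2_SRC_IP_SHIFT)). Updating a field in an already populated word would need a read-modify-write helper along the following lines; the helper below is a hypothetical illustration, not something the driver defines.

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical field setter for a little-endian context word: clear the
 * field with its mask, then OR in the shifted value. */
static inline void qpctx_set_field(__le32 *word, u32 mask, u32 shift, u32 val)
{
	u32 tmp = le32_to_cpu(*word);

	tmp &= ~mask;
	tmp |= (val << shift) & mask;
	*word = cpu_to_le32(tmp);
}

/* e.g.: qpctx_set_field(&ctx->misc2, NES_QPCONTEXT_MISC2_SRC_IP_MASK,
 *                       NES_QPCONTEXT_MISC2_SRC_IP_SHIFT, pci_fn); */
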
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
deleted file mode 100644
index 4713dd7ed764..000000000000
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ /dev/null
@@ -1,3937 +0,0 @@
-/*
- * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/if_vlan.h>
-#include <linux/inet_lro.h>
-#include <linux/slab.h>
-
-#include "nes.h"
-
-static unsigned int nes_lro_max_aggr = NES_LRO_MAX_AGGR;
-module_param(nes_lro_max_aggr, uint, 0444);
-MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
-
-static int wide_ppm_offset;
-module_param(wide_ppm_offset, int, 0644);
-MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
-
-static u32 crit_err_count;
-u32 int_mod_timer_init;
-u32 int_mod_cq_depth_256;
-u32 int_mod_cq_depth_128;
-u32 int_mod_cq_depth_32;
-u32 int_mod_cq_depth_24;
-u32 int_mod_cq_depth_16;
-u32 int_mod_cq_depth_4;
-u32 int_mod_cq_depth_1;
-static const u8 nes_max_critical_error_count = 100;
-#include "nes_cm.h"
-
-static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq);
-static void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count);
-static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
- struct nes_adapter *nesadapter, u8 OneG_Mode);
-static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq);
-static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq);
-static void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq);
-static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
- struct nes_hw_aeqe *aeqe);
-static void process_critical_error(struct nes_device *nesdev);
-static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
-static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
-static void nes_terminate_start_timer(struct nes_qp *nesqp);
-
-#ifdef CONFIG_INFINIBAND_NES_DEBUG
-static unsigned char *nes_iwarp_state_str[] = {
- "Non-Existent",
- "Idle",
- "RTS",
- "Closing",
- "RSVD1",
- "Terminate",
- "Error",
- "RSVD2",
-};
-
-static unsigned char *nes_tcp_state_str[] = {
- "Non-Existent",
- "Closed",
- "Listen",
- "SYN Sent",
- "SYN Rcvd",
- "Established",
- "Close Wait",
- "FIN Wait 1",
- "Closing",
- "Last Ack",
- "FIN Wait 2",
- "Time Wait",
- "RSVD1",
- "RSVD2",
- "RSVD3",
- "RSVD4",
-};
-#endif
-
-static inline void print_ip(struct nes_cm_node *cm_node)
-{
- unsigned char *rem_addr;
- if (cm_node) {
- rem_addr = (unsigned char *)&cm_node->rem_addr;
- printk(KERN_ERR PFX "Remote IP addr: %pI4\n", rem_addr);
- }
-}
-
-/**
- * nes_nic_init_timer_defaults
- */
-void nes_nic_init_timer_defaults(struct nes_device *nesdev, u8 jumbomode)
-{
- unsigned long flags;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
-
- spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
-
- shared_timer->timer_in_use_min = NES_NIC_FAST_TIMER_LOW;
- shared_timer->timer_in_use_max = NES_NIC_FAST_TIMER_HIGH;
- if (jumbomode) {
- shared_timer->threshold_low = DEFAULT_JUMBO_NES_QL_LOW;
- shared_timer->threshold_target = DEFAULT_JUMBO_NES_QL_TARGET;
- shared_timer->threshold_high = DEFAULT_JUMBO_NES_QL_HIGH;
- } else {
- shared_timer->threshold_low = DEFAULT_NES_QL_LOW;
- shared_timer->threshold_target = DEFAULT_NES_QL_TARGET;
- shared_timer->threshold_high = DEFAULT_NES_QL_HIGH;
- }
-
- /* todo use netdev->mtu to set thresholds */
- spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
-}
-
-
-/**
- * nes_nic_init_timer
- */
-static void nes_nic_init_timer(struct nes_device *nesdev)
-{
- unsigned long flags;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
-
- spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
-
- if (shared_timer->timer_in_use_old == 0) {
- nesdev->deepcq_count = 0;
- shared_timer->timer_direction_upward = 0;
- shared_timer->timer_direction_downward = 0;
- shared_timer->timer_in_use = NES_NIC_FAST_TIMER;
- shared_timer->timer_in_use_old = 0;
-
- }
- if (shared_timer->timer_in_use != shared_timer->timer_in_use_old) {
- shared_timer->timer_in_use_old = shared_timer->timer_in_use;
- nes_write32(nesdev->regs+NES_PERIODIC_CONTROL,
- 0x80000000 | ((u32)(shared_timer->timer_in_use*8)));
- }
- /* todo use netdev->mtu to set thresholds */
- spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
-}
-
-
-/**
- * nes_nic_tune_timer
- */
-static void nes_nic_tune_timer(struct nes_device *nesdev)
-{
- unsigned long flags;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
- u16 cq_count = nesdev->currcq_count;
-
- spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
-
- if (shared_timer->cq_count_old <= cq_count)
- shared_timer->cq_direction_downward = 0;
- else
- shared_timer->cq_direction_downward++;
- shared_timer->cq_count_old = cq_count;
- if (shared_timer->cq_direction_downward > NES_NIC_CQ_DOWNWARD_TREND) {
- if (cq_count <= shared_timer->threshold_low &&
- shared_timer->threshold_low > 4) {
- shared_timer->threshold_low = shared_timer->threshold_low/2;
- shared_timer->cq_direction_downward=0;
- nesdev->currcq_count = 0;
- spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
- return;
- }
- }
-
- if (cq_count > 1) {
- nesdev->deepcq_count += cq_count;
- if (cq_count <= shared_timer->threshold_low) { /* increase timer gently */
- shared_timer->timer_direction_upward++;
- shared_timer->timer_direction_downward = 0;
- } else if (cq_count <= shared_timer->threshold_target) { /* balanced */
- shared_timer->timer_direction_upward = 0;
- shared_timer->timer_direction_downward = 0;
- } else if (cq_count <= shared_timer->threshold_high) { /* decrease timer gently */
- shared_timer->timer_direction_downward++;
- shared_timer->timer_direction_upward = 0;
- } else if (cq_count <= (shared_timer->threshold_high) * 2) {
- shared_timer->timer_in_use -= 2;
- shared_timer->timer_direction_upward = 0;
- shared_timer->timer_direction_downward++;
- } else {
- shared_timer->timer_in_use -= 4;
- shared_timer->timer_direction_upward = 0;
- shared_timer->timer_direction_downward++;
- }
-
- if (shared_timer->timer_direction_upward > 3 ) { /* using history */
- shared_timer->timer_in_use += 3;
- shared_timer->timer_direction_upward = 0;
- shared_timer->timer_direction_downward = 0;
- }
- if (shared_timer->timer_direction_downward > 5) { /* using history */
- shared_timer->timer_in_use -= 4 ;
- shared_timer->timer_direction_downward = 0;
- shared_timer->timer_direction_upward = 0;
- }
- }
-
- /* boundary checking */
- if (shared_timer->timer_in_use > shared_timer->threshold_high)
- shared_timer->timer_in_use = shared_timer->threshold_high;
- else if (shared_timer->timer_in_use < shared_timer->threshold_low)
- shared_timer->timer_in_use = shared_timer->threshold_low;
-
- nesdev->currcq_count = 0;
-
- spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
-}
-
-
-/**
- * nes_init_adapter - initialize adapter
- */
-struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
- struct nes_adapter *nesadapter = NULL;
- unsigned long num_pds;
- u32 u32temp;
- u32 port_count;
- u16 max_rq_wrs;
- u16 max_sq_wrs;
- u32 max_mr;
- u32 max_256pbl;
- u32 max_4kpbl;
- u32 max_qp;
- u32 max_irrq;
- u32 max_cq;
- u32 hte_index_mask;
- u32 adapter_size;
- u32 arp_table_size;
- u16 vendor_id;
- u16 device_id;
- u8 OneG_Mode;
- u8 func_index;
-
- /* search the list of existing adapters */
- list_for_each_entry(nesadapter, &nes_adapter_list, list) {
- nes_debug(NES_DBG_INIT, "Searching Adapter list for PCI devfn = 0x%X,"
- " adapter PCI slot/bus = %u/%u, pci devices PCI slot/bus = %u/%u, .\n",
- nesdev->pcidev->devfn,
- PCI_SLOT(nesadapter->devfn),
- nesadapter->bus_number,
- PCI_SLOT(nesdev->pcidev->devfn),
- nesdev->pcidev->bus->number );
- if ((PCI_SLOT(nesadapter->devfn) == PCI_SLOT(nesdev->pcidev->devfn)) &&
- (nesadapter->bus_number == nesdev->pcidev->bus->number)) {
- nesadapter->ref_count++;
- return nesadapter;
- }
- }
-
- /* no adapter found */
- num_pds = pci_resource_len(nesdev->pcidev, BAR_1) >> PAGE_SHIFT;
- if ((hw_rev != NE020_REV) && (hw_rev != NE020_REV1)) {
- nes_debug(NES_DBG_INIT, "NE020 driver detected unknown hardware revision 0x%x\n",
- hw_rev);
- return NULL;
- }
-
- nes_debug(NES_DBG_INIT, "Determine Soft Reset, QP_control=0x%x, CPU0=0x%x, CPU1=0x%x, CPU2=0x%x\n",
- nes_read_indexed(nesdev, NES_IDX_QP_CONTROL + PCI_FUNC(nesdev->pcidev->devfn) * 8),
- nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS),
- nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS + 4),
- nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS + 8));
-
- nes_debug(NES_DBG_INIT, "Reset and init NE020\n");
-
-
- if ((port_count = nes_reset_adapter_ne020(nesdev, &OneG_Mode)) == 0)
- return NULL;
-
- max_qp = nes_read_indexed(nesdev, NES_IDX_QP_CTX_SIZE);
- nes_debug(NES_DBG_INIT, "QP_CTX_SIZE=%u\n", max_qp);
-
- u32temp = nes_read_indexed(nesdev, NES_IDX_QUAD_HASH_TABLE_SIZE);
- if (max_qp > ((u32)1 << (u32temp & 0x001f))) {
- nes_debug(NES_DBG_INIT, "Reducing Max QPs to %u due to hash table size = 0x%08X\n",
- max_qp, u32temp);
- max_qp = (u32)1 << (u32temp & 0x001f);
- }
-
- hte_index_mask = ((u32)1 << ((u32temp & 0x001f)+1))-1;
- nes_debug(NES_DBG_INIT, "Max QP = %u, hte_index_mask = 0x%08X.\n",
- max_qp, hte_index_mask);
-
- u32temp = nes_read_indexed(nesdev, NES_IDX_IRRQ_COUNT);
-
- max_irrq = 1 << (u32temp & 0x001f);
-
- if (max_qp > max_irrq) {
- max_qp = max_irrq;
- nes_debug(NES_DBG_INIT, "Reducing Max QPs to %u due to Available Q1s.\n",
- max_qp);
- }
-
- /* there should be no reason to allocate more pds than qps */
- if (num_pds > max_qp)
- num_pds = max_qp;
-
- u32temp = nes_read_indexed(nesdev, NES_IDX_MRT_SIZE);
- max_mr = (u32)8192 << (u32temp & 0x7);
-
- u32temp = nes_read_indexed(nesdev, NES_IDX_PBL_REGION_SIZE);
- max_256pbl = (u32)1 << (u32temp & 0x0000001f);
- max_4kpbl = (u32)1 << ((u32temp >> 16) & 0x0000001f);
- max_cq = nes_read_indexed(nesdev, NES_IDX_CQ_CTX_SIZE);
-
- u32temp = nes_read_indexed(nesdev, NES_IDX_ARP_CACHE_SIZE);
- arp_table_size = 1 << u32temp;
-
- adapter_size = (sizeof(struct nes_adapter) +
- (sizeof(unsigned long)-1)) & (~(sizeof(unsigned long)-1));
- adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp);
- adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr);
- adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(max_cq);
- adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(num_pds);
- adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(arp_table_size);
- adapter_size += sizeof(struct nes_qp **) * max_qp;
-
- /* allocate a new adapter struct */
- nesadapter = kzalloc(adapter_size, GFP_KERNEL);
- if (nesadapter == NULL) {
- return NULL;
- }
-
- nes_debug(NES_DBG_INIT, "Allocating new nesadapter @ %p, size = %u (actual size = %u).\n",
- nesadapter, (u32)sizeof(struct nes_adapter), adapter_size);
-
- if (nes_read_eeprom_values(nesdev, nesadapter)) {
- printk(KERN_ERR PFX "Unable to read EEPROM data.\n");
- kfree(nesadapter);
- return NULL;
- }
-
- nesadapter->vendor_id = (((u32) nesadapter->mac_addr_high) << 8) |
- (nesadapter->mac_addr_low >> 24);
-
- pci_bus_read_config_word(nesdev->pcidev->bus, nesdev->pcidev->devfn,
- PCI_DEVICE_ID, &device_id);
- nesadapter->vendor_part_id = device_id;
-
- if (nes_init_serdes(nesdev, hw_rev, port_count, nesadapter,
- OneG_Mode)) {
- kfree(nesadapter);
- return NULL;
- }
- nes_init_csr_ne020(nesdev, hw_rev, port_count);
-
- memset(nesadapter->pft_mcast_map, 255,
- sizeof nesadapter->pft_mcast_map);
-
- /* populate the new nesadapter */
- nesadapter->devfn = nesdev->pcidev->devfn;
- nesadapter->bus_number = nesdev->pcidev->bus->number;
- nesadapter->ref_count = 1;
- nesadapter->timer_int_req = 0xffff0000;
- nesadapter->OneG_Mode = OneG_Mode;
- nesadapter->doorbell_start = nesdev->doorbell_region;
-
- /* nesadapter->tick_delta = clk_divisor; */
- nesadapter->hw_rev = hw_rev;
- nesadapter->port_count = port_count;
-
- nesadapter->max_qp = max_qp;
- nesadapter->hte_index_mask = hte_index_mask;
- nesadapter->max_irrq = max_irrq;
- nesadapter->max_mr = max_mr;
- nesadapter->max_256pbl = max_256pbl - 1;
- nesadapter->max_4kpbl = max_4kpbl - 1;
- nesadapter->max_cq = max_cq;
- nesadapter->free_256pbl = max_256pbl - 1;
- nesadapter->free_4kpbl = max_4kpbl - 1;
- nesadapter->max_pd = num_pds;
- nesadapter->arp_table_size = arp_table_size;
-
- nesadapter->et_pkt_rate_low = NES_TIMER_ENABLE_LIMIT;
- if (nes_drv_opt & NES_DRV_OPT_DISABLE_INT_MOD) {
- nesadapter->et_use_adaptive_rx_coalesce = 0;
- nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT;
- nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval;
- } else {
- nesadapter->et_use_adaptive_rx_coalesce = 1;
- nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT_DYNAMIC;
- nesadapter->et_rx_coalesce_usecs_irq = 0;
- printk(PFX "%s: Using Adaptive Interrupt Moderation\n", __func__);
- }
- /* Setup and enable the periodic timer */
- if (nesadapter->et_rx_coalesce_usecs_irq)
- nes_write32(nesdev->regs+NES_PERIODIC_CONTROL, 0x80000000 |
- ((u32)(nesadapter->et_rx_coalesce_usecs_irq * 8)));
- else
- nes_write32(nesdev->regs+NES_PERIODIC_CONTROL, 0x00000000);
-
- nesadapter->base_pd = 1;
-
- nesadapter->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
- IB_DEVICE_MEM_WINDOW |
- IB_DEVICE_MEM_MGT_EXTENSIONS;
-
- nesadapter->allocated_qps = (unsigned long *)&(((unsigned char *)nesadapter)
- [(sizeof(struct nes_adapter)+(sizeof(unsigned long)-1))&(~(sizeof(unsigned long)-1))]);
- nesadapter->allocated_cqs = &nesadapter->allocated_qps[BITS_TO_LONGS(max_qp)];
- nesadapter->allocated_mrs = &nesadapter->allocated_cqs[BITS_TO_LONGS(max_cq)];
- nesadapter->allocated_pds = &nesadapter->allocated_mrs[BITS_TO_LONGS(max_mr)];
- nesadapter->allocated_arps = &nesadapter->allocated_pds[BITS_TO_LONGS(num_pds)];
- nesadapter->qp_table = (struct nes_qp **)(&nesadapter->allocated_arps[BITS_TO_LONGS(arp_table_size)]);
-
-
- /* mark the usual suspect QPs, MR and CQs as in use */
- for (u32temp = 0; u32temp < NES_FIRST_QPN; u32temp++) {
- set_bit(u32temp, nesadapter->allocated_qps);
- set_bit(u32temp, nesadapter->allocated_cqs);
- }
- set_bit(0, nesadapter->allocated_mrs);
-
- for (u32temp = 0; u32temp < 20; u32temp++)
- set_bit(u32temp, nesadapter->allocated_pds);
- u32temp = nes_read_indexed(nesdev, NES_IDX_QP_MAX_CFG_SIZES);
-
- max_rq_wrs = ((u32temp >> 8) & 3);
- switch (max_rq_wrs) {
- case 0:
- max_rq_wrs = 4;
- break;
- case 1:
- max_rq_wrs = 16;
- break;
- case 2:
- max_rq_wrs = 32;
- break;
- case 3:
- max_rq_wrs = 512;
- break;
- }
-
- max_sq_wrs = (u32temp & 3);
- switch (max_sq_wrs) {
- case 0:
- max_sq_wrs = 4;
- break;
- case 1:
- max_sq_wrs = 16;
- break;
- case 2:
- max_sq_wrs = 32;
- break;
- case 3:
- max_sq_wrs = 512;
- break;
- }
- nesadapter->max_qp_wr = min(max_rq_wrs, max_sq_wrs);
- nesadapter->max_irrq_wr = (u32temp >> 16) & 3;
-
- nesadapter->max_sge = 4;
- nesadapter->max_cqe = 32766;
-
- if (nes_read_eeprom_values(nesdev, nesadapter)) {
- printk(KERN_ERR PFX "Unable to read EEPROM data.\n");
- kfree(nesadapter);
- return NULL;
- }
-
- u32temp = nes_read_indexed(nesdev, NES_IDX_TCP_TIMER_CONFIG);
- nes_write_indexed(nesdev, NES_IDX_TCP_TIMER_CONFIG,
- (u32temp & 0xff000000) | (nesadapter->tcp_timer_core_clk_divisor & 0x00ffffff));
-
- /* setup port configuration */
- if (nesadapter->port_count == 1) {
- nesadapter->log_port = 0x00000000;
- if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT)
- nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000002);
- else
- nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000003);
- } else {
- if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) {
- nesadapter->log_port = 0x000000D8;
- } else {
- if (nesadapter->port_count == 2)
- nesadapter->log_port = 0x00000044;
- else
- nesadapter->log_port = 0x000000e4;
- }
- nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000003);
- }
-
- nes_write_indexed(nesdev, NES_IDX_NIC_LOGPORT_TO_PHYPORT,
- nesadapter->log_port);
- nes_debug(NES_DBG_INIT, "Probe time, LOG2PHY=%u\n",
- nes_read_indexed(nesdev, NES_IDX_NIC_LOGPORT_TO_PHYPORT));
-
- spin_lock_init(&nesadapter->resource_lock);
- spin_lock_init(&nesadapter->phy_lock);
- spin_lock_init(&nesadapter->pbl_lock);
- spin_lock_init(&nesadapter->periodic_timer_lock);
-
- INIT_LIST_HEAD(&nesadapter->nesvnic_list[0]);
- INIT_LIST_HEAD(&nesadapter->nesvnic_list[1]);
- INIT_LIST_HEAD(&nesadapter->nesvnic_list[2]);
- INIT_LIST_HEAD(&nesadapter->nesvnic_list[3]);
-
- if ((!nesadapter->OneG_Mode) && (nesadapter->port_count == 2)) {
- u32 pcs_control_status0, pcs_control_status1;
- u32 reset_value;
- u32 i = 0;
- u32 int_cnt = 0;
- u32 ext_cnt = 0;
- unsigned long flags;
- u32 j = 0;
-
- pcs_control_status0 = nes_read_indexed(nesdev,
- NES_IDX_PHY_PCS_CONTROL_STATUS0);
- pcs_control_status1 = nes_read_indexed(nesdev,
- NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
-
- for (i = 0; i < NES_MAX_LINK_CHECK; i++) {
- pcs_control_status0 = nes_read_indexed(nesdev,
- NES_IDX_PHY_PCS_CONTROL_STATUS0);
- pcs_control_status1 = nes_read_indexed(nesdev,
- NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
- if ((0x0F000100 == (pcs_control_status0 & 0x0F000100))
- || (0x0F000100 == (pcs_control_status1 & 0x0F000100)))
- int_cnt++;
- msleep(1);
- }
- if (int_cnt > 1) {
- spin_lock_irqsave(&nesadapter->phy_lock, flags);
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F0C8);
- mh_detected++;
- reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
- reset_value |= 0x0000003d;
- nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value);
-
- while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET)
- & 0x00000040) != 0x00000040) && (j++ < 5000));
- spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
-
- pcs_control_status0 = nes_read_indexed(nesdev,
- NES_IDX_PHY_PCS_CONTROL_STATUS0);
- pcs_control_status1 = nes_read_indexed(nesdev,
- NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
-
- for (i = 0; i < NES_MAX_LINK_CHECK; i++) {
- pcs_control_status0 = nes_read_indexed(nesdev,
- NES_IDX_PHY_PCS_CONTROL_STATUS0);
- pcs_control_status1 = nes_read_indexed(nesdev,
- NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
- if ((0x0F000100 == (pcs_control_status0 & 0x0F000100))
- || (0x0F000100 == (pcs_control_status1 & 0x0F000100))) {
- if (++ext_cnt > int_cnt) {
- spin_lock_irqsave(&nesadapter->phy_lock, flags);
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1,
- 0x0000F088);
- mh_detected++;
- reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
- reset_value |= 0x0000003d;
- nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value);
-
- while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET)
- & 0x00000040) != 0x00000040) && (j++ < 5000));
- spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
- break;
- }
- }
- msleep(1);
- }
- }
- }
-
- if (nesadapter->hw_rev == NE020_REV) {
- init_timer(&nesadapter->mh_timer);
- nesadapter->mh_timer.function = nes_mh_fix;
- nesadapter->mh_timer.expires = jiffies + (HZ/5); /* 200 ms */
- nesadapter->mh_timer.data = (unsigned long)nesdev;
- add_timer(&nesadapter->mh_timer);
- } else {
- nes_write32(nesdev->regs+NES_INTF_INT_STAT, 0x0f000000);
- }
-
- init_timer(&nesadapter->lc_timer);
- nesadapter->lc_timer.function = nes_clc;
- nesadapter->lc_timer.expires = jiffies + 3600 * HZ; /* 1 hour */
- nesadapter->lc_timer.data = (unsigned long)nesdev;
- add_timer(&nesadapter->lc_timer);
-
- list_add_tail(&nesadapter->list, &nes_adapter_list);
-
- for (func_index = 0; func_index < 8; func_index++) {
- pci_bus_read_config_word(nesdev->pcidev->bus,
- PCI_DEVFN(PCI_SLOT(nesdev->pcidev->devfn),
- func_index), PCI_VENDOR_ID, &vendor_id);
- if (vendor_id == 0xffff)
- break;
- }
- nes_debug(NES_DBG_INIT, "%s %d functions found for %s.\n", __func__,
- func_index, pci_name(nesdev->pcidev));
- nesadapter->adapter_fcn_count = func_index;
-
- return nesadapter;
-}
-
-
-/**
- * nes_reset_adapter_ne020
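- * Soft-reset the NE020, wait for the internal CPU and SerDes to become
- * ready, and return the detected port count (0 on failure).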
- */
-static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode)
-{
- u32 port_count;
- u32 u32temp;
- u32 i;
-
- u32temp = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
- port_count = ((u32temp & 0x00000300) >> 8) + 1;
- /* TODO: assuming that both SERDES are set the same for now */
- *OneG_Mode = (u32temp & 0x00003c00) ? 0 : 1;
- nes_debug(NES_DBG_INIT, "Initial Software Reset = 0x%08X, port_count=%u\n",
- u32temp, port_count);
- if (*OneG_Mode)
- nes_debug(NES_DBG_INIT, "Running in 1G mode.\n");
- u32temp &= 0xff00ffc0;
- switch (port_count) {
- case 1:
- u32temp |= 0x00ee0000;
- break;
- case 2:
- u32temp |= 0x00cc0000;
- break;
- case 4:
- u32temp |= 0x00000000;
- break;
- default:
- return 0;
- }
-
- /* check and do full reset if needed */
- if (nes_read_indexed(nesdev, NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8))) {
- nes_debug(NES_DBG_INIT, "Issuing Full Soft reset = 0x%08X\n", u32temp | 0xd);
- nes_write32(nesdev->regs+NES_SOFTWARE_RESET, u32temp | 0xd);
-
- i = 0;
- while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000)
- mdelay(1);
- if (i > 10000) {
- nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n");
- return 0;
- }
-
- i = 0;
- while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000)
- mdelay(1);
- if (i > 10000) {
- printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n",
- nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS));
- return 0;
- }
- }
-
- /* port reset */
- switch (port_count) {
- case 1:
- u32temp |= 0x00ee0010;
- break;
- case 2:
- u32temp |= 0x00cc0030;
- break;
- case 4:
- u32temp |= 0x00000030;
- break;
- }
-
- nes_debug(NES_DBG_INIT, "Issuing Port Soft reset = 0x%08X\n", u32temp | 0xd);
- nes_write32(nesdev->regs+NES_SOFTWARE_RESET, u32temp | 0xd);
-
- i = 0;
- while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000)
- mdelay(1);
- if (i > 10000) {
- nes_debug(NES_DBG_INIT, "Did not see port soft reset done.\n");
- return 0;
- }
-
- /* serdes 0 */
- i = 0;
- while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0)
- & 0x0000000f)) != 0x0000000f) && i++ < 5000)
- mdelay(1);
- if (i > 5000) {
- nes_debug(NES_DBG_INIT, "Serdes 0 not ready, status=%x\n", u32temp);
- return 0;
- }
-
- /* serdes 1 */
- if (port_count > 1) {
- i = 0;
- while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1)
- & 0x0000000f)) != 0x0000000f) && i++ < 5000)
- mdelay(1);
- if (i > 5000) {
- nes_debug(NES_DBG_INIT, "Serdes 1 not ready, status=%x\n", u32temp);
- return 0;
- }
- }
-
- return port_count;
-}
-
-
-/**
- * nes_init_serdes
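- * Program the SerDes lanes for the detected hardware revision and PHY types.
- * Returns 0 on success or 1 if SerDes 0 does not become ready.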
- */
-static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
- struct nes_adapter *nesadapter, u8 OneG_Mode)
-{
- int i;
- u32 u32temp;
- u32 sds;
-
- if (hw_rev != NE020_REV) {
- /* init serdes 0 */
- switch (nesadapter->phy_type[0]) {
- case NES_PHY_TYPE_CX4:
- if (wide_ppm_offset)
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000FFFAA);
- else
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
- break;
- case NES_PHY_TYPE_KR:
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x00000000);
- break;
- case NES_PHY_TYPE_PUMA_1G:
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
- sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0);
- sds |= 0x00000100;
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds);
- break;
- default:
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
- break;
- }
-
- if (!OneG_Mode)
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000);
-
- if (port_count < 2)
- return 0;
-
- /* init serdes 1 */
- if (!(OneG_Mode && (nesadapter->phy_type[1] != NES_PHY_TYPE_PUMA_1G)))
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000FF);
-
- switch (nesadapter->phy_type[1]) {
- case NES_PHY_TYPE_ARGUS:
- case NES_PHY_TYPE_SFP_D:
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x00000000);
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x00000000);
- break;
- case NES_PHY_TYPE_CX4:
- if (wide_ppm_offset)
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000FFFAA);
- break;
- case NES_PHY_TYPE_KR:
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x00000000);
- break;
- case NES_PHY_TYPE_PUMA_1G:
- sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
- sds |= 0x00000100;
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, sds);
- }
- if (!OneG_Mode) {
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1, 0x11110000);
- sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
- sds &= 0xFFFFFFBF;
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, sds);
- }
- } else {
- /* init serdes 0 */
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008);
- i = 0;
- while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0)
- & 0x0000000f)) != 0x0000000f) && i++ < 5000)
- mdelay(1);
- if (i > 5000) {
- nes_debug(NES_DBG_PHY, "Init: serdes 0 not ready, status=%x\n", u32temp);
- return 1;
- }
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x000bdef7);
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_DRIVE0, 0x9ce73000);
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_MODE0, 0x0ff00000);
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_SIGDET0, 0x00000000);
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_BYPASS0, 0x00000000);
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_LOOPBACK_CONTROL0, 0x00000000);
- if (OneG_Mode)
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0182222);
- else
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0042222);
-
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000ff);
- if (port_count > 1) {
- /* init serdes 1 */
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x00000048);
- i = 0;
- while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1)
- & 0x0000000f)) != 0x0000000f) && (i++ < 5000))
- mdelay(1);
- if (i > 5000) {
- printk("%s: Init: serdes 1 not ready, status=%x\n", __func__, u32temp);
- /* return 1; */
- }
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x000bdef7);
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_DRIVE1, 0x9ce73000);
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_MODE1, 0x0ff00000);
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_SIGDET1, 0x00000000);
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_BYPASS1, 0x00000000);
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_LOOPBACK_CONTROL1, 0x00000000);
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL1, 0xf0002222);
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000ff);
- }
- }
- return 0;
-}
-
-
-/**
- * nes_init_csr_ne020
- * Initialize registers for ne020 hardware
- */
-static void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count)
-{
- u32 u32temp;
-
- nes_debug(NES_DBG_INIT, "port_count=%d\n", port_count);
-
- nes_write_indexed(nesdev, 0x000001E4, 0x00000007);
- /* nes_write_indexed(nesdev, 0x000001E8, 0x000208C4); */
- nes_write_indexed(nesdev, 0x000001E8, 0x00020874);
- nes_write_indexed(nesdev, 0x000001D8, 0x00048002);
- /* nes_write_indexed(nesdev, 0x000001D8, 0x0004B002); */
- nes_write_indexed(nesdev, 0x000001FC, 0x00050005);
- nes_write_indexed(nesdev, 0x00000600, 0x55555555);
- nes_write_indexed(nesdev, 0x00000604, 0x55555555);
-
- /* TODO: move these MAC register settings to NIC bringup */
- nes_write_indexed(nesdev, 0x00002000, 0x00000001);
- nes_write_indexed(nesdev, 0x00002004, 0x00000001);
- nes_write_indexed(nesdev, 0x00002008, 0x0000FFFF);
- nes_write_indexed(nesdev, 0x0000200C, 0x00000001);
- nes_write_indexed(nesdev, 0x00002010, 0x000003c1);
- nes_write_indexed(nesdev, 0x0000201C, 0x75345678);
- if (port_count > 1) {
- nes_write_indexed(nesdev, 0x00002200, 0x00000001);
- nes_write_indexed(nesdev, 0x00002204, 0x00000001);
- nes_write_indexed(nesdev, 0x00002208, 0x0000FFFF);
- nes_write_indexed(nesdev, 0x0000220C, 0x00000001);
- nes_write_indexed(nesdev, 0x00002210, 0x000003c1);
- nes_write_indexed(nesdev, 0x0000221C, 0x75345678);
- nes_write_indexed(nesdev, 0x00000908, 0x20000001);
- }
- if (port_count > 2) {
- nes_write_indexed(nesdev, 0x00002400, 0x00000001);
- nes_write_indexed(nesdev, 0x00002404, 0x00000001);
- nes_write_indexed(nesdev, 0x00002408, 0x0000FFFF);
- nes_write_indexed(nesdev, 0x0000240C, 0x00000001);
- nes_write_indexed(nesdev, 0x00002410, 0x000003c1);
- nes_write_indexed(nesdev, 0x0000241C, 0x75345678);
- nes_write_indexed(nesdev, 0x00000910, 0x20000001);
-
- nes_write_indexed(nesdev, 0x00002600, 0x00000001);
- nes_write_indexed(nesdev, 0x00002604, 0x00000001);
- nes_write_indexed(nesdev, 0x00002608, 0x0000FFFF);
- nes_write_indexed(nesdev, 0x0000260C, 0x00000001);
- nes_write_indexed(nesdev, 0x00002610, 0x000003c1);
- nes_write_indexed(nesdev, 0x0000261C, 0x75345678);
- nes_write_indexed(nesdev, 0x00000918, 0x20000001);
- }
-
- nes_write_indexed(nesdev, 0x00005000, 0x00018000);
- /* nes_write_indexed(nesdev, 0x00005000, 0x00010000); */
- nes_write_indexed(nesdev, NES_IDX_WQM_CONFIG1, (wqm_quanta << 1) |
- 0x00000001);
- nes_write_indexed(nesdev, 0x00005008, 0x1F1F1F1F);
- nes_write_indexed(nesdev, 0x00005010, 0x1F1F1F1F);
- nes_write_indexed(nesdev, 0x00005018, 0x1F1F1F1F);
- nes_write_indexed(nesdev, 0x00005020, 0x1F1F1F1F);
- nes_write_indexed(nesdev, 0x00006090, 0xFFFFFFFF);
-
- /* TODO: move this to code, get from EEPROM */
- nes_write_indexed(nesdev, 0x00000900, 0x20000001);
- nes_write_indexed(nesdev, 0x000060C0, 0x0000028e);
- nes_write_indexed(nesdev, 0x000060C8, 0x00000020);
-
- nes_write_indexed(nesdev, 0x000001EC, 0x7b2625a0);
- /* nes_write_indexed(nesdev, 0x000001EC, 0x5f2625a0); */
-
- if (hw_rev != NE020_REV) {
- u32temp = nes_read_indexed(nesdev, 0x000008e8);
- u32temp |= 0x80000000;
- nes_write_indexed(nesdev, 0x000008e8, u32temp);
- u32temp = nes_read_indexed(nesdev, 0x000021f8);
- u32temp &= 0x7fffffff;
- u32temp |= 0x7fff0010;
- nes_write_indexed(nesdev, 0x000021f8, u32temp);
- if (port_count > 1) {
- u32temp = nes_read_indexed(nesdev, 0x000023f8);
- u32temp &= 0x7fffffff;
- u32temp |= 0x7fff0010;
- nes_write_indexed(nesdev, 0x000023f8, u32temp);
- }
- }
-}
-
-
-/**
- * nes_destroy_adapter - destroy the adapter structure
- */
-void nes_destroy_adapter(struct nes_adapter *nesadapter)
-{
- struct nes_adapter *tmp_adapter;
-
- list_for_each_entry(tmp_adapter, &nes_adapter_list, list) {
- nes_debug(NES_DBG_SHUTDOWN, "Nes Adapter list entry = 0x%p.\n",
- tmp_adapter);
- }
-
- nesadapter->ref_count--;
- if (!nesadapter->ref_count) {
- if (nesadapter->hw_rev == NE020_REV) {
- del_timer(&nesadapter->mh_timer);
- }
- del_timer(&nesadapter->lc_timer);
-
- list_del(&nesadapter->list);
- kfree(nesadapter);
- }
-}
-
-
-/**
- * nes_init_cqp
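- * Allocate the CQP, CCQ, CEQ, NIC CEQ and AEQ rings in a single DMA region,
- * post the create WQEs and poll until the hardware reports them created.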
- */
-int nes_init_cqp(struct nes_device *nesdev)
-{
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- struct nes_hw_cqp_qp_context *cqp_qp_context;
- struct nes_hw_cqp_wqe *cqp_wqe;
- struct nes_hw_ceq *ceq;
- struct nes_hw_ceq *nic_ceq;
- struct nes_hw_aeq *aeq;
- void *vmem;
- dma_addr_t pmem;
- u32 count = 0;
- u32 cqp_head;
- u64 u64temp;
- u32 u32temp;
-
- /* allocate CQP memory */
- /* Need to add max_cq to the aeq size once cq overflow checking is added back */
- /* SQ is 512 byte aligned, others are 256 byte aligned */
- nesdev->cqp_mem_size = 512 +
- (sizeof(struct nes_hw_cqp_wqe) * NES_CQP_SQ_SIZE) +
- (sizeof(struct nes_hw_cqe) * NES_CCQ_SIZE) +
- max(((u32)sizeof(struct nes_hw_ceqe) * NES_CCEQ_SIZE), (u32)256) +
- max(((u32)sizeof(struct nes_hw_ceqe) * NES_NIC_CEQ_SIZE), (u32)256) +
- (sizeof(struct nes_hw_aeqe) * nesadapter->max_qp) +
- sizeof(struct nes_hw_cqp_qp_context);
-
- nesdev->cqp_vbase = pci_zalloc_consistent(nesdev->pcidev,
- nesdev->cqp_mem_size,
- &nesdev->cqp_pbase);
- if (!nesdev->cqp_vbase) {
- nes_debug(NES_DBG_INIT, "Unable to allocate memory for host descriptor rings\n");
- return -ENOMEM;
- }
-
- /* Allocate twice the number of CQP requests as the SQ size */
- nesdev->nes_cqp_requests = kzalloc(sizeof(struct nes_cqp_request) *
- 2 * NES_CQP_SQ_SIZE, GFP_KERNEL);
- if (nesdev->nes_cqp_requests == NULL) {
- nes_debug(NES_DBG_INIT, "Unable to allocate memory CQP request entries.\n");
- pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size, nesdev->cqp.sq_vbase,
- nesdev->cqp.sq_pbase);
- return -ENOMEM;
- }
-
- nes_debug(NES_DBG_INIT, "Allocated CQP structures at %p (phys = %016lX), size = %u.\n",
- nesdev->cqp_vbase, (unsigned long)nesdev->cqp_pbase, nesdev->cqp_mem_size);
-
- spin_lock_init(&nesdev->cqp.lock);
- init_waitqueue_head(&nesdev->cqp.waitq);
-
- /* Setup Various Structures */
- vmem = (void *)(((unsigned long)nesdev->cqp_vbase + (512 - 1)) &
- ~(unsigned long)(512 - 1));
- pmem = (dma_addr_t)(((unsigned long long)nesdev->cqp_pbase + (512 - 1)) &
- ~(unsigned long long)(512 - 1));
-
- nesdev->cqp.sq_vbase = vmem;
- nesdev->cqp.sq_pbase = pmem;
- nesdev->cqp.sq_size = NES_CQP_SQ_SIZE;
- nesdev->cqp.sq_head = 0;
- nesdev->cqp.sq_tail = 0;
- nesdev->cqp.qp_id = PCI_FUNC(nesdev->pcidev->devfn);
-
- vmem += (sizeof(struct nes_hw_cqp_wqe) * nesdev->cqp.sq_size);
- pmem += (sizeof(struct nes_hw_cqp_wqe) * nesdev->cqp.sq_size);
-
- nesdev->ccq.cq_vbase = vmem;
- nesdev->ccq.cq_pbase = pmem;
- nesdev->ccq.cq_size = NES_CCQ_SIZE;
- nesdev->ccq.cq_head = 0;
- nesdev->ccq.ce_handler = nes_cqp_ce_handler;
- nesdev->ccq.cq_number = PCI_FUNC(nesdev->pcidev->devfn);
-
- vmem += (sizeof(struct nes_hw_cqe) * nesdev->ccq.cq_size);
- pmem += (sizeof(struct nes_hw_cqe) * nesdev->ccq.cq_size);
-
- nesdev->ceq_index = PCI_FUNC(nesdev->pcidev->devfn);
- ceq = &nesadapter->ceq[nesdev->ceq_index];
- ceq->ceq_vbase = vmem;
- ceq->ceq_pbase = pmem;
- ceq->ceq_size = NES_CCEQ_SIZE;
- ceq->ceq_head = 0;
-
- vmem += max(((u32)sizeof(struct nes_hw_ceqe) * ceq->ceq_size), (u32)256);
- pmem += max(((u32)sizeof(struct nes_hw_ceqe) * ceq->ceq_size), (u32)256);
-
- nesdev->nic_ceq_index = PCI_FUNC(nesdev->pcidev->devfn) + 8;
- nic_ceq = &nesadapter->ceq[nesdev->nic_ceq_index];
- nic_ceq->ceq_vbase = vmem;
- nic_ceq->ceq_pbase = pmem;
- nic_ceq->ceq_size = NES_NIC_CEQ_SIZE;
- nic_ceq->ceq_head = 0;
-
- vmem += max(((u32)sizeof(struct nes_hw_ceqe) * nic_ceq->ceq_size), (u32)256);
- pmem += max(((u32)sizeof(struct nes_hw_ceqe) * nic_ceq->ceq_size), (u32)256);
-
- aeq = &nesadapter->aeq[PCI_FUNC(nesdev->pcidev->devfn)];
- aeq->aeq_vbase = vmem;
- aeq->aeq_pbase = pmem;
- aeq->aeq_size = nesadapter->max_qp;
- aeq->aeq_head = 0;
-
- /* Setup QP Context */
- vmem += (sizeof(struct nes_hw_aeqe) * aeq->aeq_size);
- pmem += (sizeof(struct nes_hw_aeqe) * aeq->aeq_size);
-
- cqp_qp_context = vmem;
- cqp_qp_context->context_words[0] =
- cpu_to_le32((PCI_FUNC(nesdev->pcidev->devfn) << 12) + (2 << 10));
- cqp_qp_context->context_words[1] = 0;
- cqp_qp_context->context_words[2] = cpu_to_le32((u32)nesdev->cqp.sq_pbase);
- cqp_qp_context->context_words[3] = cpu_to_le32(((u64)nesdev->cqp.sq_pbase) >> 32);
-
-
- /* Write the address to Create CQP */
- if (sizeof(dma_addr_t) > 4) {
- nes_write_indexed(nesdev,
- NES_IDX_CREATE_CQP_HIGH + (PCI_FUNC(nesdev->pcidev->devfn) * 8),
- ((u64)pmem) >> 32);
- } else {
- nes_write_indexed(nesdev,
- NES_IDX_CREATE_CQP_HIGH + (PCI_FUNC(nesdev->pcidev->devfn) * 8), 0);
- }
- nes_write_indexed(nesdev,
- NES_IDX_CREATE_CQP_LOW + (PCI_FUNC(nesdev->pcidev->devfn) * 8),
- (u32)pmem);
-
- INIT_LIST_HEAD(&nesdev->cqp_avail_reqs);
- INIT_LIST_HEAD(&nesdev->cqp_pending_reqs);
-
- for (count = 0; count < 2*NES_CQP_SQ_SIZE; count++) {
- init_waitqueue_head(&nesdev->nes_cqp_requests[count].waitq);
- list_add_tail(&nesdev->nes_cqp_requests[count].list, &nesdev->cqp_avail_reqs);
- }
-
- /* Write Create CCQ WQE */
- cqp_head = nesdev->cqp.sq_head++;
- cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
- (NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID |
- NES_CQP_CQ_CHK_OVERFLOW | ((u32)nesdev->ccq.cq_size << 16)));
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
- (nesdev->ccq.cq_number |
- ((u32)nesdev->ceq_index << 16)));
- u64temp = (u64)nesdev->ccq.cq_pbase;
- set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
- cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0;
- u64temp = (unsigned long)&nesdev->ccq;
- cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] =
- cpu_to_le32((u32)(u64temp >> 1));
- cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] =
- cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF);
- cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0;
-
- /* Write Create CEQ WQE */
- cqp_head = nesdev->cqp.sq_head++;
- cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
- (NES_CQP_CREATE_CEQ + ((u32)nesdev->ceq_index << 8)));
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_CEQ_WQE_ELEMENT_COUNT_IDX, ceq->ceq_size);
- u64temp = (u64)ceq->ceq_pbase;
- set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
-
- /* Write Create AEQ WQE */
- cqp_head = nesdev->cqp.sq_head++;
- cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
- (NES_CQP_CREATE_AEQ + ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 8)));
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_AEQ_WQE_ELEMENT_COUNT_IDX, aeq->aeq_size);
- u64temp = (u64)aeq->aeq_pbase;
- set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
-
- /* Write Create NIC CEQ WQE */
- cqp_head = nesdev->cqp.sq_head++;
- cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
- (NES_CQP_CREATE_CEQ + ((u32)nesdev->nic_ceq_index << 8)));
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_CEQ_WQE_ELEMENT_COUNT_IDX, nic_ceq->ceq_size);
- u64temp = (u64)nic_ceq->ceq_pbase;
- set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
-
- /* Poll until CCQP done */
- count = 0;
- do {
- if (count++ > 1000) {
- printk(KERN_ERR PFX "Error creating CQP\n");
- pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size,
- nesdev->cqp_vbase, nesdev->cqp_pbase);
- return -1;
- }
- udelay(10);
- } while (!(nes_read_indexed(nesdev,
- NES_IDX_QP_CONTROL + (PCI_FUNC(nesdev->pcidev->devfn) * 8)) & (1 << 8)));
-
- nes_debug(NES_DBG_INIT, "CQP Status = 0x%08X\n", nes_read_indexed(nesdev,
- NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8)));
-
- u32temp = 0x04800000;
- nes_write32(nesdev->regs+NES_WQE_ALLOC, u32temp | nesdev->cqp.qp_id);
-
- /* wait for the CCQ, CEQ, and AEQ to get created */
- count = 0;
- do {
- if (count++ > 1000) {
- printk(KERN_ERR PFX "Error creating CCQ, CEQ, and AEQ\n");
- pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size,
- nesdev->cqp_vbase, nesdev->cqp_pbase);
- return -1;
- }
- udelay(10);
- } while (((nes_read_indexed(nesdev,
- NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8)) & (15<<8)) != (15<<8)));
-
- /* dump the QP status value */
- nes_debug(NES_DBG_INIT, "QP Status = 0x%08X\n", nes_read_indexed(nesdev,
- NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8)));
-
- nesdev->cqp.sq_tail++;
-
- return 0;
-}
-
-
-/**
- * nes_destroy_cqp
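- * Post destroy WQEs for the AEQ, CEQs, CCQ and CQP, wait for the hardware
- * to tear them down, then free the CQP request and ring memory.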
- */
-int nes_destroy_cqp(struct nes_device *nesdev)
-{
- struct nes_hw_cqp_wqe *cqp_wqe;
- u32 count = 0;
- u32 cqp_head;
- unsigned long flags;
-
- do {
- if (count++ > 1000)
- break;
- udelay(10);
- } while (nesdev->cqp.sq_head != nesdev->cqp.sq_tail);
-
- /* Reset CCQ */
- nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_RESET |
- nesdev->ccq.cq_number);
-
- /* Disable device interrupts */
- nes_write32(nesdev->regs+NES_INT_MASK, 0x7fffffff);
-
- spin_lock_irqsave(&nesdev->cqp.lock, flags);
-
- /* Destroy the AEQ */
- cqp_head = nesdev->cqp.sq_head++;
- nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
- cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
- cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_AEQ |
- ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 8));
- cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_HIGH_IDX] = 0;
-
- /* Destroy the NIC CEQ */
- cqp_head = nesdev->cqp.sq_head++;
- nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
- cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
- cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_CEQ |
- ((u32)nesdev->nic_ceq_index << 8));
-
- /* Destroy the CEQ */
- cqp_head = nesdev->cqp.sq_head++;
- nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
- cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
- cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_CEQ |
- (nesdev->ceq_index << 8));
-
- /* Destroy the CCQ */
- cqp_head = nesdev->cqp.sq_head++;
- nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
- cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
- cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_CQ);
- cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesdev->ccq.cq_number |
- ((u32)nesdev->ceq_index << 16));
-
- /* Destroy CQP */
- cqp_head = nesdev->cqp.sq_head++;
- nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
- cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
- cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_QP |
- NES_CQP_QP_TYPE_CQP);
- cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesdev->cqp.qp_id);
-
- barrier();
- /* Ring doorbell (5 WQEs) */
- nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x05800000 | nesdev->cqp.qp_id);
-
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
-
- /* wait for the CCQ, CEQ, and AEQ to get destroyed */
- count = 0;
- do {
- if (count++ > 1000) {
- printk(KERN_ERR PFX "Function%d: Error destroying CCQ, CEQ, and AEQ\n",
- PCI_FUNC(nesdev->pcidev->devfn));
- break;
- }
- udelay(10);
- } while (((nes_read_indexed(nesdev,
- NES_IDX_QP_CONTROL + (PCI_FUNC(nesdev->pcidev->devfn)*8)) & (15 << 8)) != 0));
-
- /* dump the QP status value */
- nes_debug(NES_DBG_SHUTDOWN, "Function%d: QP Status = 0x%08X\n",
- PCI_FUNC(nesdev->pcidev->devfn),
- nes_read_indexed(nesdev,
- NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8)));
-
- kfree(nesdev->nes_cqp_requests);
-
- /* Free the control structures */
- pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size, nesdev->cqp.sq_vbase,
- nesdev->cqp.sq_pbase);
-
- return 0;
-}
-
-
-/**
- * nes_init_1g_phy
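- * Reset and configure the 1G PHY: clear loopback, set the interrupt mask,
- * enable flow control and full duplex, then restart auto-negotiation.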
- */
-static int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
-{
- u32 counter = 0;
- u16 phy_data;
- int ret = 0;
-
- nes_read_1G_phy_reg(nesdev, 1, phy_index, &phy_data);
- nes_write_1G_phy_reg(nesdev, 23, phy_index, 0xb000);
-
- /* Reset the PHY */
- nes_write_1G_phy_reg(nesdev, 0, phy_index, 0x8000);
- udelay(100);
- counter = 0;
- do {
- nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
- if (counter++ > 100) {
- ret = -1;
- break;
- }
- } while (phy_data & 0x8000);
-
- /* Setting no phy loopback */
- phy_data &= 0xbfff;
- phy_data |= 0x1140;
- nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data);
- nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
- nes_read_1G_phy_reg(nesdev, 0x17, phy_index, &phy_data);
- nes_read_1G_phy_reg(nesdev, 0x1e, phy_index, &phy_data);
-
- /* Setting the interrupt mask */
- nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data);
- nes_write_1G_phy_reg(nesdev, 0x19, phy_index, 0xffee);
- nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data);
-
- /* turning on flow control */
- nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
- nes_write_1G_phy_reg(nesdev, 4, phy_index, (phy_data & ~(0x03E0)) | 0xc00);
- nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
-
- /* Clear Half duplex */
- nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data);
- nes_write_1G_phy_reg(nesdev, 9, phy_index, phy_data & ~(0x0100));
- nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data);
-
- nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
- nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data | 0x0300);
-
- return ret;
-}
-
-
-/**
- * nes_init_2025_phy
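- * Bring up the external 10G PHY (Argus, SFP_D or KR): check the firmware
- * heartbeat, reprogram the PHY if needed, wait for tracking and reset the
- * SerDes.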
- */
-static int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
-{
- u32 temp_phy_data = 0;
- u32 temp_phy_data2 = 0;
- u32 counter = 0;
- u32 sds;
- u32 mac_index = nesdev->mac_index;
- int ret = 0;
- unsigned int first_attempt = 1;
-
- /* Check firmware heartbeat */
- nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
- temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- udelay(1500);
- nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
- temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
-
- if (temp_phy_data != temp_phy_data2) {
- nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
- temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- if ((temp_phy_data & 0xff) > 0x20)
- return 0;
- printk(PFX "Reinitialize external PHY\n");
- }
-
- /* no heartbeat, configure the PHY */
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0x0000, 0x8000);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0000);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
-
- switch (phy_type) {
- case NES_PHY_TYPE_ARGUS:
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0008);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0001);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0098);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00);
-
- /* setup LEDs */
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x0007);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x000A);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0009);
- break;
-
- case NES_PHY_TYPE_SFP_D:
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x0004);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0038);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0013);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0098);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00);
-
- /* setup LEDs */
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x0007);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x000A);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0009);
- break;
-
- case NES_PHY_TYPE_KR:
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0010);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0013);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0080);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00);
-
- /* setup LEDs */
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x000B);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x0003);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0004);
-
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0022, 0x406D);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0023, 0x0020);
- break;
- }
-
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0028, 0xA528);
-
- /* Bring PHY out of reset */
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0002);
-
- /* Check for heartbeat */
- counter = 0;
- mdelay(690);
- nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
- temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- do {
- if (counter++ > 150) {
- printk(PFX "No PHY heartbeat\n");
- break;
- }
- mdelay(1);
- nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
- temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- } while ((temp_phy_data2 == temp_phy_data));
-
- /* wait for tracking */
- counter = 0;
- do {
- nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
- temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- if (counter++ > 300) {
- if (((temp_phy_data & 0xff) == 0x0) && first_attempt) {
- first_attempt = 0;
- counter = 0;
- /* reset AMCC PHY and try again */
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x00c0);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x0040);
- continue;
- } else {
- ret = 1;
- break;
- }
- }
- mdelay(10);
- } while ((temp_phy_data & 0xff) < 0x30);
-
- /* setup signal integrity */
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd003, 0x0000);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00D, 0x00FE);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00E, 0x0032);
- if (phy_type == NES_PHY_TYPE_KR) {
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00F, 0x000C);
- } else {
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00F, 0x0002);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc314, 0x0063);
- }
-
- /* reset serdes */
- sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200);
- sds |= 0x1;
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200, sds);
- sds &= 0xfffffffe;
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200, sds);
-
- counter = 0;
- while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040)
- && (counter++ < 5000))
- ;
-
- return ret;
-}
-
-
-/**
- * nes_init_phy
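- * Select 1G or 10G MDIO operation and run the matching PHY initialization
- * while holding phy_lock.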
- */
-int nes_init_phy(struct nes_device *nesdev)
-{
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- u32 mac_index = nesdev->mac_index;
- u32 tx_config = 0;
- unsigned long flags;
- u8 phy_type = nesadapter->phy_type[mac_index];
- u8 phy_index = nesadapter->phy_index[mac_index];
- int ret = 0;
-
- tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
- if (phy_type == NES_PHY_TYPE_1G) {
- /* setup 1G MDIO operation */
- tx_config &= 0xFFFFFFE3;
- tx_config |= 0x04;
- } else {
- /* setup 10G MDIO operation */
- tx_config &= 0xFFFFFFE3;
- tx_config |= 0x1D;
- }
- nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
-
- spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
-
- switch (phy_type) {
- case NES_PHY_TYPE_1G:
- ret = nes_init_1g_phy(nesdev, phy_type, phy_index);
- break;
- case NES_PHY_TYPE_ARGUS:
- case NES_PHY_TYPE_SFP_D:
- case NES_PHY_TYPE_KR:
- ret = nes_init_2025_phy(nesdev, phy_type, phy_index);
- break;
- }
-
- spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
-
- return ret;
-}
-
-
-/**
- * nes_replenish_nic_rq
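- * Refill the NIC receive queue with freshly allocated skbs, ringing the WQE
- * doorbell in batches; arms rq_wqes_timer to retry when allocation fails or
- * a refill is already in progress.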
- */
-static void nes_replenish_nic_rq(struct nes_vnic *nesvnic)
-{
- unsigned long flags;
- dma_addr_t bus_address;
- struct sk_buff *skb;
- struct nes_hw_nic_rq_wqe *nic_rqe;
- struct nes_hw_nic *nesnic;
- struct nes_device *nesdev;
- struct nes_rskb_cb *cb;
- u32 rx_wqes_posted = 0;
-
- nesnic = &nesvnic->nic;
- nesdev = nesvnic->nesdev;
- spin_lock_irqsave(&nesnic->rq_lock, flags);
- if (nesnic->replenishing_rq != 0) {
- if (((nesnic->rq_size-1) == atomic_read(&nesvnic->rx_skbs_needed)) &&
- (atomic_read(&nesvnic->rx_skb_timer_running) == 0)) {
- atomic_set(&nesvnic->rx_skb_timer_running, 1);
- spin_unlock_irqrestore(&nesnic->rq_lock, flags);
- nesvnic->rq_wqes_timer.expires = jiffies + (HZ/2); /* 1/2 second */
- add_timer(&nesvnic->rq_wqes_timer);
- } else
- spin_unlock_irqrestore(&nesnic->rq_lock, flags);
- return;
- }
- nesnic->replenishing_rq = 1;
- spin_unlock_irqrestore(&nesnic->rq_lock, flags);
- do {
- skb = dev_alloc_skb(nesvnic->max_frame_size);
- if (skb) {
- skb->dev = nesvnic->netdev;
-
- bus_address = pci_map_single(nesdev->pcidev,
- skb->data, nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
- cb = (struct nes_rskb_cb *)&skb->cb[0];
- cb->busaddr = bus_address;
- cb->maplen = nesvnic->max_frame_size;
-
- nic_rqe = &nesnic->rq_vbase[nesvnic->nic.rq_head];
- nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] =
- cpu_to_le32(nesvnic->max_frame_size);
- nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0;
- nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] =
- cpu_to_le32((u32)bus_address);
- nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] =
- cpu_to_le32((u32)((u64)bus_address >> 32));
- nesnic->rx_skb[nesnic->rq_head] = skb;
- nesnic->rq_head++;
- nesnic->rq_head &= nesnic->rq_size - 1;
- atomic_dec(&nesvnic->rx_skbs_needed);
- barrier();
- if (++rx_wqes_posted == 255) {
- nes_write32(nesdev->regs+NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesnic->qp_id);
- rx_wqes_posted = 0;
- }
- } else {
- spin_lock_irqsave(&nesnic->rq_lock, flags);
- if (((nesnic->rq_size-1) == atomic_read(&nesvnic->rx_skbs_needed)) &&
- (atomic_read(&nesvnic->rx_skb_timer_running) == 0)) {
- atomic_set(&nesvnic->rx_skb_timer_running, 1);
- spin_unlock_irqrestore(&nesnic->rq_lock, flags);
- nesvnic->rq_wqes_timer.expires = jiffies + (HZ/2); /* 1/2 second */
- add_timer(&nesvnic->rq_wqes_timer);
- } else
- spin_unlock_irqrestore(&nesnic->rq_lock, flags);
- break;
- }
- } while (atomic_read(&nesvnic->rx_skbs_needed));
- barrier();
- if (rx_wqes_posted)
- nes_write32(nesdev->regs+NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesnic->qp_id);
- nesnic->replenishing_rq = 0;
-}
-
-
-/**
- * nes_rq_wqes_timeout
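- * Timer callback that retries nes_replenish_nic_rq() while receive buffers
- * are still needed.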
- */
-static void nes_rq_wqes_timeout(unsigned long parm)
-{
- struct nes_vnic *nesvnic = (struct nes_vnic *)parm;
- printk("%s: Timer fired.\n", __func__);
- atomic_set(&nesvnic->rx_skb_timer_running, 0);
- if (atomic_read(&nesvnic->rx_skbs_needed))
- nes_replenish_nic_rq(nesvnic);
-}
-
-
-static int nes_lro_get_skb_hdr(struct sk_buff *skb, void **iphdr,
- void **tcph, u64 *hdr_flags, void *priv)
-{
- unsigned int ip_len;
- struct iphdr *iph;
- skb_reset_network_header(skb);
- iph = ip_hdr(skb);
- if (iph->protocol != IPPROTO_TCP)
- return -1;
- ip_len = ip_hdrlen(skb);
- skb_set_transport_header(skb, ip_len);
- *tcph = tcp_hdr(skb);
-
- *hdr_flags = LRO_IPV4 | LRO_TCP;
- *iphdr = iph;
- return 0;
-}
-
-
-/**
- * nes_init_nic_qp
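- * Allocate the NIC first-fragment buffers, SQ, RQ, CQ and QP context in one
- * DMA region, issue CreateCQ/CreateQP through the CQP and populate the RQ.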
- */
-int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
-{
- struct nes_hw_cqp_wqe *cqp_wqe;
- struct nes_hw_nic_sq_wqe *nic_sqe;
- struct nes_hw_nic_qp_context *nic_context;
- struct sk_buff *skb;
- struct nes_hw_nic_rq_wqe *nic_rqe;
- struct nes_vnic *nesvnic = netdev_priv(netdev);
- unsigned long flags;
- void *vmem;
- dma_addr_t pmem;
- u64 u64temp;
- int ret;
- u32 cqp_head;
- u32 counter;
- u32 wqe_count;
- struct nes_rskb_cb *cb;
- u8 jumbomode = 0;
-
- /* Allocate fragment, SQ, RQ, and CQ; Reuse CEQ based on the PCI function */
- nesvnic->nic_mem_size = 256 +
- (NES_NIC_WQ_SIZE * sizeof(struct nes_first_frag)) +
- (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_sq_wqe)) +
- (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_rq_wqe)) +
- (NES_NIC_WQ_SIZE * 2 * sizeof(struct nes_hw_nic_cqe)) +
- sizeof(struct nes_hw_nic_qp_context);
-
- nesvnic->nic_vbase = pci_zalloc_consistent(nesdev->pcidev,
- nesvnic->nic_mem_size,
- &nesvnic->nic_pbase);
- if (!nesvnic->nic_vbase) {
- nes_debug(NES_DBG_INIT, "Unable to allocate memory for NIC host descriptor rings\n");
- return -ENOMEM;
- }
- nes_debug(NES_DBG_INIT, "Allocated NIC QP structures at %p (phys = %016lX), size = %u.\n",
- nesvnic->nic_vbase, (unsigned long)nesvnic->nic_pbase, nesvnic->nic_mem_size);
-
- vmem = (void *)(((unsigned long)nesvnic->nic_vbase + (256 - 1)) &
- ~(unsigned long)(256 - 1));
- pmem = (dma_addr_t)(((unsigned long long)nesvnic->nic_pbase + (256 - 1)) &
- ~(unsigned long long)(256 - 1));
-
- /* Setup the first Fragment buffers */
- nesvnic->nic.first_frag_vbase = vmem;
-
- for (counter = 0; counter < NES_NIC_WQ_SIZE; counter++) {
- nesvnic->nic.frag_paddr[counter] = pmem;
- pmem += sizeof(struct nes_first_frag);
- }
-
- /* setup the SQ */
- vmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_first_frag));
-
- nesvnic->nic.sq_vbase = (void *)vmem;
- nesvnic->nic.sq_pbase = pmem;
- nesvnic->nic.sq_head = 0;
- nesvnic->nic.sq_tail = 0;
- nesvnic->nic.sq_size = NES_NIC_WQ_SIZE;
- for (counter = 0; counter < NES_NIC_WQ_SIZE; counter++) {
- nic_sqe = &nesvnic->nic.sq_vbase[counter];
- nic_sqe->wqe_words[NES_NIC_SQ_WQE_MISC_IDX] =
- cpu_to_le32(NES_NIC_SQ_WQE_DISABLE_CHKSUM |
- NES_NIC_SQ_WQE_COMPLETION);
- nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX] =
- cpu_to_le32((u32)NES_FIRST_FRAG_SIZE << 16);
- nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX] =
- cpu_to_le32((u32)nesvnic->nic.frag_paddr[counter]);
- nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX] =
- cpu_to_le32((u32)((u64)nesvnic->nic.frag_paddr[counter] >> 32));
- }
-
- nesvnic->get_cqp_request = nes_get_cqp_request;
- nesvnic->post_cqp_request = nes_post_cqp_request;
- nesvnic->mcrq_mcast_filter = NULL;
-
- spin_lock_init(&nesvnic->nic.rq_lock);
-
- /* setup the RQ */
- vmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_sq_wqe));
- pmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_sq_wqe));
-
-
- nesvnic->nic.rq_vbase = vmem;
- nesvnic->nic.rq_pbase = pmem;
- nesvnic->nic.rq_head = 0;
- nesvnic->nic.rq_tail = 0;
- nesvnic->nic.rq_size = NES_NIC_WQ_SIZE;
-
- /* setup the CQ */
- vmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_rq_wqe));
- pmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_rq_wqe));
-
- if (nesdev->nesadapter->netdev_count > 2)
- nesvnic->mcrq_qp_id = nesvnic->nic_index + 32;
- else
- nesvnic->mcrq_qp_id = nesvnic->nic.qp_id + 4;
-
- nesvnic->nic_cq.cq_vbase = vmem;
- nesvnic->nic_cq.cq_pbase = pmem;
- nesvnic->nic_cq.cq_head = 0;
- nesvnic->nic_cq.cq_size = NES_NIC_WQ_SIZE * 2;
-
- nesvnic->nic_cq.ce_handler = nes_nic_napi_ce_handler;
-
- /* Send CreateCQ request to CQP */
- spin_lock_irqsave(&nesdev->cqp.lock, flags);
- cqp_head = nesdev->cqp.sq_head;
-
- cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
-
- cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(
- NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID |
- ((u32)nesvnic->nic_cq.cq_size << 16));
- cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(
- nesvnic->nic_cq.cq_number | ((u32)nesdev->nic_ceq_index << 16));
- u64temp = (u64)nesvnic->nic_cq.cq_pbase;
- set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
- cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0;
- u64temp = (unsigned long)&nesvnic->nic_cq;
- cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] = cpu_to_le32((u32)(u64temp >> 1));
- cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] =
- cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF);
- cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0;
- if (++cqp_head >= nesdev->cqp.sq_size)
- cqp_head = 0;
- cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
-
- /* Send CreateQP request to CQP */
- nic_context = (void *)(&nesvnic->nic_cq.cq_vbase[nesvnic->nic_cq.cq_size]);
- nic_context->context_words[NES_NIC_CTX_MISC_IDX] =
- cpu_to_le32((u32)NES_NIC_CTX_SIZE |
- ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 12));
- nes_debug(NES_DBG_INIT, "RX_WINDOW_BUFFER_PAGE_TABLE_SIZE = 0x%08X, RX_WINDOW_BUFFER_SIZE = 0x%08X\n",
- nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_PAGE_TABLE_SIZE),
- nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE));
- if (nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE) != 0) {
- nic_context->context_words[NES_NIC_CTX_MISC_IDX] |= cpu_to_le32(NES_NIC_BACK_STORE);
- }
-
- u64temp = (u64)nesvnic->nic.sq_pbase;
- nic_context->context_words[NES_NIC_CTX_SQ_LOW_IDX] = cpu_to_le32((u32)u64temp);
- nic_context->context_words[NES_NIC_CTX_SQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));
- u64temp = (u64)nesvnic->nic.rq_pbase;
- nic_context->context_words[NES_NIC_CTX_RQ_LOW_IDX] = cpu_to_le32((u32)u64temp);
- nic_context->context_words[NES_NIC_CTX_RQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));
-
- cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_CREATE_QP |
- NES_CQP_QP_TYPE_NIC);
- cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesvnic->nic.qp_id);
- u64temp = (u64)nesvnic->nic_cq.cq_pbase +
- (nesvnic->nic_cq.cq_size * sizeof(struct nes_hw_nic_cqe));
- set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);
-
- if (++cqp_head >= nesdev->cqp.sq_size)
- cqp_head = 0;
- nesdev->cqp.sq_head = cqp_head;
-
- barrier();
-
- /* Ring doorbell (2 WQEs) */
- nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id);
-
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
- nes_debug(NES_DBG_INIT, "Waiting for create NIC QP%u to complete.\n",
- nesvnic->nic.qp_id);
-
- ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head),
- NES_EVENT_TIMEOUT);
- nes_debug(NES_DBG_INIT, "Create NIC QP%u completed, wait_event_timeout ret = %u.\n",
- nesvnic->nic.qp_id, ret);
- if (!ret) {
- nes_debug(NES_DBG_INIT, "NIC QP%u create timeout expired\n", nesvnic->nic.qp_id);
- pci_free_consistent(nesdev->pcidev, nesvnic->nic_mem_size, nesvnic->nic_vbase,
- nesvnic->nic_pbase);
- return -EIO;
- }
-
- /* Populate the RQ */
- for (counter = 0; counter < (NES_NIC_WQ_SIZE - 1); counter++) {
- skb = dev_alloc_skb(nesvnic->max_frame_size);
- if (!skb) {
- nes_debug(NES_DBG_INIT, "%s: out of memory for receive skb\n", netdev->name);
-
- nes_destroy_nic_qp(nesvnic);
- return -ENOMEM;
- }
-
- skb->dev = netdev;
-
- pmem = pci_map_single(nesdev->pcidev, skb->data,
- nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
- cb = (struct nes_rskb_cb *)&skb->cb[0];
- cb->busaddr = pmem;
- cb->maplen = nesvnic->max_frame_size;
-
- nic_rqe = &nesvnic->nic.rq_vbase[counter];
- nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = cpu_to_le32(nesvnic->max_frame_size);
- nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0;
- nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] = cpu_to_le32((u32)pmem);
- nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] = cpu_to_le32((u32)((u64)pmem >> 32));
- nesvnic->nic.rx_skb[counter] = skb;
- }
-
- wqe_count = NES_NIC_WQ_SIZE - 1;
- nesvnic->nic.rq_head = wqe_count;
- barrier();
- do {
- counter = min(wqe_count, ((u32)255));
- wqe_count -= counter;
- nes_write32(nesdev->regs+NES_WQE_ALLOC, (counter << 24) | nesvnic->nic.qp_id);
- } while (wqe_count);
- init_timer(&nesvnic->rq_wqes_timer);
- nesvnic->rq_wqes_timer.function = nes_rq_wqes_timeout;
- nesvnic->rq_wqes_timer.data = (unsigned long)nesvnic;
- nes_debug(NES_DBG_INIT, "NAPI support Enabled\n");
- if (nesdev->nesadapter->et_use_adaptive_rx_coalesce) {
- nes_nic_init_timer(nesdev);
- if (netdev->mtu > 1500)
- jumbomode = 1;
- nes_nic_init_timer_defaults(nesdev, jumbomode);
- }
- if ((nesdev->nesadapter->allow_unaligned_fpdus) &&
- (nes_init_mgt_qp(nesdev, netdev, nesvnic))) {
- nes_debug(NES_DBG_INIT, "%s: Out of memory for pau nic\n", netdev->name);
- nes_destroy_nic_qp(nesvnic);
- return -ENOMEM;
- }
-
- nesvnic->lro_mgr.max_aggr = nes_lro_max_aggr;
- nesvnic->lro_mgr.max_desc = NES_MAX_LRO_DESCRIPTORS;
- nesvnic->lro_mgr.lro_arr = nesvnic->lro_desc;
- nesvnic->lro_mgr.get_skb_header = nes_lro_get_skb_hdr;
- nesvnic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
- nesvnic->lro_mgr.dev = netdev;
- nesvnic->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
- nesvnic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
- return 0;
-}
-
-
-/**
- * nes_destroy_nic_qp
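- * Unmap and free any pending receive and transmit buffers, then issue
- * DestroyQP/DestroyCQ through the CQP and release the NIC QP memory.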
- */
-void nes_destroy_nic_qp(struct nes_vnic *nesvnic)
-{
- u64 u64temp;
- dma_addr_t bus_address;
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_hw_cqp_wqe *cqp_wqe;
- struct nes_hw_nic_sq_wqe *nic_sqe;
- __le16 *wqe_fragment_length;
- u16 wqe_fragment_index;
- u32 cqp_head;
- u32 wqm_cfg0;
- unsigned long flags;
- struct sk_buff *rx_skb;
- struct nes_rskb_cb *cb;
- int ret;
-
- if (nesdev->nesadapter->allow_unaligned_fpdus)
- nes_destroy_mgt(nesvnic);
-
- /* clear wqe stall before destroying NIC QP */
- wqm_cfg0 = nes_read_indexed(nesdev, NES_IDX_WQM_CONFIG0);
- nes_write_indexed(nesdev, NES_IDX_WQM_CONFIG0, wqm_cfg0 & 0xFFFF7FFF);
-
- /* Free remaining NIC receive buffers */
- while (nesvnic->nic.rq_head != nesvnic->nic.rq_tail) {
- rx_skb = nesvnic->nic.rx_skb[nesvnic->nic.rq_tail];
- cb = (struct nes_rskb_cb *)&rx_skb->cb[0];
- pci_unmap_single(nesdev->pcidev, cb->busaddr, cb->maplen,
- PCI_DMA_FROMDEVICE);
-
- dev_kfree_skb(nesvnic->nic.rx_skb[nesvnic->nic.rq_tail++]);
- nesvnic->nic.rq_tail &= (nesvnic->nic.rq_size - 1);
- }
-
- /* Free remaining NIC transmit buffers */
- while (nesvnic->nic.sq_head != nesvnic->nic.sq_tail) {
- nic_sqe = &nesvnic->nic.sq_vbase[nesvnic->nic.sq_tail];
- wqe_fragment_index = 1;
- wqe_fragment_length = (__le16 *)
- &nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
- /* bump past the vlan tag */
- wqe_fragment_length++;
- if (le16_to_cpu(wqe_fragment_length[wqe_fragment_index]) != 0) {
- u64temp = (u64)le32_to_cpu(
- nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX+
- wqe_fragment_index*2]);
- u64temp += ((u64)le32_to_cpu(
- nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX
- + wqe_fragment_index*2]))<<32;
- bus_address = (dma_addr_t)u64temp;
- if (test_and_clear_bit(nesvnic->nic.sq_tail,
- nesvnic->nic.first_frag_overflow)) {
- pci_unmap_single(nesdev->pcidev,
- bus_address,
- le16_to_cpu(wqe_fragment_length[
- wqe_fragment_index++]),
- PCI_DMA_TODEVICE);
- }
- for (; wqe_fragment_index < 5; wqe_fragment_index++) {
- if (wqe_fragment_length[wqe_fragment_index]) {
- u64temp = le32_to_cpu(
- nic_sqe->wqe_words[
- NES_NIC_SQ_WQE_FRAG0_LOW_IDX+
- wqe_fragment_index*2]);
- u64temp += ((u64)le32_to_cpu(
- nic_sqe->wqe_words[
- NES_NIC_SQ_WQE_FRAG0_HIGH_IDX+
- wqe_fragment_index*2]))<<32;
- bus_address = (dma_addr_t)u64temp;
- pci_unmap_page(nesdev->pcidev,
- bus_address,
- le16_to_cpu(
- wqe_fragment_length[
- wqe_fragment_index]),
- PCI_DMA_TODEVICE);
- } else
- break;
- }
- }
- if (nesvnic->nic.tx_skb[nesvnic->nic.sq_tail])
- dev_kfree_skb(
- nesvnic->nic.tx_skb[nesvnic->nic.sq_tail]);
-
- nesvnic->nic.sq_tail = (nesvnic->nic.sq_tail + 1)
- & (nesvnic->nic.sq_size - 1);
- }
-
- spin_lock_irqsave(&nesdev->cqp.lock, flags);
-
- /* Destroy NIC QP */
- cqp_head = nesdev->cqp.sq_head;
- cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
-
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
- (NES_CQP_DESTROY_QP | NES_CQP_QP_TYPE_NIC));
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
- nesvnic->nic.qp_id);
-
- if (++cqp_head >= nesdev->cqp.sq_size)
- cqp_head = 0;
-
- cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
-
- /* Destroy NIC CQ */
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
- (NES_CQP_DESTROY_CQ | ((u32)nesvnic->nic_cq.cq_size << 16)));
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
- (nesvnic->nic_cq.cq_number | ((u32)nesdev->nic_ceq_index << 16)));
-
- if (++cqp_head >= nesdev->cqp.sq_size)
- cqp_head = 0;
-
- nesdev->cqp.sq_head = cqp_head;
- barrier();
-
- /* Ring doorbell (2 WQEs) */
- nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id);
-
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
- nes_debug(NES_DBG_SHUTDOWN, "Waiting for CQP, cqp_head=%u, cqp.sq_head=%u,"
- " cqp.sq_tail=%u, cqp.sq_size=%u\n",
- cqp_head, nesdev->cqp.sq_head,
- nesdev->cqp.sq_tail, nesdev->cqp.sq_size);
-
- ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head),
- NES_EVENT_TIMEOUT);
-
- nes_debug(NES_DBG_SHUTDOWN, "Destroy NIC QP returned, wait_event_timeout ret = %u, cqp_head=%u,"
- " cqp.sq_head=%u, cqp.sq_tail=%u\n",
- ret, cqp_head, nesdev->cqp.sq_head, nesdev->cqp.sq_tail);
- if (!ret) {
- nes_debug(NES_DBG_SHUTDOWN, "NIC QP%u destroy timeout expired\n",
- nesvnic->nic.qp_id);
- }
-
- pci_free_consistent(nesdev->pcidev, nesvnic->nic_mem_size, nesvnic->nic_vbase,
- nesvnic->nic_pbase);
-
- /* restore old wqm_cfg0 value */
- nes_write_indexed(nesdev, NES_IDX_WQM_CONFIG0, wqm_cfg0);
-}
-
-/**
- * nes_napi_isr - handle NIC-only interrupts; returns 1 if handled here, 0 if the DPC should run
- */
-int nes_napi_isr(struct nes_device *nesdev)
-{
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- u32 int_stat;
-
- if (nesdev->napi_isr_ran) {
- /* interrupt status has already been read in ISR */
- int_stat = nesdev->int_stat;
- } else {
- int_stat = nes_read32(nesdev->regs + NES_INT_STAT);
- nesdev->int_stat = int_stat;
- nesdev->napi_isr_ran = 1;
- }
-
- int_stat &= nesdev->int_req;
-	/* if only NIC interrupts are pending, process them here, else wait for the DPC */
- if ((int_stat) && ((int_stat & 0x0000ff00) == int_stat)) {
- nesdev->napi_isr_ran = 0;
- nes_write32(nesdev->regs + NES_INT_STAT,
- (int_stat &
- ~(NES_INT_INTF | NES_INT_TIMER | NES_INT_MAC0 | NES_INT_MAC1 | NES_INT_MAC2 | NES_INT_MAC3)));
-
- /* Process the CEQs */
- nes_process_ceq(nesdev, &nesdev->nesadapter->ceq[nesdev->nic_ceq_index]);
-
- if (unlikely((((nesadapter->et_rx_coalesce_usecs_irq) &&
- (!nesadapter->et_use_adaptive_rx_coalesce)) ||
- ((nesadapter->et_use_adaptive_rx_coalesce) &&
- (nesdev->deepcq_count > nesadapter->et_pkt_rate_low))))) {
- if ((nesdev->int_req & NES_INT_TIMER) == 0) {
- /* Enable Periodic timer interrupts */
- nesdev->int_req |= NES_INT_TIMER;
- /* ack any pending periodic timer interrupts so we don't get an immediate interrupt */
- /* TODO: need to also ack other unused periodic timer values, get from nesadapter */
- nes_write32(nesdev->regs+NES_TIMER_STAT,
- nesdev->timer_int_req | ~(nesdev->nesadapter->timer_int_req));
- nes_write32(nesdev->regs+NES_INTF_INT_MASK,
- ~(nesdev->intf_int_req | NES_INTF_PERIODIC_TIMER));
- }
-
-			if (unlikely(nesadapter->et_use_adaptive_rx_coalesce))
-				nes_nic_init_timer(nesdev);
- /* Enable interrupts, except CEQs */
- nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff | (~nesdev->int_req));
- } else {
- /* Enable interrupts, make sure timer is off */
- nesdev->int_req &= ~NES_INT_TIMER;
- nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req));
- nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
- }
- nesdev->deepcq_count = 0;
- return 1;
- } else {
- return 0;
- }
-}
-
-static void process_critical_error(struct nes_device *nesdev)
-{
- u32 debug_error;
- u32 nes_idx_debug_error_masks0 = 0;
- u16 error_module = 0;
-
- debug_error = nes_read_indexed(nesdev, NES_IDX_DEBUG_ERROR_CONTROL_STATUS);
- printk(KERN_ERR PFX "Critical Error reported by device!!! 0x%02X\n",
- (u16)debug_error);
- nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_CONTROL_STATUS,
- 0x01010000 | (debug_error & 0x0000ffff));
- if (crit_err_count++ > 10)
- nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS1, 1 << 0x17);
- error_module = (u16) (debug_error & 0x1F00) >> 8;
- if (++nesdev->nesadapter->crit_error_count[error_module-1] >=
- nes_max_critical_error_count) {
- printk(KERN_ERR PFX "Masking off critical error for module "
- "0x%02X\n", (u16)error_module);
- nes_idx_debug_error_masks0 = nes_read_indexed(nesdev,
- NES_IDX_DEBUG_ERROR_MASKS0);
- nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS0,
- nes_idx_debug_error_masks0 | (1 << error_module));
- }
-}
-/**
- * nes_dpc - deferred interrupt processing of CEQ, AEQ, MAC and interface interrupts
- */
-void nes_dpc(unsigned long param)
-{
- struct nes_device *nesdev = (struct nes_device *)param;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- u32 counter;
- u32 loop_counter = 0;
- u32 int_status_bit;
- u32 int_stat;
- u32 timer_stat;
- u32 temp_int_stat;
- u32 intf_int_stat;
- u32 processed_intf_int = 0;
- u16 processed_timer_int = 0;
- u16 completion_ints = 0;
- u16 timer_ints = 0;
-
- /* nes_debug(NES_DBG_ISR, "\n"); */
-
- do {
- timer_stat = 0;
- if (nesdev->napi_isr_ran) {
- nesdev->napi_isr_ran = 0;
- int_stat = nesdev->int_stat;
- } else
- int_stat = nes_read32(nesdev->regs+NES_INT_STAT);
- if (processed_intf_int != 0)
- int_stat &= nesdev->int_req & ~NES_INT_INTF;
- else
- int_stat &= nesdev->int_req;
- if (processed_timer_int == 0) {
- processed_timer_int = 1;
- if (int_stat & NES_INT_TIMER) {
- timer_stat = nes_read32(nesdev->regs + NES_TIMER_STAT);
- if ((timer_stat & nesdev->timer_int_req) == 0) {
- int_stat &= ~NES_INT_TIMER;
- }
- }
- } else {
- int_stat &= ~NES_INT_TIMER;
- }
-
- if (int_stat) {
- if (int_stat & ~(NES_INT_INTF | NES_INT_TIMER | NES_INT_MAC0|
- NES_INT_MAC1|NES_INT_MAC2 | NES_INT_MAC3)) {
- /* Ack the interrupts */
- nes_write32(nesdev->regs+NES_INT_STAT,
- (int_stat & ~(NES_INT_INTF | NES_INT_TIMER | NES_INT_MAC0|
- NES_INT_MAC1 | NES_INT_MAC2 | NES_INT_MAC3)));
- }
-
- temp_int_stat = int_stat;
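-			/* the low 16 interrupt status bits map one-to-one to CEQs */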
- for (counter = 0, int_status_bit = 1; counter < 16; counter++) {
- if (int_stat & int_status_bit) {
- nes_process_ceq(nesdev, &nesadapter->ceq[counter]);
- temp_int_stat &= ~int_status_bit;
- completion_ints = 1;
- }
- if (!(temp_int_stat & 0x0000ffff))
- break;
- int_status_bit <<= 1;
- }
-
- /* Process the AEQ for this pci function */
- int_status_bit = 1 << (16 + PCI_FUNC(nesdev->pcidev->devfn));
- if (int_stat & int_status_bit) {
- nes_process_aeq(nesdev, &nesadapter->aeq[PCI_FUNC(nesdev->pcidev->devfn)]);
- }
-
- /* Process the MAC interrupt for this pci function */
- int_status_bit = 1 << (24 + nesdev->mac_index);
- if (int_stat & int_status_bit) {
- nes_process_mac_intr(nesdev, nesdev->mac_index);
- }
-
- if (int_stat & NES_INT_TIMER) {
- if (timer_stat & nesdev->timer_int_req) {
- nes_write32(nesdev->regs + NES_TIMER_STAT,
- (timer_stat & nesdev->timer_int_req) |
- ~(nesdev->nesadapter->timer_int_req));
- timer_ints = 1;
- }
- }
-
- if (int_stat & NES_INT_INTF) {
- processed_intf_int = 1;
- intf_int_stat = nes_read32(nesdev->regs+NES_INTF_INT_STAT);
- intf_int_stat &= nesdev->intf_int_req;
- if (NES_INTF_INT_CRITERR & intf_int_stat) {
- process_critical_error(nesdev);
- }
- if (NES_INTF_INT_PCIERR & intf_int_stat) {
- printk(KERN_ERR PFX "PCI Error reported by device!!!\n");
- BUG();
- }
- if (NES_INTF_INT_AEQ_OFLOW & intf_int_stat) {
- printk(KERN_ERR PFX "AEQ Overflow reported by device!!!\n");
- BUG();
- }
- nes_write32(nesdev->regs+NES_INTF_INT_STAT, intf_int_stat);
- }
-
- if (int_stat & NES_INT_TSW) {
- }
- }
-		/* Don't let the interface interrupt bit keep us in the loop */
- int_stat &= ~NES_INT_INTF | NES_INT_TIMER | NES_INT_MAC0 |
- NES_INT_MAC1 | NES_INT_MAC2 | NES_INT_MAC3;
- } while ((int_stat != 0) && (loop_counter++ < MAX_DPC_ITERATIONS));
-
- if (timer_ints == 1) {
- if ((nesadapter->et_rx_coalesce_usecs_irq) || (nesadapter->et_use_adaptive_rx_coalesce)) {
- if (completion_ints == 0) {
- nesdev->timer_only_int_count++;
- if (nesdev->timer_only_int_count>=nesadapter->timer_int_limit) {
- nesdev->timer_only_int_count = 0;
- nesdev->int_req &= ~NES_INT_TIMER;
- nes_write32(nesdev->regs + NES_INTF_INT_MASK, ~(nesdev->intf_int_req));
- nes_write32(nesdev->regs + NES_INT_MASK, ~nesdev->int_req);
- } else {
- nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff | (~nesdev->int_req));
- }
- } else {
-				if (unlikely(nesadapter->et_use_adaptive_rx_coalesce))
-					nes_nic_init_timer(nesdev);
- nesdev->timer_only_int_count = 0;
- nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff | (~nesdev->int_req));
- }
- } else {
- nesdev->timer_only_int_count = 0;
- nesdev->int_req &= ~NES_INT_TIMER;
- nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req));
- nes_write32(nesdev->regs+NES_TIMER_STAT,
- nesdev->timer_int_req | ~(nesdev->nesadapter->timer_int_req));
- nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
- }
- } else {
- if ( (completion_ints == 1) &&
- (((nesadapter->et_rx_coalesce_usecs_irq) &&
- (!nesadapter->et_use_adaptive_rx_coalesce)) ||
- ((nesdev->deepcq_count > nesadapter->et_pkt_rate_low) &&
- (nesadapter->et_use_adaptive_rx_coalesce) )) ) {
- /* nes_debug(NES_DBG_ISR, "Enabling periodic timer interrupt.\n" ); */
- nesdev->timer_only_int_count = 0;
- nesdev->int_req |= NES_INT_TIMER;
- nes_write32(nesdev->regs+NES_TIMER_STAT,
- nesdev->timer_int_req | ~(nesdev->nesadapter->timer_int_req));
- nes_write32(nesdev->regs+NES_INTF_INT_MASK,
- ~(nesdev->intf_int_req | NES_INTF_PERIODIC_TIMER));
- nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff | (~nesdev->int_req));
- } else {
- nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
- }
- }
- nesdev->deepcq_count = 0;
-}
-
-
-/**
- * nes_process_ceq - process completion events on a CEQ and invoke each CQ's handler
- */
-static void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq)
-{
- u64 u64temp;
- struct nes_hw_cq *cq;
- u32 head;
- u32 ceq_size;
-
- /* nes_debug(NES_DBG_CQ, "\n"); */
- head = ceq->ceq_head;
- ceq_size = ceq->ceq_size;
-
- do {
- if (le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX]) &
- NES_CEQE_VALID) {
- u64temp = (((u64)(le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX]))) << 32) |
- ((u64)(le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_LOW_IDX])));
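-			/* the stored context is the CQ pointer shifted right one bit; shift it back */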
- u64temp <<= 1;
- cq = *((struct nes_hw_cq **)&u64temp);
- /* nes_debug(NES_DBG_CQ, "pCQ = %p\n", cq); */
- barrier();
- ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX] = 0;
-
- /* call the event handler */
- cq->ce_handler(nesdev, cq);
-
- if (++head >= ceq_size)
- head = 0;
- } else {
- break;
- }
-
- } while (1);
-
- ceq->ceq_head = head;
-}
-
-
-/**
- * nes_process_aeq - process asynchronous event entries on the AEQ
- */
-static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq)
-{
- /* u64 u64temp; */
- u32 head;
- u32 aeq_size;
- u32 aeqe_misc;
- u32 aeqe_cq_id;
- struct nes_hw_aeqe volatile *aeqe;
-
- head = aeq->aeq_head;
- aeq_size = aeq->aeq_size;
-
- do {
- aeqe = &aeq->aeq_vbase[head];
- if ((le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]) & NES_AEQE_VALID) == 0)
- break;
- aeqe_misc = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
- aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]);
- if (aeqe_misc & (NES_AEQE_QP|NES_AEQE_CQ)) {
- if (aeqe_cq_id >= NES_FIRST_QPN) {
- /* dealing with an accelerated QP related AE */
- /*
- * u64temp = (((u64)(le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX]))) << 32) |
- * ((u64)(le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX])));
- */
- nes_process_iwarp_aeqe(nesdev, (struct nes_hw_aeqe *)aeqe);
- } else {
- /* TODO: dealing with a CQP related AE */
- nes_debug(NES_DBG_AEQ, "Processing CQP related AE, misc = 0x%04X\n",
- (u16)(aeqe_misc >> 16));
- }
- }
-
- aeqe->aeqe_words[NES_AEQE_MISC_IDX] = 0;
-
- if (++head >= aeq_size)
- head = 0;
-
- nes_write32(nesdev->regs + NES_AEQ_ALLOC, 1 << 16);
-	} while (1);
- aeq->aeq_head = head;
-}
-
-static void nes_reset_link(struct nes_device *nesdev, u32 mac_index)
-{
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- u32 reset_value;
- u32 i=0;
- u32 u32temp;
-
- if (nesadapter->hw_rev == NE020_REV) {
- return;
- }
- mh_detected++;
-
- reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
-
- if ((mac_index == 0) || ((mac_index == 1) && (nesadapter->OneG_Mode)))
- reset_value |= 0x0000001d;
- else
- reset_value |= 0x0000002d;
-
- if (4 <= (nesadapter->link_interrupt_count[mac_index] / ((u16)NES_MAX_LINK_INTERRUPTS))) {
- if ((!nesadapter->OneG_Mode) && (nesadapter->port_count == 2)) {
- nesadapter->link_interrupt_count[0] = 0;
- nesadapter->link_interrupt_count[1] = 0;
- u32temp = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
- if (0x00000040 & u32temp)
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F088);
- else
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F0C8);
-
- reset_value |= 0x0000003d;
- }
- nesadapter->link_interrupt_count[mac_index] = 0;
- }
-
- nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value);
-
- while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET)
- & 0x00000040) != 0x00000040) && (i++ < 5000));
-
- if (0x0000003d == (reset_value & 0x0000003d)) {
- u32 pcs_control_status0, pcs_control_status1;
-
- for (i = 0; i < 10; i++) {
- pcs_control_status0 = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0);
- pcs_control_status1 = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
- if (((0x0F000000 == (pcs_control_status0 & 0x0F000000))
- && (pcs_control_status0 & 0x00100000))
- || ((0x0F000000 == (pcs_control_status1 & 0x0F000000))
- && (pcs_control_status1 & 0x00100000)))
- continue;
- else
- break;
- }
- if (10 == i) {
- u32temp = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
- if (0x00000040 & u32temp)
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F088);
- else
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F0C8);
-
- nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value);
-
- while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET)
- & 0x00000040) != 0x00000040) && (i++ < 5000));
- }
- }
-}
-
-/**
- * nes_process_mac_intr - handle a MAC/PHY interrupt and update the link state
- */
-static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
-{
- unsigned long flags;
- u32 pcs_control_status;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- struct nes_vnic *nesvnic;
- u32 mac_status;
- u32 mac_index = nesdev->mac_index;
- u32 u32temp;
- u16 phy_data;
- u16 temp_phy_data;
- u32 pcs_val = 0x0f0f0000;
- u32 pcs_mask = 0x0f1f0000;
- u32 cdr_ctrl;
-
- spin_lock_irqsave(&nesadapter->phy_lock, flags);
- if (nesadapter->mac_sw_state[mac_number] != NES_MAC_SW_IDLE) {
- spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
- return;
- }
- nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_INTERRUPT;
-
- /* ack the MAC interrupt */
- mac_status = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (mac_index * 0x200));
- /* Clear the interrupt */
- nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (mac_index * 0x200), mac_status);
-
- nes_debug(NES_DBG_PHY, "MAC%u interrupt status = 0x%X.\n", mac_number, mac_status);
-
- if (mac_status & (NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT)) {
- nesdev->link_status_interrupts++;
- if (0 == (++nesadapter->link_interrupt_count[mac_index] % ((u16)NES_MAX_LINK_INTERRUPTS)))
- nes_reset_link(nesdev, mac_index);
-
- /* read the PHY interrupt status register */
- if ((nesadapter->OneG_Mode) &&
- (nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) {
- do {
- nes_read_1G_phy_reg(nesdev, 0x1a,
- nesadapter->phy_index[mac_index], &phy_data);
- nes_debug(NES_DBG_PHY, "Phy%d data from register 0x1a = 0x%X.\n",
- nesadapter->phy_index[mac_index], phy_data);
- } while (phy_data&0x8000);
-
- temp_phy_data = 0;
- do {
- nes_read_1G_phy_reg(nesdev, 0x11,
- nesadapter->phy_index[mac_index], &phy_data);
- nes_debug(NES_DBG_PHY, "Phy%d data from register 0x11 = 0x%X.\n",
- nesadapter->phy_index[mac_index], phy_data);
- if (temp_phy_data == phy_data)
- break;
- temp_phy_data = phy_data;
- } while (1);
-
- nes_read_1G_phy_reg(nesdev, 0x1e,
- nesadapter->phy_index[mac_index], &phy_data);
- nes_debug(NES_DBG_PHY, "Phy%d data from register 0x1e = 0x%X.\n",
- nesadapter->phy_index[mac_index], phy_data);
-
- nes_read_1G_phy_reg(nesdev, 1,
- nesadapter->phy_index[mac_index], &phy_data);
- nes_debug(NES_DBG_PHY, "1G phy%u data from register 1 = 0x%X\n",
- nesadapter->phy_index[mac_index], phy_data);
-
- if (temp_phy_data & 0x1000) {
- nes_debug(NES_DBG_PHY, "The Link is up according to the PHY\n");
- phy_data = 4;
- } else {
- nes_debug(NES_DBG_PHY, "The Link is down according to the PHY\n");
- }
- }
- nes_debug(NES_DBG_PHY, "Eth SERDES Common Status: 0=0x%08X, 1=0x%08X\n",
- nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0),
- nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0+0x200));
-
- if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_PUMA_1G) {
- switch (mac_index) {
- case 1:
- case 3:
- pcs_control_status = nes_read_indexed(nesdev,
- NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
- break;
- default:
- pcs_control_status = nes_read_indexed(nesdev,
- NES_IDX_PHY_PCS_CONTROL_STATUS0);
- break;
- }
- } else {
- pcs_control_status = nes_read_indexed(nesdev,
- NES_IDX_PHY_PCS_CONTROL_STATUS0 + ((mac_index & 1) * 0x200));
- pcs_control_status = nes_read_indexed(nesdev,
- NES_IDX_PHY_PCS_CONTROL_STATUS0 + ((mac_index & 1) * 0x200));
- }
-
- nes_debug(NES_DBG_PHY, "PCS PHY Control/Status%u: 0x%08X\n",
- mac_index, pcs_control_status);
- if ((nesadapter->OneG_Mode) &&
- (nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) {
- u32temp = 0x01010000;
- if (nesadapter->port_count > 2) {
- u32temp |= 0x02020000;
- }
- if ((pcs_control_status & u32temp)!= u32temp) {
- phy_data = 0;
- nes_debug(NES_DBG_PHY, "PCS says the link is down\n");
- }
- } else {
- switch (nesadapter->phy_type[mac_index]) {
- case NES_PHY_TYPE_ARGUS:
- case NES_PHY_TYPE_SFP_D:
- case NES_PHY_TYPE_KR:
- /* clear the alarms */
- nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0x0008);
- nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc001);
- nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc002);
- nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc005);
- nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc006);
- nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9003);
- nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9004);
- nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9005);
- /* check link status */
- nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9003);
- temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
-
- nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021);
- nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021);
- phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
-
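-				/* 0x4 indicates the link is up */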
- phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0;
-
- nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
- __func__, phy_data, nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP");
- break;
-
- case NES_PHY_TYPE_PUMA_1G:
- if (mac_index < 2)
- pcs_val = pcs_mask = 0x01010000;
- else
- pcs_val = pcs_mask = 0x02020000;
- /* fall through */
- default:
- phy_data = (pcs_val == (pcs_control_status & pcs_mask)) ? 0x4 : 0x0;
- break;
- }
- }
-
- if (phy_data & 0x0004) {
- if (wide_ppm_offset &&
- (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_CX4) &&
- (nesadapter->hw_rev != NE020_REV)) {
- cdr_ctrl = nes_read_indexed(nesdev,
- NES_IDX_ETH_SERDES_CDR_CONTROL0 +
- mac_index * 0x200);
- nes_write_indexed(nesdev,
- NES_IDX_ETH_SERDES_CDR_CONTROL0 +
- mac_index * 0x200,
- cdr_ctrl | 0x000F0000);
- }
- nesadapter->mac_link_down[mac_index] = 0;
- list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
-			nes_debug(NES_DBG_PHY, "The Link is UP! linkup was %d\n",
- nesvnic->linkup);
- if (nesvnic->linkup == 0) {
- printk(PFX "The Link is now up for port %s, netdev %p.\n",
- nesvnic->netdev->name, nesvnic->netdev);
- if (netif_queue_stopped(nesvnic->netdev))
- netif_start_queue(nesvnic->netdev);
- nesvnic->linkup = 1;
- netif_carrier_on(nesvnic->netdev);
-
- spin_lock(&nesvnic->port_ibevent_lock);
- if (nesvnic->of_device_registered) {
- if (nesdev->iw_status == 0) {
- nesdev->iw_status = 1;
- nes_port_ibevent(nesvnic);
- }
- }
- spin_unlock(&nesvnic->port_ibevent_lock);
- }
- }
- } else {
- if (wide_ppm_offset &&
- (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_CX4) &&
- (nesadapter->hw_rev != NE020_REV)) {
- cdr_ctrl = nes_read_indexed(nesdev,
- NES_IDX_ETH_SERDES_CDR_CONTROL0 +
- mac_index * 0x200);
- nes_write_indexed(nesdev,
- NES_IDX_ETH_SERDES_CDR_CONTROL0 +
- mac_index * 0x200,
- cdr_ctrl & 0xFFF0FFFF);
- }
- nesadapter->mac_link_down[mac_index] = 1;
- list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
-			nes_debug(NES_DBG_PHY, "The Link is Down! linkup was %d\n",
- nesvnic->linkup);
- if (nesvnic->linkup == 1) {
- printk(PFX "The Link is now down for port %s, netdev %p.\n",
- nesvnic->netdev->name, nesvnic->netdev);
- if (!(netif_queue_stopped(nesvnic->netdev)))
- netif_stop_queue(nesvnic->netdev);
- nesvnic->linkup = 0;
- netif_carrier_off(nesvnic->netdev);
-
- spin_lock(&nesvnic->port_ibevent_lock);
- if (nesvnic->of_device_registered) {
- if (nesdev->iw_status == 1) {
- nesdev->iw_status = 0;
- nes_port_ibevent(nesvnic);
- }
- }
- spin_unlock(&nesvnic->port_ibevent_lock);
- }
- }
- }
- if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_SFP_D) {
- nesdev->link_recheck = 1;
- mod_delayed_work(system_wq, &nesdev->work,
- NES_LINK_RECHECK_DELAY);
- }
- }
-
- spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
-
- nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_IDLE;
-}
-
-void nes_recheck_link_status(struct work_struct *work)
-{
- unsigned long flags;
- struct nes_device *nesdev = container_of(work, struct nes_device, work.work);
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- struct nes_vnic *nesvnic;
- u32 mac_index = nesdev->mac_index;
- u16 phy_data;
- u16 temp_phy_data;
-
- spin_lock_irqsave(&nesadapter->phy_lock, flags);
-
- /* check link status */
- nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9003);
- temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
-
- nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021);
- nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021);
- phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
-
- phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0;
-
- nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
- __func__, phy_data,
- nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP");
-
- if (phy_data & 0x0004) {
- nesadapter->mac_link_down[mac_index] = 0;
- list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
- if (nesvnic->linkup == 0) {
- printk(PFX "The Link is now up for port %s, netdev %p.\n",
- nesvnic->netdev->name, nesvnic->netdev);
- if (netif_queue_stopped(nesvnic->netdev))
- netif_start_queue(nesvnic->netdev);
- nesvnic->linkup = 1;
- netif_carrier_on(nesvnic->netdev);
-
- spin_lock(&nesvnic->port_ibevent_lock);
- if (nesvnic->of_device_registered) {
- if (nesdev->iw_status == 0) {
- nesdev->iw_status = 1;
- nes_port_ibevent(nesvnic);
- }
- }
- spin_unlock(&nesvnic->port_ibevent_lock);
- }
- }
-
- } else {
- nesadapter->mac_link_down[mac_index] = 1;
- list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
- if (nesvnic->linkup == 1) {
- printk(PFX "The Link is now down for port %s, netdev %p.\n",
- nesvnic->netdev->name, nesvnic->netdev);
- if (!(netif_queue_stopped(nesvnic->netdev)))
- netif_stop_queue(nesvnic->netdev);
- nesvnic->linkup = 0;
- netif_carrier_off(nesvnic->netdev);
-
- spin_lock(&nesvnic->port_ibevent_lock);
- if (nesvnic->of_device_registered) {
- if (nesdev->iw_status == 1) {
- nesdev->iw_status = 0;
- nes_port_ibevent(nesvnic);
- }
- }
- spin_unlock(&nesvnic->port_ibevent_lock);
- }
- }
- }
- if (nesdev->link_recheck++ < NES_LINK_RECHECK_MAX)
- schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY);
- else
- nesdev->link_recheck = 0;
-
- spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
-}
-
-
-static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
-{
- struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
-
- napi_schedule(&nesvnic->napi);
-}
-
-
-/* MAX_RQES_TO_PROCESS defines the maximum number of RQEs to process before
- * getting out of nes_nic_ce_handler.
- */
-#define MAX_RQES_TO_PROCESS 384
-
-/**
- * nes_nic_ce_handler - process NIC CQ completions for both transmit and receive
- */
-void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
-{
- u64 u64temp;
- dma_addr_t bus_address;
- struct nes_hw_nic *nesnic;
- struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- struct nes_hw_nic_rq_wqe *nic_rqe;
- struct nes_hw_nic_sq_wqe *nic_sqe;
- struct sk_buff *skb;
- struct sk_buff *rx_skb;
- struct nes_rskb_cb *cb;
- __le16 *wqe_fragment_length;
- u32 head;
- u32 cq_size;
- u32 rx_pkt_size;
- u32 cqe_count=0;
- u32 cqe_errv;
- u32 cqe_misc;
- u16 wqe_fragment_index = 1; /* first fragment (0) is used by copy buffer */
- u16 vlan_tag;
- u16 pkt_type;
- u16 rqes_processed = 0;
- u8 sq_cqes = 0;
- u8 nes_use_lro = 0;
-
- head = cq->cq_head;
- cq_size = cq->cq_size;
- cq->cqes_pending = 1;
- if (nesvnic->netdev->features & NETIF_F_LRO)
- nes_use_lro = 1;
- do {
- if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
- NES_NIC_CQE_VALID) {
- nesnic = &nesvnic->nic;
- cqe_misc = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]);
- if (cqe_misc & NES_NIC_CQE_SQ) {
- sq_cqes++;
- wqe_fragment_index = 1;
- nic_sqe = &nesnic->sq_vbase[nesnic->sq_tail];
- skb = nesnic->tx_skb[nesnic->sq_tail];
- wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
- /* bump past the vlan tag */
- wqe_fragment_length++;
- if (le16_to_cpu(wqe_fragment_length[wqe_fragment_index]) != 0) {
- u64temp = (u64) le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX +
- wqe_fragment_index * 2]);
- u64temp += ((u64)le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX +
- wqe_fragment_index * 2])) << 32;
- bus_address = (dma_addr_t)u64temp;
- if (test_and_clear_bit(nesnic->sq_tail, nesnic->first_frag_overflow)) {
- pci_unmap_single(nesdev->pcidev,
- bus_address,
- le16_to_cpu(wqe_fragment_length[wqe_fragment_index++]),
- PCI_DMA_TODEVICE);
- }
- for (; wqe_fragment_index < 5; wqe_fragment_index++) {
- if (wqe_fragment_length[wqe_fragment_index]) {
- u64temp = le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX +
- wqe_fragment_index * 2]);
- u64temp += ((u64)le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX
- + wqe_fragment_index * 2])) <<32;
- bus_address = (dma_addr_t)u64temp;
- pci_unmap_page(nesdev->pcidev,
- bus_address,
- le16_to_cpu(wqe_fragment_length[wqe_fragment_index]),
- PCI_DMA_TODEVICE);
- } else
- break;
- }
- }
- if (skb)
- dev_kfree_skb_any(skb);
- nesnic->sq_tail++;
- nesnic->sq_tail &= nesnic->sq_size-1;
- if (sq_cqes > 128) {
- barrier();
- /* restart the queue if it had been stopped */
- if (netif_queue_stopped(nesvnic->netdev))
- netif_wake_queue(nesvnic->netdev);
- sq_cqes = 0;
- }
- } else {
-				rqes_processed++;
-
- cq->rx_cqes_completed++;
- cq->rx_pkts_indicated++;
- rx_pkt_size = cqe_misc & 0x0000ffff;
-				nic_rqe = &nesnic->rq_vbase[nesnic->rq_tail];
-				/* Get the skb */
-				rx_skb = nesnic->rx_skb[nesnic->rq_tail];
- bus_address = (dma_addr_t)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX]);
- bus_address += ((u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX])) << 32;
- pci_unmap_single(nesdev->pcidev, bus_address,
- nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
- cb = (struct nes_rskb_cb *)&rx_skb->cb[0];
- cb->busaddr = 0;
- /* rx_skb->tail = rx_skb->data + rx_pkt_size; */
- /* rx_skb->len = rx_pkt_size; */
- rx_skb->len = 0; /* TODO: see if this is necessary */
- skb_put(rx_skb, rx_pkt_size);
- rx_skb->protocol = eth_type_trans(rx_skb, nesvnic->netdev);
- nesnic->rq_tail++;
- nesnic->rq_tail &= nesnic->rq_size - 1;
-
- atomic_inc(&nesvnic->rx_skbs_needed);
- if (atomic_read(&nesvnic->rx_skbs_needed) > (nesvnic->nic.rq_size>>1)) {
- nes_write32(nesdev->regs+NES_CQE_ALLOC,
- cq->cq_number | (cqe_count << 16));
- /* nesadapter->tune_timer.cq_count += cqe_count; */
- nesdev->currcq_count += cqe_count;
- cqe_count = 0;
- nes_replenish_nic_rq(nesvnic);
- }
- pkt_type = (u16)(le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_TAG_PKT_TYPE_IDX]));
- cqe_errv = (cqe_misc & NES_NIC_CQE_ERRV_MASK) >> NES_NIC_CQE_ERRV_SHIFT;
- rx_skb->ip_summed = CHECKSUM_NONE;
-
- if ((NES_PKT_TYPE_TCPV4_BITS == (pkt_type & NES_PKT_TYPE_TCPV4_MASK)) ||
- (NES_PKT_TYPE_UDPV4_BITS == (pkt_type & NES_PKT_TYPE_UDPV4_MASK))) {
- if ((cqe_errv &
- (NES_NIC_ERRV_BITS_IPV4_CSUM_ERR | NES_NIC_ERRV_BITS_TCPUDP_CSUM_ERR |
- NES_NIC_ERRV_BITS_IPH_ERR | NES_NIC_ERRV_BITS_WQE_OVERRUN)) == 0) {
- if (nesvnic->netdev->features & NETIF_F_RXCSUM)
- rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else
- nes_debug(NES_DBG_CQ, "%s: unsuccessfully checksummed TCP or UDP packet."
- " errv = 0x%X, pkt_type = 0x%X.\n",
- nesvnic->netdev->name, cqe_errv, pkt_type);
-
- } else if ((pkt_type & NES_PKT_TYPE_IPV4_MASK) == NES_PKT_TYPE_IPV4_BITS) {
- if ((cqe_errv &
- (NES_NIC_ERRV_BITS_IPV4_CSUM_ERR | NES_NIC_ERRV_BITS_IPH_ERR |
- NES_NIC_ERRV_BITS_WQE_OVERRUN)) == 0) {
- if (nesvnic->netdev->features & NETIF_F_RXCSUM) {
- rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
- /* nes_debug(NES_DBG_CQ, "%s: Reporting successfully checksummed IPv4 packet.\n",
- nesvnic->netdev->name); */
- }
- } else
-						nes_debug(NES_DBG_CQ, "%s: unsuccessfully checksummed IPv4 packet."
- " errv = 0x%X, pkt_type = 0x%X.\n",
- nesvnic->netdev->name, cqe_errv, pkt_type);
- }
- /* nes_debug(NES_DBG_CQ, "pkt_type=%x, APBVT_MASK=%x\n",
- pkt_type, (pkt_type & NES_PKT_TYPE_APBVT_MASK)); */
-
- if ((pkt_type & NES_PKT_TYPE_APBVT_MASK) == NES_PKT_TYPE_APBVT_BITS) {
- if (nes_cm_recv(rx_skb, nesvnic->netdev))
- rx_skb = NULL;
- }
- if (rx_skb == NULL)
- goto skip_rx_indicate0;
-
-
- if (cqe_misc & NES_NIC_CQE_TAG_VALID) {
- vlan_tag = (u16)(le32_to_cpu(
- cq->cq_vbase[head].cqe_words[NES_NIC_CQE_TAG_PKT_TYPE_IDX])
- >> 16);
- nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
- nesvnic->netdev->name, vlan_tag);
-
- __vlan_hwaccel_put_tag(rx_skb, htons(ETH_P_8021Q), vlan_tag);
- }
- if (nes_use_lro)
- lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL);
- else
- netif_receive_skb(rx_skb);
-
-skip_rx_indicate0:
- ;
- /* nesvnic->netstats.rx_packets++; */
- /* nesvnic->netstats.rx_bytes += rx_pkt_size; */
- }
-
- cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX] = 0;
- /* Accounting... */
- cqe_count++;
- if (++head >= cq_size)
- head = 0;
- if (cqe_count == 255) {
- /* Replenish Nic CQ */
- nes_write32(nesdev->regs+NES_CQE_ALLOC,
- cq->cq_number | (cqe_count << 16));
- /* nesdev->nesadapter->tune_timer.cq_count += cqe_count; */
- nesdev->currcq_count += cqe_count;
- cqe_count = 0;
- }
-
- if (cq->rx_cqes_completed >= nesvnic->budget)
- break;
- } else {
- cq->cqes_pending = 0;
- break;
- }
-
- } while (1);
-
- if (nes_use_lro)
- lro_flush_all(&nesvnic->lro_mgr);
- if (sq_cqes) {
- barrier();
- /* restart the queue if it had been stopped */
- if (netif_queue_stopped(nesvnic->netdev))
- netif_wake_queue(nesvnic->netdev);
- }
- cq->cq_head = head;
- /* nes_debug(NES_DBG_CQ, "CQ%u Processed = %u cqes, new head = %u.\n",
- cq->cq_number, cqe_count, cq->cq_head); */
- cq->cqe_allocs_pending = cqe_count;
-	if (unlikely(nesadapter->et_use_adaptive_rx_coalesce)) {
- /* nesdev->nesadapter->tune_timer.cq_count += cqe_count; */
- nesdev->currcq_count += cqe_count;
- nes_nic_tune_timer(nesdev);
- }
- if (atomic_read(&nesvnic->rx_skbs_needed))
- nes_replenish_nic_rq(nesvnic);
-}
-
-
-
-/**
- * nes_cqp_ce_handler - process CQP completions and post any pending CQP requests
- */
-static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
-{
- u64 u64temp;
- unsigned long flags;
- struct nes_hw_cqp *cqp = NULL;
- struct nes_cqp_request *cqp_request;
- struct nes_hw_cqp_wqe *cqp_wqe;
- u32 head;
- u32 cq_size;
- u32 cqe_count=0;
- u32 error_code;
- u32 opcode;
- u32 ctx_index;
- /* u32 counter; */
-
- head = cq->cq_head;
- cq_size = cq->cq_size;
-
- do {
- /* process the CQE */
- /* nes_debug(NES_DBG_CQP, "head=%u cqe_words=%08X\n", head,
- le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX])); */
-
- opcode = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]);
- if (opcode & NES_CQE_VALID) {
- cqp = &nesdev->cqp;
-
- error_code = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_ERROR_CODE_IDX]);
- if (error_code) {
- nes_debug(NES_DBG_CQP, "Bad Completion code for opcode 0x%02X from CQP,"
- " Major/Minor codes = 0x%04X:%04X.\n",
- le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX])&0x3f,
- (u16)(error_code >> 16),
- (u16)error_code);
- }
-
- u64temp = (((u64)(le32_to_cpu(cq->cq_vbase[head].
- cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX]))) << 32) |
- ((u64)(le32_to_cpu(cq->cq_vbase[head].
- cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX])));
-
- cqp_request = (struct nes_cqp_request *)(unsigned long)u64temp;
- if (cqp_request) {
- if (cqp_request->waiting) {
- /* nes_debug(NES_DBG_CQP, "%s: Waking up requestor\n"); */
- cqp_request->major_code = (u16)(error_code >> 16);
- cqp_request->minor_code = (u16)error_code;
- barrier();
- cqp_request->request_done = 1;
- wake_up(&cqp_request->waitq);
- nes_put_cqp_request(nesdev, cqp_request);
- } else {
- if (cqp_request->callback)
- cqp_request->cqp_callback(nesdev, cqp_request);
- nes_free_cqp_request(nesdev, cqp_request);
- }
- } else {
- wake_up(&nesdev->cqp.waitq);
- }
-
- cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
- nes_write32(nesdev->regs + NES_CQE_ALLOC, cq->cq_number | (1 << 16));
- if (++cqp->sq_tail >= cqp->sq_size)
- cqp->sq_tail = 0;
-
- /* Accounting... */
- cqe_count++;
- if (++head >= cq_size)
- head = 0;
- } else {
- break;
- }
- } while (1);
- cq->cq_head = head;
-
- spin_lock_irqsave(&nesdev->cqp.lock, flags);
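-	/* post pending CQP requests while more than one free SQ slot remains */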
- while ((!list_empty(&nesdev->cqp_pending_reqs)) &&
- ((((nesdev->cqp.sq_tail+nesdev->cqp.sq_size)-nesdev->cqp.sq_head) &
- (nesdev->cqp.sq_size - 1)) != 1)) {
- cqp_request = list_entry(nesdev->cqp_pending_reqs.next,
- struct nes_cqp_request, list);
- list_del_init(&cqp_request->list);
- head = nesdev->cqp.sq_head++;
- nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
- cqp_wqe = &nesdev->cqp.sq_vbase[head];
- memcpy(cqp_wqe, &cqp_request->cqp_wqe, sizeof(*cqp_wqe));
- barrier();
-
- opcode = cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX];
- if ((opcode & NES_CQP_OPCODE_MASK) == NES_CQP_DOWNLOAD_SEGMENT)
- ctx_index = NES_CQP_WQE_DL_COMP_CTX_LOW_IDX;
- else
- ctx_index = NES_CQP_WQE_COMP_CTX_LOW_IDX;
- cqp_wqe->wqe_words[ctx_index] =
- cpu_to_le32((u32)((unsigned long)cqp_request));
- cqp_wqe->wqe_words[ctx_index + 1] =
- cpu_to_le32((u32)(upper_32_bits((unsigned long)cqp_request)));
- nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) put on CQPs SQ wqe%u.\n",
- cqp_request, le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f, head);
-		/* Ring doorbell (1 WQE) */
- barrier();
- nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x01800000 | nesdev->cqp.qp_id);
- }
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
-
- /* Arm the CCQ */
- nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
- cq->cq_number);
- nes_read32(nesdev->regs+NES_CQE_ALLOC);
-}
-
-static u8 *locate_mpa(u8 *pkt, u32 aeq_info)
-{
- if (aeq_info & NES_AEQE_Q2_DATA_ETHERNET) {
- /* skip over ethernet header */
- pkt += ETH_HLEN;
-
- /* Skip over IP and TCP headers */
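-		/* both header lengths are expressed in 32-bit words */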
- pkt += 4 * (pkt[0] & 0x0f);
- pkt += 4 * ((pkt[12] >> 4) & 0x0f);
- }
- return pkt;
-}
-
-/* Determine whether the incoming error pkt is at the RDMA layer */
-static u32 iwarp_opcode(struct nes_qp *nesqp, u32 aeq_info)
-{
- u8 *pkt;
- u16 *mpa;
- u32 opcode = 0xffffffff;
-
- if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
- pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
- mpa = (u16 *)locate_mpa(pkt, aeq_info);
- opcode = be16_to_cpu(mpa[1]) & 0xf;
- }
-
- return opcode;
-}
-
-/* Build iWARP terminate header */
-static int nes_bld_terminate_hdr(struct nes_qp *nesqp, u16 async_event_id, u32 aeq_info)
-{
- u8 *pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
- u16 ddp_seg_len;
- int copy_len = 0;
- u8 is_tagged = 0;
- u8 flush_code = 0;
- struct nes_terminate_hdr *termhdr;
-
- termhdr = (struct nes_terminate_hdr *)nesqp->hwqp.q2_vbase;
- memset(termhdr, 0, 64);
-
- if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
-
- /* Use data from offending packet to fill in ddp & rdma hdrs */
- pkt = locate_mpa(pkt, aeq_info);
- ddp_seg_len = be16_to_cpu(*(u16 *)pkt);
- if (ddp_seg_len) {
- copy_len = 2;
- termhdr->hdrct = DDP_LEN_FLAG;
- if (pkt[2] & 0x80) {
- is_tagged = 1;
- if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
- copy_len += TERM_DDP_LEN_TAGGED;
- termhdr->hdrct |= DDP_HDR_FLAG;
- }
- } else {
- if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
- copy_len += TERM_DDP_LEN_UNTAGGED;
- termhdr->hdrct |= DDP_HDR_FLAG;
- }
-
- if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) {
- if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {
- copy_len += TERM_RDMA_LEN;
- termhdr->hdrct |= RDMA_HDR_FLAG;
- }
- }
- }
- }
- }
-
- switch (async_event_id) {
- case NES_AEQE_AEID_AMP_UNALLOCATED_STAG:
- switch (iwarp_opcode(nesqp, aeq_info)) {
- case IWARP_OPCODE_WRITE:
- flush_code = IB_WC_LOC_PROT_ERR;
- termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
- termhdr->error_code = DDP_TAGGED_INV_STAG;
- break;
- default:
- flush_code = IB_WC_REM_ACCESS_ERR;
- termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
- termhdr->error_code = RDMAP_INV_STAG;
- }
- break;
- case NES_AEQE_AEID_AMP_INVALID_STAG:
- flush_code = IB_WC_REM_ACCESS_ERR;
- termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
- termhdr->error_code = RDMAP_INV_STAG;
- break;
- case NES_AEQE_AEID_AMP_BAD_QP:
- flush_code = IB_WC_LOC_QP_OP_ERR;
- termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
- termhdr->error_code = DDP_UNTAGGED_INV_QN;
- break;
- case NES_AEQE_AEID_AMP_BAD_STAG_KEY:
- case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
- switch (iwarp_opcode(nesqp, aeq_info)) {
- case IWARP_OPCODE_SEND_INV:
- case IWARP_OPCODE_SEND_SE_INV:
- flush_code = IB_WC_REM_OP_ERR;
- termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
- termhdr->error_code = RDMAP_CANT_INV_STAG;
- break;
- default:
- flush_code = IB_WC_REM_ACCESS_ERR;
- termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
- termhdr->error_code = RDMAP_INV_STAG;
- }
- break;
- case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
- if (aeq_info & (NES_AEQE_Q2_DATA_ETHERNET | NES_AEQE_Q2_DATA_MPA)) {
- flush_code = IB_WC_LOC_PROT_ERR;
- termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
- termhdr->error_code = DDP_TAGGED_BOUNDS;
- } else {
- flush_code = IB_WC_REM_ACCESS_ERR;
- termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
- termhdr->error_code = RDMAP_INV_BOUNDS;
- }
- break;
- case NES_AEQE_AEID_AMP_RIGHTS_VIOLATION:
- case NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
- case NES_AEQE_AEID_PRIV_OPERATION_DENIED:
- flush_code = IB_WC_REM_ACCESS_ERR;
- termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
- termhdr->error_code = RDMAP_ACCESS;
- break;
- case NES_AEQE_AEID_AMP_TO_WRAP:
- flush_code = IB_WC_REM_ACCESS_ERR;
- termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
- termhdr->error_code = RDMAP_TO_WRAP;
- break;
- case NES_AEQE_AEID_AMP_BAD_PD:
- switch (iwarp_opcode(nesqp, aeq_info)) {
- case IWARP_OPCODE_WRITE:
- flush_code = IB_WC_LOC_PROT_ERR;
- termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
- termhdr->error_code = DDP_TAGGED_UNASSOC_STAG;
- break;
- case IWARP_OPCODE_SEND_INV:
- case IWARP_OPCODE_SEND_SE_INV:
- flush_code = IB_WC_REM_ACCESS_ERR;
- termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
- termhdr->error_code = RDMAP_CANT_INV_STAG;
- break;
- default:
- flush_code = IB_WC_REM_ACCESS_ERR;
- termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
- termhdr->error_code = RDMAP_UNASSOC_STAG;
- }
- break;
- case NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
- flush_code = IB_WC_LOC_LEN_ERR;
- termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP;
- termhdr->error_code = MPA_MARKER;
- break;
- case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
- flush_code = IB_WC_GENERAL_ERR;
- termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP;
- termhdr->error_code = MPA_CRC;
- break;
- case NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE:
- case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL:
- flush_code = IB_WC_LOC_LEN_ERR;
- termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC;
- termhdr->error_code = DDP_CATASTROPHIC_LOCAL;
- break;
- case NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC:
- case NES_AEQE_AEID_DDP_NO_L_BIT:
- flush_code = IB_WC_FATAL_ERR;
- termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC;
- termhdr->error_code = DDP_CATASTROPHIC_LOCAL;
- break;
- case NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN:
- case NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
- flush_code = IB_WC_GENERAL_ERR;
- termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
- termhdr->error_code = DDP_UNTAGGED_INV_MSN_RANGE;
- break;
- case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
- flush_code = IB_WC_LOC_LEN_ERR;
- termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
- termhdr->error_code = DDP_UNTAGGED_INV_TOO_LONG;
- break;
- case NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION:
- flush_code = IB_WC_GENERAL_ERR;
- if (is_tagged) {
- termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
- termhdr->error_code = DDP_TAGGED_INV_DDP_VER;
- } else {
- termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
- termhdr->error_code = DDP_UNTAGGED_INV_DDP_VER;
- }
- break;
- case NES_AEQE_AEID_DDP_UBE_INVALID_MO:
- flush_code = IB_WC_GENERAL_ERR;
- termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
- termhdr->error_code = DDP_UNTAGGED_INV_MO;
- break;
- case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
- flush_code = IB_WC_REM_OP_ERR;
- termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
- termhdr->error_code = DDP_UNTAGGED_INV_MSN_NO_BUF;
- break;
- case NES_AEQE_AEID_DDP_UBE_INVALID_QN:
- flush_code = IB_WC_GENERAL_ERR;
- termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
- termhdr->error_code = DDP_UNTAGGED_INV_QN;
- break;
- case NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION:
- flush_code = IB_WC_GENERAL_ERR;
- termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
- termhdr->error_code = RDMAP_INV_RDMAP_VER;
- break;
- case NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE:
- flush_code = IB_WC_LOC_QP_OP_ERR;
- termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
- termhdr->error_code = RDMAP_UNEXPECTED_OP;
- break;
- default:
- flush_code = IB_WC_FATAL_ERR;
- termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
- termhdr->error_code = RDMAP_UNSPECIFIED;
- break;
- }
-
- if (copy_len)
- memcpy(termhdr + 1, pkt, copy_len);
-
- if ((flush_code) && ((NES_AEQE_INBOUND_RDMA & aeq_info) == 0)) {
- if (aeq_info & NES_AEQE_SQ)
- nesqp->term_sq_flush_code = flush_code;
- else
- nesqp->term_rq_flush_code = flush_code;
- }
-
- return sizeof(struct nes_terminate_hdr) + copy_len;
-}
-
-static void nes_terminate_connection(struct nes_device *nesdev, struct nes_qp *nesqp,
- struct nes_hw_aeqe *aeqe, enum ib_event_type eventtype)
-{
- u64 context;
- unsigned long flags;
- u32 aeq_info;
- u16 async_event_id;
- u8 tcp_state;
- u8 iwarp_state;
- u32 termlen = 0;
- u32 mod_qp_flags = NES_CQP_QP_IWARP_STATE_TERMINATE |
- NES_CQP_QP_TERM_DONT_SEND_FIN;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
-
- if (nesqp->term_flags & NES_TERM_SENT)
- return; /* Sanity check */
-
- aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
- tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
- iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
- async_event_id = (u16)aeq_info;
-
- context = (unsigned long)nesadapter->qp_table[le32_to_cpu(
- aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN];
-	if (WARN_ON(!context))
-		return;
-
- nesqp = (struct nes_qp *)(unsigned long)context;
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = iwarp_state;
- nesqp->hw_tcp_state = tcp_state;
- nesqp->last_aeq = async_event_id;
- nesqp->terminate_eventtype = eventtype;
- spin_unlock_irqrestore(&nesqp->lock, flags);
-
- if (nesadapter->send_term_ok)
- termlen = nes_bld_terminate_hdr(nesqp, async_event_id, aeq_info);
- else
- mod_qp_flags |= NES_CQP_QP_TERM_DONT_SEND_TERM_MSG;
-
- if (!nesdev->iw_status) {
- nesqp->term_flags = NES_TERM_DONE;
- nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_ERROR, 0, 0);
- nes_cm_disconn(nesqp);
- } else {
- nes_terminate_start_timer(nesqp);
- nesqp->term_flags |= NES_TERM_SENT;
- nes_hw_modify_qp(nesdev, nesqp, mod_qp_flags, termlen, 0);
- }
-}
-
-static void nes_terminate_send_fin(struct nes_device *nesdev,
- struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe)
-{
- u32 aeq_info;
- u16 async_event_id;
- u8 tcp_state;
- u8 iwarp_state;
- unsigned long flags;
-
- aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
- tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
- iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
- async_event_id = (u16)aeq_info;
-
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = iwarp_state;
- nesqp->hw_tcp_state = tcp_state;
- nesqp->last_aeq = async_event_id;
- spin_unlock_irqrestore(&nesqp->lock, flags);
-
- /* Send the fin only */
- nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_TERMINATE |
- NES_CQP_QP_TERM_DONT_SEND_TERM_MSG, 0, 0);
-}
-
-/* Clean up after a terminate is sent or received */
-static void nes_terminate_done(struct nes_qp *nesqp, int timeout_occurred)
-{
- u32 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
- unsigned long flags;
- struct nes_vnic *nesvnic = to_nesvnic(nesqp->ibqp.device);
- struct nes_device *nesdev = nesvnic->nesdev;
- u8 first_time = 0;
-
- spin_lock_irqsave(&nesqp->lock, flags);
- if (nesqp->hte_added) {
- nesqp->hte_added = 0;
- next_iwarp_state |= NES_CQP_QP_DEL_HTE;
- }
-
- first_time = (nesqp->term_flags & NES_TERM_DONE) == 0;
- nesqp->term_flags |= NES_TERM_DONE;
- spin_unlock_irqrestore(&nesqp->lock, flags);
-
- /* Make sure we go through this only once */
- if (first_time) {
- if (timeout_occurred == 0)
- del_timer(&nesqp->terminate_timer);
- else
- next_iwarp_state |= NES_CQP_QP_RESET;
-
- nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
- nes_cm_disconn(nesqp);
- }
-}
-
-static void nes_terminate_received(struct nes_device *nesdev,
- struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe)
-{
- u32 aeq_info;
- u8 *pkt;
- u32 *mpa;
- u8 ddp_ctl;
- u8 rdma_ctl;
- u16 aeq_id = 0;
-
- aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
- if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
-		/*
-		 * Terminate is not a performance path, so the silicon
-		 * did not validate the frame - do it now.
-		 */
- pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
- mpa = (u32 *)locate_mpa(pkt, aeq_info);
- ddp_ctl = (be32_to_cpu(mpa[0]) >> 8) & 0xff;
- rdma_ctl = be32_to_cpu(mpa[0]) & 0xff;
- if ((ddp_ctl & 0xc0) != 0x40)
- aeq_id = NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC;
- else if ((ddp_ctl & 0x03) != 1)
- aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION;
- else if (be32_to_cpu(mpa[2]) != 2)
- aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_QN;
- else if (be32_to_cpu(mpa[3]) != 1)
- aeq_id = NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN;
- else if (be32_to_cpu(mpa[4]) != 0)
- aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_MO;
- else if ((rdma_ctl & 0xc0) != 0x40)
- aeq_id = NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION;
-
- if (aeq_id) {
- /* Bad terminate recvd - send back a terminate */
- aeq_info = (aeq_info & 0xffff0000) | aeq_id;
- aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info);
- nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
- return;
- }
- }
-
- nesqp->term_flags |= NES_TERM_RCVD;
- nesqp->terminate_eventtype = IB_EVENT_QP_FATAL;
- nes_terminate_start_timer(nesqp);
- nes_terminate_send_fin(nesdev, nesqp, aeqe);
-}
-
-/* Timeout routine in case terminate fails to complete */
-void nes_terminate_timeout(unsigned long context)
-{
- struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context;
-
- nes_terminate_done(nesqp, 1);
-}
-
-/* Set a timer in case hw cannot complete the terminate sequence */
-static void nes_terminate_start_timer(struct nes_qp *nesqp)
-{
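-	/* give the hardware one second to complete the terminate sequence */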
- mod_timer(&nesqp->terminate_timer, (jiffies + HZ));
-}
-
-/**
- * nes_process_iwarp_aeqe - handle an iWARP asynchronous event for a QP or CQ
- */
-static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
- struct nes_hw_aeqe *aeqe)
-{
- u64 context;
- unsigned long flags;
- struct nes_qp *nesqp;
- struct nes_hw_cq *hw_cq;
- struct nes_cq *nescq;
- int resource_allocated;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- u32 aeq_info;
- u32 next_iwarp_state = 0;
- u32 aeqe_cq_id;
- u16 async_event_id;
- u8 tcp_state;
- u8 iwarp_state;
- struct ib_event ibevent;
-
- nes_debug(NES_DBG_AEQ, "\n");
- aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
- if ((NES_AEQE_INBOUND_RDMA & aeq_info) || (!(NES_AEQE_QP & aeq_info))) {
- context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
- context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
- } else {
- context = (unsigned long)nesadapter->qp_table[le32_to_cpu(
- aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN];
- BUG_ON(!context);
- }
-
- /* context is nesqp unless async_event_id == CQ ERROR */
- nesqp = (struct nes_qp *)(unsigned long)context;
- async_event_id = (u16)aeq_info;
- tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
- iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
- nes_debug(NES_DBG_AEQ, "aeid = 0x%04X, qp-cq id = %d, aeqe = %p,"
- " Tcp state = %s, iWARP state = %s\n",
- async_event_id,
- le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe,
- nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]);
-
- aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]);
- if (aeq_info & NES_AEQE_QP) {
- if (!nes_is_resource_allocated(nesadapter,
- nesadapter->allocated_qps,
- aeqe_cq_id))
- return;
- }
-
- switch (async_event_id) {
- case NES_AEQE_AEID_LLP_FIN_RECEIVED:
- if (nesqp->term_flags)
- return; /* Ignore it, wait for close complete */
-
- if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
- if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) &&
- (nesqp->ibqp_state == IB_QPS_RTS)) {
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = iwarp_state;
- nesqp->hw_tcp_state = tcp_state;
- nesqp->last_aeq = async_event_id;
- next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
- nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
- nes_cm_disconn(nesqp);
- }
- nesqp->cm_id->add_ref(nesqp->cm_id);
- schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
- NES_TIMER_TYPE_CLOSE, 1, 0);
- nes_debug(NES_DBG_AEQ, "QP%u Not decrementing QP refcount (%d),"
- " need ae to finish up, original_last_aeq = 0x%04X."
- " last_aeq = 0x%04X, scheduling timer. TCP state = %d\n",
- nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
- async_event_id, nesqp->last_aeq, tcp_state);
- }
- break;
- case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = iwarp_state;
- nesqp->hw_tcp_state = tcp_state;
- nesqp->last_aeq = async_event_id;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_cm_disconn(nesqp);
- break;
-
- case NES_AEQE_AEID_RESET_SENT:
- tcp_state = NES_AEQE_TCP_STATE_CLOSED;
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = iwarp_state;
- nesqp->hw_tcp_state = tcp_state;
- nesqp->last_aeq = async_event_id;
- nesqp->hte_added = 0;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE;
- nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
- nes_cm_disconn(nesqp);
- break;
-
- case NES_AEQE_AEID_LLP_CONNECTION_RESET:
- if (atomic_read(&nesqp->close_timer_started))
- return;
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = iwarp_state;
- nesqp->hw_tcp_state = tcp_state;
- nesqp->last_aeq = async_event_id;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_cm_disconn(nesqp);
- break;
-
- case NES_AEQE_AEID_TERMINATE_SENT:
- nes_terminate_send_fin(nesdev, nesqp, aeqe);
- break;
-
- case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED:
- nes_terminate_received(nesdev, nesqp, aeqe);
- break;
-
- case NES_AEQE_AEID_AMP_BAD_STAG_KEY:
- case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
- case NES_AEQE_AEID_AMP_UNALLOCATED_STAG:
- case NES_AEQE_AEID_AMP_INVALID_STAG:
- case NES_AEQE_AEID_AMP_RIGHTS_VIOLATION:
- case NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
- case NES_AEQE_AEID_PRIV_OPERATION_DENIED:
- case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
- case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
- case NES_AEQE_AEID_AMP_TO_WRAP:
- printk(KERN_ERR PFX "QP[%u] async_event_id=0x%04X IB_EVENT_QP_ACCESS_ERR\n",
- nesqp->hwqp.qp_id, async_event_id);
- nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_ACCESS_ERR);
- break;
-
- case NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE:
- case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL:
- case NES_AEQE_AEID_DDP_UBE_INVALID_MO:
- case NES_AEQE_AEID_DDP_UBE_INVALID_QN:
- if (iwarp_opcode(nesqp, aeq_info) > IWARP_OPCODE_TERM) {
- aeq_info &= 0xffff0000;
- aeq_info |= NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE;
- aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info);
- }
-
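-		/* fall through */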
- case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE:
- case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES:
- case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
- case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
- case NES_AEQE_AEID_AMP_BAD_QP:
- case NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
- case NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC:
- case NES_AEQE_AEID_DDP_NO_L_BIT:
- case NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN:
- case NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
- case NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION:
- case NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION:
- case NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE:
- case NES_AEQE_AEID_AMP_BAD_PD:
- case NES_AEQE_AEID_AMP_FASTREG_SHARED:
- case NES_AEQE_AEID_AMP_FASTREG_VALID_STAG:
- case NES_AEQE_AEID_AMP_FASTREG_MW_STAG:
- case NES_AEQE_AEID_AMP_FASTREG_INVALID_RIGHTS:
- case NES_AEQE_AEID_AMP_FASTREG_PBL_TABLE_OVERFLOW:
- case NES_AEQE_AEID_AMP_FASTREG_INVALID_LENGTH:
- case NES_AEQE_AEID_AMP_INVALIDATE_SHARED:
- case NES_AEQE_AEID_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS:
- case NES_AEQE_AEID_AMP_MWBIND_VALID_STAG:
- case NES_AEQE_AEID_AMP_MWBIND_OF_MR_STAG:
- case NES_AEQE_AEID_AMP_MWBIND_TO_ZERO_BASED_STAG:
- case NES_AEQE_AEID_AMP_MWBIND_TO_MW_STAG:
- case NES_AEQE_AEID_AMP_MWBIND_INVALID_RIGHTS:
- case NES_AEQE_AEID_AMP_MWBIND_INVALID_BOUNDS:
- case NES_AEQE_AEID_AMP_MWBIND_TO_INVALID_PARENT:
- case NES_AEQE_AEID_AMP_MWBIND_BIND_DISABLED:
- case NES_AEQE_AEID_BAD_CLOSE:
- case NES_AEQE_AEID_RDMA_READ_WHILE_ORD_ZERO:
- case NES_AEQE_AEID_STAG_ZERO_INVALID:
- case NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST:
- case NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
- printk(KERN_ERR PFX "QP[%u] async_event_id=0x%04X IB_EVENT_QP_FATAL\n",
- nesqp->hwqp.qp_id, async_event_id);
- print_ip(nesqp->cm_node);
- if (!atomic_read(&nesqp->close_timer_started))
- nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
- break;
-
- case NES_AEQE_AEID_CQ_OPERATION_ERROR:
- context <<= 1;
- nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u, %p\n",
- le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), (void *)(unsigned long)context);
- resource_allocated = nes_is_resource_allocated(nesadapter, nesadapter->allocated_cqs,
- le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
- if (resource_allocated) {
- printk(KERN_ERR PFX "%s: Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u\n",
- __func__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
- hw_cq = (struct nes_hw_cq *)(unsigned long)context;
- if (hw_cq) {
- nescq = container_of(hw_cq, struct nes_cq, hw_cq);
- if (nescq->ibcq.event_handler) {
- ibevent.device = nescq->ibcq.device;
- ibevent.event = IB_EVENT_CQ_ERR;
- ibevent.element.cq = &nescq->ibcq;
- nescq->ibcq.event_handler(&ibevent, nescq->ibcq.cq_context);
- }
- }
- }
- break;
-
- default:
- nes_debug(NES_DBG_AEQ, "Processing an iWARP related AE for QP, misc = 0x%04X\n",
- async_event_id);
- break;
- }
-
-}
-
-/**
- * nes_iwarp_ce_handler
- */
-void nes_iwarp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *hw_cq)
-{
- struct nes_cq *nescq = container_of(hw_cq, struct nes_cq, hw_cq);
-
- /* nes_debug(NES_DBG_CQ, "Processing completion event for iWARP CQ%u.\n",
- nescq->hw_cq.cq_number); */
- nes_write32(nesdev->regs+NES_CQ_ACK, nescq->hw_cq.cq_number);
-
- if (nescq->ibcq.comp_handler)
- nescq->ibcq.comp_handler(&nescq->ibcq, nescq->ibcq.cq_context);
-
- return;
-}
-
-
-/**
- * nes_manage_apbvt()
- */
-int nes_manage_apbvt(struct nes_vnic *nesvnic, u32 accel_local_port,
- u32 nic_index, u32 add_port)
-{
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_hw_cqp_wqe *cqp_wqe;
- struct nes_cqp_request *cqp_request;
- int ret = 0;
- u16 major_code;
-
- /* Send manage APBVT request to CQP */
- cqp_request = nes_get_cqp_request(nesdev);
- if (cqp_request == NULL) {
- nes_debug(NES_DBG_QP, "Failed to get a cqp_request.\n");
- return -ENOMEM;
- }
- cqp_request->waiting = 1;
- cqp_wqe = &cqp_request->cqp_wqe;
-
- nes_debug(NES_DBG_QP, "%s APBV for local port=%u(0x%04x), nic_index=%u\n",
- (add_port == NES_MANAGE_APBVT_ADD) ? "ADD" : "DEL",
- accel_local_port, accel_local_port, nic_index);
-
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, (NES_CQP_MANAGE_APBVT |
- ((add_port == NES_MANAGE_APBVT_ADD) ? NES_CQP_APBVT_ADD : 0)));
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
- ((nic_index << NES_CQP_APBVT_NIC_SHIFT) | accel_local_port));
-
- nes_debug(NES_DBG_QP, "Waiting for CQP completion for APBVT.\n");
-
- atomic_set(&cqp_request->refcount, 2);
- nes_post_cqp_request(nesdev, cqp_request);
-
- if (add_port == NES_MANAGE_APBVT_ADD)
- ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
- NES_EVENT_TIMEOUT);
- nes_debug(NES_DBG_QP, "Completed, ret=%u, CQP Major:Minor codes = 0x%04X:0x%04X\n",
- ret, cqp_request->major_code, cqp_request->minor_code);
- major_code = cqp_request->major_code;
-
- nes_put_cqp_request(nesdev, cqp_request);
-
- if (!ret)
- return -ETIME;
- else if (major_code)
- return -EIO;
- else
- return 0;
-}
-
-
-/**
- * nes_manage_arp_cache
- */
-void nes_manage_arp_cache(struct net_device *netdev, unsigned char *mac_addr,
- u32 ip_addr, u32 action)
-{
- struct nes_hw_cqp_wqe *cqp_wqe;
- struct nes_vnic *nesvnic = netdev_priv(netdev);
- struct nes_device *nesdev;
- struct nes_cqp_request *cqp_request;
- int arp_index;
-
- nesdev = nesvnic->nesdev;
- arp_index = nes_arp_table(nesdev, ip_addr, mac_addr, action);
- if (arp_index == -1) {
- return;
- }
-
- /* update the ARP entry */
- cqp_request = nes_get_cqp_request(nesdev);
- if (cqp_request == NULL) {
- nes_debug(NES_DBG_NETDEV, "Failed to get a cqp_request.\n");
- return;
- }
- cqp_request->waiting = 0;
- cqp_wqe = &cqp_request->cqp_wqe;
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
-
- cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(
- NES_CQP_MANAGE_ARP_CACHE | NES_CQP_ARP_PERM);
- cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(
- (u32)PCI_FUNC(nesdev->pcidev->devfn) << NES_CQP_ARP_AEQ_INDEX_SHIFT);
- cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(arp_index);
-
- if (action == NES_ARP_ADD) {
- cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_ARP_VALID);
- cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = cpu_to_le32(
- (((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) |
- (((u32)mac_addr[4]) << 8) | (u32)mac_addr[5]);
- cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = cpu_to_le32(
- (((u32)mac_addr[0]) << 8) | (u32)mac_addr[1]);
- } else {
- cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = 0;
- cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = 0;
- }
-
- nes_debug(NES_DBG_NETDEV, "Not waiting for CQP, cqp.sq_head=%u, cqp.sq_tail=%u\n",
- nesdev->cqp.sq_head, nesdev->cqp.sq_tail);
-
- atomic_set(&cqp_request->refcount, 1);
- nes_post_cqp_request(nesdev, cqp_request);
-}
-
-
-/**
- * flush_wqes
- */
-void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp,
- u32 which_wq, u32 wait_completion)
-{
- struct nes_cqp_request *cqp_request;
- struct nes_hw_cqp_wqe *cqp_wqe;
- u32 sq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH;
- u32 rq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH;
- int ret;
-
- cqp_request = nes_get_cqp_request(nesdev);
- if (cqp_request == NULL) {
- nes_debug(NES_DBG_QP, "Failed to get a cqp_request.\n");
- return;
- }
- if (wait_completion) {
- cqp_request->waiting = 1;
- atomic_set(&cqp_request->refcount, 2);
- } else {
- cqp_request->waiting = 0;
- }
- cqp_wqe = &cqp_request->cqp_wqe;
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
-
- /* If wqe in error was identified, set code to be put into cqe */
- if ((nesqp->term_sq_flush_code) && (which_wq & NES_CQP_FLUSH_SQ)) {
- which_wq |= NES_CQP_FLUSH_MAJ_MIN;
- sq_code = (CQE_MAJOR_DRV << 16) | nesqp->term_sq_flush_code;
- nesqp->term_sq_flush_code = 0;
- }
-
- if ((nesqp->term_rq_flush_code) && (which_wq & NES_CQP_FLUSH_RQ)) {
- which_wq |= NES_CQP_FLUSH_MAJ_MIN;
- rq_code = (CQE_MAJOR_DRV << 16) | nesqp->term_rq_flush_code;
- nesqp->term_rq_flush_code = 0;
- }
-
- if (which_wq & NES_CQP_FLUSH_MAJ_MIN) {
- cqp_wqe->wqe_words[NES_CQP_QP_WQE_FLUSH_SQ_CODE] = cpu_to_le32(sq_code);
- cqp_wqe->wqe_words[NES_CQP_QP_WQE_FLUSH_RQ_CODE] = cpu_to_le32(rq_code);
- }
-
- cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] =
- cpu_to_le32(NES_CQP_FLUSH_WQES | which_wq);
- cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesqp->hwqp.qp_id);
-
- nes_post_cqp_request(nesdev, cqp_request);
-
- if (wait_completion) {
- /* Wait for CQP */
- ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
- NES_EVENT_TIMEOUT);
- nes_debug(NES_DBG_QP, "Flush SQ QP WQEs completed, ret=%u,"
- " CQP Major:Minor codes = 0x%04X:0x%04X\n",
- ret, cqp_request->major_code, cqp_request->minor_code);
- nes_put_cqp_request(nesdev, cqp_request);
- }
-}
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
deleted file mode 100644
index d748e4b31b8d..000000000000
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ /dev/null
@@ -1,1392 +0,0 @@
-/*
-* Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenIB.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*/
-
-#ifndef __NES_HW_H
-#define __NES_HW_H
-
-#include <linux/inet_lro.h>
-
-#define NES_PHY_TYPE_CX4 1
-#define NES_PHY_TYPE_1G 2
-#define NES_PHY_TYPE_ARGUS 4
-#define NES_PHY_TYPE_PUMA_1G 5
-#define NES_PHY_TYPE_PUMA_10G 6
-#define NES_PHY_TYPE_GLADIUS 7
-#define NES_PHY_TYPE_SFP_D 8
-#define NES_PHY_TYPE_KR 9
-
-#define NES_MULTICAST_PF_MAX 8
-#define NES_A0 3
-
-#define NES_ENABLE_PAU 0x07000001
-#define NES_DISABLE_PAU 0x07000000
-#define NES_PAU_COUNTER 10
-#define NES_CQP_OPCODE_MASK 0x3f
-
-enum pci_regs {
- NES_INT_STAT = 0x0000,
- NES_INT_MASK = 0x0004,
- NES_INT_PENDING = 0x0008,
- NES_INTF_INT_STAT = 0x000C,
- NES_INTF_INT_MASK = 0x0010,
- NES_TIMER_STAT = 0x0014,
- NES_PERIODIC_CONTROL = 0x0018,
- NES_ONE_SHOT_CONTROL = 0x001C,
- NES_EEPROM_COMMAND = 0x0020,
- NES_EEPROM_DATA = 0x0024,
- NES_FLASH_COMMAND = 0x0028,
- NES_FLASH_DATA = 0x002C,
- NES_SOFTWARE_RESET = 0x0030,
- NES_CQ_ACK = 0x0034,
- NES_WQE_ALLOC = 0x0040,
- NES_CQE_ALLOC = 0x0044,
- NES_AEQ_ALLOC = 0x0048
-};
-
-enum indexed_regs {
- NES_IDX_CREATE_CQP_LOW = 0x0000,
- NES_IDX_CREATE_CQP_HIGH = 0x0004,
- NES_IDX_QP_CONTROL = 0x0040,
- NES_IDX_FLM_CONTROL = 0x0080,
- NES_IDX_INT_CPU_STATUS = 0x00a0,
- NES_IDX_GPR_TRIGGER = 0x00bc,
- NES_IDX_GPIO_CONTROL = 0x00f0,
- NES_IDX_GPIO_DATA = 0x00f4,
- NES_IDX_GPR2 = 0x010c,
- NES_IDX_TCP_CONFIG0 = 0x01e4,
- NES_IDX_TCP_TIMER_CONFIG = 0x01ec,
- NES_IDX_TCP_NOW = 0x01f0,
- NES_IDX_QP_MAX_CFG_SIZES = 0x0200,
- NES_IDX_QP_CTX_SIZE = 0x0218,
- NES_IDX_TCP_TIMER_SIZE0 = 0x0238,
- NES_IDX_TCP_TIMER_SIZE1 = 0x0240,
- NES_IDX_ARP_CACHE_SIZE = 0x0258,
- NES_IDX_CQ_CTX_SIZE = 0x0260,
- NES_IDX_MRT_SIZE = 0x0278,
- NES_IDX_PBL_REGION_SIZE = 0x0280,
- NES_IDX_IRRQ_COUNT = 0x02b0,
- NES_IDX_RX_WINDOW_BUFFER_PAGE_TABLE_SIZE = 0x02f0,
- NES_IDX_RX_WINDOW_BUFFER_SIZE = 0x0300,
- NES_IDX_DST_IP_ADDR = 0x0400,
- NES_IDX_PCIX_DIAG = 0x08e8,
- NES_IDX_MPP_DEBUG = 0x0a00,
- NES_IDX_PORT_RX_DISCARDS = 0x0a30,
- NES_IDX_PORT_TX_DISCARDS = 0x0a34,
- NES_IDX_MPP_LB_DEBUG = 0x0b00,
- NES_IDX_DENALI_CTL_22 = 0x1058,
- NES_IDX_MAC_TX_CONTROL = 0x2000,
- NES_IDX_MAC_TX_CONFIG = 0x2004,
- NES_IDX_MAC_TX_PAUSE_QUANTA = 0x2008,
- NES_IDX_MAC_RX_CONTROL = 0x200c,
- NES_IDX_MAC_RX_CONFIG = 0x2010,
- NES_IDX_MAC_EXACT_MATCH_BOTTOM = 0x201c,
- NES_IDX_MAC_MDIO_CONTROL = 0x2084,
- NES_IDX_MAC_TX_OCTETS_LOW = 0x2100,
- NES_IDX_MAC_TX_OCTETS_HIGH = 0x2104,
- NES_IDX_MAC_TX_FRAMES_LOW = 0x2108,
- NES_IDX_MAC_TX_FRAMES_HIGH = 0x210c,
- NES_IDX_MAC_TX_PAUSE_FRAMES = 0x2118,
- NES_IDX_MAC_TX_ERRORS = 0x2138,
- NES_IDX_MAC_RX_OCTETS_LOW = 0x213c,
- NES_IDX_MAC_RX_OCTETS_HIGH = 0x2140,
- NES_IDX_MAC_RX_FRAMES_LOW = 0x2144,
- NES_IDX_MAC_RX_FRAMES_HIGH = 0x2148,
- NES_IDX_MAC_RX_BC_FRAMES_LOW = 0x214c,
- NES_IDX_MAC_RX_MC_FRAMES_HIGH = 0x2150,
- NES_IDX_MAC_RX_PAUSE_FRAMES = 0x2154,
- NES_IDX_MAC_RX_SHORT_FRAMES = 0x2174,
- NES_IDX_MAC_RX_OVERSIZED_FRAMES = 0x2178,
- NES_IDX_MAC_RX_JABBER_FRAMES = 0x217c,
- NES_IDX_MAC_RX_CRC_ERR_FRAMES = 0x2180,
- NES_IDX_MAC_RX_LENGTH_ERR_FRAMES = 0x2184,
- NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES = 0x2188,
- NES_IDX_MAC_INT_STATUS = 0x21f0,
- NES_IDX_MAC_INT_MASK = 0x21f4,
- NES_IDX_PHY_PCS_CONTROL_STATUS0 = 0x2800,
- NES_IDX_PHY_PCS_CONTROL_STATUS1 = 0x2a00,
- NES_IDX_ETH_SERDES_COMMON_CONTROL0 = 0x2808,
- NES_IDX_ETH_SERDES_COMMON_CONTROL1 = 0x2a08,
- NES_IDX_ETH_SERDES_COMMON_STATUS0 = 0x280c,
- NES_IDX_ETH_SERDES_COMMON_STATUS1 = 0x2a0c,
- NES_IDX_ETH_SERDES_TX_EMP0 = 0x2810,
- NES_IDX_ETH_SERDES_TX_EMP1 = 0x2a10,
- NES_IDX_ETH_SERDES_TX_DRIVE0 = 0x2814,
- NES_IDX_ETH_SERDES_TX_DRIVE1 = 0x2a14,
- NES_IDX_ETH_SERDES_RX_MODE0 = 0x2818,
- NES_IDX_ETH_SERDES_RX_MODE1 = 0x2a18,
- NES_IDX_ETH_SERDES_RX_SIGDET0 = 0x281c,
- NES_IDX_ETH_SERDES_RX_SIGDET1 = 0x2a1c,
- NES_IDX_ETH_SERDES_BYPASS0 = 0x2820,
- NES_IDX_ETH_SERDES_BYPASS1 = 0x2a20,
- NES_IDX_ETH_SERDES_LOOPBACK_CONTROL0 = 0x2824,
- NES_IDX_ETH_SERDES_LOOPBACK_CONTROL1 = 0x2a24,
- NES_IDX_ETH_SERDES_RX_EQ_CONTROL0 = 0x2828,
- NES_IDX_ETH_SERDES_RX_EQ_CONTROL1 = 0x2a28,
- NES_IDX_ETH_SERDES_RX_EQ_STATUS0 = 0x282c,
- NES_IDX_ETH_SERDES_RX_EQ_STATUS1 = 0x2a2c,
- NES_IDX_ETH_SERDES_CDR_RESET0 = 0x2830,
- NES_IDX_ETH_SERDES_CDR_RESET1 = 0x2a30,
- NES_IDX_ETH_SERDES_CDR_CONTROL0 = 0x2834,
- NES_IDX_ETH_SERDES_CDR_CONTROL1 = 0x2a34,
- NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0 = 0x2838,
- NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1 = 0x2a38,
- NES_IDX_ENDNODE0_NSTAT_RX_DISCARD = 0x3080,
- NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO = 0x3000,
- NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI = 0x3004,
- NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO = 0x3008,
- NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI = 0x300c,
- NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO = 0x7000,
- NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI = 0x7004,
- NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO = 0x7008,
- NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI = 0x700c,
- NES_IDX_WQM_CONFIG0 = 0x5000,
- NES_IDX_WQM_CONFIG1 = 0x5004,
- NES_IDX_CM_CONFIG = 0x5100,
- NES_IDX_NIC_LOGPORT_TO_PHYPORT = 0x6000,
- NES_IDX_NIC_PHYPORT_TO_USW = 0x6008,
- NES_IDX_NIC_ACTIVE = 0x6010,
- NES_IDX_NIC_UNICAST_ALL = 0x6018,
- NES_IDX_NIC_MULTICAST_ALL = 0x6020,
- NES_IDX_NIC_MULTICAST_ENABLE = 0x6028,
- NES_IDX_NIC_BROADCAST_ON = 0x6030,
- NES_IDX_USED_CHUNKS_TX = 0x60b0,
- NES_IDX_TX_POOL_SIZE = 0x60b8,
- NES_IDX_QUAD_HASH_TABLE_SIZE = 0x6148,
- NES_IDX_PERFECT_FILTER_LOW = 0x6200,
- NES_IDX_PERFECT_FILTER_HIGH = 0x6204,
- NES_IDX_IPV4_TCP_REXMITS = 0x7080,
- NES_IDX_DEBUG_ERROR_CONTROL_STATUS = 0x913c,
- NES_IDX_DEBUG_ERROR_MASKS0 = 0x9140,
- NES_IDX_DEBUG_ERROR_MASKS1 = 0x9144,
- NES_IDX_DEBUG_ERROR_MASKS2 = 0x9148,
- NES_IDX_DEBUG_ERROR_MASKS3 = 0x914c,
- NES_IDX_DEBUG_ERROR_MASKS4 = 0x9150,
- NES_IDX_DEBUG_ERROR_MASKS5 = 0x9154,
-};
-
-#define NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE 1
-#define NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE (1 << 17)
-
-enum nes_cqp_opcodes {
- NES_CQP_CREATE_QP = 0x00,
- NES_CQP_MODIFY_QP = 0x01,
- NES_CQP_DESTROY_QP = 0x02,
- NES_CQP_CREATE_CQ = 0x03,
- NES_CQP_MODIFY_CQ = 0x04,
- NES_CQP_DESTROY_CQ = 0x05,
- NES_CQP_ALLOCATE_STAG = 0x09,
- NES_CQP_REGISTER_STAG = 0x0a,
- NES_CQP_QUERY_STAG = 0x0b,
- NES_CQP_REGISTER_SHARED_STAG = 0x0c,
- NES_CQP_DEALLOCATE_STAG = 0x0d,
- NES_CQP_MANAGE_ARP_CACHE = 0x0f,
- NES_CQP_DOWNLOAD_SEGMENT = 0x10,
- NES_CQP_SUSPEND_QPS = 0x11,
- NES_CQP_UPLOAD_CONTEXT = 0x13,
- NES_CQP_CREATE_CEQ = 0x16,
- NES_CQP_DESTROY_CEQ = 0x18,
- NES_CQP_CREATE_AEQ = 0x19,
- NES_CQP_DESTROY_AEQ = 0x1b,
- NES_CQP_LMI_ACCESS = 0x20,
- NES_CQP_FLUSH_WQES = 0x22,
- NES_CQP_MANAGE_APBVT = 0x23,
- NES_CQP_MANAGE_QUAD_HASH = 0x25
-};
-
-enum nes_cqp_wqe_word_idx {
- NES_CQP_WQE_OPCODE_IDX = 0,
- NES_CQP_WQE_ID_IDX = 1,
- NES_CQP_WQE_COMP_CTX_LOW_IDX = 2,
- NES_CQP_WQE_COMP_CTX_HIGH_IDX = 3,
- NES_CQP_WQE_COMP_SCRATCH_LOW_IDX = 4,
- NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX = 5,
-};
-
-enum nes_cqp_wqe_word_download_idx { /* format differs from other cqp ops */
- NES_CQP_WQE_DL_OPCODE_IDX = 0,
- NES_CQP_WQE_DL_COMP_CTX_LOW_IDX = 1,
- NES_CQP_WQE_DL_COMP_CTX_HIGH_IDX = 2,
- NES_CQP_WQE_DL_LENGTH_0_TOTAL_IDX = 3
- /* For index values 4-15 use NES_NIC_SQ_WQE_ values */
-};
-
-enum nes_cqp_cq_wqeword_idx {
- NES_CQP_CQ_WQE_PBL_LOW_IDX = 6,
- NES_CQP_CQ_WQE_PBL_HIGH_IDX = 7,
- NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX = 8,
- NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX = 9,
- NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX = 10,
-};
-
-enum nes_cqp_stag_wqeword_idx {
- NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX = 1,
- NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX = 6,
- NES_CQP_STAG_WQE_LEN_LOW_IDX = 7,
- NES_CQP_STAG_WQE_STAG_IDX = 8,
- NES_CQP_STAG_WQE_VA_LOW_IDX = 10,
- NES_CQP_STAG_WQE_VA_HIGH_IDX = 11,
- NES_CQP_STAG_WQE_PA_LOW_IDX = 12,
- NES_CQP_STAG_WQE_PA_HIGH_IDX = 13,
- NES_CQP_STAG_WQE_PBL_LEN_IDX = 14
-};
-
-#define NES_CQP_OP_LOGICAL_PORT_SHIFT 26
-#define NES_CQP_OP_IWARP_STATE_SHIFT 28
-#define NES_CQP_OP_TERMLEN_SHIFT 28
-
-enum nes_cqp_qp_bits {
- NES_CQP_QP_ARP_VALID = (1<<8),
- NES_CQP_QP_WINBUF_VALID = (1<<9),
- NES_CQP_QP_CONTEXT_VALID = (1<<10),
- NES_CQP_QP_ORD_VALID = (1<<11),
- NES_CQP_QP_WINBUF_DATAIND_EN = (1<<12),
- NES_CQP_QP_VIRT_WQS = (1<<13),
- NES_CQP_QP_DEL_HTE = (1<<14),
- NES_CQP_QP_CQS_VALID = (1<<15),
- NES_CQP_QP_TYPE_TSA = 0,
- NES_CQP_QP_TYPE_IWARP = (1<<16),
- NES_CQP_QP_TYPE_CQP = (4<<16),
- NES_CQP_QP_TYPE_NIC = (5<<16),
- NES_CQP_QP_MSS_CHG = (1<<20),
- NES_CQP_QP_STATIC_RESOURCES = (1<<21),
- NES_CQP_QP_IGNORE_MW_BOUND = (1<<22),
- NES_CQP_QP_VWQ_USE_LMI = (1<<23),
- NES_CQP_QP_IWARP_STATE_IDLE = (1<<NES_CQP_OP_IWARP_STATE_SHIFT),
- NES_CQP_QP_IWARP_STATE_RTS = (2<<NES_CQP_OP_IWARP_STATE_SHIFT),
- NES_CQP_QP_IWARP_STATE_CLOSING = (3<<NES_CQP_OP_IWARP_STATE_SHIFT),
- NES_CQP_QP_IWARP_STATE_TERMINATE = (5<<NES_CQP_OP_IWARP_STATE_SHIFT),
- NES_CQP_QP_IWARP_STATE_ERROR = (6<<NES_CQP_OP_IWARP_STATE_SHIFT),
- NES_CQP_QP_IWARP_STATE_MASK = (7<<NES_CQP_OP_IWARP_STATE_SHIFT),
- NES_CQP_QP_TERM_DONT_SEND_FIN = (1<<24),
- NES_CQP_QP_TERM_DONT_SEND_TERM_MSG = (1<<25),
- NES_CQP_QP_RESET = (1<<31),
-};
-
-enum nes_cqp_qp_wqe_word_idx {
- NES_CQP_QP_WQE_CONTEXT_LOW_IDX = 6,
- NES_CQP_QP_WQE_CONTEXT_HIGH_IDX = 7,
- NES_CQP_QP_WQE_FLUSH_SQ_CODE = 8,
- NES_CQP_QP_WQE_FLUSH_RQ_CODE = 9,
- NES_CQP_QP_WQE_NEW_MSS_IDX = 15,
-};
-
-enum nes_nic_ctx_bits {
- NES_NIC_CTX_RQ_SIZE_32 = (3<<8),
- NES_NIC_CTX_RQ_SIZE_512 = (3<<8),
- NES_NIC_CTX_SQ_SIZE_32 = (1<<10),
- NES_NIC_CTX_SQ_SIZE_512 = (3<<10),
-};
-
-enum nes_nic_qp_ctx_word_idx {
- NES_NIC_CTX_MISC_IDX = 0,
- NES_NIC_CTX_SQ_LOW_IDX = 2,
- NES_NIC_CTX_SQ_HIGH_IDX = 3,
- NES_NIC_CTX_RQ_LOW_IDX = 4,
- NES_NIC_CTX_RQ_HIGH_IDX = 5,
-};
-
-enum nes_cqp_cq_bits {
- NES_CQP_CQ_CEQE_MASK = (1<<9),
- NES_CQP_CQ_CEQ_VALID = (1<<10),
- NES_CQP_CQ_RESIZE = (1<<11),
- NES_CQP_CQ_CHK_OVERFLOW = (1<<12),
- NES_CQP_CQ_4KB_CHUNK = (1<<14),
- NES_CQP_CQ_VIRT = (1<<15),
-};
-
-enum nes_cqp_stag_bits {
- NES_CQP_STAG_VA_TO = (1<<9),
- NES_CQP_STAG_DEALLOC_PBLS = (1<<10),
- NES_CQP_STAG_PBL_BLK_SIZE = (1<<11),
- NES_CQP_STAG_MR = (1<<13),
- NES_CQP_STAG_RIGHTS_LOCAL_READ = (1<<16),
- NES_CQP_STAG_RIGHTS_LOCAL_WRITE = (1<<17),
- NES_CQP_STAG_RIGHTS_REMOTE_READ = (1<<18),
- NES_CQP_STAG_RIGHTS_REMOTE_WRITE = (1<<19),
- NES_CQP_STAG_RIGHTS_WINDOW_BIND = (1<<20),
- NES_CQP_STAG_REM_ACC_EN = (1<<21),
- NES_CQP_STAG_LEAVE_PENDING = (1<<31),
-};
-
-enum nes_cqp_ceq_wqeword_idx {
- NES_CQP_CEQ_WQE_ELEMENT_COUNT_IDX = 1,
- NES_CQP_CEQ_WQE_PBL_LOW_IDX = 6,
- NES_CQP_CEQ_WQE_PBL_HIGH_IDX = 7,
-};
-
-enum nes_cqp_ceq_bits {
- NES_CQP_CEQ_4KB_CHUNK = (1<<14),
- NES_CQP_CEQ_VIRT = (1<<15),
-};
-
-enum nes_cqp_aeq_wqeword_idx {
- NES_CQP_AEQ_WQE_ELEMENT_COUNT_IDX = 1,
- NES_CQP_AEQ_WQE_PBL_LOW_IDX = 6,
- NES_CQP_AEQ_WQE_PBL_HIGH_IDX = 7,
-};
-
-enum nes_cqp_aeq_bits {
- NES_CQP_AEQ_4KB_CHUNK = (1<<14),
- NES_CQP_AEQ_VIRT = (1<<15),
-};
-
-enum nes_cqp_lmi_wqeword_idx {
- NES_CQP_LMI_WQE_LMI_OFFSET_IDX = 1,
- NES_CQP_LMI_WQE_FRAG_LOW_IDX = 8,
- NES_CQP_LMI_WQE_FRAG_HIGH_IDX = 9,
- NES_CQP_LMI_WQE_FRAG_LEN_IDX = 10,
-};
-
-enum nes_cqp_arp_wqeword_idx {
- NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX = 6,
- NES_CQP_ARP_WQE_MAC_HIGH_IDX = 7,
- NES_CQP_ARP_WQE_REACHABILITY_MAX_IDX = 1,
-};
-
-enum nes_cqp_upload_wqeword_idx {
- NES_CQP_UPLOAD_WQE_CTXT_LOW_IDX = 6,
- NES_CQP_UPLOAD_WQE_CTXT_HIGH_IDX = 7,
- NES_CQP_UPLOAD_WQE_HTE_IDX = 8,
-};
-
-enum nes_cqp_arp_bits {
- NES_CQP_ARP_VALID = (1<<8),
- NES_CQP_ARP_PERM = (1<<9),
-};
-
-enum nes_cqp_flush_bits {
- NES_CQP_FLUSH_SQ = (1<<30),
- NES_CQP_FLUSH_RQ = (1<<31),
- NES_CQP_FLUSH_MAJ_MIN = (1<<28),
-};
-
-enum nes_cqe_opcode_bits {
- NES_CQE_STAG_VALID = (1<<6),
- NES_CQE_ERROR = (1<<7),
- NES_CQE_SQ = (1<<8),
- NES_CQE_SE = (1<<9),
- NES_CQE_PSH = (1<<29),
- NES_CQE_FIN = (1<<30),
- NES_CQE_VALID = (1<<31),
-};
-
-
-enum nes_cqe_word_idx {
- NES_CQE_PAYLOAD_LENGTH_IDX = 0,
- NES_CQE_COMP_COMP_CTX_LOW_IDX = 2,
- NES_CQE_COMP_COMP_CTX_HIGH_IDX = 3,
- NES_CQE_INV_STAG_IDX = 4,
- NES_CQE_QP_ID_IDX = 5,
- NES_CQE_ERROR_CODE_IDX = 6,
- NES_CQE_OPCODE_IDX = 7,
-};
-
-enum nes_ceqe_word_idx {
- NES_CEQE_CQ_CTX_LOW_IDX = 0,
- NES_CEQE_CQ_CTX_HIGH_IDX = 1,
-};
-
-enum nes_ceqe_status_bit {
- NES_CEQE_VALID = (1<<31),
-};
-
-enum nes_int_bits {
- NES_INT_CEQ0 = (1<<0),
- NES_INT_CEQ1 = (1<<1),
- NES_INT_CEQ2 = (1<<2),
- NES_INT_CEQ3 = (1<<3),
- NES_INT_CEQ4 = (1<<4),
- NES_INT_CEQ5 = (1<<5),
- NES_INT_CEQ6 = (1<<6),
- NES_INT_CEQ7 = (1<<7),
- NES_INT_CEQ8 = (1<<8),
- NES_INT_CEQ9 = (1<<9),
- NES_INT_CEQ10 = (1<<10),
- NES_INT_CEQ11 = (1<<11),
- NES_INT_CEQ12 = (1<<12),
- NES_INT_CEQ13 = (1<<13),
- NES_INT_CEQ14 = (1<<14),
- NES_INT_CEQ15 = (1<<15),
- NES_INT_AEQ0 = (1<<16),
- NES_INT_AEQ1 = (1<<17),
- NES_INT_AEQ2 = (1<<18),
- NES_INT_AEQ3 = (1<<19),
- NES_INT_AEQ4 = (1<<20),
- NES_INT_AEQ5 = (1<<21),
- NES_INT_AEQ6 = (1<<22),
- NES_INT_AEQ7 = (1<<23),
- NES_INT_MAC0 = (1<<24),
- NES_INT_MAC1 = (1<<25),
- NES_INT_MAC2 = (1<<26),
- NES_INT_MAC3 = (1<<27),
- NES_INT_TSW = (1<<28),
- NES_INT_TIMER = (1<<29),
- NES_INT_INTF = (1<<30),
-};
-
-enum nes_intf_int_bits {
- NES_INTF_INT_PCIERR = (1<<0),
- NES_INTF_PERIODIC_TIMER = (1<<2),
- NES_INTF_ONE_SHOT_TIMER = (1<<3),
- NES_INTF_INT_CRITERR = (1<<14),
- NES_INTF_INT_AEQ0_OFLOW = (1<<16),
- NES_INTF_INT_AEQ1_OFLOW = (1<<17),
- NES_INTF_INT_AEQ2_OFLOW = (1<<18),
- NES_INTF_INT_AEQ3_OFLOW = (1<<19),
- NES_INTF_INT_AEQ4_OFLOW = (1<<20),
- NES_INTF_INT_AEQ5_OFLOW = (1<<21),
- NES_INTF_INT_AEQ6_OFLOW = (1<<22),
- NES_INTF_INT_AEQ7_OFLOW = (1<<23),
- NES_INTF_INT_AEQ_OFLOW = (0xff<<16),
-};
-
-enum nes_mac_int_bits {
- NES_MAC_INT_LINK_STAT_CHG = (1<<1),
- NES_MAC_INT_XGMII_EXT = (1<<2),
- NES_MAC_INT_TX_UNDERFLOW = (1<<6),
- NES_MAC_INT_TX_ERROR = (1<<7),
-};
-
-enum nes_cqe_allocate_bits {
- NES_CQE_ALLOC_INC_SELECT = (1<<28),
- NES_CQE_ALLOC_NOTIFY_NEXT = (1<<29),
- NES_CQE_ALLOC_NOTIFY_SE = (1<<30),
- NES_CQE_ALLOC_RESET = (1<<31),
-};
-
-enum nes_nic_rq_wqe_word_idx {
- NES_NIC_RQ_WQE_LENGTH_1_0_IDX = 0,
- NES_NIC_RQ_WQE_LENGTH_3_2_IDX = 1,
- NES_NIC_RQ_WQE_FRAG0_LOW_IDX = 2,
- NES_NIC_RQ_WQE_FRAG0_HIGH_IDX = 3,
- NES_NIC_RQ_WQE_FRAG1_LOW_IDX = 4,
- NES_NIC_RQ_WQE_FRAG1_HIGH_IDX = 5,
- NES_NIC_RQ_WQE_FRAG2_LOW_IDX = 6,
- NES_NIC_RQ_WQE_FRAG2_HIGH_IDX = 7,
- NES_NIC_RQ_WQE_FRAG3_LOW_IDX = 8,
- NES_NIC_RQ_WQE_FRAG3_HIGH_IDX = 9,
-};
-
-enum nes_nic_sq_wqe_word_idx {
- NES_NIC_SQ_WQE_MISC_IDX = 0,
- NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX = 1,
- NES_NIC_SQ_WQE_LSO_INFO_IDX = 2,
- NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX = 3,
- NES_NIC_SQ_WQE_LENGTH_2_1_IDX = 4,
- NES_NIC_SQ_WQE_LENGTH_4_3_IDX = 5,
- NES_NIC_SQ_WQE_FRAG0_LOW_IDX = 6,
- NES_NIC_SQ_WQE_FRAG0_HIGH_IDX = 7,
- NES_NIC_SQ_WQE_FRAG1_LOW_IDX = 8,
- NES_NIC_SQ_WQE_FRAG1_HIGH_IDX = 9,
- NES_NIC_SQ_WQE_FRAG2_LOW_IDX = 10,
- NES_NIC_SQ_WQE_FRAG2_HIGH_IDX = 11,
- NES_NIC_SQ_WQE_FRAG3_LOW_IDX = 12,
- NES_NIC_SQ_WQE_FRAG3_HIGH_IDX = 13,
- NES_NIC_SQ_WQE_FRAG4_LOW_IDX = 14,
- NES_NIC_SQ_WQE_FRAG4_HIGH_IDX = 15,
-};
-
-enum nes_iwarp_sq_wqe_word_idx {
- NES_IWARP_SQ_WQE_MISC_IDX = 0,
- NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX = 1,
- NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX = 2,
- NES_IWARP_SQ_WQE_COMP_CTX_HIGH_IDX = 3,
- NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX = 4,
- NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX = 5,
- NES_IWARP_SQ_WQE_INV_STAG_LOW_IDX = 7,
- NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX = 8,
- NES_IWARP_SQ_WQE_RDMA_TO_HIGH_IDX = 9,
- NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX = 10,
- NES_IWARP_SQ_WQE_RDMA_STAG_IDX = 11,
- NES_IWARP_SQ_WQE_IMM_DATA_START_IDX = 12,
- NES_IWARP_SQ_WQE_FRAG0_LOW_IDX = 16,
- NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX = 17,
- NES_IWARP_SQ_WQE_LENGTH0_IDX = 18,
- NES_IWARP_SQ_WQE_STAG0_IDX = 19,
- NES_IWARP_SQ_WQE_FRAG1_LOW_IDX = 20,
- NES_IWARP_SQ_WQE_FRAG1_HIGH_IDX = 21,
- NES_IWARP_SQ_WQE_LENGTH1_IDX = 22,
- NES_IWARP_SQ_WQE_STAG1_IDX = 23,
- NES_IWARP_SQ_WQE_FRAG2_LOW_IDX = 24,
- NES_IWARP_SQ_WQE_FRAG2_HIGH_IDX = 25,
- NES_IWARP_SQ_WQE_LENGTH2_IDX = 26,
- NES_IWARP_SQ_WQE_STAG2_IDX = 27,
- NES_IWARP_SQ_WQE_FRAG3_LOW_IDX = 28,
- NES_IWARP_SQ_WQE_FRAG3_HIGH_IDX = 29,
- NES_IWARP_SQ_WQE_LENGTH3_IDX = 30,
- NES_IWARP_SQ_WQE_STAG3_IDX = 31,
-};
-
-enum nes_iwarp_sq_bind_wqe_word_idx {
- NES_IWARP_SQ_BIND_WQE_MR_IDX = 6,
- NES_IWARP_SQ_BIND_WQE_MW_IDX = 7,
- NES_IWARP_SQ_BIND_WQE_LENGTH_LOW_IDX = 8,
- NES_IWARP_SQ_BIND_WQE_LENGTH_HIGH_IDX = 9,
- NES_IWARP_SQ_BIND_WQE_VA_FBO_LOW_IDX = 10,
- NES_IWARP_SQ_BIND_WQE_VA_FBO_HIGH_IDX = 11,
-};
-
-enum nes_iwarp_sq_fmr_wqe_word_idx {
- NES_IWARP_SQ_FMR_WQE_MR_STAG_IDX = 7,
- NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX = 8,
- NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX = 9,
- NES_IWARP_SQ_FMR_WQE_VA_FBO_LOW_IDX = 10,
- NES_IWARP_SQ_FMR_WQE_VA_FBO_HIGH_IDX = 11,
- NES_IWARP_SQ_FMR_WQE_PBL_ADDR_LOW_IDX = 12,
- NES_IWARP_SQ_FMR_WQE_PBL_ADDR_HIGH_IDX = 13,
- NES_IWARP_SQ_FMR_WQE_PBL_LENGTH_IDX = 14,
-};
-
-enum nes_iwarp_sq_fmr_opcodes {
- NES_IWARP_SQ_FMR_WQE_ZERO_BASED = (1<<6),
- NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_4K = (0<<7),
- NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_2M = (1<<7),
- NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_LOCAL_READ = (1<<16),
- NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_LOCAL_WRITE = (1<<17),
- NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_REMOTE_READ = (1<<18),
- NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_REMOTE_WRITE = (1<<19),
- NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_WINDOW_BIND = (1<<20),
-};
-
-#define NES_IWARP_SQ_FMR_WQE_MR_LENGTH_HIGH_MASK 0xFF;
-
-enum nes_iwarp_sq_locinv_wqe_word_idx {
- NES_IWARP_SQ_LOCINV_WQE_INV_STAG_IDX = 6,
-};
-
-enum nes_iwarp_rq_wqe_word_idx {
- NES_IWARP_RQ_WQE_TOTAL_PAYLOAD_IDX = 1,
- NES_IWARP_RQ_WQE_COMP_CTX_LOW_IDX = 2,
- NES_IWARP_RQ_WQE_COMP_CTX_HIGH_IDX = 3,
- NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX = 4,
- NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX = 5,
- NES_IWARP_RQ_WQE_FRAG0_LOW_IDX = 8,
- NES_IWARP_RQ_WQE_FRAG0_HIGH_IDX = 9,
- NES_IWARP_RQ_WQE_LENGTH0_IDX = 10,
- NES_IWARP_RQ_WQE_STAG0_IDX = 11,
- NES_IWARP_RQ_WQE_FRAG1_LOW_IDX = 12,
- NES_IWARP_RQ_WQE_FRAG1_HIGH_IDX = 13,
- NES_IWARP_RQ_WQE_LENGTH1_IDX = 14,
- NES_IWARP_RQ_WQE_STAG1_IDX = 15,
- NES_IWARP_RQ_WQE_FRAG2_LOW_IDX = 16,
- NES_IWARP_RQ_WQE_FRAG2_HIGH_IDX = 17,
- NES_IWARP_RQ_WQE_LENGTH2_IDX = 18,
- NES_IWARP_RQ_WQE_STAG2_IDX = 19,
- NES_IWARP_RQ_WQE_FRAG3_LOW_IDX = 20,
- NES_IWARP_RQ_WQE_FRAG3_HIGH_IDX = 21,
- NES_IWARP_RQ_WQE_LENGTH3_IDX = 22,
- NES_IWARP_RQ_WQE_STAG3_IDX = 23,
-};
-
-enum nes_nic_sq_wqe_bits {
- NES_NIC_SQ_WQE_PHDR_CS_READY = (1<<21),
- NES_NIC_SQ_WQE_LSO_ENABLE = (1<<22),
- NES_NIC_SQ_WQE_TAGVALUE_ENABLE = (1<<23),
- NES_NIC_SQ_WQE_DISABLE_CHKSUM = (1<<30),
- NES_NIC_SQ_WQE_COMPLETION = (1<<31),
-};
-
-enum nes_nic_cqe_word_idx {
- NES_NIC_CQE_ACCQP_ID_IDX = 0,
- NES_NIC_CQE_HASH_RCVNXT = 1,
- NES_NIC_CQE_TAG_PKT_TYPE_IDX = 2,
- NES_NIC_CQE_MISC_IDX = 3,
-};
-
-#define NES_PKT_TYPE_APBVT_BITS 0xC112
-#define NES_PKT_TYPE_APBVT_MASK 0xff3e
-
-#define NES_PKT_TYPE_PVALID_BITS 0x10000000
-#define NES_PKT_TYPE_PVALID_MASK 0x30000000
-
-#define NES_PKT_TYPE_TCPV4_BITS 0x0110
-#define NES_PKT_TYPE_TCPV4_MASK 0x3f30
-
-#define NES_PKT_TYPE_UDPV4_BITS 0x0210
-#define NES_PKT_TYPE_UDPV4_MASK 0x3f30
-
-#define NES_PKT_TYPE_IPV4_BITS 0x0010
-#define NES_PKT_TYPE_IPV4_MASK 0x3f30
-
-#define NES_PKT_TYPE_OTHER_BITS 0x0000
-#define NES_PKT_TYPE_OTHER_MASK 0x0030
-
-#define NES_NIC_CQE_ERRV_SHIFT 16
-enum nes_nic_ev_bits {
- NES_NIC_ERRV_BITS_MODE = (1<<0),
- NES_NIC_ERRV_BITS_IPV4_CSUM_ERR = (1<<1),
- NES_NIC_ERRV_BITS_TCPUDP_CSUM_ERR = (1<<2),
- NES_NIC_ERRV_BITS_WQE_OVERRUN = (1<<3),
- NES_NIC_ERRV_BITS_IPH_ERR = (1<<4),
-};
-
-enum nes_nic_cqe_bits {
- NES_NIC_CQE_ERRV_MASK = (0xff<<NES_NIC_CQE_ERRV_SHIFT),
- NES_NIC_CQE_SQ = (1<<24),
- NES_NIC_CQE_ACCQP_PORT = (1<<28),
- NES_NIC_CQE_ACCQP_VALID = (1<<29),
- NES_NIC_CQE_TAG_VALID = (1<<30),
- NES_NIC_CQE_VALID = (1<<31),
-};
-
-enum nes_aeqe_word_idx {
- NES_AEQE_COMP_CTXT_LOW_IDX = 0,
- NES_AEQE_COMP_CTXT_HIGH_IDX = 1,
- NES_AEQE_COMP_QP_CQ_ID_IDX = 2,
- NES_AEQE_MISC_IDX = 3,
-};
-
-enum nes_aeqe_bits {
- NES_AEQE_QP = (1<<16),
- NES_AEQE_CQ = (1<<17),
- NES_AEQE_SQ = (1<<18),
- NES_AEQE_INBOUND_RDMA = (1<<19),
- NES_AEQE_IWARP_STATE_MASK = (7<<20),
- NES_AEQE_TCP_STATE_MASK = (0xf<<24),
- NES_AEQE_Q2_DATA_WRITTEN = (0x3<<28),
- NES_AEQE_VALID = (1<<31),
-};
-
-#define NES_AEQE_IWARP_STATE_SHIFT 20
-#define NES_AEQE_TCP_STATE_SHIFT 24
-#define NES_AEQE_Q2_DATA_ETHERNET (1<<28)
-#define NES_AEQE_Q2_DATA_MPA (1<<29)
-
-enum nes_aeqe_iwarp_state {
- NES_AEQE_IWARP_STATE_NON_EXISTANT = 0,
- NES_AEQE_IWARP_STATE_IDLE = 1,
- NES_AEQE_IWARP_STATE_RTS = 2,
- NES_AEQE_IWARP_STATE_CLOSING = 3,
- NES_AEQE_IWARP_STATE_TERMINATE = 5,
- NES_AEQE_IWARP_STATE_ERROR = 6
-};
-
-enum nes_aeqe_tcp_state {
- NES_AEQE_TCP_STATE_NON_EXISTANT = 0,
- NES_AEQE_TCP_STATE_CLOSED = 1,
- NES_AEQE_TCP_STATE_LISTEN = 2,
- NES_AEQE_TCP_STATE_SYN_SENT = 3,
- NES_AEQE_TCP_STATE_SYN_RCVD = 4,
- NES_AEQE_TCP_STATE_ESTABLISHED = 5,
- NES_AEQE_TCP_STATE_CLOSE_WAIT = 6,
- NES_AEQE_TCP_STATE_FIN_WAIT_1 = 7,
- NES_AEQE_TCP_STATE_CLOSING = 8,
- NES_AEQE_TCP_STATE_LAST_ACK = 9,
- NES_AEQE_TCP_STATE_FIN_WAIT_2 = 10,
- NES_AEQE_TCP_STATE_TIME_WAIT = 11
-};
-
-enum nes_aeqe_aeid {
- NES_AEQE_AEID_AMP_UNALLOCATED_STAG = 0x0102,
- NES_AEQE_AEID_AMP_INVALID_STAG = 0x0103,
- NES_AEQE_AEID_AMP_BAD_QP = 0x0104,
- NES_AEQE_AEID_AMP_BAD_PD = 0x0105,
- NES_AEQE_AEID_AMP_BAD_STAG_KEY = 0x0106,
- NES_AEQE_AEID_AMP_BAD_STAG_INDEX = 0x0107,
- NES_AEQE_AEID_AMP_BOUNDS_VIOLATION = 0x0108,
- NES_AEQE_AEID_AMP_RIGHTS_VIOLATION = 0x0109,
- NES_AEQE_AEID_AMP_TO_WRAP = 0x010a,
- NES_AEQE_AEID_AMP_FASTREG_SHARED = 0x010b,
- NES_AEQE_AEID_AMP_FASTREG_VALID_STAG = 0x010c,
- NES_AEQE_AEID_AMP_FASTREG_MW_STAG = 0x010d,
- NES_AEQE_AEID_AMP_FASTREG_INVALID_RIGHTS = 0x010e,
- NES_AEQE_AEID_AMP_FASTREG_PBL_TABLE_OVERFLOW = 0x010f,
- NES_AEQE_AEID_AMP_FASTREG_INVALID_LENGTH = 0x0110,
- NES_AEQE_AEID_AMP_INVALIDATE_SHARED = 0x0111,
- NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS = 0x0112,
- NES_AEQE_AEID_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS = 0x0113,
- NES_AEQE_AEID_AMP_MWBIND_VALID_STAG = 0x0114,
- NES_AEQE_AEID_AMP_MWBIND_OF_MR_STAG = 0x0115,
- NES_AEQE_AEID_AMP_MWBIND_TO_ZERO_BASED_STAG = 0x0116,
- NES_AEQE_AEID_AMP_MWBIND_TO_MW_STAG = 0x0117,
- NES_AEQE_AEID_AMP_MWBIND_INVALID_RIGHTS = 0x0118,
- NES_AEQE_AEID_AMP_MWBIND_INVALID_BOUNDS = 0x0119,
- NES_AEQE_AEID_AMP_MWBIND_TO_INVALID_PARENT = 0x011a,
- NES_AEQE_AEID_AMP_MWBIND_BIND_DISABLED = 0x011b,
- NES_AEQE_AEID_BAD_CLOSE = 0x0201,
- NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE = 0x0202,
- NES_AEQE_AEID_CQ_OPERATION_ERROR = 0x0203,
- NES_AEQE_AEID_PRIV_OPERATION_DENIED = 0x0204,
- NES_AEQE_AEID_RDMA_READ_WHILE_ORD_ZERO = 0x0205,
- NES_AEQE_AEID_STAG_ZERO_INVALID = 0x0206,
- NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN = 0x0301,
- NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID = 0x0302,
- NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER = 0x0303,
- NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION = 0x0304,
- NES_AEQE_AEID_DDP_UBE_INVALID_MO = 0x0305,
- NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE = 0x0306,
- NES_AEQE_AEID_DDP_UBE_INVALID_QN = 0x0307,
- NES_AEQE_AEID_DDP_NO_L_BIT = 0x0308,
- NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION = 0x0311,
- NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE = 0x0312,
- NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST = 0x0313,
- NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP = 0x0314,
- NES_AEQE_AEID_INVALID_ARP_ENTRY = 0x0401,
- NES_AEQE_AEID_INVALID_TCP_OPTION_RCVD = 0x0402,
- NES_AEQE_AEID_STALE_ARP_ENTRY = 0x0403,
- NES_AEQE_AEID_LLP_CLOSE_COMPLETE = 0x0501,
- NES_AEQE_AEID_LLP_CONNECTION_RESET = 0x0502,
- NES_AEQE_AEID_LLP_FIN_RECEIVED = 0x0503,
- NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH = 0x0504,
- NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR = 0x0505,
- NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE = 0x0506,
- NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL = 0x0507,
- NES_AEQE_AEID_LLP_SYN_RECEIVED = 0x0508,
- NES_AEQE_AEID_LLP_TERMINATE_RECEIVED = 0x0509,
- NES_AEQE_AEID_LLP_TOO_MANY_RETRIES = 0x050a,
- NES_AEQE_AEID_LLP_TOO_MANY_KEEPALIVE_RETRIES = 0x050b,
- NES_AEQE_AEID_RESET_SENT = 0x0601,
- NES_AEQE_AEID_TERMINATE_SENT = 0x0602,
- NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC = 0x0700
-};
-
-enum nes_iwarp_sq_opcodes {
- NES_IWARP_SQ_WQE_WRPDU = (1<<15),
- NES_IWARP_SQ_WQE_PSH = (1<<21),
- NES_IWARP_SQ_WQE_STREAMING = (1<<23),
- NES_IWARP_SQ_WQE_IMM_DATA = (1<<28),
- NES_IWARP_SQ_WQE_READ_FENCE = (1<<29),
- NES_IWARP_SQ_WQE_LOCAL_FENCE = (1<<30),
- NES_IWARP_SQ_WQE_SIGNALED_COMPL = (1<<31),
-};
-
-enum nes_iwarp_sq_wqe_bits {
- NES_IWARP_SQ_OP_RDMAW = 0,
- NES_IWARP_SQ_OP_RDMAR = 1,
- NES_IWARP_SQ_OP_SEND = 3,
- NES_IWARP_SQ_OP_SENDINV = 4,
- NES_IWARP_SQ_OP_SENDSE = 5,
- NES_IWARP_SQ_OP_SENDSEINV = 6,
- NES_IWARP_SQ_OP_BIND = 8,
- NES_IWARP_SQ_OP_FAST_REG = 9,
- NES_IWARP_SQ_OP_LOCINV = 10,
- NES_IWARP_SQ_OP_RDMAR_LOCINV = 11,
- NES_IWARP_SQ_OP_NOP = 12,
-};
-
-enum nes_iwarp_cqe_major_code {
- NES_IWARP_CQE_MAJOR_FLUSH = 1,
- NES_IWARP_CQE_MAJOR_DRV = 0x8000
-};
-
-enum nes_iwarp_cqe_minor_code {
- NES_IWARP_CQE_MINOR_FLUSH = 1
-};
-
-#define NES_EEPROM_READ_REQUEST (1<<16)
-#define NES_MAC_ADDR_VALID (1<<20)
-
-/*
- * NES index registers init values.
- */
-struct nes_init_values {
- u32 index;
- u32 data;
- u8 wrt;
-};
-
-/*
- * NES registers in BAR0.
- */
-struct nes_pci_regs {
- u32 int_status;
- u32 int_mask;
- u32 int_pending;
- u32 intf_int_status;
- u32 intf_int_mask;
- u32 other_regs[59]; /* pad out to 256 bytes for now */
-};
-
-#define NES_CQP_SQ_SIZE 128
-#define NES_CCQ_SIZE 128
-#define NES_NIC_WQ_SIZE 512
-#define NES_NIC_CTX_SIZE ((NES_NIC_CTX_RQ_SIZE_512) | (NES_NIC_CTX_SQ_SIZE_512))
-#define NES_NIC_BACK_STORE 0x00038000
-
-struct nes_device;
-
-struct nes_hw_nic_qp_context {
- __le32 context_words[6];
-};
-
-struct nes_hw_nic_sq_wqe {
- __le32 wqe_words[16];
-};
-
-struct nes_hw_nic_rq_wqe {
- __le32 wqe_words[16];
-};
-
-struct nes_hw_nic_cqe {
- __le32 cqe_words[4];
-};
-
-struct nes_hw_cqp_qp_context {
- __le32 context_words[4];
-};
-
-struct nes_hw_cqp_wqe {
- __le32 wqe_words[16];
-};
-
-struct nes_hw_qp_wqe {
- __le32 wqe_words[32];
-};
-
-struct nes_hw_cqe {
- __le32 cqe_words[8];
-};
-
-struct nes_hw_ceqe {
- __le32 ceqe_words[2];
-};
-
-struct nes_hw_aeqe {
- __le32 aeqe_words[4];
-};
-
-struct nes_cqp_request {
- union {
- u64 cqp_callback_context;
- void *cqp_callback_pointer;
- };
- wait_queue_head_t waitq;
- struct nes_hw_cqp_wqe cqp_wqe;
- struct list_head list;
- atomic_t refcount;
- void (*cqp_callback)(struct nes_device *nesdev, struct nes_cqp_request *cqp_request);
- u16 major_code;
- u16 minor_code;
- u8 waiting;
- u8 request_done;
- u8 dynamic;
- u8 callback;
-};
-
-struct nes_hw_cqp {
- struct nes_hw_cqp_wqe *sq_vbase;
- dma_addr_t sq_pbase;
- spinlock_t lock;
- wait_queue_head_t waitq;
- u16 qp_id;
- u16 sq_head;
- u16 sq_tail;
- u16 sq_size;
-};
-
-#define NES_FIRST_FRAG_SIZE 128
-struct nes_first_frag {
- u8 buffer[NES_FIRST_FRAG_SIZE];
-};
-
-struct nes_hw_nic {
- struct nes_first_frag *first_frag_vbase; /* virtual address of first frags */
- struct nes_hw_nic_sq_wqe *sq_vbase; /* virtual address of sq */
- struct nes_hw_nic_rq_wqe *rq_vbase; /* virtual address of rq */
- struct sk_buff *tx_skb[NES_NIC_WQ_SIZE];
- struct sk_buff *rx_skb[NES_NIC_WQ_SIZE];
- dma_addr_t frag_paddr[NES_NIC_WQ_SIZE];
- unsigned long first_frag_overflow[BITS_TO_LONGS(NES_NIC_WQ_SIZE)];
- dma_addr_t sq_pbase; /* PCI memory for host rings */
- dma_addr_t rq_pbase; /* PCI memory for host rings */
-
- u16 qp_id;
- u16 sq_head;
- u16 sq_tail;
- u16 sq_size;
- u16 rq_head;
- u16 rq_tail;
- u16 rq_size;
- u8 replenishing_rq;
- u8 reserved;
-
- spinlock_t rq_lock;
-};
-
-struct nes_hw_nic_cq {
- struct nes_hw_nic_cqe volatile *cq_vbase; /* PCI memory for host rings */
- void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_nic_cq *cq);
- dma_addr_t cq_pbase; /* PCI memory for host rings */
- int rx_cqes_completed;
- int cqe_allocs_pending;
- int rx_pkts_indicated;
- u16 cq_head;
- u16 cq_size;
- u16 cq_number;
- u8 cqes_pending;
-};
-
-struct nes_hw_qp {
- struct nes_hw_qp_wqe *sq_vbase; /* PCI memory for host rings */
- struct nes_hw_qp_wqe *rq_vbase; /* PCI memory for host rings */
- void *q2_vbase; /* PCI memory for host rings */
- dma_addr_t sq_pbase; /* PCI memory for host rings */
- dma_addr_t rq_pbase; /* PCI memory for host rings */
- dma_addr_t q2_pbase; /* PCI memory for host rings */
- u32 qp_id;
- u16 sq_head;
- u16 sq_tail;
- u16 sq_size;
- u16 rq_head;
- u16 rq_tail;
- u16 rq_size;
- u8 rq_encoded_size;
- u8 sq_encoded_size;
-};
-
-struct nes_hw_cq {
- struct nes_hw_cqe *cq_vbase; /* PCI memory for host rings */
- void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_cq *cq);
- dma_addr_t cq_pbase; /* PCI memory for host rings */
- u16 cq_head;
- u16 cq_size;
- u16 cq_number;
-};
-
-struct nes_hw_ceq {
- struct nes_hw_ceqe volatile *ceq_vbase; /* PCI memory for host rings */
- dma_addr_t ceq_pbase; /* PCI memory for host rings */
- u16 ceq_head;
- u16 ceq_size;
-};
-
-struct nes_hw_aeq {
- struct nes_hw_aeqe volatile *aeq_vbase; /* PCI memory for host rings */
- dma_addr_t aeq_pbase; /* PCI memory for host rings */
- u16 aeq_head;
- u16 aeq_size;
-};
-
-struct nic_qp_map {
- u8 qpid;
- u8 nic_index;
- u8 logical_port;
- u8 is_hnic;
-};
-
-#define NES_CQP_ARP_AEQ_INDEX_MASK 0x000f0000
-#define NES_CQP_ARP_AEQ_INDEX_SHIFT 16
-
-#define NES_CQP_APBVT_ADD 0x00008000
-#define NES_CQP_APBVT_NIC_SHIFT 16
-
-#define NES_ARP_ADD 1
-#define NES_ARP_DELETE 2
-#define NES_ARP_RESOLVE 3
-
-#define NES_MAC_SW_IDLE 0
-#define NES_MAC_SW_INTERRUPT 1
-#define NES_MAC_SW_MH 2
-
-struct nes_arp_entry {
- u32 ip_addr;
- u8 mac_addr[ETH_ALEN];
-};
-
-#define NES_NIC_FAST_TIMER 96
-#define NES_NIC_FAST_TIMER_LOW 40
-#define NES_NIC_FAST_TIMER_HIGH 1000
-#define DEFAULT_NES_QL_HIGH 256
-#define DEFAULT_NES_QL_LOW 16
-#define DEFAULT_NES_QL_TARGET 64
-#define DEFAULT_JUMBO_NES_QL_LOW 12
-#define DEFAULT_JUMBO_NES_QL_TARGET 40
-#define DEFAULT_JUMBO_NES_QL_HIGH 128
-#define NES_NIC_CQ_DOWNWARD_TREND 16
-#define NES_PFT_SIZE 48
-
-#define NES_MGT_WQ_COUNT 32
-#define NES_MGT_CTX_SIZE ((NES_NIC_CTX_RQ_SIZE_32) | (NES_NIC_CTX_SQ_SIZE_32))
-#define NES_MGT_QP_OFFSET 36
-#define NES_MGT_QP_COUNT 4
-
-struct nes_hw_tune_timer {
- /* u16 cq_count; */
- u16 threshold_low;
- u16 threshold_target;
- u16 threshold_high;
- u16 timer_in_use;
- u16 timer_in_use_old;
- u16 timer_in_use_min;
- u16 timer_in_use_max;
- u8 timer_direction_upward;
- u8 timer_direction_downward;
- u16 cq_count_old;
- u8 cq_direction_downward;
-};
-
-#define NES_TIMER_INT_LIMIT 2
-#define NES_TIMER_INT_LIMIT_DYNAMIC 10
-#define NES_TIMER_ENABLE_LIMIT 4
-#define NES_MAX_LINK_INTERRUPTS 128
-#define NES_MAX_LINK_CHECK 200
-#define NES_MAX_LRO_DESCRIPTORS 32
-#define NES_LRO_MAX_AGGR 64
-
-struct nes_adapter {
- u64 fw_ver;
- unsigned long *allocated_qps;
- unsigned long *allocated_cqs;
- unsigned long *allocated_mrs;
- unsigned long *allocated_pds;
- unsigned long *allocated_arps;
- struct nes_qp **qp_table;
- struct workqueue_struct *work_q;
-
- struct list_head list;
- struct list_head active_listeners;
- /* list of the netdev's associated with each logical port */
- struct list_head nesvnic_list[4];
-
- struct timer_list mh_timer;
- struct timer_list lc_timer;
- struct work_struct work;
- spinlock_t resource_lock;
- spinlock_t phy_lock;
- spinlock_t pbl_lock;
- spinlock_t periodic_timer_lock;
-
- struct nes_arp_entry arp_table[NES_MAX_ARP_TABLE_SIZE];
-
- /* Adapter CEQ and AEQs */
- struct nes_hw_ceq ceq[16];
- struct nes_hw_aeq aeq[8];
-
- struct nes_hw_tune_timer tune_timer;
-
- unsigned long doorbell_start;
-
- u32 hw_rev;
- u32 vendor_id;
- u32 vendor_part_id;
- u32 device_cap_flags;
- u32 tick_delta;
- u32 timer_int_req;
- u32 arp_table_size;
- u32 next_arp_index;
-
- u32 max_mr;
- u32 max_256pbl;
- u32 max_4kpbl;
- u32 free_256pbl;
- u32 free_4kpbl;
- u32 max_mr_size;
- u32 max_qp;
- u32 next_qp;
- u32 max_irrq;
- u32 max_qp_wr;
- u32 max_sge;
- u32 max_cq;
- u32 next_cq;
- u32 max_cqe;
- u32 max_pd;
- u32 base_pd;
- u32 next_pd;
- u32 hte_index_mask;
-
- /* EEPROM information */
- u32 rx_pool_size;
- u32 tx_pool_size;
- u32 rx_threshold;
- u32 tcp_timer_core_clk_divisor;
- u32 iwarp_config;
- u32 cm_config;
- u32 sws_timer_config;
- u32 tcp_config1;
- u32 wqm_wat;
- u32 core_clock;
- u32 firmware_version;
- u32 eeprom_version;
-
- u32 nic_rx_eth_route_err;
-
- u32 et_rx_coalesce_usecs;
- u32 et_rx_max_coalesced_frames;
- u32 et_rx_coalesce_usecs_irq;
- u32 et_rx_max_coalesced_frames_irq;
- u32 et_pkt_rate_low;
- u32 et_rx_coalesce_usecs_low;
- u32 et_rx_max_coalesced_frames_low;
- u32 et_pkt_rate_high;
- u32 et_rx_coalesce_usecs_high;
- u32 et_rx_max_coalesced_frames_high;
- u32 et_rate_sample_interval;
- u32 timer_int_limit;
- u32 wqm_quanta;
- u8 allow_unaligned_fpdus;
-
- /* Adapter base MAC address */
- u32 mac_addr_low;
- u16 mac_addr_high;
-
- u16 firmware_eeprom_offset;
- u16 software_eeprom_offset;
-
- u16 max_irrq_wr;
-
- /* pd config for each port */
- u16 pd_config_size[4];
- u16 pd_config_base[4];
-
- u16 link_interrupt_count[4];
- u8 crit_error_count[32];
-
- /* the phy index for each port */
- u8 phy_index[4];
- u8 mac_sw_state[4];
- u8 mac_link_down[4];
- u8 phy_type[4];
- u8 log_port;
-
- /* PCI information */
- unsigned int devfn;
- unsigned char bus_number;
- unsigned char OneG_Mode;
-
- unsigned char ref_count;
- u8 netdev_count;
- u8 netdev_max; /* from host nic address count in EEPROM */
- u8 port_count;
- u8 virtwq;
- u8 send_term_ok;
- u8 et_use_adaptive_rx_coalesce;
- u8 adapter_fcn_count;
- u8 pft_mcast_map[NES_PFT_SIZE];
-};
-
-struct nes_pbl {
- u64 *pbl_vbase;
- dma_addr_t pbl_pbase;
- struct page *page;
- unsigned long user_base;
- u32 pbl_size;
- struct list_head list;
- /* TODO: need to add list for two level tables */
-};
-
-#define NES_4K_PBL_CHUNK_SIZE 4096
-
-struct nes_fast_mr_wqe_pbl {
- u64 *kva;
- dma_addr_t paddr;
-};
-
-struct nes_ib_fast_reg_page_list {
- struct ib_fast_reg_page_list ibfrpl;
- struct nes_fast_mr_wqe_pbl nes_wqe_pbl;
- u64 pbl;
-};
-
-struct nes_listener {
- struct work_struct work;
- struct workqueue_struct *wq;
- struct nes_vnic *nesvnic;
- struct iw_cm_id *cm_id;
- struct list_head list;
- unsigned long socket;
- u8 accept_failed;
-};
-
-struct nes_ib_device;
-
-#define NES_EVENT_DELAY msecs_to_jiffies(100)
-
-struct nes_vnic {
- struct nes_ib_device *nesibdev;
- u64 sq_full;
- u64 tso_requests;
- u64 segmented_tso_requests;
- u64 linearized_skbs;
- u64 tx_sw_dropped;
- u64 endnode_nstat_rx_discard;
- u64 endnode_nstat_rx_octets;
- u64 endnode_nstat_rx_frames;
- u64 endnode_nstat_tx_octets;
- u64 endnode_nstat_tx_frames;
- u64 endnode_ipv4_tcp_retransmits;
- /* void *mem; */
- struct nes_device *nesdev;
- struct net_device *netdev;
- atomic_t rx_skbs_needed;
- atomic_t rx_skb_timer_running;
- int budget;
- u32 msg_enable;
- /* u32 tx_avail; */
- __be32 local_ipaddr;
- struct napi_struct napi;
- spinlock_t tx_lock; /* could use netdev tx lock? */
- struct timer_list rq_wqes_timer;
- u32 nic_mem_size;
- void *nic_vbase;
- dma_addr_t nic_pbase;
- struct nes_hw_nic nic;
- struct nes_hw_nic_cq nic_cq;
- u32 mcrq_qp_id;
- struct nes_ucontext *mcrq_ucontext;
- struct nes_cqp_request* (*get_cqp_request)(struct nes_device *nesdev);
- void (*post_cqp_request)(struct nes_device*, struct nes_cqp_request *);
- int (*mcrq_mcast_filter)( struct nes_vnic* nesvnic, __u8* dmi_addr );
- struct net_device_stats netstats;
- /* used to put the netdev on the adapters logical port list */
- struct list_head list;
- u16 max_frame_size;
- u8 netdev_open;
- u8 linkup;
- u8 logical_port;
- u8 netdev_index; /* might not be needed, indexes nesdev->netdev */
- u8 perfect_filter_index;
- u8 nic_index;
- u8 qp_nic_index[4];
- u8 next_qp_nic_index;
- u8 of_device_registered;
- u8 rdma_enabled;
- u32 lro_max_aggr;
- struct net_lro_mgr lro_mgr;
- struct net_lro_desc lro_desc[NES_MAX_LRO_DESCRIPTORS];
- struct timer_list event_timer;
- enum ib_event_type delayed_event;
- enum ib_event_type last_dispatched_event;
- spinlock_t port_ibevent_lock;
- u32 mgt_mem_size;
- void *mgt_vbase;
- dma_addr_t mgt_pbase;
- struct nes_vnic_mgt *mgtvnic[NES_MGT_QP_COUNT];
- struct task_struct *mgt_thread;
- wait_queue_head_t mgt_wait_queue;
- struct sk_buff_head mgt_skb_list;
-
-};
-
-struct nes_ib_device {
- struct ib_device ibdev;
- struct nes_vnic *nesvnic;
-
- /* Virtual RNIC Limits */
- u32 max_mr;
- u32 max_qp;
- u32 max_cq;
- u32 max_pd;
- u32 num_mr;
- u32 num_qp;
- u32 num_cq;
- u32 num_pd;
-};
-
-enum nes_hdrct_flags {
- DDP_LEN_FLAG = 0x80,
- DDP_HDR_FLAG = 0x40,
- RDMA_HDR_FLAG = 0x20
-};
-
-enum nes_term_layers {
- LAYER_RDMA = 0,
- LAYER_DDP = 1,
- LAYER_MPA = 2
-};
-
-enum nes_term_error_types {
- RDMAP_CATASTROPHIC = 0,
- RDMAP_REMOTE_PROT = 1,
- RDMAP_REMOTE_OP = 2,
- DDP_CATASTROPHIC = 0,
- DDP_TAGGED_BUFFER = 1,
- DDP_UNTAGGED_BUFFER = 2,
- DDP_LLP = 3
-};
-
-enum nes_term_rdma_errors {
- RDMAP_INV_STAG = 0x00,
- RDMAP_INV_BOUNDS = 0x01,
- RDMAP_ACCESS = 0x02,
- RDMAP_UNASSOC_STAG = 0x03,
- RDMAP_TO_WRAP = 0x04,
- RDMAP_INV_RDMAP_VER = 0x05,
- RDMAP_UNEXPECTED_OP = 0x06,
- RDMAP_CATASTROPHIC_LOCAL = 0x07,
- RDMAP_CATASTROPHIC_GLOBAL = 0x08,
- RDMAP_CANT_INV_STAG = 0x09,
- RDMAP_UNSPECIFIED = 0xff
-};
-
-enum nes_term_ddp_errors {
- DDP_CATASTROPHIC_LOCAL = 0x00,
- DDP_TAGGED_INV_STAG = 0x00,
- DDP_TAGGED_BOUNDS = 0x01,
- DDP_TAGGED_UNASSOC_STAG = 0x02,
- DDP_TAGGED_TO_WRAP = 0x03,
- DDP_TAGGED_INV_DDP_VER = 0x04,
- DDP_UNTAGGED_INV_QN = 0x01,
- DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
- DDP_UNTAGGED_INV_MSN_RANGE = 0x03,
- DDP_UNTAGGED_INV_MO = 0x04,
- DDP_UNTAGGED_INV_TOO_LONG = 0x05,
- DDP_UNTAGGED_INV_DDP_VER = 0x06
-};
-
-enum nes_term_mpa_errors {
- MPA_CLOSED = 0x01,
- MPA_CRC = 0x02,
- MPA_MARKER = 0x03,
- MPA_REQ_RSP = 0x04,
-};
-
-struct nes_terminate_hdr {
- u8 layer_etype;
- u8 error_code;
- u8 hdrct;
- u8 rsvd;
-};
-
-/* Used to determine how to fill in terminate error codes */
-#define IWARP_OPCODE_WRITE 0
-#define IWARP_OPCODE_READREQ 1
-#define IWARP_OPCODE_READRSP 2
-#define IWARP_OPCODE_SEND 3
-#define IWARP_OPCODE_SEND_INV 4
-#define IWARP_OPCODE_SEND_SE 5
-#define IWARP_OPCODE_SEND_SE_INV 6
-#define IWARP_OPCODE_TERM 7
-
-/* These values are used only during terminate processing */
-#define TERM_DDP_LEN_TAGGED 14
-#define TERM_DDP_LEN_UNTAGGED 18
-#define TERM_RDMA_LEN 28
-#define RDMA_OPCODE_MASK 0x0f
-#define RDMA_READ_REQ_OPCODE 1
-#define BAD_FRAME_OFFSET 64
-#define CQE_MAJOR_DRV 0x8000
-
-/* Used for link status recheck after interrupt processing */
-#define NES_LINK_RECHECK_DELAY msecs_to_jiffies(50)
-#define NES_LINK_RECHECK_MAX 60
-
-#endif /* __NES_HW_H */
diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
deleted file mode 100644
index 416645259b0f..000000000000
--- a/drivers/infiniband/hw/nes/nes_mgt.c
+++ /dev/null
@@ -1,1160 +0,0 @@
-/*
- * Copyright (c) 2006 - 2011 Intel-NE, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/skbuff.h>
-#include <linux/etherdevice.h>
-#include <linux/kthread.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <net/tcp.h>
-#include "nes.h"
-#include "nes_mgt.h"
-
-atomic_t pau_qps_created;
-atomic_t pau_qps_destroyed;
-
-static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
-{
- unsigned long flags;
- dma_addr_t bus_address;
- struct sk_buff *skb;
- struct nes_hw_nic_rq_wqe *nic_rqe;
- struct nes_hw_mgt *nesmgt;
- struct nes_device *nesdev;
- struct nes_rskb_cb *cb;
- u32 rx_wqes_posted = 0;
-
- nesmgt = &mgtvnic->mgt;
- nesdev = mgtvnic->nesvnic->nesdev;
- spin_lock_irqsave(&nesmgt->rq_lock, flags);
- if (nesmgt->replenishing_rq != 0) {
- if (((nesmgt->rq_size - 1) == atomic_read(&mgtvnic->rx_skbs_needed)) &&
- (atomic_read(&mgtvnic->rx_skb_timer_running) == 0)) {
- atomic_set(&mgtvnic->rx_skb_timer_running, 1);
- spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
- mgtvnic->rq_wqes_timer.expires = jiffies + (HZ / 2); /* 1/2 second */
- add_timer(&mgtvnic->rq_wqes_timer);
- } else {
- spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
- }
- return;
- }
- nesmgt->replenishing_rq = 1;
- spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
- do {
- skb = dev_alloc_skb(mgtvnic->nesvnic->max_frame_size);
- if (skb) {
- skb->dev = mgtvnic->nesvnic->netdev;
-
- bus_address = pci_map_single(nesdev->pcidev,
- skb->data, mgtvnic->nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
- cb = (struct nes_rskb_cb *)&skb->cb[0];
- cb->busaddr = bus_address;
- cb->maplen = mgtvnic->nesvnic->max_frame_size;
-
- nic_rqe = &nesmgt->rq_vbase[mgtvnic->mgt.rq_head];
- nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] =
- cpu_to_le32(mgtvnic->nesvnic->max_frame_size);
- nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0;
- nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] =
- cpu_to_le32((u32)bus_address);
- nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] =
- cpu_to_le32((u32)((u64)bus_address >> 32));
- nesmgt->rx_skb[nesmgt->rq_head] = skb;
- nesmgt->rq_head++;
- nesmgt->rq_head &= nesmgt->rq_size - 1;
- atomic_dec(&mgtvnic->rx_skbs_needed);
- barrier();
- if (++rx_wqes_posted == 255) {
- nes_write32(nesdev->regs + NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesmgt->qp_id);
- rx_wqes_posted = 0;
- }
- } else {
- spin_lock_irqsave(&nesmgt->rq_lock, flags);
- if (((nesmgt->rq_size - 1) == atomic_read(&mgtvnic->rx_skbs_needed)) &&
- (atomic_read(&mgtvnic->rx_skb_timer_running) == 0)) {
- atomic_set(&mgtvnic->rx_skb_timer_running, 1);
- spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
- mgtvnic->rq_wqes_timer.expires = jiffies + (HZ / 2); /* 1/2 second */
- add_timer(&mgtvnic->rq_wqes_timer);
- } else {
- spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
- }
- break;
- }
- } while (atomic_read(&mgtvnic->rx_skbs_needed));
- barrier();
- if (rx_wqes_posted)
- nes_write32(nesdev->regs + NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesmgt->qp_id);
- nesmgt->replenishing_rq = 0;
-}
-
-/**
- * nes_mgt_rq_wqes_timeout
- */
-static void nes_mgt_rq_wqes_timeout(unsigned long parm)
-{
- struct nes_vnic_mgt *mgtvnic = (struct nes_vnic_mgt *)parm;
-
- atomic_set(&mgtvnic->rx_skb_timer_running, 0);
- if (atomic_read(&mgtvnic->rx_skbs_needed))
- nes_replenish_mgt_rq(mgtvnic);
-}
-
-/**
- * nes_mgt_free_skb - unmap and free skb
- */
-static void nes_mgt_free_skb(struct nes_device *nesdev, struct sk_buff *skb, u32 dir)
-{
- struct nes_rskb_cb *cb;
-
- cb = (struct nes_rskb_cb *)&skb->cb[0];
- pci_unmap_single(nesdev->pcidev, cb->busaddr, cb->maplen, dir);
- cb->busaddr = 0;
- dev_kfree_skb_any(skb);
-}
-
-/**
- * nes_download_callback - handle download completions
- */
-static void nes_download_callback(struct nes_device *nesdev, struct nes_cqp_request *cqp_request)
-{
- struct pau_fpdu_info *fpdu_info = cqp_request->cqp_callback_pointer;
- struct nes_qp *nesqp = fpdu_info->nesqp;
- struct sk_buff *skb;
- int i;
-
- for (i = 0; i < fpdu_info->frag_cnt; i++) {
- skb = fpdu_info->frags[i].skb;
- if (fpdu_info->frags[i].cmplt) {
- nes_mgt_free_skb(nesdev, skb, PCI_DMA_TODEVICE);
- nes_rem_ref_cm_node(nesqp->cm_node);
- }
- }
-
- if (fpdu_info->hdr_vbase)
- pci_free_consistent(nesdev->pcidev, fpdu_info->hdr_len,
- fpdu_info->hdr_vbase, fpdu_info->hdr_pbase);
- kfree(fpdu_info);
-}
-
-/**
- * nes_get_seq - Get the seq, ack_seq and window from the packet
- */
-static u32 nes_get_seq(struct sk_buff *skb, u32 *ack, u16 *wnd, u32 *fin_rcvd, u32 *rst_rcvd)
-{
- struct nes_rskb_cb *cb = (struct nes_rskb_cb *)&skb->cb[0];
- struct iphdr *iph = (struct iphdr *)(cb->data_start + ETH_HLEN);
- struct tcphdr *tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
-
- *ack = be32_to_cpu(tcph->ack_seq);
- *wnd = be16_to_cpu(tcph->window);
- *fin_rcvd = tcph->fin;
- *rst_rcvd = tcph->rst;
- return be32_to_cpu(tcph->seq);
-}
-
-/**
- * nes_get_next_skb - Get the next skb based on where current skb is in the queue
- */
-static struct sk_buff *nes_get_next_skb(struct nes_device *nesdev, struct nes_qp *nesqp,
- struct sk_buff *skb, u32 nextseq, u32 *ack,
- u16 *wnd, u32 *fin_rcvd, u32 *rst_rcvd)
-{
- u32 seq;
- bool processacks;
- struct sk_buff *old_skb;
-
- if (skb) {
- /* Continue processing fpdu */
- if (skb->next == (struct sk_buff *)&nesqp->pau_list)
- goto out;
- skb = skb->next;
- processacks = false;
- } else {
- /* Starting a new one */
- if (skb_queue_empty(&nesqp->pau_list))
- goto out;
- skb = skb_peek(&nesqp->pau_list);
- processacks = true;
- }
-
- while (1) {
- if (skb_queue_empty(&nesqp->pau_list))
- goto out;
-
- seq = nes_get_seq(skb, ack, wnd, fin_rcvd, rst_rcvd);
- if (seq == nextseq) {
- if (skb->len || processacks)
- break;
- } else if (after(seq, nextseq)) {
- goto out;
- }
-
- old_skb = skb;
- skb = skb->next;
- skb_unlink(old_skb, &nesqp->pau_list);
- nes_mgt_free_skb(nesdev, old_skb, PCI_DMA_TODEVICE);
- nes_rem_ref_cm_node(nesqp->cm_node);
- if (skb == (struct sk_buff *)&nesqp->pau_list)
- goto out;
- }
- return skb;
-
-out:
- return NULL;
-}
-
-/**
- * get_fpdu_info - Find the next complete fpdu and return its fragments.
- */
-static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
- struct pau_fpdu_info **pau_fpdu_info)
-{
- struct sk_buff *skb;
- struct iphdr *iph;
- struct tcphdr *tcph;
- struct nes_rskb_cb *cb;
- struct pau_fpdu_info *fpdu_info = NULL;
- struct pau_fpdu_frag frags[MAX_FPDU_FRAGS];
- u32 fpdu_len = 0;
- u32 tmp_len;
- int frag_cnt = 0;
- u32 tot_len;
- u32 frag_tot;
- u32 ack;
- u32 fin_rcvd;
- u32 rst_rcvd;
- u16 wnd;
- int i;
- int rc = 0;
-
- *pau_fpdu_info = NULL;
-
- skb = nes_get_next_skb(nesdev, nesqp, NULL, nesqp->pau_rcv_nxt, &ack, &wnd, &fin_rcvd, &rst_rcvd);
- if (!skb)
- goto out;
-
- cb = (struct nes_rskb_cb *)&skb->cb[0];
- if (skb->len) {
- fpdu_len = be16_to_cpu(*(__be16 *) skb->data) + MPA_FRAMING;
- fpdu_len = (fpdu_len + 3) & 0xfffffffc;
- tmp_len = fpdu_len;
-
- /* See if we have all of the fpdu */
- frag_tot = 0;
- memset(&frags, 0, sizeof frags);
- for (i = 0; i < MAX_FPDU_FRAGS; i++) {
- frags[i].physaddr = cb->busaddr;
- frags[i].physaddr += skb->data - cb->data_start;
- frags[i].frag_len = min(tmp_len, skb->len);
- frags[i].skb = skb;
- frags[i].cmplt = (skb->len == frags[i].frag_len);
- frag_tot += frags[i].frag_len;
- frag_cnt++;
-
- tmp_len -= frags[i].frag_len;
- if (tmp_len == 0)
- break;
-
- skb = nes_get_next_skb(nesdev, nesqp, skb,
- nesqp->pau_rcv_nxt + frag_tot, &ack, &wnd, &fin_rcvd, &rst_rcvd);
- if (!skb)
- goto out;
- if (rst_rcvd) {
- /* rst received in the middle of fpdu */
- for (; i >= 0; i--) {
- skb_unlink(frags[i].skb, &nesqp->pau_list);
- nes_mgt_free_skb(nesdev, frags[i].skb, PCI_DMA_TODEVICE);
- }
- cb = (struct nes_rskb_cb *)&skb->cb[0];
- frags[0].physaddr = cb->busaddr;
- frags[0].physaddr += skb->data - cb->data_start;
- frags[0].frag_len = skb->len;
- frags[0].skb = skb;
- frags[0].cmplt = true;
- frag_cnt = 1;
- break;
- }
-
- cb = (struct nes_rskb_cb *)&skb->cb[0];
- }
- } else {
- /* no data */
- frags[0].physaddr = cb->busaddr;
- frags[0].frag_len = 0;
- frags[0].skb = skb;
- frags[0].cmplt = true;
- frag_cnt = 1;
- }
-
- /* Found one */
- fpdu_info = kzalloc(sizeof(*fpdu_info), GFP_ATOMIC);
- if (fpdu_info == NULL) {
- nes_debug(NES_DBG_PAU, "Failed to alloc a fpdu_info.\n");
- rc = -ENOMEM;
- goto out;
- }
-
- fpdu_info->cqp_request = nes_get_cqp_request(nesdev);
- if (fpdu_info->cqp_request == NULL) {
- nes_debug(NES_DBG_PAU, "Failed to get a cqp_request.\n");
- rc = -ENOMEM;
- goto out;
- }
-
- cb = (struct nes_rskb_cb *)&frags[0].skb->cb[0];
- iph = (struct iphdr *)(cb->data_start + ETH_HLEN);
- tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
- fpdu_info->hdr_len = (((unsigned char *)tcph) + 4 * (tcph->doff)) - cb->data_start;
- fpdu_info->data_len = fpdu_len;
- tot_len = fpdu_info->hdr_len + fpdu_len - ETH_HLEN;
-
- if (frags[0].cmplt) {
- fpdu_info->hdr_pbase = cb->busaddr;
- fpdu_info->hdr_vbase = NULL;
- } else {
- fpdu_info->hdr_vbase = pci_alloc_consistent(nesdev->pcidev,
- fpdu_info->hdr_len, &fpdu_info->hdr_pbase);
- if (!fpdu_info->hdr_vbase) {
- nes_debug(NES_DBG_PAU, "Unable to allocate memory for pau first frag\n");
- rc = -ENOMEM;
- goto out;
- }
-
- /* Copy hdrs, adjusting len and seqnum */
- memcpy(fpdu_info->hdr_vbase, cb->data_start, fpdu_info->hdr_len);
- iph = (struct iphdr *)(fpdu_info->hdr_vbase + ETH_HLEN);
- tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
- }
-
- iph->tot_len = cpu_to_be16(tot_len);
- iph->saddr = cpu_to_be32(0x7f000001);
-
- tcph->seq = cpu_to_be32(nesqp->pau_rcv_nxt);
- tcph->ack_seq = cpu_to_be32(ack);
- tcph->window = cpu_to_be16(wnd);
-
- nesqp->pau_rcv_nxt += fpdu_len + fin_rcvd;
-
- memcpy(fpdu_info->frags, frags, sizeof(fpdu_info->frags));
- fpdu_info->frag_cnt = frag_cnt;
- fpdu_info->nesqp = nesqp;
- *pau_fpdu_info = fpdu_info;
-
- /* Update skb's for next pass */
- for (i = 0; i < frag_cnt; i++) {
- cb = (struct nes_rskb_cb *)&frags[i].skb->cb[0];
- skb_pull(frags[i].skb, frags[i].frag_len);
-
- if (frags[i].skb->len == 0) {
- /* Pull skb off the list - it will be freed in the callback */
- if (!skb_queue_empty(&nesqp->pau_list))
- skb_unlink(frags[i].skb, &nesqp->pau_list);
- } else {
- /* Last skb still has data so update the seq */
- iph = (struct iphdr *)(cb->data_start + ETH_HLEN);
- tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
- tcph->seq = cpu_to_be32(nesqp->pau_rcv_nxt);
- }
- }
-
-out:
- if (rc) {
- if (fpdu_info) {
- if (fpdu_info->cqp_request)
- nes_put_cqp_request(nesdev, fpdu_info->cqp_request);
- kfree(fpdu_info);
- }
- }
- return rc;
-}
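
A small self-contained sketch of the length computation at the top of
get_fpdu_info(): the 2-byte MPA length field is read from the start of the
payload, MPA_FRAMING (2-byte length field plus 4-byte CRC) is added, and the
result is rounded up to a 4-byte boundary. The names below are illustrative
only:

#include <stdint.h>

#define MPA_FRAMING 6	/* 2-byte length field + 4-byte CRC */

static uint32_t padded_fpdu_len(uint16_t mpa_ulpdu_len)
{
	uint32_t len = (uint32_t)mpa_ulpdu_len + MPA_FRAMING;

	return (len + 3) & ~3u;	/* round up to the next 4-byte boundary */
}

For example, a 1-byte ULPDU gives 1 + 6 = 7, padded up to 8.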
-
-/**
- * forward_fpdus - send complete fpdus, one at a time
- */
-static int forward_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
-{
- struct nes_device *nesdev = nesvnic->nesdev;
- struct pau_fpdu_info *fpdu_info;
- struct nes_hw_cqp_wqe *cqp_wqe;
- struct nes_cqp_request *cqp_request;
- unsigned long flags;
- u64 u64tmp;
- u32 u32tmp;
- int rc;
-
- while (1) {
- spin_lock_irqsave(&nesqp->pau_lock, flags);
- rc = get_fpdu_info(nesdev, nesqp, &fpdu_info);
- if (rc || (fpdu_info == NULL)) {
- spin_unlock_irqrestore(&nesqp->pau_lock, flags);
- return rc;
- }
-
- cqp_request = fpdu_info->cqp_request;
- cqp_wqe = &cqp_request->cqp_wqe;
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_DL_OPCODE_IDX,
- NES_CQP_DOWNLOAD_SEGMENT |
- (((u32)nesvnic->logical_port) << NES_CQP_OP_LOGICAL_PORT_SHIFT));
-
- u32tmp = fpdu_info->hdr_len << 16;
- u32tmp |= fpdu_info->hdr_len + (u32)fpdu_info->data_len;
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_DL_LENGTH_0_TOTAL_IDX,
- u32tmp);
-
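- /* Fragment lengths are packed two per 32-bit WQE word, odd index in the high half */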
- u32tmp = (fpdu_info->frags[1].frag_len << 16) | fpdu_info->frags[0].frag_len;
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_LENGTH_2_1_IDX,
- u32tmp);
-
- u32tmp = (fpdu_info->frags[3].frag_len << 16) | fpdu_info->frags[2].frag_len;
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_LENGTH_4_3_IDX,
- u32tmp);
-
- u64tmp = (u64)fpdu_info->hdr_pbase;
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX,
- lower_32_bits(u64tmp));
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_HIGH_IDX,
- upper_32_bits(u64tmp));
-
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
- lower_32_bits(fpdu_info->frags[0].physaddr));
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_HIGH_IDX,
- upper_32_bits(fpdu_info->frags[0].physaddr));
-
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG2_LOW_IDX,
- lower_32_bits(fpdu_info->frags[1].physaddr));
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG2_HIGH_IDX,
- upper_32_bits(fpdu_info->frags[1].physaddr));
-
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG3_LOW_IDX,
- lower_32_bits(fpdu_info->frags[2].physaddr));
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG3_HIGH_IDX,
- upper_32_bits(fpdu_info->frags[2].physaddr));
-
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG4_LOW_IDX,
- lower_32_bits(fpdu_info->frags[3].physaddr));
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG4_HIGH_IDX,
- upper_32_bits(fpdu_info->frags[3].physaddr));
-
- cqp_request->cqp_callback_pointer = fpdu_info;
- cqp_request->callback = 1;
- cqp_request->cqp_callback = nes_download_callback;
-
- atomic_set(&cqp_request->refcount, 1);
- nes_post_cqp_request(nesdev, cqp_request);
- spin_unlock_irqrestore(&nesqp->pau_lock, flags);
- }
-
- return 0;
-}
-
-static void process_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
-{
- int again = 1;
- unsigned long flags;
-
- do {
- /* Ignore rc - if it failed, tcp retries will cause it to try again */
- forward_fpdus(nesvnic, nesqp);
-
- spin_lock_irqsave(&nesqp->pau_lock, flags);
- if (nesqp->pau_pending) {
- nesqp->pau_pending = 0;
- } else {
- nesqp->pau_busy = 0;
- again = 0;
- }
-
- spin_unlock_irqrestore(&nesqp->pau_lock, flags);
- } while (again);
-}
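
The pau_busy/pau_pending pair used by process_fpdus() above and queue_fpdus()
below is a small coalescing pattern: whoever finds the QP idle becomes the
worker, and anyone arriving while it is busy only flags that one more pass is
needed. A rough userspace sketch of the same idea, using a pthread mutex in
place of the driver's spinlock (all names are illustrative):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool busy;
static bool pending;

/* Producer side: returns true if the caller should run the processing loop. */
static bool claim_or_flag(void)
{
	bool run_it;

	pthread_mutex_lock(&lock);
	if (busy) {
		pending = true;		/* a worker is active; ask it for another pass */
		run_it = false;
	} else {
		busy = true;		/* become the worker */
		run_it = true;
	}
	pthread_mutex_unlock(&lock);
	return run_it;
}

/* Worker side: keep processing until nothing new arrived during the last pass. */
static void process_until_idle(void (*do_pass)(void))
{
	bool again = true;

	while (again) {
		do_pass();
		pthread_mutex_lock(&lock);
		if (pending) {
			pending = false;
		} else {
			busy = false;
			again = false;
		}
		pthread_mutex_unlock(&lock);
	}
}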
-
-/**
- * queue_fpdus - Handle fpdus that hw passed up to sw
- */
-static void queue_fpdus(struct sk_buff *skb, struct nes_vnic *nesvnic, struct nes_qp *nesqp)
-{
- struct sk_buff *tmpskb;
- struct nes_rskb_cb *cb;
- struct iphdr *iph;
- struct tcphdr *tcph;
- unsigned char *tcph_end;
- u32 rcv_nxt;
- u32 rcv_wnd;
- u32 seqnum;
- u32 len;
- bool process_it = false;
- unsigned long flags;
-
- /* Move data ptr to after tcp header */
- iph = (struct iphdr *)skb->data;
- tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
- seqnum = be32_to_cpu(tcph->seq);
- tcph_end = (((char *)tcph) + (4 * tcph->doff));
-
- len = be16_to_cpu(iph->tot_len);
- if (skb->len > len)
- skb_trim(skb, len);
- skb_pull(skb, tcph_end - skb->data);
-
- /* Initialize tracking values */
- cb = (struct nes_rskb_cb *)&skb->cb[0];
- cb->seqnum = seqnum;
-
- /* Make sure data is in the receive window */
- rcv_nxt = nesqp->pau_rcv_nxt;
- rcv_wnd = le32_to_cpu(nesqp->nesqp_context->rcv_wnd);
- if (!between(seqnum, rcv_nxt, (rcv_nxt + rcv_wnd))) {
- nes_mgt_free_skb(nesvnic->nesdev, skb, PCI_DMA_TODEVICE);
- nes_rem_ref_cm_node(nesqp->cm_node);
- return;
- }
-
- spin_lock_irqsave(&nesqp->pau_lock, flags);
-
- if (nesqp->pau_busy)
- nesqp->pau_pending = 1;
- else
- nesqp->pau_busy = 1;
-
- /* Queue skb by sequence number */
- if (skb_queue_len(&nesqp->pau_list) == 0) {
- skb_queue_head(&nesqp->pau_list, skb);
- } else {
- tmpskb = nesqp->pau_list.next;
- while (tmpskb != (struct sk_buff *)&nesqp->pau_list) {
- cb = (struct nes_rskb_cb *)&tmpskb->cb[0];
- if (before(seqnum, cb->seqnum))
- break;
- tmpskb = tmpskb->next;
- }
- skb_insert(tmpskb, skb, &nesqp->pau_list);
- }
- if (nesqp->pau_state == PAU_READY)
- process_it = true;
- spin_unlock_irqrestore(&nesqp->pau_lock, flags);
-
- if (process_it)
- process_fpdus(nesvnic, nesqp);
-
- return;
-}
-
-/**
- * mgt_thread - Handle mgt skbs in a safe context
- */
-static int mgt_thread(void *context)
-{
- struct nes_vnic *nesvnic = context;
- struct sk_buff *skb;
- struct nes_rskb_cb *cb;
-
- while (!kthread_should_stop()) {
- wait_event_interruptible(nesvnic->mgt_wait_queue,
- skb_queue_len(&nesvnic->mgt_skb_list) || kthread_should_stop());
- while ((skb_queue_len(&nesvnic->mgt_skb_list)) && !kthread_should_stop()) {
- skb = skb_dequeue(&nesvnic->mgt_skb_list);
- cb = (struct nes_rskb_cb *)&skb->cb[0];
- cb->data_start = skb->data - ETH_HLEN;
- cb->busaddr = pci_map_single(nesvnic->nesdev->pcidev, cb->data_start,
- nesvnic->max_frame_size, PCI_DMA_TODEVICE);
- queue_fpdus(skb, nesvnic, cb->nesqp);
- }
- }
-
- /* Closing down so delete any entries on the queue */
- while (skb_queue_len(&nesvnic->mgt_skb_list)) {
- skb = skb_dequeue(&nesvnic->mgt_skb_list);
- cb = (struct nes_rskb_cb *)&skb->cb[0];
- nes_rem_ref_cm_node(cb->nesqp->cm_node);
- dev_kfree_skb_any(skb);
- }
- return 0;
-}
-
-/**
- * nes_queue_mgt_skbs - Queue skb so it can be handled in a thread context
- */
-void nes_queue_mgt_skbs(struct sk_buff *skb, struct nes_vnic *nesvnic, struct nes_qp *nesqp)
-{
- struct nes_rskb_cb *cb;
-
- cb = (struct nes_rskb_cb *)&skb->cb[0];
- cb->nesqp = nesqp;
- skb_queue_tail(&nesvnic->mgt_skb_list, skb);
- wake_up_interruptible(&nesvnic->mgt_wait_queue);
-}
-
-void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
-{
- struct sk_buff *skb;
- unsigned long flags;
- atomic_inc(&pau_qps_destroyed);
-
- /* Free packets that have not yet been forwarded */
- /* Lock is acquired by skb_dequeue when removing the skb */
- spin_lock_irqsave(&nesqp->pau_lock, flags);
- while (skb_queue_len(&nesqp->pau_list)) {
- skb = skb_dequeue(&nesqp->pau_list);
- nes_mgt_free_skb(nesdev, skb, PCI_DMA_TODEVICE);
- nes_rem_ref_cm_node(nesqp->cm_node);
- }
- spin_unlock_irqrestore(&nesqp->pau_lock, flags);
-}
-
-static void nes_chg_qh_handler(struct nes_device *nesdev, struct nes_cqp_request *cqp_request)
-{
- struct pau_qh_chg *qh_chg = cqp_request->cqp_callback_pointer;
- struct nes_cqp_request *new_request;
- struct nes_hw_cqp_wqe *cqp_wqe;
- struct nes_adapter *nesadapter;
- struct nes_qp *nesqp;
- struct nes_v4_quad nes_quad;
- u32 crc_value;
- u64 u64temp;
-
- nesadapter = nesdev->nesadapter;
- nesqp = qh_chg->nesqp;
-
- /* Should we handle the bad completion */
- if (cqp_request->major_code)
- WARN(1, PFX "Invalid cqp_request major_code=0x%x\n",
- cqp_request->major_code);
-
- switch (nesqp->pau_state) {
- case PAU_DEL_QH:
- /* Old hash code deleted, now set the new one */
- nesqp->pau_state = PAU_ADD_LB_QH;
- new_request = nes_get_cqp_request(nesdev);
- if (new_request == NULL) {
- nes_debug(NES_DBG_PAU, "Failed to get a new_request.\n");
- WARN_ON(1);
- return;
- }
-
- memset(&nes_quad, 0, sizeof(nes_quad));
- nes_quad.DstIpAdrIndex =
- cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
- nes_quad.SrcIpadr = cpu_to_be32(0x7f000001);
- nes_quad.TcpPorts[0] = swab16(nesqp->nesqp_context->tcpPorts[1]);
- nes_quad.TcpPorts[1] = swab16(nesqp->nesqp_context->tcpPorts[0]);
-
- /* Produce hash key */
- crc_value = get_crc_value(&nes_quad);
- nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff);
- nes_debug(NES_DBG_PAU, "new HTE Index = 0x%08X, CRC = 0x%08X\n",
- nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask);
-
- nesqp->hte_index &= nesadapter->hte_index_mask;
- nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index);
- nesqp->nesqp_context->ip0 = cpu_to_le32(0x7f000001);
- nesqp->nesqp_context->rcv_nxt = cpu_to_le32(nesqp->pau_rcv_nxt);
-
- cqp_wqe = &new_request->cqp_wqe;
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
- set_wqe_32bit_value(cqp_wqe->wqe_words,
- NES_CQP_WQE_OPCODE_IDX, NES_CQP_MANAGE_QUAD_HASH |
- NES_CQP_QP_TYPE_IWARP | NES_CQP_QP_CONTEXT_VALID | NES_CQP_QP_IWARP_STATE_RTS);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
- u64temp = (u64)nesqp->nesqp_context_pbase;
- set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);
-
- nes_debug(NES_DBG_PAU, "Waiting for CQP completion for adding the quad hash.\n");
-
- new_request->cqp_callback_pointer = qh_chg;
- new_request->callback = 1;
- new_request->cqp_callback = nes_chg_qh_handler;
- atomic_set(&new_request->refcount, 1);
- nes_post_cqp_request(nesdev, new_request);
- break;
-
- case PAU_ADD_LB_QH:
- /* Start processing the queued fpdus */
- nesqp->pau_state = PAU_READY;
- process_fpdus(qh_chg->nesvnic, qh_chg->nesqp);
- kfree(qh_chg);
- break;
- }
-}
-
-/**
- * nes_change_quad_hash
- */
-static int nes_change_quad_hash(struct nes_device *nesdev,
- struct nes_vnic *nesvnic, struct nes_qp *nesqp)
-{
- struct nes_cqp_request *cqp_request = NULL;
- struct pau_qh_chg *qh_chg = NULL;
- u64 u64temp;
- struct nes_hw_cqp_wqe *cqp_wqe;
- int ret = 0;
-
- cqp_request = nes_get_cqp_request(nesdev);
- if (cqp_request == NULL) {
- nes_debug(NES_DBG_PAU, "Failed to get a cqp_request.\n");
- ret = -ENOMEM;
- goto chg_qh_err;
- }
-
- qh_chg = kmalloc(sizeof *qh_chg, GFP_ATOMIC);
- if (qh_chg == NULL) {
- nes_debug(NES_DBG_PAU, "Failed to get a cqp_request.\n");
- ret = -ENOMEM;
- goto chg_qh_err;
- }
- qh_chg->nesdev = nesdev;
- qh_chg->nesvnic = nesvnic;
- qh_chg->nesqp = nesqp;
- nesqp->pau_state = PAU_DEL_QH;
-
- cqp_wqe = &cqp_request->cqp_wqe;
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
- set_wqe_32bit_value(cqp_wqe->wqe_words,
- NES_CQP_WQE_OPCODE_IDX, NES_CQP_MANAGE_QUAD_HASH | NES_CQP_QP_DEL_HTE |
- NES_CQP_QP_TYPE_IWARP | NES_CQP_QP_CONTEXT_VALID | NES_CQP_QP_IWARP_STATE_RTS);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
- u64temp = (u64)nesqp->nesqp_context_pbase;
- set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);
-
- nes_debug(NES_DBG_PAU, "Waiting for CQP completion for deleting the quad hash.\n");
-
- cqp_request->cqp_callback_pointer = qh_chg;
- cqp_request->callback = 1;
- cqp_request->cqp_callback = nes_chg_qh_handler;
- atomic_set(&cqp_request->refcount, 1);
- nes_post_cqp_request(nesdev, cqp_request);
-
- return ret;
-
-chg_qh_err:
- kfree(qh_chg);
- if (cqp_request)
- nes_put_cqp_request(nesdev, cqp_request);
- return ret;
-}
-
-/**
- * nes_mgt_ce_handler
- * This management code deals with any packed and unaligned (pau) fpdus
- * that the hardware cannot handle.
- */
-static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
-{
- struct nes_vnic_mgt *mgtvnic = container_of(cq, struct nes_vnic_mgt, mgt_cq);
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- u32 head;
- u32 cq_size;
- u32 cqe_count = 0;
- u32 cqe_misc;
- u32 qp_id = 0;
- u32 skbs_needed;
- unsigned long context;
- struct nes_qp *nesqp;
- struct sk_buff *rx_skb;
- struct nes_rskb_cb *cb;
-
- head = cq->cq_head;
- cq_size = cq->cq_size;
-
- while (1) {
- cqe_misc = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]);
- if (!(cqe_misc & NES_NIC_CQE_VALID))
- break;
-
- nesqp = NULL;
- if (cqe_misc & NES_NIC_CQE_ACCQP_VALID) {
- qp_id = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_ACCQP_ID_IDX]);
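- /* the QP number occupies the low 21 bits of the ACCQP id word */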
- qp_id &= 0x001fffff;
- if (qp_id < nesadapter->max_qp) {
- context = (unsigned long)nesadapter->qp_table[qp_id - NES_FIRST_QPN];
- nesqp = (struct nes_qp *)context;
- }
- }
-
- if (nesqp) {
- if (nesqp->pau_mode == false) {
- nesqp->pau_mode = true; /* First time for this qp */
- nesqp->pau_rcv_nxt = le32_to_cpu(
- cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
- skb_queue_head_init(&nesqp->pau_list);
- spin_lock_init(&nesqp->pau_lock);
- atomic_inc(&pau_qps_created);
- nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
- }
-
- rx_skb = mgtvnic->mgt.rx_skb[mgtvnic->mgt.rq_tail];
- rx_skb->len = 0;
- skb_put(rx_skb, cqe_misc & 0x0000ffff);
- rx_skb->protocol = eth_type_trans(rx_skb, mgtvnic->nesvnic->netdev);
- cb = (struct nes_rskb_cb *)&rx_skb->cb[0];
- pci_unmap_single(nesdev->pcidev, cb->busaddr, cb->maplen, PCI_DMA_FROMDEVICE);
- cb->busaddr = 0;
- mgtvnic->mgt.rq_tail++;
- mgtvnic->mgt.rq_tail &= mgtvnic->mgt.rq_size - 1;
-
- nes_add_ref_cm_node(nesqp->cm_node);
- nes_queue_mgt_skbs(rx_skb, mgtvnic->nesvnic, nesqp);
- } else {
- printk(KERN_ERR PFX "Invalid QP %d for packed/unaligned handling\n", qp_id);
- }
-
- cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX] = 0;
- cqe_count++;
- if (++head >= cq_size)
- head = 0;
-
- if (cqe_count == 255) {
- /* Replenish mgt CQ */
- nes_write32(nesdev->regs + NES_CQE_ALLOC, cq->cq_number | (cqe_count << 16));
- nesdev->currcq_count += cqe_count;
- cqe_count = 0;
- }
-
- skbs_needed = atomic_inc_return(&mgtvnic->rx_skbs_needed);
- if (skbs_needed > (mgtvnic->mgt.rq_size >> 1))
- nes_replenish_mgt_rq(mgtvnic);
- }
-
- cq->cq_head = head;
- nes_write32(nesdev->regs + NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
- cq->cq_number | (cqe_count << 16));
- nes_read32(nesdev->regs + NES_CQE_ALLOC);
- nesdev->currcq_count += cqe_count;
-}
-
-/**
- * nes_init_mgt_qp
- */
-int nes_init_mgt_qp(struct nes_device *nesdev, struct net_device *netdev, struct nes_vnic *nesvnic)
-{
- struct nes_vnic_mgt *mgtvnic;
- u32 counter;
- void *vmem;
- dma_addr_t pmem;
- struct nes_hw_cqp_wqe *cqp_wqe;
- u32 cqp_head;
- unsigned long flags;
- struct nes_hw_nic_qp_context *mgt_context;
- u64 u64temp;
- struct nes_hw_nic_rq_wqe *mgt_rqe;
- struct sk_buff *skb;
- u32 wqe_count;
- struct nes_rskb_cb *cb;
- u32 mgt_mem_size;
- void *mgt_vbase;
- dma_addr_t mgt_pbase;
- int i;
- int ret;
-
- /* Allocate space for all mgt QPs at once */
- mgtvnic = kzalloc(NES_MGT_QP_COUNT * sizeof(struct nes_vnic_mgt), GFP_KERNEL);
- if (mgtvnic == NULL) {
- nes_debug(NES_DBG_INIT, "Unable to allocate memory for mgt structure\n");
- return -ENOMEM;
- }
-
- /* Allocate fragment, RQ, and CQ; Reuse CEQ based on the PCI function */
- /* We are not sending from this NIC so sq is not allocated */
- mgt_mem_size = 256 +
- (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_rq_wqe)) +
- (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_cqe)) +
- sizeof(struct nes_hw_nic_qp_context);
- mgt_mem_size = (mgt_mem_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
- mgt_vbase = pci_alloc_consistent(nesdev->pcidev, NES_MGT_QP_COUNT * mgt_mem_size, &mgt_pbase);
- if (!mgt_vbase) {
- kfree(mgtvnic);
- nes_debug(NES_DBG_INIT, "Unable to allocate memory for mgt host descriptor rings\n");
- return -ENOMEM;
- }
-
- nesvnic->mgt_mem_size = NES_MGT_QP_COUNT * mgt_mem_size;
- nesvnic->mgt_vbase = mgt_vbase;
- nesvnic->mgt_pbase = mgt_pbase;
-
- skb_queue_head_init(&nesvnic->mgt_skb_list);
- init_waitqueue_head(&nesvnic->mgt_wait_queue);
- nesvnic->mgt_thread = kthread_run(mgt_thread, nesvnic, "nes_mgt_thread");
-
- for (i = 0; i < NES_MGT_QP_COUNT; i++) {
- mgtvnic->nesvnic = nesvnic;
- mgtvnic->mgt.qp_id = nesdev->mac_index + NES_MGT_QP_OFFSET + i;
- memset(mgt_vbase, 0, mgt_mem_size);
- nes_debug(NES_DBG_INIT, "Allocated mgt QP structures at %p (phys = %016lX), size = %u.\n",
- mgt_vbase, (unsigned long)mgt_pbase, mgt_mem_size);
-
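- /* Round the virtual and DMA addresses up to the next 256-byte boundary (mgt_mem_size reserved an extra 256 bytes above for this) */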
- vmem = (void *)(((unsigned long)mgt_vbase + (256 - 1)) &
- ~(unsigned long)(256 - 1));
- pmem = (dma_addr_t)(((unsigned long long)mgt_pbase + (256 - 1)) &
- ~(unsigned long long)(256 - 1));
-
- spin_lock_init(&mgtvnic->mgt.rq_lock);
-
- /* setup the RQ */
- mgtvnic->mgt.rq_vbase = vmem;
- mgtvnic->mgt.rq_pbase = pmem;
- mgtvnic->mgt.rq_head = 0;
- mgtvnic->mgt.rq_tail = 0;
- mgtvnic->mgt.rq_size = NES_MGT_WQ_COUNT;
-
- /* setup the CQ */
- vmem += (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_rq_wqe));
- pmem += (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_rq_wqe));
-
- mgtvnic->mgt_cq.cq_number = mgtvnic->mgt.qp_id;
- mgtvnic->mgt_cq.cq_vbase = vmem;
- mgtvnic->mgt_cq.cq_pbase = pmem;
- mgtvnic->mgt_cq.cq_head = 0;
- mgtvnic->mgt_cq.cq_size = NES_MGT_WQ_COUNT;
-
- mgtvnic->mgt_cq.ce_handler = nes_mgt_ce_handler;
-
- /* Send CreateCQ request to CQP */
- spin_lock_irqsave(&nesdev->cqp.lock, flags);
- cqp_head = nesdev->cqp.sq_head;
-
- cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
-
- cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(
- NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID |
- ((u32)mgtvnic->mgt_cq.cq_size << 16));
- cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(
- mgtvnic->mgt_cq.cq_number | ((u32)nesdev->ceq_index << 16));
- u64temp = (u64)mgtvnic->mgt_cq.cq_pbase;
- set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
- cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0;
- u64temp = (unsigned long)&mgtvnic->mgt_cq;
- cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] = cpu_to_le32((u32)(u64temp >> 1));
- cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] =
- cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF);
- cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0;
-
- if (++cqp_head >= nesdev->cqp.sq_size)
- cqp_head = 0;
- cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
-
- /* Send CreateQP request to CQP */
- mgt_context = (void *)(&mgtvnic->mgt_cq.cq_vbase[mgtvnic->mgt_cq.cq_size]);
- mgt_context->context_words[NES_NIC_CTX_MISC_IDX] =
- cpu_to_le32((u32)NES_MGT_CTX_SIZE |
- ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 12));
- nes_debug(NES_DBG_INIT, "RX_WINDOW_BUFFER_PAGE_TABLE_SIZE = 0x%08X, RX_WINDOW_BUFFER_SIZE = 0x%08X\n",
- nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_PAGE_TABLE_SIZE),
- nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE));
- if (nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE) != 0)
- mgt_context->context_words[NES_NIC_CTX_MISC_IDX] |= cpu_to_le32(NES_NIC_BACK_STORE);
-
- u64temp = (u64)mgtvnic->mgt.rq_pbase;
- mgt_context->context_words[NES_NIC_CTX_SQ_LOW_IDX] = cpu_to_le32((u32)u64temp);
- mgt_context->context_words[NES_NIC_CTX_SQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));
- u64temp = (u64)mgtvnic->mgt.rq_pbase;
- mgt_context->context_words[NES_NIC_CTX_RQ_LOW_IDX] = cpu_to_le32((u32)u64temp);
- mgt_context->context_words[NES_NIC_CTX_RQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));
-
- cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_CREATE_QP |
- NES_CQP_QP_TYPE_NIC);
- cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(mgtvnic->mgt.qp_id);
- u64temp = (u64)mgtvnic->mgt_cq.cq_pbase +
- (mgtvnic->mgt_cq.cq_size * sizeof(struct nes_hw_nic_cqe));
- set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);
-
- if (++cqp_head >= nesdev->cqp.sq_size)
- cqp_head = 0;
- nesdev->cqp.sq_head = cqp_head;
-
- barrier();
-
- /* Ring doorbell (2 WQEs) */
- nes_write32(nesdev->regs + NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id);
-
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
- nes_debug(NES_DBG_INIT, "Waiting for create MGT QP%u to complete.\n",
- mgtvnic->mgt.qp_id);
-
- ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head),
- NES_EVENT_TIMEOUT);
- nes_debug(NES_DBG_INIT, "Create MGT QP%u completed, wait_event_timeout ret = %u.\n",
- mgtvnic->mgt.qp_id, ret);
- if (!ret) {
- nes_debug(NES_DBG_INIT, "MGT QP%u create timeout expired\n", mgtvnic->mgt.qp_id);
- if (i == 0) {
- pci_free_consistent(nesdev->pcidev, nesvnic->mgt_mem_size, nesvnic->mgt_vbase,
- nesvnic->mgt_pbase);
- kfree(mgtvnic);
- } else {
- nes_destroy_mgt(nesvnic);
- }
- return -EIO;
- }
-
- /* Populate the RQ */
- for (counter = 0; counter < (NES_MGT_WQ_COUNT - 1); counter++) {
- skb = dev_alloc_skb(nesvnic->max_frame_size);
- if (!skb) {
- nes_debug(NES_DBG_INIT, "%s: out of memory for receive skb\n", netdev->name);
- return -ENOMEM;
- }
-
- skb->dev = netdev;
-
- pmem = pci_map_single(nesdev->pcidev, skb->data,
- nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
- cb = (struct nes_rskb_cb *)&skb->cb[0];
- cb->busaddr = pmem;
- cb->maplen = nesvnic->max_frame_size;
-
- mgt_rqe = &mgtvnic->mgt.rq_vbase[counter];
- mgt_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = cpu_to_le32((u32)nesvnic->max_frame_size);
- mgt_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0;
- mgt_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] = cpu_to_le32((u32)pmem);
- mgt_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] = cpu_to_le32((u32)((u64)pmem >> 32));
- mgtvnic->mgt.rx_skb[counter] = skb;
- }
-
- init_timer(&mgtvnic->rq_wqes_timer);
- mgtvnic->rq_wqes_timer.function = nes_mgt_rq_wqes_timeout;
- mgtvnic->rq_wqes_timer.data = (unsigned long)mgtvnic;
-
- wqe_count = NES_MGT_WQ_COUNT - 1;
- mgtvnic->mgt.rq_head = wqe_count;
- barrier();
- do {
- counter = min(wqe_count, ((u32)255));
- wqe_count -= counter;
- nes_write32(nesdev->regs + NES_WQE_ALLOC, (counter << 24) | mgtvnic->mgt.qp_id);
- } while (wqe_count);
-
- nes_write32(nesdev->regs + NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
- mgtvnic->mgt_cq.cq_number);
- nes_read32(nesdev->regs + NES_CQE_ALLOC);
-
- mgt_vbase += mgt_mem_size;
- mgt_pbase += mgt_mem_size;
- nesvnic->mgtvnic[i] = mgtvnic++;
- }
- return 0;
-}
-
-
-void nes_destroy_mgt(struct nes_vnic *nesvnic)
-{
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_vnic_mgt *mgtvnic;
- struct nes_vnic_mgt *first_mgtvnic;
- unsigned long flags;
- struct nes_hw_cqp_wqe *cqp_wqe;
- u32 cqp_head;
- struct sk_buff *rx_skb;
- int i;
- int ret;
-
- kthread_stop(nesvnic->mgt_thread);
-
- /* Free remaining NIC receive buffers */
- first_mgtvnic = nesvnic->mgtvnic[0];
- for (i = 0; i < NES_MGT_QP_COUNT; i++) {
- mgtvnic = nesvnic->mgtvnic[i];
- if (mgtvnic == NULL)
- continue;
-
- while (mgtvnic->mgt.rq_head != mgtvnic->mgt.rq_tail) {
- rx_skb = mgtvnic->mgt.rx_skb[mgtvnic->mgt.rq_tail];
- nes_mgt_free_skb(nesdev, rx_skb, PCI_DMA_FROMDEVICE);
- mgtvnic->mgt.rq_tail++;
- mgtvnic->mgt.rq_tail &= (mgtvnic->mgt.rq_size - 1);
- }
-
- spin_lock_irqsave(&nesdev->cqp.lock, flags);
-
- /* Destroy NIC QP */
- cqp_head = nesdev->cqp.sq_head;
- cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
-
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
- (NES_CQP_DESTROY_QP | NES_CQP_QP_TYPE_NIC));
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
- mgtvnic->mgt.qp_id);
-
- if (++cqp_head >= nesdev->cqp.sq_size)
- cqp_head = 0;
-
- cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
-
- /* Destroy NIC CQ */
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
- (NES_CQP_DESTROY_CQ | ((u32)mgtvnic->mgt_cq.cq_size << 16)));
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
- (mgtvnic->mgt_cq.cq_number | ((u32)nesdev->ceq_index << 16)));
-
- if (++cqp_head >= nesdev->cqp.sq_size)
- cqp_head = 0;
-
- nesdev->cqp.sq_head = cqp_head;
- barrier();
-
- /* Ring doorbell (2 WQEs) */
- nes_write32(nesdev->regs + NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id);
-
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
- nes_debug(NES_DBG_SHUTDOWN, "Waiting for CQP, cqp_head=%u, cqp.sq_head=%u,"
- " cqp.sq_tail=%u, cqp.sq_size=%u\n",
- cqp_head, nesdev->cqp.sq_head,
- nesdev->cqp.sq_tail, nesdev->cqp.sq_size);
-
- ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head),
- NES_EVENT_TIMEOUT);
-
- nes_debug(NES_DBG_SHUTDOWN, "Destroy MGT QP returned, wait_event_timeout ret = %u, cqp_head=%u,"
- " cqp.sq_head=%u, cqp.sq_tail=%u\n",
- ret, cqp_head, nesdev->cqp.sq_head, nesdev->cqp.sq_tail);
- if (!ret)
- nes_debug(NES_DBG_SHUTDOWN, "MGT QP%u destroy timeout expired\n",
- mgtvnic->mgt.qp_id);
-
- nesvnic->mgtvnic[i] = NULL;
- }
-
- if (nesvnic->mgt_vbase) {
- pci_free_consistent(nesdev->pcidev, nesvnic->mgt_mem_size, nesvnic->mgt_vbase,
- nesvnic->mgt_pbase);
- nesvnic->mgt_vbase = NULL;
- nesvnic->mgt_pbase = 0;
- }
-
- kfree(first_mgtvnic);
-}
diff --git a/drivers/infiniband/hw/nes/nes_mgt.h b/drivers/infiniband/hw/nes/nes_mgt.h
deleted file mode 100644
index 4f7f701c4a81..000000000000
--- a/drivers/infiniband/hw/nes/nes_mgt.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
-* Copyright (c) 2006 - 2011 Intel-NE, Inc. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenIB.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*/
-
-#ifndef __NES_MGT_H
-#define __NES_MGT_H
-
-#define MPA_FRAMING 6 /* length is 2 bytes, crc is 4 bytes */
-
-int nes_init_mgt_qp(struct nes_device *nesdev, struct net_device *netdev, struct nes_vnic *nesvnic);
-void nes_queue_mgt_skbs(struct sk_buff *skb, struct nes_vnic *nesvnic, struct nes_qp *nesqp);
-void nes_destroy_mgt(struct nes_vnic *nesvnic);
-void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp);
-
-struct nes_hw_mgt {
- struct nes_hw_nic_rq_wqe *rq_vbase; /* virtual address of rq */
- dma_addr_t rq_pbase; /* PCI memory for host rings */
- struct sk_buff *rx_skb[NES_NIC_WQ_SIZE];
- u16 qp_id;
- u16 sq_head;
- u16 rq_head;
- u16 rq_tail;
- u16 rq_size;
- u8 replenishing_rq;
- u8 reserved;
- spinlock_t rq_lock;
-};
-
-struct nes_vnic_mgt {
- struct nes_vnic *nesvnic;
- struct nes_hw_mgt mgt;
- struct nes_hw_nic_cq mgt_cq;
- atomic_t rx_skbs_needed;
- struct timer_list rq_wqes_timer;
- atomic_t rx_skb_timer_running;
-};
-
-#define MAX_FPDU_FRAGS 4
-struct pau_fpdu_frag {
- struct sk_buff *skb;
- u64 physaddr;
- u32 frag_len;
- bool cmplt;
-};
-
-struct pau_fpdu_info {
- struct nes_qp *nesqp;
- struct nes_cqp_request *cqp_request;
- void *hdr_vbase;
- dma_addr_t hdr_pbase;
- int hdr_len;
- u16 data_len;
- u16 frag_cnt;
- struct pau_fpdu_frag frags[MAX_FPDU_FRAGS];
-};
-
-enum pau_qh_state {
- PAU_DEL_QH,
- PAU_ADD_LB_QH,
- PAU_READY
-};
-
-struct pau_qh_chg {
- struct nes_device *nesdev;
- struct nes_vnic *nesvnic;
- struct nes_qp *nesqp;
-};
-
-#endif /* __NES_MGT_H */
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
deleted file mode 100644
index 70acda91eb2a..000000000000
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ /dev/null
@@ -1,1883 +0,0 @@
-/*
- * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/if_arp.h>
-#include <linux/if_vlan.h>
-#include <linux/ethtool.h>
-#include <linux/slab.h>
-#include <net/tcp.h>
-
-#include <net/inet_common.h>
-#include <linux/inet.h>
-
-#include "nes.h"
-
-static struct nic_qp_map nic_qp_mapping_0[] = {
- {16,0,0,1},{24,4,0,0},{28,8,0,0},{32,12,0,0},
- {20,2,2,1},{26,6,2,0},{30,10,2,0},{34,14,2,0},
- {18,1,1,1},{25,5,1,0},{29,9,1,0},{33,13,1,0},
- {22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0}
-};
-
-static struct nic_qp_map nic_qp_mapping_1[] = {
- {18,1,1,1},{25,5,1,0},{29,9,1,0},{33,13,1,0},
- {22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0}
-};
-
-static struct nic_qp_map nic_qp_mapping_2[] = {
- {20,2,2,1},{26,6,2,0},{30,10,2,0},{34,14,2,0}
-};
-
-static struct nic_qp_map nic_qp_mapping_3[] = {
- {22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0}
-};
-
-static struct nic_qp_map nic_qp_mapping_4[] = {
- {28,8,0,0},{32,12,0,0}
-};
-
-static struct nic_qp_map nic_qp_mapping_5[] = {
- {29,9,1,0},{33,13,1,0}
-};
-
-static struct nic_qp_map nic_qp_mapping_6[] = {
- {30,10,2,0},{34,14,2,0}
-};
-
-static struct nic_qp_map nic_qp_mapping_7[] = {
- {31,11,3,0},{35,15,3,0}
-};
-
-static struct nic_qp_map *nic_qp_mapping_per_function[] = {
- nic_qp_mapping_0, nic_qp_mapping_1, nic_qp_mapping_2, nic_qp_mapping_3,
- nic_qp_mapping_4, nic_qp_mapping_5, nic_qp_mapping_6, nic_qp_mapping_7
-};
-
-static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
- | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
-static int debug = -1;
-static int nics_per_function = 1;
-
-/**
- * nes_netdev_poll
- */
-static int nes_netdev_poll(struct napi_struct *napi, int budget)
-{
- struct nes_vnic *nesvnic = container_of(napi, struct nes_vnic, napi);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_hw_nic_cq *nescq = &nesvnic->nic_cq;
-
- nesvnic->budget = budget;
- nescq->cqes_pending = 0;
- nescq->rx_cqes_completed = 0;
- nescq->cqe_allocs_pending = 0;
- nescq->rx_pkts_indicated = 0;
-
- nes_nic_ce_handler(nesdev, nescq);
-
- if (nescq->cqes_pending == 0) {
- napi_complete(napi);
- /* clear out completed cqes and arm */
- nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
- nescq->cq_number | (nescq->cqe_allocs_pending << 16));
- nes_read32(nesdev->regs+NES_CQE_ALLOC);
- } else {
- /* clear out completed cqes but don't arm */
- nes_write32(nesdev->regs+NES_CQE_ALLOC,
- nescq->cq_number | (nescq->cqe_allocs_pending << 16));
- nes_debug(NES_DBG_NETDEV, "%s: exiting with work pending\n",
- nesvnic->netdev->name);
- }
- return nescq->rx_pkts_indicated;
-}
-
-
-/**
- * nes_netdev_open - Activate the network interface; ifconfig
- * ethx up.
- */
-static int nes_netdev_open(struct net_device *netdev)
-{
- u32 macaddr_low;
- u16 macaddr_high;
- struct nes_vnic *nesvnic = netdev_priv(netdev);
- struct nes_device *nesdev = nesvnic->nesdev;
- int ret;
- int i;
- struct nes_vnic *first_nesvnic = NULL;
- u32 nic_active_bit;
- u32 nic_active;
- struct list_head *list_pos, *list_temp;
- unsigned long flags;
-
- assert(nesdev != NULL);
-
- if (nesvnic->netdev_open == 1)
- return 0;
-
- if (netif_msg_ifup(nesvnic))
- printk(KERN_INFO PFX "%s: enabling interface\n", netdev->name);
-
- ret = nes_init_nic_qp(nesdev, netdev);
- if (ret) {
- return ret;
- }
-
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
-
- if ((!nesvnic->of_device_registered) && (nesvnic->rdma_enabled)) {
- nesvnic->nesibdev = nes_init_ofa_device(netdev);
- if (nesvnic->nesibdev == NULL) {
- printk(KERN_ERR PFX "%s: nesvnic->nesibdev alloc failed", netdev->name);
- } else {
- nesvnic->nesibdev->nesvnic = nesvnic;
- ret = nes_register_ofa_device(nesvnic->nesibdev);
- if (ret) {
- printk(KERN_ERR PFX "%s: Unable to register RDMA device, ret = %d\n",
- netdev->name, ret);
- }
- }
- }
- /* Set packet filters */
- nic_active_bit = 1 << nesvnic->nic_index;
- nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_ACTIVE);
- nic_active |= nic_active_bit;
- nes_write_indexed(nesdev, NES_IDX_NIC_ACTIVE, nic_active);
- nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE);
- nic_active |= nic_active_bit;
- nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE, nic_active);
- nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON);
- nic_active |= nic_active_bit;
- nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);
-
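- /* Pack the 6-byte MAC address into the 16-bit high / 32-bit low halves used by the perfect filter registers */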
- macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
- macaddr_high += (u16)netdev->dev_addr[1];
-
- macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
- macaddr_low += ((u32)netdev->dev_addr[3]) << 16;
- macaddr_low += ((u32)netdev->dev_addr[4]) << 8;
- macaddr_low += (u32)netdev->dev_addr[5];
-
- /* Program the various MAC regs */
- for (i = 0; i < NES_MAX_PORT_COUNT; i++) {
- if (nesvnic->qp_nic_index[i] == 0xf) {
- break;
- }
- nes_debug(NES_DBG_NETDEV, "i=%d, perfect filter table index= %d, PERF FILTER LOW"
- " (Addr:%08X) = %08X, HIGH = %08X.\n",
- i, nesvnic->qp_nic_index[i],
- NES_IDX_PERFECT_FILTER_LOW+
- (nesvnic->qp_nic_index[i] * 8),
- macaddr_low,
- (u32)macaddr_high | NES_MAC_ADDR_VALID |
- ((((u32)nesvnic->nic_index) << 16)));
- nes_write_indexed(nesdev,
- NES_IDX_PERFECT_FILTER_LOW + (nesvnic->qp_nic_index[i] * 8),
- macaddr_low);
- nes_write_indexed(nesdev,
- NES_IDX_PERFECT_FILTER_HIGH + (nesvnic->qp_nic_index[i] * 8),
- (u32)macaddr_high | NES_MAC_ADDR_VALID |
- ((((u32)nesvnic->nic_index) << 16)));
- }
-
-
- nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
- nesvnic->nic_cq.cq_number);
- nes_read32(nesdev->regs+NES_CQE_ALLOC);
- list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
- first_nesvnic = container_of(list_pos, struct nes_vnic, list);
- if (first_nesvnic->netdev_open == 1)
- break;
- }
- if (first_nesvnic->netdev_open == 0) {
- nes_debug(NES_DBG_INIT, "Setting up MAC interrupt mask.\n");
- nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK + (0x200 * nesdev->mac_index),
- ~(NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT |
- NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR));
- first_nesvnic = nesvnic;
- }
-
- if (first_nesvnic->linkup) {
- /* Enable network packets */
- nesvnic->linkup = 1;
- netif_start_queue(netdev);
- netif_carrier_on(netdev);
- }
-
- spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
- if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_SFP_D) {
- nesdev->link_recheck = 1;
- mod_delayed_work(system_wq, &nesdev->work,
- NES_LINK_RECHECK_DELAY);
- }
- spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
-
- spin_lock_irqsave(&nesvnic->port_ibevent_lock, flags);
- if (nesvnic->of_device_registered) {
- nesdev->nesadapter->send_term_ok = 1;
- if (nesvnic->linkup == 1) {
- if (nesdev->iw_status == 0) {
- nesdev->iw_status = 1;
- nes_port_ibevent(nesvnic);
- }
- } else {
- nesdev->iw_status = 0;
- }
- }
- spin_unlock_irqrestore(&nesvnic->port_ibevent_lock, flags);
-
- napi_enable(&nesvnic->napi);
- nesvnic->netdev_open = 1;
-
- return 0;
-}
-
-
-/**
- * nes_netdev_stop
- */
-static int nes_netdev_stop(struct net_device *netdev)
-{
- struct nes_vnic *nesvnic = netdev_priv(netdev);
- struct nes_device *nesdev = nesvnic->nesdev;
- u32 nic_active_mask;
- u32 nic_active;
- struct nes_vnic *first_nesvnic = NULL;
- struct list_head *list_pos, *list_temp;
- unsigned long flags;
-
- nes_debug(NES_DBG_SHUTDOWN, "nesvnic=%p, nesdev=%p, netdev=%p %s\n",
- nesvnic, nesdev, netdev, netdev->name);
- if (nesvnic->netdev_open == 0)
- return 0;
-
- if (netif_msg_ifdown(nesvnic))
- printk(KERN_INFO PFX "%s: disabling interface\n", netdev->name);
- netif_carrier_off(netdev);
-
- /* Disable network packets */
- napi_disable(&nesvnic->napi);
- netif_stop_queue(netdev);
- list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
- first_nesvnic = container_of(list_pos, struct nes_vnic, list);
- if ((first_nesvnic->netdev_open == 1) && (first_nesvnic != nesvnic))
- break;
- }
-
- if ((first_nesvnic->netdev_open == 1) && (first_nesvnic != nesvnic) &&
- (PCI_FUNC(first_nesvnic->nesdev->pcidev->devfn) !=
- PCI_FUNC(nesvnic->nesdev->pcidev->devfn))) {
- nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+
- (0x200*nesdev->mac_index), 0xffffffff);
- nes_write_indexed(first_nesvnic->nesdev,
- NES_IDX_MAC_INT_MASK+
- (0x200*first_nesvnic->nesdev->mac_index),
- ~(NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT |
- NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR));
- } else {
- nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+(0x200*nesdev->mac_index), 0xffffffff);
- }
-
- nic_active_mask = ~((u32)(1 << nesvnic->nic_index));
- nes_write_indexed(nesdev, NES_IDX_PERFECT_FILTER_HIGH+
- (nesvnic->perfect_filter_index*8), 0);
- nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_ACTIVE);
- nic_active &= nic_active_mask;
- nes_write_indexed(nesdev, NES_IDX_NIC_ACTIVE, nic_active);
- nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
- nic_active &= nic_active_mask;
- nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
- nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE);
- nic_active &= nic_active_mask;
- nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE, nic_active);
- nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
- nic_active &= nic_active_mask;
- nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
- nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON);
- nic_active &= nic_active_mask;
- nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);
-
- spin_lock_irqsave(&nesvnic->port_ibevent_lock, flags);
- if (nesvnic->of_device_registered) {
- nesdev->nesadapter->send_term_ok = 0;
- nesdev->iw_status = 0;
- if (nesvnic->linkup == 1)
- nes_port_ibevent(nesvnic);
- }
- del_timer_sync(&nesvnic->event_timer);
- nesvnic->event_timer.function = NULL;
- spin_unlock_irqrestore(&nesvnic->port_ibevent_lock, flags);
-
- nes_destroy_nic_qp(nesvnic);
-
- nesvnic->netdev_open = 0;
-
- return 0;
-}
-
-
-/**
- * nes_nic_send
- */
-static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
-{
- struct nes_vnic *nesvnic = netdev_priv(netdev);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_hw_nic *nesnic = &nesvnic->nic;
- struct nes_hw_nic_sq_wqe *nic_sqe;
- struct tcphdr *tcph;
- __le16 *wqe_fragment_length;
- u32 wqe_misc;
- u16 wqe_fragment_index = 1; /* first fragment (0) is used by copy buffer */
- u16 skb_fragment_index;
- dma_addr_t bus_address;
-
- nic_sqe = &nesnic->sq_vbase[nesnic->sq_head];
- wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
-
- /* setup the VLAN tag if present */
- if (skb_vlan_tag_present(skb)) {
- nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
- netdev->name, skb_vlan_tag_get(skb));
- wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
- wqe_fragment_length[0] = (__force __le16) skb_vlan_tag_get(skb);
- } else
- wqe_misc = 0;
-
- /* bump past the vlan tag */
- wqe_fragment_length++;
- /* wqe_fragment_address = (u64 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX]; */
- wqe_misc |= NES_NIC_SQ_WQE_COMPLETION;
-
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (skb_is_gso(skb)) {
- tcph = tcp_hdr(skb);
- /* nes_debug(NES_DBG_NIC_TX, "%s: TSO request... is_gso = %u seg size = %u\n",
- netdev->name, skb_is_gso(skb), skb_shinfo(skb)->gso_size); */
- wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE | (u16)skb_shinfo(skb)->gso_size;
- set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX,
- ((u32)tcph->doff) |
- (((u32)(((unsigned char *)tcph) - skb->data)) << 4));
- }
- } else { /* CHECKSUM_HW */
- wqe_misc |= NES_NIC_SQ_WQE_DISABLE_CHKSUM;
- }
-
- set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX,
- skb->len);
- memcpy(&nesnic->first_frag_vbase[nesnic->sq_head].buffer,
- skb->data, min(((unsigned int)NES_FIRST_FRAG_SIZE), skb_headlen(skb)));
- wqe_fragment_length[0] = cpu_to_le16(min(((unsigned int)NES_FIRST_FRAG_SIZE),
- skb_headlen(skb)));
- wqe_fragment_length[1] = 0;
- if (skb_headlen(skb) > NES_FIRST_FRAG_SIZE) {
- if ((skb_shinfo(skb)->nr_frags + 1) > 4) {
- nes_debug(NES_DBG_NIC_TX, "%s: Packet with %u fragments not sent, skb_headlen=%u\n",
- netdev->name, skb_shinfo(skb)->nr_frags + 2, skb_headlen(skb));
- kfree_skb(skb);
- nesvnic->tx_sw_dropped++;
- return NETDEV_TX_LOCKED;
- }
- set_bit(nesnic->sq_head, nesnic->first_frag_overflow);
- bus_address = pci_map_single(nesdev->pcidev, skb->data + NES_FIRST_FRAG_SIZE,
- skb_headlen(skb) - NES_FIRST_FRAG_SIZE, PCI_DMA_TODEVICE);
- wqe_fragment_length[wqe_fragment_index++] =
- cpu_to_le16(skb_headlen(skb) - NES_FIRST_FRAG_SIZE);
- wqe_fragment_length[wqe_fragment_index] = 0;
- set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
- ((u64)(bus_address)));
- nesnic->tx_skb[nesnic->sq_head] = skb;
- }
-
- if (skb_headlen(skb) == skb->len) {
- if (skb_headlen(skb) <= NES_FIRST_FRAG_SIZE) {
- nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_2_1_IDX] = 0;
- nesnic->tx_skb[nesnic->sq_head] = skb;
- }
- } else {
- /* Deal with Fragments */
- nesnic->tx_skb[nesnic->sq_head] = skb;
- for (skb_fragment_index = 0; skb_fragment_index < skb_shinfo(skb)->nr_frags;
- skb_fragment_index++) {
- skb_frag_t *frag =
- &skb_shinfo(skb)->frags[skb_fragment_index];
- bus_address = skb_frag_dma_map(&nesdev->pcidev->dev,
- frag, 0, skb_frag_size(frag),
- DMA_TO_DEVICE);
- wqe_fragment_length[wqe_fragment_index] =
- cpu_to_le16(skb_frag_size(&skb_shinfo(skb)->frags[skb_fragment_index]));
- set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index),
- bus_address);
- wqe_fragment_index++;
- if (wqe_fragment_index < 5)
- wqe_fragment_length[wqe_fragment_index] = 0;
- }
- }
-
- set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX, wqe_misc);
- nesnic->sq_head++;
- nesnic->sq_head &= nesnic->sq_size - 1;
-
- return NETDEV_TX_OK;
-}
-
-
-/**
- * nes_netdev_start_xmit
- */
-static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
-{
- struct nes_vnic *nesvnic = netdev_priv(netdev);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_hw_nic *nesnic = &nesvnic->nic;
- struct nes_hw_nic_sq_wqe *nic_sqe;
- struct tcphdr *tcph;
- /* struct udphdr *udph; */
-#define NES_MAX_TSO_FRAGS MAX_SKB_FRAGS
- /* 64K segment plus overflow on each side */
- dma_addr_t tso_bus_address[NES_MAX_TSO_FRAGS];
- dma_addr_t bus_address;
- u32 tso_frag_index;
- u32 tso_frag_count;
- u32 tso_wqe_length;
- u32 curr_tcp_seq;
- u32 wqe_count=1;
- u32 send_rc;
- struct iphdr *iph;
- __le16 *wqe_fragment_length;
- u32 nr_frags;
- u32 original_first_length;
- /* u64 *wqe_fragment_address; */
- /* first fragment (0) is used by copy buffer */
- u16 wqe_fragment_index=1;
- u16 hoffset;
- u16 nhoffset;
- u16 wqes_needed;
- u16 wqes_available;
- u32 wqe_misc;
-
- /*
- * nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u,"
- * " (%u frags), tso_size=%u\n",
- * netdev->name, skb->len, skb_headlen(skb),
- * skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
- */
-
- if (!netif_carrier_ok(netdev))
- return NETDEV_TX_OK;
-
- if (netif_queue_stopped(netdev))
- return NETDEV_TX_BUSY;
-
- /* Check if SQ is full */
- if ((((nesnic->sq_tail+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) == 1) {
- if (!netif_queue_stopped(netdev)) {
- netif_stop_queue(netdev);
- barrier();
- if ((((((volatile u16)nesnic->sq_tail)+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) != 1) {
- netif_start_queue(netdev);
- goto sq_no_longer_full;
- }
- }
- nesvnic->sq_full++;
- return NETDEV_TX_BUSY;
- }
-
-sq_no_longer_full:
- nr_frags = skb_shinfo(skb)->nr_frags;
- if (skb_headlen(skb) > NES_FIRST_FRAG_SIZE) {
- nr_frags++;
- }
- /* Check if too many fragments */
- if (unlikely((nr_frags > 4))) {
- if (skb_is_gso(skb)) {
- nesvnic->segmented_tso_requests++;
- nesvnic->tso_requests++;
- /* Basically 4 fragments available per WQE with extended fragments */
- wqes_needed = nr_frags >> 2;
- wqes_needed += (nr_frags&3)?1:0;
- wqes_available = (((nesnic->sq_tail+nesnic->sq_size)-nesnic->sq_head) - 1) &
- (nesnic->sq_size - 1);
-
- if (unlikely(wqes_needed > wqes_available)) {
- if (!netif_queue_stopped(netdev)) {
- netif_stop_queue(netdev);
- barrier();
- wqes_available = (((((volatile u16)nesnic->sq_tail)+nesnic->sq_size)-nesnic->sq_head) - 1) &
- (nesnic->sq_size - 1);
- if (wqes_needed <= wqes_available) {
- netif_start_queue(netdev);
- goto tso_sq_no_longer_full;
- }
- }
- nesvnic->sq_full++;
- nes_debug(NES_DBG_NIC_TX, "%s: HNIC SQ full- TSO request has too many frags!\n",
- netdev->name);
- return NETDEV_TX_BUSY;
- }
-tso_sq_no_longer_full:
- /* Map all the buffers */
- for (tso_frag_count=0; tso_frag_count < skb_shinfo(skb)->nr_frags;
- tso_frag_count++) {
- skb_frag_t *frag =
- &skb_shinfo(skb)->frags[tso_frag_count];
- tso_bus_address[tso_frag_count] =
- skb_frag_dma_map(&nesdev->pcidev->dev,
- frag, 0, skb_frag_size(frag),
- DMA_TO_DEVICE);
- }
-
- tso_frag_index = 0;
- curr_tcp_seq = ntohl(tcp_hdr(skb)->seq);
- hoffset = skb_transport_header(skb) - skb->data;
- nhoffset = skb_network_header(skb) - skb->data;
- original_first_length = hoffset + ((((struct tcphdr *)skb_transport_header(skb))->doff)<<2);
-
- for (wqe_count=0; wqe_count<((u32)wqes_needed); wqe_count++) {
- tso_wqe_length = 0;
- nic_sqe = &nesnic->sq_vbase[nesnic->sq_head];
- wqe_fragment_length =
- (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
- /* setup the VLAN tag if present */
- if (skb_vlan_tag_present(skb)) {
- nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
- netdev->name,
- skb_vlan_tag_get(skb));
- wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
- wqe_fragment_length[0] = (__force __le16) skb_vlan_tag_get(skb);
- } else
- wqe_misc = 0;
-
- /* bump past the vlan tag */
- wqe_fragment_length++;
-
- /* Assumes header totally fits in allocated buffer and is in first fragment */
- if (original_first_length > NES_FIRST_FRAG_SIZE) {
- nes_debug(NES_DBG_NIC_TX, "ERROR: SKB header too big, headlen=%u, FIRST_FRAG_SIZE=%u\n",
- original_first_length, NES_FIRST_FRAG_SIZE);
- nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u,"
- " (%u frags), is_gso = %u tso_size=%u\n",
- netdev->name,
- skb->len, skb_headlen(skb),
- skb_shinfo(skb)->nr_frags, skb_is_gso(skb), skb_shinfo(skb)->gso_size);
- }
- memcpy(&nesnic->first_frag_vbase[nesnic->sq_head].buffer,
- skb->data, min(((unsigned int)NES_FIRST_FRAG_SIZE),
- original_first_length));
- iph = (struct iphdr *)
- (&nesnic->first_frag_vbase[nesnic->sq_head].buffer[nhoffset]);
- tcph = (struct tcphdr *)
- (&nesnic->first_frag_vbase[nesnic->sq_head].buffer[hoffset]);
- if ((wqe_count+1)!=(u32)wqes_needed) {
- tcph->fin = 0;
- tcph->psh = 0;
- tcph->rst = 0;
- tcph->urg = 0;
- }
- if (wqe_count) {
- tcph->syn = 0;
- }
- tcph->seq = htonl(curr_tcp_seq);
- wqe_fragment_length[0] = cpu_to_le16(min(((unsigned int)NES_FIRST_FRAG_SIZE),
- original_first_length));
-
- wqe_fragment_index = 1;
- if ((wqe_count==0) && (skb_headlen(skb) > original_first_length)) {
- set_bit(nesnic->sq_head, nesnic->first_frag_overflow);
- bus_address = pci_map_single(nesdev->pcidev, skb->data + original_first_length,
- skb_headlen(skb) - original_first_length, PCI_DMA_TODEVICE);
- wqe_fragment_length[wqe_fragment_index++] =
- cpu_to_le16(skb_headlen(skb) - original_first_length);
- wqe_fragment_length[wqe_fragment_index] = 0;
- set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
- bus_address);
- tso_wqe_length += skb_headlen(skb) -
- original_first_length;
- }
- while (wqe_fragment_index < 5) {
- wqe_fragment_length[wqe_fragment_index] =
- cpu_to_le16(skb_frag_size(&skb_shinfo(skb)->frags[tso_frag_index]));
- set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index),
- (u64)tso_bus_address[tso_frag_index]);
- wqe_fragment_index++;
- tso_wqe_length += skb_frag_size(&skb_shinfo(skb)->frags[tso_frag_index++]);
- if (wqe_fragment_index < 5)
- wqe_fragment_length[wqe_fragment_index] = 0;
- if (tso_frag_index == tso_frag_count)
- break;
- }
- if ((wqe_count+1) == (u32)wqes_needed) {
- nesnic->tx_skb[nesnic->sq_head] = skb;
- } else {
- nesnic->tx_skb[nesnic->sq_head] = NULL;
- }
- wqe_misc |= NES_NIC_SQ_WQE_COMPLETION | (u16)skb_shinfo(skb)->gso_size;
- if ((tso_wqe_length + original_first_length) > skb_shinfo(skb)->gso_size) {
- wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE;
- } else {
- iph->tot_len = htons(tso_wqe_length + original_first_length - nhoffset);
- }
-
- set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX,
- wqe_misc);
- set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX,
- ((u32)tcph->doff) | (((u32)hoffset) << 4));
-
- set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX,
- tso_wqe_length + original_first_length);
- curr_tcp_seq += tso_wqe_length;
- nesnic->sq_head++;
- nesnic->sq_head &= nesnic->sq_size-1;
- }
- } else {
- nesvnic->linearized_skbs++;
- hoffset = skb_transport_header(skb) - skb->data;
- nhoffset = skb_network_header(skb) - skb->data;
- skb_linearize(skb);
- skb_set_transport_header(skb, hoffset);
- skb_set_network_header(skb, nhoffset);
- send_rc = nes_nic_send(skb, netdev);
- if (send_rc != NETDEV_TX_OK)
- return NETDEV_TX_OK;
- }
- } else {
- send_rc = nes_nic_send(skb, netdev);
- if (send_rc != NETDEV_TX_OK)
- return NETDEV_TX_OK;
- }
-
- barrier();
-
- if (wqe_count)
- nes_write32(nesdev->regs+NES_WQE_ALLOC,
- (wqe_count << 24) | (1 << 23) | nesvnic->nic.qp_id);
-
- netdev->trans_start = jiffies;
-
- return NETDEV_TX_OK;
-}
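
The SQ-full test near the top of nes_netdev_start_xmit() is the usual
power-of-two ring-buffer check with one slot kept unused so that full and
empty remain distinguishable. A standalone sketch of the equivalent
arithmetic, assuming head and tail are free-running indices masked by
size - 1 (names are illustrative):

#include <stdint.h>
#include <stdbool.h>

/* Number of WQEs currently posted (head is the producer, tail the consumer). */
static uint16_t ring_in_use(uint16_t head, uint16_t tail, uint16_t size)
{
	return (head - tail) & (size - 1);
}

/* Full when only the reserved gap slot remains between head and tail. */
static bool ring_full(uint16_t head, uint16_t tail, uint16_t size)
{
	return ring_in_use(head, tail, size) == (uint16_t)(size - 1);
}

The driver expresses the same condition as
(((sq_tail + 2 * sq_size) - sq_head) & (sq_size - 1)) == 1, i.e. exactly one
free slot left.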
-
-
-/**
- * nes_netdev_get_stats
- */
-static struct net_device_stats *nes_netdev_get_stats(struct net_device *netdev)
-{
- struct nes_vnic *nesvnic = netdev_priv(netdev);
- struct nes_device *nesdev = nesvnic->nesdev;
- u64 u64temp;
- u32 u32temp;
-
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_ENDNODE0_NSTAT_RX_DISCARD + (nesvnic->nic_index*0x200));
- nesvnic->netstats.rx_dropped += u32temp;
- nesvnic->endnode_nstat_rx_discard += u32temp;
-
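- /* the octet/frame counters are 64-bit values exposed as LO/HI 32-bit register pairs */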
- u64temp = (u64)nes_read_indexed(nesdev,
- NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO + (nesvnic->nic_index*0x200));
- u64temp += ((u64)nes_read_indexed(nesdev,
- NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI + (nesvnic->nic_index*0x200))) << 32;
-
- nesvnic->endnode_nstat_rx_octets += u64temp;
- nesvnic->netstats.rx_bytes += u64temp;
-
- u64temp = (u64)nes_read_indexed(nesdev,
- NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO + (nesvnic->nic_index*0x200));
- u64temp += ((u64)nes_read_indexed(nesdev,
- NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI + (nesvnic->nic_index*0x200))) << 32;
-
- nesvnic->endnode_nstat_rx_frames += u64temp;
- nesvnic->netstats.rx_packets += u64temp;
-
- u64temp = (u64)nes_read_indexed(nesdev,
- NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO + (nesvnic->nic_index*0x200));
- u64temp += ((u64)nes_read_indexed(nesdev,
- NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI + (nesvnic->nic_index*0x200))) << 32;
-
- nesvnic->endnode_nstat_tx_octets += u64temp;
- nesvnic->netstats.tx_bytes += u64temp;
-
- u64temp = (u64)nes_read_indexed(nesdev,
- NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO + (nesvnic->nic_index*0x200));
- u64temp += ((u64)nes_read_indexed(nesdev,
- NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI + (nesvnic->nic_index*0x200))) << 32;
-
- nesvnic->endnode_nstat_tx_frames += u64temp;
- nesvnic->netstats.tx_packets += u64temp;
-
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_MAC_RX_SHORT_FRAMES + (nesvnic->nesdev->mac_index*0x200));
- nesvnic->netstats.rx_dropped += u32temp;
- nesvnic->nesdev->mac_rx_errors += u32temp;
- nesvnic->nesdev->mac_rx_short_frames += u32temp;
-
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_MAC_RX_OVERSIZED_FRAMES + (nesvnic->nesdev->mac_index*0x200));
- nesvnic->netstats.rx_dropped += u32temp;
- nesvnic->nesdev->mac_rx_errors += u32temp;
- nesvnic->nesdev->mac_rx_oversized_frames += u32temp;
-
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_MAC_RX_JABBER_FRAMES + (nesvnic->nesdev->mac_index*0x200));
- nesvnic->netstats.rx_dropped += u32temp;
- nesvnic->nesdev->mac_rx_errors += u32temp;
- nesvnic->nesdev->mac_rx_jabber_frames += u32temp;
-
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
- nesvnic->netstats.rx_dropped += u32temp;
- nesvnic->nesdev->mac_rx_errors += u32temp;
- nesvnic->nesdev->mac_rx_symbol_err_frames += u32temp;
-
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_MAC_RX_LENGTH_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
- nesvnic->netstats.rx_length_errors += u32temp;
- nesvnic->nesdev->mac_rx_errors += u32temp;
-
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_MAC_RX_CRC_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
- nesvnic->nesdev->mac_rx_errors += u32temp;
- nesvnic->nesdev->mac_rx_crc_errors += u32temp;
- nesvnic->netstats.rx_crc_errors += u32temp;
-
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_MAC_TX_ERRORS + (nesvnic->nesdev->mac_index*0x200));
- nesvnic->nesdev->mac_tx_errors += u32temp;
- nesvnic->netstats.tx_errors += u32temp;
-
- return &nesvnic->netstats;
-}
-
-
-/**
- * nes_netdev_tx_timeout
- */
-static void nes_netdev_tx_timeout(struct net_device *netdev)
-{
- struct nes_vnic *nesvnic = netdev_priv(netdev);
-
- if (netif_msg_timer(nesvnic))
- nes_debug(NES_DBG_NIC_TX, "%s: tx timeout\n", netdev->name);
-}
-
-
-/**
- * nes_netdev_set_mac_address
- */
-static int nes_netdev_set_mac_address(struct net_device *netdev, void *p)
-{
- struct nes_vnic *nesvnic = netdev_priv(netdev);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct sockaddr *mac_addr = p;
- int i;
- u32 macaddr_low;
- u16 macaddr_high;
-
- if (!is_valid_ether_addr(mac_addr->sa_data))
- return -EADDRNOTAVAIL;
-
- memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len);
- printk(PFX "%s: Address length = %d, Address = %pM\n",
- __func__, netdev->addr_len, mac_addr->sa_data);
- macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
- macaddr_high += (u16)netdev->dev_addr[1];
- macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
- macaddr_low += ((u32)netdev->dev_addr[3]) << 16;
- macaddr_low += ((u32)netdev->dev_addr[4]) << 8;
- macaddr_low += (u32)netdev->dev_addr[5];
-
- for (i = 0; i < NES_MAX_PORT_COUNT; i++) {
- if (nesvnic->qp_nic_index[i] == 0xf) {
- break;
- }
- nes_write_indexed(nesdev,
- NES_IDX_PERFECT_FILTER_LOW + (nesvnic->qp_nic_index[i] * 8),
- macaddr_low);
- nes_write_indexed(nesdev,
- NES_IDX_PERFECT_FILTER_HIGH + (nesvnic->qp_nic_index[i] * 8),
- (u32)macaddr_high | NES_MAC_ADDR_VALID |
- ((((u32)nesvnic->nic_index) << 16)));
- }
- return 0;
-}
-
-
-static void set_allmulti(struct nes_device *nesdev, u32 nic_active_bit)
-{
- u32 nic_active;
-
- nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
- nic_active |= nic_active_bit;
- nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
- nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
- nic_active &= ~nic_active_bit;
- nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
-}
-
-#define get_addr(addrs, index) ((addrs) + (index) * ETH_ALEN)
-
-/**
- * nes_netdev_set_multicast_list
- */
-static void nes_netdev_set_multicast_list(struct net_device *netdev)
-{
- struct nes_vnic *nesvnic = netdev_priv(netdev);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
- u32 nic_active_bit;
- u32 nic_active;
- u32 perfect_filter_register_address;
- u32 macaddr_low;
- u16 macaddr_high;
- u8 mc_all_on = 0;
- u8 mc_index;
- int mc_nic_index = -1;
- u8 pft_entries_preallocated = max(nesadapter->adapter_fcn_count *
- nics_per_function, 4);
- u8 max_pft_entries_avaiable = NES_PFT_SIZE - pft_entries_preallocated;
- unsigned long flags;
- int mc_count = netdev_mc_count(netdev);
-
- spin_lock_irqsave(&nesadapter->resource_lock, flags);
- nic_active_bit = 1 << nesvnic->nic_index;
-
- if (netdev->flags & IFF_PROMISC) {
- nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
- nic_active |= nic_active_bit;
- nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
- nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
- nic_active |= nic_active_bit;
- nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
- mc_all_on = 1;
- } else if ((netdev->flags & IFF_ALLMULTI) ||
- (nesvnic->nic_index > 3)) {
- set_allmulti(nesdev, nic_active_bit);
- mc_all_on = 1;
- } else {
- nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
- nic_active &= ~nic_active_bit;
- nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
- nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
- nic_active &= ~nic_active_bit;
- nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
- }
-
- nes_debug(NES_DBG_NIC_RX, "Number of MC entries = %d, Promiscuous = %d, All Multicast = %d.\n",
- mc_count, !!(netdev->flags & IFF_PROMISC),
- !!(netdev->flags & IFF_ALLMULTI));
- if (!mc_all_on) {
- char *addrs;
- int i;
- struct netdev_hw_addr *ha;
-
- addrs = kmalloc(ETH_ALEN * mc_count, GFP_ATOMIC);
- if (!addrs) {
- set_allmulti(nesdev, nic_active_bit);
- goto unlock;
- }
- i = 0;
- netdev_for_each_mc_addr(ha, netdev)
- memcpy(get_addr(addrs, i++), ha->addr, ETH_ALEN);
-
- perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW +
- pft_entries_preallocated * 0x8;
- for (i = 0, mc_index = 0; mc_index < max_pft_entries_avaiable;
- mc_index++) {
- while (i < mc_count && nesvnic->mcrq_mcast_filter &&
- ((mc_nic_index = nesvnic->mcrq_mcast_filter(nesvnic,
- get_addr(addrs, i++))) == 0));
- if (mc_nic_index < 0)
- mc_nic_index = nesvnic->nic_index;
- while (nesadapter->pft_mcast_map[mc_index] < 16 &&
- nesadapter->pft_mcast_map[mc_index] !=
- nesvnic->nic_index &&
- mc_index < max_pft_entries_avaiable) {
- nes_debug(NES_DBG_NIC_RX,
- "mc_index=%d skipping nic_index=%d, "
- "used for=%d \n", mc_index,
- nesvnic->nic_index,
- nesadapter->pft_mcast_map[mc_index]);
- mc_index++;
- }
- if (mc_index >= max_pft_entries_avaiable)
- break;
- if (i < mc_count) {
- char *addr = get_addr(addrs, i++);
-
- nes_debug(NES_DBG_NIC_RX, "Assigning MC Address %pM to register 0x%04X nic_idx=%d\n",
- addr,
- perfect_filter_register_address+(mc_index * 8),
- mc_nic_index);
- macaddr_high = ((u8) addr[0]) << 8;
- macaddr_high += (u8) addr[1];
- macaddr_low = ((u8) addr[2]) << 24;
- macaddr_low += ((u8) addr[3]) << 16;
- macaddr_low += ((u8) addr[4]) << 8;
- macaddr_low += (u8) addr[5];
-
- nes_write_indexed(nesdev,
- perfect_filter_register_address+(mc_index * 8),
- macaddr_low);
- nes_write_indexed(nesdev,
- perfect_filter_register_address+4+(mc_index * 8),
- (u32)macaddr_high | NES_MAC_ADDR_VALID |
- ((((u32)(1<<mc_nic_index)) << 16)));
- nesadapter->pft_mcast_map[mc_index] =
- nesvnic->nic_index;
- } else {
- nes_debug(NES_DBG_NIC_RX, "Clearing MC Address at register 0x%04X\n",
- perfect_filter_register_address+(mc_index * 8));
- nes_write_indexed(nesdev,
- perfect_filter_register_address+4+(mc_index * 8),
- 0);
- nesadapter->pft_mcast_map[mc_index] = 255;
- }
- }
- kfree(addrs);
- /* PFT is not large enough */
- if (i < mc_count)
- set_allmulti(nesdev, nic_active_bit);
- }
-
-unlock:
- spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
-}
-
-
-/**
- * nes_netdev_change_mtu
- */
-static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
-{
- struct nes_vnic *nesvnic = netdev_priv(netdev);
- struct nes_device *nesdev = nesvnic->nesdev;
- int ret = 0;
- u8 jumbomode = 0;
- u32 nic_active;
- u32 nic_active_bit;
- u32 uc_all_active;
- u32 mc_all_active;
-
- if ((new_mtu < ETH_ZLEN) || (new_mtu > max_mtu))
- return -EINVAL;
-
- netdev->mtu = new_mtu;
- nesvnic->max_frame_size = new_mtu + VLAN_ETH_HLEN;
-
- if (netdev->mtu > 1500) {
- jumbomode=1;
- }
- nes_nic_init_timer_defaults(nesdev, jumbomode);
-
- if (netif_running(netdev)) {
- nic_active_bit = 1 << nesvnic->nic_index;
- mc_all_active = nes_read_indexed(nesdev,
- NES_IDX_NIC_MULTICAST_ALL) & nic_active_bit;
- uc_all_active = nes_read_indexed(nesdev,
- NES_IDX_NIC_UNICAST_ALL) & nic_active_bit;
-
- nes_netdev_stop(netdev);
- nes_netdev_open(netdev);
-
- nic_active = nes_read_indexed(nesdev,
- NES_IDX_NIC_MULTICAST_ALL);
- nic_active |= mc_all_active;
- nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL,
- nic_active);
-
- nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
- nic_active |= uc_all_active;
- nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
- }
-
- return ret;
-}
-
-
-static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
- "Link Change Interrupts",
- "Linearized SKBs",
- "T/GSO Requests",
- "Pause Frames Sent",
- "Pause Frames Received",
- "Internal Routing Errors",
- "SQ SW Dropped SKBs",
- "SQ Full",
- "Segmented TSO Requests",
- "Rx Symbol Errors",
- "Rx Jabber Errors",
- "Rx Oversized Frames",
- "Rx Short Frames",
- "Rx Length Errors",
- "Rx CRC Errors",
- "Rx Port Discard",
- "Endnode Rx Discards",
- "Endnode Rx Octets",
- "Endnode Rx Frames",
- "Endnode Tx Octets",
- "Endnode Tx Frames",
- "Tx Errors",
- "mh detected",
- "mh pauses",
- "Retransmission Count",
- "CM Connects",
- "CM Accepts",
- "Disconnects",
- "Connected Events",
- "Connect Requests",
- "CM Rejects",
- "ModifyQP Timeouts",
- "CreateQPs",
- "SW DestroyQPs",
- "DestroyQPs",
- "CM Closes",
- "CM Packets Sent",
- "CM Packets Bounced",
- "CM Packets Created",
- "CM Packets Rcvd",
- "CM Packets Dropped",
- "CM Packets Retrans",
- "CM Listens Created",
- "CM Listens Destroyed",
- "CM Backlog Drops",
- "CM Loopbacks",
- "CM Nodes Created",
- "CM Nodes Destroyed",
- "CM Accel Drops",
- "CM Resets Received",
- "Free 4Kpbls",
- "Free 256pbls",
- "Timer Inits",
- "LRO aggregated",
- "LRO flushed",
- "LRO no_desc",
- "PAU CreateQPs",
- "PAU DestroyQPs",
-};
-#define NES_ETHTOOL_STAT_COUNT ARRAY_SIZE(nes_ethtool_stringset)
-
-
-/**
- * nes_netdev_get_sset_count
- */
-static int nes_netdev_get_sset_count(struct net_device *netdev, int stringset)
-{
- if (stringset == ETH_SS_STATS)
- return NES_ETHTOOL_STAT_COUNT;
- else
- return -EINVAL;
-}
-
-
-/**
- * nes_netdev_get_strings
- */
-static void nes_netdev_get_strings(struct net_device *netdev, u32 stringset,
- u8 *ethtool_strings)
-{
- if (stringset == ETH_SS_STATS)
- memcpy(ethtool_strings,
- &nes_ethtool_stringset,
- sizeof(nes_ethtool_stringset));
-}
-
-
-/**
- * nes_netdev_get_ethtool_stats
- */
-
-static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *target_ethtool_stats, u64 *target_stat_values)
-{
- u64 u64temp;
- struct nes_vnic *nesvnic = netdev_priv(netdev);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- u32 nic_count;
- u32 u32temp;
- u32 index = 0;
-
- target_ethtool_stats->n_stats = NES_ETHTOOL_STAT_COUNT;
- target_stat_values[index] = nesvnic->nesdev->link_status_interrupts;
- target_stat_values[++index] = nesvnic->linearized_skbs;
- target_stat_values[++index] = nesvnic->tso_requests;
-
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_MAC_TX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
- nesvnic->nesdev->mac_pause_frames_sent += u32temp;
- target_stat_values[++index] = nesvnic->nesdev->mac_pause_frames_sent;
-
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_MAC_RX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
- nesvnic->nesdev->mac_pause_frames_received += u32temp;
-
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_PORT_RX_DISCARDS + (nesvnic->nesdev->mac_index*0x40));
- nesvnic->nesdev->port_rx_discards += u32temp;
- nesvnic->netstats.rx_dropped += u32temp;
-
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_PORT_TX_DISCARDS + (nesvnic->nesdev->mac_index*0x40));
- nesvnic->nesdev->port_tx_discards += u32temp;
- nesvnic->netstats.tx_dropped += u32temp;
-
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_MAC_RX_SHORT_FRAMES + (nesvnic->nesdev->mac_index*0x200));
- nesvnic->netstats.rx_dropped += u32temp;
- nesvnic->nesdev->mac_rx_errors += u32temp;
- nesvnic->nesdev->mac_rx_short_frames += u32temp;
-
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_MAC_RX_OVERSIZED_FRAMES + (nesvnic->nesdev->mac_index*0x200));
- nesvnic->netstats.rx_dropped += u32temp;
- nesvnic->nesdev->mac_rx_errors += u32temp;
- nesvnic->nesdev->mac_rx_oversized_frames += u32temp;
-
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_MAC_RX_JABBER_FRAMES + (nesvnic->nesdev->mac_index*0x200));
- nesvnic->netstats.rx_dropped += u32temp;
- nesvnic->nesdev->mac_rx_errors += u32temp;
- nesvnic->nesdev->mac_rx_jabber_frames += u32temp;
-
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
- nesvnic->netstats.rx_dropped += u32temp;
- nesvnic->nesdev->mac_rx_errors += u32temp;
- nesvnic->nesdev->mac_rx_symbol_err_frames += u32temp;
-
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_MAC_RX_LENGTH_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
- nesvnic->netstats.rx_length_errors += u32temp;
- nesvnic->nesdev->mac_rx_errors += u32temp;
-
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_MAC_RX_CRC_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
- nesvnic->nesdev->mac_rx_errors += u32temp;
- nesvnic->nesdev->mac_rx_crc_errors += u32temp;
- nesvnic->netstats.rx_crc_errors += u32temp;
-
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_MAC_TX_ERRORS + (nesvnic->nesdev->mac_index*0x200));
- nesvnic->nesdev->mac_tx_errors += u32temp;
- nesvnic->netstats.tx_errors += u32temp;
-
- for (nic_count = 0; nic_count < NES_MAX_PORT_COUNT; nic_count++) {
- if (nesvnic->qp_nic_index[nic_count] == 0xf)
- break;
-
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_ENDNODE0_NSTAT_RX_DISCARD +
- (nesvnic->qp_nic_index[nic_count]*0x200));
- nesvnic->netstats.rx_dropped += u32temp;
- nesvnic->endnode_nstat_rx_discard += u32temp;
-
- u64temp = (u64)nes_read_indexed(nesdev,
- NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO +
- (nesvnic->qp_nic_index[nic_count]*0x200));
- u64temp += ((u64)nes_read_indexed(nesdev,
- NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI +
- (nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
-
- nesvnic->endnode_nstat_rx_octets += u64temp;
- nesvnic->netstats.rx_bytes += u64temp;
-
- u64temp = (u64)nes_read_indexed(nesdev,
- NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO +
- (nesvnic->qp_nic_index[nic_count]*0x200));
- u64temp += ((u64)nes_read_indexed(nesdev,
- NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI +
- (nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
-
- nesvnic->endnode_nstat_rx_frames += u64temp;
- nesvnic->netstats.rx_packets += u64temp;
-
- u64temp = (u64)nes_read_indexed(nesdev,
- NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO +
- (nesvnic->qp_nic_index[nic_count]*0x200));
- u64temp += ((u64)nes_read_indexed(nesdev,
- NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI +
- (nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
-
- nesvnic->endnode_nstat_tx_octets += u64temp;
- nesvnic->netstats.tx_bytes += u64temp;
-
- u64temp = (u64)nes_read_indexed(nesdev,
- NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO +
- (nesvnic->qp_nic_index[nic_count]*0x200));
- u64temp += ((u64)nes_read_indexed(nesdev,
- NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI +
- (nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
-
- nesvnic->endnode_nstat_tx_frames += u64temp;
- nesvnic->netstats.tx_packets += u64temp;
-
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_IPV4_TCP_REXMITS + (nesvnic->qp_nic_index[nic_count]*0x200));
- nesvnic->endnode_ipv4_tcp_retransmits += u32temp;
- }
-
- target_stat_values[++index] = nesvnic->nesdev->mac_pause_frames_received;
- target_stat_values[++index] = nesdev->nesadapter->nic_rx_eth_route_err;
- target_stat_values[++index] = nesvnic->tx_sw_dropped;
- target_stat_values[++index] = nesvnic->sq_full;
- target_stat_values[++index] = nesvnic->segmented_tso_requests;
- target_stat_values[++index] = nesvnic->nesdev->mac_rx_symbol_err_frames;
- target_stat_values[++index] = nesvnic->nesdev->mac_rx_jabber_frames;
- target_stat_values[++index] = nesvnic->nesdev->mac_rx_oversized_frames;
- target_stat_values[++index] = nesvnic->nesdev->mac_rx_short_frames;
- target_stat_values[++index] = nesvnic->netstats.rx_length_errors;
- target_stat_values[++index] = nesvnic->nesdev->mac_rx_crc_errors;
- target_stat_values[++index] = nesvnic->nesdev->port_rx_discards;
- target_stat_values[++index] = nesvnic->endnode_nstat_rx_discard;
- target_stat_values[++index] = nesvnic->endnode_nstat_rx_octets;
- target_stat_values[++index] = nesvnic->endnode_nstat_rx_frames;
- target_stat_values[++index] = nesvnic->endnode_nstat_tx_octets;
- target_stat_values[++index] = nesvnic->endnode_nstat_tx_frames;
- target_stat_values[++index] = nesvnic->nesdev->mac_tx_errors;
- target_stat_values[++index] = mh_detected;
- target_stat_values[++index] = mh_pauses_sent;
- target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
- target_stat_values[++index] = atomic_read(&cm_connects);
- target_stat_values[++index] = atomic_read(&cm_accepts);
- target_stat_values[++index] = atomic_read(&cm_disconnects);
- target_stat_values[++index] = atomic_read(&cm_connecteds);
- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
- target_stat_values[++index] = atomic_read(&cm_rejects);
- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
- target_stat_values[++index] = atomic_read(&qps_created);
- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
- target_stat_values[++index] = atomic_read(&qps_destroyed);
- target_stat_values[++index] = atomic_read(&cm_closes);
- target_stat_values[++index] = cm_packets_sent;
- target_stat_values[++index] = cm_packets_bounced;
- target_stat_values[++index] = cm_packets_created;
- target_stat_values[++index] = cm_packets_received;
- target_stat_values[++index] = cm_packets_dropped;
- target_stat_values[++index] = cm_packets_retrans;
- target_stat_values[++index] = atomic_read(&cm_listens_created);
- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
- target_stat_values[++index] = cm_backlog_drops;
- target_stat_values[++index] = atomic_read(&cm_loopbacks);
- target_stat_values[++index] = atomic_read(&cm_nodes_created);
- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
- target_stat_values[++index] = nesadapter->free_4kpbl;
- target_stat_values[++index] = nesadapter->free_256pbl;
- target_stat_values[++index] = int_mod_timer_init;
- target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
- target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
- target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
- target_stat_values[++index] = atomic_read(&pau_qps_created);
- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
-}
-
-/**
- * nes_netdev_get_drvinfo
- */
-static void nes_netdev_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct nes_vnic *nesvnic = netdev_priv(netdev);
- struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
-
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(nesvnic->nesdev->pcidev),
- sizeof(drvinfo->bus_info));
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%u.%u", nesadapter->firmware_version >> 16,
- nesadapter->firmware_version & 0x000000ff);
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
- drvinfo->testinfo_len = 0;
- drvinfo->eedump_len = 0;
- drvinfo->regdump_len = 0;
-}
-
-
-/**
- * nes_netdev_set_coalesce
- */
-static int nes_netdev_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *et_coalesce)
-{
- struct nes_vnic *nesvnic = netdev_priv(netdev);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
- unsigned long flags;
-
- spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
- if (et_coalesce->rx_max_coalesced_frames_low) {
- shared_timer->threshold_low = et_coalesce->rx_max_coalesced_frames_low;
- }
- if (et_coalesce->rx_max_coalesced_frames_irq) {
- shared_timer->threshold_target = et_coalesce->rx_max_coalesced_frames_irq;
- }
- if (et_coalesce->rx_max_coalesced_frames_high) {
- shared_timer->threshold_high = et_coalesce->rx_max_coalesced_frames_high;
- }
- if (et_coalesce->rx_coalesce_usecs_low) {
- shared_timer->timer_in_use_min = et_coalesce->rx_coalesce_usecs_low;
- }
- if (et_coalesce->rx_coalesce_usecs_high) {
- shared_timer->timer_in_use_max = et_coalesce->rx_coalesce_usecs_high;
- }
- spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
-
- /* using this to drive total interrupt moderation */
- nesadapter->et_rx_coalesce_usecs_irq = et_coalesce->rx_coalesce_usecs_irq;
- if (et_coalesce->use_adaptive_rx_coalesce) {
- nesadapter->et_use_adaptive_rx_coalesce = 1;
- nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT_DYNAMIC;
- nesadapter->et_rx_coalesce_usecs_irq = 0;
- if (et_coalesce->pkt_rate_low) {
- nesadapter->et_pkt_rate_low = et_coalesce->pkt_rate_low;
- }
- } else {
- nesadapter->et_use_adaptive_rx_coalesce = 0;
- nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT;
- if (nesadapter->et_rx_coalesce_usecs_irq) {
- nes_write32(nesdev->regs+NES_PERIODIC_CONTROL,
- 0x80000000 | ((u32)(nesadapter->et_rx_coalesce_usecs_irq*8)));
- }
- }
- return 0;
-}
-
-
-/**
- * nes_netdev_get_coalesce
- */
-static int nes_netdev_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *et_coalesce)
-{
- struct nes_vnic *nesvnic = netdev_priv(netdev);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- struct ethtool_coalesce temp_et_coalesce;
- struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
- unsigned long flags;
-
- memset(&temp_et_coalesce, 0, sizeof(temp_et_coalesce));
- temp_et_coalesce.rx_coalesce_usecs_irq = nesadapter->et_rx_coalesce_usecs_irq;
- temp_et_coalesce.use_adaptive_rx_coalesce = nesadapter->et_use_adaptive_rx_coalesce;
- temp_et_coalesce.rate_sample_interval = nesadapter->et_rate_sample_interval;
- temp_et_coalesce.pkt_rate_low = nesadapter->et_pkt_rate_low;
- spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
- temp_et_coalesce.rx_max_coalesced_frames_low = shared_timer->threshold_low;
- temp_et_coalesce.rx_max_coalesced_frames_irq = shared_timer->threshold_target;
- temp_et_coalesce.rx_max_coalesced_frames_high = shared_timer->threshold_high;
- temp_et_coalesce.rx_coalesce_usecs_low = shared_timer->timer_in_use_min;
- temp_et_coalesce.rx_coalesce_usecs_high = shared_timer->timer_in_use_max;
- if (nesadapter->et_use_adaptive_rx_coalesce) {
- temp_et_coalesce.rx_coalesce_usecs_irq = shared_timer->timer_in_use;
- }
- spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
- memcpy(et_coalesce, &temp_et_coalesce, sizeof(*et_coalesce));
- return 0;
-}
-
-
-/**
- * nes_netdev_get_pauseparam
- */
-static void nes_netdev_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *et_pauseparam)
-{
- struct nes_vnic *nesvnic = netdev_priv(netdev);
-
- et_pauseparam->autoneg = 0;
- et_pauseparam->rx_pause = (nesvnic->nesdev->disable_rx_flow_control == 0) ? 1:0;
- et_pauseparam->tx_pause = (nesvnic->nesdev->disable_tx_flow_control == 0) ? 1:0;
-}
-
-
-/**
- * nes_netdev_set_pauseparam
- */
-static int nes_netdev_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *et_pauseparam)
-{
- struct nes_vnic *nesvnic = netdev_priv(netdev);
- struct nes_device *nesdev = nesvnic->nesdev;
- u32 u32temp;
-
- if (et_pauseparam->autoneg) {
- /* TODO: should return unsupported */
- return 0;
- }
- if ((et_pauseparam->tx_pause == 1) && (nesdev->disable_tx_flow_control == 1)) {
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
- u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
- nes_write_indexed(nesdev,
- NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
- nesdev->disable_tx_flow_control = 0;
- } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) {
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
- u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
- nes_write_indexed(nesdev,
- NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
- nesdev->disable_tx_flow_control = 1;
- }
- if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) {
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40));
- u32temp &= ~NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE;
- nes_write_indexed(nesdev,
- NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40), u32temp);
- nesdev->disable_rx_flow_control = 0;
- } else if ((et_pauseparam->rx_pause == 0) && (nesdev->disable_rx_flow_control == 0)) {
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40));
- u32temp |= NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE;
- nes_write_indexed(nesdev,
- NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40), u32temp);
- nesdev->disable_rx_flow_control = 1;
- }
-
- return 0;
-}
-
-
-/**
- * nes_netdev_get_settings
- */
-static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd)
-{
- struct nes_vnic *nesvnic = netdev_priv(netdev);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- u32 mac_index = nesdev->mac_index;
- u8 phy_type = nesadapter->phy_type[mac_index];
- u8 phy_index = nesadapter->phy_index[mac_index];
- u16 phy_data;
-
- et_cmd->duplex = DUPLEX_FULL;
- et_cmd->port = PORT_MII;
- et_cmd->maxtxpkt = 511;
- et_cmd->maxrxpkt = 511;
-
- if (nesadapter->OneG_Mode) {
- ethtool_cmd_speed_set(et_cmd, SPEED_1000);
- if (phy_type == NES_PHY_TYPE_PUMA_1G) {
- et_cmd->supported = SUPPORTED_1000baseT_Full;
- et_cmd->advertising = ADVERTISED_1000baseT_Full;
- et_cmd->autoneg = AUTONEG_DISABLE;
- et_cmd->transceiver = XCVR_INTERNAL;
- et_cmd->phy_address = mac_index;
- } else {
- unsigned long flags;
- et_cmd->supported = SUPPORTED_1000baseT_Full
- | SUPPORTED_Autoneg;
- et_cmd->advertising = ADVERTISED_1000baseT_Full
- | ADVERTISED_Autoneg;
- spin_lock_irqsave(&nesadapter->phy_lock, flags);
- nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
- spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
- if (phy_data & 0x1000)
- et_cmd->autoneg = AUTONEG_ENABLE;
- else
- et_cmd->autoneg = AUTONEG_DISABLE;
- et_cmd->transceiver = XCVR_EXTERNAL;
- et_cmd->phy_address = phy_index;
- }
- return 0;
- }
- if ((phy_type == NES_PHY_TYPE_ARGUS) ||
- (phy_type == NES_PHY_TYPE_SFP_D) ||
- (phy_type == NES_PHY_TYPE_KR)) {
- et_cmd->transceiver = XCVR_EXTERNAL;
- et_cmd->port = PORT_FIBRE;
- et_cmd->supported = SUPPORTED_FIBRE;
- et_cmd->advertising = ADVERTISED_FIBRE;
- et_cmd->phy_address = phy_index;
- } else {
- et_cmd->transceiver = XCVR_INTERNAL;
- et_cmd->supported = SUPPORTED_10000baseT_Full;
- et_cmd->advertising = ADVERTISED_10000baseT_Full;
- et_cmd->phy_address = mac_index;
- }
- ethtool_cmd_speed_set(et_cmd, SPEED_10000);
- et_cmd->autoneg = AUTONEG_DISABLE;
- return 0;
-}
-
-
-/**
- * nes_netdev_set_settings
- */
-static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd)
-{
- struct nes_vnic *nesvnic = netdev_priv(netdev);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
-
- if ((nesadapter->OneG_Mode) &&
- (nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G)) {
- unsigned long flags;
- u16 phy_data;
- u8 phy_index = nesadapter->phy_index[nesdev->mac_index];
-
- spin_lock_irqsave(&nesadapter->phy_lock, flags);
- nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
- if (et_cmd->autoneg) {
- /* Turn on Full duplex, Autoneg, and restart autonegotiation */
- phy_data |= 0x1300;
- } else {
- /* Turn off autoneg */
- phy_data &= ~0x1000;
- }
- nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data);
- spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
- }
-
- return 0;
-}
-
-
-static const struct ethtool_ops nes_ethtool_ops = {
- .get_link = ethtool_op_get_link,
- .get_settings = nes_netdev_get_settings,
- .set_settings = nes_netdev_set_settings,
- .get_strings = nes_netdev_get_strings,
- .get_sset_count = nes_netdev_get_sset_count,
- .get_ethtool_stats = nes_netdev_get_ethtool_stats,
- .get_drvinfo = nes_netdev_get_drvinfo,
- .get_coalesce = nes_netdev_get_coalesce,
- .set_coalesce = nes_netdev_set_coalesce,
- .get_pauseparam = nes_netdev_get_pauseparam,
- .set_pauseparam = nes_netdev_set_pauseparam,
-};
-
-static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, netdev_features_t features)
-{
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- u32 u32temp;
- unsigned long flags;
-
- spin_lock_irqsave(&nesadapter->phy_lock, flags);
-
- nes_debug(NES_DBG_NETDEV, "%s: %s\n", __func__, netdev->name);
-
- /* Enable/Disable VLAN Stripping */
- u32temp = nes_read_indexed(nesdev, NES_IDX_PCIX_DIAG);
- if (features & NETIF_F_HW_VLAN_CTAG_RX)
- u32temp &= 0xfdffffff;
- else
- u32temp |= 0x02000000;
-
- nes_write_indexed(nesdev, NES_IDX_PCIX_DIAG, u32temp);
- spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
-}
-
-static netdev_features_t nes_fix_features(struct net_device *netdev, netdev_features_t features)
-{
- /*
- * Since there is no support for separate rx/tx vlan accel
- * enable/disable make sure tx flag is always in same state as rx.
- */
- if (features & NETIF_F_HW_VLAN_CTAG_RX)
- features |= NETIF_F_HW_VLAN_CTAG_TX;
- else
- features &= ~NETIF_F_HW_VLAN_CTAG_TX;
-
- return features;
-}
-
-static int nes_set_features(struct net_device *netdev, netdev_features_t features)
-{
- struct nes_vnic *nesvnic = netdev_priv(netdev);
- struct nes_device *nesdev = nesvnic->nesdev;
- u32 changed = netdev->features ^ features;
-
- if (changed & NETIF_F_HW_VLAN_CTAG_RX)
- nes_vlan_mode(netdev, nesdev, features);
-
- return 0;
-}
-
-static const struct net_device_ops nes_netdev_ops = {
- .ndo_open = nes_netdev_open,
- .ndo_stop = nes_netdev_stop,
- .ndo_start_xmit = nes_netdev_start_xmit,
- .ndo_get_stats = nes_netdev_get_stats,
- .ndo_tx_timeout = nes_netdev_tx_timeout,
- .ndo_set_mac_address = nes_netdev_set_mac_address,
- .ndo_set_rx_mode = nes_netdev_set_multicast_list,
- .ndo_change_mtu = nes_netdev_change_mtu,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_fix_features = nes_fix_features,
- .ndo_set_features = nes_set_features,
-};
-
-/**
- * nes_netdev_init - initialize network device
- */
-struct net_device *nes_netdev_init(struct nes_device *nesdev,
- void __iomem *mmio_addr)
-{
- u64 u64temp;
- struct nes_vnic *nesvnic;
- struct net_device *netdev;
- struct nic_qp_map *curr_qp_map;
- u8 phy_type = nesdev->nesadapter->phy_type[nesdev->mac_index];
-
- netdev = alloc_etherdev(sizeof(struct nes_vnic));
- if (!netdev) {
- printk(KERN_ERR PFX "nesvnic etherdev alloc failed");
- return NULL;
- }
- nesvnic = netdev_priv(netdev);
-
- nes_debug(NES_DBG_INIT, "netdev = %p, %s\n", netdev, netdev->name);
-
- SET_NETDEV_DEV(netdev, &nesdev->pcidev->dev);
-
- netdev->watchdog_timeo = NES_TX_TIMEOUT;
- netdev->irq = nesdev->pcidev->irq;
- netdev->mtu = ETH_DATA_LEN;
- netdev->hard_header_len = ETH_HLEN;
- netdev->addr_len = ETH_ALEN;
- netdev->type = ARPHRD_ETHER;
- netdev->netdev_ops = &nes_netdev_ops;
- netdev->ethtool_ops = &nes_ethtool_ops;
- netif_napi_add(netdev, &nesvnic->napi, nes_netdev_poll, 128);
- nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
-
- /* Fill in the port structure */
- nesvnic->netdev = netdev;
- nesvnic->nesdev = nesdev;
- nesvnic->msg_enable = netif_msg_init(debug, default_msg);
- nesvnic->netdev_index = nesdev->netdev_count;
- nesvnic->perfect_filter_index = nesdev->nesadapter->netdev_count;
- nesvnic->max_frame_size = netdev->mtu + netdev->hard_header_len + VLAN_HLEN;
-
- curr_qp_map = nic_qp_mapping_per_function[PCI_FUNC(nesdev->pcidev->devfn)];
- nesvnic->nic.qp_id = curr_qp_map[nesdev->netdev_count].qpid;
- nesvnic->nic_index = curr_qp_map[nesdev->netdev_count].nic_index;
- nesvnic->logical_port = curr_qp_map[nesdev->netdev_count].logical_port;
-
- /* Setup the burned in MAC address */
- u64temp = (u64)nesdev->nesadapter->mac_addr_low;
- u64temp += ((u64)nesdev->nesadapter->mac_addr_high) << 32;
- u64temp += nesvnic->nic_index;
- netdev->dev_addr[0] = (u8)(u64temp>>40);
- netdev->dev_addr[1] = (u8)(u64temp>>32);
- netdev->dev_addr[2] = (u8)(u64temp>>24);
- netdev->dev_addr[3] = (u8)(u64temp>>16);
- netdev->dev_addr[4] = (u8)(u64temp>>8);
- netdev->dev_addr[5] = (u8)u64temp;
-
- netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
- if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV))
- netdev->hw_features |= NETIF_F_TSO;
-
- netdev->features = netdev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX;
- netdev->hw_features |= NETIF_F_LRO;
-
- nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d,"
- " nic_index = %d, logical_port = %d, mac_index = %d.\n",
- nesvnic, (unsigned long)netdev->features, nesvnic->nic.qp_id,
- nesvnic->nic_index, nesvnic->logical_port, nesdev->mac_index);
-
- if (nesvnic->nesdev->nesadapter->port_count == 1 &&
- nesvnic->nesdev->nesadapter->adapter_fcn_count == 1) {
-
- nesvnic->qp_nic_index[0] = nesvnic->nic_index;
- nesvnic->qp_nic_index[1] = nesvnic->nic_index + 1;
- if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT) {
- nesvnic->qp_nic_index[2] = 0xf;
- nesvnic->qp_nic_index[3] = 0xf;
- } else {
- nesvnic->qp_nic_index[2] = nesvnic->nic_index + 2;
- nesvnic->qp_nic_index[3] = nesvnic->nic_index + 3;
- }
- } else {
- if (nesvnic->nesdev->nesadapter->port_count == 2 ||
- (nesvnic->nesdev->nesadapter->port_count == 1 &&
- nesvnic->nesdev->nesadapter->adapter_fcn_count == 2)) {
- nesvnic->qp_nic_index[0] = nesvnic->nic_index;
- nesvnic->qp_nic_index[1] = nesvnic->nic_index
- + 2;
- nesvnic->qp_nic_index[2] = 0xf;
- nesvnic->qp_nic_index[3] = 0xf;
- } else {
- nesvnic->qp_nic_index[0] = nesvnic->nic_index;
- nesvnic->qp_nic_index[1] = 0xf;
- nesvnic->qp_nic_index[2] = 0xf;
- nesvnic->qp_nic_index[3] = 0xf;
- }
- }
- nesvnic->next_qp_nic_index = 0;
-
- if (nesdev->netdev_count == 0) {
- nesvnic->rdma_enabled = 1;
- } else {
- nesvnic->rdma_enabled = 0;
- }
- nesvnic->nic_cq.cq_number = nesvnic->nic.qp_id;
- init_timer(&nesvnic->event_timer);
- nesvnic->event_timer.function = NULL;
- spin_lock_init(&nesvnic->tx_lock);
- spin_lock_init(&nesvnic->port_ibevent_lock);
- nesdev->netdev[nesdev->netdev_count] = netdev;
-
- nes_debug(NES_DBG_INIT, "Adding nesvnic (%p) to the adapters nesvnic_list for MAC%d.\n",
- nesvnic, nesdev->mac_index);
- list_add_tail(&nesvnic->list, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]);
-
- if ((nesdev->netdev_count == 0) &&
- ((PCI_FUNC(nesdev->pcidev->devfn) == nesdev->mac_index) ||
- ((phy_type == NES_PHY_TYPE_PUMA_1G) &&
- (((PCI_FUNC(nesdev->pcidev->devfn) == 1) && (nesdev->mac_index == 2)) ||
- ((PCI_FUNC(nesdev->pcidev->devfn) == 2) && (nesdev->mac_index == 1)))))) {
- u32 u32temp;
- u32 link_mask = 0;
- u32 link_val = 0;
- u16 temp_phy_data;
- u16 phy_data = 0;
- unsigned long flags;
-
- u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
- (0x200 * (nesdev->mac_index & 1)));
- if (phy_type != NES_PHY_TYPE_PUMA_1G) {
- u32temp |= 0x00200000;
- nes_write_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
- (0x200 * (nesdev->mac_index & 1)), u32temp);
- }
-
- /* Check and set linkup here. This is for back to back */
- /* configuration where second port won't get link interrupt */
- switch (phy_type) {
- case NES_PHY_TYPE_PUMA_1G:
- if (nesdev->mac_index < 2) {
- link_mask = 0x01010000;
- link_val = 0x01010000;
- } else {
- link_mask = 0x02020000;
- link_val = 0x02020000;
- }
- break;
- case NES_PHY_TYPE_SFP_D:
- spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
- nes_read_10G_phy_reg(nesdev,
- nesdev->nesadapter->phy_index[nesdev->mac_index],
- 1, 0x9003);
- temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- nes_read_10G_phy_reg(nesdev,
- nesdev->nesadapter->phy_index[nesdev->mac_index],
- 3, 0x0021);
- nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- nes_read_10G_phy_reg(nesdev,
- nesdev->nesadapter->phy_index[nesdev->mac_index],
- 3, 0x0021);
- phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
- phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0;
- break;
- default:
- link_mask = 0x0f1f0000;
- link_val = 0x0f0f0000;
- break;
- }
-
- u32temp = nes_read_indexed(nesdev,
- NES_IDX_PHY_PCS_CONTROL_STATUS0 +
- (0x200 * (nesdev->mac_index & 1)));
-
- if (phy_type == NES_PHY_TYPE_SFP_D) {
- if (phy_data & 0x0004)
- nesvnic->linkup = 1;
- } else {
- if ((u32temp & link_mask) == link_val)
- nesvnic->linkup = 1;
- }
-
- /* clear the MAC interrupt status, assumes direct logical to physical mapping */
- u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index));
- nes_debug(NES_DBG_INIT, "Phy interrupt status = 0x%X.\n", u32temp);
- nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index), u32temp);
-
- nes_init_phy(nesdev);
- }
-
- nes_vlan_mode(netdev, nesdev, netdev->features);
-
- return netdev;
-}
-
-
-/**
- * nes_netdev_destroy - destroy network device structure
- */
-void nes_netdev_destroy(struct net_device *netdev)
-{
- struct nes_vnic *nesvnic = netdev_priv(netdev);
-
- /* make sure 'stop' method is called by Linux stack */
- /* nes_netdev_stop(netdev); */
-
- list_del(&nesvnic->list);
-
- if (nesvnic->of_device_registered) {
- nes_destroy_ofa_device(nesvnic->nesibdev);
- }
-
- free_netdev(netdev);
-}
-
-
-/**
- * nes_nic_cm_xmit -- CM calls this to send out pkts
- */
-int nes_nic_cm_xmit(struct sk_buff *skb, struct net_device *netdev)
-{
- int ret;
-
- skb->dev = netdev;
- ret = dev_queue_xmit(skb);
- if (ret) {
- nes_debug(NES_DBG_CM, "Bad return code from dev_queue_xmit %d\n", ret);
- }
-
- return ret;
-}
diff --git a/drivers/infiniband/hw/nes/nes_user.h b/drivers/infiniband/hw/nes/nes_user.h
deleted file mode 100644
index 529c421bb15c..000000000000
--- a/drivers/infiniband/hw/nes/nes_user.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
- * Copyright (c) 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef NES_USER_H
-#define NES_USER_H
-
-#include <linux/types.h>
-
-#define NES_ABI_USERSPACE_VER 2
-#define NES_ABI_KERNEL_VER 2
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
- */
-
-struct nes_alloc_ucontext_req {
- __u32 reserved32;
- __u8 userspace_ver;
- __u8 reserved8[3];
-};
-
-struct nes_alloc_ucontext_resp {
- __u32 max_pds; /* maximum pds allowed for this user process */
- __u32 max_qps; /* maximum qps allowed for this user process */
- __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
- __u8 virtwq; /* flag to indicate if virtual WQ are to be used or not */
- __u8 kernel_ver;
- __u8 reserved[2];
-};
-
-struct nes_alloc_pd_resp {
- __u32 pd_id;
- __u32 mmap_db_index;
-};
-
-struct nes_create_cq_req {
- __u64 user_cq_buffer;
- __u32 mcrqf;
- __u8 reserved[4];
-};
-
-struct nes_create_qp_req {
- __u64 user_wqe_buffers;
- __u64 user_qp_buffer;
-};
-
-enum iwnes_memreg_type {
- IWNES_MEMREG_TYPE_MEM = 0x0000,
- IWNES_MEMREG_TYPE_QP = 0x0001,
- IWNES_MEMREG_TYPE_CQ = 0x0002,
- IWNES_MEMREG_TYPE_MW = 0x0003,
- IWNES_MEMREG_TYPE_FMR = 0x0004,
- IWNES_MEMREG_TYPE_FMEM = 0x0005,
-};
-
-struct nes_mem_reg_req {
- __u32 reg_type; /* indicates if id is memory, QP or CQ */
- __u32 reserved;
-};
-
-struct nes_create_cq_resp {
- __u32 cq_id;
- __u32 cq_size;
- __u32 mmap_db_index;
- __u32 reserved;
-};
-
-struct nes_create_qp_resp {
- __u32 qp_id;
- __u32 actual_sq_size;
- __u32 actual_rq_size;
- __u32 mmap_sq_db_index;
- __u32 mmap_rq_db_index;
- __u32 nes_drv_opt;
-};
-
-#endif /* NES_USER_H */
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
deleted file mode 100644
index 2042c0f29759..000000000000
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ /dev/null
@@ -1,972 +0,0 @@
-/*
- * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/if_vlan.h>
-#include <linux/slab.h>
-#include <linux/crc32.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-
-#include "nes.h"
-
-static u16 nes_read16_eeprom(void __iomem *addr, u16 offset);
-
-u32 mh_detected;
-u32 mh_pauses_sent;
-
-static u32 nes_set_pau(struct nes_device *nesdev)
-{
- u32 ret = 0;
- u32 counter;
-
- nes_write_indexed(nesdev, NES_IDX_GPR2, NES_ENABLE_PAU);
- nes_write_indexed(nesdev, NES_IDX_GPR_TRIGGER, 1);
-
- for (counter = 0; counter < NES_PAU_COUNTER; counter++) {
- udelay(30);
- if (!nes_read_indexed(nesdev, NES_IDX_GPR2)) {
- printk(KERN_INFO PFX "PAU is supported.\n");
- break;
- }
- nes_write_indexed(nesdev, NES_IDX_GPR_TRIGGER, 1);
- }
- if (counter == NES_PAU_COUNTER) {
- printk(KERN_INFO PFX "PAU is not supported.\n");
- return -EPERM;
- }
- return ret;
-}
-
-/**
- * nes_read_eeprom_values -
- */
-int nes_read_eeprom_values(struct nes_device *nesdev, struct nes_adapter *nesadapter)
-{
- u32 mac_addr_low;
- u16 mac_addr_high;
- u16 eeprom_data;
- u16 eeprom_offset;
- u16 next_section_address;
- u16 sw_section_ver;
- u8 major_ver = 0;
- u8 minor_ver = 0;
-
- /* TODO: deal with EEPROM endian issues */
- if (nesadapter->firmware_eeprom_offset == 0) {
- /* Read the EEPROM Parameters */
- eeprom_data = nes_read16_eeprom(nesdev->regs, 0);
- nes_debug(NES_DBG_HW, "EEPROM Offset 0 = 0x%04X\n", eeprom_data);
- eeprom_offset = 2 + (((eeprom_data & 0x007f) << 3) <<
- ((eeprom_data & 0x0080) >> 7));
- nes_debug(NES_DBG_HW, "Firmware Offset = 0x%04X\n", eeprom_offset);
- nesadapter->firmware_eeprom_offset = eeprom_offset;
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 4);
- if (eeprom_data != 0x5746) {
- nes_debug(NES_DBG_HW, "Not a valid Firmware Image = 0x%04X\n", eeprom_data);
- return -1;
- }
-
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
- nes_debug(NES_DBG_HW, "EEPROM Offset %u = 0x%04X\n",
- eeprom_offset + 2, eeprom_data);
- eeprom_offset += ((eeprom_data & 0x00ff) << 3) << ((eeprom_data & 0x0100) >> 8);
- nes_debug(NES_DBG_HW, "Software Offset = 0x%04X\n", eeprom_offset);
- nesadapter->software_eeprom_offset = eeprom_offset;
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 4);
- if (eeprom_data != 0x5753) {
- printk("Not a valid Software Image = 0x%04X\n", eeprom_data);
- return -1;
- }
- sw_section_ver = nes_read16_eeprom(nesdev->regs, nesadapter->software_eeprom_offset + 6);
- nes_debug(NES_DBG_HW, "Software section version number = 0x%04X\n",
- sw_section_ver);
-
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
- nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
- eeprom_offset + 2, eeprom_data);
- next_section_address = eeprom_offset + (((eeprom_data & 0x00ff) << 3) <<
- ((eeprom_data & 0x0100) >> 8));
- eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
- if (eeprom_data != 0x414d) {
- nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x414d but was 0x%04X\n",
- eeprom_data);
- goto no_fw_rev;
- }
- eeprom_offset = next_section_address;
-
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
- nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
- eeprom_offset + 2, eeprom_data);
- next_section_address = eeprom_offset + (((eeprom_data & 0x00ff) << 3) <<
- ((eeprom_data & 0x0100) >> 8));
- eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
- if (eeprom_data != 0x4f52) {
- nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x4f52 but was 0x%04X\n",
- eeprom_data);
- goto no_fw_rev;
- }
- eeprom_offset = next_section_address;
-
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
- nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
- eeprom_offset + 2, eeprom_data);
- next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3);
- eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
- if (eeprom_data != 0x5746) {
- nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x5746 but was 0x%04X\n",
- eeprom_data);
- goto no_fw_rev;
- }
- eeprom_offset = next_section_address;
-
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
- nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
- eeprom_offset + 2, eeprom_data);
- next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3);
- eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
- if (eeprom_data != 0x5753) {
- nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x5753 but was 0x%04X\n",
- eeprom_data);
- goto no_fw_rev;
- }
- eeprom_offset = next_section_address;
-
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
- nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
- eeprom_offset + 2, eeprom_data);
- next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3);
- eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
- if (eeprom_data != 0x414d) {
- nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x414d but was 0x%04X\n",
- eeprom_data);
- goto no_fw_rev;
- }
- eeprom_offset = next_section_address;
-
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
- nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
- eeprom_offset + 2, eeprom_data);
- next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3);
- eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
- if (eeprom_data != 0x464e) {
- nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x464e but was 0x%04X\n",
- eeprom_data);
- goto no_fw_rev;
- }
- eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 8);
- printk(PFX "Firmware version %u.%u\n", (u8)(eeprom_data>>8), (u8)eeprom_data);
- major_ver = (u8)(eeprom_data >> 8);
- minor_ver = (u8)(eeprom_data);
-
- if (nes_drv_opt & NES_DRV_OPT_DISABLE_VIRT_WQ) {
- nes_debug(NES_DBG_HW, "Virtual WQs have been disabled\n");
- } else if (((major_ver == 2) && (minor_ver > 21)) || ((major_ver > 2) && (major_ver != 255))) {
- nesadapter->virtwq = 1;
- }
- if (((major_ver == 3) && (minor_ver >= 16)) || (major_ver > 3))
- nesadapter->send_term_ok = 1;
-
- if (nes_drv_opt & NES_DRV_OPT_ENABLE_PAU) {
- if (!nes_set_pau(nesdev))
- nesadapter->allow_unaligned_fpdus = 1;
- }
-
- nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) +
- (u32)((u8)eeprom_data);
-
- eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 10);
- printk(PFX "EEPROM version %u.%u\n", (u8)(eeprom_data>>8), (u8)eeprom_data);
- nesadapter->eeprom_version = (((u32)(u8)(eeprom_data>>8)) << 16) +
- (u32)((u8)eeprom_data);
-
-no_fw_rev:
- /* eeprom is valid */
- eeprom_offset = nesadapter->software_eeprom_offset;
- eeprom_offset += 8;
- nesadapter->netdev_max = (u8)nes_read16_eeprom(nesdev->regs, eeprom_offset);
- eeprom_offset += 2;
- mac_addr_high = nes_read16_eeprom(nesdev->regs, eeprom_offset);
- eeprom_offset += 2;
- mac_addr_low = (u32)nes_read16_eeprom(nesdev->regs, eeprom_offset);
- eeprom_offset += 2;
- mac_addr_low <<= 16;
- mac_addr_low += (u32)nes_read16_eeprom(nesdev->regs, eeprom_offset);
- nes_debug(NES_DBG_HW, "Base MAC Address = 0x%04X%08X\n",
- mac_addr_high, mac_addr_low);
- nes_debug(NES_DBG_HW, "MAC Address count = %u\n", nesadapter->netdev_max);
-
- nesadapter->mac_addr_low = mac_addr_low;
- nesadapter->mac_addr_high = mac_addr_high;
-
- /* Read the Phy Type array */
- eeprom_offset += 10;
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
- nesadapter->phy_type[0] = (u8)(eeprom_data >> 8);
- nesadapter->phy_type[1] = (u8)eeprom_data;
-
- /* Read the port array */
- eeprom_offset += 2;
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
- nesadapter->phy_type[2] = (u8)(eeprom_data >> 8);
- nesadapter->phy_type[3] = (u8)eeprom_data;
- /* port_count is set by soft reset reg */
- nes_debug(NES_DBG_HW, "port_count = %u, port 0 -> %u, port 1 -> %u,"
- " port 2 -> %u, port 3 -> %u\n",
- nesadapter->port_count,
- nesadapter->phy_type[0], nesadapter->phy_type[1],
- nesadapter->phy_type[2], nesadapter->phy_type[3]);
-
- /* Read PD config array */
- eeprom_offset += 10;
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
- nesadapter->pd_config_size[0] = eeprom_data;
- eeprom_offset += 2;
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
- nesadapter->pd_config_base[0] = eeprom_data;
- nes_debug(NES_DBG_HW, "PD0 config, size=0x%04x, base=0x%04x\n",
- nesadapter->pd_config_size[0], nesadapter->pd_config_base[0]);
-
- eeprom_offset += 2;
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
- nesadapter->pd_config_size[1] = eeprom_data;
- eeprom_offset += 2;
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
- nesadapter->pd_config_base[1] = eeprom_data;
- nes_debug(NES_DBG_HW, "PD1 config, size=0x%04x, base=0x%04x\n",
- nesadapter->pd_config_size[1], nesadapter->pd_config_base[1]);
-
- eeprom_offset += 2;
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
- nesadapter->pd_config_size[2] = eeprom_data;
- eeprom_offset += 2;
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
- nesadapter->pd_config_base[2] = eeprom_data;
- nes_debug(NES_DBG_HW, "PD2 config, size=0x%04x, base=0x%04x\n",
- nesadapter->pd_config_size[2], nesadapter->pd_config_base[2]);
-
- eeprom_offset += 2;
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
- nesadapter->pd_config_size[3] = eeprom_data;
- eeprom_offset += 2;
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
- nesadapter->pd_config_base[3] = eeprom_data;
- nes_debug(NES_DBG_HW, "PD3 config, size=0x%04x, base=0x%04x\n",
- nesadapter->pd_config_size[3], nesadapter->pd_config_base[3]);
-
- /* Read Rx Pool Size */
- eeprom_offset += 22; /* 46 */
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
- eeprom_offset += 2;
- nesadapter->rx_pool_size = (((u32)eeprom_data) << 16) +
- nes_read16_eeprom(nesdev->regs, eeprom_offset);
- nes_debug(NES_DBG_HW, "rx_pool_size = 0x%08X\n", nesadapter->rx_pool_size);
-
- eeprom_offset += 2;
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
- eeprom_offset += 2;
- nesadapter->tx_pool_size = (((u32)eeprom_data) << 16) +
- nes_read16_eeprom(nesdev->regs, eeprom_offset);
- nes_debug(NES_DBG_HW, "tx_pool_size = 0x%08X\n", nesadapter->tx_pool_size);
-
- eeprom_offset += 2;
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
- eeprom_offset += 2;
- nesadapter->rx_threshold = (((u32)eeprom_data) << 16) +
- nes_read16_eeprom(nesdev->regs, eeprom_offset);
- nes_debug(NES_DBG_HW, "rx_threshold = 0x%08X\n", nesadapter->rx_threshold);
-
- eeprom_offset += 2;
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
- eeprom_offset += 2;
- nesadapter->tcp_timer_core_clk_divisor = (((u32)eeprom_data) << 16) +
- nes_read16_eeprom(nesdev->regs, eeprom_offset);
- nes_debug(NES_DBG_HW, "tcp_timer_core_clk_divisor = 0x%08X\n",
- nesadapter->tcp_timer_core_clk_divisor);
-
- eeprom_offset += 2;
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
- eeprom_offset += 2;
- nesadapter->iwarp_config = (((u32)eeprom_data) << 16) +
- nes_read16_eeprom(nesdev->regs, eeprom_offset);
- nes_debug(NES_DBG_HW, "iwarp_config = 0x%08X\n", nesadapter->iwarp_config);
-
- eeprom_offset += 2;
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
- eeprom_offset += 2;
- nesadapter->cm_config = (((u32)eeprom_data) << 16) +
- nes_read16_eeprom(nesdev->regs, eeprom_offset);
- nes_debug(NES_DBG_HW, "cm_config = 0x%08X\n", nesadapter->cm_config);
-
- eeprom_offset += 2;
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
- eeprom_offset += 2;
- nesadapter->sws_timer_config = (((u32)eeprom_data) << 16) +
- nes_read16_eeprom(nesdev->regs, eeprom_offset);
- nes_debug(NES_DBG_HW, "sws_timer_config = 0x%08X\n", nesadapter->sws_timer_config);
-
- eeprom_offset += 2;
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
- eeprom_offset += 2;
- nesadapter->tcp_config1 = (((u32)eeprom_data) << 16) +
- nes_read16_eeprom(nesdev->regs, eeprom_offset);
- nes_debug(NES_DBG_HW, "tcp_config1 = 0x%08X\n", nesadapter->tcp_config1);
-
- eeprom_offset += 2;
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
- eeprom_offset += 2;
- nesadapter->wqm_wat = (((u32)eeprom_data) << 16) +
- nes_read16_eeprom(nesdev->regs, eeprom_offset);
- nes_debug(NES_DBG_HW, "wqm_wat = 0x%08X\n", nesadapter->wqm_wat);
-
- eeprom_offset += 2;
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
- eeprom_offset += 2;
- nesadapter->core_clock = (((u32)eeprom_data) << 16) +
- nes_read16_eeprom(nesdev->regs, eeprom_offset);
- nes_debug(NES_DBG_HW, "core_clock = 0x%08X\n", nesadapter->core_clock);
-
- if ((sw_section_ver) && (nesadapter->hw_rev != NE020_REV)) {
- eeprom_offset += 2;
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
- nesadapter->phy_index[0] = (eeprom_data & 0xff00)>>8;
- nesadapter->phy_index[1] = eeprom_data & 0x00ff;
- eeprom_offset += 2;
- eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
- nesadapter->phy_index[2] = (eeprom_data & 0xff00)>>8;
- nesadapter->phy_index[3] = eeprom_data & 0x00ff;
- } else {
- nesadapter->phy_index[0] = 4;
- nesadapter->phy_index[1] = 5;
- nesadapter->phy_index[2] = 6;
- nesadapter->phy_index[3] = 7;
- }
- nes_debug(NES_DBG_HW, "Phy address map = 0 > %u, 1 > %u, 2 > %u, 3 > %u\n",
- nesadapter->phy_index[0],nesadapter->phy_index[1],
- nesadapter->phy_index[2],nesadapter->phy_index[3]);
- }
-
- return 0;
-}
-
-
-/**
- * nes_read16_eeprom
- */
-static u16 nes_read16_eeprom(void __iomem *addr, u16 offset)
-{
- writel(NES_EEPROM_READ_REQUEST + (offset >> 1),
- (void __iomem *)addr + NES_EEPROM_COMMAND);
-
- do {
- } while (readl((void __iomem *)addr + NES_EEPROM_COMMAND) &
- NES_EEPROM_READ_REQUEST);
-
- return readw((void __iomem *)addr + NES_EEPROM_DATA);
-}
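
The poll in nes_read16_eeprom() spins with no upper bound; if the READ_REQUEST bit never clears, the CPU is stuck in the loop. A minimal sketch of a bounded variant, reusing the register names and accessors from the code above (the retry limit, the udelay interval and the -ETIMEDOUT mapping are illustrative, not part of the driver):

/* Hedged sketch: bounded variant of the EEPROM poll above. */
static int nes_read16_eeprom_timeout(void __iomem *addr, u16 offset, u16 *value)
{
	int retries = 1000;

	writel(NES_EEPROM_READ_REQUEST + (offset >> 1),
	       addr + NES_EEPROM_COMMAND);

	while (readl(addr + NES_EEPROM_COMMAND) & NES_EEPROM_READ_REQUEST) {
		if (--retries == 0)
			return -ETIMEDOUT;	/* bail out instead of spinning forever */
		udelay(1);
	}

	*value = readw(addr + NES_EEPROM_DATA);
	return 0;
}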
-
-
-/**
- * nes_write_1G_phy_reg
- */
-void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 data)
-{
- u32 u32temp;
- u32 counter;
-
- nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
- 0x50020000 | data | ((u32)phy_reg << 18) | ((u32)phy_addr << 23));
- for (counter = 0; counter < 100 ; counter++) {
- udelay(30);
- u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
- if (u32temp & 1) {
- /* nes_debug(NES_DBG_PHY, "Phy interrupt status = 0x%X.\n", u32temp); */
- nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
- break;
- }
- }
- if (!(u32temp & 1))
- nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
- u32temp);
-}
-
-
-/**
- * nes_read_1G_phy_reg
- * This routine only issues the read, the data must be read
- * separately.
- */
-void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 *data)
-{
- u32 u32temp;
- u32 counter;
-
- /* nes_debug(NES_DBG_PHY, "phy addr = %d, mac_index = %d\n",
- phy_addr, nesdev->mac_index); */
-
- nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
- 0x60020000 | ((u32)phy_reg << 18) | ((u32)phy_addr << 23));
- for (counter = 0; counter < 100 ; counter++) {
- udelay(30);
- u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
- if (u32temp & 1) {
- /* nes_debug(NES_DBG_PHY, "Phy interrupt status = 0x%X.\n", u32temp); */
- nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
- break;
- }
- }
- if (!(u32temp & 1)) {
- nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
- u32temp);
- *data = 0xffff;
- } else {
- *data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- }
-}
-
-
-/**
- * nes_write_10G_phy_reg
- */
-void nes_write_10G_phy_reg(struct nes_device *nesdev, u16 phy_addr, u8 dev_addr, u16 phy_reg,
- u16 data)
-{
- u32 port_addr;
- u32 u32temp;
- u32 counter;
-
- port_addr = phy_addr;
-
- /* set address */
- nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
- 0x00020000 | (u32)phy_reg | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23));
- for (counter = 0; counter < 100 ; counter++) {
- udelay(30);
- u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
- if (u32temp & 1) {
- nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
- break;
- }
- }
- if (!(u32temp & 1))
- nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
- u32temp);
-
- /* set data */
- nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
- 0x10020000 | (u32)data | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23));
- for (counter = 0; counter < 100 ; counter++) {
- udelay(30);
- u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
- if (u32temp & 1) {
- nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
- break;
- }
- }
- if (!(u32temp & 1))
- nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
- u32temp);
-}
-
-
-/**
- * nes_read_10G_phy_reg
- * This routine only issues the read, the data must be read
- * separately.
- */
-void nes_read_10G_phy_reg(struct nes_device *nesdev, u8 phy_addr, u8 dev_addr, u16 phy_reg)
-{
- u32 port_addr;
- u32 u32temp;
- u32 counter;
-
- port_addr = phy_addr;
-
- /* set address */
- nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
- 0x00020000 | (u32)phy_reg | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23));
- for (counter = 0; counter < 100 ; counter++) {
- udelay(30);
- u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
- if (u32temp & 1) {
- nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
- break;
- }
- }
- if (!(u32temp & 1))
- nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
- u32temp);
-
- /* issue read */
- nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
- 0x30020000 | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23));
- for (counter = 0; counter < 100 ; counter++) {
- udelay(30);
- u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
- if (u32temp & 1) {
- nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
- break;
- }
- }
- if (!(u32temp & 1))
- nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
- u32temp);
-}
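
The two 10G helpers follow the clause-45 MDIO sequence: write the register address first (command 0x0002xxxx), then issue a separate write (0x1002xxxx) or read (0x3002xxxx) cycle, polling the MAC interrupt status after each step. The completion poll they repeat inline could be captured in one place; a hedged sketch (the helper itself is not in the driver):

/* Hedged sketch: the completion poll the 10G helpers repeat inline.
 * nes_read_indexed()/nes_write_indexed() and the register index come
 * from the code above; the wrapper is illustrative only. */
static int nes_mdio_wait_done(struct nes_device *nesdev)
{
	u32 status;
	u32 counter;

	for (counter = 0; counter < 100; counter++) {
		udelay(30);
		status = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
		if (status & 1) {
			nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
			return 0;	/* MDIO cycle completed */
		}
	}
	return -EIO;			/* PHY did not respond within ~3 ms */
}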
-
-
-/**
- * nes_get_cqp_request
- */
-struct nes_cqp_request *nes_get_cqp_request(struct nes_device *nesdev)
-{
- unsigned long flags;
- struct nes_cqp_request *cqp_request = NULL;
-
- if (!list_empty(&nesdev->cqp_avail_reqs)) {
- spin_lock_irqsave(&nesdev->cqp.lock, flags);
- if (!list_empty(&nesdev->cqp_avail_reqs)) {
- cqp_request = list_entry(nesdev->cqp_avail_reqs.next,
- struct nes_cqp_request, list);
- list_del_init(&cqp_request->list);
- }
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
- }
- if (cqp_request == NULL) {
- cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_ATOMIC);
- if (cqp_request) {
- cqp_request->dynamic = 1;
- INIT_LIST_HEAD(&cqp_request->list);
- }
- }
-
- if (cqp_request) {
- init_waitqueue_head(&cqp_request->waitq);
- cqp_request->waiting = 0;
- cqp_request->request_done = 0;
- cqp_request->callback = 0;
- init_waitqueue_head(&cqp_request->waitq);
- nes_debug(NES_DBG_CQP, "Got cqp request %p from the available list \n",
- cqp_request);
- } else
-		printk(KERN_ERR PFX "%s: Could not allocate a CQP request.\n",
- __func__);
-
- return cqp_request;
-}
-
-void nes_free_cqp_request(struct nes_device *nesdev,
- struct nes_cqp_request *cqp_request)
-{
- unsigned long flags;
-
- nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) freed.\n",
- cqp_request,
- le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_OPCODE_IDX]) & 0x3f);
-
- if (cqp_request->dynamic) {
- kfree(cqp_request);
- } else {
- spin_lock_irqsave(&nesdev->cqp.lock, flags);
- list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
- }
-}
-
-void nes_put_cqp_request(struct nes_device *nesdev,
- struct nes_cqp_request *cqp_request)
-{
- if (atomic_dec_and_test(&cqp_request->refcount))
- nes_free_cqp_request(nesdev, cqp_request);
-}
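
nes_get_cqp_request(), nes_free_cqp_request() and nes_put_cqp_request() together form a small reference-counted object pool: a request is taken from the available list (falling back to a GFP_ATOMIC allocation), posted with a refcount of 2 - one reference for the caller, one for the completion path - and each side drops its reference when done. A hedged sketch of the caller side, with the WQE contents elided:

/* Hedged sketch of the caller side of the CQP request pool. */
static int nes_cqp_example(struct nes_device *nesdev)
{
	struct nes_cqp_request *cqp_request;

	cqp_request = nes_get_cqp_request(nesdev);
	if (!cqp_request)
		return -ENOMEM;

	cqp_request->waiting = 1;
	/* ... fill cqp_request->cqp_wqe here ... */

	atomic_set(&cqp_request->refcount, 2);	/* caller + completion handler */
	nes_post_cqp_request(nesdev, cqp_request);

	wait_event_timeout(cqp_request->waitq, cqp_request->request_done != 0,
			   NES_EVENT_TIMEOUT);
	nes_put_cqp_request(nesdev, cqp_request);	/* drop the caller's reference */
	return 0;
}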
-
-
-/**
- * nes_post_cqp_request
- */
-void nes_post_cqp_request(struct nes_device *nesdev,
- struct nes_cqp_request *cqp_request)
-{
- struct nes_hw_cqp_wqe *cqp_wqe;
- unsigned long flags;
- u32 cqp_head;
- u64 u64temp;
- u32 opcode;
- int ctx_index = NES_CQP_WQE_COMP_CTX_LOW_IDX;
-
- spin_lock_irqsave(&nesdev->cqp.lock, flags);
-
- if (((((nesdev->cqp.sq_tail+(nesdev->cqp.sq_size*2))-nesdev->cqp.sq_head) &
- (nesdev->cqp.sq_size - 1)) != 1)
- && (list_empty(&nesdev->cqp_pending_reqs))) {
- cqp_head = nesdev->cqp.sq_head++;
- nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
- cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
- memcpy(cqp_wqe, &cqp_request->cqp_wqe, sizeof(*cqp_wqe));
- opcode = le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX]);
- if ((opcode & NES_CQP_OPCODE_MASK) == NES_CQP_DOWNLOAD_SEGMENT)
- ctx_index = NES_CQP_WQE_DL_COMP_CTX_LOW_IDX;
- barrier();
- u64temp = (unsigned long)cqp_request;
- set_wqe_64bit_value(cqp_wqe->wqe_words, ctx_index, u64temp);
- nes_debug(NES_DBG_CQP, "CQP request (opcode 0x%02X), line 1 = 0x%08X put on CQPs SQ,"
- " request = %p, cqp_head = %u, cqp_tail = %u, cqp_size = %u,"
- " waiting = %d, refcount = %d.\n",
- opcode & NES_CQP_OPCODE_MASK,
- le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX]), cqp_request,
- nesdev->cqp.sq_head, nesdev->cqp.sq_tail, nesdev->cqp.sq_size,
- cqp_request->waiting, atomic_read(&cqp_request->refcount));
-
- barrier();
-
- /* Ring doorbell (1 WQEs) */
- nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x01800000 | nesdev->cqp.qp_id);
-
- barrier();
- } else {
- nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X), line 1 = 0x%08X"
- " put on the pending queue.\n",
- cqp_request,
- le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f,
- le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_ID_IDX]));
- list_add_tail(&cqp_request->list, &nesdev->cqp_pending_reqs);
- }
-
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
-
- return;
-}
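
The dense condition guarding the direct SQ post is the usual power-of-two ring test: sq_size is a power of two, and ((tail + 2*size - head) & (size - 1)) equal to 1 means only one slot separates the producer from the consumer, so the request is queued on the pending list instead (the pending list must also already be empty for a direct post). The same arithmetic, spelled out with illustrative names:

/* Hedged sketch: free-slot test for a power-of-two ring, as used above. */
static inline int cqp_sq_has_room(u32 head, u32 tail, u32 size)
{
	/* Slots still free between producer (head) and consumer (tail),
	 * computed without underflow. */
	u32 free = (tail + (size * 2) - head) & (size - 1);

	/* With one free slot left, advancing head would make it equal tail,
	 * which is indistinguishable from "empty", so treat that as full. */
	return free != 1;
}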
-
-/**
- * nes_arp_table
- */
-int nes_arp_table(struct nes_device *nesdev, u32 ip_addr, u8 *mac_addr, u32 action)
-{
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- int arp_index;
- int err = 0;
- __be32 tmp_addr;
-
- for (arp_index = 0; (u32) arp_index < nesadapter->arp_table_size; arp_index++) {
- if (nesadapter->arp_table[arp_index].ip_addr == ip_addr)
- break;
- }
-
- if (action == NES_ARP_ADD) {
- if (arp_index != nesadapter->arp_table_size) {
- return -1;
- }
-
- arp_index = 0;
- err = nes_alloc_resource(nesadapter, nesadapter->allocated_arps,
- nesadapter->arp_table_size, (u32 *)&arp_index, &nesadapter->next_arp_index, NES_RESOURCE_ARP);
- if (err) {
- nes_debug(NES_DBG_NETDEV, "nes_alloc_resource returned error = %u\n", err);
- return err;
- }
- nes_debug(NES_DBG_NETDEV, "ADD, arp_index=%d\n", arp_index);
-
- nesadapter->arp_table[arp_index].ip_addr = ip_addr;
- memcpy(nesadapter->arp_table[arp_index].mac_addr, mac_addr, ETH_ALEN);
- return arp_index;
- }
-
- /* DELETE or RESOLVE */
- if (arp_index == nesadapter->arp_table_size) {
- tmp_addr = cpu_to_be32(ip_addr);
- nes_debug(NES_DBG_NETDEV, "MAC for %pI4 not in ARP table - cannot %s\n",
- &tmp_addr, action == NES_ARP_RESOLVE ? "resolve" : "delete");
- return -1;
- }
-
- if (action == NES_ARP_RESOLVE) {
- nes_debug(NES_DBG_NETDEV, "RESOLVE, arp_index=%d\n", arp_index);
- return arp_index;
- }
-
- if (action == NES_ARP_DELETE) {
- nes_debug(NES_DBG_NETDEV, "DELETE, arp_index=%d\n", arp_index);
- nesadapter->arp_table[arp_index].ip_addr = 0;
- memset(nesadapter->arp_table[arp_index].mac_addr, 0x00, ETH_ALEN);
- nes_free_resource(nesadapter, nesadapter->allocated_arps, arp_index);
- return arp_index;
- }
-
- return -1;
-}
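
nes_arp_table() multiplexes three actions over one linear lookup: NES_ARP_ADD fails if the address is already present, NES_ARP_RESOLVE returns the index of an existing entry, and NES_ARP_DELETE clears the entry and releases its index. A hedged usage sketch (the MAC value is made up; nesdev and ip_addr are assumed to come from the caller):

/* Hedged sketch of the three nes_arp_table() actions. */
static void nes_arp_example(struct nes_device *nesdev, u32 ip_addr)
{
	u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };	/* example MAC */
	int idx;

	idx = nes_arp_table(nesdev, ip_addr, mac, NES_ARP_ADD);		/* -1 if already present */
	idx = nes_arp_table(nesdev, ip_addr, NULL, NES_ARP_RESOLVE);	/* index, or -1 if missing */
	idx = nes_arp_table(nesdev, ip_addr, NULL, NES_ARP_DELETE);	/* clears and frees the entry */
	(void)idx;
}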
-
-
-/**
- * nes_mh_fix
- */
-void nes_mh_fix(unsigned long parm)
-{
- unsigned long flags;
- struct nes_device *nesdev = (struct nes_device *)parm;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- struct nes_vnic *nesvnic;
- u32 used_chunks_tx;
- u32 temp_used_chunks_tx;
- u32 temp_last_used_chunks_tx;
- u32 used_chunks_mask;
- u32 mac_tx_frames_low;
- u32 mac_tx_frames_high;
- u32 mac_tx_pauses;
- u32 serdes_status;
- u32 reset_value;
- u32 tx_control;
- u32 tx_config;
- u32 tx_pause_quanta;
- u32 rx_control;
- u32 rx_config;
- u32 mac_exact_match;
- u32 mpp_debug;
- u32 i=0;
- u32 chunks_tx_progress = 0;
-
- spin_lock_irqsave(&nesadapter->phy_lock, flags);
- if ((nesadapter->mac_sw_state[0] != NES_MAC_SW_IDLE) || (nesadapter->mac_link_down[0])) {
- spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
- goto no_mh_work;
- }
- nesadapter->mac_sw_state[0] = NES_MAC_SW_MH;
- spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
- do {
- mac_tx_frames_low = nes_read_indexed(nesdev, NES_IDX_MAC_TX_FRAMES_LOW);
- mac_tx_frames_high = nes_read_indexed(nesdev, NES_IDX_MAC_TX_FRAMES_HIGH);
- mac_tx_pauses = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_FRAMES);
- used_chunks_tx = nes_read_indexed(nesdev, NES_IDX_USED_CHUNKS_TX);
- nesdev->mac_pause_frames_sent += mac_tx_pauses;
- used_chunks_mask = 0;
- temp_used_chunks_tx = used_chunks_tx;
- temp_last_used_chunks_tx = nesdev->last_used_chunks_tx;
-
- if (nesdev->netdev[0]) {
- nesvnic = netdev_priv(nesdev->netdev[0]);
- } else {
- break;
- }
-
- for (i=0; i<4; i++) {
- used_chunks_mask <<= 8;
- if (nesvnic->qp_nic_index[i] != 0xff) {
- used_chunks_mask |= 0xff;
- if ((temp_used_chunks_tx&0xff)<(temp_last_used_chunks_tx&0xff)) {
- chunks_tx_progress = 1;
- }
- }
- temp_used_chunks_tx >>= 8;
- temp_last_used_chunks_tx >>= 8;
- }
- if ((mac_tx_frames_low) || (mac_tx_frames_high) ||
- (!(used_chunks_tx&used_chunks_mask)) ||
- (!(nesdev->last_used_chunks_tx&used_chunks_mask)) ||
- (chunks_tx_progress) ) {
- nesdev->last_used_chunks_tx = used_chunks_tx;
- break;
- }
- nesdev->last_used_chunks_tx = used_chunks_tx;
- barrier();
-
- nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONTROL, 0x00000005);
- mh_pauses_sent++;
- mac_tx_pauses = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_FRAMES);
- if (mac_tx_pauses) {
- nesdev->mac_pause_frames_sent += mac_tx_pauses;
- break;
- }
-
- tx_control = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONTROL);
- tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
- tx_pause_quanta = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_QUANTA);
- rx_control = nes_read_indexed(nesdev, NES_IDX_MAC_RX_CONTROL);
- rx_config = nes_read_indexed(nesdev, NES_IDX_MAC_RX_CONFIG);
- mac_exact_match = nes_read_indexed(nesdev, NES_IDX_MAC_EXACT_MATCH_BOTTOM);
- mpp_debug = nes_read_indexed(nesdev, NES_IDX_MPP_DEBUG);
-
- /* one last ditch effort to avoid a false positive */
- mac_tx_pauses = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_FRAMES);
- if (mac_tx_pauses) {
- nesdev->last_mac_tx_pauses = nesdev->mac_pause_frames_sent;
- nes_debug(NES_DBG_HW, "failsafe caught slow outbound pause\n");
- break;
- }
- mh_detected++;
-
- nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONTROL, 0x00000000);
- nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, 0x00000000);
- reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
-
- nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value | 0x0000001d);
-
- while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET)
- & 0x00000040) != 0x00000040) && (i++ < 5000)) {
- /* mdelay(1); */
- }
-
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008);
- serdes_status = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0);
-
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x000bdef7);
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_DRIVE0, 0x9ce73000);
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_MODE0, 0x0ff00000);
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_SIGDET0, 0x00000000);
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_BYPASS0, 0x00000000);
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_LOOPBACK_CONTROL0, 0x00000000);
- if (nesadapter->OneG_Mode) {
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0182222);
- } else {
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0042222);
- }
- serdes_status = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_STATUS0);
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000ff);
-
- nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONTROL, tx_control);
- nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
- nes_write_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_QUANTA, tx_pause_quanta);
- nes_write_indexed(nesdev, NES_IDX_MAC_RX_CONTROL, rx_control);
- nes_write_indexed(nesdev, NES_IDX_MAC_RX_CONFIG, rx_config);
- nes_write_indexed(nesdev, NES_IDX_MAC_EXACT_MATCH_BOTTOM, mac_exact_match);
- nes_write_indexed(nesdev, NES_IDX_MPP_DEBUG, mpp_debug);
-
- } while (0);
-
- nesadapter->mac_sw_state[0] = NES_MAC_SW_IDLE;
-no_mh_work:
- nesdev->nesadapter->mh_timer.expires = jiffies + (HZ/5);
- add_timer(&nesdev->nesadapter->mh_timer);
-}
-
-/**
- * nes_clc
- */
-void nes_clc(unsigned long parm)
-{
- unsigned long flags;
- struct nes_device *nesdev = (struct nes_device *)parm;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
-
- spin_lock_irqsave(&nesadapter->phy_lock, flags);
- nesadapter->link_interrupt_count[0] = 0;
- nesadapter->link_interrupt_count[1] = 0;
- nesadapter->link_interrupt_count[2] = 0;
- nesadapter->link_interrupt_count[3] = 0;
- spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
-
- nesadapter->lc_timer.expires = jiffies + 3600 * HZ; /* 1 hour */
- add_timer(&nesadapter->lc_timer);
-}
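
Both nes_mh_fix() and nes_clc() are timer callbacks in the old pre-timer_setup() style that re-arm themselves by setting ->expires and calling add_timer() again; nes_clc() merely clears the per-port link interrupt counters once an hour. A minimal sketch of that self re-arming pattern (the interval and body are illustrative):

/* Hedged sketch: self re-arming timer, old (unsigned long parm) style.
 * Armed elsewhere with setup_timer(&example_timer, example_timer_fn,
 * (unsigned long)nesdev) and an initial add_timer(). */
static struct timer_list example_timer;

static void example_timer_fn(unsigned long parm)
{
	struct nes_device *nesdev = (struct nes_device *)parm;

	/* ... periodic work on nesdev ... */

	example_timer.expires = jiffies + HZ / 5;	/* run again in 200 ms */
	add_timer(&example_timer);
	(void)nesdev;
}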
-
-
-/**
- * nes_dump_mem
- */
-void nes_dump_mem(unsigned int dump_debug_level, void *addr, int length)
-{
- char xlate[] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
- 'a', 'b', 'c', 'd', 'e', 'f'};
- char *ptr;
- char hex_buf[80];
- char ascii_buf[20];
- int num_char;
- int num_ascii;
- int num_hex;
-
- if (!(nes_debug_level & dump_debug_level)) {
- return;
- }
-
- ptr = addr;
- if (length > 0x100) {
- nes_debug(dump_debug_level, "Length truncated from %x to %x\n", length, 0x100);
- length = 0x100;
- }
- nes_debug(dump_debug_level, "Address=0x%p, length=0x%x (%d)\n", ptr, length, length);
-
- memset(ascii_buf, 0, 20);
- memset(hex_buf, 0, 80);
-
- num_ascii = 0;
- num_hex = 0;
- for (num_char = 0; num_char < length; num_char++) {
- if (num_ascii == 8) {
- ascii_buf[num_ascii++] = ' ';
- hex_buf[num_hex++] = '-';
- hex_buf[num_hex++] = ' ';
- }
-
- if (*ptr < 0x20 || *ptr > 0x7e)
- ascii_buf[num_ascii++] = '.';
- else
- ascii_buf[num_ascii++] = *ptr;
- hex_buf[num_hex++] = xlate[((*ptr & 0xf0) >> 4)];
- hex_buf[num_hex++] = xlate[*ptr & 0x0f];
- hex_buf[num_hex++] = ' ';
- ptr++;
-
- if (num_ascii >= 17) {
- /* output line and reset */
- nes_debug(dump_debug_level, " %s | %s\n", hex_buf, ascii_buf);
- memset(ascii_buf, 0, 20);
- memset(hex_buf, 0, 80);
- num_ascii = 0;
- num_hex = 0;
- }
- }
-
- /* output the rest */
- if (num_ascii) {
- while (num_ascii < 17) {
- if (num_ascii == 8) {
- hex_buf[num_hex++] = ' ';
- hex_buf[num_hex++] = ' ';
- }
- hex_buf[num_hex++] = ' ';
- hex_buf[num_hex++] = ' ';
- hex_buf[num_hex++] = ' ';
- num_ascii++;
- }
-
- nes_debug(dump_debug_level, " %s | %s\n", hex_buf, ascii_buf);
- }
-}
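
nes_dump_mem() produces a 16-bytes-per-line hex/ASCII dump, gated on nes_debug_level and capped at 0x100 bytes. A hedged usage example (cqp_wqe is assumed to point at a WQE under inspection):

/* Dump a CQP WQE while debugging; only emitted when NES_DBG_CQP is
 * enabled in nes_debug_level. */
nes_dump_mem(NES_DBG_CQP, cqp_wqe, sizeof(struct nes_hw_cqp_wqe));

Newer code would typically reach for print_hex_dump() / print_hex_dump_debug() rather than open-coding the formatting.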
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
deleted file mode 100644
index 44cb513f9a87..000000000000
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ /dev/null
@@ -1,4090 +0,0 @@
-/*
- * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/random.h>
-#include <linux/highmem.h>
-#include <linux/slab.h>
-#include <asm/byteorder.h>
-
-#include <rdma/ib_verbs.h>
-#include <rdma/iw_cm.h>
-#include <rdma/ib_user_verbs.h>
-
-#include "nes.h"
-
-#include <rdma/ib_umem.h>
-
-atomic_t mod_qp_timouts;
-atomic_t qps_created;
-atomic_t sw_qps_destroyed;
-
-static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
-
-/**
- * nes_alloc_mw
- */
-static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd, enum ib_mw_type type)
-{
- struct nes_pd *nespd = to_nespd(ibpd);
- struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- struct nes_cqp_request *cqp_request;
- struct nes_mr *nesmr;
- struct ib_mw *ibmw;
- struct nes_hw_cqp_wqe *cqp_wqe;
- int ret;
- u32 stag;
- u32 stag_index = 0;
- u32 next_stag_index = 0;
- u32 driver_key = 0;
- u8 stag_key = 0;
-
- if (type != IB_MW_TYPE_1)
- return ERR_PTR(-EINVAL);
-
- get_random_bytes(&next_stag_index, sizeof(next_stag_index));
- stag_key = (u8)next_stag_index;
-
- driver_key = 0;
-
- next_stag_index >>= 8;
- next_stag_index %= nesadapter->max_mr;
-
- ret = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
- nesadapter->max_mr, &stag_index, &next_stag_index, NES_RESOURCE_MW);
- if (ret) {
- return ERR_PTR(ret);
- }
-
- nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
- if (!nesmr) {
- nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
- return ERR_PTR(-ENOMEM);
- }
-
- stag = stag_index << 8;
- stag |= driver_key;
- stag += (u32)stag_key;
-
- nes_debug(NES_DBG_MR, "Registering STag 0x%08X, index = 0x%08X\n",
- stag, stag_index);
-
- /* Register the region with the adapter */
- cqp_request = nes_get_cqp_request(nesdev);
- if (cqp_request == NULL) {
- kfree(nesmr);
- nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
- return ERR_PTR(-ENOMEM);
- }
-
- cqp_request->waiting = 1;
- cqp_wqe = &cqp_request->cqp_wqe;
-
- cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] =
- cpu_to_le32( NES_CQP_ALLOCATE_STAG | NES_CQP_STAG_RIGHTS_REMOTE_READ |
- NES_CQP_STAG_RIGHTS_REMOTE_WRITE | NES_CQP_STAG_VA_TO |
- NES_CQP_STAG_REM_ACC_EN);
-
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX, (nespd->pd_id & 0x00007fff));
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, stag);
-
- atomic_set(&cqp_request->refcount, 2);
- nes_post_cqp_request(nesdev, cqp_request);
-
- /* Wait for CQP */
- ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
- NES_EVENT_TIMEOUT);
- nes_debug(NES_DBG_MR, "Register STag 0x%08X completed, wait_event_timeout ret = %u,"
- " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
- stag, ret, cqp_request->major_code, cqp_request->minor_code);
- if ((!ret) || (cqp_request->major_code)) {
- nes_put_cqp_request(nesdev, cqp_request);
- kfree(nesmr);
- nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
- if (!ret) {
- return ERR_PTR(-ETIME);
- } else {
- return ERR_PTR(-ENOMEM);
- }
- }
- nes_put_cqp_request(nesdev, cqp_request);
-
- nesmr->ibmw.rkey = stag;
- nesmr->mode = IWNES_MEMREG_TYPE_MW;
- ibmw = &nesmr->ibmw;
- nesmr->pbl_4k = 0;
- nesmr->pbls_used = 0;
-
- return ibmw;
-}
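
The rkey returned here is a packed STag: the allocated resource index lands in bits 31..8, driver_key (always 0 on this path) would occupy the middle bits, and a random 8-bit stag_key fills bits 7..0; nes_dealloc_mw() recovers the index with (rkey & 0x0fffff00) >> 8. The packing, written out as hedged helpers (names are illustrative):

/* Hedged sketch: STag packing/unpacking as done in the code above. */
static inline u32 nes_make_stag(u32 stag_index, u32 driver_key, u8 stag_key)
{
	return ((stag_index << 8) | driver_key) + (u32)stag_key;
}

static inline u32 nes_stag_to_index(u32 stag)
{
	return (stag & 0x0fffff00) >> 8;	/* drop the key bits, keep the index */
}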
-
-
-/**
- * nes_dealloc_mw
- */
-static int nes_dealloc_mw(struct ib_mw *ibmw)
-{
- struct nes_mr *nesmr = to_nesmw(ibmw);
- struct nes_vnic *nesvnic = to_nesvnic(ibmw->device);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- struct nes_hw_cqp_wqe *cqp_wqe;
- struct nes_cqp_request *cqp_request;
- int err = 0;
- int ret;
-
- /* Deallocate the window with the adapter */
- cqp_request = nes_get_cqp_request(nesdev);
- if (cqp_request == NULL) {
- nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n");
- return -ENOMEM;
- }
- cqp_request->waiting = 1;
- cqp_wqe = &cqp_request->cqp_wqe;
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, NES_CQP_DEALLOCATE_STAG);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, ibmw->rkey);
-
- atomic_set(&cqp_request->refcount, 2);
- nes_post_cqp_request(nesdev, cqp_request);
-
- /* Wait for CQP */
- nes_debug(NES_DBG_MR, "Waiting for deallocate STag 0x%08X to complete.\n",
- ibmw->rkey);
- ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done),
- NES_EVENT_TIMEOUT);
- nes_debug(NES_DBG_MR, "Deallocate STag completed, wait_event_timeout ret = %u,"
- " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
- ret, cqp_request->major_code, cqp_request->minor_code);
- if (!ret)
- err = -ETIME;
- else if (cqp_request->major_code)
- err = -EIO;
-
- nes_put_cqp_request(nesdev, cqp_request);
-
- nes_free_resource(nesadapter, nesadapter->allocated_mrs,
- (ibmw->rkey & 0x0fffff00) >> 8);
- kfree(nesmr);
-
- return err;
-}
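
The completion handling above is the pattern every verb in this file repeats: wait_event_timeout() returning 0 becomes -ETIME, a non-zero CQP major code becomes -EIO, and the request reference is dropped either way. Condensed into one hedged helper (not present in the driver):

/* Hedged sketch of the repeated CQP wait / error-mapping sequence. */
static int nes_wait_cqp_done(struct nes_device *nesdev,
			     struct nes_cqp_request *cqp_request)
{
	u16 major_code;
	int ret;

	ret = wait_event_timeout(cqp_request->waitq,
				 cqp_request->request_done != 0,
				 NES_EVENT_TIMEOUT);
	major_code = cqp_request->major_code;
	nes_put_cqp_request(nesdev, cqp_request);

	if (!ret)
		return -ETIME;		/* CQP never signalled completion */
	if (major_code)
		return -EIO;		/* adapter reported an error */
	return 0;
}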
-
-
-/**
- * nes_bind_mw
- */
-static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
- struct ib_mw_bind *ibmw_bind)
-{
- u64 u64temp;
- struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);
- struct nes_device *nesdev = nesvnic->nesdev;
- /* struct nes_mr *nesmr = to_nesmw(ibmw); */
- struct nes_qp *nesqp = to_nesqp(ibqp);
- struct nes_hw_qp_wqe *wqe;
- unsigned long flags = 0;
- u32 head;
- u32 wqe_misc = 0;
- u32 qsize;
-
- if (nesqp->ibqp_state > IB_QPS_RTS)
- return -EINVAL;
-
- spin_lock_irqsave(&nesqp->lock, flags);
-
- head = nesqp->hwqp.sq_head;
- qsize = nesqp->hwqp.sq_tail;
-
- /* Check for SQ overflow */
- if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
- spin_unlock_irqrestore(&nesqp->lock, flags);
- return -ENOMEM;
- }
-
- wqe = &nesqp->hwqp.sq_vbase[head];
- /* nes_debug(NES_DBG_MR, "processing sq wqe at %p, head = %u.\n", wqe, head); */
- nes_fill_init_qp_wqe(wqe, nesqp, head);
- u64temp = ibmw_bind->wr_id;
- set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX, u64temp);
- wqe_misc = NES_IWARP_SQ_OP_BIND;
-
- wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
-
- if (ibmw_bind->send_flags & IB_SEND_SIGNALED)
- wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL;
-
- if (ibmw_bind->bind_info.mw_access_flags & IB_ACCESS_REMOTE_WRITE)
- wqe_misc |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE;
- if (ibmw_bind->bind_info.mw_access_flags & IB_ACCESS_REMOTE_READ)
- wqe_misc |= NES_CQP_STAG_RIGHTS_REMOTE_READ;
-
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_MISC_IDX, wqe_misc);
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MR_IDX,
- ibmw_bind->bind_info.mr->lkey);
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MW_IDX, ibmw->rkey);
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_LENGTH_LOW_IDX,
- ibmw_bind->bind_info.length);
- wqe->wqe_words[NES_IWARP_SQ_BIND_WQE_LENGTH_HIGH_IDX] = 0;
- u64temp = (u64)ibmw_bind->bind_info.addr;
- set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_VA_FBO_LOW_IDX, u64temp);
-
- head++;
- if (head >= qsize)
- head = 0;
-
- nesqp->hwqp.sq_head = head;
- barrier();
-
- nes_write32(nesdev->regs+NES_WQE_ALLOC,
- (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id);
-
- spin_unlock_irqrestore(&nesqp->lock, flags);
-
- return 0;
-}
-
-
-/*
- * nes_alloc_fast_mr
- */
-static int alloc_fast_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
- u32 stag, u32 page_count)
-{
- struct nes_hw_cqp_wqe *cqp_wqe;
- struct nes_cqp_request *cqp_request;
- unsigned long flags;
- int ret;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- u32 opcode = 0;
- u16 major_code;
- u64 region_length = page_count * PAGE_SIZE;
-
-
- cqp_request = nes_get_cqp_request(nesdev);
- if (cqp_request == NULL) {
- nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n");
- return -ENOMEM;
- }
- nes_debug(NES_DBG_MR, "alloc_fast_reg_mr: page_count = %d, "
- "region_length = %llu\n",
- page_count, region_length);
- cqp_request->waiting = 1;
- cqp_wqe = &cqp_request->cqp_wqe;
-
- spin_lock_irqsave(&nesadapter->pbl_lock, flags);
- if (nesadapter->free_4kpbl > 0) {
- nesadapter->free_4kpbl--;
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- } else {
- /* No 4kpbl's available: */
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- nes_debug(NES_DBG_MR, "Out of Pbls\n");
- nes_free_cqp_request(nesdev, cqp_request);
- return -ENOMEM;
- }
-
- opcode = NES_CQP_ALLOCATE_STAG | NES_CQP_STAG_MR |
- NES_CQP_STAG_PBL_BLK_SIZE | NES_CQP_STAG_VA_TO |
- NES_CQP_STAG_REM_ACC_EN;
- /*
- * The current OFED API does not support the zero based TO option.
- * If added then need to changed the NES_CQP_STAG_VA* option. Also,
- * the API does not support that ability to have the MR set for local
- * access only when created and not allow the SQ op to override. Given
- * this the remote enable must be set here.
- */
-
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX, 1);
-
- cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX] =
- cpu_to_le32((u32)(region_length >> 8) & 0xff000000);
- cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX] |=
- cpu_to_le32(nespd->pd_id & 0x00007fff);
-
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, stag);
- set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_VA_LOW_IDX, 0);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_LEN_LOW_IDX, 0);
- set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PA_LOW_IDX, 0);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_LEN_IDX, (page_count * 8));
- cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_STAG_PBL_BLK_SIZE);
- barrier();
-
- atomic_set(&cqp_request->refcount, 2);
- nes_post_cqp_request(nesdev, cqp_request);
-
- /* Wait for CQP */
- ret = wait_event_timeout(cqp_request->waitq,
- (0 != cqp_request->request_done),
- NES_EVENT_TIMEOUT);
-
- nes_debug(NES_DBG_MR, "Allocate STag 0x%08X completed, "
- "wait_event_timeout ret = %u, CQP Major:Minor codes = "
- "0x%04X:0x%04X.\n", stag, ret, cqp_request->major_code,
- cqp_request->minor_code);
- major_code = cqp_request->major_code;
- nes_put_cqp_request(nesdev, cqp_request);
-
- if (!ret || major_code) {
- spin_lock_irqsave(&nesadapter->pbl_lock, flags);
- nesadapter->free_4kpbl++;
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- }
-
- if (!ret)
- return -ETIME;
- else if (major_code)
- return -EIO;
- return 0;
-}
-
-/*
- * nes_alloc_mr
- */
-static struct ib_mr *nes_alloc_mr(struct ib_pd *ibpd,
- enum ib_mr_type mr_type,
- u32 max_num_sg)
-{
- struct nes_pd *nespd = to_nespd(ibpd);
- struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
-
- u32 next_stag_index;
- u8 stag_key = 0;
- u32 driver_key = 0;
- int err = 0;
- u32 stag_index = 0;
- struct nes_mr *nesmr;
- u32 stag;
- int ret;
- struct ib_mr *ibmr;
-
- if (mr_type != IB_MR_TYPE_MEM_REG)
- return ERR_PTR(-EINVAL);
-
- if (max_num_sg > (NES_4K_PBL_CHUNK_SIZE / sizeof(u64)))
- return ERR_PTR(-E2BIG);
-
-/*
- * Note: Set to always use a fixed length single page entry PBL. This is to allow
- * for the fast_reg_mr operation to always know the size of the PBL.
- */
- if (max_num_sg > (NES_4K_PBL_CHUNK_SIZE / sizeof(u64)))
- return ERR_PTR(-E2BIG);
-
- get_random_bytes(&next_stag_index, sizeof(next_stag_index));
- stag_key = (u8)next_stag_index;
- next_stag_index >>= 8;
- next_stag_index %= nesadapter->max_mr;
-
- err = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
- nesadapter->max_mr, &stag_index,
- &next_stag_index, NES_RESOURCE_FAST_MR);
- if (err)
- return ERR_PTR(err);
-
- nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
- if (!nesmr) {
- nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
- return ERR_PTR(-ENOMEM);
- }
-
- stag = stag_index << 8;
- stag |= driver_key;
- stag += (u32)stag_key;
-
- nes_debug(NES_DBG_MR, "Allocating STag 0x%08X index = 0x%08X\n",
- stag, stag_index);
-
- ret = alloc_fast_reg_mr(nesdev, nespd, stag, max_num_sg);
-
- if (ret == 0) {
- nesmr->ibmr.rkey = stag;
- nesmr->ibmr.lkey = stag;
- nesmr->mode = IWNES_MEMREG_TYPE_FMEM;
- ibmr = &nesmr->ibmr;
- } else {
- kfree(nesmr);
- nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
- ibmr = ERR_PTR(-ENOMEM);
- }
- return ibmr;
-}
-
-/*
- * nes_alloc_fast_reg_page_list
- */
-static struct ib_fast_reg_page_list *nes_alloc_fast_reg_page_list(
- struct ib_device *ibdev,
- int page_list_len)
-{
- struct nes_vnic *nesvnic = to_nesvnic(ibdev);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct ib_fast_reg_page_list *pifrpl;
- struct nes_ib_fast_reg_page_list *pnesfrpl;
-
- if (page_list_len > (NES_4K_PBL_CHUNK_SIZE / sizeof(u64)))
- return ERR_PTR(-E2BIG);
- /*
- * Allocate the ib_fast_reg_page_list structure, the
-	 * nes_ib_fast_reg_page_list structure, and the PBL table.
- */
- pnesfrpl = kmalloc(sizeof(struct nes_ib_fast_reg_page_list) +
- page_list_len * sizeof(u64), GFP_KERNEL);
-
- if (!pnesfrpl)
- return ERR_PTR(-ENOMEM);
-
- pifrpl = &pnesfrpl->ibfrpl;
- pifrpl->page_list = &pnesfrpl->pbl;
- pifrpl->max_page_list_len = page_list_len;
- /*
- * Allocate the WQE PBL
- */
- pnesfrpl->nes_wqe_pbl.kva = pci_alloc_consistent(nesdev->pcidev,
- page_list_len * sizeof(u64),
- &pnesfrpl->nes_wqe_pbl.paddr);
-
- if (!pnesfrpl->nes_wqe_pbl.kva) {
- kfree(pnesfrpl);
- return ERR_PTR(-ENOMEM);
- }
- nes_debug(NES_DBG_MR, "nes_alloc_fast_reg_pbl: nes_frpl = %p, "
- "ibfrpl = %p, ibfrpl.page_list = %p, pbl.kva = %p, "
- "pbl.paddr = %llx\n", pnesfrpl, &pnesfrpl->ibfrpl,
- pnesfrpl->ibfrpl.page_list, pnesfrpl->nes_wqe_pbl.kva,
- (unsigned long long) pnesfrpl->nes_wqe_pbl.paddr);
-
- return pifrpl;
-}
-
-/*
- * nes_free_fast_reg_page_list
- */
-static void nes_free_fast_reg_page_list(struct ib_fast_reg_page_list *pifrpl)
-{
- struct nes_vnic *nesvnic = to_nesvnic(pifrpl->device);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_ib_fast_reg_page_list *pnesfrpl;
-
- pnesfrpl = container_of(pifrpl, struct nes_ib_fast_reg_page_list, ibfrpl);
- /*
- * Free the WQE PBL.
- */
- pci_free_consistent(nesdev->pcidev,
- pifrpl->max_page_list_len * sizeof(u64),
- pnesfrpl->nes_wqe_pbl.kva,
- pnesfrpl->nes_wqe_pbl.paddr);
- /*
- * Free the PBL structure
- */
- kfree(pnesfrpl);
-}
-
-/**
- * nes_query_device
- */
-static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
- struct ib_udata *uhw)
-{
- struct nes_vnic *nesvnic = to_nesvnic(ibdev);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_ib_device *nesibdev = nesvnic->nesibdev;
-
- if (uhw->inlen || uhw->outlen)
- return -EINVAL;
-
- memset(props, 0, sizeof(*props));
- memcpy(&props->sys_image_guid, nesvnic->netdev->dev_addr, 6);
-
- props->fw_ver = nesdev->nesadapter->firmware_version;
- props->device_cap_flags = nesdev->nesadapter->device_cap_flags;
- props->vendor_id = nesdev->nesadapter->vendor_id;
- props->vendor_part_id = nesdev->nesadapter->vendor_part_id;
- props->hw_ver = nesdev->nesadapter->hw_rev;
- props->max_mr_size = 0x80000000;
- props->max_qp = nesibdev->max_qp;
- props->max_qp_wr = nesdev->nesadapter->max_qp_wr - 2;
- props->max_sge = nesdev->nesadapter->max_sge;
- props->max_cq = nesibdev->max_cq;
- props->max_cqe = nesdev->nesadapter->max_cqe;
- props->max_mr = nesibdev->max_mr;
- props->max_mw = nesibdev->max_mr;
- props->max_pd = nesibdev->max_pd;
- props->max_sge_rd = 1;
- switch (nesdev->nesadapter->max_irrq_wr) {
- case 0:
- props->max_qp_rd_atom = 2;
- break;
- case 1:
- props->max_qp_rd_atom = 8;
- break;
- case 2:
- props->max_qp_rd_atom = 32;
- break;
- case 3:
- props->max_qp_rd_atom = 64;
- break;
- default:
- props->max_qp_rd_atom = 0;
- }
- props->max_qp_init_rd_atom = props->max_qp_rd_atom;
- props->atomic_cap = IB_ATOMIC_NONE;
- props->max_map_per_fmr = 1;
-
- return 0;
-}
-
-
-/**
- * nes_query_port
- */
-static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props)
-{
- struct nes_vnic *nesvnic = to_nesvnic(ibdev);
- struct net_device *netdev = nesvnic->netdev;
-
- memset(props, 0, sizeof(*props));
-
- props->max_mtu = IB_MTU_4096;
-
- if (netdev->mtu >= 4096)
- props->active_mtu = IB_MTU_4096;
- else if (netdev->mtu >= 2048)
- props->active_mtu = IB_MTU_2048;
- else if (netdev->mtu >= 1024)
- props->active_mtu = IB_MTU_1024;
- else if (netdev->mtu >= 512)
- props->active_mtu = IB_MTU_512;
- else
- props->active_mtu = IB_MTU_256;
-
- props->lid = 1;
- props->lmc = 0;
- props->sm_lid = 0;
- props->sm_sl = 0;
- if (netif_queue_stopped(netdev))
- props->state = IB_PORT_DOWN;
- else if (nesvnic->linkup)
- props->state = IB_PORT_ACTIVE;
- else
- props->state = IB_PORT_DOWN;
- props->phys_state = 0;
- props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
- IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
- props->gid_tbl_len = 1;
- props->pkey_tbl_len = 1;
- props->qkey_viol_cntr = 0;
- props->active_width = IB_WIDTH_4X;
- props->active_speed = IB_SPEED_SDR;
- props->max_msg_sz = 0x80000000;
-
- return 0;
-}
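
The active MTU reported here is just the netdev MTU rounded down to the nearest IB MTU value. The mapping, isolated (later kernels added a generic ib_mtu_int_to_enum() helper for roughly this conversion):

/* Hedged sketch: netdev MTU -> enum ib_mtu mapping used above. */
static enum ib_mtu nes_mtu_to_ib_mtu(unsigned int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	if (mtu >= 2048)
		return IB_MTU_2048;
	if (mtu >= 1024)
		return IB_MTU_1024;
	if (mtu >= 512)
		return IB_MTU_512;
	return IB_MTU_256;
}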
-
-/**
- * nes_query_pkey
- */
-static int nes_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
-{
- *pkey = 0;
- return 0;
-}
-
-
-/**
- * nes_query_gid
- */
-static int nes_query_gid(struct ib_device *ibdev, u8 port,
- int index, union ib_gid *gid)
-{
- struct nes_vnic *nesvnic = to_nesvnic(ibdev);
-
- memset(&(gid->raw[0]), 0, sizeof(gid->raw));
- memcpy(&(gid->raw[0]), nesvnic->netdev->dev_addr, 6);
-
- return 0;
-}
-
-
-/**
- * nes_alloc_ucontext - Allocate the user context data structure. This keeps track
- * of all objects associated with a particular user-mode client.
- */
-static struct ib_ucontext *nes_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
-{
- struct nes_vnic *nesvnic = to_nesvnic(ibdev);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- struct nes_alloc_ucontext_req req;
- struct nes_alloc_ucontext_resp uresp;
- struct nes_ucontext *nes_ucontext;
- struct nes_ib_device *nesibdev = nesvnic->nesibdev;
-
-
- if (ib_copy_from_udata(&req, udata, sizeof(struct nes_alloc_ucontext_req))) {
- printk(KERN_ERR PFX "Invalid structure size on allocate user context.\n");
- return ERR_PTR(-EINVAL);
- }
-
- if (req.userspace_ver != NES_ABI_USERSPACE_VER) {
- printk(KERN_ERR PFX "Invalid userspace driver version detected. Detected version %d, should be %d\n",
- req.userspace_ver, NES_ABI_USERSPACE_VER);
- return ERR_PTR(-EINVAL);
- }
-
-
- memset(&uresp, 0, sizeof uresp);
-
- uresp.max_qps = nesibdev->max_qp;
- uresp.max_pds = nesibdev->max_pd;
- uresp.wq_size = nesdev->nesadapter->max_qp_wr * 2;
- uresp.virtwq = nesadapter->virtwq;
- uresp.kernel_ver = NES_ABI_KERNEL_VER;
-
- nes_ucontext = kzalloc(sizeof *nes_ucontext, GFP_KERNEL);
- if (!nes_ucontext)
- return ERR_PTR(-ENOMEM);
-
- nes_ucontext->nesdev = nesdev;
- nes_ucontext->mmap_wq_offset = uresp.max_pds;
- nes_ucontext->mmap_cq_offset = nes_ucontext->mmap_wq_offset +
- ((sizeof(struct nes_hw_qp_wqe) * uresp.max_qps * 2) + PAGE_SIZE-1) /
- PAGE_SIZE;
-
-
- if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
- kfree(nes_ucontext);
- return ERR_PTR(-EFAULT);
- }
-
- INIT_LIST_HEAD(&nes_ucontext->cq_reg_mem_list);
- INIT_LIST_HEAD(&nes_ucontext->qp_reg_mem_list);
- atomic_set(&nes_ucontext->usecnt, 1);
- return &nes_ucontext->ibucontext;
-}
-
-
-/**
- * nes_dealloc_ucontext
- */
-static int nes_dealloc_ucontext(struct ib_ucontext *context)
-{
- /* struct nes_vnic *nesvnic = to_nesvnic(context->device); */
- /* struct nes_device *nesdev = nesvnic->nesdev; */
- struct nes_ucontext *nes_ucontext = to_nesucontext(context);
-
- if (!atomic_dec_and_test(&nes_ucontext->usecnt))
- return 0;
- kfree(nes_ucontext);
- return 0;
-}
-
-
-/**
- * nes_mmap
- */
-static int nes_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
-{
- unsigned long index;
- struct nes_vnic *nesvnic = to_nesvnic(context->device);
- struct nes_device *nesdev = nesvnic->nesdev;
- /* struct nes_adapter *nesadapter = nesdev->nesadapter; */
- struct nes_ucontext *nes_ucontext;
- struct nes_qp *nesqp;
-
- nes_ucontext = to_nesucontext(context);
-
-
- if (vma->vm_pgoff >= nes_ucontext->mmap_wq_offset) {
- index = (vma->vm_pgoff - nes_ucontext->mmap_wq_offset) * PAGE_SIZE;
- index /= ((sizeof(struct nes_hw_qp_wqe) * nesdev->nesadapter->max_qp_wr * 2) +
- PAGE_SIZE-1) & (~(PAGE_SIZE-1));
- if (!test_bit(index, nes_ucontext->allocated_wqs)) {
- nes_debug(NES_DBG_MMAP, "wq %lu not allocated\n", index);
- return -EFAULT;
- }
- nesqp = nes_ucontext->mmap_nesqp[index];
- if (nesqp == NULL) {
- nes_debug(NES_DBG_MMAP, "wq %lu has a NULL QP base.\n", index);
- return -EFAULT;
- }
- if (remap_pfn_range(vma, vma->vm_start,
- virt_to_phys(nesqp->hwqp.sq_vbase) >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot)) {
- nes_debug(NES_DBG_MMAP, "remap_pfn_range failed.\n");
- return -EAGAIN;
- }
- vma->vm_private_data = nesqp;
- return 0;
- } else {
- index = vma->vm_pgoff;
- if (!test_bit(index, nes_ucontext->allocated_doorbells))
- return -EFAULT;
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (io_remap_pfn_range(vma, vma->vm_start,
- (nesdev->doorbell_start +
- ((nes_ucontext->mmap_db_index[index] - nesdev->base_doorbell_index) * 4096))
- >> PAGE_SHIFT, PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
- vma->vm_private_data = nes_ucontext;
- return 0;
- }
-
- return -ENOSYS;
-}
-
-
-/**
- * nes_alloc_pd
- */
-static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context, struct ib_udata *udata)
-{
- struct nes_pd *nespd;
- struct nes_vnic *nesvnic = to_nesvnic(ibdev);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- struct nes_ucontext *nesucontext;
- struct nes_alloc_pd_resp uresp;
- u32 pd_num = 0;
- int err;
-
- nes_debug(NES_DBG_PD, "nesvnic=%p, netdev=%p %s, ibdev=%p, context=%p, netdev refcnt=%u\n",
- nesvnic, nesdev->netdev[0], nesdev->netdev[0]->name, ibdev, context,
- netdev_refcnt_read(nesvnic->netdev));
-
- err = nes_alloc_resource(nesadapter, nesadapter->allocated_pds,
- nesadapter->max_pd, &pd_num, &nesadapter->next_pd, NES_RESOURCE_PD);
- if (err) {
- return ERR_PTR(err);
- }
-
- nespd = kzalloc(sizeof (struct nes_pd), GFP_KERNEL);
- if (!nespd) {
- nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num);
- return ERR_PTR(-ENOMEM);
- }
-
- nes_debug(NES_DBG_PD, "Allocating PD (%p) for ib device %s\n",
- nespd, nesvnic->nesibdev->ibdev.name);
-
- nespd->pd_id = (pd_num << (PAGE_SHIFT-12)) + nesadapter->base_pd;
-
- if (context) {
- nesucontext = to_nesucontext(context);
- nespd->mmap_db_index = find_next_zero_bit(nesucontext->allocated_doorbells,
- NES_MAX_USER_DB_REGIONS, nesucontext->first_free_db);
- nes_debug(NES_DBG_PD, "find_first_zero_biton doorbells returned %u, mapping pd_id %u.\n",
- nespd->mmap_db_index, nespd->pd_id);
- if (nespd->mmap_db_index >= NES_MAX_USER_DB_REGIONS) {
- nes_debug(NES_DBG_PD, "mmap_db_index > MAX\n");
- nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num);
- kfree(nespd);
- return ERR_PTR(-ENOMEM);
- }
-
- uresp.pd_id = nespd->pd_id;
- uresp.mmap_db_index = nespd->mmap_db_index;
- if (ib_copy_to_udata(udata, &uresp, sizeof (struct nes_alloc_pd_resp))) {
- nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num);
- kfree(nespd);
- return ERR_PTR(-EFAULT);
- }
-
- set_bit(nespd->mmap_db_index, nesucontext->allocated_doorbells);
- nesucontext->mmap_db_index[nespd->mmap_db_index] = nespd->pd_id;
- nesucontext->first_free_db = nespd->mmap_db_index + 1;
- }
-
- nes_debug(NES_DBG_PD, "PD%u structure located @%p.\n", nespd->pd_id, nespd);
- return &nespd->ibpd;
-}
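
Per-context doorbell regions are handed out with a plain bitmap: find_next_zero_bit() starting from the rotating first_free_db hint picks a slot, set_bit() claims it, and nes_dealloc_pd() later releases it with clear_bit() and rewinds the hint. The same idiom as a standalone hedged sketch (names are illustrative):

/* Hedged sketch: bitmap-based slot allocator, mirroring the
 * allocated_doorbells handling above. */
static int nes_claim_slot(unsigned long *bitmap, u32 nbits, u32 *hint)
{
	u32 slot = find_next_zero_bit(bitmap, nbits, *hint);

	if (slot >= nbits)
		return -ENOMEM;		/* no free slot */
	set_bit(slot, bitmap);
	*hint = slot + 1;		/* start the next search after this one */
	return slot;
}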
-
-
-/**
- * nes_dealloc_pd
- */
-static int nes_dealloc_pd(struct ib_pd *ibpd)
-{
- struct nes_ucontext *nesucontext;
- struct nes_pd *nespd = to_nespd(ibpd);
- struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
-
- if ((ibpd->uobject) && (ibpd->uobject->context)) {
- nesucontext = to_nesucontext(ibpd->uobject->context);
- nes_debug(NES_DBG_PD, "Clearing bit %u from allocated doorbells\n",
- nespd->mmap_db_index);
- clear_bit(nespd->mmap_db_index, nesucontext->allocated_doorbells);
- nesucontext->mmap_db_index[nespd->mmap_db_index] = 0;
- if (nesucontext->first_free_db > nespd->mmap_db_index) {
- nesucontext->first_free_db = nespd->mmap_db_index;
- }
- }
-
- nes_debug(NES_DBG_PD, "Deallocating PD%u structure located @%p.\n",
- nespd->pd_id, nespd);
- nes_free_resource(nesadapter, nesadapter->allocated_pds,
- (nespd->pd_id-nesadapter->base_pd)>>(PAGE_SHIFT-12));
- kfree(nespd);
-
- return 0;
-}
-
-
-/**
- * nes_create_ah
- */
-static struct ib_ah *nes_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
-{
- return ERR_PTR(-ENOSYS);
-}
-
-
-/**
- * nes_destroy_ah
- */
-static int nes_destroy_ah(struct ib_ah *ah)
-{
- return -ENOSYS;
-}
-
-
-/**
- * nes_get_encoded_size
- */
-static inline u8 nes_get_encoded_size(int *size)
-{
- u8 encoded_size = 0;
- if (*size <= 32) {
- *size = 32;
- encoded_size = 1;
- } else if (*size <= 128) {
- *size = 128;
- encoded_size = 2;
- } else if (*size <= 512) {
- *size = 512;
- encoded_size = 3;
- }
- return (encoded_size);
-}
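
nes_get_encoded_size() does two jobs at once: it rounds the requested queue depth up to the next hardware-supported size (32, 128 or 512 WQEs) and returns the 2-bit encoding the QP context expects, with 0 meaning "too large", which nes_create_qp() turns into -EINVAL. For example:

/* Hedged sketch of how the rounding behaves. */
static void nes_encoded_size_example(void)
{
	int size = 100;
	u8 enc = nes_get_encoded_size(&size);	/* size becomes 128, enc == 2 */

	size = 2000;
	enc = nes_get_encoded_size(&size);	/* size unchanged, enc == 0 -> caller rejects */
	(void)enc;
}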
-
-
-
-/**
- * nes_setup_virt_qp
- */
-static int nes_setup_virt_qp(struct nes_qp *nesqp, struct nes_pbl *nespbl,
- struct nes_vnic *nesvnic, int sq_size, int rq_size)
-{
- unsigned long flags;
- void *mem;
- __le64 *pbl = NULL;
- __le64 *tpbl;
- __le64 *pblbuffer;
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- u32 pbl_entries;
- u8 rq_pbl_entries;
- u8 sq_pbl_entries;
-
- pbl_entries = nespbl->pbl_size >> 3;
- nes_debug(NES_DBG_QP, "Userspace PBL, pbl_size=%u, pbl_entries = %d pbl_vbase=%p, pbl_pbase=%lx\n",
- nespbl->pbl_size, pbl_entries,
- (void *)nespbl->pbl_vbase,
- (unsigned long) nespbl->pbl_pbase);
- pbl = (__le64 *) nespbl->pbl_vbase; /* points to first pbl entry */
-	/* now let's set the sq_vbase as well as rq_vbase addrs; we will assign */
-	/* the first pbl to be for the rq_vbase... */
- rq_pbl_entries = (rq_size * sizeof(struct nes_hw_qp_wqe)) >> 12;
- sq_pbl_entries = (sq_size * sizeof(struct nes_hw_qp_wqe)) >> 12;
- nesqp->hwqp.sq_pbase = (le32_to_cpu(((__le32 *)pbl)[0])) | ((u64)((le32_to_cpu(((__le32 *)pbl)[1]))) << 32);
- if (!nespbl->page) {
- nes_debug(NES_DBG_QP, "QP nespbl->page is NULL \n");
- kfree(nespbl);
- return -ENOMEM;
- }
-
- nesqp->hwqp.sq_vbase = kmap(nespbl->page);
- nesqp->page = nespbl->page;
- if (!nesqp->hwqp.sq_vbase) {
- nes_debug(NES_DBG_QP, "QP sq_vbase kmap failed\n");
- kfree(nespbl);
- return -ENOMEM;
- }
-
- /* Now to get to sq.. we need to calculate how many */
- /* PBL entries were used by the rq.. */
- pbl += sq_pbl_entries;
- nesqp->hwqp.rq_pbase = (le32_to_cpu(((__le32 *)pbl)[0])) | ((u64)((le32_to_cpu(((__le32 *)pbl)[1]))) << 32);
- /* nesqp->hwqp.rq_vbase = bus_to_virt(*pbl); */
- /*nesqp->hwqp.rq_vbase = phys_to_virt(*pbl); */
-
- nes_debug(NES_DBG_QP, "QP sq_vbase= %p sq_pbase=%lx rq_vbase=%p rq_pbase=%lx\n",
- nesqp->hwqp.sq_vbase, (unsigned long) nesqp->hwqp.sq_pbase,
- nesqp->hwqp.rq_vbase, (unsigned long) nesqp->hwqp.rq_pbase);
- spin_lock_irqsave(&nesadapter->pbl_lock, flags);
- if (!nesadapter->free_256pbl) {
- pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
- nespbl->pbl_pbase);
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- kunmap(nesqp->page);
- kfree(nespbl);
- return -ENOMEM;
- }
- nesadapter->free_256pbl--;
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
-
- nesqp->pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 256, &nesqp->pbl_pbase);
- pblbuffer = nesqp->pbl_vbase;
- if (!nesqp->pbl_vbase) {
- /* memory allocated during nes_reg_user_mr() */
- pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
- nespbl->pbl_pbase);
- kfree(nespbl);
- spin_lock_irqsave(&nesadapter->pbl_lock, flags);
- nesadapter->free_256pbl++;
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- kunmap(nesqp->page);
- return -ENOMEM;
- }
- memset(nesqp->pbl_vbase, 0, 256);
- /* fill in the page address in the pbl buffer.. */
- tpbl = pblbuffer + 16;
- pbl = (__le64 *)nespbl->pbl_vbase;
- while (sq_pbl_entries--)
- *tpbl++ = *pbl++;
- tpbl = pblbuffer;
- while (rq_pbl_entries--)
- *tpbl++ = *pbl++;
-
- /* done with memory allocated during nes_reg_user_mr() */
- pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
- nespbl->pbl_pbase);
- kfree(nespbl);
-
- nesqp->qp_mem_size =
- max((u32)sizeof(struct nes_qp_context), ((u32)256)) + 256; /* this is Q2 */
- /* Round up to a multiple of a page */
- nesqp->qp_mem_size += PAGE_SIZE - 1;
- nesqp->qp_mem_size &= ~(PAGE_SIZE - 1);
-
- mem = pci_alloc_consistent(nesdev->pcidev, nesqp->qp_mem_size,
- &nesqp->hwqp.q2_pbase);
-
- if (!mem) {
- pci_free_consistent(nesdev->pcidev, 256, nesqp->pbl_vbase, nesqp->pbl_pbase);
- nesqp->pbl_vbase = NULL;
- spin_lock_irqsave(&nesadapter->pbl_lock, flags);
- nesadapter->free_256pbl++;
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- kunmap(nesqp->page);
- return -ENOMEM;
- }
- nesqp->sq_kmapped = 1;
- nesqp->hwqp.q2_vbase = mem;
- mem += 256;
- memset(nesqp->hwqp.q2_vbase, 0, 256);
- nesqp->nesqp_context = mem;
- memset(nesqp->nesqp_context, 0, sizeof(*nesqp->nesqp_context));
- nesqp->nesqp_context_pbase = nesqp->hwqp.q2_pbase + 256;
-
- return 0;
-}
-
-
-/**
- * nes_setup_mmap_qp
- */
-static int nes_setup_mmap_qp(struct nes_qp *nesqp, struct nes_vnic *nesvnic,
- int sq_size, int rq_size)
-{
- void *mem;
- struct nes_device *nesdev = nesvnic->nesdev;
-
- nesqp->qp_mem_size = (sizeof(struct nes_hw_qp_wqe) * sq_size) +
- (sizeof(struct nes_hw_qp_wqe) * rq_size) +
- max((u32)sizeof(struct nes_qp_context), ((u32)256)) +
- 256; /* this is Q2 */
- /* Round up to a multiple of a page */
- nesqp->qp_mem_size += PAGE_SIZE - 1;
- nesqp->qp_mem_size &= ~(PAGE_SIZE - 1);
-
- mem = pci_alloc_consistent(nesdev->pcidev, nesqp->qp_mem_size,
- &nesqp->hwqp.sq_pbase);
- if (!mem)
- return -ENOMEM;
- nes_debug(NES_DBG_QP, "PCI consistent memory for "
- "host descriptor rings located @ %p (pa = 0x%08lX.) size = %u.\n",
- mem, (unsigned long)nesqp->hwqp.sq_pbase, nesqp->qp_mem_size);
-
- memset(mem, 0, nesqp->qp_mem_size);
-
- nesqp->hwqp.sq_vbase = mem;
- mem += sizeof(struct nes_hw_qp_wqe) * sq_size;
-
- nesqp->hwqp.rq_vbase = mem;
- nesqp->hwqp.rq_pbase = nesqp->hwqp.sq_pbase +
- sizeof(struct nes_hw_qp_wqe) * sq_size;
- mem += sizeof(struct nes_hw_qp_wqe) * rq_size;
-
- nesqp->hwqp.q2_vbase = mem;
- nesqp->hwqp.q2_pbase = nesqp->hwqp.rq_pbase +
- sizeof(struct nes_hw_qp_wqe) * rq_size;
- mem += 256;
- memset(nesqp->hwqp.q2_vbase, 0, 256);
-
- nesqp->nesqp_context = mem;
- nesqp->nesqp_context_pbase = nesqp->hwqp.q2_pbase + 256;
- memset(nesqp->nesqp_context, 0, sizeof(*nesqp->nesqp_context));
- return 0;
-}
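
For kernel-mode QPs, everything lives in one page-rounded pci_alloc_consistent() block that is then carved into SQ, RQ, Q2 and QP-context regions, with each bus address derived as an offset from sq_pbase. The size computation, pulled out as a hedged helper (not in the driver):

/* Hedged sketch: total size of the single DMA block carved up above. */
static u32 nes_mmap_qp_mem_size(int sq_size, int rq_size)
{
	u32 size = sizeof(struct nes_hw_qp_wqe) * sq_size +
		   sizeof(struct nes_hw_qp_wqe) * rq_size +
		   max((u32)sizeof(struct nes_qp_context), (u32)256) +
		   256;			/* Q2 scratch area */

	return PAGE_ALIGN(size);	/* round up to a page multiple */
}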
-
-
-/**
- * nes_free_qp_mem() is to free up the qp's pci_alloc_consistent() memory.
- */
-static inline void nes_free_qp_mem(struct nes_device *nesdev,
- struct nes_qp *nesqp, int virt_wqs)
-{
- unsigned long flags;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- if (!virt_wqs) {
- pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size,
- nesqp->hwqp.sq_vbase, nesqp->hwqp.sq_pbase);
-	} else {
- spin_lock_irqsave(&nesadapter->pbl_lock, flags);
- nesadapter->free_256pbl++;
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size, nesqp->hwqp.q2_vbase, nesqp->hwqp.q2_pbase);
- pci_free_consistent(nesdev->pcidev, 256, nesqp->pbl_vbase, nesqp->pbl_pbase );
- nesqp->pbl_vbase = NULL;
- if (nesqp->sq_kmapped) {
- nesqp->sq_kmapped = 0;
- kunmap(nesqp->page);
- }
- }
-}
-
-
-/**
- * nes_create_qp
- */
-static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
- struct ib_qp_init_attr *init_attr, struct ib_udata *udata)
-{
- u64 u64temp= 0;
- u64 u64nesqp = 0;
- struct nes_pd *nespd = to_nespd(ibpd);
- struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- struct nes_qp *nesqp;
- struct nes_cq *nescq;
- struct nes_ucontext *nes_ucontext;
- struct nes_hw_cqp_wqe *cqp_wqe;
- struct nes_cqp_request *cqp_request;
- struct nes_create_qp_req req;
- struct nes_create_qp_resp uresp;
- struct nes_pbl *nespbl = NULL;
- u32 qp_num = 0;
- u32 opcode = 0;
- /* u32 counter = 0; */
- void *mem;
- unsigned long flags;
- int ret;
- int err;
- int virt_wqs = 0;
- int sq_size;
- int rq_size;
- u8 sq_encoded_size;
- u8 rq_encoded_size;
- /* int counter; */
-
- if (init_attr->create_flags)
- return ERR_PTR(-EINVAL);
-
- atomic_inc(&qps_created);
- switch (init_attr->qp_type) {
- case IB_QPT_RC:
- if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
- init_attr->cap.max_inline_data = 0;
- } else {
- init_attr->cap.max_inline_data = 64;
- }
- sq_size = init_attr->cap.max_send_wr;
- rq_size = init_attr->cap.max_recv_wr;
-
- /* check if the encoded sizes are OK or not... */
- sq_encoded_size = nes_get_encoded_size(&sq_size);
- rq_encoded_size = nes_get_encoded_size(&rq_size);
-
- if ((!sq_encoded_size) || (!rq_encoded_size)) {
- nes_debug(NES_DBG_QP, "ERROR bad rq (%u) or sq (%u) size\n",
- rq_size, sq_size);
- return ERR_PTR(-EINVAL);
- }
-
- init_attr->cap.max_send_wr = sq_size -2;
- init_attr->cap.max_recv_wr = rq_size -1;
- nes_debug(NES_DBG_QP, "RQ size=%u, SQ Size=%u\n", rq_size, sq_size);
-
- ret = nes_alloc_resource(nesadapter, nesadapter->allocated_qps,
- nesadapter->max_qp, &qp_num, &nesadapter->next_qp, NES_RESOURCE_QP);
- if (ret) {
- return ERR_PTR(ret);
- }
-
- /* Need 512 (actually now 1024) byte alignment on this structure */
- mem = kzalloc(sizeof(*nesqp)+NES_SW_CONTEXT_ALIGN-1, GFP_KERNEL);
- if (!mem) {
- nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
- nes_debug(NES_DBG_QP, "Unable to allocate QP\n");
- return ERR_PTR(-ENOMEM);
- }
- u64nesqp = (unsigned long)mem;
- u64nesqp += ((u64)NES_SW_CONTEXT_ALIGN) - 1;
- u64temp = ((u64)NES_SW_CONTEXT_ALIGN) - 1;
- u64nesqp &= ~u64temp;
- nesqp = (struct nes_qp *)(unsigned long)u64nesqp;
- /* nes_debug(NES_DBG_QP, "nesqp=%p, allocated buffer=%p. Rounded to closest %u\n",
- nesqp, mem, NES_SW_CONTEXT_ALIGN); */
- nesqp->allocated_buffer = mem;
-
- if (udata) {
- if (ib_copy_from_udata(&req, udata, sizeof(struct nes_create_qp_req))) {
- nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
- kfree(nesqp->allocated_buffer);
- nes_debug(NES_DBG_QP, "ib_copy_from_udata() Failed \n");
- return ERR_PTR(-EFAULT);
- }
- if (req.user_wqe_buffers) {
- virt_wqs = 1;
- }
- if (req.user_qp_buffer)
- nesqp->nesuqp_addr = req.user_qp_buffer;
- if ((ibpd->uobject) && (ibpd->uobject->context)) {
- nesqp->user_mode = 1;
- nes_ucontext = to_nesucontext(ibpd->uobject->context);
- if (virt_wqs) {
- err = 1;
- list_for_each_entry(nespbl, &nes_ucontext->qp_reg_mem_list, list) {
- if (nespbl->user_base == (unsigned long )req.user_wqe_buffers) {
- list_del(&nespbl->list);
- err = 0;
- nes_debug(NES_DBG_QP, "Found PBL for virtual QP. nespbl=%p. user_base=0x%lx\n",
- nespbl, nespbl->user_base);
- break;
- }
- }
- if (err) {
- nes_debug(NES_DBG_QP, "Didn't Find PBL for virtual QP. address = %llx.\n",
- (long long unsigned int)req.user_wqe_buffers);
- nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
- kfree(nesqp->allocated_buffer);
- return ERR_PTR(-EFAULT);
- }
- }
-
- nes_ucontext = to_nesucontext(ibpd->uobject->context);
- nesqp->mmap_sq_db_index =
- find_next_zero_bit(nes_ucontext->allocated_wqs,
- NES_MAX_USER_WQ_REGIONS, nes_ucontext->first_free_wq);
- /* nes_debug(NES_DBG_QP, "find_first_zero_biton wqs returned %u\n",
- nespd->mmap_db_index); */
- if (nesqp->mmap_sq_db_index >= NES_MAX_USER_WQ_REGIONS) {
- nes_debug(NES_DBG_QP,
- "db index > max user regions, failing create QP\n");
- nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
- if (virt_wqs) {
- pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
- nespbl->pbl_pbase);
- kfree(nespbl);
- }
- kfree(nesqp->allocated_buffer);
- return ERR_PTR(-ENOMEM);
- }
- set_bit(nesqp->mmap_sq_db_index, nes_ucontext->allocated_wqs);
- nes_ucontext->mmap_nesqp[nesqp->mmap_sq_db_index] = nesqp;
- nes_ucontext->first_free_wq = nesqp->mmap_sq_db_index + 1;
- } else {
- nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
- kfree(nesqp->allocated_buffer);
- return ERR_PTR(-EFAULT);
- }
- }
- err = (!virt_wqs) ? nes_setup_mmap_qp(nesqp, nesvnic, sq_size, rq_size) :
- nes_setup_virt_qp(nesqp, nespbl, nesvnic, sq_size, rq_size);
- if (err) {
-			nes_debug(NES_DBG_QP,
-				  "error getting qp mem, code = %d\n", err);
- nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
- kfree(nesqp->allocated_buffer);
- return ERR_PTR(-ENOMEM);
- }
-
- nesqp->hwqp.sq_size = sq_size;
- nesqp->hwqp.sq_encoded_size = sq_encoded_size;
- nesqp->hwqp.sq_head = 1;
- nesqp->hwqp.rq_size = rq_size;
- nesqp->hwqp.rq_encoded_size = rq_encoded_size;
- /* nes_debug(NES_DBG_QP, "nesqp->nesqp_context_pbase = %p\n",
- (void *)nesqp->nesqp_context_pbase);
- */
- nesqp->hwqp.qp_id = qp_num;
- nesqp->ibqp.qp_num = nesqp->hwqp.qp_id;
- nesqp->nespd = nespd;
-
- nescq = to_nescq(init_attr->send_cq);
- nesqp->nesscq = nescq;
- nescq = to_nescq(init_attr->recv_cq);
- nesqp->nesrcq = nescq;
-
- nesqp->nesqp_context->misc |= cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) <<
- NES_QPCONTEXT_MISC_PCI_FCN_SHIFT);
- nesqp->nesqp_context->misc |= cpu_to_le32((u32)nesqp->hwqp.rq_encoded_size <<
- NES_QPCONTEXT_MISC_RQ_SIZE_SHIFT);
- nesqp->nesqp_context->misc |= cpu_to_le32((u32)nesqp->hwqp.sq_encoded_size <<
- NES_QPCONTEXT_MISC_SQ_SIZE_SHIFT);
- if (!udata) {
- nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_PRIV_EN);
- nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_FAST_REGISTER_EN);
- }
- nesqp->nesqp_context->cqs = cpu_to_le32(nesqp->nesscq->hw_cq.cq_number +
- ((u32)nesqp->nesrcq->hw_cq.cq_number << 16));
- u64temp = (u64)nesqp->hwqp.sq_pbase;
- nesqp->nesqp_context->sq_addr_low = cpu_to_le32((u32)u64temp);
- nesqp->nesqp_context->sq_addr_high = cpu_to_le32((u32)(u64temp >> 32));
-
-
- if (!virt_wqs) {
- u64temp = (u64)nesqp->hwqp.sq_pbase;
- nesqp->nesqp_context->sq_addr_low = cpu_to_le32((u32)u64temp);
- nesqp->nesqp_context->sq_addr_high = cpu_to_le32((u32)(u64temp >> 32));
- u64temp = (u64)nesqp->hwqp.rq_pbase;
- nesqp->nesqp_context->rq_addr_low = cpu_to_le32((u32)u64temp);
- nesqp->nesqp_context->rq_addr_high = cpu_to_le32((u32)(u64temp >> 32));
- } else {
- u64temp = (u64)nesqp->pbl_pbase;
- nesqp->nesqp_context->rq_addr_low = cpu_to_le32((u32)u64temp);
- nesqp->nesqp_context->rq_addr_high = cpu_to_le32((u32)(u64temp >> 32));
- }
-
- /* nes_debug(NES_DBG_QP, "next_qp_nic_index=%u, using nic_index=%d\n",
- nesvnic->next_qp_nic_index,
- nesvnic->qp_nic_index[nesvnic->next_qp_nic_index]); */
- spin_lock_irqsave(&nesdev->cqp.lock, flags);
- nesqp->nesqp_context->misc2 |= cpu_to_le32(
- (u32)nesvnic->qp_nic_index[nesvnic->next_qp_nic_index] <<
- NES_QPCONTEXT_MISC2_NIC_INDEX_SHIFT);
- nesvnic->next_qp_nic_index++;
- if ((nesvnic->next_qp_nic_index > 3) ||
- (nesvnic->qp_nic_index[nesvnic->next_qp_nic_index] == 0xf)) {
- nesvnic->next_qp_nic_index = 0;
- }
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
-
- nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32((u32)nesqp->nespd->pd_id << 16);
- u64temp = (u64)nesqp->hwqp.q2_pbase;
- nesqp->nesqp_context->q2_addr_low = cpu_to_le32((u32)u64temp);
- nesqp->nesqp_context->q2_addr_high = cpu_to_le32((u32)(u64temp >> 32));
- nesqp->nesqp_context->aeq_token_low = cpu_to_le32((u32)((unsigned long)(nesqp)));
- nesqp->nesqp_context->aeq_token_high = cpu_to_le32((u32)(upper_32_bits((unsigned long)(nesqp))));
- nesqp->nesqp_context->ird_ord_sizes = cpu_to_le32(NES_QPCONTEXT_ORDIRD_ALSMM |
- NES_QPCONTEXT_ORDIRD_AAH |
- ((((u32)nesadapter->max_irrq_wr) <<
- NES_QPCONTEXT_ORDIRD_IRDSIZE_SHIFT) & NES_QPCONTEXT_ORDIRD_IRDSIZE_MASK));
- if (disable_mpa_crc) {
- nes_debug(NES_DBG_QP, "Disabling MPA crc checking due to module option.\n");
- nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(NES_QPCONTEXT_ORDIRD_RNMC);
- }
-
-
- /* Create the QP */
- cqp_request = nes_get_cqp_request(nesdev);
- if (cqp_request == NULL) {
- nes_debug(NES_DBG_QP, "Failed to get a cqp_request\n");
- nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
- nes_free_qp_mem(nesdev, nesqp, virt_wqs);
- kfree(nesqp->allocated_buffer);
- return ERR_PTR(-ENOMEM);
- }
- cqp_request->waiting = 1;
- cqp_wqe = &cqp_request->cqp_wqe;
-
- if (!virt_wqs) {
- opcode = NES_CQP_CREATE_QP | NES_CQP_QP_TYPE_IWARP |
- NES_CQP_QP_IWARP_STATE_IDLE;
- } else {
- opcode = NES_CQP_CREATE_QP | NES_CQP_QP_TYPE_IWARP | NES_CQP_QP_VIRT_WQS |
- NES_CQP_QP_IWARP_STATE_IDLE;
- }
- opcode |= NES_CQP_QP_CQS_VALID;
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
-
- u64temp = (u64)nesqp->nesqp_context_pbase;
- set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);
-
- atomic_set(&cqp_request->refcount, 2);
- nes_post_cqp_request(nesdev, cqp_request);
-
- /* Wait for CQP */
- nes_debug(NES_DBG_QP, "Waiting for create iWARP QP%u to complete.\n",
- nesqp->hwqp.qp_id);
- ret = wait_event_timeout(cqp_request->waitq,
- (cqp_request->request_done != 0), NES_EVENT_TIMEOUT);
- nes_debug(NES_DBG_QP, "Create iwarp QP%u completed, wait_event_timeout ret=%u,"
- " nesdev->cqp_head = %u, nesdev->cqp.sq_tail = %u,"
- " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
- nesqp->hwqp.qp_id, ret, nesdev->cqp.sq_head, nesdev->cqp.sq_tail,
- cqp_request->major_code, cqp_request->minor_code);
- if ((!ret) || (cqp_request->major_code)) {
- nes_put_cqp_request(nesdev, cqp_request);
- nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
- nes_free_qp_mem(nesdev, nesqp, virt_wqs);
- kfree(nesqp->allocated_buffer);
- if (!ret) {
- return ERR_PTR(-ETIME);
- } else {
- return ERR_PTR(-EIO);
- }
- }
-
- nes_put_cqp_request(nesdev, cqp_request);
-
- if (ibpd->uobject) {
- uresp.mmap_sq_db_index = nesqp->mmap_sq_db_index;
- uresp.mmap_rq_db_index = 0;
- uresp.actual_sq_size = sq_size;
- uresp.actual_rq_size = rq_size;
- uresp.qp_id = nesqp->hwqp.qp_id;
- uresp.nes_drv_opt = nes_drv_opt;
- if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
- nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
- nes_free_qp_mem(nesdev, nesqp, virt_wqs);
- kfree(nesqp->allocated_buffer);
- return ERR_PTR(-EFAULT);
- }
- }
-
- nes_debug(NES_DBG_QP, "QP%u structure located @%p.Size = %u.\n",
- nesqp->hwqp.qp_id, nesqp, (u32)sizeof(*nesqp));
- spin_lock_init(&nesqp->lock);
- nes_add_ref(&nesqp->ibqp);
- break;
- default:
- nes_debug(NES_DBG_QP, "Invalid QP type: %d\n", init_attr->qp_type);
- return ERR_PTR(-EINVAL);
- }
-
- nesqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR);
- init_timer(&nesqp->terminate_timer);
- nesqp->terminate_timer.function = nes_terminate_timeout;
- nesqp->terminate_timer.data = (unsigned long)nesqp;
-
- /* update the QP table */
- nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp;
- nes_debug(NES_DBG_QP, "netdev refcnt=%u\n",
- netdev_refcnt_read(nesvnic->netdev));
-
- return &nesqp->ibqp;
-}
-
-/**
- * nes_clean_cq
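- * Walk the hardware CQ and zero the completion context of any CQEs that
- * still reference @nesqp so they are ignored when the CQ is polled.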
- */
-static void nes_clean_cq(struct nes_qp *nesqp, struct nes_cq *nescq)
-{
- u32 cq_head;
- u32 lo;
- u32 hi;
- u64 u64temp;
- unsigned long flags = 0;
-
- spin_lock_irqsave(&nescq->lock, flags);
-
- cq_head = nescq->hw_cq.cq_head;
- while (le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) {
- rmb();
- lo = le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
- hi = le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX]);
- u64temp = (((u64)hi) << 32) | ((u64)lo);
- u64temp &= ~(NES_SW_CONTEXT_ALIGN-1);
- if (u64temp == (u64)(unsigned long)nesqp) {
- /* Zero the context value so cqe will be ignored */
- nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] = 0;
- nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX] = 0;
- }
-
- if (++cq_head >= nescq->hw_cq.cq_size)
- cq_head = 0;
- }
-
- spin_unlock_irqrestore(&nescq->lock, flags);
-}
-
-
-/**
- * nes_destroy_qp
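- * Tear down an iWARP QP: force it to the error state, deliver a CM timeout
- * event if a connection was still being set up, release user-mode doorbell
- * mappings or flush kernel-mode CQs, and drop the final QP reference.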
- */
-static int nes_destroy_qp(struct ib_qp *ibqp)
-{
- struct nes_qp *nesqp = to_nesqp(ibqp);
- struct nes_ucontext *nes_ucontext;
- struct ib_qp_attr attr;
- struct iw_cm_id *cm_id;
- struct iw_cm_event cm_event;
- int ret = 0;
-
- atomic_inc(&sw_qps_destroyed);
- nesqp->destroyed = 1;
-
- /* Blow away the connection if it exists. */
- if (nesqp->ibqp_state >= IB_QPS_INIT && nesqp->ibqp_state <= IB_QPS_RTS) {
- /* if (nesqp->ibqp_state == IB_QPS_RTS) { */
- attr.qp_state = IB_QPS_ERR;
- nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);
- }
-
- if (((nesqp->ibqp_state == IB_QPS_INIT) ||
- (nesqp->ibqp_state == IB_QPS_RTR)) && (nesqp->cm_id)) {
- cm_id = nesqp->cm_id;
- cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
- cm_event.status = -ETIMEDOUT;
- cm_event.local_addr = cm_id->local_addr;
- cm_event.remote_addr = cm_id->remote_addr;
- cm_event.private_data = NULL;
- cm_event.private_data_len = 0;
-
- nes_debug(NES_DBG_QP, "Generating a CM Timeout Event for "
- "QP%u. cm_id = %p, refcount = %u. \n",
- nesqp->hwqp.qp_id, cm_id, atomic_read(&nesqp->refcount));
-
- cm_id->rem_ref(cm_id);
- ret = cm_id->event_handler(cm_id, &cm_event);
- if (ret)
- nes_debug(NES_DBG_QP, "OFA CM event_handler returned, ret=%d\n", ret);
- }
-
- if (nesqp->user_mode) {
- if ((ibqp->uobject) && (ibqp->uobject->context)) {
- nes_ucontext = to_nesucontext(ibqp->uobject->context);
- clear_bit(nesqp->mmap_sq_db_index, nes_ucontext->allocated_wqs);
- nes_ucontext->mmap_nesqp[nesqp->mmap_sq_db_index] = NULL;
- if (nes_ucontext->first_free_wq > nesqp->mmap_sq_db_index) {
- nes_ucontext->first_free_wq = nesqp->mmap_sq_db_index;
- }
- }
- if (nesqp->pbl_pbase && nesqp->sq_kmapped) {
- nesqp->sq_kmapped = 0;
- kunmap(nesqp->page);
- }
- } else {
- /* Clean any pending completions from the cq(s) */
- if (nesqp->nesscq)
- nes_clean_cq(nesqp, nesqp->nesscq);
-
- if ((nesqp->nesrcq) && (nesqp->nesrcq != nesqp->nesscq))
- nes_clean_cq(nesqp, nesqp->nesrcq);
- }
- nes_rem_ref(&nesqp->ibqp);
- return 0;
-}
-
-
-/**
- * nes_create_cq
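- * Allocate a completion queue: reserve a CQ number, set up the CQE memory
- * (kernel DMA allocation or a user-supplied PBL), and issue a CQP CreateCQ
- * request to the adapter, returning the CQ attributes to userspace if needed.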
- */
-static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata)
-{
- int entries = attr->cqe;
- u64 u64temp;
- struct nes_vnic *nesvnic = to_nesvnic(ibdev);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- struct nes_cq *nescq;
- struct nes_ucontext *nes_ucontext = NULL;
- struct nes_cqp_request *cqp_request;
- void *mem = NULL;
- struct nes_hw_cqp_wqe *cqp_wqe;
- struct nes_pbl *nespbl = NULL;
- struct nes_create_cq_req req;
- struct nes_create_cq_resp resp;
- u32 cq_num = 0;
- u32 opcode = 0;
- u32 pbl_entries = 1;
- int err;
- unsigned long flags;
- int ret;
-
- if (attr->flags)
- return ERR_PTR(-EINVAL);
-
- if (entries > nesadapter->max_cqe)
- return ERR_PTR(-EINVAL);
-
- err = nes_alloc_resource(nesadapter, nesadapter->allocated_cqs,
- nesadapter->max_cq, &cq_num, &nesadapter->next_cq, NES_RESOURCE_CQ);
- if (err) {
- return ERR_PTR(err);
- }
-
- nescq = kzalloc(sizeof(struct nes_cq), GFP_KERNEL);
- if (!nescq) {
- nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
- nes_debug(NES_DBG_CQ, "Unable to allocate nes_cq struct\n");
- return ERR_PTR(-ENOMEM);
- }
-
- nescq->hw_cq.cq_size = max(entries + 1, 5);
- nescq->hw_cq.cq_number = cq_num;
- nescq->ibcq.cqe = nescq->hw_cq.cq_size - 1;
-
-
- if (context) {
- nes_ucontext = to_nesucontext(context);
- if (ib_copy_from_udata(&req, udata, sizeof (struct nes_create_cq_req))) {
- nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
- kfree(nescq);
- return ERR_PTR(-EFAULT);
- }
- nesvnic->mcrq_ucontext = nes_ucontext;
- nes_ucontext->mcrqf = req.mcrqf;
- if (nes_ucontext->mcrqf) {
- if (nes_ucontext->mcrqf & 0x80000000)
- nescq->hw_cq.cq_number = nesvnic->nic.qp_id + 28 + 2 * ((nes_ucontext->mcrqf & 0xf) - 1);
- else if (nes_ucontext->mcrqf & 0x40000000)
- nescq->hw_cq.cq_number = nes_ucontext->mcrqf & 0xffff;
- else
- nescq->hw_cq.cq_number = nesvnic->mcrq_qp_id + nes_ucontext->mcrqf-1;
- nescq->mcrqf = nes_ucontext->mcrqf;
- nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
- }
- nes_debug(NES_DBG_CQ, "CQ Virtual Address = %08lX, size = %u.\n",
- (unsigned long)req.user_cq_buffer, entries);
- err = 1;
- list_for_each_entry(nespbl, &nes_ucontext->cq_reg_mem_list, list) {
- if (nespbl->user_base == (unsigned long)req.user_cq_buffer) {
- list_del(&nespbl->list);
- err = 0;
- nes_debug(NES_DBG_CQ, "Found PBL for virtual CQ. nespbl=%p.\n",
- nespbl);
- break;
- }
- }
- if (err) {
- nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
- kfree(nescq);
- return ERR_PTR(-EFAULT);
- }
-
- pbl_entries = nespbl->pbl_size >> 3;
- nescq->cq_mem_size = 0;
- } else {
- nescq->cq_mem_size = nescq->hw_cq.cq_size * sizeof(struct nes_hw_cqe);
- nes_debug(NES_DBG_CQ, "Attempting to allocate pci memory (%u entries, %u bytes) for CQ%u.\n",
- entries, nescq->cq_mem_size, nescq->hw_cq.cq_number);
-
- /* allocate the physical buffer space */
- mem = pci_zalloc_consistent(nesdev->pcidev, nescq->cq_mem_size,
- &nescq->hw_cq.cq_pbase);
- if (!mem) {
- printk(KERN_ERR PFX "Unable to allocate pci memory for cq\n");
- nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
- kfree(nescq);
- return ERR_PTR(-ENOMEM);
- }
-
- nescq->hw_cq.cq_vbase = mem;
- nescq->hw_cq.cq_head = 0;
- nes_debug(NES_DBG_CQ, "CQ%u virtual address @ %p, phys = 0x%08X\n",
- nescq->hw_cq.cq_number, nescq->hw_cq.cq_vbase,
- (u32)nescq->hw_cq.cq_pbase);
- }
-
- nescq->hw_cq.ce_handler = nes_iwarp_ce_handler;
- spin_lock_init(&nescq->lock);
-
- /* send CreateCQ request to CQP */
- cqp_request = nes_get_cqp_request(nesdev);
- if (cqp_request == NULL) {
- nes_debug(NES_DBG_CQ, "Failed to get a cqp_request.\n");
- if (!context)
- pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
- nescq->hw_cq.cq_pbase);
- else {
- pci_free_consistent(nesdev->pcidev, nespbl->pbl_size,
- nespbl->pbl_vbase, nespbl->pbl_pbase);
- kfree(nespbl);
- }
-
- nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
- kfree(nescq);
- return ERR_PTR(-ENOMEM);
- }
- cqp_request->waiting = 1;
- cqp_wqe = &cqp_request->cqp_wqe;
-
- opcode = NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID |
- NES_CQP_CQ_CHK_OVERFLOW |
- NES_CQP_CQ_CEQE_MASK | ((u32)nescq->hw_cq.cq_size << 16);
-
- spin_lock_irqsave(&nesadapter->pbl_lock, flags);
-
- if (pbl_entries != 1) {
- if (pbl_entries > 32) {
- /* use 4k pbl */
- nes_debug(NES_DBG_CQ, "pbl_entries=%u, use a 4k PBL\n", pbl_entries);
- if (nesadapter->free_4kpbl == 0) {
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- nes_free_cqp_request(nesdev, cqp_request);
- if (!context)
- pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
- nescq->hw_cq.cq_pbase);
- else {
- pci_free_consistent(nesdev->pcidev, nespbl->pbl_size,
- nespbl->pbl_vbase, nespbl->pbl_pbase);
- kfree(nespbl);
- }
- nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
- kfree(nescq);
- return ERR_PTR(-ENOMEM);
- } else {
- opcode |= (NES_CQP_CQ_VIRT | NES_CQP_CQ_4KB_CHUNK);
- nescq->virtual_cq = 2;
- nesadapter->free_4kpbl--;
- }
- } else {
- /* use 256 byte pbl */
- nes_debug(NES_DBG_CQ, "pbl_entries=%u, use a 256 byte PBL\n", pbl_entries);
- if (nesadapter->free_256pbl == 0) {
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- nes_free_cqp_request(nesdev, cqp_request);
- if (!context)
- pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
- nescq->hw_cq.cq_pbase);
- else {
- pci_free_consistent(nesdev->pcidev, nespbl->pbl_size,
- nespbl->pbl_vbase, nespbl->pbl_pbase);
- kfree(nespbl);
- }
- nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
- kfree(nescq);
- return ERR_PTR(-ENOMEM);
- } else {
- opcode |= NES_CQP_CQ_VIRT;
- nescq->virtual_cq = 1;
- nesadapter->free_256pbl--;
- }
- }
- }
-
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
-
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
- (nescq->hw_cq.cq_number | ((u32)nesdev->ceq_index << 16)));
-
- if (context) {
- if (pbl_entries != 1)
- u64temp = (u64)nespbl->pbl_pbase;
- else
- u64temp = le64_to_cpu(nespbl->pbl_vbase[0]);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX,
- nes_ucontext->mmap_db_index[0]);
- } else {
- u64temp = (u64)nescq->hw_cq.cq_pbase;
- cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0;
- }
- set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
- cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0;
- u64temp = (u64)(unsigned long)&nescq->hw_cq;
- cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] =
- cpu_to_le32((u32)(u64temp >> 1));
- cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] =
- cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF);
-
- atomic_set(&cqp_request->refcount, 2);
- nes_post_cqp_request(nesdev, cqp_request);
-
- /* Wait for CQP */
- nes_debug(NES_DBG_CQ, "Waiting for create iWARP CQ%u to complete.\n",
- nescq->hw_cq.cq_number);
- ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done),
- NES_EVENT_TIMEOUT * 2);
- nes_debug(NES_DBG_CQ, "Create iWARP CQ%u completed, wait_event_timeout ret = %d.\n",
- nescq->hw_cq.cq_number, ret);
- if ((!ret) || (cqp_request->major_code)) {
- nes_put_cqp_request(nesdev, cqp_request);
- if (!context)
- pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
- nescq->hw_cq.cq_pbase);
- else {
- pci_free_consistent(nesdev->pcidev, nespbl->pbl_size,
- nespbl->pbl_vbase, nespbl->pbl_pbase);
- kfree(nespbl);
- }
- nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
- kfree(nescq);
- return ERR_PTR(-EIO);
- }
- nes_put_cqp_request(nesdev, cqp_request);
-
- if (context) {
- /* free the nespbl */
- pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
- nespbl->pbl_pbase);
- kfree(nespbl);
- resp.cq_id = nescq->hw_cq.cq_number;
- resp.cq_size = nescq->hw_cq.cq_size;
- resp.mmap_db_index = 0;
- if (ib_copy_to_udata(udata, &resp, sizeof resp - sizeof resp.reserved)) {
- nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
- kfree(nescq);
- return ERR_PTR(-EFAULT);
- }
- }
-
- return &nescq->ibcq;
-}
-
-
-/**
- * nes_destroy_cq
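- * Issue a CQP DestroyCQ request, return any 256-byte or 4K PBL the CQ was
- * using to the free pool, and release the CQE memory and nes_cq structure.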
- */
-static int nes_destroy_cq(struct ib_cq *ib_cq)
-{
- struct nes_cq *nescq;
- struct nes_device *nesdev;
- struct nes_vnic *nesvnic;
- struct nes_adapter *nesadapter;
- struct nes_hw_cqp_wqe *cqp_wqe;
- struct nes_cqp_request *cqp_request;
- unsigned long flags;
- u32 opcode = 0;
- int ret;
-
- if (ib_cq == NULL)
- return 0;
-
- nescq = to_nescq(ib_cq);
- nesvnic = to_nesvnic(ib_cq->device);
- nesdev = nesvnic->nesdev;
- nesadapter = nesdev->nesadapter;
-
- nes_debug(NES_DBG_CQ, "Destroy CQ%u\n", nescq->hw_cq.cq_number);
-
- /* Send DestroyCQ request to CQP */
- cqp_request = nes_get_cqp_request(nesdev);
- if (cqp_request == NULL) {
- nes_debug(NES_DBG_CQ, "Failed to get a cqp_request.\n");
- return -ENOMEM;
- }
- cqp_request->waiting = 1;
- cqp_wqe = &cqp_request->cqp_wqe;
- opcode = NES_CQP_DESTROY_CQ | (nescq->hw_cq.cq_size << 16);
- spin_lock_irqsave(&nesadapter->pbl_lock, flags);
- if (nescq->virtual_cq == 1) {
- nesadapter->free_256pbl++;
- if (nesadapter->free_256pbl > nesadapter->max_256pbl) {
- printk(KERN_ERR PFX "%s: free 256B PBLs(%u) has exceeded the max(%u)\n",
- __func__, nesadapter->free_256pbl, nesadapter->max_256pbl);
- }
- } else if (nescq->virtual_cq == 2) {
- nesadapter->free_4kpbl++;
- if (nesadapter->free_4kpbl > nesadapter->max_4kpbl) {
- printk(KERN_ERR PFX "%s: free 4K PBLs(%u) has exceeded the max(%u)\n",
- __func__, nesadapter->free_4kpbl, nesadapter->max_4kpbl);
- }
- opcode |= NES_CQP_CQ_4KB_CHUNK;
- }
-
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
-
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
- (nescq->hw_cq.cq_number | ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 16)));
- if (!nescq->mcrqf)
- nes_free_resource(nesadapter, nesadapter->allocated_cqs, nescq->hw_cq.cq_number);
-
- atomic_set(&cqp_request->refcount, 2);
- nes_post_cqp_request(nesdev, cqp_request);
-
- /* Wait for CQP */
- nes_debug(NES_DBG_CQ, "Waiting for destroy iWARP CQ%u to complete.\n",
- nescq->hw_cq.cq_number);
- ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done),
- NES_EVENT_TIMEOUT);
- nes_debug(NES_DBG_CQ, "Destroy iWARP CQ%u completed, wait_event_timeout ret = %u,"
- " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
- nescq->hw_cq.cq_number, ret, cqp_request->major_code,
- cqp_request->minor_code);
- if (!ret) {
- nes_debug(NES_DBG_CQ, "iWARP CQ%u destroy timeout expired\n",
- nescq->hw_cq.cq_number);
- ret = -ETIME;
- } else if (cqp_request->major_code) {
- nes_debug(NES_DBG_CQ, "iWARP CQ%u destroy failed\n",
- nescq->hw_cq.cq_number);
- ret = -EIO;
- } else {
- ret = 0;
- }
- nes_put_cqp_request(nesdev, cqp_request);
-
- if (nescq->cq_mem_size)
- pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size,
- nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase);
- kfree(nescq);
-
- return ret;
-}
-
-/**
- * root_256
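- * Rewrite a root PBL so each entry addresses a 256-byte PBL chunk instead of
- * a 4K leaf; with a single 4K leaf a new 512-byte root is allocated into
- * @new_root. Returns 1 on success, 0 on allocation failure.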
- */
-static u32 root_256(struct nes_device *nesdev,
- struct nes_root_vpbl *root_vpbl,
- struct nes_root_vpbl *new_root,
- u16 pbl_count_4k)
-{
- u64 leaf_pbl;
- int i, j, k;
-
- if (pbl_count_4k == 1) {
- new_root->pbl_vbase = pci_alloc_consistent(nesdev->pcidev,
- 512, &new_root->pbl_pbase);
-
- if (new_root->pbl_vbase == NULL)
- return 0;
-
- leaf_pbl = (u64)root_vpbl->pbl_pbase;
- for (i = 0; i < 16; i++) {
- new_root->pbl_vbase[i].pa_low =
- cpu_to_le32((u32)leaf_pbl);
- new_root->pbl_vbase[i].pa_high =
- cpu_to_le32((u32)((((u64)leaf_pbl) >> 32)));
- leaf_pbl += 256;
- }
- } else {
- for (i = 3; i >= 0; i--) {
- j = i * 16;
- root_vpbl->pbl_vbase[j] = root_vpbl->pbl_vbase[i];
- leaf_pbl = le32_to_cpu(root_vpbl->pbl_vbase[j].pa_low) +
- (((u64)le32_to_cpu(root_vpbl->pbl_vbase[j].pa_high))
- << 32);
- for (k = 1; k < 16; k++) {
- leaf_pbl += 256;
- root_vpbl->pbl_vbase[j + k].pa_low =
- cpu_to_le32((u32)leaf_pbl);
- root_vpbl->pbl_vbase[j + k].pa_high =
- cpu_to_le32((u32)((((u64)leaf_pbl) >> 32)));
- }
- }
- }
-
- return 1;
-}
-
-
-/**
- * nes_reg_mr
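- * Register a memory region with the adapter: choose 256-byte or 4K PBLs
- * based on the page count and free-pool state, build the Register STag CQP
- * WQE with the requested access rights, and wait for the CQP to complete.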
- */
-static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
- u32 stag, u64 region_length, struct nes_root_vpbl *root_vpbl,
- dma_addr_t single_buffer, u16 pbl_count_4k,
- u16 residual_page_count_4k, int acc, u64 *iova_start,
- u16 *actual_pbl_cnt, u8 *used_4k_pbls)
-{
- struct nes_hw_cqp_wqe *cqp_wqe;
- struct nes_cqp_request *cqp_request;
- unsigned long flags;
- int ret;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- uint pg_cnt = 0;
- u16 pbl_count_256 = 0;
- u16 pbl_count = 0;
- u8 use_256_pbls = 0;
- u8 use_4k_pbls = 0;
- u16 use_two_level = (pbl_count_4k > 1) ? 1 : 0;
- struct nes_root_vpbl new_root = { 0, NULL, NULL };
- u32 opcode = 0;
- u16 major_code;
-
- /* Register the region with the adapter */
- cqp_request = nes_get_cqp_request(nesdev);
- if (cqp_request == NULL) {
- nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n");
- return -ENOMEM;
- }
- cqp_request->waiting = 1;
- cqp_wqe = &cqp_request->cqp_wqe;
-
- if (pbl_count_4k) {
- spin_lock_irqsave(&nesadapter->pbl_lock, flags);
-
- pg_cnt = ((pbl_count_4k - 1) * 512) + residual_page_count_4k;
- pbl_count_256 = (pg_cnt + 31) / 32;
- if (pg_cnt <= 32) {
- if (pbl_count_256 <= nesadapter->free_256pbl)
- use_256_pbls = 1;
- else if (pbl_count_4k <= nesadapter->free_4kpbl)
- use_4k_pbls = 1;
- } else if (pg_cnt <= 2048) {
- if (((pbl_count_4k + use_two_level) <= nesadapter->free_4kpbl) &&
- (nesadapter->free_4kpbl > (nesadapter->max_4kpbl >> 1))) {
- use_4k_pbls = 1;
- } else if ((pbl_count_256 + 1) <= nesadapter->free_256pbl) {
- use_256_pbls = 1;
- use_two_level = 1;
- } else if ((pbl_count_4k + use_two_level) <= nesadapter->free_4kpbl) {
- use_4k_pbls = 1;
- }
- } else {
- if ((pbl_count_4k + 1) <= nesadapter->free_4kpbl)
- use_4k_pbls = 1;
- }
-
- if (use_256_pbls) {
- pbl_count = pbl_count_256;
- nesadapter->free_256pbl -= pbl_count + use_two_level;
- } else if (use_4k_pbls) {
- pbl_count = pbl_count_4k;
- nesadapter->free_4kpbl -= pbl_count + use_two_level;
- } else {
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- nes_debug(NES_DBG_MR, "Out of Pbls\n");
- nes_free_cqp_request(nesdev, cqp_request);
- return -ENOMEM;
- }
-
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- }
-
- if (use_256_pbls && use_two_level) {
- if (root_256(nesdev, root_vpbl, &new_root, pbl_count_4k) == 1) {
- if (new_root.pbl_pbase != 0)
- root_vpbl = &new_root;
- } else {
- spin_lock_irqsave(&nesadapter->pbl_lock, flags);
- nesadapter->free_256pbl += pbl_count_256 + use_two_level;
- use_256_pbls = 0;
-
- if (pbl_count_4k == 1)
- use_two_level = 0;
- pbl_count = pbl_count_4k;
-
- if ((pbl_count_4k + use_two_level) <= nesadapter->free_4kpbl) {
- nesadapter->free_4kpbl -= pbl_count + use_two_level;
- use_4k_pbls = 1;
- }
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
-
- if (use_4k_pbls == 0)
- return -ENOMEM;
- }
- }
-
- opcode = NES_CQP_REGISTER_STAG | NES_CQP_STAG_RIGHTS_LOCAL_READ |
- NES_CQP_STAG_VA_TO | NES_CQP_STAG_MR;
- if (acc & IB_ACCESS_LOCAL_WRITE)
- opcode |= NES_CQP_STAG_RIGHTS_LOCAL_WRITE;
- if (acc & IB_ACCESS_REMOTE_WRITE)
- opcode |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE | NES_CQP_STAG_REM_ACC_EN;
- if (acc & IB_ACCESS_REMOTE_READ)
- opcode |= NES_CQP_STAG_RIGHTS_REMOTE_READ | NES_CQP_STAG_REM_ACC_EN;
- if (acc & IB_ACCESS_MW_BIND)
- opcode |= NES_CQP_STAG_RIGHTS_WINDOW_BIND | NES_CQP_STAG_REM_ACC_EN;
-
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
- set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_VA_LOW_IDX, *iova_start);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_LEN_LOW_IDX, region_length);
-
- cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX] =
- cpu_to_le32((u32)(region_length >> 8) & 0xff000000);
- cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX] |=
- cpu_to_le32(nespd->pd_id & 0x00007fff);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, stag);
-
- if (pbl_count == 0) {
- set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PA_LOW_IDX, single_buffer);
- } else {
- set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PA_LOW_IDX, root_vpbl->pbl_pbase);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX, pbl_count);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_LEN_IDX, (pg_cnt * 8));
-
- if (use_4k_pbls)
- cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_STAG_PBL_BLK_SIZE);
- }
- barrier();
-
- atomic_set(&cqp_request->refcount, 2);
- nes_post_cqp_request(nesdev, cqp_request);
-
- /* Wait for CQP */
- ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done),
- NES_EVENT_TIMEOUT);
- nes_debug(NES_DBG_MR, "Register STag 0x%08X completed, wait_event_timeout ret = %u,"
- " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
- stag, ret, cqp_request->major_code, cqp_request->minor_code);
- major_code = cqp_request->major_code;
- nes_put_cqp_request(nesdev, cqp_request);
-
- if ((!ret || major_code) && pbl_count != 0) {
- spin_lock_irqsave(&nesadapter->pbl_lock, flags);
- if (use_256_pbls)
- nesadapter->free_256pbl += pbl_count + use_two_level;
- else if (use_4k_pbls)
- nesadapter->free_4kpbl += pbl_count + use_two_level;
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- }
- if (new_root.pbl_pbase)
- pci_free_consistent(nesdev->pcidev, 512, new_root.pbl_vbase,
- new_root.pbl_pbase);
-
- if (!ret)
- return -ETIME;
- else if (major_code)
- return -EIO;
-
- *actual_pbl_cnt = pbl_count + use_two_level;
- *used_4k_pbls = use_4k_pbls;
- return 0;
-}
-
-
-/**
- * nes_reg_phys_mr
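- * Register a list of physical buffers as a memory region, building one- or
- * two-level PBLs as needed before handing off to nes_reg_mr().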
- */
-static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,
- struct ib_phys_buf *buffer_list, int num_phys_buf, int acc,
- u64 * iova_start)
-{
- u64 region_length;
- struct nes_pd *nespd = to_nespd(ib_pd);
- struct nes_vnic *nesvnic = to_nesvnic(ib_pd->device);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- struct nes_mr *nesmr;
- struct ib_mr *ibmr;
- struct nes_vpbl vpbl;
- struct nes_root_vpbl root_vpbl;
- u32 stag;
- u32 i;
- unsigned long mask;
- u32 stag_index = 0;
- u32 next_stag_index = 0;
- u32 driver_key = 0;
- u32 root_pbl_index = 0;
- u32 cur_pbl_index = 0;
- int err = 0;
- int ret = 0;
- u16 pbl_count = 0;
- u8 single_page = 1;
- u8 stag_key = 0;
-
- region_length = 0;
- vpbl.pbl_vbase = NULL;
- root_vpbl.pbl_vbase = NULL;
- root_vpbl.pbl_pbase = 0;
-
- get_random_bytes(&next_stag_index, sizeof(next_stag_index));
- stag_key = (u8)next_stag_index;
-
- driver_key = 0;
-
- next_stag_index >>= 8;
- next_stag_index %= nesadapter->max_mr;
- if (num_phys_buf > (1024*512)) {
- return ERR_PTR(-E2BIG);
- }
-
- if ((buffer_list[0].addr ^ *iova_start) & ~PAGE_MASK)
- return ERR_PTR(-EINVAL);
-
- err = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs, nesadapter->max_mr,
- &stag_index, &next_stag_index, NES_RESOURCE_PHYS_MR);
- if (err) {
- return ERR_PTR(err);
- }
-
- nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
- if (!nesmr) {
- nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
- return ERR_PTR(-ENOMEM);
- }
-
- for (i = 0; i < num_phys_buf; i++) {
-
- if ((i & 0x01FF) == 0) {
- if (root_pbl_index == 1) {
- /* Allocate the root PBL */
- root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 8192,
- &root_vpbl.pbl_pbase);
- nes_debug(NES_DBG_MR, "Allocating root PBL, va = %p, pa = 0x%08X\n",
- root_vpbl.pbl_vbase, (unsigned int)root_vpbl.pbl_pbase);
- if (!root_vpbl.pbl_vbase) {
- pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
- vpbl.pbl_pbase);
- nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
- kfree(nesmr);
- return ERR_PTR(-ENOMEM);
- }
- root_vpbl.leaf_vpbl = kzalloc(sizeof(*root_vpbl.leaf_vpbl)*1024, GFP_KERNEL);
- if (!root_vpbl.leaf_vpbl) {
- pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
- root_vpbl.pbl_pbase);
- pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
- vpbl.pbl_pbase);
- nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
- kfree(nesmr);
- return ERR_PTR(-ENOMEM);
- }
- root_vpbl.pbl_vbase[0].pa_low = cpu_to_le32((u32)vpbl.pbl_pbase);
- root_vpbl.pbl_vbase[0].pa_high =
- cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32)));
- root_vpbl.leaf_vpbl[0] = vpbl;
- }
- /* Allocate a 4K buffer for the PBL */
- vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
- &vpbl.pbl_pbase);
- nes_debug(NES_DBG_MR, "Allocating leaf PBL, va = %p, pa = 0x%016lX\n",
- vpbl.pbl_vbase, (unsigned long)vpbl.pbl_pbase);
- if (!vpbl.pbl_vbase) {
- nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
- ibmr = ERR_PTR(-ENOMEM);
- kfree(nesmr);
- goto reg_phys_err;
- }
- /* Fill in the root table */
- if (1 <= root_pbl_index) {
- root_vpbl.pbl_vbase[root_pbl_index].pa_low =
- cpu_to_le32((u32)vpbl.pbl_pbase);
- root_vpbl.pbl_vbase[root_pbl_index].pa_high =
- cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32)));
- root_vpbl.leaf_vpbl[root_pbl_index] = vpbl;
- }
- root_pbl_index++;
- cur_pbl_index = 0;
- }
-
- mask = !buffer_list[i].size;
- if (i != 0)
- mask |= buffer_list[i].addr;
- if (i != num_phys_buf - 1)
- mask |= buffer_list[i].addr + buffer_list[i].size;
-
- if (mask & ~PAGE_MASK) {
- nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
- nes_debug(NES_DBG_MR, "Invalid buffer addr or size\n");
- ibmr = ERR_PTR(-EINVAL);
- kfree(nesmr);
- goto reg_phys_err;
- }
-
- region_length += buffer_list[i].size;
- if ((i != 0) && (single_page)) {
- if ((buffer_list[i-1].addr+PAGE_SIZE) != buffer_list[i].addr)
- single_page = 0;
- }
- vpbl.pbl_vbase[cur_pbl_index].pa_low = cpu_to_le32((u32)buffer_list[i].addr & PAGE_MASK);
- vpbl.pbl_vbase[cur_pbl_index++].pa_high =
- cpu_to_le32((u32)((((u64)buffer_list[i].addr) >> 32)));
- }
-
- stag = stag_index << 8;
- stag |= driver_key;
- stag += (u32)stag_key;
-
- nes_debug(NES_DBG_MR, "Registering STag 0x%08X, VA = 0x%016lX,"
- " length = 0x%016lX, index = 0x%08X\n",
- stag, (unsigned long)*iova_start, (unsigned long)region_length, stag_index);
-
- /* Make the leaf PBL the root if only one PBL */
- if (root_pbl_index == 1) {
- root_vpbl.pbl_pbase = vpbl.pbl_pbase;
- }
-
- if (single_page) {
- pbl_count = 0;
- } else {
- pbl_count = root_pbl_index;
- }
- ret = nes_reg_mr(nesdev, nespd, stag, region_length, &root_vpbl,
- buffer_list[0].addr, pbl_count, (u16)cur_pbl_index, acc, iova_start,
- &nesmr->pbls_used, &nesmr->pbl_4k);
-
- if (ret == 0) {
- nesmr->ibmr.rkey = stag;
- nesmr->ibmr.lkey = stag;
- nesmr->mode = IWNES_MEMREG_TYPE_MEM;
- ibmr = &nesmr->ibmr;
- } else {
- kfree(nesmr);
- ibmr = ERR_PTR(-ENOMEM);
- }
-
- reg_phys_err:
- /* free the resources */
- if (root_pbl_index == 1) {
- /* single PBL case */
- pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase, vpbl.pbl_pbase);
- } else {
- for (i = 0; i < root_pbl_index; i++) {
- pci_free_consistent(nesdev->pcidev, 4096, root_vpbl.leaf_vpbl[i].pbl_vbase,
- root_vpbl.leaf_vpbl[i].pbl_pbase);
- }
- kfree(root_vpbl.leaf_vpbl);
- pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
- root_vpbl.pbl_pbase);
- }
-
- return ibmr;
-}
-
-
-/**
- * nes_get_dma_mr
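- * Return a DMA memory region by registering a single large zero-based
- * physical buffer through nes_reg_phys_mr().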
- */
-static struct ib_mr *nes_get_dma_mr(struct ib_pd *pd, int acc)
-{
- struct ib_phys_buf bl;
- u64 kva = 0;
-
- nes_debug(NES_DBG_MR, "\n");
-
- bl.size = (u64)0xffffffffffULL;
- bl.addr = 0;
- return nes_reg_phys_mr(pd, &bl, 1, acc, &kva);
-}
-
-
-/**
- * nes_reg_user_mr
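- * Register a user memory region: pin the pages with ib_umem_get(), then
- * either build PBLs and register an ordinary MR, or (for QP/CQ buffers)
- * queue a PBL on the ucontext lists for the later create call to pick up.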
- */
-static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt, int acc, struct ib_udata *udata)
-{
- u64 iova_start;
- __le64 *pbl;
- u64 region_length;
- dma_addr_t last_dma_addr = 0;
- dma_addr_t first_dma_addr = 0;
- struct nes_pd *nespd = to_nespd(pd);
- struct nes_vnic *nesvnic = to_nesvnic(pd->device);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- struct ib_mr *ibmr = ERR_PTR(-EINVAL);
- struct scatterlist *sg;
- struct nes_ucontext *nes_ucontext;
- struct nes_pbl *nespbl;
- struct nes_mr *nesmr;
- struct ib_umem *region;
- struct nes_mem_reg_req req;
- struct nes_vpbl vpbl;
- struct nes_root_vpbl root_vpbl;
- int entry, page_index;
- int page_count = 0;
- int err, pbl_depth = 0;
- int chunk_pages;
- int ret;
- u32 stag;
- u32 stag_index = 0;
- u32 next_stag_index;
- u32 driver_key;
- u32 root_pbl_index = 0;
- u32 cur_pbl_index = 0;
- u32 skip_pages;
- u16 pbl_count;
- u8 single_page = 1;
- u8 stag_key;
- int first_page = 1;
-
- region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
- if (IS_ERR(region)) {
- return (struct ib_mr *)region;
- }
-
- nes_debug(NES_DBG_MR, "User base = 0x%lX, Virt base = 0x%lX, length = %u,"
- " offset = %u, page size = %u.\n",
- (unsigned long int)start, (unsigned long int)virt, (u32)length,
- ib_umem_offset(region), region->page_size);
-
- skip_pages = ((u32)ib_umem_offset(region)) >> 12;
-
- if (ib_copy_from_udata(&req, udata, sizeof(req))) {
- ib_umem_release(region);
- return ERR_PTR(-EFAULT);
- }
- nes_debug(NES_DBG_MR, "Memory Registration type = %08X.\n", req.reg_type);
-
- switch (req.reg_type) {
- case IWNES_MEMREG_TYPE_MEM:
- pbl_depth = 0;
- region_length = 0;
- vpbl.pbl_vbase = NULL;
- root_vpbl.pbl_vbase = NULL;
- root_vpbl.pbl_pbase = 0;
-
- get_random_bytes(&next_stag_index, sizeof(next_stag_index));
- stag_key = (u8)next_stag_index;
-
- driver_key = next_stag_index & 0x70000000;
-
- next_stag_index >>= 8;
- next_stag_index %= nesadapter->max_mr;
-
- err = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
- nesadapter->max_mr, &stag_index, &next_stag_index, NES_RESOURCE_USER_MR);
- if (err) {
- ib_umem_release(region);
- return ERR_PTR(err);
- }
-
- nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
- if (!nesmr) {
- ib_umem_release(region);
- nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
- return ERR_PTR(-ENOMEM);
- }
- nesmr->region = region;
-
- for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
- if (sg_dma_address(sg) & ~PAGE_MASK) {
- ib_umem_release(region);
- nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
- nes_debug(NES_DBG_MR, "Unaligned Memory Buffer: 0x%x\n",
- (unsigned int) sg_dma_address(sg));
- ibmr = ERR_PTR(-EINVAL);
- kfree(nesmr);
- goto reg_user_mr_err;
- }
-
- if (!sg_dma_len(sg)) {
- ib_umem_release(region);
- nes_free_resource(nesadapter, nesadapter->allocated_mrs,
- stag_index);
- nes_debug(NES_DBG_MR, "Invalid Buffer Size\n");
- ibmr = ERR_PTR(-EINVAL);
- kfree(nesmr);
- goto reg_user_mr_err;
- }
-
- region_length += sg_dma_len(sg);
- chunk_pages = sg_dma_len(sg) >> 12;
- region_length -= skip_pages << 12;
- for (page_index = skip_pages; page_index < chunk_pages; page_index++) {
- skip_pages = 0;
- if ((page_count != 0) && (page_count << 12) - (ib_umem_offset(region) & (4096 - 1)) >= region->length)
- goto enough_pages;
- if ((page_count&0x01FF) == 0) {
- if (page_count >= 1024 * 512) {
- ib_umem_release(region);
- nes_free_resource(nesadapter,
- nesadapter->allocated_mrs, stag_index);
- kfree(nesmr);
- ibmr = ERR_PTR(-E2BIG);
- goto reg_user_mr_err;
- }
- if (root_pbl_index == 1) {
- root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev,
- 8192, &root_vpbl.pbl_pbase);
- nes_debug(NES_DBG_MR, "Allocating root PBL, va = %p, pa = 0x%08X\n",
- root_vpbl.pbl_vbase, (unsigned int)root_vpbl.pbl_pbase);
- if (!root_vpbl.pbl_vbase) {
- ib_umem_release(region);
- pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
- vpbl.pbl_pbase);
- nes_free_resource(nesadapter, nesadapter->allocated_mrs,
- stag_index);
- kfree(nesmr);
- ibmr = ERR_PTR(-ENOMEM);
- goto reg_user_mr_err;
- }
- root_vpbl.leaf_vpbl = kzalloc(sizeof(*root_vpbl.leaf_vpbl)*1024,
- GFP_KERNEL);
- if (!root_vpbl.leaf_vpbl) {
- ib_umem_release(region);
- pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
- root_vpbl.pbl_pbase);
- pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
- vpbl.pbl_pbase);
- nes_free_resource(nesadapter, nesadapter->allocated_mrs,
- stag_index);
- kfree(nesmr);
- ibmr = ERR_PTR(-ENOMEM);
- goto reg_user_mr_err;
- }
- root_vpbl.pbl_vbase[0].pa_low =
- cpu_to_le32((u32)vpbl.pbl_pbase);
- root_vpbl.pbl_vbase[0].pa_high =
- cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32)));
- root_vpbl.leaf_vpbl[0] = vpbl;
- }
- vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
- &vpbl.pbl_pbase);
- nes_debug(NES_DBG_MR, "Allocating leaf PBL, va = %p, pa = 0x%08X\n",
- vpbl.pbl_vbase, (unsigned int)vpbl.pbl_pbase);
- if (!vpbl.pbl_vbase) {
- ib_umem_release(region);
- nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
- ibmr = ERR_PTR(-ENOMEM);
- kfree(nesmr);
- goto reg_user_mr_err;
- }
- if (1 <= root_pbl_index) {
- root_vpbl.pbl_vbase[root_pbl_index].pa_low =
- cpu_to_le32((u32)vpbl.pbl_pbase);
- root_vpbl.pbl_vbase[root_pbl_index].pa_high =
- cpu_to_le32((u32)((((u64)vpbl.pbl_pbase)>>32)));
- root_vpbl.leaf_vpbl[root_pbl_index] = vpbl;
- }
- root_pbl_index++;
- cur_pbl_index = 0;
- }
- if (single_page) {
- if (page_count != 0) {
- if ((last_dma_addr+4096) !=
- (sg_dma_address(sg)+
- (page_index*4096)))
- single_page = 0;
- last_dma_addr = sg_dma_address(sg)+
- (page_index*4096);
- } else {
- first_dma_addr = sg_dma_address(sg)+
- (page_index*4096);
- last_dma_addr = first_dma_addr;
- }
- }
-
- vpbl.pbl_vbase[cur_pbl_index].pa_low =
- cpu_to_le32((u32)(sg_dma_address(sg)+
- (page_index*4096)));
- vpbl.pbl_vbase[cur_pbl_index].pa_high =
- cpu_to_le32((u32)((((u64)(sg_dma_address(sg)+
- (page_index*4096))) >> 32)));
- cur_pbl_index++;
- page_count++;
- }
- }
-
- enough_pages:
- nes_debug(NES_DBG_MR, "calculating stag, stag_index=0x%08x, driver_key=0x%08x,"
- " stag_key=0x%08x\n",
- stag_index, driver_key, stag_key);
- stag = stag_index << 8;
- stag |= driver_key;
- stag += (u32)stag_key;
-
- iova_start = virt;
- /* Make the leaf PBL the root if only one PBL */
- if (root_pbl_index == 1) {
- root_vpbl.pbl_pbase = vpbl.pbl_pbase;
- }
-
- if (single_page) {
- pbl_count = 0;
- } else {
- pbl_count = root_pbl_index;
- first_dma_addr = 0;
- }
- nes_debug(NES_DBG_MR, "Registering STag 0x%08X, VA = 0x%08X, length = 0x%08X,"
- " index = 0x%08X, region->length=0x%08llx, pbl_count = %u\n",
- stag, (unsigned int)iova_start,
- (unsigned int)region_length, stag_index,
- (unsigned long long)region->length, pbl_count);
- ret = nes_reg_mr(nesdev, nespd, stag, region->length, &root_vpbl,
- first_dma_addr, pbl_count, (u16)cur_pbl_index, acc,
- &iova_start, &nesmr->pbls_used, &nesmr->pbl_4k);
-
- nes_debug(NES_DBG_MR, "ret=%d\n", ret);
-
- if (ret == 0) {
- nesmr->ibmr.rkey = stag;
- nesmr->ibmr.lkey = stag;
- nesmr->mode = IWNES_MEMREG_TYPE_MEM;
- ibmr = &nesmr->ibmr;
- } else {
- ib_umem_release(region);
- kfree(nesmr);
- ibmr = ERR_PTR(-ENOMEM);
- }
-
- reg_user_mr_err:
- /* free the resources */
- if (root_pbl_index == 1) {
- pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
- vpbl.pbl_pbase);
- } else {
- for (page_index = 0; page_index < root_pbl_index; page_index++) {
- pci_free_consistent(nesdev->pcidev, 4096,
- root_vpbl.leaf_vpbl[page_index].pbl_vbase,
- root_vpbl.leaf_vpbl[page_index].pbl_pbase);
- }
- kfree(root_vpbl.leaf_vpbl);
- pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
- root_vpbl.pbl_pbase);
- }
-
- nes_debug(NES_DBG_MR, "Leaving, ibmr=%p", ibmr);
-
- return ibmr;
- case IWNES_MEMREG_TYPE_QP:
- case IWNES_MEMREG_TYPE_CQ:
- if (!region->length) {
- nes_debug(NES_DBG_MR, "Unable to register zero length region for CQ\n");
- ib_umem_release(region);
- return ERR_PTR(-EINVAL);
- }
- nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL);
- if (!nespbl) {
- nes_debug(NES_DBG_MR, "Unable to allocate PBL\n");
- ib_umem_release(region);
- return ERR_PTR(-ENOMEM);
- }
- nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
- if (!nesmr) {
- ib_umem_release(region);
- kfree(nespbl);
- nes_debug(NES_DBG_MR, "Unable to allocate nesmr\n");
- return ERR_PTR(-ENOMEM);
- }
- nesmr->region = region;
- nes_ucontext = to_nesucontext(pd->uobject->context);
- pbl_depth = region->length >> 12;
- pbl_depth += (region->length & (4096-1)) ? 1 : 0;
- nespbl->pbl_size = pbl_depth*sizeof(u64);
- if (req.reg_type == IWNES_MEMREG_TYPE_QP) {
- nes_debug(NES_DBG_MR, "Attempting to allocate QP PBL memory");
- } else {
- nes_debug(NES_DBG_MR, "Attempting to allocate CP PBL memory");
- }
-
- nes_debug(NES_DBG_MR, " %u bytes, %u entries.\n",
- nespbl->pbl_size, pbl_depth);
- pbl = pci_alloc_consistent(nesdev->pcidev, nespbl->pbl_size,
- &nespbl->pbl_pbase);
- if (!pbl) {
- ib_umem_release(region);
- kfree(nesmr);
- kfree(nespbl);
- nes_debug(NES_DBG_MR, "Unable to allocate PBL memory\n");
- return ERR_PTR(-ENOMEM);
- }
-
- nespbl->pbl_vbase = (u64 *)pbl;
- nespbl->user_base = start;
- nes_debug(NES_DBG_MR, "Allocated PBL memory, %u bytes, pbl_pbase=%lx,"
- " pbl_vbase=%p user_base=0x%lx\n",
- nespbl->pbl_size, (unsigned long) nespbl->pbl_pbase,
- (void *) nespbl->pbl_vbase, nespbl->user_base);
-
- for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
- chunk_pages = sg_dma_len(sg) >> 12;
- chunk_pages += (sg_dma_len(sg) & (4096-1)) ? 1 : 0;
- if (first_page) {
- nespbl->page = sg_page(sg);
- first_page = 0;
- }
-
- for (page_index = 0; page_index < chunk_pages; page_index++) {
- ((__le32 *)pbl)[0] = cpu_to_le32((u32)
- (sg_dma_address(sg)+
- (page_index*4096)));
- ((__le32 *)pbl)[1] = cpu_to_le32(((u64)
- (sg_dma_address(sg)+
- (page_index*4096)))>>32);
- nes_debug(NES_DBG_MR, "pbl=%p, *pbl=0x%016llx, 0x%08x%08x\n", pbl,
- (unsigned long long)*pbl,
- le32_to_cpu(((__le32 *)pbl)[1]), le32_to_cpu(((__le32 *)pbl)[0]));
- pbl++;
- }
- }
-
- if (req.reg_type == IWNES_MEMREG_TYPE_QP) {
- list_add_tail(&nespbl->list, &nes_ucontext->qp_reg_mem_list);
- } else {
- list_add_tail(&nespbl->list, &nes_ucontext->cq_reg_mem_list);
- }
- nesmr->ibmr.rkey = -1;
- nesmr->ibmr.lkey = -1;
- nesmr->mode = req.reg_type;
- return &nesmr->ibmr;
- }
-
- ib_umem_release(region);
- return ERR_PTR(-ENOSYS);
-}
-
-
-/**
- * nes_dereg_mr
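- * Deregister a memory region: release the umem, issue a Deallocate STag CQP
- * request for adapter-registered MRs, and return any PBLs to the free pools.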
- */
-static int nes_dereg_mr(struct ib_mr *ib_mr)
-{
- struct nes_mr *nesmr = to_nesmr(ib_mr);
- struct nes_vnic *nesvnic = to_nesvnic(ib_mr->device);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- struct nes_hw_cqp_wqe *cqp_wqe;
- struct nes_cqp_request *cqp_request;
- unsigned long flags;
- int ret;
- u16 major_code;
- u16 minor_code;
-
- if (nesmr->region) {
- ib_umem_release(nesmr->region);
- }
- if (nesmr->mode != IWNES_MEMREG_TYPE_MEM) {
- kfree(nesmr);
- return 0;
- }
-
- /* Deallocate the region with the adapter */
-
- cqp_request = nes_get_cqp_request(nesdev);
- if (cqp_request == NULL) {
- nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n");
- return -ENOMEM;
- }
- cqp_request->waiting = 1;
- cqp_wqe = &cqp_request->cqp_wqe;
-
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
- NES_CQP_DEALLOCATE_STAG | NES_CQP_STAG_VA_TO |
- NES_CQP_STAG_DEALLOC_PBLS | NES_CQP_STAG_MR);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, ib_mr->rkey);
-
- atomic_set(&cqp_request->refcount, 2);
- nes_post_cqp_request(nesdev, cqp_request);
-
- /* Wait for CQP */
- nes_debug(NES_DBG_MR, "Waiting for deallocate STag 0x%08X completed\n", ib_mr->rkey);
- ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
- NES_EVENT_TIMEOUT);
- nes_debug(NES_DBG_MR, "Deallocate STag 0x%08X completed, wait_event_timeout ret = %u,"
- " CQP Major:Minor codes = 0x%04X:0x%04X\n",
- ib_mr->rkey, ret, cqp_request->major_code, cqp_request->minor_code);
-
- major_code = cqp_request->major_code;
- minor_code = cqp_request->minor_code;
-
- nes_put_cqp_request(nesdev, cqp_request);
-
- if (!ret) {
- nes_debug(NES_DBG_MR, "Timeout waiting to destroy STag,"
- " ib_mr=%p, rkey = 0x%08X\n",
- ib_mr, ib_mr->rkey);
- return -ETIME;
- } else if (major_code) {
- nes_debug(NES_DBG_MR, "Error (0x%04X:0x%04X) while attempting"
- " to destroy STag, ib_mr=%p, rkey = 0x%08X\n",
- major_code, minor_code, ib_mr, ib_mr->rkey);
- return -EIO;
- }
-
- if (nesmr->pbls_used != 0) {
- spin_lock_irqsave(&nesadapter->pbl_lock, flags);
- if (nesmr->pbl_4k) {
- nesadapter->free_4kpbl += nesmr->pbls_used;
- if (nesadapter->free_4kpbl > nesadapter->max_4kpbl)
- printk(KERN_ERR PFX "free 4KB PBLs(%u) has "
- "exceeded the max(%u)\n",
- nesadapter->free_4kpbl,
- nesadapter->max_4kpbl);
- } else {
- nesadapter->free_256pbl += nesmr->pbls_used;
- if (nesadapter->free_256pbl > nesadapter->max_256pbl)
- printk(KERN_ERR PFX "free 256B PBLs(%u) has "
- "exceeded the max(%u)\n",
- nesadapter->free_256pbl,
- nesadapter->max_256pbl);
- }
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- }
- nes_free_resource(nesadapter, nesadapter->allocated_mrs,
- (ib_mr->rkey & 0x0fffff00) >> 8);
-
- kfree(nesmr);
-
- return 0;
-}
-
-
-/**
- * show_rev
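- * sysfs hw_rev attribute: report the adapter hardware revision.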
- */
-static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct nes_ib_device *nesibdev =
- container_of(dev, struct nes_ib_device, ibdev.dev);
- struct nes_vnic *nesvnic = nesibdev->nesvnic;
-
- nes_debug(NES_DBG_INIT, "\n");
- return sprintf(buf, "%x\n", nesvnic->nesdev->nesadapter->hw_rev);
-}
-
-
-/**
- * show_fw_ver
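- * sysfs fw_ver attribute: report the adapter firmware version.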
- */
-static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct nes_ib_device *nesibdev =
- container_of(dev, struct nes_ib_device, ibdev.dev);
- struct nes_vnic *nesvnic = nesibdev->nesvnic;
-
- nes_debug(NES_DBG_INIT, "\n");
- return sprintf(buf, "%u.%u\n",
- (nesvnic->nesdev->nesadapter->firmware_version >> 16),
- (nesvnic->nesdev->nesadapter->firmware_version & 0x000000ff));
-}
-
-
-/**
- * show_hca
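- * sysfs hca_type attribute: report the HCA type string.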
- */
-static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- nes_debug(NES_DBG_INIT, "\n");
- return sprintf(buf, "NES020\n");
-}
-
-
-/**
- * show_board
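- * sysfs board_id attribute: report the board identifier string.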
- */
-static ssize_t show_board(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- nes_debug(NES_DBG_INIT, "\n");
- return sprintf(buf, "%.*s\n", 32, "NES020 Board ID");
-}
-
-
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
-
-static struct device_attribute *nes_dev_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_fw_ver,
- &dev_attr_hca_type,
- &dev_attr_board_id
-};
-
-
-/**
- * nes_query_qp
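- * Report the QP capabilities (queue sizes, SGE and inline limits) and the
- * init attributes cached in the ib_qp for ib_query_qp().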
- */
-static int nes_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_qp_init_attr *init_attr)
-{
- struct nes_qp *nesqp = to_nesqp(ibqp);
-
- nes_debug(NES_DBG_QP, "\n");
-
- attr->qp_access_flags = 0;
- attr->cap.max_send_wr = nesqp->hwqp.sq_size;
- attr->cap.max_recv_wr = nesqp->hwqp.rq_size;
- attr->cap.max_recv_sge = 1;
- if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA)
- attr->cap.max_inline_data = 0;
- else
- attr->cap.max_inline_data = 64;
-
- init_attr->event_handler = nesqp->ibqp.event_handler;
- init_attr->qp_context = nesqp->ibqp.qp_context;
- init_attr->send_cq = nesqp->ibqp.send_cq;
- init_attr->recv_cq = nesqp->ibqp.recv_cq;
- init_attr->srq = nesqp->ibqp.srq;
- init_attr->cap = attr->cap;
-
- return 0;
-}
-
-
-/**
- * nes_hw_modify_qp
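- * Issue a CQP ModifyQP request moving the hardware QP to @next_iwarp_state,
- * optionally carrying a terminate message length, and optionally waiting for
- * the CQP completion.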
- */
-int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp,
- u32 next_iwarp_state, u32 termlen, u32 wait_completion)
-{
- struct nes_hw_cqp_wqe *cqp_wqe;
- /* struct iw_cm_id *cm_id = nesqp->cm_id; */
- /* struct iw_cm_event cm_event; */
- struct nes_cqp_request *cqp_request;
- int ret;
- u16 major_code;
-
- nes_debug(NES_DBG_MOD_QP, "QP%u, refcount=%d\n",
- nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount));
-
- cqp_request = nes_get_cqp_request(nesdev);
- if (cqp_request == NULL) {
- nes_debug(NES_DBG_MOD_QP, "Failed to get a cqp_request.\n");
- return -ENOMEM;
- }
- if (wait_completion) {
- cqp_request->waiting = 1;
- } else {
- cqp_request->waiting = 0;
- }
- cqp_wqe = &cqp_request->cqp_wqe;
-
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
- NES_CQP_MODIFY_QP | NES_CQP_QP_TYPE_IWARP | next_iwarp_state);
- nes_debug(NES_DBG_MOD_QP, "using next_iwarp_state=%08x, wqe_words=%08x\n",
- next_iwarp_state, le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX]));
- nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
- set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, (u64)nesqp->nesqp_context_pbase);
-
- /* If sending a terminate message, fill in the length (in words) */
- if (((next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) == NES_CQP_QP_IWARP_STATE_TERMINATE) &&
- !(next_iwarp_state & NES_CQP_QP_TERM_DONT_SEND_TERM_MSG)) {
- termlen = ((termlen + 3) >> 2) << NES_CQP_OP_TERMLEN_SHIFT;
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_NEW_MSS_IDX, termlen);
- }
-
- atomic_set(&cqp_request->refcount, 2);
- nes_post_cqp_request(nesdev, cqp_request);
-
- /* Wait for CQP */
- if (wait_completion) {
- /* nes_debug(NES_DBG_MOD_QP, "Waiting for modify iWARP QP%u to complete.\n",
- nesqp->hwqp.qp_id); */
- ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
- NES_EVENT_TIMEOUT);
- nes_debug(NES_DBG_MOD_QP, "Modify iwarp QP%u completed, wait_event_timeout ret=%u, "
- "CQP Major:Minor codes = 0x%04X:0x%04X.\n",
- nesqp->hwqp.qp_id, ret, cqp_request->major_code, cqp_request->minor_code);
- major_code = cqp_request->major_code;
- if (major_code) {
- nes_debug(NES_DBG_MOD_QP, "Modify iwarp QP%u failed"
- "CQP Major:Minor codes = 0x%04X:0x%04X, intended next state = 0x%08X.\n",
- nesqp->hwqp.qp_id, cqp_request->major_code,
- cqp_request->minor_code, next_iwarp_state);
- }
-
- nes_put_cqp_request(nesdev, cqp_request);
-
- if (!ret)
- return -ETIME;
- else if (major_code)
- return -EIO;
- else
- return 0;
- } else {
- return 0;
- }
-}
-
-
-/**
- * nes_modify_qp
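- * Handle ib_modify_qp(): validate the requested state transition, update the
- * access flags in the QP context, issue the hardware modify, and schedule
- * disconnect handling or the close timer when tearing a connection down.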
- */
-int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata)
-{
- struct nes_qp *nesqp = to_nesqp(ibqp);
- struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);
- struct nes_device *nesdev = nesvnic->nesdev;
- /* u32 cqp_head; */
- /* u32 counter; */
- u32 next_iwarp_state = 0;
- int err;
- unsigned long qplockflags;
- int ret;
- u16 original_last_aeq;
- u8 issue_modify_qp = 0;
- u8 dont_wait = 0;
-
- nes_debug(NES_DBG_MOD_QP, "QP%u: QP State=%u, cur QP State=%u,"
- " iwarp_state=0x%X, refcount=%d\n",
- nesqp->hwqp.qp_id, attr->qp_state, nesqp->ibqp_state,
- nesqp->iwarp_state, atomic_read(&nesqp->refcount));
-
- spin_lock_irqsave(&nesqp->lock, qplockflags);
-
- nes_debug(NES_DBG_MOD_QP, "QP%u: hw_iwarp_state=0x%X, hw_tcp_state=0x%X,"
- " QP Access Flags=0x%X, attr_mask = 0x%0x\n",
- nesqp->hwqp.qp_id, nesqp->hw_iwarp_state,
- nesqp->hw_tcp_state, attr->qp_access_flags, attr_mask);
-
- if (attr_mask & IB_QP_STATE) {
- switch (attr->qp_state) {
- case IB_QPS_INIT:
- nes_debug(NES_DBG_MOD_QP, "QP%u: new state = init\n",
- nesqp->hwqp.qp_id);
- if (nesqp->iwarp_state > (u32)NES_CQP_QP_IWARP_STATE_IDLE) {
- spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- return -EINVAL;
- }
- next_iwarp_state = NES_CQP_QP_IWARP_STATE_IDLE;
- issue_modify_qp = 1;
- break;
- case IB_QPS_RTR:
- nes_debug(NES_DBG_MOD_QP, "QP%u: new state = rtr\n",
- nesqp->hwqp.qp_id);
- if (nesqp->iwarp_state > (u32)NES_CQP_QP_IWARP_STATE_IDLE) {
- spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- return -EINVAL;
- }
- next_iwarp_state = NES_CQP_QP_IWARP_STATE_IDLE;
- issue_modify_qp = 1;
- break;
- case IB_QPS_RTS:
- nes_debug(NES_DBG_MOD_QP, "QP%u: new state = rts\n",
- nesqp->hwqp.qp_id);
- if (nesqp->iwarp_state > (u32)NES_CQP_QP_IWARP_STATE_RTS) {
- spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- return -EINVAL;
- }
- if (nesqp->cm_id == NULL) {
- nes_debug(NES_DBG_MOD_QP, "QP%u: Failing attempt to move QP to RTS without a CM_ID. \n",
- nesqp->hwqp.qp_id );
- spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- return -EINVAL;
- }
- next_iwarp_state = NES_CQP_QP_IWARP_STATE_RTS;
- if (nesqp->iwarp_state != NES_CQP_QP_IWARP_STATE_RTS)
- next_iwarp_state |= NES_CQP_QP_CONTEXT_VALID |
- NES_CQP_QP_ARP_VALID | NES_CQP_QP_ORD_VALID;
- issue_modify_qp = 1;
- nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_ESTABLISHED;
- nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_RTS;
- nesqp->hte_added = 1;
- break;
- case IB_QPS_SQD:
- issue_modify_qp = 1;
- nes_debug(NES_DBG_MOD_QP, "QP%u: new state=closing. SQ head=%u, SQ tail=%u\n",
- nesqp->hwqp.qp_id, nesqp->hwqp.sq_head, nesqp->hwqp.sq_tail);
- if (nesqp->iwarp_state == (u32)NES_CQP_QP_IWARP_STATE_CLOSING) {
- spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- return 0;
- } else {
- if (nesqp->iwarp_state > (u32)NES_CQP_QP_IWARP_STATE_CLOSING) {
- nes_debug(NES_DBG_MOD_QP, "QP%u: State change to closing"
- " ignored due to current iWARP state\n",
- nesqp->hwqp.qp_id);
- spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- return -EINVAL;
- }
- if (nesqp->hw_iwarp_state != NES_AEQE_IWARP_STATE_RTS) {
- nes_debug(NES_DBG_MOD_QP, "QP%u: State change to closing"
- " already done based on hw state.\n",
- nesqp->hwqp.qp_id);
- issue_modify_qp = 0;
- }
- switch (nesqp->hw_iwarp_state) {
- case NES_AEQE_IWARP_STATE_CLOSING:
- next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
- break;
- case NES_AEQE_IWARP_STATE_TERMINATE:
- next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE;
- break;
- case NES_AEQE_IWARP_STATE_ERROR:
- next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
- break;
- default:
- next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
- nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
- break;
- }
- }
- break;
- case IB_QPS_SQE:
- nes_debug(NES_DBG_MOD_QP, "QP%u: new state = terminate\n",
- nesqp->hwqp.qp_id);
- if (nesqp->iwarp_state >= (u32)NES_CQP_QP_IWARP_STATE_TERMINATE) {
- spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- return -EINVAL;
- }
- /* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */
- next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE;
- nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_TERMINATE;
- issue_modify_qp = 1;
- break;
- case IB_QPS_ERR:
- case IB_QPS_RESET:
- if (nesqp->iwarp_state == (u32)NES_CQP_QP_IWARP_STATE_ERROR) {
- spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- return -EINVAL;
- }
- nes_debug(NES_DBG_MOD_QP, "QP%u: new state = error\n",
- nesqp->hwqp.qp_id);
- if (nesqp->term_flags)
- del_timer(&nesqp->terminate_timer);
-
- next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
- /* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */
- if (nesqp->hte_added) {
- nes_debug(NES_DBG_MOD_QP, "set CQP_QP_DEL_HTE\n");
- next_iwarp_state |= NES_CQP_QP_DEL_HTE;
- nesqp->hte_added = 0;
- }
- if ((nesqp->hw_tcp_state > NES_AEQE_TCP_STATE_CLOSED) &&
- (nesdev->iw_status) &&
- (nesqp->hw_tcp_state != NES_AEQE_TCP_STATE_TIME_WAIT)) {
- next_iwarp_state |= NES_CQP_QP_RESET;
- } else {
- nes_debug(NES_DBG_MOD_QP, "QP%u NOT setting NES_CQP_QP_RESET since TCP state = %u\n",
- nesqp->hwqp.qp_id, nesqp->hw_tcp_state);
- dont_wait = 1;
- }
- issue_modify_qp = 1;
- nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_ERROR;
- break;
- default:
- spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- return -EINVAL;
- }
-
- nesqp->ibqp_state = attr->qp_state;
- nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
- nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
- nesqp->iwarp_state);
- }
-
- if (attr_mask & IB_QP_ACCESS_FLAGS) {
- if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE) {
- nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_RDMA_WRITE_EN |
- NES_QPCONTEXT_MISC_RDMA_READ_EN);
- issue_modify_qp = 1;
- }
- if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) {
- nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_RDMA_WRITE_EN);
- issue_modify_qp = 1;
- }
- if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ) {
- nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_RDMA_READ_EN);
- issue_modify_qp = 1;
- }
- if (attr->qp_access_flags & IB_ACCESS_MW_BIND) {
- nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_WBIND_EN);
- issue_modify_qp = 1;
- }
-
- if (nesqp->user_mode) {
- nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_RDMA_WRITE_EN |
- NES_QPCONTEXT_MISC_RDMA_READ_EN);
- issue_modify_qp = 1;
- }
- }
-
- original_last_aeq = nesqp->last_aeq;
- spin_unlock_irqrestore(&nesqp->lock, qplockflags);
-
- nes_debug(NES_DBG_MOD_QP, "issue_modify_qp=%u\n", issue_modify_qp);
-
- ret = 0;
-
-
- if (issue_modify_qp) {
- nes_debug(NES_DBG_MOD_QP, "call nes_hw_modify_qp\n");
- ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 1);
- if (ret)
- nes_debug(NES_DBG_MOD_QP, "nes_hw_modify_qp (next_iwarp_state = 0x%08X)"
- " failed for QP%u.\n",
- next_iwarp_state, nesqp->hwqp.qp_id);
-
- }
-
- if ((issue_modify_qp) && (nesqp->ibqp_state > IB_QPS_RTS)) {
- nes_debug(NES_DBG_MOD_QP, "QP%u Issued ModifyQP refcount (%d),"
- " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
- nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
- original_last_aeq, nesqp->last_aeq);
- if (!ret || original_last_aeq != NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) {
- if (dont_wait) {
- if (nesqp->cm_id && nesqp->hw_tcp_state != 0) {
- nes_debug(NES_DBG_MOD_QP, "QP%u Queuing fake disconnect for QP refcount (%d),"
- " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
- nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
- original_last_aeq, nesqp->last_aeq);
- /* this one is for the cm_disconnect thread */
- spin_lock_irqsave(&nesqp->lock, qplockflags);
- nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
- nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
- spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- nes_cm_disconn(nesqp);
- } else {
- nes_debug(NES_DBG_MOD_QP, "QP%u No fake disconnect, QP refcount=%d\n",
- nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount));
- }
- } else {
- spin_lock_irqsave(&nesqp->lock, qplockflags);
- if (nesqp->cm_id) {
- /* These two are for the timer thread */
- if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
- nesqp->cm_id->add_ref(nesqp->cm_id);
- nes_debug(NES_DBG_MOD_QP, "QP%u Not decrementing QP refcount (%d),"
- " need ae to finish up, original_last_aeq = 0x%04X."
- " last_aeq = 0x%04X, scheduling timer.\n",
- nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
- original_last_aeq, nesqp->last_aeq);
- schedule_nes_timer(nesqp->cm_node, (struct sk_buff *) nesqp, NES_TIMER_TYPE_CLOSE, 1, 0);
- }
- spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- } else {
- spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- nes_debug(NES_DBG_MOD_QP, "QP%u Not decrementing QP refcount (%d),"
- " need ae to finish up, original_last_aeq = 0x%04X."
- " last_aeq = 0x%04X.\n",
- nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
- original_last_aeq, nesqp->last_aeq);
- }
- }
- } else {
- nes_debug(NES_DBG_MOD_QP, "QP%u Decrementing QP refcount (%d), No ae to finish up,"
- " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
- nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
- original_last_aeq, nesqp->last_aeq);
- }
- } else {
- nes_debug(NES_DBG_MOD_QP, "QP%u Decrementing QP refcount (%d), No ae to finish up,"
- " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
- nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
- original_last_aeq, nesqp->last_aeq);
- }
-
- err = 0;
-
- nes_debug(NES_DBG_MOD_QP, "QP%u Leaving, refcount=%d\n",
- nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount));
-
- return err;
-}
-
-
-/**
- * nes_muticast_attach
- */
-static int nes_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- nes_debug(NES_DBG_INIT, "\n");
- return -ENOSYS;
-}
-
-
-/**
- * nes_multicast_detach
- */
-static int nes_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- nes_debug(NES_DBG_INIT, "\n");
- return -ENOSYS;
-}
-
-
-/**
- * nes_process_mad
- */
-static int nes_process_mad(struct ib_device *ibdev, int mad_flags,
- u8 port_num, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
-{
- nes_debug(NES_DBG_INIT, "\n");
- return -ENOSYS;
-}
-
-static inline void
-fill_wqe_sg_send(struct nes_hw_qp_wqe *wqe, struct ib_send_wr *ib_wr, u32 uselkey)
-{
- int sge_index;
- int total_payload_length = 0;
- for (sge_index = 0; sge_index < ib_wr->num_sge; sge_index++) {
- set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX+(sge_index*4),
- ib_wr->sg_list[sge_index].addr);
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_LENGTH0_IDX + (sge_index*4),
- ib_wr->sg_list[sge_index].length);
- if (uselkey)
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX + (sge_index*4),
- (ib_wr->sg_list[sge_index].lkey));
- else
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX + (sge_index*4), 0);
-
- total_payload_length += ib_wr->sg_list[sge_index].length;
- }
- nes_debug(NES_DBG_IW_TX, "UC UC UC, sending total_payload_length=%u \n",
- total_payload_length);
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
- total_payload_length);
-}
-
-/**
- * nes_post_send
- */
-static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
- struct ib_send_wr **bad_wr)
-{
- u64 u64temp;
- unsigned long flags = 0;
- struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_qp *nesqp = to_nesqp(ibqp);
- struct nes_hw_qp_wqe *wqe;
- int err = 0;
- u32 qsize = nesqp->hwqp.sq_size;
- u32 head;
- u32 wqe_misc = 0;
- u32 wqe_count = 0;
- u32 counter;
-
- if (nesqp->ibqp_state > IB_QPS_RTS) {
- err = -EINVAL;
- goto out;
- }
-
- spin_lock_irqsave(&nesqp->lock, flags);
-
- head = nesqp->hwqp.sq_head;
-
- while (ib_wr) {
- /* Check for QP error */
- if (nesqp->term_flags) {
- err = -EINVAL;
- break;
- }
-
- /* Check for SQ overflow */
- if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
- err = -ENOMEM;
- break;
- }
-
- wqe = &nesqp->hwqp.sq_vbase[head];
- /* nes_debug(NES_DBG_IW_TX, "processing sq wqe for QP%u at %p, head = %u.\n",
- nesqp->hwqp.qp_id, wqe, head); */
- nes_fill_init_qp_wqe(wqe, nesqp, head);
- u64temp = (u64)(ib_wr->wr_id);
- set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX,
- u64temp);
- switch (ib_wr->opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_INV:
- if (IB_WR_SEND == ib_wr->opcode) {
- if (ib_wr->send_flags & IB_SEND_SOLICITED)
- wqe_misc = NES_IWARP_SQ_OP_SENDSE;
- else
- wqe_misc = NES_IWARP_SQ_OP_SEND;
- } else {
- if (ib_wr->send_flags & IB_SEND_SOLICITED)
- wqe_misc = NES_IWARP_SQ_OP_SENDSEINV;
- else
- wqe_misc = NES_IWARP_SQ_OP_SENDINV;
-
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_INV_STAG_LOW_IDX,
- ib_wr->ex.invalidate_rkey);
- }
-
- if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
- err = -EINVAL;
- break;
- }
-
- if (ib_wr->send_flags & IB_SEND_FENCE)
- wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
-
- if ((ib_wr->send_flags & IB_SEND_INLINE) &&
- ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
- (ib_wr->sg_list[0].length <= 64)) {
- memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
- (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
- ib_wr->sg_list[0].length);
- wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
- } else {
- fill_wqe_sg_send(wqe, ib_wr, 1);
- }
-
- break;
- case IB_WR_RDMA_WRITE:
- wqe_misc = NES_IWARP_SQ_OP_RDMAW;
- if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
- nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=%u\n",
- ib_wr->num_sge, nesdev->nesadapter->max_sge);
- err = -EINVAL;
- break;
- }
-
- if (ib_wr->send_flags & IB_SEND_FENCE)
- wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
-
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
- ib_wr->wr.rdma.rkey);
- set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
- ib_wr->wr.rdma.remote_addr);
-
- if ((ib_wr->send_flags & IB_SEND_INLINE) &&
- ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
- (ib_wr->sg_list[0].length <= 64)) {
- memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
- (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
- ib_wr->sg_list[0].length);
- wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
- } else {
- fill_wqe_sg_send(wqe, ib_wr, 1);
- }
-
- wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX] =
- wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX];
- break;
- case IB_WR_RDMA_READ:
- case IB_WR_RDMA_READ_WITH_INV:
- /* iWARP only supports 1 sge for RDMA reads */
- if (ib_wr->num_sge > 1) {
- nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=1\n",
- ib_wr->num_sge);
- err = -EINVAL;
- break;
- }
- if (ib_wr->opcode == IB_WR_RDMA_READ) {
- wqe_misc = NES_IWARP_SQ_OP_RDMAR;
- } else {
- wqe_misc = NES_IWARP_SQ_OP_RDMAR_LOCINV;
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_INV_STAG_LOW_IDX,
- ib_wr->ex.invalidate_rkey);
- }
-
- set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
- ib_wr->wr.rdma.remote_addr);
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
- ib_wr->wr.rdma.rkey);
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX,
- ib_wr->sg_list->length);
- set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
- ib_wr->sg_list->addr);
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX,
- ib_wr->sg_list->lkey);
- break;
- case IB_WR_LOCAL_INV:
- wqe_misc = NES_IWARP_SQ_OP_LOCINV;
- set_wqe_32bit_value(wqe->wqe_words,
- NES_IWARP_SQ_LOCINV_WQE_INV_STAG_IDX,
- ib_wr->ex.invalidate_rkey);
- break;
- case IB_WR_FAST_REG_MR:
- {
- int i;
- int flags = ib_wr->wr.fast_reg.access_flags;
- struct nes_ib_fast_reg_page_list *pnesfrpl =
- container_of(ib_wr->wr.fast_reg.page_list,
- struct nes_ib_fast_reg_page_list,
- ibfrpl);
- u64 *src_page_list = pnesfrpl->ibfrpl.page_list;
- u64 *dst_page_list = pnesfrpl->nes_wqe_pbl.kva;
-
- if (ib_wr->wr.fast_reg.page_list_len >
- (NES_4K_PBL_CHUNK_SIZE / sizeof(u64))) {
- nes_debug(NES_DBG_IW_TX, "SQ_FMR: bad page_list_len\n");
- err = -EINVAL;
- break;
- }
- wqe_misc = NES_IWARP_SQ_OP_FAST_REG;
- set_wqe_64bit_value(wqe->wqe_words,
- NES_IWARP_SQ_FMR_WQE_VA_FBO_LOW_IDX,
- ib_wr->wr.fast_reg.iova_start);
- set_wqe_32bit_value(wqe->wqe_words,
- NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX,
- ib_wr->wr.fast_reg.length);
- set_wqe_32bit_value(wqe->wqe_words,
- NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0);
- set_wqe_32bit_value(wqe->wqe_words,
- NES_IWARP_SQ_FMR_WQE_MR_STAG_IDX,
- ib_wr->wr.fast_reg.rkey);
- /* Set page size: */
- if (ib_wr->wr.fast_reg.page_shift == 12) {
- wqe_misc |= NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_4K;
- } else if (ib_wr->wr.fast_reg.page_shift == 21) {
- wqe_misc |= NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_2M;
- } else {
- nes_debug(NES_DBG_IW_TX, "Invalid page shift,"
- " ib_wr=%u, max=1\n", ib_wr->num_sge);
- err = -EINVAL;
- break;
- }
- /* Set access_flags */
- wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_LOCAL_READ;
- if (flags & IB_ACCESS_LOCAL_WRITE)
- wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_LOCAL_WRITE;
-
- if (flags & IB_ACCESS_REMOTE_WRITE)
- wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_REMOTE_WRITE;
-
- if (flags & IB_ACCESS_REMOTE_READ)
- wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_REMOTE_READ;
-
- if (flags & IB_ACCESS_MW_BIND)
- wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_WINDOW_BIND;
-
- /* Fill in PBL info: */
- if (ib_wr->wr.fast_reg.page_list_len >
- pnesfrpl->ibfrpl.max_page_list_len) {
- nes_debug(NES_DBG_IW_TX, "Invalid page list length,"
- " ib_wr=%p, value=%u, max=%u\n",
- ib_wr, ib_wr->wr.fast_reg.page_list_len,
- pnesfrpl->ibfrpl.max_page_list_len);
- err = -EINVAL;
- break;
- }
-
- set_wqe_64bit_value(wqe->wqe_words,
- NES_IWARP_SQ_FMR_WQE_PBL_ADDR_LOW_IDX,
- pnesfrpl->nes_wqe_pbl.paddr);
-
- set_wqe_32bit_value(wqe->wqe_words,
- NES_IWARP_SQ_FMR_WQE_PBL_LENGTH_IDX,
- ib_wr->wr.fast_reg.page_list_len * 8);
-
- for (i = 0; i < ib_wr->wr.fast_reg.page_list_len; i++)
- dst_page_list[i] = cpu_to_le64(src_page_list[i]);
-
- nes_debug(NES_DBG_IW_TX, "SQ_FMR: iova_start: %llx, "
- "length: %d, rkey: %0x, pgl_paddr: %llx, "
- "page_list_len: %u, wqe_misc: %x\n",
- (unsigned long long) ib_wr->wr.fast_reg.iova_start,
- ib_wr->wr.fast_reg.length,
- ib_wr->wr.fast_reg.rkey,
- (unsigned long long) pnesfrpl->nes_wqe_pbl.paddr,
- ib_wr->wr.fast_reg.page_list_len,
- wqe_misc);
- break;
- }
- default:
- /* error */
- err = -EINVAL;
- break;
- }
-
- if (err)
- break;
-
- if ((ib_wr->send_flags & IB_SEND_SIGNALED) || nesqp->sig_all)
- wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL;
-
- wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = cpu_to_le32(wqe_misc);
-
- ib_wr = ib_wr->next;
- head++;
- wqe_count++;
- if (head >= qsize)
- head = 0;
-
- }
-
- nesqp->hwqp.sq_head = head;
- barrier();
- while (wqe_count) {
- counter = min(wqe_count, ((u32)255));
- wqe_count -= counter;
- nes_write32(nesdev->regs + NES_WQE_ALLOC,
- (counter << 24) | 0x00800000 | nesqp->hwqp.qp_id);
- }
-
- spin_unlock_irqrestore(&nesqp->lock, flags);
-
-out:
- if (err)
- *bad_wr = ib_wr;
- return err;
-}
-
-
-/**
- * nes_post_recv
- */
-static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
- struct ib_recv_wr **bad_wr)
-{
- u64 u64temp;
- unsigned long flags = 0;
- struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_qp *nesqp = to_nesqp(ibqp);
- struct nes_hw_qp_wqe *wqe;
- int err = 0;
- int sge_index;
- u32 qsize = nesqp->hwqp.rq_size;
- u32 head;
- u32 wqe_count = 0;
- u32 counter;
- u32 total_payload_length;
-
- if (nesqp->ibqp_state > IB_QPS_RTS) {
- err = -EINVAL;
- goto out;
- }
-
- spin_lock_irqsave(&nesqp->lock, flags);
-
- head = nesqp->hwqp.rq_head;
-
- while (ib_wr) {
- /* Check for QP error */
- if (nesqp->term_flags) {
- err = -EINVAL;
- break;
- }
-
- if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
- err = -EINVAL;
- break;
- }
- /* Check for RQ overflow */
- if (((head + (2 * qsize) - nesqp->hwqp.rq_tail) % qsize) == (qsize - 1)) {
- err = -ENOMEM;
- break;
- }
-
- nes_debug(NES_DBG_IW_RX, "ibwr sge count = %u.\n", ib_wr->num_sge);
- wqe = &nesqp->hwqp.rq_vbase[head];
-
- /* nes_debug(NES_DBG_IW_RX, "QP%u:processing rq wqe at %p, head = %u.\n",
- nesqp->hwqp.qp_id, wqe, head); */
- nes_fill_init_qp_wqe(wqe, nesqp, head);
- u64temp = (u64)(ib_wr->wr_id);
- set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX,
- u64temp);
- total_payload_length = 0;
- for (sge_index=0; sge_index < ib_wr->num_sge; sge_index++) {
- set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_RQ_WQE_FRAG0_LOW_IDX+(sge_index*4),
- ib_wr->sg_list[sge_index].addr);
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_RQ_WQE_LENGTH0_IDX+(sge_index*4),
- ib_wr->sg_list[sge_index].length);
- set_wqe_32bit_value(wqe->wqe_words,NES_IWARP_RQ_WQE_STAG0_IDX+(sge_index*4),
- ib_wr->sg_list[sge_index].lkey);
-
- total_payload_length += ib_wr->sg_list[sge_index].length;
- }
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_RQ_WQE_TOTAL_PAYLOAD_IDX,
- total_payload_length);
-
- ib_wr = ib_wr->next;
- head++;
- wqe_count++;
- if (head >= qsize)
- head = 0;
- }
-
- nesqp->hwqp.rq_head = head;
- barrier();
- while (wqe_count) {
- counter = min(wqe_count, ((u32)255));
- wqe_count -= counter;
- nes_write32(nesdev->regs+NES_WQE_ALLOC, (counter<<24) | nesqp->hwqp.qp_id);
- }
-
- spin_unlock_irqrestore(&nesqp->lock, flags);
-
-out:
- if (err)
- *bad_wr = ib_wr;
- return err;
-}
-
-
-/**
- * nes_poll_cq
- */
-static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
-{
- u64 u64temp;
- u64 wrid;
- unsigned long flags = 0;
- struct nes_vnic *nesvnic = to_nesvnic(ibcq->device);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_cq *nescq = to_nescq(ibcq);
- struct nes_qp *nesqp;
- struct nes_hw_cqe cqe;
- u32 head;
- u32 wq_tail = 0;
- u32 cq_size;
- u32 cqe_count = 0;
- u32 wqe_index;
- u32 u32temp;
- u32 move_cq_head = 1;
- u32 err_code;
-
- nes_debug(NES_DBG_CQ, "\n");
-
- spin_lock_irqsave(&nescq->lock, flags);
-
- head = nescq->hw_cq.cq_head;
- cq_size = nescq->hw_cq.cq_size;
-
- while (cqe_count < num_entries) {
- if ((le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
- NES_CQE_VALID) == 0)
- break;
-
- /*
- * Make sure we read CQ entry contents *after*
- * we've checked the valid bit.
- */
- rmb();
-
- cqe = nescq->hw_cq.cq_vbase[head];
- u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
- wqe_index = u32temp & (nesdev->nesadapter->max_qp_wr - 1);
- u32temp &= ~(NES_SW_CONTEXT_ALIGN-1);
- /* parse CQE, get completion context from WQE (either rq or sq) */
- u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) |
- ((u64)u32temp);
-
- if (u64temp) {
- nesqp = (struct nes_qp *)(unsigned long)u64temp;
- memset(entry, 0, sizeof *entry);
- if (cqe.cqe_words[NES_CQE_ERROR_CODE_IDX] == 0) {
- entry->status = IB_WC_SUCCESS;
- } else {
- err_code = le32_to_cpu(cqe.cqe_words[NES_CQE_ERROR_CODE_IDX]);
- if (NES_IWARP_CQE_MAJOR_DRV == (err_code >> 16)) {
- entry->status = err_code & 0x0000ffff;
-
- /* The rest of the cqe's will be marked as flushed */
- nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_ERROR_CODE_IDX] =
- cpu_to_le32((NES_IWARP_CQE_MAJOR_FLUSH << 16) |
- NES_IWARP_CQE_MINOR_FLUSH);
- } else
- entry->status = IB_WC_WR_FLUSH_ERR;
- }
-
- entry->qp = &nesqp->ibqp;
- entry->src_qp = nesqp->hwqp.qp_id;
-
- if (le32_to_cpu(cqe.cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_SQ) {
- if (nesqp->skip_lsmm) {
- nesqp->skip_lsmm = 0;
- nesqp->hwqp.sq_tail++;
- }
-
- /* Working on a SQ Completion*/
- wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wqe_index].
- wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX]))) << 32) |
- ((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wqe_index].
- wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX])));
- entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
- wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX]);
-
- switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
- wqe_words[NES_IWARP_SQ_WQE_MISC_IDX]) & 0x3f) {
- case NES_IWARP_SQ_OP_RDMAW:
- nes_debug(NES_DBG_CQ, "Operation = RDMA WRITE.\n");
- entry->opcode = IB_WC_RDMA_WRITE;
- break;
- case NES_IWARP_SQ_OP_RDMAR:
- nes_debug(NES_DBG_CQ, "Operation = RDMA READ.\n");
- entry->opcode = IB_WC_RDMA_READ;
- entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
- wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX]);
- break;
- case NES_IWARP_SQ_OP_SENDINV:
- case NES_IWARP_SQ_OP_SENDSEINV:
- case NES_IWARP_SQ_OP_SEND:
- case NES_IWARP_SQ_OP_SENDSE:
- nes_debug(NES_DBG_CQ, "Operation = Send.\n");
- entry->opcode = IB_WC_SEND;
- break;
- case NES_IWARP_SQ_OP_LOCINV:
- entry->opcode = IB_WC_LOCAL_INV;
- break;
- case NES_IWARP_SQ_OP_FAST_REG:
- entry->opcode = IB_WC_FAST_REG_MR;
- break;
- }
-
- nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1);
- if ((entry->status != IB_WC_SUCCESS) && (nesqp->hwqp.sq_tail != nesqp->hwqp.sq_head)) {
- move_cq_head = 0;
- wq_tail = nesqp->hwqp.sq_tail;
- }
- } else {
- /* Working on a RQ Completion*/
- entry->byte_len = le32_to_cpu(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]);
- wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) |
- ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32);
- entry->opcode = IB_WC_RECV;
-
- nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1);
- if ((entry->status != IB_WC_SUCCESS) && (nesqp->hwqp.rq_tail != nesqp->hwqp.rq_head)) {
- move_cq_head = 0;
- wq_tail = nesqp->hwqp.rq_tail;
- }
- }
-
- entry->wr_id = wrid;
- entry++;
- cqe_count++;
- }
-
- if (move_cq_head) {
- nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
- if (++head >= cq_size)
- head = 0;
- nescq->polled_completions++;
-
- if ((nescq->polled_completions > (cq_size / 2)) ||
- (nescq->polled_completions == 255)) {
- nes_debug(NES_DBG_CQ, "CQ%u Issuing CQE Allocate since more than half of cqes"
- " are pending %u of %u.\n",
- nescq->hw_cq.cq_number, nescq->polled_completions, cq_size);
- nes_write32(nesdev->regs+NES_CQE_ALLOC,
- nescq->hw_cq.cq_number | (nescq->polled_completions << 16));
- nescq->polled_completions = 0;
- }
- } else {
- /* Update the wqe index and set status to flush */
- wqe_index = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
- wqe_index = (wqe_index & (~(nesdev->nesadapter->max_qp_wr - 1))) | wq_tail;
- nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] =
- cpu_to_le32(wqe_index);
- move_cq_head = 1; /* ready for next pass */
- }
- }
-
- if (nescq->polled_completions) {
- nes_write32(nesdev->regs+NES_CQE_ALLOC,
- nescq->hw_cq.cq_number | (nescq->polled_completions << 16));
- nescq->polled_completions = 0;
- }
-
- nescq->hw_cq.cq_head = head;
- nes_debug(NES_DBG_CQ, "Reporting %u completions for CQ%u.\n",
- cqe_count, nescq->hw_cq.cq_number);
-
- spin_unlock_irqrestore(&nescq->lock, flags);
-
- return cqe_count;
-}
-
-
-/**
- * nes_req_notify_cq
- */
-static int nes_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
- {
- struct nes_vnic *nesvnic = to_nesvnic(ibcq->device);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_cq *nescq = to_nescq(ibcq);
- u32 cq_arm;
-
- nes_debug(NES_DBG_CQ, "Requesting notification for CQ%u.\n",
- nescq->hw_cq.cq_number);
-
- cq_arm = nescq->hw_cq.cq_number;
- if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP)
- cq_arm |= NES_CQE_ALLOC_NOTIFY_NEXT;
- else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
- cq_arm |= NES_CQE_ALLOC_NOTIFY_SE;
- else
- return -EINVAL;
-
- nes_write32(nesdev->regs+NES_CQE_ALLOC, cq_arm);
- nes_read32(nesdev->regs+NES_CQE_ALLOC);
-
- return 0;
-}
-
-static int nes_port_immutable(struct ib_device *ibdev, u8 port_num,
- struct ib_port_immutable *immutable)
-{
- struct ib_port_attr attr;
- int err;
-
- err = nes_query_port(ibdev, port_num, &attr);
- if (err)
- return err;
-
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
- immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
-
- return 0;
-}
-
-/**
- * nes_init_ofa_device
- */
-struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
-{
- struct nes_ib_device *nesibdev;
- struct nes_vnic *nesvnic = netdev_priv(netdev);
- struct nes_device *nesdev = nesvnic->nesdev;
-
- nesibdev = (struct nes_ib_device *)ib_alloc_device(sizeof(struct nes_ib_device));
- if (nesibdev == NULL) {
- return NULL;
- }
- strlcpy(nesibdev->ibdev.name, "nes%d", IB_DEVICE_NAME_MAX);
- nesibdev->ibdev.owner = THIS_MODULE;
-
- nesibdev->ibdev.node_type = RDMA_NODE_RNIC;
- memset(&nesibdev->ibdev.node_guid, 0, sizeof(nesibdev->ibdev.node_guid));
- memcpy(&nesibdev->ibdev.node_guid, netdev->dev_addr, 6);
-
- nesibdev->ibdev.uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
- (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
- (1ull << IB_USER_VERBS_CMD_BIND_MW) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_MW) |
- (1ull << IB_USER_VERBS_CMD_POST_RECV) |
- (1ull << IB_USER_VERBS_CMD_POST_SEND);
-
- nesibdev->ibdev.phys_port_cnt = 1;
- nesibdev->ibdev.num_comp_vectors = 1;
- nesibdev->ibdev.dma_device = &nesdev->pcidev->dev;
- nesibdev->ibdev.dev.parent = &nesdev->pcidev->dev;
- nesibdev->ibdev.query_device = nes_query_device;
- nesibdev->ibdev.query_port = nes_query_port;
- nesibdev->ibdev.query_pkey = nes_query_pkey;
- nesibdev->ibdev.query_gid = nes_query_gid;
- nesibdev->ibdev.alloc_ucontext = nes_alloc_ucontext;
- nesibdev->ibdev.dealloc_ucontext = nes_dealloc_ucontext;
- nesibdev->ibdev.mmap = nes_mmap;
- nesibdev->ibdev.alloc_pd = nes_alloc_pd;
- nesibdev->ibdev.dealloc_pd = nes_dealloc_pd;
- nesibdev->ibdev.create_ah = nes_create_ah;
- nesibdev->ibdev.destroy_ah = nes_destroy_ah;
- nesibdev->ibdev.create_qp = nes_create_qp;
- nesibdev->ibdev.modify_qp = nes_modify_qp;
- nesibdev->ibdev.query_qp = nes_query_qp;
- nesibdev->ibdev.destroy_qp = nes_destroy_qp;
- nesibdev->ibdev.create_cq = nes_create_cq;
- nesibdev->ibdev.destroy_cq = nes_destroy_cq;
- nesibdev->ibdev.poll_cq = nes_poll_cq;
- nesibdev->ibdev.get_dma_mr = nes_get_dma_mr;
- nesibdev->ibdev.reg_phys_mr = nes_reg_phys_mr;
- nesibdev->ibdev.reg_user_mr = nes_reg_user_mr;
- nesibdev->ibdev.dereg_mr = nes_dereg_mr;
- nesibdev->ibdev.alloc_mw = nes_alloc_mw;
- nesibdev->ibdev.dealloc_mw = nes_dealloc_mw;
- nesibdev->ibdev.bind_mw = nes_bind_mw;
-
- nesibdev->ibdev.alloc_mr = nes_alloc_mr;
- nesibdev->ibdev.alloc_fast_reg_page_list = nes_alloc_fast_reg_page_list;
- nesibdev->ibdev.free_fast_reg_page_list = nes_free_fast_reg_page_list;
-
- nesibdev->ibdev.attach_mcast = nes_multicast_attach;
- nesibdev->ibdev.detach_mcast = nes_multicast_detach;
- nesibdev->ibdev.process_mad = nes_process_mad;
-
- nesibdev->ibdev.req_notify_cq = nes_req_notify_cq;
- nesibdev->ibdev.post_send = nes_post_send;
- nesibdev->ibdev.post_recv = nes_post_recv;
-
- nesibdev->ibdev.iwcm = kzalloc(sizeof(*nesibdev->ibdev.iwcm), GFP_KERNEL);
- if (nesibdev->ibdev.iwcm == NULL) {
- ib_dealloc_device(&nesibdev->ibdev);
- return NULL;
- }
- nesibdev->ibdev.iwcm->add_ref = nes_add_ref;
- nesibdev->ibdev.iwcm->rem_ref = nes_rem_ref;
- nesibdev->ibdev.iwcm->get_qp = nes_get_qp;
- nesibdev->ibdev.iwcm->connect = nes_connect;
- nesibdev->ibdev.iwcm->accept = nes_accept;
- nesibdev->ibdev.iwcm->reject = nes_reject;
- nesibdev->ibdev.iwcm->create_listen = nes_create_listen;
- nesibdev->ibdev.iwcm->destroy_listen = nes_destroy_listen;
- nesibdev->ibdev.get_port_immutable = nes_port_immutable;
-
- return nesibdev;
-}
-
-
-/**
- * nes_handle_delayed_event
- */
-static void nes_handle_delayed_event(unsigned long data)
-{
- struct nes_vnic *nesvnic = (void *) data;
-
- if (nesvnic->delayed_event != nesvnic->last_dispatched_event) {
- struct ib_event event;
-
- event.device = &nesvnic->nesibdev->ibdev;
- if (!event.device)
- goto stop_timer;
- event.event = nesvnic->delayed_event;
- event.element.port_num = nesvnic->logical_port + 1;
- ib_dispatch_event(&event);
- }
-
-stop_timer:
- nesvnic->event_timer.function = NULL;
-}
-
-
-void nes_port_ibevent(struct nes_vnic *nesvnic)
-{
- struct nes_ib_device *nesibdev = nesvnic->nesibdev;
- struct nes_device *nesdev = nesvnic->nesdev;
- struct ib_event event;
- event.device = &nesibdev->ibdev;
- event.element.port_num = nesvnic->logical_port + 1;
- event.event = nesdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
-
- if (!nesvnic->event_timer.function) {
- ib_dispatch_event(&event);
- nesvnic->last_dispatched_event = event.event;
- nesvnic->event_timer.function = nes_handle_delayed_event;
- nesvnic->event_timer.data = (unsigned long) nesvnic;
- nesvnic->event_timer.expires = jiffies + NES_EVENT_DELAY;
- add_timer(&nesvnic->event_timer);
- } else {
- mod_timer(&nesvnic->event_timer, jiffies + NES_EVENT_DELAY);
- }
- nesvnic->delayed_event = event.event;
-}
-
-
-/**
- * nes_destroy_ofa_device
- */
-void nes_destroy_ofa_device(struct nes_ib_device *nesibdev)
-{
- if (nesibdev == NULL)
- return;
-
- nes_unregister_ofa_device(nesibdev);
-
- kfree(nesibdev->ibdev.iwcm);
- ib_dealloc_device(&nesibdev->ibdev);
-}
-
-
-/**
- * nes_register_ofa_device
- */
-int nes_register_ofa_device(struct nes_ib_device *nesibdev)
-{
- struct nes_vnic *nesvnic = nesibdev->nesvnic;
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- int i, ret;
-
- ret = ib_register_device(&nesvnic->nesibdev->ibdev, NULL);
- if (ret) {
- return ret;
- }
-
- /* Get the resources allocated to this device */
- nesibdev->max_cq = (nesadapter->max_cq-NES_FIRST_QPN) / nesadapter->port_count;
- nesibdev->max_mr = nesadapter->max_mr / nesadapter->port_count;
- nesibdev->max_qp = (nesadapter->max_qp-NES_FIRST_QPN) / nesadapter->port_count;
- nesibdev->max_pd = nesadapter->max_pd / nesadapter->port_count;
-
- for (i = 0; i < ARRAY_SIZE(nes_dev_attributes); ++i) {
- ret = device_create_file(&nesibdev->ibdev.dev, nes_dev_attributes[i]);
- if (ret) {
- while (i > 0) {
- i--;
- device_remove_file(&nesibdev->ibdev.dev,
- nes_dev_attributes[i]);
- }
- ib_unregister_device(&nesibdev->ibdev);
- return ret;
- }
- }
-
- nesvnic->of_device_registered = 1;
-
- return 0;
-}
-
-
-/**
- * nes_unregister_ofa_device
- */
-static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev)
-{
- struct nes_vnic *nesvnic = nesibdev->nesvnic;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(nes_dev_attributes); ++i) {
- device_remove_file(&nesibdev->ibdev.dev, nes_dev_attributes[i]);
- }
-
- if (nesvnic->of_device_registered) {
- ib_unregister_device(&nesibdev->ibdev);
- }
-
- nesvnic->of_device_registered = 0;
-}
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
deleted file mode 100644
index 309b31c31ae1..000000000000
--- a/drivers/infiniband/hw/nes/nes_verbs.h
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef NES_VERBS_H
-#define NES_VERBS_H
-
-struct nes_device;
-
-#define NES_MAX_USER_DB_REGIONS 4096
-#define NES_MAX_USER_WQ_REGIONS 4096
-
-#define NES_TERM_SENT 0x01
-#define NES_TERM_RCVD 0x02
-#define NES_TERM_DONE 0x04
-
-struct nes_ucontext {
- struct ib_ucontext ibucontext;
- struct nes_device *nesdev;
- unsigned long mmap_wq_offset;
- unsigned long mmap_cq_offset; /* to be removed */
- int index; /* rnic index (minor) */
- unsigned long allocated_doorbells[BITS_TO_LONGS(NES_MAX_USER_DB_REGIONS)];
- u16 mmap_db_index[NES_MAX_USER_DB_REGIONS];
- u16 first_free_db;
- unsigned long allocated_wqs[BITS_TO_LONGS(NES_MAX_USER_WQ_REGIONS)];
- struct nes_qp *mmap_nesqp[NES_MAX_USER_WQ_REGIONS];
- u16 first_free_wq;
- struct list_head cq_reg_mem_list;
- struct list_head qp_reg_mem_list;
- u32 mcrqf;
- atomic_t usecnt;
-};
-
-struct nes_pd {
- struct ib_pd ibpd;
- u16 pd_id;
- atomic_t sqp_count;
- u16 mmap_db_index;
-};
-
-struct nes_mr {
- union {
- struct ib_mr ibmr;
- struct ib_mw ibmw;
- struct ib_fmr ibfmr;
- };
- struct ib_umem *region;
- u16 pbls_used;
- u8 mode;
- u8 pbl_4k;
-};
-
-struct nes_hw_pb {
- __le32 pa_low;
- __le32 pa_high;
-};
-
-struct nes_vpbl {
- dma_addr_t pbl_pbase;
- struct nes_hw_pb *pbl_vbase;
-};
-
-struct nes_root_vpbl {
- dma_addr_t pbl_pbase;
- struct nes_hw_pb *pbl_vbase;
- struct nes_vpbl *leaf_vpbl;
-};
-
-struct nes_fmr {
- struct nes_mr nesmr;
- u32 leaf_pbl_cnt;
- struct nes_root_vpbl root_vpbl;
- struct ib_qp *ib_qp;
- int access_rights;
- struct ib_fmr_attr attr;
-};
-
-struct nes_av;
-
-struct nes_cq {
- struct ib_cq ibcq;
- struct nes_hw_cq hw_cq;
- u32 polled_completions;
- u32 cq_mem_size;
- spinlock_t lock;
- u8 virtual_cq;
- u8 pad[3];
- u32 mcrqf;
-};
-
-struct nes_wq {
- spinlock_t lock;
-};
-
-struct disconn_work {
- struct work_struct work;
- struct nes_qp *nesqp;
-};
-
-struct iw_cm_id;
-struct ietf_mpa_frame;
-
-struct nes_qp {
- struct ib_qp ibqp;
- void *allocated_buffer;
- struct iw_cm_id *cm_id;
- struct nes_cq *nesscq;
- struct nes_cq *nesrcq;
- struct nes_pd *nespd;
- void *cm_node; /* handle of the node this QP is associated with */
- void *ietf_frame;
- u8 ietf_frame_size;
- dma_addr_t ietf_frame_pbase;
- struct ib_mr *lsmm_mr;
- struct nes_hw_qp hwqp;
- struct work_struct work;
- enum ib_qp_state ibqp_state;
- u32 iwarp_state;
- u32 hte_index;
- u32 last_aeq;
- u32 qp_mem_size;
- atomic_t refcount;
- atomic_t close_timer_started;
- u32 mmap_sq_db_index;
- u32 mmap_rq_db_index;
- spinlock_t lock;
- spinlock_t pau_lock;
- struct nes_qp_context *nesqp_context;
- dma_addr_t nesqp_context_pbase;
- void *pbl_vbase;
- dma_addr_t pbl_pbase;
- struct page *page;
- struct timer_list terminate_timer;
- enum ib_event_type terminate_eventtype;
- struct sk_buff_head pau_list;
- u32 pau_rcv_nxt;
- u16 active_conn:1;
- u16 skip_lsmm:1;
- u16 user_mode:1;
- u16 hte_added:1;
- u16 flush_issued:1;
- u16 destroyed:1;
- u16 sig_all:1;
- u16 pau_mode:1;
- u16 rsvd:8;
- u16 private_data_len;
- u16 term_sq_flush_code;
- u16 term_rq_flush_code;
- u8 hw_iwarp_state;
- u8 hw_tcp_state;
- u8 term_flags;
- u8 sq_kmapped;
- u8 pau_busy;
- u8 pau_pending;
- u8 pau_state;
- __u64 nesuqp_addr;
-};
-#endif /* NES_VERBS_H */
diff --git a/drivers/infiniband/hw/ocrdma/Kconfig b/drivers/infiniband/hw/ocrdma/Kconfig
index c0cddc0192d1..54bd70bc4d1a 100644
--- a/drivers/infiniband/hw/ocrdma/Kconfig
+++ b/drivers/infiniband/hw/ocrdma/Kconfig
@@ -1,8 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_OCRDMA
tristate "Emulex One Connect HCA support"
depends on ETHERNET && NETDEVICES && PCI && INET && (IPV6 || IPV6=n)
select NET_VENDOR_EMULEX
select BE2NET
- ---help---
+ help
This driver provides low-level InfiniBand over Ethernet
support for Emulex One Connect host channel adapters (HCAs).
diff --git a/drivers/infiniband/hw/ocrdma/Makefile b/drivers/infiniband/hw/ocrdma/Makefile
index d1bfd4f4cdde..14fba95021d8 100644
--- a/drivers/infiniband/hw/ocrdma/Makefile
+++ b/drivers/infiniband/hw/ocrdma/Makefile
@@ -1,4 +1,5 @@
-ccflags-y := -Idrivers/net/ethernet/emulex/benet
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/drivers/net/ethernet/emulex/benet
obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma.o
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index b4091ab48db0..5eb61c110090 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -55,7 +55,7 @@
#include <be_roce.h>
#include "ocrdma_sli.h"
-#define OCRDMA_ROCE_DRV_VERSION "10.6.0.0"
+#define OCRDMA_ROCE_DRV_VERSION "11.0.0.0"
#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
@@ -98,7 +98,6 @@ struct ocrdma_dev_attr {
u64 max_mr_size;
u32 max_num_mr_pbl;
int max_mw;
- int max_fmr;
int max_map_per_fmr;
int max_pages_per_frmr;
u16 max_ord_per_qp;
@@ -114,6 +113,7 @@ struct ocrdma_dev_attr {
u8 local_ca_ack_delay;
u8 ird;
u8 num_ird_pages;
+ u8 udp_encap;
};
struct ocrdma_dma_mem {
@@ -185,7 +185,6 @@ struct ocrdma_hw_mr {
u32 num_pbes;
u32 pbl_size;
u32 pbe_size;
- u64 fbo;
u64 va;
};
@@ -193,6 +192,8 @@ struct ocrdma_mr {
struct ib_mr ibmr;
struct ib_umem *umem;
struct ocrdma_hw_mr hwmr;
+ u64 *pages;
+ u32 npages;
};
struct ocrdma_stats {
@@ -230,6 +231,10 @@ struct phy_info {
u16 interface_type;
};
+enum ocrdma_flags {
+ OCRDMA_FLAGS_LINK_STATUS_INIT = 0x01
+};
+
struct ocrdma_dev {
struct ib_device ibdev;
struct ocrdma_dev_attr attr;
@@ -278,7 +283,6 @@ struct ocrdma_dev {
u32 hba_port_num;
struct list_head entry;
- struct rcu_head rcu;
int id;
u64 *stag_arr;
u8 sl; /* service level */
@@ -286,6 +290,7 @@ struct ocrdma_dev {
atomic_t update_sl;
u16 pvid;
u32 asic_id;
+ u32 flags;
ulong last_stats_time;
struct mutex stats_lock; /* provide synch for debugfs operations */
@@ -317,9 +322,6 @@ struct ocrdma_cq {
*/
u32 max_hw_cqe;
bool phase_change;
- bool deferred_arm, deferred_sol;
- bool first_arm;
-
spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization
* to cq polling
*/
@@ -353,6 +355,7 @@ struct ocrdma_ah {
struct ocrdma_av *av;
u16 sgid_index;
u32 id;
+ u8 hdr_type;
};
struct ocrdma_qp_hwq_info {
@@ -522,17 +525,17 @@ static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe)
}
static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev,
- struct ib_ah_attr *ah_attr, u8 *mac_addr)
+ struct rdma_ah_attr *ah_attr, u8 *mac_addr)
{
struct in6_addr in6;
- memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
+ memcpy(&in6, rdma_ah_read_grh(ah_attr)->dgid.raw, sizeof(in6));
if (rdma_is_multicast_addr(&in6))
rdma_get_mcast_mac(&in6, mac_addr);
else if (rdma_link_local_addr(&in6))
rdma_get_ll_mac(&in6, mac_addr);
else
- memcpy(mac_addr, ah_attr->dmac, ETH_ALEN);
+ memcpy(mac_addr, ah_attr->roce.dmac, ETH_ALEN);
return 0;
}
@@ -590,4 +593,15 @@ static inline u8 ocrdma_is_enabled_and_synced(u32 state)
(state & OCRDMA_STATE_FLAG_SYNC);
}
+static inline u8 ocrdma_get_ae_link_state(u32 ae_state)
+{
+ return ((ae_state & OCRDMA_AE_LSC_LS_MASK) >> OCRDMA_AE_LSC_LS_SHIFT);
+}
+
+static inline bool ocrdma_is_udp_encap_supported(struct ocrdma_dev *dev)
+{
+ return (dev->attr.udp_encap & OCRDMA_L3_TYPE_IPV4) ||
+ (dev->attr.udp_encap & OCRDMA_L3_TYPE_IPV6);
+}
+
#endif
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
deleted file mode 100644
index 430b1350fe96..000000000000
--- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/* This file is part of the Emulex RoCE Device Driver for
- * RoCE (RDMA over Converged Ethernet) adapters.
- * Copyright (C) 2012-2015 Emulex. All rights reserved.
- * EMULEX and SLI are trademarks of Emulex.
- * www.emulex.com
- *
- * This software is available to you under a choice of one of two licenses.
- * You may choose to be licensed under the terms of the GNU General Public
- * License (GPL) Version 2, available from the file COPYING in the main
- * directory of this source tree, or the BSD license below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Contact Information:
- * linux-drivers@emulex.com
- *
- * Emulex
- * 3333 Susan Street
- * Costa Mesa, CA 92626
- */
-
-#ifndef __OCRDMA_ABI_H__
-#define __OCRDMA_ABI_H__
-
-#define OCRDMA_ABI_VERSION 2
-#define OCRDMA_BE_ROCE_ABI_VERSION 1
-/* user kernel communication data structures. */
-
-struct ocrdma_alloc_ucontext_resp {
- u32 dev_id;
- u32 wqe_size;
- u32 max_inline_data;
- u32 dpp_wqe_size;
- u64 ah_tbl_page;
- u32 ah_tbl_len;
- u32 rqe_size;
- u8 fw_ver[32];
- /* for future use/new features in progress */
- u64 rsvd1;
- u64 rsvd2;
-};
-
-struct ocrdma_alloc_pd_ureq {
- u64 rsvd1;
-};
-
-struct ocrdma_alloc_pd_uresp {
- u32 id;
- u32 dpp_enabled;
- u32 dpp_page_addr_hi;
- u32 dpp_page_addr_lo;
- u64 rsvd1;
-};
-
-struct ocrdma_create_cq_ureq {
- u32 dpp_cq;
- u32 rsvd; /* pad */
-};
-
-#define MAX_CQ_PAGES 8
-struct ocrdma_create_cq_uresp {
- u32 cq_id;
- u32 page_size;
- u32 num_pages;
- u32 max_hw_cqe;
- u64 page_addr[MAX_CQ_PAGES];
- u64 db_page_addr;
- u32 db_page_size;
- u32 phase_change;
- /* for future use/new features in progress */
- u64 rsvd1;
- u64 rsvd2;
-};
-
-#define MAX_QP_PAGES 8
-#define MAX_UD_AV_PAGES 8
-
-struct ocrdma_create_qp_ureq {
- u8 enable_dpp_cq;
- u8 rsvd;
- u16 dpp_cq_id;
- u32 rsvd1; /* pad */
-};
-
-struct ocrdma_create_qp_uresp {
- u16 qp_id;
- u16 sq_dbid;
- u16 rq_dbid;
- u16 resv0; /* pad */
- u32 sq_page_size;
- u32 rq_page_size;
- u32 num_sq_pages;
- u32 num_rq_pages;
- u64 sq_page_addr[MAX_QP_PAGES];
- u64 rq_page_addr[MAX_QP_PAGES];
- u64 db_page_addr;
- u32 db_page_size;
- u32 dpp_credit;
- u32 dpp_offset;
- u32 num_wqe_allocated;
- u32 num_rqe_allocated;
- u32 db_sq_offset;
- u32 db_rq_offset;
- u32 db_shift;
- u64 rsvd[11];
-} __packed;
-
-struct ocrdma_create_srq_uresp {
- u16 rq_dbid;
- u16 resv0; /* pad */
- u32 resv1;
-
- u32 rq_page_size;
- u32 num_rq_pages;
-
- u64 rq_page_addr[MAX_QP_PAGES];
- u64 db_page_addr;
-
- u32 db_page_size;
- u32 num_rqe_allocated;
- u32 db_rq_offset;
- u32 db_shift;
-
- u64 rsvd2;
- u64 rsvd3;
-};
-
-#endif /* __OCRDMA_ABI_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 44766fee1f4e..88c45928301f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -45,6 +45,7 @@
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
+#include <rdma/ib_cache.h>
#include "ocrdma.h"
#include "ocrdma_verbs.h"
@@ -54,21 +55,47 @@
#define OCRDMA_VID_PCP_SHIFT 0xD
+static u16 ocrdma_hdr_type_to_proto_num(int devid, u8 hdr_type)
+{
+ switch (hdr_type) {
+ case OCRDMA_L3_TYPE_IB_GRH:
+ return (u16)ETH_P_IBOE;
+ case OCRDMA_L3_TYPE_IPV4:
+ return (u16)0x0800;
+ case OCRDMA_L3_TYPE_IPV6:
+ return (u16)0x86dd;
+ default:
+ pr_err("ocrdma%d: Invalid network header\n", devid);
+ return 0;
+ }
+}
+
static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
- struct ib_ah_attr *attr, union ib_gid *sgid,
- int pdid, bool *isvlan)
+ struct rdma_ah_attr *attr, const union ib_gid *sgid,
+ int pdid, bool *isvlan, u16 vlan_tag)
{
- int status = 0;
- u16 vlan_tag;
+ int status;
struct ocrdma_eth_vlan eth;
struct ocrdma_grh grh;
int eth_sz;
+ u16 proto_num = 0;
+ u8 nxthdr = 0x11;
+ struct iphdr ipv4;
+ const struct ib_global_route *ib_grh;
+ union {
+ struct sockaddr_in _sockaddr_in;
+ struct sockaddr_in6 _sockaddr_in6;
+ } sgid_addr, dgid_addr;
memset(&eth, 0, sizeof(eth));
memset(&grh, 0, sizeof(grh));
+ /* Protocol Number */
+ proto_num = ocrdma_hdr_type_to_proto_num(dev->id, ah->hdr_type);
+ if (!proto_num)
+ return -EINVAL;
+ nxthdr = (proto_num == ETH_P_IBOE) ? 0x1b : 0x11;
/* VLAN */
- vlan_tag = attr->vlan_id;
if (!vlan_tag || (vlan_tag > 0xFFF))
vlan_tag = dev->pvid;
if (vlan_tag || dev->pfc_state) {
@@ -79,13 +106,13 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
dev->id);
}
eth.eth_type = cpu_to_be16(0x8100);
- eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
+ eth.roce_eth_type = cpu_to_be16(proto_num);
vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
eth.vlan_tag = cpu_to_be16(vlan_tag);
eth_sz = sizeof(struct ocrdma_eth_vlan);
*isvlan = true;
} else {
- eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
+ eth.eth_type = cpu_to_be16(proto_num);
eth_sz = sizeof(struct ocrdma_eth_basic);
}
/* MAC */
@@ -93,158 +120,149 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
status = ocrdma_resolve_dmac(dev, attr, &eth.dmac[0]);
if (status)
return status;
- ah->sgid_index = attr->grh.sgid_index;
- memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid));
- memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw));
-
- grh.tclass_flow = cpu_to_be32((6 << 28) |
- (attr->grh.traffic_class << 24) |
- attr->grh.flow_label);
- /* 0x1b is next header value in GRH */
- grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
- (0x1b << 8) | attr->grh.hop_limit);
+ ib_grh = rdma_ah_read_grh(attr);
+ ah->sgid_index = ib_grh->sgid_index;
/* Eth HDR */
memcpy(&ah->av->eth_hdr, &eth, eth_sz);
- memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
+ if (ah->hdr_type == RDMA_NETWORK_IPV4) {
+ *((__be16 *)&ipv4) = htons((4 << 12) | (5 << 8) |
+ ib_grh->traffic_class);
+ ipv4.id = cpu_to_be16(pdid);
+ ipv4.frag_off = htons(IP_DF);
+ ipv4.tot_len = htons(0);
+ ipv4.ttl = ib_grh->hop_limit;
+ ipv4.protocol = nxthdr;
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid);
+ ipv4.saddr = sgid_addr._sockaddr_in.sin_addr.s_addr;
+ rdma_gid2ip((struct sockaddr*)&dgid_addr, &ib_grh->dgid);
+ ipv4.daddr = dgid_addr._sockaddr_in.sin_addr.s_addr;
+ memcpy((u8 *)ah->av + eth_sz, &ipv4, sizeof(struct iphdr));
+ } else {
+ memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid));
+ grh.tclass_flow = cpu_to_be32((6 << 28) |
+ (ib_grh->traffic_class << 24) |
+ ib_grh->flow_label);
+ memcpy(&grh.dgid[0], ib_grh->dgid.raw,
+ sizeof(ib_grh->dgid.raw));
+ grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
+ (nxthdr << 8) |
+ ib_grh->hop_limit);
+ memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
+ }
if (*isvlan)
ah->av->valid |= OCRDMA_AV_VLAN_VALID;
ah->av->valid = cpu_to_le32(ah->av->valid);
return status;
}
-struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
+int ocrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
u32 *ahid_addr;
- bool isvlan = false;
int status;
- struct ocrdma_ah *ah;
- struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
- struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
- union ib_gid sgid;
+ struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
+ bool isvlan = false;
+ u16 vlan_tag = 0xffff;
+ const struct ib_gid_attr *sgid_attr;
+ struct ocrdma_pd *pd = get_ocrdma_pd(ibah->pd);
+ struct rdma_ah_attr *attr = init_attr->ah_attr;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device);
- if (!(attr->ah_flags & IB_AH_GRH))
- return ERR_PTR(-EINVAL);
+ if ((attr->type != RDMA_AH_ATTR_TYPE_ROCE) ||
+ !(rdma_ah_get_ah_flags(attr) & IB_AH_GRH))
+ return -EINVAL;
if (atomic_cmpxchg(&dev->update_sl, 1, 0))
ocrdma_init_service_level(dev);
- ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
- if (!ah)
- return ERR_PTR(-ENOMEM);
+
+ sgid_attr = attr->grh.sgid_attr;
+ status = rdma_read_gid_l2_fields(sgid_attr, &vlan_tag, NULL);
+ if (status)
+ return status;
status = ocrdma_alloc_av(dev, ah);
if (status)
goto av_err;
- status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index, &sgid);
- if (status) {
- pr_err("%s(): Failed to query sgid, status = %d\n",
- __func__, status);
- goto av_conf_err;
- }
-
- if ((pd->uctx) &&
- (!rdma_is_multicast_addr((struct in6_addr *)attr->grh.dgid.raw)) &&
- (!rdma_link_local_addr((struct in6_addr *)attr->grh.dgid.raw))) {
- status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid,
- attr->dmac, &attr->vlan_id);
- if (status) {
- pr_err("%s(): Failed to resolve dmac from gid."
- "status = %d\n", __func__, status);
- goto av_conf_err;
- }
- }
+ /* Get network header type for this GID */
+ ah->hdr_type = rdma_gid_attr_network_type(sgid_attr);
- status = set_av_attr(dev, ah, attr, &sgid, pd->id, &isvlan);
+ status = set_av_attr(dev, ah, attr, &sgid_attr->gid, pd->id,
+ &isvlan, vlan_tag);
if (status)
goto av_conf_err;
/* if pd is for the user process, pass the ah_id to user space */
if ((pd->uctx) && (pd->uctx->ah_tbl.va)) {
- ahid_addr = pd->uctx->ah_tbl.va + attr->dlid;
+ ahid_addr = pd->uctx->ah_tbl.va + rdma_ah_get_dlid(attr);
*ahid_addr = 0;
*ahid_addr |= ah->id & OCRDMA_AH_ID_MASK;
+ if (ocrdma_is_udp_encap_supported(dev)) {
+ *ahid_addr |= ((u32)ah->hdr_type &
+ OCRDMA_AH_L3_TYPE_MASK) <<
+ OCRDMA_AH_L3_TYPE_SHIFT;
+ }
if (isvlan)
*ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK <<
OCRDMA_AH_VLAN_VALID_SHIFT);
}
- return &ah->ibah;
+ return 0;
av_conf_err:
ocrdma_free_av(dev, ah);
av_err:
- kfree(ah);
- return ERR_PTR(status);
+ return status;
}
-int ocrdma_destroy_ah(struct ib_ah *ibah)
+int ocrdma_destroy_ah(struct ib_ah *ibah, u32 flags)
{
struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device);
ocrdma_free_av(dev, ah);
- kfree(ah);
return 0;
}
-int ocrdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
+int ocrdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
struct ocrdma_av *av = ah->av;
struct ocrdma_grh *grh;
- attr->ah_flags |= IB_AH_GRH;
+
+ attr->type = ibah->type;
if (ah->av->valid & OCRDMA_AV_VALID) {
grh = (struct ocrdma_grh *)((u8 *)ah->av +
sizeof(struct ocrdma_eth_vlan));
- attr->sl = be16_to_cpu(av->eth_hdr.vlan_tag) >> 13;
+ rdma_ah_set_sl(attr, be16_to_cpu(av->eth_hdr.vlan_tag) >> 13);
} else {
grh = (struct ocrdma_grh *)((u8 *)ah->av +
sizeof(struct ocrdma_eth_basic));
- attr->sl = 0;
+ rdma_ah_set_sl(attr, 0);
}
- memcpy(&attr->grh.dgid.raw[0], &grh->dgid[0], sizeof(grh->dgid));
- attr->grh.sgid_index = ah->sgid_index;
- attr->grh.hop_limit = be32_to_cpu(grh->pdid_hoplimit) & 0xff;
- attr->grh.traffic_class = be32_to_cpu(grh->tclass_flow) >> 24;
- attr->grh.flow_label = be32_to_cpu(grh->tclass_flow) & 0x00ffffffff;
+ rdma_ah_set_grh(attr, NULL,
+ be32_to_cpu(grh->tclass_flow) & 0xffffffff,
+ ah->sgid_index,
+ be32_to_cpu(grh->pdid_hoplimit) & 0xff,
+ be32_to_cpu(grh->tclass_flow) >> 24);
+ rdma_ah_set_dgid_raw(attr, &grh->dgid[0]);
return 0;
}
-int ocrdma_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
-{
- /* modify_ah is unsupported */
- return -ENOSYS;
-}
-
-int ocrdma_process_mad(struct ib_device *ibdev,
- int process_mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
+int ocrdma_process_mad(struct ib_device *ibdev, int process_mad_flags,
+ u32 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh, const struct ib_mad *in,
+ struct ib_mad *out, size_t *out_mad_size,
u16 *out_mad_pkey_index)
{
- int status;
+ int status = IB_MAD_RESULT_SUCCESS;
struct ocrdma_dev *dev;
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
-
- switch (in_mad->mad_hdr.mgmt_class) {
- case IB_MGMT_CLASS_PERF_MGMT:
+ if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
dev = get_ocrdma_dev(ibdev);
- if (!ocrdma_pma_counters(dev, out_mad))
- status = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
- else
- status = IB_MAD_RESULT_SUCCESS;
- break;
- default:
- status = IB_MAD_RESULT_SUCCESS;
- break;
+ ocrdma_pma_counters(dev, out);
+ status |= IB_MAD_RESULT_REPLY;
}
+
return status;
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index 04a30ae67473..2626679df31d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -46,20 +46,19 @@
enum {
OCRDMA_AH_ID_MASK = 0x3FF,
OCRDMA_AH_VLAN_VALID_MASK = 0x01,
- OCRDMA_AH_VLAN_VALID_SHIFT = 0x1F
+ OCRDMA_AH_VLAN_VALID_SHIFT = 0x1F,
+ OCRDMA_AH_L3_TYPE_MASK = 0x03,
+ OCRDMA_AH_L3_TYPE_SHIFT = 0x1D /* 29 bits */
};
-struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *);
-int ocrdma_destroy_ah(struct ib_ah *);
-int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *);
-int ocrdma_modify_ah(struct ib_ah *, struct ib_ah_attr *);
+int ocrdma_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
+int ocrdma_destroy_ah(struct ib_ah *ah, u32 flags);
+int ocrdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
-int ocrdma_process_mad(struct ib_device *,
- int process_mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
+int ocrdma_process_mad(struct ib_device *dev, int process_mad_flags,
+ u32 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh, const struct ib_mad *in,
+ struct ib_mad *out, size_t *out_mad_size,
u16 *out_mad_pkey_index);
#endif /* __OCRDMA_AH_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index aab391a15db4..56f06c68f31a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -44,9 +44,11 @@
#include <linux/interrupt.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>
+#include <linux/if_ether.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_cache.h>
#include "ocrdma.h"
#include "ocrdma_hw.h"
@@ -250,7 +252,10 @@ static int ocrdma_get_mbx_errno(u32 status)
case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES:
err_num = -EAGAIN;
break;
+ default:
+ err_num = -EFAULT;
}
+ break;
default:
err_num = -EFAULT;
}
@@ -375,11 +380,10 @@ static int ocrdma_alloc_q(struct ocrdma_dev *dev,
q->len = len;
q->entry_size = entry_size;
q->size = len * entry_size;
- q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size,
- &q->dma, GFP_KERNEL);
+ q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size, &q->dma,
+ GFP_KERNEL);
if (!q->va)
return -ENOMEM;
- memset(q->va, 0, q->size);
return 0;
}
@@ -578,6 +582,8 @@ static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
cmd->async_event_bitmap = BIT(OCRDMA_ASYNC_GRP5_EVE_CODE);
cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_RDMA_EVE_CODE);
+ /* Request link events on this MQ. */
+ cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_LINK_EVE_CODE);
cmd->async_cqid_ringsize = cq->id;
cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
@@ -678,11 +684,33 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
int dev_event = 0;
int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;
+ u16 qpid = cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK;
+ u16 cqid = cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK;
+
+ /*
+ * Some FW version returns wrong qp or cq ids in CQEs.
+ * Checking whether the IDs are valid
+ */
+
+ if (cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPVALID) {
+ if (qpid < dev->attr.max_qp)
+ qp = dev->qp_tbl[qpid];
+ if (qp == NULL) {
+ pr_err("ocrdma%d:Async event - qpid %u is not valid\n",
+ dev->id, qpid);
+ return;
+ }
+ }
- if (cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPVALID)
- qp = dev->qp_tbl[cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK];
- if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID)
- cq = dev->cq_tbl[cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK];
+ if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID) {
+ if (cqid < dev->attr.max_cq)
+ cq = dev->cq_tbl[cqid];
+ if (cq == NULL) {
+ pr_err("ocrdma%d:Async event - cqid %u is not valid\n",
+ dev->id, cqid);
+ return;
+ }
+ }
memset(&ib_evt, 0, sizeof(ib_evt));
@@ -764,7 +792,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
qp->srq->ibsrq.
srq_context);
} else if (dev_event) {
- pr_err("%s: Fatal event received\n", dev->ibdev.name);
+ dev_err(&dev->ibdev.dev, "Fatal event received\n");
ib_dispatch_event(&ib_evt);
}
@@ -796,20 +824,42 @@ static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev,
}
}
+static void ocrdma_process_link_state(struct ocrdma_dev *dev,
+ struct ocrdma_ae_mcqe *cqe)
+{
+ struct ocrdma_ae_lnkst_mcqe *evt;
+ u8 lstate;
+
+ evt = (struct ocrdma_ae_lnkst_mcqe *)cqe;
+ lstate = ocrdma_get_ae_link_state(evt->speed_state_ptn);
+
+ if (!(lstate & OCRDMA_AE_LSC_LLINK_MASK))
+ return;
+
+ if (dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT)
+ ocrdma_update_link_state(dev, (lstate & OCRDMA_LINK_ST_MASK));
+}
+
static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
{
/* async CQE processing */
struct ocrdma_ae_mcqe *cqe = ae_cqe;
u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;
-
- if (evt_code == OCRDMA_ASYNC_RDMA_EVE_CODE)
+ switch (evt_code) {
+ case OCRDMA_ASYNC_LINK_EVE_CODE:
+ ocrdma_process_link_state(dev, cqe);
+ break;
+ case OCRDMA_ASYNC_RDMA_EVE_CODE:
ocrdma_dispatch_ibevent(dev, cqe);
- else if (evt_code == OCRDMA_ASYNC_GRP5_EVE_CODE)
+ break;
+ case OCRDMA_ASYNC_GRP5_EVE_CODE:
ocrdma_process_grp5_aync(dev, cqe);
- else
+ break;
+ default:
pr_err("%s(%d) invalid evt code=0x%x\n", __func__,
dev->id, evt_code);
+ }
}
static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
@@ -1042,7 +1092,7 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
rsp = &mqe->u.rsp;
if (cqe_status || ext_status) {
- pr_err("%s() cqe_status=0x%x, ext_status=0x%x,",
+ pr_err("%s() cqe_status=0x%x, ext_status=0x%x,\n",
__func__, cqe_status, ext_status);
if (rsp) {
/* This is for embedded cmds. */
@@ -1066,7 +1116,7 @@ mbx_err:
static int ocrdma_nonemb_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe,
void *payload_va)
{
- int status = 0;
+ int status;
struct ocrdma_mbx_rsp *rsp = payload_va;
if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
@@ -1097,6 +1147,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_pd =
(rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
+ attr->udp_encap = (rsp->max_pd_ca_ack_delay &
+ OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_L3_TYPE_SHIFT;
attr->max_dpp_pds =
(rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET;
@@ -1106,18 +1159,18 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_srq =
(rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
- attr->max_send_sge = ((rsp->max_write_send_sge &
+ attr->max_send_sge = ((rsp->max_recv_send_sge &
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
- attr->max_recv_sge = (rsp->max_write_send_sge &
- OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
- OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
+ attr->max_recv_sge = (rsp->max_recv_send_sge &
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT;
attr->max_srq_sge = (rsp->max_srq_rqe_sge &
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
- attr->max_rdma_sge = (rsp->max_write_send_sge &
- OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK) >>
- OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT;
+ attr->max_rdma_sge = (rsp->max_wr_rd_sge &
+ OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT;
attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
@@ -1137,7 +1190,6 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_mr = rsp->max_mr;
attr->max_mr_size = ((u64)rsp->max_mr_size_hi << 32) |
rsp->max_mr_size_lo;
- attr->max_fmr = 0;
attr->max_pages_per_frmr = rsp->max_pages_per_frmr;
attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
attr->max_cqe = rsp->max_cq_cqes_per_cq &
@@ -1298,7 +1350,6 @@ static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dma.pa);
mqe->u.nonemb_req.sge[0].len = dma.size;
- memset(dma.va, 0, dma.size);
ocrdma_init_mch((struct ocrdma_mbx_hdr *)dma.va,
OCRDMA_CMD_GET_CTRL_ATTRIBUTES,
OCRDMA_SUBSYS_COMMON,
@@ -1312,8 +1363,9 @@ static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
dev->hba_port_num = (hba_attribs->ptpnum_maxdoms_hbast_cv &
OCRDMA_HBA_ATTRB_PTNUM_MASK)
>> OCRDMA_HBA_ATTRB_PTNUM_SHIFT;
- strncpy(dev->model_number,
- hba_attribs->controller_model_number, 31);
+ strscpy(dev->model_number,
+ hba_attribs->controller_model_number,
+ sizeof(dev->model_number));
}
dma_free_coherent(&dev->nic_info.pdev->dev, dma.size, dma.va, dma.pa);
free_mqe:
@@ -1340,7 +1392,8 @@ mbx_err:
return status;
}
-int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed)
+int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed,
+ u8 *lnk_state)
{
int status = -ENOMEM;
struct ocrdma_get_link_speed_rsp *rsp;
@@ -1361,8 +1414,11 @@ int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed)
goto mbx_err;
rsp = (struct ocrdma_get_link_speed_rsp *)cmd;
- *lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK)
- >> OCRDMA_PHY_PS_SHIFT;
+ if (lnk_speed)
+ *lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK)
+ >> OCRDMA_PHY_PS_SHIFT;
+ if (lnk_state)
+ *lnk_state = (rsp->res_lnk_st & OCRDMA_LINK_ST_MASK);
mbx_err:
kfree(cmd);
@@ -1450,7 +1506,6 @@ int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
{
int status = -ENOMEM;
- size_t pd_bitmap_size;
struct ocrdma_alloc_pd_range *cmd;
struct ocrdma_alloc_pd_range_rsp *rsp;
@@ -1472,10 +1527,8 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
dev->pd_mgr->max_dpp_pd = rsp->pd_count;
- pd_bitmap_size =
- BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
- dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
- GFP_KERNEL);
+ dev->pd_mgr->pd_dpp_bitmap = bitmap_zalloc(rsp->pd_count,
+ GFP_KERNEL);
}
kfree(cmd);
}
@@ -1491,9 +1544,8 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
dev->pd_mgr->max_normal_pd = rsp->pd_count;
- pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
- dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
- GFP_KERNEL);
+ dev->pd_mgr->pd_norm_bitmap = bitmap_zalloc(rsp->pd_count,
+ GFP_KERNEL);
}
kfree(cmd);
@@ -1542,10 +1594,9 @@ void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr),
GFP_KERNEL);
- if (!dev->pd_mgr) {
- pr_err("%s(%d)Memory allocation failure.\n", __func__, dev->id);
+ if (!dev->pd_mgr)
return;
- }
+
status = ocrdma_mbx_alloc_pd_range(dev);
if (status) {
pr_err("%s(%d) Unable to initialize PD pool, using default.\n",
@@ -1556,8 +1607,8 @@ void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
static void ocrdma_free_pd_pool(struct ocrdma_dev *dev)
{
ocrdma_mbx_dealloc_pd_range(dev);
- kfree(dev->pd_mgr->pd_norm_bitmap);
- kfree(dev->pd_mgr->pd_dpp_bitmap);
+ bitmap_free(dev->pd_mgr->pd_norm_bitmap);
+ bitmap_free(dev->pd_mgr->pd_dpp_bitmap);
kfree(dev->pd_mgr);
}
@@ -1588,7 +1639,7 @@ static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
{
int i;
- int status = 0;
+ int status = -ENOMEM;
int max_ah;
struct ocrdma_create_ah_tbl *cmd;
struct ocrdma_create_ah_tbl_rsp *rsp;
@@ -1633,7 +1684,6 @@ static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
goto mem_err_ah;
dev->av_tbl.pa = pa;
dev->av_tbl.num_ah = max_ah;
- memset(dev->av_tbl.va, 0, dev->av_tbl.size);
pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va;
for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) {
@@ -1767,7 +1817,6 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
status = -ENOMEM;
goto mem_err;
}
- memset(cq->va, 0, cq->len);
page_size = cq->len / hw_pages;
cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
@@ -1832,14 +1881,13 @@ mem_err:
return status;
}
-int ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
+void ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
{
- int status = -ENOMEM;
struct ocrdma_destroy_cq *cmd;
cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_CQ, sizeof(*cmd));
if (!cmd)
- return status;
+ return;
ocrdma_init_mch(&cmd->req, OCRDMA_CMD_DELETE_CQ,
OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
@@ -1847,11 +1895,10 @@ int ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
(cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) &
OCRDMA_DESTROY_CQ_QID_MASK;
- status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
ocrdma_unbind_eq(dev, cq->eqn);
dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa);
kfree(cmd);
- return status;
}
int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
@@ -1890,7 +1937,7 @@ mbx_err:
int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *dev, int fr_mr, u32 lkey)
{
- int status = -ENOMEM;
+ int status;
struct ocrdma_dealloc_lkey *cmd;
cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_LKEY, sizeof(*cmd));
@@ -1899,9 +1946,7 @@ int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *dev, int fr_mr, u32 lkey)
cmd->lkey = lkey;
cmd->rsvd_frmr = fr_mr ? 1 : 0;
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
- if (status)
- goto mbx_err;
-mbx_err:
+
kfree(cmd);
return status;
}
@@ -1913,6 +1958,7 @@ static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
int i;
struct ocrdma_reg_nsmr *cmd;
struct ocrdma_reg_nsmr_rsp *rsp;
+ u64 fbo = hwmr->va & (hwmr->pbe_size - 1);
cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR, sizeof(*cmd));
if (!cmd)
@@ -1938,8 +1984,8 @@ static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT;
cmd->totlen_low = hwmr->len;
cmd->totlen_high = upper_32_bits(hwmr->len);
- cmd->fbo_low = (u32) (hwmr->fbo & 0xffffffff);
- cmd->fbo_high = (u32) upper_32_bits(hwmr->fbo);
+ cmd->fbo_low = lower_32_bits(fbo);
+ cmd->fbo_high = upper_32_bits(fbo);
cmd->va_loaddr = (u32) hwmr->va;
cmd->va_hiaddr = (u32) upper_32_bits(hwmr->va);
@@ -1961,7 +2007,7 @@ static int ocrdma_mbx_reg_mr_cont(struct ocrdma_dev *dev,
struct ocrdma_hw_mr *hwmr, u32 pbl_cnt,
u32 pbl_offset, u32 last)
{
- int status = -ENOMEM;
+ int status;
int i;
struct ocrdma_reg_nsmr_cont *cmd;
@@ -1980,9 +2026,7 @@ static int ocrdma_mbx_reg_mr_cont(struct ocrdma_dev *dev,
upper_32_bits(hwmr->pbl_table[i + pbl_offset].pa);
}
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
- if (status)
- goto mbx_err;
-mbx_err:
+
kfree(cmd);
return status;
}
@@ -2087,7 +2131,6 @@ int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
enum ib_qp_state *old_ib_state)
{
unsigned long flags;
- int status = 0;
enum ocrdma_qp_state new_state;
new_state = get_ocrdma_qp_state(new_ib_state);
@@ -2112,7 +2155,7 @@ int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
qp->state = new_state;
spin_unlock_irqrestore(&qp->q_lock, flags);
- return status;
+ return 0;
}
static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp)
@@ -2161,7 +2204,6 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
if (!qp->sq.va)
return -EINVAL;
- memset(qp->sq.va, 0, len);
qp->sq.len = len;
qp->sq.pa = pa;
qp->sq.entry_size = dev->attr.wqe_size;
@@ -2212,7 +2254,6 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
if (!qp->rq.va)
return -ENOMEM;
- memset(qp->rq.va, 0, len);
qp->rq.pa = pa;
qp->rq.len = len;
qp->rq.entry_size = dev->attr.rqe_size;
@@ -2266,11 +2307,10 @@ static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
if (dev->attr.ird == 0)
return 0;
- qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len,
- &pa, GFP_KERNEL);
+ qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len, &pa,
+ GFP_KERNEL);
if (!qp->ird_q_va)
return -ENOMEM;
- memset(qp->ird_q_va, 0, ird_q_len);
ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
pa, ird_page_size);
for (; i < ird_q_len / dev->attr.rqe_size; i++) {
@@ -2446,64 +2486,83 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
int attr_mask)
{
int status;
- struct ib_ah_attr *ah_attr = &attrs->ah_attr;
- union ib_gid sgid, zgid;
- u32 vlan_id = 0xFFFF;
- u8 mac_addr[6];
+ struct rdma_ah_attr *ah_attr = &attrs->ah_attr;
+ const struct ib_gid_attr *sgid_attr;
+ u16 vlan_id = 0xFFFF;
+ u8 mac_addr[6], hdr_type;
+ union {
+ struct sockaddr_in _sockaddr_in;
+ struct sockaddr_in6 _sockaddr_in6;
+ } sgid_addr, dgid_addr;
struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
+ const struct ib_global_route *grh;
- if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
+ if ((rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) == 0)
return -EINVAL;
+ grh = rdma_ah_read_grh(ah_attr);
if (atomic_cmpxchg(&dev->update_sl, 1, 0))
ocrdma_init_service_level(dev);
cmd->params.tclass_sq_psn |=
- (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
+ (grh->traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
cmd->params.rnt_rc_sl_fl |=
- (ah_attr->grh.flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK);
- cmd->params.rnt_rc_sl_fl |= (ah_attr->sl << OCRDMA_QP_PARAMS_SL_SHIFT);
+ (grh->flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK);
+ cmd->params.rnt_rc_sl_fl |= (rdma_ah_get_sl(ah_attr) <<
+ OCRDMA_QP_PARAMS_SL_SHIFT);
cmd->params.hop_lmt_rq_psn |=
- (ah_attr->grh.hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
+ (grh->hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
- memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
+
+ /* GIDs */
+ memcpy(&cmd->params.dgid[0], &grh->dgid.raw[0],
sizeof(cmd->params.dgid));
- status = ocrdma_query_gid(&dev->ibdev, 1,
- ah_attr->grh.sgid_index, &sgid);
+
+ sgid_attr = ah_attr->grh.sgid_attr;
+ status = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, &mac_addr[0]);
if (status)
return status;
- memset(&zgid, 0, sizeof(zgid));
- if (!memcmp(&sgid, &zgid, sizeof(zgid)))
- return -EINVAL;
-
- qp->sgid_idx = ah_attr->grh.sgid_index;
- memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
+ qp->sgid_idx = grh->sgid_index;
+ memcpy(&cmd->params.sgid[0], &sgid_attr->gid.raw[0],
+ sizeof(cmd->params.sgid));
status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]);
if (status)
return status;
+
cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
(mac_addr[2] << 16) | (mac_addr[3] << 24);
+
+ hdr_type = rdma_gid_attr_network_type(sgid_attr);
+ if (hdr_type == RDMA_NETWORK_IPV4) {
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid);
+ rdma_gid2ip((struct sockaddr *)&dgid_addr, &grh->dgid);
+ memcpy(&cmd->params.dgid[0],
+ &dgid_addr._sockaddr_in.sin_addr.s_addr, 4);
+ memcpy(&cmd->params.sgid[0],
+ &sgid_addr._sockaddr_in.sin_addr.s_addr, 4);
+ }
/* convert them to LE format. */
ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
- if (attr_mask & IB_QP_VID) {
- vlan_id = attrs->vlan_id;
- } else if (dev->pfc_state) {
- vlan_id = 0;
- pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
- dev->id);
- pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
- dev->id);
- }
- if (vlan_id < 0x1000) {
+ if (vlan_id == 0xFFFF)
+ vlan_id = 0;
+ if (vlan_id || dev->pfc_state) {
+ if (!vlan_id) {
+ pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
+ dev->id);
+ pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
+ dev->id);
+ }
cmd->params.vlan_dmac_b4_to_b5 |=
vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
cmd->params.rnt_rc_sl_fl |=
(dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
}
-
+ cmd->params.max_sge_recv_flags |= ((hdr_type <<
+ OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_SHIFT) &
+ OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_MASK);
return 0;
}
@@ -2796,27 +2855,25 @@ int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
return status;
}
-int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
+void ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
{
- int status = -ENOMEM;
struct ocrdma_destroy_srq *cmd;
struct pci_dev *pdev = dev->nic_info.pdev;
cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_SRQ, sizeof(*cmd));
if (!cmd)
- return status;
+ return;
cmd->id = srq->id;
- status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
if (srq->rq.va)
dma_free_coherent(&pdev->dev, srq->rq.len,
srq->rq.va, srq->rq.pa);
kfree(cmd);
- return status;
}
static int ocrdma_mbx_get_dcbx_config(struct ocrdma_dev *dev, u32 ptype,
struct ocrdma_dcbx_cfg *dcbxcfg)
{
- int status = 0;
+ int status;
dma_addr_t pa;
struct ocrdma_mqe cmd;
@@ -2840,7 +2897,6 @@ static int ocrdma_mbx_get_dcbx_config(struct ocrdma_dev *dev, u32 ptype,
mqe_sge->pa_hi = (u32) upper_32_bits(pa);
mqe_sge->len = cmd.hdr.pyld_len;
- memset(req, 0, sizeof(struct ocrdma_get_dcbx_cfg_req));
ocrdma_init_mch(&req->hdr, OCRDMA_CMD_GET_DCBX_CONFIG,
OCRDMA_SUBSYS_DCBX, cmd.hdr.pyld_len);
req->param_type = ptype;
@@ -2909,7 +2965,7 @@ static int ocrdma_parse_dcbxcfg_rsp(struct ocrdma_dev *dev, int ptype,
OCRDMA_APP_PARAM_APP_PROTO_MASK;
- if (valid && proto == OCRDMA_APP_PROTO_ROCE &&
+ if (valid && proto == ETH_P_IBOE &&
proto_sel == OCRDMA_PROTO_SELECT_L2) {
for (slindx = 0; slindx <
OCRDMA_MAX_SERVICE_LEVEL_INDEX; slindx++) {
@@ -3000,13 +3056,12 @@ int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
return status;
}
-int ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
+void ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
{
unsigned long flags;
spin_lock_irqsave(&dev->av_tbl.lock, flags);
ah->av->valid = 0;
spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
- return 0;
}
static int ocrdma_create_eqs(struct ocrdma_dev *dev)
@@ -3027,7 +3082,7 @@ static int ocrdma_create_eqs(struct ocrdma_dev *dev)
if (!num_eq)
return -EINVAL;
- dev->eq_tbl = kzalloc(sizeof(struct ocrdma_eq) * num_eq, GFP_KERNEL);
+ dev->eq_tbl = kcalloc(num_eq, sizeof(struct ocrdma_eq), GFP_KERNEL);
if (!dev->eq_tbl)
return -ENOMEM;
@@ -3058,12 +3113,12 @@ done:
static int ocrdma_mbx_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
int num)
{
- int i, status = -ENOMEM;
+ int i, status;
struct ocrdma_modify_eqd_req *cmd;
cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_EQ_DELAY, sizeof(*cmd));
if (!cmd)
- return status;
+ return -ENOMEM;
ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_MODIFY_EQ_DELAY,
OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
@@ -3076,9 +3131,7 @@ static int ocrdma_mbx_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
(eq[i].aic_obj.prev_eqd * 65)/100;
}
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
- if (status)
- goto mbx_err;
-mbx_err:
+
kfree(cmd);
return status;
}
@@ -3104,8 +3157,8 @@ void ocrdma_eqd_set_task(struct work_struct *work)
{
struct ocrdma_dev *dev =
container_of(work, struct ocrdma_dev, eqd_work.work);
- struct ocrdma_eq *eq = 0;
- int i, num = 0, status = -EINVAL;
+ struct ocrdma_eq *eq = NULL;
+ int i, num = 0;
u64 eq_intr;
for (i = 0; i < dev->eq_cnt; i++) {
@@ -3127,7 +3180,7 @@ void ocrdma_eqd_set_task(struct work_struct *work)
}
if (num)
- status = ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num);
+ ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num);
schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
}
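Several hunks above replace open-coded BITS_TO_LONGS()/kzalloc() bitmap allocations with bitmap_zalloc() and bitmap_free(). A minimal sketch of that pairing used as a small ID pool; the names here are illustrative, not from the ocrdma driver:

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct id_pool {
	unsigned long *map;
	unsigned int max;
};

static int id_pool_init(struct id_pool *pool, unsigned int count)
{
	/* One bit per ID, zeroed; replaces kzalloc(BITS_TO_LONGS(count) * sizeof(long)). */
	pool->map = bitmap_zalloc(count, GFP_KERNEL);
	if (!pool->map)
		return -ENOMEM;
	pool->max = count;
	return 0;
}

static int id_pool_get(struct id_pool *pool)
{
	unsigned int id = find_first_zero_bit(pool->map, pool->max);

	if (id >= pool->max)
		return -ENOSPC;
	set_bit(id, pool->map);
	return id;
}

static void id_pool_destroy(struct id_pool *pool)
{
	bitmap_free(pool->map);	/* pairs with bitmap_zalloc() */
}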
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index 7ed885c1851e..12c23a7652b9 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -106,7 +106,8 @@ void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed,
bool solicited, u16 cqe_popped);
/* verbs specific mailbox commands */
-int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed);
+int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed,
+ u8 *lnk_st);
int ocrdma_query_config(struct ocrdma_dev *,
struct ocrdma_mbx_query_config *config);
@@ -121,7 +122,7 @@ int ocrdma_reg_mr(struct ocrdma_dev *, struct ocrdma_hw_mr *hwmr,
u32 pd_id, int acc);
int ocrdma_mbx_create_cq(struct ocrdma_dev *, struct ocrdma_cq *,
int entries, int dpp_cq, u16 pd_id);
-int ocrdma_mbx_destroy_cq(struct ocrdma_dev *, struct ocrdma_cq *);
+void ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq);
int ocrdma_mbx_create_qp(struct ocrdma_qp *, struct ib_qp_init_attr *attrs,
u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,
@@ -136,10 +137,10 @@ int ocrdma_mbx_create_srq(struct ocrdma_dev *, struct ocrdma_srq *,
struct ocrdma_pd *);
int ocrdma_mbx_modify_srq(struct ocrdma_srq *, struct ib_srq_attr *);
int ocrdma_mbx_query_srq(struct ocrdma_srq *, struct ib_srq_attr *);
-int ocrdma_mbx_destroy_srq(struct ocrdma_dev *, struct ocrdma_srq *);
+void ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq);
-int ocrdma_alloc_av(struct ocrdma_dev *, struct ocrdma_ah *);
-int ocrdma_free_av(struct ocrdma_dev *, struct ocrdma_ah *);
+int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah);
+void ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah);
int ocrdma_qp_state_change(struct ocrdma_qp *, enum ib_qp_state new_state,
enum ib_qp_state *old_ib_state);
@@ -153,5 +154,6 @@ char *port_speed_string(struct ocrdma_dev *dev);
void ocrdma_init_service_level(struct ocrdma_dev *);
void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev);
void ocrdma_free_pd_range(struct ocrdma_dev *dev);
+void ocrdma_update_link_state(struct ocrdma_dev *dev, u8 lstate);
#endif /* __OCRDMA_HW_H__ */
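With the extended ocrdma_mbx_get_link_speed() prototype above, callers pass NULL for whichever output they do not need and the function stores only through non-NULL pointers. A small illustrative sketch of that optional-output pattern, not the driver's actual code:

#include <stddef.h>

/* Illustrative stand-in for a mailbox query with two optional results. */
static int query_link(int *speed, int *state)
{
	int hw_speed = 10;	/* pretend mailbox results */
	int hw_state = 1;

	if (speed)
		*speed = hw_speed;
	if (state)
		*state = hw_state;
	return 0;
}

/* A probe path that only cares about the link state. */
static int probe_example(void)
{
	int lstate;

	return query_link(NULL, &lstate);
}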
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 87aa55df7c82..5d4b3bc16493 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -56,182 +56,193 @@
#include "be_roce.h"
#include "ocrdma_hw.h"
#include "ocrdma_stats.h"
-#include "ocrdma_abi.h"
+#include <rdma/ocrdma-abi.h>
-MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION);
MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("Dual BSD/GPL");
-static LIST_HEAD(ocrdma_dev_list);
-static DEFINE_SPINLOCK(ocrdma_devlist_lock);
-static DEFINE_IDR(ocrdma_dev_id);
-
-void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
-{
- u8 mac_addr[6];
-
- memcpy(&mac_addr[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
- guid[0] = mac_addr[0] ^ 2;
- guid[1] = mac_addr[1];
- guid[2] = mac_addr[2];
- guid[3] = 0xff;
- guid[4] = 0xfe;
- guid[5] = mac_addr[3];
- guid[6] = mac_addr[4];
- guid[7] = mac_addr[5];
-}
static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
- u8 port_num)
+ u32 port_num)
{
return IB_LINK_LAYER_ETHERNET;
}
-static int ocrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int ocrdma_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
+ struct ocrdma_dev *dev;
int err;
- err = ocrdma_query_port(ibdev, port_num, &attr);
+ dev = get_ocrdma_dev(ibdev);
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
+ if (ocrdma_is_udp_encap_supported(dev))
+ immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
+
+ err = ib_query_port(ibdev, port_num, &attr);
if (err)
return err;
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
immutable->max_mad_size = IB_MGMT_MAD_SIZE;
return 0;
}
+static void get_dev_fw_str(struct ib_device *device, char *str)
+{
+ struct ocrdma_dev *dev = get_ocrdma_dev(device);
+
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", &dev->attr.fw_ver[0]);
+}
+
+/* OCRDMA sysfs interface */
+static ssize_t hw_rev_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ocrdma_dev *dev =
+ rdma_device_to_drv_device(device, struct ocrdma_dev, ibdev);
+
+ return sysfs_emit(buf, "0x%x\n", dev->nic_info.pdev->vendor);
+}
+static DEVICE_ATTR_RO(hw_rev);
+
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ocrdma_dev *dev =
+ rdma_device_to_drv_device(device, struct ocrdma_dev, ibdev);
+
+ return sysfs_emit(buf, "%s\n", &dev->model_number[0]);
+}
+static DEVICE_ATTR_RO(hca_type);
+
+static struct attribute *ocrdma_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ NULL
+};
+
+static const struct attribute_group ocrdma_attr_group = {
+ .attrs = ocrdma_attributes,
+};
+
+static const struct ib_device_ops ocrdma_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_OCRDMA,
+ .uverbs_abi_ver = OCRDMA_ABI_VERSION,
+
+ .alloc_mr = ocrdma_alloc_mr,
+ .alloc_pd = ocrdma_alloc_pd,
+ .alloc_ucontext = ocrdma_alloc_ucontext,
+ .create_ah = ocrdma_create_ah,
+ .create_cq = ocrdma_create_cq,
+ .create_qp = ocrdma_create_qp,
+ .create_user_ah = ocrdma_create_ah,
+ .dealloc_pd = ocrdma_dealloc_pd,
+ .dealloc_ucontext = ocrdma_dealloc_ucontext,
+ .dereg_mr = ocrdma_dereg_mr,
+ .destroy_ah = ocrdma_destroy_ah,
+ .destroy_cq = ocrdma_destroy_cq,
+ .destroy_qp = ocrdma_destroy_qp,
+ .device_group = &ocrdma_attr_group,
+ .get_dev_fw_str = get_dev_fw_str,
+ .get_dma_mr = ocrdma_get_dma_mr,
+ .get_link_layer = ocrdma_link_layer,
+ .get_port_immutable = ocrdma_port_immutable,
+ .map_mr_sg = ocrdma_map_mr_sg,
+ .mmap = ocrdma_mmap,
+ .modify_qp = ocrdma_modify_qp,
+ .poll_cq = ocrdma_poll_cq,
+ .post_recv = ocrdma_post_recv,
+ .post_send = ocrdma_post_send,
+ .process_mad = ocrdma_process_mad,
+ .query_ah = ocrdma_query_ah,
+ .query_device = ocrdma_query_device,
+ .query_pkey = ocrdma_query_pkey,
+ .query_port = ocrdma_query_port,
+ .query_qp = ocrdma_query_qp,
+ .reg_user_mr = ocrdma_reg_user_mr,
+ .req_notify_cq = ocrdma_arm_cq,
+ .resize_cq = ocrdma_resize_cq,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, ocrdma_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_cq, ocrdma_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_pd, ocrdma_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, ocrdma_qp, ibqp),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, ocrdma_ucontext, ibucontext),
+};
+
+static const struct ib_device_ops ocrdma_dev_srq_ops = {
+ .create_srq = ocrdma_create_srq,
+ .destroy_srq = ocrdma_destroy_srq,
+ .modify_srq = ocrdma_modify_srq,
+ .post_srq_recv = ocrdma_post_srq_recv,
+ .query_srq = ocrdma_query_srq,
+
+ INIT_RDMA_OBJ_SIZE(ib_srq, ocrdma_srq, ibsrq),
+};
+
static int ocrdma_register_device(struct ocrdma_dev *dev)
{
- strlcpy(dev->ibdev.name, "ocrdma%d", IB_DEVICE_NAME_MAX);
- ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid);
+ int ret;
+
+ addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid,
+ dev->nic_info.mac_addr);
+ BUILD_BUG_ON(sizeof(OCRDMA_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
sizeof(OCRDMA_NODE_DESC));
- dev->ibdev.owner = THIS_MODULE;
- dev->ibdev.uverbs_abi_ver = OCRDMA_ABI_VERSION;
- dev->ibdev.uverbs_cmd_mask =
- OCRDMA_UVERBS(GET_CONTEXT) |
- OCRDMA_UVERBS(QUERY_DEVICE) |
- OCRDMA_UVERBS(QUERY_PORT) |
- OCRDMA_UVERBS(ALLOC_PD) |
- OCRDMA_UVERBS(DEALLOC_PD) |
- OCRDMA_UVERBS(REG_MR) |
- OCRDMA_UVERBS(DEREG_MR) |
- OCRDMA_UVERBS(CREATE_COMP_CHANNEL) |
- OCRDMA_UVERBS(CREATE_CQ) |
- OCRDMA_UVERBS(RESIZE_CQ) |
- OCRDMA_UVERBS(DESTROY_CQ) |
- OCRDMA_UVERBS(REQ_NOTIFY_CQ) |
- OCRDMA_UVERBS(CREATE_QP) |
- OCRDMA_UVERBS(MODIFY_QP) |
- OCRDMA_UVERBS(QUERY_QP) |
- OCRDMA_UVERBS(DESTROY_QP) |
- OCRDMA_UVERBS(POLL_CQ) |
- OCRDMA_UVERBS(POST_SEND) |
- OCRDMA_UVERBS(POST_RECV);
-
- dev->ibdev.uverbs_cmd_mask |=
- OCRDMA_UVERBS(CREATE_AH) |
- OCRDMA_UVERBS(MODIFY_AH) |
- OCRDMA_UVERBS(QUERY_AH) |
- OCRDMA_UVERBS(DESTROY_AH);
dev->ibdev.node_type = RDMA_NODE_IB_CA;
dev->ibdev.phys_port_cnt = 1;
dev->ibdev.num_comp_vectors = dev->eq_cnt;
- /* mandatory verbs. */
- dev->ibdev.query_device = ocrdma_query_device;
- dev->ibdev.query_port = ocrdma_query_port;
- dev->ibdev.modify_port = ocrdma_modify_port;
- dev->ibdev.query_gid = ocrdma_query_gid;
- dev->ibdev.get_netdev = ocrdma_get_netdev;
- dev->ibdev.add_gid = ocrdma_add_gid;
- dev->ibdev.del_gid = ocrdma_del_gid;
- dev->ibdev.get_link_layer = ocrdma_link_layer;
- dev->ibdev.alloc_pd = ocrdma_alloc_pd;
- dev->ibdev.dealloc_pd = ocrdma_dealloc_pd;
-
- dev->ibdev.create_cq = ocrdma_create_cq;
- dev->ibdev.destroy_cq = ocrdma_destroy_cq;
- dev->ibdev.resize_cq = ocrdma_resize_cq;
-
- dev->ibdev.create_qp = ocrdma_create_qp;
- dev->ibdev.modify_qp = ocrdma_modify_qp;
- dev->ibdev.query_qp = ocrdma_query_qp;
- dev->ibdev.destroy_qp = ocrdma_destroy_qp;
-
- dev->ibdev.query_pkey = ocrdma_query_pkey;
- dev->ibdev.create_ah = ocrdma_create_ah;
- dev->ibdev.destroy_ah = ocrdma_destroy_ah;
- dev->ibdev.query_ah = ocrdma_query_ah;
- dev->ibdev.modify_ah = ocrdma_modify_ah;
-
- dev->ibdev.poll_cq = ocrdma_poll_cq;
- dev->ibdev.post_send = ocrdma_post_send;
- dev->ibdev.post_recv = ocrdma_post_recv;
- dev->ibdev.req_notify_cq = ocrdma_arm_cq;
-
- dev->ibdev.get_dma_mr = ocrdma_get_dma_mr;
- dev->ibdev.reg_phys_mr = ocrdma_reg_kernel_mr;
- dev->ibdev.dereg_mr = ocrdma_dereg_mr;
- dev->ibdev.reg_user_mr = ocrdma_reg_user_mr;
-
- dev->ibdev.alloc_mr = ocrdma_alloc_mr;
- dev->ibdev.alloc_fast_reg_page_list = ocrdma_alloc_frmr_page_list;
- dev->ibdev.free_fast_reg_page_list = ocrdma_free_frmr_page_list;
-
/* mandatory to support user space verbs consumer. */
- dev->ibdev.alloc_ucontext = ocrdma_alloc_ucontext;
- dev->ibdev.dealloc_ucontext = ocrdma_dealloc_ucontext;
- dev->ibdev.mmap = ocrdma_mmap;
- dev->ibdev.dma_device = &dev->nic_info.pdev->dev;
-
- dev->ibdev.process_mad = ocrdma_process_mad;
- dev->ibdev.get_port_immutable = ocrdma_port_immutable;
-
- if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
- dev->ibdev.uverbs_cmd_mask |=
- OCRDMA_UVERBS(CREATE_SRQ) |
- OCRDMA_UVERBS(MODIFY_SRQ) |
- OCRDMA_UVERBS(QUERY_SRQ) |
- OCRDMA_UVERBS(DESTROY_SRQ) |
- OCRDMA_UVERBS(POST_SRQ_RECV);
-
- dev->ibdev.create_srq = ocrdma_create_srq;
- dev->ibdev.modify_srq = ocrdma_modify_srq;
- dev->ibdev.query_srq = ocrdma_query_srq;
- dev->ibdev.destroy_srq = ocrdma_destroy_srq;
- dev->ibdev.post_srq_recv = ocrdma_post_srq_recv;
- }
- return ib_register_device(&dev->ibdev, NULL);
+ dev->ibdev.dev.parent = &dev->nic_info.pdev->dev;
+
+ ib_set_device_ops(&dev->ibdev, &ocrdma_dev_ops);
+
+ if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R)
+ ib_set_device_ops(&dev->ibdev, &ocrdma_dev_srq_ops);
+
+ ret = ib_device_set_netdev(&dev->ibdev, dev->nic_info.netdev, 1);
+ if (ret)
+ return ret;
+
+ dma_set_max_seg_size(&dev->nic_info.pdev->dev, UINT_MAX);
+ return ib_register_device(&dev->ibdev, "ocrdma%d",
+ &dev->nic_info.pdev->dev);
}
static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
{
mutex_init(&dev->dev_lock);
- dev->cq_tbl = kzalloc(sizeof(struct ocrdma_cq *) *
- OCRDMA_MAX_CQ, GFP_KERNEL);
+ dev->cq_tbl = kcalloc(OCRDMA_MAX_CQ, sizeof(struct ocrdma_cq *),
+ GFP_KERNEL);
if (!dev->cq_tbl)
goto alloc_err;
if (dev->attr.max_qp) {
- dev->qp_tbl = kzalloc(sizeof(struct ocrdma_qp *) *
- OCRDMA_MAX_QP, GFP_KERNEL);
+ dev->qp_tbl = kcalloc(OCRDMA_MAX_QP,
+ sizeof(struct ocrdma_qp *),
+ GFP_KERNEL);
if (!dev->qp_tbl)
goto alloc_err;
}
- dev->stag_arr = kzalloc(sizeof(u64) * OCRDMA_MAX_STAG, GFP_KERNEL);
+ dev->stag_arr = kcalloc(OCRDMA_MAX_STAG, sizeof(u64), GFP_KERNEL);
if (dev->stag_arr == NULL)
goto alloc_err;
ocrdma_alloc_pd_pool(dev);
+ if (!ocrdma_alloc_stats_resources(dev)) {
+ pr_err("%s: stats resource allocation failed\n", __func__);
+ goto alloc_err;
+ }
+
spin_lock_init(&dev->av_tbl.lock);
spin_lock_init(&dev->flush_q_lock);
return 0;
@@ -242,73 +253,30 @@ alloc_err:
static void ocrdma_free_resources(struct ocrdma_dev *dev)
{
+ ocrdma_release_stats_resources(dev);
kfree(dev->stag_arr);
kfree(dev->qp_tbl);
kfree(dev->cq_tbl);
}
-/* OCRDMA sysfs interface */
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct ocrdma_dev *dev = dev_get_drvdata(device);
-
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->nic_info.pdev->vendor);
-}
-
-static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct ocrdma_dev *dev = dev_get_drvdata(device);
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->attr.fw_ver[0]);
-}
-
-static ssize_t show_hca_type(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct ocrdma_dev *dev = dev_get_drvdata(device);
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->model_number[0]);
-}
-
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);
-
-static struct device_attribute *ocrdma_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_fw_ver,
- &dev_attr_hca_type
-};
-
-static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++)
- device_remove_file(&dev->ibdev.dev, ocrdma_attributes[i]);
-}
-
static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
{
- int status = 0, i;
+ int status = 0;
+ u8 lstate = 0;
struct ocrdma_dev *dev;
- dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev));
+ dev = ib_alloc_device(ocrdma_dev, ibdev);
if (!dev) {
pr_err("Unable to allocate ib device\n");
return NULL;
}
+
dev->mbx_cmd = kzalloc(sizeof(struct ocrdma_mqe_emb_cmd), GFP_KERNEL);
if (!dev->mbx_cmd)
- goto idr_err;
+ goto init_err;
memcpy(&dev->nic_info, dev_info, sizeof(*dev_info));
- dev->id = idr_alloc(&ocrdma_dev_id, NULL, 0, 0, GFP_KERNEL);
- if (dev->id < 0)
- goto idr_err;
-
+ dev->id = PCI_FUNC(dev->nic_info.pdev->devfn);
status = ocrdma_init_hw(dev);
if (status)
goto init_err;
@@ -322,12 +290,11 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
if (status)
goto alloc_err;
- for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++)
- if (device_create_file(&dev->ibdev.dev, ocrdma_attributes[i]))
- goto sysfs_err;
- spin_lock(&ocrdma_devlist_lock);
- list_add_tail_rcu(&dev->entry, &ocrdma_dev_list);
- spin_unlock(&ocrdma_devlist_lock);
+ /* Query the link state and update it */
+ status = ocrdma_mbx_get_link_speed(dev, NULL, &lstate);
+ if (!status)
+ ocrdma_update_link_state(dev, lstate);
+
/* Init stats */
ocrdma_add_port_stats(dev);
/* Interrupt Moderation */
@@ -342,25 +309,19 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
dev_name(&dev->nic_info.pdev->dev), dev->id);
return dev;
-sysfs_err:
- ocrdma_remove_sysfiles(dev);
alloc_err:
ocrdma_free_resources(dev);
ocrdma_cleanup_hw(dev);
init_err:
- idr_remove(&ocrdma_dev_id, dev->id);
-idr_err:
kfree(dev->mbx_cmd);
ib_dealloc_device(&dev->ibdev);
pr_err("%s() leaving. ret=%d\n", __func__, status);
return NULL;
}
-static void ocrdma_remove_free(struct rcu_head *rcu)
+static void ocrdma_remove_free(struct ocrdma_dev *dev)
{
- struct ocrdma_dev *dev = container_of(rcu, struct ocrdma_dev, rcu);
- idr_remove(&ocrdma_dev_id, dev->id);
kfree(dev->mbx_cmd);
ib_dealloc_device(&dev->ibdev);
}
@@ -371,22 +332,15 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
* of the registered clients.
*/
cancel_delayed_work_sync(&dev->eqd_work);
- ocrdma_remove_sysfiles(dev);
ib_unregister_device(&dev->ibdev);
ocrdma_rem_port_stats(dev);
-
- spin_lock(&ocrdma_devlist_lock);
- list_del_rcu(&dev->entry);
- spin_unlock(&ocrdma_devlist_lock);
-
ocrdma_free_resources(dev);
ocrdma_cleanup_hw(dev);
-
- call_rcu(&dev->rcu, ocrdma_remove_free);
+ ocrdma_remove_free(dev);
}
-static int ocrdma_open(struct ocrdma_dev *dev)
+static int ocrdma_dispatch_port_active(struct ocrdma_dev *dev)
{
struct ib_event port_event;
@@ -397,32 +351,9 @@ static int ocrdma_open(struct ocrdma_dev *dev)
return 0;
}
-static int ocrdma_close(struct ocrdma_dev *dev)
+static int ocrdma_dispatch_port_error(struct ocrdma_dev *dev)
{
- int i;
- struct ocrdma_qp *qp, **cur_qp;
struct ib_event err_event;
- struct ib_qp_attr attrs;
- int attr_mask = IB_QP_STATE;
-
- attrs.qp_state = IB_QPS_ERR;
- mutex_lock(&dev->dev_lock);
- if (dev->qp_tbl) {
- cur_qp = dev->qp_tbl;
- for (i = 0; i < OCRDMA_MAX_QP; i++) {
- qp = cur_qp[i];
- if (qp && qp->ibqp.qp_type != IB_QPT_GSI) {
- /* change the QP state to ERROR */
- _ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask);
-
- err_event.event = IB_EVENT_QP_FATAL;
- err_event.element.qp = &qp->ibqp;
- err_event.device = &dev->ibdev;
- ib_dispatch_event(&err_event);
- }
- }
- }
- mutex_unlock(&dev->dev_lock);
err_event.event = IB_EVENT_PORT_ERR;
err_event.element.port_num = 1;
@@ -433,7 +364,7 @@ static int ocrdma_close(struct ocrdma_dev *dev)
static void ocrdma_shutdown(struct ocrdma_dev *dev)
{
- ocrdma_close(dev);
+ ocrdma_dispatch_port_error(dev);
ocrdma_remove(dev);
}
@@ -444,18 +375,28 @@ static void ocrdma_shutdown(struct ocrdma_dev *dev)
static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
{
switch (event) {
- case BE_DEV_UP:
- ocrdma_open(dev);
- break;
- case BE_DEV_DOWN:
- ocrdma_close(dev);
- break;
case BE_DEV_SHUTDOWN:
ocrdma_shutdown(dev);
break;
+ default:
+ break;
}
}
+void ocrdma_update_link_state(struct ocrdma_dev *dev, u8 lstate)
+{
+ if (!(dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT)) {
+ dev->flags |= OCRDMA_FLAGS_LINK_STATUS_INIT;
+ if (!lstate)
+ return;
+ }
+
+ if (!lstate)
+ ocrdma_dispatch_port_error(dev);
+ else
+ ocrdma_dispatch_port_active(dev);
+}
+
static struct ocrdma_driver ocrdma_drv = {
.name = "ocrdma_driver",
.add = ocrdma_add,
@@ -485,7 +426,6 @@ static void __exit ocrdma_exit_module(void)
{
be_roce_unregister_driver(&ocrdma_drv);
ocrdma_rem_debugfs();
- idr_destroy(&ocrdma_dev_id);
}
module_init(ocrdma_init_module);
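The removed ocrdma_get_guid() earlier in this file open-coded the MAC-to-node-GUID expansion that addrconf_addr_eui48() now provides. A standalone sketch of that modified EUI-64 construction (flip the locally administered bit, splice in 0xff/0xfe):

#include <stdint.h>

/* EUI-48 (MAC) -> modified EUI-64, as the removed helper did by hand. */
static void mac_to_node_guid(const uint8_t mac[6], uint8_t guid[8])
{
	guid[0] = mac[0] ^ 0x02;	/* invert the universal/local bit */
	guid[1] = mac[1];
	guid[2] = mac[2];
	guid[3] = 0xff;			/* fixed filler bytes */
	guid[4] = 0xfe;
	guid[5] = mac[3];
	guid[6] = mac[4];
	guid[7] = mac[5];
}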
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 6a38268bbe9f..c2e0d0fa44be 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -140,7 +140,11 @@ enum {
OCRDMA_DB_RQ_SHIFT = 24
};
-#define OCRDMA_ROUDP_FLAGS_SHIFT 0x03
+enum {
+ OCRDMA_L3_TYPE_IB_GRH = 0x00,
+ OCRDMA_L3_TYPE_IPV4 = 0x01,
+ OCRDMA_L3_TYPE_IPV6 = 0x02
+};
#define OCRDMA_DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
#define OCRDMA_DB_CQ_RING_ID_EXT_MASK 0x0C00 /* bits 10-11 of qid at 12-11 */
@@ -465,8 +469,11 @@ struct ocrdma_ae_qp_mcqe {
u32 valid_ae_event;
};
-#define OCRDMA_ASYNC_RDMA_EVE_CODE 0x14
-#define OCRDMA_ASYNC_GRP5_EVE_CODE 0x5
+enum ocrdma_async_event_code {
+ OCRDMA_ASYNC_LINK_EVE_CODE = 0x01,
+ OCRDMA_ASYNC_GRP5_EVE_CODE = 0x05,
+ OCRDMA_ASYNC_RDMA_EVE_CODE = 0x14
+};
enum ocrdma_async_grp5_events {
OCRDMA_ASYNC_EVENT_QOS_VALUE = 0x01,
@@ -489,6 +496,44 @@ enum OCRDMA_ASYNC_EVENT_TYPE {
OCRDMA_MAX_ASYNC_ERRORS
};
+struct ocrdma_ae_lnkst_mcqe {
+ u32 speed_state_ptn;
+ u32 qos_reason_falut;
+ u32 evt_tag;
+ u32 valid_ae_event;
+};
+
+enum {
+ OCRDMA_AE_LSC_PORT_NUM_MASK = 0x3F,
+ OCRDMA_AE_LSC_PT_SHIFT = 0x06,
+ OCRDMA_AE_LSC_PT_MASK = (0x03 <<
+ OCRDMA_AE_LSC_PT_SHIFT),
+ OCRDMA_AE_LSC_LS_SHIFT = 0x08,
+ OCRDMA_AE_LSC_LS_MASK = (0xFF <<
+ OCRDMA_AE_LSC_LS_SHIFT),
+ OCRDMA_AE_LSC_LD_SHIFT = 0x10,
+ OCRDMA_AE_LSC_LD_MASK = (0xFF <<
+ OCRDMA_AE_LSC_LD_SHIFT),
+ OCRDMA_AE_LSC_PPS_SHIFT = 0x18,
+ OCRDMA_AE_LSC_PPS_MASK = (0xFF <<
+ OCRDMA_AE_LSC_PPS_SHIFT),
+ OCRDMA_AE_LSC_PPF_MASK = 0xFF,
+ OCRDMA_AE_LSC_ER_SHIFT = 0x08,
+ OCRDMA_AE_LSC_ER_MASK = (0xFF <<
+ OCRDMA_AE_LSC_ER_SHIFT),
+ OCRDMA_AE_LSC_QOS_SHIFT = 0x10,
+ OCRDMA_AE_LSC_QOS_MASK = (0xFFFF <<
+ OCRDMA_AE_LSC_QOS_SHIFT)
+};
+
+enum {
+ OCRDMA_AE_LSC_PLINK_DOWN = 0x00,
+ OCRDMA_AE_LSC_PLINK_UP = 0x01,
+ OCRDMA_AE_LSC_LLINK_DOWN = 0x02,
+ OCRDMA_AE_LSC_LLINK_MASK = 0x02,
+ OCRDMA_AE_LSC_LLINK_UP = 0x03
+};
+
/* mailbox command request and responses */
enum {
OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT = 2,
@@ -505,12 +550,13 @@ enum {
OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT = 8,
OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK = 0xFF <<
OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT,
-
+ OCRDMA_MBX_QUERY_CFG_L3_TYPE_SHIFT = 3,
+ OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK = 0x18,
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0,
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF,
- OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT = 16,
- OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK = 0xFFFF <<
- OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT,
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT = 16,
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK = 0xFFFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT,
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0,
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF,
@@ -566,6 +612,8 @@ enum {
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET = 0,
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK = 0xFFFF <<
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET,
+ OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT = 0,
+ OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK = 0xFFFF,
};
struct ocrdma_mbx_query_config {
@@ -573,7 +621,7 @@ struct ocrdma_mbx_query_config {
struct ocrdma_mbx_rsp rsp;
u32 qp_srq_cq_ird_ord;
u32 max_pd_ca_ack_delay;
- u32 max_write_send_sge;
+ u32 max_recv_send_sge;
u32 max_ird_ord_per_qp;
u32 max_shared_ird_ord;
u32 max_mr;
@@ -593,6 +641,8 @@ struct ocrdma_mbx_query_config {
u32 max_wqes_rqes_per_q;
u32 max_cq_cqes_per_cq;
u32 max_srq_rqe_sge;
+ u32 max_wr_rd_sge;
+ u32 ird_pgsz_num_pages;
};
struct ocrdma_fw_ver_rsp {
@@ -676,7 +726,7 @@ enum {
OCRDMA_PHY_PFLT_SHIFT = 0x18,
OCRDMA_QOS_LNKSP_MASK = 0xFFFF0000,
OCRDMA_QOS_LNKSP_SHIFT = 0x10,
- OCRDMA_LLST_MASK = 0xFF,
+ OCRDMA_LINK_ST_MASK = 0x01,
OCRDMA_PLFC_MASK = 0x00000400,
OCRDMA_PLFC_SHIFT = 0x8,
OCRDMA_PLRFC_MASK = 0x00000200,
@@ -691,7 +741,7 @@ struct ocrdma_get_link_speed_rsp {
u32 pflt_pps_ld_pnum;
u32 qos_lsp;
- u32 res_lls;
+ u32 res_lnk_st;
};
enum {
@@ -1066,6 +1116,8 @@ enum {
OCRDMA_QP_PARAMS_STATE_MASK = BIT(5) | BIT(6) | BIT(7),
OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC = BIT(8),
OCRDMA_QP_PARAMS_FLAGS_INB_ATEN = BIT(9),
+ OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_SHIFT = 11,
+ OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_MASK = BIT(11) | BIT(12) | BIT(13),
OCRDMA_QP_PARAMS_MAX_SGE_RECV_SHIFT = 16,
OCRDMA_QP_PARAMS_MAX_SGE_RECV_MASK = 0xFFFF <<
OCRDMA_QP_PARAMS_MAX_SGE_RECV_SHIFT,
@@ -1694,8 +1746,11 @@ enum {
/* w1 */
OCRDMA_CQE_UD_XFER_LEN_SHIFT = 16,
+ OCRDMA_CQE_UD_XFER_LEN_MASK = 0x1FFF,
OCRDMA_CQE_PKEY_SHIFT = 0,
OCRDMA_CQE_PKEY_MASK = 0xFFFF,
+ OCRDMA_CQE_UD_L3TYPE_SHIFT = 29,
+ OCRDMA_CQE_UD_L3TYPE_MASK = 0x07,
/* w2 */
OCRDMA_CQE_QPN_SHIFT = 0,
@@ -1820,7 +1875,7 @@ struct ocrdma_ewqe_ud_hdr {
u32 rsvd_dest_qpn;
u32 qkey;
u32 rsvd_ahid;
- u32 rsvd;
+ u32 hdr_type;
};
/* extended wqe followed by hdr_wqe for Fast Memory register */
@@ -1846,7 +1901,6 @@ struct ocrdma_eth_vlan {
u8 smac[6];
__be16 eth_type;
__be16 vlan_tag;
-#define OCRDMA_ROCE_ETH_TYPE 0x8915
__be16 roce_eth_type;
} __packed;
@@ -1980,7 +2034,7 @@ struct ocrdma_rx_stats {
};
struct ocrdma_rx_qp_err_stats {
- u32 nak_invalid_requst_errors;
+ u32 nak_invalid_request_errors;
u32 nak_remote_operation_errors;
u32 nak_count_remote_access_errors;
u32 local_length_errors;
@@ -2124,10 +2178,6 @@ enum OCRDMA_DCBX_PARAM_TYPE {
OCRDMA_PARAMETER_TYPE_PEER = 0x02
};
-enum OCRDMA_DCBX_APP_PROTO {
- OCRDMA_APP_PROTO_ROCE = 0x8915
-};
-
enum OCRDMA_DCBX_PROTO {
OCRDMA_PROTO_SELECT_L2 = 0x00,
OCRDMA_PROTO_SELECT_L4 = 0x01
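Per the OCRDMA_AE_LSC_* layout above, the link state travels in bits 8-15 of the async CQE's first word; within that byte, bit 1 flags a logical-link event and bit 0 carries up/down, which is how ocrdma_process_link_state() earlier consumes it. A hedged sketch of the decode with illustrative helper names:

#include <stdint.h>

#define AE_LSC_LS_SHIFT		0x08
#define AE_LSC_LS_MASK		(0xFF << AE_LSC_LS_SHIFT)
#define AE_LSC_LLINK_MASK	0x02
#define LINK_ST_MASK		0x01

/* Pull the 8-bit link-state field out of speed_state_ptn. */
static inline uint8_t ae_link_state(uint32_t speed_state_ptn)
{
	return (speed_state_ptn & AE_LSC_LS_MASK) >> AE_LSC_LS_SHIFT;
}

/* Returns 1 for logical-link up, 0 for down, -1 if not a logical-link event. */
static inline int ae_logical_link_up(uint32_t speed_state_ptn)
{
	uint8_t lstate = ae_link_state(speed_state_ptn);

	if (!(lstate & AE_LSC_LLINK_MASK))
		return -1;
	return lstate & LINK_ST_MASK;
}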
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 69334e214571..0834416cb3f8 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -46,7 +46,7 @@
static struct dentry *ocrdma_dbgfs_dir;
-static int ocrdma_add_stat(char *start, char *pcur,
+static noinline_for_stack int ocrdma_add_stat(char *start, char *pcur,
char *name, u64 count)
{
char buff[128] = {0};
@@ -64,44 +64,42 @@ static int ocrdma_add_stat(char *start, char *pcur,
return cpy_len;
}
-static bool ocrdma_alloc_stats_mem(struct ocrdma_dev *dev)
+bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
{
struct stats_mem *mem = &dev->stats_mem;
+ mutex_init(&dev->stats_lock);
/* Alloc mbox command mem*/
mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
sizeof(struct ocrdma_rdma_stats_resp));
- mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
- &mem->pa, GFP_KERNEL);
+ mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
+ &mem->pa, GFP_KERNEL);
if (!mem->va) {
pr_err("%s: stats mbox allocation failed\n", __func__);
return false;
}
- memset(mem->va, 0, mem->size);
-
/* Alloc debugfs mem */
mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL);
- if (!mem->debugfs_mem) {
- pr_err("%s: stats debugfs mem allocation failed\n", __func__);
+ if (!mem->debugfs_mem)
return false;
- }
return true;
}
-static void ocrdma_release_stats_mem(struct ocrdma_dev *dev)
+void ocrdma_release_stats_resources(struct ocrdma_dev *dev)
{
struct stats_mem *mem = &dev->stats_mem;
if (mem->va)
dma_free_coherent(&dev->nic_info.pdev->dev, mem->size,
mem->va, mem->pa);
+ mem->va = NULL;
kfree(mem->debugfs_mem);
}
-static char *ocrdma_resource_stats(struct ocrdma_dev *dev)
+static noinline_for_stack char *ocrdma_resource_stats(struct ocrdma_dev *dev)
{
char *stats = dev->stats_mem.debugfs_mem, *pcur;
struct ocrdma_rdma_stats_resp *rdma_stats =
@@ -218,7 +216,7 @@ static char *ocrdma_resource_stats(struct ocrdma_dev *dev)
return stats;
}
-static char *ocrdma_rx_stats(struct ocrdma_dev *dev)
+static noinline_for_stack char *ocrdma_rx_stats(struct ocrdma_dev *dev)
{
char *stats = dev->stats_mem.debugfs_mem, *pcur;
struct ocrdma_rdma_stats_resp *rdma_stats =
@@ -286,7 +284,7 @@ static u64 ocrdma_sysfs_rcv_data(struct ocrdma_dev *dev)
rx_stats->roce_frame_bytes_hi))/4;
}
-static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
+static noinline_for_stack char *ocrdma_tx_stats(struct ocrdma_dev *dev)
{
char *stats = dev->stats_mem.debugfs_mem, *pcur;
struct ocrdma_rdma_stats_resp *rdma_stats =
@@ -360,7 +358,7 @@ static u64 ocrdma_sysfs_xmit_data(struct ocrdma_dev *dev)
tx_stats->read_rsp_bytes_hi))/4;
}
-static char *ocrdma_wqe_stats(struct ocrdma_dev *dev)
+static noinline_for_stack char *ocrdma_wqe_stats(struct ocrdma_dev *dev)
{
char *stats = dev->stats_mem.debugfs_mem, *pcur;
struct ocrdma_rdma_stats_resp *rdma_stats =
@@ -393,7 +391,7 @@ static char *ocrdma_wqe_stats(struct ocrdma_dev *dev)
return stats;
}
-static char *ocrdma_db_errstats(struct ocrdma_dev *dev)
+static noinline_for_stack char *ocrdma_db_errstats(struct ocrdma_dev *dev)
{
char *stats = dev->stats_mem.debugfs_mem, *pcur;
struct ocrdma_rdma_stats_resp *rdma_stats =
@@ -414,7 +412,7 @@ static char *ocrdma_db_errstats(struct ocrdma_dev *dev)
return stats;
}
-static char *ocrdma_rxqp_errstats(struct ocrdma_dev *dev)
+static noinline_for_stack char *ocrdma_rxqp_errstats(struct ocrdma_dev *dev)
{
char *stats = dev->stats_mem.debugfs_mem, *pcur;
struct ocrdma_rdma_stats_resp *rdma_stats =
@@ -425,8 +423,8 @@ static char *ocrdma_rxqp_errstats(struct ocrdma_dev *dev)
memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
pcur = stats;
- pcur += ocrdma_add_stat(stats, pcur, "nak_invalid_requst_errors",
- (u64)rx_qp_err_stats->nak_invalid_requst_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "nak_invalid_request_errors",
+ (u64)rx_qp_err_stats->nak_invalid_request_errors);
pcur += ocrdma_add_stat(stats, pcur, "nak_remote_operation_errors",
(u64)rx_qp_err_stats->nak_remote_operation_errors);
pcur += ocrdma_add_stat(stats, pcur, "nak_count_remote_access_errors",
@@ -440,7 +438,7 @@ static char *ocrdma_rxqp_errstats(struct ocrdma_dev *dev)
return stats;
}
-static char *ocrdma_txqp_errstats(struct ocrdma_dev *dev)
+static noinline_for_stack char *ocrdma_txqp_errstats(struct ocrdma_dev *dev)
{
char *stats = dev->stats_mem.debugfs_mem, *pcur;
struct ocrdma_rdma_stats_resp *rdma_stats =
@@ -464,7 +462,7 @@ static char *ocrdma_txqp_errstats(struct ocrdma_dev *dev)
return stats;
}
-static char *ocrdma_tx_dbg_stats(struct ocrdma_dev *dev)
+static noinline_for_stack char *ocrdma_tx_dbg_stats(struct ocrdma_dev *dev)
{
int i;
char *pstats = dev->stats_mem.debugfs_mem;
@@ -482,7 +480,7 @@ static char *ocrdma_tx_dbg_stats(struct ocrdma_dev *dev)
return dev->stats_mem.debugfs_mem;
}
-static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev)
+static noinline_for_stack char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev)
{
int i;
char *pstats = dev->stats_mem.debugfs_mem;
@@ -500,7 +498,7 @@ static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev)
return dev->stats_mem.debugfs_mem;
}
-static char *ocrdma_driver_dbg_stats(struct ocrdma_dev *dev)
+static noinline_for_stack char *ocrdma_driver_dbg_stats(struct ocrdma_dev *dev)
{
char *stats = dev->stats_mem.debugfs_mem, *pcur;
@@ -608,7 +606,7 @@ static char *ocrdma_driver_dbg_stats(struct ocrdma_dev *dev)
static void ocrdma_update_stats(struct ocrdma_dev *dev)
{
ulong now = jiffies, secs;
- int status = 0;
+ int status;
struct ocrdma_rdma_stats_resp *rdma_stats =
(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;
@@ -639,11 +637,11 @@ static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
{
char tmp_str[32];
long reset;
- int status = 0;
+ int status;
struct ocrdma_stats *pstats = filp->private_data;
struct ocrdma_dev *dev = pstats->dev;
- if (count > 32)
+ if (*ppos != 0 || count == 0 || count > sizeof(tmp_str))
goto err;
if (copy_from_user(tmp_str, buffer, count))
@@ -658,7 +656,7 @@ static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
if (reset) {
status = ocrdma_mbx_rdma_stats(dev, true);
if (status) {
- pr_err("Failed to reset stats = %d", status);
+ pr_err("Failed to reset stats = %d\n", status);
goto err;
}
}
@@ -672,12 +670,10 @@ err:
return -EFAULT;
}
-int ocrdma_pma_counters(struct ocrdma_dev *dev,
- struct ib_mad *out_mad)
+void ocrdma_pma_counters(struct ocrdma_dev *dev, struct ib_mad *out_mad)
{
struct ib_pma_portcounters *pma_cnt;
- memset(out_mad->data, 0, sizeof out_mad->data);
pma_cnt = (void *)(out_mad->data + 40);
ocrdma_update_stats(dev);
@@ -685,7 +681,6 @@ int ocrdma_pma_counters(struct ocrdma_dev *dev,
pma_cnt->port_rcv_data = cpu_to_be32(ocrdma_sysfs_rcv_data(dev));
pma_cnt->port_xmit_packets = cpu_to_be32(ocrdma_sysfs_xmit_pkts(dev));
pma_cnt->port_rcv_packets = cpu_to_be32(ocrdma_sysfs_rcv_pkts(dev));
- return 0;
}
static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
@@ -762,102 +757,73 @@ static const struct file_operations ocrdma_dbg_ops = {
void ocrdma_add_port_stats(struct ocrdma_dev *dev)
{
+ const struct pci_dev *pdev = dev->nic_info.pdev;
+
if (!ocrdma_dbgfs_dir)
return;
/* Create post stats base dir */
- dev->dir = debugfs_create_dir(dev->ibdev.name, ocrdma_dbgfs_dir);
- if (!dev->dir)
- goto err;
+ dev->dir = debugfs_create_dir(pci_name(pdev), ocrdma_dbgfs_dir);
dev->rsrc_stats.type = OCRDMA_RSRC_STATS;
dev->rsrc_stats.dev = dev;
- if (!debugfs_create_file("resource_stats", S_IRUSR, dev->dir,
- &dev->rsrc_stats, &ocrdma_dbg_ops))
- goto err;
+ debugfs_create_file("resource_stats", S_IRUSR, dev->dir,
+ &dev->rsrc_stats, &ocrdma_dbg_ops);
dev->rx_stats.type = OCRDMA_RXSTATS;
dev->rx_stats.dev = dev;
- if (!debugfs_create_file("rx_stats", S_IRUSR, dev->dir,
- &dev->rx_stats, &ocrdma_dbg_ops))
- goto err;
+ debugfs_create_file("rx_stats", S_IRUSR, dev->dir, &dev->rx_stats,
+ &ocrdma_dbg_ops);
dev->wqe_stats.type = OCRDMA_WQESTATS;
dev->wqe_stats.dev = dev;
- if (!debugfs_create_file("wqe_stats", S_IRUSR, dev->dir,
- &dev->wqe_stats, &ocrdma_dbg_ops))
- goto err;
+ debugfs_create_file("wqe_stats", S_IRUSR, dev->dir, &dev->wqe_stats,
+ &ocrdma_dbg_ops);
dev->tx_stats.type = OCRDMA_TXSTATS;
dev->tx_stats.dev = dev;
- if (!debugfs_create_file("tx_stats", S_IRUSR, dev->dir,
- &dev->tx_stats, &ocrdma_dbg_ops))
- goto err;
+ debugfs_create_file("tx_stats", S_IRUSR, dev->dir, &dev->tx_stats,
+ &ocrdma_dbg_ops);
dev->db_err_stats.type = OCRDMA_DB_ERRSTATS;
dev->db_err_stats.dev = dev;
- if (!debugfs_create_file("db_err_stats", S_IRUSR, dev->dir,
- &dev->db_err_stats, &ocrdma_dbg_ops))
- goto err;
-
+ debugfs_create_file("db_err_stats", S_IRUSR, dev->dir,
+ &dev->db_err_stats, &ocrdma_dbg_ops);
dev->tx_qp_err_stats.type = OCRDMA_TXQP_ERRSTATS;
dev->tx_qp_err_stats.dev = dev;
- if (!debugfs_create_file("tx_qp_err_stats", S_IRUSR, dev->dir,
- &dev->tx_qp_err_stats, &ocrdma_dbg_ops))
- goto err;
+ debugfs_create_file("tx_qp_err_stats", S_IRUSR, dev->dir,
+ &dev->tx_qp_err_stats, &ocrdma_dbg_ops);
dev->rx_qp_err_stats.type = OCRDMA_RXQP_ERRSTATS;
dev->rx_qp_err_stats.dev = dev;
- if (!debugfs_create_file("rx_qp_err_stats", S_IRUSR, dev->dir,
- &dev->rx_qp_err_stats, &ocrdma_dbg_ops))
- goto err;
-
+ debugfs_create_file("rx_qp_err_stats", S_IRUSR, dev->dir,
+ &dev->rx_qp_err_stats, &ocrdma_dbg_ops);
dev->tx_dbg_stats.type = OCRDMA_TX_DBG_STATS;
dev->tx_dbg_stats.dev = dev;
- if (!debugfs_create_file("tx_dbg_stats", S_IRUSR, dev->dir,
- &dev->tx_dbg_stats, &ocrdma_dbg_ops))
- goto err;
+ debugfs_create_file("tx_dbg_stats", S_IRUSR, dev->dir,
+ &dev->tx_dbg_stats, &ocrdma_dbg_ops);
dev->rx_dbg_stats.type = OCRDMA_RX_DBG_STATS;
dev->rx_dbg_stats.dev = dev;
- if (!debugfs_create_file("rx_dbg_stats", S_IRUSR, dev->dir,
- &dev->rx_dbg_stats, &ocrdma_dbg_ops))
- goto err;
+ debugfs_create_file("rx_dbg_stats", S_IRUSR, dev->dir,
+ &dev->rx_dbg_stats, &ocrdma_dbg_ops);
dev->driver_stats.type = OCRDMA_DRV_STATS;
dev->driver_stats.dev = dev;
- if (!debugfs_create_file("driver_dbg_stats", S_IRUSR, dev->dir,
- &dev->driver_stats, &ocrdma_dbg_ops))
- goto err;
+ debugfs_create_file("driver_dbg_stats", S_IRUSR, dev->dir,
+ &dev->driver_stats, &ocrdma_dbg_ops);
dev->reset_stats.type = OCRDMA_RESET_STATS;
dev->reset_stats.dev = dev;
- if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir,
- &dev->reset_stats, &ocrdma_dbg_ops))
- goto err;
-
- /* Now create dma_mem for stats mbx command */
- if (!ocrdma_alloc_stats_mem(dev))
- goto err;
-
- mutex_init(&dev->stats_lock);
-
- return;
-err:
- ocrdma_release_stats_mem(dev);
- debugfs_remove_recursive(dev->dir);
- dev->dir = NULL;
+ debugfs_create_file("reset_stats", 0200, dev->dir, &dev->reset_stats,
+ &ocrdma_dbg_ops);
}
void ocrdma_rem_port_stats(struct ocrdma_dev *dev)
{
- if (!dev->dir)
- return;
- mutex_destroy(&dev->stats_lock);
- ocrdma_release_stats_mem(dev);
- debugfs_remove(dev->dir);
+ debugfs_remove_recursive(dev->dir);
}
void ocrdma_init_debugfs(void)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
index c9e58d04c7b8..98feca26ac55 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -65,9 +65,10 @@ enum OCRDMA_STATS_TYPE {
void ocrdma_rem_debugfs(void);
void ocrdma_init_debugfs(void);
+bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev);
+void ocrdma_release_stats_resources(struct ocrdma_dev *dev);
void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
void ocrdma_add_port_stats(struct ocrdma_dev *dev);
-int ocrdma_pma_counters(struct ocrdma_dev *dev,
- struct ib_mad *out_mad);
+void ocrdma_pma_counters(struct ocrdma_dev *dev, struct ib_mad *out_mad);
#endif /* __OCRDMA_STATS_H__ */
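The debugfs write handler reworked earlier in ocrdma_stats.c now rejects offset writes, empty writes, and oversized input before touching user memory. A minimal sketch of that guard pattern in an illustrative handler, not the driver's:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static ssize_t example_dbgfs_write(struct file *filp, const char __user *buffer,
				   size_t count, loff_t *ppos)
{
	char tmp[32];
	long val;

	/* No partial/offset writes, nothing empty, nothing bigger than the buffer. */
	if (*ppos != 0 || count == 0 || count > sizeof(tmp) - 1)
		return -EINVAL;

	if (copy_from_user(tmp, buffer, count))
		return -EFAULT;
	tmp[count] = '\0';

	if (kstrtol(tmp, 10, &val))
		return -EINVAL;

	/* act on 'val' here, e.g. reset counters when non-zero */
	return count;
}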
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 1f3affb6a477..46d911fd38de 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -41,63 +41,29 @@
*/
#include <linux/dma-mapping.h>
+#include <net/addrconf.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
+#include <rdma/uverbs_ioctl.h>
#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
-#include "ocrdma_abi.h"
+#include <rdma/ocrdma-abi.h>
-int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+int ocrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
{
- if (index > 1)
+ if (index > 0)
return -EINVAL;
*pkey = 0xffff;
return 0;
}
-int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
- int index, union ib_gid *sgid)
-{
- int ret;
- struct ocrdma_dev *dev;
-
- dev = get_ocrdma_dev(ibdev);
- memset(sgid, 0, sizeof(*sgid));
- if (index >= OCRDMA_MAX_SGID)
- return -EINVAL;
-
- ret = ib_get_cached_gid(ibdev, port, index, sgid);
- if (ret == -EAGAIN) {
- memcpy(sgid, &zgid, sizeof(*sgid));
- return 0;
- }
-
- return ret;
-}
-
-int ocrdma_add_gid(struct ib_device *device,
- u8 port_num,
- unsigned int index,
- const union ib_gid *gid,
- const struct ib_gid_attr *attr,
- void **context) {
- return 0;
-}
-
-int ocrdma_del_gid(struct ib_device *device,
- u8 port_num,
- unsigned int index,
- void **context) {
- return 0;
-}
-
int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
struct ib_udata *uhw)
{
@@ -109,7 +75,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
memset(attr, 0, sizeof *attr);
memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
- ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
+ addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
+ dev->nic_info.mac_addr);
attr->max_mr_size = dev->attr.max_mr_size;
attr->page_size_cap = 0xffff000;
attr->vendor_id = dev->nic_info.pdev->vendor;
@@ -123,18 +90,17 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
IB_DEVICE_RC_RNR_NAK_GEN |
IB_DEVICE_SHUTDOWN_PORT |
IB_DEVICE_SYS_IMAGE_GUID |
- IB_DEVICE_LOCAL_DMA_LKEY |
IB_DEVICE_MEM_MGT_EXTENSIONS;
- attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
- attr->max_sge_rd = 0;
+ attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
+ attr->max_send_sge = dev->attr.max_send_sge;
+ attr->max_recv_sge = dev->attr.max_recv_sge;
+ attr->max_sge_rd = dev->attr.max_rdma_sge;
attr->max_cq = dev->attr.max_cq;
attr->max_cqe = dev->attr.max_cqe;
attr->max_mr = dev->attr.max_mr;
attr->max_mw = dev->attr.max_mw;
attr->max_pd = dev->attr.max_pd;
attr->atomic_cap = 0;
- attr->max_fmr = 0;
- attr->max_map_per_fmr = 0;
attr->max_qp_rd_atom =
min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
@@ -147,31 +113,13 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
return 0;
}
-struct net_device *ocrdma_get_netdev(struct ib_device *ibdev, u8 port_num)
-{
- struct ocrdma_dev *dev;
- struct net_device *ndev = NULL;
-
- rcu_read_lock();
-
- dev = get_ocrdma_dev(ibdev);
- if (dev)
- ndev = dev->nic_info.netdev;
- if (ndev)
- dev_hold(ndev);
-
- rcu_read_unlock();
-
- return ndev;
-}
-
static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
- u8 *ib_speed, u8 *ib_width)
+ u16 *ib_speed, u8 *ib_width)
{
int status;
u8 speed;
- status = ocrdma_mbx_get_link_speed(dev, &speed);
+ status = ocrdma_mbx_get_link_speed(dev, &speed, NULL);
if (status)
speed = OCRDMA_PHYS_LINK_SPEED_ZERO;
@@ -204,25 +152,21 @@ static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
}
int ocrdma_query_port(struct ib_device *ibdev,
- u8 port, struct ib_port_attr *props)
+ u32 port, struct ib_port_attr *props)
{
enum ib_port_state port_state;
struct ocrdma_dev *dev;
struct net_device *netdev;
+ /* props is zeroed by the caller, so avoid zeroing it here */
dev = get_ocrdma_dev(ibdev);
- if (port > 1) {
- pr_err("%s(%d) invalid_port=0x%x\n", __func__,
- dev->id, port);
- return -EINVAL;
- }
netdev = dev->nic_info.netdev;
if (netif_running(netdev) && netif_oper_up(netdev)) {
port_state = IB_PORT_ACTIVE;
- props->phys_state = 5;
+ props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
} else {
port_state = IB_PORT_DOWN;
- props->phys_state = 3;
+ props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
}
props->max_mtu = IB_MTU_4096;
props->active_mtu = iboe_get_mtu(netdev->mtu);
@@ -231,11 +175,10 @@ int ocrdma_query_port(struct ib_device *ibdev,
props->sm_lid = 0;
props->sm_sl = 0;
props->state = port_state;
- props->port_cap_flags =
- IB_PORT_CM_SUP |
- IB_PORT_REINIT_SUP |
- IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP |
- IB_PORT_IP_BASED_GIDS;
+ props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
+ IB_PORT_DEVICE_MGMT_SUP |
+ IB_PORT_VENDOR_CLASS_SUP;
+ props->ip_gids = true;
props->gid_tbl_len = OCRDMA_MAX_SGID;
props->pkey_tbl_len = 1;
props->bad_pkey_cntr = 0;
@@ -247,19 +190,6 @@ int ocrdma_query_port(struct ib_device *ibdev,
return 0;
}
-int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
- struct ib_port_modify *props)
-{
- struct ocrdma_dev *dev;
-
- dev = get_ocrdma_dev(ibdev);
- if (port > 1) {
- pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
- return -EINVAL;
- }
- return 0;
-}
-
static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
unsigned long len)
{
@@ -317,13 +247,13 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
{
u16 pd_bitmap_idx = 0;
- const unsigned long *pd_bitmap;
+ unsigned long *pd_bitmap;
if (dpp_pool) {
pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
dev->pd_mgr->max_dpp_pd);
- __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
+ __set_bit(pd_bitmap_idx, pd_bitmap);
dev->pd_mgr->pd_dpp_count++;
if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
@@ -331,7 +261,7 @@ static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
dev->pd_mgr->max_normal_pd);
- __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
+ __set_bit(pd_bitmap_idx, pd_bitmap);
dev->pd_mgr->pd_norm_count++;
if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
@@ -371,7 +301,7 @@ static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
return 0;
}
-static u8 ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
+static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
bool dpp_pool)
{
int status;
@@ -414,16 +344,21 @@ static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
return status;
}
-static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
- struct ocrdma_ucontext *uctx,
- struct ib_udata *udata)
+/*
+ * NOTE:
+ *
+ * ocrdma_ucontext must be used here because this function is also
+ * called from ocrdma_alloc_ucontext where ib_udata does not have
+ * valid ib_ucontext pointer. ib_uverbs_get_context does not call
+ * uobj_{alloc|get_xxx} helpers which are used to store the
+ * ib_ucontext in uverbs_attr_bundle wrapping the ib_udata, so
+ * ib_udata does NOT imply a valid ib_ucontext here!
+ */
+static int _ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
+ struct ocrdma_ucontext *uctx,
+ struct ib_udata *udata)
{
- struct ocrdma_pd *pd = NULL;
- int status = 0;
-
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return ERR_PTR(-ENOMEM);
+ int status;
if (udata && uctx && dev->attr.max_dpp_pds) {
pd->dpp_enabled =
@@ -433,15 +368,8 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
dev->attr.wqe_size) : 0;
}
- if (dev->pd_mgr->pd_prealloc_valid) {
- status = ocrdma_get_pd_num(dev, pd);
- if (status == 0) {
- return pd;
- } else {
- kfree(pd);
- return ERR_PTR(status);
- }
- }
+ if (dev->pd_mgr->pd_prealloc_valid)
+ return ocrdma_get_pd_num(dev, pd);
retry:
status = ocrdma_mbx_alloc_pd(dev, pd);
@@ -450,45 +378,46 @@ retry:
pd->dpp_enabled = false;
pd->num_dpp_qp = 0;
goto retry;
- } else {
- kfree(pd);
- return ERR_PTR(status);
}
+ return status;
}
- return pd;
+ return 0;
}
static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
struct ocrdma_pd *pd)
{
- return (uctx->cntxt_pd == pd ? true : false);
+ return (uctx->cntxt_pd == pd);
}
-static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
+static void _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
struct ocrdma_pd *pd)
{
- int status = 0;
-
if (dev->pd_mgr->pd_prealloc_valid)
- status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
+ ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
else
- status = ocrdma_mbx_dealloc_pd(dev, pd);
-
- kfree(pd);
- return status;
+ ocrdma_mbx_dealloc_pd(dev, pd);
}
static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
struct ocrdma_ucontext *uctx,
struct ib_udata *udata)
{
- int status = 0;
+ struct ib_device *ibdev = &dev->ibdev;
+ struct ib_pd *pd;
+ int status;
- uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata);
- if (IS_ERR(uctx->cntxt_pd)) {
- status = PTR_ERR(uctx->cntxt_pd);
- uctx->cntxt_pd = NULL;
+ pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
+ if (!pd)
+ return -ENOMEM;
+
+ pd->device = ibdev;
+ uctx->cntxt_pd = get_ocrdma_pd(pd);
+
+ status = _ocrdma_alloc_pd(dev, uctx->cntxt_pd, uctx, udata);
+ if (status) {
+ kfree(uctx->cntxt_pd);
goto err;
}
@@ -498,7 +427,7 @@ err:
return status;
}
-static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
+static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
{
struct ocrdma_pd *pd = uctx->cntxt_pd;
struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
@@ -508,8 +437,8 @@ static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
__func__, dev->id, pd->id);
}
uctx->cntxt_pd = NULL;
- (void)_ocrdma_dealloc_pd(dev, pd);
- return 0;
+ _ocrdma_dealloc_pd(dev, pd);
+ kfree(pd);
}
static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
@@ -533,34 +462,28 @@ static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
mutex_unlock(&uctx->mm_list_lock);
}
-struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
+int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
+ struct ib_device *ibdev = uctx->device;
int status;
- struct ocrdma_ucontext *ctx;
- struct ocrdma_alloc_ucontext_resp resp;
+ struct ocrdma_ucontext *ctx = get_ocrdma_ucontext(uctx);
+ struct ocrdma_alloc_ucontext_resp resp = {};
struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
struct pci_dev *pdev = dev->nic_info.pdev;
u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);
if (!udata)
- return ERR_PTR(-EFAULT);
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return ERR_PTR(-ENOMEM);
+ return -EFAULT;
INIT_LIST_HEAD(&ctx->mm_head);
mutex_init(&ctx->mm_list_lock);
ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
&ctx->ah_tbl.pa, GFP_KERNEL);
- if (!ctx->ah_tbl.va) {
- kfree(ctx);
- return ERR_PTR(-ENOMEM);
- }
- memset(ctx->ah_tbl.va, 0, map_len);
+ if (!ctx->ah_tbl.va)
+ return -ENOMEM;
+
ctx->ah_tbl.len = map_len;
- memset(&resp, 0, sizeof(resp));
resp.ah_tbl_len = ctx->ah_tbl.len;
resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);
@@ -582,27 +505,26 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
status = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (status)
goto cpy_err;
- return &ctx->ibucontext;
+ return 0;
cpy_err:
+ ocrdma_dealloc_ucontext_pd(ctx);
pd_err:
ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
ctx->ah_tbl.pa);
- kfree(ctx);
- return ERR_PTR(status);
+ return status;
}
-int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
+void ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
- int status = 0;
struct ocrdma_mm *mm, *tmp;
struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
struct pci_dev *pdev = dev->nic_info.pdev;
- status = ocrdma_dealloc_ucontext_pd(uctx);
+ ocrdma_dealloc_ucontext_pd(uctx);
ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
@@ -612,8 +534,6 @@ int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
list_del(&mm->entry);
kfree(mm);
}
- kfree(uctx);
- return status;
}
int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
@@ -623,7 +543,7 @@ int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
unsigned long len = (vma->vm_end - vma->vm_start);
- int status = 0;
+ int status;
bool found;
if (vma->vm_start & (PAGE_SIZE - 1))
@@ -660,7 +580,6 @@ int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
}
static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
- struct ib_ucontext *ib_ctx,
struct ib_udata *udata)
{
int status;
@@ -668,7 +587,8 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
u64 dpp_page_addr = 0;
u32 db_page_size;
struct ocrdma_alloc_pd_uresp rsp;
- struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
+ struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct ocrdma_ucontext, ibucontext);
memset(&rsp, 0, sizeof(rsp));
rsp.id = pd->id;
@@ -706,18 +626,17 @@ dpp_map_err:
return status;
}
-struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
+ struct ib_device *ibdev = ibpd->device;
struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
struct ocrdma_pd *pd;
- struct ocrdma_ucontext *uctx = NULL;
int status;
u8 is_uctx_pd = false;
+ struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct ocrdma_ucontext, ibucontext);
- if (udata && context) {
- uctx = get_ocrdma_ucontext(context);
+ if (udata) {
pd = ocrdma_get_ucontext_pd(uctx);
if (pd) {
is_uctx_pd = true;
@@ -725,36 +644,33 @@ struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
}
}
- pd = _ocrdma_alloc_pd(dev, uctx, udata);
- if (IS_ERR(pd)) {
- status = PTR_ERR(pd);
+ pd = get_ocrdma_pd(ibpd);
+ status = _ocrdma_alloc_pd(dev, pd, uctx, udata);
+ if (status)
goto exit;
- }
pd_mapping:
- if (udata && context) {
- status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
+ if (udata) {
+ status = ocrdma_copy_pd_uresp(dev, pd, udata);
if (status)
goto err;
}
- return &pd->ibpd;
+ return 0;
err:
- if (is_uctx_pd) {
+ if (is_uctx_pd)
ocrdma_release_ucontext_pd(uctx);
- } else {
- status = _ocrdma_dealloc_pd(dev, pd);
- }
+ else
+ _ocrdma_dealloc_pd(dev, pd);
exit:
- return ERR_PTR(status);
+ return status;
}
-int ocrdma_dealloc_pd(struct ib_pd *ibpd)
+int ocrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
struct ocrdma_ucontext *uctx = NULL;
- int status = 0;
u64 usr_db;
uctx = pd->uctx;
@@ -768,11 +684,11 @@ int ocrdma_dealloc_pd(struct ib_pd *ibpd)
if (is_ucontext_pd(uctx, pd)) {
ocrdma_release_ucontext_pd(uctx);
- return status;
+ return 0;
}
}
- status = _ocrdma_dealloc_pd(dev, pd);
- return status;
+ _ocrdma_dealloc_pd(dev, pd);
+ return 0;
}
static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
@@ -878,8 +794,8 @@ static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
void *va;
dma_addr_t pa;
- mr->pbl_table = kzalloc(sizeof(struct ocrdma_pbl) *
- mr->num_pbls, GFP_KERNEL);
+ mr->pbl_table = kcalloc(mr->num_pbls, sizeof(struct ocrdma_pbl),
+ GFP_KERNEL);
if (!mr->pbl_table)
return -ENOMEM;
@@ -891,21 +807,19 @@ static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
status = -ENOMEM;
break;
}
- memset(va, 0, dma_len);
mr->pbl_table[i].va = va;
mr->pbl_table[i].pa = pa;
}
return status;
}
-static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
- u32 num_pbes)
+static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr)
{
struct ocrdma_pbe *pbe;
- struct scatterlist *sg;
+ struct ib_block_iter biter;
struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
- struct ib_umem *umem = mr->umem;
- int shift, pg_cnt, pages, pbe_cnt, entry, total_num_pbes = 0;
+ int pbe_cnt;
+ u64 pg_addr;
if (!mr->hwmr.num_pbes)
return;
@@ -913,51 +827,36 @@ static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
pbe = (struct ocrdma_pbe *)pbl_tbl->va;
pbe_cnt = 0;
- shift = ilog2(umem->page_size);
-
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- pages = sg_dma_len(sg) >> shift;
- for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
- /* store the page address in pbe */
- pbe->pa_lo =
- cpu_to_le32(sg_dma_address
- (sg) +
- (umem->page_size * pg_cnt));
- pbe->pa_hi =
- cpu_to_le32(upper_32_bits
- ((sg_dma_address
- (sg) +
- umem->page_size * pg_cnt)));
- pbe_cnt += 1;
- total_num_pbes += 1;
- pbe++;
-
- /* if done building pbes, issue the mbx cmd. */
- if (total_num_pbes == num_pbes)
- return;
-
- /* if the given pbl is full storing the pbes,
- * move to next pbl.
- */
- if (pbe_cnt ==
- (mr->hwmr.pbl_size / sizeof(u64))) {
- pbl_tbl++;
- pbe = (struct ocrdma_pbe *)pbl_tbl->va;
- pbe_cnt = 0;
- }
+ rdma_umem_for_each_dma_block (mr->umem, &biter, PAGE_SIZE) {
+ /* store the page address in pbe */
+ pg_addr = rdma_block_iter_dma_address(&biter);
+ pbe->pa_lo = cpu_to_le32(pg_addr);
+ pbe->pa_hi = cpu_to_le32(upper_32_bits(pg_addr));
+ pbe_cnt += 1;
+ pbe++;
+ /* if the given pbl is full of pbes,
+ * move to the next pbl.
+ */
+ if (pbe_cnt == (mr->hwmr.pbl_size / sizeof(u64))) {
+ pbl_tbl++;
+ pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+ pbe_cnt = 0;
}
}
}
struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
- u64 usr_addr, int acc, struct ib_udata *udata)
+ u64 usr_addr, int acc, struct ib_dmah *dmah,
+ struct ib_udata *udata)
{
int status = -ENOMEM;
struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
struct ocrdma_mr *mr;
struct ocrdma_pd *pd;
- u32 num_pbes;
+
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
pd = get_ocrdma_pd(ibpd);
@@ -967,18 +866,17 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(status);
- mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
+ mr->umem = ib_umem_get(ibpd->device, start, len, acc);
if (IS_ERR(mr->umem)) {
status = -EFAULT;
goto umem_err;
}
- num_pbes = ib_umem_page_count(mr->umem);
- status = ocrdma_get_pbl_info(dev, mr, num_pbes);
+ status = ocrdma_get_pbl_info(
+ dev, mr, ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE));
if (status)
goto umem_err;
- mr->hwmr.pbe_size = mr->umem->page_size;
- mr->hwmr.fbo = ib_umem_offset(mr->umem);
+ mr->hwmr.pbe_size = PAGE_SIZE;
mr->hwmr.va = usr_addr;
mr->hwmr.len = len;
mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
@@ -989,7 +887,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
if (status)
goto umem_err;
- build_user_pbes(dev, mr, num_pbes);
+ build_user_pbes(dev, mr);
status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
if (status)
goto mbx_err;
@@ -1006,18 +904,18 @@ umem_err:
return ERR_PTR(status);
}
-int ocrdma_dereg_mr(struct ib_mr *ib_mr)
+int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
(void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
+ kfree(mr->pages);
ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
/* it could be user registered memory. */
- if (mr->umem)
- ib_umem_release(mr->umem);
+ ib_umem_release(mr->umem);
kfree(mr);
/* Don't stop cleanup, in case FW is unresponsive */
@@ -1029,13 +927,17 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
}
static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
- struct ib_udata *udata,
- struct ib_ucontext *ib_ctx)
+ struct ib_udata *udata)
{
int status;
- struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
+ struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct ocrdma_ucontext, ibucontext);
struct ocrdma_create_cq_uresp uresp;
+ /* this must be a user (udata) flow */
+ if (!udata)
+ return -EINVAL;
+
memset(&uresp, 0, sizeof(uresp));
uresp.cq_id = cq->id;
uresp.page_size = PAGE_ALIGN(cq->len);
@@ -1064,60 +966,53 @@ err:
return status;
}
-struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *ib_ctx,
- struct ib_udata *udata)
+int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
{
+ struct ib_udata *udata = &attrs->driver_udata;
+ struct ib_device *ibdev = ibcq->device;
int entries = attr->cqe;
- struct ocrdma_cq *cq;
+ struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
- struct ocrdma_ucontext *uctx = NULL;
+ struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct ocrdma_ucontext, ibucontext);
u16 pd_id = 0;
int status;
struct ocrdma_create_cq_ureq ureq;
if (attr->flags)
- return ERR_PTR(-EINVAL);
+ return -EOPNOTSUPP;
if (udata) {
if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
- return ERR_PTR(-EFAULT);
+ return -EFAULT;
} else
ureq.dpp_cq = 0;
- cq = kzalloc(sizeof(*cq), GFP_KERNEL);
- if (!cq)
- return ERR_PTR(-ENOMEM);
spin_lock_init(&cq->cq_lock);
spin_lock_init(&cq->comp_handler_lock);
INIT_LIST_HEAD(&cq->sq_head);
INIT_LIST_HEAD(&cq->rq_head);
- cq->first_arm = true;
- if (ib_ctx) {
- uctx = get_ocrdma_ucontext(ib_ctx);
+ if (udata)
pd_id = uctx->cntxt_pd->id;
- }
status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
- if (status) {
- kfree(cq);
- return ERR_PTR(status);
- }
- if (ib_ctx) {
- status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
+ if (status)
+ return status;
+
+ if (udata) {
+ status = ocrdma_copy_cq_uresp(dev, cq, udata);
if (status)
goto ctx_err;
}
cq->phase = OCRDMA_CQE_VALID;
dev->cq_tbl[cq->id] = cq;
- return &cq->ibcq;
+ return 0;
ctx_err:
ocrdma_mbx_destroy_cq(dev, cq);
- kfree(cq);
- return ERR_PTR(status);
+ return status;
}
int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
@@ -1160,7 +1055,7 @@ static void ocrdma_flush_cq(struct ocrdma_cq *cq)
spin_unlock_irqrestore(&cq->cq_lock, flags);
}
-int ocrdma_destroy_cq(struct ib_cq *ibcq)
+int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
struct ocrdma_eq *eq = NULL;
@@ -1170,15 +1065,13 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
dev->cq_tbl[cq->id] = NULL;
indx = ocrdma_get_eq_table_index(dev, cq->eqn);
- if (indx == -EINVAL)
- BUG();
eq = &dev->eq_tbl[indx];
irq = ocrdma_get_irq(dev, eq);
synchronize_irq(irq);
ocrdma_flush_cq(cq);
- (void)ocrdma_mbx_destroy_cq(dev, cq);
+ ocrdma_mbx_destroy_cq(dev, cq);
if (cq->ucontext) {
pdid = cq->ucontext->cntxt_pd->id;
ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
@@ -1187,8 +1080,6 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
ocrdma_get_db_addr(dev, pdid),
dev->nic_info.db_page_size);
}
-
- kfree(cq);
return 0;
}
@@ -1209,7 +1100,8 @@ static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
}
static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
- struct ib_qp_init_attr *attrs)
+ struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata)
{
if ((attrs->qp_type != IB_QPT_GSI) &&
(attrs->qp_type != IB_QPT_RC) &&
@@ -1217,7 +1109,7 @@ static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
(attrs->qp_type != IB_QPT_UD)) {
pr_err("%s(%d) unsupported qp type=0x%x requested\n",
__func__, dev->id, attrs->qp_type);
- return -EINVAL;
+ return -EOPNOTSUPP;
}
/* Skip the check for QP1 to support CM size of 128 */
if ((attrs->qp_type != IB_QPT_GSI) &&
@@ -1257,7 +1149,7 @@ static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
return -EINVAL;
}
/* unprivileged user space cannot create special QP */
- if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
+ if (udata && attrs->qp_type == IB_QPT_GSI) {
pr_err
("%s(%d) Userspace can't create special QPs of type=0x%x\n",
__func__, dev->id, attrs->qp_type);
@@ -1285,7 +1177,7 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
struct ib_udata *udata, int dpp_offset,
int dpp_credit_lmt, int srq)
{
- int status = 0;
+ int status;
u64 usr_db;
struct ocrdma_create_qp_uresp uresp;
struct ocrdma_pd *pd = qp->pd;
@@ -1363,12 +1255,12 @@ static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
{
qp->wqe_wr_id_tbl =
- kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt,
+ kcalloc(qp->sq.max_cnt, sizeof(*(qp->wqe_wr_id_tbl)),
GFP_KERNEL);
if (qp->wqe_wr_id_tbl == NULL)
return -ENOMEM;
qp->rqe_wr_id_tbl =
- kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL);
+ kcalloc(qp->rq.max_cnt, sizeof(u64), GFP_KERNEL);
if (qp->rqe_wr_id_tbl == NULL)
return -ENOMEM;
@@ -1390,7 +1282,7 @@ static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
qp->sq.max_sges = attrs->cap.max_send_sge;
qp->rq.max_sges = attrs->cap.max_recv_sge;
qp->state = OCRDMA_QPS_RST;
- qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
+ qp->signaled = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
}
static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
@@ -1403,30 +1295,28 @@ static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
}
}
-struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
- struct ib_qp_init_attr *attrs,
- struct ib_udata *udata)
+int ocrdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata)
{
int status;
+ struct ib_pd *ibpd = ibqp->pd;
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
- struct ocrdma_qp *qp;
- struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+ struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
struct ocrdma_create_qp_ureq ureq;
u16 dpp_credit_lmt, dpp_offset;
- status = ocrdma_check_qp_params(ibpd, dev, attrs);
+ if (attrs->create_flags)
+ return -EOPNOTSUPP;
+
+ status = ocrdma_check_qp_params(ibpd, dev, attrs, udata);
if (status)
goto gen_err;
memset(&ureq, 0, sizeof(ureq));
if (udata) {
if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
- return ERR_PTR(-EFAULT);
- }
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp) {
- status = -ENOMEM;
- goto gen_err;
+ return -EFAULT;
}
ocrdma_set_qp_init_params(qp, pd, attrs);
if (udata == NULL)
@@ -1461,7 +1351,7 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
ocrdma_store_gsi_qp_cq(dev, attrs);
qp->ibqp.qp_num = qp->id;
mutex_unlock(&dev->dev_lock);
- return &qp->ibqp;
+ return 0;
cpy_err:
ocrdma_del_qpn_map(dev, qp);
@@ -1471,10 +1361,9 @@ mbx_err:
mutex_unlock(&dev->dev_lock);
kfree(qp->wqe_wr_id_tbl);
kfree(qp->rqe_wr_id_tbl);
- kfree(qp);
pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
- return ERR_PTR(status);
+ return status;
}
int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
@@ -1494,9 +1383,7 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
*/
if (status < 0)
return status;
- status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
-
- return status;
+ return ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
}
int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
@@ -1508,6 +1395,9 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct ocrdma_dev *dev;
enum ib_qp_state old_qps, new_qps;
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
qp = get_ocrdma_qp(ibqp);
dev = get_ocrdma_dev(ibqp->device);
@@ -1522,8 +1412,7 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
new_qps = old_qps;
spin_unlock_irqrestore(&qp->q_lock, flags);
- if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
- IB_LINK_LAYER_ETHERNET)) {
+ if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
"qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
__func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
@@ -1603,23 +1492,24 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
qp_attr->cap.max_recv_sge = qp->rq.max_sges;
qp_attr->cap.max_inline_data = qp->max_inline_data;
qp_init_attr->cap = qp_attr->cap;
- memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
- sizeof(params.dgid));
- qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl &
- OCRDMA_QP_PARAMS_FLOW_LABEL_MASK;
- qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
- qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn &
- OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
- OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
- qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
- OCRDMA_QP_PARAMS_TCLASS_MASK) >>
- OCRDMA_QP_PARAMS_TCLASS_SHIFT;
-
- qp_attr->ah_attr.ah_flags = IB_AH_GRH;
- qp_attr->ah_attr.port_num = 1;
- qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl &
- OCRDMA_QP_PARAMS_SL_MASK) >>
- OCRDMA_QP_PARAMS_SL_SHIFT;
+ qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
+
+ rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
+ params.rnt_rc_sl_fl &
+ OCRDMA_QP_PARAMS_FLOW_LABEL_MASK,
+ qp->sgid_idx,
+ (params.hop_lmt_rq_psn &
+ OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
+ OCRDMA_QP_PARAMS_HOP_LMT_SHIFT,
+ (params.tclass_sq_psn &
+ OCRDMA_QP_PARAMS_TCLASS_MASK) >>
+ OCRDMA_QP_PARAMS_TCLASS_SHIFT);
+ rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid[0]);
+
+ rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
+ rdma_ah_set_sl(&qp_attr->ah_attr, (params.rnt_rc_sl_fl &
+ OCRDMA_QP_PARAMS_SL_MASK) >>
+ OCRDMA_QP_PARAMS_SL_SHIFT);
qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
@@ -1632,8 +1522,8 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
qp_attr->min_rnr_timer = 0;
qp_attr->pkey_index = 0;
qp_attr->port_num = 1;
- qp_attr->ah_attr.src_path_bits = 0;
- qp_attr->ah_attr.static_rate = 0;
+ rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
+ rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
qp_attr->alt_pkey_index = 0;
qp_attr->alt_port_num = 0;
qp_attr->alt_timeout = 0;
@@ -1704,7 +1594,6 @@ static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
{
unsigned long cq_flags;
unsigned long flags;
- int discard_cnt = 0;
u32 cur_getp, stop_getp;
struct ocrdma_cqe *cqe;
u32 qpn = 0, wqe_idx = 0;
@@ -1743,8 +1632,7 @@ static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
OCRDMA_CQE_BUFTAG_SHIFT) &
qp->srq->rq.max_wqe_idx;
- if (wqe_idx < 1)
- BUG();
+ BUG_ON(wqe_idx < 1);
spin_lock_irqsave(&qp->srq->q_lock, flags);
ocrdma_hwq_inc_tail(&qp->srq->rq);
ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
@@ -1757,7 +1645,6 @@ static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
/* mark cqe discarded so that it is not picked up later
* in the poll_cq().
*/
- discard_cnt += 1;
cqe->cmn.qpn = 0;
skip_cqe:
cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
@@ -1784,7 +1671,7 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
spin_unlock_irqrestore(&dev->flush_q_lock, flags);
}
-int ocrdma_destroy_qp(struct ib_qp *ibqp)
+int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
struct ocrdma_pd *pd;
struct ocrdma_qp *qp;
@@ -1816,13 +1703,13 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
* protect against processing in-flight CQEs for this QP.
*/
spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
- if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
+ if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) {
spin_lock(&qp->rq_cq->cq_lock);
-
- ocrdma_del_qpn_map(dev, qp);
-
- if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
+ ocrdma_del_qpn_map(dev, qp);
spin_unlock(&qp->rq_cq->cq_lock);
+ } else {
+ ocrdma_del_qpn_map(dev, qp);
+ }
spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
if (!pd->uctx) {
@@ -1843,7 +1730,6 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
kfree(qp->wqe_wr_id_tbl);
kfree(qp->rqe_wr_id_tbl);
- kfree(qp);
return 0;
}
@@ -1880,43 +1766,46 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
return status;
}
-struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
- struct ib_srq_init_attr *init_attr,
- struct ib_udata *udata)
+int ocrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
{
- int status = -ENOMEM;
- struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
- struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
- struct ocrdma_srq *srq;
+ int status;
+ struct ocrdma_pd *pd = get_ocrdma_pd(ibsrq->pd);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
+ struct ocrdma_srq *srq = get_ocrdma_srq(ibsrq);
+
+ if (init_attr->srq_type != IB_SRQT_BASIC)
+ return -EOPNOTSUPP;
if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (init_attr->attr.max_wr > dev->attr.max_rqe)
- return ERR_PTR(-EINVAL);
-
- srq = kzalloc(sizeof(*srq), GFP_KERNEL);
- if (!srq)
- return ERR_PTR(status);
+ return -EINVAL;
spin_lock_init(&srq->q_lock);
srq->pd = pd;
srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
if (status)
- goto err;
+ return status;
- if (udata == NULL) {
- srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,
- GFP_KERNEL);
- if (srq->rqe_wr_id_tbl == NULL)
+ if (!udata) {
+ srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
+ GFP_KERNEL);
+ if (!srq->rqe_wr_id_tbl) {
+ status = -ENOMEM;
goto arm_err;
+ }
srq->bit_fields_len = (srq->rq.max_cnt / 32) +
(srq->rq.max_cnt % 32 ? 1 : 0);
srq->idx_bit_fields =
- kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL);
- if (srq->idx_bit_fields == NULL)
+ kmalloc_array(srq->bit_fields_len, sizeof(u32),
+ GFP_KERNEL);
+ if (!srq->idx_bit_fields) {
+ status = -ENOMEM;
goto arm_err;
+ }
memset(srq->idx_bit_fields, 0xff,
srq->bit_fields_len * sizeof(u32));
}
@@ -1933,15 +1822,13 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
goto arm_err;
}
- return &srq->ibsrq;
+ return 0;
arm_err:
ocrdma_mbx_destroy_srq(dev, srq);
-err:
kfree(srq->rqe_wr_id_tbl);
kfree(srq->idx_bit_fields);
- kfree(srq);
- return ERR_PTR(status);
+ return status;
}
int ocrdma_modify_srq(struct ib_srq *ibsrq,
@@ -1949,7 +1836,7 @@ int ocrdma_modify_srq(struct ib_srq *ibsrq,
enum ib_srq_attr_mask srq_attr_mask,
struct ib_udata *udata)
{
- int status = 0;
+ int status;
struct ocrdma_srq *srq;
srq = get_ocrdma_srq(ibsrq);
@@ -1962,23 +1849,20 @@ int ocrdma_modify_srq(struct ib_srq *ibsrq,
int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
- int status;
struct ocrdma_srq *srq;
srq = get_ocrdma_srq(ibsrq);
- status = ocrdma_mbx_query_srq(srq, srq_attr);
- return status;
+ return ocrdma_mbx_query_srq(srq, srq_attr);
}
-int ocrdma_destroy_srq(struct ib_srq *ibsrq)
+int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
- int status;
struct ocrdma_srq *srq;
struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
srq = get_ocrdma_srq(ibsrq);
- status = ocrdma_mbx_destroy_srq(dev, srq);
+ ocrdma_mbx_destroy_srq(dev, srq);
if (srq->pd->uctx)
ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
@@ -1986,25 +1870,25 @@ int ocrdma_destroy_srq(struct ib_srq *ibsrq)
kfree(srq->idx_bit_fields);
kfree(srq->rqe_wr_id_tbl);
- kfree(srq);
- return status;
+ return 0;
}
/* unprivileged verbs and their support functions. */
static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
struct ocrdma_hdr_wqe *hdr,
- struct ib_send_wr *wr)
+ const struct ib_send_wr *wr)
{
struct ocrdma_ewqe_ud_hdr *ud_hdr =
(struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
- struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah);
+ struct ocrdma_ah *ah = get_ocrdma_ah(ud_wr(wr)->ah);
- ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn;
+ ud_hdr->rsvd_dest_qpn = ud_wr(wr)->remote_qpn;
if (qp->qp_type == IB_QPT_GSI)
ud_hdr->qkey = qp->qkey;
else
- ud_hdr->qkey = wr->wr.ud.remote_qkey;
+ ud_hdr->qkey = ud_wr(wr)->remote_qkey;
ud_hdr->rsvd_ahid = ah->id;
+ ud_hdr->hdr_type = ah->hdr_type;
if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
}
@@ -2039,7 +1923,7 @@ static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
struct ocrdma_hdr_wqe *hdr,
struct ocrdma_sge *sge,
- struct ib_send_wr *wr, u32 wqe_size)
+ const struct ib_send_wr *wr, u32 wqe_size)
{
int i;
char *dpp_addr;
@@ -2077,9 +1961,8 @@ static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
}
static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
- struct ib_send_wr *wr)
+ const struct ib_send_wr *wr)
{
- int status;
struct ocrdma_sge *sge;
u32 wqe_size = sizeof(*hdr);
@@ -2091,12 +1974,11 @@ static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
sge = (struct ocrdma_sge *)(hdr + 1);
}
- status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
- return status;
+ return ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
}
static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
- struct ib_send_wr *wr)
+ const struct ib_send_wr *wr)
{
int status;
struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
@@ -2106,15 +1988,15 @@ static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
if (status)
return status;
- ext_rw->addr_lo = wr->wr.rdma.remote_addr;
- ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
- ext_rw->lrkey = wr->wr.rdma.rkey;
+ ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
+ ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
+ ext_rw->lrkey = rdma_wr(wr)->rkey;
ext_rw->len = hdr->total_len;
return 0;
}
static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
- struct ib_send_wr *wr)
+ const struct ib_send_wr *wr)
{
struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
struct ocrdma_sge *sge = ext_rw + 1;
@@ -2126,46 +2008,12 @@ static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
- ext_rw->addr_lo = wr->wr.rdma.remote_addr;
- ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
- ext_rw->lrkey = wr->wr.rdma.rkey;
+ ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
+ ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
+ ext_rw->lrkey = rdma_wr(wr)->rkey;
ext_rw->len = hdr->total_len;
}
-static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl,
- struct ocrdma_hw_mr *hwmr)
-{
- int i;
- u64 buf_addr = 0;
- int num_pbes;
- struct ocrdma_pbe *pbe;
-
- pbe = (struct ocrdma_pbe *)pbl_tbl->va;
- num_pbes = 0;
-
- /* go through the OS phy regions & fill hw pbe entries into pbls. */
- for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
- /* number of pbes can be more for one OS buf, when
- * buffers are of different sizes.
- * split the ib_buf to one or more pbes.
- */
- buf_addr = wr->wr.fast_reg.page_list->page_list[i];
- pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
- pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
- num_pbes += 1;
- pbe++;
-
- /* if the pbl is full storing the pbes,
- * move to next pbl.
- */
- if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
- pbl_tbl++;
- pbe = (struct ocrdma_pbe *)pbl_tbl->va;
- }
- }
- return;
-}
-
static int get_encoded_page_size(int pg_sz)
{
/* Max size is 256M 4096 << 16 */
@@ -2176,48 +2024,59 @@ static int get_encoded_page_size(int pg_sz)
return i;
}
-
-static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
- struct ib_send_wr *wr)
+static int ocrdma_build_reg(struct ocrdma_qp *qp,
+ struct ocrdma_hdr_wqe *hdr,
+ const struct ib_reg_wr *wr)
{
u64 fbo;
struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
- struct ocrdma_mr *mr;
- struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
+ struct ocrdma_mr *mr = get_ocrdma_mr(wr->mr);
+ struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
+ struct ocrdma_pbe *pbe;
u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
+ int num_pbes = 0, i;
wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
- if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr)
- return -EINVAL;
-
hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
- if (wr->wr.fast_reg.page_list_len == 0)
- BUG();
- if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE)
+ if (wr->access & IB_ACCESS_LOCAL_WRITE)
hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
- if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE)
+ if (wr->access & IB_ACCESS_REMOTE_WRITE)
hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
- if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ)
+ if (wr->access & IB_ACCESS_REMOTE_READ)
hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
- hdr->lkey = wr->wr.fast_reg.rkey;
- hdr->total_len = wr->wr.fast_reg.length;
+ hdr->lkey = wr->key;
+ hdr->total_len = mr->ibmr.length;
- fbo = wr->wr.fast_reg.iova_start -
- (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
+ fbo = mr->ibmr.iova - mr->pages[0];
- fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start);
- fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff);
+ fast_reg->va_hi = upper_32_bits(mr->ibmr.iova);
+ fast_reg->va_lo = (u32) (mr->ibmr.iova & 0xffffffff);
fast_reg->fbo_hi = upper_32_bits(fbo);
fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
- fast_reg->num_sges = wr->wr.fast_reg.page_list_len;
- fast_reg->size_sge =
- get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
- mr = (struct ocrdma_mr *) (unsigned long)
- dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
- build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
+ fast_reg->num_sges = mr->npages;
+ fast_reg->size_sge = get_encoded_page_size(mr->ibmr.page_size);
+
+ pbe = pbl_tbl->va;
+ for (i = 0; i < mr->npages; i++) {
+ u64 buf_addr = mr->pages[i];
+
+ pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
+ pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
+ num_pbes += 1;
+ pbe++;
+
+ /* if the pbl is full of pbes,
+ * move to the next pbl.
+ */
+ if (num_pbes == (mr->hwmr.pbl_size/sizeof(u64))) {
+ pbl_tbl++;
+ pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+ }
+ }
+
return 0;
}
@@ -2228,8 +2087,8 @@ static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
iowrite32(val, qp->sq_db);
}
-int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
int status = 0;
struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
@@ -2272,6 +2131,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_SEND_WITH_IMM:
hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
hdr->immdt = ntohl(wr->ex.imm_data);
+ fallthrough;
case IB_WR_SEND:
hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
ocrdma_build_send(qp, hdr, wr);
@@ -2285,6 +2145,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_RDMA_WRITE_WITH_IMM:
hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
hdr->immdt = ntohl(wr->ex.imm_data);
+ fallthrough;
case IB_WR_RDMA_WRITE:
hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
status = ocrdma_build_write(qp, hdr, wr);
@@ -2300,8 +2161,8 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
hdr->lkey = wr->ex.invalidate_rkey;
break;
- case IB_WR_FAST_REG_MR:
- status = ocrdma_build_fr(qp, hdr, wr);
+ case IB_WR_REG_MR:
+ status = ocrdma_build_reg(qp, hdr, reg_wr(wr));
break;
default:
status = -EINVAL;
@@ -2338,8 +2199,8 @@ static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
iowrite32(val, qp->rq_db);
}
-static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
- u16 tag)
+static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe,
+ const struct ib_recv_wr *wr, u16 tag)
{
u32 wqe_size = 0;
struct ocrdma_sge *sge;
@@ -2359,8 +2220,8 @@ static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
ocrdma_cpu_to_le32(rqe, wqe_size);
}
-int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int ocrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
int status = 0;
unsigned long flags;
@@ -2412,15 +2273,13 @@ static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
if (srq->idx_bit_fields[row]) {
indx = ffs(srq->idx_bit_fields[row]);
indx = (row * 32) + (indx - 1);
- if (indx >= srq->rq.max_cnt)
- BUG();
+ BUG_ON(indx >= srq->rq.max_cnt);
ocrdma_srq_toggle_bit(srq, indx);
break;
}
}
- if (row == srq->bit_fields_len)
- BUG();
+ BUG_ON(row == srq->bit_fields_len);
return indx + 1; /* Use from index 1 */
}
@@ -2431,8 +2290,8 @@ static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
}
-int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int ocrdma_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
int status = 0;
unsigned long flags;
@@ -2567,7 +2426,7 @@ static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
ibwc->opcode = IB_WC_SEND;
break;
case OCRDMA_FR_MR:
- ibwc->opcode = IB_WC_FAST_REG_MR;
+ ibwc->opcode = IB_WC_REG_MR;
break;
case OCRDMA_LKEY_INV:
ibwc->opcode = IB_WC_LOCAL_INV;
@@ -2740,19 +2599,30 @@ static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
return expand;
}
-static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
+static int ocrdma_update_ud_rcqe(struct ocrdma_dev *dev, struct ib_wc *ibwc,
+ struct ocrdma_cqe *cqe)
{
int status;
+ u16 hdr_type = 0;
status = (le32_to_cpu(cqe->flags_status_srcqpn) &
OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
OCRDMA_CQE_SRCQP_MASK;
- ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) &
- OCRDMA_CQE_PKEY_MASK;
+ ibwc->pkey_index = 0;
ibwc->wc_flags = IB_WC_GRH;
ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
- OCRDMA_CQE_UD_XFER_LEN_SHIFT);
+ OCRDMA_CQE_UD_XFER_LEN_SHIFT) &
+ OCRDMA_CQE_UD_XFER_LEN_MASK;
+
+ if (ocrdma_is_udp_encap_supported(dev)) {
+ hdr_type = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
+ OCRDMA_CQE_UD_L3TYPE_SHIFT) &
+ OCRDMA_CQE_UD_L3TYPE_MASK;
+ ibwc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
+ ibwc->network_hdr_type = hdr_type;
+ }
+
return status;
}
@@ -2767,8 +2637,7 @@ static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
srq = get_ocrdma_srq(qp->ibqp.srq);
wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
- if (wqe_idx < 1)
- BUG();
+ BUG_ON(wqe_idx < 1);
ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
spin_lock_irqsave(&srq->q_lock, flags);
@@ -2815,12 +2684,15 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
{
+ struct ocrdma_dev *dev;
+
+ dev = get_ocrdma_dev(qp->ibqp.device);
ibwc->opcode = IB_WC_RECV;
ibwc->qp = &qp->ibqp;
ibwc->status = IB_WC_SUCCESS;
if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
- ocrdma_update_ud_rcqe(ibwc, cqe);
+ ocrdma_update_ud_rcqe(dev, ibwc, cqe);
else
ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
@@ -2933,17 +2805,9 @@ expand_cqe:
}
stop_cqe:
cq->getp = cur_getp;
- if (cq->deferred_arm) {
- ocrdma_ring_cq_db(dev, cq->id, true, cq->deferred_sol,
- polled_hw_cqes);
- cq->deferred_arm = false;
- cq->deferred_sol = false;
- } else {
- /* We need to pop the CQE. No need to arm */
- ocrdma_ring_cq_db(dev, cq->id, false, cq->deferred_sol,
- polled_hw_cqes);
- cq->deferred_sol = false;
- }
+
+ if (polled_hw_cqes)
+ ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes);
return i;
}
@@ -3027,20 +2891,13 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
if (cq_flags & IB_CQ_SOLICITED)
sol_needed = true;
- if (cq->first_arm) {
- ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
- cq->first_arm = false;
- }
-
- cq->deferred_arm = true;
- cq->deferred_sol = sol_needed;
+ ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
spin_unlock_irqrestore(&cq->cq_lock, flags);
return 0;
}
-struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd,
- enum ib_mr_type mr_type,
+struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
u32 max_num_sg)
{
int status;
@@ -3058,6 +2915,12 @@ struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd,
if (!mr)
return ERR_PTR(-ENOMEM);
+ mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
+ if (!mr->pages) {
+ status = -ENOMEM;
+ goto pl_err;
+ }
+
status = ocrdma_get_pbl_info(dev, mr, max_num_sg);
if (status)
goto pbl_err;
@@ -3081,189 +2944,30 @@ struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd,
mbx_err:
ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
pbl_err:
+ kfree(mr->pages);
+pl_err:
kfree(mr);
return ERR_PTR(-ENOMEM);
}
-struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
- *ibdev,
- int page_list_len)
-{
- struct ib_fast_reg_page_list *frmr_list;
- int size;
-
- size = sizeof(*frmr_list) + (page_list_len * sizeof(u64));
- frmr_list = kzalloc(size, GFP_KERNEL);
- if (!frmr_list)
- return ERR_PTR(-ENOMEM);
- frmr_list->page_list = (u64 *)(frmr_list + 1);
- return frmr_list;
-}
-
-void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
+static int ocrdma_set_page(struct ib_mr *ibmr, u64 addr)
{
- kfree(page_list);
-}
-
-#define MAX_KERNEL_PBE_SIZE 65536
-static inline int count_kernel_pbes(struct ib_phys_buf *buf_list,
- int buf_cnt, u32 *pbe_size)
-{
- u64 total_size = 0;
- u64 buf_size = 0;
- int i;
- *pbe_size = roundup(buf_list[0].size, PAGE_SIZE);
- *pbe_size = roundup_pow_of_two(*pbe_size);
-
- /* find the smallest PBE size that we can have */
- for (i = 0; i < buf_cnt; i++) {
- /* first addr may not be page aligned, so ignore checking */
- if ((i != 0) && ((buf_list[i].addr & ~PAGE_MASK) ||
- (buf_list[i].size & ~PAGE_MASK))) {
- return 0;
- }
-
- /* if configured PBE size is greater then the chosen one,
- * reduce the PBE size.
- */
- buf_size = roundup(buf_list[i].size, PAGE_SIZE);
- /* pbe_size has to be even multiple of 4K 1,2,4,8...*/
- buf_size = roundup_pow_of_two(buf_size);
- if (*pbe_size > buf_size)
- *pbe_size = buf_size;
+ struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);
- total_size += buf_size;
- }
- *pbe_size = *pbe_size > MAX_KERNEL_PBE_SIZE ?
- (MAX_KERNEL_PBE_SIZE) : (*pbe_size);
+ if (unlikely(mr->npages == mr->hwmr.num_pbes))
+ return -ENOMEM;
- /* num_pbes = total_size / (*pbe_size); this is implemented below. */
+ mr->pages[mr->npages++] = addr;
- return total_size >> ilog2(*pbe_size);
+ return 0;
}
-static void build_kernel_pbes(struct ib_phys_buf *buf_list, int ib_buf_cnt,
- u32 pbe_size, struct ocrdma_pbl *pbl_tbl,
- struct ocrdma_hw_mr *hwmr)
+int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
{
- int i;
- int idx;
- int pbes_per_buf = 0;
- u64 buf_addr = 0;
- int num_pbes;
- struct ocrdma_pbe *pbe;
- int total_num_pbes = 0;
+ struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);
- if (!hwmr->num_pbes)
- return;
+ mr->npages = 0;
- pbe = (struct ocrdma_pbe *)pbl_tbl->va;
- num_pbes = 0;
-
- /* go through the OS phy regions & fill hw pbe entries into pbls. */
- for (i = 0; i < ib_buf_cnt; i++) {
- buf_addr = buf_list[i].addr;
- pbes_per_buf =
- roundup_pow_of_two(roundup(buf_list[i].size, PAGE_SIZE)) /
- pbe_size;
- hwmr->len += buf_list[i].size;
- /* number of pbes can be more for one OS buf, when
- * buffers are of different sizes.
- * split the ib_buf to one or more pbes.
- */
- for (idx = 0; idx < pbes_per_buf; idx++) {
- /* we program always page aligned addresses,
- * first unaligned address is taken care by fbo.
- */
- if (i == 0) {
- /* for non zero fbo, assign the
- * start of the page.
- */
- pbe->pa_lo =
- cpu_to_le32((u32) (buf_addr & PAGE_MASK));
- pbe->pa_hi =
- cpu_to_le32((u32) upper_32_bits(buf_addr));
- } else {
- pbe->pa_lo =
- cpu_to_le32((u32) (buf_addr & 0xffffffff));
- pbe->pa_hi =
- cpu_to_le32((u32) upper_32_bits(buf_addr));
- }
- buf_addr += pbe_size;
- num_pbes += 1;
- total_num_pbes += 1;
- pbe++;
-
- if (total_num_pbes == hwmr->num_pbes)
- goto mr_tbl_done;
- /* if the pbl is full storing the pbes,
- * move to next pbl.
- */
- if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
- pbl_tbl++;
- pbe = (struct ocrdma_pbe *)pbl_tbl->va;
- num_pbes = 0;
- }
- }
- }
-mr_tbl_done:
- return;
-}
-
-struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *ibpd,
- struct ib_phys_buf *buf_list,
- int buf_cnt, int acc, u64 *iova_start)
-{
- int status = -ENOMEM;
- struct ocrdma_mr *mr;
- struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
- struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
- u32 num_pbes;
- u32 pbe_size = 0;
-
- if ((acc & IB_ACCESS_REMOTE_WRITE) && !(acc & IB_ACCESS_LOCAL_WRITE))
- return ERR_PTR(-EINVAL);
-
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr)
- return ERR_PTR(status);
-
- num_pbes = count_kernel_pbes(buf_list, buf_cnt, &pbe_size);
- if (num_pbes == 0) {
- status = -EINVAL;
- goto pbl_err;
- }
- status = ocrdma_get_pbl_info(dev, mr, num_pbes);
- if (status)
- goto pbl_err;
-
- mr->hwmr.pbe_size = pbe_size;
- mr->hwmr.fbo = *iova_start - (buf_list[0].addr & PAGE_MASK);
- mr->hwmr.va = *iova_start;
- mr->hwmr.local_rd = 1;
- mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
- mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
- mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
- mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
- mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
-
- status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
- if (status)
- goto pbl_err;
- build_kernel_pbes(buf_list, buf_cnt, pbe_size, mr->hwmr.pbl_table,
- &mr->hwmr);
- status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
- if (status)
- goto mbx_err;
-
- mr->ibmr.lkey = mr->hwmr.lkey;
- if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
- mr->ibmr.rkey = mr->hwmr.lkey;
- return &mr->ibmr;
-
-mbx_err:
- ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
-pbl_err:
- kfree(mr);
- return ERR_PTR(status);
+ return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ocrdma_set_page);
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index 308c16857a5d..6c5c3755b8a9 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -43,59 +43,39 @@
#ifndef __OCRDMA_VERBS_H__
#define __OCRDMA_VERBS_H__
-int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *,
- struct ib_send_wr **bad_wr);
-int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *,
- struct ib_recv_wr **bad_wr);
+int ocrdma_post_send(struct ib_qp *, const struct ib_send_wr *,
+ const struct ib_send_wr **bad_wr);
+int ocrdma_post_recv(struct ib_qp *, const struct ib_recv_wr *,
+ const struct ib_recv_wr **bad_wr);
int ocrdma_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);
int ocrdma_arm_cq(struct ib_cq *, enum ib_cq_notify_flags flags);
int ocrdma_query_device(struct ib_device *, struct ib_device_attr *props,
struct ib_udata *uhw);
-int ocrdma_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
-int ocrdma_modify_port(struct ib_device *, u8 port, int mask,
- struct ib_port_modify *props);
+int ocrdma_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props);
enum rdma_protocol_type
-ocrdma_query_protocol(struct ib_device *device, u8 port_num);
+ocrdma_query_protocol(struct ib_device *device, u32 port_num);
-void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid);
-int ocrdma_query_gid(struct ib_device *, u8 port,
- int index, union ib_gid *gid);
-struct net_device *ocrdma_get_netdev(struct ib_device *device, u8 port_num);
-int ocrdma_add_gid(struct ib_device *device,
- u8 port_num,
- unsigned int index,
- const union ib_gid *gid,
- const struct ib_gid_attr *attr,
- void **context);
-int ocrdma_del_gid(struct ib_device *device,
- u8 port_num,
- unsigned int index,
- void **context);
-int ocrdma_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
+int ocrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey);
-struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *,
- struct ib_udata *);
-int ocrdma_dealloc_ucontext(struct ib_ucontext *);
+int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
+void ocrdma_dealloc_ucontext(struct ib_ucontext *uctx);
int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
-struct ib_pd *ocrdma_alloc_pd(struct ib_device *,
- struct ib_ucontext *, struct ib_udata *);
-int ocrdma_dealloc_pd(struct ib_pd *pd);
+int ocrdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+int ocrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
-struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *ib_ctx,
- struct ib_udata *udata);
+int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs);
int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
-int ocrdma_destroy_cq(struct ib_cq *);
+int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
-struct ib_qp *ocrdma_create_qp(struct ib_pd *,
- struct ib_qp_init_attr *attrs,
- struct ib_udata *);
+int ocrdma_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata);
int _ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
int attr_mask);
int ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
@@ -103,31 +83,26 @@ int ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
int ocrdma_query_qp(struct ib_qp *,
struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *);
-int ocrdma_destroy_qp(struct ib_qp *);
+int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
void ocrdma_del_flush_qp(struct ocrdma_qp *qp);
-struct ib_srq *ocrdma_create_srq(struct ib_pd *, struct ib_srq_init_attr *,
- struct ib_udata *);
+int ocrdma_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *attr,
+ struct ib_udata *udata);
int ocrdma_modify_srq(struct ib_srq *, struct ib_srq_attr *,
enum ib_srq_attr_mask, struct ib_udata *);
int ocrdma_query_srq(struct ib_srq *, struct ib_srq_attr *);
-int ocrdma_destroy_srq(struct ib_srq *);
-int ocrdma_post_srq_recv(struct ib_srq *, struct ib_recv_wr *,
- struct ib_recv_wr **bad_recv_wr);
+int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
+int ocrdma_post_srq_recv(struct ib_srq *, const struct ib_recv_wr *,
+ const struct ib_recv_wr **bad_recv_wr);
-int ocrdma_dereg_mr(struct ib_mr *);
+int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *, int acc);
-struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *,
- struct ib_phys_buf *buffer_list,
- int num_phys_buf, int acc, u64 *iova_start);
struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length,
- u64 virt, int acc, struct ib_udata *);
-struct ib_mr *ocrdma_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
+ u64 virt, int acc, struct ib_dmah *dmah,
+ struct ib_udata *);
+struct ib_mr *ocrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg);
-struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
- *ibdev,
- int page_list_len);
-void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list);
+int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
#endif /* __OCRDMA_VERBS_H__ */
diff --git a/drivers/infiniband/hw/qedr/Kconfig b/drivers/infiniband/hw/qedr/Kconfig
new file mode 100644
index 000000000000..9e31d13b03c3
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INFINIBAND_QEDR
+ tristate "QLogic RoCE driver"
+ depends on 64BIT && QEDE
+ depends on PCI
+ select QED_LL2
+ select QED_OOO
+ select QED_RDMA
+ help
+ This driver provides low-level InfiniBand over Ethernet
+ support for QLogic QED host channel adapters (HCAs).
diff --git a/drivers/infiniband/hw/qedr/Makefile b/drivers/infiniband/hw/qedr/Makefile
new file mode 100644
index 000000000000..c75679837a85
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_INFINIBAND_QEDR) := qedr.o
+
+qedr-y := main.o verbs.o qedr_roce_cm.o qedr_iw_cm.o
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
new file mode 100644
index 000000000000..ecdfeff3d44f
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -0,0 +1,1056 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_mad.h>
+#include <linux/netdevice.h>
+#include <linux/iommu.h>
+#include <linux/pci.h>
+#include <net/addrconf.h>
+
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_if.h>
+#include "qedr.h"
+#include "verbs.h"
+#include <rdma/qedr-abi.h>
+#include "qedr_iw_cm.h"
+
+MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver");
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_LICENSE("Dual BSD/GPL");
+
+#define QEDR_WQ_MULTIPLIER_DFT (3)
+
+static void qedr_ib_dispatch_event(struct qedr_dev *dev, u32 port_num,
+ enum ib_event_type type)
+{
+ struct ib_event ibev;
+
+ ibev.device = &dev->ibdev;
+ ibev.element.port_num = port_num;
+ ibev.event = type;
+
+ ib_dispatch_event(&ibev);
+}
+
+static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
+ u32 port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
+{
+ struct qedr_dev *qedr = get_qedr_dev(ibdev);
+ u32 fw_ver = (u32)qedr->attr.fw_ver;
+
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
+ (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
+ (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
+}
+
+static int qedr_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ err = qedr_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
+ RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+
+ return 0;
+}
+
+static int qedr_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ err = qedr_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->gid_tbl_len = 1;
+ immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+ immutable->max_mad_size = 0;
+
+ return 0;
+}
+
+/* QEDR sysfs interface */
+static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct qedr_dev *dev =
+ rdma_device_to_drv_device(device, struct qedr_dev, ibdev);
+
+ return sysfs_emit(buf, "0x%x\n", dev->attr.hw_ver);
+}
+static DEVICE_ATTR_RO(hw_rev);
+
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct qedr_dev *dev =
+ rdma_device_to_drv_device(device, struct qedr_dev, ibdev);
+
+ return sysfs_emit(buf, "FastLinQ QL%x %s\n", dev->pdev->device,
+ rdma_protocol_iwarp(&dev->ibdev, 1) ? "iWARP" :
+ "RoCE");
+}
+static DEVICE_ATTR_RO(hca_type);
+
+static struct attribute *qedr_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ NULL
+};
+
+static const struct attribute_group qedr_attr_group = {
+ .attrs = qedr_attributes,
+};
+
+static const struct ib_device_ops qedr_iw_dev_ops = {
+ .get_port_immutable = qedr_iw_port_immutable,
+ .iw_accept = qedr_iw_accept,
+ .iw_add_ref = qedr_iw_qp_add_ref,
+ .iw_connect = qedr_iw_connect,
+ .iw_create_listen = qedr_iw_create_listen,
+ .iw_destroy_listen = qedr_iw_destroy_listen,
+ .iw_get_qp = qedr_iw_get_qp,
+ .iw_reject = qedr_iw_reject,
+ .iw_rem_ref = qedr_iw_qp_rem_ref,
+ .query_gid = qedr_iw_query_gid,
+};
+
+static int qedr_iw_register_device(struct qedr_dev *dev)
+{
+ dev->ibdev.node_type = RDMA_NODE_RNIC;
+
+ ib_set_device_ops(&dev->ibdev, &qedr_iw_dev_ops);
+
+ memcpy(dev->ibdev.iw_ifname,
+ dev->ndev->name, sizeof(dev->ibdev.iw_ifname));
+
+ return 0;
+}
+
+static const struct ib_device_ops qedr_roce_dev_ops = {
+ .alloc_xrcd = qedr_alloc_xrcd,
+ .dealloc_xrcd = qedr_dealloc_xrcd,
+ .get_port_immutable = qedr_roce_port_immutable,
+ .query_pkey = qedr_query_pkey,
+};
+
+static void qedr_roce_register_device(struct qedr_dev *dev)
+{
+ dev->ibdev.node_type = RDMA_NODE_IB_CA;
+
+ ib_set_device_ops(&dev->ibdev, &qedr_roce_dev_ops);
+}
+
+static const struct ib_device_ops qedr_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_QEDR,
+ .uverbs_abi_ver = QEDR_ABI_VERSION,
+
+ .alloc_mr = qedr_alloc_mr,
+ .alloc_pd = qedr_alloc_pd,
+ .alloc_ucontext = qedr_alloc_ucontext,
+ .create_ah = qedr_create_ah,
+ .create_cq = qedr_create_cq,
+ .create_qp = qedr_create_qp,
+ .create_srq = qedr_create_srq,
+ .dealloc_pd = qedr_dealloc_pd,
+ .dealloc_ucontext = qedr_dealloc_ucontext,
+ .dereg_mr = qedr_dereg_mr,
+ .destroy_ah = qedr_destroy_ah,
+ .destroy_cq = qedr_destroy_cq,
+ .destroy_qp = qedr_destroy_qp,
+ .destroy_srq = qedr_destroy_srq,
+ .device_group = &qedr_attr_group,
+ .get_dev_fw_str = qedr_get_dev_fw_str,
+ .get_dma_mr = qedr_get_dma_mr,
+ .get_link_layer = qedr_link_layer,
+ .map_mr_sg = qedr_map_mr_sg,
+ .mmap = qedr_mmap,
+ .mmap_free = qedr_mmap_free,
+ .modify_qp = qedr_modify_qp,
+ .modify_srq = qedr_modify_srq,
+ .poll_cq = qedr_poll_cq,
+ .post_recv = qedr_post_recv,
+ .post_send = qedr_post_send,
+ .post_srq_recv = qedr_post_srq_recv,
+ .process_mad = qedr_process_mad,
+ .query_device = qedr_query_device,
+ .query_port = qedr_query_port,
+ .query_qp = qedr_query_qp,
+ .query_srq = qedr_query_srq,
+ .reg_user_mr = qedr_reg_user_mr,
+ .req_notify_cq = qedr_arm_cq,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, qedr_qp, ibqp),
+ INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq),
+ INIT_RDMA_OBJ_SIZE(ib_xrcd, qedr_xrcd, ibxrcd),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),
+};
+
+static int qedr_register_device(struct qedr_dev *dev)
+{
+ int rc;
+
+ dev->ibdev.node_guid = dev->attr.node_guid;
+ memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));
+
+ if (IS_IWARP(dev)) {
+ rc = qedr_iw_register_device(dev);
+ if (rc)
+ return rc;
+ } else {
+ qedr_roce_register_device(dev);
+ }
+
+ dev->ibdev.phys_port_cnt = 1;
+ dev->ibdev.num_comp_vectors = dev->num_cnq;
+ dev->ibdev.dev.parent = &dev->pdev->dev;
+
+ ib_set_device_ops(&dev->ibdev, &qedr_dev_ops);
+
+ rc = ib_device_set_netdev(&dev->ibdev, dev->ndev, 1);
+ if (rc)
+ return rc;
+
+ dma_set_max_seg_size(&dev->pdev->dev, UINT_MAX);
+ return ib_register_device(&dev->ibdev, "qedr%d", &dev->pdev->dev);
+}
+
+/* This function allocates fast-path status block memory */
+static int qedr_alloc_mem_sb(struct qedr_dev *dev,
+ struct qed_sb_info *sb_info, u16 sb_id)
+{
+ struct status_block *sb_virt;
+ dma_addr_t sb_phys;
+ int rc;
+
+ sb_virt = dma_alloc_coherent(&dev->pdev->dev,
+ sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
+ if (!sb_virt)
+ return -ENOMEM;
+
+ rc = dev->ops->common->sb_init(dev->cdev, sb_info,
+ sb_virt, sb_phys, sb_id,
+ QED_SB_TYPE_CNQ);
+ if (rc) {
+ pr_err("Status block initialization failed\n");
+ dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
+ sb_virt, sb_phys);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void qedr_free_mem_sb(struct qedr_dev *dev,
+ struct qed_sb_info *sb_info, int sb_id)
+{
+ if (sb_info->sb_virt) {
+ dev->ops->common->sb_release(dev->cdev, sb_info, sb_id,
+ QED_SB_TYPE_CNQ);
+ dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
+ (void *)sb_info->sb_virt, sb_info->sb_phys);
+ }
+}
+
+static void qedr_free_resources(struct qedr_dev *dev)
+{
+ int i;
+
+ if (IS_IWARP(dev))
+ destroy_workqueue(dev->iwarp_wq);
+
+ for (i = 0; i < dev->num_cnq; i++) {
+ qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
+ dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
+ }
+
+ kfree(dev->cnq_array);
+ kfree(dev->sb_array);
+ kfree(dev->sgid_tbl);
+}
+
+static int qedr_alloc_resources(struct qedr_dev *dev)
+{
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_CONSUME,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ .elem_size = sizeof(struct regpair *),
+ };
+ struct qedr_cnq *cnq;
+ __le16 *cons_pi;
+ int i, rc;
+
+ dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid),
+ GFP_KERNEL);
+ if (!dev->sgid_tbl)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->sgid_lock);
+ xa_init_flags(&dev->srqs, XA_FLAGS_LOCK_IRQ);
+
+ if (IS_IWARP(dev)) {
+ xa_init(&dev->qps);
+ dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
+ if (!dev->iwarp_wq) {
+ rc = -ENOMEM;
+ goto err1;
+ }
+ }
+
+ /* Allocate Status blocks for CNQ */
+ dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
+ GFP_KERNEL);
+ if (!dev->sb_array) {
+ rc = -ENOMEM;
+ goto err_destroy_wq;
+ }
+
+ dev->cnq_array = kcalloc(dev->num_cnq,
+ sizeof(*dev->cnq_array), GFP_KERNEL);
+ if (!dev->cnq_array) {
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);
+
+ /* Allocate CNQ PBLs */
+ params.num_elems = min_t(u32, QED_RDMA_MAX_CNQ_SIZE,
+ QEDR_ROCE_MAX_CNQ_SIZE);
+
+ for (i = 0; i < dev->num_cnq; i++) {
+ cnq = &dev->cnq_array[i];
+
+ rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
+ dev->sb_start + i);
+ if (rc)
+ goto err3;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev, &cnq->pbl,
+ &params);
+ if (rc)
+ goto err4;
+
+ cnq->dev = dev;
+ cnq->sb = &dev->sb_array[i];
+ cons_pi = dev->sb_array[i].sb_virt->pi_array;
+ cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
+ cnq->index = i;
+ sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
+ i, qed_chain_get_cons_idx(&cnq->pbl));
+ }
+
+ return 0;
+err4:
+ qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
+err3:
+ for (--i; i >= 0; i--) {
+ dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
+ qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
+ }
+ kfree(dev->cnq_array);
+err2:
+ kfree(dev->sb_array);
+err_destroy_wq:
+ if (IS_IWARP(dev))
+ destroy_workqueue(dev->iwarp_wq);
+err1:
+ kfree(dev->sgid_tbl);
+ return rc;
+}
+
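+/*
+ * Advertise global atomic support only when 64-bit PCIe AtomicOp requests
+ * can be completed along the path to the root port; otherwise report
+ * IB_ATOMIC_NONE.
+ */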
+static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
+{
+ int rc = pci_enable_atomic_ops_to_root(pdev,
+ PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+
+ if (rc) {
+ dev->atomic_cap = IB_ATOMIC_NONE;
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
+ } else {
+ dev->atomic_cap = IB_ATOMIC_GLOB;
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
+ }
+}
+
+static const struct qed_rdma_ops *qed_ops;
+
+#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
+
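+/*
+ * Each consumed CNQ element carries the owning CQ's kernel pointer as a
+ * hi/lo regpair; HILO_U64() reassembles it before the completion handler
+ * is invoked.
+ */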
+static irqreturn_t qedr_irq_handler(int irq, void *handle)
+{
+ u16 hw_comp_cons, sw_comp_cons;
+ struct qedr_cnq *cnq = handle;
+ struct regpair *cq_handle;
+ struct qedr_cq *cq;
+
+ qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);
+
+ qed_sb_update_sb_idx(cnq->sb);
+
+ hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
+
+ /* Align protocol-index and chain reads */
+ rmb();
+
+ while (sw_comp_cons != hw_comp_cons) {
+ cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
+ cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
+ cq_handle->lo);
+
+ if (cq == NULL) {
+ DP_ERR(cnq->dev,
+ "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
+ cq_handle->hi, cq_handle->lo, sw_comp_cons,
+ hw_comp_cons);
+
+ break;
+ }
+
+ if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
+ DP_ERR(cnq->dev,
+ "Problem with cq signature, cq_handle->hi=%d ch_handle->lo=%d cq=%p\n",
+ cq_handle->hi, cq_handle->lo, cq);
+ break;
+ }
+
+ cq->arm_flags = 0;
+
+ if (!cq->destroyed && cq->ibcq.comp_handler)
+ (*cq->ibcq.comp_handler)
+ (&cq->ibcq, cq->ibcq.cq_context);
+
+ /* The CQ's CNQ notification counter is checked before
+ * destroying the CQ in a busy-wait loop that waits for all of
+ * the CQ's CNQ interrupts to be processed. It is increased
+ * here, only after the completion handler, to ensure that
+ * the handler is not running when the CQ is destroyed.
+ */
+ cq->cnq_notif++;
+
+ sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
+
+ cnq->n_comp++;
+ }
+
+ qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
+ sw_comp_cons);
+
+ qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);
+
+ return IRQ_HANDLED;
+}
+
+static void qedr_sync_free_irqs(struct qedr_dev *dev)
+{
+ u32 vector;
+ u16 idx;
+ int i;
+
+ for (i = 0; i < dev->int_info.used_cnt; i++) {
+ if (dev->int_info.msix_cnt) {
+ idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
+ vector = dev->int_info.msix[idx].vector;
+ free_irq(vector, &dev->cnq_array[i]);
+ }
+ }
+
+ dev->int_info.used_cnt = 0;
+}
+
+static int qedr_req_msix_irqs(struct qedr_dev *dev)
+{
+ int i, rc = 0;
+ u16 idx;
+
+ if (dev->num_cnq > dev->int_info.msix_cnt) {
+ DP_ERR(dev,
+ "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n",
+ dev->num_cnq, dev->int_info.msix_cnt);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dev->num_cnq; i++) {
+ idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
+ rc = request_irq(dev->int_info.msix[idx].vector,
+ qedr_irq_handler, 0, dev->cnq_array[i].name,
+ &dev->cnq_array[i]);
+ if (rc) {
+ DP_ERR(dev, "Request cnq %d irq failed\n", i);
+ qedr_sync_free_irqs(dev);
+ } else {
+ DP_DEBUG(dev, QEDR_MSG_INIT,
+ "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
+ dev->cnq_array[i].name, i,
+ &dev->cnq_array[i]);
+ dev->int_info.used_cnt++;
+ }
+ }
+
+ return rc;
+}
+
+static int qedr_setup_irqs(struct qedr_dev *dev)
+{
+ int rc;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");
+
+ /* Learn Interrupt configuration */
+ rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
+ if (rc < 0)
+ return rc;
+
+ rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
+ if (rc) {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
+ return rc;
+ }
+
+ if (dev->int_info.msix_cnt) {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
+ dev->int_info.msix_cnt);
+ rc = qedr_req_msix_irqs(dev);
+ if (rc)
+ return rc;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");
+
+ return 0;
+}
+
+static int qedr_set_device_attr(struct qedr_dev *dev)
+{
+ struct qed_rdma_device *qed_attr;
+ struct qedr_device_attr *attr;
+ u32 page_size;
+
+ /* Part 1 - query core capabilities */
+ qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);
+
+ /* Part 2 - check capabilities */
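+	/*
+	 * ~caps + 1 is the two's complement of the capability mask; for a
+	 * mask that is contiguous from the minimum supported page size
+	 * upward, it equals that minimum.
+	 */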
+ page_size = ~qed_attr->page_size_caps + 1;
+ if (page_size > PAGE_SIZE) {
+ DP_ERR(dev,
+ "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
+ PAGE_SIZE, page_size);
+ return -ENODEV;
+ }
+
+ /* Part 3 - copy and update capabilities */
+ attr = &dev->attr;
+ attr->vendor_id = qed_attr->vendor_id;
+ attr->vendor_part_id = qed_attr->vendor_part_id;
+ attr->hw_ver = qed_attr->hw_ver;
+ attr->fw_ver = qed_attr->fw_ver;
+ attr->node_guid = qed_attr->node_guid;
+ attr->sys_image_guid = qed_attr->sys_image_guid;
+ attr->max_cnq = qed_attr->max_cnq;
+ attr->max_sge = qed_attr->max_sge;
+ attr->max_inline = qed_attr->max_inline;
+ attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
+ attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
+ attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
+ attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
+ attr->max_dev_resp_rd_atomic_resc =
+ qed_attr->max_dev_resp_rd_atomic_resc;
+ attr->max_cq = qed_attr->max_cq;
+ attr->max_qp = qed_attr->max_qp;
+ attr->max_mr = qed_attr->max_mr;
+ attr->max_mr_size = qed_attr->max_mr_size;
+ attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
+ attr->max_mw = qed_attr->max_mw;
+ attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
+ attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
+ attr->max_pd = qed_attr->max_pd;
+ attr->max_ah = qed_attr->max_ah;
+ attr->max_pkey = qed_attr->max_pkey;
+ attr->max_srq = qed_attr->max_srq;
+ attr->max_srq_wr = qed_attr->max_srq_wr;
+ attr->dev_caps = qed_attr->dev_caps;
+ attr->page_size_caps = qed_attr->page_size_caps;
+ attr->dev_ack_delay = qed_attr->dev_ack_delay;
+ attr->reserved_lkey = qed_attr->reserved_lkey;
+ attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
+ attr->max_stats_queues = qed_attr->max_stats_queues;
+
+ return 0;
+}
+
+static void qedr_unaffiliated_event(void *context, u8 event_code)
+{
+ pr_err("unaffiliated event not implemented yet\n");
+}
+
+static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
+{
+#define EVENT_TYPE_NOT_DEFINED 0
+#define EVENT_TYPE_CQ 1
+#define EVENT_TYPE_QP 2
+#define EVENT_TYPE_SRQ 3
+ struct qedr_dev *dev = (struct qedr_dev *)context;
+ struct regpair *async_handle = (struct regpair *)fw_handle;
+ u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
+ u8 event_type = EVENT_TYPE_NOT_DEFINED;
+ struct ib_event event;
+ struct ib_srq *ibsrq;
+ struct qedr_srq *srq;
+ unsigned long flags;
+ struct ib_cq *ibcq;
+ struct ib_qp *ibqp;
+ struct qedr_cq *cq;
+ struct qedr_qp *qp;
+ u16 srq_id;
+
+ if (IS_ROCE(dev)) {
+ switch (e_code) {
+ case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
+ event.event = IB_EVENT_CQ_ERR;
+ event_type = EVENT_TYPE_CQ;
+ break;
+ case ROCE_ASYNC_EVENT_SQ_DRAINED:
+ event.event = IB_EVENT_SQ_DRAINED;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
+ event.event = IB_EVENT_QP_FATAL;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
+ event.event = IB_EVENT_QP_REQ_ERR;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_SRQ_LIMIT:
+ event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ event_type = EVENT_TYPE_SRQ;
+ break;
+ case ROCE_ASYNC_EVENT_SRQ_EMPTY:
+ event.event = IB_EVENT_SRQ_ERR;
+ event_type = EVENT_TYPE_SRQ;
+ break;
+ case ROCE_ASYNC_EVENT_XRC_DOMAIN_ERR:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_INVALID_XRCETH_ERR:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_XRC_SRQ_CATASTROPHIC_ERR:
+ event.event = IB_EVENT_CQ_ERR;
+ event_type = EVENT_TYPE_CQ;
+ break;
+ default:
+ DP_ERR(dev, "unsupported event %d on handle=%llx\n",
+ e_code, roce_handle64);
+ }
+ } else {
+ switch (e_code) {
+ case QED_IWARP_EVENT_SRQ_LIMIT:
+ event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ event_type = EVENT_TYPE_SRQ;
+ break;
+ case QED_IWARP_EVENT_SRQ_EMPTY:
+ event.event = IB_EVENT_SRQ_ERR;
+ event_type = EVENT_TYPE_SRQ;
+ break;
+ default:
+ DP_ERR(dev, "unsupported event %d on handle=%llx\n", e_code,
+ roce_handle64);
+ }
+ }
+ switch (event_type) {
+ case EVENT_TYPE_CQ:
+ cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
+ if (cq) {
+ ibcq = &cq->ibcq;
+ if (ibcq->event_handler) {
+ event.device = ibcq->device;
+ event.element.cq = ibcq;
+ ibcq->event_handler(&event, ibcq->cq_context);
+ }
+ } else {
+ WARN(1,
+ "Error: CQ event with NULL pointer ibcq. Handle=%llx\n",
+ roce_handle64);
+ }
+ DP_ERR(dev, "CQ event %d on handle %p\n", e_code, cq);
+ break;
+ case EVENT_TYPE_QP:
+ qp = (struct qedr_qp *)(uintptr_t)roce_handle64;
+ if (qp) {
+ ibqp = &qp->ibqp;
+ if (ibqp->event_handler) {
+ event.device = ibqp->device;
+ event.element.qp = ibqp;
+ ibqp->event_handler(&event, ibqp->qp_context);
+ }
+ } else {
+ WARN(1,
+ "Error: QP event with NULL pointer ibqp. Handle=%llx\n",
+ roce_handle64);
+ }
+ DP_ERR(dev, "QP event %d on handle %p\n", e_code, qp);
+ break;
+ case EVENT_TYPE_SRQ:
+ srq_id = (u16)roce_handle64;
+ xa_lock_irqsave(&dev->srqs, flags);
+ srq = xa_load(&dev->srqs, srq_id);
+ if (srq) {
+ ibsrq = &srq->ibsrq;
+ if (ibsrq->event_handler) {
+ event.device = ibsrq->device;
+ event.element.srq = ibsrq;
+ ibsrq->event_handler(&event,
+ ibsrq->srq_context);
+ }
+ } else {
+ DP_NOTICE(dev,
+ "SRQ event with NULL pointer ibsrq. Handle=%llx\n",
+ roce_handle64);
+ }
+ xa_unlock_irqrestore(&dev->srqs, flags);
+ DP_NOTICE(dev, "SRQ event %d on handle %p\n", e_code, srq);
+ break;
+ default:
+ break;
+ }
+}
+
+static int qedr_init_hw(struct qedr_dev *dev)
+{
+ struct qed_rdma_add_user_out_params out_params;
+ struct qed_rdma_start_in_params *in_params;
+ struct qed_rdma_cnq_params *cur_pbl;
+ struct qed_rdma_events events;
+ dma_addr_t p_phys_table;
+ u32 page_cnt;
+ int rc = 0;
+ int i;
+
+ in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
+ if (!in_params) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ in_params->desired_cnq = dev->num_cnq;
+ for (i = 0; i < dev->num_cnq; i++) {
+ cur_pbl = &in_params->cnq_pbl_list[i];
+
+ page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
+ cur_pbl->num_pbl_pages = page_cnt;
+
+ p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
+ cur_pbl->pbl_ptr = (u64)p_phys_table;
+ }
+
+ events.affiliated_event = qedr_affiliated_event;
+ events.unaffiliated_event = qedr_unaffiliated_event;
+ events.context = dev;
+
+ in_params->events = &events;
+ in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
+ in_params->max_mtu = dev->ndev->mtu;
+ dev->iwarp_max_mtu = dev->ndev->mtu;
+ ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);
+
+ rc = dev->ops->rdma_init(dev->cdev, in_params);
+ if (rc)
+ goto out;
+
+ rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
+ if (rc)
+ goto out;
+
+ dev->db_addr = out_params.dpi_addr;
+ dev->db_phys_addr = out_params.dpi_phys_addr;
+ dev->db_size = out_params.dpi_size;
+ dev->dpi = out_params.dpi;
+
+ rc = qedr_set_device_attr(dev);
+out:
+ kfree(in_params);
+ if (rc)
+ DP_ERR(dev, "Init HW Failed rc = %d\n", rc);
+
+ return rc;
+}
+
+static void qedr_stop_hw(struct qedr_dev *dev)
+{
+ dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
+ dev->ops->rdma_stop(dev->rdma_ctx);
+}
+
+static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
+ struct net_device *ndev)
+{
+ struct qed_dev_rdma_info dev_info;
+ struct qedr_dev *dev;
+ int rc = 0;
+
+ dev = ib_alloc_device(qedr_dev, ibdev);
+ if (!dev) {
+ pr_err("Unable to allocate ib device\n");
+ return NULL;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");
+
+ dev->pdev = pdev;
+ dev->ndev = ndev;
+ dev->cdev = cdev;
+
+ qed_ops = qed_get_rdma_ops();
+ if (!qed_ops) {
+ DP_ERR(dev, "Failed to get qed roce operations\n");
+ goto init_err;
+ }
+
+ dev->ops = qed_ops;
+ rc = qed_ops->fill_dev_info(cdev, &dev_info);
+ if (rc)
+ goto init_err;
+
+ dev->user_dpm_enabled = dev_info.user_dpm_enabled;
+ dev->rdma_type = dev_info.rdma_type;
+ dev->num_hwfns = dev_info.common.num_hwfns;
+
+ if (IS_IWARP(dev) && QEDR_IS_CMT(dev)) {
+ rc = dev->ops->iwarp_set_engine_affin(cdev, false);
+ if (rc) {
+ DP_ERR(dev, "iWARP is disabled over a 100g device Enabling it may impact L2 performance. To enable it run devlink dev param set <dev> name iwarp_cmt value true cmode runtime\n");
+ goto init_err;
+ }
+ }
+ dev->affin_hwfn_idx = dev->ops->common->get_affin_hwfn_idx(cdev);
+
+ dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);
+
+ dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
+ if (!dev->num_cnq) {
+ DP_ERR(dev, "Failed. At least one CNQ is required.\n");
+ rc = -ENOMEM;
+ goto init_err;
+ }
+
+ dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;
+
+ qedr_pci_set_atomic(dev, pdev);
+
+ rc = qedr_alloc_resources(dev);
+ if (rc)
+ goto init_err;
+
+ rc = qedr_init_hw(dev);
+ if (rc)
+ goto alloc_err;
+
+ rc = qedr_setup_irqs(dev);
+ if (rc)
+ goto irq_err;
+
+ rc = qedr_register_device(dev);
+ if (rc) {
+ DP_ERR(dev, "Unable to allocate register device\n");
+ goto reg_err;
+ }
+
+ if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
+ return dev;
+
+reg_err:
+ qedr_sync_free_irqs(dev);
+irq_err:
+ qedr_stop_hw(dev);
+alloc_err:
+ qedr_free_resources(dev);
+init_err:
+ ib_dealloc_device(&dev->ibdev);
+ DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);
+
+ return NULL;
+}
+
+static void qedr_remove(struct qedr_dev *dev)
+{
+ /* First unregister with stack to stop all the active traffic
+ * of the registered clients.
+ */
+ ib_unregister_device(&dev->ibdev);
+
+ qedr_stop_hw(dev);
+ qedr_sync_free_irqs(dev);
+ qedr_free_resources(dev);
+
+ if (IS_IWARP(dev) && QEDR_IS_CMT(dev))
+ dev->ops->iwarp_set_engine_affin(dev->cdev, true);
+
+ ib_dealloc_device(&dev->ibdev);
+}
+
+static void qedr_close(struct qedr_dev *dev)
+{
+ if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
+}
+
+static void qedr_shutdown(struct qedr_dev *dev)
+{
+ qedr_close(dev);
+ qedr_remove(dev);
+}
+
+static void qedr_open(struct qedr_dev *dev)
+{
+ if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+}
+
+static void qedr_mac_address_change(struct qedr_dev *dev)
+{
+ union ib_gid *sgid = &dev->sgid_tbl[0];
+ u8 guid[8], mac_addr[6];
+ int rc;
+
+ /* Update SGID */
+ ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr);
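+	/*
+	 * Build a modified EUI-64 interface ID from the MAC: flip the
+	 * universal/local bit and insert 0xff/0xfe between the OUI and the
+	 * NIC-specific bytes, then pair it with the link-local prefix.
+	 */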
+ guid[0] = mac_addr[0] ^ 2;
+ guid[1] = mac_addr[1];
+ guid[2] = mac_addr[2];
+ guid[3] = 0xff;
+ guid[4] = 0xfe;
+ guid[5] = mac_addr[3];
+ guid[6] = mac_addr[4];
+ guid[7] = mac_addr[5];
+ sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+ memcpy(&sgid->raw[8], guid, sizeof(guid));
+
+ /* Update LL2 */
+ rc = dev->ops->ll2_set_mac_filter(dev->cdev,
+ dev->gsi_ll2_mac_address,
+ dev->ndev->dev_addr);
+
+ ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
+
+ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);
+
+ if (rc)
+ DP_ERR(dev, "Error updating mac filter\n");
+}
+
+/* Event handling via the NIC driver ensures that all NIC-specific
+ * initialization is done before the RoCE driver notifies the
+ * event to the stack.
+ */
+static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event)
+{
+ switch (event) {
+ case QEDE_UP:
+ qedr_open(dev);
+ break;
+ case QEDE_DOWN:
+ qedr_close(dev);
+ break;
+ case QEDE_CLOSE:
+ qedr_shutdown(dev);
+ break;
+ case QEDE_CHANGE_ADDR:
+ qedr_mac_address_change(dev);
+ break;
+ case QEDE_CHANGE_MTU:
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ if (dev->ndev->mtu != dev->iwarp_max_mtu)
+ DP_NOTICE(dev,
+ "Mtu was changed from %d to %d. This will not take affect for iWARP until qedr is reloaded\n",
+ dev->iwarp_max_mtu, dev->ndev->mtu);
+ break;
+ default:
+ pr_err("Event not supported\n");
+ }
+}
+
+static struct qedr_driver qedr_drv = {
+ .name = "qedr_driver",
+ .add = qedr_add,
+ .remove = qedr_remove,
+ .notify = qedr_notify,
+};
+
+static int __init qedr_init_module(void)
+{
+ return qede_rdma_register_driver(&qedr_drv);
+}
+
+static void __exit qedr_exit_module(void)
+{
+ qede_rdma_unregister_driver(&qedr_drv);
+}
+
+module_init(qedr_init_module);
+module_exit(qedr_exit_module);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
new file mode 100644
index 000000000000..db9ef3e1eb97
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -0,0 +1,642 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QEDR_H__
+#define __QEDR_H__
+
+#include <linux/pci.h>
+#include <linux/xarray.h>
+#include <rdma/ib_addr.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_rdma_if.h>
+#include <linux/qed/qede_rdma.h>
+#include <linux/qed/roce_common.h>
+#include <linux/completion.h>
+#include "qedr_hsi_rdma.h"
+
+#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
+#define DP_NAME(_dev) dev_name(&(_dev)->ibdev.dev)
+#define IS_IWARP(_dev) ((_dev)->rdma_type == QED_RDMA_TYPE_IWARP)
+#define IS_ROCE(_dev) ((_dev)->rdma_type == QED_RDMA_TYPE_ROCE)
+
+#define DP_DEBUG(dev, module, fmt, ...) \
+ pr_debug("(%s) " module ": " fmt, \
+ DP_NAME(dev) ? DP_NAME(dev) : "", ## __VA_ARGS__)
+
+#define QEDR_MSG_INIT "INIT"
+#define QEDR_MSG_MISC "MISC"
+#define QEDR_MSG_CQ " CQ"
+#define QEDR_MSG_MR " MR"
+#define QEDR_MSG_RQ " RQ"
+#define QEDR_MSG_SQ " SQ"
+#define QEDR_MSG_QP " QP"
+#define QEDR_MSG_SRQ " SRQ"
+#define QEDR_MSG_GSI " GSI"
+#define QEDR_MSG_IWARP " IW"
+
+#define QEDR_CQ_MAGIC_NUMBER (0x11223344)
+
+#define FW_PAGE_SIZE (RDMA_RING_PAGE_SIZE)
+#define FW_PAGE_SHIFT (12)
+
+struct qedr_dev;
+
+struct qedr_cnq {
+ struct qedr_dev *dev;
+ struct qed_chain pbl;
+ struct qed_sb_info *sb;
+ char name[32];
+ u64 n_comp;
+ __le16 *hw_cons_ptr;
+ u8 index;
+};
+
+#define QEDR_MAX_SGID 128
+
+struct qedr_device_attr {
+ u32 vendor_id;
+ u32 vendor_part_id;
+ u32 hw_ver;
+ u64 fw_ver;
+ u64 node_guid;
+ u64 sys_image_guid;
+ u8 max_cnq;
+ u8 max_sge;
+ u16 max_inline;
+ u32 max_sqe;
+ u32 max_rqe;
+ u8 max_qp_resp_rd_atomic_resc;
+ u8 max_qp_req_rd_atomic_resc;
+ u64 max_dev_resp_rd_atomic_resc;
+ u32 max_cq;
+ u32 max_qp;
+ u32 max_mr;
+ u64 max_mr_size;
+ u32 max_cqe;
+ u32 max_mw;
+ u32 max_mr_mw_fmr_pbl;
+ u64 max_mr_mw_fmr_size;
+ u32 max_pd;
+ u32 max_ah;
+ u8 max_pkey;
+ u32 max_srq;
+ u32 max_srq_wr;
+ u8 max_srq_sge;
+ u8 max_stats_queues;
+ u32 dev_caps;
+
+ u64 page_size_caps;
+ u8 dev_ack_delay;
+ u32 reserved_lkey;
+ u32 bad_pkey_counter;
+ struct qed_rdma_events events;
+};
+
+#define QEDR_ENET_STATE_BIT (0)
+
+struct qedr_dev {
+ struct ib_device ibdev;
+ struct qed_dev *cdev;
+ struct pci_dev *pdev;
+ struct net_device *ndev;
+
+ enum ib_atomic_cap atomic_cap;
+
+ void *rdma_ctx;
+ struct qedr_device_attr attr;
+
+ const struct qed_rdma_ops *ops;
+ struct qed_int_info int_info;
+
+ struct qed_sb_info *sb_array;
+ struct qedr_cnq *cnq_array;
+ int num_cnq;
+ int sb_start;
+
+ void __iomem *db_addr;
+ u64 db_phys_addr;
+ u32 db_size;
+ u16 dpi;
+
+ union ib_gid *sgid_tbl;
+
+ /* Lock for sgid table */
+ spinlock_t sgid_lock;
+
+ u64 guid;
+
+ u32 dp_module;
+ u8 dp_level;
+ u8 num_hwfns;
+#define QEDR_IS_CMT(dev) ((dev)->num_hwfns > 1)
+ u8 affin_hwfn_idx;
+ u8 gsi_ll2_handle;
+
+ uint wq_multiplier;
+ u8 gsi_ll2_mac_address[ETH_ALEN];
+ int gsi_qp_created;
+ struct qedr_cq *gsi_sqcq;
+ struct qedr_cq *gsi_rqcq;
+ struct qedr_qp *gsi_qp;
+ enum qed_rdma_type rdma_type;
+ struct xarray qps;
+ struct xarray srqs;
+ struct workqueue_struct *iwarp_wq;
+ u16 iwarp_max_mtu;
+
+ unsigned long enet_state;
+
+ u8 user_dpm_enabled;
+};
+
+#define QEDR_MAX_SQ_PBL (0x8000)
+#define QEDR_MAX_SQ_PBL_ENTRIES (0x10000 / sizeof(void *))
+#define QEDR_SQE_ELEMENT_SIZE (sizeof(struct rdma_sq_sge))
+#define QEDR_MAX_SQE_ELEMENTS_PER_SQE (ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE / \
+ QEDR_SQE_ELEMENT_SIZE)
+#define QEDR_MAX_SQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \
+ QEDR_SQE_ELEMENT_SIZE)
+#define QEDR_MAX_SQE ((QEDR_MAX_SQ_PBL_ENTRIES) *\
+ (RDMA_RING_PAGE_SIZE) / \
+ (QEDR_SQE_ELEMENT_SIZE) /\
+ (QEDR_MAX_SQE_ELEMENTS_PER_SQE))
+/* RQ */
+#define QEDR_MAX_RQ_PBL (0x2000)
+#define QEDR_MAX_RQ_PBL_ENTRIES (0x10000 / sizeof(void *))
+#define QEDR_RQE_ELEMENT_SIZE (sizeof(struct rdma_rq_sge))
+#define QEDR_MAX_RQE_ELEMENTS_PER_RQE (RDMA_MAX_SGE_PER_RQ_WQE)
+#define QEDR_MAX_RQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \
+ QEDR_RQE_ELEMENT_SIZE)
+#define QEDR_MAX_RQE ((QEDR_MAX_RQ_PBL_ENTRIES) *\
+ (RDMA_RING_PAGE_SIZE) / \
+ (QEDR_RQE_ELEMENT_SIZE) /\
+ (QEDR_MAX_RQE_ELEMENTS_PER_RQE))
+
+#define QEDR_CQE_SIZE (sizeof(union rdma_cqe))
+#define QEDR_MAX_CQE_PBL_SIZE (512 * 1024)
+#define QEDR_MAX_CQE_PBL_ENTRIES (((QEDR_MAX_CQE_PBL_SIZE) / \
+ sizeof(u64)) - 1)
+#define QEDR_MAX_CQES ((u32)((QEDR_MAX_CQE_PBL_ENTRIES) * \
+ (QED_CHAIN_PAGE_SIZE) / QEDR_CQE_SIZE))
+
+#define QEDR_ROCE_MAX_CNQ_SIZE (0x4000)
+
+#define QEDR_MAX_PORT (1)
+#define QEDR_PORT (1)
+
+#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
+
+#define QEDR_ROCE_PKEY_MAX 1
+#define QEDR_ROCE_PKEY_TABLE_LEN 1
+#define QEDR_ROCE_PKEY_DEFAULT 0xffff
+
+struct qedr_pbl {
+ struct list_head list_entry;
+ void *va;
+ dma_addr_t pa;
+};
+
+struct qedr_ucontext {
+ struct ib_ucontext ibucontext;
+ struct qedr_dev *dev;
+ struct qedr_pd *pd;
+ void __iomem *dpi_addr;
+ struct rdma_user_mmap_entry *db_mmap_entry;
+ u64 dpi_phys_addr;
+ u32 dpi_size;
+ u16 dpi;
+ bool db_rec;
+ u8 edpm_mode;
+};
+
+union db_prod32 {
+ struct rdma_pwm_val16_data data;
+ u32 raw;
+};
+
+union db_prod64 {
+ struct rdma_pwm_val32_data data;
+ u64 raw;
+};
+
+enum qedr_cq_type {
+ QEDR_CQ_TYPE_GSI,
+ QEDR_CQ_TYPE_KERNEL,
+ QEDR_CQ_TYPE_USER,
+};
+
+struct qedr_pbl_info {
+ u32 num_pbls;
+ u32 num_pbes;
+ u32 pbl_size;
+ u32 pbe_size;
+ bool two_layered;
+};
+
+struct qedr_userq {
+ struct ib_umem *umem;
+ struct qedr_pbl_info pbl_info;
+ struct qedr_pbl *pbl_tbl;
+ u64 buf_addr;
+ size_t buf_len;
+
+ /* doorbell recovery */
+ void __iomem *db_addr;
+ struct qedr_user_db_rec *db_rec_data;
+ struct rdma_user_mmap_entry *db_mmap_entry;
+ void __iomem *db_rec_db2_addr;
+ union db_prod32 db_rec_db2_data;
+};
+
+struct qedr_cq {
+ struct ib_cq ibcq;
+
+ enum qedr_cq_type cq_type;
+ u32 sig;
+
+ u16 icid;
+
+	/* Lock to protect multiple CQs */
+ spinlock_t cq_lock;
+ u8 arm_flags;
+ struct qed_chain pbl;
+
+ void __iomem *db_addr;
+ union db_prod64 db;
+
+ u8 pbl_toggle;
+ union rdma_cqe *latest_cqe;
+ union rdma_cqe *toggle_cqe;
+
+ u32 cq_cons;
+
+ struct qedr_userq q;
+ u8 destroyed;
+ u16 cnq_notif;
+};
+
+struct qedr_pd {
+ struct ib_pd ibpd;
+ u32 pd_id;
+ struct qedr_ucontext *uctx;
+};
+
+struct qedr_xrcd {
+ struct ib_xrcd ibxrcd;
+ u16 xrcd_id;
+};
+
+struct qedr_qp_hwq_info {
+ /* WQE Elements */
+ struct qed_chain pbl;
+ u64 p_phys_addr_tbl;
+ u32 max_sges;
+
+ /* WQE */
+ u16 prod;
+ u16 cons;
+ u16 wqe_cons;
+ u16 gsi_cons;
+ u16 max_wr;
+
+ /* DB */
+ void __iomem *db;
+ union db_prod32 db_data;
+
+ void __iomem *iwarp_db2;
+ union db_prod32 iwarp_db2_data;
+};
+
+#define QEDR_INC_SW_IDX(p_info, index) \
+ do { \
+ p_info->index = (p_info->index + 1) & \
+				qed_chain_get_capacity(p_info->pbl); \
+ } while (0)
+
+struct qedr_srq_hwq_info {
+ u32 max_sges;
+ u32 max_wr;
+ struct qed_chain pbl;
+ u64 p_phys_addr_tbl;
+ u32 wqe_prod;
+ u32 sge_prod;
+ u32 wr_prod_cnt;
+ atomic_t wr_cons_cnt;
+ u32 num_elems;
+
+ struct rdma_srq_producers *virt_prod_pair_addr;
+ dma_addr_t phy_prod_pair_addr;
+};
+
+struct qedr_srq {
+ struct ib_srq ibsrq;
+ struct qedr_dev *dev;
+
+ struct qedr_userq usrq;
+ struct qedr_srq_hwq_info hw_srq;
+ struct ib_umem *prod_umem;
+ u16 srq_id;
+ u32 srq_limit;
+ bool is_xrc;
+ /* lock to protect srq recv post */
+ spinlock_t lock;
+};
+
+enum qedr_qp_err_bitmap {
+ QEDR_QP_ERR_SQ_FULL = 1,
+ QEDR_QP_ERR_RQ_FULL = 2,
+ QEDR_QP_ERR_BAD_SR = 4,
+ QEDR_QP_ERR_BAD_RR = 8,
+ QEDR_QP_ERR_SQ_PBL_FULL = 16,
+ QEDR_QP_ERR_RQ_PBL_FULL = 32,
+};
+
+enum qedr_qp_create_type {
+ QEDR_QP_CREATE_NONE,
+ QEDR_QP_CREATE_USER,
+ QEDR_QP_CREATE_KERNEL,
+};
+
+enum qedr_iwarp_cm_flags {
+ QEDR_IWARP_CM_WAIT_FOR_CONNECT = BIT(0),
+ QEDR_IWARP_CM_WAIT_FOR_DISCONNECT = BIT(1),
+};
+
+struct qedr_qp {
+ struct ib_qp ibqp; /* must be first */
+ struct qedr_dev *dev;
+ struct qedr_qp_hwq_info sq;
+ struct qedr_qp_hwq_info rq;
+
+ u32 max_inline_data;
+
+	/* Lock for QPs */
+ spinlock_t q_lock;
+ struct qedr_cq *sq_cq;
+ struct qedr_cq *rq_cq;
+ struct qedr_srq *srq;
+ enum qed_roce_qp_state state;
+ u32 id;
+ struct qedr_pd *pd;
+ enum ib_qp_type qp_type;
+ enum qedr_qp_create_type create_type;
+ struct qed_rdma_qp *qed_qp;
+ u32 qp_id;
+ u16 icid;
+ u16 mtu;
+ int sgid_idx;
+ u32 rq_psn;
+ u32 sq_psn;
+ u32 qkey;
+ u32 dest_qp_num;
+ u8 timeout;
+
+ /* Relevant to qps created from kernel space only (ULPs) */
+ u8 prev_wqe_size;
+ u16 wqe_cons;
+ u32 err_bitmap;
+ bool signaled;
+
+ /* SQ shadow */
+ struct {
+ u64 wr_id;
+ enum ib_wc_opcode opcode;
+ u32 bytes_len;
+ u8 wqe_size;
+ bool signaled;
+ dma_addr_t icrc_mapping;
+ u32 *icrc;
+ struct qedr_mr *mr;
+ } *wqe_wr_id;
+
+ /* RQ shadow */
+ struct {
+ u64 wr_id;
+ struct ib_sge sg_list[RDMA_MAX_SGE_PER_RQ_WQE];
+ u8 wqe_size;
+
+ u8 smac[ETH_ALEN];
+ u16 vlan;
+ int rc;
+ } *rqe_wr_id;
+
+ /* Relevant to qps created from user space only (applications) */
+ struct qedr_userq usq;
+ struct qedr_userq urq;
+
+ /* synchronization objects used with iwarp ep */
+ struct kref refcnt;
+ struct completion iwarp_cm_comp;
+ struct completion qp_rel_comp;
+	unsigned long iwarp_cm_flags; /* enum qedr_iwarp_cm_flags */
+};
+
+struct qedr_ah {
+ struct ib_ah ibah;
+ struct rdma_ah_attr attr;
+};
+
+enum qedr_mr_type {
+ QEDR_MR_USER,
+ QEDR_MR_KERNEL,
+ QEDR_MR_DMA,
+ QEDR_MR_FRMR,
+};
+
+struct mr_info {
+ struct qedr_pbl *pbl_table;
+ struct qedr_pbl_info pbl_info;
+ struct list_head free_pbl_list;
+ struct list_head inuse_pbl_list;
+ u32 completed;
+ u32 completed_handled;
+};
+
+struct qedr_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+
+ struct qed_rdma_register_tid_in_params hw_mr;
+ enum qedr_mr_type type;
+
+ struct qedr_dev *dev;
+ struct mr_info info;
+
+ u64 *pages;
+ u32 npages;
+};
+
+struct qedr_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ struct qedr_dev *dev;
+ union {
+ u64 io_address;
+ void *address;
+ };
+ size_t length;
+ u16 dpi;
+ u8 mmap_flag;
+};
+
+#define SET_FIELD2(value, name, flag) ((value) |= ((flag) << (name ## _SHIFT)))
+
+#define QEDR_RESP_IMM (RDMA_CQE_RESPONDER_IMM_FLG_MASK << \
+ RDMA_CQE_RESPONDER_IMM_FLG_SHIFT)
+#define QEDR_RESP_RDMA (RDMA_CQE_RESPONDER_RDMA_FLG_MASK << \
+ RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT)
+#define QEDR_RESP_INV (RDMA_CQE_RESPONDER_INV_FLG_MASK << \
+ RDMA_CQE_RESPONDER_INV_FLG_SHIFT)
+
+static inline void qedr_inc_sw_cons(struct qedr_qp_hwq_info *info)
+{
+ info->cons = (info->cons + 1) % info->max_wr;
+ info->wqe_cons++;
+}
+
+static inline void qedr_inc_sw_prod(struct qedr_qp_hwq_info *info)
+{
+ info->prod = (info->prod + 1) % info->max_wr;
+}
+
+static inline int qedr_get_dmac(struct qedr_dev *dev,
+ struct rdma_ah_attr *ah_attr, u8 *mac_addr)
+{
+ union ib_gid zero_sgid = { { 0 } };
+ struct in6_addr in6;
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+ u8 *dmac;
+
+ if (!memcmp(&grh->dgid, &zero_sgid, sizeof(union ib_gid))) {
+ DP_ERR(dev, "Local port GID not supported\n");
+ eth_zero_addr(mac_addr);
+ return -EINVAL;
+ }
+
+ memcpy(&in6, grh->dgid.raw, sizeof(in6));
+ dmac = rdma_ah_retrieve_dmac(ah_attr);
+ if (!dmac)
+ return -EINVAL;
+ ether_addr_copy(mac_addr, dmac);
+
+ return 0;
+}
+
+struct qedr_iw_listener {
+ struct qedr_dev *dev;
+ struct iw_cm_id *cm_id;
+ int backlog;
+ void *qed_handle;
+};
+
+struct qedr_iw_ep {
+ struct qedr_dev *dev;
+ struct iw_cm_id *cm_id;
+ struct qedr_qp *qp;
+ void *qed_context;
+ struct kref refcnt;
+};
+
+static inline
+struct qedr_ucontext *get_qedr_ucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct qedr_ucontext, ibucontext);
+}
+
+static inline struct qedr_dev *get_qedr_dev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct qedr_dev, ibdev);
+}
+
+static inline struct qedr_pd *get_qedr_pd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct qedr_pd, ibpd);
+}
+
+static inline struct qedr_xrcd *get_qedr_xrcd(struct ib_xrcd *ibxrcd)
+{
+ return container_of(ibxrcd, struct qedr_xrcd, ibxrcd);
+}
+
+static inline struct qedr_cq *get_qedr_cq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct qedr_cq, ibcq);
+}
+
+static inline struct qedr_qp *get_qedr_qp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct qedr_qp, ibqp);
+}
+
+static inline struct qedr_ah *get_qedr_ah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct qedr_ah, ibah);
+}
+
+static inline struct qedr_mr *get_qedr_mr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct qedr_mr, ibmr);
+}
+
+static inline struct qedr_srq *get_qedr_srq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct qedr_srq, ibsrq);
+}
+
+static inline bool qedr_qp_has_srq(struct qedr_qp *qp)
+{
+ return qp->srq;
+}
+
+static inline bool qedr_qp_has_sq(struct qedr_qp *qp)
+{
+ if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_XRC_TGT)
+ return false;
+
+ return true;
+}
+
+static inline bool qedr_qp_has_rq(struct qedr_qp *qp)
+{
+ if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_XRC_INI ||
+ qp->qp_type == IB_QPT_XRC_TGT || qedr_qp_has_srq(qp))
+ return false;
+
+ return true;
+}
+
+static inline struct qedr_user_mmap_entry *
+get_qedr_mmap_entry(struct rdma_user_mmap_entry *rdma_entry)
+{
+ return container_of(rdma_entry, struct qedr_user_mmap_entry,
+ rdma_entry);
+}
+#endif
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
new file mode 100644
index 000000000000..228dd7d49622
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
@@ -0,0 +1,751 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QED_HSI_RDMA__
+#define __QED_HSI_RDMA__
+
+#include <linux/qed/rdma_common.h>
+
+/* rdma completion notification queue element */
+struct rdma_cnqe {
+ struct regpair cq_handle;
+};
+
+struct rdma_cqe_responder {
+ struct regpair srq_wr_id;
+ struct regpair qp_handle;
+ __le32 imm_data_or_inv_r_Key;
+ __le32 length;
+ __le32 imm_data_hi;
+ __le16 rq_cons_or_srq_id;
+ u8 flags;
+#define RDMA_CQE_RESPONDER_TOGGLE_BIT_MASK 0x1
+#define RDMA_CQE_RESPONDER_TOGGLE_BIT_SHIFT 0
+#define RDMA_CQE_RESPONDER_TYPE_MASK 0x3
+#define RDMA_CQE_RESPONDER_TYPE_SHIFT 1
+#define RDMA_CQE_RESPONDER_INV_FLG_MASK 0x1
+#define RDMA_CQE_RESPONDER_INV_FLG_SHIFT 3
+#define RDMA_CQE_RESPONDER_IMM_FLG_MASK 0x1
+#define RDMA_CQE_RESPONDER_IMM_FLG_SHIFT 4
+#define RDMA_CQE_RESPONDER_RDMA_FLG_MASK 0x1
+#define RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT 5
+#define RDMA_CQE_RESPONDER_RESERVED2_MASK 0x3
+#define RDMA_CQE_RESPONDER_RESERVED2_SHIFT 6
+ u8 status;
+};
+
+struct rdma_cqe_requester {
+ __le16 sq_cons;
+ __le16 reserved0;
+ __le32 reserved1;
+ struct regpair qp_handle;
+ struct regpair reserved2;
+ __le32 reserved3;
+ __le16 reserved4;
+ u8 flags;
+#define RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK 0x1
+#define RDMA_CQE_REQUESTER_TOGGLE_BIT_SHIFT 0
+#define RDMA_CQE_REQUESTER_TYPE_MASK 0x3
+#define RDMA_CQE_REQUESTER_TYPE_SHIFT 1
+#define RDMA_CQE_REQUESTER_RESERVED5_MASK 0x1F
+#define RDMA_CQE_REQUESTER_RESERVED5_SHIFT 3
+ u8 status;
+};
+
+struct rdma_cqe_common {
+ struct regpair reserved0;
+ struct regpair qp_handle;
+ __le16 reserved1[7];
+ u8 flags;
+#define RDMA_CQE_COMMON_TOGGLE_BIT_MASK 0x1
+#define RDMA_CQE_COMMON_TOGGLE_BIT_SHIFT 0
+#define RDMA_CQE_COMMON_TYPE_MASK 0x3
+#define RDMA_CQE_COMMON_TYPE_SHIFT 1
+#define RDMA_CQE_COMMON_RESERVED2_MASK 0x1F
+#define RDMA_CQE_COMMON_RESERVED2_SHIFT 3
+ u8 status;
+};
+
+/* rdma completion queue element */
+union rdma_cqe {
+ struct rdma_cqe_responder resp;
+ struct rdma_cqe_requester req;
+ struct rdma_cqe_common cmn;
+};
+
+/* CQE requester status enumeration */
+enum rdma_cqe_requester_status_enum {
+ RDMA_CQE_REQ_STS_OK,
+ RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR,
+ RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR,
+ RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR,
+ RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR,
+ RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR,
+ RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR,
+ RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR,
+ RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR,
+ RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR,
+ RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
+ RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
+ RDMA_CQE_REQ_STS_XRC_VOILATION_ERR,
+ RDMA_CQE_REQ_STS_SIG_ERR,
+ MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
+};
+
+/* CQE responder status enumeration */
+enum rdma_cqe_responder_status_enum {
+ RDMA_CQE_RESP_STS_OK,
+ RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR,
+ RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR,
+ RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR,
+ RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR,
+ RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR,
+ RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR,
+ RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR,
+ MAX_RDMA_CQE_RESPONDER_STATUS_ENUM
+};
+
+/* CQE type enumeration */
+enum rdma_cqe_type {
+ RDMA_CQE_TYPE_REQUESTER,
+ RDMA_CQE_TYPE_RESPONDER_RQ,
+ RDMA_CQE_TYPE_RESPONDER_SRQ,
+ RDMA_CQE_TYPE_RESPONDER_XRC_SRQ,
+ RDMA_CQE_TYPE_INVALID,
+ MAX_RDMA_CQE_TYPE
+};
+
+struct rdma_sq_sge {
+ __le32 length;
+ struct regpair addr;
+ __le32 l_key;
+};
+
+struct rdma_rq_sge {
+ struct regpair addr;
+ __le32 length;
+ __le32 flags;
+#define RDMA_RQ_SGE_L_KEY_LO_MASK 0x3FFFFFF
+#define RDMA_RQ_SGE_L_KEY_LO_SHIFT 0
+#define RDMA_RQ_SGE_NUM_SGES_MASK 0x7
+#define RDMA_RQ_SGE_NUM_SGES_SHIFT 26
+#define RDMA_RQ_SGE_L_KEY_HI_MASK 0x7
+#define RDMA_RQ_SGE_L_KEY_HI_SHIFT 29
+};
+
+struct rdma_srq_wqe_header {
+ struct regpair wr_id;
+ u8 num_sges /* number of SGEs in WQE */;
+ u8 reserved2[7];
+};
+
+struct rdma_srq_sge {
+ struct regpair addr;
+ __le32 length;
+ __le32 l_key;
+};
+
+union rdma_srq_elm {
+ struct rdma_srq_wqe_header header;
+ struct rdma_srq_sge sge;
+};
+
+/* Rdma doorbell data for flags update */
+struct rdma_pwm_flags_data {
+ __le16 icid; /* internal CID */
+ u8 agg_flags; /* aggregative flags */
+ u8 reserved;
+};
+
+/* Rdma doorbell data for SQ and RQ */
+struct rdma_pwm_val16_data {
+ __le16 icid;
+ __le16 value;
+};
+
+union rdma_pwm_val16_data_union {
+ struct rdma_pwm_val16_data as_struct;
+ __le32 as_dword;
+};
+
+/* Rdma doorbell data for CQ */
+struct rdma_pwm_val32_data {
+ __le16 icid;
+ u8 agg_flags;
+ u8 params;
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK 0x3
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT 0
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK 0x1
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2
+#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_MASK 0x1
+#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_SHIFT 3
+#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_MASK 0x1
+#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_SHIFT 4
+#define RDMA_PWM_VAL32_DATA_RESERVED_MASK 0x7
+#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT 5
+ __le32 value;
+};
+
+/* DIF Block size options */
+enum rdma_dif_block_size {
+ RDMA_DIF_BLOCK_512 = 0,
+ RDMA_DIF_BLOCK_4096 = 1,
+ MAX_RDMA_DIF_BLOCK_SIZE
+};
+
+/* DIF CRC initial value */
+enum rdma_dif_crc_seed {
+ RDMA_DIF_CRC_SEED_0000 = 0,
+ RDMA_DIF_CRC_SEED_FFFF = 1,
+ MAX_RDMA_DIF_CRC_SEED
+};
+
+/* RDMA DIF Error Result Structure */
+struct rdma_dif_error_result {
+ __le32 error_intervals;
+ __le32 dif_error_1st_interval;
+ u8 flags;
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_MASK 0x1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_SHIFT 0
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_MASK 0x1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_SHIFT 1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_MASK 0x1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_SHIFT 2
+#define RDMA_DIF_ERROR_RESULT_RESERVED0_MASK 0xF
+#define RDMA_DIF_ERROR_RESULT_RESERVED0_SHIFT 3
+#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_MASK 0x1
+#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_SHIFT 7
+ u8 reserved1[55];
+};
+
+/* DIF IO direction */
+enum rdma_dif_io_direction_flg {
+ RDMA_DIF_DIR_RX = 0,
+ RDMA_DIF_DIR_TX = 1,
+ MAX_RDMA_DIF_IO_DIRECTION_FLG
+};
+
+struct rdma_dif_params {
+ __le32 base_ref_tag;
+ __le16 app_tag;
+ __le16 app_tag_mask;
+ __le16 runt_crc_value;
+ __le16 flags;
+#define RDMA_DIF_PARAMS_IO_DIRECTION_FLG_MASK 0x1
+#define RDMA_DIF_PARAMS_IO_DIRECTION_FLG_SHIFT 0
+#define RDMA_DIF_PARAMS_BLOCK_SIZE_MASK 0x1
+#define RDMA_DIF_PARAMS_BLOCK_SIZE_SHIFT 1
+#define RDMA_DIF_PARAMS_RUNT_VALID_FLG_MASK 0x1
+#define RDMA_DIF_PARAMS_RUNT_VALID_FLG_SHIFT 2
+#define RDMA_DIF_PARAMS_VALIDATE_CRC_GUARD_MASK 0x1
+#define RDMA_DIF_PARAMS_VALIDATE_CRC_GUARD_SHIFT 3
+#define RDMA_DIF_PARAMS_VALIDATE_REF_TAG_MASK 0x1
+#define RDMA_DIF_PARAMS_VALIDATE_REF_TAG_SHIFT 4
+#define RDMA_DIF_PARAMS_VALIDATE_APP_TAG_MASK 0x1
+#define RDMA_DIF_PARAMS_VALIDATE_APP_TAG_SHIFT 5
+#define RDMA_DIF_PARAMS_CRC_SEED_MASK 0x1
+#define RDMA_DIF_PARAMS_CRC_SEED_SHIFT 6
+#define RDMA_DIF_PARAMS_RX_REF_TAG_CONST_MASK 0x1
+#define RDMA_DIF_PARAMS_RX_REF_TAG_CONST_SHIFT 7
+#define RDMA_DIF_PARAMS_BLOCK_GUARD_TYPE_MASK 0x1
+#define RDMA_DIF_PARAMS_BLOCK_GUARD_TYPE_SHIFT 8
+#define RDMA_DIF_PARAMS_APP_ESCAPE_MASK 0x1
+#define RDMA_DIF_PARAMS_APP_ESCAPE_SHIFT 9
+#define RDMA_DIF_PARAMS_REF_ESCAPE_MASK 0x1
+#define RDMA_DIF_PARAMS_REF_ESCAPE_SHIFT 10
+#define RDMA_DIF_PARAMS_RESERVED4_MASK 0x1F
+#define RDMA_DIF_PARAMS_RESERVED4_SHIFT 11
+ __le32 reserved5;
+};
+
+
+struct rdma_sq_atomic_wqe {
+ __le32 reserved1;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_ATOMIC_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_ATOMIC_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_ATOMIC_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ struct regpair remote_va;
+ __le32 r_key;
+ __le32 reserved2;
+ struct regpair cmp_data;
+ struct regpair swap_data;
+};
+
+/* First element (16 bytes) of atomic wqe */
+struct rdma_sq_atomic_wqe_1st {
+ __le32 reserved1;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_MASK 0x7
+#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_SHIFT 5
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of atomic wqe */
+struct rdma_sq_atomic_wqe_2nd {
+ struct regpair remote_va;
+ __le32 r_key;
+ __le32 reserved2;
+};
+
+/* Third element (16 bytes) of atomic wqe */
+struct rdma_sq_atomic_wqe_3rd {
+ struct regpair cmp_data;
+ struct regpair swap_data;
+};
+
+struct rdma_sq_bind_wqe {
+ struct regpair addr;
+ __le32 l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_BIND_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_BIND_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_BIND_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_BIND_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_BIND_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ u8 bind_ctrl;
+#define RDMA_SQ_BIND_WQE_ZERO_BASED_MASK 0x1
+#define RDMA_SQ_BIND_WQE_ZERO_BASED_SHIFT 0
+#define RDMA_SQ_BIND_WQE_RESERVED1_MASK 0x7F
+#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT 1
+ u8 access_ctrl;
+#define RDMA_SQ_BIND_WQE_REMOTE_READ_MASK 0x1
+#define RDMA_SQ_BIND_WQE_REMOTE_READ_SHIFT 0
+#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_SHIFT 1
+#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_MASK 0x1
+#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_BIND_WQE_LOCAL_READ_MASK 0x1
+#define RDMA_SQ_BIND_WQE_LOCAL_READ_SHIFT 3
+#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_SHIFT 4
+#define RDMA_SQ_BIND_WQE_RESERVED2_MASK 0x7
+#define RDMA_SQ_BIND_WQE_RESERVED2_SHIFT 5
+ u8 reserved3;
+ u8 length_hi;
+ __le32 length_lo;
+ __le32 parent_l_key;
+ __le32 reserved4;
+ struct rdma_dif_params dif_params;
+};
+
+/* First element (16 bytes) of bind wqe */
+struct rdma_sq_bind_wqe_1st {
+ struct regpair addr;
+ __le32 l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_MASK 0x7
+#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_SHIFT 5
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of bind wqe */
+struct rdma_sq_bind_wqe_2nd {
+ u8 bind_ctrl;
+#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT 0
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK 0x7F
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT 1
+ u8 access_ctrl;
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_SHIFT 0
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_SHIFT 1
+#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_SHIFT 3
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_SHIFT 4
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_MASK 0x7
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_SHIFT 5
+ u8 reserved3;
+ u8 length_hi;
+ __le32 length_lo;
+ __le32 parent_l_key;
+ __le32 reserved4;
+};
+
+/* Third element (16 bytes) of bind wqe */
+struct rdma_sq_bind_wqe_3rd {
+ struct rdma_dif_params dif_params;
+};
+
+/* Structure with only the SQ WQE common
+ * fields. Size is one SQ element (16B)
+ */
+struct rdma_sq_common_wqe {
+ __le32 reserved1[3];
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_COMMON_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_COMMON_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_COMMON_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_COMMON_WQE_RESERVED0_MASK 0x7
+#define RDMA_SQ_COMMON_WQE_RESERVED0_SHIFT 5
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+struct rdma_sq_fmr_wqe {
+ struct regpair addr;
+ __le32 l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_FMR_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_FMR_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_FMR_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_FMR_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ u8 fmr_ctrl;
+#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_ZERO_BASED_MASK 0x1
+#define RDMA_SQ_FMR_WQE_ZERO_BASED_SHIFT 5
+#define RDMA_SQ_FMR_WQE_BIND_EN_MASK 0x1
+#define RDMA_SQ_FMR_WQE_BIND_EN_SHIFT 6
+#define RDMA_SQ_FMR_WQE_RESERVED1_MASK 0x1
+#define RDMA_SQ_FMR_WQE_RESERVED1_SHIFT 7
+ u8 access_ctrl;
+#define RDMA_SQ_FMR_WQE_REMOTE_READ_MASK 0x1
+#define RDMA_SQ_FMR_WQE_REMOTE_READ_SHIFT 0
+#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_SHIFT 1
+#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_MASK 0x1
+#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_FMR_WQE_LOCAL_READ_MASK 0x1
+#define RDMA_SQ_FMR_WQE_LOCAL_READ_SHIFT 3
+#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_SHIFT 4
+#define RDMA_SQ_FMR_WQE_RESERVED2_MASK 0x7
+#define RDMA_SQ_FMR_WQE_RESERVED2_SHIFT 5
+ u8 reserved3;
+ u8 length_hi;
+ __le32 length_lo;
+ struct regpair pbl_addr;
+};
+
+/* First element (16 bytes) of fmr wqe */
+struct rdma_sq_fmr_wqe_1st {
+ struct regpair addr;
+ __le32 l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_MASK 0x3
+#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of fmr wqe */
+struct rdma_sq_fmr_wqe_2nd {
+ u8 fmr_ctrl;
+#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_SHIFT 5
+#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_SHIFT 6
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_SHIFT 7
+ u8 access_ctrl;
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_SHIFT 0
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_SHIFT 1
+#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_SHIFT 3
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_SHIFT 4
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_MASK 0x7
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_SHIFT 5
+ u8 reserved3;
+ u8 length_hi;
+ __le32 length_lo;
+ struct regpair pbl_addr;
+};
+
+
+struct rdma_sq_local_inv_wqe {
+ struct regpair reserved;
+ __le32 inv_l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+struct rdma_sq_rdma_wqe {
+ __le32 imm_data;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT 6
+#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT 7
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ struct regpair remote_va;
+ __le32 r_key;
+ u8 dif_flags;
+#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_SHIFT 0
+#define RDMA_SQ_RDMA_WQE_RESERVED2_MASK 0x7F
+#define RDMA_SQ_RDMA_WQE_RESERVED2_SHIFT 1
+ u8 reserved3[3];
+};
+
+/* First element (16 bytes) of rdma wqe */
+struct rdma_sq_rdma_wqe_1st {
+ __le32 imm_data;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG_SHIFT 6
+#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT 7
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of rdma wqe */
+struct rdma_sq_rdma_wqe_2nd {
+ struct regpair remote_va;
+ __le32 r_key;
+ u8 dif_flags;
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_SHIFT 0
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_SHIFT 2
+#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_MASK 0x1F
+#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_SHIFT 3
+ u8 reserved2[3];
+};
+
+/* SQ WQE req type enumeration */
+enum rdma_sq_req_type {
+ RDMA_SQ_REQ_TYPE_SEND,
+ RDMA_SQ_REQ_TYPE_SEND_WITH_IMM,
+ RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE,
+ RDMA_SQ_REQ_TYPE_RDMA_WR,
+ RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM,
+ RDMA_SQ_REQ_TYPE_RDMA_RD,
+ RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP,
+ RDMA_SQ_REQ_TYPE_ATOMIC_ADD,
+ RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE,
+ RDMA_SQ_REQ_TYPE_FAST_MR,
+ RDMA_SQ_REQ_TYPE_BIND,
+ RDMA_SQ_REQ_TYPE_INVALID,
+ MAX_RDMA_SQ_REQ_TYPE
+};
+
+struct rdma_sq_send_wqe {
+ __le32 inv_key_or_imm_data;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_SEND_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_SEND_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_SEND_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_SEND_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_SEND_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ __le32 reserved1[4];
+};
+
+struct rdma_sq_send_wqe_1st {
+ __le32 inv_key_or_imm_data;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_MASK 0x7
+#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_SHIFT 5
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+struct rdma_sq_send_wqe_2st {
+ __le32 reserved1[4];
+};
+
+#endif /* __QED_HSI_RDMA__ */
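The MASK/SHIFT pairs defined throughout this header are consumed through the
qed GET_FIELD()/SET_FIELD() helpers rather than C bitfields. A minimal sketch
of building and reading a WQE flags byte, assuming those helpers from the qed
common HSI headers; the wqe variable and the chosen values are hypothetical:

	/* Illustrative only -- not part of the patch. */
	struct rdma_sq_rdma_wqe_1st wqe = { 0 };

	wqe.req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
	SET_FIELD(wqe.flags, RDMA_SQ_RDMA_WQE_1ST_COMP_FLG, 1); /* request a CQE */
	SET_FIELD(wqe.flags, RDMA_SQ_RDMA_WQE_1ST_SE_FLG, 0);   /* not solicited */

	if (GET_FIELD(wqe.flags, RDMA_SQ_RDMA_WQE_1ST_COMP_FLG))
		; /* a completion will be generated for this WQE */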
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
new file mode 100644
index 000000000000..259303b9907c
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -0,0 +1,819 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+#include <net/addrconf.h>
+#include <net/route.h>
+#include <net/ip6_route.h>
+#include <net/flow.h>
+#include "qedr.h"
+#include "qedr_iw_cm.h"
+
+static inline void
+qedr_fill_sockaddr4(const struct qed_iwarp_cm_info *cm_info,
+ struct iw_cm_event *event)
+{
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr;
+
+ laddr->sin_family = AF_INET;
+ raddr->sin_family = AF_INET;
+
+ laddr->sin_port = htons(cm_info->local_port);
+ raddr->sin_port = htons(cm_info->remote_port);
+
+ laddr->sin_addr.s_addr = htonl(cm_info->local_ip[0]);
+ raddr->sin_addr.s_addr = htonl(cm_info->remote_ip[0]);
+}
+
+static inline void
+qedr_fill_sockaddr6(const struct qed_iwarp_cm_info *cm_info,
+ struct iw_cm_event *event)
+{
+ struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr;
+ struct sockaddr_in6 *raddr6 =
+ (struct sockaddr_in6 *)&event->remote_addr;
+ int i;
+
+ laddr6->sin6_family = AF_INET6;
+ raddr6->sin6_family = AF_INET6;
+
+ laddr6->sin6_port = htons(cm_info->local_port);
+ raddr6->sin6_port = htons(cm_info->remote_port);
+
+ for (i = 0; i < 4; i++) {
+ laddr6->sin6_addr.in6_u.u6_addr32[i] =
+ htonl(cm_info->local_ip[i]);
+ raddr6->sin6_addr.in6_u.u6_addr32[i] =
+ htonl(cm_info->remote_ip[i]);
+ }
+}
+
+static void qedr_iw_free_qp(struct kref *ref)
+{
+ struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt);
+
+ complete(&qp->qp_rel_comp);
+}
+
+static void
+qedr_iw_free_ep(struct kref *ref)
+{
+ struct qedr_iw_ep *ep = container_of(ref, struct qedr_iw_ep, refcnt);
+
+ if (ep->qp)
+ kref_put(&ep->qp->refcnt, qedr_iw_free_qp);
+
+ if (ep->cm_id)
+ ep->cm_id->rem_ref(ep->cm_id);
+
+ kfree(ep);
+}
+
+static void
+qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params)
+{
+ struct qedr_iw_listener *listener = (struct qedr_iw_listener *)context;
+ struct qedr_dev *dev = listener->dev;
+ struct iw_cm_event event;
+ struct qedr_iw_ep *ep;
+
+ ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
+ if (!ep)
+ return;
+
+ ep->dev = dev;
+ ep->qed_context = params->ep_context;
+ kref_init(&ep->refcnt);
+
+ memset(&event, 0, sizeof(event));
+ event.event = IW_CM_EVENT_CONNECT_REQUEST;
+ event.status = params->status;
+
+ if (!IS_ENABLED(CONFIG_IPV6) ||
+ params->cm_info->ip_version == QED_TCP_IPV4)
+ qedr_fill_sockaddr4(params->cm_info, &event);
+ else
+ qedr_fill_sockaddr6(params->cm_info, &event);
+
+ event.provider_data = (void *)ep;
+ event.private_data = (void *)params->cm_info->private_data;
+ event.private_data_len = (u8)params->cm_info->private_data_len;
+ event.ord = params->cm_info->ord;
+ event.ird = params->cm_info->ird;
+
+ listener->cm_id->event_handler(listener->cm_id, &event);
+}
+
+static void
+qedr_iw_issue_event(void *context,
+ struct qed_iwarp_cm_event_params *params,
+ enum iw_cm_event_type event_type)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+ struct iw_cm_event event;
+
+ memset(&event, 0, sizeof(event));
+ event.status = params->status;
+ event.event = event_type;
+
+ if (params->cm_info) {
+ event.ird = params->cm_info->ird;
+ event.ord = params->cm_info->ord;
+ /* Only connect_request and reply carry valid private data;
+ * for the rest of the events it may be left over from
+ * connection establishment. CONNECT_REQUEST is issued via
+ * qedr_iw_mpa_request.
+ */
+ if (event_type == IW_CM_EVENT_CONNECT_REPLY) {
+ event.private_data_len =
+ params->cm_info->private_data_len;
+ event.private_data =
+ (void *)params->cm_info->private_data;
+ }
+ }
+
+ if (ep->cm_id)
+ ep->cm_id->event_handler(ep->cm_id, &event);
+}
+
+static void
+qedr_iw_close_event(void *context, struct qed_iwarp_cm_event_params *params)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+
+ if (ep->cm_id)
+ qedr_iw_issue_event(context, params, IW_CM_EVENT_CLOSE);
+
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
+}
+
+static void
+qedr_iw_qp_event(void *context,
+ struct qed_iwarp_cm_event_params *params,
+ enum ib_event_type ib_event, char *str)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+ struct qedr_dev *dev = ep->dev;
+ struct ib_qp *ibqp = &ep->qp->ibqp;
+ struct ib_event event;
+
+ DP_NOTICE(dev, "QP error received: %s\n", str);
+
+ if (ibqp->event_handler) {
+ event.event = ib_event;
+ event.device = ibqp->device;
+ event.element.qp = ibqp;
+ ibqp->event_handler(&event, ibqp->qp_context);
+ }
+}
+
+struct qedr_discon_work {
+ struct work_struct work;
+ struct qedr_iw_ep *ep;
+ enum qed_iwarp_event_type event;
+ int status;
+};
+
+static void qedr_iw_disconnect_worker(struct work_struct *work)
+{
+ struct qedr_discon_work *dwork =
+ container_of(work, struct qedr_discon_work, work);
+ struct qed_rdma_modify_qp_in_params qp_params = { 0 };
+ struct qedr_iw_ep *ep = dwork->ep;
+ struct qedr_dev *dev = ep->dev;
+ struct qedr_qp *qp = ep->qp;
+ struct iw_cm_event event;
+
+ /* The qp won't be released until we release the ep.
+ * The ep's refcnt was increased before calling this
+ * function, so it is safe to access the qp here.
+ */
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
+ &qp->iwarp_cm_flags))
+ goto out;
+
+ memset(&event, 0, sizeof(event));
+ event.status = dwork->status;
+ event.event = IW_CM_EVENT_DISCONNECT;
+
+ /* Success means a graceful disconnect was requested: modifying
+ * to SQD is translated to a graceful disconnect. Otherwise a
+ * reset is sent.
+ */
+ if (dwork->status)
+ qp_params.new_state = QED_ROCE_QP_STATE_ERR;
+ else
+ qp_params.new_state = QED_ROCE_QP_STATE_SQD;
+
+
+ if (ep->cm_id)
+ ep->cm_id->event_handler(ep->cm_id, &event);
+
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
+
+ dev->ops->rdma_modify_qp(dev->rdma_ctx, qp->qed_qp, &qp_params);
+
+ complete(&ep->qp->iwarp_cm_comp);
+out:
+ kfree(dwork);
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
+}
+
+static void
+qedr_iw_disconnect_event(void *context,
+ struct qed_iwarp_cm_event_params *params)
+{
+ struct qedr_discon_work *work;
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+ struct qedr_dev *dev = ep->dev;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ /* We can't get a close event before disconnect, but since
+ * we're queueing deferred work we need to make sure close
+ * won't delete the ep, so we increase the refcnt
+ */
+ kref_get(&ep->refcnt);
+
+ work->ep = ep;
+ work->event = params->event;
+ work->status = params->status;
+
+ INIT_WORK(&work->work, qedr_iw_disconnect_worker);
+ queue_work(dev->iwarp_wq, &work->work);
+}
+
+static void
+qedr_iw_passive_complete(void *context,
+ struct qed_iwarp_cm_event_params *params)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+ struct qedr_dev *dev = ep->dev;
+
+ /* We will only reach the following state if MPA_REJECT was called on
+ * passive. In this case there will be no associated QP.
+ */
+ if ((params->status == -ECONNREFUSED) && (!ep->qp)) {
+ DP_DEBUG(dev, QEDR_MSG_IWARP,
+ "PASSIVE connection refused releasing ep...\n");
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
+ return;
+ }
+
+ complete(&ep->qp->iwarp_cm_comp);
+ qedr_iw_issue_event(context, params, IW_CM_EVENT_ESTABLISHED);
+
+ if (params->status < 0)
+ qedr_iw_close_event(context, params);
+}
+
+static void
+qedr_iw_active_complete(void *context,
+ struct qed_iwarp_cm_event_params *params)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+
+ complete(&ep->qp->iwarp_cm_comp);
+ qedr_iw_issue_event(context, params, IW_CM_EVENT_CONNECT_REPLY);
+
+ if (params->status < 0)
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
+}
+
+static int
+qedr_iw_mpa_reply(void *context, struct qed_iwarp_cm_event_params *params)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+ struct qedr_dev *dev = ep->dev;
+ struct qed_iwarp_send_rtr_in rtr_in;
+
+ rtr_in.ep_context = params->ep_context;
+
+ return dev->ops->iwarp_send_rtr(dev->rdma_ctx, &rtr_in);
+}
+
+static int
+qedr_iw_event_handler(void *context, struct qed_iwarp_cm_event_params *params)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+ struct qedr_dev *dev = ep->dev;
+
+ switch (params->event) {
+ case QED_IWARP_EVENT_MPA_REQUEST:
+ qedr_iw_mpa_request(context, params);
+ break;
+ case QED_IWARP_EVENT_ACTIVE_MPA_REPLY:
+ qedr_iw_mpa_reply(context, params);
+ break;
+ case QED_IWARP_EVENT_PASSIVE_COMPLETE:
+ qedr_iw_passive_complete(context, params);
+ break;
+ case QED_IWARP_EVENT_ACTIVE_COMPLETE:
+ qedr_iw_active_complete(context, params);
+ break;
+ case QED_IWARP_EVENT_DISCONNECT:
+ qedr_iw_disconnect_event(context, params);
+ break;
+ case QED_IWARP_EVENT_CLOSE:
+ qedr_iw_close_event(context, params);
+ break;
+ case QED_IWARP_EVENT_RQ_EMPTY:
+ qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
+ "QED_IWARP_EVENT_RQ_EMPTY");
+ break;
+ case QED_IWARP_EVENT_IRQ_FULL:
+ qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
+ "QED_IWARP_EVENT_IRQ_FULL");
+ break;
+ case QED_IWARP_EVENT_LLP_TIMEOUT:
+ qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
+ "QED_IWARP_EVENT_LLP_TIMEOUT");
+ break;
+ case QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR:
+ qedr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
+ "QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR");
+ break;
+ case QED_IWARP_EVENT_CQ_OVERFLOW:
+ qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
+ "QED_IWARP_EVENT_CQ_OVERFLOW");
+ break;
+ case QED_IWARP_EVENT_QP_CATASTROPHIC:
+ qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
+ "QED_IWARP_EVENT_QP_CATASTROPHIC");
+ break;
+ case QED_IWARP_EVENT_LOCAL_ACCESS_ERROR:
+ qedr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
+ "QED_IWARP_EVENT_LOCAL_ACCESS_ERROR");
+ break;
+ case QED_IWARP_EVENT_REMOTE_OPERATION_ERROR:
+ qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
+ "QED_IWARP_EVENT_REMOTE_OPERATION_ERROR");
+ break;
+ case QED_IWARP_EVENT_TERMINATE_RECEIVED:
+ DP_NOTICE(dev, "Got terminate message\n");
+ break;
+ default:
+ DP_NOTICE(dev, "Unknown event received %d\n", params->event);
+ break;
+ }
+ return 0;
+}
+
+static u16 qedr_iw_get_vlan_ipv4(struct qedr_dev *dev, u32 *addr)
+{
+ struct net_device *ndev;
+ u16 vlan_id = 0;
+
+ ndev = ip_dev_find(&init_net, htonl(addr[0]));
+
+ if (ndev) {
+ vlan_id = rdma_vlan_dev_vlan_id(ndev);
+ dev_put(ndev);
+ }
+ if (vlan_id == 0xffff)
+ vlan_id = 0;
+ return vlan_id;
+}
+
+static u16 qedr_iw_get_vlan_ipv6(u32 *addr)
+{
+ struct net_device *ndev = NULL;
+ struct in6_addr laddr6;
+ u16 vlan_id = 0;
+ int i;
+
+ if (!IS_ENABLED(CONFIG_IPV6))
+ return vlan_id;
+
+ for (i = 0; i < 4; i++)
+ laddr6.in6_u.u6_addr32[i] = htonl(addr[i]);
+
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, ndev) {
+ if (ipv6_chk_addr(&init_net, &laddr6, ndev, 1)) {
+ vlan_id = rdma_vlan_dev_vlan_id(ndev);
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+ if (vlan_id == 0xffff)
+ vlan_id = 0;
+
+ return vlan_id;
+}
+
+static int
+qedr_addr4_resolve(struct qedr_dev *dev,
+ struct sockaddr_in *src_in,
+ struct sockaddr_in *dst_in, u8 *dst_mac)
+{
+ __be32 src_ip = src_in->sin_addr.s_addr;
+ __be32 dst_ip = dst_in->sin_addr.s_addr;
+ struct neighbour *neigh = NULL;
+ struct rtable *rt = NULL;
+ int rc = 0;
+
+ rt = ip_route_output(&init_net, dst_ip, src_ip, 0, 0,
+ RT_SCOPE_UNIVERSE);
+ if (IS_ERR(rt)) {
+ DP_ERR(dev, "ip_route_output returned error\n");
+ return -EINVAL;
+ }
+
+ neigh = dst_neigh_lookup(&rt->dst, &dst_ip);
+
+ if (neigh) {
+ rcu_read_lock();
+ if (neigh->nud_state & NUD_VALID) {
+ ether_addr_copy(dst_mac, neigh->ha);
+ DP_DEBUG(dev, QEDR_MSG_QP, "mac_addr=[%pM]\n", dst_mac);
+ } else {
+ neigh_event_send(neigh, NULL);
+ }
+ rcu_read_unlock();
+ neigh_release(neigh);
+ }
+
+ ip_rt_put(rt);
+
+ return rc;
+}
+
+static int
+qedr_addr6_resolve(struct qedr_dev *dev,
+ struct sockaddr_in6 *src_in,
+ struct sockaddr_in6 *dst_in, u8 *dst_mac)
+{
+ struct neighbour *neigh = NULL;
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+ int rc = 0;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.daddr = dst_in->sin6_addr;
+ fl6.saddr = src_in->sin6_addr;
+
+ dst = ip6_route_output(&init_net, NULL, &fl6);
+
+ if ((!dst) || dst->error) {
+ if (dst) {
+ DP_ERR(dev,
+ "ip6_route_output returned dst->error = %d\n",
+ dst->error);
+ dst_release(dst);
+ }
+ return -EINVAL;
+ }
+ neigh = dst_neigh_lookup(dst, &fl6.daddr);
+ if (neigh) {
+ rcu_read_lock();
+ if (neigh->nud_state & NUD_VALID) {
+ ether_addr_copy(dst_mac, neigh->ha);
+ DP_DEBUG(dev, QEDR_MSG_QP, "mac_addr=[%pM]\n", dst_mac);
+ } else {
+ neigh_event_send(neigh, NULL);
+ }
+ rcu_read_unlock();
+ neigh_release(neigh);
+ }
+
+ dst_release(dst);
+
+ return rc;
+}
+
+static struct qedr_qp *qedr_iw_load_qp(struct qedr_dev *dev, u32 qpn)
+{
+ struct qedr_qp *qp;
+
+ xa_lock(&dev->qps);
+ qp = xa_load(&dev->qps, qpn);
+ if (qp)
+ kref_get(&qp->refcnt);
+ xa_unlock(&dev->qps);
+
+ return qp;
+}
+
+int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ struct qedr_dev *dev = get_qedr_dev(cm_id->device);
+ struct qed_iwarp_connect_out out_params;
+ struct qed_iwarp_connect_in in_params;
+ struct qed_iwarp_cm_info *cm_info;
+ struct sockaddr_in6 *laddr6;
+ struct sockaddr_in6 *raddr6;
+ struct sockaddr_in *laddr;
+ struct sockaddr_in *raddr;
+ struct qedr_iw_ep *ep;
+ struct qedr_qp *qp;
+ int rc = 0;
+ int i;
+
+ laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
+ laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+ raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
+
+ DP_DEBUG(dev, QEDR_MSG_IWARP, "MAPPED %d %d\n",
+ ntohs(((struct sockaddr_in *)&cm_id->remote_addr)->sin_port),
+ ntohs(raddr->sin_port));
+
+ DP_DEBUG(dev, QEDR_MSG_IWARP,
+ "Connect source address: %pISpc, remote address: %pISpc\n",
+ &cm_id->local_addr, &cm_id->remote_addr);
+
+ if (!laddr->sin_port || !raddr->sin_port)
+ return -EINVAL;
+
+ ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ ep->dev = dev;
+ kref_init(&ep->refcnt);
+
+ qp = qedr_iw_load_qp(dev, conn_param->qpn);
+ if (!qp) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ ep->qp = qp;
+ cm_id->add_ref(cm_id);
+ ep->cm_id = cm_id;
+
+ in_params.event_cb = qedr_iw_event_handler;
+ in_params.cb_context = ep;
+
+ cm_info = &in_params.cm_info;
+ memset(cm_info->local_ip, 0, sizeof(cm_info->local_ip));
+ memset(cm_info->remote_ip, 0, sizeof(cm_info->remote_ip));
+
+ if (!IS_ENABLED(CONFIG_IPV6) ||
+ cm_id->remote_addr.ss_family == AF_INET) {
+ cm_info->ip_version = QED_TCP_IPV4;
+
+ cm_info->remote_ip[0] = ntohl(raddr->sin_addr.s_addr);
+ cm_info->local_ip[0] = ntohl(laddr->sin_addr.s_addr);
+ cm_info->remote_port = ntohs(raddr->sin_port);
+ cm_info->local_port = ntohs(laddr->sin_port);
+ cm_info->vlan = qedr_iw_get_vlan_ipv4(dev, cm_info->local_ip);
+
+ rc = qedr_addr4_resolve(dev, laddr, raddr,
+ (u8 *)in_params.remote_mac_addr);
+
+ in_params.mss = dev->iwarp_max_mtu -
+ (sizeof(struct iphdr) + sizeof(struct tcphdr));
+
+ } else {
+ in_params.cm_info.ip_version = QED_TCP_IPV6;
+
+ for (i = 0; i < 4; i++) {
+ cm_info->remote_ip[i] =
+ ntohl(raddr6->sin6_addr.in6_u.u6_addr32[i]);
+ cm_info->local_ip[i] =
+ ntohl(laddr6->sin6_addr.in6_u.u6_addr32[i]);
+ }
+
+ cm_info->local_port = ntohs(laddr6->sin6_port);
+ cm_info->remote_port = ntohs(raddr6->sin6_port);
+
+ in_params.mss = dev->iwarp_max_mtu -
+ (sizeof(struct ipv6hdr) + sizeof(struct tcphdr));
+
+ cm_info->vlan = qedr_iw_get_vlan_ipv6(cm_info->local_ip);
+
+ rc = qedr_addr6_resolve(dev, laddr6, raddr6,
+ (u8 *)in_params.remote_mac_addr);
+ }
+ if (rc)
+ goto err;
+
+ DP_DEBUG(dev, QEDR_MSG_IWARP,
+ "ord = %d ird=%d private_data=%p private_data_len=%d rq_psn=%d\n",
+ conn_param->ord, conn_param->ird, conn_param->private_data,
+ conn_param->private_data_len, qp->rq_psn);
+
+ cm_info->ord = conn_param->ord;
+ cm_info->ird = conn_param->ird;
+ cm_info->private_data = conn_param->private_data;
+ cm_info->private_data_len = conn_param->private_data_len;
+ in_params.qp = qp->qed_qp;
+ memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);
+
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
+ &qp->iwarp_cm_flags)) {
+ rc = -ENODEV;
+ goto err; /* QP already being destroyed */
+ }
+
+ rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
+ if (rc) {
+ complete(&qp->iwarp_cm_comp);
+ goto err;
+ }
+
+ return rc;
+
+err:
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
+ return rc;
+}
+
+int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)
+{
+ struct qedr_dev *dev = get_qedr_dev(cm_id->device);
+ struct qedr_iw_listener *listener;
+ struct qed_iwarp_listen_in iparams;
+ struct qed_iwarp_listen_out oparams;
+ struct sockaddr_in *laddr;
+ struct sockaddr_in6 *laddr6;
+ int rc;
+ int i;
+
+ laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+
+ DP_DEBUG(dev, QEDR_MSG_IWARP,
+ "Create Listener address: %pISpc\n", &cm_id->local_addr);
+
+ listener = kzalloc(sizeof(*listener), GFP_KERNEL);
+ if (!listener)
+ return -ENOMEM;
+
+ listener->dev = dev;
+ cm_id->add_ref(cm_id);
+ listener->cm_id = cm_id;
+ listener->backlog = backlog;
+
+ iparams.cb_context = listener;
+ iparams.event_cb = qedr_iw_event_handler;
+ iparams.max_backlog = backlog;
+
+ if (!IS_ENABLED(CONFIG_IPV6) ||
+ cm_id->local_addr.ss_family == AF_INET) {
+ iparams.ip_version = QED_TCP_IPV4;
+ memset(iparams.ip_addr, 0, sizeof(iparams.ip_addr));
+
+ iparams.ip_addr[0] = ntohl(laddr->sin_addr.s_addr);
+ iparams.port = ntohs(laddr->sin_port);
+ iparams.vlan = qedr_iw_get_vlan_ipv4(dev, iparams.ip_addr);
+ } else {
+ iparams.ip_version = QED_TCP_IPV6;
+
+ for (i = 0; i < 4; i++) {
+ iparams.ip_addr[i] =
+ ntohl(laddr6->sin6_addr.in6_u.u6_addr32[i]);
+ }
+
+ iparams.port = ntohs(laddr6->sin6_port);
+
+ iparams.vlan = qedr_iw_get_vlan_ipv6(iparams.ip_addr);
+ }
+ rc = dev->ops->iwarp_create_listen(dev->rdma_ctx, &iparams, &oparams);
+ if (rc)
+ goto err;
+
+ listener->qed_handle = oparams.handle;
+ cm_id->provider_data = listener;
+ return rc;
+
+err:
+ cm_id->rem_ref(cm_id);
+ kfree(listener);
+ return rc;
+}
+
+int qedr_iw_destroy_listen(struct iw_cm_id *cm_id)
+{
+ struct qedr_iw_listener *listener = cm_id->provider_data;
+ struct qedr_dev *dev = get_qedr_dev(cm_id->device);
+ int rc = 0;
+
+ if (listener->qed_handle)
+ rc = dev->ops->iwarp_destroy_listen(dev->rdma_ctx,
+ listener->qed_handle);
+
+ cm_id->rem_ref(cm_id);
+ kfree(listener);
+ return rc;
+}
+
+int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)cm_id->provider_data;
+ struct qedr_dev *dev = ep->dev;
+ struct qedr_qp *qp;
+ struct qed_iwarp_accept_in params;
+ int rc;
+
+ DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);
+
+ qp = qedr_iw_load_qp(dev, conn_param->qpn);
+ if (!qp) {
+ DP_ERR(dev, "Invalid QP number %d\n", conn_param->qpn);
+ return -EINVAL;
+ }
+
+ ep->qp = qp;
+ cm_id->add_ref(cm_id);
+ ep->cm_id = cm_id;
+
+ params.ep_context = ep->qed_context;
+ params.cb_context = ep;
+ params.qp = ep->qp->qed_qp;
+ params.private_data = conn_param->private_data;
+ params.private_data_len = conn_param->private_data_len;
+ params.ird = conn_param->ird;
+ params.ord = conn_param->ord;
+
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
+ &qp->iwarp_cm_flags)) {
+ rc = -EINVAL;
+ goto err; /* QP already destroyed */
+ }
+
+ rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params);
+ if (rc) {
+ complete(&qp->iwarp_cm_comp);
+ goto err;
+ }
+
+ return rc;
+
+err:
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
+
+ return rc;
+}
+
+int qedr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)cm_id->provider_data;
+ struct qedr_dev *dev = ep->dev;
+ struct qed_iwarp_reject_in params;
+
+ params.ep_context = ep->qed_context;
+ params.cb_context = ep;
+ params.private_data = pdata;
+ params.private_data_len = pdata_len;
+ ep->qp = NULL;
+
+ return dev->ops->iwarp_reject(dev->rdma_ctx, &params);
+}
+
+void qedr_iw_qp_add_ref(struct ib_qp *ibqp)
+{
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+
+ kref_get(&qp->refcnt);
+}
+
+void qedr_iw_qp_rem_ref(struct ib_qp *ibqp)
+{
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+
+ kref_put(&qp->refcnt, qedr_iw_free_qp);
+}
+
+struct ib_qp *qedr_iw_get_qp(struct ib_device *ibdev, int qpn)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+
+ return xa_load(&dev->qps, qpn);
+}
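The entry points above are declared in qedr_iw_cm.h (next hunk) and hooked
into the RDMA core from the driver's init path, which is outside this patch.
A hedged sketch of that registration, assuming the iw_cm_verbs interface that
iWARP providers used at the time (later kernels wire the same callbacks
through ib_device_ops instead):

	/* Sketch only -- the real registration lives in qedr main.c. */
	dev->ibdev.iwcm = kzalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;

	dev->ibdev.iwcm->connect = qedr_iw_connect;
	dev->ibdev.iwcm->accept = qedr_iw_accept;
	dev->ibdev.iwcm->reject = qedr_iw_reject;
	dev->ibdev.iwcm->create_listen = qedr_iw_create_listen;
	dev->ibdev.iwcm->destroy_listen = qedr_iw_destroy_listen;
	dev->ibdev.iwcm->add_ref = qedr_iw_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = qedr_iw_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = qedr_iw_get_qp;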
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.h b/drivers/infiniband/hw/qedr/qedr_iw_cm.h
index 7ae983a91b8b..08f4b1067e6c 100644
--- a/drivers/infiniband/hw/qib/qib_debugfs.h
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.h
@@ -1,9 +1,5 @@
-#ifndef _QIB_DEBUGFS_H
-#define _QIB_DEBUGFS_H
-
-#ifdef CONFIG_DEBUG_FS
-/*
- * Copyright (c) 2013 Intel Corporation. All rights reserved.
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -21,7 +17,7 @@
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
+ * disclaimer in the documentation and /or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
@@ -33,13 +29,21 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+#include <rdma/iw_cm.h>
+
+int qedr_iw_connect(struct iw_cm_id *cm_id,
+ struct iw_cm_conn_param *conn_param);
+
+int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog);
+
+int qedr_iw_destroy_listen(struct iw_cm_id *cm_id);
+
+int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
+
+int qedr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
-struct qib_ibdev;
-void qib_dbg_ibdev_init(struct qib_ibdev *ibd);
-void qib_dbg_ibdev_exit(struct qib_ibdev *ibd);
-void qib_dbg_init(void);
-void qib_dbg_exit(void);
+void qedr_iw_qp_add_ref(struct ib_qp *qp);
-#endif
+void qedr_iw_qp_rem_ref(struct ib_qp *qp);
-#endif /* _QIB_DEBUGFS_H */
+struct ib_qp *qedr_iw_get_qp(struct ib_device *dev, int qpn);
diff --git a/drivers/infiniband/hw/qedr/qedr_roce_cm.c b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
new file mode 100644
index 000000000000..859f66a51bd2
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
@@ -0,0 +1,729 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+#include <linux/iommu.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_rdma_if.h>
+#include "qedr.h"
+#include "verbs.h"
+#include <rdma/qedr-abi.h>
+#include "qedr_roce_cm.h"
+
+void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
+{
+ info->gsi_cons = (info->gsi_cons + 1) % info->max_wr;
+}
+
+void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs)
+{
+ dev->gsi_qp_created = 1;
+ dev->gsi_sqcq = get_qedr_cq(attrs->send_cq);
+ dev->gsi_rqcq = get_qedr_cq(attrs->recv_cq);
+ dev->gsi_qp = qp;
+}
+
+static void qedr_ll2_complete_tx_packet(void *cxt, u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment,
+ bool b_last_packet)
+{
+ struct qedr_dev *dev = (struct qedr_dev *)cxt;
+ struct qed_roce_ll2_packet *pkt = cookie;
+ struct qedr_cq *cq = dev->gsi_sqcq;
+ struct qedr_qp *qp = dev->gsi_qp;
+ unsigned long flags;
+
+ DP_DEBUG(dev, QEDR_MSG_GSI,
+ "LL2 TX CB: gsi_sqcq=%p, gsi_rqcq=%p, gsi_cons=%d, ibcq_comp=%s\n",
+ dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons,
+ cq->ibcq.comp_handler ? "Yes" : "No");
+
+ dma_free_coherent(&dev->pdev->dev, pkt->header.len, pkt->header.vaddr,
+ pkt->header.baddr);
+ kfree(pkt);
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+ qedr_inc_sw_gsi_cons(&qp->sq);
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ if (cq->ibcq.comp_handler)
+ (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
+}
+
+static void qedr_ll2_complete_rx_packet(void *cxt,
+ struct qed_ll2_comp_rx_data *data)
+{
+ struct qedr_dev *dev = (struct qedr_dev *)cxt;
+ struct qedr_cq *cq = dev->gsi_rqcq;
+ struct qedr_qp *qp = dev->gsi_qp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
+ -EINVAL : 0;
+ qp->rqe_wr_id[qp->rq.gsi_cons].vlan = data->vlan;
+ /* note: length stands for data length i.e. GRH is excluded */
+ qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
+ data->length.data_length;
+ *((u32 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[0]) =
+ ntohl(data->opaque_data_0);
+ *((u16 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[4]) =
+ ntohs((u16)data->opaque_data_1);
+
+ qedr_inc_sw_gsi_cons(&qp->rq);
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ if (cq->ibcq.comp_handler)
+ (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
+}
+
+static void qedr_ll2_release_rx_packet(void *cxt, u8 connection_handle,
+ void *cookie, dma_addr_t rx_buf_addr,
+ bool b_last_packet)
+{
+ /* Do nothing... */
+}
+
+static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs)
+{
+ struct qed_rdma_destroy_cq_in_params iparams;
+ struct qed_rdma_destroy_cq_out_params oparams;
+ struct qedr_cq *cq;
+
+ cq = get_qedr_cq(attrs->send_cq);
+ iparams.icid = cq->icid;
+ dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+
+ cq = get_qedr_cq(attrs->recv_cq);
+ /* if a dedicated recv_cq was used, delete it too */
+ if (iparams.icid != cq->icid) {
+ iparams.icid = cq->icid;
+ dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+ }
+}
+
+static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs)
+{
+ if (attrs->cap.max_recv_sge > QEDR_GSI_MAX_RECV_SGE) {
+ DP_ERR(dev,
+ " create gsi qp: failed. max_recv_sge is larger than the max %d>%d\n",
+ attrs->cap.max_recv_sge, QEDR_GSI_MAX_RECV_SGE);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_recv_wr > QEDR_GSI_MAX_RECV_WR) {
+ DP_ERR(dev,
+ " create gsi qp: failed. max_recv_wr is too large %d>%d\n",
+ attrs->cap.max_recv_wr, QEDR_GSI_MAX_RECV_WR);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_send_wr > QEDR_GSI_MAX_SEND_WR) {
+ DP_ERR(dev,
+ " create gsi qp: failed. max_send_wr is too large %d>%d\n",
+ attrs->cap.max_send_wr, QEDR_GSI_MAX_SEND_WR);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qedr_ll2_post_tx(struct qedr_dev *dev,
+ struct qed_roce_ll2_packet *pkt)
+{
+ enum qed_ll2_roce_flavor_type roce_flavor;
+ struct qed_ll2_tx_pkt_info ll2_tx_pkt;
+ int rc;
+ int i;
+
+ memset(&ll2_tx_pkt, 0, sizeof(ll2_tx_pkt));
+
+ roce_flavor = (pkt->roce_mode == ROCE_V1) ?
+ QED_LL2_ROCE : QED_LL2_RROCE;
+
+ if (pkt->roce_mode == ROCE_V2_IPV4)
+ ll2_tx_pkt.enable_ip_cksum = 1;
+
+ ll2_tx_pkt.num_of_bds = 1 /* hdr */ + pkt->n_seg;
+ ll2_tx_pkt.vlan = 0;
+ ll2_tx_pkt.tx_dest = pkt->tx_dest;
+ ll2_tx_pkt.qed_roce_flavor = roce_flavor;
+ ll2_tx_pkt.first_frag = pkt->header.baddr;
+ ll2_tx_pkt.first_frag_len = pkt->header.len;
+ ll2_tx_pkt.cookie = pkt;
+
+ /* tx header */
+ rc = dev->ops->ll2_prepare_tx_packet(dev->rdma_ctx,
+ dev->gsi_ll2_handle,
+ &ll2_tx_pkt, 1);
+ if (rc) {
+ /* TX failed while posting header - release resources */
+ dma_free_coherent(&dev->pdev->dev, pkt->header.len,
+ pkt->header.vaddr, pkt->header.baddr);
+ kfree(pkt);
+
+ DP_ERR(dev, "roce ll2 tx: header failed (rc=%d)\n", rc);
+ return rc;
+ }
+
+ /* tx payload */
+ for (i = 0; i < pkt->n_seg; i++) {
+ rc = dev->ops->ll2_set_fragment_of_tx_packet(
+ dev->rdma_ctx,
+ dev->gsi_ll2_handle,
+ pkt->payload[i].baddr,
+ pkt->payload[i].len);
+
+ if (rc) {
+ /* If this fails there is not much to do: part of the
+ * packet has been posted, so we can't free the memory
+ * and will need to wait for the completion.
+ */
+ DP_ERR(dev, "ll2 tx: payload failed (rc=%d)\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int qedr_ll2_stop(struct qedr_dev *dev)
+{
+ int rc;
+
+ if (dev->gsi_ll2_handle == QED_LL2_UNUSED_HANDLE)
+ return 0;
+
+ /* remove LL2 MAC address filter */
+ rc = dev->ops->ll2_set_mac_filter(dev->cdev,
+ dev->gsi_ll2_mac_address, NULL);
+
+ rc = dev->ops->ll2_terminate_connection(dev->rdma_ctx,
+ dev->gsi_ll2_handle);
+ if (rc)
+ DP_ERR(dev, "Failed to terminate LL2 connection (rc=%d)\n", rc);
+
+ dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
+
+ dev->gsi_ll2_handle = QED_LL2_UNUSED_HANDLE;
+
+ return rc;
+}
+
+static int qedr_ll2_start(struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs, struct qedr_qp *qp)
+{
+ struct qed_ll2_acquire_data data;
+ struct qed_ll2_cbs cbs;
+ int rc;
+
+ /* configure and start LL2 */
+ cbs.rx_comp_cb = qedr_ll2_complete_rx_packet;
+ cbs.tx_comp_cb = qedr_ll2_complete_tx_packet;
+ cbs.rx_release_cb = qedr_ll2_release_rx_packet;
+ cbs.tx_release_cb = qedr_ll2_complete_tx_packet;
+ cbs.cookie = dev;
+
+ memset(&data, 0, sizeof(data));
+ data.input.conn_type = QED_LL2_TYPE_ROCE;
+ data.input.mtu = dev->ndev->mtu;
+ data.input.rx_num_desc = attrs->cap.max_recv_wr;
+ data.input.rx_drop_ttl0_flg = true;
+ data.input.rx_vlan_removal_en = false;
+ data.input.tx_num_desc = attrs->cap.max_send_wr;
+ data.input.tx_tc = 0;
+ data.input.tx_dest = QED_LL2_TX_DEST_NW;
+ data.input.ai_err_packet_too_big = QED_LL2_DROP_PACKET;
+ data.input.ai_err_no_buf = QED_LL2_DROP_PACKET;
+ data.input.gsi_enable = 1;
+ data.p_connection_handle = &dev->gsi_ll2_handle;
+ data.cbs = &cbs;
+
+ rc = dev->ops->ll2_acquire_connection(dev->rdma_ctx, &data);
+ if (rc) {
+ DP_ERR(dev,
+ "ll2 start: failed to acquire LL2 connection (rc=%d)\n",
+ rc);
+ return rc;
+ }
+
+ rc = dev->ops->ll2_establish_connection(dev->rdma_ctx,
+ dev->gsi_ll2_handle);
+ if (rc) {
+ DP_ERR(dev,
+ "ll2 start: failed to establish LL2 connection (rc=%d)\n",
+ rc);
+ goto err1;
+ }
+
+ rc = dev->ops->ll2_set_mac_filter(dev->cdev, NULL, dev->ndev->dev_addr);
+ if (rc)
+ goto err2;
+
+ return 0;
+
+err2:
+ dev->ops->ll2_terminate_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
+err1:
+ dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
+
+ return rc;
+}
+
+int qedr_create_gsi_qp(struct qedr_dev *dev, struct ib_qp_init_attr *attrs,
+ struct qedr_qp *qp)
+{
+ int rc;
+
+ rc = qedr_check_gsi_qp_attrs(dev, attrs);
+ if (rc)
+ return rc;
+
+ rc = qedr_ll2_start(dev, attrs, qp);
+ if (rc) {
+ DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
+ return rc;
+ }
+
+ /* create QP */
+ qp->ibqp.qp_num = 1;
+ qp->rq.max_wr = attrs->cap.max_recv_wr;
+ qp->sq.max_wr = attrs->cap.max_send_wr;
+
+ qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
+ GFP_KERNEL);
+ if (!qp->rqe_wr_id)
+ goto err;
+ qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
+ GFP_KERNEL);
+ if (!qp->wqe_wr_id)
+ goto err;
+
+ qedr_store_gsi_qp_cq(dev, qp, attrs);
+ ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
+
+ /* the GSI CQ is handled by the driver so remove it from the FW */
+ qedr_destroy_gsi_cq(dev, attrs);
+ dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;
+
+ DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);
+
+ return 0;
+
+err:
+ kfree(qp->rqe_wr_id);
+
+ rc = qedr_ll2_stop(dev);
+ if (rc)
+ DP_ERR(dev, "create gsi qp: failed destroy on create\n");
+
+ return -ENOMEM;
+}
+
+int qedr_destroy_gsi_qp(struct qedr_dev *dev)
+{
+ return qedr_ll2_stop(dev);
+}
+
+#define QEDR_MAX_UD_HEADER_SIZE (100)
+#define QEDR_GSI_QPN (1)
+static inline int qedr_gsi_build_header(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ const struct ib_send_wr *swr,
+ struct ib_ud_header *udh,
+ int *roce_mode)
+{
+ bool has_vlan = false, has_grh_ipv6 = true;
+ struct rdma_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+ const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
+ int send_size = 0;
+ u16 vlan_id = 0;
+ u16 ether_type;
+ int rc;
+ int ip_ver = 0;
+
+ bool has_udp = false;
+ int i;
+
+ rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
+ if (rc)
+ return rc;
+
+ if (vlan_id < VLAN_CFI_MASK)
+ has_vlan = true;
+
+ send_size = 0;
+ for (i = 0; i < swr->num_sge; ++i)
+ send_size += swr->sg_list[i].length;
+
+ has_udp = (sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
+ if (!has_udp) {
+ /* RoCE v1 */
+ ether_type = ETH_P_IBOE;
+ *roce_mode = ROCE_V1;
+ } else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
+ /* RoCE v2 IPv4 */
+ ip_ver = 4;
+ ether_type = ETH_P_IP;
+ has_grh_ipv6 = false;
+ *roce_mode = ROCE_V2_IPV4;
+ } else {
+ /* RoCE v2 IPv6 */
+ ip_ver = 6;
+ ether_type = ETH_P_IPV6;
+ *roce_mode = ROCE_V2_IPV6;
+ }
+
+ rc = ib_ud_header_init(send_size, false, true, has_vlan,
+ has_grh_ipv6, ip_ver, has_udp, 0, udh);
+ if (rc) {
+ DP_ERR(dev, "gsi post send: failed to init header\n");
+ return rc;
+ }
+
+ /* ENET + VLAN headers */
+ ether_addr_copy(udh->eth.dmac_h, ah_attr->roce.dmac);
+ ether_addr_copy(udh->eth.smac_h, dev->ndev->dev_addr);
+ if (has_vlan) {
+ udh->eth.type = htons(ETH_P_8021Q);
+ udh->vlan.tag = htons(vlan_id);
+ udh->vlan.type = htons(ether_type);
+ } else {
+ udh->eth.type = htons(ether_type);
+ }
+
+ /* BTH */
+ udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED);
+ udh->bth.pkey = QEDR_ROCE_PKEY_DEFAULT;
+ udh->bth.destination_qpn = htonl(ud_wr(swr)->remote_qpn);
+ udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1));
+ udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
+
+ /* DETH */
+ udh->deth.qkey = htonl(0x80010000);
+ udh->deth.source_qpn = htonl(QEDR_GSI_QPN);
+
+ if (has_grh_ipv6) {
+ /* GRH / IPv6 header */
+ udh->grh.traffic_class = grh->traffic_class;
+ udh->grh.flow_label = grh->flow_label;
+ udh->grh.hop_limit = grh->hop_limit;
+ udh->grh.destination_gid = grh->dgid;
+ memcpy(&udh->grh.source_gid.raw, sgid_attr->gid.raw,
+ sizeof(udh->grh.source_gid.raw));
+ } else {
+ /* IPv4 header */
+ u32 ipv4_addr;
+
+ udh->ip4.protocol = IPPROTO_UDP;
+ udh->ip4.tos = htonl(grh->flow_label);
+ udh->ip4.frag_off = htons(IP_DF);
+ udh->ip4.ttl = grh->hop_limit;
+
+ ipv4_addr = qedr_get_ipv4_from_gid(sgid_attr->gid.raw);
+ udh->ip4.saddr = ipv4_addr;
+ ipv4_addr = qedr_get_ipv4_from_gid(grh->dgid.raw);
+ udh->ip4.daddr = ipv4_addr;
+ /* note: checksum is calculated by the device */
+ }
+
+ /* UDP */
+ if (has_udp) {
+ udh->udp.sport = htons(QEDR_ROCE_V2_UDP_SPORT);
+ udh->udp.dport = htons(ROCE_V2_UDP_DPORT);
+ udh->udp.csum = 0;
+ /* UDP length is untouched hence is zero */
+ }
+ return 0;
+}
+
+static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ const struct ib_send_wr *swr,
+ struct qed_roce_ll2_packet **p_packet)
+{
+ u8 ud_header_buffer[QEDR_MAX_UD_HEADER_SIZE];
+ struct qed_roce_ll2_packet *packet;
+ struct pci_dev *pdev = dev->pdev;
+ int roce_mode, header_size;
+ struct ib_ud_header udh;
+ int i, rc;
+
+ *p_packet = NULL;
+
+ rc = qedr_gsi_build_header(dev, qp, swr, &udh, &roce_mode);
+ if (rc)
+ return rc;
+
+ header_size = ib_ud_header_pack(&udh, &ud_header_buffer);
+
+ packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
+ if (!packet)
+ return -ENOMEM;
+
+ packet->header.vaddr = dma_alloc_coherent(&pdev->dev, header_size,
+ &packet->header.baddr,
+ GFP_ATOMIC);
+ if (!packet->header.vaddr) {
+ kfree(packet);
+ return -ENOMEM;
+ }
+
+ if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
+ packet->tx_dest = QED_LL2_TX_DEST_LB;
+ else
+ packet->tx_dest = QED_LL2_TX_DEST_NW;
+
+ packet->roce_mode = roce_mode;
+ memcpy(packet->header.vaddr, ud_header_buffer, header_size);
+ packet->header.len = header_size;
+ packet->n_seg = swr->num_sge;
+ for (i = 0; i < packet->n_seg; i++) {
+ packet->payload[i].baddr = swr->sg_list[i].addr;
+ packet->payload[i].len = swr->sg_list[i].length;
+ }
+
+ *p_packet = packet;
+
+ return 0;
+}
+
+int qedr_gsi_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ struct qed_roce_ll2_packet *pkt = NULL;
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qedr_dev *dev = qp->dev;
+ unsigned long flags;
+ int rc;
+
+ if (qp->state != QED_ROCE_QP_STATE_RTS) {
+ *bad_wr = wr;
+ DP_ERR(dev,
+ "gsi post send: failed to post send request. QP state is %d and not QED_ROCE_QP_STATE_RTS\n",
+ qp->state);
+ return -EINVAL;
+ }
+
+ if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) {
+ DP_ERR(dev, "gsi post send: num_sge is too large (%d>%d)\n",
+ wr->num_sge, RDMA_MAX_SGE_PER_SQ_WQE);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ if (wr->opcode != IB_WR_SEND) {
+ DP_ERR(dev,
+ "gsi post send: failed due to unsupported opcode %d\n",
+ wr->opcode);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ rc = qedr_gsi_build_packet(dev, qp, wr, &pkt);
+ if (rc) {
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ goto err;
+ }
+
+ rc = qedr_ll2_post_tx(dev, pkt);
+
+ if (!rc) {
+ qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
+ qedr_inc_sw_prod(&qp->sq);
+ DP_DEBUG(qp->dev, QEDR_MSG_GSI,
+ "gsi post send: opcode=%d, wr_id=%llx\n", wr->opcode,
+ wr->wr_id);
+ } else {
+ DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
+ rc = -EAGAIN;
+ *bad_wr = wr;
+ }
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ if (wr->next) {
+ DP_ERR(dev,
+ "gsi post send: failed second WR. Only one WR may be passed at a time\n");
+ *bad_wr = wr->next;
+ rc = -EINVAL;
+ }
+
+ return rc;
+
+err:
+ *bad_wr = wr;
+ return rc;
+}
+
+int qedr_gsi_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ unsigned long flags;
+ int rc = 0;
+
+ if ((qp->state != QED_ROCE_QP_STATE_RTR) &&
+ (qp->state != QED_ROCE_QP_STATE_RTS)) {
+ *bad_wr = wr;
+ DP_ERR(dev,
+ "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTR/S\n",
+ qp->state);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ while (wr) {
+ if (wr->num_sge > QEDR_GSI_MAX_RECV_SGE) {
+ DP_ERR(dev,
+ "gsi post recv: failed to post rx buffer. too many sges %d>%d\n",
+ wr->num_sge, QEDR_GSI_MAX_RECV_SGE);
+ goto err;
+ }
+
+ rc = dev->ops->ll2_post_rx_buffer(dev->rdma_ctx,
+ dev->gsi_ll2_handle,
+ wr->sg_list[0].addr,
+ wr->sg_list[0].length,
+ NULL /* cookie */,
+ 1 /* notify_fw */);
+ if (rc) {
+ DP_ERR(dev,
+ "gsi post recv: failed to post rx buffer (rc=%d)\n",
+ rc);
+ goto err;
+ }
+
+ memset(&qp->rqe_wr_id[qp->rq.prod], 0,
+ sizeof(qp->rqe_wr_id[qp->rq.prod]));
+ qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];
+ qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
+
+ qedr_inc_sw_prod(&qp->rq);
+
+ wr = wr->next;
+ }
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ return rc;
+err:
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ *bad_wr = wr;
+ return -ENOMEM;
+}
+
+int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
+ struct qedr_qp *qp = dev->gsi_qp;
+ unsigned long flags;
+ u16 vlan_id;
+ int i = 0;
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+
+ while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
+ memset(&wc[i], 0, sizeof(*wc));
+
+ wc[i].qp = &qp->ibqp;
+ wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
+ wc[i].opcode = IB_WC_RECV;
+ wc[i].pkey_index = 0;
+ wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
+ IB_WC_GENERAL_ERR : IB_WC_SUCCESS;
+ /* Index 0 - currently only one recv SGE is supported */
+ wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
+ wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
+ ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
+ wc[i].wc_flags |= IB_WC_WITH_SMAC;
+
+ vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan &
+ VLAN_VID_MASK;
+ if (vlan_id) {
+ wc[i].wc_flags |= IB_WC_WITH_VLAN;
+ wc[i].vlan_id = vlan_id;
+ wc[i].sl = (qp->rqe_wr_id[qp->rq.cons].vlan &
+ VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ }
+
+ qedr_inc_sw_cons(&qp->rq);
+ i++;
+ }
+
+ while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) {
+ memset(&wc[i], 0, sizeof(*wc));
+
+ wc[i].qp = &qp->ibqp;
+ wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
+ wc[i].opcode = IB_WC_SEND;
+ wc[i].status = IB_WC_SUCCESS;
+
+ qedr_inc_sw_cons(&qp->sq);
+ i++;
+ }
+
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ DP_DEBUG(dev, QEDR_MSG_GSI,
+ "gsi poll_cq: requested entries=%d, actual=%d, qp->rq.cons=%d, qp->rq.gsi_cons=%x, qp->sq.cons=%d, qp->sq.gsi_cons=%d, qp_num=%d\n",
+ num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons,
+ qp->sq.gsi_cons, qp->ibqp.qp_num);
+
+ return i;
+}
diff --git a/drivers/infiniband/hw/qib/qib_pio_copy.c b/drivers/infiniband/hw/qedr/qedr_roce_cm.h
index 10b8c444dd31..f3432f035ec6 100644
--- a/drivers/infiniband/hw/qib/qib_pio_copy.c
+++ b/drivers/infiniband/hw/qedr/qedr_roce_cm.h
@@ -1,5 +1,5 @@
-/*
- * Copyright (c) 2009 QLogic Corporation. All rights reserved.
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -17,7 +17,7 @@
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
+ * disclaimer in the documentation and /or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
@@ -29,36 +29,31 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+#ifndef LINUX_QEDR_CM_H_
+#define LINUX_QEDR_CM_H_
-#include "qib.h"
+#define QEDR_GSI_MAX_RECV_WR (4096)
+#define QEDR_GSI_MAX_SEND_WR (4096)
-/**
- * qib_pio_copy - copy data to MMIO space, in multiples of 32-bits
- * @to: destination, in MMIO space (must be 64-bit aligned)
- * @from: source (must be 64-bit aligned)
- * @count: number of 32-bit quantities to copy
- *
- * Copy data from kernel space to MMIO space, in multiples of 32 bits at a
- * time. Order of access is not guaranteed, nor is a memory barrier
- * performed afterwards.
- */
-void qib_pio_copy(void __iomem *to, const void *from, size_t count)
-{
-#ifdef CONFIG_64BIT
- u64 __iomem *dst = to;
- const u64 *src = from;
- const u64 *end = src + (count >> 1);
+#define QEDR_GSI_MAX_RECV_SGE (1) /* LL2 FW limitation */
- while (src < end)
- __raw_writeq(*src++, dst++);
- if (count & 1)
- __raw_writel(*(const u32 *)src, dst);
-#else
- u32 __iomem *dst = to;
- const u32 *src = from;
- const u32 *end = src + count;
+#define QEDR_ROCE_V2_UDP_SPORT (0000)
- while (src < end)
- __raw_writel(*src++, dst++);
-#endif
+static inline u32 qedr_get_ipv4_from_gid(const u8 *gid)
+{
+ return *(u32 *)(void *)&gid[12];
}
+
+/* RDMA CM */
+int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int qedr_gsi_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+int qedr_gsi_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int qedr_create_gsi_qp(struct qedr_dev *dev, struct ib_qp_init_attr *attrs,
+ struct qedr_qp *qp);
+void qedr_store_gsi_qp_cq(struct qedr_dev *dev,
+ struct qedr_qp *qp, struct ib_qp_init_attr *attrs);
+int qedr_destroy_gsi_qp(struct qedr_dev *dev);
+void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info);
+#endif
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
new file mode 100644
index 000000000000..ab9bf0922979
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -0,0 +1,4499 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+#include <linux/iommu.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+#include <rdma/uverbs_ioctl.h>
+
+#include <linux/qed/common_hsi.h>
+#include "qedr_hsi_rdma.h"
+#include <linux/qed/qed_if.h>
+#include "qedr.h"
+#include "verbs.h"
+#include <rdma/qedr-abi.h>
+#include "qedr_roce_cm.h"
+#include "qedr_iw_cm.h"
+
+#define QEDR_SRQ_WQE_ELEM_SIZE sizeof(union rdma_srq_elm)
+#define RDMA_MAX_SGE_PER_SRQ (4)
+#define RDMA_MAX_SRQ_WQE_SIZE (RDMA_MAX_SGE_PER_SRQ + 1)
+
+#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
+
+enum {
+ QEDR_USER_MMAP_IO_WC = 0,
+ QEDR_USER_MMAP_PHYS_PAGE,
+};
+
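+/* Copy at most udata->outlen bytes so that an older user library with a
+ * shorter response structure still receives a (truncated) copy.
+ */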
+static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
+ size_t len)
+{
+ size_t min_len = min_t(size_t, len, udata->outlen);
+
+ return ib_copy_to_udata(udata, src, min_len);
+}
+
+int qedr_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
+{
+ if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
+ return -EINVAL;
+
+ *pkey = QEDR_ROCE_PKEY_DEFAULT;
+ return 0;
+}
+
+int qedr_iw_query_gid(struct ib_device *ibdev, u32 port,
+ int index, union ib_gid *sgid)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+
+ memset(sgid->raw, 0, sizeof(sgid->raw));
+ ether_addr_copy(sgid->raw, dev->ndev->dev_addr);
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
+ sgid->global.interface_id, sgid->global.subnet_prefix);
+
+ return 0;
+}
+
+int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
+ struct qedr_device_attr *qattr = &dev->attr;
+ struct qedr_srq *srq = get_qedr_srq(ibsrq);
+
+ srq_attr->srq_limit = srq->srq_limit;
+ srq_attr->max_wr = qattr->max_srq_wr;
+ srq_attr->max_sge = qattr->max_sge;
+
+ return 0;
+}
+
+int qedr_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *attr, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+ struct qedr_device_attr *qattr = &dev->attr;
+
+ if (!dev->rdma_ctx) {
+ DP_ERR(dev,
+ "qedr_query_device called with invalid params rdma_ctx=%p\n",
+ dev->rdma_ctx);
+ return -EINVAL;
+ }
+
+ memset(attr, 0, sizeof(*attr));
+
+ attr->fw_ver = qattr->fw_ver;
+ attr->sys_image_guid = qattr->sys_image_guid;
+ attr->max_mr_size = qattr->max_mr_size;
+ attr->page_size_cap = qattr->page_size_caps;
+ attr->vendor_id = qattr->vendor_id;
+ attr->vendor_part_id = qattr->vendor_part_id;
+ attr->hw_ver = qattr->hw_ver;
+ attr->max_qp = qattr->max_qp;
+ attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
+ attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
+ IB_DEVICE_RC_RNR_NAK_GEN |
+ IB_DEVICE_MEM_MGT_EXTENSIONS;
+ attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
+
+ if (!rdma_protocol_iwarp(&dev->ibdev, 1))
+ attr->device_cap_flags |= IB_DEVICE_XRC;
+ attr->max_send_sge = qattr->max_sge;
+ attr->max_recv_sge = qattr->max_sge;
+ attr->max_sge_rd = qattr->max_sge;
+ attr->max_cq = qattr->max_cq;
+ attr->max_cqe = qattr->max_cqe;
+ attr->max_mr = qattr->max_mr;
+ attr->max_mw = qattr->max_mw;
+ attr->max_pd = qattr->max_pd;
+ attr->atomic_cap = dev->atomic_cap;
+ attr->max_qp_init_rd_atom =
+ 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
+ attr->max_qp_rd_atom =
+ min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
+ attr->max_qp_init_rd_atom);
+
+ attr->max_srq = qattr->max_srq;
+ attr->max_srq_sge = qattr->max_srq_sge;
+ attr->max_srq_wr = qattr->max_srq_wr;
+
+ attr->local_ca_ack_delay = qattr->dev_ack_delay;
+ attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
+ attr->max_pkeys = qattr->max_pkey;
+ attr->max_ah = qattr->max_ah;
+
+ return 0;
+}
+
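+/* Map the link speed reported by qed to the closest IB speed/width pair. */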
+static inline void get_link_speed_and_width(int speed, u16 *ib_speed,
+ u8 *ib_width)
+{
+ switch (speed) {
+ case 1000:
+ *ib_speed = IB_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+ case 10000:
+ *ib_speed = IB_SPEED_QDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case 20000:
+ *ib_speed = IB_SPEED_DDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case 25000:
+ *ib_speed = IB_SPEED_EDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case 40000:
+ *ib_speed = IB_SPEED_QDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case 50000:
+ *ib_speed = IB_SPEED_HDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case 100000:
+ *ib_speed = IB_SPEED_EDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ default:
+ /* Unsupported */
+ *ib_speed = IB_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ }
+}
+
+int qedr_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *attr)
+{
+ struct qedr_dev *dev;
+ struct qed_rdma_port *rdma_port;
+
+ dev = get_qedr_dev(ibdev);
+
+ if (!dev->rdma_ctx) {
+ DP_ERR(dev, "rdma_ctx is NULL\n");
+ return -EINVAL;
+ }
+
+ rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
+
+ /* *attr being zeroed by the caller, avoid zeroing it here */
+ if (rdma_port->port_state == QED_RDMA_PORT_UP) {
+ attr->state = IB_PORT_ACTIVE;
+ attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+ } else {
+ attr->state = IB_PORT_DOWN;
+ attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+ }
+ attr->max_mtu = IB_MTU_4096;
+ attr->lid = 0;
+ attr->lmc = 0;
+ attr->sm_lid = 0;
+ attr->sm_sl = 0;
+ attr->ip_gids = true;
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ attr->active_mtu = iboe_get_mtu(dev->iwarp_max_mtu);
+ attr->gid_tbl_len = 1;
+ } else {
+ attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
+ attr->gid_tbl_len = QEDR_MAX_SGID;
+ attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
+ }
+ attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
+ attr->qkey_viol_cntr = 0;
+ get_link_speed_and_width(rdma_port->link_speed,
+ &attr->active_speed, &attr->active_width);
+ attr->max_msg_sz = rdma_port->max_msg_size;
+ attr->max_vl_num = 4;
+
+ return 0;
+}
+
+int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
+{
+ struct ib_device *ibdev = uctx->device;
+ int rc;
+ struct qedr_ucontext *ctx = get_qedr_ucontext(uctx);
+ struct qedr_alloc_ucontext_resp uresp = {};
+ struct qedr_alloc_ucontext_req ureq = {};
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+ struct qed_rdma_add_user_out_params oparams;
+ struct qedr_user_mmap_entry *entry;
+
+ if (!udata)
+ return -EFAULT;
+
+ if (udata->inlen) {
+ rc = ib_copy_from_udata(&ureq, udata,
+ min(sizeof(ureq), udata->inlen));
+ if (rc) {
+ DP_ERR(dev, "Problem copying data from user space\n");
+ return -EFAULT;
+ }
+ ctx->edpm_mode = !!(ureq.context_flags &
+ QEDR_ALLOC_UCTX_EDPM_MODE);
+ ctx->db_rec = !!(ureq.context_flags & QEDR_ALLOC_UCTX_DB_REC);
+ }
+
+ rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
+ if (rc) {
+ DP_ERR(dev,
+ "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
+ rc);
+ return rc;
+ }
+
+ ctx->dpi = oparams.dpi;
+ ctx->dpi_addr = oparams.dpi_addr;
+ ctx->dpi_phys_addr = oparams.dpi_phys_addr;
+ ctx->dpi_size = oparams.dpi_size;
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ entry->io_address = ctx->dpi_phys_addr;
+ entry->length = ctx->dpi_size;
+ entry->mmap_flag = QEDR_USER_MMAP_IO_WC;
+ entry->dpi = ctx->dpi;
+ entry->dev = dev;
+ rc = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry,
+ ctx->dpi_size);
+ if (rc) {
+ kfree(entry);
+ goto err;
+ }
+ ctx->db_mmap_entry = &entry->rdma_entry;
+
+ if (!dev->user_dpm_enabled)
+ uresp.dpm_flags = 0;
+ else if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ uresp.dpm_flags = QEDR_DPM_TYPE_IWARP_LEGACY;
+ else
+ uresp.dpm_flags = QEDR_DPM_TYPE_ROCE_ENHANCED |
+ QEDR_DPM_TYPE_ROCE_LEGACY |
+ QEDR_DPM_TYPE_ROCE_EDPM_MODE;
+
+ if (ureq.context_flags & QEDR_SUPPORT_DPM_SIZES) {
+ uresp.dpm_flags |= QEDR_DPM_SIZES_SET;
+ uresp.ldpm_limit_size = QEDR_LDPM_MAX_SIZE;
+ uresp.edpm_trans_size = QEDR_EDPM_TRANS_SIZE;
+ uresp.edpm_limit_size = QEDR_EDPM_MAX_SIZE;
+ }
+
+ uresp.wids_enabled = 1;
+ uresp.wid_count = oparams.wid_count;
+ uresp.db_pa = rdma_user_mmap_get_offset(ctx->db_mmap_entry);
+ uresp.db_size = ctx->dpi_size;
+ uresp.max_send_wr = dev->attr.max_sqe;
+ uresp.max_recv_wr = dev->attr.max_rqe;
+ uresp.max_srq_wr = dev->attr.max_srq_wr;
+ uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
+ uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
+ uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
+ uresp.max_cqes = QEDR_MAX_CQES;
+
+ rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rc)
+ goto err;
+
+ ctx->dev = dev;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
+ &ctx->ibucontext);
+ return 0;
+
+err:
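+ /* If the mmap entry was created, removing it releases the DPI via
+ * qedr_mmap_free(); otherwise release the DPI directly.
+ */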
+ if (!ctx->db_mmap_entry)
+ dev->ops->rdma_remove_user(dev->rdma_ctx, ctx->dpi);
+ else
+ rdma_user_mmap_entry_remove(ctx->db_mmap_entry);
+
+ return rc;
+}
+
+void qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
+{
+ struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
+
+ DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
+ uctx);
+
+ rdma_user_mmap_entry_remove(uctx->db_mmap_entry);
+}
+
+void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct qedr_user_mmap_entry *entry = get_qedr_mmap_entry(rdma_entry);
+ struct qedr_dev *dev = entry->dev;
+
+ if (entry->mmap_flag == QEDR_USER_MMAP_PHYS_PAGE)
+ free_page((unsigned long)entry->address);
+ else if (entry->mmap_flag == QEDR_USER_MMAP_IO_WC)
+ dev->ops->rdma_remove_user(dev->rdma_ctx, entry->dpi);
+
+ kfree(entry);
+}
+
+int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma)
+{
+ struct ib_device *dev = ucontext->device;
+ size_t length = vma->vm_end - vma->vm_start;
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct qedr_user_mmap_entry *entry;
+ int rc = 0;
+ u64 pfn;
+
+ ibdev_dbg(dev,
+ "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
+ vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
+
+ rdma_entry = rdma_user_mmap_entry_get(ucontext, vma);
+ if (!rdma_entry) {
+ ibdev_dbg(dev, "pgoff[%#lx] does not have valid entry\n",
+ vma->vm_pgoff);
+ return -EINVAL;
+ }
+ entry = get_qedr_mmap_entry(rdma_entry);
+ ibdev_dbg(dev,
+ "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
+ entry->io_address, length, entry->mmap_flag);
+
+ switch (entry->mmap_flag) {
+ case QEDR_USER_MMAP_IO_WC:
+ pfn = entry->io_address >> PAGE_SHIFT;
+ rc = rdma_user_mmap_io(ucontext, vma, pfn, length,
+ pgprot_writecombine(vma->vm_page_prot),
+ rdma_entry);
+ break;
+ case QEDR_USER_MMAP_PHYS_PAGE:
+ rc = vm_insert_page(vma, vma->vm_start,
+ virt_to_page(entry->address));
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ if (rc)
+ ibdev_dbg(dev,
+ "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
+ entry->io_address, length, entry->mmap_flag, rc);
+
+ rdma_user_mmap_entry_put(rdma_entry);
+ return rc;
+}
+
+int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct ib_device *ibdev = ibpd->device;
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+ u16 pd_id;
+ int rc;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
+ udata ? "User Lib" : "Kernel");
+
+ if (!dev->rdma_ctx) {
+ DP_ERR(dev, "invalid RDMA context\n");
+ return -EINVAL;
+ }
+
+ rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+ if (rc)
+ return rc;
+
+ pd->pd_id = pd_id;
+
+ if (udata) {
+ struct qedr_alloc_pd_uresp uresp = {
+ .pd_id = pd_id,
+ };
+ struct qedr_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct qedr_ucontext, ibucontext);
+
+ rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rc) {
+ DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
+ dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
+ return rc;
+ }
+
+ pd->uctx = context;
+ pd->uctx->pd = pd;
+ }
+
+ return 0;
+}
+
+int qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
+ dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
+ return 0;
+}
+
+
+int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
+ struct qedr_xrcd *xrcd = get_qedr_xrcd(ibxrcd);
+
+ return dev->ops->rdma_alloc_xrcd(dev->rdma_ctx, &xrcd->xrcd_id);
+}
+
+int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
+ u16 xrcd_id = get_qedr_xrcd(ibxrcd)->xrcd_id;
+
+ dev->ops->rdma_dealloc_xrcd(dev->rdma_ctx, xrcd_id);
+ return 0;
+}
+static void qedr_free_pbl(struct qedr_dev *dev,
+ struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int i;
+
+ for (i = 0; i < pbl_info->num_pbls; i++) {
+ if (!pbl[i].va)
+ continue;
+ dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
+ pbl[i].va, pbl[i].pa);
+ }
+
+ kfree(pbl);
+}
+
+#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
+#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
+
+#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
+#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
+#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
+
+static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
+ struct qedr_pbl_info *pbl_info,
+ gfp_t flags)
+{
+ struct pci_dev *pdev = dev->pdev;
+ struct qedr_pbl *pbl_table;
+ dma_addr_t *pbl_main_tbl;
+ dma_addr_t pa;
+ void *va;
+ int i;
+
+ pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
+ if (!pbl_table)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < pbl_info->num_pbls; i++) {
+ va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
+ flags);
+ if (!va)
+ goto err;
+
+ pbl_table[i].va = va;
+ pbl_table[i].pa = pa;
+ }
+
+ /* Two-layered PBLs: if we have more than one PBL, we need to
+ * initialize the first one with physical pointers to all of the rest.
+ */
+ pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
+ for (i = 0; i < pbl_info->num_pbls - 1; i++)
+ pbl_main_tbl[i] = pbl_table[i + 1].pa;
+
+ return pbl_table;
+
+err:
+ for (i--; i >= 0; i--)
+ dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
+ pbl_table[i].va, pbl_table[i].pa);
+
+ qedr_free_pbl(dev, pbl_info, pbl_table);
+
+ return ERR_PTR(-ENOMEM);
+}
+
+static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
+ struct qedr_pbl_info *pbl_info,
+ u32 num_pbes, int two_layer_capable)
+{
+ u32 pbl_capacity;
+ u32 pbl_size;
+ u32 num_pbls;
+
+ if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
+ if (num_pbes > MAX_PBES_TWO_LAYER) {
+ DP_ERR(dev, "prepare pbl table: too many pages %d\n",
+ num_pbes);
+ return -EINVAL;
+ }
+
+ /* calculate required pbl page size */
+ pbl_size = MIN_FW_PBL_PAGE_SIZE;
+ pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
+ NUM_PBES_ON_PAGE(pbl_size);
+
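+ /* Double the PBL page size until a two-level table covers all PBEs. */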
+ while (pbl_capacity < num_pbes) {
+ pbl_size *= 2;
+ pbl_capacity = pbl_size / sizeof(u64);
+ pbl_capacity = pbl_capacity * pbl_capacity;
+ }
+
+ num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
+ num_pbls++; /* One extra for layer 0, which points to the other PBLs */
+ pbl_info->two_layered = true;
+ } else {
+ /* One layered PBL */
+ num_pbls = 1;
+ pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
+ roundup_pow_of_two((num_pbes * sizeof(u64))));
+ pbl_info->two_layered = false;
+ }
+
+ pbl_info->num_pbls = num_pbls;
+ pbl_info->pbl_size = pbl_size;
+ pbl_info->num_pbes = num_pbes;
+
+ DP_DEBUG(dev, QEDR_MSG_MR,
+ "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
+ pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
+
+ return 0;
+}
+
+static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
+ struct qedr_pbl *pbl,
+ struct qedr_pbl_info *pbl_info, u32 pg_shift)
+{
+ int pbe_cnt, total_num_pbes = 0;
+ struct qedr_pbl *pbl_tbl;
+ struct ib_block_iter biter;
+ struct regpair *pbe;
+
+ if (!pbl_info->num_pbes)
+ return;
+
+ /* If we have a two-layered PBL, the first PBL points to the rest of
+ * the PBLs, and the first entry lies in the second PBL of the table.
+ */
+ if (pbl_info->two_layered)
+ pbl_tbl = &pbl[1];
+ else
+ pbl_tbl = pbl;
+
+ pbe = (struct regpair *)pbl_tbl->va;
+ if (!pbe) {
+ DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
+ return;
+ }
+
+ pbe_cnt = 0;
+
+ rdma_umem_for_each_dma_block (umem, &biter, BIT(pg_shift)) {
+ u64 pg_addr = rdma_block_iter_dma_address(&biter);
+
+ pbe->lo = cpu_to_le32(pg_addr);
+ pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
+
+ pbe_cnt++;
+ total_num_pbes++;
+ pbe++;
+
+ if (total_num_pbes == pbl_info->num_pbes)
+ return;
+
+ /* If the current PBL is full of PBEs, move on to the next PBL in
+ * the table.
+ */
+ if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
+ pbl_tbl++;
+ pbe = (struct regpair *)pbl_tbl->va;
+ pbe_cnt = 0;
+ }
+ }
+}
+
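+/* Register a doorbell address/data pair with the qed doorbell recovery
+ * mechanism. A NULL db_data indicates an old user library without doorbell
+ * recovery support, in which case registration is skipped.
+ */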
+static int qedr_db_recovery_add(struct qedr_dev *dev,
+ void __iomem *db_addr,
+ void *db_data,
+ enum qed_db_rec_width db_width,
+ enum qed_db_rec_space db_space)
+{
+ if (!db_data) {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
+ return 0;
+ }
+
+ return dev->ops->common->db_recovery_add(dev->cdev, db_addr, db_data,
+ db_width, db_space);
+}
+
+static void qedr_db_recovery_del(struct qedr_dev *dev,
+ void __iomem *db_addr,
+ void *db_data)
+{
+ if (!db_data) {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
+ return;
+ }
+
+ /* Ignore return code as there is not much we can do about it. Error
+ * log will be printed inside.
+ */
+ dev->ops->common->db_recovery_del(dev->cdev, db_addr, db_data);
+}
+
+static int qedr_copy_cq_uresp(struct qedr_dev *dev,
+ struct qedr_cq *cq, struct ib_udata *udata,
+ u32 db_offset)
+{
+ struct qedr_create_cq_uresp uresp;
+ int rc;
+
+ memset(&uresp, 0, sizeof(uresp));
+
+ uresp.db_offset = db_offset;
+ uresp.icid = cq->icid;
+ if (cq->q.db_mmap_entry)
+ uresp.db_rec_addr =
+ rdma_user_mmap_get_offset(cq->q.db_mmap_entry);
+
+ rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rc)
+ DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
+
+ return rc;
+}
+
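+/* Advance the CQ to the next CQE, flipping the expected toggle bit when
+ * consuming past the last element of the PBL chain.
+ */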
+static void consume_cqe(struct qedr_cq *cq)
+{
+ if (cq->latest_cqe == cq->toggle_cqe)
+ cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
+
+ cq->latest_cqe = qed_chain_consume(&cq->pbl);
+}
+
+static inline int qedr_align_cq_entries(int entries)
+{
+ u64 size, aligned_size;
+
+ /* We allocate an extra entry that we don't report to the FW. */
+ size = (entries + 1) * QEDR_CQE_SIZE;
+ aligned_size = ALIGN(size, PAGE_SIZE);
+
+ return aligned_size / QEDR_CQE_SIZE;
+}
+
+static int qedr_init_user_db_rec(struct ib_udata *udata,
+ struct qedr_dev *dev, struct qedr_userq *q,
+ bool requires_db_rec)
+{
+ struct qedr_ucontext *uctx =
+ rdma_udata_to_drv_context(udata, struct qedr_ucontext,
+ ibucontext);
+ struct qedr_user_mmap_entry *entry;
+ int rc;
+
+ /* Abort for a non-doorbell user queue (SRQ) or a library without doorbell recovery support */
+ if (requires_db_rec == 0 || !uctx->db_rec)
+ return 0;
+
+ /* Allocate a page for doorbell recovery, add to mmap */
+ q->db_rec_data = (void *)get_zeroed_page(GFP_USER);
+ if (!q->db_rec_data) {
+ DP_ERR(dev, "get_zeroed_page failed\n");
+ return -ENOMEM;
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ goto err_free_db_data;
+
+ entry->address = q->db_rec_data;
+ entry->length = PAGE_SIZE;
+ entry->mmap_flag = QEDR_USER_MMAP_PHYS_PAGE;
+ rc = rdma_user_mmap_entry_insert(&uctx->ibucontext,
+ &entry->rdma_entry,
+ PAGE_SIZE);
+ if (rc)
+ goto err_free_entry;
+
+ q->db_mmap_entry = &entry->rdma_entry;
+
+ return 0;
+
+err_free_entry:
+ kfree(entry);
+
+err_free_db_data:
+ free_page((unsigned long)q->db_rec_data);
+ q->db_rec_data = NULL;
+ return -ENOMEM;
+}
+
+static inline int qedr_init_user_queue(struct ib_udata *udata,
+ struct qedr_dev *dev,
+ struct qedr_userq *q, u64 buf_addr,
+ size_t buf_len, bool requires_db_rec,
+ int access,
+ int alloc_and_init)
+{
+ u32 fw_pages;
+ int rc;
+
+ q->buf_addr = buf_addr;
+ q->buf_len = buf_len;
+ q->umem = ib_umem_get(&dev->ibdev, q->buf_addr, q->buf_len, access);
+ if (IS_ERR(q->umem)) {
+ DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
+ PTR_ERR(q->umem));
+ return PTR_ERR(q->umem);
+ }
+
+ fw_pages = ib_umem_num_dma_blocks(q->umem, 1 << FW_PAGE_SHIFT);
+ rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
+ if (rc)
+ goto err0;
+
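+ /* When alloc_and_init is set, allocate and populate the PBL table
+ * now; otherwise only allocate a placeholder that the caller fills
+ * in later (the iWARP user QP path).
+ */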
+ if (alloc_and_init) {
+ q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
+ if (IS_ERR(q->pbl_tbl)) {
+ rc = PTR_ERR(q->pbl_tbl);
+ goto err0;
+ }
+ qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
+ FW_PAGE_SHIFT);
+ } else {
+ q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
+ if (!q->pbl_tbl) {
+ rc = -ENOMEM;
+ goto err0;
+ }
+ }
+
+ /* mmap the user address used to store doorbell data for recovery */
+ return qedr_init_user_db_rec(udata, dev, q, requires_db_rec);
+
+err0:
+ ib_umem_release(q->umem);
+ q->umem = NULL;
+
+ return rc;
+}
+
+static inline void qedr_init_cq_params(struct qedr_cq *cq,
+ struct qedr_ucontext *ctx,
+ struct qedr_dev *dev, int vector,
+ int chain_entries, int page_cnt,
+ u64 pbl_ptr,
+ struct qed_rdma_create_cq_in_params
+ *params)
+{
+ memset(params, 0, sizeof(*params));
+ params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
+ params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
+ params->cnq_id = vector;
+ params->cq_size = chain_entries - 1;
+ params->dpi = (ctx) ? ctx->dpi : dev->dpi;
+ params->pbl_num_pages = page_cnt;
+ params->pbl_ptr = pbl_ptr;
+ params->pbl_two_level = 0;
+}
+
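+/* Ring the CQ doorbell: the consumer index and aggregation flags are
+ * written out as a single doorbell value.
+ */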
+static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
+{
+ cq->db.data.agg_flags = flags;
+ cq->db.data.value = cpu_to_le32(cons);
+ writeq(cq->db.raw, cq->db_addr);
+}
+
+int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
+ unsigned long sflags;
+ struct qedr_dev *dev;
+
+ dev = get_qedr_dev(ibcq->device);
+
+ if (cq->destroyed) {
+ DP_ERR(dev,
+ "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
+ cq, cq->icid);
+ return -EINVAL;
+ }
+
+
+ if (cq->cq_type == QEDR_CQ_TYPE_GSI)
+ return 0;
+
+ spin_lock_irqsave(&cq->cq_lock, sflags);
+
+ cq->arm_flags = 0;
+
+ if (flags & IB_CQ_SOLICITED)
+ cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
+
+ if (flags & IB_CQ_NEXT_COMP)
+ cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
+
+ doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
+
+ spin_unlock_irqrestore(&cq->cq_lock, sflags);
+
+ return 0;
+}
+
+int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_udata *udata = &attrs->driver_udata;
+ struct ib_device *ibdev = ibcq->device;
+ struct qedr_ucontext *ctx = rdma_udata_to_drv_context(
+ udata, struct qedr_ucontext, ibucontext);
+ struct qed_rdma_destroy_cq_out_params destroy_oparams;
+ struct qed_rdma_destroy_cq_in_params destroy_iparams;
+ struct qed_chain_init_params chain_params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_CONSUME,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U32,
+ .elem_size = sizeof(union rdma_cqe),
+ };
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+ struct qed_rdma_create_cq_in_params params;
+ struct qedr_create_cq_ureq ureq = {};
+ int vector = attr->comp_vector;
+ int entries = attr->cqe;
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
+ int chain_entries;
+ u32 db_offset;
+ int page_cnt;
+ u64 pbl_ptr;
+ u16 icid;
+ int rc;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT,
+ "create_cq: called from %s. entries=%d, vector=%d\n",
+ udata ? "User Lib" : "Kernel", entries, vector);
+
+ if (attr->flags)
+ return -EOPNOTSUPP;
+
+ if (entries > QEDR_MAX_CQES) {
+ DP_ERR(dev,
+ "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
+ entries, QEDR_MAX_CQES);
+ return -EINVAL;
+ }
+
+ chain_entries = qedr_align_cq_entries(entries);
+ chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
+ chain_params.num_elems = chain_entries;
+
+ /* Calculate the db offset; user space adds the DPI base, the kernel adds the db address */
+ db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
+
+ if (udata) {
+ if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
+ udata->inlen))) {
+ DP_ERR(dev,
+ "create cq: problem copying data from user space\n");
+ goto err0;
+ }
+
+ if (!ureq.len) {
+ DP_ERR(dev,
+ "create cq: cannot create a cq with 0 entries\n");
+ goto err0;
+ }
+
+ cq->cq_type = QEDR_CQ_TYPE_USER;
+
+ rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
+ ureq.len, true, IB_ACCESS_LOCAL_WRITE,
+ 1);
+ if (rc)
+ goto err0;
+
+ pbl_ptr = cq->q.pbl_tbl->pa;
+ page_cnt = cq->q.pbl_info.num_pbes;
+
+ cq->ibcq.cqe = chain_entries;
+ cq->q.db_addr = ctx->dpi_addr + db_offset;
+ } else {
+ cq->cq_type = QEDR_CQ_TYPE_KERNEL;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev, &cq->pbl,
+ &chain_params);
+ if (rc)
+ goto err0;
+
+ page_cnt = qed_chain_get_page_cnt(&cq->pbl);
+ pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
+ cq->ibcq.cqe = cq->pbl.capacity;
+ }
+
+ qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
+ pbl_ptr, &params);
+
+ rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
+ if (rc)
+ goto err1;
+
+ cq->icid = icid;
+ cq->sig = QEDR_CQ_MAGIC_NUMBER;
+ spin_lock_init(&cq->cq_lock);
+
+ if (udata) {
+ rc = qedr_copy_cq_uresp(dev, cq, udata, db_offset);
+ if (rc)
+ goto err2;
+
+ rc = qedr_db_recovery_add(dev, cq->q.db_addr,
+ &cq->q.db_rec_data->db_data,
+ DB_REC_WIDTH_64B,
+ DB_REC_USER);
+ if (rc)
+ goto err2;
+
+ } else {
+ /* Generate doorbell address. */
+ cq->db.data.icid = cq->icid;
+ cq->db_addr = dev->db_addr + db_offset;
+ cq->db.data.params = DB_AGG_CMD_MAX <<
+ RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
+
+ /* Point to the very last element; once we pass it, the toggle bit flips */
+ cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
+ cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
+ cq->latest_cqe = NULL;
+ consume_cqe(cq);
+ cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
+
+ rc = qedr_db_recovery_add(dev, cq->db_addr, &cq->db.data,
+ DB_REC_WIDTH_64B, DB_REC_KERNEL);
+ if (rc)
+ goto err2;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_CQ,
+ "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
+ cq->icid, cq, params.cq_size);
+
+ return 0;
+
+err2:
+ destroy_iparams.icid = cq->icid;
+ dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
+ &destroy_oparams);
+err1:
+ if (udata) {
+ qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
+ ib_umem_release(cq->q.umem);
+ if (cq->q.db_mmap_entry)
+ rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
+ } else {
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+ }
+err0:
+ return -EINVAL;
+}
+
+#define QEDR_DESTROY_CQ_MAX_ITERATIONS (10)
+#define QEDR_DESTROY_CQ_ITER_DURATION (10)
+
+int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+ struct qed_rdma_destroy_cq_out_params oparams;
+ struct qed_rdma_destroy_cq_in_params iparams;
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
+ int iter;
+
+ DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
+
+ cq->destroyed = 1;
+
+ /* GSI CQs are handled by the driver, so they don't exist in the FW */
+ if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
+ qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
+ return 0;
+ }
+
+ iparams.icid = cq->icid;
+ dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+
+ if (udata) {
+ qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
+ ib_umem_release(cq->q.umem);
+
+ if (cq->q.db_rec_data) {
+ qedr_db_recovery_del(dev, cq->q.db_addr,
+ &cq->q.db_rec_data->db_data);
+ rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
+ }
+ } else {
+ qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
+ }
+
+ /* We don't want the IRQ handler to handle a non-existing CQ, so we
+ * wait until all CNQ interrupts, if any, have been received. This will
+ * always happen, and will always happen very fast. If not, a serious
+ * error has occurred, which is why we can afford a long delay.
+ * We spin for a short time so we don't lose time on context switching
+ * in case all the completions are handled within that span. Otherwise
+ * we sleep for a while and check again. Since the CNQ may be
+ * associated with (only) the current CPU, we use msleep to allow the
+ * current CPU to be freed.
+ * The CNQ notification counter is incremented in qedr_irq_handler().
+ */
+ iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
+ while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
+ udelay(QEDR_DESTROY_CQ_ITER_DURATION);
+ iter--;
+ }
+
+ iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
+ while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
+ msleep(QEDR_DESTROY_CQ_ITER_DURATION);
+ iter--;
+ }
+
+ /* Note that we don't need to have explicit code to wait for the
+ * completion of the event handler because it is invoked from the EQ.
+ * Since the destroy CQ ramrod has also been received on the EQ we can
+ * be certain that there's no event handler in progress.
+ */
+ return 0;
+}
+
+static inline int get_gid_info_from_table(struct ib_qp *ibqp,
+ struct ib_qp_attr *attr,
+ int attr_mask,
+ struct qed_rdma_modify_qp_in_params
+ *qp_params)
+{
+ const struct ib_gid_attr *gid_attr;
+ enum rdma_network_type nw_type;
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+ u32 ipv4_addr;
+ int ret;
+ int i;
+
+ gid_attr = grh->sgid_attr;
+ ret = rdma_read_gid_l2_fields(gid_attr, &qp_params->vlan_id, NULL);
+ if (ret)
+ return ret;
+
+ nw_type = rdma_gid_attr_network_type(gid_attr);
+ switch (nw_type) {
+ case RDMA_NETWORK_IPV6:
+ memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
+ sizeof(qp_params->sgid));
+ memcpy(&qp_params->dgid.bytes[0],
+ &grh->dgid,
+ sizeof(qp_params->dgid));
+ qp_params->roce_mode = ROCE_V2_IPV6;
+ SET_FIELD(qp_params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
+ break;
+ case RDMA_NETWORK_ROCE_V1:
+ memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
+ sizeof(qp_params->sgid));
+ memcpy(&qp_params->dgid.bytes[0],
+ &grh->dgid,
+ sizeof(qp_params->dgid));
+ qp_params->roce_mode = ROCE_V1;
+ break;
+ case RDMA_NETWORK_IPV4:
+ memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
+ memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
+ ipv4_addr = qedr_get_ipv4_from_gid(gid_attr->gid.raw);
+ qp_params->sgid.ipv4_addr = ipv4_addr;
+ ipv4_addr =
+ qedr_get_ipv4_from_gid(grh->dgid.raw);
+ qp_params->dgid.ipv4_addr = ipv4_addr;
+ SET_FIELD(qp_params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
+ qp_params->roce_mode = ROCE_V2_IPV4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
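+ /* Convert the source and destination GID dwords from network to
+ * host byte order.
+ */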
+ for (i = 0; i < 4; i++) {
+ qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
+ qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
+ }
+
+ if (qp_params->vlan_id >= VLAN_CFI_MASK)
+ qp_params->vlan_id = 0;
+
+ return 0;
+}
+
+static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata)
+{
+ struct qedr_device_attr *qattr = &dev->attr;
+
+ /* QP0... attrs->qp_type == IB_QPT_GSI */
+ if (attrs->qp_type != IB_QPT_RC &&
+ attrs->qp_type != IB_QPT_GSI &&
+ attrs->qp_type != IB_QPT_XRC_INI &&
+ attrs->qp_type != IB_QPT_XRC_TGT) {
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "create qp: unsupported qp type=0x%x requested\n",
+ attrs->qp_type);
+ return -EOPNOTSUPP;
+ }
+
+ if (attrs->cap.max_send_wr > qattr->max_sqe) {
+ DP_ERR(dev,
+ "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
+ attrs->cap.max_send_wr, qattr->max_sqe);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_inline_data > qattr->max_inline) {
+ DP_ERR(dev,
+ "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
+ attrs->cap.max_inline_data, qattr->max_inline);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_send_sge > qattr->max_sge) {
+ DP_ERR(dev,
+ "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
+ attrs->cap.max_send_sge, qattr->max_sge);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_recv_sge > qattr->max_sge) {
+ DP_ERR(dev,
+ "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
+ attrs->cap.max_recv_sge, qattr->max_sge);
+ return -EINVAL;
+ }
+
+ /* Verify that consumer QPs are not trying to use the GSI QP's CQs.
+ * A TGT QP isn't associated with an RQ/SQ.
+ */
+ if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
+ (attrs->qp_type != IB_QPT_XRC_TGT) &&
+ (attrs->qp_type != IB_QPT_XRC_INI)) {
+ struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
+ struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);
+
+ if ((send_cq->cq_type == QEDR_CQ_TYPE_GSI) ||
+ (recv_cq->cq_type == QEDR_CQ_TYPE_GSI)) {
+ DP_ERR(dev,
+ "create qp: consumer QP cannot use GSI CQs.\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int qedr_copy_srq_uresp(struct qedr_dev *dev,
+ struct qedr_srq *srq, struct ib_udata *udata)
+{
+ struct qedr_create_srq_uresp uresp = {};
+ int rc;
+
+ uresp.srq_id = srq->srq_id;
+
+ rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rc)
+ DP_ERR(dev, "create srq: problem copying data to user space\n");
+
+ return rc;
+}
+
+static void qedr_copy_rq_uresp(struct qedr_dev *dev,
+ struct qedr_create_qp_uresp *uresp,
+ struct qedr_qp *qp)
+{
+ /* iWARP requires two doorbells per RQ. */
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ uresp->rq_db_offset =
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
+ uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
+ } else {
+ uresp->rq_db_offset =
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+ }
+
+ uresp->rq_icid = qp->icid;
+ if (qp->urq.db_mmap_entry)
+ uresp->rq_db_rec_addr =
+ rdma_user_mmap_get_offset(qp->urq.db_mmap_entry);
+}
+
+static void qedr_copy_sq_uresp(struct qedr_dev *dev,
+ struct qedr_create_qp_uresp *uresp,
+ struct qedr_qp *qp)
+{
+ uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
+
+ /* iWARP uses the same cid for rq and sq */
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ uresp->sq_icid = qp->icid;
+ else
+ uresp->sq_icid = qp->icid + 1;
+
+ if (qp->usq.db_mmap_entry)
+ uresp->sq_db_rec_addr =
+ rdma_user_mmap_get_offset(qp->usq.db_mmap_entry);
+}
+
+static int qedr_copy_qp_uresp(struct qedr_dev *dev,
+ struct qedr_qp *qp, struct ib_udata *udata,
+ struct qedr_create_qp_uresp *uresp)
+{
+ int rc;
+
+ memset(uresp, 0, sizeof(*uresp));
+
+ if (qedr_qp_has_sq(qp))
+ qedr_copy_sq_uresp(dev, uresp, qp);
+
+ if (qedr_qp_has_rq(qp))
+ qedr_copy_rq_uresp(dev, uresp, qp);
+
+ uresp->atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
+ uresp->qp_id = qp->qp_id;
+
+ rc = qedr_ib_copy_to_udata(udata, uresp, sizeof(*uresp));
+ if (rc)
+ DP_ERR(dev,
+ "create qp: failed a copy to user space with qp icid=0x%x.\n",
+ qp->icid);
+
+ return rc;
+}
+
+static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
+{
+ qed_chain_reset(&qph->pbl);
+ qph->prod = 0;
+ qph->cons = 0;
+ qph->wqe_cons = 0;
+ qph->db_data.data.value = cpu_to_le16(0);
+}
+
+static void qedr_set_common_qp_params(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct qedr_pd *pd,
+ struct ib_qp_init_attr *attrs)
+{
+ spin_lock_init(&qp->q_lock);
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ kref_init(&qp->refcnt);
+ init_completion(&qp->iwarp_cm_comp);
+ init_completion(&qp->qp_rel_comp);
+ }
+
+ qp->pd = pd;
+ qp->qp_type = attrs->qp_type;
+ qp->max_inline_data = attrs->cap.max_inline_data;
+ qp->state = QED_ROCE_QP_STATE_RESET;
+
+ qp->prev_wqe_size = 0;
+
+ qp->signaled = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
+ qp->dev = dev;
+ if (qedr_qp_has_sq(qp)) {
+ qedr_reset_qp_hwq_info(&qp->sq);
+ qp->sq.max_sges = attrs->cap.max_send_sge;
+ qp->sq_cq = get_qedr_cq(attrs->send_cq);
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
+ qp->sq.max_sges, qp->sq_cq->icid);
+ }
+
+ if (attrs->srq)
+ qp->srq = get_qedr_srq(attrs->srq);
+
+ if (qedr_qp_has_rq(qp)) {
+ qedr_reset_qp_hwq_info(&qp->rq);
+ qp->rq_cq = get_qedr_cq(attrs->recv_cq);
+ qp->rq.max_sges = attrs->cap.max_recv_sge;
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
+ qp->rq.max_sges, qp->rq_cq->icid);
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
+ pd->pd_id, qp->qp_type, qp->max_inline_data,
+ qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
+ qp->sq.max_sges, qp->sq_cq->icid);
+}
+
+static int qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ int rc = 0;
+
+ if (qedr_qp_has_sq(qp)) {
+ qp->sq.db = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
+ qp->sq.db_data.data.icid = qp->icid + 1;
+ rc = qedr_db_recovery_add(dev, qp->sq.db, &qp->sq.db_data,
+ DB_REC_WIDTH_32B, DB_REC_KERNEL);
+ if (rc)
+ return rc;
+ }
+
+ if (qedr_qp_has_rq(qp)) {
+ qp->rq.db = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+ qp->rq.db_data.data.icid = qp->icid;
+ rc = qedr_db_recovery_add(dev, qp->rq.db, &qp->rq.db_data,
+ DB_REC_WIDTH_32B, DB_REC_KERNEL);
+ if (rc && qedr_qp_has_sq(qp))
+ qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
+ }
+
+ return rc;
+}
+
+static int qedr_check_srq_params(struct qedr_dev *dev,
+ struct ib_srq_init_attr *attrs,
+ struct ib_udata *udata)
+{
+ struct qedr_device_attr *qattr = &dev->attr;
+
+ if (attrs->attr.max_wr > qattr->max_srq_wr) {
+ DP_ERR(dev,
+ "create srq: unsupported srq_wr=0x%x requested (max_srq_wr=0x%x)\n",
+ attrs->attr.max_wr, qattr->max_srq_wr);
+ return -EINVAL;
+ }
+
+ if (attrs->attr.max_sge > qattr->max_sge) {
+ DP_ERR(dev,
+ "create srq: unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
+ attrs->attr.max_sge, qattr->max_sge);
+ }
+
+ if (!udata && attrs->srq_type == IB_SRQT_XRC) {
+ DP_ERR(dev, "XRC SRQs are not supported in kernel-space\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void qedr_free_srq_user_params(struct qedr_srq *srq)
+{
+ qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
+ ib_umem_release(srq->usrq.umem);
+ ib_umem_release(srq->prod_umem);
+}
+
+static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
+{
+ struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
+ struct qedr_dev *dev = srq->dev;
+
+ dev->ops->common->chain_free(dev->cdev, &hw_srq->pbl);
+
+ dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
+ hw_srq->virt_prod_pair_addr,
+ hw_srq->phy_prod_pair_addr);
+}
+
+static int qedr_init_srq_user_params(struct ib_udata *udata,
+ struct qedr_srq *srq,
+ struct qedr_create_srq_ureq *ureq,
+ int access)
+{
+ struct scatterlist *sg;
+ int rc;
+
+ rc = qedr_init_user_queue(udata, srq->dev, &srq->usrq, ureq->srq_addr,
+ ureq->srq_len, false, access, 1);
+ if (rc)
+ return rc;
+
+ srq->prod_umem = ib_umem_get(srq->ibsrq.device, ureq->prod_pair_addr,
+ sizeof(struct rdma_srq_producers), access);
+ if (IS_ERR(srq->prod_umem)) {
+ qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
+ ib_umem_release(srq->usrq.umem);
+ DP_ERR(srq->dev,
+ "create srq: failed ib_umem_get for producer, got %ld\n",
+ PTR_ERR(srq->prod_umem));
+ return PTR_ERR(srq->prod_umem);
+ }
+
+ sg = srq->prod_umem->sgt_append.sgt.sgl;
+ srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
+
+ return 0;
+}
+
+static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
+ struct qedr_dev *dev,
+ struct ib_srq_init_attr *init_attr)
+{
+ struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U32,
+ .elem_size = QEDR_SRQ_WQE_ELEM_SIZE,
+ };
+ dma_addr_t phy_prod_pair_addr;
+ u32 num_elems;
+ void *va;
+ int rc;
+
+ va = dma_alloc_coherent(&dev->pdev->dev,
+ sizeof(struct rdma_srq_producers),
+ &phy_prod_pair_addr, GFP_KERNEL);
+ if (!va) {
+ DP_ERR(dev,
+ "create srq: failed to allocate dma memory for producer\n");
+ return -ENOMEM;
+ }
+
+ hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
+ hw_srq->virt_prod_pair_addr = va;
+
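+ /* Size the chain for the worst case: each SRQ WR may take up to
+ * RDMA_MAX_SRQ_WQE_SIZE elements (RDMA_MAX_SGE_PER_SRQ + 1).
+ */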
+ num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
+ params.num_elems = num_elems;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev, &hw_srq->pbl, &params);
+ if (rc)
+ goto err0;
+
+ hw_srq->num_elems = num_elems;
+
+ return 0;
+
+err0:
+ dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
+ va, phy_prod_pair_addr);
+ return rc;
+}
+
+int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct qed_rdma_destroy_srq_in_params destroy_in_params;
+ struct qed_rdma_create_srq_in_params in_params = {};
+ struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
+ struct qed_rdma_create_srq_out_params out_params;
+ struct qedr_pd *pd = get_qedr_pd(ibsrq->pd);
+ struct qedr_create_srq_ureq ureq = {};
+ u64 pbl_base_addr, phy_prod_pair_addr;
+ struct qedr_srq_hwq_info *hw_srq;
+ u32 page_cnt, page_size;
+ struct qedr_srq *srq = get_qedr_srq(ibsrq);
+ int rc = 0;
+
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "create SRQ called from %s (pd %p)\n",
+ (udata) ? "User lib" : "kernel", pd);
+
+ if (init_attr->srq_type != IB_SRQT_BASIC &&
+ init_attr->srq_type != IB_SRQT_XRC)
+ return -EOPNOTSUPP;
+
+ rc = qedr_check_srq_params(dev, init_attr, udata);
+ if (rc)
+ return -EINVAL;
+
+ srq->dev = dev;
+ srq->is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
+ hw_srq = &srq->hw_srq;
+ spin_lock_init(&srq->lock);
+
+ hw_srq->max_wr = init_attr->attr.max_wr;
+ hw_srq->max_sges = init_attr->attr.max_sge;
+
+ if (udata) {
+ if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
+ udata->inlen))) {
+ DP_ERR(dev,
+ "create srq: problem copying data from user space\n");
+ goto err0;
+ }
+
+ rc = qedr_init_srq_user_params(udata, srq, &ureq, 0);
+ if (rc)
+ goto err0;
+
+ page_cnt = srq->usrq.pbl_info.num_pbes;
+ pbl_base_addr = srq->usrq.pbl_tbl->pa;
+ phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
+ page_size = PAGE_SIZE;
+ } else {
+ struct qed_chain *pbl;
+
+ rc = qedr_alloc_srq_kernel_params(srq, dev, init_attr);
+ if (rc)
+ goto err0;
+
+ pbl = &hw_srq->pbl;
+ page_cnt = qed_chain_get_page_cnt(pbl);
+ pbl_base_addr = qed_chain_get_pbl_phys(pbl);
+ phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
+ page_size = QED_CHAIN_PAGE_SIZE;
+ }
+
+ in_params.pd_id = pd->pd_id;
+ in_params.pbl_base_addr = pbl_base_addr;
+ in_params.prod_pair_addr = phy_prod_pair_addr;
+ in_params.num_pages = page_cnt;
+ in_params.page_size = page_size;
+ if (srq->is_xrc) {
+ struct qedr_xrcd *xrcd = get_qedr_xrcd(init_attr->ext.xrc.xrcd);
+ struct qedr_cq *cq = get_qedr_cq(init_attr->ext.cq);
+
+ in_params.is_xrc = 1;
+ in_params.xrcd_id = xrcd->xrcd_id;
+ in_params.cq_cid = cq->icid;
+ }
+
+ rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
+ if (rc)
+ goto err1;
+
+ srq->srq_id = out_params.srq_id;
+
+ if (udata) {
+ rc = qedr_copy_srq_uresp(dev, srq, udata);
+ if (rc)
+ goto err2;
+ }
+
+ rc = xa_insert_irq(&dev->srqs, srq->srq_id, srq, GFP_KERNEL);
+ if (rc)
+ goto err2;
+
+ DP_DEBUG(dev, QEDR_MSG_SRQ,
+ "create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
+ return 0;
+
+err2:
+ destroy_in_params.srq_id = srq->srq_id;
+
+ dev->ops->rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
+err1:
+ if (udata)
+ qedr_free_srq_user_params(srq);
+ else
+ qedr_free_srq_kernel_params(srq);
+err0:
+ return -EFAULT;
+}
+
+int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+{
+ struct qed_rdma_destroy_srq_in_params in_params = {};
+ struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
+ struct qedr_srq *srq = get_qedr_srq(ibsrq);
+
+ xa_erase_irq(&dev->srqs, srq->srq_id);
+ in_params.srq_id = srq->srq_id;
+ in_params.is_xrc = srq->is_xrc;
+ dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
+
+ if (ibsrq->uobject)
+ qedr_free_srq_user_params(srq);
+ else
+ qedr_free_srq_kernel_params(srq);
+
+ DP_DEBUG(dev, QEDR_MSG_SRQ,
+ "destroy srq: destroyed srq with srq_id=0x%0x\n",
+ srq->srq_id);
+ return 0;
+}
+
+int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
+{
+ struct qed_rdma_modify_srq_in_params in_params = {};
+ struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
+ struct qedr_srq *srq = get_qedr_srq(ibsrq);
+ int rc;
+
+ if (attr_mask & IB_SRQ_MAX_WR) {
+ DP_ERR(dev,
+ "modify srq: invalid attribute mask=0x%x specified for %p\n",
+ attr_mask, srq);
+ return -EINVAL;
+ }
+
+ if (attr_mask & IB_SRQ_LIMIT) {
+ if (attr->srq_limit >= srq->hw_srq.max_wr) {
+ DP_ERR(dev,
+ "modify srq: invalid srq_limit=0x%x (max_srq_limit=0x%x)\n",
+ attr->srq_limit, srq->hw_srq.max_wr);
+ return -EINVAL;
+ }
+
+ in_params.srq_id = srq->srq_id;
+ in_params.wqe_limit = attr->srq_limit;
+ rc = dev->ops->rdma_modify_srq(dev->rdma_ctx, &in_params);
+ if (rc)
+ return rc;
+ }
+
+ srq->srq_limit = attr->srq_limit;
+
+ DP_DEBUG(dev, QEDR_MSG_SRQ,
+ "modify srq: modified srq with srq_id=0x%0x\n", srq->srq_id);
+
+ return 0;
+}
+
+static enum qed_rdma_qp_type qedr_ib_to_qed_qp_type(enum ib_qp_type ib_qp_type)
+{
+ switch (ib_qp_type) {
+ case IB_QPT_RC:
+ return QED_RDMA_QP_TYPE_RC;
+ case IB_QPT_XRC_INI:
+ return QED_RDMA_QP_TYPE_XRC_INI;
+ case IB_QPT_XRC_TGT:
+ return QED_RDMA_QP_TYPE_XRC_TGT;
+ default:
+ return QED_RDMA_QP_TYPE_INVAL;
+ }
+}
+
+static inline void
+qedr_init_common_qp_in_params(struct qedr_dev *dev,
+ struct qedr_pd *pd,
+ struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs,
+ bool fmr_and_reserved_lkey,
+ struct qed_rdma_create_qp_in_params *params)
+{
+ /* QP handle to be written in an async event */
+ params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
+ params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
+
+ params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
+ params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
+ params->qp_type = qedr_ib_to_qed_qp_type(attrs->qp_type);
+ params->stats_queue = 0;
+
+ if (pd) {
+ params->pd = pd->pd_id;
+ params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
+ }
+
+ if (qedr_qp_has_sq(qp))
+ params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
+
+ if (qedr_qp_has_rq(qp))
+ params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
+
+ if (qedr_qp_has_srq(qp)) {
+ params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
+ params->srq_id = qp->srq->srq_id;
+ params->use_srq = true;
+ } else {
+ params->srq_id = 0;
+ params->use_srq = false;
+ }
+}
+
+static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
+ "qp=%p. "
+ "sq_addr=0x%llx, "
+ "sq_len=%zd, "
+ "rq_addr=0x%llx, "
+ "rq_len=%zd"
+ "\n",
+ qp,
+ qedr_qp_has_sq(qp) ? qp->usq.buf_addr : 0x0,
+ qedr_qp_has_sq(qp) ? qp->usq.buf_len : 0,
+ qedr_qp_has_rq(qp) ? qp->urq.buf_addr : 0x0,
+ qedr_qp_has_rq(qp) ? qp->urq.buf_len : 0);
+}
+
+static inline void
+qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct qed_rdma_create_qp_out_params *out_params)
+{
+ qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
+ qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
+
+ qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
+ &qp->usq.pbl_info, FW_PAGE_SHIFT);
+ if (!qp->srq) {
+ qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
+ qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
+ }
+
+ qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
+ &qp->urq.pbl_info, FW_PAGE_SHIFT);
+}
+
+static void qedr_cleanup_user(struct qedr_dev *dev,
+ struct qedr_ucontext *ctx,
+ struct qedr_qp *qp)
+{
+ if (qedr_qp_has_sq(qp)) {
+ ib_umem_release(qp->usq.umem);
+ qp->usq.umem = NULL;
+ }
+
+ if (qedr_qp_has_rq(qp)) {
+ ib_umem_release(qp->urq.umem);
+ qp->urq.umem = NULL;
+ }
+
+ if (rdma_protocol_roce(&dev->ibdev, 1)) {
+ qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
+ qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
+ } else {
+ kfree(qp->usq.pbl_tbl);
+ kfree(qp->urq.pbl_tbl);
+ }
+
+ if (qp->usq.db_rec_data) {
+ qedr_db_recovery_del(dev, qp->usq.db_addr,
+ &qp->usq.db_rec_data->db_data);
+ rdma_user_mmap_entry_remove(qp->usq.db_mmap_entry);
+ }
+
+ if (qp->urq.db_rec_data) {
+ qedr_db_recovery_del(dev, qp->urq.db_addr,
+ &qp->urq.db_rec_data->db_data);
+ rdma_user_mmap_entry_remove(qp->urq.db_mmap_entry);
+ }
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ qedr_db_recovery_del(dev, qp->urq.db_rec_db2_addr,
+ &qp->urq.db_rec_db2_data);
+}
+
+static int qedr_create_user_qp(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_pd *ibpd,
+ struct ib_udata *udata,
+ struct ib_qp_init_attr *attrs)
+{
+ struct qed_rdma_create_qp_in_params in_params;
+ struct qed_rdma_create_qp_out_params out_params;
+ struct qedr_create_qp_uresp uresp = {};
+ struct qedr_create_qp_ureq ureq = {};
+ int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
+ struct qedr_ucontext *ctx = NULL;
+ struct qedr_pd *pd = NULL;
+ int rc = 0;
+
+ qp->create_type = QEDR_QP_CREATE_USER;
+
+ if (ibpd) {
+ pd = get_qedr_pd(ibpd);
+ ctx = pd->uctx;
+ }
+
+ if (udata) {
+ rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
+ udata->inlen));
+ if (rc) {
+ DP_ERR(dev, "Problem copying data from user space\n");
+ return rc;
+ }
+ }
+
+ if (qedr_qp_has_sq(qp)) {
+ /* SQ - read access only (0) */
+ rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
+ ureq.sq_len, true, 0, alloc_and_init);
+ if (rc)
+ return rc;
+ }
+
+ if (qedr_qp_has_rq(qp)) {
+ /* RQ - read access only (0) */
+ rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
+ ureq.rq_len, true, 0, alloc_and_init);
+ if (rc) {
+ ib_umem_release(qp->usq.umem);
+ qp->usq.umem = NULL;
+ if (rdma_protocol_roce(&dev->ibdev, 1)) {
+ qedr_free_pbl(dev, &qp->usq.pbl_info,
+ qp->usq.pbl_tbl);
+ } else {
+ kfree(qp->usq.pbl_tbl);
+ }
+ return rc;
+ }
+ }
+
+ memset(&in_params, 0, sizeof(in_params));
+ qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
+ in_params.qp_handle_lo = ureq.qp_handle_lo;
+ in_params.qp_handle_hi = ureq.qp_handle_hi;
+
+ if (qp->qp_type == IB_QPT_XRC_TGT) {
+ struct qedr_xrcd *xrcd = get_qedr_xrcd(attrs->xrcd);
+
+ in_params.xrcd_id = xrcd->xrcd_id;
+ in_params.qp_handle_lo = qp->qp_id;
+ in_params.use_srq = 1;
+ }
+
+ if (qedr_qp_has_sq(qp)) {
+ in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
+ in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
+ }
+
+ if (qedr_qp_has_rq(qp)) {
+ in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
+ in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
+ }
+
+ if (ctx)
+ SET_FIELD(in_params.flags, QED_ROCE_EDPM_MODE, ctx->edpm_mode);
+
+ qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
+ &in_params, &out_params);
+
+ if (!qp->qed_qp) {
+ rc = -ENOMEM;
+ goto err1;
+ }
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ qedr_iwarp_populate_user_qp(dev, qp, &out_params);
+
+ qp->qp_id = out_params.qp_id;
+ qp->icid = out_params.icid;
+
+ if (udata) {
+ rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp);
+ if (rc)
+ goto err;
+ }
+
+ /* The db offset was calculated in qedr_copy_qp_uresp(); now set it in the user queue */
+ if (qedr_qp_has_sq(qp)) {
+ qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
+ qp->sq.max_wr = attrs->cap.max_send_wr;
+ rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
+ &qp->usq.db_rec_data->db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_USER);
+ if (rc)
+ goto err;
+ }
+
+ if (qedr_qp_has_rq(qp)) {
+ qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
+ qp->rq.max_wr = attrs->cap.max_recv_wr;
+ rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
+ &qp->urq.db_rec_data->db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_USER);
+ if (rc)
+ goto err;
+ }
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
+
+ /* calculate the db_rec_db2 data since it is constant, so there is
+ * no need to reflect it from user space
+ */
+ qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
+ qp->urq.db_rec_db2_data.data.value =
+ cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
+
+ rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
+ &qp->urq.db_rec_db2_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_USER);
+ if (rc)
+ goto err;
+ }
+ qedr_qp_user_print(dev, qp);
+ return rc;
+err:
+ rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
+ if (rc)
+ DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
+
+err1:
+ qedr_cleanup_user(dev, ctx, qp);
+ return rc;
+}
+
+static int qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ int rc;
+
+ qp->sq.db = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
+ qp->sq.db_data.data.icid = qp->icid;
+
+ rc = qedr_db_recovery_add(dev, qp->sq.db,
+ &qp->sq.db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ if (rc)
+ return rc;
+
+ qp->rq.db = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
+ qp->rq.db_data.data.icid = qp->icid;
+ qp->rq.iwarp_db2 = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
+ qp->rq.iwarp_db2_data.data.icid = qp->icid;
+ qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
+
+ rc = qedr_db_recovery_add(dev, qp->rq.db,
+ &qp->rq.db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ if (rc)
+ return rc;
+
+ rc = qedr_db_recovery_add(dev, qp->rq.iwarp_db2,
+ &qp->rq.iwarp_db2_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ return rc;
+}
+
+static int
+qedr_roce_create_kernel_qp(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct qed_rdma_create_qp_in_params *in_params,
+ u32 n_sq_elems, u32 n_rq_elems)
+{
+ struct qed_rdma_create_qp_out_params out_params;
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U32,
+ };
+ int rc;
+
+ params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
+ params.num_elems = n_sq_elems;
+ params.elem_size = QEDR_SQE_ELEMENT_SIZE;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
+ if (rc)
+ return rc;
+
+ in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
+ in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
+
+ params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
+ params.num_elems = n_rq_elems;
+ params.elem_size = QEDR_RQE_ELEMENT_SIZE;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
+ if (rc)
+ return rc;
+
+ in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
+ in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
+
+ qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
+ in_params, &out_params);
+
+ if (!qp->qed_qp)
+ return -EINVAL;
+
+ qp->qp_id = out_params.qp_id;
+ qp->icid = out_params.icid;
+
+ return qedr_set_roce_db_info(dev, qp);
+}
+
+static int
+qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct qed_rdma_create_qp_in_params *in_params,
+ u32 n_sq_elems, u32 n_rq_elems)
+{
+ struct qed_rdma_create_qp_out_params out_params;
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U32,
+ };
+ int rc;
+
+ in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
+ QEDR_SQE_ELEMENT_SIZE,
+ QED_CHAIN_PAGE_SIZE,
+ QED_CHAIN_MODE_PBL);
+ in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
+ QEDR_RQE_ELEMENT_SIZE,
+ QED_CHAIN_PAGE_SIZE,
+ QED_CHAIN_MODE_PBL);
+
+ qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
+ in_params, &out_params);
+
+ if (!qp->qed_qp)
+ return -EINVAL;
+
+ /* Now we allocate the chain */
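+ /* Unlike the RoCE flow above, the QP was created first and the chains
+ * are now built on the PBL memory handed back by the qed core
+ * (out_params.sq/rq_pbl_virt/phys, passed in via params.ext_pbl_*).
+ */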
+
+ params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
+ params.num_elems = n_sq_elems;
+ params.elem_size = QEDR_SQE_ELEMENT_SIZE;
+ params.ext_pbl_virt = out_params.sq_pbl_virt;
+ params.ext_pbl_phys = out_params.sq_pbl_phys;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
+ if (rc)
+ goto err;
+
+ params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
+ params.num_elems = n_rq_elems;
+ params.elem_size = QEDR_RQE_ELEMENT_SIZE;
+ params.ext_pbl_virt = out_params.rq_pbl_virt;
+ params.ext_pbl_phys = out_params.rq_pbl_phys;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
+ if (rc)
+ goto err;
+
+ qp->qp_id = out_params.qp_id;
+ qp->icid = out_params.icid;
+
+ return qedr_set_iwarp_db_info(dev, qp);
+
+err:
+ dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
+
+ return rc;
+}
+
+static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
+ kfree(qp->wqe_wr_id);
+
+ dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
+ kfree(qp->rqe_wr_id);
+
+ /* GSI qp is not registered to db mechanism so no need to delete */
+ if (qp->qp_type == IB_QPT_GSI)
+ return;
+
+ qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
+
+ if (!qp->srq) {
+ qedr_db_recovery_del(dev, qp->rq.db, &qp->rq.db_data);
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ qedr_db_recovery_del(dev, qp->rq.iwarp_db2,
+ &qp->rq.iwarp_db2_data);
+ }
+}
+
+static int qedr_create_kernel_qp(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_pd *ibpd,
+ struct ib_qp_init_attr *attrs)
+{
+ struct qed_rdma_create_qp_in_params in_params;
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+ int rc = -EINVAL;
+ u32 n_rq_elems;
+ u32 n_sq_elems;
+ u32 n_sq_entries;
+
+ memset(&in_params, 0, sizeof(in_params));
+ qp->create_type = QEDR_QP_CREATE_KERNEL;
+
+ /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
+ * the ring. The ring should allow at least a single WR, even if the
+ * user requested none, due to allocation issues.
+ * We should add an extra WR since the prod and cons indices of
+ * wqe_wr_id are managed in such a way that the WQ is considered full
+ * when (prod+1)%max_wr==cons. We currently don't do that because we
+ * double the number of entries due to an iSER issue that pushes far
+ * more WRs than indicated. If we decline its ib_post_send() then we
+ * get error prints in dmesg that we'd like to avoid.
+ */
+ qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
+ dev->attr.max_sqe);
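+ /* Worked example (illustrative; the multiplier value below is
+ * hypothetical): with max_send_wr = 128 and wq_multiplier = 2 this
+ * gives min(128 * 2, max_sqe), i.e. 256 shadow entries unless capped
+ * by the device's max_sqe.
+ */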
+
+ qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
+ GFP_KERNEL);
+ if (!qp->wqe_wr_id) {
+ DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
+ return -ENOMEM;
+ }
+
+ /* QP handle to be written in CQE */
+ in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
+ in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
+
+ /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
+ * the ring. The ring should allow at least a single WR, even if the
+ * user requested none, due to allocation issues.
+ */
+ qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
+
+ /* Allocate driver internal RQ array */
+ qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
+ GFP_KERNEL);
+ if (!qp->rqe_wr_id) {
+ DP_ERR(dev,
+ "create qp: failed RQ shadow memory allocation\n");
+ kfree(qp->wqe_wr_id);
+ return -ENOMEM;
+ }
+
+ qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
+
+ n_sq_entries = attrs->cap.max_send_wr;
+ n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
+ n_sq_entries = max_t(u32, n_sq_entries, 1);
+ n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
+
+ n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
+ n_sq_elems, n_rq_elems);
+ else
+ rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
+ n_sq_elems, n_rq_elems);
+ if (rc)
+ qedr_cleanup_kernel(dev, qp);
+
+ return rc;
+}
+
+static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct ib_udata *udata)
+{
+ struct qedr_ucontext *ctx =
+ rdma_udata_to_drv_context(udata, struct qedr_ucontext,
+ ibucontext);
+ int rc;
+
+ if (qp->qp_type != IB_QPT_GSI) {
+ rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
+ if (rc)
+ return rc;
+ }
+
+ if (qp->create_type == QEDR_QP_CREATE_USER)
+ qedr_cleanup_user(dev, ctx, qp);
+ else
+ qedr_cleanup_kernel(dev, qp);
+
+ return 0;
+}
+
+int qedr_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata)
+{
+ struct qedr_xrcd *xrcd = NULL;
+ struct ib_pd *ibpd = ibqp->pd;
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ int rc = 0;
+
+ if (attrs->create_flags)
+ return -EOPNOTSUPP;
+
+ if (attrs->qp_type == IB_QPT_XRC_TGT)
+ xrcd = get_qedr_xrcd(attrs->xrcd);
+ else
+ pd = get_qedr_pd(ibpd);
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
+ udata ? "user library" : "kernel", pd);
+
+ rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata);
+ if (rc)
+ return rc;
+
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
+ udata ? "user library" : "kernel", attrs->event_handler, pd,
+ get_qedr_cq(attrs->send_cq),
+ get_qedr_cq(attrs->send_cq)->icid,
+ get_qedr_cq(attrs->recv_cq),
+ attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0);
+
+ qedr_set_common_qp_params(dev, qp, pd, attrs);
+
+ if (attrs->qp_type == IB_QPT_GSI)
+ return qedr_create_gsi_qp(dev, attrs, qp);
+
+ if (udata || xrcd)
+ rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
+ else
+ rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
+
+ if (rc)
+ return rc;
+
+ qp->ibqp.qp_num = qp->qp_id;
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
+ if (rc)
+ goto out_free_qp_resources;
+ }
+
+ return 0;
+
+out_free_qp_resources:
+ qedr_free_qp_resources(dev, qp, udata);
+ return -EFAULT;
+}
+
+static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
+{
+ switch (qp_state) {
+ case QED_ROCE_QP_STATE_RESET:
+ return IB_QPS_RESET;
+ case QED_ROCE_QP_STATE_INIT:
+ return IB_QPS_INIT;
+ case QED_ROCE_QP_STATE_RTR:
+ return IB_QPS_RTR;
+ case QED_ROCE_QP_STATE_RTS:
+ return IB_QPS_RTS;
+ case QED_ROCE_QP_STATE_SQD:
+ return IB_QPS_SQD;
+ case QED_ROCE_QP_STATE_ERR:
+ return IB_QPS_ERR;
+ case QED_ROCE_QP_STATE_SQE:
+ return IB_QPS_SQE;
+ }
+ return IB_QPS_ERR;
+}
+
+static enum qed_roce_qp_state qedr_get_state_from_ibqp(
+ enum ib_qp_state qp_state)
+{
+ switch (qp_state) {
+ case IB_QPS_RESET:
+ return QED_ROCE_QP_STATE_RESET;
+ case IB_QPS_INIT:
+ return QED_ROCE_QP_STATE_INIT;
+ case IB_QPS_RTR:
+ return QED_ROCE_QP_STATE_RTR;
+ case IB_QPS_RTS:
+ return QED_ROCE_QP_STATE_RTS;
+ case IB_QPS_SQD:
+ return QED_ROCE_QP_STATE_SQD;
+ case IB_QPS_ERR:
+ return QED_ROCE_QP_STATE_ERR;
+ default:
+ return QED_ROCE_QP_STATE_ERR;
+ }
+}
+
+static int qedr_update_qp_state(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ enum qed_roce_qp_state cur_state,
+ enum qed_roce_qp_state new_state)
+{
+ int status = 0;
+
+ if (new_state == cur_state)
+ return 0;
+
+ switch (cur_state) {
+ case QED_ROCE_QP_STATE_RESET:
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_INIT:
+ break;
+ default:
+ status = -EINVAL;
+ break;
+ }
+ break;
+ case QED_ROCE_QP_STATE_INIT:
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_RTR:
+ /* Update doorbell (in case post_recv was
+ * done before move to RTR)
+ */
+
+ if (rdma_protocol_roce(&dev->ibdev, 1))
+ writel(qp->rq.db_data.raw, qp->rq.db);
+ break;
+ case QED_ROCE_QP_STATE_ERR:
+ break;
+ default:
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+ }
+ break;
+ case QED_ROCE_QP_STATE_RTR:
+ /* RTR->XXX */
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_RTS:
+ break;
+ case QED_ROCE_QP_STATE_ERR:
+ break;
+ default:
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+ }
+ break;
+ case QED_ROCE_QP_STATE_RTS:
+ /* RTS->XXX */
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_SQD:
+ break;
+ case QED_ROCE_QP_STATE_ERR:
+ break;
+ default:
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+ }
+ break;
+ case QED_ROCE_QP_STATE_SQD:
+ /* SQD->XXX */
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_RTS:
+ case QED_ROCE_QP_STATE_ERR:
+ break;
+ default:
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+ }
+ break;
+ case QED_ROCE_QP_STATE_ERR:
+ /* ERR->XXX */
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_RESET:
+ if ((qp->rq.prod != qp->rq.cons) ||
+ (qp->sq.prod != qp->sq.cons)) {
+ DP_NOTICE(dev,
+ "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
+ qp->rq.prod, qp->rq.cons, qp->sq.prod,
+ qp->sq.cons);
+ status = -EINVAL;
+ }
+ break;
+ default:
+ status = -EINVAL;
+ break;
+ }
+ break;
+ default:
+ status = -EINVAL;
+ break;
+ }
+
+ return status;
+}
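+
+ /* Summary of the transitions accepted above, as encoded in the switch:
+ * RESET -> INIT; INIT -> RTR | ERR; RTR -> RTS | ERR; RTS -> SQD | ERR;
+ * SQD -> RTS | ERR; ERR -> RESET (only if both SQ and RQ are empty).
+ * Every other combination returns -EINVAL.
+ */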
+
+int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qed_rdma_modify_qp_in_params qp_params = { 0 };
+ struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+ enum ib_qp_state old_qp_state, new_qp_state;
+ enum qed_roce_qp_state cur_state;
+ int rc = 0;
+
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
+ attr->qp_state);
+
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
+ old_qp_state = qedr_get_ibqp_state(qp->state);
+ if (attr_mask & IB_QP_STATE)
+ new_qp_state = attr->qp_state;
+ else
+ new_qp_state = old_qp_state;
+
+ if (rdma_protocol_roce(&dev->ibdev, 1)) {
+ if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
+ ibqp->qp_type, attr_mask)) {
+ DP_ERR(dev,
+ "modify qp: invalid attribute mask=0x%x specified for\n"
+ "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
+ attr_mask, qp->qp_id, ibqp->qp_type,
+ old_qp_state, new_qp_state);
+ rc = -EINVAL;
+ goto err;
+ }
+ }
+
+ /* Translate the masks... */
+ if (attr_mask & IB_QP_STATE) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
+ qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
+ }
+
+ if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
+ qp_params.sqd_async = true;
+
+ if (attr_mask & IB_QP_PKEY_INDEX) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
+ if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
+ }
+
+ if (attr_mask & IB_QP_QKEY)
+ qp->qkey = attr->qkey;
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
+ qp_params.incoming_rdma_read_en = attr->qp_access_flags &
+ IB_ACCESS_REMOTE_READ;
+ qp_params.incoming_rdma_write_en = attr->qp_access_flags &
+ IB_ACCESS_REMOTE_WRITE;
+ qp_params.incoming_atomic_en = attr->qp_access_flags &
+ IB_ACCESS_REMOTE_ATOMIC;
+ }
+
+ if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ return -EINVAL;
+
+ if (attr_mask & IB_QP_PATH_MTU) {
+ if (attr->path_mtu < IB_MTU_256 ||
+ attr->path_mtu > IB_MTU_4096) {
+ pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
+ rc = -EINVAL;
+ goto err;
+ }
+ qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
+ ib_mtu_enum_to_int(iboe_get_mtu
+ (dev->ndev->mtu)));
+ }
+
+ if (!qp->mtu) {
+ qp->mtu =
+ ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
+ pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
+ }
+
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
+
+ qp_params.traffic_class_tos = grh->traffic_class;
+ qp_params.flow_label = grh->flow_label;
+ qp_params.hop_limit_ttl = grh->hop_limit;
+
+ qp->sgid_idx = grh->sgid_index;
+
+ rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
+ if (rc) {
+ DP_ERR(dev,
+ "modify qp: problems with GID index %d (rc=%d)\n",
+ grh->sgid_index, rc);
+ return rc;
+ }
+
+ rc = qedr_get_dmac(dev, &attr->ah_attr,
+ qp_params.remote_mac_addr);
+ if (rc)
+ return rc;
+
+ qp_params.use_local_mac = true;
+ ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
+ qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
+ qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
+ DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
+ qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
+ qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
+ DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
+ qp_params.remote_mac_addr);
+
+ qp_params.mtu = qp->mtu;
+ qp_params.lb_indication = false;
+ }
+
+ if (!qp_params.mtu) {
+ /* Stay with current MTU */
+ if (qp->mtu)
+ qp_params.mtu = qp->mtu;
+ else
+ qp_params.mtu =
+ ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
+ }
+
+ if (attr_mask & IB_QP_TIMEOUT) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
+
+ /* The received timeout value is an exponent used like this:
+ * "12.7.34 LOCAL ACK TIMEOUT
+ * Value representing the transport (ACK) timeout for use by
+ * the remote, expressed as: 4.096 * 2^timeout [usec]"
+ * The FW expects timeout in msec so we need to divide the usec
+ * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
+ * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
+ * The value of zero means infinite so we use a 'max_t' to make
+ * sure that sub 1 msec values will be configured as 1 msec.
+ */
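+ /* Worked example (illustrative): attr->timeout = 14 means
+ * 4.096 usec * 2^14 ~= 67 msec, and the approximation below yields
+ * 1 << (14 - 8) = 64 msec, which is close enough for the FW.
+ */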
+ if (attr->timeout)
+ qp_params.ack_timeout =
+ 1 << max_t(int, attr->timeout - 8, 0);
+ else
+ qp_params.ack_timeout = 0;
+
+ qp->timeout = attr->timeout;
+ }
+
+ if (attr_mask & IB_QP_RETRY_CNT) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
+ qp_params.retry_cnt = attr->retry_cnt;
+ }
+
+ if (attr_mask & IB_QP_RNR_RETRY) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
+ qp_params.rnr_retry_cnt = attr->rnr_retry;
+ }
+
+ if (attr_mask & IB_QP_RQ_PSN) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
+ qp_params.rq_psn = attr->rq_psn;
+ qp->rq_psn = attr->rq_psn;
+ }
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+ if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
+ rc = -EINVAL;
+ DP_ERR(dev,
+ "unsupported max_rd_atomic=%d, supported=%d\n",
+ attr->max_rd_atomic,
+ dev->attr.max_qp_req_rd_atomic_resc);
+ goto err;
+ }
+
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
+ qp_params.max_rd_atomic_req = attr->max_rd_atomic;
+ }
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
+ qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
+ }
+
+ if (attr_mask & IB_QP_SQ_PSN) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
+ qp_params.sq_psn = attr->sq_psn;
+ qp->sq_psn = attr->sq_psn;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+ if (attr->max_dest_rd_atomic >
+ dev->attr.max_qp_resp_rd_atomic_resc) {
+ DP_ERR(dev,
+ "unsupported max_dest_rd_atomic=%d, supported=%d\n",
+ attr->max_dest_rd_atomic,
+ dev->attr.max_qp_resp_rd_atomic_resc);
+
+ rc = -EINVAL;
+ goto err;
+ }
+
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
+ qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
+ }
+
+ if (attr_mask & IB_QP_DEST_QPN) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
+
+ qp_params.dest_qp = attr->dest_qp_num;
+ qp->dest_qp_num = attr->dest_qp_num;
+ }
+
+ cur_state = qp->state;
+
+ /* Update the QP state before the actual ramrod to prevent a race with
+ * the fast path. Modifying the QP state to error will cause the device
+ * to flush the CQEs, and while polling, the flushed CQEs will be
+ * considered a potential issue if the QP isn't in the error state.
+ */
+ if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
+ !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR)
+ qp->state = QED_ROCE_QP_STATE_ERR;
+
+ if (qp->qp_type != IB_QPT_GSI)
+ rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
+ qp->qed_qp, &qp_params);
+
+ if (attr_mask & IB_QP_STATE) {
+ if ((qp->qp_type != IB_QPT_GSI) && (!udata))
+ rc = qedr_update_qp_state(dev, qp, cur_state,
+ qp_params.new_state);
+ qp->state = qp_params.new_state;
+ }
+
+err:
+ return rc;
+}
+
+static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
+{
+ int ib_qp_acc_flags = 0;
+
+ if (params->incoming_rdma_write_en)
+ ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
+ if (params->incoming_rdma_read_en)
+ ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
+ if (params->incoming_atomic_en)
+ ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
+ ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
+ return ib_qp_acc_flags;
+}
+
+int qedr_query_qp(struct ib_qp *ibqp,
+ struct ib_qp_attr *qp_attr,
+ int attr_mask, struct ib_qp_init_attr *qp_init_attr)
+{
+ struct qed_rdma_query_qp_out_params params;
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qedr_dev *dev = qp->dev;
+ int rc = 0;
+
+ memset(&params, 0, sizeof(params));
+ memset(qp_attr, 0, sizeof(*qp_attr));
+ memset(qp_init_attr, 0, sizeof(*qp_init_attr));
+
+ if (qp->qp_type != IB_QPT_GSI) {
+ rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
+ if (rc)
+ goto err;
+ qp_attr->qp_state = qedr_get_ibqp_state(params.state);
+ } else {
+ qp_attr->qp_state = qedr_get_ibqp_state(QED_ROCE_QP_STATE_RTS);
+ }
+
+ qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
+ qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
+ qp_attr->path_mig_state = IB_MIG_MIGRATED;
+ qp_attr->rq_psn = params.rq_psn;
+ qp_attr->sq_psn = params.sq_psn;
+ qp_attr->dest_qp_num = params.dest_qp;
+
+ qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
+
+ qp_attr->cap.max_send_wr = qp->sq.max_wr;
+ qp_attr->cap.max_recv_wr = qp->rq.max_wr;
+ qp_attr->cap.max_send_sge = qp->sq.max_sges;
+ qp_attr->cap.max_recv_sge = qp->rq.max_sges;
+ qp_attr->cap.max_inline_data = dev->attr.max_inline;
+ qp_init_attr->cap = qp_attr->cap;
+
+ qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
+ rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
+ params.flow_label, qp->sgid_idx,
+ params.hop_limit_ttl, params.traffic_class_tos);
+ rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
+ rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
+ rdma_ah_set_sl(&qp_attr->ah_attr, 0);
+ qp_attr->timeout = qp->timeout;
+ qp_attr->rnr_retry = params.rnr_retry;
+ qp_attr->retry_cnt = params.retry_cnt;
+ qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
+ qp_attr->pkey_index = params.pkey_index;
+ qp_attr->port_num = 1;
+ rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
+ rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
+ qp_attr->alt_pkey_index = 0;
+ qp_attr->alt_port_num = 0;
+ qp_attr->alt_timeout = 0;
+ memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
+
+ qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
+ qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
+ qp_attr->max_rd_atomic = params.max_rd_atomic;
+ qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
+ qp_attr->cap.max_inline_data);
+
+err:
+ return rc;
+}
+
+int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+{
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qedr_dev *dev = qp->dev;
+ struct ib_qp_attr attr;
+ int attr_mask = 0;
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
+ qp, qp->qp_type);
+
+ if (rdma_protocol_roce(&dev->ibdev, 1)) {
+ if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
+ (qp->state != QED_ROCE_QP_STATE_ERR) &&
+ (qp->state != QED_ROCE_QP_STATE_INIT)) {
+
+ attr.qp_state = IB_QPS_ERR;
+ attr_mask |= IB_QP_STATE;
+
+ /* Change the QP state to ERROR */
+ qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
+ }
+ } else {
+ /* If connection establishment started, the WAIT_FOR_CONNECT
+ * bit will be on and we need to wait for the establishment
+ * to complete before destroying the qp.
+ */
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
+ &qp->iwarp_cm_flags))
+ wait_for_completion(&qp->iwarp_cm_comp);
+
+ /* If graceful disconnect started, the WAIT_FOR_DISCONNECT
+ * bit will be on, and we need to wait for the disconnect to
+ * complete before continuing. We can use the same completion,
+ * iwarp_cm_comp, since this is the only place that waits for
+ * this completion and it is sequential. In addition,
+ * disconnect can't occur before the connection is fully
+ * established, therefore if WAIT_FOR_DISCONNECT is on it
+ * means WAIT_FOR_CONNECT is also on and the completion for
+ * CONNECT already occurred.
+ */
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
+ &qp->iwarp_cm_flags))
+ wait_for_completion(&qp->iwarp_cm_comp);
+ }
+
+ if (qp->qp_type == IB_QPT_GSI)
+ qedr_destroy_gsi_qp(dev);
+
+ /* We need to remove the entry from the xarray before we release the
+ * qp_id, to avoid a race where the qp_id is reallocated and the
+ * subsequent xa_insert fails.
+ */
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ xa_erase(&dev->qps, qp->qp_id);
+
+ qedr_free_qp_resources(dev, qp, udata);
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ qedr_iw_qp_rem_ref(&qp->ibqp);
+ wait_for_completion(&qp->qp_rel_comp);
+ }
+
+ return 0;
+}
+
+int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct qedr_ah *ah = get_qedr_ah(ibah);
+
+ rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr);
+
+ return 0;
+}
+
+int qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
+{
+ struct qedr_ah *ah = get_qedr_ah(ibah);
+
+ rdma_destroy_ah_attr(&ah->attr);
+ return 0;
+}
+
+static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
+{
+ struct qedr_pbl *pbl, *tmp;
+
+ if (info->pbl_table)
+ list_add_tail(&info->pbl_table->list_entry,
+ &info->free_pbl_list);
+
+ if (!list_empty(&info->inuse_pbl_list))
+ list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
+
+ list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
+ list_del(&pbl->list_entry);
+ qedr_free_pbl(dev, &info->pbl_info, pbl);
+ }
+}
+
+static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
+ size_t page_list_len, bool two_layered)
+{
+ struct qedr_pbl *tmp;
+ int rc;
+
+ INIT_LIST_HEAD(&info->free_pbl_list);
+ INIT_LIST_HEAD(&info->inuse_pbl_list);
+
+ rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
+ page_list_len, two_layered);
+ if (rc)
+ goto done;
+
+ info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
+ if (IS_ERR(info->pbl_table)) {
+ rc = PTR_ERR(info->pbl_table);
+ goto done;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
+ &info->pbl_table->pa);
+
+ /* In the usual case we use 2 PBLs, so we add one to the free
+ * list and allocate another one.
+ */
+ tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
+ if (IS_ERR(tmp)) {
+ DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
+ goto done;
+ }
+
+ list_add_tail(&tmp->list_entry, &info->free_pbl_list);
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
+
+done:
+ if (rc)
+ free_mr_info(dev, info);
+
+ return rc;
+}
+
+struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
+ u64 usr_addr, int acc, struct ib_dmah *dmah,
+ struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qedr_mr *mr;
+ struct qedr_pd *pd;
+ int rc = -ENOMEM;
+
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ pd = get_qedr_pd(ibpd);
+ DP_DEBUG(dev, QEDR_MSG_MR,
+ "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
+ pd->pd_id, start, len, usr_addr, acc);
+
+ if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(rc);
+
+ mr->type = QEDR_MR_USER;
+
+ mr->umem = ib_umem_get(ibpd->device, start, len, acc);
+ if (IS_ERR(mr->umem)) {
+ rc = -EFAULT;
+ goto err0;
+ }
+
+ rc = init_mr_info(dev, &mr->info,
+ ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE), 1);
+ if (rc)
+ goto err1;
+
+ qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
+ &mr->info.pbl_info, PAGE_SHIFT);
+
+ rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
+ if (rc) {
+ if (rc == -EINVAL)
+ DP_ERR(dev, "Out of MR resources\n");
+ else
+ DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
+
+ goto err1;
+ }
+
+ /* Index only, 18 bit long, lkey = itid << 8 | key */
+ mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
+ mr->hw_mr.key = 0;
+ mr->hw_mr.pd = pd->pd_id;
+ mr->hw_mr.local_read = 1;
+ mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
+ mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
+ mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+ mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
+ mr->hw_mr.mw_bind = false;
+ mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
+ mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
+ mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
+ mr->hw_mr.page_size_log = PAGE_SHIFT;
+ mr->hw_mr.length = len;
+ mr->hw_mr.vaddr = usr_addr;
+ mr->hw_mr.phy_mr = false;
+ mr->hw_mr.dma_mr = false;
+
+ rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
+ if (rc) {
+ DP_ERR(dev, "roce register tid returned an error %d\n", rc);
+ goto err2;
+ }
+
+ mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+ if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
+ mr->hw_mr.remote_atomic)
+ mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
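+
+ /* Illustrative example of the key layout noted above: with itid = 0x123
+ * and key = 0, lkey = (0x123 << 8) | 0 = 0x12300; the low 8 bits hold
+ * the key and the upper bits hold the HW TID index.
+ */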
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
+ mr->ibmr.lkey);
+ return &mr->ibmr;
+
+err2:
+ dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err1:
+ qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
+err0:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
+{
+ struct qedr_mr *mr = get_qedr_mr(ib_mr);
+ struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
+ int rc = 0;
+
+ rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
+ if (rc)
+ return rc;
+
+ dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+
+ if (mr->type != QEDR_MR_DMA)
+ free_mr_info(dev, &mr->info);
+
+ /* It could be user-registered memory. */
+ ib_umem_release(mr->umem);
+
+ kfree(mr);
+
+ return rc;
+}
+
+static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
+ int max_page_list_len)
+{
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qedr_mr *mr;
+ int rc = -ENOMEM;
+
+ DP_DEBUG(dev, QEDR_MSG_MR,
+ "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
+ max_page_list_len);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(rc);
+
+ mr->dev = dev;
+ mr->type = QEDR_MR_FRMR;
+
+ rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
+ if (rc)
+ goto err0;
+
+ rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
+ if (rc) {
+ if (rc == -EINVAL)
+ DP_ERR(dev, "Out of MR resources\n");
+ else
+ DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
+
+ goto err1;
+ }
+
+ /* Index only, 18 bit long, lkey = itid << 8 | key */
+ mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
+ mr->hw_mr.key = 0;
+ mr->hw_mr.pd = pd->pd_id;
+ mr->hw_mr.local_read = 1;
+ mr->hw_mr.local_write = 0;
+ mr->hw_mr.remote_read = 0;
+ mr->hw_mr.remote_write = 0;
+ mr->hw_mr.remote_atomic = 0;
+ mr->hw_mr.mw_bind = false;
+ mr->hw_mr.pbl_ptr = 0;
+ mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
+ mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
+ mr->hw_mr.length = 0;
+ mr->hw_mr.vaddr = 0;
+ mr->hw_mr.phy_mr = true;
+ mr->hw_mr.dma_mr = false;
+
+ rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
+ if (rc) {
+ DP_ERR(dev, "roce register tid returned an error %d\n", rc);
+ goto err2;
+ }
+
+ mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+ mr->ibmr.rkey = mr->ibmr.lkey;
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
+ return mr;
+
+err2:
+ dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err1:
+ qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
+err0:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ struct qedr_mr *mr;
+
+ if (mr_type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EINVAL);
+
+ mr = __qedr_alloc_mr(ibpd, max_num_sg);
+
+ if (IS_ERR(mr))
+ return ERR_PTR(-EINVAL);
+
+ return &mr->ibmr;
+}
+
+static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct qedr_mr *mr = get_qedr_mr(ibmr);
+ struct qedr_pbl *pbl_table;
+ struct regpair *pbe;
+ u32 pbes_in_page;
+
+ if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
+ DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
+ return -ENOMEM;
+ }
+
+ DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
+ mr->npages, addr);
+
+ pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
+ pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
+ pbe = (struct regpair *)pbl_table->va;
+ pbe += mr->npages % pbes_in_page;
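+ /* Worked example (illustrative, assuming pbl_size = 4096 bytes):
+ * pbes_in_page = 4096 / 8 = 512, so page 1000 lands in
+ * pbl_table[1000 / 512] = pbl_table[1] at PBE index 1000 % 512 = 488.
+ */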
+ pbe->lo = cpu_to_le32((u32)addr);
+ pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
+
+ mr->npages++;
+
+ return 0;
+}
+
+static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
+{
+ int work = info->completed - info->completed_handled - 1;
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
+ while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
+ struct qedr_pbl *pbl;
+
+ /* Free all the page lists that can be freed (all the ones
+ * that were invalidated), under the assumption that if an FMR
+ * completed successfully, any invalidate operation posted
+ * before it has also completed.
+ */
+ pbl = list_first_entry(&info->inuse_pbl_list,
+ struct qedr_pbl, list_entry);
+ list_move_tail(&pbl->list_entry, &info->free_pbl_list);
+ info->completed_handled++;
+ }
+}
+
+int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset)
+{
+ struct qedr_mr *mr = get_qedr_mr(ibmr);
+
+ mr->npages = 0;
+
+ handle_completed_mrs(mr->dev, &mr->info);
+ return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
+}
+
+struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct qedr_mr *mr;
+ int rc;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->type = QEDR_MR_DMA;
+
+ rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
+ if (rc) {
+ if (rc == -EINVAL)
+ DP_ERR(dev, "Out of MR resources\n");
+ else
+ DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
+
+ goto err1;
+ }
+
+ /* index only, 18 bit long, lkey = itid << 8 | key */
+ mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
+ mr->hw_mr.pd = pd->pd_id;
+ mr->hw_mr.local_read = 1;
+ mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
+ mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
+ mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+ mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
+ mr->hw_mr.dma_mr = true;
+
+ rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
+ if (rc) {
+ DP_ERR(dev, "roce register tid returned an error %d\n", rc);
+ goto err2;
+ }
+
+ mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+ if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
+ mr->hw_mr.remote_atomic)
+ mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
+ return &mr->ibmr;
+
+err2:
+ dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err1:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
+{
+ return (((wq->prod + 1) % wq->max_wr) == wq->cons);
+}
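+
+ /* Illustrative example: with max_wr = 8, prod = 6 and cons = 7 the WQ is
+ * reported full; one slot is always sacrificed so that a full ring can be
+ * distinguished from an empty one ((prod + 1) % max_wr == cons).
+ */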
+
+static int sge_data_len(struct ib_sge *sg_list, int num_sge)
+{
+ int i, len = 0;
+
+ for (i = 0; i < num_sge; i++)
+ len += sg_list[i].length;
+
+ return len;
+}
+
+static void swap_wqe_data64(u64 *p)
+{
+ int i;
+
+ for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
+ *p = cpu_to_be64(cpu_to_le64(*p));
+}
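+
+ /* Note: cpu_to_be64(cpu_to_le64(x)) reduces to an unconditional 64-bit
+ * byte swap on both little- and big-endian hosts; presumably it is
+ * spelled this way to document the CPU-to-wire endianness conversion of
+ * the inline data segments.
+ */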
+
+static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
+ struct qedr_qp *qp, u8 *wqe_size,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr,
+ u8 *bits, u8 bit)
+{
+ u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
+ char *seg_prt, *wqe;
+ int i, seg_siz;
+
+ if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
+ DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
+ *bad_wr = wr;
+ return 0;
+ }
+
+ if (!data_size)
+ return data_size;
+
+ *bits |= bit;
+
+ seg_prt = NULL;
+ wqe = NULL;
+ seg_siz = 0;
+
+ /* Copy data inline */
+ for (i = 0; i < wr->num_sge; i++) {
+ u32 len = wr->sg_list[i].length;
+ void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
+
+ while (len > 0) {
+ u32 cur;
+
+ /* New segment required */
+ if (!seg_siz) {
+ wqe = (char *)qed_chain_produce(&qp->sq.pbl);
+ seg_prt = wqe;
+ seg_siz = sizeof(struct rdma_sq_common_wqe);
+ (*wqe_size)++;
+ }
+
+ /* Calculate currently allowed length */
+ cur = min_t(u32, len, seg_siz);
+ memcpy(seg_prt, src, cur);
+
+ /* Update segment variables */
+ seg_prt += cur;
+ seg_siz -= cur;
+
+ /* Update sge variables */
+ src += cur;
+ len -= cur;
+
+ /* Swap fully-completed segments */
+ if (!seg_siz)
+ swap_wqe_data64((u64 *)wqe);
+ }
+ }
+
+ /* Swap the last, not fully completed segment */
+ if (seg_siz)
+ swap_wqe_data64((u64 *)wqe);
+
+ return data_size;
+}
+
+#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
+ do { \
+ DMA_REGPAIR_LE(sge->addr, vaddr); \
+ (sge)->length = cpu_to_le32(vlength); \
+ (sge)->flags = cpu_to_le32(vflags); \
+ } while (0)
+
+#define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
+ do { \
+ DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
+ (hdr)->num_sges = num_sge; \
+ } while (0)
+
+#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
+ do { \
+ DMA_REGPAIR_LE(sge->addr, vaddr); \
+ (sge)->length = cpu_to_le32(vlength); \
+ (sge)->l_key = cpu_to_le32(vlkey); \
+ } while (0)
+
+static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
+ const struct ib_send_wr *wr)
+{
+ u32 data_size = 0;
+ int i;
+
+ for (i = 0; i < wr->num_sge; i++) {
+ struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
+
+ DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
+ sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
+ sge->length = cpu_to_le32(wr->sg_list[i].length);
+ data_size += wr->sg_list[i].length;
+ }
+
+ if (wqe_size)
+ *wqe_size += wr->num_sge;
+
+ return data_size;
+}
+
+static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct rdma_sq_rdma_wqe_1st *rwqe,
+ struct rdma_sq_rdma_wqe_2nd *rwqe2,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
+ DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
+
+ if (wr->send_flags & IB_SEND_INLINE &&
+ (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
+ wr->opcode == IB_WR_RDMA_WRITE)) {
+ u8 flags = 0;
+
+ SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
+ return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
+ bad_wr, &rwqe->flags, flags);
+ }
+
+ return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
+}
+
+static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct rdma_sq_send_wqe_1st *swqe,
+ struct rdma_sq_send_wqe_2st *swqe2,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ memset(swqe2, 0, sizeof(*swqe2));
+ if (wr->send_flags & IB_SEND_INLINE) {
+ u8 flags = 0;
+
+ SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
+ return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
+ bad_wr, &swqe->flags, flags);
+ }
+
+ return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
+}
+
+static int qedr_prepare_reg(struct qedr_qp *qp,
+ struct rdma_sq_fmr_wqe_1st *fwqe1,
+ const struct ib_reg_wr *wr)
+{
+ struct qedr_mr *mr = get_qedr_mr(wr->mr);
+ struct rdma_sq_fmr_wqe_2nd *fwqe2;
+
+ fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
+ fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
+ fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
+ fwqe1->l_key = wr->key;
+
+ fwqe2->access_ctrl = 0;
+
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
+ !!(wr->access & IB_ACCESS_REMOTE_READ));
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
+ !!(wr->access & IB_ACCESS_REMOTE_WRITE));
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
+ !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
+ !!(wr->access & IB_ACCESS_LOCAL_WRITE));
+ fwqe2->fmr_ctrl = 0;
+
+ SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
+ ilog2(mr->ibmr.page_size) - 12);
+
+ fwqe2->length_hi = 0;
+ fwqe2->length_lo = mr->ibmr.length;
+ fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
+ fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
+
+ qp->wqe_wr_id[qp->sq.prod].mr = mr;
+
+ return 0;
+}
+
+static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
+{
+ switch (opcode) {
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ return IB_WC_RDMA_WRITE;
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_INV:
+ return IB_WC_SEND;
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_READ_WITH_INV:
+ return IB_WC_RDMA_READ;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ return IB_WC_COMP_SWAP;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ return IB_WC_FETCH_ADD;
+ case IB_WR_REG_MR:
+ return IB_WC_REG_MR;
+ case IB_WR_LOCAL_INV:
+ return IB_WC_LOCAL_INV;
+ default:
+ return IB_WC_SEND;
+ }
+}
+
+static inline bool qedr_can_post_send(struct qedr_qp *qp,
+ const struct ib_send_wr *wr)
+{
+ int wq_is_full, err_wr, pbl_is_full;
+ struct qedr_dev *dev = qp->dev;
+
+ /* prevent SQ overflow and/or processing of a bad WR */
+ err_wr = wr->num_sge > qp->sq.max_sges;
+ wq_is_full = qedr_wq_is_full(&qp->sq);
+ pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
+ QEDR_MAX_SQE_ELEMENTS_PER_SQE;
+ if (wq_is_full || err_wr || pbl_is_full) {
+ if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
+ DP_ERR(dev,
+ "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
+ qp);
+ qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
+ }
+
+ if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
+ DP_ERR(dev,
+ "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
+ qp);
+ qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
+ }
+
+ if (pbl_is_full &&
+ !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
+ DP_ERR(dev,
+ "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
+ qp);
+ qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
+ }
+ return false;
+ }
+ return true;
+}
+
+static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct rdma_sq_atomic_wqe_1st *awqe1;
+ struct rdma_sq_atomic_wqe_2nd *awqe2;
+ struct rdma_sq_atomic_wqe_3rd *awqe3;
+ struct rdma_sq_send_wqe_2st *swqe2;
+ struct rdma_sq_local_inv_wqe *iwqe;
+ struct rdma_sq_rdma_wqe_2nd *rwqe2;
+ struct rdma_sq_send_wqe_1st *swqe;
+ struct rdma_sq_rdma_wqe_1st *rwqe;
+ struct rdma_sq_fmr_wqe_1st *fwqe1;
+ struct rdma_sq_common_wqe *wqe;
+ u32 length;
+ int rc = 0;
+ bool comp;
+
+ if (!qedr_can_post_send(qp, wr)) {
+ *bad_wr = wr;
+ return -ENOMEM;
+ }
+
+ wqe = qed_chain_produce(&qp->sq.pbl);
+ qp->wqe_wr_id[qp->sq.prod].signaled =
+ !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
+
+ wqe->flags = 0;
+ SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
+ !!(wr->send_flags & IB_SEND_SOLICITED));
+ comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
+ SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
+ SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
+ !!(wr->send_flags & IB_SEND_FENCE));
+ wqe->prev_wqe_size = qp->prev_wqe_size;
+
+ qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
+
+ switch (wr->opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
+ rc = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+ wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
+ swqe = (struct rdma_sq_send_wqe_1st *)wqe;
+ swqe->wqe_size = 2;
+ swqe2 = qed_chain_produce(&qp->sq.pbl);
+
+ swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
+ length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
+ wr, bad_wr);
+ swqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
+ qp->prev_wqe_size = swqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
+ break;
+ case IB_WR_SEND:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
+ swqe = (struct rdma_sq_send_wqe_1st *)wqe;
+
+ swqe->wqe_size = 2;
+ swqe2 = qed_chain_produce(&qp->sq.pbl);
+ length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
+ wr, bad_wr);
+ swqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
+ qp->prev_wqe_size = swqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
+ break;
+ case IB_WR_SEND_WITH_INV:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
+ swqe = (struct rdma_sq_send_wqe_1st *)wqe;
+ swqe2 = qed_chain_produce(&qp->sq.pbl);
+ swqe->wqe_size = 2;
+ swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
+ length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
+ wr, bad_wr);
+ swqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
+ qp->prev_wqe_size = swqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
+ break;
+
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
+ rc = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+ wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
+ rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
+
+ rwqe->wqe_size = 2;
+ rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
+ rwqe2 = qed_chain_produce(&qp->sq.pbl);
+ length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
+ wr, bad_wr);
+ rwqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
+ qp->prev_wqe_size = rwqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
+ break;
+ case IB_WR_RDMA_WRITE:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
+ rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
+
+ rwqe->wqe_size = 2;
+ rwqe2 = qed_chain_produce(&qp->sq.pbl);
+ length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
+ wr, bad_wr);
+ rwqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
+ qp->prev_wqe_size = rwqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
+ break;
+ case IB_WR_RDMA_READ_WITH_INV:
+ SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
+ fallthrough; /* handled identically to RDMA READ */
+
+ case IB_WR_RDMA_READ:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
+ rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
+
+ rwqe->wqe_size = 2;
+ rwqe2 = qed_chain_produce(&qp->sq.pbl);
+ length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
+ wr, bad_wr);
+ rwqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
+ qp->prev_wqe_size = rwqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
+ awqe1->wqe_size = 4;
+
+ awqe2 = qed_chain_produce(&qp->sq.pbl);
+ DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
+ awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
+
+ awqe3 = qed_chain_produce(&qp->sq.pbl);
+
+ if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+ wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
+ DMA_REGPAIR_LE(awqe3->swap_data,
+ atomic_wr(wr)->compare_add);
+ } else {
+ wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
+ DMA_REGPAIR_LE(awqe3->swap_data,
+ atomic_wr(wr)->swap);
+ DMA_REGPAIR_LE(awqe3->cmp_data,
+ atomic_wr(wr)->compare_add);
+ }
+
+ qedr_prepare_sq_sges(qp, NULL, wr);
+
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
+ qp->prev_wqe_size = awqe1->wqe_size;
+ break;
+
+ case IB_WR_LOCAL_INV:
+ iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
+ iwqe->wqe_size = 1;
+
+ iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
+ iwqe->inv_l_key = wr->ex.invalidate_rkey;
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
+ qp->prev_wqe_size = iwqe->wqe_size;
+ break;
+ case IB_WR_REG_MR:
+ DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
+ wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
+ fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
+ fwqe1->wqe_size = 2;
+
+ rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
+ if (rc) {
+ DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
+ *bad_wr = wr;
+ break;
+ }
+
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
+ qp->prev_wqe_size = fwqe1->wqe_size;
+ break;
+ default:
+ DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
+ rc = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+
+ if (*bad_wr) {
+ u16 value;
+
+ /* Restore prod to its position before
+ * this WR was processed
+ */
+ value = le16_to_cpu(qp->sq.db_data.data.value);
+ qed_chain_set_prod(&qp->sq.pbl, value, wqe);
+
+ /* Restore prev_wqe_size */
+ qp->prev_wqe_size = wqe->prev_wqe_size;
+ rc = -EINVAL;
+ DP_ERR(dev, "POST SEND FAILED\n");
+ }
+
+ return rc;
+}
+
+int qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ unsigned long flags;
+ int rc = 0;
+
+ *bad_wr = NULL;
+
+ if (qp->qp_type == IB_QPT_GSI)
+ return qedr_gsi_post_send(ibqp, wr, bad_wr);
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ if (rdma_protocol_roce(&dev->ibdev, 1)) {
+ if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
+ (qp->state != QED_ROCE_QP_STATE_ERR) &&
+ (qp->state != QED_ROCE_QP_STATE_SQD)) {
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ *bad_wr = wr;
+ DP_DEBUG(dev, QEDR_MSG_CQ,
+ "QP in wrong state! QP icid=0x%x state %d\n",
+ qp->icid, qp->state);
+ return -EINVAL;
+ }
+ }
+
+ while (wr) {
+ rc = __qedr_post_send(ibqp, wr, bad_wr);
+ if (rc)
+ break;
+
+ qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
+
+ qedr_inc_sw_prod(&qp->sq);
+
+ qp->sq.db_data.data.value++;
+
+ wr = wr->next;
+ }
+
+ /* Trigger doorbell
+ * If there was a failure in the first WR then it will be triggered in
+ * vain. However, this is not harmful (as long as the producer value is
+ * unchanged). For performance reasons we avoid checking for this
+ * redundant doorbell.
+ *
+ * qp->wqe_wr_id is accessed during qedr_poll_cq, as
+ * soon as we give the doorbell, we could get a completion
+ * for this wr, therefore we need to make sure that the
+ * memory is updated before giving the doorbell.
+ * During qedr_poll_cq, rmb is called before accessing the
+ * cqe. This covers for the smp_rmb as well.
+ */
+ smp_wmb();
+ writel(qp->sq.db_data.raw, qp->sq.db);
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ return rc;
+}
+
+static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
+{
+ u32 used;
+
+ /* Calculate the number of elements used from the producer and
+ * consumer counts and subtract it from the maximum number of
+ * work requests supported to get the number of elements left.
+ */
+ used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt);
+
+ return hw_srq->max_wr - used;
+}
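+
+ /* Illustrative example: wr_prod_cnt = 10 and wr_cons_cnt = 7 give
+ * used = 3 and max_wr - 3 elements left; the unsigned subtraction stays
+ * correct even after the 32-bit counters wrap around.
+ */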
+
+int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct qedr_srq *srq = get_qedr_srq(ibsrq);
+ struct qedr_srq_hwq_info *hw_srq;
+ struct qedr_dev *dev = srq->dev;
+ struct qed_chain *pbl;
+ unsigned long flags;
+ int status = 0;
+ u32 num_sge;
+
+ spin_lock_irqsave(&srq->lock, flags);
+
+ hw_srq = &srq->hw_srq;
+ pbl = &srq->hw_srq.pbl;
+ while (wr) {
+ struct rdma_srq_wqe_header *hdr;
+ int i;
+
+ if (!qedr_srq_elem_left(hw_srq) ||
+ wr->num_sge > srq->hw_srq.max_sges) {
+ DP_ERR(dev, "Can't post WR (%d,%d) || (%d > %d)\n",
+ hw_srq->wr_prod_cnt,
+ atomic_read(&hw_srq->wr_cons_cnt),
+ wr->num_sge, srq->hw_srq.max_sges);
+ status = -ENOMEM;
+ *bad_wr = wr;
+ break;
+ }
+
+ hdr = qed_chain_produce(pbl);
+ num_sge = wr->num_sge;
+ /* Set number of sge and work request id in header */
+ SRQ_HDR_SET(hdr, wr->wr_id, num_sge);
+
+ srq->hw_srq.wr_prod_cnt++;
+ hw_srq->wqe_prod++;
+ hw_srq->sge_prod++;
+
+ DP_DEBUG(dev, QEDR_MSG_SRQ,
+ "SRQ WR: SGEs: %d with wr_id[%d] = %llx\n",
+ wr->num_sge, hw_srq->wqe_prod, wr->wr_id);
+
+ for (i = 0; i < wr->num_sge; i++) {
+ struct rdma_srq_sge *srq_sge = qed_chain_produce(pbl);
+
+ /* Set SGE length, lkey and address */
+ SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
+ wr->sg_list[i].length, wr->sg_list[i].lkey);
+
+ DP_DEBUG(dev, QEDR_MSG_SRQ,
+ "[%d]: len %d key %x addr %x:%x\n",
+ i, srq_sge->length, srq_sge->l_key,
+ srq_sge->addr.hi, srq_sge->addr.lo);
+ hw_srq->sge_prod++;
+ }
+
+ /* Update WQE and SGE information before
+ * updating producer.
+ */
+ dma_wmb();
+
+ /* The SRQ producer is 8 bytes: the SGE producer index is in the
+ * first 4 bytes and the WQE producer is in the next 4 bytes.
+ */
+ srq->hw_srq.virt_prod_pair_addr->sge_prod = cpu_to_le32(hw_srq->sge_prod);
+ /* Make sure sge producer is updated first */
+ dma_wmb();
+ srq->hw_srq.virt_prod_pair_addr->wqe_prod = cpu_to_le32(hw_srq->wqe_prod);
+
+ wr = wr->next;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_SRQ, "POST: Elements in S-RQ: %d\n",
+ qed_chain_get_elem_left(pbl));
+ spin_unlock_irqrestore(&srq->lock, flags);
+
+ return status;
+}
+
+int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qedr_dev *dev = qp->dev;
+ unsigned long flags;
+ int status = 0;
+
+ if (qp->qp_type == IB_QPT_GSI)
+ return qedr_gsi_post_recv(ibqp, wr, bad_wr);
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ while (wr) {
+ int i;
+
+ if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
+ QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
+ wr->num_sge > qp->rq.max_sges) {
+ DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
+ qed_chain_get_elem_left_u32(&qp->rq.pbl),
+ QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
+ qp->rq.max_sges);
+ status = -ENOMEM;
+ *bad_wr = wr;
+ break;
+ }
+ for (i = 0; i < wr->num_sge; i++) {
+ u32 flags = 0;
+ struct rdma_rq_sge *rqe =
+ qed_chain_produce(&qp->rq.pbl);
+
+ /* First one must include the number
+ * of SGE in the list
+ */
+ if (!i)
+ SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
+ wr->num_sge);
+
+ SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO,
+ wr->sg_list[i].lkey);
+
+ RQ_SGE_SET(rqe, wr->sg_list[i].addr,
+ wr->sg_list[i].length, flags);
+ }
+
+ /* Special case of no SGEs: the FW requires between 1 and 4 SGEs,
+ * so in this case we need to post one SGE with length zero. This
+ * is because an RDMA write with immediate consumes an RQ entry.
+ */
+ if (!wr->num_sge) {
+ u32 flags = 0;
+ struct rdma_rq_sge *rqe =
+ qed_chain_produce(&qp->rq.pbl);
+
+ /* First one must include the number
+ * of SGE in the list
+ */
+ SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0);
+ SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
+
+ RQ_SGE_SET(rqe, 0, 0, flags);
+ i = 1;
+ }
+
+ qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
+ qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
+
+ qedr_inc_sw_prod(&qp->rq);
+
+ /* qp->rqe_wr_id is accessed during qedr_poll_cq, as
+ * soon as we give the doorbell, we could get a completion
+ * for this wr, therefore we need to make sure that the
+ * memory is updated before giving the doorbell.
+ * During qedr_poll_cq, rmb is called before accessing the
+ * cqe. This covers for the smp_rmb as well.
+ */
+ smp_wmb();
+
+ qp->rq.db_data.data.value++;
+
+ writel(qp->rq.db_data.raw, qp->rq.db);
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
+
+ wr = wr->next;
+ }
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ return status;
+}
+
+static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
+{
+ struct rdma_cqe_requester *resp_cqe = &cqe->req;
+
+ return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
+ cq->pbl_toggle;
+}
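+
+ /* A CQE is treated as valid only when its toggle bit matches
+ * cq->pbl_toggle; the bit is expected to flip each time the CQ ring
+ * wraps, which is how stale entries from the previous pass are filtered
+ * out.
+ */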
+
+static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
+{
+ struct rdma_cqe_requester *resp_cqe = &cqe->req;
+ struct qedr_qp *qp;
+
+ qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
+ resp_cqe->qp_handle.lo,
+ u64);
+ return qp;
+}
+
+static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
+{
+ struct rdma_cqe_requester *resp_cqe = &cqe->req;
+
+ return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
+}
+
+/* Return latest CQE (needs processing) */
+static union rdma_cqe *get_cqe(struct qedr_cq *cq)
+{
+ return cq->latest_cqe;
+}
+
+/* For FMR we need to increment the "completed" counter used by the FMR
+ * algorithm to determine whether a PBL can be freed. This must be done
+ * whether or not the work request was signaled, so we call this function
+ * from the condition that checks whether a WR should be skipped, to make
+ * sure we don't miss it (this FMR operation may not have been signaled).
+ */
+static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
+{
+ if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
+ qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
+}
+
+static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct qedr_cq *cq, int num_entries,
+ struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
+ int force)
+{
+ u16 cnt = 0;
+
+ while (num_entries && qp->sq.wqe_cons != hw_cons) {
+ if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
+ qedr_chk_if_fmr(qp);
+ /* skip WC */
+ goto next_cqe;
+ }
+
+ /* fill WC */
+ wc->status = status;
+ wc->vendor_err = 0;
+ wc->wc_flags = 0;
+ wc->src_qp = qp->id;
+ wc->qp = &qp->ibqp;
+
+ wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
+ wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
+
+ switch (wc->opcode) {
+ case IB_WC_RDMA_WRITE:
+ wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
+ break;
+ case IB_WC_COMP_SWAP:
+ case IB_WC_FETCH_ADD:
+ wc->byte_len = 8;
+ break;
+ case IB_WC_REG_MR:
+ qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
+ break;
+ case IB_WC_RDMA_READ:
+ case IB_WC_SEND:
+ wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
+ break;
+ default:
+ break;
+ }
+
+ num_entries--;
+ wc++;
+ cnt++;
+next_cqe:
+ while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
+ qed_chain_consume(&qp->sq.pbl);
+ qedr_inc_sw_cons(&qp->sq);
+ }
+
+ return cnt;
+}
+
+static int qedr_poll_cq_req(struct qedr_dev *dev,
+ struct qedr_qp *qp, struct qedr_cq *cq,
+ int num_entries, struct ib_wc *wc,
+ struct rdma_cqe_requester *req)
+{
+ int cnt = 0;
+
+ switch (req->status) {
+ case RDMA_CQE_REQ_STS_OK:
+ cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
+ IB_WC_SUCCESS, 0);
+ break;
+ case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
+ if (qp->state != QED_ROCE_QP_STATE_ERR)
+ DP_DEBUG(dev, QEDR_MSG_CQ,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
+ IB_WC_WR_FLUSH_ERR, 1);
+ break;
+ default:
+ /* process all WQEs before the consumer */
+ qp->state = QED_ROCE_QP_STATE_ERR;
+ cnt = process_req(dev, qp, cq, num_entries, wc,
+ req->sq_cons - 1, IB_WC_SUCCESS, 0);
+ wc += cnt;
+ /* if we have extra WC fill it with actual error info */
+ if (cnt < num_entries) {
+ enum ib_wc_status wc_status;
+
+ switch (req->status) {
+ case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_BAD_RESP_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_LOC_LEN_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_LOC_QP_OP_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_LOC_PROT_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_MW_BIND_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_REM_INV_REQ_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_REM_ACCESS_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_REM_OP_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_RNR_RETRY_EXC_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_RETRY_EXC_ERR;
+ break;
+ default:
+ DP_ERR(dev,
+ "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_GENERAL_ERR;
+ }
+ cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
+ wc_status, 1);
+ }
+ }
+
+ return cnt;
+}
+
+static inline int qedr_cqe_resp_status_to_ib(u8 status)
+{
+ switch (status) {
+ case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
+ return IB_WC_LOC_ACCESS_ERR;
+ case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
+ return IB_WC_LOC_LEN_ERR;
+ case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
+ return IB_WC_LOC_QP_OP_ERR;
+ case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
+ return IB_WC_LOC_PROT_ERR;
+ case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
+ return IB_WC_MW_BIND_ERR;
+ case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
+ return IB_WC_REM_INV_RD_REQ_ERR;
+ case RDMA_CQE_RESP_STS_OK:
+ return IB_WC_SUCCESS;
+ default:
+ return IB_WC_GENERAL_ERR;
+ }
+}
+
+static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
+ struct ib_wc *wc)
+{
+ wc->status = IB_WC_SUCCESS;
+ wc->byte_len = le32_to_cpu(resp->length);
+
+ if (resp->flags & QEDR_RESP_IMM) {
+ wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
+ wc->wc_flags |= IB_WC_WITH_IMM;
+
+ if (resp->flags & QEDR_RESP_RDMA)
+ wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+
+ if (resp->flags & QEDR_RESP_INV)
+ return -EINVAL;
+
+ } else if (resp->flags & QEDR_RESP_INV) {
+ wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+
+ if (resp->flags & QEDR_RESP_RDMA)
+ return -EINVAL;
+
+ } else if (resp->flags & QEDR_RESP_RDMA) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct qedr_cq *cq, struct ib_wc *wc,
+ struct rdma_cqe_responder *resp, u64 wr_id)
+{
+ /* Must fill fields before qedr_set_ok_cqe_resp_wc() */
+ wc->opcode = IB_WC_RECV;
+ wc->wc_flags = 0;
+
+ if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
+ if (qedr_set_ok_cqe_resp_wc(resp, wc))
+ DP_ERR(dev,
+ "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
+ cq, cq->icid, resp->flags);
+
+ } else {
+ wc->status = qedr_cqe_resp_status_to_ib(resp->status);
+ if (wc->status == IB_WC_GENERAL_ERR)
+ DP_ERR(dev,
+ "CQ %p (icid=%d) contains an invalid CQE status %d\n",
+ cq, cq->icid, resp->status);
+ }
+
+ /* Fill the rest of the WC */
+ wc->vendor_err = 0;
+ wc->src_qp = qp->id;
+ wc->qp = &qp->ibqp;
+ wc->wr_id = wr_id;
+}
+
+static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct qedr_cq *cq, struct ib_wc *wc,
+ struct rdma_cqe_responder *resp)
+{
+ struct qedr_srq *srq = qp->srq;
+ u64 wr_id;
+
+ wr_id = HILO_GEN(le32_to_cpu(resp->srq_wr_id.hi),
+ le32_to_cpu(resp->srq_wr_id.lo), u64);
+
+ if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->vendor_err = 0;
+ wc->wr_id = wr_id;
+ wc->byte_len = 0;
+ wc->src_qp = qp->id;
+ wc->qp = &qp->ibqp;
+ } else {
+ __process_resp_one(dev, qp, cq, wc, resp, wr_id);
+ }
+ atomic_inc(&srq->hw_srq.wr_cons_cnt);
+
+ return 1;
+}
+
+static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct qedr_cq *cq, struct ib_wc *wc,
+ struct rdma_cqe_responder *resp)
+{
+ u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
+
+ __process_resp_one(dev, qp, cq, wc, resp, wr_id);
+
+ while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
+ qed_chain_consume(&qp->rq.pbl);
+ qedr_inc_sw_cons(&qp->rq);
+
+ return 1;
+}
+
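+/* Flush the RQ: generate IB_WC_WR_FLUSH_ERR completions for every
+ * outstanding receive WR up to the HW consumer index from the CQE.
+ */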
+static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
+ int num_entries, struct ib_wc *wc, u16 hw_cons)
+{
+ u16 cnt = 0;
+
+ while (num_entries && qp->rq.wqe_cons != hw_cons) {
+ /* fill WC */
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->vendor_err = 0;
+ wc->wc_flags = 0;
+ wc->src_qp = qp->id;
+ wc->byte_len = 0;
+ wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
+ wc->qp = &qp->ibqp;
+ num_entries--;
+ wc++;
+ cnt++;
+ while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
+ qed_chain_consume(&qp->rq.pbl);
+ qedr_inc_sw_cons(&qp->rq);
+ }
+
+ return cnt;
+}
+
+static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
+ struct rdma_cqe_responder *resp, int *update)
+{
+ if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) {
+ consume_cqe(cq);
+ *update |= 1;
+ }
+}
+
+static int qedr_poll_cq_resp_srq(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct qedr_cq *cq, int num_entries,
+ struct ib_wc *wc,
+ struct rdma_cqe_responder *resp)
+{
+ int cnt;
+
+ cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
+ consume_cqe(cq);
+
+ return cnt;
+}
+
+static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct qedr_cq *cq, int num_entries,
+ struct ib_wc *wc, struct rdma_cqe_responder *resp,
+ int *update)
+{
+ int cnt;
+
+ if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
+ cnt = process_resp_flush(qp, cq, num_entries, wc,
+ resp->rq_cons_or_srq_id);
+ try_consume_resp_cqe(cq, qp, resp, update);
+ } else {
+ cnt = process_resp_one(dev, qp, cq, wc, resp);
+ consume_cqe(cq);
+ *update |= 1;
+ }
+
+ return cnt;
+}
+
+static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
+ struct rdma_cqe_requester *req, int *update)
+{
+ if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
+ consume_cqe(cq);
+ *update |= 1;
+ }
+}
+
+int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
+ union rdma_cqe *cqe;
+ u32 old_cons, new_cons;
+ unsigned long flags;
+ int update = 0;
+ int done = 0;
+
+ if (cq->destroyed) {
+ DP_ERR(dev,
+ "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
+ cq, cq->icid);
+ return 0;
+ }
+
+ if (cq->cq_type == QEDR_CQ_TYPE_GSI)
+ return qedr_gsi_poll_cq(ibcq, num_entries, wc);
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ cqe = cq->latest_cqe;
+ old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
+ while (num_entries && is_valid_cqe(cq, cqe)) {
+ struct qedr_qp *qp;
+ int cnt = 0;
+
+ /* prevent speculative reads of any field of CQE */
+ rmb();
+
+ qp = cqe_get_qp(cqe);
+ if (!qp) {
+ WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
+ break;
+ }
+
+ wc->qp = &qp->ibqp;
+
+ switch (cqe_get_type(cqe)) {
+ case RDMA_CQE_TYPE_REQUESTER:
+ cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
+ &cqe->req);
+ try_consume_req_cqe(cq, qp, &cqe->req, &update);
+ break;
+ case RDMA_CQE_TYPE_RESPONDER_RQ:
+ cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
+ &cqe->resp, &update);
+ break;
+ case RDMA_CQE_TYPE_RESPONDER_SRQ:
+ cnt = qedr_poll_cq_resp_srq(dev, qp, cq, num_entries,
+ wc, &cqe->resp);
+ update = 1;
+ break;
+ case RDMA_CQE_TYPE_INVALID:
+ default:
+ DP_ERR(dev, "Error: invalid CQE type = %d\n",
+ cqe_get_type(cqe));
+ }
+ num_entries -= cnt;
+ wc += cnt;
+ done += cnt;
+
+ cqe = get_cqe(cq);
+ }
+ new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
+
+ cq->cq_cons += new_cons - old_cons;
+
+ if (update)
+ /* The doorbell notifies about the latest VALID entry,
+ * but the chain already points to the next INVALID one.
+ */
+ doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
+
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+ return done;
+}
+
+int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
+ u32 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh, const struct ib_mad *in,
+ struct ib_mad *out_mad, size_t *out_mad_size,
+ u16 *out_mad_pkey_index)
+{
+ return IB_MAD_RESULT_SUCCESS;
+}
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
new file mode 100644
index 000000000000..62420a15101b
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -0,0 +1,103 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QEDR_VERBS_H__
+#define __QEDR_VERBS_H__
+
+int qedr_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *attr, struct ib_udata *udata);
+int qedr_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props);
+
+int qedr_iw_query_gid(struct ib_device *ibdev, u32 port,
+ int index, union ib_gid *gid);
+
+int qedr_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey);
+
+int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
+void qedr_dealloc_ucontext(struct ib_ucontext *uctx);
+
+int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma);
+void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
+int qedr_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+int qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata);
+int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata);
+int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+int qedr_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata);
+int qedr_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *);
+int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
+
+int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *attr,
+ struct ib_udata *udata);
+int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
+int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
+int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
+int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_recv_wr);
+int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
+int qedr_destroy_ah(struct ib_ah *ibah, u32 flags);
+
+int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
+struct ib_mr *qedr_get_dma_mr(struct ib_pd *, int acc);
+
+struct ib_mr *qedr_reg_user_mr(struct ib_pd *, u64 start, u64 length,
+ u64 virt, int acc, struct ib_dmah *dmah,
+ struct ib_udata *);
+
+int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset);
+
+struct ib_mr *qedr_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg);
+int qedr_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);
+int qedr_post_send(struct ib_qp *, const struct ib_send_wr *,
+ const struct ib_send_wr **bad_wr);
+int qedr_post_recv(struct ib_qp *, const struct ib_recv_wr *,
+ const struct ib_recv_wr **bad_wr);
+int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
+ u32 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh, const struct ib_mad *in_mad,
+ struct ib_mad *out_mad, size_t *out_mad_size,
+ u16 *out_mad_pkey_index);
+
+int qedr_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable);
+#endif
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig
deleted file mode 100644
index 495be09781b1..000000000000
--- a/drivers/infiniband/hw/qib/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
-config INFINIBAND_QIB
- tristate "Intel PCIe HCA support"
- depends on 64BIT
- ---help---
- This is a low-level driver for Intel PCIe QLE InfiniBand host
- channel adapters. This driver does not support the Intel
- HyperTransport card (model QHT7140).
-
-config INFINIBAND_QIB_DCA
- bool "QIB DCA support"
- depends on INFINIBAND_QIB && DCA && SMP && !(INFINIBAND_QIB=y && DCA=m)
- default y
- ---help---
- Setting this enables DCA support on some Intel chip sets
- with the iba7322 HCA.
diff --git a/drivers/infiniband/hw/qib/Makefile b/drivers/infiniband/hw/qib/Makefile
deleted file mode 100644
index 57f8103e51f8..000000000000
--- a/drivers/infiniband/hw/qib/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-obj-$(CONFIG_INFINIBAND_QIB) += ib_qib.o
-
-ib_qib-y := qib_cq.o qib_diag.o qib_dma.o qib_driver.o qib_eeprom.o \
- qib_file_ops.o qib_fs.o qib_init.o qib_intr.o qib_keys.o \
- qib_mad.o qib_mmap.o qib_mr.o qib_pcie.o qib_pio_copy.o \
- qib_qp.o qib_qsfp.o qib_rc.o qib_ruc.o qib_sdma.o qib_srq.o \
- qib_sysfs.o qib_twsi.o qib_tx.o qib_uc.o qib_ud.o \
- qib_user_pages.o qib_user_sdma.o qib_verbs_mcast.o qib_iba7220.o \
- qib_sd7220.o qib_iba7322.o qib_verbs.o
-
-# 6120 has no fallback if no MSI interrupts, others can do INTx
-ib_qib-$(CONFIG_PCI_MSI) += qib_iba6120.o
-
-ib_qib-$(CONFIG_X86_64) += qib_wc_x86_64.o
-ib_qib-$(CONFIG_PPC64) += qib_wc_ppc64.o
-ib_qib-$(CONFIG_DEBUG_FS) += qib_debugfs.o
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
deleted file mode 100644
index 7df16f74bb45..000000000000
--- a/drivers/infiniband/hw/qib/qib.h
+++ /dev/null
@@ -1,1543 +0,0 @@
-#ifndef _QIB_KERNEL_H
-#define _QIB_KERNEL_H
-/*
- * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This header file is the base header file for qlogic_ib kernel code
- * qib_user.h serves a similar purpose for user code.
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/fs.h>
-#include <linux/completion.h>
-#include <linux/kref.h>
-#include <linux/sched.h>
-#include <linux/kthread.h>
-
-#include "qib_common.h"
-#include "qib_verbs.h"
-
-/* only s/w major version of QLogic_IB we can handle */
-#define QIB_CHIP_VERS_MAJ 2U
-
-/* don't care about this except printing */
-#define QIB_CHIP_VERS_MIN 0U
-
-/* The Organization Unique Identifier (Mfg code), and its position in GUID */
-#define QIB_OUI 0x001175
-#define QIB_OUI_LSB 40
-
-/*
- * per driver stats, either not device nor port-specific, or
- * summed over all of the devices and ports.
- * They are described by name via ipathfs filesystem, so layout
- * and number of elements can change without breaking compatibility.
- * If members are added or deleted qib_statnames[] in qib_fs.c must
- * change to match.
- */
-struct qlogic_ib_stats {
- __u64 sps_ints; /* number of interrupts handled */
- __u64 sps_errints; /* number of error interrupts */
- __u64 sps_txerrs; /* tx-related packet errors */
- __u64 sps_rcverrs; /* non-crc rcv packet errors */
- __u64 sps_hwerrs; /* hardware errors reported (parity, etc.) */
- __u64 sps_nopiobufs; /* no pio bufs avail from kernel */
- __u64 sps_ctxts; /* number of contexts currently open */
- __u64 sps_lenerrs; /* number of kernel packets where RHF != LRH len */
- __u64 sps_buffull;
- __u64 sps_hdrfull;
-};
-
-extern struct qlogic_ib_stats qib_stats;
-extern const struct pci_error_handlers qib_pci_err_handler;
-
-#define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ
-/*
- * First-cut criterion for "device is active" is
- * two thousand dwords combined Tx, Rx traffic per
- * 5-second interval. SMA packets are 64 dwords,
- * and occur "a few per second", presumably each way.
- */
-#define QIB_TRAFFIC_ACTIVE_THRESHOLD (2000)
-
-/*
- * Struct used to indicate which errors are logged in each of the
- * error-counters that are logged to EEPROM. A counter is incremented
- * _once_ (saturating at 255) for each event with any bits set in
- * the error or hwerror register masks below.
- */
-#define QIB_EEP_LOG_CNT (4)
-struct qib_eep_log_mask {
- u64 errs_to_log;
- u64 hwerrs_to_log;
-};
-
-/*
- * Below contains all data related to a single context (formerly called port).
- */
-
-#ifdef CONFIG_DEBUG_FS
-struct qib_opcode_stats_perctx;
-#endif
-
-struct qib_ctxtdata {
- void **rcvegrbuf;
- dma_addr_t *rcvegrbuf_phys;
- /* rcvhdrq base, needs mmap before useful */
- void *rcvhdrq;
- /* kernel virtual address where hdrqtail is updated */
- void *rcvhdrtail_kvaddr;
- /*
- * temp buffer for expected send setup, allocated at open, instead
- * of each setup call
- */
- void *tid_pg_list;
- /*
- * Shared page for kernel to signal user processes that send buffers
- * need disarming. The process should call QIB_CMD_DISARM_BUFS
- * or QIB_CMD_ACK_EVENT with IPATH_EVENT_DISARM_BUFS set.
- */
- unsigned long *user_event_mask;
- /* when waiting for rcv or pioavail */
- wait_queue_head_t wait;
- /*
- * rcvegr bufs base, physical, must fit
- * in 44 bits so 32 bit programs mmap64 44 bit works)
- */
- dma_addr_t rcvegr_phys;
- /* mmap of hdrq, must fit in 44 bits */
- dma_addr_t rcvhdrq_phys;
- dma_addr_t rcvhdrqtailaddr_phys;
-
- /*
- * number of opens (including slave sub-contexts) on this instance
- * (ignoring forks, dup, etc. for now)
- */
- int cnt;
- /*
- * how much space to leave at start of eager TID entries for
- * protocol use, on each TID
- */
- /* instead of calculating it */
- unsigned ctxt;
- /* local node of context */
- int node_id;
- /* non-zero if ctxt is being shared. */
- u16 subctxt_cnt;
- /* non-zero if ctxt is being shared. */
- u16 subctxt_id;
- /* number of eager TID entries. */
- u16 rcvegrcnt;
- /* index of first eager TID entry. */
- u16 rcvegr_tid_base;
- /* number of pio bufs for this ctxt (all procs, if shared) */
- u32 piocnt;
- /* first pio buffer for this ctxt */
- u32 pio_base;
- /* chip offset of PIO buffers for this ctxt */
- u32 piobufs;
- /* how many alloc_pages() chunks in rcvegrbuf_pages */
- u32 rcvegrbuf_chunks;
- /* how many egrbufs per chunk */
- u16 rcvegrbufs_perchunk;
- /* ilog2 of above */
- u16 rcvegrbufs_perchunk_shift;
- /* order for rcvegrbuf_pages */
- size_t rcvegrbuf_size;
- /* rcvhdrq size (for freeing) */
- size_t rcvhdrq_size;
- /* per-context flags for fileops/intr communication */
- unsigned long flag;
- /* next expected TID to check when looking for free */
- u32 tidcursor;
- /* WAIT_RCV that timed out, no interrupt */
- u32 rcvwait_to;
- /* WAIT_PIO that timed out, no interrupt */
- u32 piowait_to;
- /* WAIT_RCV already happened, no wait */
- u32 rcvnowait;
- /* WAIT_PIO already happened, no wait */
- u32 pionowait;
- /* total number of polled urgent packets */
- u32 urgent;
- /* saved total number of polled urgent packets for poll edge trigger */
- u32 urgent_poll;
- /* pid of process using this ctxt */
- pid_t pid;
- pid_t subpid[QLOGIC_IB_MAX_SUBCTXT];
- /* same size as task_struct .comm[], command that opened context */
- char comm[16];
- /* pkeys set by this use of this ctxt */
- u16 pkeys[4];
- /* so file ops can get at unit */
- struct qib_devdata *dd;
- /* so funcs that need physical port can get it easily */
- struct qib_pportdata *ppd;
- /* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
- void *subctxt_uregbase;
- /* An array of pages for the eager receive buffers * N */
- void *subctxt_rcvegrbuf;
- /* An array of pages for the eager header queue entries * N */
- void *subctxt_rcvhdr_base;
- /* The version of the library which opened this ctxt */
- u32 userversion;
- /* Bitmask of active slaves */
- u32 active_slaves;
- /* Type of packets or conditions we want to poll for */
- u16 poll_type;
- /* receive packet sequence counter */
- u8 seq_cnt;
- u8 redirect_seq_cnt;
- /* ctxt rcvhdrq head offset */
- u32 head;
- /* lookaside fields */
- struct qib_qp *lookaside_qp;
- u32 lookaside_qpn;
- /* QPs waiting for context processing */
- struct list_head qp_wait_list;
-#ifdef CONFIG_DEBUG_FS
- /* verbs stats per CTX */
- struct qib_opcode_stats_perctx *opstats;
-#endif
-};
-
-struct qib_sge_state;
-
-struct qib_sdma_txreq {
- int flags;
- int sg_count;
- dma_addr_t addr;
- void (*callback)(struct qib_sdma_txreq *, int);
- u16 start_idx; /* sdma private */
- u16 next_descq_idx; /* sdma private */
- struct list_head list; /* sdma private */
-};
-
-struct qib_sdma_desc {
- __le64 qw[2];
-};
-
-struct qib_verbs_txreq {
- struct qib_sdma_txreq txreq;
- struct qib_qp *qp;
- struct qib_swqe *wqe;
- u32 dwords;
- u16 hdr_dwords;
- u16 hdr_inx;
- struct qib_pio_header *align_buf;
- struct qib_mregion *mr;
- struct qib_sge_state *ss;
-};
-
-#define QIB_SDMA_TXREQ_F_USELARGEBUF 0x1
-#define QIB_SDMA_TXREQ_F_HEADTOHOST 0x2
-#define QIB_SDMA_TXREQ_F_INTREQ 0x4
-#define QIB_SDMA_TXREQ_F_FREEBUF 0x8
-#define QIB_SDMA_TXREQ_F_FREEDESC 0x10
-
-#define QIB_SDMA_TXREQ_S_OK 0
-#define QIB_SDMA_TXREQ_S_SENDERROR 1
-#define QIB_SDMA_TXREQ_S_ABORTED 2
-#define QIB_SDMA_TXREQ_S_SHUTDOWN 3
-
-/*
- * Get/Set IB link-level config parameters for f_get/set_ib_cfg()
- * Mostly for MADs that set or query link parameters, also ipath
- * config interfaces
- */
-#define QIB_IB_CFG_LIDLMC 0 /* LID (LS16b) and Mask (MS16b) */
-#define QIB_IB_CFG_LWID_ENB 2 /* allowed Link-width */
-#define QIB_IB_CFG_LWID 3 /* currently active Link-width */
-#define QIB_IB_CFG_SPD_ENB 4 /* allowed Link speeds */
-#define QIB_IB_CFG_SPD 5 /* current Link spd */
-#define QIB_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */
-#define QIB_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */
-#define QIB_IB_CFG_LINKLATENCY 8 /* Link Latency (IB1.2 only) */
-#define QIB_IB_CFG_HRTBT 9 /* IB heartbeat off/enable/auto; DDR/QDR only */
-#define QIB_IB_CFG_OP_VLS 10 /* operational VLs */
-#define QIB_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */
-#define QIB_IB_CFG_VL_LOW_CAP 12 /* num of VL low priority weights */
-#define QIB_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */
-#define QIB_IB_CFG_PHYERR_THRESH 14 /* IB PHY error threshold */
-#define QIB_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */
-#define QIB_IB_CFG_PKEYS 16 /* update partition keys */
-#define QIB_IB_CFG_MTU 17 /* update MTU in IBC */
-#define QIB_IB_CFG_LSTATE 18 /* update linkcmd and linkinitcmd in IBC */
-#define QIB_IB_CFG_VL_HIGH_LIMIT 19
-#define QIB_IB_CFG_PMA_TICKS 20 /* PMA sample tick resolution */
-#define QIB_IB_CFG_PORT 21 /* switch port we are connected to */
-
-/*
- * for CFG_LSTATE: LINKCMD in upper 16 bits, LINKINITCMD in lower 16
- * IB_LINKINITCMD_POLL and SLEEP are also used as set/get values for
- * QIB_IB_CFG_LINKDEFAULT cmd
- */
-#define IB_LINKCMD_DOWN (0 << 16)
-#define IB_LINKCMD_ARMED (1 << 16)
-#define IB_LINKCMD_ACTIVE (2 << 16)
-#define IB_LINKINITCMD_NOP 0
-#define IB_LINKINITCMD_POLL 1
-#define IB_LINKINITCMD_SLEEP 2
-#define IB_LINKINITCMD_DISABLE 3
-
-/*
- * valid states passed to qib_set_linkstate() user call
- */
-#define QIB_IB_LINKDOWN 0
-#define QIB_IB_LINKARM 1
-#define QIB_IB_LINKACTIVE 2
-#define QIB_IB_LINKDOWN_ONLY 3
-#define QIB_IB_LINKDOWN_SLEEP 4
-#define QIB_IB_LINKDOWN_DISABLE 5
-
-/*
- * These 7 values (SDR, DDR, and QDR may be ORed for auto-speed
- * negotiation) are used for the 3rd argument to path_f_set_ib_cfg
- * with cmd QIB_IB_CFG_SPD_ENB, by direct calls or via sysfs. They
- * are also the possible values for qib_link_speed_enabled and active
- * The values were chosen to match values used within the IB spec.
- */
-#define QIB_IB_SDR 1
-#define QIB_IB_DDR 2
-#define QIB_IB_QDR 4
-
-#define QIB_DEFAULT_MTU 4096
-
-/* max number of IB ports supported per HCA */
-#define QIB_MAX_IB_PORTS 2
-
-/*
- * Possible IB config parameters for f_get/set_ib_table()
- */
-#define QIB_IB_TBL_VL_HIGH_ARB 1 /* Get/set VL high priority weights */
-#define QIB_IB_TBL_VL_LOW_ARB 2 /* Get/set VL low priority weights */
-
-/*
- * Possible "operations" for f_rcvctrl(ppd, op, ctxt)
- * these are bits so they can be combined, e.g.
- * QIB_RCVCTRL_INTRAVAIL_ENB | QIB_RCVCTRL_CTXT_ENB
- */
-#define QIB_RCVCTRL_TAILUPD_ENB 0x01
-#define QIB_RCVCTRL_TAILUPD_DIS 0x02
-#define QIB_RCVCTRL_CTXT_ENB 0x04
-#define QIB_RCVCTRL_CTXT_DIS 0x08
-#define QIB_RCVCTRL_INTRAVAIL_ENB 0x10
-#define QIB_RCVCTRL_INTRAVAIL_DIS 0x20
-#define QIB_RCVCTRL_PKEY_ENB 0x40 /* Note, default is enabled */
-#define QIB_RCVCTRL_PKEY_DIS 0x80
-#define QIB_RCVCTRL_BP_ENB 0x0100
-#define QIB_RCVCTRL_BP_DIS 0x0200
-#define QIB_RCVCTRL_TIDFLOW_ENB 0x0400
-#define QIB_RCVCTRL_TIDFLOW_DIS 0x0800
-
-/*
- * Possible "operations" for f_sendctrl(ppd, op, var)
- * these are bits so they can be combined, e.g.
- * QIB_SENDCTRL_BUFAVAIL_ENB | QIB_SENDCTRL_ENB
- * Some operations (e.g. DISARM, ABORT) are known to
- * be "one-shot", so do not modify shadow.
- */
-#define QIB_SENDCTRL_DISARM (0x1000)
-#define QIB_SENDCTRL_DISARM_BUF(bufn) ((bufn) | QIB_SENDCTRL_DISARM)
- /* available (0x2000) */
-#define QIB_SENDCTRL_AVAIL_DIS (0x4000)
-#define QIB_SENDCTRL_AVAIL_ENB (0x8000)
-#define QIB_SENDCTRL_AVAIL_BLIP (0x10000)
-#define QIB_SENDCTRL_SEND_DIS (0x20000)
-#define QIB_SENDCTRL_SEND_ENB (0x40000)
-#define QIB_SENDCTRL_FLUSH (0x80000)
-#define QIB_SENDCTRL_CLEAR (0x100000)
-#define QIB_SENDCTRL_DISARM_ALL (0x200000)
-
-/*
- * These are the generic indices for requesting per-port
- * counter values via the f_portcntr function. They
- * are always returned as 64 bit values, although most
- * are 32 bit counters.
- */
-/* send-related counters */
-#define QIBPORTCNTR_PKTSEND 0U
-#define QIBPORTCNTR_WORDSEND 1U
-#define QIBPORTCNTR_PSXMITDATA 2U
-#define QIBPORTCNTR_PSXMITPKTS 3U
-#define QIBPORTCNTR_PSXMITWAIT 4U
-#define QIBPORTCNTR_SENDSTALL 5U
-/* receive-related counters */
-#define QIBPORTCNTR_PKTRCV 6U
-#define QIBPORTCNTR_PSRCVDATA 7U
-#define QIBPORTCNTR_PSRCVPKTS 8U
-#define QIBPORTCNTR_RCVEBP 9U
-#define QIBPORTCNTR_RCVOVFL 10U
-#define QIBPORTCNTR_WORDRCV 11U
-/* IB link related error counters */
-#define QIBPORTCNTR_RXLOCALPHYERR 12U
-#define QIBPORTCNTR_RXVLERR 13U
-#define QIBPORTCNTR_ERRICRC 14U
-#define QIBPORTCNTR_ERRVCRC 15U
-#define QIBPORTCNTR_ERRLPCRC 16U
-#define QIBPORTCNTR_BADFORMAT 17U
-#define QIBPORTCNTR_ERR_RLEN 18U
-#define QIBPORTCNTR_IBSYMBOLERR 19U
-#define QIBPORTCNTR_INVALIDRLEN 20U
-#define QIBPORTCNTR_UNSUPVL 21U
-#define QIBPORTCNTR_EXCESSBUFOVFL 22U
-#define QIBPORTCNTR_ERRLINK 23U
-#define QIBPORTCNTR_IBLINKDOWN 24U
-#define QIBPORTCNTR_IBLINKERRRECOV 25U
-#define QIBPORTCNTR_LLI 26U
-/* other error counters */
-#define QIBPORTCNTR_RXDROPPKT 27U
-#define QIBPORTCNTR_VL15PKTDROP 28U
-#define QIBPORTCNTR_ERRPKEY 29U
-#define QIBPORTCNTR_KHDROVFL 30U
-/* sampling counters (these are actually control registers) */
-#define QIBPORTCNTR_PSINTERVAL 31U
-#define QIBPORTCNTR_PSSTART 32U
-#define QIBPORTCNTR_PSSTAT 33U
-
-/* how often we check for packet activity for "power on hours" (in seconds) */
-#define ACTIVITY_TIMER 5
-
-#define MAX_NAME_SIZE 64
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-struct qib_irq_notify;
-#endif
-
-struct qib_msix_entry {
- struct msix_entry msix;
- void *arg;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- int dca;
- int rcv;
- struct qib_irq_notify *notifier;
-#endif
- char name[MAX_NAME_SIZE];
- cpumask_var_t mask;
-};
-
-/* Below is an opaque struct. Each chip (device) can maintain
- * private data needed for its operation, but not germane to the
- * rest of the driver. For convenience, we define another that
- * is chip-specific, per-port
- */
-struct qib_chip_specific;
-struct qib_chipport_specific;
-
-enum qib_sdma_states {
- qib_sdma_state_s00_hw_down,
- qib_sdma_state_s10_hw_start_up_wait,
- qib_sdma_state_s20_idle,
- qib_sdma_state_s30_sw_clean_up_wait,
- qib_sdma_state_s40_hw_clean_up_wait,
- qib_sdma_state_s50_hw_halt_wait,
- qib_sdma_state_s99_running,
-};
-
-enum qib_sdma_events {
- qib_sdma_event_e00_go_hw_down,
- qib_sdma_event_e10_go_hw_start,
- qib_sdma_event_e20_hw_started,
- qib_sdma_event_e30_go_running,
- qib_sdma_event_e40_sw_cleaned,
- qib_sdma_event_e50_hw_cleaned,
- qib_sdma_event_e60_hw_halted,
- qib_sdma_event_e70_go_idle,
- qib_sdma_event_e7220_err_halted,
- qib_sdma_event_e7322_err_halted,
- qib_sdma_event_e90_timer_tick,
-};
-
-extern char *qib_sdma_state_names[];
-extern char *qib_sdma_event_names[];
-
-struct sdma_set_state_action {
- unsigned op_enable:1;
- unsigned op_intenable:1;
- unsigned op_halt:1;
- unsigned op_drain:1;
- unsigned go_s99_running_tofalse:1;
- unsigned go_s99_running_totrue:1;
-};
-
-struct qib_sdma_state {
- struct kref kref;
- struct completion comp;
- enum qib_sdma_states current_state;
- struct sdma_set_state_action *set_state_action;
- unsigned current_op;
- unsigned go_s99_running;
- unsigned first_sendbuf;
- unsigned last_sendbuf; /* really last +1 */
- /* debugging/devel */
- enum qib_sdma_states previous_state;
- unsigned previous_op;
- enum qib_sdma_events last_event;
-};
-
-struct xmit_wait {
- struct timer_list timer;
- u64 counter;
- u8 flags;
- struct cache {
- u64 psxmitdata;
- u64 psrcvdata;
- u64 psxmitpkts;
- u64 psrcvpkts;
- u64 psxmitwait;
- } counter_cache;
-};
-
-/*
- * The structure below encapsulates data relevant to a physical IB Port.
- * Current chips support only one such port, but the separation
- * clarifies things a bit. Note that to conform to IB conventions,
- * port-numbers are one-based. The first or only port is port1.
- */
-struct qib_pportdata {
- struct qib_ibport ibport_data;
-
- struct qib_devdata *dd;
- struct qib_chippport_specific *cpspec; /* chip-specific per-port */
- struct kobject pport_kobj;
- struct kobject pport_cc_kobj;
- struct kobject sl2vl_kobj;
- struct kobject diagc_kobj;
-
- /* GUID for this interface, in network order */
- __be64 guid;
-
- /* QIB_POLL, etc. link-state specific flags, per port */
- u32 lflags;
- /* qib_lflags driver is waiting for */
- u32 state_wanted;
- spinlock_t lflags_lock;
-
- /* ref count for each pkey */
- atomic_t pkeyrefs[4];
-
- /*
- * this address is mapped readonly into user processes so they can
- * get status cheaply, whenever they want. One qword of status per port
- */
- u64 *statusp;
-
- /* SendDMA related entries */
-
- /* read mostly */
- struct qib_sdma_desc *sdma_descq;
- struct workqueue_struct *qib_wq;
- struct qib_sdma_state sdma_state;
- dma_addr_t sdma_descq_phys;
- volatile __le64 *sdma_head_dma; /* DMA'ed by chip */
- dma_addr_t sdma_head_phys;
- u16 sdma_descq_cnt;
-
- /* read/write using lock */
- spinlock_t sdma_lock ____cacheline_aligned_in_smp;
- struct list_head sdma_activelist;
- struct list_head sdma_userpending;
- u64 sdma_descq_added;
- u64 sdma_descq_removed;
- u16 sdma_descq_tail;
- u16 sdma_descq_head;
- u8 sdma_generation;
- u8 sdma_intrequest;
-
- struct tasklet_struct sdma_sw_clean_up_task
- ____cacheline_aligned_in_smp;
-
- wait_queue_head_t state_wait; /* for state_wanted */
-
- /* HoL blocking for SMP replies */
- unsigned hol_state;
- struct timer_list hol_timer;
-
- /*
- * Shadow copies of registers; size indicates read access size.
- * Most of them are readonly, but some are write-only register,
- * where we manipulate the bits in the shadow copy, and then write
- * the shadow copy to qlogic_ib.
- *
- * We deliberately make most of these 32 bits, since they have
- * restricted range. For any that we read, we won't to generate 32
- * bit accesses, since Opteron will generate 2 separate 32 bit HT
- * transactions for a 64 bit read, and we want to avoid unnecessary
- * bus transactions.
- */
-
- /* This is the 64 bit group */
- /* last ibcstatus. opaque outside chip-specific code */
- u64 lastibcstat;
-
- /* these are the "32 bit" regs */
-
- /*
- * the following two are 32-bit bitmasks, but {test,clear,set}_bit
- * all expect bit fields to be "unsigned long"
- */
- unsigned long p_rcvctrl; /* shadow per-port rcvctrl */
- unsigned long p_sendctrl; /* shadow per-port sendctrl */
-
- u32 ibmtu; /* The MTU programmed for this unit */
- /*
- * Current max size IB packet (in bytes) including IB headers, that
- * we can send. Changes when ibmtu changes.
- */
- u32 ibmaxlen;
- /*
- * ibmaxlen at init time, limited by chip and by receive buffer
- * size. Not changed after init.
- */
- u32 init_ibmaxlen;
- /* LID programmed for this instance */
- u16 lid;
- /* list of pkeys programmed; 0 if not set */
- u16 pkeys[4];
- /* LID mask control */
- u8 lmc;
- u8 link_width_supported;
- u8 link_speed_supported;
- u8 link_width_enabled;
- u8 link_speed_enabled;
- u8 link_width_active;
- u8 link_speed_active;
- u8 vls_supported;
- u8 vls_operational;
- /* Rx Polarity inversion (compensate for ~tx on partner) */
- u8 rx_pol_inv;
-
- u8 hw_pidx; /* physical port index */
- u8 port; /* IB port number and index into dd->pports - 1 */
-
- u8 delay_mult;
-
- /* used to override LED behavior */
- u8 led_override; /* Substituted for normal value, if non-zero */
- u16 led_override_timeoff; /* delta to next timer event */
- u8 led_override_vals[2]; /* Alternates per blink-frame */
- u8 led_override_phase; /* Just counts, LSB picks from vals[] */
- atomic_t led_override_timer_active;
- /* Used to flash LEDs in override mode */
- struct timer_list led_override_timer;
- struct xmit_wait cong_stats;
- struct timer_list symerr_clear_timer;
-
- /* Synchronize access between driver writes and sysfs reads */
- spinlock_t cc_shadow_lock
- ____cacheline_aligned_in_smp;
-
- /* Shadow copy of the congestion control table */
- struct cc_table_shadow *ccti_entries_shadow;
-
- /* Shadow copy of the congestion control entries */
- struct ib_cc_congestion_setting_attr_shadow *congestion_entries_shadow;
-
- /* List of congestion control table entries */
- struct ib_cc_table_entry_shadow *ccti_entries;
-
- /* 16 congestion entries with each entry corresponding to a SL */
- struct ib_cc_congestion_entry_shadow *congestion_entries;
-
- /* Maximum number of congestion control entries that the agent expects
- * the manager to send.
- */
- u16 cc_supported_table_entries;
-
- /* Total number of congestion control table entries */
- u16 total_cct_entry;
-
- /* Bit map identifying service level */
- u16 cc_sl_control_map;
-
- /* maximum congestion control table index */
- u16 ccti_limit;
-
- /* CA's max number of 64 entry units in the congestion control table */
- u8 cc_max_table_entries;
-};
-
-/* Observers. Not to be taken lightly, possibly not to ship. */
-/*
- * If a diag read or write is to (bottom <= offset <= top),
- * the "hoook" is called, allowing, e.g. shadows to be
- * updated in sync with the driver. struct diag_observer
- * is the "visible" part.
- */
-struct diag_observer;
-
-typedef int (*diag_hook) (struct qib_devdata *dd,
- const struct diag_observer *op,
- u32 offs, u64 *data, u64 mask, int only_32);
-
-struct diag_observer {
- diag_hook hook;
- u32 bottom;
- u32 top;
-};
-
-extern int qib_register_observer(struct qib_devdata *dd,
- const struct diag_observer *op);
-
-/* Only declared here, not defined. Private to diags */
-struct diag_observer_list_elt;
-
-/* device data struct now contains only "general per-device" info.
- * fields related to a physical IB port are in a qib_pportdata struct,
- * described above) while fields only used by a particular chip-type are in
- * a qib_chipdata struct, whose contents are opaque to this file.
- */
-struct qib_devdata {
- struct qib_ibdev verbs_dev; /* must be first */
- struct list_head list;
- /* pointers to related structs for this device */
- /* pci access data structure */
- struct pci_dev *pcidev;
- struct cdev *user_cdev;
- struct cdev *diag_cdev;
- struct device *user_device;
- struct device *diag_device;
-
- /* mem-mapped pointer to base of chip regs */
- u64 __iomem *kregbase;
- /* end of mem-mapped chip space excluding sendbuf and user regs */
- u64 __iomem *kregend;
- /* physical address of chip for io_remap, etc. */
- resource_size_t physaddr;
- /* qib_cfgctxts pointers */
- struct qib_ctxtdata **rcd; /* Receive Context Data */
-
- /* qib_pportdata, points to array of (physical) port-specific
- * data structs, indexed by pidx (0..n-1)
- */
- struct qib_pportdata *pport;
- struct qib_chip_specific *cspec; /* chip-specific */
-
- /* kvirt address of 1st 2k pio buffer */
- void __iomem *pio2kbase;
- /* kvirt address of 1st 4k pio buffer */
- void __iomem *pio4kbase;
- /* mem-mapped pointer to base of PIO buffers (if using WC PAT) */
- void __iomem *piobase;
- /* mem-mapped pointer to base of user chip regs (if using WC PAT) */
- u64 __iomem *userbase;
- void __iomem *piovl15base; /* base of VL15 buffers, if not WC */
- /*
- * points to area where PIOavail registers will be DMA'ed.
- * Has to be on a page of its own, because the page will be
- * mapped into user program space. This copy is *ONLY* ever
- * written by DMA, not by the driver! Need a copy per device
- * when we get to multiple devices
- */
- volatile __le64 *pioavailregs_dma; /* DMA'ed by chip */
- /* physical address where updates occur */
- dma_addr_t pioavailregs_phys;
-
- /* device-specific implementations of functions needed by
- * common code. Contrary to previous consensus, we can't
- * really just point to a device-specific table, because we
- * may need to "bend", e.g. *_f_put_tid
- */
- /* fallback to alternate interrupt type if possible */
- int (*f_intr_fallback)(struct qib_devdata *);
- /* hard reset chip */
- int (*f_reset)(struct qib_devdata *);
- void (*f_quiet_serdes)(struct qib_pportdata *);
- int (*f_bringup_serdes)(struct qib_pportdata *);
- int (*f_early_init)(struct qib_devdata *);
- void (*f_clear_tids)(struct qib_devdata *, struct qib_ctxtdata *);
- void (*f_put_tid)(struct qib_devdata *, u64 __iomem*,
- u32, unsigned long);
- void (*f_cleanup)(struct qib_devdata *);
- void (*f_setextled)(struct qib_pportdata *, u32);
- /* fill out chip-specific fields */
- int (*f_get_base_info)(struct qib_ctxtdata *, struct qib_base_info *);
- /* free irq */
- void (*f_free_irq)(struct qib_devdata *);
- struct qib_message_header *(*f_get_msgheader)
- (struct qib_devdata *, __le32 *);
- void (*f_config_ctxts)(struct qib_devdata *);
- int (*f_get_ib_cfg)(struct qib_pportdata *, int);
- int (*f_set_ib_cfg)(struct qib_pportdata *, int, u32);
- int (*f_set_ib_loopback)(struct qib_pportdata *, const char *);
- int (*f_get_ib_table)(struct qib_pportdata *, int, void *);
- int (*f_set_ib_table)(struct qib_pportdata *, int, void *);
- u32 (*f_iblink_state)(u64);
- u8 (*f_ibphys_portstate)(u64);
- void (*f_xgxs_reset)(struct qib_pportdata *);
- /* per chip actions needed for IB Link up/down changes */
- int (*f_ib_updown)(struct qib_pportdata *, int, u64);
- u32 __iomem *(*f_getsendbuf)(struct qib_pportdata *, u64, u32 *);
- /* Read/modify/write of GPIO pins (potentially chip-specific) */
- int (*f_gpio_mod)(struct qib_devdata *dd, u32 out, u32 dir,
- u32 mask);
- /* Enable writes to config EEPROM (if supported) */
- int (*f_eeprom_wen)(struct qib_devdata *dd, int wen);
- /*
- * modify rcvctrl shadow[s] and write to appropriate chip-regs.
- * see above QIB_RCVCTRL_xxx_ENB/DIS for operations.
- * (ctxt == -1) means "all contexts", only meaningful for
- * clearing. Could remove if chip_spec shutdown properly done.
- */
- void (*f_rcvctrl)(struct qib_pportdata *, unsigned int op,
- int ctxt);
- /* Read/modify/write sendctrl appropriately for op and port. */
- void (*f_sendctrl)(struct qib_pportdata *, u32 op);
- void (*f_set_intr_state)(struct qib_devdata *, u32);
- void (*f_set_armlaunch)(struct qib_devdata *, u32);
- void (*f_wantpiobuf_intr)(struct qib_devdata *, u32);
- int (*f_late_initreg)(struct qib_devdata *);
- int (*f_init_sdma_regs)(struct qib_pportdata *);
- u16 (*f_sdma_gethead)(struct qib_pportdata *);
- int (*f_sdma_busy)(struct qib_pportdata *);
- void (*f_sdma_update_tail)(struct qib_pportdata *, u16);
- void (*f_sdma_set_desc_cnt)(struct qib_pportdata *, unsigned);
- void (*f_sdma_sendctrl)(struct qib_pportdata *, unsigned);
- void (*f_sdma_hw_clean_up)(struct qib_pportdata *);
- void (*f_sdma_hw_start_up)(struct qib_pportdata *);
- void (*f_sdma_init_early)(struct qib_pportdata *);
- void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
- void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32, u32);
- u32 (*f_hdrqempty)(struct qib_ctxtdata *);
- u64 (*f_portcntr)(struct qib_pportdata *, u32);
- u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
- u64 **);
- u32 (*f_read_portcntrs)(struct qib_devdata *, loff_t, u32,
- char **, u64 **);
- u32 (*f_setpbc_control)(struct qib_pportdata *, u32, u8, u8);
- void (*f_initvl15_bufs)(struct qib_devdata *);
- void (*f_init_ctxt)(struct qib_ctxtdata *);
- void (*f_txchk_change)(struct qib_devdata *, u32, u32, u32,
- struct qib_ctxtdata *);
- void (*f_writescratch)(struct qib_devdata *, u32);
- int (*f_tempsense_rd)(struct qib_devdata *, int regnum);
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- int (*f_notify_dca)(struct qib_devdata *, unsigned long event);
-#endif
-
- char *boardname; /* human readable board info */
-
- /* template for writing TIDs */
- u64 tidtemplate;
- /* value to write to free TIDs */
- u64 tidinvalid;
-
- /* number of registers used for pioavail */
- u32 pioavregs;
- /* device (not port) flags, basically device capabilities */
- u32 flags;
- /* last buffer for user use */
- u32 lastctxt_piobuf;
-
- /* reset value */
- u64 z_int_counter;
- /* percpu intcounter */
- u64 __percpu *int_counter;
-
- /* pio bufs allocated per ctxt */
- u32 pbufsctxt;
- /* if remainder on bufs/ctxt, ctxts < extrabuf get 1 extra */
- u32 ctxts_extrabuf;
- /*
- * number of ctxts configured as max; zero is set to number chip
- * supports, less gives more pio bufs/ctxt, etc.
- */
- u32 cfgctxts;
- /*
- * number of ctxts available for PSM open
- */
- u32 freectxts;
-
- /*
- * hint that we should update pioavailshadow before
- * looking for a PIO buffer
- */
- u32 upd_pio_shadow;
-
- /* internal debugging stats */
- u32 maxpkts_call;
- u32 avgpkts_call;
- u64 nopiobufs;
-
- /* PCI Vendor ID (here for NodeInfo) */
- u16 vendorid;
- /* PCI Device ID (here for NodeInfo) */
- u16 deviceid;
- /* for write combining settings */
- int wc_cookie;
- unsigned long wc_base;
- unsigned long wc_len;
-
- /* shadow copy of struct page *'s for exp tid pages */
- struct page **pageshadow;
- /* shadow copy of dma handles for exp tid pages */
- dma_addr_t *physshadow;
- u64 __iomem *egrtidbase;
- spinlock_t sendctrl_lock; /* protect changes to sendctrl shadow */
- /* around rcd and (user ctxts) ctxt_cnt use (intr vs free) */
- spinlock_t uctxt_lock; /* rcd and user context changes */
- /*
- * per unit status, see also portdata statusp
- * mapped readonly into user processes so they can get unit and
- * IB link status cheaply
- */
- u64 *devstatusp;
- char *freezemsg; /* freeze msg if hw error put chip in freeze */
- u32 freezelen; /* max length of freezemsg */
- /* timer used to prevent stats overflow, error throttling, etc. */
- struct timer_list stats_timer;
-
- /* timer to verify interrupts work, and fallback if possible */
- struct timer_list intrchk_timer;
- unsigned long ureg_align; /* user register alignment */
-
- /*
- * Protects pioavailshadow, pioavailkernel, pio_need_disarm, and
- * pio_writing.
- */
- spinlock_t pioavail_lock;
- /*
- * index of last buffer to optimize search for next
- */
- u32 last_pio;
- /*
- * min kernel pio buffer to optimize search
- */
- u32 min_kernel_pio;
- /*
- * Shadow copies of registers; size indicates read access size.
- * Most of them are readonly, but some are write-only register,
- * where we manipulate the bits in the shadow copy, and then write
- * the shadow copy to qlogic_ib.
- *
- * We deliberately make most of these 32 bits, since they have
- * restricted range. For any that we read, we won't to generate 32
- * bit accesses, since Opteron will generate 2 separate 32 bit HT
- * transactions for a 64 bit read, and we want to avoid unnecessary
- * bus transactions.
- */
-
- /* This is the 64 bit group */
-
- unsigned long pioavailshadow[6];
- /* bitmap of send buffers available for the kernel to use with PIO. */
- unsigned long pioavailkernel[6];
- /* bitmap of send buffers which need to be disarmed. */
- unsigned long pio_need_disarm[3];
- /* bitmap of send buffers which are being written to. */
- unsigned long pio_writing[3];
- /* kr_revision shadow */
- u64 revision;
- /* Base GUID for device (from eeprom, network order) */
- __be64 base_guid;
-
- /*
- * kr_sendpiobufbase value (chip offset of pio buffers), and the
- * base of the 2KB buffer s(user processes only use 2K)
- */
- u64 piobufbase;
- u32 pio2k_bufbase;
-
- /* these are the "32 bit" regs */
-
- /* number of GUIDs in the flash for this interface */
- u32 nguid;
- /*
- * the following two are 32-bit bitmasks, but {test,clear,set}_bit
- * all expect bit fields to be "unsigned long"
- */
- unsigned long rcvctrl; /* shadow per device rcvctrl */
- unsigned long sendctrl; /* shadow per device sendctrl */
-
- /* value we put in kr_rcvhdrcnt */
- u32 rcvhdrcnt;
- /* value we put in kr_rcvhdrsize */
- u32 rcvhdrsize;
- /* value we put in kr_rcvhdrentsize */
- u32 rcvhdrentsize;
- /* kr_ctxtcnt value */
- u32 ctxtcnt;
- /* kr_pagealign value */
- u32 palign;
- /* number of "2KB" PIO buffers */
- u32 piobcnt2k;
- /* size in bytes of "2KB" PIO buffers */
- u32 piosize2k;
- /* max usable size in dwords of a "2KB" PIO buffer before going "4KB" */
- u32 piosize2kmax_dwords;
- /* number of "4KB" PIO buffers */
- u32 piobcnt4k;
- /* size in bytes of "4KB" PIO buffers */
- u32 piosize4k;
- /* kr_rcvegrbase value */
- u32 rcvegrbase;
- /* kr_rcvtidbase value */
- u32 rcvtidbase;
- /* kr_rcvtidcnt value */
- u32 rcvtidcnt;
- /* kr_userregbase */
- u32 uregbase;
- /* shadow the control register contents */
- u32 control;
-
- /* chip address space used by 4k pio buffers */
- u32 align4k;
- /* size of each rcvegrbuffer */
- u16 rcvegrbufsize;
- /* log2 of above */
- u16 rcvegrbufsize_shift;
- /* localbus width (1, 2,4,8,16,32) from config space */
- u32 lbus_width;
- /* localbus speed in MHz */
- u32 lbus_speed;
- int unit; /* unit # of this chip */
-
- /* start of CHIP_SPEC move to chipspec, but need code changes */
- /* low and high portions of MSI capability/vector */
- u32 msi_lo;
- /* saved after PCIe init for restore after reset */
- u32 msi_hi;
- /* MSI data (vector) saved for restore */
- u16 msi_data;
- /* so we can rewrite it after a chip reset */
- u32 pcibar0;
- /* so we can rewrite it after a chip reset */
- u32 pcibar1;
- u64 rhdrhead_intr_off;
-
- /*
- * ASCII serial number, from flash, large enough for original
- * all digit strings, and longer QLogic serial number format
- */
- u8 serial[16];
- /* human readable board version */
- u8 boardversion[96];
- u8 lbus_info[32]; /* human readable localbus info */
- /* chip major rev, from qib_revision */
- u8 majrev;
- /* chip minor rev, from qib_revision */
- u8 minrev;
-
- /* Misc small ints */
- /* Number of physical ports available */
- u8 num_pports;
- /* Lowest context number which can be used by user processes */
- u8 first_user_ctxt;
- u8 n_krcv_queues;
- u8 qpn_mask;
- u8 skip_kctxt_mask;
-
- u16 rhf_offset; /* offset of RHF within receive header entry */
-
- /*
- * GPIO pins for twsi-connected devices, and device code for eeprom
- */
- u8 gpio_sda_num;
- u8 gpio_scl_num;
- u8 twsi_eeprom_dev;
- u8 board_atten;
-
- /* Support (including locks) for EEPROM logging of errors and time */
- /* control access to actual counters, timer */
- spinlock_t eep_st_lock;
- /* control high-level access to EEPROM */
- struct mutex eep_lock;
- uint64_t traffic_wds;
- /*
- * masks for which bits of errs, hwerrs that cause
- * each of the counters to increment.
- */
- struct qib_eep_log_mask eep_st_masks[QIB_EEP_LOG_CNT];
- struct qib_diag_client *diag_client;
- spinlock_t qib_diag_trans_lock; /* protect diag observer ops */
- struct diag_observer_list_elt *diag_observer_list;
-
- u8 psxmitwait_supported;
- /* cycle length of PS* counters in HW (in picoseconds) */
- u16 psxmitwait_check_rate;
- /* high volume overflow errors deferred to tasklet */
- struct tasklet_struct error_tasklet;
- /* per device cq worker */
- struct kthread_worker *worker;
-
- int assigned_node_id; /* NUMA node closest to HCA */
-};
-
-/* hol_state values */
-#define QIB_HOL_UP 0
-#define QIB_HOL_INIT 1
-
-#define QIB_SDMA_SENDCTRL_OP_ENABLE (1U << 0)
-#define QIB_SDMA_SENDCTRL_OP_INTENABLE (1U << 1)
-#define QIB_SDMA_SENDCTRL_OP_HALT (1U << 2)
-#define QIB_SDMA_SENDCTRL_OP_CLEANUP (1U << 3)
-#define QIB_SDMA_SENDCTRL_OP_DRAIN (1U << 4)
-
-/* operation types for f_txchk_change() */
-#define TXCHK_CHG_TYPE_DIS1 3
-#define TXCHK_CHG_TYPE_ENAB1 2
-#define TXCHK_CHG_TYPE_KERN 1
-#define TXCHK_CHG_TYPE_USER 0
-
-#define QIB_CHASE_TIME msecs_to_jiffies(145)
-#define QIB_CHASE_DIS_TIME msecs_to_jiffies(160)
-
-/* Private data for file operations */
-struct qib_filedata {
- struct qib_ctxtdata *rcd;
- unsigned subctxt;
- unsigned tidcursor;
- struct qib_user_sdma_queue *pq;
- int rec_cpu_num; /* for cpu affinity; -1 if none */
-};
-
-extern struct list_head qib_dev_list;
-extern spinlock_t qib_devs_lock;
-extern struct qib_devdata *qib_lookup(int unit);
-extern u32 qib_cpulist_count;
-extern unsigned long *qib_cpulist;
-
-extern unsigned qib_cc_table_size;
-int qib_init(struct qib_devdata *, int);
-int init_chip_wc_pat(struct qib_devdata *dd, u32);
-int qib_enable_wc(struct qib_devdata *dd);
-void qib_disable_wc(struct qib_devdata *dd);
-int qib_count_units(int *npresentp, int *nupp);
-int qib_count_active_units(void);
-
-int qib_cdev_init(int minor, const char *name,
- const struct file_operations *fops,
- struct cdev **cdevp, struct device **devp);
-void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp);
-int qib_dev_init(void);
-void qib_dev_cleanup(void);
-
-int qib_diag_add(struct qib_devdata *);
-void qib_diag_remove(struct qib_devdata *);
-void qib_handle_e_ibstatuschanged(struct qib_pportdata *, u64);
-void qib_sdma_update_tail(struct qib_pportdata *, u16); /* hold sdma_lock */
-
-int qib_decode_err(struct qib_devdata *dd, char *buf, size_t blen, u64 err);
-void qib_bad_intrstatus(struct qib_devdata *);
-void qib_handle_urcv(struct qib_devdata *, u64);
-
-/* clean up any per-device, chip-specific state */
-void qib_chip_cleanup(struct qib_devdata *);
-/* clean up any chip-type-specific state */
-void qib_chip_done(void);
-
-/* check to see if we have to force ordering for write combining */
-int qib_unordered_wc(void);
-void qib_pio_copy(void __iomem *to, const void *from, size_t count);
-
-void qib_disarm_piobufs(struct qib_devdata *, unsigned, unsigned);
-int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *);
-void qib_disarm_piobufs_set(struct qib_devdata *, unsigned long *, unsigned);
-void qib_cancel_sends(struct qib_pportdata *);
-
-int qib_create_rcvhdrq(struct qib_devdata *, struct qib_ctxtdata *);
-int qib_setup_eagerbufs(struct qib_ctxtdata *);
-void qib_set_ctxtcnt(struct qib_devdata *);
-int qib_create_ctxts(struct qib_devdata *dd);
-struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32, int);
-int qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8);
-void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *);
-
-u32 qib_kreceive(struct qib_ctxtdata *, u32 *, u32 *);
-int qib_reset_device(int);
-int qib_wait_linkstate(struct qib_pportdata *, u32, int);
-int qib_set_linkstate(struct qib_pportdata *, u8);
-int qib_set_mtu(struct qib_pportdata *, u16);
-int qib_set_lid(struct qib_pportdata *, u32, u8);
-void qib_hol_down(struct qib_pportdata *);
-void qib_hol_init(struct qib_pportdata *);
-void qib_hol_up(struct qib_pportdata *);
-void qib_hol_event(unsigned long);
-void qib_disable_after_error(struct qib_devdata *);
-int qib_set_uevent_bits(struct qib_pportdata *, const int);
-
-/* for use in system calls, where we want to know device type, etc. */
-#define ctxt_fp(fp) \
- (((struct qib_filedata *)(fp)->private_data)->rcd)
-#define subctxt_fp(fp) \
- (((struct qib_filedata *)(fp)->private_data)->subctxt)
-#define tidcursor_fp(fp) \
- (((struct qib_filedata *)(fp)->private_data)->tidcursor)
-#define user_sdma_queue_fp(fp) \
- (((struct qib_filedata *)(fp)->private_data)->pq)
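/*
 * Editorial sketch, not part of the original header: one plausible way the
 * *_fp() accessors above get used inside a file-operations handler, where
 * only the struct file is at hand. The function name and control flow are
 * hypothetical; only ctxt_fp()/subctxt_fp() come from the header itself.
 */
static int example_fp_usage(struct file *fp)
{
	struct qib_ctxtdata *rcd = ctxt_fp(fp);
	unsigned subctxt = subctxt_fp(fp);

	if (!rcd)
		return -EINVAL;
	/* subctxt selects the shared-context slice owned by this fd */
	(void)subctxt;
	return 0;
}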
-
-static inline struct qib_devdata *dd_from_ppd(struct qib_pportdata *ppd)
-{
- return ppd->dd;
-}
-
-static inline struct qib_devdata *dd_from_dev(struct qib_ibdev *dev)
-{
- return container_of(dev, struct qib_devdata, verbs_dev);
-}
-
-static inline struct qib_devdata *dd_from_ibdev(struct ib_device *ibdev)
-{
- return dd_from_dev(to_idev(ibdev));
-}
-
-static inline struct qib_pportdata *ppd_from_ibp(struct qib_ibport *ibp)
-{
- return container_of(ibp, struct qib_pportdata, ibport_data);
-}
-
-static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
-	unsigned pidx = port - 1; /* IB numbers ports from 1, hardware from 0 */
-
- WARN_ON(pidx >= dd->num_pports);
- return &dd->pport[pidx].ibport_data;
-}
-
-/*
- * values for dd->flags (_device_ related flags)
- */
-#define QIB_HAS_LINK_LATENCY 0x1 /* supports link latency (IB 1.2) */
-#define QIB_INITTED 0x2 /* chip and driver up and initted */
-#define QIB_DOING_RESET 0x4 /* in the middle of doing chip reset */
-#define QIB_PRESENT 0x8 /* chip accesses can be done */
-#define QIB_PIO_FLUSH_WC 0x10 /* Needs Write combining flush for PIO */
-#define QIB_HAS_THRESH_UPDATE 0x40
-#define QIB_HAS_SDMA_TIMEOUT 0x80
-#define QIB_USE_SPCL_TRIG 0x100 /* SpecialTrigger launch enabled */
-#define QIB_NODMA_RTAIL 0x200 /* rcvhdrtail register not updated by DMA */
-#define QIB_HAS_INTX 0x800 /* Supports INTx interrupts */
-#define QIB_HAS_SEND_DMA 0x1000 /* Supports Send DMA */
-#define QIB_HAS_VLSUPP 0x2000 /* Supports multiple VLs; PBC different */
-#define QIB_HAS_HDRSUPP 0x4000 /* Supports header suppression */
-#define QIB_BADINTR 0x8000 /* severe interrupt problems */
-#define QIB_DCA_ENABLED 0x10000 /* Direct Cache Access enabled */
-#define QIB_HAS_QSFP 0x20000 /* device (card instance) has QSFP */
-
-/*
- * values for ppd->lflags (_ib_port_ related flags)
- */
-#define QIBL_LINKV 0x1 /* IB link state valid */
-#define QIBL_LINKDOWN 0x8 /* IB link is down */
-#define QIBL_LINKINIT 0x10 /* IB link level is up */
-#define QIBL_LINKARMED 0x20 /* IB link is ARMED */
-#define QIBL_LINKACTIVE 0x40 /* IB link is ACTIVE */
-/* leave a gap for more IB-link state */
-#define QIBL_IB_AUTONEG_INPROG 0x1000 /* non-IBTA DDR/QDR neg active */
-#define QIBL_IB_AUTONEG_FAILED 0x2000 /* non-IBTA DDR/QDR neg failed */
-#define QIBL_IB_LINK_DISABLED 0x4000 /* Linkdown-disable forced,
-					* do not try to bring up */
-#define QIBL_IB_FORCE_NOTIFY 0x8000 /* force notify on next ib change */
-
-/* IB dword length mask in PBC (lower 11 bits); same for all chips */
-#define QIB_PBC_LENGTH_MASK ((1 << 11) - 1)
-
-
-/* ctxt_flag bit offsets */
- /* waiting for a packet to arrive */
-#define QIB_CTXT_WAITING_RCV 2
- /* master has not finished initializing */
-#define QIB_CTXT_MASTER_UNINIT 4
- /* waiting for an urgent packet to arrive */
-#define QIB_CTXT_WAITING_URG 5
-
-/* free up any allocated data at close */
-void qib_free_data(struct qib_ctxtdata *dd);
-void qib_chg_pioavailkernel(struct qib_devdata *, unsigned, unsigned,
- u32, struct qib_ctxtdata *);
-struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *,
- const struct pci_device_id *);
-struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *,
- const struct pci_device_id *);
-struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *,
- const struct pci_device_id *);
-void qib_free_devdata(struct qib_devdata *);
-struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra);
-
-#define QIB_TWSI_NO_DEV 0xFF
-/* The qib_twsi_* functions below must be called with eep_lock held */
-int qib_twsi_reset(struct qib_devdata *dd);
-int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
- int len);
-int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
- const void *buffer, int len);
-void qib_get_eeprom_info(struct qib_devdata *);
-#define qib_inc_eeprom_err(dd, eidx, incr)
-void qib_dump_lookup_output_queue(struct qib_devdata *);
-void qib_force_pio_avail_update(struct qib_devdata *);
-void qib_clear_symerror_on_linkup(unsigned long opaque);
-
-/*
- * Set LED override, only the two LSBs have "public" meaning, but
- * any non-zero value substitutes them for the Link and LinkTrain
- * LED states.
- */
-#define QIB_LED_PHYS 1 /* Physical (linktraining) GREEN LED */
-#define QIB_LED_LOG 2 /* Logical (link) YELLOW LED */
-void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val);
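/*
 * Hedged usage sketch (an assumption, not copied from the driver): force
 * both LEDs on to visually identify a port, then clear the override so the
 * LEDs again track the real Link/LinkTrain state, as the comment above
 * implies for a zero value. qib_led_identify_example() is hypothetical.
 */
static void qib_led_identify_example(struct qib_pportdata *ppd)
{
	qib_set_led_override(ppd, QIB_LED_PHYS | QIB_LED_LOG);
	/* ... leave the override in place while locating the port ... */
	qib_set_led_override(ppd, 0);	/* back to normal LED behavior */
}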
-
-/* send dma routines */
-int qib_setup_sdma(struct qib_pportdata *);
-void qib_teardown_sdma(struct qib_pportdata *);
-void __qib_sdma_intr(struct qib_pportdata *);
-void qib_sdma_intr(struct qib_pportdata *);
-void qib_user_sdma_send_desc(struct qib_pportdata *dd,
- struct list_head *pktlist);
-int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *,
- u32, struct qib_verbs_txreq *);
-/* ppd->sdma_lock should be locked before calling this. */
-int qib_sdma_make_progress(struct qib_pportdata *dd);
-
-static inline int qib_sdma_empty(const struct qib_pportdata *ppd)
-{
- return ppd->sdma_descq_added == ppd->sdma_descq_removed;
-}
-
-/* must be called under qib_sdma_lock */
-static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd)
-{
- return ppd->sdma_descq_cnt -
- (ppd->sdma_descq_added - ppd->sdma_descq_removed) - 1;
-}
-
-static inline int __qib_sdma_running(struct qib_pportdata *ppd)
-{
- return ppd->sdma_state.current_state == qib_sdma_state_s99_running;
-}
-int qib_sdma_running(struct qib_pportdata *);
-void dump_sdma_state(struct qib_pportdata *ppd);
-void __qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
-void qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
-
-/*
- * number of words used for the protocol header if not set by qib_userinit()
- */
-#define QIB_DFLT_RCVHDRSIZE 9
-
-/*
- * We need to be able to handle an IB header of at least 24 dwords.
- * We need the rcvhdrq large enough to handle largest IB header, but
- * still have room for a 2KB MTU standard IB packet.
- * Additionally, some processor/memory controller combinations
- * benefit quite strongly from having the DMA'ed data be cacheline
- * aligned and a cacheline multiple, so we set the size to 32 dwords
- * (2 64-byte primary cachelines for pretty much all processors of
- * interest). The alignment hurts nothing, other than using somewhat
- * more memory.
- */
-#define QIB_RCVHDR_ENTSIZE 32
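/*
 * Worked out from the comment above: 32 dwords * 4 bytes/dword = 128 bytes,
 * i.e. exactly two 64-byte cachelines per receive header entry.
 */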
-
-int qib_get_user_pages(unsigned long, size_t, struct page **);
-void qib_release_user_pages(struct page **, size_t);
-int qib_eeprom_read(struct qib_devdata *, u8, void *, int);
-int qib_eeprom_write(struct qib_devdata *, u8, const void *, int);
-u32 __iomem *qib_getsendbuf_range(struct qib_devdata *, u32 *, u32, u32);
-void qib_sendbuf_done(struct qib_devdata *, unsigned);
-
-static inline void qib_clear_rcvhdrtail(const struct qib_ctxtdata *rcd)
-{
- *((u64 *) rcd->rcvhdrtail_kvaddr) = 0ULL;
-}
-
-static inline u32 qib_get_rcvhdrtail(const struct qib_ctxtdata *rcd)
-{
- /*
-	 * volatile because it's a DMA target from the chip, the routine is
-	 * inlined, and we don't want register caching or reordering.
- */
- return (u32) le64_to_cpu(
- *((volatile __le64 *)rcd->rcvhdrtail_kvaddr)); /* DMA'ed */
-}
-
-static inline u32 qib_get_hdrqtail(const struct qib_ctxtdata *rcd)
-{
- const struct qib_devdata *dd = rcd->dd;
- u32 hdrqtail;
-
- if (dd->flags & QIB_NODMA_RTAIL) {
- __le32 *rhf_addr;
- u32 seq;
-
- rhf_addr = (__le32 *) rcd->rcvhdrq +
- rcd->head + dd->rhf_offset;
- seq = qib_hdrget_seq(rhf_addr);
- hdrqtail = rcd->head;
- if (seq == rcd->seq_cnt)
- hdrqtail++;
- } else
- hdrqtail = qib_get_rcvhdrtail(rcd);
-
- return hdrqtail;
-}
-
-/*
- * sysfs interface.
- */
-
-extern const char ib_qib_version[];
-
-int qib_device_create(struct qib_devdata *);
-void qib_device_remove(struct qib_devdata *);
-
-int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
- struct kobject *kobj);
-int qib_verbs_register_sysfs(struct qib_devdata *);
-void qib_verbs_unregister_sysfs(struct qib_devdata *);
-/* Hook for sysfs read of QSFP */
-extern int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len);
-
-int __init qib_init_qibfs(void);
-int __exit qib_exit_qibfs(void);
-
-int qibfs_add(struct qib_devdata *);
-int qibfs_remove(struct qib_devdata *);
-
-int qib_pcie_init(struct pci_dev *, const struct pci_device_id *);
-int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *,
- const struct pci_device_id *);
-void qib_pcie_ddcleanup(struct qib_devdata *);
-int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct qib_msix_entry *);
-int qib_reinit_intr(struct qib_devdata *);
-void qib_enable_intx(struct pci_dev *);
-void qib_nomsi(struct qib_devdata *);
-void qib_nomsix(struct qib_devdata *);
-void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *);
-void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8);
-/* interrupts for device */
-u64 qib_int_counter(struct qib_devdata *);
-/* interrupts for all devices */
-u64 qib_sps_ints(void);
-
-/*
- * dma_addr wrappers - all 0's invalid for hw
- */
-dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
- size_t, int);
-const char *qib_get_unit_name(int unit);
-
-/*
- * Flush write combining store buffers (if present) and perform a write
- * barrier.
- */
-static inline void qib_flush_wc(void)
-{
-#if defined(CONFIG_X86_64)
- asm volatile("sfence" : : : "memory");
-#else
- wmb(); /* no reorder around wc flush */
-#endif
-}
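/*
 * Sketch of the intended pattern (inferred from the comment above, not a
 * copy of driver code): flush write-combining buffers after filling a PIO
 * buffer so the chip never observes a partially written copy. piobuf, hdr
 * and hdrwords are hypothetical locals.
 *
 *	qib_pio_copy(piobuf, hdr, hdrwords);
 *	qib_flush_wc();
 */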
-
-/* global module parameter variables */
-extern unsigned qib_ibmtu;
-extern ushort qib_cfgctxts;
-extern ushort qib_num_cfg_vls;
-extern ushort qib_mini_init; /* If set, do few (ideally 0) writes to chip */
-extern unsigned qib_n_krcv_queues;
-extern unsigned qib_sdma_fetch_arb;
-extern unsigned qib_compat_ddr_negotiate;
-extern int qib_special_trigger;
-extern unsigned qib_numa_aware;
-
-extern struct mutex qib_mutex;
-
-/* Number of seconds before our card status check... */
-#define STATUS_TIMEOUT 60
-
-#define QIB_DRV_NAME "ib_qib"
-#define QIB_USER_MINOR_BASE 0
-#define QIB_TRACE_MINOR 127
-#define QIB_DIAGPKT_MINOR 128
-#define QIB_DIAG_MINOR_BASE 129
-#define QIB_NMINORS 255
-
-#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
-#define PCI_VENDOR_ID_QLOGIC 0x1077
-#define PCI_DEVICE_ID_QLOGIC_IB_6120 0x10
-#define PCI_DEVICE_ID_QLOGIC_IB_7220 0x7220
-#define PCI_DEVICE_ID_QLOGIC_IB_7322 0x7322
-
-/*
- * qib_early_err is used (only!) to print early errors before devdata is
- * allocated, or when dd->pcidev may not be valid, and at the tail end of
- * cleanup when devdata may have been freed, etc. qib_dev_porterr is
- * the same as qib_dev_err, but is used when the message really needs
- * the IB port# to be definitive as to what's happening.
- * All of these go to the trace log, and the trace log entry is done
- * first to avoid possible serial port delays from printk.
- */
-#define qib_early_err(dev, fmt, ...) \
- dev_err(dev, fmt, ##__VA_ARGS__)
-
-#define qib_dev_err(dd, fmt, ...) \
- dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
- qib_get_unit_name((dd)->unit), ##__VA_ARGS__)
-
-#define qib_dev_warn(dd, fmt, ...) \
- dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
- qib_get_unit_name((dd)->unit), ##__VA_ARGS__)
-
-#define qib_dev_porterr(dd, port, fmt, ...) \
- dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
- qib_get_unit_name((dd)->unit), (dd)->unit, (port), \
- ##__VA_ARGS__)
-
-#define qib_devinfo(pcidev, fmt, ...) \
- dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__)
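/*
 * Hedged examples of the message wrappers above; the message text is
 * illustrative, and ppd->port / nctxts are assumed fields and locals:
 *
 *	qib_dev_err(dd, "Failed to allocate rcvhdrq\n");
 *	qib_dev_porterr(dd, ppd->port, "link went down unexpectedly\n");
 *	qib_devinfo(dd->pcidev, "%u contexts available\n", nctxts);
 */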
-
-/*
- * This is used for formatting hw error messages.
- */
-struct qib_hwerror_msgs {
- u64 mask;
- const char *msg;
- size_t sz;
-};
-
-#define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b }
-
-/* in qib_intr.c... */
-void qib_format_hwerrors(u64 hwerrs,
- const struct qib_hwerror_msgs *hwerrmsgs,
- size_t nhwerrmsgs, char *msg, size_t lmsg);
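/*
 * Illustrative only (the mask bits below are placeholders, not real chip
 * bits): a table built with QLOGIC_IB_HWE_MSG and decoded into a single
 * string via qib_format_hwerrors(), whose prototype appears just above.
 */
static const struct qib_hwerror_msgs example_hwe_msgs[] = {
	QLOGIC_IB_HWE_MSG(1ULL << 0, "Example memory parity error"),
	QLOGIC_IB_HWE_MSG(1ULL << 1, "Example PLL slip"),
};

static void example_report_hwerrors(u64 hwerrs)
{
	char msg[128];

	qib_format_hwerrors(hwerrs, example_hwe_msgs,
			    ARRAY_SIZE(example_hwe_msgs), msg, sizeof(msg));
	pr_err("hardware errors: %s\n", msg);
}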
-#endif /* _QIB_KERNEL_H */
diff --git a/drivers/infiniband/hw/qib/qib_6120_regs.h b/drivers/infiniband/hw/qib/qib_6120_regs.h
deleted file mode 100644
index e16cb6f7de2c..000000000000
--- a/drivers/infiniband/hw/qib/qib_6120_regs.h
+++ /dev/null
@@ -1,977 +0,0 @@
-/*
- * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
-
-#define QIB_6120_Revision_OFFS 0x0
-#define QIB_6120_Revision_R_Simulator_LSB 0x3F
-#define QIB_6120_Revision_R_Simulator_RMASK 0x1
-#define QIB_6120_Revision_Reserved_LSB 0x28
-#define QIB_6120_Revision_Reserved_RMASK 0x7FFFFF
-#define QIB_6120_Revision_BoardID_LSB 0x20
-#define QIB_6120_Revision_BoardID_RMASK 0xFF
-#define QIB_6120_Revision_R_SW_LSB 0x18
-#define QIB_6120_Revision_R_SW_RMASK 0xFF
-#define QIB_6120_Revision_R_Arch_LSB 0x10
-#define QIB_6120_Revision_R_Arch_RMASK 0xFF
-#define QIB_6120_Revision_R_ChipRevMajor_LSB 0x8
-#define QIB_6120_Revision_R_ChipRevMajor_RMASK 0xFF
-#define QIB_6120_Revision_R_ChipRevMinor_LSB 0x0
-#define QIB_6120_Revision_R_ChipRevMinor_RMASK 0xFF
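/*
 * Editorial sketch, not part of the generated file: the _LSB/_RMASK pairs
 * above describe right-justified fields, consumed by shifting the raw
 * 64-bit register value right by the field's LSB and masking with RMASK.
 * The helper name is hypothetical.
 */
static inline u64 qib_6120_extract(u64 regval, u32 lsb, u64 rmask)
{
	return (regval >> lsb) & rmask;
}

/*
 * e.g. board ID from a raw Revision register read:
 *	boardid = qib_6120_extract(rev, QIB_6120_Revision_BoardID_LSB,
 *				   QIB_6120_Revision_BoardID_RMASK);
 */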
-
-#define QIB_6120_Control_OFFS 0x8
-#define QIB_6120_Control_TxLatency_LSB 0x4
-#define QIB_6120_Control_TxLatency_RMASK 0x1
-#define QIB_6120_Control_PCIERetryBufDiagEn_LSB 0x3
-#define QIB_6120_Control_PCIERetryBufDiagEn_RMASK 0x1
-#define QIB_6120_Control_LinkEn_LSB 0x2
-#define QIB_6120_Control_LinkEn_RMASK 0x1
-#define QIB_6120_Control_FreezeMode_LSB 0x1
-#define QIB_6120_Control_FreezeMode_RMASK 0x1
-#define QIB_6120_Control_SyncReset_LSB 0x0
-#define QIB_6120_Control_SyncReset_RMASK 0x1
-
-#define QIB_6120_PageAlign_OFFS 0x10
-
-#define QIB_6120_PortCnt_OFFS 0x18
-
-#define QIB_6120_SendRegBase_OFFS 0x30
-
-#define QIB_6120_UserRegBase_OFFS 0x38
-
-#define QIB_6120_CntrRegBase_OFFS 0x40
-
-#define QIB_6120_Scratch_OFFS 0x48
-#define QIB_6120_Scratch_TopHalf_LSB 0x20
-#define QIB_6120_Scratch_TopHalf_RMASK 0xFFFFFFFF
-#define QIB_6120_Scratch_BottomHalf_LSB 0x0
-#define QIB_6120_Scratch_BottomHalf_RMASK 0xFFFFFFFF
-
-#define QIB_6120_IntBlocked_OFFS 0x60
-#define QIB_6120_IntBlocked_ErrorIntBlocked_LSB 0x1F
-#define QIB_6120_IntBlocked_ErrorIntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_PioSetIntBlocked_LSB 0x1E
-#define QIB_6120_IntBlocked_PioSetIntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_PioBufAvailIntBlocked_LSB 0x1D
-#define QIB_6120_IntBlocked_PioBufAvailIntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_assertGPIOIntBlocked_LSB 0x1C
-#define QIB_6120_IntBlocked_assertGPIOIntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_Reserved_LSB 0xF
-#define QIB_6120_IntBlocked_Reserved_RMASK 0x1FFF
-#define QIB_6120_IntBlocked_RcvAvail4IntBlocked_LSB 0x10
-#define QIB_6120_IntBlocked_RcvAvail4IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_RcvAvail3IntBlocked_LSB 0xF
-#define QIB_6120_IntBlocked_RcvAvail3IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_RcvAvail2IntBlocked_LSB 0xE
-#define QIB_6120_IntBlocked_RcvAvail2IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_RcvAvail1IntBlocked_LSB 0xD
-#define QIB_6120_IntBlocked_RcvAvail1IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_RcvAvail0IntBlocked_LSB 0xC
-#define QIB_6120_IntBlocked_RcvAvail0IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_Reserved1_LSB 0x5
-#define QIB_6120_IntBlocked_Reserved1_RMASK 0x7F
-#define QIB_6120_IntBlocked_RcvUrg4IntBlocked_LSB 0x4
-#define QIB_6120_IntBlocked_RcvUrg4IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_RcvUrg3IntBlocked_LSB 0x3
-#define QIB_6120_IntBlocked_RcvUrg3IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_RcvUrg2IntBlocked_LSB 0x2
-#define QIB_6120_IntBlocked_RcvUrg2IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_RcvUrg1IntBlocked_LSB 0x1
-#define QIB_6120_IntBlocked_RcvUrg1IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_RcvUrg0IntBlocked_LSB 0x0
-#define QIB_6120_IntBlocked_RcvUrg0IntBlocked_RMASK 0x1
-
-#define QIB_6120_IntMask_OFFS 0x68
-#define QIB_6120_IntMask_ErrorIntMask_LSB 0x1F
-#define QIB_6120_IntMask_ErrorIntMask_RMASK 0x1
-#define QIB_6120_IntMask_PioSetIntMask_LSB 0x1E
-#define QIB_6120_IntMask_PioSetIntMask_RMASK 0x1
-#define QIB_6120_IntMask_PioBufAvailIntMask_LSB 0x1D
-#define QIB_6120_IntMask_PioBufAvailIntMask_RMASK 0x1
-#define QIB_6120_IntMask_assertGPIOIntMask_LSB 0x1C
-#define QIB_6120_IntMask_assertGPIOIntMask_RMASK 0x1
-#define QIB_6120_IntMask_Reserved_LSB 0x11
-#define QIB_6120_IntMask_Reserved_RMASK 0x7FF
-#define QIB_6120_IntMask_RcvAvail4IntMask_LSB 0x10
-#define QIB_6120_IntMask_RcvAvail4IntMask_RMASK 0x1
-#define QIB_6120_IntMask_RcvAvail3IntMask_LSB 0xF
-#define QIB_6120_IntMask_RcvAvail3IntMask_RMASK 0x1
-#define QIB_6120_IntMask_RcvAvail2IntMask_LSB 0xE
-#define QIB_6120_IntMask_RcvAvail2IntMask_RMASK 0x1
-#define QIB_6120_IntMask_RcvAvail1IntMask_LSB 0xD
-#define QIB_6120_IntMask_RcvAvail1IntMask_RMASK 0x1
-#define QIB_6120_IntMask_RcvAvail0IntMask_LSB 0xC
-#define QIB_6120_IntMask_RcvAvail0IntMask_RMASK 0x1
-#define QIB_6120_IntMask_Reserved1_LSB 0x5
-#define QIB_6120_IntMask_Reserved1_RMASK 0x7F
-#define QIB_6120_IntMask_RcvUrg4IntMask_LSB 0x4
-#define QIB_6120_IntMask_RcvUrg4IntMask_RMASK 0x1
-#define QIB_6120_IntMask_RcvUrg3IntMask_LSB 0x3
-#define QIB_6120_IntMask_RcvUrg3IntMask_RMASK 0x1
-#define QIB_6120_IntMask_RcvUrg2IntMask_LSB 0x2
-#define QIB_6120_IntMask_RcvUrg2IntMask_RMASK 0x1
-#define QIB_6120_IntMask_RcvUrg1IntMask_LSB 0x1
-#define QIB_6120_IntMask_RcvUrg1IntMask_RMASK 0x1
-#define QIB_6120_IntMask_RcvUrg0IntMask_LSB 0x0
-#define QIB_6120_IntMask_RcvUrg0IntMask_RMASK 0x1
-
-#define QIB_6120_IntStatus_OFFS 0x70
-#define QIB_6120_IntStatus_Error_LSB 0x1F
-#define QIB_6120_IntStatus_Error_RMASK 0x1
-#define QIB_6120_IntStatus_PioSent_LSB 0x1E
-#define QIB_6120_IntStatus_PioSent_RMASK 0x1
-#define QIB_6120_IntStatus_PioBufAvail_LSB 0x1D
-#define QIB_6120_IntStatus_PioBufAvail_RMASK 0x1
-#define QIB_6120_IntStatus_assertGPIO_LSB 0x1C
-#define QIB_6120_IntStatus_assertGPIO_RMASK 0x1
-#define QIB_6120_IntStatus_Reserved_LSB 0xF
-#define QIB_6120_IntStatus_Reserved_RMASK 0x1FFF
-#define QIB_6120_IntStatus_RcvAvail4_LSB 0x10
-#define QIB_6120_IntStatus_RcvAvail4_RMASK 0x1
-#define QIB_6120_IntStatus_RcvAvail3_LSB 0xF
-#define QIB_6120_IntStatus_RcvAvail3_RMASK 0x1
-#define QIB_6120_IntStatus_RcvAvail2_LSB 0xE
-#define QIB_6120_IntStatus_RcvAvail2_RMASK 0x1
-#define QIB_6120_IntStatus_RcvAvail1_LSB 0xD
-#define QIB_6120_IntStatus_RcvAvail1_RMASK 0x1
-#define QIB_6120_IntStatus_RcvAvail0_LSB 0xC
-#define QIB_6120_IntStatus_RcvAvail0_RMASK 0x1
-#define QIB_6120_IntStatus_Reserved1_LSB 0x5
-#define QIB_6120_IntStatus_Reserved1_RMASK 0x7F
-#define QIB_6120_IntStatus_RcvUrg4_LSB 0x4
-#define QIB_6120_IntStatus_RcvUrg4_RMASK 0x1
-#define QIB_6120_IntStatus_RcvUrg3_LSB 0x3
-#define QIB_6120_IntStatus_RcvUrg3_RMASK 0x1
-#define QIB_6120_IntStatus_RcvUrg2_LSB 0x2
-#define QIB_6120_IntStatus_RcvUrg2_RMASK 0x1
-#define QIB_6120_IntStatus_RcvUrg1_LSB 0x1
-#define QIB_6120_IntStatus_RcvUrg1_RMASK 0x1
-#define QIB_6120_IntStatus_RcvUrg0_LSB 0x0
-#define QIB_6120_IntStatus_RcvUrg0_RMASK 0x1
-
-#define QIB_6120_IntClear_OFFS 0x78
-#define QIB_6120_IntClear_ErrorIntClear_LSB 0x1F
-#define QIB_6120_IntClear_ErrorIntClear_RMASK 0x1
-#define QIB_6120_IntClear_PioSetIntClear_LSB 0x1E
-#define QIB_6120_IntClear_PioSetIntClear_RMASK 0x1
-#define QIB_6120_IntClear_PioBufAvailIntClear_LSB 0x1D
-#define QIB_6120_IntClear_PioBufAvailIntClear_RMASK 0x1
-#define QIB_6120_IntClear_assertGPIOIntClear_LSB 0x1C
-#define QIB_6120_IntClear_assertGPIOIntClear_RMASK 0x1
-#define QIB_6120_IntClear_Reserved_LSB 0xF
-#define QIB_6120_IntClear_Reserved_RMASK 0x1FFF
-#define QIB_6120_IntClear_RcvAvail4IntClear_LSB 0x10
-#define QIB_6120_IntClear_RcvAvail4IntClear_RMASK 0x1
-#define QIB_6120_IntClear_RcvAvail3IntClear_LSB 0xF
-#define QIB_6120_IntClear_RcvAvail3IntClear_RMASK 0x1
-#define QIB_6120_IntClear_RcvAvail2IntClear_LSB 0xE
-#define QIB_6120_IntClear_RcvAvail2IntClear_RMASK 0x1
-#define QIB_6120_IntClear_RcvAvail1IntClear_LSB 0xD
-#define QIB_6120_IntClear_RcvAvail1IntClear_RMASK 0x1
-#define QIB_6120_IntClear_RcvAvail0IntClear_LSB 0xC
-#define QIB_6120_IntClear_RcvAvail0IntClear_RMASK 0x1
-#define QIB_6120_IntClear_Reserved1_LSB 0x5
-#define QIB_6120_IntClear_Reserved1_RMASK 0x7F
-#define QIB_6120_IntClear_RcvUrg4IntClear_LSB 0x4
-#define QIB_6120_IntClear_RcvUrg4IntClear_RMASK 0x1
-#define QIB_6120_IntClear_RcvUrg3IntClear_LSB 0x3
-#define QIB_6120_IntClear_RcvUrg3IntClear_RMASK 0x1
-#define QIB_6120_IntClear_RcvUrg2IntClear_LSB 0x2
-#define QIB_6120_IntClear_RcvUrg2IntClear_RMASK 0x1
-#define QIB_6120_IntClear_RcvUrg1IntClear_LSB 0x1
-#define QIB_6120_IntClear_RcvUrg1IntClear_RMASK 0x1
-#define QIB_6120_IntClear_RcvUrg0IntClear_LSB 0x0
-#define QIB_6120_IntClear_RcvUrg0IntClear_RMASK 0x1
-
-#define QIB_6120_ErrMask_OFFS 0x80
-#define QIB_6120_ErrMask_Reserved_LSB 0x34
-#define QIB_6120_ErrMask_Reserved_RMASK 0xFFF
-#define QIB_6120_ErrMask_HardwareErrMask_LSB 0x33
-#define QIB_6120_ErrMask_HardwareErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_ResetNegatedMask_LSB 0x32
-#define QIB_6120_ErrMask_ResetNegatedMask_RMASK 0x1
-#define QIB_6120_ErrMask_InvalidAddrErrMask_LSB 0x31
-#define QIB_6120_ErrMask_InvalidAddrErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_IBStatusChangedMask_LSB 0x30
-#define QIB_6120_ErrMask_IBStatusChangedMask_RMASK 0x1
-#define QIB_6120_ErrMask_Reserved1_LSB 0x26
-#define QIB_6120_ErrMask_Reserved1_RMASK 0x3FF
-#define QIB_6120_ErrMask_SendUnsupportedVLErrMask_LSB 0x25
-#define QIB_6120_ErrMask_SendUnsupportedVLErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_SendUnexpectedPktNumErrMask_LSB 0x24
-#define QIB_6120_ErrMask_SendUnexpectedPktNumErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_SendPioArmLaunchErrMask_LSB 0x23
-#define QIB_6120_ErrMask_SendPioArmLaunchErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_SendDroppedDataPktErrMask_LSB 0x22
-#define QIB_6120_ErrMask_SendDroppedDataPktErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_SendDroppedSmpPktErrMask_LSB 0x21
-#define QIB_6120_ErrMask_SendDroppedSmpPktErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_SendPktLenErrMask_LSB 0x20
-#define QIB_6120_ErrMask_SendPktLenErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_SendUnderRunErrMask_LSB 0x1F
-#define QIB_6120_ErrMask_SendUnderRunErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_SendMaxPktLenErrMask_LSB 0x1E
-#define QIB_6120_ErrMask_SendMaxPktLenErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_SendMinPktLenErrMask_LSB 0x1D
-#define QIB_6120_ErrMask_SendMinPktLenErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_Reserved2_LSB 0x12
-#define QIB_6120_ErrMask_Reserved2_RMASK 0x7FF
-#define QIB_6120_ErrMask_RcvIBLostLinkErrMask_LSB 0x11
-#define QIB_6120_ErrMask_RcvIBLostLinkErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvHdrErrMask_LSB 0x10
-#define QIB_6120_ErrMask_RcvHdrErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvHdrLenErrMask_LSB 0xF
-#define QIB_6120_ErrMask_RcvHdrLenErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvBadTidErrMask_LSB 0xE
-#define QIB_6120_ErrMask_RcvBadTidErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvHdrFullErrMask_LSB 0xD
-#define QIB_6120_ErrMask_RcvHdrFullErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvEgrFullErrMask_LSB 0xC
-#define QIB_6120_ErrMask_RcvEgrFullErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvBadVersionErrMask_LSB 0xB
-#define QIB_6120_ErrMask_RcvBadVersionErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvIBFlowErrMask_LSB 0xA
-#define QIB_6120_ErrMask_RcvIBFlowErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvEBPErrMask_LSB 0x9
-#define QIB_6120_ErrMask_RcvEBPErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvUnsupportedVLErrMask_LSB 0x8
-#define QIB_6120_ErrMask_RcvUnsupportedVLErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvUnexpectedCharErrMask_LSB 0x7
-#define QIB_6120_ErrMask_RcvUnexpectedCharErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvShortPktLenErrMask_LSB 0x6
-#define QIB_6120_ErrMask_RcvShortPktLenErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvLongPktLenErrMask_LSB 0x5
-#define QIB_6120_ErrMask_RcvLongPktLenErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvMaxPktLenErrMask_LSB 0x4
-#define QIB_6120_ErrMask_RcvMaxPktLenErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvMinPktLenErrMask_LSB 0x3
-#define QIB_6120_ErrMask_RcvMinPktLenErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvICRCErrMask_LSB 0x2
-#define QIB_6120_ErrMask_RcvICRCErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvVCRCErrMask_LSB 0x1
-#define QIB_6120_ErrMask_RcvVCRCErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvFormatErrMask_LSB 0x0
-#define QIB_6120_ErrMask_RcvFormatErrMask_RMASK 0x1
-
-#define QIB_6120_ErrStatus_OFFS 0x88
-#define QIB_6120_ErrStatus_Reserved_LSB 0x34
-#define QIB_6120_ErrStatus_Reserved_RMASK 0xFFF
-#define QIB_6120_ErrStatus_HardwareErr_LSB 0x33
-#define QIB_6120_ErrStatus_HardwareErr_RMASK 0x1
-#define QIB_6120_ErrStatus_ResetNegated_LSB 0x32
-#define QIB_6120_ErrStatus_ResetNegated_RMASK 0x1
-#define QIB_6120_ErrStatus_InvalidAddrErr_LSB 0x31
-#define QIB_6120_ErrStatus_InvalidAddrErr_RMASK 0x1
-#define QIB_6120_ErrStatus_IBStatusChanged_LSB 0x30
-#define QIB_6120_ErrStatus_IBStatusChanged_RMASK 0x1
-#define QIB_6120_ErrStatus_Reserved1_LSB 0x26
-#define QIB_6120_ErrStatus_Reserved1_RMASK 0x3FF
-#define QIB_6120_ErrStatus_SendUnsupportedVLErr_LSB 0x25
-#define QIB_6120_ErrStatus_SendUnsupportedVLErr_RMASK 0x1
-#define QIB_6120_ErrStatus_SendUnexpectedPktNumErr_LSB 0x24
-#define QIB_6120_ErrStatus_SendUnexpectedPktNumErr_RMASK 0x1
-#define QIB_6120_ErrStatus_SendPioArmLaunchErr_LSB 0x23
-#define QIB_6120_ErrStatus_SendPioArmLaunchErr_RMASK 0x1
-#define QIB_6120_ErrStatus_SendDroppedDataPktErr_LSB 0x22
-#define QIB_6120_ErrStatus_SendDroppedDataPktErr_RMASK 0x1
-#define QIB_6120_ErrStatus_SendDroppedSmpPktErr_LSB 0x21
-#define QIB_6120_ErrStatus_SendDroppedSmpPktErr_RMASK 0x1
-#define QIB_6120_ErrStatus_SendPktLenErr_LSB 0x20
-#define QIB_6120_ErrStatus_SendPktLenErr_RMASK 0x1
-#define QIB_6120_ErrStatus_SendUnderRunErr_LSB 0x1F
-#define QIB_6120_ErrStatus_SendUnderRunErr_RMASK 0x1
-#define QIB_6120_ErrStatus_SendMaxPktLenErr_LSB 0x1E
-#define QIB_6120_ErrStatus_SendMaxPktLenErr_RMASK 0x1
-#define QIB_6120_ErrStatus_SendMinPktLenErr_LSB 0x1D
-#define QIB_6120_ErrStatus_SendMinPktLenErr_RMASK 0x1
-#define QIB_6120_ErrStatus_Reserved2_LSB 0x12
-#define QIB_6120_ErrStatus_Reserved2_RMASK 0x7FF
-#define QIB_6120_ErrStatus_RcvIBLostLinkErr_LSB 0x11
-#define QIB_6120_ErrStatus_RcvIBLostLinkErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvHdrErr_LSB 0x10
-#define QIB_6120_ErrStatus_RcvHdrErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvHdrLenErr_LSB 0xF
-#define QIB_6120_ErrStatus_RcvHdrLenErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvBadTidErr_LSB 0xE
-#define QIB_6120_ErrStatus_RcvBadTidErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvHdrFullErr_LSB 0xD
-#define QIB_6120_ErrStatus_RcvHdrFullErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvEgrFullErr_LSB 0xC
-#define QIB_6120_ErrStatus_RcvEgrFullErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvBadVersionErr_LSB 0xB
-#define QIB_6120_ErrStatus_RcvBadVersionErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvIBFlowErr_LSB 0xA
-#define QIB_6120_ErrStatus_RcvIBFlowErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvEBPErr_LSB 0x9
-#define QIB_6120_ErrStatus_RcvEBPErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvUnsupportedVLErr_LSB 0x8
-#define QIB_6120_ErrStatus_RcvUnsupportedVLErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvUnexpectedCharErr_LSB 0x7
-#define QIB_6120_ErrStatus_RcvUnexpectedCharErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvShortPktLenErr_LSB 0x6
-#define QIB_6120_ErrStatus_RcvShortPktLenErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvLongPktLenErr_LSB 0x5
-#define QIB_6120_ErrStatus_RcvLongPktLenErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvMaxPktLenErr_LSB 0x4
-#define QIB_6120_ErrStatus_RcvMaxPktLenErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvMinPktLenErr_LSB 0x3
-#define QIB_6120_ErrStatus_RcvMinPktLenErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvICRCErr_LSB 0x2
-#define QIB_6120_ErrStatus_RcvICRCErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvVCRCErr_LSB 0x1
-#define QIB_6120_ErrStatus_RcvVCRCErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvFormatErr_LSB 0x0
-#define QIB_6120_ErrStatus_RcvFormatErr_RMASK 0x1
-
-#define QIB_6120_ErrClear_OFFS 0x90
-#define QIB_6120_ErrClear_Reserved_LSB 0x34
-#define QIB_6120_ErrClear_Reserved_RMASK 0xFFF
-#define QIB_6120_ErrClear_HardwareErrClear_LSB 0x33
-#define QIB_6120_ErrClear_HardwareErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_ResetNegatedClear_LSB 0x32
-#define QIB_6120_ErrClear_ResetNegatedClear_RMASK 0x1
-#define QIB_6120_ErrClear_InvalidAddrErrClear_LSB 0x31
-#define QIB_6120_ErrClear_InvalidAddrErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_IBStatusChangedClear_LSB 0x30
-#define QIB_6120_ErrClear_IBStatusChangedClear_RMASK 0x1
-#define QIB_6120_ErrClear_Reserved1_LSB 0x26
-#define QIB_6120_ErrClear_Reserved1_RMASK 0x3FF
-#define QIB_6120_ErrClear_SendUnsupportedVLErrClear_LSB 0x25
-#define QIB_6120_ErrClear_SendUnsupportedVLErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_SendUnexpectedPktNumErrClear_LSB 0x24
-#define QIB_6120_ErrClear_SendUnexpectedPktNumErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_SendPioArmLaunchErrClear_LSB 0x23
-#define QIB_6120_ErrClear_SendPioArmLaunchErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_SendDroppedDataPktErrClear_LSB 0x22
-#define QIB_6120_ErrClear_SendDroppedDataPktErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_SendDroppedSmpPktErrClear_LSB 0x21
-#define QIB_6120_ErrClear_SendDroppedSmpPktErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_SendPktLenErrClear_LSB 0x20
-#define QIB_6120_ErrClear_SendPktLenErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_SendUnderRunErrClear_LSB 0x1F
-#define QIB_6120_ErrClear_SendUnderRunErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_SendMaxPktLenErrClear_LSB 0x1E
-#define QIB_6120_ErrClear_SendMaxPktLenErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_SendMinPktLenErrClear_LSB 0x1D
-#define QIB_6120_ErrClear_SendMinPktLenErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_Reserved2_LSB 0x12
-#define QIB_6120_ErrClear_Reserved2_RMASK 0x7FF
-#define QIB_6120_ErrClear_RcvIBLostLinkErrClear_LSB 0x11
-#define QIB_6120_ErrClear_RcvIBLostLinkErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvHdrErrClear_LSB 0x10
-#define QIB_6120_ErrClear_RcvHdrErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvHdrLenErrClear_LSB 0xF
-#define QIB_6120_ErrClear_RcvHdrLenErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvBadTidErrClear_LSB 0xE
-#define QIB_6120_ErrClear_RcvBadTidErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvHdrFullErrClear_LSB 0xD
-#define QIB_6120_ErrClear_RcvHdrFullErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvEgrFullErrClear_LSB 0xC
-#define QIB_6120_ErrClear_RcvEgrFullErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvBadVersionErrClear_LSB 0xB
-#define QIB_6120_ErrClear_RcvBadVersionErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvIBFlowErrClear_LSB 0xA
-#define QIB_6120_ErrClear_RcvIBFlowErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvEBPErrClear_LSB 0x9
-#define QIB_6120_ErrClear_RcvEBPErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvUnsupportedVLErrClear_LSB 0x8
-#define QIB_6120_ErrClear_RcvUnsupportedVLErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvUnexpectedCharErrClear_LSB 0x7
-#define QIB_6120_ErrClear_RcvUnexpectedCharErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvShortPktLenErrClear_LSB 0x6
-#define QIB_6120_ErrClear_RcvShortPktLenErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvLongPktLenErrClear_LSB 0x5
-#define QIB_6120_ErrClear_RcvLongPktLenErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvMaxPktLenErrClear_LSB 0x4
-#define QIB_6120_ErrClear_RcvMaxPktLenErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvMinPktLenErrClear_LSB 0x3
-#define QIB_6120_ErrClear_RcvMinPktLenErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvICRCErrClear_LSB 0x2
-#define QIB_6120_ErrClear_RcvICRCErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvVCRCErrClear_LSB 0x1
-#define QIB_6120_ErrClear_RcvVCRCErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvFormatErrClear_LSB 0x0
-#define QIB_6120_ErrClear_RcvFormatErrClear_RMASK 0x1
-
-#define QIB_6120_HwErrMask_OFFS 0x98
-#define QIB_6120_HwErrMask_IBCBusFromSPCParityErrMask_LSB 0x3F
-#define QIB_6120_HwErrMask_IBCBusFromSPCParityErrMask_RMASK 0x1
-#define QIB_6120_HwErrMask_IBCBusToSPCParityErrMask_LSB 0x3E
-#define QIB_6120_HwErrMask_IBCBusToSPCParityErrMask_RMASK 0x1
-#define QIB_6120_HwErrMask_Reserved_LSB 0x3D
-#define QIB_6120_HwErrMask_Reserved_RMASK 0x1
-#define QIB_6120_HwErrMask_IBSerdesPClkNotDetectMask_LSB 0x3C
-#define QIB_6120_HwErrMask_IBSerdesPClkNotDetectMask_RMASK 0x1
-#define QIB_6120_HwErrMask_PCIESerdesQ0PClkNotDetectMask_LSB 0x3B
-#define QIB_6120_HwErrMask_PCIESerdesQ0PClkNotDetectMask_RMASK 0x1
-#define QIB_6120_HwErrMask_PCIESerdesQ1PClkNotDetectMask_LSB 0x3A
-#define QIB_6120_HwErrMask_PCIESerdesQ1PClkNotDetectMask_RMASK 0x1
-#define QIB_6120_HwErrMask_Reserved1_LSB 0x39
-#define QIB_6120_HwErrMask_Reserved1_RMASK 0x1
-#define QIB_6120_HwErrMask_IBPLLrfSlipMask_LSB 0x38
-#define QIB_6120_HwErrMask_IBPLLrfSlipMask_RMASK 0x1
-#define QIB_6120_HwErrMask_IBPLLfbSlipMask_LSB 0x37
-#define QIB_6120_HwErrMask_IBPLLfbSlipMask_RMASK 0x1
-#define QIB_6120_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
-#define QIB_6120_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
-#define QIB_6120_HwErrMask_Reserved2_LSB 0x33
-#define QIB_6120_HwErrMask_Reserved2_RMASK 0x7
-#define QIB_6120_HwErrMask_RXEMemParityErrMask_LSB 0x2C
-#define QIB_6120_HwErrMask_RXEMemParityErrMask_RMASK 0x7F
-#define QIB_6120_HwErrMask_TXEMemParityErrMask_LSB 0x28
-#define QIB_6120_HwErrMask_TXEMemParityErrMask_RMASK 0xF
-#define QIB_6120_HwErrMask_Reserved3_LSB 0x22
-#define QIB_6120_HwErrMask_Reserved3_RMASK 0x3F
-#define QIB_6120_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
-#define QIB_6120_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
-#define QIB_6120_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
-#define QIB_6120_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
-#define QIB_6120_HwErrMask_PoisonedTLPMask_LSB 0x1D
-#define QIB_6120_HwErrMask_PoisonedTLPMask_RMASK 0x1
-#define QIB_6120_HwErrMask_Reserved4_LSB 0x6
-#define QIB_6120_HwErrMask_Reserved4_RMASK 0x7FFFFF
-#define QIB_6120_HwErrMask_PCIeMemParityErrMask_LSB 0x0
-#define QIB_6120_HwErrMask_PCIeMemParityErrMask_RMASK 0x3F
-
-#define QIB_6120_HwErrStatus_OFFS 0xA0
-#define QIB_6120_HwErrStatus_IBCBusFromSPCParityErr_LSB 0x3F
-#define QIB_6120_HwErrStatus_IBCBusFromSPCParityErr_RMASK 0x1
-#define QIB_6120_HwErrStatus_IBCBusToSPCParityErr_LSB 0x3E
-#define QIB_6120_HwErrStatus_IBCBusToSPCParityErr_RMASK 0x1
-#define QIB_6120_HwErrStatus_Reserved_LSB 0x3D
-#define QIB_6120_HwErrStatus_Reserved_RMASK 0x1
-#define QIB_6120_HwErrStatus_IBSerdesPClkNotDetect_LSB 0x3C
-#define QIB_6120_HwErrStatus_IBSerdesPClkNotDetect_RMASK 0x1
-#define QIB_6120_HwErrStatus_PCIESerdesQ0PClkNotDetect_LSB 0x3B
-#define QIB_6120_HwErrStatus_PCIESerdesQ0PClkNotDetect_RMASK 0x1
-#define QIB_6120_HwErrStatus_PCIESerdesQ1PClkNotDetect_LSB 0x3A
-#define QIB_6120_HwErrStatus_PCIESerdesQ1PClkNotDetect_RMASK 0x1
-#define QIB_6120_HwErrStatus_Reserved1_LSB 0x39
-#define QIB_6120_HwErrStatus_Reserved1_RMASK 0x1
-#define QIB_6120_HwErrStatus_IBPLLrfSlip_LSB 0x38
-#define QIB_6120_HwErrStatus_IBPLLrfSlip_RMASK 0x1
-#define QIB_6120_HwErrStatus_IBPLLfbSlip_LSB 0x37
-#define QIB_6120_HwErrStatus_IBPLLfbSlip_RMASK 0x1
-#define QIB_6120_HwErrStatus_PowerOnBISTFailed_LSB 0x36
-#define QIB_6120_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
-#define QIB_6120_HwErrStatus_Reserved2_LSB 0x33
-#define QIB_6120_HwErrStatus_Reserved2_RMASK 0x7
-#define QIB_6120_HwErrStatus_RXEMemParity_LSB 0x2C
-#define QIB_6120_HwErrStatus_RXEMemParity_RMASK 0x7F
-#define QIB_6120_HwErrStatus_TXEMemParity_LSB 0x28
-#define QIB_6120_HwErrStatus_TXEMemParity_RMASK 0xF
-#define QIB_6120_HwErrStatus_Reserved3_LSB 0x22
-#define QIB_6120_HwErrStatus_Reserved3_RMASK 0x3F
-#define QIB_6120_HwErrStatus_PCIeBusParity_LSB 0x1F
-#define QIB_6120_HwErrStatus_PCIeBusParity_RMASK 0x7
-#define QIB_6120_HwErrStatus_PcieCplTimeout_LSB 0x1E
-#define QIB_6120_HwErrStatus_PcieCplTimeout_RMASK 0x1
-#define QIB_6120_HwErrStatus_PoisenedTLP_LSB 0x1D
-#define QIB_6120_HwErrStatus_PoisenedTLP_RMASK 0x1
-#define QIB_6120_HwErrStatus_Reserved4_LSB 0x6
-#define QIB_6120_HwErrStatus_Reserved4_RMASK 0x7FFFFF
-#define QIB_6120_HwErrStatus_PCIeMemParity_LSB 0x0
-#define QIB_6120_HwErrStatus_PCIeMemParity_RMASK 0x3F
-
-#define QIB_6120_HwErrClear_OFFS 0xA8
-#define QIB_6120_HwErrClear_IBCBusFromSPCParityErrClear_LSB 0x3F
-#define QIB_6120_HwErrClear_IBCBusFromSPCParityErrClear_RMASK 0x1
-#define QIB_6120_HwErrClear_IBCBusToSPCparityErrClear_LSB 0x3E
-#define QIB_6120_HwErrClear_IBCBusToSPCparityErrClear_RMASK 0x1
-#define QIB_6120_HwErrClear_Reserved_LSB 0x3D
-#define QIB_6120_HwErrClear_Reserved_RMASK 0x1
-#define QIB_6120_HwErrClear_IBSerdesPClkNotDetectClear_LSB 0x3C
-#define QIB_6120_HwErrClear_IBSerdesPClkNotDetectClear_RMASK 0x1
-#define QIB_6120_HwErrClear_PCIESerdesQ0PClkNotDetectClear_LSB 0x3B
-#define QIB_6120_HwErrClear_PCIESerdesQ0PClkNotDetectClear_RMASK 0x1
-#define QIB_6120_HwErrClear_PCIESerdesQ1PClkNotDetectClear_LSB 0x3A
-#define QIB_6120_HwErrClear_PCIESerdesQ1PClkNotDetectClear_RMASK 0x1
-#define QIB_6120_HwErrClear_Reserved1_LSB 0x39
-#define QIB_6120_HwErrClear_Reserved1_RMASK 0x1
-#define QIB_6120_HwErrClear_IBPLLrfSlipClear_LSB 0x38
-#define QIB_6120_HwErrClear_IBPLLrfSlipClear_RMASK 0x1
-#define QIB_6120_HwErrClear_IBPLLfbSlipClear_LSB 0x37
-#define QIB_6120_HwErrClear_IBPLLfbSlipClear_RMASK 0x1
-#define QIB_6120_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
-#define QIB_6120_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
-#define QIB_6120_HwErrClear_Reserved2_LSB 0x33
-#define QIB_6120_HwErrClear_Reserved2_RMASK 0x7
-#define QIB_6120_HwErrClear_RXEMemParityClear_LSB 0x2C
-#define QIB_6120_HwErrClear_RXEMemParityClear_RMASK 0x7F
-#define QIB_6120_HwErrClear_TXEMemParityClear_LSB 0x28
-#define QIB_6120_HwErrClear_TXEMemParityClear_RMASK 0xF
-#define QIB_6120_HwErrClear_Reserved3_LSB 0x22
-#define QIB_6120_HwErrClear_Reserved3_RMASK 0x3F
-#define QIB_6120_HwErrClear_PCIeBusParityClr_LSB 0x1F
-#define QIB_6120_HwErrClear_PCIeBusParityClr_RMASK 0x7
-#define QIB_6120_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
-#define QIB_6120_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
-#define QIB_6120_HwErrClear_PoisonedTLPClear_LSB 0x1D
-#define QIB_6120_HwErrClear_PoisonedTLPClear_RMASK 0x1
-#define QIB_6120_HwErrClear_Reserved4_LSB 0x6
-#define QIB_6120_HwErrClear_Reserved4_RMASK 0x7FFFFF
-#define QIB_6120_HwErrClear_PCIeMemParityClr_LSB 0x0
-#define QIB_6120_HwErrClear_PCIeMemParityClr_RMASK 0x3F
-
-#define QIB_6120_HwDiagCtrl_OFFS 0xB0
-#define QIB_6120_HwDiagCtrl_ForceIBCBusFromSPCParityErr_LSB 0x3F
-#define QIB_6120_HwDiagCtrl_ForceIBCBusFromSPCParityErr_RMASK 0x1
-#define QIB_6120_HwDiagCtrl_ForceIBCBusToSPCParityErr_LSB 0x3E
-#define QIB_6120_HwDiagCtrl_ForceIBCBusToSPCParityErr_RMASK 0x1
-#define QIB_6120_HwDiagCtrl_CounterWrEnable_LSB 0x3D
-#define QIB_6120_HwDiagCtrl_CounterWrEnable_RMASK 0x1
-#define QIB_6120_HwDiagCtrl_CounterDisable_LSB 0x3C
-#define QIB_6120_HwDiagCtrl_CounterDisable_RMASK 0x1
-#define QIB_6120_HwDiagCtrl_Reserved_LSB 0x33
-#define QIB_6120_HwDiagCtrl_Reserved_RMASK 0x1FF
-#define QIB_6120_HwDiagCtrl_ForceRxMemParityErr_LSB 0x2C
-#define QIB_6120_HwDiagCtrl_ForceRxMemParityErr_RMASK 0x7F
-#define QIB_6120_HwDiagCtrl_ForceTxMemparityErr_LSB 0x28
-#define QIB_6120_HwDiagCtrl_ForceTxMemparityErr_RMASK 0xF
-#define QIB_6120_HwDiagCtrl_Reserved1_LSB 0x23
-#define QIB_6120_HwDiagCtrl_Reserved1_RMASK 0x1F
-#define QIB_6120_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
-#define QIB_6120_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
-#define QIB_6120_HwDiagCtrl_Reserved2_LSB 0x6
-#define QIB_6120_HwDiagCtrl_Reserved2_RMASK 0x1FFFFFF
-#define QIB_6120_HwDiagCtrl_forcePCIeMemParity_LSB 0x0
-#define QIB_6120_HwDiagCtrl_forcePCIeMemParity_RMASK 0x3F
-
-#define QIB_6120_IBCStatus_OFFS 0xC0
-#define QIB_6120_IBCStatus_TxCreditOk_LSB 0x1F
-#define QIB_6120_IBCStatus_TxCreditOk_RMASK 0x1
-#define QIB_6120_IBCStatus_TxReady_LSB 0x1E
-#define QIB_6120_IBCStatus_TxReady_RMASK 0x1
-#define QIB_6120_IBCStatus_Reserved_LSB 0x7
-#define QIB_6120_IBCStatus_Reserved_RMASK 0x7FFFFF
-#define QIB_6120_IBCStatus_LinkState_LSB 0x4
-#define QIB_6120_IBCStatus_LinkState_RMASK 0x7
-#define QIB_6120_IBCStatus_LinkTrainingState_LSB 0x0
-#define QIB_6120_IBCStatus_LinkTrainingState_RMASK 0xF
-
-#define QIB_6120_IBCCtrl_OFFS 0xC8
-#define QIB_6120_IBCCtrl_Loopback_LSB 0x3F
-#define QIB_6120_IBCCtrl_Loopback_RMASK 0x1
-#define QIB_6120_IBCCtrl_LinkDownDefaultState_LSB 0x3E
-#define QIB_6120_IBCCtrl_LinkDownDefaultState_RMASK 0x1
-#define QIB_6120_IBCCtrl_Reserved_LSB 0x2B
-#define QIB_6120_IBCCtrl_Reserved_RMASK 0x7FFFF
-#define QIB_6120_IBCCtrl_CreditScale_LSB 0x28
-#define QIB_6120_IBCCtrl_CreditScale_RMASK 0x7
-#define QIB_6120_IBCCtrl_OverrunThreshold_LSB 0x24
-#define QIB_6120_IBCCtrl_OverrunThreshold_RMASK 0xF
-#define QIB_6120_IBCCtrl_PhyerrThreshold_LSB 0x20
-#define QIB_6120_IBCCtrl_PhyerrThreshold_RMASK 0xF
-#define QIB_6120_IBCCtrl_Reserved1_LSB 0x1F
-#define QIB_6120_IBCCtrl_Reserved1_RMASK 0x1
-#define QIB_6120_IBCCtrl_MaxPktLen_LSB 0x14
-#define QIB_6120_IBCCtrl_MaxPktLen_RMASK 0x7FF
-#define QIB_6120_IBCCtrl_LinkCmd_LSB 0x12
-#define QIB_6120_IBCCtrl_LinkCmd_RMASK 0x3
-#define QIB_6120_IBCCtrl_LinkInitCmd_LSB 0x10
-#define QIB_6120_IBCCtrl_LinkInitCmd_RMASK 0x3
-#define QIB_6120_IBCCtrl_FlowCtrlWaterMark_LSB 0x8
-#define QIB_6120_IBCCtrl_FlowCtrlWaterMark_RMASK 0xFF
-#define QIB_6120_IBCCtrl_FlowCtrlPeriod_LSB 0x0
-#define QIB_6120_IBCCtrl_FlowCtrlPeriod_RMASK 0xFF
-
-#define QIB_6120_EXTStatus_OFFS 0xD0
-#define QIB_6120_EXTStatus_GPIOIn_LSB 0x30
-#define QIB_6120_EXTStatus_GPIOIn_RMASK 0xFFFF
-#define QIB_6120_EXTStatus_Reserved_LSB 0x20
-#define QIB_6120_EXTStatus_Reserved_RMASK 0xFFFF
-#define QIB_6120_EXTStatus_Reserved1_LSB 0x10
-#define QIB_6120_EXTStatus_Reserved1_RMASK 0xFFFF
-#define QIB_6120_EXTStatus_MemBISTFoundErr_LSB 0xF
-#define QIB_6120_EXTStatus_MemBISTFoundErr_RMASK 0x1
-#define QIB_6120_EXTStatus_MemBISTEndTest_LSB 0xE
-#define QIB_6120_EXTStatus_MemBISTEndTest_RMASK 0x1
-#define QIB_6120_EXTStatus_Reserved2_LSB 0x0
-#define QIB_6120_EXTStatus_Reserved2_RMASK 0x3FFF
-
-#define QIB_6120_EXTCtrl_OFFS 0xD8
-#define QIB_6120_EXTCtrl_GPIOOe_LSB 0x30
-#define QIB_6120_EXTCtrl_GPIOOe_RMASK 0xFFFF
-#define QIB_6120_EXTCtrl_GPIOInvert_LSB 0x20
-#define QIB_6120_EXTCtrl_GPIOInvert_RMASK 0xFFFF
-#define QIB_6120_EXTCtrl_Reserved_LSB 0x4
-#define QIB_6120_EXTCtrl_Reserved_RMASK 0xFFFFFFF
-#define QIB_6120_EXTCtrl_LEDPriPortGreenOn_LSB 0x3
-#define QIB_6120_EXTCtrl_LEDPriPortGreenOn_RMASK 0x1
-#define QIB_6120_EXTCtrl_LEDPriPortYellowOn_LSB 0x2
-#define QIB_6120_EXTCtrl_LEDPriPortYellowOn_RMASK 0x1
-#define QIB_6120_EXTCtrl_LEDGblOkGreenOn_LSB 0x1
-#define QIB_6120_EXTCtrl_LEDGblOkGreenOn_RMASK 0x1
-#define QIB_6120_EXTCtrl_LEDGblErrRedOff_LSB 0x0
-#define QIB_6120_EXTCtrl_LEDGblErrRedOff_RMASK 0x1
-
-#define QIB_6120_GPIOOut_OFFS 0xE0
-
-#define QIB_6120_GPIOMask_OFFS 0xE8
-
-#define QIB_6120_GPIOStatus_OFFS 0xF0
-
-#define QIB_6120_GPIOClear_OFFS 0xF8
-
-#define QIB_6120_RcvCtrl_OFFS 0x100
-#define QIB_6120_RcvCtrl_TailUpd_LSB 0x1F
-#define QIB_6120_RcvCtrl_TailUpd_RMASK 0x1
-#define QIB_6120_RcvCtrl_RcvPartitionKeyDisable_LSB 0x1E
-#define QIB_6120_RcvCtrl_RcvPartitionKeyDisable_RMASK 0x1
-#define QIB_6120_RcvCtrl_Reserved_LSB 0x15
-#define QIB_6120_RcvCtrl_Reserved_RMASK 0x1FF
-#define QIB_6120_RcvCtrl_IntrAvail_LSB 0x10
-#define QIB_6120_RcvCtrl_IntrAvail_RMASK 0x1F
-#define QIB_6120_RcvCtrl_Reserved1_LSB 0x9
-#define QIB_6120_RcvCtrl_Reserved1_RMASK 0x7F
-#define QIB_6120_RcvCtrl_Reserved2_LSB 0x5
-#define QIB_6120_RcvCtrl_Reserved2_RMASK 0xF
-#define QIB_6120_RcvCtrl_PortEnable_LSB 0x0
-#define QIB_6120_RcvCtrl_PortEnable_RMASK 0x1F
-
-#define QIB_6120_RcvBTHQP_OFFS 0x108
-#define QIB_6120_RcvBTHQP_BTHQP_Mask_LSB 0x1E
-#define QIB_6120_RcvBTHQP_BTHQP_Mask_RMASK 0x3
-#define QIB_6120_RcvBTHQP_Reserved_LSB 0x18
-#define QIB_6120_RcvBTHQP_Reserved_RMASK 0x3F
-#define QIB_6120_RcvBTHQP_RcvBTHQP_LSB 0x0
-#define QIB_6120_RcvBTHQP_RcvBTHQP_RMASK 0xFFFFFF
-
-#define QIB_6120_RcvHdrSize_OFFS 0x110
-
-#define QIB_6120_RcvHdrCnt_OFFS 0x118
-
-#define QIB_6120_RcvHdrEntSize_OFFS 0x120
-
-#define QIB_6120_RcvTIDBase_OFFS 0x128
-
-#define QIB_6120_RcvTIDCnt_OFFS 0x130
-
-#define QIB_6120_RcvEgrBase_OFFS 0x138
-
-#define QIB_6120_RcvEgrCnt_OFFS 0x140
-
-#define QIB_6120_RcvBufBase_OFFS 0x148
-
-#define QIB_6120_RcvBufSize_OFFS 0x150
-
-#define QIB_6120_RxIntMemBase_OFFS 0x158
-
-#define QIB_6120_RxIntMemSize_OFFS 0x160
-
-#define QIB_6120_RcvPartitionKey_OFFS 0x168
-
-#define QIB_6120_RcvPktLEDCnt_OFFS 0x178
-#define QIB_6120_RcvPktLEDCnt_ONperiod_LSB 0x20
-#define QIB_6120_RcvPktLEDCnt_ONperiod_RMASK 0xFFFFFFFF
-#define QIB_6120_RcvPktLEDCnt_OFFperiod_LSB 0x0
-#define QIB_6120_RcvPktLEDCnt_OFFperiod_RMASK 0xFFFFFFFF
-
-#define QIB_6120_SendCtrl_OFFS 0x1C0
-#define QIB_6120_SendCtrl_Disarm_LSB 0x1F
-#define QIB_6120_SendCtrl_Disarm_RMASK 0x1
-#define QIB_6120_SendCtrl_Reserved_LSB 0x17
-#define QIB_6120_SendCtrl_Reserved_RMASK 0xFF
-#define QIB_6120_SendCtrl_DisarmPIOBuf_LSB 0x10
-#define QIB_6120_SendCtrl_DisarmPIOBuf_RMASK 0x7F
-#define QIB_6120_SendCtrl_Reserved1_LSB 0x4
-#define QIB_6120_SendCtrl_Reserved1_RMASK 0xFFF
-#define QIB_6120_SendCtrl_PIOEnable_LSB 0x3
-#define QIB_6120_SendCtrl_PIOEnable_RMASK 0x1
-#define QIB_6120_SendCtrl_PIOBufAvailUpd_LSB 0x2
-#define QIB_6120_SendCtrl_PIOBufAvailUpd_RMASK 0x1
-#define QIB_6120_SendCtrl_PIOIntBufAvail_LSB 0x1
-#define QIB_6120_SendCtrl_PIOIntBufAvail_RMASK 0x1
-#define QIB_6120_SendCtrl_Abort_LSB 0x0
-#define QIB_6120_SendCtrl_Abort_RMASK 0x1
-
-#define QIB_6120_SendPIOBufBase_OFFS 0x1C8
-#define QIB_6120_SendPIOBufBase_Reserved_LSB 0x35
-#define QIB_6120_SendPIOBufBase_Reserved_RMASK 0x7FF
-#define QIB_6120_SendPIOBufBase_BaseAddr_LargePIO_LSB 0x20
-#define QIB_6120_SendPIOBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
-#define QIB_6120_SendPIOBufBase_Reserved1_LSB 0x15
-#define QIB_6120_SendPIOBufBase_Reserved1_RMASK 0x7FF
-#define QIB_6120_SendPIOBufBase_BaseAddr_SmallPIO_LSB 0x0
-#define QIB_6120_SendPIOBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
-
-#define QIB_6120_SendPIOSize_OFFS 0x1D0
-#define QIB_6120_SendPIOSize_Reserved_LSB 0x2D
-#define QIB_6120_SendPIOSize_Reserved_RMASK 0xFFFFF
-#define QIB_6120_SendPIOSize_Size_LargePIO_LSB 0x20
-#define QIB_6120_SendPIOSize_Size_LargePIO_RMASK 0x1FFF
-#define QIB_6120_SendPIOSize_Reserved1_LSB 0xC
-#define QIB_6120_SendPIOSize_Reserved1_RMASK 0xFFFFF
-#define QIB_6120_SendPIOSize_Size_SmallPIO_LSB 0x0
-#define QIB_6120_SendPIOSize_Size_SmallPIO_RMASK 0xFFF
-
-#define QIB_6120_SendPIOBufCnt_OFFS 0x1D8
-#define QIB_6120_SendPIOBufCnt_Reserved_LSB 0x24
-#define QIB_6120_SendPIOBufCnt_Reserved_RMASK 0xFFFFFFF
-#define QIB_6120_SendPIOBufCnt_Num_LargePIO_LSB 0x20
-#define QIB_6120_SendPIOBufCnt_Num_LargePIO_RMASK 0xF
-#define QIB_6120_SendPIOBufCnt_Reserved1_LSB 0x9
-#define QIB_6120_SendPIOBufCnt_Reserved1_RMASK 0x7FFFFF
-#define QIB_6120_SendPIOBufCnt_Num_SmallPIO_LSB 0x0
-#define QIB_6120_SendPIOBufCnt_Num_SmallPIO_RMASK 0x1FF
-
-#define QIB_6120_SendPIOAvailAddr_OFFS 0x1E0
-#define QIB_6120_SendPIOAvailAddr_SendPIOAvailAddr_LSB 0x6
-#define QIB_6120_SendPIOAvailAddr_SendPIOAvailAddr_RMASK 0x3FFFFFFFF
-#define QIB_6120_SendPIOAvailAddr_Reserved_LSB 0x0
-#define QIB_6120_SendPIOAvailAddr_Reserved_RMASK 0x3F
-
-#define QIB_6120_SendBufErr0_OFFS 0x240
-#define QIB_6120_SendBufErr0_SendBufErrPIO_63_0_LSB 0x0
-#define QIB_6120_SendBufErr0_SendBufErrPIO_63_0_RMASK 0x0
-
-#define QIB_6120_RcvHdrAddr0_OFFS 0x280
-#define QIB_6120_RcvHdrAddr0_RcvHdrAddr0_LSB 0x2
-#define QIB_6120_RcvHdrAddr0_RcvHdrAddr0_RMASK 0x3FFFFFFFFF
-#define QIB_6120_RcvHdrAddr0_Reserved_LSB 0x0
-#define QIB_6120_RcvHdrAddr0_Reserved_RMASK 0x3
-
-#define QIB_6120_RcvHdrTailAddr0_OFFS 0x300
-#define QIB_6120_RcvHdrTailAddr0_RcvHdrTailAddr0_LSB 0x2
-#define QIB_6120_RcvHdrTailAddr0_RcvHdrTailAddr0_RMASK 0x3FFFFFFFFF
-#define QIB_6120_RcvHdrTailAddr0_Reserved_LSB 0x0
-#define QIB_6120_RcvHdrTailAddr0_Reserved_RMASK 0x3
-
-#define QIB_6120_SerdesCfg0_OFFS 0x3C0
-#define QIB_6120_SerdesCfg0_DisableIBTxIdleDetect_LSB 0x3F
-#define QIB_6120_SerdesCfg0_DisableIBTxIdleDetect_RMASK 0x1
-#define QIB_6120_SerdesCfg0_Reserved_LSB 0x38
-#define QIB_6120_SerdesCfg0_Reserved_RMASK 0x7F
-#define QIB_6120_SerdesCfg0_RxEqCtl_LSB 0x36
-#define QIB_6120_SerdesCfg0_RxEqCtl_RMASK 0x3
-#define QIB_6120_SerdesCfg0_TxTermAdj_LSB 0x34
-#define QIB_6120_SerdesCfg0_TxTermAdj_RMASK 0x3
-#define QIB_6120_SerdesCfg0_RxTermAdj_LSB 0x32
-#define QIB_6120_SerdesCfg0_RxTermAdj_RMASK 0x3
-#define QIB_6120_SerdesCfg0_TermAdj1_LSB 0x31
-#define QIB_6120_SerdesCfg0_TermAdj1_RMASK 0x1
-#define QIB_6120_SerdesCfg0_TermAdj0_LSB 0x30
-#define QIB_6120_SerdesCfg0_TermAdj0_RMASK 0x1
-#define QIB_6120_SerdesCfg0_LPBKA_LSB 0x2F
-#define QIB_6120_SerdesCfg0_LPBKA_RMASK 0x1
-#define QIB_6120_SerdesCfg0_LPBKB_LSB 0x2E
-#define QIB_6120_SerdesCfg0_LPBKB_RMASK 0x1
-#define QIB_6120_SerdesCfg0_LPBKC_LSB 0x2D
-#define QIB_6120_SerdesCfg0_LPBKC_RMASK 0x1
-#define QIB_6120_SerdesCfg0_LPBKD_LSB 0x2C
-#define QIB_6120_SerdesCfg0_LPBKD_RMASK 0x1
-#define QIB_6120_SerdesCfg0_PW_LSB 0x2B
-#define QIB_6120_SerdesCfg0_PW_RMASK 0x1
-#define QIB_6120_SerdesCfg0_RefSel_LSB 0x29
-#define QIB_6120_SerdesCfg0_RefSel_RMASK 0x3
-#define QIB_6120_SerdesCfg0_ParReset_LSB 0x28
-#define QIB_6120_SerdesCfg0_ParReset_RMASK 0x1
-#define QIB_6120_SerdesCfg0_ParLPBK_LSB 0x27
-#define QIB_6120_SerdesCfg0_ParLPBK_RMASK 0x1
-#define QIB_6120_SerdesCfg0_OffsetEn_LSB 0x26
-#define QIB_6120_SerdesCfg0_OffsetEn_RMASK 0x1
-#define QIB_6120_SerdesCfg0_Offset_LSB 0x1E
-#define QIB_6120_SerdesCfg0_Offset_RMASK 0xFF
-#define QIB_6120_SerdesCfg0_L2PwrDn_LSB 0x1D
-#define QIB_6120_SerdesCfg0_L2PwrDn_RMASK 0x1
-#define QIB_6120_SerdesCfg0_ResetPLL_LSB 0x1C
-#define QIB_6120_SerdesCfg0_ResetPLL_RMASK 0x1
-#define QIB_6120_SerdesCfg0_RxTermEnX_LSB 0x18
-#define QIB_6120_SerdesCfg0_RxTermEnX_RMASK 0xF
-#define QIB_6120_SerdesCfg0_BeaconTxEnX_LSB 0x14
-#define QIB_6120_SerdesCfg0_BeaconTxEnX_RMASK 0xF
-#define QIB_6120_SerdesCfg0_RxDetEnX_LSB 0x10
-#define QIB_6120_SerdesCfg0_RxDetEnX_RMASK 0xF
-#define QIB_6120_SerdesCfg0_TxIdeEnX_LSB 0xC
-#define QIB_6120_SerdesCfg0_TxIdeEnX_RMASK 0xF
-#define QIB_6120_SerdesCfg0_RxIdleEnX_LSB 0x8
-#define QIB_6120_SerdesCfg0_RxIdleEnX_RMASK 0xF
-#define QIB_6120_SerdesCfg0_L1PwrDnA_LSB 0x7
-#define QIB_6120_SerdesCfg0_L1PwrDnA_RMASK 0x1
-#define QIB_6120_SerdesCfg0_L1PwrDnB_LSB 0x6
-#define QIB_6120_SerdesCfg0_L1PwrDnB_RMASK 0x1
-#define QIB_6120_SerdesCfg0_L1PwrDnC_LSB 0x5
-#define QIB_6120_SerdesCfg0_L1PwrDnC_RMASK 0x1
-#define QIB_6120_SerdesCfg0_L1PwrDnD_LSB 0x4
-#define QIB_6120_SerdesCfg0_L1PwrDnD_RMASK 0x1
-#define QIB_6120_SerdesCfg0_ResetA_LSB 0x3
-#define QIB_6120_SerdesCfg0_ResetA_RMASK 0x1
-#define QIB_6120_SerdesCfg0_ResetB_LSB 0x2
-#define QIB_6120_SerdesCfg0_ResetB_RMASK 0x1
-#define QIB_6120_SerdesCfg0_ResetC_LSB 0x1
-#define QIB_6120_SerdesCfg0_ResetC_RMASK 0x1
-#define QIB_6120_SerdesCfg0_ResetD_LSB 0x0
-#define QIB_6120_SerdesCfg0_ResetD_RMASK 0x1
-
-#define QIB_6120_SerdesStat_OFFS 0x3D0
-#define QIB_6120_SerdesStat_Reserved_LSB 0xC
-#define QIB_6120_SerdesStat_Reserved_RMASK 0xFFFFFFFFFFFFF
-#define QIB_6120_SerdesStat_BeaconDetA_LSB 0xB
-#define QIB_6120_SerdesStat_BeaconDetA_RMASK 0x1
-#define QIB_6120_SerdesStat_BeaconDetB_LSB 0xA
-#define QIB_6120_SerdesStat_BeaconDetB_RMASK 0x1
-#define QIB_6120_SerdesStat_BeaconDetC_LSB 0x9
-#define QIB_6120_SerdesStat_BeaconDetC_RMASK 0x1
-#define QIB_6120_SerdesStat_BeaconDetD_LSB 0x8
-#define QIB_6120_SerdesStat_BeaconDetD_RMASK 0x1
-#define QIB_6120_SerdesStat_RxDetA_LSB 0x7
-#define QIB_6120_SerdesStat_RxDetA_RMASK 0x1
-#define QIB_6120_SerdesStat_RxDetB_LSB 0x6
-#define QIB_6120_SerdesStat_RxDetB_RMASK 0x1
-#define QIB_6120_SerdesStat_RxDetC_LSB 0x5
-#define QIB_6120_SerdesStat_RxDetC_RMASK 0x1
-#define QIB_6120_SerdesStat_RxDetD_LSB 0x4
-#define QIB_6120_SerdesStat_RxDetD_RMASK 0x1
-#define QIB_6120_SerdesStat_TxIdleDetA_LSB 0x3
-#define QIB_6120_SerdesStat_TxIdleDetA_RMASK 0x1
-#define QIB_6120_SerdesStat_TxIdleDetB_LSB 0x2
-#define QIB_6120_SerdesStat_TxIdleDetB_RMASK 0x1
-#define QIB_6120_SerdesStat_TxIdleDetC_LSB 0x1
-#define QIB_6120_SerdesStat_TxIdleDetC_RMASK 0x1
-#define QIB_6120_SerdesStat_TxIdleDetD_LSB 0x0
-#define QIB_6120_SerdesStat_TxIdleDetD_RMASK 0x1
-
-#define QIB_6120_XGXSCfg_OFFS 0x3D8
-#define QIB_6120_XGXSCfg_ArmLaunchErrorDisable_LSB 0x3F
-#define QIB_6120_XGXSCfg_ArmLaunchErrorDisable_RMASK 0x1
-#define QIB_6120_XGXSCfg_Reserved_LSB 0x17
-#define QIB_6120_XGXSCfg_Reserved_RMASK 0xFFFFFFFFFF
-#define QIB_6120_XGXSCfg_polarity_inv_LSB 0x13
-#define QIB_6120_XGXSCfg_polarity_inv_RMASK 0xF
-#define QIB_6120_XGXSCfg_link_sync_mask_LSB 0x9
-#define QIB_6120_XGXSCfg_link_sync_mask_RMASK 0x3FF
-#define QIB_6120_XGXSCfg_port_addr_LSB 0x4
-#define QIB_6120_XGXSCfg_port_addr_RMASK 0x1F
-#define QIB_6120_XGXSCfg_mdd_30_LSB 0x3
-#define QIB_6120_XGXSCfg_mdd_30_RMASK 0x1
-#define QIB_6120_XGXSCfg_xcv_resetn_LSB 0x2
-#define QIB_6120_XGXSCfg_xcv_resetn_RMASK 0x1
-#define QIB_6120_XGXSCfg_Reserved1_LSB 0x1
-#define QIB_6120_XGXSCfg_Reserved1_RMASK 0x1
-#define QIB_6120_XGXSCfg_tx_rx_resetn_LSB 0x0
-#define QIB_6120_XGXSCfg_tx_rx_resetn_RMASK 0x1
-
-#define QIB_6120_LBIntCnt_OFFS 0x12000
-
-#define QIB_6120_LBFlowStallCnt_OFFS 0x12008
-
-#define QIB_6120_TxUnsupVLErrCnt_OFFS 0x12018
-
-#define QIB_6120_TxDataPktCnt_OFFS 0x12020
-
-#define QIB_6120_TxFlowPktCnt_OFFS 0x12028
-
-#define QIB_6120_TxDwordCnt_OFFS 0x12030
-
-#define QIB_6120_TxLenErrCnt_OFFS 0x12038
-
-#define QIB_6120_TxMaxMinLenErrCnt_OFFS 0x12040
-
-#define QIB_6120_TxUnderrunCnt_OFFS 0x12048
-
-#define QIB_6120_TxFlowStallCnt_OFFS 0x12050
-
-#define QIB_6120_TxDroppedPktCnt_OFFS 0x12058
-
-#define QIB_6120_RxDroppedPktCnt_OFFS 0x12060
-
-#define QIB_6120_RxDataPktCnt_OFFS 0x12068
-
-#define QIB_6120_RxFlowPktCnt_OFFS 0x12070
-
-#define QIB_6120_RxDwordCnt_OFFS 0x12078
-
-#define QIB_6120_RxLenErrCnt_OFFS 0x12080
-
-#define QIB_6120_RxMaxMinLenErrCnt_OFFS 0x12088
-
-#define QIB_6120_RxICRCErrCnt_OFFS 0x12090
-
-#define QIB_6120_RxVCRCErrCnt_OFFS 0x12098
-
-#define QIB_6120_RxFlowCtrlErrCnt_OFFS 0x120A0
-
-#define QIB_6120_RxBadFormatCnt_OFFS 0x120A8
-
-#define QIB_6120_RxLinkProblemCnt_OFFS 0x120B0
-
-#define QIB_6120_RxEBPCnt_OFFS 0x120B8
-
-#define QIB_6120_RxLPCRCErrCnt_OFFS 0x120C0
-
-#define QIB_6120_RxBufOvflCnt_OFFS 0x120C8
-
-#define QIB_6120_RxTIDFullErrCnt_OFFS 0x120D0
-
-#define QIB_6120_RxTIDValidErrCnt_OFFS 0x120D8
-
-#define QIB_6120_RxPKeyMismatchCnt_OFFS 0x120E0
-
-#define QIB_6120_RxP0HdrEgrOvflCnt_OFFS 0x120E8
-
-#define QIB_6120_IBStatusChangeCnt_OFFS 0x12140
-
-#define QIB_6120_IBLinkErrRecoveryCnt_OFFS 0x12148
-
-#define QIB_6120_IBLinkDownedCnt_OFFS 0x12150
-
-#define QIB_6120_IBSymbolErrCnt_OFFS 0x12158
-
-#define QIB_6120_PcieRetryBufDiagQwordCnt_OFFS 0x12170
-
-#define QIB_6120_RcvEgrArray0_OFFS 0x14000
-
-#define QIB_6120_RcvTIDArray0_OFFS 0x54000
-
-#define QIB_6120_PIOLaunchFIFO_OFFS 0x64000
-
-#define QIB_6120_SendPIOpbcCache_OFFS 0x64800
-
-#define QIB_6120_RcvBuf1_OFFS 0x72000
-
-#define QIB_6120_RcvBuf2_OFFS 0x75000
-
-#define QIB_6120_RcvFlags_OFFS 0x77000
-
-#define QIB_6120_RcvLookupBuf1_OFFS 0x79000
-
-#define QIB_6120_RcvDMABuf_OFFS 0x7B000
-
-#define QIB_6120_MiscRXEIntMem_OFFS 0x7C000
-
-#define QIB_6120_PCIERcvBuf_OFFS 0x80000
-
-#define QIB_6120_PCIERetryBuf_OFFS 0x82000
-
-#define QIB_6120_PCIERcvBufRdToWrAddr_OFFS 0x84000
-
-#define QIB_6120_PIOBuf0_MA_OFFS 0x100000
diff --git a/drivers/infiniband/hw/qib/qib_7220.h b/drivers/infiniband/hw/qib/qib_7220.h
deleted file mode 100644
index a5356cb4252e..000000000000
--- a/drivers/infiniband/hw/qib/qib_7220.h
+++ /dev/null
@@ -1,149 +0,0 @@
-#ifndef _QIB_7220_H
-#define _QIB_7220_H
-/*
- * Copyright (c) 2007, 2009, 2010 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/* grab register-defs auto-generated by HW */
-#include "qib_7220_regs.h"
-
-/* The number of eager receive TIDs for context zero. */
-#define IBA7220_KRCVEGRCNT 2048U
-
-#define IB_7220_LT_STATE_CFGRCVFCFG 0x09
-#define IB_7220_LT_STATE_CFGWAITRMT 0x0a
-#define IB_7220_LT_STATE_TXREVLANES 0x0d
-#define IB_7220_LT_STATE_CFGENH 0x10
-
-struct qib_chip_specific {
- u64 __iomem *cregbase;
- u64 *cntrs;
- u64 *portcntrs;
- spinlock_t sdepb_lock; /* serdes EPB bus */
- spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
- spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
- u64 hwerrmask;
- u64 errormask;
- u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
- u64 gpio_mask; /* shadow the gpio mask register */
- u64 extctrl; /* shadow the gpio output enable, etc... */
- u32 ncntrs;
- u32 nportcntrs;
- u32 cntrnamelen;
- u32 portcntrnamelen;
- u32 numctxts;
- u32 rcvegrcnt;
- u32 autoneg_tries;
- u32 serdes_first_init_done;
- u32 sdmabufcnt;
- u32 lastbuf_for_pio;
- u32 updthresh; /* current AvailUpdThld */
- u32 updthresh_dflt; /* default AvailUpdThld */
- int irq;
- u8 presets_needed;
- u8 relock_timer_active;
- char emsgbuf[128];
- char sdmamsgbuf[192];
- char bitsmsgbuf[64];
- struct timer_list relock_timer;
- unsigned int relock_interval; /* in jiffies */
-};
-
-struct qib_chippport_specific {
- struct qib_pportdata pportdata;
- wait_queue_head_t autoneg_wait;
- struct delayed_work autoneg_work;
- struct timer_list chase_timer;
- /*
- * these 5 fields are used to establish deltas for IB symbol
- * errors and linkrecovery errors. They can be reported on
- * some chips during link negotiation prior to INIT, and with
- * DDR when faking DDR negotiations with non-IBTA switches.
- * The chip counters are adjusted at driver unload if there is
- * a non-zero delta.
- */
- u64 ibdeltainprog;
- u64 ibsymdelta;
- u64 ibsymsnap;
- u64 iblnkerrdelta;
- u64 iblnkerrsnap;
- u64 ibcctrl; /* kr_ibcctrl shadow */
- u64 ibcddrctrl; /* kr_ibcddrctrl shadow */
- unsigned long chase_end;
- u32 last_delay_mult;
-};
-
-/*
- * This header file provides the declarations and common definitions
- * for (mostly) manipulation of the SerDes blocks within the IBA7220.
- * the functions declared should only be called from within other
- * 7220-related files such as qib_iba7220.c or qib_sd7220.c.
- */
-int qib_sd7220_presets(struct qib_devdata *dd);
-int qib_sd7220_init(struct qib_devdata *dd);
-void qib_sd7220_clr_ibpar(struct qib_devdata *);
-/*
- * Below used for sdnum parameter, selecting one of the two sections
- * used for PCIe, or the single SerDes used for IB, which is the
- * only one currently used
- */
-#define IB_7220_SERDES 2
-
-static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
- const u16 regno)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return -1;
- return readl((u32 __iomem *)&dd->kregbase[regno]);
-}
-
-static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
- const u16 regno)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return -1;
-
- return readq(&dd->kregbase[regno]);
-}
-
-static inline void qib_write_kreg(const struct qib_devdata *dd,
- const u16 regno, u64 value)
-{
- if (dd->kregbase)
- writeq(value, &dd->kregbase[regno]);
-}
-
-void set_7220_relock_poll(struct qib_devdata *, int);
-void shutdown_7220_relock_poll(struct qib_devdata *);
-void toggle_7220_rclkrls(struct qib_devdata *);
-
-
-#endif /* _QIB_7220_H */
diff --git a/drivers/infiniband/hw/qib/qib_7220_regs.h b/drivers/infiniband/hw/qib/qib_7220_regs.h
deleted file mode 100644
index 0da5bb750e52..000000000000
--- a/drivers/infiniband/hw/qib/qib_7220_regs.h
+++ /dev/null
@@ -1,1496 +0,0 @@
-/*
- * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
- *
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
-
-#define QIB_7220_Revision_OFFS 0x0
-#define QIB_7220_Revision_R_Simulator_LSB 0x3F
-#define QIB_7220_Revision_R_Simulator_RMASK 0x1
-#define QIB_7220_Revision_R_Emulation_LSB 0x3E
-#define QIB_7220_Revision_R_Emulation_RMASK 0x1
-#define QIB_7220_Revision_R_Emulation_Revcode_LSB 0x28
-#define QIB_7220_Revision_R_Emulation_Revcode_RMASK 0x3FFFFF
-#define QIB_7220_Revision_BoardID_LSB 0x20
-#define QIB_7220_Revision_BoardID_RMASK 0xFF
-#define QIB_7220_Revision_R_SW_LSB 0x18
-#define QIB_7220_Revision_R_SW_RMASK 0xFF
-#define QIB_7220_Revision_R_Arch_LSB 0x10
-#define QIB_7220_Revision_R_Arch_RMASK 0xFF
-#define QIB_7220_Revision_R_ChipRevMajor_LSB 0x8
-#define QIB_7220_Revision_R_ChipRevMajor_RMASK 0xFF
-#define QIB_7220_Revision_R_ChipRevMinor_LSB 0x0
-#define QIB_7220_Revision_R_ChipRevMinor_RMASK 0xFF
-
-#define QIB_7220_Control_OFFS 0x8
-#define QIB_7220_Control_SyncResetExceptPcieIRAMRST_LSB 0x7
-#define QIB_7220_Control_SyncResetExceptPcieIRAMRST_RMASK 0x1
-#define QIB_7220_Control_PCIECplQDiagEn_LSB 0x6
-#define QIB_7220_Control_PCIECplQDiagEn_RMASK 0x1
-#define QIB_7220_Control_Reserved_LSB 0x5
-#define QIB_7220_Control_Reserved_RMASK 0x1
-#define QIB_7220_Control_TxLatency_LSB 0x4
-#define QIB_7220_Control_TxLatency_RMASK 0x1
-#define QIB_7220_Control_PCIERetryBufDiagEn_LSB 0x3
-#define QIB_7220_Control_PCIERetryBufDiagEn_RMASK 0x1
-#define QIB_7220_Control_LinkEn_LSB 0x2
-#define QIB_7220_Control_LinkEn_RMASK 0x1
-#define QIB_7220_Control_FreezeMode_LSB 0x1
-#define QIB_7220_Control_FreezeMode_RMASK 0x1
-#define QIB_7220_Control_SyncReset_LSB 0x0
-#define QIB_7220_Control_SyncReset_RMASK 0x1
-
-#define QIB_7220_PageAlign_OFFS 0x10
-
-#define QIB_7220_PortCnt_OFFS 0x18
-
-#define QIB_7220_SendRegBase_OFFS 0x30
-
-#define QIB_7220_UserRegBase_OFFS 0x38
-
-#define QIB_7220_CntrRegBase_OFFS 0x40
-
-#define QIB_7220_Scratch_OFFS 0x48
-
-#define QIB_7220_IntMask_OFFS 0x68
-#define QIB_7220_IntMask_SDmaIntMask_LSB 0x3F
-#define QIB_7220_IntMask_SDmaIntMask_RMASK 0x1
-#define QIB_7220_IntMask_SDmaDisabledMasked_LSB 0x3E
-#define QIB_7220_IntMask_SDmaDisabledMasked_RMASK 0x1
-#define QIB_7220_IntMask_Reserved_LSB 0x31
-#define QIB_7220_IntMask_Reserved_RMASK 0x1FFF
-#define QIB_7220_IntMask_RcvUrg16IntMask_LSB 0x30
-#define QIB_7220_IntMask_RcvUrg16IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg15IntMask_LSB 0x2F
-#define QIB_7220_IntMask_RcvUrg15IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg14IntMask_LSB 0x2E
-#define QIB_7220_IntMask_RcvUrg14IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg13IntMask_LSB 0x2D
-#define QIB_7220_IntMask_RcvUrg13IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg12IntMask_LSB 0x2C
-#define QIB_7220_IntMask_RcvUrg12IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg11IntMask_LSB 0x2B
-#define QIB_7220_IntMask_RcvUrg11IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg10IntMask_LSB 0x2A
-#define QIB_7220_IntMask_RcvUrg10IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg9IntMask_LSB 0x29
-#define QIB_7220_IntMask_RcvUrg9IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg8IntMask_LSB 0x28
-#define QIB_7220_IntMask_RcvUrg8IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg7IntMask_LSB 0x27
-#define QIB_7220_IntMask_RcvUrg7IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg6IntMask_LSB 0x26
-#define QIB_7220_IntMask_RcvUrg6IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg5IntMask_LSB 0x25
-#define QIB_7220_IntMask_RcvUrg5IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg4IntMask_LSB 0x24
-#define QIB_7220_IntMask_RcvUrg4IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg3IntMask_LSB 0x23
-#define QIB_7220_IntMask_RcvUrg3IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg2IntMask_LSB 0x22
-#define QIB_7220_IntMask_RcvUrg2IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg1IntMask_LSB 0x21
-#define QIB_7220_IntMask_RcvUrg1IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg0IntMask_LSB 0x20
-#define QIB_7220_IntMask_RcvUrg0IntMask_RMASK 0x1
-#define QIB_7220_IntMask_ErrorIntMask_LSB 0x1F
-#define QIB_7220_IntMask_ErrorIntMask_RMASK 0x1
-#define QIB_7220_IntMask_PioSetIntMask_LSB 0x1E
-#define QIB_7220_IntMask_PioSetIntMask_RMASK 0x1
-#define QIB_7220_IntMask_PioBufAvailIntMask_LSB 0x1D
-#define QIB_7220_IntMask_PioBufAvailIntMask_RMASK 0x1
-#define QIB_7220_IntMask_assertGPIOIntMask_LSB 0x1C
-#define QIB_7220_IntMask_assertGPIOIntMask_RMASK 0x1
-#define QIB_7220_IntMask_IBSerdesTrimDoneIntMask_LSB 0x1B
-#define QIB_7220_IntMask_IBSerdesTrimDoneIntMask_RMASK 0x1
-#define QIB_7220_IntMask_JIntMask_LSB 0x1A
-#define QIB_7220_IntMask_JIntMask_RMASK 0x1
-#define QIB_7220_IntMask_Reserved1_LSB 0x11
-#define QIB_7220_IntMask_Reserved1_RMASK 0x1FF
-#define QIB_7220_IntMask_RcvAvail16IntMask_LSB 0x10
-#define QIB_7220_IntMask_RcvAvail16IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail15IntMask_LSB 0xF
-#define QIB_7220_IntMask_RcvAvail15IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail14IntMask_LSB 0xE
-#define QIB_7220_IntMask_RcvAvail14IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail13IntMask_LSB 0xD
-#define QIB_7220_IntMask_RcvAvail13IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail12IntMask_LSB 0xC
-#define QIB_7220_IntMask_RcvAvail12IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail11IntMask_LSB 0xB
-#define QIB_7220_IntMask_RcvAvail11IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail10IntMask_LSB 0xA
-#define QIB_7220_IntMask_RcvAvail10IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail9IntMask_LSB 0x9
-#define QIB_7220_IntMask_RcvAvail9IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail8IntMask_LSB 0x8
-#define QIB_7220_IntMask_RcvAvail8IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail7IntMask_LSB 0x7
-#define QIB_7220_IntMask_RcvAvail7IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail6IntMask_LSB 0x6
-#define QIB_7220_IntMask_RcvAvail6IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail5IntMask_LSB 0x5
-#define QIB_7220_IntMask_RcvAvail5IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail4IntMask_LSB 0x4
-#define QIB_7220_IntMask_RcvAvail4IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail3IntMask_LSB 0x3
-#define QIB_7220_IntMask_RcvAvail3IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail2IntMask_LSB 0x2
-#define QIB_7220_IntMask_RcvAvail2IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail1IntMask_LSB 0x1
-#define QIB_7220_IntMask_RcvAvail1IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail0IntMask_LSB 0x0
-#define QIB_7220_IntMask_RcvAvail0IntMask_RMASK 0x1
-
-#define QIB_7220_IntStatus_OFFS 0x70
-#define QIB_7220_IntStatus_SDmaInt_LSB 0x3F
-#define QIB_7220_IntStatus_SDmaInt_RMASK 0x1
-#define QIB_7220_IntStatus_SDmaDisabled_LSB 0x3E
-#define QIB_7220_IntStatus_SDmaDisabled_RMASK 0x1
-#define QIB_7220_IntStatus_Reserved_LSB 0x31
-#define QIB_7220_IntStatus_Reserved_RMASK 0x1FFF
-#define QIB_7220_IntStatus_RcvUrg16_LSB 0x30
-#define QIB_7220_IntStatus_RcvUrg16_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg15_LSB 0x2F
-#define QIB_7220_IntStatus_RcvUrg15_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg14_LSB 0x2E
-#define QIB_7220_IntStatus_RcvUrg14_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg13_LSB 0x2D
-#define QIB_7220_IntStatus_RcvUrg13_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg12_LSB 0x2C
-#define QIB_7220_IntStatus_RcvUrg12_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg11_LSB 0x2B
-#define QIB_7220_IntStatus_RcvUrg11_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg10_LSB 0x2A
-#define QIB_7220_IntStatus_RcvUrg10_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg9_LSB 0x29
-#define QIB_7220_IntStatus_RcvUrg9_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg8_LSB 0x28
-#define QIB_7220_IntStatus_RcvUrg8_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg7_LSB 0x27
-#define QIB_7220_IntStatus_RcvUrg7_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg6_LSB 0x26
-#define QIB_7220_IntStatus_RcvUrg6_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg5_LSB 0x25
-#define QIB_7220_IntStatus_RcvUrg5_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg4_LSB 0x24
-#define QIB_7220_IntStatus_RcvUrg4_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg3_LSB 0x23
-#define QIB_7220_IntStatus_RcvUrg3_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg2_LSB 0x22
-#define QIB_7220_IntStatus_RcvUrg2_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg1_LSB 0x21
-#define QIB_7220_IntStatus_RcvUrg1_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg0_LSB 0x20
-#define QIB_7220_IntStatus_RcvUrg0_RMASK 0x1
-#define QIB_7220_IntStatus_Error_LSB 0x1F
-#define QIB_7220_IntStatus_Error_RMASK 0x1
-#define QIB_7220_IntStatus_PioSent_LSB 0x1E
-#define QIB_7220_IntStatus_PioSent_RMASK 0x1
-#define QIB_7220_IntStatus_PioBufAvail_LSB 0x1D
-#define QIB_7220_IntStatus_PioBufAvail_RMASK 0x1
-#define QIB_7220_IntStatus_assertGPIO_LSB 0x1C
-#define QIB_7220_IntStatus_assertGPIO_RMASK 0x1
-#define QIB_7220_IntStatus_IBSerdesTrimDone_LSB 0x1B
-#define QIB_7220_IntStatus_IBSerdesTrimDone_RMASK 0x1
-#define QIB_7220_IntStatus_JInt_LSB 0x1A
-#define QIB_7220_IntStatus_JInt_RMASK 0x1
-#define QIB_7220_IntStatus_Reserved1_LSB 0x11
-#define QIB_7220_IntStatus_Reserved1_RMASK 0x1FF
-#define QIB_7220_IntStatus_RcvAvail16_LSB 0x10
-#define QIB_7220_IntStatus_RcvAvail16_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail15_LSB 0xF
-#define QIB_7220_IntStatus_RcvAvail15_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail14_LSB 0xE
-#define QIB_7220_IntStatus_RcvAvail14_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail13_LSB 0xD
-#define QIB_7220_IntStatus_RcvAvail13_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail12_LSB 0xC
-#define QIB_7220_IntStatus_RcvAvail12_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail11_LSB 0xB
-#define QIB_7220_IntStatus_RcvAvail11_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail10_LSB 0xA
-#define QIB_7220_IntStatus_RcvAvail10_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail9_LSB 0x9
-#define QIB_7220_IntStatus_RcvAvail9_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail8_LSB 0x8
-#define QIB_7220_IntStatus_RcvAvail8_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail7_LSB 0x7
-#define QIB_7220_IntStatus_RcvAvail7_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail6_LSB 0x6
-#define QIB_7220_IntStatus_RcvAvail6_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail5_LSB 0x5
-#define QIB_7220_IntStatus_RcvAvail5_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail4_LSB 0x4
-#define QIB_7220_IntStatus_RcvAvail4_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail3_LSB 0x3
-#define QIB_7220_IntStatus_RcvAvail3_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail2_LSB 0x2
-#define QIB_7220_IntStatus_RcvAvail2_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail1_LSB 0x1
-#define QIB_7220_IntStatus_RcvAvail1_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail0_LSB 0x0
-#define QIB_7220_IntStatus_RcvAvail0_RMASK 0x1
-
-#define QIB_7220_IntClear_OFFS 0x78
-#define QIB_7220_IntClear_SDmaIntClear_LSB 0x3F
-#define QIB_7220_IntClear_SDmaIntClear_RMASK 0x1
-#define QIB_7220_IntClear_SDmaDisabledClear_LSB 0x3E
-#define QIB_7220_IntClear_SDmaDisabledClear_RMASK 0x1
-#define QIB_7220_IntClear_Reserved_LSB 0x31
-#define QIB_7220_IntClear_Reserved_RMASK 0x1FFF
-#define QIB_7220_IntClear_RcvUrg16IntClear_LSB 0x30
-#define QIB_7220_IntClear_RcvUrg16IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg15IntClear_LSB 0x2F
-#define QIB_7220_IntClear_RcvUrg15IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg14IntClear_LSB 0x2E
-#define QIB_7220_IntClear_RcvUrg14IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg13IntClear_LSB 0x2D
-#define QIB_7220_IntClear_RcvUrg13IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg12IntClear_LSB 0x2C
-#define QIB_7220_IntClear_RcvUrg12IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg11IntClear_LSB 0x2B
-#define QIB_7220_IntClear_RcvUrg11IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg10IntClear_LSB 0x2A
-#define QIB_7220_IntClear_RcvUrg10IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg9IntClear_LSB 0x29
-#define QIB_7220_IntClear_RcvUrg9IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg8IntClear_LSB 0x28
-#define QIB_7220_IntClear_RcvUrg8IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg7IntClear_LSB 0x27
-#define QIB_7220_IntClear_RcvUrg7IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg6IntClear_LSB 0x26
-#define QIB_7220_IntClear_RcvUrg6IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg5IntClear_LSB 0x25
-#define QIB_7220_IntClear_RcvUrg5IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg4IntClear_LSB 0x24
-#define QIB_7220_IntClear_RcvUrg4IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg3IntClear_LSB 0x23
-#define QIB_7220_IntClear_RcvUrg3IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg2IntClear_LSB 0x22
-#define QIB_7220_IntClear_RcvUrg2IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg1IntClear_LSB 0x21
-#define QIB_7220_IntClear_RcvUrg1IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg0IntClear_LSB 0x20
-#define QIB_7220_IntClear_RcvUrg0IntClear_RMASK 0x1
-#define QIB_7220_IntClear_ErrorIntClear_LSB 0x1F
-#define QIB_7220_IntClear_ErrorIntClear_RMASK 0x1
-#define QIB_7220_IntClear_PioSetIntClear_LSB 0x1E
-#define QIB_7220_IntClear_PioSetIntClear_RMASK 0x1
-#define QIB_7220_IntClear_PioBufAvailIntClear_LSB 0x1D
-#define QIB_7220_IntClear_PioBufAvailIntClear_RMASK 0x1
-#define QIB_7220_IntClear_assertGPIOIntClear_LSB 0x1C
-#define QIB_7220_IntClear_assertGPIOIntClear_RMASK 0x1
-#define QIB_7220_IntClear_IBSerdesTrimDoneClear_LSB 0x1B
-#define QIB_7220_IntClear_IBSerdesTrimDoneClear_RMASK 0x1
-#define QIB_7220_IntClear_JIntClear_LSB 0x1A
-#define QIB_7220_IntClear_JIntClear_RMASK 0x1
-#define QIB_7220_IntClear_Reserved1_LSB 0x11
-#define QIB_7220_IntClear_Reserved1_RMASK 0x1FF
-#define QIB_7220_IntClear_RcvAvail16IntClear_LSB 0x10
-#define QIB_7220_IntClear_RcvAvail16IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail15IntClear_LSB 0xF
-#define QIB_7220_IntClear_RcvAvail15IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail14IntClear_LSB 0xE
-#define QIB_7220_IntClear_RcvAvail14IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail13IntClear_LSB 0xD
-#define QIB_7220_IntClear_RcvAvail13IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail12IntClear_LSB 0xC
-#define QIB_7220_IntClear_RcvAvail12IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail11IntClear_LSB 0xB
-#define QIB_7220_IntClear_RcvAvail11IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail10IntClear_LSB 0xA
-#define QIB_7220_IntClear_RcvAvail10IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail9IntClear_LSB 0x9
-#define QIB_7220_IntClear_RcvAvail9IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail8IntClear_LSB 0x8
-#define QIB_7220_IntClear_RcvAvail8IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail7IntClear_LSB 0x7
-#define QIB_7220_IntClear_RcvAvail7IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail6IntClear_LSB 0x6
-#define QIB_7220_IntClear_RcvAvail6IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail5IntClear_LSB 0x5
-#define QIB_7220_IntClear_RcvAvail5IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail4IntClear_LSB 0x4
-#define QIB_7220_IntClear_RcvAvail4IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail3IntClear_LSB 0x3
-#define QIB_7220_IntClear_RcvAvail3IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail2IntClear_LSB 0x2
-#define QIB_7220_IntClear_RcvAvail2IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail1IntClear_LSB 0x1
-#define QIB_7220_IntClear_RcvAvail1IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail0IntClear_LSB 0x0
-#define QIB_7220_IntClear_RcvAvail0IntClear_RMASK 0x1
-
-#define QIB_7220_ErrMask_OFFS 0x80
-#define QIB_7220_ErrMask_Reserved_LSB 0x36
-#define QIB_7220_ErrMask_Reserved_RMASK 0x3FF
-#define QIB_7220_ErrMask_InvalidEEPCmdMask_LSB 0x35
-#define QIB_7220_ErrMask_InvalidEEPCmdMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaDescAddrMisalignErrMask_LSB 0x34
-#define QIB_7220_ErrMask_SDmaDescAddrMisalignErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_HardwareErrMask_LSB 0x33
-#define QIB_7220_ErrMask_HardwareErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_ResetNegatedMask_LSB 0x32
-#define QIB_7220_ErrMask_ResetNegatedMask_RMASK 0x1
-#define QIB_7220_ErrMask_InvalidAddrErrMask_LSB 0x31
-#define QIB_7220_ErrMask_InvalidAddrErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_IBStatusChangedMask_LSB 0x30
-#define QIB_7220_ErrMask_IBStatusChangedMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaUnexpDataErrMask_LSB 0x2F
-#define QIB_7220_ErrMask_SDmaUnexpDataErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaMissingDwErrMask_LSB 0x2E
-#define QIB_7220_ErrMask_SDmaMissingDwErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaDwEnErrMask_LSB 0x2D
-#define QIB_7220_ErrMask_SDmaDwEnErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaRpyTagErrMask_LSB 0x2C
-#define QIB_7220_ErrMask_SDmaRpyTagErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDma1stDescErrMask_LSB 0x2B
-#define QIB_7220_ErrMask_SDma1stDescErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaBaseErrMask_LSB 0x2A
-#define QIB_7220_ErrMask_SDmaBaseErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaTailOutOfBoundErrMask_LSB 0x29
-#define QIB_7220_ErrMask_SDmaTailOutOfBoundErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaOutOfBoundErrMask_LSB 0x28
-#define QIB_7220_ErrMask_SDmaOutOfBoundErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaGenMismatchErrMask_LSB 0x27
-#define QIB_7220_ErrMask_SDmaGenMismatchErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendBufMisuseErrMask_LSB 0x26
-#define QIB_7220_ErrMask_SendBufMisuseErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendUnsupportedVLErrMask_LSB 0x25
-#define QIB_7220_ErrMask_SendUnsupportedVLErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendUnexpectedPktNumErrMask_LSB 0x24
-#define QIB_7220_ErrMask_SendUnexpectedPktNumErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendPioArmLaunchErrMask_LSB 0x23
-#define QIB_7220_ErrMask_SendPioArmLaunchErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendDroppedDataPktErrMask_LSB 0x22
-#define QIB_7220_ErrMask_SendDroppedDataPktErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendDroppedSmpPktErrMask_LSB 0x21
-#define QIB_7220_ErrMask_SendDroppedSmpPktErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendPktLenErrMask_LSB 0x20
-#define QIB_7220_ErrMask_SendPktLenErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendUnderRunErrMask_LSB 0x1F
-#define QIB_7220_ErrMask_SendUnderRunErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendMaxPktLenErrMask_LSB 0x1E
-#define QIB_7220_ErrMask_SendMaxPktLenErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendMinPktLenErrMask_LSB 0x1D
-#define QIB_7220_ErrMask_SendMinPktLenErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaDisabledErrMask_LSB 0x1C
-#define QIB_7220_ErrMask_SDmaDisabledErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendSpecialTriggerErrMask_LSB 0x1B
-#define QIB_7220_ErrMask_SendSpecialTriggerErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_Reserved1_LSB 0x12
-#define QIB_7220_ErrMask_Reserved1_RMASK 0x1FF
-#define QIB_7220_ErrMask_RcvIBLostLinkErrMask_LSB 0x11
-#define QIB_7220_ErrMask_RcvIBLostLinkErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvHdrErrMask_LSB 0x10
-#define QIB_7220_ErrMask_RcvHdrErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvHdrLenErrMask_LSB 0xF
-#define QIB_7220_ErrMask_RcvHdrLenErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvBadTidErrMask_LSB 0xE
-#define QIB_7220_ErrMask_RcvBadTidErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvHdrFullErrMask_LSB 0xD
-#define QIB_7220_ErrMask_RcvHdrFullErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvEgrFullErrMask_LSB 0xC
-#define QIB_7220_ErrMask_RcvEgrFullErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvBadVersionErrMask_LSB 0xB
-#define QIB_7220_ErrMask_RcvBadVersionErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvIBFlowErrMask_LSB 0xA
-#define QIB_7220_ErrMask_RcvIBFlowErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvEBPErrMask_LSB 0x9
-#define QIB_7220_ErrMask_RcvEBPErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvUnsupportedVLErrMask_LSB 0x8
-#define QIB_7220_ErrMask_RcvUnsupportedVLErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvUnexpectedCharErrMask_LSB 0x7
-#define QIB_7220_ErrMask_RcvUnexpectedCharErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvShortPktLenErrMask_LSB 0x6
-#define QIB_7220_ErrMask_RcvShortPktLenErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvLongPktLenErrMask_LSB 0x5
-#define QIB_7220_ErrMask_RcvLongPktLenErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvMaxPktLenErrMask_LSB 0x4
-#define QIB_7220_ErrMask_RcvMaxPktLenErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvMinPktLenErrMask_LSB 0x3
-#define QIB_7220_ErrMask_RcvMinPktLenErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvICRCErrMask_LSB 0x2
-#define QIB_7220_ErrMask_RcvICRCErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvVCRCErrMask_LSB 0x1
-#define QIB_7220_ErrMask_RcvVCRCErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvFormatErrMask_LSB 0x0
-#define QIB_7220_ErrMask_RcvFormatErrMask_RMASK 0x1
-
-#define QIB_7220_ErrStatus_OFFS 0x88
-#define QIB_7220_ErrStatus_Reserved_LSB 0x36
-#define QIB_7220_ErrStatus_Reserved_RMASK 0x3FF
-#define QIB_7220_ErrStatus_InvalidEEPCmdErr_LSB 0x35
-#define QIB_7220_ErrStatus_InvalidEEPCmdErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaDescAddrMisalignErr_LSB 0x34
-#define QIB_7220_ErrStatus_SDmaDescAddrMisalignErr_RMASK 0x1
-#define QIB_7220_ErrStatus_HardwareErr_LSB 0x33
-#define QIB_7220_ErrStatus_HardwareErr_RMASK 0x1
-#define QIB_7220_ErrStatus_ResetNegated_LSB 0x32
-#define QIB_7220_ErrStatus_ResetNegated_RMASK 0x1
-#define QIB_7220_ErrStatus_InvalidAddrErr_LSB 0x31
-#define QIB_7220_ErrStatus_InvalidAddrErr_RMASK 0x1
-#define QIB_7220_ErrStatus_IBStatusChanged_LSB 0x30
-#define QIB_7220_ErrStatus_IBStatusChanged_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaUnexpDataErr_LSB 0x2F
-#define QIB_7220_ErrStatus_SDmaUnexpDataErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaMissingDwErr_LSB 0x2E
-#define QIB_7220_ErrStatus_SDmaMissingDwErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaDwEnErr_LSB 0x2D
-#define QIB_7220_ErrStatus_SDmaDwEnErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaRpyTagErr_LSB 0x2C
-#define QIB_7220_ErrStatus_SDmaRpyTagErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDma1stDescErr_LSB 0x2B
-#define QIB_7220_ErrStatus_SDma1stDescErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaBaseErr_LSB 0x2A
-#define QIB_7220_ErrStatus_SDmaBaseErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaTailOutOfBoundErr_LSB 0x29
-#define QIB_7220_ErrStatus_SDmaTailOutOfBoundErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaOutOfBoundErr_LSB 0x28
-#define QIB_7220_ErrStatus_SDmaOutOfBoundErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaGenMismatchErr_LSB 0x27
-#define QIB_7220_ErrStatus_SDmaGenMismatchErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendBufMisuseErr_LSB 0x26
-#define QIB_7220_ErrStatus_SendBufMisuseErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendUnsupportedVLErr_LSB 0x25
-#define QIB_7220_ErrStatus_SendUnsupportedVLErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendUnexpectedPktNumErr_LSB 0x24
-#define QIB_7220_ErrStatus_SendUnexpectedPktNumErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendPioArmLaunchErr_LSB 0x23
-#define QIB_7220_ErrStatus_SendPioArmLaunchErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendDroppedDataPktErr_LSB 0x22
-#define QIB_7220_ErrStatus_SendDroppedDataPktErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendDroppedSmpPktErr_LSB 0x21
-#define QIB_7220_ErrStatus_SendDroppedSmpPktErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendPktLenErr_LSB 0x20
-#define QIB_7220_ErrStatus_SendPktLenErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendUnderRunErr_LSB 0x1F
-#define QIB_7220_ErrStatus_SendUnderRunErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendMaxPktLenErr_LSB 0x1E
-#define QIB_7220_ErrStatus_SendMaxPktLenErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendMinPktLenErr_LSB 0x1D
-#define QIB_7220_ErrStatus_SendMinPktLenErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaDisabledErr_LSB 0x1C
-#define QIB_7220_ErrStatus_SDmaDisabledErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendSpecialTriggerErr_LSB 0x1B
-#define QIB_7220_ErrStatus_SendSpecialTriggerErr_RMASK 0x1
-#define QIB_7220_ErrStatus_Reserved1_LSB 0x12
-#define QIB_7220_ErrStatus_Reserved1_RMASK 0x1FF
-#define QIB_7220_ErrStatus_RcvIBLostLinkErr_LSB 0x11
-#define QIB_7220_ErrStatus_RcvIBLostLinkErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvHdrErr_LSB 0x10
-#define QIB_7220_ErrStatus_RcvHdrErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvHdrLenErr_LSB 0xF
-#define QIB_7220_ErrStatus_RcvHdrLenErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvBadTidErr_LSB 0xE
-#define QIB_7220_ErrStatus_RcvBadTidErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvHdrFullErr_LSB 0xD
-#define QIB_7220_ErrStatus_RcvHdrFullErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvEgrFullErr_LSB 0xC
-#define QIB_7220_ErrStatus_RcvEgrFullErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvBadVersionErr_LSB 0xB
-#define QIB_7220_ErrStatus_RcvBadVersionErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvIBFlowErr_LSB 0xA
-#define QIB_7220_ErrStatus_RcvIBFlowErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvEBPErr_LSB 0x9
-#define QIB_7220_ErrStatus_RcvEBPErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvUnsupportedVLErr_LSB 0x8
-#define QIB_7220_ErrStatus_RcvUnsupportedVLErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvUnexpectedCharErr_LSB 0x7
-#define QIB_7220_ErrStatus_RcvUnexpectedCharErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvShortPktLenErr_LSB 0x6
-#define QIB_7220_ErrStatus_RcvShortPktLenErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvLongPktLenErr_LSB 0x5
-#define QIB_7220_ErrStatus_RcvLongPktLenErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvMaxPktLenErr_LSB 0x4
-#define QIB_7220_ErrStatus_RcvMaxPktLenErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvMinPktLenErr_LSB 0x3
-#define QIB_7220_ErrStatus_RcvMinPktLenErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvICRCErr_LSB 0x2
-#define QIB_7220_ErrStatus_RcvICRCErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvVCRCErr_LSB 0x1
-#define QIB_7220_ErrStatus_RcvVCRCErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvFormatErr_LSB 0x0
-#define QIB_7220_ErrStatus_RcvFormatErr_RMASK 0x1
-
-#define QIB_7220_ErrClear_OFFS 0x90
-#define QIB_7220_ErrClear_Reserved_LSB 0x36
-#define QIB_7220_ErrClear_Reserved_RMASK 0x3FF
-#define QIB_7220_ErrClear_InvalidEEPCmdErrClear_LSB 0x35
-#define QIB_7220_ErrClear_InvalidEEPCmdErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaDescAddrMisalignErrClear_LSB 0x34
-#define QIB_7220_ErrClear_SDmaDescAddrMisalignErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_HardwareErrClear_LSB 0x33
-#define QIB_7220_ErrClear_HardwareErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_ResetNegatedClear_LSB 0x32
-#define QIB_7220_ErrClear_ResetNegatedClear_RMASK 0x1
-#define QIB_7220_ErrClear_InvalidAddrErrClear_LSB 0x31
-#define QIB_7220_ErrClear_InvalidAddrErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_IBStatusChangedClear_LSB 0x30
-#define QIB_7220_ErrClear_IBStatusChangedClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaUnexpDataErrClear_LSB 0x2F
-#define QIB_7220_ErrClear_SDmaUnexpDataErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaMissingDwErrClear_LSB 0x2E
-#define QIB_7220_ErrClear_SDmaMissingDwErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaDwEnErrClear_LSB 0x2D
-#define QIB_7220_ErrClear_SDmaDwEnErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaRpyTagErrClear_LSB 0x2C
-#define QIB_7220_ErrClear_SDmaRpyTagErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDma1stDescErrClear_LSB 0x2B
-#define QIB_7220_ErrClear_SDma1stDescErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaBaseErrClear_LSB 0x2A
-#define QIB_7220_ErrClear_SDmaBaseErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaTailOutOfBoundErrClear_LSB 0x29
-#define QIB_7220_ErrClear_SDmaTailOutOfBoundErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaOutOfBoundErrClear_LSB 0x28
-#define QIB_7220_ErrClear_SDmaOutOfBoundErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaGenMismatchErrClear_LSB 0x27
-#define QIB_7220_ErrClear_SDmaGenMismatchErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendBufMisuseErrClear_LSB 0x26
-#define QIB_7220_ErrClear_SendBufMisuseErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendUnsupportedVLErrClear_LSB 0x25
-#define QIB_7220_ErrClear_SendUnsupportedVLErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendUnexpectedPktNumErrClear_LSB 0x24
-#define QIB_7220_ErrClear_SendUnexpectedPktNumErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendPioArmLaunchErrClear_LSB 0x23
-#define QIB_7220_ErrClear_SendPioArmLaunchErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendDroppedDataPktErrClear_LSB 0x22
-#define QIB_7220_ErrClear_SendDroppedDataPktErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendDroppedSmpPktErrClear_LSB 0x21
-#define QIB_7220_ErrClear_SendDroppedSmpPktErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendPktLenErrClear_LSB 0x20
-#define QIB_7220_ErrClear_SendPktLenErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendUnderRunErrClear_LSB 0x1F
-#define QIB_7220_ErrClear_SendUnderRunErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendMaxPktLenErrClear_LSB 0x1E
-#define QIB_7220_ErrClear_SendMaxPktLenErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendMinPktLenErrClear_LSB 0x1D
-#define QIB_7220_ErrClear_SendMinPktLenErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaDisabledErrClear_LSB 0x1C
-#define QIB_7220_ErrClear_SDmaDisabledErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendSpecialTriggerErrClear_LSB 0x1B
-#define QIB_7220_ErrClear_SendSpecialTriggerErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_Reserved1_LSB 0x12
-#define QIB_7220_ErrClear_Reserved1_RMASK 0x1FF
-#define QIB_7220_ErrClear_RcvIBLostLinkErrClear_LSB 0x11
-#define QIB_7220_ErrClear_RcvIBLostLinkErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvHdrErrClear_LSB 0x10
-#define QIB_7220_ErrClear_RcvHdrErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvHdrLenErrClear_LSB 0xF
-#define QIB_7220_ErrClear_RcvHdrLenErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvBadTidErrClear_LSB 0xE
-#define QIB_7220_ErrClear_RcvBadTidErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvHdrFullErrClear_LSB 0xD
-#define QIB_7220_ErrClear_RcvHdrFullErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvEgrFullErrClear_LSB 0xC
-#define QIB_7220_ErrClear_RcvEgrFullErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvBadVersionErrClear_LSB 0xB
-#define QIB_7220_ErrClear_RcvBadVersionErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvIBFlowErrClear_LSB 0xA
-#define QIB_7220_ErrClear_RcvIBFlowErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvEBPErrClear_LSB 0x9
-#define QIB_7220_ErrClear_RcvEBPErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvUnsupportedVLErrClear_LSB 0x8
-#define QIB_7220_ErrClear_RcvUnsupportedVLErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvUnexpectedCharErrClear_LSB 0x7
-#define QIB_7220_ErrClear_RcvUnexpectedCharErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvShortPktLenErrClear_LSB 0x6
-#define QIB_7220_ErrClear_RcvShortPktLenErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvLongPktLenErrClear_LSB 0x5
-#define QIB_7220_ErrClear_RcvLongPktLenErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvMaxPktLenErrClear_LSB 0x4
-#define QIB_7220_ErrClear_RcvMaxPktLenErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvMinPktLenErrClear_LSB 0x3
-#define QIB_7220_ErrClear_RcvMinPktLenErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvICRCErrClear_LSB 0x2
-#define QIB_7220_ErrClear_RcvICRCErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvVCRCErrClear_LSB 0x1
-#define QIB_7220_ErrClear_RcvVCRCErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvFormatErrClear_LSB 0x0
-#define QIB_7220_ErrClear_RcvFormatErrClear_RMASK 0x1
-
-#define QIB_7220_HwErrMask_OFFS 0x98
-#define QIB_7220_HwErrMask_IBCBusFromSPCParityErrMask_LSB 0x3F
-#define QIB_7220_HwErrMask_IBCBusFromSPCParityErrMask_RMASK 0x1
-#define QIB_7220_HwErrMask_IBCBusToSPCParityErrMask_LSB 0x3E
-#define QIB_7220_HwErrMask_IBCBusToSPCParityErrMask_RMASK 0x1
-#define QIB_7220_HwErrMask_Clk_uC_PLLNotLockedMask_LSB 0x3D
-#define QIB_7220_HwErrMask_Clk_uC_PLLNotLockedMask_RMASK 0x1
-#define QIB_7220_HwErrMask_IBSerdesPClkNotDetectMask_LSB 0x3C
-#define QIB_7220_HwErrMask_IBSerdesPClkNotDetectMask_RMASK 0x1
-#define QIB_7220_HwErrMask_PCIESerdesQ3PClkNotDetectMask_LSB 0x3B
-#define QIB_7220_HwErrMask_PCIESerdesQ3PClkNotDetectMask_RMASK 0x1
-#define QIB_7220_HwErrMask_PCIESerdesQ2PClkNotDetectMask_LSB 0x3A
-#define QIB_7220_HwErrMask_PCIESerdesQ2PClkNotDetectMask_RMASK 0x1
-#define QIB_7220_HwErrMask_PCIESerdesQ1PClkNotDetectMask_LSB 0x39
-#define QIB_7220_HwErrMask_PCIESerdesQ1PClkNotDetectMask_RMASK 0x1
-#define QIB_7220_HwErrMask_PCIESerdesQ0PClkNotDetectMask_LSB 0x38
-#define QIB_7220_HwErrMask_PCIESerdesQ0PClkNotDetectMask_RMASK 0x1
-#define QIB_7220_HwErrMask_Reserved_LSB 0x37
-#define QIB_7220_HwErrMask_Reserved_RMASK 0x1
-#define QIB_7220_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
-#define QIB_7220_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
-#define QIB_7220_HwErrMask_Reserved1_LSB 0x33
-#define QIB_7220_HwErrMask_Reserved1_RMASK 0x7
-#define QIB_7220_HwErrMask_RXEMemParityErrMask_LSB 0x2C
-#define QIB_7220_HwErrMask_RXEMemParityErrMask_RMASK 0x7F
-#define QIB_7220_HwErrMask_TXEMemParityErrMask_LSB 0x28
-#define QIB_7220_HwErrMask_TXEMemParityErrMask_RMASK 0xF
-#define QIB_7220_HwErrMask_DDSRXEQMemoryParityErrMask_LSB 0x27
-#define QIB_7220_HwErrMask_DDSRXEQMemoryParityErrMask_RMASK 0x1
-#define QIB_7220_HwErrMask_IB_uC_MemoryParityErrMask_LSB 0x26
-#define QIB_7220_HwErrMask_IB_uC_MemoryParityErrMask_RMASK 0x1
-#define QIB_7220_HwErrMask_PCIEOct1_uC_MemoryParityErrMask_LSB 0x25
-#define QIB_7220_HwErrMask_PCIEOct1_uC_MemoryParityErrMask_RMASK 0x1
-#define QIB_7220_HwErrMask_PCIEOct0_uC_MemoryParityErrMask_LSB 0x24
-#define QIB_7220_HwErrMask_PCIEOct0_uC_MemoryParityErrMask_RMASK 0x1
-#define QIB_7220_HwErrMask_Reserved2_LSB 0x22
-#define QIB_7220_HwErrMask_Reserved2_RMASK 0x3
-#define QIB_7220_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
-#define QIB_7220_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
-#define QIB_7220_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
-#define QIB_7220_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
-#define QIB_7220_HwErrMask_PoisonedTLPMask_LSB 0x1D
-#define QIB_7220_HwErrMask_PoisonedTLPMask_RMASK 0x1
-#define QIB_7220_HwErrMask_SDmaMemReadErrMask_LSB 0x1C
-#define QIB_7220_HwErrMask_SDmaMemReadErrMask_RMASK 0x1
-#define QIB_7220_HwErrMask_Reserved3_LSB 0x8
-#define QIB_7220_HwErrMask_Reserved3_RMASK 0xFFFFF
-#define QIB_7220_HwErrMask_PCIeMemParityErrMask_LSB 0x0
-#define QIB_7220_HwErrMask_PCIeMemParityErrMask_RMASK 0xFF
-
-#define QIB_7220_HwErrStatus_OFFS 0xA0
-#define QIB_7220_HwErrStatus_IBCBusFromSPCParityErr_LSB 0x3F
-#define QIB_7220_HwErrStatus_IBCBusFromSPCParityErr_RMASK 0x1
-#define QIB_7220_HwErrStatus_IBCBusToSPCParityErr_LSB 0x3E
-#define QIB_7220_HwErrStatus_IBCBusToSPCParityErr_RMASK 0x1
-#define QIB_7220_HwErrStatus_Clk_uC_PLLNotLocked_LSB 0x3D
-#define QIB_7220_HwErrStatus_Clk_uC_PLLNotLocked_RMASK 0x1
-#define QIB_7220_HwErrStatus_IBSerdesPClkNotDetect_LSB 0x3C
-#define QIB_7220_HwErrStatus_IBSerdesPClkNotDetect_RMASK 0x1
-#define QIB_7220_HwErrStatus_PCIESerdesQ3PClkNotDetect_LSB 0x3B
-#define QIB_7220_HwErrStatus_PCIESerdesQ3PClkNotDetect_RMASK 0x1
-#define QIB_7220_HwErrStatus_PCIESerdesQ2PClkNotDetect_LSB 0x3A
-#define QIB_7220_HwErrStatus_PCIESerdesQ2PClkNotDetect_RMASK 0x1
-#define QIB_7220_HwErrStatus_PCIESerdesQ1PClkNotDetect_LSB 0x39
-#define QIB_7220_HwErrStatus_PCIESerdesQ1PClkNotDetect_RMASK 0x1
-#define QIB_7220_HwErrStatus_PCIESerdesQ0PClkNotDetect_LSB 0x38
-#define QIB_7220_HwErrStatus_PCIESerdesQ0PClkNotDetect_RMASK 0x1
-#define QIB_7220_HwErrStatus_Reserved_LSB 0x37
-#define QIB_7220_HwErrStatus_Reserved_RMASK 0x1
-#define QIB_7220_HwErrStatus_PowerOnBISTFailed_LSB 0x36
-#define QIB_7220_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
-#define QIB_7220_HwErrStatus_Reserved1_LSB 0x33
-#define QIB_7220_HwErrStatus_Reserved1_RMASK 0x7
-#define QIB_7220_HwErrStatus_RXEMemParity_LSB 0x2C
-#define QIB_7220_HwErrStatus_RXEMemParity_RMASK 0x7F
-#define QIB_7220_HwErrStatus_TXEMemParity_LSB 0x28
-#define QIB_7220_HwErrStatus_TXEMemParity_RMASK 0xF
-#define QIB_7220_HwErrStatus_DDSRXEQMemoryParityErr_LSB 0x27
-#define QIB_7220_HwErrStatus_DDSRXEQMemoryParityErr_RMASK 0x1
-#define QIB_7220_HwErrStatus_IB_uC_MemoryParityErr_LSB 0x26
-#define QIB_7220_HwErrStatus_IB_uC_MemoryParityErr_RMASK 0x1
-#define QIB_7220_HwErrStatus_PCIE_uC_Oct1MemoryParityErr_LSB 0x25
-#define QIB_7220_HwErrStatus_PCIE_uC_Oct1MemoryParityErr_RMASK 0x1
-#define QIB_7220_HwErrStatus_PCIE_uC_Oct0MemoryParityErr_LSB 0x24
-#define QIB_7220_HwErrStatus_PCIE_uC_Oct0MemoryParityErr_RMASK 0x1
-#define QIB_7220_HwErrStatus_Reserved2_LSB 0x22
-#define QIB_7220_HwErrStatus_Reserved2_RMASK 0x3
-#define QIB_7220_HwErrStatus_PCIeBusParity_LSB 0x1F
-#define QIB_7220_HwErrStatus_PCIeBusParity_RMASK 0x7
-#define QIB_7220_HwErrStatus_PcieCplTimeout_LSB 0x1E
-#define QIB_7220_HwErrStatus_PcieCplTimeout_RMASK 0x1
-#define QIB_7220_HwErrStatus_PoisenedTLP_LSB 0x1D
-#define QIB_7220_HwErrStatus_PoisenedTLP_RMASK 0x1
-#define QIB_7220_HwErrStatus_SDmaMemReadErr_LSB 0x1C
-#define QIB_7220_HwErrStatus_SDmaMemReadErr_RMASK 0x1
-#define QIB_7220_HwErrStatus_Reserved3_LSB 0x8
-#define QIB_7220_HwErrStatus_Reserved3_RMASK 0xFFFFF
-#define QIB_7220_HwErrStatus_PCIeMemParity_LSB 0x0
-#define QIB_7220_HwErrStatus_PCIeMemParity_RMASK 0xFF
-
-#define QIB_7220_HwErrClear_OFFS 0xA8
-#define QIB_7220_HwErrClear_IBCBusFromSPCParityErrClear_LSB 0x3F
-#define QIB_7220_HwErrClear_IBCBusFromSPCParityErrClear_RMASK 0x1
-#define QIB_7220_HwErrClear_IBCBusToSPCparityErrClear_LSB 0x3E
-#define QIB_7220_HwErrClear_IBCBusToSPCparityErrClear_RMASK 0x1
-#define QIB_7220_HwErrClear_Clk_uC_PLLNotLockedClear_LSB 0x3D
-#define QIB_7220_HwErrClear_Clk_uC_PLLNotLockedClear_RMASK 0x1
-#define QIB_7220_HwErrClear_IBSerdesPClkNotDetectClear_LSB 0x3C
-#define QIB_7220_HwErrClear_IBSerdesPClkNotDetectClear_RMASK 0x1
-#define QIB_7220_HwErrClear_PCIESerdesQ3PClkNotDetectClear_LSB 0x3B
-#define QIB_7220_HwErrClear_PCIESerdesQ3PClkNotDetectClear_RMASK 0x1
-#define QIB_7220_HwErrClear_PCIESerdesQ2PClkNotDetectClear_LSB 0x3A
-#define QIB_7220_HwErrClear_PCIESerdesQ2PClkNotDetectClear_RMASK 0x1
-#define QIB_7220_HwErrClear_PCIESerdesQ1PClkNotDetectClear_LSB 0x39
-#define QIB_7220_HwErrClear_PCIESerdesQ1PClkNotDetectClear_RMASK 0x1
-#define QIB_7220_HwErrClear_PCIESerdesQ0PClkNotDetectClear_LSB 0x38
-#define QIB_7220_HwErrClear_PCIESerdesQ0PClkNotDetectClear_RMASK 0x1
-#define QIB_7220_HwErrClear_Reserved_LSB 0x37
-#define QIB_7220_HwErrClear_Reserved_RMASK 0x1
-#define QIB_7220_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
-#define QIB_7220_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
-#define QIB_7220_HwErrClear_Reserved1_LSB 0x33
-#define QIB_7220_HwErrClear_Reserved1_RMASK 0x7
-#define QIB_7220_HwErrClear_RXEMemParityClear_LSB 0x2C
-#define QIB_7220_HwErrClear_RXEMemParityClear_RMASK 0x7F
-#define QIB_7220_HwErrClear_TXEMemParityClear_LSB 0x28
-#define QIB_7220_HwErrClear_TXEMemParityClear_RMASK 0xF
-#define QIB_7220_HwErrClear_DDSRXEQMemoryParityErrClear_LSB 0x27
-#define QIB_7220_HwErrClear_DDSRXEQMemoryParityErrClear_RMASK 0x1
-#define QIB_7220_HwErrClear_IB_uC_MemoryParityErrClear_LSB 0x26
-#define QIB_7220_HwErrClear_IB_uC_MemoryParityErrClear_RMASK 0x1
-#define QIB_7220_HwErrClear_PCIE_uC_Oct1MemoryParityErrClear_LSB 0x25
-#define QIB_7220_HwErrClear_PCIE_uC_Oct1MemoryParityErrClear_RMASK 0x1
-#define QIB_7220_HwErrClear_PCIE_uC_Oct0MemoryParityErrClear_LSB 0x24
-#define QIB_7220_HwErrClear_PCIE_uC_Oct0MemoryParityErrClear_RMASK 0x1
-#define QIB_7220_HwErrClear_Reserved2_LSB 0x22
-#define QIB_7220_HwErrClear_Reserved2_RMASK 0x3
-#define QIB_7220_HwErrClear_PCIeBusParityClr_LSB 0x1F
-#define QIB_7220_HwErrClear_PCIeBusParityClr_RMASK 0x7
-#define QIB_7220_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
-#define QIB_7220_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
-#define QIB_7220_HwErrClear_PoisonedTLPClear_LSB 0x1D
-#define QIB_7220_HwErrClear_PoisonedTLPClear_RMASK 0x1
-#define QIB_7220_HwErrClear_SDmaMemReadErrClear_LSB 0x1C
-#define QIB_7220_HwErrClear_SDmaMemReadErrClear_RMASK 0x1
-#define QIB_7220_HwErrClear_Reserved3_LSB 0x8
-#define QIB_7220_HwErrClear_Reserved3_RMASK 0xFFFFF
-#define QIB_7220_HwErrClear_PCIeMemParityClr_LSB 0x0
-#define QIB_7220_HwErrClear_PCIeMemParityClr_RMASK 0xFF
-
-#define QIB_7220_HwDiagCtrl_OFFS 0xB0
-#define QIB_7220_HwDiagCtrl_ForceIBCBusFromSPCParityErr_LSB 0x3F
-#define QIB_7220_HwDiagCtrl_ForceIBCBusFromSPCParityErr_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_ForceIBCBusToSPCParityErr_LSB 0x3E
-#define QIB_7220_HwDiagCtrl_ForceIBCBusToSPCParityErr_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_CounterWrEnable_LSB 0x3D
-#define QIB_7220_HwDiagCtrl_CounterWrEnable_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_CounterDisable_LSB 0x3C
-#define QIB_7220_HwDiagCtrl_CounterDisable_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_Reserved_LSB 0x33
-#define QIB_7220_HwDiagCtrl_Reserved_RMASK 0x1FF
-#define QIB_7220_HwDiagCtrl_ForceRxMemParityErr_LSB 0x2C
-#define QIB_7220_HwDiagCtrl_ForceRxMemParityErr_RMASK 0x7F
-#define QIB_7220_HwDiagCtrl_ForceTxMemparityErr_LSB 0x28
-#define QIB_7220_HwDiagCtrl_ForceTxMemparityErr_RMASK 0xF
-#define QIB_7220_HwDiagCtrl_ForceDDSRXEQMemoryParityErr_LSB 0x27
-#define QIB_7220_HwDiagCtrl_ForceDDSRXEQMemoryParityErr_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_ForceIB_uC_MemoryParityErr_LSB 0x26
-#define QIB_7220_HwDiagCtrl_ForceIB_uC_MemoryParityErr_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct1MemoryParityErr_LSB 0x25
-#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct1MemoryParityErr_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct0MemoryParityErr_LSB 0x24
-#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct0MemoryParityErr_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_Reserved1_LSB 0x23
-#define QIB_7220_HwDiagCtrl_Reserved1_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
-#define QIB_7220_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
-#define QIB_7220_HwDiagCtrl_Reserved2_LSB 0x8
-#define QIB_7220_HwDiagCtrl_Reserved2_RMASK 0x7FFFFF
-#define QIB_7220_HwDiagCtrl_forcePCIeMemParity_LSB 0x0
-#define QIB_7220_HwDiagCtrl_forcePCIeMemParity_RMASK 0xFF
-
-#define QIB_7220_REG_0000B8_OFFS 0xB8
-
-#define QIB_7220_IBCStatus_OFFS 0xC0
-#define QIB_7220_IBCStatus_TxCreditOk_LSB 0x1F
-#define QIB_7220_IBCStatus_TxCreditOk_RMASK 0x1
-#define QIB_7220_IBCStatus_TxReady_LSB 0x1E
-#define QIB_7220_IBCStatus_TxReady_RMASK 0x1
-#define QIB_7220_IBCStatus_Reserved_LSB 0xE
-#define QIB_7220_IBCStatus_Reserved_RMASK 0xFFFF
-#define QIB_7220_IBCStatus_IBTxLaneReversed_LSB 0xD
-#define QIB_7220_IBCStatus_IBTxLaneReversed_RMASK 0x1
-#define QIB_7220_IBCStatus_IBRxLaneReversed_LSB 0xC
-#define QIB_7220_IBCStatus_IBRxLaneReversed_RMASK 0x1
-#define QIB_7220_IBCStatus_IB_SERDES_TRIM_DONE_LSB 0xB
-#define QIB_7220_IBCStatus_IB_SERDES_TRIM_DONE_RMASK 0x1
-#define QIB_7220_IBCStatus_DDS_RXEQ_FAIL_LSB 0xA
-#define QIB_7220_IBCStatus_DDS_RXEQ_FAIL_RMASK 0x1
-#define QIB_7220_IBCStatus_LinkWidthActive_LSB 0x9
-#define QIB_7220_IBCStatus_LinkWidthActive_RMASK 0x1
-#define QIB_7220_IBCStatus_LinkSpeedActive_LSB 0x8
-#define QIB_7220_IBCStatus_LinkSpeedActive_RMASK 0x1
-#define QIB_7220_IBCStatus_LinkState_LSB 0x5
-#define QIB_7220_IBCStatus_LinkState_RMASK 0x7
-#define QIB_7220_IBCStatus_LinkTrainingState_LSB 0x0
-#define QIB_7220_IBCStatus_LinkTrainingState_RMASK 0x1F
-
-#define QIB_7220_IBCCtrl_OFFS 0xC8
-#define QIB_7220_IBCCtrl_Loopback_LSB 0x3F
-#define QIB_7220_IBCCtrl_Loopback_RMASK 0x1
-#define QIB_7220_IBCCtrl_LinkDownDefaultState_LSB 0x3E
-#define QIB_7220_IBCCtrl_LinkDownDefaultState_RMASK 0x1
-#define QIB_7220_IBCCtrl_Reserved_LSB 0x2B
-#define QIB_7220_IBCCtrl_Reserved_RMASK 0x7FFFF
-#define QIB_7220_IBCCtrl_CreditScale_LSB 0x28
-#define QIB_7220_IBCCtrl_CreditScale_RMASK 0x7
-#define QIB_7220_IBCCtrl_OverrunThreshold_LSB 0x24
-#define QIB_7220_IBCCtrl_OverrunThreshold_RMASK 0xF
-#define QIB_7220_IBCCtrl_PhyerrThreshold_LSB 0x20
-#define QIB_7220_IBCCtrl_PhyerrThreshold_RMASK 0xF
-#define QIB_7220_IBCCtrl_MaxPktLen_LSB 0x15
-#define QIB_7220_IBCCtrl_MaxPktLen_RMASK 0x7FF
-#define QIB_7220_IBCCtrl_LinkCmd_LSB 0x13
-#define QIB_7220_IBCCtrl_LinkCmd_RMASK 0x3
-#define QIB_7220_IBCCtrl_LinkInitCmd_LSB 0x10
-#define QIB_7220_IBCCtrl_LinkInitCmd_RMASK 0x7
-#define QIB_7220_IBCCtrl_FlowCtrlWaterMark_LSB 0x8
-#define QIB_7220_IBCCtrl_FlowCtrlWaterMark_RMASK 0xFF
-#define QIB_7220_IBCCtrl_FlowCtrlPeriod_LSB 0x0
-#define QIB_7220_IBCCtrl_FlowCtrlPeriod_RMASK 0xFF
-
-#define QIB_7220_EXTStatus_OFFS 0xD0
-#define QIB_7220_EXTStatus_GPIOIn_LSB 0x30
-#define QIB_7220_EXTStatus_GPIOIn_RMASK 0xFFFF
-#define QIB_7220_EXTStatus_Reserved_LSB 0x20
-#define QIB_7220_EXTStatus_Reserved_RMASK 0xFFFF
-#define QIB_7220_EXTStatus_Reserved1_LSB 0x10
-#define QIB_7220_EXTStatus_Reserved1_RMASK 0xFFFF
-#define QIB_7220_EXTStatus_MemBISTDisabled_LSB 0xF
-#define QIB_7220_EXTStatus_MemBISTDisabled_RMASK 0x1
-#define QIB_7220_EXTStatus_MemBISTEndTest_LSB 0xE
-#define QIB_7220_EXTStatus_MemBISTEndTest_RMASK 0x1
-#define QIB_7220_EXTStatus_Reserved2_LSB 0x0
-#define QIB_7220_EXTStatus_Reserved2_RMASK 0x3FFF
-
-#define QIB_7220_EXTCtrl_OFFS 0xD8
-#define QIB_7220_EXTCtrl_GPIOOe_LSB 0x30
-#define QIB_7220_EXTCtrl_GPIOOe_RMASK 0xFFFF
-#define QIB_7220_EXTCtrl_GPIOInvert_LSB 0x20
-#define QIB_7220_EXTCtrl_GPIOInvert_RMASK 0xFFFF
-#define QIB_7220_EXTCtrl_Reserved_LSB 0x4
-#define QIB_7220_EXTCtrl_Reserved_RMASK 0xFFFFFFF
-#define QIB_7220_EXTCtrl_LEDPriPortGreenOn_LSB 0x3
-#define QIB_7220_EXTCtrl_LEDPriPortGreenOn_RMASK 0x1
-#define QIB_7220_EXTCtrl_LEDPriPortYellowOn_LSB 0x2
-#define QIB_7220_EXTCtrl_LEDPriPortYellowOn_RMASK 0x1
-#define QIB_7220_EXTCtrl_LEDGblOkGreenOn_LSB 0x1
-#define QIB_7220_EXTCtrl_LEDGblOkGreenOn_RMASK 0x1
-#define QIB_7220_EXTCtrl_LEDGblErrRedOff_LSB 0x0
-#define QIB_7220_EXTCtrl_LEDGblErrRedOff_RMASK 0x1
-
-#define QIB_7220_GPIOOut_OFFS 0xE0
-
-#define QIB_7220_GPIOMask_OFFS 0xE8
-
-#define QIB_7220_GPIOStatus_OFFS 0xF0
-
-#define QIB_7220_GPIOClear_OFFS 0xF8
-
-#define QIB_7220_RcvCtrl_OFFS 0x100
-#define QIB_7220_RcvCtrl_Reserved_LSB 0x27
-#define QIB_7220_RcvCtrl_Reserved_RMASK 0x1FFFFFF
-#define QIB_7220_RcvCtrl_RcvQPMapEnable_LSB 0x26
-#define QIB_7220_RcvCtrl_RcvQPMapEnable_RMASK 0x1
-#define QIB_7220_RcvCtrl_PortCfg_LSB 0x24
-#define QIB_7220_RcvCtrl_PortCfg_RMASK 0x3
-#define QIB_7220_RcvCtrl_TailUpd_LSB 0x23
-#define QIB_7220_RcvCtrl_TailUpd_RMASK 0x1
-#define QIB_7220_RcvCtrl_RcvPartitionKeyDisable_LSB 0x22
-#define QIB_7220_RcvCtrl_RcvPartitionKeyDisable_RMASK 0x1
-#define QIB_7220_RcvCtrl_IntrAvail_LSB 0x11
-#define QIB_7220_RcvCtrl_IntrAvail_RMASK 0x1FFFF
-#define QIB_7220_RcvCtrl_PortEnable_LSB 0x0
-#define QIB_7220_RcvCtrl_PortEnable_RMASK 0x1FFFF
-
-#define QIB_7220_RcvBTHQP_OFFS 0x108
-#define QIB_7220_RcvBTHQP_Reserved_LSB 0x18
-#define QIB_7220_RcvBTHQP_Reserved_RMASK 0xFF
-#define QIB_7220_RcvBTHQP_RcvBTHQP_LSB 0x0
-#define QIB_7220_RcvBTHQP_RcvBTHQP_RMASK 0xFFFFFF
-
-#define QIB_7220_RcvHdrSize_OFFS 0x110
-
-#define QIB_7220_RcvHdrCnt_OFFS 0x118
-
-#define QIB_7220_RcvHdrEntSize_OFFS 0x120
-
-#define QIB_7220_RcvTIDBase_OFFS 0x128
-
-#define QIB_7220_RcvTIDCnt_OFFS 0x130
-
-#define QIB_7220_RcvEgrBase_OFFS 0x138
-
-#define QIB_7220_RcvEgrCnt_OFFS 0x140
-
-#define QIB_7220_RcvBufBase_OFFS 0x148
-
-#define QIB_7220_RcvBufSize_OFFS 0x150
-
-#define QIB_7220_RxIntMemBase_OFFS 0x158
-
-#define QIB_7220_RxIntMemSize_OFFS 0x160
-
-#define QIB_7220_RcvPartitionKey_OFFS 0x168
-
-#define QIB_7220_RcvQPMulticastPort_OFFS 0x170
-#define QIB_7220_RcvQPMulticastPort_Reserved_LSB 0x5
-#define QIB_7220_RcvQPMulticastPort_Reserved_RMASK 0x7FFFFFFFFFFFFFF
-#define QIB_7220_RcvQPMulticastPort_RcvQpMcPort_LSB 0x0
-#define QIB_7220_RcvQPMulticastPort_RcvQpMcPort_RMASK 0x1F
-
-#define QIB_7220_RcvPktLEDCnt_OFFS 0x178
-#define QIB_7220_RcvPktLEDCnt_ONperiod_LSB 0x20
-#define QIB_7220_RcvPktLEDCnt_ONperiod_RMASK 0xFFFFFFFF
-#define QIB_7220_RcvPktLEDCnt_OFFperiod_LSB 0x0
-#define QIB_7220_RcvPktLEDCnt_OFFperiod_RMASK 0xFFFFFFFF
-
-#define QIB_7220_IBCDDRCtrl_OFFS 0x180
-#define QIB_7220_IBCDDRCtrl_IB_DLID_MASK_LSB 0x30
-#define QIB_7220_IBCDDRCtrl_IB_DLID_MASK_RMASK 0xFFFF
-#define QIB_7220_IBCDDRCtrl_IB_DLID_LSB 0x20
-#define QIB_7220_IBCDDRCtrl_IB_DLID_RMASK 0xFFFF
-#define QIB_7220_IBCDDRCtrl_Reserved_LSB 0x1B
-#define QIB_7220_IBCDDRCtrl_Reserved_RMASK 0x1F
-#define QIB_7220_IBCDDRCtrl_HRTBT_REQ_LSB 0x1A
-#define QIB_7220_IBCDDRCtrl_HRTBT_REQ_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_HRTBT_PORT_LSB 0x12
-#define QIB_7220_IBCDDRCtrl_HRTBT_PORT_RMASK 0xFF
-#define QIB_7220_IBCDDRCtrl_HRTBT_AUTO_LSB 0x11
-#define QIB_7220_IBCDDRCtrl_HRTBT_AUTO_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_HRTBT_ENB_LSB 0x10
-#define QIB_7220_IBCDDRCtrl_HRTBT_ENB_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_SD_DDS_LSB 0xC
-#define QIB_7220_IBCDDRCtrl_SD_DDS_RMASK 0xF
-#define QIB_7220_IBCDDRCtrl_SD_DDSV_LSB 0xB
-#define QIB_7220_IBCDDRCtrl_SD_DDSV_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_SD_ADD_ENB_LSB 0xA
-#define QIB_7220_IBCDDRCtrl_SD_ADD_ENB_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_SD_RX_EQUAL_ENABLE_LSB 0x9
-#define QIB_7220_IBCDDRCtrl_SD_RX_EQUAL_ENABLE_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_IB_LANE_REV_SUPPORTED_LSB 0x8
-#define QIB_7220_IBCDDRCtrl_IB_LANE_REV_SUPPORTED_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_IB_POLARITY_REV_SUPP_LSB 0x7
-#define QIB_7220_IBCDDRCtrl_IB_POLARITY_REV_SUPP_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_IB_NUM_CHANNELS_LSB 0x5
-#define QIB_7220_IBCDDRCtrl_IB_NUM_CHANNELS_RMASK 0x3
-#define QIB_7220_IBCDDRCtrl_SD_SPEED_QDR_LSB 0x4
-#define QIB_7220_IBCDDRCtrl_SD_SPEED_QDR_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_SD_SPEED_DDR_LSB 0x3
-#define QIB_7220_IBCDDRCtrl_SD_SPEED_DDR_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_SD_SPEED_SDR_LSB 0x2
-#define QIB_7220_IBCDDRCtrl_SD_SPEED_SDR_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_SD_SPEED_LSB 0x1
-#define QIB_7220_IBCDDRCtrl_SD_SPEED_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_IB_ENHANCED_MODE_LSB 0x0
-#define QIB_7220_IBCDDRCtrl_IB_ENHANCED_MODE_RMASK 0x1
-
-#define QIB_7220_HRTBT_GUID_OFFS 0x188
-
-#define QIB_7220_IBCDDRCtrl2_OFFS 0x1A0
-#define QIB_7220_IBCDDRCtrl2_IB_BACK_PORCH_LSB 0x5
-#define QIB_7220_IBCDDRCtrl2_IB_BACK_PORCH_RMASK 0x1F
-#define QIB_7220_IBCDDRCtrl2_IB_FRONT_PORCH_LSB 0x0
-#define QIB_7220_IBCDDRCtrl2_IB_FRONT_PORCH_RMASK 0x1F
-
-#define QIB_7220_IBCDDRStatus_OFFS 0x1A8
-#define QIB_7220_IBCDDRStatus_heartbeat_timed_out_LSB 0x24
-#define QIB_7220_IBCDDRStatus_heartbeat_timed_out_RMASK 0x1
-#define QIB_7220_IBCDDRStatus_heartbeat_crosstalk_LSB 0x20
-#define QIB_7220_IBCDDRStatus_heartbeat_crosstalk_RMASK 0xF
-#define QIB_7220_IBCDDRStatus_RxEqLocalDevice_LSB 0x1E
-#define QIB_7220_IBCDDRStatus_RxEqLocalDevice_RMASK 0x3
-#define QIB_7220_IBCDDRStatus_ReqDDSLocalFromRmt_LSB 0x1A
-#define QIB_7220_IBCDDRStatus_ReqDDSLocalFromRmt_RMASK 0xF
-#define QIB_7220_IBCDDRStatus_LinkRoundTripLatency_LSB 0x0
-#define QIB_7220_IBCDDRStatus_LinkRoundTripLatency_RMASK 0x3FFFFFF
-
-#define QIB_7220_JIntReload_OFFS 0x1B0
-#define QIB_7220_JIntReload_J_limit_reload_LSB 0x10
-#define QIB_7220_JIntReload_J_limit_reload_RMASK 0xFFFF
-#define QIB_7220_JIntReload_J_reload_LSB 0x0
-#define QIB_7220_JIntReload_J_reload_RMASK 0xFFFF
-
-#define QIB_7220_IBNCModeCtrl_OFFS 0x1B8
-#define QIB_7220_IBNCModeCtrl_Reserved_LSB 0x1A
-#define QIB_7220_IBNCModeCtrl_Reserved_RMASK 0x3FFFFFFFFF
-#define QIB_7220_IBNCModeCtrl_TSMCode_TS2_LSB 0x11
-#define QIB_7220_IBNCModeCtrl_TSMCode_TS2_RMASK 0x1FF
-#define QIB_7220_IBNCModeCtrl_TSMCode_TS1_LSB 0x8
-#define QIB_7220_IBNCModeCtrl_TSMCode_TS1_RMASK 0x1FF
-#define QIB_7220_IBNCModeCtrl_Reserved1_LSB 0x3
-#define QIB_7220_IBNCModeCtrl_Reserved1_RMASK 0x1F
-#define QIB_7220_IBNCModeCtrl_TSMEnable_ignore_TSM_on_rx_LSB 0x2
-#define QIB_7220_IBNCModeCtrl_TSMEnable_ignore_TSM_on_rx_RMASK 0x1
-#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS2_LSB 0x1
-#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS2_RMASK 0x1
-#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS1_LSB 0x0
-#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS1_RMASK 0x1
-
-#define QIB_7220_SendCtrl_OFFS 0x1C0
-#define QIB_7220_SendCtrl_Disarm_LSB 0x1F
-#define QIB_7220_SendCtrl_Disarm_RMASK 0x1
-#define QIB_7220_SendCtrl_Reserved_LSB 0x1D
-#define QIB_7220_SendCtrl_Reserved_RMASK 0x3
-#define QIB_7220_SendCtrl_AvailUpdThld_LSB 0x18
-#define QIB_7220_SendCtrl_AvailUpdThld_RMASK 0x1F
-#define QIB_7220_SendCtrl_DisarmPIOBuf_LSB 0x10
-#define QIB_7220_SendCtrl_DisarmPIOBuf_RMASK 0xFF
-#define QIB_7220_SendCtrl_Reserved1_LSB 0xD
-#define QIB_7220_SendCtrl_Reserved1_RMASK 0x7
-#define QIB_7220_SendCtrl_SDmaHalt_LSB 0xC
-#define QIB_7220_SendCtrl_SDmaHalt_RMASK 0x1
-#define QIB_7220_SendCtrl_SDmaEnable_LSB 0xB
-#define QIB_7220_SendCtrl_SDmaEnable_RMASK 0x1
-#define QIB_7220_SendCtrl_SDmaSingleDescriptor_LSB 0xA
-#define QIB_7220_SendCtrl_SDmaSingleDescriptor_RMASK 0x1
-#define QIB_7220_SendCtrl_SDmaIntEnable_LSB 0x9
-#define QIB_7220_SendCtrl_SDmaIntEnable_RMASK 0x1
-#define QIB_7220_SendCtrl_Reserved2_LSB 0x5
-#define QIB_7220_SendCtrl_Reserved2_RMASK 0xF
-#define QIB_7220_SendCtrl_SSpecialTriggerEn_LSB 0x4
-#define QIB_7220_SendCtrl_SSpecialTriggerEn_RMASK 0x1
-#define QIB_7220_SendCtrl_SPioEnable_LSB 0x3
-#define QIB_7220_SendCtrl_SPioEnable_RMASK 0x1
-#define QIB_7220_SendCtrl_SendBufAvailUpd_LSB 0x2
-#define QIB_7220_SendCtrl_SendBufAvailUpd_RMASK 0x1
-#define QIB_7220_SendCtrl_SendIntBufAvail_LSB 0x1
-#define QIB_7220_SendCtrl_SendIntBufAvail_RMASK 0x1
-#define QIB_7220_SendCtrl_Abort_LSB 0x0
-#define QIB_7220_SendCtrl_Abort_RMASK 0x1
-
-#define QIB_7220_SendBufBase_OFFS 0x1C8
-#define QIB_7220_SendBufBase_Reserved_LSB 0x35
-#define QIB_7220_SendBufBase_Reserved_RMASK 0x7FF
-#define QIB_7220_SendBufBase_BaseAddr_LargePIO_LSB 0x20
-#define QIB_7220_SendBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
-#define QIB_7220_SendBufBase_Reserved1_LSB 0x15
-#define QIB_7220_SendBufBase_Reserved1_RMASK 0x7FF
-#define QIB_7220_SendBufBase_BaseAddr_SmallPIO_LSB 0x0
-#define QIB_7220_SendBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
-
-#define QIB_7220_SendBufSize_OFFS 0x1D0
-#define QIB_7220_SendBufSize_Reserved_LSB 0x2D
-#define QIB_7220_SendBufSize_Reserved_RMASK 0xFFFFF
-#define QIB_7220_SendBufSize_Size_LargePIO_LSB 0x20
-#define QIB_7220_SendBufSize_Size_LargePIO_RMASK 0x1FFF
-#define QIB_7220_SendBufSize_Reserved1_LSB 0xC
-#define QIB_7220_SendBufSize_Reserved1_RMASK 0xFFFFF
-#define QIB_7220_SendBufSize_Size_SmallPIO_LSB 0x0
-#define QIB_7220_SendBufSize_Size_SmallPIO_RMASK 0xFFF
-
-#define QIB_7220_SendBufCnt_OFFS 0x1D8
-#define QIB_7220_SendBufCnt_Reserved_LSB 0x24
-#define QIB_7220_SendBufCnt_Reserved_RMASK 0xFFFFFFF
-#define QIB_7220_SendBufCnt_Num_LargeBuffers_LSB 0x20
-#define QIB_7220_SendBufCnt_Num_LargeBuffers_RMASK 0xF
-#define QIB_7220_SendBufCnt_Reserved1_LSB 0x9
-#define QIB_7220_SendBufCnt_Reserved1_RMASK 0x7FFFFF
-#define QIB_7220_SendBufCnt_Num_SmallBuffers_LSB 0x0
-#define QIB_7220_SendBufCnt_Num_SmallBuffers_RMASK 0x1FF
-
-#define QIB_7220_SendBufAvailAddr_OFFS 0x1E0
-#define QIB_7220_SendBufAvailAddr_SendBufAvailAddr_LSB 0x6
-#define QIB_7220_SendBufAvailAddr_SendBufAvailAddr_RMASK 0x3FFFFFFFF
-#define QIB_7220_SendBufAvailAddr_Reserved_LSB 0x0
-#define QIB_7220_SendBufAvailAddr_Reserved_RMASK 0x3F
-
-#define QIB_7220_TxIntMemBase_OFFS 0x1E8
-
-#define QIB_7220_TxIntMemSize_OFFS 0x1F0
-
-#define QIB_7220_SendDmaBase_OFFS 0x1F8
-#define QIB_7220_SendDmaBase_Reserved_LSB 0x30
-#define QIB_7220_SendDmaBase_Reserved_RMASK 0xFFFF
-#define QIB_7220_SendDmaBase_SendDmaBase_LSB 0x0
-#define QIB_7220_SendDmaBase_SendDmaBase_RMASK 0xFFFFFFFFFFFF
-
-#define QIB_7220_SendDmaLenGen_OFFS 0x200
-#define QIB_7220_SendDmaLenGen_Reserved_LSB 0x13
-#define QIB_7220_SendDmaLenGen_Reserved_RMASK 0x1FFFFFFFFFFF
-#define QIB_7220_SendDmaLenGen_Generation_LSB 0x10
-#define QIB_7220_SendDmaLenGen_Generation_MSB 0x12
-#define QIB_7220_SendDmaLenGen_Generation_RMASK 0x7
-#define QIB_7220_SendDmaLenGen_Length_LSB 0x0
-#define QIB_7220_SendDmaLenGen_Length_RMASK 0xFFFF
-
-#define QIB_7220_SendDmaTail_OFFS 0x208
-#define QIB_7220_SendDmaTail_Reserved_LSB 0x10
-#define QIB_7220_SendDmaTail_Reserved_RMASK 0xFFFFFFFFFFFF
-#define QIB_7220_SendDmaTail_SendDmaTail_LSB 0x0
-#define QIB_7220_SendDmaTail_SendDmaTail_RMASK 0xFFFF
-
-#define QIB_7220_SendDmaHead_OFFS 0x210
-#define QIB_7220_SendDmaHead_Reserved_LSB 0x30
-#define QIB_7220_SendDmaHead_Reserved_RMASK 0xFFFF
-#define QIB_7220_SendDmaHead_InternalSendDmaHead_LSB 0x20
-#define QIB_7220_SendDmaHead_InternalSendDmaHead_RMASK 0xFFFF
-#define QIB_7220_SendDmaHead_Reserved1_LSB 0x10
-#define QIB_7220_SendDmaHead_Reserved1_RMASK 0xFFFF
-#define QIB_7220_SendDmaHead_SendDmaHead_LSB 0x0
-#define QIB_7220_SendDmaHead_SendDmaHead_RMASK 0xFFFF
-
-#define QIB_7220_SendDmaHeadAddr_OFFS 0x218
-#define QIB_7220_SendDmaHeadAddr_Reserved_LSB 0x30
-#define QIB_7220_SendDmaHeadAddr_Reserved_RMASK 0xFFFF
-#define QIB_7220_SendDmaHeadAddr_SendDmaHeadAddr_LSB 0x0
-#define QIB_7220_SendDmaHeadAddr_SendDmaHeadAddr_RMASK 0xFFFFFFFFFFFF
-
-#define QIB_7220_SendDmaBufMask0_OFFS 0x220
-#define QIB_7220_SendDmaBufMask0_BufMask_63_0_LSB 0x0
-#define QIB_7220_SendDmaBufMask0_BufMask_63_0_RMASK 0x0
-
-#define QIB_7220_SendDmaStatus_OFFS 0x238
-#define QIB_7220_SendDmaStatus_ScoreBoardDrainInProg_LSB 0x3F
-#define QIB_7220_SendDmaStatus_ScoreBoardDrainInProg_RMASK 0x1
-#define QIB_7220_SendDmaStatus_AbortInProg_LSB 0x3E
-#define QIB_7220_SendDmaStatus_AbortInProg_RMASK 0x1
-#define QIB_7220_SendDmaStatus_InternalSDmaEnable_LSB 0x3D
-#define QIB_7220_SendDmaStatus_InternalSDmaEnable_RMASK 0x1
-#define QIB_7220_SendDmaStatus_ScbDescIndex_13_0_LSB 0x2F
-#define QIB_7220_SendDmaStatus_ScbDescIndex_13_0_RMASK 0x3FFF
-#define QIB_7220_SendDmaStatus_RpyLowAddr_6_0_LSB 0x28
-#define QIB_7220_SendDmaStatus_RpyLowAddr_6_0_RMASK 0x7F
-#define QIB_7220_SendDmaStatus_RpyTag_7_0_LSB 0x20
-#define QIB_7220_SendDmaStatus_RpyTag_7_0_RMASK 0xFF
-#define QIB_7220_SendDmaStatus_ScbFull_LSB 0x1F
-#define QIB_7220_SendDmaStatus_ScbFull_RMASK 0x1
-#define QIB_7220_SendDmaStatus_ScbEmpty_LSB 0x1E
-#define QIB_7220_SendDmaStatus_ScbEmpty_RMASK 0x1
-#define QIB_7220_SendDmaStatus_ScbEntryValid_LSB 0x1D
-#define QIB_7220_SendDmaStatus_ScbEntryValid_RMASK 0x1
-#define QIB_7220_SendDmaStatus_ScbFetchDescFlag_LSB 0x1C
-#define QIB_7220_SendDmaStatus_ScbFetchDescFlag_RMASK 0x1
-#define QIB_7220_SendDmaStatus_SplFifoReadyToGo_LSB 0x1B
-#define QIB_7220_SendDmaStatus_SplFifoReadyToGo_RMASK 0x1
-#define QIB_7220_SendDmaStatus_SplFifoDisarmed_LSB 0x1A
-#define QIB_7220_SendDmaStatus_SplFifoDisarmed_RMASK 0x1
-#define QIB_7220_SendDmaStatus_SplFifoEmpty_LSB 0x19
-#define QIB_7220_SendDmaStatus_SplFifoEmpty_RMASK 0x1
-#define QIB_7220_SendDmaStatus_SplFifoFull_LSB 0x18
-#define QIB_7220_SendDmaStatus_SplFifoFull_RMASK 0x1
-#define QIB_7220_SendDmaStatus_SplFifoBufNum_LSB 0x10
-#define QIB_7220_SendDmaStatus_SplFifoBufNum_RMASK 0xFF
-#define QIB_7220_SendDmaStatus_SplFifoDescIndex_LSB 0x0
-#define QIB_7220_SendDmaStatus_SplFifoDescIndex_RMASK 0xFFFF
-
-#define QIB_7220_SendBufErr0_OFFS 0x240
-#define QIB_7220_SendBufErr0_SendBufErr_63_0_LSB 0x0
-#define QIB_7220_SendBufErr0_SendBufErr_63_0_RMASK 0x0
-
-#define QIB_7220_RcvHdrAddr0_OFFS 0x270
-#define QIB_7220_RcvHdrAddr0_RcvHdrAddr0_LSB 0x2
-#define QIB_7220_RcvHdrAddr0_RcvHdrAddr0_RMASK 0x3FFFFFFFFF
-#define QIB_7220_RcvHdrAddr0_Reserved_LSB 0x0
-#define QIB_7220_RcvHdrAddr0_Reserved_RMASK 0x3
-
-#define QIB_7220_RcvHdrTailAddr0_OFFS 0x300
-#define QIB_7220_RcvHdrTailAddr0_RcvHdrTailAddr0_LSB 0x2
-#define QIB_7220_RcvHdrTailAddr0_RcvHdrTailAddr0_RMASK 0x3FFFFFFFFF
-#define QIB_7220_RcvHdrTailAddr0_Reserved_LSB 0x0
-#define QIB_7220_RcvHdrTailAddr0_Reserved_RMASK 0x3
-
-#define QIB_7220_ibsd_epb_access_ctrl_OFFS 0x3C0
-#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_granted_LSB 0x8
-#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_granted_RMASK 0x1
-#define QIB_7220_ibsd_epb_access_ctrl_Reserved_LSB 0x1
-#define QIB_7220_ibsd_epb_access_ctrl_Reserved_RMASK 0x7F
-#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_LSB 0x0
-#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_RMASK 0x1
-
-#define QIB_7220_ibsd_epb_transaction_reg_OFFS 0x3C8
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_rdy_LSB 0x1F
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_rdy_RMASK 0x1
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_req_error_LSB 0x1E
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_req_error_RMASK 0x1
-#define QIB_7220_ibsd_epb_transaction_reg_Reserved_LSB 0x1D
-#define QIB_7220_ibsd_epb_transaction_reg_Reserved_RMASK 0x1
-#define QIB_7220_ibsd_epb_transaction_reg_mem_data_parity_LSB 0x1C
-#define QIB_7220_ibsd_epb_transaction_reg_mem_data_parity_RMASK 0x1
-#define QIB_7220_ibsd_epb_transaction_reg_Reserved1_LSB 0x1B
-#define QIB_7220_ibsd_epb_transaction_reg_Reserved1_RMASK 0x1
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_cs_LSB 0x19
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_cs_RMASK 0x3
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_read_write_LSB 0x18
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_read_write_RMASK 0x1
-#define QIB_7220_ibsd_epb_transaction_reg_Reserved2_LSB 0x17
-#define QIB_7220_ibsd_epb_transaction_reg_Reserved2_RMASK 0x1
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_address_LSB 0x8
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_address_RMASK 0x7FFF
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_data_LSB 0x0
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_data_RMASK 0xFF
-
-#define QIB_7220_XGXSCfg_OFFS 0x3D8
-#define QIB_7220_XGXSCfg_sel_link_down_for_fctrl_lane_sync_reset_LSB 0x3F
-#define QIB_7220_XGXSCfg_sel_link_down_for_fctrl_lane_sync_reset_RMASK 0x1
-#define QIB_7220_XGXSCfg_Reserved_LSB 0x13
-#define QIB_7220_XGXSCfg_Reserved_RMASK 0xFFFFFFFFFFF
-#define QIB_7220_XGXSCfg_link_sync_mask_LSB 0x9
-#define QIB_7220_XGXSCfg_link_sync_mask_RMASK 0x3FF
-#define QIB_7220_XGXSCfg_Reserved1_LSB 0x3
-#define QIB_7220_XGXSCfg_Reserved1_RMASK 0x3F
-#define QIB_7220_XGXSCfg_xcv_reset_LSB 0x2
-#define QIB_7220_XGXSCfg_xcv_reset_RMASK 0x1
-#define QIB_7220_XGXSCfg_Reserved2_LSB 0x1
-#define QIB_7220_XGXSCfg_Reserved2_RMASK 0x1
-#define QIB_7220_XGXSCfg_tx_rx_reset_LSB 0x0
-#define QIB_7220_XGXSCfg_tx_rx_reset_RMASK 0x1
-
-#define QIB_7220_IBSerDesCtrl_OFFS 0x3E0
-#define QIB_7220_IBSerDesCtrl_Reserved_LSB 0x2D
-#define QIB_7220_IBSerDesCtrl_Reserved_RMASK 0x7FFFF
-#define QIB_7220_IBSerDesCtrl_INT_uC_LSB 0x2C
-#define QIB_7220_IBSerDesCtrl_INT_uC_RMASK 0x1
-#define QIB_7220_IBSerDesCtrl_CKSEL_uC_LSB 0x2A
-#define QIB_7220_IBSerDesCtrl_CKSEL_uC_RMASK 0x3
-#define QIB_7220_IBSerDesCtrl_PLLN_LSB 0x28
-#define QIB_7220_IBSerDesCtrl_PLLN_RMASK 0x3
-#define QIB_7220_IBSerDesCtrl_PLLM_LSB 0x25
-#define QIB_7220_IBSerDesCtrl_PLLM_RMASK 0x7
-#define QIB_7220_IBSerDesCtrl_TXOBPD_LSB 0x24
-#define QIB_7220_IBSerDesCtrl_TXOBPD_RMASK 0x1
-#define QIB_7220_IBSerDesCtrl_TWC_LSB 0x23
-#define QIB_7220_IBSerDesCtrl_TWC_RMASK 0x1
-#define QIB_7220_IBSerDesCtrl_RXIDLE_LSB 0x22
-#define QIB_7220_IBSerDesCtrl_RXIDLE_RMASK 0x1
-#define QIB_7220_IBSerDesCtrl_RXINV_LSB 0x21
-#define QIB_7220_IBSerDesCtrl_RXINV_RMASK 0x1
-#define QIB_7220_IBSerDesCtrl_TXINV_LSB 0x20
-#define QIB_7220_IBSerDesCtrl_TXINV_RMASK 0x1
-#define QIB_7220_IBSerDesCtrl_Reserved1_LSB 0x12
-#define QIB_7220_IBSerDesCtrl_Reserved1_RMASK 0x3FFF
-#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForRXEQ_LSB 0xD
-#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForRXEQ_RMASK 0x1F
-#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForDDS_LSB 0x8
-#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForDDS_RMASK 0x1F
-#define QIB_7220_IBSerDesCtrl_Reserved2_LSB 0x1
-#define QIB_7220_IBSerDesCtrl_Reserved2_RMASK 0x7F
-#define QIB_7220_IBSerDesCtrl_ResetIB_uC_Core_LSB 0x0
-#define QIB_7220_IBSerDesCtrl_ResetIB_uC_Core_RMASK 0x1
-
-#define QIB_7220_pciesd_epb_access_ctrl_OFFS 0x400
-#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_granted_LSB 0x8
-#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_granted_RMASK 0x1
-#define QIB_7220_pciesd_epb_access_ctrl_Reserved_LSB 0x3
-#define QIB_7220_pciesd_epb_access_ctrl_Reserved_RMASK 0x1F
-#define QIB_7220_pciesd_epb_access_ctrl_sw_pcieepb_star_en_LSB 0x1
-#define QIB_7220_pciesd_epb_access_ctrl_sw_pcieepb_star_en_RMASK 0x3
-#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_LSB 0x0
-#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_RMASK 0x1
-
-#define QIB_7220_pciesd_epb_transaction_reg_OFFS 0x408
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_rdy_LSB 0x1F
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_rdy_RMASK 0x1
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_req_error_LSB 0x1E
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_req_error_RMASK 0x1
-#define QIB_7220_pciesd_epb_transaction_reg_Reserved_LSB 0x1D
-#define QIB_7220_pciesd_epb_transaction_reg_Reserved_RMASK 0x1
-#define QIB_7220_pciesd_epb_transaction_reg_mem_data_parity_LSB 0x1C
-#define QIB_7220_pciesd_epb_transaction_reg_mem_data_parity_RMASK 0x1
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_cs_LSB 0x19
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_cs_RMASK 0x7
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_read_write_LSB 0x18
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_read_write_RMASK 0x1
-#define QIB_7220_pciesd_epb_transaction_reg_Reserved1_LSB 0x17
-#define QIB_7220_pciesd_epb_transaction_reg_Reserved1_RMASK 0x1
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_address_LSB 0x8
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_address_RMASK 0x7FFF
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_data_LSB 0x0
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_data_RMASK 0xFF
-
-#define QIB_7220_SerDes_DDSRXEQ0_OFFS 0x500
-#define QIB_7220_SerDes_DDSRXEQ0_reg_addr_LSB 0x4
-#define QIB_7220_SerDes_DDSRXEQ0_reg_addr_RMASK 0x3F
-#define QIB_7220_SerDes_DDSRXEQ0_element_num_LSB 0x0
-#define QIB_7220_SerDes_DDSRXEQ0_element_num_RMASK 0xF
-
-#define QIB_7220_LBIntCnt_OFFS 0x13000
-
-#define QIB_7220_LBFlowStallCnt_OFFS 0x13008
-
-#define QIB_7220_TxSDmaDescCnt_OFFS 0x13010
-
-#define QIB_7220_TxUnsupVLErrCnt_OFFS 0x13018
-
-#define QIB_7220_TxDataPktCnt_OFFS 0x13020
-
-#define QIB_7220_TxFlowPktCnt_OFFS 0x13028
-
-#define QIB_7220_TxDwordCnt_OFFS 0x13030
-
-#define QIB_7220_TxLenErrCnt_OFFS 0x13038
-
-#define QIB_7220_TxMaxMinLenErrCnt_OFFS 0x13040
-
-#define QIB_7220_TxUnderrunCnt_OFFS 0x13048
-
-#define QIB_7220_TxFlowStallCnt_OFFS 0x13050
-
-#define QIB_7220_TxDroppedPktCnt_OFFS 0x13058
-
-#define QIB_7220_RxDroppedPktCnt_OFFS 0x13060
-
-#define QIB_7220_RxDataPktCnt_OFFS 0x13068
-
-#define QIB_7220_RxFlowPktCnt_OFFS 0x13070
-
-#define QIB_7220_RxDwordCnt_OFFS 0x13078
-
-#define QIB_7220_RxLenErrCnt_OFFS 0x13080
-
-#define QIB_7220_RxMaxMinLenErrCnt_OFFS 0x13088
-
-#define QIB_7220_RxICRCErrCnt_OFFS 0x13090
-
-#define QIB_7220_RxVCRCErrCnt_OFFS 0x13098
-
-#define QIB_7220_RxFlowCtrlViolCnt_OFFS 0x130A0
-
-#define QIB_7220_RxVersionErrCnt_OFFS 0x130A8
-
-#define QIB_7220_RxLinkMalformCnt_OFFS 0x130B0
-
-#define QIB_7220_RxEBPCnt_OFFS 0x130B8
-
-#define QIB_7220_RxLPCRCErrCnt_OFFS 0x130C0
-
-#define QIB_7220_RxBufOvflCnt_OFFS 0x130C8
-
-#define QIB_7220_RxTIDFullErrCnt_OFFS 0x130D0
-
-#define QIB_7220_RxTIDValidErrCnt_OFFS 0x130D8
-
-#define QIB_7220_RxPKeyMismatchCnt_OFFS 0x130E0
-
-#define QIB_7220_RxP0HdrEgrOvflCnt_OFFS 0x130E8
-
-#define QIB_7220_IBStatusChangeCnt_OFFS 0x13170
-
-#define QIB_7220_IBLinkErrRecoveryCnt_OFFS 0x13178
-
-#define QIB_7220_IBLinkDownedCnt_OFFS 0x13180
-
-#define QIB_7220_IBSymbolErrCnt_OFFS 0x13188
-
-#define QIB_7220_RxVL15DroppedPktCnt_OFFS 0x13190
-
-#define QIB_7220_RxOtherLocalPhyErrCnt_OFFS 0x13198
-
-#define QIB_7220_PcieRetryBufDiagQwordCnt_OFFS 0x131A0
-
-#define QIB_7220_ExcessBufferOvflCnt_OFFS 0x131A8
-
-#define QIB_7220_LocalLinkIntegrityErrCnt_OFFS 0x131B0
-
-#define QIB_7220_RxVlErrCnt_OFFS 0x131B8
-
-#define QIB_7220_RxDlidFltrCnt_OFFS 0x131C0
-
-#define QIB_7220_CNT_0131C8_OFFS 0x131C8
-
-#define QIB_7220_PSStat_OFFS 0x13200
-
-#define QIB_7220_PSStart_OFFS 0x13208
-
-#define QIB_7220_PSInterval_OFFS 0x13210
-
-#define QIB_7220_PSRcvDataCount_OFFS 0x13218
-
-#define QIB_7220_PSRcvPktsCount_OFFS 0x13220
-
-#define QIB_7220_PSXmitDataCount_OFFS 0x13228
-
-#define QIB_7220_PSXmitPktsCount_OFFS 0x13230
-
-#define QIB_7220_PSXmitWaitCount_OFFS 0x13238
-
-#define QIB_7220_CNT_013240_OFFS 0x13240
-
-#define QIB_7220_RcvEgrArray_OFFS 0x14000
-
-#define QIB_7220_MEM_038000_OFFS 0x38000
-
-#define QIB_7220_RcvTIDArray0_OFFS 0x53000
-
-#define QIB_7220_PIOLaunchFIFO_OFFS 0x64000
-
-#define QIB_7220_MEM_064480_OFFS 0x64480
-
-#define QIB_7220_SendPIOpbcCache_OFFS 0x64800
-
-#define QIB_7220_MEM_064C80_OFFS 0x64C80
-
-#define QIB_7220_PreLaunchFIFO_OFFS 0x65000
-
-#define QIB_7220_MEM_065080_OFFS 0x65080
-
-#define QIB_7220_ScoreBoard_OFFS 0x65400
-
-#define QIB_7220_MEM_065440_OFFS 0x65440
-
-#define QIB_7220_DescriptorFIFO_OFFS 0x65800
-
-#define QIB_7220_MEM_065880_OFFS 0x65880
-
-#define QIB_7220_RcvBuf1_OFFS 0x72000
-
-#define QIB_7220_MEM_074800_OFFS 0x74800
-
-#define QIB_7220_RcvBuf2_OFFS 0x75000
-
-#define QIB_7220_MEM_076400_OFFS 0x76400
-
-#define QIB_7220_RcvFlags_OFFS 0x77000
-
-#define QIB_7220_MEM_078400_OFFS 0x78400
-
-#define QIB_7220_RcvLookupBuf1_OFFS 0x79000
-
-#define QIB_7220_MEM_07A400_OFFS 0x7A400
-
-#define QIB_7220_RcvDMADatBuf_OFFS 0x7B000
-
-#define QIB_7220_RcvDMAHdrBuf_OFFS 0x7B800
-
-#define QIB_7220_MiscRXEIntMem_OFFS 0x7C000
-
-#define QIB_7220_MEM_07D400_OFFS 0x7D400
-
-#define QIB_7220_PCIERcvBuf_OFFS 0x80000
-
-#define QIB_7220_PCIERetryBuf_OFFS 0x84000
-
-#define QIB_7220_PCIERcvBufRdToWrAddr_OFFS 0x88000
-
-#define QIB_7220_PCIECplBuf_OFFS 0x90000
-
-#define QIB_7220_IBSerDesMappTable_OFFS 0x94000
-
-#define QIB_7220_MEM_095000_OFFS 0x95000
-
-#define QIB_7220_SendBuf0_MA_OFFS 0x100000
-
-#define QIB_7220_MEM_1A0000_OFFS 0x1A0000
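
[Editor's note: the deleted qib_7220_regs.h above follows a uniform, mechanically generated naming scheme: <Reg>_OFFS is the register's byte offset, <Reg>_<Field>_LSB is the field's starting bit position, and <Reg>_<Field>_RMASK is the mask to apply after shifting the register value right by LSB. The minimal C sketch below illustrates that decode pattern using two macros taken verbatim from the header; the helper names qib_7220_get_field() and qib_7220_sdma_enabled() are hypothetical illustrations, not functions from the qib driver, which open-codes the equivalent shift-and-mask operations.]

#include <stdint.h>

/* Field definitions copied from the deleted header for a self-contained example. */
#define QIB_7220_SendCtrl_SDmaEnable_LSB 0xB
#define QIB_7220_SendCtrl_SDmaEnable_RMASK 0x1

/* Extract a field from a 64-bit register snapshot using its LSB/RMASK pair. */
static inline uint64_t qib_7220_get_field(uint64_t regval,
					  unsigned int lsb, uint64_t rmask)
{
	return (regval >> lsb) & rmask;
}

/* Example: test whether the SDmaEnable bit is set in a SendCtrl value. */
static inline int qib_7220_sdma_enabled(uint64_t sendctrl)
{
	return qib_7220_get_field(sendctrl,
				  QIB_7220_SendCtrl_SDmaEnable_LSB,
				  QIB_7220_SendCtrl_SDmaEnable_RMASK) != 0;
}
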
diff --git a/drivers/infiniband/hw/qib/qib_7322_regs.h b/drivers/infiniband/hw/qib/qib_7322_regs.h
deleted file mode 100644
index 32dc81ff8d4a..000000000000
--- a/drivers/infiniband/hw/qib/qib_7322_regs.h
+++ /dev/null
@@ -1,3163 +0,0 @@
-/*
- * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
-
-#define QIB_7322_Revision_OFFS 0x0
-#define QIB_7322_Revision_DEF 0x0000000002010601
-#define QIB_7322_Revision_R_Simulator_LSB 0x3F
-#define QIB_7322_Revision_R_Simulator_MSB 0x3F
-#define QIB_7322_Revision_R_Simulator_RMASK 0x1
-#define QIB_7322_Revision_R_Emulation_LSB 0x3E
-#define QIB_7322_Revision_R_Emulation_MSB 0x3E
-#define QIB_7322_Revision_R_Emulation_RMASK 0x1
-#define QIB_7322_Revision_R_Emulation_Revcode_LSB 0x28
-#define QIB_7322_Revision_R_Emulation_Revcode_MSB 0x3D
-#define QIB_7322_Revision_R_Emulation_Revcode_RMASK 0x3FFFFF
-#define QIB_7322_Revision_BoardID_LSB 0x20
-#define QIB_7322_Revision_BoardID_MSB 0x27
-#define QIB_7322_Revision_BoardID_RMASK 0xFF
-#define QIB_7322_Revision_R_SW_LSB 0x18
-#define QIB_7322_Revision_R_SW_MSB 0x1F
-#define QIB_7322_Revision_R_SW_RMASK 0xFF
-#define QIB_7322_Revision_R_Arch_LSB 0x10
-#define QIB_7322_Revision_R_Arch_MSB 0x17
-#define QIB_7322_Revision_R_Arch_RMASK 0xFF
-#define QIB_7322_Revision_R_ChipRevMajor_LSB 0x8
-#define QIB_7322_Revision_R_ChipRevMajor_MSB 0xF
-#define QIB_7322_Revision_R_ChipRevMajor_RMASK 0xFF
-#define QIB_7322_Revision_R_ChipRevMinor_LSB 0x0
-#define QIB_7322_Revision_R_ChipRevMinor_MSB 0x7
-#define QIB_7322_Revision_R_ChipRevMinor_RMASK 0xFF
-
-#define QIB_7322_Control_OFFS 0x8
-#define QIB_7322_Control_DEF 0x0000000000000000
-#define QIB_7322_Control_PCIECplQDiagEn_LSB 0x6
-#define QIB_7322_Control_PCIECplQDiagEn_MSB 0x6
-#define QIB_7322_Control_PCIECplQDiagEn_RMASK 0x1
-#define QIB_7322_Control_PCIEPostQDiagEn_LSB 0x5
-#define QIB_7322_Control_PCIEPostQDiagEn_MSB 0x5
-#define QIB_7322_Control_PCIEPostQDiagEn_RMASK 0x1
-#define QIB_7322_Control_SDmaDescFetchPriorityEn_LSB 0x4
-#define QIB_7322_Control_SDmaDescFetchPriorityEn_MSB 0x4
-#define QIB_7322_Control_SDmaDescFetchPriorityEn_RMASK 0x1
-#define QIB_7322_Control_PCIERetryBufDiagEn_LSB 0x3
-#define QIB_7322_Control_PCIERetryBufDiagEn_MSB 0x3
-#define QIB_7322_Control_PCIERetryBufDiagEn_RMASK 0x1
-#define QIB_7322_Control_FreezeMode_LSB 0x1
-#define QIB_7322_Control_FreezeMode_MSB 0x1
-#define QIB_7322_Control_FreezeMode_RMASK 0x1
-#define QIB_7322_Control_SyncReset_LSB 0x0
-#define QIB_7322_Control_SyncReset_MSB 0x0
-#define QIB_7322_Control_SyncReset_RMASK 0x1
-
-#define QIB_7322_PageAlign_OFFS 0x10
-#define QIB_7322_PageAlign_DEF 0x0000000000001000
-
-#define QIB_7322_ContextCnt_OFFS 0x18
-#define QIB_7322_ContextCnt_DEF 0x0000000000000012
-
-#define QIB_7322_Scratch_OFFS 0x20
-#define QIB_7322_Scratch_DEF 0x0000000000000000
-
-#define QIB_7322_CntrRegBase_OFFS 0x28
-#define QIB_7322_CntrRegBase_DEF 0x0000000000011000
-
-#define QIB_7322_SendRegBase_OFFS 0x30
-#define QIB_7322_SendRegBase_DEF 0x0000000000003000
-
-#define QIB_7322_UserRegBase_OFFS 0x38
-#define QIB_7322_UserRegBase_DEF 0x0000000000200000
-
-#define QIB_7322_IntMask_OFFS 0x68
-#define QIB_7322_IntMask_DEF 0x0000000000000000
-#define QIB_7322_IntMask_SDmaIntMask_1_LSB 0x3F
-#define QIB_7322_IntMask_SDmaIntMask_1_MSB 0x3F
-#define QIB_7322_IntMask_SDmaIntMask_1_RMASK 0x1
-#define QIB_7322_IntMask_SDmaIntMask_0_LSB 0x3E
-#define QIB_7322_IntMask_SDmaIntMask_0_MSB 0x3E
-#define QIB_7322_IntMask_SDmaIntMask_0_RMASK 0x1
-#define QIB_7322_IntMask_SDmaProgressIntMask_1_LSB 0x3D
-#define QIB_7322_IntMask_SDmaProgressIntMask_1_MSB 0x3D
-#define QIB_7322_IntMask_SDmaProgressIntMask_1_RMASK 0x1
-#define QIB_7322_IntMask_SDmaProgressIntMask_0_LSB 0x3C
-#define QIB_7322_IntMask_SDmaProgressIntMask_0_MSB 0x3C
-#define QIB_7322_IntMask_SDmaProgressIntMask_0_RMASK 0x1
-#define QIB_7322_IntMask_SDmaIdleIntMask_1_LSB 0x3B
-#define QIB_7322_IntMask_SDmaIdleIntMask_1_MSB 0x3B
-#define QIB_7322_IntMask_SDmaIdleIntMask_1_RMASK 0x1
-#define QIB_7322_IntMask_SDmaIdleIntMask_0_LSB 0x3A
-#define QIB_7322_IntMask_SDmaIdleIntMask_0_MSB 0x3A
-#define QIB_7322_IntMask_SDmaIdleIntMask_0_RMASK 0x1
-#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_LSB 0x39
-#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_MSB 0x39
-#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_RMASK 0x1
-#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_LSB 0x38
-#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_MSB 0x38
-#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg17IntMask_LSB 0x31
-#define QIB_7322_IntMask_RcvUrg17IntMask_MSB 0x31
-#define QIB_7322_IntMask_RcvUrg17IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg16IntMask_LSB 0x30
-#define QIB_7322_IntMask_RcvUrg16IntMask_MSB 0x30
-#define QIB_7322_IntMask_RcvUrg16IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg15IntMask_LSB 0x2F
-#define QIB_7322_IntMask_RcvUrg15IntMask_MSB 0x2F
-#define QIB_7322_IntMask_RcvUrg15IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg14IntMask_LSB 0x2E
-#define QIB_7322_IntMask_RcvUrg14IntMask_MSB 0x2E
-#define QIB_7322_IntMask_RcvUrg14IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg13IntMask_LSB 0x2D
-#define QIB_7322_IntMask_RcvUrg13IntMask_MSB 0x2D
-#define QIB_7322_IntMask_RcvUrg13IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg12IntMask_LSB 0x2C
-#define QIB_7322_IntMask_RcvUrg12IntMask_MSB 0x2C
-#define QIB_7322_IntMask_RcvUrg12IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg11IntMask_LSB 0x2B
-#define QIB_7322_IntMask_RcvUrg11IntMask_MSB 0x2B
-#define QIB_7322_IntMask_RcvUrg11IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg10IntMask_LSB 0x2A
-#define QIB_7322_IntMask_RcvUrg10IntMask_MSB 0x2A
-#define QIB_7322_IntMask_RcvUrg10IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg9IntMask_LSB 0x29
-#define QIB_7322_IntMask_RcvUrg9IntMask_MSB 0x29
-#define QIB_7322_IntMask_RcvUrg9IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg8IntMask_LSB 0x28
-#define QIB_7322_IntMask_RcvUrg8IntMask_MSB 0x28
-#define QIB_7322_IntMask_RcvUrg8IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg7IntMask_LSB 0x27
-#define QIB_7322_IntMask_RcvUrg7IntMask_MSB 0x27
-#define QIB_7322_IntMask_RcvUrg7IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg6IntMask_LSB 0x26
-#define QIB_7322_IntMask_RcvUrg6IntMask_MSB 0x26
-#define QIB_7322_IntMask_RcvUrg6IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg5IntMask_LSB 0x25
-#define QIB_7322_IntMask_RcvUrg5IntMask_MSB 0x25
-#define QIB_7322_IntMask_RcvUrg5IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg4IntMask_LSB 0x24
-#define QIB_7322_IntMask_RcvUrg4IntMask_MSB 0x24
-#define QIB_7322_IntMask_RcvUrg4IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg3IntMask_LSB 0x23
-#define QIB_7322_IntMask_RcvUrg3IntMask_MSB 0x23
-#define QIB_7322_IntMask_RcvUrg3IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg2IntMask_LSB 0x22
-#define QIB_7322_IntMask_RcvUrg2IntMask_MSB 0x22
-#define QIB_7322_IntMask_RcvUrg2IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg1IntMask_LSB 0x21
-#define QIB_7322_IntMask_RcvUrg1IntMask_MSB 0x21
-#define QIB_7322_IntMask_RcvUrg1IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg0IntMask_LSB 0x20
-#define QIB_7322_IntMask_RcvUrg0IntMask_MSB 0x20
-#define QIB_7322_IntMask_RcvUrg0IntMask_RMASK 0x1
-#define QIB_7322_IntMask_ErrIntMask_1_LSB 0x1F
-#define QIB_7322_IntMask_ErrIntMask_1_MSB 0x1F
-#define QIB_7322_IntMask_ErrIntMask_1_RMASK 0x1
-#define QIB_7322_IntMask_ErrIntMask_0_LSB 0x1E
-#define QIB_7322_IntMask_ErrIntMask_0_MSB 0x1E
-#define QIB_7322_IntMask_ErrIntMask_0_RMASK 0x1
-#define QIB_7322_IntMask_ErrIntMask_LSB 0x1D
-#define QIB_7322_IntMask_ErrIntMask_MSB 0x1D
-#define QIB_7322_IntMask_ErrIntMask_RMASK 0x1
-#define QIB_7322_IntMask_AssertGPIOIntMask_LSB 0x1C
-#define QIB_7322_IntMask_AssertGPIOIntMask_MSB 0x1C
-#define QIB_7322_IntMask_AssertGPIOIntMask_RMASK 0x1
-#define QIB_7322_IntMask_SendDoneIntMask_1_LSB 0x19
-#define QIB_7322_IntMask_SendDoneIntMask_1_MSB 0x19
-#define QIB_7322_IntMask_SendDoneIntMask_1_RMASK 0x1
-#define QIB_7322_IntMask_SendDoneIntMask_0_LSB 0x18
-#define QIB_7322_IntMask_SendDoneIntMask_0_MSB 0x18
-#define QIB_7322_IntMask_SendDoneIntMask_0_RMASK 0x1
-#define QIB_7322_IntMask_SendBufAvailIntMask_LSB 0x17
-#define QIB_7322_IntMask_SendBufAvailIntMask_MSB 0x17
-#define QIB_7322_IntMask_SendBufAvailIntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail17IntMask_LSB 0x11
-#define QIB_7322_IntMask_RcvAvail17IntMask_MSB 0x11
-#define QIB_7322_IntMask_RcvAvail17IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail16IntMask_LSB 0x10
-#define QIB_7322_IntMask_RcvAvail16IntMask_MSB 0x10
-#define QIB_7322_IntMask_RcvAvail16IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail15IntMask_LSB 0xF
-#define QIB_7322_IntMask_RcvAvail15IntMask_MSB 0xF
-#define QIB_7322_IntMask_RcvAvail15IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail14IntMask_LSB 0xE
-#define QIB_7322_IntMask_RcvAvail14IntMask_MSB 0xE
-#define QIB_7322_IntMask_RcvAvail14IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail13IntMask_LSB 0xD
-#define QIB_7322_IntMask_RcvAvail13IntMask_MSB 0xD
-#define QIB_7322_IntMask_RcvAvail13IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail12IntMask_LSB 0xC
-#define QIB_7322_IntMask_RcvAvail12IntMask_MSB 0xC
-#define QIB_7322_IntMask_RcvAvail12IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail11IntMask_LSB 0xB
-#define QIB_7322_IntMask_RcvAvail11IntMask_MSB 0xB
-#define QIB_7322_IntMask_RcvAvail11IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail10IntMask_LSB 0xA
-#define QIB_7322_IntMask_RcvAvail10IntMask_MSB 0xA
-#define QIB_7322_IntMask_RcvAvail10IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail9IntMask_LSB 0x9
-#define QIB_7322_IntMask_RcvAvail9IntMask_MSB 0x9
-#define QIB_7322_IntMask_RcvAvail9IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail8IntMask_LSB 0x8
-#define QIB_7322_IntMask_RcvAvail8IntMask_MSB 0x8
-#define QIB_7322_IntMask_RcvAvail8IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail7IntMask_LSB 0x7
-#define QIB_7322_IntMask_RcvAvail7IntMask_MSB 0x7
-#define QIB_7322_IntMask_RcvAvail7IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail6IntMask_LSB 0x6
-#define QIB_7322_IntMask_RcvAvail6IntMask_MSB 0x6
-#define QIB_7322_IntMask_RcvAvail6IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail5IntMask_LSB 0x5
-#define QIB_7322_IntMask_RcvAvail5IntMask_MSB 0x5
-#define QIB_7322_IntMask_RcvAvail5IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail4IntMask_LSB 0x4
-#define QIB_7322_IntMask_RcvAvail4IntMask_MSB 0x4
-#define QIB_7322_IntMask_RcvAvail4IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail3IntMask_LSB 0x3
-#define QIB_7322_IntMask_RcvAvail3IntMask_MSB 0x3
-#define QIB_7322_IntMask_RcvAvail3IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail2IntMask_LSB 0x2
-#define QIB_7322_IntMask_RcvAvail2IntMask_MSB 0x2
-#define QIB_7322_IntMask_RcvAvail2IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail1IntMask_LSB 0x1
-#define QIB_7322_IntMask_RcvAvail1IntMask_MSB 0x1
-#define QIB_7322_IntMask_RcvAvail1IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail0IntMask_LSB 0x0
-#define QIB_7322_IntMask_RcvAvail0IntMask_MSB 0x0
-#define QIB_7322_IntMask_RcvAvail0IntMask_RMASK 0x1
-
-#define QIB_7322_IntStatus_OFFS 0x70
-#define QIB_7322_IntStatus_DEF 0x0000000000000000
-#define QIB_7322_IntStatus_SDmaInt_1_LSB 0x3F
-#define QIB_7322_IntStatus_SDmaInt_1_MSB 0x3F
-#define QIB_7322_IntStatus_SDmaInt_1_RMASK 0x1
-#define QIB_7322_IntStatus_SDmaInt_0_LSB 0x3E
-#define QIB_7322_IntStatus_SDmaInt_0_MSB 0x3E
-#define QIB_7322_IntStatus_SDmaInt_0_RMASK 0x1
-#define QIB_7322_IntStatus_SDmaProgressInt_1_LSB 0x3D
-#define QIB_7322_IntStatus_SDmaProgressInt_1_MSB 0x3D
-#define QIB_7322_IntStatus_SDmaProgressInt_1_RMASK 0x1
-#define QIB_7322_IntStatus_SDmaProgressInt_0_LSB 0x3C
-#define QIB_7322_IntStatus_SDmaProgressInt_0_MSB 0x3C
-#define QIB_7322_IntStatus_SDmaProgressInt_0_RMASK 0x1
-#define QIB_7322_IntStatus_SDmaIdleInt_1_LSB 0x3B
-#define QIB_7322_IntStatus_SDmaIdleInt_1_MSB 0x3B
-#define QIB_7322_IntStatus_SDmaIdleInt_1_RMASK 0x1
-#define QIB_7322_IntStatus_SDmaIdleInt_0_LSB 0x3A
-#define QIB_7322_IntStatus_SDmaIdleInt_0_MSB 0x3A
-#define QIB_7322_IntStatus_SDmaIdleInt_0_RMASK 0x1
-#define QIB_7322_IntStatus_SDmaCleanupDone_1_LSB 0x39
-#define QIB_7322_IntStatus_SDmaCleanupDone_1_MSB 0x39
-#define QIB_7322_IntStatus_SDmaCleanupDone_1_RMASK 0x1
-#define QIB_7322_IntStatus_SDmaCleanupDone_0_LSB 0x38
-#define QIB_7322_IntStatus_SDmaCleanupDone_0_MSB 0x38
-#define QIB_7322_IntStatus_SDmaCleanupDone_0_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg17_LSB 0x31
-#define QIB_7322_IntStatus_RcvUrg17_MSB 0x31
-#define QIB_7322_IntStatus_RcvUrg17_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg16_LSB 0x30
-#define QIB_7322_IntStatus_RcvUrg16_MSB 0x30
-#define QIB_7322_IntStatus_RcvUrg16_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg15_LSB 0x2F
-#define QIB_7322_IntStatus_RcvUrg15_MSB 0x2F
-#define QIB_7322_IntStatus_RcvUrg15_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg14_LSB 0x2E
-#define QIB_7322_IntStatus_RcvUrg14_MSB 0x2E
-#define QIB_7322_IntStatus_RcvUrg14_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg13_LSB 0x2D
-#define QIB_7322_IntStatus_RcvUrg13_MSB 0x2D
-#define QIB_7322_IntStatus_RcvUrg13_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg12_LSB 0x2C
-#define QIB_7322_IntStatus_RcvUrg12_MSB 0x2C
-#define QIB_7322_IntStatus_RcvUrg12_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg11_LSB 0x2B
-#define QIB_7322_IntStatus_RcvUrg11_MSB 0x2B
-#define QIB_7322_IntStatus_RcvUrg11_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg10_LSB 0x2A
-#define QIB_7322_IntStatus_RcvUrg10_MSB 0x2A
-#define QIB_7322_IntStatus_RcvUrg10_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg9_LSB 0x29
-#define QIB_7322_IntStatus_RcvUrg9_MSB 0x29
-#define QIB_7322_IntStatus_RcvUrg9_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg8_LSB 0x28
-#define QIB_7322_IntStatus_RcvUrg8_MSB 0x28
-#define QIB_7322_IntStatus_RcvUrg8_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg7_LSB 0x27
-#define QIB_7322_IntStatus_RcvUrg7_MSB 0x27
-#define QIB_7322_IntStatus_RcvUrg7_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg6_LSB 0x26
-#define QIB_7322_IntStatus_RcvUrg6_MSB 0x26
-#define QIB_7322_IntStatus_RcvUrg6_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg5_LSB 0x25
-#define QIB_7322_IntStatus_RcvUrg5_MSB 0x25
-#define QIB_7322_IntStatus_RcvUrg5_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg4_LSB 0x24
-#define QIB_7322_IntStatus_RcvUrg4_MSB 0x24
-#define QIB_7322_IntStatus_RcvUrg4_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg3_LSB 0x23
-#define QIB_7322_IntStatus_RcvUrg3_MSB 0x23
-#define QIB_7322_IntStatus_RcvUrg3_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg2_LSB 0x22
-#define QIB_7322_IntStatus_RcvUrg2_MSB 0x22
-#define QIB_7322_IntStatus_RcvUrg2_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg1_LSB 0x21
-#define QIB_7322_IntStatus_RcvUrg1_MSB 0x21
-#define QIB_7322_IntStatus_RcvUrg1_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg0_LSB 0x20
-#define QIB_7322_IntStatus_RcvUrg0_MSB 0x20
-#define QIB_7322_IntStatus_RcvUrg0_RMASK 0x1
-#define QIB_7322_IntStatus_Err_1_LSB 0x1F
-#define QIB_7322_IntStatus_Err_1_MSB 0x1F
-#define QIB_7322_IntStatus_Err_1_RMASK 0x1
-#define QIB_7322_IntStatus_Err_0_LSB 0x1E
-#define QIB_7322_IntStatus_Err_0_MSB 0x1E
-#define QIB_7322_IntStatus_Err_0_RMASK 0x1
-#define QIB_7322_IntStatus_Err_LSB 0x1D
-#define QIB_7322_IntStatus_Err_MSB 0x1D
-#define QIB_7322_IntStatus_Err_RMASK 0x1
-#define QIB_7322_IntStatus_AssertGPIO_LSB 0x1C
-#define QIB_7322_IntStatus_AssertGPIO_MSB 0x1C
-#define QIB_7322_IntStatus_AssertGPIO_RMASK 0x1
-#define QIB_7322_IntStatus_SendDone_1_LSB 0x19
-#define QIB_7322_IntStatus_SendDone_1_MSB 0x19
-#define QIB_7322_IntStatus_SendDone_1_RMASK 0x1
-#define QIB_7322_IntStatus_SendDone_0_LSB 0x18
-#define QIB_7322_IntStatus_SendDone_0_MSB 0x18
-#define QIB_7322_IntStatus_SendDone_0_RMASK 0x1
-#define QIB_7322_IntStatus_SendBufAvail_LSB 0x17
-#define QIB_7322_IntStatus_SendBufAvail_MSB 0x17
-#define QIB_7322_IntStatus_SendBufAvail_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail17_LSB 0x11
-#define QIB_7322_IntStatus_RcvAvail17_MSB 0x11
-#define QIB_7322_IntStatus_RcvAvail17_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail16_LSB 0x10
-#define QIB_7322_IntStatus_RcvAvail16_MSB 0x10
-#define QIB_7322_IntStatus_RcvAvail16_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail15_LSB 0xF
-#define QIB_7322_IntStatus_RcvAvail15_MSB 0xF
-#define QIB_7322_IntStatus_RcvAvail15_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail14_LSB 0xE
-#define QIB_7322_IntStatus_RcvAvail14_MSB 0xE
-#define QIB_7322_IntStatus_RcvAvail14_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail13_LSB 0xD
-#define QIB_7322_IntStatus_RcvAvail13_MSB 0xD
-#define QIB_7322_IntStatus_RcvAvail13_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail12_LSB 0xC
-#define QIB_7322_IntStatus_RcvAvail12_MSB 0xC
-#define QIB_7322_IntStatus_RcvAvail12_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail11_LSB 0xB
-#define QIB_7322_IntStatus_RcvAvail11_MSB 0xB
-#define QIB_7322_IntStatus_RcvAvail11_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail10_LSB 0xA
-#define QIB_7322_IntStatus_RcvAvail10_MSB 0xA
-#define QIB_7322_IntStatus_RcvAvail10_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail9_LSB 0x9
-#define QIB_7322_IntStatus_RcvAvail9_MSB 0x9
-#define QIB_7322_IntStatus_RcvAvail9_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail8_LSB 0x8
-#define QIB_7322_IntStatus_RcvAvail8_MSB 0x8
-#define QIB_7322_IntStatus_RcvAvail8_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail7_LSB 0x7
-#define QIB_7322_IntStatus_RcvAvail7_MSB 0x7
-#define QIB_7322_IntStatus_RcvAvail7_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail6_LSB 0x6
-#define QIB_7322_IntStatus_RcvAvail6_MSB 0x6
-#define QIB_7322_IntStatus_RcvAvail6_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail5_LSB 0x5
-#define QIB_7322_IntStatus_RcvAvail5_MSB 0x5
-#define QIB_7322_IntStatus_RcvAvail5_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail4_LSB 0x4
-#define QIB_7322_IntStatus_RcvAvail4_MSB 0x4
-#define QIB_7322_IntStatus_RcvAvail4_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail3_LSB 0x3
-#define QIB_7322_IntStatus_RcvAvail3_MSB 0x3
-#define QIB_7322_IntStatus_RcvAvail3_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail2_LSB 0x2
-#define QIB_7322_IntStatus_RcvAvail2_MSB 0x2
-#define QIB_7322_IntStatus_RcvAvail2_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail1_LSB 0x1
-#define QIB_7322_IntStatus_RcvAvail1_MSB 0x1
-#define QIB_7322_IntStatus_RcvAvail1_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail0_LSB 0x0
-#define QIB_7322_IntStatus_RcvAvail0_MSB 0x0
-#define QIB_7322_IntStatus_RcvAvail0_RMASK 0x1
-
-#define QIB_7322_IntClear_OFFS 0x78
-#define QIB_7322_IntClear_DEF 0x0000000000000000
-#define QIB_7322_IntClear_SDmaIntClear_1_LSB 0x3F
-#define QIB_7322_IntClear_SDmaIntClear_1_MSB 0x3F
-#define QIB_7322_IntClear_SDmaIntClear_1_RMASK 0x1
-#define QIB_7322_IntClear_SDmaIntClear_0_LSB 0x3E
-#define QIB_7322_IntClear_SDmaIntClear_0_MSB 0x3E
-#define QIB_7322_IntClear_SDmaIntClear_0_RMASK 0x1
-#define QIB_7322_IntClear_SDmaProgressIntClear_1_LSB 0x3D
-#define QIB_7322_IntClear_SDmaProgressIntClear_1_MSB 0x3D
-#define QIB_7322_IntClear_SDmaProgressIntClear_1_RMASK 0x1
-#define QIB_7322_IntClear_SDmaProgressIntClear_0_LSB 0x3C
-#define QIB_7322_IntClear_SDmaProgressIntClear_0_MSB 0x3C
-#define QIB_7322_IntClear_SDmaProgressIntClear_0_RMASK 0x1
-#define QIB_7322_IntClear_SDmaIdleIntClear_1_LSB 0x3B
-#define QIB_7322_IntClear_SDmaIdleIntClear_1_MSB 0x3B
-#define QIB_7322_IntClear_SDmaIdleIntClear_1_RMASK 0x1
-#define QIB_7322_IntClear_SDmaIdleIntClear_0_LSB 0x3A
-#define QIB_7322_IntClear_SDmaIdleIntClear_0_MSB 0x3A
-#define QIB_7322_IntClear_SDmaIdleIntClear_0_RMASK 0x1
-#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_LSB 0x39
-#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_MSB 0x39
-#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_RMASK 0x1
-#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_LSB 0x38
-#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_MSB 0x38
-#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg17IntClear_LSB 0x31
-#define QIB_7322_IntClear_RcvUrg17IntClear_MSB 0x31
-#define QIB_7322_IntClear_RcvUrg17IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg16IntClear_LSB 0x30
-#define QIB_7322_IntClear_RcvUrg16IntClear_MSB 0x30
-#define QIB_7322_IntClear_RcvUrg16IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg15IntClear_LSB 0x2F
-#define QIB_7322_IntClear_RcvUrg15IntClear_MSB 0x2F
-#define QIB_7322_IntClear_RcvUrg15IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg14IntClear_LSB 0x2E
-#define QIB_7322_IntClear_RcvUrg14IntClear_MSB 0x2E
-#define QIB_7322_IntClear_RcvUrg14IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg13IntClear_LSB 0x2D
-#define QIB_7322_IntClear_RcvUrg13IntClear_MSB 0x2D
-#define QIB_7322_IntClear_RcvUrg13IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg12IntClear_LSB 0x2C
-#define QIB_7322_IntClear_RcvUrg12IntClear_MSB 0x2C
-#define QIB_7322_IntClear_RcvUrg12IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg11IntClear_LSB 0x2B
-#define QIB_7322_IntClear_RcvUrg11IntClear_MSB 0x2B
-#define QIB_7322_IntClear_RcvUrg11IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg10IntClear_LSB 0x2A
-#define QIB_7322_IntClear_RcvUrg10IntClear_MSB 0x2A
-#define QIB_7322_IntClear_RcvUrg10IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg9IntClear_LSB 0x29
-#define QIB_7322_IntClear_RcvUrg9IntClear_MSB 0x29
-#define QIB_7322_IntClear_RcvUrg9IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg8IntClear_LSB 0x28
-#define QIB_7322_IntClear_RcvUrg8IntClear_MSB 0x28
-#define QIB_7322_IntClear_RcvUrg8IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg7IntClear_LSB 0x27
-#define QIB_7322_IntClear_RcvUrg7IntClear_MSB 0x27
-#define QIB_7322_IntClear_RcvUrg7IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg6IntClear_LSB 0x26
-#define QIB_7322_IntClear_RcvUrg6IntClear_MSB 0x26
-#define QIB_7322_IntClear_RcvUrg6IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg5IntClear_LSB 0x25
-#define QIB_7322_IntClear_RcvUrg5IntClear_MSB 0x25
-#define QIB_7322_IntClear_RcvUrg5IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg4IntClear_LSB 0x24
-#define QIB_7322_IntClear_RcvUrg4IntClear_MSB 0x24
-#define QIB_7322_IntClear_RcvUrg4IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg3IntClear_LSB 0x23
-#define QIB_7322_IntClear_RcvUrg3IntClear_MSB 0x23
-#define QIB_7322_IntClear_RcvUrg3IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg2IntClear_LSB 0x22
-#define QIB_7322_IntClear_RcvUrg2IntClear_MSB 0x22
-#define QIB_7322_IntClear_RcvUrg2IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg1IntClear_LSB 0x21
-#define QIB_7322_IntClear_RcvUrg1IntClear_MSB 0x21
-#define QIB_7322_IntClear_RcvUrg1IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg0IntClear_LSB 0x20
-#define QIB_7322_IntClear_RcvUrg0IntClear_MSB 0x20
-#define QIB_7322_IntClear_RcvUrg0IntClear_RMASK 0x1
-#define QIB_7322_IntClear_ErrIntClear_1_LSB 0x1F
-#define QIB_7322_IntClear_ErrIntClear_1_MSB 0x1F
-#define QIB_7322_IntClear_ErrIntClear_1_RMASK 0x1
-#define QIB_7322_IntClear_ErrIntClear_0_LSB 0x1E
-#define QIB_7322_IntClear_ErrIntClear_0_MSB 0x1E
-#define QIB_7322_IntClear_ErrIntClear_0_RMASK 0x1
-#define QIB_7322_IntClear_ErrIntClear_LSB 0x1D
-#define QIB_7322_IntClear_ErrIntClear_MSB 0x1D
-#define QIB_7322_IntClear_ErrIntClear_RMASK 0x1
-#define QIB_7322_IntClear_AssertGPIOIntClear_LSB 0x1C
-#define QIB_7322_IntClear_AssertGPIOIntClear_MSB 0x1C
-#define QIB_7322_IntClear_AssertGPIOIntClear_RMASK 0x1
-#define QIB_7322_IntClear_SendDoneIntClear_1_LSB 0x19
-#define QIB_7322_IntClear_SendDoneIntClear_1_MSB 0x19
-#define QIB_7322_IntClear_SendDoneIntClear_1_RMASK 0x1
-#define QIB_7322_IntClear_SendDoneIntClear_0_LSB 0x18
-#define QIB_7322_IntClear_SendDoneIntClear_0_MSB 0x18
-#define QIB_7322_IntClear_SendDoneIntClear_0_RMASK 0x1
-#define QIB_7322_IntClear_SendBufAvailIntClear_LSB 0x17
-#define QIB_7322_IntClear_SendBufAvailIntClear_MSB 0x17
-#define QIB_7322_IntClear_SendBufAvailIntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail17IntClear_LSB 0x11
-#define QIB_7322_IntClear_RcvAvail17IntClear_MSB 0x11
-#define QIB_7322_IntClear_RcvAvail17IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail16IntClear_LSB 0x10
-#define QIB_7322_IntClear_RcvAvail16IntClear_MSB 0x10
-#define QIB_7322_IntClear_RcvAvail16IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail15IntClear_LSB 0xF
-#define QIB_7322_IntClear_RcvAvail15IntClear_MSB 0xF
-#define QIB_7322_IntClear_RcvAvail15IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail14IntClear_LSB 0xE
-#define QIB_7322_IntClear_RcvAvail14IntClear_MSB 0xE
-#define QIB_7322_IntClear_RcvAvail14IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail13IntClear_LSB 0xD
-#define QIB_7322_IntClear_RcvAvail13IntClear_MSB 0xD
-#define QIB_7322_IntClear_RcvAvail13IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail12IntClear_LSB 0xC
-#define QIB_7322_IntClear_RcvAvail12IntClear_MSB 0xC
-#define QIB_7322_IntClear_RcvAvail12IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail11IntClear_LSB 0xB
-#define QIB_7322_IntClear_RcvAvail11IntClear_MSB 0xB
-#define QIB_7322_IntClear_RcvAvail11IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail10IntClear_LSB 0xA
-#define QIB_7322_IntClear_RcvAvail10IntClear_MSB 0xA
-#define QIB_7322_IntClear_RcvAvail10IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail9IntClear_LSB 0x9
-#define QIB_7322_IntClear_RcvAvail9IntClear_MSB 0x9
-#define QIB_7322_IntClear_RcvAvail9IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail8IntClear_LSB 0x8
-#define QIB_7322_IntClear_RcvAvail8IntClear_MSB 0x8
-#define QIB_7322_IntClear_RcvAvail8IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail7IntClear_LSB 0x7
-#define QIB_7322_IntClear_RcvAvail7IntClear_MSB 0x7
-#define QIB_7322_IntClear_RcvAvail7IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail6IntClear_LSB 0x6
-#define QIB_7322_IntClear_RcvAvail6IntClear_MSB 0x6
-#define QIB_7322_IntClear_RcvAvail6IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail5IntClear_LSB 0x5
-#define QIB_7322_IntClear_RcvAvail5IntClear_MSB 0x5
-#define QIB_7322_IntClear_RcvAvail5IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail4IntClear_LSB 0x4
-#define QIB_7322_IntClear_RcvAvail4IntClear_MSB 0x4
-#define QIB_7322_IntClear_RcvAvail4IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail3IntClear_LSB 0x3
-#define QIB_7322_IntClear_RcvAvail3IntClear_MSB 0x3
-#define QIB_7322_IntClear_RcvAvail3IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail2IntClear_LSB 0x2
-#define QIB_7322_IntClear_RcvAvail2IntClear_MSB 0x2
-#define QIB_7322_IntClear_RcvAvail2IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail1IntClear_LSB 0x1
-#define QIB_7322_IntClear_RcvAvail1IntClear_MSB 0x1
-#define QIB_7322_IntClear_RcvAvail1IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail0IntClear_LSB 0x0
-#define QIB_7322_IntClear_RcvAvail0IntClear_MSB 0x0
-#define QIB_7322_IntClear_RcvAvail0IntClear_RMASK 0x1
-
-#define QIB_7322_ErrMask_OFFS 0x80
-#define QIB_7322_ErrMask_DEF 0x0000000000000000
-#define QIB_7322_ErrMask_ResetNegatedMask_LSB 0x3F
-#define QIB_7322_ErrMask_ResetNegatedMask_MSB 0x3F
-#define QIB_7322_ErrMask_ResetNegatedMask_RMASK 0x1
-#define QIB_7322_ErrMask_HardwareErrMask_LSB 0x3E
-#define QIB_7322_ErrMask_HardwareErrMask_MSB 0x3E
-#define QIB_7322_ErrMask_HardwareErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_InvalidAddrErrMask_LSB 0x3D
-#define QIB_7322_ErrMask_InvalidAddrErrMask_MSB 0x3D
-#define QIB_7322_ErrMask_InvalidAddrErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_SDmaVL15ErrMask_LSB 0x38
-#define QIB_7322_ErrMask_SDmaVL15ErrMask_MSB 0x38
-#define QIB_7322_ErrMask_SDmaVL15ErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_LSB 0x37
-#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_MSB 0x37
-#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_InvalidEEPCmdMask_LSB 0x35
-#define QIB_7322_ErrMask_InvalidEEPCmdMask_MSB 0x35
-#define QIB_7322_ErrMask_InvalidEEPCmdMask_RMASK 0x1
-#define QIB_7322_ErrMask_RcvContextShareErrMask_LSB 0x34
-#define QIB_7322_ErrMask_RcvContextShareErrMask_MSB 0x34
-#define QIB_7322_ErrMask_RcvContextShareErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_SendVLMismatchErrMask_LSB 0x24
-#define QIB_7322_ErrMask_SendVLMismatchErrMask_MSB 0x24
-#define QIB_7322_ErrMask_SendVLMismatchErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_SendArmLaunchErrMask_LSB 0x23
-#define QIB_7322_ErrMask_SendArmLaunchErrMask_MSB 0x23
-#define QIB_7322_ErrMask_SendArmLaunchErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_LSB 0x1B
-#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_MSB 0x1B
-#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_SDmaWrongPortErrMask_LSB 0x1A
-#define QIB_7322_ErrMask_SDmaWrongPortErrMask_MSB 0x1A
-#define QIB_7322_ErrMask_SDmaWrongPortErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_LSB 0x19
-#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_MSB 0x19
-#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_RcvHdrFullErrMask_LSB 0xD
-#define QIB_7322_ErrMask_RcvHdrFullErrMask_MSB 0xD
-#define QIB_7322_ErrMask_RcvHdrFullErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_RcvEgrFullErrMask_LSB 0xC
-#define QIB_7322_ErrMask_RcvEgrFullErrMask_MSB 0xC
-#define QIB_7322_ErrMask_RcvEgrFullErrMask_RMASK 0x1
-
-#define QIB_7322_ErrStatus_OFFS 0x88
-#define QIB_7322_ErrStatus_DEF 0x0000000000000000
-#define QIB_7322_ErrStatus_ResetNegated_LSB 0x3F
-#define QIB_7322_ErrStatus_ResetNegated_MSB 0x3F
-#define QIB_7322_ErrStatus_ResetNegated_RMASK 0x1
-#define QIB_7322_ErrStatus_HardwareErr_LSB 0x3E
-#define QIB_7322_ErrStatus_HardwareErr_MSB 0x3E
-#define QIB_7322_ErrStatus_HardwareErr_RMASK 0x1
-#define QIB_7322_ErrStatus_InvalidAddrErr_LSB 0x3D
-#define QIB_7322_ErrStatus_InvalidAddrErr_MSB 0x3D
-#define QIB_7322_ErrStatus_InvalidAddrErr_RMASK 0x1
-#define QIB_7322_ErrStatus_SDmaVL15Err_LSB 0x38
-#define QIB_7322_ErrStatus_SDmaVL15Err_MSB 0x38
-#define QIB_7322_ErrStatus_SDmaVL15Err_RMASK 0x1
-#define QIB_7322_ErrStatus_SBufVL15MisUseErr_LSB 0x37
-#define QIB_7322_ErrStatus_SBufVL15MisUseErr_MSB 0x37
-#define QIB_7322_ErrStatus_SBufVL15MisUseErr_RMASK 0x1
-#define QIB_7322_ErrStatus_InvalidEEPCmdErr_LSB 0x35
-#define QIB_7322_ErrStatus_InvalidEEPCmdErr_MSB 0x35
-#define QIB_7322_ErrStatus_InvalidEEPCmdErr_RMASK 0x1
-#define QIB_7322_ErrStatus_RcvContextShareErr_LSB 0x34
-#define QIB_7322_ErrStatus_RcvContextShareErr_MSB 0x34
-#define QIB_7322_ErrStatus_RcvContextShareErr_RMASK 0x1
-#define QIB_7322_ErrStatus_SendVLMismatchErr_LSB 0x24
-#define QIB_7322_ErrStatus_SendVLMismatchErr_MSB 0x24
-#define QIB_7322_ErrStatus_SendVLMismatchErr_RMASK 0x1
-#define QIB_7322_ErrStatus_SendArmLaunchErr_LSB 0x23
-#define QIB_7322_ErrStatus_SendArmLaunchErr_MSB 0x23
-#define QIB_7322_ErrStatus_SendArmLaunchErr_RMASK 0x1
-#define QIB_7322_ErrStatus_SendSpecialTriggerErr_LSB 0x1B
-#define QIB_7322_ErrStatus_SendSpecialTriggerErr_MSB 0x1B
-#define QIB_7322_ErrStatus_SendSpecialTriggerErr_RMASK 0x1
-#define QIB_7322_ErrStatus_SDmaWrongPortErr_LSB 0x1A
-#define QIB_7322_ErrStatus_SDmaWrongPortErr_MSB 0x1A
-#define QIB_7322_ErrStatus_SDmaWrongPortErr_RMASK 0x1
-#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_LSB 0x19
-#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_MSB 0x19
-#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_RMASK 0x1
-#define QIB_7322_ErrStatus_RcvHdrFullErr_LSB 0xD
-#define QIB_7322_ErrStatus_RcvHdrFullErr_MSB 0xD
-#define QIB_7322_ErrStatus_RcvHdrFullErr_RMASK 0x1
-#define QIB_7322_ErrStatus_RcvEgrFullErr_LSB 0xC
-#define QIB_7322_ErrStatus_RcvEgrFullErr_MSB 0xC
-#define QIB_7322_ErrStatus_RcvEgrFullErr_RMASK 0x1
-
-#define QIB_7322_ErrClear_OFFS 0x90
-#define QIB_7322_ErrClear_DEF 0x0000000000000000
-#define QIB_7322_ErrClear_ResetNegatedClear_LSB 0x3F
-#define QIB_7322_ErrClear_ResetNegatedClear_MSB 0x3F
-#define QIB_7322_ErrClear_ResetNegatedClear_RMASK 0x1
-#define QIB_7322_ErrClear_HardwareErrClear_LSB 0x3E
-#define QIB_7322_ErrClear_HardwareErrClear_MSB 0x3E
-#define QIB_7322_ErrClear_HardwareErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_InvalidAddrErrClear_LSB 0x3D
-#define QIB_7322_ErrClear_InvalidAddrErrClear_MSB 0x3D
-#define QIB_7322_ErrClear_InvalidAddrErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_SDmaVL15ErrClear_LSB 0x38
-#define QIB_7322_ErrClear_SDmaVL15ErrClear_MSB 0x38
-#define QIB_7322_ErrClear_SDmaVL15ErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_LSB 0x37
-#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_MSB 0x37
-#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_LSB 0x35
-#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_MSB 0x35
-#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_RcvContextShareErrClear_LSB 0x34
-#define QIB_7322_ErrClear_RcvContextShareErrClear_MSB 0x34
-#define QIB_7322_ErrClear_RcvContextShareErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_SendVLMismatchErrMask_LSB 0x24
-#define QIB_7322_ErrClear_SendVLMismatchErrMask_MSB 0x24
-#define QIB_7322_ErrClear_SendVLMismatchErrMask_RMASK 0x1
-#define QIB_7322_ErrClear_SendArmLaunchErrClear_LSB 0x23
-#define QIB_7322_ErrClear_SendArmLaunchErrClear_MSB 0x23
-#define QIB_7322_ErrClear_SendArmLaunchErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_LSB 0x1B
-#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_MSB 0x1B
-#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_SDmaWrongPortErrClear_LSB 0x1A
-#define QIB_7322_ErrClear_SDmaWrongPortErrClear_MSB 0x1A
-#define QIB_7322_ErrClear_SDmaWrongPortErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_LSB 0x19
-#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_MSB 0x19
-#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_RcvHdrFullErrClear_LSB 0xD
-#define QIB_7322_ErrClear_RcvHdrFullErrClear_MSB 0xD
-#define QIB_7322_ErrClear_RcvHdrFullErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_RcvEgrFullErrClear_LSB 0xC
-#define QIB_7322_ErrClear_RcvEgrFullErrClear_MSB 0xC
-#define QIB_7322_ErrClear_RcvEgrFullErrClear_RMASK 0x1
-
-#define QIB_7322_HwErrMask_OFFS 0x98
-#define QIB_7322_HwErrMask_DEF 0x0000000000000000
-#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_LSB 0x3F
-#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_MSB 0x3F
-#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_RMASK 0x1
-#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_LSB 0x3E
-#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_MSB 0x3E
-#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_RMASK 0x1
-#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_LSB 0x37
-#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_MSB 0x37
-#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_RMASK 0x1
-#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
-#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_MSB 0x36
-#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
-#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_LSB 0x35
-#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_MSB 0x35
-#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_RMASK 0x1
-#define QIB_7322_HwErrMask_MemoryErrMask_LSB 0x30
-#define QIB_7322_HwErrMask_MemoryErrMask_MSB 0x30
-#define QIB_7322_HwErrMask_MemoryErrMask_RMASK 0x1
-#define QIB_7322_HwErrMask_pcie_phy_txParityErr_LSB 0x22
-#define QIB_7322_HwErrMask_pcie_phy_txParityErr_MSB 0x22
-#define QIB_7322_HwErrMask_pcie_phy_txParityErr_RMASK 0x1
-#define QIB_7322_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
-#define QIB_7322_HwErrMask_PCIeBusParityErrMask_MSB 0x21
-#define QIB_7322_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
-#define QIB_7322_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
-#define QIB_7322_HwErrMask_PcieCplTimeoutMask_MSB 0x1E
-#define QIB_7322_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
-#define QIB_7322_HwErrMask_PciePoisonedTLPMask_LSB 0x1D
-#define QIB_7322_HwErrMask_PciePoisonedTLPMask_MSB 0x1D
-#define QIB_7322_HwErrMask_PciePoisonedTLPMask_RMASK 0x1
-#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_LSB 0x1C
-#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_MSB 0x1C
-#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_RMASK 0x1
-#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_LSB 0x1B
-#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_MSB 0x1B
-#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_RMASK 0x1
-#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_LSB 0xF
-#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_MSB 0xF
-#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_RMASK 0x1
-#define QIB_7322_HwErrMask_IBCBusToSPCParityErrMask_1_LSB 0xE
-#define QIB_7322_HwErrMask_IBCBusToSPCParityErrMask_1_MSB 0xE
-#define QIB_7322_HwErrMask_IBCBusToSPCParityErrMask_1_RMASK 0x1
-#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_LSB 0xD
-#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_MSB 0xD
-#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_RMASK 0x1
-#define QIB_7322_HwErrMask_statusValidNoEopMask_LSB 0xC
-#define QIB_7322_HwErrMask_statusValidNoEopMask_MSB 0xC
-#define QIB_7322_HwErrMask_statusValidNoEopMask_RMASK 0x1
-#define QIB_7322_HwErrMask_LATriggeredMask_LSB 0xB
-#define QIB_7322_HwErrMask_LATriggeredMask_MSB 0xB
-#define QIB_7322_HwErrMask_LATriggeredMask_RMASK 0x1
-
-#define QIB_7322_HwErrStatus_OFFS 0xA0
-#define QIB_7322_HwErrStatus_DEF 0x0000000000000000
-#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_LSB 0x3F
-#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_MSB 0x3F
-#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_RMASK 0x1
-#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_LSB 0x3E
-#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_MSB 0x3E
-#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_RMASK 0x1
-#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_LSB 0x37
-#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_MSB 0x37
-#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_RMASK 0x1
-#define QIB_7322_HwErrStatus_PowerOnBISTFailed_LSB 0x36
-#define QIB_7322_HwErrStatus_PowerOnBISTFailed_MSB 0x36
-#define QIB_7322_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
-#define QIB_7322_HwErrStatus_TempsenseTholdReached_LSB 0x35
-#define QIB_7322_HwErrStatus_TempsenseTholdReached_MSB 0x35
-#define QIB_7322_HwErrStatus_TempsenseTholdReached_RMASK 0x1
-#define QIB_7322_HwErrStatus_MemoryErr_LSB 0x30
-#define QIB_7322_HwErrStatus_MemoryErr_MSB 0x30
-#define QIB_7322_HwErrStatus_MemoryErr_RMASK 0x1
-#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_LSB 0x22
-#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_MSB 0x22
-#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_RMASK 0x1
-#define QIB_7322_HwErrStatus_PCIeBusParity_LSB 0x1F
-#define QIB_7322_HwErrStatus_PCIeBusParity_MSB 0x21
-#define QIB_7322_HwErrStatus_PCIeBusParity_RMASK 0x7
-#define QIB_7322_HwErrStatus_PcieCplTimeout_LSB 0x1E
-#define QIB_7322_HwErrStatus_PcieCplTimeout_MSB 0x1E
-#define QIB_7322_HwErrStatus_PcieCplTimeout_RMASK 0x1
-#define QIB_7322_HwErrStatus_PciePoisonedTLP_LSB 0x1D
-#define QIB_7322_HwErrStatus_PciePoisonedTLP_MSB 0x1D
-#define QIB_7322_HwErrStatus_PciePoisonedTLP_RMASK 0x1
-#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_LSB 0x1C
-#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_MSB 0x1C
-#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_RMASK 0x1
-#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_LSB 0x1B
-#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_MSB 0x1B
-#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_RMASK 0x1
-#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_LSB 0xF
-#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_MSB 0xF
-#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_RMASK 0x1
-#define QIB_7322_HwErrStatus_IBCBusToSPCParityErr_1_LSB 0xE
-#define QIB_7322_HwErrStatus_IBCBusToSPCParityErr_1_MSB 0xE
-#define QIB_7322_HwErrStatus_IBCBusToSPCParityErr_1_RMASK 0x1
-#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_LSB 0xD
-#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_MSB 0xD
-#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_RMASK 0x1
-#define QIB_7322_HwErrStatus_statusValidNoEop_LSB 0xC
-#define QIB_7322_HwErrStatus_statusValidNoEop_MSB 0xC
-#define QIB_7322_HwErrStatus_statusValidNoEop_RMASK 0x1
-#define QIB_7322_HwErrStatus_LATriggered_LSB 0xB
-#define QIB_7322_HwErrStatus_LATriggered_MSB 0xB
-#define QIB_7322_HwErrStatus_LATriggered_RMASK 0x1
-
-#define QIB_7322_HwErrClear_OFFS 0xA8
-#define QIB_7322_HwErrClear_DEF 0x0000000000000000
-#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_LSB 0x3F
-#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_MSB 0x3F
-#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_RMASK 0x1
-#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_LSB 0x3E
-#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_MSB 0x3E
-#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_RMASK 0x1
-#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_LSB 0x37
-#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_MSB 0x37
-#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_RMASK 0x1
-#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
-#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_MSB 0x36
-#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
-#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_LSB 0x35
-#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_MSB 0x35
-#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_RMASK 0x1
-#define QIB_7322_HwErrClear_MemoryErrClear_LSB 0x30
-#define QIB_7322_HwErrClear_MemoryErrClear_MSB 0x30
-#define QIB_7322_HwErrClear_MemoryErrClear_RMASK 0x1
-#define QIB_7322_HwErrClear_pcie_phy_txParityErr_LSB 0x22
-#define QIB_7322_HwErrClear_pcie_phy_txParityErr_MSB 0x22
-#define QIB_7322_HwErrClear_pcie_phy_txParityErr_RMASK 0x1
-#define QIB_7322_HwErrClear_PCIeBusParityClear_LSB 0x1F
-#define QIB_7322_HwErrClear_PCIeBusParityClear_MSB 0x21
-#define QIB_7322_HwErrClear_PCIeBusParityClear_RMASK 0x7
-#define QIB_7322_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
-#define QIB_7322_HwErrClear_PcieCplTimeoutClear_MSB 0x1E
-#define QIB_7322_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
-#define QIB_7322_HwErrClear_PciePoisonedTLPClear_LSB 0x1D
-#define QIB_7322_HwErrClear_PciePoisonedTLPClear_MSB 0x1D
-#define QIB_7322_HwErrClear_PciePoisonedTLPClear_RMASK 0x1
-#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_LSB 0x1C
-#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_MSB 0x1C
-#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_RMASK 0x1
-#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_LSB 0x1B
-#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_MSB 0x1B
-#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_RMASK 0x1
-#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_LSB 0xF
-#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_MSB 0xF
-#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_RMASK 0x1
-#define QIB_7322_HwErrClear_IBCBusToSPCParityErrClear_1_LSB 0xE
-#define QIB_7322_HwErrClear_IBCBusToSPCParityErrClear_1_MSB 0xE
-#define QIB_7322_HwErrClear_IBCBusToSPCParityErrClear_1_RMASK 0x1
-#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_LSB 0xD
-#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_MSB 0xD
-#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_RMASK 0x1
-#define QIB_7322_HwErrClear_statusValidNoEopClear_LSB 0xC
-#define QIB_7322_HwErrClear_statusValidNoEopClear_MSB 0xC
-#define QIB_7322_HwErrClear_statusValidNoEopClear_RMASK 0x1
-#define QIB_7322_HwErrClear_LATriggeredClear_LSB 0xB
-#define QIB_7322_HwErrClear_LATriggeredClear_MSB 0xB
-#define QIB_7322_HwErrClear_LATriggeredClear_RMASK 0x1
-
-#define QIB_7322_HwDiagCtrl_OFFS 0xB0
-#define QIB_7322_HwDiagCtrl_DEF 0x0000000000000000
-#define QIB_7322_HwDiagCtrl_Diagnostic_LSB 0x3F
-#define QIB_7322_HwDiagCtrl_Diagnostic_MSB 0x3F
-#define QIB_7322_HwDiagCtrl_Diagnostic_RMASK 0x1
-#define QIB_7322_HwDiagCtrl_CounterWrEnable_LSB 0x3D
-#define QIB_7322_HwDiagCtrl_CounterWrEnable_MSB 0x3D
-#define QIB_7322_HwDiagCtrl_CounterWrEnable_RMASK 0x1
-#define QIB_7322_HwDiagCtrl_CounterDisable_LSB 0x3C
-#define QIB_7322_HwDiagCtrl_CounterDisable_MSB 0x3C
-#define QIB_7322_HwDiagCtrl_CounterDisable_RMASK 0x1
-#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
-#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_MSB 0x22
-#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
-#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_LSB 0xF
-#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_MSB 0xF
-#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_RMASK 0x1
-#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_1_LSB 0xE
-#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_1_MSB 0xE
-#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_1_RMASK 0x1
-#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_LSB 0xD
-#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_MSB 0xD
-#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_RMASK 0x1
-#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_0_LSB 0xC
-#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_0_MSB 0xC
-#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_0_RMASK 0x1
-
-#define QIB_7322_EXTStatus_OFFS 0xC0
-#define QIB_7322_EXTStatus_DEF 0x000000000000X000
-#define QIB_7322_EXTStatus_GPIOIn_LSB 0x30
-#define QIB_7322_EXTStatus_GPIOIn_MSB 0x3F
-#define QIB_7322_EXTStatus_GPIOIn_RMASK 0xFFFF
-#define QIB_7322_EXTStatus_MemBISTDisabled_LSB 0xF
-#define QIB_7322_EXTStatus_MemBISTDisabled_MSB 0xF
-#define QIB_7322_EXTStatus_MemBISTDisabled_RMASK 0x1
-#define QIB_7322_EXTStatus_MemBISTEndTest_LSB 0xE
-#define QIB_7322_EXTStatus_MemBISTEndTest_MSB 0xE
-#define QIB_7322_EXTStatus_MemBISTEndTest_RMASK 0x1
-
-#define QIB_7322_EXTCtrl_OFFS 0xC8
-#define QIB_7322_EXTCtrl_DEF 0x0000000000000000
-#define QIB_7322_EXTCtrl_GPIOOe_LSB 0x30
-#define QIB_7322_EXTCtrl_GPIOOe_MSB 0x3F
-#define QIB_7322_EXTCtrl_GPIOOe_RMASK 0xFFFF
-#define QIB_7322_EXTCtrl_GPIOInvert_LSB 0x20
-#define QIB_7322_EXTCtrl_GPIOInvert_MSB 0x2F
-#define QIB_7322_EXTCtrl_GPIOInvert_RMASK 0xFFFF
-#define QIB_7322_EXTCtrl_LEDPort1GreenOn_LSB 0x3
-#define QIB_7322_EXTCtrl_LEDPort1GreenOn_MSB 0x3
-#define QIB_7322_EXTCtrl_LEDPort1GreenOn_RMASK 0x1
-#define QIB_7322_EXTCtrl_LEDPort1YellowOn_LSB 0x2
-#define QIB_7322_EXTCtrl_LEDPort1YellowOn_MSB 0x2
-#define QIB_7322_EXTCtrl_LEDPort1YellowOn_RMASK 0x1
-#define QIB_7322_EXTCtrl_LEDPort0GreenOn_LSB 0x1
-#define QIB_7322_EXTCtrl_LEDPort0GreenOn_MSB 0x1
-#define QIB_7322_EXTCtrl_LEDPort0GreenOn_RMASK 0x1
-#define QIB_7322_EXTCtrl_LEDPort0YellowOn_LSB 0x0
-#define QIB_7322_EXTCtrl_LEDPort0YellowOn_MSB 0x0
-#define QIB_7322_EXTCtrl_LEDPort0YellowOn_RMASK 0x1
-
-#define QIB_7322_GPIOOut_OFFS 0xE0
-#define QIB_7322_GPIOOut_DEF 0x0000000000000000
-
-#define QIB_7322_GPIOMask_OFFS 0xE8
-#define QIB_7322_GPIOMask_DEF 0x0000000000000000
-
-#define QIB_7322_GPIOStatus_OFFS 0xF0
-#define QIB_7322_GPIOStatus_DEF 0x0000000000000000
-
-#define QIB_7322_GPIOClear_OFFS 0xF8
-#define QIB_7322_GPIOClear_DEF 0x0000000000000000
-
-#define QIB_7322_RcvCtrl_OFFS 0x100
-#define QIB_7322_RcvCtrl_DEF 0x0000000000000000
-#define QIB_7322_RcvCtrl_TidReDirect_LSB 0x30
-#define QIB_7322_RcvCtrl_TidReDirect_MSB 0x3F
-#define QIB_7322_RcvCtrl_TidReDirect_RMASK 0xFFFF
-#define QIB_7322_RcvCtrl_TailUpd_LSB 0x2F
-#define QIB_7322_RcvCtrl_TailUpd_MSB 0x2F
-#define QIB_7322_RcvCtrl_TailUpd_RMASK 0x1
-#define QIB_7322_RcvCtrl_XrcTypeCode_LSB 0x2C
-#define QIB_7322_RcvCtrl_XrcTypeCode_MSB 0x2E
-#define QIB_7322_RcvCtrl_XrcTypeCode_RMASK 0x7
-#define QIB_7322_RcvCtrl_TidFlowEnable_LSB 0x2B
-#define QIB_7322_RcvCtrl_TidFlowEnable_MSB 0x2B
-#define QIB_7322_RcvCtrl_TidFlowEnable_RMASK 0x1
-#define QIB_7322_RcvCtrl_ContextCfg_LSB 0x29
-#define QIB_7322_RcvCtrl_ContextCfg_MSB 0x2A
-#define QIB_7322_RcvCtrl_ContextCfg_RMASK 0x3
-#define QIB_7322_RcvCtrl_IntrAvail_LSB 0x14
-#define QIB_7322_RcvCtrl_IntrAvail_MSB 0x25
-#define QIB_7322_RcvCtrl_IntrAvail_RMASK 0x3FFFF
-#define QIB_7322_RcvCtrl_dontDropRHQFull_LSB 0x0
-#define QIB_7322_RcvCtrl_dontDropRHQFull_MSB 0x11
-#define QIB_7322_RcvCtrl_dontDropRHQFull_RMASK 0x3FFFF
-
-#define QIB_7322_RcvHdrSize_OFFS 0x110
-#define QIB_7322_RcvHdrSize_DEF 0x0000000000000000
-
-#define QIB_7322_RcvHdrCnt_OFFS 0x118
-#define QIB_7322_RcvHdrCnt_DEF 0x0000000000000000
-
-#define QIB_7322_RcvHdrEntSize_OFFS 0x120
-#define QIB_7322_RcvHdrEntSize_DEF 0x0000000000000000
-
-#define QIB_7322_RcvTIDBase_OFFS 0x128
-#define QIB_7322_RcvTIDBase_DEF 0x0000000000050000
-
-#define QIB_7322_RcvTIDCnt_OFFS 0x130
-#define QIB_7322_RcvTIDCnt_DEF 0x0000000000000200
-
-#define QIB_7322_RcvEgrBase_OFFS 0x138
-#define QIB_7322_RcvEgrBase_DEF 0x0000000000014000
-
-#define QIB_7322_RcvEgrCnt_OFFS 0x140
-#define QIB_7322_RcvEgrCnt_DEF 0x0000000000001000
-
-#define QIB_7322_RcvBufBase_OFFS 0x148
-#define QIB_7322_RcvBufBase_DEF 0x0000000000080000
-
-#define QIB_7322_RcvBufSize_OFFS 0x150
-#define QIB_7322_RcvBufSize_DEF 0x0000000000005000
-
-#define QIB_7322_RxIntMemBase_OFFS 0x158
-#define QIB_7322_RxIntMemBase_DEF 0x0000000000077000
-
-#define QIB_7322_RxIntMemSize_OFFS 0x160
-#define QIB_7322_RxIntMemSize_DEF 0x0000000000007000
-
-#define QIB_7322_feature_mask_OFFS 0x190
-#define QIB_7322_feature_mask_DEF 0x00000000000000XX
-
-#define QIB_7322_active_feature_mask_OFFS 0x198
-#define QIB_7322_active_feature_mask_DEF 0x00000000000000XX
-#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_LSB 0x5
-#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_MSB 0x5
-#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_RMASK 0x1
-#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_LSB 0x4
-#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_MSB 0x4
-#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_RMASK 0x1
-#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_LSB 0x3
-#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_MSB 0x3
-#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_RMASK 0x1
-#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_LSB 0x2
-#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_MSB 0x2
-#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_RMASK 0x1
-#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_LSB 0x1
-#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_MSB 0x1
-#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_RMASK 0x1
-#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_LSB 0x0
-#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_MSB 0x0
-#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_RMASK 0x1
-
-#define QIB_7322_SendCtrl_OFFS 0x1C0
-#define QIB_7322_SendCtrl_DEF 0x0000000000000000
-#define QIB_7322_SendCtrl_Disarm_LSB 0x1F
-#define QIB_7322_SendCtrl_Disarm_MSB 0x1F
-#define QIB_7322_SendCtrl_Disarm_RMASK 0x1
-#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_LSB 0x1D
-#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_MSB 0x1D
-#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_RMASK 0x1
-#define QIB_7322_SendCtrl_AvailUpdThld_LSB 0x18
-#define QIB_7322_SendCtrl_AvailUpdThld_MSB 0x1C
-#define QIB_7322_SendCtrl_AvailUpdThld_RMASK 0x1F
-#define QIB_7322_SendCtrl_DisarmSendBuf_LSB 0x10
-#define QIB_7322_SendCtrl_DisarmSendBuf_MSB 0x17
-#define QIB_7322_SendCtrl_DisarmSendBuf_RMASK 0xFF
-#define QIB_7322_SendCtrl_SpecialTriggerEn_LSB 0x4
-#define QIB_7322_SendCtrl_SpecialTriggerEn_MSB 0x4
-#define QIB_7322_SendCtrl_SpecialTriggerEn_RMASK 0x1
-#define QIB_7322_SendCtrl_SendBufAvailUpd_LSB 0x2
-#define QIB_7322_SendCtrl_SendBufAvailUpd_MSB 0x2
-#define QIB_7322_SendCtrl_SendBufAvailUpd_RMASK 0x1
-#define QIB_7322_SendCtrl_SendIntBufAvail_LSB 0x1
-#define QIB_7322_SendCtrl_SendIntBufAvail_MSB 0x1
-#define QIB_7322_SendCtrl_SendIntBufAvail_RMASK 0x1
-
-#define QIB_7322_SendBufBase_OFFS 0x1C8
-#define QIB_7322_SendBufBase_DEF 0x0018000000100000
-#define QIB_7322_SendBufBase_BaseAddr_LargePIO_LSB 0x20
-#define QIB_7322_SendBufBase_BaseAddr_LargePIO_MSB 0x34
-#define QIB_7322_SendBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
-#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_LSB 0x0
-#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_MSB 0x14
-#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
-
-#define QIB_7322_SendBufSize_OFFS 0x1D0
-#define QIB_7322_SendBufSize_DEF 0x0000108000000880
-#define QIB_7322_SendBufSize_Size_LargePIO_LSB 0x20
-#define QIB_7322_SendBufSize_Size_LargePIO_MSB 0x2C
-#define QIB_7322_SendBufSize_Size_LargePIO_RMASK 0x1FFF
-#define QIB_7322_SendBufSize_Size_SmallPIO_LSB 0x0
-#define QIB_7322_SendBufSize_Size_SmallPIO_MSB 0xB
-#define QIB_7322_SendBufSize_Size_SmallPIO_RMASK 0xFFF
-
-#define QIB_7322_SendBufCnt_OFFS 0x1D8
-#define QIB_7322_SendBufCnt_DEF 0x0000002000000080
-#define QIB_7322_SendBufCnt_Num_LargeBuffers_LSB 0x20
-#define QIB_7322_SendBufCnt_Num_LargeBuffers_MSB 0x25
-#define QIB_7322_SendBufCnt_Num_LargeBuffers_RMASK 0x3F
-#define QIB_7322_SendBufCnt_Num_SmallBuffers_LSB 0x0
-#define QIB_7322_SendBufCnt_Num_SmallBuffers_MSB 0x8
-#define QIB_7322_SendBufCnt_Num_SmallBuffers_RMASK 0x1FF
-
-#define QIB_7322_SendBufAvailAddr_OFFS 0x1E0
-#define QIB_7322_SendBufAvailAddr_DEF 0x0000000000000000
-#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_LSB 0x6
-#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_MSB 0x27
-#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_RMASK 0x3FFFFFFFF
-
-#define QIB_7322_SendBufErr0_OFFS 0x240
-#define QIB_7322_SendBufErr0_DEF 0x0000000000000000
-#define QIB_7322_SendBufErr0_SendBufErr_63_0_LSB 0x0
-#define QIB_7322_SendBufErr0_SendBufErr_63_0_MSB 0x3F
-#define QIB_7322_SendBufErr0_SendBufErr_63_0_RMASK 0x0
-
-#define QIB_7322_AvailUpdCount_OFFS 0x268
-#define QIB_7322_AvailUpdCount_DEF 0x0000000000000000
-#define QIB_7322_AvailUpdCount_AvailUpdCount_LSB 0x0
-#define QIB_7322_AvailUpdCount_AvailUpdCount_MSB 0x4
-#define QIB_7322_AvailUpdCount_AvailUpdCount_RMASK 0x1F
-
-#define QIB_7322_RcvHdrAddr0_OFFS 0x280
-#define QIB_7322_RcvHdrAddr0_DEF 0x0000000000000000
-#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_LSB 0x2
-#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_MSB 0x27
-#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_RMASK 0x3FFFFFFFFF
-
-#define QIB_7322_RcvHdrTailAddr0_OFFS 0x340
-#define QIB_7322_RcvHdrTailAddr0_DEF 0x0000000000000000
-#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_LSB 0x2
-#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_MSB 0x27
-#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_RMASK 0x3FFFFFFFFF
-
-#define QIB_7322_ahb_access_ctrl_OFFS 0x460
-#define QIB_7322_ahb_access_ctrl_DEF 0x0000000000000000
-#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_LSB 0x1
-#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_MSB 0x2
-#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_RMASK 0x3
-#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_LSB 0x0
-#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_MSB 0x0
-#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_RMASK 0x1
-
-#define QIB_7322_ahb_transaction_reg_OFFS 0x468
-#define QIB_7322_ahb_transaction_reg_DEF 0x0000000080000000
-#define QIB_7322_ahb_transaction_reg_ahb_data_LSB 0x20
-#define QIB_7322_ahb_transaction_reg_ahb_data_MSB 0x3F
-#define QIB_7322_ahb_transaction_reg_ahb_data_RMASK 0xFFFFFFFF
-#define QIB_7322_ahb_transaction_reg_ahb_rdy_LSB 0x1F
-#define QIB_7322_ahb_transaction_reg_ahb_rdy_MSB 0x1F
-#define QIB_7322_ahb_transaction_reg_ahb_rdy_RMASK 0x1
-#define QIB_7322_ahb_transaction_reg_ahb_req_err_LSB 0x1E
-#define QIB_7322_ahb_transaction_reg_ahb_req_err_MSB 0x1E
-#define QIB_7322_ahb_transaction_reg_ahb_req_err_RMASK 0x1
-#define QIB_7322_ahb_transaction_reg_write_not_read_LSB 0x1B
-#define QIB_7322_ahb_transaction_reg_write_not_read_MSB 0x1B
-#define QIB_7322_ahb_transaction_reg_write_not_read_RMASK 0x1
-#define QIB_7322_ahb_transaction_reg_ahb_address_LSB 0x10
-#define QIB_7322_ahb_transaction_reg_ahb_address_MSB 0x1A
-#define QIB_7322_ahb_transaction_reg_ahb_address_RMASK 0x7FF
-
-#define QIB_7322_SPC_JTAG_ACCESS_REG_OFFS 0x470
-#define QIB_7322_SPC_JTAG_ACCESS_REG_DEF 0x0000000000000001
-#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_LSB 0xA
-#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_MSB 0xA
-#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_RMASK 0x1
-#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_LSB 0x5
-#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_MSB 0x9
-#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_RMASK 0x1F
-#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_LSB 0x3
-#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_MSB 0x4
-#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_RMASK 0x3
-#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_LSB 0x2
-#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_MSB 0x2
-#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_RMASK 0x1
-#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_LSB 0x1
-#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_MSB 0x1
-#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_RMASK 0x1
-#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_LSB 0x0
-#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_MSB 0x0
-#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_RMASK 0x1
-
-#define QIB_7322_SendCheckMask0_OFFS 0x4C0
-#define QIB_7322_SendCheckMask0_DEF 0x0000000000000000
-#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_LSB 0x0
-#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_MSB 0x3F
-#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_RMASK 0x0
-
-#define QIB_7322_SendGRHCheckMask0_OFFS 0x4E0
-#define QIB_7322_SendGRHCheckMask0_DEF 0x0000000000000000
-#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_LSB 0x0
-#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_MSB 0x3F
-#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_RMASK 0x0
-
-#define QIB_7322_SendIBPacketMask0_OFFS 0x500
-#define QIB_7322_SendIBPacketMask0_DEF 0x0000000000000000
-#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_LSB 0x0
-#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_MSB 0x3F
-#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_RMASK 0x0
-
-#define QIB_7322_IntRedirect0_OFFS 0x540
-#define QIB_7322_IntRedirect0_DEF 0x0000000000000000
-#define QIB_7322_IntRedirect0_vec11_LSB 0x37
-#define QIB_7322_IntRedirect0_vec11_MSB 0x3B
-#define QIB_7322_IntRedirect0_vec11_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec10_LSB 0x32
-#define QIB_7322_IntRedirect0_vec10_MSB 0x36
-#define QIB_7322_IntRedirect0_vec10_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec9_LSB 0x2D
-#define QIB_7322_IntRedirect0_vec9_MSB 0x31
-#define QIB_7322_IntRedirect0_vec9_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec8_LSB 0x28
-#define QIB_7322_IntRedirect0_vec8_MSB 0x2C
-#define QIB_7322_IntRedirect0_vec8_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec7_LSB 0x23
-#define QIB_7322_IntRedirect0_vec7_MSB 0x27
-#define QIB_7322_IntRedirect0_vec7_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec6_LSB 0x1E
-#define QIB_7322_IntRedirect0_vec6_MSB 0x22
-#define QIB_7322_IntRedirect0_vec6_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec5_LSB 0x19
-#define QIB_7322_IntRedirect0_vec5_MSB 0x1D
-#define QIB_7322_IntRedirect0_vec5_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec4_LSB 0x14
-#define QIB_7322_IntRedirect0_vec4_MSB 0x18
-#define QIB_7322_IntRedirect0_vec4_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec3_LSB 0xF
-#define QIB_7322_IntRedirect0_vec3_MSB 0x13
-#define QIB_7322_IntRedirect0_vec3_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec2_LSB 0xA
-#define QIB_7322_IntRedirect0_vec2_MSB 0xE
-#define QIB_7322_IntRedirect0_vec2_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec1_LSB 0x5
-#define QIB_7322_IntRedirect0_vec1_MSB 0x9
-#define QIB_7322_IntRedirect0_vec1_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec0_LSB 0x0
-#define QIB_7322_IntRedirect0_vec0_MSB 0x4
-#define QIB_7322_IntRedirect0_vec0_RMASK 0x1F
-
-#define QIB_7322_Int_Granted_OFFS 0x570
-#define QIB_7322_Int_Granted_DEF 0x0000000000000000
-
-#define QIB_7322_vec_clr_without_int_OFFS 0x578
-#define QIB_7322_vec_clr_without_int_DEF 0x0000000000000000
-
-#define QIB_7322_DCACtrlA_OFFS 0x580
-#define QIB_7322_DCACtrlA_DEF 0x0000000000000000
-#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_LSB 0x4
-#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_MSB 0x4
-#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_RMASK 0x1
-#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_LSB 0x3
-#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_MSB 0x3
-#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_RMASK 0x1
-#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_LSB 0x2
-#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_MSB 0x2
-#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_RMASK 0x1
-#define QIB_7322_DCACtrlA_EagerDCAEnable_LSB 0x1
-#define QIB_7322_DCACtrlA_EagerDCAEnable_MSB 0x1
-#define QIB_7322_DCACtrlA_EagerDCAEnable_RMASK 0x1
-#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_LSB 0x0
-#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_MSB 0x0
-#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_RMASK 0x1
-
-#define QIB_7322_DCACtrlB_OFFS 0x588
-#define QIB_7322_DCACtrlB_DEF 0x0000000000000000
-#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_LSB 0x36
-#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_MSB 0x3B
-#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_LSB 0x2E
-#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_MSB 0x35
-#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_LSB 0x28
-#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_MSB 0x2D
-#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_LSB 0x20
-#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_MSB 0x27
-#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_LSB 0x16
-#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_MSB 0x1B
-#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_LSB 0xE
-#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_MSB 0x15
-#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_LSB 0x8
-#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_MSB 0xD
-#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_LSB 0x0
-#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_MSB 0x7
-#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_RMASK 0xFF
-
-#define QIB_7322_DCACtrlC_OFFS 0x590
-#define QIB_7322_DCACtrlC_DEF 0x0000000000000000
-#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_LSB 0x36
-#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_MSB 0x3B
-#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_LSB 0x2E
-#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_MSB 0x35
-#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_LSB 0x28
-#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_MSB 0x2D
-#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_LSB 0x20
-#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_MSB 0x27
-#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_LSB 0x16
-#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_MSB 0x1B
-#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_LSB 0xE
-#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_MSB 0x15
-#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_LSB 0x8
-#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_MSB 0xD
-#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_LSB 0x0
-#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_MSB 0x7
-#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_RMASK 0xFF
-
-#define QIB_7322_DCACtrlD_OFFS 0x598
-#define QIB_7322_DCACtrlD_DEF 0x0000000000000000
-#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_LSB 0x36
-#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_MSB 0x3B
-#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_LSB 0x2E
-#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_MSB 0x35
-#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_LSB 0x28
-#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_MSB 0x2D
-#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_LSB 0x20
-#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_MSB 0x27
-#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_LSB 0x16
-#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_MSB 0x1B
-#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_LSB 0xE
-#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_MSB 0x15
-#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_LSB 0x8
-#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_MSB 0xD
-#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_LSB 0x0
-#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_MSB 0x7
-#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_RMASK 0xFF
-
-#define QIB_7322_DCACtrlE_OFFS 0x5A0
-#define QIB_7322_DCACtrlE_DEF 0x0000000000000000
-#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_LSB 0x36
-#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_MSB 0x3B
-#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_LSB 0x2E
-#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_MSB 0x35
-#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_LSB 0x28
-#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_MSB 0x2D
-#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_LSB 0x20
-#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_MSB 0x27
-#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_LSB 0x16
-#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_MSB 0x1B
-#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_LSB 0xE
-#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_MSB 0x15
-#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_LSB 0x8
-#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_MSB 0xD
-#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_LSB 0x0
-#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_MSB 0x7
-#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_RMASK 0xFF
-
-#define QIB_7322_DCACtrlF_OFFS 0x5A8
-#define QIB_7322_DCACtrlF_DEF 0x0000000000000000
-#define QIB_7322_DCACtrlF_SendDma1DCAOPH_LSB 0x28
-#define QIB_7322_DCACtrlF_SendDma1DCAOPH_MSB 0x2F
-#define QIB_7322_DCACtrlF_SendDma1DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlF_SendDma0DCAOPH_LSB 0x20
-#define QIB_7322_DCACtrlF_SendDma0DCAOPH_MSB 0x27
-#define QIB_7322_DCACtrlF_SendDma0DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_LSB 0x16
-#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_MSB 0x1B
-#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_LSB 0xE
-#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_MSB 0x15
-#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_LSB 0x8
-#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_MSB 0xD
-#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_LSB 0x0
-#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_MSB 0x7
-#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_RMASK 0xFF
-
-#define QIB_7322_RcvAvailTimeOut0_OFFS 0xC00
-#define QIB_7322_RcvAvailTimeOut0_DEF 0x0000000000000000
-#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_LSB 0x10
-#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_MSB 0x1F
-#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_RMASK 0xFFFF
-#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_LSB 0x0
-#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_MSB 0xF
-#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_RMASK 0xFFFF
-
-#define QIB_7322_CntrRegBase_0_OFFS 0x1028
-#define QIB_7322_CntrRegBase_0_DEF 0x0000000000012000
-
-#define QIB_7322_ErrMask_0_OFFS 0x1080
-#define QIB_7322_ErrMask_0_DEF 0x0000000000000000
-#define QIB_7322_ErrMask_0_IBStatusChangedMask_LSB 0x3A
-#define QIB_7322_ErrMask_0_IBStatusChangedMask_MSB 0x3A
-#define QIB_7322_ErrMask_0_IBStatusChangedMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SHeadersErrMask_LSB 0x39
-#define QIB_7322_ErrMask_0_SHeadersErrMask_MSB 0x39
-#define QIB_7322_ErrMask_0_SHeadersErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_LSB 0x36
-#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_MSB 0x36
-#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaHaltErrMask_LSB 0x31
-#define QIB_7322_ErrMask_0_SDmaHaltErrMask_MSB 0x31
-#define QIB_7322_ErrMask_0_SDmaHaltErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_LSB 0x30
-#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_MSB 0x30
-#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_LSB 0x2F
-#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_MSB 0x2F
-#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_LSB 0x2E
-#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_MSB 0x2E
-#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_LSB 0x2D
-#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_MSB 0x2D
-#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_LSB 0x2C
-#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_MSB 0x2C
-#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDma1stDescErrMask_LSB 0x2B
-#define QIB_7322_ErrMask_0_SDma1stDescErrMask_MSB 0x2B
-#define QIB_7322_ErrMask_0_SDma1stDescErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaBaseErrMask_LSB 0x2A
-#define QIB_7322_ErrMask_0_SDmaBaseErrMask_MSB 0x2A
-#define QIB_7322_ErrMask_0_SDmaBaseErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_LSB 0x29
-#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_MSB 0x29
-#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_LSB 0x28
-#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_MSB 0x28
-#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_LSB 0x27
-#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_MSB 0x27
-#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_LSB 0x26
-#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_MSB 0x26
-#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_LSB 0x25
-#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_MSB 0x25
-#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_LSB 0x24
-#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_MSB 0x24
-#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_LSB 0x22
-#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_MSB 0x22
-#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_LSB 0x21
-#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_MSB 0x21
-#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendPktLenErrMask_LSB 0x20
-#define QIB_7322_ErrMask_0_SendPktLenErrMask_MSB 0x20
-#define QIB_7322_ErrMask_0_SendPktLenErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendUnderRunErrMask_LSB 0x1F
-#define QIB_7322_ErrMask_0_SendUnderRunErrMask_MSB 0x1F
-#define QIB_7322_ErrMask_0_SendUnderRunErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_LSB 0x1E
-#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_MSB 0x1E
-#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_LSB 0x1D
-#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_MSB 0x1D
-#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_LSB 0x11
-#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_MSB 0x11
-#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvHdrErrMask_LSB 0x10
-#define QIB_7322_ErrMask_0_RcvHdrErrMask_MSB 0x10
-#define QIB_7322_ErrMask_0_RcvHdrErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_LSB 0xF
-#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_MSB 0xF
-#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvBadTidErrMask_LSB 0xE
-#define QIB_7322_ErrMask_0_RcvBadTidErrMask_MSB 0xE
-#define QIB_7322_ErrMask_0_RcvBadTidErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_LSB 0xB
-#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_MSB 0xB
-#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_LSB 0xA
-#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_MSB 0xA
-#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvEBPErrMask_LSB 0x9
-#define QIB_7322_ErrMask_0_RcvEBPErrMask_MSB 0x9
-#define QIB_7322_ErrMask_0_RcvEBPErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_LSB 0x8
-#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_MSB 0x8
-#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_LSB 0x7
-#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_MSB 0x7
-#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_LSB 0x6
-#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_MSB 0x6
-#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_LSB 0x5
-#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_MSB 0x5
-#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_LSB 0x4
-#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_MSB 0x4
-#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_LSB 0x3
-#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_MSB 0x3
-#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvICRCErrMask_LSB 0x2
-#define QIB_7322_ErrMask_0_RcvICRCErrMask_MSB 0x2
-#define QIB_7322_ErrMask_0_RcvICRCErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvVCRCErrMask_LSB 0x1
-#define QIB_7322_ErrMask_0_RcvVCRCErrMask_MSB 0x1
-#define QIB_7322_ErrMask_0_RcvVCRCErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvFormatErrMask_LSB 0x0
-#define QIB_7322_ErrMask_0_RcvFormatErrMask_MSB 0x0
-#define QIB_7322_ErrMask_0_RcvFormatErrMask_RMASK 0x1
-
-#define QIB_7322_ErrStatus_0_OFFS 0x1088
-#define QIB_7322_ErrStatus_0_DEF 0x0000000000000000
-#define QIB_7322_ErrStatus_0_IBStatusChanged_LSB 0x3A
-#define QIB_7322_ErrStatus_0_IBStatusChanged_MSB 0x3A
-#define QIB_7322_ErrStatus_0_IBStatusChanged_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SHeadersErr_LSB 0x39
-#define QIB_7322_ErrStatus_0_SHeadersErr_MSB 0x39
-#define QIB_7322_ErrStatus_0_SHeadersErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_LSB 0x36
-#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_MSB 0x36
-#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaHaltErr_LSB 0x31
-#define QIB_7322_ErrStatus_0_SDmaHaltErr_MSB 0x31
-#define QIB_7322_ErrStatus_0_SDmaHaltErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_LSB 0x30
-#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_MSB 0x30
-#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_LSB 0x2F
-#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_MSB 0x2F
-#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_LSB 0x2E
-#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_MSB 0x2E
-#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaDwEnErr_LSB 0x2D
-#define QIB_7322_ErrStatus_0_SDmaDwEnErr_MSB 0x2D
-#define QIB_7322_ErrStatus_0_SDmaDwEnErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_LSB 0x2C
-#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_MSB 0x2C
-#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDma1stDescErr_LSB 0x2B
-#define QIB_7322_ErrStatus_0_SDma1stDescErr_MSB 0x2B
-#define QIB_7322_ErrStatus_0_SDma1stDescErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaBaseErr_LSB 0x2A
-#define QIB_7322_ErrStatus_0_SDmaBaseErr_MSB 0x2A
-#define QIB_7322_ErrStatus_0_SDmaBaseErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_LSB 0x29
-#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_MSB 0x29
-#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_LSB 0x28
-#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_MSB 0x28
-#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_LSB 0x27
-#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_MSB 0x27
-#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendBufMisuseErr_LSB 0x26
-#define QIB_7322_ErrStatus_0_SendBufMisuseErr_MSB 0x26
-#define QIB_7322_ErrStatus_0_SendBufMisuseErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_LSB 0x25
-#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_MSB 0x25
-#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_LSB 0x24
-#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_MSB 0x24
-#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_LSB 0x22
-#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_MSB 0x22
-#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_LSB 0x21
-#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_MSB 0x21
-#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendPktLenErr_LSB 0x20
-#define QIB_7322_ErrStatus_0_SendPktLenErr_MSB 0x20
-#define QIB_7322_ErrStatus_0_SendPktLenErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendUnderRunErr_LSB 0x1F
-#define QIB_7322_ErrStatus_0_SendUnderRunErr_MSB 0x1F
-#define QIB_7322_ErrStatus_0_SendUnderRunErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_LSB 0x1E
-#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_MSB 0x1E
-#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendMinPktLenErr_LSB 0x1D
-#define QIB_7322_ErrStatus_0_SendMinPktLenErr_MSB 0x1D
-#define QIB_7322_ErrStatus_0_SendMinPktLenErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_LSB 0x11
-#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_MSB 0x11
-#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvHdrErr_LSB 0x10
-#define QIB_7322_ErrStatus_0_RcvHdrErr_MSB 0x10
-#define QIB_7322_ErrStatus_0_RcvHdrErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvHdrLenErr_LSB 0xF
-#define QIB_7322_ErrStatus_0_RcvHdrLenErr_MSB 0xF
-#define QIB_7322_ErrStatus_0_RcvHdrLenErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvBadTidErr_LSB 0xE
-#define QIB_7322_ErrStatus_0_RcvBadTidErr_MSB 0xE
-#define QIB_7322_ErrStatus_0_RcvBadTidErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvBadVersionErr_LSB 0xB
-#define QIB_7322_ErrStatus_0_RcvBadVersionErr_MSB 0xB
-#define QIB_7322_ErrStatus_0_RcvBadVersionErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvIBFlowErr_LSB 0xA
-#define QIB_7322_ErrStatus_0_RcvIBFlowErr_MSB 0xA
-#define QIB_7322_ErrStatus_0_RcvIBFlowErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvEBPErr_LSB 0x9
-#define QIB_7322_ErrStatus_0_RcvEBPErr_MSB 0x9
-#define QIB_7322_ErrStatus_0_RcvEBPErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_LSB 0x8
-#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_MSB 0x8
-#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_LSB 0x7
-#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_MSB 0x7
-#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_LSB 0x6
-#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_MSB 0x6
-#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_LSB 0x5
-#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_MSB 0x5
-#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_LSB 0x4
-#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_MSB 0x4
-#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_LSB 0x3
-#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_MSB 0x3
-#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvICRCErr_LSB 0x2
-#define QIB_7322_ErrStatus_0_RcvICRCErr_MSB 0x2
-#define QIB_7322_ErrStatus_0_RcvICRCErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvVCRCErr_LSB 0x1
-#define QIB_7322_ErrStatus_0_RcvVCRCErr_MSB 0x1
-#define QIB_7322_ErrStatus_0_RcvVCRCErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvFormatErr_LSB 0x0
-#define QIB_7322_ErrStatus_0_RcvFormatErr_MSB 0x0
-#define QIB_7322_ErrStatus_0_RcvFormatErr_RMASK 0x1
-
-#define QIB_7322_ErrClear_0_OFFS 0x1090
-#define QIB_7322_ErrClear_0_DEF 0x0000000000000000
-#define QIB_7322_ErrClear_0_IBStatusChangedClear_LSB 0x3A
-#define QIB_7322_ErrClear_0_IBStatusChangedClear_MSB 0x3A
-#define QIB_7322_ErrClear_0_IBStatusChangedClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SHeadersErrClear_LSB 0x39
-#define QIB_7322_ErrClear_0_SHeadersErrClear_MSB 0x39
-#define QIB_7322_ErrClear_0_SHeadersErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_LSB 0x36
-#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_MSB 0x36
-#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaHaltErrClear_LSB 0x31
-#define QIB_7322_ErrClear_0_SDmaHaltErrClear_MSB 0x31
-#define QIB_7322_ErrClear_0_SDmaHaltErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_LSB 0x30
-#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_MSB 0x30
-#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_LSB 0x2F
-#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_MSB 0x2F
-#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_LSB 0x2E
-#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_MSB 0x2E
-#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_LSB 0x2D
-#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_MSB 0x2D
-#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_LSB 0x2C
-#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_MSB 0x2C
-#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDma1stDescErrClear_LSB 0x2B
-#define QIB_7322_ErrClear_0_SDma1stDescErrClear_MSB 0x2B
-#define QIB_7322_ErrClear_0_SDma1stDescErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaBaseErrClear_LSB 0x2A
-#define QIB_7322_ErrClear_0_SDmaBaseErrClear_MSB 0x2A
-#define QIB_7322_ErrClear_0_SDmaBaseErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_LSB 0x29
-#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_MSB 0x29
-#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_LSB 0x28
-#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_MSB 0x28
-#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_LSB 0x27
-#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_MSB 0x27
-#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_LSB 0x26
-#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_MSB 0x26
-#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_LSB 0x25
-#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_MSB 0x25
-#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_LSB 0x24
-#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_MSB 0x24
-#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_LSB 0x22
-#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_MSB 0x22
-#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_LSB 0x21
-#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_MSB 0x21
-#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendPktLenErrClear_LSB 0x20
-#define QIB_7322_ErrClear_0_SendPktLenErrClear_MSB 0x20
-#define QIB_7322_ErrClear_0_SendPktLenErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendUnderRunErrClear_LSB 0x1F
-#define QIB_7322_ErrClear_0_SendUnderRunErrClear_MSB 0x1F
-#define QIB_7322_ErrClear_0_SendUnderRunErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_LSB 0x1E
-#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_MSB 0x1E
-#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_LSB 0x1D
-#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_MSB 0x1D
-#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_LSB 0x11
-#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_MSB 0x11
-#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvHdrErrClear_LSB 0x10
-#define QIB_7322_ErrClear_0_RcvHdrErrClear_MSB 0x10
-#define QIB_7322_ErrClear_0_RcvHdrErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_LSB 0xF
-#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_MSB 0xF
-#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvBadTidErrClear_LSB 0xE
-#define QIB_7322_ErrClear_0_RcvBadTidErrClear_MSB 0xE
-#define QIB_7322_ErrClear_0_RcvBadTidErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_LSB 0xB
-#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_MSB 0xB
-#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_LSB 0xA
-#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_MSB 0xA
-#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvEBPErrClear_LSB 0x9
-#define QIB_7322_ErrClear_0_RcvEBPErrClear_MSB 0x9
-#define QIB_7322_ErrClear_0_RcvEBPErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_LSB 0x8
-#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_MSB 0x8
-#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_LSB 0x7
-#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_MSB 0x7
-#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_LSB 0x6
-#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_MSB 0x6
-#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_LSB 0x5
-#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_MSB 0x5
-#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_LSB 0x4
-#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_MSB 0x4
-#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_LSB 0x3
-#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_MSB 0x3
-#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvICRCErrClear_LSB 0x2
-#define QIB_7322_ErrClear_0_RcvICRCErrClear_MSB 0x2
-#define QIB_7322_ErrClear_0_RcvICRCErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvVCRCErrClear_LSB 0x1
-#define QIB_7322_ErrClear_0_RcvVCRCErrClear_MSB 0x1
-#define QIB_7322_ErrClear_0_RcvVCRCErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvFormatErrClear_LSB 0x0
-#define QIB_7322_ErrClear_0_RcvFormatErrClear_MSB 0x0
-#define QIB_7322_ErrClear_0_RcvFormatErrClear_RMASK 0x1
-
-#define QIB_7322_TXEStatus_0_OFFS 0x10B8
-#define QIB_7322_TXEStatus_0_DEF 0x0000000XC00080FF
-#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_LSB 0x1F
-#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_MSB 0x1F
-#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_RMASK 0x1
-#define QIB_7322_TXEStatus_0_RmFifoEmpty_LSB 0x1E
-#define QIB_7322_TXEStatus_0_RmFifoEmpty_MSB 0x1E
-#define QIB_7322_TXEStatus_0_RmFifoEmpty_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_LSB 0xF
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_MSB 0xF
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_LSB 0x7
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_MSB 0x7
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_LSB 0x6
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_MSB 0x6
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_LSB 0x5
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_MSB 0x5
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_LSB 0x4
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_MSB 0x4
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_LSB 0x3
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_MSB 0x3
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_LSB 0x2
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_MSB 0x2
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_LSB 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_MSB 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_LSB 0x0
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_MSB 0x0
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_RMASK 0x1
-
-#define QIB_7322_RcvCtrl_0_OFFS 0x1100
-#define QIB_7322_RcvCtrl_0_DEF 0x0000000000000000
-#define QIB_7322_RcvCtrl_0_RcvResetCredit_LSB 0x2A
-#define QIB_7322_RcvCtrl_0_RcvResetCredit_MSB 0x2A
-#define QIB_7322_RcvCtrl_0_RcvResetCredit_RMASK 0x1
-#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_LSB 0x29
-#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_MSB 0x29
-#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_RMASK 0x1
-#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_LSB 0x28
-#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_MSB 0x28
-#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_RMASK 0x1
-#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_LSB 0x27
-#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_MSB 0x27
-#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_RMASK 0x1
-#define QIB_7322_RcvCtrl_0_ContextEnableUser_LSB 0x2
-#define QIB_7322_RcvCtrl_0_ContextEnableUser_MSB 0x11
-#define QIB_7322_RcvCtrl_0_ContextEnableUser_RMASK 0xFFFF
-#define QIB_7322_RcvCtrl_0_ContextEnableKernel_LSB 0x0
-#define QIB_7322_RcvCtrl_0_ContextEnableKernel_MSB 0x0
-#define QIB_7322_RcvCtrl_0_ContextEnableKernel_RMASK 0x1
-
-#define QIB_7322_RcvBTHQP_0_OFFS 0x1108
-#define QIB_7322_RcvBTHQP_0_DEF 0x0000000000000000
-#define QIB_7322_RcvBTHQP_0_RcvBTHQP_LSB 0x0
-#define QIB_7322_RcvBTHQP_0_RcvBTHQP_MSB 0x17
-#define QIB_7322_RcvBTHQP_0_RcvBTHQP_RMASK 0xFFFFFF
-
-#define QIB_7322_RcvQPMapTableA_0_OFFS 0x1110
-#define QIB_7322_RcvQPMapTableA_0_DEF 0x0000000000000000
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_LSB 0x19
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_MSB 0x1D
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_LSB 0x14
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_MSB 0x18
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_LSB 0xF
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_MSB 0x13
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_LSB 0xA
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_MSB 0xE
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_LSB 0x5
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_MSB 0x9
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_LSB 0x0
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_MSB 0x4
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_RMASK 0x1F
-
-#define QIB_7322_RcvQPMapTableB_0_OFFS 0x1118
-#define QIB_7322_RcvQPMapTableB_0_DEF 0x0000000000000000
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_LSB 0x19
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_MSB 0x1D
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_LSB 0x14
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_MSB 0x18
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_LSB 0xF
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_MSB 0x13
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_LSB 0xA
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_MSB 0xE
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_LSB 0x5
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_MSB 0x9
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_LSB 0x0
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_MSB 0x4
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_RMASK 0x1F
-
-#define QIB_7322_RcvQPMapTableC_0_OFFS 0x1120
-#define QIB_7322_RcvQPMapTableC_0_DEF 0x0000000000000000
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_LSB 0x19
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_MSB 0x1D
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_LSB 0x14
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_MSB 0x18
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_LSB 0xF
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_MSB 0x13
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_LSB 0xA
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_MSB 0xE
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_LSB 0x5
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_MSB 0x9
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_LSB 0x0
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_MSB 0x4
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_RMASK 0x1F
-
-#define QIB_7322_RcvQPMapTableD_0_OFFS 0x1128
-#define QIB_7322_RcvQPMapTableD_0_DEF 0x0000000000000000
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_LSB 0x19
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_MSB 0x1D
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_LSB 0x14
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_MSB 0x18
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_LSB 0xF
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_MSB 0x13
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_LSB 0xA
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_MSB 0xE
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_LSB 0x5
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_MSB 0x9
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_LSB 0x0
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_MSB 0x4
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_RMASK 0x1F
-
-#define QIB_7322_RcvQPMapTableE_0_OFFS 0x1130
-#define QIB_7322_RcvQPMapTableE_0_DEF 0x0000000000000000
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_LSB 0x19
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_MSB 0x1D
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_LSB 0x14
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_MSB 0x18
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_LSB 0xF
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_MSB 0x13
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_LSB 0xA
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_MSB 0xE
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_LSB 0x5
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_MSB 0x9
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_LSB 0x0
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_MSB 0x4
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_RMASK 0x1F
-
-#define QIB_7322_RcvQPMapTableF_0_OFFS 0x1138
-#define QIB_7322_RcvQPMapTableF_0_DEF 0x0000000000000000
-#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_LSB 0x5
-#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_MSB 0x9
-#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_LSB 0x0
-#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_MSB 0x4
-#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_RMASK 0x1F
-
-#define QIB_7322_PSStat_0_OFFS 0x1140
-#define QIB_7322_PSStat_0_DEF 0x0000000000000000
-
-#define QIB_7322_PSStart_0_OFFS 0x1148
-#define QIB_7322_PSStart_0_DEF 0x0000000000000000
-
-#define QIB_7322_PSInterval_0_OFFS 0x1150
-#define QIB_7322_PSInterval_0_DEF 0x0000000000000000
-
-#define QIB_7322_RcvStatus_0_OFFS 0x1160
-#define QIB_7322_RcvStatus_0_DEF 0x0000000000000000
-#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_LSB 0x1
-#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_MSB 0x5
-#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_RMASK 0x1F
-#define QIB_7322_RcvStatus_0_RxPktInProgress_LSB 0x0
-#define QIB_7322_RcvStatus_0_RxPktInProgress_MSB 0x0
-#define QIB_7322_RcvStatus_0_RxPktInProgress_RMASK 0x1
-
-#define QIB_7322_RcvPartitionKey_0_OFFS 0x1168
-#define QIB_7322_RcvPartitionKey_0_DEF 0x0000000000000000
-
-#define QIB_7322_RcvQPMulticastContext_0_OFFS 0x1170
-#define QIB_7322_RcvQPMulticastContext_0_DEF 0x0000000000000000
-#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_LSB 0x0
-#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_MSB 0x4
-#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_RMASK 0x1F
-
-#define QIB_7322_RcvPktLEDCnt_0_OFFS 0x1178
-#define QIB_7322_RcvPktLEDCnt_0_DEF 0x0000000000000000
-#define QIB_7322_RcvPktLEDCnt_0_ONperiod_LSB 0x20
-#define QIB_7322_RcvPktLEDCnt_0_ONperiod_MSB 0x3F
-#define QIB_7322_RcvPktLEDCnt_0_ONperiod_RMASK 0xFFFFFFFF
-#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_LSB 0x0
-#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_MSB 0x1F
-#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_RMASK 0xFFFFFFFF
-
-#define QIB_7322_SendDmaIdleCnt_0_OFFS 0x1180
-#define QIB_7322_SendDmaIdleCnt_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_LSB 0x0
-#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_MSB 0xF
-#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_RMASK 0xFFFF
-
-#define QIB_7322_SendDmaReloadCnt_0_OFFS 0x1188
-#define QIB_7322_SendDmaReloadCnt_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_LSB 0x0
-#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_MSB 0xF
-#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_RMASK 0xFFFF
-
-#define QIB_7322_SendDmaDescCnt_0_OFFS 0x1190
-#define QIB_7322_SendDmaDescCnt_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_LSB 0x0
-#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_MSB 0xF
-#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_RMASK 0xFFFF
-
-#define QIB_7322_SendCtrl_0_OFFS 0x11C0
-#define QIB_7322_SendCtrl_0_DEF 0x0000000000000000
-#define QIB_7322_SendCtrl_0_IBVLArbiterEn_LSB 0xF
-#define QIB_7322_SendCtrl_0_IBVLArbiterEn_MSB 0xF
-#define QIB_7322_SendCtrl_0_IBVLArbiterEn_RMASK 0x1
-#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_LSB 0xE
-#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_MSB 0xE
-#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_RMASK 0x1
-#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_LSB 0xD
-#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_MSB 0xD
-#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_RMASK 0x1
-#define QIB_7322_SendCtrl_0_SDmaHalt_LSB 0xC
-#define QIB_7322_SendCtrl_0_SDmaHalt_MSB 0xC
-#define QIB_7322_SendCtrl_0_SDmaHalt_RMASK 0x1
-#define QIB_7322_SendCtrl_0_SDmaEnable_LSB 0xB
-#define QIB_7322_SendCtrl_0_SDmaEnable_MSB 0xB
-#define QIB_7322_SendCtrl_0_SDmaEnable_RMASK 0x1
-#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_LSB 0xA
-#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_MSB 0xA
-#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_RMASK 0x1
-#define QIB_7322_SendCtrl_0_SDmaIntEnable_LSB 0x9
-#define QIB_7322_SendCtrl_0_SDmaIntEnable_MSB 0x9
-#define QIB_7322_SendCtrl_0_SDmaIntEnable_RMASK 0x1
-#define QIB_7322_SendCtrl_0_SDmaCleanup_LSB 0x8
-#define QIB_7322_SendCtrl_0_SDmaCleanup_MSB 0x8
-#define QIB_7322_SendCtrl_0_SDmaCleanup_RMASK 0x1
-#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_LSB 0x7
-#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_MSB 0x7
-#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_RMASK 0x1
-#define QIB_7322_SendCtrl_0_SendEnable_LSB 0x3
-#define QIB_7322_SendCtrl_0_SendEnable_MSB 0x3
-#define QIB_7322_SendCtrl_0_SendEnable_RMASK 0x1
-#define QIB_7322_SendCtrl_0_TxeBypassIbc_LSB 0x1
-#define QIB_7322_SendCtrl_0_TxeBypassIbc_MSB 0x1
-#define QIB_7322_SendCtrl_0_TxeBypassIbc_RMASK 0x1
-#define QIB_7322_SendCtrl_0_TxeAbortIbc_LSB 0x0
-#define QIB_7322_SendCtrl_0_TxeAbortIbc_MSB 0x0
-#define QIB_7322_SendCtrl_0_TxeAbortIbc_RMASK 0x1
-
-#define QIB_7322_SendDmaBase_0_OFFS 0x11F8
-#define QIB_7322_SendDmaBase_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaBase_0_SendDmaBase_LSB 0x0
-#define QIB_7322_SendDmaBase_0_SendDmaBase_MSB 0x2F
-#define QIB_7322_SendDmaBase_0_SendDmaBase_RMASK 0xFFFFFFFFFFFF
-
-#define QIB_7322_SendDmaLenGen_0_OFFS 0x1200
-#define QIB_7322_SendDmaLenGen_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaLenGen_0_Generation_LSB 0x10
-#define QIB_7322_SendDmaLenGen_0_Generation_MSB 0x12
-#define QIB_7322_SendDmaLenGen_0_Generation_RMASK 0x7
-#define QIB_7322_SendDmaLenGen_0_Length_LSB 0x0
-#define QIB_7322_SendDmaLenGen_0_Length_MSB 0xF
-#define QIB_7322_SendDmaLenGen_0_Length_RMASK 0xFFFF
-
-#define QIB_7322_SendDmaTail_0_OFFS 0x1208
-#define QIB_7322_SendDmaTail_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaTail_0_SendDmaTail_LSB 0x0
-#define QIB_7322_SendDmaTail_0_SendDmaTail_MSB 0xF
-#define QIB_7322_SendDmaTail_0_SendDmaTail_RMASK 0xFFFF
-
-#define QIB_7322_SendDmaHead_0_OFFS 0x1210
-#define QIB_7322_SendDmaHead_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_LSB 0x20
-#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_MSB 0x2F
-#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_RMASK 0xFFFF
-#define QIB_7322_SendDmaHead_0_SendDmaHead_LSB 0x0
-#define QIB_7322_SendDmaHead_0_SendDmaHead_MSB 0xF
-#define QIB_7322_SendDmaHead_0_SendDmaHead_RMASK 0xFFFF
-
-#define QIB_7322_SendDmaHeadAddr_0_OFFS 0x1218
-#define QIB_7322_SendDmaHeadAddr_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_LSB 0x0
-#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_MSB 0x2F
-#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_RMASK 0xFFFFFFFFFFFF
-
-#define QIB_7322_SendDmaBufMask0_0_OFFS 0x1220
-#define QIB_7322_SendDmaBufMask0_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_LSB 0x0
-#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_MSB 0x3F
-#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_RMASK 0x0
-
-#define QIB_7322_SendDmaStatus_0_OFFS 0x1238
-#define QIB_7322_SendDmaStatus_0_DEF 0x0000000042000000
-#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_LSB 0x3F
-#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_MSB 0x3F
-#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_HaltInProg_LSB 0x3E
-#define QIB_7322_SendDmaStatus_0_HaltInProg_MSB 0x3E
-#define QIB_7322_SendDmaStatus_0_HaltInProg_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_LSB 0x3D
-#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_MSB 0x3D
-#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_LSB 0x2F
-#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_MSB 0x3C
-#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_RMASK 0x3FFF
-#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_LSB 0x28
-#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_MSB 0x2E
-#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_RMASK 0x7F
-#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_LSB 0x20
-#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_MSB 0x27
-#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_RMASK 0xFF
-#define QIB_7322_SendDmaStatus_0_ScbFull_LSB 0x1F
-#define QIB_7322_SendDmaStatus_0_ScbFull_MSB 0x1F
-#define QIB_7322_SendDmaStatus_0_ScbFull_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_ScbEmpty_LSB 0x1E
-#define QIB_7322_SendDmaStatus_0_ScbEmpty_MSB 0x1E
-#define QIB_7322_SendDmaStatus_0_ScbEmpty_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_ScbEntryValid_LSB 0x1D
-#define QIB_7322_SendDmaStatus_0_ScbEntryValid_MSB 0x1D
-#define QIB_7322_SendDmaStatus_0_ScbEntryValid_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_LSB 0x1C
-#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_MSB 0x1C
-#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_LSB 0x1B
-#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_MSB 0x1B
-#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_LSB 0x1A
-#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_MSB 0x1A
-#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_LSB 0x19
-#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_MSB 0x19
-#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_SplFifoFull_LSB 0x18
-#define QIB_7322_SendDmaStatus_0_SplFifoFull_MSB 0x18
-#define QIB_7322_SendDmaStatus_0_SplFifoFull_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_LSB 0x10
-#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_MSB 0x17
-#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_RMASK 0xFF
-#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_LSB 0x0
-#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_MSB 0xF
-#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_RMASK 0xFFFF
-
-#define QIB_7322_SendDmaPriorityThld_0_OFFS 0x1258
-#define QIB_7322_SendDmaPriorityThld_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_LSB 0x0
-#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_MSB 0x3
-#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_RMASK 0xF
-
-#define QIB_7322_SendHdrErrSymptom_0_OFFS 0x1260
-#define QIB_7322_SendHdrErrSymptom_0_DEF 0x0000000000000000
-#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_LSB 0x6
-#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_MSB 0x6
-#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_RMASK 0x1
-#define QIB_7322_SendHdrErrSymptom_0_GRHFail_LSB 0x5
-#define QIB_7322_SendHdrErrSymptom_0_GRHFail_MSB 0x5
-#define QIB_7322_SendHdrErrSymptom_0_GRHFail_RMASK 0x1
-#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_LSB 0x4
-#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_MSB 0x4
-#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_RMASK 0x1
-#define QIB_7322_SendHdrErrSymptom_0_QPFail_LSB 0x3
-#define QIB_7322_SendHdrErrSymptom_0_QPFail_MSB 0x3
-#define QIB_7322_SendHdrErrSymptom_0_QPFail_RMASK 0x1
-#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_LSB 0x2
-#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_MSB 0x2
-#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_RMASK 0x1
-#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_LSB 0x1
-#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_MSB 0x1
-#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_RMASK 0x1
-#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_LSB 0x0
-#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_MSB 0x0
-#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_RMASK 0x1
-
-#define QIB_7322_RxCreditVL0_0_OFFS 0x1280
-#define QIB_7322_RxCreditVL0_0_DEF 0x0000000000000000
-#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_LSB 0x10
-#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_MSB 0x1B
-#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_RMASK 0xFFF
-#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_LSB 0x0
-#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_MSB 0xB
-#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_RMASK 0xFFF
-
-#define QIB_7322_SendDmaBufUsed0_0_OFFS 0x1480
-#define QIB_7322_SendDmaBufUsed0_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_LSB 0x0
-#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_MSB 0x3F
-#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_RMASK 0x0
-
-#define QIB_7322_SendCheckControl_0_OFFS 0x14A8
-#define QIB_7322_SendCheckControl_0_DEF 0x0000000000000000
-#define QIB_7322_SendCheckControl_0_PKey_En_LSB 0x4
-#define QIB_7322_SendCheckControl_0_PKey_En_MSB 0x4
-#define QIB_7322_SendCheckControl_0_PKey_En_RMASK 0x1
-#define QIB_7322_SendCheckControl_0_BTHQP_En_LSB 0x3
-#define QIB_7322_SendCheckControl_0_BTHQP_En_MSB 0x3
-#define QIB_7322_SendCheckControl_0_BTHQP_En_RMASK 0x1
-#define QIB_7322_SendCheckControl_0_SLID_En_LSB 0x2
-#define QIB_7322_SendCheckControl_0_SLID_En_MSB 0x2
-#define QIB_7322_SendCheckControl_0_SLID_En_RMASK 0x1
-#define QIB_7322_SendCheckControl_0_RawIPV6_En_LSB 0x1
-#define QIB_7322_SendCheckControl_0_RawIPV6_En_MSB 0x1
-#define QIB_7322_SendCheckControl_0_RawIPV6_En_RMASK 0x1
-#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_LSB 0x0
-#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_MSB 0x0
-#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_RMASK 0x1
-
-#define QIB_7322_SendIBSLIDMask_0_OFFS 0x14B0
-#define QIB_7322_SendIBSLIDMask_0_DEF 0x0000000000000000
-#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_LSB 0x0
-#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_MSB 0xF
-#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK 0xFFFF
-
-#define QIB_7322_SendIBSLIDAssign_0_OFFS 0x14B8
-#define QIB_7322_SendIBSLIDAssign_0_DEF 0x0000000000000000
-#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_LSB 0x0
-#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_MSB 0xF
-#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK 0xFFFF
-
-#define QIB_7322_IBCStatusA_0_OFFS 0x1540
-#define QIB_7322_IBCStatusA_0_DEF 0x0000000000000X02
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_LSB 0x27
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_MSB 0x27
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_LSB 0x26
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_MSB 0x26
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_LSB 0x25
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_MSB 0x25
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_LSB 0x24
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_MSB 0x24
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_LSB 0x23
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_MSB 0x23
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_LSB 0x22
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_MSB 0x22
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_LSB 0x21
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_MSB 0x21
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_LSB 0x20
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_MSB 0x20
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_TxReady_LSB 0x1E
-#define QIB_7322_IBCStatusA_0_TxReady_MSB 0x1E
-#define QIB_7322_IBCStatusA_0_TxReady_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_LSB 0x1D
-#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_MSB 0x1D
-#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_LSB 0xF
-#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_MSB 0xF
-#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_ScrambleEn_LSB 0xE
-#define QIB_7322_IBCStatusA_0_ScrambleEn_MSB 0xE
-#define QIB_7322_IBCStatusA_0_ScrambleEn_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_LSB 0xD
-#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_MSB 0xD
-#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_LSB 0xC
-#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_MSB 0xC
-#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_LSB 0xA
-#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_MSB 0xA
-#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_LinkWidthActive_LSB 0x9
-#define QIB_7322_IBCStatusA_0_LinkWidthActive_MSB 0x9
-#define QIB_7322_IBCStatusA_0_LinkWidthActive_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_LinkSpeedActive_LSB 0x8
-#define QIB_7322_IBCStatusA_0_LinkSpeedActive_MSB 0x8
-#define QIB_7322_IBCStatusA_0_LinkSpeedActive_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_LinkState_LSB 0x5
-#define QIB_7322_IBCStatusA_0_LinkState_MSB 0x7
-#define QIB_7322_IBCStatusA_0_LinkState_RMASK 0x7
-#define QIB_7322_IBCStatusA_0_LinkTrainingState_LSB 0x0
-#define QIB_7322_IBCStatusA_0_LinkTrainingState_MSB 0x4
-#define QIB_7322_IBCStatusA_0_LinkTrainingState_RMASK 0x1F
-
-#define QIB_7322_IBCStatusB_0_OFFS 0x1548
-#define QIB_7322_IBCStatusB_0_DEF 0x00000000XXXXXXXX
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_LSB 0x27
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_MSB 0x27
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_RMASK 0x1
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_LSB 0x26
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_MSB 0x26
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_RMASK 0x1
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_LSB 0x25
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_MSB 0x25
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_RMASK 0x1
-#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_LSB 0x24
-#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_MSB 0x24
-#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_RMASK 0x1
-#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_LSB 0x20
-#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_MSB 0x23
-#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_RMASK 0xF
-#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_LSB 0x1E
-#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_MSB 0x1F
-#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_RMASK 0x3
-#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_LSB 0x1A
-#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_MSB 0x1D
-#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_RMASK 0xF
-#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_LSB 0x0
-#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_MSB 0x19
-#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_RMASK 0x3FFFFFF
-
-#define QIB_7322_IBCCtrlA_0_OFFS 0x1560
-#define QIB_7322_IBCCtrlA_0_DEF 0x0000000000000000
-#define QIB_7322_IBCCtrlA_0_Loopback_LSB 0x3F
-#define QIB_7322_IBCCtrlA_0_Loopback_MSB 0x3F
-#define QIB_7322_IBCCtrlA_0_Loopback_RMASK 0x1
-#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_LSB 0x3E
-#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_MSB 0x3E
-#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_RMASK 0x1
-#define QIB_7322_IBCCtrlA_0_IBLinkEn_LSB 0x3D
-#define QIB_7322_IBCCtrlA_0_IBLinkEn_MSB 0x3D
-#define QIB_7322_IBCCtrlA_0_IBLinkEn_RMASK 0x1
-#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_LSB 0x3C
-#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_MSB 0x3C
-#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_RMASK 0x1
-#define QIB_7322_IBCCtrlA_0_NumVLane_LSB 0x30
-#define QIB_7322_IBCCtrlA_0_NumVLane_MSB 0x32
-#define QIB_7322_IBCCtrlA_0_NumVLane_RMASK 0x7
-#define QIB_7322_IBCCtrlA_0_OverrunThreshold_LSB 0x24
-#define QIB_7322_IBCCtrlA_0_OverrunThreshold_MSB 0x27
-#define QIB_7322_IBCCtrlA_0_OverrunThreshold_RMASK 0xF
-#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_LSB 0x20
-#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_MSB 0x23
-#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_RMASK 0xF
-#define QIB_7322_IBCCtrlA_0_MaxPktLen_LSB 0x15
-#define QIB_7322_IBCCtrlA_0_MaxPktLen_MSB 0x1F
-#define QIB_7322_IBCCtrlA_0_MaxPktLen_RMASK 0x7FF
-#define QIB_7322_IBCCtrlA_0_LinkCmd_LSB 0x13
-#define QIB_7322_IBCCtrlA_0_LinkCmd_MSB 0x14
-#define QIB_7322_IBCCtrlA_0_LinkCmd_RMASK 0x3
-#define QIB_7322_IBCCtrlA_0_LinkInitCmd_LSB 0x10
-#define QIB_7322_IBCCtrlA_0_LinkInitCmd_MSB 0x12
-#define QIB_7322_IBCCtrlA_0_LinkInitCmd_RMASK 0x7
-#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_LSB 0x8
-#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_MSB 0xF
-#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_RMASK 0xFF
-#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_LSB 0x0
-#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_MSB 0x7
-#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_RMASK 0xFF
-
-#define QIB_7322_IBCCtrlB_0_OFFS 0x1568
-#define QIB_7322_IBCCtrlB_0_DEF 0x00000000000305FF
-#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_LSB 0x30
-#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_MSB 0x3F
-#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK 0xFFFF
-#define QIB_7322_IBCCtrlB_0_IB_DLID_LSB 0x20
-#define QIB_7322_IBCCtrlB_0_IB_DLID_MSB 0x2F
-#define QIB_7322_IBCCtrlB_0_IB_DLID_RMASK 0xFFFF
-#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_LSB 0x1B
-#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_MSB 0x1B
-#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_LSB 0x1A
-#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_MSB 0x1A
-#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_LSB 0x12
-#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_MSB 0x19
-#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_RMASK 0xFF
-#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_LSB 0x11
-#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_MSB 0x11
-#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_LSB 0x10
-#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_MSB 0x10
-#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_SD_DDS_LSB 0xC
-#define QIB_7322_IBCCtrlB_0_SD_DDS_MSB 0xF
-#define QIB_7322_IBCCtrlB_0_SD_DDS_RMASK 0xF
-#define QIB_7322_IBCCtrlB_0_SD_DDSV_LSB 0xB
-#define QIB_7322_IBCCtrlB_0_SD_DDSV_MSB 0xB
-#define QIB_7322_IBCCtrlB_0_SD_DDSV_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_LSB 0xA
-#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_MSB 0xA
-#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_LSB 0x9
-#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_MSB 0x9
-#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_LSB 0x8
-#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_MSB 0x8
-#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_LSB 0x7
-#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_MSB 0x7
-#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_LSB 0x5
-#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_MSB 0x6
-#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_RMASK 0x3
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_LSB 0x4
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_MSB 0x4
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_LSB 0x3
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_MSB 0x3
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_LSB 0x2
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_MSB 0x2
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_LSB 0x1
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_MSB 0x1
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_LSB 0x0
-#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_MSB 0x0
-#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_RMASK 0x1
-
-#define QIB_7322_IBCCtrlC_0_OFFS 0x1570
-#define QIB_7322_IBCCtrlC_0_DEF 0x0000000000000301
-#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_LSB 0x5
-#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_MSB 0x9
-#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_RMASK 0x1F
-#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_LSB 0x0
-#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_MSB 0x4
-#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_RMASK 0x1F
-
-#define QIB_7322_HRTBT_GUID_0_OFFS 0x1588
-#define QIB_7322_HRTBT_GUID_0_DEF 0x0000000000000000
-
-#define QIB_7322_IB_SDTEST_IF_TX_0_OFFS 0x1590
-#define QIB_7322_IB_SDTEST_IF_TX_0_DEF 0x0000000000000000
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_LSB 0x30
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_MSB 0x3F
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_RMASK 0xFFFF
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_LSB 0x20
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_MSB 0x2F
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_RMASK 0xFFFF
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_LSB 0xD
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_MSB 0xF
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_RMASK 0x7
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_LSB 0xB
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_MSB 0xC
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_RMASK 0x3
-#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_LSB 0x4
-#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_MSB 0x4
-#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_RMASK 0x1
-#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_LSB 0x2
-#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_MSB 0x3
-#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_RMASK 0x3
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_LSB 0x1
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_MSB 0x1
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_RMASK 0x1
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_LSB 0x0
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_MSB 0x0
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_RMASK 0x1
-
-#define QIB_7322_IB_SDTEST_IF_RX_0_OFFS 0x1598
-#define QIB_7322_IB_SDTEST_IF_RX_0_DEF 0x0000000000000000
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_LSB 0x30
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_MSB 0x3F
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_RMASK 0xFFFF
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_LSB 0x20
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_MSB 0x2F
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_RMASK 0xFFFF
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_LSB 0x18
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_MSB 0x1F
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_RMASK 0xFF
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_LSB 0x10
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_MSB 0x17
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_RMASK 0xFF
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_LSB 0x1
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_MSB 0x1
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_RMASK 0x1
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_LSB 0x0
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_MSB 0x0
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_RMASK 0x1
-
-#define QIB_7322_IBNCModeCtrl_0_OFFS 0x15B8
-#define QIB_7322_IBNCModeCtrl_0_DEF 0x0000000000000000
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_LSB 0x22
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_MSB 0x22
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_RMASK 0x1
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_LSB 0x21
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_MSB 0x21
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_RMASK 0x1
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_LSB 0x20
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_MSB 0x20
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_RMASK 0x1
-#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_LSB 0x11
-#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_MSB 0x19
-#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_RMASK 0x1FF
-#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_LSB 0x8
-#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_MSB 0x10
-#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_RMASK 0x1FF
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_LSB 0x2
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_MSB 0x2
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_RMASK 0x1
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_LSB 0x1
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_MSB 0x1
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_RMASK 0x1
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_LSB 0x0
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_MSB 0x0
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_RMASK 0x1
-
-#define QIB_7322_IBSerdesStatus_0_OFFS 0x15D0
-#define QIB_7322_IBSerdesStatus_0_DEF 0x0000000000000000
-
-#define QIB_7322_IBPCSConfig_0_OFFS 0x15D8
-#define QIB_7322_IBPCSConfig_0_DEF 0x0000000000000007
-#define QIB_7322_IBPCSConfig_0_link_sync_mask_LSB 0x9
-#define QIB_7322_IBPCSConfig_0_link_sync_mask_MSB 0x12
-#define QIB_7322_IBPCSConfig_0_link_sync_mask_RMASK 0x3FF
-#define QIB_7322_IBPCSConfig_0_xcv_rreset_LSB 0x2
-#define QIB_7322_IBPCSConfig_0_xcv_rreset_MSB 0x2
-#define QIB_7322_IBPCSConfig_0_xcv_rreset_RMASK 0x1
-#define QIB_7322_IBPCSConfig_0_xcv_treset_LSB 0x1
-#define QIB_7322_IBPCSConfig_0_xcv_treset_MSB 0x1
-#define QIB_7322_IBPCSConfig_0_xcv_treset_RMASK 0x1
-#define QIB_7322_IBPCSConfig_0_tx_rx_reset_LSB 0x0
-#define QIB_7322_IBPCSConfig_0_tx_rx_reset_MSB 0x0
-#define QIB_7322_IBPCSConfig_0_tx_rx_reset_RMASK 0x1
-
-#define QIB_7322_IBSerdesCtrl_0_OFFS 0x15E0
-#define QIB_7322_IBSerdesCtrl_0_DEF 0x0000000000FFA00F
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_LSB 0x1A
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_MSB 0x1A
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_LSB 0x19
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_MSB 0x19
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_LSB 0x18
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_MSB 0x18
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_LSB 0x14
-#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_MSB 0x17
-#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_RMASK 0xF
-#define QIB_7322_IBSerdesCtrl_0_CGMODE_LSB 0x10
-#define QIB_7322_IBSerdesCtrl_0_CGMODE_MSB 0x13
-#define QIB_7322_IBSerdesCtrl_0_CGMODE_RMASK 0xF
-#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_LSB 0xF
-#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_MSB 0xF
-#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_LSB 0xD
-#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_MSB 0xD
-#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_LPEN_LSB 0xC
-#define QIB_7322_IBSerdesCtrl_0_LPEN_MSB 0xC
-#define QIB_7322_IBSerdesCtrl_0_LPEN_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_PLLPD_LSB 0xB
-#define QIB_7322_IBSerdesCtrl_0_PLLPD_MSB 0xB
-#define QIB_7322_IBSerdesCtrl_0_PLLPD_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_TXPD_LSB 0xA
-#define QIB_7322_IBSerdesCtrl_0_TXPD_MSB 0xA
-#define QIB_7322_IBSerdesCtrl_0_TXPD_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_RXPD_LSB 0x9
-#define QIB_7322_IBSerdesCtrl_0_RXPD_MSB 0x9
-#define QIB_7322_IBSerdesCtrl_0_RXPD_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_TXIDLE_LSB 0x8
-#define QIB_7322_IBSerdesCtrl_0_TXIDLE_MSB 0x8
-#define QIB_7322_IBSerdesCtrl_0_TXIDLE_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_CMODE_LSB 0x0
-#define QIB_7322_IBSerdesCtrl_0_CMODE_MSB 0x6
-#define QIB_7322_IBSerdesCtrl_0_CMODE_RMASK 0x7F
-
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_OFFS 0x1600
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_DEF 0x0000000000000000
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_LSB 0x1F
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_MSB 0x1F
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_RMASK 0x1
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_LSB 0x1E
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_MSB 0x1E
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_RMASK 0x1
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_LSB 0xE
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_MSB 0x11
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_RMASK 0xF
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_LSB 0x9
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_MSB 0xD
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_RMASK 0x1F
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_LSB 0x5
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_MSB 0x8
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_RMASK 0xF
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_LSB 0x3
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_MSB 0x4
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_RMASK 0x3
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_LSB 0x0
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_MSB 0x2
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_RMASK 0x7
-
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_OFFS 0x1640
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_DEF 0x0000000000000000
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_LSB 0x27
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_MSB 0x27
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_LSB 0x26
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_MSB 0x26
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_LSB 0x25
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_MSB 0x25
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_LSB 0x24
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_MSB 0x24
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_LSB 0x23
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_MSB 0x23
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_LSB 0x22
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_MSB 0x22
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_LSB 0x21
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_MSB 0x21
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_LSB 0x20
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_MSB 0x20
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_LSB 0x18
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_MSB 0x1F
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_LSB 0x10
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_MSB 0x17
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_LSB 0x8
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_MSB 0xF
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_LSB 0x0
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_MSB 0x7
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_RMASK 0xFF
-
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_OFFS 0x1648
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_DEF 0x0000000000000000
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_LSB 0x27
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_MSB 0x27
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_LSB 0x26
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_MSB 0x26
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_LSB 0x25
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_MSB 0x25
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_LSB 0x24
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_MSB 0x24
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_LSB 0x23
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_MSB 0x23
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_LSB 0x22
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_MSB 0x22
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_LSB 0x21
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_MSB 0x21
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_LSB 0x20
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_MSB 0x20
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_LSB 0x18
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_MSB 0x1F
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_LSB 0x10
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_MSB 0x17
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_LSB 0x8
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_MSB 0xF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_LSB 0x0
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_MSB 0x7
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_RMASK 0xFF
-
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_OFFS 0x1650
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_DEF 0x0000000000000000
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_LSB 0x27
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_MSB 0x27
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_LSB 0x26
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_MSB 0x26
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_LSB 0x25
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_MSB 0x25
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_LSB 0x24
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_MSB 0x24
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_LSB 0x23
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_MSB 0x23
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_LSB 0x22
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_MSB 0x22
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_LSB 0x21
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_MSB 0x21
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_LSB 0x20
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_MSB 0x20
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_LSB 0x18
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_MSB 0x1F
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_LSB 0x10
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_MSB 0x17
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_LSB 0x8
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_MSB 0xF
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_LSB 0x0
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_MSB 0x7
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_RMASK 0xFF
-
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_OFFS 0x1658
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_DEF 0x0000000000000000
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_LSB 0x27
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_MSB 0x27
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_LSB 0x26
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_MSB 0x26
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_LSB 0x25
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_MSB 0x25
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_LSB 0x24
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_MSB 0x24
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_LSB 0x23
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_MSB 0x23
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_LSB 0x22
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_MSB 0x22
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_LSB 0x21
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_MSB 0x21
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_LSB 0x20
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_MSB 0x20
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_LSB 0x18
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_MSB 0x1F
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_LSB 0x10
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_MSB 0x17
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_LSB 0x8
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_MSB 0xF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_LSB 0x0
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_MSB 0x7
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_RMASK 0xFF
-
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_OFFS 0x1660
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_DEF 0x0000000000000000
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_LSB 0x27
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_MSB 0x27
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_LSB 0x26
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_MSB 0x26
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_LSB 0x25
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_MSB 0x25
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_LSB 0x24
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_MSB 0x24
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_LSB 0x23
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_MSB 0x23
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_LSB 0x22
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_MSB 0x22
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_LSB 0x21
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_MSB 0x21
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_LSB 0x20
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_MSB 0x20
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_LSB 0x18
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_MSB 0x1F
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_LSB 0x10
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_MSB 0x17
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_LSB 0x8
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_MSB 0xF
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_LSB 0x0
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_MSB 0x7
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_RMASK 0xFF
-
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_OFFS 0x1668
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_DEF 0x0000000000000000
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_LSB 0x27
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_MSB 0x27
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_LSB 0x26
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_MSB 0x26
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_LSB 0x25
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_MSB 0x25
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_LSB 0x24
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_MSB 0x24
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_LSB 0x23
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_MSB 0x23
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_LSB 0x22
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_MSB 0x22
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_LSB 0x21
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_MSB 0x21
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_LSB 0x20
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_MSB 0x20
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_LSB 0x18
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_MSB 0x1F
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_LSB 0x10
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_MSB 0x17
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_LSB 0x8
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_MSB 0xF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_LSB 0x0
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_MSB 0x7
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_RMASK 0xFF
-
-#define QIB_7322_ADAPT_DISABLE_TIMER_THRESHOLD_0_OFFS 0x1670
-#define QIB_7322_ADAPT_DISABLE_TIMER_THRESHOLD_0_DEF 0x0000000000000000
-
-#define QIB_7322_HighPriorityLimit_0_OFFS 0x1BC0
-#define QIB_7322_HighPriorityLimit_0_DEF 0x0000000000000000
-#define QIB_7322_HighPriorityLimit_0_Limit_LSB 0x0
-#define QIB_7322_HighPriorityLimit_0_Limit_MSB 0x7
-#define QIB_7322_HighPriorityLimit_0_Limit_RMASK 0xFF
-
-#define QIB_7322_LowPriority0_0_OFFS 0x1C00
-#define QIB_7322_LowPriority0_0_DEF 0x0000000000000000
-#define QIB_7322_LowPriority0_0_VirtualLane_LSB 0x10
-#define QIB_7322_LowPriority0_0_VirtualLane_MSB 0x12
-#define QIB_7322_LowPriority0_0_VirtualLane_RMASK 0x7
-#define QIB_7322_LowPriority0_0_Weight_LSB 0x0
-#define QIB_7322_LowPriority0_0_Weight_MSB 0x7
-#define QIB_7322_LowPriority0_0_Weight_RMASK 0xFF
-
-#define QIB_7322_HighPriority0_0_OFFS 0x1E00
-#define QIB_7322_HighPriority0_0_DEF 0x0000000000000000
-#define QIB_7322_HighPriority0_0_VirtualLane_LSB 0x10
-#define QIB_7322_HighPriority0_0_VirtualLane_MSB 0x12
-#define QIB_7322_HighPriority0_0_VirtualLane_RMASK 0x7
-#define QIB_7322_HighPriority0_0_Weight_LSB 0x0
-#define QIB_7322_HighPriority0_0_Weight_MSB 0x7
-#define QIB_7322_HighPriority0_0_Weight_RMASK 0xFF
-
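The register definitions in this block follow a fixed naming convention: <reg>_OFFS is the byte offset of the 64-bit register, <reg>_DEF appears to be its default value, and each documented field gets <reg>_<field>_LSB (starting bit position), _MSB, and _RMASK (the mask to apply after shifting right by _LSB). A minimal sketch of reading a field under that convention; the QIB_FIELD helper name is made up for illustration and is not part of this header:

	/* Hypothetical helper: extract a field using the _LSB/_RMASK macros above. */
	#define QIB_FIELD(regval, reg, fld) \
		(((regval) >> reg##_##fld##_LSB) & reg##_##fld##_RMASK)

	static inline u32 qib_demo_lowpri_weight(u64 regval)
	{
		/* Weight occupies bits 7:0, VirtualLane bits 18:16 of LowPriority0_0 */
		return QIB_FIELD(regval, QIB_7322_LowPriority0_0, Weight);
	}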
-#define QIB_7322_CntrRegBase_1_OFFS 0x2028
-#define QIB_7322_CntrRegBase_1_DEF 0x0000000000013000
-
-#define QIB_7322_RcvQPMulticastContext_1_OFFS 0x2170
-
-#define QIB_7322_SendCtrl_1_OFFS 0x21C0
-
-#define QIB_7322_SendBufAvail0_OFFS 0x3000
-#define QIB_7322_SendBufAvail0_DEF 0x0000000000000000
-#define QIB_7322_SendBufAvail0_SendBuf_31_0_LSB 0x0
-#define QIB_7322_SendBufAvail0_SendBuf_31_0_MSB 0x3F
-#define QIB_7322_SendBufAvail0_SendBuf_31_0_RMASK 0x0
-
-#define QIB_7322_MsixTable_OFFS 0x8000
-#define QIB_7322_MsixTable_DEF 0x0000000000000000
-
-#define QIB_7322_MsixPba_OFFS 0x9000
-#define QIB_7322_MsixPba_DEF 0x0000000000000000
-
-#define QIB_7322_LAMemory_OFFS 0xA000
-#define QIB_7322_LAMemory_DEF 0x0000000000000000
-
-#define QIB_7322_LBIntCnt_OFFS 0x11000
-#define QIB_7322_LBIntCnt_DEF 0x0000000000000000
-
-#define QIB_7322_LBFlowStallCnt_OFFS 0x11008
-#define QIB_7322_LBFlowStallCnt_DEF 0x0000000000000000
-
-#define QIB_7322_RxTIDFullErrCnt_OFFS 0x110D0
-#define QIB_7322_RxTIDFullErrCnt_DEF 0x0000000000000000
-
-#define QIB_7322_RxTIDValidErrCnt_OFFS 0x110D8
-#define QIB_7322_RxTIDValidErrCnt_DEF 0x0000000000000000
-
-#define QIB_7322_RxP0HdrEgrOvflCnt_OFFS 0x110E8
-#define QIB_7322_RxP0HdrEgrOvflCnt_DEF 0x0000000000000000
-
-#define QIB_7322_PcieRetryBufDiagQwordCnt_OFFS 0x111A0
-#define QIB_7322_PcieRetryBufDiagQwordCnt_DEF 0x0000000000000000
-
-#define QIB_7322_RxTidFlowDropCnt_OFFS 0x111E0
-#define QIB_7322_RxTidFlowDropCnt_DEF 0x0000000000000000
-
-#define QIB_7322_LBIntCnt_0_OFFS 0x12000
-#define QIB_7322_LBIntCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxCreditUpToDateTimeOut_0_OFFS 0x12008
-#define QIB_7322_TxCreditUpToDateTimeOut_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxSDmaDescCnt_0_OFFS 0x12010
-#define QIB_7322_TxSDmaDescCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxUnsupVLErrCnt_0_OFFS 0x12018
-#define QIB_7322_TxUnsupVLErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxDataPktCnt_0_OFFS 0x12020
-#define QIB_7322_TxDataPktCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxFlowPktCnt_0_OFFS 0x12028
-#define QIB_7322_TxFlowPktCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxDwordCnt_0_OFFS 0x12030
-#define QIB_7322_TxDwordCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxLenErrCnt_0_OFFS 0x12038
-#define QIB_7322_TxLenErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxMaxMinLenErrCnt_0_OFFS 0x12040
-#define QIB_7322_TxMaxMinLenErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxUnderrunCnt_0_OFFS 0x12048
-#define QIB_7322_TxUnderrunCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxFlowStallCnt_0_OFFS 0x12050
-#define QIB_7322_TxFlowStallCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxDroppedPktCnt_0_OFFS 0x12058
-#define QIB_7322_TxDroppedPktCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxDroppedPktCnt_0_OFFS 0x12060
-#define QIB_7322_RxDroppedPktCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxDataPktCnt_0_OFFS 0x12068
-#define QIB_7322_RxDataPktCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxFlowPktCnt_0_OFFS 0x12070
-#define QIB_7322_RxFlowPktCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxDwordCnt_0_OFFS 0x12078
-#define QIB_7322_RxDwordCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxLenErrCnt_0_OFFS 0x12080
-#define QIB_7322_RxLenErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxMaxMinLenErrCnt_0_OFFS 0x12088
-#define QIB_7322_RxMaxMinLenErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxICRCErrCnt_0_OFFS 0x12090
-#define QIB_7322_RxICRCErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxVCRCErrCnt_0_OFFS 0x12098
-#define QIB_7322_RxVCRCErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxFlowCtrlViolCnt_0_OFFS 0x120A0
-#define QIB_7322_RxFlowCtrlViolCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxVersionErrCnt_0_OFFS 0x120A8
-#define QIB_7322_RxVersionErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxLinkMalformCnt_0_OFFS 0x120B0
-#define QIB_7322_RxLinkMalformCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxEBPCnt_0_OFFS 0x120B8
-#define QIB_7322_RxEBPCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxLPCRCErrCnt_0_OFFS 0x120C0
-#define QIB_7322_RxLPCRCErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxBufOvflCnt_0_OFFS 0x120C8
-#define QIB_7322_RxBufOvflCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxLenTruncateCnt_0_OFFS 0x120D0
-#define QIB_7322_RxLenTruncateCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxPKeyMismatchCnt_0_OFFS 0x120E0
-#define QIB_7322_RxPKeyMismatchCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_IBLinkDownedCnt_0_OFFS 0x12180
-#define QIB_7322_IBLinkDownedCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_IBSymbolErrCnt_0_OFFS 0x12188
-#define QIB_7322_IBSymbolErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_IBStatusChangeCnt_0_OFFS 0x12190
-#define QIB_7322_IBStatusChangeCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_IBLinkErrRecoveryCnt_0_OFFS 0x12198
-#define QIB_7322_IBLinkErrRecoveryCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_ExcessBufferOvflCnt_0_OFFS 0x121A8
-#define QIB_7322_ExcessBufferOvflCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_LocalLinkIntegrityErrCnt_0_OFFS 0x121B0
-#define QIB_7322_LocalLinkIntegrityErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxVlErrCnt_0_OFFS 0x121B8
-#define QIB_7322_RxVlErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxDlidFltrCnt_0_OFFS 0x121C0
-#define QIB_7322_RxDlidFltrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxVL15DroppedPktCnt_0_OFFS 0x121C8
-#define QIB_7322_RxVL15DroppedPktCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxOtherLocalPhyErrCnt_0_OFFS 0x121D0
-#define QIB_7322_RxOtherLocalPhyErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxQPInvalidContextCnt_0_OFFS 0x121D8
-#define QIB_7322_RxQPInvalidContextCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxHeadersErrCnt_0_OFFS 0x121F8
-#define QIB_7322_TxHeadersErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_PSRcvDataCount_0_OFFS 0x12218
-#define QIB_7322_PSRcvDataCount_0_DEF 0x0000000000000000
-
-#define QIB_7322_PSRcvPktsCount_0_OFFS 0x12220
-#define QIB_7322_PSRcvPktsCount_0_DEF 0x0000000000000000
-
-#define QIB_7322_PSXmitDataCount_0_OFFS 0x12228
-#define QIB_7322_PSXmitDataCount_0_DEF 0x0000000000000000
-
-#define QIB_7322_PSXmitPktsCount_0_OFFS 0x12230
-#define QIB_7322_PSXmitPktsCount_0_DEF 0x0000000000000000
-
-#define QIB_7322_PSXmitWaitCount_0_OFFS 0x12238
-#define QIB_7322_PSXmitWaitCount_0_DEF 0x0000000000000000
-
-#define QIB_7322_LBIntCnt_1_OFFS 0x13000
-#define QIB_7322_LBIntCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxCreditUpToDateTimeOut_1_OFFS 0x13008
-#define QIB_7322_TxCreditUpToDateTimeOut_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxSDmaDescCnt_1_OFFS 0x13010
-#define QIB_7322_TxSDmaDescCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxUnsupVLErrCnt_1_OFFS 0x13018
-#define QIB_7322_TxUnsupVLErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxDataPktCnt_1_OFFS 0x13020
-#define QIB_7322_TxDataPktCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxFlowPktCnt_1_OFFS 0x13028
-#define QIB_7322_TxFlowPktCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxDwordCnt_1_OFFS 0x13030
-#define QIB_7322_TxDwordCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxLenErrCnt_1_OFFS 0x13038
-#define QIB_7322_TxLenErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxMaxMinLenErrCnt_1_OFFS 0x13040
-#define QIB_7322_TxMaxMinLenErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxUnderrunCnt_1_OFFS 0x13048
-#define QIB_7322_TxUnderrunCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxFlowStallCnt_1_OFFS 0x13050
-#define QIB_7322_TxFlowStallCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxDroppedPktCnt_1_OFFS 0x13058
-#define QIB_7322_TxDroppedPktCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxDroppedPktCnt_1_OFFS 0x13060
-#define QIB_7322_RxDroppedPktCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxDataPktCnt_1_OFFS 0x13068
-#define QIB_7322_RxDataPktCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxFlowPktCnt_1_OFFS 0x13070
-#define QIB_7322_RxFlowPktCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxDwordCnt_1_OFFS 0x13078
-#define QIB_7322_RxDwordCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxLenErrCnt_1_OFFS 0x13080
-#define QIB_7322_RxLenErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxMaxMinLenErrCnt_1_OFFS 0x13088
-#define QIB_7322_RxMaxMinLenErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxICRCErrCnt_1_OFFS 0x13090
-#define QIB_7322_RxICRCErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxVCRCErrCnt_1_OFFS 0x13098
-#define QIB_7322_RxVCRCErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxFlowCtrlViolCnt_1_OFFS 0x130A0
-#define QIB_7322_RxFlowCtrlViolCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxVersionErrCnt_1_OFFS 0x130A8
-#define QIB_7322_RxVersionErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxLinkMalformCnt_1_OFFS 0x130B0
-#define QIB_7322_RxLinkMalformCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxEBPCnt_1_OFFS 0x130B8
-#define QIB_7322_RxEBPCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxLPCRCErrCnt_1_OFFS 0x130C0
-#define QIB_7322_RxLPCRCErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxBufOvflCnt_1_OFFS 0x130C8
-#define QIB_7322_RxBufOvflCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxLenTruncateCnt_1_OFFS 0x130D0
-#define QIB_7322_RxLenTruncateCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxPKeyMismatchCnt_1_OFFS 0x130E0
-#define QIB_7322_RxPKeyMismatchCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_IBLinkDownedCnt_1_OFFS 0x13180
-#define QIB_7322_IBLinkDownedCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_IBSymbolErrCnt_1_OFFS 0x13188
-#define QIB_7322_IBSymbolErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_IBStatusChangeCnt_1_OFFS 0x13190
-#define QIB_7322_IBStatusChangeCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_IBLinkErrRecoveryCnt_1_OFFS 0x13198
-#define QIB_7322_IBLinkErrRecoveryCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_ExcessBufferOvflCnt_1_OFFS 0x131A8
-#define QIB_7322_ExcessBufferOvflCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_LocalLinkIntegrityErrCnt_1_OFFS 0x131B0
-#define QIB_7322_LocalLinkIntegrityErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxVlErrCnt_1_OFFS 0x131B8
-#define QIB_7322_RxVlErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxDlidFltrCnt_1_OFFS 0x131C0
-#define QIB_7322_RxDlidFltrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxVL15DroppedPktCnt_1_OFFS 0x131C8
-#define QIB_7322_RxVL15DroppedPktCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxOtherLocalPhyErrCnt_1_OFFS 0x131D0
-#define QIB_7322_RxOtherLocalPhyErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxQPInvalidContextCnt_1_OFFS 0x131D8
-#define QIB_7322_RxQPInvalidContextCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxHeadersErrCnt_1_OFFS 0x131F8
-#define QIB_7322_TxHeadersErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_PSRcvDataCount_1_OFFS 0x13218
-#define QIB_7322_PSRcvDataCount_1_DEF 0x0000000000000000
-
-#define QIB_7322_PSRcvPktsCount_1_OFFS 0x13220
-#define QIB_7322_PSRcvPktsCount_1_DEF 0x0000000000000000
-
-#define QIB_7322_PSXmitDataCount_1_OFFS 0x13228
-#define QIB_7322_PSXmitDataCount_1_DEF 0x0000000000000000
-
-#define QIB_7322_PSXmitPktsCount_1_OFFS 0x13230
-#define QIB_7322_PSXmitPktsCount_1_DEF 0x0000000000000000
-
-#define QIB_7322_PSXmitWaitCount_1_OFFS 0x13238
-#define QIB_7322_PSXmitWaitCount_1_DEF 0x0000000000000000
-
-#define QIB_7322_RcvEgrArray_OFFS 0x14000
-#define QIB_7322_RcvEgrArray_DEF 0x0000000000000000
-#define QIB_7322_RcvEgrArray_RT_BufSize_LSB 0x25
-#define QIB_7322_RcvEgrArray_RT_BufSize_MSB 0x27
-#define QIB_7322_RcvEgrArray_RT_BufSize_RMASK 0x7
-#define QIB_7322_RcvEgrArray_RT_Addr_LSB 0x0
-#define QIB_7322_RcvEgrArray_RT_Addr_MSB 0x24
-#define QIB_7322_RcvEgrArray_RT_Addr_RMASK 0x1FFFFFFFFF
-
-#define QIB_7322_RcvTIDArray0_OFFS 0x50000
-#define QIB_7322_RcvTIDArray0_DEF 0x0000000000000000
-#define QIB_7322_RcvTIDArray0_RT_BufSize_LSB 0x25
-#define QIB_7322_RcvTIDArray0_RT_BufSize_MSB 0x27
-#define QIB_7322_RcvTIDArray0_RT_BufSize_RMASK 0x7
-#define QIB_7322_RcvTIDArray0_RT_Addr_LSB 0x0
-#define QIB_7322_RcvTIDArray0_RT_Addr_MSB 0x24
-#define QIB_7322_RcvTIDArray0_RT_Addr_RMASK 0x1FFFFFFFFF
-
-#define QIB_7322_IBSD_DDS_MAP_TABLE_0_OFFS 0xD0000
-#define QIB_7322_IBSD_DDS_MAP_TABLE_0_DEF 0x0000000000000000
-
-#define QIB_7322_RcvHdrTail0_OFFS 0x200000
-#define QIB_7322_RcvHdrTail0_DEF 0x0000000000000000
-
-#define QIB_7322_RcvHdrHead0_OFFS 0x200008
-#define QIB_7322_RcvHdrHead0_DEF 0x0000000000000000
-#define QIB_7322_RcvHdrHead0_counter_LSB 0x20
-#define QIB_7322_RcvHdrHead0_counter_MSB 0x2F
-#define QIB_7322_RcvHdrHead0_counter_RMASK 0xFFFF
-#define QIB_7322_RcvHdrHead0_RcvHeadPointer_LSB 0x0
-#define QIB_7322_RcvHdrHead0_RcvHeadPointer_MSB 0x1F
-#define QIB_7322_RcvHdrHead0_RcvHeadPointer_RMASK 0xFFFFFFFF
-
-#define QIB_7322_RcvEgrIndexTail0_OFFS 0x200010
-#define QIB_7322_RcvEgrIndexTail0_DEF 0x0000000000000000
-
-#define QIB_7322_RcvEgrIndexHead0_OFFS 0x200018
-#define QIB_7322_RcvEgrIndexHead0_DEF 0x0000000000000000
-
-#define QIB_7322_RcvTIDFlowTable0_OFFS 0x201000
-#define QIB_7322_RcvTIDFlowTable0_DEF 0x0000000000000000
-#define QIB_7322_RcvTIDFlowTable0_GenMismatch_LSB 0x1C
-#define QIB_7322_RcvTIDFlowTable0_GenMismatch_MSB 0x1C
-#define QIB_7322_RcvTIDFlowTable0_GenMismatch_RMASK 0x1
-#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_LSB 0x1B
-#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_MSB 0x1B
-#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_RMASK 0x1
-#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_LSB 0x16
-#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_MSB 0x16
-#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_RMASK 0x1
-#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_LSB 0x15
-#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_MSB 0x15
-#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_RMASK 0x1
-#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_LSB 0x14
-#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_MSB 0x14
-#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_RMASK 0x1
-#define QIB_7322_RcvTIDFlowTable0_FlowValid_LSB 0x13
-#define QIB_7322_RcvTIDFlowTable0_FlowValid_MSB 0x13
-#define QIB_7322_RcvTIDFlowTable0_FlowValid_RMASK 0x1
-#define QIB_7322_RcvTIDFlowTable0_GenVal_LSB 0xB
-#define QIB_7322_RcvTIDFlowTable0_GenVal_MSB 0x12
-#define QIB_7322_RcvTIDFlowTable0_GenVal_RMASK 0xFF
-#define QIB_7322_RcvTIDFlowTable0_SeqNum_LSB 0x0
-#define QIB_7322_RcvTIDFlowTable0_SeqNum_MSB 0xA
-#define QIB_7322_RcvTIDFlowTable0_SeqNum_RMASK 0x7FF
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
deleted file mode 100644
index 4fb78abd8ba1..000000000000
--- a/drivers/infiniband/hw/qib/qib_common.h
+++ /dev/null
@@ -1,812 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _QIB_COMMON_H
-#define _QIB_COMMON_H
-
-/*
- * This file contains defines, structures, etc. that are used
- * to communicate between kernel and user code.
- */
-
-/* This is the IEEE-assigned OUI for QLogic Inc. QLogic_IB */
-#define QIB_SRC_OUI_1 0x00
-#define QIB_SRC_OUI_2 0x11
-#define QIB_SRC_OUI_3 0x75
-
-/* version of protocol header (known to chip also). In the long run,
- * we should be able to generate and accept a range of version numbers;
- * for now we only accept one, and it's compiled in.
- */
-#define IPS_PROTO_VERSION 2
-
-/*
- * These are compile time constants that you may want to enable or disable
- * if you are trying to debug problems with code or performance.
- * QIB_VERBOSE_TRACING define as 1 if you want additional tracing in
- * fastpath code
- * QIB_TRACE_REGWRITES define as 1 if you want register writes to be
- * traced in fastpath code
- * _QIB_TRACING define as 0 if you want to remove all tracing in a
- * compilation unit
- */
-
-/*
- * The value in the BTH QP field that QLogic_IB uses to differentiate
- * a qlogic_ib protocol IB packet from standard IB transport.
- * It needs to be even (0x656b78), because the LSB is sometimes
- * used for the MSB of context. The change may cause a problem
- * interoperating with older software.
- */
-#define QIB_KD_QP 0x656b78
-
-/*
- * These are the status bits readable (in ascii form, 64bit value)
- * from the "status" sysfs file. For binary compatibility, values
- * must remain as is; removed states can be reused for different
- * purposes.
- */
-#define QIB_STATUS_INITTED 0x1 /* basic initialization done */
-/* Chip has been found and initted */
-#define QIB_STATUS_CHIP_PRESENT 0x20
-/* IB link is at ACTIVE, usable for data traffic */
-#define QIB_STATUS_IB_READY 0x40
-/* link is configured, LID, MTU, etc. have been set */
-#define QIB_STATUS_IB_CONF 0x80
-/* A Fatal hardware error has occurred. */
-#define QIB_STATUS_HWERROR 0x200
-
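Since these status bits are exposed as an ASCII 64-bit value through the "status" sysfs file, user code can parse that value and test the bits directly. A sketch of such a check; the sysfs path is a placeholder, not something specified by this header:

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>

	/* Returns 1 if the IB link is ACTIVE, 0 if not, -1 on read failure. */
	static int qib_demo_ib_ready(const char *status_path)
	{
		FILE *f = fopen(status_path, "r");
		char buf[32];
		uint64_t status;

		if (!f)
			return -1;
		if (!fgets(buf, sizeof(buf), f)) {
			fclose(f);
			return -1;
		}
		fclose(f);
		status = strtoull(buf, NULL, 0);
		return (status & QIB_STATUS_IB_READY) != 0;
	}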
-/*
- * The list of usermode accessible registers. Also see Reg_* later in file.
- */
-enum qib_ureg {
- /* (RO) DMA RcvHdr to be used next. */
- ur_rcvhdrtail = 0,
- /* (RW) RcvHdr entry to be processed next by host. */
- ur_rcvhdrhead = 1,
- /* (RO) Index of next Eager index to use. */
- ur_rcvegrindextail = 2,
- /* (RW) Eager TID to be processed next */
- ur_rcvegrindexhead = 3,
- /* For internal use only; max register number. */
- _QIB_UregMax
-};
-
-/* bit values for spi_runtime_flags */
-#define QIB_RUNTIME_PCIE 0x0002
-#define QIB_RUNTIME_FORCE_WC_ORDER 0x0004
-#define QIB_RUNTIME_RCVHDR_COPY 0x0008
-#define QIB_RUNTIME_MASTER 0x0010
-#define QIB_RUNTIME_RCHK 0x0020
-#define QIB_RUNTIME_NODMA_RTAIL 0x0080
-#define QIB_RUNTIME_SPECIAL_TRIGGER 0x0100
-#define QIB_RUNTIME_SDMA 0x0200
-#define QIB_RUNTIME_FORCE_PIOAVAIL 0x0400
-#define QIB_RUNTIME_PIO_REGSWAPPED 0x0800
-#define QIB_RUNTIME_CTXT_MSB_IN_QP 0x1000
-#define QIB_RUNTIME_CTXT_REDIRECT 0x2000
-#define QIB_RUNTIME_HDRSUPP 0x4000
-
-/*
- * This structure is returned by qib_userinit() immediately after
- * open to get implementation-specific info, and info specific to this
- * instance.
- *
- * This struct must have explicit pad fields where type sizes
- * may result in different alignments between 32 and 64 bit
- * programs, since the 64 bit kernel requires the user code
- * to have matching offsets.
- */
-struct qib_base_info {
- /* version of hardware, for feature checking. */
- __u32 spi_hw_version;
- /* version of software, for feature checking. */
- __u32 spi_sw_version;
- /* QLogic_IB context assigned, goes into sent packets */
- __u16 spi_ctxt;
- __u16 spi_subctxt;
- /*
- * IB MTU; a packet's IB data must be less than this.
- * The MTU is in bytes, and will be a multiple of 4 bytes.
- */
- __u32 spi_mtu;
- /*
- * Size of a PIO buffer. Any given packet's total size must be less
- * than this (in words). Included is the starting control word, so
- * if 513 is returned, then total pkt size is 512 words or less.
- */
- __u32 spi_piosize;
- /* size of the TID cache in qlogic_ib, in entries */
- __u32 spi_tidcnt;
- /* size of the TID Eager list in qlogic_ib, in entries */
- __u32 spi_tidegrcnt;
- /* size of a single receive header queue entry in words. */
- __u32 spi_rcvhdrent_size;
- /*
- * Count of receive header queue entries allocated.
- * This may be less than the spu_rcvhdrcnt passed in!
- */
- __u32 spi_rcvhdr_cnt;
-
- /* per-chip and other runtime features bitmap (QIB_RUNTIME_*) */
- __u32 spi_runtime_flags;
-
- /* address where hardware receive header queue is mapped */
- __u64 spi_rcvhdr_base;
-
- /* user program. */
-
- /* base address of eager TID receive buffers used by hardware. */
- __u64 spi_rcv_egrbufs;
-
- /* Allocated by initialization code, not by protocol. */
-
- /*
- * Size of each TID buffer in host memory, starting at
- * spi_rcv_egrbufs. The buffers are virtually contiguous.
- */
- __u32 spi_rcv_egrbufsize;
- /*
- * The special QP (queue pair) value that identifies a qlogic_ib
- * protocol packet from standard IB packets. More, probably much
- * more, to be added.
- */
- __u32 spi_qpair;
-
- /*
- * User register base for init code, not to be used directly by
- * protocol or applications. Always points to chip registers,
- * for normal or shared context.
- */
- __u64 spi_uregbase;
- /*
- * Maximum buffer size in bytes that can be used in a single TID
- * entry (assuming the buffer is aligned to this boundary). This is
- * the minimum of what the hardware and software support. Guaranteed
- * to be a power of 2.
- */
- __u32 spi_tid_maxsize;
- /*
- * alignment of each pio send buffer (byte count
- * to add to spi_piobufbase to get to second buffer)
- */
- __u32 spi_pioalign;
- /*
- * The index of the first pio buffer available to this process;
- * needed to do lookup in spi_pioavailaddr; not added to
- * spi_piobufbase.
- */
- __u32 spi_pioindex;
- /* number of buffers mapped for this process */
- __u32 spi_piocnt;
-
- /*
- * Base address of writeonly pio buffers for this process.
- * Each buffer has spi_piosize words, and is aligned on spi_pioalign
- * boundaries. spi_piocnt buffers are mapped from this address
- */
- __u64 spi_piobufbase;
-
- /*
- * Base address of readonly memory copy of the pioavail registers.
- * There are 2 bits for each buffer.
- */
- __u64 spi_pioavailaddr;
-
- /*
- * Address where driver updates a copy of the interface and driver
- * status (QIB_STATUS_*) as a 64 bit value. It's followed by a
- * link status qword (formerly combined with driver status), then a
- * string indicating hardware error, if there was one.
- */
- __u64 spi_status;
-
- /* number of chip ctxts available to user processes */
- __u32 spi_nctxts;
- __u16 spi_unit; /* unit number of chip we are using */
- __u16 spi_port; /* IB port number we are using */
- /* num bufs in each contiguous set */
- __u32 spi_rcv_egrperchunk;
- /* size in bytes of each contiguous set */
- __u32 spi_rcv_egrchunksize;
- /* total size of mmap to cover full rcvegrbuffers */
- __u32 spi_rcv_egrbuftotlen;
- __u32 spi_rhf_offset; /* dword offset in hdrqent for rcvhdr flags */
- /* address of readonly memory copy of the rcvhdrq tail register. */
- __u64 spi_rcvhdr_tailaddr;
-
- /*
- * shared memory pages for subctxts if ctxt is shared; these cover
- * all the processes in the group sharing a single context.
- * all have enough space for the num_subcontexts value on this job.
- */
- __u64 spi_subctxt_uregbase;
- __u64 spi_subctxt_rcvegrbuf;
- __u64 spi_subctxt_rcvhdr_base;
-
- /* shared memory page for send buffer disarm status */
- __u64 spi_sendbuf_status;
-} __aligned(8);
-
-/*
- * This version number is given to the driver by the user code during
- * initialization in the spu_userversion field of qib_user_info, so
- * the driver can check for compatibility with user code.
- *
- * The major version changes when data structures
- * change in an incompatible way. The driver must be the same or higher
- * for initialization to succeed. In some cases, a higher version
- * driver will not interoperate with older software, and initialization
- * will return an error.
- */
-#define QIB_USER_SWMAJOR 1
-
-/*
- * Minor version differences are always compatible
- * within a major version; however, if the user software is newer
- * than the driver software, some new features and/or structure fields
- * may not be implemented; the user code must deal with this if it
- * cares, or it must abort after initialization reports the difference.
- */
-#define QIB_USER_SWMINOR 13
-
-#define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR)
-
-#ifndef QIB_KERN_TYPE
-#define QIB_KERN_TYPE 0
-#endif
-
-/*
- * Similarly, this is the kernel version going back to the user. It's
- * slightly different, in that we want to tell if the driver was built as
- * part of a QLogic release, or from the driver from openfabrics.org,
- * kernel.org, or a standard distribution, for support reasons.
- * The high bit is 0 for non-QLogic and 1 for QLogic-built/supplied.
- *
- * It's returned by the driver to the user code during initialization in the
- * spi_sw_version field of qib_base_info, so the user code can in turn
- * check for compatibility with the kernel.
-*/
-#define QIB_KERN_SWVERSION ((QIB_KERN_TYPE << 31) | QIB_USER_SWVERSION)
-
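With QIB_USER_SWMAJOR = 1 and QIB_USER_SWMINOR = 13, QIB_USER_SWVERSION works out to (1 << 16) | 13 = 0x0001000d, and QIB_KERN_SWVERSION adds the QLogic-built flag in bit 31. A sketch of the kind of check a user library might perform on the spi_sw_version value returned in qib_base_info; this exact check is an illustration, not the driver's own:

	static inline int qib_demo_major_ok(__u32 spi_sw_version)
	{
		/* strip the QIB_KERN_TYPE flag in bit 31, keep the major half */
		__u32 drv_major = (spi_sw_version >> 16) & 0x7fff;

		/* the driver major must be the same or higher than ours */
		return drv_major >= QIB_USER_SWMAJOR;
	}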
-/*
- * Define the driver version number. This is something that refers only
- * to the driver itself, not the software interfaces it supports.
- */
-#define QIB_DRIVER_VERSION_BASE "1.11"
-
-/* create the final driver version string */
-#ifdef QIB_IDSTR
-#define QIB_DRIVER_VERSION QIB_DRIVER_VERSION_BASE " " QIB_IDSTR
-#else
-#define QIB_DRIVER_VERSION QIB_DRIVER_VERSION_BASE
-#endif
-
-/*
- * If the unit is specified via open, HCA choice is fixed. If port is
- * specified, it's also fixed. Otherwise we try to spread contexts
- * across ports and HCAs, using different algorithms. WITHIN is
- * the old default, prior to this mechanism.
- */
-#define QIB_PORT_ALG_ACROSS 0 /* round robin contexts across HCAs, then
- * ports; this is the default */
-#define QIB_PORT_ALG_WITHIN 1 /* use all contexts on an HCA (round robin
- * active ports within), then next HCA */
-#define QIB_PORT_ALG_COUNT 2 /* number of algorithm choices */
-
-/*
- * This structure is passed to qib_userinit() to tell the driver where
- * user code buffers are, sizes, etc. The offsets and sizes of the
- * fields must remain unchanged, for binary compatibility. It can
- * be extended if userversion is changed so that user code can tell, if needed
- */
-struct qib_user_info {
- /*
- * version of user software, to detect compatibility issues.
- * Should be set to QIB_USER_SWVERSION.
- */
- __u32 spu_userversion;
-
- __u32 _spu_unused2;
-
- /* size of struct base_info to write to */
- __u32 spu_base_info_size;
-
- __u32 spu_port_alg; /* which QIB_PORT_ALG_*; unused if user minor < 11 */
-
- /*
- * If two or more processes wish to share a context, each process
- * must set the spu_subctxt_cnt and spu_subctxt_id to the same
- * values. The only restriction on the spu_subctxt_id is that
- * it be unique for a given node.
- */
- __u16 spu_subctxt_cnt;
- __u16 spu_subctxt_id;
-
- __u32 spu_port; /* IB port requested by user if > 0 */
-
- /*
- * address of struct base_info to write to
- */
- __u64 spu_base_info;
-
-} __aligned(8);
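A sketch of how user code might fill this structure before issuing QIB_CMD_USER_INIT (defined below). Here binfo is a caller-provided struct qib_base_info that the driver writes back, and the zero spu_port is simply "no specific port requested"; the cast through unsigned long is an assumption about how the 64-bit pointer field is populated:

	struct qib_base_info binfo;
	struct qib_user_info uinfo = {
		.spu_userversion    = QIB_USER_SWVERSION,
		.spu_base_info_size = sizeof(binfo),
		.spu_port           = 0,	/* no specific port requested */
		.spu_base_info      = (__u64)(unsigned long)&binfo,
	};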
-
-/* User commands. */
-
-/* 16 available, was: old set up userspace (for old user code) */
-#define QIB_CMD_CTXT_INFO 17 /* find out what resources we got */
-#define QIB_CMD_RECV_CTRL 18 /* control receipt of packets */
-#define QIB_CMD_TID_UPDATE 19 /* update expected TID entries */
-#define QIB_CMD_TID_FREE 20 /* free expected TID entries */
-#define QIB_CMD_SET_PART_KEY 21 /* add partition key */
-/* 22 available, was: return info on slave processes (for old user code) */
-#define QIB_CMD_ASSIGN_CTXT 23 /* allocate HCA and ctxt */
-#define QIB_CMD_USER_INIT 24 /* set up userspace */
-#define QIB_CMD_UNUSED_1 25
-#define QIB_CMD_UNUSED_2 26
-#define QIB_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */
-#define QIB_CMD_POLL_TYPE 28 /* set the kind of polling we want */
-#define QIB_CMD_ARMLAUNCH_CTRL 29 /* armlaunch detection control */
-/* 30 is unused */
-#define QIB_CMD_SDMA_INFLIGHT 31 /* sdma inflight counter request */
-#define QIB_CMD_SDMA_COMPLETE 32 /* sdma completion counter request */
-/* 33 available, was a testing feature */
-#define QIB_CMD_DISARM_BUFS 34 /* disarm send buffers w/ errors */
-#define QIB_CMD_ACK_EVENT 35 /* ack & clear bits */
-#define QIB_CMD_CPUS_LIST 36 /* list of cpus allocated, for pinned
- * processes: qib_cpus_list */
-
-/*
- * QIB_CMD_ACK_EVENT obsoletes QIB_CMD_DISARM_BUFS, but we keep it for
- * compatibility with libraries from previous release. The ACK_EVENT
- * will take appropriate driver action (if any, just DISARM for now),
- * then clear the bits passed in as part of the mask. These bits are
- * in the first 64bit word at spi_sendbuf_status, and are passed to
- * the driver in the event_mask union as well.
- */
-#define _QIB_EVENT_DISARM_BUFS_BIT 0
-#define _QIB_EVENT_LINKDOWN_BIT 1
-#define _QIB_EVENT_LID_CHANGE_BIT 2
-#define _QIB_EVENT_LMC_CHANGE_BIT 3
-#define _QIB_EVENT_SL2VL_CHANGE_BIT 4
-#define _QIB_MAX_EVENT_BIT _QIB_EVENT_SL2VL_CHANGE_BIT
-
-#define QIB_EVENT_DISARM_BUFS_BIT (1UL << _QIB_EVENT_DISARM_BUFS_BIT)
-#define QIB_EVENT_LINKDOWN_BIT (1UL << _QIB_EVENT_LINKDOWN_BIT)
-#define QIB_EVENT_LID_CHANGE_BIT (1UL << _QIB_EVENT_LID_CHANGE_BIT)
-#define QIB_EVENT_LMC_CHANGE_BIT (1UL << _QIB_EVENT_LMC_CHANGE_BIT)
-#define QIB_EVENT_SL2VL_CHANGE_BIT (1UL << _QIB_EVENT_SL2VL_CHANGE_BIT)
-
-
-/*
- * Poll types
- */
-#define QIB_POLL_TYPE_ANYRCV 0x0
-#define QIB_POLL_TYPE_URGENT 0x1
-
-struct qib_ctxt_info {
- __u16 num_active; /* number of active units */
- __u16 unit; /* unit (chip) assigned to caller */
- __u16 port; /* IB port assigned to caller (1-based) */
- __u16 ctxt; /* ctxt on unit assigned to caller */
- __u16 subctxt; /* subctxt on unit assigned to caller */
- __u16 num_ctxts; /* number of ctxts available on unit */
- __u16 num_subctxts; /* number of subctxts opened on ctxt */
- __u16 rec_cpu; /* cpu # for affinity (ffff if none) */
-};
-
-struct qib_tid_info {
- __u32 tidcnt;
- /* make structure same size in 32 and 64 bit */
- __u32 tid__unused;
- /* virtual address of first page in transfer */
- __u64 tidvaddr;
- /* pointer (same size 32/64 bit) to __u16 tid array */
- __u64 tidlist;
-
- /*
- * pointer (same size 32/64 bit) to bitmap of TIDs used
- * for this call; checked for being large enough at open
- */
- __u64 tidmap;
-};
-
-struct qib_cmd {
- __u32 type; /* command type */
- union {
- struct qib_tid_info tid_info;
- struct qib_user_info user_info;
-
- /*
- * address in userspace where we should put the sdma
- * inflight counter
- */
- __u64 sdma_inflight;
- /*
- * address in userspace where we should put the sdma
- * completion counter
- */
- __u64 sdma_complete;
- /* address in userspace of struct qib_ctxt_info to
- write result to */
- __u64 ctxt_info;
- /* enable/disable receipt of packets */
- __u32 recv_ctrl;
- /* enable/disable armlaunch errors (non-zero to enable) */
- __u32 armlaunch_ctrl;
- /* partition key to set */
- __u16 part_key;
- /* user address of __u32 bitmask of active slaves */
- __u64 slave_mask_addr;
- /* type of polling we want */
- __u16 poll_type;
- /* back pressure enable bit for one particular context */
- __u8 ctxt_bp;
- /* qib_user_event_ack(), IPATH_EVENT_* bits */
- __u64 event_mask;
- } cmd;
-};
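For example, acknowledging a buffer disarm and a link-down notification in one request could look like the sketch below; how the command is actually delivered to the driver (e.g. a write() on the device file) is not specified in this header and is left out:

	struct qib_cmd cmd = {
		.type = QIB_CMD_ACK_EVENT,
		.cmd.event_mask = QIB_EVENT_DISARM_BUFS_BIT | QIB_EVENT_LINKDOWN_BIT,
	};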
-
-struct qib_iovec {
- /* Pointer to data, but same size 32 and 64 bit */
- __u64 iov_base;
-
- /*
- * Length of data; don't need 64 bits, but want
- * qib_sendpkt to remain the same size as before the 32 bit changes, so...
- */
- __u64 iov_len;
-};
-
-/*
- * Describes a single packet for send. Each packet can have one or more
- * buffers, but the total length (exclusive of IB headers) must be less
- * than the MTU, and if using the PIO method, entire packet length,
- * including IB headers, must be less than the qib_piosize value (words).
- * Use of this necessitates including sys/uio.h
- */
-struct __qib_sendpkt {
- __u32 sps_flags; /* flags for packet (TBD) */
- __u32 sps_cnt; /* number of entries to use in sps_iov */
- /* array of iov's describing packet. TEMPORARY */
- struct qib_iovec sps_iov[4];
-};
-
-/*
- * Diagnostics can send a packet by "writing" the following
- * structs to the diag data special file.
- * This allows a custom
- * pbc (+ static rate) qword, so that special modes and deliberate
- * changes to CRCs can be used. The elements were also re-ordered
- * for better alignment and to avoid padding issues.
- */
-#define _DIAG_XPKT_VERS 3
-struct qib_diag_xpkt {
- __u16 version;
- __u16 unit;
- __u16 port;
- __u16 len;
- __u64 data;
- __u64 pbc_wd;
-};
-
-/*
- * Data layout in I2C flash (for GUID, etc.)
- * All fields are little-endian binary unless otherwise stated
- */
-#define QIB_FLASH_VERSION 2
-struct qib_flash {
- /* flash layout version (QIB_FLASH_VERSION) */
- __u8 if_fversion;
- /* checksum protecting if_length bytes */
- __u8 if_csum;
- /*
- * valid length in use (protected by if_csum), including
- * if_fversion and if_csum themselves
- */
- __u8 if_length;
- /* the GUID, in network order */
- __u8 if_guid[8];
- /* number of GUIDs to use, starting from if_guid */
- __u8 if_numguid;
- /* the (last 10 characters of) board serial number, in ASCII */
- char if_serial[12];
- /* board mfg date (YYYYMMDD ASCII) */
- char if_mfgdate[8];
- /* last board rework/test date (YYYYMMDD ASCII) */
- char if_testdate[8];
- /* logging of error counts, TBD */
- __u8 if_errcntp[4];
- /* powered on hours, updated at driver unload */
- __u8 if_powerhour[2];
- /* ASCII free-form comment field */
- char if_comment[32];
- /* Backwards compatible prefix for longer QLogic Serial Numbers */
- char if_sprefix[4];
- /* 82 bytes used, min flash size is 128 bytes */
- __u8 if_future[46];
-};
-
-/*
- * These are the counters implemented in the chip, and are listed in order.
- * The InterCaps naming is taken straight from the chip spec.
- */
-struct qlogic_ib_counters {
- __u64 LBIntCnt;
- __u64 LBFlowStallCnt;
- __u64 TxSDmaDescCnt; /* was Reserved1 */
- __u64 TxUnsupVLErrCnt;
- __u64 TxDataPktCnt;
- __u64 TxFlowPktCnt;
- __u64 TxDwordCnt;
- __u64 TxLenErrCnt;
- __u64 TxMaxMinLenErrCnt;
- __u64 TxUnderrunCnt;
- __u64 TxFlowStallCnt;
- __u64 TxDroppedPktCnt;
- __u64 RxDroppedPktCnt;
- __u64 RxDataPktCnt;
- __u64 RxFlowPktCnt;
- __u64 RxDwordCnt;
- __u64 RxLenErrCnt;
- __u64 RxMaxMinLenErrCnt;
- __u64 RxICRCErrCnt;
- __u64 RxVCRCErrCnt;
- __u64 RxFlowCtrlErrCnt;
- __u64 RxBadFormatCnt;
- __u64 RxLinkProblemCnt;
- __u64 RxEBPCnt;
- __u64 RxLPCRCErrCnt;
- __u64 RxBufOvflCnt;
- __u64 RxTIDFullErrCnt;
- __u64 RxTIDValidErrCnt;
- __u64 RxPKeyMismatchCnt;
- __u64 RxP0HdrEgrOvflCnt;
- __u64 RxP1HdrEgrOvflCnt;
- __u64 RxP2HdrEgrOvflCnt;
- __u64 RxP3HdrEgrOvflCnt;
- __u64 RxP4HdrEgrOvflCnt;
- __u64 RxP5HdrEgrOvflCnt;
- __u64 RxP6HdrEgrOvflCnt;
- __u64 RxP7HdrEgrOvflCnt;
- __u64 RxP8HdrEgrOvflCnt;
- __u64 RxP9HdrEgrOvflCnt;
- __u64 RxP10HdrEgrOvflCnt;
- __u64 RxP11HdrEgrOvflCnt;
- __u64 RxP12HdrEgrOvflCnt;
- __u64 RxP13HdrEgrOvflCnt;
- __u64 RxP14HdrEgrOvflCnt;
- __u64 RxP15HdrEgrOvflCnt;
- __u64 RxP16HdrEgrOvflCnt;
- __u64 IBStatusChangeCnt;
- __u64 IBLinkErrRecoveryCnt;
- __u64 IBLinkDownedCnt;
- __u64 IBSymbolErrCnt;
- __u64 RxVL15DroppedPktCnt;
- __u64 RxOtherLocalPhyErrCnt;
- __u64 PcieRetryBufDiagQwordCnt;
- __u64 ExcessBufferOvflCnt;
- __u64 LocalLinkIntegrityErrCnt;
- __u64 RxVlErrCnt;
- __u64 RxDlidFltrCnt;
-};
-
-/*
- * The next set of defines are for packet headers, and chip register
- * and memory bits that are visible to and/or used by user-mode software.
- */
-
-/* RcvHdrFlags bits */
-#define QLOGIC_IB_RHF_LENGTH_MASK 0x7FF
-#define QLOGIC_IB_RHF_LENGTH_SHIFT 0
-#define QLOGIC_IB_RHF_RCVTYPE_MASK 0x7
-#define QLOGIC_IB_RHF_RCVTYPE_SHIFT 11
-#define QLOGIC_IB_RHF_EGRINDEX_MASK 0xFFF
-#define QLOGIC_IB_RHF_EGRINDEX_SHIFT 16
-#define QLOGIC_IB_RHF_SEQ_MASK 0xF
-#define QLOGIC_IB_RHF_SEQ_SHIFT 0
-#define QLOGIC_IB_RHF_HDRQ_OFFSET_MASK 0x7FF
-#define QLOGIC_IB_RHF_HDRQ_OFFSET_SHIFT 4
-#define QLOGIC_IB_RHF_H_ICRCERR 0x80000000
-#define QLOGIC_IB_RHF_H_VCRCERR 0x40000000
-#define QLOGIC_IB_RHF_H_PARITYERR 0x20000000
-#define QLOGIC_IB_RHF_H_LENERR 0x10000000
-#define QLOGIC_IB_RHF_H_MTUERR 0x08000000
-#define QLOGIC_IB_RHF_H_IHDRERR 0x04000000
-#define QLOGIC_IB_RHF_H_TIDERR 0x02000000
-#define QLOGIC_IB_RHF_H_MKERR 0x01000000
-#define QLOGIC_IB_RHF_H_IBERR 0x00800000
-#define QLOGIC_IB_RHF_H_ERR_MASK 0xFF800000
-#define QLOGIC_IB_RHF_L_USE_EGR 0x80000000
-#define QLOGIC_IB_RHF_L_SWA 0x00008000
-#define QLOGIC_IB_RHF_L_SWB 0x00004000
-
-/* qlogic_ib header fields */
-#define QLOGIC_IB_I_VERS_MASK 0xF
-#define QLOGIC_IB_I_VERS_SHIFT 28
-#define QLOGIC_IB_I_CTXT_MASK 0xF
-#define QLOGIC_IB_I_CTXT_SHIFT 24
-#define QLOGIC_IB_I_TID_MASK 0x7FF
-#define QLOGIC_IB_I_TID_SHIFT 13
-#define QLOGIC_IB_I_OFFSET_MASK 0x1FFF
-#define QLOGIC_IB_I_OFFSET_SHIFT 0
-
-/* K_PktFlags bits */
-#define QLOGIC_IB_KPF_INTR 0x1
-#define QLOGIC_IB_KPF_SUBCTXT_MASK 0x3
-#define QLOGIC_IB_KPF_SUBCTXT_SHIFT 1
-
-#define QLOGIC_IB_MAX_SUBCTXT 4
-
-/* SendPIO per-buffer control */
-#define QLOGIC_IB_SP_TEST 0x40
-#define QLOGIC_IB_SP_TESTEBP 0x20
-#define QLOGIC_IB_SP_TRIGGER_SHIFT 15
-
-/* SendPIOAvail bits */
-#define QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT 1
-#define QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT 0
-
-/* qlogic_ib header format */
-struct qib_header {
- /*
- * Version - 4 bits, Context - 4 bits, TID - 10 bits and Offset -
- * 14 bits before ECO change ~28 Dec 03. After that, Vers 4,
- * Context 4, TID 11, offset 13.
- */
- __le32 ver_ctxt_tid_offset;
- __le16 chksum;
- __le16 pkt_flags;
-};
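The masks and shifts defined under "qlogic_ib header fields" above describe how ver_ctxt_tid_offset is packed. A sketch of building that word; the version is taken as a caller-supplied parameter because this header does not state which constant goes in that field:

	static inline __le32 qib_demo_pack_iph(__u32 vers, __u32 ctxt,
					       __u32 tid, __u32 offset)
	{
		__u32 w = (vers & QLOGIC_IB_I_VERS_MASK) << QLOGIC_IB_I_VERS_SHIFT;

		w |= (ctxt & QLOGIC_IB_I_CTXT_MASK) << QLOGIC_IB_I_CTXT_SHIFT;
		w |= (tid & QLOGIC_IB_I_TID_MASK) << QLOGIC_IB_I_TID_SHIFT;
		w |= offset & QLOGIC_IB_I_OFFSET_MASK;
		return __cpu_to_le32(w);
	}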
-
-/*
- * qlogic_ib user message header format.
- * This structure contains the first 4 fields common to all protocols
- * that employ qlogic_ib.
- */
-struct qib_message_header {
- __be16 lrh[4];
- __be32 bth[3];
- /* fields below this point are in host byte order */
- struct qib_header iph;
- /* fields below are simplified, but should match PSM */
- /* some are accessed by driver when packet splitting is needed */
- __u8 sub_opcode;
- __u8 flags;
- __u16 commidx;
- __u32 ack_seq_num;
- __u8 flowid;
- __u8 hdr_dlen;
- __u16 mqhdr;
- __u32 uwords[4];
-};
-
-/* sequence number bits for message */
-union qib_seqnum {
- struct {
- __u32 seq:11;
- __u32 gen:8;
- __u32 flow:5;
- };
- struct {
- __u32 pkt:16;
- __u32 msg:8;
- };
- __u32 val;
-};
-
-/* qib receiving-dma tid-session-member */
-struct qib_tid_session_member {
- __u16 tid;
- __u16 offset;
- __u16 length;
-};
-
-/* IB - LRH header consts */
-#define QIB_LRH_GRH 0x0003 /* 1. word of IB LRH - next header: GRH */
-#define QIB_LRH_BTH 0x0002 /* 1. word of IB LRH - next header: BTH */
-
-/* misc. */
-#define SIZE_OF_CRC 1
-
-#define QIB_DEFAULT_P_KEY 0xFFFF
-#define QIB_PERMISSIVE_LID 0xFFFF
-#define QIB_AETH_CREDIT_SHIFT 24
-#define QIB_AETH_CREDIT_MASK 0x1F
-#define QIB_AETH_CREDIT_INVAL 0x1F
-#define QIB_PSN_MASK 0xFFFFFF
-#define QIB_MSN_MASK 0xFFFFFF
-#define QIB_QPN_MASK 0xFFFFFF
-#define QIB_MULTICAST_LID_BASE 0xC000
-#define QIB_EAGER_TID_ID QLOGIC_IB_I_TID_MASK
-#define QIB_MULTICAST_QPN 0xFFFFFF
-
-/* Receive Header Queue: receive type (from qlogic_ib) */
-#define RCVHQ_RCV_TYPE_EXPECTED 0
-#define RCVHQ_RCV_TYPE_EAGER 1
-#define RCVHQ_RCV_TYPE_NON_KD 2
-#define RCVHQ_RCV_TYPE_ERROR 3
-
-#define QIB_HEADER_QUEUE_WORDS 9
-
-/* functions for extracting fields from rcvhdrq entries for the driver.
- */
-static inline __u32 qib_hdrget_err_flags(const __le32 *rbuf)
-{
- return __le32_to_cpu(rbuf[1]) & QLOGIC_IB_RHF_H_ERR_MASK;
-}
-
-static inline __u32 qib_hdrget_rcv_type(const __le32 *rbuf)
-{
- return (__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_RCVTYPE_SHIFT) &
- QLOGIC_IB_RHF_RCVTYPE_MASK;
-}
-
-static inline __u32 qib_hdrget_length_in_bytes(const __le32 *rbuf)
-{
- return ((__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_LENGTH_SHIFT) &
- QLOGIC_IB_RHF_LENGTH_MASK) << 2;
-}
-
-static inline __u32 qib_hdrget_index(const __le32 *rbuf)
-{
- return (__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_EGRINDEX_SHIFT) &
- QLOGIC_IB_RHF_EGRINDEX_MASK;
-}
-
-static inline __u32 qib_hdrget_seq(const __le32 *rbuf)
-{
- return (__le32_to_cpu(rbuf[1]) >> QLOGIC_IB_RHF_SEQ_SHIFT) &
- QLOGIC_IB_RHF_SEQ_MASK;
-}
-
-static inline __u32 qib_hdrget_offset(const __le32 *rbuf)
-{
- return (__le32_to_cpu(rbuf[1]) >> QLOGIC_IB_RHF_HDRQ_OFFSET_SHIFT) &
- QLOGIC_IB_RHF_HDRQ_OFFSET_MASK;
-}
-
-static inline __u32 qib_hdrget_use_egr_buf(const __le32 *rbuf)
-{
- return __le32_to_cpu(rbuf[0]) & QLOGIC_IB_RHF_L_USE_EGR;
-}
-
-static inline __u32 qib_hdrget_qib_ver(__le32 hdrword)
-{
- return (__le32_to_cpu(hdrword) >> QLOGIC_IB_I_VERS_SHIFT) &
- QLOGIC_IB_I_VERS_MASK;
-}
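Taken together, these helpers decode one receive-header-queue entry. A minimal sketch of classifying an entry; rhf is assumed to point at the two RHF dwords of one entry, and the real receive path lives elsewhere in the driver:

	static void qib_demo_classify(const __le32 *rhf)
	{
		__u32 etype = qib_hdrget_rcv_type(rhf);
		__u32 len   = qib_hdrget_length_in_bytes(rhf);

		if (qib_hdrget_err_flags(rhf)) {
			/* one of the QLOGIC_IB_RHF_H_* error bits is set */
			return;
		}
		if (etype == RCVHQ_RCV_TYPE_EAGER) {
			__u32 egridx = qib_hdrget_index(rhf);
			/* payload sits in eager buffer 'egridx', 'len' bytes total */
			(void)egridx;
		}
		(void)len;
	}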
-
-#endif /* _QIB_COMMON_H */
diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c
deleted file mode 100644
index 2b45d0b02300..000000000000
--- a/drivers/infiniband/hw/qib/qib_cq.c
+++ /dev/null
@@ -1,545 +0,0 @@
-/*
- * Copyright (c) 2013 Intel Corporation. All rights reserved.
- * Copyright (c) 2006, 2007, 2008, 2010 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/kthread.h>
-
-#include "qib_verbs.h"
-#include "qib.h"
-
-/**
- * qib_cq_enter - add a new entry to the completion queue
- * @cq: completion queue
- * @entry: work completion entry to add
- * @solicited: true if @entry is a solicited entry
- *
- * This may be called with qp->s_lock held.
- */
-void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited)
-{
- struct qib_cq_wc *wc;
- unsigned long flags;
- u32 head;
- u32 next;
-
- spin_lock_irqsave(&cq->lock, flags);
-
- /*
- * Note that the head pointer might be writable by user processes.
- * Take care to verify it is a sane value.
- */
- wc = cq->queue;
- head = wc->head;
- if (head >= (unsigned) cq->ibcq.cqe) {
- head = cq->ibcq.cqe;
- next = 0;
- } else
- next = head + 1;
- if (unlikely(next == wc->tail)) {
- spin_unlock_irqrestore(&cq->lock, flags);
- if (cq->ibcq.event_handler) {
- struct ib_event ev;
-
- ev.device = cq->ibcq.device;
- ev.element.cq = &cq->ibcq;
- ev.event = IB_EVENT_CQ_ERR;
- cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
- }
- return;
- }
- if (cq->ip) {
- wc->uqueue[head].wr_id = entry->wr_id;
- wc->uqueue[head].status = entry->status;
- wc->uqueue[head].opcode = entry->opcode;
- wc->uqueue[head].vendor_err = entry->vendor_err;
- wc->uqueue[head].byte_len = entry->byte_len;
- wc->uqueue[head].ex.imm_data =
- (__u32 __force)entry->ex.imm_data;
- wc->uqueue[head].qp_num = entry->qp->qp_num;
- wc->uqueue[head].src_qp = entry->src_qp;
- wc->uqueue[head].wc_flags = entry->wc_flags;
- wc->uqueue[head].pkey_index = entry->pkey_index;
- wc->uqueue[head].slid = entry->slid;
- wc->uqueue[head].sl = entry->sl;
- wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
- wc->uqueue[head].port_num = entry->port_num;
- /* Make sure entry is written before the head index. */
- smp_wmb();
- } else
- wc->kqueue[head] = *entry;
- wc->head = next;
-
- if (cq->notify == IB_CQ_NEXT_COMP ||
- (cq->notify == IB_CQ_SOLICITED &&
- (solicited || entry->status != IB_WC_SUCCESS))) {
- struct kthread_worker *worker;
- /*
- * This will cause send_complete() to be called in
- * another thread.
- */
- smp_rmb();
- worker = cq->dd->worker;
- if (likely(worker)) {
- cq->notify = IB_CQ_NONE;
- cq->triggered++;
- queue_kthread_work(worker, &cq->comptask);
- }
- }
-
- spin_unlock_irqrestore(&cq->lock, flags);
-}
-
-/**
- * qib_poll_cq - poll for work completion entries
- * @ibcq: the completion queue to poll
- * @num_entries: the maximum number of entries to return
- * @entry: pointer to array where work completions are placed
- *
- * Returns the number of completion entries polled.
- *
- * This may be called from interrupt context. Also called by ib_poll_cq()
- * in the generic verbs code.
- */
-int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
-{
- struct qib_cq *cq = to_icq(ibcq);
- struct qib_cq_wc *wc;
- unsigned long flags;
- int npolled;
- u32 tail;
-
- /* The kernel can only poll a kernel completion queue */
- if (cq->ip) {
- npolled = -EINVAL;
- goto bail;
- }
-
- spin_lock_irqsave(&cq->lock, flags);
-
- wc = cq->queue;
- tail = wc->tail;
- if (tail > (u32) cq->ibcq.cqe)
- tail = (u32) cq->ibcq.cqe;
- for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
- if (tail == wc->head)
- break;
- /* The kernel doesn't need a RMB since it has the lock. */
- *entry = wc->kqueue[tail];
- if (tail >= cq->ibcq.cqe)
- tail = 0;
- else
- tail++;
- }
- wc->tail = tail;
-
- spin_unlock_irqrestore(&cq->lock, flags);
-
-bail:
- return npolled;
-}
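As the comment above notes, kernel consumers reach this routine through ib_poll_cq() in the generic verbs code. A minimal drain loop might look like the following sketch; the CQ pointer and batch size are placeholders:

	static void qib_demo_drain_cq(struct ib_cq *cq)
	{
		struct ib_wc wc[16];
		int n, i;

		/* drain everything currently queued on a kernel CQ */
		while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0) {
			for (i = 0; i < n; i++) {
				if (wc[i].status != IB_WC_SUCCESS)
					pr_warn("wr_id %llu failed with status %d\n",
						(unsigned long long)wc[i].wr_id,
						wc[i].status);
			}
		}
	}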
-
-static void send_complete(struct kthread_work *work)
-{
- struct qib_cq *cq = container_of(work, struct qib_cq, comptask);
-
- /*
- * The completion handler will most likely rearm the notification
- * and poll for all pending entries. If a new completion entry
- * is added while we are in this routine, queue_kthread_work()
- * won't call us again until we return so we check triggered to
- * see if we need to call the handler again.
- */
- for (;;) {
- u8 triggered = cq->triggered;
-
- /*
- * IPoIB connected mode assumes the callback is from a
- * soft IRQ. We simulate this by blocking "bottom halves".
- * See the implementation for ipoib_cm_handle_tx_wc(),
- * netif_tx_lock_bh() and netif_tx_lock().
- */
- local_bh_disable();
- cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
- local_bh_enable();
-
- if (cq->triggered == triggered)
- return;
- }
-}
-
-/**
- * qib_create_cq - create a completion queue
- * @ibdev: the device this completion queue is attached to
- * @attr: creation attributes
- * @context: unused by the QLogic_IB driver
- * @udata: user data for libibverbs.so
- *
- * Returns a pointer to the completion queue or negative errno values
- * for failure.
- *
- * Called by ib_create_cq() in the generic verbs code.
- */
-struct ib_cq *qib_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata)
-{
- int entries = attr->cqe;
- struct qib_ibdev *dev = to_idev(ibdev);
- struct qib_cq *cq;
- struct qib_cq_wc *wc;
- struct ib_cq *ret;
- u32 sz;
-
- if (attr->flags)
- return ERR_PTR(-EINVAL);
-
- if (entries < 1 || entries > ib_qib_max_cqes) {
- ret = ERR_PTR(-EINVAL);
- goto done;
- }
-
- /* Allocate the completion queue structure. */
- cq = kmalloc(sizeof(*cq), GFP_KERNEL);
- if (!cq) {
- ret = ERR_PTR(-ENOMEM);
- goto done;
- }
-
- /*
- * Allocate the completion queue entries and head/tail pointers.
- * This is allocated separately so that it can be resized and
- * also mapped into user space.
- * We need to use vmalloc() in order to support mmap and large
- * numbers of entries.
- */
- sz = sizeof(*wc);
- if (udata && udata->outlen >= sizeof(__u64))
- sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
- else
- sz += sizeof(struct ib_wc) * (entries + 1);
- wc = vmalloc_user(sz);
- if (!wc) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_cq;
- }
-
- /*
- * Return the address of the WC as the offset to mmap.
- * See qib_mmap() for details.
- */
- if (udata && udata->outlen >= sizeof(__u64)) {
- int err;
-
- cq->ip = qib_create_mmap_info(dev, sz, context, wc);
- if (!cq->ip) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_wc;
- }
-
- err = ib_copy_to_udata(udata, &cq->ip->offset,
- sizeof(cq->ip->offset));
- if (err) {
- ret = ERR_PTR(err);
- goto bail_ip;
- }
- } else
- cq->ip = NULL;
-
- spin_lock(&dev->n_cqs_lock);
- if (dev->n_cqs_allocated == ib_qib_max_cqs) {
- spin_unlock(&dev->n_cqs_lock);
- ret = ERR_PTR(-ENOMEM);
- goto bail_ip;
- }
-
- dev->n_cqs_allocated++;
- spin_unlock(&dev->n_cqs_lock);
-
- if (cq->ip) {
- spin_lock_irq(&dev->pending_lock);
- list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
- }
-
- /*
- * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
- * The number of entries should be >= the number requested or return
- * an error.
- */
- cq->dd = dd_from_dev(dev);
- cq->ibcq.cqe = entries;
- cq->notify = IB_CQ_NONE;
- cq->triggered = 0;
- spin_lock_init(&cq->lock);
- init_kthread_work(&cq->comptask, send_complete);
- wc->head = 0;
- wc->tail = 0;
- cq->queue = wc;
-
- ret = &cq->ibcq;
-
- goto done;
-
-bail_ip:
- kfree(cq->ip);
-bail_wc:
- vfree(wc);
-bail_cq:
- kfree(cq);
-done:
- return ret;
-}
-
-/**
- * qib_destroy_cq - destroy a completion queue
- * @ibcq: the completion queue to destroy.
- *
- * Returns 0 for success.
- *
- * Called by ib_destroy_cq() in the generic verbs code.
- */
-int qib_destroy_cq(struct ib_cq *ibcq)
-{
- struct qib_ibdev *dev = to_idev(ibcq->device);
- struct qib_cq *cq = to_icq(ibcq);
-
- flush_kthread_work(&cq->comptask);
- spin_lock(&dev->n_cqs_lock);
- dev->n_cqs_allocated--;
- spin_unlock(&dev->n_cqs_lock);
- if (cq->ip)
- kref_put(&cq->ip->ref, qib_release_mmap_info);
- else
- vfree(cq->queue);
- kfree(cq);
-
- return 0;
-}
-
-/**
- * qib_req_notify_cq - change the notification type for a completion queue
- * @ibcq: the completion queue
- * @notify_flags: the type of notification to request
- *
- * Returns 0 for success.
- *
- * This may be called from interrupt context. Also called by
- * ib_req_notify_cq() in the generic verbs code.
- */
-int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
-{
- struct qib_cq *cq = to_icq(ibcq);
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&cq->lock, flags);
- /*
- * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
- * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
- */
- if (cq->notify != IB_CQ_NEXT_COMP)
- cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
-
- if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
- cq->queue->head != cq->queue->tail)
- ret = 1;
-
- spin_unlock_irqrestore(&cq->lock, flags);
-
- return ret;
-}
-
-/**
- * qib_resize_cq - change the size of the CQ
- * @ibcq: the completion queue
- *
- * Returns 0 for success.
- */
-int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
-{
- struct qib_cq *cq = to_icq(ibcq);
- struct qib_cq_wc *old_wc;
- struct qib_cq_wc *wc;
- u32 head, tail, n;
- int ret;
- u32 sz;
-
- if (cqe < 1 || cqe > ib_qib_max_cqes) {
- ret = -EINVAL;
- goto bail;
- }
-
- /*
- * Need to use vmalloc() if we want to support large #s of entries.
- */
- sz = sizeof(*wc);
- if (udata && udata->outlen >= sizeof(__u64))
- sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
- else
- sz += sizeof(struct ib_wc) * (cqe + 1);
- wc = vmalloc_user(sz);
- if (!wc) {
- ret = -ENOMEM;
- goto bail;
- }
-
- /* Check that we can write the offset to mmap. */
- if (udata && udata->outlen >= sizeof(__u64)) {
- __u64 offset = 0;
-
- ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
- if (ret)
- goto bail_free;
- }
-
- spin_lock_irq(&cq->lock);
- /*
- * Make sure head and tail are sane since they
- * might be user writable.
- */
- old_wc = cq->queue;
- head = old_wc->head;
- if (head > (u32) cq->ibcq.cqe)
- head = (u32) cq->ibcq.cqe;
- tail = old_wc->tail;
- if (tail > (u32) cq->ibcq.cqe)
- tail = (u32) cq->ibcq.cqe;
- if (head < tail)
- n = cq->ibcq.cqe + 1 + head - tail;
- else
- n = head - tail;
- if (unlikely((u32)cqe < n)) {
- ret = -EINVAL;
- goto bail_unlock;
- }
- for (n = 0; tail != head; n++) {
- if (cq->ip)
- wc->uqueue[n] = old_wc->uqueue[tail];
- else
- wc->kqueue[n] = old_wc->kqueue[tail];
- if (tail == (u32) cq->ibcq.cqe)
- tail = 0;
- else
- tail++;
- }
- cq->ibcq.cqe = cqe;
- wc->head = n;
- wc->tail = 0;
- cq->queue = wc;
- spin_unlock_irq(&cq->lock);
-
- vfree(old_wc);
-
- if (cq->ip) {
- struct qib_ibdev *dev = to_idev(ibcq->device);
- struct qib_mmap_info *ip = cq->ip;
-
- qib_update_mmap_info(dev, ip, sz, wc);
-
- /*
- * Return the offset to mmap.
- * See qib_mmap() for details.
- */
- if (udata && udata->outlen >= sizeof(__u64)) {
- ret = ib_copy_to_udata(udata, &ip->offset,
- sizeof(ip->offset));
- if (ret)
- goto bail;
- }
-
- spin_lock_irq(&dev->pending_lock);
- if (list_empty(&ip->pending_mmaps))
- list_add(&ip->pending_mmaps, &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
- }
-
- ret = 0;
- goto bail;
-
-bail_unlock:
- spin_unlock_irq(&cq->lock);
-bail_free:
- vfree(wc);
-bail:
- return ret;
-}
-
-int qib_cq_init(struct qib_devdata *dd)
-{
- int ret = 0;
- int cpu;
- struct task_struct *task;
-
- if (dd->worker)
- return 0;
- dd->worker = kzalloc(sizeof(*dd->worker), GFP_KERNEL);
- if (!dd->worker)
- return -ENOMEM;
- init_kthread_worker(dd->worker);
- task = kthread_create_on_node(
- kthread_worker_fn,
- dd->worker,
- dd->assigned_node_id,
- "qib_cq%d", dd->unit);
- if (IS_ERR(task))
- goto task_fail;
- cpu = cpumask_first(cpumask_of_node(dd->assigned_node_id));
- kthread_bind(task, cpu);
- wake_up_process(task);
-out:
- return ret;
-task_fail:
- ret = PTR_ERR(task);
- kfree(dd->worker);
- dd->worker = NULL;
- goto out;
-}
-
-void qib_cq_exit(struct qib_devdata *dd)
-{
- struct kthread_worker *worker;
-
- worker = dd->worker;
- if (!worker)
- return;
- /* blocks future queuing from send_complete() */
- dd->worker = NULL;
- smp_wmb();
- flush_kthread_worker(worker);
- kthread_stop(worker->task);
- kfree(worker);
-}
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c
deleted file mode 100644
index 5e75b43c596b..000000000000
--- a/drivers/infiniband/hw/qib/qib_debugfs.c
+++ /dev/null
@@ -1,283 +0,0 @@
-#ifdef CONFIG_DEBUG_FS
-/*
- * Copyright (c) 2013 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-
-#include "qib.h"
-#include "qib_verbs.h"
-#include "qib_debugfs.h"
-
-static struct dentry *qib_dbg_root;
-
-#define DEBUGFS_FILE(name) \
-static const struct seq_operations _##name##_seq_ops = { \
- .start = _##name##_seq_start, \
- .next = _##name##_seq_next, \
- .stop = _##name##_seq_stop, \
- .show = _##name##_seq_show \
-}; \
-static int _##name##_open(struct inode *inode, struct file *s) \
-{ \
- struct seq_file *seq; \
- int ret; \
- ret = seq_open(s, &_##name##_seq_ops); \
- if (ret) \
- return ret; \
- seq = s->private_data; \
- seq->private = inode->i_private; \
- return 0; \
-} \
-static const struct file_operations _##name##_file_ops = { \
- .owner = THIS_MODULE, \
- .open = _##name##_open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = seq_release \
-};
-
-#define DEBUGFS_FILE_CREATE(name) \
-do { \
- struct dentry *ent; \
- ent = debugfs_create_file(#name , 0400, ibd->qib_ibdev_dbg, \
- ibd, &_##name##_file_ops); \
- if (!ent) \
- pr_warn("create of " #name " failed\n"); \
-} while (0)
-
-static void *_opcode_stats_seq_start(struct seq_file *s, loff_t *pos)
-{
- struct qib_opcode_stats_perctx *opstats;
-
- if (*pos >= ARRAY_SIZE(opstats->stats))
- return NULL;
- return pos;
-}
-
-static void *_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
-{
- struct qib_opcode_stats_perctx *opstats;
-
- ++*pos;
- if (*pos >= ARRAY_SIZE(opstats->stats))
- return NULL;
- return pos;
-}
-
-
-static void _opcode_stats_seq_stop(struct seq_file *s, void *v)
-{
- /* nothing allocated */
-}
-
-static int _opcode_stats_seq_show(struct seq_file *s, void *v)
-{
- loff_t *spos = v;
- loff_t i = *spos, j;
- u64 n_packets = 0, n_bytes = 0;
- struct qib_ibdev *ibd = (struct qib_ibdev *)s->private;
- struct qib_devdata *dd = dd_from_dev(ibd);
-
- for (j = 0; j < dd->first_user_ctxt; j++) {
- if (!dd->rcd[j])
- continue;
- n_packets += dd->rcd[j]->opstats->stats[i].n_packets;
- n_bytes += dd->rcd[j]->opstats->stats[i].n_bytes;
- }
- if (!n_packets && !n_bytes)
- return SEQ_SKIP;
- seq_printf(s, "%02llx %llu/%llu\n", i,
- (unsigned long long) n_packets,
- (unsigned long long) n_bytes);
-
- return 0;
-}
-
-DEBUGFS_FILE(opcode_stats)
-
-static void *_ctx_stats_seq_start(struct seq_file *s, loff_t *pos)
-{
- struct qib_ibdev *ibd = (struct qib_ibdev *)s->private;
- struct qib_devdata *dd = dd_from_dev(ibd);
-
- if (!*pos)
- return SEQ_START_TOKEN;
- if (*pos >= dd->first_user_ctxt)
- return NULL;
- return pos;
-}
-
-static void *_ctx_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
-{
- struct qib_ibdev *ibd = (struct qib_ibdev *)s->private;
- struct qib_devdata *dd = dd_from_dev(ibd);
-
- if (v == SEQ_START_TOKEN)
- return pos;
-
- ++*pos;
- if (*pos >= dd->first_user_ctxt)
- return NULL;
- return pos;
-}
-
-static void _ctx_stats_seq_stop(struct seq_file *s, void *v)
-{
- /* nothing allocated */
-}
-
-static int _ctx_stats_seq_show(struct seq_file *s, void *v)
-{
- loff_t *spos;
- loff_t i, j;
- u64 n_packets = 0;
- struct qib_ibdev *ibd = (struct qib_ibdev *)s->private;
- struct qib_devdata *dd = dd_from_dev(ibd);
-
- if (v == SEQ_START_TOKEN) {
- seq_puts(s, "Ctx:npkts\n");
- return 0;
- }
-
- spos = v;
- i = *spos;
-
- if (!dd->rcd[i])
- return SEQ_SKIP;
-
- for (j = 0; j < ARRAY_SIZE(dd->rcd[i]->opstats->stats); j++)
- n_packets += dd->rcd[i]->opstats->stats[j].n_packets;
-
- if (!n_packets)
- return SEQ_SKIP;
-
- seq_printf(s, " %llu:%llu\n", i, n_packets);
- return 0;
-}
-
-DEBUGFS_FILE(ctx_stats)
-
-static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
-{
- struct qib_qp_iter *iter;
- loff_t n = *pos;
-
- rcu_read_lock();
- iter = qib_qp_iter_init(s->private);
- if (!iter)
- return NULL;
-
- while (n--) {
- if (qib_qp_iter_next(iter)) {
- kfree(iter);
- return NULL;
- }
- }
-
- return iter;
-}
-
-static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
- loff_t *pos)
-{
- struct qib_qp_iter *iter = iter_ptr;
-
- (*pos)++;
-
- if (qib_qp_iter_next(iter)) {
- kfree(iter);
- return NULL;
- }
-
- return iter;
-}
-
-static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
-{
- rcu_read_unlock();
-}
-
-static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr)
-{
- struct qib_qp_iter *iter = iter_ptr;
-
- if (!iter)
- return 0;
-
- qib_qp_iter_print(s, iter);
-
- return 0;
-}
-
-DEBUGFS_FILE(qp_stats)
-
-void qib_dbg_ibdev_init(struct qib_ibdev *ibd)
-{
- char name[10];
-
- snprintf(name, sizeof(name), "qib%d", dd_from_dev(ibd)->unit);
- ibd->qib_ibdev_dbg = debugfs_create_dir(name, qib_dbg_root);
- if (!ibd->qib_ibdev_dbg) {
- pr_warn("create of %s failed\n", name);
- return;
- }
- DEBUGFS_FILE_CREATE(opcode_stats);
- DEBUGFS_FILE_CREATE(ctx_stats);
- DEBUGFS_FILE_CREATE(qp_stats);
-}
-
-void qib_dbg_ibdev_exit(struct qib_ibdev *ibd)
-{
- if (!qib_dbg_root)
- goto out;
- debugfs_remove_recursive(ibd->qib_ibdev_dbg);
-out:
- ibd->qib_ibdev_dbg = NULL;
-}
-
-void qib_dbg_init(void)
-{
- qib_dbg_root = debugfs_create_dir(QIB_DRV_NAME, NULL);
- if (!qib_dbg_root)
- pr_warn("init of debugfs failed\n");
-}
-
-void qib_dbg_exit(void)
-{
- debugfs_remove_recursive(qib_dbg_root);
- qib_dbg_root = NULL;
-}
-
-#endif
-
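
For context, the debugfs files created above are plain seq_file text: _opcode_stats_seq_show() emits one "opcode packets/bytes" line per opcode using the "%02llx %llu/%llu" format. A minimal user-space sketch of consuming that file follows; the mount point and the "ib_qib"/"qib0" path components are assumptions derived from QIB_DRV_NAME and the "qib%d" directory name, not verified here.

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	/* Assumed path: <debugfs>/<QIB_DRV_NAME>/qib<unit>/opcode_stats */
	FILE *f = fopen("/sys/kernel/debug/ib_qib/qib0/opcode_stats", "r");
	unsigned int opcode;
	uint64_t pkts, bytes;

	if (!f)
		return 1;
	/* Matches the "%02llx %llu/%llu" lines written by _opcode_stats_seq_show() */
	while (fscanf(f, "%x %" SCNu64 "/%" SCNu64, &opcode, &pkts, &bytes) == 3)
		printf("opcode 0x%02x: %" PRIu64 " packets, %" PRIu64 " bytes\n",
		       opcode, pkts, bytes);
	fclose(f);
	return 0;
}
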
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
deleted file mode 100644
index 8c34b23e5bf6..000000000000
--- a/drivers/infiniband/hw/qib/qib_diag.c
+++ /dev/null
@@ -1,916 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file contains support for diagnostic functions. It is accessed by
- * opening the qib_diag device, normally minor number 129. Diagnostic use
- * of the QLogic_IB chip may render the chip or board unusable until the
- * driver is unloaded, or in some cases, until the system is rebooted.
- *
- * Accesses to the chip through this interface are not similar to going
- * through the /sys/bus/pci resource mmap interface.
- */
-
-#include <linux/io.h>
-#include <linux/pci.h>
-#include <linux/poll.h>
-#include <linux/vmalloc.h>
-#include <linux/export.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-
-#include "qib.h"
-#include "qib_common.h"
-
-#undef pr_fmt
-#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
-
-/*
- * Each client that opens the diag device must read then write
- * offset 0, to prevent lossage from random cat or od. diag_state
- * sequences this "handshake".
- */
-enum diag_state { UNUSED = 0, OPENED, INIT, READY };
-
-/* State for an individual client. PID so children cannot abuse handshake */
-static struct qib_diag_client {
- struct qib_diag_client *next;
- struct qib_devdata *dd;
- pid_t pid;
- enum diag_state state;
-} *client_pool;
-
-/*
- * Get a client struct. Recycled if possible, else kmalloc.
- * Must be called with qib_mutex held
- */
-static struct qib_diag_client *get_client(struct qib_devdata *dd)
-{
- struct qib_diag_client *dc;
-
- dc = client_pool;
- if (dc)
- /* got one from the pool; remove it and use it */
- client_pool = dc->next;
- else
- /* None in pool, alloc and init */
- dc = kmalloc(sizeof(*dc), GFP_KERNEL);
-
- if (dc) {
- dc->next = NULL;
- dc->dd = dd;
- dc->pid = current->pid;
- dc->state = OPENED;
- }
- return dc;
-}
-
-/*
- * Return to pool. Must be called with qib_mutex held
- */
-static void return_client(struct qib_diag_client *dc)
-{
- struct qib_devdata *dd = dc->dd;
- struct qib_diag_client *tdc, *rdc;
-
- rdc = NULL;
- if (dc == dd->diag_client) {
- dd->diag_client = dc->next;
- rdc = dc;
- } else {
- tdc = dc->dd->diag_client;
- while (tdc) {
- if (dc == tdc->next) {
- tdc->next = dc->next;
- rdc = dc;
- break;
- }
- tdc = tdc->next;
- }
- }
- if (rdc) {
- rdc->state = UNUSED;
- rdc->dd = NULL;
- rdc->pid = 0;
- rdc->next = client_pool;
- client_pool = rdc;
- }
-}
-
-static int qib_diag_open(struct inode *in, struct file *fp);
-static int qib_diag_release(struct inode *in, struct file *fp);
-static ssize_t qib_diag_read(struct file *fp, char __user *data,
- size_t count, loff_t *off);
-static ssize_t qib_diag_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off);
-
-static const struct file_operations diag_file_ops = {
- .owner = THIS_MODULE,
- .write = qib_diag_write,
- .read = qib_diag_read,
- .open = qib_diag_open,
- .release = qib_diag_release,
- .llseek = default_llseek,
-};
-
-static atomic_t diagpkt_count = ATOMIC_INIT(0);
-static struct cdev *diagpkt_cdev;
-static struct device *diagpkt_device;
-
-static ssize_t qib_diagpkt_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off);
-
-static const struct file_operations diagpkt_file_ops = {
- .owner = THIS_MODULE,
- .write = qib_diagpkt_write,
- .llseek = noop_llseek,
-};
-
-int qib_diag_add(struct qib_devdata *dd)
-{
- char name[16];
- int ret = 0;
-
- if (atomic_inc_return(&diagpkt_count) == 1) {
- ret = qib_cdev_init(QIB_DIAGPKT_MINOR, "ipath_diagpkt",
- &diagpkt_file_ops, &diagpkt_cdev,
- &diagpkt_device);
- if (ret)
- goto done;
- }
-
- snprintf(name, sizeof(name), "ipath_diag%d", dd->unit);
- ret = qib_cdev_init(QIB_DIAG_MINOR_BASE + dd->unit, name,
- &diag_file_ops, &dd->diag_cdev,
- &dd->diag_device);
-done:
- return ret;
-}
-
-static void qib_unregister_observers(struct qib_devdata *dd);
-
-void qib_diag_remove(struct qib_devdata *dd)
-{
- struct qib_diag_client *dc;
-
- if (atomic_dec_and_test(&diagpkt_count))
- qib_cdev_cleanup(&diagpkt_cdev, &diagpkt_device);
-
- qib_cdev_cleanup(&dd->diag_cdev, &dd->diag_device);
-
- /*
- * Return all diag_clients of this device. There should be none,
- * as we are "guaranteed" that no clients are still open
- */
- while (dd->diag_client)
- return_client(dd->diag_client);
-
- /* Now clean up all unused client structs */
- while (client_pool) {
- dc = client_pool;
- client_pool = dc->next;
- kfree(dc);
- }
- /* Clean up observer list */
- qib_unregister_observers(dd);
-}
-
-/* qib_remap_ioaddr32 - remap an offset into chip address space to __iomem *
- *
- * @dd: the qlogic_ib device
- * @offs: the offset in chip-space
- * @cntp: Pointer to max (byte) count for transfer starting at offset
- * This returns a u32 __iomem * so it can be used for both 64 and 32-bit
- * mapping. It is needed because with the use of PAT for control of
- * write-combining, the logically contiguous address-space of the chip
- * may be split into virtually non-contiguous spaces, with different
- * attributes, which are then mapped to contiguous physical space
- * based from the first BAR.
- *
- * The code below makes the same assumptions as were made in
- * init_chip_wc_pat() (qib_init.c), copied here:
- * Assumes chip address space looks like:
- * - kregs + sregs + cregs + uregs (in any order)
- * - piobufs (2K and 4K bufs in either order)
- * or:
- * - kregs + sregs + cregs (in any order)
- * - piobufs (2K and 4K bufs in either order)
- * - uregs
- *
- * If cntp is non-NULL, returns how many bytes from offset can be accessed
- * Returns 0 if the offset is not mapped.
- */
-static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
- u32 *cntp)
-{
- u32 kreglen;
- u32 snd_bottom, snd_lim = 0;
- u32 __iomem *krb32 = (u32 __iomem *)dd->kregbase;
- u32 __iomem *map = NULL;
- u32 cnt = 0;
- u32 tot4k, offs4k;
-
- /* First, simplest case, offset is within the first map. */
- kreglen = (dd->kregend - dd->kregbase) * sizeof(u64);
- if (offset < kreglen) {
- map = krb32 + (offset / sizeof(u32));
- cnt = kreglen - offset;
- goto mapped;
- }
-
- /*
- * Next check for user regs, the next most common case,
- * and a cheap check because if they are not in the first map
- * they are last in chip.
- */
- if (dd->userbase) {
- /* If user regs mapped, they are after send, so set limit. */
- u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
-
- if (!dd->piovl15base)
- snd_lim = dd->uregbase;
- krb32 = (u32 __iomem *)dd->userbase;
- if (offset >= dd->uregbase && offset < ulim) {
- map = krb32 + (offset - dd->uregbase) / sizeof(u32);
- cnt = ulim - offset;
- goto mapped;
- }
- }
-
- /*
- * Lastly, check for offset within Send Buffers.
- * This is gnarly because struct devdata is deliberately vague
- * about things like 7322 VL15 buffers, and we are not in
- * chip-specific code here, so should not make many assumptions.
- * The one we _do_ make is that the only chip that has more sndbufs
- * than we admit is the 7322, and it has userregs above that, so
- * we know the snd_lim.
- */
- /* Assume 2K buffers are first. */
- snd_bottom = dd->pio2k_bufbase;
- if (snd_lim == 0) {
- u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);
-
- snd_lim = snd_bottom + tot2k;
- }
- /* If 4k buffers exist, account for them by bumping
- * appropriate limit.
- */
- tot4k = dd->piobcnt4k * dd->align4k;
- offs4k = dd->piobufbase >> 32;
- if (dd->piobcnt4k) {
- if (snd_bottom > offs4k)
- snd_bottom = offs4k;
- else {
- /* 4k above 2k. Bump snd_lim, if needed */
- if (!dd->userbase || dd->piovl15base)
- snd_lim = offs4k + tot4k;
- }
- }
- /*
- * Judgement call: can we ignore the space between SendBuffs and
- * UserRegs, where we would like to see vl15 buffs, but not more?
- */
- if (offset >= snd_bottom && offset < snd_lim) {
- offset -= snd_bottom;
- map = (u32 __iomem *)dd->piobase + (offset / sizeof(u32));
- cnt = snd_lim - offset;
- }
-
- if (!map && offs4k && dd->piovl15base) {
- snd_lim = offs4k + tot4k + 2 * dd->align4k;
- if (offset >= (offs4k + tot4k) && offset < snd_lim) {
- map = (u32 __iomem *)dd->piovl15base +
- ((offset - (offs4k + tot4k)) / sizeof(u32));
- cnt = snd_lim - offset;
- }
- }
-
-mapped:
- if (cntp)
- *cntp = cnt;
- return map;
-}
-
-/*
- * qib_read_umem64 - read a 64-bit quantity from the chip into user space
- * @dd: the qlogic_ib device
- * @uaddr: the location to store the data in user memory
- * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
- * @count: number of bytes to copy (multiple of 32 bits)
- *
- * This function also localizes all chip memory accesses.
- * The copy should be written such that we read full cacheline packets
- * from the chip. This is usually used for a single qword
- *
- * NOTE: This assumes the chip address is 64-bit aligned.
- */
-static int qib_read_umem64(struct qib_devdata *dd, void __user *uaddr,
- u32 regoffs, size_t count)
-{
- const u64 __iomem *reg_addr;
- const u64 __iomem *reg_end;
- u32 limit;
- int ret;
-
- reg_addr = (const u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
- if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
- ret = -EINVAL;
- goto bail;
- }
- if (count >= limit)
- count = limit;
- reg_end = reg_addr + (count / sizeof(u64));
-
- /* not very efficient, but it works for now */
- while (reg_addr < reg_end) {
- u64 data = readq(reg_addr);
-
- if (copy_to_user(uaddr, &data, sizeof(u64))) {
- ret = -EFAULT;
- goto bail;
- }
- reg_addr++;
- uaddr += sizeof(u64);
- }
- ret = 0;
-bail:
- return ret;
-}
-
-/*
- * qib_write_umem64 - write a 64-bit quantity to the chip from user space
- * @dd: the qlogic_ib device
- * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
- * @uaddr: the source of the data in user memory
- * @count: the number of bytes to copy (multiple of 32 bits)
- *
- * This is usually used for a single qword
- * NOTE: This assumes the chip address is 64-bit aligned.
- */
-
-static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs,
- const void __user *uaddr, size_t count)
-{
- u64 __iomem *reg_addr;
- const u64 __iomem *reg_end;
- u32 limit;
- int ret;
-
- reg_addr = (u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
- if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
- ret = -EINVAL;
- goto bail;
- }
- if (count >= limit)
- count = limit;
- reg_end = reg_addr + (count / sizeof(u64));
-
- /* not very efficient, but it works for now */
- while (reg_addr < reg_end) {
- u64 data;
-
- if (copy_from_user(&data, uaddr, sizeof(data))) {
- ret = -EFAULT;
- goto bail;
- }
- writeq(data, reg_addr);
-
- reg_addr++;
- uaddr += sizeof(u64);
- }
- ret = 0;
-bail:
- return ret;
-}
-
-/*
- * qib_read_umem32 - read a 32-bit quantity from the chip into user space
- * @dd: the qlogic_ib device
- * @uaddr: the location to store the data in user memory
- * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
- * @count: number of bytes to copy
- *
- * read 32 bit values, not 64 bit; for memories that only
- * support 32 bit reads; usually a single dword.
- */
-static int qib_read_umem32(struct qib_devdata *dd, void __user *uaddr,
- u32 regoffs, size_t count)
-{
- const u32 __iomem *reg_addr;
- const u32 __iomem *reg_end;
- u32 limit;
- int ret;
-
- reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
- if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
- ret = -EINVAL;
- goto bail;
- }
- if (count >= limit)
- count = limit;
- reg_end = reg_addr + (count / sizeof(u32));
-
- /* not very efficient, but it works for now */
- while (reg_addr < reg_end) {
- u32 data = readl(reg_addr);
-
- if (copy_to_user(uaddr, &data, sizeof(data))) {
- ret = -EFAULT;
- goto bail;
- }
-
- reg_addr++;
- uaddr += sizeof(u32);
-
- }
- ret = 0;
-bail:
- return ret;
-}
-
-/*
- * qib_write_umem32 - write a 32-bit quantity to the chip from user space
- * @dd: the qlogic_ib device
- * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
- * @uaddr: the source of the data in user memory
- * @count: number of bytes to copy
- *
- * write 32 bit values, not 64 bit; for memories that only
- * support 32 bit write; usually a single dword.
- */
-
-static int qib_write_umem32(struct qib_devdata *dd, u32 regoffs,
- const void __user *uaddr, size_t count)
-{
- u32 __iomem *reg_addr;
- const u32 __iomem *reg_end;
- u32 limit;
- int ret;
-
- reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
- if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
- ret = -EINVAL;
- goto bail;
- }
- if (count >= limit)
- count = limit;
- reg_end = reg_addr + (count / sizeof(u32));
-
- while (reg_addr < reg_end) {
- u32 data;
-
- if (copy_from_user(&data, uaddr, sizeof(data))) {
- ret = -EFAULT;
- goto bail;
- }
- writel(data, reg_addr);
-
- reg_addr++;
- uaddr += sizeof(u32);
- }
- ret = 0;
-bail:
- return ret;
-}
-
-static int qib_diag_open(struct inode *in, struct file *fp)
-{
- int unit = iminor(in) - QIB_DIAG_MINOR_BASE;
- struct qib_devdata *dd;
- struct qib_diag_client *dc;
- int ret;
-
- mutex_lock(&qib_mutex);
-
- dd = qib_lookup(unit);
-
- if (dd == NULL || !(dd->flags & QIB_PRESENT) ||
- !dd->kregbase) {
- ret = -ENODEV;
- goto bail;
- }
-
- dc = get_client(dd);
- if (!dc) {
- ret = -ENOMEM;
- goto bail;
- }
- dc->next = dd->diag_client;
- dd->diag_client = dc;
- fp->private_data = dc;
- ret = 0;
-bail:
- mutex_unlock(&qib_mutex);
-
- return ret;
-}
-
-/**
- * qib_diagpkt_write - write an IB packet
- * @fp: the diag data device file pointer
- * @data: qib_diag_pkt structure saying where to get the packet
- * @count: size of data to write
- * @off: unused by this code
- */
-static ssize_t qib_diagpkt_write(struct file *fp,
- const char __user *data,
- size_t count, loff_t *off)
-{
- u32 __iomem *piobuf;
- u32 plen, pbufn, maxlen_reserve;
- struct qib_diag_xpkt dp;
- u32 *tmpbuf = NULL;
- struct qib_devdata *dd;
- struct qib_pportdata *ppd;
- ssize_t ret = 0;
-
- if (count != sizeof(dp)) {
- ret = -EINVAL;
- goto bail;
- }
- if (copy_from_user(&dp, data, sizeof(dp))) {
- ret = -EFAULT;
- goto bail;
- }
-
- dd = qib_lookup(dp.unit);
- if (!dd || !(dd->flags & QIB_PRESENT) || !dd->kregbase) {
- ret = -ENODEV;
- goto bail;
- }
- if (!(dd->flags & QIB_INITTED)) {
- /* no hardware, freeze, etc. */
- ret = -ENODEV;
- goto bail;
- }
-
- if (dp.version != _DIAG_XPKT_VERS) {
- qib_dev_err(dd, "Invalid version %u for diagpkt_write\n",
- dp.version);
- ret = -EINVAL;
- goto bail;
- }
- /* send count must be an exact number of dwords */
- if (dp.len & 3) {
- ret = -EINVAL;
- goto bail;
- }
- if (!dp.port || dp.port > dd->num_pports) {
- ret = -EINVAL;
- goto bail;
- }
- ppd = &dd->pport[dp.port - 1];
-
- /*
- * need total length before first word written, plus 2 Dwords. One Dword
- * is for padding so we get the full user data when not aligned on
- * a word boundary. The other Dword is to make sure we have room for the
- * ICRC which gets tacked on later.
- */
- maxlen_reserve = 2 * sizeof(u32);
- if (dp.len > ppd->ibmaxlen - maxlen_reserve) {
- ret = -EINVAL;
- goto bail;
- }
-
- plen = sizeof(u32) + dp.len;
-
- tmpbuf = vmalloc(plen);
- if (!tmpbuf) {
- qib_devinfo(dd->pcidev,
- "Unable to allocate tmp buffer, failing\n");
- ret = -ENOMEM;
- goto bail;
- }
-
- if (copy_from_user(tmpbuf,
- (const void __user *) (unsigned long) dp.data,
- dp.len)) {
- ret = -EFAULT;
- goto bail;
- }
-
- plen >>= 2; /* in dwords */
-
- if (dp.pbc_wd == 0)
- dp.pbc_wd = plen;
-
- piobuf = dd->f_getsendbuf(ppd, dp.pbc_wd, &pbufn);
- if (!piobuf) {
- ret = -EBUSY;
- goto bail;
- }
- /* disarm it just to be extra sure */
- dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbufn));
-
- /* disable header check on pbufn for this packet */
- dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_DIS1, NULL);
-
- writeq(dp.pbc_wd, piobuf);
- /*
- * Copy all but the trigger word, then flush, so it's written
- * to chip before trigger word, then write trigger word, then
- * flush again, so packet is sent.
- */
- if (dd->flags & QIB_PIO_FLUSH_WC) {
- qib_flush_wc();
- qib_pio_copy(piobuf + 2, tmpbuf, plen - 1);
- qib_flush_wc();
- __raw_writel(tmpbuf[plen - 1], piobuf + plen + 1);
- } else
- qib_pio_copy(piobuf + 2, tmpbuf, plen);
-
- if (dd->flags & QIB_USE_SPCL_TRIG) {
- u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
-
- qib_flush_wc();
- __raw_writel(0xaebecede, piobuf + spcl_off);
- }
-
- /*
- * Ensure buffer is written to the chip, then re-enable
- * header checks (if supported by chip). The txchk
- * code will ensure seen by chip before returning.
- */
- qib_flush_wc();
- qib_sendbuf_done(dd, pbufn);
- dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
-
- ret = sizeof(dp);
-
-bail:
- vfree(tmpbuf);
- return ret;
-}
-
-static int qib_diag_release(struct inode *in, struct file *fp)
-{
- mutex_lock(&qib_mutex);
- return_client(fp->private_data);
- fp->private_data = NULL;
- mutex_unlock(&qib_mutex);
- return 0;
-}
-
-/*
- * Chip-specific code calls to register its interest in
- * a specific range.
- */
-struct diag_observer_list_elt {
- struct diag_observer_list_elt *next;
- const struct diag_observer *op;
-};
-
-int qib_register_observer(struct qib_devdata *dd,
- const struct diag_observer *op)
-{
- struct diag_observer_list_elt *olp;
- unsigned long flags;
-
- if (!dd || !op)
- return -EINVAL;
- olp = vmalloc(sizeof(*olp));
- if (!olp) {
- pr_err("vmalloc for observer failed\n");
- return -ENOMEM;
- }
-
- spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
- olp->op = op;
- olp->next = dd->diag_observer_list;
- dd->diag_observer_list = olp;
- spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
-
- return 0;
-}
-
-/* Remove all registered observers when device is closed */
-static void qib_unregister_observers(struct qib_devdata *dd)
-{
- struct diag_observer_list_elt *olp;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
- olp = dd->diag_observer_list;
- while (olp) {
- /* Pop one observer, let go of lock */
- dd->diag_observer_list = olp->next;
- spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
- vfree(olp);
- /* try again. */
- spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
- olp = dd->diag_observer_list;
- }
- spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
-}
-
-/*
- * Find the observer, if any, for the specified address. Initial implementation
- * is simple stack of observers. This must be called with diag transaction
- * lock held.
- */
-static const struct diag_observer *diag_get_observer(struct qib_devdata *dd,
- u32 addr)
-{
- struct diag_observer_list_elt *olp;
- const struct diag_observer *op = NULL;
-
- olp = dd->diag_observer_list;
- while (olp) {
- op = olp->op;
- if (addr >= op->bottom && addr <= op->top)
- break;
- olp = olp->next;
- }
- if (!olp)
- op = NULL;
-
- return op;
-}
-
-static ssize_t qib_diag_read(struct file *fp, char __user *data,
- size_t count, loff_t *off)
-{
- struct qib_diag_client *dc = fp->private_data;
- struct qib_devdata *dd = dc->dd;
- void __iomem *kreg_base;
- ssize_t ret;
-
- if (dc->pid != current->pid) {
- ret = -EPERM;
- goto bail;
- }
-
- kreg_base = dd->kregbase;
-
- if (count == 0)
- ret = 0;
- else if ((count % 4) || (*off % 4))
- /* address or length is not 32-bit aligned, hence invalid */
- ret = -EINVAL;
- else if (dc->state < READY && (*off || count != 8))
- ret = -EINVAL; /* prevent cat /dev/qib_diag* */
- else {
- unsigned long flags;
- u64 data64 = 0;
- int use_32;
- const struct diag_observer *op;
-
- use_32 = (count % 8) || (*off % 8);
- ret = -1;
- spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
- /*
- * Check for observer on this address range.
- * we only support a single 32 or 64-bit read
- * via observer, currently.
- */
- op = diag_get_observer(dd, *off);
- if (op) {
- u32 offset = *off;
-
- ret = op->hook(dd, op, offset, &data64, 0, use_32);
- }
- /*
- * We need to release lock before any copy_to_user(),
- * whether implicit in qib_read_umem* or explicit below.
- */
- spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
- if (!op) {
- if (use_32)
- /*
- * Address or length is not 64-bit aligned;
- * do 32-bit rd
- */
- ret = qib_read_umem32(dd, data, (u32) *off,
- count);
- else
- ret = qib_read_umem64(dd, data, (u32) *off,
- count);
- } else if (ret == count) {
- /* Below finishes case where observer existed */
- ret = copy_to_user(data, &data64, use_32 ?
- sizeof(u32) : sizeof(u64));
- if (ret)
- ret = -EFAULT;
- }
- }
-
- if (ret >= 0) {
- *off += count;
- ret = count;
- if (dc->state == OPENED)
- dc->state = INIT;
- }
-bail:
- return ret;
-}
-
-static ssize_t qib_diag_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off)
-{
- struct qib_diag_client *dc = fp->private_data;
- struct qib_devdata *dd = dc->dd;
- void __iomem *kreg_base;
- ssize_t ret;
-
- if (dc->pid != current->pid) {
- ret = -EPERM;
- goto bail;
- }
-
- kreg_base = dd->kregbase;
-
- if (count == 0)
- ret = 0;
- else if ((count % 4) || (*off % 4))
- /* address or length is not 32-bit aligned, hence invalid */
- ret = -EINVAL;
- else if (dc->state < READY &&
- ((*off || count != 8) || dc->state != INIT))
- /* No writes except second-step of init seq */
- ret = -EINVAL; /* before any other write allowed */
- else {
- unsigned long flags;
- const struct diag_observer *op = NULL;
- int use_32 = (count % 8) || (*off % 8);
-
- /*
- * Check for observer on this address range.
- * We only support a single 32 or 64-bit write
- * via observer, currently. This helps, because
- * we would otherwise have to jump through hoops
- * to make "diag transaction" meaningful when we
- * cannot do a copy_from_user while holding the lock.
- */
- if (count == 4 || count == 8) {
- u64 data64;
- u32 offset = *off;
-
- ret = copy_from_user(&data64, data, count);
- if (ret) {
- ret = -EFAULT;
- goto bail;
- }
- spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
- op = diag_get_observer(dd, *off);
- if (op)
- ret = op->hook(dd, op, offset, &data64, ~0Ull,
- use_32);
- spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
- }
-
- if (!op) {
- if (use_32)
- /*
- * Address or length is not 64-bit aligned;
- * do 32-bit write
- */
- ret = qib_write_umem32(dd, (u32) *off, data,
- count);
- else
- ret = qib_write_umem64(dd, (u32) *off, data,
- count);
- }
- }
-
- if (ret >= 0) {
- *off += count;
- ret = count;
- if (dc->state == INIT)
- dc->state = READY; /* all read/write OK now */
- }
-bail:
- return ret;
-}
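
The OPENED -> INIT -> READY sequencing enforced by qib_diag_read() and qib_diag_write() above amounts to a simple handshake: a client must first read, then write back, exactly 8 bytes at offset 0 before any other access is accepted. A minimal user-space sketch of that handshake follows, assuming the character device for unit 0 appears as /dev/ipath_diag0 (the device path is an assumption about udev naming, not something this code guarantees).

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	uint64_t word = 0;
	int fd = open("/dev/ipath_diag0", O_RDWR);	/* assumed path for unit 0 */

	if (fd < 0)
		return 1;
	/* Step 1: an 8-byte read at offset 0 moves the client from OPENED to INIT */
	if (pread(fd, &word, sizeof(word), 0) != sizeof(word)) {
		close(fd);
		return 1;
	}
	/* Step 2: an 8-byte write at offset 0 (here, the value just read)
	 * moves the client from INIT to READY */
	if (pwrite(fd, &word, sizeof(word), 0) != sizeof(word)) {
		close(fd);
		return 1;
	}
	/* From here on, 32/64-bit aligned reads and writes at chip offsets are allowed */
	close(fd);
	return 0;
}
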
diff --git a/drivers/infiniband/hw/qib/qib_dma.c b/drivers/infiniband/hw/qib/qib_dma.c
deleted file mode 100644
index 59fe092b4b0f..000000000000
--- a/drivers/infiniband/hw/qib/qib_dma.c
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright (c) 2006, 2009, 2010 QLogic, Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/types.h>
-#include <linux/scatterlist.h>
-
-#include "qib_verbs.h"
-
-#define BAD_DMA_ADDRESS ((u64) 0)
-
-/*
- * The following functions implement driver specific replacements
- * for the ib_dma_*() functions.
- *
- * These functions return kernel virtual addresses instead of
- * device bus addresses since the driver uses the CPU to copy
- * data instead of using hardware DMA.
- */
-
-static int qib_mapping_error(struct ib_device *dev, u64 dma_addr)
-{
- return dma_addr == BAD_DMA_ADDRESS;
-}
-
-static u64 qib_dma_map_single(struct ib_device *dev, void *cpu_addr,
- size_t size, enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- return (u64) cpu_addr;
-}
-
-static void qib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
-}
-
-static u64 qib_dma_map_page(struct ib_device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- u64 addr;
-
- BUG_ON(!valid_dma_direction(direction));
-
- if (offset + size > PAGE_SIZE) {
- addr = BAD_DMA_ADDRESS;
- goto done;
- }
-
- addr = (u64) page_address(page);
- if (addr)
- addr += offset;
- /* TODO: handle highmem pages */
-
-done:
- return addr;
-}
-
-static void qib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
-}
-
-static int qib_map_sg(struct ib_device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction direction)
-{
- struct scatterlist *sg;
- u64 addr;
- int i;
- int ret = nents;
-
- BUG_ON(!valid_dma_direction(direction));
-
- for_each_sg(sgl, sg, nents, i) {
- addr = (u64) page_address(sg_page(sg));
- /* TODO: handle highmem pages */
- if (!addr) {
- ret = 0;
- break;
- }
- sg->dma_address = addr + sg->offset;
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
- sg->dma_length = sg->length;
-#endif
- }
- return ret;
-}
-
-static void qib_unmap_sg(struct ib_device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
-}
-
-static void qib_sync_single_for_cpu(struct ib_device *dev, u64 addr,
- size_t size, enum dma_data_direction dir)
-{
-}
-
-static void qib_sync_single_for_device(struct ib_device *dev, u64 addr,
- size_t size,
- enum dma_data_direction dir)
-{
-}
-
-static void *qib_dma_alloc_coherent(struct ib_device *dev, size_t size,
- u64 *dma_handle, gfp_t flag)
-{
- struct page *p;
- void *addr = NULL;
-
- p = alloc_pages(flag, get_order(size));
- if (p)
- addr = page_address(p);
- if (dma_handle)
- *dma_handle = (u64) addr;
- return addr;
-}
-
-static void qib_dma_free_coherent(struct ib_device *dev, size_t size,
- void *cpu_addr, u64 dma_handle)
-{
- free_pages((unsigned long) cpu_addr, get_order(size));
-}
-
-struct ib_dma_mapping_ops qib_dma_mapping_ops = {
- .mapping_error = qib_mapping_error,
- .map_single = qib_dma_map_single,
- .unmap_single = qib_dma_unmap_single,
- .map_page = qib_dma_map_page,
- .unmap_page = qib_dma_unmap_page,
- .map_sg = qib_map_sg,
- .unmap_sg = qib_unmap_sg,
- .sync_single_for_cpu = qib_sync_single_for_cpu,
- .sync_single_for_device = qib_sync_single_for_device,
- .alloc_coherent = qib_dma_alloc_coherent,
- .free_coherent = qib_dma_free_coherent
-};
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
deleted file mode 100644
index f58fdc3d25a2..000000000000
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ /dev/null
@@ -1,820 +0,0 @@
-/*
- * Copyright (c) 2013 Intel Corporation. All rights reserved.
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/spinlock.h>
-#include <linux/pci.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-#include <linux/prefetch.h>
-
-#include "qib.h"
-
-/*
- * The size has to be longer than this string, so we can append
- * board/chip information to it in the init code.
- */
-const char ib_qib_version[] = QIB_DRIVER_VERSION "\n";
-
-DEFINE_SPINLOCK(qib_devs_lock);
-LIST_HEAD(qib_dev_list);
-DEFINE_MUTEX(qib_mutex); /* general driver use */
-
-unsigned qib_ibmtu;
-module_param_named(ibmtu, qib_ibmtu, uint, S_IRUGO);
-MODULE_PARM_DESC(ibmtu, "Set max IB MTU (0=2KB, 1=256, 2=512, ... 5=4096)");
-
-unsigned qib_compat_ddr_negotiate = 1;
-module_param_named(compat_ddr_negotiate, qib_compat_ddr_negotiate, uint,
- S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(compat_ddr_negotiate,
- "Attempt pre-IBTA 1.2 DDR speed negotiation");
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel <ibsupport@intel.com>");
-MODULE_DESCRIPTION("Intel IB driver");
-MODULE_VERSION(QIB_DRIVER_VERSION);
-
-/*
- * QIB_PIO_MAXIBHDR is the max IB header size allowed for in our
- * PIO send buffers. This is well beyond anything currently
- * defined in the InfiniBand spec.
- */
-#define QIB_PIO_MAXIBHDR 128
-
-/*
- * QIB_MAX_PKT_RECV is the max # of packets processed per receive interrupt.
- */
-#define QIB_MAX_PKT_RECV 64
-
-struct qlogic_ib_stats qib_stats;
-
-const char *qib_get_unit_name(int unit)
-{
- static char iname[16];
-
- snprintf(iname, sizeof(iname), "infinipath%u", unit);
- return iname;
-}
-
-/*
- * Return count of units with at least one port ACTIVE.
- */
-int qib_count_active_units(void)
-{
- struct qib_devdata *dd;
- struct qib_pportdata *ppd;
- unsigned long flags;
- int pidx, nunits_active = 0;
-
- spin_lock_irqsave(&qib_devs_lock, flags);
- list_for_each_entry(dd, &qib_dev_list, list) {
- if (!(dd->flags & QIB_PRESENT) || !dd->kregbase)
- continue;
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
- QIBL_LINKARMED | QIBL_LINKACTIVE))) {
- nunits_active++;
- break;
- }
- }
- }
- spin_unlock_irqrestore(&qib_devs_lock, flags);
- return nunits_active;
-}
-
-/*
- * Return count of all units, optionally return in arguments
- * the number of usable (present) units, and the number of
- * ports that are up.
- */
-int qib_count_units(int *npresentp, int *nupp)
-{
- int nunits = 0, npresent = 0, nup = 0;
- struct qib_devdata *dd;
- unsigned long flags;
- int pidx;
- struct qib_pportdata *ppd;
-
- spin_lock_irqsave(&qib_devs_lock, flags);
-
- list_for_each_entry(dd, &qib_dev_list, list) {
- nunits++;
- if ((dd->flags & QIB_PRESENT) && dd->kregbase)
- npresent++;
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
- QIBL_LINKARMED | QIBL_LINKACTIVE)))
- nup++;
- }
- }
-
- spin_unlock_irqrestore(&qib_devs_lock, flags);
-
- if (npresentp)
- *npresentp = npresent;
- if (nupp)
- *nupp = nup;
-
- return nunits;
-}
-
-/**
- * qib_wait_linkstate - wait for an IB link state change to occur
- * @ppd: the qlogic_ib per-port data
- * @state: the state to wait for
- * @msecs: the number of milliseconds to wait
- *
- * Wait up to msecs milliseconds for an IB link state change to occur.
- * For now, take the easy polling route. Currently used only by
- * qib_set_linkstate. Returns 0 if the state is reached, otherwise
- * -ETIMEDOUT. The state can have multiple bits set, to allow waiting
- * for any of several transitions.
- */
-int qib_wait_linkstate(struct qib_pportdata *ppd, u32 state, int msecs)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- if (ppd->state_wanted) {
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- ret = -EBUSY;
- goto bail;
- }
- ppd->state_wanted = state;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- wait_event_interruptible_timeout(ppd->state_wait,
- (ppd->lflags & state),
- msecs_to_jiffies(msecs));
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->state_wanted = 0;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
-
- if (!(ppd->lflags & state))
- ret = -ETIMEDOUT;
- else
- ret = 0;
-bail:
- return ret;
-}
-
-int qib_set_linkstate(struct qib_pportdata *ppd, u8 newstate)
-{
- u32 lstate;
- int ret;
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
-
- switch (newstate) {
- case QIB_IB_LINKDOWN_ONLY:
- dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
- IB_LINKCMD_DOWN | IB_LINKINITCMD_NOP);
- /* don't wait */
- ret = 0;
- goto bail;
-
- case QIB_IB_LINKDOWN:
- dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
- IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
- /* don't wait */
- ret = 0;
- goto bail;
-
- case QIB_IB_LINKDOWN_SLEEP:
- dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
- IB_LINKCMD_DOWN | IB_LINKINITCMD_SLEEP);
- /* don't wait */
- ret = 0;
- goto bail;
-
- case QIB_IB_LINKDOWN_DISABLE:
- dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
- IB_LINKCMD_DOWN | IB_LINKINITCMD_DISABLE);
- /* don't wait */
- ret = 0;
- goto bail;
-
- case QIB_IB_LINKARM:
- if (ppd->lflags & QIBL_LINKARMED) {
- ret = 0;
- goto bail;
- }
- if (!(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKACTIVE))) {
- ret = -EINVAL;
- goto bail;
- }
- /*
- * Since the port can be ACTIVE when we ask for ARMED,
- * clear QIBL_LINKV so we can wait for a transition.
- * If the link isn't ARMED, then something else happened
- * and there is no point waiting for ARMED.
- */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_LINKV;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
- IB_LINKCMD_ARMED | IB_LINKINITCMD_NOP);
- lstate = QIBL_LINKV;
- break;
-
- case QIB_IB_LINKACTIVE:
- if (ppd->lflags & QIBL_LINKACTIVE) {
- ret = 0;
- goto bail;
- }
- if (!(ppd->lflags & QIBL_LINKARMED)) {
- ret = -EINVAL;
- goto bail;
- }
- dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
- IB_LINKCMD_ACTIVE | IB_LINKINITCMD_NOP);
- lstate = QIBL_LINKACTIVE;
- break;
-
- default:
- ret = -EINVAL;
- goto bail;
- }
- ret = qib_wait_linkstate(ppd, lstate, 10);
-
-bail:
- return ret;
-}
-
-/*
- * Get address of eager buffer from its index (allocated in chunks, not
- * contiguous).
- */
-static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
-{
- const u32 chunk = etail >> rcd->rcvegrbufs_perchunk_shift;
- const u32 idx = etail & ((u32)rcd->rcvegrbufs_perchunk - 1);
-
- return rcd->rcvegrbuf[chunk] + (idx << rcd->dd->rcvegrbufsize_shift);
-}
-
-/*
- * Returns 1 if error was a CRC, else 0.
- * Needed for some chip's synthesized error counters.
- */
-static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
- u32 ctxt, u32 eflags, u32 l, u32 etail,
- __le32 *rhf_addr, struct qib_message_header *rhdr)
-{
- u32 ret = 0;
-
- if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
- ret = 1;
- else if (eflags == QLOGIC_IB_RHF_H_TIDERR) {
- /* For TIDERR and RC QPs preemptively schedule a NAK */
- struct qib_ib_header *hdr = (struct qib_ib_header *) rhdr;
- struct qib_other_headers *ohdr = NULL;
- struct qib_ibport *ibp = &ppd->ibport_data;
- struct qib_qp *qp = NULL;
- u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
- u16 lid = be16_to_cpu(hdr->lrh[1]);
- int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
- u32 qp_num;
- u32 opcode;
- u32 psn;
- int diff;
-
- /* Sanity check packet */
- if (tlen < 24)
- goto drop;
-
- if (lid < QIB_MULTICAST_LID_BASE) {
- lid &= ~((1 << ppd->lmc) - 1);
- if (unlikely(lid != ppd->lid))
- goto drop;
- }
-
- /* Check for GRH */
- if (lnh == QIB_LRH_BTH)
- ohdr = &hdr->u.oth;
- else if (lnh == QIB_LRH_GRH) {
- u32 vtf;
-
- ohdr = &hdr->u.l.oth;
- if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
- goto drop;
- vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
- if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
- goto drop;
- } else
- goto drop;
-
- /* Get opcode and PSN from packet */
- opcode = be32_to_cpu(ohdr->bth[0]);
- opcode >>= 24;
- psn = be32_to_cpu(ohdr->bth[2]);
-
- /* Get the destination QP number. */
- qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
- if (qp_num != QIB_MULTICAST_QPN) {
- int ruc_res;
-
- qp = qib_lookup_qpn(ibp, qp_num);
- if (!qp)
- goto drop;
-
- /*
- * Handle only RC QPs - for other QP types drop error
- * packet.
- */
- spin_lock(&qp->r_lock);
-
- /* Check for valid receive state. */
- if (!(ib_qib_state_ops[qp->state] &
- QIB_PROCESS_RECV_OK)) {
- ibp->n_pkt_drops++;
- goto unlock;
- }
-
- switch (qp->ibqp.qp_type) {
- case IB_QPT_RC:
- ruc_res =
- qib_ruc_check_hdr(
- ibp, hdr,
- lnh == QIB_LRH_GRH,
- qp,
- be32_to_cpu(ohdr->bth[0]));
- if (ruc_res)
- goto unlock;
-
- /* Only deal with RDMA Writes for now */
- if (opcode <
- IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
- diff = qib_cmp24(psn, qp->r_psn);
- if (!qp->r_nak_state && diff >= 0) {
- ibp->n_rc_seqnak++;
- qp->r_nak_state =
- IB_NAK_PSN_ERROR;
- /* Use the expected PSN. */
- qp->r_ack_psn = qp->r_psn;
- /*
- * Wait to send the sequence
- * NAK until all packets
- * in the receive queue have
- * been processed.
- * Otherwise, we end up
- * propagating congestion.
- */
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |=
- QIB_R_RSP_NAK;
- atomic_inc(
- &qp->refcount);
- list_add_tail(
- &qp->rspwait,
- &rcd->qp_wait_list);
- }
- } /* Out of sequence NAK */
- } /* QP Request NAKs */
- break;
- case IB_QPT_SMI:
- case IB_QPT_GSI:
- case IB_QPT_UD:
- case IB_QPT_UC:
- default:
- /* For now don't handle any other QP types */
- break;
- }
-
-unlock:
- spin_unlock(&qp->r_lock);
- /*
- * Notify qib_destroy_qp() if it is waiting
- * for us to finish.
- */
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
- } /* Unicast QP */
- } /* Valid packet with TIDErr */
-
-drop:
- return ret;
-}
-
-/*
- * qib_kreceive - receive a packet
- * @rcd: the qlogic_ib context
- * @llic: gets count of good packets needed to clear lli,
- * (used with chips that need to track crcs for lli)
- *
- * called from interrupt handler for errors or receive interrupt
- * Returns number of CRC error packets, needed by some chips for
- * local link integrity tracking. crcs are adjusted down by following
- * good packets, if any, and count of good packets is also tracked.
- */
-u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
-{
- struct qib_devdata *dd = rcd->dd;
- struct qib_pportdata *ppd = rcd->ppd;
- __le32 *rhf_addr;
- void *ebuf;
- const u32 rsize = dd->rcvhdrentsize; /* words */
- const u32 maxcnt = dd->rcvhdrcnt * rsize; /* words */
- u32 etail = -1, l, hdrqtail;
- struct qib_message_header *hdr;
- u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0;
- int last;
- u64 lval;
- struct qib_qp *qp, *nqp;
-
- l = rcd->head;
- rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
- if (dd->flags & QIB_NODMA_RTAIL) {
- u32 seq = qib_hdrget_seq(rhf_addr);
-
- if (seq != rcd->seq_cnt)
- goto bail;
- hdrqtail = 0;
- } else {
- hdrqtail = qib_get_rcvhdrtail(rcd);
- if (l == hdrqtail)
- goto bail;
- smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
- }
-
- for (last = 0, i = 1; !last; i += !last) {
- hdr = dd->f_get_msgheader(dd, rhf_addr);
- eflags = qib_hdrget_err_flags(rhf_addr);
- etype = qib_hdrget_rcv_type(rhf_addr);
- /* total length */
- tlen = qib_hdrget_length_in_bytes(rhf_addr);
- ebuf = NULL;
- if ((dd->flags & QIB_NODMA_RTAIL) ?
- qib_hdrget_use_egr_buf(rhf_addr) :
- (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
- etail = qib_hdrget_index(rhf_addr);
- updegr = 1;
- if (tlen > sizeof(*hdr) ||
- etype >= RCVHQ_RCV_TYPE_NON_KD) {
- ebuf = qib_get_egrbuf(rcd, etail);
- prefetch_range(ebuf, tlen - sizeof(*hdr));
- }
- }
- if (!eflags) {
- u16 lrh_len = be16_to_cpu(hdr->lrh[2]) << 2;
-
- if (lrh_len != tlen) {
- qib_stats.sps_lenerrs++;
- goto move_along;
- }
- }
- if (etype == RCVHQ_RCV_TYPE_NON_KD && !eflags &&
- ebuf == NULL &&
- tlen > (dd->rcvhdrentsize - 2 + 1 -
- qib_hdrget_offset(rhf_addr)) << 2) {
- goto move_along;
- }
-
- /*
- * Both tiderr and qibhdrerr are set for all plain IB
- * packets; only qibhdrerr should be set.
- */
- if (unlikely(eflags))
- crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l,
- etail, rhf_addr, hdr);
- else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
- qib_ib_rcv(rcd, hdr, ebuf, tlen);
- if (crcs)
- crcs--;
- else if (llic && *llic)
- --*llic;
- }
-move_along:
- l += rsize;
- if (l >= maxcnt)
- l = 0;
- if (i == QIB_MAX_PKT_RECV)
- last = 1;
-
- rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
- if (dd->flags & QIB_NODMA_RTAIL) {
- u32 seq = qib_hdrget_seq(rhf_addr);
-
- if (++rcd->seq_cnt > 13)
- rcd->seq_cnt = 1;
- if (seq != rcd->seq_cnt)
- last = 1;
- } else if (l == hdrqtail)
- last = 1;
- /*
- * Update head regs etc., every 16 packets, if not last pkt,
- * to help prevent rcvhdrq overflows, when many packets
- * are processed and queue is nearly full.
- * Don't request an interrupt for intermediate updates.
- */
- lval = l;
- if (!last && !(i & 0xf)) {
- dd->f_update_usrhead(rcd, lval, updegr, etail, i);
- updegr = 0;
- }
- }
- /*
- * Notify qib_destroy_qp() if it is waiting
- * for lookaside_qp to finish.
- */
- if (rcd->lookaside_qp) {
- if (atomic_dec_and_test(&rcd->lookaside_qp->refcount))
- wake_up(&rcd->lookaside_qp->wait);
- rcd->lookaside_qp = NULL;
- }
-
- rcd->head = l;
-
- /*
- * Iterate over all QPs waiting to respond.
- * The list won't change since the IRQ is only run on one CPU.
- */
- list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
- list_del_init(&qp->rspwait);
- if (qp->r_flags & QIB_R_RSP_NAK) {
- qp->r_flags &= ~QIB_R_RSP_NAK;
- qib_send_rc_ack(qp);
- }
- if (qp->r_flags & QIB_R_RSP_SEND) {
- unsigned long flags;
-
- qp->r_flags &= ~QIB_R_RSP_SEND;
- spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_qib_state_ops[qp->state] &
- QIB_PROCESS_OR_FLUSH_SEND)
- qib_schedule_send(qp);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- }
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
- }
-
-bail:
- /* Report number of packets consumed */
- if (npkts)
- *npkts = i;
-
- /*
- * Always write head at end, and setup rcv interrupt, even
- * if no packets were processed.
- */
- lval = (u64)rcd->head | dd->rhdrhead_intr_off;
- dd->f_update_usrhead(rcd, lval, updegr, etail, i);
- return crcs;
-}
-
-/**
- * qib_set_mtu - set the MTU
- * @ppd: the perport data
- * @arg: the new MTU
- *
- * We can handle "any" incoming size; the issue here is whether we
- * need to restrict our outgoing size. For now, we don't do any
- * sanity checking on this, and we don't deal with what happens to
- * programs that are already running when the size changes.
- * NOTE: changing the MTU will usually cause the IBC to go back to
- * link INIT state...
- */
-int qib_set_mtu(struct qib_pportdata *ppd, u16 arg)
-{
- u32 piosize;
- int ret, chk;
-
- if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
- arg != 4096) {
- ret = -EINVAL;
- goto bail;
- }
- chk = ib_mtu_enum_to_int(qib_ibmtu);
- if (chk > 0 && arg > chk) {
- ret = -EINVAL;
- goto bail;
- }
-
- piosize = ppd->ibmaxlen;
- ppd->ibmtu = arg;
-
- if (arg >= (piosize - QIB_PIO_MAXIBHDR)) {
- /* Only if it's not the initial value (or reset to it) */
- if (piosize != ppd->init_ibmaxlen) {
- if (arg > piosize && arg <= ppd->init_ibmaxlen)
- piosize = ppd->init_ibmaxlen - 2 * sizeof(u32);
- ppd->ibmaxlen = piosize;
- }
- } else if ((arg + QIB_PIO_MAXIBHDR) != ppd->ibmaxlen) {
- piosize = arg + QIB_PIO_MAXIBHDR - 2 * sizeof(u32);
- ppd->ibmaxlen = piosize;
- }
-
- ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_MTU, 0);
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
-{
- struct qib_devdata *dd = ppd->dd;
-
- ppd->lid = lid;
- ppd->lmc = lmc;
-
- dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LIDLMC,
- lid | (~((1U << lmc) - 1)) << 16);
-
- qib_devinfo(dd->pcidev, "IB%u:%u got a lid: 0x%x\n",
- dd->unit, ppd->port, lid);
-
- return 0;
-}
-
-/*
- * The following deals with the "obviously simple" task of overriding the
- * state of the LEDs, which normally indicate link physical and logical
- * status. The complications arise in dealing with different hardware
- * mappings and the board-dependent routine being called from interrupts,
- * and then there's the requirement to _flash_ them.
- */
-#define LED_OVER_FREQ_SHIFT 8
-#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
-/* Below is "non-zero" to force override, but both actual LEDs are off */
-#define LED_OVER_BOTH_OFF (8)
-
-static void qib_run_led_override(unsigned long opaque)
-{
- struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
- struct qib_devdata *dd = ppd->dd;
- int timeoff;
- int ph_idx;
-
- if (!(dd->flags & QIB_INITTED))
- return;
-
- ph_idx = ppd->led_override_phase++ & 1;
- ppd->led_override = ppd->led_override_vals[ph_idx];
- timeoff = ppd->led_override_timeoff;
-
- dd->f_setextled(ppd, 1);
- /*
- * don't re-fire the timer if user asked for it to be off; we let
- * it fire one more time after they turn it off to simplify
- */
- if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
- mod_timer(&ppd->led_override_timer, jiffies + timeoff);
-}
-
-void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val)
-{
- struct qib_devdata *dd = ppd->dd;
- int timeoff, freq;
-
- if (!(dd->flags & QIB_INITTED))
- return;
-
- /* First check if we are blinking. If not, use 1HZ polling */
- timeoff = HZ;
- freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;
-
- if (freq) {
- /* For blink, set each phase from one nybble of val */
- ppd->led_override_vals[0] = val & 0xF;
- ppd->led_override_vals[1] = (val >> 4) & 0xF;
- timeoff = (HZ << 4)/freq;
- } else {
- /* Non-blink set both phases the same. */
- ppd->led_override_vals[0] = val & 0xF;
- ppd->led_override_vals[1] = val & 0xF;
- }
- ppd->led_override_timeoff = timeoff;
-
- /*
- * If the timer has not already been started, do so. Use a "quick"
- * timeout so the function will be called soon, to look at our request.
- */
- if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
- /* Need to start timer */
- init_timer(&ppd->led_override_timer);
- ppd->led_override_timer.function = qib_run_led_override;
- ppd->led_override_timer.data = (unsigned long) ppd;
- ppd->led_override_timer.expires = jiffies + 1;
- add_timer(&ppd->led_override_timer);
- } else {
- if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
- mod_timer(&ppd->led_override_timer, jiffies + 1);
- atomic_dec(&ppd->led_override_timer_active);
- }
-}
-
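The override word consumed above is compact: two 4-bit LED phases in the low byte and an optional blink-frequency field in the next byte. A user-space sketch of the decoding, with HZ given a placeholder value since this is not built against kernel headers:

#include <stdio.h>

#define HZ 250                          /* placeholder tick rate, illustration only */
#define LED_OVER_FREQ_SHIFT 8
#define LED_OVER_FREQ_MASK (0xFF << LED_OVER_FREQ_SHIFT)

int main(void)
{
        unsigned int val = (4 << LED_OVER_FREQ_SHIFT) | 0x5A; /* freq field 4, phases 0xA/0x5 */
        unsigned int phase0, phase1, freq, timeoff = HZ;      /* default: 1 Hz polling */

        freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;
        if (freq) {
                phase0 = val & 0xF;             /* first blink phase */
                phase1 = (val >> 4) & 0xF;      /* second blink phase */
                timeoff = (HZ << 4) / freq;     /* same period formula as the driver */
        } else {
                phase0 = phase1 = val & 0xF;    /* steady override: both phases equal */
        }
        printf("phase0=0x%x phase1=0x%x period=%u jiffies\n", phase0, phase1, timeoff);
        return 0;
}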
-/**
- * qib_reset_device - reset the chip if possible
- * @unit: the device to reset
- *
- * Whether or not reset is successful, we attempt to re-initialize the chip
- * (that is, much like a driver unload/reload). We clear the INITTED flag
- * so that the various entry points will fail until we reinitialize. For
- * now, we only allow this if no user contexts are open that use chip resources
- */
-int qib_reset_device(int unit)
-{
- int ret, i;
- struct qib_devdata *dd = qib_lookup(unit);
- struct qib_pportdata *ppd;
- unsigned long flags;
- int pidx;
-
- if (!dd) {
- ret = -ENODEV;
- goto bail;
- }
-
- qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit);
-
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) {
- qib_devinfo(dd->pcidev,
- "Invalid unit number %u or not initialized or not present\n",
- unit);
- ret = -ENXIO;
- goto bail;
- }
-
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- if (dd->rcd)
- for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
- if (!dd->rcd[i] || !dd->rcd[i]->cnt)
- continue;
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
- ret = -EBUSY;
- goto bail;
- }
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (atomic_read(&ppd->led_override_timer_active)) {
- /* Need to stop LED timer, _then_ shut off LEDs */
- del_timer_sync(&ppd->led_override_timer);
- atomic_set(&ppd->led_override_timer_active, 0);
- }
-
- /* Shut off LEDs after we are sure timer is not running */
- ppd->led_override = LED_OVER_BOTH_OFF;
- dd->f_setextled(ppd, 0);
- if (dd->flags & QIB_HAS_SEND_DMA)
- qib_teardown_sdma(ppd);
- }
-
- ret = dd->f_reset(dd);
- if (ret == 1)
- ret = qib_init(dd, 1);
- else
- ret = -EAGAIN;
- if (ret)
- qib_dev_err(dd,
- "Reinitialize unit %u after reset failed with %d\n",
- unit, ret);
- else
- qib_devinfo(dd->pcidev,
- "Reinitialized unit %u after resetting\n",
- unit);
-
-bail:
- return ret;
-}
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
deleted file mode 100644
index 311ee6c3dd5e..000000000000
--- a/drivers/infiniband/hw/qib/qib_eeprom.c
+++ /dev/null
@@ -1,276 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-
-#include "qib.h"
-
-/*
- * Functions specific to the serial EEPROM on cards handled by ib_qib.
- * The actual serial interface code is in qib_twsi.c; this file is a client of it.
- */
-
-/**
- * qib_eeprom_read - receives bytes from the eeprom via I2C
- * @dd: the qlogic_ib device
- * @eeprom_offset: address to read from
- * @buff: where to store result
- * @len: number of bytes to receive
- */
-int qib_eeprom_read(struct qib_devdata *dd, u8 eeprom_offset,
- void *buff, int len)
-{
- int ret;
-
- ret = mutex_lock_interruptible(&dd->eep_lock);
- if (!ret) {
- ret = qib_twsi_reset(dd);
- if (ret)
- qib_dev_err(dd, "EEPROM Reset for read failed\n");
- else
- ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev,
- eeprom_offset, buff, len);
- mutex_unlock(&dd->eep_lock);
- }
-
- return ret;
-}
-
-/*
- * Actually update the eeprom, first doing write enable if
- * needed, then restoring write enable state.
- * Must be called with eep_lock held
- */
-static int eeprom_write_with_enable(struct qib_devdata *dd, u8 offset,
- const void *buf, int len)
-{
- int ret, pwen;
-
- pwen = dd->f_eeprom_wen(dd, 1);
- ret = qib_twsi_reset(dd);
- if (ret)
- qib_dev_err(dd, "EEPROM Reset for write failed\n");
- else
- ret = qib_twsi_blk_wr(dd, dd->twsi_eeprom_dev,
- offset, buf, len);
- dd->f_eeprom_wen(dd, pwen);
- return ret;
-}
-
-/**
- * qib_eeprom_write - writes data to the eeprom via I2C
- * @dd: the qlogic_ib device
- * @eeprom_offset: where to place data
- * @buff: data to write
- * @len: number of bytes to write
- */
-int qib_eeprom_write(struct qib_devdata *dd, u8 eeprom_offset,
- const void *buff, int len)
-{
- int ret;
-
- ret = mutex_lock_interruptible(&dd->eep_lock);
- if (!ret) {
- ret = eeprom_write_with_enable(dd, eeprom_offset, buff, len);
- mutex_unlock(&dd->eep_lock);
- }
-
- return ret;
-}
-
-static u8 flash_csum(struct qib_flash *ifp, int adjust)
-{
- u8 *ip = (u8 *) ifp;
- u8 csum = 0, len;
-
- /*
- * Limit length checksummed to max length of actual data.
- * Checksum of erased eeprom will still be bad, but we avoid
- * reading past the end of the buffer we were passed.
- */
- len = ifp->if_length;
- if (len > sizeof(struct qib_flash))
- len = sizeof(struct qib_flash);
- while (len--)
- csum += *ip++;
- csum -= ifp->if_csum;
- csum = ~csum;
- if (adjust)
- ifp->if_csum = csum;
-
- return csum;
-}
-
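flash_csum() is a plain byte-sum over the flash image with the stored checksum byte backed out and the result complemented. A stand-alone illustration over an arbitrary little buffer (the real qib_flash layout is not reproduced here):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Same arithmetic as flash_csum(): sum every byte, back out the byte that
 * holds the stored checksum, then take the one's complement. */
static uint8_t byte_csum(const uint8_t *buf, size_t len, size_t csum_off)
{
        uint8_t csum = 0;
        size_t i;

        for (i = 0; i < len; i++)
                csum += buf[i];
        csum -= buf[csum_off];
        return (uint8_t)~csum;
}

int main(void)
{
        uint8_t image[8] = { 0x10, 0x20, 0x30, 0x00, 0x01, 0x02, 0x03, 0x04 };

        image[3] = byte_csum(image, sizeof(image), 3);  /* store at offset 3 */
        printf("csum=0x%02x verify=%s\n", image[3],
               byte_csum(image, sizeof(image), 3) == image[3] ? "ok" : "bad");
        return 0;
}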
-/**
- * qib_get_eeprom_info - get the GUID et al. from the TWSI EEPROM device
- * @dd: the qlogic_ib device
- *
- * We have the capability to use the nguid field, and get
- * the guid from the first chip's flash, to use for all of them.
- */
-void qib_get_eeprom_info(struct qib_devdata *dd)
-{
- void *buf;
- struct qib_flash *ifp;
- __be64 guid;
- int len, eep_stat;
- u8 csum, *bguid;
- int t = dd->unit;
- struct qib_devdata *dd0 = qib_lookup(0);
-
- if (t && dd0->nguid > 1 && t <= dd0->nguid) {
- u8 oguid;
-
- dd->base_guid = dd0->base_guid;
- bguid = (u8 *) &dd->base_guid;
-
- oguid = bguid[7];
- bguid[7] += t;
- if (oguid > bguid[7]) {
- if (bguid[6] == 0xff) {
- if (bguid[5] == 0xff) {
- qib_dev_err(dd,
- "Can't set %s GUID from base, wraps to OUI!\n",
- qib_get_unit_name(t));
- dd->base_guid = 0;
- goto bail;
- }
- bguid[5]++;
- }
- bguid[6]++;
- }
- dd->nguid = 1;
- goto bail;
- }
-
- /*
- * Read full flash, not just currently used part, since it may have
- * been written with a newer definition.
- */
- len = sizeof(struct qib_flash);
- buf = vmalloc(len);
- if (!buf) {
- qib_dev_err(dd,
- "Couldn't allocate memory to read %u bytes from eeprom for GUID\n",
- len);
- goto bail;
- }
-
- /*
- * Use "public" eeprom read function, which does locking and
- * figures out device. This will migrate to chip-specific.
- */
- eep_stat = qib_eeprom_read(dd, 0, buf, len);
-
- if (eep_stat) {
- qib_dev_err(dd, "Failed reading GUID from eeprom\n");
- goto done;
- }
- ifp = (struct qib_flash *)buf;
-
- csum = flash_csum(ifp, 0);
- if (csum != ifp->if_csum) {
- qib_devinfo(dd->pcidev,
- "Bad I2C flash checksum: 0x%x, not 0x%x\n",
- csum, ifp->if_csum);
- goto done;
- }
- if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
- *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
- qib_dev_err(dd,
- "Invalid GUID %llx from flash; ignoring\n",
- *(unsigned long long *) ifp->if_guid);
- /* don't allow GUID if all 0 or all 1's */
- goto done;
- }
-
- /* complain, but allow it */
- if (*(u64 *) ifp->if_guid == 0x100007511000000ULL)
- qib_devinfo(dd->pcidev,
- "Warning, GUID %llx is default, probably not correct!\n",
- *(unsigned long long *) ifp->if_guid);
-
- bguid = ifp->if_guid;
- if (!bguid[0] && !bguid[1] && !bguid[2]) {
- /*
- * Original incorrect GUID format in flash; fix in
- * core copy, by shifting up 2 octets; don't need to
- * change top octet, since both it and shifted are 0.
- */
- bguid[1] = bguid[3];
- bguid[2] = bguid[4];
- bguid[3] = 0;
- bguid[4] = 0;
- guid = *(__be64 *) ifp->if_guid;
- } else
- guid = *(__be64 *) ifp->if_guid;
- dd->base_guid = guid;
- dd->nguid = ifp->if_numguid;
- /*
- * Things are slightly complicated by the desire to transparently
- * support both the Pathscale 10-digit serial number and the QLogic
- * 13-character version.
- */
- if ((ifp->if_fversion > 1) && ifp->if_sprefix[0] &&
- ((u8 *) ifp->if_sprefix)[0] != 0xFF) {
- char *snp = dd->serial;
-
- /*
- * This board has a Serial-prefix, which is stored
- * elsewhere for backward-compatibility.
- */
- memcpy(snp, ifp->if_sprefix, sizeof(ifp->if_sprefix));
- snp[sizeof(ifp->if_sprefix)] = '\0';
- len = strlen(snp);
- snp += len;
- len = sizeof(dd->serial) - len;
- if (len > sizeof(ifp->if_serial))
- len = sizeof(ifp->if_serial);
- memcpy(snp, ifp->if_serial, len);
- } else {
- memcpy(dd->serial, ifp->if_serial, sizeof(ifp->if_serial));
- }
- if (!strstr(ifp->if_comment, "Tested successfully"))
- qib_dev_err(dd,
- "Board SN %s did not pass functional test: %s\n",
- dd->serial, ifp->if_comment);
-
-done:
- vfree(buf);
-
-bail:;
-}
-
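The per-unit GUID derivation near the top of qib_get_eeprom_info() is an increment of the low GUID octet with a manual carry into the next two octets, refusing to carry into the vendor OUI. A stand-alone sketch of that byte arithmetic, using a made-up base GUID:

#include <stdio.h>
#include <stdint.h>

/* Mirror the carry handling in qib_get_eeprom_info(): bump the last octet
 * by 'unit' and, on wrap, propagate into octets 6 and 5; give up rather
 * than carry into the vendor OUI (octets 0-2). Returns 0 on success. */
static int derive_guid(uint8_t guid[8], unsigned int unit)
{
        uint8_t old = guid[7];

        guid[7] += unit;
        if (old > guid[7]) {                    /* wrapped */
                if (guid[6] == 0xff) {
                        if (guid[5] == 0xff)
                                return -1;      /* would spill into the OUI */
                        guid[5]++;
                }
                guid[6]++;
        }
        return 0;
}

int main(void)
{
        uint8_t guid[8] = { 0x00, 0x11, 0x75, 0x00, 0x00, 0x00, 0x00, 0xfe };

        if (!derive_guid(guid, 3))
                printf("derived ...:%02x:%02x:%02x\n", guid[5], guid[6], guid[7]);
        return 0;
}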
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
deleted file mode 100644
index e449e394963f..000000000000
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ /dev/null
@@ -1,2418 +0,0 @@
-/*
- * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/poll.h>
-#include <linux/cdev.h>
-#include <linux/swap.h>
-#include <linux/vmalloc.h>
-#include <linux/highmem.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <asm/pgtable.h>
-#include <linux/delay.h>
-#include <linux/export.h>
-#include <linux/uio.h>
-
-#include "qib.h"
-#include "qib_common.h"
-#include "qib_user_sdma.h"
-
-#undef pr_fmt
-#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
-
-static int qib_open(struct inode *, struct file *);
-static int qib_close(struct inode *, struct file *);
-static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
-static ssize_t qib_write_iter(struct kiocb *, struct iov_iter *);
-static unsigned int qib_poll(struct file *, struct poll_table_struct *);
-static int qib_mmapf(struct file *, struct vm_area_struct *);
-
-/*
- * This is really, really weird shit - write() and writev() here
- * have completely unrelated semantics. Sucky userland ABI,
- * film at 11.
- */
-static const struct file_operations qib_file_ops = {
- .owner = THIS_MODULE,
- .write = qib_write,
- .write_iter = qib_write_iter,
- .open = qib_open,
- .release = qib_close,
- .poll = qib_poll,
- .mmap = qib_mmapf,
- .llseek = noop_llseek,
-};
-
-/*
- * Convert kernel virtual addresses to physical addresses so they don't
- * potentially conflict with the chip addresses used as mmap offsets.
- * It doesn't really matter what mmap offset we use as long as we can
- * interpret it correctly.
- */
-static u64 cvt_kvaddr(void *p)
-{
- struct page *page;
- u64 paddr = 0;
-
- page = vmalloc_to_page(p);
- if (page)
- paddr = page_to_pfn(page) << PAGE_SHIFT;
-
- return paddr;
-}
-
-static int qib_get_base_info(struct file *fp, void __user *ubase,
- size_t ubase_size)
-{
- struct qib_ctxtdata *rcd = ctxt_fp(fp);
- int ret = 0;
- struct qib_base_info *kinfo = NULL;
- struct qib_devdata *dd = rcd->dd;
- struct qib_pportdata *ppd = rcd->ppd;
- unsigned subctxt_cnt;
- int shared, master;
- size_t sz;
-
- subctxt_cnt = rcd->subctxt_cnt;
- if (!subctxt_cnt) {
- shared = 0;
- master = 0;
- subctxt_cnt = 1;
- } else {
- shared = 1;
- master = !subctxt_fp(fp);
- }
-
- sz = sizeof(*kinfo);
- /* If context sharing is not requested, allow the old size structure */
- if (!shared)
- sz -= 7 * sizeof(u64);
- if (ubase_size < sz) {
- ret = -EINVAL;
- goto bail;
- }
-
- kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
- if (kinfo == NULL) {
- ret = -ENOMEM;
- goto bail;
- }
-
- ret = dd->f_get_base_info(rcd, kinfo);
- if (ret < 0)
- goto bail;
-
- kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt;
- kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize;
- kinfo->spi_tidegrcnt = rcd->rcvegrcnt;
- kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize;
- /*
- * have to mmap whole thing
- */
- kinfo->spi_rcv_egrbuftotlen =
- rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
- kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk;
- kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
- rcd->rcvegrbuf_chunks;
- kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt;
- if (master)
- kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
- /*
- * For this use, this may be cfgctxts summed over all chips that
- * are configured and present.
- */
- kinfo->spi_nctxts = dd->cfgctxts;
- /* unit (chip/board) our context is on */
- kinfo->spi_unit = dd->unit;
- kinfo->spi_port = ppd->port;
- /* for now, only a single page */
- kinfo->spi_tid_maxsize = PAGE_SIZE;
-
- /*
- * Doing this per context, and based on the skip value, etc. This has
- * to be the actual buffer size, since the protocol code treats it
- * as an array.
- *
- * These have to be set to user addresses in the user code via mmap.
- * These values are used on return to user code for the mmap target
- * addresses only. For 32 bit, same 44 bit address problem, so use
- * the physical address, not virtual. Before 2.6.11, using the
- * page_address() macro worked, but in 2.6.11, even that returns the
- * full 64 bit address (upper bits all 1's). So far, using the
- * physical addresses (or chip offsets, for chip mapping) works, but
- * no doubt some future kernel release will change that, and we'll be
- * on to yet another method of dealing with this.
- * Normally only one of rcvhdr_tailaddr or rhf_offset is useful
- * since the chips with non-zero rhf_offset don't normally
- * enable tail register updates to host memory, but for testing,
- * both can be enabled and used.
- */
- kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys;
- kinfo->spi_rcvhdr_tailaddr = (u64) rcd->rcvhdrqtailaddr_phys;
- kinfo->spi_rhf_offset = dd->rhf_offset;
- kinfo->spi_rcv_egrbufs = (u64) rcd->rcvegr_phys;
- kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys;
- /* setup per-unit (not port) status area for user programs */
- kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
- (char *) ppd->statusp -
- (char *) dd->pioavailregs_dma;
- kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;
- if (!shared) {
- kinfo->spi_piocnt = rcd->piocnt;
- kinfo->spi_piobufbase = (u64) rcd->piobufs;
- kinfo->spi_sendbuf_status = cvt_kvaddr(rcd->user_event_mask);
- } else if (master) {
- kinfo->spi_piocnt = (rcd->piocnt / subctxt_cnt) +
- (rcd->piocnt % subctxt_cnt);
- /* Master's PIO buffers are after all the slaves' */
- kinfo->spi_piobufbase = (u64) rcd->piobufs +
- dd->palign *
- (rcd->piocnt - kinfo->spi_piocnt);
- } else {
- unsigned slave = subctxt_fp(fp) - 1;
-
- kinfo->spi_piocnt = rcd->piocnt / subctxt_cnt;
- kinfo->spi_piobufbase = (u64) rcd->piobufs +
- dd->palign * kinfo->spi_piocnt * slave;
- }
-
- if (shared) {
- kinfo->spi_sendbuf_status =
- cvt_kvaddr(&rcd->user_event_mask[subctxt_fp(fp)]);
- /* only spi_subctxt_* fields should be set in this block! */
- kinfo->spi_subctxt_uregbase = cvt_kvaddr(rcd->subctxt_uregbase);
-
- kinfo->spi_subctxt_rcvegrbuf =
- cvt_kvaddr(rcd->subctxt_rcvegrbuf);
- kinfo->spi_subctxt_rcvhdr_base =
- cvt_kvaddr(rcd->subctxt_rcvhdr_base);
- }
-
- /*
- * All user buffers are 2KB buffers. If we ever support
- * giving 4KB buffers to user processes, this will need some
- * work. Can't use piobufbase directly, because it has
- * both 2K and 4K buffer base values.
- */
- kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) /
- dd->palign;
- kinfo->spi_pioalign = dd->palign;
- kinfo->spi_qpair = QIB_KD_QP;
- /*
- * user mode PIO buffers are always 2KB, even when 4KB can
- * be received, and sent via the kernel; this is ibmaxlen
- * for 2K MTU.
- */
- kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32);
- kinfo->spi_mtu = ppd->ibmaxlen; /* maxlen, not ibmtu */
- kinfo->spi_ctxt = rcd->ctxt;
- kinfo->spi_subctxt = subctxt_fp(fp);
- kinfo->spi_sw_version = QIB_KERN_SWVERSION;
- kinfo->spi_sw_version |= 1U << 31; /* QLogic-built, not kernel.org */
- kinfo->spi_hw_version = dd->revision;
-
- if (master)
- kinfo->spi_runtime_flags |= QIB_RUNTIME_MASTER;
-
- sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
- if (copy_to_user(ubase, kinfo, sz))
- ret = -EFAULT;
-bail:
- kfree(kinfo);
- return ret;
-}
-
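The way qib_get_base_info() carves a context's PIO buffers up among subcontexts (the master takes the remainder share and sits after all of the slaves) can be shown in isolation. A stand-alone sketch with dd->palign replaced by a placeholder alignment and made-up counts:

#include <stdio.h>

#define PALIGN 2048u    /* placeholder buffer alignment, illustration only */

int main(void)
{
        unsigned int piocnt = 10, subctxt_cnt = 3, base = 0x100000, s;

        /* slaves get piocnt / subctxt_cnt buffers each ... */
        unsigned int slave_cnt   = piocnt / subctxt_cnt;
        /* ... the master gets the same share plus the remainder, and its
         * buffers start after all of the slaves' buffers */
        unsigned int master_cnt  = slave_cnt + piocnt % subctxt_cnt;
        unsigned int master_base = base + PALIGN * (piocnt - master_cnt);

        for (s = 1; s < subctxt_cnt; s++)
                printf("slave %u: %u bufs at 0x%x\n", s, slave_cnt,
                       base + PALIGN * slave_cnt * (s - 1));
        printf("master : %u bufs at 0x%x\n", master_cnt, master_base);
        return 0;
}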
-/**
- * qib_tid_update - update a context TID
- * @rcd: the context
- * @fp: the qib device file
- * @ti: the TID information
- *
- * The new implementation as of Oct 2004 is that the driver assigns
- * the tid and returns it to the caller. To reduce search time, we
- * keep a cursor for each context, walking the shadow tid array to find
- * one that's not in use.
- *
- * For now, if we can't allocate the full list, we fail, although
- * in the long run, we'll allocate as many as we can, and the
- * caller will deal with that by trying the remaining pages later.
- * That means that when we fail, we have to mark the tids as not in
- * use again, in our shadow copy.
- *
- * It's up to the caller to free the tids when they are done.
- * We'll unlock the pages as they free them.
- *
- * Also, right now we are locking one page at a time, but since
- * the intended use of this routine is for a single group of
- * virtually contiguous pages, that should change to improve
- * performance.
- */
-static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
- const struct qib_tid_info *ti)
-{
- int ret = 0, ntids;
- u32 tid, ctxttid, cnt, i, tidcnt, tidoff;
- u16 *tidlist;
- struct qib_devdata *dd = rcd->dd;
- u64 physaddr;
- unsigned long vaddr;
- u64 __iomem *tidbase;
- unsigned long tidmap[8];
- struct page **pagep = NULL;
- unsigned subctxt = subctxt_fp(fp);
-
- if (!dd->pageshadow) {
- ret = -ENOMEM;
- goto done;
- }
-
- cnt = ti->tidcnt;
- if (!cnt) {
- ret = -EFAULT;
- goto done;
- }
- ctxttid = rcd->ctxt * dd->rcvtidcnt;
- if (!rcd->subctxt_cnt) {
- tidcnt = dd->rcvtidcnt;
- tid = rcd->tidcursor;
- tidoff = 0;
- } else if (!subctxt) {
- tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
- (dd->rcvtidcnt % rcd->subctxt_cnt);
- tidoff = dd->rcvtidcnt - tidcnt;
- ctxttid += tidoff;
- tid = tidcursor_fp(fp);
- } else {
- tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
- tidoff = tidcnt * (subctxt - 1);
- ctxttid += tidoff;
- tid = tidcursor_fp(fp);
- }
- if (cnt > tidcnt) {
- /* make sure it all fits in tid_pg_list */
- qib_devinfo(dd->pcidev,
- "Process tried to allocate %u TIDs, only trying max (%u)\n",
- cnt, tidcnt);
- cnt = tidcnt;
- }
- pagep = (struct page **) rcd->tid_pg_list;
- tidlist = (u16 *) &pagep[dd->rcvtidcnt];
- pagep += tidoff;
- tidlist += tidoff;
-
- memset(tidmap, 0, sizeof(tidmap));
- /* before decrement; chip actual # */
- ntids = tidcnt;
- tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) +
- dd->rcvtidbase +
- ctxttid * sizeof(*tidbase));
-
- /* virtual address of first page in transfer */
- vaddr = ti->tidvaddr;
- if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
- cnt * PAGE_SIZE)) {
- ret = -EFAULT;
- goto done;
- }
- ret = qib_get_user_pages(vaddr, cnt, pagep);
- if (ret) {
- /*
- * if (ret == -EBUSY)
- * We can't continue because the pagep array won't be
- * initialized. This should never happen,
- * unless perhaps the user has mpin'ed the pages
- * themselves.
- */
- qib_devinfo(
- dd->pcidev,
- "Failed to lock addr %p, %u pages: errno %d\n",
- (void *) vaddr, cnt, -ret);
- goto done;
- }
- for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
- for (; ntids--; tid++) {
- if (tid == tidcnt)
- tid = 0;
- if (!dd->pageshadow[ctxttid + tid])
- break;
- }
- if (ntids < 0) {
- /*
- * Oops, wrapped all the way through their TIDs,
- * and didn't have enough free; see comments at
- * start of routine
- */
- i--; /* last tidlist[i] not filled in */
- ret = -ENOMEM;
- break;
- }
- tidlist[i] = tid + tidoff;
- /* we "know" system pages and TID pages are same size */
- dd->pageshadow[ctxttid + tid] = pagep[i];
- dd->physshadow[ctxttid + tid] =
- qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- /*
- * don't need an atomic op, or its overhead
- */
- __set_bit(tid, tidmap);
- physaddr = dd->physshadow[ctxttid + tid];
- /* PERFORMANCE: below should almost certainly be cached */
- dd->f_put_tid(dd, &tidbase[tid],
- RCVHQ_RCV_TYPE_EXPECTED, physaddr);
- /*
- * don't check this tid in qib_ctxtshadow, since we
- * just filled it in; start with the next one.
- */
- tid++;
- }
-
- if (ret) {
- u32 limit;
-cleanup:
- /* jump here if copy out of updated info failed... */
- /* same code that's in qib_free_tid() */
- limit = sizeof(tidmap) * BITS_PER_BYTE;
- if (limit > tidcnt)
- /* just in case size changes in future */
- limit = tidcnt;
- tid = find_first_bit((const unsigned long *)tidmap, limit);
- for (; tid < limit; tid++) {
- if (!test_bit(tid, tidmap))
- continue;
- if (dd->pageshadow[ctxttid + tid]) {
- dma_addr_t phys;
-
- phys = dd->physshadow[ctxttid + tid];
- dd->physshadow[ctxttid + tid] = dd->tidinvalid;
- /* PERFORMANCE: below should almost certainly
- * be cached
- */
- dd->f_put_tid(dd, &tidbase[tid],
- RCVHQ_RCV_TYPE_EXPECTED,
- dd->tidinvalid);
- pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- dd->pageshadow[ctxttid + tid] = NULL;
- }
- }
- qib_release_user_pages(pagep, cnt);
- } else {
- /*
- * Copy the updated array, with the qib_tids filled in, back
- * to user. Since we did the copy-in already, this "should
- * never fail". If it does, we have to clean up...
- */
- if (copy_to_user((void __user *)
- (unsigned long) ti->tidlist,
- tidlist, cnt * sizeof(*tidlist))) {
- ret = -EFAULT;
- goto cleanup;
- }
- if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
- tidmap, sizeof(tidmap))) {
- ret = -EFAULT;
- goto cleanup;
- }
- if (tid == tidcnt)
- tid = 0;
- if (!rcd->subctxt_cnt)
- rcd->tidcursor = tid;
- else
- tidcursor_fp(fp) = tid;
- }
-
-done:
- return ret;
-}
-
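The allocation loop in qib_tid_update() is a cursor walk over the page-shadow array with wraparound, resuming where the previous call stopped. Below is a minimal stand-alone model of just that cursor logic, with no DMA mapping or chip programming:

#include <stdio.h>

#define TIDCNT 8

static void *shadow[TIDCNT];    /* NULL means the TID slot is free */
static unsigned int cursor;     /* persists across calls, like rcd->tidcursor */

/* Find the next free slot, starting at the saved cursor and wrapping once,
 * the same walk qib_tid_update() does over dd->pageshadow. */
static int alloc_tid(void *page)
{
        int tries;

        for (tries = TIDCNT; tries > 0; tries--, cursor++) {
                if (cursor == TIDCNT)
                        cursor = 0;
                if (!shadow[cursor]) {
                        shadow[cursor] = page;
                        return (int)cursor++;
                }
        }
        return -1;      /* wrapped all the way through: no free TIDs */
}

int main(void)
{
        static char pg[3];
        int i;

        for (i = 0; i < 3; i++)
                printf("page %d -> tid %d\n", i, alloc_tid(&pg[i]));
        return 0;
}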
-/**
- * qib_tid_free - free a context TID
- * @rcd: the context
- * @subctxt: the subcontext
- * @ti: the TID info
- *
- * Right now we are unlocking one page at a time, but since
- * the intended use of this routine is for a single group of
- * virtually contiguous pages, that should change to improve
- * performance. We check that the TID is in range for this context,
- * but otherwise don't check validity; if the user has an error and
- * frees the wrong tid, it's only their own data that can thereby
- * be corrupted. We do check that the TID was in use, for sanity.
- * We always use our idea of the saved address, not the address that
- * they pass in to us.
- */
-static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
- const struct qib_tid_info *ti)
-{
- int ret = 0;
- u32 tid, ctxttid, cnt, limit, tidcnt;
- struct qib_devdata *dd = rcd->dd;
- u64 __iomem *tidbase;
- unsigned long tidmap[8];
-
- if (!dd->pageshadow) {
- ret = -ENOMEM;
- goto done;
- }
-
- if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
- sizeof(tidmap))) {
- ret = -EFAULT;
- goto done;
- }
-
- ctxttid = rcd->ctxt * dd->rcvtidcnt;
- if (!rcd->subctxt_cnt)
- tidcnt = dd->rcvtidcnt;
- else if (!subctxt) {
- tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
- (dd->rcvtidcnt % rcd->subctxt_cnt);
- ctxttid += dd->rcvtidcnt - tidcnt;
- } else {
- tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
- ctxttid += tidcnt * (subctxt - 1);
- }
- tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) +
- dd->rcvtidbase +
- ctxttid * sizeof(*tidbase));
-
- limit = sizeof(tidmap) * BITS_PER_BYTE;
- if (limit > tidcnt)
- /* just in case size changes in future */
- limit = tidcnt;
- tid = find_first_bit(tidmap, limit);
- for (cnt = 0; tid < limit; tid++) {
- /*
- * small optimization; if we detect a run of 3 or so without
- * any set, use find_first_bit again. That's mainly to
- * accelerate the case where we wrapped, so we have some at
- * the beginning, and some at the end, and a big gap
- * in the middle.
- */
- if (!test_bit(tid, tidmap))
- continue;
- cnt++;
- if (dd->pageshadow[ctxttid + tid]) {
- struct page *p;
- dma_addr_t phys;
-
- p = dd->pageshadow[ctxttid + tid];
- dd->pageshadow[ctxttid + tid] = NULL;
- phys = dd->physshadow[ctxttid + tid];
- dd->physshadow[ctxttid + tid] = dd->tidinvalid;
- /* PERFORMANCE: below should almost certainly be
- * cached
- */
- dd->f_put_tid(dd, &tidbase[tid],
- RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
- pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- qib_release_user_pages(&p, 1);
- }
- }
-done:
- return ret;
-}
-
-/**
- * qib_set_part_key - set a partition key
- * @rcd: the context
- * @key: the key
- *
- * We can have up to 4 active at a time (other than the default, which is
- * always allowed). This is somewhat tricky, since multiple contexts may set
- * the same key, so we reference count them, and clean up at exit. All 4
- * partition keys are packed into a single qlogic_ib register. It's an
- * error for a process to set the same pkey multiple times. We provide no
- * mechanism to de-allocate a pkey at this time; we may eventually need to
- * do that. I've used the atomic operations, and no locking, and only make
- * a single pass through what's available. This should be more than
- * adequate for some time. I'll think about spinlocks or the like if and as
- * it's necessary.
- */
-static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
-{
- struct qib_pportdata *ppd = rcd->ppd;
- int i, any = 0, pidx = -1;
- u16 lkey = key & 0x7FFF;
- int ret;
-
- if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF)) {
- /* nothing to do; this key always valid */
- ret = 0;
- goto bail;
- }
-
- if (!lkey) {
- ret = -EINVAL;
- goto bail;
- }
-
- /*
- * Set the full membership bit, because it has to be
- * set in the register or the packet, and it seems
- * cleaner to set in the register than to force all
- * callers to set it.
- */
- key |= 0x8000;
-
- for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
- if (!rcd->pkeys[i] && pidx == -1)
- pidx = i;
- if (rcd->pkeys[i] == key) {
- ret = -EEXIST;
- goto bail;
- }
- }
- if (pidx == -1) {
- ret = -EBUSY;
- goto bail;
- }
- for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
- if (!ppd->pkeys[i]) {
- any++;
- continue;
- }
- if (ppd->pkeys[i] == key) {
- atomic_t *pkrefs = &ppd->pkeyrefs[i];
-
- if (atomic_inc_return(pkrefs) > 1) {
- rcd->pkeys[pidx] = key;
- ret = 0;
- goto bail;
- } else {
- /*
- * lost race, decrement count, catch below
- */
- atomic_dec(pkrefs);
- any++;
- }
- }
- if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
- /*
- * It makes no sense to have both the limited and
- * full membership PKEY set at the same time since
- * the unlimited one will disable the limited one.
- */
- ret = -EEXIST;
- goto bail;
- }
- }
- if (!any) {
- ret = -EBUSY;
- goto bail;
- }
- for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
- if (!ppd->pkeys[i] &&
- atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
- rcd->pkeys[pidx] = key;
- ppd->pkeys[i] = key;
- (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
- ret = 0;
- goto bail;
- }
- }
- ret = -EBUSY;
-
-bail:
- return ret;
-}
-
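Partition keys above are compared on their low 15 bits, with bit 15 carrying full versus limited membership; the driver forces the full-membership bit before storing a key. A stand-alone sketch of those two details:

#include <stdio.h>
#include <stdint.h>

#define FULL_MEMBER 0x8000u

/* Same comparison the driver uses: two pkeys refer to the same partition
 * when their low 15 bits match, regardless of the membership bit. */
static int same_partition(uint16_t a, uint16_t b)
{
        return (a & 0x7FFF) == (b & 0x7FFF);
}

int main(void)
{
        uint16_t requested = 0x1234;                    /* limited-member form */
        uint16_t stored = requested | FULL_MEMBER;      /* what goes in the register */

        printf("stored 0x%04x, same partition as 0x%04x: %s\n",
               (unsigned int)stored, (unsigned int)requested,
               same_partition(stored, requested) ? "yes" : "no");
        return 0;
}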
-/**
- * qib_manage_rcvq - manage a context's receive queue
- * @rcd: the context
- * @subctxt: the subcontext
- * @start_stop: action to carry out
- *
- * start_stop == 0 disables receive on the context, for use in queue
- * overflow conditions. start_stop == 1 re-enables, to be used to
- * re-init the software copy of the head register.
- */
-static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt,
- int start_stop)
-{
- struct qib_devdata *dd = rcd->dd;
- unsigned int rcvctrl_op;
-
- if (subctxt)
- goto bail;
- /* atomically clear receive enable ctxt. */
- if (start_stop) {
- /*
- * On enable, force in-memory copy of the tail register to
- * 0, so that protocol code doesn't have to worry about
- * whether or not the chip has yet updated the in-memory
- * copy on return from the system call. The chip
- * always resets its tail register back to 0 on a
- * transition from disabled to enabled.
- */
- if (rcd->rcvhdrtail_kvaddr)
- qib_clear_rcvhdrtail(rcd);
- rcvctrl_op = QIB_RCVCTRL_CTXT_ENB;
- } else
- rcvctrl_op = QIB_RCVCTRL_CTXT_DIS;
- dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);
- /* always; new head should be equal to new tail; see above */
-bail:
- return 0;
-}
-
-static void qib_clean_part_key(struct qib_ctxtdata *rcd,
- struct qib_devdata *dd)
-{
- int i, j, pchanged = 0;
- u64 oldpkey;
- struct qib_pportdata *ppd = rcd->ppd;
-
- /* for debugging only */
- oldpkey = (u64) ppd->pkeys[0] |
- ((u64) ppd->pkeys[1] << 16) |
- ((u64) ppd->pkeys[2] << 32) |
- ((u64) ppd->pkeys[3] << 48);
-
- for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
- if (!rcd->pkeys[i])
- continue;
- for (j = 0; j < ARRAY_SIZE(ppd->pkeys); j++) {
- /* check for match independent of the global bit */
- if ((ppd->pkeys[j] & 0x7fff) !=
- (rcd->pkeys[i] & 0x7fff))
- continue;
- if (atomic_dec_and_test(&ppd->pkeyrefs[j])) {
- ppd->pkeys[j] = 0;
- pchanged++;
- }
- break;
- }
- rcd->pkeys[i] = 0;
- }
- if (pchanged)
- (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
-}
-
-/* common code for the mappings on dma_alloc_coherent mem */
-static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
- unsigned len, void *kvaddr, u32 write_ok, char *what)
-{
- struct qib_devdata *dd = rcd->dd;
- unsigned long pfn;
- int ret;
-
- if ((vma->vm_end - vma->vm_start) > len) {
- qib_devinfo(dd->pcidev,
- "FAIL on %s: len %lx > %x\n", what,
- vma->vm_end - vma->vm_start, len);
- ret = -EFAULT;
- goto bail;
- }
-
- /*
- * Shared-context user code requires the rcvhdrq mapped r/w; others
- * are only allowed a read-only mapping.
- */
- if (!write_ok) {
- if (vma->vm_flags & VM_WRITE) {
- qib_devinfo(dd->pcidev,
- "%s must be mapped readonly\n", what);
- ret = -EPERM;
- goto bail;
- }
-
- /* don't allow them to later change with mprotect */
- vma->vm_flags &= ~VM_MAYWRITE;
- }
-
- pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
- ret = remap_pfn_range(vma, vma->vm_start, pfn,
- len, vma->vm_page_prot);
- if (ret)
- qib_devinfo(dd->pcidev,
- "%s ctxt%u mmap of %lx, %x bytes failed: %d\n",
- what, rcd->ctxt, pfn, len, ret);
-bail:
- return ret;
-}
-
-static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
- u64 ureg)
-{
- unsigned long phys;
- unsigned long sz;
- int ret;
-
- /*
- * This is real hardware, so use io_remap. This is the mechanism
- * for the user process to update the head registers for their ctxt
- * in the chip.
- */
- sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
- if ((vma->vm_end - vma->vm_start) > sz) {
- qib_devinfo(dd->pcidev,
- "FAIL mmap userreg: reqlen %lx > PAGE\n",
- vma->vm_end - vma->vm_start);
- ret = -EFAULT;
- } else {
- phys = dd->physaddr + ureg;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
- ret = io_remap_pfn_range(vma, vma->vm_start,
- phys >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
- }
- return ret;
-}
-
-static int mmap_piobufs(struct vm_area_struct *vma,
- struct qib_devdata *dd,
- struct qib_ctxtdata *rcd,
- unsigned piobufs, unsigned piocnt)
-{
- unsigned long phys;
- int ret;
-
- /*
- * When we map the PIO buffers in the chip, we want to map them as
- * writeonly, no read possible; unfortunately, x86 doesn't allow
- * for this in hardware, but we still prevent users from asking
- * for it.
- */
- if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
- qib_devinfo(dd->pcidev,
- "FAIL mmap piobufs: reqlen %lx > PAGE\n",
- vma->vm_end - vma->vm_start);
- ret = -EINVAL;
- goto bail;
- }
-
- phys = dd->physaddr + piobufs;
-
-#if defined(__powerpc__)
- /* There isn't a generic way to specify writethrough mappings */
- pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
- pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
- pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
-#endif
-
- /*
- * don't allow them to later change to readable with mprotect (for when
- * not initially mapped readable, as is normally the case)
- */
- vma->vm_flags &= ~VM_MAYREAD;
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
-
- /* We used PAT if wc_cookie == 0 */
- if (!dd->wc_cookie)
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
- ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
-bail:
- return ret;
-}
-
-static int mmap_rcvegrbufs(struct vm_area_struct *vma,
- struct qib_ctxtdata *rcd)
-{
- struct qib_devdata *dd = rcd->dd;
- unsigned long start, size;
- size_t total_size, i;
- unsigned long pfn;
- int ret;
-
- size = rcd->rcvegrbuf_size;
- total_size = rcd->rcvegrbuf_chunks * size;
- if ((vma->vm_end - vma->vm_start) > total_size) {
- qib_devinfo(dd->pcidev,
- "FAIL on egr bufs: reqlen %lx > actual %lx\n",
- vma->vm_end - vma->vm_start,
- (unsigned long) total_size);
- ret = -EINVAL;
- goto bail;
- }
-
- if (vma->vm_flags & VM_WRITE) {
- qib_devinfo(dd->pcidev,
- "Can't map eager buffers as writable (flags=%lx)\n",
- vma->vm_flags);
- ret = -EPERM;
- goto bail;
- }
- /* don't allow them to later change to writeable with mprotect */
- vma->vm_flags &= ~VM_MAYWRITE;
-
- start = vma->vm_start;
-
- for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) {
- pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT;
- ret = remap_pfn_range(vma, start, pfn, size,
- vma->vm_page_prot);
- if (ret < 0)
- goto bail;
- }
- ret = 0;
-
-bail:
- return ret;
-}
-
-/*
- * qib_file_vma_fault - handle a VMA page fault.
- */
-static int qib_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct page *page;
-
- page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
- if (!page)
- return VM_FAULT_SIGBUS;
-
- get_page(page);
- vmf->page = page;
-
- return 0;
-}
-
-static const struct vm_operations_struct qib_file_vm_ops = {
- .fault = qib_file_vma_fault,
-};
-
-static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
- struct qib_ctxtdata *rcd, unsigned subctxt)
-{
- struct qib_devdata *dd = rcd->dd;
- unsigned subctxt_cnt;
- unsigned long len;
- void *addr;
- size_t size;
- int ret = 0;
-
- subctxt_cnt = rcd->subctxt_cnt;
- size = rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
-
- /*
- * Each process has all the subctxt uregbase, rcvhdrq, and
- * rcvegrbufs mmapped - as an array for all the processes,
- * and also separately for this process.
- */
- if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase)) {
- addr = rcd->subctxt_uregbase;
- size = PAGE_SIZE * subctxt_cnt;
- } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base)) {
- addr = rcd->subctxt_rcvhdr_base;
- size = rcd->rcvhdrq_size * subctxt_cnt;
- } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf)) {
- addr = rcd->subctxt_rcvegrbuf;
- size *= subctxt_cnt;
- } else if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase +
- PAGE_SIZE * subctxt)) {
- addr = rcd->subctxt_uregbase + PAGE_SIZE * subctxt;
- size = PAGE_SIZE;
- } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base +
- rcd->rcvhdrq_size * subctxt)) {
- addr = rcd->subctxt_rcvhdr_base +
- rcd->rcvhdrq_size * subctxt;
- size = rcd->rcvhdrq_size;
- } else if (pgaddr == cvt_kvaddr(&rcd->user_event_mask[subctxt])) {
- addr = rcd->user_event_mask;
- size = PAGE_SIZE;
- } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf +
- size * subctxt)) {
- addr = rcd->subctxt_rcvegrbuf + size * subctxt;
- /* rcvegrbufs are read-only on the slave */
- if (vma->vm_flags & VM_WRITE) {
- qib_devinfo(dd->pcidev,
- "Can't map eager buffers as writable (flags=%lx)\n",
- vma->vm_flags);
- ret = -EPERM;
- goto bail;
- }
- /*
- * Don't allow permission to later change to writeable
- * with mprotect.
- */
- vma->vm_flags &= ~VM_MAYWRITE;
- } else
- goto bail;
- len = vma->vm_end - vma->vm_start;
- if (len > size) {
- ret = -EINVAL;
- goto bail;
- }
-
- vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
- vma->vm_ops = &qib_file_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- ret = 1;
-
-bail:
- return ret;
-}
-
-/**
- * qib_mmapf - mmap various structures into user space
- * @fp: the file pointer
- * @vma: the VM area
- *
- * We use this to have a shared buffer between the kernel and the user code
- * for the rcvhdr queue, egr buffers, and the per-context user regs and pio
- * buffers in the chip. We have the open and close entries so we can bump
- * the ref count and keep the driver from being unloaded while still mapped.
- */
-static int qib_mmapf(struct file *fp, struct vm_area_struct *vma)
-{
- struct qib_ctxtdata *rcd;
- struct qib_devdata *dd;
- u64 pgaddr, ureg;
- unsigned piobufs, piocnt;
- int ret, match = 1;
-
- rcd = ctxt_fp(fp);
- if (!rcd || !(vma->vm_flags & VM_SHARED)) {
- ret = -EINVAL;
- goto bail;
- }
- dd = rcd->dd;
-
- /*
- * This is the qib_do_user_init() code, mapping the shared buffers
- * and per-context user registers into the user process. The address
- * referred to by vm_pgoff is the file offset passed via mmap().
- * For shared contexts, this is the kernel vmalloc() address of the
- * pages to share with the master.
- * For non-shared or master ctxts, this is a physical address.
- * We only do one mmap for each space mapped.
- */
- pgaddr = vma->vm_pgoff << PAGE_SHIFT;
-
- /*
- * Check for 0 in case one of the allocations failed, but user
- * called mmap anyway.
- */
- if (!pgaddr) {
- ret = -EINVAL;
- goto bail;
- }
-
- /*
- * Physical addresses must fit in 40 bits for our hardware.
- * Check for kernel virtual addresses first, anything else must
- * match a HW or memory address.
- */
- ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp));
- if (ret) {
- if (ret > 0)
- ret = 0;
- goto bail;
- }
-
- ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;
- if (!rcd->subctxt_cnt) {
- /* ctxt is not shared */
- piocnt = rcd->piocnt;
- piobufs = rcd->piobufs;
- } else if (!subctxt_fp(fp)) {
- /* caller is the master */
- piocnt = (rcd->piocnt / rcd->subctxt_cnt) +
- (rcd->piocnt % rcd->subctxt_cnt);
- piobufs = rcd->piobufs +
- dd->palign * (rcd->piocnt - piocnt);
- } else {
- unsigned slave = subctxt_fp(fp) - 1;
-
- /* caller is a slave */
- piocnt = rcd->piocnt / rcd->subctxt_cnt;
- piobufs = rcd->piobufs + dd->palign * piocnt * slave;
- }
-
- if (pgaddr == ureg)
- ret = mmap_ureg(vma, dd, ureg);
- else if (pgaddr == piobufs)
- ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt);
- else if (pgaddr == dd->pioavailregs_phys)
- /* in-memory copy of pioavail registers */
- ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
- (void *) dd->pioavailregs_dma, 0,
- "pioavail registers");
- else if (pgaddr == rcd->rcvegr_phys)
- ret = mmap_rcvegrbufs(vma, rcd);
- else if (pgaddr == (u64) rcd->rcvhdrq_phys)
- /*
- * The rcvhdrq itself; multiple pages, contiguous
- * from an i/o perspective. Shared contexts need
- * to map r/w, so we allow writing.
- */
- ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size,
- rcd->rcvhdrq, 1, "rcvhdrq");
- else if (pgaddr == (u64) rcd->rcvhdrqtailaddr_phys)
- /* in-memory copy of rcvhdrq tail register */
- ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
- rcd->rcvhdrtail_kvaddr, 0,
- "rcvhdrq tail");
- else
- match = 0;
- if (!match)
- ret = -EINVAL;
-
- vma->vm_private_data = NULL;
-
- if (ret < 0)
- qib_devinfo(dd->pcidev,
- "mmap Failure %d: off %llx len %lx\n",
- -ret, (unsigned long long)pgaddr,
- vma->vm_end - vma->vm_start);
-bail:
- return ret;
-}
-
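qib_mmapf() is essentially a dispatcher: the page-aligned mmap offset is compared against the handful of addresses previously exported through qib_get_base_info(), and each match selects a different mapping helper. A toy stand-alone version of that dispatch pattern, with made-up region addresses:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Made-up region table standing in for ureg, piobufs, pioavail, etc. */
struct region { uint64_t addr; const char *name; };

static const struct region regions[] = {
        { 0x100000, "user registers" },
        { 0x200000, "PIO buffers" },
        { 0x300000, "pioavail copy" },
        { 0x400000, "eager buffers" },
        { 0x500000, "rcvhdr queue" },
};

/* Match the requested offset against the known regions, as qib_mmapf()
 * matches pgaddr against ureg, piobufs, pioavailregs_phys and friends. */
static const char *dispatch(uint64_t pgaddr)
{
        size_t i;

        for (i = 0; i < sizeof(regions) / sizeof(regions[0]); i++)
                if (pgaddr == regions[i].addr)
                        return regions[i].name;
        return NULL;    /* no match: the driver returns -EINVAL here */
}

int main(void)
{
        const char *what = dispatch(0x300000);

        printf("offset 0x300000 -> %s\n", what ? what : "EINVAL");
        return 0;
}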
-static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
- struct file *fp,
- struct poll_table_struct *pt)
-{
- struct qib_devdata *dd = rcd->dd;
- unsigned pollflag;
-
- poll_wait(fp, &rcd->wait, pt);
-
- spin_lock_irq(&dd->uctxt_lock);
- if (rcd->urgent != rcd->urgent_poll) {
- pollflag = POLLIN | POLLRDNORM;
- rcd->urgent_poll = rcd->urgent;
- } else {
- pollflag = 0;
- set_bit(QIB_CTXT_WAITING_URG, &rcd->flag);
- }
- spin_unlock_irq(&dd->uctxt_lock);
-
- return pollflag;
-}
-
-static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
- struct file *fp,
- struct poll_table_struct *pt)
-{
- struct qib_devdata *dd = rcd->dd;
- unsigned pollflag;
-
- poll_wait(fp, &rcd->wait, pt);
-
- spin_lock_irq(&dd->uctxt_lock);
- if (dd->f_hdrqempty(rcd)) {
- set_bit(QIB_CTXT_WAITING_RCV, &rcd->flag);
- dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
- pollflag = 0;
- } else
- pollflag = POLLIN | POLLRDNORM;
- spin_unlock_irq(&dd->uctxt_lock);
-
- return pollflag;
-}
-
-static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
-{
- struct qib_ctxtdata *rcd;
- unsigned pollflag;
-
- rcd = ctxt_fp(fp);
- if (!rcd)
- pollflag = POLLERR;
- else if (rcd->poll_type == QIB_POLL_TYPE_URGENT)
- pollflag = qib_poll_urgent(rcd, fp, pt);
- else if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV)
- pollflag = qib_poll_next(rcd, fp, pt);
- else /* invalid */
- pollflag = POLLERR;
-
- return pollflag;
-}
-
-static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
-{
- struct qib_filedata *fd = fp->private_data;
- const unsigned int weight = cpumask_weight(&current->cpus_allowed);
- const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus);
- int local_cpu;
-
- /*
- * If the process has NOT already set its affinity, select and
- * reserve a processor for it on the local NUMA node.
- */
- if ((weight >= qib_cpulist_count) &&
- (cpumask_weight(local_mask) <= qib_cpulist_count)) {
- for_each_cpu(local_cpu, local_mask)
- if (!test_and_set_bit(local_cpu, qib_cpulist)) {
- fd->rec_cpu_num = local_cpu;
- return;
- }
- }
-
- /*
- * If the process has NOT already set its affinity, select and
- * reserve a processor for it, as a rendezvous for all
- * users of the driver. If they don't actually later
- * set affinity to this cpu, or set it to some other cpu,
- * it just means that sooner or later we don't recommend
- * a cpu, and let the scheduler do its best.
- */
- if (weight >= qib_cpulist_count) {
- int cpu;
-
- cpu = find_first_zero_bit(qib_cpulist,
- qib_cpulist_count);
- if (cpu == qib_cpulist_count)
- qib_dev_err(dd,
- "no cpus avail for affinity PID %u\n",
- current->pid);
- else {
- __set_bit(cpu, qib_cpulist);
- fd->rec_cpu_num = cpu;
- }
- }
-}
-
-/*
- * Check that userland and driver are compatible for subcontexts.
- */
-static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
-{
- /* this code is written long-hand for clarity */
- if (QIB_USER_SWMAJOR != user_swmajor) {
- /* no promise of compatibility if major mismatch */
- return 0;
- }
- if (QIB_USER_SWMAJOR == 1) {
- switch (QIB_USER_SWMINOR) {
- case 0:
- case 1:
- case 2:
- /* no subctxt implementation so cannot be compatible */
- return 0;
- case 3:
- /* 3 is only compatible with itself */
- return user_swminor == 3;
- default:
- /* >= 4 are compatible (or are expected to be) */
- return user_swminor <= QIB_USER_SWMINOR;
- }
- }
- /* make no promises yet for future major versions */
- return 0;
-}
-
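The userversion word checked above splits into a major in the upper 16 bits and a minor in the lower 16, and the 1.x compatibility rules branch on the driver's own minor. A small stand-alone mirror of that check; the QIB_USER_SW* values here are placeholders, not the real release numbers:

#include <stdio.h>

#define QIB_USER_SWMAJOR 1      /* placeholder values, illustration only */
#define QIB_USER_SWMINOR 13

/* Same shape as qib_compatible_subctxts(): majors must match exactly;
 * driver minors 0-2 predate subcontexts, minor 3 only matches itself,
 * and minors >= 4 accept any library minor up to their own. */
static int compatible_subctxts(unsigned int user_major, unsigned int user_minor)
{
        if (user_major != QIB_USER_SWMAJOR || QIB_USER_SWMAJOR != 1)
                return 0;
        if (QIB_USER_SWMINOR <= 2)
                return 0;
        if (QIB_USER_SWMINOR == 3)
                return user_minor == 3;
        return user_minor <= QIB_USER_SWMINOR;
}

int main(void)
{
        unsigned int userversion = (1u << 16) | 11;     /* library reports 1.11 */

        printf("library 1.%u vs driver %d.%d: %s\n", userversion & 0xffff,
               QIB_USER_SWMAJOR, QIB_USER_SWMINOR,
               compatible_subctxts(userversion >> 16, userversion & 0xffff) ?
               "compatible" : "incompatible");
        return 0;
}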
-static int init_subctxts(struct qib_devdata *dd,
- struct qib_ctxtdata *rcd,
- const struct qib_user_info *uinfo)
-{
- int ret = 0;
- unsigned num_subctxts;
- size_t size;
-
- /*
- * If the user is requesting zero subctxts,
- * skip the subctxt allocation.
- */
- if (uinfo->spu_subctxt_cnt <= 0)
- goto bail;
- num_subctxts = uinfo->spu_subctxt_cnt;
-
- /* Check for subctxt compatibility */
- if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
- uinfo->spu_userversion & 0xffff)) {
- qib_devinfo(dd->pcidev,
- "Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. Ensure that driver and library are from the same release.\n",
- (int) (uinfo->spu_userversion >> 16),
- (int) (uinfo->spu_userversion & 0xffff),
- QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
- goto bail;
- }
- if (num_subctxts > QLOGIC_IB_MAX_SUBCTXT) {
- ret = -EINVAL;
- goto bail;
- }
-
- rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts);
- if (!rcd->subctxt_uregbase) {
- ret = -ENOMEM;
- goto bail;
- }
- /* Note: rcd->rcvhdrq_size isn't initialized yet. */
- size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
- sizeof(u32), PAGE_SIZE) * num_subctxts;
- rcd->subctxt_rcvhdr_base = vmalloc_user(size);
- if (!rcd->subctxt_rcvhdr_base) {
- ret = -ENOMEM;
- goto bail_ureg;
- }
-
- rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks *
- rcd->rcvegrbuf_size *
- num_subctxts);
- if (!rcd->subctxt_rcvegrbuf) {
- ret = -ENOMEM;
- goto bail_rhdr;
- }
-
- rcd->subctxt_cnt = uinfo->spu_subctxt_cnt;
- rcd->subctxt_id = uinfo->spu_subctxt_id;
- rcd->active_slaves = 1;
- rcd->redirect_seq_cnt = 1;
- set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
- goto bail;
-
-bail_rhdr:
- vfree(rcd->subctxt_rcvhdr_base);
-bail_ureg:
- vfree(rcd->subctxt_uregbase);
- rcd->subctxt_uregbase = NULL;
-bail:
- return ret;
-}
-
-static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
- struct file *fp, const struct qib_user_info *uinfo)
-{
- struct qib_filedata *fd = fp->private_data;
- struct qib_devdata *dd = ppd->dd;
- struct qib_ctxtdata *rcd;
- void *ptmp = NULL;
- int ret;
- int numa_id;
-
- assign_ctxt_affinity(fp, dd);
-
- numa_id = qib_numa_aware ? ((fd->rec_cpu_num != -1) ?
- cpu_to_node(fd->rec_cpu_num) :
- numa_node_id()) : dd->assigned_node_id;
-
- rcd = qib_create_ctxtdata(ppd, ctxt, numa_id);
-
- /*
- * Allocate memory for use in qib_tid_update() at open to
- * reduce cost of expected send setup per message segment
- */
- if (rcd)
- ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) +
- dd->rcvtidcnt * sizeof(struct page **),
- GFP_KERNEL);
-
- if (!rcd || !ptmp) {
- qib_dev_err(dd,
- "Unable to allocate ctxtdata memory, failing open\n");
- ret = -ENOMEM;
- goto bailerr;
- }
- rcd->userversion = uinfo->spu_userversion;
- ret = init_subctxts(dd, rcd, uinfo);
- if (ret)
- goto bailerr;
- rcd->tid_pg_list = ptmp;
- rcd->pid = current->pid;
- init_waitqueue_head(&dd->rcd[ctxt]->wait);
- strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
- ctxt_fp(fp) = rcd;
- qib_stats.sps_ctxts++;
- dd->freectxts--;
- ret = 0;
- goto bail;
-
-bailerr:
- if (fd->rec_cpu_num != -1)
- __clear_bit(fd->rec_cpu_num, qib_cpulist);
-
- dd->rcd[ctxt] = NULL;
- kfree(rcd);
- kfree(ptmp);
-bail:
- return ret;
-}
-
-static inline int usable(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
-
- return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
- (ppd->lflags & QIBL_LINKACTIVE);
-}
-
-/*
- * Select a context on the given device, either using a requested port
- * or the port based on the context number.
- */
-static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
- const struct qib_user_info *uinfo)
-{
- struct qib_pportdata *ppd = NULL;
- int ret, ctxt;
-
- if (port) {
- if (!usable(dd->pport + port - 1)) {
- ret = -ENETDOWN;
- goto done;
- } else
- ppd = dd->pport + port - 1;
- }
- for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt];
- ctxt++)
- ;
- if (ctxt == dd->cfgctxts) {
- ret = -EBUSY;
- goto done;
- }
- if (!ppd) {
- u32 pidx = ctxt % dd->num_pports;
-
- if (usable(dd->pport + pidx))
- ppd = dd->pport + pidx;
- else {
- for (pidx = 0; pidx < dd->num_pports && !ppd;
- pidx++)
- if (usable(dd->pport + pidx))
- ppd = dd->pport + pidx;
- }
- }
- ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN;
-done:
- return ret;
-}
-
-static int find_free_ctxt(int unit, struct file *fp,
- const struct qib_user_info *uinfo)
-{
- struct qib_devdata *dd = qib_lookup(unit);
- int ret;
-
- if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports))
- ret = -ENODEV;
- else
- ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo);
-
- return ret;
-}
-
-static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
- unsigned alg)
-{
- struct qib_devdata *udd = NULL;
- int ret = 0, devmax, npresent, nup, ndev, dusable = 0, i;
- u32 port = uinfo->spu_port, ctxt;
-
- devmax = qib_count_units(&npresent, &nup);
- if (!npresent) {
- ret = -ENXIO;
- goto done;
- }
- if (nup == 0) {
- ret = -ENETDOWN;
- goto done;
- }
-
- if (alg == QIB_PORT_ALG_ACROSS) {
- unsigned inuse = ~0U;
-
- /* find device (with ACTIVE ports) with fewest ctxts in use */
- for (ndev = 0; ndev < devmax; ndev++) {
- struct qib_devdata *dd = qib_lookup(ndev);
- unsigned cused = 0, cfree = 0, pusable = 0;
-
- if (!dd)
- continue;
- if (port && port <= dd->num_pports &&
- usable(dd->pport + port - 1))
- pusable = 1;
- else
- for (i = 0; i < dd->num_pports; i++)
- if (usable(dd->pport + i))
- pusable++;
- if (!pusable)
- continue;
- for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
- ctxt++)
- if (dd->rcd[ctxt])
- cused++;
- else
- cfree++;
- if (cfree && cused < inuse) {
- udd = dd;
- inuse = cused;
- }
- }
- if (udd) {
- ret = choose_port_ctxt(fp, udd, port, uinfo);
- goto done;
- }
- } else {
- for (ndev = 0; ndev < devmax; ndev++) {
- struct qib_devdata *dd = qib_lookup(ndev);
-
- if (dd) {
- ret = choose_port_ctxt(fp, dd, port, uinfo);
- if (!ret)
- goto done;
- if (ret == -EBUSY)
- dusable++;
- }
- }
- }
- ret = dusable ? -EBUSY : -ENETDOWN;
-
-done:
- return ret;
-}
-
-static int find_shared_ctxt(struct file *fp,
- const struct qib_user_info *uinfo)
-{
- int devmax, ndev, i;
- int ret = 0;
-
- devmax = qib_count_units(NULL, NULL);
-
- for (ndev = 0; ndev < devmax; ndev++) {
- struct qib_devdata *dd = qib_lookup(ndev);
-
- /* device portion of usable() */
- if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
- continue;
- for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
- struct qib_ctxtdata *rcd = dd->rcd[i];
-
- /* Skip ctxts which are not yet open */
- if (!rcd || !rcd->cnt)
- continue;
- /* Skip ctxt if it doesn't match the requested one */
- if (rcd->subctxt_id != uinfo->spu_subctxt_id)
- continue;
- /* Verify the sharing process matches the master */
- if (rcd->subctxt_cnt != uinfo->spu_subctxt_cnt ||
- rcd->userversion != uinfo->spu_userversion ||
- rcd->cnt >= rcd->subctxt_cnt) {
- ret = -EINVAL;
- goto done;
- }
- ctxt_fp(fp) = rcd;
- subctxt_fp(fp) = rcd->cnt++;
- rcd->subpid[subctxt_fp(fp)] = current->pid;
- tidcursor_fp(fp) = 0;
- rcd->active_slaves |= 1 << subctxt_fp(fp);
- ret = 1;
- goto done;
- }
- }
-
-done:
- return ret;
-}
-
-static int qib_open(struct inode *in, struct file *fp)
-{
- /* The real work is performed later in qib_assign_ctxt() */
- fp->private_data = kzalloc(sizeof(struct qib_filedata), GFP_KERNEL);
- if (fp->private_data) /* no cpu affinity by default */
- ((struct qib_filedata *)fp->private_data)->rec_cpu_num = -1;
- return fp->private_data ? 0 : -ENOMEM;
-}
-
-static int find_hca(unsigned int cpu, int *unit)
-{
- int ret = 0, devmax, npresent, nup, ndev;
-
- *unit = -1;
-
- devmax = qib_count_units(&npresent, &nup);
- if (!npresent) {
- ret = -ENXIO;
- goto done;
- }
- if (!nup) {
- ret = -ENETDOWN;
- goto done;
- }
- for (ndev = 0; ndev < devmax; ndev++) {
- struct qib_devdata *dd = qib_lookup(ndev);
-
- if (dd) {
- if (pcibus_to_node(dd->pcidev->bus) < 0) {
- ret = -EINVAL;
- goto done;
- }
- if (cpu_to_node(cpu) ==
- pcibus_to_node(dd->pcidev->bus)) {
- *unit = ndev;
- goto done;
- }
- }
- }
-done:
- return ret;
-}
-
-static int do_qib_user_sdma_queue_create(struct file *fp)
-{
- struct qib_filedata *fd = fp->private_data;
- struct qib_ctxtdata *rcd = fd->rcd;
- struct qib_devdata *dd = rcd->dd;
-
- if (dd->flags & QIB_HAS_SEND_DMA) {
-
- fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
- dd->unit,
- rcd->ctxt,
- fd->subctxt);
- if (!fd->pq)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/*
- * Get the ctxt early, so we can set affinity prior to memory allocation.
- */
-static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
-{
- int ret;
- int i_minor;
- unsigned swmajor, swminor, alg = QIB_PORT_ALG_ACROSS;
-
- /* Check to be sure we haven't already initialized this file */
- if (ctxt_fp(fp)) {
- ret = -EINVAL;
- goto done;
- }
-
- /* for now, if major version is different, bail */
- swmajor = uinfo->spu_userversion >> 16;
- if (swmajor != QIB_USER_SWMAJOR) {
- ret = -ENODEV;
- goto done;
- }
-
- swminor = uinfo->spu_userversion & 0xffff;
-
- if (swminor >= 11 && uinfo->spu_port_alg < QIB_PORT_ALG_COUNT)
- alg = uinfo->spu_port_alg;
-
- mutex_lock(&qib_mutex);
-
- if (qib_compatible_subctxts(swmajor, swminor) &&
- uinfo->spu_subctxt_cnt) {
- ret = find_shared_ctxt(fp, uinfo);
- if (ret > 0) {
- ret = do_qib_user_sdma_queue_create(fp);
- if (!ret)
- assign_ctxt_affinity(fp, (ctxt_fp(fp))->dd);
- goto done_ok;
- }
- }
-
- i_minor = iminor(file_inode(fp)) - QIB_USER_MINOR_BASE;
- if (i_minor)
- ret = find_free_ctxt(i_minor - 1, fp, uinfo);
- else {
- int unit;
- const unsigned int cpu = cpumask_first(&current->cpus_allowed);
- const unsigned int weight =
- cpumask_weight(&current->cpus_allowed);
-
- if (weight == 1 && !test_bit(cpu, qib_cpulist))
- if (!find_hca(cpu, &unit) && unit >= 0)
- if (!find_free_ctxt(unit, fp, uinfo)) {
- ret = 0;
- goto done_chk_sdma;
- }
- ret = get_a_ctxt(fp, uinfo, alg);
- }
-
-done_chk_sdma:
- if (!ret)
- ret = do_qib_user_sdma_queue_create(fp);
-done_ok:
- mutex_unlock(&qib_mutex);
-
-done:
- return ret;
-}
-
-
-static int qib_do_user_init(struct file *fp,
- const struct qib_user_info *uinfo)
-{
- int ret;
- struct qib_ctxtdata *rcd = ctxt_fp(fp);
- struct qib_devdata *dd;
- unsigned uctxt;
-
- /* Subctxts don't need to initialize anything since master did it. */
- if (subctxt_fp(fp)) {
- ret = wait_event_interruptible(rcd->wait,
- !test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag));
- goto bail;
- }
-
- dd = rcd->dd;
-
- /* some ctxts may get extra buffers, calculate that here */
- uctxt = rcd->ctxt - dd->first_user_ctxt;
- if (uctxt < dd->ctxts_extrabuf) {
- rcd->piocnt = dd->pbufsctxt + 1;
- rcd->pio_base = rcd->piocnt * uctxt;
- } else {
- rcd->piocnt = dd->pbufsctxt;
- rcd->pio_base = rcd->piocnt * uctxt +
- dd->ctxts_extrabuf;
- }
-
- /*
- * All user buffers are 2KB buffers. If we ever support
- * giving 4KB buffers to user processes, this will need some
- * work. Can't use piobufbase directly, because it has
- * both 2K and 4K buffer base values. So check and handle.
- */
- if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) {
- if (rcd->pio_base >= dd->piobcnt2k) {
- qib_dev_err(dd,
- "%u:ctxt%u: no 2KB buffers available\n",
- dd->unit, rcd->ctxt);
- ret = -ENOBUFS;
- goto bail;
- }
- rcd->piocnt = dd->piobcnt2k - rcd->pio_base;
- qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n",
- rcd->ctxt, rcd->piocnt);
- }
-
- rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign;
- qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
- TXCHK_CHG_TYPE_USER, rcd);
- /*
- * Try to ensure that processes start up with a consistent avail update
- * for their own range, at least. If the system is very quiet, the
- * in-memory copy might be out of date at startup for this range of
- * buffers when a context gets re-used. Do this after the chg_pioavail
- * and before the rest of setup, so it's "almost certain" the DMA
- * will have occurred (we can't guarantee it 100%, but with this
- * ordering it should be good to many nines), given how much else
- * happens after this.
- */
- dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
-
- /*
- * Now allocate the rcvhdr Q and eager TIDs; skip the TID
- * array for the time being. If rcd->ctxt > chip-supported, we will
- * someday need extra handling here for overflow through ctxt 0.
- */
- ret = qib_create_rcvhdrq(dd, rcd);
- if (!ret)
- ret = qib_setup_eagerbufs(rcd);
- if (ret)
- goto bail_pio;
-
- rcd->tidcursor = 0; /* start at beginning after open */
-
- /* initialize poll variables... */
- rcd->urgent = 0;
- rcd->urgent_poll = 0;
-
- /*
- * Now enable the ctxt for receive.
- * For chips that are set to DMA the tail register to memory
- * when it changes (and when the update bit transitions from
- * 0 to 1), we turn it off and then back on.
- * This will (very briefly) affect any other open ctxts, but the
- * duration is very short, and therefore isn't an issue. We
- * explicitly set the in-memory tail copy to 0 beforehand, so we
- * don't have to wait to be sure the DMA update has happened
- * (chip resets head/tail to 0 on transition to enable).
- */
- if (rcd->rcvhdrtail_kvaddr)
- qib_clear_rcvhdrtail(rcd);
-
- dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB,
- rcd->ctxt);
-
- /* Notify any waiting slaves */
- if (rcd->subctxt_cnt) {
- clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
- wake_up(&rcd->wait);
- }
- return 0;
-
-bail_pio:
- qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
- TXCHK_CHG_TYPE_KERN, rcd);
-bail:
- return ret;
-}
-
-/**
- * unlock_expected_tids - unlock any expected TID entries the context still had in use
- * @rcd: ctxt
- *
- * We don't actually update the chip here, because we do a bulk update
- * below, using f_clear_tids.
- */
-static void unlock_expected_tids(struct qib_ctxtdata *rcd)
-{
- struct qib_devdata *dd = rcd->dd;
- int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
- int i, cnt = 0, maxtid = ctxt_tidbase + dd->rcvtidcnt;
-
- for (i = ctxt_tidbase; i < maxtid; i++) {
- struct page *p = dd->pageshadow[i];
- dma_addr_t phys;
-
- if (!p)
- continue;
-
- phys = dd->physshadow[i];
- dd->physshadow[i] = dd->tidinvalid;
- dd->pageshadow[i] = NULL;
- pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- qib_release_user_pages(&p, 1);
- cnt++;
- }
-}
-
-static int qib_close(struct inode *in, struct file *fp)
-{
- int ret = 0;
- struct qib_filedata *fd;
- struct qib_ctxtdata *rcd;
- struct qib_devdata *dd;
- unsigned long flags;
- unsigned ctxt;
- pid_t pid;
-
- mutex_lock(&qib_mutex);
-
- fd = fp->private_data;
- fp->private_data = NULL;
- rcd = fd->rcd;
- if (!rcd) {
- mutex_unlock(&qib_mutex);
- goto bail;
- }
-
- dd = rcd->dd;
-
- /* ensure all pio buffer writes in progress are flushed */
- qib_flush_wc();
-
- /* drain user sdma queue */
- if (fd->pq) {
- qib_user_sdma_queue_drain(rcd->ppd, fd->pq);
- qib_user_sdma_queue_destroy(fd->pq);
- }
-
- if (fd->rec_cpu_num != -1)
- __clear_bit(fd->rec_cpu_num, qib_cpulist);
-
- if (--rcd->cnt) {
- /*
- * XXX If the master closes the context before the slave(s),
- * revoke the mmap for the eager receive queue so
- * the slave(s) don't wait for receive data forever.
- */
- rcd->active_slaves &= ~(1 << fd->subctxt);
- rcd->subpid[fd->subctxt] = 0;
- mutex_unlock(&qib_mutex);
- goto bail;
- }
-
- /* early; no interrupt users after this */
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- ctxt = rcd->ctxt;
- dd->rcd[ctxt] = NULL;
- pid = rcd->pid;
- rcd->pid = 0;
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
-
- if (rcd->rcvwait_to || rcd->piowait_to ||
- rcd->rcvnowait || rcd->pionowait) {
- rcd->rcvwait_to = 0;
- rcd->piowait_to = 0;
- rcd->rcvnowait = 0;
- rcd->pionowait = 0;
- }
- if (rcd->flag)
- rcd->flag = 0;
-
- if (dd->kregbase) {
- /* atomically clear receive enable ctxt and intr avail. */
- dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS |
- QIB_RCVCTRL_INTRAVAIL_DIS, ctxt);
-
- /* clean up the pkeys for this ctxt user */
- qib_clean_part_key(rcd, dd);
- qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt);
- qib_chg_pioavailkernel(dd, rcd->pio_base,
- rcd->piocnt, TXCHK_CHG_TYPE_KERN, NULL);
-
- dd->f_clear_tids(dd, rcd);
-
- if (dd->pageshadow)
- unlock_expected_tids(rcd);
- qib_stats.sps_ctxts--;
- dd->freectxts++;
- }
-
- mutex_unlock(&qib_mutex);
- qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */
-
-bail:
- kfree(fd);
- return ret;
-}
-
-static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
-{
- struct qib_ctxt_info info;
- int ret;
- size_t sz;
- struct qib_ctxtdata *rcd = ctxt_fp(fp);
- struct qib_filedata *fd;
-
- fd = fp->private_data;
-
- info.num_active = qib_count_active_units();
- info.unit = rcd->dd->unit;
- info.port = rcd->ppd->port;
- info.ctxt = rcd->ctxt;
- info.subctxt = subctxt_fp(fp);
- /* Number of user ctxts available for this device. */
- info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt;
- info.num_subctxts = rcd->subctxt_cnt;
- info.rec_cpu = fd->rec_cpu_num;
- sz = sizeof(info);
-
- if (copy_to_user(uinfo, &info, sz)) {
- ret = -EFAULT;
- goto bail;
- }
- ret = 0;
-
-bail:
- return ret;
-}
-
-static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq,
- u32 __user *inflightp)
-{
- const u32 val = qib_user_sdma_inflight_counter(pq);
-
- if (put_user(val, inflightp))
- return -EFAULT;
-
- return 0;
-}
-
-static int qib_sdma_get_complete(struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq,
- u32 __user *completep)
-{
- u32 val;
- int err;
-
- if (!pq)
- return -EINVAL;
-
- err = qib_user_sdma_make_progress(ppd, pq);
- if (err < 0)
- return err;
-
- val = qib_user_sdma_complete_counter(pq);
- if (put_user(val, completep))
- return -EFAULT;
-
- return 0;
-}
-
-static int disarm_req_delay(struct qib_ctxtdata *rcd)
-{
- int ret = 0;
-
- if (!usable(rcd->ppd)) {
- int i;
- /*
- * If the link is down, or otherwise not usable, delay
- * the caller up to 30 seconds, so we don't thrash
- * in trying to get the chip back to ACTIVE, and set
- * a flag so the caller knows to make the call again.
- */
- if (rcd->user_event_mask) {
- /*
- * subctxt_cnt is 0 if not shared, so do base
- * separately, first, then remaining subctxt, if any
- */
- set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
- &rcd->user_event_mask[0]);
- for (i = 1; i < rcd->subctxt_cnt; i++)
- set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
- &rcd->user_event_mask[i]);
- }
- for (i = 0; !usable(rcd->ppd) && i < 300; i++)
- msleep(100);
- ret = -ENETDOWN;
- }
- return ret;
-}
-
-/*
- * Find all user contexts in use, and set the specified bit in their
- * event mask.
- * See also find_ctxt() for a similar use that is specific to send buffers.
- */
-int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
-{
- struct qib_ctxtdata *rcd;
- unsigned ctxt;
- int ret = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->dd->uctxt_lock, flags);
- for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
- ctxt++) {
- rcd = ppd->dd->rcd[ctxt];
- if (!rcd)
- continue;
- if (rcd->user_event_mask) {
- int i;
- /*
- * subctxt_cnt is 0 if not shared, so do base
- * separately, first, then remaining subctxt, if any
- */
- set_bit(evtbit, &rcd->user_event_mask[0]);
- for (i = 1; i < rcd->subctxt_cnt; i++)
- set_bit(evtbit, &rcd->user_event_mask[i]);
- }
- ret = 1;
- break;
- }
- spin_unlock_irqrestore(&ppd->dd->uctxt_lock, flags);
-
- return ret;
-}
-
-/*
- * Clear the event notifier events for this context.
- * For the DISARM_BUFS case, we also take action (this obsoletes
- * the older QIB_CMD_DISARM_BUFS, but we keep it for backwards
- * compatibility).
- * Other bits don't currently require action; they are just atomically cleared.
- * The user process then performs whatever action is appropriate to the bit
- * having been set, if desired, and checks again in the future.
- */
-static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt,
- unsigned long events)
-{
- int ret = 0, i;
-
- for (i = 0; i <= _QIB_MAX_EVENT_BIT; i++) {
- if (!test_bit(i, &events))
- continue;
- if (i == _QIB_EVENT_DISARM_BUFS_BIT) {
- (void)qib_disarm_piobufs_ifneeded(rcd);
- ret = disarm_req_delay(rcd);
- } else
- clear_bit(i, &rcd->user_event_mask[subctxt]);
- }
- return ret;
-}
-
-static ssize_t qib_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off)
-{
- const struct qib_cmd __user *ucmd;
- struct qib_ctxtdata *rcd;
- const void __user *src;
- size_t consumed, copy = 0;
- struct qib_cmd cmd;
- ssize_t ret = 0;
- void *dest;
-
- if (count < sizeof(cmd.type)) {
- ret = -EINVAL;
- goto bail;
- }
-
- ucmd = (const struct qib_cmd __user *) data;
-
- if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
- ret = -EFAULT;
- goto bail;
- }
-
- consumed = sizeof(cmd.type);
-
- switch (cmd.type) {
- case QIB_CMD_ASSIGN_CTXT:
- case QIB_CMD_USER_INIT:
- copy = sizeof(cmd.cmd.user_info);
- dest = &cmd.cmd.user_info;
- src = &ucmd->cmd.user_info;
- break;
-
- case QIB_CMD_RECV_CTRL:
- copy = sizeof(cmd.cmd.recv_ctrl);
- dest = &cmd.cmd.recv_ctrl;
- src = &ucmd->cmd.recv_ctrl;
- break;
-
- case QIB_CMD_CTXT_INFO:
- copy = sizeof(cmd.cmd.ctxt_info);
- dest = &cmd.cmd.ctxt_info;
- src = &ucmd->cmd.ctxt_info;
- break;
-
- case QIB_CMD_TID_UPDATE:
- case QIB_CMD_TID_FREE:
- copy = sizeof(cmd.cmd.tid_info);
- dest = &cmd.cmd.tid_info;
- src = &ucmd->cmd.tid_info;
- break;
-
- case QIB_CMD_SET_PART_KEY:
- copy = sizeof(cmd.cmd.part_key);
- dest = &cmd.cmd.part_key;
- src = &ucmd->cmd.part_key;
- break;
-
- case QIB_CMD_DISARM_BUFS:
- case QIB_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */
- copy = 0;
- src = NULL;
- dest = NULL;
- break;
-
- case QIB_CMD_POLL_TYPE:
- copy = sizeof(cmd.cmd.poll_type);
- dest = &cmd.cmd.poll_type;
- src = &ucmd->cmd.poll_type;
- break;
-
- case QIB_CMD_ARMLAUNCH_CTRL:
- copy = sizeof(cmd.cmd.armlaunch_ctrl);
- dest = &cmd.cmd.armlaunch_ctrl;
- src = &ucmd->cmd.armlaunch_ctrl;
- break;
-
- case QIB_CMD_SDMA_INFLIGHT:
- copy = sizeof(cmd.cmd.sdma_inflight);
- dest = &cmd.cmd.sdma_inflight;
- src = &ucmd->cmd.sdma_inflight;
- break;
-
- case QIB_CMD_SDMA_COMPLETE:
- copy = sizeof(cmd.cmd.sdma_complete);
- dest = &cmd.cmd.sdma_complete;
- src = &ucmd->cmd.sdma_complete;
- break;
-
- case QIB_CMD_ACK_EVENT:
- copy = sizeof(cmd.cmd.event_mask);
- dest = &cmd.cmd.event_mask;
- src = &ucmd->cmd.event_mask;
- break;
-
- default:
- ret = -EINVAL;
- goto bail;
- }
-
- if (copy) {
- if ((count - consumed) < copy) {
- ret = -EINVAL;
- goto bail;
- }
- if (copy_from_user(dest, src, copy)) {
- ret = -EFAULT;
- goto bail;
- }
- consumed += copy;
- }
-
- rcd = ctxt_fp(fp);
- if (!rcd && cmd.type != QIB_CMD_ASSIGN_CTXT) {
- ret = -EINVAL;
- goto bail;
- }
-
- switch (cmd.type) {
- case QIB_CMD_ASSIGN_CTXT:
- ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
- if (ret)
- goto bail;
- break;
-
- case QIB_CMD_USER_INIT:
- ret = qib_do_user_init(fp, &cmd.cmd.user_info);
- if (ret)
- goto bail;
- ret = qib_get_base_info(fp, (void __user *) (unsigned long)
- cmd.cmd.user_info.spu_base_info,
- cmd.cmd.user_info.spu_base_info_size);
- break;
-
- case QIB_CMD_RECV_CTRL:
- ret = qib_manage_rcvq(rcd, subctxt_fp(fp), cmd.cmd.recv_ctrl);
- break;
-
- case QIB_CMD_CTXT_INFO:
- ret = qib_ctxt_info(fp, (struct qib_ctxt_info __user *)
- (unsigned long) cmd.cmd.ctxt_info);
- break;
-
- case QIB_CMD_TID_UPDATE:
- ret = qib_tid_update(rcd, fp, &cmd.cmd.tid_info);
- break;
-
- case QIB_CMD_TID_FREE:
- ret = qib_tid_free(rcd, subctxt_fp(fp), &cmd.cmd.tid_info);
- break;
-
- case QIB_CMD_SET_PART_KEY:
- ret = qib_set_part_key(rcd, cmd.cmd.part_key);
- break;
-
- case QIB_CMD_DISARM_BUFS:
- (void)qib_disarm_piobufs_ifneeded(rcd);
- ret = disarm_req_delay(rcd);
- break;
-
- case QIB_CMD_PIOAVAILUPD:
- qib_force_pio_avail_update(rcd->dd);
- break;
-
- case QIB_CMD_POLL_TYPE:
- rcd->poll_type = cmd.cmd.poll_type;
- break;
-
- case QIB_CMD_ARMLAUNCH_CTRL:
- rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl);
- break;
-
- case QIB_CMD_SDMA_INFLIGHT:
- ret = qib_sdma_get_inflight(user_sdma_queue_fp(fp),
- (u32 __user *) (unsigned long)
- cmd.cmd.sdma_inflight);
- break;
-
- case QIB_CMD_SDMA_COMPLETE:
- ret = qib_sdma_get_complete(rcd->ppd,
- user_sdma_queue_fp(fp),
- (u32 __user *) (unsigned long)
- cmd.cmd.sdma_complete);
- break;
-
- case QIB_CMD_ACK_EVENT:
- ret = qib_user_event_ack(rcd, subctxt_fp(fp),
- cmd.cmd.event_mask);
- break;
- }
-
- if (ret >= 0)
- ret = consumed;
-
-bail:
- return ret;
-}
-
-static ssize_t qib_write_iter(struct kiocb *iocb, struct iov_iter *from)
-{
- struct qib_filedata *fp = iocb->ki_filp->private_data;
- struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
- struct qib_user_sdma_queue *pq = fp->pq;
-
- if (!iter_is_iovec(from) || !from->nr_segs || !pq)
- return -EINVAL;
-
- return qib_user_sdma_writev(rcd, pq, from->iov, from->nr_segs);
-}
-
-static struct class *qib_class;
-static dev_t qib_dev;
-
-int qib_cdev_init(int minor, const char *name,
- const struct file_operations *fops,
- struct cdev **cdevp, struct device **devp)
-{
- const dev_t dev = MKDEV(MAJOR(qib_dev), minor);
- struct cdev *cdev;
- struct device *device = NULL;
- int ret;
-
- cdev = cdev_alloc();
- if (!cdev) {
- pr_err("Could not allocate cdev for minor %d, %s\n",
- minor, name);
- ret = -ENOMEM;
- goto done;
- }
-
- cdev->owner = THIS_MODULE;
- cdev->ops = fops;
- kobject_set_name(&cdev->kobj, name);
-
- ret = cdev_add(cdev, dev, 1);
- if (ret < 0) {
- pr_err("Could not add cdev for minor %d, %s (err %d)\n",
- minor, name, -ret);
- goto err_cdev;
- }
-
- device = device_create(qib_class, NULL, dev, NULL, "%s", name);
- if (!IS_ERR(device))
- goto done;
- ret = PTR_ERR(device);
- device = NULL;
- pr_err("Could not create device for minor %d, %s (err %d)\n",
- minor, name, -ret);
-err_cdev:
- cdev_del(cdev);
- cdev = NULL;
-done:
- *cdevp = cdev;
- *devp = device;
- return ret;
-}
-
-void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp)
-{
- struct device *device = *devp;
-
- if (device) {
- device_unregister(device);
- *devp = NULL;
- }
-
- if (*cdevp) {
- cdev_del(*cdevp);
- *cdevp = NULL;
- }
-}
-
-static struct cdev *wildcard_cdev;
-static struct device *wildcard_device;
-
-int __init qib_dev_init(void)
-{
- int ret;
-
- ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME);
- if (ret < 0) {
- pr_err("Could not allocate chrdev region (err %d)\n", -ret);
- goto done;
- }
-
- qib_class = class_create(THIS_MODULE, "ipath");
- if (IS_ERR(qib_class)) {
- ret = PTR_ERR(qib_class);
- pr_err("Could not create device class (err %d)\n", -ret);
- unregister_chrdev_region(qib_dev, QIB_NMINORS);
- }
-
-done:
- return ret;
-}
-
-void qib_dev_cleanup(void)
-{
- if (qib_class) {
- class_destroy(qib_class);
- qib_class = NULL;
- }
-
- unregister_chrdev_region(qib_dev, QIB_NMINORS);
-}
-
-static atomic_t user_count = ATOMIC_INIT(0);
-
-static void qib_user_remove(struct qib_devdata *dd)
-{
- if (atomic_dec_return(&user_count) == 0)
- qib_cdev_cleanup(&wildcard_cdev, &wildcard_device);
-
- qib_cdev_cleanup(&dd->user_cdev, &dd->user_device);
-}
-
-static int qib_user_add(struct qib_devdata *dd)
-{
- char name[10];
- int ret;
-
- if (atomic_inc_return(&user_count) == 1) {
- ret = qib_cdev_init(0, "ipath", &qib_file_ops,
- &wildcard_cdev, &wildcard_device);
- if (ret)
- goto done;
- }
-
- snprintf(name, sizeof(name), "ipath%d", dd->unit);
- ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops,
- &dd->user_cdev, &dd->user_device);
- if (ret)
- qib_user_remove(dd);
-done:
- return ret;
-}
-
-/*
- * Create per-unit files in /dev
- */
-int qib_device_create(struct qib_devdata *dd)
-{
- int r, ret;
-
- r = qib_user_add(dd);
- ret = qib_diag_add(dd);
- if (r && !ret)
- ret = r;
- return ret;
-}
-
-/*
- * Remove per-unit files in /dev
- * void; the core kernel doesn't check errors for this stuff
- */
-void qib_device_remove(struct qib_devdata *dd)
-{
- qib_user_remove(dd);
- qib_diag_remove(dd);
-}
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
deleted file mode 100644
index 13ef22bd9459..000000000000
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ /dev/null
@@ -1,621 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/mount.h>
-#include <linux/pagemap.h>
-#include <linux/init.h>
-#include <linux/namei.h>
-
-#include "qib.h"
-
-#define QIBFS_MAGIC 0x726a77
-
-static struct super_block *qib_super;
-
-#define private2dd(file) (file_inode(file)->i_private)
-
-static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
- umode_t mode, const struct file_operations *fops,
- void *data)
-{
- int error;
- struct inode *inode = new_inode(dir->i_sb);
-
- if (!inode) {
- error = -EPERM;
- goto bail;
- }
-
- inode->i_ino = get_next_ino();
- inode->i_mode = mode;
- inode->i_uid = GLOBAL_ROOT_UID;
- inode->i_gid = GLOBAL_ROOT_GID;
- inode->i_blocks = 0;
- inode->i_atime = CURRENT_TIME;
- inode->i_mtime = inode->i_atime;
- inode->i_ctime = inode->i_atime;
- inode->i_private = data;
- if (S_ISDIR(mode)) {
- inode->i_op = &simple_dir_inode_operations;
- inc_nlink(inode);
- inc_nlink(dir);
- }
-
- inode->i_fop = fops;
-
- d_instantiate(dentry, inode);
- error = 0;
-
-bail:
- return error;
-}
-
-static int create_file(const char *name, umode_t mode,
- struct dentry *parent, struct dentry **dentry,
- const struct file_operations *fops, void *data)
-{
- int error;
-
- mutex_lock(&d_inode(parent)->i_mutex);
- *dentry = lookup_one_len(name, parent, strlen(name));
- if (!IS_ERR(*dentry))
- error = qibfs_mknod(d_inode(parent), *dentry,
- mode, fops, data);
- else
- error = PTR_ERR(*dentry);
- mutex_unlock(&d_inode(parent)->i_mutex);
-
- return error;
-}
-
-static ssize_t driver_stats_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- qib_stats.sps_ints = qib_sps_ints();
- return simple_read_from_buffer(buf, count, ppos, &qib_stats,
- sizeof(qib_stats));
-}
-
-/*
- * driver stats field names, one line per stat, single string. Used by
- * programs like ipathstats to print the stats in a way which works for
- * different versions of drivers, without changing program source.
- * If qlogic_ib_stats changes, this needs to change. Names must be
- * 12 chars or fewer (w/o newline) for proper display by the ipathstats utility.
- */
-static const char qib_statnames[] =
- "KernIntr\n"
- "ErrorIntr\n"
- "Tx_Errs\n"
- "Rcv_Errs\n"
- "H/W_Errs\n"
- "NoPIOBufs\n"
- "CtxtsOpen\n"
- "RcvLen_Errs\n"
- "EgrBufFull\n"
- "EgrHdrFull\n"
- ;
-
-static ssize_t driver_names_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- return simple_read_from_buffer(buf, count, ppos, qib_statnames,
- sizeof(qib_statnames) - 1); /* no null */
-}
-
-static const struct file_operations driver_ops[] = {
- { .read = driver_stats_read, .llseek = generic_file_llseek, },
- { .read = driver_names_read, .llseek = generic_file_llseek, },
-};
-
-/* read the per-device counters */
-static ssize_t dev_counters_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- u64 *counters;
- size_t avail;
- struct qib_devdata *dd = private2dd(file);
-
- avail = dd->f_read_cntrs(dd, *ppos, NULL, &counters);
- return simple_read_from_buffer(buf, count, ppos, counters, avail);
-}
-
-/* read the per-device counters */
-static ssize_t dev_names_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- char *names;
- size_t avail;
- struct qib_devdata *dd = private2dd(file);
-
- avail = dd->f_read_cntrs(dd, *ppos, &names, NULL);
- return simple_read_from_buffer(buf, count, ppos, names, avail);
-}
-
-static const struct file_operations cntr_ops[] = {
- { .read = dev_counters_read, .llseek = generic_file_llseek, },
- { .read = dev_names_read, .llseek = generic_file_llseek, },
-};
-
-/*
- * Could use file_inode(file)->i_ino to figure out which file,
- * instead of a separate routine for each, but for now, this works...
- */
-
-/* read the per-port names (same for each port) */
-static ssize_t portnames_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- char *names;
- size_t avail;
- struct qib_devdata *dd = private2dd(file);
-
- avail = dd->f_read_portcntrs(dd, *ppos, 0, &names, NULL);
- return simple_read_from_buffer(buf, count, ppos, names, avail);
-}
-
-/* read the per-port counters for port 1 (pidx 0) */
-static ssize_t portcntrs_1_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- u64 *counters;
- size_t avail;
- struct qib_devdata *dd = private2dd(file);
-
- avail = dd->f_read_portcntrs(dd, *ppos, 0, NULL, &counters);
- return simple_read_from_buffer(buf, count, ppos, counters, avail);
-}
-
-/* read the per-port counters for port 2 (pidx 1) */
-static ssize_t portcntrs_2_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- u64 *counters;
- size_t avail;
- struct qib_devdata *dd = private2dd(file);
-
- avail = dd->f_read_portcntrs(dd, *ppos, 1, NULL, &counters);
- return simple_read_from_buffer(buf, count, ppos, counters, avail);
-}
-
-static const struct file_operations portcntr_ops[] = {
- { .read = portnames_read, .llseek = generic_file_llseek, },
- { .read = portcntrs_1_read, .llseek = generic_file_llseek, },
- { .read = portcntrs_2_read, .llseek = generic_file_llseek, },
-};
-
-/*
- * read the per-port QSFP data for port 1 (pidx 0)
- */
-static ssize_t qsfp_1_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct qib_devdata *dd = private2dd(file);
- char *tmp;
- int ret;
-
- tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- ret = qib_qsfp_dump(dd->pport, tmp, PAGE_SIZE);
- if (ret > 0)
- ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
- kfree(tmp);
- return ret;
-}
-
-/*
- * read the per-port QSFP data for port 2 (pidx 1)
- */
-static ssize_t qsfp_2_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct qib_devdata *dd = private2dd(file);
- char *tmp;
- int ret;
-
- if (dd->num_pports < 2)
- return -ENODEV;
-
- tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- ret = qib_qsfp_dump(dd->pport + 1, tmp, PAGE_SIZE);
- if (ret > 0)
- ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
- kfree(tmp);
- return ret;
-}
-
-static const struct file_operations qsfp_ops[] = {
- { .read = qsfp_1_read, .llseek = generic_file_llseek, },
- { .read = qsfp_2_read, .llseek = generic_file_llseek, },
-};
-
-static ssize_t flash_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct qib_devdata *dd;
- ssize_t ret;
- loff_t pos;
- char *tmp;
-
- pos = *ppos;
-
- if (pos < 0) {
- ret = -EINVAL;
- goto bail;
- }
-
- if (pos >= sizeof(struct qib_flash)) {
- ret = 0;
- goto bail;
- }
-
- if (count > sizeof(struct qib_flash) - pos)
- count = sizeof(struct qib_flash) - pos;
-
- tmp = kmalloc(count, GFP_KERNEL);
- if (!tmp) {
- ret = -ENOMEM;
- goto bail;
- }
-
- dd = private2dd(file);
- if (qib_eeprom_read(dd, pos, tmp, count)) {
- qib_dev_err(dd, "failed to read from flash\n");
- ret = -ENXIO;
- goto bail_tmp;
- }
-
- if (copy_to_user(buf, tmp, count)) {
- ret = -EFAULT;
- goto bail_tmp;
- }
-
- *ppos = pos + count;
- ret = count;
-
-bail_tmp:
- kfree(tmp);
-
-bail:
- return ret;
-}
-
-static ssize_t flash_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct qib_devdata *dd;
- ssize_t ret;
- loff_t pos;
- char *tmp;
-
- pos = *ppos;
-
- if (pos != 0) {
- ret = -EINVAL;
- goto bail;
- }
-
- if (count != sizeof(struct qib_flash)) {
- ret = -EINVAL;
- goto bail;
- }
-
- tmp = kmalloc(count, GFP_KERNEL);
- if (!tmp) {
- ret = -ENOMEM;
- goto bail;
- }
-
- if (copy_from_user(tmp, buf, count)) {
- ret = -EFAULT;
- goto bail_tmp;
- }
-
- dd = private2dd(file);
- if (qib_eeprom_write(dd, pos, tmp, count)) {
- ret = -ENXIO;
- qib_dev_err(dd, "failed to write to flash\n");
- goto bail_tmp;
- }
-
- *ppos = pos + count;
- ret = count;
-
-bail_tmp:
- kfree(tmp);
-
-bail:
- return ret;
-}
-
-static const struct file_operations flash_ops = {
- .read = flash_read,
- .write = flash_write,
- .llseek = default_llseek,
-};
-
-static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
-{
- struct dentry *dir, *tmp;
- char unit[10];
- int ret, i;
-
- /* create the per-unit directory */
- snprintf(unit, sizeof(unit), "%u", dd->unit);
- ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
- &simple_dir_operations, dd);
- if (ret) {
- pr_err("create_file(%s) failed: %d\n", unit, ret);
- goto bail;
- }
-
- /* create the files in the new directory */
- ret = create_file("counters", S_IFREG|S_IRUGO, dir, &tmp,
- &cntr_ops[0], dd);
- if (ret) {
- pr_err("create_file(%s/counters) failed: %d\n",
- unit, ret);
- goto bail;
- }
- ret = create_file("counter_names", S_IFREG|S_IRUGO, dir, &tmp,
- &cntr_ops[1], dd);
- if (ret) {
- pr_err("create_file(%s/counter_names) failed: %d\n",
- unit, ret);
- goto bail;
- }
- ret = create_file("portcounter_names", S_IFREG|S_IRUGO, dir, &tmp,
- &portcntr_ops[0], dd);
- if (ret) {
- pr_err("create_file(%s/%s) failed: %d\n",
- unit, "portcounter_names", ret);
- goto bail;
- }
- for (i = 1; i <= dd->num_pports; i++) {
- char fname[24];
-
- sprintf(fname, "port%dcounters", i);
- /* create the files in the new directory */
- ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp,
- &portcntr_ops[i], dd);
- if (ret) {
- pr_err("create_file(%s/%s) failed: %d\n",
- unit, fname, ret);
- goto bail;
- }
- if (!(dd->flags & QIB_HAS_QSFP))
- continue;
- sprintf(fname, "qsfp%d", i);
- ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp,
- &qsfp_ops[i - 1], dd);
- if (ret) {
- pr_err("create_file(%s/%s) failed: %d\n",
- unit, fname, ret);
- goto bail;
- }
- }
-
- ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp,
- &flash_ops, dd);
- if (ret)
- pr_err("create_file(%s/flash) failed: %d\n",
- unit, ret);
-bail:
- return ret;
-}
-
-static int remove_file(struct dentry *parent, char *name)
-{
- struct dentry *tmp;
- int ret;
-
- tmp = lookup_one_len(name, parent, strlen(name));
-
- if (IS_ERR(tmp)) {
- ret = PTR_ERR(tmp);
- goto bail;
- }
-
- spin_lock(&tmp->d_lock);
- if (simple_positive(tmp)) {
- __d_drop(tmp);
- spin_unlock(&tmp->d_lock);
- simple_unlink(d_inode(parent), tmp);
- } else {
- spin_unlock(&tmp->d_lock);
- }
- dput(tmp);
-
- ret = 0;
-bail:
- /*
- * We don't expect clients to care about the return value, but
- * it's there if they need it.
- */
- return ret;
-}
-
-static int remove_device_files(struct super_block *sb,
- struct qib_devdata *dd)
-{
- struct dentry *dir, *root;
- char unit[10];
- int ret, i;
-
- root = dget(sb->s_root);
- mutex_lock(&d_inode(root)->i_mutex);
- snprintf(unit, sizeof(unit), "%u", dd->unit);
- dir = lookup_one_len(unit, root, strlen(unit));
-
- if (IS_ERR(dir)) {
- ret = PTR_ERR(dir);
- pr_err("Lookup of %s failed\n", unit);
- goto bail;
- }
-
- mutex_lock(&d_inode(dir)->i_mutex);
- remove_file(dir, "counters");
- remove_file(dir, "counter_names");
- remove_file(dir, "portcounter_names");
- for (i = 0; i < dd->num_pports; i++) {
- char fname[24];
-
- sprintf(fname, "port%dcounters", i + 1);
- remove_file(dir, fname);
- if (dd->flags & QIB_HAS_QSFP) {
- sprintf(fname, "qsfp%d", i + 1);
- remove_file(dir, fname);
- }
- }
- remove_file(dir, "flash");
- mutex_unlock(&d_inode(dir)->i_mutex);
- ret = simple_rmdir(d_inode(root), dir);
- d_delete(dir);
- dput(dir);
-
-bail:
- mutex_unlock(&d_inode(root)->i_mutex);
- dput(root);
- return ret;
-}
-
-/*
- * This fills everything in when the fs is mounted, to handle umount/mount
- * after device init. The direct add_cntr_files() call handles adding
- * them from the init code, when the fs is already mounted.
- */
-static int qibfs_fill_super(struct super_block *sb, void *data, int silent)
-{
- struct qib_devdata *dd, *tmp;
- unsigned long flags;
- int ret;
-
- static struct tree_descr files[] = {
- [2] = {"driver_stats", &driver_ops[0], S_IRUGO},
- [3] = {"driver_stats_names", &driver_ops[1], S_IRUGO},
- {""},
- };
-
- ret = simple_fill_super(sb, QIBFS_MAGIC, files);
- if (ret) {
- pr_err("simple_fill_super failed: %d\n", ret);
- goto bail;
- }
-
- spin_lock_irqsave(&qib_devs_lock, flags);
-
- list_for_each_entry_safe(dd, tmp, &qib_dev_list, list) {
- spin_unlock_irqrestore(&qib_devs_lock, flags);
- ret = add_cntr_files(sb, dd);
- if (ret)
- goto bail;
- spin_lock_irqsave(&qib_devs_lock, flags);
- }
-
- spin_unlock_irqrestore(&qib_devs_lock, flags);
-
-bail:
- return ret;
-}
-
-static struct dentry *qibfs_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
-{
- struct dentry *ret;
-
- ret = mount_single(fs_type, flags, data, qibfs_fill_super);
- if (!IS_ERR(ret))
- qib_super = ret->d_sb;
- return ret;
-}
-
-static void qibfs_kill_super(struct super_block *s)
-{
- kill_litter_super(s);
- qib_super = NULL;
-}
-
-int qibfs_add(struct qib_devdata *dd)
-{
- int ret;
-
- /*
- * On first unit initialized, qib_super will not yet exist
- * because nobody has yet tried to mount the filesystem, so
- * we can't consider that to be an error; if an error occurs
- * during the mount, that will get a complaint, so this is OK.
- * add_cntr_files() for all units is done at mount from
- * qibfs_fill_super(), so one way or another, everything works.
- */
- if (qib_super == NULL)
- ret = 0;
- else
- ret = add_cntr_files(qib_super, dd);
- return ret;
-}
-
-int qibfs_remove(struct qib_devdata *dd)
-{
- int ret = 0;
-
- if (qib_super)
- ret = remove_device_files(qib_super, dd);
-
- return ret;
-}
-
-static struct file_system_type qibfs_fs_type = {
- .owner = THIS_MODULE,
- .name = "ipathfs",
- .mount = qibfs_mount,
- .kill_sb = qibfs_kill_super,
-};
-MODULE_ALIAS_FS("ipathfs");
-
-int __init qib_init_qibfs(void)
-{
- return register_filesystem(&qibfs_fs_type);
-}
-
-int __exit qib_exit_qibfs(void)
-{
- return unregister_filesystem(&qibfs_fs_type);
-}
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
deleted file mode 100644
index 4b927809d1a1..000000000000
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ /dev/null
@@ -1,3600 +0,0 @@
-/*
- * Copyright (c) 2013 Intel Corporation. All rights reserved.
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/*
- * This file contains all of the code that is specific to the
- * QLogic_IB 6120 PCIe chip.
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <rdma/ib_verbs.h>
-
-#include "qib.h"
-#include "qib_6120_regs.h"
-
-static void qib_6120_setup_setextled(struct qib_pportdata *, u32);
-static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op);
-static u8 qib_6120_phys_portstate(u64);
-static u32 qib_6120_iblink_state(u64);
-
-/*
- * This file contains all the chip-specific register information and
- * access functions for the QLogic_IB 6120 PCI-Express chip.
- */
-
-/* KREG_IDX uses machine-generated #defines */
-#define KREG_IDX(regname) (QIB_6120_##regname##_OFFS / sizeof(u64))
-
-/* Use defines to tie machine-generated names to lower-case names */
-#define kr_extctrl KREG_IDX(EXTCtrl)
-#define kr_extstatus KREG_IDX(EXTStatus)
-#define kr_gpio_clear KREG_IDX(GPIOClear)
-#define kr_gpio_mask KREG_IDX(GPIOMask)
-#define kr_gpio_out KREG_IDX(GPIOOut)
-#define kr_gpio_status KREG_IDX(GPIOStatus)
-#define kr_rcvctrl KREG_IDX(RcvCtrl)
-#define kr_sendctrl KREG_IDX(SendCtrl)
-#define kr_partitionkey KREG_IDX(RcvPartitionKey)
-#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
-#define kr_ibcstatus KREG_IDX(IBCStatus)
-#define kr_ibcctrl KREG_IDX(IBCCtrl)
-#define kr_sendbuffererror KREG_IDX(SendBufErr0)
-#define kr_rcvbthqp KREG_IDX(RcvBTHQP)
-#define kr_counterregbase KREG_IDX(CntrRegBase)
-#define kr_palign KREG_IDX(PageAlign)
-#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
-#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
-#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
-#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
-#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
-#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
-#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
-#define kr_scratch KREG_IDX(Scratch)
-#define kr_sendctrl KREG_IDX(SendCtrl)
-#define kr_sendpioavailaddr KREG_IDX(SendPIOAvailAddr)
-#define kr_sendpiobufbase KREG_IDX(SendPIOBufBase)
-#define kr_sendpiobufcnt KREG_IDX(SendPIOBufCnt)
-#define kr_sendpiosize KREG_IDX(SendPIOSize)
-#define kr_sendregbase KREG_IDX(SendRegBase)
-#define kr_userregbase KREG_IDX(UserRegBase)
-#define kr_control KREG_IDX(Control)
-#define kr_intclear KREG_IDX(IntClear)
-#define kr_intmask KREG_IDX(IntMask)
-#define kr_intstatus KREG_IDX(IntStatus)
-#define kr_errclear KREG_IDX(ErrClear)
-#define kr_errmask KREG_IDX(ErrMask)
-#define kr_errstatus KREG_IDX(ErrStatus)
-#define kr_hwerrclear KREG_IDX(HwErrClear)
-#define kr_hwerrmask KREG_IDX(HwErrMask)
-#define kr_hwerrstatus KREG_IDX(HwErrStatus)
-#define kr_revision KREG_IDX(Revision)
-#define kr_portcnt KREG_IDX(PortCnt)
-#define kr_serdes_cfg0 KREG_IDX(SerdesCfg0)
-#define kr_serdes_cfg1 (kr_serdes_cfg0 + 1)
-#define kr_serdes_stat KREG_IDX(SerdesStat)
-#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
-
-/* These must only be written via qib_write_kreg_ctxt() */
-#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)
-#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
-
-#define CREG_IDX(regname) ((QIB_6120_##regname##_OFFS - \
- QIB_6120_LBIntCnt_OFFS) / sizeof(u64))
-
-#define cr_badformat CREG_IDX(RxBadFormatCnt)
-#define cr_erricrc CREG_IDX(RxICRCErrCnt)
-#define cr_errlink CREG_IDX(RxLinkProblemCnt)
-#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)
-#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)
-#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlErrCnt)
-#define cr_err_rlen CREG_IDX(RxLenErrCnt)
-#define cr_errslen CREG_IDX(TxLenErrCnt)
-#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)
-#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)
-#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)
-#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)
-#define cr_lbint CREG_IDX(LBIntCnt)
-#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
-#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)
-#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)
-#define cr_pktrcv CREG_IDX(RxDataPktCnt)
-#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
-#define cr_pktsend CREG_IDX(TxDataPktCnt)
-#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)
-#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)
-#define cr_rcvebp CREG_IDX(RxEBPCnt)
-#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)
-#define cr_senddropped CREG_IDX(TxDroppedPktCnt)
-#define cr_sendstall CREG_IDX(TxFlowStallCnt)
-#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)
-#define cr_wordrcv CREG_IDX(RxDwordCnt)
-#define cr_wordsend CREG_IDX(TxDwordCnt)
-#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
-#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)
-#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
-#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)
-#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
-
-#define SYM_RMASK(regname, fldname) ((u64) \
- QIB_6120_##regname##_##fldname##_RMASK)
-#define SYM_MASK(regname, fldname) ((u64) \
- QIB_6120_##regname##_##fldname##_RMASK << \
- QIB_6120_##regname##_##fldname##_LSB)
-#define SYM_LSB(regname, fldname) (QIB_6120_##regname##_##fldname##_LSB)
-
-#define SYM_FIELD(value, regname, fldname) ((u64) \
- (((value) >> SYM_LSB(regname, fldname)) & \
- SYM_RMASK(regname, fldname)))
-#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
-#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
-
-/* link training states, from IBC */
-#define IB_6120_LT_STATE_DISABLED 0x00
-#define IB_6120_LT_STATE_LINKUP 0x01
-#define IB_6120_LT_STATE_POLLACTIVE 0x02
-#define IB_6120_LT_STATE_POLLQUIET 0x03
-#define IB_6120_LT_STATE_SLEEPDELAY 0x04
-#define IB_6120_LT_STATE_SLEEPQUIET 0x05
-#define IB_6120_LT_STATE_CFGDEBOUNCE 0x08
-#define IB_6120_LT_STATE_CFGRCVFCFG 0x09
-#define IB_6120_LT_STATE_CFGWAITRMT 0x0a
-#define IB_6120_LT_STATE_CFGIDLE 0x0b
-#define IB_6120_LT_STATE_RECOVERRETRAIN 0x0c
-#define IB_6120_LT_STATE_RECOVERWAITRMT 0x0e
-#define IB_6120_LT_STATE_RECOVERIDLE 0x0f
-
-/* link state machine states from IBC */
-#define IB_6120_L_STATE_DOWN 0x0
-#define IB_6120_L_STATE_INIT 0x1
-#define IB_6120_L_STATE_ARM 0x2
-#define IB_6120_L_STATE_ACTIVE 0x3
-#define IB_6120_L_STATE_ACT_DEFER 0x4
-
-static const u8 qib_6120_physportstate[0x20] = {
- [IB_6120_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
- [IB_6120_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
- [IB_6120_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
- [IB_6120_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
- [IB_6120_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
- [IB_6120_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
- [IB_6120_LT_STATE_CFGDEBOUNCE] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_6120_LT_STATE_CFGRCVFCFG] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_6120_LT_STATE_CFGWAITRMT] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_6120_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_6120_LT_STATE_RECOVERRETRAIN] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [IB_6120_LT_STATE_RECOVERWAITRMT] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [IB_6120_LT_STATE_RECOVERIDLE] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
-};
-
-
-struct qib_chip_specific {
- u64 __iomem *cregbase;
- u64 *cntrs;
- u64 *portcntrs;
- void *dummy_hdrq; /* used after ctxt close */
- dma_addr_t dummy_hdrq_phys;
- spinlock_t kernel_tid_lock; /* no back to back kernel TID writes */
- spinlock_t user_tid_lock; /* no back to back user TID writes */
- spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
- spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
- u64 hwerrmask;
- u64 errormask;
- u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
- u64 gpio_mask; /* shadow the gpio mask register */
- u64 extctrl; /* shadow the gpio output enable, etc... */
- /*
- * These 5 fields are used to establish deltas for IB symbol
- * errors and link recovery errors. They can be reported on
- * some chips during link negotiation prior to INIT, and with
- * DDR when faking DDR negotiations with non-IBTA switches.
- * The chip counters are adjusted at driver unload if there is
- * a non-zero delta.
- */
- u64 ibdeltainprog;
- u64 ibsymdelta;
- u64 ibsymsnap;
- u64 iblnkerrdelta;
- u64 iblnkerrsnap;
- u64 ibcctrl; /* shadow for kr_ibcctrl */
- u32 lastlinkrecov; /* link recovery issue */
- int irq;
- u32 cntrnamelen;
- u32 portcntrnamelen;
- u32 ncntrs;
- u32 nportcntrs;
- /* used with gpio interrupts to implement IB counters */
- u32 rxfc_unsupvl_errs;
- u32 overrun_thresh_errs;
- /*
- * these count only cases where _successive_ LocalLinkIntegrity
- * errors were seen in the receive headers of IB standard packets
- */
- u32 lli_errs;
- u32 lli_counter;
- u64 lli_thresh;
- u64 sword; /* total dwords sent (sample result) */
- u64 rword; /* total dwords received (sample result) */
- u64 spkts; /* total packets sent (sample result) */
- u64 rpkts; /* total packets received (sample result) */
- u64 xmit_wait; /* # of ticks no data sent (sample result) */
- struct timer_list pma_timer;
- char emsgbuf[128];
- char bitsmsgbuf[64];
- u8 pma_sample_status;
-};
-
-/* ibcctrl bits */
-#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
-/* cycle through TS1/TS2 till OK */
-#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
-/* wait for TS1, then go on */
-#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
-#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
-
-#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
-#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
-#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
-#define QLOGIC_IB_IBCC_LINKCMD_SHIFT 18
-
-/*
- * We could have a single register get/put routine that takes a group type,
- * but this is somewhat clearer and cleaner. It also gives us some error
- * checking. 64-bit register reads should always work, but are inefficient
- * on Opteron (the northbridge always generates 2 separate HT 32-bit reads),
- * so we use kreg32 wherever possible. User register and counter register
- * reads are always 32-bit reads, so only one form of those routines is needed.
- */
-
-/**
- * qib_read_ureg32 - read 32-bit virtualized per-context register
- * @dd: device
- * @regno: register number
- * @ctxt: context number
- *
- * Return the contents of a register that is virtualized to be per context.
- * Returns 0 on errors (not distinguishable from valid contents at
- * runtime; we may add a separate error variable at some point).
- */
-static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
- enum qib_ureg regno, int ctxt)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return 0;
-
- if (dd->userbase)
- return readl(regno + (u64 __iomem *)
- ((char __iomem *)dd->userbase +
- dd->ureg_align * ctxt));
- else
- return readl(regno + (u64 __iomem *)
- (dd->uregbase +
- (char __iomem *)dd->kregbase +
- dd->ureg_align * ctxt));
-}
-
-/**
- * qib_write_ureg - write 32-bit virtualized per-context register
- * @dd: device
- * @regno: register number
- * @value: value
- * @ctxt: context
- *
- * Write the contents of a register that is virtualized to be per context.
- */
-static inline void qib_write_ureg(const struct qib_devdata *dd,
- enum qib_ureg regno, u64 value, int ctxt)
-{
- u64 __iomem *ubase;
-
- if (dd->userbase)
- ubase = (u64 __iomem *)
- ((char __iomem *) dd->userbase +
- dd->ureg_align * ctxt);
- else
- ubase = (u64 __iomem *)
- (dd->uregbase +
- (char __iomem *) dd->kregbase +
- dd->ureg_align * ctxt);
-
- if (dd->kregbase && (dd->flags & QIB_PRESENT))
- writeq(value, &ubase[regno]);
-}
-
-static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
- const u16 regno)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return -1;
- return readl((u32 __iomem *)&dd->kregbase[regno]);
-}
-
-static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
- const u16 regno)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return -1;
-
- return readq(&dd->kregbase[regno]);
-}
-
-static inline void qib_write_kreg(const struct qib_devdata *dd,
- const u16 regno, u64 value)
-{
- if (dd->kregbase && (dd->flags & QIB_PRESENT))
- writeq(value, &dd->kregbase[regno]);
-}
-
-/**
- * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
- * @dd: the qlogic_ib device
- * @regno: the register number to write
- * @ctxt: the context containing the register
- * @value: the value to write
- */
-static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
- const u16 regno, unsigned ctxt,
- u64 value)
-{
- qib_write_kreg(dd, regno + ctxt, value);
-}
-
-static inline void write_6120_creg(const struct qib_devdata *dd,
- u16 regno, u64 value)
-{
- if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
- writeq(value, &dd->cspec->cregbase[regno]);
-}
-
-static inline u64 read_6120_creg(const struct qib_devdata *dd, u16 regno)
-{
- if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
- return 0;
- return readq(&dd->cspec->cregbase[regno]);
-}
-
-static inline u32 read_6120_creg32(const struct qib_devdata *dd, u16 regno)
-{
- if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
- return 0;
- return readl(&dd->cspec->cregbase[regno]);
-}
-
-/* kr_control bits */
-#define QLOGIC_IB_C_RESET 1U
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define QLOGIC_IB_I_RCVURG_MASK ((1U << 5) - 1)
-#define QLOGIC_IB_I_RCVURG_SHIFT 0
-#define QLOGIC_IB_I_RCVAVAIL_MASK ((1U << 5) - 1)
-#define QLOGIC_IB_I_RCVAVAIL_SHIFT 12
-
-#define QLOGIC_IB_C_FREEZEMODE 0x00000002
-#define QLOGIC_IB_C_LINKENABLE 0x00000004
-#define QLOGIC_IB_I_ERROR 0x0000000080000000ULL
-#define QLOGIC_IB_I_SPIOSENT 0x0000000040000000ULL
-#define QLOGIC_IB_I_SPIOBUFAVAIL 0x0000000020000000ULL
-#define QLOGIC_IB_I_GPIO 0x0000000010000000ULL
-#define QLOGIC_IB_I_BITSEXTANT \
- ((QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \
- (QLOGIC_IB_I_RCVAVAIL_MASK << \
- QLOGIC_IB_I_RCVAVAIL_SHIFT) | \
- QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \
- QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO)
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
-#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL
-#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0
-#define QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
-#define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
-#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
-#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
-#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
-#define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
-#define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
-#define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
-#define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
-#define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL
-
-
-/* kr_extstatus bits */
-#define QLOGIC_IB_EXTS_FREQSEL 0x2
-#define QLOGIC_IB_EXTS_SERDESSEL 0x4
-#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000
-#define QLOGIC_IB_EXTS_MEMBIST_FOUND 0x0000000000008000
-
-/* kr_xgxsconfig bits */
-#define QLOGIC_IB_XGXS_RESET 0x5ULL
-
-#define _QIB_GPIO_SDA_NUM 1
-#define _QIB_GPIO_SCL_NUM 0
-
-/* Bits in GPIO for the added IB link interrupts */
-#define GPIO_RXUVL_BIT 3
-#define GPIO_OVRUN_BIT 4
-#define GPIO_LLI_BIT 5
-#define GPIO_ERRINTR_MASK 0x38
-
-
-#define QLOGIC_IB_RT_BUFSIZE_MASK 0xe0000000ULL
-#define QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid) \
- ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) >> 29) + 11 - 1)
-#define QLOGIC_IB_RT_BUFSIZE(tid) (1 << QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid))
-#define QLOGIC_IB_RT_IS_VALID(tid) \
- (((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) && \
- ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) != QLOGIC_IB_RT_BUFSIZE_MASK)))
-#define QLOGIC_IB_RT_ADDR_MASK 0x1FFFFFFFULL /* 29 bits valid */
-#define QLOGIC_IB_RT_ADDR_SHIFT 10
-
-#define QLOGIC_IB_R_INTRAVAIL_SHIFT 16
-#define QLOGIC_IB_R_TAILUPD_SHIFT 31
-#define IBA6120_R_PKEY_DIS_SHIFT 30
-
-#define PBC_6120_VL15_SEND_CTRL (1ULL << 31) /* pbc; VL15; link_buf only */
-
-#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
-#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)
-
-#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
- ((1ULL << (SYM_LSB(regname, fldname) + (bit)))))
-
-#define TXEMEMPARITYERR_PIOBUF \
- SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
-#define TXEMEMPARITYERR_PIOPBC \
- SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
-#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
- SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)
-
-#define RXEMEMPARITYERR_RCVBUF \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
-#define RXEMEMPARITYERR_LOOKUPQ \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
-#define RXEMEMPARITYERR_EXPTID \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
-#define RXEMEMPARITYERR_EAGERTID \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
-#define RXEMEMPARITYERR_FLAGBUF \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
-#define RXEMEMPARITYERR_DATAINFO \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
-#define RXEMEMPARITYERR_HDRINFO \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)
-
-/* 6120 specific hardware errors... */
-static const struct qib_hwerror_msgs qib_6120_hwerror_msgs[] = {
- /* generic hardware errors */
- QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
- QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),
-
- QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
- "TXE PIOBUF Memory Parity"),
- QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
- "TXE PIOPBC Memory Parity"),
- QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
- "TXE PIOLAUNCHFIFO Memory Parity"),
-
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
- "RXE RCVBUF Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
- "RXE LOOKUPQ Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
- "RXE EAGERTID Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
- "RXE EXPTID Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
- "RXE FLAGBUF Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
- "RXE DATAINFO Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
- "RXE HDRINFO Memory Parity"),
-
- /* chip-specific hardware errors */
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
- "PCIe Poisoned TLP"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
- "PCIe completion timeout"),
- /*
- * In practice, it's unlikely that we'll see PCIe PLL, bus parity,
- * or memory parity error failures, because most likely we
- * won't be able to talk to the core of the chip. Nonetheless, we
- * might see them, if they are in parts of the PCIe core that aren't
- * essential.
- */
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
- "PCIePLL1"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
- "PCIePLL0"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
- "PCIe XTLH core parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
- "PCIe ADM TX core parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
- "PCIe ADM RX core parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
- "SerDes PLL"),
-};
-
-#define TXE_PIO_PARITY (TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC)
-#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \
- QLOGIC_IB_HWE_COREPLL_RFSLIP)
-
- /* variables for sanity checking interrupts and errors */
-#define IB_HWE_BITSEXTANT \
- (HWE_MASK(RXEMemParityErr) | \
- HWE_MASK(TXEMemParityErr) | \
- (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \
- QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \
- QLOGIC_IB_HWE_PCIE1PLLFAILED | \
- QLOGIC_IB_HWE_PCIE0PLLFAILED | \
- QLOGIC_IB_HWE_PCIEPOISONEDTLP | \
- QLOGIC_IB_HWE_PCIECPLTIMEOUT | \
- QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \
- QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \
- QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \
- HWE_MASK(PowerOnBISTFailed) | \
- QLOGIC_IB_HWE_COREPLL_FBSLIP | \
- QLOGIC_IB_HWE_COREPLL_RFSLIP | \
- QLOGIC_IB_HWE_SERDESPLLFAILED | \
- HWE_MASK(IBCBusToSPCParityErr) | \
- HWE_MASK(IBCBusFromSPCParityErr))
-
-#define IB_E_BITSEXTANT \
- (ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \
- ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \
- ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \
- ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
- ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \
- ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \
- ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \
- ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \
- ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \
- ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendMaxPktLenErr) | \
- ERR_MASK(SendUnderRunErr) | ERR_MASK(SendPktLenErr) | \
- ERR_MASK(SendDroppedSmpPktErr) | \
- ERR_MASK(SendDroppedDataPktErr) | \
- ERR_MASK(SendPioArmLaunchErr) | \
- ERR_MASK(SendUnexpectedPktNumErr) | \
- ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(IBStatusChanged) | \
- ERR_MASK(InvalidAddrErr) | ERR_MASK(ResetNegated) | \
- ERR_MASK(HardwareErr))
-
-#define QLOGIC_IB_E_PKTERRS ( \
- ERR_MASK(SendPktLenErr) | \
- ERR_MASK(SendDroppedDataPktErr) | \
- ERR_MASK(RcvVCRCErr) | \
- ERR_MASK(RcvICRCErr) | \
- ERR_MASK(RcvShortPktLenErr) | \
- ERR_MASK(RcvEBPErr))
-
-/* These are all rcv-related errors which we want to count for stats */
-#define E_SUM_PKTERRS \
- (ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \
- ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \
- ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \
- ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
- ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \
- ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))
-
-/* These are all send-related errors which we want to count for stats */
-#define E_SUM_ERRS \
- (ERR_MASK(SendPioArmLaunchErr) | \
- ERR_MASK(SendUnexpectedPktNumErr) | \
- ERR_MASK(SendDroppedDataPktErr) | \
- ERR_MASK(SendDroppedSmpPktErr) | \
- ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \
- ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
- ERR_MASK(InvalidAddrErr))
-
-/*
- * This is similar to E_SUM_ERRS, but here we can't ignore armlaunch
- * errors, and we don't ignore errors unrelated to freeze and cancelling
- * buffers. We can't ignore armlaunch because more could arrive while we
- * are still cleaning up, and those need to be cancelled as they happen.
- */
-#define E_SPKT_ERRS_IGNORE \
- (ERR_MASK(SendDroppedDataPktErr) | \
- ERR_MASK(SendDroppedSmpPktErr) | \
- ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \
- ERR_MASK(SendPktLenErr))
-
-/*
- * These are errors that can occur when the link changes state while
- * a packet is being sent or received. This doesn't cover things
- * like EBP or VCRC errors, which can result from the sender seeing
- * the link change state, so that we receive a "known bad" packet.
- */
-#define E_SUM_LINK_PKTERRS \
- (ERR_MASK(SendDroppedDataPktErr) | \
- ERR_MASK(SendDroppedSmpPktErr) | \
- ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
- ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
- ERR_MASK(RcvUnexpectedCharErr))
-
-static void qib_6120_put_tid_2(struct qib_devdata *, u64 __iomem *,
- u32, unsigned long);
-
-/*
- * On platforms using this chip, and not having ordered WC stores, we
- * can get TXE parity errors due to speculative reads to the PIO buffers,
- * and this, due to a chip issue, can result in (many) false parity error
- * reports. So it's a debug print on those systems, and an info print on
- * systems where the speculative reads don't occur.
- */
-static void qib_6120_txe_recover(struct qib_devdata *dd)
-{
- if (!qib_unordered_wc())
- qib_devinfo(dd->pcidev,
- "Recovering from TXE PIO parity error\n");
-}
-
-/* enable/disable chip from delivering interrupts */
-static void qib_6120_set_intr_state(struct qib_devdata *dd, u32 enable)
-{
- if (enable) {
- if (dd->flags & QIB_BADINTR)
- return;
- qib_write_kreg(dd, kr_intmask, ~0ULL);
- /* force re-interrupt of any pending interrupts. */
- qib_write_kreg(dd, kr_intclear, 0ULL);
- } else
- qib_write_kreg(dd, kr_intmask, 0ULL);
-}
-
-/*
- * Try to cleanup as much as possible for anything that might have gone
- * wrong while in freeze mode, such as pio buffers being written by user
- * processes (causing armlaunch), send errors due to going into freeze mode,
- * etc., and try to avoid causing extra interrupts while doing so.
- * Forcibly update the in-memory pioavail register copies after cleanup
- * because the chip won't do it while in freeze mode (the register values
- * themselves are kept correct).
- * Make sure that we don't lose any important interrupts by using the chip
- * feature that says that writing 0 to a bit in *clear that is set in
- * *status will cause an interrupt to be generated again (if allowed by
- * the *mask value).
- * This is in chip-specific code because of all of the register accesses,
- * even though the details are similar on most chips
- */
-static void qib_6120_clear_freeze(struct qib_devdata *dd)
-{
- /* disable error interrupts, to avoid confusion */
- qib_write_kreg(dd, kr_errmask, 0ULL);
-
- /* also disable interrupts; errormask is sometimes overwritten */
- qib_6120_set_intr_state(dd, 0);
-
- qib_cancel_sends(dd->pport);
-
- /* clear the freeze, and be sure chip saw it */
- qib_write_kreg(dd, kr_control, dd->control);
- qib_read_kreg32(dd, kr_scratch);
-
- /* force in-memory update now we are out of freeze */
- qib_force_pio_avail_update(dd);
-
- /*
- * force new interrupt if any hwerr, error or interrupt bits are
- * still set, and clear "safe" send packet errors related to freeze
- * and cancelling sends. Re-enable error interrupts before possible
- * force of re-interrupt on pending interrupts.
- */
- qib_write_kreg(dd, kr_hwerrclear, 0ULL);
- qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
- qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
- qib_6120_set_intr_state(dd, 1);
-}
-
-/**
- * qib_handle_6120_hwerrors - display hardware errors.
- * @dd: the qlogic_ib device
- * @msg: the output buffer
- * @msgl: the size of the output buffer
- *
- * Most hardware errors are catastrophic, but for right now we'll
- * print them and continue. Reuse the same message buffer as
- * handle_6120_errors() to avoid excessive stack usage.
- */
-static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
- size_t msgl)
-{
- u64 hwerrs;
- u32 bits, ctrl;
- int isfatal = 0;
- char *bitsmsg;
- int log_idx;
-
- hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
- if (!hwerrs)
- return;
- if (hwerrs == ~0ULL) {
- qib_dev_err(dd,
- "Read of hardware error status failed (all bits set); ignoring\n");
- return;
- }
- qib_stats.sps_hwerrs++;
-
- /* Always clear the error status register, except MEMBISTFAIL,
- * regardless of whether we continue or stop using the chip.
- * We want that set so we know it failed, even across driver reload.
- * We'll still ignore it in the hwerrmask. We do this partly for
- * diagnostics, but also for support. */
- qib_write_kreg(dd, kr_hwerrclear,
- hwerrs & ~HWE_MASK(PowerOnBISTFailed));
-
- hwerrs &= dd->cspec->hwerrmask;
-
- /* We log some errors to EEPROM, check if we have any of those. */
- for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
- if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
- qib_inc_eeprom_err(dd, log_idx, 1);
-
- /*
- * Make sure we get this much out, unless told to be quiet,
- * or it's occurred within the last 5 seconds.
- */
- if (hwerrs & ~(TXE_PIO_PARITY | RXEMEMPARITYERR_EAGERTID))
- qib_devinfo(dd->pcidev,
- "Hardware error: hwerr=0x%llx (cleared)\n",
- (unsigned long long) hwerrs);
-
- if (hwerrs & ~IB_HWE_BITSEXTANT)
- qib_dev_err(dd,
- "hwerror interrupt with unknown errors %llx set\n",
- (unsigned long long)(hwerrs & ~IB_HWE_BITSEXTANT));
-
- ctrl = qib_read_kreg32(dd, kr_control);
- if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
- /*
- * Parity errors in send memory are recoverable,
- * just cancel the send (if indicated in sendbuffererror),
- * count the occurrence, unfreeze (if no other handled
- * hardware error bits are set), and continue. They can
- * occur if a processor speculative read is done to the PIO
- * buffer while we are sending a packet, for example.
- */
- if (hwerrs & TXE_PIO_PARITY) {
- qib_6120_txe_recover(dd);
- hwerrs &= ~TXE_PIO_PARITY;
- }
-
- if (!hwerrs) {
- static u32 freeze_cnt;
-
- freeze_cnt++;
- qib_6120_clear_freeze(dd);
- } else
- isfatal = 1;
- }
-
- *msg = '\0';
-
- if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
- isfatal = 1;
- strlcat(msg,
- "[Memory BIST test failed, InfiniPath hardware unusable]",
- msgl);
- /* ignore from now on, so disable until driver reloaded */
- dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
- }
-
- qib_format_hwerrors(hwerrs, qib_6120_hwerror_msgs,
- ARRAY_SIZE(qib_6120_hwerror_msgs), msg, msgl);
-
- bitsmsg = dd->cspec->bitsmsgbuf;
- if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
- QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
- bits = (u32) ((hwerrs >>
- QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
- QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
- snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
- "[PCIe Mem Parity Errs %x] ", bits);
- strlcat(msg, bitsmsg, msgl);
- }
-
- if (hwerrs & _QIB_PLL_FAIL) {
- isfatal = 1;
- snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
- "[PLL failed (%llx), InfiniPath hardware unusable]",
- (unsigned long long) hwerrs & _QIB_PLL_FAIL);
- strlcat(msg, bitsmsg, msgl);
- /* ignore from now on, so disable until driver reloaded */
- dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
- }
-
- if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
- /*
- * If it occurs, it is left masked since the external
- * interface is unused
- */
- dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
- }
-
- if (hwerrs)
- /*
- * if any set that we aren't ignoring; only
- * make the complaint once, in case it's stuck
- * or recurring, and we get here multiple
- * times.
- */
- qib_dev_err(dd, "%s hardware error\n", msg);
- else
- *msg = 0; /* recovered from all of them */
-
- if (isfatal && !dd->diag_client) {
- qib_dev_err(dd,
- "Fatal Hardware Error, no longer usable, SN %.16s\n",
- dd->serial);
- /*
- * for /sys status file and user programs to print; if no
- * trailing brace is copied, we'll know it was truncated.
- */
- if (dd->freezemsg)
- snprintf(dd->freezemsg, dd->freezelen,
- "{%s}", msg);
- qib_disable_after_error(dd);
- }
-}
-
-/*
- * Decode the error status into strings, deciding whether to always
- * print it or not depending on "normal packet errors" vs everything
- * else. Return 1 if "real" errors, otherwise 0 if only packet
- * errors, so caller can decide what to print with the string.
- */
-static int qib_decode_6120_err(struct qib_devdata *dd, char *buf, size_t blen,
- u64 err)
-{
- int iserr = 1;
-
- *buf = '\0';
- if (err & QLOGIC_IB_E_PKTERRS) {
- if (!(err & ~QLOGIC_IB_E_PKTERRS))
- iserr = 0;
- if ((err & ERR_MASK(RcvICRCErr)) &&
- !(err&(ERR_MASK(RcvVCRCErr)|ERR_MASK(RcvEBPErr))))
- strlcat(buf, "CRC ", blen);
- if (!iserr)
- goto done;
- }
- if (err & ERR_MASK(RcvHdrLenErr))
- strlcat(buf, "rhdrlen ", blen);
- if (err & ERR_MASK(RcvBadTidErr))
- strlcat(buf, "rbadtid ", blen);
- if (err & ERR_MASK(RcvBadVersionErr))
- strlcat(buf, "rbadversion ", blen);
- if (err & ERR_MASK(RcvHdrErr))
- strlcat(buf, "rhdr ", blen);
- if (err & ERR_MASK(RcvLongPktLenErr))
- strlcat(buf, "rlongpktlen ", blen);
- if (err & ERR_MASK(RcvMaxPktLenErr))
- strlcat(buf, "rmaxpktlen ", blen);
- if (err & ERR_MASK(RcvMinPktLenErr))
- strlcat(buf, "rminpktlen ", blen);
- if (err & ERR_MASK(SendMinPktLenErr))
- strlcat(buf, "sminpktlen ", blen);
- if (err & ERR_MASK(RcvFormatErr))
- strlcat(buf, "rformaterr ", blen);
- if (err & ERR_MASK(RcvUnsupportedVLErr))
- strlcat(buf, "runsupvl ", blen);
- if (err & ERR_MASK(RcvUnexpectedCharErr))
- strlcat(buf, "runexpchar ", blen);
- if (err & ERR_MASK(RcvIBFlowErr))
- strlcat(buf, "ribflow ", blen);
- if (err & ERR_MASK(SendUnderRunErr))
- strlcat(buf, "sunderrun ", blen);
- if (err & ERR_MASK(SendPioArmLaunchErr))
- strlcat(buf, "spioarmlaunch ", blen);
- if (err & ERR_MASK(SendUnexpectedPktNumErr))
- strlcat(buf, "sunexperrpktnum ", blen);
- if (err & ERR_MASK(SendDroppedSmpPktErr))
- strlcat(buf, "sdroppedsmppkt ", blen);
- if (err & ERR_MASK(SendMaxPktLenErr))
- strlcat(buf, "smaxpktlen ", blen);
- if (err & ERR_MASK(SendUnsupportedVLErr))
- strlcat(buf, "sunsupVL ", blen);
- if (err & ERR_MASK(InvalidAddrErr))
- strlcat(buf, "invalidaddr ", blen);
- if (err & ERR_MASK(RcvEgrFullErr))
- strlcat(buf, "rcvegrfull ", blen);
- if (err & ERR_MASK(RcvHdrFullErr))
- strlcat(buf, "rcvhdrfull ", blen);
- if (err & ERR_MASK(IBStatusChanged))
- strlcat(buf, "ibcstatuschg ", blen);
- if (err & ERR_MASK(RcvIBLostLinkErr))
- strlcat(buf, "riblostlink ", blen);
- if (err & ERR_MASK(HardwareErr))
- strlcat(buf, "hardware ", blen);
- if (err & ERR_MASK(ResetNegated))
- strlcat(buf, "reset ", blen);
-done:
- return iserr;
-}
-
-/*
- * Called when we might have an error that is specific to a particular
- * PIO buffer, and may need to cancel that buffer, so it can be re-used.
- */
-static void qib_disarm_6120_senderrbufs(struct qib_pportdata *ppd)
-{
- unsigned long sbuf[2];
- struct qib_devdata *dd = ppd->dd;
-
- /*
- * It's possible that sendbuffererror could have bits set; might
- * have already done this as a result of hardware error handling.
- */
- sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
- sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
-
- if (sbuf[0] || sbuf[1])
- qib_disarm_piobufs_set(dd, sbuf,
- dd->piobcnt2k + dd->piobcnt4k);
-}
-
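-/*
- * Check whether the chip's link-error-recovery counter has advanced since
- * our last snapshot. If it has, force the link down and return 0 so the
- * caller skips normal IB-status handling; a fresh snapshot is only taken
- * once the port reports ACTIVE again.
- */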
-static int chk_6120_linkrecovery(struct qib_devdata *dd, u64 ibcs)
-{
- int ret = 1;
- u32 ibstate = qib_6120_iblink_state(ibcs);
- u32 linkrecov = read_6120_creg32(dd, cr_iblinkerrrecov);
-
- if (linkrecov != dd->cspec->lastlinkrecov) {
- /* and no more until active again */
- dd->cspec->lastlinkrecov = 0;
- qib_set_linkstate(dd->pport, QIB_IB_LINKDOWN);
- ret = 0;
- }
- if (ibstate == IB_PORT_ACTIVE)
- dd->cspec->lastlinkrecov =
- read_6120_creg32(dd, cr_iblinkerrrecov);
- return ret;
-}
-
-static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
-{
- char *msg;
- u64 ignore_this_time = 0;
- u64 iserr = 0;
- int log_idx;
- struct qib_pportdata *ppd = dd->pport;
- u64 mask;
-
- /* don't report errors that are masked */
- errs &= dd->cspec->errormask;
- msg = dd->cspec->emsgbuf;
-
- /* do these first, they are most important */
- if (errs & ERR_MASK(HardwareErr))
- qib_handle_6120_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
- else
- for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
- if (errs & dd->eep_st_masks[log_idx].errs_to_log)
- qib_inc_eeprom_err(dd, log_idx, 1);
-
- if (errs & ~IB_E_BITSEXTANT)
- qib_dev_err(dd,
- "error interrupt with unknown errors %llx set\n",
- (unsigned long long) (errs & ~IB_E_BITSEXTANT));
-
- if (errs & E_SUM_ERRS) {
- qib_disarm_6120_senderrbufs(ppd);
- if ((errs & E_SUM_LINK_PKTERRS) &&
- !(ppd->lflags & QIBL_LINKACTIVE)) {
- /*
- * This can happen when trying to bring the link
- * up, but the IB link changes state at the "wrong"
- * time. The IB logic then complains that the packet
- * isn't valid. We don't want to confuse people, so
- * we just don't print them, except at debug
- */
- ignore_this_time = errs & E_SUM_LINK_PKTERRS;
- }
- } else if ((errs & E_SUM_LINK_PKTERRS) &&
- !(ppd->lflags & QIBL_LINKACTIVE)) {
- /*
- * This can happen when SMA is trying to bring the link
- * up, but the IB link changes state at the "wrong" time.
- * The IB logic then complains that the packet isn't
- * valid. We don't want to confuse people, so we just
- * don't print them, except at debug
- */
- ignore_this_time = errs & E_SUM_LINK_PKTERRS;
- }
-
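- /* acknowledge everything we observed; masked-off bits were dropped above */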
- qib_write_kreg(dd, kr_errclear, errs);
-
- errs &= ~ignore_this_time;
- if (!errs)
- goto done;
-
- /*
- * The ones we mask off are handled specially below
- * or above.
- */
- mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) |
- ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr);
- qib_decode_6120_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
-
- if (errs & E_SUM_PKTERRS)
- qib_stats.sps_rcverrs++;
- if (errs & E_SUM_ERRS)
- qib_stats.sps_txerrs++;
-
- iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS);
-
- if (errs & ERR_MASK(IBStatusChanged)) {
- u64 ibcs = qib_read_kreg64(dd, kr_ibcstatus);
- u32 ibstate = qib_6120_iblink_state(ibcs);
- int handle = 1;
-
- if (ibstate != IB_PORT_INIT && dd->cspec->lastlinkrecov)
- handle = chk_6120_linkrecovery(dd, ibcs);
- /*
- * Since going into a recovery state causes the link state
- * to go down and since recovery is transitory, it is better
- * if we "miss" ever seeing the link training state go into
- * recovery (i.e., ignore this transition for link state
- * special handling purposes) without updating lastibcstat.
- */
- if (handle && qib_6120_phys_portstate(ibcs) ==
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
- handle = 0;
- if (handle)
- qib_handle_e_ibstatuschanged(ppd, ibcs);
- }
-
- if (errs & ERR_MASK(ResetNegated)) {
- qib_dev_err(dd,
- "Got reset, requires re-init (unload and reload driver)\n");
- dd->flags &= ~QIB_INITTED; /* needs re-init */
- /* mark as having had error */
- *dd->devstatusp |= QIB_STATUS_HWERROR;
- *dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
- }
-
- if (*msg && iserr)
- qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
-
- if (ppd->state_wanted & ppd->lflags)
- wake_up_interruptible(&ppd->state_wait);
-
- /*
- * If there were hdrq or egrfull errors, wake up any processes
- * waiting in poll. We used to try to check which contexts had
- * the overflow, but given the cost of that and the chip reads
- * to support it, it's better to just wake everybody up if we
- * get an overflow; waiters can poll again if it's not them.
- */
- if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
- qib_handle_urcv(dd, ~0U);
- if (errs & ERR_MASK(RcvEgrFullErr))
- qib_stats.sps_buffull++;
- else
- qib_stats.sps_hdrfull++;
- }
-done:
- return;
-}
-
-/**
- * qib_6120_init_hwerrors - enable hardware errors
- * @dd: the qlogic_ib device
- *
- * now that we have finished initializing everything that might reasonably
- * cause a hardware error, and cleared those error bits as they occur,
- * we can enable hardware errors in the mask (potentially enabling
- * freeze mode), and enable hardware errors as errors (along with
- * everything else) in errormask
- */
-static void qib_6120_init_hwerrors(struct qib_devdata *dd)
-{
- u64 val;
- u64 extsval;
-
- extsval = qib_read_kreg64(dd, kr_extstatus);
-
- if (!(extsval & QLOGIC_IB_EXTS_MEMBIST_ENDTEST))
- qib_dev_err(dd, "MemBIST did not complete!\n");
-
- /* init so all hwerrors interrupt, and enter freeze, adjust below */
- val = ~0ULL;
- if (dd->minrev < 2) {
- /*
- * Avoid problem with internal interface bus parity
- * checking. Fixed in Rev2.
- */
- val &= ~QLOGIC_IB_HWE_PCIEBUSPARITYRADM;
- }
- /* avoid some intel cpu's speculative read freeze mode issue */
- val &= ~TXEMEMPARITYERR_PIOBUF;
-
- dd->cspec->hwerrmask = val;
-
- qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
-
- /* clear all */
- qib_write_kreg(dd, kr_errclear, ~0ULL);
- /* enable errors that are masked, at least this first time. */
- qib_write_kreg(dd, kr_errmask, ~0ULL);
- dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
- /* clear any interrupts up to this point (ints still not enabled) */
- qib_write_kreg(dd, kr_intclear, ~0ULL);
-
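- /* program the receive BTH QP check (QPN mask and kernel QP value) */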
- qib_write_kreg(dd, kr_rcvbthqp,
- dd->qpn_mask << (QIB_6120_RcvBTHQP_BTHQP_Mask_LSB - 1) |
- QIB_KD_QP);
-}
-
-/*
- * Disable and enable the armlaunch error. Used for PIO bandwidth testing
- * on chips that are count-based, rather than trigger-based. There is no
- * reference counting, but that's also fine, given the intended use.
- * Only chip-specific because it's all register accesses
- */
-static void qib_set_6120_armlaunch(struct qib_devdata *dd, u32 enable)
-{
- if (enable) {
- qib_write_kreg(dd, kr_errclear,
- ERR_MASK(SendPioArmLaunchErr));
- dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
- } else
- dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
- qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
-}
-
-/*
- * Formerly took parameter <which> in pre-shifted,
- * pre-merged form with LinkCmd and LinkInitCmd
- * together, and assuming the zero was NOP.
- */
-static void qib_set_ib_6120_lstate(struct qib_pportdata *ppd, u16 linkcmd,
- u16 linitcmd)
-{
- u64 mod_wd;
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
-
- if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
- /*
- * If we are told to disable, note that so link-recovery
- * code does not attempt to bring us back up.
- */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_LINK_DISABLED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
- /*
- * Any other linkinitcmd will lead to LINKDOWN and then
- * to INIT (if all is well), so clear flag to let
- * link-recovery code attempt to bring us back up.
- */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- }
-
- mod_wd = (linkcmd << QLOGIC_IB_IBCC_LINKCMD_SHIFT) |
- (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
-
- qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl | mod_wd);
- /* write to chip to prevent back-to-back writes of control reg */
- qib_write_kreg(dd, kr_scratch, 0);
-}
-
-/**
- * qib_6120_bringup_serdes - bring up the serdes
- * @ppd: physical port of the qlogic_ib device
- */
-static int qib_6120_bringup_serdes(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 val, config1, prev_val, hwstat, ibc;
-
- /* Put IBC in reset, sends disabled */
- dd->control &= ~QLOGIC_IB_C_LINKENABLE;
- qib_write_kreg(dd, kr_control, 0ULL);
-
- dd->cspec->ibdeltainprog = 1;
- dd->cspec->ibsymsnap = read_6120_creg32(dd, cr_ibsymbolerr);
- dd->cspec->iblnkerrsnap = read_6120_creg32(dd, cr_iblinkerrrecov);
-
- /* flowcontrolwatermark is in units of KBytes */
- ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
- /*
- * How often flowctrl sent. More or less in usecs; balance against
- * watermark value, so that in theory senders always get a flow
- * control update in time to not let the IB link go idle.
- */
- ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
- /* max error tolerance */
- dd->cspec->lli_thresh = 0xf;
- ibc |= (u64) dd->cspec->lli_thresh << SYM_LSB(IBCCtrl, PhyerrThreshold);
- /* use "real" buffer space for */
- ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
- /* IB credit flow control. */
- ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
- /*
- * set initial max size pkt IBC will send, including ICRC; it's the
- * PIO buffer size in dwords, less 1; also see qib_set_mtu()
- */
- ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
- dd->cspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */
-
- /* initially come up waiting for TS1, without sending anything. */
- val = dd->cspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
- QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
- qib_write_kreg(dd, kr_ibcctrl, val);
-
- val = qib_read_kreg64(dd, kr_serdes_cfg0);
- config1 = qib_read_kreg64(dd, kr_serdes_cfg1);
-
- /*
- * Force reset on, also set rxdetect enable. Must do before reading
- * serdesstatus at least for simulation, or some of the bits in
- * serdes status will come back as undefined and cause simulation
- * failures
- */
- val |= SYM_MASK(SerdesCfg0, ResetPLL) |
- SYM_MASK(SerdesCfg0, RxDetEnX) |
- (SYM_MASK(SerdesCfg0, L1PwrDnA) |
- SYM_MASK(SerdesCfg0, L1PwrDnB) |
- SYM_MASK(SerdesCfg0, L1PwrDnC) |
- SYM_MASK(SerdesCfg0, L1PwrDnD));
- qib_write_kreg(dd, kr_serdes_cfg0, val);
- /* be sure chip saw it */
- qib_read_kreg64(dd, kr_scratch);
- udelay(5); /* need pll reset set at least for a bit */
- /*
- * after PLL is reset, set the per-lane Resets and TxIdle and
- * clear the PLL reset and rxdetect (to get falling edge).
- * Leave L1PWR bits set (permanently)
- */
- val &= ~(SYM_MASK(SerdesCfg0, RxDetEnX) |
- SYM_MASK(SerdesCfg0, ResetPLL) |
- (SYM_MASK(SerdesCfg0, L1PwrDnA) |
- SYM_MASK(SerdesCfg0, L1PwrDnB) |
- SYM_MASK(SerdesCfg0, L1PwrDnC) |
- SYM_MASK(SerdesCfg0, L1PwrDnD)));
- val |= (SYM_MASK(SerdesCfg0, ResetA) |
- SYM_MASK(SerdesCfg0, ResetB) |
- SYM_MASK(SerdesCfg0, ResetC) |
- SYM_MASK(SerdesCfg0, ResetD)) |
- SYM_MASK(SerdesCfg0, TxIdeEnX);
- qib_write_kreg(dd, kr_serdes_cfg0, val);
- /* be sure chip saw it */
- (void) qib_read_kreg64(dd, kr_scratch);
- /* need PLL reset clear for at least 11 usec before lane
- * resets cleared; give it a few more to be sure */
- udelay(15);
- val &= ~((SYM_MASK(SerdesCfg0, ResetA) |
- SYM_MASK(SerdesCfg0, ResetB) |
- SYM_MASK(SerdesCfg0, ResetC) |
- SYM_MASK(SerdesCfg0, ResetD)) |
- SYM_MASK(SerdesCfg0, TxIdeEnX));
-
- qib_write_kreg(dd, kr_serdes_cfg0, val);
- /* be sure chip saw it */
- (void) qib_read_kreg64(dd, kr_scratch);
-
- val = qib_read_kreg64(dd, kr_xgxs_cfg);
- prev_val = val;
- if (val & QLOGIC_IB_XGXS_RESET)
- val &= ~QLOGIC_IB_XGXS_RESET;
- if (SYM_FIELD(val, XGXSCfg, polarity_inv) != ppd->rx_pol_inv) {
- /* need to compensate for Tx inversion in partner */
- val &= ~SYM_MASK(XGXSCfg, polarity_inv);
- val |= (u64)ppd->rx_pol_inv << SYM_LSB(XGXSCfg, polarity_inv);
- }
- if (val != prev_val)
- qib_write_kreg(dd, kr_xgxs_cfg, val);
-
- val = qib_read_kreg64(dd, kr_serdes_cfg0);
-
- /* clear current and de-emphasis bits */
- config1 &= ~0x0ffffffff00ULL;
- /* set current to 20ma */
- config1 |= 0x00000000000ULL;
- /* set de-emphasis to -5.68dB */
- config1 |= 0x0cccc000000ULL;
- qib_write_kreg(dd, kr_serdes_cfg1, config1);
-
- /* base and port guid same for single port */
- ppd->guid = dd->base_guid;
-
- /*
- * the process of setting and un-resetting the serdes normally
- * causes a serdes PLL error, so check for that and clear it
- * here. Also clear the hwerr bit in errstatus, but not others.
- */
- hwstat = qib_read_kreg64(dd, kr_hwerrstatus);
- if (hwstat) {
- /* should just have PLL, clear all set, in any case */
- qib_write_kreg(dd, kr_hwerrclear, hwstat);
- qib_write_kreg(dd, kr_errclear, ERR_MASK(HardwareErr));
- }
-
- dd->control |= QLOGIC_IB_C_LINKENABLE;
- dd->control &= ~QLOGIC_IB_C_FREEZEMODE;
- qib_write_kreg(dd, kr_control, dd->control);
-
- return 0;
-}
-
-/**
- * qib_6120_quiet_serdes - set serdes to txidle
- * @ppd: physical port of the qlogic_ib device
- * Called when driver is being unloaded
- */
-static void qib_6120_quiet_serdes(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 val;
-
- qib_set_ib_6120_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
-
- /* disable IBC */
- dd->control &= ~QLOGIC_IB_C_LINKENABLE;
- qib_write_kreg(dd, kr_control,
- dd->control | QLOGIC_IB_C_FREEZEMODE);
-
- if (dd->cspec->ibsymdelta || dd->cspec->iblnkerrdelta ||
- dd->cspec->ibdeltainprog) {
- u64 diagc;
-
- /* enable counter writes */
- diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
- qib_write_kreg(dd, kr_hwdiagctrl,
- diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
-
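- /*
- * Roll the chip error counters back by the deltas recorded since
- * serdes bringup, so link-training noise isn't reported as real
- * symbol or link-recovery errors.
- */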
- if (dd->cspec->ibsymdelta || dd->cspec->ibdeltainprog) {
- val = read_6120_creg32(dd, cr_ibsymbolerr);
- if (dd->cspec->ibdeltainprog)
- val -= val - dd->cspec->ibsymsnap;
- val -= dd->cspec->ibsymdelta;
- write_6120_creg(dd, cr_ibsymbolerr, val);
- }
- if (dd->cspec->iblnkerrdelta || dd->cspec->ibdeltainprog) {
- val = read_6120_creg32(dd, cr_iblinkerrrecov);
- if (dd->cspec->ibdeltainprog)
- val -= val - dd->cspec->iblnkerrsnap;
- val -= dd->cspec->iblnkerrdelta;
- write_6120_creg(dd, cr_iblinkerrrecov, val);
- }
-
- /* and disable counter writes */
- qib_write_kreg(dd, kr_hwdiagctrl, diagc);
- }
-
- val = qib_read_kreg64(dd, kr_serdes_cfg0);
- val |= SYM_MASK(SerdesCfg0, TxIdeEnX);
- qib_write_kreg(dd, kr_serdes_cfg0, val);
-}
-
-/**
- * qib_6120_setup_setextled - set the state of the two external LEDs
- * @ppd: physical port of the qlogic_ib device
- * @on: whether the link is up or not
- *
- * The exact combo of LEDs if on is true is determined by looking
- * at the ibcstatus.
- *
- * These LEDs indicate the physical and logical state of IB link.
- * For this chip (at least with recommended board pinouts), LED1
- * is Yellow (logical state) and LED2 is Green (physical state).
- *
- * Note: We try to match the Mellanox HCA LED behavior as best
- * we can. Green indicates physical link state is OK (something is
- * plugged in, and we can train).
- * Amber indicates the link is logically up (ACTIVE).
- * Mellanox further blinks the amber LED to indicate data packet
- * activity, but we have no hardware support for that, so it would
- * require waking up every 10-20 msecs and checking the counters
- * on the chip, and then turning the LED off if appropriate. That's
- * visible overhead, so not something we will do.
- *
- */
-static void qib_6120_setup_setextled(struct qib_pportdata *ppd, u32 on)
-{
- u64 extctl, val, lst, ltst;
- unsigned long flags;
- struct qib_devdata *dd = ppd->dd;
-
- /*
- * The diags use the LED to indicate diag info, so we leave
- * the external LED alone when the diags are running.
- */
- if (dd->diag_client)
- return;
-
- /* Allow override of LED display, e.g. for locating the system in a rack */
- if (ppd->led_override) {
- ltst = (ppd->led_override & QIB_LED_PHYS) ?
- IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED,
- lst = (ppd->led_override & QIB_LED_LOG) ?
- IB_PORT_ACTIVE : IB_PORT_DOWN;
- } else if (on) {
- val = qib_read_kreg64(dd, kr_ibcstatus);
- ltst = qib_6120_phys_portstate(val);
- lst = qib_6120_iblink_state(val);
- } else {
- ltst = 0;
- lst = 0;
- }
-
- spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
- extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
- SYM_MASK(EXTCtrl, LEDPriPortYellowOn));
-
- if (ltst == IB_PHYSPORTSTATE_LINKUP)
- extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
- if (lst == IB_PORT_ACTIVE)
- extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
- dd->cspec->extctrl = extctl;
- qib_write_kreg(dd, kr_extctrl, extctl);
- spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
-}
-
-static void qib_6120_free_irq(struct qib_devdata *dd)
-{
- if (dd->cspec->irq) {
- free_irq(dd->cspec->irq, dd);
- dd->cspec->irq = 0;
- }
- qib_nomsi(dd);
-}
-
-/**
- * qib_6120_setup_cleanup - clean up any per-chip chip-specific stuff
- * @dd: the qlogic_ib device
- *
- * This is called during driver unload.
- */
-static void qib_6120_setup_cleanup(struct qib_devdata *dd)
-{
- qib_6120_free_irq(dd);
- kfree(dd->cspec->cntrs);
- kfree(dd->cspec->portcntrs);
- if (dd->cspec->dummy_hdrq) {
- dma_free_coherent(&dd->pcidev->dev,
- ALIGN(dd->rcvhdrcnt *
- dd->rcvhdrentsize *
- sizeof(u32), PAGE_SIZE),
- dd->cspec->dummy_hdrq,
- dd->cspec->dummy_hdrq_phys);
- dd->cspec->dummy_hdrq = NULL;
- }
-}
-
-static void qib_wantpiobuf_6120_intr(struct qib_devdata *dd, u32 needint)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- if (needint)
- dd->sendctrl |= SYM_MASK(SendCtrl, PIOIntBufAvail);
- else
- dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOIntBufAvail);
- qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
-}
-
-/*
- * handle errors and unusual events first, separate function
- * to improve cache hits for fast path interrupt handling
- */
-static noinline void unlikely_6120_intr(struct qib_devdata *dd, u64 istat)
-{
- if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
- qib_dev_err(dd, "interrupt with unknown interrupts %Lx set\n",
- istat & ~QLOGIC_IB_I_BITSEXTANT);
-
- if (istat & QLOGIC_IB_I_ERROR) {
- u64 estat = 0;
-
- qib_stats.sps_errints++;
- estat = qib_read_kreg64(dd, kr_errstatus);
- if (!estat)
- qib_devinfo(dd->pcidev,
- "error interrupt (%Lx), but no error bits set!\n",
- istat);
- handle_6120_errors(dd, estat);
- }
-
- if (istat & QLOGIC_IB_I_GPIO) {
- u32 gpiostatus;
- u32 to_clear = 0;
-
- /*
- * GPIO_3..5 on IBA6120 Rev2 chips indicate
- * errors that we need to count.
- */
- gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
- /* First the error-counter case. */
- if (gpiostatus & GPIO_ERRINTR_MASK) {
- /* want to clear the bits we see asserted. */
- to_clear |= (gpiostatus & GPIO_ERRINTR_MASK);
-
- /*
- * Count appropriately, clear bits out of our copy,
- * as they have been "handled".
- */
- if (gpiostatus & (1 << GPIO_RXUVL_BIT))
- dd->cspec->rxfc_unsupvl_errs++;
- if (gpiostatus & (1 << GPIO_OVRUN_BIT))
- dd->cspec->overrun_thresh_errs++;
- if (gpiostatus & (1 << GPIO_LLI_BIT))
- dd->cspec->lli_errs++;
- gpiostatus &= ~GPIO_ERRINTR_MASK;
- }
- if (gpiostatus) {
- /*
- * Some unexpected bits remain. If they could have
- * caused the interrupt, complain and clear.
- * To avoid repetition of this condition, also clear
- * the mask. It is almost certainly due to error.
- */
- const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
-
- /*
- * Also check that the chip reflects our shadow, and
- * report issues; if they caused the interrupt, we
- * suppress them by refreshing from the shadow.
- */
- if (mask & gpiostatus) {
- to_clear |= (gpiostatus & mask);
- dd->cspec->gpio_mask &= ~(gpiostatus & mask);
- qib_write_kreg(dd, kr_gpio_mask,
- dd->cspec->gpio_mask);
- }
- }
- if (to_clear)
- qib_write_kreg(dd, kr_gpio_clear, (u64) to_clear);
- }
-}
-
-static irqreturn_t qib_6120intr(int irq, void *data)
-{
- struct qib_devdata *dd = data;
- irqreturn_t ret;
- u32 istat, ctxtrbits, rmask, crcs = 0;
- unsigned i;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- ret = IRQ_HANDLED;
- goto bail;
- }
-
- istat = qib_read_kreg32(dd, kr_intstatus);
-
- if (unlikely(!istat)) {
- ret = IRQ_NONE; /* not our interrupt, or already handled */
- goto bail;
- }
- if (unlikely(istat == -1)) {
- qib_bad_intrstatus(dd);
- /* don't know if it was our interrupt or not */
- ret = IRQ_NONE;
- goto bail;
- }
-
- this_cpu_inc(*dd->int_counter);
-
- if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
- QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
- unlikely_6120_intr(dd, istat);
-
- /*
- * Clear the interrupt bits we found set, relatively early, so we
- * "know" know the chip will have seen this by the time we process
- * the queue, and will re-interrupt if necessary. The processor
- * itself won't take the interrupt again until we return.
- */
- qib_write_kreg(dd, kr_intclear, istat);
-
- /*
- * Handle kernel receive queues before checking for pio buffers
- * available since receives can overflow; piobuf waiters can afford
- * a few extra cycles, since they were waiting anyway.
- */
- ctxtrbits = istat &
- ((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
- (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
- if (ctxtrbits) {
- rmask = (1U << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
- (1U << QLOGIC_IB_I_RCVURG_SHIFT);
- for (i = 0; i < dd->first_user_ctxt; i++) {
- if (ctxtrbits & rmask) {
- ctxtrbits &= ~rmask;
- crcs += qib_kreceive(dd->rcd[i],
- &dd->cspec->lli_counter,
- NULL);
- }
- rmask <<= 1;
- }
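- /*
- * Fold any receive CRC hits into the local-link-integrity count;
- * crossing the threshold records one LLI error and restarts the
- * running count.
- */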
- if (crcs) {
- u32 cntr = dd->cspec->lli_counter;
-
- cntr += crcs;
- if (cntr) {
- if (cntr > dd->cspec->lli_thresh) {
- dd->cspec->lli_counter = 0;
- dd->cspec->lli_errs++;
- } else
- dd->cspec->lli_counter += cntr;
- }
- }
-
-
- if (ctxtrbits) {
- ctxtrbits =
- (ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
- (ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
- qib_handle_urcv(dd, ctxtrbits);
- }
- }
-
- if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
- qib_ib_piobufavail(dd);
-
- ret = IRQ_HANDLED;
-bail:
- return ret;
-}
-
-/*
- * Set up our chip-specific interrupt handler
- * The interrupt type has already been setup, so
- * we just need to do the registration and error checking.
- */
-static void qib_setup_6120_interrupt(struct qib_devdata *dd)
-{
- /*
- * If the chip supports added error indication via GPIO pins,
- * enable interrupts on those bits so the interrupt routine
- * can count the events. Also set flag so interrupt routine
- * can know they are expected.
- */
- if (SYM_FIELD(dd->revision, Revision_R,
- ChipRevMinor) > 1) {
- /* Rev2+ reports extra errors via internal GPIO pins */
- dd->cspec->gpio_mask |= GPIO_ERRINTR_MASK;
- qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
- }
-
- if (!dd->cspec->irq)
- qib_dev_err(dd,
- "irq is 0, BIOS error? Interrupts won't work\n");
- else {
- int ret;
-
- ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
- QIB_DRV_NAME, dd);
- if (ret)
- qib_dev_err(dd,
- "Couldn't setup interrupt (irq=%d): %d\n",
- dd->cspec->irq, ret);
- }
-}
-
-/**
- * pe_boardname - fill in the board name
- * @dd: the qlogic_ib device
- *
- * info is based on the board revision register
- */
-static void pe_boardname(struct qib_devdata *dd)
-{
- char *n;
- u32 boardid, namelen;
-
- boardid = SYM_FIELD(dd->revision, Revision,
- BoardID);
-
- switch (boardid) {
- case 2:
- n = "InfiniPath_QLE7140";
- break;
- default:
- qib_dev_err(dd, "Unknown 6120 board with ID %u\n", boardid);
- n = "Unknown_InfiniPath_6120";
- break;
- }
- namelen = strlen(n) + 1;
- dd->boardname = kmalloc(namelen, GFP_KERNEL);
- if (!dd->boardname)
- qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
- else
- snprintf(dd->boardname, namelen, "%s", n);
-
- if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2)
- qib_dev_err(dd,
- "Unsupported InfiniPath hardware revision %u.%u!\n",
- dd->majrev, dd->minrev);
-
- snprintf(dd->boardversion, sizeof(dd->boardversion),
- "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
- QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
- (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
- dd->majrev, dd->minrev,
- (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
-
-}
-
-/*
- * This routine sleeps, so it can only be called from user context, not
- * from interrupt context. If we need interrupt context, we can split
- * it into two routines.
- */
-static int qib_6120_setup_reset(struct qib_devdata *dd)
-{
- u64 val;
- int i;
- int ret;
- u16 cmdval;
- u8 int_line, clinesz;
-
- qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
-
- /* Use ERROR so it shows up in logs, etc. */
- qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
-
- /* no interrupts till re-initted */
- qib_6120_set_intr_state(dd, 0);
-
- dd->cspec->ibdeltainprog = 0;
- dd->cspec->ibsymdelta = 0;
- dd->cspec->iblnkerrdelta = 0;
-
- /*
- * Keep chip from being accessed until we are ready. Use
- * writeq() directly, to allow the write even though QIB_PRESENT
- * isn't set.
- */
- dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
- /* so we check interrupts work again */
- dd->z_int_counter = qib_int_counter(dd);
- val = dd->control | QLOGIC_IB_C_RESET;
- writeq(val, &dd->kregbase[kr_control]);
- mb(); /* prevent compiler re-ordering around actual reset */
-
- for (i = 1; i <= 5; i++) {
- /*
- * Allow MBIST, etc. to complete; longer on each retry.
- * We sometimes get machine checks from bus timeout if no
- * response, so for now, make it *really* long.
- */
- msleep(1000 + (1 + i) * 2000);
-
- qib_pcie_reenable(dd, cmdval, int_line, clinesz);
-
- /*
- * Use readq directly, so we don't need to mark it as PRESENT
- * until we get a successful indication that all is well.
- */
- val = readq(&dd->kregbase[kr_revision]);
- if (val == dd->revision) {
- dd->flags |= QIB_PRESENT; /* it's back */
- ret = qib_reinit_intr(dd);
- goto bail;
- }
- }
- ret = 0; /* failed */
-
-bail:
- if (ret) {
- if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
- qib_dev_err(dd,
- "Reset failed to setup PCIe or interrupts; continuing anyway\n");
- /* clear the reset error, init error/hwerror mask */
- qib_6120_init_hwerrors(dd);
- /* for Rev2 error interrupts; nop for rev 1 */
- qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
- /* clear the reset error, init error/hwerror mask */
- qib_6120_init_hwerrors(dd);
- }
- return ret;
-}
-
-/**
- * qib_6120_put_tid - write a TID in chip
- * @dd: the qlogic_ib device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
- * for expected
- * @pa: physical address of in memory buffer; tidinvalid if freeing
- *
- * This exists as a separate routine to allow for special locking etc.
- * It's used for both the full cleanup on exit, as well as the normal
- * setup and teardown.
- */
-static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
- u32 type, unsigned long pa)
-{
- u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
- unsigned long flags;
- int tidx;
- spinlock_t *tidlockp; /* select appropriate spinlock */
-
- if (!dd->kregbase)
- return;
-
- if (pa != dd->tidinvalid) {
- if (pa & ((1U << 11) - 1)) {
- qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
- pa);
- return;
- }
- pa >>= 11;
- if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
- qib_dev_err(dd,
- "Physical page address 0x%lx larger than supported\n",
- pa);
- return;
- }
-
- if (type == RCVHQ_RCV_TYPE_EAGER)
- pa |= dd->tidtemplate;
- else /* for now, always full 4KB page */
- pa |= 2 << 29;
- }
-
- /*
- * Avoid chip issue by writing the scratch register
- * before and after the TID, and with an io write barrier.
- * We use a spinlock around the writes, so they can't intermix
- * with other TID (eager or expected) writes (the chip problem
- * is triggered by back to back TID writes). Unfortunately, this
- * call can be done from interrupt level for the ctxt 0 eager TIDs,
- * so we have to use irqsave locks.
- */
- /*
- * Assumes tidptr always > egrtidbase
- * if type == RCVHQ_RCV_TYPE_EAGER.
- */
- tidx = tidptr - dd->egrtidbase;
-
- tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->rcvhdrcnt)
- ? &dd->cspec->kernel_tid_lock : &dd->cspec->user_tid_lock;
- spin_lock_irqsave(tidlockp, flags);
- qib_write_kreg(dd, kr_scratch, 0xfeeddeaf);
- writel(pa, tidp32);
- qib_write_kreg(dd, kr_scratch, 0xdeadbeef);
- mmiowb();
- spin_unlock_irqrestore(tidlockp, flags);
-}
-
-/**
- * qib_6120_put_tid_2 - write a TID in chip, Revision 2 or higher
- * @dd: the qlogic_ib device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
- * for expected
- * @pa: physical address of in memory buffer; tidinvalid if freeing
- *
- * This exists as a separate routine to allow for selection of the
- * appropriate "flavor". The static calls in cleanup just use the
- * revision-agnostic form, as they are not performance critical.
- */
-static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
- u32 type, unsigned long pa)
-{
- u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
- u32 tidx;
-
- if (!dd->kregbase)
- return;
-
- if (pa != dd->tidinvalid) {
- if (pa & ((1U << 11) - 1)) {
- qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
- pa);
- return;
- }
- pa >>= 11;
- if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
- qib_dev_err(dd,
- "Physical page address 0x%lx larger than supported\n",
- pa);
- return;
- }
-
- if (type == RCVHQ_RCV_TYPE_EAGER)
- pa |= dd->tidtemplate;
- else /* for now, always full 4KB page */
- pa |= 2 << 29;
- }
- tidx = tidptr - dd->egrtidbase;
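- /* rev2 and later don't need the scratch-register/locking workaround */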
- writel(pa, tidp32);
- mmiowb();
-}
-
-
-/**
- * qib_6120_clear_tids - clear all TID entries for a context, expected and eager
- * @dd: the qlogic_ib device
- * @rcd: the context
- *
- * clear all TID entries for a context, expected and eager.
- * Used from qib_close(). On this chip, TIDs are only 32 bits,
- * not 64, but they are still on 64 bit boundaries, so tidbase
- * is declared as u64 * for the pointer math, even though we write 32 bits
- */
-static void qib_6120_clear_tids(struct qib_devdata *dd,
- struct qib_ctxtdata *rcd)
-{
- u64 __iomem *tidbase;
- unsigned long tidinv;
- u32 ctxt;
- int i;
-
- if (!dd->kregbase || !rcd)
- return;
-
- ctxt = rcd->ctxt;
-
- tidinv = dd->tidinvalid;
- tidbase = (u64 __iomem *)
- ((char __iomem *)(dd->kregbase) +
- dd->rcvtidbase +
- ctxt * dd->rcvtidcnt * sizeof(*tidbase));
-
- for (i = 0; i < dd->rcvtidcnt; i++)
- /* use func pointer because could be one of two funcs */
- dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
- tidinv);
-
- tidbase = (u64 __iomem *)
- ((char __iomem *)(dd->kregbase) +
- dd->rcvegrbase +
- rcd->rcvegr_tid_base * sizeof(*tidbase));
-
- for (i = 0; i < rcd->rcvegrcnt; i++)
- /* use func pointer because could be one of two funcs */
- dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
- tidinv);
-}
-
-/**
- * qib_6120_tidtemplate - setup constants for TID updates
- * @dd: the qlogic_ib device
- *
- * We setup stuff that we use a lot, to avoid calculating each time
- */
-static void qib_6120_tidtemplate(struct qib_devdata *dd)
-{
- u32 egrsize = dd->rcvegrbufsize;
-
- /*
- * For now, we always allocate 4KB buffers (at init) so we can
- * receive max size packets. We may want a module parameter to
- * specify 2KB or 4KB and/or make it per ctxt instead of per device
- * for those who want to reduce memory footprint. Note that the
- * rcvhdrentsize size must be large enough to hold the largest
- * IB header (currently 96 bytes) that we expect to handle (plus of
- * course the 2 dwords of RHF).
- */
- if (egrsize == 2048)
- dd->tidtemplate = 1U << 29;
- else if (egrsize == 4096)
- dd->tidtemplate = 2U << 29;
- dd->tidinvalid = 0;
-}
-
-int __attribute__((weak)) qib_unordered_wc(void)
-{
- return 0;
-}
-
-/**
- * qib_6120_get_base_info - set chip-specific flags for user code
- * @rcd: the qlogic_ib ctxt
- * @kinfo: qib_base_info pointer
- *
- * We set the PCIE flag because the lower bandwidth on PCIe vs
- * HyperTransport can affect some user packet algorithms.
- */
-static int qib_6120_get_base_info(struct qib_ctxtdata *rcd,
- struct qib_base_info *kinfo)
-{
- if (qib_unordered_wc())
- kinfo->spi_runtime_flags |= QIB_RUNTIME_FORCE_WC_ORDER;
-
- kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
- QIB_RUNTIME_FORCE_PIOAVAIL | QIB_RUNTIME_PIO_REGSWAPPED;
- return 0;
-}
-
-
-static struct qib_message_header *
-qib_6120_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
-{
- return (struct qib_message_header *)
- &rhf_addr[sizeof(u64) / sizeof(u32)];
-}
-
-static void qib_6120_config_ctxts(struct qib_devdata *dd)
-{
- dd->ctxtcnt = qib_read_kreg32(dd, kr_portcnt);
- if (qib_n_krcv_queues > 1) {
- dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
- if (dd->first_user_ctxt > dd->ctxtcnt)
- dd->first_user_ctxt = dd->ctxtcnt;
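- /* use a wider QPN mask when more than two contexts stay with the kernel */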
- dd->qpn_mask = dd->first_user_ctxt <= 2 ? 2 : 6;
- } else
- dd->first_user_ctxt = dd->num_pports;
- dd->n_krcv_queues = dd->first_user_ctxt;
-}
-
-static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
- u32 updegr, u32 egrhd, u32 npkts)
-{
- if (updegr)
- qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
- mmiowb();
- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
- mmiowb();
-}
-
-static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd)
-{
- u32 head, tail;
-
- head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
- if (rcd->rcvhdrtail_kvaddr)
- tail = qib_get_rcvhdrtail(rcd);
- else
- tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
- return head == tail;
-}
-
-/*
- * Used when we close any ctxt, for DMA already in flight
- * at close. Can't be done until we know hdrq size, so not
- * early in chip init.
- */
-static void alloc_dummy_hdrq(struct qib_devdata *dd)
-{
- dd->cspec->dummy_hdrq = dma_alloc_coherent(&dd->pcidev->dev,
- dd->rcd[0]->rcvhdrq_size,
- &dd->cspec->dummy_hdrq_phys,
- GFP_ATOMIC | __GFP_COMP);
- if (!dd->cspec->dummy_hdrq) {
- qib_devinfo(dd->pcidev, "Couldn't allocate dummy hdrq\n");
- /* fallback to just 0'ing */
- dd->cspec->dummy_hdrq_phys = 0UL;
- }
-}
-
-/*
- * Modify the RCVCTRL register in chip-specific way. This
- * is a function because bit positions and (future) register
- * location is chip-specific, but the needed operations are
- * generic. <op> is a bit-mask because we often want to
- * do multiple modifications.
- */
-static void rcvctrl_6120_mod(struct qib_pportdata *ppd, unsigned int op,
- int ctxt)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 mask, val;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
-
- if (op & QIB_RCVCTRL_TAILUPD_ENB)
- dd->rcvctrl |= (1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
- if (op & QIB_RCVCTRL_TAILUPD_DIS)
- dd->rcvctrl &= ~(1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
- if (op & QIB_RCVCTRL_PKEY_ENB)
- dd->rcvctrl &= ~(1ULL << IBA6120_R_PKEY_DIS_SHIFT);
- if (op & QIB_RCVCTRL_PKEY_DIS)
- dd->rcvctrl |= (1ULL << IBA6120_R_PKEY_DIS_SHIFT);
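- /* a negative ctxt applies the operation to all contexts */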
- if (ctxt < 0)
- mask = (1ULL << dd->ctxtcnt) - 1;
- else
- mask = (1ULL << ctxt);
- if (op & QIB_RCVCTRL_CTXT_ENB) {
- /* always done for specific ctxt */
- dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
- if (!(dd->flags & QIB_NODMA_RTAIL))
- dd->rcvctrl |= 1ULL << QLOGIC_IB_R_TAILUPD_SHIFT;
- /* Write these registers before the context is enabled. */
- qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
- dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
- qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
- dd->rcd[ctxt]->rcvhdrq_phys);
-
- if (ctxt == 0 && !dd->cspec->dummy_hdrq)
- alloc_dummy_hdrq(dd);
- }
- if (op & QIB_RCVCTRL_CTXT_DIS)
- dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
- if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
- dd->rcvctrl |= (mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
- if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
- dd->rcvctrl &= ~(mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
- qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
- if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
- /* arm rcv interrupt */
- val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
- dd->rhdrhead_intr_off;
- qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
- }
- if (op & QIB_RCVCTRL_CTXT_ENB) {
- /*
- * Init the context registers also; if we were
- * disabled, tail and head should both be zero
- * already from the enable, but since we don't
- * know, we have to do it explicitly.
- */
- val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
- qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
-
- val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
- dd->rcd[ctxt]->head = val;
- /* If kctxt, interrupt on next receive. */
- if (ctxt < dd->first_user_ctxt)
- val |= dd->rhdrhead_intr_off;
- qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
- }
- if (op & QIB_RCVCTRL_CTXT_DIS) {
- /*
- * Be paranoid, and never write 0's to these, just use an
- * unused page. Of course,
- * rcvhdraddr points to a large chunk of memory, so this
- * could still trash things, but at least it won't trash
- * page 0, and by disabling the ctxt, it should stop "soon",
- * even if a packet or two is already in flight after we
- * disabled the ctxt. Only 6120 has this issue.
- */
- if (ctxt >= 0) {
- qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
- dd->cspec->dummy_hdrq_phys);
- qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
- dd->cspec->dummy_hdrq_phys);
- } else {
- unsigned i;
-
- for (i = 0; i < dd->cfgctxts; i++) {
- qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
- i, dd->cspec->dummy_hdrq_phys);
- qib_write_kreg_ctxt(dd, kr_rcvhdraddr,
- i, dd->cspec->dummy_hdrq_phys);
- }
- }
- }
- spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
-}
-
-/*
- * Modify the SENDCTRL register in a chip-specific way. This is
- * a function because there may be multiple such registers with
- * slightly different layouts. Only the operations actually used
- * are implemented so far.
- * The chip requires no back-to-back sendctrl writes, so write the
- * scratch register after writing sendctrl.
- */
-static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 tmp_dd_sendctrl;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
-
- /* First the ones that are "sticky", saved in shadow */
- if (op & QIB_SENDCTRL_CLEAR)
- dd->sendctrl = 0;
- if (op & QIB_SENDCTRL_SEND_DIS)
- dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOEnable);
- else if (op & QIB_SENDCTRL_SEND_ENB)
- dd->sendctrl |= SYM_MASK(SendCtrl, PIOEnable);
- if (op & QIB_SENDCTRL_AVAIL_DIS)
- dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
- else if (op & QIB_SENDCTRL_AVAIL_ENB)
- dd->sendctrl |= SYM_MASK(SendCtrl, PIOBufAvailUpd);
-
- if (op & QIB_SENDCTRL_DISARM_ALL) {
- u32 i, last;
-
- tmp_dd_sendctrl = dd->sendctrl;
- /*
- * disarm any that are not yet launched, disabling sends
- * and updates until done.
- */
- last = dd->piobcnt2k + dd->piobcnt4k;
- tmp_dd_sendctrl &=
- ~(SYM_MASK(SendCtrl, PIOEnable) |
- SYM_MASK(SendCtrl, PIOBufAvailUpd));
- for (i = 0; i < last; i++) {
- qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl |
- SYM_MASK(SendCtrl, Disarm) | i);
- qib_write_kreg(dd, kr_scratch, 0);
- }
- }
-
- tmp_dd_sendctrl = dd->sendctrl;
-
- if (op & QIB_SENDCTRL_FLUSH)
- tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
- if (op & QIB_SENDCTRL_DISARM)
- tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
- ((op & QIB_6120_SendCtrl_DisarmPIOBuf_RMASK) <<
- SYM_LSB(SendCtrl, DisarmPIOBuf));
- if (op & QIB_SENDCTRL_AVAIL_BLIP)
- tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
-
- qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
-
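- /* for a blip, restore the shadow value so buffer-avail updates resume */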
- if (op & QIB_SENDCTRL_AVAIL_BLIP) {
- qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
-
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
-
- if (op & QIB_SENDCTRL_FLUSH) {
- u32 v;
- /*
- * ensure writes have hit chip, then do a few
- * more reads, to allow DMA of pioavail registers
- * to occur, so in-memory copy is in sync with
- * the chip. Not always safe to sleep.
- */
- v = qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg(dd, kr_scratch, v);
- v = qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg(dd, kr_scratch, v);
- qib_read_kreg32(dd, kr_scratch);
- }
-}
-
-/**
- * qib_portcntr_6120 - read a per-port counter
- * @ppd: physical port of the qlogic_ib device
- * @reg: the counter to snapshot
- */
-static u64 qib_portcntr_6120(struct qib_pportdata *ppd, u32 reg)
-{
- u64 ret = 0ULL;
- struct qib_devdata *dd = ppd->dd;
- u16 creg;
- /* 0xffff for unimplemented or synthesized counters */
- static const u16 xlator[] = {
- [QIBPORTCNTR_PKTSEND] = cr_pktsend,
- [QIBPORTCNTR_WORDSEND] = cr_wordsend,
- [QIBPORTCNTR_PSXMITDATA] = 0xffff,
- [QIBPORTCNTR_PSXMITPKTS] = 0xffff,
- [QIBPORTCNTR_PSXMITWAIT] = 0xffff,
- [QIBPORTCNTR_SENDSTALL] = cr_sendstall,
- [QIBPORTCNTR_PKTRCV] = cr_pktrcv,
- [QIBPORTCNTR_PSRCVDATA] = 0xffff,
- [QIBPORTCNTR_PSRCVPKTS] = 0xffff,
- [QIBPORTCNTR_RCVEBP] = cr_rcvebp,
- [QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
- [QIBPORTCNTR_WORDRCV] = cr_wordrcv,
- [QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
- [QIBPORTCNTR_RXLOCALPHYERR] = 0xffff,
- [QIBPORTCNTR_RXVLERR] = 0xffff,
- [QIBPORTCNTR_ERRICRC] = cr_erricrc,
- [QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
- [QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
- [QIBPORTCNTR_BADFORMAT] = cr_badformat,
- [QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
- [QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
- [QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
- [QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
- [QIBPORTCNTR_EXCESSBUFOVFL] = 0xffff,
- [QIBPORTCNTR_ERRLINK] = cr_errlink,
- [QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
- [QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
- [QIBPORTCNTR_LLI] = 0xffff,
- [QIBPORTCNTR_PSINTERVAL] = 0xffff,
- [QIBPORTCNTR_PSSTART] = 0xffff,
- [QIBPORTCNTR_PSSTAT] = 0xffff,
- [QIBPORTCNTR_VL15PKTDROP] = 0xffff,
- [QIBPORTCNTR_ERRPKEY] = cr_errpkey,
- [QIBPORTCNTR_KHDROVFL] = 0xffff,
- };
-
- if (reg >= ARRAY_SIZE(xlator)) {
- qib_devinfo(ppd->dd->pcidev,
- "Unimplemented portcounter %u\n", reg);
- goto done;
- }
- creg = xlator[reg];
-
- /* handle counters requests not implemented as chip counters */
- if (reg == QIBPORTCNTR_LLI)
- ret = dd->cspec->lli_errs;
- else if (reg == QIBPORTCNTR_EXCESSBUFOVFL)
- ret = dd->cspec->overrun_thresh_errs;
- else if (reg == QIBPORTCNTR_KHDROVFL) {
- int i;
-
- /* sum over all kernel contexts */
- for (i = 0; i < dd->first_user_ctxt; i++)
- ret += read_6120_creg32(dd, cr_portovfl + i);
- } else if (reg == QIBPORTCNTR_PSSTAT)
- ret = dd->cspec->pma_sample_status;
- if (creg == 0xffff)
- goto done;
-
- /*
- * only fast incrementing counters are 64bit; use 32 bit reads to
- * avoid two independent reads when on opteron
- */
- if (creg == cr_wordsend || creg == cr_wordrcv ||
- creg == cr_pktsend || creg == cr_pktrcv)
- ret = read_6120_creg(dd, creg);
- else
- ret = read_6120_creg32(dd, creg);
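- /*
- * Back out the symbol/link-recovery counts that accumulated while
- * the serdes was being brought up, so they aren't reported as
- * fabric errors.
- */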
- if (creg == cr_ibsymbolerr) {
- if (dd->cspec->ibdeltainprog)
- ret -= ret - dd->cspec->ibsymsnap;
- ret -= dd->cspec->ibsymdelta;
- } else if (creg == cr_iblinkerrrecov) {
- if (dd->cspec->ibdeltainprog)
- ret -= ret - dd->cspec->iblnkerrsnap;
- ret -= dd->cspec->iblnkerrdelta;
- }
- if (reg == QIBPORTCNTR_RXDROPPKT) /* add special cased count */
- ret += dd->cspec->rxfc_unsupvl_errs;
-
-done:
- return ret;
-}
-
-/*
- * Device counter names (not port-specific), one line per stat,
- * single string. Used by utilities like ipathstats to print the stats
- * in a way which works for different versions of drivers, without changing
- * the utility. Names need to be 12 chars or less (w/o newline), for proper
- * display by utility.
- * Non-error counters are first.
- * Start of "error" conters is indicated by a leading "E " on the first
- * "error" counter, and doesn't count in label length.
- * The EgrOvfl list needs to be last so we truncate them at the configured
- * context count for the device.
- * cntr6120indices contains the corresponding register indices.
- */
-static const char cntr6120names[] =
- "Interrupts\n"
- "HostBusStall\n"
- "E RxTIDFull\n"
- "RxTIDInvalid\n"
- "Ctxt0EgrOvfl\n"
- "Ctxt1EgrOvfl\n"
- "Ctxt2EgrOvfl\n"
- "Ctxt3EgrOvfl\n"
- "Ctxt4EgrOvfl\n";
-
-static const size_t cntr6120indices[] = {
- cr_lbint,
- cr_lbflowstall,
- cr_errtidfull,
- cr_errtidvalid,
- cr_portovfl + 0,
- cr_portovfl + 1,
- cr_portovfl + 2,
- cr_portovfl + 3,
- cr_portovfl + 4,
-};
-
-/*
- * same as cntr6120names and cntr6120indices, but for port-specific counters.
- * portcntr6120indices is somewhat complicated by some registers needing
- * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
- */
-static const char portcntr6120names[] =
- "TxPkt\n"
- "TxFlowPkt\n"
- "TxWords\n"
- "RxPkt\n"
- "RxFlowPkt\n"
- "RxWords\n"
- "TxFlowStall\n"
- "E IBStatusChng\n"
- "IBLinkDown\n"
- "IBLnkRecov\n"
- "IBRxLinkErr\n"
- "IBSymbolErr\n"
- "RxLLIErr\n"
- "RxBadFormat\n"
- "RxBadLen\n"
- "RxBufOvrfl\n"
- "RxEBP\n"
- "RxFlowCtlErr\n"
- "RxICRCerr\n"
- "RxLPCRCerr\n"
- "RxVCRCerr\n"
- "RxInvalLen\n"
- "RxInvalPKey\n"
- "RxPktDropped\n"
- "TxBadLength\n"
- "TxDropped\n"
- "TxInvalLen\n"
- "TxUnderrun\n"
- "TxUnsupVL\n"
- ;
-
-#define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
-static const size_t portcntr6120indices[] = {
- QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
- cr_pktsendflow,
- QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
- QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
- cr_pktrcvflowctrl,
- QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
- QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
- cr_ibstatuschange,
- QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
- QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
- QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
- QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
- cr_rcvflowctrl_err,
- QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
- cr_invalidslen,
- cr_senddropped,
- cr_errslen,
- cr_sendunderrun,
- cr_txunsupvl,
-};
-
-/* do all the setup to make the counter reads efficient later */
-static void init_6120_cntrnames(struct qib_devdata *dd)
-{
- int i, j = 0;
- char *s;
-
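- /*
- * Count the device counter names, truncating the trailing
- * per-context EgrOvfl entries at the configured context count.
- */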
- for (i = 0, s = (char *)cntr6120names; s && j <= dd->cfgctxts;
- i++) {
- /* we always have at least one counter before the egrovfl */
- if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
- j = 1;
- s = strchr(s + 1, '\n');
- if (s && j)
- j++;
- }
- dd->cspec->ncntrs = i;
- if (!s)
- /* full list; size is without terminating null */
- dd->cspec->cntrnamelen = sizeof(cntr6120names) - 1;
- else
- dd->cspec->cntrnamelen = 1 + s - cntr6120names;
- dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
- * sizeof(u64), GFP_KERNEL);
- if (!dd->cspec->cntrs)
- qib_dev_err(dd, "Failed allocation for counters\n");
-
- for (i = 0, s = (char *)portcntr6120names; s; i++)
- s = strchr(s + 1, '\n');
- dd->cspec->nportcntrs = i - 1;
- dd->cspec->portcntrnamelen = sizeof(portcntr6120names) - 1;
- dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
- * sizeof(u64), GFP_KERNEL);
- if (!dd->cspec->portcntrs)
- qib_dev_err(dd, "Failed allocation for portcounters\n");
-}
-
-static u32 qib_read_6120cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
- u64 **cntrp)
-{
- u32 ret;
-
- if (namep) {
- ret = dd->cspec->cntrnamelen;
- if (pos >= ret)
- ret = 0; /* final read after getting everything */
- else
- *namep = (char *)cntr6120names;
- } else {
- u64 *cntr = dd->cspec->cntrs;
- int i;
-
- ret = dd->cspec->ncntrs * sizeof(u64);
- if (!cntr || pos >= ret) {
- /* everything read, or couldn't get memory */
- ret = 0;
- goto done;
- }
- *cntrp = cntr;
- for (i = 0; i < dd->cspec->ncntrs; i++)
- *cntr++ = read_6120_creg32(dd, cntr6120indices[i]);
- }
-done:
- return ret;
-}
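The read protocol implemented by this routine is two-phase: a call with namep non-NULL returns the newline-separated name blob described above, and a call with cntrp non-NULL returns a u64 array in the same order. As a sketch only (example_log_dev_cntrs is a hypothetical helper, not part of this file), a consumer could pair the two buffers like this:

static void example_log_dev_cntrs(struct qib_devdata *dd)
{
	char *names = NULL;
	u64 *vals = NULL;
	u32 nlen, vlen, i;

	nlen = qib_read_6120cntrs(dd, 0, &names, NULL);
	vlen = qib_read_6120cntrs(dd, 0, NULL, &vals);
	if (!nlen || !vlen || !names || !vals)
		return;

	for (i = 0; i < vlen / sizeof(u64) && names; i++) {
		char *nl = strchr(names, '\n');
		int len = nl ? (int)(nl - names) : (int)strlen(names);

		/* the "E " prefix only marks the first error counter */
		pr_info("%.*s %llu\n", len, names,
			(unsigned long long)vals[i]);
		names = nl ? nl + 1 : NULL;
	}
}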
-
-static u32 qib_read_6120portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
- char **namep, u64 **cntrp)
-{
- u32 ret;
-
- if (namep) {
- ret = dd->cspec->portcntrnamelen;
- if (pos >= ret)
- ret = 0; /* final read after getting everything */
- else
- *namep = (char *)portcntr6120names;
- } else {
- u64 *cntr = dd->cspec->portcntrs;
- struct qib_pportdata *ppd = &dd->pport[port];
- int i;
-
- ret = dd->cspec->nportcntrs * sizeof(u64);
- if (!cntr || pos >= ret) {
- /* everything read, or couldn't get memory */
- ret = 0;
- goto done;
- }
- *cntrp = cntr;
- for (i = 0; i < dd->cspec->nportcntrs; i++) {
- if (portcntr6120indices[i] & _PORT_VIRT_FLAG)
- *cntr++ = qib_portcntr_6120(ppd,
- portcntr6120indices[i] &
- ~_PORT_VIRT_FLAG);
- else
- *cntr++ = read_6120_creg32(dd,
- portcntr6120indices[i]);
- }
- }
-done:
- return ret;
-}
-
-static void qib_chk_6120_errormask(struct qib_devdata *dd)
-{
- static u32 fixed;
- u32 ctrl;
- unsigned long errormask;
- unsigned long hwerrs;
-
- if (!dd->cspec->errormask || !(dd->flags & QIB_INITTED))
- return;
-
- errormask = qib_read_kreg64(dd, kr_errmask);
-
- if (errormask == dd->cspec->errormask)
- return;
- fixed++;
-
- hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
- ctrl = qib_read_kreg32(dd, kr_control);
-
- qib_write_kreg(dd, kr_errmask,
- dd->cspec->errormask);
-
- if ((hwerrs & dd->cspec->hwerrmask) ||
- (ctrl & QLOGIC_IB_C_FREEZEMODE)) {
- qib_write_kreg(dd, kr_hwerrclear, 0ULL);
- qib_write_kreg(dd, kr_errclear, 0ULL);
- /* force re-interrupt of pending events, just in case */
- qib_write_kreg(dd, kr_intclear, 0ULL);
- qib_devinfo(dd->pcidev,
- "errormask fixed(%u) %lx->%lx, ctrl %x hwerr %lx\n",
- fixed, errormask, (unsigned long)dd->cspec->errormask,
- ctrl, hwerrs);
- }
-}
-
-/**
- * qib_get_6120_faststats - get word counters from chip before they overflow
- * @opaque: contains a pointer to the qlogic_ib device qib_devdata
- *
- * This needs more work; in particular, a decision on whether we really
- * need traffic_wds maintained the way it is.
- * Called from add_timer.
- */
-static void qib_get_6120_faststats(unsigned long opaque)
-{
- struct qib_devdata *dd = (struct qib_devdata *) opaque;
- struct qib_pportdata *ppd = dd->pport;
- unsigned long flags;
- u64 traffic_wds;
-
- /*
- * don't access the chip while running diags, or memory diags can
- * fail
- */
- if (!(dd->flags & QIB_INITTED) || dd->diag_client)
- /* but re-arm the timer, for diags case; won't hurt other */
- goto done;
-
- /*
- * We now try to maintain an activity timer, based on traffic
- * exceeding a threshold, so we need to check the word-counts
- * even if they are 64-bit.
- */
- traffic_wds = qib_portcntr_6120(ppd, cr_wordsend) +
- qib_portcntr_6120(ppd, cr_wordrcv);
- spin_lock_irqsave(&dd->eep_st_lock, flags);
- traffic_wds -= dd->traffic_wds;
- dd->traffic_wds += traffic_wds;
- spin_unlock_irqrestore(&dd->eep_st_lock, flags);
-
- qib_chk_6120_errormask(dd);
-done:
- mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
-}
-
-/* no interrupt fallback for these chips */
-static int qib_6120_nointr_fallback(struct qib_devdata *dd)
-{
- return 0;
-}
-
-/*
- * reset the XGXS (between serdes and IBC). Slightly less intrusive
- * than resetting the IBC or external link state, and useful in some
- * cases to cause some retraining. To do this right, we reset IBC
- * as well.
- */
-static void qib_6120_xgxs_reset(struct qib_pportdata *ppd)
-{
- u64 val, prev_val;
- struct qib_devdata *dd = ppd->dd;
-
- prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
- val = prev_val | QLOGIC_IB_XGXS_RESET;
- prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
- qib_write_kreg(dd, kr_control,
- dd->control & ~QLOGIC_IB_C_LINKENABLE);
- qib_write_kreg(dd, kr_xgxs_cfg, val);
- qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
- qib_write_kreg(dd, kr_control, dd->control);
-}
-
-static int qib_6120_get_ib_cfg(struct qib_pportdata *ppd, int which)
-{
- int ret;
-
- switch (which) {
- case QIB_IB_CFG_LWID:
- ret = ppd->link_width_active;
- break;
-
- case QIB_IB_CFG_SPD:
- ret = ppd->link_speed_active;
- break;
-
- case QIB_IB_CFG_LWID_ENB:
- ret = ppd->link_width_enabled;
- break;
-
- case QIB_IB_CFG_SPD_ENB:
- ret = ppd->link_speed_enabled;
- break;
-
- case QIB_IB_CFG_OP_VLS:
- ret = ppd->vls_operational;
- break;
-
- case QIB_IB_CFG_VL_HIGH_CAP:
- ret = 0;
- break;
-
- case QIB_IB_CFG_VL_LOW_CAP:
- ret = 0;
- break;
-
- case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
- ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
- OverrunThreshold);
- break;
-
- case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
- ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
- PhyerrThreshold);
- break;
-
- case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
- /* will only take effect when the link state changes */
- ret = (ppd->dd->cspec->ibcctrl &
- SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
- IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
- break;
-
- case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
- ret = 0; /* no heartbeat on this chip */
- break;
-
- case QIB_IB_CFG_PMA_TICKS:
- ret = 250; /* 1 usec. */
- break;
-
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
-/*
- * We assume range checking is already done, if needed.
- */
-static int qib_6120_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
-{
- struct qib_devdata *dd = ppd->dd;
- int ret = 0;
- u64 val64;
- u16 lcmd, licmd;
-
- switch (which) {
- case QIB_IB_CFG_LWID_ENB:
- ppd->link_width_enabled = val;
- break;
-
- case QIB_IB_CFG_SPD_ENB:
- ppd->link_speed_enabled = val;
- break;
-
- case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
- val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
- OverrunThreshold);
- if (val64 != val) {
- dd->cspec->ibcctrl &=
- ~SYM_MASK(IBCCtrl, OverrunThreshold);
- dd->cspec->ibcctrl |= (u64) val <<
- SYM_LSB(IBCCtrl, OverrunThreshold);
- qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
- break;
-
- case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
- val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
- PhyerrThreshold);
- if (val64 != val) {
- dd->cspec->ibcctrl &=
- ~SYM_MASK(IBCCtrl, PhyerrThreshold);
- dd->cspec->ibcctrl |= (u64) val <<
- SYM_LSB(IBCCtrl, PhyerrThreshold);
- qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
- break;
-
- case QIB_IB_CFG_PKEYS: /* update pkeys */
- val64 = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
- ((u64) ppd->pkeys[2] << 32) |
- ((u64) ppd->pkeys[3] << 48);
- qib_write_kreg(dd, kr_partitionkey, val64);
- break;
-
- case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
- /* will only take effect when the link state changes */
- if (val == IB_LINKINITCMD_POLL)
- dd->cspec->ibcctrl &=
- ~SYM_MASK(IBCCtrl, LinkDownDefaultState);
- else /* SLEEP */
- dd->cspec->ibcctrl |=
- SYM_MASK(IBCCtrl, LinkDownDefaultState);
- qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- break;
-
- case QIB_IB_CFG_MTU: /* update the MTU in IBC */
- /*
- * Update our housekeeping variables, and set IBC max
- * size, same as init code; max IBC is max we allow in
- * buffer, less the qword pbc, plus 1 for ICRC, in dwords
- * Set even if it's unchanged, print debug message only
- * on changes.
- */
- val = (ppd->ibmaxlen >> 2) + 1;
- dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
- dd->cspec->ibcctrl |= (u64)val <<
- SYM_LSB(IBCCtrl, MaxPktLen);
- qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- break;
-
- case QIB_IB_CFG_LSTATE: /* set the IB link state */
- switch (val & 0xffff0000) {
- case IB_LINKCMD_DOWN:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
- if (!dd->cspec->ibdeltainprog) {
- dd->cspec->ibdeltainprog = 1;
- dd->cspec->ibsymsnap =
- read_6120_creg32(dd, cr_ibsymbolerr);
- dd->cspec->iblnkerrsnap =
- read_6120_creg32(dd, cr_iblinkerrrecov);
- }
- break;
-
- case IB_LINKCMD_ARMED:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
- break;
-
- case IB_LINKCMD_ACTIVE:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
- break;
-
- default:
- ret = -EINVAL;
- qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
- goto bail;
- }
- switch (val & 0xffff) {
- case IB_LINKINITCMD_NOP:
- licmd = 0;
- break;
-
- case IB_LINKINITCMD_POLL:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
- break;
-
- case IB_LINKINITCMD_SLEEP:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
- break;
-
- case IB_LINKINITCMD_DISABLE:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
- break;
-
- default:
- ret = -EINVAL;
- qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
- val & 0xffff);
- goto bail;
- }
- qib_set_ib_6120_lstate(ppd, lcmd, licmd);
- goto bail;
-
- case QIB_IB_CFG_HRTBT:
- ret = -EINVAL;
- break;
-
- default:
- ret = -EINVAL;
- }
-bail:
- return ret;
-}
-
-static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
-{
- int ret = 0;
-
- if (!strncmp(what, "ibc", 3)) {
- ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
- qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
- ppd->dd->unit, ppd->port);
- } else if (!strncmp(what, "off", 3)) {
- ppd->dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
- qib_devinfo(ppd->dd->pcidev,
- "Disabling IB%u:%u IBC loopback (normal)\n",
- ppd->dd->unit, ppd->port);
- } else
- ret = -EINVAL;
- if (!ret) {
- qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->dd->cspec->ibcctrl);
- qib_write_kreg(ppd->dd, kr_scratch, 0);
- }
- return ret;
-}
-
-static void pma_6120_timer(unsigned long data)
-{
- struct qib_pportdata *ppd = (struct qib_pportdata *)data;
- struct qib_chip_specific *cs = ppd->dd->cspec;
- struct qib_ibport *ibp = &ppd->ibport_data;
- unsigned long flags;
-
- spin_lock_irqsave(&ibp->lock, flags);
- if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED) {
- cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
- qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
- &cs->spkts, &cs->rpkts, &cs->xmit_wait);
- mod_timer(&cs->pma_timer,
- jiffies + usecs_to_jiffies(ibp->pma_sample_interval));
- } else if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
- u64 ta, tb, tc, td, te;
-
- cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
- qib_snapshot_counters(ppd, &ta, &tb, &tc, &td, &te);
-
- cs->sword = ta - cs->sword;
- cs->rword = tb - cs->rword;
- cs->spkts = tc - cs->spkts;
- cs->rpkts = td - cs->rpkts;
- cs->xmit_wait = te - cs->xmit_wait;
- }
- spin_unlock_irqrestore(&ibp->lock, flags);
-}
-
-/*
- * Note that the caller has the ibp->lock held.
- */
-static void qib_set_cntr_6120_sample(struct qib_pportdata *ppd, u32 intv,
- u32 start)
-{
- struct qib_chip_specific *cs = ppd->dd->cspec;
-
- if (start && intv) {
- cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
- mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(start));
- } else if (intv) {
- cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
- qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
- &cs->spkts, &cs->rpkts, &cs->xmit_wait);
- mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(intv));
- } else {
- cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
- cs->sword = 0;
- cs->rword = 0;
- cs->spkts = 0;
- cs->rpkts = 0;
- cs->xmit_wait = 0;
- }
-}
-
-static u32 qib_6120_iblink_state(u64 ibcs)
-{
- u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);
-
- switch (state) {
- case IB_6120_L_STATE_INIT:
- state = IB_PORT_INIT;
- break;
- case IB_6120_L_STATE_ARM:
- state = IB_PORT_ARMED;
- break;
- case IB_6120_L_STATE_ACTIVE:
- /* fall through */
- case IB_6120_L_STATE_ACT_DEFER:
- state = IB_PORT_ACTIVE;
- break;
- default: /* fall through */
- case IB_6120_L_STATE_DOWN:
- state = IB_PORT_DOWN;
- break;
- }
- return state;
-}
-
-/* returns the IBTA port state, rather than the IBC link training state */
-static u8 qib_6120_phys_portstate(u64 ibcs)
-{
- u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
- return qib_6120_physportstate[state];
-}
-
-static int qib_6120_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
-
- if (ibup) {
- if (ppd->dd->cspec->ibdeltainprog) {
- ppd->dd->cspec->ibdeltainprog = 0;
- ppd->dd->cspec->ibsymdelta +=
- read_6120_creg32(ppd->dd, cr_ibsymbolerr) -
- ppd->dd->cspec->ibsymsnap;
- ppd->dd->cspec->iblnkerrdelta +=
- read_6120_creg32(ppd->dd, cr_iblinkerrrecov) -
- ppd->dd->cspec->iblnkerrsnap;
- }
- qib_hol_init(ppd);
- } else {
- ppd->dd->cspec->lli_counter = 0;
- if (!ppd->dd->cspec->ibdeltainprog) {
- ppd->dd->cspec->ibdeltainprog = 1;
- ppd->dd->cspec->ibsymsnap =
- read_6120_creg32(ppd->dd, cr_ibsymbolerr);
- ppd->dd->cspec->iblnkerrsnap =
- read_6120_creg32(ppd->dd, cr_iblinkerrrecov);
- }
- qib_hol_down(ppd);
- }
-
- qib_6120_setup_setextled(ppd, ibup);
-
- return 0;
-}
-
-/* Does read/modify/write to the appropriate registers to
- * set the output and direction bits selected by mask.
- * These are in their canonical positions (e.g. lsb of
- * dir will end up in D48 of extctrl on existing chips).
- * Returns contents of GP Inputs.
- */
-static int gpio_6120_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
-{
- u64 read_val, new_out;
- unsigned long flags;
-
- if (mask) {
- /* some bits being written, lock access to GPIO */
- dir &= mask;
- out &= mask;
- spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
- dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
- dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
- new_out = (dd->cspec->gpio_out & ~mask) | out;
-
- qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
- qib_write_kreg(dd, kr_gpio_out, new_out);
- dd->cspec->gpio_out = new_out;
- spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
- }
- /*
- * It is unlikely that a read at this time would get valid
- * data on a pin whose direction line was set in the same
- * call to this function. We include the read here because
- * that allows us to potentially combine a change on one pin with
- * a read on another, and because the old code did something like
- * this.
- */
- read_val = qib_read_kreg64(dd, kr_extstatus);
- return SYM_FIELD(read_val, EXTStatus, GPIOIn);
-}
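As a usage sketch (a hypothetical caller, not code from this file): a caller puts only the pins it is changing into mask, so other pins keep their direction and output; the pin numbers below are purely illustrative.

static int example_gpio_usage(struct qib_devdata *dd)
{
	const u32 out_pin = 1, in_pin = 0;	/* illustrative pin numbers */
	u32 inputs;

	/* make out_pin an output driven high; only touch that one bit */
	inputs = gpio_6120_mod(dd, 1U << out_pin, 1U << out_pin,
			       1U << out_pin);

	/* in_pin was not in mask, so it is untouched; sample its level */
	return (inputs >> in_pin) & 1;
}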
-
-/*
- * Read fundamental info we need to use the chip. These are
- * the registers that describe chip capabilities, and are
- * saved in shadow registers.
- */
-static void get_6120_chip_params(struct qib_devdata *dd)
-{
- u64 val;
- u32 piobufs;
- int mtu;
-
- dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
-
- dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
- dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
- dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
- dd->palign = qib_read_kreg32(dd, kr_palign);
- dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
- dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
-
- dd->rcvhdrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
-
- val = qib_read_kreg64(dd, kr_sendpiosize);
- dd->piosize2k = val & ~0U;
- dd->piosize4k = val >> 32;
-
- mtu = ib_mtu_enum_to_int(qib_ibmtu);
- if (mtu == -1)
- mtu = QIB_DEFAULT_MTU;
- dd->pport->ibmtu = (u32)mtu;
-
- val = qib_read_kreg64(dd, kr_sendpiobufcnt);
- dd->piobcnt2k = val & ~0U;
- dd->piobcnt4k = val >> 32;
- dd->last_pio = dd->piobcnt4k + dd->piobcnt2k - 1;
- /* these may be adjusted in init_chip_wc_pat() */
- dd->pio2kbase = (u32 __iomem *)
- (((char __iomem *)dd->kregbase) + dd->pio2k_bufbase);
- if (dd->piobcnt4k) {
- dd->pio4kbase = (u32 __iomem *)
- (((char __iomem *) dd->kregbase) +
- (dd->piobufbase >> 32));
- /*
- * 4K buffers take 2 pages; we use roundup just to be
- * paranoid; we calculate it once here, rather than on
- * every buffer allocation
- */
- dd->align4k = ALIGN(dd->piosize4k, dd->palign);
- }
-
- piobufs = dd->piobcnt4k + dd->piobcnt2k;
-
- dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
- (sizeof(u64) * BITS_PER_BYTE / 2);
-}
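The pioavregs computation above is a round-up division: the divisor sizeof(u64) * BITS_PER_BYTE / 2 evaluates to 32, i.e. 32 buffers per 64-bit shadow register (two bits of availability state per buffer). With a hypothetical piobufs total of 136 (a number chosen only for illustration):

	pioavregs = ALIGN(136, 32) / 32 = 160 / 32 = 5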
-
-/*
- * The chip base addresses in cspec and cpspec have to be set
- * after possible init_chip_wc_pat(), rather than in
- * get_6120_chip_params(), so split out as separate function
- */
-static void set_6120_baseaddrs(struct qib_devdata *dd)
-{
- u32 cregbase;
-
- cregbase = qib_read_kreg32(dd, kr_counterregbase);
- dd->cspec->cregbase = (u64 __iomem *)
- ((char __iomem *) dd->kregbase + cregbase);
-
- dd->egrtidbase = (u64 __iomem *)
- ((char __iomem *) dd->kregbase + dd->rcvegrbase);
-}
-
-/*
- * Write the final few registers that depend on some of the
- * init setup. Done late in init, just before bringing up
- * the serdes.
- */
-static int qib_late_6120_initreg(struct qib_devdata *dd)
-{
- int ret = 0;
- u64 val;
-
- qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
- qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
- qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
- qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
- val = qib_read_kreg64(dd, kr_sendpioavailaddr);
- if (val != dd->pioavailregs_phys) {
- qib_dev_err(dd,
- "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
- (unsigned long) dd->pioavailregs_phys,
- (unsigned long long) val);
- ret = -EINVAL;
- }
- return ret;
-}
-
-static int init_6120_variables(struct qib_devdata *dd)
-{
- int ret = 0;
- struct qib_pportdata *ppd;
- u32 sbufs;
-
- ppd = (struct qib_pportdata *)(dd + 1);
- dd->pport = ppd;
- dd->num_pports = 1;
-
- dd->cspec = (struct qib_chip_specific *)(ppd + dd->num_pports);
- ppd->cpspec = NULL; /* not used in this chip */
-
- spin_lock_init(&dd->cspec->kernel_tid_lock);
- spin_lock_init(&dd->cspec->user_tid_lock);
- spin_lock_init(&dd->cspec->rcvmod_lock);
- spin_lock_init(&dd->cspec->gpio_lock);
-
- /* we haven't yet set QIB_PRESENT, so use read directly */
- dd->revision = readq(&dd->kregbase[kr_revision]);
-
- if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
- qib_dev_err(dd,
- "Revision register read failure, giving up initialization\n");
- ret = -ENODEV;
- goto bail;
- }
- dd->flags |= QIB_PRESENT; /* now register routines work */
-
- dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
- ChipRevMajor);
- dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
- ChipRevMinor);
-
- get_6120_chip_params(dd);
- pe_boardname(dd); /* fill in boardname */
-
- /*
- * GPIO bits for TWSI data and clock,
- * used for serial EEPROM.
- */
- dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
- dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
- dd->twsi_eeprom_dev = QIB_TWSI_NO_DEV;
-
- if (qib_unordered_wc())
- dd->flags |= QIB_PIO_FLUSH_WC;
-
- /*
- * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
- * 2 is Some Misc, 3 is reserved for future.
- */
- dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr);
-
- /* Ignore errors in PIO/PBC on systems with unordered write-combining */
- if (qib_unordered_wc())
- dd->eep_st_masks[0].hwerrs_to_log &= ~TXE_PIO_PARITY;
-
- dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr);
-
- dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);
-
- ret = qib_init_pportdata(ppd, dd, 0, 1);
- if (ret)
- goto bail;
- ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
- ppd->link_speed_supported = QIB_IB_SDR;
- ppd->link_width_enabled = IB_WIDTH_4X;
- ppd->link_speed_enabled = ppd->link_speed_supported;
- /* these can't change for this chip, so set once */
- ppd->link_width_active = ppd->link_width_enabled;
- ppd->link_speed_active = ppd->link_speed_enabled;
- ppd->vls_supported = IB_VL_VL0;
- ppd->vls_operational = ppd->vls_supported;
-
- dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
- dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
- dd->rhf_offset = 0;
-
- /* we always allocate at least 2048 bytes for eager buffers */
- ret = ib_mtu_enum_to_int(qib_ibmtu);
- dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
- BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
- dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
-
- qib_6120_tidtemplate(dd);
-
- /*
- * We can request a receive interrupt for 1 or
- * more packets from current offset. For now, we set this
- * up for a single packet.
- */
- dd->rhdrhead_intr_off = 1ULL << 32;
-
- /* setup the stats timer; the add_timer is done at end of init */
- init_timer(&dd->stats_timer);
- dd->stats_timer.function = qib_get_6120_faststats;
- dd->stats_timer.data = (unsigned long) dd;
-
- init_timer(&dd->cspec->pma_timer);
- dd->cspec->pma_timer.function = pma_6120_timer;
- dd->cspec->pma_timer.data = (unsigned long) ppd;
-
- dd->ureg_align = qib_read_kreg32(dd, kr_palign);
-
- dd->piosize2kmax_dwords = dd->piosize2k >> 2;
- qib_6120_config_ctxts(dd);
- qib_set_ctxtcnt(dd);
-
- ret = init_chip_wc_pat(dd, 0);
- if (ret)
- goto bail;
- set_6120_baseaddrs(dd); /* set chip access pointers now */
-
- ret = 0;
- if (qib_mini_init)
- goto bail;
-
- qib_num_cfg_vls = 1; /* if any 6120's, only one VL */
-
- ret = qib_create_ctxts(dd);
- init_6120_cntrnames(dd);
-
- /* use all of 4KB buffers for the kernel, otherwise 16 */
- sbufs = dd->piobcnt4k ? dd->piobcnt4k : 16;
-
- dd->lastctxt_piobuf = dd->piobcnt2k + dd->piobcnt4k - sbufs;
- dd->pbufsctxt = dd->lastctxt_piobuf /
- (dd->cfgctxts - dd->first_user_ctxt);
-
- if (ret)
- goto bail;
-bail:
- return ret;
-}
-
-/*
- * For this chip, we want to use the same buffer every time
- * when we are trying to bring the link up (they are always VL15
- * packets). At that link state the packet should always go out immediately
- * (or at least be discarded at the tx interface if the link is down).
- * If it doesn't, and the buffer isn't available, that means some other
- * sender has gotten ahead of us, and is preventing our packet from going
- * out. In that case, we flush all packets, and try again. If that still
- * fails, we fail the request, and hope things work the next time around.
- *
- * We don't need very complicated heuristics on whether the packet had
- * time to go out or not, since even at SDR 1X, it goes out in very short
- * time periods, covered by the chip reads done here and as part of the
- * flush.
- */
-static u32 __iomem *get_6120_link_buf(struct qib_pportdata *ppd, u32 *bnum)
-{
- u32 __iomem *buf;
- u32 lbuf = ppd->dd->piobcnt2k + ppd->dd->piobcnt4k - 1;
-
- /*
- * always blip to get avail list updated, since it's almost
- * always needed, and is fairly cheap.
- */
- sendctrl_6120_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
- qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
- buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
- if (buf)
- goto done;
-
- sendctrl_6120_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
- QIB_SENDCTRL_AVAIL_BLIP);
- ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */
- qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
- buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
-done:
- return buf;
-}
-
-static u32 __iomem *qib_6120_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
- u32 *pbufnum)
-{
- u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
- struct qib_devdata *dd = ppd->dd;
- u32 __iomem *buf;
-
- if (((pbc >> 32) & PBC_6120_VL15_SEND_CTRL) &&
- !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
- buf = get_6120_link_buf(ppd, pbufnum);
- else {
-
- if ((plen + 1) > dd->piosize2kmax_dwords)
- first = dd->piobcnt2k;
- else
- first = 0;
- /* try 4k if all 2k busy, so same last for both sizes */
- last = dd->piobcnt2k + dd->piobcnt4k - 1;
- buf = qib_getsendbuf_range(dd, pbufnum, first, last);
- }
- return buf;
-}
-
-static int init_sdma_6120_regs(struct qib_pportdata *ppd)
-{
- return -ENODEV;
-}
-
-static u16 qib_sdma_6120_gethead(struct qib_pportdata *ppd)
-{
- return 0;
-}
-
-static int qib_sdma_6120_busy(struct qib_pportdata *ppd)
-{
- return 0;
-}
-
-static void qib_sdma_update_6120_tail(struct qib_pportdata *ppd, u16 tail)
-{
-}
-
-static void qib_6120_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
-{
-}
-
-static void qib_sdma_set_6120_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
-{
-}
-
-/*
- * the pbc doesn't need a VL15 indicator, but we need it for link_buf.
- * The chip ignores the bit if set.
- */
-static u32 qib_6120_setpbc_control(struct qib_pportdata *ppd, u32 plen,
- u8 srate, u8 vl)
-{
- return vl == 15 ? PBC_6120_VL15_SEND_CTRL : 0;
-}
-
-static void qib_6120_initvl15_bufs(struct qib_devdata *dd)
-{
-}
-
-static void qib_6120_init_ctxt(struct qib_ctxtdata *rcd)
-{
- rcd->rcvegrcnt = rcd->dd->rcvhdrcnt;
- rcd->rcvegr_tid_base = rcd->ctxt * rcd->rcvegrcnt;
-}
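Every context gets the same number of eager entries here, so the eager TID base is a simple linear offset; for example, with a hypothetical rcvegrcnt of 2048, context 3 would start at:

	rcvegr_tid_base = 3 * 2048 = 6144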
-
-static void qib_6120_txchk_change(struct qib_devdata *dd, u32 start,
- u32 len, u32 avail, struct qib_ctxtdata *rcd)
-{
-}
-
-static void writescratch(struct qib_devdata *dd, u32 val)
-{
- (void) qib_write_kreg(dd, kr_scratch, val);
-}
-
-static int qib_6120_tempsense_rd(struct qib_devdata *dd, int regnum)
-{
- return -ENXIO;
-}
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-static int qib_6120_notify_dca(struct qib_devdata *dd, unsigned long event)
-{
- return 0;
-}
-#endif
-
-/* Dummy function, as 6120 boards never disable EEPROM Write */
-static int qib_6120_eeprom_wen(struct qib_devdata *dd, int wen)
-{
- return 1;
-}
-
-/**
- * qib_init_iba6120_funcs - set up the chip-specific function pointers
- * @pdev: pci_dev of the qlogic_ib device
- * @ent: pci_device_id matching this chip
- *
- * This is global, and is called directly at init to set up the
- * chip-specific function pointers for later use.
- *
- * It also allocates/partially-inits the qib_devdata struct for
- * this device.
- */
-struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct qib_devdata *dd;
- int ret;
-
- dd = qib_alloc_devdata(pdev, sizeof(struct qib_pportdata) +
- sizeof(struct qib_chip_specific));
- if (IS_ERR(dd))
- goto bail;
-
- dd->f_bringup_serdes = qib_6120_bringup_serdes;
- dd->f_cleanup = qib_6120_setup_cleanup;
- dd->f_clear_tids = qib_6120_clear_tids;
- dd->f_free_irq = qib_6120_free_irq;
- dd->f_get_base_info = qib_6120_get_base_info;
- dd->f_get_msgheader = qib_6120_get_msgheader;
- dd->f_getsendbuf = qib_6120_getsendbuf;
- dd->f_gpio_mod = gpio_6120_mod;
- dd->f_eeprom_wen = qib_6120_eeprom_wen;
- dd->f_hdrqempty = qib_6120_hdrqempty;
- dd->f_ib_updown = qib_6120_ib_updown;
- dd->f_init_ctxt = qib_6120_init_ctxt;
- dd->f_initvl15_bufs = qib_6120_initvl15_bufs;
- dd->f_intr_fallback = qib_6120_nointr_fallback;
- dd->f_late_initreg = qib_late_6120_initreg;
- dd->f_setpbc_control = qib_6120_setpbc_control;
- dd->f_portcntr = qib_portcntr_6120;
- dd->f_put_tid = (dd->minrev >= 2) ?
- qib_6120_put_tid_2 :
- qib_6120_put_tid;
- dd->f_quiet_serdes = qib_6120_quiet_serdes;
- dd->f_rcvctrl = rcvctrl_6120_mod;
- dd->f_read_cntrs = qib_read_6120cntrs;
- dd->f_read_portcntrs = qib_read_6120portcntrs;
- dd->f_reset = qib_6120_setup_reset;
- dd->f_init_sdma_regs = init_sdma_6120_regs;
- dd->f_sdma_busy = qib_sdma_6120_busy;
- dd->f_sdma_gethead = qib_sdma_6120_gethead;
- dd->f_sdma_sendctrl = qib_6120_sdma_sendctrl;
- dd->f_sdma_set_desc_cnt = qib_sdma_set_6120_desc_cnt;
- dd->f_sdma_update_tail = qib_sdma_update_6120_tail;
- dd->f_sendctrl = sendctrl_6120_mod;
- dd->f_set_armlaunch = qib_set_6120_armlaunch;
- dd->f_set_cntr_sample = qib_set_cntr_6120_sample;
- dd->f_iblink_state = qib_6120_iblink_state;
- dd->f_ibphys_portstate = qib_6120_phys_portstate;
- dd->f_get_ib_cfg = qib_6120_get_ib_cfg;
- dd->f_set_ib_cfg = qib_6120_set_ib_cfg;
- dd->f_set_ib_loopback = qib_6120_set_loopback;
- dd->f_set_intr_state = qib_6120_set_intr_state;
- dd->f_setextled = qib_6120_setup_setextled;
- dd->f_txchk_change = qib_6120_txchk_change;
- dd->f_update_usrhead = qib_update_6120_usrhead;
- dd->f_wantpiobuf_intr = qib_wantpiobuf_6120_intr;
- dd->f_xgxs_reset = qib_6120_xgxs_reset;
- dd->f_writescratch = writescratch;
- dd->f_tempsense_rd = qib_6120_tempsense_rd;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dd->f_notify_dca = qib_6120_notify_dca;
-#endif
- /*
- * Do remaining pcie setup and save pcie values in dd.
- * Any error printing is already done by the init code.
- * On return, we have the chip mapped and accessible,
- * but chip registers are not set up until start of
- * init_6120_variables.
- */
- ret = qib_pcie_ddinit(dd, pdev, ent);
- if (ret < 0)
- goto bail_free;
-
- /* initialize chip-specific variables */
- ret = init_6120_variables(dd);
- if (ret)
- goto bail_cleanup;
-
- if (qib_mini_init)
- goto bail;
-
- if (qib_pcie_params(dd, 8, NULL, NULL))
- qib_dev_err(dd,
- "Failed to setup PCIe or interrupts; continuing anyway\n");
- dd->cspec->irq = pdev->irq; /* save IRQ */
-
- /* clear diagctrl register, in case diags were running and crashed */
- qib_write_kreg(dd, kr_hwdiagctrl, 0);
-
- if (qib_read_kreg64(dd, kr_hwerrstatus) &
- QLOGIC_IB_HWE_SERDESPLLFAILED)
- qib_write_kreg(dd, kr_hwerrclear,
- QLOGIC_IB_HWE_SERDESPLLFAILED);
-
- /* setup interrupt handler (interrupt type handled above) */
- qib_setup_6120_interrupt(dd);
- /* Note that qpn_mask is set by qib_6120_config_ctxts() first */
- qib_6120_init_hwerrors(dd);
-
- goto bail;
-
-bail_cleanup:
- qib_pcie_ddcleanup(dd);
-bail_free:
- qib_free_devdata(dd);
- dd = ERR_PTR(ret);
-bail:
- return dd;
-}
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
deleted file mode 100644
index 00b2af211157..000000000000
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ /dev/null
@@ -1,4657 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/*
- * This file contains all of the code that is specific to the
- * QLogic_IB 7220 chip (except that specific to the SerDes)
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <rdma/ib_verbs.h>
-
-#include "qib.h"
-#include "qib_7220.h"
-
-static void qib_setup_7220_setextled(struct qib_pportdata *, u32);
-static void qib_7220_handle_hwerrors(struct qib_devdata *, char *, size_t);
-static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op);
-static u32 qib_7220_iblink_state(u64);
-static u8 qib_7220_phys_portstate(u64);
-static void qib_sdma_update_7220_tail(struct qib_pportdata *, u16);
-static void qib_set_ib_7220_lstate(struct qib_pportdata *, u16, u16);
-
-/*
- * This file contains almost all the chip-specific register information and
- * access functions for the QLogic QLogic_IB 7220 PCI-Express chip, with the
- * exception of SerDes support, which is in qib_sd7220.c.
- */
-
-/* Below uses machine-generated qib_chipnum_regs.h file */
-#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
-
-/* Use defines to tie machine-generated names to lower-case names */
-#define kr_control KREG_IDX(Control)
-#define kr_counterregbase KREG_IDX(CntrRegBase)
-#define kr_errclear KREG_IDX(ErrClear)
-#define kr_errmask KREG_IDX(ErrMask)
-#define kr_errstatus KREG_IDX(ErrStatus)
-#define kr_extctrl KREG_IDX(EXTCtrl)
-#define kr_extstatus KREG_IDX(EXTStatus)
-#define kr_gpio_clear KREG_IDX(GPIOClear)
-#define kr_gpio_mask KREG_IDX(GPIOMask)
-#define kr_gpio_out KREG_IDX(GPIOOut)
-#define kr_gpio_status KREG_IDX(GPIOStatus)
-#define kr_hrtbt_guid KREG_IDX(HRTBT_GUID)
-#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
-#define kr_hwerrclear KREG_IDX(HwErrClear)
-#define kr_hwerrmask KREG_IDX(HwErrMask)
-#define kr_hwerrstatus KREG_IDX(HwErrStatus)
-#define kr_ibcctrl KREG_IDX(IBCCtrl)
-#define kr_ibcddrctrl KREG_IDX(IBCDDRCtrl)
-#define kr_ibcddrstatus KREG_IDX(IBCDDRStatus)
-#define kr_ibcstatus KREG_IDX(IBCStatus)
-#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
-#define kr_intclear KREG_IDX(IntClear)
-#define kr_intmask KREG_IDX(IntMask)
-#define kr_intstatus KREG_IDX(IntStatus)
-#define kr_ncmodectrl KREG_IDX(IBNCModeCtrl)
-#define kr_palign KREG_IDX(PageAlign)
-#define kr_partitionkey KREG_IDX(RcvPartitionKey)
-#define kr_portcnt KREG_IDX(PortCnt)
-#define kr_rcvbthqp KREG_IDX(RcvBTHQP)
-#define kr_rcvctrl KREG_IDX(RcvCtrl)
-#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
-#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
-#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
-#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
-#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
-#define kr_rcvpktledcnt KREG_IDX(RcvPktLEDCnt)
-#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
-#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
-#define kr_revision KREG_IDX(Revision)
-#define kr_scratch KREG_IDX(Scratch)
-#define kr_sendbuffererror KREG_IDX(SendBufErr0)
-#define kr_sendctrl KREG_IDX(SendCtrl)
-#define kr_senddmabase KREG_IDX(SendDmaBase)
-#define kr_senddmabufmask0 KREG_IDX(SendDmaBufMask0)
-#define kr_senddmabufmask1 (KREG_IDX(SendDmaBufMask0) + 1)
-#define kr_senddmabufmask2 (KREG_IDX(SendDmaBufMask0) + 2)
-#define kr_senddmahead KREG_IDX(SendDmaHead)
-#define kr_senddmaheadaddr KREG_IDX(SendDmaHeadAddr)
-#define kr_senddmalengen KREG_IDX(SendDmaLenGen)
-#define kr_senddmastatus KREG_IDX(SendDmaStatus)
-#define kr_senddmatail KREG_IDX(SendDmaTail)
-#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
-#define kr_sendpiobufbase KREG_IDX(SendBufBase)
-#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
-#define kr_sendpiosize KREG_IDX(SendBufSize)
-#define kr_sendregbase KREG_IDX(SendRegBase)
-#define kr_userregbase KREG_IDX(UserRegBase)
-#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
-
-/* These must only be written via qib_write_kreg_ctxt() */
-#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)
-#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
-
-
-#define CREG_IDX(regname) ((QIB_7220_##regname##_OFFS - \
- QIB_7220_LBIntCnt_OFFS) / sizeof(u64))
-
-#define cr_badformat CREG_IDX(RxVersionErrCnt)
-#define cr_erricrc CREG_IDX(RxICRCErrCnt)
-#define cr_errlink CREG_IDX(RxLinkMalformCnt)
-#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)
-#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)
-#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlViolCnt)
-#define cr_err_rlen CREG_IDX(RxLenErrCnt)
-#define cr_errslen CREG_IDX(TxLenErrCnt)
-#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)
-#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)
-#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)
-#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)
-#define cr_lbint CREG_IDX(LBIntCnt)
-#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
-#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)
-#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)
-#define cr_pktrcv CREG_IDX(RxDataPktCnt)
-#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
-#define cr_pktsend CREG_IDX(TxDataPktCnt)
-#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)
-#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)
-#define cr_rcvebp CREG_IDX(RxEBPCnt)
-#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)
-#define cr_senddropped CREG_IDX(TxDroppedPktCnt)
-#define cr_sendstall CREG_IDX(TxFlowStallCnt)
-#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)
-#define cr_wordrcv CREG_IDX(RxDwordCnt)
-#define cr_wordsend CREG_IDX(TxDwordCnt)
-#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
-#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)
-#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
-#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)
-#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
-#define cr_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
-#define cr_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
-#define cr_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
-#define cr_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
-#define cr_rxvlerr CREG_IDX(RxVlErrCnt)
-#define cr_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
-#define cr_psstat CREG_IDX(PSStat)
-#define cr_psstart CREG_IDX(PSStart)
-#define cr_psinterval CREG_IDX(PSInterval)
-#define cr_psrcvdatacount CREG_IDX(PSRcvDataCount)
-#define cr_psrcvpktscount CREG_IDX(PSRcvPktsCount)
-#define cr_psxmitdatacount CREG_IDX(PSXmitDataCount)
-#define cr_psxmitpktscount CREG_IDX(PSXmitPktsCount)
-#define cr_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
-#define cr_txsdmadesc CREG_IDX(TxSDmaDescCnt)
-#define cr_pcieretrydiag CREG_IDX(PcieRetryBufDiagQwordCnt)
-
-#define SYM_RMASK(regname, fldname) ((u64) \
- QIB_7220_##regname##_##fldname##_RMASK)
-#define SYM_MASK(regname, fldname) ((u64) \
- QIB_7220_##regname##_##fldname##_RMASK << \
- QIB_7220_##regname##_##fldname##_LSB)
-#define SYM_LSB(regname, fldname) (QIB_7220_##regname##_##fldname##_LSB)
-#define SYM_FIELD(value, regname, fldname) ((u64) \
- (((value) >> SYM_LSB(regname, fldname)) & \
- SYM_RMASK(regname, fldname)))
-#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
-#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
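As a worked example of these accessors (a sketch only, using IBCStatus.LinkSpeedActive purely for illustration, and assuming the machine-generated register header mentioned above provides both the _LSB and _RMASK constants for that field, as it does for the fields used in this file):

/* SYM_FIELD(ibcs, IBCStatus, LinkSpeedActive) expands to
 *   ((ibcs >> QIB_7220_IBCStatus_LinkSpeedActive_LSB) &
 *    QIB_7220_IBCStatus_LinkSpeedActive_RMASK)
 * so a hypothetical helper to pull the field out would be:
 */
static inline u32 example_link_speed_active(u64 ibcs)
{
	return (u32)SYM_FIELD(ibcs, IBCStatus, LinkSpeedActive);
}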
-
-/* ibcctrl bits */
-#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
-/* cycle through TS1/TS2 till OK */
-#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
-/* wait for TS1, then go on */
-#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
-#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
-
-#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
-#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
-#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
-
-#define BLOB_7220_IBCHG 0x81
-
-/*
- * We could have a single register get/put routine that takes a group type,
- * but this is somewhat clearer and cleaner. It also gives us some error
- * checking. 64-bit register reads should always work, but are inefficient
- * on Opteron (the northbridge always generates 2 separate HT 32-bit reads),
- * so we use kreg32 wherever possible. User register and counter register
- * reads are always 32-bit reads, so there is only one form of those routines.
- */
-
-/**
- * qib_read_ureg32 - read 32-bit virtualized per-context register
- * @dd: device
- * @regno: register number
- * @ctxt: context number
- *
- * Return the contents of a register that is virtualized to be per context.
- * Returns -1 on errors (not distinguishable from valid contents at
- * runtime; we may add a separate error variable at some point).
- */
-static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
- enum qib_ureg regno, int ctxt)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return 0;
-
- if (dd->userbase)
- return readl(regno + (u64 __iomem *)
- ((char __iomem *)dd->userbase +
- dd->ureg_align * ctxt));
- else
- return readl(regno + (u64 __iomem *)
- (dd->uregbase +
- (char __iomem *)dd->kregbase +
- dd->ureg_align * ctxt));
-}
-
-/**
- * qib_write_ureg - write 32-bit virtualized per-context register
- * @dd: device
- * @regno: register number
- * @value: value
- * @ctxt: context
- *
- * Write the contents of a register that is virtualized to be per context.
- */
-static inline void qib_write_ureg(const struct qib_devdata *dd,
- enum qib_ureg regno, u64 value, int ctxt)
-{
- u64 __iomem *ubase;
-
- if (dd->userbase)
- ubase = (u64 __iomem *)
- ((char __iomem *) dd->userbase +
- dd->ureg_align * ctxt);
- else
- ubase = (u64 __iomem *)
- (dd->uregbase +
- (char __iomem *) dd->kregbase +
- dd->ureg_align * ctxt);
-
- if (dd->kregbase && (dd->flags & QIB_PRESENT))
- writeq(value, &ubase[regno]);
-}
-
-/**
- * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
- * @dd: the qlogic_ib device
- * @regno: the register number to write
- * @ctxt: the context containing the register
- * @value: the value to write
- */
-static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
- const u16 regno, unsigned ctxt,
- u64 value)
-{
- qib_write_kreg(dd, regno + ctxt, value);
-}
-
-static inline void write_7220_creg(const struct qib_devdata *dd,
- u16 regno, u64 value)
-{
- if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
- writeq(value, &dd->cspec->cregbase[regno]);
-}
-
-static inline u64 read_7220_creg(const struct qib_devdata *dd, u16 regno)
-{
- if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
- return 0;
- return readq(&dd->cspec->cregbase[regno]);
-}
-
-static inline u32 read_7220_creg32(const struct qib_devdata *dd, u16 regno)
-{
- if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
- return 0;
- return readl(&dd->cspec->cregbase[regno]);
-}
-
-/* kr_revision bits */
-#define QLOGIC_IB_R_EMULATORREV_MASK ((1ULL << 22) - 1)
-#define QLOGIC_IB_R_EMULATORREV_SHIFT 40
-
-/* kr_control bits */
-#define QLOGIC_IB_C_RESET (1U << 7)
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define QLOGIC_IB_I_RCVURG_MASK ((1ULL << 17) - 1)
-#define QLOGIC_IB_I_RCVURG_SHIFT 32
-#define QLOGIC_IB_I_RCVAVAIL_MASK ((1ULL << 17) - 1)
-#define QLOGIC_IB_I_RCVAVAIL_SHIFT 0
-#define QLOGIC_IB_I_SERDESTRIMDONE (1ULL << 27)
-
-#define QLOGIC_IB_C_FREEZEMODE 0x00000002
-#define QLOGIC_IB_C_LINKENABLE 0x00000004
-
-#define QLOGIC_IB_I_SDMAINT 0x8000000000000000ULL
-#define QLOGIC_IB_I_SDMADISABLED 0x4000000000000000ULL
-#define QLOGIC_IB_I_ERROR 0x0000000080000000ULL
-#define QLOGIC_IB_I_SPIOSENT 0x0000000040000000ULL
-#define QLOGIC_IB_I_SPIOBUFAVAIL 0x0000000020000000ULL
-#define QLOGIC_IB_I_GPIO 0x0000000010000000ULL
-
-/* variables for sanity checking interrupt and errors */
-#define QLOGIC_IB_I_BITSEXTANT \
- (QLOGIC_IB_I_SDMAINT | QLOGIC_IB_I_SDMADISABLED | \
- (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \
- (QLOGIC_IB_I_RCVAVAIL_MASK << \
- QLOGIC_IB_I_RCVAVAIL_SHIFT) | \
- QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \
- QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO | \
- QLOGIC_IB_I_SERDESTRIMDONE)
-
-#define IB_HWE_BITSEXTANT \
- (HWE_MASK(RXEMemParityErr) | \
- HWE_MASK(TXEMemParityErr) | \
- (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \
- QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \
- QLOGIC_IB_HWE_PCIE1PLLFAILED | \
- QLOGIC_IB_HWE_PCIE0PLLFAILED | \
- QLOGIC_IB_HWE_PCIEPOISONEDTLP | \
- QLOGIC_IB_HWE_PCIECPLTIMEOUT | \
- QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \
- QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \
- QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \
- HWE_MASK(PowerOnBISTFailed) | \
- QLOGIC_IB_HWE_COREPLL_FBSLIP | \
- QLOGIC_IB_HWE_COREPLL_RFSLIP | \
- QLOGIC_IB_HWE_SERDESPLLFAILED | \
- HWE_MASK(IBCBusToSPCParityErr) | \
- HWE_MASK(IBCBusFromSPCParityErr) | \
- QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR | \
- QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR | \
- QLOGIC_IB_HWE_SDMAMEMREADERR | \
- QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED | \
- QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT | \
- QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT | \
- QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT | \
- QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT | \
- QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR | \
- QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR | \
- QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR | \
- QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR)
-
-#define IB_E_BITSEXTANT \
- (ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \
- ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \
- ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \
- ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
- ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \
- ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \
- ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \
- ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \
- ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \
- ERR_MASK(SendSpecialTriggerErr) | \
- ERR_MASK(SDmaDisabledErr) | ERR_MASK(SendMinPktLenErr) | \
- ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnderRunErr) | \
- ERR_MASK(SendPktLenErr) | ERR_MASK(SendDroppedSmpPktErr) | \
- ERR_MASK(SendDroppedDataPktErr) | \
- ERR_MASK(SendPioArmLaunchErr) | \
- ERR_MASK(SendUnexpectedPktNumErr) | \
- ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(SendBufMisuseErr) | \
- ERR_MASK(SDmaGenMismatchErr) | ERR_MASK(SDmaOutOfBoundErr) | \
- ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \
- ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \
- ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \
- ERR_MASK(SDmaUnexpDataErr) | \
- ERR_MASK(IBStatusChanged) | ERR_MASK(InvalidAddrErr) | \
- ERR_MASK(ResetNegated) | ERR_MASK(HardwareErr) | \
- ERR_MASK(SDmaDescAddrMisalignErr) | \
- ERR_MASK(InvalidEEPCmd))
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
-#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x00000000000000ffULL
-#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0
-#define QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
-#define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
-#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
-#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
-#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
-#define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
-#define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
-#define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
-#define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
-#define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL
-/* specific to this chip */
-#define QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR 0x0000000000000040ULL
-#define QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR 0x0000000000000080ULL
-#define QLOGIC_IB_HWE_SDMAMEMREADERR 0x0000000010000000ULL
-#define QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED 0x2000000000000000ULL
-#define QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT 0x0100000000000000ULL
-#define QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT 0x0200000000000000ULL
-#define QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT 0x0400000000000000ULL
-#define QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT 0x0800000000000000ULL
-#define QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR 0x0000008000000000ULL
-#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
-#define QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL
-#define QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL
-
-#define IBA7220_IBCC_LINKCMD_SHIFT 19
-
-/* kr_ibcddrctrl bits */
-#define IBA7220_IBC_DLIDLMC_MASK 0xFFFFFFFFUL
-#define IBA7220_IBC_DLIDLMC_SHIFT 32
-
-#define IBA7220_IBC_HRTBT_MASK (SYM_RMASK(IBCDDRCtrl, HRTBT_AUTO) | \
- SYM_RMASK(IBCDDRCtrl, HRTBT_ENB))
-#define IBA7220_IBC_HRTBT_SHIFT SYM_LSB(IBCDDRCtrl, HRTBT_ENB)
-
-#define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8)
-#define IBA7220_IBC_LREV_MASK 1
-#define IBA7220_IBC_LREV_SHIFT 8
-#define IBA7220_IBC_RXPOL_MASK 1
-#define IBA7220_IBC_RXPOL_SHIFT 7
-#define IBA7220_IBC_WIDTH_SHIFT 5
-#define IBA7220_IBC_WIDTH_MASK 0x3
-#define IBA7220_IBC_WIDTH_1X_ONLY (0 << IBA7220_IBC_WIDTH_SHIFT)
-#define IBA7220_IBC_WIDTH_4X_ONLY (1 << IBA7220_IBC_WIDTH_SHIFT)
-#define IBA7220_IBC_WIDTH_AUTONEG (2 << IBA7220_IBC_WIDTH_SHIFT)
-#define IBA7220_IBC_SPEED_AUTONEG (1 << 1)
-#define IBA7220_IBC_SPEED_SDR (1 << 2)
-#define IBA7220_IBC_SPEED_DDR (1 << 3)
-#define IBA7220_IBC_SPEED_AUTONEG_MASK (0x7 << 1)
-#define IBA7220_IBC_IBTA_1_2_MASK (1)
-
-/* kr_ibcddrstatus */
-/* link latency shift is 0, don't bother defining */
-#define IBA7220_DDRSTAT_LINKLAT_MASK 0x3ffffff
-
-/* kr_extstatus bits */
-#define QLOGIC_IB_EXTS_FREQSEL 0x2
-#define QLOGIC_IB_EXTS_SERDESSEL 0x4
-#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000
-#define QLOGIC_IB_EXTS_MEMBIST_DISABLED 0x0000000000008000
-
-/* kr_xgxsconfig bits */
-#define QLOGIC_IB_XGXS_RESET 0x5ULL
-#define QLOGIC_IB_XGXS_FC_SAFE (1ULL << 63)
-
-/* kr_rcvpktledcnt */
-#define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */
-#define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */
-
-#define _QIB_GPIO_SDA_NUM 1
-#define _QIB_GPIO_SCL_NUM 0
-#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7220 cards. */
-#define QIB_TWSI_TEMP_DEV 0x98
-
-/* HW counter clock is at 4nsec */
-#define QIB_7220_PSXMITWAIT_CHECK_RATE 4000
-
-#define IBA7220_R_INTRAVAIL_SHIFT 17
-#define IBA7220_R_PKEY_DIS_SHIFT 34
-#define IBA7220_R_TAILUPD_SHIFT 35
-#define IBA7220_R_CTXTCFG_SHIFT 36
-
-#define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
-
-/*
- * The size bits give us 2^N, in KB units; 0 marks the entry as invalid,
- * and 7 is reserved. We currently use only 2KB and 4KB.
- */
-#define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */
-#define IBA7220_TID_SZ_2K (1UL << IBA7220_TID_SZ_SHIFT) /* 2KB */
-#define IBA7220_TID_SZ_4K (2UL << IBA7220_TID_SZ_SHIFT) /* 4KB */
-#define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
-#define PBC_7220_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
-#define PBC_7220_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
-
-#define AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */
-
-/* packet rate matching delay multiplier */
-static u8 rate_to_delay[2][2] = {
- /* 1x, 4x */
- { 8, 2 }, /* SDR */
- { 4, 1 } /* DDR */
-};
-
-static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
- [IB_RATE_2_5_GBPS] = 8,
- [IB_RATE_5_GBPS] = 4,
- [IB_RATE_10_GBPS] = 2,
- [IB_RATE_20_GBPS] = 1
-};
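For illustration only (example_rate_delay is a hypothetical helper), the delay multiplier for the active link configuration is just a table lookup, with the first index selecting SDR (0) or DDR (1) and the second selecting 1X (0) or 4X (1), per the comments above:

static u8 example_rate_delay(int is_ddr, int is_4x)
{
	/* e.g. SDR 1X -> 8, DDR 4X -> 1 */
	return rate_to_delay[is_ddr ? 1 : 0][is_4x ? 1 : 0];
}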
-
-#define IBA7220_LINKSPEED_SHIFT SYM_LSB(IBCStatus, LinkSpeedActive)
-#define IBA7220_LINKWIDTH_SHIFT SYM_LSB(IBCStatus, LinkWidthActive)
-
-/* link training states, from IBC */
-#define IB_7220_LT_STATE_DISABLED 0x00
-#define IB_7220_LT_STATE_LINKUP 0x01
-#define IB_7220_LT_STATE_POLLACTIVE 0x02
-#define IB_7220_LT_STATE_POLLQUIET 0x03
-#define IB_7220_LT_STATE_SLEEPDELAY 0x04
-#define IB_7220_LT_STATE_SLEEPQUIET 0x05
-#define IB_7220_LT_STATE_CFGDEBOUNCE 0x08
-#define IB_7220_LT_STATE_CFGRCVFCFG 0x09
-#define IB_7220_LT_STATE_CFGWAITRMT 0x0a
-#define IB_7220_LT_STATE_CFGIDLE 0x0b
-#define IB_7220_LT_STATE_RECOVERRETRAIN 0x0c
-#define IB_7220_LT_STATE_RECOVERWAITRMT 0x0e
-#define IB_7220_LT_STATE_RECOVERIDLE 0x0f
-
-/* link state machine states from IBC */
-#define IB_7220_L_STATE_DOWN 0x0
-#define IB_7220_L_STATE_INIT 0x1
-#define IB_7220_L_STATE_ARM 0x2
-#define IB_7220_L_STATE_ACTIVE 0x3
-#define IB_7220_L_STATE_ACT_DEFER 0x4
-
-static const u8 qib_7220_physportstate[0x20] = {
- [IB_7220_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
- [IB_7220_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
- [IB_7220_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
- [IB_7220_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
- [IB_7220_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
- [IB_7220_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
- [IB_7220_LT_STATE_CFGDEBOUNCE] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7220_LT_STATE_CFGRCVFCFG] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7220_LT_STATE_CFGWAITRMT] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7220_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7220_LT_STATE_RECOVERRETRAIN] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [IB_7220_LT_STATE_RECOVERWAITRMT] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [IB_7220_LT_STATE_RECOVERIDLE] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
-};
-
-int qib_special_trigger;
-module_param_named(special_trigger, qib_special_trigger, int, S_IRUGO);
-MODULE_PARM_DESC(special_trigger, "Enable SpecialTrigger arm/launch");
-
-#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
-#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)
-
-#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
- (1ULL << (SYM_LSB(regname, fldname) + (bit))))
-
-#define TXEMEMPARITYERR_PIOBUF \
- SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
-#define TXEMEMPARITYERR_PIOPBC \
- SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
-#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
- SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)
-
-#define RXEMEMPARITYERR_RCVBUF \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
-#define RXEMEMPARITYERR_LOOKUPQ \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
-#define RXEMEMPARITYERR_EXPTID \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
-#define RXEMEMPARITYERR_EAGERTID \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
-#define RXEMEMPARITYERR_FLAGBUF \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
-#define RXEMEMPARITYERR_DATAINFO \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
-#define RXEMEMPARITYERR_HDRINFO \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)
-
-/* 7220 specific hardware errors... */
-static const struct qib_hwerror_msgs qib_7220_hwerror_msgs[] = {
- /* generic hardware errors */
- QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
- QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),
-
- QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
- "TXE PIOBUF Memory Parity"),
- QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
- "TXE PIOPBC Memory Parity"),
- QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
- "TXE PIOLAUNCHFIFO Memory Parity"),
-
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
- "RXE RCVBUF Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
- "RXE LOOKUPQ Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
- "RXE EAGERTID Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
- "RXE EXPTID Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
- "RXE FLAGBUF Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
- "RXE DATAINFO Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
- "RXE HDRINFO Memory Parity"),
-
- /* chip-specific hardware errors */
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
- "PCIe Poisoned TLP"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
- "PCIe completion timeout"),
- /*
- * In practice, it's unlikely that we'll see PCIe PLL, bus parity,
- * or memory parity error failures, because most likely we
- * won't be able to talk to the core of the chip. Nonetheless, we
- * might see them, if they are in parts of the PCIe core that aren't
- * essential.
- */
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
- "PCIePLL1"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
- "PCIePLL0"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
- "PCIe XTLH core parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
- "PCIe ADM TX core parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
- "PCIe ADM RX core parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
- "SerDes PLL"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR,
- "PCIe cpl header queue"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR,
- "PCIe cpl data queue"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SDMAMEMREADERR,
- "Send DMA memory read"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED,
- "uC PLL clock not locked"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT,
- "PCIe serdes Q0 no clock"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT,
- "PCIe serdes Q1 no clock"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT,
- "PCIe serdes Q2 no clock"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT,
- "PCIe serdes Q3 no clock"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR,
- "DDS RXEQ memory parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR,
- "IB uC memory parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR,
- "PCIe uC oct0 memory parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR,
- "PCIe uC oct1 memory parity"),
-};
-
-#define RXE_PARITY (RXEMEMPARITYERR_EAGERTID|RXEMEMPARITYERR_EXPTID)
-
-#define QLOGIC_IB_E_PKTERRS (\
- ERR_MASK(SendPktLenErr) | \
- ERR_MASK(SendDroppedDataPktErr) | \
- ERR_MASK(RcvVCRCErr) | \
- ERR_MASK(RcvICRCErr) | \
- ERR_MASK(RcvShortPktLenErr) | \
- ERR_MASK(RcvEBPErr))
-
-/* Convenience for decoding Send DMA errors */
-#define QLOGIC_IB_E_SDMAERRS ( \
- ERR_MASK(SDmaGenMismatchErr) | \
- ERR_MASK(SDmaOutOfBoundErr) | \
- ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \
- ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \
- ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \
- ERR_MASK(SDmaUnexpDataErr) | \
- ERR_MASK(SDmaDescAddrMisalignErr) | \
- ERR_MASK(SDmaDisabledErr) | \
- ERR_MASK(SendBufMisuseErr))
-
-/* These are all rcv-related errors which we want to count for stats */
-#define E_SUM_PKTERRS \
- (ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \
- ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \
- ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \
- ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
- ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \
- ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))
-
-/* These are all send-related errors which we want to count for stats */
-#define E_SUM_ERRS \
- (ERR_MASK(SendPioArmLaunchErr) | ERR_MASK(SendUnexpectedPktNumErr) | \
- ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
- ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \
- ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
- ERR_MASK(InvalidAddrErr))
-
-/*
- * This is similar to E_SUM_ERRS, but we can't ignore armlaunch, and we
- * don't ignore errors unrelated to freeze and cancelling buffers.
- * Armlaunch can't be ignored because more may arrive while we are still
- * cleaning up, and those need to be cancelled as they happen.
- */
-#define E_SPKT_ERRS_IGNORE \
- (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
- ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \
- ERR_MASK(SendPktLenErr))
-
-/*
- * These are errors that can occur when the link changes state while
- * a packet is being sent or received. This doesn't cover things
- * like EBP or VCRC that can be the result of the sender having the
- * link change state, so we receive a "known bad" packet.
- */
-#define E_SUM_LINK_PKTERRS \
- (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
- ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
- ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
- ERR_MASK(RcvUnexpectedCharErr))
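
/*
 * Illustrative sketch (plain C, compilable on its own) of how grouped
 * summary masks such as E_SUM_PKTERRS and E_SUM_LINK_PKTERRS are used to
 * classify a raw error word.  The EX_* bit positions below are
 * hypothetical stand-ins for the chip's ERR_MASK() values.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_RCV_HDR_LEN_ERR  (1ULL << 0)   /* stands in for ERR_MASK(RcvHdrLenErr) */
#define EX_SEND_PKT_LEN_ERR (1ULL << 1)   /* stands in for ERR_MASK(SendPktLenErr) */
#define EX_RCV_MIN_LEN_ERR  (1ULL << 2)   /* stands in for ERR_MASK(RcvMinPktLenErr) */

#define EX_SUM_PKTERRS      (EX_RCV_HDR_LEN_ERR | EX_RCV_MIN_LEN_ERR)
#define EX_SUM_LINK_PKTERRS (EX_SEND_PKT_LEN_ERR | EX_RCV_MIN_LEN_ERR)

int main(void)
{
	uint64_t errs = EX_RCV_MIN_LEN_ERR | EX_SEND_PKT_LEN_ERR;

	if (errs & EX_SUM_PKTERRS)
		printf("count as a receive packet error for stats\n");
	if (errs & EX_SUM_LINK_PKTERRS)
		printf("may be ignored while the link is coming up\n");
	if (errs & ~(EX_SUM_PKTERRS | EX_SUM_LINK_PKTERRS))
		printf("outside the summary groups: report as a real error\n");
	return 0;
}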
-
-static void autoneg_7220_work(struct work_struct *);
-static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *, u64, u32 *);
-
-/*
- * Called when we might have an error that is specific to a particular
- * PIO buffer, and may need to cancel that buffer so it can be re-used,
- * without forcing an update of pioavail.
- */
-static void qib_disarm_7220_senderrbufs(struct qib_pportdata *ppd)
-{
- unsigned long sbuf[3];
- struct qib_devdata *dd = ppd->dd;
-
- /*
- * It's possible that sendbuffererror could have bits set; might
- * have already done this as a result of hardware error handling.
- */
- /* read these before writing errorclear */
- sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
- sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
- sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);
-
- if (sbuf[0] || sbuf[1] || sbuf[2])
- qib_disarm_piobufs_set(dd, sbuf,
- dd->piobcnt2k + dd->piobcnt4k);
-}
-
-static void qib_7220_txe_recover(struct qib_devdata *dd)
-{
- qib_devinfo(dd->pcidev, "Recovering from TXE PIO parity error\n");
- qib_disarm_7220_senderrbufs(dd->pport);
-}
-
-/*
- * This is called with interrupts disabled and sdma_lock held.
- */
-static void qib_7220_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 set_sendctrl = 0;
- u64 clr_sendctrl = 0;
-
- if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
- set_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);
- else
- clr_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);
-
- if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
- set_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);
- else
- clr_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);
-
- if (op & QIB_SDMA_SENDCTRL_OP_HALT)
- set_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);
- else
- clr_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);
-
- spin_lock(&dd->sendctrl_lock);
-
- dd->sendctrl |= set_sendctrl;
- dd->sendctrl &= ~clr_sendctrl;
-
- qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
-
- spin_unlock(&dd->sendctrl_lock);
-}
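
/*
 * Minimal standalone sketch of the set-mask/clear-mask pattern used by
 * qib_7220_sdma_sendctrl() above: each OP_* flag either sets or clears
 * its control bit, and the shadow copy is updated (under sendctrl_lock
 * in the driver) before being written back to the chip.  The EX_* names
 * and bit positions are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_OP_ENABLE  0x1u
#define EX_OP_HALT    0x2u
#define EX_BIT_ENABLE (1ULL << 3)
#define EX_BIT_HALT   (1ULL << 4)

static uint64_t ex_sendctrl_shadow;

static void ex_sdma_sendctrl(unsigned int op)
{
	uint64_t set = 0, clr = 0;

	if (op & EX_OP_ENABLE)
		set |= EX_BIT_ENABLE;
	else
		clr |= EX_BIT_ENABLE;
	if (op & EX_OP_HALT)
		set |= EX_BIT_HALT;
	else
		clr |= EX_BIT_HALT;

	ex_sendctrl_shadow |= set;
	ex_sendctrl_shadow &= ~clr;
	/* the driver then writes the shadow to kr_sendctrl */
}

int main(void)
{
	ex_sdma_sendctrl(EX_OP_ENABLE);            /* enable, un-halt */
	printf("shadow=%#llx\n", (unsigned long long)ex_sendctrl_shadow);
	return 0;
}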
-
-static void qib_decode_7220_sdma_errs(struct qib_pportdata *ppd,
- u64 err, char *buf, size_t blen)
-{
- static const struct {
- u64 err;
- const char *msg;
- } errs[] = {
- { ERR_MASK(SDmaGenMismatchErr),
- "SDmaGenMismatch" },
- { ERR_MASK(SDmaOutOfBoundErr),
- "SDmaOutOfBound" },
- { ERR_MASK(SDmaTailOutOfBoundErr),
- "SDmaTailOutOfBound" },
- { ERR_MASK(SDmaBaseErr),
- "SDmaBase" },
- { ERR_MASK(SDma1stDescErr),
- "SDma1stDesc" },
- { ERR_MASK(SDmaRpyTagErr),
- "SDmaRpyTag" },
- { ERR_MASK(SDmaDwEnErr),
- "SDmaDwEn" },
- { ERR_MASK(SDmaMissingDwErr),
- "SDmaMissingDw" },
- { ERR_MASK(SDmaUnexpDataErr),
- "SDmaUnexpData" },
- { ERR_MASK(SDmaDescAddrMisalignErr),
- "SDmaDescAddrMisalign" },
- { ERR_MASK(SendBufMisuseErr),
- "SendBufMisuse" },
- { ERR_MASK(SDmaDisabledErr),
- "SDmaDisabled" },
- };
- int i;
- size_t bidx = 0;
-
- for (i = 0; i < ARRAY_SIZE(errs); i++) {
- if (err & errs[i].err)
- bidx += scnprintf(buf + bidx, blen - bidx,
- "%s ", errs[i].msg);
- }
-}
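
/*
 * Standalone sketch of the table-driven decode in
 * qib_decode_7220_sdma_errs(): walk a {mask, name} table and append the
 * name of every set bit to a bounded buffer.  Masks are hypothetical;
 * the kernel's scnprintf() caps its return value so the index can never
 * pass the buffer length, which this snprintf() version simply assumes.
 */
#include <stdio.h>
#include <stdint.h>

struct ex_err {
	uint64_t mask;
	const char *msg;
};

static const struct ex_err ex_errs[] = {
	{ 1ULL << 0, "SDmaGenMismatch" },
	{ 1ULL << 1, "SDmaOutOfBound" },
	{ 1ULL << 2, "SDmaDisabled" },
};

static void ex_decode(uint64_t err, char *buf, size_t blen)
{
	size_t bidx = 0;
	unsigned int i;

	buf[0] = '\0';
	for (i = 0; i < sizeof(ex_errs) / sizeof(ex_errs[0]); i++)
		if (err & ex_errs[i].mask)
			bidx += snprintf(buf + bidx, blen - bidx, "%s ",
					 ex_errs[i].msg);
}

int main(void)
{
	char buf[64];

	ex_decode((1ULL << 0) | (1ULL << 2), buf, sizeof(buf));
	printf("decoded: %s\n", buf);   /* "SDmaGenMismatch SDmaDisabled " */
	return 0;
}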
-
-/*
- * This is called as part of link down clean up so disarm and flush
- * all send buffers so that SMP packets can be sent.
- */
-static void qib_7220_sdma_hw_clean_up(struct qib_pportdata *ppd)
-{
- /* This will trigger the Abort interrupt */
- sendctrl_7220_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
- QIB_SENDCTRL_AVAIL_BLIP);
- ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */
-}
-
-static void qib_sdma_7220_setlengen(struct qib_pportdata *ppd)
-{
- /*
- * Set SendDmaLenGen, first with the generation-count MSB clear and
- * then with it set, to enable generation checking and load the
- * internal generation counter.
- */
- qib_write_kreg(ppd->dd, kr_senddmalengen, ppd->sdma_descq_cnt);
- qib_write_kreg(ppd->dd, kr_senddmalengen,
- ppd->sdma_descq_cnt |
- (1ULL << QIB_7220_SendDmaLenGen_Generation_MSB));
-}
-
-static void qib_7220_sdma_hw_start_up(struct qib_pportdata *ppd)
-{
- qib_sdma_7220_setlengen(ppd);
- qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
- ppd->sdma_head_dma[0] = 0;
-}
-
-#define DISABLES_SDMA ( \
- ERR_MASK(SDmaDisabledErr) | \
- ERR_MASK(SDmaBaseErr) | \
- ERR_MASK(SDmaTailOutOfBoundErr) | \
- ERR_MASK(SDmaOutOfBoundErr) | \
- ERR_MASK(SDma1stDescErr) | \
- ERR_MASK(SDmaRpyTagErr) | \
- ERR_MASK(SDmaGenMismatchErr) | \
- ERR_MASK(SDmaDescAddrMisalignErr) | \
- ERR_MASK(SDmaMissingDwErr) | \
- ERR_MASK(SDmaDwEnErr))
-
-static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs)
-{
- unsigned long flags;
- struct qib_devdata *dd = ppd->dd;
- char *msg;
-
- errs &= QLOGIC_IB_E_SDMAERRS;
-
- msg = dd->cspec->sdmamsgbuf;
- qib_decode_7220_sdma_errs(ppd, errs, msg,
- sizeof(dd->cspec->sdmamsgbuf));
- spin_lock_irqsave(&ppd->sdma_lock, flags);
-
- if (errs & ERR_MASK(SendBufMisuseErr)) {
- unsigned long sbuf[3];
-
- sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
- sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
- sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);
-
- qib_dev_err(ppd->dd,
- "IB%u:%u SendBufMisuse: %04lx %016lx %016lx\n",
- ppd->dd->unit, ppd->port, sbuf[2], sbuf[1],
- sbuf[0]);
- }
-
- if (errs & ERR_MASK(SDmaUnexpDataErr))
- qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", ppd->dd->unit,
- ppd->port);
-
- switch (ppd->sdma_state.current_state) {
- case qib_sdma_state_s00_hw_down:
- /* not expecting any interrupts */
- break;
-
- case qib_sdma_state_s10_hw_start_up_wait:
- /* handled in intr path */
- break;
-
- case qib_sdma_state_s20_idle:
- /* not expecting any interrupts */
- break;
-
- case qib_sdma_state_s30_sw_clean_up_wait:
- /* not expecting any interrupts */
- break;
-
- case qib_sdma_state_s40_hw_clean_up_wait:
- if (errs & ERR_MASK(SDmaDisabledErr))
- __qib_sdma_process_event(ppd,
- qib_sdma_event_e50_hw_cleaned);
- break;
-
- case qib_sdma_state_s50_hw_halt_wait:
- /* handled in intr path */
- break;
-
- case qib_sdma_state_s99_running:
- if (errs & DISABLES_SDMA)
- __qib_sdma_process_event(ppd,
- qib_sdma_event_e7220_err_halted);
- break;
- }
-
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-}
-
-/*
- * Decode the error status into strings, deciding whether to always
- * print it or not, depending on "normal packet errors" vs everything
- * else. Return 1 if there are "real" errors, otherwise 0 if only packet
- * errors, so the caller can decide what to print with the string.
- */
-static int qib_decode_7220_err(struct qib_devdata *dd, char *buf, size_t blen,
- u64 err)
-{
- int iserr = 1;
-
- *buf = '\0';
- if (err & QLOGIC_IB_E_PKTERRS) {
- if (!(err & ~QLOGIC_IB_E_PKTERRS))
- iserr = 0;
- if ((err & ERR_MASK(RcvICRCErr)) &&
- !(err & (ERR_MASK(RcvVCRCErr) | ERR_MASK(RcvEBPErr))))
- strlcat(buf, "CRC ", blen);
- if (!iserr)
- goto done;
- }
- if (err & ERR_MASK(RcvHdrLenErr))
- strlcat(buf, "rhdrlen ", blen);
- if (err & ERR_MASK(RcvBadTidErr))
- strlcat(buf, "rbadtid ", blen);
- if (err & ERR_MASK(RcvBadVersionErr))
- strlcat(buf, "rbadversion ", blen);
- if (err & ERR_MASK(RcvHdrErr))
- strlcat(buf, "rhdr ", blen);
- if (err & ERR_MASK(SendSpecialTriggerErr))
- strlcat(buf, "sendspecialtrigger ", blen);
- if (err & ERR_MASK(RcvLongPktLenErr))
- strlcat(buf, "rlongpktlen ", blen);
- if (err & ERR_MASK(RcvMaxPktLenErr))
- strlcat(buf, "rmaxpktlen ", blen);
- if (err & ERR_MASK(RcvMinPktLenErr))
- strlcat(buf, "rminpktlen ", blen);
- if (err & ERR_MASK(SendMinPktLenErr))
- strlcat(buf, "sminpktlen ", blen);
- if (err & ERR_MASK(RcvFormatErr))
- strlcat(buf, "rformaterr ", blen);
- if (err & ERR_MASK(RcvUnsupportedVLErr))
- strlcat(buf, "runsupvl ", blen);
- if (err & ERR_MASK(RcvUnexpectedCharErr))
- strlcat(buf, "runexpchar ", blen);
- if (err & ERR_MASK(RcvIBFlowErr))
- strlcat(buf, "ribflow ", blen);
- if (err & ERR_MASK(SendUnderRunErr))
- strlcat(buf, "sunderrun ", blen);
- if (err & ERR_MASK(SendPioArmLaunchErr))
- strlcat(buf, "spioarmlaunch ", blen);
- if (err & ERR_MASK(SendUnexpectedPktNumErr))
- strlcat(buf, "sunexperrpktnum ", blen);
- if (err & ERR_MASK(SendDroppedSmpPktErr))
- strlcat(buf, "sdroppedsmppkt ", blen);
- if (err & ERR_MASK(SendMaxPktLenErr))
- strlcat(buf, "smaxpktlen ", blen);
- if (err & ERR_MASK(SendUnsupportedVLErr))
- strlcat(buf, "sunsupVL ", blen);
- if (err & ERR_MASK(InvalidAddrErr))
- strlcat(buf, "invalidaddr ", blen);
- if (err & ERR_MASK(RcvEgrFullErr))
- strlcat(buf, "rcvegrfull ", blen);
- if (err & ERR_MASK(RcvHdrFullErr))
- strlcat(buf, "rcvhdrfull ", blen);
- if (err & ERR_MASK(IBStatusChanged))
- strlcat(buf, "ibcstatuschg ", blen);
- if (err & ERR_MASK(RcvIBLostLinkErr))
- strlcat(buf, "riblostlink ", blen);
- if (err & ERR_MASK(HardwareErr))
- strlcat(buf, "hardware ", blen);
- if (err & ERR_MASK(ResetNegated))
- strlcat(buf, "reset ", blen);
- if (err & QLOGIC_IB_E_SDMAERRS)
- qib_decode_7220_sdma_errs(dd->pport, err, buf, blen);
- if (err & ERR_MASK(InvalidEEPCmd))
- strlcat(buf, "invalideepromcmd ", blen);
-done:
- return iserr;
-}
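
/*
 * Sketch of how a caller uses the return value of qib_decode_7220_err():
 * it only says whether any non-packet ("real") error was present, so the
 * caller can pick the log level, while the text itself is built in the
 * supplied buffer.  The masks and strings below are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define EX_PKTERRS 0x0fULL      /* stand-in for QLOGIC_IB_E_PKTERRS */

static int ex_decode_err(uint64_t err, char *buf, size_t blen)
{
	int iserr = 1;

	buf[0] = '\0';
	if ((err & EX_PKTERRS) && !(err & ~EX_PKTERRS))
		iserr = 0;                      /* packet errors only */
	if (err & 0x1)
		strncat(buf, "crc ", blen - strlen(buf) - 1);
	if (err & 0x10)
		strncat(buf, "hardware ", blen - strlen(buf) - 1);
	return iserr;
}

int main(void)
{
	char msg[64];

	if (ex_decode_err(0x1, msg, sizeof(msg)))
		printf("error: %s\n", msg);
	else
		printf("debug only: %s\n", msg);  /* taken for packet-only errors */
	return 0;
}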
-
-static void reenable_7220_chase(unsigned long opaque)
-{
- struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
-
- ppd->cpspec->chase_timer.expires = 0;
- qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
- QLOGIC_IB_IBCC_LINKINITCMD_POLL);
-}
-
-static void handle_7220_chase(struct qib_pportdata *ppd, u64 ibcst)
-{
- u8 ibclt;
- unsigned long tnow;
-
- ibclt = (u8)SYM_FIELD(ibcst, IBCStatus, LinkTrainingState);
-
- /*
- * Detect and handle the state chase issue, where we can
- * get stuck if we are unlucky on timing on both sides of
- * the link. If we are, we disable, set a timer, and
- * then re-enable.
- */
- switch (ibclt) {
- case IB_7220_LT_STATE_CFGRCVFCFG:
- case IB_7220_LT_STATE_CFGWAITRMT:
- case IB_7220_LT_STATE_TXREVLANES:
- case IB_7220_LT_STATE_CFGENH:
- tnow = jiffies;
- if (ppd->cpspec->chase_end &&
- time_after(tnow, ppd->cpspec->chase_end)) {
- ppd->cpspec->chase_end = 0;
- qib_set_ib_7220_lstate(ppd,
- QLOGIC_IB_IBCC_LINKCMD_DOWN,
- QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
- ppd->cpspec->chase_timer.expires = jiffies +
- QIB_CHASE_DIS_TIME;
- add_timer(&ppd->cpspec->chase_timer);
- } else if (!ppd->cpspec->chase_end)
- ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
- break;
-
- default:
- ppd->cpspec->chase_end = 0;
- break;
- }
-}
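
/*
 * Userspace sketch of the chase detection above: while the training
 * state sits in a chase-prone state, open a window; if the window
 * expires without leaving those states, disable the link and arm a
 * timer to re-enable it.  Plain seconds stand in for jiffies and
 * EX_CHASE_TIME for QIB_CHASE_TIME; both are hypothetical here.
 */
#include <stdio.h>
#include <time.h>

#define EX_CHASE_TIME 2                  /* seconds allowed in a chase-prone state */

static time_t ex_chase_end;

static void ex_handle_chase(int in_chase_prone_state)
{
	time_t now = time(NULL);

	if (!in_chase_prone_state) {
		ex_chase_end = 0;            /* left the window, reset */
		return;
	}
	if (ex_chase_end && now > ex_chase_end) {
		ex_chase_end = 0;
		printf("stuck: disable link, arm re-enable timer\n");
	} else if (!ex_chase_end) {
		ex_chase_end = now + EX_CHASE_TIME;   /* open the window */
	}
}

int main(void)
{
	ex_handle_chase(1);                  /* opens the window */
	ex_handle_chase(0);                  /* leaves it, resets */
	return 0;
}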
-
-static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
-{
- char *msg;
- u64 ignore_this_time = 0;
- u64 iserr = 0;
- int log_idx;
- struct qib_pportdata *ppd = dd->pport;
- u64 mask;
-
- /* don't report errors that are masked */
- errs &= dd->cspec->errormask;
- msg = dd->cspec->emsgbuf;
-
- /* do these first, they are most important */
- if (errs & ERR_MASK(HardwareErr))
- qib_7220_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
- else
- for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
- if (errs & dd->eep_st_masks[log_idx].errs_to_log)
- qib_inc_eeprom_err(dd, log_idx, 1);
-
- if (errs & QLOGIC_IB_E_SDMAERRS)
- sdma_7220_errors(ppd, errs);
-
- if (errs & ~IB_E_BITSEXTANT)
- qib_dev_err(dd,
- "error interrupt with unknown errors %llx set\n",
- (unsigned long long) (errs & ~IB_E_BITSEXTANT));
-
- if (errs & E_SUM_ERRS) {
- qib_disarm_7220_senderrbufs(ppd);
- if ((errs & E_SUM_LINK_PKTERRS) &&
- !(ppd->lflags & QIBL_LINKACTIVE)) {
- /*
- * This can happen when trying to bring the link
- * up, but the IB link changes state at the "wrong"
- * time. The IB logic then complains that the packet
- * isn't valid. We don't want to confuse people, so
- * we just don't print them, except at debug
- */
- ignore_this_time = errs & E_SUM_LINK_PKTERRS;
- }
- } else if ((errs & E_SUM_LINK_PKTERRS) &&
- !(ppd->lflags & QIBL_LINKACTIVE)) {
- /*
- * This can happen when SMA is trying to bring the link
- * up, but the IB link changes state at the "wrong" time.
- * The IB logic then complains that the packet isn't
- * valid. We don't want to confuse people, so we just
- * don't print them, except at debug
- */
- ignore_this_time = errs & E_SUM_LINK_PKTERRS;
- }
-
- qib_write_kreg(dd, kr_errclear, errs);
-
- errs &= ~ignore_this_time;
- if (!errs)
- goto done;
-
- /*
- * The ones we mask off are handled specially below
- * or above. Also mask SDMADISABLED by default as it
- * is too chatty.
- */
- mask = ERR_MASK(IBStatusChanged) |
- ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |
- ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr);
-
- qib_decode_7220_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
-
- if (errs & E_SUM_PKTERRS)
- qib_stats.sps_rcverrs++;
- if (errs & E_SUM_ERRS)
- qib_stats.sps_txerrs++;
- iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS |
- ERR_MASK(SDmaDisabledErr));
-
- if (errs & ERR_MASK(IBStatusChanged)) {
- u64 ibcs;
-
- ibcs = qib_read_kreg64(dd, kr_ibcstatus);
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
- handle_7220_chase(ppd, ibcs);
-
- /* Update our picture of width and speed from chip */
- ppd->link_width_active =
- ((ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1) ?
- IB_WIDTH_4X : IB_WIDTH_1X;
- ppd->link_speed_active =
- ((ibcs >> IBA7220_LINKSPEED_SHIFT) & 1) ?
- QIB_IB_DDR : QIB_IB_SDR;
-
- /*
- * Since going into a recovery state causes the link state
- * to go down and since recovery is transitory, it is better
- * if we "miss" ever seeing the link training state go into
- * recovery (i.e., ignore this transition for link state
- * special handling purposes) without updating lastibcstat.
- */
- if (qib_7220_phys_portstate(ibcs) !=
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
- qib_handle_e_ibstatuschanged(ppd, ibcs);
- }
-
- if (errs & ERR_MASK(ResetNegated)) {
- qib_dev_err(dd,
- "Got reset, requires re-init (unload and reload driver)\n");
- dd->flags &= ~QIB_INITTED; /* needs re-init */
- /* mark as having had error */
- *dd->devstatusp |= QIB_STATUS_HWERROR;
- *dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
- }
-
- if (*msg && iserr)
- qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
-
- if (ppd->state_wanted & ppd->lflags)
- wake_up_interruptible(&ppd->state_wait);
-
- /*
- * If there were hdrq or egrfull errors, wake up any processes
- * waiting in poll. We used to try to check which contexts had
- * the overflow, but given the cost of that and the chip reads
- * to support it, it's better to just wake everybody up if we
- * get an overflow; waiters can poll again if it's not them.
- */
- if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
- qib_handle_urcv(dd, ~0U);
- if (errs & ERR_MASK(RcvEgrFullErr))
- qib_stats.sps_buffull++;
- else
- qib_stats.sps_hdrfull++;
- }
-done:
- return;
-}
-
-/* enable/disable chip from delivering interrupts */
-static void qib_7220_set_intr_state(struct qib_devdata *dd, u32 enable)
-{
- if (enable) {
- if (dd->flags & QIB_BADINTR)
- return;
- qib_write_kreg(dd, kr_intmask, ~0ULL);
- /* force re-interrupt of any pending interrupts. */
- qib_write_kreg(dd, kr_intclear, 0ULL);
- } else
- qib_write_kreg(dd, kr_intmask, 0ULL);
-}
-
-/*
- * Try to cleanup as much as possible for anything that might have gone
- * wrong while in freeze mode, such as pio buffers being written by user
- * processes (causing armlaunch), send errors due to going into freeze mode,
- * etc., and try to avoid causing extra interrupts while doing so.
- * Forcibly update the in-memory pioavail register copies after cleanup
- * because the chip won't do it while in freeze mode (the register values
- * themselves are kept correct).
- * Make sure that we don't lose any important interrupts by using the chip
- * feature that says that writing 0 to a bit in *clear that is set in
- * *status will cause an interrupt to be generated again (if allowed by
- * the *mask value).
- * This is in chip-specific code because of all of the register accesses,
- * even though the details are similar on most chips.
- */
-static void qib_7220_clear_freeze(struct qib_devdata *dd)
-{
- /* disable error interrupts, to avoid confusion */
- qib_write_kreg(dd, kr_errmask, 0ULL);
-
- /* also disable interrupts; errormask is sometimes overwritten */
- qib_7220_set_intr_state(dd, 0);
-
- qib_cancel_sends(dd->pport);
-
- /* clear the freeze, and be sure chip saw it */
- qib_write_kreg(dd, kr_control, dd->control);
- qib_read_kreg32(dd, kr_scratch);
-
- /* force in-memory update now we are out of freeze */
- qib_force_pio_avail_update(dd);
-
- /*
- * force new interrupt if any hwerr, error or interrupt bits are
- * still set, and clear "safe" send packet errors related to freeze
- * and cancelling sends. Re-enable error interrupts before possible
- * force of re-interrupt on pending interrupts.
- */
- qib_write_kreg(dd, kr_hwerrclear, 0ULL);
- qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
- qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
- qib_7220_set_intr_state(dd, 1);
-}
-
-/**
- * qib_7220_handle_hwerrors - display hardware errors.
- * @dd: the qlogic_ib device
- * @msg: the output buffer
- * @msgl: the size of the output buffer
- *
- * Most hardware errors are catastrophic, but for right now we'll
- * print them and continue. We reuse the same message buffer as
- * handle_7220_errors() to avoid excessive stack usage.
- */
-static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
- size_t msgl)
-{
- u64 hwerrs;
- u32 bits, ctrl;
- int isfatal = 0;
- char *bitsmsg;
- int log_idx;
-
- hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
- if (!hwerrs)
- goto bail;
- if (hwerrs == ~0ULL) {
- qib_dev_err(dd,
- "Read of hardware error status failed (all bits set); ignoring\n");
- goto bail;
- }
- qib_stats.sps_hwerrs++;
-
- /*
- * Always clear the error status register, except MEMBISTFAIL,
- * regardless of whether we continue or stop using the chip.
- * We want that set so we know it failed, even across driver reload.
- * We'll still ignore it in the hwerrmask. We do this partly for
- * diagnostics, but also for support.
- */
- qib_write_kreg(dd, kr_hwerrclear,
- hwerrs & ~HWE_MASK(PowerOnBISTFailed));
-
- hwerrs &= dd->cspec->hwerrmask;
-
- /* We log some errors to EEPROM, check if we have any of those. */
- for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
- if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
- qib_inc_eeprom_err(dd, log_idx, 1);
- if (hwerrs & ~(TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC |
- RXE_PARITY))
- qib_devinfo(dd->pcidev,
- "Hardware error: hwerr=0x%llx (cleared)\n",
- (unsigned long long) hwerrs);
-
- if (hwerrs & ~IB_HWE_BITSEXTANT)
- qib_dev_err(dd,
- "hwerror interrupt with unknown errors %llx set\n",
- (unsigned long long) (hwerrs & ~IB_HWE_BITSEXTANT));
-
- if (hwerrs & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR)
- qib_sd7220_clr_ibpar(dd);
-
- ctrl = qib_read_kreg32(dd, kr_control);
- if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
- /*
- * Parity errors in send memory are recoverable by h/w
- * just do housekeeping, exit freeze mode and continue.
- */
- if (hwerrs & (TXEMEMPARITYERR_PIOBUF |
- TXEMEMPARITYERR_PIOPBC)) {
- qib_7220_txe_recover(dd);
- hwerrs &= ~(TXEMEMPARITYERR_PIOBUF |
- TXEMEMPARITYERR_PIOPBC);
- }
- if (hwerrs)
- isfatal = 1;
- else
- qib_7220_clear_freeze(dd);
- }
-
- *msg = '\0';
-
- if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
- isfatal = 1;
- strlcat(msg,
- "[Memory BIST test failed, InfiniPath hardware unusable]",
- msgl);
- /* ignore from now on, so disable until driver reloaded */
- dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
- }
-
- qib_format_hwerrors(hwerrs, qib_7220_hwerror_msgs,
- ARRAY_SIZE(qib_7220_hwerror_msgs), msg, msgl);
-
- bitsmsg = dd->cspec->bitsmsgbuf;
- if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
- QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
- bits = (u32) ((hwerrs >>
- QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
- QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
- snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
- "[PCIe Mem Parity Errs %x] ", bits);
- strlcat(msg, bitsmsg, msgl);
- }
-
-#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \
- QLOGIC_IB_HWE_COREPLL_RFSLIP)
-
- if (hwerrs & _QIB_PLL_FAIL) {
- isfatal = 1;
- snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
- "[PLL failed (%llx), InfiniPath hardware unusable]",
- (unsigned long long) hwerrs & _QIB_PLL_FAIL);
- strlcat(msg, bitsmsg, msgl);
- /* ignore from now on, so disable until driver reloaded */
- dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
- }
-
- if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
- /*
- * If it occurs, it is left masked since the external
- * interface is unused.
- */
- dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
- }
-
- qib_dev_err(dd, "%s hardware error\n", msg);
-
- if (isfatal && !dd->diag_client) {
- qib_dev_err(dd,
- "Fatal Hardware Error, no longer usable, SN %.16s\n",
- dd->serial);
- /*
- * For /sys status file and user programs to print; if no
- * trailing brace is copied, we'll know it was truncated.
- */
- if (dd->freezemsg)
- snprintf(dd->freezemsg, dd->freezelen,
- "{%s}", msg);
- qib_disable_after_error(dd);
- }
-bail:;
-}
-
-/**
- * qib_7220_init_hwerrors - enable hardware errors
- * @dd: the qlogic_ib device
- *
- * Now that we have finished initializing everything that might reasonably
- * cause a hardware error, and cleared those error bits as they occur,
- * we can enable hardware errors in the mask (potentially enabling
- * freeze mode), and enable hardware errors as errors (along with
- * everything else) in errormask.
- */
-static void qib_7220_init_hwerrors(struct qib_devdata *dd)
-{
- u64 val;
- u64 extsval;
-
- extsval = qib_read_kreg64(dd, kr_extstatus);
-
- if (!(extsval & (QLOGIC_IB_EXTS_MEMBIST_ENDTEST |
- QLOGIC_IB_EXTS_MEMBIST_DISABLED)))
- qib_dev_err(dd, "MemBIST did not complete!\n");
- if (extsval & QLOGIC_IB_EXTS_MEMBIST_DISABLED)
- qib_devinfo(dd->pcidev, "MemBIST is disabled.\n");
-
- val = ~0ULL; /* default: all hwerrors become interrupts */
-
- val &= ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
- dd->cspec->hwerrmask = val;
-
- qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
-
- /* clear all */
- qib_write_kreg(dd, kr_errclear, ~0ULL);
- /* enable errors that are masked, at least this first time. */
- qib_write_kreg(dd, kr_errmask, ~0ULL);
- dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
- /* clear any interrupts up to this point (ints still not enabled) */
- qib_write_kreg(dd, kr_intclear, ~0ULL);
-}
-
-/*
- * Disable and enable the armlaunch error. Used for PIO bandwidth testing
- * on chips that are count-based, rather than trigger-based. There is no
- * reference counting, but that's also fine, given the intended use.
- * Only chip-specific because it's all register accesses
- */
-static void qib_set_7220_armlaunch(struct qib_devdata *dd, u32 enable)
-{
- if (enable) {
- qib_write_kreg(dd, kr_errclear, ERR_MASK(SendPioArmLaunchErr));
- dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
- } else
- dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
- qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
-}
-
-/*
- * Formerly took parameter <which> in pre-shifted,
- * pre-merged form with LinkCmd and LinkInitCmd
- * together, and assumed that zero was a NOP.
- */
-static void qib_set_ib_7220_lstate(struct qib_pportdata *ppd, u16 linkcmd,
- u16 linitcmd)
-{
- u64 mod_wd;
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
-
- if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
- /*
- * If we are told to disable, note that so link-recovery
- * code does not attempt to bring us back up.
- */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_LINK_DISABLED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
- /*
- * Any other linkinitcmd will lead to LINKDOWN and then
- * to INIT (if all is well), so clear flag to let
- * link-recovery code attempt to bring us back up.
- */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- }
-
- mod_wd = (linkcmd << IBA7220_IBCC_LINKCMD_SHIFT) |
- (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
-
- qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl | mod_wd);
- /* write to chip to prevent back-to-back writes of ibc reg */
- qib_write_kreg(dd, kr_scratch, 0);
-}
-
-/*
- * All detailed interaction with the SerDes has been moved to qib_sd7220.c
- *
- * The portion of IBA7220-specific bringup_serdes() that actually deals with
- * registers and memory within the SerDes itself is qib_sd7220_init().
- */
-
-/**
- * qib_7220_bringup_serdes - bring up the serdes
- * @ppd: physical port on the qlogic_ib device
- */
-static int qib_7220_bringup_serdes(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 val, prev_val, guid, ibc;
- int ret = 0;
-
- /* Put IBC in reset, sends disabled */
- dd->control &= ~QLOGIC_IB_C_LINKENABLE;
- qib_write_kreg(dd, kr_control, 0ULL);
-
- if (qib_compat_ddr_negotiate) {
- ppd->cpspec->ibdeltainprog = 1;
- ppd->cpspec->ibsymsnap = read_7220_creg32(dd, cr_ibsymbolerr);
- ppd->cpspec->iblnkerrsnap =
- read_7220_creg32(dd, cr_iblinkerrrecov);
- }
-
- /* flowcontrolwatermark is in units of KBytes */
- ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
- /*
- * How often flowctrl sent. More or less in usecs; balance against
- * watermark value, so that in theory senders always get a flow
- * control update in time to not let the IB link go idle.
- */
- ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
- /* max error tolerance */
- ibc |= 0xfULL << SYM_LSB(IBCCtrl, PhyerrThreshold);
- /* use "real" buffer space for */
- ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
- /* IB credit flow control. */
- ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
- /*
- * set initial max size pkt IBC will send, including ICRC; it's the
- * PIO buffer size in dwords, less 1; also see qib_set_mtu()
- */
- ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
- ppd->cpspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */
-
- /* initially come up waiting for TS1, without sending anything. */
- val = ppd->cpspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
- QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
- qib_write_kreg(dd, kr_ibcctrl, val);
-
- if (!ppd->cpspec->ibcddrctrl) {
- /* not on re-init after reset */
- ppd->cpspec->ibcddrctrl = qib_read_kreg64(dd, kr_ibcddrctrl);
-
- if (ppd->link_speed_enabled == (QIB_IB_SDR | QIB_IB_DDR))
- ppd->cpspec->ibcddrctrl |=
- IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK;
- else
- ppd->cpspec->ibcddrctrl |=
- ppd->link_speed_enabled == QIB_IB_DDR ?
- IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
- if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
- (IB_WIDTH_1X | IB_WIDTH_4X))
- ppd->cpspec->ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG;
- else
- ppd->cpspec->ibcddrctrl |=
- ppd->link_width_enabled == IB_WIDTH_4X ?
- IBA7220_IBC_WIDTH_4X_ONLY :
- IBA7220_IBC_WIDTH_1X_ONLY;
-
- /* always enable these on driver reload, not sticky */
- ppd->cpspec->ibcddrctrl |=
- IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT;
- ppd->cpspec->ibcddrctrl |=
- IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
-
- /* enable automatic lane reversal detection for receive */
- ppd->cpspec->ibcddrctrl |= IBA7220_IBC_LANE_REV_SUPPORTED;
- } else
- /* write to chip to prevent back-to-back writes of ibc reg */
- qib_write_kreg(dd, kr_scratch, 0);
-
- qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
- qib_write_kreg(dd, kr_scratch, 0);
-
- qib_write_kreg(dd, kr_ncmodectrl, 0ULL);
- qib_write_kreg(dd, kr_scratch, 0);
-
- ret = qib_sd7220_init(dd);
-
- val = qib_read_kreg64(dd, kr_xgxs_cfg);
- prev_val = val;
- val |= QLOGIC_IB_XGXS_FC_SAFE;
- if (val != prev_val) {
- qib_write_kreg(dd, kr_xgxs_cfg, val);
- qib_read_kreg32(dd, kr_scratch);
- }
- if (val & QLOGIC_IB_XGXS_RESET)
- val &= ~QLOGIC_IB_XGXS_RESET;
- if (val != prev_val)
- qib_write_kreg(dd, kr_xgxs_cfg, val);
-
- /* first time through, set port guid */
- if (!ppd->guid)
- ppd->guid = dd->base_guid;
- guid = be64_to_cpu(ppd->guid);
-
- qib_write_kreg(dd, kr_hrtbt_guid, guid);
- if (!ret) {
- dd->control |= QLOGIC_IB_C_LINKENABLE;
- qib_write_kreg(dd, kr_control, dd->control);
- } else
- /* write to chip to prevent back-to-back writes of ibc reg */
- qib_write_kreg(dd, kr_scratch, 0);
- return ret;
-}
-
-/**
- * qib_7220_quiet_serdes - set serdes to txidle
- * @ppd: physical port of the qlogic_ib device
- * Called when driver is being unloaded
- */
-static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
-{
- u64 val;
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
-
- /* disable IBC */
- dd->control &= ~QLOGIC_IB_C_LINKENABLE;
- qib_write_kreg(dd, kr_control,
- dd->control | QLOGIC_IB_C_FREEZEMODE);
-
- ppd->cpspec->chase_end = 0;
- if (ppd->cpspec->chase_timer.data) /* if initted */
- del_timer_sync(&ppd->cpspec->chase_timer);
-
- if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
- ppd->cpspec->ibdeltainprog) {
- u64 diagc;
-
- /* enable counter writes */
- diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
- qib_write_kreg(dd, kr_hwdiagctrl,
- diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
-
- if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
- val = read_7220_creg32(dd, cr_ibsymbolerr);
- if (ppd->cpspec->ibdeltainprog)
- val -= val - ppd->cpspec->ibsymsnap;
- val -= ppd->cpspec->ibsymdelta;
- write_7220_creg(dd, cr_ibsymbolerr, val);
- }
- if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
- val = read_7220_creg32(dd, cr_iblinkerrrecov);
- if (ppd->cpspec->ibdeltainprog)
- val -= val - ppd->cpspec->iblnkerrsnap;
- val -= ppd->cpspec->iblnkerrdelta;
- write_7220_creg(dd, cr_iblinkerrrecov, val);
- }
-
- /* and disable counter writes */
- qib_write_kreg(dd, kr_hwdiagctrl, diagc);
- }
- qib_set_ib_7220_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- wake_up(&ppd->cpspec->autoneg_wait);
- cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
-
- shutdown_7220_relock_poll(ppd->dd);
- val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
- val |= QLOGIC_IB_XGXS_RESET;
- qib_write_kreg(ppd->dd, kr_xgxs_cfg, val);
-}
-
-/**
- * qib_setup_7220_setextled - set the state of the two external LEDs
- * @ppd: the qlogic_ib port data
- * @on: whether the link is up or not
- *
- * The exact combo of LEDs if on is true is determined by looking
- * at the ibcstatus.
- *
- * These LEDs indicate the physical and logical state of IB link.
- * For this chip (at least with recommended board pinouts), LED1
- * is Yellow (logical state) and LED2 is Green (physical state),
- *
- * Note: We try to match the Mellanox HCA LED behavior as best
- * we can. Green indicates physical link state is OK (something is
- * plugged in, and we can train).
- * Amber indicates the link is logically up (ACTIVE).
- * Mellanox further blinks the amber LED to indicate data packet
- * activity, but we have no hardware support for that, so it would
- * require waking up every 10-20 msecs and checking the counters
- * on the chip, and then turning the LED off if appropriate. That's
- * visible overhead, so not something we will do.
- *
- */
-static void qib_setup_7220_setextled(struct qib_pportdata *ppd, u32 on)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 extctl, ledblink = 0, val, lst, ltst;
- unsigned long flags;
-
- /*
- * The diags use the LED to indicate diag info, so we leave
- * the external LED alone when the diags are running.
- */
- if (dd->diag_client)
- return;
-
- if (ppd->led_override) {
- ltst = (ppd->led_override & QIB_LED_PHYS) ?
- IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED,
- lst = (ppd->led_override & QIB_LED_LOG) ?
- IB_PORT_ACTIVE : IB_PORT_DOWN;
- } else if (on) {
- val = qib_read_kreg64(dd, kr_ibcstatus);
- ltst = qib_7220_phys_portstate(val);
- lst = qib_7220_iblink_state(val);
- } else {
- ltst = 0;
- lst = 0;
- }
-
- spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
- extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
- SYM_MASK(EXTCtrl, LEDPriPortYellowOn));
- if (ltst == IB_PHYSPORTSTATE_LINKUP) {
- extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
- /*
- * counts are in chip clock (4ns) periods.
- * This is 1/16 sec (66.6ms) on,
- * 3/16 sec (187.5 ms) off, with packets rcvd
- */
- ledblink = ((66600 * 1000UL / 4) << IBA7220_LEDBLINK_ON_SHIFT)
- | ((187500 * 1000UL / 4) << IBA7220_LEDBLINK_OFF_SHIFT);
- }
- if (lst == IB_PORT_ACTIVE)
- extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
- dd->cspec->extctrl = extctl;
- qib_write_kreg(dd, kr_extctrl, extctl);
- spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
-
- if (ledblink) /* blink the LED on packet receive */
- qib_write_kreg(dd, kr_rcvpktledcnt, ledblink);
-}
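
/*
 * Worked example of the blink encoding above: the on/off fields are
 * counts of 4 ns chip-clock periods, so 66.6 ms on and 187.5 ms off
 * become 66600000/4 and 187500000/4 periods.  The EX_* shifts are
 * hypothetical stand-ins for IBA7220_LEDBLINK_{ON,OFF}_SHIFT.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_LEDBLINK_ON_SHIFT  0
#define EX_LEDBLINK_OFF_SHIFT 32

int main(void)
{
	uint64_t on_periods  = 66600UL * 1000UL / 4;   /* 16650000 * 4 ns = 66.6 ms  */
	uint64_t off_periods = 187500UL * 1000UL / 4;  /* 46875000 * 4 ns = 187.5 ms */
	uint64_t ledblink = (on_periods << EX_LEDBLINK_ON_SHIFT) |
			    (off_periods << EX_LEDBLINK_OFF_SHIFT);

	printf("on=%llu off=%llu reg=%#llx\n",
	       (unsigned long long)on_periods,
	       (unsigned long long)off_periods,
	       (unsigned long long)ledblink);
	return 0;
}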
-
-static void qib_7220_free_irq(struct qib_devdata *dd)
-{
- if (dd->cspec->irq) {
- free_irq(dd->cspec->irq, dd);
- dd->cspec->irq = 0;
- }
- qib_nomsi(dd);
-}
-
-/*
- * qib_setup_7220_cleanup - clean up any chip-specific stuff
- * @dd: the qlogic_ib device
- *
- * This is called during driver unload.
- *
- */
-static void qib_setup_7220_cleanup(struct qib_devdata *dd)
-{
- qib_7220_free_irq(dd);
- kfree(dd->cspec->cntrs);
- kfree(dd->cspec->portcntrs);
-}
-
-/*
- * This is only called for SDmaInt.
- * SDmaDisabled is handled on the error path.
- */
-static void sdma_7220_intr(struct qib_pportdata *ppd, u64 istat)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
-
- switch (ppd->sdma_state.current_state) {
- case qib_sdma_state_s00_hw_down:
- break;
-
- case qib_sdma_state_s10_hw_start_up_wait:
- __qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
- break;
-
- case qib_sdma_state_s20_idle:
- break;
-
- case qib_sdma_state_s30_sw_clean_up_wait:
- break;
-
- case qib_sdma_state_s40_hw_clean_up_wait:
- break;
-
- case qib_sdma_state_s50_hw_halt_wait:
- __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
- break;
-
- case qib_sdma_state_s99_running:
- /* too chatty to print here */
- __qib_sdma_intr(ppd);
- break;
- }
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-}
-
-static void qib_wantpiobuf_7220_intr(struct qib_devdata *dd, u32 needint)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- if (needint) {
- if (!(dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
- goto done;
- /*
- * Blip the availupd off; the next write turns it back on, so
- * we ensure an avail update, regardless of threshold or
- * buffers becoming free, whenever we want an interrupt.
- */
- qib_write_kreg(dd, kr_sendctrl, dd->sendctrl &
- ~SYM_MASK(SendCtrl, SendBufAvailUpd));
- qib_write_kreg(dd, kr_scratch, 0ULL);
- dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
- } else
- dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
- qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
- qib_write_kreg(dd, kr_scratch, 0ULL);
-done:
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
-}
-
-/*
- * Handle errors and unusual events first, separate function
- * to improve cache hits for fast path interrupt handling.
- */
-static noinline void unlikely_7220_intr(struct qib_devdata *dd, u64 istat)
-{
- if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
- qib_dev_err(dd,
- "interrupt with unknown interrupts %Lx set\n",
- istat & ~QLOGIC_IB_I_BITSEXTANT);
-
- if (istat & QLOGIC_IB_I_GPIO) {
- u32 gpiostatus;
-
- /*
- * Boards for this chip currently don't use GPIO interrupts,
- * so clear by writing GPIOstatus to GPIOclear, and complain
- * to alert developer. To avoid endless repeats, clear
- * the bits in the mask, since there is some kind of
- * programming error or chip problem.
- */
- gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
- /*
- * In theory, writing GPIOstatus to GPIOclear could
- * have a bad side-effect on some diagnostic that wanted
- * to poll for a status-change, but the various shadows
- * make that problematic at best. Diags will just suppress
- * all GPIO interrupts during such tests.
- */
- qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
-
- if (gpiostatus) {
- const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
- u32 gpio_irq = mask & gpiostatus;
-
- /*
- * A bit set in status and (chip) Mask register
- * would cause an interrupt. Since we are not
- * expecting any, report it. Also check that the
- * chip reflects our shadow, report issues,
- * and refresh from the shadow.
- */
- /*
- * Clear any troublemakers, and update chip
- * from shadow
- */
- dd->cspec->gpio_mask &= ~gpio_irq;
- qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
- }
- }
-
- if (istat & QLOGIC_IB_I_ERROR) {
- u64 estat;
-
- qib_stats.sps_errints++;
- estat = qib_read_kreg64(dd, kr_errstatus);
- if (!estat)
- qib_devinfo(dd->pcidev,
- "error interrupt (%Lx), but no error bits set!\n",
- istat);
- else
- handle_7220_errors(dd, estat);
- }
-}
-
-static irqreturn_t qib_7220intr(int irq, void *data)
-{
- struct qib_devdata *dd = data;
- irqreturn_t ret;
- u64 istat;
- u64 ctxtrbits;
- u64 rmask;
- unsigned i;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- ret = IRQ_HANDLED;
- goto bail;
- }
-
- istat = qib_read_kreg64(dd, kr_intstatus);
-
- if (unlikely(!istat)) {
- ret = IRQ_NONE; /* not our interrupt, or already handled */
- goto bail;
- }
- if (unlikely(istat == -1)) {
- qib_bad_intrstatus(dd);
- /* don't know if it was our interrupt or not */
- ret = IRQ_NONE;
- goto bail;
- }
-
- this_cpu_inc(*dd->int_counter);
- if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
- QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
- unlikely_7220_intr(dd, istat);
-
- /*
- * Clear the interrupt bits we found set, relatively early, so we
- * "know" know the chip will have seen this by the time we process
- * the queue, and will re-interrupt if necessary. The processor
- * itself won't take the interrupt again until we return.
- */
- qib_write_kreg(dd, kr_intclear, istat);
-
- /*
- * Handle kernel receive queues before checking for pio buffers
- * available since receives can overflow; piobuf waiters can afford
- * a few extra cycles, since they were waiting anyway.
- */
- ctxtrbits = istat &
- ((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
- (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
- if (ctxtrbits) {
- rmask = (1ULL << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
- (1ULL << QLOGIC_IB_I_RCVURG_SHIFT);
- for (i = 0; i < dd->first_user_ctxt; i++) {
- if (ctxtrbits & rmask) {
- ctxtrbits &= ~rmask;
- qib_kreceive(dd->rcd[i], NULL, NULL);
- }
- rmask <<= 1;
- }
- if (ctxtrbits) {
- ctxtrbits =
- (ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
- (ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
- qib_handle_urcv(dd, ctxtrbits);
- }
- }
-
- /* only call for SDmaInt */
- if (istat & QLOGIC_IB_I_SDMAINT)
- sdma_7220_intr(dd->pport, istat);
-
- if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
- qib_ib_piobufavail(dd);
-
- ret = IRQ_HANDLED;
-bail:
- return ret;
-}
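
/*
 * Standalone sketch of the per-context decode at the end of
 * qib_7220intr(): one RcvAvail and one RcvUrg bit per context, walked
 * with a mask that is shifted left once per context.  The EX_* shifts
 * and context count are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_RCVAVAIL_SHIFT 0
#define EX_RCVURG_SHIFT   16
#define EX_NUM_KCTXTS     3

int main(void)
{
	/* pretend contexts 0 and 2 have data available */
	uint64_t istat = (1ULL << (EX_RCVAVAIL_SHIFT + 0)) |
			 (1ULL << (EX_RCVAVAIL_SHIFT + 2));
	uint64_t rmask = (1ULL << EX_RCVAVAIL_SHIFT) |
			 (1ULL << EX_RCVURG_SHIFT);
	unsigned int i;

	for (i = 0; i < EX_NUM_KCTXTS; i++) {
		if (istat & rmask)
			printf("service kernel receive context %u\n", i);
		rmask <<= 1;
	}
	return 0;
}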
-
-/*
- * Set up our chip-specific interrupt handler.
- * The interrupt type has already been setup, so
- * we just need to do the registration and error checking.
- * If we are using MSI interrupts, we may fall back to
- * INTx later, if the interrupt handler doesn't get called
- * within 1/2 second (see verify_interrupt()).
- */
-static void qib_setup_7220_interrupt(struct qib_devdata *dd)
-{
- if (!dd->cspec->irq)
- qib_dev_err(dd,
- "irq is 0, BIOS error? Interrupts won't work\n");
- else {
- int ret = request_irq(dd->cspec->irq, qib_7220intr,
- dd->msi_lo ? 0 : IRQF_SHARED,
- QIB_DRV_NAME, dd);
-
- if (ret)
- qib_dev_err(dd,
- "Couldn't setup %s interrupt (irq=%d): %d\n",
- dd->msi_lo ? "MSI" : "INTx",
- dd->cspec->irq, ret);
- }
-}
-
-/**
- * qib_7220_boardname - fill in the board name
- * @dd: the qlogic_ib device
- *
- * info is based on the board revision register
- */
-static void qib_7220_boardname(struct qib_devdata *dd)
-{
- char *n;
- u32 boardid, namelen;
-
- boardid = SYM_FIELD(dd->revision, Revision,
- BoardID);
-
- switch (boardid) {
- case 1:
- n = "InfiniPath_QLE7240";
- break;
- case 2:
- n = "InfiniPath_QLE7280";
- break;
- default:
- qib_dev_err(dd, "Unknown 7220 board with ID %u\n", boardid);
- n = "Unknown_InfiniPath_7220";
- break;
- }
-
- namelen = strlen(n) + 1;
- dd->boardname = kmalloc(namelen, GFP_KERNEL);
- if (!dd->boardname)
- qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
- else
- snprintf(dd->boardname, namelen, "%s", n);
-
- if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2)
- qib_dev_err(dd,
- "Unsupported InfiniPath hardware revision %u.%u!\n",
- dd->majrev, dd->minrev);
-
- snprintf(dd->boardversion, sizeof(dd->boardversion),
- "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
- QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
- (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
- dd->majrev, dd->minrev,
- (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
-}
-
-/*
- * This routine sleeps, so it can only be called from user context, not
- * from interrupt context.
- */
-static int qib_setup_7220_reset(struct qib_devdata *dd)
-{
- u64 val;
- int i;
- int ret;
- u16 cmdval;
- u8 int_line, clinesz;
- unsigned long flags;
-
- qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
-
- /* Use dev_err so it shows up in logs, etc. */
- qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
-
- /* no interrupts till re-initted */
- qib_7220_set_intr_state(dd, 0);
-
- dd->pport->cpspec->ibdeltainprog = 0;
- dd->pport->cpspec->ibsymdelta = 0;
- dd->pport->cpspec->iblnkerrdelta = 0;
-
- /*
- * Keep chip from being accessed until we are ready. Use
- * writeq() directly, to allow the write even though QIB_PRESENT
- * isn't set.
- */
- dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
- /* so we check interrupts work again */
- dd->z_int_counter = qib_int_counter(dd);
- val = dd->control | QLOGIC_IB_C_RESET;
- writeq(val, &dd->kregbase[kr_control]);
- mb(); /* prevent compiler reordering around actual reset */
-
- for (i = 1; i <= 5; i++) {
- /*
- * Allow MBIST, etc. to complete; longer on each retry.
- * We sometimes get machine checks from bus timeout if no
- * response, so for now, make it *really* long.
- */
- msleep(1000 + (1 + i) * 2000);
-
- qib_pcie_reenable(dd, cmdval, int_line, clinesz);
-
- /*
- * Use readq directly, so we don't need to mark it as PRESENT
- * until we get a successful indication that all is well.
- */
- val = readq(&dd->kregbase[kr_revision]);
- if (val == dd->revision) {
- dd->flags |= QIB_PRESENT; /* it's back */
- ret = qib_reinit_intr(dd);
- goto bail;
- }
- }
- ret = 0; /* failed */
-
-bail:
- if (ret) {
- if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
- qib_dev_err(dd,
- "Reset failed to setup PCIe or interrupts; continuing anyway\n");
-
- /* hold IBC in reset, no sends, etc till later */
- qib_write_kreg(dd, kr_control, 0ULL);
-
- /* clear the reset error, init error/hwerror mask */
- qib_7220_init_hwerrors(dd);
-
- /* do setup similar to speed or link-width changes */
- if (dd->pport->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK)
- dd->cspec->presets_needed = 1;
- spin_lock_irqsave(&dd->pport->lflags_lock, flags);
- dd->pport->lflags |= QIBL_IB_FORCE_NOTIFY;
- dd->pport->lflags &= ~QIBL_IB_AUTONEG_FAILED;
- spin_unlock_irqrestore(&dd->pport->lflags_lock, flags);
- }
-
- return ret;
-}
-
-/**
- * qib_7220_put_tid - write a TID to the chip
- * @dd: the qlogic_ib device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @type: RCVHQ_RCV_TYPE_EAGER or RCVHQ_RCV_TYPE_EXPECTED
- * @pa: physical address of the in-memory buffer; tidinvalid if freeing
- */
-static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
- u32 type, unsigned long pa)
-{
- if (pa != dd->tidinvalid) {
- u64 chippa = pa >> IBA7220_TID_PA_SHIFT;
-
- /* paranoia checks */
- if (pa != (chippa << IBA7220_TID_PA_SHIFT)) {
- qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
- pa);
- return;
- }
- if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
- qib_dev_err(dd,
- "Physical page address 0x%lx larger than supported\n",
- pa);
- return;
- }
-
- if (type == RCVHQ_RCV_TYPE_EAGER)
- chippa |= dd->tidtemplate;
- else /* for now, always full 4KB page */
- chippa |= IBA7220_TID_SZ_4K;
- pa = chippa;
- }
- writeq(pa, tidptr);
- mmiowb();
-}
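
/*
 * Worked example of the physical-address checks in qib_7220_put_tid():
 * shifting right by the TID page shift and back again detects addresses
 * that are not 2 KB aligned, and the shifted value must still fit in
 * the chip's address field.  EX_TID_SZ_SHIFT is a hypothetical stand-in
 * for IBA7220_TID_SZ_SHIFT.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_TID_PA_SHIFT 11               /* 2 KB alignment */
#define EX_TID_SZ_SHIFT 37               /* hypothetical field width */

static int ex_check_tid_pa(uint64_t pa)
{
	uint64_t chippa = pa >> EX_TID_PA_SHIFT;

	if (pa != (chippa << EX_TID_PA_SHIFT))
		return -1;                   /* not 2 KB aligned */
	if (chippa >= (1ULL << EX_TID_SZ_SHIFT))
		return -2;                   /* address too large for the field */
	return 0;
}

int main(void)
{
	printf("%d\n", ex_check_tid_pa(0x1000));   /* 4 KB aligned -> 0  */
	printf("%d\n", ex_check_tid_pa(0x1234));   /* misaligned   -> -1 */
	return 0;
}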
-
-/**
- * qib_7220_clear_tids - clear all TID entries for a ctxt, expected and eager
- * @dd: the qlogic_ib device
- * @rcd: the context data
- *
- * clear all TID entries for a ctxt, expected and eager.
- * Used from qib_close(). On this chip, TIDs are only 32 bits,
- * not 64, but they are still on 64 bit boundaries, so tidbase
- * is declared as u64 * for the pointer math, even though we write 32 bits
- */
-static void qib_7220_clear_tids(struct qib_devdata *dd,
- struct qib_ctxtdata *rcd)
-{
- u64 __iomem *tidbase;
- unsigned long tidinv;
- u32 ctxt;
- int i;
-
- if (!dd->kregbase || !rcd)
- return;
-
- ctxt = rcd->ctxt;
-
- tidinv = dd->tidinvalid;
- tidbase = (u64 __iomem *)
- ((char __iomem *)(dd->kregbase) +
- dd->rcvtidbase +
- ctxt * dd->rcvtidcnt * sizeof(*tidbase));
-
- for (i = 0; i < dd->rcvtidcnt; i++)
- qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
- tidinv);
-
- tidbase = (u64 __iomem *)
- ((char __iomem *)(dd->kregbase) +
- dd->rcvegrbase +
- rcd->rcvegr_tid_base * sizeof(*tidbase));
-
- for (i = 0; i < rcd->rcvegrcnt; i++)
- qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
- tidinv);
-}
-
-/**
- * qib_7220_tidtemplate - setup constants for TID updates
- * @dd: the qlogic_ib device
- *
- * We setup stuff that we use a lot, to avoid calculating each time
- */
-static void qib_7220_tidtemplate(struct qib_devdata *dd)
-{
- if (dd->rcvegrbufsize == 2048)
- dd->tidtemplate = IBA7220_TID_SZ_2K;
- else if (dd->rcvegrbufsize == 4096)
- dd->tidtemplate = IBA7220_TID_SZ_4K;
- dd->tidinvalid = 0;
-}
-
-/**
- * qib_7220_get_base_info - set chip-specific flags for user code
- * @rcd: the qlogic_ib ctxt
- * @kinfo: qib_base_info pointer
- *
- * We set the PCIE flag because the lower bandwidth on PCIe vs
- * HyperTransport can affect some user packet algorithms.
- */
-static int qib_7220_get_base_info(struct qib_ctxtdata *rcd,
- struct qib_base_info *kinfo)
-{
- kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
- QIB_RUNTIME_NODMA_RTAIL | QIB_RUNTIME_SDMA;
-
- if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
- kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
-
- return 0;
-}
-
-static struct qib_message_header *
-qib_7220_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
-{
- u32 offset = qib_hdrget_offset(rhf_addr);
-
- return (struct qib_message_header *)
- (rhf_addr - dd->rhf_offset + offset);
-}
-
-static void qib_7220_config_ctxts(struct qib_devdata *dd)
-{
- unsigned long flags;
- u32 nchipctxts;
-
- nchipctxts = qib_read_kreg32(dd, kr_portcnt);
- dd->cspec->numctxts = nchipctxts;
- if (qib_n_krcv_queues > 1) {
- dd->qpn_mask = 0x3e;
- dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
- if (dd->first_user_ctxt > nchipctxts)
- dd->first_user_ctxt = nchipctxts;
- } else
- dd->first_user_ctxt = dd->num_pports;
- dd->n_krcv_queues = dd->first_user_ctxt;
-
- if (!qib_cfgctxts) {
- int nctxts = dd->first_user_ctxt + num_online_cpus();
-
- if (nctxts <= 5)
- dd->ctxtcnt = 5;
- else if (nctxts <= 9)
- dd->ctxtcnt = 9;
- else if (nctxts <= nchipctxts)
- dd->ctxtcnt = nchipctxts;
- } else if (qib_cfgctxts <= nchipctxts)
- dd->ctxtcnt = qib_cfgctxts;
- if (!dd->ctxtcnt) /* none of the above, set to max */
- dd->ctxtcnt = nchipctxts;
-
- /*
- * Chip can be configured for 5, 9, or 17 ctxts, and choice
- * affects number of eager TIDs per ctxt (1K, 2K, 4K).
- * Lock to be paranoid about later motion, etc.
- */
- spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
- if (dd->ctxtcnt > 9)
- dd->rcvctrl |= 2ULL << IBA7220_R_CTXTCFG_SHIFT;
- else if (dd->ctxtcnt > 5)
- dd->rcvctrl |= 1ULL << IBA7220_R_CTXTCFG_SHIFT;
- /* else configure for default 5 receive ctxts */
- if (dd->qpn_mask)
- dd->rcvctrl |= 1ULL << QIB_7220_RcvCtrl_RcvQPMapEnable_LSB;
- qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
- spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
-
- /* kr_rcvegrcnt changes based on the number of contexts enabled */
- dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
- dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, IBA7220_KRCVEGRCNT);
-}
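
/*
 * Sketch of the context-count encoding chosen in qib_7220_config_ctxts():
 * the chip supports 5, 9, or 17 contexts, selected by a small RcvCtrl
 * field (0, 1, or 2).  EX_CTXTCFG_SHIFT is a hypothetical stand-in for
 * IBA7220_R_CTXTCFG_SHIFT.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_CTXTCFG_SHIFT 8

static uint64_t ex_ctxtcfg_bits(unsigned int ctxtcnt)
{
	if (ctxtcnt > 9)
		return 2ULL << EX_CTXTCFG_SHIFT;   /* 17 contexts */
	if (ctxtcnt > 5)
		return 1ULL << EX_CTXTCFG_SHIFT;   /* 9 contexts */
	return 0;                                  /* default: 5 contexts */
}

int main(void)
{
	printf("9 ctxts  -> %#llx\n", (unsigned long long)ex_ctxtcfg_bits(9));
	printf("17 ctxts -> %#llx\n", (unsigned long long)ex_ctxtcfg_bits(17));
	return 0;
}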
-
-static int qib_7220_get_ib_cfg(struct qib_pportdata *ppd, int which)
-{
- int lsb, ret = 0;
- u64 maskr; /* right-justified mask */
-
- switch (which) {
- case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
- ret = ppd->link_width_enabled;
- goto done;
-
- case QIB_IB_CFG_LWID: /* Get currently active Link-width */
- ret = ppd->link_width_active;
- goto done;
-
- case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
- ret = ppd->link_speed_enabled;
- goto done;
-
- case QIB_IB_CFG_SPD: /* Get current Link spd */
- ret = ppd->link_speed_active;
- goto done;
-
- case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
- lsb = IBA7220_IBC_RXPOL_SHIFT;
- maskr = IBA7220_IBC_RXPOL_MASK;
- break;
-
- case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
- lsb = IBA7220_IBC_LREV_SHIFT;
- maskr = IBA7220_IBC_LREV_MASK;
- break;
-
- case QIB_IB_CFG_LINKLATENCY:
- ret = qib_read_kreg64(ppd->dd, kr_ibcddrstatus)
- & IBA7220_DDRSTAT_LINKLAT_MASK;
- goto done;
-
- case QIB_IB_CFG_OP_VLS:
- ret = ppd->vls_operational;
- goto done;
-
- case QIB_IB_CFG_VL_HIGH_CAP:
- ret = 0;
- goto done;
-
- case QIB_IB_CFG_VL_LOW_CAP:
- ret = 0;
- goto done;
-
- case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
- ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
- OverrunThreshold);
- goto done;
-
- case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
- ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
- PhyerrThreshold);
- goto done;
-
- case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
- /* will only take effect when the link state changes */
- ret = (ppd->cpspec->ibcctrl &
- SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
- IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
- goto done;
-
- case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
- lsb = IBA7220_IBC_HRTBT_SHIFT;
- maskr = IBA7220_IBC_HRTBT_MASK;
- break;
-
- case QIB_IB_CFG_PMA_TICKS:
- /*
- * 0x00 = 10x link transfer rate, i.e. 4 ns for 2.5 Gb/s.
- * Since the clock is always 250 MHz, the value is 1 or 0.
- */
- ret = (ppd->link_speed_active == QIB_IB_DDR);
- goto done;
-
- default:
- ret = -EINVAL;
- goto done;
- }
- ret = (int)((ppd->cpspec->ibcddrctrl >> lsb) & maskr);
-done:
- return ret;
-}
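
/*
 * Standalone sketch of the field-access pattern shared by
 * qib_7220_get_ib_cfg() and qib_7220_set_ib_cfg(): each configurable
 * field is described by a right-justified mask plus its LSB, so a read
 * is "(reg >> lsb) & mask" and a write is a masked merge.  The EX_*
 * field position and width are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_HRTBT_SHIFT 26
#define EX_HRTBT_MASK  0x3ULL

static uint64_t ex_get_field(uint64_t reg, int lsb, uint64_t maskr)
{
	return (reg >> lsb) & maskr;
}

static uint64_t ex_set_field(uint64_t reg, int lsb, uint64_t maskr,
			     uint64_t val)
{
	reg &= ~(maskr << lsb);
	reg |= (val & maskr) << lsb;
	return reg;
}

int main(void)
{
	uint64_t reg = 0;

	reg = ex_set_field(reg, EX_HRTBT_SHIFT, EX_HRTBT_MASK, 1);
	printf("hrtbt=%llu\n",
	       (unsigned long long)ex_get_field(reg, EX_HRTBT_SHIFT,
						EX_HRTBT_MASK));
	return 0;
}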
-
-static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 maskr; /* right-justified mask */
- int lsb, ret = 0, setforce = 0;
- u16 lcmd, licmd;
- unsigned long flags;
- u32 tmp = 0;
-
- switch (which) {
- case QIB_IB_CFG_LIDLMC:
- /*
- * Set LID and LMC. Combined to avoid a possible hazard; the
- * caller puts LMC in the 16 MSbits and DLID in the 16 LSbits of val.
- */
- lsb = IBA7220_IBC_DLIDLMC_SHIFT;
- maskr = IBA7220_IBC_DLIDLMC_MASK;
- break;
-
- case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
- /*
- * As with speed, only write the actual register if
- * the link is currently down, otherwise takes effect
- * on next link change.
- */
- ppd->link_width_enabled = val;
- if (!(ppd->lflags & QIBL_LINKDOWN))
- goto bail;
- /*
- * We set the QIBL_IB_FORCE_NOTIFY bit so updown
- * will get called because we want to update
- * link_width_active, and the change may not take
- * effect for some time (if we are in POLL), so this
- * flag will force the updown routine to be called
- * on the next ibstatuschange down interrupt, even
- * if it's not a down->up transition.
- */
- val--; /* convert from IB to chip */
- maskr = IBA7220_IBC_WIDTH_MASK;
- lsb = IBA7220_IBC_WIDTH_SHIFT;
- setforce = 1;
- break;
-
- case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
- /*
- * If we turn off IB1.2, need to preset SerDes defaults,
- * but not right now. Set a flag for the next time
- * we command the link down. As with width, only write the
- * actual register if the link is currently down, otherwise
- * takes effect on next link change. Since setting is being
- * explicitly requested (via MAD or sysfs), clear autoneg
- * failure status if speed autoneg is enabled.
- */
- ppd->link_speed_enabled = val;
- if ((ppd->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK) &&
- !(val & (val - 1)))
- dd->cspec->presets_needed = 1;
- if (!(ppd->lflags & QIBL_LINKDOWN))
- goto bail;
- /*
- * We set the QIBL_IB_FORCE_NOTIFY bit so updown
- * will get called because we want to update
- * link_speed_active, and the change may not take
- * effect for some time (if we are in POLL), so this
- * flag will force the updown routine to be called
- * on the next ibstatuschange down interrupt, even
- * if it's not a down->up transition.
- */
- if (val == (QIB_IB_SDR | QIB_IB_DDR)) {
- val = IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK;
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- } else
- val = val == QIB_IB_DDR ?
- IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
- maskr = IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK;
- /* IBTA 1.2 mode + speed bits are contiguous */
- lsb = SYM_LSB(IBCDDRCtrl, IB_ENHANCED_MODE);
- setforce = 1;
- break;
-
- case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
- lsb = IBA7220_IBC_RXPOL_SHIFT;
- maskr = IBA7220_IBC_RXPOL_MASK;
- break;
-
- case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
- lsb = IBA7220_IBC_LREV_SHIFT;
- maskr = IBA7220_IBC_LREV_MASK;
- break;
-
- case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
- maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
- OverrunThreshold);
- if (maskr != val) {
- ppd->cpspec->ibcctrl &=
- ~SYM_MASK(IBCCtrl, OverrunThreshold);
- ppd->cpspec->ibcctrl |= (u64) val <<
- SYM_LSB(IBCCtrl, OverrunThreshold);
- qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
- goto bail;
-
- case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
- maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
- PhyerrThreshold);
- if (maskr != val) {
- ppd->cpspec->ibcctrl &=
- ~SYM_MASK(IBCCtrl, PhyerrThreshold);
- ppd->cpspec->ibcctrl |= (u64) val <<
- SYM_LSB(IBCCtrl, PhyerrThreshold);
- qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
- goto bail;
-
- case QIB_IB_CFG_PKEYS: /* update pkeys */
- maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
- ((u64) ppd->pkeys[2] << 32) |
- ((u64) ppd->pkeys[3] << 48);
- qib_write_kreg(dd, kr_partitionkey, maskr);
- goto bail;
-
- case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
- /* will only take effect when the link state changes */
- if (val == IB_LINKINITCMD_POLL)
- ppd->cpspec->ibcctrl &=
- ~SYM_MASK(IBCCtrl, LinkDownDefaultState);
- else /* SLEEP */
- ppd->cpspec->ibcctrl |=
- SYM_MASK(IBCCtrl, LinkDownDefaultState);
- qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- goto bail;
-
- case QIB_IB_CFG_MTU: /* update the MTU in IBC */
- /*
- * Update our housekeeping variables, and set IBC max
- * size, same as init code; max IBC is max we allow in
- * buffer, less the qword pbc, plus 1 for ICRC, in dwords
- * Set even if it's unchanged, print debug message only
- * on changes.
- */
- val = (ppd->ibmaxlen >> 2) + 1;
- ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
- ppd->cpspec->ibcctrl |= (u64)val << SYM_LSB(IBCCtrl, MaxPktLen);
- qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- goto bail;
-
- case QIB_IB_CFG_LSTATE: /* set the IB link state */
- switch (val & 0xffff0000) {
- case IB_LINKCMD_DOWN:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
- if (!ppd->cpspec->ibdeltainprog &&
- qib_compat_ddr_negotiate) {
- ppd->cpspec->ibdeltainprog = 1;
- ppd->cpspec->ibsymsnap =
- read_7220_creg32(dd, cr_ibsymbolerr);
- ppd->cpspec->iblnkerrsnap =
- read_7220_creg32(dd, cr_iblinkerrrecov);
- }
- break;
-
- case IB_LINKCMD_ARMED:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
- break;
-
- case IB_LINKCMD_ACTIVE:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
- break;
-
- default:
- ret = -EINVAL;
- qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
- goto bail;
- }
- switch (val & 0xffff) {
- case IB_LINKINITCMD_NOP:
- licmd = 0;
- break;
-
- case IB_LINKINITCMD_POLL:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
- break;
-
- case IB_LINKINITCMD_SLEEP:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
- break;
-
- case IB_LINKINITCMD_DISABLE:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
- ppd->cpspec->chase_end = 0;
- /*
- * stop state chase counter and timer, if running.
- * wait for pending timer, but don't clear .data (ppd)!
- */
- if (ppd->cpspec->chase_timer.expires) {
- del_timer_sync(&ppd->cpspec->chase_timer);
- ppd->cpspec->chase_timer.expires = 0;
- }
- break;
-
- default:
- ret = -EINVAL;
- qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
- val & 0xffff);
- goto bail;
- }
- qib_set_ib_7220_lstate(ppd, lcmd, licmd);
-
- maskr = IBA7220_IBC_WIDTH_MASK;
- lsb = IBA7220_IBC_WIDTH_SHIFT;
- tmp = (ppd->cpspec->ibcddrctrl >> lsb) & maskr;
- /* If the width active on the chip does not match the
- * width in the shadow register, write the new active
- * width to the chip.
- * We don't have to worry about speed as the speed is taken
- * care of by set_7220_ibspeed_fast called by ib_updown.
- */
- if (ppd->link_width_enabled-1 != tmp) {
- ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
- ppd->cpspec->ibcddrctrl |=
- (((u64)(ppd->link_width_enabled-1) & maskr) <<
- lsb);
- qib_write_kreg(dd, kr_ibcddrctrl,
- ppd->cpspec->ibcddrctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- }
- goto bail;
-
- case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
- if (val > IBA7220_IBC_HRTBT_MASK) {
- ret = -EINVAL;
- goto bail;
- }
- lsb = IBA7220_IBC_HRTBT_SHIFT;
- maskr = IBA7220_IBC_HRTBT_MASK;
- break;
-
- default:
- ret = -EINVAL;
- goto bail;
- }
- ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
- ppd->cpspec->ibcddrctrl |= (((u64) val & maskr) << lsb);
- qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- if (setforce) {
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- }
-bail:
- return ret;
-}
-
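
Most of the cases above funnel into the same tail: clear a right-justified field in the ibcddrctrl shadow at (lsb, maskr), OR in the new value, write the register, then write the scratch register to flush. A minimal stand-alone sketch of that field-update step (plain C; the names and the example field are illustrative, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

/* Insert a right-justified value into a register shadow at (lsb, mask). */
static uint64_t set_field(uint64_t shadow, uint64_t val,
                          unsigned int lsb, uint64_t mask)
{
        shadow &= ~(mask << lsb);       /* clear the old field */
        shadow |= (val & mask) << lsb;  /* place the new, right-justified value */
        return shadow;                  /* caller writes the shadow to the chip */
}

int main(void)
{
        uint64_t reg = 0;

        reg = set_field(reg, 0x3, 8, 0xf);      /* a 4-bit field at bit 8 */
        printf("0x%llx\n", (unsigned long long)reg);    /* prints 0x300 */
        return 0;
}
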
-static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
-{
- int ret = 0;
- u64 val, ddr;
-
- if (!strncmp(what, "ibc", 3)) {
- ppd->cpspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
- val = 0; /* disable heart beat, so link will come up */
- qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
- ppd->dd->unit, ppd->port);
- } else if (!strncmp(what, "off", 3)) {
- ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
- /* enable heart beat again */
- val = IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
- qib_devinfo(ppd->dd->pcidev,
- "Disabling IB%u:%u IBC loopback (normal)\n",
- ppd->dd->unit, ppd->port);
- } else
- ret = -EINVAL;
- if (!ret) {
- qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
- ddr = ppd->cpspec->ibcddrctrl & ~(IBA7220_IBC_HRTBT_MASK
- << IBA7220_IBC_HRTBT_SHIFT);
- ppd->cpspec->ibcddrctrl = ddr | val;
- qib_write_kreg(ppd->dd, kr_ibcddrctrl,
- ppd->cpspec->ibcddrctrl);
- qib_write_kreg(ppd->dd, kr_scratch, 0);
- }
- return ret;
-}
-
-static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
- u32 updegr, u32 egrhd, u32 npkts)
-{
- if (updegr)
- qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
- mmiowb();
- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
- mmiowb();
-}
-
-static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
-{
- u32 head, tail;
-
- head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
- if (rcd->rcvhdrtail_kvaddr)
- tail = qib_get_rcvhdrtail(rcd);
- else
- tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
- return head == tail;
-}
-
-/*
- * Modify the RCVCTRL register in a chip-specific way. This
- * is a function because bit positions and (future) register
- * location are chip-specific, but the needed operations are
- * generic. <op> is a bit-mask because we often want to
- * do multiple modifications.
- */
-static void rcvctrl_7220_mod(struct qib_pportdata *ppd, unsigned int op,
- int ctxt)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 mask, val;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
- if (op & QIB_RCVCTRL_TAILUPD_ENB)
- dd->rcvctrl |= (1ULL << IBA7220_R_TAILUPD_SHIFT);
- if (op & QIB_RCVCTRL_TAILUPD_DIS)
- dd->rcvctrl &= ~(1ULL << IBA7220_R_TAILUPD_SHIFT);
- if (op & QIB_RCVCTRL_PKEY_ENB)
- dd->rcvctrl &= ~(1ULL << IBA7220_R_PKEY_DIS_SHIFT);
- if (op & QIB_RCVCTRL_PKEY_DIS)
- dd->rcvctrl |= (1ULL << IBA7220_R_PKEY_DIS_SHIFT);
- if (ctxt < 0)
- mask = (1ULL << dd->ctxtcnt) - 1;
- else
- mask = (1ULL << ctxt);
- if (op & QIB_RCVCTRL_CTXT_ENB) {
- /* always done for specific ctxt */
- dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
- if (!(dd->flags & QIB_NODMA_RTAIL))
- dd->rcvctrl |= 1ULL << IBA7220_R_TAILUPD_SHIFT;
- /* Write these registers before the context is enabled. */
- qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
- dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
- qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
- dd->rcd[ctxt]->rcvhdrq_phys);
- dd->rcd[ctxt]->seq_cnt = 1;
- }
- if (op & QIB_RCVCTRL_CTXT_DIS)
- dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
- if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
- dd->rcvctrl |= (mask << IBA7220_R_INTRAVAIL_SHIFT);
- if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
- dd->rcvctrl &= ~(mask << IBA7220_R_INTRAVAIL_SHIFT);
- qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
- if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
- /* arm rcv interrupt */
- val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
- dd->rhdrhead_intr_off;
- qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
- }
- if (op & QIB_RCVCTRL_CTXT_ENB) {
- /*
- * Init the context registers also; if we were
- * disabled, tail and head should both be zero
- * already from the enable, but since we don't
- * know, we have to do it explicitly.
- */
- val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
- qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
-
- val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
- dd->rcd[ctxt]->head = val;
- /* If kctxt, interrupt on next receive. */
- if (ctxt < dd->first_user_ctxt)
- val |= dd->rhdrhead_intr_off;
- qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
- }
- if (op & QIB_RCVCTRL_CTXT_DIS) {
- if (ctxt >= 0) {
- qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, 0);
- qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, 0);
- } else {
- unsigned i;
-
- for (i = 0; i < dd->cfgctxts; i++) {
- qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
- i, 0);
- qib_write_kreg_ctxt(dd, kr_rcvhdraddr, i, 0);
- }
- }
- }
- spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
-}
-
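
rcvctrl_7220_mod() builds a per-context bit mask, treating a negative context number as "all contexts". The mask construction on its own, as a hedged sketch (the context counts below are arbitrary examples):

#include <stdint.h>
#include <stdio.h>

/* ctxt < 0 selects every configured context, otherwise just that one bit. */
static uint64_t ctxt_mask(int ctxt, unsigned int ctxtcnt)
{
        if (ctxt < 0)
                return (1ULL << ctxtcnt) - 1;   /* low ctxtcnt bits set */
        return 1ULL << ctxt;
}

int main(void)
{
        printf("0x%llx\n", (unsigned long long)ctxt_mask(-1, 5));       /* 0x1f */
        printf("0x%llx\n", (unsigned long long)ctxt_mask(3, 5));        /* 0x8 */
        return 0;
}
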
-/*
- * Modify the SENDCTRL register in a chip-specific way. This
- * is a function because there may be multiple such registers with
- * slightly different layouts. To start, we assume the
- * "canonical" register layout of the first chips.
- * The chip requires no back-to-back sendctrl writes, so we write the
- * scratch register after writing sendctrl.
- */
-static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 tmp_dd_sendctrl;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
-
- /* First the ones that are "sticky", saved in shadow */
- if (op & QIB_SENDCTRL_CLEAR)
- dd->sendctrl = 0;
- if (op & QIB_SENDCTRL_SEND_DIS)
- dd->sendctrl &= ~SYM_MASK(SendCtrl, SPioEnable);
- else if (op & QIB_SENDCTRL_SEND_ENB) {
- dd->sendctrl |= SYM_MASK(SendCtrl, SPioEnable);
- if (dd->flags & QIB_USE_SPCL_TRIG)
- dd->sendctrl |= SYM_MASK(SendCtrl,
- SSpecialTriggerEn);
- }
- if (op & QIB_SENDCTRL_AVAIL_DIS)
- dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
- else if (op & QIB_SENDCTRL_AVAIL_ENB)
- dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
-
- if (op & QIB_SENDCTRL_DISARM_ALL) {
- u32 i, last;
-
- tmp_dd_sendctrl = dd->sendctrl;
- /*
- * disarm any that are not yet launched, disabling sends
- * and updates until done.
- */
- last = dd->piobcnt2k + dd->piobcnt4k;
- tmp_dd_sendctrl &=
- ~(SYM_MASK(SendCtrl, SPioEnable) |
- SYM_MASK(SendCtrl, SendBufAvailUpd));
- for (i = 0; i < last; i++) {
- qib_write_kreg(dd, kr_sendctrl,
- tmp_dd_sendctrl |
- SYM_MASK(SendCtrl, Disarm) | i);
- qib_write_kreg(dd, kr_scratch, 0);
- }
- }
-
- tmp_dd_sendctrl = dd->sendctrl;
-
- if (op & QIB_SENDCTRL_FLUSH)
- tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
- if (op & QIB_SENDCTRL_DISARM)
- tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
- ((op & QIB_7220_SendCtrl_DisarmPIOBuf_RMASK) <<
- SYM_LSB(SendCtrl, DisarmPIOBuf));
- if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
- (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
- tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
-
- qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
-
- if (op & QIB_SENDCTRL_AVAIL_BLIP) {
- qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
-
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
-
- if (op & QIB_SENDCTRL_FLUSH) {
- u32 v;
- /*
- * ensure writes have hit chip, then do a few
- * more reads, to allow DMA of pioavail registers
- * to occur, so in-memory copy is in sync with
- * the chip. Not always safe to sleep.
- */
- v = qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg(dd, kr_scratch, v);
- v = qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg(dd, kr_scratch, v);
- qib_read_kreg32(dd, kr_scratch);
- }
-}
-
-/**
- * qib_portcntr_7220 - read a per-port counter
- * @ppd: the qlogic_ib per-port data
- * @reg: the counter to snapshot
- */
-static u64 qib_portcntr_7220(struct qib_pportdata *ppd, u32 reg)
-{
- u64 ret = 0ULL;
- struct qib_devdata *dd = ppd->dd;
- u16 creg;
- /* 0xffff for unimplemented or synthesized counters */
- static const u16 xlator[] = {
- [QIBPORTCNTR_PKTSEND] = cr_pktsend,
- [QIBPORTCNTR_WORDSEND] = cr_wordsend,
- [QIBPORTCNTR_PSXMITDATA] = cr_psxmitdatacount,
- [QIBPORTCNTR_PSXMITPKTS] = cr_psxmitpktscount,
- [QIBPORTCNTR_PSXMITWAIT] = cr_psxmitwaitcount,
- [QIBPORTCNTR_SENDSTALL] = cr_sendstall,
- [QIBPORTCNTR_PKTRCV] = cr_pktrcv,
- [QIBPORTCNTR_PSRCVDATA] = cr_psrcvdatacount,
- [QIBPORTCNTR_PSRCVPKTS] = cr_psrcvpktscount,
- [QIBPORTCNTR_RCVEBP] = cr_rcvebp,
- [QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
- [QIBPORTCNTR_WORDRCV] = cr_wordrcv,
- [QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
- [QIBPORTCNTR_RXLOCALPHYERR] = cr_rxotherlocalphyerr,
- [QIBPORTCNTR_RXVLERR] = cr_rxvlerr,
- [QIBPORTCNTR_ERRICRC] = cr_erricrc,
- [QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
- [QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
- [QIBPORTCNTR_BADFORMAT] = cr_badformat,
- [QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
- [QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
- [QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
- [QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
- [QIBPORTCNTR_EXCESSBUFOVFL] = cr_excessbufferovfl,
- [QIBPORTCNTR_ERRLINK] = cr_errlink,
- [QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
- [QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
- [QIBPORTCNTR_LLI] = cr_locallinkintegrityerr,
- [QIBPORTCNTR_PSINTERVAL] = cr_psinterval,
- [QIBPORTCNTR_PSSTART] = cr_psstart,
- [QIBPORTCNTR_PSSTAT] = cr_psstat,
- [QIBPORTCNTR_VL15PKTDROP] = cr_vl15droppedpkt,
- [QIBPORTCNTR_ERRPKEY] = cr_errpkey,
- [QIBPORTCNTR_KHDROVFL] = 0xffff,
- };
-
- if (reg >= ARRAY_SIZE(xlator)) {
- qib_devinfo(ppd->dd->pcidev,
- "Unimplemented portcounter %u\n", reg);
- goto done;
- }
- creg = xlator[reg];
-
- if (reg == QIBPORTCNTR_KHDROVFL) {
- int i;
-
- /* sum over all kernel contexts */
- for (i = 0; i < dd->first_user_ctxt; i++)
- ret += read_7220_creg32(dd, cr_portovfl + i);
- }
- if (creg == 0xffff)
- goto done;
-
- /*
- * only fast-incrementing counters are 64-bit; use 32-bit reads to
- * avoid two independent reads when on Opteron
- */
- if ((creg == cr_wordsend || creg == cr_wordrcv ||
- creg == cr_pktsend || creg == cr_pktrcv))
- ret = read_7220_creg(dd, creg);
- else
- ret = read_7220_creg32(dd, creg);
- if (creg == cr_ibsymbolerr) {
- if (dd->pport->cpspec->ibdeltainprog)
- ret -= ret - ppd->cpspec->ibsymsnap;
- ret -= dd->pport->cpspec->ibsymdelta;
- } else if (creg == cr_iblinkerrrecov) {
- if (dd->pport->cpspec->ibdeltainprog)
- ret -= ret - ppd->cpspec->iblnkerrsnap;
- ret -= dd->pport->cpspec->iblnkerrdelta;
- }
-done:
- return ret;
-}
-
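
The xlator[] table above uses C designated initializers keyed by the QIBPORTCNTR_* values, with 0xffff marking counters that are synthesized in software rather than read from a chip register. The lookup-with-sentinel pattern in isolation, as a sketch (the constants and offsets here are made up):

#include <stdint.h>
#include <stdio.h>

#define CNT_A 0
#define CNT_B 1
#define CNT_C 2
#define NOT_IN_HW 0xffff        /* synthesized in software, no chip register */

static const uint16_t xlate[] = {
        [CNT_A] = 0x10,         /* pretend chip counter offsets */
        [CNT_B] = NOT_IN_HW,
        [CNT_C] = 0x14,
};

int main(void)
{
        unsigned int reg = CNT_B;

        if (reg >= sizeof(xlate) / sizeof(xlate[0])) {
                fprintf(stderr, "unimplemented counter %u\n", reg);
                return 1;
        }
        if (xlate[reg] == NOT_IN_HW)
                printf("counter %u is summed in software\n", reg);
        else
                printf("counter %u -> chip offset 0x%x\n", reg, xlate[reg]);
        return 0;
}
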
-/*
- * Device counter names (not port-specific), one line per stat,
- * single string. Used by utilities like ipathstats to print the stats
- * in a way which works for different versions of drivers, without changing
- * the utility. Names need to be 12 chars or less (w/o newline), for proper
- * display by utility.
- * Non-error counters are first.
- * Start of "error" conters is indicated by a leading "E " on the first
- * "error" counter, and doesn't count in label length.
- * The EgrOvfl list needs to be last so we truncate them at the configured
- * context count for the device.
- * cntr7220indices contains the corresponding register indices.
- */
-static const char cntr7220names[] =
- "Interrupts\n"
- "HostBusStall\n"
- "E RxTIDFull\n"
- "RxTIDInvalid\n"
- "Ctxt0EgrOvfl\n"
- "Ctxt1EgrOvfl\n"
- "Ctxt2EgrOvfl\n"
- "Ctxt3EgrOvfl\n"
- "Ctxt4EgrOvfl\n"
- "Ctxt5EgrOvfl\n"
- "Ctxt6EgrOvfl\n"
- "Ctxt7EgrOvfl\n"
- "Ctxt8EgrOvfl\n"
- "Ctxt9EgrOvfl\n"
- "Ctx10EgrOvfl\n"
- "Ctx11EgrOvfl\n"
- "Ctx12EgrOvfl\n"
- "Ctx13EgrOvfl\n"
- "Ctx14EgrOvfl\n"
- "Ctx15EgrOvfl\n"
- "Ctx16EgrOvfl\n";
-
-static const size_t cntr7220indices[] = {
- cr_lbint,
- cr_lbflowstall,
- cr_errtidfull,
- cr_errtidvalid,
- cr_portovfl + 0,
- cr_portovfl + 1,
- cr_portovfl + 2,
- cr_portovfl + 3,
- cr_portovfl + 4,
- cr_portovfl + 5,
- cr_portovfl + 6,
- cr_portovfl + 7,
- cr_portovfl + 8,
- cr_portovfl + 9,
- cr_portovfl + 10,
- cr_portovfl + 11,
- cr_portovfl + 12,
- cr_portovfl + 13,
- cr_portovfl + 14,
- cr_portovfl + 15,
- cr_portovfl + 16,
-};
-
-/*
- * same as cntr7220names and cntr7220indices, but for port-specific counters.
- * portcntr7220indices is somewhat complicated by some registers needing
- * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
- */
-static const char portcntr7220names[] =
- "TxPkt\n"
- "TxFlowPkt\n"
- "TxWords\n"
- "RxPkt\n"
- "RxFlowPkt\n"
- "RxWords\n"
- "TxFlowStall\n"
- "TxDmaDesc\n" /* 7220 and 7322-only */
- "E RxDlidFltr\n" /* 7220 and 7322-only */
- "IBStatusChng\n"
- "IBLinkDown\n"
- "IBLnkRecov\n"
- "IBRxLinkErr\n"
- "IBSymbolErr\n"
- "RxLLIErr\n"
- "RxBadFormat\n"
- "RxBadLen\n"
- "RxBufOvrfl\n"
- "RxEBP\n"
- "RxFlowCtlErr\n"
- "RxICRCerr\n"
- "RxLPCRCerr\n"
- "RxVCRCerr\n"
- "RxInvalLen\n"
- "RxInvalPKey\n"
- "RxPktDropped\n"
- "TxBadLength\n"
- "TxDropped\n"
- "TxInvalLen\n"
- "TxUnderrun\n"
- "TxUnsupVL\n"
- "RxLclPhyErr\n" /* 7220 and 7322-only */
- "RxVL15Drop\n" /* 7220 and 7322-only */
- "RxVlErr\n" /* 7220 and 7322-only */
- "XcessBufOvfl\n" /* 7220 and 7322-only */
- ;
-
-#define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
-static const size_t portcntr7220indices[] = {
- QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
- cr_pktsendflow,
- QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
- QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
- cr_pktrcvflowctrl,
- QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
- QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
- cr_txsdmadesc,
- cr_rxdlidfltr,
- cr_ibstatuschange,
- QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
- QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
- QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
- QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
- cr_rcvflowctrl_err,
- QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
- cr_invalidslen,
- cr_senddropped,
- cr_errslen,
- cr_sendunderrun,
- cr_txunsupvl,
- QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
- QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
- QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
-};
-
-/* do all the setup to make the counter reads efficient later */
-static void init_7220_cntrnames(struct qib_devdata *dd)
-{
- int i, j = 0;
- char *s;
-
- for (i = 0, s = (char *)cntr7220names; s && j <= dd->cfgctxts;
- i++) {
- /* we always have at least one counter before the egrovfl */
- if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
- j = 1;
- s = strchr(s + 1, '\n');
- if (s && j)
- j++;
- }
- dd->cspec->ncntrs = i;
- if (!s)
- /* full list; size is without terminating null */
- dd->cspec->cntrnamelen = sizeof(cntr7220names) - 1;
- else
- dd->cspec->cntrnamelen = 1 + s - cntr7220names;
- dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
- * sizeof(u64), GFP_KERNEL);
- if (!dd->cspec->cntrs)
- qib_dev_err(dd, "Failed allocation for counters\n");
-
- for (i = 0, s = (char *)portcntr7220names; s; i++)
- s = strchr(s + 1, '\n');
- dd->cspec->nportcntrs = i - 1;
- dd->cspec->portcntrnamelen = sizeof(portcntr7220names) - 1;
- dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
- * sizeof(u64), GFP_KERNEL);
- if (!dd->cspec->portcntrs)
- qib_dev_err(dd, "Failed allocation for portcounters\n");
-}
-
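
init_7220_cntrnames() sizes its buffers by walking the packed, newline-separated name strings with strchr(). The counting idiom by itself, as a sketch (it leaves out the driver's truncation at the configured context count):

#include <stdio.h>
#include <string.h>

/* Count newline-terminated names in a packed string like cntr7220names. */
static int count_names(const char *names)
{
        const char *s = names;
        int n = 0;

        while ((s = strchr(s, '\n')) != NULL) {
                n++;
                s++;            /* step past the '\n' to the next name */
        }
        return n;
}

int main(void)
{
        static const char demo[] = "Interrupts\nHostBusStall\nE RxTIDFull\n";

        printf("%d names\n", count_names(demo));        /* prints 3 */
        return 0;
}
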
-static u32 qib_read_7220cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
- u64 **cntrp)
-{
- u32 ret;
-
- if (!dd->cspec->cntrs) {
- ret = 0;
- goto done;
- }
-
- if (namep) {
- *namep = (char *)cntr7220names;
- ret = dd->cspec->cntrnamelen;
- if (pos >= ret)
- ret = 0; /* final read after getting everything */
- } else {
- u64 *cntr = dd->cspec->cntrs;
- int i;
-
- ret = dd->cspec->ncntrs * sizeof(u64);
- if (!cntr || pos >= ret) {
- /* everything read, or couldn't get memory */
- ret = 0;
- goto done;
- }
-
- *cntrp = cntr;
- for (i = 0; i < dd->cspec->ncntrs; i++)
- *cntr++ = read_7220_creg32(dd, cntr7220indices[i]);
- }
-done:
- return ret;
-}
-
-static u32 qib_read_7220portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
- char **namep, u64 **cntrp)
-{
- u32 ret;
-
- if (!dd->cspec->portcntrs) {
- ret = 0;
- goto done;
- }
- if (namep) {
- *namep = (char *)portcntr7220names;
- ret = dd->cspec->portcntrnamelen;
- if (pos >= ret)
- ret = 0; /* final read after getting everything */
- } else {
- u64 *cntr = dd->cspec->portcntrs;
- struct qib_pportdata *ppd = &dd->pport[port];
- int i;
-
- ret = dd->cspec->nportcntrs * sizeof(u64);
- if (!cntr || pos >= ret) {
- /* everything read, or couldn't get memory */
- ret = 0;
- goto done;
- }
- *cntrp = cntr;
- for (i = 0; i < dd->cspec->nportcntrs; i++) {
- if (portcntr7220indices[i] & _PORT_VIRT_FLAG)
- *cntr++ = qib_portcntr_7220(ppd,
- portcntr7220indices[i] &
- ~_PORT_VIRT_FLAG);
- else
- *cntr++ = read_7220_creg32(dd,
- portcntr7220indices[i]);
- }
- }
-done:
- return ret;
-}
-
-/**
- * qib_get_7220_faststats - get word counters from chip before they overflow
- * @opaque: contains a pointer to the qlogic_ib device qib_devdata
- *
- * This needs more work; in particular, a decision on whether we really
- * need traffic_wds done the way it is.
- * Called from add_timer.
- */
-static void qib_get_7220_faststats(unsigned long opaque)
-{
- struct qib_devdata *dd = (struct qib_devdata *) opaque;
- struct qib_pportdata *ppd = dd->pport;
- unsigned long flags;
- u64 traffic_wds;
-
- /*
- * don't access the chip while running diags, or memory diags can
- * fail
- */
- if (!(dd->flags & QIB_INITTED) || dd->diag_client)
- /* but re-arm the timer, for diags case; won't hurt other */
- goto done;
-
- /*
- * We now try to maintain an activity timer, based on traffic
- * exceeding a threshold, so we need to check the word-counts
- * even if they are 64-bit.
- */
- traffic_wds = qib_portcntr_7220(ppd, cr_wordsend) +
- qib_portcntr_7220(ppd, cr_wordrcv);
- spin_lock_irqsave(&dd->eep_st_lock, flags);
- traffic_wds -= dd->traffic_wds;
- dd->traffic_wds += traffic_wds;
- spin_unlock_irqrestore(&dd->eep_st_lock, flags);
-done:
- mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
-}
-
-/*
- * If we are using MSI, try to fallback to INTx.
- */
-static int qib_7220_intr_fallback(struct qib_devdata *dd)
-{
- if (!dd->msi_lo)
- return 0;
-
- qib_devinfo(dd->pcidev,
- "MSI interrupt not detected, trying INTx interrupts\n");
- qib_7220_free_irq(dd);
- qib_enable_intx(dd->pcidev);
- /*
- * Some newer kernels require free_irq before disable_msi,
- * and the irq can be changed during disable and INTx enable,
- * so we therefore need to use the pcidev->irq value,
- * not our saved MSI value.
- */
- dd->cspec->irq = dd->pcidev->irq;
- qib_setup_7220_interrupt(dd);
- return 1;
-}
-
-/*
- * Reset the XGXS (between serdes and IBC). Slightly less intrusive
- * than resetting the IBC or external link state, and useful in some
- * cases to cause some retraining. To do this right, we reset IBC
- * as well.
- */
-static void qib_7220_xgxs_reset(struct qib_pportdata *ppd)
-{
- u64 val, prev_val;
- struct qib_devdata *dd = ppd->dd;
-
- prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
- val = prev_val | QLOGIC_IB_XGXS_RESET;
- prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
- qib_write_kreg(dd, kr_control,
- dd->control & ~QLOGIC_IB_C_LINKENABLE);
- qib_write_kreg(dd, kr_xgxs_cfg, val);
- qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
- qib_write_kreg(dd, kr_control, dd->control);
-}
-
-/*
- * For this chip, we want to use the same buffer every time
- * when we are trying to bring the link up (they are always VL15
- * packets). At that link state the packet should always go out immediately
- * (or at least be discarded at the tx interface if the link is down).
- * If it doesn't, and the buffer isn't available, that means some other
- * sender has gotten ahead of us, and is preventing our packet from going
- * out. In that case, we flush all packets, and try again. If that still
- * fails, we fail the request, and hope things work the next time around.
- *
- * We don't need very complicated heuristics on whether the packet had
- * time to go out or not, since even at SDR 1X, it goes out in very short
- * time periods, covered by the chip reads done here and as part of the
- * flush.
- */
-static u32 __iomem *get_7220_link_buf(struct qib_pportdata *ppd, u32 *bnum)
-{
- u32 __iomem *buf;
- u32 lbuf = ppd->dd->cspec->lastbuf_for_pio;
- int do_cleanup;
- unsigned long flags;
-
- /*
- * always blip to get avail list updated, since it's almost
- * always needed, and is fairly cheap.
- */
- sendctrl_7220_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
- qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
- buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
- if (buf)
- goto done;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- if (ppd->sdma_state.current_state == qib_sdma_state_s20_idle &&
- ppd->sdma_state.current_state != qib_sdma_state_s00_hw_down) {
- __qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
- do_cleanup = 0;
- } else {
- do_cleanup = 1;
- qib_7220_sdma_hw_clean_up(ppd);
- }
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-
- if (do_cleanup) {
- qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
- buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
- }
-done:
- return buf;
-}
-
-/*
- * This code for non-IBTA-compliant IB speed negotiation is only known to
- * work for the SDR to DDR transition, and only between an HCA and a switch
- * with recent firmware. It is based on observed heuristics, rather than
- * actual knowledge of the non-compliant speed negotiation.
- * It has a number of hard-coded fields, since the hope is to rewrite this
- * when a spec is available on how the negotiation is intended to work.
- */
-static void autoneg_7220_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
- u32 dcnt, u32 *data)
-{
- int i;
- u64 pbc;
- u32 __iomem *piobuf;
- u32 pnum;
- struct qib_devdata *dd = ppd->dd;
-
- i = 0;
- pbc = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
- pbc |= PBC_7220_VL15_SEND;
- while (!(piobuf = get_7220_link_buf(ppd, &pnum))) {
- if (i++ > 5)
- return;
- udelay(2);
- }
- sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_DISARM_BUF(pnum));
- writeq(pbc, piobuf);
- qib_flush_wc();
- qib_pio_copy(piobuf + 2, hdr, 7);
- qib_pio_copy(piobuf + 9, data, dcnt);
- if (dd->flags & QIB_USE_SPCL_TRIG) {
- u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
-
- qib_flush_wc();
- __raw_writel(0xaebecede, piobuf + spcl_off);
- }
- qib_flush_wc();
- qib_sendbuf_done(dd, pnum);
-}
-
-/*
- * _start packet gets sent twice at start, _done gets sent twice at end
- */
-static void autoneg_7220_send(struct qib_pportdata *ppd, int which)
-{
- struct qib_devdata *dd = ppd->dd;
- static u32 swapped;
- u32 dw, i, hcnt, dcnt, *data;
- static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
- static u32 madpayload_start[0x40] = {
- 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
- 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
- };
- static u32 madpayload_done[0x40] = {
- 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
- 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- 0x40000001, 0x1388, 0x15e, /* rest 0's */
- };
-
- dcnt = ARRAY_SIZE(madpayload_start);
- hcnt = ARRAY_SIZE(hdr);
- if (!swapped) {
- /* for maintainability, do it at runtime */
- for (i = 0; i < hcnt; i++) {
- dw = (__force u32) cpu_to_be32(hdr[i]);
- hdr[i] = dw;
- }
- for (i = 0; i < dcnt; i++) {
- dw = (__force u32) cpu_to_be32(madpayload_start[i]);
- madpayload_start[i] = dw;
- dw = (__force u32) cpu_to_be32(madpayload_done[i]);
- madpayload_done[i] = dw;
- }
- swapped = 1;
- }
-
- data = which ? madpayload_done : madpayload_start;
-
- autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
- qib_read_kreg64(dd, kr_scratch);
- udelay(2);
- autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
- qib_read_kreg64(dd, kr_scratch);
- udelay(2);
-}
-
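
The header and MAD payload tables above are kept in host order and byte-swapped in place exactly once, on first use, guarded by a static flag. A stand-alone sketch of that one-time swap, using the standard htonl() in place of the kernel's cpu_to_be32() (and, like the original static 'swapped' flag, without any locking):

#include <arpa/inet.h>  /* htonl() */
#include <stdint.h>
#include <stdio.h>

static uint32_t hdr[3] = { 0xf002ffff, 0x0048ffff, 0x6400abba };

/* Convert the table to big-endian on the first call only. */
static void swap_once(void)
{
        static int swapped;
        unsigned int i;

        if (swapped)
                return;
        for (i = 0; i < sizeof(hdr) / sizeof(hdr[0]); i++)
                hdr[i] = htonl(hdr[i]);
        swapped = 1;
}

int main(void)
{
        swap_once();
        swap_once();    /* second call is a no-op */
        printf("0x%08x\n", (unsigned int)hdr[0]);
        return 0;
}
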
-/*
- * Do the absolute minimum to cause an IB speed change, and make it
- * ready, but don't actually trigger the change. The caller will
- * do that when ready (if link is in Polling training state, it will
- * happen immediately, otherwise when link next goes down)
- *
- * This routine should only be used as part of the DDR autonegotiation
- * code for devices that are not compliant with IB 1.2 (or code that
- * fixes things up for same).
- *
- * When the link has gone down and autoneg is enabled, or autoneg has
- * failed and we give up until the next time we set both speeds, we
- * want IBTA 1.2 mode enabled as well as "use max enabled speed".
- */
-static void set_7220_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
-{
- ppd->cpspec->ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK);
-
- if (speed == (QIB_IB_SDR | QIB_IB_DDR))
- ppd->cpspec->ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK;
- else
- ppd->cpspec->ibcddrctrl |= speed == QIB_IB_DDR ?
- IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
-
- qib_write_kreg(ppd->dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
- qib_write_kreg(ppd->dd, kr_scratch, 0);
-}
-
-/*
- * This routine is only used when we are not talking to another
- * IB 1.2-compliant device that we think can do DDR.
- * (This includes all existing switch chips as of Oct 2007.)
- * 1.2-compliant devices go directly to DDR prior to reaching INIT
- */
-static void try_7220_autoneg(struct qib_pportdata *ppd)
-{
- unsigned long flags;
-
- /*
- * Required for older non-IB1.2 DDR switches. Newer
- * non-IB-compliant switches don't need it, but so far,
- * aren't bothered by it either. "Magic constant"
- */
- qib_write_kreg(ppd->dd, kr_ncmodectrl, 0x3b9dc07);
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- autoneg_7220_send(ppd, 0);
- set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
-
- toggle_7220_rclkrls(ppd->dd);
- /* 2 msec is minimum length of a poll cycle */
- queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
- msecs_to_jiffies(2));
-}
-
-/*
- * Handle the empirically determined mechanism for auto-negotiation
- * of DDR speed with switches.
- */
-static void autoneg_7220_work(struct work_struct *work)
-{
- struct qib_pportdata *ppd;
- struct qib_devdata *dd;
- u64 startms;
- u32 i;
- unsigned long flags;
-
- ppd = &container_of(work, struct qib_chippport_specific,
- autoneg_work.work)->pportdata;
- dd = ppd->dd;
-
- startms = jiffies_to_msecs(jiffies);
-
- /*
- * Busy wait for this first part; it should be at most a
- * few hundred usec, since we scheduled ourselves for 2msec.
- */
- for (i = 0; i < 25; i++) {
- if (SYM_FIELD(ppd->lastibcstat, IBCStatus, LinkTrainingState)
- == IB_7220_LT_STATE_POLLQUIET) {
- qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
- break;
- }
- udelay(100);
- }
-
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
- goto done; /* we got there early or told to stop */
-
- /* we expect this to timeout */
- if (wait_event_timeout(ppd->cpspec->autoneg_wait,
- !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
- msecs_to_jiffies(90)))
- goto done;
-
- toggle_7220_rclkrls(dd);
-
- /* we expect this to timeout */
- if (wait_event_timeout(ppd->cpspec->autoneg_wait,
- !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
- msecs_to_jiffies(1700)))
- goto done;
-
- set_7220_ibspeed_fast(ppd, QIB_IB_SDR);
- toggle_7220_rclkrls(dd);
-
- /*
- * Wait up to 250 msec for link to train and get to INIT at DDR;
- * this should terminate early.
- */
- wait_event_timeout(ppd->cpspec->autoneg_wait,
- !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
- msecs_to_jiffies(250));
-done:
- if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
- if (dd->cspec->autoneg_tries == AUTONEG_TRIES) {
- ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
- dd->cspec->autoneg_tries = 0;
- }
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
- }
-}
-
-static u32 qib_7220_iblink_state(u64 ibcs)
-{
- u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);
-
- switch (state) {
- case IB_7220_L_STATE_INIT:
- state = IB_PORT_INIT;
- break;
- case IB_7220_L_STATE_ARM:
- state = IB_PORT_ARMED;
- break;
- case IB_7220_L_STATE_ACTIVE:
- /* fall through */
- case IB_7220_L_STATE_ACT_DEFER:
- state = IB_PORT_ACTIVE;
- break;
- default: /* fall through */
- case IB_7220_L_STATE_DOWN:
- state = IB_PORT_DOWN;
- break;
- }
- return state;
-}
-
-/* returns the IBTA port state, rather than the IBC link training state */
-static u8 qib_7220_phys_portstate(u64 ibcs)
-{
- u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
- return qib_7220_physportstate[state];
-}
-
-static int qib_7220_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
-{
- int ret = 0, symadj = 0;
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
-
- if (!ibup) {
- /*
- * When the link goes down we don't want AEQ running, so it
- * won't interfere with IBC training, etc., and we need
- * to go back to the static SerDes preset values.
- */
- if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
- QIBL_IB_AUTONEG_INPROG)))
- set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
- qib_sd7220_presets(dd);
- qib_cancel_sends(ppd); /* initial disarm, etc. */
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- if (__qib_sdma_running(ppd))
- __qib_sdma_process_event(ppd,
- qib_sdma_event_e70_go_idle);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- }
- /* this might be better in qib_sd7220_presets() */
- set_7220_relock_poll(dd, ibup);
- } else {
- if (qib_compat_ddr_negotiate &&
- !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
- QIBL_IB_AUTONEG_INPROG)) &&
- ppd->link_speed_active == QIB_IB_SDR &&
- (ppd->link_speed_enabled & (QIB_IB_DDR | QIB_IB_SDR)) ==
- (QIB_IB_DDR | QIB_IB_SDR) &&
- dd->cspec->autoneg_tries < AUTONEG_TRIES) {
- /* we are SDR, and DDR auto-negotiation enabled */
- ++dd->cspec->autoneg_tries;
- if (!ppd->cpspec->ibdeltainprog) {
- ppd->cpspec->ibdeltainprog = 1;
- ppd->cpspec->ibsymsnap = read_7220_creg32(dd,
- cr_ibsymbolerr);
- ppd->cpspec->iblnkerrsnap = read_7220_creg32(dd,
- cr_iblinkerrrecov);
- }
- try_7220_autoneg(ppd);
- ret = 1; /* no other IB status change processing */
- } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
- ppd->link_speed_active == QIB_IB_SDR) {
- autoneg_7220_send(ppd, 1);
- set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
- udelay(2);
- toggle_7220_rclkrls(dd);
- ret = 1; /* no other IB status change processing */
- } else {
- if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
- (ppd->link_speed_active & QIB_IB_DDR)) {
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
- QIBL_IB_AUTONEG_FAILED);
- spin_unlock_irqrestore(&ppd->lflags_lock,
- flags);
- dd->cspec->autoneg_tries = 0;
- /* re-enable SDR, for next link down */
- set_7220_ibspeed_fast(ppd,
- ppd->link_speed_enabled);
- wake_up(&ppd->cpspec->autoneg_wait);
- symadj = 1;
- } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
- /*
- * Clear autoneg failure flag, and do setup
- * so we'll try next time link goes down and
- * back to INIT (possibly connected to a
- * different device).
- */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
- spin_unlock_irqrestore(&ppd->lflags_lock,
- flags);
- ppd->cpspec->ibcddrctrl |=
- IBA7220_IBC_IBTA_1_2_MASK;
- qib_write_kreg(dd, kr_ncmodectrl, 0);
- symadj = 1;
- }
- }
-
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
- symadj = 1;
-
- if (!ret) {
- ppd->delay_mult = rate_to_delay
- [(ibcs >> IBA7220_LINKSPEED_SHIFT) & 1]
- [(ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1];
-
- set_7220_relock_poll(dd, ibup);
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- /*
- * Unlike 7322, the 7220 needs this, due to lack of
- * interrupt in some cases when we have sdma active
- * when the link goes down.
- */
- if (ppd->sdma_state.current_state !=
- qib_sdma_state_s20_idle)
- __qib_sdma_process_event(ppd,
- qib_sdma_event_e00_go_hw_down);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- }
- }
-
- if (symadj) {
- if (ppd->cpspec->ibdeltainprog) {
- ppd->cpspec->ibdeltainprog = 0;
- ppd->cpspec->ibsymdelta += read_7220_creg32(ppd->dd,
- cr_ibsymbolerr) - ppd->cpspec->ibsymsnap;
- ppd->cpspec->iblnkerrdelta += read_7220_creg32(ppd->dd,
- cr_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
- }
- } else if (!ibup && qib_compat_ddr_negotiate &&
- !ppd->cpspec->ibdeltainprog &&
- !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
- ppd->cpspec->ibdeltainprog = 1;
- ppd->cpspec->ibsymsnap = read_7220_creg32(ppd->dd,
- cr_ibsymbolerr);
- ppd->cpspec->iblnkerrsnap = read_7220_creg32(ppd->dd,
- cr_iblinkerrrecov);
- }
-
- if (!ret)
- qib_setup_7220_setextled(ppd, ibup);
- return ret;
-}
-
-/*
- * Does read/modify/write to appropriate registers to
- * set output and direction bits selected by mask.
- * These are in their canonical positions (e.g. lsb of
- * dir will end up in D48 of extctrl on existing chips).
- * returns contents of GP Inputs.
- */
-static int gpio_7220_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
-{
- u64 read_val, new_out;
- unsigned long flags;
-
- if (mask) {
- /* some bits being written, lock access to GPIO */
- dir &= mask;
- out &= mask;
- spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
- dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
- dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
- new_out = (dd->cspec->gpio_out & ~mask) | out;
-
- qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
- qib_write_kreg(dd, kr_gpio_out, new_out);
- dd->cspec->gpio_out = new_out;
- spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
- }
- /*
- * It is unlikely that a read at this time would get valid
- * data on a pin whose direction line was set in the same
- * call to this function. We include the read here because
- * that allows us to potentially combine a change on one pin with
- * a read on another, and because the old code did something like
- * this.
- */
- read_val = qib_read_kreg64(dd, kr_extstatus);
- return SYM_FIELD(read_val, EXTStatus, GPIOIn);
-}
-
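
gpio_7220_mod() is a masked read-modify-write: only the bits selected by mask change direction or output state, everything else keeps its shadowed value. A small sketch of that merge step on its own (names and widths are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Merge the masked output bits into the shadow, leaving other pins alone. */
static uint32_t merge_out(uint32_t shadow, uint32_t out, uint32_t mask)
{
        return (shadow & ~mask) | (out & mask);
}

int main(void)
{
        uint32_t shadow = 0xf0;

        /* drive bits 0-3 to 0b0101, leave bits 4-7 untouched */
        shadow = merge_out(shadow, 0x05, 0x0f);
        printf("0x%02x\n", shadow);     /* prints 0xf5 */
        return 0;
}
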
-/*
- * Read fundamental info we need to use the chip. These are
- * the registers that describe chip capabilities, and are
- * saved in shadow registers.
- */
-static void get_7220_chip_params(struct qib_devdata *dd)
-{
- u64 val;
- u32 piobufs;
- int mtu;
-
- dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
-
- dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
- dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
- dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
- dd->palign = qib_read_kreg32(dd, kr_palign);
- dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
- dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
-
- val = qib_read_kreg64(dd, kr_sendpiosize);
- dd->piosize2k = val & ~0U;
- dd->piosize4k = val >> 32;
-
- mtu = ib_mtu_enum_to_int(qib_ibmtu);
- if (mtu == -1)
- mtu = QIB_DEFAULT_MTU;
- dd->pport->ibmtu = (u32)mtu;
-
- val = qib_read_kreg64(dd, kr_sendpiobufcnt);
- dd->piobcnt2k = val & ~0U;
- dd->piobcnt4k = val >> 32;
- /* these may be adjusted in init_chip_wc_pat() */
- dd->pio2kbase = (u32 __iomem *)
- ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
- if (dd->piobcnt4k) {
- dd->pio4kbase = (u32 __iomem *)
- ((char __iomem *) dd->kregbase +
- (dd->piobufbase >> 32));
- /*
- * 4K buffers take 2 pages; we use roundup just to be
- * paranoid; we calculate it once here, rather than on
- * ever buf allocate
- */
- dd->align4k = ALIGN(dd->piosize4k, dd->palign);
- }
-
- piobufs = dd->piobcnt4k + dd->piobcnt2k;
-
- dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
- (sizeof(u64) * BITS_PER_BYTE / 2);
-}
-
-/*
- * The chip base addresses in cspec and cpspec have to be set
- * after possible init_chip_wc_pat(), rather than in
- * get_7220_chip_params(), so split out as a separate function
- */
-static void set_7220_baseaddrs(struct qib_devdata *dd)
-{
- u32 cregbase;
- /* init after possible re-map in init_chip_wc_pat() */
- cregbase = qib_read_kreg32(dd, kr_counterregbase);
- dd->cspec->cregbase = (u64 __iomem *)
- ((char __iomem *) dd->kregbase + cregbase);
-
- dd->egrtidbase = (u64 __iomem *)
- ((char __iomem *) dd->kregbase + dd->rcvegrbase);
-}
-
-
-#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl, SendIntBufAvail) | \
- SYM_MASK(SendCtrl, SPioEnable) | \
- SYM_MASK(SendCtrl, SSpecialTriggerEn) | \
- SYM_MASK(SendCtrl, SendBufAvailUpd) | \
- SYM_MASK(SendCtrl, AvailUpdThld) | \
- SYM_MASK(SendCtrl, SDmaEnable) | \
- SYM_MASK(SendCtrl, SDmaIntEnable) | \
- SYM_MASK(SendCtrl, SDmaHalt) | \
- SYM_MASK(SendCtrl, SDmaSingleDescriptor))
-
-static int sendctrl_hook(struct qib_devdata *dd,
- const struct diag_observer *op,
- u32 offs, u64 *data, u64 mask, int only_32)
-{
- unsigned long flags;
- unsigned idx = offs / sizeof(u64);
- u64 local_data, all_bits;
-
- if (idx != kr_sendctrl) {
- qib_dev_err(dd, "SendCtrl Hook called with offs %X, %s-bit\n",
- offs, only_32 ? "32" : "64");
- return 0;
- }
-
- all_bits = ~0ULL;
- if (only_32)
- all_bits >>= 32;
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- if ((mask & all_bits) != all_bits) {
- /*
- * At least some mask bits are zero, so we need
- * to read. The judgement call is whether from
- * reg or shadow. First-cut: read reg, and complain
- * if any bits which should be shadowed are different
- * from their shadowed value.
- */
- if (only_32)
- local_data = (u64)qib_read_kreg32(dd, idx);
- else
- local_data = qib_read_kreg64(dd, idx);
- qib_dev_err(dd, "Sendctrl -> %X, Shad -> %X\n",
- (u32)local_data, (u32)dd->sendctrl);
- if ((local_data & SENDCTRL_SHADOWED) !=
- (dd->sendctrl & SENDCTRL_SHADOWED))
- qib_dev_err(dd, "Sendctrl read: %X shadow is %X\n",
- (u32)local_data, (u32) dd->sendctrl);
- *data = (local_data & ~mask) | (*data & mask);
- }
- if (mask) {
- /*
- * At least some mask bits are one, so we need
- * to write, but only shadow some bits.
- */
- u64 sval, tval; /* Shadowed, transient */
-
- /*
- * New shadow val is bits we don't want to touch,
- * ORed with bits we do, that are intended for shadow.
- */
- sval = (dd->sendctrl & ~mask);
- sval |= *data & SENDCTRL_SHADOWED & mask;
- dd->sendctrl = sval;
- tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
- qib_dev_err(dd, "Sendctrl <- %X, Shad <- %X\n",
- (u32)tval, (u32)sval);
- qib_write_kreg(dd, kr_sendctrl, tval);
- qib_write_kreg(dd, kr_scratch, 0Ull);
- }
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
-
- return only_32 ? 4 : 8;
-}
-
-static const struct diag_observer sendctrl_observer = {
- sendctrl_hook, kr_sendctrl * sizeof(u64),
- kr_sendctrl * sizeof(u64)
-};
-
-/*
- * write the final few registers that depend on some of the
- * init setup. Done late in init, just before bringing up
- * the serdes.
- */
-static int qib_late_7220_initreg(struct qib_devdata *dd)
-{
- int ret = 0;
- u64 val;
-
- qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
- qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
- qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
- qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
- val = qib_read_kreg64(dd, kr_sendpioavailaddr);
- if (val != dd->pioavailregs_phys) {
- qib_dev_err(dd,
- "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
- (unsigned long) dd->pioavailregs_phys,
- (unsigned long long) val);
- ret = -EINVAL;
- }
- qib_register_observer(dd, &sendctrl_observer);
- return ret;
-}
-
-static int qib_init_7220_variables(struct qib_devdata *dd)
-{
- struct qib_chippport_specific *cpspec;
- struct qib_pportdata *ppd;
- int ret = 0;
- u32 sbufs, updthresh;
-
- cpspec = (struct qib_chippport_specific *)(dd + 1);
- ppd = &cpspec->pportdata;
- dd->pport = ppd;
- dd->num_pports = 1;
-
- dd->cspec = (struct qib_chip_specific *)(cpspec + dd->num_pports);
- ppd->cpspec = cpspec;
-
- spin_lock_init(&dd->cspec->sdepb_lock);
- spin_lock_init(&dd->cspec->rcvmod_lock);
- spin_lock_init(&dd->cspec->gpio_lock);
-
- /* we haven't yet set QIB_PRESENT, so use read directly */
- dd->revision = readq(&dd->kregbase[kr_revision]);
-
- if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
- qib_dev_err(dd,
- "Revision register read failure, giving up initialization\n");
- ret = -ENODEV;
- goto bail;
- }
- dd->flags |= QIB_PRESENT; /* now register routines work */
-
- dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
- ChipRevMajor);
- dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
- ChipRevMinor);
-
- get_7220_chip_params(dd);
- qib_7220_boardname(dd);
-
- /*
- * GPIO bits for TWSI data and clock,
- * used for serial EEPROM.
- */
- dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
- dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
- dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
-
- dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
- QIB_NODMA_RTAIL | QIB_HAS_THRESH_UPDATE;
- dd->flags |= qib_special_trigger ?
- QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
-
- /*
- * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
- * 2 is Some Misc, 3 is reserved for future.
- */
- dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr);
-
- dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr);
-
- dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);
-
- init_waitqueue_head(&cpspec->autoneg_wait);
- INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work);
-
- ret = qib_init_pportdata(ppd, dd, 0, 1);
- if (ret)
- goto bail;
- ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
- ppd->link_speed_supported = QIB_IB_SDR | QIB_IB_DDR;
-
- ppd->link_width_enabled = ppd->link_width_supported;
- ppd->link_speed_enabled = ppd->link_speed_supported;
- /*
- * Set the initial values to reasonable defaults; they will be set
- * for real when the link is up.
- */
- ppd->link_width_active = IB_WIDTH_4X;
- ppd->link_speed_active = QIB_IB_SDR;
- ppd->delay_mult = rate_to_delay[0][1];
- ppd->vls_supported = IB_VL_VL0;
- ppd->vls_operational = ppd->vls_supported;
-
- if (!qib_mini_init)
- qib_write_kreg(dd, kr_rcvbthqp, QIB_KD_QP);
-
- init_timer(&ppd->cpspec->chase_timer);
- ppd->cpspec->chase_timer.function = reenable_7220_chase;
- ppd->cpspec->chase_timer.data = (unsigned long)ppd;
-
- qib_num_cfg_vls = 1; /* if any 7220's, only one VL */
-
- dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
- dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
- dd->rhf_offset =
- dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
-
- /* we always allocate at least 2048 bytes for eager buffers */
- ret = ib_mtu_enum_to_int(qib_ibmtu);
- dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
- BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
- dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
-
- qib_7220_tidtemplate(dd);
-
- /*
- * We can request a receive interrupt for 1 or
- * more packets from current offset. For now, we set this
- * up for a single packet.
- */
- dd->rhdrhead_intr_off = 1ULL << 32;
-
- /* setup the stats timer; the add_timer is done at end of init */
- init_timer(&dd->stats_timer);
- dd->stats_timer.function = qib_get_7220_faststats;
- dd->stats_timer.data = (unsigned long) dd;
- dd->stats_timer.expires = jiffies + ACTIVITY_TIMER * HZ;
-
- /*
- * Control[4] has been added to change the arbitration within
- * the SDMA engine between favoring data fetches over descriptor
- * fetches. qib_sdma_fetch_arb==0 gives data fetches priority.
- */
- if (qib_sdma_fetch_arb)
- dd->control |= 1 << 4;
-
- dd->ureg_align = 0x10000; /* 64KB alignment */
-
- dd->piosize2kmax_dwords = (dd->piosize2k >> 2)-1;
- qib_7220_config_ctxts(dd);
- qib_set_ctxtcnt(dd); /* needed for PAT setup */
-
- ret = init_chip_wc_pat(dd, 0);
- if (ret)
- goto bail;
- set_7220_baseaddrs(dd); /* set chip access pointers now */
-
- ret = 0;
- if (qib_mini_init)
- goto bail;
-
- ret = qib_create_ctxts(dd);
- init_7220_cntrnames(dd);
-
- /* Use all of the 4KB buffers for kernel SDMA (zero if SDMA is off).
- * Reserve the update threshold amount, or 3, whichever is greater,
- * for other kernel use such as sending SMI, MAD, and ACKs, unless
- * we aren't enabling SDMA, in which case we want to use all the
- * 4k bufs for the kernel.
- * If the reservation were less than the update threshold, we could
- * wait a long time for an update. Coded this way because we
- * sometimes change the update threshold for various reasons,
- * and we want this to remain robust.
- */
- updthresh = 8U; /* update threshold */
- if (dd->flags & QIB_HAS_SEND_DMA) {
- dd->cspec->sdmabufcnt = dd->piobcnt4k;
- sbufs = updthresh > 3 ? updthresh : 3;
- } else {
- dd->cspec->sdmabufcnt = 0;
- sbufs = dd->piobcnt4k;
- }
-
- dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
- dd->cspec->sdmabufcnt;
- dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
- dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
- dd->last_pio = dd->cspec->lastbuf_for_pio;
- dd->pbufsctxt = dd->lastctxt_piobuf /
- (dd->cfgctxts - dd->first_user_ctxt);
-
- /*
- * If we are at 16 user contexts, we will have only 7 sbufs
- * per context, so drop the update threshold to match. We
- * want to update before we actually run out, so at low
- * pbufs/ctxt give ourselves some margin.
- */
- if ((dd->pbufsctxt - 2) < updthresh)
- updthresh = dd->pbufsctxt - 2;
-
- dd->cspec->updthresh_dflt = updthresh;
- dd->cspec->updthresh = updthresh;
-
- /* before full enable, no interrupts, no locking needed */
- dd->sendctrl |= (updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
- << SYM_LSB(SendCtrl, AvailUpdThld);
-
- dd->psxmitwait_supported = 1;
- dd->psxmitwait_check_rate = QIB_7220_PSXMITWAIT_CHECK_RATE;
-bail:
- return ret;
-}
-
-static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
- u32 *pbufnum)
-{
- u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
- struct qib_devdata *dd = ppd->dd;
- u32 __iomem *buf;
-
- if (((pbc >> 32) & PBC_7220_VL15_SEND_CTRL) &&
- !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
- buf = get_7220_link_buf(ppd, pbufnum);
- else {
- if ((plen + 1) > dd->piosize2kmax_dwords)
- first = dd->piobcnt2k;
- else
- first = 0;
- /* try 4k if all 2k busy, so same last for both sizes */
- last = dd->cspec->lastbuf_for_pio;
- buf = qib_getsendbuf_range(dd, pbufnum, first, last);
- }
- return buf;
-}
-
-/* these 2 "counters" are really control registers, and are always RW */
-static void qib_set_cntr_7220_sample(struct qib_pportdata *ppd, u32 intv,
- u32 start)
-{
- write_7220_creg(ppd->dd, cr_psinterval, intv);
- write_7220_creg(ppd->dd, cr_psstart, start);
-}
-
-/*
- * NOTE: no real attempt is made to generalize the SDMA stuff.
- * At some point "soon" we will have a new more generalized
- * set of sdma interface, and then we'll clean this up.
- */
-
-/* Must be called with sdma_lock held, or before init finished */
-static void qib_sdma_update_7220_tail(struct qib_pportdata *ppd, u16 tail)
-{
- /* Commit writes to memory and advance the tail on the chip */
- wmb();
- ppd->sdma_descq_tail = tail;
- qib_write_kreg(ppd->dd, kr_senddmatail, tail);
-}
-
-static void qib_sdma_set_7220_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
-{
-}
-
-static struct sdma_set_state_action sdma_7220_action_table[] = {
- [qib_sdma_state_s00_hw_down] = {
- .op_enable = 0,
- .op_intenable = 0,
- .op_halt = 0,
- .go_s99_running_tofalse = 1,
- },
- [qib_sdma_state_s10_hw_start_up_wait] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 1,
- },
- [qib_sdma_state_s20_idle] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 1,
- },
- [qib_sdma_state_s30_sw_clean_up_wait] = {
- .op_enable = 0,
- .op_intenable = 1,
- .op_halt = 0,
- },
- [qib_sdma_state_s40_hw_clean_up_wait] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 1,
- },
- [qib_sdma_state_s50_hw_halt_wait] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 1,
- },
- [qib_sdma_state_s99_running] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 0,
- .go_s99_running_totrue = 1,
- },
-};
-
-static void qib_7220_sdma_init_early(struct qib_pportdata *ppd)
-{
- ppd->sdma_state.set_state_action = sdma_7220_action_table;
-}
-
-static int init_sdma_7220_regs(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- unsigned i, n;
- u64 senddmabufmask[3] = { 0 };
-
- /* Set SendDmaBase */
- qib_write_kreg(dd, kr_senddmabase, ppd->sdma_descq_phys);
- qib_sdma_7220_setlengen(ppd);
- qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
- /* Set SendDmaHeadAddr */
- qib_write_kreg(dd, kr_senddmaheadaddr, ppd->sdma_head_phys);
-
- /*
- * Reserve all the former "kernel" piobufs, using high number range
- * so we get as many 4K buffers as possible
- */
- n = dd->piobcnt2k + dd->piobcnt4k;
- i = n - dd->cspec->sdmabufcnt;
-
- for (; i < n; ++i) {
- unsigned word = i / 64;
- unsigned bit = i & 63;
-
- BUG_ON(word >= 3);
- senddmabufmask[word] |= 1ULL << bit;
- }
- qib_write_kreg(dd, kr_senddmabufmask0, senddmabufmask[0]);
- qib_write_kreg(dd, kr_senddmabufmask1, senddmabufmask[1]);
- qib_write_kreg(dd, kr_senddmabufmask2, senddmabufmask[2]);
-
- ppd->sdma_state.first_sendbuf = i;
- ppd->sdma_state.last_sendbuf = n;
-
- return 0;
-}
-
-/* sdma_lock must be held */
-static u16 qib_sdma_7220_gethead(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- int sane;
- int use_dmahead;
- u16 swhead;
- u16 swtail;
- u16 cnt;
- u16 hwhead;
-
- use_dmahead = __qib_sdma_running(ppd) &&
- (dd->flags & QIB_HAS_SDMA_TIMEOUT);
-retry:
- hwhead = use_dmahead ?
- (u16)le64_to_cpu(*ppd->sdma_head_dma) :
- (u16)qib_read_kreg32(dd, kr_senddmahead);
-
- swhead = ppd->sdma_descq_head;
- swtail = ppd->sdma_descq_tail;
- cnt = ppd->sdma_descq_cnt;
-
- if (swhead < swtail) {
- /* not wrapped */
- sane = (hwhead >= swhead) & (hwhead <= swtail);
- } else if (swhead > swtail) {
- /* wrapped around */
- sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
- (hwhead <= swtail);
- } else {
- /* empty */
- sane = (hwhead == swhead);
- }
-
- if (unlikely(!sane)) {
- if (use_dmahead) {
- /* try one more time, directly from the register */
- use_dmahead = 0;
- goto retry;
- }
- /* assume no progress */
- hwhead = swhead;
- }
-
- return hwhead;
-}
-
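
The sanity test in qib_sdma_7220_gethead() is a generic check that the hardware head index still lies inside the window the software believes is outstanding on the circular descriptor queue, covering the wrapped and empty cases. The same predicate as a self-contained sketch (indices and queue size below are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Non-zero if hwhead is within [swhead, swtail] on a ring of cnt entries. */
static int hwhead_sane(uint16_t hwhead, uint16_t swhead,
                       uint16_t swtail, uint16_t cnt)
{
        if (swhead < swtail)                    /* not wrapped */
                return hwhead >= swhead && hwhead <= swtail;
        if (swhead > swtail)                    /* wrapped around */
                return (hwhead >= swhead && hwhead < cnt) ||
                        hwhead <= swtail;
        return hwhead == swhead;                /* queue empty */
}

int main(void)
{
        printf("%d\n", hwhead_sane(5, 3, 8, 16));       /* 1: inside window */
        printf("%d\n", hwhead_sane(1, 14, 4, 16));      /* 1: wrapped case */
        printf("%d\n", hwhead_sane(9, 3, 8, 16));       /* 0: ran past tail */
        return 0;
}
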
-static int qib_sdma_7220_busy(struct qib_pportdata *ppd)
-{
- u64 hwstatus = qib_read_kreg64(ppd->dd, kr_senddmastatus);
-
- return (hwstatus & SYM_MASK(SendDmaStatus, ScoreBoardDrainInProg)) ||
- (hwstatus & SYM_MASK(SendDmaStatus, AbortInProg)) ||
- (hwstatus & SYM_MASK(SendDmaStatus, InternalSDmaEnable)) ||
- !(hwstatus & SYM_MASK(SendDmaStatus, ScbEmpty));
-}
-
-/*
- * Compute the amount of delay before sending the next packet if the
- * port's send rate differs from the static rate set for the QP.
- * Since the delay affects this packet but the amount of the delay is
- * based on the length of the previous packet, use the last delay computed
- * and save the delay count for this packet to be used next time
- * we get here.
- */
-static u32 qib_7220_setpbc_control(struct qib_pportdata *ppd, u32 plen,
- u8 srate, u8 vl)
-{
- u8 snd_mult = ppd->delay_mult;
- u8 rcv_mult = ib_rate_to_delay[srate];
- u32 ret = ppd->cpspec->last_delay_mult;
-
- ppd->cpspec->last_delay_mult = (rcv_mult > snd_mult) ?
- (plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
-
- /* Indicate VL15, if necessary */
- if (vl == 15)
- ret |= PBC_7220_VL15_SEND_CTRL;
- return ret;
-}
-
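
qib_7220_setpbc_control() returns the delay computed for the previous packet and stores a new one derived from this packet's length: when the receiver's delay multiplier exceeds the sender's, the stored value is (plen * (rcv_mult - snd_mult) + 1) >> 1, otherwise zero. A small numeric check of that expression (the multiplier values below are made up, not taken from ib_rate_to_delay):

#include <stdint.h>
#include <stdio.h>

static uint32_t rate_delay(uint32_t plen, uint8_t snd_mult, uint8_t rcv_mult)
{
        return rcv_mult > snd_mult ?
                (plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
}

int main(void)
{
        /* e.g. 64-dword packet, sender multiplier 1, receiver multiplier 4 */
        printf("%u\n", rate_delay(64, 1, 4));   /* (64 * 3 + 1) >> 1 = 96 */
        printf("%u\n", rate_delay(64, 4, 1));   /* receiver at least as fast: 0 */
        return 0;
}
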
-static void qib_7220_initvl15_bufs(struct qib_devdata *dd)
-{
-}
-
-static void qib_7220_init_ctxt(struct qib_ctxtdata *rcd)
-{
- if (!rcd->ctxt) {
- rcd->rcvegrcnt = IBA7220_KRCVEGRCNT;
- rcd->rcvegr_tid_base = 0;
- } else {
- rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
- rcd->rcvegr_tid_base = IBA7220_KRCVEGRCNT +
- (rcd->ctxt - 1) * rcd->rcvegrcnt;
- }
-}
-
-static void qib_7220_txchk_change(struct qib_devdata *dd, u32 start,
- u32 len, u32 which, struct qib_ctxtdata *rcd)
-{
- int i;
- unsigned long flags;
-
- switch (which) {
- case TXCHK_CHG_TYPE_KERN:
- /* see if we need to raise avail update threshold */
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- for (i = dd->first_user_ctxt;
- dd->cspec->updthresh != dd->cspec->updthresh_dflt
- && i < dd->cfgctxts; i++)
- if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
- ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
- < dd->cspec->updthresh_dflt)
- break;
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
- if (i == dd->cfgctxts) {
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- dd->cspec->updthresh = dd->cspec->updthresh_dflt;
- dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
- dd->sendctrl |= (dd->cspec->updthresh &
- SYM_RMASK(SendCtrl, AvailUpdThld)) <<
- SYM_LSB(SendCtrl, AvailUpdThld);
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
- sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
- }
- break;
- case TXCHK_CHG_TYPE_USER:
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
- / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
- dd->cspec->updthresh = (rcd->piocnt /
- rcd->subctxt_cnt) - 1;
- dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
- dd->sendctrl |= (dd->cspec->updthresh &
- SYM_RMASK(SendCtrl, AvailUpdThld))
- << SYM_LSB(SendCtrl, AvailUpdThld);
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
- sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
- } else
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
- break;
- }
-}
-
-static void writescratch(struct qib_devdata *dd, u32 val)
-{
- qib_write_kreg(dd, kr_scratch, val);
-}
-
-#define VALID_TS_RD_REG_MASK 0xBF
-/**
- * qib_7220_tempsense_rd - read register of temp sensor via TWSI
- * @dd: the qlogic_ib device
- * @regnum: register to read from
- *
- * returns reg contents (0..255) or < 0 for error
- */
-static int qib_7220_tempsense_rd(struct qib_devdata *dd, int regnum)
-{
- int ret;
- u8 rdata;
-
- if (regnum > 7) {
- ret = -EINVAL;
- goto bail;
- }
-
- /* return a bogus value for (the one) register we do not have */
- if (!((1 << regnum) & VALID_TS_RD_REG_MASK)) {
- ret = 0;
- goto bail;
- }
-
- ret = mutex_lock_interruptible(&dd->eep_lock);
- if (ret)
- goto bail;
-
- ret = qib_twsi_blk_rd(dd, QIB_TWSI_TEMP_DEV, regnum, &rdata, 1);
- if (!ret)
- ret = rdata;
-
- mutex_unlock(&dd->eep_lock);
-
- /*
- * There are three possibilities here:
- * ret is actual value (0..255)
- * ret is -ENXIO or -EINVAL from twsi code or this file
- * ret is -EINTR from mutex_lock_interruptible.
- */
-bail:
- return ret;
-}
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-static int qib_7220_notify_dca(struct qib_devdata *dd, unsigned long event)
-{
- return 0;
-}
-#endif
-
-/* Dummy function, as 7220 boards never disable EEPROM Write */
-static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen)
-{
- return 1;
-}
-
-/**
- * qib_init_iba7220_funcs - set up the chip-specific function pointers
- * @pdev: the pci_dev for qlogic_ib device
- * @ent: pci_device_id struct for this dev
- *
- * This is global, and is called directly at init to set up the
- * chip-specific function pointers for later use.
- */
-struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct qib_devdata *dd;
- int ret;
- u32 boardid, minwidth;
-
- dd = qib_alloc_devdata(pdev, sizeof(struct qib_chip_specific) +
- sizeof(struct qib_chippport_specific));
- if (IS_ERR(dd))
- goto bail;
-
- dd->f_bringup_serdes = qib_7220_bringup_serdes;
- dd->f_cleanup = qib_setup_7220_cleanup;
- dd->f_clear_tids = qib_7220_clear_tids;
- dd->f_free_irq = qib_7220_free_irq;
- dd->f_get_base_info = qib_7220_get_base_info;
- dd->f_get_msgheader = qib_7220_get_msgheader;
- dd->f_getsendbuf = qib_7220_getsendbuf;
- dd->f_gpio_mod = gpio_7220_mod;
- dd->f_eeprom_wen = qib_7220_eeprom_wen;
- dd->f_hdrqempty = qib_7220_hdrqempty;
- dd->f_ib_updown = qib_7220_ib_updown;
- dd->f_init_ctxt = qib_7220_init_ctxt;
- dd->f_initvl15_bufs = qib_7220_initvl15_bufs;
- dd->f_intr_fallback = qib_7220_intr_fallback;
- dd->f_late_initreg = qib_late_7220_initreg;
- dd->f_setpbc_control = qib_7220_setpbc_control;
- dd->f_portcntr = qib_portcntr_7220;
- dd->f_put_tid = qib_7220_put_tid;
- dd->f_quiet_serdes = qib_7220_quiet_serdes;
- dd->f_rcvctrl = rcvctrl_7220_mod;
- dd->f_read_cntrs = qib_read_7220cntrs;
- dd->f_read_portcntrs = qib_read_7220portcntrs;
- dd->f_reset = qib_setup_7220_reset;
- dd->f_init_sdma_regs = init_sdma_7220_regs;
- dd->f_sdma_busy = qib_sdma_7220_busy;
- dd->f_sdma_gethead = qib_sdma_7220_gethead;
- dd->f_sdma_sendctrl = qib_7220_sdma_sendctrl;
- dd->f_sdma_set_desc_cnt = qib_sdma_set_7220_desc_cnt;
- dd->f_sdma_update_tail = qib_sdma_update_7220_tail;
- dd->f_sdma_hw_clean_up = qib_7220_sdma_hw_clean_up;
- dd->f_sdma_hw_start_up = qib_7220_sdma_hw_start_up;
- dd->f_sdma_init_early = qib_7220_sdma_init_early;
- dd->f_sendctrl = sendctrl_7220_mod;
- dd->f_set_armlaunch = qib_set_7220_armlaunch;
- dd->f_set_cntr_sample = qib_set_cntr_7220_sample;
- dd->f_iblink_state = qib_7220_iblink_state;
- dd->f_ibphys_portstate = qib_7220_phys_portstate;
- dd->f_get_ib_cfg = qib_7220_get_ib_cfg;
- dd->f_set_ib_cfg = qib_7220_set_ib_cfg;
- dd->f_set_ib_loopback = qib_7220_set_loopback;
- dd->f_set_intr_state = qib_7220_set_intr_state;
- dd->f_setextled = qib_setup_7220_setextled;
- dd->f_txchk_change = qib_7220_txchk_change;
- dd->f_update_usrhead = qib_update_7220_usrhead;
- dd->f_wantpiobuf_intr = qib_wantpiobuf_7220_intr;
- dd->f_xgxs_reset = qib_7220_xgxs_reset;
- dd->f_writescratch = writescratch;
- dd->f_tempsense_rd = qib_7220_tempsense_rd;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dd->f_notify_dca = qib_7220_notify_dca;
-#endif
- /*
- * Do remaining pcie setup and save pcie values in dd.
- * Any error printing is already done by the init code.
- * On return, we have the chip mapped, but chip registers
- * are not set up until start of qib_init_7220_variables.
- */
- ret = qib_pcie_ddinit(dd, pdev, ent);
- if (ret < 0)
- goto bail_free;
-
- /* initialize chip-specific variables */
- ret = qib_init_7220_variables(dd);
- if (ret)
- goto bail_cleanup;
-
- if (qib_mini_init)
- goto bail;
-
- boardid = SYM_FIELD(dd->revision, Revision,
- BoardID);
- switch (boardid) {
- case 0:
- case 2:
- case 10:
- case 12:
- minwidth = 16; /* x16 capable boards */
- break;
- default:
- minwidth = 8; /* x8 capable boards */
- break;
- }
- if (qib_pcie_params(dd, minwidth, NULL, NULL))
- qib_dev_err(dd,
- "Failed to setup PCIe or interrupts; continuing anyway\n");
-
- /* save IRQ for possible later use */
- dd->cspec->irq = pdev->irq;
-
- if (qib_read_kreg64(dd, kr_hwerrstatus) &
- QLOGIC_IB_HWE_SERDESPLLFAILED)
- qib_write_kreg(dd, kr_hwerrclear,
- QLOGIC_IB_HWE_SERDESPLLFAILED);
-
- /* setup interrupt handler (interrupt type handled above) */
- qib_setup_7220_interrupt(dd);
- qib_7220_init_hwerrors(dd);
-
- /* clear diagctrl register, in case diags were running and crashed */
- qib_write_kreg(dd, kr_hwdiagctrl, 0);
-
- goto bail;
-
-bail_cleanup:
- qib_pcie_ddcleanup(dd);
-bail_free:
- qib_free_devdata(dd);
- dd = ERR_PTR(ret);
-bail:
- return dd;
-}
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
deleted file mode 100644
index 6c8ff10101c0..000000000000
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ /dev/null
@@ -1,8574 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file contains all of the code that is specific to the
- * InfiniPath 7322 chip
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_smi.h>
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-#include <linux/dca.h>
-#endif
-
-#include "qib.h"
-#include "qib_7322_regs.h"
-#include "qib_qsfp.h"
-
-#include "qib_mad.h"
-#include "qib_verbs.h"
-
-#undef pr_fmt
-#define pr_fmt(fmt) QIB_DRV_NAME " " fmt
-
-static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
-static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
-static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
-static irqreturn_t qib_7322intr(int irq, void *data);
-static irqreturn_t qib_7322bufavail(int irq, void *data);
-static irqreturn_t sdma_intr(int irq, void *data);
-static irqreturn_t sdma_idle_intr(int irq, void *data);
-static irqreturn_t sdma_progress_intr(int irq, void *data);
-static irqreturn_t sdma_cleanup_intr(int irq, void *data);
-static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
- struct qib_ctxtdata *rcd);
-static u8 qib_7322_phys_portstate(u64);
-static u32 qib_7322_iblink_state(u64);
-static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
- u16 linitcmd);
-static void force_h1(struct qib_pportdata *);
-static void adj_tx_serdes(struct qib_pportdata *);
-static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
-static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
-
-static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
-static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
-static void serdes_7322_los_enable(struct qib_pportdata *, int);
-static int serdes_7322_init_old(struct qib_pportdata *);
-static int serdes_7322_init_new(struct qib_pportdata *);
-static void dump_sdma_7322_state(struct qib_pportdata *);
-
-#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
-
-/* LE2 serdes values for different cases */
-#define LE2_DEFAULT 5
-#define LE2_5m 4
-#define LE2_QME 0
-
-/* Below is special-purpose, so only really works for the IB SerDes blocks. */
-#define IBSD(hw_pidx) (hw_pidx + 2)
-
-/* these are variables for documentation and experimentation purposes */
-static const unsigned rcv_int_timeout = 375;
-static const unsigned rcv_int_count = 16;
-static const unsigned sdma_idle_cnt = 64;
-
-/* Time to stop altering Rx Equalization parameters, after link up. */
-#define RXEQ_DISABLE_MSECS 2500
-
-/*
- * Number of VLs we are configured to use (to allow for more
- * credits per vl, etc.)
- */
-ushort qib_num_cfg_vls = 2;
-module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
-MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
-
-static ushort qib_chase = 1;
-module_param_named(chase, qib_chase, ushort, S_IRUGO);
-MODULE_PARM_DESC(chase, "Enable state chase handling");
-
-static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
-module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
-MODULE_PARM_DESC(long_attenuation,
- "attenuation cutoff (dB) for long copper cable setup");
-
-static ushort qib_singleport;
-module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
-MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
-
-static ushort qib_krcvq01_no_msi;
-module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
-MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");
-
-/*
- * Receive header queue sizes
- */
-static unsigned qib_rcvhdrcnt;
-module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
-MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
-
-static unsigned qib_rcvhdrsize;
-module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
-MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
-
-static unsigned qib_rcvhdrentsize;
-module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
-MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
-
-#define MAX_ATTEN_LEN 64 /* plenty for any real system */
-/* for read back, default index is ~5m copper cable */
-static char txselect_list[MAX_ATTEN_LEN] = "10";
-static struct kparam_string kp_txselect = {
- .string = txselect_list,
- .maxlen = MAX_ATTEN_LEN
-};
-static int setup_txselect(const char *, struct kernel_param *);
-module_param_call(txselect, setup_txselect, param_get_string,
- &kp_txselect, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(txselect,
- "Tx serdes indices (for no QSFP or invalid QSFP data)");
-
-#define BOARD_QME7342 5
-#define BOARD_QMH7342 6
-#define BOARD_QMH7360 9
-#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
- BOARD_QMH7342)
-#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
- BOARD_QME7342)
-
-#define KREG_IDX(regname) (QIB_7322_##regname##_OFFS / sizeof(u64))
-
-#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
-
-#define MASK_ACROSS(lsb, msb) \
- (((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
-
-#define SYM_RMASK(regname, fldname) ((u64) \
- QIB_7322_##regname##_##fldname##_RMASK)
-
-#define SYM_MASK(regname, fldname) ((u64) \
- QIB_7322_##regname##_##fldname##_RMASK << \
- QIB_7322_##regname##_##fldname##_LSB)
-
-#define SYM_FIELD(value, regname, fldname) ((u64) \
- (((value) >> SYM_LSB(regname, fldname)) & \
- SYM_RMASK(regname, fldname)))
-
-/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
-#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
- (((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
-
-#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
-#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
-#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
-#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
-#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
-/* Below because most, but not all, fields of IntMask have that full suffix */
-#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
-
-
-#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
-
-/*
- * the size bits give us 2^N, in KB units. 0 marks as invalid,
- * and 7 is reserved. We currently use only 2KB and 4KB
- */
-#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
-#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
-#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
-#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
-
-#define SendIBSLIDAssignMask \
- QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
-#define SendIBSLMCMask \
- QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
-
-#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
-#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
-#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
-#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
-#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
-#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
-
-#define _QIB_GPIO_SDA_NUM 1
-#define _QIB_GPIO_SCL_NUM 0
-#define QIB_EEPROM_WEN_NUM 14
-#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
-
-/* HW counter clock is at 4nsec */
-#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
-
-/* full speed IB port 1 only */
-#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
-#define PORT_SPD_CAP_SHIFT 3
-
-/* full speed featuremask, both ports */
-#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
-
-/*
- * This file contains almost all the chip-specific register information and
- * access functions for the FAKED QLogic InfiniPath 7322 PCI-Express chip.
- */
-
-/* Use defines to tie machine-generated names to lower-case names */
-#define kr_contextcnt KREG_IDX(ContextCnt)
-#define kr_control KREG_IDX(Control)
-#define kr_counterregbase KREG_IDX(CntrRegBase)
-#define kr_errclear KREG_IDX(ErrClear)
-#define kr_errmask KREG_IDX(ErrMask)
-#define kr_errstatus KREG_IDX(ErrStatus)
-#define kr_extctrl KREG_IDX(EXTCtrl)
-#define kr_extstatus KREG_IDX(EXTStatus)
-#define kr_gpio_clear KREG_IDX(GPIOClear)
-#define kr_gpio_mask KREG_IDX(GPIOMask)
-#define kr_gpio_out KREG_IDX(GPIOOut)
-#define kr_gpio_status KREG_IDX(GPIOStatus)
-#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
-#define kr_debugportval KREG_IDX(DebugPortValueReg)
-#define kr_fmask KREG_IDX(feature_mask)
-#define kr_act_fmask KREG_IDX(active_feature_mask)
-#define kr_hwerrclear KREG_IDX(HwErrClear)
-#define kr_hwerrmask KREG_IDX(HwErrMask)
-#define kr_hwerrstatus KREG_IDX(HwErrStatus)
-#define kr_intclear KREG_IDX(IntClear)
-#define kr_intmask KREG_IDX(IntMask)
-#define kr_intredirect KREG_IDX(IntRedirect0)
-#define kr_intstatus KREG_IDX(IntStatus)
-#define kr_pagealign KREG_IDX(PageAlign)
-#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
-#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
-#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
-#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
-#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
-#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
-#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
-#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
-#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
-#define kr_revision KREG_IDX(Revision)
-#define kr_scratch KREG_IDX(Scratch)
-#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
-#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
-#define kr_sendctrl KREG_IDX(SendCtrl)
-#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
-#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
-#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
-#define kr_sendpiobufbase KREG_IDX(SendBufBase)
-#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
-#define kr_sendpiosize KREG_IDX(SendBufSize)
-#define kr_sendregbase KREG_IDX(SendRegBase)
-#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
-#define kr_userregbase KREG_IDX(UserRegBase)
-#define kr_intgranted KREG_IDX(Int_Granted)
-#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
-#define kr_intblocked KREG_IDX(IntBlocked)
-#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
-
-/*
- * per-port kernel registers. Access only with qib_read_kreg_port()
- * or qib_write_kreg_port()
- */
-#define krp_errclear KREG_IBPORT_IDX(ErrClear)
-#define krp_errmask KREG_IBPORT_IDX(ErrMask)
-#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
-#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
-#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
-#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
-#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
-#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
-#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
-#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
-#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
-#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
-#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
-#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
-#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
-#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
-#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
-#define krp_psstart KREG_IBPORT_IDX(PSStart)
-#define krp_psstat KREG_IBPORT_IDX(PSStat)
-#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
-#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
-#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
-#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
-#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
-#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
-#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
-#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
-#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
-#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
-#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
-#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
-#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
-#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
-#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
-#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
-#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
-#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
-#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
-#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
-#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
-#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
-#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
-#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
-#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
-#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
-#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
-#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
-#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
-#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
-#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
-
-/*
- * Per-context kernel registers. Access only with qib_read_kreg_ctxt()
- * or qib_write_kreg_ctxt()
- */
-#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
-#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
-
-/*
- * TID Flow table, per context. Reduces
- * number of hdrq updates to one per flow (or on errors).
- * context 0 and 1 share same memory, but have distinct
- * addresses. Since for now, we never use expected sends
- * on kernel contexts, we don't worry about that (we initialize
- * those entries for ctxt 0/1 on driver load twice, for example).
- */
-#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
-#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
-
-/* these are the error bits in the tid flows, and are W1C */
-#define TIDFLOW_ERRBITS ( \
- (SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
- SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
- (SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
- SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
-
-/* Most (not all) Counters are per-IBport.
- * Requires LBIntCnt is at offset 0 in the group
- */
-#define CREG_IDX(regname) \
-((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
-
-#define crp_badformat CREG_IDX(RxVersionErrCnt)
-#define crp_err_rlen CREG_IDX(RxLenErrCnt)
-#define crp_erricrc CREG_IDX(RxICRCErrCnt)
-#define crp_errlink CREG_IDX(RxLinkMalformCnt)
-#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
-#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
-#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
-#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
-#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
-#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
-#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
-#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
-#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
-#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
-#define crp_pktrcv CREG_IDX(RxDataPktCnt)
-#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
-#define crp_pktsend CREG_IDX(TxDataPktCnt)
-#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
-#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
-#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
-#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
-#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
-#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
-#define crp_rcvebp CREG_IDX(RxEBPCnt)
-#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
-#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
-#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
-#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
-#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
-#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
-#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
-#define crp_sendstall CREG_IDX(TxFlowStallCnt)
-#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
-#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
-#define crp_txlenerr CREG_IDX(TxLenErrCnt)
-#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
-#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
-#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
-#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
-#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
-#define crp_wordrcv CREG_IDX(RxDwordCnt)
-#define crp_wordsend CREG_IDX(TxDwordCnt)
-#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
-
-/* these are the (few) counters that are not port-specific */
-#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
- QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
-#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
-#define cr_lbint CREG_DEVIDX(LBIntCnt)
-#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
-#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
-#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
-#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
-#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
-
-/* no chip register for # of IB ports supported, so define */
-#define NUM_IB_PORTS 2
-
-/* 1 VL15 buffer per hardware IB port, no register for this, so define */
-#define NUM_VL15_BUFS NUM_IB_PORTS
-
-/*
- * context 0 and 1 are special, and there is no chip register that
- * defines this value, so we have to define it here.
- * These are all allocated to either 0 or 1 for single port
- * hardware configuration, otherwise each gets half
- */
-#define KCTXT0_EGRCNT 2048
-
-/* values for vl and port fields in PBC, 7322-specific */
-#define PBC_PORT_SEL_LSB 26
-#define PBC_PORT_SEL_RMASK 1
-#define PBC_VL_NUM_LSB 27
-#define PBC_VL_NUM_RMASK 7
-#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
-#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
-
-static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
- [IB_RATE_2_5_GBPS] = 16,
- [IB_RATE_5_GBPS] = 8,
- [IB_RATE_10_GBPS] = 4,
- [IB_RATE_20_GBPS] = 2,
- [IB_RATE_30_GBPS] = 2,
- [IB_RATE_40_GBPS] = 1
-};
-
-#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
-#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
-
-/* link training states, from IBC */
-#define IB_7322_LT_STATE_DISABLED 0x00
-#define IB_7322_LT_STATE_LINKUP 0x01
-#define IB_7322_LT_STATE_POLLACTIVE 0x02
-#define IB_7322_LT_STATE_POLLQUIET 0x03
-#define IB_7322_LT_STATE_SLEEPDELAY 0x04
-#define IB_7322_LT_STATE_SLEEPQUIET 0x05
-#define IB_7322_LT_STATE_CFGDEBOUNCE 0x08
-#define IB_7322_LT_STATE_CFGRCVFCFG 0x09
-#define IB_7322_LT_STATE_CFGWAITRMT 0x0a
-#define IB_7322_LT_STATE_CFGIDLE 0x0b
-#define IB_7322_LT_STATE_RECOVERRETRAIN 0x0c
-#define IB_7322_LT_STATE_TXREVLANES 0x0d
-#define IB_7322_LT_STATE_RECOVERWAITRMT 0x0e
-#define IB_7322_LT_STATE_RECOVERIDLE 0x0f
-#define IB_7322_LT_STATE_CFGENH 0x10
-#define IB_7322_LT_STATE_CFGTEST 0x11
-#define IB_7322_LT_STATE_CFGWAITRMTTEST 0x12
-#define IB_7322_LT_STATE_CFGWAITENH 0x13
-
-/* link state machine states from IBC */
-#define IB_7322_L_STATE_DOWN 0x0
-#define IB_7322_L_STATE_INIT 0x1
-#define IB_7322_L_STATE_ARM 0x2
-#define IB_7322_L_STATE_ACTIVE 0x3
-#define IB_7322_L_STATE_ACT_DEFER 0x4
-
-static const u8 qib_7322_physportstate[0x20] = {
- [IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
- [IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
- [IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
- [IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
- [IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
- [IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
- [IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7322_LT_STATE_CFGRCVFCFG] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7322_LT_STATE_CFGWAITRMT] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
- [IB_7322_LT_STATE_RECOVERRETRAIN] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [IB_7322_LT_STATE_RECOVERWAITRMT] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [IB_7322_LT_STATE_RECOVERIDLE] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
- [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7322_LT_STATE_CFGWAITRMTTEST] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7322_LT_STATE_CFGWAITENH] =
- IB_PHYSPORTSTATE_CFG_WAIT_ENH,
- [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
-};
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-struct qib_irq_notify {
- int rcv;
- void *arg;
- struct irq_affinity_notify notify;
-};
-#endif
-
-struct qib_chip_specific {
- u64 __iomem *cregbase;
- u64 *cntrs;
- spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
- spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
- u64 main_int_mask; /* clear bits which have dedicated handlers */
- u64 int_enable_mask; /* for per port interrupts in single port mode */
- u64 errormask;
- u64 hwerrmask;
- u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
- u64 gpio_mask; /* shadow the gpio mask register */
- u64 extctrl; /* shadow the gpio output enable, etc... */
- u32 ncntrs;
- u32 nportcntrs;
- u32 cntrnamelen;
- u32 portcntrnamelen;
- u32 numctxts;
- u32 rcvegrcnt;
- u32 updthresh; /* current AvailUpdThld */
- u32 updthresh_dflt; /* default AvailUpdThld */
- u32 r1;
- int irq;
- u32 num_msix_entries;
- u32 sdmabufcnt;
- u32 lastbuf_for_pio;
- u32 stay_in_freeze;
- u32 recovery_ports_initted;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- u32 dca_ctrl;
- int rhdr_cpu[18];
- int sdma_cpu[2];
- u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
-#endif
- struct qib_msix_entry *msix_entries;
- unsigned long *sendchkenable;
- unsigned long *sendgrhchk;
- unsigned long *sendibchk;
- u32 rcvavail_timeout[18];
- char emsgbuf[128]; /* for device error interrupt msg buffer */
-};
-
-/* Table of entries in "human readable" form Tx Emphasis. */
-struct txdds_ent {
- u8 amp;
- u8 pre;
- u8 main;
- u8 post;
-};
-
-struct vendor_txdds_ent {
- u8 oui[QSFP_VOUI_LEN];
- u8 *partnum;
- struct txdds_ent sdr;
- struct txdds_ent ddr;
- struct txdds_ent qdr;
-};
-
-static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
-
-#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
-#define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */
-#define TXDDS_MFG_SZ 2 /* number of mfg tx settings entries */
-#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
-
-#define H1_FORCE_VAL 8
-#define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */
-#define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */
-
-/* The static and dynamic registers are paired, and the pairs indexed by spd */
-#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
- + ((spd) * 2))
-
-#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
-#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
-#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
-#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
-#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
-
-struct qib_chippport_specific {
- u64 __iomem *kpregbase;
- u64 __iomem *cpregbase;
- u64 *portcntrs;
- struct qib_pportdata *ppd;
- wait_queue_head_t autoneg_wait;
- struct delayed_work autoneg_work;
- struct delayed_work ipg_work;
- struct timer_list chase_timer;
- /*
- * these 5 fields are used to establish deltas for IB symbol
- * errors and linkrecovery errors. They can be reported on
- * some chips during link negotiation prior to INIT, and with
- * DDR when faking DDR negotiations with non-IBTA switches.
- * The chip counters are adjusted at driver unload if there is
- * a non-zero delta.
- */
- u64 ibdeltainprog;
- u64 ibsymdelta;
- u64 ibsymsnap;
- u64 iblnkerrdelta;
- u64 iblnkerrsnap;
- u64 iblnkdownsnap;
- u64 iblnkdowndelta;
- u64 ibmalfdelta;
- u64 ibmalfsnap;
- u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
- u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
- unsigned long qdr_dfe_time;
- unsigned long chase_end;
- u32 autoneg_tries;
- u32 recovery_init;
- u32 qdr_dfe_on;
- u32 qdr_reforce;
- /*
- * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
- * entry zero is unused, to simplify indexing
- */
- u8 h1_val;
- u8 no_eep; /* txselect table index to use if no qsfp info */
- u8 ipg_tries;
- u8 ibmalfusesnap;
- struct qib_qsfp_data qsfp_data;
- char epmsgbuf[192]; /* for port error interrupt msg buffer */
- char sdmamsgbuf[192]; /* for per-port sdma error messages */
-};
-
-static struct {
- const char *name;
- irq_handler_t handler;
- int lsb;
- int port; /* 0 if not port-specific, else port # */
- int dca;
-} irq_table[] = {
- { "", qib_7322intr, -1, 0, 0 },
- { " (buf avail)", qib_7322bufavail,
- SYM_LSB(IntStatus, SendBufAvail), 0, 0},
- { " (sdma 0)", sdma_intr,
- SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
- { " (sdma 1)", sdma_intr,
- SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
- { " (sdmaI 0)", sdma_idle_intr,
- SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1},
- { " (sdmaI 1)", sdma_idle_intr,
- SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1},
- { " (sdmaP 0)", sdma_progress_intr,
- SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
- { " (sdmaP 1)", sdma_progress_intr,
- SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
- { " (sdmaC 0)", sdma_cleanup_intr,
- SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
- { " (sdmaC 1)", sdma_cleanup_intr,
- SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 , 0},
-};
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-
-static const struct dca_reg_map {
- int shadow_inx;
- int lsb;
- u64 mask;
- u16 regno;
-} dca_rcvhdr_reg_map[] = {
- { 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
- ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) },
- { 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
- ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) },
- { 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
- ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) },
- { 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
- ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) },
- { 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
- ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) },
- { 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
- ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) },
- { 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
- ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) },
- { 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
- ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) },
- { 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
- ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) },
- { 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
- ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) },
- { 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
- ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) },
- { 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
- ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) },
- { 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
- ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) },
- { 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
- ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) },
- { 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
- ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) },
- { 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
- ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) },
- { 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
- ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) },
- { 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
- ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) },
-};
-#endif
-
-/* ibcctrl bits */
-#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
-/* cycle through TS1/TS2 till OK */
-#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
-/* wait for TS1, then go on */
-#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
-#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
-
-#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
-#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
-#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
-
-#define BLOB_7322_IBCHG 0x101
-
-static inline void qib_write_kreg(const struct qib_devdata *dd,
- const u32 regno, u64 value);
-static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
-static void write_7322_initregs(struct qib_devdata *);
-static void write_7322_init_portregs(struct qib_pportdata *);
-static void setup_7322_link_recovery(struct qib_pportdata *, u32);
-static void check_7322_rxe_status(struct qib_pportdata *);
-static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-static void qib_setup_dca(struct qib_devdata *dd);
-static void setup_dca_notifier(struct qib_devdata *dd,
- struct qib_msix_entry *m);
-static void reset_dca_notifier(struct qib_devdata *dd,
- struct qib_msix_entry *m);
-#endif
-
-/**
- * qib_read_ureg32 - read 32-bit virtualized per-context register
- * @dd: device
- * @regno: register number
- * @ctxt: context number
- *
- * Return the contents of a register that is virtualized to be per context.
- * Returns 0 on errors (not distinguishable from valid contents at
- * runtime; we may add a separate error variable at some point).
- */
-static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
- enum qib_ureg regno, int ctxt)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return 0;
- return readl(regno + (u64 __iomem *)(
- (dd->ureg_align * ctxt) + (dd->userbase ?
- (char __iomem *)dd->userbase :
- (char __iomem *)dd->kregbase + dd->uregbase)));
-}
-
-/**
- * qib_read_ureg - read virtualized per-context register
- * @dd: device
- * @regno: register number
- * @ctxt: context number
- *
- * Return the contents of a register that is virtualized to be per context.
- * Returns 0 on errors (not distinguishable from valid contents at
- * runtime; we may add a separate error variable at some point).
- */
-static inline u64 qib_read_ureg(const struct qib_devdata *dd,
- enum qib_ureg regno, int ctxt)
-{
-
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return 0;
- return readq(regno + (u64 __iomem *)(
- (dd->ureg_align * ctxt) + (dd->userbase ?
- (char __iomem *)dd->userbase :
- (char __iomem *)dd->kregbase + dd->uregbase)));
-}
-
-/**
- * qib_write_ureg - write virtualized per-context register
- * @dd: device
- * @regno: register number
- * @value: value
- * @ctxt: context
- *
- * Write the contents of a register that is virtualized to be per context.
- */
-static inline void qib_write_ureg(const struct qib_devdata *dd,
- enum qib_ureg regno, u64 value, int ctxt)
-{
- u64 __iomem *ubase;
-
- if (dd->userbase)
- ubase = (u64 __iomem *)
- ((char __iomem *) dd->userbase +
- dd->ureg_align * ctxt);
- else
- ubase = (u64 __iomem *)
- (dd->uregbase +
- (char __iomem *) dd->kregbase +
- dd->ureg_align * ctxt);
-
- if (dd->kregbase && (dd->flags & QIB_PRESENT))
- writeq(value, &ubase[regno]);
-}
-
-static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
- const u32 regno)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return -1;
- return readl((u32 __iomem *) &dd->kregbase[regno]);
-}
-
-static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
- const u32 regno)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return -1;
- return readq(&dd->kregbase[regno]);
-}
-
-static inline void qib_write_kreg(const struct qib_devdata *dd,
- const u32 regno, u64 value)
-{
- if (dd->kregbase && (dd->flags & QIB_PRESENT))
- writeq(value, &dd->kregbase[regno]);
-}
-
-/*
- * not many sanity checks for the port-specific kernel register routines,
- * since they are only used when it's known to be safe.
-*/
-static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
- const u16 regno)
-{
- if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
- return 0ULL;
- return readq(&ppd->cpspec->kpregbase[regno]);
-}
-
-static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
- const u16 regno, u64 value)
-{
- if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
- (ppd->dd->flags & QIB_PRESENT))
- writeq(value, &ppd->cpspec->kpregbase[regno]);
-}
-
-/**
- * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
- * @dd: the qlogic_ib device
- * @regno: the register number to write
- * @ctxt: the context containing the register
- * @value: the value to write
- */
-static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
- const u16 regno, unsigned ctxt,
- u64 value)
-{
- qib_write_kreg(dd, regno + ctxt, value);
-}
-
-static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
-{
- if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
- return 0;
- return readq(&dd->cspec->cregbase[regno]);
-
-
-}
-
-static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
-{
- if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
- return 0;
- return readl(&dd->cspec->cregbase[regno]);
-
-
-}
-
-static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
- u16 regno, u64 value)
-{
- if (ppd->cpspec && ppd->cpspec->cpregbase &&
- (ppd->dd->flags & QIB_PRESENT))
- writeq(value, &ppd->cpspec->cpregbase[regno]);
-}
-
-static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
- u16 regno)
-{
- if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
- !(ppd->dd->flags & QIB_PRESENT))
- return 0;
- return readq(&ppd->cpspec->cpregbase[regno]);
-}
-
-static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
- u16 regno)
-{
- if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
- !(ppd->dd->flags & QIB_PRESENT))
- return 0;
- return readl(&ppd->cpspec->cpregbase[regno]);
-}
-
-/* bits in Control register */
-#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
-#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
-
-/* bits in general interrupt regs */
-#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
-#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
-#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
-#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
-#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
-#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
-#define QIB_I_C_ERROR INT_MASK(Err)
-
-#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
-#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
-#define QIB_I_GPIO INT_MASK(AssertGPIO)
-#define QIB_I_P_SDMAINT(pidx) \
- (INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
- INT_MASK_P(SDmaProgress, pidx) | \
- INT_MASK_PM(SDmaCleanupDone, pidx))
-
-/* Interrupt bits that are "per port" */
-#define QIB_I_P_BITSEXTANT(pidx) \
- (INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
- INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
- INT_MASK_P(SDmaProgress, pidx) | \
- INT_MASK_PM(SDmaCleanupDone, pidx))
-
-/* Interrupt bits that are common to a device */
-/* currently unused: QIB_I_SPIOSENT */
-#define QIB_I_C_BITSEXTANT \
- (QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
- QIB_I_SPIOSENT | \
- QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
-
-#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
- QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
-
-/*
- * Error bits that are "per port".
- */
-#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
-#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
-#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
-#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
-#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
-#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
-#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
-#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
-#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
-#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
-#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
-#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
-#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
-#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
-#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
-#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
-#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
-#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
-#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
-#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
-#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
-#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
-#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
-#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
-#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
-#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
-#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
-#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
-
-#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
-#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
-#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
-#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
-#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
-#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
-#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
-#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
-#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
-#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
-#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
-
-/* Error bits that are common to a device */
-#define QIB_E_RESET ERR_MASK(ResetNegated)
-#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
-#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
-
-
-/*
- * Per chip (rather than per-port) errors. Most either do
- * nothing but trigger a print (because they self-recover, or
- * always occur in tandem with other errors that handle the
- * issue), or because they indicate errors with no recovery,
- * but we want to know that they happened.
- */
-#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
-#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
-#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
-#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
-#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
-#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
-#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
-#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
-
-/* SDMA chip errors (not per port)
- * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
- * the SDMAHALT error immediately, so we just print the dup error via the
- * E_AUTO mechanism. This is true of most of the per-port fatal errors
- * as well, but since this is port-independent, by definition, it's
- * handled a bit differently. SDMA_VL15 and SDMA_WRONG_PORT are per
- * packet send errors, and so are handled in the same manner as other
- * per-packet errors.
- */
-#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
-#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
-#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
-
-/*
- * Below functionally equivalent to legacy QLOGIC_IB_E_PKTERRS
- * it is used to print "common" packet errors.
- */
-#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
- QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
- QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
- QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
- QIB_E_P_REBP)
-
-/* Error Bits that Packet-related (Receive, per-port) */
-#define QIB_E_P_RPKTERRS (\
- QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
- QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
- QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
- QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
- QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
- QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
-
-/*
- * Error bits that are Send-related (per port)
- * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
- * All of these potentially need to have a buffer disarmed
- */
-#define QIB_E_P_SPKTERRS (\
- QIB_E_P_SUNEXP_PKTNUM |\
- QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
- QIB_E_P_SMAXPKTLEN |\
- QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
- QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
- QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
-
-#define QIB_E_SPKTERRS ( \
- QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
- ERR_MASK_N(SendUnsupportedVLErr) | \
- QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
-
-#define QIB_E_P_SDMAERRS ( \
- QIB_E_P_SDMAHALT | \
- QIB_E_P_SDMADESCADDRMISALIGN | \
- QIB_E_P_SDMAUNEXPDATA | \
- QIB_E_P_SDMAMISSINGDW | \
- QIB_E_P_SDMADWEN | \
- QIB_E_P_SDMARPYTAG | \
- QIB_E_P_SDMA1STDESC | \
- QIB_E_P_SDMABASE | \
- QIB_E_P_SDMATAILOUTOFBOUND | \
- QIB_E_P_SDMAOUTOFBOUND | \
- QIB_E_P_SDMAGENMISMATCH)
-
-/*
- * This sets some bits more than once, but makes it more obvious which
- * bits are not handled under other categories, and the repeat definition
- * is not a problem.
- */
-#define QIB_E_P_BITSEXTANT ( \
- QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
- QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
- QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
- QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
- )
-
-/*
- * These are errors that can occur when the link
- * changes state while a packet is being sent or received. This doesn't
- * cover things like EBP or VCRC that can be the result of a sending
- * having the link change state, so we receive a "known bad" packet.
- * All of these are "per port", so renamed:
- */
-#define QIB_E_P_LINK_PKTERRS (\
- QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
- QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
- QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
- QIB_E_P_RUNEXPCHAR)
-
-/*
- * This sets some bits more than once, but makes it more obvious which
- * bits are not handled under other categories (such as QIB_E_SPKTERRS),
- * and the repeat definition is not a problem.
- */
-#define QIB_E_C_BITSEXTANT (\
- QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
- QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
- QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
-
-/* Likewise Neuter E_SPKT_ERRS_IGNORE */
-#define E_SPKT_ERRS_IGNORE 0
-
-#define QIB_EXTS_MEMBIST_DISABLED \
- SYM_MASK(EXTStatus, MemBISTDisabled)
-#define QIB_EXTS_MEMBIST_ENDTEST \
- SYM_MASK(EXTStatus, MemBISTEndTest)
-
-#define QIB_E_SPIOARMLAUNCH \
- ERR_MASK(SendArmLaunchErr)
-
-#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
-#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
-
-/*
- * IBTA_1_2 is set when multiple speeds are enabled (normal),
- * and also if forced QDR (only QDR enabled). It's enabled for the
- * forced QDR case so that scrambling will be enabled by the TS3
- * exchange, when supported by both sides of the link.
- */
-#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
-#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
-#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
-#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
-#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
-#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
- SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
-#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
-
-#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
-#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
-
-#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
-#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
-#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
-
-#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
-#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
-#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
- SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
-#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
- SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
-#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
-
-#define IBA7322_REDIRECT_VEC_PER_REG 12
-
-#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
-#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
-#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
-#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
-#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
-
-#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
-
-#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
- .msg = #fldname , .sz = sizeof(#fldname) }
-#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
- fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
-static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
- HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
- HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
- HWE_AUTO(PCIESerdesPClkNotDetect),
- HWE_AUTO(PowerOnBISTFailed),
- HWE_AUTO(TempsenseTholdReached),
- HWE_AUTO(MemoryErr),
- HWE_AUTO(PCIeBusParityErr),
- HWE_AUTO(PcieCplTimeout),
- HWE_AUTO(PciePoisonedTLP),
- HWE_AUTO_P(SDmaMemReadErr, 1),
- HWE_AUTO_P(SDmaMemReadErr, 0),
- HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
- HWE_AUTO_P(IBCBusToSPCParityErr, 1),
- HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
- HWE_AUTO(statusValidNoEop),
- HWE_AUTO(LATriggered),
- { .mask = 0, .sz = 0 }
-};
-
-#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
- .msg = #fldname, .sz = sizeof(#fldname) }
-#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
- .msg = #fldname, .sz = sizeof(#fldname) }
-static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
- E_AUTO(RcvEgrFullErr),
- E_AUTO(RcvHdrFullErr),
- E_AUTO(ResetNegated),
- E_AUTO(HardwareErr),
- E_AUTO(InvalidAddrErr),
- E_AUTO(SDmaVL15Err),
- E_AUTO(SBufVL15MisUseErr),
- E_AUTO(InvalidEEPCmd),
- E_AUTO(RcvContextShareErr),
- E_AUTO(SendVLMismatchErr),
- E_AUTO(SendArmLaunchErr),
- E_AUTO(SendSpecialTriggerErr),
- E_AUTO(SDmaWrongPortErr),
- E_AUTO(SDmaBufMaskDuplicateErr),
- { .mask = 0, .sz = 0 }
-};
-
-static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
- E_P_AUTO(IBStatusChanged),
- E_P_AUTO(SHeadersErr),
- E_P_AUTO(VL15BufMisuseErr),
- /*
- * SDmaHaltErr is not really an error, make it clearer;
- */
- {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
- .sz = 11},
- E_P_AUTO(SDmaDescAddrMisalignErr),
- E_P_AUTO(SDmaUnexpDataErr),
- E_P_AUTO(SDmaMissingDwErr),
- E_P_AUTO(SDmaDwEnErr),
- E_P_AUTO(SDmaRpyTagErr),
- E_P_AUTO(SDma1stDescErr),
- E_P_AUTO(SDmaBaseErr),
- E_P_AUTO(SDmaTailOutOfBoundErr),
- E_P_AUTO(SDmaOutOfBoundErr),
- E_P_AUTO(SDmaGenMismatchErr),
- E_P_AUTO(SendBufMisuseErr),
- E_P_AUTO(SendUnsupportedVLErr),
- E_P_AUTO(SendUnexpectedPktNumErr),
- E_P_AUTO(SendDroppedDataPktErr),
- E_P_AUTO(SendDroppedSmpPktErr),
- E_P_AUTO(SendPktLenErr),
- E_P_AUTO(SendUnderRunErr),
- E_P_AUTO(SendMaxPktLenErr),
- E_P_AUTO(SendMinPktLenErr),
- E_P_AUTO(RcvIBLostLinkErr),
- E_P_AUTO(RcvHdrErr),
- E_P_AUTO(RcvHdrLenErr),
- E_P_AUTO(RcvBadTidErr),
- E_P_AUTO(RcvBadVersionErr),
- E_P_AUTO(RcvIBFlowErr),
- E_P_AUTO(RcvEBPErr),
- E_P_AUTO(RcvUnsupportedVLErr),
- E_P_AUTO(RcvUnexpectedCharErr),
- E_P_AUTO(RcvShortPktLenErr),
- E_P_AUTO(RcvLongPktLenErr),
- E_P_AUTO(RcvMaxPktLenErr),
- E_P_AUTO(RcvMinPktLenErr),
- E_P_AUTO(RcvICRCErr),
- E_P_AUTO(RcvVCRCErr),
- E_P_AUTO(RcvFormatErr),
- { .mask = 0, .sz = 0 }
-};
-
-/*
- * Below generates "auto-message" for interrupts not specific to any port or
- * context
- */
-#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
- .msg = #fldname, .sz = sizeof(#fldname) }
-/* Below generates "auto-message" for interrupts specific to a port */
-#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
- SYM_LSB(IntMask, fldname##Mask##_0), \
- SYM_LSB(IntMask, fldname##Mask##_1)), \
- .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
-/* For some reason, the SerDesTrimDone bits are reversed */
-#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
- SYM_LSB(IntMask, fldname##Mask##_1), \
- SYM_LSB(IntMask, fldname##Mask##_0)), \
- .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
-/*
- * Below generates "auto-message" for interrupts specific to a context,
- * with ctxt-number appended
- */
-#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
- SYM_LSB(IntMask, fldname##0IntMask), \
- SYM_LSB(IntMask, fldname##17IntMask)), \
- .msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
-
-static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
- INTR_AUTO_P(SDmaInt),
- INTR_AUTO_P(SDmaProgressInt),
- INTR_AUTO_P(SDmaIdleInt),
- INTR_AUTO_P(SDmaCleanupDone),
- INTR_AUTO_C(RcvUrg),
- INTR_AUTO_P(ErrInt),
- INTR_AUTO(ErrInt), /* non-port-specific errs */
- INTR_AUTO(AssertGPIOInt),
- INTR_AUTO_P(SendDoneInt),
- INTR_AUTO(SendBufAvailInt),
- INTR_AUTO_C(RcvAvail),
- { .mask = 0, .sz = 0 }
-};
-
-#define TXSYMPTOM_AUTO_P(fldname) \
- { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
- .msg = #fldname, .sz = sizeof(#fldname) }
-static const struct qib_hwerror_msgs hdrchk_msgs[] = {
- TXSYMPTOM_AUTO_P(NonKeyPacket),
- TXSYMPTOM_AUTO_P(GRHFail),
- TXSYMPTOM_AUTO_P(PkeyFail),
- TXSYMPTOM_AUTO_P(QPFail),
- TXSYMPTOM_AUTO_P(SLIDFail),
- TXSYMPTOM_AUTO_P(RawIPV6),
- TXSYMPTOM_AUTO_P(PacketTooSmall),
- { .mask = 0, .sz = 0 }
-};
-
-#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
-
-/*
- * Called when we might have an error that is specific to a particular
- * PIO buffer, and may need to cancel that buffer so it can be re-used;
- * no forced update of pioavail is needed here
- */
-static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- u32 i;
- int any;
- u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
- u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
- unsigned long sbuf[4];
-
- /*
- * It's possible that sendbuffererror could have bits set; might
- * have already done this as a result of hardware error handling.
- */
- any = 0;
- for (i = 0; i < regcnt; ++i) {
- sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
- if (sbuf[i]) {
- any = 1;
- qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
- }
- }
-
- if (any)
- qib_disarm_piobufs_set(dd, sbuf, piobcnt);
-}
-
-/* No txe_recover yet, if ever */
-
-/* No decode_errors yet */
-static void err_decode(char *msg, size_t len, u64 errs,
- const struct qib_hwerror_msgs *msp)
-{
- u64 these, lmask;
- int took, multi, n = 0;
-
- while (errs && msp && msp->mask) {
- multi = (msp->mask & (msp->mask - 1));
- while (errs & msp->mask) {
- these = (errs & msp->mask);
- lmask = (these & (these - 1)) ^ these;
- if (len) {
- if (n++) {
- /* separate the strings */
- *msg++ = ',';
- len--;
- }
- BUG_ON(!msp->sz);
- /* msp->sz counts the nul */
- took = min_t(size_t, msp->sz - (size_t)1, len);
- memcpy(msg, msp->msg, took);
- len -= took;
- msg += took;
- if (len)
- *msg = '\0';
- }
- errs &= ~lmask;
- if (len && multi) {
- /* More than one bit in this mask */
- int idx = -1;
-
- while (lmask & msp->mask) {
- ++idx;
- lmask >>= 1;
- }
- took = scnprintf(msg, len, "_%d", idx);
- len -= took;
- msg += took;
- }
- }
- ++msp;
- }
- /* If some bits are left, show in hex. */
- if (len && errs)
- snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
- (unsigned long long) errs);
-}
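
For readers skimming the diff, the decode loop above is easier to follow in isolation. Below is a rough user-space sketch of the same idea: a table of masks and names, a per-bit "_N" suffix for multi-bit masks, and any leftover bits dumped in hex. The table contents and buffer bound are invented for the example.

#include <stdio.h>
#include <stdint.h>

struct err_msg { uint64_t mask; const char *msg; };

static const struct err_msg table[] = {		/* invented masks/names */
	{ 0x3ULL << 0, "SDmaMemReadErr" },	/* two bits, one per port */
	{ 0x1ULL << 4, "PowerOnBISTFailed" },
	{ 0, NULL }
};

static void decode(char *buf, size_t len, uint64_t errs)
{
	size_t n = 0;
	const struct err_msg *mp;

	buf[0] = '\0';
	/* toy bound: every append below is well under 32 bytes */
	for (mp = table; errs && mp->mask && n + 32 < len; mp++) {
		while ((errs & mp->mask) && n + 32 < len) {
			uint64_t these = errs & mp->mask;
			uint64_t lowbit = (these & (these - 1)) ^ these;

			if (mp->mask & (mp->mask - 1))	/* multi-bit mask */
				n += snprintf(buf + n, len - n, "%s%s_%d",
					      n ? "," : "", mp->msg,
					      __builtin_ctzll(lowbit) -
					      __builtin_ctzll(mp->mask));
			else
				n += snprintf(buf + n, len - n, "%s%s",
					      n ? "," : "", mp->msg);
			errs &= ~lowbit;
		}
	}
	if (errs && n + 32 < len)	/* leftover bits shown in hex */
		snprintf(buf + n, len - n, "%sMORE:%llX", n ? "," : "",
			 (unsigned long long)errs);
}

int main(void)
{
	char buf[128];

	decode(buf, sizeof(buf), (1ULL << 1) | (1ULL << 4) | (1ULL << 9));
	printf("%s\n", buf);	/* SDmaMemReadErr_1,PowerOnBISTFailed,MORE:200 */
	return 0;
}
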
-
-/* only called if r1 set */
-static void flush_fifo(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- u32 __iomem *piobuf;
- u32 bufn;
- u32 *hdr;
- u64 pbc;
- const unsigned hdrwords = 7;
- static struct qib_ib_header ibhdr = {
- .lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
- .lrh[1] = IB_LID_PERMISSIVE,
- .lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
- .lrh[3] = IB_LID_PERMISSIVE,
- .u.oth.bth[0] = cpu_to_be32(
- (IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
- .u.oth.bth[1] = cpu_to_be32(0),
- .u.oth.bth[2] = cpu_to_be32(0),
- .u.oth.u.ud.deth[0] = cpu_to_be32(0),
- .u.oth.u.ud.deth[1] = cpu_to_be32(0),
- };
-
- /*
- * Send a dummy VL15 packet to flush the launch FIFO.
- * This will not actually be sent since the TxeBypassIbc bit is set.
- */
- pbc = PBC_7322_VL15_SEND |
- (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
- (hdrwords + SIZE_OF_CRC);
- piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
- if (!piobuf)
- return;
- writeq(pbc, piobuf);
- hdr = (u32 *) &ibhdr;
- if (dd->flags & QIB_PIO_FLUSH_WC) {
- qib_flush_wc();
- qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
- qib_flush_wc();
- __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
- qib_flush_wc();
- } else
- qib_pio_copy(piobuf + 2, hdr, hdrwords);
- qib_sendbuf_done(dd, bufn);
-}
-
-/*
- * This is called with interrupts disabled and sdma_lock held.
- */
-static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 set_sendctrl = 0;
- u64 clr_sendctrl = 0;
-
- if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
- set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
- else
- clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
-
- if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
- set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
- else
- clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
-
- if (op & QIB_SDMA_SENDCTRL_OP_HALT)
- set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
- else
- clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
-
- if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
- set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
- SYM_MASK(SendCtrl_0, TxeAbortIbc) |
- SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
- else
- clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
- SYM_MASK(SendCtrl_0, TxeAbortIbc) |
- SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
-
- spin_lock(&dd->sendctrl_lock);
-
- /* If we are draining everything, block sends first */
- if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
- ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
- qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
-
- ppd->p_sendctrl |= set_sendctrl;
- ppd->p_sendctrl &= ~clr_sendctrl;
-
- if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
- qib_write_kreg_port(ppd, krp_sendctrl,
- ppd->p_sendctrl |
- SYM_MASK(SendCtrl_0, SDmaCleanup));
- else
- qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
-
- if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
- ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
- qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
-
- spin_unlock(&dd->sendctrl_lock);
-
- if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
- flush_fifo(ppd);
-}
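
The function above follows a common set-mask/clear-mask idiom: every op flag contributes a bit either to set_sendctrl or to clr_sendctrl, and the cached shadow copy is then updated once under the lock. A tiny standalone illustration of that idiom, with invented flag and register-bit values:

#include <stdio.h>
#include <stdint.h>

#define OP_ENABLE	0x1		/* invented op flags */
#define OP_HALT		0x2

#define CTL_ENABLE	(1ULL << 0)	/* invented register bits */
#define CTL_HALT	(1ULL << 1)

int main(void)
{
	uint64_t shadow = CTL_HALT;	/* previous state: halted */
	uint64_t set = 0, clr = 0;
	unsigned op = OP_ENABLE;	/* enable, and implicitly un-halt */

	if (op & OP_ENABLE)
		set |= CTL_ENABLE;
	else
		clr |= CTL_ENABLE;

	if (op & OP_HALT)
		set |= CTL_HALT;
	else
		clr |= CTL_HALT;

	shadow |= set;			/* apply both edits to the shadow copy */
	shadow &= ~clr;
	printf("sendctrl shadow = 0x%llx\n", (unsigned long long)shadow); /* 0x1 */
	return 0;
}
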
-
-static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
-{
- __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
-}
-
-static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
-{
- /*
- * Set SendDmaLenGen, first with the generation count MSB clear and
- * then with it set, to enable generation checking and load the
- * internal generation counter.
- */
- qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
- qib_write_kreg_port(ppd, krp_senddmalengen,
- ppd->sdma_descq_cnt |
- (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
-}
-
-/*
- * Must be called with sdma_lock held, or before init finished.
- */
-static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
-{
- /* Commit writes to memory and advance the tail on the chip */
- wmb();
- ppd->sdma_descq_tail = tail;
- qib_write_kreg_port(ppd, krp_senddmatail, tail);
-}
-
-/*
- * This is called with interrupts disabled and sdma_lock held.
- */
-static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
-{
- /*
- * Drain all FIFOs.
- * The hardware doesn't require this but we do it so that verbs
- * and user applications don't wait for link active to send stale
- * data.
- */
- sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
-
- qib_sdma_7322_setlengen(ppd);
- qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
- ppd->sdma_head_dma[0] = 0;
- qib_7322_sdma_sendctrl(ppd,
- ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
-}
-
-#define DISABLES_SDMA ( \
- QIB_E_P_SDMAHALT | \
- QIB_E_P_SDMADESCADDRMISALIGN | \
- QIB_E_P_SDMAMISSINGDW | \
- QIB_E_P_SDMADWEN | \
- QIB_E_P_SDMARPYTAG | \
- QIB_E_P_SDMA1STDESC | \
- QIB_E_P_SDMABASE | \
- QIB_E_P_SDMATAILOUTOFBOUND | \
- QIB_E_P_SDMAOUTOFBOUND | \
- QIB_E_P_SDMAGENMISMATCH)
-
-static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
-{
- unsigned long flags;
- struct qib_devdata *dd = ppd->dd;
-
- errs &= QIB_E_P_SDMAERRS;
- err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
- errs, qib_7322p_error_msgs);
-
- if (errs & QIB_E_P_SDMAUNEXPDATA)
- qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
- ppd->port);
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
-
- if (errs != QIB_E_P_SDMAHALT) {
- /* SDMA errors have QIB_E_P_SDMAHALT and another bit set */
- qib_dev_porterr(dd, ppd->port,
- "SDMA %s 0x%016llx %s\n",
- qib_sdma_state_names[ppd->sdma_state.current_state],
- errs, ppd->cpspec->sdmamsgbuf);
- dump_sdma_7322_state(ppd);
- }
-
- switch (ppd->sdma_state.current_state) {
- case qib_sdma_state_s00_hw_down:
- break;
-
- case qib_sdma_state_s10_hw_start_up_wait:
- if (errs & QIB_E_P_SDMAHALT)
- __qib_sdma_process_event(ppd,
- qib_sdma_event_e20_hw_started);
- break;
-
- case qib_sdma_state_s20_idle:
- break;
-
- case qib_sdma_state_s30_sw_clean_up_wait:
- break;
-
- case qib_sdma_state_s40_hw_clean_up_wait:
- if (errs & QIB_E_P_SDMAHALT)
- __qib_sdma_process_event(ppd,
- qib_sdma_event_e50_hw_cleaned);
- break;
-
- case qib_sdma_state_s50_hw_halt_wait:
- if (errs & QIB_E_P_SDMAHALT)
- __qib_sdma_process_event(ppd,
- qib_sdma_event_e60_hw_halted);
- break;
-
- case qib_sdma_state_s99_running:
- __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
- __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
- break;
- }
-
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-}
-
-/*
- * handle per-device errors (not per-port errors)
- */
-static noinline void handle_7322_errors(struct qib_devdata *dd)
-{
- char *msg;
- u64 iserr = 0;
- u64 errs;
- u64 mask;
- int log_idx;
-
- qib_stats.sps_errints++;
- errs = qib_read_kreg64(dd, kr_errstatus);
- if (!errs) {
- qib_devinfo(dd->pcidev,
- "device error interrupt, but no error bits set!\n");
- goto done;
- }
-
- /* don't report errors that are masked */
- errs &= dd->cspec->errormask;
- msg = dd->cspec->emsgbuf;
-
- /* do these first, they are most important */
- if (errs & QIB_E_HARDWARE) {
- *msg = '\0';
- qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
- } else
- for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
- if (errs & dd->eep_st_masks[log_idx].errs_to_log)
- qib_inc_eeprom_err(dd, log_idx, 1);
-
- if (errs & QIB_E_SPKTERRS) {
- qib_disarm_7322_senderrbufs(dd->pport);
- qib_stats.sps_txerrs++;
- } else if (errs & QIB_E_INVALIDADDR)
- qib_stats.sps_txerrs++;
- else if (errs & QIB_E_ARMLAUNCH) {
- qib_stats.sps_txerrs++;
- qib_disarm_7322_senderrbufs(dd->pport);
- }
- qib_write_kreg(dd, kr_errclear, errs);
-
- /*
- * The ones we mask off are handled specially below
- * or above. Also mask SDMADISABLED by default as it
- * is too chatty.
- */
- mask = QIB_E_HARDWARE;
- *msg = '\0';
-
- err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
- qib_7322error_msgs);
-
- /*
- * Getting reset is a tragedy for all ports. Mark the device
- * _and_ the ports as "offline" in a way meaningful to each.
- */
- if (errs & QIB_E_RESET) {
- int pidx;
-
- qib_dev_err(dd,
- "Got reset, requires re-init (unload and reload driver)\n");
- dd->flags &= ~QIB_INITTED; /* needs re-init */
- /* mark as having had error */
- *dd->devstatusp |= QIB_STATUS_HWERROR;
- for (pidx = 0; pidx < dd->num_pports; ++pidx)
- if (dd->pport[pidx].link_speed_supported)
- *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
- }
-
- if (*msg && iserr)
- qib_dev_err(dd, "%s error\n", msg);
-
- /*
- * If there were hdrq or egrfull errors, wake up any processes
- * waiting in poll. We used to try to check which contexts had
- * the overflow, but given the cost of that and the chip reads
- * to support it, it's better to just wake everybody up if we
- * get an overflow; waiters can poll again if it's not them.
- */
- if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
- qib_handle_urcv(dd, ~0U);
- if (errs & ERR_MASK(RcvEgrFullErr))
- qib_stats.sps_buffull++;
- else
- qib_stats.sps_hdrfull++;
- }
-
-done:
- return;
-}
-
-static void qib_error_tasklet(unsigned long data)
-{
- struct qib_devdata *dd = (struct qib_devdata *)data;
-
- handle_7322_errors(dd);
- qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
-}
-
-static void reenable_chase(unsigned long opaque)
-{
- struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
-
- ppd->cpspec->chase_timer.expires = 0;
- qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
- QLOGIC_IB_IBCC_LINKINITCMD_POLL);
-}
-
-static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
- u8 ibclt)
-{
- ppd->cpspec->chase_end = 0;
-
- if (!qib_chase)
- return;
-
- qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
- QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
- ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
- add_timer(&ppd->cpspec->chase_timer);
-}
-
-static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
-{
- u8 ibclt;
- unsigned long tnow;
-
- ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
-
- /*
- * Detect and handle the state chase issue, where we can
- * get stuck if we are unlucky on timing on both sides of
- * the link. If we are, we disable, set a timer, and
- * then re-enable.
- */
- switch (ibclt) {
- case IB_7322_LT_STATE_CFGRCVFCFG:
- case IB_7322_LT_STATE_CFGWAITRMT:
- case IB_7322_LT_STATE_TXREVLANES:
- case IB_7322_LT_STATE_CFGENH:
- tnow = jiffies;
- if (ppd->cpspec->chase_end &&
- time_after(tnow, ppd->cpspec->chase_end))
- disable_chase(ppd, tnow, ibclt);
- else if (!ppd->cpspec->chase_end)
- ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
- break;
- default:
- ppd->cpspec->chase_end = 0;
- break;
- }
-
- if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
- ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
- ibclt == IB_7322_LT_STATE_LINKUP) &&
- (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
- force_h1(ppd);
- ppd->cpspec->qdr_reforce = 1;
- if (!ppd->dd->cspec->r1)
- serdes_7322_los_enable(ppd, 0);
- } else if (ppd->cpspec->qdr_reforce &&
- (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
- (ibclt == IB_7322_LT_STATE_CFGENH ||
- ibclt == IB_7322_LT_STATE_CFGIDLE ||
- ibclt == IB_7322_LT_STATE_LINKUP))
- force_h1(ppd);
-
- if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
- ppd->link_speed_enabled == QIB_IB_QDR &&
- (ibclt == IB_7322_LT_STATE_CFGTEST ||
- ibclt == IB_7322_LT_STATE_CFGENH ||
- (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
- ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
- adj_tx_serdes(ppd);
-
- if (ibclt != IB_7322_LT_STATE_LINKUP) {
- u8 ltstate = qib_7322_phys_portstate(ibcst);
- u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
- LinkTrainingState);
- if (!ppd->dd->cspec->r1 &&
- pibclt == IB_7322_LT_STATE_LINKUP &&
- ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
- ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
- ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
- ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
- /* If the link went down (but not into recovery),
- * turn LOS back on */
- serdes_7322_los_enable(ppd, 1);
- if (!ppd->cpspec->qdr_dfe_on &&
- ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
- ppd->cpspec->qdr_dfe_on = 1;
- ppd->cpspec->qdr_dfe_time = 0;
- /* On link down, reenable QDR adaptation */
- qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
- ppd->dd->cspec->r1 ?
- QDR_STATIC_ADAPT_DOWN_R1 :
- QDR_STATIC_ADAPT_DOWN);
- pr_info(
- "IB%u:%u re-enabled QDR adaptation ibclt %x\n",
- ppd->dd->unit, ppd->port, ibclt);
- }
- }
-}
-
-static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
-
-/*
- * This is per-pport error handling.
- * It will likely get its own MSIx interrupt (one for each port,
- * although just a single handler).
- */
-static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
-{
- char *msg;
- u64 ignore_this_time = 0, iserr = 0, errs, fmask;
- struct qib_devdata *dd = ppd->dd;
-
- /* do this as soon as possible */
- fmask = qib_read_kreg64(dd, kr_act_fmask);
- if (!fmask)
- check_7322_rxe_status(ppd);
-
- errs = qib_read_kreg_port(ppd, krp_errstatus);
- if (!errs)
- qib_devinfo(dd->pcidev,
- "Port%d error interrupt, but no error bits set!\n",
- ppd->port);
- if (!fmask)
- errs &= ~QIB_E_P_IBSTATUSCHANGED;
- if (!errs)
- goto done;
-
- msg = ppd->cpspec->epmsgbuf;
- *msg = '\0';
-
- if (errs & ~QIB_E_P_BITSEXTANT) {
- err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
- errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
- if (!*msg)
- snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
- "no others");
- qib_dev_porterr(dd, ppd->port,
- "error interrupt with unknown errors 0x%016Lx set (and %s)\n",
- (errs & ~QIB_E_P_BITSEXTANT), msg);
- *msg = '\0';
- }
-
- if (errs & QIB_E_P_SHDR) {
- u64 symptom;
-
- /* determine cause, then write to clear */
- symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
- qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
- err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
- hdrchk_msgs);
- *msg = '\0';
- /* senderrbuf cleared in SPKTERRS below */
- }
-
- if (errs & QIB_E_P_SPKTERRS) {
- if ((errs & QIB_E_P_LINK_PKTERRS) &&
- !(ppd->lflags & QIBL_LINKACTIVE)) {
- /*
- * This can happen when trying to bring the link
- * up, but the IB link changes state at the "wrong"
- * time. The IB logic then complains that the packet
- * isn't valid. We don't want to confuse people, so
- * we just don't print them, except at debug
- */
- err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
- (errs & QIB_E_P_LINK_PKTERRS),
- qib_7322p_error_msgs);
- *msg = '\0';
- ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
- }
- qib_disarm_7322_senderrbufs(ppd);
- } else if ((errs & QIB_E_P_LINK_PKTERRS) &&
- !(ppd->lflags & QIBL_LINKACTIVE)) {
- /*
- * This can happen when SMA is trying to bring the link
- * up, but the IB link changes state at the "wrong" time.
- * The IB logic then complains that the packet isn't
- * valid. We don't want to confuse people, so we just
- * don't print them, except at debug
- */
- err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
- qib_7322p_error_msgs);
- ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
- *msg = '\0';
- }
-
- qib_write_kreg_port(ppd, krp_errclear, errs);
-
- errs &= ~ignore_this_time;
- if (!errs)
- goto done;
-
- if (errs & QIB_E_P_RPKTERRS)
- qib_stats.sps_rcverrs++;
- if (errs & QIB_E_P_SPKTERRS)
- qib_stats.sps_txerrs++;
-
- iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
-
- if (errs & QIB_E_P_SDMAERRS)
- sdma_7322_p_errors(ppd, errs);
-
- if (errs & QIB_E_P_IBSTATUSCHANGED) {
- u64 ibcs;
- u8 ltstate;
-
- ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
- ltstate = qib_7322_phys_portstate(ibcs);
-
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
- handle_serdes_issues(ppd, ibcs);
- if (!(ppd->cpspec->ibcctrl_a &
- SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
- /*
- * We got our interrupt, so init code should be
- * happy and not try alternatives. Now squelch
- * other "chatter" from link-negotiation (pre Init)
- */
- ppd->cpspec->ibcctrl_a |=
- SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
- qib_write_kreg_port(ppd, krp_ibcctrl_a,
- ppd->cpspec->ibcctrl_a);
- }
-
- /* Update our picture of width and speed from chip */
- ppd->link_width_active =
- (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
- IB_WIDTH_4X : IB_WIDTH_1X;
- ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
- LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
- SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
- QIB_IB_DDR : QIB_IB_SDR;
-
- if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
- IB_PHYSPORTSTATE_DISABLED)
- qib_set_ib_7322_lstate(ppd, 0,
- QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
- else
- /*
- * Since going into a recovery state causes the link
- * state to go down and since recovery is transitory,
- * it is better if we "miss" ever seeing the link
- * training state go into recovery (i.e., ignore this
- * transition for link state special handling purposes)
- * without updating lastibcstat.
- */
- if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
- ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
- ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
- ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
- qib_handle_e_ibstatuschanged(ppd, ibcs);
- }
- if (*msg && iserr)
- qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
-
- if (ppd->state_wanted & ppd->lflags)
- wake_up_interruptible(&ppd->state_wait);
-done:
- return;
-}
-
-/* enable/disable chip from delivering interrupts */
-static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
-{
- if (enable) {
- if (dd->flags & QIB_BADINTR)
- return;
- qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
- /* cause any pending enabled interrupts to be re-delivered */
- qib_write_kreg(dd, kr_intclear, 0ULL);
- if (dd->cspec->num_msix_entries) {
- /* and same for MSIx */
- u64 val = qib_read_kreg64(dd, kr_intgranted);
-
- if (val)
- qib_write_kreg(dd, kr_intgranted, val);
- }
- } else
- qib_write_kreg(dd, kr_intmask, 0ULL);
-}
-
-/*
- * Try to cleanup as much as possible for anything that might have gone
- * wrong while in freeze mode, such as pio buffers being written by user
- * processes (causing armlaunch), send errors due to going into freeze mode,
- * etc., and try to avoid causing extra interrupts while doing so.
- * Forcibly update the in-memory pioavail register copies after cleanup
- * because the chip won't do it while in freeze mode (the register values
- * themselves are kept correct).
- * Make sure that we don't lose any important interrupts by using the chip
- * feature that says that writing 0 to a bit in *clear that is set in
- * *status will cause an interrupt to be generated again (if allowed by
- * the *mask value).
- * This is in chip-specific code because of all of the register accesses,
- * even though the details are similar on most chips.
- */
-static void qib_7322_clear_freeze(struct qib_devdata *dd)
-{
- int pidx;
-
- /* disable error interrupts, to avoid confusion */
- qib_write_kreg(dd, kr_errmask, 0ULL);
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx)
- if (dd->pport[pidx].link_speed_supported)
- qib_write_kreg_port(dd->pport + pidx, krp_errmask,
- 0ULL);
-
- /* also disable interrupts; errormask is sometimes overwritten */
- qib_7322_set_intr_state(dd, 0);
-
- /* clear the freeze, and be sure chip saw it */
- qib_write_kreg(dd, kr_control, dd->control);
- qib_read_kreg32(dd, kr_scratch);
-
- /*
- * Force new interrupt if any hwerr, error or interrupt bits are
- * still set, and clear "safe" send packet errors related to freeze
- * and cancelling sends. Re-enable error interrupts before possible
- * force of re-interrupt on pending interrupts.
- */
- qib_write_kreg(dd, kr_hwerrclear, 0ULL);
- qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
- qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
- /* We need to purge per-port errs and reset mask, too */
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- if (!dd->pport[pidx].link_speed_supported)
- continue;
- qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull);
- qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull);
- }
- qib_7322_set_intr_state(dd, 1);
-}
-
-/* no error handling to speak of */
-/**
- * qib_7322_handle_hwerrors - display hardware errors.
- * @dd: the qlogic_ib device
- * @msg: the output buffer
- * @msgl: the size of the output buffer
- *
- * Most hardware errors are catastrophic, but for right now we'll print
- * them and continue. We reuse the same message buffer as
- * qib_handle_errors() to avoid excessive stack use.
- */
-static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
- size_t msgl)
-{
- u64 hwerrs;
- u32 ctrl;
- int isfatal = 0;
-
- hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
- if (!hwerrs)
- goto bail;
- if (hwerrs == ~0ULL) {
- qib_dev_err(dd,
- "Read of hardware error status failed (all bits set); ignoring\n");
- goto bail;
- }
- qib_stats.sps_hwerrs++;
-
- /* Always clear the error status register, except BIST fail */
- qib_write_kreg(dd, kr_hwerrclear, hwerrs &
- ~HWE_MASK(PowerOnBISTFailed));
-
- hwerrs &= dd->cspec->hwerrmask;
-
- /* no EEPROM logging, yet */
-
- if (hwerrs)
- qib_devinfo(dd->pcidev,
- "Hardware error: hwerr=0x%llx (cleared)\n",
- (unsigned long long) hwerrs);
-
- ctrl = qib_read_kreg32(dd, kr_control);
- if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
- /*
- * No recovery yet...
- */
- if ((hwerrs & ~HWE_MASK(LATriggered)) ||
- dd->cspec->stay_in_freeze) {
- /*
- * If any bits we aren't ignoring are set, only make the
- * complaint once, in case it's stuck or recurring and we
- * get here multiple times. Force the link down so the
- * switch knows, and the LEDs are turned off.
- */
- if (dd->flags & QIB_INITTED)
- isfatal = 1;
- } else
- qib_7322_clear_freeze(dd);
- }
-
- if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
- isfatal = 1;
- strlcpy(msg,
- "[Memory BIST test failed, InfiniPath hardware unusable]",
- msgl);
- /* ignore from now on, so disable until driver reloaded */
- dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
- }
-
- err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
-
- /* Ignore esoteric PLL failures et al. */
-
- qib_dev_err(dd, "%s hardware error\n", msg);
-
- if (hwerrs &
- (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) |
- SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) {
- int pidx = 0;
- int err;
- unsigned long flags;
- struct qib_pportdata *ppd = dd->pport;
-
- for (; pidx < dd->num_pports; ++pidx, ppd++) {
- err = 0;
- if (pidx == 0 && (hwerrs &
- SYM_MASK(HwErrMask, SDmaMemReadErrMask_0)))
- err++;
- if (pidx == 1 && (hwerrs &
- SYM_MASK(HwErrMask, SDmaMemReadErrMask_1)))
- err++;
- if (err) {
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- dump_sdma_7322_state(ppd);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- }
- }
- }
-
- if (isfatal && !dd->diag_client) {
- qib_dev_err(dd,
- "Fatal Hardware Error, no longer usable, SN %.16s\n",
- dd->serial);
- /*
- * for /sys status file and user programs to print; if no
- * trailing brace is copied, we'll know it was truncated.
- */
- if (dd->freezemsg)
- snprintf(dd->freezemsg, dd->freezelen,
- "{%s}", msg);
- qib_disable_after_error(dd);
- }
-bail:;
-}
-
-/**
- * qib_7322_init_hwerrors - enable hardware errors
- * @dd: the qlogic_ib device
- *
- * Now that we have finished initializing everything that might reasonably
- * cause a hardware error, and cleared those error bits as they occur,
- * we can enable hardware errors in the mask (potentially enabling
- * freeze mode), and enable hardware errors as errors (along with
- * everything else) in errormask
- */
-static void qib_7322_init_hwerrors(struct qib_devdata *dd)
-{
- int pidx;
- u64 extsval;
-
- extsval = qib_read_kreg64(dd, kr_extstatus);
- if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
- QIB_EXTS_MEMBIST_ENDTEST)))
- qib_dev_err(dd, "MemBIST did not complete!\n");
-
- /* never clear BIST failure, so reported on each driver load */
- qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
-
- /* clear all */
- qib_write_kreg(dd, kr_errclear, ~0ULL);
- /* enable errors that are masked, at least this first time. */
- qib_write_kreg(dd, kr_errmask, ~0ULL);
- dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
- for (pidx = 0; pidx < dd->num_pports; ++pidx)
- if (dd->pport[pidx].link_speed_supported)
- qib_write_kreg_port(dd->pport + pidx, krp_errmask,
- ~0ULL);
-}
-
-/*
- * Disable and enable the armlaunch error. Used for PIO bandwidth testing
- * on chips that are count-based, rather than trigger-based. There is no
- * reference counting, but that's also fine, given the intended use.
- * Only chip-specific because it's all register accesses
- */
-static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
-{
- if (enable) {
- qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
- dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
- } else
- dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
- qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
-}
-
-/*
- * Formerly took parameter <which> in pre-shifted,
- * pre-merged form with LinkCmd and LinkInitCmd
- * together, and assuming the zero was NOP.
- */
-static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
- u16 linitcmd)
-{
- u64 mod_wd;
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
-
- if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
- /*
- * If we are told to disable, note that so link-recovery
- * code does not attempt to bring us back up.
- * Also reset everything that we can, so we start
- * completely clean when re-enabled (before we
- * actually issue the disable to the IBC)
- */
- qib_7322_mini_pcs_reset(ppd);
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_LINK_DISABLED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
- /*
- * Any other linkinitcmd will lead to LINKDOWN and then
- * to INIT (if all is well), so clear flag to let
- * link-recovery code attempt to bring us back up.
- */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- /*
- * Clear status change interrupt reduction so the
- * new state is seen.
- */
- ppd->cpspec->ibcctrl_a &=
- ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
- }
-
- mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
- (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
-
- qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
- mod_wd);
- /* write to chip to prevent back-to-back writes of ibc reg */
- qib_write_kreg(dd, kr_scratch, 0);
-
-}
-
-/*
- * The total RCV buffer memory is 64KB, used for both ports, and is
- * in units of 64 bytes (same as IB flow control credit unit).
- * The consumedVL units in the same registers are in 32-byte units!
- * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
- * and we can therefore allocate just 9 IB credits for 2 VL15 packets
- * in krp_rxcreditvl15, rather than 10.
- */
-#define RCV_BUF_UNITSZ 64
-#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
-
-static void set_vls(struct qib_pportdata *ppd)
-{
- int i, numvls, totcred, cred_vl, vl0extra;
- struct qib_devdata *dd = ppd->dd;
- u64 val;
-
- numvls = qib_num_vls(ppd->vls_operational);
-
- /*
- * Set up per-VL credits. Below is a kluge based on these assumptions:
- * 1) port is disabled at the time early_init is called.
- * 2) give VL15 17 credits, for two max-plausible packets.
- * 3) Give VL0-N the rest, with any rounding excess used for VL0
- */
- /* 2 VL15 packets @ 288 bytes each (including IB headers) */
- totcred = NUM_RCV_BUF_UNITS(dd);
- cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
- totcred -= cred_vl;
- qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
- cred_vl = totcred / numvls;
- vl0extra = totcred - cred_vl * numvls;
- qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
- for (i = 1; i < numvls; i++)
- qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
- for (; i < 8; i++) /* no buffer space for other VLs */
- qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
-
- /* Notify IBC that credits need to be recalculated */
- val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
- val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
- qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
- qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
-
- for (i = 0; i < numvls; i++)
- val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
- val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
-
- /* Change the number of operational VLs */
- ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
- ~SYM_MASK(IBCCtrlA_0, NumVLane)) |
- ((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
- qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
- qib_write_kreg(dd, kr_scratch, 0ULL);
-}
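
A standalone back-of-the-envelope check of the credit split performed by set_vls() above, for a dual-port board running 4 data VLs. The per-VL numbers are just what the arithmetic yields for that configuration, not chip constants:

#include <stdio.h>

int main(void)
{
	const int num_ports = 2, num_vls = 4;
	const int rcv_buf_unitsz = 64;
	int totcred = (64 * 1024) / (rcv_buf_unitsz * num_ports);	/* 512 units/port */
	int vl15 = (2 * 288 + rcv_buf_unitsz - 1) / rcv_buf_unitsz;	/* 9 units for 2 VL15 pkts */
	int rest = totcred - vl15;					/* 503 */
	int per_vl = rest / num_vls;					/* 125 */
	int vl0_extra = rest - per_vl * num_vls;			/* 3, rounding excess to VL0 */

	printf("VL15=%d VL0=%d VL1..VL%d=%d each\n",
	       vl15, per_vl + vl0_extra, num_vls - 1, per_vl);
	return 0;
}
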
-
-/*
- * The code that deals with actual SerDes is in serdes_7322_init().
- * Compared to the code for iba7220, it is minimal.
- */
-static int serdes_7322_init(struct qib_pportdata *ppd);
-
-/**
- * qib_7322_bringup_serdes - bring up the serdes
- * @ppd: physical port on the qlogic_ib device
- */
-static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 val, guid, ibc;
- unsigned long flags;
- int ret = 0;
-
- /*
- * SerDes model not in Pd, but still need to
- * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
- * eventually.
- */
- /* Put IBC in reset, sends disabled (should be in reset already) */
- ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
- qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
- qib_write_kreg(dd, kr_scratch, 0ULL);
-
- /* ensure previous Tx parameters are not still forced */
- qib_write_kreg_port(ppd, krp_tx_deemph_override,
- SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- reset_tx_deemphasis_override));
-
- if (qib_compat_ddr_negotiate) {
- ppd->cpspec->ibdeltainprog = 1;
- ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
- crp_ibsymbolerr);
- ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
- crp_iblinkerrrecov);
- }
-
- /* flowcontrolwatermark is in units of KBytes */
- ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
- /*
- * Flow control is sent this often, even if no changes in
- * buffer space occur. Units are 128ns for this chip.
- * Set to 3usec.
- */
- ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
- /* max error tolerance */
- ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
- /* IB credit flow control. */
- ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
- /*
- * set initial max size pkt IBC will send, including ICRC; it's the
- * PIO buffer size in dwords, less 1; also see qib_set_mtu()
- */
- ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
- SYM_LSB(IBCCtrlA_0, MaxPktLen);
- ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
-
- /*
- * Reset the PCS interface to the serdes (and also ibc, which is still
- * in reset from above). Writes new value of ibcctrl_a as last step.
- */
- qib_7322_mini_pcs_reset(ppd);
-
- if (!ppd->cpspec->ibcctrl_b) {
- unsigned lse = ppd->link_speed_enabled;
-
- /*
- * Not on re-init after reset, establish shadow
- * and force initial config.
- */
- ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
- krp_ibcctrl_b);
- ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
- IBA7322_IBC_SPEED_DDR |
- IBA7322_IBC_SPEED_SDR |
- IBA7322_IBC_WIDTH_AUTONEG |
- SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
- if (lse & (lse - 1)) /* Multiple speeds enabled */
- ppd->cpspec->ibcctrl_b |=
- (lse << IBA7322_IBC_SPEED_LSB) |
- IBA7322_IBC_IBTA_1_2_MASK |
- IBA7322_IBC_MAX_SPEED_MASK;
- else
- ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
- IBA7322_IBC_SPEED_QDR |
- IBA7322_IBC_IBTA_1_2_MASK :
- (lse == QIB_IB_DDR) ?
- IBA7322_IBC_SPEED_DDR :
- IBA7322_IBC_SPEED_SDR;
- if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
- (IB_WIDTH_1X | IB_WIDTH_4X))
- ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
- else
- ppd->cpspec->ibcctrl_b |=
- ppd->link_width_enabled == IB_WIDTH_4X ?
- IBA7322_IBC_WIDTH_4X_ONLY :
- IBA7322_IBC_WIDTH_1X_ONLY;
-
- /* always enable these on driver reload, not sticky */
- ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
- IBA7322_IBC_HRTBT_MASK);
- }
- qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
-
- /* setup so we have more time at CFGTEST to change H1 */
- val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
- val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
- val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
- qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
-
- serdes_7322_init(ppd);
-
- guid = be64_to_cpu(ppd->guid);
- if (!guid) {
- if (dd->base_guid)
- guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
- ppd->guid = cpu_to_be64(guid);
- }
-
- qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
- /* write to chip to prevent back-to-back writes of ibc reg */
- qib_write_kreg(dd, kr_scratch, 0);
-
- /* Enable port */
- ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
- set_vls(ppd);
-
- /* initially come up DISABLED, without sending anything. */
- val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
- QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
- qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- /* clear the linkinit cmds */
- ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
-
- /* be paranoid against later code motion, etc. */
- spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
- ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
- qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
- spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
-
- /* Also enable IBSTATUSCHG interrupt. */
- val = qib_read_kreg_port(ppd, krp_errmask);
- qib_write_kreg_port(ppd, krp_errmask,
- val | ERR_MASK_N(IBStatusChanged));
-
- /* Always zero until we start messing with SerDes for real */
- return ret;
-}
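
A toy sketch of the IBCCtrlA-style field packing done early in qib_7322_bringup_serdes(), mostly to spell out the flow-control arithmetic in its comments (24 units of 128 ns is 3072 ns, i.e. roughly 3 usec). The shift positions and packet length below are invented for the example; the real layout comes from the SYM_LSB()/SYM_MASK() macros.

#include <stdio.h>
#include <stdint.h>

#define FLD(val, lsb)	((uint64_t)(val) << (lsb))

int main(void)
{
	const unsigned water_lsb = 0, period_lsb = 8, maxlen_lsb = 16; /* invented */
	unsigned ibmaxlen = 4096;			/* bytes, just an example */
	uint64_t ibc = 0;

	ibc |= FLD(0x5, water_lsb);			/* flow control watermark, KB */
	ibc |= FLD(24, period_lsb);			/* 24 * 128 ns = 3072 ns */
	ibc |= FLD((ibmaxlen >> 2) + 1, maxlen_lsb);	/* max pkt len, in dwords */

	printf("flow-control period = %u ns\n", 24 * 128);
	printf("packed ibc = 0x%llx\n", (unsigned long long)ibc);
	return 0;
}
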
-
-/**
- * qib_7322_mini_quiet_serdes - set serdes to txidle
- * @ppd: physical port on the qlogic_ib device
- *
- * Called when the driver is being unloaded
- */
-static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
-{
- u64 val;
- unsigned long flags;
-
- qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- wake_up(&ppd->cpspec->autoneg_wait);
- cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
- if (ppd->dd->cspec->r1)
- cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
-
- ppd->cpspec->chase_end = 0;
- if (ppd->cpspec->chase_timer.data) /* if initted */
- del_timer_sync(&ppd->cpspec->chase_timer);
-
- /*
- * Despite the name, actually disables IBC as well. Do it when
- * we are as sure as possible that no more packets can be
- * received, following the down and the PCS reset.
- * The actual disabling happens in qib_7322_mini_pcs_reset(),
- * along with the PCS being reset.
- */
- ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
- qib_7322_mini_pcs_reset(ppd);
-
- /*
- * Update the adjusted counters so the adjustment persists
- * across driver reload.
- */
- if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
- ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
- struct qib_devdata *dd = ppd->dd;
- u64 diagc;
-
- /* enable counter writes */
- diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
- qib_write_kreg(dd, kr_hwdiagctrl,
- diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
-
- if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
- val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
- if (ppd->cpspec->ibdeltainprog)
- val -= val - ppd->cpspec->ibsymsnap;
- val -= ppd->cpspec->ibsymdelta;
- write_7322_creg_port(ppd, crp_ibsymbolerr, val);
- }
- if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
- val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
- if (ppd->cpspec->ibdeltainprog)
- val -= val - ppd->cpspec->iblnkerrsnap;
- val -= ppd->cpspec->iblnkerrdelta;
- write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
- }
- if (ppd->cpspec->iblnkdowndelta) {
- val = read_7322_creg32_port(ppd, crp_iblinkdown);
- val += ppd->cpspec->iblnkdowndelta;
- write_7322_creg_port(ppd, crp_iblinkdown, val);
- }
- /*
- * No need to save ibmalfdelta since IB perfcounters
- * are cleared on driver reload.
- */
-
- /* and disable counter writes */
- qib_write_kreg(dd, kr_hwdiagctrl, diagc);
- }
-}
-
-/**
- * qib_setup_7322_setextled - set the state of the two external LEDs
- * @ppd: physical port on the qlogic_ib device
- * @on: whether the link is up or not
- *
- * The exact combination of LEDs lit when @on is true is determined by
- * looking at the ibcstatus.
- *
- * These LEDs indicate the physical and logical state of IB link.
- * For this chip (at least with recommended board pinouts), LED1
- * is Yellow (logical state) and LED2 is Green (physical state).
- *
- * Note: We try to match the Mellanox HCA LED behavior as best
- * we can. Green indicates physical link state is OK (something is
- * plugged in, and we can train).
- * Amber indicates the link is logically up (ACTIVE).
- * Mellanox further blinks the amber LED to indicate data packet
- * activity, but we have no hardware support for that, so it would
- * require waking up every 10-20 msecs and checking the counters
- * on the chip, and then turning the LED off if appropriate. That's
- * visible overhead, so not something we will do.
- */
-static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 extctl, ledblink = 0, val;
- unsigned long flags;
- int yel, grn;
-
- /*
- * The diags use the LED to indicate diag info, so we leave
- * the external LED alone when the diags are running.
- */
- if (dd->diag_client)
- return;
-
- /* Allow override of LED display for, e.g., locating the system in a rack */
- if (ppd->led_override) {
- grn = (ppd->led_override & QIB_LED_PHYS);
- yel = (ppd->led_override & QIB_LED_LOG);
- } else if (on) {
- val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
- grn = qib_7322_phys_portstate(val) ==
- IB_PHYSPORTSTATE_LINKUP;
- yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
- } else {
- grn = 0;
- yel = 0;
- }
-
- spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
- extctl = dd->cspec->extctrl & (ppd->port == 1 ?
- ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
- if (grn) {
- extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
- /*
- * Counts are in chip clock (4ns) periods.
- * This is roughly 1/16 sec (66.6 ms) on,
- * 3/16 sec (187.5 ms) off, with packets rcvd.
- */
- ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
- ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
- }
- if (yel)
- extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
- dd->cspec->extctrl = extctl;
- qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
- spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
-
- if (ledblink) /* blink the LED on packet receive */
- qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
-}
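
For reference, the blink constants above convert as follows when expressed in 4 ns chip-clock ticks. This is a trivial standalone check, not driver code:

#include <stdio.h>

int main(void)
{
	unsigned long on_ns  = 66600UL * 1000;		/* 66.6 ms on */
	unsigned long off_ns = 187500UL * 1000;		/* 187.5 ms off */

	printf("on  = %lu ticks\n", on_ns / 4);		/* 16650000 */
	printf("off = %lu ticks\n", off_ns / 4);	/* 46875000 */
	return 0;
}
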
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-
-static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
-{
- switch (event) {
- case DCA_PROVIDER_ADD:
- if (dd->flags & QIB_DCA_ENABLED)
- break;
- if (!dca_add_requester(&dd->pcidev->dev)) {
- qib_devinfo(dd->pcidev, "DCA enabled\n");
- dd->flags |= QIB_DCA_ENABLED;
- qib_setup_dca(dd);
- }
- break;
- case DCA_PROVIDER_REMOVE:
- if (dd->flags & QIB_DCA_ENABLED) {
- dca_remove_requester(&dd->pcidev->dev);
- dd->flags &= ~QIB_DCA_ENABLED;
- dd->cspec->dca_ctrl = 0;
- qib_write_kreg(dd, KREG_IDX(DCACtrlA),
- dd->cspec->dca_ctrl);
- }
- break;
- }
- return 0;
-}
-
-static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
-{
- struct qib_devdata *dd = rcd->dd;
- struct qib_chip_specific *cspec = dd->cspec;
-
- if (!(dd->flags & QIB_DCA_ENABLED))
- return;
- if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
- const struct dca_reg_map *rmp;
-
- cspec->rhdr_cpu[rcd->ctxt] = cpu;
- rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
- cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
- cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
- (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
- qib_devinfo(dd->pcidev,
- "Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
- (long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
- qib_write_kreg(dd, rmp->regno,
- cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
- cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
- qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
- }
-}
-
-static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
-{
- struct qib_devdata *dd = ppd->dd;
- struct qib_chip_specific *cspec = dd->cspec;
- unsigned pidx = ppd->port - 1;
-
- if (!(dd->flags & QIB_DCA_ENABLED))
- return;
- if (cspec->sdma_cpu[pidx] != cpu) {
- cspec->sdma_cpu[pidx] = cpu;
- cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
- SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
- SYM_MASK(DCACtrlF, SendDma0DCAOPH));
- cspec->dca_rcvhdr_ctrl[4] |=
- (u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
- (ppd->hw_pidx ?
- SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
- SYM_LSB(DCACtrlF, SendDma0DCAOPH));
- qib_devinfo(dd->pcidev,
- "sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
- (long long) cspec->dca_rcvhdr_ctrl[4]);
- qib_write_kreg(dd, KREG_IDX(DCACtrlF),
- cspec->dca_rcvhdr_ctrl[4]);
- cspec->dca_ctrl |= ppd->hw_pidx ?
- SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
- SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
- qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
- }
-}
-
-static void qib_setup_dca(struct qib_devdata *dd)
-{
- struct qib_chip_specific *cspec = dd->cspec;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
- cspec->rhdr_cpu[i] = -1;
- for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
- cspec->sdma_cpu[i] = -1;
- cspec->dca_rcvhdr_ctrl[0] =
- (1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
- cspec->dca_rcvhdr_ctrl[1] =
- (1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
- cspec->dca_rcvhdr_ctrl[2] =
- (1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
- cspec->dca_rcvhdr_ctrl[3] =
- (1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
- cspec->dca_rcvhdr_ctrl[4] =
- (1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
- for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
- qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
- cspec->dca_rcvhdr_ctrl[i]);
- for (i = 0; i < cspec->num_msix_entries; i++)
- setup_dca_notifier(dd, &cspec->msix_entries[i]);
-}
-
-static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct qib_irq_notify *n =
- container_of(notify, struct qib_irq_notify, notify);
- int cpu = cpumask_first(mask);
-
- if (n->rcv) {
- struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
-
- qib_update_rhdrq_dca(rcd, cpu);
- } else {
- struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
-
- qib_update_sdma_dca(ppd, cpu);
- }
-}
-
-static void qib_irq_notifier_release(struct kref *ref)
-{
- struct qib_irq_notify *n =
- container_of(ref, struct qib_irq_notify, notify.kref);
- struct qib_devdata *dd;
-
- if (n->rcv) {
- struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
-
- dd = rcd->dd;
- } else {
- struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
-
- dd = ppd->dd;
- }
- qib_devinfo(dd->pcidev,
- "release on HCA notify 0x%p n 0x%p\n", ref, n);
- kfree(n);
-}
-#endif
-
-/*
- * Disable MSIx interrupt if enabled, call generic MSIx code
- * to cleanup, and clear pending MSIx interrupts.
- * Used for fallback to INTx, after reset, and when MSIx setup fails.
- */
-static void qib_7322_nomsix(struct qib_devdata *dd)
-{
- u64 intgranted;
- int n;
-
- dd->cspec->main_int_mask = ~0ULL;
- n = dd->cspec->num_msix_entries;
- if (n) {
- int i;
-
- dd->cspec->num_msix_entries = 0;
- for (i = 0; i < n; i++) {
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- reset_dca_notifier(dd, &dd->cspec->msix_entries[i]);
-#endif
- irq_set_affinity_hint(
- dd->cspec->msix_entries[i].msix.vector, NULL);
- free_cpumask_var(dd->cspec->msix_entries[i].mask);
- free_irq(dd->cspec->msix_entries[i].msix.vector,
- dd->cspec->msix_entries[i].arg);
- }
- qib_nomsix(dd);
- }
- /* make sure no MSIx interrupts are left pending */
- intgranted = qib_read_kreg64(dd, kr_intgranted);
- if (intgranted)
- qib_write_kreg(dd, kr_intgranted, intgranted);
-}
-
-static void qib_7322_free_irq(struct qib_devdata *dd)
-{
- if (dd->cspec->irq) {
- free_irq(dd->cspec->irq, dd);
- dd->cspec->irq = 0;
- }
- qib_7322_nomsix(dd);
-}
-
-static void qib_setup_7322_cleanup(struct qib_devdata *dd)
-{
- int i;
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- if (dd->flags & QIB_DCA_ENABLED) {
- dca_remove_requester(&dd->pcidev->dev);
- dd->flags &= ~QIB_DCA_ENABLED;
- dd->cspec->dca_ctrl = 0;
- qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
- }
-#endif
-
- qib_7322_free_irq(dd);
- kfree(dd->cspec->cntrs);
- kfree(dd->cspec->sendchkenable);
- kfree(dd->cspec->sendgrhchk);
- kfree(dd->cspec->sendibchk);
- kfree(dd->cspec->msix_entries);
- for (i = 0; i < dd->num_pports; i++) {
- unsigned long flags;
- u32 mask = QSFP_GPIO_MOD_PRS_N |
- (QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
-
- kfree(dd->pport[i].cpspec->portcntrs);
- if (dd->flags & QIB_HAS_QSFP) {
- spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
- dd->cspec->gpio_mask &= ~mask;
- qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
- spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
- qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
- }
- if (dd->pport[i].ibport_data.smi_ah)
- ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
- }
-}
-
-/* handle SDMA interrupts */
-static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
-{
- struct qib_pportdata *ppd0 = &dd->pport[0];
- struct qib_pportdata *ppd1 = &dd->pport[1];
- u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
- INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
- u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
- INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
-
- if (intr0)
- qib_sdma_intr(ppd0);
- if (intr1)
- qib_sdma_intr(ppd1);
-
- if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
- qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
- if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
- qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
-}
-
-/*
- * Set or clear the Send buffer available interrupt enable bit.
- */
-static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- if (needint)
- dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
- else
- dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
- qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
-}
-
-/*
- * Somehow got an interrupt with reserved bits set in interrupt status.
- * Print a message so we know it happened, then clear them.
- * Keep the mainline interrupt handler cache-friendly.
- */
-static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
-{
- u64 kills;
-
- kills = istat & ~QIB_I_BITSEXTANT;
- qib_dev_err(dd,
- "Clearing reserved interrupt(s) 0x%016llx\n",
- (unsigned long long) kills);
- qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
-}
-
-/* keep mainline interrupt handler cache-friendly */
-static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
-{
- u32 gpiostatus;
- int handled = 0;
- int pidx;
-
- /*
- * Boards for this chip currently don't use GPIO interrupts,
- * so clear by writing GPIOstatus to GPIOclear, and complain
- * to developer. To avoid endless repeats, clear
- * the bits in the mask, since there is some kind of
- * programming error or chip problem.
- */
- gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
- /*
- * In theory, writing GPIOstatus to GPIOclear could
- * have a bad side-effect on some diagnostic that wanted
- * to poll for a status-change, but the various shadows
- * make that problematic at best. Diags will just suppress
- * all GPIO interrupts during such tests.
- */
- qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
- /*
- * Check for QSFP MOD_PRS changes
- * only works for single port if IB1 != pidx1
- */
- for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
- ++pidx) {
- struct qib_pportdata *ppd;
- struct qib_qsfp_data *qd;
- u32 mask;
-
- if (!dd->pport[pidx].link_speed_supported)
- continue;
- mask = QSFP_GPIO_MOD_PRS_N;
- ppd = dd->pport + pidx;
- mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
- if (gpiostatus & dd->cspec->gpio_mask & mask) {
- u64 pins;
-
- qd = &ppd->cpspec->qsfp_data;
- gpiostatus &= ~mask;
- pins = qib_read_kreg64(dd, kr_extstatus);
- pins >>= SYM_LSB(EXTStatus, GPIOIn);
- if (!(pins & mask)) {
- ++handled;
- qd->t_insert = jiffies;
- queue_work(ib_wq, &qd->work);
- }
- }
- }
-
- if (gpiostatus && !handled) {
- const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
- u32 gpio_irq = mask & gpiostatus;
-
- /*
- * Clear any troublemakers, and update chip from shadow
- */
- dd->cspec->gpio_mask &= ~gpio_irq;
- qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
- }
-}
-
-/*
- * Handle errors and unusual events first, separate function
- * to improve cache hits for fast path interrupt handling.
- */
-static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
-{
- if (istat & ~QIB_I_BITSEXTANT)
- unknown_7322_ibits(dd, istat);
- if (istat & QIB_I_GPIO)
- unknown_7322_gpio_intr(dd);
- if (istat & QIB_I_C_ERROR) {
- qib_write_kreg(dd, kr_errmask, 0ULL);
- tasklet_schedule(&dd->error_tasklet);
- }
- if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
- handle_7322_p_errors(dd->rcd[0]->ppd);
- if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
- handle_7322_p_errors(dd->rcd[1]->ppd);
-}
-
-/*
- * Dynamically adjust the rcv int timeout for a context based on incoming
- * packet rate.
- */
-static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
-{
- struct qib_devdata *dd = rcd->dd;
- u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
-
- /*
- * Dynamically adjust idle timeout on chip
- * based on number of packets processed.
- */
- if (npkts < rcv_int_count && timeout > 2)
- timeout >>= 1;
- else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
- timeout = min(timeout << 1, rcv_int_timeout);
- else
- return;
-
- dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
- qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
-}
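
A minimal user-space sketch of the adaptive timeout policy above: halve the timeout under light load, double it (up to a cap) under heavy load. The threshold and cap below merely stand in for the rcv_int_count and rcv_int_timeout tunables referenced in the code:

#include <stdio.h>

static unsigned adjust(unsigned timeout, int npkts)
{
	const int pkt_threshold = 16;		/* stands in for rcv_int_count */
	const unsigned max_timeout = 375;	/* stands in for rcv_int_timeout */

	if (npkts < pkt_threshold && timeout > 2)
		timeout >>= 1;
	else if (npkts >= pkt_threshold && timeout < max_timeout)
		timeout = (timeout << 1) < max_timeout ? timeout << 1 : max_timeout;
	return timeout;
}

int main(void)
{
	unsigned t = 64;

	t = adjust(t, 4);	/* light load -> 32 */
	t = adjust(t, 200);	/* heavy load -> 64 */
	printf("timeout = %u\n", t);
	return 0;
}
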
-
-/*
- * This is the main interrupt handler.
- * It will normally only be used for low frequency interrupts but may
- * have to handle all interrupts if INTx is enabled or fewer than normal
- * MSIx interrupts were allocated.
- * This routine should ignore the interrupt bits for any of the
- * dedicated MSIx handlers.
- */
-static irqreturn_t qib_7322intr(int irq, void *data)
-{
- struct qib_devdata *dd = data;
- irqreturn_t ret;
- u64 istat;
- u64 ctxtrbits;
- u64 rmask;
- unsigned i;
- u32 npkts;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- ret = IRQ_HANDLED;
- goto bail;
- }
-
- istat = qib_read_kreg64(dd, kr_intstatus);
-
- if (unlikely(istat == ~0ULL)) {
- qib_bad_intrstatus(dd);
- qib_dev_err(dd, "Interrupt status all f's, skipping\n");
- /* don't know if it was our interrupt or not */
- ret = IRQ_NONE;
- goto bail;
- }
-
- istat &= dd->cspec->main_int_mask;
- if (unlikely(!istat)) {
- /* already handled, or shared and not us */
- ret = IRQ_NONE;
- goto bail;
- }
-
- this_cpu_inc(*dd->int_counter);
-
- /* handle "errors" of various kinds first, device ahead of port */
- if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
- QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
- INT_MASK_P(Err, 1))))
- unlikely_7322_intr(dd, istat);
-
- /*
- * Clear the interrupt bits we found set, relatively early, so we
- * "know" know the chip will have seen this by the time we process
- * the queue, and will re-interrupt if necessary. The processor
- * itself won't take the interrupt again until we return.
- */
- qib_write_kreg(dd, kr_intclear, istat);
-
- /*
- * Handle kernel receive queues before checking for pio buffers
- * available since receives can overflow; piobuf waiters can afford
- * a few extra cycles, since they were waiting anyway.
- */
- ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
- if (ctxtrbits) {
- rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
- (1ULL << QIB_I_RCVURG_LSB);
- for (i = 0; i < dd->first_user_ctxt; i++) {
- if (ctxtrbits & rmask) {
- ctxtrbits &= ~rmask;
- if (dd->rcd[i])
- qib_kreceive(dd->rcd[i], NULL, &npkts);
- }
- rmask <<= 1;
- }
- if (ctxtrbits) {
- ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
- (ctxtrbits >> QIB_I_RCVURG_LSB);
- qib_handle_urcv(dd, ctxtrbits);
- }
- }
-
- if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
- sdma_7322_intr(dd, istat);
-
- if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
- qib_ib_piobufavail(dd);
-
- ret = IRQ_HANDLED;
-bail:
- return ret;
-}
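For clarity, here is a standalone sketch of the ctxtrbits walk in the handler above: it scans the combined receive-available/urgent bitmask one kernel context at a time using a two-bit rmask, leaving any remaining bits for the user-context path. The bit positions and context count below are hypothetical stand-ins for QIB_I_RCVAVAIL_LSB, QIB_I_RCVURG_LSB and first_user_ctxt.

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Illustrative only: AVAIL_LSB, URG_LSB and NUM_KCTXTS are hypothetical
     * stand-ins for QIB_I_RCVAVAIL_LSB, QIB_I_RCVURG_LSB and first_user_ctxt.
     */
    #define AVAIL_LSB  0
    #define URG_LSB    32
    #define NUM_KCTXTS 6

    static void scan_kernel_ctxts(uint64_t ctxtrbits)
    {
        uint64_t rmask = (1ULL << AVAIL_LSB) | (1ULL << URG_LSB);
        unsigned int i;

        for (i = 0; i < NUM_KCTXTS; i++) {
            if (ctxtrbits & rmask) {
                ctxtrbits &= ~rmask;  /* consume this context's bits */
                printf("kernel context %u has work\n", i);
            }
            rmask <<= 1;
        }
        if (ctxtrbits)
            printf("remaining bits belong to user contexts\n");
    }

    int main(void)
    {
        /* contexts 2 (avail) and 4 (urgent) pending, plus a user-context bit */
        scan_kernel_ctxts((1ULL << 2) | (1ULL << (URG_LSB + 4)) | (1ULL << 20));
        return 0;
    }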
-
-/*
- * Dedicated receive packet available interrupt handler.
- */
-static irqreturn_t qib_7322pintr(int irq, void *data)
-{
- struct qib_ctxtdata *rcd = data;
- struct qib_devdata *dd = rcd->dd;
- u32 npkts;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- return IRQ_HANDLED;
-
- this_cpu_inc(*dd->int_counter);
-
- /* Clear the interrupt bit we expect to be set. */
- qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
- (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
-
- qib_kreceive(rcd, NULL, &npkts);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Dedicated Send buffer available interrupt handler.
- */
-static irqreturn_t qib_7322bufavail(int irq, void *data)
-{
- struct qib_devdata *dd = data;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- return IRQ_HANDLED;
-
- this_cpu_inc(*dd->int_counter);
-
- /* Clear the interrupt bit we expect to be set. */
- qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
-
- /* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
- if (dd->flags & QIB_INITTED)
- qib_ib_piobufavail(dd);
- else
- qib_wantpiobuf_7322_intr(dd, 0);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Dedicated Send DMA interrupt handler.
- */
-static irqreturn_t sdma_intr(int irq, void *data)
-{
- struct qib_pportdata *ppd = data;
- struct qib_devdata *dd = ppd->dd;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- return IRQ_HANDLED;
-
- this_cpu_inc(*dd->int_counter);
-
- /* Clear the interrupt bit we expect to be set. */
- qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
- INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
- qib_sdma_intr(ppd);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Dedicated Send DMA idle interrupt handler.
- */
-static irqreturn_t sdma_idle_intr(int irq, void *data)
-{
- struct qib_pportdata *ppd = data;
- struct qib_devdata *dd = ppd->dd;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- return IRQ_HANDLED;
-
- this_cpu_inc(*dd->int_counter);
-
- /* Clear the interrupt bit we expect to be set. */
- qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
- INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
- qib_sdma_intr(ppd);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Dedicated Send DMA progress interrupt handler.
- */
-static irqreturn_t sdma_progress_intr(int irq, void *data)
-{
- struct qib_pportdata *ppd = data;
- struct qib_devdata *dd = ppd->dd;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- return IRQ_HANDLED;
-
- this_cpu_inc(*dd->int_counter);
-
- /* Clear the interrupt bit we expect to be set. */
- qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
- INT_MASK_P(SDmaProgress, 1) :
- INT_MASK_P(SDmaProgress, 0));
- qib_sdma_intr(ppd);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Dedicated Send DMA cleanup interrupt handler.
- */
-static irqreturn_t sdma_cleanup_intr(int irq, void *data)
-{
- struct qib_pportdata *ppd = data;
- struct qib_devdata *dd = ppd->dd;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- return IRQ_HANDLED;
-
- this_cpu_inc(*dd->int_counter);
-
- /* Clear the interrupt bit we expect to be set. */
- qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
- INT_MASK_PM(SDmaCleanupDone, 1) :
- INT_MASK_PM(SDmaCleanupDone, 0));
- qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
-
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-
-static void reset_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
-{
- if (!m->dca)
- return;
- qib_devinfo(dd->pcidev,
- "Disabling notifier on HCA %d irq %d\n",
- dd->unit,
- m->msix.vector);
- irq_set_affinity_notifier(
- m->msix.vector,
- NULL);
- m->notifier = NULL;
-}
-
-static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
-{
- struct qib_irq_notify *n;
-
- if (!m->dca)
- return;
- n = kzalloc(sizeof(*n), GFP_KERNEL);
- if (n) {
- int ret;
-
- m->notifier = n;
- n->notify.irq = m->msix.vector;
- n->notify.notify = qib_irq_notifier_notify;
- n->notify.release = qib_irq_notifier_release;
- n->arg = m->arg;
- n->rcv = m->rcv;
- qib_devinfo(dd->pcidev,
- "set notifier irq %d rcv %d notify %p\n",
- n->notify.irq, n->rcv, &n->notify);
- ret = irq_set_affinity_notifier(
- n->notify.irq,
- &n->notify);
- if (ret) {
- m->notifier = NULL;
- kfree(n);
- }
- }
-}
-
-#endif
-
-/*
- * Set up our chip-specific interrupt handler.
- * The interrupt type has already been setup, so
- * we just need to do the registration and error checking.
- * If we are using MSIx interrupts, we may fall back to
- * INTx later, if the interrupt handler doesn't get called
- * within 1/2 second (see verify_interrupt()).
- */
-static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
-{
- int ret, i, msixnum;
- u64 redirect[6];
- u64 mask;
- const struct cpumask *local_mask;
- int firstcpu, secondcpu = 0, currrcvcpu = 0;
-
- if (!dd->num_pports)
- return;
-
- if (clearpend) {
- /*
- * if not switching interrupt types, be sure interrupts are
- * disabled, and then clear anything pending at this point,
- * because we are starting clean.
- */
- qib_7322_set_intr_state(dd, 0);
-
- /* clear the reset error, init error/hwerror mask */
- qib_7322_init_hwerrors(dd);
-
- /* clear any interrupt bits that might be set */
- qib_write_kreg(dd, kr_intclear, ~0ULL);
-
- /* make sure no pending MSIx intr, and clear diag reg */
- qib_write_kreg(dd, kr_intgranted, ~0ULL);
- qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
- }
-
- if (!dd->cspec->num_msix_entries) {
- /* Try to get INTx interrupt */
-try_intx:
- if (!dd->pcidev->irq) {
- qib_dev_err(dd,
- "irq is 0, BIOS error? Interrupts won't work\n");
- goto bail;
- }
- ret = request_irq(dd->pcidev->irq, qib_7322intr,
- IRQF_SHARED, QIB_DRV_NAME, dd);
- if (ret) {
- qib_dev_err(dd,
- "Couldn't setup INTx interrupt (irq=%d): %d\n",
- dd->pcidev->irq, ret);
- goto bail;
- }
- dd->cspec->irq = dd->pcidev->irq;
- dd->cspec->main_int_mask = ~0ULL;
- goto bail;
- }
-
- /* Try to get MSIx interrupts */
- memset(redirect, 0, sizeof(redirect));
- mask = ~0ULL;
- msixnum = 0;
- local_mask = cpumask_of_pcibus(dd->pcidev->bus);
- firstcpu = cpumask_first(local_mask);
- if (firstcpu >= nr_cpu_ids ||
- cpumask_weight(local_mask) == num_online_cpus()) {
- local_mask = topology_core_cpumask(0);
- firstcpu = cpumask_first(local_mask);
- }
- if (firstcpu < nr_cpu_ids) {
- secondcpu = cpumask_next(firstcpu, local_mask);
- if (secondcpu >= nr_cpu_ids)
- secondcpu = firstcpu;
- currrcvcpu = secondcpu;
- }
- for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
- irq_handler_t handler;
- void *arg;
- u64 val;
- int lsb, reg, sh;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- int dca = 0;
-#endif
-
- dd->cspec->msix_entries[msixnum].
- name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1]
- = '\0';
- if (i < ARRAY_SIZE(irq_table)) {
- if (irq_table[i].port) {
- /* skip if for a non-configured port */
- if (irq_table[i].port > dd->num_pports)
- continue;
- arg = dd->pport + irq_table[i].port - 1;
- } else
- arg = dd;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dca = irq_table[i].dca;
-#endif
- lsb = irq_table[i].lsb;
- handler = irq_table[i].handler;
- snprintf(dd->cspec->msix_entries[msixnum].name,
- sizeof(dd->cspec->msix_entries[msixnum].name)
- - 1,
- QIB_DRV_NAME "%d%s", dd->unit,
- irq_table[i].name);
- } else {
- unsigned ctxt;
-
- ctxt = i - ARRAY_SIZE(irq_table);
- /* per krcvq context receive interrupt */
- arg = dd->rcd[ctxt];
- if (!arg)
- continue;
- if (qib_krcvq01_no_msi && ctxt < 2)
- continue;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dca = 1;
-#endif
- lsb = QIB_I_RCVAVAIL_LSB + ctxt;
- handler = qib_7322pintr;
- snprintf(dd->cspec->msix_entries[msixnum].name,
- sizeof(dd->cspec->msix_entries[msixnum].name)
- - 1,
- QIB_DRV_NAME "%d (kctx)", dd->unit);
- }
- ret = request_irq(
- dd->cspec->msix_entries[msixnum].msix.vector,
- handler, 0, dd->cspec->msix_entries[msixnum].name,
- arg);
- if (ret) {
- /*
- * Shouldn't happen since the enable said we could
- * have as many as we are trying to set up here.
- */
- qib_dev_err(dd,
- "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
- msixnum,
- dd->cspec->msix_entries[msixnum].msix.vector,
- ret);
- qib_7322_nomsix(dd);
- goto try_intx;
- }
- dd->cspec->msix_entries[msixnum].arg = arg;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dd->cspec->msix_entries[msixnum].dca = dca;
- dd->cspec->msix_entries[msixnum].rcv =
- handler == qib_7322pintr;
-#endif
- if (lsb >= 0) {
- reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
- sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
- SYM_LSB(IntRedirect0, vec1);
- mask &= ~(1ULL << lsb);
- redirect[reg] |= ((u64) msixnum) << sh;
- }
- val = qib_read_kreg64(dd, 2 * msixnum + 1 +
- (QIB_7322_MsixTable_OFFS / sizeof(u64)));
- if (firstcpu < nr_cpu_ids &&
- zalloc_cpumask_var(
- &dd->cspec->msix_entries[msixnum].mask,
- GFP_KERNEL)) {
- if (handler == qib_7322pintr) {
- cpumask_set_cpu(currrcvcpu,
- dd->cspec->msix_entries[msixnum].mask);
- currrcvcpu = cpumask_next(currrcvcpu,
- local_mask);
- if (currrcvcpu >= nr_cpu_ids)
- currrcvcpu = secondcpu;
- } else {
- cpumask_set_cpu(firstcpu,
- dd->cspec->msix_entries[msixnum].mask);
- }
- irq_set_affinity_hint(
- dd->cspec->msix_entries[msixnum].msix.vector,
- dd->cspec->msix_entries[msixnum].mask);
- }
- msixnum++;
- }
- /* Initialize the vector mapping */
- for (i = 0; i < ARRAY_SIZE(redirect); i++)
- qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
- dd->cspec->main_int_mask = mask;
- tasklet_init(&dd->error_tasklet, qib_error_tasklet,
- (unsigned long)dd);
-bail:;
-}
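The redirect-register packing in the MSI-X loop above (reg = lsb / IBA7322_REDIRECT_VEC_PER_REG, shift = remainder times the field width) can be sketched on its own. The constants below are hypothetical, chosen only to show the arithmetic, not taken from the chip documentation.

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Illustrative only: VEC_PER_REG and VEC_FIELD_BITS are hypothetical
     * stand-ins for IBA7322_REDIRECT_VEC_PER_REG and the field width implied
     * by SYM_LSB(IntRedirect0, vec1).
     */
    #define VEC_PER_REG    12  /* vector fields packed into one redirect reg */
    #define VEC_FIELD_BITS 5   /* bits per vector field */

    static void redirect_vector(uint64_t redirect[6], unsigned int lsb,
                                unsigned int msixnum)
    {
        unsigned int reg = lsb / VEC_PER_REG;
        unsigned int sh = (lsb % VEC_PER_REG) * VEC_FIELD_BITS;

        redirect[reg] |= (uint64_t)msixnum << sh;
    }

    int main(void)
    {
        uint64_t redirect[6] = { 0 };

        redirect_vector(redirect, 17, 3);  /* source bit 17 -> MSI-X vector 3 */
        printf("redirect[1] = 0x%llx\n", (unsigned long long)redirect[1]);
        return 0;
    }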
-
-/**
- * qib_7322_boardname - fill in the board name and note features
- * @dd: the qlogic_ib device
- *
- * info will be based on the board revision register
- */
-static unsigned qib_7322_boardname(struct qib_devdata *dd)
-{
- /* Will need enumeration of board-types here */
- char *n;
- u32 boardid, namelen;
- unsigned features = DUAL_PORT_CAP;
-
- boardid = SYM_FIELD(dd->revision, Revision, BoardID);
-
- switch (boardid) {
- case 0:
- n = "InfiniPath_QLE7342_Emulation";
- break;
- case 1:
- n = "InfiniPath_QLE7340";
- dd->flags |= QIB_HAS_QSFP;
- features = PORT_SPD_CAP;
- break;
- case 2:
- n = "InfiniPath_QLE7342";
- dd->flags |= QIB_HAS_QSFP;
- break;
- case 3:
- n = "InfiniPath_QMI7342";
- break;
- case 4:
- n = "InfiniPath_Unsupported7342";
- qib_dev_err(dd, "Unsupported version of QMH7342\n");
- features = 0;
- break;
- case BOARD_QMH7342:
- n = "InfiniPath_QMH7342";
- features = 0x24;
- break;
- case BOARD_QME7342:
- n = "InfiniPath_QME7342";
- break;
- case 8:
- n = "InfiniPath_QME7362";
- dd->flags |= QIB_HAS_QSFP;
- break;
- case BOARD_QMH7360:
- n = "Intel IB QDR 1P FLR-QSFP Adptr";
- dd->flags |= QIB_HAS_QSFP;
- break;
- case 15:
- n = "InfiniPath_QLE7342_TEST";
- dd->flags |= QIB_HAS_QSFP;
- break;
- default:
- n = "InfiniPath_QLE73xy_UNKNOWN";
- qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
- break;
- }
- dd->board_atten = 1; /* index into txdds_Xdr */
-
- namelen = strlen(n) + 1;
- dd->boardname = kmalloc(namelen, GFP_KERNEL);
- if (!dd->boardname)
- qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
- else
- snprintf(dd->boardname, namelen, "%s", n);
-
- snprintf(dd->boardversion, sizeof(dd->boardversion),
- "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
- QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
- (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
- dd->majrev, dd->minrev,
- (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
-
- if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
- qib_devinfo(dd->pcidev,
- "IB%u: Forced to single port mode by module parameter\n",
- dd->unit);
- features &= PORT_SPD_CAP;
- }
-
- return features;
-}
-
-/*
- * This routine sleeps, so it can only be called from user context, not
- * from interrupt context.
- */
-static int qib_do_7322_reset(struct qib_devdata *dd)
-{
- u64 val;
- u64 *msix_vecsave;
- int i, msix_entries, ret = 1;
- u16 cmdval;
- u8 int_line, clinesz;
- unsigned long flags;
-
- /* Use dev_err so it shows up in logs, etc. */
- qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
-
- qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
-
- msix_entries = dd->cspec->num_msix_entries;
-
- /* no interrupts till re-initted */
- qib_7322_set_intr_state(dd, 0);
-
- if (msix_entries) {
- qib_7322_nomsix(dd);
- /* can be up to 512 bytes, too big for stack */
- msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
- sizeof(u64), GFP_KERNEL);
- if (!msix_vecsave)
- qib_dev_err(dd, "No mem to save MSIx data\n");
- } else
- msix_vecsave = NULL;
-
- /*
- * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
- * info that is set up by the BIOS, so we have to save and restore
- * it ourselves. There is some risk something could change it,
- * after we save it, but since we have disabled the MSIx, it
- * shouldn't be touched...
- */
- for (i = 0; i < msix_entries; i++) {
- u64 vecaddr, vecdata;
-
- vecaddr = qib_read_kreg64(dd, 2 * i +
- (QIB_7322_MsixTable_OFFS / sizeof(u64)));
- vecdata = qib_read_kreg64(dd, 1 + 2 * i +
- (QIB_7322_MsixTable_OFFS / sizeof(u64)));
- if (msix_vecsave) {
- msix_vecsave[2 * i] = vecaddr;
- /* save it without the masked bit set */
- msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
- }
- }
-
- dd->pport->cpspec->ibdeltainprog = 0;
- dd->pport->cpspec->ibsymdelta = 0;
- dd->pport->cpspec->iblnkerrdelta = 0;
- dd->pport->cpspec->ibmalfdelta = 0;
- /* so we check interrupts work again */
- dd->z_int_counter = qib_int_counter(dd);
-
- /*
- * Keep chip from being accessed until we are ready. Use
- * writeq() directly, to allow the write even though QIB_PRESENT
- * isn't set.
- */
- dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
- dd->flags |= QIB_DOING_RESET;
- val = dd->control | QLOGIC_IB_C_RESET;
- writeq(val, &dd->kregbase[kr_control]);
-
- for (i = 1; i <= 5; i++) {
- /*
- * Allow MBIST, etc. to complete; longer on each retry.
- * We sometimes get machine checks from bus timeout if no
- * response, so for now, make it *really* long.
- */
- msleep(1000 + (1 + i) * 3000);
-
- qib_pcie_reenable(dd, cmdval, int_line, clinesz);
-
- /*
- * Use readq directly, so we don't need to mark it as PRESENT
- * until we get a successful indication that all is well.
- */
- val = readq(&dd->kregbase[kr_revision]);
- if (val == dd->revision)
- break;
- if (i == 5) {
- qib_dev_err(dd,
- "Failed to initialize after reset, unusable\n");
- ret = 0;
- goto bail;
- }
- }
-
- dd->flags |= QIB_PRESENT; /* it's back */
-
- if (msix_entries) {
- /* restore the MSIx vector address and data if saved above */
- for (i = 0; i < msix_entries; i++) {
- dd->cspec->msix_entries[i].msix.entry = i;
- if (!msix_vecsave || !msix_vecsave[2 * i])
- continue;
- qib_write_kreg(dd, 2 * i +
- (QIB_7322_MsixTable_OFFS / sizeof(u64)),
- msix_vecsave[2 * i]);
- qib_write_kreg(dd, 1 + 2 * i +
- (QIB_7322_MsixTable_OFFS / sizeof(u64)),
- msix_vecsave[1 + 2 * i]);
- }
- }
-
- /* initialize the remaining registers. */
- for (i = 0; i < dd->num_pports; ++i)
- write_7322_init_portregs(&dd->pport[i]);
- write_7322_initregs(dd);
-
- if (qib_pcie_params(dd, dd->lbus_width,
- &dd->cspec->num_msix_entries,
- dd->cspec->msix_entries))
- qib_dev_err(dd,
- "Reset failed to setup PCIe or interrupts; continuing anyway\n");
-
- qib_setup_7322_interrupt(dd, 1);
-
- for (i = 0; i < dd->num_pports; ++i) {
- struct qib_pportdata *ppd = &dd->pport[i];
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
- ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- }
-
-bail:
- dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
- kfree(msix_vecsave);
- return ret;
-}
-
-/**
- * qib_7322_put_tid - write a TID to the chip
- * @dd: the qlogic_ib device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @type: 0 for eager, 1 for expected
- * @pa: physical address of in memory buffer; tidinvalid if freeing
- */
-static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
- u32 type, unsigned long pa)
-{
- if (!(dd->flags & QIB_PRESENT))
- return;
- if (pa != dd->tidinvalid) {
- u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
-
- /* paranoia checks */
- if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
- qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
- pa);
- return;
- }
- if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
- qib_dev_err(dd,
- "Physical page address 0x%lx larger than supported\n",
- pa);
- return;
- }
-
- if (type == RCVHQ_RCV_TYPE_EAGER)
- chippa |= dd->tidtemplate;
- else /* for now, always full 4KB page */
- chippa |= IBA7322_TID_SZ_4K;
- pa = chippa;
- }
- writeq(pa, tidptr);
- mmiowb();
-}
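A minimal sketch of the TID packing done above, assuming hypothetical shift values in place of IBA7322_TID_PA_SHIFT / IBA7322_TID_SZ_SHIFT: the physical address must be 2KB aligned and small enough to fit the chip's field, and the caller then ORs in the size/type template bits.

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Illustrative only: TID_PA_SHIFT and TID_SZ_SHIFT are hypothetical
     * stand-ins for IBA7322_TID_PA_SHIFT / IBA7322_TID_SZ_SHIFT.
     */
    #define TID_PA_SHIFT 11  /* entries must be 2KB aligned */
    #define TID_SZ_SHIFT 39  /* max width of the shifted address */

    static int pack_tid(unsigned long pa, uint64_t *out)
    {
        uint64_t chippa = (uint64_t)pa >> TID_PA_SHIFT;

        if (pa != (unsigned long)(chippa << TID_PA_SHIFT))
            return -1;  /* not 2KB aligned */
        if (chippa >= (1ULL << TID_SZ_SHIFT))
            return -1;  /* address too large for the chip's field */
        *out = chippa;  /* caller ORs in the size/type template bits */
        return 0;
    }

    int main(void)
    {
        uint64_t tid;

        if (pack_tid(0x12345800UL, &tid) == 0)
            printf("tid = 0x%llx\n", (unsigned long long)tid);
        return 0;
    }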
-
-/**
- * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
- * @dd: the qlogic_ib device
- * @rcd: the context
- *
- * clear all TID entries for a ctxt, expected and eager.
- * Used from qib_close().
- */
-static void qib_7322_clear_tids(struct qib_devdata *dd,
- struct qib_ctxtdata *rcd)
-{
- u64 __iomem *tidbase;
- unsigned long tidinv;
- u32 ctxt;
- int i;
-
- if (!dd->kregbase || !rcd)
- return;
-
- ctxt = rcd->ctxt;
-
- tidinv = dd->tidinvalid;
- tidbase = (u64 __iomem *)
- ((char __iomem *) dd->kregbase +
- dd->rcvtidbase +
- ctxt * dd->rcvtidcnt * sizeof(*tidbase));
-
- for (i = 0; i < dd->rcvtidcnt; i++)
- qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
- tidinv);
-
- tidbase = (u64 __iomem *)
- ((char __iomem *) dd->kregbase +
- dd->rcvegrbase +
- rcd->rcvegr_tid_base * sizeof(*tidbase));
-
- for (i = 0; i < rcd->rcvegrcnt; i++)
- qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
- tidinv);
-}
-
-/**
- * qib_7322_tidtemplate - setup constants for TID updates
- * @dd: the qlogic_ib device
- *
- * We set up values we use a lot, to avoid calculating them each time
- */
-static void qib_7322_tidtemplate(struct qib_devdata *dd)
-{
- /*
- * For now, we always allocate 4KB buffers (at init) so we can
- * receive max size packets. We may want a module parameter to
- * specify 2KB or 4KB and/or make it per port instead of per device
- * for those who want to reduce memory footprint. Note that the
- * rcvhdrentsize must be large enough to hold the largest
- * IB header (currently 96 bytes) that we expect to handle (plus of
- * course the 2 dwords of RHF).
- */
- if (dd->rcvegrbufsize == 2048)
- dd->tidtemplate = IBA7322_TID_SZ_2K;
- else if (dd->rcvegrbufsize == 4096)
- dd->tidtemplate = IBA7322_TID_SZ_4K;
- dd->tidinvalid = 0;
-}
-
-/**
- * qib_init_7322_get_base_info - set chip-specific flags for user code
- * @rcd: the qlogic_ib ctxt
- * @kbase: qib_base_info pointer
- *
- * We set the PCIE flag because the lower bandwidth on PCIe vs
- * HyperTransport can affect some user packet algorithms.
- */
-
-static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
- struct qib_base_info *kinfo)
-{
- kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
- QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
- QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
- if (rcd->dd->cspec->r1)
- kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
- if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
- kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
-
- return 0;
-}
-
-static struct qib_message_header *
-qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
-{
- u32 offset = qib_hdrget_offset(rhf_addr);
-
- return (struct qib_message_header *)
- (rhf_addr - dd->rhf_offset + offset);
-}
-
-/*
- * Configure number of contexts.
- */
-static void qib_7322_config_ctxts(struct qib_devdata *dd)
-{
- unsigned long flags;
- u32 nchipctxts;
-
- nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
- dd->cspec->numctxts = nchipctxts;
- if (qib_n_krcv_queues > 1 && dd->num_pports) {
- dd->first_user_ctxt = NUM_IB_PORTS +
- (qib_n_krcv_queues - 1) * dd->num_pports;
- if (dd->first_user_ctxt > nchipctxts)
- dd->first_user_ctxt = nchipctxts;
- dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
- } else {
- dd->first_user_ctxt = NUM_IB_PORTS;
- dd->n_krcv_queues = 1;
- }
-
- if (!qib_cfgctxts) {
- int nctxts = dd->first_user_ctxt + num_online_cpus();
-
- if (nctxts <= 6)
- dd->ctxtcnt = 6;
- else if (nctxts <= 10)
- dd->ctxtcnt = 10;
- else if (nctxts <= nchipctxts)
- dd->ctxtcnt = nchipctxts;
- } else if (qib_cfgctxts < dd->num_pports)
- dd->ctxtcnt = dd->num_pports;
- else if (qib_cfgctxts <= nchipctxts)
- dd->ctxtcnt = qib_cfgctxts;
- if (!dd->ctxtcnt) /* none of the above, set to max */
- dd->ctxtcnt = nchipctxts;
-
- /*
- * Chip can be configured for 6, 10, or 18 ctxts, and choice
- * affects number of eager TIDs per ctxt (1K, 2K, 4K).
- * Lock to be paranoid about later motion, etc.
- */
- spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
- if (dd->ctxtcnt > 10)
- dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
- else if (dd->ctxtcnt > 6)
- dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
- /* else configure for default 6 receive ctxts */
-
- /* The XRC opcode is 5. */
- dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
-
- /*
- * RcvCtrl *must* be written here so that the
- * chip understands how to change rcvegrcnt below.
- */
- qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
- spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
-
- /* kr_rcvegrcnt changes based on the number of contexts enabled */
- dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
- if (qib_rcvhdrcnt)
- dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
- else
- dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
- dd->num_pports > 1 ? 1024U : 2048U);
-}
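The context-count rounding above (the chip supports 6, 10, or 18 receive contexts, capped at what it reports in kr_contextcnt) reduces to a small helper. The sketch below is illustrative only.

    #include <stdio.h>

    /*
     * Illustrative only: the 7322 supports 6, 10, or 18 receive contexts;
     * the config code rounds the requested count up to the next supported
     * size, capped at what the chip reports (nchipctxts).
     */
    static unsigned int round_ctxts(unsigned int wanted, unsigned int nchipctxts)
    {
        if (wanted <= 6)
            return 6;
        if (wanted <= 10)
            return 10;
        return nchipctxts;  /* 18 on the 7322; never exceed the chip */
    }

    int main(void)
    {
        printf("%u\n", round_ctxts(8, 18));   /* -> 10 */
        printf("%u\n", round_ctxts(14, 18));  /* -> 18 */
        return 0;
    }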
-
-static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
-{
-
- int lsb, ret = 0;
- u64 maskr; /* right-justified mask */
-
- switch (which) {
-
- case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
- ret = ppd->link_width_enabled;
- goto done;
-
- case QIB_IB_CFG_LWID: /* Get currently active Link-width */
- ret = ppd->link_width_active;
- goto done;
-
- case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
- ret = ppd->link_speed_enabled;
- goto done;
-
- case QIB_IB_CFG_SPD: /* Get current Link spd */
- ret = ppd->link_speed_active;
- goto done;
-
- case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
- lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
- maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
- break;
-
- case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
- lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
- maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
- break;
-
- case QIB_IB_CFG_LINKLATENCY:
- ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
- SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
- goto done;
-
- case QIB_IB_CFG_OP_VLS:
- ret = ppd->vls_operational;
- goto done;
-
- case QIB_IB_CFG_VL_HIGH_CAP:
- ret = 16;
- goto done;
-
- case QIB_IB_CFG_VL_LOW_CAP:
- ret = 16;
- goto done;
-
- case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
- ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
- OverrunThreshold);
- goto done;
-
- case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
- ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
- PhyerrThreshold);
- goto done;
-
- case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
- /* will only take effect when the link state changes */
- ret = (ppd->cpspec->ibcctrl_a &
- SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
- IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
- goto done;
-
- case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
- lsb = IBA7322_IBC_HRTBT_LSB;
- maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
- break;
-
- case QIB_IB_CFG_PMA_TICKS:
- /*
- * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
- * Since the clock is always 250MHz, the value is 3, 1 or 0.
- */
- if (ppd->link_speed_active == QIB_IB_QDR)
- ret = 3;
- else if (ppd->link_speed_active == QIB_IB_DDR)
- ret = 1;
- else
- ret = 0;
- goto done;
-
- default:
- ret = -EINVAL;
- goto done;
- }
- ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
-done:
- return ret;
-}
-
-/*
- * Below again cribbed liberally from older version. Do not lean
- * heavily on it.
- */
-#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
-#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
- | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
-
-static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 maskr; /* right-justified mask */
- int lsb, ret = 0;
- u16 lcmd, licmd;
- unsigned long flags;
-
- switch (which) {
- case QIB_IB_CFG_LIDLMC:
- /*
- * Set LID and LMC. Combined to avoid possible hazard; the
- * caller puts LMC in the 16 MSbits, DLID in the 16 LSbits of val.
- */
- lsb = IBA7322_IBC_DLIDLMC_SHIFT;
- maskr = IBA7322_IBC_DLIDLMC_MASK;
- /*
- * For header-checking, the SLID in the packet will
- * be masked with SendIBSLMCMask, and compared
- * with SendIBSLIDAssignMask. Make sure we do not
- * set any bits not covered by the mask, or we get
- * false-positives.
- */
- qib_write_kreg_port(ppd, krp_sendslid,
- val & (val >> 16) & SendIBSLIDAssignMask);
- qib_write_kreg_port(ppd, krp_sendslidmask,
- (val >> 16) & SendIBSLMCMask);
- break;
-
- case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
- ppd->link_width_enabled = val;
- /* convert IB value to chip register value */
- if (val == IB_WIDTH_1X)
- val = 0;
- else if (val == IB_WIDTH_4X)
- val = 1;
- else
- val = 3;
- maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
- lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
- break;
-
- case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
- /*
- * As with width, only write the actual register if the
- * link is currently down, otherwise takes effect on next
- * link change. Since setting is being explicitly requested
- * (via MAD or sysfs), clear autoneg failure status if speed
- * autoneg is enabled.
- */
- ppd->link_speed_enabled = val;
- val <<= IBA7322_IBC_SPEED_LSB;
- maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
- IBA7322_IBC_MAX_SPEED_MASK;
- if (val & (val - 1)) {
- /* Multiple speeds enabled */
- val |= IBA7322_IBC_IBTA_1_2_MASK |
- IBA7322_IBC_MAX_SPEED_MASK;
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- } else if (val & IBA7322_IBC_SPEED_QDR)
- val |= IBA7322_IBC_IBTA_1_2_MASK;
- /* IBTA 1.2 mode + min/max + speed bits are contiguous */
- lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
- break;
-
- case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
- lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
- maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
- break;
-
- case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
- lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
- maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
- break;
-
- case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
- maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
- OverrunThreshold);
- if (maskr != val) {
- ppd->cpspec->ibcctrl_a &=
- ~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
- ppd->cpspec->ibcctrl_a |= (u64) val <<
- SYM_LSB(IBCCtrlA_0, OverrunThreshold);
- qib_write_kreg_port(ppd, krp_ibcctrl_a,
- ppd->cpspec->ibcctrl_a);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- }
- goto bail;
-
- case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
- maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
- PhyerrThreshold);
- if (maskr != val) {
- ppd->cpspec->ibcctrl_a &=
- ~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
- ppd->cpspec->ibcctrl_a |= (u64) val <<
- SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
- qib_write_kreg_port(ppd, krp_ibcctrl_a,
- ppd->cpspec->ibcctrl_a);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- }
- goto bail;
-
- case QIB_IB_CFG_PKEYS: /* update pkeys */
- maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
- ((u64) ppd->pkeys[2] << 32) |
- ((u64) ppd->pkeys[3] << 48);
- qib_write_kreg_port(ppd, krp_partitionkey, maskr);
- goto bail;
-
- case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
- /* will only take effect when the link state changes */
- if (val == IB_LINKINITCMD_POLL)
- ppd->cpspec->ibcctrl_a &=
- ~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
- else /* SLEEP */
- ppd->cpspec->ibcctrl_a |=
- SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
- qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- goto bail;
-
- case QIB_IB_CFG_MTU: /* update the MTU in IBC */
- /*
- * Update our housekeeping variables, and set IBC max
- * size, same as init code; max IBC is max we allow in
- * buffer, less the qword pbc, plus 1 for ICRC, in dwords.
- * Set even if it's unchanged; print a debug message only
- * on changes.
- */
- val = (ppd->ibmaxlen >> 2) + 1;
- ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
- ppd->cpspec->ibcctrl_a |= (u64)val <<
- SYM_LSB(IBCCtrlA_0, MaxPktLen);
- qib_write_kreg_port(ppd, krp_ibcctrl_a,
- ppd->cpspec->ibcctrl_a);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- goto bail;
-
- case QIB_IB_CFG_LSTATE: /* set the IB link state */
- switch (val & 0xffff0000) {
- case IB_LINKCMD_DOWN:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
- ppd->cpspec->ibmalfusesnap = 1;
- ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
- crp_errlink);
- if (!ppd->cpspec->ibdeltainprog &&
- qib_compat_ddr_negotiate) {
- ppd->cpspec->ibdeltainprog = 1;
- ppd->cpspec->ibsymsnap =
- read_7322_creg32_port(ppd,
- crp_ibsymbolerr);
- ppd->cpspec->iblnkerrsnap =
- read_7322_creg32_port(ppd,
- crp_iblinkerrrecov);
- }
- break;
-
- case IB_LINKCMD_ARMED:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
- if (ppd->cpspec->ibmalfusesnap) {
- ppd->cpspec->ibmalfusesnap = 0;
- ppd->cpspec->ibmalfdelta +=
- read_7322_creg32_port(ppd,
- crp_errlink) -
- ppd->cpspec->ibmalfsnap;
- }
- break;
-
- case IB_LINKCMD_ACTIVE:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
- break;
-
- default:
- ret = -EINVAL;
- qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
- goto bail;
- }
- switch (val & 0xffff) {
- case IB_LINKINITCMD_NOP:
- licmd = 0;
- break;
-
- case IB_LINKINITCMD_POLL:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
- break;
-
- case IB_LINKINITCMD_SLEEP:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
- break;
-
- case IB_LINKINITCMD_DISABLE:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
- ppd->cpspec->chase_end = 0;
- /*
- * stop state chase counter and timer, if running.
- * wait for pending timer, but don't clear .data (ppd)!
- */
- if (ppd->cpspec->chase_timer.expires) {
- del_timer_sync(&ppd->cpspec->chase_timer);
- ppd->cpspec->chase_timer.expires = 0;
- }
- break;
-
- default:
- ret = -EINVAL;
- qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
- val & 0xffff);
- goto bail;
- }
- qib_set_ib_7322_lstate(ppd, lcmd, licmd);
- goto bail;
-
- case QIB_IB_CFG_OP_VLS:
- if (ppd->vls_operational != val) {
- ppd->vls_operational = val;
- set_vls(ppd);
- }
- goto bail;
-
- case QIB_IB_CFG_VL_HIGH_LIMIT:
- qib_write_kreg_port(ppd, krp_highprio_limit, val);
- goto bail;
-
- case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
- if (val > 3) {
- ret = -EINVAL;
- goto bail;
- }
- lsb = IBA7322_IBC_HRTBT_LSB;
- maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
- break;
-
- case QIB_IB_CFG_PORT:
- /* val is the port number of the switch we are connected to. */
- if (ppd->dd->cspec->r1) {
- cancel_delayed_work(&ppd->cpspec->ipg_work);
- ppd->cpspec->ipg_tries = 0;
- }
- goto bail;
-
- default:
- ret = -EINVAL;
- goto bail;
- }
- ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
- ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
- qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
- qib_write_kreg(dd, kr_scratch, 0);
-bail:
- return ret;
-}
-
-static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
-{
- int ret = 0;
- u64 val, ctrlb;
-
- /* only IBC loopback, may add serdes and xgxs loopbacks later */
- if (!strncmp(what, "ibc", 3)) {
- ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
- Loopback);
- val = 0; /* disable heart beat, so link will come up */
- qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
- ppd->dd->unit, ppd->port);
- } else if (!strncmp(what, "off", 3)) {
- ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
- Loopback);
- /* enable heart beat again */
- val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
- qib_devinfo(ppd->dd->pcidev,
- "Disabling IB%u:%u IBC loopback (normal)\n",
- ppd->dd->unit, ppd->port);
- } else
- ret = -EINVAL;
- if (!ret) {
- qib_write_kreg_port(ppd, krp_ibcctrl_a,
- ppd->cpspec->ibcctrl_a);
- ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
- << IBA7322_IBC_HRTBT_LSB);
- ppd->cpspec->ibcctrl_b = ctrlb | val;
- qib_write_kreg_port(ppd, krp_ibcctrl_b,
- ppd->cpspec->ibcctrl_b);
- qib_write_kreg(ppd->dd, kr_scratch, 0);
- }
- return ret;
-}
-
-static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
- struct ib_vl_weight_elem *vl)
-{
- unsigned i;
-
- for (i = 0; i < 16; i++, regno++, vl++) {
- u32 val = qib_read_kreg_port(ppd, regno);
-
- vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
- SYM_RMASK(LowPriority0_0, VirtualLane);
- vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
- SYM_RMASK(LowPriority0_0, Weight);
- }
-}
-
-static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
- struct ib_vl_weight_elem *vl)
-{
- unsigned i;
-
- for (i = 0; i < 16; i++, regno++, vl++) {
- u64 val;
-
- val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
- SYM_LSB(LowPriority0_0, VirtualLane)) |
- ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
- SYM_LSB(LowPriority0_0, Weight));
- qib_write_kreg_port(ppd, regno, val);
- }
- if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
- qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
- }
-}
-
-static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
-{
- switch (which) {
- case QIB_IB_TBL_VL_HIGH_ARB:
- get_vl_weights(ppd, krp_highprio_0, t);
- break;
-
- case QIB_IB_TBL_VL_LOW_ARB:
- get_vl_weights(ppd, krp_lowprio_0, t);
- break;
-
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
-{
- switch (which) {
- case QIB_IB_TBL_VL_HIGH_ARB:
- set_vl_weights(ppd, krp_highprio_0, t);
- break;
-
- case QIB_IB_TBL_VL_LOW_ARB:
- set_vl_weights(ppd, krp_lowprio_0, t);
- break;
-
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
- u32 updegr, u32 egrhd, u32 npkts)
-{
- /*
- * Need to write timeout register before updating rcvhdrhead to ensure
- * that the timer is enabled on reception of a packet.
- */
- if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
- adjust_rcv_timeout(rcd, npkts);
- if (updegr)
- qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
- mmiowb();
- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
- mmiowb();
-}
-
-static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
-{
- u32 head, tail;
-
- head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
- if (rcd->rcvhdrtail_kvaddr)
- tail = qib_get_rcvhdrtail(rcd);
- else
- tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
- return head == tail;
-}
-
-#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
- QIB_RCVCTRL_CTXT_DIS | \
- QIB_RCVCTRL_TIDFLOW_ENB | \
- QIB_RCVCTRL_TIDFLOW_DIS | \
- QIB_RCVCTRL_TAILUPD_ENB | \
- QIB_RCVCTRL_TAILUPD_DIS | \
- QIB_RCVCTRL_INTRAVAIL_ENB | \
- QIB_RCVCTRL_INTRAVAIL_DIS | \
- QIB_RCVCTRL_BP_ENB | \
- QIB_RCVCTRL_BP_DIS)
-
-#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
- QIB_RCVCTRL_CTXT_DIS | \
- QIB_RCVCTRL_PKEY_DIS | \
- QIB_RCVCTRL_PKEY_ENB)
-
-/*
- * Modify the RCVCTRL register in chip-specific way. This
- * is a function because bit positions and (future) register
- * location are chip-specific, but the needed operations are
- * generic. <op> is a bit-mask because we often want to
- * do multiple modifications.
- */
-static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
- int ctxt)
-{
- struct qib_devdata *dd = ppd->dd;
- struct qib_ctxtdata *rcd;
- u64 mask, val;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
-
- if (op & QIB_RCVCTRL_TIDFLOW_ENB)
- dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
- if (op & QIB_RCVCTRL_TIDFLOW_DIS)
- dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
- if (op & QIB_RCVCTRL_TAILUPD_ENB)
- dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
- if (op & QIB_RCVCTRL_TAILUPD_DIS)
- dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
- if (op & QIB_RCVCTRL_PKEY_ENB)
- ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
- if (op & QIB_RCVCTRL_PKEY_DIS)
- ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
- if (ctxt < 0) {
- mask = (1ULL << dd->ctxtcnt) - 1;
- rcd = NULL;
- } else {
- mask = (1ULL << ctxt);
- rcd = dd->rcd[ctxt];
- }
- if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
- ppd->p_rcvctrl |=
- (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
- if (!(dd->flags & QIB_NODMA_RTAIL)) {
- op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
- dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
- }
- /* Write these registers before the context is enabled. */
- qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
- rcd->rcvhdrqtailaddr_phys);
- qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
- rcd->rcvhdrq_phys);
- rcd->seq_cnt = 1;
- }
- if (op & QIB_RCVCTRL_CTXT_DIS)
- ppd->p_rcvctrl &=
- ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
- if (op & QIB_RCVCTRL_BP_ENB)
- dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
- if (op & QIB_RCVCTRL_BP_DIS)
- dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
- if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
- dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
- if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
- dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
- /*
- * Decide which registers to write depending on the ops enabled.
- * Special case is "flush" (no bits set at all)
- * which needs to write both.
- */
- if (op == 0 || (op & RCVCTRL_COMMON_MODS))
- qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
- if (op == 0 || (op & RCVCTRL_PORT_MODS))
- qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
- if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
- /*
- * Init the context registers also; if we were
- * disabled, tail and head should both be zero
- * already from the enable, but since we don't
- * know, we have to do it explicitly.
- */
- val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
- qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
-
- /* be sure enabling write seen; hd/tl should be 0 */
- (void) qib_read_kreg32(dd, kr_scratch);
- val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
- dd->rcd[ctxt]->head = val;
- /* If kctxt, interrupt on next receive. */
- if (ctxt < dd->first_user_ctxt)
- val |= dd->rhdrhead_intr_off;
- qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
- } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
- dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
- /* arm rcv interrupt */
- val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
- qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
- }
- if (op & QIB_RCVCTRL_CTXT_DIS) {
- unsigned f;
-
- /* Now that the context is disabled, clear these registers. */
- if (ctxt >= 0) {
- qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
- qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
- for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
- qib_write_ureg(dd, ur_rcvflowtable + f,
- TIDFLOW_ERRBITS, ctxt);
- } else {
- unsigned i;
-
- for (i = 0; i < dd->cfgctxts; i++) {
- qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
- i, 0);
- qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
- for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
- qib_write_ureg(dd, ur_rcvflowtable + f,
- TIDFLOW_ERRBITS, i);
- }
- }
- }
- spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
-}
-
-/*
- * Modify the SENDCTRL register in chip-specific way. This
- * is a function because there are multiple such registers with
- * slightly different layouts.
- * The chip doesn't allow back-to-back sendctrl writes, so write
- * the scratch register after writing sendctrl.
- *
- * Which register is written depends on the operation.
- * Most operate on the common register, while
- * SEND_ENB and SEND_DIS operate on the per-port ones.
- * SEND_ENB is included in common because it can change SPCL_TRIG
- */
-#define SENDCTRL_COMMON_MODS (\
- QIB_SENDCTRL_CLEAR | \
- QIB_SENDCTRL_AVAIL_DIS | \
- QIB_SENDCTRL_AVAIL_ENB | \
- QIB_SENDCTRL_AVAIL_BLIP | \
- QIB_SENDCTRL_DISARM | \
- QIB_SENDCTRL_DISARM_ALL | \
- QIB_SENDCTRL_SEND_ENB)
-
-#define SENDCTRL_PORT_MODS (\
- QIB_SENDCTRL_CLEAR | \
- QIB_SENDCTRL_SEND_ENB | \
- QIB_SENDCTRL_SEND_DIS | \
- QIB_SENDCTRL_FLUSH)
-
-static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 tmp_dd_sendctrl;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
-
- /* First the dd ones that are "sticky", saved in shadow */
- if (op & QIB_SENDCTRL_CLEAR)
- dd->sendctrl = 0;
- if (op & QIB_SENDCTRL_AVAIL_DIS)
- dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
- else if (op & QIB_SENDCTRL_AVAIL_ENB) {
- dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
- if (dd->flags & QIB_USE_SPCL_TRIG)
- dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
- }
-
- /* Then the ppd ones that are "sticky", saved in shadow */
- if (op & QIB_SENDCTRL_SEND_DIS)
- ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
- else if (op & QIB_SENDCTRL_SEND_ENB)
- ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
-
- if (op & QIB_SENDCTRL_DISARM_ALL) {
- u32 i, last;
-
- tmp_dd_sendctrl = dd->sendctrl;
- last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
- /*
- * Disarm any buffers that are not yet launched,
- * disabling updates until done.
- */
- tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
- for (i = 0; i < last; i++) {
- qib_write_kreg(dd, kr_sendctrl,
- tmp_dd_sendctrl |
- SYM_MASK(SendCtrl, Disarm) | i);
- qib_write_kreg(dd, kr_scratch, 0);
- }
- }
-
- if (op & QIB_SENDCTRL_FLUSH) {
- u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
-
- /*
- * Now drain all the fifos. The Abort bit should never be
- * needed, so for now, at least, we don't use it.
- */
- tmp_ppd_sendctrl |=
- SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
- SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
- SYM_MASK(SendCtrl_0, TxeBypassIbc);
- qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
-
- tmp_dd_sendctrl = dd->sendctrl;
-
- if (op & QIB_SENDCTRL_DISARM)
- tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
- ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
- SYM_LSB(SendCtrl, DisarmSendBuf));
- if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
- (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
- tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
-
- if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
- qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
-
- if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
- qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
-
- if (op & QIB_SENDCTRL_AVAIL_BLIP) {
- qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
-
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
-
- if (op & QIB_SENDCTRL_FLUSH) {
- u32 v;
- /*
- * ensure writes have hit chip, then do a few
- * more reads, to allow DMA of pioavail registers
- * to occur, so in-memory copy is in sync with
- * the chip. Not always safe to sleep.
- */
- v = qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg(dd, kr_scratch, v);
- v = qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg(dd, kr_scratch, v);
- qib_read_kreg32(dd, kr_scratch);
- }
-}
-
-#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
-#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
-#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
-
-/**
- * qib_portcntr_7322 - read a per-port chip counter
- * @ppd: the qlogic_ib pport
- * @reg: the counter to read (not a chip offset)
- */
-static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 ret = 0ULL;
- u16 creg;
- /* 0xffff for unimplemented or synthesized counters */
- static const u32 xlator[] = {
- [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
- [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
- [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
- [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
- [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
- [QIBPORTCNTR_SENDSTALL] = crp_sendstall,
- [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
- [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
- [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
- [QIBPORTCNTR_RCVEBP] = crp_rcvebp,
- [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
- [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
- [QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed for 7322 */
- [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
- [QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
- [QIBPORTCNTR_ERRICRC] = crp_erricrc,
- [QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
- [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
- [QIBPORTCNTR_BADFORMAT] = crp_badformat,
- [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
- [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
- [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
- [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
- [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
- [QIBPORTCNTR_ERRLINK] = crp_errlink,
- [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
- [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
- [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
- [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
- [QIBPORTCNTR_ERRPKEY] = crp_errpkey,
- /*
- * the next 3 aren't really counters, but were implemented
- * as counters in older chips, so still get accessed as
- * though they were counters from this code.
- */
- [QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
- [QIBPORTCNTR_PSSTART] = krp_psstart,
- [QIBPORTCNTR_PSSTAT] = krp_psstat,
- /* pseudo-counter, summed for all ports */
- [QIBPORTCNTR_KHDROVFL] = 0xffff,
- };
-
- if (reg >= ARRAY_SIZE(xlator)) {
- qib_devinfo(ppd->dd->pcidev,
- "Unimplemented portcounter %u\n", reg);
- goto done;
- }
- creg = xlator[reg] & _PORT_CNTR_IDXMASK;
-
- /* handle non-counters and special cases first */
- if (reg == QIBPORTCNTR_KHDROVFL) {
- int i;
-
- /* sum over all kernel contexts (skip if mini_init) */
- for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
- struct qib_ctxtdata *rcd = dd->rcd[i];
-
- if (!rcd || rcd->ppd != ppd)
- continue;
- ret += read_7322_creg32(dd, cr_base_egrovfl + i);
- }
- goto done;
- } else if (reg == QIBPORTCNTR_RXDROPPKT) {
- /*
- * Used as part of the synthesis of port_rcv_errors
- * in the verbs code for IBTA counters. Not needed for 7322,
- * because all the errors are already counted by other cntrs.
- */
- goto done;
- } else if (reg == QIBPORTCNTR_PSINTERVAL ||
- reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
- /* were counters in older chips, now per-port kernel regs */
- ret = qib_read_kreg_port(ppd, creg);
- goto done;
- }
-
- /*
- * Only fast increment counters are 64 bits; use 32 bit reads to
- * avoid two independent reads when on Opteron.
- */
- if (xlator[reg] & _PORT_64BIT_FLAG)
- ret = read_7322_creg_port(ppd, creg);
- else
- ret = read_7322_creg32_port(ppd, creg);
- if (creg == crp_ibsymbolerr) {
- if (ppd->cpspec->ibdeltainprog)
- ret -= ret - ppd->cpspec->ibsymsnap;
- ret -= ppd->cpspec->ibsymdelta;
- } else if (creg == crp_iblinkerrrecov) {
- if (ppd->cpspec->ibdeltainprog)
- ret -= ret - ppd->cpspec->iblnkerrsnap;
- ret -= ppd->cpspec->iblnkerrdelta;
- } else if (creg == crp_errlink)
- ret -= ppd->cpspec->ibmalfdelta;
- else if (creg == crp_iblinkdown)
- ret += ppd->cpspec->iblnkdowndelta;
-done:
- return ret;
-}
-
-/*
- * Device counter names (not port-specific), one line per stat,
- * single string. Used by utilities like ipathstats to print the stats
- * in a way which works for different versions of drivers, without changing
- * the utility. Names need to be 12 chars or less (w/o newline), for proper
- * display by utility.
- * Non-error counters are first.
- * Start of "error" conters is indicated by a leading "E " on the first
- * "error" counter, and doesn't count in label length.
- * The EgrOvfl list needs to be last so we truncate them at the configured
- * context count for the device.
- * cntr7322indices contains the corresponding register indices.
- */
-static const char cntr7322names[] =
- "Interrupts\n"
- "HostBusStall\n"
- "E RxTIDFull\n"
- "RxTIDInvalid\n"
- "RxTIDFloDrop\n" /* 7322 only */
- "Ctxt0EgrOvfl\n"
- "Ctxt1EgrOvfl\n"
- "Ctxt2EgrOvfl\n"
- "Ctxt3EgrOvfl\n"
- "Ctxt4EgrOvfl\n"
- "Ctxt5EgrOvfl\n"
- "Ctxt6EgrOvfl\n"
- "Ctxt7EgrOvfl\n"
- "Ctxt8EgrOvfl\n"
- "Ctxt9EgrOvfl\n"
- "Ctx10EgrOvfl\n"
- "Ctx11EgrOvfl\n"
- "Ctx12EgrOvfl\n"
- "Ctx13EgrOvfl\n"
- "Ctx14EgrOvfl\n"
- "Ctx15EgrOvfl\n"
- "Ctx16EgrOvfl\n"
- "Ctx17EgrOvfl\n"
- ;
-
-static const u32 cntr7322indices[] = {
- cr_lbint | _PORT_64BIT_FLAG,
- cr_lbstall | _PORT_64BIT_FLAG,
- cr_tidfull,
- cr_tidinvalid,
- cr_rxtidflowdrop,
- cr_base_egrovfl + 0,
- cr_base_egrovfl + 1,
- cr_base_egrovfl + 2,
- cr_base_egrovfl + 3,
- cr_base_egrovfl + 4,
- cr_base_egrovfl + 5,
- cr_base_egrovfl + 6,
- cr_base_egrovfl + 7,
- cr_base_egrovfl + 8,
- cr_base_egrovfl + 9,
- cr_base_egrovfl + 10,
- cr_base_egrovfl + 11,
- cr_base_egrovfl + 12,
- cr_base_egrovfl + 13,
- cr_base_egrovfl + 14,
- cr_base_egrovfl + 15,
- cr_base_egrovfl + 16,
- cr_base_egrovfl + 17,
-};
-
-/*
- * same as cntr7322names and cntr7322indices, but for port-specific counters.
- * portcntr7322indices is somewhat complicated by some registers needing
- * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
- */
-static const char portcntr7322names[] =
- "TxPkt\n"
- "TxFlowPkt\n"
- "TxWords\n"
- "RxPkt\n"
- "RxFlowPkt\n"
- "RxWords\n"
- "TxFlowStall\n"
- "TxDmaDesc\n" /* 7220 and 7322-only */
- "E RxDlidFltr\n" /* 7220 and 7322-only */
- "IBStatusChng\n"
- "IBLinkDown\n"
- "IBLnkRecov\n"
- "IBRxLinkErr\n"
- "IBSymbolErr\n"
- "RxLLIErr\n"
- "RxBadFormat\n"
- "RxBadLen\n"
- "RxBufOvrfl\n"
- "RxEBP\n"
- "RxFlowCtlErr\n"
- "RxICRCerr\n"
- "RxLPCRCerr\n"
- "RxVCRCerr\n"
- "RxInvalLen\n"
- "RxInvalPKey\n"
- "RxPktDropped\n"
- "TxBadLength\n"
- "TxDropped\n"
- "TxInvalLen\n"
- "TxUnderrun\n"
- "TxUnsupVL\n"
- "RxLclPhyErr\n" /* 7220 and 7322-only from here down */
- "RxVL15Drop\n"
- "RxVlErr\n"
- "XcessBufOvfl\n"
- "RxQPBadCtxt\n" /* 7322-only from here down */
- "TXBadHeader\n"
- ;
-
-static const u32 portcntr7322indices[] = {
- QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
- crp_pktsendflow,
- QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
- QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
- crp_pktrcvflowctrl,
- QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
- QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
- crp_txsdmadesc | _PORT_64BIT_FLAG,
- crp_rxdlidfltr,
- crp_ibstatuschange,
- QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
- QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
- QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
- QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
- crp_rcvflowctrlviol,
- QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
- crp_txminmaxlenerr,
- crp_txdroppedpkt,
- crp_txlenerr,
- crp_txunderrun,
- crp_txunsupvl,
- QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
- QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
- QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
- crp_rxqpinvalidctxt,
- crp_txhdrerr,
-};
-
-/* do all the setup to make the counter reads efficient later */
-static void init_7322_cntrnames(struct qib_devdata *dd)
-{
- int i, j = 0;
- char *s;
-
- for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
- i++) {
- /* we always have at least one counter before the egrovfl */
- if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
- j = 1;
- s = strchr(s + 1, '\n');
- if (s && j)
- j++;
- }
- dd->cspec->ncntrs = i;
- if (!s)
- /* full list; size is without terminating null */
- dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
- else
- dd->cspec->cntrnamelen = 1 + s - cntr7322names;
- dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
- * sizeof(u64), GFP_KERNEL);
- if (!dd->cspec->cntrs)
- qib_dev_err(dd, "Failed allocation for counters\n");
-
- for (i = 0, s = (char *)portcntr7322names; s; i++)
- s = strchr(s + 1, '\n');
- dd->cspec->nportcntrs = i - 1;
- dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
- for (i = 0; i < dd->num_pports; ++i) {
- dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
- * sizeof(u64), GFP_KERNEL);
- if (!dd->pport[i].cpspec->portcntrs)
- qib_dev_err(dd,
- "Failed allocation for portcounters\n");
- }
-}
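The name-counting walk used above operates on a single newline-separated string; here is the same strchr() loop on a tiny stand-in list, illustrative only.

    #include <stdio.h>
    #include <string.h>

    /*
     * Illustrative only: a tiny stand-in list showing the strchr() walk the
     * init code uses to count entries in a newline-separated name string.
     */
    static const char names[] =
        "Interrupts\n"
        "HostBusStall\n"
        "E RxTIDFull\n";

    int main(void)
    {
        const char *s;
        int n = 0;

        for (s = names; s; n++)
            s = strchr(s + 1, '\n');
        /* the loop counts one extra pass when strchr() returns NULL */
        printf("%d counters\n", n - 1);
        return 0;
    }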
-
-static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
- u64 **cntrp)
-{
- u32 ret;
-
- if (namep) {
- ret = dd->cspec->cntrnamelen;
- if (pos >= ret)
- ret = 0; /* final read after getting everything */
- else
- *namep = (char *) cntr7322names;
- } else {
- u64 *cntr = dd->cspec->cntrs;
- int i;
-
- ret = dd->cspec->ncntrs * sizeof(u64);
- if (!cntr || pos >= ret) {
- /* everything read, or couldn't get memory */
- ret = 0;
- goto done;
- }
- *cntrp = cntr;
- for (i = 0; i < dd->cspec->ncntrs; i++)
- if (cntr7322indices[i] & _PORT_64BIT_FLAG)
- *cntr++ = read_7322_creg(dd,
- cntr7322indices[i] &
- _PORT_CNTR_IDXMASK);
- else
- *cntr++ = read_7322_creg32(dd,
- cntr7322indices[i]);
- }
-done:
- return ret;
-}
-
-static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
- char **namep, u64 **cntrp)
-{
- u32 ret;
-
- if (namep) {
- ret = dd->cspec->portcntrnamelen;
- if (pos >= ret)
- ret = 0; /* final read after getting everything */
- else
- *namep = (char *)portcntr7322names;
- } else {
- struct qib_pportdata *ppd = &dd->pport[port];
- u64 *cntr = ppd->cpspec->portcntrs;
- int i;
-
- ret = dd->cspec->nportcntrs * sizeof(u64);
- if (!cntr || pos >= ret) {
- /* everything read, or couldn't get memory */
- ret = 0;
- goto done;
- }
- *cntrp = cntr;
- for (i = 0; i < dd->cspec->nportcntrs; i++) {
- if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
- *cntr++ = qib_portcntr_7322(ppd,
- portcntr7322indices[i] &
- _PORT_CNTR_IDXMASK);
- else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
- *cntr++ = read_7322_creg_port(ppd,
- portcntr7322indices[i] &
- _PORT_CNTR_IDXMASK);
- else
- *cntr++ = read_7322_creg32_port(ppd,
- portcntr7322indices[i]);
- }
- }
-done:
- return ret;
-}
-
-/**
- * qib_get_7322_faststats - get word counters from chip before they overflow
- * @opaque: contains a pointer to the qlogic_ib device qib_devdata
- *
- * VESTIGIAL: the IBA7322 has no "small fast counters", so the only
- * real purpose of this function is to maintain the notion of
- * "active time", which in turn is only logged into the eeprom,
- * which we don't have, yet, for 7322-based boards.
- *
- * called from add_timer
- */
-static void qib_get_7322_faststats(unsigned long opaque)
-{
- struct qib_devdata *dd = (struct qib_devdata *) opaque;
- struct qib_pportdata *ppd;
- unsigned long flags;
- u64 traffic_wds;
- int pidx;
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
-
- /*
- * If the port isn't enabled or isn't operational, or if
- * diags are running (which can cause memory diags to fail),
- * skip this port this time.
- */
- if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
- || dd->diag_client)
- continue;
-
- /*
- * Maintain an activity timer, based on traffic
- * exceeding a threshold, so we need to check the word-counts
- * even if they are 64-bit.
- */
- traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
- qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
- spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
- traffic_wds -= ppd->dd->traffic_wds;
- ppd->dd->traffic_wds += traffic_wds;
- spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
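-		/*
-		 * Once the QDR DFE window has expired on an up link,
-		 * turn the DFE off and fall back to the static
-		 * adaptation settings.
-		 */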
- if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
- QIB_IB_QDR) &&
- (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
- QIBL_LINKACTIVE)) &&
- ppd->cpspec->qdr_dfe_time &&
- time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
- ppd->cpspec->qdr_dfe_on = 0;
-
- qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
- ppd->dd->cspec->r1 ?
- QDR_STATIC_ADAPT_INIT_R1 :
- QDR_STATIC_ADAPT_INIT);
- force_h1(ppd);
- }
- }
- mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
-}
-
-/*
- * If we were using MSIx, try to fallback to INTx.
- */
-static int qib_7322_intr_fallback(struct qib_devdata *dd)
-{
- if (!dd->cspec->num_msix_entries)
- return 0; /* already using INTx */
-
- qib_devinfo(dd->pcidev,
- "MSIx interrupt not detected, trying INTx interrupts\n");
- qib_7322_nomsix(dd);
- qib_enable_intx(dd->pcidev);
- qib_setup_7322_interrupt(dd, 0);
- return 1;
-}
-
-/*
- * Reset the XGXS (between serdes and IBC). Slightly less intrusive
- * than resetting the IBC or external link state, and useful in some
- * cases to cause some retraining. To do this right, we reset IBC
- * as well, then return to previous state (which may be still in reset)
- * NOTE: some callers of this "know" this writes the current value
- * of cpspec->ibcctrl_a as part of its operation, so if that changes,
- * check all callers.
- */
-static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
-{
- u64 val;
- struct qib_devdata *dd = ppd->dd;
- const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
- SYM_MASK(IBPCSConfig_0, xcv_treset) |
- SYM_MASK(IBPCSConfig_0, tx_rx_reset);
-
- val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
- qib_write_kreg(dd, kr_hwerrmask,
- dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
- qib_write_kreg_port(ppd, krp_ibcctrl_a,
- ppd->cpspec->ibcctrl_a &
- ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
-
- qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
- qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
- qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- qib_write_kreg(dd, kr_hwerrclear,
- SYM_MASK(HwErrClear, statusValidNoEopClear));
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
-}
-
-/*
- * This code for non-IBTA-compliant IB speed negotiation is only known to
- * work for the SDR to DDR transition, and only between an HCA and a switch
- * with recent firmware. It is based on observed heuristics, rather than
- * actual knowledge of the non-compliant speed negotiation.
- * It has a number of hard-coded fields, since the hope is to rewrite this
- * when a spec is available on how the negotiation is intended to work.
- */
-static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
- u32 dcnt, u32 *data)
-{
- int i;
- u64 pbc;
- u32 __iomem *piobuf;
- u32 pnum, control, len;
- struct qib_devdata *dd = ppd->dd;
-
- i = 0;
- len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
- control = qib_7322_setpbc_control(ppd, len, 0, 15);
- pbc = ((u64) control << 32) | len;
- while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
- if (i++ > 15)
- return;
- udelay(2);
- }
- /* disable header check on this packet, since it can't be valid */
- dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
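-	/* the PBC occupies the first two dwords; header and payload follow */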
- writeq(pbc, piobuf);
- qib_flush_wc();
- qib_pio_copy(piobuf + 2, hdr, 7);
- qib_pio_copy(piobuf + 9, data, dcnt);
- if (dd->flags & QIB_USE_SPCL_TRIG) {
- u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
-
- qib_flush_wc();
- __raw_writel(0xaebecede, piobuf + spcl_off);
- }
- qib_flush_wc();
- qib_sendbuf_done(dd, pnum);
- /* and re-enable hdr check */
- dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
-}
-
-/*
- * _start packet gets sent twice at start, _done gets sent twice at end
- */
-static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
-{
- struct qib_devdata *dd = ppd->dd;
- static u32 swapped;
- u32 dw, i, hcnt, dcnt, *data;
- static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
- static u32 madpayload_start[0x40] = {
- 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
- 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
- };
- static u32 madpayload_done[0x40] = {
- 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
- 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- 0x40000001, 0x1388, 0x15e, /* rest 0's */
- };
-
- dcnt = ARRAY_SIZE(madpayload_start);
- hcnt = ARRAY_SIZE(hdr);
- if (!swapped) {
- /* for maintainability, do it at runtime */
- for (i = 0; i < hcnt; i++) {
- dw = (__force u32) cpu_to_be32(hdr[i]);
- hdr[i] = dw;
- }
- for (i = 0; i < dcnt; i++) {
- dw = (__force u32) cpu_to_be32(madpayload_start[i]);
- madpayload_start[i] = dw;
- dw = (__force u32) cpu_to_be32(madpayload_done[i]);
- madpayload_done[i] = dw;
- }
- swapped = 1;
- }
-
- data = which ? madpayload_done : madpayload_start;
-
- autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
- qib_read_kreg64(dd, kr_scratch);
- udelay(2);
- autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
- qib_read_kreg64(dd, kr_scratch);
- udelay(2);
-}
-
-/*
- * Do the absolute minimum to cause an IB speed change, and make it
- * ready, but don't actually trigger the change. The caller will
- * do that when ready (if link is in Polling training state, it will
- * happen immediately, otherwise when link next goes down)
- *
- * This routine should only be used as part of the DDR autonegotiation
- * code for devices that are not compliant with IB 1.2 (or code that
- * fixes things up for same).
- *
- * When the link has gone down and autoneg is enabled, or autoneg has
- * failed and we give up until next time, we set both speeds, and
- * then we want IBTA enabled as well as "use max enabled speed".
- */
-static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
-{
- u64 newctrlb;
-
- newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
- IBA7322_IBC_IBTA_1_2_MASK |
- IBA7322_IBC_MAX_SPEED_MASK);
-
- if (speed & (speed - 1)) /* multiple speeds */
- newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
- IBA7322_IBC_IBTA_1_2_MASK |
- IBA7322_IBC_MAX_SPEED_MASK;
- else
- newctrlb |= speed == QIB_IB_QDR ?
- IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
- ((speed == QIB_IB_DDR ?
- IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
-
- if (newctrlb == ppd->cpspec->ibcctrl_b)
- return;
-
- ppd->cpspec->ibcctrl_b = newctrlb;
- qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
- qib_write_kreg(ppd->dd, kr_scratch, 0);
-}
-
-/*
- * This routine is only used when we are not talking to another
- * IB 1.2-compliant device that we think can do DDR.
- * (This includes all existing switch chips as of Oct 2007.)
- * 1.2-compliant devices go directly to DDR prior to reaching INIT
- */
-static void try_7322_autoneg(struct qib_pportdata *ppd)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- qib_autoneg_7322_send(ppd, 0);
- set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
- qib_7322_mini_pcs_reset(ppd);
- /* 2 msec is minimum length of a poll cycle */
- queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
- msecs_to_jiffies(2));
-}
-
-/*
- * Handle the empirically determined mechanism for auto-negotiation
- * of DDR speed with switches.
- */
-static void autoneg_7322_work(struct work_struct *work)
-{
- struct qib_pportdata *ppd;
- struct qib_devdata *dd;
- u64 startms;
- u32 i;
- unsigned long flags;
-
- ppd = container_of(work, struct qib_chippport_specific,
- autoneg_work.work)->ppd;
- dd = ppd->dd;
-
- startms = jiffies_to_msecs(jiffies);
-
- /*
- * Busy-wait for this first part; it should take at most a
- * few hundred usec, since we scheduled ourselves for 2 msec.
- */
- for (i = 0; i < 25; i++) {
- if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
- == IB_7322_LT_STATE_POLLQUIET) {
- qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
- break;
- }
- udelay(100);
- }
-
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
- goto done; /* we got there early or told to stop */
-
- /* we expect this to timeout */
- if (wait_event_timeout(ppd->cpspec->autoneg_wait,
- !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
- msecs_to_jiffies(90)))
- goto done;
- qib_7322_mini_pcs_reset(ppd);
-
- /* we expect this to timeout */
- if (wait_event_timeout(ppd->cpspec->autoneg_wait,
- !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
- msecs_to_jiffies(1700)))
- goto done;
- qib_7322_mini_pcs_reset(ppd);
-
- set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
-
- /*
- * Wait up to 250 msec for link to train and get to INIT at DDR;
- * this should terminate early.
- */
- wait_event_timeout(ppd->cpspec->autoneg_wait,
- !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
- msecs_to_jiffies(250));
-done:
- if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
- if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
- ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
- ppd->cpspec->autoneg_tries = 0;
- }
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
- }
-}
-
-/*
- * This routine is used to request IPG set in the QLogic switch.
- * Only called if r1.
- */
-static void try_7322_ipg(struct qib_pportdata *ppd)
-{
- struct qib_ibport *ibp = &ppd->ibport_data;
- struct ib_mad_send_buf *send_buf;
- struct ib_mad_agent *agent;
- struct ib_smp *smp;
- unsigned delay;
- int ret;
-
- agent = ibp->send_agent;
- if (!agent)
- goto retry;
-
- send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
- IB_MGMT_MAD_DATA, GFP_ATOMIC,
- IB_MGMT_BASE_VERSION);
- if (IS_ERR(send_buf))
- goto retry;
-
- if (!ibp->smi_ah) {
- struct ib_ah *ah;
-
- ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
- if (IS_ERR(ah))
- ret = PTR_ERR(ah);
- else {
- send_buf->ah = ah;
- ibp->smi_ah = to_iah(ah);
- ret = 0;
- }
- } else {
- send_buf->ah = &ibp->smi_ah->ibah;
- ret = 0;
- }
-
- smp = send_buf->mad;
- smp->base_version = IB_MGMT_BASE_VERSION;
- smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
- smp->class_version = 1;
- smp->method = IB_MGMT_METHOD_SEND;
- smp->hop_cnt = 1;
- smp->attr_id = QIB_VENDOR_IPG;
- smp->attr_mod = 0;
-
- if (!ret)
- ret = ib_post_send_mad(send_buf, NULL);
- if (ret)
- ib_free_send_mad(send_buf);
-retry:
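-	/* back off exponentially between attempts (2 << ipg_tries msec) */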
- delay = 2 << ppd->cpspec->ipg_tries;
- queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
- msecs_to_jiffies(delay));
-}
-
-/*
- * Timeout handler for setting IPG.
- * Only called if r1.
- */
-static void ipg_7322_work(struct work_struct *work)
-{
- struct qib_pportdata *ppd;
-
- ppd = container_of(work, struct qib_chippport_specific,
- ipg_work.work)->ppd;
- if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
- && ++ppd->cpspec->ipg_tries <= 10)
- try_7322_ipg(ppd);
-}
-
-static u32 qib_7322_iblink_state(u64 ibcs)
-{
- u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
-
- switch (state) {
- case IB_7322_L_STATE_INIT:
- state = IB_PORT_INIT;
- break;
- case IB_7322_L_STATE_ARM:
- state = IB_PORT_ARMED;
- break;
- case IB_7322_L_STATE_ACTIVE:
- /* fall through */
- case IB_7322_L_STATE_ACT_DEFER:
- state = IB_PORT_ACTIVE;
- break;
- default: /* fall through */
- case IB_7322_L_STATE_DOWN:
- state = IB_PORT_DOWN;
- break;
- }
- return state;
-}
-
-/* returns the IBTA port state, rather than the IBC link training state */
-static u8 qib_7322_phys_portstate(u64 ibcs)
-{
- u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
- return qib_7322_physportstate[state];
-}
-
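-/*
- * Handle an IB link up/down transition: refresh the shadow copies of
- * active speed and width, run the non-IBTA DDR autoneg state machine,
- * and snapshot/adjust the symbol and link-error-recovery counters.
- * Returns nonzero if no further IB status change processing is needed.
- */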
-static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
-{
- int ret = 0, symadj = 0;
- unsigned long flags;
- int mult;
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
-
- /* Update our picture of width and speed from chip */
- if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
- ppd->link_speed_active = QIB_IB_QDR;
- mult = 4;
- } else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
- ppd->link_speed_active = QIB_IB_DDR;
- mult = 2;
- } else {
- ppd->link_speed_active = QIB_IB_SDR;
- mult = 1;
- }
- if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
- ppd->link_width_active = IB_WIDTH_4X;
- mult *= 4;
- } else
- ppd->link_width_active = IB_WIDTH_1X;
- ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
-
- if (!ibup) {
- u64 clr;
-
- /* Link went down. */
- /* do IPG MAD again after linkdown, even if last time failed */
- ppd->cpspec->ipg_tries = 0;
- clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
- (SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
- SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
- if (clr)
- qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
- if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
- QIBL_IB_AUTONEG_INPROG)))
- set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
- struct qib_qsfp_data *qd =
- &ppd->cpspec->qsfp_data;
- /* unlock the Tx settings, speed may change */
- qib_write_kreg_port(ppd, krp_tx_deemph_override,
- SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- reset_tx_deemphasis_override));
- qib_cancel_sends(ppd);
- /* on link down, ensure sane pcs state */
- qib_7322_mini_pcs_reset(ppd);
- /* schedule the qsfp refresh which should turn the link
- off */
- if (ppd->dd->flags & QIB_HAS_QSFP) {
- qd->t_insert = jiffies;
- queue_work(ib_wq, &qd->work);
- }
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- if (__qib_sdma_running(ppd))
- __qib_sdma_process_event(ppd,
- qib_sdma_event_e70_go_idle);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- }
- clr = read_7322_creg32_port(ppd, crp_iblinkdown);
- if (clr == ppd->cpspec->iblnkdownsnap)
- ppd->cpspec->iblnkdowndelta++;
- } else {
- if (qib_compat_ddr_negotiate &&
- !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
- QIBL_IB_AUTONEG_INPROG)) &&
- ppd->link_speed_active == QIB_IB_SDR &&
- (ppd->link_speed_enabled & QIB_IB_DDR)
- && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
- /* we are SDR, and auto-negotiation enabled */
- ++ppd->cpspec->autoneg_tries;
- if (!ppd->cpspec->ibdeltainprog) {
- ppd->cpspec->ibdeltainprog = 1;
- ppd->cpspec->ibsymdelta +=
- read_7322_creg32_port(ppd,
- crp_ibsymbolerr) -
- ppd->cpspec->ibsymsnap;
- ppd->cpspec->iblnkerrdelta +=
- read_7322_creg32_port(ppd,
- crp_iblinkerrrecov) -
- ppd->cpspec->iblnkerrsnap;
- }
- try_7322_autoneg(ppd);
- ret = 1; /* no other IB status change processing */
- } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
- ppd->link_speed_active == QIB_IB_SDR) {
- qib_autoneg_7322_send(ppd, 1);
- set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
- qib_7322_mini_pcs_reset(ppd);
- udelay(2);
- ret = 1; /* no other IB status change processing */
- } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
- (ppd->link_speed_active & QIB_IB_DDR)) {
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
- QIBL_IB_AUTONEG_FAILED);
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- ppd->cpspec->autoneg_tries = 0;
- /* re-enable SDR, for next link down */
- set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
- wake_up(&ppd->cpspec->autoneg_wait);
- symadj = 1;
- } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
- /*
- * Clear autoneg failure flag, and do setup
- * so we'll try next time link goes down and
- * back to INIT (possibly connected to a
- * different device).
- */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
- symadj = 1;
- }
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
- symadj = 1;
- if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
- try_7322_ipg(ppd);
- if (!ppd->cpspec->recovery_init)
- setup_7322_link_recovery(ppd, 0);
- ppd->cpspec->qdr_dfe_time = jiffies +
- msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
- }
- ppd->cpspec->ibmalfusesnap = 0;
- ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
- crp_errlink);
- }
- if (symadj) {
- ppd->cpspec->iblnkdownsnap =
- read_7322_creg32_port(ppd, crp_iblinkdown);
- if (ppd->cpspec->ibdeltainprog) {
- ppd->cpspec->ibdeltainprog = 0;
- ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
- crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
- ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
- crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
- }
- } else if (!ibup && qib_compat_ddr_negotiate &&
- !ppd->cpspec->ibdeltainprog &&
- !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
- ppd->cpspec->ibdeltainprog = 1;
- ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
- crp_ibsymbolerr);
- ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
- crp_iblinkerrrecov);
- }
-
- if (!ret)
- qib_setup_7322_setextled(ppd, ibup);
- return ret;
-}
-
-/*
- * Does read/modify/write to appropriate registers to
- * set output and direction bits selected by mask.
- * These are in their canonical positions (e.g. lsb of
- * dir will end up in D48 of extctrl on existing chips).
- * Returns contents of GP Inputs.
- */
-static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
-{
- u64 read_val, new_out;
- unsigned long flags;
-
- if (mask) {
- /* some bits being written, lock access to GPIO */
- dir &= mask;
- out &= mask;
- spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
- dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
- dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
- new_out = (dd->cspec->gpio_out & ~mask) | out;
-
- qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
- qib_write_kreg(dd, kr_gpio_out, new_out);
- dd->cspec->gpio_out = new_out;
- spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
- }
- /*
- * It is unlikely that a read at this time would get valid
- * data on a pin whose direction line was set in the same
- * call to this function. We include the read here because
- * that allows us to potentially combine a change on one pin with
- * a read on another, and because the old code did something like
- * this.
- */
- read_val = qib_read_kreg64(dd, kr_extstatus);
- return SYM_FIELD(read_val, EXTStatus, GPIOIn);
-}
-
-/* Enable writes to config EEPROM, if possible. Returns previous state */
-static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
-{
- int prev_wen;
- u32 mask;
-
- mask = 1 << QIB_EEPROM_WEN_NUM;
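-	/* the WEN pin is active low; invert the GPIO input for the previous state */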
- prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
- gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
-
- return prev_wen & 1;
-}
-
-/*
- * Read fundamental info we need to use the chip. These are
- * the registers that describe chip capabilities, and are
- * saved in shadow registers.
- */
-static void get_7322_chip_params(struct qib_devdata *dd)
-{
- u64 val;
- u32 piobufs;
- int mtu;
-
- dd->palign = qib_read_kreg32(dd, kr_pagealign);
-
- dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
-
- dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
- dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
- dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
- dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
- dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
-
- val = qib_read_kreg64(dd, kr_sendpiobufcnt);
- dd->piobcnt2k = val & ~0U;
- dd->piobcnt4k = val >> 32;
- val = qib_read_kreg64(dd, kr_sendpiosize);
- dd->piosize2k = val & ~0U;
- dd->piosize4k = val >> 32;
-
- mtu = ib_mtu_enum_to_int(qib_ibmtu);
- if (mtu == -1)
- mtu = QIB_DEFAULT_MTU;
- dd->pport[0].ibmtu = (u32)mtu;
- dd->pport[1].ibmtu = (u32)mtu;
-
- /* these may be adjusted in init_chip_wc_pat() */
- dd->pio2kbase = (u32 __iomem *)
- ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
- dd->pio4kbase = (u32 __iomem *)
- ((char __iomem *) dd->kregbase +
- (dd->piobufbase >> 32));
- /*
- * 4K buffers take 2 pages; we use roundup just to be
- * paranoid; we calculate it once here, rather than on
- * every buffer allocation.
- */
- dd->align4k = ALIGN(dd->piosize4k, dd->palign);
-
- piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
-
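-	/* each 64-bit pioavail register covers 32 buffers, 2 bits per buffer */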
- dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
- (sizeof(u64) * BITS_PER_BYTE / 2);
-}
-
-/*
- * The chip base addresses in cspec and cpspec have to be set
- * after possible init_chip_wc_pat(), rather than in
- * get_7322_chip_params(), so split out as separate function
- */
-static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
-{
- u32 cregbase;
-
- cregbase = qib_read_kreg32(dd, kr_counterregbase);
-
- dd->cspec->cregbase = (u64 __iomem *)(cregbase +
- (char __iomem *)dd->kregbase);
-
- dd->egrtidbase = (u64 __iomem *)
- ((char __iomem *) dd->kregbase + dd->rcvegrbase);
-
- /* port registers are defined as relative to base of chip */
- dd->pport[0].cpspec->kpregbase =
- (u64 __iomem *)((char __iomem *)dd->kregbase);
- dd->pport[1].cpspec->kpregbase =
- (u64 __iomem *)(dd->palign +
- (char __iomem *)dd->kregbase);
- dd->pport[0].cpspec->cpregbase =
- (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
- kr_counterregbase) + (char __iomem *)dd->kregbase);
- dd->pport[1].cpspec->cpregbase =
- (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
- kr_counterregbase) + (char __iomem *)dd->kregbase);
-}
-
-/*
- * This is a fairly special-purpose observer, so we only support
- * the port-specific parts of SendCtrl.
- */
-
-#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) | \
- SYM_MASK(SendCtrl_0, SDmaEnable) | \
- SYM_MASK(SendCtrl_0, SDmaIntEnable) | \
- SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
- SYM_MASK(SendCtrl_0, SDmaHalt) | \
- SYM_MASK(SendCtrl_0, IBVLArbiterEn) | \
- SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
-
-static int sendctrl_hook(struct qib_devdata *dd,
- const struct diag_observer *op, u32 offs,
- u64 *data, u64 mask, int only_32)
-{
- unsigned long flags;
- unsigned idx;
- unsigned pidx;
- struct qib_pportdata *ppd = NULL;
- u64 local_data, all_bits;
-
- /*
- * The fixed correspondence between Physical ports and pports is
- * severed. We need to hunt for the ppd that corresponds
- * to the offset we got. And we have to do that without admitting
- * we know the stride, apparently.
- */
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- u64 __iomem *psptr;
- u32 psoffs;
-
- ppd = dd->pport + pidx;
- if (!ppd->cpspec->kpregbase)
- continue;
-
- psptr = ppd->cpspec->kpregbase + krp_sendctrl;
- psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
- if (psoffs == offs)
- break;
- }
-
- /* If pport is not being managed by driver, just avoid shadows. */
- if (pidx >= dd->num_pports)
- ppd = NULL;
-
- /* In any case, "idx" is flat index in kreg space */
- idx = offs / sizeof(u64);
-
- all_bits = ~0ULL;
- if (only_32)
- all_bits >>= 32;
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- if (!ppd || (mask & all_bits) != all_bits) {
- /*
- * At least some mask bits are zero, so we need
- * to read. The judgement call is whether from
- * reg or shadow. First-cut: read reg, and complain
- * if any bits which should be shadowed are different
- * from their shadowed value.
- */
- if (only_32)
- local_data = (u64)qib_read_kreg32(dd, idx);
- else
- local_data = qib_read_kreg64(dd, idx);
- *data = (local_data & ~mask) | (*data & mask);
- }
- if (mask) {
- /*
- * At least some mask bits are one, so we need
- * to write, but only shadow some bits.
- */
- u64 sval, tval; /* Shadowed, transient */
-
- /*
- * New shadow val is bits we don't want to touch,
- * ORed with bits we do, that are intended for shadow.
- */
- if (ppd) {
- sval = ppd->p_sendctrl & ~mask;
- sval |= *data & SENDCTRL_SHADOWED & mask;
- ppd->p_sendctrl = sval;
- } else
- sval = *data & SENDCTRL_SHADOWED & mask;
- tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
- qib_write_kreg(dd, idx, tval);
- qib_write_kreg(dd, kr_scratch, 0Ull);
- }
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
- return only_32 ? 4 : 8;
-}
-
-static const struct diag_observer sendctrl_0_observer = {
- sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
- KREG_IDX(SendCtrl_0) * sizeof(u64)
-};
-
-static const struct diag_observer sendctrl_1_observer = {
- sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
- KREG_IDX(SendCtrl_1) * sizeof(u64)
-};
-
-static ushort sdma_fetch_prio = 8;
-module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
-MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
-
-/* Besides logging QSFP events, we set appropriate TxDDS values */
-static void init_txdds_table(struct qib_pportdata *ppd, int override);
-
-static void qsfp_7322_event(struct work_struct *work)
-{
- struct qib_qsfp_data *qd;
- struct qib_pportdata *ppd;
- unsigned long pwrup;
- unsigned long flags;
- int ret;
- u32 le2;
-
- qd = container_of(work, struct qib_qsfp_data, work);
- ppd = qd->ppd;
- pwrup = qd->t_insert +
- msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);
-
- /* Delay for 20 msecs to allow ModPrs resistor to setup */
- mdelay(QSFP_MODPRS_LAG_MSEC);
-
- if (!qib_qsfp_mod_present(ppd)) {
- ppd->cpspec->qsfp_data.modpresent = 0;
- /* Set the physical link to disabled */
- qib_set_ib_7322_lstate(ppd, 0,
- QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_LINKV;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- } else {
- /*
- * Some QSFPs not only do not respond until the full power-up
- * time, but may behave badly if we try. So hold off responding
- * to insertion.
- */
- while (1) {
- if (time_is_before_jiffies(pwrup))
- break;
- msleep(20);
- }
-
- ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
-
- /*
- * Need to change LE2 back to defaults if we couldn't
- * read the cable type (to handle cable swaps), so do this
- * even on failure to read cable information. We don't
- * get here for QME, so IS_QME check not needed here.
- */
- if (!ret && !ppd->dd->cspec->r1) {
- if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
- le2 = LE2_QME;
- else if (qd->cache.atten[1] >= qib_long_atten &&
- QSFP_IS_CU(qd->cache.tech))
- le2 = LE2_5m;
- else
- le2 = LE2_DEFAULT;
- } else
- le2 = LE2_DEFAULT;
- ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
- /*
- * We always change parameters, since we can choose
- * values for cables without eeproms, and the cable may have
- * changed from a cable with full or partial eeprom content
- * to one with partial or no content.
- */
- init_txdds_table(ppd, 0);
- /* The physical link is being re-enabled only when the
- * previous state was DISABLED and the VALID bit is not
- * set. This should only happen when the cable has been
- * physically pulled. */
- if (!ppd->cpspec->qsfp_data.modpresent &&
- (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
- ppd->cpspec->qsfp_data.modpresent = 1;
- qib_set_ib_7322_lstate(ppd, 0,
- QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_LINKV;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- }
- }
-}
-
-/*
- * There is little we can do but complain to the user if QSFP
- * initialization fails.
- */
-static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
-{
- unsigned long flags;
- struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
- struct qib_devdata *dd = ppd->dd;
- u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
-
- mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
- qd->ppd = ppd;
- qib_qsfp_init(qd, qsfp_7322_event);
- spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
- dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
- dd->cspec->gpio_mask |= mod_prs_bit;
- qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
- qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
- spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
-}
-
-/*
- * called at device initialization time, and also if the txselect
- * module parameter is changed. This is used for cables that don't
- * have valid QSFP EEPROMs (not present, or attenuation is zero).
- * We initialize to the default, then if there is a specific
- * unit,port match, we use that (and set it immediately, for the
- * current speed, if the link is at INIT or better).
- * String format is "default# unit#,port#=# ... u,p=#", separators must
- * be a SPACE character. A newline terminates. The u,p=# tuples may
- * optionally have "u,p=#,#", where the final # is the H1 value.
- * The last specific match is used (actually, all are used, but last
- * one is the one that winds up set); if none at all, fall back on default.
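- * For example, "5 1,2=9 1,1=10,7" selects default entry 5, entry 9 for
- * unit 1 port 2, and entry 10 with H1 value 7 for unit 1 port 1.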
- */
-static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
-{
- char *nxt, *str;
- u32 pidx, unit, port, deflt, h1;
- unsigned long val;
- int any = 0, seth1;
- int txdds_size;
-
- str = txselect_list;
-
- /* default number is validated in setup_txselect() */
- deflt = simple_strtoul(str, &nxt, 0);
- for (pidx = 0; pidx < dd->num_pports; ++pidx)
- dd->pport[pidx].cpspec->no_eep = deflt;
-
- txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
- if (IS_QME(dd) || IS_QMH(dd))
- txdds_size += TXDDS_MFG_SZ;
-
- while (*nxt && nxt[1]) {
- str = ++nxt;
- unit = simple_strtoul(str, &nxt, 0);
- if (nxt == str || !*nxt || *nxt != ',') {
- while (*nxt && *nxt++ != ' ') /* skip to next, if any */
- ;
- continue;
- }
- str = ++nxt;
- port = simple_strtoul(str, &nxt, 0);
- if (nxt == str || *nxt != '=') {
- while (*nxt && *nxt++ != ' ') /* skip to next, if any */
- ;
- continue;
- }
- str = ++nxt;
- val = simple_strtoul(str, &nxt, 0);
- if (nxt == str) {
- while (*nxt && *nxt++ != ' ') /* skip to next, if any */
- ;
- continue;
- }
- if (val >= txdds_size)
- continue;
- seth1 = 0;
- h1 = 0; /* gcc thinks it might be used uninitted */
- if (*nxt == ',' && nxt[1]) {
- str = ++nxt;
- h1 = (u32)simple_strtoul(str, &nxt, 0);
- if (nxt == str)
- while (*nxt && *nxt++ != ' ') /* skip */
- ;
- else
- seth1 = 1;
- }
- for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
- ++pidx) {
- struct qib_pportdata *ppd = &dd->pport[pidx];
-
- if (ppd->port != port || !ppd->link_speed_supported)
- continue;
- ppd->cpspec->no_eep = val;
- if (seth1)
- ppd->cpspec->h1_val = h1;
- /* now change the IBC and serdes, overriding generic */
- init_txdds_table(ppd, 1);
- /* Re-enable the physical state machine on mezz boards
- * now that the correct settings have been set.
- * QSFP boards are handled by the QSFP event handler. */
- if (IS_QMH(dd) || IS_QME(dd))
- qib_set_ib_7322_lstate(ppd, 0,
- QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
- any++;
- }
- if (*nxt == '\n')
- break; /* done */
- }
- if (change && !any) {
- /* no specific setting, use the default.
- * Change the IBC and serdes, but since it's
- * general, don't override specific settings.
- */
- for (pidx = 0; pidx < dd->num_pports; ++pidx)
- if (dd->pport[pidx].link_speed_supported)
- init_txdds_table(&dd->pport[pidx], 0);
- }
-}
-
-/* handle the txselect parameter changing */
-static int setup_txselect(const char *str, struct kernel_param *kp)
-{
- struct qib_devdata *dd;
- unsigned long val;
- char *n;
-
- if (strlen(str) >= MAX_ATTEN_LEN) {
- pr_info("txselect_values string too long\n");
- return -ENOSPC;
- }
- val = simple_strtoul(str, &n, 0);
- if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
- TXDDS_MFG_SZ)) {
- pr_info("txselect_values must start with a number < %d\n",
- TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
- return -EINVAL;
- }
- strcpy(txselect_list, str);
-
- list_for_each_entry(dd, &qib_dev_list, list)
- if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
- set_no_qsfp_atten(dd, 1);
- return 0;
-}
-
-/*
- * Write the final few registers that depend on some of the
- * init setup. Done late in init, just before bringing up
- * the serdes.
- */
-static int qib_late_7322_initreg(struct qib_devdata *dd)
-{
- int ret = 0, n;
- u64 val;
-
- qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
- qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
- qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
- qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
- val = qib_read_kreg64(dd, kr_sendpioavailaddr);
- if (val != dd->pioavailregs_phys) {
- qib_dev_err(dd,
- "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
- (unsigned long) dd->pioavailregs_phys,
- (unsigned long long) val);
- ret = -EINVAL;
- }
-
- n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
- qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
- /* driver sends get pkey, lid, etc. checking also, to catch bugs */
- qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
-
- qib_register_observer(dd, &sendctrl_0_observer);
- qib_register_observer(dd, &sendctrl_1_observer);
-
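-	/* hold off SDMA fetch prioritization while the thresholds are programmed */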
- dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
- qib_write_kreg(dd, kr_control, dd->control);
- /*
- * Set SendDmaFetchPriority and init Tx params, including
- * QSFP handler on boards that have QSFP.
- * First set our default attenuation entry for cables that
- * don't have valid attenuation.
- */
- set_no_qsfp_atten(dd, 0);
- for (n = 0; n < dd->num_pports; ++n) {
- struct qib_pportdata *ppd = dd->pport + n;
-
- qib_write_kreg_port(ppd, krp_senddmaprioritythld,
- sdma_fetch_prio & 0xf);
- /* Initialize qsfp if present on board. */
- if (dd->flags & QIB_HAS_QSFP)
- qib_init_7322_qsfp(ppd);
- }
- dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
- qib_write_kreg(dd, kr_control, dd->control);
-
- return ret;
-}
-
-/* per IB port errors. */
-#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
- MASK_ACROSS(8, 15))
-#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
-#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
- MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
- MASK_ACROSS(0, 11))
-
-/*
- * Write the initialization per-port registers that need to be done at
- * driver load and after reset completes (i.e., that aren't done as part
- * of other init procedures called from qib_init.c).
- * Some of these should be redundant on reset, but play safe.
- */
-static void write_7322_init_portregs(struct qib_pportdata *ppd)
-{
- u64 val;
- int i;
-
- if (!ppd->link_speed_supported) {
- /* no buffer credits for this port */
- for (i = 1; i < 8; i++)
- qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
- qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
- qib_write_kreg(ppd->dd, kr_scratch, 0);
- return;
- }
-
- /*
- * Set the number of supported virtual lanes in IBC,
- * for flow control packet handling on unsupported VLs
- */
- val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
- val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
- val |= (u64)(ppd->vls_supported - 1) <<
- SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
- qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
-
- qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
-
- /* enable tx header checking */
- qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
- IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
- IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
-
- qib_write_kreg_port(ppd, krp_ncmodectrl,
- SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
-
- /*
- * Unconditionally clear the bufmask bits. If SDMA is
- * enabled, we'll set them appropriately later.
- */
- qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
- qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
- qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
- if (ppd->dd->cspec->r1)
- ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
-}
-
-/*
- * Write the initialization per-device registers that need to be done at
- * driver load and after reset completes (i.e., that aren't done as part
- * of other init procedures called from qib_init.c). Also write per-port
- * registers that are affected by overall device config, such as QP mapping
- * Some of these should be redundant on reset, but play safe.
- */
-static void write_7322_initregs(struct qib_devdata *dd)
-{
- struct qib_pportdata *ppd;
- int i, pidx;
- u64 val;
-
- /* Set Multicast QPs received by port 2 to map to context one. */
- qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- unsigned n, regno;
- unsigned long flags;
-
- if (dd->n_krcv_queues < 2 ||
- !dd->pport[pidx].link_speed_supported)
- continue;
-
- ppd = &dd->pport[pidx];
-
- /* be paranoid against later code motion, etc. */
- spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
- ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
- spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
-
- /* Initialize QP to context mapping */
- regno = krp_rcvqpmaptable;
- val = 0;
- if (dd->num_pports > 1)
- n = dd->first_user_ctxt / dd->num_pports;
- else
- n = dd->first_user_ctxt - 1;
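-		/* pack 32 QP-to-context map entries, 5 bits each, six per register */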
- for (i = 0; i < 32; ) {
- unsigned ctxt;
-
- if (dd->num_pports > 1)
- ctxt = (i % n) * dd->num_pports + pidx;
- else if (i % n)
- ctxt = (i % n) + 1;
- else
- ctxt = ppd->hw_pidx;
- val |= ctxt << (5 * (i % 6));
- i++;
- if (i % 6 == 0) {
- qib_write_kreg_port(ppd, regno, val);
- val = 0;
- regno++;
- }
- }
- qib_write_kreg_port(ppd, regno, val);
- }
-
- /*
- * Set up interrupt mitigation for kernel contexts, but
- * not user contexts (user contexts use interrupts when
- * stalled waiting for any packet, so want those interrupts
- * right away).
- */
- for (i = 0; i < dd->first_user_ctxt; i++) {
- dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
- qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
- }
-
- /*
- * Initialize as (disabled) rcvflow tables. Application code
- * will set up each flow as it uses the flow.
- * Doesn't clear any of the error bits that might be set.
- */
- val = TIDFLOW_ERRBITS; /* these are W1C */
- for (i = 0; i < dd->cfgctxts; i++) {
- int flow;
-
- for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
- qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
- }
-
- /*
- * Dual cards init to dual port recovery, single port cards to
- * the one port. Dual port cards may later adjust to 1 port,
- * and then back to dual port if both ports are connected.
- */
- if (dd->num_pports)
- setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
-}
-
-static int qib_init_7322_variables(struct qib_devdata *dd)
-{
- struct qib_pportdata *ppd;
- unsigned features, pidx, sbufcnt;
- int ret, mtu;
- u32 sbufs, updthresh;
- resource_size_t vl15off;
-
- /* pport structs are contiguous, allocated after devdata */
- ppd = (struct qib_pportdata *)(dd + 1);
- dd->pport = ppd;
- ppd[0].dd = dd;
- ppd[1].dd = dd;
-
- dd->cspec = (struct qib_chip_specific *)(ppd + 2);
-
- ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
- ppd[1].cpspec = &ppd[0].cpspec[1];
- ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
- ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
-
- spin_lock_init(&dd->cspec->rcvmod_lock);
- spin_lock_init(&dd->cspec->gpio_lock);
-
- /* we haven't yet set QIB_PRESENT, so use read directly */
- dd->revision = readq(&dd->kregbase[kr_revision]);
-
- if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
- qib_dev_err(dd,
- "Revision register read failure, giving up initialization\n");
- ret = -ENODEV;
- goto bail;
- }
- dd->flags |= QIB_PRESENT; /* now register routines work */
-
- dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
- dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
- dd->cspec->r1 = dd->minrev == 1;
-
- get_7322_chip_params(dd);
- features = qib_7322_boardname(dd);
-
- /* now that piobcnt2k and 4k set, we can allocate these */
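-	/* round up to whole longs for the per-buffer check bitmaps */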
- sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
- NUM_VL15_BUFS + BITS_PER_LONG - 1;
- sbufcnt /= BITS_PER_LONG;
- dd->cspec->sendchkenable = kmalloc(sbufcnt *
- sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
- dd->cspec->sendgrhchk = kmalloc(sbufcnt *
- sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
- dd->cspec->sendibchk = kmalloc(sbufcnt *
- sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
- if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
- !dd->cspec->sendibchk) {
- qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
- ret = -ENOMEM;
- goto bail;
- }
-
- ppd = dd->pport;
-
- /*
- * GPIO bits for TWSI data and clock,
- * used for serial EEPROM.
- */
- dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
- dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
- dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
-
- dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
- QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
- QIB_HAS_THRESH_UPDATE |
- (sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
- dd->flags |= qib_special_trigger ?
- QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
-
- /*
- * Setup initial values. These may change when PAT is enabled, but
- * we need these to do initial chip register accesses.
- */
- qib_7322_set_baseaddrs(dd);
-
- mtu = ib_mtu_enum_to_int(qib_ibmtu);
- if (mtu == -1)
- mtu = QIB_DEFAULT_MTU;
-
- dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
- /* all hwerrors become interrupts, unless special purposed */
- dd->cspec->hwerrmask = ~0ULL;
- /* link_recovery setup causes these errors, so ignore them,
- * other than clearing them when they occur */
- dd->cspec->hwerrmask &=
- ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
- SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
- HWE_MASK(LATriggered));
-
- for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
- struct qib_chippport_specific *cp = ppd->cpspec;
-
- ppd->link_speed_supported = features & PORT_SPD_CAP;
- features >>= PORT_SPD_CAP_SHIFT;
- if (!ppd->link_speed_supported) {
- /* single port mode (7340, or configured) */
- dd->skip_kctxt_mask |= 1 << pidx;
- if (pidx == 0) {
- /* Make sure port is disabled. */
- qib_write_kreg_port(ppd, krp_rcvctrl, 0);
- qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
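-				/* unused first port: copy port 1's pportdata into slot 0 */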
- ppd[0] = ppd[1];
- dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
- IBSerdesPClkNotDetectMask_0)
- | SYM_MASK(HwErrMask,
- SDmaMemReadErrMask_0));
- dd->cspec->int_enable_mask &= ~(
- SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
- SYM_MASK(IntMask, SDmaIdleIntMask_0) |
- SYM_MASK(IntMask, SDmaProgressIntMask_0) |
- SYM_MASK(IntMask, SDmaIntMask_0) |
- SYM_MASK(IntMask, ErrIntMask_0) |
- SYM_MASK(IntMask, SendDoneIntMask_0));
- } else {
- /* Make sure port is disabled. */
- qib_write_kreg_port(ppd, krp_rcvctrl, 0);
- qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
- dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
- IBSerdesPClkNotDetectMask_1)
- | SYM_MASK(HwErrMask,
- SDmaMemReadErrMask_1));
- dd->cspec->int_enable_mask &= ~(
- SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
- SYM_MASK(IntMask, SDmaIdleIntMask_1) |
- SYM_MASK(IntMask, SDmaProgressIntMask_1) |
- SYM_MASK(IntMask, SDmaIntMask_1) |
- SYM_MASK(IntMask, ErrIntMask_1) |
- SYM_MASK(IntMask, SendDoneIntMask_1));
- }
- continue;
- }
-
- dd->num_pports++;
- ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
- if (ret) {
- dd->num_pports--;
- goto bail;
- }
-
- ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
- ppd->link_width_enabled = IB_WIDTH_4X;
- ppd->link_speed_enabled = ppd->link_speed_supported;
- /*
- * Set the initial values to reasonable default, will be set
- * for real when link is up.
- */
- ppd->link_width_active = IB_WIDTH_4X;
- ppd->link_speed_active = QIB_IB_SDR;
- ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
- switch (qib_num_cfg_vls) {
- case 1:
- ppd->vls_supported = IB_VL_VL0;
- break;
- case 2:
- ppd->vls_supported = IB_VL_VL0_1;
- break;
- default:
- qib_devinfo(dd->pcidev,
- "Invalid num_vls %u, using 4 VLs\n",
- qib_num_cfg_vls);
- qib_num_cfg_vls = 4;
- /* fall through */
- case 4:
- ppd->vls_supported = IB_VL_VL0_3;
- break;
- case 8:
- if (mtu <= 2048)
- ppd->vls_supported = IB_VL_VL0_7;
- else {
- qib_devinfo(dd->pcidev,
- "Invalid num_vls %u for MTU %d , using 4 VLs\n",
- qib_num_cfg_vls, mtu);
- ppd->vls_supported = IB_VL_VL0_3;
- qib_num_cfg_vls = 4;
- }
- break;
- }
- ppd->vls_operational = ppd->vls_supported;
-
- init_waitqueue_head(&cp->autoneg_wait);
- INIT_DELAYED_WORK(&cp->autoneg_work,
- autoneg_7322_work);
- if (ppd->dd->cspec->r1)
- INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
-
- /*
- * For Mez and similar cards, no qsfp info, so do
- * the "cable info" setup here. Can be overridden
- * in adapter-specific routines.
- */
- if (!(dd->flags & QIB_HAS_QSFP)) {
- if (!IS_QMH(dd) && !IS_QME(dd))
- qib_devinfo(dd->pcidev,
- "IB%u:%u: Unknown mezzanine card type\n",
- dd->unit, ppd->port);
- cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
- /*
- * Choose center value as default tx serdes setting
- * until changed through module parameter.
- */
- ppd->cpspec->no_eep = IS_QMH(dd) ?
- TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
- } else
- cp->h1_val = H1_FORCE_VAL;
-
- /* Avoid writes to chip for mini_init */
- if (!qib_mini_init)
- write_7322_init_portregs(ppd);
-
- init_timer(&cp->chase_timer);
- cp->chase_timer.function = reenable_chase;
- cp->chase_timer.data = (unsigned long)ppd;
-
- ppd++;
- }
-
- dd->rcvhdrentsize = qib_rcvhdrentsize ?
- qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
- dd->rcvhdrsize = qib_rcvhdrsize ?
- qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
- dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
-
- /* we always allocate at least 2048 bytes for eager buffers */
- dd->rcvegrbufsize = max(mtu, 2048);
- BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
- dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
-
- qib_7322_tidtemplate(dd);
-
- /*
- * We can request a receive interrupt for 1 or
- * more packets from current offset.
- */
- dd->rhdrhead_intr_off =
- (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
-
- /* setup the stats timer; the add_timer is done at end of init */
- init_timer(&dd->stats_timer);
- dd->stats_timer.function = qib_get_7322_faststats;
- dd->stats_timer.data = (unsigned long) dd;
-
- dd->ureg_align = 0x10000; /* 64KB alignment */
-
- dd->piosize2kmax_dwords = dd->piosize2k >> 2;
-
- qib_7322_config_ctxts(dd);
- qib_set_ctxtcnt(dd);
-
- /*
- * We do not set WC on the VL15 buffers to avoid
- * a rare problem with unaligned writes from
- * interrupt-flushed store buffers, so we need
- * to map those separately here. We can't solve
- * this for the rarely used mtrr case.
- */
- ret = init_chip_wc_pat(dd, 0);
- if (ret)
- goto bail;
-
- /* vl15 buffers start just after the 4k buffers */
- vl15off = dd->physaddr + (dd->piobufbase >> 32) +
- dd->piobcnt4k * dd->align4k;
- dd->piovl15base = ioremap_nocache(vl15off,
- NUM_VL15_BUFS * dd->align4k);
- if (!dd->piovl15base) {
- ret = -ENOMEM;
- goto bail;
- }
-
- qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
-
- ret = 0;
- if (qib_mini_init)
- goto bail;
- if (!dd->num_pports) {
- qib_dev_err(dd, "No ports enabled, giving up initialization\n");
- goto bail; /* no error, so can still figure out why err */
- }
-
- write_7322_initregs(dd);
- ret = qib_create_ctxts(dd);
- init_7322_cntrnames(dd);
-
- updthresh = 8U; /* update threshold */
-
- /* Use all of the 4KB buffers for kernel SDMA, zero if !SDMA.
- * Reserve the update threshold amount (or 3, whichever is greater)
- * for other kernel use, such as sending SMI, MAD, and ACKs,
- * unless we aren't enabling SDMA, in which case we want to use
- * all the 4k bufs for the kernel.
- * If this were less than the update threshold, we could wait
- * a long time for an update. Coded this way because we
- * sometimes change the update threshold for various reasons,
- * and we want this to remain robust.
- */
- if (dd->flags & QIB_HAS_SEND_DMA) {
- dd->cspec->sdmabufcnt = dd->piobcnt4k;
- sbufs = updthresh > 3 ? updthresh : 3;
- } else {
- dd->cspec->sdmabufcnt = 0;
- sbufs = dd->piobcnt4k;
- }
- dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
- dd->cspec->sdmabufcnt;
- dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
- dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
- dd->last_pio = dd->cspec->lastbuf_for_pio;
- dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
- dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
-
- /*
- * If we have 16 user contexts, we will have 7 sbufs
- * per context, so reduce the update threshold to match. We
- * want to update before we actually run out; at low pbufs/ctxt
- * give ourselves some margin.
- */
- if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
- updthresh = dd->pbufsctxt - 2;
- dd->cspec->updthresh_dflt = updthresh;
- dd->cspec->updthresh = updthresh;
-
- /* before full enable, no interrupts, no locking needed */
- dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
- << SYM_LSB(SendCtrl, AvailUpdThld)) |
- SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
-
- dd->psxmitwait_supported = 1;
- dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
-bail:
- if (!dd->ctxtcnt)
- dd->ctxtcnt = 1; /* for other initialization code */
-
- return ret;
-}
-
-static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
- u32 *pbufnum)
-{
- u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
- struct qib_devdata *dd = ppd->dd;
-
- /* last is same for 2k and 4k, because we use 4k if all 2k busy */
- if (pbc & PBC_7322_VL15_SEND) {
- first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
- last = first;
- } else {
- if ((plen + 1) > dd->piosize2kmax_dwords)
- first = dd->piobcnt2k;
- else
- first = 0;
- last = dd->cspec->lastbuf_for_pio;
- }
- return qib_getsendbuf_range(dd, pbufnum, first, last);
-}
-
-static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
- u32 start)
-{
- qib_write_kreg_port(ppd, krp_psinterval, intv);
- qib_write_kreg_port(ppd, krp_psstart, start);
-}
-
-/*
- * Must be called with sdma_lock held, or before init finished.
- */
-static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
-{
- qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
-}
-
-/*
- * sdma_lock should be acquired before calling this routine
- */
-static void dump_sdma_7322_state(struct qib_pportdata *ppd)
-{
- u64 reg, reg1, reg2;
-
- reg = qib_read_kreg_port(ppd, krp_senddmastatus);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmastatus: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_sendctrl);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA sendctrl: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmabase);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmabase: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
- reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
- reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmabufmask 0:%llx 1:%llx 2:%llx\n",
- reg, reg1, reg2);
-
- /* get bufuse bits, clear them, and print them again if non-zero */
- reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
- qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
- reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
- qib_write_kreg_port(ppd, krp_senddmabuf_use1, reg1);
- reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
- qib_write_kreg_port(ppd, krp_senddmabuf_use2, reg2);
- /* 0 and 1 should always be zero, so print as short form */
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA current senddmabuf_use 0:%llx 1:%llx 2:%llx\n",
- reg, reg1, reg2);
- reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
- reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
- reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
- /* 0 and 1 should always be zero, so print as short form */
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA cleared senddmabuf_use 0:%llx 1:%llx 2:%llx\n",
- reg, reg1, reg2);
-
- reg = qib_read_kreg_port(ppd, krp_senddmatail);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmatail: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmahead);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmahead: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmaheadaddr: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmalengen);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmalengen: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmadesccnt: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmaidlecnt: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmapriorityhld: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmareloadcnt: 0x%016llx\n", reg);
-
- dump_sdma_state(ppd);
-}
-
-static struct sdma_set_state_action sdma_7322_action_table[] = {
- [qib_sdma_state_s00_hw_down] = {
- .go_s99_running_tofalse = 1,
- .op_enable = 0,
- .op_intenable = 0,
- .op_halt = 0,
- .op_drain = 0,
- },
- [qib_sdma_state_s10_hw_start_up_wait] = {
- .op_enable = 0,
- .op_intenable = 1,
- .op_halt = 1,
- .op_drain = 0,
- },
- [qib_sdma_state_s20_idle] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 1,
- .op_drain = 0,
- },
- [qib_sdma_state_s30_sw_clean_up_wait] = {
- .op_enable = 0,
- .op_intenable = 1,
- .op_halt = 1,
- .op_drain = 0,
- },
- [qib_sdma_state_s40_hw_clean_up_wait] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 1,
- .op_drain = 0,
- },
- [qib_sdma_state_s50_hw_halt_wait] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 1,
- .op_drain = 1,
- },
- [qib_sdma_state_s99_running] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 0,
- .op_drain = 0,
- .go_s99_running_totrue = 1,
- },
-};
-
-static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
-{
- ppd->sdma_state.set_state_action = sdma_7322_action_table;
-}
-
-static int init_sdma_7322_regs(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- unsigned lastbuf, erstbuf;
- u64 senddmabufmask[3] = { 0 };
- int n, ret = 0;
-
- qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
- qib_sdma_7322_setlengen(ppd);
- qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
- qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
- qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
- qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
-
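-	/*
-	 * Each port's SDMA buffers are carved from the end of the PIO
-	 * buffer space: with two ports, port 1 takes the lower part of
-	 * that region and port 2 the upper part.
-	 */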
- if (dd->num_pports)
- n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
- else
- n = dd->cspec->sdmabufcnt; /* failsafe for init */
- erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
- ((dd->num_pports == 1 || ppd->port == 2) ? n :
- dd->cspec->sdmabufcnt);
- lastbuf = erstbuf + n;
-
- ppd->sdma_state.first_sendbuf = erstbuf;
- ppd->sdma_state.last_sendbuf = lastbuf;
- for (; erstbuf < lastbuf; ++erstbuf) {
- unsigned word = erstbuf / BITS_PER_LONG;
- unsigned bit = erstbuf & (BITS_PER_LONG - 1);
-
- BUG_ON(word >= 3);
- senddmabufmask[word] |= 1ULL << bit;
- }
- qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
- qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
- qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
- return ret;
-}
-
-/* sdma_lock must be held */
-static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- int sane;
- int use_dmahead;
- u16 swhead;
- u16 swtail;
- u16 cnt;
- u16 hwhead;
-
- use_dmahead = __qib_sdma_running(ppd) &&
- (dd->flags & QIB_HAS_SDMA_TIMEOUT);
-retry:
- hwhead = use_dmahead ?
- (u16) le64_to_cpu(*ppd->sdma_head_dma) :
- (u16) qib_read_kreg_port(ppd, krp_senddmahead);
-
- swhead = ppd->sdma_descq_head;
- swtail = ppd->sdma_descq_tail;
- cnt = ppd->sdma_descq_cnt;
-
- if (swhead < swtail)
- /* not wrapped */
- sane = (hwhead >= swhead) & (hwhead <= swtail);
- else if (swhead > swtail)
- /* wrapped around */
- sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
- (hwhead <= swtail);
- else
- /* empty */
- sane = (hwhead == swhead);
-
- if (unlikely(!sane)) {
- if (use_dmahead) {
- /* try one more time, directly from the register */
- use_dmahead = 0;
- goto retry;
- }
- /* proceed as if no progress */
- hwhead = swhead;
- }
-
- return hwhead;
-}
-
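-/*
- * The SDMA engine is still "busy" until no drain or halt is in
- * progress, the internal halt has taken effect, and the scoreboard
- * is empty.
- */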
-static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
-{
- u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
-
- return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
- (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
- !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
- !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
-}
-
-/*
- * Compute the amount of delay before sending the next packet if the
- * port's send rate differs from the static rate set for the QP.
- * The delay affects the next packet and the amount of the delay is
- * based on the length of this packet.
- */
-static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
- u8 srate, u8 vl)
-{
- u8 snd_mult = ppd->delay_mult;
- u8 rcv_mult = ib_rate_to_delay[srate];
- u32 ret;
-
- ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
-
- /* Indicate VL15, else set the VL in the control word */
- if (vl == 15)
- ret |= PBC_7322_VL15_SEND_CTRL;
- else
- ret |= vl << PBC_VL_NUM_LSB;
- ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
-
- return ret;
-}
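
The delay portion of the control word only matters when the far side's static rate is slower than our send rate (a larger delay multiplier), and it then scales with roughly half the packet length. A standalone sketch of just that computation (illustrative only, not driver code; treating plen as a word count is an assumption):

    #include <stdint.h>

    static uint32_t static_rate_delay(uint32_t plen, uint8_t snd_mult,
                                      uint8_t rcv_mult)
    {
        /* pacing is needed only when the receiver's multiplier is larger */
        return rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
    }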
-
-/*
- * Enable the per-port VL15 send buffers for use.
- * They follow the rest of the buffers, without a config parameter.
- * This was in initregs, but that is done before the shadow
- * is set up, and this has to be done after the shadow is
- * set up.
- */
-static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
-{
- unsigned vl15bufs;
-
- vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
- qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
- TXCHK_CHG_TYPE_KERN, NULL);
-}
-
-static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
-{
- if (rcd->ctxt < NUM_IB_PORTS) {
- if (rcd->dd->num_pports > 1) {
- rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
- rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
- } else {
- rcd->rcvegrcnt = KCTXT0_EGRCNT;
- rcd->rcvegr_tid_base = 0;
- }
- } else {
- rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
- rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
- (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
- }
-}
-
-#define QTXSLEEPS 5000
-static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
- u32 len, u32 which, struct qib_ctxtdata *rcd)
-{
- int i;
- const int last = start + len - 1;
- const int lastr = last / BITS_PER_LONG;
- u32 sleeps = 0;
- int wait = rcd != NULL;
- unsigned long flags;
-
- while (wait) {
- unsigned long shadow;
- int cstart, previ = -1;
-
- /*
- * When flipping from kernel to user, we can't change
- * the checking type if the buffer is allocated to the
- * driver.  The other direction is OK, because it happens
- * at close, and we have just disarmed all the
- * buffers.  All kernel-to-kernel changes are also
- * OK.
- */
- for (cstart = start; cstart <= last; cstart++) {
- i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
- / BITS_PER_LONG;
- if (i != previ) {
- shadow = (unsigned long)
- le64_to_cpu(dd->pioavailregs_dma[i]);
- previ = i;
- }
- if (test_bit(((2 * cstart) +
- QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
- % BITS_PER_LONG, &shadow))
- break;
- }
-
- if (cstart > last)
- break;
-
- if (sleeps == QTXSLEEPS)
- break;
- /* make sure we see an updated copy next time around */
- sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
- sleeps++;
- msleep(20);
- }
-
- switch (which) {
- case TXCHK_CHG_TYPE_DIS1:
- /*
- * disable checking on a range; used by diags; just
- * one buffer, but still written generically
- */
- for (i = start; i <= last; i++)
- clear_bit(i, dd->cspec->sendchkenable);
- break;
-
- case TXCHK_CHG_TYPE_ENAB1:
- /*
- * (re)enable checking on a range; used by diags; just
- * one buffer, but still written generically; read
- * scratch to be sure buffer actually triggered, not
- * just flushed from processor.
- */
- qib_read_kreg32(dd, kr_scratch);
- for (i = start; i <= last; i++)
- set_bit(i, dd->cspec->sendchkenable);
- break;
-
- case TXCHK_CHG_TYPE_KERN:
- /* usable by kernel */
- for (i = start; i <= last; i++) {
- set_bit(i, dd->cspec->sendibchk);
- clear_bit(i, dd->cspec->sendgrhchk);
- }
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- /* see if we need to raise avail update threshold */
- for (i = dd->first_user_ctxt;
- dd->cspec->updthresh != dd->cspec->updthresh_dflt
- && i < dd->cfgctxts; i++)
- if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
- ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
- < dd->cspec->updthresh_dflt)
- break;
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
- if (i == dd->cfgctxts) {
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- dd->cspec->updthresh = dd->cspec->updthresh_dflt;
- dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
- dd->sendctrl |= (dd->cspec->updthresh &
- SYM_RMASK(SendCtrl, AvailUpdThld)) <<
- SYM_LSB(SendCtrl, AvailUpdThld);
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
- sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
- }
- break;
-
- case TXCHK_CHG_TYPE_USER:
- /* for user process */
- for (i = start; i <= last; i++) {
- clear_bit(i, dd->cspec->sendibchk);
- set_bit(i, dd->cspec->sendgrhchk);
- }
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
- / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
- dd->cspec->updthresh = (rcd->piocnt /
- rcd->subctxt_cnt) - 1;
- dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
- dd->sendctrl |= (dd->cspec->updthresh &
- SYM_RMASK(SendCtrl, AvailUpdThld))
- << SYM_LSB(SendCtrl, AvailUpdThld);
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
- sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
- } else
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
- break;
-
- default:
- break;
- }
-
- for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
- qib_write_kreg(dd, kr_sendcheckmask + i,
- dd->cspec->sendchkenable[i]);
-
- for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
- qib_write_kreg(dd, kr_sendgrhcheckmask + i,
- dd->cspec->sendgrhchk[i]);
- qib_write_kreg(dd, kr_sendibpktmask + i,
- dd->cspec->sendibchk[i]);
- }
-
- /*
- * Be sure whatever we did was seen by the chip and acted upon,
- * before we return. Mostly important for which >= 2.
- */
- qib_read_kreg32(dd, kr_scratch);
-}
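
The wait loop at the top of this function walks the pioavail shadow using the busy bit out of the two status bits kept per send buffer. A minimal sketch of the index arithmetic (illustrative only, not driver code; the 64-bit long width is an assumption):

    /* Map a send buffer number to the (word, bit) of its busy flag. */
    static void pioavail_busy_pos(unsigned int buf, unsigned int busy_shift,
                                  unsigned int *word, unsigned int *bit)
    {
        unsigned int pos = 2 * buf + busy_shift;  /* two bits per buffer */

        *word = pos / 64;
        *bit  = pos % 64;
    }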
-
-
-/* useful for trigger analyzers, etc. */
-static void writescratch(struct qib_devdata *dd, u32 val)
-{
- qib_write_kreg(dd, kr_scratch, val);
-}
-
-/* Dummy for now, use chip regs soon */
-static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
-{
- return -ENXIO;
-}
-
-/**
- * qib_init_iba7322_funcs - set up the chip-specific function pointers
- * @pdev: the pci_dev for the qlogic_ib device
- * @ent: pci_device_id struct for this dev
- *
- * Also allocates, inits, and returns the devdata struct for this
- * device instance
- *
- * This is global, and is called directly at init to set up the
- * chip-specific function pointers for later use.
- */
-struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct qib_devdata *dd;
- int ret, i;
- u32 tabsize, actual_cnt = 0;
-
- dd = qib_alloc_devdata(pdev,
- NUM_IB_PORTS * sizeof(struct qib_pportdata) +
- sizeof(struct qib_chip_specific) +
- NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
- if (IS_ERR(dd))
- goto bail;
-
- dd->f_bringup_serdes = qib_7322_bringup_serdes;
- dd->f_cleanup = qib_setup_7322_cleanup;
- dd->f_clear_tids = qib_7322_clear_tids;
- dd->f_free_irq = qib_7322_free_irq;
- dd->f_get_base_info = qib_7322_get_base_info;
- dd->f_get_msgheader = qib_7322_get_msgheader;
- dd->f_getsendbuf = qib_7322_getsendbuf;
- dd->f_gpio_mod = gpio_7322_mod;
- dd->f_eeprom_wen = qib_7322_eeprom_wen;
- dd->f_hdrqempty = qib_7322_hdrqempty;
- dd->f_ib_updown = qib_7322_ib_updown;
- dd->f_init_ctxt = qib_7322_init_ctxt;
- dd->f_initvl15_bufs = qib_7322_initvl15_bufs;
- dd->f_intr_fallback = qib_7322_intr_fallback;
- dd->f_late_initreg = qib_late_7322_initreg;
- dd->f_setpbc_control = qib_7322_setpbc_control;
- dd->f_portcntr = qib_portcntr_7322;
- dd->f_put_tid = qib_7322_put_tid;
- dd->f_quiet_serdes = qib_7322_mini_quiet_serdes;
- dd->f_rcvctrl = rcvctrl_7322_mod;
- dd->f_read_cntrs = qib_read_7322cntrs;
- dd->f_read_portcntrs = qib_read_7322portcntrs;
- dd->f_reset = qib_do_7322_reset;
- dd->f_init_sdma_regs = init_sdma_7322_regs;
- dd->f_sdma_busy = qib_sdma_7322_busy;
- dd->f_sdma_gethead = qib_sdma_7322_gethead;
- dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl;
- dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
- dd->f_sdma_update_tail = qib_sdma_update_7322_tail;
- dd->f_sendctrl = sendctrl_7322_mod;
- dd->f_set_armlaunch = qib_set_7322_armlaunch;
- dd->f_set_cntr_sample = qib_set_cntr_7322_sample;
- dd->f_iblink_state = qib_7322_iblink_state;
- dd->f_ibphys_portstate = qib_7322_phys_portstate;
- dd->f_get_ib_cfg = qib_7322_get_ib_cfg;
- dd->f_set_ib_cfg = qib_7322_set_ib_cfg;
- dd->f_set_ib_loopback = qib_7322_set_loopback;
- dd->f_get_ib_table = qib_7322_get_ib_table;
- dd->f_set_ib_table = qib_7322_set_ib_table;
- dd->f_set_intr_state = qib_7322_set_intr_state;
- dd->f_setextled = qib_setup_7322_setextled;
- dd->f_txchk_change = qib_7322_txchk_change;
- dd->f_update_usrhead = qib_update_7322_usrhead;
- dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr;
- dd->f_xgxs_reset = qib_7322_mini_pcs_reset;
- dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up;
- dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up;
- dd->f_sdma_init_early = qib_7322_sdma_init_early;
- dd->f_writescratch = writescratch;
- dd->f_tempsense_rd = qib_7322_tempsense_rd;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dd->f_notify_dca = qib_7322_notify_dca;
-#endif
- /*
- * Do remaining PCIe setup and save PCIe values in dd.
- * Any error printing is already done by the init code.
- * On return, we have the chip mapped, but chip registers
- * are not set up until start of qib_init_7322_variables.
- */
- ret = qib_pcie_ddinit(dd, pdev, ent);
- if (ret < 0)
- goto bail_free;
-
- /* initialize chip-specific variables */
- ret = qib_init_7322_variables(dd);
- if (ret)
- goto bail_cleanup;
-
- if (qib_mini_init || !dd->num_pports)
- goto bail;
-
- /*
- * Determine number of vectors we want; depends on port count
- * and number of configured kernel receive queues actually used.
- * Should also depend on whether sdma is enabled or not, but
- * that's such a rare testing case it's not worth worrying about.
- */
- tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
- for (i = 0; i < tabsize; i++)
- if ((i < ARRAY_SIZE(irq_table) &&
- irq_table[i].port <= dd->num_pports) ||
- (i >= ARRAY_SIZE(irq_table) &&
- dd->rcd[i - ARRAY_SIZE(irq_table)]))
- actual_cnt++;
- /* reduce by ctxt's < 2 */
- if (qib_krcvq01_no_msi)
- actual_cnt -= dd->num_pports;
-
- tabsize = actual_cnt;
- dd->cspec->msix_entries = kzalloc(tabsize *
- sizeof(struct qib_msix_entry), GFP_KERNEL);
- if (!dd->cspec->msix_entries) {
- qib_dev_err(dd, "No memory for MSIx table\n");
- tabsize = 0;
- }
- for (i = 0; i < tabsize; i++)
- dd->cspec->msix_entries[i].msix.entry = i;
-
- if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
- qib_dev_err(dd,
- "Failed to setup PCIe or interrupts; continuing anyway\n");
- /* may be less than we wanted, if not enough available */
- dd->cspec->num_msix_entries = tabsize;
-
- /* setup interrupt handler */
- qib_setup_7322_interrupt(dd, 1);
-
- /* clear diagctrl register, in case diags were running and crashed */
- qib_write_kreg(dd, kr_hwdiagctrl, 0);
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- if (!dca_add_requester(&pdev->dev)) {
- qib_devinfo(dd->pcidev, "DCA enabled\n");
- dd->flags |= QIB_DCA_ENABLED;
- qib_setup_dca(dd);
- }
-#endif
- goto bail;
-
-bail_cleanup:
- qib_pcie_ddcleanup(dd);
-bail_free:
- qib_free_devdata(dd);
- dd = ERR_PTR(ret);
-bail:
- return dd;
-}
-
-/*
- * Set the table entry at the specified index from the table specified.
- * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
- * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
- * 'idx' below addresses the correct entry, while its 4 LSBs select the
- * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
- */
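
In other words, the flat register index is the speed-table number (SDR=0, DDR=1, QDR=2) times the table size plus the entry within that table; a minimal sketch, assuming TXDDS_TABLE_SZ is 16 as in the tables further below:

    /* speed_table: 0 = SDR, 1 = DDR, 2 = QDR; entry: 0..15 */
    static int txdds_flat_index(int speed_table, int entry)
    {
        return speed_table * 16 + entry;   /* 16 == TXDDS_TABLE_SZ */
    }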
-#define DDS_ENT_AMP_LSB 14
-#define DDS_ENT_MAIN_LSB 9
-#define DDS_ENT_POST_LSB 5
-#define DDS_ENT_PRE_XTRA_LSB 3
-#define DDS_ENT_PRE_LSB 0
-
-/*
- * Set one entry in the TxDDS table for spec'd port
- * ridx picks one of the entries, while tp points
- * to the appropriate table entry.
- */
-static void set_txdds(struct qib_pportdata *ppd, int ridx,
- const struct txdds_ent *tp)
-{
- struct qib_devdata *dd = ppd->dd;
- u32 pack_ent;
- int regidx;
-
- /* Get correct offset in chip-space, and in source table */
- regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
- /*
- * We do not use qib_write_kreg_port() because it was intended
- * only for registers in the lower "port specific" pages.
- * So do index calculation by hand.
- */
- if (ppd->hw_pidx)
- regidx += (dd->palign / sizeof(u64));
-
- pack_ent = tp->amp << DDS_ENT_AMP_LSB;
- pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
- pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
- pack_ent |= tp->post << DDS_ENT_POST_LSB;
- qib_write_kreg(dd, regidx, pack_ent);
- /* Prevent back-to-back writes by hitting scratch */
- qib_write_kreg(ppd->dd, kr_scratch, 0);
-}
-
-static const struct vendor_txdds_ent vendor_txdds[] = {
- { /* Amphenol 1m 30awg NoEq */
- { 0x41, 0x50, 0x48 }, "584470002 ",
- { 10, 0, 0, 5 }, { 10, 0, 0, 9 }, { 7, 1, 0, 13 },
- },
- { /* Amphenol 3m 28awg NoEq */
- { 0x41, 0x50, 0x48 }, "584470004 ",
- { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 7, 15 },
- },
- { /* Finisar 3m OM2 Optical */
- { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
- { 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { 0, 0, 0, 13 },
- },
- { /* Finisar 30m OM2 Optical */
- { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
- { 0, 0, 0, 1 }, { 0, 0, 0, 5 }, { 0, 0, 0, 11 },
- },
- { /* Finisar Default OM2 Optical */
- { 0x00, 0x90, 0x65 }, NULL,
- { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 0, 0, 12 },
- },
- { /* Gore 1m 30awg NoEq */
- { 0x00, 0x21, 0x77 }, "QSN3300-1 ",
- { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 0, 15 },
- },
- { /* Gore 2m 30awg NoEq */
- { 0x00, 0x21, 0x77 }, "QSN3300-2 ",
- { 0, 0, 0, 8 }, { 0, 0, 0, 10 }, { 0, 1, 7, 15 },
- },
- { /* Gore 1m 28awg NoEq */
- { 0x00, 0x21, 0x77 }, "QSN3800-1 ",
- { 0, 0, 0, 6 }, { 0, 0, 0, 8 }, { 0, 1, 0, 15 },
- },
- { /* Gore 3m 28awg NoEq */
- { 0x00, 0x21, 0x77 }, "QSN3800-3 ",
- { 0, 0, 0, 9 }, { 0, 0, 0, 13 }, { 0, 1, 7, 15 },
- },
- { /* Gore 5m 24awg Eq */
- { 0x00, 0x21, 0x77 }, "QSN7000-5 ",
- { 0, 0, 0, 7 }, { 0, 0, 0, 9 }, { 0, 1, 3, 15 },
- },
- { /* Gore 7m 24awg Eq */
- { 0x00, 0x21, 0x77 }, "QSN7000-7 ",
- { 0, 0, 0, 9 }, { 0, 0, 0, 11 }, { 0, 2, 6, 15 },
- },
- { /* Gore 5m 26awg Eq */
- { 0x00, 0x21, 0x77 }, "QSN7600-5 ",
- { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 9, 13 },
- },
- { /* Gore 7m 26awg Eq */
- { 0x00, 0x21, 0x77 }, "QSN7600-7 ",
- { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 10, 1, 8, 15 },
- },
- { /* Intersil 12m 24awg Active */
- { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
- { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 3, 0, 9 },
- },
- { /* Intersil 10m 28awg Active */
- { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
- { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 2, 0, 2 },
- },
- { /* Intersil 7m 30awg Active */
- { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
- { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 1, 0, 3 },
- },
- { /* Intersil 5m 32awg Active */
- { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
- { 0, 0, 0, 6 }, { 0, 0, 0, 6 }, { 0, 2, 0, 8 },
- },
- { /* Intersil Default Active */
- { 0x00, 0x30, 0xB4 }, NULL,
- { 0, 0, 0, 6 }, { 0, 0, 0, 5 }, { 0, 2, 0, 5 },
- },
- { /* Luxtera 20m Active Optical */
- { 0x00, 0x25, 0x63 }, NULL,
- { 0, 0, 0, 5 }, { 0, 0, 0, 8 }, { 0, 2, 0, 12 },
- },
- { /* Molex 1M Cu loopback */
- { 0x00, 0x09, 0x3A }, "74763-0025 ",
- { 2, 2, 6, 15 }, { 2, 2, 6, 15 }, { 2, 2, 6, 15 },
- },
- { /* Molex 2m 28awg NoEq */
- { 0x00, 0x09, 0x3A }, "74757-2201 ",
- { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 1, 15 },
- },
-};
-
-static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
- /* amp, pre, main, post */
- { 2, 2, 15, 6 }, /* Loopback */
- { 0, 0, 0, 1 }, /* 2 dB */
- { 0, 0, 0, 2 }, /* 3 dB */
- { 0, 0, 0, 3 }, /* 4 dB */
- { 0, 0, 0, 4 }, /* 5 dB */
- { 0, 0, 0, 5 }, /* 6 dB */
- { 0, 0, 0, 6 }, /* 7 dB */
- { 0, 0, 0, 7 }, /* 8 dB */
- { 0, 0, 0, 8 }, /* 9 dB */
- { 0, 0, 0, 9 }, /* 10 dB */
- { 0, 0, 0, 10 }, /* 11 dB */
- { 0, 0, 0, 11 }, /* 12 dB */
- { 0, 0, 0, 12 }, /* 13 dB */
- { 0, 0, 0, 13 }, /* 14 dB */
- { 0, 0, 0, 14 }, /* 15 dB */
- { 0, 0, 0, 15 }, /* 16 dB */
-};
-
-static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
- /* amp, pre, main, post */
- { 2, 2, 15, 6 }, /* Loopback */
- { 0, 0, 0, 8 }, /* 2 dB */
- { 0, 0, 0, 8 }, /* 3 dB */
- { 0, 0, 0, 9 }, /* 4 dB */
- { 0, 0, 0, 9 }, /* 5 dB */
- { 0, 0, 0, 10 }, /* 6 dB */
- { 0, 0, 0, 10 }, /* 7 dB */
- { 0, 0, 0, 11 }, /* 8 dB */
- { 0, 0, 0, 11 }, /* 9 dB */
- { 0, 0, 0, 12 }, /* 10 dB */
- { 0, 0, 0, 12 }, /* 11 dB */
- { 0, 0, 0, 13 }, /* 12 dB */
- { 0, 0, 0, 13 }, /* 13 dB */
- { 0, 0, 0, 14 }, /* 14 dB */
- { 0, 0, 0, 14 }, /* 15 dB */
- { 0, 0, 0, 15 }, /* 16 dB */
-};
-
-static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
- /* amp, pre, main, post */
- { 2, 2, 15, 6 }, /* Loopback */
- { 0, 1, 0, 7 }, /* 2 dB (also QMH7342) */
- { 0, 1, 0, 9 }, /* 3 dB (also QMH7342) */
- { 0, 1, 0, 11 }, /* 4 dB */
- { 0, 1, 0, 13 }, /* 5 dB */
- { 0, 1, 0, 15 }, /* 6 dB */
- { 0, 1, 3, 15 }, /* 7 dB */
- { 0, 1, 7, 15 }, /* 8 dB */
- { 0, 1, 7, 15 }, /* 9 dB */
- { 0, 1, 8, 15 }, /* 10 dB */
- { 0, 1, 9, 15 }, /* 11 dB */
- { 0, 1, 10, 15 }, /* 12 dB */
- { 0, 2, 6, 15 }, /* 13 dB */
- { 0, 2, 7, 15 }, /* 14 dB */
- { 0, 2, 8, 15 }, /* 15 dB */
- { 0, 2, 9, 15 }, /* 16 dB */
-};
-
-/*
- * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
- * These are mostly used for mez cards going through connectors
- * and backplane traces, but can be used to add other "unusual"
- * table values as well.
- */
-static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
- /* amp, pre, main, post */
- { 0, 0, 0, 1 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 1 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 2 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 2 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 3 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 4 }, /* QMH7342 backplane settings */
- { 0, 1, 4, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 3, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 12 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 14 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 2, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 7 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 6 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 8 }, /* QME7342 backplane settings 1.1 */
-};
-
-static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
- /* amp, pre, main, post */
- { 0, 0, 0, 7 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 7 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 8 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 8 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 9 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 10 }, /* QMH7342 backplane settings */
- { 0, 1, 4, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 3, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 12 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 14 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 2, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 7 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 6 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 8 }, /* QME7342 backplane settings 1.1 */
-};
-
-static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
- /* amp, pre, main, post */
- { 0, 1, 0, 4 }, /* QMH7342 backplane settings */
- { 0, 1, 0, 5 }, /* QMH7342 backplane settings */
- { 0, 1, 0, 6 }, /* QMH7342 backplane settings */
- { 0, 1, 0, 8 }, /* QMH7342 backplane settings */
- { 0, 1, 0, 10 }, /* QMH7342 backplane settings */
- { 0, 1, 0, 12 }, /* QMH7342 backplane settings */
- { 0, 1, 4, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 3, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 12 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 14 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 2, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 7 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 6 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 8 }, /* QME7342 backplane settings 1.1 */
-};
-
-static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
- /* amp, pre, main, post */
- { 0, 0, 0, 0 }, /* QME7342 mfg settings */
- { 0, 0, 0, 6 }, /* QME7342 P2 mfg settings */
-};
-
-static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
- unsigned atten)
-{
- /*
- * The attenuation table starts at 2dB for entry 1,
- * with entry 0 being the loopback entry.
- */
- if (atten <= 2)
- atten = 1;
- else if (atten > TXDDS_TABLE_SZ)
- atten = TXDDS_TABLE_SZ - 1;
- else
- atten--;
- return txdds + atten;
-}
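
Concretely, with a TXDDS_TABLE_SZ of 16 the clamping above maps 0-2 dB to index 1 (index 0 is the loopback entry), 3-16 dB to index atten - 1, and anything larger to the last entry; a standalone sketch of that mapping (illustrative only, not driver code):

    static unsigned int atten_to_index(unsigned int atten)
    {
        if (atten <= 2)
            return 1;          /* the 2 dB entry */
        if (atten > 16)        /* 16 == TXDDS_TABLE_SZ */
            return 15;         /* last (16 dB) entry */
        return atten - 1;
    }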
-
-/*
- * if override is set, the module parameter txselect has a value
- * for this specific port, so use it, rather than our normal mechanism.
- */
-static void find_best_ent(struct qib_pportdata *ppd,
- const struct txdds_ent **sdr_dds,
- const struct txdds_ent **ddr_dds,
- const struct txdds_ent **qdr_dds, int override)
-{
- struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
- int idx;
-
- /* Search table of known cables */
- for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
- const struct vendor_txdds_ent *v = vendor_txdds + idx;
-
- if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
- (!v->partnum ||
- !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
- *sdr_dds = &v->sdr;
- *ddr_dds = &v->ddr;
- *qdr_dds = &v->qdr;
- return;
- }
- }
-
- /* Active cables don't have attenuation so we only set SERDES
- * settings to account for the attenuation of the board traces. */
- if (!override && QSFP_IS_ACTIVE(qd->tech)) {
- *sdr_dds = txdds_sdr + ppd->dd->board_atten;
- *ddr_dds = txdds_ddr + ppd->dd->board_atten;
- *qdr_dds = txdds_qdr + ppd->dd->board_atten;
- return;
- }
-
- if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
- qd->atten[1])) {
- *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
- *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
- *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
- return;
- } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
- /*
- * If we have no (or incomplete) data from the cable
- * EEPROM, or no QSFP, or override is set, use the
- * module parameter value to index into the attenuation
- * table.
- */
- idx = ppd->cpspec->no_eep;
- *sdr_dds = &txdds_sdr[idx];
- *ddr_dds = &txdds_ddr[idx];
- *qdr_dds = &txdds_qdr[idx];
- } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
- /* similar to above, but index into the "extra" table. */
- idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
- *sdr_dds = &txdds_extra_sdr[idx];
- *ddr_dds = &txdds_extra_ddr[idx];
- *qdr_dds = &txdds_extra_qdr[idx];
- } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
- ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
- TXDDS_MFG_SZ)) {
- idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
- pr_info("IB%u:%u use idx %u into txdds_mfg\n",
- ppd->dd->unit, ppd->port, idx);
- *sdr_dds = &txdds_extra_mfg[idx];
- *ddr_dds = &txdds_extra_mfg[idx];
- *qdr_dds = &txdds_extra_mfg[idx];
- } else {
- /* this shouldn't happen, it's range checked */
- *sdr_dds = txdds_sdr + qib_long_atten;
- *ddr_dds = txdds_ddr + qib_long_atten;
- *qdr_dds = txdds_qdr + qib_long_atten;
- }
-}
-
-static void init_txdds_table(struct qib_pportdata *ppd, int override)
-{
- const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
- struct txdds_ent *dds;
- int idx;
- int single_ent = 0;
-
- find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
-
- /* for mez cards or override, use the selected value for all entries */
- if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
- single_ent = 1;
-
- /* Fill in the first entry with the best entry found. */
- set_txdds(ppd, 0, sdr_dds);
- set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
- set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
- if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
- QIBL_LINKACTIVE)) {
- dds = (struct txdds_ent *)(ppd->link_speed_active ==
- QIB_IB_QDR ? qdr_dds :
- (ppd->link_speed_active ==
- QIB_IB_DDR ? ddr_dds : sdr_dds));
- write_tx_serdes_param(ppd, dds);
- }
-
- /* Fill in the remaining entries with the default table values. */
- for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
- set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
- set_txdds(ppd, idx + TXDDS_TABLE_SZ,
- single_ent ? ddr_dds : txdds_ddr + idx);
- set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
- single_ent ? qdr_dds : txdds_qdr + idx);
- }
-}
-
-#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
-#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
-#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
-#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
-#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
-#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
-#define AHB_TRANS_TRIES 10
-
-/*
- * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3,
- * 5=subsystem, which is why most calls have "chan + (chan >> 1)"
- * for the channel argument.
- */
-static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
- u32 data, u32 mask)
-{
- u32 rd_data, wr_data, sz_mask;
- u64 trans, acc, prev_acc;
- u32 ret = 0xBAD0BAD;
- int tries;
-
- prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
- /* From this point on, make sure we return access */
- acc = (quad << 1) | 1;
- qib_write_kreg(dd, KR_AHB_ACC, acc);
-
- for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
- trans = qib_read_kreg64(dd, KR_AHB_TRANS);
- if (trans & AHB_TRANS_RDY)
- break;
- }
- if (tries >= AHB_TRANS_TRIES) {
- qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
- goto bail;
- }
-
- /* If mask is not all 1s, we need to read, but different SerDes
- * entities have different sizes
- */
- sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
- wr_data = data & mask & sz_mask;
- if ((~mask & sz_mask) != 0) {
- trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
- qib_write_kreg(dd, KR_AHB_TRANS, trans);
-
- for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
- trans = qib_read_kreg64(dd, KR_AHB_TRANS);
- if (trans & AHB_TRANS_RDY)
- break;
- }
- if (tries >= AHB_TRANS_TRIES) {
- qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
- AHB_TRANS_TRIES);
- goto bail;
- }
- /* Re-read in case host split reads and read data first */
- trans = qib_read_kreg64(dd, KR_AHB_TRANS);
- rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
- wr_data |= (rd_data & ~mask & sz_mask);
- }
-
- /* If mask is not zero, we need to write. */
- if (mask & sz_mask) {
- trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
- trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
- trans |= AHB_WR;
- qib_write_kreg(dd, KR_AHB_TRANS, trans);
-
- for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
- trans = qib_read_kreg64(dd, KR_AHB_TRANS);
- if (trans & AHB_TRANS_RDY)
- break;
- }
- if (tries >= AHB_TRANS_TRIES) {
- qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
- AHB_TRANS_TRIES);
- goto bail;
- }
- }
- ret = wr_data;
-bail:
- qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
- return ret;
-}
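
The read-modify-write performed above amounts to: keep the bits outside 'mask' from the value read back, take the bits inside 'mask' from 'data', both limited to the entity width given by 'sz_mask'. A one-line sketch of that merge (illustrative only, not driver code):

    #include <stdint.h>

    static uint32_t rmw_merge(uint32_t old, uint32_t data,
                              uint32_t mask, uint32_t sz_mask)
    {
        return (data & mask & sz_mask) | (old & ~mask & sz_mask);
    }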
-
-static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
- unsigned mask)
-{
- struct qib_devdata *dd = ppd->dd;
- int chan;
- u32 rbc;
-
- for (chan = 0; chan < SERDES_CHANS; ++chan) {
- ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
- data, mask);
- rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
- addr, 0, 0);
- }
-}
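
The "chan + (chan >> 1)" expression used throughout skips the PLL slot at AHB position 2, so the four logical SerDes channels land on AHB positions 0, 1, 3 and 4. A trivial sketch (illustrative only, not driver code):

    static int serdes_chan_to_ahb(int chan)   /* chan in 0..3 */
    {
        return chan + (chan >> 1);            /* 0->0, 1->1, 2->3, 3->4 */
    }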
-
-static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
-{
- u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
- u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
-
- if (enable && !state) {
- pr_info("IB%u:%u Turning LOS on\n",
- ppd->dd->unit, ppd->port);
- data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
- } else if (!enable && state) {
- pr_info("IB%u:%u Turning LOS off\n",
- ppd->dd->unit, ppd->port);
- data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
- }
- qib_write_kreg_port(ppd, krp_serdesctrl, data);
-}
-
-static int serdes_7322_init(struct qib_pportdata *ppd)
-{
- int ret = 0;
-
- if (ppd->dd->cspec->r1)
- ret = serdes_7322_init_old(ppd);
- else
- ret = serdes_7322_init_new(ppd);
- return ret;
-}
-
-static int serdes_7322_init_old(struct qib_pportdata *ppd)
-{
- u32 le_val;
-
- /*
- * Initialize the Tx DDS tables. Also done every QSFP event,
- * for adapters with QSFP
- */
- init_txdds_table(ppd, 0);
-
- /* ensure no tx overrides from earlier driver loads */
- qib_write_kreg_port(ppd, krp_tx_deemph_override,
- SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- reset_tx_deemphasis_override));
-
- /* Patch some SerDes defaults to "Better for IB" */
- /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
- ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
-
- /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
- ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
- /* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
- ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
-
- /* May be overridden in qsfp_7322_event */
- le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
- ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
-
- /* enable LE1 adaptation for all but QME, which is disabled */
- le_val = IS_QME(ppd->dd) ? 0 : 1;
- ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
-
- /* Clear cmode-override, may be set from older driver */
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
-
- /* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
- ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
-
- /* setup LoS params; these are subsystem, so chan == 5 */
- /* LoS filter threshold_count on, ch 0-3, set to 8 */
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
-
- /* LoS filter threshold_count off, ch 0-3, set to 4 */
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
-
- /* LoS filter select enabled */
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
-
- /* LoS target data: SDR=4, DDR=2, QDR=1 */
- ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
- ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
- ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
-
- serdes_7322_los_enable(ppd, 1);
-
- /* rxbistena; set to 0 to avoid side effects if it is switched later */
- ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
-
- /* Configure 4 DFE taps, and only they adapt */
- ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
-
- /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
- le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
- ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
-
- /*
- * Set receive adaptation mode. SDR and DDR adaptation are
- * always on, and QDR is initially enabled; later disabled.
- */
- qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
- qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
- qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
- ppd->dd->cspec->r1 ?
- QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
- ppd->cpspec->qdr_dfe_on = 1;
-
- /* FLoop LOS gate: PPM filter enabled */
- ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
-
- /* rx offset center enabled */
- ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
-
- if (!ppd->dd->cspec->r1) {
- ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
- ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
- }
-
- /* Set the frequency loop bandwidth to 15 */
- ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
-
- return 0;
-}
-
-static int serdes_7322_init_new(struct qib_pportdata *ppd)
-{
- unsigned long tend;
- u32 le_val, rxcaldone;
- int chan, chan_done = (1 << SERDES_CHANS) - 1;
-
- /* Clear cmode-override, may be set from older driver */
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
-
- /* ensure no tx overrides from earlier driver loads */
- qib_write_kreg_port(ppd, krp_tx_deemph_override,
- SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- reset_tx_deemphasis_override));
-
- /* START OF LSI SUGGESTED SERDES BRINGUP */
- /* Reset - Calibration Setup */
- /* Stop DFE adaptation */
- ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
- /* Disable LE1 */
- ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
- /* Disable autoadapt for LE1 */
- ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
- /* Disable LE2 */
- ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
- /* Disable VGA */
- ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
- /* Disable AFE Offset Cancel */
- ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
- /* Disable Timing Loop */
- ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
- /* Disable Frequency Loop */
- ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
- /* Disable Baseline Wander Correction */
- ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
- /* Disable RX Calibration */
- ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
- /* Disable RX Offset Calibration */
- ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
- /* Select BB CDR */
- ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
- /* CDR Step Size */
- ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
- /* Enable phase Calibration */
- ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
- /* DFE Bandwidth [2:14-12] */
- ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
- /* DFE Config (4 taps only) */
- ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
- /* Gain Loop Bandwidth */
- if (!ppd->dd->cspec->r1) {
- ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
- ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
- } else {
- ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
- }
- /* Baseline Wander Correction Gain [13:4-0] (leave as default) */
- /* Baseline Wander Correction Gain [3:7-5] (leave as default) */
- /* Data Rate Select [5:7-6] (leave as default) */
- /* RX Parallel Word Width [3:10-8] (leave as default) */
-
- /* RX RESET */
- /* Single- or Multi-channel reset */
- /* RX Analog reset */
- /* RX Digital reset */
- ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
- msleep(20);
- /* RX Analog reset */
- ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
- msleep(20);
- /* RX Digital reset */
- ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
- msleep(20);
-
- /* setup LoS params; these are subsystem, so chan == 5 */
- /* LoS filter threshold_count on, ch 0-3, set to 8 */
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
-
- /* LoS filter threshold_count off, ch 0-3, set to 4 */
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
-
- /* LoS filter select enabled */
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
-
- /* LoS target data: SDR=4, DDR=2, QDR=1 */
- ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
- ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
- ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
-
- /* Turn on LOS on initial SERDES init */
- serdes_7322_los_enable(ppd, 1);
- /* FLoop LOS gate: PPM filter enabled */
- ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
-
- /* RX LATCH CALIBRATION */
- /* Enable Eyefinder Phase Calibration latch */
- ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
- /* Enable RX Offset Calibration latch */
- ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
- msleep(20);
- /* Start Calibration */
- ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
- tend = jiffies + msecs_to_jiffies(500);
- while (chan_done && !time_is_before_jiffies(tend)) {
- msleep(20);
- for (chan = 0; chan < SERDES_CHANS; ++chan) {
- rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
- (chan + (chan >> 1)),
- 25, 0, 0);
- if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
- (~chan_done & (1 << chan)) == 0)
- chan_done &= ~(1 << chan);
- }
- }
- if (chan_done) {
- pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n",
- IBSD(ppd->hw_pidx), chan_done);
- } else {
- for (chan = 0; chan < SERDES_CHANS; ++chan) {
- rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
- (chan + (chan >> 1)),
- 25, 0, 0);
- if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
- pr_info("Serdes %d chan %d calibration failed\n",
- IBSD(ppd->hw_pidx), chan);
- }
- }
-
- /* Turn off Calibration */
- ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
- msleep(20);
-
- /* BRING RX UP */
- /* Set LE2 value (May be overridden in qsfp_7322_event) */
- le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
- ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
- /* Set LE2 Loop bandwidth */
- ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
- /* Enable LE2 */
- ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
- msleep(20);
- /* Enable H0 only */
- ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
- /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
- le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
- ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
- /* Enable VGA */
- ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
- msleep(20);
- /* Set Frequency Loop Bandwidth */
- ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
- /* Enable Frequency Loop */
- ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
- /* Set Timing Loop Bandwidth */
- ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
- /* Enable Timing Loop */
- ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
- msleep(50);
- /* Enable DFE
- * Set receive adaptation mode. SDR and DDR adaptation are
- * always on, and QDR is initially enabled; later disabled.
- */
- qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
- qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
- qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
- ppd->dd->cspec->r1 ?
- QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
- ppd->cpspec->qdr_dfe_on = 1;
- /* Disable LE1 */
- ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
- /* Disable auto adapt for LE1 */
- ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
- msleep(20);
- /* Enable AFE Offset Cancel */
- ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
- /* Enable Baseline Wander Correction */
- ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
- /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
- ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
- /* VGA output common mode */
- ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
-
- /*
- * Initialize the Tx DDS tables. Also done every QSFP event,
- * for adapters with QSFP
- */
- init_txdds_table(ppd, 0);
-
- return 0;
-}
-
-/* start adjust QMH serdes parameters */
-
-static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
-{
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
- 9, code << 9, 0x3f << 9);
-}
-
-static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
- int enable, u32 tapenable)
-{
- if (enable)
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
- 1, 3 << 10, 0x1f << 10);
- else
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
- 1, 0, 0x1f << 10);
-}
-
-/* Set clock to 1, 0, 1, 0 */
-static void clock_man(struct qib_pportdata *ppd, int chan)
-{
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
- 4, 0x4000, 0x4000);
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
- 4, 0, 0x4000);
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
- 4, 0x4000, 0x4000);
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
- 4, 0, 0x4000);
-}
-
-/*
- * write the current Tx serdes pre,post,main,amp settings into the serdes.
- * The caller must pass the settings appropriate for the current speed,
- * or not care if they are correct for the current speed.
- */
-static void write_tx_serdes_param(struct qib_pportdata *ppd,
- struct txdds_ent *txdds)
-{
- u64 deemph;
-
- deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
- /* field names for amp, main, post, pre, respectively */
- deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
- SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
- SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
- SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
-
- deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- tx_override_deemphasis_select);
- deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- txampcntl_d2a);
- deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- txc0_ena);
- deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- txcp1_ena);
- deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- txcn1_ena);
- qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
-}
-
-/*
- * Set the parameters for mez cards on link bounce, so they are
- * always exactly what was requested. Similar logic to init_txdds_table(),
- * but it only updates the serdes.
- */
-static void adj_tx_serdes(struct qib_pportdata *ppd)
-{
- const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
- struct txdds_ent *dds;
-
- find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
- dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
- qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
- ddr_dds : sdr_dds));
- write_tx_serdes_param(ppd, dds);
-}
-
-/* set QDR forced value for H1, if needed */
-static void force_h1(struct qib_pportdata *ppd)
-{
- int chan;
-
- ppd->cpspec->qdr_reforce = 0;
- if (!ppd->dd->cspec->r1)
- return;
-
- for (chan = 0; chan < SERDES_CHANS; chan++) {
- set_man_mode_h1(ppd, chan, 1, 0);
- set_man_code(ppd, chan, ppd->cpspec->h1_val);
- clock_man(ppd, chan);
- set_man_mode_h1(ppd, chan, 0, 0);
- }
-}
-
-#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
-#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
-
-#define R_OPCODE_LSB 3
-#define R_OP_NOP 0
-#define R_OP_SHIFT 2
-#define R_OP_UPDATE 3
-#define R_TDI_LSB 2
-#define R_TDO_LSB 1
-#define R_RDY 1
-
-static int qib_r_grab(struct qib_devdata *dd)
-{
- u64 val = SJA_EN;
-
- qib_write_kreg(dd, kr_r_access, val);
- qib_read_kreg32(dd, kr_scratch);
- return 0;
-}
-
-/* qib_r_wait_for_rdy() not only waits for the ready bit, it
- * returns the current state of R_TDO
- */
-static int qib_r_wait_for_rdy(struct qib_devdata *dd)
-{
- u64 val;
- int timeout;
-
- for (timeout = 0; timeout < 100 ; ++timeout) {
- val = qib_read_kreg32(dd, kr_r_access);
- if (val & R_RDY)
- return (val >> R_TDO_LSB) & 1;
- }
- return -1;
-}
-
-static int qib_r_shift(struct qib_devdata *dd, int bisten,
- int len, u8 *inp, u8 *outp)
-{
- u64 valbase, val;
- int ret, pos;
-
- valbase = SJA_EN | (bisten << BISTEN_LSB) |
- (R_OP_SHIFT << R_OPCODE_LSB);
- ret = qib_r_wait_for_rdy(dd);
- if (ret < 0)
- goto bail;
- for (pos = 0; pos < len; ++pos) {
- val = valbase;
- if (outp) {
- outp[pos >> 3] &= ~(1 << (pos & 7));
- outp[pos >> 3] |= (ret << (pos & 7));
- }
- if (inp) {
- int tdi = inp[pos >> 3] >> (pos & 7);
-
- val |= ((tdi & 1) << R_TDI_LSB);
- }
- qib_write_kreg(dd, kr_r_access, val);
- qib_read_kreg32(dd, kr_scratch);
- ret = qib_r_wait_for_rdy(dd);
- if (ret < 0)
- break;
- }
- /* Restore to NOP between operations. */
- val = SJA_EN | (bisten << BISTEN_LSB);
- qib_write_kreg(dd, kr_r_access, val);
- qib_read_kreg32(dd, kr_scratch);
- ret = qib_r_wait_for_rdy(dd);
-
- if (ret >= 0)
- ret = pos;
-bail:
- return ret;
-}
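
The shift routine above streams bits LSB-first through byte arrays, with bit 'pos' of the stream stored in byte pos >> 3 at bit position pos & 7. A minimal sketch of that packing (illustrative only, not driver code):

    static int get_stream_bit(const unsigned char *buf, int pos)
    {
        return (buf[pos >> 3] >> (pos & 7)) & 1;
    }

    static void set_stream_bit(unsigned char *buf, int pos, int val)
    {
        buf[pos >> 3] &= (unsigned char)~(1u << (pos & 7));
        buf[pos >> 3] |= (unsigned char)((val & 1) << (pos & 7));
    }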
-
-static int qib_r_update(struct qib_devdata *dd, int bisten)
-{
- u64 val;
- int ret;
-
- val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
- ret = qib_r_wait_for_rdy(dd);
- if (ret >= 0) {
- qib_write_kreg(dd, kr_r_access, val);
- qib_read_kreg32(dd, kr_scratch);
- }
- return ret;
-}
-
-#define BISTEN_PORT_SEL 15
-#define LEN_PORT_SEL 625
-#define BISTEN_AT 17
-#define LEN_AT 156
-#define BISTEN_ETM 16
-#define LEN_ETM 632
-
-#define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
-
-/* these are common for all IB port use cases. */
-static u8 reset_at[BIT2BYTE(LEN_AT)] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
-};
-static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
- 0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
- 0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
- 0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
- 0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
-};
-static u8 at[BIT2BYTE(LEN_AT)] = {
- 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
-};
-
-/* used for IB1 or IB2, only one in use */
-static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
- 0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
- 0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
-};
-
-/* used when both IB1 and IB2 are in use */
-static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
- 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
- 0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
- 0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
- 0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
- 0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
-};
-
-/* used when only IB1 is in use */
-static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
- 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
- 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
- 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
- 0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
- 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
- 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
- 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
- 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
-
-/* used when only IB2 is in use */
-static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
- 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
- 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
- 0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
- 0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
- 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
- 0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
- 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
- 0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
-};
-
-/* used when both IB1 and IB2 are in use */
-static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
- 0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
- 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
- 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
- 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
- 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
- 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
- 0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
- 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
-
-/*
- * Do setup to properly handle IB link recovery; if "both" is set, we
- * are initializing to cover both ports; otherwise we are initializing
- * to cover a single-port card, or the port has reached INIT and we may
- * need to switch coverage types.
- */
-static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
-{
- u8 *portsel, *etm;
- struct qib_devdata *dd = ppd->dd;
-
- if (!ppd->dd->cspec->r1)
- return;
- if (!both) {
- dd->cspec->recovery_ports_initted++;
- ppd->cpspec->recovery_init = 1;
- }
- if (!both && dd->cspec->recovery_ports_initted == 1) {
- portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
- etm = atetm_1port;
- } else {
- portsel = portsel_2port;
- etm = atetm_2port;
- }
-
- if (qib_r_grab(dd) < 0 ||
- qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
- qib_r_update(dd, BISTEN_ETM) < 0 ||
- qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
- qib_r_update(dd, BISTEN_AT) < 0 ||
- qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
- portsel, NULL) < 0 ||
- qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
- qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
- qib_r_update(dd, BISTEN_AT) < 0 ||
- qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
- qib_r_update(dd, BISTEN_ETM) < 0)
- qib_dev_err(dd, "Failed IB link recovery setup\n");
-}
-
-static void check_7322_rxe_status(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 fmask;
-
- if (dd->cspec->recovery_ports_initted != 1)
- return; /* rest doesn't apply to dualport */
- qib_write_kreg(dd, kr_control, dd->control |
- SYM_MASK(Control, FreezeMode));
- (void)qib_read_kreg64(dd, kr_scratch);
- udelay(3); /* ibcreset asserted 400ns, be sure that's over */
- fmask = qib_read_kreg64(dd, kr_act_fmask);
- if (!fmask) {
- /*
- * require a powercycle before we'll work again, and make
- * sure we get no more interrupts, and don't turn off
- * freeze.
- */
- ppd->dd->cspec->stay_in_freeze = 1;
- qib_7322_set_intr_state(ppd->dd, 0);
- qib_write_kreg(dd, kr_fmask, 0ULL);
- qib_dev_err(dd, "HCA unusable until powercycled\n");
- return; /* eventually reset */
- }
-
- qib_write_kreg(ppd->dd, kr_hwerrclear,
- SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
-
- /* don't do the full clear_freeze(), not needed for this */
- qib_write_kreg(dd, kr_control, dd->control);
- qib_read_kreg32(dd, kr_scratch);
- /* take IBC out of reset */
- if (ppd->link_speed_supported) {
- ppd->cpspec->ibcctrl_a &=
- ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
- qib_write_kreg_port(ppd, krp_ibcctrl_a,
- ppd->cpspec->ibcctrl_a);
- qib_read_kreg32(dd, kr_scratch);
- if (ppd->lflags & QIBL_IB_LINK_DISABLED)
- qib_set_ib_7322_lstate(ppd, 0,
- QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
- }
-}
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
deleted file mode 100644
index 7e00470adc30..000000000000
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ /dev/null
@@ -1,1847 +0,0 @@
-/*
- * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/delay.h>
-#include <linux/idr.h>
-#include <linux/module.h>
-#include <linux/printk.h>
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-#include <linux/dca.h>
-#endif
-
-#include "qib.h"
-#include "qib_common.h"
-#include "qib_mad.h"
-#ifdef CONFIG_DEBUG_FS
-#include "qib_debugfs.h"
-#include "qib_verbs.h"
-#endif
-
-#undef pr_fmt
-#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
-
-/*
- * min buffers we want to have per context, after driver
- */
-#define QIB_MIN_USER_CTXT_BUFCNT 7
-
-#define QLOGIC_IB_R_SOFTWARE_MASK 0xFF
-#define QLOGIC_IB_R_SOFTWARE_SHIFT 24
-#define QLOGIC_IB_R_EMULATOR_MASK (1ULL<<62)
-
-/*
- * Number of ctxts we are configured to use (to allow for more pio
- * buffers per ctxt, etc.) Zero means use chip value.
- */
-ushort qib_cfgctxts;
-module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO);
-MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use");
-
-unsigned qib_numa_aware;
-module_param_named(numa_aware, qib_numa_aware, uint, S_IRUGO);
-MODULE_PARM_DESC(numa_aware,
- "0 -> PSM allocation close to HCA, 1 -> PSM allocation local to process");
-
-/*
- * If set, do not write to any registers where avoidable; a hack to allow
- * checking for deranged default register values.
- */
-ushort qib_mini_init;
-module_param_named(mini_init, qib_mini_init, ushort, S_IRUGO);
-MODULE_PARM_DESC(mini_init, "If set, do minimal diag init");
-
-unsigned qib_n_krcv_queues;
-module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO);
-MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");
-
-unsigned qib_cc_table_size;
-module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO);
-MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984");
-
-static void verify_interrupt(unsigned long);
-
-static struct idr qib_unit_table;
-u32 qib_cpulist_count;
-unsigned long *qib_cpulist;
-
-/* set number of contexts we'll actually use */
-void qib_set_ctxtcnt(struct qib_devdata *dd)
-{
- if (!qib_cfgctxts) {
- dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
- if (dd->cfgctxts > dd->ctxtcnt)
- dd->cfgctxts = dd->ctxtcnt;
- } else if (qib_cfgctxts < dd->num_pports)
- dd->cfgctxts = dd->ctxtcnt;
- else if (qib_cfgctxts <= dd->ctxtcnt)
- dd->cfgctxts = qib_cfgctxts;
- else
- dd->cfgctxts = dd->ctxtcnt;
- dd->freectxts = (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
- dd->cfgctxts - dd->first_user_ctxt;
-}
-
-/*
- * Common code for creating the receive context array.
- */
-int qib_create_ctxts(struct qib_devdata *dd)
-{
- unsigned i;
- int local_node_id = pcibus_to_node(dd->pcidev->bus);
-
- if (local_node_id < 0)
- local_node_id = numa_node_id();
- dd->assigned_node_id = local_node_id;
-
- /*
- * Allocate full ctxtcnt array, rather than just cfgctxts, because
- * cleanup iterates across all possible ctxts.
- */
- dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
- if (!dd->rcd) {
- qib_dev_err(dd,
- "Unable to allocate ctxtdata array, failing\n");
- return -ENOMEM;
- }
-
- /* create (one or more) kctxt */
- for (i = 0; i < dd->first_user_ctxt; ++i) {
- struct qib_pportdata *ppd;
- struct qib_ctxtdata *rcd;
-
- if (dd->skip_kctxt_mask & (1 << i))
- continue;
-
- ppd = dd->pport + (i % dd->num_pports);
-
- rcd = qib_create_ctxtdata(ppd, i, dd->assigned_node_id);
- if (!rcd) {
- qib_dev_err(dd,
- "Unable to allocate ctxtdata for Kernel ctxt, failing\n");
- kfree(dd->rcd);
- dd->rcd = NULL;
- return -ENOMEM;
- }
- rcd->pkeys[0] = QIB_DEFAULT_P_KEY;
- rcd->seq_cnt = 1;
- }
- return 0;
-}
-
-/*
- * Common code for user and kernel context setup.
- */
-struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt,
- int node_id)
-{
- struct qib_devdata *dd = ppd->dd;
- struct qib_ctxtdata *rcd;
-
- rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, node_id);
- if (rcd) {
- INIT_LIST_HEAD(&rcd->qp_wait_list);
- rcd->node_id = node_id;
- rcd->ppd = ppd;
- rcd->dd = dd;
- rcd->cnt = 1;
- rcd->ctxt = ctxt;
- dd->rcd[ctxt] = rcd;
-#ifdef CONFIG_DEBUG_FS
- if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
- rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
- GFP_KERNEL, node_id);
- if (!rcd->opstats) {
- kfree(rcd);
- qib_dev_err(dd,
- "Unable to allocate per ctxt stats buffer\n");
- return NULL;
- }
- }
-#endif
- dd->f_init_ctxt(rcd);
-
- /*
- * To avoid wasting a lot of memory, we allocate 32KB chunks
- * of physically contiguous memory, advance through it until
- * used up and then allocate more. Of course, we need
- * memory to store those extra pointers, now. 32KB seems to
- * be the most that is "safe" under memory pressure
- * (creating large files and then copying them over
- * NFS while doing lots of MPI jobs). The OOM killer can
- * get invoked, even though we say we can sleep and this can
- * cause significant system problems....
- */
- rcd->rcvegrbuf_size = 0x8000;
- rcd->rcvegrbufs_perchunk =
- rcd->rcvegrbuf_size / dd->rcvegrbufsize;
- rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt +
- rcd->rcvegrbufs_perchunk - 1) /
- rcd->rcvegrbufs_perchunk;
- BUG_ON(!is_power_of_2(rcd->rcvegrbufs_perchunk));
- rcd->rcvegrbufs_perchunk_shift =
- ilog2(rcd->rcvegrbufs_perchunk);
- }
- return rcd;
-}
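
/*
 * A standalone sketch of the 32 KB eager-buffer chunking arithmetic
 * above, with hypothetical per-context values; the driver depends on
 * buffers-per-chunk being a power of two so lookups can use a shift
 * rather than a divide.
 */
#include <stdio.h>

int main(void)
{
	unsigned chunk_bytes = 0x8000;     /* 32 KB of contiguous memory per chunk */
	unsigned egrbufsize  = 4096;       /* hypothetical eager buffer size */
	unsigned egrcnt      = 512;        /* hypothetical eager TIDs per ctxt */

	unsigned perchunk = chunk_bytes / egrbufsize;             /* 8 */
	unsigned chunks   = (egrcnt + perchunk - 1) / perchunk;   /* 64 */
	unsigned shift    = 0;

	while ((1u << shift) < perchunk)   /* ilog2(); perchunk must be 2^n */
		shift++;

	printf("bufs/chunk=%u chunks=%u shift=%u\n", perchunk, chunks, shift);
	return 0;
}
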
-
-/*
- * Common code for initializing the physical port structure.
- */
-int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
- u8 hw_pidx, u8 port)
-{
- int size;
-
- ppd->dd = dd;
- ppd->hw_pidx = hw_pidx;
- ppd->port = port; /* IB port number, not index */
-
- spin_lock_init(&ppd->sdma_lock);
- spin_lock_init(&ppd->lflags_lock);
- spin_lock_init(&ppd->cc_shadow_lock);
- init_waitqueue_head(&ppd->state_wait);
-
- init_timer(&ppd->symerr_clear_timer);
- ppd->symerr_clear_timer.function = qib_clear_symerror_on_linkup;
- ppd->symerr_clear_timer.data = (unsigned long)ppd;
-
- ppd->qib_wq = NULL;
- ppd->ibport_data.pmastats =
- alloc_percpu(struct qib_pma_counters);
- if (!ppd->ibport_data.pmastats)
- return -ENOMEM;
-
- if (qib_cc_table_size < IB_CCT_MIN_ENTRIES)
- goto bail;
-
- ppd->cc_supported_table_entries = min(max_t(int, qib_cc_table_size,
- IB_CCT_MIN_ENTRIES), IB_CCT_ENTRIES*IB_CC_TABLE_CAP_DEFAULT);
-
- ppd->cc_max_table_entries =
- ppd->cc_supported_table_entries/IB_CCT_ENTRIES;
-
- size = IB_CC_TABLE_CAP_DEFAULT * sizeof(struct ib_cc_table_entry)
- * IB_CCT_ENTRIES;
- ppd->ccti_entries = kzalloc(size, GFP_KERNEL);
- if (!ppd->ccti_entries) {
- qib_dev_err(dd,
- "failed to allocate congestion control table for port %d!\n",
- port);
- goto bail;
- }
-
- size = IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry);
- ppd->congestion_entries = kzalloc(size, GFP_KERNEL);
- if (!ppd->congestion_entries) {
- qib_dev_err(dd,
- "failed to allocate congestion setting list for port %d!\n",
- port);
- goto bail_1;
- }
-
- size = sizeof(struct cc_table_shadow);
- ppd->ccti_entries_shadow = kzalloc(size, GFP_KERNEL);
- if (!ppd->ccti_entries_shadow) {
- qib_dev_err(dd,
- "failed to allocate shadow ccti list for port %d!\n",
- port);
- goto bail_2;
- }
-
- size = sizeof(struct ib_cc_congestion_setting_attr);
- ppd->congestion_entries_shadow = kzalloc(size, GFP_KERNEL);
- if (!ppd->congestion_entries_shadow) {
- qib_dev_err(dd,
- "failed to allocate shadow congestion setting list for port %d!\n",
- port);
- goto bail_3;
- }
-
- return 0;
-
-bail_3:
- kfree(ppd->ccti_entries_shadow);
- ppd->ccti_entries_shadow = NULL;
-bail_2:
- kfree(ppd->congestion_entries);
- ppd->congestion_entries = NULL;
-bail_1:
- kfree(ppd->ccti_entries);
- ppd->ccti_entries = NULL;
-bail:
- /* User is intentionally disabling the congestion control agent */
- if (!qib_cc_table_size)
- return 0;
-
- if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) {
- qib_cc_table_size = 0;
- qib_dev_err(dd,
- "Congestion Control table size %d less than minimum %d for port %d\n",
- qib_cc_table_size, IB_CCT_MIN_ENTRIES, port);
- }
-
- qib_dev_err(dd, "Congestion Control Agent disabled for port %d\n",
- port);
- return 0;
-}
-
-static int init_pioavailregs(struct qib_devdata *dd)
-{
- int ret, pidx;
- u64 *status_page;
-
- dd->pioavailregs_dma = dma_alloc_coherent(
- &dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys,
- GFP_KERNEL);
- if (!dd->pioavailregs_dma) {
- qib_dev_err(dd,
- "failed to allocate PIOavail reg area in memory\n");
- ret = -ENOMEM;
- goto done;
- }
-
- /*
- * We really want L2 cache aligned, but for current CPUs of
- * interest, they are the same.
- */
- status_page = (u64 *)
- ((char *) dd->pioavailregs_dma +
- ((2 * L1_CACHE_BYTES +
- dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
- /* device status comes first, for backwards compatibility */
- dd->devstatusp = status_page;
- *status_page++ = 0;
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- dd->pport[pidx].statusp = status_page;
- *status_page++ = 0;
- }
-
- /*
- * Setup buffer to hold freeze and other messages, accessible to
- * apps, following statusp. This is per-unit, not per port.
- */
- dd->freezemsg = (char *) status_page;
- *dd->freezemsg = 0;
- /* length of msg buffer is "whatever is left" */
- ret = (char *) status_page - (char *) dd->pioavailregs_dma;
- dd->freezelen = PAGE_SIZE - ret;
-
- ret = 0;
-
-done:
- return ret;
-}
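
/*
 * A sketch of the shared status-page layout established above: the
 * device status word comes first (for backwards compatibility), then
 * one status word per port, then the freeze-message buffer takes
 * whatever is left of the page; counts and offsets are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long page[512] = { 0 };            /* stands in for the DMA page */
	unsigned nports = 2;                             /* hypothetical port count */
	unsigned long long *devstatus = page + 32;       /* hypothetical aligned offset */
	unsigned long long *portstatus = devstatus + 1;  /* per-port status words */
	char *freezemsg = (char *)(portstatus + nports); /* rest of the page */

	*devstatus = 0;
	printf("freeze message buffer: %zu bytes\n",
	       sizeof(page) - (size_t)(freezemsg - (char *)page));
	return 0;
}
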
-
-/**
- * init_shadow_tids - allocate the shadow TID array
- * @dd: the qlogic_ib device
- *
- * allocate the shadow TID array, so we can qib_munlock previous
- * entries. It may make more sense to move the pageshadow to the
- * ctxt data structure, so we only allocate memory for ctxts actually
- * in use, since we are at 8k per ctxt now.
- * We don't want failures here to prevent use of the driver/chip,
- * so no return value.
- */
-static void init_shadow_tids(struct qib_devdata *dd)
-{
- struct page **pages;
- dma_addr_t *addrs;
-
- pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
- if (!pages) {
- qib_dev_err(dd,
- "failed to allocate shadow page * array, no expected sends!\n");
- goto bail;
- }
-
- addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
- if (!addrs) {
- qib_dev_err(dd,
- "failed to allocate shadow dma handle array, no expected sends!\n");
- goto bail_free;
- }
-
- dd->pageshadow = pages;
- dd->physshadow = addrs;
- return;
-
-bail_free:
- vfree(pages);
-bail:
- dd->pageshadow = NULL;
-}
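
/*
 * A sketch of the shadow-TID sizing behind the "8k per ctxt" remark in
 * the comment above: one page pointer plus one DMA handle per receive
 * TID, per configured context; the counts here are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned cfgctxts = 10, rcvtidcnt = 512;   /* hypothetical */
	size_t entries = (size_t)cfgctxts * rcvtidcnt;

	/* 512 TIDs x (8 + 8) bytes is roughly 8 KB per context on 64-bit */
	printf("pageshadow %zu bytes + physshadow %zu bytes\n",
	       entries * sizeof(void *),
	       entries * sizeof(unsigned long long));
	return 0;
}
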
-
-/*
- * Do initialization for device that is only needed on
- * first detect, not on resets.
- */
-static int loadtime_init(struct qib_devdata *dd)
-{
- int ret = 0;
-
- if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
- QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) {
- qib_dev_err(dd,
- "Driver only handles version %d, chip swversion is %d (%llx), failng\n",
- QIB_CHIP_SWVERSION,
- (int)(dd->revision >>
- QLOGIC_IB_R_SOFTWARE_SHIFT) &
- QLOGIC_IB_R_SOFTWARE_MASK,
- (unsigned long long) dd->revision);
- ret = -ENOSYS;
- goto done;
- }
-
- if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK)
- qib_devinfo(dd->pcidev, "%s", dd->boardversion);
-
- spin_lock_init(&dd->pioavail_lock);
- spin_lock_init(&dd->sendctrl_lock);
- spin_lock_init(&dd->uctxt_lock);
- spin_lock_init(&dd->qib_diag_trans_lock);
- spin_lock_init(&dd->eep_st_lock);
- mutex_init(&dd->eep_lock);
-
- if (qib_mini_init)
- goto done;
-
- ret = init_pioavailregs(dd);
- init_shadow_tids(dd);
-
- qib_get_eeprom_info(dd);
-
- /* setup time (don't start yet) to verify we got interrupt */
- init_timer(&dd->intrchk_timer);
- dd->intrchk_timer.function = verify_interrupt;
- dd->intrchk_timer.data = (unsigned long) dd;
-
- ret = qib_cq_init(dd);
-done:
- return ret;
-}
-
-/**
- * init_after_reset - re-initialize after a reset
- * @dd: the qlogic_ib device
- *
- * Sanity check at least some of the values after reset, and
- * ensure no receive or transmit is possible (explicitly, in case
- * the reset failed).
- */
-static int init_after_reset(struct qib_devdata *dd)
-{
- int i;
-
- /*
- * Ensure chip does no sends or receives, tail updates, or
- * pioavail updates while we re-initialize. This is mostly
- * for the driver data structures, not chip registers.
- */
- for (i = 0; i < dd->num_pports; ++i) {
- /*
- * ctxt == -1 means "all contexts". Only really safe for
- * _dis_abling things, as here.
- */
- dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS |
- QIB_RCVCTRL_INTRAVAIL_DIS |
- QIB_RCVCTRL_TAILUPD_DIS, -1);
- /* Redundant across ports for some, but no big deal. */
- dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS |
- QIB_SENDCTRL_AVAIL_DIS);
- }
-
- return 0;
-}
-
-static void enable_chip(struct qib_devdata *dd)
-{
- u64 rcvmask;
- int i;
-
- /*
- * Enable PIO send, and update of PIOavail regs to memory.
- */
- for (i = 0; i < dd->num_pports; ++i)
- dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB |
- QIB_SENDCTRL_AVAIL_ENB);
- /*
- * Enable kernel ctxts' receive and receive interrupt.
- * Other ctxts done as user opens and inits them.
- */
- rcvmask = QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB;
- rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ?
- QIB_RCVCTRL_TAILUPD_DIS : QIB_RCVCTRL_TAILUPD_ENB;
- for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
- struct qib_ctxtdata *rcd = dd->rcd[i];
-
- if (rcd)
- dd->f_rcvctrl(rcd->ppd, rcvmask, i);
- }
-}
-
-static void verify_interrupt(unsigned long opaque)
-{
- struct qib_devdata *dd = (struct qib_devdata *) opaque;
- u64 int_counter;
-
- if (!dd)
- return; /* being torn down */
-
- /*
- * If we don't see any interrupts, let the user know and
- * don't bother checking again.
- */
- int_counter = qib_int_counter(dd) - dd->z_int_counter;
- if (int_counter == 0) {
- if (!dd->f_intr_fallback(dd))
- dev_err(&dd->pcidev->dev,
- "No interrupts detected, not usable.\n");
- else /* re-arm the timer to see if fallback works */
- mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
- }
-}
-
-static void init_piobuf_state(struct qib_devdata *dd)
-{
- int i, pidx;
- u32 uctxts;
-
- /*
- * Ensure all buffers are free, and fifos empty. Buffers
- * are common, so only do once for port 0.
- *
- * After enable and qib_chg_pioavailkernel so we can safely
- * enable pioavail updates and PIOENABLE. After this, packets
- * are ready and able to go out.
- */
- dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL);
- for (pidx = 0; pidx < dd->num_pports; ++pidx)
- dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH);
-
- /*
- * If not all sendbufs are used, add the one to each of the lower
- * numbered contexts. pbufsctxt and lastctxt_piobuf are
- * calculated in chip-specific code because it may cause some
- * chip-specific adjustments to be made.
- */
- uctxts = dd->cfgctxts - dd->first_user_ctxt;
- dd->ctxts_extrabuf = dd->pbufsctxt ?
- dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0;
-
- /*
- * Set up the shadow copies of the piobufavail registers,
- * which we compare against the chip registers for now, and
- * the in memory DMA'ed copies of the registers.
- * By now pioavail updates to memory should have occurred, so
- * copy them into our working/shadow registers; this is in
- * case something went wrong with abort, but mostly to get the
- * initial values of the generation bit correct.
- */
- for (i = 0; i < dd->pioavregs; i++) {
- __le64 tmp;
-
- tmp = dd->pioavailregs_dma[i];
- /*
- * Don't need to worry about pioavailkernel here
- * because we will call qib_chg_pioavailkernel() later
- * in initialization, to busy out buffers as needed.
- */
- dd->pioavailshadow[i] = le64_to_cpu(tmp);
- }
- while (i < ARRAY_SIZE(dd->pioavailshadow))
- dd->pioavailshadow[i++] = 0; /* for debugging sanity */
-
- /* after pioavailshadow is setup */
- qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k,
- TXCHK_CHG_TYPE_KERN, NULL);
- dd->f_initvl15_bufs(dd);
-}
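
/*
 * A sketch of the leftover-PIO-buffer arithmetic above, with
 * hypothetical values: when the buffers left for user contexts don't
 * divide evenly, the remainder is handed out one per context to the
 * lower-numbered contexts.
 */
#include <stdio.h>

int main(void)
{
	unsigned cfgctxts = 10, first_user = 2;    /* hypothetical */
	unsigned lastctxt_piobuf = 100;            /* bufs left for user ctxts */

	unsigned uctxts = cfgctxts - first_user;                  /* 8 */
	unsigned pbufsctxt = lastctxt_piobuf / uctxts;            /* chip-specific in the driver; even split here */
	unsigned extra = lastctxt_piobuf - pbufsctxt * uctxts;    /* 4 */

	printf("%u bufs per ctxt, lowest %u ctxts get one extra\n",
	       pbufsctxt, extra);
	return 0;
}
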
-
-/**
- * qib_create_workqueues - create per port workqueues
- * @dd: the qlogic_ib device
- */
-static int qib_create_workqueues(struct qib_devdata *dd)
-{
- int pidx;
- struct qib_pportdata *ppd;
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (!ppd->qib_wq) {
- char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */
-
- snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
- dd->unit, pidx);
- ppd->qib_wq =
- create_singlethread_workqueue(wq_name);
- if (!ppd->qib_wq)
- goto wq_error;
- }
- }
- return 0;
-wq_error:
- pr_err("create_singlethread_workqueue failed for port %d\n",
- pidx + 1);
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (ppd->qib_wq) {
- destroy_workqueue(ppd->qib_wq);
- ppd->qib_wq = NULL;
- }
- }
- return -ENOMEM;
-}
-
-static void qib_free_pportdata(struct qib_pportdata *ppd)
-{
- free_percpu(ppd->ibport_data.pmastats);
- ppd->ibport_data.pmastats = NULL;
-}
-
-/**
- * qib_init - do the actual initialization sequence on the chip
- * @dd: the qlogic_ib device
- * @reinit: reinitializing, so don't allocate new memory
- *
- * Do the actual initialization sequence on the chip. This is done
- * both from the init routine called from the PCI infrastructure, and
- * when we reset the chip, or detect that it was reset internally,
- * or it's administratively re-enabled.
- *
- * Memory allocation here and in called routines is only done in
- * the first case (reinit == 0). We have to be careful, because even
- * without memory allocation, we need to re-write all the chip registers
- * TIDs, etc. after the reset or enable has completed.
- */
-int qib_init(struct qib_devdata *dd, int reinit)
-{
- int ret = 0, pidx, lastfail = 0;
- u32 portok = 0;
- unsigned i;
- struct qib_ctxtdata *rcd;
- struct qib_pportdata *ppd;
- unsigned long flags;
-
- /* Set linkstate to unknown, so we can watch for a transition. */
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~(QIBL_LINKACTIVE | QIBL_LINKARMED |
- QIBL_LINKDOWN | QIBL_LINKINIT |
- QIBL_LINKV);
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- }
-
- if (reinit)
- ret = init_after_reset(dd);
- else
- ret = loadtime_init(dd);
- if (ret)
- goto done;
-
- /* Bypass most chip-init, to get to device creation */
- if (qib_mini_init)
- return 0;
-
- ret = dd->f_late_initreg(dd);
- if (ret)
- goto done;
-
- /* dd->rcd can be NULL if early init failed */
- for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
- /*
- * Set up the (kernel) rcvhdr queue and egr TIDs. If doing
- * re-init, the simplest way to handle this is to free
- * existing, and re-allocate.
- * Need to re-create rest of ctxt 0 ctxtdata as well.
- */
- rcd = dd->rcd[i];
- if (!rcd)
- continue;
-
- lastfail = qib_create_rcvhdrq(dd, rcd);
- if (!lastfail)
- lastfail = qib_setup_eagerbufs(rcd);
- if (lastfail) {
- qib_dev_err(dd,
- "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
- continue;
- }
- }
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- int mtu;
-
- if (lastfail)
- ret = lastfail;
- ppd = dd->pport + pidx;
- mtu = ib_mtu_enum_to_int(qib_ibmtu);
- if (mtu == -1) {
- mtu = QIB_DEFAULT_MTU;
- qib_ibmtu = 0; /* don't leave invalid value */
- }
- /* set max we can ever have for this driver load */
- ppd->init_ibmaxlen = min(mtu > 2048 ?
- dd->piosize4k : dd->piosize2k,
- dd->rcvegrbufsize +
- (dd->rcvhdrentsize << 2));
- /*
- * Have to initialize ibmaxlen, but this will normally
- * change immediately in qib_set_mtu().
- */
- ppd->ibmaxlen = ppd->init_ibmaxlen;
- qib_set_mtu(ppd, mtu);
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_LINK_DISABLED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
-
- lastfail = dd->f_bringup_serdes(ppd);
- if (lastfail) {
- qib_devinfo(dd->pcidev,
- "Failed to bringup IB port %u\n", ppd->port);
- lastfail = -ENETDOWN;
- continue;
- }
-
- portok++;
- }
-
- if (!portok) {
- /* none of the ports initialized */
- if (!ret && lastfail)
- ret = lastfail;
- else if (!ret)
- ret = -ENETDOWN;
- /* but continue on, so we can debug cause */
- }
-
- enable_chip(dd);
-
- init_piobuf_state(dd);
-
-done:
- if (!ret) {
- /* chip is OK for user apps; mark it as initialized */
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- /*
- * Set status even if port serdes is not initialized
- * so that diags will work.
- */
- *ppd->statusp |= QIB_STATUS_CHIP_PRESENT |
- QIB_STATUS_INITTED;
- if (!ppd->link_speed_enabled)
- continue;
- if (dd->flags & QIB_HAS_SEND_DMA)
- ret = qib_setup_sdma(ppd);
- init_timer(&ppd->hol_timer);
- ppd->hol_timer.function = qib_hol_event;
- ppd->hol_timer.data = (unsigned long)ppd;
- ppd->hol_state = QIB_HOL_UP;
- }
-
- /* now we can enable all interrupts from the chip */
- dd->f_set_intr_state(dd, 1);
-
- /*
- * Setup to verify we get an interrupt, and fallback
- * to an alternate if necessary and possible.
- */
- mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
- /* start stats retrieval timer */
- mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
- }
-
- /* if ret is non-zero, we probably should do some cleanup here... */
- return ret;
-}
-
-/*
- * These next two routines are placeholders in case we don't have per-arch
- * code for controlling write combining. If explicit control of write
- * combining is not available, performance will probably be awful.
- */
-
-int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd)
-{
- return -EOPNOTSUPP;
-}
-
-void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd)
-{
-}
-
-static inline struct qib_devdata *__qib_lookup(int unit)
-{
- return idr_find(&qib_unit_table, unit);
-}
-
-struct qib_devdata *qib_lookup(int unit)
-{
- struct qib_devdata *dd;
- unsigned long flags;
-
- spin_lock_irqsave(&qib_devs_lock, flags);
- dd = __qib_lookup(unit);
- spin_unlock_irqrestore(&qib_devs_lock, flags);
-
- return dd;
-}
-
-/*
- * Stop the timers during unit shutdown, or after an error late
- * in initialization.
- */
-static void qib_stop_timers(struct qib_devdata *dd)
-{
- struct qib_pportdata *ppd;
- int pidx;
-
- if (dd->stats_timer.data) {
- del_timer_sync(&dd->stats_timer);
- dd->stats_timer.data = 0;
- }
- if (dd->intrchk_timer.data) {
- del_timer_sync(&dd->intrchk_timer);
- dd->intrchk_timer.data = 0;
- }
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (ppd->hol_timer.data)
- del_timer_sync(&ppd->hol_timer);
- if (ppd->led_override_timer.data) {
- del_timer_sync(&ppd->led_override_timer);
- atomic_set(&ppd->led_override_timer_active, 0);
- }
- if (ppd->symerr_clear_timer.data)
- del_timer_sync(&ppd->symerr_clear_timer);
- }
-}
-
-/**
- * qib_shutdown_device - shut down a device
- * @dd: the qlogic_ib device
- *
- * This is called to make the device quiet when we are about to
- * unload the driver, and also when the device is administratively
- * disabled. It does not free any data structures.
- * Everything it does has to be set up again by qib_init(dd, 1).
- */
-static void qib_shutdown_device(struct qib_devdata *dd)
-{
- struct qib_pportdata *ppd;
- unsigned pidx;
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
-
- spin_lock_irq(&ppd->lflags_lock);
- ppd->lflags &= ~(QIBL_LINKDOWN | QIBL_LINKINIT |
- QIBL_LINKARMED | QIBL_LINKACTIVE |
- QIBL_LINKV);
- spin_unlock_irq(&ppd->lflags_lock);
- *ppd->statusp &= ~(QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY);
- }
- dd->flags &= ~QIB_INITTED;
-
- /* mask interrupts, but not errors */
- dd->f_set_intr_state(dd, 0);
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS |
- QIB_RCVCTRL_CTXT_DIS |
- QIB_RCVCTRL_INTRAVAIL_DIS |
- QIB_RCVCTRL_PKEY_ENB, -1);
- /*
- * Gracefully stop all sends allowing any in progress to
- * trickle out first.
- */
- dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR);
- }
-
- /*
- * Enough for anything that's going to trickle out to have actually
- * done so.
- */
- udelay(20);
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- dd->f_setextled(ppd, 0); /* make sure LEDs are off */
-
- if (dd->flags & QIB_HAS_SEND_DMA)
- qib_teardown_sdma(ppd);
-
- dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS |
- QIB_SENDCTRL_SEND_DIS);
- /*
- * Clear SerdesEnable.
- * We can't count on interrupts since we are stopping.
- */
- dd->f_quiet_serdes(ppd);
-
- if (ppd->qib_wq) {
- destroy_workqueue(ppd->qib_wq);
- ppd->qib_wq = NULL;
- }
- qib_free_pportdata(ppd);
- }
-
-}
-
-/**
- * qib_free_ctxtdata - free a context's allocated data
- * @dd: the qlogic_ib device
- * @rcd: the ctxtdata structure
- *
- * free up any allocated data for a context.
- * This should not touch anything that would affect a simultaneous
- * re-allocation of context data, because it is called after qib_mutex
- * is released (and can be called from reinit as well).
- * It should never change any chip state, or global driver state.
- */
-void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
-{
- if (!rcd)
- return;
-
- if (rcd->rcvhdrq) {
- dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
- rcd->rcvhdrq, rcd->rcvhdrq_phys);
- rcd->rcvhdrq = NULL;
- if (rcd->rcvhdrtail_kvaddr) {
- dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
- rcd->rcvhdrtail_kvaddr,
- rcd->rcvhdrqtailaddr_phys);
- rcd->rcvhdrtail_kvaddr = NULL;
- }
- }
- if (rcd->rcvegrbuf) {
- unsigned e;
-
- for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
- void *base = rcd->rcvegrbuf[e];
- size_t size = rcd->rcvegrbuf_size;
-
- dma_free_coherent(&dd->pcidev->dev, size,
- base, rcd->rcvegrbuf_phys[e]);
- }
- kfree(rcd->rcvegrbuf);
- rcd->rcvegrbuf = NULL;
- kfree(rcd->rcvegrbuf_phys);
- rcd->rcvegrbuf_phys = NULL;
- rcd->rcvegrbuf_chunks = 0;
- }
-
- kfree(rcd->tid_pg_list);
- vfree(rcd->user_event_mask);
- vfree(rcd->subctxt_uregbase);
- vfree(rcd->subctxt_rcvegrbuf);
- vfree(rcd->subctxt_rcvhdr_base);
-#ifdef CONFIG_DEBUG_FS
- kfree(rcd->opstats);
- rcd->opstats = NULL;
-#endif
- kfree(rcd);
-}
-
-/*
- * Perform a PIO buffer bandwidth write test, to verify proper system
- * configuration. Even when all the setup calls work, occasionally
- * BIOS or other issues can prevent write combining from working, or
- * can cause other bandwidth problems to the chip.
- *
- * This test simply writes the same buffer over and over again, and
- * measures close to the peak bandwidth to the chip (not testing
- * data bandwidth to the wire). On chips that use an address-based
- * trigger to send packets to the wire, this is easy. On chips that
- * use a count to trigger, we want to make sure that the packet doesn't
- * go out on the wire, or trigger flow control checks.
- */
-static void qib_verify_pioperf(struct qib_devdata *dd)
-{
- u32 pbnum, cnt, lcnt;
- u32 __iomem *piobuf;
- u32 *addr;
- u64 msecs, emsecs;
-
- piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum);
- if (!piobuf) {
- qib_devinfo(dd->pcidev,
- "No PIObufs for checking perf, skipping\n");
- return;
- }
-
- /*
- * Enough to give us a reasonable test, less than piobuf size, and
- * likely multiple of store buffer length.
- */
- cnt = 1024;
-
- addr = vmalloc(cnt);
- if (!addr) {
- qib_devinfo(dd->pcidev,
- "Couldn't get memory for checking PIO perf, skipping\n");
- goto done;
- }
-
- preempt_disable(); /* we want reasonably accurate elapsed time */
- msecs = 1 + jiffies_to_msecs(jiffies);
- for (lcnt = 0; lcnt < 10000U; lcnt++) {
- /* wait until we cross msec boundary */
- if (jiffies_to_msecs(jiffies) >= msecs)
- break;
- udelay(1);
- }
-
- dd->f_set_armlaunch(dd, 0);
-
- /*
- * length 0, no dwords actually sent
- */
- writeq(0, piobuf);
- qib_flush_wc();
-
- /*
- * This is only roughly accurate, since even with preempt we
- * still take interrupts that could take a while. Running for
- * >= 5 msec seems to get us "close enough" to accurate values.
- */
- msecs = jiffies_to_msecs(jiffies);
- for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
- qib_pio_copy(piobuf + 64, addr, cnt >> 2);
- emsecs = jiffies_to_msecs(jiffies) - msecs;
- }
-
- /* 1 GiB/sec, slightly over IB SDR line rate */
- if (lcnt < (emsecs * 1024U))
- qib_dev_err(dd,
- "Performance problem: bandwidth to PIO buffers is only %u MiB/sec\n",
- lcnt / (u32) emsecs);
-
- preempt_enable();
-
- vfree(addr);
-
-done:
- /* disarm piobuf, so it's available again */
- dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum));
- qib_sendbuf_done(dd, pbnum);
- dd->f_set_armlaunch(dd, 1);
-}
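
/*
 * The perf check above copies a 1 KiB buffer in a tight loop for about
 * 5 ms; one 1 KiB copy per elapsed millisecond is roughly 1 MiB/s, so
 * loops/ms approximates MiB/s and the 1 GiB/s threshold becomes
 * "loops < elapsed_ms * 1024".  A standalone restatement with a
 * hypothetical measurement:
 */
#include <stdio.h>

int main(void)
{
	unsigned loops = 3500, elapsed_ms = 5;   /* hypothetical result */
	unsigned mib_per_sec = loops / elapsed_ms;

	if (loops < elapsed_ms * 1024)
		printf("slow: ~%u MiB/s to PIO buffers\n", mib_per_sec);
	else
		printf("ok: ~%u MiB/s to PIO buffers\n", mib_per_sec);
	return 0;
}
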
-
-void qib_free_devdata(struct qib_devdata *dd)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&qib_devs_lock, flags);
- idr_remove(&qib_unit_table, dd->unit);
- list_del(&dd->list);
- spin_unlock_irqrestore(&qib_devs_lock, flags);
-
-#ifdef CONFIG_DEBUG_FS
- qib_dbg_ibdev_exit(&dd->verbs_dev);
-#endif
- free_percpu(dd->int_counter);
- ib_dealloc_device(&dd->verbs_dev.ibdev);
-}
-
-u64 qib_int_counter(struct qib_devdata *dd)
-{
- int cpu;
- u64 int_counter = 0;
-
- for_each_possible_cpu(cpu)
- int_counter += *per_cpu_ptr(dd->int_counter, cpu);
- return int_counter;
-}
-
-u64 qib_sps_ints(void)
-{
- unsigned long flags;
- struct qib_devdata *dd;
- u64 sps_ints = 0;
-
- spin_lock_irqsave(&qib_devs_lock, flags);
- list_for_each_entry(dd, &qib_dev_list, list) {
- sps_ints += qib_int_counter(dd);
- }
- spin_unlock_irqrestore(&qib_devs_lock, flags);
- return sps_ints;
-}
-
-/*
- * Allocate our primary per-unit data structure. Must be done via verbs
- * allocator, because the verbs cleanup process both cleans up and
- * frees the data structure.
- * "extra" is for chip-specific data.
- *
- * Use the idr mechanism to get a unit number for this unit.
- */
-struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
-{
- unsigned long flags;
- struct qib_devdata *dd;
- int ret;
-
- dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra);
- if (!dd)
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&dd->list);
-
- idr_preload(GFP_KERNEL);
- spin_lock_irqsave(&qib_devs_lock, flags);
-
- ret = idr_alloc(&qib_unit_table, dd, 0, 0, GFP_NOWAIT);
- if (ret >= 0) {
- dd->unit = ret;
- list_add(&dd->list, &qib_dev_list);
- }
-
- spin_unlock_irqrestore(&qib_devs_lock, flags);
- idr_preload_end();
-
- if (ret < 0) {
- qib_early_err(&pdev->dev,
- "Could not allocate unit ID: error %d\n", -ret);
- goto bail;
- }
- dd->int_counter = alloc_percpu(u64);
- if (!dd->int_counter) {
- ret = -ENOMEM;
- qib_early_err(&pdev->dev,
- "Could not allocate per-cpu int_counter\n");
- goto bail;
- }
-
- if (!qib_cpulist_count) {
- u32 count = num_online_cpus();
-
- qib_cpulist = kzalloc(BITS_TO_LONGS(count) *
- sizeof(long), GFP_KERNEL);
- if (qib_cpulist)
- qib_cpulist_count = count;
- else
- qib_early_err(&pdev->dev,
- "Could not alloc cpulist info, cpu affinity might be wrong\n");
- }
-#ifdef CONFIG_DEBUG_FS
- qib_dbg_ibdev_init(&dd->verbs_dev);
-#endif
- return dd;
-bail:
- if (!list_empty(&dd->list))
- list_del_init(&dd->list);
- ib_dealloc_device(&dd->verbs_dev.ibdev);
- return ERR_PTR(ret);
-}
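
/*
 * The idr_alloc() call above hands out the lowest free unit number
 * under qib_devs_lock; a userspace sketch of that allocation policy
 * (the real idr is a tree-based ID allocator, not a fixed array):
 */
#include <stdio.h>

#define MAX_UNITS 8

static void *units[MAX_UNITS];

static int alloc_unit(void *dd)
{
	for (int i = 0; i < MAX_UNITS; i++)
		if (!units[i]) {
			units[i] = dd;
			return i;       /* becomes dd->unit */
		}
	return -1;                      /* a negative errno in the real code */
}

int main(void)
{
	int a = alloc_unit((void *)1), b = alloc_unit((void *)2);

	units[a] = NULL;                /* unit 0 removed, number is reusable */
	printf("a=%d b=%d reused=%d\n", a, b, alloc_unit((void *)3));
	return 0;
}
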
-
-/*
- * Called from freeze mode handlers, and from PCI error
- * reporting code. Should be paranoid about state of
- * system and data structures.
- */
-void qib_disable_after_error(struct qib_devdata *dd)
-{
- if (dd->flags & QIB_INITTED) {
- u32 pidx;
-
- dd->flags &= ~QIB_INITTED;
- if (dd->pport)
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- struct qib_pportdata *ppd;
-
- ppd = dd->pport + pidx;
- if (dd->flags & QIB_PRESENT) {
- qib_set_linkstate(ppd,
- QIB_IB_LINKDOWN_DISABLE);
- dd->f_setextled(ppd, 0);
- }
- *ppd->statusp &= ~QIB_STATUS_IB_READY;
- }
- }
-
- /*
- * Mark as having had an error for driver, and also
- * for /sys and status word mapped to user programs.
- * This marks unit as not usable, until reset.
- */
- if (dd->devstatusp)
- *dd->devstatusp |= QIB_STATUS_HWERROR;
-}
-
-static void qib_remove_one(struct pci_dev *);
-static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
-
-#define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
-#define PFX QIB_DRV_NAME ": "
-
-static const struct pci_device_id qib_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) },
- { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) },
- { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, qib_pci_tbl);
-
-static struct pci_driver qib_driver = {
- .name = QIB_DRV_NAME,
- .probe = qib_init_one,
- .remove = qib_remove_one,
- .id_table = qib_pci_tbl,
- .err_handler = &qib_pci_err_handler,
-};
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-
-static int qib_notify_dca(struct notifier_block *, unsigned long, void *);
-static struct notifier_block dca_notifier = {
- .notifier_call = qib_notify_dca,
- .next = NULL,
- .priority = 0
-};
-
-static int qib_notify_dca_device(struct device *device, void *data)
-{
- struct qib_devdata *dd = dev_get_drvdata(device);
- unsigned long event = *(unsigned long *)data;
-
- return dd->f_notify_dca(dd, event);
-}
-
-static int qib_notify_dca(struct notifier_block *nb, unsigned long event,
- void *p)
-{
- int rval;
-
- rval = driver_for_each_device(&qib_driver.driver, NULL,
- &event, qib_notify_dca_device);
- return rval ? NOTIFY_BAD : NOTIFY_DONE;
-}
-
-#endif
-
-/*
- * Do all the generic driver unit- and chip-independent memory
- * allocation and initialization.
- */
-static int __init qib_ib_init(void)
-{
- int ret;
-
- ret = qib_dev_init();
- if (ret)
- goto bail;
-
- /*
- * These must be called before the driver is registered with
- * the PCI subsystem.
- */
- idr_init(&qib_unit_table);
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dca_register_notify(&dca_notifier);
-#endif
-#ifdef CONFIG_DEBUG_FS
- qib_dbg_init();
-#endif
- ret = pci_register_driver(&qib_driver);
- if (ret < 0) {
- pr_err("Unable to register driver: error %d\n", -ret);
- goto bail_dev;
- }
-
- /* not fatal if it doesn't work */
- if (qib_init_qibfs())
- pr_err("Unable to register ipathfs\n");
- goto bail; /* all OK */
-
-bail_dev:
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dca_unregister_notify(&dca_notifier);
-#endif
-#ifdef CONFIG_DEBUG_FS
- qib_dbg_exit();
-#endif
- idr_destroy(&qib_unit_table);
- qib_dev_cleanup();
-bail:
- return ret;
-}
-
-module_init(qib_ib_init);
-
-/*
- * Do the non-unit driver cleanup, memory free, etc. at unload.
- */
-static void __exit qib_ib_cleanup(void)
-{
- int ret;
-
- ret = qib_exit_qibfs();
- if (ret)
- pr_err(
- "Unable to cleanup counter filesystem: error %d\n",
- -ret);
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dca_unregister_notify(&dca_notifier);
-#endif
- pci_unregister_driver(&qib_driver);
-#ifdef CONFIG_DEBUG_FS
- qib_dbg_exit();
-#endif
-
- qib_cpulist_count = 0;
- kfree(qib_cpulist);
-
- idr_destroy(&qib_unit_table);
- qib_dev_cleanup();
-}
-
-module_exit(qib_ib_cleanup);
-
-/* this can only be called after a successful initialization */
-static void cleanup_device_data(struct qib_devdata *dd)
-{
- int ctxt;
- int pidx;
- struct qib_ctxtdata **tmp;
- unsigned long flags;
-
- /* users can't do anything more with chip */
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- if (dd->pport[pidx].statusp)
- *dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT;
-
- spin_lock(&dd->pport[pidx].cc_shadow_lock);
-
- kfree(dd->pport[pidx].congestion_entries);
- dd->pport[pidx].congestion_entries = NULL;
- kfree(dd->pport[pidx].ccti_entries);
- dd->pport[pidx].ccti_entries = NULL;
- kfree(dd->pport[pidx].ccti_entries_shadow);
- dd->pport[pidx].ccti_entries_shadow = NULL;
- kfree(dd->pport[pidx].congestion_entries_shadow);
- dd->pport[pidx].congestion_entries_shadow = NULL;
-
- spin_unlock(&dd->pport[pidx].cc_shadow_lock);
- }
-
- qib_disable_wc(dd);
-
- if (dd->pioavailregs_dma) {
- dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
- (void *) dd->pioavailregs_dma,
- dd->pioavailregs_phys);
- dd->pioavailregs_dma = NULL;
- }
-
- if (dd->pageshadow) {
- struct page **tmpp = dd->pageshadow;
- dma_addr_t *tmpd = dd->physshadow;
- int i;
-
- for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
- int ctxt_tidbase = ctxt * dd->rcvtidcnt;
- int maxtid = ctxt_tidbase + dd->rcvtidcnt;
-
- for (i = ctxt_tidbase; i < maxtid; i++) {
- if (!tmpp[i])
- continue;
- pci_unmap_page(dd->pcidev, tmpd[i],
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
- qib_release_user_pages(&tmpp[i], 1);
- tmpp[i] = NULL;
- }
- }
-
- dd->pageshadow = NULL;
- vfree(tmpp);
- dd->physshadow = NULL;
- vfree(tmpd);
- }
-
- /*
- * Free any resources still in use (usually just kernel contexts)
- * at unload; we iterate over ctxtcnt, because that's what we allocate.
- * We acquire lock to be really paranoid that rcd isn't being
- * accessed from some interrupt-related code (that should not happen,
- * but best to be sure).
- */
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- tmp = dd->rcd;
- dd->rcd = NULL;
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
- for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) {
- struct qib_ctxtdata *rcd = tmp[ctxt];
-
- tmp[ctxt] = NULL; /* debugging paranoia */
- qib_free_ctxtdata(dd, rcd);
- }
- kfree(tmp);
- kfree(dd->boardname);
- qib_cq_exit(dd);
-}
-
-/*
- * Clean up on unit shutdown, or error during unit load after
- * successful initialization.
- */
-static void qib_postinit_cleanup(struct qib_devdata *dd)
-{
- /*
- * Clean up chip-specific stuff.
- * We check for NULL here, because it's outside
- * the kregbase check, and we need to call it
- * after the free_irq. Thus it's possible that
- * the function pointers were never initialized.
- */
- if (dd->f_cleanup)
- dd->f_cleanup(dd);
-
- qib_pcie_ddcleanup(dd);
-
- cleanup_device_data(dd);
-
- qib_free_devdata(dd);
-}
-
-static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int ret, j, pidx, initfail;
- struct qib_devdata *dd = NULL;
-
- ret = qib_pcie_init(pdev, ent);
- if (ret)
- goto bail;
-
- /*
- * Do device-specific initialization, function table setup, dd
- * allocation, etc.
- */
- switch (ent->device) {
- case PCI_DEVICE_ID_QLOGIC_IB_6120:
-#ifdef CONFIG_PCI_MSI
- dd = qib_init_iba6120_funcs(pdev, ent);
-#else
- qib_early_err(&pdev->dev,
- "Intel PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
- ent->device);
- dd = ERR_PTR(-ENODEV);
-#endif
- break;
-
- case PCI_DEVICE_ID_QLOGIC_IB_7220:
- dd = qib_init_iba7220_funcs(pdev, ent);
- break;
-
- case PCI_DEVICE_ID_QLOGIC_IB_7322:
- dd = qib_init_iba7322_funcs(pdev, ent);
- break;
-
- default:
- qib_early_err(&pdev->dev,
- "Failing on unknown Intel deviceid 0x%x\n",
- ent->device);
- ret = -ENODEV;
- }
-
- if (IS_ERR(dd))
- ret = PTR_ERR(dd);
- if (ret)
- goto bail; /* error already printed */
-
- ret = qib_create_workqueues(dd);
- if (ret)
- goto bail;
-
- /* do the generic initialization */
- initfail = qib_init(dd, 0);
-
- ret = qib_register_ib_device(dd);
-
- /*
- * Now ready for use. This should be cleared whenever we
- * detect a reset, or initiate one. If earlier failure,
- * we still create devices, so diags, etc. can be used
- * to determine cause of problem.
- */
- if (!qib_mini_init && !initfail && !ret)
- dd->flags |= QIB_INITTED;
-
- j = qib_device_create(dd);
- if (j)
- qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
- j = qibfs_add(dd);
- if (j)
- qib_dev_err(dd, "Failed filesystem setup for counters: %d\n",
- -j);
-
- if (qib_mini_init || initfail || ret) {
- qib_stop_timers(dd);
- flush_workqueue(ib_wq);
- for (pidx = 0; pidx < dd->num_pports; ++pidx)
- dd->f_quiet_serdes(dd->pport + pidx);
- if (qib_mini_init)
- goto bail;
- if (!j) {
- (void) qibfs_remove(dd);
- qib_device_remove(dd);
- }
- if (!ret)
- qib_unregister_ib_device(dd);
- qib_postinit_cleanup(dd);
- if (initfail)
- ret = initfail;
- goto bail;
- }
-
- ret = qib_enable_wc(dd);
- if (ret) {
- qib_dev_err(dd,
- "Write combining not enabled (err %d): performance may be poor\n",
- -ret);
- ret = 0;
- }
-
- qib_verify_pioperf(dd);
-bail:
- return ret;
-}
-
-static void qib_remove_one(struct pci_dev *pdev)
-{
- struct qib_devdata *dd = pci_get_drvdata(pdev);
- int ret;
-
- /* unregister from IB core */
- qib_unregister_ib_device(dd);
-
- /*
- * Disable the IB link, disable interrupts on the device,
- * clear dma engines, etc.
- */
- if (!qib_mini_init)
- qib_shutdown_device(dd);
-
- qib_stop_timers(dd);
-
- /* wait until all of our (qsfp) queue_work() calls complete */
- flush_workqueue(ib_wq);
-
- ret = qibfs_remove(dd);
- if (ret)
- qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n",
- -ret);
-
- qib_device_remove(dd);
-
- qib_postinit_cleanup(dd);
-}
-
-/**
- * qib_create_rcvhdrq - create a receive header queue
- * @dd: the qlogic_ib device
- * @rcd: the context data
- *
- * This must be contiguous memory (from an i/o perspective), and must be
- * DMA'able (which means for some systems, it will go through an IOMMU,
- * or be forced into a low address range).
- */
-int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
-{
- unsigned amt;
- int old_node_id;
-
- if (!rcd->rcvhdrq) {
- dma_addr_t phys_hdrqtail;
- gfp_t gfp_flags;
-
- amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
- sizeof(u32), PAGE_SIZE);
- gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
- GFP_USER : GFP_KERNEL;
-
- old_node_id = dev_to_node(&dd->pcidev->dev);
- set_dev_node(&dd->pcidev->dev, rcd->node_id);
- rcd->rcvhdrq = dma_alloc_coherent(
- &dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
- gfp_flags | __GFP_COMP);
- set_dev_node(&dd->pcidev->dev, old_node_id);
-
- if (!rcd->rcvhdrq) {
- qib_dev_err(dd,
- "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
- amt, rcd->ctxt);
- goto bail;
- }
-
- if (rcd->ctxt >= dd->first_user_ctxt) {
- rcd->user_event_mask = vmalloc_user(PAGE_SIZE);
- if (!rcd->user_event_mask)
- goto bail_free_hdrq;
- }
-
- if (!(dd->flags & QIB_NODMA_RTAIL)) {
- set_dev_node(&dd->pcidev->dev, rcd->node_id);
- rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(
- &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
- gfp_flags);
- set_dev_node(&dd->pcidev->dev, old_node_id);
- if (!rcd->rcvhdrtail_kvaddr)
- goto bail_free;
- rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
- }
-
- rcd->rcvhdrq_size = amt;
- }
-
- /* clear for security and sanity on each use */
- memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
- if (rcd->rcvhdrtail_kvaddr)
- memset(rcd->rcvhdrtail_kvaddr, 0, PAGE_SIZE);
- return 0;
-
-bail_free:
- qib_dev_err(dd,
- "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
- rcd->ctxt);
- vfree(rcd->user_event_mask);
- rcd->user_event_mask = NULL;
-bail_free_hdrq:
- dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
- rcd->rcvhdrq_phys);
- rcd->rcvhdrq = NULL;
-bail:
- return -ENOMEM;
-}
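
/*
 * A sketch of the rcvhdrq sizing above with hypothetical chip
 * parameters; the queue is rounded up to whole pages because it is
 * handed out as a single DMA-coherent allocation.
 */
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned rcvhdrcnt = 2048, rcvhdrentsize = 16;  /* hypothetical, entsize in u32s */
	unsigned page = 4096;
	unsigned amt = ALIGN_UP(rcvhdrcnt * rcvhdrentsize * (unsigned)sizeof(unsigned),
				page);

	printf("rcvhdrq: %u bytes (%u pages)\n", amt, amt / page);
	return 0;
}
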
-
-/**
- * qib_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
- * @rcd: the context we are setting up.
- *
- * Allocate the eager TID buffers and program them into the chip.
- * They are no longer completely contiguous, we do multiple allocation
- * calls. Otherwise we get the OOM code involved, by asking for too
- * much per call, with disastrous results on some kernels.
- */
-int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
-{
- struct qib_devdata *dd = rcd->dd;
- unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
- size_t size;
- gfp_t gfp_flags;
- int old_node_id;
-
- /*
- * GFP_USER, but without GFP_FS, so buffer cache can be
- * coalesced (we hope); otherwise, even at order 4,
- * heavy filesystem activity makes these fail, and we can
- * use compound pages.
- */
- gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
-
- egrcnt = rcd->rcvegrcnt;
- egroff = rcd->rcvegr_tid_base;
- egrsize = dd->rcvegrbufsize;
-
- chunk = rcd->rcvegrbuf_chunks;
- egrperchunk = rcd->rcvegrbufs_perchunk;
- size = rcd->rcvegrbuf_size;
- if (!rcd->rcvegrbuf) {
- rcd->rcvegrbuf =
- kzalloc_node(chunk * sizeof(rcd->rcvegrbuf[0]),
- GFP_KERNEL, rcd->node_id);
- if (!rcd->rcvegrbuf)
- goto bail;
- }
- if (!rcd->rcvegrbuf_phys) {
- rcd->rcvegrbuf_phys =
- kmalloc_node(chunk * sizeof(rcd->rcvegrbuf_phys[0]),
- GFP_KERNEL, rcd->node_id);
- if (!rcd->rcvegrbuf_phys)
- goto bail_rcvegrbuf;
- }
- for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
- if (rcd->rcvegrbuf[e])
- continue;
-
- old_node_id = dev_to_node(&dd->pcidev->dev);
- set_dev_node(&dd->pcidev->dev, rcd->node_id);
- rcd->rcvegrbuf[e] =
- dma_alloc_coherent(&dd->pcidev->dev, size,
- &rcd->rcvegrbuf_phys[e],
- gfp_flags);
- set_dev_node(&dd->pcidev->dev, old_node_id);
- if (!rcd->rcvegrbuf[e])
- goto bail_rcvegrbuf_phys;
- }
-
- rcd->rcvegr_phys = rcd->rcvegrbuf_phys[0];
-
- for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) {
- dma_addr_t pa = rcd->rcvegrbuf_phys[chunk];
- unsigned i;
-
- /* clear for security and sanity on each use */
- memset(rcd->rcvegrbuf[chunk], 0, size);
-
- for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
- dd->f_put_tid(dd, e + egroff +
- (u64 __iomem *)
- ((char __iomem *)
- dd->kregbase +
- dd->rcvegrbase),
- RCVHQ_RCV_TYPE_EAGER, pa);
- pa += egrsize;
- }
- cond_resched(); /* don't hog the cpu */
- }
-
- return 0;
-
-bail_rcvegrbuf_phys:
- for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++)
- dma_free_coherent(&dd->pcidev->dev, size,
- rcd->rcvegrbuf[e], rcd->rcvegrbuf_phys[e]);
- kfree(rcd->rcvegrbuf_phys);
- rcd->rcvegrbuf_phys = NULL;
-bail_rcvegrbuf:
- kfree(rcd->rcvegrbuf);
- rcd->rcvegrbuf = NULL;
-bail:
- return -ENOMEM;
-}
-
-/*
- * Note: Changes to this routine should be mirrored
- * for the diagnostics routine qib_remap_ioaddr32().
- * There is also related code for VL15 buffers in qib_init_7322_variables().
- * The teardown code that unmaps is in qib_pcie_ddcleanup()
- */
-int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
-{
- u64 __iomem *qib_kregbase = NULL;
- void __iomem *qib_piobase = NULL;
- u64 __iomem *qib_userbase = NULL;
- u64 qib_kreglen;
- u64 qib_pio2koffset = dd->piobufbase & 0xffffffff;
- u64 qib_pio4koffset = dd->piobufbase >> 32;
- u64 qib_pio2klen = dd->piobcnt2k * dd->palign;
- u64 qib_pio4klen = dd->piobcnt4k * dd->align4k;
- u64 qib_physaddr = dd->physaddr;
- u64 qib_piolen;
- u64 qib_userlen = 0;
-
- /*
- * Free the old mapping because the kernel will try to reuse the
- * old mapping and not create a new mapping with the
- * write combining attribute.
- */
- iounmap(dd->kregbase);
- dd->kregbase = NULL;
-
- /*
- * Assumes chip address space looks like:
- * - kregs + sregs + cregs + uregs (in any order)
- * - piobufs (2K and 4K bufs in either order)
- * or:
- * - kregs + sregs + cregs (in any order)
- * - piobufs (2K and 4K bufs in either order)
- * - uregs
- */
- if (dd->piobcnt4k == 0) {
- qib_kreglen = qib_pio2koffset;
- qib_piolen = qib_pio2klen;
- } else if (qib_pio2koffset < qib_pio4koffset) {
- qib_kreglen = qib_pio2koffset;
- qib_piolen = qib_pio4koffset + qib_pio4klen - qib_kreglen;
- } else {
- qib_kreglen = qib_pio4koffset;
- qib_piolen = qib_pio2koffset + qib_pio2klen - qib_kreglen;
- }
- qib_piolen += vl15buflen;
- /* Map just the configured ports (not all hw ports) */
- if (dd->uregbase > qib_kreglen)
- qib_userlen = dd->ureg_align * dd->cfgctxts;
-
- /* Sanity checks passed, now create the new mappings */
- qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen);
- if (!qib_kregbase)
- goto bail;
-
- qib_piobase = ioremap_wc(qib_physaddr + qib_kreglen, qib_piolen);
- if (!qib_piobase)
- goto bail_kregbase;
-
- if (qib_userlen) {
- qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase,
- qib_userlen);
- if (!qib_userbase)
- goto bail_piobase;
- }
-
- dd->kregbase = qib_kregbase;
- dd->kregend = (u64 __iomem *)
- ((char __iomem *) qib_kregbase + qib_kreglen);
- dd->piobase = qib_piobase;
- dd->pio2kbase = (void __iomem *)
- (((char __iomem *) dd->piobase) +
- qib_pio2koffset - qib_kreglen);
- if (dd->piobcnt4k)
- dd->pio4kbase = (void __iomem *)
- (((char __iomem *) dd->piobase) +
- qib_pio4koffset - qib_kreglen);
- if (qib_userlen)
- /* ureg will now be accessed relative to dd->userbase */
- dd->userbase = qib_userbase;
- return 0;
-
-bail_piobase:
- iounmap(qib_piobase);
-bail_kregbase:
- iounmap(qib_kregbase);
-bail:
- return -ENOMEM;
-}
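
/*
 * A sketch of how the function above splits the chip BAR into a
 * non-cached register mapping and a write-combining PIO-buffer
 * mapping; the offsets and lengths are hypothetical and follow the
 * "2K buffers below 4K buffers" case.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long pio2k_off = 0x100000, pio2k_len = 0x20000;
	unsigned long long pio4k_off = 0x140000, pio4k_len = 0x40000;

	unsigned long long kreglen = pio2k_off;   /* registers end where PIO starts */
	unsigned long long piolen = pio4k_off + pio4k_len - kreglen;

	(void)pio2k_len;                          /* covered by the combined PIO span */
	printf("nocache mapping: [0x0, 0x%llx)\n", kreglen);
	printf("wc mapping:      [0x%llx, 0x%llx)\n", kreglen, kreglen + piolen);
	return 0;
}
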
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
deleted file mode 100644
index 086616d071b9..000000000000
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/delay.h>
-
-#include "qib.h"
-#include "qib_common.h"
-
-/**
- * qib_format_hwmsg - format a single hwerror message
- * @msg: message buffer
- * @msgl: length of message buffer
- * @hwmsg: message to add to message buffer
- */
-static void qib_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
-{
- strlcat(msg, "[", msgl);
- strlcat(msg, hwmsg, msgl);
- strlcat(msg, "]", msgl);
-}
-
-/**
- * qib_format_hwerrors - format hardware error messages for display
- * @hwerrs: hardware errors bit vector
- * @hwerrmsgs: hardware error descriptions
- * @nhwerrmsgs: number of hwerrmsgs
- * @msg: message buffer
- * @msgl: message buffer length
- */
-void qib_format_hwerrors(u64 hwerrs, const struct qib_hwerror_msgs *hwerrmsgs,
- size_t nhwerrmsgs, char *msg, size_t msgl)
-{
- int i;
-
- for (i = 0; i < nhwerrmsgs; i++)
- if (hwerrs & hwerrmsgs[i].mask)
- qib_format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
-}
-
-static void signal_ib_event(struct qib_pportdata *ppd, enum ib_event_type ev)
-{
- struct ib_event event;
- struct qib_devdata *dd = ppd->dd;
-
- event.device = &dd->verbs_dev.ibdev;
- event.element.port_num = ppd->port;
- event.event = ev;
- ib_dispatch_event(&event);
-}
-
-void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
-{
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
- u32 lstate;
- u8 ltstate;
- enum ib_event_type ev = 0;
-
- lstate = dd->f_iblink_state(ibcs); /* linkstate */
- ltstate = dd->f_ibphys_portstate(ibcs);
-
- /*
- * If linkstate transitions into INIT from any of the various down
- * states, or if it transitions from any of the up (INIT or better)
- * states into any of the down states (except link recovery), then
- * call the chip-specific code to take appropriate actions.
- *
- * ppd->lflags could be 0 if this is the first time the interrupt
- * handler has been called but the link is already up.
- */
- if (lstate >= IB_PORT_INIT &&
- (!ppd->lflags || (ppd->lflags & QIBL_LINKDOWN)) &&
- ltstate == IB_PHYSPORTSTATE_LINKUP) {
- /* transitioned to UP */
- if (dd->f_ib_updown(ppd, 1, ibcs))
- goto skip_ibchange; /* chip-code handled */
- } else if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
- QIBL_LINKACTIVE | QIBL_IB_FORCE_NOTIFY)) {
- if (ltstate != IB_PHYSPORTSTATE_LINKUP &&
- ltstate <= IB_PHYSPORTSTATE_CFG_TRAIN &&
- dd->f_ib_updown(ppd, 0, ibcs))
- goto skip_ibchange; /* chip-code handled */
- qib_set_uevent_bits(ppd, _QIB_EVENT_LINKDOWN_BIT);
- }
-
- if (lstate != IB_PORT_DOWN) {
- /* lstate is INIT, ARMED, or ACTIVE */
- if (lstate != IB_PORT_ACTIVE) {
- *ppd->statusp &= ~QIB_STATUS_IB_READY;
- if (ppd->lflags & QIBL_LINKACTIVE)
- ev = IB_EVENT_PORT_ERR;
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- if (lstate == IB_PORT_ARMED) {
- ppd->lflags |= QIBL_LINKARMED | QIBL_LINKV;
- ppd->lflags &= ~(QIBL_LINKINIT |
- QIBL_LINKDOWN | QIBL_LINKACTIVE);
- } else {
- ppd->lflags |= QIBL_LINKINIT | QIBL_LINKV;
- ppd->lflags &= ~(QIBL_LINKARMED |
- QIBL_LINKDOWN | QIBL_LINKACTIVE);
- }
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- /* start a 75msec timer to clear symbol errors */
- mod_timer(&ppd->symerr_clear_timer,
- msecs_to_jiffies(75));
- } else if (ltstate == IB_PHYSPORTSTATE_LINKUP &&
- !(ppd->lflags & QIBL_LINKACTIVE)) {
- /* active, but not active deferred */
- qib_hol_up(ppd); /* useful only for 6120 now */
- *ppd->statusp |=
- QIB_STATUS_IB_READY | QIB_STATUS_IB_CONF;
- qib_clear_symerror_on_linkup((unsigned long)ppd);
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_LINKACTIVE | QIBL_LINKV;
- ppd->lflags &= ~(QIBL_LINKINIT |
- QIBL_LINKDOWN | QIBL_LINKARMED);
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- if (dd->flags & QIB_HAS_SEND_DMA)
- qib_sdma_process_event(ppd,
- qib_sdma_event_e30_go_running);
- ev = IB_EVENT_PORT_ACTIVE;
- dd->f_setextled(ppd, 1);
- }
- } else { /* down */
- if (ppd->lflags & QIBL_LINKACTIVE)
- ev = IB_EVENT_PORT_ERR;
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_LINKDOWN | QIBL_LINKV;
- ppd->lflags &= ~(QIBL_LINKINIT |
- QIBL_LINKACTIVE | QIBL_LINKARMED);
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- *ppd->statusp &= ~QIB_STATUS_IB_READY;
- }
-
-skip_ibchange:
- ppd->lastibcstat = ibcs;
- if (ev)
- signal_ib_event(ppd, ev);
-}
-
-void qib_clear_symerror_on_linkup(unsigned long opaque)
-{
- struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
-
- if (ppd->lflags & QIBL_LINKACTIVE)
- return;
-
- ppd->ibport_data.z_symbol_error_counter =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
-}
-
-/*
- * Handle receive interrupts for user ctxts; this means a user
- * process was waiting for a packet to arrive, and didn't want
- * to poll.
- */
-void qib_handle_urcv(struct qib_devdata *dd, u64 ctxtr)
-{
- struct qib_ctxtdata *rcd;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- for (i = dd->first_user_ctxt; dd->rcd && i < dd->cfgctxts; i++) {
- if (!(ctxtr & (1ULL << i)))
- continue;
- rcd = dd->rcd[i];
- if (!rcd || !rcd->cnt)
- continue;
-
- if (test_and_clear_bit(QIB_CTXT_WAITING_RCV, &rcd->flag)) {
- wake_up_interruptible(&rcd->wait);
- dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_DIS,
- rcd->ctxt);
- } else if (test_and_clear_bit(QIB_CTXT_WAITING_URG,
- &rcd->flag)) {
- rcd->urgent++;
- wake_up_interruptible(&rcd->wait);
- }
- }
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
-}
-
-void qib_bad_intrstatus(struct qib_devdata *dd)
-{
- static int allbits;
-
- /* separate routine, for better optimization of qib_intr() */
-
- /*
- * We print the message and disable interrupts, in hope of
- * having a better chance of debugging the problem.
- */
- qib_dev_err(dd,
- "Read of chip interrupt status failed disabling interrupts\n");
- if (allbits++) {
- /* disable interrupt delivery, something is very wrong */
- if (allbits == 2)
- dd->f_set_intr_state(dd, 0);
- if (allbits == 3) {
- qib_dev_err(dd,
- "2nd bad interrupt status, unregistering interrupts\n");
- dd->flags |= QIB_BADINTR;
- dd->flags &= ~QIB_INITTED;
- dd->f_free_irq(dd);
- }
- }
-}
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
deleted file mode 100644
index 5afaa218508d..000000000000
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ /dev/null
@@ -1,391 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "qib.h"
-
-/**
- * qib_alloc_lkey - allocate an lkey
- * @mr: memory region that this lkey protects
- * @dma_region: 0->normal key, 1->restricted DMA key
- *
- * Returns 0 if successful, otherwise returns -errno.
- *
- * Increments mr reference count as required.
- *
- * Sets the lkey field of mr for non-dma regions.
- *
- */
-
-int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
-{
- unsigned long flags;
- u32 r;
- u32 n;
- int ret = 0;
- struct qib_ibdev *dev = to_idev(mr->pd->device);
- struct qib_lkey_table *rkt = &dev->lk_table;
-
- spin_lock_irqsave(&rkt->lock, flags);
-
- /* special case for dma_mr lkey == 0 */
- if (dma_region) {
- struct qib_mregion *tmr;
-
- tmr = rcu_access_pointer(dev->dma_mr);
- if (!tmr) {
- qib_get_mr(mr);
- rcu_assign_pointer(dev->dma_mr, mr);
- mr->lkey_published = 1;
- }
- goto success;
- }
-
- /* Find the next available LKEY */
- r = rkt->next;
- n = r;
- for (;;) {
- if (rkt->table[r] == NULL)
- break;
- r = (r + 1) & (rkt->max - 1);
- if (r == n)
- goto bail;
- }
- rkt->next = (r + 1) & (rkt->max - 1);
- /*
- * Make sure lkey is never zero, which is reserved to indicate an
- * unrestricted LKEY.
- */
- rkt->gen++;
- /*
- * bits are capped in qib_verbs.c to ensure enough bits
- * for generation number
- */
- mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
- ((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
- << 8);
- if (mr->lkey == 0) {
- mr->lkey |= 1 << 8;
- rkt->gen++;
- }
- qib_get_mr(mr);
- rcu_assign_pointer(rkt->table[r], mr);
- mr->lkey_published = 1;
-success:
- spin_unlock_irqrestore(&rkt->lock, flags);
-out:
- return ret;
-bail:
- spin_unlock_irqrestore(&rkt->lock, flags);
- ret = -ENOMEM;
- goto out;
-}
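
/*
 * A sketch of the lkey packing above for a hypothetical lkey table of
 * 2^16 entries: the table index lands in the top bits, an 8-bit
 * generation counter sits just above the low byte, and lkey 0 is
 * avoided because 0 is reserved for the unrestricted/DMA key.
 */
#include <stdio.h>

int main(void)
{
	unsigned table_bits = 16;            /* hypothetical ib_qib_lkey_table_size */
	unsigned r = 0x1234, gen = 0x56;     /* hypothetical slot and generation */

	unsigned lkey = (r << (32 - table_bits)) |
			((((1u << (24 - table_bits)) - 1) & gen) << 8);
	if (lkey == 0)
		lkey |= 1u << 8;             /* never publish lkey 0 */

	printf("lkey=0x%08x, index recovered: 0x%x\n",
	       lkey, lkey >> (32 - table_bits));
	return 0;
}
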
-
-/**
- * qib_free_lkey - free an lkey
- * @mr: mr to free from tables
- */
-void qib_free_lkey(struct qib_mregion *mr)
-{
- unsigned long flags;
- u32 lkey = mr->lkey;
- u32 r;
- struct qib_ibdev *dev = to_idev(mr->pd->device);
- struct qib_lkey_table *rkt = &dev->lk_table;
-
- spin_lock_irqsave(&rkt->lock, flags);
- if (!mr->lkey_published)
- goto out;
- if (lkey == 0)
- RCU_INIT_POINTER(dev->dma_mr, NULL);
- else {
- r = lkey >> (32 - ib_qib_lkey_table_size);
- RCU_INIT_POINTER(rkt->table[r], NULL);
- }
- qib_put_mr(mr);
- mr->lkey_published = 0;
-out:
- spin_unlock_irqrestore(&rkt->lock, flags);
-}
-
-/**
- * qib_lkey_ok - check IB SGE for validity and initialize
- * @rkt: table containing lkey to check SGE against
- * @pd: protection domain
- * @isge: outgoing internal SGE
- * @sge: SGE to check
- * @acc: access flags
- *
- * Return 1 if valid and successful, otherwise returns 0.
- *
- * increments the reference count upon success
- *
- * Check the IB SGE for validity and initialize our internal version
- * of it.
- */
-int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
- struct qib_sge *isge, struct ib_sge *sge, int acc)
-{
- struct qib_mregion *mr;
- unsigned n, m;
- size_t off;
-
- /*
- * We use LKEY == zero for kernel virtual addresses
- * (see qib_get_dma_mr and qib_dma.c).
- */
- rcu_read_lock();
- if (sge->lkey == 0) {
- struct qib_ibdev *dev = to_idev(pd->ibpd.device);
-
- if (pd->user)
- goto bail;
- mr = rcu_dereference(dev->dma_mr);
- if (!mr)
- goto bail;
- if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
- goto bail;
- rcu_read_unlock();
-
- isge->mr = mr;
- isge->vaddr = (void *) sge->addr;
- isge->length = sge->length;
- isge->sge_length = sge->length;
- isge->m = 0;
- isge->n = 0;
- goto ok;
- }
- mr = rcu_dereference(
- rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))]);
- if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
- goto bail;
-
- off = sge->addr - mr->user_base;
- if (unlikely(sge->addr < mr->user_base ||
- off + sge->length > mr->length ||
- (mr->access_flags & acc) != acc))
- goto bail;
- if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
- goto bail;
- rcu_read_unlock();
-
- off += mr->offset;
- if (mr->page_shift) {
- /*
- * page sizes are uniform power of 2 so no loop is necessary
- * entries_spanned_by_off is the number of times the loop below
- * would have executed.
- */
- size_t entries_spanned_by_off;
-
- entries_spanned_by_off = off >> mr->page_shift;
- off -= (entries_spanned_by_off << mr->page_shift);
- m = entries_spanned_by_off/QIB_SEGSZ;
- n = entries_spanned_by_off%QIB_SEGSZ;
- } else {
- m = 0;
- n = 0;
- while (off >= mr->map[m]->segs[n].length) {
- off -= mr->map[m]->segs[n].length;
- n++;
- if (n >= QIB_SEGSZ) {
- m++;
- n = 0;
- }
- }
- }
- isge->mr = mr;
- isge->vaddr = mr->map[m]->segs[n].vaddr + off;
- isge->length = mr->map[m]->segs[n].length - off;
- isge->sge_length = sge->length;
- isge->m = m;
- isge->n = n;
-ok:
- return 1;
-bail:
- rcu_read_unlock();
- return 0;
-}
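When mr->page_shift is set, the lookup above turns a byte offset into (map, segment) coordinates with a shift, a division, and a modulo instead of the walking loop. A worked standalone example; SEGSZ merely stands in for QIB_SEGSZ and the numbers are illustrative:

/* Worked example of the offset -> (m, n) conversion; values are made up. */
#include <stddef.h>
#include <stdio.h>

#define SEGSZ 128   /* stand-in for QIB_SEGSZ, not the driver's real value */

int main(void)
{
	unsigned page_shift = 12;            /* 4 KiB pages */
	size_t off = 5 * 4096 + 100;         /* offset into the region */

	size_t entries = off >> page_shift;  /* whole pages spanned: 5 */
	off -= entries << page_shift;        /* 100 bytes left inside that page */
	size_t m = entries / SEGSZ;          /* map block, as in mr->map[m] */
	size_t n = entries % SEGSZ;          /* segment within it, segs[n] */

	printf("m=%zu n=%zu off=%zu\n", m, n, off);   /* prints m=0 n=5 off=100 */
	return 0;
}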
-
-/**
- * qib_rkey_ok - check the IB virtual address, length, and RKEY
- * @qp: qp for validation
- * @sge: SGE state
- * @len: length of data
- * @vaddr: virtual address to place data
- * @rkey: rkey to check
- * @acc: access flags
- *
- * Returns 1 if successful, otherwise 0.
- *
- * Increments the reference count upon success.
- */
-int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
- u32 len, u64 vaddr, u32 rkey, int acc)
-{
- struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
- struct qib_mregion *mr;
- unsigned n, m;
- size_t off;
-
- /*
- * We use RKEY == zero for kernel virtual addresses
- * (see qib_get_dma_mr and qib_dma.c).
- */
- rcu_read_lock();
- if (rkey == 0) {
- struct qib_pd *pd = to_ipd(qp->ibqp.pd);
- struct qib_ibdev *dev = to_idev(pd->ibpd.device);
-
- if (pd->user)
- goto bail;
- mr = rcu_dereference(dev->dma_mr);
- if (!mr)
- goto bail;
- if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
- goto bail;
- rcu_read_unlock();
-
- sge->mr = mr;
- sge->vaddr = (void *) vaddr;
- sge->length = len;
- sge->sge_length = len;
- sge->m = 0;
- sge->n = 0;
- goto ok;
- }
-
- mr = rcu_dereference(
- rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))]);
- if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
- goto bail;
-
- off = vaddr - mr->iova;
- if (unlikely(vaddr < mr->iova || off + len > mr->length ||
- (mr->access_flags & acc) == 0))
- goto bail;
- if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
- goto bail;
- rcu_read_unlock();
-
- off += mr->offset;
- if (mr->page_shift) {
- /*
- * page sizes are uniform power of 2 so no loop is necessary
- * entries_spanned_by_off is the number of times the loop below
- * would have executed.
- */
- size_t entries_spanned_by_off;
-
- entries_spanned_by_off = off >> mr->page_shift;
- off -= (entries_spanned_by_off << mr->page_shift);
- m = entries_spanned_by_off/QIB_SEGSZ;
- n = entries_spanned_by_off%QIB_SEGSZ;
- } else {
- m = 0;
- n = 0;
- while (off >= mr->map[m]->segs[n].length) {
- off -= mr->map[m]->segs[n].length;
- n++;
- if (n >= QIB_SEGSZ) {
- m++;
- n = 0;
- }
- }
- }
- sge->mr = mr;
- sge->vaddr = mr->map[m]->segs[n].vaddr + off;
- sge->length = mr->map[m]->segs[n].length - off;
- sge->sge_length = len;
- sge->m = m;
- sge->n = n;
-ok:
- return 1;
-bail:
- rcu_read_unlock();
- return 0;
-}
-
-/*
- * Initialize the memory region specified by the work request.
- */
-int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr)
-{
- struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
- struct qib_pd *pd = to_ipd(qp->ibqp.pd);
- struct qib_mregion *mr;
- u32 rkey = wr->wr.fast_reg.rkey;
- unsigned i, n, m;
- int ret = -EINVAL;
- unsigned long flags;
- u64 *page_list;
- size_t ps;
-
- spin_lock_irqsave(&rkt->lock, flags);
- if (pd->user || rkey == 0)
- goto bail;
-
- mr = rcu_dereference_protected(
- rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))],
- lockdep_is_held(&rkt->lock));
- if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))
- goto bail;
-
- if (wr->wr.fast_reg.page_list_len > mr->max_segs)
- goto bail;
-
- ps = 1UL << wr->wr.fast_reg.page_shift;
- if (wr->wr.fast_reg.length > ps * wr->wr.fast_reg.page_list_len)
- goto bail;
-
- mr->user_base = wr->wr.fast_reg.iova_start;
- mr->iova = wr->wr.fast_reg.iova_start;
- mr->lkey = rkey;
- mr->length = wr->wr.fast_reg.length;
- mr->access_flags = wr->wr.fast_reg.access_flags;
- page_list = wr->wr.fast_reg.page_list->page_list;
- m = 0;
- n = 0;
- for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
- mr->map[m]->segs[n].vaddr = (void *) page_list[i];
- mr->map[m]->segs[n].length = ps;
- if (++n == QIB_SEGSZ) {
- m++;
- n = 0;
- }
- }
-
- ret = 0;
-bail:
- spin_unlock_irqrestore(&rkt->lock, flags);
- return ret;
-}
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
deleted file mode 100644
index 9625e7c438e5..000000000000
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ /dev/null
@@ -1,2542 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/ib_smi.h>
-
-#include "qib.h"
-#include "qib_mad.h"
-
-static int reply(struct ib_smp *smp)
-{
- /*
- * The verbs framework will handle the directed/LID route
- * packet changes.
- */
- smp->method = IB_MGMT_METHOD_GET_RESP;
- if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- smp->status |= IB_SMP_DIRECTION;
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
-}
-
-static int reply_failure(struct ib_smp *smp)
-{
- /*
- * The verbs framework will handle the directed/LID route
- * packet changes.
- */
- smp->method = IB_MGMT_METHOD_GET_RESP;
- if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- smp->status |= IB_SMP_DIRECTION;
- return IB_MAD_RESULT_FAILURE | IB_MAD_RESULT_REPLY;
-}
-
-static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
-{
- struct ib_mad_send_buf *send_buf;
- struct ib_mad_agent *agent;
- struct ib_smp *smp;
- int ret;
- unsigned long flags;
- unsigned long timeout;
-
- agent = ibp->send_agent;
- if (!agent)
- return;
-
- /* o14-3.2.1 */
- if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE))
- return;
-
- /* o14-2 */
- if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout))
- return;
-
- send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
- IB_MGMT_MAD_DATA, GFP_ATOMIC,
- IB_MGMT_BASE_VERSION);
- if (IS_ERR(send_buf))
- return;
-
- smp = send_buf->mad;
- smp->base_version = IB_MGMT_BASE_VERSION;
- smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- smp->class_version = 1;
- smp->method = IB_MGMT_METHOD_TRAP;
- ibp->tid++;
- smp->tid = cpu_to_be64(ibp->tid);
- smp->attr_id = IB_SMP_ATTR_NOTICE;
- /* o14-1: smp->mkey = 0; */
- memcpy(smp->data, data, len);
-
- spin_lock_irqsave(&ibp->lock, flags);
- if (!ibp->sm_ah) {
- if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
- struct ib_ah *ah;
-
- ah = qib_create_qp0_ah(ibp, ibp->sm_lid);
- if (IS_ERR(ah))
- ret = PTR_ERR(ah);
- else {
- send_buf->ah = ah;
- ibp->sm_ah = to_iah(ah);
- ret = 0;
- }
- } else
- ret = -EINVAL;
- } else {
- send_buf->ah = &ibp->sm_ah->ibah;
- ret = 0;
- }
- spin_unlock_irqrestore(&ibp->lock, flags);
-
- if (!ret)
- ret = ib_post_send_mad(send_buf, NULL);
- if (!ret) {
- /* 4.096 usec. */
- timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000;
- ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout);
- } else {
- ib_free_send_mad(send_buf);
- ibp->trap_timeout = 0;
- }
-}
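The back-off above is 4.096 microseconds scaled by 2^subnet_timeout, converted to jiffies before being stored in trap_timeout. A tiny sketch of the arithmetic with an example (not a driver default) timeout encoding:

/* Worked example of the trap back-off: 4.096 us * 2^subnet_timeout. */
#include <stdio.h>

int main(void)
{
	unsigned subnet_timeout = 18;   /* example encoding */
	unsigned long usec = (4096UL * (1UL << subnet_timeout)) / 1000;

	/* 4096 ns * 2^18 / 1000 = 1073741 us, roughly 1.07 seconds */
	printf("back-off = %lu us (~%lu.%03lu s)\n",
	       usec, usec / 1000000, (usec % 1000000) / 1000);
	return 0;
}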
-
-/*
- * Send a bad [PQ]_Key trap (ch. 14.3.8).
- */
-void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
- u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
-{
- struct ib_mad_notice_attr data;
-
- if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
- ibp->pkey_violations++;
- else
- ibp->qkey_violations++;
- ibp->n_pkt_drops++;
-
- /* Send violation trap */
- data.generic_type = IB_NOTICE_TYPE_SECURITY;
- data.prod_type_msb = 0;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = trap_num;
- data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
- data.toggle_count = 0;
- memset(&data.details, 0, sizeof(data.details));
- data.details.ntc_257_258.lid1 = lid1;
- data.details.ntc_257_258.lid2 = lid2;
- data.details.ntc_257_258.key = cpu_to_be32(key);
- data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
- data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);
-
- qib_send_trap(ibp, &data, sizeof(data));
-}
-
-/*
- * Send a bad M_Key trap (ch. 14.3.9).
- */
-static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
-{
- struct ib_mad_notice_attr data;
-
- /* Send violation trap */
- data.generic_type = IB_NOTICE_TYPE_SECURITY;
- data.prod_type_msb = 0;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
- data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
- data.toggle_count = 0;
- memset(&data.details, 0, sizeof(data.details));
- data.details.ntc_256.lid = data.issuer_lid;
- data.details.ntc_256.method = smp->method;
- data.details.ntc_256.attr_id = smp->attr_id;
- data.details.ntc_256.attr_mod = smp->attr_mod;
- data.details.ntc_256.mkey = smp->mkey;
- if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- u8 hop_cnt;
-
- data.details.ntc_256.dr_slid = smp->dr_slid;
- data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
- hop_cnt = smp->hop_cnt;
- if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
- data.details.ntc_256.dr_trunc_hop |=
- IB_NOTICE_TRAP_DR_TRUNC;
- hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
- }
- data.details.ntc_256.dr_trunc_hop |= hop_cnt;
- memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path,
- hop_cnt);
- }
-
- qib_send_trap(ibp, &data, sizeof(data));
-}
-
-/*
- * Send a Port Capability Mask Changed trap (ch. 14.3.11).
- */
-void qib_cap_mask_chg(struct qib_ibport *ibp)
-{
- struct ib_mad_notice_attr data;
-
- data.generic_type = IB_NOTICE_TYPE_INFO;
- data.prod_type_msb = 0;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
- data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
- data.toggle_count = 0;
- memset(&data.details, 0, sizeof(data.details));
- data.details.ntc_144.lid = data.issuer_lid;
- data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);
-
- qib_send_trap(ibp, &data, sizeof(data));
-}
-
-/*
- * Send a System Image GUID Changed trap (ch. 14.3.12).
- */
-void qib_sys_guid_chg(struct qib_ibport *ibp)
-{
- struct ib_mad_notice_attr data;
-
- data.generic_type = IB_NOTICE_TYPE_INFO;
- data.prod_type_msb = 0;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
- data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
- data.toggle_count = 0;
- memset(&data.details, 0, sizeof(data.details));
- data.details.ntc_145.lid = data.issuer_lid;
- data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;
-
- qib_send_trap(ibp, &data, sizeof(data));
-}
-
-/*
- * Send a Node Description Changed trap (ch. 14.3.13).
- */
-void qib_node_desc_chg(struct qib_ibport *ibp)
-{
- struct ib_mad_notice_attr data;
-
- data.generic_type = IB_NOTICE_TYPE_INFO;
- data.prod_type_msb = 0;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
- data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
- data.toggle_count = 0;
- memset(&data.details, 0, sizeof(data.details));
- data.details.ntc_144.lid = data.issuer_lid;
- data.details.ntc_144.local_changes = 1;
- data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;
-
- qib_send_trap(ibp, &data, sizeof(data));
-}
-
-static int subn_get_nodedescription(struct ib_smp *smp,
- struct ib_device *ibdev)
-{
- if (smp->attr_mod)
- smp->status |= IB_SMP_INVALID_FIELD;
-
- memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));
-
- return reply(smp);
-}
-
-static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- u32 vendor, majrev, minrev;
- unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */
-
- /* GUID 0 is illegal */
- if (smp->attr_mod || pidx >= dd->num_pports ||
- dd->pport[pidx].guid == 0)
- smp->status |= IB_SMP_INVALID_FIELD;
- else
- nip->port_guid = dd->pport[pidx].guid;
-
- nip->base_version = 1;
- nip->class_version = 1;
- nip->node_type = 1; /* channel adapter */
- nip->num_ports = ibdev->phys_port_cnt;
- /* This is already in network order */
- nip->sys_guid = ib_qib_sys_image_guid;
- nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */
- nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd));
- nip->device_id = cpu_to_be16(dd->deviceid);
- majrev = dd->majrev;
- minrev = dd->minrev;
- nip->revision = cpu_to_be32((majrev << 16) | minrev);
- nip->local_port_num = port;
- vendor = dd->vendorid;
- nip->vendor_id[0] = QIB_SRC_OUI_1;
- nip->vendor_id[1] = QIB_SRC_OUI_2;
- nip->vendor_id[2] = QIB_SRC_OUI_3;
-
- return reply(smp);
-}
-
-static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
- __be64 *p = (__be64 *) smp->data;
- unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */
-
- /* 32 blocks of 8 64-bit GUIDs per block */
-
- memset(smp->data, 0, sizeof(smp->data));
-
- if (startgx == 0 && pidx < dd->num_pports) {
- struct qib_pportdata *ppd = dd->pport + pidx;
- struct qib_ibport *ibp = &ppd->ibport_data;
- __be64 g = ppd->guid;
- unsigned i;
-
- /* GUID 0 is illegal */
- if (g == 0)
- smp->status |= IB_SMP_INVALID_FIELD;
- else {
- /* The first is a copy of the read-only HW GUID. */
- p[0] = g;
- for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
- p[i] = ibp->guids[i - 1];
- }
- } else
- smp->status |= IB_SMP_INVALID_FIELD;
-
- return reply(smp);
-}
-
-static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w)
-{
- (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w);
-}
-
-static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s)
-{
- (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s);
-}
-
-static int get_overrunthreshold(struct qib_pportdata *ppd)
-{
- return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH);
-}
-
-/**
- * set_overrunthreshold - set the overrun threshold
- * @ppd: the physical port data
- * @n: the new threshold
- *
- * Note that this will only take effect when the link state changes.
- */
-static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n)
-{
- (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH,
- (u32)n);
- return 0;
-}
-
-static int get_phyerrthreshold(struct qib_pportdata *ppd)
-{
- return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH);
-}
-
-/**
- * set_phyerrthreshold - set the physical error threshold
- * @ppd: the physical port data
- * @n: the new threshold
- *
- * Note that this will only take effect when the link state changes.
- */
-static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n)
-{
- (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH,
- (u32)n);
- return 0;
-}
-
-/**
- * get_linkdowndefaultstate - get the default linkdown state
- * @ppd: the physical port data
- *
- * Returns zero if the default is POLL, 1 if the default is SLEEP.
- */
-static int get_linkdowndefaultstate(struct qib_pportdata *ppd)
-{
- return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) ==
- IB_LINKINITCMD_SLEEP;
-}
-
-static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
-{
- int valid_mkey = 0;
- int ret = 0;
-
- /* Is the mkey in the process of expiring? */
- if (ibp->mkey_lease_timeout &&
- time_after_eq(jiffies, ibp->mkey_lease_timeout)) {
- /* Clear timeout and mkey protection field. */
- ibp->mkey_lease_timeout = 0;
- ibp->mkeyprot = 0;
- }
-
- if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->mkey == 0 ||
- ibp->mkey == smp->mkey)
- valid_mkey = 1;
-
- /* Unset lease timeout on any valid Get/Set/TrapRepress */
- if (valid_mkey && ibp->mkey_lease_timeout &&
- (smp->method == IB_MGMT_METHOD_GET ||
- smp->method == IB_MGMT_METHOD_SET ||
- smp->method == IB_MGMT_METHOD_TRAP_REPRESS))
- ibp->mkey_lease_timeout = 0;
-
- if (!valid_mkey) {
- switch (smp->method) {
- case IB_MGMT_METHOD_GET:
- /* Bad mkey not a violation below level 2 */
- if (ibp->mkeyprot < 2)
- break;
- case IB_MGMT_METHOD_SET:
- case IB_MGMT_METHOD_TRAP_REPRESS:
- if (ibp->mkey_violations != 0xFFFF)
- ++ibp->mkey_violations;
- if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
- ibp->mkey_lease_timeout = jiffies +
- ibp->mkey_lease_period * HZ;
- /* Generate a trap notice. */
- qib_bad_mkey(ibp, smp);
- ret = 1;
- }
- }
-
- return ret;
-}
-
-static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- struct qib_devdata *dd;
- struct qib_pportdata *ppd;
- struct qib_ibport *ibp;
- struct ib_port_info *pip = (struct ib_port_info *)smp->data;
- u8 mtu;
- int ret;
- u32 state;
- u32 port_num = be32_to_cpu(smp->attr_mod);
-
- if (port_num == 0)
- port_num = port;
- else {
- if (port_num > ibdev->phys_port_cnt) {
- smp->status |= IB_SMP_INVALID_FIELD;
- ret = reply(smp);
- goto bail;
- }
- if (port_num != port) {
- ibp = to_iport(ibdev, port_num);
- ret = check_mkey(ibp, smp, 0);
- if (ret) {
- ret = IB_MAD_RESULT_FAILURE;
- goto bail;
- }
- }
- }
-
- dd = dd_from_ibdev(ibdev);
- /* IB numbers ports from 1, hdw from 0 */
- ppd = dd->pport + (port_num - 1);
- ibp = &ppd->ibport_data;
-
- /* Clear all fields. Only set the non-zero fields. */
- memset(smp->data, 0, sizeof(smp->data));
-
- /* Only return the mkey if the protection field allows it. */
- if (!(smp->method == IB_MGMT_METHOD_GET &&
- ibp->mkey != smp->mkey &&
- ibp->mkeyprot == 1))
- pip->mkey = ibp->mkey;
- pip->gid_prefix = ibp->gid_prefix;
- pip->lid = cpu_to_be16(ppd->lid);
- pip->sm_lid = cpu_to_be16(ibp->sm_lid);
- pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);
- /* pip->diag_code; */
- pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period);
- pip->local_port_num = port;
- pip->link_width_enabled = ppd->link_width_enabled;
- pip->link_width_supported = ppd->link_width_supported;
- pip->link_width_active = ppd->link_width_active;
- state = dd->f_iblink_state(ppd->lastibcstat);
- pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state;
-
- pip->portphysstate_linkdown =
- (dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
- (get_linkdowndefaultstate(ppd) ? 1 : 2);
- pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc;
- pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
- ppd->link_speed_enabled;
- switch (ppd->ibmtu) {
- default: /* something is wrong; fall through */
- case 4096:
- mtu = IB_MTU_4096;
- break;
- case 2048:
- mtu = IB_MTU_2048;
- break;
- case 1024:
- mtu = IB_MTU_1024;
- break;
- case 512:
- mtu = IB_MTU_512;
- break;
- case 256:
- mtu = IB_MTU_256;
- break;
- }
- pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl;
- pip->vlcap_inittype = ppd->vls_supported << 4; /* InitType = 0 */
- pip->vl_high_limit = ibp->vl_high_limit;
- pip->vl_arb_high_cap =
- dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
- pip->vl_arb_low_cap =
- dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);
- /* InitTypeReply = 0 */
- pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
- /* HCAs ignore VLStallCount and HOQLife */
- /* pip->vlstallcnt_hoqlife; */
- pip->operationalvl_pei_peo_fpi_fpo =
- dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
- pip->mkey_violations = cpu_to_be16(ibp->mkey_violations);
- /* P_KeyViolations are counted by hardware. */
- pip->pkey_violations = cpu_to_be16(ibp->pkey_violations);
- pip->qkey_violations = cpu_to_be16(ibp->qkey_violations);
- /* Only the hardware GUID is supported for now */
- pip->guid_cap = QIB_GUIDS_PER_PORT;
- pip->clientrereg_resv_subnetto = ibp->subnet_timeout;
- /* 32.768 usec. response time (guessing) */
- pip->resv_resptimevalue = 3;
- pip->localphyerrors_overrunerrors =
- (get_phyerrthreshold(ppd) << 4) |
- get_overrunthreshold(ppd);
- /* pip->max_credit_hint; */
- if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
- u32 v;
-
- v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
- pip->link_roundtrip_latency[0] = v >> 16;
- pip->link_roundtrip_latency[1] = v >> 8;
- pip->link_roundtrip_latency[2] = v;
- }
-
- ret = reply(smp);
-
-bail:
- return ret;
-}
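Several PortInfo bytes above carry two packed sub-fields, for example mkeyprot in the top two bits and the LMC in the low three bits of mkeyprot_resv_lmc. A small standalone illustration of that packing with made-up values:

/* Illustration of the mkeyprot/LMC byte packing used in PortInfo. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t mkeyprot = 2;   /* example protection level (0..3) */
	uint8_t lmc = 1;        /* example LID mask count (0..7)   */

	uint8_t packed = (uint8_t)((mkeyprot << 6) | lmc);

	printf("packed=0x%02x mkeyprot=%u lmc=%u\n",
	       (unsigned)packed, (unsigned)(packed >> 6), (unsigned)(packed & 7));
	return 0;
}

subn_set_portinfo() below undoes the same packing with "pip->mkeyprot_resv_lmc >> 6" for the protection level and "& 7" for the LMC.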
-
-/**
- * get_pkeys - return the PKEY table
- * @dd: the qlogic_ib device
- * @port: the IB port number
- * @pkeys: the pkey table is placed here
- */
-static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
-{
- struct qib_pportdata *ppd = dd->pport + port - 1;
- /*
- * always a kernel context, no locking needed.
- * If we get here with ppd setup, no need to check
- * that rcd is valid.
- */
- struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx];
-
- memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys));
-
- return 0;
-}
-
-static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
- u16 *p = (u16 *) smp->data;
- __be16 *q = (__be16 *) smp->data;
-
- /* 64 blocks of 32 16-bit P_Key entries */
-
- memset(smp->data, 0, sizeof(smp->data));
- if (startpx == 0) {
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- unsigned i, n = qib_get_npkeys(dd);
-
- get_pkeys(dd, port, p);
-
- for (i = 0; i < n; i++)
- q[i] = cpu_to_be16(p[i]);
- } else
- smp->status |= IB_SMP_INVALID_FIELD;
-
- return reply(smp);
-}
-
-static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
- __be64 *p = (__be64 *) smp->data;
- unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */
-
- /* 32 blocks of 8 64-bit GUIDs per block */
-
- if (startgx == 0 && pidx < dd->num_pports) {
- struct qib_pportdata *ppd = dd->pport + pidx;
- struct qib_ibport *ibp = &ppd->ibport_data;
- unsigned i;
-
- /* The first entry is read-only. */
- for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
- ibp->guids[i - 1] = p[i];
- } else
- smp->status |= IB_SMP_INVALID_FIELD;
-
- /* The only GUID we support is the first read-only entry. */
- return subn_get_guidinfo(smp, ibdev, port);
-}
-
-/**
- * subn_set_portinfo - set port information
- * @smp: the incoming SM packet
- * @ibdev: the infiniband device
- * @port: the port on the device
- *
- * Set Portinfo (see ch. 14.2.5.6).
- */
-static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- struct ib_port_info *pip = (struct ib_port_info *)smp->data;
- struct ib_event event;
- struct qib_devdata *dd;
- struct qib_pportdata *ppd;
- struct qib_ibport *ibp;
- u8 clientrereg = (pip->clientrereg_resv_subnetto & 0x80);
- unsigned long flags;
- u16 lid, smlid;
- u8 lwe;
- u8 lse;
- u8 state;
- u8 vls;
- u8 msl;
- u16 lstate;
- int ret, ore, mtu;
- u32 port_num = be32_to_cpu(smp->attr_mod);
-
- if (port_num == 0)
- port_num = port;
- else {
- if (port_num > ibdev->phys_port_cnt)
- goto err;
- /* Port attributes can only be set on the receiving port */
- if (port_num != port)
- goto get_only;
- }
-
- dd = dd_from_ibdev(ibdev);
- /* IB numbers ports from 1, hdw from 0 */
- ppd = dd->pport + (port_num - 1);
- ibp = &ppd->ibport_data;
- event.device = ibdev;
- event.element.port_num = port;
-
- ibp->mkey = pip->mkey;
- ibp->gid_prefix = pip->gid_prefix;
- ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
-
- lid = be16_to_cpu(pip->lid);
- /* Must be a valid unicast LID address. */
- if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
- smp->status |= IB_SMP_INVALID_FIELD;
- else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
- if (ppd->lid != lid)
- qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
- if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
- qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT);
- qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7);
- event.event = IB_EVENT_LID_CHANGE;
- ib_dispatch_event(&event);
- }
-
- smlid = be16_to_cpu(pip->sm_lid);
- msl = pip->neighbormtu_mastersmsl & 0xF;
- /* Must be a valid unicast LID address. */
- if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
- smp->status |= IB_SMP_INVALID_FIELD;
- else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
- spin_lock_irqsave(&ibp->lock, flags);
- if (ibp->sm_ah) {
- if (smlid != ibp->sm_lid)
- ibp->sm_ah->attr.dlid = smlid;
- if (msl != ibp->sm_sl)
- ibp->sm_ah->attr.sl = msl;
- }
- spin_unlock_irqrestore(&ibp->lock, flags);
- if (smlid != ibp->sm_lid)
- ibp->sm_lid = smlid;
- if (msl != ibp->sm_sl)
- ibp->sm_sl = msl;
- event.event = IB_EVENT_SM_CHANGE;
- ib_dispatch_event(&event);
- }
-
- /* Allow 1x or 4x to be set (see 14.2.6.6). */
- lwe = pip->link_width_enabled;
- if (lwe) {
- if (lwe == 0xFF)
- set_link_width_enabled(ppd, ppd->link_width_supported);
- else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
- smp->status |= IB_SMP_INVALID_FIELD;
- else if (lwe != ppd->link_width_enabled)
- set_link_width_enabled(ppd, lwe);
- }
-
- lse = pip->linkspeedactive_enabled & 0xF;
- if (lse) {
- /*
- * The IB 1.2 spec only allows link speed values
- * 1, 3, 5, 7, and 15. IBA 1.2.1 extended this to allow
- * specific speeds.
- */
- if (lse == 15)
- set_link_speed_enabled(ppd,
- ppd->link_speed_supported);
- else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
- smp->status |= IB_SMP_INVALID_FIELD;
- else if (lse != ppd->link_speed_enabled)
- set_link_speed_enabled(ppd, lse);
- }
-
- /* Set link down default state. */
- switch (pip->portphysstate_linkdown & 0xF) {
- case 0: /* NOP */
- break;
- case 1: /* SLEEP */
- (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
- IB_LINKINITCMD_SLEEP);
- break;
- case 2: /* POLL */
- (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
- IB_LINKINITCMD_POLL);
- break;
- default:
- smp->status |= IB_SMP_INVALID_FIELD;
- }
-
- ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
- ibp->vl_high_limit = pip->vl_high_limit;
- (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
- ibp->vl_high_limit);
-
- mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
- if (mtu == -1)
- smp->status |= IB_SMP_INVALID_FIELD;
- else
- qib_set_mtu(ppd, mtu);
-
- /* Set operational VLs */
- vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
- if (vls) {
- if (vls > ppd->vls_supported)
- smp->status |= IB_SMP_INVALID_FIELD;
- else
- (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
- }
-
- if (pip->mkey_violations == 0)
- ibp->mkey_violations = 0;
-
- if (pip->pkey_violations == 0)
- ibp->pkey_violations = 0;
-
- if (pip->qkey_violations == 0)
- ibp->qkey_violations = 0;
-
- ore = pip->localphyerrors_overrunerrors;
- if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
- smp->status |= IB_SMP_INVALID_FIELD;
-
- if (set_overrunthreshold(ppd, (ore & 0xF)))
- smp->status |= IB_SMP_INVALID_FIELD;
-
- ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
-
- /*
- * Do the port state change now that the other link parameters
- * have been set.
- * Changing the port physical state only makes sense if the link
- * is down or is being set to down.
- */
- state = pip->linkspeed_portstate & 0xF;
- lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
- if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
- smp->status |= IB_SMP_INVALID_FIELD;
-
- /*
- * Only state changes of DOWN, ARM, and ACTIVE are valid
- * and must be in the correct state to take effect (see 7.2.6).
- */
- switch (state) {
- case IB_PORT_NOP:
- if (lstate == 0)
- break;
- /* FALLTHROUGH */
- case IB_PORT_DOWN:
- if (lstate == 0)
- lstate = QIB_IB_LINKDOWN_ONLY;
- else if (lstate == 1)
- lstate = QIB_IB_LINKDOWN_SLEEP;
- else if (lstate == 2)
- lstate = QIB_IB_LINKDOWN;
- else if (lstate == 3)
- lstate = QIB_IB_LINKDOWN_DISABLE;
- else {
- smp->status |= IB_SMP_INVALID_FIELD;
- break;
- }
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_LINKV;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- qib_set_linkstate(ppd, lstate);
- /*
- * Don't send a reply if the response would be sent
- * through the disabled port.
- */
- if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) {
- ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
- goto done;
- }
- qib_wait_linkstate(ppd, QIBL_LINKV, 10);
- break;
- case IB_PORT_ARMED:
- qib_set_linkstate(ppd, QIB_IB_LINKARM);
- break;
- case IB_PORT_ACTIVE:
- qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
- break;
- default:
- smp->status |= IB_SMP_INVALID_FIELD;
- }
-
- if (clientrereg) {
- event.event = IB_EVENT_CLIENT_REREGISTER;
- ib_dispatch_event(&event);
- }
-
- ret = subn_get_portinfo(smp, ibdev, port);
-
- /* restore re-reg bit per o14-12.2.1 */
- pip->clientrereg_resv_subnetto |= clientrereg;
-
- goto get_only;
-
-err:
- smp->status |= IB_SMP_INVALID_FIELD;
-get_only:
- ret = subn_get_portinfo(smp, ibdev, port);
-done:
- return ret;
-}
-
-/**
- * rm_pkey - decrement the reference count for the given PKEY
- * @ppd: the physical port data
- * @key: the PKEY
- *
- * Return true if this was the last reference and the hardware table entry
- * needs to be changed.
- */
-static int rm_pkey(struct qib_pportdata *ppd, u16 key)
-{
- int i;
- int ret;
-
- for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
- if (ppd->pkeys[i] != key)
- continue;
- if (atomic_dec_and_test(&ppd->pkeyrefs[i])) {
- ppd->pkeys[i] = 0;
- ret = 1;
- goto bail;
- }
- break;
- }
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * add_pkey - add the given PKEY to the hardware table
- * @ppd: the physical port data
- * @key: the PKEY
- *
- * Return an error code if unable to add the entry, zero if no change,
- * or 1 if the hardware PKEY register needs to be updated.
- */
-static int add_pkey(struct qib_pportdata *ppd, u16 key)
-{
- int i;
- u16 lkey = key & 0x7FFF;
- int any = 0;
- int ret;
-
- if (lkey == 0x7FFF) {
- ret = 0;
- goto bail;
- }
-
- /* Look for an empty slot or a matching PKEY. */
- for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
- if (!ppd->pkeys[i]) {
- any++;
- continue;
- }
- /* If it matches exactly, try to increment the ref count */
- if (ppd->pkeys[i] == key) {
- if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {
- ret = 0;
- goto bail;
- }
- /* Lost the race. Look for an empty slot below. */
- atomic_dec(&ppd->pkeyrefs[i]);
- any++;
- }
- /*
- * It makes no sense to have both the limited and unlimited
- * PKEY set at the same time since the unlimited one will
- * disable the limited one.
- */
- if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
- ret = -EEXIST;
- goto bail;
- }
- }
- if (!any) {
- ret = -EBUSY;
- goto bail;
- }
- for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
- if (!ppd->pkeys[i] &&
- atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
- /* for qibstats, etc. */
- ppd->pkeys[i] = key;
- ret = 1;
- goto bail;
- }
- }
- ret = -EBUSY;
-
-bail:
- return ret;
-}
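rm_pkey() and add_pkey() treat bit 15 of a P_Key as the membership bit: 0xFFFF is the default full-member key and 0x7FFF the matching limited-member key, which is why the comparisons above mask with 0x7FFF. A small standalone illustration with the two default keys:

/* Illustration of the limited vs. full membership distinction in add_pkey(). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t full    = 0xFFFF;   /* default full-member P_Key */
	uint16_t limited = 0x7FFF;   /* same base key, membership bit clear */

	printf("same base key: %s\n",
	       (full & 0x7FFF) == (limited & 0x7FFF) ? "yes" : "no");
	printf("full member:   %s / %s\n",
	       (full & 0x8000) ? "yes" : "no",
	       (limited & 0x8000) ? "yes" : "no");
	return 0;
}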
-
-/**
- * set_pkeys - set the PKEY table for ctxt 0
- * @dd: the qlogic_ib device
- * @port: the IB port number
- * @pkeys: the PKEY table
- */
-static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
-{
- struct qib_pportdata *ppd;
- struct qib_ctxtdata *rcd;
- int i;
- int changed = 0;
-
- /*
- * IB ports one/two always map to contexts zero/one,
- * always a kernel context, so no locking is needed.
- * If we get here with ppd setup, no need to check
- * that rcd is valid.
- */
- ppd = dd->pport + (port - 1);
- rcd = dd->rcd[ppd->hw_pidx];
-
- for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
- u16 key = pkeys[i];
- u16 okey = rcd->pkeys[i];
-
- if (key == okey)
- continue;
- /*
- * The value of this PKEY table entry is changing.
- * Remove the old entry in the hardware's array of PKEYs.
- */
- if (okey & 0x7FFF)
- changed |= rm_pkey(ppd, okey);
- if (key & 0x7FFF) {
- int ret = add_pkey(ppd, key);
-
- if (ret < 0)
- key = 0;
- else
- changed |= ret;
- }
- rcd->pkeys[i] = key;
- }
- if (changed) {
- struct ib_event event;
-
- (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
-
- event.event = IB_EVENT_PKEY_CHANGE;
- event.device = &dd->verbs_dev.ibdev;
- event.element.port_num = port;
- ib_dispatch_event(&event);
- }
- return 0;
-}
-
-static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
- __be16 *p = (__be16 *) smp->data;
- u16 *q = (u16 *) smp->data;
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- unsigned i, n = qib_get_npkeys(dd);
-
- for (i = 0; i < n; i++)
- q[i] = be16_to_cpu(p[i]);
-
- if (startpx != 0 || set_pkeys(dd, port, q) != 0)
- smp->status |= IB_SMP_INVALID_FIELD;
-
- return subn_get_pkeytable(smp, ibdev, port);
-}
-
-static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- struct qib_ibport *ibp = to_iport(ibdev, port);
- u8 *p = (u8 *) smp->data;
- unsigned i;
-
- memset(smp->data, 0, sizeof(smp->data));
-
- if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP))
- smp->status |= IB_SMP_UNSUP_METHOD;
- else
- for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
- *p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1];
-
- return reply(smp);
-}
-
-static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- struct qib_ibport *ibp = to_iport(ibdev, port);
- u8 *p = (u8 *) smp->data;
- unsigned i;
-
- if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) {
- smp->status |= IB_SMP_UNSUP_METHOD;
- return reply(smp);
- }
-
- for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {
- ibp->sl_to_vl[i] = *p >> 4;
- ibp->sl_to_vl[i + 1] = *p & 0xF;
- }
- qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)),
- _QIB_EVENT_SL2VL_CHANGE_BIT);
-
- return subn_get_sl_to_vl(smp, ibdev, port);
-}
-
-static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
- struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
-
- memset(smp->data, 0, sizeof(smp->data));
-
- if (ppd->vls_supported == IB_VL_VL0)
- smp->status |= IB_SMP_UNSUP_METHOD;
- else if (which == IB_VLARB_LOWPRI_0_31)
- (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
- smp->data);
- else if (which == IB_VLARB_HIGHPRI_0_31)
- (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
- smp->data);
- else
- smp->status |= IB_SMP_INVALID_FIELD;
-
- return reply(smp);
-}
-
-static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
- struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
-
- if (ppd->vls_supported == IB_VL_VL0)
- smp->status |= IB_SMP_UNSUP_METHOD;
- else if (which == IB_VLARB_LOWPRI_0_31)
- (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
- smp->data);
- else if (which == IB_VLARB_HIGHPRI_0_31)
- (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
- smp->data);
- else
- smp->status |= IB_SMP_INVALID_FIELD;
-
- return subn_get_vl_arb(smp, ibdev, port);
-}
-
-static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- /*
- * For now, we only send the trap once so no need to process this.
- * o13-6, o13-7,
- * o14-3.a4 The SMA shall not send any message in response to a valid
- * SubnTrapRepress() message.
- */
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
-}
-
-static int pma_get_classportinfo(struct ib_pma_mad *pmp,
- struct ib_device *ibdev)
-{
- struct ib_class_port_info *p =
- (struct ib_class_port_info *)pmp->data;
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
-
- memset(pmp->data, 0, sizeof(pmp->data));
-
- if (pmp->mad_hdr.attr_mod != 0)
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-
- /* Note that AllPortSelect is not valid */
- p->base_version = 1;
- p->class_version = 1;
- p->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
- /*
- * Set the most significant bit of CM2 to indicate support for
- * congestion statistics
- */
- p->reserved[0] = dd->psxmitwait_supported << 7;
- /*
- * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
- */
- p->resp_time_value = 18;
-
- return reply((struct ib_smp *) pmp);
-}
-
-static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portsamplescontrol *p =
- (struct ib_pma_portsamplescontrol *)pmp->data;
- struct qib_ibdev *dev = to_idev(ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- unsigned long flags;
- u8 port_select = p->port_select;
-
- memset(pmp->data, 0, sizeof(pmp->data));
-
- p->port_select = port_select;
- if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- goto bail;
- }
- spin_lock_irqsave(&ibp->lock, flags);
- p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
- p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
- p->counter_width = 4; /* 32 bit counters */
- p->counter_mask0_9 = COUNTER_MASK0_9;
- p->sample_start = cpu_to_be32(ibp->pma_sample_start);
- p->sample_interval = cpu_to_be32(ibp->pma_sample_interval);
- p->tag = cpu_to_be16(ibp->pma_tag);
- p->counter_select[0] = ibp->pma_counter_select[0];
- p->counter_select[1] = ibp->pma_counter_select[1];
- p->counter_select[2] = ibp->pma_counter_select[2];
- p->counter_select[3] = ibp->pma_counter_select[3];
- p->counter_select[4] = ibp->pma_counter_select[4];
- spin_unlock_irqrestore(&ibp->lock, flags);
-
-bail:
- return reply((struct ib_smp *) pmp);
-}
-
-static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portsamplescontrol *p =
- (struct ib_pma_portsamplescontrol *)pmp->data;
- struct qib_ibdev *dev = to_idev(ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- unsigned long flags;
- u8 status, xmit_flags;
- int ret;
-
- if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- ret = reply((struct ib_smp *) pmp);
- goto bail;
- }
-
- spin_lock_irqsave(&ibp->lock, flags);
-
- /* Port Sampling code owns the PS* HW counters */
- xmit_flags = ppd->cong_stats.flags;
- ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE;
- status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
- if (status == IB_PMA_SAMPLE_STATUS_DONE ||
- (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
- xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
- ibp->pma_sample_start = be32_to_cpu(p->sample_start);
- ibp->pma_sample_interval = be32_to_cpu(p->sample_interval);
- ibp->pma_tag = be16_to_cpu(p->tag);
- ibp->pma_counter_select[0] = p->counter_select[0];
- ibp->pma_counter_select[1] = p->counter_select[1];
- ibp->pma_counter_select[2] = p->counter_select[2];
- ibp->pma_counter_select[3] = p->counter_select[3];
- ibp->pma_counter_select[4] = p->counter_select[4];
- dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval,
- ibp->pma_sample_start);
- }
- spin_unlock_irqrestore(&ibp->lock, flags);
-
- ret = pma_get_portsamplescontrol(pmp, ibdev, port);
-
-bail:
- return ret;
-}
-
-static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd,
- __be16 sel)
-{
- u64 ret;
-
- switch (sel) {
- case IB_PMA_PORT_XMIT_DATA:
- ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA);
- break;
- case IB_PMA_PORT_RCV_DATA:
- ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA);
- break;
- case IB_PMA_PORT_XMIT_PKTS:
- ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS);
- break;
- case IB_PMA_PORT_RCV_PKTS:
- ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS);
- break;
- case IB_PMA_PORT_XMIT_WAIT:
- ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT);
- break;
- default:
- ret = 0;
- }
-
- return ret;
-}
-
-/* This function assumes that the xmit_wait lock is already held */
-static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd)
-{
- u32 delta;
-
- delta = get_counter(&ppd->ibport_data, ppd,
- IB_PMA_PORT_XMIT_WAIT);
- return ppd->cong_stats.counter + delta;
-}
-
-static void cache_hw_sample_counters(struct qib_pportdata *ppd)
-{
- struct qib_ibport *ibp = &ppd->ibport_data;
-
- ppd->cong_stats.counter_cache.psxmitdata =
- get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA);
- ppd->cong_stats.counter_cache.psrcvdata =
- get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA);
- ppd->cong_stats.counter_cache.psxmitpkts =
- get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS);
- ppd->cong_stats.counter_cache.psrcvpkts =
- get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS);
- ppd->cong_stats.counter_cache.psxmitwait =
- get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT);
-}
-
-static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
- __be16 sel)
-{
- u64 ret;
-
- switch (sel) {
- case IB_PMA_PORT_XMIT_DATA:
- ret = ppd->cong_stats.counter_cache.psxmitdata;
- break;
- case IB_PMA_PORT_RCV_DATA:
- ret = ppd->cong_stats.counter_cache.psrcvdata;
- break;
- case IB_PMA_PORT_XMIT_PKTS:
- ret = ppd->cong_stats.counter_cache.psxmitpkts;
- break;
- case IB_PMA_PORT_RCV_PKTS:
- ret = ppd->cong_stats.counter_cache.psrcvpkts;
- break;
- case IB_PMA_PORT_XMIT_WAIT:
- ret = ppd->cong_stats.counter_cache.psxmitwait;
- break;
- default:
- ret = 0;
- }
-
- return ret;
-}
-
-static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portsamplesresult *p =
- (struct ib_pma_portsamplesresult *)pmp->data;
- struct qib_ibdev *dev = to_idev(ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- unsigned long flags;
- u8 status;
- int i;
-
- memset(pmp->data, 0, sizeof(pmp->data));
- spin_lock_irqsave(&ibp->lock, flags);
- p->tag = cpu_to_be16(ibp->pma_tag);
- if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
- p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
- else {
- status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
- p->sample_status = cpu_to_be16(status);
- if (status == IB_PMA_SAMPLE_STATUS_DONE) {
- cache_hw_sample_counters(ppd);
- ppd->cong_stats.counter =
- xmit_wait_get_value_delta(ppd);
- dd->f_set_cntr_sample(ppd,
- QIB_CONG_TIMER_PSINTERVAL, 0);
- ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
- }
- }
- for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
- p->counter[i] = cpu_to_be32(
- get_cache_hw_sample_counters(
- ppd, ibp->pma_counter_select[i]));
- spin_unlock_irqrestore(&ibp->lock, flags);
-
- return reply((struct ib_smp *) pmp);
-}
-
-static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portsamplesresult_ext *p =
- (struct ib_pma_portsamplesresult_ext *)pmp->data;
- struct qib_ibdev *dev = to_idev(ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- unsigned long flags;
- u8 status;
- int i;
-
- /* Port Sampling code owns the PS* HW counters */
- memset(pmp->data, 0, sizeof(pmp->data));
- spin_lock_irqsave(&ibp->lock, flags);
- p->tag = cpu_to_be16(ibp->pma_tag);
- if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
- p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
- else {
- status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
- p->sample_status = cpu_to_be16(status);
- /* 64 bits */
- p->extended_width = cpu_to_be32(0x80000000);
- if (status == IB_PMA_SAMPLE_STATUS_DONE) {
- cache_hw_sample_counters(ppd);
- ppd->cong_stats.counter =
- xmit_wait_get_value_delta(ppd);
- dd->f_set_cntr_sample(ppd,
- QIB_CONG_TIMER_PSINTERVAL, 0);
- ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
- }
- }
- for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
- p->counter[i] = cpu_to_be64(
- get_cache_hw_sample_counters(
- ppd, ibp->pma_counter_select[i]));
- spin_unlock_irqrestore(&ibp->lock, flags);
-
- return reply((struct ib_smp *) pmp);
-}
-
-static int pma_get_portcounters(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
- pmp->data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_verbs_counters cntrs;
- u8 port_select = p->port_select;
-
- qib_get_counters(ppd, &cntrs);
-
- /* Adjust counters for any resets done. */
- cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
- cntrs.link_error_recovery_counter -=
- ibp->z_link_error_recovery_counter;
- cntrs.link_downed_counter -= ibp->z_link_downed_counter;
- cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
- cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors;
- cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
- cntrs.port_xmit_data -= ibp->z_port_xmit_data;
- cntrs.port_rcv_data -= ibp->z_port_rcv_data;
- cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
- cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
- cntrs.local_link_integrity_errors -=
- ibp->z_local_link_integrity_errors;
- cntrs.excessive_buffer_overrun_errors -=
- ibp->z_excessive_buffer_overrun_errors;
- cntrs.vl15_dropped -= ibp->z_vl15_dropped;
- cntrs.vl15_dropped += ibp->n_vl15_dropped;
-
- memset(pmp->data, 0, sizeof(pmp->data));
-
- p->port_select = port_select;
- if (pmp->mad_hdr.attr_mod != 0 || port_select != port)
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-
- if (cntrs.symbol_error_counter > 0xFFFFUL)
- p->symbol_error_counter = cpu_to_be16(0xFFFF);
- else
- p->symbol_error_counter =
- cpu_to_be16((u16)cntrs.symbol_error_counter);
- if (cntrs.link_error_recovery_counter > 0xFFUL)
- p->link_error_recovery_counter = 0xFF;
- else
- p->link_error_recovery_counter =
- (u8)cntrs.link_error_recovery_counter;
- if (cntrs.link_downed_counter > 0xFFUL)
- p->link_downed_counter = 0xFF;
- else
- p->link_downed_counter = (u8)cntrs.link_downed_counter;
- if (cntrs.port_rcv_errors > 0xFFFFUL)
- p->port_rcv_errors = cpu_to_be16(0xFFFF);
- else
- p->port_rcv_errors =
- cpu_to_be16((u16) cntrs.port_rcv_errors);
- if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
- p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
- else
- p->port_rcv_remphys_errors =
- cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
- if (cntrs.port_xmit_discards > 0xFFFFUL)
- p->port_xmit_discards = cpu_to_be16(0xFFFF);
- else
- p->port_xmit_discards =
- cpu_to_be16((u16)cntrs.port_xmit_discards);
- if (cntrs.local_link_integrity_errors > 0xFUL)
- cntrs.local_link_integrity_errors = 0xFUL;
- if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
- cntrs.excessive_buffer_overrun_errors = 0xFUL;
- p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
- cntrs.excessive_buffer_overrun_errors;
- if (cntrs.vl15_dropped > 0xFFFFUL)
- p->vl15_dropped = cpu_to_be16(0xFFFF);
- else
- p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
- if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
- p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
- else
- p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
- if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
- p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
- else
- p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
- if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
- p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
- else
- p->port_xmit_packets =
- cpu_to_be32((u32)cntrs.port_xmit_packets);
- if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
- p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
- else
- p->port_rcv_packets =
- cpu_to_be32((u32) cntrs.port_rcv_packets);
-
- return reply((struct ib_smp *) pmp);
-}
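Most of the assignments above follow the same saturate-then-narrow pattern: a 64-bit counter is clamped to the width of the MAD field before conversion to big endian. A compact sketch of that pattern outside the driver (helper names are made up):

/* Minimal sketch of the saturating narrowing used for the PMA counters. */
#include <stdint.h>
#include <stdio.h>

static uint16_t sat16(uint64_t v)
{
	return v > 0xFFFFULL ? 0xFFFF : (uint16_t)v;
}

static uint32_t sat32(uint64_t v)
{
	return v > 0xFFFFFFFFULL ? 0xFFFFFFFFu : (uint32_t)v;
}

int main(void)
{
	/* 70000 overflows 16 bits but fits in 32 */
	printf("%u %u\n", (unsigned)sat16(70000), (unsigned)sat32(70000));
	return 0;
}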
-
-static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- /* Congestion PMA packets start at offset 24 not 64 */
- struct ib_pma_portcounters_cong *p =
- (struct ib_pma_portcounters_cong *)pmp->reserved;
- struct qib_verbs_counters cntrs;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_devdata *dd = dd_from_ppd(ppd);
- u32 port_select = be32_to_cpu(pmp->mad_hdr.attr_mod) & 0xFF;
- u64 xmit_wait_counter;
- unsigned long flags;
-
- /*
- * This check is performed only in the GET method because the
- * SET method ends up calling this anyway.
- */
- if (!dd->psxmitwait_supported)
- pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
- if (port_select != port)
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-
- qib_get_counters(ppd, &cntrs);
- spin_lock_irqsave(&ppd->ibport_data.lock, flags);
- xmit_wait_counter = xmit_wait_get_value_delta(ppd);
- spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
-
- /* Adjust counters for any resets done. */
- cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
- cntrs.link_error_recovery_counter -=
- ibp->z_link_error_recovery_counter;
- cntrs.link_downed_counter -= ibp->z_link_downed_counter;
- cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
- cntrs.port_rcv_remphys_errors -=
- ibp->z_port_rcv_remphys_errors;
- cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
- cntrs.local_link_integrity_errors -=
- ibp->z_local_link_integrity_errors;
- cntrs.excessive_buffer_overrun_errors -=
- ibp->z_excessive_buffer_overrun_errors;
- cntrs.vl15_dropped -= ibp->z_vl15_dropped;
- cntrs.vl15_dropped += ibp->n_vl15_dropped;
- cntrs.port_xmit_data -= ibp->z_port_xmit_data;
- cntrs.port_rcv_data -= ibp->z_port_rcv_data;
- cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
- cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
-
- memset(pmp->reserved, 0, sizeof(pmp->reserved) +
- sizeof(pmp->data));
-
- /*
- * Set top 3 bits to indicate interval in picoseconds in
- * remaining bits.
- */
- p->port_check_rate =
- cpu_to_be16((QIB_XMIT_RATE_PICO << 13) |
- (dd->psxmitwait_check_rate &
- ~(QIB_XMIT_RATE_PICO << 13)));
- p->port_adr_events = cpu_to_be64(0);
- p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);
- p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);
- p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);
- p->port_xmit_packets =
- cpu_to_be64(cntrs.port_xmit_packets);
- p->port_rcv_packets =
- cpu_to_be64(cntrs.port_rcv_packets);
- if (cntrs.symbol_error_counter > 0xFFFFUL)
- p->symbol_error_counter = cpu_to_be16(0xFFFF);
- else
- p->symbol_error_counter =
- cpu_to_be16(
- (u16)cntrs.symbol_error_counter);
- if (cntrs.link_error_recovery_counter > 0xFFUL)
- p->link_error_recovery_counter = 0xFF;
- else
- p->link_error_recovery_counter =
- (u8)cntrs.link_error_recovery_counter;
- if (cntrs.link_downed_counter > 0xFFUL)
- p->link_downed_counter = 0xFF;
- else
- p->link_downed_counter =
- (u8)cntrs.link_downed_counter;
- if (cntrs.port_rcv_errors > 0xFFFFUL)
- p->port_rcv_errors = cpu_to_be16(0xFFFF);
- else
- p->port_rcv_errors =
- cpu_to_be16((u16) cntrs.port_rcv_errors);
- if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
- p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
- else
- p->port_rcv_remphys_errors =
- cpu_to_be16(
- (u16)cntrs.port_rcv_remphys_errors);
- if (cntrs.port_xmit_discards > 0xFFFFUL)
- p->port_xmit_discards = cpu_to_be16(0xFFFF);
- else
- p->port_xmit_discards =
- cpu_to_be16((u16)cntrs.port_xmit_discards);
- if (cntrs.local_link_integrity_errors > 0xFUL)
- cntrs.local_link_integrity_errors = 0xFUL;
- if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
- cntrs.excessive_buffer_overrun_errors = 0xFUL;
- p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
- cntrs.excessive_buffer_overrun_errors;
- if (cntrs.vl15_dropped > 0xFFFFUL)
- p->vl15_dropped = cpu_to_be16(0xFFFF);
- else
- p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
-
- return reply((struct ib_smp *)pmp);
-}
-
-static void qib_snapshot_pmacounters(
- struct qib_ibport *ibp,
- struct qib_pma_counters *pmacounters)
-{
- struct qib_pma_counters *p;
- int cpu;
-
- memset(pmacounters, 0, sizeof(*pmacounters));
- for_each_possible_cpu(cpu) {
- p = per_cpu_ptr(ibp->pmastats, cpu);
- pmacounters->n_unicast_xmit += p->n_unicast_xmit;
- pmacounters->n_unicast_rcv += p->n_unicast_rcv;
- pmacounters->n_multicast_xmit += p->n_multicast_xmit;
- pmacounters->n_multicast_rcv += p->n_multicast_rcv;
- }
-}
-
-static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portcounters_ext *p =
- (struct ib_pma_portcounters_ext *)pmp->data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- u64 swords, rwords, spkts, rpkts, xwait;
- struct qib_pma_counters pma;
- u8 port_select = p->port_select;
-
- memset(pmp->data, 0, sizeof(pmp->data));
-
- p->port_select = port_select;
- if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- goto bail;
- }
-
- qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
-
- /* Adjust counters for any resets done. */
- swords -= ibp->z_port_xmit_data;
- rwords -= ibp->z_port_rcv_data;
- spkts -= ibp->z_port_xmit_packets;
- rpkts -= ibp->z_port_rcv_packets;
-
- p->port_xmit_data = cpu_to_be64(swords);
- p->port_rcv_data = cpu_to_be64(rwords);
- p->port_xmit_packets = cpu_to_be64(spkts);
- p->port_rcv_packets = cpu_to_be64(rpkts);
-
- qib_snapshot_pmacounters(ibp, &pma);
-
- p->port_unicast_xmit_packets = cpu_to_be64(pma.n_unicast_xmit
- - ibp->z_unicast_xmit);
- p->port_unicast_rcv_packets = cpu_to_be64(pma.n_unicast_rcv
- - ibp->z_unicast_rcv);
- p->port_multicast_xmit_packets = cpu_to_be64(pma.n_multicast_xmit
- - ibp->z_multicast_xmit);
- p->port_multicast_rcv_packets = cpu_to_be64(pma.n_multicast_rcv
- - ibp->z_multicast_rcv);
-
-bail:
- return reply((struct ib_smp *) pmp);
-}
-
-static int pma_set_portcounters(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
- pmp->data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_verbs_counters cntrs;
-
- /*
- * Since the HW doesn't support clearing counters, we save the
- * current count and subtract it from future responses.
- */
- qib_get_counters(ppd, &cntrs);
-
- if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
- ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
-
- if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
- ibp->z_link_error_recovery_counter =
- cntrs.link_error_recovery_counter;
-
- if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
- ibp->z_link_downed_counter = cntrs.link_downed_counter;
-
- if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
- ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
-
- if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
- ibp->z_port_rcv_remphys_errors =
- cntrs.port_rcv_remphys_errors;
-
- if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
- ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
-
- if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
- ibp->z_local_link_integrity_errors =
- cntrs.local_link_integrity_errors;
-
- if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
- ibp->z_excessive_buffer_overrun_errors =
- cntrs.excessive_buffer_overrun_errors;
-
- if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
- ibp->n_vl15_dropped = 0;
- ibp->z_vl15_dropped = cntrs.vl15_dropped;
- }
-
- if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
- ibp->z_port_xmit_data = cntrs.port_xmit_data;
-
- if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
- ibp->z_port_rcv_data = cntrs.port_rcv_data;
-
- if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
- ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
-
- if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
- ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
-
- return pma_get_portcounters(pmp, ibdev, port);
-}
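The comment in pma_set_portcounters above describes the zero-baseline trick used throughout this file: the hardware cannot clear its counters, so a PMA Set snapshots the current hardware value into the matching z_* field and every later Get subtracts it. A minimal sketch of that pattern, with hypothetical names:

/*
 * Zero-baseline sketch (hypothetical names): the hardware counter only
 * ever increases, so a "clear" just records the current value and later
 * reads subtract it.
 */
struct sw_counter {
	u64 baseline;	/* snapshot taken at the last PMA Set ("clear") */
};

static u64 sw_counter_read(const struct sw_counter *c, u64 hw_value)
{
	return hw_value - c->baseline;	/* value reported to a PMA Get */
}

static void sw_counter_clear(struct sw_counter *c, u64 hw_value)
{
	c->baseline = hw_value;		/* future reads now start from 0 */
}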
-
-static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_devdata *dd = dd_from_ppd(ppd);
- struct qib_verbs_counters cntrs;
- u32 counter_select = (be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24) & 0xFF;
- int ret = 0;
- unsigned long flags;
-
- qib_get_counters(ppd, &cntrs);
- /* Get counter values before we save them */
- ret = pma_get_portcounters_cong(pmp, ibdev, port);
-
- if (counter_select & IB_PMA_SEL_CONG_XMIT) {
- spin_lock_irqsave(&ppd->ibport_data.lock, flags);
- ppd->cong_stats.counter = 0;
- dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
- 0x0);
- spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
- }
- if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
- ibp->z_port_xmit_data = cntrs.port_xmit_data;
- ibp->z_port_rcv_data = cntrs.port_rcv_data;
- ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
- ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
- }
- if (counter_select & IB_PMA_SEL_CONG_ALL) {
- ibp->z_symbol_error_counter =
- cntrs.symbol_error_counter;
- ibp->z_link_error_recovery_counter =
- cntrs.link_error_recovery_counter;
- ibp->z_link_downed_counter =
- cntrs.link_downed_counter;
- ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
- ibp->z_port_rcv_remphys_errors =
- cntrs.port_rcv_remphys_errors;
- ibp->z_port_xmit_discards =
- cntrs.port_xmit_discards;
- ibp->z_local_link_integrity_errors =
- cntrs.local_link_integrity_errors;
- ibp->z_excessive_buffer_overrun_errors =
- cntrs.excessive_buffer_overrun_errors;
- ibp->n_vl15_dropped = 0;
- ibp->z_vl15_dropped = cntrs.vl15_dropped;
- }
-
- return ret;
-}
-
-static int pma_set_portcounters_ext(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
- pmp->data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- u64 swords, rwords, spkts, rpkts, xwait;
- struct qib_pma_counters pma;
-
- qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
-
- if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
- ibp->z_port_xmit_data = swords;
-
- if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
- ibp->z_port_rcv_data = rwords;
-
- if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
- ibp->z_port_xmit_packets = spkts;
-
- if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
- ibp->z_port_rcv_packets = rpkts;
-
- qib_snapshot_pmacounters(ibp, &pma);
-
- if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
- ibp->z_unicast_xmit = pma.n_unicast_xmit;
-
- if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
- ibp->z_unicast_rcv = pma.n_unicast_rcv;
-
- if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
- ibp->z_multicast_xmit = pma.n_multicast_xmit;
-
- if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
- ibp->z_multicast_rcv = pma.n_multicast_rcv;
-
- return pma_get_portcounters_ext(pmp, ibdev, port);
-}
-
-static int process_subn(struct ib_device *ibdev, int mad_flags,
- u8 port, const struct ib_mad *in_mad,
- struct ib_mad *out_mad)
-{
- struct ib_smp *smp = (struct ib_smp *)out_mad;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- int ret;
-
- *out_mad = *in_mad;
- if (smp->class_version != 1) {
- smp->status |= IB_SMP_UNSUP_VERSION;
- ret = reply(smp);
- goto bail;
- }
-
- ret = check_mkey(ibp, smp, mad_flags);
- if (ret) {
- u32 port_num = be32_to_cpu(smp->attr_mod);
-
- /*
- * If this is a get/set portinfo, we already check the
- * M_Key if the MAD is for another port and the M_Key
- * is OK on the receiving port. This check is needed
- * to increment the error counters when the M_Key
- * fails to match on *both* ports.
- */
- if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
- (smp->method == IB_MGMT_METHOD_GET ||
- smp->method == IB_MGMT_METHOD_SET) &&
- port_num && port_num <= ibdev->phys_port_cnt &&
- port != port_num)
- (void) check_mkey(to_iport(ibdev, port_num), smp, 0);
- ret = IB_MAD_RESULT_FAILURE;
- goto bail;
- }
-
- switch (smp->method) {
- case IB_MGMT_METHOD_GET:
- switch (smp->attr_id) {
- case IB_SMP_ATTR_NODE_DESC:
- ret = subn_get_nodedescription(smp, ibdev);
- goto bail;
- case IB_SMP_ATTR_NODE_INFO:
- ret = subn_get_nodeinfo(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_GUID_INFO:
- ret = subn_get_guidinfo(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_PORT_INFO:
- ret = subn_get_portinfo(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_PKEY_TABLE:
- ret = subn_get_pkeytable(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_SL_TO_VL_TABLE:
- ret = subn_get_sl_to_vl(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_VL_ARB_TABLE:
- ret = subn_get_vl_arb(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_SM_INFO:
- if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
- ret = IB_MAD_RESULT_SUCCESS |
- IB_MAD_RESULT_CONSUMED;
- goto bail;
- }
- if (ibp->port_cap_flags & IB_PORT_SM) {
- ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
- }
- /* FALLTHROUGH */
- default:
- smp->status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply(smp);
- goto bail;
- }
-
- case IB_MGMT_METHOD_SET:
- switch (smp->attr_id) {
- case IB_SMP_ATTR_GUID_INFO:
- ret = subn_set_guidinfo(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_PORT_INFO:
- ret = subn_set_portinfo(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_PKEY_TABLE:
- ret = subn_set_pkeytable(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_SL_TO_VL_TABLE:
- ret = subn_set_sl_to_vl(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_VL_ARB_TABLE:
- ret = subn_set_vl_arb(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_SM_INFO:
- if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
- ret = IB_MAD_RESULT_SUCCESS |
- IB_MAD_RESULT_CONSUMED;
- goto bail;
- }
- if (ibp->port_cap_flags & IB_PORT_SM) {
- ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
- }
- /* FALLTHROUGH */
- default:
- smp->status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply(smp);
- goto bail;
- }
-
- case IB_MGMT_METHOD_TRAP_REPRESS:
- if (smp->attr_id == IB_SMP_ATTR_NOTICE)
- ret = subn_trap_repress(smp, ibdev, port);
- else {
- smp->status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply(smp);
- }
- goto bail;
-
- case IB_MGMT_METHOD_TRAP:
- case IB_MGMT_METHOD_REPORT:
- case IB_MGMT_METHOD_REPORT_RESP:
- case IB_MGMT_METHOD_GET_RESP:
- /*
- * The ib_mad module will call us to process responses
- * before checking for other consumers.
- * Just tell the caller to process it normally.
- */
- ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
-
- case IB_MGMT_METHOD_SEND:
- if (ib_get_smp_direction(smp) &&
- smp->attr_id == QIB_VENDOR_IPG) {
- ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
- smp->data[0]);
- ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
- } else
- ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
-
- default:
- smp->status |= IB_SMP_UNSUP_METHOD;
- ret = reply(smp);
- }
-
-bail:
- return ret;
-}
-
-static int process_perf(struct ib_device *ibdev, u8 port,
- const struct ib_mad *in_mad,
- struct ib_mad *out_mad)
-{
- struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
- int ret;
-
- *out_mad = *in_mad;
- if (pmp->mad_hdr.class_version != 1) {
- pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
- ret = reply((struct ib_smp *) pmp);
- goto bail;
- }
-
- switch (pmp->mad_hdr.method) {
- case IB_MGMT_METHOD_GET:
- switch (pmp->mad_hdr.attr_id) {
- case IB_PMA_CLASS_PORT_INFO:
- ret = pma_get_classportinfo(pmp, ibdev);
- goto bail;
- case IB_PMA_PORT_SAMPLES_CONTROL:
- ret = pma_get_portsamplescontrol(pmp, ibdev, port);
- goto bail;
- case IB_PMA_PORT_SAMPLES_RESULT:
- ret = pma_get_portsamplesresult(pmp, ibdev, port);
- goto bail;
- case IB_PMA_PORT_SAMPLES_RESULT_EXT:
- ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
- goto bail;
- case IB_PMA_PORT_COUNTERS:
- ret = pma_get_portcounters(pmp, ibdev, port);
- goto bail;
- case IB_PMA_PORT_COUNTERS_EXT:
- ret = pma_get_portcounters_ext(pmp, ibdev, port);
- goto bail;
- case IB_PMA_PORT_COUNTERS_CONG:
- ret = pma_get_portcounters_cong(pmp, ibdev, port);
- goto bail;
- default:
- pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply((struct ib_smp *) pmp);
- goto bail;
- }
-
- case IB_MGMT_METHOD_SET:
- switch (pmp->mad_hdr.attr_id) {
- case IB_PMA_PORT_SAMPLES_CONTROL:
- ret = pma_set_portsamplescontrol(pmp, ibdev, port);
- goto bail;
- case IB_PMA_PORT_COUNTERS:
- ret = pma_set_portcounters(pmp, ibdev, port);
- goto bail;
- case IB_PMA_PORT_COUNTERS_EXT:
- ret = pma_set_portcounters_ext(pmp, ibdev, port);
- goto bail;
- case IB_PMA_PORT_COUNTERS_CONG:
- ret = pma_set_portcounters_cong(pmp, ibdev, port);
- goto bail;
- default:
- pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply((struct ib_smp *) pmp);
- goto bail;
- }
-
- case IB_MGMT_METHOD_TRAP:
- case IB_MGMT_METHOD_GET_RESP:
- /*
- * The ib_mad module will call us to process responses
- * before checking for other consumers.
- * Just tell the caller to process it normally.
- */
- ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
-
- default:
- pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
- ret = reply((struct ib_smp *) pmp);
- }
-
-bail:
- return ret;
-}
-
-static int cc_get_classportinfo(struct ib_cc_mad *ccp,
- struct ib_device *ibdev)
-{
- struct ib_cc_classportinfo_attr *p =
- (struct ib_cc_classportinfo_attr *)ccp->mgmt_data;
-
- memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
-
- p->base_version = 1;
- p->class_version = 1;
- p->cap_mask = 0;
-
- /*
- * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
- */
- p->resp_time_value = 18;
-
- return reply((struct ib_smp *) ccp);
-}
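The resp_time_value written above follows the ClassPortInfo convention of 4.096 usec * 2^value, which is where the 1.073741824 sec figure in the comment comes from. A small sketch of that conversion (illustrative helper name, assuming that encoding):

/*
 * Sketch: decode the 5-bit resp_time_value as 4.096 usec * 2^value
 * (i.e. 4096 ns shifted left).  A value of 18 gives 1,073,741,824 ns,
 * roughly 1.07 seconds.
 */
static u64 cc_resp_time_ns(u8 resp_time_value)
{
	return 4096ULL << (resp_time_value & 0x1f);
}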
-
-static int cc_get_congestion_info(struct ib_cc_mad *ccp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_cc_info_attr *p =
- (struct ib_cc_info_attr *)ccp->mgmt_data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
-
- memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
-
- p->congestion_info = 0;
- p->control_table_cap = ppd->cc_max_table_entries;
-
- return reply((struct ib_smp *) ccp);
-}
-
-static int cc_get_congestion_setting(struct ib_cc_mad *ccp,
- struct ib_device *ibdev, u8 port)
-{
- int i;
- struct ib_cc_congestion_setting_attr *p =
- (struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct ib_cc_congestion_entry_shadow *entries;
-
- memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
-
- spin_lock(&ppd->cc_shadow_lock);
-
- entries = ppd->congestion_entries_shadow->entries;
- p->port_control = cpu_to_be16(
- ppd->congestion_entries_shadow->port_control);
- p->control_map = cpu_to_be16(
- ppd->congestion_entries_shadow->control_map);
- for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
- p->entries[i].ccti_increase = entries[i].ccti_increase;
- p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
- p->entries[i].trigger_threshold = entries[i].trigger_threshold;
- p->entries[i].ccti_min = entries[i].ccti_min;
- }
-
- spin_unlock(&ppd->cc_shadow_lock);
-
- return reply((struct ib_smp *) ccp);
-}
-
-static int cc_get_congestion_control_table(struct ib_cc_mad *ccp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_cc_table_attr *p =
- (struct ib_cc_table_attr *)ccp->mgmt_data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
- u32 max_cct_block;
- u32 cct_entry;
- struct ib_cc_table_entry_shadow *entries;
- int i;
-
- /* Is the table index more than what is supported? */
- if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
- goto bail;
-
- memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
-
- spin_lock(&ppd->cc_shadow_lock);
-
- max_cct_block =
- (ppd->ccti_entries_shadow->ccti_last_entry + 1)/IB_CCT_ENTRIES;
- max_cct_block = max_cct_block ? max_cct_block - 1 : 0;
-
- if (cct_block_index > max_cct_block) {
- spin_unlock(&ppd->cc_shadow_lock);
- goto bail;
- }
-
- ccp->attr_mod = cpu_to_be32(cct_block_index);
-
- cct_entry = IB_CCT_ENTRIES * (cct_block_index + 1);
-
- cct_entry--;
-
- p->ccti_limit = cpu_to_be16(cct_entry);
-
- entries = &ppd->ccti_entries_shadow->
- entries[IB_CCT_ENTRIES * cct_block_index];
- cct_entry %= IB_CCT_ENTRIES;
-
- for (i = 0; i <= cct_entry; i++)
- p->ccti_entries[i].entry = cpu_to_be16(entries[i].entry);
-
- spin_unlock(&ppd->cc_shadow_lock);
-
- return reply((struct ib_smp *) ccp);
-
-bail:
- return reply_failure((struct ib_smp *) ccp);
-}
-
-static int cc_set_congestion_setting(struct ib_cc_mad *ccp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_cc_congestion_setting_attr *p =
- (struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- int i;
-
- ppd->cc_sl_control_map = be16_to_cpu(p->control_map);
-
- for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
- ppd->congestion_entries[i].ccti_increase =
- p->entries[i].ccti_increase;
-
- ppd->congestion_entries[i].ccti_timer =
- be16_to_cpu(p->entries[i].ccti_timer);
-
- ppd->congestion_entries[i].trigger_threshold =
- p->entries[i].trigger_threshold;
-
- ppd->congestion_entries[i].ccti_min =
- p->entries[i].ccti_min;
- }
-
- return reply((struct ib_smp *) ccp);
-}
-
-static int cc_set_congestion_control_table(struct ib_cc_mad *ccp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_cc_table_attr *p =
- (struct ib_cc_table_attr *)ccp->mgmt_data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
- u32 cct_entry;
- struct ib_cc_table_entry_shadow *entries;
- int i;
-
- /* Is the table index more than what is supported? */
- if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
- goto bail;
-
- /* If this packet is the first in the sequence then
- * zero the total table entry count.
- */
- if (be16_to_cpu(p->ccti_limit) < IB_CCT_ENTRIES)
- ppd->total_cct_entry = 0;
-
- cct_entry = (be16_to_cpu(p->ccti_limit))%IB_CCT_ENTRIES;
-
- /* ccti_limit is 0 to 63 */
- ppd->total_cct_entry += (cct_entry + 1);
-
- if (ppd->total_cct_entry > ppd->cc_supported_table_entries)
- goto bail;
-
- ppd->ccti_limit = be16_to_cpu(p->ccti_limit);
-
- entries = ppd->ccti_entries + (IB_CCT_ENTRIES * cct_block_index);
-
- for (i = 0; i <= cct_entry; i++)
- entries[i].entry = be16_to_cpu(p->ccti_entries[i].entry);
-
- spin_lock(&ppd->cc_shadow_lock);
-
- ppd->ccti_entries_shadow->ccti_last_entry = ppd->total_cct_entry - 1;
- memcpy(ppd->ccti_entries_shadow->entries, ppd->ccti_entries,
- (ppd->total_cct_entry * sizeof(struct ib_cc_table_entry)));
-
- ppd->congestion_entries_shadow->port_control = IB_CC_CCS_PC_SL_BASED;
- ppd->congestion_entries_shadow->control_map = ppd->cc_sl_control_map;
- memcpy(ppd->congestion_entries_shadow->entries, ppd->congestion_entries,
- IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry));
-
- spin_unlock(&ppd->cc_shadow_lock);
-
- return reply((struct ib_smp *) ccp);
-
-bail:
- return reply_failure((struct ib_smp *) ccp);
-}
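The block arithmetic in cc_set_congestion_control_table above boils down to: attr_mod selects a 64-entry block (IB_CCT_ENTRIES) and ccti_limit names the highest valid CCTI so far, so the last valid index inside the block being written is ccti_limit modulo 64. A small illustrative helper (names are assumptions, not driver code):

/*
 * Illustrative helper for the CCT block arithmetic above: with 64-entry
 * blocks, the last valid local index within the block currently being
 * written is ccti_limit % 64 (a limit of 63 fills one whole block,
 * 127 fills two, and so on).
 */
#define EXAMPLE_CCT_ENTRIES 64	/* mirrors IB_CCT_ENTRIES */

static unsigned int cct_last_index_in_block(unsigned int ccti_limit)
{
	return ccti_limit % EXAMPLE_CCT_ENTRIES;
}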
-
-static int check_cc_key(struct qib_ibport *ibp,
- struct ib_cc_mad *ccp, int mad_flags)
-{
- return 0;
-}
-
-static int process_cc(struct ib_device *ibdev, int mad_flags,
- u8 port, const struct ib_mad *in_mad,
- struct ib_mad *out_mad)
-{
- struct ib_cc_mad *ccp = (struct ib_cc_mad *)out_mad;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- int ret;
-
- *out_mad = *in_mad;
-
- if (ccp->class_version != 2) {
- ccp->status |= IB_SMP_UNSUP_VERSION;
- ret = reply((struct ib_smp *)ccp);
- goto bail;
- }
-
- ret = check_cc_key(ibp, ccp, mad_flags);
- if (ret)
- goto bail;
-
- switch (ccp->method) {
- case IB_MGMT_METHOD_GET:
- switch (ccp->attr_id) {
- case IB_CC_ATTR_CLASSPORTINFO:
- ret = cc_get_classportinfo(ccp, ibdev);
- goto bail;
-
- case IB_CC_ATTR_CONGESTION_INFO:
- ret = cc_get_congestion_info(ccp, ibdev, port);
- goto bail;
-
- case IB_CC_ATTR_CA_CONGESTION_SETTING:
- ret = cc_get_congestion_setting(ccp, ibdev, port);
- goto bail;
-
- case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
- ret = cc_get_congestion_control_table(ccp, ibdev, port);
- goto bail;
-
- /* FALLTHROUGH */
- default:
- ccp->status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply((struct ib_smp *) ccp);
- goto bail;
- }
-
- case IB_MGMT_METHOD_SET:
- switch (ccp->attr_id) {
- case IB_CC_ATTR_CA_CONGESTION_SETTING:
- ret = cc_set_congestion_setting(ccp, ibdev, port);
- goto bail;
-
- case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
- ret = cc_set_congestion_control_table(ccp, ibdev, port);
- goto bail;
-
- /* FALLTHROUGH */
- default:
- ccp->status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply((struct ib_smp *) ccp);
- goto bail;
- }
-
- case IB_MGMT_METHOD_GET_RESP:
- /*
- * The ib_mad module will call us to process responses
- * before checking for other consumers.
- * Just tell the caller to process it normally.
- */
- ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
-
- case IB_MGMT_METHOD_TRAP:
- default:
- ccp->status |= IB_SMP_UNSUP_METHOD;
- ret = reply((struct ib_smp *) ccp);
- }
-
-bail:
- return ret;
-}
-
-/**
- * qib_process_mad - process an incoming MAD packet
- * @ibdev: the infiniband device this packet came in on
- * @mad_flags: MAD flags
- * @port: the port number this packet came in on
- * @in_wc: the work completion entry for this packet
- * @in_grh: the global route header for this packet
- * @in_mad: the incoming MAD
- * @out_mad: any outgoing MAD reply
- *
- * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
- * interested in processing.
- *
- * Note that the verbs framework has already done the MAD sanity checks,
- * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
- * MADs.
- *
- * This is called by the ib_mad module.
- */
-int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
- const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
-{
- int ret;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
-
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
-
- switch (in_mad->mad_hdr.mgmt_class) {
- case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
- case IB_MGMT_CLASS_SUBN_LID_ROUTED:
- ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
- goto bail;
-
- case IB_MGMT_CLASS_PERF_MGMT:
- ret = process_perf(ibdev, port, in_mad, out_mad);
- goto bail;
-
- case IB_MGMT_CLASS_CONG_MGMT:
- if (!ppd->congestion_entries_shadow ||
- !qib_cc_table_size) {
- ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
- }
- ret = process_cc(ibdev, mad_flags, port, in_mad, out_mad);
- goto bail;
-
- default:
- ret = IB_MAD_RESULT_SUCCESS;
- }
-
-bail:
- return ret;
-}
-
-static void send_handler(struct ib_mad_agent *agent,
- struct ib_mad_send_wc *mad_send_wc)
-{
- ib_free_send_mad(mad_send_wc->send_buf);
-}
-
-static void xmit_wait_timer_func(unsigned long opaque)
-{
- struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
- struct qib_devdata *dd = dd_from_ppd(ppd);
- unsigned long flags;
- u8 status;
-
- spin_lock_irqsave(&ppd->ibport_data.lock, flags);
- if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
- status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
- if (status == IB_PMA_SAMPLE_STATUS_DONE) {
- /* save counter cache */
- cache_hw_sample_counters(ppd);
- ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
- } else
- goto done;
- }
- ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
- dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
-done:
- spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
- mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
-}
-
-int qib_create_agents(struct qib_ibdev *dev)
-{
- struct qib_devdata *dd = dd_from_dev(dev);
- struct ib_mad_agent *agent;
- struct qib_ibport *ibp;
- int p;
- int ret;
-
- for (p = 0; p < dd->num_pports; p++) {
- ibp = &dd->pport[p].ibport_data;
- agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
- NULL, 0, send_handler,
- NULL, NULL, 0);
- if (IS_ERR(agent)) {
- ret = PTR_ERR(agent);
- goto err;
- }
-
- /* Initialize xmit_wait structure */
- dd->pport[p].cong_stats.counter = 0;
- init_timer(&dd->pport[p].cong_stats.timer);
- dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func;
- dd->pport[p].cong_stats.timer.data =
- (unsigned long)(&dd->pport[p]);
- dd->pport[p].cong_stats.timer.expires = 0;
- add_timer(&dd->pport[p].cong_stats.timer);
-
- ibp->send_agent = agent;
- }
-
- return 0;
-
-err:
- for (p = 0; p < dd->num_pports; p++) {
- ibp = &dd->pport[p].ibport_data;
- if (ibp->send_agent) {
- agent = ibp->send_agent;
- ibp->send_agent = NULL;
- ib_unregister_mad_agent(agent);
- }
- }
-
- return ret;
-}
-
-void qib_free_agents(struct qib_ibdev *dev)
-{
- struct qib_devdata *dd = dd_from_dev(dev);
- struct ib_mad_agent *agent;
- struct qib_ibport *ibp;
- int p;
-
- for (p = 0; p < dd->num_pports; p++) {
- ibp = &dd->pport[p].ibport_data;
- if (ibp->send_agent) {
- agent = ibp->send_agent;
- ibp->send_agent = NULL;
- ib_unregister_mad_agent(agent);
- }
- if (ibp->sm_ah) {
- ib_destroy_ah(&ibp->sm_ah->ibah);
- ibp->sm_ah = NULL;
- }
- if (dd->pport[p].cong_stats.timer.data)
- del_timer_sync(&dd->pport[p].cong_stats.timer);
- }
-}
diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h
deleted file mode 100644
index 57e99dc0d80c..000000000000
--- a/drivers/infiniband/hw/qib/qib_mad.h
+++ /dev/null
@@ -1,300 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _QIB_MAD_H
-#define _QIB_MAD_H
-
-#include <rdma/ib_pma.h>
-
-#define IB_SMP_UNSUP_VERSION \
-cpu_to_be16(IB_MGMT_MAD_STATUS_BAD_VERSION)
-
-#define IB_SMP_UNSUP_METHOD \
-cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD)
-
-#define IB_SMP_UNSUP_METH_ATTR \
-cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB)
-
-#define IB_SMP_INVALID_FIELD \
-cpu_to_be16(IB_MGMT_MAD_STATUS_INVALID_ATTRIB_VALUE)
-
-#define IB_VLARB_LOWPRI_0_31 1
-#define IB_VLARB_LOWPRI_32_63 2
-#define IB_VLARB_HIGHPRI_0_31 3
-#define IB_VLARB_HIGHPRI_32_63 4
-
-#define IB_PMA_PORT_COUNTERS_CONG cpu_to_be16(0xFF00)
-
-struct ib_pma_portcounters_cong {
- u8 reserved;
- u8 reserved1;
- __be16 port_check_rate;
- __be16 symbol_error_counter;
- u8 link_error_recovery_counter;
- u8 link_downed_counter;
- __be16 port_rcv_errors;
- __be16 port_rcv_remphys_errors;
- __be16 port_rcv_switch_relay_errors;
- __be16 port_xmit_discards;
- u8 port_xmit_constraint_errors;
- u8 port_rcv_constraint_errors;
- u8 reserved2;
- u8 link_overrun_errors; /* LocalLink: 7:4, BufferOverrun: 3:0 */
- __be16 reserved3;
- __be16 vl15_dropped;
- __be64 port_xmit_data;
- __be64 port_rcv_data;
- __be64 port_xmit_packets;
- __be64 port_rcv_packets;
- __be64 port_xmit_wait;
- __be64 port_adr_events;
-} __packed;
-
-#define IB_PMA_CONG_HW_CONTROL_TIMER 0x00
-#define IB_PMA_CONG_HW_CONTROL_SAMPLE 0x01
-
-#define QIB_XMIT_RATE_UNSUPPORTED 0x0
-#define QIB_XMIT_RATE_PICO 0x7
-/* number of 4nsec cycles equaling 2secs */
-#define QIB_CONG_TIMER_PSINTERVAL 0x1DCD64EC
-
-#define IB_PMA_SEL_CONG_ALL 0x01
-#define IB_PMA_SEL_CONG_PORT_DATA 0x02
-#define IB_PMA_SEL_CONG_XMIT 0x04
-#define IB_PMA_SEL_CONG_ROUTING 0x08
-
-/*
- * Congestion control class attributes
- */
-#define IB_CC_ATTR_CLASSPORTINFO cpu_to_be16(0x0001)
-#define IB_CC_ATTR_NOTICE cpu_to_be16(0x0002)
-#define IB_CC_ATTR_CONGESTION_INFO cpu_to_be16(0x0011)
-#define IB_CC_ATTR_CONGESTION_KEY_INFO cpu_to_be16(0x0012)
-#define IB_CC_ATTR_CONGESTION_LOG cpu_to_be16(0x0013)
-#define IB_CC_ATTR_SWITCH_CONGESTION_SETTING cpu_to_be16(0x0014)
-#define IB_CC_ATTR_SWITCH_PORT_CONGESTION_SETTING cpu_to_be16(0x0015)
-#define IB_CC_ATTR_CA_CONGESTION_SETTING cpu_to_be16(0x0016)
-#define IB_CC_ATTR_CONGESTION_CONTROL_TABLE cpu_to_be16(0x0017)
-#define IB_CC_ATTR_TIME_STAMP cpu_to_be16(0x0018)
-
-/* generalizations for threshold values */
-#define IB_CC_THRESHOLD_NONE 0x0
-#define IB_CC_THRESHOLD_MIN 0x1
-#define IB_CC_THRESHOLD_MAX 0xf
-
-/* CCA MAD header constants */
-#define IB_CC_MAD_LOGDATA_LEN 32
-#define IB_CC_MAD_MGMTDATA_LEN 192
-
-struct ib_cc_mad {
- u8 base_version;
- u8 mgmt_class;
- u8 class_version;
- u8 method;
- __be16 status;
- __be16 class_specific;
- __be64 tid;
- __be16 attr_id;
- __be16 resv;
- __be32 attr_mod;
- __be64 cckey;
-
- /* For CongestionLog attribute only */
- u8 log_data[IB_CC_MAD_LOGDATA_LEN];
-
- u8 mgmt_data[IB_CC_MAD_MGMTDATA_LEN];
-} __packed;
-
-/*
- * Congestion Control class portinfo capability mask bits
- */
-#define IB_CC_CPI_CM_TRAP_GEN cpu_to_be16(1 << 0)
-#define IB_CC_CPI_CM_GET_SET_NOTICE cpu_to_be16(1 << 1)
-#define IB_CC_CPI_CM_CAP2 cpu_to_be16(1 << 2)
-#define IB_CC_CPI_CM_ENHANCEDPORT0_CC cpu_to_be16(1 << 8)
-
-struct ib_cc_classportinfo_attr {
- u8 base_version;
- u8 class_version;
- __be16 cap_mask;
- u8 reserved[3];
- u8 resp_time_value; /* only lower 5 bits */
- union ib_gid redirect_gid;
- __be32 redirect_tc_sl_fl; /* 8, 4, 20 bits respectively */
- __be16 redirect_lid;
- __be16 redirect_pkey;
- __be32 redirect_qp; /* only lower 24 bits */
- __be32 redirect_qkey;
- union ib_gid trap_gid;
- __be32 trap_tc_sl_fl; /* 8, 4, 20 bits respectively */
- __be16 trap_lid;
- __be16 trap_pkey;
- __be32 trap_hl_qp; /* 8, 24 bits respectively */
- __be32 trap_qkey;
-} __packed;
-
-/* Congestion control traps */
-#define IB_CC_TRAP_KEY_VIOLATION 0x0000
-
-struct ib_cc_trap_key_violation_attr {
- __be16 source_lid;
- u8 method;
- u8 reserved1;
- __be16 attrib_id;
- __be32 attrib_mod;
- __be32 qp;
- __be64 cckey;
- u8 sgid[16];
- u8 padding[24];
-} __packed;
-
-/* Congestion info flags */
-#define IB_CC_CI_FLAGS_CREDIT_STARVATION 0x1
-#define IB_CC_TABLE_CAP_DEFAULT 31
-
-struct ib_cc_info_attr {
- __be16 congestion_info;
- u8 control_table_cap; /* Multiple of 64 entry unit CCTs */
-} __packed;
-
-struct ib_cc_key_info_attr {
- __be64 cckey;
- u8 protect;
- __be16 lease_period;
- __be16 violations;
-} __packed;
-
-#define IB_CC_CL_CA_LOGEVENTS_LEN 208
-
-struct ib_cc_log_attr {
- u8 log_type;
- u8 congestion_flags;
- __be16 threshold_event_counter;
- __be16 threshold_congestion_event_map;
- __be16 current_time_stamp;
- u8 log_events[IB_CC_CL_CA_LOGEVENTS_LEN];
-} __packed;
-
-#define IB_CC_CLEC_SERVICETYPE_RC 0x0
-#define IB_CC_CLEC_SERVICETYPE_UC 0x1
-#define IB_CC_CLEC_SERVICETYPE_RD 0x2
-#define IB_CC_CLEC_SERVICETYPE_UD 0x3
-
-struct ib_cc_log_event {
- u8 local_qp_cn_entry;
- u8 remote_qp_number_cn_entry[3];
- u8 sl_cn_entry:4;
- u8 service_type_cn_entry:4;
- __be32 remote_lid_cn_entry;
- __be32 timestamp_cn_entry;
-} __packed;
-
-/* Sixteen congestion entries */
-#define IB_CC_CCS_ENTRIES 16
-
-/* Port control flags */
-#define IB_CC_CCS_PC_SL_BASED 0x01
-
-struct ib_cc_congestion_entry {
- u8 ccti_increase;
- __be16 ccti_timer;
- u8 trigger_threshold;
- u8 ccti_min; /* min CCTI for cc table */
-} __packed;
-
-struct ib_cc_congestion_entry_shadow {
- u8 ccti_increase;
- u16 ccti_timer;
- u8 trigger_threshold;
- u8 ccti_min; /* min CCTI for cc table */
-} __packed;
-
-struct ib_cc_congestion_setting_attr {
- __be16 port_control;
- __be16 control_map;
- struct ib_cc_congestion_entry entries[IB_CC_CCS_ENTRIES];
-} __packed;
-
-struct ib_cc_congestion_setting_attr_shadow {
- u16 port_control;
- u16 control_map;
- struct ib_cc_congestion_entry_shadow entries[IB_CC_CCS_ENTRIES];
-} __packed;
-
-#define IB_CC_TABLE_ENTRY_INCREASE_DEFAULT 1
-#define IB_CC_TABLE_ENTRY_TIMER_DEFAULT 1
-
-/* 64 Congestion Control table entries in a single MAD */
-#define IB_CCT_ENTRIES 64
-#define IB_CCT_MIN_ENTRIES (IB_CCT_ENTRIES * 2)
-
-struct ib_cc_table_entry {
- __be16 entry; /* shift:2, multiplier:14 */
-};
-
-struct ib_cc_table_entry_shadow {
- u16 entry; /* shift:2, multiplier:14 */
-};
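As the inline comments note, each 16-bit CCT entry packs a 2-bit shift and a 14-bit multiplier. A sketch of the unpacking, assuming the shift occupies the top two bits and the multiplier the low fourteen:

/*
 * Sketch: unpack a congestion control table entry, assuming the 2-bit
 * shift sits in bits 15:14 and the 14-bit multiplier in bits 13:0.
 */
static void cct_entry_unpack(u16 entry, u8 *shift, u16 *multiplier)
{
	*shift = entry >> 14;		/* bits 15:14 */
	*multiplier = entry & 0x3fff;	/* bits 13:0  */
}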
-
-struct ib_cc_table_attr {
- __be16 ccti_limit; /* max CCTI for cc table */
- struct ib_cc_table_entry ccti_entries[IB_CCT_ENTRIES];
-} __packed;
-
-struct ib_cc_table_attr_shadow {
- u16 ccti_limit; /* max CCTI for cc table */
- struct ib_cc_table_entry_shadow ccti_entries[IB_CCT_ENTRIES];
-} __packed;
-
-#define CC_TABLE_SHADOW_MAX \
- (IB_CC_TABLE_CAP_DEFAULT * IB_CCT_ENTRIES)
-
-struct cc_table_shadow {
- u16 ccti_last_entry;
- struct ib_cc_table_entry_shadow entries[CC_TABLE_SHADOW_MAX];
-} __packed;
-
-/*
- * The PortSamplesControl.CounterMasks field is an array of 3 bit fields
- * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
- * We support 5 counters which only count the mandatory quantities.
- */
-#define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
-#define COUNTER_MASK0_9 \
- cpu_to_be32(COUNTER_MASK(1, 0) | \
- COUNTER_MASK(1, 1) | \
- COUNTER_MASK(1, 2) | \
- COUNTER_MASK(1, 3) | \
- COUNTER_MASK(1, 4))
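For reference, COUNTER_MASK(q, n) places counter n's 3-bit capability code at bit offset (9 - n) * 3, so the five counters above land at bits 27, 24, 21, 18 and 15, and COUNTER_MASK0_9 works out to cpu_to_be32(0x09248000). A standalone check of that arithmetic (plain userspace C, not driver code):

/* Standalone check of the COUNTER_MASK packing above. */
#include <assert.h>
#include <stdint.h>

#define EX_COUNTER_MASK(q, n) ((uint32_t)(q) << ((9 - (n)) * 3))

int main(void)
{
	uint32_t mask = EX_COUNTER_MASK(1, 0) | EX_COUNTER_MASK(1, 1) |
			EX_COUNTER_MASK(1, 2) | EX_COUNTER_MASK(1, 3) |
			EX_COUNTER_MASK(1, 4);

	assert(mask == 0x09248000);	/* host-order value behind COUNTER_MASK0_9 */
	return 0;
}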
-
-#endif /* _QIB_MAD_H */
diff --git a/drivers/infiniband/hw/qib/qib_mmap.c b/drivers/infiniband/hw/qib/qib_mmap.c
deleted file mode 100644
index 34927b700b0e..000000000000
--- a/drivers/infiniband/hw/qib/qib_mmap.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <asm/pgtable.h>
-
-#include "qib_verbs.h"
-
-/**
- * qib_release_mmap_info - free mmap info structure
- * @ref: a pointer to the kref within struct qib_mmap_info
- */
-void qib_release_mmap_info(struct kref *ref)
-{
- struct qib_mmap_info *ip =
- container_of(ref, struct qib_mmap_info, ref);
- struct qib_ibdev *dev = to_idev(ip->context->device);
-
- spin_lock_irq(&dev->pending_lock);
- list_del(&ip->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
-
- vfree(ip->obj);
- kfree(ip);
-}
-
-/*
- * open and close keep track of how many times the CQ is mapped,
- * to avoid releasing it.
- */
-static void qib_vma_open(struct vm_area_struct *vma)
-{
- struct qib_mmap_info *ip = vma->vm_private_data;
-
- kref_get(&ip->ref);
-}
-
-static void qib_vma_close(struct vm_area_struct *vma)
-{
- struct qib_mmap_info *ip = vma->vm_private_data;
-
- kref_put(&ip->ref, qib_release_mmap_info);
-}
-
-static const struct vm_operations_struct qib_vm_ops = {
- .open = qib_vma_open,
- .close = qib_vma_close,
-};
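The open/close pair above implements a per-mapping reference count on top of a kref: mmap and every VMA duplication take a reference, munmap drops one, and the object is released only when the last reference goes away. The same lifetime rule with a bare atomic counter, illustrative names only:

/*
 * Illustrative only: one reference per live VMA (taken in .open, dropped
 * in .close) plus one held by the object that created the mapping; the
 * buffer is freed when the last reference disappears.
 */
struct mapped_obj {
	atomic_t refs;
	void *obj;	/* vmalloc'ed buffer handed out via mmap */
};

static void mapped_obj_open(struct mapped_obj *m)
{
	atomic_inc(&m->refs);		/* VMA duplicated (fork, split) */
}

static void mapped_obj_close(struct mapped_obj *m)
{
	if (atomic_dec_and_test(&m->refs)) {
		vfree(m->obj);		/* last mapping and creator gone */
		kfree(m);
	}
}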
-
-/**
- * qib_mmap - create a new mmap region
- * @context: the IB user context of the process making the mmap() call
- * @vma: the VMA to be initialized
- * Return zero if the mmap is OK. Otherwise, return an errno.
- */
-int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
-{
- struct qib_ibdev *dev = to_idev(context->device);
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
- unsigned long size = vma->vm_end - vma->vm_start;
- struct qib_mmap_info *ip, *pp;
- int ret = -EINVAL;
-
- /*
- * Search the device's list of objects waiting for a mmap call.
- * Normally, this list is very short since a call to create a
- * CQ, QP, or SRQ is soon followed by a call to mmap().
- */
- spin_lock_irq(&dev->pending_lock);
- list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
- pending_mmaps) {
- /* Only the creator is allowed to mmap the object */
- if (context != ip->context || (__u64) offset != ip->offset)
- continue;
- /* Don't allow a mmap larger than the object. */
- if (size > ip->size)
- break;
-
- list_del_init(&ip->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
-
- ret = remap_vmalloc_range(vma, ip->obj, 0);
- if (ret)
- goto done;
- vma->vm_ops = &qib_vm_ops;
- vma->vm_private_data = ip;
- qib_vma_open(vma);
- goto done;
- }
- spin_unlock_irq(&dev->pending_lock);
-done:
- return ret;
-}
-
-/*
- * Allocate information for qib_mmap
- */
-struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev,
- u32 size,
- struct ib_ucontext *context,
- void *obj) {
- struct qib_mmap_info *ip;
-
- ip = kmalloc(sizeof(*ip), GFP_KERNEL);
- if (!ip)
- goto bail;
-
- size = PAGE_ALIGN(size);
-
- spin_lock_irq(&dev->mmap_offset_lock);
- if (dev->mmap_offset == 0)
- dev->mmap_offset = PAGE_SIZE;
- ip->offset = dev->mmap_offset;
- dev->mmap_offset += size;
- spin_unlock_irq(&dev->mmap_offset_lock);
-
- INIT_LIST_HEAD(&ip->pending_mmaps);
- ip->size = size;
- ip->context = context;
- ip->obj = obj;
- kref_init(&ip->ref);
-
-bail:
- return ip;
-}
-
-void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
- u32 size, void *obj)
-{
- size = PAGE_ALIGN(size);
-
- spin_lock_irq(&dev->mmap_offset_lock);
- if (dev->mmap_offset == 0)
- dev->mmap_offset = PAGE_SIZE;
- ip->offset = dev->mmap_offset;
- dev->mmap_offset += size;
- spin_unlock_irq(&dev->mmap_offset_lock);
-
- ip->size = size;
- ip->obj = obj;
-}
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
deleted file mode 100644
index 19220dcb9a3b..000000000000
--- a/drivers/infiniband/hw/qib/qib_mr.c
+++ /dev/null
@@ -1,537 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/ib_umem.h>
-#include <rdma/ib_smi.h>
-
-#include "qib.h"
-
-/* Fast memory region */
-struct qib_fmr {
- struct ib_fmr ibfmr;
- struct qib_mregion mr; /* must be last */
-};
-
-static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
-{
- return container_of(ibfmr, struct qib_fmr, ibfmr);
-}
-
-static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd,
- int count)
-{
- int m, i = 0;
- int rval = 0;
-
- m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
- for (; i < m; i++) {
- mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
- if (!mr->map[i])
- goto bail;
- }
- mr->mapsz = m;
- init_completion(&mr->comp);
- /* count returning the ptr to user */
	/* one reference accounts for the pointer returned to the caller */
- atomic_set(&mr->refcount, 1);
- mr->pd = pd;
- mr->max_segs = count;
-out:
- return rval;
-bail:
- while (i)
- kfree(mr->map[--i]);
- rval = -ENOMEM;
- goto out;
-}
-
-static void deinit_qib_mregion(struct qib_mregion *mr)
-{
- int i = mr->mapsz;
-
- mr->mapsz = 0;
- while (i)
- kfree(mr->map[--i]);
-}
-
-
-/**
- * qib_get_dma_mr - get a DMA memory region
- * @pd: protection domain for this memory region
- * @acc: access flags
- *
- * Returns the memory region on success, otherwise returns an errno.
- * Note that all DMA addresses should be created via the
- * struct ib_dma_mapping_ops functions (see qib_dma.c).
- */
-struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
-{
- struct qib_mr *mr = NULL;
- struct ib_mr *ret;
- int rval;
-
- if (to_ipd(pd)->user) {
- ret = ERR_PTR(-EPERM);
- goto bail;
- }
-
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- rval = init_qib_mregion(&mr->mr, pd, 0);
- if (rval) {
- ret = ERR_PTR(rval);
- goto bail;
- }
-
-
- rval = qib_alloc_lkey(&mr->mr, 1);
- if (rval) {
- ret = ERR_PTR(rval);
- goto bail_mregion;
- }
-
- mr->mr.access_flags = acc;
- ret = &mr->ibmr;
-done:
- return ret;
-
-bail_mregion:
- deinit_qib_mregion(&mr->mr);
-bail:
- kfree(mr);
- goto done;
-}
-
-static struct qib_mr *alloc_mr(int count, struct ib_pd *pd)
-{
- struct qib_mr *mr;
- int rval = -ENOMEM;
- int m;
-
- /* Allocate struct plus pointers to first level page tables. */
- m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
- mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
- if (!mr)
- goto bail;
-
- rval = init_qib_mregion(&mr->mr, pd, count);
- if (rval)
- goto bail;
- /*
- * ib_reg_phys_mr() will initialize mr->ibmr except for
- * lkey and rkey.
- */
- rval = qib_alloc_lkey(&mr->mr, 0);
- if (rval)
- goto bail_mregion;
- mr->ibmr.lkey = mr->mr.lkey;
- mr->ibmr.rkey = mr->mr.lkey;
-done:
- return mr;
-
-bail_mregion:
- deinit_qib_mregion(&mr->mr);
-bail:
- kfree(mr);
- mr = ERR_PTR(rval);
- goto done;
-}
-
-/**
- * qib_reg_phys_mr - register a physical memory region
- * @pd: protection domain for this memory region
- * @buffer_list: pointer to the list of physical buffers to register
- * @num_phys_buf: the number of physical buffers to register
- * @iova_start: the starting address passed over IB which maps to this MR
- *
- * Returns the memory region on success, otherwise returns an errno.
- */
-struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
- struct ib_phys_buf *buffer_list,
- int num_phys_buf, int acc, u64 *iova_start)
-{
- struct qib_mr *mr;
- int n, m, i;
- struct ib_mr *ret;
-
- mr = alloc_mr(num_phys_buf, pd);
- if (IS_ERR(mr)) {
- ret = (struct ib_mr *)mr;
- goto bail;
- }
-
- mr->mr.user_base = *iova_start;
- mr->mr.iova = *iova_start;
- mr->mr.access_flags = acc;
-
- m = 0;
- n = 0;
- for (i = 0; i < num_phys_buf; i++) {
- mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
- mr->mr.map[m]->segs[n].length = buffer_list[i].size;
- mr->mr.length += buffer_list[i].size;
- n++;
- if (n == QIB_SEGSZ) {
- m++;
- n = 0;
- }
- }
-
- ret = &mr->ibmr;
-
-bail:
- return ret;
-}
-
-/**
- * qib_reg_user_mr - register a userspace memory region
- * @pd: protection domain for this memory region
- * @start: starting userspace address
- * @length: length of region to register
- * @mr_access_flags: access flags for this memory region
- * @udata: unused by the QLogic_IB driver
- *
- * Returns the memory region on success, otherwise returns an errno.
- */
-struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt_addr, int mr_access_flags,
- struct ib_udata *udata)
-{
- struct qib_mr *mr;
- struct ib_umem *umem;
- struct scatterlist *sg;
- int n, m, entry;
- struct ib_mr *ret;
-
- if (length == 0) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
-
- umem = ib_umem_get(pd->uobject->context, start, length,
- mr_access_flags, 0);
- if (IS_ERR(umem))
- return (void *) umem;
-
- n = umem->nmap;
-
- mr = alloc_mr(n, pd);
- if (IS_ERR(mr)) {
- ret = (struct ib_mr *)mr;
- ib_umem_release(umem);
- goto bail;
- }
-
- mr->mr.user_base = start;
- mr->mr.iova = virt_addr;
- mr->mr.length = length;
- mr->mr.offset = ib_umem_offset(umem);
- mr->mr.access_flags = mr_access_flags;
- mr->umem = umem;
-
- if (is_power_of_2(umem->page_size))
- mr->mr.page_shift = ilog2(umem->page_size);
- m = 0;
- n = 0;
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- void *vaddr;
-
- vaddr = page_address(sg_page(sg));
- if (!vaddr) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
- mr->mr.map[m]->segs[n].vaddr = vaddr;
- mr->mr.map[m]->segs[n].length = umem->page_size;
- n++;
- if (n == QIB_SEGSZ) {
- m++;
- n = 0;
- }
- }
- ret = &mr->ibmr;
-
-bail:
- return ret;
-}
-
-/**
- * qib_dereg_mr - unregister and free a memory region
- * @ibmr: the memory region to free
- *
- * Returns 0 on success.
- *
- * Note that this is called to free MRs created by qib_get_dma_mr()
- * or qib_reg_user_mr().
- */
-int qib_dereg_mr(struct ib_mr *ibmr)
-{
- struct qib_mr *mr = to_imr(ibmr);
- int ret = 0;
- unsigned long timeout;
-
- qib_free_lkey(&mr->mr);
-
- qib_put_mr(&mr->mr); /* will set completion if last */
- timeout = wait_for_completion_timeout(&mr->mr.comp,
- 5 * HZ);
- if (!timeout) {
- qib_get_mr(&mr->mr);
- ret = -EBUSY;
- goto out;
- }
- deinit_qib_mregion(&mr->mr);
- if (mr->umem)
- ib_umem_release(mr->umem);
- kfree(mr);
-out:
- return ret;
-}
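The teardown above uses a common kernel idiom: drop your reference, wait on a completion that the release path signals on the final put, and bail out with -EBUSY (re-taking the reference) if the wait times out. A stripped-down sketch with illustrative names; the driver itself ties the completion to an RCU callback fired from the last qib_put_mr:

/*
 * Stripped-down "drop ref, wait for last put" teardown sketch
 * (illustrative names only).
 */
struct refed_obj {
	atomic_t refcount;
	struct completion comp;
};

static void refed_obj_put(struct refed_obj *o)
{
	if (atomic_dec_and_test(&o->refcount))
		complete(&o->comp);		/* wake a waiting destroy */
}

static int refed_obj_destroy(struct refed_obj *o)
{
	refed_obj_put(o);			/* drop the creator's reference */

	if (!wait_for_completion_timeout(&o->comp, 5 * HZ)) {
		atomic_inc(&o->refcount);	/* give the reference back */
		return -EBUSY;			/* still in use elsewhere */
	}

	kfree(o);
	return 0;
}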
-
-/*
- * Allocate a memory region usable with the
- * IB_WR_FAST_REG_MR send work request.
- *
- * Return the memory region on success, otherwise return an errno.
- */
-struct ib_mr *qib_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_num_sg)
-{
- struct qib_mr *mr;
-
- if (mr_type != IB_MR_TYPE_MEM_REG)
- return ERR_PTR(-EINVAL);
-
- mr = alloc_mr(max_num_sg, pd);
- if (IS_ERR(mr))
- return (struct ib_mr *)mr;
-
- return &mr->ibmr;
-}
-
-struct ib_fast_reg_page_list *
-qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
-{
- unsigned size = page_list_len * sizeof(u64);
- struct ib_fast_reg_page_list *pl;
-
- if (size > PAGE_SIZE)
- return ERR_PTR(-EINVAL);
-
- pl = kzalloc(sizeof(*pl), GFP_KERNEL);
- if (!pl)
- return ERR_PTR(-ENOMEM);
-
- pl->page_list = kzalloc(size, GFP_KERNEL);
- if (!pl->page_list)
- goto err_free;
-
- return pl;
-
-err_free:
- kfree(pl);
- return ERR_PTR(-ENOMEM);
-}
-
-void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl)
-{
- kfree(pl->page_list);
- kfree(pl);
-}
-
-/**
- * qib_alloc_fmr - allocate a fast memory region
- * @pd: the protection domain for this memory region
- * @mr_access_flags: access flags for this memory region
- * @fmr_attr: fast memory region attributes
- *
- * Returns the memory region on success, otherwise returns an errno.
- */
-struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr)
-{
- struct qib_fmr *fmr;
- int m;
- struct ib_fmr *ret;
- int rval = -ENOMEM;
-
- /* Allocate struct plus pointers to first level page tables. */
- m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
- fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
- if (!fmr)
- goto bail;
-
- rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages);
- if (rval)
- goto bail;
-
- /*
- * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
- * rkey.
- */
- rval = qib_alloc_lkey(&fmr->mr, 0);
- if (rval)
- goto bail_mregion;
- fmr->ibfmr.rkey = fmr->mr.lkey;
- fmr->ibfmr.lkey = fmr->mr.lkey;
- /*
- * Resources are allocated but no valid mapping (RKEY can't be
- * used).
- */
- fmr->mr.access_flags = mr_access_flags;
- fmr->mr.max_segs = fmr_attr->max_pages;
- fmr->mr.page_shift = fmr_attr->page_shift;
-
- ret = &fmr->ibfmr;
-done:
- return ret;
-
-bail_mregion:
- deinit_qib_mregion(&fmr->mr);
-bail:
- kfree(fmr);
- ret = ERR_PTR(rval);
- goto done;
-}
-
-/**
- * qib_map_phys_fmr - set up a fast memory region
- * @ibfmr: the fast memory region to set up
- * @page_list: the list of pages to associate with the fast memory region
- * @list_len: the number of pages to associate with the fast memory region
- * @iova: the virtual address of the start of the fast memory region
- *
- * This may be called from interrupt context.
- */
-
-int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova)
-{
- struct qib_fmr *fmr = to_ifmr(ibfmr);
- struct qib_lkey_table *rkt;
- unsigned long flags;
- int m, n, i;
- u32 ps;
- int ret;
-
- i = atomic_read(&fmr->mr.refcount);
- if (i > 2)
- return -EBUSY;
-
- if (list_len > fmr->mr.max_segs) {
- ret = -EINVAL;
- goto bail;
- }
- rkt = &to_idev(ibfmr->device)->lk_table;
- spin_lock_irqsave(&rkt->lock, flags);
- fmr->mr.user_base = iova;
- fmr->mr.iova = iova;
- ps = 1 << fmr->mr.page_shift;
- fmr->mr.length = list_len * ps;
- m = 0;
- n = 0;
- for (i = 0; i < list_len; i++) {
- fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
- fmr->mr.map[m]->segs[n].length = ps;
- if (++n == QIB_SEGSZ) {
- m++;
- n = 0;
- }
- }
- spin_unlock_irqrestore(&rkt->lock, flags);
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * qib_unmap_fmr - unmap fast memory regions
- * @fmr_list: the list of fast memory regions to unmap
- *
- * Returns 0 on success.
- */
-int qib_unmap_fmr(struct list_head *fmr_list)
-{
- struct qib_fmr *fmr;
- struct qib_lkey_table *rkt;
- unsigned long flags;
-
- list_for_each_entry(fmr, fmr_list, ibfmr.list) {
- rkt = &to_idev(fmr->ibfmr.device)->lk_table;
- spin_lock_irqsave(&rkt->lock, flags);
- fmr->mr.user_base = 0;
- fmr->mr.iova = 0;
- fmr->mr.length = 0;
- spin_unlock_irqrestore(&rkt->lock, flags);
- }
- return 0;
-}
-
-/**
- * qib_dealloc_fmr - deallocate a fast memory region
- * @ibfmr: the fast memory region to deallocate
- *
- * Returns 0 on success.
- */
-int qib_dealloc_fmr(struct ib_fmr *ibfmr)
-{
- struct qib_fmr *fmr = to_ifmr(ibfmr);
- int ret = 0;
- unsigned long timeout;
-
- qib_free_lkey(&fmr->mr);
- qib_put_mr(&fmr->mr); /* will set completion if last */
- timeout = wait_for_completion_timeout(&fmr->mr.comp,
- 5 * HZ);
- if (!timeout) {
- qib_get_mr(&fmr->mr);
- ret = -EBUSY;
- goto out;
- }
- deinit_qib_mregion(&fmr->mr);
- kfree(fmr);
-out:
- return ret;
-}
-
-void mr_rcu_callback(struct rcu_head *list)
-{
- struct qib_mregion *mr = container_of(list, struct qib_mregion, list);
-
- complete(&mr->comp);
-}
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
deleted file mode 100644
index 4758a3801ae8..000000000000
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ /dev/null
@@ -1,719 +0,0 @@
-/*
- * Copyright (c) 2008, 2009 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/vmalloc.h>
-#include <linux/aer.h>
-#include <linux/module.h>
-
-#include "qib.h"
-
-/*
- * This file contains PCIe utility routines that are common to the
- * various QLogic InfiniPath adapters
- */
-
-/*
- * Code to adjust PCIe capabilities.
- * To minimize the change footprint, we call it
- * from qib_pcie_params, which every chip-specific
- * file calls, even though this violates some
- * expectations of harmlessness.
- */
-static void qib_tune_pcie_caps(struct qib_devdata *);
-static void qib_tune_pcie_coalesce(struct qib_devdata *);
-
-/*
- * Do all the common PCIe setup and initialization.
- * devdata is not yet allocated, and is not allocated until after this
- * routine returns success. Therefore qib_dev_err() can't be used for error
- * printing.
- */
-int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int ret;
-
- ret = pci_enable_device(pdev);
- if (ret) {
- /*
- * This can happen (in theory) iff:
- * We did a chip reset, and then failed to reprogram the
- * BAR, or the chip reset due to an internal error. We then
- * unloaded the driver and reloaded it.
- *
- * Both reset cases set the BAR back to initial state. For
- * the latter case, the AER sticky error bit at offset 0x718
- * should be set, but the Linux kernel doesn't yet know
- * about that, it appears. If the original BAR was retained
- * in the kernel data structures, this may be OK.
- */
- qib_early_err(&pdev->dev, "pci enable failed: error %d\n",
- -ret);
- goto done;
- }
-
- ret = pci_request_regions(pdev, QIB_DRV_NAME);
- if (ret) {
- qib_devinfo(pdev, "pci_request_regions fails: err %d\n", -ret);
- goto bail;
- }
-
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (ret) {
- /*
- * If the 64 bit setup fails, try 32 bit. Some systems
- * do not set up 64 bit maps when 2GB or less memory
- * is installed.
- */
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret) {
- qib_devinfo(pdev, "Unable to set DMA mask: %d\n", ret);
- goto bail;
- }
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- } else
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (ret) {
- qib_early_err(&pdev->dev,
- "Unable to set DMA consistent mask: %d\n", ret);
- goto bail;
- }
-
- pci_set_master(pdev);
- ret = pci_enable_pcie_error_reporting(pdev);
- if (ret) {
- qib_early_err(&pdev->dev,
- "Unable to enable pcie error reporting: %d\n",
- ret);
- ret = 0;
- }
- goto done;
-
-bail:
- pci_disable_device(pdev);
- pci_release_regions(pdev);
-done:
- return ret;
-}
-
-/*
- * Do remaining PCIe setup, once dd is allocated, and save away
- * fields required to re-initialize after a chip reset, or for
- * various other purposes
- */
-int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- unsigned long len;
- resource_size_t addr;
-
- dd->pcidev = pdev;
- pci_set_drvdata(pdev, dd);
-
- addr = pci_resource_start(pdev, 0);
- len = pci_resource_len(pdev, 0);
-
-#if defined(__powerpc__)
- /* There isn't a generic way to specify writethrough mappings */
- dd->kregbase = __ioremap(addr, len, _PAGE_NO_CACHE | _PAGE_WRITETHRU);
-#else
- dd->kregbase = ioremap_nocache(addr, len);
-#endif
-
- if (!dd->kregbase)
- return -ENOMEM;
-
- dd->kregend = (u64 __iomem *)((void __iomem *) dd->kregbase + len);
- dd->physaddr = addr; /* used for io_remap, etc. */
-
- /*
- * Save BARs to rewrite after device reset. Save all 64 bits of
- * BAR, just in case.
- */
- dd->pcibar0 = addr;
- dd->pcibar1 = addr >> 32;
- dd->deviceid = ent->device; /* save for later use */
- dd->vendorid = ent->vendor;
-
- return 0;
-}
-
-/*
- * Do PCIe cleanup, after chip-specific cleanup, etc. Just prior
- * to releasing the dd memory.
- * void because none of the core pcie cleanup returns are void
- */
-void qib_pcie_ddcleanup(struct qib_devdata *dd)
-{
- u64 __iomem *base = (void __iomem *) dd->kregbase;
-
- dd->kregbase = NULL;
- iounmap(base);
- if (dd->piobase)
- iounmap(dd->piobase);
- if (dd->userbase)
- iounmap(dd->userbase);
- if (dd->piovl15base)
- iounmap(dd->piovl15base);
-
- pci_disable_device(dd->pcidev);
- pci_release_regions(dd->pcidev);
-
- pci_set_drvdata(dd->pcidev, NULL);
-}
-
-static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
- struct qib_msix_entry *qib_msix_entry)
-{
- int ret;
- int nvec = *msixcnt;
- struct msix_entry *msix_entry;
- int i;
-
- ret = pci_msix_vec_count(dd->pcidev);
- if (ret < 0)
- goto do_intx;
-
- nvec = min(nvec, ret);
-
- /* We can't pass qib_msix_entry array to qib_msix_setup
- * so use a dummy msix_entry array and copy the allocated
- * irq back to the qib_msix_entry array. */
- msix_entry = kcalloc(nvec, sizeof(*msix_entry), GFP_KERNEL);
- if (!msix_entry)
- goto do_intx;
-
- for (i = 0; i < nvec; i++)
- msix_entry[i] = qib_msix_entry[i].msix;
-
- ret = pci_enable_msix_range(dd->pcidev, msix_entry, 1, nvec);
- if (ret < 0)
- goto free_msix_entry;
- else
- nvec = ret;
-
- for (i = 0; i < nvec; i++)
- qib_msix_entry[i].msix = msix_entry[i];
-
- kfree(msix_entry);
- *msixcnt = nvec;
- return;
-
-free_msix_entry:
- kfree(msix_entry);
-
-do_intx:
- qib_dev_err(
- dd,
- "pci_enable_msix_range %d vectors failed: %d, falling back to INTx\n",
- nvec, ret);
- *msixcnt = 0;
- qib_enable_intx(dd->pcidev);
-}
-
-/**
- * We save the msi lo and hi values, so we can restore them after
- * chip reset (the kernel PCI infrastructure doesn't yet handle that
- * correctly).
- */
-static int qib_msi_setup(struct qib_devdata *dd, int pos)
-{
- struct pci_dev *pdev = dd->pcidev;
- u16 control;
- int ret;
-
- ret = pci_enable_msi(pdev);
- if (ret)
- qib_dev_err(dd,
- "pci_enable_msi failed: %d, interrupts may not work\n",
- ret);
- /* continue even if it fails, we may still be OK... */
-
- pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO,
- &dd->msi_lo);
- pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI,
- &dd->msi_hi);
- pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
- /* now save the data (vector) info */
- pci_read_config_word(pdev, pos + ((control & PCI_MSI_FLAGS_64BIT)
- ? 12 : 8),
- &dd->msi_data);
- return ret;
-}
-
-int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
- struct qib_msix_entry *entry)
-{
- u16 linkstat, speed;
- int pos = 0, ret = 1;
-
- if (!pci_is_pcie(dd->pcidev)) {
- qib_dev_err(dd, "Can't find PCI Express capability!\n");
- /* set up something... */
- dd->lbus_width = 1;
- dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
- goto bail;
- }
-
- pos = dd->pcidev->msix_cap;
- if (nent && *nent && pos) {
- qib_msix_setup(dd, pos, nent, entry);
- ret = 0; /* did it, either MSIx or INTx */
- } else {
- pos = dd->pcidev->msi_cap;
- if (pos)
- ret = qib_msi_setup(dd, pos);
- else
- qib_dev_err(dd, "No PCI MSI or MSIx capability!\n");
- }
- if (!pos)
- qib_enable_intx(dd->pcidev);
-
- pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
- /*
- * speed is bits 0-3, linkwidth is bits 4-8
- * no defines for them in headers
- */
- speed = linkstat & 0xf;
- linkstat >>= 4;
- linkstat &= 0x1f;
- dd->lbus_width = linkstat;
-
- switch (speed) {
- case 1:
- dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
- break;
- case 2:
- dd->lbus_speed = 5000; /* Gen2, 5GHz */
- break;
- default: /* not defined, assume gen1 */
- dd->lbus_speed = 2500;
- break;
- }
-
- /*
- * Check against expected pcie width and complain if "wrong"
- * on first initialization, not afterwards (i.e., reset).
- */
- if (minw && linkstat < minw)
- qib_dev_err(dd,
- "PCIe width %u (x%u HCA), performance reduced\n",
- linkstat, minw);
-
- qib_tune_pcie_caps(dd);
-
- qib_tune_pcie_coalesce(dd);
-
-bail:
- /* fill in string, even on errors */
- snprintf(dd->lbus_info, sizeof(dd->lbus_info),
- "PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
- return ret;
-}
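
As a side note on the link-status decode in the function above: bits 3:0 of PCI_EXP_LNKSTA carry the current link speed code (1 = 2.5 GT/s, 2 = 5 GT/s for the generations this driver knows about) and the next bits carry the negotiated lane count. A minimal user-space sketch of the same decode, using the driver's own masks and purely for illustration (the register value passed in is hypothetical), might look like:

#include <stdio.h>
#include <stdint.h>

/* Decode the fields qib_pcie_params() extracts from the Link Status word:
 * bits 3:0 are the link speed code, the next five bits the lane count. */
static void decode_lnksta(uint16_t linkstat)
{
	unsigned int speed = linkstat & 0xf;
	unsigned int width = (linkstat >> 4) & 0x1f;   /* same mask as the driver uses */
	unsigned int mts = (speed == 2) ? 5000 : 2500; /* default to Gen1, as above */

	printf("PCIe link: %u MT/s, x%u\n", mts, width);
}

int main(void)
{
	decode_lnksta(0x0081);   /* hypothetical register value: speed code 1, x8 */
	return 0;
}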
-
-/*
- * Setup pcie interrupt stuff again after a reset. I'd like to just call
- * pci_enable_msi() again for msi, but when I do that,
- * the MSI enable bit doesn't get set in the command word, and
- * we switch to a different interrupt vector, which is confusing,
- * so I instead just do it all inline. Perhaps this can somehow be
- * tied into the PCIe hotplug support at some point.
- */
-int qib_reinit_intr(struct qib_devdata *dd)
-{
- int pos;
- u16 control;
- int ret = 0;
-
- /* If we aren't using MSI, don't restore it */
- if (!dd->msi_lo)
- goto bail;
-
- pos = dd->pcidev->msi_cap;
- if (!pos) {
- qib_dev_err(dd,
- "Can't find MSI capability, can't restore MSI settings\n");
- ret = 0;
- /* nothing special for MSIx, just MSI */
- goto bail;
- }
- pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
- dd->msi_lo);
- pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
- dd->msi_hi);
- pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
- if (!(control & PCI_MSI_FLAGS_ENABLE)) {
- control |= PCI_MSI_FLAGS_ENABLE;
- pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
- control);
- }
- /* now rewrite the data (vector) info */
- pci_write_config_word(dd->pcidev, pos +
- ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
- dd->msi_data);
- ret = 1;
-bail:
- if (!ret && (dd->flags & QIB_HAS_INTX)) {
- qib_enable_intx(dd->pcidev);
- ret = 1;
- }
-
- /* and now set the pci master bit again */
- pci_set_master(dd->pcidev);
-
- return ret;
-}
-
-/*
- * Disable msi interrupt if enabled, and clear msi_lo.
- * This is used primarily for the fallback to INTx, but
- * is also used in reinit after reset, and during cleanup.
- */
-void qib_nomsi(struct qib_devdata *dd)
-{
- dd->msi_lo = 0;
- pci_disable_msi(dd->pcidev);
-}
-
-/*
- * Same as qib_nomsi, but for MSIx.
- */
-void qib_nomsix(struct qib_devdata *dd)
-{
- pci_disable_msix(dd->pcidev);
-}
-
-/*
- * Similar to pci_intx(pdev, 1), except that we make sure
- * msi(x) is off.
- */
-void qib_enable_intx(struct pci_dev *pdev)
-{
- u16 cw, new;
- int pos;
-
- /* first, turn on INTx */
- pci_read_config_word(pdev, PCI_COMMAND, &cw);
- new = cw & ~PCI_COMMAND_INTX_DISABLE;
- if (new != cw)
- pci_write_config_word(pdev, PCI_COMMAND, new);
-
- pos = pdev->msi_cap;
- if (pos) {
- /* then turn off MSI */
- pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
- new = cw & ~PCI_MSI_FLAGS_ENABLE;
- if (new != cw)
- pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new);
- }
- pos = pdev->msix_cap;
- if (pos) {
- /* then turn off MSIx */
- pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &cw);
- new = cw & ~PCI_MSIX_FLAGS_ENABLE;
- if (new != cw)
- pci_write_config_word(pdev, pos + PCI_MSIX_FLAGS, new);
- }
-}
-
-/*
- * These two routines are helper routines for the device reset code
- * to move all the pcie code out of the chip-specific driver code.
- */
-void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline)
-{
- pci_read_config_word(dd->pcidev, PCI_COMMAND, cmd);
- pci_read_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
- pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
-}
-
-void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
-{
- int r;
-
- r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
- dd->pcibar0);
- if (r)
- qib_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);
- r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
- dd->pcibar1);
- if (r)
- qib_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);
- /* now re-enable memory access, and restore cosmetic settings */
- pci_write_config_word(dd->pcidev, PCI_COMMAND, cmd);
- pci_write_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
- pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
- r = pci_enable_device(dd->pcidev);
- if (r)
- qib_dev_err(dd,
- "pci_enable_device failed after reset: %d\n", r);
-}
-
-
-static int qib_pcie_coalesce;
-module_param_named(pcie_coalesce, qib_pcie_coalesce, int, S_IRUGO);
-MODULE_PARM_DESC(pcie_coalesce, "tune PCIe coalescing on some Intel chipsets");
-
-/*
- * Enable PCIe completion and data coalescing, on Intel 5x00 and 7300
- * chipsets. This is known to be unsafe for some revisions of some
- * of these chipsets, with some BIOS settings, and enabling it on those
- * systems may result in the system crashing, and/or data corruption.
- */
-static void qib_tune_pcie_coalesce(struct qib_devdata *dd)
-{
- int r;
- struct pci_dev *parent;
- u16 devid;
- u32 mask, bits, val;
-
- if (!qib_pcie_coalesce)
- return;
-
- /* Find out supported and configured values for parent (root) */
- parent = dd->pcidev->bus->self;
- if (parent->bus->parent) {
- qib_devinfo(dd->pcidev, "Parent not root\n");
- return;
- }
- if (!pci_is_pcie(parent))
- return;
- if (parent->vendor != 0x8086)
- return;
-
- /*
- * - bit 12: Max_rdcmp_Imt_EN: need to set to 1
- * - bit 11: COALESCE_FORCE: need to set to 0
- * - bit 10: COALESCE_EN: need to set to 1
- * (but with limitations on some chipsets)
- *
- * On the Intel 5000, 5100, and 7300 chipsets, there is
- * also: - bit 25:24: COALESCE_MODE, need to set to 0
- */
- devid = parent->device;
- if (devid >= 0x25e2 && devid <= 0x25fa) {
- /* 5000 P/V/X/Z */
- if (parent->revision <= 0xb2)
- bits = 1U << 10;
- else
- bits = 7U << 10;
- mask = (3U << 24) | (7U << 10);
- } else if (devid >= 0x65e2 && devid <= 0x65fa) {
- /* 5100 */
- bits = 1U << 10;
- mask = (3U << 24) | (7U << 10);
- } else if (devid >= 0x4021 && devid <= 0x402e) {
- /* 5400 */
- bits = 7U << 10;
- mask = 7U << 10;
- } else if (devid >= 0x3604 && devid <= 0x360a) {
- /* 7300 */
- bits = 7U << 10;
- mask = (3U << 24) | (7U << 10);
- } else {
- /* not one of the chipsets that we know about */
- return;
- }
- pci_read_config_dword(parent, 0x48, &val);
- val &= ~mask;
- val |= bits;
- r = pci_write_config_dword(parent, 0x48, val);
-}
-
-/*
- * BIOS may not set PCIe bus-utilization parameters for best performance.
- * Check and optionally adjust them to maximize our throughput.
- */
-static int qib_pcie_caps;
-module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO);
-MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
-
-static void qib_tune_pcie_caps(struct qib_devdata *dd)
-{
- struct pci_dev *parent;
- u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
- u16 rc_mrrs, ep_mrrs, max_mrrs;
-
- /* Find out supported and configured values for parent (root) */
- parent = dd->pcidev->bus->self;
- if (!pci_is_root_bus(parent->bus)) {
- qib_devinfo(dd->pcidev, "Parent not root\n");
- return;
- }
-
- if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev))
- return;
-
- rc_mpss = parent->pcie_mpss;
- rc_mps = ffs(pcie_get_mps(parent)) - 8;
- /* Find out supported and configured values for endpoint (us) */
- ep_mpss = dd->pcidev->pcie_mpss;
- ep_mps = ffs(pcie_get_mps(dd->pcidev)) - 8;
-
- /* Find max payload supported by root, endpoint */
- if (rc_mpss > ep_mpss)
- rc_mpss = ep_mpss;
-
- /* If Supported greater than limit in module param, limit it */
- if (rc_mpss > (qib_pcie_caps & 7))
- rc_mpss = qib_pcie_caps & 7;
- /* If less than (allowed, supported), bump root payload */
- if (rc_mpss > rc_mps) {
- rc_mps = rc_mpss;
- pcie_set_mps(parent, 128 << rc_mps);
- }
- /* If less than (allowed, supported), bump endpoint payload */
- if (rc_mpss > ep_mps) {
- ep_mps = rc_mpss;
- pcie_set_mps(dd->pcidev, 128 << ep_mps);
- }
-
- /*
- * Now the Read Request size.
- * No field for max supported, but PCIe spec limits it to 4096,
- * which is code '5' (log2(4096) - 7)
- */
- max_mrrs = 5;
- if (max_mrrs > ((qib_pcie_caps >> 4) & 7))
- max_mrrs = (qib_pcie_caps >> 4) & 7;
-
- max_mrrs = 128 << max_mrrs;
- rc_mrrs = pcie_get_readrq(parent);
- ep_mrrs = pcie_get_readrq(dd->pcidev);
-
- if (max_mrrs > rc_mrrs) {
- rc_mrrs = max_mrrs;
- pcie_set_readrq(parent, rc_mrrs);
- }
- if (max_mrrs > ep_mrrs) {
- ep_mrrs = max_mrrs;
- pcie_set_readrq(dd->pcidev, ep_mrrs);
- }
-}
-/* End of PCIe capability tuning */
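
For reference, the payload and read-request sizes handled by qib_tune_pcie_caps() use the standard PCIe encoding where code n means 128 << n bytes (so code 5 is the 4096-byte ceiling mentioned in the comment), and the driver turns a byte count back into a code with ffs(size) - 8. A small stand-alone sketch of that round trip, for illustration only and not part of the driver:

#include <stdio.h>
#include <strings.h>   /* ffs() */

/* PCIe Max_Payload_Size / Max_Read_Request_Size encoding: code n -> 128 << n bytes. */
static unsigned int code_to_bytes(unsigned int code)
{
	return 128u << code;             /* code 0 -> 128 bytes, code 5 -> 4096 bytes */
}

static int bytes_to_code(unsigned int bytes)
{
	return ffs((int)bytes) - 8;      /* 128 has its first set bit at position 8 */
}

int main(void)
{
	unsigned int code;

	for (code = 0; code <= 5; code++) {
		unsigned int bytes = code_to_bytes(code);

		printf("code %u -> %4u bytes -> code %d\n",
		       code, bytes, bytes_to_code(bytes));
	}
	return 0;
}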
-
-/*
- * Everything from here through the qib_pci_err_handler definition is
- * invoked via the PCI error recovery infrastructure, registered with the PCI core.
- */
-static pci_ers_result_t
-qib_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
-{
- struct qib_devdata *dd = pci_get_drvdata(pdev);
- pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
-
- switch (state) {
- case pci_channel_io_normal:
- qib_devinfo(pdev, "State Normal, ignoring\n");
- break;
-
- case pci_channel_io_frozen:
- qib_devinfo(pdev, "State Frozen, requesting reset\n");
- pci_disable_device(pdev);
- ret = PCI_ERS_RESULT_NEED_RESET;
- break;
-
- case pci_channel_io_perm_failure:
- qib_devinfo(pdev, "State Permanent Failure, disabling\n");
- if (dd) {
- /* no more register accesses! */
- dd->flags &= ~QIB_PRESENT;
- qib_disable_after_error(dd);
- }
- /* else early, or other problem */
- ret = PCI_ERS_RESULT_DISCONNECT;
- break;
-
- default: /* shouldn't happen */
- qib_devinfo(pdev, "QIB PCI errors detected (state %d)\n",
- state);
- break;
- }
- return ret;
-}
-
-static pci_ers_result_t
-qib_pci_mmio_enabled(struct pci_dev *pdev)
-{
- u64 words = 0U;
- struct qib_devdata *dd = pci_get_drvdata(pdev);
- pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
-
- if (dd && dd->pport) {
- words = dd->f_portcntr(dd->pport, QIBPORTCNTR_WORDRCV);
- if (words == ~0ULL)
- ret = PCI_ERS_RESULT_NEED_RESET;
- }
- qib_devinfo(pdev,
- "QIB mmio_enabled function called, read wordscntr %Lx, returning %d\n",
- words, ret);
- return ret;
-}
-
-static pci_ers_result_t
-qib_pci_slot_reset(struct pci_dev *pdev)
-{
- qib_devinfo(pdev, "QIB slot_reset function called, ignored\n");
- return PCI_ERS_RESULT_CAN_RECOVER;
-}
-
-static pci_ers_result_t
-qib_pci_link_reset(struct pci_dev *pdev)
-{
- qib_devinfo(pdev, "QIB link_reset function called, ignored\n");
- return PCI_ERS_RESULT_CAN_RECOVER;
-}
-
-static void
-qib_pci_resume(struct pci_dev *pdev)
-{
- struct qib_devdata *dd = pci_get_drvdata(pdev);
-
- qib_devinfo(pdev, "QIB resume function called\n");
- pci_cleanup_aer_uncorrect_error_status(pdev);
- /*
- * Running jobs will fail, since it's asynchronous
- * unlike sysfs-requested reset. Better than
- * doing nothing.
- */
- qib_init(dd, 1); /* same as re-init after reset */
-}
-
-const struct pci_error_handlers qib_pci_err_handler = {
- .error_detected = qib_pci_error_detected,
- .mmio_enabled = qib_pci_mmio_enabled,
- .link_reset = qib_pci_link_reset,
- .slot_reset = qib_pci_slot_reset,
- .resume = qib_pci_resume,
-};
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
deleted file mode 100644
index 4fa88ba2963e..000000000000
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ /dev/null
@@ -1,1376 +0,0 @@
-/*
- * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/err.h>
-#include <linux/vmalloc.h>
-#include <linux/jhash.h>
-#ifdef CONFIG_DEBUG_FS
-#include <linux/seq_file.h>
-#endif
-
-#include "qib.h"
-
-#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
-#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
-
-static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
- struct qpn_map *map, unsigned off)
-{
- return (map - qpt->map) * BITS_PER_PAGE + off;
-}
-
-static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
- struct qpn_map *map, unsigned off,
- unsigned n)
-{
- if (qpt->mask) {
- off++;
- if (((off & qpt->mask) >> 1) >= n)
- off = (off | qpt->mask) + 2;
- } else
- off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
- return off;
-}
-
-/*
- * Convert the AETH credit code into the number of credits.
- */
-static u32 credit_table[31] = {
- 0, /* 0 */
- 1, /* 1 */
- 2, /* 2 */
- 3, /* 3 */
- 4, /* 4 */
- 6, /* 5 */
- 8, /* 6 */
- 12, /* 7 */
- 16, /* 8 */
- 24, /* 9 */
- 32, /* A */
- 48, /* B */
- 64, /* C */
- 96, /* D */
- 128, /* E */
- 192, /* F */
- 256, /* 10 */
- 384, /* 11 */
- 512, /* 12 */
- 768, /* 13 */
- 1024, /* 14 */
- 1536, /* 15 */
- 2048, /* 16 */
- 3072, /* 17 */
- 4096, /* 18 */
- 6144, /* 19 */
- 8192, /* 1A */
- 12288, /* 1B */
- 16384, /* 1C */
- 24576, /* 1D */
- 32768 /* 1E */
-};
-
-static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
-{
- unsigned long page = get_zeroed_page(GFP_KERNEL);
-
- /*
- * Free the page if someone raced with us installing it.
- */
-
- spin_lock(&qpt->lock);
- if (map->page)
- free_page(page);
- else
- map->page = (void *)page;
- spin_unlock(&qpt->lock);
-}
-
-/*
- * Allocate the next available QPN or
- * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
- */
-static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
- enum ib_qp_type type, u8 port)
-{
- u32 i, offset, max_scan, qpn;
- struct qpn_map *map;
- u32 ret;
-
- if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
- unsigned n;
-
- ret = type == IB_QPT_GSI;
- n = 1 << (ret + 2 * (port - 1));
- spin_lock(&qpt->lock);
- if (qpt->flags & n)
- ret = -EINVAL;
- else
- qpt->flags |= n;
- spin_unlock(&qpt->lock);
- goto bail;
- }
-
- qpn = qpt->last + 2;
- if (qpn >= QPN_MAX)
- qpn = 2;
- if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
- qpn = (qpn | qpt->mask) + 2;
- offset = qpn & BITS_PER_PAGE_MASK;
- map = &qpt->map[qpn / BITS_PER_PAGE];
- max_scan = qpt->nmaps - !offset;
- for (i = 0;;) {
- if (unlikely(!map->page)) {
- get_map_page(qpt, map);
- if (unlikely(!map->page))
- break;
- }
- do {
- if (!test_and_set_bit(offset, map->page)) {
- qpt->last = qpn;
- ret = qpn;
- goto bail;
- }
- offset = find_next_offset(qpt, map, offset,
- dd->n_krcv_queues);
- qpn = mk_qpn(qpt, map, offset);
- /*
- * This test differs from alloc_pidmap().
- * If find_next_offset() does find a zero
- * bit, we don't need to check for QPN
- * wrapping around past our starting QPN.
- * We just need to be sure we don't loop
- * forever.
- */
- } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
- /*
- * In order to keep the number of pages allocated to a
- * minimum, we scan all the existing pages before increasing
- * the size of the bitmap table.
- */
- if (++i > max_scan) {
- if (qpt->nmaps == QPNMAP_ENTRIES)
- break;
- map = &qpt->map[qpt->nmaps++];
- offset = 0;
- } else if (map < &qpt->map[qpt->nmaps]) {
- ++map;
- offset = 0;
- } else {
- map = &qpt->map[0];
- offset = 2;
- }
- qpn = mk_qpn(qpt, map, offset);
- }
-
- ret = -ENOMEM;
-
-bail:
- return ret;
-}
-
-static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
-{
- struct qpn_map *map;
-
- map = qpt->map + qpn / BITS_PER_PAGE;
- if (map->page)
- clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
-}
-
-static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
-{
- return jhash_1word(qpn, dev->qp_rnd) &
- (dev->qp_table_size - 1);
-}
-
-
-/*
- * Put the QP into the hash table.
- * The hash table holds a reference to the QP.
- */
-static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
-{
- struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- unsigned long flags;
- unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
-
- atomic_inc(&qp->refcount);
- spin_lock_irqsave(&dev->qpt_lock, flags);
-
- if (qp->ibqp.qp_num == 0)
- rcu_assign_pointer(ibp->qp0, qp);
- else if (qp->ibqp.qp_num == 1)
- rcu_assign_pointer(ibp->qp1, qp);
- else {
- qp->next = dev->qp_table[n];
- rcu_assign_pointer(dev->qp_table[n], qp);
- }
-
- spin_unlock_irqrestore(&dev->qpt_lock, flags);
-}
-
-/*
- * Remove the QP from the table so it can't be found asynchronously by
- * the receive interrupt routine.
- */
-static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
-{
- struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
- unsigned long flags;
- int removed = 1;
-
- spin_lock_irqsave(&dev->qpt_lock, flags);
-
- if (rcu_dereference_protected(ibp->qp0,
- lockdep_is_held(&dev->qpt_lock)) == qp) {
- RCU_INIT_POINTER(ibp->qp0, NULL);
- } else if (rcu_dereference_protected(ibp->qp1,
- lockdep_is_held(&dev->qpt_lock)) == qp) {
- RCU_INIT_POINTER(ibp->qp1, NULL);
- } else {
- struct qib_qp *q;
- struct qib_qp __rcu **qpp;
-
- removed = 0;
- qpp = &dev->qp_table[n];
- for (; (q = rcu_dereference_protected(*qpp,
- lockdep_is_held(&dev->qpt_lock))) != NULL;
- qpp = &q->next)
- if (q == qp) {
- RCU_INIT_POINTER(*qpp,
- rcu_dereference_protected(qp->next,
- lockdep_is_held(&dev->qpt_lock)));
- removed = 1;
- break;
- }
- }
-
- spin_unlock_irqrestore(&dev->qpt_lock, flags);
- if (removed) {
- synchronize_rcu();
- atomic_dec(&qp->refcount);
- }
-}
-
-/**
- * qib_free_all_qps - check for QPs still in use
- * @dd: the qlogic_ib device
- *
- * There should not be any QPs still in use.
- * Returns the number of QPs still in use.
- */
-unsigned qib_free_all_qps(struct qib_devdata *dd)
-{
- struct qib_ibdev *dev = &dd->verbs_dev;
- unsigned long flags;
- struct qib_qp *qp;
- unsigned n, qp_inuse = 0;
-
- for (n = 0; n < dd->num_pports; n++) {
- struct qib_ibport *ibp = &dd->pport[n].ibport_data;
-
- if (!qib_mcast_tree_empty(ibp))
- qp_inuse++;
- rcu_read_lock();
- if (rcu_dereference(ibp->qp0))
- qp_inuse++;
- if (rcu_dereference(ibp->qp1))
- qp_inuse++;
- rcu_read_unlock();
- }
-
- spin_lock_irqsave(&dev->qpt_lock, flags);
- for (n = 0; n < dev->qp_table_size; n++) {
- qp = rcu_dereference_protected(dev->qp_table[n],
- lockdep_is_held(&dev->qpt_lock));
- RCU_INIT_POINTER(dev->qp_table[n], NULL);
-
- for (; qp; qp = rcu_dereference_protected(qp->next,
- lockdep_is_held(&dev->qpt_lock)))
- qp_inuse++;
- }
- spin_unlock_irqrestore(&dev->qpt_lock, flags);
- synchronize_rcu();
-
- return qp_inuse;
-}
-
-/**
- * qib_lookup_qpn - return the QP with the given QPN
- * @ibp: the IB port the QP is on
- * @qpn: the QP number to look up
- *
- * The caller is responsible for decrementing the QP reference count
- * when done.
- */
-struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
-{
- struct qib_qp *qp = NULL;
-
- rcu_read_lock();
- if (unlikely(qpn <= 1)) {
- if (qpn == 0)
- qp = rcu_dereference(ibp->qp0);
- else
- qp = rcu_dereference(ibp->qp1);
- if (qp)
- atomic_inc(&qp->refcount);
- } else {
- struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
- unsigned n = qpn_hash(dev, qpn);
-
- for (qp = rcu_dereference(dev->qp_table[n]); qp;
- qp = rcu_dereference(qp->next))
- if (qp->ibqp.qp_num == qpn) {
- atomic_inc(&qp->refcount);
- break;
- }
- }
- rcu_read_unlock();
- return qp;
-}
-
-/**
- * qib_reset_qp - initialize the QP state to the reset state
- * @qp: the QP to reset
- * @type: the QP type
- */
-static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
-{
- qp->remote_qpn = 0;
- qp->qkey = 0;
- qp->qp_access_flags = 0;
- atomic_set(&qp->s_dma_busy, 0);
- qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
- qp->s_hdrwords = 0;
- qp->s_wqe = NULL;
- qp->s_draining = 0;
- qp->s_next_psn = 0;
- qp->s_last_psn = 0;
- qp->s_sending_psn = 0;
- qp->s_sending_hpsn = 0;
- qp->s_psn = 0;
- qp->r_psn = 0;
- qp->r_msn = 0;
- if (type == IB_QPT_RC) {
- qp->s_state = IB_OPCODE_RC_SEND_LAST;
- qp->r_state = IB_OPCODE_RC_SEND_LAST;
- } else {
- qp->s_state = IB_OPCODE_UC_SEND_LAST;
- qp->r_state = IB_OPCODE_UC_SEND_LAST;
- }
- qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
- qp->r_nak_state = 0;
- qp->r_aflags = 0;
- qp->r_flags = 0;
- qp->s_head = 0;
- qp->s_tail = 0;
- qp->s_cur = 0;
- qp->s_acked = 0;
- qp->s_last = 0;
- qp->s_ssn = 1;
- qp->s_lsn = 0;
- qp->s_mig_state = IB_MIG_MIGRATED;
- memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
- qp->r_head_ack_queue = 0;
- qp->s_tail_ack_queue = 0;
- qp->s_num_rd_atomic = 0;
- if (qp->r_rq.wq) {
- qp->r_rq.wq->head = 0;
- qp->r_rq.wq->tail = 0;
- }
- qp->r_sge.num_sge = 0;
-}
-
-static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
-{
- unsigned n;
-
- if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
- qib_put_ss(&qp->s_rdma_read_sge);
-
- qib_put_ss(&qp->r_sge);
-
- if (clr_sends) {
- while (qp->s_last != qp->s_head) {
- struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
- unsigned i;
-
- for (i = 0; i < wqe->wr.num_sge; i++) {
- struct qib_sge *sge = &wqe->sg_list[i];
-
- qib_put_mr(sge->mr);
- }
- if (qp->ibqp.qp_type == IB_QPT_UD ||
- qp->ibqp.qp_type == IB_QPT_SMI ||
- qp->ibqp.qp_type == IB_QPT_GSI)
- atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
- if (++qp->s_last >= qp->s_size)
- qp->s_last = 0;
- }
- if (qp->s_rdma_mr) {
- qib_put_mr(qp->s_rdma_mr);
- qp->s_rdma_mr = NULL;
- }
- }
-
- if (qp->ibqp.qp_type != IB_QPT_RC)
- return;
-
- for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
- struct qib_ack_entry *e = &qp->s_ack_queue[n];
-
- if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
- e->rdma_sge.mr) {
- qib_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
- }
-}
-
-/**
- * qib_error_qp - put a QP into the error state
- * @qp: the QP to put into the error state
- * @err: the receive completion error to signal if a RWQE is active
- *
- * Flushes both send and receive work queues.
- * Returns true if last WQE event should be generated.
- * The QP r_lock and s_lock should be held and interrupts disabled.
- * If we are already in error state, just return.
- */
-int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
-{
- struct qib_ibdev *dev = to_idev(qp->ibqp.device);
- struct ib_wc wc;
- int ret = 0;
-
- if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
- goto bail;
-
- qp->state = IB_QPS_ERR;
-
- if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
- qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
- del_timer(&qp->s_timer);
- }
-
- if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
- qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;
-
- spin_lock(&dev->pending_lock);
- if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
- qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
- list_del_init(&qp->iowait);
- }
- spin_unlock(&dev->pending_lock);
-
- if (!(qp->s_flags & QIB_S_BUSY)) {
- qp->s_hdrwords = 0;
- if (qp->s_rdma_mr) {
- qib_put_mr(qp->s_rdma_mr);
- qp->s_rdma_mr = NULL;
- }
- if (qp->s_tx) {
- qib_put_txreq(qp->s_tx);
- qp->s_tx = NULL;
- }
- }
-
- /* Schedule the sending tasklet to drain the send work queue. */
- if (qp->s_last != qp->s_head)
- qib_schedule_send(qp);
-
- clear_mr_refs(qp, 0);
-
- memset(&wc, 0, sizeof(wc));
- wc.qp = &qp->ibqp;
- wc.opcode = IB_WC_RECV;
-
- if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
- wc.wr_id = qp->r_wr_id;
- wc.status = err;
- qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
- }
- wc.status = IB_WC_WR_FLUSH_ERR;
-
- if (qp->r_rq.wq) {
- struct qib_rwq *wq;
- u32 head;
- u32 tail;
-
- spin_lock(&qp->r_rq.lock);
-
- /* sanity check pointers before trusting them */
- wq = qp->r_rq.wq;
- head = wq->head;
- if (head >= qp->r_rq.size)
- head = 0;
- tail = wq->tail;
- if (tail >= qp->r_rq.size)
- tail = 0;
- while (tail != head) {
- wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
- if (++tail >= qp->r_rq.size)
- tail = 0;
- qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
- }
- wq->tail = tail;
-
- spin_unlock(&qp->r_rq.lock);
- } else if (qp->ibqp.event_handler)
- ret = 1;
-
-bail:
- return ret;
-}
-
-/**
- * qib_modify_qp - modify the attributes of a queue pair
- * @ibqp: the queue pair whose attributes we're modifying
- * @attr: the new attributes
- * @attr_mask: the mask of attributes to modify
- * @udata: user data for libibverbs.so
- *
- * Returns 0 on success, otherwise returns an errno.
- */
-int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata)
-{
- struct qib_ibdev *dev = to_idev(ibqp->device);
- struct qib_qp *qp = to_iqp(ibqp);
- enum ib_qp_state cur_state, new_state;
- struct ib_event ev;
- int lastwqe = 0;
- int mig = 0;
- int ret;
- u32 pmtu = 0; /* for gcc warning only */
-
- spin_lock_irq(&qp->r_lock);
- spin_lock(&qp->s_lock);
-
- cur_state = attr_mask & IB_QP_CUR_STATE ?
- attr->cur_qp_state : qp->state;
- new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
-
- if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
- attr_mask, IB_LINK_LAYER_UNSPECIFIED))
- goto inval;
-
- if (attr_mask & IB_QP_AV) {
- if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
- goto inval;
- if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
- goto inval;
- }
-
- if (attr_mask & IB_QP_ALT_PATH) {
- if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
- goto inval;
- if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
- goto inval;
- if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
- goto inval;
- }
-
- if (attr_mask & IB_QP_PKEY_INDEX)
- if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
- goto inval;
-
- if (attr_mask & IB_QP_MIN_RNR_TIMER)
- if (attr->min_rnr_timer > 31)
- goto inval;
-
- if (attr_mask & IB_QP_PORT)
- if (qp->ibqp.qp_type == IB_QPT_SMI ||
- qp->ibqp.qp_type == IB_QPT_GSI ||
- attr->port_num == 0 ||
- attr->port_num > ibqp->device->phys_port_cnt)
- goto inval;
-
- if (attr_mask & IB_QP_DEST_QPN)
- if (attr->dest_qp_num > QIB_QPN_MASK)
- goto inval;
-
- if (attr_mask & IB_QP_RETRY_CNT)
- if (attr->retry_cnt > 7)
- goto inval;
-
- if (attr_mask & IB_QP_RNR_RETRY)
- if (attr->rnr_retry > 7)
- goto inval;
-
- /*
- * Don't allow invalid path_mtu values. It is OK to set the mtu
- * greater than the active mtu (or even the max_cap, if we have
- * tuned that to a small mtu). We'll set qp->path_mtu to the lesser
- * of the requested attribute mtu and the active mtu,
- * for packetizing messages.
- * Note that the QP port has to be set in INIT and MTU in RTR.
- */
- if (attr_mask & IB_QP_PATH_MTU) {
- struct qib_devdata *dd = dd_from_dev(dev);
- int mtu, pidx = qp->port_num - 1;
-
- mtu = ib_mtu_enum_to_int(attr->path_mtu);
- if (mtu == -1)
- goto inval;
- if (mtu > dd->pport[pidx].ibmtu) {
- switch (dd->pport[pidx].ibmtu) {
- case 4096:
- pmtu = IB_MTU_4096;
- break;
- case 2048:
- pmtu = IB_MTU_2048;
- break;
- case 1024:
- pmtu = IB_MTU_1024;
- break;
- case 512:
- pmtu = IB_MTU_512;
- break;
- case 256:
- pmtu = IB_MTU_256;
- break;
- default:
- pmtu = IB_MTU_2048;
- }
- } else
- pmtu = attr->path_mtu;
- }
-
- if (attr_mask & IB_QP_PATH_MIG_STATE) {
- if (attr->path_mig_state == IB_MIG_REARM) {
- if (qp->s_mig_state == IB_MIG_ARMED)
- goto inval;
- if (new_state != IB_QPS_RTS)
- goto inval;
- } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
- if (qp->s_mig_state == IB_MIG_REARM)
- goto inval;
- if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
- goto inval;
- if (qp->s_mig_state == IB_MIG_ARMED)
- mig = 1;
- } else
- goto inval;
- }
-
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
- if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
- goto inval;
-
- switch (new_state) {
- case IB_QPS_RESET:
- if (qp->state != IB_QPS_RESET) {
- qp->state = IB_QPS_RESET;
- spin_lock(&dev->pending_lock);
- if (!list_empty(&qp->iowait))
- list_del_init(&qp->iowait);
- spin_unlock(&dev->pending_lock);
- qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
- spin_unlock(&qp->s_lock);
- spin_unlock_irq(&qp->r_lock);
- /* Stop the sending work queue and retry timer */
- cancel_work_sync(&qp->s_work);
- del_timer_sync(&qp->s_timer);
- wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
- if (qp->s_tx) {
- qib_put_txreq(qp->s_tx);
- qp->s_tx = NULL;
- }
- remove_qp(dev, qp);
- wait_event(qp->wait, !atomic_read(&qp->refcount));
- spin_lock_irq(&qp->r_lock);
- spin_lock(&qp->s_lock);
- clear_mr_refs(qp, 1);
- qib_reset_qp(qp, ibqp->qp_type);
- }
- break;
-
- case IB_QPS_RTR:
- /* Allow event to retrigger if QP set to RTR more than once */
- qp->r_flags &= ~QIB_R_COMM_EST;
- qp->state = new_state;
- break;
-
- case IB_QPS_SQD:
- qp->s_draining = qp->s_last != qp->s_cur;
- qp->state = new_state;
- break;
-
- case IB_QPS_SQE:
- if (qp->ibqp.qp_type == IB_QPT_RC)
- goto inval;
- qp->state = new_state;
- break;
-
- case IB_QPS_ERR:
- lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
- break;
-
- default:
- qp->state = new_state;
- break;
- }
-
- if (attr_mask & IB_QP_PKEY_INDEX)
- qp->s_pkey_index = attr->pkey_index;
-
- if (attr_mask & IB_QP_PORT)
- qp->port_num = attr->port_num;
-
- if (attr_mask & IB_QP_DEST_QPN)
- qp->remote_qpn = attr->dest_qp_num;
-
- if (attr_mask & IB_QP_SQ_PSN) {
- qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
- qp->s_psn = qp->s_next_psn;
- qp->s_sending_psn = qp->s_next_psn;
- qp->s_last_psn = qp->s_next_psn - 1;
- qp->s_sending_hpsn = qp->s_last_psn;
- }
-
- if (attr_mask & IB_QP_RQ_PSN)
- qp->r_psn = attr->rq_psn & QIB_PSN_MASK;
-
- if (attr_mask & IB_QP_ACCESS_FLAGS)
- qp->qp_access_flags = attr->qp_access_flags;
-
- if (attr_mask & IB_QP_AV) {
- qp->remote_ah_attr = attr->ah_attr;
- qp->s_srate = attr->ah_attr.static_rate;
- }
-
- if (attr_mask & IB_QP_ALT_PATH) {
- qp->alt_ah_attr = attr->alt_ah_attr;
- qp->s_alt_pkey_index = attr->alt_pkey_index;
- }
-
- if (attr_mask & IB_QP_PATH_MIG_STATE) {
- qp->s_mig_state = attr->path_mig_state;
- if (mig) {
- qp->remote_ah_attr = qp->alt_ah_attr;
- qp->port_num = qp->alt_ah_attr.port_num;
- qp->s_pkey_index = qp->s_alt_pkey_index;
- }
- }
-
- if (attr_mask & IB_QP_PATH_MTU) {
- qp->path_mtu = pmtu;
- qp->pmtu = ib_mtu_enum_to_int(pmtu);
- }
-
- if (attr_mask & IB_QP_RETRY_CNT) {
- qp->s_retry_cnt = attr->retry_cnt;
- qp->s_retry = attr->retry_cnt;
- }
-
- if (attr_mask & IB_QP_RNR_RETRY) {
- qp->s_rnr_retry_cnt = attr->rnr_retry;
- qp->s_rnr_retry = attr->rnr_retry;
- }
-
- if (attr_mask & IB_QP_MIN_RNR_TIMER)
- qp->r_min_rnr_timer = attr->min_rnr_timer;
-
- if (attr_mask & IB_QP_TIMEOUT) {
- qp->timeout = attr->timeout;
- qp->timeout_jiffies =
- usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
- 1000UL);
- }
-
- if (attr_mask & IB_QP_QKEY)
- qp->qkey = attr->qkey;
-
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
- qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
-
- if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
- qp->s_max_rd_atomic = attr->max_rd_atomic;
-
- spin_unlock(&qp->s_lock);
- spin_unlock_irq(&qp->r_lock);
-
- if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
- insert_qp(dev, qp);
-
- if (lastwqe) {
- ev.device = qp->ibqp.device;
- ev.element.qp = &qp->ibqp;
- ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
- qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
- }
- if (mig) {
- ev.device = qp->ibqp.device;
- ev.element.qp = &qp->ibqp;
- ev.event = IB_EVENT_PATH_MIG;
- qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
- }
- ret = 0;
- goto bail;
-
-inval:
- spin_unlock(&qp->s_lock);
- spin_unlock_irq(&qp->r_lock);
- ret = -EINVAL;
-
-bail:
- return ret;
-}
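
The timeout_jiffies computation in the function above implements the InfiniBand local ACK timeout rule: a 5-bit timeout code t stands for 4.096 usec * 2^t, which the driver approximates as (4096 << t) / 1000 microseconds before converting to jiffies. A stand-alone sketch of just that arithmetic (illustrative only, not driver code):

#include <stdio.h>

/* InfiniBand local ACK timeout: a 5-bit code t stands for 4.096 usec * 2^t.
 * The driver computes (4096 << t) nanoseconds and divides by 1000, as above. */
static unsigned long long timeout_usecs(unsigned int code)
{
	return (4096ULL << code) / 1000ULL;
}

int main(void)
{
	unsigned int codes[] = { 0, 14, 19, 31 };
	unsigned int i;

	for (i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
		printf("timeout code %2u -> ~%llu usec\n",
		       codes[i], timeout_usecs(codes[i]));
	return 0;
}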
-
-int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_qp_init_attr *init_attr)
-{
- struct qib_qp *qp = to_iqp(ibqp);
-
- attr->qp_state = qp->state;
- attr->cur_qp_state = attr->qp_state;
- attr->path_mtu = qp->path_mtu;
- attr->path_mig_state = qp->s_mig_state;
- attr->qkey = qp->qkey;
- attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
- attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
- attr->dest_qp_num = qp->remote_qpn;
- attr->qp_access_flags = qp->qp_access_flags;
- attr->cap.max_send_wr = qp->s_size - 1;
- attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
- attr->cap.max_send_sge = qp->s_max_sge;
- attr->cap.max_recv_sge = qp->r_rq.max_sge;
- attr->cap.max_inline_data = 0;
- attr->ah_attr = qp->remote_ah_attr;
- attr->alt_ah_attr = qp->alt_ah_attr;
- attr->pkey_index = qp->s_pkey_index;
- attr->alt_pkey_index = qp->s_alt_pkey_index;
- attr->en_sqd_async_notify = 0;
- attr->sq_draining = qp->s_draining;
- attr->max_rd_atomic = qp->s_max_rd_atomic;
- attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
- attr->min_rnr_timer = qp->r_min_rnr_timer;
- attr->port_num = qp->port_num;
- attr->timeout = qp->timeout;
- attr->retry_cnt = qp->s_retry_cnt;
- attr->rnr_retry = qp->s_rnr_retry_cnt;
- attr->alt_port_num = qp->alt_ah_attr.port_num;
- attr->alt_timeout = qp->alt_timeout;
-
- init_attr->event_handler = qp->ibqp.event_handler;
- init_attr->qp_context = qp->ibqp.qp_context;
- init_attr->send_cq = qp->ibqp.send_cq;
- init_attr->recv_cq = qp->ibqp.recv_cq;
- init_attr->srq = qp->ibqp.srq;
- init_attr->cap = attr->cap;
- if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
- init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
- else
- init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
- init_attr->qp_type = qp->ibqp.qp_type;
- init_attr->port_num = qp->port_num;
- return 0;
-}
-
-/**
- * qib_compute_aeth - compute the AETH (syndrome + MSN)
- * @qp: the queue pair to compute the AETH for
- *
- * Returns the AETH.
- */
-__be32 qib_compute_aeth(struct qib_qp *qp)
-{
- u32 aeth = qp->r_msn & QIB_MSN_MASK;
-
- if (qp->ibqp.srq) {
- /*
- * Shared receive queues don't generate credits.
- * Set the credit field to the invalid value.
- */
- aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
- } else {
- u32 min, max, x;
- u32 credits;
- struct qib_rwq *wq = qp->r_rq.wq;
- u32 head;
- u32 tail;
-
- /* sanity check pointers before trusting them */
- head = wq->head;
- if (head >= qp->r_rq.size)
- head = 0;
- tail = wq->tail;
- if (tail >= qp->r_rq.size)
- tail = 0;
- /*
- * Compute the number of credits available (RWQEs).
- * XXX Not holding the r_rq.lock here so there is a small
- * chance that the pair of reads are not atomic.
- */
- credits = head - tail;
- if ((int)credits < 0)
- credits += qp->r_rq.size;
- /*
- * Binary search the credit table to find the code to
- * use.
- */
- min = 0;
- max = 31;
- for (;;) {
- x = (min + max) / 2;
- if (credit_table[x] == credits)
- break;
- if (credit_table[x] > credits)
- max = x;
- else if (min == x)
- break;
- else
- min = x;
- }
- aeth |= x << QIB_AETH_CREDIT_SHIFT;
- }
- return cpu_to_be32(aeth);
-}
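
The binary search in qib_compute_aeth() effectively picks a credit code whose table entry does not exceed the number of available RWQEs. A user-space sketch of the same lookup over a copy of credit_table, purely for illustration:

#include <stdio.h>

/* Copy of the AETH credit table above: entry x is the credit count that
 * credit code x stands for. */
static const unsigned int credit_table[31] = {
	0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192,
	12288, 16384, 24576, 32768
};

/* Same search as qib_compute_aeth(): max starts at 31 as in the driver,
 * but the midpoint can never reach index 31, so the table is not overrun. */
static unsigned int credits_to_code(unsigned int credits)
{
	unsigned int min = 0, max = 31, x;

	for (;;) {
		x = (min + max) / 2;
		if (credit_table[x] == credits)
			break;
		if (credit_table[x] > credits)
			max = x;
		else if (min == x)
			break;
		else
			min = x;
	}
	return x;
}

int main(void)
{
	unsigned int samples[] = { 0, 5, 100, 40000 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%5u credits -> code 0x%02X (table entry %u)\n",
		       samples[i], credits_to_code(samples[i]),
		       credit_table[credits_to_code(samples[i])]);
	return 0;
}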
-
-/**
- * qib_create_qp - create a queue pair for a device
- * @ibpd: the protection domain whose device we create the queue pair for
- * @init_attr: the attributes of the queue pair
- * @udata: user data for libibverbs.so
- *
- * Returns the queue pair on success, otherwise returns an errno.
- *
- * Called by the ib_create_qp() core verbs function.
- */
-struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
-{
- struct qib_qp *qp;
- int err;
- struct qib_swqe *swq = NULL;
- struct qib_ibdev *dev;
- struct qib_devdata *dd;
- size_t sz;
- size_t sg_list_sz;
- struct ib_qp *ret;
-
- if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
- init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
- init_attr->create_flags) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
-
- /* Check receive queue parameters if no SRQ is specified. */
- if (!init_attr->srq) {
- if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
- init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
- if (init_attr->cap.max_send_sge +
- init_attr->cap.max_send_wr +
- init_attr->cap.max_recv_sge +
- init_attr->cap.max_recv_wr == 0) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
- }
-
- switch (init_attr->qp_type) {
- case IB_QPT_SMI:
- case IB_QPT_GSI:
- if (init_attr->port_num == 0 ||
- init_attr->port_num > ibpd->device->phys_port_cnt) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
- case IB_QPT_UC:
- case IB_QPT_RC:
- case IB_QPT_UD:
- sz = sizeof(struct qib_sge) *
- init_attr->cap.max_send_sge +
- sizeof(struct qib_swqe);
- swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
- if (swq == NULL) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
- sz = sizeof(*qp);
- sg_list_sz = 0;
- if (init_attr->srq) {
- struct qib_srq *srq = to_isrq(init_attr->srq);
-
- if (srq->rq.max_sge > 1)
- sg_list_sz = sizeof(*qp->r_sg_list) *
- (srq->rq.max_sge - 1);
- } else if (init_attr->cap.max_recv_sge > 1)
- sg_list_sz = sizeof(*qp->r_sg_list) *
- (init_attr->cap.max_recv_sge - 1);
- qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
- if (!qp) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_swq;
- }
- RCU_INIT_POINTER(qp->next, NULL);
- qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
- if (!qp->s_hdr) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_qp;
- }
- qp->timeout_jiffies =
- usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
- 1000UL);
- if (init_attr->srq)
- sz = 0;
- else {
- qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
- qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
- sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
- sizeof(struct qib_rwqe);
- qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
- qp->r_rq.size * sz);
- if (!qp->r_rq.wq) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_qp;
- }
- }
-
- /*
- * ib_create_qp() will initialize qp->ibqp
- * except for qp->ibqp.qp_num.
- */
- spin_lock_init(&qp->r_lock);
- spin_lock_init(&qp->s_lock);
- spin_lock_init(&qp->r_rq.lock);
- atomic_set(&qp->refcount, 0);
- init_waitqueue_head(&qp->wait);
- init_waitqueue_head(&qp->wait_dma);
- init_timer(&qp->s_timer);
- qp->s_timer.data = (unsigned long)qp;
- INIT_WORK(&qp->s_work, qib_do_send);
- INIT_LIST_HEAD(&qp->iowait);
- INIT_LIST_HEAD(&qp->rspwait);
- qp->state = IB_QPS_RESET;
- qp->s_wq = swq;
- qp->s_size = init_attr->cap.max_send_wr + 1;
- qp->s_max_sge = init_attr->cap.max_send_sge;
- if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
- qp->s_flags = QIB_S_SIGNAL_REQ_WR;
- dev = to_idev(ibpd->device);
- dd = dd_from_dev(dev);
- err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
- init_attr->port_num);
- if (err < 0) {
- ret = ERR_PTR(err);
- vfree(qp->r_rq.wq);
- goto bail_qp;
- }
- qp->ibqp.qp_num = err;
- qp->port_num = init_attr->port_num;
- qib_reset_qp(qp, init_attr->qp_type);
- break;
-
- default:
- /* Don't support raw QPs */
- ret = ERR_PTR(-ENOSYS);
- goto bail;
- }
-
- init_attr->cap.max_inline_data = 0;
-
- /*
- * Return the address of the RWQ as the offset to mmap.
- * See qib_mmap() for details.
- */
- if (udata && udata->outlen >= sizeof(__u64)) {
- if (!qp->r_rq.wq) {
- __u64 offset = 0;
-
- err = ib_copy_to_udata(udata, &offset,
- sizeof(offset));
- if (err) {
- ret = ERR_PTR(err);
- goto bail_ip;
- }
- } else {
- u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;
-
- qp->ip = qib_create_mmap_info(dev, s,
- ibpd->uobject->context,
- qp->r_rq.wq);
- if (!qp->ip) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_ip;
- }
-
- err = ib_copy_to_udata(udata, &(qp->ip->offset),
- sizeof(qp->ip->offset));
- if (err) {
- ret = ERR_PTR(err);
- goto bail_ip;
- }
- }
- }
-
- spin_lock(&dev->n_qps_lock);
- if (dev->n_qps_allocated == ib_qib_max_qps) {
- spin_unlock(&dev->n_qps_lock);
- ret = ERR_PTR(-ENOMEM);
- goto bail_ip;
- }
-
- dev->n_qps_allocated++;
- spin_unlock(&dev->n_qps_lock);
-
- if (qp->ip) {
- spin_lock_irq(&dev->pending_lock);
- list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
- }
-
- ret = &qp->ibqp;
- goto bail;
-
-bail_ip:
- if (qp->ip)
- kref_put(&qp->ip->ref, qib_release_mmap_info);
- else
- vfree(qp->r_rq.wq);
- free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
-bail_qp:
- kfree(qp->s_hdr);
- kfree(qp);
-bail_swq:
- vfree(swq);
-bail:
- return ret;
-}
-
-/**
- * qib_destroy_qp - destroy a queue pair
- * @ibqp: the queue pair to destroy
- *
- * Returns 0 on success.
- *
- * Note that this can be called while the QP is actively sending or
- * receiving!
- */
-int qib_destroy_qp(struct ib_qp *ibqp)
-{
- struct qib_qp *qp = to_iqp(ibqp);
- struct qib_ibdev *dev = to_idev(ibqp->device);
-
- /* Make sure HW and driver activity is stopped. */
- spin_lock_irq(&qp->s_lock);
- if (qp->state != IB_QPS_RESET) {
- qp->state = IB_QPS_RESET;
- spin_lock(&dev->pending_lock);
- if (!list_empty(&qp->iowait))
- list_del_init(&qp->iowait);
- spin_unlock(&dev->pending_lock);
- qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
- spin_unlock_irq(&qp->s_lock);
- cancel_work_sync(&qp->s_work);
- del_timer_sync(&qp->s_timer);
- wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
- if (qp->s_tx) {
- qib_put_txreq(qp->s_tx);
- qp->s_tx = NULL;
- }
- remove_qp(dev, qp);
- wait_event(qp->wait, !atomic_read(&qp->refcount));
- clear_mr_refs(qp, 1);
- } else
- spin_unlock_irq(&qp->s_lock);
-
- /* all users cleaned up, mark it available */
- free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
- spin_lock(&dev->n_qps_lock);
- dev->n_qps_allocated--;
- spin_unlock(&dev->n_qps_lock);
-
- if (qp->ip)
- kref_put(&qp->ip->ref, qib_release_mmap_info);
- else
- vfree(qp->r_rq.wq);
- vfree(qp->s_wq);
- kfree(qp->s_hdr);
- kfree(qp);
- return 0;
-}
-
-/**
- * qib_init_qpn_table - initialize the QP number table for a device
- * @qpt: the QPN table
- */
-void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
-{
- spin_lock_init(&qpt->lock);
- qpt->last = 1; /* start with QPN 2 */
- qpt->nmaps = 1;
- qpt->mask = dd->qpn_mask;
-}
-
-/**
- * qib_free_qpn_table - free the QP number table for a device
- * @qpt: the QPN table
- */
-void qib_free_qpn_table(struct qib_qpn_table *qpt)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
- if (qpt->map[i].page)
- free_page((unsigned long) qpt->map[i].page);
-}
-
-/**
- * qib_get_credit - flush the send work queue of a QP
- * @qp: the qp whose send work queue to flush
- * @aeth: the Acknowledge Extended Transport Header
- *
- * The QP s_lock should be held.
- */
-void qib_get_credit(struct qib_qp *qp, u32 aeth)
-{
- u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;
-
- /*
- * If the credit is invalid, we can send
- * as many packets as we like. Otherwise, we have to
- * honor the credit field.
- */
- if (credit == QIB_AETH_CREDIT_INVAL) {
- if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
- qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
- if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
- qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
- qib_schedule_send(qp);
- }
- }
- } else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
- /* Compute new LSN (i.e., MSN + credit) */
- credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
- if (qib_cmp24(credit, qp->s_lsn) > 0) {
- qp->s_lsn = credit;
- if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
- qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
- qib_schedule_send(qp);
- }
- }
- }
-}
-
-#ifdef CONFIG_DEBUG_FS
-
-struct qib_qp_iter {
- struct qib_ibdev *dev;
- struct qib_qp *qp;
- int n;
-};
-
-struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
-{
- struct qib_qp_iter *iter;
-
- iter = kzalloc(sizeof(*iter), GFP_KERNEL);
- if (!iter)
- return NULL;
-
- iter->dev = dev;
- if (qib_qp_iter_next(iter)) {
- kfree(iter);
- return NULL;
- }
-
- return iter;
-}
-
-int qib_qp_iter_next(struct qib_qp_iter *iter)
-{
- struct qib_ibdev *dev = iter->dev;
- int n = iter->n;
- int ret = 1;
- struct qib_qp *pqp = iter->qp;
- struct qib_qp *qp;
-
- for (; n < dev->qp_table_size; n++) {
- if (pqp)
- qp = rcu_dereference(pqp->next);
- else
- qp = rcu_dereference(dev->qp_table[n]);
- pqp = qp;
- if (qp) {
- iter->qp = qp;
- iter->n = n;
- return 0;
- }
- }
- return ret;
-}
-
-static const char * const qp_type_str[] = {
- "SMI", "GSI", "RC", "UC", "UD",
-};
-
-void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
-{
- struct qib_swqe *wqe;
- struct qib_qp *qp = iter->qp;
-
- wqe = get_swqe_ptr(qp, qp->s_last);
- seq_printf(s,
- "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
- iter->n,
- qp->ibqp.qp_num,
- qp_type_str[qp->ibqp.qp_type],
- qp->state,
- wqe->wr.opcode,
- qp->s_hdrwords,
- qp->s_flags,
- atomic_read(&qp->s_dma_busy),
- !list_empty(&qp->iowait),
- qp->timeout,
- wqe->ssn,
- qp->s_lsn,
- qp->s_last_psn,
- qp->s_psn, qp->s_next_psn,
- qp->s_sending_psn, qp->s_sending_hpsn,
- qp->s_last, qp->s_acked, qp->s_cur,
- qp->s_tail, qp->s_head, qp->s_size,
- qp->remote_qpn,
- qp->remote_ah_attr.dlid);
-}
-
-#endif
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
deleted file mode 100644
index 5e27f76805e2..000000000000
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ /dev/null
@@ -1,559 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-
-#include "qib.h"
-#include "qib_qsfp.h"
-
-/*
- * QSFP support for ib_qib driver, using "Two Wire Serial Interface" driver
- * in qib_twsi.c
- */
-#define QSFP_MAX_RETRY 4
-
-static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
-{
- struct qib_devdata *dd = ppd->dd;
- u32 out, mask;
- int ret, cnt, pass = 0;
- int stuck = 0;
- u8 *buff = bp;
-
- ret = mutex_lock_interruptible(&dd->eep_lock);
- if (ret)
- goto no_unlock;
-
- if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
- ret = -ENXIO;
- goto bail;
- }
-
- /*
- * We presume, if we are called at all, that this board has
- * QSFP. This is on the same i2c chain as the legacy parts,
- * but only responds if the module is selected via GPIO pins.
- * Further, there are very long setup and hold requirements
- * on MODSEL.
- */
- mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
- out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
- if (ppd->hw_pidx) {
- mask <<= QSFP_GPIO_PORT2_SHIFT;
- out <<= QSFP_GPIO_PORT2_SHIFT;
- }
-
- dd->f_gpio_mod(dd, out, mask, mask);
-
- /*
- * Module could take up to 2 Msec to respond to MOD_SEL, and there
- * is no way to tell if it is ready, so we must wait.
- */
- msleep(20);
-
- /* Make sure TWSI bus is in sane state. */
- ret = qib_twsi_reset(dd);
- if (ret) {
- qib_dev_porterr(dd, ppd->port,
- "QSFP interface Reset for read failed\n");
- ret = -EIO;
- stuck = 1;
- goto deselect;
- }
-
- /* All QSFP modules are at A0 */
-
- cnt = 0;
- while (cnt < len) {
- unsigned in_page;
- int wlen = len - cnt;
-
- in_page = addr % QSFP_PAGESIZE;
- if ((in_page + wlen) > QSFP_PAGESIZE)
- wlen = QSFP_PAGESIZE - in_page;
- ret = qib_twsi_blk_rd(dd, QSFP_DEV, addr, buff + cnt, wlen);
- /* Some QSFPs fail on the first try; retry as a workaround */
- if (ret && cnt == 0 && ++pass < QSFP_MAX_RETRY)
- continue;
- if (ret) {
- /* qib_twsi_blk_rd() 1 for error, else 0 */
- ret = -EIO;
- goto deselect;
- }
- addr += wlen;
- cnt += wlen;
- }
- ret = cnt;
-
-deselect:
- /*
- * Module could take up to 10 uSec after transfer before
- * ready to respond to MOD_SEL negation, and there is no way
- * to tell if it is ready, so we must wait.
- */
- udelay(10);
- /* set QSFP MODSEL, RST. LP all high */
- dd->f_gpio_mod(dd, mask, mask, mask);
-
- /*
- * Module could take up to 2 Msec to respond to MOD_SEL
- * going away, and there is no way to tell if it is ready,
- * so we must wait.
- */
- if (stuck)
- qib_dev_err(dd, "QSFP interface bus stuck non-idle\n");
-
- if (pass >= QSFP_MAX_RETRY && ret)
- qib_dev_porterr(dd, ppd->port, "QSFP failed even retrying\n");
- else if (pass)
- qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass);
-
- msleep(20);
-
-bail:
- mutex_unlock(&dd->eep_lock);
-
-no_unlock:
- return ret;
-}
-
-/*
- * qib_qsfp_write
- * We do not ordinarily write the QSFP, but this is needed to select
- * the page on non-flat QSFPs, and possibly for other unusual cases later.
- */
-static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
- int len)
-{
- struct qib_devdata *dd = ppd->dd;
- u32 out, mask;
- int ret, cnt;
- u8 *buff = bp;
-
- ret = mutex_lock_interruptible(&dd->eep_lock);
- if (ret)
- goto no_unlock;
-
- if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
- ret = -ENXIO;
- goto bail;
- }
-
- /*
- * We presume, if we are called at all, that this board has
- * QSFP. This is on the same i2c chain as the legacy parts,
- * but only responds if the module is selected via GPIO pins.
- * Further, there are very long setup and hold requirements
- * on MODSEL.
- */
- mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
- out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
- if (ppd->hw_pidx) {
- mask <<= QSFP_GPIO_PORT2_SHIFT;
- out <<= QSFP_GPIO_PORT2_SHIFT;
- }
- dd->f_gpio_mod(dd, out, mask, mask);
-
- /*
- * Module could take up to 2 Msec to respond to MOD_SEL,
- * and there is no way to tell if it is ready, so we must wait.
- */
- msleep(20);
-
- /* Make sure TWSI bus is in sane state. */
- ret = qib_twsi_reset(dd);
- if (ret) {
- qib_dev_porterr(dd, ppd->port,
- "QSFP interface Reset for write failed\n");
- ret = -EIO;
- goto deselect;
- }
-
- /* All QSFP modules are at A0 */
-
- cnt = 0;
- while (cnt < len) {
- unsigned in_page;
- int wlen = len - cnt;
-
- in_page = addr % QSFP_PAGESIZE;
- if ((in_page + wlen) > QSFP_PAGESIZE)
- wlen = QSFP_PAGESIZE - in_page;
- ret = qib_twsi_blk_wr(dd, QSFP_DEV, addr, buff + cnt, wlen);
- if (ret) {
- /* qib_twsi_blk_wr() 1 for error, else 0 */
- ret = -EIO;
- goto deselect;
- }
- addr += wlen;
- cnt += wlen;
- }
- ret = cnt;
-
-deselect:
- /*
- * Module could take up to 10 uSec after transfer before
- * ready to respond to MOD_SEL negation, and there is no way
- * to tell if it is ready, so we must wait.
- */
- udelay(10);
- /* set QSFP MODSEL, RST, LP high */
- dd->f_gpio_mod(dd, mask, mask, mask);
- /*
- * Module could take up to 2 Msec to respond to MOD_SEL
- * going away, and there is no way to tell if it is ready,
- * so we must wait.
- */
- msleep(20);
-
-bail:
- mutex_unlock(&dd->eep_lock);
-
-no_unlock:
- return ret;
-}
-
-/*
- * For validation, we want to check the checksums, even of the
- * fields we do not otherwise use. This function reads the bytes from
- * <first> to <next-1> and returns the low 8 bits of the sum, or < 0 on error.
- */
-static int qsfp_cks(struct qib_pportdata *ppd, int first, int next)
-{
- int ret;
- u16 cks;
- u8 bval;
-
- cks = 0;
- while (first < next) {
- ret = qsfp_read(ppd, first, &bval, 1);
- if (ret < 0)
- goto bail;
- cks += bval;
- ++first;
- }
- ret = cks & 0xFF;
-bail:
- return ret;
-
-}
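
qsfp_cks() and its caller below accumulate a plain byte sum and keep only the low 8 bits, which is then compared against the checksum byte stored in the module. A self-contained sketch of that accumulation (the sample bytes are hypothetical, not real EEPROM contents):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Low 8 bits of the byte sum over buf[0..len-1], mirroring how the driver
 * accumulates cks and compares it with the stored checksum byte. */
static uint8_t byte_sum8(const uint8_t *buf, size_t len)
{
	unsigned int sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += buf[i];
	return (uint8_t)(sum & 0xFF);
}

int main(void)
{
	/* Hypothetical field bytes; real values come from the module EEPROM. */
	const uint8_t fields[] = { 0x0D, 0x00, 0x23, 0x41, 0x49, 0x52 };

	printf("computed checksum byte: 0x%02X\n",
	       byte_sum8(fields, sizeof(fields)));
	return 0;
}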
-
-int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
-{
- int ret;
- int idx;
- u16 cks;
- u8 peek[4];
-
- /* ensure sane contents on invalid reads, for cable swaps */
- memset(cp, 0, sizeof(*cp));
-
- if (!qib_qsfp_mod_present(ppd)) {
- ret = -ENODEV;
- goto bail;
- }
-
- ret = qsfp_read(ppd, 0, peek, 3);
- if (ret < 0)
- goto bail;
- if ((peek[0] & 0xFE) != 0x0C)
- qib_dev_porterr(ppd->dd, ppd->port,
- "QSFP byte0 is 0x%02X, S/B 0x0C/D\n", peek[0]);
-
- if ((peek[2] & 2) == 0) {
- /*
- * If cable is paged, rather than "flat memory", we need to
- * set the page to zero, even if it already appears to be zero.
- */
- u8 poke = 0;
-
- ret = qib_qsfp_write(ppd, 127, &poke, 1);
- udelay(50);
- if (ret != 1) {
- qib_dev_porterr(ppd->dd, ppd->port,
- "Failed QSFP Page set\n");
- goto bail;
- }
- }
-
- ret = qsfp_read(ppd, QSFP_MOD_ID_OFFS, &cp->id, 1);
- if (ret < 0)
- goto bail;
- if ((cp->id & 0xFE) != 0x0C)
- qib_dev_porterr(ppd->dd, ppd->port,
- "QSFP ID byte is 0x%02X, S/B 0x0C/D\n", cp->id);
- cks = cp->id;
-
- ret = qsfp_read(ppd, QSFP_MOD_PWR_OFFS, &cp->pwr, 1);
- if (ret < 0)
- goto bail;
- cks += cp->pwr;
-
- ret = qsfp_cks(ppd, QSFP_MOD_PWR_OFFS + 1, QSFP_MOD_LEN_OFFS);
- if (ret < 0)
- goto bail;
- cks += ret;
-
- ret = qsfp_read(ppd, QSFP_MOD_LEN_OFFS, &cp->len, 1);
- if (ret < 0)
- goto bail;
- cks += cp->len;
-
- ret = qsfp_read(ppd, QSFP_MOD_TECH_OFFS, &cp->tech, 1);
- if (ret < 0)
- goto bail;
- cks += cp->tech;
-
- ret = qsfp_read(ppd, QSFP_VEND_OFFS, &cp->vendor, QSFP_VEND_LEN);
- if (ret < 0)
- goto bail;
- for (idx = 0; idx < QSFP_VEND_LEN; ++idx)
- cks += cp->vendor[idx];
-
- ret = qsfp_read(ppd, QSFP_IBXCV_OFFS, &cp->xt_xcv, 1);
- if (ret < 0)
- goto bail;
- cks += cp->xt_xcv;
-
- ret = qsfp_read(ppd, QSFP_VOUI_OFFS, &cp->oui, QSFP_VOUI_LEN);
- if (ret < 0)
- goto bail;
- for (idx = 0; idx < QSFP_VOUI_LEN; ++idx)
- cks += cp->oui[idx];
-
- ret = qsfp_read(ppd, QSFP_PN_OFFS, &cp->partnum, QSFP_PN_LEN);
- if (ret < 0)
- goto bail;
- for (idx = 0; idx < QSFP_PN_LEN; ++idx)
- cks += cp->partnum[idx];
-
- ret = qsfp_read(ppd, QSFP_REV_OFFS, &cp->rev, QSFP_REV_LEN);
- if (ret < 0)
- goto bail;
- for (idx = 0; idx < QSFP_REV_LEN; ++idx)
- cks += cp->rev[idx];
-
- ret = qsfp_read(ppd, QSFP_ATTEN_OFFS, &cp->atten, QSFP_ATTEN_LEN);
- if (ret < 0)
- goto bail;
- for (idx = 0; idx < QSFP_ATTEN_LEN; ++idx)
- cks += cp->atten[idx];
-
- ret = qsfp_cks(ppd, QSFP_ATTEN_OFFS + QSFP_ATTEN_LEN, QSFP_CC_OFFS);
- if (ret < 0)
- goto bail;
- cks += ret;
-
- cks &= 0xFF;
- ret = qsfp_read(ppd, QSFP_CC_OFFS, &cp->cks1, 1);
- if (ret < 0)
- goto bail;
- if (cks != cp->cks1)
- qib_dev_porterr(ppd->dd, ppd->port,
- "QSFP cks1 is %02X, computed %02X\n", cp->cks1,
- cks);
-
- /* Second checksum covers 192 to (serial, date, lot) */
- ret = qsfp_cks(ppd, QSFP_CC_OFFS + 1, QSFP_SN_OFFS);
- if (ret < 0)
- goto bail;
- cks = ret;
-
- ret = qsfp_read(ppd, QSFP_SN_OFFS, &cp->serial, QSFP_SN_LEN);
- if (ret < 0)
- goto bail;
- for (idx = 0; idx < QSFP_SN_LEN; ++idx)
- cks += cp->serial[idx];
-
- ret = qsfp_read(ppd, QSFP_DATE_OFFS, &cp->date, QSFP_DATE_LEN);
- if (ret < 0)
- goto bail;
- for (idx = 0; idx < QSFP_DATE_LEN; ++idx)
- cks += cp->date[idx];
-
- ret = qsfp_read(ppd, QSFP_LOT_OFFS, &cp->lot, QSFP_LOT_LEN);
- if (ret < 0)
- goto bail;
- for (idx = 0; idx < QSFP_LOT_LEN; ++idx)
- cks += cp->lot[idx];
-
- ret = qsfp_cks(ppd, QSFP_LOT_OFFS + QSFP_LOT_LEN, QSFP_CC_EXT_OFFS);
- if (ret < 0)
- goto bail;
- cks += ret;
-
- ret = qsfp_read(ppd, QSFP_CC_EXT_OFFS, &cp->cks2, 1);
- if (ret < 0)
- goto bail;
- cks &= 0xFF;
- if (cks != cp->cks2)
- qib_dev_porterr(ppd->dd, ppd->port,
- "QSFP cks2 is %02X, computed %02X\n", cp->cks2,
- cks);
- return 0;
-
-bail:
- cp->id = 0;
- return ret;
-}
-
-const char * const qib_qsfp_devtech[16] = {
- "850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP",
- "1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML",
- "Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq",
- "Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq"
-};
-
-#define QSFP_DUMP_CHUNK 16 /* Holds longest string */
-#define QSFP_DEFAULT_HDR_CNT 224
-
-static const char *pwr_codes = "1.5W2.0W2.5W3.5W";
-
-int qib_qsfp_mod_present(struct qib_pportdata *ppd)
-{
- u32 mask;
- int ret;
-
- mask = QSFP_GPIO_MOD_PRS_N <<
- (ppd->hw_pidx * QSFP_GPIO_PORT2_SHIFT);
- ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0);
-
- return !((ret & mask) >>
- ((ppd->hw_pidx * QSFP_GPIO_PORT2_SHIFT) + 3));
-}
-
-/*
- * Initialize structures that control access to QSFP. Called once per port
- * on cards that support QSFP.
- */
-void qib_qsfp_init(struct qib_qsfp_data *qd,
- void (*fevent)(struct work_struct *))
-{
- u32 mask, highs;
-
- struct qib_devdata *dd = qd->ppd->dd;
-
- /* Initialize work struct for later QSFP events */
- INIT_WORK(&qd->work, fevent);
-
- /*
- * Later, we may want more validation. For now, just set up pins and
- * blip reset. If module is present, call qib_refresh_qsfp_cache(),
- * to do further init.
- */
- mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
- highs = mask - QSFP_GPIO_MOD_RST_N;
- if (qd->ppd->hw_pidx) {
- mask <<= QSFP_GPIO_PORT2_SHIFT;
- highs <<= QSFP_GPIO_PORT2_SHIFT;
- }
- dd->f_gpio_mod(dd, highs, mask, mask);
- udelay(20); /* Generous RST dwell */
-
- dd->f_gpio_mod(dd, mask, mask, mask);
-}
-
-void qib_qsfp_deinit(struct qib_qsfp_data *qd)
-{
- /*
-	 * There is nothing to do here for now. Our work is scheduled
- * with queue_work(), and flush_workqueue() from remove_one
- * will block until all work setup with queue_work()
- * completes.
- */
-}
-
-int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
-{
- struct qib_qsfp_cache cd;
- u8 bin_buff[QSFP_DUMP_CHUNK];
- char lenstr[6];
- int sofar, ret;
- int bidx = 0;
-
- sofar = 0;
- ret = qib_refresh_qsfp_cache(ppd, &cd);
- if (ret < 0)
- goto bail;
-
- lenstr[0] = ' ';
- lenstr[1] = '\0';
- if (QSFP_IS_CU(cd.tech))
- sprintf(lenstr, "%dM ", cd.len);
-
- sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n", pwr_codes +
- (QSFP_PWR(cd.pwr) * 4));
-
- sofar += scnprintf(buf + sofar, len - sofar, "TECH:%s%s\n", lenstr,
- qib_qsfp_devtech[cd.tech >> 4]);
-
- sofar += scnprintf(buf + sofar, len - sofar, "Vendor:%.*s\n",
- QSFP_VEND_LEN, cd.vendor);
-
- sofar += scnprintf(buf + sofar, len - sofar, "OUI:%06X\n",
- QSFP_OUI(cd.oui));
-
- sofar += scnprintf(buf + sofar, len - sofar, "Part#:%.*s\n",
- QSFP_PN_LEN, cd.partnum);
- sofar += scnprintf(buf + sofar, len - sofar, "Rev:%.*s\n",
- QSFP_REV_LEN, cd.rev);
- if (QSFP_IS_CU(cd.tech))
- sofar += scnprintf(buf + sofar, len - sofar, "Atten:%d, %d\n",
- QSFP_ATTEN_SDR(cd.atten),
- QSFP_ATTEN_DDR(cd.atten));
- sofar += scnprintf(buf + sofar, len - sofar, "Serial:%.*s\n",
- QSFP_SN_LEN, cd.serial);
- sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
- QSFP_DATE_LEN, cd.date);
- sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
-			   QSFP_LOT_LEN, cd.lot);
-
- while (bidx < QSFP_DEFAULT_HDR_CNT) {
- int iidx;
-
- ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK);
- if (ret < 0)
- goto bail;
- for (iidx = 0; iidx < ret; ++iidx) {
- sofar += scnprintf(buf + sofar, len-sofar, " %02X",
- bin_buff[iidx]);
- }
- sofar += scnprintf(buf + sofar, len - sofar, "\n");
- bidx += QSFP_DUMP_CHUNK;
- }
- ret = sofar;
-bail:
- return ret;
-}
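
For readers following the removal above: qib_refresh_qsfp_cache() validates two SFF-8436-style one-byte checksums over the module's upper EEPROM page (byte 191 covers bytes 128..190, byte 223 covers bytes 192..222). Below is a minimal userspace sketch of that arithmetic, assuming a hypothetical page[] buffer holding bytes 0..255 of the EEPROM; the names are illustrative and not part of the driver.

#include <stdint.h>

/* LSB of the sum of page[first]..page[last_incl], as the driver's cks math does */
static uint8_t qsfp_cks_range(const uint8_t *page, unsigned int first,
			      unsigned int last_incl)
{
	unsigned int sum = 0;

	while (first <= last_incl)
		sum += page[first++];
	return (uint8_t)(sum & 0xFF);
}

/* cks1 lives in byte 191 (QSFP_CC_OFFS), cks2 in byte 223 (QSFP_CC_EXT_OFFS) */
static int qsfp_cks_ok(const uint8_t *page)
{
	return qsfp_cks_range(page, 128, 190) == page[191] &&
	       qsfp_cks_range(page, 192, 222) == page[223];
}

The byte ranges come from the QSFP_CC_OFFS / QSFP_CC_EXT_OFFS comments in the header removed next.
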
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.h b/drivers/infiniband/hw/qib/qib_qsfp.h
deleted file mode 100644
index 91908f533a2b..000000000000
--- a/drivers/infiniband/hw/qib/qib_qsfp.h
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/* QSFP support common definitions, for ib_qib driver */
-
-#define QSFP_DEV 0xA0
-#define QSFP_PWR_LAG_MSEC 2000
-#define QSFP_MODPRS_LAG_MSEC 20
-
-/*
- * Below are masks for various QSFP signals, for Port 1.
- * Port2 equivalents are shifted by QSFP_GPIO_PORT2_SHIFT.
- * _N means asserted low
- */
-#define QSFP_GPIO_MOD_SEL_N (4)
-#define QSFP_GPIO_MOD_PRS_N (8)
-#define QSFP_GPIO_INT_N (0x10)
-#define QSFP_GPIO_MOD_RST_N (0x20)
-#define QSFP_GPIO_LP_MODE (0x40)
-#define QSFP_GPIO_PORT2_SHIFT 5
-
-#define QSFP_PAGESIZE 128
-/* Defined fields that QLogic requires of qualified cables */
-/* Byte 0 is Identifier, not checked */
-/* Byte 1 is reserved "status MSB" */
-/* Byte 2 is "status LSB" We only care that D2 "Flat Mem" is set. */
-/*
- * Rest of first 128 not used, although 127 is reserved for page select
- * if module is not "Flat memory".
- */
-/* Byte 128 is Identifier: must be 0x0c for QSFP, or 0x0d for QSFP+ */
-#define QSFP_MOD_ID_OFFS 128
-/*
- * Byte 129 is "Extended Identifier". We only care about D7,D6: Power class
- * 0:1.5W, 1:2.0W, 2:2.5W, 3:3.5W
- */
-#define QSFP_MOD_PWR_OFFS 129
-/* Byte 130 is Connector type. Not QLogic req'd */
-/* Bytes 131..138 are Transceiver types, bit maps for various tech, none IB */
-/* Byte 139 is encoding. Code 0x01 is 8b10b. Not QLogic req'd */
-/* Byte 140 is nominal bit-rate, in units of 100Mbits/sec. Not QLogic req'd */
-/* Byte 141 is Extended Rate Select. Not QLogic req'd */
-/* Bytes 142..145 are lengths for various fiber types. Not QLogic req'd */
-/* Byte 146 is length for Copper. Units of 1 meter */
-#define QSFP_MOD_LEN_OFFS 146
-/*
- * Byte 147 is Device technology. D0..3 not QLogic req'd
- * D4..7 select from 15 choices, translated by table:
- */
-#define QSFP_MOD_TECH_OFFS 147
-extern const char *const qib_qsfp_devtech[16];
-/* Active Equalization includes fiber, copper full EQ, and copper near Eq */
-#define QSFP_IS_ACTIVE(tech) ((0xA2FF >> ((tech) >> 4)) & 1)
-/* Active Equalization includes fiber, copper full EQ, and copper far Eq */
-#define QSFP_IS_ACTIVE_FAR(tech) ((0x32FF >> ((tech) >> 4)) & 1)
-/* Attenuation should be valid for copper other than full/near Eq */
-#define QSFP_HAS_ATTEN(tech) ((0x4D00 >> ((tech) >> 4)) & 1)
-/* Length is only valid if technology is "copper" */
-#define QSFP_IS_CU(tech) ((0xED00 >> ((tech) >> 4)) & 1)
-#define QSFP_TECH_1490 9
-
-#define QSFP_OUI(oui) (((unsigned)oui[0] << 16) | ((unsigned)oui[1] << 8) | \
- oui[2])
-#define QSFP_OUI_AMPHENOL 0x415048
-#define QSFP_OUI_FINISAR 0x009065
-#define QSFP_OUI_GORE 0x002177
-
-/* Bytes 148..163 are Vendor Name, Left-justified Blank-filled */
-#define QSFP_VEND_OFFS 148
-#define QSFP_VEND_LEN 16
-/* Byte 164 is IB Extended transceiver codes. Bits D0..3 are SDR,DDR,QDR,EDR */
-#define QSFP_IBXCV_OFFS 164
-/* Bytes 165..167 are Vendor OUI number */
-#define QSFP_VOUI_OFFS 165
-#define QSFP_VOUI_LEN 3
-/* Bytes 168..183 are Vendor Part Number, string */
-#define QSFP_PN_OFFS 168
-#define QSFP_PN_LEN 16
-/* Bytes 184,185 are Vendor Rev. Left Justified, Blank-filled */
-#define QSFP_REV_OFFS 184
-#define QSFP_REV_LEN 2
-/*
- * Bytes 186,187 are Wavelength, if Optical. Not QLogic req'd
- * If copper, they are attenuation in dB:
- * Byte 186 is at 2.5Gb/sec (SDR), Byte 187 at 5.0Gb/sec (DDR)
- */
-#define QSFP_ATTEN_OFFS 186
-#define QSFP_ATTEN_LEN 2
-/* Bytes 188,189 are Wavelength tolerance, not QLogic req'd */
-/* Byte 190 is Max Case Temp. Not QLogic req'd */
-/* Byte 191 is LSB of sum of bytes 128..190. Not QLogic req'd */
-#define QSFP_CC_OFFS 191
-/* Bytes 192..195 are Options implemented in qsfp. Not QLogic req'd */
-/* Bytes 196..211 are Serial Number, String */
-#define QSFP_SN_OFFS 196
-#define QSFP_SN_LEN 16
-/* Bytes 212..219 are date-code YYMMDD (MM==1 for Jan) */
-#define QSFP_DATE_OFFS 212
-#define QSFP_DATE_LEN 6
-/* Bytes 218,219 are optional lot-code, string */
-#define QSFP_LOT_OFFS 218
-#define QSFP_LOT_LEN 2
-/* Bytes 220, 221 indicate monitoring options, Not QLogic req'd */
-/* Byte 223 is LSB of sum of bytes 192..222 */
-#define QSFP_CC_EXT_OFFS 223
-
-/*
- * struct qib_qsfp_data encapsulates state of QSFP device for one port.
- * it will be part of port-chip-specific data if a board supports QSFP.
- *
- * Since multiple board-types use QSFP, and their pport_data structs
- * differ (in the chip-specific section), we need a pointer to its head.
- *
- * Avoiding premature optimization, we will have one work_struct per port,
- * and let the (increasingly inaccurately named) eep_lock arbitrate
- * access to common resources.
- *
- */
-
-/*
- * Hold the parts of the onboard EEPROM that we care about, so we aren't
- * constantly bit-boffing
- */
-struct qib_qsfp_cache {
- u8 id; /* must be 0x0C or 0x0D; 0 indicates invalid EEPROM read */
- u8 pwr; /* in D6,7 */
- u8 len; /* in meters, Cu only */
- u8 tech;
- char vendor[QSFP_VEND_LEN];
-	u8 xt_xcv; /* Ext. transceiver codes, 4 lsbs are IB speed supported */
- u8 oui[QSFP_VOUI_LEN];
- u8 partnum[QSFP_PN_LEN];
- u8 rev[QSFP_REV_LEN];
- u8 atten[QSFP_ATTEN_LEN];
- u8 cks1; /* Checksum of bytes 128..190 */
- u8 serial[QSFP_SN_LEN];
- u8 date[QSFP_DATE_LEN];
- u8 lot[QSFP_LOT_LEN];
-	u8 cks2; /* Checksum of bytes 192..222 */
-};
-
-#define QSFP_PWR(pbyte) (((pbyte) >> 6) & 3)
-#define QSFP_ATTEN_SDR(attenarray) (attenarray[0])
-#define QSFP_ATTEN_DDR(attenarray) (attenarray[1])
-
-struct qib_qsfp_data {
- /* Helps to find our way */
- struct qib_pportdata *ppd;
- struct work_struct work;
- struct qib_qsfp_cache cache;
- unsigned long t_insert;
- u8 modpresent;
-};
-
-extern int qib_refresh_qsfp_cache(struct qib_pportdata *ppd,
- struct qib_qsfp_cache *cp);
-extern int qib_qsfp_mod_present(struct qib_pportdata *ppd);
-extern void qib_qsfp_init(struct qib_qsfp_data *qd,
- void (*fevent)(struct work_struct *));
-extern void qib_qsfp_deinit(struct qib_qsfp_data *qd);
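
As a quick aid to reading the byte-offset comments above: the top two bits of byte 129 select the power class (0:1.5W .. 3:3.5W) and bytes 165..167 form the vendor OUI. A small standalone sketch of those decodes, mirroring QSFP_PWR() and QSFP_OUI() above; the sample values are made up except for the Gore OUI listed in the header.

#include <stdio.h>
#include <stdint.h>

#define PWR_CLASS(pbyte)	(((pbyte) >> 6) & 3)	/* byte 129, bits D7..6 */
#define OUI(o)			(((unsigned)(o)[0] << 16) | \
				 ((unsigned)(o)[1] << 8) | (o)[2]) /* bytes 165..167 */

int main(void)
{
	static const char * const pwr[] = { "1.5W", "2.0W", "2.5W", "3.5W" };
	uint8_t ext_id = 0x40;			/* hypothetical byte 129: class 1 */
	uint8_t oui[3] = { 0x00, 0x21, 0x77 };	/* QSFP_OUI_GORE */

	printf("power class %s, OUI %06X\n", pwr[PWR_CLASS(ext_id)], OUI(oui));
	return 0;
}
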
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
deleted file mode 100644
index 4544d6f88ad7..000000000000
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ /dev/null
@@ -1,2290 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/io.h>
-
-#include "qib.h"
-
-/* cut down ridiculously long IB macro names */
-#define OP(x) IB_OPCODE_RC_##x
-
-static void rc_timeout(unsigned long arg);
-
-static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe,
- u32 psn, u32 pmtu)
-{
- u32 len;
-
- len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
- ss->sge = wqe->sg_list[0];
- ss->sg_list = wqe->sg_list + 1;
- ss->num_sge = wqe->wr.num_sge;
- ss->total_len = wqe->length;
- qib_skip_sge(ss, len, 0);
- return wqe->length - len;
-}
-
-static void start_timer(struct qib_qp *qp)
-{
- qp->s_flags |= QIB_S_TIMER;
- qp->s_timer.function = rc_timeout;
- /* 4.096 usec. * (1 << qp->timeout) */
- qp->s_timer.expires = jiffies + qp->timeout_jiffies;
- add_timer(&qp->s_timer);
-}
-
-/**
- * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
- * @dev: the device for this QP
- * @qp: a pointer to the QP
- * @ohdr: a pointer to the IB header being constructed
- * @pmtu: the path MTU
- *
- * Return 1 if constructed; otherwise, return 0.
- * Note that we are in the responder's side of the QP context.
- * Note the QP s_lock must be held.
- */
-static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
- struct qib_other_headers *ohdr, u32 pmtu)
-{
- struct qib_ack_entry *e;
- u32 hwords;
- u32 len;
- u32 bth0;
- u32 bth2;
-
- /* Don't send an ACK if we aren't supposed to. */
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
- goto bail;
-
- /* header size in 32-bit words LRH+BTH = (8+12)/4. */
- hwords = 5;
-
- switch (qp->s_ack_state) {
- case OP(RDMA_READ_RESPONSE_LAST):
- case OP(RDMA_READ_RESPONSE_ONLY):
- e = &qp->s_ack_queue[qp->s_tail_ack_queue];
- if (e->rdma_sge.mr) {
- qib_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
- /* FALLTHROUGH */
- case OP(ATOMIC_ACKNOWLEDGE):
- /*
- * We can increment the tail pointer now that the last
- * response has been sent instead of only being
- * constructed.
- */
- if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
- qp->s_tail_ack_queue = 0;
- /* FALLTHROUGH */
- case OP(SEND_ONLY):
- case OP(ACKNOWLEDGE):
- /* Check for no next entry in the queue. */
- if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
- if (qp->s_flags & QIB_S_ACK_PENDING)
- goto normal;
- goto bail;
- }
-
- e = &qp->s_ack_queue[qp->s_tail_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST)) {
- /*
- * If a RDMA read response is being resent and
- * we haven't seen the duplicate request yet,
- * then stop sending the remaining responses the
- * responder has seen until the requester resends it.
- */
- len = e->rdma_sge.sge_length;
- if (len && !e->rdma_sge.mr) {
- qp->s_tail_ack_queue = qp->r_head_ack_queue;
- goto bail;
- }
- /* Copy SGE state in case we need to resend */
- qp->s_rdma_mr = e->rdma_sge.mr;
- if (qp->s_rdma_mr)
- qib_get_mr(qp->s_rdma_mr);
- qp->s_ack_rdma_sge.sge = e->rdma_sge;
- qp->s_ack_rdma_sge.num_sge = 1;
- qp->s_cur_sge = &qp->s_ack_rdma_sge;
- if (len > pmtu) {
- len = pmtu;
- qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
- } else {
- qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
- e->sent = 1;
- }
- ohdr->u.aeth = qib_compute_aeth(qp);
- hwords++;
- qp->s_ack_rdma_psn = e->psn;
- bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
- } else {
- /* COMPARE_SWAP or FETCH_ADD */
- qp->s_cur_sge = NULL;
- len = 0;
- qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
- ohdr->u.at.aeth = qib_compute_aeth(qp);
- ohdr->u.at.atomic_ack_eth[0] =
- cpu_to_be32(e->atomic_data >> 32);
- ohdr->u.at.atomic_ack_eth[1] =
- cpu_to_be32(e->atomic_data);
- hwords += sizeof(ohdr->u.at) / sizeof(u32);
- bth2 = e->psn & QIB_PSN_MASK;
- e->sent = 1;
- }
- bth0 = qp->s_ack_state << 24;
- break;
-
- case OP(RDMA_READ_RESPONSE_FIRST):
- qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
- /* FALLTHROUGH */
- case OP(RDMA_READ_RESPONSE_MIDDLE):
- qp->s_cur_sge = &qp->s_ack_rdma_sge;
- qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
- if (qp->s_rdma_mr)
- qib_get_mr(qp->s_rdma_mr);
- len = qp->s_ack_rdma_sge.sge.sge_length;
- if (len > pmtu)
- len = pmtu;
- else {
- ohdr->u.aeth = qib_compute_aeth(qp);
- hwords++;
- qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
- e = &qp->s_ack_queue[qp->s_tail_ack_queue];
- e->sent = 1;
- }
- bth0 = qp->s_ack_state << 24;
- bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
- break;
-
- default:
-normal:
- /*
- * Send a regular ACK.
- * Set the s_ack_state so we wait until after sending
- * the ACK before setting s_ack_state to ACKNOWLEDGE
- * (see above).
- */
- qp->s_ack_state = OP(SEND_ONLY);
- qp->s_flags &= ~QIB_S_ACK_PENDING;
- qp->s_cur_sge = NULL;
- if (qp->s_nak_state)
- ohdr->u.aeth =
- cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
- (qp->s_nak_state <<
- QIB_AETH_CREDIT_SHIFT));
- else
- ohdr->u.aeth = qib_compute_aeth(qp);
- hwords++;
- len = 0;
- bth0 = OP(ACKNOWLEDGE) << 24;
- bth2 = qp->s_ack_psn & QIB_PSN_MASK;
- }
- qp->s_rdma_ack_cnt++;
- qp->s_hdrwords = hwords;
- qp->s_cur_size = len;
- qib_make_ruc_header(qp, ohdr, bth0, bth2);
- return 1;
-
-bail:
- qp->s_ack_state = OP(ACKNOWLEDGE);
- qp->s_flags &= ~(QIB_S_RESP_PENDING | QIB_S_ACK_PENDING);
- return 0;
-}
-
-/**
- * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
- * @qp: a pointer to the QP
- *
- * Return 1 if constructed; otherwise, return 0.
- */
-int qib_make_rc_req(struct qib_qp *qp)
-{
- struct qib_ibdev *dev = to_idev(qp->ibqp.device);
- struct qib_other_headers *ohdr;
- struct qib_sge_state *ss;
- struct qib_swqe *wqe;
- u32 hwords;
- u32 len;
- u32 bth0;
- u32 bth2;
- u32 pmtu = qp->pmtu;
- char newreq;
- unsigned long flags;
- int ret = 0;
- int delta;
-
- ohdr = &qp->s_hdr->u.oth;
- if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
- ohdr = &qp->s_hdr->u.l.oth;
-
- /*
- * The lock is needed to synchronize between the sending tasklet,
- * the receive interrupt handler, and timeout resends.
- */
- spin_lock_irqsave(&qp->s_lock, flags);
-
- /* Sending responses has higher priority over sending requests. */
- if ((qp->s_flags & QIB_S_RESP_PENDING) &&
- qib_make_rc_ack(dev, qp, ohdr, pmtu))
- goto done;
-
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
- if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
- goto bail;
- /* We are in the error state, flush the work request. */
- if (qp->s_last == qp->s_head)
- goto bail;
- /* If DMAs are in progress, we can't flush immediately. */
- if (atomic_read(&qp->s_dma_busy)) {
- qp->s_flags |= QIB_S_WAIT_DMA;
- goto bail;
- }
- wqe = get_swqe_ptr(qp, qp->s_last);
- qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
- IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
- /* will get called again */
- goto done;
- }
-
- if (qp->s_flags & (QIB_S_WAIT_RNR | QIB_S_WAIT_ACK))
- goto bail;
-
- if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
- if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
- qp->s_flags |= QIB_S_WAIT_PSN;
- goto bail;
- }
- qp->s_sending_psn = qp->s_psn;
- qp->s_sending_hpsn = qp->s_psn - 1;
- }
-
- /* header size in 32-bit words LRH+BTH = (8+12)/4. */
- hwords = 5;
- bth0 = 0;
-
- /* Send a request. */
- wqe = get_swqe_ptr(qp, qp->s_cur);
- switch (qp->s_state) {
- default:
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK))
- goto bail;
- /*
- * Resend an old request or start a new one.
- *
- * We keep track of the current SWQE so that
- * we don't reset the "furthest progress" state
- * if we need to back up.
- */
- newreq = 0;
- if (qp->s_cur == qp->s_tail) {
- /* Check if send work queue is empty. */
- if (qp->s_tail == qp->s_head)
- goto bail;
- /*
- * If a fence is requested, wait for previous
- * RDMA read and atomic operations to finish.
- */
- if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
- qp->s_num_rd_atomic) {
- qp->s_flags |= QIB_S_WAIT_FENCE;
- goto bail;
- }
- wqe->psn = qp->s_next_psn;
- newreq = 1;
- }
- /*
- * Note that we have to be careful not to modify the
- * original work request since we may need to resend
- * it.
- */
- len = wqe->length;
- ss = &qp->s_sge;
- bth2 = qp->s_psn & QIB_PSN_MASK;
- switch (wqe->wr.opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_IMM:
- /* If no credit, return. */
- if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
- qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
- qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
- goto bail;
- }
- wqe->lpsn = wqe->psn;
- if (len > pmtu) {
- wqe->lpsn += (len - 1) / pmtu;
- qp->s_state = OP(SEND_FIRST);
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_SEND)
- qp->s_state = OP(SEND_ONLY);
- else {
- qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- }
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- bth2 |= IB_BTH_REQ_ACK;
- if (++qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
-
- case IB_WR_RDMA_WRITE:
- if (newreq && !(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
- qp->s_lsn++;
- /* FALLTHROUGH */
- case IB_WR_RDMA_WRITE_WITH_IMM:
- /* If no credit, return. */
- if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
- qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
- qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
- goto bail;
- }
- ohdr->u.rc.reth.vaddr =
- cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
- ohdr->u.rc.reth.rkey =
- cpu_to_be32(wqe->wr.wr.rdma.rkey);
- ohdr->u.rc.reth.length = cpu_to_be32(len);
- hwords += sizeof(struct ib_reth) / sizeof(u32);
- wqe->lpsn = wqe->psn;
- if (len > pmtu) {
- wqe->lpsn += (len - 1) / pmtu;
- qp->s_state = OP(RDMA_WRITE_FIRST);
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
- qp->s_state = OP(RDMA_WRITE_ONLY);
- else {
- qp->s_state =
- OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
- /* Immediate data comes after RETH */
- ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- }
- bth2 |= IB_BTH_REQ_ACK;
- if (++qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
-
- case IB_WR_RDMA_READ:
- /*
- * Don't allow more operations to be started
- * than the QP limits allow.
- */
- if (newreq) {
- if (qp->s_num_rd_atomic >=
- qp->s_max_rd_atomic) {
- qp->s_flags |= QIB_S_WAIT_RDMAR;
- goto bail;
- }
- qp->s_num_rd_atomic++;
- if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
- qp->s_lsn++;
- /*
- * Adjust s_next_psn to count the
- * expected number of responses.
- */
- if (len > pmtu)
- qp->s_next_psn += (len - 1) / pmtu;
- wqe->lpsn = qp->s_next_psn++;
- }
- ohdr->u.rc.reth.vaddr =
- cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
- ohdr->u.rc.reth.rkey =
- cpu_to_be32(wqe->wr.wr.rdma.rkey);
- ohdr->u.rc.reth.length = cpu_to_be32(len);
- qp->s_state = OP(RDMA_READ_REQUEST);
- hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
- ss = NULL;
- len = 0;
- bth2 |= IB_BTH_REQ_ACK;
- if (++qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
-
- case IB_WR_ATOMIC_CMP_AND_SWP:
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- /*
- * Don't allow more operations to be started
- * than the QP limits allow.
- */
- if (newreq) {
- if (qp->s_num_rd_atomic >=
- qp->s_max_rd_atomic) {
- qp->s_flags |= QIB_S_WAIT_RDMAR;
- goto bail;
- }
- qp->s_num_rd_atomic++;
- if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
- qp->s_lsn++;
- wqe->lpsn = wqe->psn;
- }
- if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
- qp->s_state = OP(COMPARE_SWAP);
- ohdr->u.atomic_eth.swap_data = cpu_to_be64(
- wqe->wr.wr.atomic.swap);
- ohdr->u.atomic_eth.compare_data = cpu_to_be64(
- wqe->wr.wr.atomic.compare_add);
- } else {
- qp->s_state = OP(FETCH_ADD);
- ohdr->u.atomic_eth.swap_data = cpu_to_be64(
- wqe->wr.wr.atomic.compare_add);
- ohdr->u.atomic_eth.compare_data = 0;
- }
- ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
- wqe->wr.wr.atomic.remote_addr >> 32);
- ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
- wqe->wr.wr.atomic.remote_addr);
- ohdr->u.atomic_eth.rkey = cpu_to_be32(
- wqe->wr.wr.atomic.rkey);
- hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
- ss = NULL;
- len = 0;
- bth2 |= IB_BTH_REQ_ACK;
- if (++qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
-
- default:
- goto bail;
- }
- qp->s_sge.sge = wqe->sg_list[0];
- qp->s_sge.sg_list = wqe->sg_list + 1;
- qp->s_sge.num_sge = wqe->wr.num_sge;
- qp->s_sge.total_len = wqe->length;
- qp->s_len = wqe->length;
- if (newreq) {
- qp->s_tail++;
- if (qp->s_tail >= qp->s_size)
- qp->s_tail = 0;
- }
- if (wqe->wr.opcode == IB_WR_RDMA_READ)
- qp->s_psn = wqe->lpsn + 1;
- else {
- qp->s_psn++;
- if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
- qp->s_next_psn = qp->s_psn;
- }
- break;
-
- case OP(RDMA_READ_RESPONSE_FIRST):
- /*
- * qp->s_state is normally set to the opcode of the
- * last packet constructed for new requests and therefore
- * is never set to RDMA read response.
- * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
- * thread to indicate a SEND needs to be restarted from an
-	 * earlier PSN without interfering with the sending thread.
- * See qib_restart_rc().
- */
- qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
- /* FALLTHROUGH */
- case OP(SEND_FIRST):
- qp->s_state = OP(SEND_MIDDLE);
- /* FALLTHROUGH */
- case OP(SEND_MIDDLE):
- bth2 = qp->s_psn++ & QIB_PSN_MASK;
- if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
- qp->s_next_psn = qp->s_psn;
- ss = &qp->s_sge;
- len = qp->s_len;
- if (len > pmtu) {
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_SEND)
- qp->s_state = OP(SEND_LAST);
- else {
- qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- }
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- bth2 |= IB_BTH_REQ_ACK;
- qp->s_cur++;
- if (qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
-
- case OP(RDMA_READ_RESPONSE_LAST):
- /*
- * qp->s_state is normally set to the opcode of the
- * last packet constructed for new requests and therefore
- * is never set to RDMA read response.
- * RDMA_READ_RESPONSE_LAST is used by the ACK processing
- * thread to indicate a RDMA write needs to be restarted from
-	 * an earlier PSN without interfering with the sending thread.
- * See qib_restart_rc().
- */
- qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
- /* FALLTHROUGH */
- case OP(RDMA_WRITE_FIRST):
- qp->s_state = OP(RDMA_WRITE_MIDDLE);
- /* FALLTHROUGH */
- case OP(RDMA_WRITE_MIDDLE):
- bth2 = qp->s_psn++ & QIB_PSN_MASK;
- if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
- qp->s_next_psn = qp->s_psn;
- ss = &qp->s_sge;
- len = qp->s_len;
- if (len > pmtu) {
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
- qp->s_state = OP(RDMA_WRITE_LAST);
- else {
- qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- }
- bth2 |= IB_BTH_REQ_ACK;
- qp->s_cur++;
- if (qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
-
- case OP(RDMA_READ_RESPONSE_MIDDLE):
- /*
- * qp->s_state is normally set to the opcode of the
- * last packet constructed for new requests and therefore
- * is never set to RDMA read response.
- * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
- * thread to indicate a RDMA read needs to be restarted from
-	 * an earlier PSN without interfering with the sending thread.
- * See qib_restart_rc().
- */
- len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
- ohdr->u.rc.reth.vaddr =
- cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
- ohdr->u.rc.reth.rkey =
- cpu_to_be32(wqe->wr.wr.rdma.rkey);
- ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
- qp->s_state = OP(RDMA_READ_REQUEST);
- hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
- bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK;
- qp->s_psn = wqe->lpsn + 1;
- ss = NULL;
- len = 0;
- qp->s_cur++;
- if (qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
- }
- qp->s_sending_hpsn = bth2;
- delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
- if (delta && delta % QIB_PSN_CREDIT == 0)
- bth2 |= IB_BTH_REQ_ACK;
- if (qp->s_flags & QIB_S_SEND_ONE) {
- qp->s_flags &= ~QIB_S_SEND_ONE;
- qp->s_flags |= QIB_S_WAIT_ACK;
- bth2 |= IB_BTH_REQ_ACK;
- }
- qp->s_len -= len;
- qp->s_hdrwords = hwords;
- qp->s_cur_sge = ss;
- qp->s_cur_size = len;
- qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);
-done:
- ret = 1;
- goto unlock;
-
-bail:
- qp->s_flags &= ~QIB_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
- return ret;
-}
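
A note on the PSN arithmetic used throughout this file (for example the "delta" computation near the end of qib_make_rc_req() above): PSNs are 24-bit sequence numbers, so distances and comparisons are taken modulo 2^24 by sign-extending the 24-bit difference. Below is a tiny standalone illustration of that idea; qib_cmp24() itself is defined elsewhere in the driver and is not reproduced here.

#include <stdio.h>
#include <stdint.h>

/* Signed distance between two 24-bit PSNs: shift the difference so bit 23
 * becomes the sign bit, then shift back arithmetically. */
static int psn_delta24(uint32_t a, uint32_t b)
{
	return (int32_t)((a - b) << 8) >> 8;
}

int main(void)
{
	printf("%d\n", psn_delta24(0x000002, 0xFFFFFD));	/* 5: wrapped past 2^24 */
	printf("%d\n", psn_delta24(0x000100, 0x000105));	/* -5 */
	return 0;
}
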
-
-/**
- * qib_send_rc_ack - Construct an ACK packet and send it
- * @qp: a pointer to the QP
- *
- * This is called from qib_rc_rcv() and qib_kreceive().
- * Note that RDMA reads and atomics are handled in the
- * send side QP state and tasklet.
- */
-void qib_send_rc_ack(struct qib_qp *qp)
-{
- struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
- struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- u64 pbc;
- u16 lrh0;
- u32 bth0;
- u32 hwords;
- u32 pbufn;
- u32 __iomem *piobuf;
- struct qib_ib_header hdr;
- struct qib_other_headers *ohdr;
- u32 control;
- unsigned long flags;
-
- spin_lock_irqsave(&qp->s_lock, flags);
-
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
- goto unlock;
-
- /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
- if ((qp->s_flags & QIB_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
- goto queue_ack;
-
- /* Construct the header with s_lock held so APM doesn't change it. */
- ohdr = &hdr.u.oth;
- lrh0 = QIB_LRH_BTH;
- /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
- hwords = 6;
- if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
- hwords += qib_make_grh(ibp, &hdr.u.l.grh,
- &qp->remote_ah_attr.grh, hwords, 0);
- ohdr = &hdr.u.l.oth;
- lrh0 = QIB_LRH_GRH;
- }
-	/* read pkey_index w/o lock (it's atomic) */
- bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
- if (qp->s_mig_state == IB_MIG_MIGRATED)
- bth0 |= IB_BTH_MIG_REQ;
- if (qp->r_nak_state)
- ohdr->u.aeth = cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
- (qp->r_nak_state <<
- QIB_AETH_CREDIT_SHIFT));
- else
- ohdr->u.aeth = qib_compute_aeth(qp);
- lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
- qp->remote_ah_attr.sl << 4;
- hdr.lrh[0] = cpu_to_be16(lrh0);
- hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
- hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
- hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
- ohdr->bth[0] = cpu_to_be32(bth0);
- ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
- ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK);
-
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
- /* Don't try to send ACKs if the link isn't ACTIVE */
- if (!(ppd->lflags & QIBL_LINKACTIVE))
- goto done;
-
- control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC,
- qp->s_srate, lrh0 >> 12);
- /* length is + 1 for the control dword */
- pbc = ((u64) control << 32) | (hwords + 1);
-
- piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
- if (!piobuf) {
- /*
- * We are out of PIO buffers at the moment.
- * Pass responsibility for sending the ACK to the
- * send tasklet so that when a PIO buffer becomes
- * available, the ACK is sent ahead of other outgoing
- * packets.
- */
- spin_lock_irqsave(&qp->s_lock, flags);
- goto queue_ack;
- }
-
- /*
- * Write the pbc.
- * We have to flush after the PBC for correctness
-	 * on some CPUs or the WC buffer can be written out of order.
- */
- writeq(pbc, piobuf);
-
- if (dd->flags & QIB_PIO_FLUSH_WC) {
- u32 *hdrp = (u32 *) &hdr;
-
- qib_flush_wc();
- qib_pio_copy(piobuf + 2, hdrp, hwords - 1);
- qib_flush_wc();
- __raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
- } else
- qib_pio_copy(piobuf + 2, (u32 *) &hdr, hwords);
-
- if (dd->flags & QIB_USE_SPCL_TRIG) {
- u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
-
- qib_flush_wc();
- __raw_writel(0xaebecede, piobuf + spcl_off);
- }
-
- qib_flush_wc();
- qib_sendbuf_done(dd, pbufn);
-
- this_cpu_inc(ibp->pmastats->n_unicast_xmit);
- goto done;
-
-queue_ack:
- if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
- ibp->n_rc_qacks++;
- qp->s_flags |= QIB_S_ACK_PENDING | QIB_S_RESP_PENDING;
- qp->s_nak_state = qp->r_nak_state;
- qp->s_ack_psn = qp->r_ack_psn;
-
- /* Schedule the send tasklet. */
- qib_schedule_send(qp);
- }
-unlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-done:
- return;
-}
-
-/**
- * reset_psn - reset the QP state to send starting from PSN
- * @qp: the QP
- * @psn: the packet sequence number to restart at
- *
- * This is called from qib_rc_rcv() to process an incoming RC ACK
- * for the given QP.
- * Called at interrupt level with the QP s_lock held.
- */
-static void reset_psn(struct qib_qp *qp, u32 psn)
-{
- u32 n = qp->s_acked;
- struct qib_swqe *wqe = get_swqe_ptr(qp, n);
- u32 opcode;
-
- qp->s_cur = n;
-
- /*
- * If we are starting the request from the beginning,
- * let the normal send code handle initialization.
- */
- if (qib_cmp24(psn, wqe->psn) <= 0) {
- qp->s_state = OP(SEND_LAST);
- goto done;
- }
-
- /* Find the work request opcode corresponding to the given PSN. */
- opcode = wqe->wr.opcode;
- for (;;) {
- int diff;
-
- if (++n == qp->s_size)
- n = 0;
- if (n == qp->s_tail)
- break;
- wqe = get_swqe_ptr(qp, n);
- diff = qib_cmp24(psn, wqe->psn);
- if (diff < 0)
- break;
- qp->s_cur = n;
- /*
- * If we are starting the request from the beginning,
- * let the normal send code handle initialization.
- */
- if (diff == 0) {
- qp->s_state = OP(SEND_LAST);
- goto done;
- }
- opcode = wqe->wr.opcode;
- }
-
- /*
- * Set the state to restart in the middle of a request.
- * Don't change the s_sge, s_cur_sge, or s_cur_size.
- * See qib_make_rc_req().
- */
- switch (opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_IMM:
- qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
- break;
-
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
- break;
-
- case IB_WR_RDMA_READ:
- qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
- break;
-
- default:
- /*
-		 * This case shouldn't happen since there is only
-		 * one PSN per request.
- */
- qp->s_state = OP(SEND_LAST);
- }
-done:
- qp->s_psn = psn;
- /*
- * Set QIB_S_WAIT_PSN as qib_rc_complete() may start the timer
- * asynchronously before the send tasklet can get scheduled.
- * Doing it in qib_make_rc_req() is too late.
- */
- if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
- (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
- qp->s_flags |= QIB_S_WAIT_PSN;
-}
-
-/*
- * Back up requester to resend the last un-ACKed request.
- * The QP r_lock and s_lock should be held and interrupts disabled.
- */
-static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
-{
- struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_acked);
- struct qib_ibport *ibp;
-
- if (qp->s_retry == 0) {
- if (qp->s_mig_state == IB_MIG_ARMED) {
- qib_migrate_qp(qp);
- qp->s_retry = qp->s_retry_cnt;
- } else if (qp->s_last == qp->s_acked) {
- qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
- qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
- return;
- } else /* XXX need to handle delayed completion */
- return;
- } else
- qp->s_retry--;
-
- ibp = to_iport(qp->ibqp.device, qp->port_num);
- if (wqe->wr.opcode == IB_WR_RDMA_READ)
- ibp->n_rc_resends++;
- else
- ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
-
- qp->s_flags &= ~(QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR |
- QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_PSN |
- QIB_S_WAIT_ACK);
- if (wait)
- qp->s_flags |= QIB_S_SEND_ONE;
- reset_psn(qp, psn);
-}
-
-/*
- * This is called from s_timer for missing responses.
- */
-static void rc_timeout(unsigned long arg)
-{
- struct qib_qp *qp = (struct qib_qp *)arg;
- struct qib_ibport *ibp;
- unsigned long flags;
-
- spin_lock_irqsave(&qp->r_lock, flags);
- spin_lock(&qp->s_lock);
- if (qp->s_flags & QIB_S_TIMER) {
- ibp = to_iport(qp->ibqp.device, qp->port_num);
- ibp->n_rc_timeouts++;
- qp->s_flags &= ~QIB_S_TIMER;
- del_timer(&qp->s_timer);
- qib_restart_rc(qp, qp->s_last_psn + 1, 1);
- qib_schedule_send(qp);
- }
- spin_unlock(&qp->s_lock);
- spin_unlock_irqrestore(&qp->r_lock, flags);
-}
-
-/*
- * This is called from s_timer for RNR timeouts.
- */
-void qib_rc_rnr_retry(unsigned long arg)
-{
- struct qib_qp *qp = (struct qib_qp *)arg;
- unsigned long flags;
-
- spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_flags & QIB_S_WAIT_RNR) {
- qp->s_flags &= ~QIB_S_WAIT_RNR;
- del_timer(&qp->s_timer);
- qib_schedule_send(qp);
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
-}
-
-/*
- * Set qp->s_sending_psn to the next PSN after the given one.
- * This would be psn+1 except when RDMA reads are present.
- */
-static void reset_sending_psn(struct qib_qp *qp, u32 psn)
-{
- struct qib_swqe *wqe;
- u32 n = qp->s_last;
-
- /* Find the work request corresponding to the given PSN. */
- for (;;) {
- wqe = get_swqe_ptr(qp, n);
- if (qib_cmp24(psn, wqe->lpsn) <= 0) {
- if (wqe->wr.opcode == IB_WR_RDMA_READ)
- qp->s_sending_psn = wqe->lpsn + 1;
- else
- qp->s_sending_psn = psn + 1;
- break;
- }
- if (++n == qp->s_size)
- n = 0;
- if (n == qp->s_tail)
- break;
- }
-}
-
-/*
- * This should be called with the QP s_lock held and interrupts disabled.
- */
-void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
-{
- struct qib_other_headers *ohdr;
- struct qib_swqe *wqe;
- struct ib_wc wc;
- unsigned i;
- u32 opcode;
- u32 psn;
-
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
- return;
-
- /* Find out where the BTH is */
- if ((be16_to_cpu(hdr->lrh[0]) & 3) == QIB_LRH_BTH)
- ohdr = &hdr->u.oth;
- else
- ohdr = &hdr->u.l.oth;
-
- opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
- if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
- opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
- WARN_ON(!qp->s_rdma_ack_cnt);
- qp->s_rdma_ack_cnt--;
- return;
- }
-
- psn = be32_to_cpu(ohdr->bth[2]);
- reset_sending_psn(qp, psn);
-
- /*
- * Start timer after a packet requesting an ACK has been sent and
- * there are still requests that haven't been acked.
- */
- if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
- !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) &&
- (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
- start_timer(qp);
-
- while (qp->s_last != qp->s_acked) {
- wqe = get_swqe_ptr(qp, qp->s_last);
- if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
- qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
- break;
- for (i = 0; i < wqe->wr.num_sge; i++) {
- struct qib_sge *sge = &wqe->sg_list[i];
-
- qib_put_mr(sge->mr);
- }
- /* Post a send completion queue entry if requested. */
- if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
- (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr.wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
- wc.byte_len = wqe->length;
- wc.qp = &qp->ibqp;
- qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
- }
- if (++qp->s_last >= qp->s_size)
- qp->s_last = 0;
- }
- /*
- * If we were waiting for sends to complete before resending,
- * and they are now complete, restart sending.
- */
- if (qp->s_flags & QIB_S_WAIT_PSN &&
- qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
- qp->s_flags &= ~QIB_S_WAIT_PSN;
- qp->s_sending_psn = qp->s_psn;
- qp->s_sending_hpsn = qp->s_psn - 1;
- qib_schedule_send(qp);
- }
-}
-
-static inline void update_last_psn(struct qib_qp *qp, u32 psn)
-{
- qp->s_last_psn = psn;
-}
-
-/*
- * Generate a SWQE completion.
- * This is similar to qib_send_complete but has to check to be sure
- * that the SGEs are not being referenced if the SWQE is being resent.
- */
-static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
- struct qib_swqe *wqe,
- struct qib_ibport *ibp)
-{
- struct ib_wc wc;
- unsigned i;
-
- /*
- * Don't decrement refcount and don't generate a
- * completion if the SWQE is being resent until the send
- * is finished.
- */
- if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
- qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
- for (i = 0; i < wqe->wr.num_sge; i++) {
- struct qib_sge *sge = &wqe->sg_list[i];
-
- qib_put_mr(sge->mr);
- }
- /* Post a send completion queue entry if requested. */
- if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
- (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr.wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
- wc.byte_len = wqe->length;
- wc.qp = &qp->ibqp;
- qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
- }
- if (++qp->s_last >= qp->s_size)
- qp->s_last = 0;
- } else
- ibp->n_rc_delayed_comp++;
-
- qp->s_retry = qp->s_retry_cnt;
- update_last_psn(qp, wqe->lpsn);
-
- /*
- * If we are completing a request which is in the process of
- * being resent, we can stop resending it since we know the
- * responder has already seen it.
- */
- if (qp->s_acked == qp->s_cur) {
- if (++qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- qp->s_acked = qp->s_cur;
- wqe = get_swqe_ptr(qp, qp->s_cur);
- if (qp->s_acked != qp->s_tail) {
- qp->s_state = OP(SEND_LAST);
- qp->s_psn = wqe->psn;
- }
- } else {
- if (++qp->s_acked >= qp->s_size)
- qp->s_acked = 0;
- if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
- qp->s_draining = 0;
- wqe = get_swqe_ptr(qp, qp->s_acked);
- }
- return wqe;
-}
-
-/**
- * do_rc_ack - process an incoming RC ACK
- * @qp: the QP the ACK came in on
- * @psn: the packet sequence number of the ACK
- * @opcode: the opcode of the request that resulted in the ACK
- *
- * This is called from qib_rc_rcv_resp() to process an incoming RC ACK
- * for the given QP.
- * Called at interrupt level with the QP s_lock held.
- * Returns 1 if OK, 0 if current operation should be aborted (NAK).
- */
-static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
- u64 val, struct qib_ctxtdata *rcd)
-{
- struct qib_ibport *ibp;
- enum ib_wc_status status;
- struct qib_swqe *wqe;
- int ret = 0;
- u32 ack_psn;
- int diff;
-
- /* Remove QP from retry timer */
- if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
- qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
- del_timer(&qp->s_timer);
- }
-
- /*
- * Note that NAKs implicitly ACK outstanding SEND and RDMA write
- * requests and implicitly NAK RDMA read and atomic requests issued
- * before the NAK'ed request. The MSN won't include the NAK'ed
-	 * request but will include any ACK'ed requests.
- */
- ack_psn = psn;
- if (aeth >> 29)
- ack_psn--;
- wqe = get_swqe_ptr(qp, qp->s_acked);
- ibp = to_iport(qp->ibqp.device, qp->port_num);
-
- /*
- * The MSN might be for a later WQE than the PSN indicates so
- * only complete WQEs that the PSN finishes.
- */
- while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) {
- /*
- * RDMA_READ_RESPONSE_ONLY is a special case since
- * we want to generate completion events for everything
- * before the RDMA read, copy the data, then generate
- * the completion for the read.
- */
- if (wqe->wr.opcode == IB_WR_RDMA_READ &&
- opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
- diff == 0) {
- ret = 1;
- goto bail;
- }
- /*
- * If this request is a RDMA read or atomic, and the ACK is
- * for a later operation, this ACK NAKs the RDMA read or
- * atomic. In other words, only a RDMA_READ_LAST or ONLY
- * can ACK a RDMA read and likewise for atomic ops. Note
- * that the NAK case can only happen if relaxed ordering is
- * used and requests are sent after an RDMA read or atomic
- * is sent but before the response is received.
- */
- if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
- (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
- ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
- (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
- /* Retry this request. */
- if (!(qp->r_flags & QIB_R_RDMAR_SEQ)) {
- qp->r_flags |= QIB_R_RDMAR_SEQ;
- qib_restart_rc(qp, qp->s_last_psn + 1, 0);
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= QIB_R_RSP_SEND;
- atomic_inc(&qp->refcount);
- list_add_tail(&qp->rspwait,
- &rcd->qp_wait_list);
- }
- }
- /*
- * No need to process the ACK/NAK since we are
- * restarting an earlier request.
- */
- goto bail;
- }
- if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
- u64 *vaddr = wqe->sg_list[0].vaddr;
- *vaddr = val;
- }
- if (qp->s_num_rd_atomic &&
- (wqe->wr.opcode == IB_WR_RDMA_READ ||
- wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
- qp->s_num_rd_atomic--;
- /* Restart sending task if fence is complete */
- if ((qp->s_flags & QIB_S_WAIT_FENCE) &&
- !qp->s_num_rd_atomic) {
- qp->s_flags &= ~(QIB_S_WAIT_FENCE |
- QIB_S_WAIT_ACK);
- qib_schedule_send(qp);
- } else if (qp->s_flags & QIB_S_WAIT_RDMAR) {
- qp->s_flags &= ~(QIB_S_WAIT_RDMAR |
- QIB_S_WAIT_ACK);
- qib_schedule_send(qp);
- }
- }
- wqe = do_rc_completion(qp, wqe, ibp);
- if (qp->s_acked == qp->s_tail)
- break;
- }
-
- switch (aeth >> 29) {
- case 0: /* ACK */
- ibp->n_rc_acks++;
- if (qp->s_acked != qp->s_tail) {
- /*
- * We are expecting more ACKs so
- * reset the retransmit timer.
- */
- start_timer(qp);
- /*
- * We can stop resending the earlier packets and
- * continue with the next packet the receiver wants.
- */
- if (qib_cmp24(qp->s_psn, psn) <= 0)
- reset_psn(qp, psn + 1);
- } else if (qib_cmp24(qp->s_psn, psn) <= 0) {
- qp->s_state = OP(SEND_LAST);
- qp->s_psn = psn + 1;
- }
- if (qp->s_flags & QIB_S_WAIT_ACK) {
- qp->s_flags &= ~QIB_S_WAIT_ACK;
- qib_schedule_send(qp);
- }
- qib_get_credit(qp, aeth);
- qp->s_rnr_retry = qp->s_rnr_retry_cnt;
- qp->s_retry = qp->s_retry_cnt;
- update_last_psn(qp, psn);
- ret = 1;
- goto bail;
-
- case 1: /* RNR NAK */
- ibp->n_rnr_naks++;
- if (qp->s_acked == qp->s_tail)
- goto bail;
- if (qp->s_flags & QIB_S_WAIT_RNR)
- goto bail;
- if (qp->s_rnr_retry == 0) {
- status = IB_WC_RNR_RETRY_EXC_ERR;
- goto class_b;
- }
- if (qp->s_rnr_retry_cnt < 7)
- qp->s_rnr_retry--;
-
- /* The last valid PSN is the previous PSN. */
- update_last_psn(qp, psn - 1);
-
- ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
-
- reset_psn(qp, psn);
-
- qp->s_flags &= ~(QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_ACK);
- qp->s_flags |= QIB_S_WAIT_RNR;
- qp->s_timer.function = qib_rc_rnr_retry;
- qp->s_timer.expires = jiffies + usecs_to_jiffies(
- ib_qib_rnr_table[(aeth >> QIB_AETH_CREDIT_SHIFT) &
- QIB_AETH_CREDIT_MASK]);
- add_timer(&qp->s_timer);
- goto bail;
-
- case 3: /* NAK */
- if (qp->s_acked == qp->s_tail)
- goto bail;
- /* The last valid PSN is the previous PSN. */
- update_last_psn(qp, psn - 1);
- switch ((aeth >> QIB_AETH_CREDIT_SHIFT) &
- QIB_AETH_CREDIT_MASK) {
- case 0: /* PSN sequence error */
- ibp->n_seq_naks++;
- /*
- * Back up to the responder's expected PSN.
- * Note that we might get a NAK in the middle of an
- * RDMA READ response which terminates the RDMA
- * READ.
- */
- qib_restart_rc(qp, psn, 0);
- qib_schedule_send(qp);
- break;
-
- case 1: /* Invalid Request */
- status = IB_WC_REM_INV_REQ_ERR;
- ibp->n_other_naks++;
- goto class_b;
-
- case 2: /* Remote Access Error */
- status = IB_WC_REM_ACCESS_ERR;
- ibp->n_other_naks++;
- goto class_b;
-
- case 3: /* Remote Operation Error */
- status = IB_WC_REM_OP_ERR;
- ibp->n_other_naks++;
-class_b:
- if (qp->s_last == qp->s_acked) {
- qib_send_complete(qp, wqe, status);
- qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
- }
- break;
-
- default:
- /* Ignore other reserved NAK error codes */
- goto reserved;
- }
- qp->s_retry = qp->s_retry_cnt;
- qp->s_rnr_retry = qp->s_rnr_retry_cnt;
- goto bail;
-
- default: /* 2: reserved */
-reserved:
- /* Ignore reserved NAK codes. */
- goto bail;
- }
-
-bail:
- return ret;
-}
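
For orientation while reading do_rc_ack() above: the AETH word it switches on packs an ACK/NAK type in bits 31..29 (0 ACK, 1 RNR NAK, 3 NAK, 2 reserved), a 5-bit credit / RNR-timer / NAK-code field below that, and the 24-bit MSN in the low bits. The shift and mask constants in this sketch are assumptions mirroring the QIB_AETH_* and QIB_MSN_MASK definitions, which live outside this file.

#include <stdio.h>
#include <stdint.h>

#define AETH_TYPE(aeth)		((aeth) >> 29)		/* 0 ACK, 1 RNR NAK, 3 NAK */
#define AETH_CREDIT(aeth)	(((aeth) >> 24) & 0x1F)	/* credit / timer / NAK code */
#define AETH_MSN(aeth)		((aeth) & 0xFFFFFF)

int main(void)
{
	uint32_t aeth = (1u << 29) | (14u << 24) | 0x000123;	/* made-up RNR NAK */

	printf("type %u, credit/timer %u, msn 0x%06X\n",
	       AETH_TYPE(aeth), AETH_CREDIT(aeth), AETH_MSN(aeth));
	return 0;
}
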
-
-/*
- * We have seen an out of sequence RDMA read middle or last packet.
- * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
- */
-static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn,
- struct qib_ctxtdata *rcd)
-{
- struct qib_swqe *wqe;
-
- /* Remove QP from retry timer */
- if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
- qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
- del_timer(&qp->s_timer);
- }
-
- wqe = get_swqe_ptr(qp, qp->s_acked);
-
- while (qib_cmp24(psn, wqe->lpsn) > 0) {
- if (wqe->wr.opcode == IB_WR_RDMA_READ ||
- wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
- break;
- wqe = do_rc_completion(qp, wqe, ibp);
- }
-
- ibp->n_rdma_seq++;
- qp->r_flags |= QIB_R_RDMAR_SEQ;
- qib_restart_rc(qp, qp->s_last_psn + 1, 0);
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= QIB_R_RSP_SEND;
- atomic_inc(&qp->refcount);
- list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
- }
-}
-
-/**
- * qib_rc_rcv_resp - process an incoming RC response packet
- * @ibp: the port this packet came in on
- * @ohdr: the other headers for this packet
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP for this packet
- * @opcode: the opcode for this packet
- * @psn: the packet sequence number for this packet
- * @hdrsize: the header length
- * @pmtu: the path MTU
- *
- * This is called from qib_rc_rcv() to process an incoming RC response
- * packet for the given QP.
- * Called at interrupt level.
- */
-static void qib_rc_rcv_resp(struct qib_ibport *ibp,
- struct qib_other_headers *ohdr,
- void *data, u32 tlen,
- struct qib_qp *qp,
- u32 opcode,
- u32 psn, u32 hdrsize, u32 pmtu,
- struct qib_ctxtdata *rcd)
-{
- struct qib_swqe *wqe;
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- enum ib_wc_status status;
- unsigned long flags;
- int diff;
- u32 pad;
- u32 aeth;
- u64 val;
-
- if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
- /*
-		 * If the ACK'd PSN is on the SDMA busy list, try to make
-		 * progress to reclaim SDMA credits.
- */
- if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
- (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {
-
- /*
-			 * If the send tasklet is not running, attempt to
-			 * progress the SDMA queue.
- */
- if (!(qp->s_flags & QIB_S_BUSY)) {
- /* Acquire SDMA Lock */
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- /* Invoke sdma make progress */
- qib_sdma_make_progress(ppd);
- /* Release SDMA Lock */
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- }
- }
- }
-
- spin_lock_irqsave(&qp->s_lock, flags);
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
- goto ack_done;
-
- /* Ignore invalid responses. */
- if (qib_cmp24(psn, qp->s_next_psn) >= 0)
- goto ack_done;
-
- /* Ignore duplicate responses. */
- diff = qib_cmp24(psn, qp->s_last_psn);
- if (unlikely(diff <= 0)) {
- /* Update credits for "ghost" ACKs */
- if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
- aeth = be32_to_cpu(ohdr->u.aeth);
- if ((aeth >> 29) == 0)
- qib_get_credit(qp, aeth);
- }
- goto ack_done;
- }
-
- /*
- * Skip everything other than the PSN we expect, if we are waiting
- * for a reply to a restarted RDMA read or atomic op.
- */
- if (qp->r_flags & QIB_R_RDMAR_SEQ) {
- if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
- goto ack_done;
- qp->r_flags &= ~QIB_R_RDMAR_SEQ;
- }
-
- if (unlikely(qp->s_acked == qp->s_tail))
- goto ack_done;
- wqe = get_swqe_ptr(qp, qp->s_acked);
- status = IB_WC_SUCCESS;
-
- switch (opcode) {
- case OP(ACKNOWLEDGE):
- case OP(ATOMIC_ACKNOWLEDGE):
- case OP(RDMA_READ_RESPONSE_FIRST):
- aeth = be32_to_cpu(ohdr->u.aeth);
- if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
- __be32 *p = ohdr->u.at.atomic_ack_eth;
-
- val = ((u64) be32_to_cpu(p[0]) << 32) |
- be32_to_cpu(p[1]);
- } else
- val = 0;
- if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
- opcode != OP(RDMA_READ_RESPONSE_FIRST))
- goto ack_done;
- hdrsize += 4;
- wqe = get_swqe_ptr(qp, qp->s_acked);
- if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
- goto ack_op_err;
- /*
- * If this is a response to a resent RDMA read, we
- * have to be careful to copy the data to the right
- * location.
- */
- qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
- wqe, psn, pmtu);
- goto read_middle;
-
- case OP(RDMA_READ_RESPONSE_MIDDLE):
- /* no AETH, no ACK */
- if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
- goto ack_seq_err;
- if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
- goto ack_op_err;
-read_middle:
- if (unlikely(tlen != (hdrsize + pmtu + 4)))
- goto ack_len_err;
- if (unlikely(pmtu >= qp->s_rdma_read_len))
- goto ack_len_err;
-
- /*
- * We got a response so update the timeout.
- * 4.096 usec. * (1 << qp->timeout)
- */
- qp->s_flags |= QIB_S_TIMER;
- mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
- if (qp->s_flags & QIB_S_WAIT_ACK) {
- qp->s_flags &= ~QIB_S_WAIT_ACK;
- qib_schedule_send(qp);
- }
-
- if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
- qp->s_retry = qp->s_retry_cnt;
-
- /*
- * Update the RDMA receive state but do the copy w/o
- * holding the locks and blocking interrupts.
- */
- qp->s_rdma_read_len -= pmtu;
- update_last_psn(qp, psn);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
- goto bail;
-
- case OP(RDMA_READ_RESPONSE_ONLY):
- aeth = be32_to_cpu(ohdr->u.aeth);
- if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
- goto ack_done;
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /*
- * Check that the data size is >= 0 && <= pmtu.
- * Remember to account for the AETH header (4) and
- * ICRC (4).
- */
- if (unlikely(tlen < (hdrsize + pad + 8)))
- goto ack_len_err;
- /*
- * If this is a response to a resent RDMA read, we
- * have to be careful to copy the data to the right
- * location.
- */
- wqe = get_swqe_ptr(qp, qp->s_acked);
- qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
- wqe, psn, pmtu);
- goto read_last;
-
- case OP(RDMA_READ_RESPONSE_LAST):
- /* ACKs READ req. */
- if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
- goto ack_seq_err;
- if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
- goto ack_op_err;
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /*
- * Check that the data size is >= 1 && <= pmtu.
- * Remember to account for the AETH header (4) and
- * ICRC (4).
- */
- if (unlikely(tlen <= (hdrsize + pad + 8)))
- goto ack_len_err;
-read_last:
- tlen -= hdrsize + pad + 8;
- if (unlikely(tlen != qp->s_rdma_read_len))
- goto ack_len_err;
- aeth = be32_to_cpu(ohdr->u.aeth);
- qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
- WARN_ON(qp->s_rdma_read_sge.num_sge);
- (void) do_rc_ack(qp, aeth, psn,
- OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
- goto ack_done;
- }
-
-ack_op_err:
- status = IB_WC_LOC_QP_OP_ERR;
- goto ack_err;
-
-ack_seq_err:
- rdma_seq_err(qp, ibp, psn, rcd);
- goto ack_done;
-
-ack_len_err:
- status = IB_WC_LOC_LEN_ERR;
-ack_err:
- if (qp->s_last == qp->s_acked) {
- qib_send_complete(qp, wqe, status);
- qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
- }
-ack_done:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-bail:
- return;
-}
-
-/**
- * qib_rc_rcv_error - process an incoming duplicate or error RC packet
- * @ohdr: the other headers for this packet
- * @data: the packet data
- * @qp: the QP for this packet
- * @opcode: the opcode for this packet
- * @psn: the packet sequence number for this packet
- * @diff: the difference between the PSN and the expected PSN
- *
- * This is called from qib_rc_rcv() to process an unexpected
- * incoming RC packet for the given QP.
- * Called at interrupt level.
- * Return 1 if no more processing is needed; otherwise return 0 to
- * schedule a response to be sent.
- */
-static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
- void *data,
- struct qib_qp *qp,
- u32 opcode,
- u32 psn,
- int diff,
- struct qib_ctxtdata *rcd)
-{
- struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct qib_ack_entry *e;
- unsigned long flags;
- u8 i, prev;
- int old_req;
-
- if (diff > 0) {
- /*
- * Packet sequence error.
- * A NAK will ACK earlier sends and RDMA writes.
- * Don't queue the NAK if we already sent one.
- */
- if (!qp->r_nak_state) {
- ibp->n_rc_seqnak++;
- qp->r_nak_state = IB_NAK_PSN_ERROR;
- /* Use the expected PSN. */
- qp->r_ack_psn = qp->r_psn;
- /*
- * Wait to send the sequence NAK until all packets
- * in the receive queue have been processed.
- * Otherwise, we end up propagating congestion.
- */
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= QIB_R_RSP_NAK;
- atomic_inc(&qp->refcount);
- list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
- }
- }
- goto done;
- }
-
- /*
- * Handle a duplicate request. Don't re-execute SEND, RDMA
- * write or atomic op. Don't NAK errors, just silently drop
- * the duplicate request. Note that r_sge, r_len, and
- * r_rcv_len may be in use so don't modify them.
- *
- * We are supposed to ACK the earliest duplicate PSN but we
- * can coalesce an outstanding duplicate ACK. We have to
- * send the earliest so that RDMA reads can be restarted at
- * the requester's expected PSN.
- *
- * First, find where this duplicate PSN falls within the
- * ACKs previously sent.
- * old_req is true if there is an older response that is scheduled
- * to be sent before sending this one.
- */
- e = NULL;
- old_req = 1;
- ibp->n_rc_dupreq++;
-
- spin_lock_irqsave(&qp->s_lock, flags);
-
- for (i = qp->r_head_ack_queue; ; i = prev) {
- if (i == qp->s_tail_ack_queue)
- old_req = 0;
- if (i)
- prev = i - 1;
- else
- prev = QIB_MAX_RDMA_ATOMIC;
- if (prev == qp->r_head_ack_queue) {
- e = NULL;
- break;
- }
- e = &qp->s_ack_queue[prev];
- if (!e->opcode) {
- e = NULL;
- break;
- }
- if (qib_cmp24(psn, e->psn) >= 0) {
- if (prev == qp->s_tail_ack_queue &&
- qib_cmp24(psn, e->lpsn) <= 0)
- old_req = 0;
- break;
- }
- }
- switch (opcode) {
- case OP(RDMA_READ_REQUEST): {
- struct ib_reth *reth;
- u32 offset;
- u32 len;
-
- /*
- * If we didn't find the RDMA read request in the ack queue,
- * we can ignore this request.
- */
- if (!e || e->opcode != OP(RDMA_READ_REQUEST))
- goto unlock_done;
- /* RETH comes after BTH */
- reth = &ohdr->u.rc.reth;
- /*
- * Address range must be a subset of the original
- * request and start on pmtu boundaries.
- * We reuse the old ack_queue slot since the requester
- * should not back up and request an earlier PSN for the
- * same request.
- */
- offset = ((psn - e->psn) & QIB_PSN_MASK) *
- qp->pmtu;
- len = be32_to_cpu(reth->length);
- if (unlikely(offset + len != e->rdma_sge.sge_length))
- goto unlock_done;
- if (e->rdma_sge.mr) {
- qib_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
- if (len != 0) {
- u32 rkey = be32_to_cpu(reth->rkey);
- u64 vaddr = be64_to_cpu(reth->vaddr);
- int ok;
-
- ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
- IB_ACCESS_REMOTE_READ);
- if (unlikely(!ok))
- goto unlock_done;
- } else {
- e->rdma_sge.vaddr = NULL;
- e->rdma_sge.length = 0;
- e->rdma_sge.sge_length = 0;
- }
- e->psn = psn;
- if (old_req)
- goto unlock_done;
- qp->s_tail_ack_queue = prev;
- break;
- }
-
- case OP(COMPARE_SWAP):
- case OP(FETCH_ADD): {
- /*
- * If we didn't find the atomic request in the ack queue
- * or the send tasklet is already backed up to send an
- * earlier entry, we can ignore this request.
- */
- if (!e || e->opcode != (u8) opcode || old_req)
- goto unlock_done;
- qp->s_tail_ack_queue = prev;
- break;
- }
-
- default:
- /*
- * Ignore this operation if it doesn't request an ACK
- * or an earlier RDMA read or atomic is going to be resent.
- */
- if (!(psn & IB_BTH_REQ_ACK) || old_req)
- goto unlock_done;
- /*
- * Resend the most recent ACK if this request is
- * after all the previous RDMA reads and atomics.
- */
- if (i == qp->r_head_ack_queue) {
- spin_unlock_irqrestore(&qp->s_lock, flags);
- qp->r_nak_state = 0;
- qp->r_ack_psn = qp->r_psn - 1;
- goto send_ack;
- }
- /*
- * Try to send a simple ACK to work around a Mellanox bug
- * which doesn't accept an RDMA read response or atomic
- * response as an ACK for earlier SENDs or RDMA writes.
- */
- if (!(qp->s_flags & QIB_S_RESP_PENDING)) {
- spin_unlock_irqrestore(&qp->s_lock, flags);
- qp->r_nak_state = 0;
- qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
- goto send_ack;
- }
- /*
- * Resend the RDMA read or atomic op which
- * ACKs this duplicate request.
- */
- qp->s_tail_ack_queue = i;
- break;
- }
- qp->s_ack_state = OP(ACKNOWLEDGE);
- qp->s_flags |= QIB_S_RESP_PENDING;
- qp->r_nak_state = 0;
- qib_schedule_send(qp);
-
-unlock_done:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-done:
- return 1;
-
-send_ack:
- return 0;
-}
-
-void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err)
-{
- unsigned long flags;
- int lastwqe;
-
- spin_lock_irqsave(&qp->s_lock, flags);
- lastwqe = qib_error_qp(qp, err);
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
- if (lastwqe) {
- struct ib_event ev;
-
- ev.device = qp->ibqp.device;
- ev.element.qp = &qp->ibqp;
- ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
- qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
- }
-}
-
-static inline void qib_update_ack_queue(struct qib_qp *qp, unsigned n)
-{
- unsigned next;
-
- next = n + 1;
- if (next > QIB_MAX_RDMA_ATOMIC)
- next = 0;
- qp->s_tail_ack_queue = next;
- qp->s_ack_state = OP(ACKNOWLEDGE);
-}
-
-/**
- * qib_rc_rcv - process an incoming RC packet
- * @rcd: the context pointer
- * @hdr: the header of this packet
- * @has_grh: true if the header has a GRH
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP for this packet
- *
- * This is called from qib_qp_rcv() to process an incoming RC packet
- * for the given QP.
- * Called at interrupt level.
- */
-void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct qib_qp *qp)
-{
- struct qib_ibport *ibp = &rcd->ppd->ibport_data;
- struct qib_other_headers *ohdr;
- u32 opcode;
- u32 hdrsize;
- u32 psn;
- u32 pad;
- struct ib_wc wc;
- u32 pmtu = qp->pmtu;
- int diff;
- struct ib_reth *reth;
- unsigned long flags;
- int ret;
-
- /* Check for GRH */
- if (!has_grh) {
- ohdr = &hdr->u.oth;
- hdrsize = 8 + 12; /* LRH + BTH */
- } else {
- ohdr = &hdr->u.l.oth;
- hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
- }
-
- opcode = be32_to_cpu(ohdr->bth[0]);
- if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
- return;
-
- psn = be32_to_cpu(ohdr->bth[2]);
- opcode >>= 24;
-
- /*
- * Process responses (ACKs) before anything else. Note that the
- * packet sequence number will be for something in the send work
- * queue rather than the expected receive packet sequence number.
- * In other words, this QP is the requester.
- */
- if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
- opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
- qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
- hdrsize, pmtu, rcd);
- return;
- }
-
- /* Compute 24 bits worth of difference. */
- diff = qib_cmp24(psn, qp->r_psn);
- if (unlikely(diff)) {
- if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
- return;
- goto send_ack;
- }
-
- /* Check for opcode sequence errors. */
- switch (qp->r_state) {
- case OP(SEND_FIRST):
- case OP(SEND_MIDDLE):
- if (opcode == OP(SEND_MIDDLE) ||
- opcode == OP(SEND_LAST) ||
- opcode == OP(SEND_LAST_WITH_IMMEDIATE))
- break;
- goto nack_inv;
-
- case OP(RDMA_WRITE_FIRST):
- case OP(RDMA_WRITE_MIDDLE):
- if (opcode == OP(RDMA_WRITE_MIDDLE) ||
- opcode == OP(RDMA_WRITE_LAST) ||
- opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
- break;
- goto nack_inv;
-
- default:
- if (opcode == OP(SEND_MIDDLE) ||
- opcode == OP(SEND_LAST) ||
- opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
- opcode == OP(RDMA_WRITE_MIDDLE) ||
- opcode == OP(RDMA_WRITE_LAST) ||
- opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
- goto nack_inv;
- /*
- * Note that it is up to the requester to not send a new
- * RDMA read or atomic operation before receiving an ACK
- * for the previous operation.
- */
- break;
- }
-
- if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
- qp->r_flags |= QIB_R_COMM_EST;
- if (qp->ibqp.event_handler) {
- struct ib_event ev;
-
- ev.device = qp->ibqp.device;
- ev.element.qp = &qp->ibqp;
- ev.event = IB_EVENT_COMM_EST;
- qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
- }
- }
-
- /* OK, process the packet. */
- switch (opcode) {
- case OP(SEND_FIRST):
- ret = qib_get_rwqe(qp, 0);
- if (ret < 0)
- goto nack_op_err;
- if (!ret)
- goto rnr_nak;
- qp->r_rcv_len = 0;
- /* FALLTHROUGH */
- case OP(SEND_MIDDLE):
- case OP(RDMA_WRITE_MIDDLE):
-send_middle:
- /* Check for invalid length PMTU or posted rwqe len. */
- if (unlikely(tlen != (hdrsize + pmtu + 4)))
- goto nack_inv;
- qp->r_rcv_len += pmtu;
- if (unlikely(qp->r_rcv_len > qp->r_len))
- goto nack_inv;
- qib_copy_sge(&qp->r_sge, data, pmtu, 1);
- break;
-
- case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
- /* consume RWQE */
- ret = qib_get_rwqe(qp, 1);
- if (ret < 0)
- goto nack_op_err;
- if (!ret)
- goto rnr_nak;
- goto send_last_imm;
-
- case OP(SEND_ONLY):
- case OP(SEND_ONLY_WITH_IMMEDIATE):
- ret = qib_get_rwqe(qp, 0);
- if (ret < 0)
- goto nack_op_err;
- if (!ret)
- goto rnr_nak;
- qp->r_rcv_len = 0;
- if (opcode == OP(SEND_ONLY))
- goto no_immediate_data;
- /* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
- case OP(SEND_LAST_WITH_IMMEDIATE):
-send_last_imm:
- wc.ex.imm_data = ohdr->u.imm_data;
- hdrsize += 4;
- wc.wc_flags = IB_WC_WITH_IMM;
- goto send_last;
- case OP(SEND_LAST):
- case OP(RDMA_WRITE_LAST):
-no_immediate_data:
- wc.wc_flags = 0;
- wc.ex.imm_data = 0;
-send_last:
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /* Check for invalid length. */
- /* XXX LAST len should be >= 1 */
- if (unlikely(tlen < (hdrsize + pad + 4)))
- goto nack_inv;
- /* Don't count the CRC. */
- tlen -= (hdrsize + pad + 4);
- wc.byte_len = tlen + qp->r_rcv_len;
- if (unlikely(wc.byte_len > qp->r_len))
- goto nack_inv;
- qib_copy_sge(&qp->r_sge, data, tlen, 1);
- qib_put_ss(&qp->r_sge);
- qp->r_msn++;
- if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
- break;
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
- opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
- wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- else
- wc.opcode = IB_WC_RECV;
- wc.qp = &qp->ibqp;
- wc.src_qp = qp->remote_qpn;
- wc.slid = qp->remote_ah_attr.dlid;
- wc.sl = qp->remote_ah_attr.sl;
- /* zero fields that are N/A */
- wc.vendor_err = 0;
- wc.pkey_index = 0;
- wc.dlid_path_bits = 0;
- wc.port_num = 0;
- /* Signal completion event if the solicited bit is set. */
- qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
- (ohdr->bth[0] &
- cpu_to_be32(IB_BTH_SOLICITED)) != 0);
- break;
-
- case OP(RDMA_WRITE_FIRST):
- case OP(RDMA_WRITE_ONLY):
- case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
- goto nack_inv;
- /* consume RWQE */
- reth = &ohdr->u.rc.reth;
- hdrsize += sizeof(*reth);
- qp->r_len = be32_to_cpu(reth->length);
- qp->r_rcv_len = 0;
- qp->r_sge.sg_list = NULL;
- if (qp->r_len != 0) {
- u32 rkey = be32_to_cpu(reth->rkey);
- u64 vaddr = be64_to_cpu(reth->vaddr);
- int ok;
-
- /* Check rkey & NAK */
- ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
- rkey, IB_ACCESS_REMOTE_WRITE);
- if (unlikely(!ok))
- goto nack_acc;
- qp->r_sge.num_sge = 1;
- } else {
- qp->r_sge.num_sge = 0;
- qp->r_sge.sge.mr = NULL;
- qp->r_sge.sge.vaddr = NULL;
- qp->r_sge.sge.length = 0;
- qp->r_sge.sge.sge_length = 0;
- }
- if (opcode == OP(RDMA_WRITE_FIRST))
- goto send_middle;
- else if (opcode == OP(RDMA_WRITE_ONLY))
- goto no_immediate_data;
- ret = qib_get_rwqe(qp, 1);
- if (ret < 0)
- goto nack_op_err;
- if (!ret)
- goto rnr_nak;
- wc.ex.imm_data = ohdr->u.rc.imm_data;
- hdrsize += 4;
- wc.wc_flags = IB_WC_WITH_IMM;
- goto send_last;
-
- case OP(RDMA_READ_REQUEST): {
- struct qib_ack_entry *e;
- u32 len;
- u8 next;
-
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
- goto nack_inv;
- next = qp->r_head_ack_queue + 1;
- /* s_ack_queue is size QIB_MAX_RDMA_ATOMIC+1 so use > not >= */
- if (next > QIB_MAX_RDMA_ATOMIC)
- next = 0;
- spin_lock_irqsave(&qp->s_lock, flags);
- if (unlikely(next == qp->s_tail_ack_queue)) {
- if (!qp->s_ack_queue[next].sent)
- goto nack_inv_unlck;
- qib_update_ack_queue(qp, next);
- }
- e = &qp->s_ack_queue[qp->r_head_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
- qib_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
- reth = &ohdr->u.rc.reth;
- len = be32_to_cpu(reth->length);
- if (len) {
- u32 rkey = be32_to_cpu(reth->rkey);
- u64 vaddr = be64_to_cpu(reth->vaddr);
- int ok;
-
- /* Check rkey & NAK */
- ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr,
- rkey, IB_ACCESS_REMOTE_READ);
- if (unlikely(!ok))
- goto nack_acc_unlck;
- /*
- * Update the next expected PSN. We add 1 later
- * below, so only add the remainder here.
- */
- if (len > pmtu)
- qp->r_psn += (len - 1) / pmtu;
- } else {
- e->rdma_sge.mr = NULL;
- e->rdma_sge.vaddr = NULL;
- e->rdma_sge.length = 0;
- e->rdma_sge.sge_length = 0;
- }
- e->opcode = opcode;
- e->sent = 0;
- e->psn = psn;
- e->lpsn = qp->r_psn;
- /*
- * We need to increment the MSN here instead of when we
- * finish sending the result since a duplicate request would
- * increment it more than once.
- */
- qp->r_msn++;
- qp->r_psn++;
- qp->r_state = opcode;
- qp->r_nak_state = 0;
- qp->r_head_ack_queue = next;
-
- /* Schedule the send tasklet. */
- qp->s_flags |= QIB_S_RESP_PENDING;
- qib_schedule_send(qp);
-
- goto sunlock;
- }
-
- case OP(COMPARE_SWAP):
- case OP(FETCH_ADD): {
- struct ib_atomic_eth *ateth;
- struct qib_ack_entry *e;
- u64 vaddr;
- atomic64_t *maddr;
- u64 sdata;
- u32 rkey;
- u8 next;
-
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
- goto nack_inv;
- next = qp->r_head_ack_queue + 1;
- if (next > QIB_MAX_RDMA_ATOMIC)
- next = 0;
- spin_lock_irqsave(&qp->s_lock, flags);
- if (unlikely(next == qp->s_tail_ack_queue)) {
- if (!qp->s_ack_queue[next].sent)
- goto nack_inv_unlck;
- qib_update_ack_queue(qp, next);
- }
- e = &qp->s_ack_queue[qp->r_head_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
- qib_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
- ateth = &ohdr->u.atomic_eth;
- vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
- be32_to_cpu(ateth->vaddr[1]);
- if (unlikely(vaddr & (sizeof(u64) - 1)))
- goto nack_inv_unlck;
- rkey = be32_to_cpu(ateth->rkey);
- /* Check rkey & NAK */
- if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
- vaddr, rkey,
- IB_ACCESS_REMOTE_ATOMIC)))
- goto nack_acc_unlck;
- /* Perform atomic OP and save result. */
- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
- sdata = be64_to_cpu(ateth->swap_data);
- e->atomic_data = (opcode == OP(FETCH_ADD)) ?
- (u64) atomic64_add_return(sdata, maddr) - sdata :
- (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
- be64_to_cpu(ateth->compare_data),
- sdata);
- qib_put_mr(qp->r_sge.sge.mr);
- qp->r_sge.num_sge = 0;
- e->opcode = opcode;
- e->sent = 0;
- e->psn = psn;
- e->lpsn = psn;
- qp->r_msn++;
- qp->r_psn++;
- qp->r_state = opcode;
- qp->r_nak_state = 0;
- qp->r_head_ack_queue = next;
-
- /* Schedule the send tasklet. */
- qp->s_flags |= QIB_S_RESP_PENDING;
- qib_schedule_send(qp);
-
- goto sunlock;
- }
-
- default:
- /* NAK unknown opcodes. */
- goto nack_inv;
- }
- qp->r_psn++;
- qp->r_state = opcode;
- qp->r_ack_psn = psn;
- qp->r_nak_state = 0;
- /* Send an ACK if requested or required. */
- if (psn & (1 << 31))
- goto send_ack;
- return;
-
-rnr_nak:
- qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
- qp->r_ack_psn = qp->r_psn;
- /* Queue RNR NAK for later */
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= QIB_R_RSP_NAK;
- atomic_inc(&qp->refcount);
- list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
- }
- return;
-
-nack_op_err:
- qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
- qp->r_ack_psn = qp->r_psn;
- /* Queue NAK for later */
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= QIB_R_RSP_NAK;
- atomic_inc(&qp->refcount);
- list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
- }
- return;
-
-nack_inv_unlck:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-nack_inv:
- qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- qp->r_nak_state = IB_NAK_INVALID_REQUEST;
- qp->r_ack_psn = qp->r_psn;
- /* Queue NAK for later */
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= QIB_R_RSP_NAK;
- atomic_inc(&qp->refcount);
- list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
- }
- return;
-
-nack_acc_unlck:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-nack_acc:
- qib_rc_error(qp, IB_WC_LOC_PROT_ERR);
- qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
- qp->r_ack_psn = qp->r_psn;
-send_ack:
- qib_send_rc_ack(qp);
- return;
-
-sunlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-}
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
deleted file mode 100644
index 22e356ca8058..000000000000
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ /dev/null
@@ -1,820 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/spinlock.h>
-#include <rdma/ib_smi.h>
-
-#include "qib.h"
-#include "qib_mad.h"
-
-/*
- * Convert the AETH RNR timeout code into the number of microseconds.
- */
-const u32 ib_qib_rnr_table[32] = {
- 655360, /* 00: 655.36 */
- 10, /* 01: .01 */
- 20, /* 02: .02 */
- 30, /* 03: .03 */
- 40, /* 04: .04 */
- 60, /* 05: .06 */
- 80, /* 06: .08 */
- 120, /* 07: .12 */
- 160, /* 08: .16 */
- 240, /* 09: .24 */
- 320, /* 0A: .32 */
- 480, /* 0B: .48 */
- 640, /* 0C: .64 */
- 960, /* 0D: .96 */
- 1280, /* 0E: 1.28 */
- 1920, /* 0F: 1.92 */
- 2560, /* 10: 2.56 */
- 3840, /* 11: 3.84 */
- 5120, /* 12: 5.12 */
- 7680, /* 13: 7.68 */
- 10240, /* 14: 10.24 */
- 15360, /* 15: 15.36 */
- 20480, /* 16: 20.48 */
- 30720, /* 17: 30.72 */
- 40960, /* 18: 40.96 */
- 61440, /* 19: 61.44 */
- 81920, /* 1A: 81.92 */
- 122880, /* 1B: 122.88 */
- 163840, /* 1C: 163.84 */
- 245760, /* 1D: 245.76 */
- 327680, /* 1E: 327.68 */
- 491520 /* 1F: 491.52 */
-};
-
-/*
- * Validate a RWQE and fill in the SGE state.
- * Return 1 if OK.
- */
-static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
-{
- int i, j, ret;
- struct ib_wc wc;
- struct qib_lkey_table *rkt;
- struct qib_pd *pd;
- struct qib_sge_state *ss;
-
- rkt = &to_idev(qp->ibqp.device)->lk_table;
- pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
- ss = &qp->r_sge;
- ss->sg_list = qp->r_sg_list;
- qp->r_len = 0;
- for (i = j = 0; i < wqe->num_sge; i++) {
- if (wqe->sg_list[i].length == 0)
- continue;
- /* Check LKEY */
- if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
- &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
- goto bad_lkey;
- qp->r_len += wqe->sg_list[i].length;
- j++;
- }
- ss->num_sge = j;
- ss->total_len = qp->r_len;
- ret = 1;
- goto bail;
-
-bad_lkey:
- while (j) {
- struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
-
- qib_put_mr(sge->mr);
- }
- ss->num_sge = 0;
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr_id;
- wc.status = IB_WC_LOC_PROT_ERR;
- wc.opcode = IB_WC_RECV;
- wc.qp = &qp->ibqp;
- /* Signal solicited completion event. */
- qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
- ret = 0;
-bail:
- return ret;
-}
-
-/**
- * qib_get_rwqe - copy the next RWQE into the QP's RWQE
- * @qp: the QP
- * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
- *
- * Return -1 if there is a local error, 0 if no RWQE is available,
- * otherwise return 1.
- *
- * Can be called from interrupt level.
- */
-int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
-{
- unsigned long flags;
- struct qib_rq *rq;
- struct qib_rwq *wq;
- struct qib_srq *srq;
- struct qib_rwqe *wqe;
- void (*handler)(struct ib_event *, void *);
- u32 tail;
- int ret;
-
- if (qp->ibqp.srq) {
- srq = to_isrq(qp->ibqp.srq);
- handler = srq->ibsrq.event_handler;
- rq = &srq->rq;
- } else {
- srq = NULL;
- handler = NULL;
- rq = &qp->r_rq;
- }
-
- spin_lock_irqsave(&rq->lock, flags);
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
- ret = 0;
- goto unlock;
- }
-
- wq = rq->wq;
- tail = wq->tail;
- /* Validate tail before using it since it is user writable. */
- if (tail >= rq->size)
- tail = 0;
- if (unlikely(tail == wq->head)) {
- ret = 0;
- goto unlock;
- }
- /* Make sure entry is read after head index is read. */
- smp_rmb();
- wqe = get_rwqe_ptr(rq, tail);
- /*
- * Even though we update the tail index in memory, the verbs
- * consumer is not supposed to post more entries until a
- * completion is generated.
- */
- if (++tail >= rq->size)
- tail = 0;
- wq->tail = tail;
- if (!wr_id_only && !qib_init_sge(qp, wqe)) {
- ret = -1;
- goto unlock;
- }
- qp->r_wr_id = wqe->wr_id;
-
- ret = 1;
- set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
- if (handler) {
- u32 n;
-
- /*
- * Validate head pointer value and compute
- * the number of remaining WQEs.
- */
- n = wq->head;
- if (n >= rq->size)
- n = 0;
- if (n < tail)
- n += rq->size - tail;
- else
- n -= tail;
- if (n < srq->limit) {
- struct ib_event ev;
-
- srq->limit = 0;
- spin_unlock_irqrestore(&rq->lock, flags);
- ev.device = qp->ibqp.device;
- ev.element.srq = qp->ibqp.srq;
- ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
- handler(&ev, srq->ibsrq.srq_context);
- goto bail;
- }
- }
-unlock:
- spin_unlock_irqrestore(&rq->lock, flags);
-bail:
- return ret;
-}
-
-/*
- * Switch to alternate path.
- * The QP s_lock should be held and interrupts disabled.
- */
-void qib_migrate_qp(struct qib_qp *qp)
-{
- struct ib_event ev;
-
- qp->s_mig_state = IB_MIG_MIGRATED;
- qp->remote_ah_attr = qp->alt_ah_attr;
- qp->port_num = qp->alt_ah_attr.port_num;
- qp->s_pkey_index = qp->s_alt_pkey_index;
-
- ev.device = qp->ibqp.device;
- ev.element.qp = &qp->ibqp;
- ev.event = IB_EVENT_PATH_MIG;
- qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
-}
-
-static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
-{
- if (!index) {
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
-
- return ppd->guid;
- }
- return ibp->guids[index - 1];
-}
-
-static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
-{
- return (gid->global.interface_id == id &&
- (gid->global.subnet_prefix == gid_prefix ||
- gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
-}
-
-/*
- * This should be called with the QP r_lock held.
- *
- * The s_lock will be acquired around the qib_migrate_qp() call.
- */
-int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
- int has_grh, struct qib_qp *qp, u32 bth0)
-{
- __be64 guid;
- unsigned long flags;
-
- if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
- if (!has_grh) {
- if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
- goto err;
- } else {
- if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
- goto err;
- guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
- if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
- goto err;
- if (!gid_ok(&hdr->u.l.grh.sgid,
- qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
- qp->alt_ah_attr.grh.dgid.global.interface_id))
- goto err;
- }
- if (!qib_pkey_ok((u16)bth0,
- qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
- qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
- (u16)bth0,
- (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
- 0, qp->ibqp.qp_num,
- hdr->lrh[3], hdr->lrh[1]);
- goto err;
- }
- /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
- if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
- ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
- goto err;
- spin_lock_irqsave(&qp->s_lock, flags);
- qib_migrate_qp(qp);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- } else {
- if (!has_grh) {
- if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
- goto err;
- } else {
- if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
- goto err;
- guid = get_sguid(ibp,
- qp->remote_ah_attr.grh.sgid_index);
- if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
- goto err;
- if (!gid_ok(&hdr->u.l.grh.sgid,
- qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
- qp->remote_ah_attr.grh.dgid.global.interface_id))
- goto err;
- }
- if (!qib_pkey_ok((u16)bth0,
- qib_get_pkey(ibp, qp->s_pkey_index))) {
- qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
- (u16)bth0,
- (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
- 0, qp->ibqp.qp_num,
- hdr->lrh[3], hdr->lrh[1]);
- goto err;
- }
- /* Validate the SLID. See Ch. 9.6.1.5 */
- if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
- ppd_from_ibp(ibp)->port != qp->port_num)
- goto err;
- if (qp->s_mig_state == IB_MIG_REARM &&
- !(bth0 & IB_BTH_MIG_REQ))
- qp->s_mig_state = IB_MIG_ARMED;
- }
-
- return 0;
-
-err:
- return 1;
-}
-
-/**
- * qib_ruc_loopback - handle UC and RC loopback requests
- * @sqp: the sending QP
- *
- * This is called from qib_do_send() to
- * forward a WQE addressed to the same HCA.
- * Note that although we are single threaded due to the tasklet, we still
- * have to protect against post_send(). We don't have to worry about
- * receive interrupts since this is a connected protocol and all packets
- * will pass through here.
- */
-static void qib_ruc_loopback(struct qib_qp *sqp)
-{
- struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
- struct qib_qp *qp;
- struct qib_swqe *wqe;
- struct qib_sge *sge;
- unsigned long flags;
- struct ib_wc wc;
- u64 sdata;
- atomic64_t *maddr;
- enum ib_wc_status send_status;
- int release;
- int ret;
-
- /*
- * Note that we check the responder QP state after
- * checking the requester's state.
- */
- qp = qib_lookup_qpn(ibp, sqp->remote_qpn);
-
- spin_lock_irqsave(&sqp->s_lock, flags);
-
- /* Return if we are already busy processing a work request. */
- if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) ||
- !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND))
- goto unlock;
-
- sqp->s_flags |= QIB_S_BUSY;
-
-again:
- if (sqp->s_last == sqp->s_head)
- goto clr_busy;
- wqe = get_swqe_ptr(sqp, sqp->s_last);
-
- /* Return if it is not OK to start a new work request. */
- if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
- if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND))
- goto clr_busy;
- /* We are in the error state, flush the work request. */
- send_status = IB_WC_WR_FLUSH_ERR;
- goto flush_send;
- }
-
- /*
- * We can rely on the entry not changing without the s_lock
- * being held until we update s_last.
- * We increment s_cur to indicate s_last is in progress.
- */
- if (sqp->s_last == sqp->s_cur) {
- if (++sqp->s_cur >= sqp->s_size)
- sqp->s_cur = 0;
- }
- spin_unlock_irqrestore(&sqp->s_lock, flags);
-
- if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) ||
- qp->ibqp.qp_type != sqp->ibqp.qp_type) {
- ibp->n_pkt_drops++;
- /*
- * For RC, the requester would timeout and retry so
- * shortcut the timeouts and just signal too many retries.
- */
- if (sqp->ibqp.qp_type == IB_QPT_RC)
- send_status = IB_WC_RETRY_EXC_ERR;
- else
- send_status = IB_WC_SUCCESS;
- goto serr;
- }
-
- memset(&wc, 0, sizeof(wc));
- send_status = IB_WC_SUCCESS;
-
- release = 1;
- sqp->s_sge.sge = wqe->sg_list[0];
- sqp->s_sge.sg_list = wqe->sg_list + 1;
- sqp->s_sge.num_sge = wqe->wr.num_sge;
- sqp->s_len = wqe->length;
- switch (wqe->wr.opcode) {
- case IB_WR_SEND_WITH_IMM:
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = wqe->wr.ex.imm_data;
- /* FALLTHROUGH */
- case IB_WR_SEND:
- ret = qib_get_rwqe(qp, 0);
- if (ret < 0)
- goto op_err;
- if (!ret)
- goto rnr_nak;
- break;
-
- case IB_WR_RDMA_WRITE_WITH_IMM:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
- goto inv_err;
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = wqe->wr.ex.imm_data;
- ret = qib_get_rwqe(qp, 1);
- if (ret < 0)
- goto op_err;
- if (!ret)
- goto rnr_nak;
- /* FALLTHROUGH */
- case IB_WR_RDMA_WRITE:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
- goto inv_err;
- if (wqe->length == 0)
- break;
- if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
- wqe->wr.wr.rdma.remote_addr,
- wqe->wr.wr.rdma.rkey,
- IB_ACCESS_REMOTE_WRITE)))
- goto acc_err;
- qp->r_sge.sg_list = NULL;
- qp->r_sge.num_sge = 1;
- qp->r_sge.total_len = wqe->length;
- break;
-
- case IB_WR_RDMA_READ:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
- goto inv_err;
- if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
- wqe->wr.wr.rdma.remote_addr,
- wqe->wr.wr.rdma.rkey,
- IB_ACCESS_REMOTE_READ)))
- goto acc_err;
- release = 0;
- sqp->s_sge.sg_list = NULL;
- sqp->s_sge.num_sge = 1;
- qp->r_sge.sge = wqe->sg_list[0];
- qp->r_sge.sg_list = wqe->sg_list + 1;
- qp->r_sge.num_sge = wqe->wr.num_sge;
- qp->r_sge.total_len = wqe->length;
- break;
-
- case IB_WR_ATOMIC_CMP_AND_SWP:
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
- goto inv_err;
- if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
- wqe->wr.wr.atomic.remote_addr,
- wqe->wr.wr.atomic.rkey,
- IB_ACCESS_REMOTE_ATOMIC)))
- goto acc_err;
- /* Perform atomic OP and save result. */
- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
- sdata = wqe->wr.wr.atomic.compare_add;
- *(u64 *) sqp->s_sge.sge.vaddr =
- (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
- (u64) atomic64_add_return(sdata, maddr) - sdata :
- (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
- sdata, wqe->wr.wr.atomic.swap);
- qib_put_mr(qp->r_sge.sge.mr);
- qp->r_sge.num_sge = 0;
- goto send_comp;
-
- default:
- send_status = IB_WC_LOC_QP_OP_ERR;
- goto serr;
- }
-
- sge = &sqp->s_sge.sge;
- while (sqp->s_len) {
- u32 len = sqp->s_len;
-
- if (len > sge->length)
- len = sge->length;
- if (len > sge->sge_length)
- len = sge->sge_length;
- BUG_ON(len == 0);
- qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (!release)
- qib_put_mr(sge->mr);
- if (--sqp->s_sge.num_sge)
- *sge = *sqp->s_sge.sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= QIB_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- sqp->s_len -= len;
- }
- if (release)
- qib_put_ss(&qp->r_sge);
-
- if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
- goto send_comp;
-
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
- wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- else
- wc.opcode = IB_WC_RECV;
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.byte_len = wqe->length;
- wc.qp = &qp->ibqp;
- wc.src_qp = qp->remote_qpn;
- wc.slid = qp->remote_ah_attr.dlid;
- wc.sl = qp->remote_ah_attr.sl;
- wc.port_num = 1;
- /* Signal completion event if the solicited bit is set. */
- qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
- wqe->wr.send_flags & IB_SEND_SOLICITED);
-
-send_comp:
- spin_lock_irqsave(&sqp->s_lock, flags);
- ibp->n_loop_pkts++;
-flush_send:
- sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
- qib_send_complete(sqp, wqe, send_status);
- goto again;
-
-rnr_nak:
- /* Handle RNR NAK */
- if (qp->ibqp.qp_type == IB_QPT_UC)
- goto send_comp;
- ibp->n_rnr_naks++;
- /*
- * Note: we don't need the s_lock held since the BUSY flag
- * makes this single threaded.
- */
- if (sqp->s_rnr_retry == 0) {
- send_status = IB_WC_RNR_RETRY_EXC_ERR;
- goto serr;
- }
- if (sqp->s_rnr_retry_cnt < 7)
- sqp->s_rnr_retry--;
- spin_lock_irqsave(&sqp->s_lock, flags);
- if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK))
- goto clr_busy;
- sqp->s_flags |= QIB_S_WAIT_RNR;
- sqp->s_timer.function = qib_rc_rnr_retry;
- sqp->s_timer.expires = jiffies +
- usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
- add_timer(&sqp->s_timer);
- goto clr_busy;
-
-op_err:
- send_status = IB_WC_REM_OP_ERR;
- wc.status = IB_WC_LOC_QP_OP_ERR;
- goto err;
-
-inv_err:
- send_status = IB_WC_REM_INV_REQ_ERR;
- wc.status = IB_WC_LOC_QP_OP_ERR;
- goto err;
-
-acc_err:
- send_status = IB_WC_REM_ACCESS_ERR;
- wc.status = IB_WC_LOC_PROT_ERR;
-err:
- /* responder goes to error state */
- qib_rc_error(qp, wc.status);
-
-serr:
- spin_lock_irqsave(&sqp->s_lock, flags);
- qib_send_complete(sqp, wqe, send_status);
- if (sqp->ibqp.qp_type == IB_QPT_RC) {
- int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
-
- sqp->s_flags &= ~QIB_S_BUSY;
- spin_unlock_irqrestore(&sqp->s_lock, flags);
- if (lastwqe) {
- struct ib_event ev;
-
- ev.device = sqp->ibqp.device;
- ev.element.qp = &sqp->ibqp;
- ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
- sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
- }
- goto done;
- }
-clr_busy:
- sqp->s_flags &= ~QIB_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&sqp->s_lock, flags);
-done:
- if (qp && atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
-}
-
-/**
- * qib_make_grh - construct a GRH header
- * @ibp: a pointer to the IB port
- * @hdr: a pointer to the GRH header being constructed
- * @grh: the global route address to send to
- * @hwords: the number of 32 bit words of header being sent
- * @nwords: the number of 32 bit words of data being sent
- *
- * Return the size of the header in 32 bit words.
- */
-u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
- struct ib_global_route *grh, u32 hwords, u32 nwords)
-{
- hdr->version_tclass_flow =
- cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
- (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
- (grh->flow_label << IB_GRH_FLOW_SHIFT));
- hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
- /* next_hdr is defined by C8-7 in ch. 8.4.1 */
- hdr->next_hdr = IB_GRH_NEXT_HDR;
- hdr->hop_limit = grh->hop_limit;
- /* The SGID is 32-bit aligned. */
- hdr->sgid.global.subnet_prefix = ibp->gid_prefix;
- hdr->sgid.global.interface_id = grh->sgid_index ?
- ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
- hdr->dgid = grh->dgid;
-
- /* GRH header size in 32-bit words. */
- return sizeof(struct ib_grh) / sizeof(u32);
-}
-
-void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
- u32 bth0, u32 bth2)
-{
- struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- u16 lrh0;
- u32 nwords;
- u32 extra_bytes;
-
- /* Construct the header. */
- extra_bytes = -qp->s_cur_size & 3;
- nwords = (qp->s_cur_size + extra_bytes) >> 2;
- lrh0 = QIB_LRH_BTH;
- if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
- qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh,
- &qp->remote_ah_attr.grh,
- qp->s_hdrwords, nwords);
- lrh0 = QIB_LRH_GRH;
- }
- lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
- qp->remote_ah_attr.sl << 4;
- qp->s_hdr->lrh[0] = cpu_to_be16(lrh0);
- qp->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
- qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
- qp->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
- qp->remote_ah_attr.src_path_bits);
- bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
- bth0 |= extra_bytes << 20;
- if (qp->s_mig_state == IB_MIG_MIGRATED)
- bth0 |= IB_BTH_MIG_REQ;
- ohdr->bth[0] = cpu_to_be32(bth0);
- ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
- ohdr->bth[2] = cpu_to_be32(bth2);
- this_cpu_inc(ibp->pmastats->n_unicast_xmit);
-}
-
-/**
- * qib_do_send - perform a send on a QP
- * @work: contains a pointer to the QP
- *
- * Process entries in the send work queue until credit or queue is
- * exhausted. Only allow one CPU to send a packet per QP (tasklet).
- * Otherwise, two threads could send packets out of order.
- */
-void qib_do_send(struct work_struct *work)
-{
- struct qib_qp *qp = container_of(work, struct qib_qp, s_work);
- struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- int (*make_req)(struct qib_qp *qp);
- unsigned long flags;
-
- if ((qp->ibqp.qp_type == IB_QPT_RC ||
- qp->ibqp.qp_type == IB_QPT_UC) &&
- (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
- qib_ruc_loopback(qp);
- return;
- }
-
- if (qp->ibqp.qp_type == IB_QPT_RC)
- make_req = qib_make_rc_req;
- else if (qp->ibqp.qp_type == IB_QPT_UC)
- make_req = qib_make_uc_req;
- else
- make_req = qib_make_ud_req;
-
- spin_lock_irqsave(&qp->s_lock, flags);
-
- /* Return if we are already busy processing a work request. */
- if (!qib_send_ok(qp)) {
- spin_unlock_irqrestore(&qp->s_lock, flags);
- return;
- }
-
- qp->s_flags |= QIB_S_BUSY;
-
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
- do {
- /* Check for a constructed packet to be sent. */
- if (qp->s_hdrwords != 0) {
- /*
- * If the packet cannot be sent now, return and
- * the send tasklet will be woken up later.
- */
- if (qib_verbs_send(qp, qp->s_hdr, qp->s_hdrwords,
- qp->s_cur_sge, qp->s_cur_size))
- break;
- /* Record that s_hdr is empty. */
- qp->s_hdrwords = 0;
- }
- } while (make_req(qp));
-}
-
-/*
- * This should be called with s_lock held.
- */
-void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
- enum ib_wc_status status)
-{
- u32 old_last, last;
- unsigned i;
-
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
- return;
-
- for (i = 0; i < wqe->wr.num_sge; i++) {
- struct qib_sge *sge = &wqe->sg_list[i];
-
- qib_put_mr(sge->mr);
- }
- if (qp->ibqp.qp_type == IB_QPT_UD ||
- qp->ibqp.qp_type == IB_QPT_SMI ||
- qp->ibqp.qp_type == IB_QPT_GSI)
- atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
-
- /* See ch. 11.2.4.1 and 10.7.3.1 */
- if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
- (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
- status != IB_WC_SUCCESS) {
- struct ib_wc wc;
-
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr.wr_id;
- wc.status = status;
- wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
- wc.qp = &qp->ibqp;
- if (status == IB_WC_SUCCESS)
- wc.byte_len = wqe->length;
- qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
- status != IB_WC_SUCCESS);
- }
-
- last = qp->s_last;
- old_last = last;
- if (++last >= qp->s_size)
- last = 0;
- qp->s_last = last;
- if (qp->s_acked == old_last)
- qp->s_acked = last;
- if (qp->s_cur == old_last)
- qp->s_cur = last;
- if (qp->s_tail == old_last)
- qp->s_tail = last;
- if (qp->state == IB_QPS_SQD && last == qp->s_cur)
- qp->s_draining = 0;
-}
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
deleted file mode 100644
index c72775f27212..000000000000
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ /dev/null
@@ -1,1454 +0,0 @@
-/*
- * Copyright (c) 2013 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/*
- * This file contains all of the code that is specific to the SerDes
- * on the QLogic_IB 7220 chip.
- */
-
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/firmware.h>
-
-#include "qib.h"
-#include "qib_7220.h"
-
-#define SD7220_FW_NAME "qlogic/sd7220.fw"
-MODULE_FIRMWARE(SD7220_FW_NAME);
-
-/*
- * Same as in qib_iba7220.c, but just the registers needed here.
- * Could move whole set to qib_7220.h, but decided better to keep
- * local.
- */
-#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
-#define kr_hwerrclear KREG_IDX(HwErrClear)
-#define kr_hwerrmask KREG_IDX(HwErrMask)
-#define kr_hwerrstatus KREG_IDX(HwErrStatus)
-#define kr_ibcstatus KREG_IDX(IBCStatus)
-#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
-#define kr_scratch KREG_IDX(Scratch)
-#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
-/* these are used only here, not in qib_iba7220.c */
-#define kr_ibsd_epb_access_ctrl KREG_IDX(ibsd_epb_access_ctrl)
-#define kr_ibsd_epb_transaction_reg KREG_IDX(ibsd_epb_transaction_reg)
-#define kr_pciesd_epb_transaction_reg KREG_IDX(pciesd_epb_transaction_reg)
-#define kr_pciesd_epb_access_ctrl KREG_IDX(pciesd_epb_access_ctrl)
-#define kr_serdes_ddsrxeq0 KREG_IDX(SerDes_DDSRXEQ0)
-
-/*
- * The IBSerDesMappTable is a memory that holds values to be stored in
- * various SerDes registers by IBC.
- */
-#define kr_serdes_maptable KREG_IDX(IBSerDesMappTable)
-
-/*
- * Below used for sdnum parameter, selecting one of the two sections
- * used for PCIe, or the single SerDes used for IB.
- */
-#define PCIE_SERDES0 0
-#define PCIE_SERDES1 1
-
-/*
- * The EPB requires addressing in a particular form. EPB_LOC() is intended
- * to make #definitions a little more readable.
- */
-#define EPB_ADDR_SHF 8
-#define EPB_LOC(chn, elt, reg) \
- (((elt & 0xf) | ((chn & 7) << 4) | ((reg & 0x3f) << 9)) << \
- EPB_ADDR_SHF)
-#define EPB_IB_QUAD0_CS_SHF (25)
-#define EPB_IB_QUAD0_CS (1U << EPB_IB_QUAD0_CS_SHF)
-#define EPB_IB_UC_CS_SHF (26)
-#define EPB_PCIE_UC_CS_SHF (27)
-#define EPB_GLOBAL_WR (1U << (EPB_ADDR_SHF + 8))
-
-/* Forward declarations. */
-static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
- u32 data, u32 mask);
-static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
- int mask);
-static int qib_sd_trimdone_poll(struct qib_devdata *dd);
-static void qib_sd_trimdone_monitor(struct qib_devdata *dd, const char *where);
-static int qib_sd_setvals(struct qib_devdata *dd);
-static int qib_sd_early(struct qib_devdata *dd);
-static int qib_sd_dactrim(struct qib_devdata *dd);
-static int qib_internal_presets(struct qib_devdata *dd);
-/* Tweak the register (CMUCTRL5) that contains the TRIMSELF controls */
-static int qib_sd_trimself(struct qib_devdata *dd, int val);
-static int epb_access(struct qib_devdata *dd, int sdnum, int claim);
-static int qib_sd7220_ib_load(struct qib_devdata *dd,
- const struct firmware *fw);
-static int qib_sd7220_ib_vfy(struct qib_devdata *dd,
- const struct firmware *fw);
-
-/*
- * Below keeps track of whether the "once per power-on" initialization has
- * been done, because uC code Version 1.32.17 or higher allows the uC to
- * be reset at will, and Automatic Equalization may require it. So the
- * state of the reset "pin" is no longer valid. Instead, we check for the
- * actual uC code having been loaded.
- */
-static int qib_ibsd_ucode_loaded(struct qib_pportdata *ppd,
- const struct firmware *fw)
-{
- struct qib_devdata *dd = ppd->dd;
-
- if (!dd->cspec->serdes_first_init_done &&
- qib_sd7220_ib_vfy(dd, fw) > 0)
- dd->cspec->serdes_first_init_done = 1;
- return dd->cspec->serdes_first_init_done;
-}
-
-/* repeat #define for local use. "Real" #define is in qib_iba7220.c */
-#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
-#define IB_MPREG5 (EPB_LOC(6, 0, 0xE) | (1L << EPB_IB_UC_CS_SHF))
-#define IB_MPREG6 (EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF))
-#define UC_PAR_CLR_D 8
-#define UC_PAR_CLR_M 0xC
-#define IB_CTRL2(chn) (EPB_LOC(chn, 7, 3) | EPB_IB_QUAD0_CS)
-#define START_EQ1(chan) EPB_LOC(chan, 7, 0x27)
-
-void qib_sd7220_clr_ibpar(struct qib_devdata *dd)
-{
- int ret;
-
- /* clear, then re-enable parity errs */
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6,
- UC_PAR_CLR_D, UC_PAR_CLR_M);
- if (ret < 0) {
- qib_dev_err(dd, "Failed clearing IBSerDes Parity err\n");
- goto bail;
- }
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0,
- UC_PAR_CLR_M);
-
- qib_read_kreg32(dd, kr_scratch);
- udelay(4);
- qib_write_kreg(dd, kr_hwerrclear,
- QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
- qib_read_kreg32(dd, kr_scratch);
-bail:
- return;
-}
-
-/*
- * After a reset or other unusual event, the epb interface may need
- * to be re-synchronized between the host and the uC.
- * Returns <0 for failure to resync within IBSD_RESYNC_TRIES (not expected).
- */
-#define IBSD_RESYNC_TRIES 3
-#define IB_PGUDP(chn) (EPB_LOC((chn), 2, 1) | EPB_IB_QUAD0_CS)
-#define IB_CMUDONE(chn) (EPB_LOC((chn), 7, 0xF) | EPB_IB_QUAD0_CS)
-
-static int qib_resync_ibepb(struct qib_devdata *dd)
-{
- int ret, pat, tries, chn;
- u32 loc;
-
- ret = -1;
- chn = 0;
- for (tries = 0; tries < (4 * IBSD_RESYNC_TRIES); ++tries) {
- loc = IB_PGUDP(chn);
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
- if (ret < 0) {
- qib_dev_err(dd, "Failed read in resync\n");
- continue;
- }
- if (ret != 0xF0 && ret != 0x55 && tries == 0)
- qib_dev_err(dd, "unexpected pattern in resync\n");
- pat = ret ^ 0xA5; /* alternate F0 and 55 */
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF);
- if (ret < 0) {
- qib_dev_err(dd, "Failed write in resync\n");
- continue;
- }
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
- if (ret < 0) {
- qib_dev_err(dd, "Failed re-read in resync\n");
- continue;
- }
- if (ret != pat) {
- qib_dev_err(dd, "Failed compare1 in resync\n");
- continue;
- }
- loc = IB_CMUDONE(chn);
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
- if (ret < 0) {
- qib_dev_err(dd, "Failed CMUDONE rd in resync\n");
- continue;
- }
- if ((ret & 0x70) != ((chn << 4) | 0x40)) {
- qib_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n",
- ret, chn);
- continue;
- }
- if (++chn == 4)
- break; /* Success */
- }
- return (ret > 0) ? 0 : ret;
-}
-
-/*
- * Localize the stuff that should be done to change IB uC reset.
- * Returns <0 for errors.
- */
-static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst)
-{
- u64 rst_val;
- int ret = 0;
- unsigned long flags;
-
- rst_val = qib_read_kreg64(dd, kr_ibserdesctrl);
- if (assert_rst) {
- /*
- * Vendor recommends "interrupting" uC before reset, to
- * minimize possible glitches.
- */
- spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
- epb_access(dd, IB_7220_SERDES, 1);
- rst_val |= 1ULL;
- /* Squelch possible parity error from _asserting_ reset */
- qib_write_kreg(dd, kr_hwerrmask,
- dd->cspec->hwerrmask &
- ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
- qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
- /* flush write, delay to ensure it took effect */
- qib_read_kreg32(dd, kr_scratch);
- udelay(2);
- /* once it's reset, can remove interrupt */
- epb_access(dd, IB_7220_SERDES, -1);
- spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
- } else {
- /*
- * Before we de-assert reset, we need to deal with
- * possible glitch on the Parity-error line.
- * Suppress it around the reset, both in chip-level
- * hwerrmask and in IB uC control reg. uC will allow
- * it again during startup.
- */
- u64 val;
-
- rst_val &= ~(1ULL);
- qib_write_kreg(dd, kr_hwerrmask,
- dd->cspec->hwerrmask &
- ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
-
- ret = qib_resync_ibepb(dd);
- if (ret < 0)
- qib_dev_err(dd, "unable to re-sync IB EPB\n");
-
- /* set uC control regs to suppress parity errs */
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1);
- if (ret < 0)
- goto bail;
- /* IB uC code past Version 1.32.17 allows suppression of wdog */
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80,
- 0x80);
- if (ret < 0) {
- qib_dev_err(dd, "Failed to set WDOG disable\n");
- goto bail;
- }
- qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
- /* flush write, delay for startup */
- qib_read_kreg32(dd, kr_scratch);
- udelay(1);
- /* clear, then re-enable parity errs */
- qib_sd7220_clr_ibpar(dd);
- val = qib_read_kreg64(dd, kr_hwerrstatus);
- if (val & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR) {
- qib_dev_err(dd, "IBUC Parity still set after RST\n");
- dd->cspec->hwerrmask &=
- ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
- }
- qib_write_kreg(dd, kr_hwerrmask,
- dd->cspec->hwerrmask);
- }
-
-bail:
- return ret;
-}
-
-static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
- const char *where)
-{
- int ret, chn, baduns;
- u64 val;
-
- if (!where)
- where = "?";
-
- /* give time for reset to settle out in EPB */
- udelay(2);
-
- ret = qib_resync_ibepb(dd);
- if (ret < 0)
- qib_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where);
-
- /* Do "sacrificial read" to get EPB in sane state after reset */
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0);
- if (ret < 0)
- qib_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where);
-
- /* Check/show "summary" Trim-done bit in IBCStatus */
- val = qib_read_kreg64(dd, kr_ibcstatus);
- if (!(val & (1ULL << 11)))
- qib_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where);
- /*
- * Do "dummy read/mod/wr" to get EPB in sane state after reset
- * The default value for MPREG6 is 0.
- */
- udelay(2);
-
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80);
- if (ret < 0)
- qib_dev_err(dd, "Failed Dummy RMW, (%s)\n", where);
- udelay(10);
-
- baduns = 0;
-
- for (chn = 3; chn >= 0; --chn) {
- /* Read CTRL reg for each channel to check TRIMDONE */
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
- IB_CTRL2(chn), 0, 0);
- if (ret < 0)
- qib_dev_err(dd,
- "Failed checking TRIMDONE, chn %d (%s)\n",
- chn, where);
-
- if (!(ret & 0x10)) {
- int probe;
-
- baduns |= (1 << chn);
- qib_dev_err(dd,
- "TRIMDONE cleared on chn %d (%02X). (%s)\n",
- chn, ret, where);
- probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
- IB_PGUDP(0), 0, 0);
- qib_dev_err(dd, "probe is %d (%02X)\n",
- probe, probe);
- probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
- IB_CTRL2(chn), 0, 0);
- qib_dev_err(dd, "re-read: %d (%02X)\n",
- probe, probe);
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
- IB_CTRL2(chn), 0x10, 0x10);
- if (ret < 0)
- qib_dev_err(dd,
- "Err on TRIMDONE rewrite1\n");
- }
- }
- for (chn = 3; chn >= 0; --chn) {
- /* Read CTRL reg for each channel to check TRIMDONE */
- if (baduns & (1 << chn)) {
- qib_dev_err(dd,
- "Resetting TRIMDONE on chn %d (%s)\n",
- chn, where);
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
- IB_CTRL2(chn), 0x10, 0x10);
- if (ret < 0)
- qib_dev_err(dd,
- "Failed re-setting TRIMDONE, chn %d (%s)\n",
- chn, where);
- }
- }
-}
-
-/*
- * Below is the portion of IBA7220-specific bringup_serdes() that actually
- * deals with registers and memory within the SerDes itself.
- * Post IB uC code version 1.32.17, was_reset being 1 is not really
- * informative, so we double-check.
- */
-int qib_sd7220_init(struct qib_devdata *dd)
-{
- const struct firmware *fw;
- int ret = 1; /* default to failure */
- int first_reset, was_reset;
-
- /* SERDES MPU reset recorded in D0 */
- was_reset = (qib_read_kreg64(dd, kr_ibserdesctrl) & 1);
- if (!was_reset) {
- /* entered with reset not asserted, we need to do it */
- qib_ibsd_reset(dd, 1);
- qib_sd_trimdone_monitor(dd, "Driver-reload");
- }
-
- ret = request_firmware(&fw, SD7220_FW_NAME, &dd->pcidev->dev);
- if (ret) {
- qib_dev_err(dd, "Failed to load IB SERDES image\n");
- goto done;
- }
-
- /* Substitute our deduced value for was_reset */
- ret = qib_ibsd_ucode_loaded(dd->pport, fw);
- if (ret < 0)
- goto bail;
-
- first_reset = !ret; /* First reset if IBSD uCode not yet loaded */
- /*
- * Alter some regs per vendor latest doc, reset-defaults
- * are not right for IB.
- */
- ret = qib_sd_early(dd);
- if (ret < 0) {
- qib_dev_err(dd, "Failed to set IB SERDES early defaults\n");
- goto bail;
- }
- /*
- * Set DAC manual trim IB.
- * We only do this once after chip has been reset (usually
- * same as once per system boot).
- */
- if (first_reset) {
- ret = qib_sd_dactrim(dd);
- if (ret < 0) {
- qib_dev_err(dd, "Failed IB SERDES DAC trim\n");
- goto bail;
- }
- }
- /*
- * Set various registers (DDS and RXEQ) that will be
- * controlled by IBC (in 1.2 mode) to reasonable preset values
- * Calling the "internal" version avoids the "check for needed"
- * and "trimdone monitor" that might be counter-productive.
- */
- ret = qib_internal_presets(dd);
- if (ret < 0) {
- qib_dev_err(dd, "Failed to set IB SERDES presets\n");
- goto bail;
- }
- ret = qib_sd_trimself(dd, 0x80);
- if (ret < 0) {
- qib_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n");
- goto bail;
- }
-
- /* Load image, then try to verify */
- ret = 0; /* Assume success */
- if (first_reset) {
- int vfy;
- int trim_done;
-
- ret = qib_sd7220_ib_load(dd, fw);
- if (ret < 0) {
- qib_dev_err(dd, "Failed to load IB SERDES image\n");
- goto bail;
- } else {
- /* Loaded image, try to verify */
- vfy = qib_sd7220_ib_vfy(dd, fw);
- if (vfy != ret) {
- qib_dev_err(dd, "SERDES PRAM VFY failed\n");
- goto bail;
- } /* end if verified */
- } /* end if loaded */
-
- /*
- * Loaded and verified. Almost good...
- * hold "success" in ret
- */
- ret = 0;
- /*
- * Prev steps all worked, continue bringup
- * De-assert RESET to uC, only in first reset, to allow
- * trimming.
- *
- * Since our default setup sets START_EQ1 to
- * PRESET, we need to clear that for this very first run.
- */
- ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38);
- if (ret < 0) {
- qib_dev_err(dd, "Failed clearing START_EQ1\n");
- goto bail;
- }
-
- qib_ibsd_reset(dd, 0);
- /*
- * If this is not the first reset, trimdone should be set
- * already. We may need to check about this.
- */
- trim_done = qib_sd_trimdone_poll(dd);
- /*
- * Whether or not trimdone succeeded, we need to put the
- * uC back into reset to avoid a possible fight with the
- * IBC state-machine.
- */
- qib_ibsd_reset(dd, 1);
-
- if (!trim_done) {
- qib_dev_err(dd, "No TRIMDONE seen\n");
- goto bail;
- }
- /*
- * DEBUG: check each time we reset if trimdone bits have
- * gotten cleared, and re-set them.
- */
- qib_sd_trimdone_monitor(dd, "First-reset");
- /* Remember so we do not re-do the load, dactrim, etc. */
- dd->cspec->serdes_first_init_done = 1;
- }
- /*
- * setup for channel training and load values for
- * RxEq and DDS in tables used by IBC in IB1.2 mode
- */
- ret = 0;
- if (qib_sd_setvals(dd) >= 0)
- goto done;
-bail:
- ret = 1;
-done:
- /* start relock timer regardless, but start at 1 second */
- set_7220_relock_poll(dd, -1);
-
- release_firmware(fw);
- return ret;
-}
-
-#define EPB_ACC_REQ 1
-#define EPB_ACC_GNT 0x100
-#define EPB_DATA_MASK 0xFF
-#define EPB_RD (1ULL << 24)
-#define EPB_TRANS_RDY (1ULL << 31)
-#define EPB_TRANS_ERR (1ULL << 30)
-#define EPB_TRANS_TRIES 5
-
-/*
- * query, claim, release ownership of the EPB (External Parallel Bus)
- * for a specified SERDES.
- * the "claim" parameter is >0 to claim, <0 to release, 0 to query.
- * Returns <0 for errors, >0 if we had ownership, else 0.
- */
-static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
-{
- u16 acc;
- u64 accval;
- int owned = 0;
- u64 oct_sel = 0;
-
- switch (sdnum) {
- case IB_7220_SERDES:
- /*
- * The IB SERDES "ownership" is fairly simple. A single
- * request/grant.
- */
- acc = kr_ibsd_epb_access_ctrl;
- break;
-
- case PCIE_SERDES0:
- case PCIE_SERDES1:
- /* PCIe SERDES has two "octants", need to select which */
- acc = kr_pciesd_epb_access_ctrl;
- oct_sel = (2 << (sdnum - PCIE_SERDES0));
- break;
-
- default:
- return 0;
- }
-
- /* Make sure any outstanding transaction was seen */
- qib_read_kreg32(dd, kr_scratch);
- udelay(15);
-
- accval = qib_read_kreg32(dd, acc);
-
- owned = !!(accval & EPB_ACC_GNT);
- if (claim < 0) {
- /* Need to release */
- u64 pollval;
- /*
- * The only writeable bits are the request and CS.
- * Both should be clear
- */
- u64 newval = 0;
-
- qib_write_kreg(dd, acc, newval);
- /* First read after write is not trustworthy */
- pollval = qib_read_kreg32(dd, acc);
- udelay(5);
- pollval = qib_read_kreg32(dd, acc);
- if (pollval & EPB_ACC_GNT)
- owned = -1;
- } else if (claim > 0) {
- /* Need to claim */
- u64 pollval;
- u64 newval = EPB_ACC_REQ | oct_sel;
-
- qib_write_kreg(dd, acc, newval);
- /* First read after write is not trustworthy */
- pollval = qib_read_kreg32(dd, acc);
- udelay(5);
- pollval = qib_read_kreg32(dd, acc);
- if (!(pollval & EPB_ACC_GNT))
- owned = -1;
- }
- return owned;
-}
-
-/*
- * Lemma to deal with race condition of write..read to epb regs
- */
-static int epb_trans(struct qib_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
-{
- int tries;
- u64 transval;
-
- qib_write_kreg(dd, reg, i_val);
- /* Throw away first read, as RDY bit may be stale */
- transval = qib_read_kreg64(dd, reg);
-
- for (tries = EPB_TRANS_TRIES; tries; --tries) {
- transval = qib_read_kreg32(dd, reg);
- if (transval & EPB_TRANS_RDY)
- break;
- udelay(5);
- }
- if (transval & EPB_TRANS_ERR)
- return -1;
- if (tries > 0 && o_vp)
- *o_vp = transval;
- return tries;
-}
-
-/**
- * qib_sd7220_reg_mod - modify SERDES register
- * @dd: the qlogic_ib device
- * @sdnum: which SERDES to access
- * @loc: location - channel, element, register, as packed by EPB_LOC() macro.
- * @wd: Write Data - value to set in register
- * @mask: ones where data should be spliced into reg.
- *
- * Basic register read/modify/write, with unneeded accesses elided. That is,
- * a mask of zero will prevent write, while a mask of 0xFF will prevent read.
- * returns current (presumed, if a write was done) contents of selected
- * register, or <0 if errors.
- */
-static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
- u32 wd, u32 mask)
-{
- u16 trans;
- u64 transval;
- int owned;
- int tries, ret;
- unsigned long flags;
-
- switch (sdnum) {
- case IB_7220_SERDES:
- trans = kr_ibsd_epb_transaction_reg;
- break;
-
- case PCIE_SERDES0:
- case PCIE_SERDES1:
- trans = kr_pciesd_epb_transaction_reg;
- break;
-
- default:
- return -1;
- }
-
- /*
- * All access is locked in software (vs other host threads) and
- * hardware (vs uC access).
- */
- spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
-
- owned = epb_access(dd, sdnum, 1);
- if (owned < 0) {
- spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
- return -1;
- }
- ret = 0;
- for (tries = EPB_TRANS_TRIES; tries; --tries) {
- transval = qib_read_kreg32(dd, trans);
- if (transval & EPB_TRANS_RDY)
- break;
- udelay(5);
- }
-
- if (tries > 0) {
- tries = 1; /* to make read-skip work */
- if (mask != 0xFF) {
- /*
- * Not a pure write, so need to read.
- * loc encodes chip-select as well as address
- */
- transval = loc | EPB_RD;
- tries = epb_trans(dd, trans, transval, &transval);
- }
- if (tries > 0 && mask != 0) {
- /*
- * Not a pure read, so need to write.
- */
- wd = (wd & mask) | (transval & ~mask);
- transval = loc | (wd & EPB_DATA_MASK);
- tries = epb_trans(dd, trans, transval, &transval);
- }
- }
- /* else, failed to see ready, what error-handling? */
-
- /*
- * Release bus. Failure is an error.
- */
- if (epb_access(dd, sdnum, -1) < 0)
- ret = -1;
- else
- ret = transval & EPB_DATA_MASK;
-
- spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
- if (tries <= 0)
- ret = -1;
- return ret;
-}
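
The read-modify-write above hinges on the mask convention from the kernel-doc: a mask of 0xFF skips the read, a mask of 0 skips the write, and anything in between splices the new bits into the previously read value. A minimal stand-alone sketch of that splice (the name epb_splice is made up for illustration; this is not driver code):

#include <stdint.h>
#include <stdio.h>

/* Merge write data into a previously read value: mask has ones where
 * bits of 'wd' should replace bits of 'old', mirroring the line
 * "wd = (wd & mask) | (transval & ~mask)" above. */
static uint8_t epb_splice(uint8_t old, uint8_t wd, uint8_t mask)
{
	return (uint8_t)((wd & mask) | (old & ~mask));
}

int main(void)
{
	/* Update only the low nibble of a register that read back 0x5A. */
	printf("0x%02X\n", epb_splice(0x5A, 0x0F, 0x0F)); /* prints 0x5F */
	return 0;
}
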
-
-#define EPB_ROM_R (2)
-#define EPB_ROM_W (1)
-/*
- * Below, all uC-related, use appropriate UC_CS, depending
- * on which SerDes is used.
- */
-#define EPB_UC_CTL EPB_LOC(6, 0, 0)
-#define EPB_MADDRL EPB_LOC(6, 0, 2)
-#define EPB_MADDRH EPB_LOC(6, 0, 3)
-#define EPB_ROMDATA EPB_LOC(6, 0, 4)
-#define EPB_RAMDATA EPB_LOC(6, 0, 5)
-
-/* Transfer data to/from uC Program RAM of IB or PCIe SerDes */
-static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
- u8 *buf, int cnt, int rd_notwr)
-{
- u16 trans;
- u64 transval;
- u64 csbit;
- int owned;
- int tries;
- int sofar;
- int addr;
- int ret;
- unsigned long flags;
- const char *op;
-
- /* Pick appropriate transaction reg and "Chip select" for this serdes */
- switch (sdnum) {
- case IB_7220_SERDES:
- csbit = 1ULL << EPB_IB_UC_CS_SHF;
- trans = kr_ibsd_epb_transaction_reg;
- break;
-
- case PCIE_SERDES0:
- case PCIE_SERDES1:
- /* PCIe SERDES has uC "chip select" in different bit, too */
- csbit = 1ULL << EPB_PCIE_UC_CS_SHF;
- trans = kr_pciesd_epb_transaction_reg;
- break;
-
- default:
- return -1;
- }
-
- op = rd_notwr ? "Rd" : "Wr";
- spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
-
- owned = epb_access(dd, sdnum, 1);
- if (owned < 0) {
- spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
- return -1;
- }
-
- /*
- * In future code, we may need to distinguish several address ranges,
- * and select various memories based on this. For now, just trim
- * "loc" (location including address and memory select) to
- * "addr" (address within memory). we will only support PRAM
- * The memory is 8KB.
- */
- addr = loc & 0x1FFF;
- for (tries = EPB_TRANS_TRIES; tries; --tries) {
- transval = qib_read_kreg32(dd, trans);
- if (transval & EPB_TRANS_RDY)
- break;
- udelay(5);
- }
-
- sofar = 0;
- if (tries > 0) {
- /*
- * Every "memory" access is doubly-indirect.
- * We set two bytes of address, then read/write
-		 * one or more bytes of data.
- */
-
- /* First, we set control to "Read" or "Write" */
- transval = csbit | EPB_UC_CTL |
- (rd_notwr ? EPB_ROM_R : EPB_ROM_W);
- tries = epb_trans(dd, trans, transval, &transval);
- while (tries > 0 && sofar < cnt) {
- if (!sofar) {
- /* Only set address at start of chunk */
- int addrbyte = (addr + sofar) >> 8;
-
- transval = csbit | EPB_MADDRH | addrbyte;
- tries = epb_trans(dd, trans, transval,
- &transval);
- if (tries <= 0)
- break;
- addrbyte = (addr + sofar) & 0xFF;
- transval = csbit | EPB_MADDRL | addrbyte;
- tries = epb_trans(dd, trans, transval,
- &transval);
- if (tries <= 0)
- break;
- }
-
- if (rd_notwr)
- transval = csbit | EPB_ROMDATA | EPB_RD;
- else
- transval = csbit | EPB_ROMDATA | buf[sofar];
- tries = epb_trans(dd, trans, transval, &transval);
- if (tries <= 0)
- break;
- if (rd_notwr)
- buf[sofar] = transval & EPB_DATA_MASK;
- ++sofar;
- }
- /* Finally, clear control-bit for Read or Write */
- transval = csbit | EPB_UC_CTL;
- tries = epb_trans(dd, trans, transval, &transval);
- }
-
- ret = sofar;
- /* Release bus. Failure is an error */
- if (epb_access(dd, sdnum, -1) < 0)
- ret = -1;
-
- spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
- if (tries <= 0)
- ret = -1;
- return ret;
-}
-
-#define PROG_CHUNK 64
-
-static int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum,
- const u8 *img, int len, int offset)
-{
- int cnt, sofar, req;
-
- sofar = 0;
- while (sofar < len) {
- req = len - sofar;
- if (req > PROG_CHUNK)
- req = PROG_CHUNK;
- cnt = qib_sd7220_ram_xfer(dd, sdnum, offset + sofar,
- (u8 *)img + sofar, req, 0);
- if (cnt < req) {
- sofar = -1;
- break;
- }
- sofar += req;
- }
- return sofar;
-}
-
-#define VFY_CHUNK 64
-#define SD_PRAM_ERROR_LIMIT 42
-
-static int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum,
- const u8 *img, int len, int offset)
-{
- int cnt, sofar, req, idx, errors;
- unsigned char readback[VFY_CHUNK];
-
- errors = 0;
- sofar = 0;
- while (sofar < len) {
- req = len - sofar;
- if (req > VFY_CHUNK)
- req = VFY_CHUNK;
- cnt = qib_sd7220_ram_xfer(dd, sdnum, sofar + offset,
- readback, req, 1);
- if (cnt < req) {
- /* failed in read itself */
- sofar = -1;
- break;
- }
- for (idx = 0; idx < cnt; ++idx) {
- if (readback[idx] != img[idx+sofar])
- ++errors;
- }
- sofar += cnt;
- }
- return errors ? -errors : sofar;
-}
-
-static int
-qib_sd7220_ib_load(struct qib_devdata *dd, const struct firmware *fw)
-{
- return qib_sd7220_prog_ld(dd, IB_7220_SERDES, fw->data, fw->size, 0);
-}
-
-static int
-qib_sd7220_ib_vfy(struct qib_devdata *dd, const struct firmware *fw)
-{
- return qib_sd7220_prog_vfy(dd, IB_7220_SERDES, fw->data, fw->size, 0);
-}
-
-/*
- * IRQ not set up at this point in init, so we poll.
- */
-#define IB_SERDES_TRIM_DONE (1ULL << 11)
-#define TRIM_TMO (15)
-
-static int qib_sd_trimdone_poll(struct qib_devdata *dd)
-{
- int trim_tmo, ret;
- uint64_t val;
-
- /*
- * Default to failure, so IBC will not start
- * without IB_SERDES_TRIM_DONE.
- */
- ret = 0;
- for (trim_tmo = 0; trim_tmo < TRIM_TMO; ++trim_tmo) {
- val = qib_read_kreg64(dd, kr_ibcstatus);
- if (val & IB_SERDES_TRIM_DONE) {
- ret = 1;
- break;
- }
- msleep(20);
- }
- if (trim_tmo >= TRIM_TMO) {
- qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
- ret = 0;
- }
- return ret;
-}
-
-#define TX_FAST_ELT (9)
-
-/*
- * Set the "negotiation" values for SERDES. These are used by the IB1.2
- * link negotiation. The macros below are an attempt to keep the values a
- * little more human-editable.
- * First, values related to Drive De-emphasis Settings.
- */
-
-#define NUM_DDS_REGS 6
-#define DDS_REG_MAP 0x76A910 /* LSB-first list of regs (in elt 9) to mod */
-
-#define DDS_VAL(amp_d, main_d, ipst_d, ipre_d, amp_s, main_s, ipst_s, ipre_s) \
- { { ((amp_d & 0x1F) << 1) | 1, ((amp_s & 0x1F) << 1) | 1, \
- (main_d << 3) | 4 | (ipre_d >> 2), \
- (main_s << 3) | 4 | (ipre_s >> 2), \
- ((ipst_d & 0xF) << 1) | ((ipre_d & 3) << 6) | 0x21, \
- ((ipst_s & 0xF) << 1) | ((ipre_s & 3) << 6) | 0x21 } }
-
-static struct dds_init {
- uint8_t reg_vals[NUM_DDS_REGS];
-} dds_init_vals[] = {
- /* DDR(FDR) SDR(HDR) */
- /* Vendor recommends below for 3m cable */
-#define DDS_3M 0
- DDS_VAL(31, 19, 12, 0, 29, 22, 9, 0),
- DDS_VAL(31, 12, 15, 4, 31, 15, 15, 1),
- DDS_VAL(31, 13, 15, 3, 31, 16, 15, 0),
- DDS_VAL(31, 14, 15, 2, 31, 17, 14, 0),
- DDS_VAL(31, 15, 15, 1, 31, 18, 13, 0),
- DDS_VAL(31, 16, 15, 0, 31, 19, 12, 0),
- DDS_VAL(31, 17, 14, 0, 31, 20, 11, 0),
- DDS_VAL(31, 18, 13, 0, 30, 21, 10, 0),
- DDS_VAL(31, 20, 11, 0, 28, 23, 8, 0),
- DDS_VAL(31, 21, 10, 0, 27, 24, 7, 0),
- DDS_VAL(31, 22, 9, 0, 26, 25, 6, 0),
- DDS_VAL(30, 23, 8, 0, 25, 26, 5, 0),
- DDS_VAL(29, 24, 7, 0, 23, 27, 4, 0),
- /* Vendor recommends below for 1m cable */
-#define DDS_1M 13
- DDS_VAL(28, 25, 6, 0, 21, 28, 3, 0),
- DDS_VAL(27, 26, 5, 0, 19, 29, 2, 0),
- DDS_VAL(25, 27, 4, 0, 17, 30, 1, 0)
-};
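
Each DDS_VAL() row above folds eight drive parameters (DDR and SDR variants of amplitude, main tap, post-cursor and pre-cursor) into the six bytes that land in the registers listed by DDS_REG_MAP. A stand-alone sketch that mirrors the macro's packing, handy for decoding a table row by hand (dds_pack is an illustrative name, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Same packing as the DDS_VAL() macro: byte pairs alternate the DDR
 * ("_d") and SDR ("_s") variants of each parameter. */
static void dds_pack(uint8_t out[6],
		     int amp_d, int main_d, int ipst_d, int ipre_d,
		     int amp_s, int main_s, int ipst_s, int ipre_s)
{
	out[0] = ((amp_d & 0x1F) << 1) | 1;
	out[1] = ((amp_s & 0x1F) << 1) | 1;
	out[2] = (main_d << 3) | 4 | (ipre_d >> 2);
	out[3] = (main_s << 3) | 4 | (ipre_s >> 2);
	out[4] = ((ipst_d & 0xF) << 1) | ((ipre_d & 3) << 6) | 0x21;
	out[5] = ((ipst_s & 0xF) << 1) | ((ipre_s & 3) << 6) | 0x21;
}

int main(void)
{
	uint8_t row[6];
	int i;

	/* First table row: DDS_VAL(31, 19, 12, 0, 29, 22, 9, 0) */
	dds_pack(row, 31, 19, 12, 0, 29, 22, 9, 0);
	for (i = 0; i < 6; i++)
		printf("%02X ", row[i]);
	printf("\n");
	return 0;
}
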
-
-/*
- * Now the RXEQ section of the table.
- */
-/* Hardware packs an element number and register address thus: */
-#define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4))
-#define RXEQ_VAL(elt, adr, val0, val1, val2, val3) \
- {RXEQ_INIT_RDESC((elt), (adr)), {(val0), (val1), (val2), (val3)} }
-
-#define RXEQ_VAL_ALL(elt, adr, val) \
- {RXEQ_INIT_RDESC((elt), (adr)), {(val), (val), (val), (val)} }
-
-#define RXEQ_SDR_DFELTH 0
-#define RXEQ_SDR_TLTH 0
-#define RXEQ_SDR_G1CNT_Z1CNT 0x11
-#define RXEQ_SDR_ZCNT 23
-
-static struct rxeq_init {
- u16 rdesc; /* in form used in SerDesDDSRXEQ */
- u8 rdata[4];
-} rxeq_init_vals[] = {
- /* Set Rcv Eq. to Preset node */
- RXEQ_VAL_ALL(7, 0x27, 0x10),
- /* Set DFELTHFDR/HDR thresholds */
- RXEQ_VAL(7, 8, 0, 0, 0, 0), /* FDR, was 0, 1, 2, 3 */
- RXEQ_VAL(7, 0x21, 0, 0, 0, 0), /* HDR */
-	/* Set TLTHFDR/HDR threshold */
- RXEQ_VAL(7, 9, 2, 2, 2, 2), /* FDR, was 0, 2, 4, 6 */
- RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR, was 0, 1, 2, 3 */
- /* Set Preamp setting 2 (ZFR/ZCNT) */
- RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR, was 12, 16, 20, 24 */
- RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR, was 12, 16, 20, 24 */
- /* Set Preamp DC gain and Setting 1 (GFR/GHR) */
- RXEQ_VAL(7, 0x1E, 16, 16, 16, 16), /* FDR, was 16, 17, 18, 20 */
- RXEQ_VAL(7, 0x1F, 16, 16, 16, 16), /* HDR, was 16, 17, 18, 20 */
- /* Toggle RELOCK (in VCDL_CTRL0) to lock to data */
- RXEQ_VAL_ALL(6, 6, 0x20), /* Set D5 High */
- RXEQ_VAL_ALL(6, 6, 0), /* Set D5 Low */
-};
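
RXEQ_INIT_RDESC() above packs an element number and a register address into the single rdesc word: the low nibble carries the element, the address sits above it. A quick stand-alone check of the first table entry (the macro is repeated here only so the sketch compiles on its own):

#include <stdio.h>

/* Same packing as the driver's RXEQ_INIT_RDESC(). */
#define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4))

int main(void)
{
	/* First table entry: RXEQ_VAL_ALL(7, 0x27, 0x10) */
	printf("0x%03X\n", RXEQ_INIT_RDESC(7, 0x27)); /* prints 0x277 */
	return 0;
}
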
-
-/* There are 17 values from vendor, but IBC only accesses the first 16 */
-#define DDS_ROWS (16)
-#define RXEQ_ROWS ARRAY_SIZE(rxeq_init_vals)
-
-static int qib_sd_setvals(struct qib_devdata *dd)
-{
- int idx, midx;
- int min_idx; /* Minimum index for this portion of table */
- uint32_t dds_reg_map;
- u64 __iomem *taddr, *iaddr;
- uint64_t data;
- uint64_t sdctl;
-
- taddr = dd->kregbase + kr_serdes_maptable;
- iaddr = dd->kregbase + kr_serdes_ddsrxeq0;
-
- /*
- * Init the DDS section of the table.
- * Each "row" of the table provokes NUM_DDS_REG writes, to the
- * registers indicated in DDS_REG_MAP.
- */
- sdctl = qib_read_kreg64(dd, kr_ibserdesctrl);
- sdctl = (sdctl & ~(0x1f << 8)) | (NUM_DDS_REGS << 8);
- sdctl = (sdctl & ~(0x1f << 13)) | (RXEQ_ROWS << 13);
- qib_write_kreg(dd, kr_ibserdesctrl, sdctl);
-
- /*
- * Iterate down table within loop for each register to store.
- */
- dds_reg_map = DDS_REG_MAP;
- for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
- data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT;
- writeq(data, iaddr + idx);
- mmiowb();
- qib_read_kreg32(dd, kr_scratch);
- dds_reg_map >>= 4;
- for (midx = 0; midx < DDS_ROWS; ++midx) {
- u64 __iomem *daddr = taddr + ((midx << 4) + idx);
-
- data = dds_init_vals[midx].reg_vals[idx];
- writeq(data, daddr);
- mmiowb();
- qib_read_kreg32(dd, kr_scratch);
- } /* End inner for (vals for this reg, each row) */
- } /* end outer for (regs to be stored) */
-
- /*
- * Init the RXEQ section of the table.
- * This runs in a different order, as the pattern of
- * register references is more complex, but there are only
- * four "data" values per register.
- */
- min_idx = idx; /* RXEQ indices pick up where DDS left off */
- taddr += 0x100; /* RXEQ data is in second half of table */
- /* Iterate through RXEQ register addresses */
- for (idx = 0; idx < RXEQ_ROWS; ++idx) {
- int didx; /* "destination" */
- int vidx;
-
- /* didx is offset by min_idx to address RXEQ range of regs */
- didx = idx + min_idx;
- /* Store the next RXEQ register address */
- writeq(rxeq_init_vals[idx].rdesc, iaddr + didx);
- mmiowb();
- qib_read_kreg32(dd, kr_scratch);
- /* Iterate through RXEQ values */
- for (vidx = 0; vidx < 4; vidx++) {
- data = rxeq_init_vals[idx].rdata[vidx];
- writeq(data, taddr + (vidx << 6) + idx);
- mmiowb();
- qib_read_kreg32(dd, kr_scratch);
- }
- } /* end outer for (Reg-writes for RXEQ) */
- return 0;
-}
-
-#define CMUCTRL5 EPB_LOC(7, 0, 0x15)
-#define RXHSCTRL0(chan) EPB_LOC(chan, 6, 0)
-#define VCDL_DAC2(chan) EPB_LOC(chan, 6, 5)
-#define VCDL_CTRL0(chan) EPB_LOC(chan, 6, 6)
-#define VCDL_CTRL2(chan) EPB_LOC(chan, 6, 8)
-#define START_EQ2(chan) EPB_LOC(chan, 7, 0x28)
-
-/*
- * Repeat a "store" across all channels of the IB SerDes.
- * Although nominally it inherits the "read value" of the last
- * channel it modified, the only really useful return is <0 for
- * failure, >= 0 for success. The parameter 'loc' is assumed to
- * be the location in some channel of the register to be modified.
- * The caller can specify use of the "gang write" option of EPB,
- * in which case we use the specified channel data for any fields
- * not explicitly written.
- */
-static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
- int mask)
-{
- int ret = -1;
- int chnl;
-
- if (loc & EPB_GLOBAL_WR) {
- /*
- * Our caller has assured us that we can set all four
- * channels at once. Trust that. If mask is not 0xFF,
- * we will read the _specified_ channel for our starting
- * value.
- */
- loc |= (1U << EPB_IB_QUAD0_CS_SHF);
- chnl = (loc >> (4 + EPB_ADDR_SHF)) & 7;
- if (mask != 0xFF) {
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
- loc & ~EPB_GLOBAL_WR, 0, 0);
- if (ret < 0) {
- int sloc = loc >> EPB_ADDR_SHF;
-
- qib_dev_err(dd,
- "pre-read failed: elt %d, addr 0x%X, chnl %d\n",
- (sloc & 0xF),
- (sloc >> 9) & 0x3f, chnl);
- return ret;
- }
- val = (ret & ~mask) | (val & mask);
- }
- loc &= ~(7 << (4+EPB_ADDR_SHF));
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
- if (ret < 0) {
- int sloc = loc >> EPB_ADDR_SHF;
-
- qib_dev_err(dd,
- "Global WR failed: elt %d, addr 0x%X, val %02X\n",
- (sloc & 0xF), (sloc >> 9) & 0x3f, val);
- }
- return ret;
- }
- /* Clear "channel" and set CS so we can simply iterate */
- loc &= ~(7 << (4+EPB_ADDR_SHF));
- loc |= (1U << EPB_IB_QUAD0_CS_SHF);
- for (chnl = 0; chnl < 4; ++chnl) {
- int cloc = loc | (chnl << (4+EPB_ADDR_SHF));
-
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask);
- if (ret < 0) {
- int sloc = loc >> EPB_ADDR_SHF;
-
- qib_dev_err(dd,
- "Write failed: elt %d, addr 0x%X, chnl %d, val 0x%02X, mask 0x%02X\n",
- (sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
- val & 0xFF, mask & 0xFF);
- break;
- }
- }
- return ret;
-}
-
-/*
- * Set the Tx values normally modified by IBC in IB1.2 mode to default
- * values, as taken from the first row of the init table.
- */
-static int set_dds_vals(struct qib_devdata *dd, struct dds_init *ddi)
-{
- int ret;
- int idx, reg, data;
- uint32_t regmap;
-
- regmap = DDS_REG_MAP;
- for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
- reg = (regmap & 0xF);
- regmap >>= 4;
- data = ddi->reg_vals[idx];
- /* Vendor says RMW not needed for these regs, use 0xFF mask */
- ret = ibsd_mod_allchnls(dd, EPB_LOC(0, 9, reg), data, 0xFF);
- if (ret < 0)
- break;
- }
- return ret;
-}
-
-/*
- * Set the Rx values normally modified by IBC in IB1.2 mode to default
- * values, as taken from the selected column of the init table.
- */
-static int set_rxeq_vals(struct qib_devdata *dd, int vsel)
-{
- int ret;
- int ridx;
- int cnt = ARRAY_SIZE(rxeq_init_vals);
-
- for (ridx = 0; ridx < cnt; ++ridx) {
- int elt, reg, val, loc;
-
- elt = rxeq_init_vals[ridx].rdesc & 0xF;
- reg = rxeq_init_vals[ridx].rdesc >> 4;
- loc = EPB_LOC(0, elt, reg);
- val = rxeq_init_vals[ridx].rdata[vsel];
- /* mask of 0xFF, because hardware does full-byte store. */
- ret = ibsd_mod_allchnls(dd, loc, val, 0xFF);
- if (ret < 0)
- break;
- }
- return ret;
-}
-
-/*
- * Set the default values (row 0) for DDR Drive De-emphasis.
- * We do this initially and whenever we turn off IB-1.2.
- *
- * The "default" values for Rx equalization are also stored to
- * SerDes registers. Formerly (and still default), we used set 2.
- * For experimenting with cables and link-partners, we allow changing
- * that via a module parameter.
- */
-static unsigned qib_rxeq_set = 2;
-module_param_named(rxeq_default_set, qib_rxeq_set, uint,
- S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(rxeq_default_set,
- "Which set [0..3] of Rx Equalization values is default");
-
-static int qib_internal_presets(struct qib_devdata *dd)
-{
- int ret = 0;
-
- ret = set_dds_vals(dd, dds_init_vals + DDS_3M);
-
- if (ret < 0)
- qib_dev_err(dd, "Failed to set default DDS values\n");
- ret = set_rxeq_vals(dd, qib_rxeq_set & 3);
- if (ret < 0)
- qib_dev_err(dd, "Failed to set default RXEQ values\n");
- return ret;
-}
-
-int qib_sd7220_presets(struct qib_devdata *dd)
-{
- int ret = 0;
-
- if (!dd->cspec->presets_needed)
- return ret;
- dd->cspec->presets_needed = 0;
- /* Assert uC reset, so we don't clash with it. */
- qib_ibsd_reset(dd, 1);
- udelay(2);
- qib_sd_trimdone_monitor(dd, "link-down");
-
- ret = qib_internal_presets(dd);
- return ret;
-}
-
-static int qib_sd_trimself(struct qib_devdata *dd, int val)
-{
- int loc = CMUCTRL5 | (1U << EPB_IB_QUAD0_CS_SHF);
-
- return qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
-}
-
-static int qib_sd_early(struct qib_devdata *dd)
-{
- int ret;
-
- ret = ibsd_mod_allchnls(dd, RXHSCTRL0(0) | EPB_GLOBAL_WR, 0xD4, 0xFF);
- if (ret < 0)
- goto bail;
- ret = ibsd_mod_allchnls(dd, START_EQ1(0) | EPB_GLOBAL_WR, 0x10, 0xFF);
- if (ret < 0)
- goto bail;
- ret = ibsd_mod_allchnls(dd, START_EQ2(0) | EPB_GLOBAL_WR, 0x30, 0xFF);
-bail:
- return ret;
-}
-
-#define BACTRL(chnl) EPB_LOC(chnl, 6, 0x0E)
-#define LDOUTCTRL1(chnl) EPB_LOC(chnl, 7, 6)
-#define RXHSSTATUS(chnl) EPB_LOC(chnl, 6, 0xF)
-
-static int qib_sd_dactrim(struct qib_devdata *dd)
-{
- int ret;
-
- ret = ibsd_mod_allchnls(dd, VCDL_DAC2(0) | EPB_GLOBAL_WR, 0x2D, 0xFF);
- if (ret < 0)
- goto bail;
-
- /* more fine-tuning of what will be default */
- ret = ibsd_mod_allchnls(dd, VCDL_CTRL2(0), 3, 0xF);
- if (ret < 0)
- goto bail;
-
- ret = ibsd_mod_allchnls(dd, BACTRL(0) | EPB_GLOBAL_WR, 0x40, 0xFF);
- if (ret < 0)
- goto bail;
-
- ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
- if (ret < 0)
- goto bail;
-
- ret = ibsd_mod_allchnls(dd, RXHSSTATUS(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
- if (ret < 0)
- goto bail;
-
- /*
- * Delay for max possible number of steps, with slop.
- * Each step is about 4usec.
- */
- udelay(415);
-
- ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x00, 0xFF);
-
-bail:
- return ret;
-}
-
-#define RELOCK_FIRST_MS 3
-#define RXLSPPM(chan) EPB_LOC(chan, 0, 2)
-void toggle_7220_rclkrls(struct qib_devdata *dd)
-{
- int loc = RXLSPPM(0) | EPB_GLOBAL_WR;
- int ret;
-
- ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
- if (ret < 0)
- qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
- else {
- udelay(1);
- ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
- }
- /* And again for good measure */
- udelay(1);
- ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
- if (ret < 0)
- qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
- else {
- udelay(1);
- ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
- }
- /* Now reset xgxs and IBC to complete the recovery */
- dd->f_xgxs_reset(dd->pport);
-}
-
-/*
- * Shut down the timer that polls for relock occasions, if needed.
- * This is "hooked" from qib_7220_quiet_serdes(), which is called
- * just before qib_shutdown_device() in qib_driver.c shuts down all
- * the other timers.
- */
-void shutdown_7220_relock_poll(struct qib_devdata *dd)
-{
- if (dd->cspec->relock_timer_active)
- del_timer_sync(&dd->cspec->relock_timer);
-}
-
-static unsigned qib_relock_by_timer = 1;
-module_param_named(relock_by_timer, qib_relock_by_timer, uint,
- S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up");
-
-static void qib_run_relock(unsigned long opaque)
-{
- struct qib_devdata *dd = (struct qib_devdata *)opaque;
- struct qib_pportdata *ppd = dd->pport;
- struct qib_chip_specific *cs = dd->cspec;
- int timeoff;
-
- /*
-	 * Check link-training state for a "stuck" state, when down.
-	 * If found, try relock and schedule another try at an
-	 * exponentially growing delay, maxed at one second.
-	 * If not stuck, our work is done.
- */
- if ((dd->flags & QIB_INITTED) && !(ppd->lflags &
- (QIBL_IB_AUTONEG_INPROG | QIBL_LINKINIT | QIBL_LINKARMED |
- QIBL_LINKACTIVE))) {
- if (qib_relock_by_timer) {
- if (!(ppd->lflags & QIBL_IB_LINK_DISABLED))
- toggle_7220_rclkrls(dd);
- }
- /* re-set timer for next check */
- timeoff = cs->relock_interval << 1;
- if (timeoff > HZ)
- timeoff = HZ;
- cs->relock_interval = timeoff;
- } else
- timeoff = HZ;
- mod_timer(&cs->relock_timer, jiffies + timeoff);
-}
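
The comment at the top of qib_run_relock() describes the retry policy: while the link stays stuck, the poll interval doubles on every tick until it is capped at one second. A stand-alone sketch of that schedule (HZ is assumed to be 250 purely for the printout; not driver code):

#include <stdio.h>

#define HZ 250	/* assumed tick rate, for illustration only */

int main(void)
{
	/* Start from a short first interval and double until capped at HZ. */
	unsigned int interval = 1;	/* jiffies */
	int tick;

	for (tick = 0; tick < 12; tick++) {
		printf("tick %2d: next check in %u jiffies\n", tick, interval);
		interval <<= 1;
		if (interval > HZ)
			interval = HZ;
	}
	return 0;
}
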
-
-void set_7220_relock_poll(struct qib_devdata *dd, int ibup)
-{
- struct qib_chip_specific *cs = dd->cspec;
-
- if (ibup) {
- /* We are now up, relax timer to 1 second interval */
- if (cs->relock_timer_active) {
- cs->relock_interval = HZ;
- mod_timer(&cs->relock_timer, jiffies + HZ);
- }
- } else {
- /* Transition to down, (re-)set timer to short interval. */
- unsigned int timeout;
-
- timeout = msecs_to_jiffies(RELOCK_FIRST_MS);
- if (timeout == 0)
- timeout = 1;
- /* If timer has not yet been started, do so. */
- if (!cs->relock_timer_active) {
- cs->relock_timer_active = 1;
- init_timer(&cs->relock_timer);
- cs->relock_timer.function = qib_run_relock;
- cs->relock_timer.data = (unsigned long) dd;
- cs->relock_interval = timeout;
- cs->relock_timer.expires = jiffies + timeout;
- add_timer(&cs->relock_timer);
- } else {
- cs->relock_interval = timeout;
- mod_timer(&cs->relock_timer, jiffies + timeout);
- }
- }
-}
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
deleted file mode 100644
index c6d6a54d2e19..000000000000
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ /dev/null
@@ -1,1039 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2007 - 2012 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/spinlock.h>
-#include <linux/netdevice.h>
-#include <linux/moduleparam.h>
-
-#include "qib.h"
-#include "qib_common.h"
-
-/* default pio off, sdma on */
-static ushort sdma_descq_cnt = 256;
-module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO);
-MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
-
-/*
- * Bits defined in the send DMA descriptor.
- */
-#define SDMA_DESC_LAST (1ULL << 11)
-#define SDMA_DESC_FIRST (1ULL << 12)
-#define SDMA_DESC_DMA_HEAD (1ULL << 13)
-#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
-#define SDMA_DESC_INTR (1ULL << 15)
-#define SDMA_DESC_COUNT_LSB 16
-#define SDMA_DESC_GEN_LSB 30
-
-char *qib_sdma_state_names[] = {
- [qib_sdma_state_s00_hw_down] = "s00_HwDown",
- [qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
- [qib_sdma_state_s20_idle] = "s20_Idle",
- [qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
- [qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
- [qib_sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
- [qib_sdma_state_s99_running] = "s99_Running",
-};
-
-char *qib_sdma_event_names[] = {
- [qib_sdma_event_e00_go_hw_down] = "e00_GoHwDown",
- [qib_sdma_event_e10_go_hw_start] = "e10_GoHwStart",
- [qib_sdma_event_e20_hw_started] = "e20_HwStarted",
- [qib_sdma_event_e30_go_running] = "e30_GoRunning",
- [qib_sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
- [qib_sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
- [qib_sdma_event_e60_hw_halted] = "e60_HwHalted",
- [qib_sdma_event_e70_go_idle] = "e70_GoIdle",
- [qib_sdma_event_e7220_err_halted] = "e7220_ErrHalted",
- [qib_sdma_event_e7322_err_halted] = "e7322_ErrHalted",
- [qib_sdma_event_e90_timer_tick] = "e90_TimerTick",
-};
-
-/* declare all statics here rather than keep sorting */
-static int alloc_sdma(struct qib_pportdata *);
-static void sdma_complete(struct kref *);
-static void sdma_finalput(struct qib_sdma_state *);
-static void sdma_get(struct qib_sdma_state *);
-static void sdma_put(struct qib_sdma_state *);
-static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
-static void sdma_start_sw_clean_up(struct qib_pportdata *);
-static void sdma_sw_clean_up_task(unsigned long);
-static void unmap_desc(struct qib_pportdata *, unsigned);
-
-static void sdma_get(struct qib_sdma_state *ss)
-{
- kref_get(&ss->kref);
-}
-
-static void sdma_complete(struct kref *kref)
-{
- struct qib_sdma_state *ss =
- container_of(kref, struct qib_sdma_state, kref);
-
- complete(&ss->comp);
-}
-
-static void sdma_put(struct qib_sdma_state *ss)
-{
- kref_put(&ss->kref, sdma_complete);
-}
-
-static void sdma_finalput(struct qib_sdma_state *ss)
-{
- sdma_put(ss);
- wait_for_completion(&ss->comp);
-}
-
-/*
- * Complete all the sdma requests on the active list, in the correct
- * order, and with appropriate processing. Called when cleaning up
- * after sdma shutdown, and when new sdma requests are submitted for
- * a link that is down. This matches what is done for requests
- * that complete normally; it's just the full list.
- *
- * Must be called with sdma_lock held
- */
-static void clear_sdma_activelist(struct qib_pportdata *ppd)
-{
- struct qib_sdma_txreq *txp, *txp_next;
-
- list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
- list_del_init(&txp->list);
- if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
- unsigned idx;
-
- idx = txp->start_idx;
- while (idx != txp->next_descq_idx) {
- unmap_desc(ppd, idx);
- if (++idx == ppd->sdma_descq_cnt)
- idx = 0;
- }
- }
- if (txp->callback)
- (*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
- }
-}
-
-static void sdma_sw_clean_up_task(unsigned long opaque)
-{
- struct qib_pportdata *ppd = (struct qib_pportdata *) opaque;
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
-
- /*
- * At this point, the following should always be true:
- * - We are halted, so no more descriptors are getting retired.
- * - We are not running, so no one is submitting new work.
- * - Only we can send the e40_sw_cleaned, so we can't start
- * running again until we say so. So, the active list and
- * descq are ours to play with.
- */
-
- /* Process all retired requests. */
- qib_sdma_make_progress(ppd);
-
- clear_sdma_activelist(ppd);
-
- /*
- * Resync count of added and removed. It is VERY important that
- * sdma_descq_removed NEVER decrement - user_sdma depends on it.
- */
- ppd->sdma_descq_removed = ppd->sdma_descq_added;
-
- /*
- * Reset our notion of head and tail.
- * Note that the HW registers will be reset when switching states
- * due to calling __qib_sdma_process_event() below.
- */
- ppd->sdma_descq_tail = 0;
- ppd->sdma_descq_head = 0;
- ppd->sdma_head_dma[0] = 0;
- ppd->sdma_generation = 0;
-
- __qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);
-
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-}
-
-/*
- * This is called when changing to state qib_sdma_state_s10_hw_start_up_wait
- * as a result of send buffer errors or send DMA descriptor errors.
- * We want to disarm the buffers in these cases.
- */
-static void sdma_hw_start_up(struct qib_pportdata *ppd)
-{
- struct qib_sdma_state *ss = &ppd->sdma_state;
- unsigned bufno;
-
- for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno)
- ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));
-
- ppd->dd->f_sdma_hw_start_up(ppd);
-}
-
-static void sdma_sw_tear_down(struct qib_pportdata *ppd)
-{
- struct qib_sdma_state *ss = &ppd->sdma_state;
-
- /* Releasing this reference means the state machine has stopped. */
- sdma_put(ss);
-}
-
-static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
-{
- tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
-}
-
-static void sdma_set_state(struct qib_pportdata *ppd,
- enum qib_sdma_states next_state)
-{
- struct qib_sdma_state *ss = &ppd->sdma_state;
- struct sdma_set_state_action *action = ss->set_state_action;
- unsigned op = 0;
-
- /* debugging bookkeeping */
- ss->previous_state = ss->current_state;
- ss->previous_op = ss->current_op;
-
- ss->current_state = next_state;
-
- if (action[next_state].op_enable)
- op |= QIB_SDMA_SENDCTRL_OP_ENABLE;
-
- if (action[next_state].op_intenable)
- op |= QIB_SDMA_SENDCTRL_OP_INTENABLE;
-
- if (action[next_state].op_halt)
- op |= QIB_SDMA_SENDCTRL_OP_HALT;
-
- if (action[next_state].op_drain)
- op |= QIB_SDMA_SENDCTRL_OP_DRAIN;
-
- if (action[next_state].go_s99_running_tofalse)
- ss->go_s99_running = 0;
-
- if (action[next_state].go_s99_running_totrue)
- ss->go_s99_running = 1;
-
- ss->current_op = op;
-
- ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
-}
-
-static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
-{
- __le64 *descqp = &ppd->sdma_descq[head].qw[0];
- u64 desc[2];
- dma_addr_t addr;
- size_t len;
-
- desc[0] = le64_to_cpu(descqp[0]);
- desc[1] = le64_to_cpu(descqp[1]);
-
- addr = (desc[1] << 32) | (desc[0] >> 32);
- len = (desc[0] >> 14) & (0x7ffULL << 2);
- dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
-}
-
-static int alloc_sdma(struct qib_pportdata *ppd)
-{
- ppd->sdma_descq_cnt = sdma_descq_cnt;
- if (!ppd->sdma_descq_cnt)
- ppd->sdma_descq_cnt = 256;
-
- /* Allocate memory for SendDMA descriptor FIFO */
- ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
- ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys,
- GFP_KERNEL);
-
- if (!ppd->sdma_descq) {
- qib_dev_err(ppd->dd,
- "failed to allocate SendDMA descriptor FIFO memory\n");
- goto bail;
- }
-
- /* Allocate memory for DMA of head register to memory */
- ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
- PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
- if (!ppd->sdma_head_dma) {
- qib_dev_err(ppd->dd,
- "failed to allocate SendDMA head memory\n");
- goto cleanup_descq;
- }
- ppd->sdma_head_dma[0] = 0;
- return 0;
-
-cleanup_descq:
- dma_free_coherent(&ppd->dd->pcidev->dev,
- ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq,
- ppd->sdma_descq_phys);
- ppd->sdma_descq = NULL;
- ppd->sdma_descq_phys = 0;
-bail:
- ppd->sdma_descq_cnt = 0;
- return -ENOMEM;
-}
-
-static void free_sdma(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
-
- if (ppd->sdma_head_dma) {
- dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
- (void *)ppd->sdma_head_dma,
- ppd->sdma_head_phys);
- ppd->sdma_head_dma = NULL;
- ppd->sdma_head_phys = 0;
- }
-
- if (ppd->sdma_descq) {
- dma_free_coherent(&dd->pcidev->dev,
- ppd->sdma_descq_cnt * sizeof(u64[2]),
- ppd->sdma_descq, ppd->sdma_descq_phys);
- ppd->sdma_descq = NULL;
- ppd->sdma_descq_phys = 0;
- }
-}
-
-static inline void make_sdma_desc(struct qib_pportdata *ppd,
- u64 *sdmadesc, u64 addr, u64 dwlen,
- u64 dwoffset)
-{
-
- WARN_ON(addr & 3);
- /* SDmaPhyAddr[47:32] */
- sdmadesc[1] = addr >> 32;
- /* SDmaPhyAddr[31:0] */
- sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
- /* SDmaGeneration[1:0] */
- sdmadesc[0] |= (ppd->sdma_generation & 3ULL) <<
- SDMA_DESC_GEN_LSB;
- /* SDmaDwordCount[10:0] */
- sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
- /* SDmaBufOffset[12:2] */
- sdmadesc[0] |= dwoffset & 0x7ffULL;
-}
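
make_sdma_desc() packs the DMA address, generation, dword count and dword offset into the two 64-bit descriptor words; unmap_desc() and dump_sdma_state() later recover the same fields. A stand-alone pack/decode sketch using the same bit positions (the generation is passed as a parameter here rather than read from ppd->sdma_generation; illustration only, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Pack as make_sdma_desc() does: word 0 carries the low address bits,
 * generation, dword count and dword offset; word 1 the high address bits. */
static void pack_desc(uint64_t d[2], uint64_t addr, uint64_t dwlen,
		      uint64_t dwoffset, uint64_t gen)
{
	d[1] = addr >> 32;
	d[0] = (addr & 0xfffffffcULL) << 32;
	d[0] |= (gen & 3ULL) << 30;		/* SDMA_DESC_GEN_LSB */
	d[0] |= (dwlen & 0x7ffULL) << 16;	/* SDMA_DESC_COUNT_LSB */
	d[0] |= dwoffset & 0x7ffULL;
}

/* Decode with the same shifts used by unmap_desc()/dump_sdma_state(). */
static void decode_desc(const uint64_t d[2])
{
	uint64_t addr = (d[1] << 32) | ((d[0] >> 32) & 0xfffffffcULL);
	unsigned gen = (d[0] >> 30) & 3;
	unsigned len = (d[0] >> 14) & (0x7ffULL << 2);	/* bytes */
	unsigned off = (d[0] & 0x7ffULL) << 2;		/* bytes */

	printf("addr 0x%llx gen %u len %u off %u\n",
	       (unsigned long long)addr, gen, len, off);
}

int main(void)
{
	uint64_t d[2];

	pack_desc(d, 0x12345678abcdef00ULL, 64, 2, 1);
	decode_desc(d);	/* addr 0x12345678abcdef00 gen 1 len 256 off 8 */
	return 0;
}
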
-
-/* sdma_lock must be held */
-int qib_sdma_make_progress(struct qib_pportdata *ppd)
-{
- struct list_head *lp = NULL;
- struct qib_sdma_txreq *txp = NULL;
- struct qib_devdata *dd = ppd->dd;
- int progress = 0;
- u16 hwhead;
- u16 idx = 0;
-
- hwhead = dd->f_sdma_gethead(ppd);
-
- /* The reason for some of the complexity of this code is that
- * not all descriptors have corresponding txps. So, we have to
- * be able to skip over descs until we wander into the range of
- * the next txp on the list.
- */
-
- if (!list_empty(&ppd->sdma_activelist)) {
- lp = ppd->sdma_activelist.next;
- txp = list_entry(lp, struct qib_sdma_txreq, list);
- idx = txp->start_idx;
- }
-
- while (ppd->sdma_descq_head != hwhead) {
- /* if desc is part of this txp, unmap if needed */
- if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
- (idx == ppd->sdma_descq_head)) {
- unmap_desc(ppd, ppd->sdma_descq_head);
- if (++idx == ppd->sdma_descq_cnt)
- idx = 0;
- }
-
-		/* increment dequeued desc count */
- ppd->sdma_descq_removed++;
-
- /* advance head, wrap if needed */
- if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
- ppd->sdma_descq_head = 0;
-
- /* if now past this txp's descs, do the callback */
- if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
- /* remove from active list */
- list_del_init(&txp->list);
- if (txp->callback)
- (*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
- /* see if there is another txp */
- if (list_empty(&ppd->sdma_activelist))
- txp = NULL;
- else {
- lp = ppd->sdma_activelist.next;
- txp = list_entry(lp, struct qib_sdma_txreq,
- list);
- idx = txp->start_idx;
- }
- }
- progress = 1;
- }
- if (progress)
- qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
- return progress;
-}
-
-/*
- * This is called from interrupt context.
- */
-void qib_sdma_intr(struct qib_pportdata *ppd)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
-
- __qib_sdma_intr(ppd);
-
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-}
-
-void __qib_sdma_intr(struct qib_pportdata *ppd)
-{
- if (__qib_sdma_running(ppd)) {
- qib_sdma_make_progress(ppd);
- if (!list_empty(&ppd->sdma_userpending))
- qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
- }
-}
-
-int qib_setup_sdma(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
- int ret = 0;
-
- ret = alloc_sdma(ppd);
- if (ret)
- goto bail;
-
- /* set consistent sdma state */
- ppd->dd->f_sdma_init_early(ppd);
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-
- /* set up reference counting */
- kref_init(&ppd->sdma_state.kref);
- init_completion(&ppd->sdma_state.comp);
-
- ppd->sdma_generation = 0;
- ppd->sdma_descq_head = 0;
- ppd->sdma_descq_removed = 0;
- ppd->sdma_descq_added = 0;
-
- ppd->sdma_intrequest = 0;
- INIT_LIST_HEAD(&ppd->sdma_userpending);
-
- INIT_LIST_HEAD(&ppd->sdma_activelist);
-
- tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
- (unsigned long)ppd);
-
- ret = dd->f_init_sdma_regs(ppd);
- if (ret)
- goto bail_alloc;
-
- qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);
-
- return 0;
-
-bail_alloc:
- qib_teardown_sdma(ppd);
-bail:
- return ret;
-}
-
-void qib_teardown_sdma(struct qib_pportdata *ppd)
-{
- qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
-
- /*
- * This waits for the state machine to exit so it is not
- * necessary to kill the sdma_sw_clean_up_task to make sure
- * it is not running.
- */
- sdma_finalput(&ppd->sdma_state);
-
- free_sdma(ppd);
-}
-
-int qib_sdma_running(struct qib_pportdata *ppd)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- ret = __qib_sdma_running(ppd);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-
- return ret;
-}
-
-/*
- * Complete a request when sdma is not running; likely the only request,
- * but to simplify the code, always queue it, then process the full
- * activelist. We process the entire list to ensure that this particular
- * request does get its callback, but in the correct order.
- * Must be called with sdma_lock held
- */
-static void complete_sdma_err_req(struct qib_pportdata *ppd,
- struct qib_verbs_txreq *tx)
-{
- atomic_inc(&tx->qp->s_dma_busy);
- /* no sdma descriptors, so no unmap_desc */
- tx->txreq.start_idx = 0;
- tx->txreq.next_descq_idx = 0;
- list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
- clear_sdma_activelist(ppd);
-}
-
-/*
- * This function queues one IB packet onto the send DMA queue per call.
- * The caller is responsible for checking:
- * 1) The number of send DMA descriptor entries is less than the size of
- * the descriptor queue.
- * 2) The IB SGE addresses and lengths are 32-bit aligned
- * (except possibly the last SGE's length)
- * 3) The SGE addresses are suitable for passing to dma_map_single().
- */
-int qib_sdma_verbs_send(struct qib_pportdata *ppd,
- struct qib_sge_state *ss, u32 dwords,
- struct qib_verbs_txreq *tx)
-{
- unsigned long flags;
- struct qib_sge *sge;
- struct qib_qp *qp;
- int ret = 0;
- u16 tail;
- __le64 *descqp;
- u64 sdmadesc[2];
- u32 dwoffset;
- dma_addr_t addr;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
-
-retry:
- if (unlikely(!__qib_sdma_running(ppd))) {
- complete_sdma_err_req(ppd, tx);
- goto unlock;
- }
-
- if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
- if (qib_sdma_make_progress(ppd))
- goto retry;
- if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
- ppd->dd->f_sdma_set_desc_cnt(ppd,
- ppd->sdma_descq_cnt / 2);
- goto busy;
- }
-
- dwoffset = tx->hdr_dwords;
- make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);
-
- sdmadesc[0] |= SDMA_DESC_FIRST;
- if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
- sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
-
- /* write to the descq */
- tail = ppd->sdma_descq_tail;
- descqp = &ppd->sdma_descq[tail].qw[0];
- *descqp++ = cpu_to_le64(sdmadesc[0]);
- *descqp++ = cpu_to_le64(sdmadesc[1]);
-
- /* increment the tail */
- if (++tail == ppd->sdma_descq_cnt) {
- tail = 0;
- descqp = &ppd->sdma_descq[0].qw[0];
- ++ppd->sdma_generation;
- }
-
- tx->txreq.start_idx = tail;
-
- sge = &ss->sge;
- while (dwords) {
- u32 dw;
- u32 len;
-
- len = dwords << 2;
- if (len > sge->length)
- len = sge->length;
- if (len > sge->sge_length)
- len = sge->sge_length;
- BUG_ON(len == 0);
- dw = (len + 3) >> 2;
- addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
- dw << 2, DMA_TO_DEVICE);
- if (dma_mapping_error(&ppd->dd->pcidev->dev, addr))
- goto unmap;
- sdmadesc[0] = 0;
- make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
- /* SDmaUseLargeBuf has to be set in every descriptor */
- if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
- sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
- /* write to the descq */
- *descqp++ = cpu_to_le64(sdmadesc[0]);
- *descqp++ = cpu_to_le64(sdmadesc[1]);
-
- /* increment the tail */
- if (++tail == ppd->sdma_descq_cnt) {
- tail = 0;
- descqp = &ppd->sdma_descq[0].qw[0];
- ++ppd->sdma_generation;
- }
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (--ss->num_sge)
- *sge = *ss->sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= QIB_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
-
- dwoffset += dw;
- dwords -= dw;
- }
-
- if (!tail)
- descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0];
- descqp -= 2;
- descqp[0] |= cpu_to_le64(SDMA_DESC_LAST);
- if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
- descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
- if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
- descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);
-
- atomic_inc(&tx->qp->s_dma_busy);
- tx->txreq.next_descq_idx = tail;
- ppd->dd->f_sdma_update_tail(ppd, tail);
- ppd->sdma_descq_added += tx->txreq.sg_count;
- list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
- goto unlock;
-
-unmap:
- for (;;) {
- if (!tail)
- tail = ppd->sdma_descq_cnt - 1;
- else
- tail--;
- if (tail == ppd->sdma_descq_tail)
- break;
- unmap_desc(ppd, tail);
- }
- qp = tx->qp;
- qib_put_txreq(tx);
- spin_lock(&qp->r_lock);
- spin_lock(&qp->s_lock);
- if (qp->ibqp.qp_type == IB_QPT_RC) {
- /* XXX what about error sending RDMA read responses? */
- if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)
- qib_error_qp(qp, IB_WC_GENERAL_ERR);
- } else if (qp->s_wqe)
- qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
- spin_unlock(&qp->s_lock);
- spin_unlock(&qp->r_lock);
- /* return zero to process the next send work request */
- goto unlock;
-
-busy:
- qp = tx->qp;
- spin_lock(&qp->s_lock);
- if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
- struct qib_ibdev *dev;
-
- /*
- * If we couldn't queue the DMA request, save the info
- * and try again later rather than destroying the
- * buffer and undoing the side effects of the copy.
- */
- tx->ss = ss;
- tx->dwords = dwords;
- qp->s_tx = tx;
- dev = &ppd->dd->verbs_dev;
- spin_lock(&dev->pending_lock);
- if (list_empty(&qp->iowait)) {
- struct qib_ibport *ibp;
-
- ibp = &ppd->ibport_data;
- ibp->n_dmawait++;
- qp->s_flags |= QIB_S_WAIT_DMA_DESC;
- list_add_tail(&qp->iowait, &dev->dmawait);
- }
- spin_unlock(&dev->pending_lock);
- qp->s_flags &= ~QIB_S_BUSY;
- spin_unlock(&qp->s_lock);
- ret = -EBUSY;
- } else {
- spin_unlock(&qp->s_lock);
- qib_put_txreq(tx);
- }
-unlock:
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- return ret;
-}
-
-/*
- * sdma_lock should be acquired before calling this routine
- */
-void dump_sdma_state(struct qib_pportdata *ppd)
-{
- struct qib_sdma_desc *descq;
- struct qib_sdma_txreq *txp, *txpnext;
- __le64 *descqp;
- u64 desc[2];
- u64 addr;
- u16 gen, dwlen, dwoffset;
- u16 head, tail, cnt;
-
- head = ppd->sdma_descq_head;
- tail = ppd->sdma_descq_tail;
- cnt = qib_sdma_descq_freecnt(ppd);
- descq = ppd->sdma_descq;
-
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA ppd->sdma_descq_head: %u\n", head);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA ppd->sdma_descq_tail: %u\n", tail);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA sdma_descq_freecnt: %u\n", cnt);
-
- /* print info for each entry in the descriptor queue */
- while (head != tail) {
- char flags[6] = { 'x', 'x', 'x', 'x', 'x', 0 };
-
- descqp = &descq[head].qw[0];
- desc[0] = le64_to_cpu(descqp[0]);
- desc[1] = le64_to_cpu(descqp[1]);
- flags[0] = (desc[0] & 1<<15) ? 'I' : '-';
- flags[1] = (desc[0] & 1<<14) ? 'L' : 'S';
- flags[2] = (desc[0] & 1<<13) ? 'H' : '-';
- flags[3] = (desc[0] & 1<<12) ? 'F' : '-';
- flags[4] = (desc[0] & 1<<11) ? 'L' : '-';
- addr = (desc[1] << 32) | ((desc[0] >> 32) & 0xfffffffcULL);
- gen = (desc[0] >> 30) & 3ULL;
- dwlen = (desc[0] >> 14) & (0x7ffULL << 2);
- dwoffset = (desc[0] & 0x7ffULL) << 2;
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes offset:%u bytes\n",
- head, flags, addr, gen, dwlen, dwoffset);
- if (++head == ppd->sdma_descq_cnt)
- head = 0;
- }
-
- /* print dma descriptor indices from the TX requests */
- list_for_each_entry_safe(txp, txpnext, &ppd->sdma_activelist,
- list)
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA txp->start_idx: %u txp->next_descq_idx: %u\n",
- txp->start_idx, txp->next_descq_idx);
-}
-
-void qib_sdma_process_event(struct qib_pportdata *ppd,
- enum qib_sdma_events event)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
-
- __qib_sdma_process_event(ppd, event);
-
- if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
- qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
-
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-}
-
-void __qib_sdma_process_event(struct qib_pportdata *ppd,
- enum qib_sdma_events event)
-{
- struct qib_sdma_state *ss = &ppd->sdma_state;
-
- switch (ss->current_state) {
- case qib_sdma_state_s00_hw_down:
- switch (event) {
- case qib_sdma_event_e00_go_hw_down:
- break;
- case qib_sdma_event_e30_go_running:
- /*
-			 * If down, but running requested (usually the result
-			 * of link up), then we need to start up.
-			 * This can happen when hw down is requested while
-			 * bringing the link up with traffic active on the
-			 * 7220, for example. */
- ss->go_s99_running = 1;
- /* fall through and start dma engine */
- case qib_sdma_event_e10_go_hw_start:
- /* This reference means the state machine is started */
- sdma_get(&ppd->sdma_state);
- sdma_set_state(ppd,
- qib_sdma_state_s10_hw_start_up_wait);
- break;
- case qib_sdma_event_e20_hw_started:
- break;
- case qib_sdma_event_e40_sw_cleaned:
- sdma_sw_tear_down(ppd);
- break;
- case qib_sdma_event_e50_hw_cleaned:
- break;
- case qib_sdma_event_e60_hw_halted:
- break;
- case qib_sdma_event_e70_go_idle:
- break;
- case qib_sdma_event_e7220_err_halted:
- break;
- case qib_sdma_event_e7322_err_halted:
- break;
- case qib_sdma_event_e90_timer_tick:
- break;
- }
- break;
-
- case qib_sdma_state_s10_hw_start_up_wait:
- switch (event) {
- case qib_sdma_event_e00_go_hw_down:
- sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
- sdma_sw_tear_down(ppd);
- break;
- case qib_sdma_event_e10_go_hw_start:
- break;
- case qib_sdma_event_e20_hw_started:
- sdma_set_state(ppd, ss->go_s99_running ?
- qib_sdma_state_s99_running :
- qib_sdma_state_s20_idle);
- break;
- case qib_sdma_event_e30_go_running:
- ss->go_s99_running = 1;
- break;
- case qib_sdma_event_e40_sw_cleaned:
- break;
- case qib_sdma_event_e50_hw_cleaned:
- break;
- case qib_sdma_event_e60_hw_halted:
- break;
- case qib_sdma_event_e70_go_idle:
- ss->go_s99_running = 0;
- break;
- case qib_sdma_event_e7220_err_halted:
- break;
- case qib_sdma_event_e7322_err_halted:
- break;
- case qib_sdma_event_e90_timer_tick:
- break;
- }
- break;
-
- case qib_sdma_state_s20_idle:
- switch (event) {
- case qib_sdma_event_e00_go_hw_down:
- sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
- sdma_sw_tear_down(ppd);
- break;
- case qib_sdma_event_e10_go_hw_start:
- break;
- case qib_sdma_event_e20_hw_started:
- break;
- case qib_sdma_event_e30_go_running:
- sdma_set_state(ppd, qib_sdma_state_s99_running);
- ss->go_s99_running = 1;
- break;
- case qib_sdma_event_e40_sw_cleaned:
- break;
- case qib_sdma_event_e50_hw_cleaned:
- break;
- case qib_sdma_event_e60_hw_halted:
- break;
- case qib_sdma_event_e70_go_idle:
- break;
- case qib_sdma_event_e7220_err_halted:
- break;
- case qib_sdma_event_e7322_err_halted:
- break;
- case qib_sdma_event_e90_timer_tick:
- break;
- }
- break;
-
- case qib_sdma_state_s30_sw_clean_up_wait:
- switch (event) {
- case qib_sdma_event_e00_go_hw_down:
- sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
- break;
- case qib_sdma_event_e10_go_hw_start:
- break;
- case qib_sdma_event_e20_hw_started:
- break;
- case qib_sdma_event_e30_go_running:
- ss->go_s99_running = 1;
- break;
- case qib_sdma_event_e40_sw_cleaned:
- sdma_set_state(ppd,
- qib_sdma_state_s10_hw_start_up_wait);
- sdma_hw_start_up(ppd);
- break;
- case qib_sdma_event_e50_hw_cleaned:
- break;
- case qib_sdma_event_e60_hw_halted:
- break;
- case qib_sdma_event_e70_go_idle:
- ss->go_s99_running = 0;
- break;
- case qib_sdma_event_e7220_err_halted:
- break;
- case qib_sdma_event_e7322_err_halted:
- break;
- case qib_sdma_event_e90_timer_tick:
- break;
- }
- break;
-
- case qib_sdma_state_s40_hw_clean_up_wait:
- switch (event) {
- case qib_sdma_event_e00_go_hw_down:
- sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
- sdma_start_sw_clean_up(ppd);
- break;
- case qib_sdma_event_e10_go_hw_start:
- break;
- case qib_sdma_event_e20_hw_started:
- break;
- case qib_sdma_event_e30_go_running:
- ss->go_s99_running = 1;
- break;
- case qib_sdma_event_e40_sw_cleaned:
- break;
- case qib_sdma_event_e50_hw_cleaned:
- sdma_set_state(ppd,
- qib_sdma_state_s30_sw_clean_up_wait);
- sdma_start_sw_clean_up(ppd);
- break;
- case qib_sdma_event_e60_hw_halted:
- break;
- case qib_sdma_event_e70_go_idle:
- ss->go_s99_running = 0;
- break;
- case qib_sdma_event_e7220_err_halted:
- break;
- case qib_sdma_event_e7322_err_halted:
- break;
- case qib_sdma_event_e90_timer_tick:
- break;
- }
- break;
-
- case qib_sdma_state_s50_hw_halt_wait:
- switch (event) {
- case qib_sdma_event_e00_go_hw_down:
- sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
- sdma_start_sw_clean_up(ppd);
- break;
- case qib_sdma_event_e10_go_hw_start:
- break;
- case qib_sdma_event_e20_hw_started:
- break;
- case qib_sdma_event_e30_go_running:
- ss->go_s99_running = 1;
- break;
- case qib_sdma_event_e40_sw_cleaned:
- break;
- case qib_sdma_event_e50_hw_cleaned:
- break;
- case qib_sdma_event_e60_hw_halted:
- sdma_set_state(ppd,
- qib_sdma_state_s40_hw_clean_up_wait);
- ppd->dd->f_sdma_hw_clean_up(ppd);
- break;
- case qib_sdma_event_e70_go_idle:
- ss->go_s99_running = 0;
- break;
- case qib_sdma_event_e7220_err_halted:
- break;
- case qib_sdma_event_e7322_err_halted:
- break;
- case qib_sdma_event_e90_timer_tick:
- break;
- }
- break;
-
- case qib_sdma_state_s99_running:
- switch (event) {
- case qib_sdma_event_e00_go_hw_down:
- sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
- sdma_start_sw_clean_up(ppd);
- break;
- case qib_sdma_event_e10_go_hw_start:
- break;
- case qib_sdma_event_e20_hw_started:
- break;
- case qib_sdma_event_e30_go_running:
- break;
- case qib_sdma_event_e40_sw_cleaned:
- break;
- case qib_sdma_event_e50_hw_cleaned:
- break;
- case qib_sdma_event_e60_hw_halted:
- sdma_set_state(ppd,
- qib_sdma_state_s30_sw_clean_up_wait);
- sdma_start_sw_clean_up(ppd);
- break;
- case qib_sdma_event_e70_go_idle:
- sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
- ss->go_s99_running = 0;
- break;
- case qib_sdma_event_e7220_err_halted:
- sdma_set_state(ppd,
- qib_sdma_state_s30_sw_clean_up_wait);
- sdma_start_sw_clean_up(ppd);
- break;
- case qib_sdma_event_e7322_err_halted:
- sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
- break;
- case qib_sdma_event_e90_timer_tick:
- break;
- }
- break;
- }
-
- ss->last_event = event;
-}
diff --git a/drivers/infiniband/hw/qib/qib_srq.c b/drivers/infiniband/hw/qib/qib_srq.c
deleted file mode 100644
index d6235931a1ba..000000000000
--- a/drivers/infiniband/hw/qib/qib_srq.c
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include "qib_verbs.h"
-
-/**
- * qib_post_srq_receive - post a receive on a shared receive queue
- * @ibsrq: the SRQ to post the receive on
- * @wr: the list of work requests to post
- * @bad_wr: A pointer to the first WR to cause a problem is put here
- *
- * This may be called from interrupt context.
- */
-int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
-{
- struct qib_srq *srq = to_isrq(ibsrq);
- struct qib_rwq *wq;
- unsigned long flags;
- int ret;
-
- for (; wr; wr = wr->next) {
- struct qib_rwqe *wqe;
- u32 next;
- int i;
-
- if ((unsigned) wr->num_sge > srq->rq.max_sge) {
- *bad_wr = wr;
- ret = -EINVAL;
- goto bail;
- }
-
- spin_lock_irqsave(&srq->rq.lock, flags);
- wq = srq->rq.wq;
- next = wq->head + 1;
- if (next >= srq->rq.size)
- next = 0;
- if (next == wq->tail) {
- spin_unlock_irqrestore(&srq->rq.lock, flags);
- *bad_wr = wr;
- ret = -ENOMEM;
- goto bail;
- }
-
- wqe = get_rwqe_ptr(&srq->rq, wq->head);
- wqe->wr_id = wr->wr_id;
- wqe->num_sge = wr->num_sge;
- for (i = 0; i < wr->num_sge; i++)
- wqe->sg_list[i] = wr->sg_list[i];
- /* Make sure queue entry is written before the head index. */
- smp_wmb();
- wq->head = next;
- spin_unlock_irqrestore(&srq->rq.lock, flags);
- }
- ret = 0;
-
-bail:
- return ret;
-}
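
The head/tail handling above follows the usual one-slot-open ring convention: head == tail means empty, and advancing head onto tail means the ring is full, which is why an rq of size N holds at most N - 1 entries. A tiny stand-alone sketch of the same test (rq_full is a made-up name; not driver code):

#include <stdio.h>

#define RQ_SIZE 4	/* illustrative ring size: holds RQ_SIZE - 1 entries */

static int rq_full(unsigned head, unsigned tail)
{
	unsigned next = head + 1;

	if (next >= RQ_SIZE)
		next = 0;
	return next == tail;	/* same test qib_post_srq_receive() makes */
}

int main(void)
{
	printf("empty ring full? %d\n", rq_full(0, 0));	/* 0 */
	printf("3 queued, full? %d\n", rq_full(3, 0));	/* 1 */
	return 0;
}
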
-
-/**
- * qib_create_srq - create a shared receive queue
- * @ibpd: the protection domain of the SRQ to create
- * @srq_init_attr: the attributes of the SRQ
- * @udata: data from libibverbs when creating a user SRQ
- */
-struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
- struct ib_srq_init_attr *srq_init_attr,
- struct ib_udata *udata)
-{
- struct qib_ibdev *dev = to_idev(ibpd->device);
- struct qib_srq *srq;
- u32 sz;
- struct ib_srq *ret;
-
- if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
- ret = ERR_PTR(-ENOSYS);
- goto done;
- }
-
- if (srq_init_attr->attr.max_sge == 0 ||
- srq_init_attr->attr.max_sge > ib_qib_max_srq_sges ||
- srq_init_attr->attr.max_wr == 0 ||
- srq_init_attr->attr.max_wr > ib_qib_max_srq_wrs) {
- ret = ERR_PTR(-EINVAL);
- goto done;
- }
-
- srq = kmalloc(sizeof(*srq), GFP_KERNEL);
- if (!srq) {
- ret = ERR_PTR(-ENOMEM);
- goto done;
- }
-
- /*
- * Need to use vmalloc() if we want to support large #s of entries.
- */
- srq->rq.size = srq_init_attr->attr.max_wr + 1;
- srq->rq.max_sge = srq_init_attr->attr.max_sge;
- sz = sizeof(struct ib_sge) * srq->rq.max_sge +
- sizeof(struct qib_rwqe);
- srq->rq.wq = vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz);
- if (!srq->rq.wq) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_srq;
- }
-
- /*
- * Return the address of the RWQ as the offset to mmap.
- * See qib_mmap() for details.
- */
- if (udata && udata->outlen >= sizeof(__u64)) {
- int err;
- u32 s = sizeof(struct qib_rwq) + srq->rq.size * sz;
-
- srq->ip =
- qib_create_mmap_info(dev, s, ibpd->uobject->context,
- srq->rq.wq);
- if (!srq->ip) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_wq;
- }
-
- err = ib_copy_to_udata(udata, &srq->ip->offset,
- sizeof(srq->ip->offset));
- if (err) {
- ret = ERR_PTR(err);
- goto bail_ip;
- }
- } else
- srq->ip = NULL;
-
- /*
- * ib_create_srq() will initialize srq->ibsrq.
- */
- spin_lock_init(&srq->rq.lock);
- srq->rq.wq->head = 0;
- srq->rq.wq->tail = 0;
- srq->limit = srq_init_attr->attr.srq_limit;
-
- spin_lock(&dev->n_srqs_lock);
- if (dev->n_srqs_allocated == ib_qib_max_srqs) {
- spin_unlock(&dev->n_srqs_lock);
- ret = ERR_PTR(-ENOMEM);
- goto bail_ip;
- }
-
- dev->n_srqs_allocated++;
- spin_unlock(&dev->n_srqs_lock);
-
- if (srq->ip) {
- spin_lock_irq(&dev->pending_lock);
- list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
- }
-
- ret = &srq->ibsrq;
- goto done;
-
-bail_ip:
- kfree(srq->ip);
-bail_wq:
- vfree(srq->rq.wq);
-bail_srq:
- kfree(srq);
-done:
- return ret;
-}
-
-/**
- * qib_modify_srq - modify a shared receive queue
- * @ibsrq: the SRQ to modify
- * @attr: the new attributes of the SRQ
- * @attr_mask: indicates which attributes to modify
- * @udata: user data for libibverbs.so
- */
-int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
- enum ib_srq_attr_mask attr_mask,
- struct ib_udata *udata)
-{
- struct qib_srq *srq = to_isrq(ibsrq);
- struct qib_rwq *wq;
- int ret = 0;
-
- if (attr_mask & IB_SRQ_MAX_WR) {
- struct qib_rwq *owq;
- struct qib_rwqe *p;
- u32 sz, size, n, head, tail;
-
- /* Check that the requested sizes are below the limits. */
- if ((attr->max_wr > ib_qib_max_srq_wrs) ||
- ((attr_mask & IB_SRQ_LIMIT) ?
- attr->srq_limit : srq->limit) > attr->max_wr) {
- ret = -EINVAL;
- goto bail;
- }
-
- sz = sizeof(struct qib_rwqe) +
- srq->rq.max_sge * sizeof(struct ib_sge);
- size = attr->max_wr + 1;
- wq = vmalloc_user(sizeof(struct qib_rwq) + size * sz);
- if (!wq) {
- ret = -ENOMEM;
- goto bail;
- }
-
- /* Check that we can write the offset to mmap. */
- if (udata && udata->inlen >= sizeof(__u64)) {
- __u64 offset_addr;
- __u64 offset = 0;
-
- ret = ib_copy_from_udata(&offset_addr, udata,
- sizeof(offset_addr));
- if (ret)
- goto bail_free;
- udata->outbuf =
- (void __user *) (unsigned long) offset_addr;
- ret = ib_copy_to_udata(udata, &offset,
- sizeof(offset));
- if (ret)
- goto bail_free;
- }
-
- spin_lock_irq(&srq->rq.lock);
- /*
- * validate head and tail pointer values and compute
- * the number of remaining WQEs.
- */
- owq = srq->rq.wq;
- head = owq->head;
- tail = owq->tail;
- if (head >= srq->rq.size || tail >= srq->rq.size) {
- ret = -EINVAL;
- goto bail_unlock;
- }
- n = head;
- if (n < tail)
- n += srq->rq.size - tail;
- else
- n -= tail;
- if (size <= n) {
- ret = -EINVAL;
- goto bail_unlock;
- }
- n = 0;
- p = wq->wq;
- while (tail != head) {
- struct qib_rwqe *wqe;
- int i;
-
- wqe = get_rwqe_ptr(&srq->rq, tail);
- p->wr_id = wqe->wr_id;
- p->num_sge = wqe->num_sge;
- for (i = 0; i < wqe->num_sge; i++)
- p->sg_list[i] = wqe->sg_list[i];
- n++;
- p = (struct qib_rwqe *)((char *) p + sz);
- if (++tail >= srq->rq.size)
- tail = 0;
- }
- srq->rq.wq = wq;
- srq->rq.size = size;
- wq->head = n;
- wq->tail = 0;
- if (attr_mask & IB_SRQ_LIMIT)
- srq->limit = attr->srq_limit;
- spin_unlock_irq(&srq->rq.lock);
-
- vfree(owq);
-
- if (srq->ip) {
- struct qib_mmap_info *ip = srq->ip;
- struct qib_ibdev *dev = to_idev(srq->ibsrq.device);
- u32 s = sizeof(struct qib_rwq) + size * sz;
-
- qib_update_mmap_info(dev, ip, s, wq);
-
- /*
- * Return the offset to mmap.
- * See qib_mmap() for details.
- */
- if (udata && udata->inlen >= sizeof(__u64)) {
- ret = ib_copy_to_udata(udata, &ip->offset,
- sizeof(ip->offset));
- if (ret)
- goto bail;
- }
-
- /*
- * Put user mapping info onto the pending list
- * unless it already is on the list.
- */
- spin_lock_irq(&dev->pending_lock);
- if (list_empty(&ip->pending_mmaps))
- list_add(&ip->pending_mmaps,
- &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
- }
- } else if (attr_mask & IB_SRQ_LIMIT) {
- spin_lock_irq(&srq->rq.lock);
- if (attr->srq_limit >= srq->rq.size)
- ret = -EINVAL;
- else
- srq->limit = attr->srq_limit;
- spin_unlock_irq(&srq->rq.lock);
- }
- goto bail;
-
-bail_unlock:
- spin_unlock_irq(&srq->rq.lock);
-bail_free:
- vfree(wq);
-bail:
- return ret;
-}
-
-int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
-{
- struct qib_srq *srq = to_isrq(ibsrq);
-
- attr->max_wr = srq->rq.size - 1;
- attr->max_sge = srq->rq.max_sge;
- attr->srq_limit = srq->limit;
- return 0;
-}
-
-/**
- * qib_destroy_srq - destroy a shared receive queue
- * @ibsrq: the SRQ to destroy
- */
-int qib_destroy_srq(struct ib_srq *ibsrq)
-{
- struct qib_srq *srq = to_isrq(ibsrq);
- struct qib_ibdev *dev = to_idev(ibsrq->device);
-
- spin_lock(&dev->n_srqs_lock);
- dev->n_srqs_allocated--;
- spin_unlock(&dev->n_srqs_lock);
- if (srq->ip)
- kref_put(&srq->ip->ref, qib_release_mmap_info);
- else
- vfree(srq->rq.wq);
- kfree(srq);
-
- return 0;
-}
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
deleted file mode 100644
index 81f56cdff2bc..000000000000
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ /dev/null
@@ -1,818 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/ctype.h>
-
-#include "qib.h"
-#include "qib_mad.h"
-
-/* start of per-port functions */
-/*
- * Get/Set heartbeat enable. OR of 1=enabled, 2=auto
- */
-static ssize_t show_hrtbt_enb(struct qib_pportdata *ppd, char *buf)
-{
- struct qib_devdata *dd = ppd->dd;
- int ret;
-
- ret = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT);
- ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
- return ret;
-}
-
-static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf,
- size_t count)
-{
- struct qib_devdata *dd = ppd->dd;
- int ret;
- u16 val;
-
- ret = kstrtou16(buf, 0, &val);
- if (ret) {
- qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
- return ret;
- }
-
- /*
- * Set the "intentional" heartbeat enable per either of
- * "Enable" and "Auto", as these are normally set together.
- * This bit is consulted when leaving loopback mode,
- * because entering loopback mode overrides it and automatically
- * disables heartbeat.
- */
- ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
- return ret < 0 ? ret : count;
-}
-
-static ssize_t store_loopback(struct qib_pportdata *ppd, const char *buf,
- size_t count)
-{
- struct qib_devdata *dd = ppd->dd;
- int ret = count, r;
-
- r = dd->f_set_ib_loopback(ppd, buf);
- if (r < 0)
- ret = r;
-
- return ret;
-}
-
-static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf,
- size_t count)
-{
- struct qib_devdata *dd = ppd->dd;
- int ret;
- u16 val;
-
- ret = kstrtou16(buf, 0, &val);
- if (ret) {
- qib_dev_err(dd, "attempt to set invalid LED override\n");
- return ret;
- }
-
- qib_set_led_override(ppd, val);
- return count;
-}
-
-static ssize_t show_status(struct qib_pportdata *ppd, char *buf)
-{
- ssize_t ret;
-
- if (!ppd->statusp)
- ret = -EINVAL;
- else
- ret = scnprintf(buf, PAGE_SIZE, "0x%llx\n",
- (unsigned long long) *(ppd->statusp));
- return ret;
-}
-
-/*
- * For userland compatibility, these offsets must remain fixed.
- * They are strings for QIB_STATUS_*
- */
-static const char * const qib_status_str[] = {
- "Initted",
- "",
- "",
- "",
- "",
- "Present",
- "IB_link_up",
- "IB_configured",
- "",
- "Fatal_Hardware_Error",
- NULL,
-};
-
-static ssize_t show_status_str(struct qib_pportdata *ppd, char *buf)
-{
- int i, any;
- u64 s;
- ssize_t ret;
-
- if (!ppd->statusp) {
- ret = -EINVAL;
- goto bail;
- }
-
- s = *(ppd->statusp);
- *buf = '\0';
- for (any = i = 0; s && qib_status_str[i]; i++) {
- if (s & 1) {
- /* if overflow */
- if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
- break;
- if (strlcat(buf, qib_status_str[i], PAGE_SIZE) >=
- PAGE_SIZE)
- break;
- any = 1;
- }
- s >>= 1;
- }
- if (any)
- strlcat(buf, "\n", PAGE_SIZE);
-
- ret = strlen(buf);
-
-bail:
- return ret;
-}
-
-/* end of per-port functions */
-
-/*
- * Start of per-port file structures and support code
- * Because we are fitting into other infrastructure, we have to supply the
- * full set of kobject/sysfs_ops structures and routines.
- */
-#define QIB_PORT_ATTR(name, mode, show, store) \
- static struct qib_port_attr qib_port_attr_##name = \
- __ATTR(name, mode, show, store)
-
-struct qib_port_attr {
- struct attribute attr;
- ssize_t (*show)(struct qib_pportdata *, char *);
- ssize_t (*store)(struct qib_pportdata *, const char *, size_t);
-};
-
-QIB_PORT_ATTR(loopback, S_IWUSR, NULL, store_loopback);
-QIB_PORT_ATTR(led_override, S_IWUSR, NULL, store_led_override);
-QIB_PORT_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
- store_hrtbt_enb);
-QIB_PORT_ATTR(status, S_IRUGO, show_status, NULL);
-QIB_PORT_ATTR(status_str, S_IRUGO, show_status_str, NULL);
-
-static struct attribute *port_default_attributes[] = {
- &qib_port_attr_loopback.attr,
- &qib_port_attr_led_override.attr,
- &qib_port_attr_hrtbt_enable.attr,
- &qib_port_attr_status.attr,
- &qib_port_attr_status_str.attr,
- NULL
-};
-
-/*
- * Start of per-port congestion control structures and support code
- */
-
-/*
- * Congestion control table size followed by table entries
- */
-static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
-{
- int ret;
- struct qib_pportdata *ppd =
- container_of(kobj, struct qib_pportdata, pport_cc_kobj);
-
- if (!qib_cc_table_size || !ppd->ccti_entries_shadow)
- return -EINVAL;
-
- ret = ppd->total_cct_entry * sizeof(struct ib_cc_table_entry_shadow)
- + sizeof(__be16);
-
- if (pos > ret)
- return -EINVAL;
-
- if (count > ret - pos)
- count = ret - pos;
-
- if (!count)
- return count;
-
- spin_lock(&ppd->cc_shadow_lock);
- memcpy(buf, ppd->ccti_entries_shadow, count);
- spin_unlock(&ppd->cc_shadow_lock);
-
- return count;
-}
-
-static void qib_port_release(struct kobject *kobj)
-{
- /* nothing to do since memory is freed by qib_free_devdata() */
-}
-
-static struct kobj_type qib_port_cc_ktype = {
- .release = qib_port_release,
-};
-
-static struct bin_attribute cc_table_bin_attr = {
- .attr = {.name = "cc_table_bin", .mode = 0444},
- .read = read_cc_table_bin,
- .size = PAGE_SIZE,
-};
-
-/*
- * Congestion settings: port control, control map and an array of 16
- * entries for the congestion entries - increase, timer, event log
- * trigger threshold and the minimum injection rate delay.
- */
-static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
-{
- int ret;
- struct qib_pportdata *ppd =
- container_of(kobj, struct qib_pportdata, pport_cc_kobj);
-
- if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
- return -EINVAL;
-
- ret = sizeof(struct ib_cc_congestion_setting_attr_shadow);
-
- if (pos > ret)
- return -EINVAL;
- if (count > ret - pos)
- count = ret - pos;
-
- if (!count)
- return count;
-
- spin_lock(&ppd->cc_shadow_lock);
- memcpy(buf, ppd->congestion_entries_shadow, count);
- spin_unlock(&ppd->cc_shadow_lock);
-
- return count;
-}
-
-static struct bin_attribute cc_setting_bin_attr = {
- .attr = {.name = "cc_settings_bin", .mode = 0444},
- .read = read_cc_setting_bin,
- .size = PAGE_SIZE,
-};
-
-
-static ssize_t qib_portattr_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- struct qib_port_attr *pattr =
- container_of(attr, struct qib_port_attr, attr);
- struct qib_pportdata *ppd =
- container_of(kobj, struct qib_pportdata, pport_kobj);
-
- return pattr->show(ppd, buf);
-}
-
-static ssize_t qib_portattr_store(struct kobject *kobj,
- struct attribute *attr, const char *buf, size_t len)
-{
- struct qib_port_attr *pattr =
- container_of(attr, struct qib_port_attr, attr);
- struct qib_pportdata *ppd =
- container_of(kobj, struct qib_pportdata, pport_kobj);
-
- return pattr->store(ppd, buf, len);
-}
-
-
-static const struct sysfs_ops qib_port_ops = {
- .show = qib_portattr_show,
- .store = qib_portattr_store,
-};
-
-static struct kobj_type qib_port_ktype = {
- .release = qib_port_release,
- .sysfs_ops = &qib_port_ops,
- .default_attrs = port_default_attributes
-};
-
-/* Start sl2vl */
-
-#define QIB_SL2VL_ATTR(N) \
- static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \
- .attr = { .name = __stringify(N), .mode = 0444 }, \
- .sl = N \
- }
-
-struct qib_sl2vl_attr {
- struct attribute attr;
- int sl;
-};
-
-QIB_SL2VL_ATTR(0);
-QIB_SL2VL_ATTR(1);
-QIB_SL2VL_ATTR(2);
-QIB_SL2VL_ATTR(3);
-QIB_SL2VL_ATTR(4);
-QIB_SL2VL_ATTR(5);
-QIB_SL2VL_ATTR(6);
-QIB_SL2VL_ATTR(7);
-QIB_SL2VL_ATTR(8);
-QIB_SL2VL_ATTR(9);
-QIB_SL2VL_ATTR(10);
-QIB_SL2VL_ATTR(11);
-QIB_SL2VL_ATTR(12);
-QIB_SL2VL_ATTR(13);
-QIB_SL2VL_ATTR(14);
-QIB_SL2VL_ATTR(15);
-
-static struct attribute *sl2vl_default_attributes[] = {
- &qib_sl2vl_attr_0.attr,
- &qib_sl2vl_attr_1.attr,
- &qib_sl2vl_attr_2.attr,
- &qib_sl2vl_attr_3.attr,
- &qib_sl2vl_attr_4.attr,
- &qib_sl2vl_attr_5.attr,
- &qib_sl2vl_attr_6.attr,
- &qib_sl2vl_attr_7.attr,
- &qib_sl2vl_attr_8.attr,
- &qib_sl2vl_attr_9.attr,
- &qib_sl2vl_attr_10.attr,
- &qib_sl2vl_attr_11.attr,
- &qib_sl2vl_attr_12.attr,
- &qib_sl2vl_attr_13.attr,
- &qib_sl2vl_attr_14.attr,
- &qib_sl2vl_attr_15.attr,
- NULL
-};
-
-static ssize_t sl2vl_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct qib_sl2vl_attr *sattr =
- container_of(attr, struct qib_sl2vl_attr, attr);
- struct qib_pportdata *ppd =
- container_of(kobj, struct qib_pportdata, sl2vl_kobj);
- struct qib_ibport *qibp = &ppd->ibport_data;
-
- return sprintf(buf, "%u\n", qibp->sl_to_vl[sattr->sl]);
-}
-
-static const struct sysfs_ops qib_sl2vl_ops = {
- .show = sl2vl_attr_show,
-};
-
-static struct kobj_type qib_sl2vl_ktype = {
- .release = qib_port_release,
- .sysfs_ops = &qib_sl2vl_ops,
- .default_attrs = sl2vl_default_attributes
-};
-
-/* End sl2vl */
-
-/* Start diag_counters */
-
-#define QIB_DIAGC_ATTR(N) \
- static struct qib_diagc_attr qib_diagc_attr_##N = { \
- .attr = { .name = __stringify(N), .mode = 0664 }, \
- .counter = offsetof(struct qib_ibport, n_##N) \
- }
-
-struct qib_diagc_attr {
- struct attribute attr;
- size_t counter;
-};
-
-QIB_DIAGC_ATTR(rc_resends);
-QIB_DIAGC_ATTR(rc_acks);
-QIB_DIAGC_ATTR(rc_qacks);
-QIB_DIAGC_ATTR(rc_delayed_comp);
-QIB_DIAGC_ATTR(seq_naks);
-QIB_DIAGC_ATTR(rdma_seq);
-QIB_DIAGC_ATTR(rnr_naks);
-QIB_DIAGC_ATTR(other_naks);
-QIB_DIAGC_ATTR(rc_timeouts);
-QIB_DIAGC_ATTR(loop_pkts);
-QIB_DIAGC_ATTR(pkt_drops);
-QIB_DIAGC_ATTR(dmawait);
-QIB_DIAGC_ATTR(unaligned);
-QIB_DIAGC_ATTR(rc_dupreq);
-QIB_DIAGC_ATTR(rc_seqnak);
-
-static struct attribute *diagc_default_attributes[] = {
- &qib_diagc_attr_rc_resends.attr,
- &qib_diagc_attr_rc_acks.attr,
- &qib_diagc_attr_rc_qacks.attr,
- &qib_diagc_attr_rc_delayed_comp.attr,
- &qib_diagc_attr_seq_naks.attr,
- &qib_diagc_attr_rdma_seq.attr,
- &qib_diagc_attr_rnr_naks.attr,
- &qib_diagc_attr_other_naks.attr,
- &qib_diagc_attr_rc_timeouts.attr,
- &qib_diagc_attr_loop_pkts.attr,
- &qib_diagc_attr_pkt_drops.attr,
- &qib_diagc_attr_dmawait.attr,
- &qib_diagc_attr_unaligned.attr,
- &qib_diagc_attr_rc_dupreq.attr,
- &qib_diagc_attr_rc_seqnak.attr,
- NULL
-};
-
-static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct qib_diagc_attr *dattr =
- container_of(attr, struct qib_diagc_attr, attr);
- struct qib_pportdata *ppd =
- container_of(kobj, struct qib_pportdata, diagc_kobj);
- struct qib_ibport *qibp = &ppd->ibport_data;
-
- return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter));
-}
-
-static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t size)
-{
- struct qib_diagc_attr *dattr =
- container_of(attr, struct qib_diagc_attr, attr);
- struct qib_pportdata *ppd =
- container_of(kobj, struct qib_pportdata, diagc_kobj);
- struct qib_ibport *qibp = &ppd->ibport_data;
- u32 val;
- int ret;
-
- ret = kstrtou32(buf, 0, &val);
- if (ret)
- return ret;
- *(u32 *)((char *) qibp + dattr->counter) = val;
- return size;
-}
-
-static const struct sysfs_ops qib_diagc_ops = {
- .show = diagc_attr_show,
- .store = diagc_attr_store,
-};
-
-static struct kobj_type qib_diagc_ktype = {
- .release = qib_port_release,
- .sysfs_ops = &qib_diagc_ops,
- .default_attrs = diagc_default_attributes
-};
-
-/* End diag_counters */
-
-/* end of per-port file structures and support code */
-
-/*
- * Start of per-unit (or driver, in some cases, but replicated
- * per unit) functions (these get a device *)
- */
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
-
- return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
-}
-
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
- struct qib_devdata *dd = dd_from_dev(dev);
- int ret;
-
- if (!dd->boardname)
- ret = -EINVAL;
- else
- ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
- return ret;
-}
-
-static ssize_t show_version(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- /* The string printed here is already newline-terminated. */
- return scnprintf(buf, PAGE_SIZE, "%s", (char *)ib_qib_version);
-}
-
-static ssize_t show_boardversion(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
- struct qib_devdata *dd = dd_from_dev(dev);
-
- /* The string printed here is already newline-terminated. */
- return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
-}
-
-
-static ssize_t show_localbus_info(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
- struct qib_devdata *dd = dd_from_dev(dev);
-
- /* The string printed here is already newline-terminated. */
- return scnprintf(buf, PAGE_SIZE, "%s", dd->lbus_info);
-}
-
-
-static ssize_t show_nctxts(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
- struct qib_devdata *dd = dd_from_dev(dev);
-
- /* Return the number of user ports (contexts) available. */
- /* The calculation below deals with a special case where
- * cfgctxts is set to 1 on a single-port board. */
- return scnprintf(buf, PAGE_SIZE, "%u\n",
- (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
- (dd->cfgctxts - dd->first_user_ctxt));
-}
-
-static ssize_t show_nfreectxts(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
- struct qib_devdata *dd = dd_from_dev(dev);
-
- /* Return the number of free user ports (contexts) available. */
- return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts);
-}
-
-static ssize_t show_serial(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
- struct qib_devdata *dd = dd_from_dev(dev);
-
- buf[sizeof(dd->serial)] = '\0';
- memcpy(buf, dd->serial, sizeof(dd->serial));
- strcat(buf, "\n");
- return strlen(buf);
-}
-
-static ssize_t store_chip_reset(struct device *device,
- struct device_attribute *attr, const char *buf,
- size_t count)
-{
- struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
- struct qib_devdata *dd = dd_from_dev(dev);
- int ret;
-
- if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) {
- ret = -EINVAL;
- goto bail;
- }
-
- ret = qib_reset_device(dd->unit);
-bail:
- return ret < 0 ? ret : count;
-}
-
-/*
- * Dump the tempsense registers in decimal, to ease shell scripting.
- */
-static ssize_t show_tempsense(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
- struct qib_devdata *dd = dd_from_dev(dev);
- int ret;
- int idx;
- u8 regvals[8];
-
- ret = -ENXIO;
- for (idx = 0; idx < 8; ++idx) {
- if (idx == 6)
- continue;
- ret = dd->f_tempsense_rd(dd, idx);
- if (ret < 0)
- break;
- regvals[idx] = ret;
- }
- if (idx == 8)
- ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n",
- *(signed char *)(regvals),
- *(signed char *)(regvals + 1),
- regvals[2], regvals[3],
- *(signed char *)(regvals + 5),
- *(signed char *)(regvals + 7));
- return ret;
-}
-
-/*
- * end of per-unit (or driver, in some cases, but replicated
- * per unit) functions
- */
-
-/* start of per-unit file structures and support code */
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
-static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
-static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
-static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
-static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
-static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
-static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
-static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
-
-static struct device_attribute *qib_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id,
- &dev_attr_version,
- &dev_attr_nctxts,
- &dev_attr_nfreectxts,
- &dev_attr_serial,
- &dev_attr_boardversion,
- &dev_attr_tempsense,
- &dev_attr_localbus_info,
- &dev_attr_chip_reset,
-};
-
-int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
- struct kobject *kobj)
-{
- struct qib_pportdata *ppd;
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- int ret;
-
- if (!port_num || port_num > dd->num_pports) {
- qib_dev_err(dd,
- "Skipping infiniband class with invalid port %u\n",
- port_num);
- ret = -ENODEV;
- goto bail;
- }
- ppd = &dd->pport[port_num - 1];
-
- ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj,
- "linkcontrol");
- if (ret) {
- qib_dev_err(dd,
- "Skipping linkcontrol sysfs info, (err %d) port %u\n",
- ret, port_num);
- goto bail;
- }
- kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);
-
- ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj,
- "sl2vl");
- if (ret) {
- qib_dev_err(dd,
- "Skipping sl2vl sysfs info, (err %d) port %u\n",
- ret, port_num);
- goto bail_link;
- }
- kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);
-
- ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj,
- "diag_counters");
- if (ret) {
- qib_dev_err(dd,
- "Skipping diag_counters sysfs info, (err %d) port %u\n",
- ret, port_num);
- goto bail_sl;
- }
- kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);
-
- if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
- return 0;
-
- ret = kobject_init_and_add(&ppd->pport_cc_kobj, &qib_port_cc_ktype,
- kobj, "CCMgtA");
- if (ret) {
- qib_dev_err(dd,
- "Skipping Congestion Control sysfs info, (err %d) port %u\n",
- ret, port_num);
- goto bail_diagc;
- }
-
- kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
-
- ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
- &cc_setting_bin_attr);
- if (ret) {
- qib_dev_err(dd,
- "Skipping Congestion Control setting sysfs info, (err %d) port %u\n",
- ret, port_num);
- goto bail_cc;
- }
-
- ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
- &cc_table_bin_attr);
- if (ret) {
- qib_dev_err(dd,
- "Skipping Congestion Control table sysfs info, (err %d) port %u\n",
- ret, port_num);
- goto bail_cc_entry_bin;
- }
-
- qib_devinfo(dd->pcidev,
- "IB%u: Congestion Control Agent enabled for port %d\n",
- dd->unit, port_num);
-
- return 0;
-
-bail_cc_entry_bin:
- sysfs_remove_bin_file(&ppd->pport_cc_kobj, &cc_setting_bin_attr);
-bail_cc:
- kobject_put(&ppd->pport_cc_kobj);
-bail_diagc:
- kobject_put(&ppd->diagc_kobj);
-bail_sl:
- kobject_put(&ppd->sl2vl_kobj);
-bail_link:
- kobject_put(&ppd->pport_kobj);
-bail:
- return ret;
-}
-
-/*
- * Register and create our files in /sys/class/infiniband.
- */
-int qib_verbs_register_sysfs(struct qib_devdata *dd)
-{
- struct ib_device *dev = &dd->verbs_dev.ibdev;
- int i, ret;
-
- for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) {
- ret = device_create_file(&dev->dev, qib_attributes[i]);
- if (ret)
- goto bail;
- }
-
- return 0;
-bail:
- for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i)
- device_remove_file(&dev->dev, qib_attributes[i]);
- return ret;
-}
-
-/*
- * Unregister and remove our files in /sys/class/infiniband.
- */
-void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
-{
- struct qib_pportdata *ppd;
- int i;
-
- for (i = 0; i < dd->num_pports; i++) {
- ppd = &dd->pport[i];
- if (qib_cc_table_size &&
- ppd->congestion_entries_shadow) {
- sysfs_remove_bin_file(&ppd->pport_cc_kobj,
- &cc_setting_bin_attr);
- sysfs_remove_bin_file(&ppd->pport_cc_kobj,
- &cc_table_bin_attr);
- kobject_put(&ppd->pport_cc_kobj);
- }
- kobject_put(&ppd->sl2vl_kobj);
- kobject_put(&ppd->pport_kobj);
- }
-}
diff --git a/drivers/infiniband/hw/qib/qib_twsi.c b/drivers/infiniband/hw/qib/qib_twsi.c
deleted file mode 100644
index f5698664419b..000000000000
--- a/drivers/infiniband/hw/qib/qib_twsi.c
+++ /dev/null
@@ -1,501 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-
-#include "qib.h"
-
-/*
- * QLogic_IB "Two Wire Serial Interface" driver.
- * Originally written for a not-quite-i2c serial eeprom, which is
- * still used on some supported boards. Later boards have added a
- * variety of other uses, most board-specific, so the bit-boffing
- * part has been split off to this file, while the other parts
- * have been moved to chip-specific files.
- *
- * We have also dropped all pretense of fully generic (e.g. pretend
- * we don't know whether '1' is the higher voltage) interface, as
- * the restrictions of the generic i2c interface (e.g. no access from
- * driver itself) make it unsuitable for this use.
- */
-
-#define READ_CMD 1
-#define WRITE_CMD 0
-
-/**
- * i2c_wait_for_writes - wait for a write
- * @dd: the qlogic_ib device
- *
- * We use this instead of udelay directly, so we can make sure
- * that previous register writes have been flushed all the way
- * to the chip. Since we are delaying anyway, the cost doesn't
- * hurt, and it makes the bit twiddling more regular.
- */
-static void i2c_wait_for_writes(struct qib_devdata *dd)
-{
- /*
- * implicit read of EXTStatus is as good as explicit
- * read of scratch, if all we want to do is flush
- * writes.
- */
- dd->f_gpio_mod(dd, 0, 0, 0);
- rmb(); /* inlined, so prevent compiler reordering */
-}
-
-/*
- * QSFP modules are allowed to hold SCL low for 500uSec. Allow twice that
- * for "almost compliant" modules
- */
-#define SCL_WAIT_USEC 1000
-
-/* BUF_WAIT is the time the bus must be free between a STOP or ACK and the next START.
- * Should be 20, but some chips need more.
- */
-#define TWSI_BUF_WAIT_USEC 60
-
-static void scl_out(struct qib_devdata *dd, u8 bit)
-{
- u32 mask;
-
- udelay(1);
-
- mask = 1UL << dd->gpio_scl_num;
-
- /* SCL is meant to be open-drain, so never set "OUT", just DIR */
- dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);
-
- /*
- * Allow for slow slaves by simple
- * delay for falling edge, sampling on rise.
- */
- if (!bit)
- udelay(2);
- else {
- int rise_usec;
-
- for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
- if (mask & dd->f_gpio_mod(dd, 0, 0, 0))
- break;
- udelay(2);
- }
- if (rise_usec <= 0)
- qib_dev_err(dd, "SCL interface stuck low > %d uSec\n",
- SCL_WAIT_USEC);
- }
- i2c_wait_for_writes(dd);
-}
-
-static void sda_out(struct qib_devdata *dd, u8 bit)
-{
- u32 mask;
-
- mask = 1UL << dd->gpio_sda_num;
-
- /* SDA is meant to be open-drain, so never set "OUT", just DIR */
- dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);
-
- i2c_wait_for_writes(dd);
- udelay(2);
-}
-
-static u8 sda_in(struct qib_devdata *dd, int wait)
-{
- int bnum;
- u32 read_val, mask;
-
- bnum = dd->gpio_sda_num;
- mask = (1UL << bnum);
- /* SDA is meant to be open-drain, so never set "OUT", just DIR */
- dd->f_gpio_mod(dd, 0, 0, mask);
- read_val = dd->f_gpio_mod(dd, 0, 0, 0);
- if (wait)
- i2c_wait_for_writes(dd);
- return (read_val & mask) >> bnum;
-}
-
-/**
- * i2c_ackrcv - see if ack following write is true
- * @dd: the qlogic_ib device
- */
-static int i2c_ackrcv(struct qib_devdata *dd)
-{
- u8 ack_received;
-
- /* AT ENTRY SCL = LOW */
- /* change direction, ignore data */
- ack_received = sda_in(dd, 1);
- scl_out(dd, 1);
- ack_received = sda_in(dd, 1) == 0;
- scl_out(dd, 0);
- return ack_received;
-}
-
-static void stop_cmd(struct qib_devdata *dd);
-
-/**
- * rd_byte - read a byte, sending STOP on last, else ACK
- * @dd: the qlogic_ib device
- *
- * Returns byte shifted out of device
- */
-static int rd_byte(struct qib_devdata *dd, int last)
-{
- int bit_cntr, data;
-
- data = 0;
-
- for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) {
- data <<= 1;
- scl_out(dd, 1);
- data |= sda_in(dd, 0);
- scl_out(dd, 0);
- }
- if (last) {
- scl_out(dd, 1);
- stop_cmd(dd);
- } else {
- sda_out(dd, 0);
- scl_out(dd, 1);
- scl_out(dd, 0);
- sda_out(dd, 1);
- }
- return data;
-}
-
-/**
- * wr_byte - write a byte, one bit at a time
- * @dd: the qlogic_ib device
- * @data: the byte to write
- *
- * Returns 0 if we got the following ack, otherwise 1
- */
-static int wr_byte(struct qib_devdata *dd, u8 data)
-{
- int bit_cntr;
- u8 bit;
-
- for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) {
- bit = (data >> bit_cntr) & 1;
- sda_out(dd, bit);
- scl_out(dd, 1);
- scl_out(dd, 0);
- }
- return (!i2c_ackrcv(dd)) ? 1 : 0;
-}
-
-/*
- * issue TWSI start sequence:
- * (both clock/data high, clock high, data low while clock is high)
- */
-static void start_seq(struct qib_devdata *dd)
-{
- sda_out(dd, 1);
- scl_out(dd, 1);
- sda_out(dd, 0);
- udelay(1);
- scl_out(dd, 0);
-}
-
-/**
- * stop_seq - transmit the stop sequence
- * @dd: the qlogic_ib device
- *
- * (both clock/data low, clock high, data high while clock is high)
- */
-static void stop_seq(struct qib_devdata *dd)
-{
- scl_out(dd, 0);
- sda_out(dd, 0);
- scl_out(dd, 1);
- sda_out(dd, 1);
-}
-
-/**
- * stop_cmd - transmit the stop condition
- * @dd: the qlogic_ib device
- *
- * (both clock/data low, clock high, data high while clock is high)
- */
-static void stop_cmd(struct qib_devdata *dd)
-{
- stop_seq(dd);
- udelay(TWSI_BUF_WAIT_USEC);
-}
-
-/**
- * qib_twsi_reset - reset I2C communication
- * @dd: the qlogic_ib device
- */
-
-int qib_twsi_reset(struct qib_devdata *dd)
-{
- int clock_cycles_left = 9;
- int was_high = 0;
- u32 pins, mask;
-
- /* Both SCL and SDA should be high. If not, there
- * is something wrong.
- */
- mask = (1UL << dd->gpio_scl_num) | (1UL << dd->gpio_sda_num);
-
- /*
- * Force pins to desired innocuous state.
- * This is the default power-on state with out=0 and dir=0,
- * so it is tri-stated and should be floating high (barring HW problems).
- */
- dd->f_gpio_mod(dd, 0, 0, mask);
-
- /*
- * Clock nine times to get all listeners into a sane state.
- * If SDA does not go high at any point, we are wedged.
- * One vendor recommends then issuing START followed by STOP.
- * We cannot use our "normal" functions to do that, because
- * if SCL drops between them, another vendor's part will
- * wedge, dropping SDA and keeping it low forever, at the end of
- * the next transaction (even if it was not the device addressed).
- * So our START and STOP take place with SCL held high.
- */
- while (clock_cycles_left--) {
- scl_out(dd, 0);
- scl_out(dd, 1);
- /* Note if SDA is high, but keep clocking to sync slave */
- was_high |= sda_in(dd, 0);
- }
-
- if (was_high) {
- /*
- * We saw a high, which we hope means the slave is sync'd.
- * Issue START, STOP, pause for T_BUF.
- */
-
- pins = dd->f_gpio_mod(dd, 0, 0, 0);
- if ((pins & mask) != mask)
- qib_dev_err(dd, "GPIO pins not at rest: %d\n",
- pins & mask);
- /* Drop SDA to issue START */
- udelay(1); /* Guarantee .6 uSec setup */
- sda_out(dd, 0);
- udelay(1); /* Guarantee .6 uSec hold */
- /* At this point, SCL is high, SDA low. Raise SDA for STOP */
- sda_out(dd, 1);
- udelay(TWSI_BUF_WAIT_USEC);
- }
-
- return !was_high;
-}
-
-#define QIB_TWSI_START 0x100
-#define QIB_TWSI_STOP 0x200
-
-/* Write byte to TWSI, optionally prefixed with START or suffixed with
- * STOP.
- * returns 0 if OK (ACK received), else != 0
- */
-static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags)
-{
- int ret = 1;
-
- if (flags & QIB_TWSI_START)
- start_seq(dd);
-
- ret = wr_byte(dd, data); /* Leaves SCL low (from i2c_ackrcv()) */
-
- if (flags & QIB_TWSI_STOP)
- stop_cmd(dd);
- return ret;
-}
-
-/* Added functionality for IBA7220-based cards */
-#define QIB_TEMP_DEV 0x98
-
-/*
- * qib_twsi_blk_rd
- * Formerly called qib_eeprom_internal_read, and only used for eeprom,
- * but now the general interface for data transfer from twsi devices.
- * One vestige of its former role is that it recognizes a device
- * QIB_TWSI_NO_DEV and does the correct operation for the legacy part,
- * which responded to all TWSI device codes, interpreting them as
- * an address within the device. On all other devices on boards handled by
- * this driver, the device is followed by a one-byte "address" which selects
- * the "register" or "offset" within the device from which data should
- * be read.
- */
-int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr,
- void *buffer, int len)
-{
- int ret;
- u8 *bp = buffer;
-
- ret = 1;
-
- if (dev == QIB_TWSI_NO_DEV) {
- /* legacy not-really-I2C */
- addr = (addr << 1) | READ_CMD;
- ret = qib_twsi_wr(dd, addr, QIB_TWSI_START);
- } else {
- /* Actual I2C */
- ret = qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START);
- if (ret) {
- stop_cmd(dd);
- ret = 1;
- goto bail;
- }
- /*
- * SFF spec claims we do _not_ stop after the addr
- * but simply issue a start with the "read" dev-addr.
- * Since we are implicitly waiting for ACK here,
- * we need t_buf (nominally 20uSec) before that start,
- * and cannot rely on the delay built in to the STOP
- */
- ret = qib_twsi_wr(dd, addr, 0);
- udelay(TWSI_BUF_WAIT_USEC);
-
- if (ret) {
- qib_dev_err(dd,
- "Failed to write interface read addr %02X\n",
- addr);
- ret = 1;
- goto bail;
- }
- ret = qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START);
- }
- if (ret) {
- stop_cmd(dd);
- ret = 1;
- goto bail;
- }
-
- /*
- * block devices keep clocking data out as long as we ack,
- * automatically incrementing the address. Some have "pages"
- * whose boundaries will not be crossed, but the handling
- * of these is left to the caller, who is in a better
- * position to know.
- */
- while (len-- > 0) {
- /*
- * Get and store data, sending ACK if length remaining,
- * else STOP
- */
- *bp++ = rd_byte(dd, !len);
- }
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-/*
- * qib_twsi_blk_wr
- * Formerly called qib_eeprom_internal_write, and only used for eeprom,
- * but now the general interface for data transfer to twsi devices.
- * One vestige of its former role is that it recognizes a device
- * QIB_TWSI_NO_DEV and does the correct operation for the legacy part,
- * which responded to all TWSI device codes, interpreting them as
- * an address within the device. On all other devices on boards handled by
- * this driver, the device is followed by a one-byte "address" which selects
- * the "register" or "offset" within the device to which data should
- * be written.
- */
-int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
- const void *buffer, int len)
-{
- int sub_len;
- const u8 *bp = buffer;
- int max_wait_time, i;
- int ret = 1;
-
- while (len > 0) {
- if (dev == QIB_TWSI_NO_DEV) {
- if (qib_twsi_wr(dd, (addr << 1) | WRITE_CMD,
- QIB_TWSI_START)) {
- goto failed_write;
- }
- } else {
- /* Real I2C */
- if (qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START))
- goto failed_write;
- ret = qib_twsi_wr(dd, addr, 0);
- if (ret) {
- qib_dev_err(dd,
- "Failed to write interface write addr %02X\n",
- addr);
- goto failed_write;
- }
- }
-
- sub_len = min(len, 4);
- addr += sub_len;
- len -= sub_len;
-
- for (i = 0; i < sub_len; i++)
- if (qib_twsi_wr(dd, *bp++, 0))
- goto failed_write;
-
- stop_cmd(dd);
-
- /*
- * Wait for write complete by waiting for a successful
- * read (the chip replies with a zero after the write
- * cmd completes, and before it writes to the eeprom).
- * The startcmd for the read will fail the ack until
- * the writes have completed. We do this inline to avoid
- * the debug prints that are in the real read routine
- * if the startcmd fails.
- * We also use the proper device address, so it doesn't matter
- * whether we have real eeprom_dev. Legacy likes any address.
- */
- max_wait_time = 100;
- while (qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START)) {
- stop_cmd(dd);
- if (!--max_wait_time)
- goto failed_write;
- }
- /* now read (and ignore) the resulting byte */
- rd_byte(dd, 1);
- }
-
- ret = 0;
- goto bail;
-
-failed_write:
- stop_cmd(dd);
- ret = 1;
-
-bail:
- return ret;
-}
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
deleted file mode 100644
index eface3b3dacf..000000000000
--- a/drivers/infiniband/hw/qib/qib_tx.c
+++ /dev/null
@@ -1,572 +0,0 @@
-/*
- * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/spinlock.h>
-#include <linux/pci.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/moduleparam.h>
-
-#include "qib.h"
-
-static unsigned qib_hol_timeout_ms = 3000;
-module_param_named(hol_timeout_ms, qib_hol_timeout_ms, uint, S_IRUGO);
-MODULE_PARM_DESC(hol_timeout_ms,
- "duration of user app suspension after link failure");
-
-unsigned qib_sdma_fetch_arb = 1;
-module_param_named(fetch_arb, qib_sdma_fetch_arb, uint, S_IRUGO);
-MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");
-
-/**
- * qib_disarm_piobufs - cancel a range of PIO buffers
- * @dd: the qlogic_ib device
- * @first: the first PIO buffer to cancel
- * @cnt: the number of PIO buffers to cancel
- *
- * Cancel a range of PIO buffers. Used at user process close,
- * in case it died while writing to a PIO buffer.
- */
-void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)
-{
- unsigned long flags;
- unsigned i;
- unsigned last;
-
- last = first + cnt;
- spin_lock_irqsave(&dd->pioavail_lock, flags);
- for (i = first; i < last; i++) {
- __clear_bit(i, dd->pio_need_disarm);
- dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
- }
- spin_unlock_irqrestore(&dd->pioavail_lock, flags);
-}
-
-/*
- * This is called by a user process when it sees the DISARM_BUFS event
- * bit is set.
- */
-int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd)
-{
- struct qib_devdata *dd = rcd->dd;
- unsigned i;
- unsigned last;
- unsigned n = 0;
-
- last = rcd->pio_base + rcd->piocnt;
- /*
- * Don't need uctxt_lock here, since user has called in to us.
- * Clear at start in case more interrupts set bits while we
- * are disarming
- */
- if (rcd->user_event_mask) {
- /*
- * subctxt_cnt is 0 if not shared, so do base
- * separately, first, then remaining subctxt, if any
- */
- clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]);
- for (i = 1; i < rcd->subctxt_cnt; i++)
- clear_bit(_QIB_EVENT_DISARM_BUFS_BIT,
- &rcd->user_event_mask[i]);
- }
- spin_lock_irq(&dd->pioavail_lock);
- for (i = rcd->pio_base; i < last; i++) {
- if (__test_and_clear_bit(i, dd->pio_need_disarm)) {
- n++;
- dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));
- }
- }
- spin_unlock_irq(&dd->pioavail_lock);
- return 0;
-}
-
-static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i)
-{
- struct qib_pportdata *ppd;
- unsigned pidx;
-
- for (pidx = 0; pidx < dd->num_pports; pidx++) {
- ppd = dd->pport + pidx;
- if (i >= ppd->sdma_state.first_sendbuf &&
- i < ppd->sdma_state.last_sendbuf)
- return ppd;
- }
- return NULL;
-}
-
-/*
- * Return true if send buffer is being used by a user context.
- * Sets _QIB_EVENT_DISARM_BUFS_BIT in user_event_mask as a side effect
- */
-static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
-{
- struct qib_ctxtdata *rcd;
- unsigned ctxt;
- int ret = 0;
-
- spin_lock(&dd->uctxt_lock);
- for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
- rcd = dd->rcd[ctxt];
- if (!rcd || bufn < rcd->pio_base ||
- bufn >= rcd->pio_base + rcd->piocnt)
- continue;
- if (rcd->user_event_mask) {
- int i;
- /*
- * subctxt_cnt is 0 if not shared, so do base
- * separately, first, then remaining subctxt, if any
- */
- set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
- &rcd->user_event_mask[0]);
- for (i = 1; i < rcd->subctxt_cnt; i++)
- set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
- &rcd->user_event_mask[i]);
- }
- ret = 1;
- break;
- }
- spin_unlock(&dd->uctxt_lock);
-
- return ret;
-}
-
-/*
- * Disarm a set of send buffers. If the buffer might be actively being
- * written to, mark the buffer to be disarmed later when it is not being
- * written to.
- *
- * This should only be called from the IRQ error handler.
- */
-void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
- unsigned cnt)
-{
- struct qib_pportdata *ppd, *pppd[QIB_MAX_IB_PORTS];
- unsigned i;
- unsigned long flags;
-
- for (i = 0; i < dd->num_pports; i++)
- pppd[i] = NULL;
-
- for (i = 0; i < cnt; i++) {
- int which;
-
- if (!test_bit(i, mask))
- continue;
- /*
- * If the buffer is owned by the DMA hardware,
- * reset the DMA engine.
- */
- ppd = is_sdma_buf(dd, i);
- if (ppd) {
- pppd[ppd->port] = ppd;
- continue;
- }
- /*
- * If the kernel is writing the buffer or the buffer is
- * owned by a user process, we can't clear it yet.
- */
- spin_lock_irqsave(&dd->pioavail_lock, flags);
- if (test_bit(i, dd->pio_writing) ||
- (!test_bit(i << 1, dd->pioavailkernel) &&
- find_ctxt(dd, i))) {
- __set_bit(i, dd->pio_need_disarm);
- which = 0;
- } else {
- which = 1;
- dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
- }
- spin_unlock_irqrestore(&dd->pioavail_lock, flags);
- }
-
- /* do cancel_sends once per port that had sdma piobufs in error */
- for (i = 0; i < dd->num_pports; i++)
- if (pppd[i])
- qib_cancel_sends(pppd[i]);
-}
-
-/**
- * update_send_bufs - update shadow copy of the PIO availability map
- * @dd: the qlogic_ib device
- *
- * called whenever our local copy indicates we have run out of send buffers
- */
-static void update_send_bufs(struct qib_devdata *dd)
-{
- unsigned long flags;
- unsigned i;
- const unsigned piobregs = dd->pioavregs;
-
- /*
- * If the generation (check) bits have changed, then we update the
- * busy bit for the corresponding PIO buffer. This algorithm will
- * modify positions to the value they already have in some cases
- * (i.e., no change), but it's faster than changing only the bits
- * that have changed.
- *
- * We would like to do this atomically, to avoid spinlocks in the
- * critical send path, but that's not really possible, given the
- * type of changes, and that this routine could be called on
- * multiple cpu's simultaneously, so we lock in this routine only,
- * to avoid conflicting updates; all we change is the shadow, and
- * it's a single 64 bit memory location, so by definition the update
- * is atomic in terms of what other cpu's can see in testing the
- * bits. The spin_lock overhead isn't too bad, since it only
- * happens when all buffers are in use, so only cpu overhead, not
- * latency or bandwidth is affected.
- */
- if (!dd->pioavailregs_dma)
- return;
- spin_lock_irqsave(&dd->pioavail_lock, flags);
- for (i = 0; i < piobregs; i++) {
- u64 pchbusy, pchg, piov, pnew;
-
- piov = le64_to_cpu(dd->pioavailregs_dma[i]);
- pchg = dd->pioavailkernel[i] &
- ~(dd->pioavailshadow[i] ^ piov);
- pchbusy = pchg << QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT;
- if (pchg && (pchbusy & dd->pioavailshadow[i])) {
- pnew = dd->pioavailshadow[i] & ~pchbusy;
- pnew |= piov & pchbusy;
- dd->pioavailshadow[i] = pnew;
- }
- }
- spin_unlock_irqrestore(&dd->pioavail_lock, flags);
-}
-
-/*
- * Debugging code and stats updates if no pio buffers available.
- */
-static noinline void no_send_bufs(struct qib_devdata *dd)
-{
- dd->upd_pio_shadow = 1;
-
- /* not atomic, but if we lose a stat count in a while, that's OK */
- qib_stats.sps_nopiobufs++;
-}
-
-/*
- * Common code for normal driver send buffer allocation, and reserved
- * allocation.
- *
- * Do appropriate marking as busy, etc.
- * Returns buffer pointer if one is found, otherwise NULL.
- */
-u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
- u32 first, u32 last)
-{
- unsigned i, j, updated = 0;
- unsigned nbufs;
- unsigned long flags;
- unsigned long *shadow = dd->pioavailshadow;
- u32 __iomem *buf;
-
- if (!(dd->flags & QIB_PRESENT))
- return NULL;
-
- nbufs = last - first + 1; /* number in range to check */
- if (dd->upd_pio_shadow) {
-update_shadow:
- /*
- * Minor optimization. If we had no buffers on last call,
- * start out by doing the update; continue and do the scan even
- * if no buffers were updated, to be paranoid.
- */
- update_send_bufs(dd);
- updated++;
- }
- i = first;
- /*
- * While test_and_set_bit() is atomic, we do that and then the
- * change_bit(), and the pair is not. See if this is the cause
- * of the remaining armlaunch errors.
- */
- spin_lock_irqsave(&dd->pioavail_lock, flags);
- if (dd->last_pio >= first && dd->last_pio <= last)
- i = dd->last_pio + 1;
- if (!first)
- /* adjust to min possible */
- nbufs = last - dd->min_kernel_pio + 1;
- for (j = 0; j < nbufs; j++, i++) {
- if (i > last)
- i = !first ? dd->min_kernel_pio : first;
- if (__test_and_set_bit((2 * i) + 1, shadow))
- continue;
- /* flip generation bit */
- __change_bit(2 * i, shadow);
- /* remember that the buffer can be written to now */
- __set_bit(i, dd->pio_writing);
- if (!first && first != last) /* first == last on VL15, avoid */
- dd->last_pio = i;
- break;
- }
- spin_unlock_irqrestore(&dd->pioavail_lock, flags);
-
- if (j == nbufs) {
- if (!updated)
- /*
- * First time through; shadow exhausted, but may be
- * buffers available, try an update and then rescan.
- */
- goto update_shadow;
- no_send_bufs(dd);
- buf = NULL;
- } else {
- if (i < dd->piobcnt2k)
- buf = (u32 __iomem *)(dd->pio2kbase +
- i * dd->palign);
- else if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base)
- buf = (u32 __iomem *)(dd->pio4kbase +
- (i - dd->piobcnt2k) * dd->align4k);
- else
- buf = (u32 __iomem *)(dd->piovl15base +
- (i - (dd->piobcnt2k + dd->piobcnt4k)) *
- dd->align4k);
- if (pbufnum)
- *pbufnum = i;
- dd->upd_pio_shadow = 0;
- }
-
- return buf;
-}
-
-/*
- * Record that the caller is finished writing to the buffer so we don't
- * disarm it while it is being written and disarm it now if needed.
- */
-void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dd->pioavail_lock, flags);
- __clear_bit(n, dd->pio_writing);
- if (__test_and_clear_bit(n, dd->pio_need_disarm))
- dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
- spin_unlock_irqrestore(&dd->pioavail_lock, flags);
-}
-
-/**
- * qib_chg_pioavailkernel - change which send buffers are available for kernel
- * @dd: the qlogic_ib device
- * @start: the starting send buffer number
- * @len: the number of send buffers
- * @avail: true if the buffers are available for kernel use, false otherwise
- */
-void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
- unsigned len, u32 avail, struct qib_ctxtdata *rcd)
-{
- unsigned long flags;
- unsigned end;
- unsigned ostart = start;
-
- /* There are two bits per send buffer (busy and generation) */
- start *= 2;
- end = start + len * 2;
-
- spin_lock_irqsave(&dd->pioavail_lock, flags);
- /* Set or clear the busy bit in the shadow. */
- while (start < end) {
- if (avail) {
- unsigned long dma;
- int i;
-
- /*
- * The BUSY bit will never be set, because we disarm
- * the user buffers before we hand them back to the
- * kernel. We do have to make sure the generation
- * bit is set correctly in shadow, since it could
- * have changed many times while allocated to user.
- * We can't use the bitmap functions on the full
- * dma array because it is always little-endian, so
- * we have to flip to host-order first.
- * BITS_PER_LONG is slightly wrong, since it's
- * always 64 bits per register in chip...
- * We only work on 64 bit kernels, so that's OK.
- */
- i = start / BITS_PER_LONG;
- __clear_bit(QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT + start,
- dd->pioavailshadow);
- dma = (unsigned long)
- le64_to_cpu(dd->pioavailregs_dma[i]);
- if (test_bit((QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
- start) % BITS_PER_LONG, &dma))
- __set_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
- start, dd->pioavailshadow);
- else
- __clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT
- + start, dd->pioavailshadow);
- __set_bit(start, dd->pioavailkernel);
- if ((start >> 1) < dd->min_kernel_pio)
- dd->min_kernel_pio = start >> 1;
- } else {
- __set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT,
- dd->pioavailshadow);
- __clear_bit(start, dd->pioavailkernel);
- if ((start >> 1) > dd->min_kernel_pio)
- dd->min_kernel_pio = start >> 1;
- }
- start += 2;
- }
-
- if (dd->min_kernel_pio > 0 && dd->last_pio < dd->min_kernel_pio - 1)
- dd->last_pio = dd->min_kernel_pio - 1;
- spin_unlock_irqrestore(&dd->pioavail_lock, flags);
-
- dd->f_txchk_change(dd, ostart, len, avail, rcd);
-}
-
-/*
- * Flush all sends that might be in the ready to send state, as well as any
- * that are in the process of being sent. Used whenever we need to be
- * sure the send side is idle. Cleans up all buffer state by canceling
- * all pio buffers, and issuing an abort, which cleans up anything in the
- * launch fifo. The cancel is superfluous on some chip versions, but
- * it's safer to always do it.
- * PIOAvail bits are updated by the chip as if a normal send had happened.
- */
-void qib_cancel_sends(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- struct qib_ctxtdata *rcd;
- unsigned long flags;
- unsigned ctxt;
- unsigned i;
- unsigned last;
-
- /*
- * Tell PSM to disarm buffers again before trying to reuse them.
- * We need to be sure the rcd doesn't change out from under us
- * while we do so. We hold the two locks sequentially. We might
- * needlessly set some need_disarm bits as a result, if the
- * context is closed after we release the uctxt_lock, but that's
- * fairly benign, and safer than nesting the locks.
- */
- for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- rcd = dd->rcd[ctxt];
- if (rcd && rcd->ppd == ppd) {
- last = rcd->pio_base + rcd->piocnt;
- if (rcd->user_event_mask) {
- /*
- * subctxt_cnt is 0 if not shared, so do base
- * separately, first, then remaining subctxt,
- * if any
- */
- set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
- &rcd->user_event_mask[0]);
- for (i = 1; i < rcd->subctxt_cnt; i++)
- set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
- &rcd->user_event_mask[i]);
- }
- i = rcd->pio_base;
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
- spin_lock_irqsave(&dd->pioavail_lock, flags);
- for (; i < last; i++)
- __set_bit(i, dd->pio_need_disarm);
- spin_unlock_irqrestore(&dd->pioavail_lock, flags);
- } else
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
- }
-
- if (!(dd->flags & QIB_HAS_SEND_DMA))
- dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |
- QIB_SENDCTRL_FLUSH);
-}
-
-/*
- * Force an update of in-memory copy of the pioavail registers, when
- * needed for any of a variety of reasons.
- * If already off, this routine is a nop, on the assumption that the
- * caller (or set of callers) will "do the right thing".
- * This is a per-device operation, so just the first port.
- */
-void qib_force_pio_avail_update(struct qib_devdata *dd)
-{
- dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
-}
-
-void qib_hol_down(struct qib_pportdata *ppd)
-{
- /*
- * Cancel sends when the link goes DOWN so that we aren't doing it
- * at INIT when we might be trying to send SMI packets.
- */
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
- qib_cancel_sends(ppd);
-}
-
-/*
- * Link is at INIT.
- * We start the HoL timer so we can detect stuck packets blocking SMP replies.
- * Timer may already be running, so use mod_timer, not add_timer.
- */
-void qib_hol_init(struct qib_pportdata *ppd)
-{
- if (ppd->hol_state != QIB_HOL_INIT) {
- ppd->hol_state = QIB_HOL_INIT;
- mod_timer(&ppd->hol_timer,
- jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
- }
-}
-
-/*
- * Link is up, continue any user processes, and ensure timer
- * is a nop, if running. Let timer keep running, if set; it
- * will nop when it sees the link is up.
- */
-void qib_hol_up(struct qib_pportdata *ppd)
-{
- ppd->hol_state = QIB_HOL_UP;
-}
-
-/*
- * This is only called via the timer.
- */
-void qib_hol_event(unsigned long opaque)
-{
- struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
-
- /* If hardware error, etc, skip. */
- if (!(ppd->dd->flags & QIB_INITTED))
- return;
-
- if (ppd->hol_state != QIB_HOL_UP) {
- /*
- * Try to flush sends in case a stuck packet is blocking
- * SMP replies.
- */
- qib_hol_down(ppd);
- mod_timer(&ppd->hol_timer,
- jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
- }
-}
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
deleted file mode 100644
index 26243b722b5e..000000000000
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ /dev/null
@@ -1,590 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/ib_smi.h>
-
-#include "qib.h"
-#include "qib_mad.h"
-
-/**
- * qib_ud_loopback - handle send on loopback QPs
- * @sqp: the sending QP
- * @swqe: the send work request
- *
- * This is called from qib_make_ud_req() to forward a WQE addressed
- * to the same HCA.
- * Note that the receive interrupt handler may be calling qib_ud_rcv()
- * while this is being called.
- */
-static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
-{
- struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
- struct qib_pportdata *ppd;
- struct qib_qp *qp;
- struct ib_ah_attr *ah_attr;
- unsigned long flags;
- struct qib_sge_state ssge;
- struct qib_sge *sge;
- struct ib_wc wc;
- u32 length;
- enum ib_qp_type sqptype, dqptype;
-
- qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
- if (!qp) {
- ibp->n_pkt_drops++;
- return;
- }
-
- sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
- IB_QPT_UD : sqp->ibqp.qp_type;
- dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
- IB_QPT_UD : qp->ibqp.qp_type;
-
- if (dqptype != sqptype ||
- !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
- ibp->n_pkt_drops++;
- goto drop;
- }
-
- ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr;
- ppd = ppd_from_ibp(ibp);
-
- if (qp->ibqp.qp_num > 1) {
- u16 pkey1;
- u16 pkey2;
- u16 lid;
-
- pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
- pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
- if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
- lid = ppd->lid | (ah_attr->src_path_bits &
- ((1 << ppd->lmc) - 1));
- qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey1,
- ah_attr->sl,
- sqp->ibqp.qp_num, qp->ibqp.qp_num,
- cpu_to_be16(lid),
- cpu_to_be16(ah_attr->dlid));
- goto drop;
- }
- }
-
- /*
- * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
- * Qkeys with the high order bit set mean use the
- * qkey from the QP context instead of the WR (see 10.2.5).
- */
- if (qp->ibqp.qp_num) {
- u32 qkey;
-
- qkey = (int)swqe->wr.wr.ud.remote_qkey < 0 ?
- sqp->qkey : swqe->wr.wr.ud.remote_qkey;
- if (unlikely(qkey != qp->qkey)) {
- u16 lid;
-
- lid = ppd->lid | (ah_attr->src_path_bits &
- ((1 << ppd->lmc) - 1));
- qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
- ah_attr->sl,
- sqp->ibqp.qp_num, qp->ibqp.qp_num,
- cpu_to_be16(lid),
- cpu_to_be16(ah_attr->dlid));
- goto drop;
- }
- }
-
- /*
- * A GRH is expected to precede the data even if not
- * present on the wire.
- */
- length = swqe->length;
- memset(&wc, 0, sizeof(wc));
- wc.byte_len = length + sizeof(struct ib_grh);
-
- if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = swqe->wr.ex.imm_data;
- }
-
- spin_lock_irqsave(&qp->r_lock, flags);
-
- /*
- * Get the next work request entry to find where to put the data.
- */
- if (qp->r_flags & QIB_R_REUSE_SGE)
- qp->r_flags &= ~QIB_R_REUSE_SGE;
- else {
- int ret;
-
- ret = qib_get_rwqe(qp, 0);
- if (ret < 0) {
- qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- goto bail_unlock;
- }
- if (!ret) {
- if (qp->ibqp.qp_num == 0)
- ibp->n_vl15_dropped++;
- goto bail_unlock;
- }
- }
- /* Silently drop packets which are too big. */
- if (unlikely(wc.byte_len > qp->r_len)) {
- qp->r_flags |= QIB_R_REUSE_SGE;
- ibp->n_pkt_drops++;
- goto bail_unlock;
- }
-
- if (ah_attr->ah_flags & IB_AH_GRH) {
- qib_copy_sge(&qp->r_sge, &ah_attr->grh,
- sizeof(struct ib_grh), 1);
- wc.wc_flags |= IB_WC_GRH;
- } else
- qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
- ssge.sg_list = swqe->sg_list + 1;
- ssge.sge = *swqe->sg_list;
- ssge.num_sge = swqe->wr.num_sge;
- sge = &ssge.sge;
- while (length) {
- u32 len = sge->length;
-
- if (len > length)
- len = length;
- if (len > sge->sge_length)
- len = sge->sge_length;
- BUG_ON(len == 0);
- qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (--ssge.num_sge)
- *sge = *ssge.sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= QIB_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- length -= len;
- }
- qib_put_ss(&qp->r_sge);
- if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
- goto bail_unlock;
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.opcode = IB_WC_RECV;
- wc.qp = &qp->ibqp;
- wc.src_qp = sqp->ibqp.qp_num;
- wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
- swqe->wr.wr.ud.pkey_index : 0;
- wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
- wc.sl = ah_attr->sl;
- wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
- wc.port_num = qp->port_num;
- /* Signal completion event if the solicited bit is set. */
- qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
- swqe->wr.send_flags & IB_SEND_SOLICITED);
- ibp->n_loop_pkts++;
-bail_unlock:
- spin_unlock_irqrestore(&qp->r_lock, flags);
-drop:
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
-}
-
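The qkey test in qib_ud_loopback() relies on `(int)remote_qkey < 0` being true exactly when the high-order bit of the 32-bit qkey is set, which is the "use the QP's qkey instead of the WR's" convention the comment cites. A small standalone check of that equivalence (assumes the usual 32-bit two's-complement int):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t qkeys[] = { 0x00000000u, 0x1234abcdu, 0x80000000u, 0xffffffffu };

    for (unsigned i = 0; i < sizeof(qkeys) / sizeof(qkeys[0]); i++) {
        int high_bit_set = (qkeys[i] & 0x80000000u) != 0;
        int sign_test    = (int32_t)qkeys[i] < 0;

        assert(high_bit_set == sign_test);  /* same predicate, two spellings */
    }
    return 0;
}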
-/**
- * qib_make_ud_req - construct a UD request packet
- * @qp: the QP
- *
- * Return 1 if constructed; otherwise, return 0.
- */
-int qib_make_ud_req(struct qib_qp *qp)
-{
- struct qib_other_headers *ohdr;
- struct ib_ah_attr *ah_attr;
- struct qib_pportdata *ppd;
- struct qib_ibport *ibp;
- struct qib_swqe *wqe;
- unsigned long flags;
- u32 nwords;
- u32 extra_bytes;
- u32 bth0;
- u16 lrh0;
- u16 lid;
- int ret = 0;
- int next_cur;
-
- spin_lock_irqsave(&qp->s_lock, flags);
-
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
- if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
- goto bail;
- /* We are in the error state, flush the work request. */
- if (qp->s_last == qp->s_head)
- goto bail;
- /* If DMAs are in progress, we can't flush immediately. */
- if (atomic_read(&qp->s_dma_busy)) {
- qp->s_flags |= QIB_S_WAIT_DMA;
- goto bail;
- }
- wqe = get_swqe_ptr(qp, qp->s_last);
- qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
- goto done;
- }
-
- if (qp->s_cur == qp->s_head)
- goto bail;
-
- wqe = get_swqe_ptr(qp, qp->s_cur);
- next_cur = qp->s_cur + 1;
- if (next_cur >= qp->s_size)
- next_cur = 0;
-
- /* Construct the header. */
- ibp = to_iport(qp->ibqp.device, qp->port_num);
- ppd = ppd_from_ibp(ibp);
- ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
- if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) {
- if (ah_attr->dlid != QIB_PERMISSIVE_LID)
- this_cpu_inc(ibp->pmastats->n_multicast_xmit);
- else
- this_cpu_inc(ibp->pmastats->n_unicast_xmit);
- } else {
- this_cpu_inc(ibp->pmastats->n_unicast_xmit);
- lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
- if (unlikely(lid == ppd->lid)) {
- /*
- * If DMAs are in progress, we can't generate
- * a completion for the loopback packet since
- * it would be out of order.
- * XXX Instead of waiting, we could queue a
- * zero length descriptor so we get a callback.
- */
- if (atomic_read(&qp->s_dma_busy)) {
- qp->s_flags |= QIB_S_WAIT_DMA;
- goto bail;
- }
- qp->s_cur = next_cur;
- spin_unlock_irqrestore(&qp->s_lock, flags);
- qib_ud_loopback(qp, wqe);
- spin_lock_irqsave(&qp->s_lock, flags);
- qib_send_complete(qp, wqe, IB_WC_SUCCESS);
- goto done;
- }
- }
-
- qp->s_cur = next_cur;
- extra_bytes = -wqe->length & 3;
- nwords = (wqe->length + extra_bytes) >> 2;
-
- /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
- qp->s_hdrwords = 7;
- qp->s_cur_size = wqe->length;
- qp->s_cur_sge = &qp->s_sge;
- qp->s_srate = ah_attr->static_rate;
- qp->s_wqe = wqe;
- qp->s_sge.sge = wqe->sg_list[0];
- qp->s_sge.sg_list = wqe->sg_list + 1;
- qp->s_sge.num_sge = wqe->wr.num_sge;
- qp->s_sge.total_len = wqe->length;
-
- if (ah_attr->ah_flags & IB_AH_GRH) {
- /* Header size in 32-bit words. */
- qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh,
- &ah_attr->grh,
- qp->s_hdrwords, nwords);
- lrh0 = QIB_LRH_GRH;
- ohdr = &qp->s_hdr->u.l.oth;
- /*
- * Don't worry about sending to locally attached multicast
- * QPs. It is unspecified by the spec. what happens.
- */
- } else {
- /* Header size in 32-bit words. */
- lrh0 = QIB_LRH_BTH;
- ohdr = &qp->s_hdr->u.oth;
- }
- if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
- qp->s_hdrwords++;
- ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
- bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
- } else
- bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
- lrh0 |= ah_attr->sl << 4;
- if (qp->ibqp.qp_type == IB_QPT_SMI)
- lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
- else
- lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12;
- qp->s_hdr->lrh[0] = cpu_to_be16(lrh0);
- qp->s_hdr->lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
- qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
- lid = ppd->lid;
- if (lid) {
- lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
- qp->s_hdr->lrh[3] = cpu_to_be16(lid);
- } else
- qp->s_hdr->lrh[3] = IB_LID_PERMISSIVE;
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- bth0 |= extra_bytes << 20;
- bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
- qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
- wqe->wr.wr.ud.pkey_index : qp->s_pkey_index);
- ohdr->bth[0] = cpu_to_be32(bth0);
- /*
- * Use the multicast QP if the destination LID is a multicast LID.
- */
- ohdr->bth[1] = ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
- ah_attr->dlid != QIB_PERMISSIVE_LID ?
- cpu_to_be32(QIB_MULTICAST_QPN) :
- cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
- ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK);
- /*
- * Qkeys with the high order bit set mean use the
- * qkey from the QP context instead of the WR (see 10.2.5).
- */
- ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ?
- qp->qkey : wqe->wr.wr.ud.remote_qkey);
- ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
-
-done:
- ret = 1;
- goto unlock;
-
-bail:
- qp->s_flags &= ~QIB_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
- return ret;
-}
-
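In qib_make_ud_req(), `extra_bytes = -wqe->length & 3` is the number of pad bytes needed to round the payload up to a 4-byte boundary, and `nwords` is then the padded length in 32-bit words. A quick standalone check of that identity (hypothetical loop bound, not driver code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    for (uint32_t length = 0; length < 1024; length++) {
        uint32_t extra  = -length & 3;           /* 0..3 pad bytes             */
        uint32_t nwords = (length + extra) >> 2; /* padded length in dwords    */

        assert((length + extra) % 4 == 0);       /* padded length is 4-aligned */
        assert(nwords == (length + 3) / 4);      /* same as round-up division  */
    }
    return 0;
}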
-static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
-{
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_devdata *dd = ppd->dd;
- unsigned ctxt = ppd->hw_pidx;
- unsigned i;
-
- pkey &= 0x7fff; /* remove limited/full membership bit */
-
- for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
- if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
- return i;
-
- /*
- * Should not get here, this means hardware failed to validate pkeys.
- * Punt and return index 0.
- */
- return 0;
-}
-
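qib_lookup_pkey() compares keys with bit 15 (the membership bit) masked off, so a limited-membership pkey and its full-membership counterpart resolve to the same table index. A tiny illustration using the conventional default pkey values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint16_t full    = 0xffff;   /* full-membership default pkey        */
    uint16_t limited = 0x7fff;   /* same key with limited membership    */

    /* Once bit 15 is masked off, both refer to the same partition key. */
    assert((full & 0x7fff) == (limited & 0x7fff));
    return 0;
}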

-/**
- * qib_ud_rcv - receive an incoming UD packet
- * @ibp: the port the packet came in on
- * @hdr: the packet header
- * @has_grh: true if the packet has a GRH
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP the packet came on
- *
- * This is called from qib_qp_rcv() to process an incoming UD packet
- * for the given QP.
- * Called at interrupt level.
- */
-void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct qib_qp *qp)
-{
- struct qib_other_headers *ohdr;
- int opcode;
- u32 hdrsize;
- u32 pad;
- struct ib_wc wc;
- u32 qkey;
- u32 src_qp;
- u16 dlid;
-
- /* Check for GRH */
- if (!has_grh) {
- ohdr = &hdr->u.oth;
- hdrsize = 8 + 12 + 8; /* LRH + BTH + DETH */
- } else {
- ohdr = &hdr->u.l.oth;
- hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
- }
- qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
- src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;
-
- /*
- * Get the number of bytes the message was padded by
- * and drop incomplete packets.
- */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- if (unlikely(tlen < (hdrsize + pad + 4)))
- goto drop;
-
- tlen -= hdrsize + pad + 4;
-
- /*
- * Check that the permissive LID is only used on QP0
- * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
- */
- if (qp->ibqp.qp_num) {
- if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
- hdr->lrh[3] == IB_LID_PERMISSIVE))
- goto drop;
- if (qp->ibqp.qp_num > 1) {
- u16 pkey1, pkey2;
-
- pkey1 = be32_to_cpu(ohdr->bth[0]);
- pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
- if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
- qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
- pkey1,
- (be16_to_cpu(hdr->lrh[0]) >> 4) &
- 0xF,
- src_qp, qp->ibqp.qp_num,
- hdr->lrh[3], hdr->lrh[1]);
- return;
- }
- }
- if (unlikely(qkey != qp->qkey)) {
- qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
- (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
- src_qp, qp->ibqp.qp_num,
- hdr->lrh[3], hdr->lrh[1]);
- return;
- }
- /* Drop invalid MAD packets (see 13.5.3.1). */
- if (unlikely(qp->ibqp.qp_num == 1 &&
- (tlen != 256 ||
- (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
- goto drop;
- } else {
- struct ib_smp *smp;
-
- /* Drop invalid MAD packets (see 13.5.3.1). */
- if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
- goto drop;
- smp = (struct ib_smp *) data;
- if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
- hdr->lrh[3] == IB_LID_PERMISSIVE) &&
- smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- goto drop;
- }
-
- /*
- * The opcode is in the low byte when it's in network order
- * (top byte when in host order).
- */
- opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
- if (qp->ibqp.qp_num > 1 &&
- opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
- wc.ex.imm_data = ohdr->u.ud.imm_data;
- wc.wc_flags = IB_WC_WITH_IMM;
- tlen -= sizeof(u32);
- } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
- wc.ex.imm_data = 0;
- wc.wc_flags = 0;
- } else
- goto drop;
-
- /*
- * A GRH is expected to precede the data even if not
- * present on the wire.
- */
- wc.byte_len = tlen + sizeof(struct ib_grh);
-
- /*
- * Get the next work request entry to find where to put the data.
- */
- if (qp->r_flags & QIB_R_REUSE_SGE)
- qp->r_flags &= ~QIB_R_REUSE_SGE;
- else {
- int ret;
-
- ret = qib_get_rwqe(qp, 0);
- if (ret < 0) {
- qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- return;
- }
- if (!ret) {
- if (qp->ibqp.qp_num == 0)
- ibp->n_vl15_dropped++;
- return;
- }
- }
- /* Silently drop packets which are too big. */
- if (unlikely(wc.byte_len > qp->r_len)) {
- qp->r_flags |= QIB_R_REUSE_SGE;
- goto drop;
- }
- if (has_grh) {
- qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
- sizeof(struct ib_grh), 1);
- wc.wc_flags |= IB_WC_GRH;
- } else
- qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
- qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
- qib_put_ss(&qp->r_sge);
- if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
- return;
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.opcode = IB_WC_RECV;
- wc.vendor_err = 0;
- wc.qp = &qp->ibqp;
- wc.src_qp = src_qp;
- wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
- qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
- wc.slid = be16_to_cpu(hdr->lrh[3]);
- wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
- dlid = be16_to_cpu(hdr->lrh[1]);
- /*
- * Save the LMC lower bits if the destination LID is a unicast LID.
- */
- wc.dlid_path_bits = dlid >= QIB_MULTICAST_LID_BASE ? 0 :
- dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
- wc.port_num = qp->port_num;
- /* Signal completion event if the solicited bit is set. */
- qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
- (ohdr->bth[0] &
- cpu_to_be32(IB_BTH_SOLICITED)) != 0);
- return;
-
-drop:
- ibp->n_pkt_drops++;
-}
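The opcode comment in qib_ud_rcv() is just a restatement of how big-endian conversion works: the first byte on the wire ends up in bits 31..24 of the host-order word, so `>> 24` recovers it. A standalone equivalent of that extraction (manual byte assembly stands in for be32_to_cpu(); 0x64, the UD SEND-only opcode, is used as the example value):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* BTH bytes as they appear on the wire: the opcode is the first byte. */
    uint8_t wire[4] = { 0x64, 0x00, 0xff, 0xff };

    /* Assemble the big-endian bytes into a host-order 32-bit word. */
    uint32_t bth0 = ((uint32_t)wire[0] << 24) | ((uint32_t)wire[1] << 16) |
                    ((uint32_t)wire[2] << 8)  |  (uint32_t)wire[3];

    assert((bth0 >> 24) == 0x64);   /* opcode lands in the top byte */
    return 0;
}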
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
deleted file mode 100644
index 74f90b2619f6..000000000000
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/mm.h>
-#include <linux/device.h>
-
-#include "qib.h"
-
-static void __qib_release_user_pages(struct page **p, size_t num_pages,
- int dirty)
-{
- size_t i;
-
- for (i = 0; i < num_pages; i++) {
- if (dirty)
- set_page_dirty_lock(p[i]);
- put_page(p[i]);
- }
-}
-
-/*
- * Call with current->mm->mmap_sem held.
- */
-static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
- struct page **p)
-{
- unsigned long lock_limit;
- size_t got;
- int ret;
-
- lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
- if (num_pages > lock_limit && !capable(CAP_IPC_LOCK)) {
- ret = -ENOMEM;
- goto bail;
- }
-
- for (got = 0; got < num_pages; got += ret) {
- ret = get_user_pages(current, current->mm,
- start_page + got * PAGE_SIZE,
- num_pages - got, 1, 1,
- p + got, NULL);
- if (ret < 0)
- goto bail_release;
- }
-
- current->mm->pinned_vm += num_pages;
-
- ret = 0;
- goto bail;
-
-bail_release:
- __qib_release_user_pages(p, got, 0);
-bail:
- return ret;
-}
-
-/**
- * qib_map_page - a safety wrapper around pci_map_page()
- *
- * A dma_addr of all 0's is interpreted by the chip as "disabled".
- * Unfortunately, it can also be a valid dma_addr returned on some
- * architectures.
- *
- * The powerpc iommu assigns dma_addrs in ascending order, so we don't
- * have to bother with retries or mapping a dummy page to ensure we
- * don't just get the same mapping again.
- *
- * I'm sure we won't be so lucky with other iommu's, so FIXME.
- */
-dma_addr_t qib_map_page(struct pci_dev *hwdev, struct page *page,
- unsigned long offset, size_t size, int direction)
-{
- dma_addr_t phys;
-
- phys = pci_map_page(hwdev, page, offset, size, direction);
-
- if (phys == 0) {
- pci_unmap_page(hwdev, phys, size, direction);
- phys = pci_map_page(hwdev, page, offset, size, direction);
- /*
- * FIXME: If we get 0 again, we should keep this page,
- * map another, then free the 0 page.
- */
- }
-
- return phys;
-}
-
-/**
- * qib_get_user_pages - lock user pages into memory
- * @start_page: the start page
- * @num_pages: the number of pages
- * @p: the output page structures
- *
- * This function takes a given start page (page aligned user virtual
- * address) and pins it and the following specified number of pages. For
- * now, num_pages is always 1, but that will probably change at some point
- * (because caller is doing expected sends on a single virtually contiguous
- * buffer, so we can do all pages at once).
- */
-int qib_get_user_pages(unsigned long start_page, size_t num_pages,
- struct page **p)
-{
- int ret;
-
- down_write(&current->mm->mmap_sem);
-
- ret = __qib_get_user_pages(start_page, num_pages, p);
-
- up_write(&current->mm->mmap_sem);
-
- return ret;
-}
-
-void qib_release_user_pages(struct page **p, size_t num_pages)
-{
- if (current->mm) /* during close after signal, mm can be NULL */
- down_write(&current->mm->mmap_sem);
-
- __qib_release_user_pages(p, num_pages, 1);
-
- if (current->mm) {
- current->mm->pinned_vm -= num_pages;
- up_write(&current->mm->mmap_sem);
- }
-}
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
deleted file mode 100644
index 3e0677c51276..000000000000
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ /dev/null
@@ -1,1465 +0,0 @@
-/*
- * Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/dmapool.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/highmem.h>
-#include <linux/io.h>
-#include <linux/uio.h>
-#include <linux/rbtree.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-
-#include "qib.h"
-#include "qib_user_sdma.h"
-
-/* minimum size of header */
-#define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
-/* expected size of headers (for dma_pool) */
-#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
-/* attempt to drain the queue for 5secs */
-#define QIB_USER_SDMA_DRAIN_TIMEOUT 250
-
-/*
- * track how many times a process opens this driver.
- */
-static struct rb_root qib_user_sdma_rb_root = RB_ROOT;
-
-struct qib_user_sdma_rb_node {
- struct rb_node node;
- int refcount;
- pid_t pid;
-};
-
-struct qib_user_sdma_pkt {
- struct list_head list; /* list element */
-
- u8 tiddma; /* if this is NEW tid-sdma */
- u8 largepkt; /* this is large pkt from kmalloc */
- u16 frag_size; /* frag size used by PSM */
- u16 index; /* last header index or push index */
- u16 naddr; /* dimension of addr (1..3) ... */
- u16 addrlimit; /* addr array size */
- u16 tidsmidx; /* current tidsm index */
- u16 tidsmcount; /* tidsm array item count */
- u16 payload_size; /* payload size so far for header */
- u32 bytes_togo; /* bytes for processing */
- u32 counter; /* sdma pkts queued counter for this entry */
- struct qib_tid_session_member *tidsm; /* tid session member array */
- struct qib_user_sdma_queue *pq; /* which pq this pkt belongs to */
- u64 added; /* global descq number of entries */
-
- struct {
- u16 offset; /* offset for kvaddr, addr */
- u16 length; /* length in page */
- u16 first_desc; /* first desc */
- u16 last_desc; /* last desc */
- u16 put_page; /* should we put_page? */
- u16 dma_mapped; /* is page dma_mapped? */
- u16 dma_length; /* for dma_unmap_page() */
- u16 padding;
- struct page *page; /* may be NULL (coherent mem) */
- void *kvaddr; /* FIXME: only for pio hack */
- dma_addr_t addr;
- } addr[4]; /* max pages, any more and we coalesce */
-};
-
-struct qib_user_sdma_queue {
- /*
- * pkts sent to dma engine are queued on this
- * list head. the type of the elements of this
- * list are struct qib_user_sdma_pkt...
- */
- struct list_head sent;
-
- /*
- * Because above list will be accessed by both process and
- * signal handler, we need a spinlock for it.
- */
- spinlock_t sent_lock ____cacheline_aligned_in_smp;
-
- /* headers with expected length are allocated from here... */
- char header_cache_name[64];
- struct dma_pool *header_cache;
-
- /* packets are allocated from the slab cache... */
- char pkt_slab_name[64];
- struct kmem_cache *pkt_slab;
-
- /* as packets go on the queued queue, they are counted... */
- u32 counter;
- u32 sent_counter;
- /* pending packets, not sending yet */
- u32 num_pending;
- /* sending packets, not complete yet */
- u32 num_sending;
- /* global descq number of entry of last sending packet */
- u64 added;
-
- /* dma page table */
- struct rb_root dma_pages_root;
-
- struct qib_user_sdma_rb_node *sdma_rb_node;
-
- /* protect everything above... */
- struct mutex lock;
-};
-
-static struct qib_user_sdma_rb_node *
-qib_user_sdma_rb_search(struct rb_root *root, pid_t pid)
-{
- struct qib_user_sdma_rb_node *sdma_rb_node;
- struct rb_node *node = root->rb_node;
-
- while (node) {
- sdma_rb_node = container_of(node,
- struct qib_user_sdma_rb_node, node);
- if (pid < sdma_rb_node->pid)
- node = node->rb_left;
- else if (pid > sdma_rb_node->pid)
- node = node->rb_right;
- else
- return sdma_rb_node;
- }
- return NULL;
-}
-
-static int
-qib_user_sdma_rb_insert(struct rb_root *root, struct qib_user_sdma_rb_node *new)
-{
- struct rb_node **node = &(root->rb_node);
- struct rb_node *parent = NULL;
- struct qib_user_sdma_rb_node *got;
-
- while (*node) {
- got = container_of(*node, struct qib_user_sdma_rb_node, node);
- parent = *node;
- if (new->pid < got->pid)
- node = &((*node)->rb_left);
- else if (new->pid > got->pid)
- node = &((*node)->rb_right);
- else
- return 0;
- }
-
- rb_link_node(&new->node, parent, node);
- rb_insert_color(&new->node, root);
- return 1;
-}
-
-struct qib_user_sdma_queue *
-qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
-{
- struct qib_user_sdma_queue *pq =
- kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);
- struct qib_user_sdma_rb_node *sdma_rb_node;
-
- if (!pq)
- goto done;
-
- pq->counter = 0;
- pq->sent_counter = 0;
- pq->num_pending = 0;
- pq->num_sending = 0;
- pq->added = 0;
- pq->sdma_rb_node = NULL;
-
- INIT_LIST_HEAD(&pq->sent);
- spin_lock_init(&pq->sent_lock);
- mutex_init(&pq->lock);
-
- snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
- "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
- pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
- sizeof(struct qib_user_sdma_pkt),
- 0, 0, NULL);
-
- if (!pq->pkt_slab)
- goto err_kfree;
-
- snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
- "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
- pq->header_cache = dma_pool_create(pq->header_cache_name,
- dev,
- QIB_USER_SDMA_EXP_HEADER_LENGTH,
- 4, 0);
- if (!pq->header_cache)
- goto err_slab;
-
- pq->dma_pages_root = RB_ROOT;
-
- sdma_rb_node = qib_user_sdma_rb_search(&qib_user_sdma_rb_root,
- current->pid);
- if (sdma_rb_node) {
- sdma_rb_node->refcount++;
- } else {
- int ret;
-
- sdma_rb_node = kmalloc(sizeof(
- struct qib_user_sdma_rb_node), GFP_KERNEL);
- if (!sdma_rb_node)
- goto err_rb;
-
- sdma_rb_node->refcount = 1;
- sdma_rb_node->pid = current->pid;
-
- ret = qib_user_sdma_rb_insert(&qib_user_sdma_rb_root,
- sdma_rb_node);
- BUG_ON(ret == 0);
- }
- pq->sdma_rb_node = sdma_rb_node;
-
- goto done;
-
-err_rb:
- dma_pool_destroy(pq->header_cache);
-err_slab:
- kmem_cache_destroy(pq->pkt_slab);
-err_kfree:
- kfree(pq);
- pq = NULL;
-
-done:
- return pq;
-}
-
-static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
- int i, u16 offset, u16 len,
- u16 first_desc, u16 last_desc,
- u16 put_page, u16 dma_mapped,
- struct page *page, void *kvaddr,
- dma_addr_t dma_addr, u16 dma_length)
-{
- pkt->addr[i].offset = offset;
- pkt->addr[i].length = len;
- pkt->addr[i].first_desc = first_desc;
- pkt->addr[i].last_desc = last_desc;
- pkt->addr[i].put_page = put_page;
- pkt->addr[i].dma_mapped = dma_mapped;
- pkt->addr[i].page = page;
- pkt->addr[i].kvaddr = kvaddr;
- pkt->addr[i].addr = dma_addr;
- pkt->addr[i].dma_length = dma_length;
-}
-
-static void *qib_user_sdma_alloc_header(struct qib_user_sdma_queue *pq,
- size_t len, dma_addr_t *dma_addr)
-{
- void *hdr;
-
- if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
- hdr = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
- dma_addr);
- else
- hdr = NULL;
-
- if (!hdr) {
- hdr = kmalloc(len, GFP_KERNEL);
- if (!hdr)
- return NULL;
-
- *dma_addr = 0;
- }
-
- return hdr;
-}
-
-static int qib_user_sdma_page_to_frags(const struct qib_devdata *dd,
- struct qib_user_sdma_queue *pq,
- struct qib_user_sdma_pkt *pkt,
- struct page *page, u16 put,
- u16 offset, u16 len, void *kvaddr)
-{
- __le16 *pbc16;
- void *pbcvaddr;
- struct qib_message_header *hdr;
- u16 newlen, pbclen, lastdesc, dma_mapped;
- u32 vcto;
- union qib_seqnum seqnum;
- dma_addr_t pbcdaddr;
- dma_addr_t dma_addr =
- dma_map_page(&dd->pcidev->dev,
- page, offset, len, DMA_TO_DEVICE);
- int ret = 0;
-
- if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
- /*
- * dma mapping error, pkt has not managed
- * this page yet, return the page here so
- * the caller can ignore this page.
- */
- if (put) {
- put_page(page);
- } else {
- /* coalesce case */
- kunmap(page);
- __free_page(page);
- }
- ret = -ENOMEM;
- goto done;
- }
- offset = 0;
- dma_mapped = 1;
-
-
-next_fragment:
-
- /*
- * In tid-sdma, the transfer length is restricted by
- * receiver side current tid page length.
- */
- if (pkt->tiddma && len > pkt->tidsm[pkt->tidsmidx].length)
- newlen = pkt->tidsm[pkt->tidsmidx].length;
- else
- newlen = len;
-
- /*
- * Then the transfer length is restricted by MTU.
- * The last descriptor flag is determined by:
- * 1. the current packet is at frag size length.
- * 2. the current tid page is done if tid-sdma.
- * 3. there are no more bytes togo if sdma.
- */
- lastdesc = 0;
- if ((pkt->payload_size + newlen) >= pkt->frag_size) {
- newlen = pkt->frag_size - pkt->payload_size;
- lastdesc = 1;
- } else if (pkt->tiddma) {
- if (newlen == pkt->tidsm[pkt->tidsmidx].length)
- lastdesc = 1;
- } else {
- if (newlen == pkt->bytes_togo)
- lastdesc = 1;
- }
-
- /* fill the next fragment in this page */
- qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
- offset, newlen, /* offset, len */
- 0, lastdesc, /* first last desc */
- put, dma_mapped, /* put page, dma mapped */
- page, kvaddr, /* struct page, virt addr */
- dma_addr, len); /* dma addr, dma length */
- pkt->bytes_togo -= newlen;
- pkt->payload_size += newlen;
- pkt->naddr++;
- if (pkt->naddr == pkt->addrlimit) {
- ret = -EFAULT;
- goto done;
- }
-
- /* If there are no more bytes togo (lastdesc==1). */
- if (pkt->bytes_togo == 0) {
- /* The packet is done, but the header is not dma mapped yet;
- * it should be from kmalloc. */
- if (!pkt->addr[pkt->index].addr) {
- pkt->addr[pkt->index].addr =
- dma_map_single(&dd->pcidev->dev,
- pkt->addr[pkt->index].kvaddr,
- pkt->addr[pkt->index].dma_length,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&dd->pcidev->dev,
- pkt->addr[pkt->index].addr)) {
- ret = -ENOMEM;
- goto done;
- }
- pkt->addr[pkt->index].dma_mapped = 1;
- }
-
- goto done;
- }
-
- /* If tid-sdma, advance tid info. */
- if (pkt->tiddma) {
- pkt->tidsm[pkt->tidsmidx].length -= newlen;
- if (pkt->tidsm[pkt->tidsmidx].length) {
- pkt->tidsm[pkt->tidsmidx].offset += newlen;
- } else {
- pkt->tidsmidx++;
- if (pkt->tidsmidx == pkt->tidsmcount) {
- ret = -EFAULT;
- goto done;
- }
- }
- }
-
- /*
- * If this is NOT the last descriptor (newlen==len),
- * the current packet is not done yet, but the current
- * send side page is done.
- */
- if (lastdesc == 0)
- goto done;
-
- /*
- * If running this driver under PSM with a message size
- * that fits into one transfer unit, it is not possible
- * to reach this point; if we do, it is a bug.
- */
-
- /*
- * Since the current packet is done, and there are more
- * bytes togo, we need to create a new sdma header, copying
- * from previous sdma header and modify both.
- */
- pbclen = pkt->addr[pkt->index].length;
- pbcvaddr = qib_user_sdma_alloc_header(pq, pbclen, &pbcdaddr);
- if (!pbcvaddr) {
- ret = -ENOMEM;
- goto done;
- }
- /* Copy the previous sdma header to new sdma header */
- pbc16 = (__le16 *)pkt->addr[pkt->index].kvaddr;
- memcpy(pbcvaddr, pbc16, pbclen);
-
- /* Modify the previous sdma header */
- hdr = (struct qib_message_header *)&pbc16[4];
-
- /* New pbc length */
- pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->bytes_togo>>2));
-
- /* New packet length */
- hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));
-
- if (pkt->tiddma) {
- /* turn on the header suppression */
- hdr->iph.pkt_flags =
- cpu_to_le16(le16_to_cpu(hdr->iph.pkt_flags)|0x2);
- /* turn off ACK_REQ: 0x04 and EXPECTED_DONE: 0x20 */
- hdr->flags &= ~(0x04|0x20);
- } else {
- /* turn off extra bytes: 20-21 bits */
- hdr->bth[0] = cpu_to_be32(be32_to_cpu(hdr->bth[0])&0xFFCFFFFF);
- /* turn off ACK_REQ: 0x04 */
- hdr->flags &= ~(0x04);
- }
-
- /* New kdeth checksum */
- vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
- hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
- be16_to_cpu(hdr->lrh[2]) -
- ((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
- le16_to_cpu(hdr->iph.pkt_flags));
-
- /* The packet is done, but the header is not dma mapped yet;
- * it should be from kmalloc. */
- if (!pkt->addr[pkt->index].addr) {
- pkt->addr[pkt->index].addr =
- dma_map_single(&dd->pcidev->dev,
- pkt->addr[pkt->index].kvaddr,
- pkt->addr[pkt->index].dma_length,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&dd->pcidev->dev,
- pkt->addr[pkt->index].addr)) {
- ret = -ENOMEM;
- goto done;
- }
- pkt->addr[pkt->index].dma_mapped = 1;
- }
-
- /* Modify the new sdma header */
- pbc16 = (__le16 *)pbcvaddr;
- hdr = (struct qib_message_header *)&pbc16[4];
-
- /* New pbc length */
- pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->payload_size>>2));
-
- /* New packet length */
- hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));
-
- if (pkt->tiddma) {
- /* Set new tid and offset for new sdma header */
- hdr->iph.ver_ctxt_tid_offset = cpu_to_le32(
- (le32_to_cpu(hdr->iph.ver_ctxt_tid_offset)&0xFF000000) +
- (pkt->tidsm[pkt->tidsmidx].tid<<QLOGIC_IB_I_TID_SHIFT) +
- (pkt->tidsm[pkt->tidsmidx].offset>>2));
- } else {
- /* Middle protocol new packet offset */
- hdr->uwords[2] += pkt->payload_size;
- }
-
- /* New kdeth checksum */
- vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
- hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
- be16_to_cpu(hdr->lrh[2]) -
- ((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
- le16_to_cpu(hdr->iph.pkt_flags));
-
- /* Next sequence number in new sdma header */
- seqnum.val = be32_to_cpu(hdr->bth[2]);
- if (pkt->tiddma)
- seqnum.seq++;
- else
- seqnum.pkt++;
- hdr->bth[2] = cpu_to_be32(seqnum.val);
-
- /* Init new sdma header. */
- qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
- 0, pbclen, /* offset, len */
- 1, 0, /* first last desc */
- 0, 0, /* put page, dma mapped */
- NULL, pbcvaddr, /* struct page, virt addr */
- pbcdaddr, pbclen); /* dma addr, dma length */
- pkt->index = pkt->naddr;
- pkt->payload_size = 0;
- pkt->naddr++;
- if (pkt->naddr == pkt->addrlimit) {
- ret = -EFAULT;
- goto done;
- }
-
- /* Prepare for next fragment in this page */
- if (newlen != len) {
- if (dma_mapped) {
- put = 0;
- dma_mapped = 0;
- page = NULL;
- kvaddr = NULL;
- }
- len -= newlen;
- offset += newlen;
-
- goto next_fragment;
- }
-
-done:
- return ret;
-}
-
-/* we have too many pages in the iovec; coalesce to a single page */
-static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
- struct qib_user_sdma_queue *pq,
- struct qib_user_sdma_pkt *pkt,
- const struct iovec *iov,
- unsigned long niov)
-{
- int ret = 0;
- struct page *page = alloc_page(GFP_KERNEL);
- void *mpage_save;
- char *mpage;
- int i;
- int len = 0;
-
- if (!page) {
- ret = -ENOMEM;
- goto done;
- }
-
- mpage = kmap(page);
- mpage_save = mpage;
- for (i = 0; i < niov; i++) {
- int cfur;
-
- cfur = copy_from_user(mpage,
- iov[i].iov_base, iov[i].iov_len);
- if (cfur) {
- ret = -EFAULT;
- goto free_unmap;
- }
-
- mpage += iov[i].iov_len;
- len += iov[i].iov_len;
- }
-
- ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
- page, 0, 0, len, mpage_save);
- goto done;
-
-free_unmap:
- kunmap(page);
- __free_page(page);
-done:
- return ret;
-}
-
-/*
- * How many pages in this iovec element?
- */
-static int qib_user_sdma_num_pages(const struct iovec *iov)
-{
- const unsigned long addr = (unsigned long) iov->iov_base;
- const unsigned long len = iov->iov_len;
- const unsigned long spage = addr & PAGE_MASK;
- const unsigned long epage = (addr + len - 1) & PAGE_MASK;
-
- return 1 + ((epage - spage) >> PAGE_SHIFT);
-}
-
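qib_user_sdma_num_pages() counts pages by comparing the page containing the first byte of the iovec with the page containing its last byte. The same arithmetic, reworked as a standalone example with an assumed 4 KiB page size:

#include <assert.h>

#define EX_PAGE_SHIFT 12
#define EX_PAGE_SIZE  (1UL << EX_PAGE_SHIFT)
#define EX_PAGE_MASK  (~(EX_PAGE_SIZE - 1))

static unsigned long num_pages(unsigned long addr, unsigned long len)
{
    unsigned long spage = addr & EX_PAGE_MASK;              /* page of first byte */
    unsigned long epage = (addr + len - 1) & EX_PAGE_MASK;  /* page of last byte  */

    return 1 + ((epage - spage) >> EX_PAGE_SHIFT);
}

int main(void)
{
    assert(num_pages(0x1000, 1) == 1);       /* fits in one page          */
    assert(num_pages(0x1ff0, 0x20) == 2);    /* straddles a page boundary */
    assert(num_pages(0x1000, 0x2000) == 2);  /* exactly two full pages    */
    return 0;
}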
-static void qib_user_sdma_free_pkt_frag(struct device *dev,
- struct qib_user_sdma_queue *pq,
- struct qib_user_sdma_pkt *pkt,
- int frag)
-{
- const int i = frag;
-
- if (pkt->addr[i].page) {
- /* only user data has page */
- if (pkt->addr[i].dma_mapped)
- dma_unmap_page(dev,
- pkt->addr[i].addr,
- pkt->addr[i].dma_length,
- DMA_TO_DEVICE);
-
- if (pkt->addr[i].kvaddr)
- kunmap(pkt->addr[i].page);
-
- if (pkt->addr[i].put_page)
- put_page(pkt->addr[i].page);
- else
- __free_page(pkt->addr[i].page);
- } else if (pkt->addr[i].kvaddr) {
- /* for headers */
- if (pkt->addr[i].dma_mapped) {
- /* from kmalloc & dma mapped */
- dma_unmap_single(dev,
- pkt->addr[i].addr,
- pkt->addr[i].dma_length,
- DMA_TO_DEVICE);
- kfree(pkt->addr[i].kvaddr);
- } else if (pkt->addr[i].addr) {
- /* free coherent mem from cache... */
- dma_pool_free(pq->header_cache,
- pkt->addr[i].kvaddr, pkt->addr[i].addr);
- } else {
- /* from kmalloc but not dma mapped */
- kfree(pkt->addr[i].kvaddr);
- }
- }
-}
-
-/* return number of pages pinned... */
-static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
- struct qib_user_sdma_queue *pq,
- struct qib_user_sdma_pkt *pkt,
- unsigned long addr, int tlen, int npages)
-{
- struct page *pages[8];
- int i, j;
- int ret = 0;
-
- while (npages) {
- if (npages > 8)
- j = 8;
- else
- j = npages;
-
- ret = get_user_pages_fast(addr, j, 0, pages);
- if (ret != j) {
- i = 0;
- j = ret;
- ret = -ENOMEM;
- goto free_pages;
- }
-
- for (i = 0; i < j; i++) {
- /* map the pages... */
- unsigned long fofs = addr & ~PAGE_MASK;
- int flen = ((fofs + tlen) > PAGE_SIZE) ?
- (PAGE_SIZE - fofs) : tlen;
-
- ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
- pages[i], 1, fofs, flen, NULL);
- if (ret < 0) {
- /* current page has been taken
- * care of inside the above call.
- */
- i++;
- goto free_pages;
- }
-
- addr += flen;
- tlen -= flen;
- }
-
- npages -= j;
- }
-
- goto done;
-
- /* if error, return all pages not managed by pkt */
-free_pages:
- while (i < j)
- put_page(pages[i++]);
-
-done:
- return ret;
-}
-
-static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
- struct qib_user_sdma_queue *pq,
- struct qib_user_sdma_pkt *pkt,
- const struct iovec *iov,
- unsigned long niov)
-{
- int ret = 0;
- unsigned long idx;
-
- for (idx = 0; idx < niov; idx++) {
- const int npages = qib_user_sdma_num_pages(iov + idx);
- const unsigned long addr = (unsigned long) iov[idx].iov_base;
-
- ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
- iov[idx].iov_len, npages);
- if (ret < 0)
- goto free_pkt;
- }
-
- goto done;
-
-free_pkt:
- /* we need to ignore the first entry here */
- for (idx = 1; idx < pkt->naddr; idx++)
- qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
-
- /* Need to dma unmap the first entry to restore the original
- * state so that the caller can free the memory on error;
- * the caller does not know whether it was dma mapped or not. */
- if (pkt->addr[0].dma_mapped) {
- dma_unmap_single(&dd->pcidev->dev,
- pkt->addr[0].addr,
- pkt->addr[0].dma_length,
- DMA_TO_DEVICE);
- pkt->addr[0].addr = 0;
- pkt->addr[0].dma_mapped = 0;
- }
-
-done:
- return ret;
-}
-
-static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
- struct qib_user_sdma_queue *pq,
- struct qib_user_sdma_pkt *pkt,
- const struct iovec *iov,
- unsigned long niov, int npages)
-{
- int ret = 0;
-
- if (pkt->frag_size == pkt->bytes_togo &&
- npages >= ARRAY_SIZE(pkt->addr))
- ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov);
- else
- ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
-
- return ret;
-}
-
-/* free a packet list -- return counter value of last packet */
-static void qib_user_sdma_free_pkt_list(struct device *dev,
- struct qib_user_sdma_queue *pq,
- struct list_head *list)
-{
- struct qib_user_sdma_pkt *pkt, *pkt_next;
-
- list_for_each_entry_safe(pkt, pkt_next, list, list) {
- int i;
-
- for (i = 0; i < pkt->naddr; i++)
- qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);
-
- if (pkt->largepkt)
- kfree(pkt);
- else
- kmem_cache_free(pq->pkt_slab, pkt);
- }
- INIT_LIST_HEAD(list);
-}
-
-/*
- * copy headers, coalesce etc -- pq->lock must be held
- *
- * we queue all the packets to list, returning the
- * number of bytes total. list must be empty initially,
- * as, if there is an error we clean it...
- */
-static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
- struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq,
- const struct iovec *iov,
- unsigned long niov,
- struct list_head *list,
- int *maxpkts, int *ndesc)
-{
- unsigned long idx = 0;
- int ret = 0;
- int npkts = 0;
- __le32 *pbc;
- dma_addr_t dma_addr;
- struct qib_user_sdma_pkt *pkt = NULL;
- size_t len;
- size_t nw;
- u32 counter = pq->counter;
- u16 frag_size;
-
- while (idx < niov && npkts < *maxpkts) {
- const unsigned long addr = (unsigned long) iov[idx].iov_base;
- const unsigned long idx_save = idx;
- unsigned pktnw;
- unsigned pktnwc;
- int nfrags = 0;
- int npages = 0;
- int bytes_togo = 0;
- int tiddma = 0;
- int cfur;
-
- len = iov[idx].iov_len;
- nw = len >> 2;
-
- if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
- len > PAGE_SIZE || len & 3 || addr & 3) {
- ret = -EINVAL;
- goto free_list;
- }
-
- pbc = qib_user_sdma_alloc_header(pq, len, &dma_addr);
- if (!pbc) {
- ret = -ENOMEM;
- goto free_list;
- }
-
- cfur = copy_from_user(pbc, iov[idx].iov_base, len);
- if (cfur) {
- ret = -EFAULT;
- goto free_pbc;
- }
-
- /*
- * This assignment is a bit strange. It's because the
- * pbc counts the number of 32 bit words in the full
- * packet _except_ the first word of the pbc itself...
- */
- pktnwc = nw - 1;
-
- /*
- * pktnw computation yields the number of 32 bit words
- * that the caller has indicated in the PBC. Note that
- * this is one less than the total number of words that
- * go to the send DMA engine, as the first 32 bit word
- * of the PBC itself is not counted. Armed with this count,
- * we can verify that the packet is consistent with the
- * iovec lengths.
- */
- pktnw = le32_to_cpu(*pbc) & 0xFFFF;
- if (pktnw < pktnwc) {
- ret = -EINVAL;
- goto free_pbc;
- }
-
- idx++;
- while (pktnwc < pktnw && idx < niov) {
- const size_t slen = iov[idx].iov_len;
- const unsigned long faddr =
- (unsigned long) iov[idx].iov_base;
-
- if (slen & 3 || faddr & 3 || !slen) {
- ret = -EINVAL;
- goto free_pbc;
- }
-
- npages += qib_user_sdma_num_pages(&iov[idx]);
-
- bytes_togo += slen;
- pktnwc += slen >> 2;
- idx++;
- nfrags++;
- }
-
- if (pktnwc != pktnw) {
- ret = -EINVAL;
- goto free_pbc;
- }
-
- frag_size = ((le32_to_cpu(*pbc))>>16) & 0xFFFF;
- if (((frag_size ? frag_size : bytes_togo) + len) >
- ppd->ibmaxlen) {
- ret = -EINVAL;
- goto free_pbc;
- }
-
- if (frag_size) {
- int pktsize, tidsmsize, n;
-
- n = npages*((2*PAGE_SIZE/frag_size)+1);
- pktsize = sizeof(*pkt) + sizeof(pkt->addr[0])*n;
-
- /*
- * Determine if this is tid-sdma or just sdma.
- */
- tiddma = (((le32_to_cpu(pbc[7])>>
- QLOGIC_IB_I_TID_SHIFT)&
- QLOGIC_IB_I_TID_MASK) !=
- QLOGIC_IB_I_TID_MASK);
-
- if (tiddma)
- tidsmsize = iov[idx].iov_len;
- else
- tidsmsize = 0;
-
- pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL);
- if (!pkt) {
- ret = -ENOMEM;
- goto free_pbc;
- }
- pkt->largepkt = 1;
- pkt->frag_size = frag_size;
- pkt->addrlimit = n + ARRAY_SIZE(pkt->addr);
-
- if (tiddma) {
- char *tidsm = (char *)pkt + pktsize;
-
- cfur = copy_from_user(tidsm,
- iov[idx].iov_base, tidsmsize);
- if (cfur) {
- ret = -EFAULT;
- goto free_pkt;
- }
- pkt->tidsm =
- (struct qib_tid_session_member *)tidsm;
- pkt->tidsmcount = tidsmsize/
- sizeof(struct qib_tid_session_member);
- pkt->tidsmidx = 0;
- idx++;
- }
-
- /*
- * The pbc 'fill1' field is borrowed to pass the frag size;
- * we need to clear it after picking up the frag size, since
- * the hardware requires this field to be zero.
- */
- *pbc = cpu_to_le32(le32_to_cpu(*pbc) & 0x0000FFFF);
- } else {
- pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
- if (!pkt) {
- ret = -ENOMEM;
- goto free_pbc;
- }
- pkt->largepkt = 0;
- pkt->frag_size = bytes_togo;
- pkt->addrlimit = ARRAY_SIZE(pkt->addr);
- }
- pkt->bytes_togo = bytes_togo;
- pkt->payload_size = 0;
- pkt->counter = counter;
- pkt->tiddma = tiddma;
-
- /* setup the first header */
- qib_user_sdma_init_frag(pkt, 0, /* index */
- 0, len, /* offset, len */
- 1, 0, /* first last desc */
- 0, 0, /* put page, dma mapped */
- NULL, pbc, /* struct page, virt addr */
- dma_addr, len); /* dma addr, dma length */
- pkt->index = 0;
- pkt->naddr = 1;
-
- if (nfrags) {
- ret = qib_user_sdma_init_payload(dd, pq, pkt,
- iov + idx_save + 1,
- nfrags, npages);
- if (ret < 0)
- goto free_pkt;
- } else {
- /* since there is no payload, mark the
- * header as the last desc. */
- pkt->addr[0].last_desc = 1;
-
- if (dma_addr == 0) {
- /*
- * the header is not dma mapped yet.
- * it should be from kmalloc.
- */
- dma_addr = dma_map_single(&dd->pcidev->dev,
- pbc, len, DMA_TO_DEVICE);
- if (dma_mapping_error(&dd->pcidev->dev,
- dma_addr)) {
- ret = -ENOMEM;
- goto free_pkt;
- }
- pkt->addr[0].addr = dma_addr;
- pkt->addr[0].dma_mapped = 1;
- }
- }
-
- counter++;
- npkts++;
- pkt->pq = pq;
- pkt->index = 0; /* reset index for push on hw */
- *ndesc += pkt->naddr;
-
- list_add_tail(&pkt->list, list);
- }
-
- *maxpkts = npkts;
- ret = idx;
- goto done;
-
-free_pkt:
- if (pkt->largepkt)
- kfree(pkt);
- else
- kmem_cache_free(pq->pkt_slab, pkt);
-free_pbc:
- if (dma_addr)
- dma_pool_free(pq->header_cache, pbc, dma_addr);
- else
- kfree(pbc);
-free_list:
- qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
-done:
- return ret;
-}
-
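The pktnw/pktnwc bookkeeping in qib_user_sdma_queue_pkts() can be hard to follow: the low 16 bits of the first PBC word give the packet length in 32-bit words excluding that first word, and the driver must arrive at the same count from the header and payload iovec lengths. A worked example with made-up sizes (not from a real trace):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t hdr_bytes     = 64;    /* SDMA header iovec (PBC + headers) */
    uint32_t payload_bytes = 2048;  /* one payload iovec                 */

    /* Words the PBC claims: everything except the first PBC word itself. */
    uint32_t pktnw = (hdr_bytes >> 2) - 1 + (payload_bytes >> 2);

    /* Words the driver counts while walking the iovecs. */
    uint32_t pktnwc = (hdr_bytes >> 2) - 1;  /* header contributes nw - 1      */
    pktnwc += payload_bytes >> 2;            /* each payload iovec adds slen>>2 */

    assert(pktnwc == pktnw);                 /* a mismatch is rejected (-EINVAL) */
    return 0;
}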
-static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
- u32 c)
-{
- pq->sent_counter = c;
-}
-
-/* try to clean out queue -- needs pq->lock */
-static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq)
-{
- struct qib_devdata *dd = ppd->dd;
- struct list_head free_list;
- struct qib_user_sdma_pkt *pkt;
- struct qib_user_sdma_pkt *pkt_prev;
- unsigned long flags;
- int ret = 0;
-
- if (!pq->num_sending)
- return 0;
-
- INIT_LIST_HEAD(&free_list);
-
- /*
- * We need this spinlock here because the interrupt handler
- * might modify this list in qib_user_sdma_send_desc(); we also
- * cannot be interrupted here, otherwise we could deadlock.
- */
- spin_lock_irqsave(&pq->sent_lock, flags);
- list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
- s64 descd = ppd->sdma_descq_removed - pkt->added;
-
- if (descd < 0)
- break;
-
- list_move_tail(&pkt->list, &free_list);
-
- /* one more packet cleaned */
- ret++;
- pq->num_sending--;
- }
- spin_unlock_irqrestore(&pq->sent_lock, flags);
-
- if (!list_empty(&free_list)) {
- u32 counter;
-
- pkt = list_entry(free_list.prev,
- struct qib_user_sdma_pkt, list);
- counter = pkt->counter;
-
- qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
- qib_user_sdma_set_complete_counter(pq, counter);
- }
-
- return ret;
-}
-
-void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
-{
- if (!pq)
- return;
-
- pq->sdma_rb_node->refcount--;
- if (pq->sdma_rb_node->refcount == 0) {
- rb_erase(&pq->sdma_rb_node->node, &qib_user_sdma_rb_root);
- kfree(pq->sdma_rb_node);
- }
- dma_pool_destroy(pq->header_cache);
- kmem_cache_destroy(pq->pkt_slab);
- kfree(pq);
-}
-
-/* clean descriptor queue, returns > 0 if some elements cleaned */
-static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- ret = qib_sdma_make_progress(ppd);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-
- return ret;
-}
-
-/* we're in close, drain packets so that we can clean up successfully... */
-void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq)
-{
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
- int i;
-
- if (!pq)
- return;
-
- for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
- mutex_lock(&pq->lock);
- if (!pq->num_pending && !pq->num_sending) {
- mutex_unlock(&pq->lock);
- break;
- }
- qib_user_sdma_hwqueue_clean(ppd);
- qib_user_sdma_queue_clean(ppd, pq);
- mutex_unlock(&pq->lock);
- msleep(20);
- }
-
- if (pq->num_pending || pq->num_sending) {
- struct qib_user_sdma_pkt *pkt;
- struct qib_user_sdma_pkt *pkt_prev;
- struct list_head free_list;
-
- mutex_lock(&pq->lock);
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- /*
- * Since we hold sdma_lock, it is safe without sent_lock.
- */
- if (pq->num_pending) {
- list_for_each_entry_safe(pkt, pkt_prev,
- &ppd->sdma_userpending, list) {
- if (pkt->pq == pq) {
- list_move_tail(&pkt->list, &pq->sent);
- pq->num_pending--;
- pq->num_sending++;
- }
- }
- }
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-
- qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
- INIT_LIST_HEAD(&free_list);
- list_splice_init(&pq->sent, &free_list);
- pq->num_sending = 0;
- qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
- mutex_unlock(&pq->lock);
- }
-}
-
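For reference, the "attempt to drain the queue for 5secs" comment on QIB_USER_SDMA_DRAIN_TIMEOUT matches the loop above: up to 250 iterations with msleep(20) between attempts, i.e. 250 x 20 ms = 5000 ms of polling before the forced-cleanup path runs.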
-static inline __le64 qib_sdma_make_desc0(u8 gen,
- u64 addr, u64 dwlen, u64 dwoffset)
-{
- return cpu_to_le64(/* SDmaPhyAddr[31:0] */
- ((addr & 0xfffffffcULL) << 32) |
- /* SDmaGeneration[1:0] */
- ((gen & 3ULL) << 30) |
- /* SDmaDwordCount[10:0] */
- ((dwlen & 0x7ffULL) << 16) |
- /* SDmaBufOffset[12:2] */
- (dwoffset & 0x7ffULL));
-}
-
-static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
-{
- return descq | cpu_to_le64(1ULL << 12);
-}
-
-static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
-{
- /* last */ /* dma head */
- return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
-}
-
-static inline __le64 qib_sdma_make_desc1(u64 addr)
-{
- /* SDmaPhyAddr[47:32] */
- return cpu_to_le64(addr >> 32);
-}
-
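qib_sdma_make_desc0() packs the low 32 address bits, a 2-bit generation, an 11-bit dword count and an 11-bit dword offset into one 64-bit descriptor word (the real code additionally byte-swaps with cpu_to_le64). A host-order-only sketch of the same packing, with asserts confirming where each field lands (all values are arbitrary examples):

#include <assert.h>
#include <stdint.h>

static uint64_t make_desc0(uint8_t gen, uint64_t addr,
                           uint64_t dwlen, uint64_t dwoffset)
{
    return ((addr & 0xfffffffcULL) << 32) |  /* SDmaPhyAddr[31:0]    -> bits 63:32 */
           ((gen  & 3ULL)          << 30) |  /* SDmaGeneration[1:0]  -> bits 31:30 */
           ((dwlen & 0x7ffULL)     << 16) |  /* SDmaDwordCount[10:0] -> bits 26:16 */
            (dwoffset & 0x7ffULL);           /* SDmaBufOffset[12:2]  -> bits 10:0  */
}

int main(void)
{
    uint64_t d = make_desc0(2, 0x12345678, 0x40, 0x10);

    assert((d >> 32)           == 0x12345678);  /* address field */
    assert(((d >> 30) & 3)     == 2);           /* generation    */
    assert(((d >> 16) & 0x7ff) == 0x40);        /* dword count   */
    assert((d & 0x7ff)         == 0x10);        /* dword offset  */
    return 0;
}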
-static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
- struct qib_user_sdma_pkt *pkt, int idx,
- unsigned ofs, u16 tail, u8 gen)
-{
- const u64 addr = (u64) pkt->addr[idx].addr +
- (u64) pkt->addr[idx].offset;
- const u64 dwlen = (u64) pkt->addr[idx].length / 4;
- __le64 *descqp;
- __le64 descq0;
-
- descqp = &ppd->sdma_descq[tail].qw[0];
-
- descq0 = qib_sdma_make_desc0(gen, addr, dwlen, ofs);
- if (pkt->addr[idx].first_desc)
- descq0 = qib_sdma_make_first_desc0(descq0);
- if (pkt->addr[idx].last_desc) {
- descq0 = qib_sdma_make_last_desc0(descq0);
- if (ppd->sdma_intrequest) {
- descq0 |= cpu_to_le64(1ULL << 15);
- ppd->sdma_intrequest = 0;
- }
- }
-
- descqp[0] = descq0;
- descqp[1] = qib_sdma_make_desc1(addr);
-}
-
-void qib_user_sdma_send_desc(struct qib_pportdata *ppd,
- struct list_head *pktlist)
-{
- struct qib_devdata *dd = ppd->dd;
- u16 nfree, nsent;
- u16 tail, tail_c;
- u8 gen, gen_c;
-
- nfree = qib_sdma_descq_freecnt(ppd);
- if (!nfree)
- return;
-
-retry:
- nsent = 0;
- tail_c = tail = ppd->sdma_descq_tail;
- gen_c = gen = ppd->sdma_generation;
- while (!list_empty(pktlist)) {
- struct qib_user_sdma_pkt *pkt =
- list_entry(pktlist->next, struct qib_user_sdma_pkt,
- list);
- int i, j, c = 0;
- unsigned ofs = 0;
- u16 dtail = tail;
-
- for (i = pkt->index; i < pkt->naddr && nfree; i++) {
- qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail, gen);
- ofs += pkt->addr[i].length >> 2;
-
- if (++tail == ppd->sdma_descq_cnt) {
- tail = 0;
- ++gen;
- ppd->sdma_intrequest = 1;
- } else if (tail == (ppd->sdma_descq_cnt>>1)) {
- ppd->sdma_intrequest = 1;
- }
- nfree--;
- if (pkt->addr[i].last_desc == 0)
- continue;
-
- /*
- * If the packet is >= 2KB mtu equivalent, we
- * have to use the large buffers, and have to
- * mark each descriptor as part of a large
- * buffer packet.
- */
- if (ofs > dd->piosize2kmax_dwords) {
- for (j = pkt->index; j <= i; j++) {
- ppd->sdma_descq[dtail].qw[0] |=
- cpu_to_le64(1ULL << 14);
- if (++dtail == ppd->sdma_descq_cnt)
- dtail = 0;
- }
- }
- c += i + 1 - pkt->index;
- pkt->index = i + 1; /* index for next first */
- tail_c = dtail = tail;
- gen_c = gen;
- ofs = 0; /* reset for next packet */
- }
-
- ppd->sdma_descq_added += c;
- nsent += c;
- if (pkt->index == pkt->naddr) {
- pkt->added = ppd->sdma_descq_added;
- pkt->pq->added = pkt->added;
- pkt->pq->num_pending--;
- spin_lock(&pkt->pq->sent_lock);
- pkt->pq->num_sending++;
- list_move_tail(&pkt->list, &pkt->pq->sent);
- spin_unlock(&pkt->pq->sent_lock);
- }
- if (!nfree || (nsent<<2) > ppd->sdma_descq_cnt)
- break;
- }
-
- /* advance the tail on the chip if necessary */
- if (ppd->sdma_descq_tail != tail_c) {
- ppd->sdma_generation = gen_c;
- dd->f_sdma_update_tail(ppd, tail_c);
- }
-
- if (nfree && !list_empty(pktlist))
- goto retry;
-}
-
-/* pq->lock must be held, get packets on the wire... */
-static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq,
- struct list_head *pktlist, int count)
-{
- unsigned long flags;
-
- if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
- return -ECOMM;
-
- /* non-blocking mode */
- if (pq->sdma_rb_node->refcount > 1) {
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- if (unlikely(!__qib_sdma_running(ppd))) {
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- return -ECOMM;
- }
- pq->num_pending += count;
- list_splice_tail_init(pktlist, &ppd->sdma_userpending);
- qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- return 0;
- }
-
- /* In this case, descriptors from this process are not
- * linked to the ppd pending queue and the interrupt handler
- * won't update this process, so it is OK to modify directly
- * without the sdma lock.
- */
-
-
- pq->num_pending += count;
- /*
- * Blocking mode for a single rail process: we must
- * release/regain sdma_lock to give other processes a
- * chance to make progress. This is important for
- * performance.
- */
- do {
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- if (unlikely(!__qib_sdma_running(ppd))) {
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- return -ECOMM;
- }
- qib_user_sdma_send_desc(ppd, pktlist);
- if (!list_empty(pktlist))
- qib_sdma_make_progress(ppd);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- } while (!list_empty(pktlist));
-
- return 0;
-}
-
-int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
- struct qib_user_sdma_queue *pq,
- const struct iovec *iov,
- unsigned long dim)
-{
- struct qib_devdata *dd = rcd->dd;
- struct qib_pportdata *ppd = rcd->ppd;
- int ret = 0;
- struct list_head list;
- int npkts = 0;
-
- INIT_LIST_HEAD(&list);
-
- mutex_lock(&pq->lock);
-
- /* why not -ECOMM like qib_user_sdma_push_pkts() below? */
- if (!qib_sdma_running(ppd))
- goto done_unlock;
-
- /* if I have packets not complete yet */
- if (pq->added > ppd->sdma_descq_removed)
- qib_user_sdma_hwqueue_clean(ppd);
- /* if I have complete packets to be freed */
- if (pq->num_sending)
- qib_user_sdma_queue_clean(ppd, pq);
-
- while (dim) {
- int mxp = 1;
- int ndesc = 0;
-
- ret = qib_user_sdma_queue_pkts(dd, ppd, pq,
- iov, dim, &list, &mxp, &ndesc);
- if (ret < 0)
- goto done_unlock;
- else {
- dim -= ret;
- iov += ret;
- }
-
- /* force packets onto the sdma hw queue... */
- if (!list_empty(&list)) {
- /*
- * Lazily clean hw queue.
- */
- if (qib_sdma_descq_freecnt(ppd) < ndesc) {
- qib_user_sdma_hwqueue_clean(ppd);
- if (pq->num_sending)
- qib_user_sdma_queue_clean(ppd, pq);
- }
-
- ret = qib_user_sdma_push_pkts(ppd, pq, &list, mxp);
- if (ret < 0)
- goto done_unlock;
- else {
- npkts += mxp;
- pq->counter += mxp;
- }
- }
- }
-
-done_unlock:
- if (!list_empty(&list))
- qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
- mutex_unlock(&pq->lock);
-
- return (ret < 0) ? ret : npkts;
-}
-
-int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq)
-{
- int ret = 0;
-
- mutex_lock(&pq->lock);
- qib_user_sdma_hwqueue_clean(ppd);
- ret = qib_user_sdma_queue_clean(ppd, pq);
- mutex_unlock(&pq->lock);
-
- return ret;
-}
-
-u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
-{
- return pq ? pq->sent_counter : 0;
-}
-
-u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
-{
- return pq ? pq->counter : 0;
-}
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.h b/drivers/infiniband/hw/qib/qib_user_sdma.h
deleted file mode 100644
index ce8cbaf6a5c2..000000000000
--- a/drivers/infiniband/hw/qib/qib_user_sdma.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/device.h>
-
-struct qib_user_sdma_queue;
-
-struct qib_user_sdma_queue *
-qib_user_sdma_queue_create(struct device *dev, int unit, int port, int sport);
-void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq);
-
-int qib_user_sdma_writev(struct qib_ctxtdata *pd,
- struct qib_user_sdma_queue *pq,
- const struct iovec *iov,
- unsigned long dim);
-
-int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq);
-
-void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq);
-
-u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq);
-u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
deleted file mode 100644
index 3dcc4985b60f..000000000000
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ /dev/null
@@ -1,2367 +0,0 @@
-/*
- * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/ib_mad.h>
-#include <rdma/ib_user_verbs.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/utsname.h>
-#include <linux/rculist.h>
-#include <linux/mm.h>
-#include <linux/random.h>
-#include <linux/vmalloc.h>
-
-#include "qib.h"
-#include "qib_common.h"
-
-static unsigned int ib_qib_qp_table_size = 256;
-module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
-MODULE_PARM_DESC(qp_table_size, "QP table size");
-
-unsigned int ib_qib_lkey_table_size = 16;
-module_param_named(lkey_table_size, ib_qib_lkey_table_size, uint,
- S_IRUGO);
-MODULE_PARM_DESC(lkey_table_size,
- "LKEY table size in bits (2^n, 1 <= n <= 23)");
-
-static unsigned int ib_qib_max_pds = 0xFFFF;
-module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
-MODULE_PARM_DESC(max_pds,
- "Maximum number of protection domains to support");
-
-static unsigned int ib_qib_max_ahs = 0xFFFF;
-module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
-MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
-
-unsigned int ib_qib_max_cqes = 0x2FFFF;
-module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
-MODULE_PARM_DESC(max_cqes,
- "Maximum number of completion queue entries to support");
-
-unsigned int ib_qib_max_cqs = 0x1FFFF;
-module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
-MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
-
-unsigned int ib_qib_max_qp_wrs = 0x3FFF;
-module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
-MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
-
-unsigned int ib_qib_max_qps = 16384;
-module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
-MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
-
-unsigned int ib_qib_max_sges = 0x60;
-module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
-MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
-
-unsigned int ib_qib_max_mcast_grps = 16384;
-module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
-MODULE_PARM_DESC(max_mcast_grps,
- "Maximum number of multicast groups to support");
-
-unsigned int ib_qib_max_mcast_qp_attached = 16;
-module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
- uint, S_IRUGO);
-MODULE_PARM_DESC(max_mcast_qp_attached,
- "Maximum number of attached QPs to support");
-
-unsigned int ib_qib_max_srqs = 1024;
-module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
-MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
-
-unsigned int ib_qib_max_srq_sges = 128;
-module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
-MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
-
-unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
-module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
-MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
-
-static unsigned int ib_qib_disable_sma;
-module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(disable_sma, "Disable the SMA");
-
-/*
- * Note that it is OK to post send work requests in the SQE and ERR
- * states; qib_do_send() will process them and generate error
- * completions as per IB 1.2 C10-96.
- */
-const int ib_qib_state_ops[IB_QPS_ERR + 1] = {
- [IB_QPS_RESET] = 0,
- [IB_QPS_INIT] = QIB_POST_RECV_OK,
- [IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK,
- [IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
- QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK |
- QIB_PROCESS_NEXT_SEND_OK,
- [IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
- QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK,
- [IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
- QIB_POST_SEND_OK | QIB_FLUSH_SEND,
- [IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV |
- QIB_POST_SEND_OK | QIB_FLUSH_SEND,
-};
-
-struct qib_ucontext {
- struct ib_ucontext ibucontext;
-};
-
-static inline struct qib_ucontext *to_iucontext(struct ib_ucontext
- *ibucontext)
-{
- return container_of(ibucontext, struct qib_ucontext, ibucontext);
-}
-
-/*
- * Translate ib_wr_opcode into ib_wc_opcode.
- */
-const enum ib_wc_opcode ib_qib_wc_opcode[] = {
- [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
- [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
- [IB_WR_SEND] = IB_WC_SEND,
- [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
- [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
- [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
- [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
-};
-
-/*
- * System image GUID.
- */
-__be64 ib_qib_sys_image_guid;
-
-/**
- * qib_copy_sge - copy data to SGE memory
- * @ss: the SGE state
- * @data: the data to copy
- * @length: the length of the data
- */
-void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
-{
- struct qib_sge *sge = &ss->sge;
-
- while (length) {
- u32 len = sge->length;
-
- if (len > length)
- len = length;
- if (len > sge->sge_length)
- len = sge->sge_length;
- BUG_ON(len == 0);
- memcpy(sge->vaddr, data, len);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (release)
- qib_put_mr(sge->mr);
- if (--ss->num_sge)
- *sge = *ss->sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= QIB_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- data += len;
- length -= len;
- }
-}
-
-/**
- * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
- * @ss: the SGE state
- * @length: the number of bytes to skip
- */
-void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
-{
- struct qib_sge *sge = &ss->sge;
-
- while (length) {
- u32 len = sge->length;
-
- if (len > length)
- len = length;
- if (len > sge->sge_length)
- len = sge->sge_length;
- BUG_ON(len == 0);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (release)
- qib_put_mr(sge->mr);
- if (--ss->num_sge)
- *sge = *ss->sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= QIB_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- length -= len;
- }
-}
-
-/*
- * Count the number of DMA descriptors needed to send length bytes of data.
- * Don't modify the qib_sge_state to get the count.
- * Return zero if any of the segments is not aligned.
- */
-static u32 qib_count_sge(struct qib_sge_state *ss, u32 length)
-{
- struct qib_sge *sg_list = ss->sg_list;
- struct qib_sge sge = ss->sge;
- u8 num_sge = ss->num_sge;
- u32 ndesc = 1; /* count the header */
-
- while (length) {
- u32 len = sge.length;
-
- if (len > length)
- len = length;
- if (len > sge.sge_length)
- len = sge.sge_length;
- BUG_ON(len == 0);
- if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
- (len != length && (len & (sizeof(u32) - 1)))) {
- ndesc = 0;
- break;
- }
- ndesc++;
- sge.vaddr += len;
- sge.length -= len;
- sge.sge_length -= len;
- if (sge.sge_length == 0) {
- if (--num_sge)
- sge = *sg_list++;
- } else if (sge.length == 0 && sge.mr->lkey) {
- if (++sge.n >= QIB_SEGSZ) {
- if (++sge.m >= sge.mr->mapsz)
- break;
- sge.n = 0;
- }
- sge.vaddr =
- sge.mr->map[sge.m]->segs[sge.n].vaddr;
- sge.length =
- sge.mr->map[sge.m]->segs[sge.n].length;
- }
- length -= len;
- }
- return ndesc;
-}
-
-/*
- * Copy from the SGEs to the data buffer.
- */
-static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
-{
- struct qib_sge *sge = &ss->sge;
-
- while (length) {
- u32 len = sge->length;
-
- if (len > length)
- len = length;
- if (len > sge->sge_length)
- len = sge->sge_length;
- BUG_ON(len == 0);
- memcpy(data, sge->vaddr, len);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (--ss->num_sge)
- *sge = *ss->sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= QIB_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- data += len;
- length -= len;
- }
-}
-
-/**
- * qib_post_one_send - post one RC, UC, or UD send work request
- * @qp: the QP to post on
- * @wr: the work request to send
- */
-static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr,
- int *scheduled)
-{
- struct qib_swqe *wqe;
- u32 next;
- int i;
- int j;
- int acc;
- int ret;
- unsigned long flags;
- struct qib_lkey_table *rkt;
- struct qib_pd *pd;
-
- spin_lock_irqsave(&qp->s_lock, flags);
-
- /* Check that state is OK to post send. */
- if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
- goto bail_inval;
-
- /* IB spec says that num_sge == 0 is OK. */
- if (wr->num_sge > qp->s_max_sge)
- goto bail_inval;
-
- /*
- * Don't allow RDMA reads or atomic operations on UC QPs,
- * or undefined opcodes.
- * Make sure the buffer is large enough to hold the result for atomics.
- */
- if (wr->opcode == IB_WR_FAST_REG_MR) {
- if (qib_fast_reg_mr(qp, wr))
- goto bail_inval;
- } else if (qp->ibqp.qp_type == IB_QPT_UC) {
- if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
- goto bail_inval;
- } else if (qp->ibqp.qp_type != IB_QPT_RC) {
- /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
- if (wr->opcode != IB_WR_SEND &&
- wr->opcode != IB_WR_SEND_WITH_IMM)
- goto bail_inval;
- /* Check UD destination address PD */
- if (qp->ibqp.pd != wr->wr.ud.ah->pd)
- goto bail_inval;
- } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
- goto bail_inval;
- else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
- (wr->num_sge == 0 ||
- wr->sg_list[0].length < sizeof(u64) ||
- wr->sg_list[0].addr & (sizeof(u64) - 1)))
- goto bail_inval;
- else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
- goto bail_inval;
-
- next = qp->s_head + 1;
- if (next >= qp->s_size)
- next = 0;
- if (next == qp->s_last) {
- ret = -ENOMEM;
- goto bail;
- }
-
- rkt = &to_idev(qp->ibqp.device)->lk_table;
- pd = to_ipd(qp->ibqp.pd);
- wqe = get_swqe_ptr(qp, qp->s_head);
- wqe->wr = *wr;
- wqe->length = 0;
- j = 0;
- if (wr->num_sge) {
- acc = wr->opcode >= IB_WR_RDMA_READ ?
- IB_ACCESS_LOCAL_WRITE : 0;
- for (i = 0; i < wr->num_sge; i++) {
- u32 length = wr->sg_list[i].length;
- int ok;
-
- if (length == 0)
- continue;
- ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j],
- &wr->sg_list[i], acc);
- if (!ok)
- goto bail_inval_free;
- wqe->length += length;
- j++;
- }
- wqe->wr.num_sge = j;
- }
- if (qp->ibqp.qp_type == IB_QPT_UC ||
- qp->ibqp.qp_type == IB_QPT_RC) {
- if (wqe->length > 0x80000000U)
- goto bail_inval_free;
- } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
- qp->port_num - 1)->ibmtu)
- goto bail_inval_free;
- else
- atomic_inc(&to_iah(wr->wr.ud.ah)->refcount);
- wqe->ssn = qp->s_ssn++;
- qp->s_head = next;
-
- ret = 0;
- goto bail;
-
-bail_inval_free:
- while (j) {
- struct qib_sge *sge = &wqe->sg_list[--j];
-
- qib_put_mr(sge->mr);
- }
-bail_inval:
- ret = -EINVAL;
-bail:
- if (!ret && !wr->next &&
- !qib_sdma_empty(
- dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) {
- qib_schedule_send(qp);
- *scheduled = 1;
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
- return ret;
-}
-
-/**
- * qib_post_send - post a send on a QP
- * @ibqp: the QP to post the send on
- * @wr: the list of work requests to post
- * @bad_wr: the first bad WR is put here
- *
- * This may be called from interrupt context.
- */
-static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
-{
- struct qib_qp *qp = to_iqp(ibqp);
- int err = 0;
- int scheduled = 0;
-
- for (; wr; wr = wr->next) {
- err = qib_post_one_send(qp, wr, &scheduled);
- if (err) {
- *bad_wr = wr;
- goto bail;
- }
- }
-
- /* Try to do the send work in the caller's context. */
- if (!scheduled)
- qib_do_send(&qp->s_work);
-
-bail:
- return err;
-}
-
-/**
- * qib_post_receive - post a receive on a QP
- * @ibqp: the QP to post the receive on
- * @wr: the WR to post
- * @bad_wr: the first bad WR is put here
- *
- * This may be called from interrupt context.
- */
-static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
-{
- struct qib_qp *qp = to_iqp(ibqp);
- struct qib_rwq *wq = qp->r_rq.wq;
- unsigned long flags;
- int ret;
-
- /* Check that state is OK to post receive. */
- if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) {
- *bad_wr = wr;
- ret = -EINVAL;
- goto bail;
- }
-
- for (; wr; wr = wr->next) {
- struct qib_rwqe *wqe;
- u32 next;
- int i;
-
- if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
- *bad_wr = wr;
- ret = -EINVAL;
- goto bail;
- }
-
- spin_lock_irqsave(&qp->r_rq.lock, flags);
- next = wq->head + 1;
- if (next >= qp->r_rq.size)
- next = 0;
- if (next == wq->tail) {
- spin_unlock_irqrestore(&qp->r_rq.lock, flags);
- *bad_wr = wr;
- ret = -ENOMEM;
- goto bail;
- }
-
- wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
- wqe->wr_id = wr->wr_id;
- wqe->num_sge = wr->num_sge;
- for (i = 0; i < wr->num_sge; i++)
- wqe->sg_list[i] = wr->sg_list[i];
- /* Make sure queue entry is written before the head index. */
- smp_wmb();
- wq->head = next;
- spin_unlock_irqrestore(&qp->r_rq.lock, flags);
- }
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * qib_qp_rcv - processing an incoming packet on a QP
- * @rcd: the context pointer
- * @hdr: the packet header
- * @has_grh: true if the packet has a GRH
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP the packet came on
- *
- * This is called from qib_ib_rcv() to process an incoming packet
- * for the given QP.
- * Called at interrupt level.
- */
-static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct qib_qp *qp)
-{
- struct qib_ibport *ibp = &rcd->ppd->ibport_data;
-
- spin_lock(&qp->r_lock);
-
- /* Check for valid receive state. */
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
- ibp->n_pkt_drops++;
- goto unlock;
- }
-
- switch (qp->ibqp.qp_type) {
- case IB_QPT_SMI:
- case IB_QPT_GSI:
- if (ib_qib_disable_sma)
- break;
- /* FALLTHROUGH */
- case IB_QPT_UD:
- qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
- break;
-
- case IB_QPT_RC:
- qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
- break;
-
- case IB_QPT_UC:
- qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
- break;
-
- default:
- break;
- }
-
-unlock:
- spin_unlock(&qp->r_lock);
-}
-
-/**
- * qib_ib_rcv - process an incoming packet
- * @rcd: the context pointer
- * @rhdr: the header of the packet
- * @data: the packet payload
- * @tlen: the packet length
- *
- * This is called from qib_kreceive() to process an incoming packet at
- * interrupt level. Tlen is the length of the header + data + CRC in bytes.
- */
-void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
-{
- struct qib_pportdata *ppd = rcd->ppd;
- struct qib_ibport *ibp = &ppd->ibport_data;
- struct qib_ib_header *hdr = rhdr;
- struct qib_other_headers *ohdr;
- struct qib_qp *qp;
- u32 qp_num;
- int lnh;
- u8 opcode;
- u16 lid;
-
- /* 24 == LRH+BTH+CRC */
- if (unlikely(tlen < 24))
- goto drop;
-
- /* Check for a valid destination LID (see ch. 7.11.1). */
- lid = be16_to_cpu(hdr->lrh[1]);
- if (lid < QIB_MULTICAST_LID_BASE) {
- lid &= ~((1 << ppd->lmc) - 1);
- if (unlikely(lid != ppd->lid))
- goto drop;
- }
-
- /* Check for GRH */
- lnh = be16_to_cpu(hdr->lrh[0]) & 3;
- if (lnh == QIB_LRH_BTH)
- ohdr = &hdr->u.oth;
- else if (lnh == QIB_LRH_GRH) {
- u32 vtf;
-
- ohdr = &hdr->u.l.oth;
- if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
- goto drop;
- vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
- if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
- goto drop;
- } else
- goto drop;
-
- opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
-#ifdef CONFIG_DEBUG_FS
- rcd->opstats->stats[opcode].n_bytes += tlen;
- rcd->opstats->stats[opcode].n_packets++;
-#endif
-
- /* Get the destination QP number. */
- qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
- if (qp_num == QIB_MULTICAST_QPN) {
- struct qib_mcast *mcast;
- struct qib_mcast_qp *p;
-
- if (lnh != QIB_LRH_GRH)
- goto drop;
- mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
- if (mcast == NULL)
- goto drop;
- this_cpu_inc(ibp->pmastats->n_multicast_rcv);
- list_for_each_entry_rcu(p, &mcast->qp_list, list)
- qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
- /*
- * Notify qib_multicast_detach() if it is waiting for us
- * to finish.
- */
- if (atomic_dec_return(&mcast->refcount) <= 1)
- wake_up(&mcast->wait);
- } else {
- if (rcd->lookaside_qp) {
- if (rcd->lookaside_qpn != qp_num) {
- if (atomic_dec_and_test(
- &rcd->lookaside_qp->refcount))
- wake_up(
- &rcd->lookaside_qp->wait);
- rcd->lookaside_qp = NULL;
- }
- }
- if (!rcd->lookaside_qp) {
- qp = qib_lookup_qpn(ibp, qp_num);
- if (!qp)
- goto drop;
- rcd->lookaside_qp = qp;
- rcd->lookaside_qpn = qp_num;
- } else
- qp = rcd->lookaside_qp;
- this_cpu_inc(ibp->pmastats->n_unicast_rcv);
- qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
- }
- return;
-
-drop:
- ibp->n_pkt_drops++;
-}
-
-/*
- * This is called from a timer to check for QPs
- * which need kernel memory in order to send a packet.
- */
-static void mem_timer(unsigned long data)
-{
- struct qib_ibdev *dev = (struct qib_ibdev *) data;
- struct list_head *list = &dev->memwait;
- struct qib_qp *qp = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->pending_lock, flags);
- if (!list_empty(list)) {
- qp = list_entry(list->next, struct qib_qp, iowait);
- list_del_init(&qp->iowait);
- atomic_inc(&qp->refcount);
- if (!list_empty(list))
- mod_timer(&dev->mem_timer, jiffies + 1);
- }
- spin_unlock_irqrestore(&dev->pending_lock, flags);
-
- if (qp) {
- spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_flags & QIB_S_WAIT_KMEM) {
- qp->s_flags &= ~QIB_S_WAIT_KMEM;
- qib_schedule_send(qp);
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
- }
-}
-
-static void update_sge(struct qib_sge_state *ss, u32 length)
-{
- struct qib_sge *sge = &ss->sge;
-
- sge->vaddr += length;
- sge->length -= length;
- sge->sge_length -= length;
- if (sge->sge_length == 0) {
- if (--ss->num_sge)
- *sge = *ss->sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= QIB_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- return;
- sge->n = 0;
- }
- sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
- }
-}
-
-#ifdef __LITTLE_ENDIAN
-static inline u32 get_upper_bits(u32 data, u32 shift)
-{
- return data >> shift;
-}
-
-static inline u32 set_upper_bits(u32 data, u32 shift)
-{
- return data << shift;
-}
-
-static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
-{
- data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
- data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
- return data;
-}
-#else
-static inline u32 get_upper_bits(u32 data, u32 shift)
-{
- return data << shift;
-}
-
-static inline u32 set_upper_bits(u32 data, u32 shift)
-{
- return data >> shift;
-}
-
-static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
-{
- data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
- data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
- return data;
-}
-#endif
-
-static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
- u32 length, unsigned flush_wc)
-{
- u32 extra = 0;
- u32 data = 0;
- u32 last;
-
- while (1) {
- u32 len = ss->sge.length;
- u32 off;
-
- if (len > length)
- len = length;
- if (len > ss->sge.sge_length)
- len = ss->sge.sge_length;
- BUG_ON(len == 0);
- /* If the source address is not aligned, try to align it. */
- off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
- if (off) {
- u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
- ~(sizeof(u32) - 1));
- u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
- u32 y;
-
- y = sizeof(u32) - off;
- if (len > y)
- len = y;
- if (len + extra >= sizeof(u32)) {
- data |= set_upper_bits(v, extra *
- BITS_PER_BYTE);
- len = sizeof(u32) - extra;
- if (len == length) {
- last = data;
- break;
- }
- __raw_writel(data, piobuf);
- piobuf++;
- extra = 0;
- data = 0;
- } else {
- /* Clear unused upper bytes */
- data |= clear_upper_bytes(v, len, extra);
- if (len == length) {
- last = data;
- break;
- }
- extra += len;
- }
- } else if (extra) {
- /* Source address is aligned. */
- u32 *addr = (u32 *) ss->sge.vaddr;
- int shift = extra * BITS_PER_BYTE;
- int ushift = 32 - shift;
- u32 l = len;
-
- while (l >= sizeof(u32)) {
- u32 v = *addr;
-
- data |= set_upper_bits(v, shift);
- __raw_writel(data, piobuf);
- data = get_upper_bits(v, ushift);
- piobuf++;
- addr++;
- l -= sizeof(u32);
- }
- /*
- * 'l' bytes (fewer than a dword) remain, plus the 'extra' carry bytes.
- */
- if (l) {
- u32 v = *addr;
-
- if (l + extra >= sizeof(u32)) {
- data |= set_upper_bits(v, shift);
- len -= l + extra - sizeof(u32);
- if (len == length) {
- last = data;
- break;
- }
- __raw_writel(data, piobuf);
- piobuf++;
- extra = 0;
- data = 0;
- } else {
- /* Clear unused upper bytes */
- data |= clear_upper_bytes(v, l, extra);
- if (len == length) {
- last = data;
- break;
- }
- extra += l;
- }
- } else if (len == length) {
- last = data;
- break;
- }
- } else if (len == length) {
- u32 w;
-
- /*
- * Need to round up for the last dword in the
- * packet.
- */
- w = (len + 3) >> 2;
- qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
- piobuf += w - 1;
- last = ((u32 *) ss->sge.vaddr)[w - 1];
- break;
- } else {
- u32 w = len >> 2;
-
- qib_pio_copy(piobuf, ss->sge.vaddr, w);
- piobuf += w;
-
- extra = len & (sizeof(u32) - 1);
- if (extra) {
- u32 v = ((u32 *) ss->sge.vaddr)[w];
-
- /* Clear unused upper bytes */
- data = clear_upper_bytes(v, extra, 0);
- }
- }
- update_sge(ss, len);
- length -= len;
- }
- /* Update address before sending packet. */
- update_sge(ss, length);
- if (flush_wc) {
- /* must flush everything early, before the trigger word */
- qib_flush_wc();
- __raw_writel(last, piobuf);
- /* be sure trigger word is written */
- qib_flush_wc();
- } else
- __raw_writel(last, piobuf);
-}
-
-static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
- struct qib_qp *qp)
-{
- struct qib_verbs_txreq *tx;
- unsigned long flags;
-
- spin_lock_irqsave(&qp->s_lock, flags);
- spin_lock(&dev->pending_lock);
-
- if (!list_empty(&dev->txreq_free)) {
- struct list_head *l = dev->txreq_free.next;
-
- list_del(l);
- spin_unlock(&dev->pending_lock);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
- } else {
- if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
- list_empty(&qp->iowait)) {
- dev->n_txwait++;
- qp->s_flags |= QIB_S_WAIT_TX;
- list_add_tail(&qp->iowait, &dev->txwait);
- }
- qp->s_flags &= ~QIB_S_BUSY;
- spin_unlock(&dev->pending_lock);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- tx = ERR_PTR(-EBUSY);
- }
- return tx;
-}
-
-static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
- struct qib_qp *qp)
-{
- struct qib_verbs_txreq *tx;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->pending_lock, flags);
- /* assume the free list is non-empty (the common case) */
- if (likely(!list_empty(&dev->txreq_free))) {
- struct list_head *l = dev->txreq_free.next;
-
- list_del(l);
- spin_unlock_irqrestore(&dev->pending_lock, flags);
- tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
- } else {
- /* call slow path to get the extra lock */
- spin_unlock_irqrestore(&dev->pending_lock, flags);
- tx = __get_txreq(dev, qp);
- }
- return tx;
-}
-
-void qib_put_txreq(struct qib_verbs_txreq *tx)
-{
- struct qib_ibdev *dev;
- struct qib_qp *qp;
- unsigned long flags;
-
- qp = tx->qp;
- dev = to_idev(qp->ibqp.device);
-
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
- if (tx->mr) {
- qib_put_mr(tx->mr);
- tx->mr = NULL;
- }
- if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
- tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
- dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
- tx->txreq.addr, tx->hdr_dwords << 2,
- DMA_TO_DEVICE);
- kfree(tx->align_buf);
- }
-
- spin_lock_irqsave(&dev->pending_lock, flags);
-
- /* Put struct back on free list */
- list_add(&tx->txreq.list, &dev->txreq_free);
-
- if (!list_empty(&dev->txwait)) {
- /* Wake up first QP wanting a free struct */
- qp = list_entry(dev->txwait.next, struct qib_qp, iowait);
- list_del_init(&qp->iowait);
- atomic_inc(&qp->refcount);
- spin_unlock_irqrestore(&dev->pending_lock, flags);
-
- spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_flags & QIB_S_WAIT_TX) {
- qp->s_flags &= ~QIB_S_WAIT_TX;
- qib_schedule_send(qp);
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
- } else
- spin_unlock_irqrestore(&dev->pending_lock, flags);
-}
-
-/*
- * This is called when there are send DMA descriptors that might be
- * available.
- *
- * This is called with ppd->sdma_lock held.
- */
-void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
-{
- struct qib_qp *qp, *nqp;
- struct qib_qp *qps[20];
- struct qib_ibdev *dev;
- unsigned i, n;
-
- n = 0;
- dev = &ppd->dd->verbs_dev;
- spin_lock(&dev->pending_lock);
-
- /* Search wait list for first QP wanting DMA descriptors. */
- list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) {
- if (qp->port_num != ppd->port)
- continue;
- if (n == ARRAY_SIZE(qps))
- break;
- if (qp->s_tx->txreq.sg_count > avail)
- break;
- avail -= qp->s_tx->txreq.sg_count;
- list_del_init(&qp->iowait);
- atomic_inc(&qp->refcount);
- qps[n++] = qp;
- }
-
- spin_unlock(&dev->pending_lock);
-
- for (i = 0; i < n; i++) {
- qp = qps[i];
- spin_lock(&qp->s_lock);
- if (qp->s_flags & QIB_S_WAIT_DMA_DESC) {
- qp->s_flags &= ~QIB_S_WAIT_DMA_DESC;
- qib_schedule_send(qp);
- }
- spin_unlock(&qp->s_lock);
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
- }
-}
-
-/*
- * This is called with ppd->sdma_lock held.
- */
-static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
-{
- struct qib_verbs_txreq *tx =
- container_of(cookie, struct qib_verbs_txreq, txreq);
- struct qib_qp *qp = tx->qp;
-
- spin_lock(&qp->s_lock);
- if (tx->wqe)
- qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
- else if (qp->ibqp.qp_type == IB_QPT_RC) {
- struct qib_ib_header *hdr;
-
- if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
- hdr = &tx->align_buf->hdr;
- else {
- struct qib_ibdev *dev = to_idev(qp->ibqp.device);
-
- hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
- }
- qib_rc_send_complete(qp, hdr);
- }
- if (atomic_dec_and_test(&qp->s_dma_busy)) {
- if (qp->state == IB_QPS_RESET)
- wake_up(&qp->wait_dma);
- else if (qp->s_flags & QIB_S_WAIT_DMA) {
- qp->s_flags &= ~QIB_S_WAIT_DMA;
- qib_schedule_send(qp);
- }
- }
- spin_unlock(&qp->s_lock);
-
- qib_put_txreq(tx);
-}
-
-static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
- spin_lock(&dev->pending_lock);
- if (list_empty(&qp->iowait)) {
- if (list_empty(&dev->memwait))
- mod_timer(&dev->mem_timer, jiffies + 1);
- qp->s_flags |= QIB_S_WAIT_KMEM;
- list_add_tail(&qp->iowait, &dev->memwait);
- }
- spin_unlock(&dev->pending_lock);
- qp->s_flags &= ~QIB_S_BUSY;
- ret = -EBUSY;
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
- return ret;
-}
-
-static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
- u32 hdrwords, struct qib_sge_state *ss, u32 len,
- u32 plen, u32 dwords)
-{
- struct qib_ibdev *dev = to_idev(qp->ibqp.device);
- struct qib_devdata *dd = dd_from_dev(dev);
- struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_verbs_txreq *tx;
- struct qib_pio_header *phdr;
- u32 control;
- u32 ndesc;
- int ret;
-
- tx = qp->s_tx;
- if (tx) {
- qp->s_tx = NULL;
- /* resend previously constructed packet */
- ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
- goto bail;
- }
-
- tx = get_txreq(dev, qp);
- if (IS_ERR(tx))
- goto bail_tx;
-
- control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
- be16_to_cpu(hdr->lrh[0]) >> 12);
- tx->qp = qp;
- atomic_inc(&qp->refcount);
- tx->wqe = qp->s_wqe;
- tx->mr = qp->s_rdma_mr;
- if (qp->s_rdma_mr)
- qp->s_rdma_mr = NULL;
- tx->txreq.callback = sdma_complete;
- if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
- tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
- else
- tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
- if (plen + 1 > dd->piosize2kmax_dwords)
- tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;
-
- if (len) {
- /*
- * Don't try to DMA if it takes more descriptors than
- * the queue holds.
- */
- ndesc = qib_count_sge(ss, len);
- if (ndesc >= ppd->sdma_descq_cnt)
- ndesc = 0;
- } else
- ndesc = 1;
- if (ndesc) {
- phdr = &dev->pio_hdrs[tx->hdr_inx];
- phdr->pbc[0] = cpu_to_le32(plen);
- phdr->pbc[1] = cpu_to_le32(control);
- memcpy(&phdr->hdr, hdr, hdrwords << 2);
- tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
- tx->txreq.sg_count = ndesc;
- tx->txreq.addr = dev->pio_hdrs_phys +
- tx->hdr_inx * sizeof(struct qib_pio_header);
- tx->hdr_dwords = hdrwords + 2; /* add PBC length */
- ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
- goto bail;
- }
-
- /* Allocate a buffer and copy the header and payload to it. */
- tx->hdr_dwords = plen + 1;
- phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
- if (!phdr)
- goto err_tx;
- phdr->pbc[0] = cpu_to_le32(plen);
- phdr->pbc[1] = cpu_to_le32(control);
- memcpy(&phdr->hdr, hdr, hdrwords << 2);
- qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);
-
- tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
- tx->hdr_dwords << 2, DMA_TO_DEVICE);
- if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
- goto map_err;
- tx->align_buf = phdr;
- tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
- tx->txreq.sg_count = 1;
- ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
- goto unaligned;
-
-map_err:
- kfree(phdr);
-err_tx:
- qib_put_txreq(tx);
- ret = wait_kmem(dev, qp);
-unaligned:
- ibp->n_unaligned++;
-bail:
- return ret;
-bail_tx:
- ret = PTR_ERR(tx);
- goto bail;
-}
-
-/*
- * If we are now in the error state, return zero to flush the
- * send work request.
- */
-static int no_bufs_available(struct qib_qp *qp)
-{
- struct qib_ibdev *dev = to_idev(qp->ibqp.device);
- struct qib_devdata *dd;
- unsigned long flags;
- int ret = 0;
-
- /*
- * Note that as soon as the PIO-available interrupt is requested
- * (f_wantpiobuf_intr()), and possibly before that call returns,
- * qib_ib_piobufavail() could be called. Therefore, put the QP on
- * the I/O wait list before enabling the interrupt.
- */
- spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
- spin_lock(&dev->pending_lock);
- if (list_empty(&qp->iowait)) {
- dev->n_piowait++;
- qp->s_flags |= QIB_S_WAIT_PIO;
- list_add_tail(&qp->iowait, &dev->piowait);
- dd = dd_from_dev(dev);
- dd->f_wantpiobuf_intr(dd, 1);
- }
- spin_unlock(&dev->pending_lock);
- qp->s_flags &= ~QIB_S_BUSY;
- ret = -EBUSY;
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
- return ret;
-}
-
-static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
- u32 hdrwords, struct qib_sge_state *ss, u32 len,
- u32 plen, u32 dwords)
-{
- struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
- struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
- u32 *hdr = (u32 *) ibhdr;
- u32 __iomem *piobuf_orig;
- u32 __iomem *piobuf;
- u64 pbc;
- unsigned long flags;
- unsigned flush_wc;
- u32 control;
- u32 pbufn;
-
- control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
- be16_to_cpu(ibhdr->lrh[0]) >> 12);
- pbc = ((u64) control << 32) | plen;
- piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
- if (unlikely(piobuf == NULL))
- return no_bufs_available(qp);
-
- /*
- * Write the pbc.
- * We have to flush after the PBC for correctness on some cpus
- * or WC buffer can be written out of order.
- */
- writeq(pbc, piobuf);
- piobuf_orig = piobuf;
- piobuf += 2;
-
- flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
- if (len == 0) {
- /*
- * If there is just the header portion, must flush before
- * writing last word of header for correctness, and after
- * the last header word (trigger word).
- */
- if (flush_wc) {
- qib_flush_wc();
- qib_pio_copy(piobuf, hdr, hdrwords - 1);
- qib_flush_wc();
- __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
- qib_flush_wc();
- } else
- qib_pio_copy(piobuf, hdr, hdrwords);
- goto done;
- }
-
- if (flush_wc)
- qib_flush_wc();
- qib_pio_copy(piobuf, hdr, hdrwords);
- piobuf += hdrwords;
-
- /* The common case is aligned and contained in one segment. */
- if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
- !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
- u32 *addr = (u32 *) ss->sge.vaddr;
-
- /* Update address before sending packet. */
- update_sge(ss, len);
- if (flush_wc) {
- qib_pio_copy(piobuf, addr, dwords - 1);
- /* must flush everything early, before the trigger word */
- qib_flush_wc();
- __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
- /* be sure trigger word is written */
- qib_flush_wc();
- } else
- qib_pio_copy(piobuf, addr, dwords);
- goto done;
- }
- copy_io(piobuf, ss, len, flush_wc);
-done:
- if (dd->flags & QIB_USE_SPCL_TRIG) {
- u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
-
- qib_flush_wc();
- __raw_writel(0xaebecede, piobuf_orig + spcl_off);
- }
- qib_sendbuf_done(dd, pbufn);
- if (qp->s_rdma_mr) {
- qib_put_mr(qp->s_rdma_mr);
- qp->s_rdma_mr = NULL;
- }
- if (qp->s_wqe) {
- spin_lock_irqsave(&qp->s_lock, flags);
- qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- } else if (qp->ibqp.qp_type == IB_QPT_RC) {
- spin_lock_irqsave(&qp->s_lock, flags);
- qib_rc_send_complete(qp, ibhdr);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- }
- return 0;
-}
-
-/**
- * qib_verbs_send - send a packet
- * @qp: the QP to send on
- * @hdr: the packet header
- * @hdrwords: the number of 32-bit words in the header
- * @ss: the SGE to send
- * @len: the length of the packet in bytes
- *
- * Return zero if packet is sent or queued OK.
- * Return non-zero and clear qp->s_flags QIB_S_BUSY otherwise.
- */
-int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
- u32 hdrwords, struct qib_sge_state *ss, u32 len)
-{
- struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
- u32 plen;
- int ret;
- u32 dwords = (len + 3) >> 2;
-
- /*
- * Calculate the send buffer trigger address.
- * The +1 accounts for the PBC control dword that follows the PBC length.
- */
- plen = hdrwords + dwords + 1;
-
- /*
- * VL15 packets (IB_QPT_SMI) will always use PIO, so we
- * can defer SDMA restart until link goes ACTIVE without
- * worrying about just how we got there.
- */
- if (qp->ibqp.qp_type == IB_QPT_SMI ||
- !(dd->flags & QIB_HAS_SEND_DMA))
- ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
- plen, dwords);
- else
- ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
- plen, dwords);
-
- return ret;
-}
-
-int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
- u64 *rwords, u64 *spkts, u64 *rpkts,
- u64 *xmit_wait)
-{
- int ret;
- struct qib_devdata *dd = ppd->dd;
-
- if (!(dd->flags & QIB_PRESENT)) {
- /* no hardware, freeze, etc. */
- ret = -EINVAL;
- goto bail;
- }
- *swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
- *rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
- *spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
- *rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
- *xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * qib_get_counters - get various chip counters
- * @ppd: the port to get counters for
- * @cntrs: counters are placed here
- *
- * Return the counters needed by recv_pma_get_portcounters().
- */
-int qib_get_counters(struct qib_pportdata *ppd,
- struct qib_verbs_counters *cntrs)
-{
- int ret;
-
- if (!(ppd->dd->flags & QIB_PRESENT)) {
- /* no hardware, freeze, etc. */
- ret = -EINVAL;
- goto bail;
- }
- cntrs->symbol_error_counter =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
- cntrs->link_error_recovery_counter =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
- /*
- * The link downed counter counts when the other side downs the
- * connection. We add in the number of times we downed the link
- * due to local link integrity errors to compensate.
- */
- cntrs->link_downed_counter =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
- cntrs->port_rcv_errors =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
- cntrs->port_rcv_errors +=
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
- cntrs->port_rcv_errors +=
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
- cntrs->port_rcv_remphys_errors =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
- cntrs->port_xmit_discards =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
- cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
- QIBPORTCNTR_WORDSEND);
- cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
- QIBPORTCNTR_WORDRCV);
- cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
- QIBPORTCNTR_PKTSEND);
- cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
- QIBPORTCNTR_PKTRCV);
- cntrs->local_link_integrity_errors =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
- cntrs->excessive_buffer_overrun_errors =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
- cntrs->vl15_dropped =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * qib_ib_piobufavail - callback when a PIO buffer is available
- * @dd: the device pointer
- *
- * This is called from qib_intr() at interrupt level when a PIO buffer is
- * available after qib_verbs_send() returned an error that no buffers were
- * available. Disable the interrupt if there are no more QPs waiting.
- */
-void qib_ib_piobufavail(struct qib_devdata *dd)
-{
- struct qib_ibdev *dev = &dd->verbs_dev;
- struct list_head *list;
- struct qib_qp *qps[5];
- struct qib_qp *qp;
- unsigned long flags;
- unsigned i, n;
-
- list = &dev->piowait;
- n = 0;
-
- /*
- * Note: checking that the piowait list is empty and clearing
- * the buffer available interrupt needs to be atomic or we
- * could end up with QPs on the wait list with the interrupt
- * disabled.
- */
- spin_lock_irqsave(&dev->pending_lock, flags);
- while (!list_empty(list)) {
- if (n == ARRAY_SIZE(qps))
- goto full;
- qp = list_entry(list->next, struct qib_qp, iowait);
- list_del_init(&qp->iowait);
- atomic_inc(&qp->refcount);
- qps[n++] = qp;
- }
- dd->f_wantpiobuf_intr(dd, 0);
-full:
- spin_unlock_irqrestore(&dev->pending_lock, flags);
-
- for (i = 0; i < n; i++) {
- qp = qps[i];
-
- spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_flags & QIB_S_WAIT_PIO) {
- qp->s_flags &= ~QIB_S_WAIT_PIO;
- qib_schedule_send(qp);
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
- /* Notify qib_destroy_qp() if it is waiting. */
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
- }
-}
-
-static int qib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
- struct ib_udata *uhw)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_ibdev *dev = to_idev(ibdev);
-
- if (uhw->inlen || uhw->outlen)
- return -EINVAL;
- memset(props, 0, sizeof(*props));
-
- props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
- IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
- IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
- IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
- props->page_size_cap = PAGE_SIZE;
- props->vendor_id =
- QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
- props->vendor_part_id = dd->deviceid;
- props->hw_ver = dd->minrev;
- props->sys_image_guid = ib_qib_sys_image_guid;
- props->max_mr_size = ~0ULL;
- props->max_qp = ib_qib_max_qps;
- props->max_qp_wr = ib_qib_max_qp_wrs;
- props->max_sge = ib_qib_max_sges;
- props->max_sge_rd = ib_qib_max_sges;
- props->max_cq = ib_qib_max_cqs;
- props->max_ah = ib_qib_max_ahs;
- props->max_cqe = ib_qib_max_cqes;
- props->max_mr = dev->lk_table.max;
- props->max_fmr = dev->lk_table.max;
- props->max_map_per_fmr = 32767;
- props->max_pd = ib_qib_max_pds;
- props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
- props->max_qp_init_rd_atom = 255;
- /* props->max_res_rd_atom */
- props->max_srq = ib_qib_max_srqs;
- props->max_srq_wr = ib_qib_max_srq_wrs;
- props->max_srq_sge = ib_qib_max_srq_sges;
- /* props->local_ca_ack_delay */
- props->atomic_cap = IB_ATOMIC_GLOB;
- props->max_pkeys = qib_get_npkeys(dd);
- props->max_mcast_grp = ib_qib_max_mcast_grps;
- props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
- props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
- props->max_mcast_grp;
-
- return 0;
-}
-
-static int qib_query_port(struct ib_device *ibdev, u8 port,
- struct ib_port_attr *props)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- enum ib_mtu mtu;
- u16 lid = ppd->lid;
-
- memset(props, 0, sizeof(*props));
- props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
- props->lmc = ppd->lmc;
- props->sm_lid = ibp->sm_lid;
- props->sm_sl = ibp->sm_sl;
- props->state = dd->f_iblink_state(ppd->lastibcstat);
- props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
- props->port_cap_flags = ibp->port_cap_flags;
- props->gid_tbl_len = QIB_GUIDS_PER_PORT;
- props->max_msg_sz = 0x80000000;
- props->pkey_tbl_len = qib_get_npkeys(dd);
- props->bad_pkey_cntr = ibp->pkey_violations;
- props->qkey_viol_cntr = ibp->qkey_violations;
- props->active_width = ppd->link_width_active;
- /* See rate_show() */
- props->active_speed = ppd->link_speed_active;
- props->max_vl_num = qib_num_vls(ppd->vls_supported);
- props->init_type_reply = 0;
-
- props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
- switch (ppd->ibmtu) {
- case 4096:
- mtu = IB_MTU_4096;
- break;
- case 2048:
- mtu = IB_MTU_2048;
- break;
- case 1024:
- mtu = IB_MTU_1024;
- break;
- case 512:
- mtu = IB_MTU_512;
- break;
- case 256:
- mtu = IB_MTU_256;
- break;
- default:
- mtu = IB_MTU_2048;
- }
- props->active_mtu = mtu;
- props->subnet_timeout = ibp->subnet_timeout;
-
- return 0;
-}
-
-static int qib_modify_device(struct ib_device *device,
- int device_modify_mask,
- struct ib_device_modify *device_modify)
-{
- struct qib_devdata *dd = dd_from_ibdev(device);
- unsigned i;
- int ret;
-
- if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
- IB_DEVICE_MODIFY_NODE_DESC)) {
- ret = -EOPNOTSUPP;
- goto bail;
- }
-
- if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
- memcpy(device->node_desc, device_modify->node_desc, 64);
- for (i = 0; i < dd->num_pports; i++) {
- struct qib_ibport *ibp = &dd->pport[i].ibport_data;
-
- qib_node_desc_chg(ibp);
- }
- }
-
- if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
- ib_qib_sys_image_guid =
- cpu_to_be64(device_modify->sys_image_guid);
- for (i = 0; i < dd->num_pports; i++) {
- struct qib_ibport *ibp = &dd->pport[i].ibport_data;
-
- qib_sys_guid_chg(ibp);
- }
- }
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-static int qib_modify_port(struct ib_device *ibdev, u8 port,
- int port_modify_mask, struct ib_port_modify *props)
-{
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
-
- ibp->port_cap_flags |= props->set_port_cap_mask;
- ibp->port_cap_flags &= ~props->clr_port_cap_mask;
- if (props->set_port_cap_mask || props->clr_port_cap_mask)
- qib_cap_mask_chg(ibp);
- if (port_modify_mask & IB_PORT_SHUTDOWN)
- qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
- if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
- ibp->qkey_violations = 0;
- return 0;
-}
-
-static int qib_query_gid(struct ib_device *ibdev, u8 port,
- int index, union ib_gid *gid)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- int ret = 0;
-
- if (!port || port > dd->num_pports)
- ret = -EINVAL;
- else {
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
-
- gid->global.subnet_prefix = ibp->gid_prefix;
- if (index == 0)
- gid->global.interface_id = ppd->guid;
- else if (index < QIB_GUIDS_PER_PORT)
- gid->global.interface_id = ibp->guids[index - 1];
- else
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
-{
- struct qib_ibdev *dev = to_idev(ibdev);
- struct qib_pd *pd;
- struct ib_pd *ret;
-
- /*
- * This is actually totally arbitrary. Some correctness tests
- * assume there's a maximum number of PDs that can be allocated.
- * We don't actually have this limit, but we fail the test if
- * we allow allocations of more than we report for this value.
- */
-
- pd = kmalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- spin_lock(&dev->n_pds_lock);
- if (dev->n_pds_allocated == ib_qib_max_pds) {
- spin_unlock(&dev->n_pds_lock);
- kfree(pd);
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- dev->n_pds_allocated++;
- spin_unlock(&dev->n_pds_lock);
-
- /* ib_alloc_pd() will initialize pd->ibpd. */
- pd->user = udata != NULL;
-
- ret = &pd->ibpd;
-
-bail:
- return ret;
-}
-
-static int qib_dealloc_pd(struct ib_pd *ibpd)
-{
- struct qib_pd *pd = to_ipd(ibpd);
- struct qib_ibdev *dev = to_idev(ibpd->device);
-
- spin_lock(&dev->n_pds_lock);
- dev->n_pds_allocated--;
- spin_unlock(&dev->n_pds_lock);
-
- kfree(pd);
-
- return 0;
-}
-
-int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
-{
- /* A multicast address requires a GRH (see ch. 8.4.1). */
- if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
- ah_attr->dlid != QIB_PERMISSIVE_LID &&
- !(ah_attr->ah_flags & IB_AH_GRH))
- goto bail;
- if ((ah_attr->ah_flags & IB_AH_GRH) &&
- ah_attr->grh.sgid_index >= QIB_GUIDS_PER_PORT)
- goto bail;
- if (ah_attr->dlid == 0)
- goto bail;
- if (ah_attr->port_num < 1 ||
- ah_attr->port_num > ibdev->phys_port_cnt)
- goto bail;
- if (ah_attr->static_rate != IB_RATE_PORT_CURRENT &&
- ib_rate_to_mult(ah_attr->static_rate) < 0)
- goto bail;
- if (ah_attr->sl > 15)
- goto bail;
- return 0;
-bail:
- return -EINVAL;
-}
-
-/**
- * qib_create_ah - create an address handle
- * @pd: the protection domain
- * @ah_attr: the attributes of the AH
- *
- * This may be called from interrupt context.
- */
-static struct ib_ah *qib_create_ah(struct ib_pd *pd,
- struct ib_ah_attr *ah_attr)
-{
- struct qib_ah *ah;
- struct ib_ah *ret;
- struct qib_ibdev *dev = to_idev(pd->device);
- unsigned long flags;
-
- if (qib_check_ah(pd->device, ah_attr)) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
-
- ah = kmalloc(sizeof(*ah), GFP_ATOMIC);
- if (!ah) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- spin_lock_irqsave(&dev->n_ahs_lock, flags);
- if (dev->n_ahs_allocated == ib_qib_max_ahs) {
- spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
- kfree(ah);
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- dev->n_ahs_allocated++;
- spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
-
- /* ib_create_ah() will initialize ah->ibah. */
- ah->attr = *ah_attr;
- atomic_set(&ah->refcount, 0);
-
- ret = &ah->ibah;
-
-bail:
- return ret;
-}
-
-struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
-{
- struct ib_ah_attr attr;
- struct ib_ah *ah = ERR_PTR(-EINVAL);
- struct qib_qp *qp0;
-
- memset(&attr, 0, sizeof(attr));
- attr.dlid = dlid;
- attr.port_num = ppd_from_ibp(ibp)->port;
- rcu_read_lock();
- qp0 = rcu_dereference(ibp->qp0);
- if (qp0)
- ah = ib_create_ah(qp0->ibqp.pd, &attr);
- rcu_read_unlock();
- return ah;
-}
-
-/**
- * qib_destroy_ah - destroy an address handle
- * @ibah: the AH to destroy
- *
- * This may be called from interrupt context.
- */
-static int qib_destroy_ah(struct ib_ah *ibah)
-{
- struct qib_ibdev *dev = to_idev(ibah->device);
- struct qib_ah *ah = to_iah(ibah);
- unsigned long flags;
-
- if (atomic_read(&ah->refcount) != 0)
- return -EBUSY;
-
- spin_lock_irqsave(&dev->n_ahs_lock, flags);
- dev->n_ahs_allocated--;
- spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
-
- kfree(ah);
-
- return 0;
-}
-
-static int qib_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
-{
- struct qib_ah *ah = to_iah(ibah);
-
- if (qib_check_ah(ibah->device, ah_attr))
- return -EINVAL;
-
- ah->attr = *ah_attr;
-
- return 0;
-}
-
-static int qib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
-{
- struct qib_ah *ah = to_iah(ibah);
-
- *ah_attr = ah->attr;
-
- return 0;
-}
-
-/**
- * qib_get_npkeys - return the size of the PKEY table for context 0
- * @dd: the qlogic_ib device
- */
-unsigned qib_get_npkeys(struct qib_devdata *dd)
-{
- return ARRAY_SIZE(dd->rcd[0]->pkeys);
-}
-
-/*
- * Return the indexed PKEY from the port PKEY table.
- * No need to validate rcd[ctxt]; the port is set up if we are here.
- */
-unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
-{
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_devdata *dd = ppd->dd;
- unsigned ctxt = ppd->hw_pidx;
- unsigned ret;
-
- /* dd->rcd null if mini_init or some init failures */
- if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
- ret = 0;
- else
- ret = dd->rcd[ctxt]->pkeys[index];
-
- return ret;
-}
-
-static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
- u16 *pkey)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- int ret;
-
- if (index >= qib_get_npkeys(dd)) {
- ret = -EINVAL;
- goto bail;
- }
-
- *pkey = qib_get_pkey(to_iport(ibdev, port), index);
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * qib_alloc_ucontext - allocate a ucontext
- * @ibdev: the infiniband device
- * @udata: not used by the QLogic_IB driver
- */
-
-static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
-{
- struct qib_ucontext *context;
- struct ib_ucontext *ret;
-
- context = kmalloc(sizeof(*context), GFP_KERNEL);
- if (!context) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- ret = &context->ibucontext;
-
-bail:
- return ret;
-}
-
-static int qib_dealloc_ucontext(struct ib_ucontext *context)
-{
- kfree(to_iucontext(context));
- return 0;
-}
-
-static void init_ibport(struct qib_pportdata *ppd)
-{
- struct qib_verbs_counters cntrs;
- struct qib_ibport *ibp = &ppd->ibport_data;
-
- spin_lock_init(&ibp->lock);
- /* Set the prefix to the default value (see ch. 4.1.1) */
- ibp->gid_prefix = IB_DEFAULT_GID_PREFIX;
- ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
- ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
- IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
- IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
- IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
- IB_PORT_OTHER_LOCAL_CHANGES_SUP;
- if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
- ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
- ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
- ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
- ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
- ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
- ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
-
- /* Snapshot current HW counters to "clear" them. */
- qib_get_counters(ppd, &cntrs);
- ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
- ibp->z_link_error_recovery_counter =
- cntrs.link_error_recovery_counter;
- ibp->z_link_downed_counter = cntrs.link_downed_counter;
- ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
- ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
- ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
- ibp->z_port_xmit_data = cntrs.port_xmit_data;
- ibp->z_port_rcv_data = cntrs.port_rcv_data;
- ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
- ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
- ibp->z_local_link_integrity_errors =
- cntrs.local_link_integrity_errors;
- ibp->z_excessive_buffer_overrun_errors =
- cntrs.excessive_buffer_overrun_errors;
- ibp->z_vl15_dropped = cntrs.vl15_dropped;
- RCU_INIT_POINTER(ibp->qp0, NULL);
- RCU_INIT_POINTER(ibp->qp1, NULL);
-}
-
-static int qib_port_immutable(struct ib_device *ibdev, u8 port_num,
- struct ib_port_immutable *immutable)
-{
- struct ib_port_attr attr;
- int err;
-
- err = qib_query_port(ibdev, port_num, &attr);
- if (err)
- return err;
-
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
- immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
- immutable->max_mad_size = IB_MGMT_MAD_SIZE;
-
- return 0;
-}
-
-/**
- * qib_register_ib_device - register our device with the infiniband core
- * @dd: the device data structure
- * Return the allocated qib_ibdev pointer or NULL on error.
- */
-int qib_register_ib_device(struct qib_devdata *dd)
-{
- struct qib_ibdev *dev = &dd->verbs_dev;
- struct ib_device *ibdev = &dev->ibdev;
- struct qib_pportdata *ppd = dd->pport;
- unsigned i, lk_tab_size;
- int ret;
-
- dev->qp_table_size = ib_qib_qp_table_size;
- get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
- dev->qp_table = kmalloc_array(
- dev->qp_table_size,
- sizeof(*dev->qp_table),
- GFP_KERNEL);
- if (!dev->qp_table) {
- ret = -ENOMEM;
- goto err_qpt;
- }
- for (i = 0; i < dev->qp_table_size; i++)
- RCU_INIT_POINTER(dev->qp_table[i], NULL);
-
- for (i = 0; i < dd->num_pports; i++)
- init_ibport(ppd + i);
-
- /* Only need to initialize non-zero fields. */
- spin_lock_init(&dev->qpt_lock);
- spin_lock_init(&dev->n_pds_lock);
- spin_lock_init(&dev->n_ahs_lock);
- spin_lock_init(&dev->n_cqs_lock);
- spin_lock_init(&dev->n_qps_lock);
- spin_lock_init(&dev->n_srqs_lock);
- spin_lock_init(&dev->n_mcast_grps_lock);
- init_timer(&dev->mem_timer);
- dev->mem_timer.function = mem_timer;
- dev->mem_timer.data = (unsigned long) dev;
-
- qib_init_qpn_table(dd, &dev->qpn_table);
-
- /*
- * The top ib_qib_lkey_table_size bits are used to index the
- * table. The lower 8 bits can be owned by the user (copied from
- * the LKEY). The remaining bits act as a generation number or tag.
- */
- spin_lock_init(&dev->lk_table.lock);
- /* ensure generation is at least 4 bits; see keys.c */
- if (ib_qib_lkey_table_size > MAX_LKEY_TABLE_BITS) {
- qib_dev_warn(dd, "lkey bits %u too large, reduced to %u\n",
- ib_qib_lkey_table_size, MAX_LKEY_TABLE_BITS);
- ib_qib_lkey_table_size = MAX_LKEY_TABLE_BITS;
- }
- dev->lk_table.max = 1 << ib_qib_lkey_table_size;
- lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
- dev->lk_table.table = (struct qib_mregion __rcu **)
- vmalloc(lk_tab_size);
- if (dev->lk_table.table == NULL) {
- ret = -ENOMEM;
- goto err_lk;
- }
- RCU_INIT_POINTER(dev->dma_mr, NULL);
- for (i = 0; i < dev->lk_table.max; i++)
- RCU_INIT_POINTER(dev->lk_table.table[i], NULL);
- INIT_LIST_HEAD(&dev->pending_mmaps);
- spin_lock_init(&dev->pending_lock);
- dev->mmap_offset = PAGE_SIZE;
- spin_lock_init(&dev->mmap_offset_lock);
- INIT_LIST_HEAD(&dev->piowait);
- INIT_LIST_HEAD(&dev->dmawait);
- INIT_LIST_HEAD(&dev->txwait);
- INIT_LIST_HEAD(&dev->memwait);
- INIT_LIST_HEAD(&dev->txreq_free);
-
- if (ppd->sdma_descq_cnt) {
- dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
- ppd->sdma_descq_cnt *
- sizeof(struct qib_pio_header),
- &dev->pio_hdrs_phys,
- GFP_KERNEL);
- if (!dev->pio_hdrs) {
- ret = -ENOMEM;
- goto err_hdrs;
- }
- }
-
- for (i = 0; i < ppd->sdma_descq_cnt; i++) {
- struct qib_verbs_txreq *tx;
-
- tx = kzalloc(sizeof(*tx), GFP_KERNEL);
- if (!tx) {
- ret = -ENOMEM;
- goto err_tx;
- }
- tx->hdr_inx = i;
- list_add(&tx->txreq.list, &dev->txreq_free);
- }
-
- /*
- * The system image GUID is supposed to be the same for all
- * IB HCAs in a single system, but since there can be other
- * device types in the system, we can't be sure this is unique.
- */
- if (!ib_qib_sys_image_guid)
- ib_qib_sys_image_guid = ppd->guid;
-
- strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
- ibdev->owner = THIS_MODULE;
- ibdev->node_guid = ppd->guid;
- ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION;
- ibdev->uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
- (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
- (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_POST_SEND) |
- (1ull << IB_USER_VERBS_CMD_POST_RECV) |
- (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
- ibdev->node_type = RDMA_NODE_IB_CA;
- ibdev->phys_port_cnt = dd->num_pports;
- ibdev->num_comp_vectors = 1;
- ibdev->dma_device = &dd->pcidev->dev;
- ibdev->query_device = qib_query_device;
- ibdev->modify_device = qib_modify_device;
- ibdev->query_port = qib_query_port;
- ibdev->modify_port = qib_modify_port;
- ibdev->query_pkey = qib_query_pkey;
- ibdev->query_gid = qib_query_gid;
- ibdev->alloc_ucontext = qib_alloc_ucontext;
- ibdev->dealloc_ucontext = qib_dealloc_ucontext;
- ibdev->alloc_pd = qib_alloc_pd;
- ibdev->dealloc_pd = qib_dealloc_pd;
- ibdev->create_ah = qib_create_ah;
- ibdev->destroy_ah = qib_destroy_ah;
- ibdev->modify_ah = qib_modify_ah;
- ibdev->query_ah = qib_query_ah;
- ibdev->create_srq = qib_create_srq;
- ibdev->modify_srq = qib_modify_srq;
- ibdev->query_srq = qib_query_srq;
- ibdev->destroy_srq = qib_destroy_srq;
- ibdev->create_qp = qib_create_qp;
- ibdev->modify_qp = qib_modify_qp;
- ibdev->query_qp = qib_query_qp;
- ibdev->destroy_qp = qib_destroy_qp;
- ibdev->post_send = qib_post_send;
- ibdev->post_recv = qib_post_receive;
- ibdev->post_srq_recv = qib_post_srq_receive;
- ibdev->create_cq = qib_create_cq;
- ibdev->destroy_cq = qib_destroy_cq;
- ibdev->resize_cq = qib_resize_cq;
- ibdev->poll_cq = qib_poll_cq;
- ibdev->req_notify_cq = qib_req_notify_cq;
- ibdev->get_dma_mr = qib_get_dma_mr;
- ibdev->reg_phys_mr = qib_reg_phys_mr;
- ibdev->reg_user_mr = qib_reg_user_mr;
- ibdev->dereg_mr = qib_dereg_mr;
- ibdev->alloc_mr = qib_alloc_mr;
- ibdev->alloc_fast_reg_page_list = qib_alloc_fast_reg_page_list;
- ibdev->free_fast_reg_page_list = qib_free_fast_reg_page_list;
- ibdev->alloc_fmr = qib_alloc_fmr;
- ibdev->map_phys_fmr = qib_map_phys_fmr;
- ibdev->unmap_fmr = qib_unmap_fmr;
- ibdev->dealloc_fmr = qib_dealloc_fmr;
- ibdev->attach_mcast = qib_multicast_attach;
- ibdev->detach_mcast = qib_multicast_detach;
- ibdev->process_mad = qib_process_mad;
- ibdev->mmap = qib_mmap;
- ibdev->dma_ops = &qib_dma_mapping_ops;
- ibdev->get_port_immutable = qib_port_immutable;
-
- snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
- "Intel Infiniband HCA %s", init_utsname()->nodename);
-
- ret = ib_register_device(ibdev, qib_create_port_files);
- if (ret)
- goto err_reg;
-
- ret = qib_create_agents(dev);
- if (ret)
- goto err_agents;
-
- ret = qib_verbs_register_sysfs(dd);
- if (ret)
- goto err_class;
-
- goto bail;
-
-err_class:
- qib_free_agents(dev);
-err_agents:
- ib_unregister_device(ibdev);
-err_reg:
-err_tx:
- while (!list_empty(&dev->txreq_free)) {
- struct list_head *l = dev->txreq_free.next;
- struct qib_verbs_txreq *tx;
-
- list_del(l);
- tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
- kfree(tx);
- }
- if (ppd->sdma_descq_cnt)
- dma_free_coherent(&dd->pcidev->dev,
- ppd->sdma_descq_cnt *
- sizeof(struct qib_pio_header),
- dev->pio_hdrs, dev->pio_hdrs_phys);
-err_hdrs:
- vfree(dev->lk_table.table);
-err_lk:
- kfree(dev->qp_table);
-err_qpt:
- qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
-bail:
- return ret;
-}
-
-void qib_unregister_ib_device(struct qib_devdata *dd)
-{
- struct qib_ibdev *dev = &dd->verbs_dev;
- struct ib_device *ibdev = &dev->ibdev;
- u32 qps_inuse;
- unsigned lk_tab_size;
-
- qib_verbs_unregister_sysfs(dd);
-
- qib_free_agents(dev);
-
- ib_unregister_device(ibdev);
-
- if (!list_empty(&dev->piowait))
- qib_dev_err(dd, "piowait list not empty!\n");
- if (!list_empty(&dev->dmawait))
- qib_dev_err(dd, "dmawait list not empty!\n");
- if (!list_empty(&dev->txwait))
- qib_dev_err(dd, "txwait list not empty!\n");
- if (!list_empty(&dev->memwait))
- qib_dev_err(dd, "memwait list not empty!\n");
- if (dev->dma_mr)
- qib_dev_err(dd, "DMA MR not NULL!\n");
-
- qps_inuse = qib_free_all_qps(dd);
- if (qps_inuse)
- qib_dev_err(dd, "QP memory leak! %u still in use\n",
- qps_inuse);
-
- del_timer_sync(&dev->mem_timer);
- qib_free_qpn_table(&dev->qpn_table);
- while (!list_empty(&dev->txreq_free)) {
- struct list_head *l = dev->txreq_free.next;
- struct qib_verbs_txreq *tx;
-
- list_del(l);
- tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
- kfree(tx);
- }
- if (dd->pport->sdma_descq_cnt)
- dma_free_coherent(&dd->pcidev->dev,
- dd->pport->sdma_descq_cnt *
- sizeof(struct qib_pio_header),
- dev->pio_hdrs, dev->pio_hdrs_phys);
- lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
- vfree(dev->lk_table.table);
- kfree(dev->qp_table);
-}
-
-/*
- * This must be called with s_lock held.
- */
-void qib_schedule_send(struct qib_qp *qp)
-{
- if (qib_send_ok(qp)) {
- struct qib_ibport *ibp =
- to_iport(qp->ibqp.device, qp->port_num);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
-
- queue_work(ppd->qib_wq, &qp->s_work);
- }
-}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
deleted file mode 100644
index a08df70e8503..000000000000
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ /dev/null
@@ -1,1180 +0,0 @@
-/*
- * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef QIB_VERBS_H
-#define QIB_VERBS_H
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/kref.h>
-#include <linux/workqueue.h>
-#include <linux/kthread.h>
-#include <linux/completion.h>
-#include <rdma/ib_pack.h>
-#include <rdma/ib_user_verbs.h>
-
-struct qib_ctxtdata;
-struct qib_pportdata;
-struct qib_devdata;
-struct qib_verbs_txreq;
-
-#define QIB_MAX_RDMA_ATOMIC 16
-#define QIB_GUIDS_PER_PORT 5
-
-#define QPN_MAX (1 << 24)
-#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
-
-/*
- * Increment this value if any changes that break userspace ABI
- * compatibility are made.
- */
-#define QIB_UVERBS_ABI_VERSION 2
-
-/*
- * Define an ib_cq_notify value that is not valid so we know when CQ
- * notifications are armed.
- */
-#define IB_CQ_NONE (IB_CQ_NEXT_COMP + 1)
-
-#define IB_SEQ_NAK (3 << 29)
-
-/* AETH NAK opcode values */
-#define IB_RNR_NAK 0x20
-#define IB_NAK_PSN_ERROR 0x60
-#define IB_NAK_INVALID_REQUEST 0x61
-#define IB_NAK_REMOTE_ACCESS_ERROR 0x62
-#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
-#define IB_NAK_INVALID_RD_REQUEST 0x64
-
-/* Flags for checking QP state (see ib_qib_state_ops[]) */
-#define QIB_POST_SEND_OK 0x01
-#define QIB_POST_RECV_OK 0x02
-#define QIB_PROCESS_RECV_OK 0x04
-#define QIB_PROCESS_SEND_OK 0x08
-#define QIB_PROCESS_NEXT_SEND_OK 0x10
-#define QIB_FLUSH_SEND 0x20
-#define QIB_FLUSH_RECV 0x40
-#define QIB_PROCESS_OR_FLUSH_SEND \
- (QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND)
-
-/* IB Performance Manager status values */
-#define IB_PMA_SAMPLE_STATUS_DONE 0x00
-#define IB_PMA_SAMPLE_STATUS_STARTED 0x01
-#define IB_PMA_SAMPLE_STATUS_RUNNING 0x02
-
-/* Mandatory IB performance counter select values. */
-#define IB_PMA_PORT_XMIT_DATA cpu_to_be16(0x0001)
-#define IB_PMA_PORT_RCV_DATA cpu_to_be16(0x0002)
-#define IB_PMA_PORT_XMIT_PKTS cpu_to_be16(0x0003)
-#define IB_PMA_PORT_RCV_PKTS cpu_to_be16(0x0004)
-#define IB_PMA_PORT_XMIT_WAIT cpu_to_be16(0x0005)
-
-#define QIB_VENDOR_IPG cpu_to_be16(0xFFA0)
-
-#define IB_BTH_REQ_ACK (1 << 31)
-#define IB_BTH_SOLICITED (1 << 23)
-#define IB_BTH_MIG_REQ (1 << 22)
-
-/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */
-#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26)
-
-#define IB_GRH_VERSION 6
-#define IB_GRH_VERSION_MASK 0xF
-#define IB_GRH_VERSION_SHIFT 28
-#define IB_GRH_TCLASS_MASK 0xFF
-#define IB_GRH_TCLASS_SHIFT 20
-#define IB_GRH_FLOW_MASK 0xFFFFF
-#define IB_GRH_FLOW_SHIFT 0
-#define IB_GRH_NEXT_HDR 0x1B
-
-#define IB_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL)
-
-/* Values for set/get portinfo VLCap OperationalVLs */
-#define IB_VL_VL0 1
-#define IB_VL_VL0_1 2
-#define IB_VL_VL0_3 3
-#define IB_VL_VL0_7 4
-#define IB_VL_VL0_14 5
-
-static inline int qib_num_vls(int vls)
-{
- switch (vls) {
- default:
- case IB_VL_VL0:
- return 1;
- case IB_VL_VL0_1:
- return 2;
- case IB_VL_VL0_3:
- return 4;
- case IB_VL_VL0_7:
- return 8;
- case IB_VL_VL0_14:
- return 15;
- }
-}
-
-struct ib_reth {
- __be64 vaddr;
- __be32 rkey;
- __be32 length;
-} __packed;
-
-struct ib_atomic_eth {
- __be32 vaddr[2]; /* unaligned so access as 2 32-bit words */
- __be32 rkey;
- __be64 swap_data;
- __be64 compare_data;
-} __packed;
-
-struct qib_other_headers {
- __be32 bth[3];
- union {
- struct {
- __be32 deth[2];
- __be32 imm_data;
- } ud;
- struct {
- struct ib_reth reth;
- __be32 imm_data;
- } rc;
- struct {
- __be32 aeth;
- __be32 atomic_ack_eth[2];
- } at;
- __be32 imm_data;
- __be32 aeth;
- struct ib_atomic_eth atomic_eth;
- } u;
-} __packed;
-
-/*
- * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
- * long (72 w/ imm_data). Only the first 56 bytes of the IB header
- * will be in the eager header buffer. The remaining 12 or 16 bytes
- * are in the data buffer.
- */
-struct qib_ib_header {
- __be16 lrh[4];
- union {
- struct {
- struct ib_grh grh;
- struct qib_other_headers oth;
- } l;
- struct qib_other_headers oth;
- } u;
-} __packed;
-
-struct qib_pio_header {
- __le32 pbc[2];
- struct qib_ib_header hdr;
-} __packed;
-
-/*
- * There is one struct qib_mcast for each multicast GID.
- * All attached QPs are then stored as a list of
- * struct qib_mcast_qp.
- */
-struct qib_mcast_qp {
- struct list_head list;
- struct qib_qp *qp;
-};
-
-struct qib_mcast {
- struct rb_node rb_node;
- union ib_gid mgid;
- struct list_head qp_list;
- wait_queue_head_t wait;
- atomic_t refcount;
- int n_attached;
-};
-
-/* Protection domain */
-struct qib_pd {
- struct ib_pd ibpd;
- int user; /* non-zero if created from user space */
-};
-
-/* Address Handle */
-struct qib_ah {
- struct ib_ah ibah;
- struct ib_ah_attr attr;
- atomic_t refcount;
-};
-
-/*
- * This structure is used by qib_mmap() to validate an offset
- * when an mmap() request is made. The vm_area_struct then uses
- * this as its vm_private_data.
- */
-struct qib_mmap_info {
- struct list_head pending_mmaps;
- struct ib_ucontext *context;
- void *obj;
- __u64 offset;
- struct kref ref;
- unsigned size;
-};
-
-/*
- * This structure is used to contain the head pointer, tail pointer,
- * and completion queue entries as a single memory allocation so
- * it can be mmap'ed into user space.
- */
-struct qib_cq_wc {
- u32 head; /* index of next entry to fill */
- u32 tail; /* index of next ib_poll_cq() entry */
- union {
- /* these are actually size ibcq.cqe + 1 */
- struct ib_uverbs_wc uqueue[0];
- struct ib_wc kqueue[0];
- };
-};
-
-/*
- * The completion queue structure.
- */
-struct qib_cq {
- struct ib_cq ibcq;
- struct kthread_work comptask;
- struct qib_devdata *dd;
- spinlock_t lock; /* protect changes in this struct */
- u8 notify;
- u8 triggered;
- struct qib_cq_wc *queue;
- struct qib_mmap_info *ip;
-};
-
-/*
- * A segment is a linear region of low physical memory.
- * XXX Maybe we should use phys addr here and kmap()/kunmap().
- * Used by the verbs layer.
- */
-struct qib_seg {
- void *vaddr;
- size_t length;
-};
-
-/* The number of qib_segs that fit in a page. */
-#define QIB_SEGSZ (PAGE_SIZE / sizeof(struct qib_seg))
-
-struct qib_segarray {
- struct qib_seg segs[QIB_SEGSZ];
-};
-
-struct qib_mregion {
- struct ib_pd *pd; /* shares refcnt of ibmr.pd */
- u64 user_base; /* User's address for this region */
- u64 iova; /* IB start address of this region */
- size_t length;
- u32 lkey;
- u32 offset; /* offset (bytes) to start of region */
- int access_flags;
- u32 max_segs; /* number of qib_segs in all the arrays */
- u32 mapsz; /* size of the map array */
- u8 page_shift; /* 0 - non-uniform/non-power-of-2 sizes */
- u8 lkey_published; /* in global table */
- struct completion comp; /* complete when refcount goes to zero */
- struct rcu_head list;
- atomic_t refcount;
- struct qib_segarray *map[0]; /* the segments */
-};
-
-/*
- * These keep track of the copy progress within a memory region.
- * Used by the verbs layer.
- */
-struct qib_sge {
- struct qib_mregion *mr;
- void *vaddr; /* kernel virtual address of segment */
- u32 sge_length; /* length of the SGE */
- u32 length; /* remaining length of the segment */
- u16 m; /* current index: mr->map[m] */
- u16 n; /* current index: mr->map[m]->segs[n] */
-};
-
-/* Memory region */
-struct qib_mr {
- struct ib_mr ibmr;
- struct ib_umem *umem;
- struct qib_mregion mr; /* must be last */
-};
-
-/*
- * Send work request queue entry.
- * The size of the sg_list is determined when the QP is created and stored
- * in qp->s_max_sge.
- */
-struct qib_swqe {
- struct ib_send_wr wr; /* don't use wr.sg_list */
- u32 psn; /* first packet sequence number */
- u32 lpsn; /* last packet sequence number */
- u32 ssn; /* send sequence number */
- u32 length; /* total length of data in sg_list */
- struct qib_sge sg_list[0];
-};
-
-/*
- * Receive work request queue entry.
- * The size of the sg_list is determined when the QP (or SRQ) is created
- * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
- */
-struct qib_rwqe {
- u64 wr_id;
- u8 num_sge;
- struct ib_sge sg_list[0];
-};
-
-/*
- * This structure is used to contain the head pointer, tail pointer,
- * and receive work queue entries as a single memory allocation so
- * it can be mmap'ed into user space.
- * Note that the wq array elements are variable size so you can't
- * just index into the array to get the N'th element;
- * use get_rwqe_ptr() instead.
- */
-struct qib_rwq {
- u32 head; /* new work requests posted to the head */
- u32 tail; /* receives pull requests from here. */
- struct qib_rwqe wq[0];
-};
-
-struct qib_rq {
- struct qib_rwq *wq;
- u32 size; /* size of RWQE array */
- u8 max_sge;
- spinlock_t lock /* protect changes in this struct */
- ____cacheline_aligned_in_smp;
-};
-
-struct qib_srq {
- struct ib_srq ibsrq;
- struct qib_rq rq;
- struct qib_mmap_info *ip;
- /* send signal when number of RWQEs < limit */
- u32 limit;
-};
-
-struct qib_sge_state {
- struct qib_sge *sg_list; /* next SGE to be used if any */
- struct qib_sge sge; /* progress state for the current SGE */
- u32 total_len;
- u8 num_sge;
-};
-
-/*
- * This structure holds the information that the send tasklet needs
- * to send an RDMA read response or atomic operation.
- */
-struct qib_ack_entry {
- u8 opcode;
- u8 sent;
- u32 psn;
- u32 lpsn;
- union {
- struct qib_sge rdma_sge;
- u64 atomic_data;
- };
-};
-
-/*
- * Variables prefixed with s_ are for the requester (sender).
- * Variables prefixed with r_ are for the responder (receiver).
- * Variables prefixed with ack_ are for responder replies.
- *
- * Common variables are protected by both r_rq.lock and s_lock in that order
- * which only happens in modify_qp() or changing the QP 'state'.
- */
-struct qib_qp {
- struct ib_qp ibqp;
- /* read mostly fields above and below */
- struct ib_ah_attr remote_ah_attr;
- struct ib_ah_attr alt_ah_attr;
- struct qib_qp __rcu *next; /* link list for QPN hash table */
- struct qib_swqe *s_wq; /* send work queue */
- struct qib_mmap_info *ip;
- struct qib_ib_header *s_hdr; /* next packet header to send */
- unsigned long timeout_jiffies; /* computed from timeout */
-
- enum ib_mtu path_mtu;
- u32 remote_qpn;
- u32 pmtu; /* decoded from path_mtu */
- u32 qkey; /* QKEY for this QP (for UD or RD) */
- u32 s_size; /* send work queue size */
- u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */
-
- u8 state; /* QP state */
- u8 qp_access_flags;
- u8 alt_timeout; /* Alternate path timeout for this QP */
- u8 timeout; /* Timeout for this QP */
- u8 s_srate;
- u8 s_mig_state;
- u8 port_num;
- u8 s_pkey_index; /* PKEY index to use */
- u8 s_alt_pkey_index; /* Alternate path PKEY index to use */
- u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */
- u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */
- u8 s_retry_cnt; /* number of times to retry */
- u8 s_rnr_retry_cnt;
- u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
- u8 s_max_sge; /* size of s_wq->sg_list */
- u8 s_draining;
-
- /* start of read/write fields */
-
- atomic_t refcount ____cacheline_aligned_in_smp;
- wait_queue_head_t wait;
-
-
- struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1]
- ____cacheline_aligned_in_smp;
- struct qib_sge_state s_rdma_read_sge;
-
- spinlock_t r_lock ____cacheline_aligned_in_smp; /* used for APM */
- unsigned long r_aflags;
- u64 r_wr_id; /* ID for current receive WQE */
- u32 r_ack_psn; /* PSN for next ACK or atomic ACK */
- u32 r_len; /* total length of r_sge */
- u32 r_rcv_len; /* receive data len processed */
- u32 r_psn; /* expected rcv packet sequence number */
- u32 r_msn; /* message sequence number */
-
- u8 r_state; /* opcode of last packet received */
- u8 r_flags;
- u8 r_head_ack_queue; /* index into s_ack_queue[] */
-
- struct list_head rspwait; /* link for waiting to respond */
-
- struct qib_sge_state r_sge; /* current receive data */
- struct qib_rq r_rq; /* receive work queue */
-
- spinlock_t s_lock ____cacheline_aligned_in_smp;
- struct qib_sge_state *s_cur_sge;
- u32 s_flags;
- struct qib_verbs_txreq *s_tx;
- struct qib_swqe *s_wqe;
- struct qib_sge_state s_sge; /* current send request data */
- struct qib_mregion *s_rdma_mr;
- atomic_t s_dma_busy;
- u32 s_cur_size; /* size of send packet in bytes */
- u32 s_len; /* total length of s_sge */
- u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
- u32 s_next_psn; /* PSN for next request */
- u32 s_last_psn; /* last response PSN processed */
- u32 s_sending_psn; /* lowest PSN that is being sent */
- u32 s_sending_hpsn; /* highest PSN that is being sent */
- u32 s_psn; /* current packet sequence number */
- u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */
- u32 s_ack_psn; /* PSN for acking sends and RDMA writes */
- u32 s_head; /* new entries added here */
- u32 s_tail; /* next entry to process */
- u32 s_cur; /* current work queue entry */
- u32 s_acked; /* last un-ACK'ed entry */
- u32 s_last; /* last completed entry */
- u32 s_ssn; /* SSN of tail entry */
- u32 s_lsn; /* limit sequence number (credit) */
- u16 s_hdrwords; /* size of s_hdr in 32 bit words */
- u16 s_rdma_ack_cnt;
- u8 s_state; /* opcode of last packet sent */
- u8 s_ack_state; /* opcode of packet to ACK */
- u8 s_nak_state; /* non-zero if NAK is pending */
- u8 r_nak_state; /* non-zero if NAK is pending */
- u8 s_retry; /* requester retry counter */
- u8 s_rnr_retry; /* requester RNR retry counter */
- u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */
- u8 s_tail_ack_queue; /* index into s_ack_queue[] */
-
- struct qib_sge_state s_ack_rdma_sge;
- struct timer_list s_timer;
- struct list_head iowait; /* link for wait PIO buf */
-
- struct work_struct s_work;
-
- wait_queue_head_t wait_dma;
-
- struct qib_sge r_sg_list[0] /* verified SGEs */
- ____cacheline_aligned_in_smp;
-};
-
-/*
- * Atomic bit definitions for r_aflags.
- */
-#define QIB_R_WRID_VALID 0
-#define QIB_R_REWIND_SGE 1
-
-/*
- * Bit definitions for r_flags.
- */
-#define QIB_R_REUSE_SGE 0x01
-#define QIB_R_RDMAR_SEQ 0x02
-#define QIB_R_RSP_NAK 0x04
-#define QIB_R_RSP_SEND 0x08
-#define QIB_R_COMM_EST 0x10
-
-/*
- * Bit definitions for s_flags.
- *
- * QIB_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
- * QIB_S_BUSY - send tasklet is processing the QP
- * QIB_S_TIMER - the RC retry timer is active
- * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
- * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
- * before processing the next SWQE
- * QIB_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
- * before processing the next SWQE
- * QIB_S_WAIT_RNR - waiting for RNR timeout
- * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
- * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating
- * next send completion entry not via send DMA
- * QIB_S_WAIT_PIO - waiting for a send buffer to be available
- * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
- * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
- * QIB_S_WAIT_KMEM - waiting for kernel memory to be available
- * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
- * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
- * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
- */
-#define QIB_S_SIGNAL_REQ_WR 0x0001
-#define QIB_S_BUSY 0x0002
-#define QIB_S_TIMER 0x0004
-#define QIB_S_RESP_PENDING 0x0008
-#define QIB_S_ACK_PENDING 0x0010
-#define QIB_S_WAIT_FENCE 0x0020
-#define QIB_S_WAIT_RDMAR 0x0040
-#define QIB_S_WAIT_RNR 0x0080
-#define QIB_S_WAIT_SSN_CREDIT 0x0100
-#define QIB_S_WAIT_DMA 0x0200
-#define QIB_S_WAIT_PIO 0x0400
-#define QIB_S_WAIT_TX 0x0800
-#define QIB_S_WAIT_DMA_DESC 0x1000
-#define QIB_S_WAIT_KMEM 0x2000
-#define QIB_S_WAIT_PSN 0x4000
-#define QIB_S_WAIT_ACK 0x8000
-#define QIB_S_SEND_ONE 0x10000
-#define QIB_S_UNLIMITED_CREDIT 0x20000
-
-/*
- * Wait flags that would prevent any packet type from being sent.
- */
-#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
- QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)
-
-/*
- * Wait flags that would prevent send work requests from making progress.
- */
-#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
- QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
- QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)
-
-#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)
-
-#define QIB_PSN_CREDIT 16
-
-/*
- * Since struct qib_swqe is not a fixed size, we can't simply index into
- * struct qib_qp.s_wq. This function does the array index computation.
- */
-static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp,
- unsigned n)
-{
- return (struct qib_swqe *)((char *)qp->s_wq +
- (sizeof(struct qib_swqe) +
- qp->s_max_sge *
- sizeof(struct qib_sge)) * n);
-}
-
-/*
- * Since struct qib_rwqe is not a fixed size, we can't simply index into
- * struct qib_rwq.wq. This function does the array index computation.
- */
-static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n)
-{
- return (struct qib_rwqe *)
- ((char *) rq->wq->wq +
- (sizeof(struct qib_rwqe) +
- rq->max_sge * sizeof(struct ib_sge)) * n);
-}
-
-/*
- * QPN-map pages start out as NULL; they get allocated upon
- * first use and are never deallocated. This way,
- * large bitmaps are not allocated unless large numbers of QPs are used.
- */
-struct qpn_map {
- void *page;
-};
-
-struct qib_qpn_table {
- spinlock_t lock; /* protect changes in this struct */
- unsigned flags; /* flags for QP0/1 allocated for each port */
- u32 last; /* last QP number allocated */
- u32 nmaps; /* size of the map table */
- u16 limit;
- u16 mask;
- /* bit map of free QP numbers other than 0/1 */
- struct qpn_map map[QPNMAP_ENTRIES];
-};
-
-#define MAX_LKEY_TABLE_BITS 23
-
-struct qib_lkey_table {
- spinlock_t lock; /* protect changes in this struct */
- u32 next; /* next unused index (speeds search) */
- u32 gen; /* generation count */
- u32 max; /* size of the table */
- struct qib_mregion __rcu **table;
-};
-
-struct qib_opcode_stats {
- u64 n_packets; /* number of packets */
- u64 n_bytes; /* total number of bytes */
-};
-
-struct qib_opcode_stats_perctx {
- struct qib_opcode_stats stats[128];
-};
-
-struct qib_pma_counters {
- u64 n_unicast_xmit; /* total unicast packets sent */
- u64 n_unicast_rcv; /* total unicast packets received */
- u64 n_multicast_xmit; /* total multicast packets sent */
- u64 n_multicast_rcv; /* total multicast packets received */
-};
-
-struct qib_ibport {
- struct qib_qp __rcu *qp0;
- struct qib_qp __rcu *qp1;
- struct ib_mad_agent *send_agent; /* agent for SMI (traps) */
- struct qib_ah *sm_ah;
- struct qib_ah *smi_ah;
- struct rb_root mcast_tree;
- spinlock_t lock; /* protect changes in this struct */
-
- /* non-zero when timer is set */
- unsigned long mkey_lease_timeout;
- unsigned long trap_timeout;
- __be64 gid_prefix; /* in network order */
- __be64 mkey;
- __be64 guids[QIB_GUIDS_PER_PORT - 1]; /* writable GUIDs */
- u64 tid; /* TID for traps */
- struct qib_pma_counters __percpu *pmastats;
- u64 z_unicast_xmit; /* starting count for PMA */
- u64 z_unicast_rcv; /* starting count for PMA */
- u64 z_multicast_xmit; /* starting count for PMA */
- u64 z_multicast_rcv; /* starting count for PMA */
- u64 z_symbol_error_counter; /* starting count for PMA */
- u64 z_link_error_recovery_counter; /* starting count for PMA */
- u64 z_link_downed_counter; /* starting count for PMA */
- u64 z_port_rcv_errors; /* starting count for PMA */
- u64 z_port_rcv_remphys_errors; /* starting count for PMA */
- u64 z_port_xmit_discards; /* starting count for PMA */
- u64 z_port_xmit_data; /* starting count for PMA */
- u64 z_port_rcv_data; /* starting count for PMA */
- u64 z_port_xmit_packets; /* starting count for PMA */
- u64 z_port_rcv_packets; /* starting count for PMA */
- u32 z_local_link_integrity_errors; /* starting count for PMA */
- u32 z_excessive_buffer_overrun_errors; /* starting count for PMA */
- u32 z_vl15_dropped; /* starting count for PMA */
- u32 n_rc_resends;
- u32 n_rc_acks;
- u32 n_rc_qacks;
- u32 n_rc_delayed_comp;
- u32 n_seq_naks;
- u32 n_rdma_seq;
- u32 n_rnr_naks;
- u32 n_other_naks;
- u32 n_loop_pkts;
- u32 n_pkt_drops;
- u32 n_vl15_dropped;
- u32 n_rc_timeouts;
- u32 n_dmawait;
- u32 n_unaligned;
- u32 n_rc_dupreq;
- u32 n_rc_seqnak;
- u32 port_cap_flags;
- u32 pma_sample_start;
- u32 pma_sample_interval;
- __be16 pma_counter_select[5];
- u16 pma_tag;
- u16 pkey_violations;
- u16 qkey_violations;
- u16 mkey_violations;
- u16 mkey_lease_period;
- u16 sm_lid;
- u16 repress_traps;
- u8 sm_sl;
- u8 mkeyprot;
- u8 subnet_timeout;
- u8 vl_high_limit;
- u8 sl_to_vl[16];
-
-};
-
-
-struct qib_ibdev {
- struct ib_device ibdev;
- struct list_head pending_mmaps;
- spinlock_t mmap_offset_lock; /* protect mmap_offset */
- u32 mmap_offset;
- struct qib_mregion __rcu *dma_mr;
-
- /* QP numbers are shared by all IB ports */
- struct qib_qpn_table qpn_table;
- struct qib_lkey_table lk_table;
- struct list_head piowait; /* list for wait PIO buf */
- struct list_head dmawait; /* list for wait DMA */
- struct list_head txwait; /* list for wait qib_verbs_txreq */
- struct list_head memwait; /* list for wait kernel memory */
- struct list_head txreq_free;
- struct timer_list mem_timer;
- struct qib_qp __rcu **qp_table;
- struct qib_pio_header *pio_hdrs;
- dma_addr_t pio_hdrs_phys;
- /* list of QPs waiting for RNR timer */
- spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */
- u32 qp_table_size; /* size of the hash table */
- u32 qp_rnd; /* random bytes for hash */
- spinlock_t qpt_lock;
-
- u32 n_piowait;
- u32 n_txwait;
-
- u32 n_pds_allocated; /* number of PDs allocated for device */
- spinlock_t n_pds_lock;
- u32 n_ahs_allocated; /* number of AHs allocated for device */
- spinlock_t n_ahs_lock;
- u32 n_cqs_allocated; /* number of CQs allocated for device */
- spinlock_t n_cqs_lock;
- u32 n_qps_allocated; /* number of QPs allocated for device */
- spinlock_t n_qps_lock;
- u32 n_srqs_allocated; /* number of SRQs allocated for device */
- spinlock_t n_srqs_lock;
- u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
- spinlock_t n_mcast_grps_lock;
-#ifdef CONFIG_DEBUG_FS
- /* per HCA debugfs */
- struct dentry *qib_ibdev_dbg;
-#endif
-};
-
-struct qib_verbs_counters {
- u64 symbol_error_counter;
- u64 link_error_recovery_counter;
- u64 link_downed_counter;
- u64 port_rcv_errors;
- u64 port_rcv_remphys_errors;
- u64 port_xmit_discards;
- u64 port_xmit_data;
- u64 port_rcv_data;
- u64 port_xmit_packets;
- u64 port_rcv_packets;
- u32 local_link_integrity_errors;
- u32 excessive_buffer_overrun_errors;
- u32 vl15_dropped;
-};
-
-static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
-{
- return container_of(ibmr, struct qib_mr, ibmr);
-}
-
-static inline struct qib_pd *to_ipd(struct ib_pd *ibpd)
-{
- return container_of(ibpd, struct qib_pd, ibpd);
-}
-
-static inline struct qib_ah *to_iah(struct ib_ah *ibah)
-{
- return container_of(ibah, struct qib_ah, ibah);
-}
-
-static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
-{
- return container_of(ibcq, struct qib_cq, ibcq);
-}
-
-static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq)
-{
- return container_of(ibsrq, struct qib_srq, ibsrq);
-}
-
-static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)
-{
- return container_of(ibqp, struct qib_qp, ibqp);
-}
-
-static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
-{
- return container_of(ibdev, struct qib_ibdev, ibdev);
-}
-
-/*
- * Send if not busy or waiting for I/O and either
- * an RC response is pending or we can process send work requests.
- */
-static inline int qib_send_ok(struct qib_qp *qp)
-{
- return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
- (qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
- !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
-}
-
-/*
- * This must be called with s_lock held.
- */
-void qib_schedule_send(struct qib_qp *qp);
-
-static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
-{
- u16 p1 = pkey1 & 0x7FFF;
- u16 p2 = pkey2 & 0x7FFF;
-
- /*
- * Low 15 bits must be non-zero and match, and
- * one of the two must be a full member.
- */
- return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
-}
-
-void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
- u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
-void qib_cap_mask_chg(struct qib_ibport *ibp);
-void qib_sys_guid_chg(struct qib_ibport *ibp);
-void qib_node_desc_chg(struct qib_ibport *ibp);
-int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
- const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
-int qib_create_agents(struct qib_ibdev *dev);
-void qib_free_agents(struct qib_ibdev *dev);
-
-/*
- * Compare the lower 24 bits of the two values.
- * Returns an integer less than, equal to, or greater than zero.
- */
-static inline int qib_cmp24(u32 a, u32 b)
-{
- return (((int) a) - ((int) b)) << 8;
-}
-
-struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid);
-
-int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
- u64 *rwords, u64 *spkts, u64 *rpkts,
- u64 *xmit_wait);
-
-int qib_get_counters(struct qib_pportdata *ppd,
- struct qib_verbs_counters *cntrs);
-
-int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
-
-int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
-
-int qib_mcast_tree_empty(struct qib_ibport *ibp);
-
-__be32 qib_compute_aeth(struct qib_qp *qp);
-
-struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);
-
-struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata);
-
-int qib_destroy_qp(struct ib_qp *ibqp);
-
-int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err);
-
-int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata);
-
-int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_qp_init_attr *init_attr);
-
-unsigned qib_free_all_qps(struct qib_devdata *dd);
-
-void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);
-
-void qib_free_qpn_table(struct qib_qpn_table *qpt);
-
-#ifdef CONFIG_DEBUG_FS
-
-struct qib_qp_iter;
-
-struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev);
-
-int qib_qp_iter_next(struct qib_qp_iter *iter);
-
-void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter);
-
-#endif
-
-void qib_get_credit(struct qib_qp *qp, u32 aeth);
-
-unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);
-
-void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);
-
-void qib_put_txreq(struct qib_verbs_txreq *tx);
-
-int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
- u32 hdrwords, struct qib_sge_state *ss, u32 len);
-
-void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length,
- int release);
-
-void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release);
-
-void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct qib_qp *qp);
-
-void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct qib_qp *qp);
-
-int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
-
-struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);
-
-void qib_rc_rnr_retry(unsigned long arg);
-
-void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);
-
-void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err);
-
-int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);
-
-void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct qib_qp *qp);
-
-int qib_alloc_lkey(struct qib_mregion *mr, int dma_region);
-
-void qib_free_lkey(struct qib_mregion *mr);
-
-int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
- struct qib_sge *isge, struct ib_sge *sge, int acc);
-
-int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
- u32 len, u64 vaddr, u32 rkey, int acc);
-
-int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
-
-struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
- struct ib_srq_init_attr *srq_init_attr,
- struct ib_udata *udata);
-
-int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
- enum ib_srq_attr_mask attr_mask,
- struct ib_udata *udata);
-
-int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
-
-int qib_destroy_srq(struct ib_srq *ibsrq);
-
-int qib_cq_init(struct qib_devdata *dd);
-
-void qib_cq_exit(struct qib_devdata *dd);
-
-void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);
-
-int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
-
-struct ib_cq *qib_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata);
-
-int qib_destroy_cq(struct ib_cq *ibcq);
-
-int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
-
-int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
-
-struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc);
-
-struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
- struct ib_phys_buf *buffer_list,
- int num_phys_buf, int acc, u64 *iova_start);
-
-struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt_addr, int mr_access_flags,
- struct ib_udata *udata);
-
-int qib_dereg_mr(struct ib_mr *ibmr);
-
-struct ib_mr *qib_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_entries);
-
-struct ib_fast_reg_page_list *qib_alloc_fast_reg_page_list(
- struct ib_device *ibdev, int page_list_len);
-
-void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl);
-
-int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr);
-
-struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr);
-
-int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova);
-
-int qib_unmap_fmr(struct list_head *fmr_list);
-
-int qib_dealloc_fmr(struct ib_fmr *ibfmr);
-
-static inline void qib_get_mr(struct qib_mregion *mr)
-{
- atomic_inc(&mr->refcount);
-}
-
-void mr_rcu_callback(struct rcu_head *list);
-
-static inline void qib_put_mr(struct qib_mregion *mr)
-{
- if (unlikely(atomic_dec_and_test(&mr->refcount)))
- call_rcu(&mr->list, mr_rcu_callback);
-}
-
-static inline void qib_put_ss(struct qib_sge_state *ss)
-{
- while (ss->num_sge) {
- qib_put_mr(ss->sge.mr);
- if (--ss->num_sge)
- ss->sge = *ss->sg_list++;
- }
-}
-
-
-void qib_release_mmap_info(struct kref *ref);
-
-struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
- struct ib_ucontext *context,
- void *obj);
-
-void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
- u32 size, void *obj);
-
-int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
-
-int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);
-
-void qib_migrate_qp(struct qib_qp *qp);
-
-int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
- int has_grh, struct qib_qp *qp, u32 bth0);
-
-u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
- struct ib_global_route *grh, u32 hwords, u32 nwords);
-
-void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
- u32 bth0, u32 bth2);
-
-void qib_do_send(struct work_struct *work);
-
-void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
- enum ib_wc_status status);
-
-void qib_send_rc_ack(struct qib_qp *qp);
-
-int qib_make_rc_req(struct qib_qp *qp);
-
-int qib_make_uc_req(struct qib_qp *qp);
-
-int qib_make_ud_req(struct qib_qp *qp);
-
-int qib_register_ib_device(struct qib_devdata *);
-
-void qib_unregister_ib_device(struct qib_devdata *);
-
-void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);
-
-void qib_ib_piobufavail(struct qib_devdata *);
-
-unsigned qib_get_npkeys(struct qib_devdata *);
-
-unsigned qib_get_pkey(struct qib_ibport *, unsigned);
-
-extern const enum ib_wc_opcode ib_qib_wc_opcode[];
-
-/*
- * HCA-independent IB PhysPortState values below, as returned
- * by the f_ibphys_portstate() routine.
- */
-#define IB_PHYSPORTSTATE_SLEEP 1
-#define IB_PHYSPORTSTATE_POLL 2
-#define IB_PHYSPORTSTATE_DISABLED 3
-#define IB_PHYSPORTSTATE_CFG_TRAIN 4
-#define IB_PHYSPORTSTATE_LINKUP 5
-#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
-#define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8
-#define IB_PHYSPORTSTATE_CFG_IDLE 0xB
-#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC
-#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE
-#define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF
-#define IB_PHYSPORTSTATE_CFG_ENH 0x10
-#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13
-
-extern const int ib_qib_state_ops[];
-
-extern __be64 ib_qib_sys_image_guid; /* in network order */
-
-extern unsigned int ib_qib_lkey_table_size;
-
-extern unsigned int ib_qib_max_cqes;
-
-extern unsigned int ib_qib_max_cqs;
-
-extern unsigned int ib_qib_max_qp_wrs;
-
-extern unsigned int ib_qib_max_qps;
-
-extern unsigned int ib_qib_max_sges;
-
-extern unsigned int ib_qib_max_mcast_grps;
-
-extern unsigned int ib_qib_max_mcast_qp_attached;
-
-extern unsigned int ib_qib_max_srqs;
-
-extern unsigned int ib_qib_max_srq_sges;
-
-extern unsigned int ib_qib_max_srq_wrs;
-
-extern const u32 ib_qib_rnr_table[];
-
-extern struct ib_dma_mapping_ops qib_dma_mapping_ops;
-
-#endif /* QIB_VERBS_H */
diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
deleted file mode 100644
index f8ea069a3eaf..000000000000
--- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c
+++ /dev/null
@@ -1,368 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/rculist.h>
-
-#include "qib.h"
-
-/**
- * qib_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
- * @qp: the QP to link
- */
-static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)
-{
- struct qib_mcast_qp *mqp;
-
- mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
- if (!mqp)
- goto bail;
-
- mqp->qp = qp;
- atomic_inc(&qp->refcount);
-
-bail:
- return mqp;
-}
-
-static void qib_mcast_qp_free(struct qib_mcast_qp *mqp)
-{
- struct qib_qp *qp = mqp->qp;
-
- /* Notify qib_destroy_qp() if it is waiting. */
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
-
- kfree(mqp);
-}
-
-/**
- * qib_mcast_alloc - allocate the multicast GID structure
- * @mgid: the multicast GID
- *
- * A list of QPs will be attached to this structure.
- */
-static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid)
-{
- struct qib_mcast *mcast;
-
- mcast = kmalloc(sizeof(*mcast), GFP_KERNEL);
- if (!mcast)
- goto bail;
-
- mcast->mgid = *mgid;
- INIT_LIST_HEAD(&mcast->qp_list);
- init_waitqueue_head(&mcast->wait);
- atomic_set(&mcast->refcount, 0);
- mcast->n_attached = 0;
-
-bail:
- return mcast;
-}
-
-static void qib_mcast_free(struct qib_mcast *mcast)
-{
- struct qib_mcast_qp *p, *tmp;
-
- list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
- qib_mcast_qp_free(p);
-
- kfree(mcast);
-}
-
-/**
- * qib_mcast_find - search the global table for the given multicast GID
- * @ibp: the IB port structure
- * @mgid: the multicast GID to search for
- *
- * Returns NULL if not found.
- *
- * The caller is responsible for decrementing the reference count if found.
- */
-struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid)
-{
- struct rb_node *n;
- unsigned long flags;
- struct qib_mcast *mcast;
-
- spin_lock_irqsave(&ibp->lock, flags);
- n = ibp->mcast_tree.rb_node;
- while (n) {
- int ret;
-
- mcast = rb_entry(n, struct qib_mcast, rb_node);
-
- ret = memcmp(mgid->raw, mcast->mgid.raw,
- sizeof(union ib_gid));
- if (ret < 0)
- n = n->rb_left;
- else if (ret > 0)
- n = n->rb_right;
- else {
- atomic_inc(&mcast->refcount);
- spin_unlock_irqrestore(&ibp->lock, flags);
- goto bail;
- }
- }
- spin_unlock_irqrestore(&ibp->lock, flags);
-
- mcast = NULL;
-
-bail:
- return mcast;
-}
-
-/**
- * qib_mcast_add - insert mcast GID into table and attach QP struct
- * @mcast: the mcast GID table
- * @mqp: the QP to attach
- *
- * Return zero if both were added. Return EEXIST if the GID was already in
- * the table but the QP was added. Return ESRCH if the QP was already
- * attached and neither structure was added.
- */
-static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp,
- struct qib_mcast *mcast, struct qib_mcast_qp *mqp)
-{
- struct rb_node **n = &ibp->mcast_tree.rb_node;
- struct rb_node *pn = NULL;
- int ret;
-
- spin_lock_irq(&ibp->lock);
-
- while (*n) {
- struct qib_mcast *tmcast;
- struct qib_mcast_qp *p;
-
- pn = *n;
- tmcast = rb_entry(pn, struct qib_mcast, rb_node);
-
- ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
- sizeof(union ib_gid));
- if (ret < 0) {
- n = &pn->rb_left;
- continue;
- }
- if (ret > 0) {
- n = &pn->rb_right;
- continue;
- }
-
- /* Search the QP list to see if this is already there. */
- list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
- if (p->qp == mqp->qp) {
- ret = ESRCH;
- goto bail;
- }
- }
- if (tmcast->n_attached == ib_qib_max_mcast_qp_attached) {
- ret = ENOMEM;
- goto bail;
- }
-
- tmcast->n_attached++;
-
- list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
- ret = EEXIST;
- goto bail;
- }
-
- spin_lock(&dev->n_mcast_grps_lock);
- if (dev->n_mcast_grps_allocated == ib_qib_max_mcast_grps) {
- spin_unlock(&dev->n_mcast_grps_lock);
- ret = ENOMEM;
- goto bail;
- }
-
- dev->n_mcast_grps_allocated++;
- spin_unlock(&dev->n_mcast_grps_lock);
-
- mcast->n_attached++;
-
- list_add_tail_rcu(&mqp->list, &mcast->qp_list);
-
- atomic_inc(&mcast->refcount);
- rb_link_node(&mcast->rb_node, pn, n);
- rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);
-
- ret = 0;
-
-bail:
- spin_unlock_irq(&ibp->lock);
-
- return ret;
-}
-
-int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- struct qib_qp *qp = to_iqp(ibqp);
- struct qib_ibdev *dev = to_idev(ibqp->device);
- struct qib_ibport *ibp;
- struct qib_mcast *mcast;
- struct qib_mcast_qp *mqp;
- int ret;
-
- if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
- ret = -EINVAL;
- goto bail;
- }
-
- /*
- * Allocate data structures since it's better to do this outside of
- * spin locks and they will most likely be needed.
- */
- mcast = qib_mcast_alloc(gid);
- if (mcast == NULL) {
- ret = -ENOMEM;
- goto bail;
- }
- mqp = qib_mcast_qp_alloc(qp);
- if (mqp == NULL) {
- qib_mcast_free(mcast);
- ret = -ENOMEM;
- goto bail;
- }
- ibp = to_iport(ibqp->device, qp->port_num);
- switch (qib_mcast_add(dev, ibp, mcast, mqp)) {
- case ESRCH:
- /* Neither was used: OK to attach the same QP twice. */
- qib_mcast_qp_free(mqp);
- qib_mcast_free(mcast);
- break;
-
- case EEXIST: /* The mcast wasn't used */
- qib_mcast_free(mcast);
- break;
-
- case ENOMEM:
- /* Exceeded the maximum number of mcast groups. */
- qib_mcast_qp_free(mqp);
- qib_mcast_free(mcast);
- ret = -ENOMEM;
- goto bail;
-
- default:
- break;
- }
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- struct qib_qp *qp = to_iqp(ibqp);
- struct qib_ibdev *dev = to_idev(ibqp->device);
- struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
- struct qib_mcast *mcast = NULL;
- struct qib_mcast_qp *p, *tmp;
- struct rb_node *n;
- int last = 0;
- int ret;
-
- if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
- ret = -EINVAL;
- goto bail;
- }
-
- spin_lock_irq(&ibp->lock);
-
- /* Find the GID in the mcast table. */
- n = ibp->mcast_tree.rb_node;
- while (1) {
- if (n == NULL) {
- spin_unlock_irq(&ibp->lock);
- ret = -EINVAL;
- goto bail;
- }
-
- mcast = rb_entry(n, struct qib_mcast, rb_node);
- ret = memcmp(gid->raw, mcast->mgid.raw,
- sizeof(union ib_gid));
- if (ret < 0)
- n = n->rb_left;
- else if (ret > 0)
- n = n->rb_right;
- else
- break;
- }
-
- /* Search the QP list. */
- list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
- if (p->qp != qp)
- continue;
- /*
- * We found it, so remove it, but don't poison the forward
- * link until we are sure there are no list walkers.
- */
- list_del_rcu(&p->list);
- mcast->n_attached--;
-
- /* If this was the last attached QP, remove the GID too. */
- if (list_empty(&mcast->qp_list)) {
- rb_erase(&mcast->rb_node, &ibp->mcast_tree);
- last = 1;
- }
- break;
- }
-
- spin_unlock_irq(&ibp->lock);
-
- if (p) {
- /*
- * Wait for any list walkers to finish before freeing the
- * list element.
- */
- wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
- qib_mcast_qp_free(p);
- }
- if (last) {
- atomic_dec(&mcast->refcount);
- wait_event(mcast->wait, !atomic_read(&mcast->refcount));
- qib_mcast_free(mcast);
- spin_lock_irq(&dev->n_mcast_grps_lock);
- dev->n_mcast_grps_allocated--;
- spin_unlock_irq(&dev->n_mcast_grps_lock);
- }
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-int qib_mcast_tree_empty(struct qib_ibport *ibp)
-{
- return ibp->mcast_tree.rb_node == NULL;
-}
diff --git a/drivers/infiniband/hw/qib/qib_wc_ppc64.c b/drivers/infiniband/hw/qib/qib_wc_ppc64.c
deleted file mode 100644
index 673cf4c22ebd..000000000000
--- a/drivers/infiniband/hw/qib/qib_wc_ppc64.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file is conditionally built on PowerPC only. Otherwise weak symbol
- * versions of the functions exported from here are used.
- */
-
-#include "qib.h"
-
-/**
- * qib_enable_wc - enable write combining for MMIO writes to the device
- * @dd: qlogic_ib device
- *
- * Nothing to do on PowerPC, so just return without error.
- */
-int qib_enable_wc(struct qib_devdata *dd)
-{
- return 0;
-}
-
-/**
- * qib_unordered_wc - indicate whether write combining is unordered
- *
- * Because our performance depends on our ability to do write
- * combining mmio writes in the most efficient way, we need to
- * know if we are on a processor that may reorder stores when
- * write combining.
- */
-int qib_unordered_wc(void)
-{
- return 1;
-}
diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
deleted file mode 100644
index edd0ddbd4481..000000000000
--- a/drivers/infiniband/hw/qib/qib_wc_x86_64.c
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file is conditionally built on x86_64 only. Otherwise weak symbol
- * versions of the functions exported from here are used.
- */
-
-#include <linux/pci.h>
-#include <asm/mtrr.h>
-#include <asm/processor.h>
-
-#include "qib.h"
-
-/**
- * qib_enable_wc - enable write combining for MMIO writes to the device
- * @dd: qlogic_ib device
- *
- * This routine is x86_64-specific; it twiddles the CPU's MTRRs to enable
- * write combining.
- */
-int qib_enable_wc(struct qib_devdata *dd)
-{
- int ret = 0;
- u64 pioaddr, piolen;
- unsigned bits;
- const unsigned long addr = pci_resource_start(dd->pcidev, 0);
- const size_t len = pci_resource_len(dd->pcidev, 0);
-
- /*
- * Set the PIO buffers to be WCCOMB, so we get HT bursts to the
- * chip. Linux (possibly the hardware) requires it to be on a power
- * of 2 address matching the length (which has to be a power of 2).
- * For rev1, that means the base address, for rev2, it will be just
- * the PIO buffers themselves.
- * For chips with two sets of buffers, the calculations are
- * somewhat more complicated; we need to sum, and the piobufbase
- * register has both offsets, 2K in low 32 bits, 4K in high 32 bits.
- * The buffers are still packed, so a single range covers both.
- */
- if (dd->piobcnt2k && dd->piobcnt4k) {
- /* 2 sizes for chip */
- unsigned long pio2kbase, pio4kbase;
-
- pio2kbase = dd->piobufbase & 0xffffffffUL;
- pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL;
- if (pio2kbase < pio4kbase) {
- /* all current chips */
- pioaddr = addr + pio2kbase;
- piolen = pio4kbase - pio2kbase +
- dd->piobcnt4k * dd->align4k;
- } else {
- pioaddr = addr + pio4kbase;
- piolen = pio2kbase - pio4kbase +
- dd->piobcnt2k * dd->palign;
- }
- } else { /* single buffer size (2K, currently) */
- pioaddr = addr + dd->piobufbase;
- piolen = dd->piobcnt2k * dd->palign +
- dd->piobcnt4k * dd->align4k;
- }
-
- for (bits = 0; !(piolen & (1ULL << bits)); bits++)
- ; /* do nothing */
-
- if (piolen != (1ULL << bits)) {
- piolen >>= bits;
- while (piolen >>= 1)
- bits++;
- piolen = 1ULL << (bits + 1);
- }
- if (pioaddr & (piolen - 1)) {
- u64 atmp = pioaddr & ~(piolen - 1);
-
- if (atmp < addr || (atmp + piolen) > (addr + len)) {
- qib_dev_err(dd,
- "No way to align address/size (%llx/%llx), no WC mtrr\n",
- (unsigned long long) atmp,
- (unsigned long long) piolen << 1);
- ret = -ENODEV;
- } else {
- pioaddr = atmp;
- piolen <<= 1;
- }
- }
-
- if (!ret) {
- dd->wc_cookie = arch_phys_wc_add(pioaddr, piolen);
- if (dd->wc_cookie < 0)
- /* use error from routine */
- ret = dd->wc_cookie;
- }
-
- return ret;
-}
-
-/**
- * qib_disable_wc - disable write combining for MMIO writes to the device
- * @dd: qlogic_ib device
- */
-void qib_disable_wc(struct qib_devdata *dd)
-{
- arch_phys_wc_del(dd->wc_cookie);
-}
-
-/**
- * qib_unordered_wc - indicate whether write combining is ordered
- *
- * Because our performance depends on our ability to do write combining mmio
- * writes in the most efficient way, we need to know if we are on an Intel
- * or AMD x86_64 processor. AMD x86_64 processors flush WC buffers out in
- * the order completed, and so no special flushing is required to get
- * correct ordering. Intel processors, however, will flush write buffers
- * out in "random" orders, and so explicit ordering is needed at times.
- */
-int qib_unordered_wc(void)
-{
- return boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
-}
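
The deleted x86_64 write-combining setup rounds the PIO span up to a power of two and aligns the base address down, since MTRR-style ranges must be a power-of-two size on a matching boundary. The following standalone sketch shows just that arithmetic in plain userspace C; the address and length values are invented examples, not the driver's.

#include <stdint.h>
#include <stdio.h>

/* Round a length up to the next power of two. */
static uint64_t roundup_pow2(uint64_t len)
{
	uint64_t p = 1;

	while (p < len)
		p <<= 1;
	return p;
}

int main(void)
{
	uint64_t addr = 0x90002000ULL, len = 0x3000ULL;	/* example offsets */
	uint64_t span = roundup_pow2(len);		/* 0x4000 */
	uint64_t base = addr & ~(span - 1);		/* align base down */

	/* The driver additionally doubles the span and re-checks the BAR
	 * bounds when aligning down would otherwise leave part of the PIO
	 * buffers outside the range; that step is omitted here. */
	printf("base=%#llx span=%#llx aligned=%s\n",
	       (unsigned long long)base, (unsigned long long)span,
	       (addr & (span - 1)) ? "no" : "yes");
	return 0;
}
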
diff --git a/drivers/infiniband/hw/usnic/Kconfig b/drivers/infiniband/hw/usnic/Kconfig
index 29ab11c34f3f..9019599c173b 100644
--- a/drivers/infiniband/hw/usnic/Kconfig
+++ b/drivers/infiniband/hw/usnic/Kconfig
@@ -1,10 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_USNIC
tristate "Verbs support for Cisco VIC"
depends on NETDEVICES && ETHERNET && INET && PCI && INTEL_IOMMU
+ depends on INFINIBAND_USER_ACCESS
select ENIC
select NET_VENDOR_CISCO
select PCI_IOV
- select INFINIBAND_USER_ACCESS
- ---help---
+ help
This is a low-level driver for Cisco's Virtual Interface
Cards (VICs), including the VIC 1240 and 1280 cards.
diff --git a/drivers/infiniband/hw/usnic/Makefile b/drivers/infiniband/hw/usnic/Makefile
index 99fb2db47cd5..f12a4938ffd2 100644
--- a/drivers/infiniband/hw/usnic/Makefile
+++ b/drivers/infiniband/hw/usnic/Makefile
@@ -1,4 +1,5 @@
-ccflags-y := -Idrivers/net/ethernet/cisco/enic
+# SPDX-License-Identifier: GPL-2.0
+ccflags-y := -I $(srctree)/drivers/net/ethernet/cisco/enic
obj-$(CONFIG_INFINIBAND_USNIC)+= usnic_verbs.o
diff --git a/drivers/infiniband/hw/usnic/usnic.h b/drivers/infiniband/hw/usnic/usnic.h
index 5be13d8991bc..f903502d3883 100644
--- a/drivers/infiniband/hw/usnic/usnic.h
+++ b/drivers/infiniband/hw/usnic/usnic.h
@@ -1,9 +1,24 @@
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_abi.h b/drivers/infiniband/hw/usnic/usnic_abi.h
index 04a66229584e..86a82a4da0aa 100644
--- a/drivers/infiniband/hw/usnic/usnic_abi.h
+++ b/drivers/infiniband/hw/usnic/usnic_abi.h
@@ -1,9 +1,24 @@
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
@@ -57,7 +72,7 @@ struct usnic_ib_create_qp_resp {
u64 bar_bus_addr;
u32 bar_len;
/*
- * WQ, RQ, CQ are explicity specified bc exposing a generic resources inteface
+ * WQ, RQ, CQ are explicitly specified bc exposing a generic resources interface
* expands the scope of ABI to many files.
*/
u32 wq_cnt;
diff --git a/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h b/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h
index 393567266142..bf7d197a9f42 100644
--- a/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h
+++ b/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h
@@ -1,9 +1,24 @@
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
@@ -19,7 +34,6 @@
#ifndef USNIC_CMN_PKT_HDR_H
#define USNIC_CMN_PKT_HDR_H
-#define USNIC_ROCE_ETHERTYPE (0x8915)
#define USNIC_ROCE_GRH_VER (8)
#define USNIC_PROTO_VER (1)
#define USNIC_ROCE_GRH_VER_SHIFT (4)
diff --git a/drivers/infiniband/hw/usnic/usnic_common_util.h b/drivers/infiniband/hw/usnic/usnic_common_util.h
index 9d737ed5e55d..ddd81294fa46 100644
--- a/drivers/infiniband/hw/usnic/usnic_common_util.h
+++ b/drivers/infiniband/hw/usnic/usnic_common_util.h
@@ -1,9 +1,24 @@
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
@@ -19,21 +34,7 @@
#ifndef USNIC_CMN_UTIL_H
#define USNIC_CMN_UTIL_H
-static inline void
-usnic_mac_to_gid(const char *const mac, char *raw_gid)
-{
- raw_gid[0] = 0xfe;
- raw_gid[1] = 0x80;
- memset(&raw_gid[2], 0, 6);
- raw_gid[8] = mac[0]^2;
- raw_gid[9] = mac[1];
- raw_gid[10] = mac[2];
- raw_gid[11] = 0xff;
- raw_gid[12] = 0xfe;
- raw_gid[13] = mac[3];
- raw_gid[14] = mac[4];
- raw_gid[15] = mac[5];
-}
+#include <net/addrconf.h>
static inline void
usnic_mac_ip_to_gid(const char *const mac, const __be32 inaddr, char *raw_gid)
@@ -42,27 +43,7 @@ usnic_mac_ip_to_gid(const char *const mac, const __be32 inaddr, char *raw_gid)
raw_gid[1] = 0x80;
memset(&raw_gid[2], 0, 2);
memcpy(&raw_gid[4], &inaddr, 4);
- raw_gid[8] = mac[0]^2;
- raw_gid[9] = mac[1];
- raw_gid[10] = mac[2];
- raw_gid[11] = 0xff;
- raw_gid[12] = 0xfe;
- raw_gid[13] = mac[3];
- raw_gid[14] = mac[4];
- raw_gid[15] = mac[5];
-}
-
-static inline void
-usnic_write_gid_if_id_from_mac(char *mac, char *raw_gid)
-{
- raw_gid[8] = mac[0]^2;
- raw_gid[9] = mac[1];
- raw_gid[10] = mac[2];
- raw_gid[11] = 0xff;
- raw_gid[12] = 0xfe;
- raw_gid[13] = mac[3];
- raw_gid[14] = mac[4];
- raw_gid[15] = mac[5];
+ addrconf_addr_eui48(&raw_gid[8], mac);
}
#endif /* USNIC_COMMON_UTIL_H */
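
addrconf_addr_eui48() yields the same modified EUI-64 interface identifier that the removed open-coded helpers built by hand: flip the universal/local bit of the first MAC octet and splice ff:fe into the middle. For reference, a small userspace equivalent (the function name is invented):

#include <stdint.h>

/* mac: 6-byte Ethernet address; eui: 8-byte interface identifier. */
static void mac_to_eui64(const uint8_t mac[6], uint8_t eui[8])
{
	eui[0] = mac[0] ^ 0x02;	/* toggle the universal/local bit */
	eui[1] = mac[1];
	eui[2] = mac[2];
	eui[3] = 0xff;		/* fixed ff:fe filler ... */
	eui[4] = 0xfe;		/* ... marking an EUI-48-derived ID */
	eui[5] = mac[3];
	eui[6] = mac[4];
	eui[7] = mac[5];
}
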
diff --git a/drivers/infiniband/hw/usnic/usnic_debugfs.c b/drivers/infiniband/hw/usnic/usnic_debugfs.c
index 5d13860161a4..10a8cd5ba076 100644
--- a/drivers/infiniband/hw/usnic/usnic_debugfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_debugfs.c
@@ -1,9 +1,24 @@
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
@@ -17,7 +32,6 @@
*/
#include <linux/debugfs.h>
-#include <linux/module.h>
#include "usnic.h"
#include "usnic_log.h"
@@ -98,42 +112,21 @@ static const struct file_operations flowinfo_ops = {
void usnic_debugfs_init(void)
{
debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
- if (IS_ERR(debugfs_root)) {
- usnic_err("Failed to create debugfs root dir, check if debugfs is enabled in kernel configuration\n");
- goto out_clear_root;
- }
flows_dentry = debugfs_create_dir("flows", debugfs_root);
- if (IS_ERR_OR_NULL(flows_dentry)) {
- usnic_err("Failed to create debugfs flow dir with err %ld\n",
- PTR_ERR(flows_dentry));
- goto out_free_root;
- }
debugfs_create_file("build-info", S_IRUGO, debugfs_root,
NULL, &usnic_debugfs_buildinfo_ops);
- return;
-
-out_free_root:
- debugfs_remove_recursive(debugfs_root);
-out_clear_root:
- debugfs_root = NULL;
}
void usnic_debugfs_exit(void)
{
- if (!debugfs_root)
- return;
-
debugfs_remove_recursive(debugfs_root);
debugfs_root = NULL;
}
void usnic_debugfs_flow_add(struct usnic_ib_qp_grp_flow *qp_flow)
{
- if (IS_ERR_OR_NULL(flows_dentry))
- return;
-
scnprintf(qp_flow->dentry_name, sizeof(qp_flow->dentry_name),
"%u", qp_flow->flow->flow_id);
qp_flow->dbgfs_dentry = debugfs_create_file(qp_flow->dentry_name,
@@ -141,14 +134,9 @@ void usnic_debugfs_flow_add(struct usnic_ib_qp_grp_flow *qp_flow)
flows_dentry,
qp_flow,
&flowinfo_ops);
- if (IS_ERR_OR_NULL(qp_flow->dbgfs_dentry)) {
- usnic_err("Failed to create dbg fs entry for flow %u\n",
- qp_flow->flow->flow_id);
- }
}
void usnic_debugfs_flow_remove(struct usnic_ib_qp_grp_flow *qp_flow)
{
- if (!IS_ERR_OR_NULL(qp_flow->dbgfs_dentry))
- debugfs_remove(qp_flow->dbgfs_dentry);
+ debugfs_remove(qp_flow->dbgfs_dentry);
}
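
The error handling dropped above leans on the debugfs calling convention: a creation helper returns either a valid dentry or an error pointer, and either value can be handed straight back to later debugfs calls, including the remove helpers, so callers are not expected to check it. A minimal sketch of that pattern under the same assumption; example_root, example_counter and the file names are invented:

#include <linux/debugfs.h>

static struct dentry *example_root;
static u32 example_counter;

static void example_debugfs_init(void)
{
	example_root = debugfs_create_dir("example", NULL);
	/* No IS_ERR() check: if the directory failed, the file below is
	 * silently dropped by debugfs itself. */
	debugfs_create_u32("counter", 0444, example_root, &example_counter);
}

static void example_debugfs_exit(void)
{
	debugfs_remove_recursive(example_root);	/* tolerates ERR_PTR/NULL */
	example_root = NULL;
}
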
diff --git a/drivers/infiniband/hw/usnic/usnic_debugfs.h b/drivers/infiniband/hw/usnic/usnic_debugfs.h
index 4087d24a88f6..98453e91daa6 100644
--- a/drivers/infiniband/hw/usnic/usnic_debugfs.h
+++ b/drivers/infiniband/hw/usnic/usnic_debugfs.h
@@ -1,9 +1,24 @@
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.c b/drivers/infiniband/hw/usnic/usnic_fwd.c
index e3c9bd9d3ba3..18a70850b738 100644
--- a/drivers/infiniband/hw/usnic/usnic_fwd.c
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.c
@@ -1,9 +1,24 @@
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
@@ -77,8 +92,8 @@ struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev)
ufdev->pdev = pdev;
ufdev->netdev = pci_get_drvdata(pdev);
spin_lock_init(&ufdev->lock);
- strncpy(ufdev->name, netdev_name(ufdev->netdev),
- sizeof(ufdev->name) - 1);
+ BUILD_BUG_ON(sizeof(ufdev->name) != sizeof(ufdev->netdev->name));
+ strcpy(ufdev->name, ufdev->netdev->name);
return ufdev;
}
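
The strncpy() removal above works because the compile-time assertion pins the two buffers to the same size, after which a plain copy of a NUL-terminated source cannot overflow. A standalone illustration of the idea; the structs, the helper and BUILD_BUG_ON_SKETCH are invented stand-ins for the kernel's BUILD_BUG_ON():

#include <string.h>

/* Fails to compile (negative array size) when cond is true. */
#define BUILD_BUG_ON_SKETCH(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

struct fwd_dev_sketch { char name[16]; };
struct net_dev_sketch { char name[16]; };

static void copy_name(struct fwd_dev_sketch *dst,
		      const struct net_dev_sketch *src)
{
	BUILD_BUG_ON_SKETCH(sizeof(dst->name) != sizeof(src->name));
	strcpy(dst->name, src->name);	/* src is NUL-terminated and the
					 * destination is provably as large */
}
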
@@ -88,27 +103,19 @@ void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev)
kfree(ufdev);
}
-void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN])
+void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, const char mac[ETH_ALEN])
{
spin_lock(&ufdev->lock);
memcpy(&ufdev->mac, mac, sizeof(ufdev->mac));
spin_unlock(&ufdev->lock);
}
-int usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr)
+void usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr)
{
- int status;
-
spin_lock(&ufdev->lock);
- if (ufdev->inaddr == 0) {
+ if (!ufdev->inaddr)
ufdev->inaddr = inaddr;
- status = 0;
- } else {
- status = -EFAULT;
- }
spin_unlock(&ufdev->lock);
-
- return status;
}
void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev)
@@ -207,7 +214,7 @@ usnic_fwd_alloc_flow(struct usnic_fwd_dev *ufdev, struct filter *filter,
if (!flow)
return ERR_PTR(-ENOMEM);
- tlv = pci_alloc_consistent(pdev, tlv_size, &tlv_pa);
+ tlv = dma_alloc_coherent(&pdev->dev, tlv_size, &tlv_pa, GFP_ATOMIC);
if (!tlv) {
usnic_err("Failed to allocate memory\n");
status = -ENOMEM;
@@ -251,7 +258,7 @@ usnic_fwd_alloc_flow(struct usnic_fwd_dev *ufdev, struct filter *filter,
out_free_tlv:
spin_unlock(&ufdev->lock);
- pci_free_consistent(pdev, tlv_size, tlv, tlv_pa);
+ dma_free_coherent(&pdev->dev, tlv_size, tlv, tlv_pa);
if (!status)
return flow;
out_free_flow:
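
The allocation change above is the mechanical conversion from the legacy PCI DMA helpers to the generic DMA API: pci_alloc_consistent(pdev, size, &pa) becomes dma_alloc_coherent(&pdev->dev, size, &pa, GFP_ATOMIC), with GFP_ATOMIC preserving the old non-sleeping behaviour. A hypothetical helper pair showing the shape of the converted calls (names are invented):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static void *example_alloc_tlv(struct pci_dev *pdev, size_t sz, dma_addr_t *pa)
{
	/* GFP_ATOMIC: the caller may hold a spinlock, as in the flow path. */
	return dma_alloc_coherent(&pdev->dev, sz, pa, GFP_ATOMIC);
}

static void example_free_tlv(struct pci_dev *pdev, size_t sz,
			     void *tlv, dma_addr_t pa)
{
	dma_free_coherent(&pdev->dev, sz, tlv, pa);
}
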
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.h b/drivers/infiniband/hw/usnic/usnic_fwd.h
index 93713a2230b3..a91200886922 100644
--- a/drivers/infiniband/hw/usnic/usnic_fwd.h
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.h
@@ -1,9 +1,24 @@
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
@@ -21,6 +36,7 @@
#include <linux/if.h>
#include <linux/netdevice.h>
+#include <linux/if_ether.h>
#include <linux/pci.h>
#include <linux/in.h>
@@ -41,7 +57,7 @@ struct usnic_fwd_dev {
char mac[ETH_ALEN];
unsigned int mtu;
__be32 inaddr;
- char name[IFNAMSIZ+1];
+ char name[IFNAMSIZ];
};
struct usnic_fwd_flow {
@@ -58,8 +74,8 @@ struct usnic_filter_action {
struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev);
void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev);
-void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN]);
-int usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr);
+void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, const char mac[ETH_ALEN]);
+void usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr);
void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev);
void usnic_fwd_carrier_up(struct usnic_fwd_dev *ufdev);
void usnic_fwd_carrier_down(struct usnic_fwd_dev *ufdev);
@@ -82,7 +98,7 @@ static inline void usnic_fwd_init_usnic_filter(struct filter *filter,
uint32_t usnic_id)
{
filter->type = FILTER_USNIC_ID;
- filter->u.usnic.ethtype = USNIC_ROCE_ETHERTYPE;
+ filter->u.usnic.ethtype = ETH_P_IBOE;
filter->u.usnic.flags = FILTER_FIELD_USNIC_ETHTYPE |
FILTER_FIELD_USNIC_ID |
FILTER_FIELD_USNIC_PROTO;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib.h b/drivers/infiniband/hw/usnic/usnic_ib.h
index e5a9297dd1bd..b350081aeb5a 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib.h
@@ -1,9 +1,24 @@
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
@@ -46,6 +61,10 @@ struct usnic_ib_pd {
struct usnic_uiom_pd *umem_pd;
};
+struct usnic_ib_cq {
+ struct ib_cq ibcq;
+};
+
struct usnic_ib_mr {
struct ib_mr ibmr;
struct usnic_uiom_reg *umem;
@@ -71,7 +90,7 @@ struct usnic_ib_dev {
struct usnic_ib_vf {
struct usnic_ib_dev *pf;
- spinlock_t lock;
+ struct mutex lock;
struct usnic_vnic *vnic;
unsigned int qp_grp_ref_cnt;
struct usnic_ib_pd *pd;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index 34c49b8105fe..11eca39b73a9 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -1,9 +1,24 @@
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
@@ -61,7 +76,7 @@ static LIST_HEAD(usnic_ib_ibdev_list);
static int usnic_ib_dump_vf_hdr(void *obj, char *buf, int buf_sz)
{
struct usnic_ib_vf *vf = obj;
- return scnprintf(buf, buf_sz, "PF: %s ", vf->pf->ib_dev.name);
+ return scnprintf(buf, buf_sz, "PF: %s ", dev_name(&vf->pf->ib_dev.dev));
}
/* End callback dump funcs */
@@ -74,30 +89,18 @@ static void usnic_ib_dump_vf(struct usnic_ib_vf *vf, char *buf, int buf_sz)
void usnic_ib_log_vf(struct usnic_ib_vf *vf)
{
- char buf[1000];
- usnic_ib_dump_vf(vf, buf, sizeof(buf));
+ char *buf = kzalloc(1000, GFP_KERNEL);
+
+ if (!buf)
+ return;
+
+ usnic_ib_dump_vf(vf, buf, 1000);
usnic_dbg("%s\n", buf);
-}
-/* Start of netdev section */
-static inline const char *usnic_ib_netdev_event_to_string(unsigned long event)
-{
- const char *event2str[] = {"NETDEV_NONE", "NETDEV_UP", "NETDEV_DOWN",
- "NETDEV_REBOOT", "NETDEV_CHANGE",
- "NETDEV_REGISTER", "NETDEV_UNREGISTER", "NETDEV_CHANGEMTU",
- "NETDEV_CHANGEADDR", "NETDEV_GOING_DOWN", "NETDEV_FEAT_CHANGE",
- "NETDEV_BONDING_FAILOVER", "NETDEV_PRE_UP",
- "NETDEV_PRE_TYPE_CHANGE", "NETDEV_POST_TYPE_CHANGE",
- "NETDEV_POST_INT", "NETDEV_UNREGISTER_FINAL", "NETDEV_RELEASE",
- "NETDEV_NOTIFY_PEERS", "NETDEV_JOIN"
- };
-
- if (event >= ARRAY_SIZE(event2str))
- return "UNKNOWN_NETDEV_EVENT";
- else
- return event2str[event];
+ kfree(buf);
}
+/* Start of netdev section */
static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev)
{
struct usnic_ib_ucontext *ctx;
@@ -117,7 +120,7 @@ static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev)
IB_QPS_ERR,
NULL);
if (status) {
- usnic_err("Failed to transistion qp grp %u from %s to %s\n",
+ usnic_err("Failed to transition qp grp %u from %s to %s\n",
qp_grp->grp_id,
usnic_ib_qp_grp_state_to_string
(cur_state),
@@ -141,47 +144,21 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
netdev = us_ibdev->netdev;
switch (event) {
case NETDEV_REBOOT:
- usnic_info("PF Reset on %s\n", us_ibdev->ib_dev.name);
+ usnic_info("PF Reset on %s\n", dev_name(&us_ibdev->ib_dev.dev));
usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
ib_event.event = IB_EVENT_PORT_ERR;
ib_event.device = &us_ibdev->ib_dev;
ib_event.element.port_num = 1;
ib_dispatch_event(&ib_event);
break;
- case NETDEV_UP:
- case NETDEV_DOWN:
- case NETDEV_CHANGE:
- if (!us_ibdev->ufdev->link_up &&
- netif_carrier_ok(netdev)) {
- usnic_fwd_carrier_up(us_ibdev->ufdev);
- usnic_info("Link UP on %s\n", us_ibdev->ib_dev.name);
- ib_event.event = IB_EVENT_PORT_ACTIVE;
- ib_event.device = &us_ibdev->ib_dev;
- ib_event.element.port_num = 1;
- ib_dispatch_event(&ib_event);
- } else if (us_ibdev->ufdev->link_up &&
- !netif_carrier_ok(netdev)) {
- usnic_fwd_carrier_down(us_ibdev->ufdev);
- usnic_info("Link DOWN on %s\n", us_ibdev->ib_dev.name);
- usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
- ib_event.event = IB_EVENT_PORT_ERR;
- ib_event.device = &us_ibdev->ib_dev;
- ib_event.element.port_num = 1;
- ib_dispatch_event(&ib_event);
- } else {
- usnic_dbg("Ignoring %s on %s\n",
- usnic_ib_netdev_event_to_string(event),
- us_ibdev->ib_dev.name);
- }
- break;
case NETDEV_CHANGEADDR:
if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr,
sizeof(us_ibdev->ufdev->mac))) {
usnic_dbg("Ignoring addr change on %s\n",
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
} else {
usnic_info(" %s old mac: %pM new mac: %pM\n",
- us_ibdev->ib_dev.name,
+ dev_name(&us_ibdev->ib_dev.dev),
us_ibdev->ufdev->mac,
netdev->dev_addr);
usnic_fwd_set_mac(us_ibdev->ufdev, netdev->dev_addr);
@@ -196,19 +173,63 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
case NETDEV_CHANGEMTU:
if (us_ibdev->ufdev->mtu != netdev->mtu) {
usnic_info("MTU Change on %s old: %u new: %u\n",
- us_ibdev->ib_dev.name,
+ dev_name(&us_ibdev->ib_dev.dev),
us_ibdev->ufdev->mtu, netdev->mtu);
usnic_fwd_set_mtu(us_ibdev->ufdev, netdev->mtu);
usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
} else {
usnic_dbg("Ignoring MTU change on %s\n",
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
}
break;
default:
usnic_dbg("Ignoring event %s on %s",
- usnic_ib_netdev_event_to_string(event),
- us_ibdev->ib_dev.name);
+ netdev_cmd_to_name(event),
+ dev_name(&us_ibdev->ib_dev.dev));
+ }
+ mutex_unlock(&us_ibdev->usdev_lock);
+}
+
+static void usnic_ib_handle_port_event(struct ib_device *ibdev,
+ struct net_device *netdev,
+ unsigned long event)
+{
+ struct usnic_ib_dev *us_ibdev =
+ container_of(ibdev, struct usnic_ib_dev, ib_dev);
+ struct ib_event ib_event;
+
+ mutex_lock(&us_ibdev->usdev_lock);
+ switch (event) {
+ case NETDEV_UP:
+ case NETDEV_DOWN:
+ case NETDEV_CHANGE:
+ if (!us_ibdev->ufdev->link_up &&
+ netif_carrier_ok(netdev)) {
+ usnic_fwd_carrier_up(us_ibdev->ufdev);
+ usnic_info("Link UP on %s\n",
+ dev_name(&us_ibdev->ib_dev.dev));
+ ib_event.event = IB_EVENT_PORT_ACTIVE;
+ ib_event.device = &us_ibdev->ib_dev;
+ ib_event.element.port_num = 1;
+ ib_dispatch_event(&ib_event);
+ } else if (us_ibdev->ufdev->link_up &&
+ !netif_carrier_ok(netdev)) {
+ usnic_fwd_carrier_down(us_ibdev->ufdev);
+ usnic_info("Link DOWN on %s\n",
+ dev_name(&us_ibdev->ib_dev.dev));
+ usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
+ ib_event.event = IB_EVENT_PORT_ERR;
+ ib_event.device = &us_ibdev->ib_dev;
+ ib_event.element.port_num = 1;
+ ib_dispatch_event(&ib_event);
+ } else {
+ usnic_dbg("Ignoring %s on %s\n",
+ netdev_cmd_to_name(event),
+ dev_name(&us_ibdev->ib_dev.dev));
+ }
+ break;
+ default:
+ break;
}
mutex_unlock(&us_ibdev->usdev_lock);
}
@@ -217,18 +238,17 @@ static int usnic_ib_netdevice_event(struct notifier_block *notifier,
unsigned long event, void *ptr)
{
struct usnic_ib_dev *us_ibdev;
+ struct ib_device *ibdev;
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
- mutex_lock(&usnic_ib_ibdev_list_lock);
- list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
- if (us_ibdev->netdev == netdev) {
- usnic_ib_handle_usdev_event(us_ibdev, event);
- break;
- }
- }
- mutex_unlock(&usnic_ib_ibdev_list_lock);
+ ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_USNIC);
+ if (!ibdev)
+ return NOTIFY_DONE;
+ us_ibdev = container_of(ibdev, struct usnic_ib_dev, ib_dev);
+ usnic_ib_handle_usdev_event(us_ibdev, event);
+ ib_device_put(ibdev);
return NOTIFY_DONE;
}
@@ -249,7 +269,7 @@ static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev,
switch (event) {
case NETDEV_DOWN:
usnic_info("%s via ip notifiers",
- usnic_ib_netdev_event_to_string(event));
+ netdev_cmd_to_name(event));
usnic_fwd_del_ipaddr(us_ibdev->ufdev);
usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
ib_event.event = IB_EVENT_GID_CHANGE;
@@ -260,7 +280,7 @@ static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev,
case NETDEV_UP:
usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address);
usnic_info("%s via ip notifiers: ip %pI4",
- usnic_ib_netdev_event_to_string(event),
+ netdev_cmd_to_name(event),
&us_ibdev->ufdev->inaddr);
ib_event.event = IB_EVENT_GID_CHANGE;
ib_event.device = &us_ibdev->ib_dev;
@@ -269,8 +289,8 @@ static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev,
break;
default:
usnic_info("Ignoring event %s on %s",
- usnic_ib_netdev_event_to_string(event),
- us_ibdev->ib_dev.name);
+ netdev_cmd_to_name(event),
+ dev_name(&us_ibdev->ib_dev.dev));
}
mutex_unlock(&us_ibdev->usdev_lock);
@@ -283,16 +303,15 @@ static int usnic_ib_inetaddr_event(struct notifier_block *notifier,
struct usnic_ib_dev *us_ibdev;
struct in_ifaddr *ifa = ptr;
struct net_device *netdev = ifa->ifa_dev->dev;
+ struct ib_device *ibdev;
- mutex_lock(&usnic_ib_ibdev_list_lock);
- list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
- if (us_ibdev->netdev == netdev) {
- usnic_ib_handle_inet_event(us_ibdev, event, ptr);
- break;
- }
- }
- mutex_unlock(&usnic_ib_ibdev_list_lock);
+ ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_USNIC);
+ if (!ibdev)
+ return NOTIFY_DONE;
+ us_ibdev = container_of(ibdev, struct usnic_ib_dev, ib_dev);
+ usnic_ib_handle_inet_event(us_ibdev, event, ptr);
+ ib_device_put(ibdev);
return NOTIFY_DONE;
}
static struct notifier_block usnic_ib_inetaddr_notifier = {
@@ -300,44 +319,90 @@ static struct notifier_block usnic_ib_inetaddr_notifier = {
};
/* End of inet section*/
-static int usnic_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int usnic_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
int err;
- err = usnic_ib_query_port(ibdev, port_num, &attr);
+ immutable->core_cap_flags = RDMA_CORE_PORT_USNIC;
+
+ err = ib_query_port(ibdev, port_num, &attr);
if (err)
return err;
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
return 0;
}
+static void usnic_get_dev_fw_str(struct ib_device *device, char *str)
+{
+ struct usnic_ib_dev *us_ibdev =
+ container_of(device, struct usnic_ib_dev, ib_dev);
+ struct ethtool_drvinfo info;
+
+ mutex_lock(&us_ibdev->usdev_lock);
+ us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
+ mutex_unlock(&us_ibdev->usdev_lock);
+
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", info.fw_version);
+}
+
+static const struct ib_device_ops usnic_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_USNIC,
+ .uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION,
+
+ .alloc_pd = usnic_ib_alloc_pd,
+ .alloc_ucontext = usnic_ib_alloc_ucontext,
+ .create_cq = usnic_ib_create_cq,
+ .create_qp = usnic_ib_create_qp,
+ .dealloc_pd = usnic_ib_dealloc_pd,
+ .dealloc_ucontext = usnic_ib_dealloc_ucontext,
+ .dereg_mr = usnic_ib_dereg_mr,
+ .destroy_cq = usnic_ib_destroy_cq,
+ .destroy_qp = usnic_ib_destroy_qp,
+ .device_group = &usnic_attr_group,
+ .get_dev_fw_str = usnic_get_dev_fw_str,
+ .get_link_layer = usnic_ib_port_link_layer,
+ .get_port_immutable = usnic_port_immutable,
+ .mmap = usnic_ib_mmap,
+ .modify_qp = usnic_ib_modify_qp,
+ .query_device = usnic_ib_query_device,
+ .query_gid = usnic_ib_query_gid,
+ .query_port = usnic_ib_query_port,
+ .query_qp = usnic_ib_query_qp,
+ .reg_user_mr = usnic_ib_reg_mr,
+ .report_port_event = usnic_ib_handle_port_event,
+ INIT_RDMA_OBJ_SIZE(ib_pd, usnic_ib_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_cq, usnic_ib_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_qp, usnic_ib_qp_grp, ibqp),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, usnic_ib_ucontext, ibucontext),
+};
+
/* Start of PF discovery section */
static void *usnic_ib_device_add(struct pci_dev *dev)
{
struct usnic_ib_dev *us_ibdev;
union ib_gid gid;
- struct in_ifaddr *in;
+ struct in_device *ind;
struct net_device *netdev;
+ int ret;
usnic_dbg("\n");
netdev = pci_get_drvdata(dev);
- us_ibdev = (struct usnic_ib_dev *)ib_alloc_device(sizeof(*us_ibdev));
- if (IS_ERR_OR_NULL(us_ibdev)) {
+ us_ibdev = ib_alloc_device(usnic_ib_dev, ib_dev);
+ if (!us_ibdev) {
usnic_err("Device %s context alloc failed\n",
netdev_name(pci_get_drvdata(dev)));
- return ERR_PTR(us_ibdev ? PTR_ERR(us_ibdev) : -EFAULT);
+ return NULL;
}
us_ibdev->ufdev = usnic_fwd_dev_alloc(dev);
- if (IS_ERR_OR_NULL(us_ibdev->ufdev)) {
- usnic_err("Failed to alloc ufdev for %s with err %ld\n",
- pci_name(dev), PTR_ERR(us_ibdev->ufdev));
+ if (!us_ibdev->ufdev) {
+ usnic_err("Failed to alloc ufdev for %s\n", pci_name(dev));
goto err_dealloc;
}
@@ -347,62 +412,19 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
us_ibdev->pdev = dev;
us_ibdev->netdev = pci_get_drvdata(dev);
- us_ibdev->ib_dev.owner = THIS_MODULE;
us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC_UDP;
us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT;
us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS;
- us_ibdev->ib_dev.dma_device = &dev->dev;
- us_ibdev->ib_dev.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION;
- strlcpy(us_ibdev->ib_dev.name, "usnic_%d", IB_DEVICE_NAME_MAX);
-
- us_ibdev->ib_dev.uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_OPEN_QP);
-
- us_ibdev->ib_dev.query_device = usnic_ib_query_device;
- us_ibdev->ib_dev.query_port = usnic_ib_query_port;
- us_ibdev->ib_dev.query_pkey = usnic_ib_query_pkey;
- us_ibdev->ib_dev.query_gid = usnic_ib_query_gid;
- us_ibdev->ib_dev.get_link_layer = usnic_ib_port_link_layer;
- us_ibdev->ib_dev.alloc_pd = usnic_ib_alloc_pd;
- us_ibdev->ib_dev.dealloc_pd = usnic_ib_dealloc_pd;
- us_ibdev->ib_dev.create_qp = usnic_ib_create_qp;
- us_ibdev->ib_dev.modify_qp = usnic_ib_modify_qp;
- us_ibdev->ib_dev.query_qp = usnic_ib_query_qp;
- us_ibdev->ib_dev.destroy_qp = usnic_ib_destroy_qp;
- us_ibdev->ib_dev.create_cq = usnic_ib_create_cq;
- us_ibdev->ib_dev.destroy_cq = usnic_ib_destroy_cq;
- us_ibdev->ib_dev.reg_user_mr = usnic_ib_reg_mr;
- us_ibdev->ib_dev.dereg_mr = usnic_ib_dereg_mr;
- us_ibdev->ib_dev.alloc_ucontext = usnic_ib_alloc_ucontext;
- us_ibdev->ib_dev.dealloc_ucontext = usnic_ib_dealloc_ucontext;
- us_ibdev->ib_dev.mmap = usnic_ib_mmap;
- us_ibdev->ib_dev.create_ah = usnic_ib_create_ah;
- us_ibdev->ib_dev.destroy_ah = usnic_ib_destroy_ah;
- us_ibdev->ib_dev.post_send = usnic_ib_post_send;
- us_ibdev->ib_dev.post_recv = usnic_ib_post_recv;
- us_ibdev->ib_dev.poll_cq = usnic_ib_poll_cq;
- us_ibdev->ib_dev.req_notify_cq = usnic_ib_req_notify_cq;
- us_ibdev->ib_dev.get_dma_mr = usnic_ib_get_dma_mr;
- us_ibdev->ib_dev.get_port_immutable = usnic_port_immutable;
-
-
- if (ib_register_device(&us_ibdev->ib_dev, NULL))
+ us_ibdev->ib_dev.dev.parent = &dev->dev;
+
+ ib_set_device_ops(&us_ibdev->ib_dev, &usnic_dev_ops);
+
+ ret = ib_device_set_netdev(&us_ibdev->ib_dev, us_ibdev->netdev, 1);
+ if (ret)
+ goto err_fwd_dealloc;
+
+ dma_set_max_seg_size(&dev->dev, SZ_2G);
+ if (ib_register_device(&us_ibdev->ib_dev, "usnic_%d", &dev->dev))
goto err_fwd_dealloc;
usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu);
@@ -410,9 +432,16 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
if (netif_carrier_ok(us_ibdev->netdev))
usnic_fwd_carrier_up(us_ibdev->ufdev);
- in = ((struct in_device *)(netdev->ip_ptr))->ifa_list;
- if (in != NULL)
- usnic_fwd_add_ipaddr(us_ibdev->ufdev, in->ifa_address);
+ rcu_read_lock();
+ ind = __in_dev_get_rcu(netdev);
+ if (ind) {
+ const struct in_ifaddr *ifa;
+
+ ifa = rcu_dereference(ind->ifa_list);
+ if (ifa)
+ usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address);
+ }
+ rcu_read_unlock();
usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr,
us_ibdev->ufdev->inaddr, &gid.raw[0]);
@@ -421,9 +450,9 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
kref_init(&us_ibdev->vf_cnt);
usnic_info("Added ibdev: %s netdev: %s with mac %pM Link: %u MTU: %u\n",
- us_ibdev->ib_dev.name, netdev_name(us_ibdev->netdev),
- us_ibdev->ufdev->mac, us_ibdev->ufdev->link_up,
- us_ibdev->ufdev->mtu);
+ dev_name(&us_ibdev->ib_dev.dev),
+ netdev_name(us_ibdev->netdev), us_ibdev->ufdev->mac,
+ us_ibdev->ufdev->link_up, us_ibdev->ufdev->mtu);
return us_ibdev;
err_fwd_dealloc:
@@ -436,7 +465,7 @@ err_dealloc:
static void usnic_ib_device_remove(struct usnic_ib_dev *us_ibdev)
{
- usnic_info("Unregistering %s\n", us_ibdev->ib_dev.name);
+ usnic_info("Unregistering %s\n", dev_name(&us_ibdev->ib_dev.dev));
usnic_ib_sysfs_unregister_usdev(us_ibdev);
usnic_fwd_dev_free(us_ibdev->ufdev);
ib_unregister_device(&us_ibdev->ib_dev);
@@ -455,15 +484,17 @@ static void usnic_ib_undiscover_pf(struct kref *kref)
&usnic_ib_ibdev_list, ib_dev_link) {
if (us_ibdev->pdev == dev) {
list_del(&us_ibdev->ib_dev_link);
- usnic_ib_device_remove(us_ibdev);
found = true;
break;
}
}
- WARN(!found, "Failed to remove PF %s\n", pci_name(dev));
mutex_unlock(&usnic_ib_ibdev_list_lock);
+ if (found)
+ usnic_ib_device_remove(us_ibdev);
+ else
+ WARN(1, "Failed to remove PF %s\n", pci_name(dev));
}
static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic)
@@ -486,8 +517,8 @@ static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic)
}
us_ibdev = usnic_ib_device_add(parent_pci);
- if (IS_ERR_OR_NULL(us_ibdev)) {
- us_ibdev = us_ibdev ? us_ibdev : ERR_PTR(-EFAULT);
+ if (!us_ibdev) {
+ us_ibdev = ERR_PTR(-EFAULT);
goto out;
}
@@ -520,6 +551,11 @@ static int usnic_ib_pci_probe(struct pci_dev *pdev,
struct usnic_ib_vf *vf;
enum usnic_vnic_res_type res_type;
+ if (!device_iommu_mapped(&pdev->dev)) {
+ usnic_err("IOMMU required but not present or enabled. USNIC QPs will not function w/o enabling IOMMU\n");
+ return -EPERM;
+ }
+
vf = kzalloc(sizeof(*vf), GFP_KERNEL);
if (!vf)
return -ENOMEM;
@@ -550,15 +586,15 @@ static int usnic_ib_pci_probe(struct pci_dev *pdev,
}
pf = usnic_ib_discover_pf(vf->vnic);
- if (IS_ERR_OR_NULL(pf)) {
- usnic_err("Failed to discover pf of vnic %s with err%ld\n",
- pci_name(pdev), PTR_ERR(pf));
- err = pf ? PTR_ERR(pf) : -EFAULT;
+ if (IS_ERR(pf)) {
+ err = PTR_ERR(pf);
+ usnic_err("Failed to discover pf of vnic %s with err%d\n",
+ pci_name(pdev), err);
goto out_clean_vnic;
}
vf->pf = pf;
- spin_lock_init(&vf->lock);
+ mutex_init(&vf->lock);
mutex_lock(&pf->usdev_lock);
list_add_tail(&vf->link, &pf->vf_dev_list);
/*
@@ -575,7 +611,7 @@ static int usnic_ib_pci_probe(struct pci_dev *pdev,
mutex_unlock(&pf->usdev_lock);
usnic_info("Registering usnic VF %s into PF %s\n", pci_name(pdev),
- pf->ib_dev.name);
+ dev_name(&pf->ib_dev.dev));
usnic_ib_log_vf(vf);
return 0;
@@ -583,7 +619,6 @@ out_clean_vnic:
usnic_vnic_free(vf->vnic);
out_release_regions:
pci_set_drvdata(pdev, NULL);
- pci_clear_master(pdev);
pci_release_regions(pdev);
out_disable_device:
pci_disable_device(pdev);
@@ -604,7 +639,6 @@ static void usnic_ib_pci_remove(struct pci_dev *pdev)
kref_put(&pf->vf_cnt, usnic_ib_undiscover_pf);
usnic_vnic_free(vf->vnic);
pci_set_drvdata(pdev, NULL);
- pci_clear_master(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
kfree(vf);
@@ -628,13 +662,8 @@ static int __init usnic_ib_init(void)
printk_once(KERN_INFO "%s", usnic_version);
- err = usnic_uiom_init(DRV_NAME);
+ err = pci_register_driver(&usnic_ib_pci_driver);
if (err) {
- usnic_err("Unable to initalize umem with err %d\n", err);
- return err;
- }
-
- if (pci_register_driver(&usnic_ib_pci_driver)) {
usnic_err("Unable to register with PCI\n");
goto out_umem_fini;
}
@@ -668,7 +697,6 @@ out_unreg_netdev_notifier:
out_pci_unreg:
pci_unregister_driver(&usnic_ib_pci_driver);
out_umem_fini:
- usnic_uiom_fini();
return err;
}
@@ -681,13 +709,11 @@ static void __exit usnic_ib_destroy(void)
unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
pci_unregister_driver(&usnic_ib_pci_driver);
- usnic_uiom_fini();
}
MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver");
MODULE_AUTHOR("Upinder Malhi <umalhi@cisco.com>");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
module_param(usnic_log_lvl, uint, S_IRUGO | S_IWUSR);
module_param(usnic_ib_share_vf, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(usnic_log_lvl, " Off=0, Err=1, Info=2, Debug=3");
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
index db3588df3546..59bfbfaee325 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
@@ -1,9 +1,24 @@
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
@@ -17,7 +32,6 @@
*/
#include <linux/bug.h>
#include <linux/errno.h>
-#include <linux/module.h>
#include <linux/spinlock.h>
#include "usnic_log.h"
@@ -49,7 +63,7 @@ const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
case IB_QPS_ERR:
return "ERR";
default:
- return "UNKOWN STATE";
+ return "UNKNOWN STATE";
}
}
@@ -102,10 +116,10 @@ static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
res_chunk = get_qp_res_chunk(qp_grp);
- if (IS_ERR_OR_NULL(res_chunk)) {
+ if (IS_ERR(res_chunk)) {
usnic_err("Unable to get qp res with err %ld\n",
PTR_ERR(res_chunk));
- return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
+ return PTR_ERR(res_chunk);
}
for (i = 0; i < res_chunk->cnt; i++) {
@@ -143,10 +157,10 @@ static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
res_chunk = get_qp_res_chunk(qp_grp);
- if (IS_ERR_OR_NULL(res_chunk)) {
+ if (IS_ERR(res_chunk)) {
usnic_err("Unable to get qp res with err %ld\n",
PTR_ERR(res_chunk));
- return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
+ return PTR_ERR(res_chunk);
}
for (i = 0; i < res_chunk->cnt; i++) {
@@ -171,11 +185,11 @@ static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
struct usnic_vnic_res_chunk *res_chunk;
res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
- if (IS_ERR_OR_NULL(res_chunk)) {
+ if (IS_ERR(res_chunk)) {
usnic_err("Unable to get %s with err %ld\n",
usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
PTR_ERR(res_chunk));
- return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
+ return PTR_ERR(res_chunk);
}
uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
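
The IS_ERR_OR_NULL() to IS_ERR() changes above encode a simple contract: a function that reports failure through ERR_PTR() never also returns NULL, so the combined check only hides which case is actually possible. A short sketch of the convention with invented names:

#include <linux/err.h>
#include <linux/slab.h>

struct example_res { int id; };

static struct example_res *example_get(void)
{
	struct example_res *r = kzalloc(sizeof(*r), GFP_KERNEL);

	if (!r)
		return ERR_PTR(-ENOMEM);	/* failure is always an ERR_PTR */
	return r;
}

static int example_use(void)
{
	struct example_res *r = example_get();

	if (IS_ERR(r))				/* IS_ERR_OR_NULL() not needed */
		return PTR_ERR(r);
	kfree(r);
	return 0;
}
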
@@ -213,16 +227,14 @@ create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,
flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
if (IS_ERR_OR_NULL(flow)) {
- usnic_err("Unable to alloc flow failed with err %ld\n",
- PTR_ERR(flow));
err = flow ? PTR_ERR(flow) : -EFAULT;
goto out_unreserve_port;
}
/* Create Flow Handle */
qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
- if (IS_ERR_OR_NULL(qp_flow)) {
- err = qp_flow ? PTR_ERR(qp_flow) : -ENOMEM;
+ if (!qp_flow) {
+ err = -ENOMEM;
goto out_dealloc_flow;
}
qp_flow->flow = flow;
@@ -288,16 +300,14 @@ create_udp_flow(struct usnic_ib_qp_grp *qp_grp,
flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
if (IS_ERR_OR_NULL(flow)) {
- usnic_err("Unable to alloc flow failed with err %ld\n",
- PTR_ERR(flow));
err = flow ? PTR_ERR(flow) : -EFAULT;
goto out_put_sock;
}
/* Create qp_flow */
qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
- if (IS_ERR_OR_NULL(qp_flow)) {
- err = qp_flow ? PTR_ERR(qp_flow) : -ENOMEM;
+ if (!qp_flow) {
+ err = -ENOMEM;
goto out_dealloc_flow;
}
qp_flow->flow = flow;
@@ -381,14 +391,12 @@ int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
void *data)
{
int status = 0;
- int vnic_idx;
struct ib_event ib_event;
enum ib_qp_state old_state;
struct usnic_transport_spec *trans_spec;
struct usnic_ib_qp_grp_flow *qp_flow;
old_state = qp_grp->state;
- vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
trans_spec = (struct usnic_transport_spec *) data;
spin_lock(&qp_grp->lock);
@@ -506,7 +514,7 @@ int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
if (!status) {
qp_grp->state = new_state;
- usnic_info("Transistioned %u from %s to %s",
+ usnic_info("Transitioned %u from %s to %s",
qp_grp->grp_id,
usnic_ib_qp_grp_state_to_string(old_state),
usnic_ib_qp_grp_state_to_string(new_state));
@@ -534,7 +542,7 @@ alloc_res_chunk_list(struct usnic_vnic *vnic,
/* Do Nothing */
}
- res_chunk_list = kzalloc(sizeof(*res_chunk_list)*(res_lst_sz+1),
+ res_chunk_list = kcalloc(res_lst_sz + 1, sizeof(*res_chunk_list),
GFP_ATOMIC);
if (!res_chunk_list)
return ERR_PTR(-ENOMEM);
@@ -560,7 +568,7 @@ alloc_res_chunk_list(struct usnic_vnic *vnic,
return res_chunk_list;
out_free_res:
- for (i--; i > 0; i--)
+ for (i--; i >= 0; i--)
usnic_vnic_put_resources(res_chunk_list[i]);
kfree(res_chunk_list);
return ERR_PTR(err);
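
The loop-bound change above fixes an off-by-one in the error unwind: after a failure at index i, every slot acquired at indices 0..i-1 must be released, and the old "i > 0" condition skipped slot 0. A tiny standalone illustration with invented names:

#include <stdio.h>

static void put_resource(int idx)
{
	printf("put %d\n", idx);
}

/* Release slots 0..i-1 after slot i failed to allocate. */
static void unwind(int i)
{
	for (i--; i >= 0; i--)	/* ">= 0" so that slot 0 is released too */
		put_resource(i);
}

int main(void)
{
	unwind(3);	/* prints: put 2, put 1, put 0 */
	return 0;
}
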
@@ -656,13 +664,12 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
return 0;
}
-struct usnic_ib_qp_grp *
-usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
- struct usnic_ib_pd *pd,
- struct usnic_vnic_res_spec *res_spec,
- struct usnic_transport_spec *transport_spec)
+int usnic_ib_qp_grp_create(struct usnic_ib_qp_grp *qp_grp,
+ struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
+ struct usnic_ib_pd *pd,
+ struct usnic_vnic_res_spec *res_spec,
+ struct usnic_transport_spec *transport_spec)
{
- struct usnic_ib_qp_grp *qp_grp;
int err;
enum usnic_transport_type transport = transport_spec->trans_type;
struct usnic_ib_qp_grp_flow *qp_flow;
@@ -672,27 +679,18 @@ usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
err = usnic_vnic_res_spec_satisfied(&min_transport_spec[transport],
res_spec);
if (err) {
- usnic_err("Spec does not meet miniumum req for transport %d\n",
+ usnic_err("Spec does not meet minimum req for transport %d\n",
transport);
log_spec(res_spec);
- return ERR_PTR(err);
- }
-
- qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
- if (!qp_grp) {
- usnic_err("Unable to alloc qp_grp - Out of memory\n");
- return NULL;
+ return err;
}
qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
qp_grp);
- if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
- err = qp_grp->res_chunk_list ?
- PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
- usnic_err("Unable to alloc res for %d with err %d\n",
- qp_grp->grp_id, err);
- goto out_free_qp_grp;
- }
+ if (IS_ERR_OR_NULL(qp_grp->res_chunk_list))
+ return qp_grp->res_chunk_list ?
+ PTR_ERR(qp_grp->res_chunk_list) :
+ -ENOMEM;
err = qp_grp_and_vf_bind(vf, pd, qp_grp);
if (err)
@@ -719,7 +717,7 @@ usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
usnic_ib_sysfs_qpn_add(qp_grp);
- return qp_grp;
+ return 0;
out_release_flow:
release_and_remove_flow(qp_flow);
@@ -727,10 +725,7 @@ out_qp_grp_vf_unbind:
qp_grp_and_vf_unbind(qp_grp);
out_free_res:
free_qp_grp_res(qp_grp->res_chunk_list);
-out_free_qp_grp:
- kfree(qp_grp);
-
- return ERR_PTR(err);
+ return err;
}
void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
@@ -743,7 +738,6 @@ void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
usnic_ib_sysfs_qpn_remove(qp_grp);
qp_grp_and_vf_unbind(qp_grp);
free_qp_grp_res(qp_grp->res_chunk_list);
- kfree(qp_grp);
}
struct usnic_vnic_res_chunk*
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h
index b0aafe8db0c3..62e732be6736 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h
@@ -1,9 +1,24 @@
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
@@ -69,39 +84,16 @@ struct usnic_ib_qp_grp_flow {
char dentry_name[32];
};
-static const struct
-usnic_vnic_res_spec min_transport_spec[USNIC_TRANSPORT_MAX] = {
- { /*USNIC_TRANSPORT_UNKNOWN*/
- .resources = {
- {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,},
- },
- },
- { /*USNIC_TRANSPORT_ROCE_CUSTOM*/
- .resources = {
- {.type = USNIC_VNIC_RES_TYPE_WQ, .cnt = 1,},
- {.type = USNIC_VNIC_RES_TYPE_RQ, .cnt = 1,},
- {.type = USNIC_VNIC_RES_TYPE_CQ, .cnt = 1,},
- {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,},
- },
- },
- { /*USNIC_TRANSPORT_IPV4_UDP*/
- .resources = {
- {.type = USNIC_VNIC_RES_TYPE_WQ, .cnt = 1,},
- {.type = USNIC_VNIC_RES_TYPE_RQ, .cnt = 1,},
- {.type = USNIC_VNIC_RES_TYPE_CQ, .cnt = 1,},
- {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,},
- },
- },
-};
+extern const struct usnic_vnic_res_spec min_transport_spec[USNIC_TRANSPORT_MAX];
const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state);
int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz);
int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz);
-struct usnic_ib_qp_grp *
-usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
- struct usnic_ib_pd *pd,
- struct usnic_vnic_res_spec *res_spec,
- struct usnic_transport_spec *trans_spec);
+int usnic_ib_qp_grp_create(struct usnic_ib_qp_grp *qp,
+ struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
+ struct usnic_ib_pd *pd,
+ struct usnic_vnic_res_spec *res_spec,
+ struct usnic_transport_spec *trans_spec);
void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp);
int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
enum ib_qp_state new_state,
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
index 27dc67c1689f..fdb63a8fb997 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -1,9 +1,24 @@
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
@@ -16,7 +31,6 @@
*
*/
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
@@ -28,61 +42,39 @@
#include "usnic_ib_qp_grp.h"
#include "usnic_vnic.h"
#include "usnic_ib_verbs.h"
+#include "usnic_ib_sysfs.h"
#include "usnic_log.h"
-static ssize_t usnic_ib_show_fw_ver(struct device *device,
- struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct usnic_ib_dev *us_ibdev =
- container_of(device, struct usnic_ib_dev, ib_dev.dev);
- struct ethtool_drvinfo info;
-
- mutex_lock(&us_ibdev->usdev_lock);
- us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
- mutex_unlock(&us_ibdev->usdev_lock);
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", info.fw_version);
-}
-
-static ssize_t usnic_ib_show_board(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct usnic_ib_dev *us_ibdev =
- container_of(device, struct usnic_ib_dev, ib_dev.dev);
+ rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);
unsigned short subsystem_device_id;
mutex_lock(&us_ibdev->usdev_lock);
subsystem_device_id = us_ibdev->pdev->subsystem_device;
mutex_unlock(&us_ibdev->usdev_lock);
- return scnprintf(buf, PAGE_SIZE, "%hu\n", subsystem_device_id);
+ return sysfs_emit(buf, "%u\n", subsystem_device_id);
}
+static DEVICE_ATTR_RO(board_id);
/*
* Report the configuration for this PF
*/
static ssize_t
-usnic_ib_show_config(struct device *device, struct device_attribute *attr,
- char *buf)
+config_show(struct device *device, struct device_attribute *attr, char *buf)
{
- struct usnic_ib_dev *us_ibdev;
- char *ptr;
- unsigned left;
- unsigned n;
+ struct usnic_ib_dev *us_ibdev =
+ rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);
enum usnic_vnic_res_type res_type;
-
- us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
-
- /* Buffer space limit is 1 page */
- ptr = buf;
- left = PAGE_SIZE;
+ int len;
mutex_lock(&us_ibdev->usdev_lock);
- if (atomic_read(&us_ibdev->vf_cnt.refcount) > 0) {
+ if (kref_read(&us_ibdev->vf_cnt) > 0) {
char *busname;
-
+ char *sep = "";
/*
* bus name seems to come with annoying prefix.
* Remove it if it is predictable
@@ -91,108 +83,95 @@ usnic_ib_show_config(struct device *device, struct device_attribute *attr,
if (strncmp(busname, "PCI Bus ", 8) == 0)
busname += 8;
- n = scnprintf(ptr, left,
- "%s: %s:%d.%d, %s, %pM, %u VFs\n Per VF:",
- us_ibdev->ib_dev.name,
- busname,
- PCI_SLOT(us_ibdev->pdev->devfn),
- PCI_FUNC(us_ibdev->pdev->devfn),
- netdev_name(us_ibdev->netdev),
- us_ibdev->ufdev->mac,
- atomic_read(&us_ibdev->vf_cnt.refcount));
- UPDATE_PTR_LEFT(n, ptr, left);
+ len = sysfs_emit(buf, "%s: %s:%d.%d, %s, %pM, %u VFs\n",
+ dev_name(&us_ibdev->ib_dev.dev),
+ busname,
+ PCI_SLOT(us_ibdev->pdev->devfn),
+ PCI_FUNC(us_ibdev->pdev->devfn),
+ netdev_name(us_ibdev->netdev),
+ us_ibdev->ufdev->mac,
+ kref_read(&us_ibdev->vf_cnt));
+ len += sysfs_emit_at(buf, len, " Per VF:");
for (res_type = USNIC_VNIC_RES_TYPE_EOL;
- res_type < USNIC_VNIC_RES_TYPE_MAX;
- res_type++) {
+ res_type < USNIC_VNIC_RES_TYPE_MAX; res_type++) {
if (us_ibdev->vf_res_cnt[res_type] == 0)
continue;
- n = scnprintf(ptr, left, " %d %s%s",
- us_ibdev->vf_res_cnt[res_type],
- usnic_vnic_res_type_to_str(res_type),
- (res_type < (USNIC_VNIC_RES_TYPE_MAX - 1)) ?
- "," : "");
- UPDATE_PTR_LEFT(n, ptr, left);
+ len += sysfs_emit_at(buf, len, "%s %d %s",
+ sep,
+ us_ibdev->vf_res_cnt[res_type],
+ usnic_vnic_res_type_to_str(res_type));
+ sep = ",";
}
- n = scnprintf(ptr, left, "\n");
- UPDATE_PTR_LEFT(n, ptr, left);
+ len += sysfs_emit_at(buf, len, "\n");
} else {
- n = scnprintf(ptr, left, "%s: no VFs\n",
- us_ibdev->ib_dev.name);
- UPDATE_PTR_LEFT(n, ptr, left);
+ len = sysfs_emit(buf, "%s: no VFs\n",
+ dev_name(&us_ibdev->ib_dev.dev));
}
+
mutex_unlock(&us_ibdev->usdev_lock);
- return ptr - buf;
+ return len;
}
+static DEVICE_ATTR_RO(config);
static ssize_t
-usnic_ib_show_iface(struct device *device, struct device_attribute *attr,
- char *buf)
+iface_show(struct device *device, struct device_attribute *attr, char *buf)
{
- struct usnic_ib_dev *us_ibdev;
-
- us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
+ struct usnic_ib_dev *us_ibdev =
+ rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);
- return scnprintf(buf, PAGE_SIZE, "%s\n",
- netdev_name(us_ibdev->netdev));
+ return sysfs_emit(buf, "%s\n", netdev_name(us_ibdev->netdev));
}
+static DEVICE_ATTR_RO(iface);
static ssize_t
-usnic_ib_show_max_vf(struct device *device, struct device_attribute *attr,
- char *buf)
+max_vf_show(struct device *device, struct device_attribute *attr, char *buf)
{
- struct usnic_ib_dev *us_ibdev;
-
- us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
+ struct usnic_ib_dev *us_ibdev =
+ rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);
- return scnprintf(buf, PAGE_SIZE, "%u\n",
- atomic_read(&us_ibdev->vf_cnt.refcount));
+ return sysfs_emit(buf, "%u\n", kref_read(&us_ibdev->vf_cnt));
}
+static DEVICE_ATTR_RO(max_vf);
static ssize_t
-usnic_ib_show_qp_per_vf(struct device *device, struct device_attribute *attr,
- char *buf)
+qp_per_vf_show(struct device *device, struct device_attribute *attr, char *buf)
{
- struct usnic_ib_dev *us_ibdev;
+ struct usnic_ib_dev *us_ibdev =
+ rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);
int qp_per_vf;
- us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
- return scnprintf(buf, PAGE_SIZE,
- "%d\n", qp_per_vf);
+ return sysfs_emit(buf, "%d\n", qp_per_vf);
}
+static DEVICE_ATTR_RO(qp_per_vf);
static ssize_t
-usnic_ib_show_cq_per_vf(struct device *device, struct device_attribute *attr,
- char *buf)
+cq_per_vf_show(struct device *device, struct device_attribute *attr, char *buf)
{
- struct usnic_ib_dev *us_ibdev;
-
- us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
+ struct usnic_ib_dev *us_ibdev =
+ rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n",
- us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ]);
+ return sysfs_emit(buf, "%d\n",
+ us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ]);
}
+static DEVICE_ATTR_RO(cq_per_vf);
+
+static struct attribute *usnic_class_attributes[] = {
+ &dev_attr_board_id.attr,
+ &dev_attr_config.attr,
+ &dev_attr_iface.attr,
+ &dev_attr_max_vf.attr,
+ &dev_attr_qp_per_vf.attr,
+ &dev_attr_cq_per_vf.attr,
+ NULL
+};
-static DEVICE_ATTR(fw_ver, S_IRUGO, usnic_ib_show_fw_ver, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, usnic_ib_show_board, NULL);
-static DEVICE_ATTR(config, S_IRUGO, usnic_ib_show_config, NULL);
-static DEVICE_ATTR(iface, S_IRUGO, usnic_ib_show_iface, NULL);
-static DEVICE_ATTR(max_vf, S_IRUGO, usnic_ib_show_max_vf, NULL);
-static DEVICE_ATTR(qp_per_vf, S_IRUGO, usnic_ib_show_qp_per_vf, NULL);
-static DEVICE_ATTR(cq_per_vf, S_IRUGO, usnic_ib_show_cq_per_vf, NULL);
-
-static struct device_attribute *usnic_class_attributes[] = {
- &dev_attr_fw_ver,
- &dev_attr_board_id,
- &dev_attr_config,
- &dev_attr_iface,
- &dev_attr_max_vf,
- &dev_attr_qp_per_vf,
- &dev_attr_cq_per_vf,
+const struct attribute_group usnic_attr_group = {
+ .attrs = usnic_class_attributes,
};
struct qpn_attribute {
@@ -224,43 +203,35 @@ struct qpn_attribute qpn_attr_##NAME = __ATTR_RO(NAME)
static ssize_t context_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "0x%p\n", qp_grp->ctx);
+ return sysfs_emit(buf, "0x%p\n", qp_grp->ctx);
}
static ssize_t summary_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
{
- int i, j, n;
- int left;
- char *ptr;
+ int i, j;
struct usnic_vnic_res_chunk *res_chunk;
struct usnic_vnic_res *vnic_res;
+ int len;
- left = PAGE_SIZE;
- ptr = buf;
-
- n = scnprintf(ptr, left,
- "QPN: %d State: (%s) PID: %u VF Idx: %hu ",
- qp_grp->ibqp.qp_num,
- usnic_ib_qp_grp_state_to_string(qp_grp->state),
- qp_grp->owner_pid,
- usnic_vnic_get_index(qp_grp->vf->vnic));
- UPDATE_PTR_LEFT(n, ptr, left);
+ len = sysfs_emit(buf, "QPN: %d State: (%s) PID: %u VF Idx: %hu",
+ qp_grp->ibqp.qp_num,
+ usnic_ib_qp_grp_state_to_string(qp_grp->state),
+ qp_grp->owner_pid,
+ usnic_vnic_get_index(qp_grp->vf->vnic));
for (i = 0; qp_grp->res_chunk_list[i]; i++) {
res_chunk = qp_grp->res_chunk_list[i];
for (j = 0; j < res_chunk->cnt; j++) {
vnic_res = res_chunk->res[j];
- n = scnprintf(ptr, left, "%s[%d] ",
+ len += sysfs_emit_at(buf, len, " %s[%d]",
usnic_vnic_res_type_to_str(vnic_res->type),
vnic_res->vnic_idx);
- UPDATE_PTR_LEFT(n, ptr, left);
}
}
- n = scnprintf(ptr, left, "\n");
- UPDATE_PTR_LEFT(n, ptr, left);
+ len += sysfs_emit_at(buf, len, "\n");
- return ptr - buf;
+ return len;
}
static QPN_ATTR_RO(context);
@@ -271,26 +242,15 @@ static struct attribute *usnic_ib_qpn_default_attrs[] = {
&qpn_attr_summary.attr,
NULL
};
+ATTRIBUTE_GROUPS(usnic_ib_qpn_default);
static struct kobj_type usnic_ib_qpn_type = {
.sysfs_ops = &usnic_ib_qpn_sysfs_ops,
- .default_attrs = usnic_ib_qpn_default_attrs
+ .default_groups = usnic_ib_qpn_default_groups,
};
int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev)
{
- int i;
- int err;
- for (i = 0; i < ARRAY_SIZE(usnic_class_attributes); ++i) {
- err = device_create_file(&us_ibdev->ib_dev.dev,
- usnic_class_attributes[i]);
- if (err) {
- usnic_err("Failed to create device file %d for %s eith err %d",
- i, us_ibdev->ib_dev.name, err);
- return -EINVAL;
- }
- }
-
/* create kernel object for looking at individual QPs */
kobject_get(&us_ibdev->ib_dev.dev.kobj);
us_ibdev->qpn_kobj = kobject_create_and_add("qpn",
@@ -305,12 +265,6 @@ int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev)
void usnic_ib_sysfs_unregister_usdev(struct usnic_ib_dev *us_ibdev)
{
- int i;
- for (i = 0; i < ARRAY_SIZE(usnic_class_attributes); ++i) {
- device_remove_file(&us_ibdev->ib_dev.dev,
- usnic_class_attributes[i]);
- }
-
kobject_put(us_ibdev->qpn_kobj);
}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h
index 0d09b493cd02..b1f064cec850 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h
@@ -1,9 +1,24 @@
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
@@ -26,4 +41,6 @@ void usnic_ib_sysfs_unregister_usdev(struct usnic_ib_dev *us_ibdev);
void usnic_ib_sysfs_qpn_add(struct usnic_ib_qp_grp *qp_grp);
void usnic_ib_sysfs_qpn_remove(struct usnic_ib_qp_grp *qp_grp);
+extern const struct attribute_group usnic_attr_group;
+
#endif /* !USNIC_IB_SYSFS_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 7df43827cb29..ae5df96589d9 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -1,9 +1,24 @@
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
@@ -15,18 +30,19 @@
* SOFTWARE.
*
*/
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
+#include <rdma/uverbs_ioctl.h>
#include "usnic_abi.h"
#include "usnic_ib.h"
#include "usnic_common_util.h"
#include "usnic_ib_qp_grp.h"
+#include "usnic_ib_verbs.h"
#include "usnic_fwd.h"
#include "usnic_log.h"
#include "usnic_uiom.h"
@@ -34,9 +50,33 @@
#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM
+const struct usnic_vnic_res_spec min_transport_spec[USNIC_TRANSPORT_MAX] = {
+ { /*USNIC_TRANSPORT_UNKNOWN*/
+ .resources = {
+ {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,},
+ },
+ },
+ { /*USNIC_TRANSPORT_ROCE_CUSTOM*/
+ .resources = {
+ {.type = USNIC_VNIC_RES_TYPE_WQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_RQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_CQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,},
+ },
+ },
+ { /*USNIC_TRANSPORT_IPV4_UDP*/
+ .resources = {
+ {.type = USNIC_VNIC_RES_TYPE_WQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_RQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_CQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,},
+ },
+ },
+};
+
static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver)
{
- *fw_ver = (u64) *fw_ver_str;
+ *fw_ver = *((u64 *)fw_ver_str);
}
static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
@@ -72,12 +112,12 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
resp.bar_len = bar->len;
chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
- if (IS_ERR_OR_NULL(chunk)) {
+ if (IS_ERR(chunk)) {
usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
qp_grp->grp_id,
PTR_ERR(chunk));
- return chunk ? PTR_ERR(chunk) : -ENOMEM;
+ return PTR_ERR(chunk);
}
WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
@@ -86,12 +126,12 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
resp.rq_idx[i] = chunk->res[i]->vnic_idx;
chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
- if (IS_ERR_OR_NULL(chunk)) {
+ if (IS_ERR(chunk)) {
usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
qp_grp->grp_id,
PTR_ERR(chunk));
- return chunk ? PTR_ERR(chunk) : -ENOMEM;
+ return PTR_ERR(chunk);
}
WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
@@ -100,12 +140,12 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
resp.wq_idx[i] = chunk->res[i]->vnic_idx;
chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
- if (IS_ERR_OR_NULL(chunk)) {
+ if (IS_ERR(chunk)) {
usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
qp_grp->grp_id,
PTR_ERR(chunk));
- return chunk ? PTR_ERR(chunk) : -ENOMEM;
+ return PTR_ERR(chunk);
}
WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
@@ -119,83 +159,91 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
err = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (err) {
- usnic_err("Failed to copy udata for %s", us_ibdev->ib_dev.name);
+ usnic_err("Failed to copy udata for %s",
+ dev_name(&us_ibdev->ib_dev.dev));
return err;
}
return 0;
}
-static struct usnic_ib_qp_grp*
-find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
- struct usnic_ib_pd *pd,
- struct usnic_transport_spec *trans_spec,
- struct usnic_vnic_res_spec *res_spec)
+static int
+find_free_vf_and_create_qp_grp(struct ib_qp *qp,
+ struct usnic_transport_spec *trans_spec,
+ struct usnic_vnic_res_spec *res_spec)
{
+ struct usnic_ib_dev *us_ibdev = to_usdev(qp->device);
+ struct usnic_ib_pd *pd = to_upd(qp->pd);
struct usnic_ib_vf *vf;
struct usnic_vnic *vnic;
- struct usnic_ib_qp_grp *qp_grp;
+ struct usnic_ib_qp_grp *qp_grp = to_uqp_grp(qp);
struct device *dev, **dev_list;
- int i, found = 0;
+ int i, ret;
BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));
if (list_empty(&us_ibdev->vf_dev_list)) {
usnic_info("No vfs to allocate\n");
- return NULL;
+ return -ENOMEM;
}
if (usnic_ib_share_vf) {
/* Try to find resources on a used vf which is in pd */
dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
+ if (IS_ERR(dev_list))
+ return PTR_ERR(dev_list);
for (i = 0; dev_list[i]; i++) {
dev = dev_list[i];
- vf = pci_get_drvdata(to_pci_dev(dev));
- spin_lock(&vf->lock);
+ vf = dev_get_drvdata(dev);
+ mutex_lock(&vf->lock);
vnic = vf->vnic;
if (!usnic_vnic_check_room(vnic, res_spec)) {
usnic_dbg("Found used vnic %s from %s\n",
- us_ibdev->ib_dev.name,
+ dev_name(&us_ibdev->ib_dev.dev),
pci_name(usnic_vnic_get_pdev(
vnic)));
- found = 1;
- break;
+ ret = usnic_ib_qp_grp_create(qp_grp,
+ us_ibdev->ufdev,
+ vf, pd, res_spec,
+ trans_spec);
+
+ mutex_unlock(&vf->lock);
+ goto qp_grp_check;
}
- spin_unlock(&vf->lock);
+ mutex_unlock(&vf->lock);
}
usnic_uiom_free_dev_list(dev_list);
+ dev_list = NULL;
}
- if (!found) {
- /* Try to find resources on an unused vf */
- list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
- spin_lock(&vf->lock);
- vnic = vf->vnic;
- if (vf->qp_grp_ref_cnt == 0 &&
- usnic_vnic_check_room(vnic, res_spec) == 0) {
- found = 1;
- break;
- }
- spin_unlock(&vf->lock);
+ /* Try to find resources on an unused vf */
+ list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
+ mutex_lock(&vf->lock);
+ vnic = vf->vnic;
+ if (vf->qp_grp_ref_cnt == 0 &&
+ usnic_vnic_check_room(vnic, res_spec) == 0) {
+ ret = usnic_ib_qp_grp_create(qp_grp, us_ibdev->ufdev,
+ vf, pd, res_spec,
+ trans_spec);
+
+ mutex_unlock(&vf->lock);
+ goto qp_grp_check;
}
+ mutex_unlock(&vf->lock);
}
- if (!found) {
- usnic_info("No free qp grp found on %s\n",
- us_ibdev->ib_dev.name);
- return ERR_PTR(-ENOMEM);
- }
+ usnic_info("No free qp grp found on %s\n",
+ dev_name(&us_ibdev->ib_dev.dev));
+ return -ENOMEM;
- qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf, pd, res_spec,
- trans_spec);
- spin_unlock(&vf->lock);
- if (IS_ERR_OR_NULL(qp_grp)) {
+qp_grp_check:
+ if (ret) {
usnic_err("Failed to allocate qp_grp\n");
- return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
+ if (usnic_ib_share_vf)
+ usnic_uiom_free_dev_list(dev_list);
}
-
- return qp_grp;
+ return ret;
}
static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
@@ -204,30 +252,9 @@ static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
WARN_ON(qp_grp->state != IB_QPS_RESET);
- spin_lock(&vf->lock);
+ mutex_lock(&vf->lock);
usnic_ib_qp_grp_destroy(qp_grp);
- spin_unlock(&vf->lock);
-}
-
-static void eth_speed_to_ib_speed(int speed, u8 *active_speed,
- u8 *active_width)
-{
- if (speed <= 10000) {
- *active_width = IB_WIDTH_1X;
- *active_speed = IB_SPEED_FDR10;
- } else if (speed <= 20000) {
- *active_width = IB_WIDTH_4X;
- *active_speed = IB_SPEED_DDR;
- } else if (speed <= 30000) {
- *active_width = IB_WIDTH_4X;
- *active_speed = IB_SPEED_QDR;
- } else if (speed <= 40000) {
- *active_width = IB_WIDTH_4X;
- *active_speed = IB_SPEED_FDR10;
- } else {
- *active_width = IB_WIDTH_4X;
- *active_speed = IB_SPEED_EDR;
- }
+ mutex_unlock(&vf->lock);
}
static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
@@ -242,7 +269,7 @@ static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
/* Start of ib callback functions */
enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
- u8 port_num)
+ u32 port_num)
{
return IB_LINK_LAYER_ETHERNET;
}
@@ -254,7 +281,6 @@ int usnic_ib_query_device(struct ib_device *ibdev,
struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
union ib_gid gid;
struct ethtool_drvinfo info;
- struct ethtool_cmd cmd;
int qp_per_vf;
usnic_dbg("\n");
@@ -263,7 +289,6 @@ int usnic_ib_query_device(struct ib_device *ibdev,
mutex_lock(&us_ibdev->usdev_lock);
us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
- us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
memset(props, 0, sizeof(*props));
usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
&gid.raw[0]);
@@ -278,11 +303,12 @@ int usnic_ib_query_device(struct ib_device *ibdev,
qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
props->max_qp = qp_per_vf *
- atomic_read(&us_ibdev->vf_cnt.refcount);
+ kref_read(&us_ibdev->vf_cnt);
props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
- IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
+ IB_DEVICE_SYS_IMAGE_GUID;
+ props->kernel_cap_flags = IBK_BLOCK_MULTICAST_LOOPBACK;
props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
- atomic_read(&us_ibdev->vf_cnt.refcount);
+ kref_read(&us_ibdev->vf_cnt);
props->max_pd = USNIC_UIOM_MAX_PD_CNT;
props->max_mr = USNIC_UIOM_MAX_MR_CNT;
props->local_ca_ack_delay = 0;
@@ -299,7 +325,6 @@ int usnic_ib_query_device(struct ib_device *ibdev,
props->max_mcast_grp = 0;
props->max_mcast_qp_attach = 0;
props->max_total_mcast_qp_attach = 0;
- props->max_map_per_fmr = 0;
/* Owned by Userspace
* max_qp_wr, max_sge, max_sge_rd, max_cqe */
mutex_unlock(&us_ibdev->usdev_lock);
@@ -307,17 +332,24 @@ int usnic_ib_query_device(struct ib_device *ibdev,
return 0;
}
-int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
+int usnic_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
- struct ethtool_cmd cmd;
usnic_dbg("\n");
+ if (ib_get_eth_speed(ibdev, port, &props->active_speed,
+ &props->active_width))
+ return -EINVAL;
+
+ /*
+ * usdev_lock is acquired after (and not before) ib_get_eth_speed call
+ * because acquiring rtnl_lock in ib_get_eth_speed, while holding
+ * usdev_lock could lead to a deadlock.
+ */
mutex_lock(&us_ibdev->usdev_lock);
- us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
- memset(props, 0, sizeof(*props));
+ /* props being zeroed by the caller, avoid zeroing it here */
props->lid = 0;
props->lmc = 1;
@@ -326,22 +358,20 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
if (!us_ibdev->ufdev->link_up) {
props->state = IB_PORT_DOWN;
- props->phys_state = 3;
+ props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
} else if (!us_ibdev->ufdev->inaddr) {
props->state = IB_PORT_INIT;
- props->phys_state = 4;
+ props->phys_state =
+ IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING;
} else {
props->state = IB_PORT_ACTIVE;
- props->phys_state = 5;
+ props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
}
props->port_cap_flags = 0;
props->gid_tbl_len = 1;
- props->pkey_tbl_len = 1;
props->bad_pkey_cntr = 0;
props->qkey_viol_cntr = 0;
- eth_speed_to_ib_speed(cmd.speed, &props->active_speed,
- &props->active_width);
props->max_mtu = IB_MTU_4096;
props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
/* Userspace will adjust for hdrs */
@@ -390,7 +420,7 @@ err_out:
return err;
}
-int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+int usnic_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid)
{
@@ -409,57 +439,31 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
return 0;
}
-int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
- u16 *pkey)
-{
- if (index > 1)
- return -EINVAL;
-
- *pkey = 0xffff;
- return 0;
-}
-
-struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
- struct usnic_ib_pd *pd;
- void *umem_pd;
-
- usnic_dbg("\n");
+ struct usnic_ib_pd *pd = to_upd(ibpd);
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return ERR_PTR(-ENOMEM);
-
- umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
- if (IS_ERR_OR_NULL(umem_pd)) {
- kfree(pd);
- return ERR_PTR(umem_pd ? PTR_ERR(umem_pd) : -ENOMEM);
- }
+ pd->umem_pd = usnic_uiom_alloc_pd(ibpd->device->dev.parent);
+ if (IS_ERR(pd->umem_pd))
+ return PTR_ERR(pd->umem_pd);
- usnic_info("domain 0x%p allocated for context 0x%p and device %s\n",
- pd, context, ibdev->name);
- return &pd->ibpd;
+ return 0;
}
-int usnic_ib_dealloc_pd(struct ib_pd *pd)
+int usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
- usnic_info("freeing domain 0x%p\n", pd);
-
usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
- kfree(pd);
return 0;
}
-struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
+int usnic_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
{
int err;
struct usnic_ib_dev *us_ibdev;
- struct usnic_ib_qp_grp *qp_grp;
- struct usnic_ib_ucontext *ucontext;
+ struct usnic_ib_qp_grp *qp_grp = to_uqp_grp(ibqp);
+ struct usnic_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct usnic_ib_ucontext, ibucontext);
int cq_cnt;
struct usnic_vnic_res_spec res_spec;
struct usnic_ib_create_qp_cmd cmd;
@@ -467,30 +471,29 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
usnic_dbg("\n");
- ucontext = to_uucontext(pd->uobject->context);
- us_ibdev = to_usdev(pd->device);
+ us_ibdev = to_usdev(ibqp->device);
if (init_attr->create_flags)
- return ERR_PTR(-EINVAL);
+ return -EOPNOTSUPP;
err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
if (err) {
usnic_err("%s: cannot copy udata for create_qp\n",
- us_ibdev->ib_dev.name);
- return ERR_PTR(-EINVAL);
+ dev_name(&us_ibdev->ib_dev.dev));
+ return -EINVAL;
}
err = create_qp_validate_user_data(cmd);
if (err) {
usnic_err("%s: Failed to validate user data\n",
- us_ibdev->ib_dev.name);
- return ERR_PTR(-EINVAL);
+ dev_name(&us_ibdev->ib_dev.dev));
+ return -EINVAL;
}
if (init_attr->qp_type != IB_QPT_UD) {
usnic_err("%s asked to make a non-UD QP: %d\n",
- us_ibdev->ib_dev.name, init_attr->qp_type);
- return ERR_PTR(-EINVAL);
+ dev_name(&us_ibdev->ib_dev.dev), init_attr->qp_type);
+ return -EOPNOTSUPP;
}
trans_spec = cmd.spec;
@@ -498,13 +501,9 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;
res_spec = min_transport_spec[trans_spec.trans_type];
usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
- qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd),
- &trans_spec,
- &res_spec);
- if (IS_ERR_OR_NULL(qp_grp)) {
- err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM;
+ err = find_free_vf_and_create_qp_grp(ibqp, &trans_spec, &res_spec);
+ if (err)
goto out_release_mutex;
- }
err = usnic_ib_fill_create_qp_resp(qp_grp, udata);
if (err) {
@@ -516,16 +515,16 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
list_add_tail(&qp_grp->link, &ucontext->qp_grp_list);
usnic_ib_log_vf(qp_grp->vf);
mutex_unlock(&us_ibdev->usdev_lock);
- return &qp_grp->ibqp;
+ return 0;
out_release_qp_grp:
qp_grp_destroy(qp_grp);
out_release_mutex:
mutex_unlock(&us_ibdev->usdev_lock);
- return ERR_PTR(err);
+ return err;
}
-int usnic_ib_destroy_qp(struct ib_qp *qp)
+int usnic_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
struct usnic_ib_qp_grp *qp_grp;
struct usnic_ib_vf *vf;
@@ -554,53 +553,46 @@ int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int status;
usnic_dbg("\n");
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
qp_grp = to_uqp_grp(ibqp);
- /* TODO: Future Support All States */
mutex_lock(&qp_grp->vf->pf->usdev_lock);
- if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT) {
- status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_INIT, NULL);
- } else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTR) {
- status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTR, NULL);
- } else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTS) {
- status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTS, NULL);
+ if ((attr_mask & IB_QP_PORT) && attr->port_num != 1) {
+ /* usnic devices only have one port */
+ status = -EINVAL;
+ goto out_unlock;
+ }
+ if (attr_mask & IB_QP_STATE) {
+ status = usnic_ib_qp_grp_modify(qp_grp, attr->qp_state, NULL);
} else {
- usnic_err("Unexpected combination mask: %u state: %u\n",
- attr_mask & IB_QP_STATE, attr->qp_state);
+ usnic_err("Unhandled request, attr_mask=0x%x\n", attr_mask);
status = -EINVAL;
}
+out_unlock:
mutex_unlock(&qp_grp->vf->pf->usdev_lock);
return status;
}
-struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+int usnic_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
{
- struct ib_cq *cq;
-
- usnic_dbg("\n");
if (attr->flags)
- return ERR_PTR(-EINVAL);
-
- cq = kzalloc(sizeof(*cq), GFP_KERNEL);
- if (!cq)
- return ERR_PTR(-EBUSY);
+ return -EOPNOTSUPP;
- return cq;
+ return 0;
}
-int usnic_ib_destroy_cq(struct ib_cq *cq)
+int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
- usnic_dbg("\n");
- kfree(cq);
return 0;
}
struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
struct usnic_ib_mr *mr;
@@ -609,9 +601,12 @@ struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start,
virt_addr, length);
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (IS_ERR_OR_NULL(mr))
- return ERR_PTR(mr ? PTR_ERR(mr) : -ENOMEM);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length,
access_flags, 0);
@@ -628,48 +623,42 @@ err_free:
return ERR_PTR(err);
}
-int usnic_ib_dereg_mr(struct ib_mr *ibmr)
+int usnic_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct usnic_ib_mr *mr = to_umr(ibmr);
usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);
- usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing);
+ usnic_uiom_reg_release(mr->umem);
kfree(mr);
return 0;
}
-struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
+int usnic_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
- struct usnic_ib_ucontext *context;
+ struct ib_device *ibdev = uctx->device;
+ struct usnic_ib_ucontext *context = to_ucontext(uctx);
struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
usnic_dbg("\n");
- context = kmalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return ERR_PTR(-ENOMEM);
-
INIT_LIST_HEAD(&context->qp_grp_list);
mutex_lock(&us_ibdev->usdev_lock);
list_add_tail(&context->link, &us_ibdev->ctx_list);
mutex_unlock(&us_ibdev->usdev_lock);
- return &context->ibucontext;
+ return 0;
}
-int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
+void usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
struct usnic_ib_ucontext *context = to_uucontext(ibcontext);
struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device);
usnic_dbg("\n");
mutex_lock(&us_ibdev->usdev_lock);
- BUG_ON(!list_empty(&context->qp_grp_list));
+ WARN_ON_ONCE(!list_empty(&context->qp_grp_list));
list_del(&context->link);
mutex_unlock(&us_ibdev->usdev_lock);
- kfree(context);
- return 0;
}
int usnic_ib_mmap(struct ib_ucontext *context,
@@ -687,7 +676,7 @@ int usnic_ib_mmap(struct ib_ucontext *context,
usnic_dbg("\n");
us_ibdev = to_usdev(context->device);
- vma->vm_flags |= VM_IO;
+ vm_flags_set(vma, VM_IO);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vfid = vma->vm_pgoff;
usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
@@ -722,55 +711,3 @@ int usnic_ib_mmap(struct ib_ucontext *context,
usnic_err("No VF %u found\n", vfid);
return -EINVAL;
}
-
-/* In ib callbacks section - Start of stub funcs */
-struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
- struct ib_ah_attr *ah_attr)
-{
- usnic_dbg("\n");
- return ERR_PTR(-EPERM);
-}
-
-int usnic_ib_destroy_ah(struct ib_ah *ah)
-{
- usnic_dbg("\n");
- return -EINVAL;
-}
-
-int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
-{
- usnic_dbg("\n");
- return -EINVAL;
-}
-
-int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
-{
- usnic_dbg("\n");
- return -EINVAL;
-}
-
-int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
- struct ib_wc *wc)
-{
- usnic_dbg("\n");
- return -EINVAL;
-}
-
-int usnic_ib_req_notify_cq(struct ib_cq *cq,
- enum ib_cq_notify_flags flags)
-{
- usnic_dbg("\n");
- return -EINVAL;
-}
-
-struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc)
-{
- usnic_dbg("\n");
- return ERR_PTR(-ENOMEM);
-}
-
-
-/* In ib callbacks section - End of stub funcs */
-/* End of ib callbacks section */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
index 0bd04efa16f3..e3031ac32488 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
@@ -1,9 +1,24 @@
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
@@ -22,55 +37,34 @@
#include "usnic_ib.h"
enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
- u8 port_num);
+ u32 port_num);
int usnic_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *uhw);
-int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
+int usnic_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props);
-enum rdma_protocol_type
-usnic_ib_query_protocol(struct ib_device *device, u8 port_num);
int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr);
-int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+int usnic_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid);
-int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
- u16 *pkey);
-struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata);
-int usnic_ib_dealloc_pd(struct ib_pd *pd);
-struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata);
-int usnic_ib_destroy_qp(struct ib_qp *qp);
+int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+int usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+int usnic_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+int usnic_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
-struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata);
-int usnic_ib_destroy_cq(struct ib_cq *cq);
+int usnic_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
-int usnic_ib_dereg_mr(struct ib_mr *ibmr);
-struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata);
-int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext);
+int usnic_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
+int usnic_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
+void usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext);
int usnic_ib_mmap(struct ib_ucontext *context,
struct vm_area_struct *vma);
-struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
- struct ib_ah_attr *ah_attr);
-int usnic_ib_destroy_ah(struct ib_ah *ah);
-int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
-int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
- struct ib_wc *wc);
-int usnic_ib_req_notify_cq(struct ib_cq *cq,
- enum ib_cq_notify_flags flags);
-struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc);
#endif /* !USNIC_IB_VERBS_H */
diff --git a/drivers/infiniband/hw/usnic/usnic_log.h b/drivers/infiniband/hw/usnic/usnic_log.h
index 75777a66c684..183fcb6a952f 100644
--- a/drivers/infiniband/hw/usnic/usnic_log.h
+++ b/drivers/infiniband/hw/usnic/usnic_log.h
@@ -1,9 +1,24 @@
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_transport.c b/drivers/infiniband/hw/usnic/usnic_transport.c
index ddef6f77a78c..dc37066900a5 100644
--- a/drivers/infiniband/hw/usnic/usnic_transport.c
+++ b/drivers/infiniband/hw/usnic/usnic_transport.c
@@ -1,9 +1,24 @@
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
@@ -17,7 +32,6 @@
*/
#include <linux/bitmap.h>
#include <linux/file.h>
-#include <linux/module.h>
#include <linux/slab.h>
#include <net/inet_sock.h>
@@ -106,7 +120,7 @@ void usnic_transport_unrsrv_port(enum usnic_transport_type type, u16 port_num)
if (type == USNIC_TRANSPORT_ROCE_CUSTOM) {
spin_lock(&roce_bitmap_lock);
if (!port_num) {
- usnic_err("Unreserved unvalid port num 0 for %s\n",
+ usnic_err("Unreserved invalid port num 0 for %s\n",
usnic_transport_to_str(type));
goto out_roce_custom;
}
@@ -159,14 +173,13 @@ void usnic_transport_put_socket(struct socket *sock)
int usnic_transport_sock_get_addr(struct socket *sock, int *proto,
uint32_t *addr, uint16_t *port)
{
- int len;
int err;
struct sockaddr_in sock_addr;
err = sock->ops->getname(sock,
(struct sockaddr *)&sock_addr,
- &len, 0);
- if (err)
+ 0);
+ if (err < 0)
return err;
if (sock_addr.sin_family != AF_INET)
@@ -186,10 +199,8 @@ int usnic_transport_sock_get_addr(struct socket *sock, int *proto,
int usnic_transport_init(void)
{
roce_bitmap = kzalloc(ROCE_BITMAP_SZ, GFP_KERNEL);
- if (!roce_bitmap) {
- usnic_err("Failed to allocate bit map");
+ if (!roce_bitmap)
return -ENOMEM;
- }
/* Do not ever allocate bit 0, hence set it here */
bitmap_set(roce_bitmap, 0, 1);
diff --git a/drivers/infiniband/hw/usnic/usnic_transport.h b/drivers/infiniband/hw/usnic/usnic_transport.h
index 7e5dc6d9f462..9a7a2d9755c0 100644
--- a/drivers/infiniband/hw/usnic/usnic_transport.h
+++ b/drivers/infiniband/hw/usnic/usnic_transport.h
@@ -1,9 +1,24 @@
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index cb2337f0532b..3fbf99757b11 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -7,7 +7,7 @@
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
+ * BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
@@ -34,43 +34,29 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
-#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
-#include <linux/dma-attrs.h>
#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <linux/list.h>
-#include <linux/pci.h>
+#include <rdma/ib_verbs.h>
#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_uiom_interval_tree.h"
-static struct workqueue_struct *usnic_uiom_wq;
-
#define USNIC_UIOM_PAGE_CHUNK \
((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) /\
((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] - \
(void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))
-static void usnic_uiom_reg_account(struct work_struct *work)
-{
- struct usnic_uiom_reg *umem = container_of(work,
- struct usnic_uiom_reg, work);
-
- down_write(&umem->mm->mmap_sem);
- umem->mm->locked_vm -= umem->diff;
- up_write(&umem->mm->mmap_sem);
- mmput(umem->mm);
- kfree(umem);
-}
-
static int usnic_uiom_dma_fault(struct iommu_domain *domain,
struct device *dev,
unsigned long iova, int flags,
void *token)
{
- usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
+ usnic_err("Device %s iommu fault domain 0x%p va 0x%lx flags 0x%x\n",
dev_name(dev),
domain, iova, flags);
return -ENOSYS;
@@ -88,9 +74,7 @@ static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
for_each_sg(chunk->page_list, sg, chunk->nents, i) {
page = sg_page(sg);
pa = sg_phys(sg);
- if (dirty)
- set_page_dirty_lock(page);
- put_page(page);
+ unpin_user_pages_dirty_lock(&page, 1, dirty);
usnic_dbg("pa: %pa\n", &pa);
}
kfree(chunk);
@@ -98,8 +82,10 @@ static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
}
static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
- int dmasync, struct list_head *chunk_list)
+ int dmasync, struct usnic_uiom_reg *uiomr)
{
+ struct list_head *chunk_list = &uiomr->chunk_list;
+ unsigned int gup_flags = FOLL_LONGTERM;
struct page **page_list;
struct scatterlist *sg;
struct usnic_uiom_chunk *chunk;
@@ -110,12 +96,18 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
int ret;
int off;
int i;
- int flags;
dma_addr_t pa;
- DEFINE_DMA_ATTRS(attrs);
+ struct mm_struct *mm;
- if (dmasync)
- dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+ /*
+ * If the combination of the addr and size requested for this memory
+ * region causes an integer overflow, return error.
+ */
+ if (((addr + size) < addr) || PAGE_ALIGN(addr + size) < (addr + size))
+ return -EINVAL;
+
+ if (!size)
+ return -EINVAL;
if (!can_do_mlock())
return -EPERM;
@@ -128,9 +120,10 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;
- down_write(&current->mm->mmap_sem);
+ uiomr->owning_mm = mm = current->mm;
+ mmap_read_lock(mm);
- locked = npages + current->mm->locked_vm;
+ locked = atomic64_add_return(npages, &current->mm->pinned_vm);
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
@@ -138,16 +131,16 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
goto out;
}
- flags = IOMMU_READ | IOMMU_CACHE;
- flags |= (writable) ? IOMMU_WRITE : 0;
+ if (writable)
+ gup_flags |= FOLL_WRITE;
cur_base = addr & PAGE_MASK;
ret = 0;
while (npages) {
- ret = get_user_pages(current, current->mm, cur_base,
- min_t(unsigned long, npages,
- PAGE_SIZE / sizeof(struct page *)),
- 1, !writable, page_list, NULL);
+ ret = pin_user_pages(cur_base,
+ min_t(unsigned long, npages,
+ PAGE_SIZE / sizeof(struct page *)),
+ gup_flags, page_list);
if (ret < 0)
goto out;
@@ -156,9 +149,8 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
off = 0;
while (ret) {
- chunk = kmalloc(sizeof(*chunk) +
- sizeof(struct scatterlist) *
- min_t(int, ret, USNIC_UIOM_PAGE_CHUNK),
+ chunk = kmalloc(struct_size(chunk, page_list,
+ min_t(int, ret, USNIC_UIOM_PAGE_CHUNK)),
GFP_KERNEL);
if (!chunk) {
ret = -ENOMEM;
@@ -184,12 +176,13 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
}
out:
- if (ret < 0)
+ if (ret < 0) {
usnic_uiom_put_pages(chunk_list, 0);
- else
- current->mm->locked_vm = locked;
+ atomic64_sub(npages, &current->mm->pinned_vm);
+ } else
+ mmgrab(uiomr->owning_mm);
- up_write(&current->mm->mmap_sem);
+ mmap_read_unlock(mm);
free_page((unsigned long) page_list);
return ret;
}
@@ -228,7 +221,7 @@ static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
vpn_last = vpn_start + npages - 1;
spin_lock(&pd->lock);
- usnic_uiom_remove_interval(&pd->rb_root, vpn_start,
+ usnic_uiom_remove_interval(&pd->root, vpn_start,
vpn_last, &rm_intervals);
usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);
@@ -284,7 +277,7 @@ iter_chunk:
usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
va_start, &pa_start, size, flags);
err = iommu_map(pd->domain, va_start, pa_start,
- size, flags);
+ size, flags, GFP_ATOMIC);
if (err) {
usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
va_start, &pa_start, size, err);
@@ -301,7 +294,7 @@ iter_chunk:
usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
va_start, &pa_start, size, flags);
err = iommu_map(pd->domain, va_start, pa_start,
- size, flags);
+ size, flags, GFP_ATOMIC);
if (err) {
usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
va_start, &pa_start, size, err);
@@ -369,7 +362,7 @@ struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
uiomr->pd = pd;
err = usnic_uiom_get_pages(addr, size, writable, dmasync,
- &uiomr->chunk_list);
+ uiomr);
if (err) {
usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
vpn_start, vpn_last, err);
@@ -380,7 +373,7 @@ struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
(writable) ? IOMMU_WRITE : 0,
IOMMU_WRITE,
- &pd->rb_root,
+ &pd->root,
&sorted_diff_intervals);
if (err) {
usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n",
@@ -396,7 +389,7 @@ struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
}
- err = usnic_uiom_insert_interval(&pd->rb_root, vpn_start, vpn_last,
+ err = usnic_uiom_insert_interval(&pd->root, vpn_start, vpn_last,
(writable) ? IOMMU_WRITE : 0);
if (err) {
usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n",
@@ -416,53 +409,32 @@ out_put_intervals:
out_put_pages:
usnic_uiom_put_pages(&uiomr->chunk_list, 0);
spin_unlock(&pd->lock);
+ mmdrop(uiomr->owning_mm);
out_free_uiomr:
kfree(uiomr);
return ERR_PTR(err);
}
-void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing)
+static void __usnic_uiom_release_tail(struct usnic_uiom_reg *uiomr)
{
- struct mm_struct *mm;
- unsigned long diff;
-
- __usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
-
- mm = get_task_mm(current);
- if (!mm) {
- kfree(uiomr);
- return;
- }
+ mmdrop(uiomr->owning_mm);
+ kfree(uiomr);
+}
- diff = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
+static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr)
+{
+ return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
+}
- /*
- * We may be called with the mm's mmap_sem already held. This
- * can happen when a userspace munmap() is the call that drops
- * the last reference to our file and calls our release
- * method. If there are memory regions to destroy, we'll end
- * up here and not be able to take the mmap_sem. In that case
- * we defer the vm_locked accounting to the system workqueue.
- */
- if (closing) {
- if (!down_write_trylock(&mm->mmap_sem)) {
- INIT_WORK(&uiomr->work, usnic_uiom_reg_account);
- uiomr->mm = mm;
- uiomr->diff = diff;
-
- queue_work(usnic_uiom_wq, &uiomr->work);
- return;
- }
- } else
- down_write(&mm->mmap_sem);
+void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr)
+{
+ __usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
- current->mm->locked_vm -= diff;
- up_write(&mm->mmap_sem);
- mmput(mm);
- kfree(uiomr);
+ atomic64_sub(usnic_uiom_num_pages(uiomr), &uiomr->owning_mm->pinned_vm);
+ __usnic_uiom_release_tail(uiomr);
}
-struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
+struct usnic_uiom_pd *usnic_uiom_alloc_pd(struct device *dev)
{
struct usnic_uiom_pd *pd;
void *domain;
@@ -471,11 +443,11 @@ struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
if (!pd)
return ERR_PTR(-ENOMEM);
- pd->domain = domain = iommu_domain_alloc(&pci_bus_type);
- if (!domain) {
+ pd->domain = domain = iommu_paging_domain_alloc(dev);
+ if (IS_ERR(domain)) {
usnic_err("Failed to allocate IOMMU domain");
kfree(pd);
- return ERR_PTR(-ENOMEM);
+ return ERR_CAST(domain);
}
iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);
@@ -506,7 +478,7 @@ int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
if (err)
goto out_free_dev;
- if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
+ if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) {
usnic_err("IOMMU of %s does not support cache coherency\n",
dev_name(dev));
err = -EINVAL;
@@ -579,25 +551,3 @@ void usnic_uiom_free_dev_list(struct device **devs)
{
kfree(devs);
}
-
-int usnic_uiom_init(char *drv_name)
-{
- if (!iommu_present(&pci_bus_type)) {
- usnic_err("IOMMU required but not present or enabled. USNIC QPs will not function w/o enabling IOMMU\n");
- return -EPERM;
- }
-
- usnic_uiom_wq = create_workqueue(drv_name);
- if (!usnic_uiom_wq) {
- usnic_err("Unable to alloc wq for drv %s\n", drv_name);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-void usnic_uiom_fini(void)
-{
- flush_workqueue(usnic_uiom_wq);
- destroy_workqueue(usnic_uiom_wq);
-}
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.h b/drivers/infiniband/hw/usnic/usnic_uiom.h
index 70440996e8f2..70d51d919d12 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.h
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.h
@@ -1,9 +1,24 @@
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
@@ -24,6 +39,8 @@
#include "usnic_uiom_interval_tree.h"
+struct ib_ucontext;
+
#define USNIC_UIOM_READ (1)
#define USNIC_UIOM_WRITE (2)
@@ -40,7 +57,7 @@ struct usnic_uiom_dev {
struct usnic_uiom_pd {
struct iommu_domain *domain;
spinlock_t lock;
- struct rb_root rb_root;
+ struct rb_root_cached root;
struct list_head devs;
int dev_cnt;
};
@@ -54,17 +71,16 @@ struct usnic_uiom_reg {
int writable;
struct list_head chunk_list;
struct work_struct work;
- struct mm_struct *mm;
- unsigned long diff;
+ struct mm_struct *owning_mm;
};
struct usnic_uiom_chunk {
struct list_head list;
int nents;
- struct scatterlist page_list[0];
+ struct scatterlist page_list[] __counted_by(nents);
};
-struct usnic_uiom_pd *usnic_uiom_alloc_pd(void);
+struct usnic_uiom_pd *usnic_uiom_alloc_pd(struct device *dev);
void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd);
int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev);
void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd,
@@ -74,7 +90,5 @@ void usnic_uiom_free_dev_list(struct device **devs);
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
unsigned long addr, size_t size,
int access, int dmasync);
-void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing);
-int usnic_uiom_init(char *drv_name);
-void usnic_uiom_fini(void);
+void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr);
#endif /* USNIC_UIOM_H_ */
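The page_list flexible array above is now annotated with __counted_by(nents), which ties the compiler's bounds checking to the nents field. Purely as an illustration (demo_chunk and demo_chunk_alloc are invented names), an allocation that keeps the annotation honest typically pairs it with struct_size():

/* Illustrative only: sizing a __counted_by() flexible array the way
 * struct usnic_uiom_chunk above is meant to be sized. */
#include <linux/overflow.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct demo_chunk {
	int nents;
	struct scatterlist sgl[] __counted_by(nents);
};

static struct demo_chunk *demo_chunk_alloc(int nents, gfp_t gfp)
{
	struct demo_chunk *chunk;

	chunk = kmalloc(struct_size(chunk, sgl, nents), gfp);
	if (!chunk)
		return NULL;
	chunk->nents = nents;	/* set the counter before touching sgl[] */
	return chunk;
}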
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
index 3a4288e0fbac..29d71267af78 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
@@ -1,9 +1,24 @@
/*
* Copyright (c) 2014, Cisco Systems, Inc. All rights reserved.
*
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
@@ -68,7 +83,8 @@ usnic_uiom_interval_node_alloc(long int start, long int last, int ref_cnt,
return interval;
}
-static int interval_cmp(void *priv, struct list_head *a, struct list_head *b)
+static int interval_cmp(void *priv, const struct list_head *a,
+ const struct list_head *b)
{
struct usnic_uiom_interval_node *node_a, *node_b;
@@ -85,9 +101,9 @@ static int interval_cmp(void *priv, struct list_head *a, struct list_head *b)
}
static void
-find_intervals_intersection_sorted(struct rb_root *root, unsigned long start,
- unsigned long last,
- struct list_head *list)
+find_intervals_intersection_sorted(struct rb_root_cached *root,
+ unsigned long start, unsigned long last,
+ struct list_head *list)
{
struct usnic_uiom_interval_node *node;
@@ -103,7 +119,7 @@ find_intervals_intersection_sorted(struct rb_root *root, unsigned long start,
int usnic_uiom_get_intervals_diff(unsigned long start, unsigned long last,
int flags, int flag_mask,
- struct rb_root *root,
+ struct rb_root_cached *root,
struct list_head *diff_set)
{
struct usnic_uiom_interval_node *interval, *tmp;
@@ -160,7 +176,7 @@ void usnic_uiom_put_interval_set(struct list_head *intervals)
kfree(interval);
}
-int usnic_uiom_insert_interval(struct rb_root *root, unsigned long start,
+int usnic_uiom_insert_interval(struct rb_root_cached *root, unsigned long start,
unsigned long last, int flags)
{
struct usnic_uiom_interval_node *interval, *tmp;
@@ -231,8 +247,9 @@ err_out:
return err;
}
-void usnic_uiom_remove_interval(struct rb_root *root, unsigned long start,
- unsigned long last, struct list_head *removed)
+void usnic_uiom_remove_interval(struct rb_root_cached *root,
+ unsigned long start, unsigned long last,
+ struct list_head *removed)
{
struct usnic_uiom_interval_node *interval;
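The interval_cmp() change above simply tracks the constified list_sort() comparator type. A self-contained sketch of that comparator shape, with made-up demo_* names:

/* Sketch of the constified list_sort() comparator; demo_* names are
 * invented. */
#include <linux/list.h>
#include <linux/list_sort.h>

struct demo_node {
	struct list_head link;
	unsigned long start;
};

static int demo_cmp(void *priv, const struct list_head *a,
		    const struct list_head *b)
{
	const struct demo_node *na = list_entry(a, struct demo_node, link);
	const struct demo_node *nb = list_entry(b, struct demo_node, link);

	/* memcmp()-style result; list_sort() is a stable merge sort. */
	if (na->start != nb->start)
		return na->start < nb->start ? -1 : 1;
	return 0;
}

/* usage: list_sort(NULL, &some_list_head, demo_cmp); */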
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h
index d4f752e258fd..1d7fc3226bca 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h
+++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h
@@ -1,9 +1,24 @@
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
@@ -33,12 +48,12 @@ struct usnic_uiom_interval_node {
extern void
usnic_uiom_interval_tree_insert(struct usnic_uiom_interval_node *node,
- struct rb_root *root);
+ struct rb_root_cached *root);
extern void
usnic_uiom_interval_tree_remove(struct usnic_uiom_interval_node *node,
- struct rb_root *root);
+ struct rb_root_cached *root);
extern struct usnic_uiom_interval_node *
-usnic_uiom_interval_tree_iter_first(struct rb_root *root,
+usnic_uiom_interval_tree_iter_first(struct rb_root_cached *root,
unsigned long start,
unsigned long last);
extern struct usnic_uiom_interval_node *
@@ -48,7 +63,7 @@ usnic_uiom_interval_tree_iter_next(struct usnic_uiom_interval_node *node,
* Inserts {start...last} into {root}. If there are overlaps,
* nodes will be broken up and merged
*/
-int usnic_uiom_insert_interval(struct rb_root *root,
+int usnic_uiom_insert_interval(struct rb_root_cached *root,
unsigned long start, unsigned long last,
int flags);
/*
@@ -56,7 +71,7 @@ int usnic_uiom_insert_interval(struct rb_root *root,
 * 'removed.' The caller is responsible for freeing memory of nodes in
* 'removed.'
*/
-void usnic_uiom_remove_interval(struct rb_root *root,
+void usnic_uiom_remove_interval(struct rb_root_cached *root,
unsigned long start, unsigned long last,
struct list_head *removed);
/*
@@ -66,7 +81,7 @@ void usnic_uiom_remove_interval(struct rb_root *root,
int usnic_uiom_get_intervals_diff(unsigned long start,
unsigned long last, int flags,
int flag_mask,
- struct rb_root *root,
+ struct rb_root_cached *root,
struct list_head *diff_set);
/* Call this to free diff_set returned by usnic_uiom_get_intervals_diff */
void usnic_uiom_put_interval_set(struct list_head *intervals);
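With the protection domain's interval tree now rooted in a struct rb_root_cached, callers initialise it with RB_ROOT_CACHED and walk ranges through the iter_first()/iter_next() pair declared above. A hypothetical sketch (the tree here is freshly initialised and empty, so only the calling convention matters):

#include <linux/rbtree.h>
#include "usnic_uiom_interval_tree.h"

static void demo_walk(unsigned long start, unsigned long last)
{
	struct rb_root_cached root = RB_ROOT_CACHED;
	struct usnic_uiom_interval_node *node;

	for (node = usnic_uiom_interval_tree_iter_first(&root, start, last);
	     node;
	     node = usnic_uiom_interval_tree_iter_next(node, start, last))
		;	/* inspect node->start / node->last here */
}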
diff --git a/drivers/infiniband/hw/usnic/usnic_vnic.c b/drivers/infiniband/hw/usnic/usnic_vnic.c
index 656b88c39eda..0c47f73aaed5 100644
--- a/drivers/infiniband/hw/usnic/usnic_vnic.c
+++ b/drivers/infiniband/hw/usnic/usnic_vnic.c
@@ -1,9 +1,24 @@
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
@@ -16,7 +31,6 @@
*
*/
#include <linux/errno.h>
-#include <linux/module.h>
#include <linux/pci.h>
#include "usnic_ib.h"
@@ -222,36 +236,33 @@ usnic_vnic_get_resources(struct usnic_vnic *vnic, enum usnic_vnic_res_type type,
struct usnic_vnic_res *res;
int i;
- if (usnic_vnic_res_free_cnt(vnic, type) < cnt || cnt < 1 || !owner)
+ if (usnic_vnic_res_free_cnt(vnic, type) < cnt || cnt < 0 || !owner)
return ERR_PTR(-EINVAL);
ret = kzalloc(sizeof(*ret), GFP_ATOMIC);
- if (!ret) {
- usnic_err("Failed to allocate chunk for %s - Out of memory\n",
- usnic_vnic_pci_name(vnic));
+ if (!ret)
return ERR_PTR(-ENOMEM);
- }
- ret->res = kzalloc(sizeof(*(ret->res))*cnt, GFP_ATOMIC);
- if (!ret->res) {
- usnic_err("Failed to allocate resources for %s. Out of memory\n",
- usnic_vnic_pci_name(vnic));
- kfree(ret);
- return ERR_PTR(-ENOMEM);
- }
+ if (cnt > 0) {
+ ret->res = kcalloc(cnt, sizeof(*(ret->res)), GFP_ATOMIC);
+ if (!ret->res) {
+ kfree(ret);
+ return ERR_PTR(-ENOMEM);
+ }
- spin_lock(&vnic->res_lock);
- src = &vnic->chunks[type];
- for (i = 0; i < src->cnt && ret->cnt < cnt; i++) {
- res = src->res[i];
- if (!res->owner) {
- src->free_cnt--;
- res->owner = owner;
- ret->res[ret->cnt++] = res;
+ spin_lock(&vnic->res_lock);
+ src = &vnic->chunks[type];
+ for (i = 0; i < src->cnt && ret->cnt < cnt; i++) {
+ res = src->res[i];
+ if (!res->owner) {
+ src->free_cnt--;
+ res->owner = owner;
+ ret->res[ret->cnt++] = res;
+ }
}
- }
- spin_unlock(&vnic->res_lock);
+ spin_unlock(&vnic->res_lock);
+ }
ret->type = type;
ret->vnic = vnic;
WARN_ON(ret->cnt != cnt);
@@ -266,14 +277,16 @@ void usnic_vnic_put_resources(struct usnic_vnic_res_chunk *chunk)
int i;
struct usnic_vnic *vnic = chunk->vnic;
- spin_lock(&vnic->res_lock);
- while ((i = --chunk->cnt) >= 0) {
- res = chunk->res[i];
- chunk->res[i] = NULL;
- res->owner = NULL;
- vnic->chunks[res->type].free_cnt++;
+ if (chunk->cnt > 0) {
+ spin_lock(&vnic->res_lock);
+ while ((i = --chunk->cnt) >= 0) {
+ res = chunk->res[i];
+ chunk->res[i] = NULL;
+ res->owner = NULL;
+ vnic->chunks[res->type].free_cnt++;
+ }
+ spin_unlock(&vnic->res_lock);
}
- spin_unlock(&vnic->res_lock);
kfree(chunk->res);
kfree(chunk);
@@ -292,11 +305,13 @@ static int usnic_vnic_alloc_res_chunk(struct usnic_vnic *vnic,
struct usnic_vnic_res *res;
cnt = vnic_dev_get_res_count(vnic->vdev, _to_vnic_res_type(type));
- if (cnt < 1)
+ if (cnt < 1) {
+ usnic_err("Wrong res count with cnt %d\n", cnt);
return -EINVAL;
+ }
chunk->cnt = chunk->free_cnt = cnt;
- chunk->res = kzalloc(sizeof(*(chunk->res))*cnt, GFP_KERNEL);
+ chunk->res = kcalloc(cnt, sizeof(*(chunk->res)), GFP_KERNEL);
if (!chunk->res)
return -ENOMEM;
@@ -365,12 +380,8 @@ static int usnic_vnic_discover_resources(struct pci_dev *pdev,
res_type < USNIC_VNIC_RES_TYPE_MAX; res_type++) {
err = usnic_vnic_alloc_res_chunk(vnic, res_type,
&vnic->chunks[res_type]);
- if (err) {
- usnic_err("Failed to alloc res %s with err %d\n",
- usnic_vnic_res_type_to_str(res_type),
- err);
+ if (err)
goto out_clean_chunks;
- }
}
return 0;
@@ -435,11 +446,8 @@ struct usnic_vnic *usnic_vnic_alloc(struct pci_dev *pdev)
}
vnic = kzalloc(sizeof(*vnic), GFP_KERNEL);
- if (!vnic) {
- usnic_err("Failed to alloc vnic for %s - out of memory\n",
- pci_name(pdev));
+ if (!vnic)
return ERR_PTR(-ENOMEM);
- }
spin_lock_init(&vnic->res_lock);
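Several hunks above convert kzalloc(sizeof(*p) * cnt, ...) to kcalloc(). The point, shown with an invented helper, is that kcalloc() checks the count/size multiplication for overflow:

/* Not driver code: why the kcalloc() conversions above matter. */
#include <linux/slab.h>

static int *demo_alloc_array(size_t cnt)
{
	/* Same intent as kzalloc(cnt * sizeof(int), GFP_KERNEL), but an
	 * overflowing cnt fails with NULL instead of a short buffer. */
	return kcalloc(cnt, sizeof(int), GFP_KERNEL);
}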
diff --git a/drivers/infiniband/hw/usnic/usnic_vnic.h b/drivers/infiniband/hw/usnic/usnic_vnic.h
index 14d931a8829d..a08423e478af 100644
--- a/drivers/infiniband/hw/usnic/usnic_vnic.h
+++ b/drivers/infiniband/hw/usnic/usnic_vnic.h
@@ -1,9 +1,24 @@
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/vmw_pvrdma/Kconfig b/drivers/infiniband/hw/vmw_pvrdma/Kconfig
new file mode 100644
index 000000000000..a11e892e5e77
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INFINIBAND_VMWARE_PVRDMA
+ tristate "VMware Paravirtualized RDMA Driver"
+ depends on NETDEVICES && ETHERNET && PCI && INET && VMXNET3
+ help
+ This driver provides low-level support for the VMware Paravirtual
+ RDMA adapter. It interacts with the VMXNet3 driver to provide
+ Ethernet capabilities.
diff --git a/drivers/infiniband/hw/vmw_pvrdma/Makefile b/drivers/infiniband/hw/vmw_pvrdma/Makefile
new file mode 100644
index 000000000000..0f5fa4e8cfd0
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_INFINIBAND_VMWARE_PVRDMA) += vmw_pvrdma.o
+
+vmw_pvrdma-y := pvrdma_cmd.o pvrdma_cq.o pvrdma_doorbell.o pvrdma_main.o pvrdma_misc.o pvrdma_mr.o pvrdma_qp.o pvrdma_srq.o pvrdma_verbs.o
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
new file mode 100644
index 000000000000..763ddc6f25d1
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -0,0 +1,555 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PVRDMA_H__
+#define __PVRDMA_H__
+
+#include <linux/compiler.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/semaphore.h>
+#include <linux/workqueue.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/vmw_pvrdma-abi.h>
+
+#include "pvrdma_ring.h"
+#include "pvrdma_dev_api.h"
+#include "pvrdma_verbs.h"
+
+/* NOT the same as BIT_MASK(). */
+#define PVRDMA_MASK(n) ((n << 1) - 1)
+
+/*
+ * VMware PVRDMA PCI device id.
+ */
+#define PCI_DEVICE_ID_VMWARE_PVRDMA 0x0820
+
+#define PVRDMA_NUM_RING_PAGES 4
+#define PVRDMA_QP_NUM_HEADER_PAGES 1
+
+struct pvrdma_dev;
+
+struct pvrdma_page_dir {
+ dma_addr_t dir_dma;
+ u64 *dir;
+ int ntables;
+ u64 **tables;
+ u64 npages;
+ void **pages;
+};
+
+struct pvrdma_cq {
+ struct ib_cq ibcq;
+ int offset;
+ spinlock_t cq_lock; /* Poll lock. */
+ struct pvrdma_uar_map *uar;
+ struct ib_umem *umem;
+ struct pvrdma_ring_state *ring_state;
+ struct pvrdma_page_dir pdir;
+ u32 cq_handle;
+ bool is_kernel;
+ refcount_t refcnt;
+ struct completion free;
+};
+
+struct pvrdma_id_table {
+ u32 last;
+ u32 top;
+ u32 max;
+ u32 mask;
+ spinlock_t lock; /* Table lock. */
+ unsigned long *table;
+};
+
+struct pvrdma_uar_map {
+ unsigned long pfn;
+ void __iomem *map;
+ int index;
+};
+
+struct pvrdma_uar_table {
+ struct pvrdma_id_table tbl;
+ int size;
+};
+
+struct pvrdma_ucontext {
+ struct ib_ucontext ibucontext;
+ struct pvrdma_dev *dev;
+ struct pvrdma_uar_map uar;
+ u64 ctx_handle;
+};
+
+struct pvrdma_pd {
+ struct ib_pd ibpd;
+ u32 pdn;
+ u32 pd_handle;
+ int privileged;
+};
+
+struct pvrdma_mr {
+ u32 mr_handle;
+ u64 iova;
+ u64 size;
+};
+
+struct pvrdma_user_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+ struct pvrdma_mr mmr;
+ struct pvrdma_page_dir pdir;
+ u64 *pages;
+ u32 npages;
+ u32 max_pages;
+ u32 page_shift;
+};
+
+struct pvrdma_wq {
+ struct pvrdma_ring *ring;
+ spinlock_t lock; /* Work queue lock. */
+ int wqe_cnt;
+ int wqe_size;
+ int max_sg;
+ int offset;
+};
+
+struct pvrdma_ah {
+ struct ib_ah ibah;
+ struct pvrdma_av av;
+};
+
+struct pvrdma_srq {
+ struct ib_srq ibsrq;
+ int offset;
+ spinlock_t lock; /* SRQ lock. */
+ int wqe_cnt;
+ int wqe_size;
+ int max_gs;
+ struct ib_umem *umem;
+ struct pvrdma_ring_state *ring;
+ struct pvrdma_page_dir pdir;
+ u32 srq_handle;
+ int npages;
+ refcount_t refcnt;
+ struct completion free;
+};
+
+struct pvrdma_qp {
+ struct ib_qp ibqp;
+ u32 qp_handle;
+ u32 qkey;
+ struct pvrdma_wq sq;
+ struct pvrdma_wq rq;
+ struct ib_umem *rumem;
+ struct ib_umem *sumem;
+ struct pvrdma_page_dir pdir;
+ struct pvrdma_srq *srq;
+ int npages;
+ int npages_send;
+ int npages_recv;
+ u32 flags;
+ u8 port;
+ u8 state;
+ bool is_kernel;
+ struct mutex mutex; /* QP state mutex. */
+ refcount_t refcnt;
+ struct completion free;
+};
+
+struct pvrdma_dev {
+ /* PCI device-related information. */
+ struct ib_device ib_dev;
+ struct pci_dev *pdev;
+ void __iomem *regs;
+ struct pvrdma_device_shared_region *dsr; /* Shared region pointer */
+ dma_addr_t dsrbase; /* Shared region base address */
+ void *cmd_slot;
+ void *resp_slot;
+ unsigned long flags;
+ struct list_head device_link;
+ unsigned int dsr_version;
+
+ /* Locking and interrupt information. */
+ spinlock_t cmd_lock; /* Command lock. */
+ struct semaphore cmd_sema;
+ struct completion cmd_done;
+ unsigned int nr_vectors;
+
+ /* RDMA-related device information. */
+ union ib_gid *sgid_tbl;
+ struct pvrdma_ring_state *async_ring_state;
+ struct pvrdma_page_dir async_pdir;
+ struct pvrdma_ring_state *cq_ring_state;
+ struct pvrdma_page_dir cq_pdir;
+ struct pvrdma_cq **cq_tbl;
+ spinlock_t cq_tbl_lock;
+ struct pvrdma_srq **srq_tbl;
+ spinlock_t srq_tbl_lock;
+ struct pvrdma_qp **qp_tbl;
+ spinlock_t qp_tbl_lock;
+ struct pvrdma_uar_table uar_table;
+ struct pvrdma_uar_map driver_uar;
+ __be64 sys_image_guid;
+ spinlock_t desc_lock; /* Device modification lock. */
+ u32 port_cap_mask;
+ struct mutex port_mutex; /* Port modification mutex. */
+ bool ib_active;
+ atomic_t num_qps;
+ atomic_t num_cqs;
+ atomic_t num_srqs;
+ atomic_t num_pds;
+ atomic_t num_ahs;
+
+ /* Network device information. */
+ struct net_device *netdev;
+ struct notifier_block nb_netdev;
+};
+
+struct pvrdma_netdevice_work {
+ struct work_struct work;
+ struct net_device *event_netdev;
+ unsigned long event;
+};
+
+static inline struct pvrdma_dev *to_vdev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct pvrdma_dev, ib_dev);
+}
+
+static inline struct
+pvrdma_ucontext *to_vucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct pvrdma_ucontext, ibucontext);
+}
+
+static inline struct pvrdma_pd *to_vpd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct pvrdma_pd, ibpd);
+}
+
+static inline struct pvrdma_cq *to_vcq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct pvrdma_cq, ibcq);
+}
+
+static inline struct pvrdma_srq *to_vsrq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct pvrdma_srq, ibsrq);
+}
+
+static inline struct pvrdma_user_mr *to_vmr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct pvrdma_user_mr, ibmr);
+}
+
+static inline struct pvrdma_qp *to_vqp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct pvrdma_qp, ibqp);
+}
+
+static inline struct pvrdma_ah *to_vah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct pvrdma_ah, ibah);
+}
+
+static inline void pvrdma_write_reg(struct pvrdma_dev *dev, u32 reg, u32 val)
+{
+ writel(cpu_to_le32(val), dev->regs + reg);
+}
+
+static inline u32 pvrdma_read_reg(struct pvrdma_dev *dev, u32 reg)
+{
+ return le32_to_cpu(readl(dev->regs + reg));
+}
+
+static inline void pvrdma_write_uar_cq(struct pvrdma_dev *dev, u32 val)
+{
+ writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_CQ_OFFSET);
+}
+
+static inline void pvrdma_write_uar_qp(struct pvrdma_dev *dev, u32 val)
+{
+ writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_QP_OFFSET);
+}
+
+static inline void *pvrdma_page_dir_get_ptr(struct pvrdma_page_dir *pdir,
+ u64 offset)
+{
+ return pdir->pages[offset / PAGE_SIZE] + (offset % PAGE_SIZE);
+}
+
+static inline enum pvrdma_mtu ib_mtu_to_pvrdma(enum ib_mtu mtu)
+{
+ return (enum pvrdma_mtu)mtu;
+}
+
+static inline enum ib_mtu pvrdma_mtu_to_ib(enum pvrdma_mtu mtu)
+{
+ return (enum ib_mtu)mtu;
+}
+
+static inline enum pvrdma_port_state ib_port_state_to_pvrdma(
+ enum ib_port_state state)
+{
+ return (enum pvrdma_port_state)state;
+}
+
+static inline enum ib_port_state pvrdma_port_state_to_ib(
+ enum pvrdma_port_state state)
+{
+ return (enum ib_port_state)state;
+}
+
+static inline int pvrdma_port_cap_flags_to_ib(int flags)
+{
+ return flags;
+}
+
+static inline enum pvrdma_port_width ib_port_width_to_pvrdma(
+ enum ib_port_width width)
+{
+ return (enum pvrdma_port_width)width;
+}
+
+static inline enum ib_port_width pvrdma_port_width_to_ib(
+ enum pvrdma_port_width width)
+{
+ return (enum ib_port_width)width;
+}
+
+static inline enum pvrdma_port_speed ib_port_speed_to_pvrdma(
+ enum ib_port_speed speed)
+{
+ return (enum pvrdma_port_speed)speed;
+}
+
+static inline enum ib_port_speed pvrdma_port_speed_to_ib(
+ enum pvrdma_port_speed speed)
+{
+ return (enum ib_port_speed)speed;
+}
+
+static inline int ib_qp_attr_mask_to_pvrdma(int attr_mask)
+{
+ return attr_mask & PVRDMA_MASK(PVRDMA_QP_ATTR_MASK_MAX);
+}
+
+static inline enum pvrdma_mig_state ib_mig_state_to_pvrdma(
+ enum ib_mig_state state)
+{
+ return (enum pvrdma_mig_state)state;
+}
+
+static inline enum ib_mig_state pvrdma_mig_state_to_ib(
+ enum pvrdma_mig_state state)
+{
+ return (enum ib_mig_state)state;
+}
+
+static inline int ib_access_flags_to_pvrdma(int flags)
+{
+ return flags;
+}
+
+static inline int pvrdma_access_flags_to_ib(int flags)
+{
+ return flags & PVRDMA_MASK(PVRDMA_ACCESS_FLAGS_MAX);
+}
+
+static inline enum pvrdma_qp_type ib_qp_type_to_pvrdma(enum ib_qp_type type)
+{
+ return (enum pvrdma_qp_type)type;
+}
+
+static inline enum pvrdma_qp_state ib_qp_state_to_pvrdma(enum ib_qp_state state)
+{
+ return (enum pvrdma_qp_state)state;
+}
+
+static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state)
+{
+ return (enum ib_qp_state)state;
+}
+
+static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
+{
+ switch (op) {
+ case IB_WR_RDMA_WRITE:
+ return PVRDMA_WR_RDMA_WRITE;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ return PVRDMA_WR_RDMA_WRITE_WITH_IMM;
+ case IB_WR_SEND:
+ return PVRDMA_WR_SEND;
+ case IB_WR_SEND_WITH_IMM:
+ return PVRDMA_WR_SEND_WITH_IMM;
+ case IB_WR_RDMA_READ:
+ return PVRDMA_WR_RDMA_READ;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ return PVRDMA_WR_ATOMIC_CMP_AND_SWP;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ return PVRDMA_WR_ATOMIC_FETCH_AND_ADD;
+ case IB_WR_LSO:
+ return PVRDMA_WR_LSO;
+ case IB_WR_SEND_WITH_INV:
+ return PVRDMA_WR_SEND_WITH_INV;
+ case IB_WR_RDMA_READ_WITH_INV:
+ return PVRDMA_WR_RDMA_READ_WITH_INV;
+ case IB_WR_LOCAL_INV:
+ return PVRDMA_WR_LOCAL_INV;
+ case IB_WR_REG_MR:
+ return PVRDMA_WR_FAST_REG_MR;
+ case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+ return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP;
+ case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
+ return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD;
+ case IB_WR_REG_MR_INTEGRITY:
+ return PVRDMA_WR_REG_SIG_MR;
+ default:
+ return PVRDMA_WR_ERROR;
+ }
+}
+
+static inline enum ib_wc_status pvrdma_wc_status_to_ib(
+ enum pvrdma_wc_status status)
+{
+ return (enum ib_wc_status)status;
+}
+
+static inline int pvrdma_wc_opcode_to_ib(unsigned int opcode)
+{
+ switch (opcode) {
+ case PVRDMA_WC_SEND:
+ return IB_WC_SEND;
+ case PVRDMA_WC_RDMA_WRITE:
+ return IB_WC_RDMA_WRITE;
+ case PVRDMA_WC_RDMA_READ:
+ return IB_WC_RDMA_READ;
+ case PVRDMA_WC_COMP_SWAP:
+ return IB_WC_COMP_SWAP;
+ case PVRDMA_WC_FETCH_ADD:
+ return IB_WC_FETCH_ADD;
+ case PVRDMA_WC_LOCAL_INV:
+ return IB_WC_LOCAL_INV;
+ case PVRDMA_WC_FAST_REG_MR:
+ return IB_WC_REG_MR;
+ case PVRDMA_WC_MASKED_COMP_SWAP:
+ return IB_WC_MASKED_COMP_SWAP;
+ case PVRDMA_WC_MASKED_FETCH_ADD:
+ return IB_WC_MASKED_FETCH_ADD;
+ case PVRDMA_WC_RECV:
+ return IB_WC_RECV;
+ case PVRDMA_WC_RECV_RDMA_WITH_IMM:
+ return IB_WC_RECV_RDMA_WITH_IMM;
+ default:
+ return IB_WC_SEND;
+ }
+}
+
+static inline int pvrdma_wc_flags_to_ib(int flags)
+{
+ return flags;
+}
+
+static inline int ib_send_flags_to_pvrdma(int flags)
+{
+ return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX);
+}
+
+static inline int pvrdma_network_type_to_ib(enum pvrdma_network_type type)
+{
+ switch (type) {
+ case PVRDMA_NETWORK_ROCE_V1:
+ return RDMA_NETWORK_ROCE_V1;
+ case PVRDMA_NETWORK_IPV4:
+ return RDMA_NETWORK_IPV4;
+ case PVRDMA_NETWORK_IPV6:
+ return RDMA_NETWORK_IPV6;
+ default:
+ return RDMA_NETWORK_IPV6;
+ }
+}
+
+void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst,
+ const struct pvrdma_qp_cap *src);
+void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst,
+ const struct ib_qp_cap *src);
+void pvrdma_gid_to_ib(union ib_gid *dst, const union pvrdma_gid *src);
+void ib_gid_to_pvrdma(union pvrdma_gid *dst, const union ib_gid *src);
+void pvrdma_global_route_to_ib(struct ib_global_route *dst,
+ const struct pvrdma_global_route *src);
+void ib_global_route_to_pvrdma(struct pvrdma_global_route *dst,
+ const struct ib_global_route *src);
+void pvrdma_ah_attr_to_rdma(struct rdma_ah_attr *dst,
+ const struct pvrdma_ah_attr *src);
+void rdma_ah_attr_to_pvrdma(struct pvrdma_ah_attr *dst,
+ const struct rdma_ah_attr *src);
+u8 ib_gid_type_to_pvrdma(enum ib_gid_type gid_type);
+
+int pvrdma_uar_table_init(struct pvrdma_dev *dev);
+void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev);
+
+int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar);
+void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar);
+
+void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq);
+
+int pvrdma_page_dir_init(struct pvrdma_dev *dev, struct pvrdma_page_dir *pdir,
+ u64 npages, bool alloc_pages);
+void pvrdma_page_dir_cleanup(struct pvrdma_dev *dev,
+ struct pvrdma_page_dir *pdir);
+int pvrdma_page_dir_insert_dma(struct pvrdma_page_dir *pdir, u64 idx,
+ dma_addr_t daddr);
+int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir,
+ struct ib_umem *umem, u64 offset);
+dma_addr_t pvrdma_page_dir_get_dma(struct pvrdma_page_dir *pdir, u64 idx);
+int pvrdma_page_dir_insert_page_list(struct pvrdma_page_dir *pdir,
+ u64 *page_list, int num_pages);
+
+int pvrdma_cmd_post(struct pvrdma_dev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp, unsigned resp_code);
+
+#endif /* __PVRDMA_H__ */
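A worked example of the PVRDMA_MASK() helper defined near the top of this header (the flag value below is invented): given the highest single-bit flag a field can carry, it yields that bit plus everything below it, which is why the comment stresses it is not BIT_MASK().

#include "pvrdma.h"		/* for PVRDMA_MASK() */

#define DEMO_FLAG_MAX	(1 << 5)	/* pretend highest single-bit flag */

/*
 * PVRDMA_MASK(DEMO_FLAG_MAX) == ((1 << 5) << 1) - 1 == 0x3f, i.e. bits
 * 0..5 set, so the masking helpers above strip anything the device does
 * not define.  BIT_MASK(5) would instead be the single bit 1 << 5.
 */
static inline int demo_sanitize_flags(int flags)
{
	return flags & PVRDMA_MASK(DEMO_FLAG_MAX);
}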
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cmd.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cmd.c
new file mode 100644
index 000000000000..4a78c537d8a1
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cmd.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/list.h>
+
+#include "pvrdma.h"
+
+#define PVRDMA_CMD_TIMEOUT 10000 /* ms */
+
+static inline int pvrdma_cmd_recv(struct pvrdma_dev *dev,
+ union pvrdma_cmd_resp *resp,
+ unsigned resp_code)
+{
+ int err;
+
+ dev_dbg(&dev->pdev->dev, "receive response from device\n");
+
+ err = wait_for_completion_interruptible_timeout(&dev->cmd_done,
+ msecs_to_jiffies(PVRDMA_CMD_TIMEOUT));
+ if (err == 0 || err == -ERESTARTSYS) {
+ dev_warn(&dev->pdev->dev,
+ "completion timeout or interrupted\n");
+ return -ETIMEDOUT;
+ }
+
+ spin_lock(&dev->cmd_lock);
+ memcpy(resp, dev->resp_slot, sizeof(*resp));
+ spin_unlock(&dev->cmd_lock);
+
+ if (resp->hdr.ack != resp_code) {
+ dev_warn(&dev->pdev->dev,
+ "unknown response %#x expected %#x\n",
+ resp->hdr.ack, resp_code);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int
+pvrdma_cmd_post(struct pvrdma_dev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *resp, unsigned resp_code)
+{
+ int err;
+
+ dev_dbg(&dev->pdev->dev, "post request to device\n");
+
+ /* Serialization */
+ down(&dev->cmd_sema);
+
+ BUILD_BUG_ON(sizeof(union pvrdma_cmd_req) !=
+ sizeof(struct pvrdma_cmd_modify_qp));
+
+ spin_lock(&dev->cmd_lock);
+ memcpy(dev->cmd_slot, req, sizeof(*req));
+ spin_unlock(&dev->cmd_lock);
+
+ init_completion(&dev->cmd_done);
+ pvrdma_write_reg(dev, PVRDMA_REG_REQUEST, 0);
+
+ /* Make sure the request is written before reading status. */
+ mb();
+
+ err = pvrdma_read_reg(dev, PVRDMA_REG_ERR);
+ if (err == 0) {
+ if (resp != NULL)
+ err = pvrdma_cmd_recv(dev, resp, resp_code);
+ } else {
+ dev_warn(&dev->pdev->dev,
+ "failed to write request error reg: %d\n", err);
+ err = -EFAULT;
+ }
+
+ up(&dev->cmd_sema);
+
+ return err;
+}
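A hedged caller sketch for pvrdma_cmd_post(), modelled on its uses later in this series; the QUERY_PORT request layout comes from the full pvrdma_dev_api.h, which is only partially shown here. The idea is to fill the request union, post it, and pass the expected *_RESP code so pvrdma_cmd_recv() can validate the ack:

#include "pvrdma.h"

static int demo_query_port(struct pvrdma_dev *dev, u8 port)
{
	union pvrdma_cmd_req req = {};
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_query_port *cmd = &req.query_port;

	cmd->hdr.cmd = PVRDMA_CMD_QUERY_PORT;
	cmd->port_num = port;

	/* Blocks on cmd_sema, so may sleep; the *_RESP code lets
	 * pvrdma_cmd_recv() check the ack in the response slot. */
	return pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_PORT_RESP);
}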
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
new file mode 100644
index 000000000000..b3df6eb9b8ef
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -0,0 +1,407 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/page.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/uverbs_ioctl.h>
+
+#include "pvrdma.h"
+
+/**
+ * pvrdma_req_notify_cq - request notification for a completion queue
+ * @ibcq: the completion queue
+ * @notify_flags: notification flags
+ *
+ * @return: 0 for success.
+ */
+int pvrdma_req_notify_cq(struct ib_cq *ibcq,
+ enum ib_cq_notify_flags notify_flags)
+{
+ struct pvrdma_dev *dev = to_vdev(ibcq->device);
+ struct pvrdma_cq *cq = to_vcq(ibcq);
+ u32 val = cq->cq_handle;
+ unsigned long flags;
+ int has_data = 0;
+
+ val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
+ PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM;
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+
+ pvrdma_write_uar_cq(dev, val);
+
+ if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
+ unsigned int head;
+
+ has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
+ cq->ibcq.cqe, &head);
+ if (unlikely(has_data == PVRDMA_INVALID_IDX))
+ dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
+ }
+
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ return has_data;
+}
+
+/**
+ * pvrdma_create_cq - create completion queue
+ * @ibcq: Allocated CQ
+ * @attr: completion queue attributes
+ * @attrs: bundle
+ *
+ * @return: 0 on success
+ */
+int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_udata *udata = &attrs->driver_udata;
+ struct ib_device *ibdev = ibcq->device;
+ int entries = attr->cqe;
+ struct pvrdma_dev *dev = to_vdev(ibdev);
+ struct pvrdma_cq *cq = to_vcq(ibcq);
+ int ret;
+ int npages;
+ unsigned long flags;
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
+ struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
+ struct pvrdma_create_cq_resp cq_resp = {};
+ struct pvrdma_create_cq ucmd;
+ struct pvrdma_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct pvrdma_ucontext, ibucontext);
+
+ BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);
+
+ if (attr->flags)
+ return -EOPNOTSUPP;
+
+ entries = roundup_pow_of_two(entries);
+ if (entries < 1 || entries > dev->dsr->caps.max_cqe)
+ return -EINVAL;
+
+ if (!atomic_add_unless(&dev->num_cqs, 1, dev->dsr->caps.max_cq))
+ return -ENOMEM;
+
+ cq->ibcq.cqe = entries;
+ cq->is_kernel = !udata;
+
+ if (!cq->is_kernel) {
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
+ ret = -EFAULT;
+ goto err_cq;
+ }
+
+ cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, ucmd.buf_size,
+ IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(cq->umem)) {
+ ret = PTR_ERR(cq->umem);
+ goto err_cq;
+ }
+
+ npages = ib_umem_num_dma_blocks(cq->umem, PAGE_SIZE);
+ } else {
+ /* One extra page for shared ring state */
+ npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
+ PAGE_SIZE - 1) / PAGE_SIZE;
+
+ /* Skip header page. */
+ cq->offset = PAGE_SIZE;
+ }
+
+ if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
+ dev_warn(&dev->pdev->dev,
+ "overflow pages in completion queue\n");
+ ret = -EINVAL;
+ goto err_umem;
+ }
+
+ ret = pvrdma_page_dir_init(dev, &cq->pdir, npages, cq->is_kernel);
+ if (ret) {
+ dev_warn(&dev->pdev->dev,
+ "could not allocate page directory\n");
+ goto err_umem;
+ }
+
+ /* Ring state is always the first page. Set in library for user cq. */
+ if (cq->is_kernel)
+ cq->ring_state = cq->pdir.pages[0];
+ else
+ pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
+
+ refcount_set(&cq->refcnt, 1);
+ init_completion(&cq->free);
+ spin_lock_init(&cq->cq_lock);
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_CREATE_CQ;
+ cmd->nchunks = npages;
+ cmd->ctx_handle = context ? context->ctx_handle : 0;
+ cmd->cqe = entries;
+ cmd->pdir_dma = cq->pdir.dir_dma;
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_CQ_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not create completion queue, error: %d\n", ret);
+ goto err_page_dir;
+ }
+
+ cq->ibcq.cqe = resp->cqe;
+ cq->cq_handle = resp->cq_handle;
+ cq_resp.cqn = resp->cq_handle;
+ spin_lock_irqsave(&dev->cq_tbl_lock, flags);
+ dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
+ spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
+
+ if (!cq->is_kernel) {
+ cq->uar = &context->uar;
+
+ /* Copy udata back. */
+ if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) {
+ dev_warn(&dev->pdev->dev,
+ "failed to copy back udata\n");
+ pvrdma_destroy_cq(&cq->ibcq, udata);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+
+err_page_dir:
+ pvrdma_page_dir_cleanup(dev, &cq->pdir);
+err_umem:
+ ib_umem_release(cq->umem);
+err_cq:
+ atomic_dec(&dev->num_cqs);
+ return ret;
+}
+
+static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
+{
+ if (refcount_dec_and_test(&cq->refcnt))
+ complete(&cq->free);
+ wait_for_completion(&cq->free);
+
+ ib_umem_release(cq->umem);
+
+ pvrdma_page_dir_cleanup(dev, &cq->pdir);
+}
+
+/**
+ * pvrdma_destroy_cq - destroy completion queue
+ * @cq: the completion queue to destroy.
+ * @udata: user data or null for kernel object
+ */
+int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+{
+ struct pvrdma_cq *vcq = to_vcq(cq);
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_destroy_cq *cmd = &req.destroy_cq;
+ struct pvrdma_dev *dev = to_vdev(cq->device);
+ unsigned long flags;
+ int ret;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_DESTROY_CQ;
+ cmd->cq_handle = vcq->cq_handle;
+
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+ if (ret < 0)
+ dev_warn(&dev->pdev->dev,
+ "could not destroy completion queue, error: %d\n",
+ ret);
+
+ /* free cq's resources */
+ spin_lock_irqsave(&dev->cq_tbl_lock, flags);
+ dev->cq_tbl[vcq->cq_handle] = NULL;
+ spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
+
+ pvrdma_free_cq(dev, vcq);
+ atomic_dec(&dev->num_cqs);
+ return 0;
+}
+
+static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
+{
+ return (struct pvrdma_cqe *)pvrdma_page_dir_get_ptr(
+ &cq->pdir,
+ cq->offset +
+ sizeof(struct pvrdma_cqe) * i);
+}
+
+void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq)
+{
+ unsigned int head;
+ int has_data;
+
+ if (!cq->is_kernel)
+ return;
+
+ /* Lock held */
+ has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
+ cq->ibcq.cqe, &head);
+ if (unlikely(has_data > 0)) {
+ int items;
+ int curr;
+ int tail = pvrdma_idx(&cq->ring_state->rx.prod_tail,
+ cq->ibcq.cqe);
+ struct pvrdma_cqe *cqe;
+ struct pvrdma_cqe *curr_cqe;
+
+ items = (tail > head) ? (tail - head) :
+ (cq->ibcq.cqe - head + tail);
+ curr = --tail;
+ while (items-- > 0) {
+ if (curr < 0)
+ curr = cq->ibcq.cqe - 1;
+ if (tail < 0)
+ tail = cq->ibcq.cqe - 1;
+ curr_cqe = get_cqe(cq, curr);
+ if ((curr_cqe->qp & 0xFFFF) != qp->qp_handle) {
+ if (curr != tail) {
+ cqe = get_cqe(cq, tail);
+ *cqe = *curr_cqe;
+ }
+ tail--;
+ } else {
+ pvrdma_idx_ring_inc(
+ &cq->ring_state->rx.cons_head,
+ cq->ibcq.cqe);
+ }
+ curr--;
+ }
+ }
+}
+
+static int pvrdma_poll_one(struct pvrdma_cq *cq, struct pvrdma_qp **cur_qp,
+ struct ib_wc *wc)
+{
+ struct pvrdma_dev *dev = to_vdev(cq->ibcq.device);
+ int has_data;
+ unsigned int head;
+ bool tried = false;
+ struct pvrdma_cqe *cqe;
+
+retry:
+ has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
+ cq->ibcq.cqe, &head);
+ if (has_data == 0) {
+ if (tried)
+ return -EAGAIN;
+
+ pvrdma_write_uar_cq(dev, cq->cq_handle | PVRDMA_UAR_CQ_POLL);
+
+ tried = true;
+ goto retry;
+ } else if (has_data == PVRDMA_INVALID_IDX) {
+ dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
+ return -EAGAIN;
+ }
+
+ cqe = get_cqe(cq, head);
+
+ /* Ensure cqe is valid. */
+ rmb();
+ if (dev->qp_tbl[cqe->qp & 0xffff])
+ *cur_qp = (struct pvrdma_qp *)dev->qp_tbl[cqe->qp & 0xffff];
+ else
+ return -EAGAIN;
+
+ wc->opcode = pvrdma_wc_opcode_to_ib(cqe->opcode);
+ wc->status = pvrdma_wc_status_to_ib(cqe->status);
+ wc->wr_id = cqe->wr_id;
+ wc->qp = &(*cur_qp)->ibqp;
+ wc->byte_len = cqe->byte_len;
+ wc->ex.imm_data = cqe->imm_data;
+ wc->src_qp = cqe->src_qp;
+ wc->wc_flags = pvrdma_wc_flags_to_ib(cqe->wc_flags);
+ wc->pkey_index = cqe->pkey_index;
+ wc->slid = cqe->slid;
+ wc->sl = cqe->sl;
+ wc->dlid_path_bits = cqe->dlid_path_bits;
+ wc->port_num = cqe->port_num;
+ wc->vendor_err = cqe->vendor_err;
+ wc->network_hdr_type = pvrdma_network_type_to_ib(cqe->network_hdr_type);
+
+ /* Update shared ring state */
+ pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);
+
+ return 0;
+}
+
+/**
+ * pvrdma_poll_cq - poll for work completion queue entries
+ * @ibcq: completion queue
+ * @num_entries: the maximum number of entries
+ * @wc: pointer to work completion array
+ *
+ * @return: number of polled completion entries
+ */
+int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct pvrdma_cq *cq = to_vcq(ibcq);
+ struct pvrdma_qp *cur_qp = NULL;
+ unsigned long flags;
+ int npolled;
+
+ if (num_entries < 1 || wc == NULL)
+ return 0;
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ for (npolled = 0; npolled < num_entries; ++npolled) {
+ if (pvrdma_poll_one(cq, &cur_qp, wc + npolled))
+ break;
+ }
+
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ /* Ensure we do not return errors from poll_cq */
+ return npolled;
+}
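pvrdma_poll_cq() above deliberately never returns an error, only the number of entries it filled in. A hypothetical kernel consumer (names invented) therefore just loops on ib_poll_cq(), which lands in the driver's poll routine, until it returns 0:

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

static void demo_drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc[16];
	int n, i;

	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
		for (i = 0; i < n; i++)
			pr_debug("wr_id %llu status %d\n",
				 wc[i].wr_id, wc[i].status);
}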
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
new file mode 100644
index 000000000000..86a6c054ea26
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
@@ -0,0 +1,685 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PVRDMA_DEV_API_H__
+#define __PVRDMA_DEV_API_H__
+
+#include <linux/types.h>
+
+#include "pvrdma_verbs.h"
+
+/*
+ * PVRDMA version macros. Some new features require updates to PVRDMA_VERSION.
+ * These macros allow us to check for different features if necessary.
+ */
+
+#define PVRDMA_ROCEV1_VERSION 17
+#define PVRDMA_ROCEV2_VERSION 18
+#define PVRDMA_PPN64_VERSION 19
+#define PVRDMA_QPHANDLE_VERSION 20
+#define PVRDMA_VERSION PVRDMA_QPHANDLE_VERSION
+
+#define PVRDMA_BOARD_ID 1
+#define PVRDMA_REV_ID 1
+
+/*
+ * Masks and accessors for page directory, which is a two-level lookup:
+ * page directory -> page table -> page. Only one directory for now, but we
+ * could expand that easily. 9 bits for tables, 9 bits for pages, gives one
+ * gigabyte for memory regions and so forth.
+ */
+
+#define PVRDMA_PDIR_SHIFT 18
+#define PVRDMA_PTABLE_SHIFT 9
+#define PVRDMA_PAGE_DIR_DIR(x) (((x) >> PVRDMA_PDIR_SHIFT) & 0x1)
+#define PVRDMA_PAGE_DIR_TABLE(x) (((x) >> PVRDMA_PTABLE_SHIFT) & 0x1ff)
+#define PVRDMA_PAGE_DIR_PAGE(x) ((x) & 0x1ff)
+#define PVRDMA_PAGE_DIR_MAX_PAGES (1 * 512 * 512)
+#define PVRDMA_MAX_FAST_REG_PAGES 128
+
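To make the comment above concrete (illustrative values only): one directory entry, 512 tables and 512 pages of 4 KiB give 512 * 512 * 4 KiB = 1 GiB, which is where PVRDMA_PAGE_DIR_MAX_PAGES comes from. A linear page index decomposes with the macros above as:

/* Assumes the PVRDMA_PAGE_DIR_* macros above are in scope. */
#include <linux/types.h>

static inline void demo_decompose(u64 idx, u64 *dir, u64 *tbl, u64 *pg)
{
	*dir = PVRDMA_PAGE_DIR_DIR(idx);	/* idx >> 18, always 0 today */
	*tbl = PVRDMA_PAGE_DIR_TABLE(idx);	/* (idx >> 9) & 0x1ff */
	*pg  = PVRDMA_PAGE_DIR_PAGE(idx);	/* idx & 0x1ff */
}
/* e.g. idx = 1000 -> dir 0, table 1, page 488 (1000 == 1 * 512 + 488). */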
+/*
+ * Max MSI-X vectors.
+ */
+
+#define PVRDMA_MAX_INTERRUPTS 3
+
+/* Register offsets within PCI resource on BAR1. */
+#define PVRDMA_REG_VERSION 0x00 /* R: Version of device. */
+#define PVRDMA_REG_DSRLOW 0x04 /* W: Device shared region low PA. */
+#define PVRDMA_REG_DSRHIGH 0x08 /* W: Device shared region high PA. */
+#define PVRDMA_REG_CTL 0x0c /* W: PVRDMA_DEVICE_CTL */
+#define PVRDMA_REG_REQUEST 0x10 /* W: Indicate device request. */
+#define PVRDMA_REG_ERR 0x14 /* R: Device error. */
+#define PVRDMA_REG_ICR 0x18 /* R: Interrupt cause. */
+#define PVRDMA_REG_IMR 0x1c /* R/W: Interrupt mask. */
+#define PVRDMA_REG_MACL 0x20 /* R/W: MAC address low. */
+#define PVRDMA_REG_MACH 0x24 /* R/W: MAC address high. */
+
+/* Object flags. */
+#define PVRDMA_CQ_FLAG_ARMED_SOL BIT(0) /* Armed for solicited-only. */
+#define PVRDMA_CQ_FLAG_ARMED BIT(1) /* Armed. */
+#define PVRDMA_MR_FLAG_DMA BIT(0) /* DMA region. */
+#define PVRDMA_MR_FLAG_FRMR BIT(1) /* Fast reg memory region. */
+
+/*
+ * Atomic operation capability (masked versions are extended atomic
+ * operations.
+ */
+
+#define PVRDMA_ATOMIC_OP_COMP_SWAP BIT(0) /* Compare and swap. */
+#define PVRDMA_ATOMIC_OP_FETCH_ADD BIT(1) /* Fetch and add. */
+#define PVRDMA_ATOMIC_OP_MASK_COMP_SWAP BIT(2) /* Masked compare and swap. */
+#define PVRDMA_ATOMIC_OP_MASK_FETCH_ADD BIT(3) /* Masked fetch and add. */
+
+/*
+ * Base Memory Management Extension flags to support Fast Reg Memory Regions
+ * and Fast Reg Work Requests. Each flag represents a verb operation and we
+ * must support all of them to qualify for the BMME device cap.
+ */
+
+#define PVRDMA_BMME_FLAG_LOCAL_INV BIT(0) /* Local Invalidate. */
+#define PVRDMA_BMME_FLAG_REMOTE_INV BIT(1) /* Remote Invalidate. */
+#define PVRDMA_BMME_FLAG_FAST_REG_WR BIT(2) /* Fast Reg Work Request. */
+
+/*
+ * GID types. The interpretation of the gid_types bit field in the device
+ * capabilities will depend on the device mode. For now, the device only
+ * supports RoCE as mode, so only the different GID types for RoCE are
+ * defined.
+ */
+
+#define PVRDMA_GID_TYPE_FLAG_ROCE_V1 BIT(0)
+#define PVRDMA_GID_TYPE_FLAG_ROCE_V2 BIT(1)
+
+/*
+ * Version checks. This checks whether each version supports specific
+ * capabilities from the device.
+ */
+
+#define PVRDMA_IS_VERSION17(_dev) \
+ (_dev->dsr_version == PVRDMA_ROCEV1_VERSION && \
+ _dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1)
+
+#define PVRDMA_IS_VERSION18(_dev) \
+ (_dev->dsr_version >= PVRDMA_ROCEV2_VERSION && \
+ (_dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1 || \
+ _dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V2)) \
+
+#define PVRDMA_SUPPORTED(_dev) \
+ ((_dev->dsr->caps.mode == PVRDMA_DEVICE_MODE_ROCE) && \
+ (PVRDMA_IS_VERSION17(_dev) || PVRDMA_IS_VERSION18(_dev)))
+
+/*
+ * Get capability values based on device version.
+ */
+
+#define PVRDMA_GET_CAP(_dev, _old_val, _val) \
+ ((PVRDMA_IS_VERSION18(_dev)) ? _val : _old_val)
+
+enum pvrdma_pci_resource {
+ PVRDMA_PCI_RESOURCE_MSIX, /* BAR0: MSI-X, MMIO. */
+ PVRDMA_PCI_RESOURCE_REG, /* BAR1: Registers, MMIO. */
+ PVRDMA_PCI_RESOURCE_UAR, /* BAR2: UAR pages, MMIO, 64-bit. */
+ PVRDMA_PCI_RESOURCE_LAST, /* Last. */
+};
+
+enum pvrdma_device_ctl {
+ PVRDMA_DEVICE_CTL_ACTIVATE, /* Activate device. */
+ PVRDMA_DEVICE_CTL_UNQUIESCE, /* Unquiesce device. */
+ PVRDMA_DEVICE_CTL_RESET, /* Reset device. */
+};
+
+enum pvrdma_intr_vector {
+ PVRDMA_INTR_VECTOR_RESPONSE, /* Command response. */
+ PVRDMA_INTR_VECTOR_ASYNC, /* Async events. */
+ PVRDMA_INTR_VECTOR_CQ, /* CQ notification. */
+ /* Additional CQ notification vectors. */
+};
+
+enum pvrdma_intr_cause {
+ PVRDMA_INTR_CAUSE_RESPONSE = (1 << PVRDMA_INTR_VECTOR_RESPONSE),
+ PVRDMA_INTR_CAUSE_ASYNC = (1 << PVRDMA_INTR_VECTOR_ASYNC),
+ PVRDMA_INTR_CAUSE_CQ = (1 << PVRDMA_INTR_VECTOR_CQ),
+};
+
+enum pvrdma_gos_bits {
+ PVRDMA_GOS_BITS_UNK, /* Unknown. */
+ PVRDMA_GOS_BITS_32, /* 32-bit. */
+ PVRDMA_GOS_BITS_64, /* 64-bit. */
+};
+
+enum pvrdma_gos_type {
+ PVRDMA_GOS_TYPE_UNK, /* Unknown. */
+ PVRDMA_GOS_TYPE_LINUX, /* Linux. */
+};
+
+enum pvrdma_device_mode {
+ PVRDMA_DEVICE_MODE_ROCE, /* RoCE. */
+ PVRDMA_DEVICE_MODE_IWARP, /* iWarp. */
+ PVRDMA_DEVICE_MODE_IB, /* InfiniBand. */
+};
+
+struct pvrdma_gos_info {
+ u32 gos_bits:2; /* W: PVRDMA_GOS_BITS_ */
+ u32 gos_type:4; /* W: PVRDMA_GOS_TYPE_ */
+ u32 gos_ver:16; /* W: Guest OS version. */
+ u32 gos_misc:10; /* W: Other. */
+ u32 pad; /* Pad to 8-byte alignment. */
+};
+
+struct pvrdma_device_caps {
+ u64 fw_ver; /* R: Query device. */
+ __be64 node_guid;
+ __be64 sys_image_guid;
+ u64 max_mr_size;
+ u64 page_size_cap;
+ u64 atomic_arg_sizes; /* EX verbs. */
+ u32 ex_comp_mask; /* EX verbs. */
+ u32 device_cap_flags2; /* EX verbs. */
+ u32 max_fa_bit_boundary; /* EX verbs. */
+ u32 log_max_atomic_inline_arg; /* EX verbs. */
+ u32 vendor_id;
+ u32 vendor_part_id;
+ u32 hw_ver;
+ u32 max_qp;
+ u32 max_qp_wr;
+ u32 device_cap_flags;
+ u32 max_sge;
+ u32 max_sge_rd;
+ u32 max_cq;
+ u32 max_cqe;
+ u32 max_mr;
+ u32 max_pd;
+ u32 max_qp_rd_atom;
+ u32 max_ee_rd_atom;
+ u32 max_res_rd_atom;
+ u32 max_qp_init_rd_atom;
+ u32 max_ee_init_rd_atom;
+ u32 max_ee;
+ u32 max_rdd;
+ u32 max_mw;
+ u32 max_raw_ipv6_qp;
+ u32 max_raw_ethy_qp;
+ u32 max_mcast_grp;
+ u32 max_mcast_qp_attach;
+ u32 max_total_mcast_qp_attach;
+ u32 max_ah;
+ u32 max_fmr;
+ u32 max_map_per_fmr;
+ u32 max_srq;
+ u32 max_srq_wr;
+ u32 max_srq_sge;
+ u32 max_uar;
+ u32 gid_tbl_len;
+ u16 max_pkeys;
+ u8 local_ca_ack_delay;
+ u8 phys_port_cnt;
+ u8 mode; /* PVRDMA_DEVICE_MODE_ */
+ u8 atomic_ops; /* PVRDMA_ATOMIC_OP_* bits */
+ u8 bmme_flags; /* FRWR Mem Mgmt Extensions */
+ u8 gid_types; /* PVRDMA_GID_TYPE_FLAG_ */
+ u32 max_fast_reg_page_list_len;
+};
+
+struct pvrdma_ring_page_info {
+ u32 num_pages; /* Num pages incl. header. */
+ u32 reserved; /* Reserved. */
+ u64 pdir_dma; /* Page directory PA. */
+};
+
+#pragma pack(push, 1)
+
+struct pvrdma_device_shared_region {
+ u32 driver_version; /* W: Driver version. */
+ u32 pad; /* Pad to 8-byte align. */
+ struct pvrdma_gos_info gos_info; /* W: Guest OS information. */
+ u64 cmd_slot_dma; /* W: Command slot address. */
+ u64 resp_slot_dma; /* W: Response slot address. */
+ struct pvrdma_ring_page_info async_ring_pages;
+ /* W: Async ring page info. */
+ struct pvrdma_ring_page_info cq_ring_pages;
+ /* W: CQ ring page info. */
+ union {
+ u32 uar_pfn; /* W: UAR pageframe. */
+ u64 uar_pfn64; /* W: 64-bit UAR page frame. */
+ };
+ struct pvrdma_device_caps caps; /* R: Device capabilities. */
+};
+
+#pragma pack(pop)
+
+/* Event types. Currently a 1:1 mapping with enum ib_event. */
+enum pvrdma_eqe_type {
+ PVRDMA_EVENT_CQ_ERR,
+ PVRDMA_EVENT_QP_FATAL,
+ PVRDMA_EVENT_QP_REQ_ERR,
+ PVRDMA_EVENT_QP_ACCESS_ERR,
+ PVRDMA_EVENT_COMM_EST,
+ PVRDMA_EVENT_SQ_DRAINED,
+ PVRDMA_EVENT_PATH_MIG,
+ PVRDMA_EVENT_PATH_MIG_ERR,
+ PVRDMA_EVENT_DEVICE_FATAL,
+ PVRDMA_EVENT_PORT_ACTIVE,
+ PVRDMA_EVENT_PORT_ERR,
+ PVRDMA_EVENT_LID_CHANGE,
+ PVRDMA_EVENT_PKEY_CHANGE,
+ PVRDMA_EVENT_SM_CHANGE,
+ PVRDMA_EVENT_SRQ_ERR,
+ PVRDMA_EVENT_SRQ_LIMIT_REACHED,
+ PVRDMA_EVENT_QP_LAST_WQE_REACHED,
+ PVRDMA_EVENT_CLIENT_REREGISTER,
+ PVRDMA_EVENT_GID_CHANGE,
+};
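Since these values intentionally mirror enum ib_event_type one-to-one (per the comment above), an event type read from the async event ring can be handed to the IB core without a translation table. A minimal sketch of that assumption, with an invented helper name:

/* Illustrative only: relies on the documented 1:1 mapping with enum ib_event. */
static inline enum ib_event_type pvrdma_example_to_ib_event(u32 eqe_type)
{
	return (enum ib_event_type)eqe_type;
}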
+
+/* Event queue element. */
+struct pvrdma_eqe {
+ u32 type; /* Event type. */
+ u32 info; /* Handle, other. */
+};
+
+/* CQ notification queue element. */
+struct pvrdma_cqne {
+ u32 info; /* Handle */
+};
+
+enum {
+ PVRDMA_CMD_FIRST,
+ PVRDMA_CMD_QUERY_PORT = PVRDMA_CMD_FIRST,
+ PVRDMA_CMD_QUERY_PKEY,
+ PVRDMA_CMD_CREATE_PD,
+ PVRDMA_CMD_DESTROY_PD,
+ PVRDMA_CMD_CREATE_MR,
+ PVRDMA_CMD_DESTROY_MR,
+ PVRDMA_CMD_CREATE_CQ,
+ PVRDMA_CMD_RESIZE_CQ,
+ PVRDMA_CMD_DESTROY_CQ,
+ PVRDMA_CMD_CREATE_QP,
+ PVRDMA_CMD_MODIFY_QP,
+ PVRDMA_CMD_QUERY_QP,
+ PVRDMA_CMD_DESTROY_QP,
+ PVRDMA_CMD_CREATE_UC,
+ PVRDMA_CMD_DESTROY_UC,
+ PVRDMA_CMD_CREATE_BIND,
+ PVRDMA_CMD_DESTROY_BIND,
+ PVRDMA_CMD_CREATE_SRQ,
+ PVRDMA_CMD_MODIFY_SRQ,
+ PVRDMA_CMD_QUERY_SRQ,
+ PVRDMA_CMD_DESTROY_SRQ,
+ PVRDMA_CMD_MAX,
+};
+
+enum {
+ PVRDMA_CMD_FIRST_RESP = (1 << 31),
+ PVRDMA_CMD_QUERY_PORT_RESP = PVRDMA_CMD_FIRST_RESP,
+ PVRDMA_CMD_QUERY_PKEY_RESP,
+ PVRDMA_CMD_CREATE_PD_RESP,
+ PVRDMA_CMD_DESTROY_PD_RESP_NOOP,
+ PVRDMA_CMD_CREATE_MR_RESP,
+ PVRDMA_CMD_DESTROY_MR_RESP_NOOP,
+ PVRDMA_CMD_CREATE_CQ_RESP,
+ PVRDMA_CMD_RESIZE_CQ_RESP,
+ PVRDMA_CMD_DESTROY_CQ_RESP_NOOP,
+ PVRDMA_CMD_CREATE_QP_RESP,
+ PVRDMA_CMD_MODIFY_QP_RESP,
+ PVRDMA_CMD_QUERY_QP_RESP,
+ PVRDMA_CMD_DESTROY_QP_RESP,
+ PVRDMA_CMD_CREATE_UC_RESP,
+ PVRDMA_CMD_DESTROY_UC_RESP_NOOP,
+ PVRDMA_CMD_CREATE_BIND_RESP_NOOP,
+ PVRDMA_CMD_DESTROY_BIND_RESP_NOOP,
+ PVRDMA_CMD_CREATE_SRQ_RESP,
+ PVRDMA_CMD_MODIFY_SRQ_RESP,
+ PVRDMA_CMD_QUERY_SRQ_RESP,
+ PVRDMA_CMD_DESTROY_SRQ_RESP,
+ PVRDMA_CMD_MAX_RESP,
+};
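The request and response opcode enums above are kept parallel, so a response opcode is simply the request opcode with bit 31 (PVRDMA_CMD_FIRST_RESP) set. A hedged sketch of that relationship; the macro name is invented and not defined by this header:

/* Illustrative: expected ack opcode for a given request opcode. */
#define PVRDMA_EXAMPLE_CMD_RESP(cmd)	((cmd) | PVRDMA_CMD_FIRST_RESP)
/* e.g. PVRDMA_EXAMPLE_CMD_RESP(PVRDMA_CMD_CREATE_MR) == PVRDMA_CMD_CREATE_MR_RESP */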
+
+struct pvrdma_cmd_hdr {
+ u64 response; /* Key for response lookup. */
+ u32 cmd; /* PVRDMA_CMD_ */
+ u32 reserved; /* Reserved. */
+};
+
+struct pvrdma_cmd_resp_hdr {
+ u64 response; /* From cmd hdr. */
+ u32 ack; /* PVRDMA_CMD_XXX_RESP */
+ u8 err; /* Error. */
+ u8 reserved[3]; /* Reserved. */
+};
+
+struct pvrdma_cmd_query_port {
+ struct pvrdma_cmd_hdr hdr;
+ u8 port_num;
+ u8 reserved[7];
+};
+
+struct pvrdma_cmd_query_port_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ struct pvrdma_port_attr attrs;
+};
+
+struct pvrdma_cmd_query_pkey {
+ struct pvrdma_cmd_hdr hdr;
+ u8 port_num;
+ u8 index;
+ u8 reserved[6];
+};
+
+struct pvrdma_cmd_query_pkey_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u16 pkey;
+ u8 reserved[6];
+};
+
+struct pvrdma_cmd_create_uc {
+ struct pvrdma_cmd_hdr hdr;
+ union {
+ u32 pfn; /* UAR page frame number */
+ u64 pfn64; /* 64-bit UAR page frame number */
+ };
+};
+
+struct pvrdma_cmd_create_uc_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 ctx_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_destroy_uc {
+ struct pvrdma_cmd_hdr hdr;
+ u32 ctx_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_pd {
+ struct pvrdma_cmd_hdr hdr;
+ u32 ctx_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_pd_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 pd_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_destroy_pd {
+ struct pvrdma_cmd_hdr hdr;
+ u32 pd_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_mr {
+ struct pvrdma_cmd_hdr hdr;
+ u64 start;
+ u64 length;
+ u64 pdir_dma;
+ u32 pd_handle;
+ u32 access_flags;
+ u32 flags;
+ u32 nchunks;
+};
+
+struct pvrdma_cmd_create_mr_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 mr_handle;
+ u32 lkey;
+ u32 rkey;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_destroy_mr {
+ struct pvrdma_cmd_hdr hdr;
+ u32 mr_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_cq {
+ struct pvrdma_cmd_hdr hdr;
+ u64 pdir_dma;
+ u32 ctx_handle;
+ u32 cqe;
+ u32 nchunks;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_cq_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 cq_handle;
+ u32 cqe;
+};
+
+struct pvrdma_cmd_resize_cq {
+ struct pvrdma_cmd_hdr hdr;
+ u32 cq_handle;
+ u32 cqe;
+};
+
+struct pvrdma_cmd_resize_cq_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 cqe;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_destroy_cq {
+ struct pvrdma_cmd_hdr hdr;
+ u32 cq_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_srq {
+ struct pvrdma_cmd_hdr hdr;
+ u64 pdir_dma;
+ u32 pd_handle;
+ u32 nchunks;
+ struct pvrdma_srq_attr attrs;
+ u8 srq_type;
+ u8 reserved[7];
+};
+
+struct pvrdma_cmd_create_srq_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 srqn;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_modify_srq {
+ struct pvrdma_cmd_hdr hdr;
+ u32 srq_handle;
+ u32 attr_mask;
+ struct pvrdma_srq_attr attrs;
+};
+
+struct pvrdma_cmd_query_srq {
+ struct pvrdma_cmd_hdr hdr;
+ u32 srq_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_query_srq_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ struct pvrdma_srq_attr attrs;
+};
+
+struct pvrdma_cmd_destroy_srq {
+ struct pvrdma_cmd_hdr hdr;
+ u32 srq_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_qp {
+ struct pvrdma_cmd_hdr hdr;
+ u64 pdir_dma;
+ u32 pd_handle;
+ u32 send_cq_handle;
+ u32 recv_cq_handle;
+ u32 srq_handle;
+ u32 max_send_wr;
+ u32 max_recv_wr;
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 max_inline_data;
+ u32 lkey;
+ u32 access_flags;
+ u16 total_chunks;
+ u16 send_chunks;
+ u16 max_atomic_arg;
+ u8 sq_sig_all;
+ u8 qp_type;
+ u8 is_srq;
+ u8 reserved[3];
+};
+
+struct pvrdma_cmd_create_qp_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 qpn;
+ u32 max_send_wr;
+ u32 max_recv_wr;
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 max_inline_data;
+};
+
+struct pvrdma_cmd_create_qp_resp_v2 {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 qpn;
+ u32 qp_handle;
+ u32 max_send_wr;
+ u32 max_recv_wr;
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 max_inline_data;
+};
+
+struct pvrdma_cmd_modify_qp {
+ struct pvrdma_cmd_hdr hdr;
+ u32 qp_handle;
+ u32 attr_mask;
+ struct pvrdma_qp_attr attrs;
+};
+
+struct pvrdma_cmd_query_qp {
+ struct pvrdma_cmd_hdr hdr;
+ u32 qp_handle;
+ u32 attr_mask;
+};
+
+struct pvrdma_cmd_query_qp_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ struct pvrdma_qp_attr attrs;
+};
+
+struct pvrdma_cmd_destroy_qp {
+ struct pvrdma_cmd_hdr hdr;
+ u32 qp_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_destroy_qp_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 events_reported;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_bind {
+ struct pvrdma_cmd_hdr hdr;
+ u32 mtu;
+ u32 vlan;
+ u32 index;
+ u8 new_gid[16];
+ u8 gid_type;
+ u8 reserved[3];
+};
+
+struct pvrdma_cmd_destroy_bind {
+ struct pvrdma_cmd_hdr hdr;
+ u32 index;
+ u8 dest_gid[16];
+ u8 reserved[4];
+};
+
+union pvrdma_cmd_req {
+ struct pvrdma_cmd_hdr hdr;
+ struct pvrdma_cmd_query_port query_port;
+ struct pvrdma_cmd_query_pkey query_pkey;
+ struct pvrdma_cmd_create_uc create_uc;
+ struct pvrdma_cmd_destroy_uc destroy_uc;
+ struct pvrdma_cmd_create_pd create_pd;
+ struct pvrdma_cmd_destroy_pd destroy_pd;
+ struct pvrdma_cmd_create_mr create_mr;
+ struct pvrdma_cmd_destroy_mr destroy_mr;
+ struct pvrdma_cmd_create_cq create_cq;
+ struct pvrdma_cmd_resize_cq resize_cq;
+ struct pvrdma_cmd_destroy_cq destroy_cq;
+ struct pvrdma_cmd_create_qp create_qp;
+ struct pvrdma_cmd_modify_qp modify_qp;
+ struct pvrdma_cmd_query_qp query_qp;
+ struct pvrdma_cmd_destroy_qp destroy_qp;
+ struct pvrdma_cmd_create_bind create_bind;
+ struct pvrdma_cmd_destroy_bind destroy_bind;
+ struct pvrdma_cmd_create_srq create_srq;
+ struct pvrdma_cmd_modify_srq modify_srq;
+ struct pvrdma_cmd_query_srq query_srq;
+ struct pvrdma_cmd_destroy_srq destroy_srq;
+};
+
+union pvrdma_cmd_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ struct pvrdma_cmd_query_port_resp query_port_resp;
+ struct pvrdma_cmd_query_pkey_resp query_pkey_resp;
+ struct pvrdma_cmd_create_uc_resp create_uc_resp;
+ struct pvrdma_cmd_create_pd_resp create_pd_resp;
+ struct pvrdma_cmd_create_mr_resp create_mr_resp;
+ struct pvrdma_cmd_create_cq_resp create_cq_resp;
+ struct pvrdma_cmd_resize_cq_resp resize_cq_resp;
+ struct pvrdma_cmd_create_qp_resp create_qp_resp;
+ struct pvrdma_cmd_create_qp_resp_v2 create_qp_resp_v2;
+ struct pvrdma_cmd_query_qp_resp query_qp_resp;
+ struct pvrdma_cmd_destroy_qp_resp destroy_qp_resp;
+ struct pvrdma_cmd_create_srq_resp create_srq_resp;
+ struct pvrdma_cmd_query_srq_resp query_srq_resp;
+};
+
+#endif /* __PVRDMA_DEV_API_H__ */
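For orientation, the command structures defined in this header are driven through pvrdma_cmd_post(), as the source files below do (see pvrdma_add_gid_at_index() and pvrdma_get_dma_mr() later in this patch). The function below is an illustrative sketch of the round trip for a pkey query, not code from the patch:

static int pvrdma_example_query_pkey(struct pvrdma_dev *dev, u8 port, u8 index,
				     u16 *pkey)
{
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_query_pkey *cmd = &req.query_pkey;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_QUERY_PKEY;
	cmd->port_num = port;
	cmd->index = index;

	/* Blocks until the device acks with PVRDMA_CMD_QUERY_PKEY_RESP. */
	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_PKEY_RESP);
	if (ret < 0)
		return ret;

	*pkey = rsp.query_pkey_resp.pkey;
	return 0;
}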
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
new file mode 100644
index 000000000000..9a4de962e947
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include "pvrdma.h"
+
+int pvrdma_uar_table_init(struct pvrdma_dev *dev)
+{
+ u32 num = dev->dsr->caps.max_uar;
+ u32 mask = num - 1;
+ struct pvrdma_id_table *tbl = &dev->uar_table.tbl;
+
+ if (!is_power_of_2(num))
+ return -EINVAL;
+
+ tbl->last = 0;
+ tbl->top = 0;
+ tbl->max = num;
+ tbl->mask = mask;
+ spin_lock_init(&tbl->lock);
+ tbl->table = bitmap_zalloc(num, GFP_KERNEL);
+ if (!tbl->table)
+ return -ENOMEM;
+
+ /* 0th UAR is taken by the device. */
+ __set_bit(0, tbl->table);
+
+ return 0;
+}
+
+void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev)
+{
+ struct pvrdma_id_table *tbl = &dev->uar_table.tbl;
+
+ bitmap_free(tbl->table);
+}
+
+int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar)
+{
+ struct pvrdma_id_table *tbl;
+ unsigned long flags;
+ u32 obj;
+
+ tbl = &dev->uar_table.tbl;
+
+ spin_lock_irqsave(&tbl->lock, flags);
+ obj = find_next_zero_bit(tbl->table, tbl->max, tbl->last);
+ if (obj >= tbl->max) {
+ tbl->top = (tbl->top + tbl->max) & tbl->mask;
+ obj = find_first_zero_bit(tbl->table, tbl->max);
+ }
+
+ if (obj >= tbl->max) {
+ spin_unlock_irqrestore(&tbl->lock, flags);
+ return -ENOMEM;
+ }
+
+ __set_bit(obj, tbl->table);
+ obj |= tbl->top;
+
+ spin_unlock_irqrestore(&tbl->lock, flags);
+
+ uar->index = obj;
+ uar->pfn = (pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_UAR) >>
+ PAGE_SHIFT) + uar->index;
+
+ return 0;
+}
+
+void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar)
+{
+ struct pvrdma_id_table *tbl = &dev->uar_table.tbl;
+ unsigned long flags;
+ u32 obj;
+
+ obj = uar->index & (tbl->max - 1);
+ spin_lock_irqsave(&tbl->lock, flags);
+ __clear_bit(obj, tbl->table);
+ tbl->last = min(tbl->last, obj);
+ tbl->top = (tbl->top + tbl->max) & tbl->mask;
+ spin_unlock_irqrestore(&tbl->lock, flags);
+}
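A usage sketch for the UAR allocator above: pvrdma_uar_alloc() hands back a slot index plus the page frame number of the matching doorbell page in the UAR BAR, and pvrdma_uar_free() returns the slot to the bitmap. The function below is illustrative only; the real callers live elsewhere in this patch.

static int pvrdma_example_uar_cycle(struct pvrdma_dev *dev)
{
	struct pvrdma_uar_map uar;
	int ret;

	ret = pvrdma_uar_alloc(dev, &uar);
	if (ret)
		return ret;

	/*
	 * uar.index selects a doorbell slot; uar.pfn is the corresponding
	 * page frame in BAR2, suitable for remapping to userspace.
	 */
	pvrdma_uar_free(dev, &uar);
	return 0;
}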
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
new file mode 100644
index 000000000000..1664d1d7d969
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -0,0 +1,1160 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/errno.h>
+#include <linux/inetdevice.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
+#include <net/addrconf.h>
+
+#include "pvrdma.h"
+
+#define DRV_NAME "vmw_pvrdma"
+#define DRV_VERSION "1.0.1.0-k"
+
+static DEFINE_MUTEX(pvrdma_device_list_lock);
+static LIST_HEAD(pvrdma_device_list);
+static struct workqueue_struct *event_wq;
+
+static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context);
+static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context);
+
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "VMW_PVRDMA-%s\n", DRV_VERSION);
+}
+static DEVICE_ATTR_RO(hca_type);
+
+static ssize_t hw_rev_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", PVRDMA_REV_ID);
+}
+static DEVICE_ATTR_RO(hw_rev);
+
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", PVRDMA_BOARD_ID);
+}
+static DEVICE_ATTR_RO(board_id);
+
+static struct attribute *pvrdma_class_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL,
+};
+
+static const struct attribute_group pvrdma_attr_group = {
+ .attrs = pvrdma_class_attributes,
+};
+
+static void pvrdma_get_fw_ver_str(struct ib_device *device, char *str)
+{
+ struct pvrdma_dev *dev =
+ container_of(device, struct pvrdma_dev, ib_dev);
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d\n",
+ (int) (dev->dsr->caps.fw_ver >> 32),
+ (int) (dev->dsr->caps.fw_ver >> 16) & 0xffff,
+ (int) dev->dsr->caps.fw_ver & 0xffff);
+}
+
+static int pvrdma_init_device(struct pvrdma_dev *dev)
+{
+ /* Initialize device locks, command semaphore and resource counters. */
+ spin_lock_init(&dev->cmd_lock);
+ sema_init(&dev->cmd_sema, 1);
+ atomic_set(&dev->num_qps, 0);
+ atomic_set(&dev->num_srqs, 0);
+ atomic_set(&dev->num_cqs, 0);
+ atomic_set(&dev->num_pds, 0);
+ atomic_set(&dev->num_ahs, 0);
+
+ return 0;
+}
+
+static int pvrdma_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct pvrdma_dev *dev = to_vdev(ibdev);
+ struct ib_port_attr attr;
+ int err;
+
+ if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1)
+ immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE;
+ else if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V2)
+ immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ return 0;
+}
+
+static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
+ enum ib_event_type event)
+{
+ struct ib_event ib_event;
+
+ memset(&ib_event, 0, sizeof(ib_event));
+ ib_event.device = &dev->ib_dev;
+ ib_event.element.port_num = port;
+ ib_event.event = event;
+ ib_dispatch_event(&ib_event);
+}
+
+static void pvrdma_report_event_handle(struct ib_device *ibdev,
+ struct net_device *ndev,
+ unsigned long event)
+{
+ struct pvrdma_dev *dev = container_of(ibdev, struct pvrdma_dev, ib_dev);
+
+ switch (event) {
+ case NETDEV_DOWN:
+ pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
+ break;
+ case NETDEV_UP:
+ pvrdma_write_reg(dev, PVRDMA_REG_CTL,
+ PVRDMA_DEVICE_CTL_UNQUIESCE);
+
+ mb();
+
+ if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
+ dev_err(&dev->pdev->dev,
+ "failed to activate device during link up\n");
+ else
+ pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static const struct ib_device_ops pvrdma_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_VMW_PVRDMA,
+ .uverbs_abi_ver = PVRDMA_UVERBS_ABI_VERSION,
+
+ .add_gid = pvrdma_add_gid,
+ .alloc_mr = pvrdma_alloc_mr,
+ .alloc_pd = pvrdma_alloc_pd,
+ .alloc_ucontext = pvrdma_alloc_ucontext,
+ .create_ah = pvrdma_create_ah,
+ .create_cq = pvrdma_create_cq,
+ .create_qp = pvrdma_create_qp,
+ .dealloc_pd = pvrdma_dealloc_pd,
+ .dealloc_ucontext = pvrdma_dealloc_ucontext,
+ .del_gid = pvrdma_del_gid,
+ .dereg_mr = pvrdma_dereg_mr,
+ .destroy_ah = pvrdma_destroy_ah,
+ .destroy_cq = pvrdma_destroy_cq,
+ .destroy_qp = pvrdma_destroy_qp,
+ .device_group = &pvrdma_attr_group,
+ .get_dev_fw_str = pvrdma_get_fw_ver_str,
+ .get_dma_mr = pvrdma_get_dma_mr,
+ .get_link_layer = pvrdma_port_link_layer,
+ .get_port_immutable = pvrdma_port_immutable,
+ .map_mr_sg = pvrdma_map_mr_sg,
+ .mmap = pvrdma_mmap,
+ .modify_port = pvrdma_modify_port,
+ .modify_qp = pvrdma_modify_qp,
+ .poll_cq = pvrdma_poll_cq,
+ .post_recv = pvrdma_post_recv,
+ .post_send = pvrdma_post_send,
+ .query_device = pvrdma_query_device,
+ .query_gid = pvrdma_query_gid,
+ .query_pkey = pvrdma_query_pkey,
+ .query_port = pvrdma_query_port,
+ .query_qp = pvrdma_query_qp,
+ .reg_user_mr = pvrdma_reg_user_mr,
+ .req_notify_cq = pvrdma_req_notify_cq,
+ .report_port_event = pvrdma_report_event_handle,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, pvrdma_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_cq, pvrdma_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_pd, pvrdma_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, pvrdma_qp, ibqp),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, pvrdma_ucontext, ibucontext),
+};
+
+static const struct ib_device_ops pvrdma_dev_srq_ops = {
+ .create_srq = pvrdma_create_srq,
+ .destroy_srq = pvrdma_destroy_srq,
+ .modify_srq = pvrdma_modify_srq,
+ .query_srq = pvrdma_query_srq,
+
+ INIT_RDMA_OBJ_SIZE(ib_srq, pvrdma_srq, ibsrq),
+};
+
+static int pvrdma_register_device(struct pvrdma_dev *dev)
+{
+ int ret = -ENOMEM;
+
+ dev->ib_dev.node_guid = dev->dsr->caps.node_guid;
+ dev->sys_image_guid = dev->dsr->caps.sys_image_guid;
+ dev->flags = 0;
+ dev->ib_dev.num_comp_vectors = 1;
+ dev->ib_dev.dev.parent = &dev->pdev->dev;
+
+ dev->ib_dev.node_type = RDMA_NODE_IB_CA;
+ dev->ib_dev.phys_port_cnt = dev->dsr->caps.phys_port_cnt;
+
+ ib_set_device_ops(&dev->ib_dev, &pvrdma_dev_ops);
+
+ mutex_init(&dev->port_mutex);
+ spin_lock_init(&dev->desc_lock);
+
+ dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(struct pvrdma_cq *),
+ GFP_KERNEL);
+ if (!dev->cq_tbl)
+ return ret;
+ spin_lock_init(&dev->cq_tbl_lock);
+
+ dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(struct pvrdma_qp *),
+ GFP_KERNEL);
+ if (!dev->qp_tbl)
+ goto err_cq_free;
+ spin_lock_init(&dev->qp_tbl_lock);
+
+ /* Check if SRQ is supported by backend */
+ if (dev->dsr->caps.max_srq) {
+ ib_set_device_ops(&dev->ib_dev, &pvrdma_dev_srq_ops);
+
+ dev->srq_tbl = kcalloc(dev->dsr->caps.max_srq,
+ sizeof(struct pvrdma_srq *),
+ GFP_KERNEL);
+ if (!dev->srq_tbl)
+ goto err_qp_free;
+ }
+ ret = ib_device_set_netdev(&dev->ib_dev, dev->netdev, 1);
+ if (ret)
+ goto err_srq_free;
+ spin_lock_init(&dev->srq_tbl_lock);
+
+ ret = ib_register_device(&dev->ib_dev, "vmw_pvrdma%d", &dev->pdev->dev);
+ if (ret)
+ goto err_srq_free;
+
+ dev->ib_active = true;
+
+ return 0;
+
+err_srq_free:
+ kfree(dev->srq_tbl);
+err_qp_free:
+ kfree(dev->qp_tbl);
+err_cq_free:
+ kfree(dev->cq_tbl);
+
+ return ret;
+}
+
+static irqreturn_t pvrdma_intr0_handler(int irq, void *dev_id)
+{
+ u32 icr = PVRDMA_INTR_CAUSE_RESPONSE;
+ struct pvrdma_dev *dev = dev_id;
+
+ dev_dbg(&dev->pdev->dev, "interrupt 0 (response) handler\n");
+
+ if (!dev->pdev->msix_enabled) {
+ /* Legacy interrupt: read the interrupt cause register. */
+ icr = pvrdma_read_reg(dev, PVRDMA_REG_ICR);
+ if (icr == 0)
+ return IRQ_NONE;
+ }
+
+ if (icr == PVRDMA_INTR_CAUSE_RESPONSE)
+ complete(&dev->cmd_done);
+
+ return IRQ_HANDLED;
+}
+
+static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
+{
+ struct pvrdma_qp *qp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->qp_tbl_lock, flags);
+ qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
+ if (qp)
+ refcount_inc(&qp->refcnt);
+ spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
+
+ if (qp && qp->ibqp.event_handler) {
+ struct ib_qp *ibqp = &qp->ibqp;
+ struct ib_event e;
+
+ e.device = ibqp->device;
+ e.element.qp = ibqp;
+ e.event = type; /* 1:1 mapping for now. */
+ ibqp->event_handler(&e, ibqp->qp_context);
+ }
+ if (qp) {
+ if (refcount_dec_and_test(&qp->refcnt))
+ complete(&qp->free);
+ }
+}
+
+static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
+{
+ struct pvrdma_cq *cq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->cq_tbl_lock, flags);
+ cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
+ if (cq)
+ refcount_inc(&cq->refcnt);
+ spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
+
+ if (cq && cq->ibcq.event_handler) {
+ struct ib_cq *ibcq = &cq->ibcq;
+ struct ib_event e;
+
+ e.device = ibcq->device;
+ e.element.cq = ibcq;
+ e.event = type; /* 1:1 mapping for now. */
+ ibcq->event_handler(&e, ibcq->cq_context);
+ }
+ if (cq) {
+ if (refcount_dec_and_test(&cq->refcnt))
+ complete(&cq->free);
+ }
+}
+
+static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
+{
+ struct pvrdma_srq *srq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->srq_tbl_lock, flags);
+ if (dev->srq_tbl)
+ srq = dev->srq_tbl[srqn % dev->dsr->caps.max_srq];
+ else
+ srq = NULL;
+ if (srq)
+ refcount_inc(&srq->refcnt);
+ spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
+
+ if (srq && srq->ibsrq.event_handler) {
+ struct ib_srq *ibsrq = &srq->ibsrq;
+ struct ib_event e;
+
+ e.device = ibsrq->device;
+ e.element.srq = ibsrq;
+ e.event = type; /* 1:1 mapping for now. */
+ ibsrq->event_handler(&e, ibsrq->srq_context);
+ }
+ if (srq) {
+ if (refcount_dec_and_test(&srq->refcnt))
+ complete(&srq->free);
+ }
+}
+
+static void pvrdma_dev_event(struct pvrdma_dev *dev, u8 port, int type)
+{
+ if (port < 1 || port > dev->dsr->caps.phys_port_cnt) {
+ dev_warn(&dev->pdev->dev, "event on port %d\n", port);
+ return;
+ }
+
+ pvrdma_dispatch_event(dev, port, type);
+}
+
+static inline struct pvrdma_eqe *get_eqe(struct pvrdma_dev *dev, unsigned int i)
+{
+ return (struct pvrdma_eqe *)pvrdma_page_dir_get_ptr(
+ &dev->async_pdir,
+ PAGE_SIZE +
+ sizeof(struct pvrdma_eqe) * i);
+}
+
+static irqreturn_t pvrdma_intr1_handler(int irq, void *dev_id)
+{
+ struct pvrdma_dev *dev = dev_id;
+ struct pvrdma_ring *ring = &dev->async_ring_state->rx;
+ int ring_slots = (dev->dsr->async_ring_pages.num_pages - 1) *
+ PAGE_SIZE / sizeof(struct pvrdma_eqe);
+ unsigned int head;
+
+ dev_dbg(&dev->pdev->dev, "interrupt 1 (async event) handler\n");
+
+ /*
+ * Don't process events until the IB device is registered. Otherwise
+ * we'll try to ib_dispatch_event() on an invalid device.
+ */
+ if (!dev->ib_active)
+ return IRQ_HANDLED;
+
+ while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
+ struct pvrdma_eqe *eqe;
+
+ eqe = get_eqe(dev, head);
+
+ switch (eqe->type) {
+ case PVRDMA_EVENT_QP_FATAL:
+ case PVRDMA_EVENT_QP_REQ_ERR:
+ case PVRDMA_EVENT_QP_ACCESS_ERR:
+ case PVRDMA_EVENT_COMM_EST:
+ case PVRDMA_EVENT_SQ_DRAINED:
+ case PVRDMA_EVENT_PATH_MIG:
+ case PVRDMA_EVENT_PATH_MIG_ERR:
+ case PVRDMA_EVENT_QP_LAST_WQE_REACHED:
+ pvrdma_qp_event(dev, eqe->info, eqe->type);
+ break;
+
+ case PVRDMA_EVENT_CQ_ERR:
+ pvrdma_cq_event(dev, eqe->info, eqe->type);
+ break;
+
+ case PVRDMA_EVENT_SRQ_ERR:
+ case PVRDMA_EVENT_SRQ_LIMIT_REACHED:
+ pvrdma_srq_event(dev, eqe->info, eqe->type);
+ break;
+
+ case PVRDMA_EVENT_PORT_ACTIVE:
+ case PVRDMA_EVENT_PORT_ERR:
+ case PVRDMA_EVENT_LID_CHANGE:
+ case PVRDMA_EVENT_PKEY_CHANGE:
+ case PVRDMA_EVENT_SM_CHANGE:
+ case PVRDMA_EVENT_CLIENT_REREGISTER:
+ case PVRDMA_EVENT_GID_CHANGE:
+ pvrdma_dev_event(dev, eqe->info, eqe->type);
+ break;
+
+ case PVRDMA_EVENT_DEVICE_FATAL:
+ pvrdma_dev_event(dev, 1, eqe->type);
+ break;
+
+ default:
+ break;
+ }
+
+ pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static inline struct pvrdma_cqne *get_cqne(struct pvrdma_dev *dev,
+ unsigned int i)
+{
+ return (struct pvrdma_cqne *)pvrdma_page_dir_get_ptr(
+ &dev->cq_pdir,
+ PAGE_SIZE +
+ sizeof(struct pvrdma_cqne) * i);
+}
+
+static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
+{
+ struct pvrdma_dev *dev = dev_id;
+ struct pvrdma_ring *ring = &dev->cq_ring_state->rx;
+ int ring_slots = (dev->dsr->cq_ring_pages.num_pages - 1) * PAGE_SIZE /
+ sizeof(struct pvrdma_cqne);
+ unsigned int head;
+
+ dev_dbg(&dev->pdev->dev, "interrupt x (completion) handler\n");
+
+ while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
+ struct pvrdma_cqne *cqne;
+ struct pvrdma_cq *cq;
+
+ cqne = get_cqne(dev, head);
+ spin_lock(&dev->cq_tbl_lock);
+ cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
+ if (cq)
+ refcount_inc(&cq->refcnt);
+ spin_unlock(&dev->cq_tbl_lock);
+
+ if (cq && cq->ibcq.comp_handler)
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+ if (cq) {
+ if (refcount_dec_and_test(&cq->refcnt))
+ complete(&cq->free);
+ }
+ pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void pvrdma_free_irq(struct pvrdma_dev *dev)
+{
+ int i;
+
+ dev_dbg(&dev->pdev->dev, "freeing interrupts\n");
+ for (i = 0; i < dev->nr_vectors; i++)
+ free_irq(pci_irq_vector(dev->pdev, i), dev);
+}
+
+static void pvrdma_enable_intrs(struct pvrdma_dev *dev)
+{
+ dev_dbg(&dev->pdev->dev, "enable interrupts\n");
+ pvrdma_write_reg(dev, PVRDMA_REG_IMR, 0);
+}
+
+static void pvrdma_disable_intrs(struct pvrdma_dev *dev)
+{
+ dev_dbg(&dev->pdev->dev, "disable interrupts\n");
+ pvrdma_write_reg(dev, PVRDMA_REG_IMR, ~0);
+}
+
+static int pvrdma_alloc_intrs(struct pvrdma_dev *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int ret = 0, i;
+
+ ret = pci_alloc_irq_vectors(pdev, 1, PVRDMA_MAX_INTERRUPTS,
+ PCI_IRQ_MSIX);
+ if (ret < 0) {
+ ret = pci_alloc_irq_vectors(pdev, 1, 1,
+ PCI_IRQ_MSI | PCI_IRQ_INTX);
+ if (ret < 0)
+ return ret;
+ }
+ dev->nr_vectors = ret;
+
+ ret = request_irq(pci_irq_vector(dev->pdev, 0), pvrdma_intr0_handler,
+ pdev->msix_enabled ? 0 : IRQF_SHARED, DRV_NAME, dev);
+ if (ret) {
+ dev_err(&dev->pdev->dev,
+ "failed to request interrupt 0\n");
+ goto out_free_vectors;
+ }
+
+ for (i = 1; i < dev->nr_vectors; i++) {
+ ret = request_irq(pci_irq_vector(dev->pdev, i),
+ i == 1 ? pvrdma_intr1_handler :
+ pvrdma_intrx_handler,
+ 0, DRV_NAME, dev);
+ if (ret) {
+ dev_err(&dev->pdev->dev,
+ "failed to request interrupt %d\n", i);
+ goto free_irqs;
+ }
+ }
+
+ return 0;
+
+free_irqs:
+ while (--i >= 0)
+ free_irq(pci_irq_vector(dev->pdev, i), dev);
+out_free_vectors:
+ pci_free_irq_vectors(pdev);
+ return ret;
+}
+
+static void pvrdma_free_slots(struct pvrdma_dev *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+
+ if (dev->resp_slot)
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->resp_slot,
+ dev->dsr->resp_slot_dma);
+ if (dev->cmd_slot)
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->cmd_slot,
+ dev->dsr->cmd_slot_dma);
+}
+
+static int pvrdma_add_gid_at_index(struct pvrdma_dev *dev,
+ const union ib_gid *gid,
+ u8 gid_type,
+ int index)
+{
+ int ret;
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_create_bind *cmd_bind = &req.create_bind;
+
+ if (!dev->sgid_tbl) {
+ dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
+ return -EINVAL;
+ }
+
+ memset(cmd_bind, 0, sizeof(*cmd_bind));
+ cmd_bind->hdr.cmd = PVRDMA_CMD_CREATE_BIND;
+ memcpy(cmd_bind->new_gid, gid->raw, 16);
+ cmd_bind->mtu = ib_mtu_enum_to_int(IB_MTU_1024);
+ cmd_bind->vlan = 0xfff;
+ cmd_bind->index = index;
+ cmd_bind->gid_type = gid_type;
+
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not create binding, error: %d\n", ret);
+ return -EFAULT;
+ }
+ memcpy(&dev->sgid_tbl[index], gid, sizeof(*gid));
+ return 0;
+}
+
+static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context)
+{
+ struct pvrdma_dev *dev = to_vdev(attr->device);
+
+ return pvrdma_add_gid_at_index(dev, &attr->gid,
+ ib_gid_type_to_pvrdma(attr->gid_type),
+ attr->index);
+}
+
+static int pvrdma_del_gid_at_index(struct pvrdma_dev *dev, int index)
+{
+ int ret;
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_destroy_bind *cmd_dest = &req.destroy_bind;
+
+ /* Update sgid table. */
+ if (!dev->sgid_tbl) {
+ dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
+ return -EINVAL;
+ }
+
+ memset(cmd_dest, 0, sizeof(*cmd_dest));
+ cmd_dest->hdr.cmd = PVRDMA_CMD_DESTROY_BIND;
+ memcpy(cmd_dest->dest_gid, &dev->sgid_tbl[index], 16);
+ cmd_dest->index = index;
+
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not destroy binding, error: %d\n", ret);
+ return ret;
+ }
+ memset(&dev->sgid_tbl[index], 0, 16);
+ return 0;
+}
+
+static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context)
+{
+ struct pvrdma_dev *dev = to_vdev(attr->device);
+
+ dev_dbg(&dev->pdev->dev, "removing gid at index %u from %s\n",
+ attr->index, dev->netdev->name);
+
+ return pvrdma_del_gid_at_index(dev, attr->index);
+}
+
+static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
+ struct net_device *ndev,
+ unsigned long event)
+{
+ struct pci_dev *pdev_net;
+ unsigned int slot;
+
+ switch (event) {
+ case NETDEV_REBOOT:
+ pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
+ break;
+ case NETDEV_UNREGISTER:
+ ib_device_set_netdev(&dev->ib_dev, NULL, 1);
+ dev_put(dev->netdev);
+ dev->netdev = NULL;
+ break;
+ case NETDEV_REGISTER:
+ /* The paired vmxnet3 device shares this bus and slot; its function is 0. */
+ slot = PCI_SLOT(dev->pdev->devfn);
+ pdev_net = pci_get_slot(dev->pdev->bus,
+ PCI_DEVFN(slot, 0));
+ if ((dev->netdev == NULL) &&
+ (pci_get_drvdata(pdev_net) == ndev)) {
+ /* this is our netdev */
+ ib_device_set_netdev(&dev->ib_dev, ndev, 1);
+ dev->netdev = ndev;
+ dev_hold(ndev);
+ }
+ pci_dev_put(pdev_net);
+ break;
+
+ default:
+ dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
+ event, dev_name(&dev->ib_dev.dev));
+ break;
+ }
+}
+
+static void pvrdma_netdevice_event_work(struct work_struct *work)
+{
+ struct pvrdma_netdevice_work *netdev_work;
+ struct pvrdma_dev *dev;
+
+ netdev_work = container_of(work, struct pvrdma_netdevice_work, work);
+
+ mutex_lock(&pvrdma_device_list_lock);
+ list_for_each_entry(dev, &pvrdma_device_list, device_link) {
+ if ((netdev_work->event == NETDEV_REGISTER) ||
+ (dev->netdev == netdev_work->event_netdev)) {
+ pvrdma_netdevice_event_handle(dev,
+ netdev_work->event_netdev,
+ netdev_work->event);
+ break;
+ }
+ }
+ mutex_unlock(&pvrdma_device_list_lock);
+
+ kfree(netdev_work);
+}
+
+static int pvrdma_netdevice_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *event_netdev = netdev_notifier_info_to_dev(ptr);
+ struct pvrdma_netdevice_work *netdev_work;
+
+ netdev_work = kmalloc(sizeof(*netdev_work), GFP_ATOMIC);
+ if (!netdev_work)
+ return NOTIFY_BAD;
+
+ INIT_WORK(&netdev_work->work, pvrdma_netdevice_event_work);
+ netdev_work->event_netdev = event_netdev;
+ netdev_work->event = event;
+ queue_work(event_wq, &netdev_work->work);
+
+ return NOTIFY_DONE;
+}
+
+static int pvrdma_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct pci_dev *pdev_net;
+ struct pvrdma_dev *dev;
+ int ret;
+ unsigned long start;
+ unsigned long len;
+ dma_addr_t slot_dma = 0;
+
+ dev_dbg(&pdev->dev, "initializing driver %s\n", pci_name(pdev));
+
+ /* Allocate a zeroed-out IB device. */
+ dev = ib_alloc_device(pvrdma_dev, ib_dev);
+ if (!dev) {
+ dev_err(&pdev->dev, "failed to allocate IB device\n");
+ return -ENOMEM;
+ }
+
+ mutex_lock(&pvrdma_device_list_lock);
+ list_add(&dev->device_link, &pvrdma_device_list);
+ mutex_unlock(&pvrdma_device_list_lock);
+
+ ret = pvrdma_init_device(dev);
+ if (ret)
+ goto err_free_device;
+
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot enable PCI device\n");
+ goto err_free_device;
+ }
+
+ dev_dbg(&pdev->dev, "PCI resource flags BAR0 %#lx\n",
+ pci_resource_flags(pdev, 0));
+ dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
+ (unsigned long long)pci_resource_len(pdev, 0));
+ dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
+ (unsigned long long)pci_resource_start(pdev, 0));
+ dev_dbg(&pdev->dev, "PCI resource flags BAR1 %#lx\n",
+ pci_resource_flags(pdev, 1));
+ dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
+ (unsigned long long)pci_resource_len(pdev, 1));
+ dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
+ (unsigned long long)pci_resource_start(pdev, 1));
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
+ !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev, "PCI BAR region not MMIO\n");
+ ret = -ENOMEM;
+ goto err_disable_pdev;
+ }
+
+ ret = pci_request_regions(pdev, DRV_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot request PCI resources\n");
+ goto err_disable_pdev;
+ }
+
+ /* Enable 64-bit DMA */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "dma_set_mask failed\n");
+ goto err_free_resource;
+ }
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+ pci_set_master(pdev);
+
+ /* Map register space */
+ start = pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
+ len = pci_resource_len(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
+ dev->regs = ioremap(start, len);
+ if (!dev->regs) {
+ dev_err(&pdev->dev, "register mapping failed\n");
+ ret = -ENOMEM;
+ goto err_free_resource;
+ }
+
+ /* Setup per-device UAR. */
+ dev->driver_uar.index = 0;
+ dev->driver_uar.pfn =
+ pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_UAR) >>
+ PAGE_SHIFT;
+ dev->driver_uar.map =
+ ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!dev->driver_uar.map) {
+ dev_err(&pdev->dev, "failed to remap UAR pages\n");
+ ret = -ENOMEM;
+ goto err_unmap_regs;
+ }
+
+ dev->dsr_version = pvrdma_read_reg(dev, PVRDMA_REG_VERSION);
+ dev_info(&pdev->dev, "device version %d, driver version %d\n",
+ dev->dsr_version, PVRDMA_VERSION);
+
+ dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
+ &dev->dsrbase, GFP_KERNEL);
+ if (!dev->dsr) {
+ dev_err(&pdev->dev, "failed to allocate shared region\n");
+ ret = -ENOMEM;
+ goto err_uar_unmap;
+ }
+
+ /* Setup the shared region */
+ dev->dsr->driver_version = PVRDMA_VERSION;
+ dev->dsr->gos_info.gos_bits = sizeof(void *) == 4 ?
+ PVRDMA_GOS_BITS_32 :
+ PVRDMA_GOS_BITS_64;
+ dev->dsr->gos_info.gos_type = PVRDMA_GOS_TYPE_LINUX;
+ dev->dsr->gos_info.gos_ver = 1;
+
+ if (dev->dsr_version < PVRDMA_PPN64_VERSION)
+ dev->dsr->uar_pfn = dev->driver_uar.pfn;
+ else
+ dev->dsr->uar_pfn64 = dev->driver_uar.pfn;
+
+ /* Command slot. */
+ dev->cmd_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &slot_dma, GFP_KERNEL);
+ if (!dev->cmd_slot) {
+ ret = -ENOMEM;
+ goto err_free_dsr;
+ }
+
+ dev->dsr->cmd_slot_dma = (u64)slot_dma;
+
+ /* Response slot. */
+ dev->resp_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &slot_dma, GFP_KERNEL);
+ if (!dev->resp_slot) {
+ ret = -ENOMEM;
+ goto err_free_slots;
+ }
+
+ dev->dsr->resp_slot_dma = (u64)slot_dma;
+
+ /* Async event ring */
+ dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
+ ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
+ dev->dsr->async_ring_pages.num_pages, true);
+ if (ret)
+ goto err_free_slots;
+ dev->async_ring_state = dev->async_pdir.pages[0];
+ dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;
+
+ /* CQ notification ring */
+ dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
+ ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
+ dev->dsr->cq_ring_pages.num_pages, true);
+ if (ret)
+ goto err_free_async_ring;
+ dev->cq_ring_state = dev->cq_pdir.pages[0];
+ dev->dsr->cq_ring_pages.pdir_dma = dev->cq_pdir.dir_dma;
+
+ /*
+ * Write the PA of the shared region to the device. The writes must be
+ * ordered such that the high bits are written last. When the writes
+ * complete, the device will have filled out the capabilities.
+ */
+
+ pvrdma_write_reg(dev, PVRDMA_REG_DSRLOW, (u32)dev->dsrbase);
+ pvrdma_write_reg(dev, PVRDMA_REG_DSRHIGH,
+ (u32)((u64)(dev->dsrbase) >> 32));
+
+ /* Make sure the write is complete before reading status. */
+ mb();
+
+ /* The driver supports RoCE V1 and V2. */
+ if (!PVRDMA_SUPPORTED(dev)) {
+ dev_err(&pdev->dev, "driver needs RoCE v1 or v2 support\n");
+ ret = -EFAULT;
+ goto err_free_cq_ring;
+ }
+
+ /* The paired vmxnet3 device shares this bus and slot; its function is 0. */
+ pdev_net = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
+ if (!pdev_net) {
+ dev_err(&pdev->dev, "failed to find paired net device\n");
+ ret = -ENODEV;
+ goto err_free_cq_ring;
+ }
+
+ if (pdev_net->vendor != PCI_VENDOR_ID_VMWARE ||
+ pdev_net->device != PCI_DEVICE_ID_VMWARE_VMXNET3) {
+ dev_err(&pdev->dev, "failed to find paired vmxnet3 device\n");
+ pci_dev_put(pdev_net);
+ ret = -ENODEV;
+ goto err_free_cq_ring;
+ }
+
+ dev->netdev = pci_get_drvdata(pdev_net);
+ pci_dev_put(pdev_net);
+ if (!dev->netdev) {
+ dev_err(&pdev->dev, "failed to get vmxnet3 device\n");
+ ret = -ENODEV;
+ goto err_free_cq_ring;
+ }
+ dev_hold(dev->netdev);
+
+ dev_info(&pdev->dev, "paired device to %s\n", dev->netdev->name);
+
+ /* Interrupt setup */
+ ret = pvrdma_alloc_intrs(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to allocate interrupts\n");
+ ret = -ENOMEM;
+ goto err_free_cq_ring;
+ }
+
+ /* Allocate UAR table. */
+ ret = pvrdma_uar_table_init(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to allocate UAR table\n");
+ ret = -ENOMEM;
+ goto err_free_intrs;
+ }
+
+ /* Allocate GID table */
+ dev->sgid_tbl = kcalloc(dev->dsr->caps.gid_tbl_len,
+ sizeof(union ib_gid), GFP_KERNEL);
+ if (!dev->sgid_tbl) {
+ ret = -ENOMEM;
+ goto err_free_uar_table;
+ }
+ dev_dbg(&pdev->dev, "gid table len %d\n", dev->dsr->caps.gid_tbl_len);
+
+ pvrdma_enable_intrs(dev);
+
+ /* Activate pvrdma device */
+ pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_ACTIVATE);
+
+ /* Make sure the write is complete before reading status. */
+ mb();
+
+ /* Check if device was successfully activated */
+ ret = pvrdma_read_reg(dev, PVRDMA_REG_ERR);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "failed to activate device\n");
+ ret = -EFAULT;
+ goto err_disable_intr;
+ }
+
+ /* Register IB device */
+ ret = pvrdma_register_device(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register IB device\n");
+ goto err_disable_intr;
+ }
+
+ dev->nb_netdev.notifier_call = pvrdma_netdevice_event;
+ ret = register_netdevice_notifier(&dev->nb_netdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register netdevice events\n");
+ goto err_unreg_ibdev;
+ }
+
+ dev_info(&pdev->dev, "attached to device\n");
+ return 0;
+
+err_unreg_ibdev:
+ ib_unregister_device(&dev->ib_dev);
+err_disable_intr:
+ pvrdma_disable_intrs(dev);
+ kfree(dev->sgid_tbl);
+err_free_uar_table:
+ pvrdma_uar_table_cleanup(dev);
+err_free_intrs:
+ pvrdma_free_irq(dev);
+ pci_free_irq_vectors(pdev);
+err_free_cq_ring:
+ dev_put(dev->netdev);
+ dev->netdev = NULL;
+ pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
+err_free_async_ring:
+ pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
+err_free_slots:
+ pvrdma_free_slots(dev);
+err_free_dsr:
+ dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
+ dev->dsrbase);
+err_uar_unmap:
+ iounmap(dev->driver_uar.map);
+err_unmap_regs:
+ iounmap(dev->regs);
+err_free_resource:
+ pci_release_regions(pdev);
+err_disable_pdev:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+err_free_device:
+ mutex_lock(&pvrdma_device_list_lock);
+ list_del(&dev->device_link);
+ mutex_unlock(&pvrdma_device_list_lock);
+ ib_dealloc_device(&dev->ib_dev);
+ return ret;
+}
+
+static void pvrdma_pci_remove(struct pci_dev *pdev)
+{
+ struct pvrdma_dev *dev = pci_get_drvdata(pdev);
+
+ if (!dev)
+ return;
+
+ dev_info(&pdev->dev, "detaching from device\n");
+
+ unregister_netdevice_notifier(&dev->nb_netdev);
+ dev->nb_netdev.notifier_call = NULL;
+
+ flush_workqueue(event_wq);
+
+ dev_put(dev->netdev);
+ dev->netdev = NULL;
+
+ /* Unregister ib device */
+ ib_unregister_device(&dev->ib_dev);
+
+ mutex_lock(&pvrdma_device_list_lock);
+ list_del(&dev->device_link);
+ mutex_unlock(&pvrdma_device_list_lock);
+
+ pvrdma_disable_intrs(dev);
+ pvrdma_free_irq(dev);
+ pci_free_irq_vectors(pdev);
+
+ /* Deactivate pvrdma device */
+ pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_RESET);
+ pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
+ pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
+ pvrdma_free_slots(dev);
+ dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
+ dev->dsrbase);
+
+ iounmap(dev->regs);
+ kfree(dev->sgid_tbl);
+ kfree(dev->cq_tbl);
+ kfree(dev->srq_tbl);
+ kfree(dev->qp_tbl);
+ pvrdma_uar_table_cleanup(dev);
+ iounmap(dev->driver_uar.map);
+
+ ib_dealloc_device(&dev->ib_dev);
+
+ /* Free pci resources */
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static const struct pci_device_id pvrdma_pci_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_PVRDMA), },
+ { 0 },
+};
+
+MODULE_DEVICE_TABLE(pci, pvrdma_pci_table);
+
+static struct pci_driver pvrdma_driver = {
+ .name = DRV_NAME,
+ .id_table = pvrdma_pci_table,
+ .probe = pvrdma_pci_probe,
+ .remove = pvrdma_pci_remove,
+};
+
+static int __init pvrdma_init(void)
+{
+ int err;
+
+ event_wq = alloc_ordered_workqueue("pvrdma_event_wq", WQ_MEM_RECLAIM);
+ if (!event_wq)
+ return -ENOMEM;
+
+ err = pci_register_driver(&pvrdma_driver);
+ if (err)
+ destroy_workqueue(event_wq);
+
+ return err;
+}
+
+static void __exit pvrdma_cleanup(void)
+{
+ pci_unregister_driver(&pvrdma_driver);
+
+ destroy_workqueue(event_wq);
+}
+
+module_init(pvrdma_init);
+module_exit(pvrdma_cleanup);
+
+MODULE_AUTHOR("VMware, Inc");
+MODULE_DESCRIPTION("VMware Paravirtual RDMA driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
new file mode 100644
index 000000000000..ba43ad07898c
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/bitmap.h>
+
+#include "pvrdma.h"
+
+int pvrdma_page_dir_init(struct pvrdma_dev *dev, struct pvrdma_page_dir *pdir,
+ u64 npages, bool alloc_pages)
+{
+ u64 i;
+
+ if (npages > PVRDMA_PAGE_DIR_MAX_PAGES)
+ return -EINVAL;
+
+ memset(pdir, 0, sizeof(*pdir));
+
+ pdir->dir = dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+ &pdir->dir_dma, GFP_KERNEL);
+ if (!pdir->dir)
+ goto err;
+
+ pdir->ntables = PVRDMA_PAGE_DIR_TABLE(npages - 1) + 1;
+ pdir->tables = kcalloc(pdir->ntables, sizeof(*pdir->tables),
+ GFP_KERNEL);
+ if (!pdir->tables)
+ goto err;
+
+ for (i = 0; i < pdir->ntables; i++) {
+ pdir->tables[i] = dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+ (dma_addr_t *)&pdir->dir[i],
+ GFP_KERNEL);
+ if (!pdir->tables[i])
+ goto err;
+ }
+
+ pdir->npages = npages;
+
+ if (alloc_pages) {
+ pdir->pages = kcalloc(npages, sizeof(*pdir->pages),
+ GFP_KERNEL);
+ if (!pdir->pages)
+ goto err;
+
+ for (i = 0; i < pdir->npages; i++) {
+ dma_addr_t page_dma;
+
+ pdir->pages[i] = dma_alloc_coherent(&dev->pdev->dev,
+ PAGE_SIZE,
+ &page_dma,
+ GFP_KERNEL);
+ if (!pdir->pages[i])
+ goto err;
+
+ pvrdma_page_dir_insert_dma(pdir, i, page_dma);
+ }
+ }
+
+ return 0;
+
+err:
+ pvrdma_page_dir_cleanup(dev, pdir);
+
+ return -ENOMEM;
+}
+
+static u64 *pvrdma_page_dir_table(struct pvrdma_page_dir *pdir, u64 idx)
+{
+ return pdir->tables[PVRDMA_PAGE_DIR_TABLE(idx)];
+}
+
+dma_addr_t pvrdma_page_dir_get_dma(struct pvrdma_page_dir *pdir, u64 idx)
+{
+ return pvrdma_page_dir_table(pdir, idx)[PVRDMA_PAGE_DIR_PAGE(idx)];
+}
+
+static void pvrdma_page_dir_cleanup_pages(struct pvrdma_dev *dev,
+ struct pvrdma_page_dir *pdir)
+{
+ if (pdir->pages) {
+ u64 i;
+
+ for (i = 0; i < pdir->npages && pdir->pages[i]; i++) {
+ dma_addr_t page_dma = pvrdma_page_dir_get_dma(pdir, i);
+
+ dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ pdir->pages[i], page_dma);
+ }
+
+ kfree(pdir->pages);
+ }
+}
+
+static void pvrdma_page_dir_cleanup_tables(struct pvrdma_dev *dev,
+ struct pvrdma_page_dir *pdir)
+{
+ if (pdir->tables) {
+ int i;
+
+ pvrdma_page_dir_cleanup_pages(dev, pdir);
+
+ for (i = 0; i < pdir->ntables; i++) {
+ u64 *table = pdir->tables[i];
+
+ if (table)
+ dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ table, pdir->dir[i]);
+ }
+
+ kfree(pdir->tables);
+ }
+}
+
+void pvrdma_page_dir_cleanup(struct pvrdma_dev *dev,
+ struct pvrdma_page_dir *pdir)
+{
+ if (pdir->dir) {
+ pvrdma_page_dir_cleanup_tables(dev, pdir);
+ dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ pdir->dir, pdir->dir_dma);
+ }
+}
+
+int pvrdma_page_dir_insert_dma(struct pvrdma_page_dir *pdir, u64 idx,
+ dma_addr_t daddr)
+{
+ u64 *table;
+
+ if (idx >= pdir->npages)
+ return -EINVAL;
+
+ table = pvrdma_page_dir_table(pdir, idx);
+ table[PVRDMA_PAGE_DIR_PAGE(idx)] = daddr;
+
+ return 0;
+}
+
+int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir,
+ struct ib_umem *umem, u64 offset)
+{
+ struct ib_block_iter biter;
+ u64 i = offset;
+ int ret = 0;
+
+ if (offset >= pdir->npages)
+ return -EINVAL;
+
+ rdma_umem_for_each_dma_block (umem, &biter, PAGE_SIZE) {
+ ret = pvrdma_page_dir_insert_dma(
+ pdir, i, rdma_block_iter_dma_address(&biter));
+ if (ret)
+ goto exit;
+
+ i++;
+ }
+
+exit:
+ return ret;
+}
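Together with pvrdma_page_dir_init() above, this gives the pattern a memory-region path would follow: size the directory from the umem, initialize it without backing pages (alloc_pages = false), then fill it from the umem's DMA blocks. The helper below is a hedged sketch under that assumption; its name is invented here.

static int pvrdma_example_build_pdir(struct pvrdma_dev *dev,
				     struct pvrdma_page_dir *pdir,
				     struct ib_umem *umem)
{
	u64 npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
	int ret;

	ret = pvrdma_page_dir_init(dev, pdir, npages, false);
	if (ret)
		return ret;

	ret = pvrdma_page_dir_insert_umem(pdir, umem, 0);
	if (ret)
		pvrdma_page_dir_cleanup(dev, pdir);
	return ret;
}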
+
+int pvrdma_page_dir_insert_page_list(struct pvrdma_page_dir *pdir,
+ u64 *page_list,
+ int num_pages)
+{
+ int i;
+ int ret;
+
+ if (num_pages > pdir->npages)
+ return -EINVAL;
+
+ for (i = 0; i < num_pages; i++) {
+ ret = pvrdma_page_dir_insert_dma(pdir, i, page_list[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst, const struct pvrdma_qp_cap *src)
+{
+ dst->max_send_wr = src->max_send_wr;
+ dst->max_recv_wr = src->max_recv_wr;
+ dst->max_send_sge = src->max_send_sge;
+ dst->max_recv_sge = src->max_recv_sge;
+ dst->max_inline_data = src->max_inline_data;
+}
+
+void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst, const struct ib_qp_cap *src)
+{
+ dst->max_send_wr = src->max_send_wr;
+ dst->max_recv_wr = src->max_recv_wr;
+ dst->max_send_sge = src->max_send_sge;
+ dst->max_recv_sge = src->max_recv_sge;
+ dst->max_inline_data = src->max_inline_data;
+}
+
+void pvrdma_gid_to_ib(union ib_gid *dst, const union pvrdma_gid *src)
+{
+ BUILD_BUG_ON(sizeof(union pvrdma_gid) != sizeof(union ib_gid));
+ memcpy(dst, src, sizeof(*src));
+}
+
+void ib_gid_to_pvrdma(union pvrdma_gid *dst, const union ib_gid *src)
+{
+ BUILD_BUG_ON(sizeof(union pvrdma_gid) != sizeof(union ib_gid));
+ memcpy(dst, src, sizeof(*src));
+}
+
+void pvrdma_global_route_to_ib(struct ib_global_route *dst,
+ const struct pvrdma_global_route *src)
+{
+ pvrdma_gid_to_ib(&dst->dgid, &src->dgid);
+ dst->flow_label = src->flow_label;
+ dst->sgid_index = src->sgid_index;
+ dst->hop_limit = src->hop_limit;
+ dst->traffic_class = src->traffic_class;
+}
+
+void ib_global_route_to_pvrdma(struct pvrdma_global_route *dst,
+ const struct ib_global_route *src)
+{
+ ib_gid_to_pvrdma(&dst->dgid, &src->dgid);
+ dst->flow_label = src->flow_label;
+ dst->sgid_index = src->sgid_index;
+ dst->hop_limit = src->hop_limit;
+ dst->traffic_class = src->traffic_class;
+}
+
+void pvrdma_ah_attr_to_rdma(struct rdma_ah_attr *dst,
+ const struct pvrdma_ah_attr *src)
+{
+ dst->type = RDMA_AH_ATTR_TYPE_ROCE;
+ pvrdma_global_route_to_ib(rdma_ah_retrieve_grh(dst), &src->grh);
+ rdma_ah_set_dlid(dst, src->dlid);
+ rdma_ah_set_sl(dst, src->sl);
+ rdma_ah_set_path_bits(dst, src->src_path_bits);
+ rdma_ah_set_static_rate(dst, src->static_rate);
+ rdma_ah_set_ah_flags(dst, src->ah_flags);
+ rdma_ah_set_port_num(dst, src->port_num);
+ memcpy(dst->roce.dmac, &src->dmac, ETH_ALEN);
+}
+
+void rdma_ah_attr_to_pvrdma(struct pvrdma_ah_attr *dst,
+ const struct rdma_ah_attr *src)
+{
+ ib_global_route_to_pvrdma(&dst->grh, rdma_ah_read_grh(src));
+ dst->dlid = rdma_ah_get_dlid(src);
+ dst->sl = rdma_ah_get_sl(src);
+ dst->src_path_bits = rdma_ah_get_path_bits(src);
+ dst->static_rate = rdma_ah_get_static_rate(src);
+ dst->ah_flags = rdma_ah_get_ah_flags(src);
+ dst->port_num = rdma_ah_get_port_num(src);
+ memcpy(&dst->dmac, src->roce.dmac, sizeof(dst->dmac));
+}
+
+u8 ib_gid_type_to_pvrdma(enum ib_gid_type gid_type)
+{
+ return (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ?
+ PVRDMA_GID_TYPE_FLAG_ROCE_V2 :
+ PVRDMA_GID_TYPE_FLAG_ROCE_V1;
+}
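These converters are field-for-field copies in both directions, so a conversion round trip should be lossless for RoCE address attributes. A small illustrative check under that assumption, not part of the driver:

static void pvrdma_example_ah_round_trip(const struct rdma_ah_attr *in,
					 struct rdma_ah_attr *out)
{
	struct pvrdma_ah_attr wire;

	rdma_ah_attr_to_pvrdma(&wire, in);
	pvrdma_ah_attr_to_rdma(out, &wire);
	/* For RoCE attributes, *out now mirrors *in. */
}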
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
new file mode 100644
index 000000000000..ec7a00c8285b
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include "pvrdma.h"
+
+/**
+ * pvrdma_get_dma_mr - get a DMA memory region
+ * @pd: protection domain
+ * @acc: access flags
+ *
+ * @return: ib_mr pointer on success, otherwise returns an errno.
+ */
+struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ struct pvrdma_dev *dev = to_vdev(pd->device);
+ struct pvrdma_user_mr *mr;
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
+ struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
+ int ret;
+
+ /* Support only LOCAL_WRITE flag for DMA MRs */
+ if (acc & ~IB_ACCESS_LOCAL_WRITE) {
+ dev_warn(&dev->pdev->dev,
+ "unsupported dma mr access flags %#x\n", acc);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
+ cmd->pd_handle = to_vpd(pd)->pd_handle;
+ cmd->access_flags = acc;
+ cmd->flags = PVRDMA_MR_FLAG_DMA;
+
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not get DMA mem region, error: %d\n", ret);
+ kfree(mr);
+ return ERR_PTR(ret);
+ }
+
+ mr->mmr.mr_handle = resp->mr_handle;
+ mr->ibmr.lkey = resp->lkey;
+ mr->ibmr.rkey = resp->rkey;
+
+ return &mr->ibmr;
+}
+
+/**
+ * pvrdma_reg_user_mr - register a userspace memory region
+ * @pd: protection domain
+ * @start: starting address
+ * @length: length of region
+ * @virt_addr: I/O virtual address
+ * @access_flags: access flags for memory region
+ * @dmah: dma handle
+ * @udata: user data
+ *
+ * @return: ib_mr pointer on success, otherwise returns an errno.
+ */
+struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
+ struct ib_udata *udata)
+{
+ struct pvrdma_dev *dev = to_vdev(pd->device);
+ struct pvrdma_user_mr *mr = NULL;
+ struct ib_umem *umem;
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
+ struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
+ int ret, npages;
+
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (length == 0 || length > dev->dsr->caps.max_mr_size) {
+ dev_warn(&dev->pdev->dev, "invalid mem region length\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ umem = ib_umem_get(pd->device, start, length, access_flags);
+ if (IS_ERR(umem)) {
+ dev_warn(&dev->pdev->dev,
+ "could not get umem for mem region\n");
+ return ERR_CAST(umem);
+ }
+
+ npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
+ if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
+ dev_warn(&dev->pdev->dev, "overflow %d pages in mem region\n",
+ npages);
+ ret = -EINVAL;
+ goto err_umem;
+ }
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ ret = -ENOMEM;
+ goto err_umem;
+ }
+
+ mr->mmr.iova = virt_addr;
+ mr->mmr.size = length;
+ mr->umem = umem;
+
+ ret = pvrdma_page_dir_init(dev, &mr->pdir, npages, false);
+ if (ret) {
+ dev_warn(&dev->pdev->dev,
+ "could not allocate page directory\n");
+ goto err_umem;
+ }
+
+ ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0);
+ if (ret)
+ goto err_pdir;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
+ cmd->start = start;
+ cmd->length = length;
+ cmd->pd_handle = to_vpd(pd)->pd_handle;
+ cmd->access_flags = access_flags;
+ cmd->nchunks = npages;
+ cmd->pdir_dma = mr->pdir.dir_dma;
+
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not register mem region, error: %d\n", ret);
+ goto err_pdir;
+ }
+
+ mr->mmr.mr_handle = resp->mr_handle;
+ mr->ibmr.lkey = resp->lkey;
+ mr->ibmr.rkey = resp->rkey;
+
+ return &mr->ibmr;
+
+err_pdir:
+ pvrdma_page_dir_cleanup(dev, &mr->pdir);
+err_umem:
+ ib_umem_release(umem);
+ kfree(mr);
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * pvrdma_alloc_mr - allocate a memory region
+ * @pd: protection domain
+ * @mr_type: type of memory region
+ * @max_num_sg: maximum number of pages
+ *
+ * @return: ib_mr pointer on success, otherwise returns an errno.
+ */
+struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ struct pvrdma_dev *dev = to_vdev(pd->device);
+ struct pvrdma_user_mr *mr;
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
+ struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
+ int size = max_num_sg * sizeof(u64);
+ int ret;
+
+ if (mr_type != IB_MR_TYPE_MEM_REG ||
+ max_num_sg > PVRDMA_MAX_FAST_REG_PAGES)
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->pages = kzalloc(size, GFP_KERNEL);
+ if (!mr->pages) {
+ ret = -ENOMEM;
+ goto freemr;
+ }
+
+ ret = pvrdma_page_dir_init(dev, &mr->pdir, max_num_sg, false);
+ if (ret) {
+ dev_warn(&dev->pdev->dev,
+ "failed to allocate page dir for mr\n");
+ ret = -ENOMEM;
+ goto freepages;
+ }
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
+ cmd->pd_handle = to_vpd(pd)->pd_handle;
+ cmd->access_flags = 0;
+ cmd->flags = PVRDMA_MR_FLAG_FRMR;
+ cmd->nchunks = max_num_sg;
+
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not create FR mem region, error: %d\n", ret);
+ goto freepdir;
+ }
+
+ mr->max_pages = max_num_sg;
+ mr->mmr.mr_handle = resp->mr_handle;
+ mr->ibmr.lkey = resp->lkey;
+ mr->ibmr.rkey = resp->rkey;
+ mr->page_shift = PAGE_SHIFT;
+ mr->umem = NULL;
+
+ return &mr->ibmr;
+
+freepdir:
+ pvrdma_page_dir_cleanup(dev, &mr->pdir);
+freepages:
+ kfree(mr->pages);
+freemr:
+ kfree(mr);
+ return ERR_PTR(ret);
+}
+
+/**
+ * pvrdma_dereg_mr - deregister a memory region
+ * @ibmr: memory region
+ * @udata: pointer to user data
+ *
+ * @return: 0 on success.
+ */
+int pvrdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+{
+ struct pvrdma_user_mr *mr = to_vmr(ibmr);
+ struct pvrdma_dev *dev = to_vdev(ibmr->device);
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_destroy_mr *cmd = &req.destroy_mr;
+ int ret;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_DESTROY_MR;
+ cmd->mr_handle = mr->mmr.mr_handle;
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+ if (ret < 0)
+ dev_warn(&dev->pdev->dev,
+ "could not deregister mem region, error: %d\n", ret);
+
+ pvrdma_page_dir_cleanup(dev, &mr->pdir);
+ ib_umem_release(mr->umem);
+
+ kfree(mr->pages);
+ kfree(mr);
+
+ return 0;
+}
+
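+/* ib_sg_to_pages() callback: collect the DMA address of each MR page. */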
+static int pvrdma_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct pvrdma_user_mr *mr = to_vmr(ibmr);
+
+ if (mr->npages == mr->max_pages)
+ return -ENOMEM;
+
+ mr->pages[mr->npages++] = addr;
+ return 0;
+}
+
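+/**
+ * pvrdma_map_mr_sg - map a scatterlist into a memory region's page list
+ * @ibmr: memory region
+ * @sg: scatterlist to map
+ * @sg_nents: number of entries in the scatterlist
+ * @sg_offset: offset into the first scatterlist entry
+ *
+ * @return: number of scatterlist entries mapped, otherwise a negative errno.
+ */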
+int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
+{
+ struct pvrdma_user_mr *mr = to_vmr(ibmr);
+ struct pvrdma_dev *dev = to_vdev(ibmr->device);
+ int ret;
+
+ mr->npages = 0;
+
+ ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, pvrdma_set_page);
+ if (ret < 0)
+ dev_warn(&dev->pdev->dev, "could not map sg to pages\n");
+
+ return ret;
+}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
new file mode 100644
index 000000000000..98b2a0090bf2
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -0,0 +1,1046 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/page.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "pvrdma.h"
+
+static void __pvrdma_destroy_qp(struct pvrdma_dev *dev,
+ struct pvrdma_qp *qp);
+
+static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,
+ struct pvrdma_cq **recv_cq)
+{
+ *send_cq = to_vcq(qp->ibqp.send_cq);
+ *recv_cq = to_vcq(qp->ibqp.recv_cq);
+}
+
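+/*
+ * Lock the send and receive CQs together. When they differ they are always
+ * taken in ascending cq_handle order, so two paths locking the same pair
+ * cannot deadlock against each other.
+ */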
+static void pvrdma_lock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
+ unsigned long *scq_flags,
+ unsigned long *rcq_flags)
+ __acquires(scq->cq_lock) __acquires(rcq->cq_lock)
+{
+ if (scq == rcq) {
+ spin_lock_irqsave(&scq->cq_lock, *scq_flags);
+ __acquire(rcq->cq_lock);
+ } else if (scq->cq_handle < rcq->cq_handle) {
+ spin_lock_irqsave(&scq->cq_lock, *scq_flags);
+ spin_lock_irqsave_nested(&rcq->cq_lock, *rcq_flags,
+ SINGLE_DEPTH_NESTING);
+ } else {
+ spin_lock_irqsave(&rcq->cq_lock, *rcq_flags);
+ spin_lock_irqsave_nested(&scq->cq_lock, *scq_flags,
+ SINGLE_DEPTH_NESTING);
+ }
+}
+
+static void pvrdma_unlock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
+ unsigned long *scq_flags,
+ unsigned long *rcq_flags)
+ __releases(scq->cq_lock) __releases(rcq->cq_lock)
+{
+ if (scq == rcq) {
+ __release(rcq->cq_lock);
+ spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
+ } else if (scq->cq_handle < rcq->cq_handle) {
+ spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags);
+ spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
+ } else {
+ spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
+ spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags);
+ }
+}
+
+static void pvrdma_reset_qp(struct pvrdma_qp *qp)
+{
+ struct pvrdma_cq *scq, *rcq;
+ unsigned long scq_flags, rcq_flags;
+
+ /* Clean up cqes */
+ get_cqs(qp, &scq, &rcq);
+ pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);
+
+ _pvrdma_flush_cqe(qp, scq);
+ if (scq != rcq)
+ _pvrdma_flush_cqe(qp, rcq);
+
+ pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
+
+ /*
+ * Reset the queue pair. The NULL checks are needed because user-space
+ * queue pairs have no kernel ring state.
+ */
+ if (qp->rq.ring) {
+ atomic_set(&qp->rq.ring->cons_head, 0);
+ atomic_set(&qp->rq.ring->prod_tail, 0);
+ }
+ if (qp->sq.ring) {
+ atomic_set(&qp->sq.ring->cons_head, 0);
+ atomic_set(&qp->sq.ring->prod_tail, 0);
+ }
+}
+
+static int pvrdma_set_rq_size(struct pvrdma_dev *dev,
+ struct ib_qp_cap *req_cap,
+ struct pvrdma_qp *qp)
+{
+ if (req_cap->max_recv_wr > dev->dsr->caps.max_qp_wr ||
+ req_cap->max_recv_sge > dev->dsr->caps.max_sge) {
+ dev_warn(&dev->pdev->dev, "recv queue size invalid\n");
+ return -EINVAL;
+ }
+
+ qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_recv_wr));
+ qp->rq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_recv_sge));
+
+ /* Write back */
+ req_cap->max_recv_wr = qp->rq.wqe_cnt;
+ req_cap->max_recv_sge = qp->rq.max_sg;
+
+ qp->rq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_rq_wqe_hdr) +
+ sizeof(struct pvrdma_sge) *
+ qp->rq.max_sg);
+ qp->npages_recv = (qp->rq.wqe_cnt * qp->rq.wqe_size + PAGE_SIZE - 1) /
+ PAGE_SIZE;
+
+ return 0;
+}
+
+static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap,
+ struct pvrdma_qp *qp)
+{
+ if (req_cap->max_send_wr > dev->dsr->caps.max_qp_wr ||
+ req_cap->max_send_sge > dev->dsr->caps.max_sge) {
+ dev_warn(&dev->pdev->dev, "send queue size invalid\n");
+ return -EINVAL;
+ }
+
+ qp->sq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_send_wr));
+ qp->sq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_send_sge));
+
+ /* Write back */
+ req_cap->max_send_wr = qp->sq.wqe_cnt;
+ req_cap->max_send_sge = qp->sq.max_sg;
+
+ qp->sq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_sq_wqe_hdr) +
+ sizeof(struct pvrdma_sge) *
+ qp->sq.max_sg);
+ /* Note: one extra page for the header. */
+ qp->npages_send = PVRDMA_QP_NUM_HEADER_PAGES +
+ (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) /
+ PAGE_SIZE;
+
+ return 0;
+}
+
+/**
+ * pvrdma_create_qp - create queue pair
+ * @ibqp: queue pair
+ * @init_attr: queue pair attributes
+ * @udata: user data
+ *
+ * @return: 0 on success, otherwise returns an errno.
+ */
+int pvrdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct pvrdma_qp *qp = to_vqp(ibqp);
+ struct pvrdma_dev *dev = to_vdev(ibqp->device);
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_create_qp *cmd = &req.create_qp;
+ struct pvrdma_cmd_create_qp_resp *resp = &rsp.create_qp_resp;
+ struct pvrdma_cmd_create_qp_resp_v2 *resp_v2 = &rsp.create_qp_resp_v2;
+ struct pvrdma_create_qp ucmd;
+ struct pvrdma_create_qp_resp qp_resp = {};
+ unsigned long flags;
+ int ret;
+ bool is_srq = !!init_attr->srq;
+
+ if (init_attr->create_flags) {
+ dev_warn(&dev->pdev->dev,
+ "invalid create queuepair flags %#x\n",
+ init_attr->create_flags);
+ return -EOPNOTSUPP;
+ }
+
+ if (init_attr->qp_type != IB_QPT_RC &&
+ init_attr->qp_type != IB_QPT_UD &&
+ init_attr->qp_type != IB_QPT_GSI) {
+ dev_warn(&dev->pdev->dev, "queuepair type %d not supported\n",
+ init_attr->qp_type);
+ return -EOPNOTSUPP;
+ }
+
+ if (is_srq && !dev->dsr->caps.max_srq) {
+ dev_warn(&dev->pdev->dev,
+ "SRQs not supported by device\n");
+ return -EINVAL;
+ }
+
+ if (!atomic_add_unless(&dev->num_qps, 1, dev->dsr->caps.max_qp))
+ return -ENOMEM;
+
+ switch (init_attr->qp_type) {
+ case IB_QPT_GSI:
+ if (init_attr->port_num == 0 ||
+ init_attr->port_num > ibqp->device->phys_port_cnt) {
+ dev_warn(&dev->pdev->dev, "invalid queuepair attrs\n");
+ ret = -EINVAL;
+ goto err_qp;
+ }
+ fallthrough;
+ case IB_QPT_RC:
+ case IB_QPT_UD:
+ spin_lock_init(&qp->sq.lock);
+ spin_lock_init(&qp->rq.lock);
+ mutex_init(&qp->mutex);
+ refcount_set(&qp->refcnt, 1);
+ init_completion(&qp->free);
+
+ qp->state = IB_QPS_RESET;
+ qp->is_kernel = !udata;
+
+ if (!qp->is_kernel) {
+ dev_dbg(&dev->pdev->dev,
+ "create queuepair from user space\n");
+
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
+ ret = -EFAULT;
+ goto err_qp;
+ }
+
+ /* Check that userspace can receive both the qpn and the qp handle. */
+ if (dev->dsr_version >= PVRDMA_QPHANDLE_VERSION &&
+ udata->outlen < sizeof(qp_resp)) {
+ dev_warn(&dev->pdev->dev,
+ "create queuepair not supported\n");
+ ret = -EOPNOTSUPP;
+ goto err_qp;
+ }
+
+ if (!is_srq) {
+ /* set qp->sq.wqe_cnt, shift, buf_size.. */
+ qp->rumem = ib_umem_get(ibqp->device,
+ ucmd.rbuf_addr,
+ ucmd.rbuf_size, 0);
+ if (IS_ERR(qp->rumem)) {
+ ret = PTR_ERR(qp->rumem);
+ goto err_qp;
+ }
+ qp->srq = NULL;
+ } else {
+ qp->rumem = NULL;
+ qp->srq = to_vsrq(init_attr->srq);
+ }
+
+ qp->sumem = ib_umem_get(ibqp->device, ucmd.sbuf_addr,
+ ucmd.sbuf_size, 0);
+ if (IS_ERR(qp->sumem)) {
+ if (!is_srq)
+ ib_umem_release(qp->rumem);
+ ret = PTR_ERR(qp->sumem);
+ goto err_qp;
+ }
+
+ qp->npages_send =
+ ib_umem_num_dma_blocks(qp->sumem, PAGE_SIZE);
+ if (!is_srq)
+ qp->npages_recv = ib_umem_num_dma_blocks(
+ qp->rumem, PAGE_SIZE);
+ else
+ qp->npages_recv = 0;
+ qp->npages = qp->npages_send + qp->npages_recv;
+ } else {
+ ret = pvrdma_set_sq_size(to_vdev(ibqp->device),
+ &init_attr->cap, qp);
+ if (ret)
+ goto err_qp;
+
+ ret = pvrdma_set_rq_size(to_vdev(ibqp->device),
+ &init_attr->cap, qp);
+ if (ret)
+ goto err_qp;
+
+ qp->npages = qp->npages_send + qp->npages_recv;
+
+ /* Skip header page. */
+ qp->sq.offset = PVRDMA_QP_NUM_HEADER_PAGES * PAGE_SIZE;
+
+ /* Recv queue pages are after send pages. */
+ qp->rq.offset = qp->npages_send * PAGE_SIZE;
+ }
+
+ if (qp->npages < 0 || qp->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
+ dev_warn(&dev->pdev->dev,
+ "overflow pages in queuepair\n");
+ ret = -EINVAL;
+ goto err_umem;
+ }
+
+ ret = pvrdma_page_dir_init(dev, &qp->pdir, qp->npages,
+ qp->is_kernel);
+ if (ret) {
+ dev_warn(&dev->pdev->dev,
+ "could not allocate page directory\n");
+ goto err_umem;
+ }
+
+ if (!qp->is_kernel) {
+ pvrdma_page_dir_insert_umem(&qp->pdir, qp->sumem, 0);
+ if (!is_srq)
+ pvrdma_page_dir_insert_umem(&qp->pdir,
+ qp->rumem,
+ qp->npages_send);
+ } else {
+ /* Ring state is always the first page. */
+ qp->sq.ring = qp->pdir.pages[0];
+ qp->rq.ring = is_srq ? NULL : &qp->sq.ring[1];
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_qp;
+ }
+
+ /* Not supported */
+ init_attr->cap.max_inline_data = 0;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_CREATE_QP;
+ cmd->pd_handle = to_vpd(ibqp->pd)->pd_handle;
+ cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle;
+ cmd->recv_cq_handle = to_vcq(init_attr->recv_cq)->cq_handle;
+ if (is_srq)
+ cmd->srq_handle = to_vsrq(init_attr->srq)->srq_handle;
+ else
+ cmd->srq_handle = 0;
+ cmd->max_send_wr = init_attr->cap.max_send_wr;
+ cmd->max_recv_wr = init_attr->cap.max_recv_wr;
+ cmd->max_send_sge = init_attr->cap.max_send_sge;
+ cmd->max_recv_sge = init_attr->cap.max_recv_sge;
+ cmd->max_inline_data = init_attr->cap.max_inline_data;
+ cmd->sq_sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
+ cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type);
+ cmd->is_srq = is_srq;
+ cmd->lkey = 0;
+ cmd->access_flags = IB_ACCESS_LOCAL_WRITE;
+ cmd->total_chunks = qp->npages;
+ cmd->send_chunks = qp->npages_send - PVRDMA_QP_NUM_HEADER_PAGES;
+ cmd->pdir_dma = qp->pdir.dir_dma;
+
+ dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n",
+ cmd->max_send_wr, cmd->max_recv_wr, cmd->max_send_sge,
+ cmd->max_recv_sge);
+
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_QP_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not create queuepair, error: %d\n", ret);
+ goto err_pdir;
+ }
+
+ /* max_send_wr/_recv_wr/_send_sge/_recv_sge/_inline_data */
+ qp->port = init_attr->port_num;
+
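+ /*
+ * Newer devices report the qpn and the qp handle separately;
+ * older ones use the qpn for both.
+ */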
+ if (dev->dsr_version >= PVRDMA_QPHANDLE_VERSION) {
+ qp->ibqp.qp_num = resp_v2->qpn;
+ qp->qp_handle = resp_v2->qp_handle;
+ } else {
+ qp->ibqp.qp_num = resp->qpn;
+ qp->qp_handle = resp->qpn;
+ }
+
+ spin_lock_irqsave(&dev->qp_tbl_lock, flags);
+ dev->qp_tbl[qp->qp_handle % dev->dsr->caps.max_qp] = qp;
+ spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
+
+ if (udata) {
+ qp_resp.qpn = qp->ibqp.qp_num;
+ qp_resp.qp_handle = qp->qp_handle;
+
+ if (ib_copy_to_udata(udata, &qp_resp,
+ min(udata->outlen, sizeof(qp_resp)))) {
+ dev_warn(&dev->pdev->dev,
+ "failed to copy back udata\n");
+ __pvrdma_destroy_qp(dev, qp);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+
+err_pdir:
+ pvrdma_page_dir_cleanup(dev, &qp->pdir);
+err_umem:
+ ib_umem_release(qp->rumem);
+ ib_umem_release(qp->sumem);
+err_qp:
+ atomic_dec(&dev->num_qps);
+ return ret;
+}
+
+static void _pvrdma_free_qp(struct pvrdma_qp *qp)
+{
+ unsigned long flags;
+ struct pvrdma_dev *dev = to_vdev(qp->ibqp.device);
+
+ spin_lock_irqsave(&dev->qp_tbl_lock, flags);
+ dev->qp_tbl[qp->qp_handle] = NULL;
+ spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
+
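+ /* Drop our reference and wait for all other holders before freeing. */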
+ if (refcount_dec_and_test(&qp->refcnt))
+ complete(&qp->free);
+ wait_for_completion(&qp->free);
+
+ ib_umem_release(qp->rumem);
+ ib_umem_release(qp->sumem);
+
+ pvrdma_page_dir_cleanup(dev, &qp->pdir);
+
+ atomic_dec(&dev->num_qps);
+}
+
+static void pvrdma_free_qp(struct pvrdma_qp *qp)
+{
+ struct pvrdma_cq *scq;
+ struct pvrdma_cq *rcq;
+ unsigned long scq_flags, rcq_flags;
+
+ /* In case cq is polling */
+ get_cqs(qp, &scq, &rcq);
+ pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);
+
+ _pvrdma_flush_cqe(qp, scq);
+ if (scq != rcq)
+ _pvrdma_flush_cqe(qp, rcq);
+
+ /*
+ * We unlock the CQs before clearing out the qp handle; this is still
+ * safe because the backend QP has been destroyed and its CQEs flushed,
+ * so no other completions can arrive for this QP.
+ */
+ pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
+
+ _pvrdma_free_qp(qp);
+}
+
+static inline void _pvrdma_destroy_qp_work(struct pvrdma_dev *dev,
+ u32 qp_handle)
+{
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_destroy_qp *cmd = &req.destroy_qp;
+ int ret;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_DESTROY_QP;
+ cmd->qp_handle = qp_handle;
+
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+ if (ret < 0)
+ dev_warn(&dev->pdev->dev,
+ "destroy queuepair failed, error: %d\n", ret);
+}
+
+/**
+ * pvrdma_destroy_qp - destroy a queue pair
+ * @qp: the queue pair to destroy
+ * @udata: user data or null for kernel object
+ *
+ * @return: always 0.
+ */
+int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
+{
+ struct pvrdma_qp *vqp = to_vqp(qp);
+
+ _pvrdma_destroy_qp_work(to_vdev(qp->device), vqp->qp_handle);
+ pvrdma_free_qp(vqp);
+
+ return 0;
+}
+
+static void __pvrdma_destroy_qp(struct pvrdma_dev *dev,
+ struct pvrdma_qp *qp)
+{
+ _pvrdma_destroy_qp_work(dev, qp->qp_handle);
+ _pvrdma_free_qp(qp);
+}
+
+/**
+ * pvrdma_modify_qp - modify queue pair attributes
+ * @ibqp: the queue pair
+ * @attr: the new queue pair's attributes
+ * @attr_mask: attributes mask
+ * @udata: user data
+ *
+ * @return: 0 on success, otherwise returns an errno.
+ */
+int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct pvrdma_dev *dev = to_vdev(ibqp->device);
+ struct pvrdma_qp *qp = to_vqp(ibqp);
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_modify_qp *cmd = &req.modify_qp;
+ enum ib_qp_state cur_state, next_state;
+ int ret;
+
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
+ /* Sanity checking, done under the QP mutex. */
+ mutex_lock(&qp->mutex);
+ cur_state = (attr_mask & IB_QP_CUR_STATE) ? attr->cur_qp_state :
+ qp->state;
+ next_state = (attr_mask & IB_QP_STATE) ? attr->qp_state : cur_state;
+
+ if (!ib_modify_qp_is_ok(cur_state, next_state, ibqp->qp_type,
+ attr_mask)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (attr_mask & IB_QP_PORT) {
+ if (attr->port_num == 0 ||
+ attr->port_num > ibqp->device->phys_port_cnt) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER) {
+ if (attr->min_rnr_timer > 31) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX) {
+ if (attr->pkey_index >= dev->dsr->caps.max_pkeys) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (attr_mask & IB_QP_QKEY)
+ qp->qkey = attr->qkey;
+
+ if (cur_state == next_state && cur_state == IB_QPS_RESET) {
+ ret = 0;
+ goto out;
+ }
+
+ qp->state = next_state;
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_MODIFY_QP;
+ cmd->qp_handle = qp->qp_handle;
+ cmd->attr_mask = ib_qp_attr_mask_to_pvrdma(attr_mask);
+ cmd->attrs.qp_state = ib_qp_state_to_pvrdma(attr->qp_state);
+ cmd->attrs.cur_qp_state =
+ ib_qp_state_to_pvrdma(attr->cur_qp_state);
+ cmd->attrs.path_mtu = ib_mtu_to_pvrdma(attr->path_mtu);
+ cmd->attrs.path_mig_state =
+ ib_mig_state_to_pvrdma(attr->path_mig_state);
+ cmd->attrs.qkey = attr->qkey;
+ cmd->attrs.rq_psn = attr->rq_psn;
+ cmd->attrs.sq_psn = attr->sq_psn;
+ cmd->attrs.dest_qp_num = attr->dest_qp_num;
+ cmd->attrs.qp_access_flags =
+ ib_access_flags_to_pvrdma(attr->qp_access_flags);
+ cmd->attrs.pkey_index = attr->pkey_index;
+ cmd->attrs.alt_pkey_index = attr->alt_pkey_index;
+ cmd->attrs.en_sqd_async_notify = attr->en_sqd_async_notify;
+ cmd->attrs.sq_draining = attr->sq_draining;
+ cmd->attrs.max_rd_atomic = attr->max_rd_atomic;
+ cmd->attrs.max_dest_rd_atomic = attr->max_dest_rd_atomic;
+ cmd->attrs.min_rnr_timer = attr->min_rnr_timer;
+ cmd->attrs.port_num = attr->port_num;
+ cmd->attrs.timeout = attr->timeout;
+ cmd->attrs.retry_cnt = attr->retry_cnt;
+ cmd->attrs.rnr_retry = attr->rnr_retry;
+ cmd->attrs.alt_port_num = attr->alt_port_num;
+ cmd->attrs.alt_timeout = attr->alt_timeout;
+ ib_qp_cap_to_pvrdma(&cmd->attrs.cap, &attr->cap);
+ rdma_ah_attr_to_pvrdma(&cmd->attrs.ah_attr, &attr->ah_attr);
+ rdma_ah_attr_to_pvrdma(&cmd->attrs.alt_ah_attr, &attr->alt_ah_attr);
+
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_MODIFY_QP_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not modify queuepair, error: %d\n", ret);
+ } else if (rsp.hdr.err > 0) {
+ dev_warn(&dev->pdev->dev,
+ "cannot modify queuepair, error: %d\n", rsp.hdr.err);
+ ret = -EINVAL;
+ }
+
+ if (ret == 0 && next_state == IB_QPS_RESET)
+ pvrdma_reset_qp(qp);
+
+out:
+ mutex_unlock(&qp->mutex);
+
+ return ret;
+}
+
+static inline void *get_sq_wqe(struct pvrdma_qp *qp, unsigned int n)
+{
+ return pvrdma_page_dir_get_ptr(&qp->pdir,
+ qp->sq.offset + n * qp->sq.wqe_size);
+}
+
+static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n)
+{
+ return pvrdma_page_dir_get_ptr(&qp->pdir,
+ qp->rq.offset + n * qp->rq.wqe_size);
+}
+
+static int set_reg_seg(struct pvrdma_sq_wqe_hdr *wqe_hdr,
+ const struct ib_reg_wr *wr)
+{
+ struct pvrdma_user_mr *mr = to_vmr(wr->mr);
+
+ wqe_hdr->wr.fast_reg.iova_start = mr->ibmr.iova;
+ wqe_hdr->wr.fast_reg.pl_pdir_dma = mr->pdir.dir_dma;
+ wqe_hdr->wr.fast_reg.page_shift = mr->page_shift;
+ wqe_hdr->wr.fast_reg.page_list_len = mr->npages;
+ wqe_hdr->wr.fast_reg.length = mr->ibmr.length;
+ wqe_hdr->wr.fast_reg.access_flags = wr->access;
+ wqe_hdr->wr.fast_reg.rkey = wr->key;
+
+ return pvrdma_page_dir_insert_page_list(&mr->pdir, mr->pages,
+ mr->npages);
+}
+
+/**
+ * pvrdma_post_send - post send work request entries on a QP
+ * @ibqp: the QP
+ * @wr: work request list to post
+ * @bad_wr: the first bad WR returned
+ *
+ * @return: 0 on success, otherwise returns an errno.
+ */
+int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ struct pvrdma_qp *qp = to_vqp(ibqp);
+ struct pvrdma_dev *dev = to_vdev(ibqp->device);
+ unsigned long flags;
+ struct pvrdma_sq_wqe_hdr *wqe_hdr;
+ struct pvrdma_sge *sge;
+ int i, ret;
+
+ /*
+ * In states lower than RTS, we can fail immediately. In other states,
+ * just post and let the device figure it out.
+ */
+ if (qp->state < IB_QPS_RTS) {
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&qp->sq.lock, flags);
+
+ while (wr) {
+ unsigned int tail = 0;
+
+ if (unlikely(!pvrdma_idx_ring_has_space(
+ qp->sq.ring, qp->sq.wqe_cnt, &tail))) {
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "send queue is full\n");
+ *bad_wr = wr;
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (unlikely(wr->num_sge > qp->sq.max_sg || wr->num_sge < 0)) {
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "send SGE overflow\n");
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Only UD and RC are supported; a thorough check against the full
+ * opcode table below is still needed.
+ * opcode _UD _UC _RC
+ * _SEND x x x
+ * _SEND_WITH_IMM x x x
+ * _RDMA_WRITE x x
+ * _RDMA_WRITE_WITH_IMM x x
+ * _LOCAL_INV x x
+ * _SEND_WITH_INV x x
+ * _RDMA_READ x
+ * _ATOMIC_CMP_AND_SWP x
+ * _ATOMIC_FETCH_AND_ADD x
+ * _MASK_ATOMIC_CMP_AND_SWP x
+ * _MASK_ATOMIC_FETCH_AND_ADD x
+ * _REG_MR x
+ *
+ */
+ if (qp->ibqp.qp_type != IB_QPT_UD &&
+ qp->ibqp.qp_type != IB_QPT_RC &&
+ wr->opcode != IB_WR_SEND) {
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "unsupported queuepair type\n");
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto out;
+ } else if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->ibqp.qp_type == IB_QPT_GSI) {
+ if (wr->opcode != IB_WR_SEND &&
+ wr->opcode != IB_WR_SEND_WITH_IMM) {
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "invalid send opcode\n");
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, tail);
+ memset(wqe_hdr, 0, sizeof(*wqe_hdr));
+ wqe_hdr->wr_id = wr->wr_id;
+ wqe_hdr->num_sge = wr->num_sge;
+ wqe_hdr->opcode = ib_wr_opcode_to_pvrdma(wr->opcode);
+ wqe_hdr->send_flags = ib_send_flags_to_pvrdma(wr->send_flags);
+ if (wr->opcode == IB_WR_SEND_WITH_IMM ||
+ wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
+ wqe_hdr->ex.imm_data = wr->ex.imm_data;
+
+ if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) {
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ if (unlikely(!ud_wr(wr)->ah)) {
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "invalid address handle\n");
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Use the qkey from the QP context if the high-order bit of the
+ * WR's remote_qkey is set, otherwise use the WR's remote_qkey.
+ */
+ wqe_hdr->wr.ud.remote_qpn = ud_wr(wr)->remote_qpn;
+ wqe_hdr->wr.ud.remote_qkey =
+ ud_wr(wr)->remote_qkey & 0x80000000 ?
+ qp->qkey : ud_wr(wr)->remote_qkey;
+ wqe_hdr->wr.ud.av = to_vah(ud_wr(wr)->ah)->av;
+
+ break;
+ case IB_QPT_RC:
+ switch (wr->opcode) {
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ wqe_hdr->wr.rdma.remote_addr =
+ rdma_wr(wr)->remote_addr;
+ wqe_hdr->wr.rdma.rkey = rdma_wr(wr)->rkey;
+ break;
+ case IB_WR_LOCAL_INV:
+ case IB_WR_SEND_WITH_INV:
+ wqe_hdr->ex.invalidate_rkey =
+ wr->ex.invalidate_rkey;
+ break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ wqe_hdr->wr.atomic.remote_addr =
+ atomic_wr(wr)->remote_addr;
+ wqe_hdr->wr.atomic.rkey = atomic_wr(wr)->rkey;
+ wqe_hdr->wr.atomic.compare_add =
+ atomic_wr(wr)->compare_add;
+ if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP)
+ wqe_hdr->wr.atomic.swap =
+ atomic_wr(wr)->swap;
+ break;
+ case IB_WR_REG_MR:
+ ret = set_reg_seg(wqe_hdr, reg_wr(wr));
+ if (ret < 0) {
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "Failed to set fast register work request\n");
+ *bad_wr = wr;
+ goto out;
+ }
+ break;
+ default:
+ break;
+ }
+
+ break;
+ default:
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "invalid queuepair type\n");
+ ret = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ sge = (struct pvrdma_sge *)(wqe_hdr + 1);
+ for (i = 0; i < wr->num_sge; i++) {
+ /* Still need to validate against a zero or maximum wqe_size. */
+ sge->addr = wr->sg_list[i].addr;
+ sge->length = wr->sg_list[i].length;
+ sge->lkey = wr->sg_list[i].lkey;
+ sge++;
+ }
+
+ /* Make sure wqe is written before index update */
+ smp_wmb();
+
+ /* Update shared sq ring */
+ pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail,
+ qp->sq.wqe_cnt);
+
+ wr = wr->next;
+ }
+
+ ret = 0;
+
+out:
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+
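+ /* Ring the doorbell so the device processes the newly posted send WQEs. */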
+ if (!ret)
+ pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_SEND | qp->qp_handle);
+
+ return ret;
+}
+
+/**
+ * pvrdma_post_recv - post receive work request entries on a QP
+ * @ibqp: the QP
+ * @wr: the work request list to post
+ * @bad_wr: the first bad WR returned
+ *
+ * @return: 0 on success, otherwise returns an errno.
+ */
+int pvrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct pvrdma_dev *dev = to_vdev(ibqp->device);
+ unsigned long flags;
+ struct pvrdma_qp *qp = to_vqp(ibqp);
+ struct pvrdma_rq_wqe_hdr *wqe_hdr;
+ struct pvrdma_sge *sge;
+ int ret = 0;
+ int i;
+
+ /*
+ * In the RESET state, we can fail immediately. For other states,
+ * just post and let the device figure it out.
+ */
+ if (qp->state == IB_QPS_RESET) {
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ if (qp->srq) {
+ dev_warn(&dev->pdev->dev, "QP associated with SRQ\n");
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&qp->rq.lock, flags);
+
+ while (wr) {
+ unsigned int tail = 0;
+
+ if (unlikely(wr->num_sge > qp->rq.max_sg ||
+ wr->num_sge < 0)) {
+ ret = -EINVAL;
+ *bad_wr = wr;
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "recv SGE overflow\n");
+ goto out;
+ }
+
+ if (unlikely(!pvrdma_idx_ring_has_space(
+ qp->rq.ring, qp->rq.wqe_cnt, &tail))) {
+ ret = -ENOMEM;
+ *bad_wr = wr;
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "recv queue full\n");
+ goto out;
+ }
+
+ wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, tail);
+ wqe_hdr->wr_id = wr->wr_id;
+ wqe_hdr->num_sge = wr->num_sge;
+ wqe_hdr->total_len = 0;
+
+ sge = (struct pvrdma_sge *)(wqe_hdr + 1);
+ for (i = 0; i < wr->num_sge; i++) {
+ sge->addr = wr->sg_list[i].addr;
+ sge->length = wr->sg_list[i].length;
+ sge->lkey = wr->sg_list[i].lkey;
+ sge++;
+ }
+
+ /* Make sure wqe is written before index update */
+ smp_wmb();
+
+ /* Update shared rq ring */
+ pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
+ qp->rq.wqe_cnt);
+
+ wr = wr->next;
+ }
+
+ spin_unlock_irqrestore(&qp->rq.lock, flags);
+
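+ /* Ring the doorbell so the device processes the newly posted receive WQEs. */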
+ pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_RECV | qp->qp_handle);
+
+ return ret;
+
+out:
+ spin_unlock_irqrestore(&qp->rq.lock, flags);
+
+ return ret;
+}
+
+/**
+ * pvrdma_query_qp - query a queue pair's attributes
+ * @ibqp: the queue pair to query
+ * @attr: the queue pair's attributes
+ * @attr_mask: attributes mask
+ * @init_attr: initial queue pair attributes
+ *
+ * @return: 0 on success, otherwise returns an errno.
+ */
+int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_qp_init_attr *init_attr)
+{
+ struct pvrdma_dev *dev = to_vdev(ibqp->device);
+ struct pvrdma_qp *qp = to_vqp(ibqp);
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_query_qp *cmd = &req.query_qp;
+ struct pvrdma_cmd_query_qp_resp *resp = &rsp.query_qp_resp;
+ int ret = 0;
+
+ mutex_lock(&qp->mutex);
+
+ if (qp->state == IB_QPS_RESET) {
+ attr->qp_state = IB_QPS_RESET;
+ goto out;
+ }
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_QUERY_QP;
+ cmd->qp_handle = qp->qp_handle;
+ cmd->attr_mask = ib_qp_attr_mask_to_pvrdma(attr_mask);
+
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_QP_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not query queuepair, error: %d\n", ret);
+ goto out;
+ }
+
+ attr->qp_state = pvrdma_qp_state_to_ib(resp->attrs.qp_state);
+ attr->cur_qp_state =
+ pvrdma_qp_state_to_ib(resp->attrs.cur_qp_state);
+ attr->path_mtu = pvrdma_mtu_to_ib(resp->attrs.path_mtu);
+ attr->path_mig_state =
+ pvrdma_mig_state_to_ib(resp->attrs.path_mig_state);
+ attr->qkey = resp->attrs.qkey;
+ attr->rq_psn = resp->attrs.rq_psn;
+ attr->sq_psn = resp->attrs.sq_psn;
+ attr->dest_qp_num = resp->attrs.dest_qp_num;
+ attr->qp_access_flags =
+ pvrdma_access_flags_to_ib(resp->attrs.qp_access_flags);
+ attr->pkey_index = resp->attrs.pkey_index;
+ attr->alt_pkey_index = resp->attrs.alt_pkey_index;
+ attr->en_sqd_async_notify = resp->attrs.en_sqd_async_notify;
+ attr->sq_draining = resp->attrs.sq_draining;
+ attr->max_rd_atomic = resp->attrs.max_rd_atomic;
+ attr->max_dest_rd_atomic = resp->attrs.max_dest_rd_atomic;
+ attr->min_rnr_timer = resp->attrs.min_rnr_timer;
+ attr->port_num = resp->attrs.port_num;
+ attr->timeout = resp->attrs.timeout;
+ attr->retry_cnt = resp->attrs.retry_cnt;
+ attr->rnr_retry = resp->attrs.rnr_retry;
+ attr->alt_port_num = resp->attrs.alt_port_num;
+ attr->alt_timeout = resp->attrs.alt_timeout;
+ pvrdma_qp_cap_to_ib(&attr->cap, &resp->attrs.cap);
+ pvrdma_ah_attr_to_rdma(&attr->ah_attr, &resp->attrs.ah_attr);
+ pvrdma_ah_attr_to_rdma(&attr->alt_ah_attr, &resp->attrs.alt_ah_attr);
+
+ qp->state = attr->qp_state;
+
+ ret = 0;
+
+out:
+ attr->cur_qp_state = attr->qp_state;
+
+ init_attr->event_handler = qp->ibqp.event_handler;
+ init_attr->qp_context = qp->ibqp.qp_context;
+ init_attr->send_cq = qp->ibqp.send_cq;
+ init_attr->recv_cq = qp->ibqp.recv_cq;
+ init_attr->srq = qp->ibqp.srq;
+ init_attr->xrcd = NULL;
+ init_attr->cap = attr->cap;
+ init_attr->sq_sig_type = 0;
+ init_attr->qp_type = qp->ibqp.qp_type;
+ init_attr->create_flags = 0;
+ init_attr->port_num = qp->port;
+
+ mutex_unlock(&qp->mutex);
+ return ret;
+}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h
new file mode 100644
index 000000000000..8b558ae234c8
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PVRDMA_RING_H__
+#define __PVRDMA_RING_H__
+
+#include <linux/types.h>
+
+#define PVRDMA_INVALID_IDX -1 /* Invalid index. */
+
+struct pvrdma_ring {
+ atomic_t prod_tail; /* Producer tail. */
+ atomic_t cons_head; /* Consumer head. */
+};
+
+struct pvrdma_ring_state {
+ struct pvrdma_ring tx; /* Tx ring. */
+ struct pvrdma_ring rx; /* Rx ring. */
+};
+
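+/*
+ * Ring indices are kept in the range [0, 2 * max_elems): the extra bit acts
+ * as a wrap (generation) marker so that a full ring can be distinguished
+ * from an empty one. max_elems is expected to be a power of two.
+ */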
+static inline int pvrdma_idx_valid(__u32 idx, __u32 max_elems)
+{
+ /* Generates fewer instructions than a less-than. */
+ return (idx & ~((max_elems << 1) - 1)) == 0;
+}
+
+static inline __s32 pvrdma_idx(atomic_t *var, __u32 max_elems)
+{
+ const unsigned int idx = atomic_read(var);
+
+ if (pvrdma_idx_valid(idx, max_elems))
+ return idx & (max_elems - 1);
+ return PVRDMA_INVALID_IDX;
+}
+
+static inline void pvrdma_idx_ring_inc(atomic_t *var, __u32 max_elems)
+{
+ __u32 idx = atomic_read(var) + 1; /* Increment. */
+
+ idx &= (max_elems << 1) - 1; /* Modulo size, flip gen. */
+ atomic_set(var, idx);
+}
+
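+/*
+ * The ring is empty when tail == head and full when the tail references the
+ * same slot as the head on the opposite generation (tail == head ^ max_elems).
+ */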
+static inline __s32 pvrdma_idx_ring_has_space(const struct pvrdma_ring *r,
+ __u32 max_elems, __u32 *out_tail)
+{
+ const __u32 tail = atomic_read(&r->prod_tail);
+ const __u32 head = atomic_read(&r->cons_head);
+
+ if (pvrdma_idx_valid(tail, max_elems) &&
+ pvrdma_idx_valid(head, max_elems)) {
+ *out_tail = tail & (max_elems - 1);
+ return tail != (head ^ max_elems);
+ }
+ return PVRDMA_INVALID_IDX;
+}
+
+static inline __s32 pvrdma_idx_ring_has_data(const struct pvrdma_ring *r,
+ __u32 max_elems, __u32 *out_head)
+{
+ const __u32 tail = atomic_read(&r->prod_tail);
+ const __u32 head = atomic_read(&r->cons_head);
+
+ if (pvrdma_idx_valid(tail, max_elems) &&
+ pvrdma_idx_valid(head, max_elems)) {
+ *out_head = head & (max_elems - 1);
+ return tail != head;
+ }
+ return PVRDMA_INVALID_IDX;
+}
+
+#endif /* __PVRDMA_RING_H__ */
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
new file mode 100644
index 000000000000..bdc2703532c6
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2016-2017 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/page.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "pvrdma.h"
+
+/**
+ * pvrdma_query_srq - query shared receive queue
+ * @ibsrq: the shared receive queue to query
+ * @srq_attr: attributes to query and return to client
+ *
+ * @return: 0 for success, otherwise returns an errno.
+ */
+int pvrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
+{
+ struct pvrdma_dev *dev = to_vdev(ibsrq->device);
+ struct pvrdma_srq *srq = to_vsrq(ibsrq);
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_query_srq *cmd = &req.query_srq;
+ struct pvrdma_cmd_query_srq_resp *resp = &rsp.query_srq_resp;
+ int ret;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_QUERY_SRQ;
+ cmd->srq_handle = srq->srq_handle;
+
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_SRQ_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not query shared receive queue, error: %d\n",
+ ret);
+ return -EINVAL;
+ }
+
+ srq_attr->srq_limit = resp->attrs.srq_limit;
+ srq_attr->max_wr = resp->attrs.max_wr;
+ srq_attr->max_sge = resp->attrs.max_sge;
+
+ return 0;
+}
+
+/**
+ * pvrdma_create_srq - create shared receive queue
+ * @ibsrq: the IB shared receive queue
+ * @init_attr: shared receive queue attributes
+ * @udata: user data
+ *
+ * @return: 0 on success, otherwise returns an errno.
+ */
+int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct pvrdma_srq *srq = to_vsrq(ibsrq);
+ struct pvrdma_dev *dev = to_vdev(ibsrq->device);
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
+ struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
+ struct pvrdma_create_srq_resp srq_resp = {};
+ struct pvrdma_create_srq ucmd;
+ unsigned long flags;
+ int ret;
+
+ if (!udata) {
+ /* No support for kernel clients. */
+ dev_warn(&dev->pdev->dev,
+ "no shared receive queue support for kernel client\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (init_attr->srq_type != IB_SRQT_BASIC) {
+ dev_warn(&dev->pdev->dev,
+ "shared receive queue type %d not supported\n",
+ init_attr->srq_type);
+ return -EOPNOTSUPP;
+ }
+
+ if (init_attr->attr.max_wr > dev->dsr->caps.max_srq_wr ||
+ init_attr->attr.max_sge > dev->dsr->caps.max_srq_sge) {
+ dev_warn(&dev->pdev->dev,
+ "shared receive queue size invalid\n");
+ return -EINVAL;
+ }
+
+ if (!atomic_add_unless(&dev->num_srqs, 1, dev->dsr->caps.max_srq))
+ return -ENOMEM;
+
+ spin_lock_init(&srq->lock);
+ refcount_set(&srq->refcnt, 1);
+ init_completion(&srq->free);
+
+ dev_dbg(&dev->pdev->dev,
+ "create shared receive queue from user space\n");
+
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
+ ret = -EFAULT;
+ goto err_srq;
+ }
+
+ srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0);
+ if (IS_ERR(srq->umem)) {
+ ret = PTR_ERR(srq->umem);
+ goto err_srq;
+ }
+
+ srq->npages = ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE);
+
+ if (srq->npages < 0 || srq->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
+ dev_warn(&dev->pdev->dev,
+ "overflow pages in shared receive queue\n");
+ ret = -EINVAL;
+ goto err_umem;
+ }
+
+ ret = pvrdma_page_dir_init(dev, &srq->pdir, srq->npages, false);
+ if (ret) {
+ dev_warn(&dev->pdev->dev,
+ "could not allocate page directory\n");
+ goto err_umem;
+ }
+
+ pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0);
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_CREATE_SRQ;
+ cmd->srq_type = init_attr->srq_type;
+ cmd->nchunks = srq->npages;
+ cmd->pd_handle = to_vpd(ibsrq->pd)->pd_handle;
+ cmd->attrs.max_wr = init_attr->attr.max_wr;
+ cmd->attrs.max_sge = init_attr->attr.max_sge;
+ cmd->attrs.srq_limit = init_attr->attr.srq_limit;
+ cmd->pdir_dma = srq->pdir.dir_dma;
+
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_SRQ_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not create shared receive queue, error: %d\n",
+ ret);
+ goto err_page_dir;
+ }
+
+ srq->srq_handle = resp->srqn;
+ srq_resp.srqn = resp->srqn;
+ spin_lock_irqsave(&dev->srq_tbl_lock, flags);
+ dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
+ spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
+
+ /* Copy udata back. */
+ if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
+ dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
+ pvrdma_destroy_srq(&srq->ibsrq, udata);
+ return -EINVAL;
+ }
+
+ return 0;
+
+err_page_dir:
+ pvrdma_page_dir_cleanup(dev, &srq->pdir);
+err_umem:
+ ib_umem_release(srq->umem);
+err_srq:
+ atomic_dec(&dev->num_srqs);
+
+ return ret;
+}
+
+static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->srq_tbl_lock, flags);
+ dev->srq_tbl[srq->srq_handle] = NULL;
+ spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
+
+ if (refcount_dec_and_test(&srq->refcnt))
+ complete(&srq->free);
+ wait_for_completion(&srq->free);
+
+ /* There is no support for kernel clients, so this is safe. */
+ ib_umem_release(srq->umem);
+
+ pvrdma_page_dir_cleanup(dev, &srq->pdir);
+
+ atomic_dec(&dev->num_srqs);
+}
+
+/**
+ * pvrdma_destroy_srq - destroy shared receive queue
+ * @srq: the shared receive queue to destroy
+ * @udata: user data or null for kernel object
+ *
+ * @return: 0 for success.
+ */
+int pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
+{
+ struct pvrdma_srq *vsrq = to_vsrq(srq);
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_destroy_srq *cmd = &req.destroy_srq;
+ struct pvrdma_dev *dev = to_vdev(srq->device);
+ int ret;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_DESTROY_SRQ;
+ cmd->srq_handle = vsrq->srq_handle;
+
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+ if (ret < 0)
+ dev_warn(&dev->pdev->dev,
+ "destroy shared receive queue failed, error: %d\n",
+ ret);
+
+ pvrdma_free_srq(dev, vsrq);
+ return 0;
+}
+
+/**
+ * pvrdma_modify_srq - modify shared receive queue attributes
+ * @ibsrq: the shared receive queue to modify
+ * @attr: the shared receive queue's new attributes
+ * @attr_mask: attributes mask
+ * @udata: user data
+ *
+ * @return: 0 on success, otherwise returns an errno.
+ */
+int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
+{
+ struct pvrdma_srq *vsrq = to_vsrq(ibsrq);
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_modify_srq *cmd = &req.modify_srq;
+ struct pvrdma_dev *dev = to_vdev(ibsrq->device);
+ int ret;
+
+ /* Only the SRQ limit attribute can be modified. */
+ if (!(attr_mask & IB_SRQ_LIMIT))
+ return -EINVAL;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_MODIFY_SRQ;
+ cmd->srq_handle = vsrq->srq_handle;
+ cmd->attrs.srq_limit = attr->srq_limit;
+ cmd->attr_mask = attr_mask;
+
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not modify shared receive queue, error: %d\n",
+ ret);
+
+ return -EINVAL;
+ }
+
+ return ret;
+}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
new file mode 100644
index 000000000000..bcd43dc30e21
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -0,0 +1,529 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/page.h>
+#include <linux/inet.h>
+#include <linux/io.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/vmw_pvrdma-abi.h>
+#include <rdma/uverbs_ioctl.h>
+
+#include "pvrdma.h"
+
+/**
+ * pvrdma_query_device - query device
+ * @ibdev: the device to query
+ * @props: the device properties
+ * @uhw: user data
+ *
+ * @return: 0 on success, otherwise negative errno
+ */
+int pvrdma_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props,
+ struct ib_udata *uhw)
+{
+ struct pvrdma_dev *dev = to_vdev(ibdev);
+
+ if (uhw->inlen || uhw->outlen)
+ return -EINVAL;
+
+ props->fw_ver = dev->dsr->caps.fw_ver;
+ props->sys_image_guid = dev->dsr->caps.sys_image_guid;
+ props->max_mr_size = dev->dsr->caps.max_mr_size;
+ props->page_size_cap = dev->dsr->caps.page_size_cap;
+ props->vendor_id = dev->dsr->caps.vendor_id;
+ props->vendor_part_id = dev->pdev->device;
+ props->hw_ver = dev->dsr->caps.hw_ver;
+ props->max_qp = dev->dsr->caps.max_qp;
+ props->max_qp_wr = dev->dsr->caps.max_qp_wr;
+ props->device_cap_flags = dev->dsr->caps.device_cap_flags;
+ props->max_send_sge = dev->dsr->caps.max_sge;
+ props->max_recv_sge = dev->dsr->caps.max_sge;
+ props->max_sge_rd = PVRDMA_GET_CAP(dev, dev->dsr->caps.max_sge,
+ dev->dsr->caps.max_sge_rd);
+ props->max_srq = dev->dsr->caps.max_srq;
+ props->max_srq_wr = dev->dsr->caps.max_srq_wr;
+ props->max_srq_sge = dev->dsr->caps.max_srq_sge;
+ props->max_cq = dev->dsr->caps.max_cq;
+ props->max_cqe = dev->dsr->caps.max_cqe;
+ props->max_mr = dev->dsr->caps.max_mr;
+ props->max_pd = dev->dsr->caps.max_pd;
+ props->max_qp_rd_atom = dev->dsr->caps.max_qp_rd_atom;
+ props->max_qp_init_rd_atom = dev->dsr->caps.max_qp_init_rd_atom;
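+ /*
+ * Advertise HCA atomics only if the device supports both
+ * compare-and-swap and fetch-and-add.
+ */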
+ props->atomic_cap =
+ dev->dsr->caps.atomic_ops &
+ (PVRDMA_ATOMIC_OP_COMP_SWAP | PVRDMA_ATOMIC_OP_FETCH_ADD) ?
+ IB_ATOMIC_HCA : IB_ATOMIC_NONE;
+ props->masked_atomic_cap = props->atomic_cap;
+ props->max_ah = dev->dsr->caps.max_ah;
+ props->max_pkeys = dev->dsr->caps.max_pkeys;
+ props->local_ca_ack_delay = dev->dsr->caps.local_ca_ack_delay;
+ if ((dev->dsr->caps.bmme_flags & PVRDMA_BMME_FLAG_LOCAL_INV) &&
+ (dev->dsr->caps.bmme_flags & PVRDMA_BMME_FLAG_REMOTE_INV) &&
+ (dev->dsr->caps.bmme_flags & PVRDMA_BMME_FLAG_FAST_REG_WR)) {
+ props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
+ props->max_fast_reg_page_list_len = PVRDMA_GET_CAP(dev,
+ PVRDMA_MAX_FAST_REG_PAGES,
+ dev->dsr->caps.max_fast_reg_page_list_len);
+ }
+
+ props->device_cap_flags |= IB_DEVICE_PORT_ACTIVE_EVENT |
+ IB_DEVICE_RC_RNR_NAK_GEN;
+
+ return 0;
+}
+
+/**
+ * pvrdma_query_port - query device port attributes
+ * @ibdev: the device to query
+ * @port: the port number
+ * @props: the device properties
+ *
+ * @return: 0 on success, otherwise negative errno
+ */
+int pvrdma_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props)
+{
+ struct pvrdma_dev *dev = to_vdev(ibdev);
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_query_port *cmd = &req.query_port;
+ struct pvrdma_cmd_query_port_resp *resp = &rsp.query_port_resp;
+ int err;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_QUERY_PORT;
+ cmd->port_num = port;
+
+ err = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_PORT_RESP);
+ if (err < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not query port, error: %d\n", err);
+ return err;
+ }
+
+ /* props is zeroed by the caller; avoid zeroing it again here */
+
+ props->state = pvrdma_port_state_to_ib(resp->attrs.state);
+ props->max_mtu = pvrdma_mtu_to_ib(resp->attrs.max_mtu);
+ props->active_mtu = pvrdma_mtu_to_ib(resp->attrs.active_mtu);
+ props->gid_tbl_len = resp->attrs.gid_tbl_len;
+ props->port_cap_flags =
+ pvrdma_port_cap_flags_to_ib(resp->attrs.port_cap_flags);
+ props->port_cap_flags |= IB_PORT_CM_SUP;
+ props->ip_gids = true;
+ props->max_msg_sz = resp->attrs.max_msg_sz;
+ props->bad_pkey_cntr = resp->attrs.bad_pkey_cntr;
+ props->qkey_viol_cntr = resp->attrs.qkey_viol_cntr;
+ props->pkey_tbl_len = resp->attrs.pkey_tbl_len;
+ props->lid = resp->attrs.lid;
+ props->sm_lid = resp->attrs.sm_lid;
+ props->lmc = resp->attrs.lmc;
+ props->max_vl_num = resp->attrs.max_vl_num;
+ props->sm_sl = resp->attrs.sm_sl;
+ props->subnet_timeout = resp->attrs.subnet_timeout;
+ props->init_type_reply = resp->attrs.init_type_reply;
+ props->active_width = pvrdma_port_width_to_ib(resp->attrs.active_width);
+ props->active_speed = pvrdma_port_speed_to_ib(resp->attrs.active_speed);
+ props->phys_state = resp->attrs.phys_state;
+
+ return 0;
+}
+
+/**
+ * pvrdma_query_gid - query device gid
+ * @ibdev: the device to query
+ * @port: the port number
+ * @index: the index
+ * @gid: the device gid value
+ *
+ * @return: 0 on success, otherwise negative errno
+ */
+int pvrdma_query_gid(struct ib_device *ibdev, u32 port, int index,
+ union ib_gid *gid)
+{
+ struct pvrdma_dev *dev = to_vdev(ibdev);
+
+ if (index >= dev->dsr->caps.gid_tbl_len)
+ return -EINVAL;
+
+ memcpy(gid, &dev->sgid_tbl[index], sizeof(union ib_gid));
+
+ return 0;
+}
+
+/**
+ * pvrdma_query_pkey - query device port's P_Key table
+ * @ibdev: the device to query
+ * @port: the port number
+ * @index: the index
+ * @pkey: the device P_Key value
+ *
+ * @return: 0 on success, otherwise negative errno
+ */
+int pvrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
+ u16 *pkey)
+{
+ int err = 0;
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_query_pkey *cmd = &req.query_pkey;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_QUERY_PKEY;
+ cmd->port_num = port;
+ cmd->index = index;
+
+ err = pvrdma_cmd_post(to_vdev(ibdev), &req, &rsp,
+ PVRDMA_CMD_QUERY_PKEY_RESP);
+ if (err < 0) {
+ dev_warn(&to_vdev(ibdev)->pdev->dev,
+ "could not query pkey, error: %d\n", err);
+ return err;
+ }
+
+ *pkey = rsp.query_pkey_resp.pkey;
+
+ return 0;
+}
+
+enum rdma_link_layer pvrdma_port_link_layer(struct ib_device *ibdev,
+ u32 port)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+/**
+ * pvrdma_modify_port - modify device port attributes
+ * @ibdev: the device to modify
+ * @port: the port number
+ * @mask: attributes to modify
+ * @props: the device properties
+ *
+ * @return: 0 on success, otherwise negative errno
+ */
+int pvrdma_modify_port(struct ib_device *ibdev, u32 port, int mask,
+ struct ib_port_modify *props)
+{
+ struct ib_port_attr attr;
+ struct pvrdma_dev *vdev = to_vdev(ibdev);
+ int ret;
+
+ if (mask & ~IB_PORT_SHUTDOWN) {
+ dev_warn(&vdev->pdev->dev,
+ "unsupported port modify mask %#x\n", mask);
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&vdev->port_mutex);
+ ret = ib_query_port(ibdev, port, &attr);
+ if (ret)
+ goto out;
+
+ vdev->port_cap_mask |= props->set_port_cap_mask;
+ vdev->port_cap_mask &= ~props->clr_port_cap_mask;
+
+ if (mask & IB_PORT_SHUTDOWN)
+ vdev->ib_active = false;
+
+out:
+ mutex_unlock(&vdev->port_mutex);
+ return ret;
+}
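For context, a minimal caller sketch (hypothetical, not in this patch): the core entry point ib_modify_port() dispatches here, and the only mask bit accepted above is IB_PORT_SHUTDOWN.

/* Illustrative only: take the port down through the core API. */
static int pvrdma_example_shutdown_port(struct ib_device *ibdev, u32 port)
{
	struct ib_port_modify pm = {};

	return ib_modify_port(ibdev, port, IB_PORT_SHUTDOWN, &pm);
}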
+
+/**
+ * pvrdma_alloc_ucontext - allocate ucontext
+ * @uctx: the uverbs ucontext
+ * @udata: user data
+ *
+ * @return: zero on success, otherwise errno.
+ */
+int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
+{
+ struct ib_device *ibdev = uctx->device;
+ struct pvrdma_dev *vdev = to_vdev(ibdev);
+ struct pvrdma_ucontext *context = to_vucontext(uctx);
+ union pvrdma_cmd_req req = {};
+ union pvrdma_cmd_resp rsp = {};
+ struct pvrdma_cmd_create_uc *cmd = &req.create_uc;
+ struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp;
+ struct pvrdma_alloc_ucontext_resp uresp = {};
+ int ret;
+
+ if (!vdev->ib_active)
+ return -EAGAIN;
+
+ context->dev = vdev;
+ ret = pvrdma_uar_alloc(vdev, &context->uar);
+ if (ret)
+ return -ENOMEM;
+
+ /* get ctx_handle from host */
+ if (vdev->dsr_version < PVRDMA_PPN64_VERSION)
+ cmd->pfn = context->uar.pfn;
+ else
+ cmd->pfn64 = context->uar.pfn;
+
+ cmd->hdr.cmd = PVRDMA_CMD_CREATE_UC;
+ ret = pvrdma_cmd_post(vdev, &req, &rsp, PVRDMA_CMD_CREATE_UC_RESP);
+ if (ret < 0) {
+ dev_warn(&vdev->pdev->dev,
+ "could not create ucontext, error: %d\n", ret);
+ goto err;
+ }
+
+ context->ctx_handle = resp->ctx_handle;
+
+ /* copy back to user */
+ uresp.qp_tab_size = vdev->dsr->caps.max_qp;
+ ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (ret) {
+ pvrdma_uar_free(vdev, &context->uar);
+ pvrdma_dealloc_ucontext(&context->ibucontext);
+ return -EFAULT;
+ }
+
+ return 0;
+
+err:
+ pvrdma_uar_free(vdev, &context->uar);
+ return ret;
+}
+
+/**
+ * pvrdma_dealloc_ucontext - deallocate ucontext
+ * @ibcontext: the ucontext
+ */
+void pvrdma_dealloc_ucontext(struct ib_ucontext *ibcontext)
+{
+ struct pvrdma_ucontext *context = to_vucontext(ibcontext);
+ union pvrdma_cmd_req req = {};
+ struct pvrdma_cmd_destroy_uc *cmd = &req.destroy_uc;
+ int ret;
+
+ cmd->hdr.cmd = PVRDMA_CMD_DESTROY_UC;
+ cmd->ctx_handle = context->ctx_handle;
+
+ ret = pvrdma_cmd_post(context->dev, &req, NULL, 0);
+ if (ret < 0)
+ dev_warn(&context->dev->pdev->dev,
+ "destroy ucontext failed, error: %d\n", ret);
+
+ /* Free the UAR even if the device command failed */
+ pvrdma_uar_free(to_vdev(ibcontext->device), &context->uar);
+}
+
+/**
+ * pvrdma_mmap - create mmap region
+ * @ibcontext: the user context
+ * @vma: the VMA
+ *
+ * @return: 0 on success, otherwise errno.
+ */
+int pvrdma_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
+{
+ struct pvrdma_ucontext *context = to_vucontext(ibcontext);
+ unsigned long start = vma->vm_start;
+ unsigned long size = vma->vm_end - vma->vm_start;
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+
+ dev_dbg(&context->dev->pdev->dev, "create mmap region\n");
+
+ if ((size != PAGE_SIZE) || (offset & ~PAGE_MASK)) {
+ dev_warn(&context->dev->pdev->dev,
+ "invalid params for mmap region\n");
+ return -EINVAL;
+ }
+
+ /* Map the UAR page into the user's address space. VM_LOCKED? */
+ vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (io_remap_pfn_range(vma, start, context->uar.pfn, size,
+ vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
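The userspace counterpart is roughly the following (a hypothetical provider-library sketch, not part of this patch): exactly one page is mapped through the uverbs command fd, matching the size/offset check above.

/* Illustrative userspace sketch: map the UAR doorbell page. */
#include <sys/mman.h>
#include <unistd.h>

static void *pvrdma_example_map_uar(int uverbs_cmd_fd)
{
	long page_size = sysconf(_SC_PAGESIZE);
	void *uar = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
			 uverbs_cmd_fd, 0);

	return uar == MAP_FAILED ? NULL : uar;
}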
+
+/**
+ * pvrdma_alloc_pd - allocate protection domain
+ * @ibpd: PD pointer
+ * @udata: user data
+ *
+ * @return: 0 on success, otherwise errno.
+ */
+int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct ib_device *ibdev = ibpd->device;
+ struct pvrdma_pd *pd = to_vpd(ibpd);
+ struct pvrdma_dev *dev = to_vdev(ibdev);
+ union pvrdma_cmd_req req = {};
+ union pvrdma_cmd_resp rsp = {};
+ struct pvrdma_cmd_create_pd *cmd = &req.create_pd;
+ struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp;
+ struct pvrdma_alloc_pd_resp pd_resp = {0};
+ int ret;
+ struct pvrdma_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct pvrdma_ucontext, ibucontext);
+
+ /* Check allowed max pds */
+ if (!atomic_add_unless(&dev->num_pds, 1, dev->dsr->caps.max_pd))
+ return -ENOMEM;
+
+ cmd->hdr.cmd = PVRDMA_CMD_CREATE_PD;
+ cmd->ctx_handle = context ? context->ctx_handle : 0;
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_PD_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "failed to allocate protection domain, error: %d\n",
+ ret);
+ goto err;
+ }
+
+ pd->privileged = !udata;
+ pd->pd_handle = resp->pd_handle;
+ pd->pdn = resp->pd_handle;
+ pd_resp.pdn = resp->pd_handle;
+
+ if (udata) {
+ if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) {
+ dev_warn(&dev->pdev->dev,
+ "failed to copy back protection domain\n");
+ pvrdma_dealloc_pd(&pd->ibpd, udata);
+ return -EFAULT;
+ }
+ }
+
+ /* u32 pd handle */
+ return 0;
+
+err:
+ atomic_dec(&dev->num_pds);
+ return ret;
+}
+
+/**
+ * pvrdma_dealloc_pd - deallocate protection domain
+ * @pd: the protection domain to be released
+ * @udata: user data or null for kernel object
+ *
+ * @return: Always 0
+ */
+int pvrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
+{
+ struct pvrdma_dev *dev = to_vdev(pd->device);
+ union pvrdma_cmd_req req = {};
+ struct pvrdma_cmd_destroy_pd *cmd = &req.destroy_pd;
+ int ret;
+
+ cmd->hdr.cmd = PVRDMA_CMD_DESTROY_PD;
+ cmd->pd_handle = to_vpd(pd)->pd_handle;
+
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+ if (ret)
+ dev_warn(&dev->pdev->dev,
+ "could not dealloc protection domain, error: %d\n",
+ ret);
+
+ atomic_dec(&dev->num_pds);
+ return 0;
+}
+
+/**
+ * pvrdma_create_ah - create an address handle
+ * @ibah: the IB address handle
+ * @init_attr: the attributes of the AH
+ * @udata: pointer to user data
+ *
+ * @return: 0 on success, otherwise errno.
+ */
+int pvrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
+ struct pvrdma_dev *dev = to_vdev(ibah->device);
+ struct pvrdma_ah *ah = to_vah(ibah);
+ const struct ib_global_route *grh;
+ u32 port_num = rdma_ah_get_port_num(ah_attr);
+
+ if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
+ return -EINVAL;
+
+ grh = rdma_ah_read_grh(ah_attr);
+ if ((ah_attr->type != RDMA_AH_ATTR_TYPE_ROCE) ||
+ rdma_is_multicast_addr((struct in6_addr *)grh->dgid.raw))
+ return -EINVAL;
+
+ if (!atomic_add_unless(&dev->num_ahs, 1, dev->dsr->caps.max_ah))
+ return -ENOMEM;
+
+ ah->av.port_pd = to_vpd(ibah->pd)->pd_handle | (port_num << 24);
+ ah->av.src_path_bits = rdma_ah_get_path_bits(ah_attr);
+ ah->av.src_path_bits |= 0x80;
+ ah->av.gid_index = grh->sgid_index;
+ ah->av.hop_limit = grh->hop_limit;
+ ah->av.sl_tclass_flowlabel = (grh->traffic_class << 20) |
+ grh->flow_label;
+ memcpy(ah->av.dgid, grh->dgid.raw, 16);
+ memcpy(ah->av.dmac, ah_attr->roce.dmac, ETH_ALEN);
+
+ return 0;
+}
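To make the bit layout explicit, here is a hypothetical pair of helpers (not in this patch) that undo the packing above: the traffic class occupies bits 27..20 and the flow label bits 19..0 of sl_tclass_flowlabel.

/* Illustrative only: unpack the AV's combined tclass/flow-label field. */
static inline u8 pvrdma_example_av_tclass(u32 sl_tclass_flowlabel)
{
	return (sl_tclass_flowlabel >> 20) & 0xff;
}

static inline u32 pvrdma_example_av_flow_label(u32 sl_tclass_flowlabel)
{
	return sl_tclass_flowlabel & 0xfffff;
}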
+
+/**
+ * pvrdma_destroy_ah - destroy an address handle
+ * @ah: the address handle to be destroyed
+ * @flags: destroy address handle flags (see enum rdma_destroy_ah_flags)
+ *
+ */
+int pvrdma_destroy_ah(struct ib_ah *ah, u32 flags)
+{
+ struct pvrdma_dev *dev = to_vdev(ah->device);
+
+ atomic_dec(&dev->num_ahs);
+ return 0;
+}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
new file mode 100644
index 000000000000..603e5a9311eb
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -0,0 +1,404 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PVRDMA_VERBS_H__
+#define __PVRDMA_VERBS_H__
+
+#include <linux/types.h>
+
+union pvrdma_gid {
+ u8 raw[16];
+ struct {
+ __be64 subnet_prefix;
+ __be64 interface_id;
+ } global;
+};
+
+enum pvrdma_link_layer {
+ PVRDMA_LINK_LAYER_UNSPECIFIED,
+ PVRDMA_LINK_LAYER_INFINIBAND,
+ PVRDMA_LINK_LAYER_ETHERNET,
+};
+
+enum pvrdma_mtu {
+ PVRDMA_MTU_256 = 1,
+ PVRDMA_MTU_512 = 2,
+ PVRDMA_MTU_1024 = 3,
+ PVRDMA_MTU_2048 = 4,
+ PVRDMA_MTU_4096 = 5,
+};
+
+enum pvrdma_port_state {
+ PVRDMA_PORT_NOP = 0,
+ PVRDMA_PORT_DOWN = 1,
+ PVRDMA_PORT_INIT = 2,
+ PVRDMA_PORT_ARMED = 3,
+ PVRDMA_PORT_ACTIVE = 4,
+ PVRDMA_PORT_ACTIVE_DEFER = 5,
+};
+
+enum pvrdma_port_cap_flags {
+ PVRDMA_PORT_SM = 1 << 1,
+ PVRDMA_PORT_NOTICE_SUP = 1 << 2,
+ PVRDMA_PORT_TRAP_SUP = 1 << 3,
+ PVRDMA_PORT_OPT_IPD_SUP = 1 << 4,
+ PVRDMA_PORT_AUTO_MIGR_SUP = 1 << 5,
+ PVRDMA_PORT_SL_MAP_SUP = 1 << 6,
+ PVRDMA_PORT_MKEY_NVRAM = 1 << 7,
+ PVRDMA_PORT_PKEY_NVRAM = 1 << 8,
+ PVRDMA_PORT_LED_INFO_SUP = 1 << 9,
+ PVRDMA_PORT_SM_DISABLED = 1 << 10,
+ PVRDMA_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
+ PVRDMA_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
+ PVRDMA_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
+ PVRDMA_PORT_CM_SUP = 1 << 16,
+ PVRDMA_PORT_SNMP_TUNNEL_SUP = 1 << 17,
+ PVRDMA_PORT_REINIT_SUP = 1 << 18,
+ PVRDMA_PORT_DEVICE_MGMT_SUP = 1 << 19,
+ PVRDMA_PORT_VENDOR_CLASS_SUP = 1 << 20,
+ PVRDMA_PORT_DR_NOTICE_SUP = 1 << 21,
+ PVRDMA_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
+ PVRDMA_PORT_BOOT_MGMT_SUP = 1 << 23,
+ PVRDMA_PORT_LINK_LATENCY_SUP = 1 << 24,
+ PVRDMA_PORT_CLIENT_REG_SUP = 1 << 25,
+ PVRDMA_PORT_IP_BASED_GIDS = 1 << 26,
+ PVRDMA_PORT_CAP_FLAGS_MAX = PVRDMA_PORT_IP_BASED_GIDS,
+};
+
+enum pvrdma_port_width {
+ PVRDMA_WIDTH_1X = 1,
+ PVRDMA_WIDTH_4X = 2,
+ PVRDMA_WIDTH_8X = 4,
+ PVRDMA_WIDTH_12X = 8,
+};
+
+enum pvrdma_port_speed {
+ PVRDMA_SPEED_SDR = 1,
+ PVRDMA_SPEED_DDR = 2,
+ PVRDMA_SPEED_QDR = 4,
+ PVRDMA_SPEED_FDR10 = 8,
+ PVRDMA_SPEED_FDR = 16,
+ PVRDMA_SPEED_EDR = 32,
+};
+
+struct pvrdma_port_attr {
+ enum pvrdma_port_state state;
+ enum pvrdma_mtu max_mtu;
+ enum pvrdma_mtu active_mtu;
+ u32 gid_tbl_len;
+ u32 port_cap_flags;
+ u32 max_msg_sz;
+ u32 bad_pkey_cntr;
+ u32 qkey_viol_cntr;
+ u16 pkey_tbl_len;
+ u16 lid;
+ u16 sm_lid;
+ u8 lmc;
+ u8 max_vl_num;
+ u8 sm_sl;
+ u8 subnet_timeout;
+ u8 init_type_reply;
+ u8 active_width;
+ u8 active_speed;
+ u8 phys_state;
+ u8 reserved[2];
+};
+
+struct pvrdma_global_route {
+ union pvrdma_gid dgid;
+ u32 flow_label;
+ u8 sgid_index;
+ u8 hop_limit;
+ u8 traffic_class;
+ u8 reserved;
+};
+
+struct pvrdma_grh {
+ __be32 version_tclass_flow;
+ __be16 paylen;
+ u8 next_hdr;
+ u8 hop_limit;
+ union pvrdma_gid sgid;
+ union pvrdma_gid dgid;
+};
+
+enum pvrdma_ah_flags {
+ PVRDMA_AH_GRH = 1,
+};
+
+enum pvrdma_rate {
+ PVRDMA_RATE_PORT_CURRENT = 0,
+ PVRDMA_RATE_2_5_GBPS = 2,
+ PVRDMA_RATE_5_GBPS = 5,
+ PVRDMA_RATE_10_GBPS = 3,
+ PVRDMA_RATE_20_GBPS = 6,
+ PVRDMA_RATE_30_GBPS = 4,
+ PVRDMA_RATE_40_GBPS = 7,
+ PVRDMA_RATE_60_GBPS = 8,
+ PVRDMA_RATE_80_GBPS = 9,
+ PVRDMA_RATE_120_GBPS = 10,
+ PVRDMA_RATE_14_GBPS = 11,
+ PVRDMA_RATE_56_GBPS = 12,
+ PVRDMA_RATE_112_GBPS = 13,
+ PVRDMA_RATE_168_GBPS = 14,
+ PVRDMA_RATE_25_GBPS = 15,
+ PVRDMA_RATE_100_GBPS = 16,
+ PVRDMA_RATE_200_GBPS = 17,
+ PVRDMA_RATE_300_GBPS = 18,
+};
+
+struct pvrdma_ah_attr {
+ struct pvrdma_global_route grh;
+ u16 dlid;
+ u16 vlan_id;
+ u8 sl;
+ u8 src_path_bits;
+ u8 static_rate;
+ u8 ah_flags;
+ u8 port_num;
+ u8 dmac[6];
+ u8 reserved;
+};
+
+enum pvrdma_cq_notify_flags {
+ PVRDMA_CQ_SOLICITED = 1 << 0,
+ PVRDMA_CQ_NEXT_COMP = 1 << 1,
+ PVRDMA_CQ_SOLICITED_MASK = PVRDMA_CQ_SOLICITED |
+ PVRDMA_CQ_NEXT_COMP,
+ PVRDMA_CQ_REPORT_MISSED_EVENTS = 1 << 2,
+};
+
+struct pvrdma_qp_cap {
+ u32 max_send_wr;
+ u32 max_recv_wr;
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 max_inline_data;
+ u32 reserved;
+};
+
+enum pvrdma_sig_type {
+ PVRDMA_SIGNAL_ALL_WR,
+ PVRDMA_SIGNAL_REQ_WR,
+};
+
+enum pvrdma_qp_type {
+ PVRDMA_QPT_SMI,
+ PVRDMA_QPT_GSI,
+ PVRDMA_QPT_RC,
+ PVRDMA_QPT_UC,
+ PVRDMA_QPT_UD,
+ PVRDMA_QPT_RAW_IPV6,
+ PVRDMA_QPT_RAW_ETHERTYPE,
+ PVRDMA_QPT_RAW_PACKET = 8,
+ PVRDMA_QPT_XRC_INI = 9,
+ PVRDMA_QPT_XRC_TGT,
+ PVRDMA_QPT_MAX,
+};
+
+enum pvrdma_qp_create_flags {
+ PVRDMA_QP_CREATE_IPOPVRDMA_UD_LSO = 1 << 0,
+ PVRDMA_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
+};
+
+enum pvrdma_qp_attr_mask {
+ PVRDMA_QP_STATE = 1 << 0,
+ PVRDMA_QP_CUR_STATE = 1 << 1,
+ PVRDMA_QP_EN_SQD_ASYNC_NOTIFY = 1 << 2,
+ PVRDMA_QP_ACCESS_FLAGS = 1 << 3,
+ PVRDMA_QP_PKEY_INDEX = 1 << 4,
+ PVRDMA_QP_PORT = 1 << 5,
+ PVRDMA_QP_QKEY = 1 << 6,
+ PVRDMA_QP_AV = 1 << 7,
+ PVRDMA_QP_PATH_MTU = 1 << 8,
+ PVRDMA_QP_TIMEOUT = 1 << 9,
+ PVRDMA_QP_RETRY_CNT = 1 << 10,
+ PVRDMA_QP_RNR_RETRY = 1 << 11,
+ PVRDMA_QP_RQ_PSN = 1 << 12,
+ PVRDMA_QP_MAX_QP_RD_ATOMIC = 1 << 13,
+ PVRDMA_QP_ALT_PATH = 1 << 14,
+ PVRDMA_QP_MIN_RNR_TIMER = 1 << 15,
+ PVRDMA_QP_SQ_PSN = 1 << 16,
+ PVRDMA_QP_MAX_DEST_RD_ATOMIC = 1 << 17,
+ PVRDMA_QP_PATH_MIG_STATE = 1 << 18,
+ PVRDMA_QP_CAP = 1 << 19,
+ PVRDMA_QP_DEST_QPN = 1 << 20,
+ PVRDMA_QP_ATTR_MASK_MAX = PVRDMA_QP_DEST_QPN,
+};
+
+enum pvrdma_qp_state {
+ PVRDMA_QPS_RESET,
+ PVRDMA_QPS_INIT,
+ PVRDMA_QPS_RTR,
+ PVRDMA_QPS_RTS,
+ PVRDMA_QPS_SQD,
+ PVRDMA_QPS_SQE,
+ PVRDMA_QPS_ERR,
+};
+
+enum pvrdma_mig_state {
+ PVRDMA_MIG_MIGRATED,
+ PVRDMA_MIG_REARM,
+ PVRDMA_MIG_ARMED,
+};
+
+enum pvrdma_mw_type {
+ PVRDMA_MW_TYPE_1 = 1,
+ PVRDMA_MW_TYPE_2 = 2,
+};
+
+struct pvrdma_srq_attr {
+ u32 max_wr;
+ u32 max_sge;
+ u32 srq_limit;
+ u32 reserved;
+};
+
+struct pvrdma_qp_attr {
+ enum pvrdma_qp_state qp_state;
+ enum pvrdma_qp_state cur_qp_state;
+ enum pvrdma_mtu path_mtu;
+ enum pvrdma_mig_state path_mig_state;
+ u32 qkey;
+ u32 rq_psn;
+ u32 sq_psn;
+ u32 dest_qp_num;
+ u32 qp_access_flags;
+ u16 pkey_index;
+ u16 alt_pkey_index;
+ u8 en_sqd_async_notify;
+ u8 sq_draining;
+ u8 max_rd_atomic;
+ u8 max_dest_rd_atomic;
+ u8 min_rnr_timer;
+ u8 port_num;
+ u8 timeout;
+ u8 retry_cnt;
+ u8 rnr_retry;
+ u8 alt_port_num;
+ u8 alt_timeout;
+ u8 reserved[5];
+ struct pvrdma_qp_cap cap;
+ struct pvrdma_ah_attr ah_attr;
+ struct pvrdma_ah_attr alt_ah_attr;
+};
+
+enum pvrdma_send_flags {
+ PVRDMA_SEND_FENCE = 1 << 0,
+ PVRDMA_SEND_SIGNALED = 1 << 1,
+ PVRDMA_SEND_SOLICITED = 1 << 2,
+ PVRDMA_SEND_INLINE = 1 << 3,
+ PVRDMA_SEND_IP_CSUM = 1 << 4,
+ PVRDMA_SEND_FLAGS_MAX = PVRDMA_SEND_IP_CSUM,
+};
+
+enum pvrdma_access_flags {
+ PVRDMA_ACCESS_LOCAL_WRITE = 1 << 0,
+ PVRDMA_ACCESS_REMOTE_WRITE = 1 << 1,
+ PVRDMA_ACCESS_REMOTE_READ = 1 << 2,
+ PVRDMA_ACCESS_REMOTE_ATOMIC = 1 << 3,
+ PVRDMA_ACCESS_MW_BIND = 1 << 4,
+ PVRDMA_ZERO_BASED = 1 << 5,
+ PVRDMA_ACCESS_ON_DEMAND = 1 << 6,
+ PVRDMA_ACCESS_FLAGS_MAX = PVRDMA_ACCESS_ON_DEMAND,
+};
+
+int pvrdma_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props,
+ struct ib_udata *udata);
+int pvrdma_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props);
+int pvrdma_query_gid(struct ib_device *ibdev, u32 port,
+ int index, union ib_gid *gid);
+int pvrdma_query_pkey(struct ib_device *ibdev, u32 port,
+ u16 index, u16 *pkey);
+enum rdma_link_layer pvrdma_port_link_layer(struct ib_device *ibdev,
+ u32 port);
+int pvrdma_modify_port(struct ib_device *ibdev, u32 port,
+ int mask, struct ib_port_modify *props);
+int pvrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
+void pvrdma_dealloc_ucontext(struct ib_ucontext *context);
+int pvrdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+int pvrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc);
+struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
+ struct ib_udata *udata);
+int pvrdma_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
+struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg);
+int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset);
+int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
+int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
+int pvrdma_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
+int pvrdma_destroy_ah(struct ib_ah *ah, u32 flags);
+
+int pvrdma_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata);
+int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
+int pvrdma_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
+int pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
+
+int pvrdma_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
+int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
+int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int pvrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+
+#endif /* __PVRDMA_VERBS_H__ */
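A hypothetical wiring sketch (not part of this patch): prototypes like these are typically plugged into the IB core through an ib_device_ops table in the main driver file; the table below is illustrative and deliberately incomplete.

/* Illustrative only: how a driver might register these verbs. */
static const struct ib_device_ops pvrdma_example_dev_ops = {
	.query_device	= pvrdma_query_device,
	.query_port	= pvrdma_query_port,
	.query_gid	= pvrdma_query_gid,
	.query_pkey	= pvrdma_query_pkey,
	.get_link_layer	= pvrdma_port_link_layer,
	.modify_port	= pvrdma_modify_port,
	.mmap		= pvrdma_mmap,
	.create_ah	= pvrdma_create_ah,
	.destroy_ah	= pvrdma_destroy_ah,
};

static void pvrdma_example_register_ops(struct ib_device *ibdev)
{
	ib_set_device_ops(ibdev, &pvrdma_example_dev_ops);
}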
diff --git a/drivers/infiniband/sw/Makefile b/drivers/infiniband/sw/Makefile
new file mode 100644
index 000000000000..68e0230f8f31
--- /dev/null
+++ b/drivers/infiniband/sw/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_INFINIBAND_RDMAVT) += rdmavt/
+obj-$(CONFIG_RDMA_RXE) += rxe/
+obj-$(CONFIG_RDMA_SIW) += siw/
diff --git a/drivers/infiniband/sw/rdmavt/Kconfig b/drivers/infiniband/sw/rdmavt/Kconfig
new file mode 100644
index 000000000000..0df48b3a6b56
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INFINIBAND_RDMAVT
+ tristate "RDMA verbs transport library"
+ depends on INFINIBAND_VIRT_DMA
+ depends on X86_64
+ depends on PCI
+ help
+ This is a common software verbs provider for RDMA networks.
diff --git a/drivers/infiniband/sw/rdmavt/Makefile b/drivers/infiniband/sw/rdmavt/Makefile
new file mode 100644
index 000000000000..b21962dafcc9
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# rdmavt driver
+#
+#
+#
+# Called from the kernel module build system.
+#
+obj-$(CONFIG_INFINIBAND_RDMAVT) += rdmavt.o
+
+rdmavt-y := vt.o ah.o cq.o mad.o mcast.o mmap.o mr.o pd.o qp.o \
+ rc.o srq.o trace.o
+
+CFLAGS_trace.o = -I$(src)
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
new file mode 100644
index 000000000000..56926617b064
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/ah.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2016 - 2019 Intel Corporation.
+ */
+
+#include <linux/slab.h>
+#include "ah.h"
+#include "vt.h" /* for prints */
+
+/**
+ * rvt_check_ah - validate the attributes of AH
+ * @ibdev: the ib device
+ * @ah_attr: the attributes of the AH
+ *
+ * If the driver supplies a more detailed check_ah callback, defer to it;
+ * otherwise just check the basics.
+ *
+ * Return: 0 on success
+ */
+int rvt_check_ah(struct ib_device *ibdev,
+ struct rdma_ah_attr *ah_attr)
+{
+ int err;
+ int port_num = rdma_ah_get_port_num(ah_attr);
+ struct ib_port_attr port_attr;
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+ u8 ah_flags = rdma_ah_get_ah_flags(ah_attr);
+ u8 static_rate = rdma_ah_get_static_rate(ah_attr);
+
+ err = ib_query_port(ibdev, port_num, &port_attr);
+ if (err)
+ return -EINVAL;
+ if (port_num < 1 ||
+ port_num > ibdev->phys_port_cnt)
+ return -EINVAL;
+ if (static_rate != IB_RATE_PORT_CURRENT &&
+ ib_rate_to_mbps(static_rate) < 0)
+ return -EINVAL;
+ if ((ah_flags & IB_AH_GRH) &&
+ rdma_ah_read_grh(ah_attr)->sgid_index >= port_attr.gid_tbl_len)
+ return -EINVAL;
+ if (rdi->driver_f.check_ah)
+ return rdi->driver_f.check_ah(ibdev, ah_attr);
+ return 0;
+}
+EXPORT_SYMBOL(rvt_check_ah);
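A hypothetical driver callback (not part of this patch) showing where rdi->driver_f.check_ah fits: rvt_check_ah() above only falls through to it after the generic checks pass.

/* Illustrative only: extra per-device AH validation hooked via driver_f. */
static int example_drv_check_ah(struct ib_device *ibdev,
				struct rdma_ah_attr *ah_attr)
{
	/* e.g. this hypothetical device only implements SLs 0-7 */
	if (rdma_ah_get_sl(ah_attr) > 7)
		return -EINVAL;
	return 0;
}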
+
+/**
+ * rvt_create_ah - create an address handle
+ * @ibah: the IB address handle
+ * @init_attr: the attributes of the AH
+ * @udata: pointer to the user's input/output buffer information.
+ *
+ * This may be called from interrupt context.
+ *
+ * Return: 0 on success
+ */
+int rvt_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct rvt_ah *ah = ibah_to_rvtah(ibah);
+ struct rvt_dev_info *dev = ib_to_rvt(ibah->device);
+ unsigned long flags;
+
+ if (rvt_check_ah(ibah->device, init_attr->ah_attr))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev->n_ahs_lock, flags);
+ if (dev->n_ahs_allocated == dev->dparms.props.max_ah) {
+ spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+ return -ENOMEM;
+ }
+
+ dev->n_ahs_allocated++;
+ spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+
+ rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr);
+
+ if (dev->driver_f.notify_new_ah)
+ dev->driver_f.notify_new_ah(ibah->device,
+ init_attr->ah_attr, ah);
+
+ return 0;
+}
+
+/**
+ * rvt_destroy_ah - Destroy an address handle
+ * @ibah: address handle
+ * @destroy_flags: destroy address handle flags (see enum rdma_destroy_ah_flags)
+ * Return: 0 on success
+ */
+int rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags)
+{
+ struct rvt_dev_info *dev = ib_to_rvt(ibah->device);
+ struct rvt_ah *ah = ibah_to_rvtah(ibah);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->n_ahs_lock, flags);
+ dev->n_ahs_allocated--;
+ spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+
+ rdma_destroy_ah_attr(&ah->attr);
+ return 0;
+}
+
+/**
+ * rvt_modify_ah - modify an ah with given attrs
+ * @ibah: address handle to modify
+ * @ah_attr: attrs to apply
+ *
+ * Return: 0 on success
+ */
+int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
+{
+ struct rvt_ah *ah = ibah_to_rvtah(ibah);
+
+ if (rvt_check_ah(ibah->device, ah_attr))
+ return -EINVAL;
+
+ ah->attr = *ah_attr;
+
+ return 0;
+}
+
+/**
+ * rvt_query_ah - return attrs for ah
+ * @ibah: address handle to query
+ * @ah_attr: return info in this
+ *
+ * Return: always 0
+ */
+int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
+{
+ struct rvt_ah *ah = ibah_to_rvtah(ibah);
+
+ *ah_attr = ah->attr;
+
+ return 0;
+}
diff --git a/drivers/infiniband/sw/rdmavt/ah.h b/drivers/infiniband/sw/rdmavt/ah.h
new file mode 100644
index 000000000000..50ddf802bdcc
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/ah.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ */
+
+#ifndef DEF_RVTAH_H
+#define DEF_RVTAH_H
+
+#include <rdma/rdma_vt.h>
+
+int rvt_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
+int rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags);
+int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
+int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
+
+#endif /* DEF_RVTAH_H */
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
new file mode 100644
index 000000000000..0ca2743f1075
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2016 - 2018 Intel Corporation.
+ */
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <rdma/uverbs_ioctl.h>
+#include "cq.h"
+#include "vt.h"
+#include "trace.h"
+
+static struct workqueue_struct *comp_vector_wq;
+
+/**
+ * rvt_cq_enter - add a new entry to the completion queue
+ * @cq: completion queue
+ * @entry: work completion entry to add
+ * @solicited: true if @entry is solicited
+ *
+ * This may be called with qp->s_lock held.
+ *
+ * Return: true on success, false if the CQ is full.
+ */
+bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
+{
+ struct ib_uverbs_wc *uqueue = NULL;
+ struct ib_wc *kqueue = NULL;
+ struct rvt_cq_wc *u_wc = NULL;
+ struct rvt_k_cq_wc *k_wc = NULL;
+ unsigned long flags;
+ u32 head;
+ u32 next;
+ u32 tail;
+
+ spin_lock_irqsave(&cq->lock, flags);
+
+ if (cq->ip) {
+ u_wc = cq->queue;
+ uqueue = &u_wc->uqueue[0];
+ head = RDMA_READ_UAPI_ATOMIC(u_wc->head);
+ tail = RDMA_READ_UAPI_ATOMIC(u_wc->tail);
+ } else {
+ k_wc = cq->kqueue;
+ kqueue = &k_wc->kqueue[0];
+ head = k_wc->head;
+ tail = k_wc->tail;
+ }
+
+ /*
+ * Note that the head pointer might be writable by
+ * user processes. Take care to verify it is a sane value.
+ */
+ if (head >= (unsigned)cq->ibcq.cqe) {
+ head = cq->ibcq.cqe;
+ next = 0;
+ } else {
+ next = head + 1;
+ }
+
+ if (unlikely(next == tail || cq->cq_full)) {
+ struct rvt_dev_info *rdi = cq->rdi;
+
+ if (!cq->cq_full)
+ rvt_pr_err_ratelimited(rdi, "CQ is full!\n");
+ cq->cq_full = true;
+ spin_unlock_irqrestore(&cq->lock, flags);
+ if (cq->ibcq.event_handler) {
+ struct ib_event ev;
+
+ ev.device = cq->ibcq.device;
+ ev.element.cq = &cq->ibcq;
+ ev.event = IB_EVENT_CQ_ERR;
+ cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
+ }
+ return false;
+ }
+ trace_rvt_cq_enter(cq, entry, head);
+ if (uqueue) {
+ uqueue[head].wr_id = entry->wr_id;
+ uqueue[head].status = entry->status;
+ uqueue[head].opcode = entry->opcode;
+ uqueue[head].vendor_err = entry->vendor_err;
+ uqueue[head].byte_len = entry->byte_len;
+ uqueue[head].ex.imm_data = entry->ex.imm_data;
+ uqueue[head].qp_num = entry->qp->qp_num;
+ uqueue[head].src_qp = entry->src_qp;
+ uqueue[head].wc_flags = entry->wc_flags;
+ uqueue[head].pkey_index = entry->pkey_index;
+ uqueue[head].slid = ib_lid_cpu16(entry->slid);
+ uqueue[head].sl = entry->sl;
+ uqueue[head].dlid_path_bits = entry->dlid_path_bits;
+ uqueue[head].port_num = entry->port_num;
+ /* Make sure entry is written before the head index. */
+ RDMA_WRITE_UAPI_ATOMIC(u_wc->head, next);
+ } else {
+ kqueue[head] = *entry;
+ k_wc->head = next;
+ }
+
+ if (cq->notify == IB_CQ_NEXT_COMP ||
+ (cq->notify == IB_CQ_SOLICITED &&
+ (solicited || entry->status != IB_WC_SUCCESS))) {
+ /*
+ * This will cause send_complete() to be called in
+ * another thread.
+ */
+ cq->notify = RVT_CQ_NONE;
+ cq->triggered++;
+ queue_work_on(cq->comp_vector_cpu, comp_vector_wq,
+ &cq->comptask);
+ }
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+ return true;
+}
+EXPORT_SYMBOL(rvt_cq_enter);
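A hypothetical caller sketch (not in this patch): a device driver completing a receive builds an ib_wc and hands it to rvt_cq_enter(), asking for a solicited notification only when the sender requested one.

/* Illustrative only: post a receive completion to the QP's recv CQ. */
static void example_complete_recv(struct rvt_qp *qp, u64 wr_id, u32 byte_len,
				  bool solicited)
{
	struct ib_wc wc = {
		.wr_id	  = wr_id,
		.status	  = IB_WC_SUCCESS,
		.opcode	  = IB_WC_RECV,
		.byte_len = byte_len,
		.qp	  = &qp->ibqp,
	};

	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, solicited);
}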
+
+static void send_complete(struct work_struct *work)
+{
+ struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask);
+
+ /*
+ * The completion handler will most likely rearm the notification
+ * and poll for all pending entries. If a new completion entry
+ * is added while we are in this routine, queue_work()
+ * won't call us again until we return so we check triggered to
+ * see if we need to call the handler again.
+ */
+ for (;;) {
+ u8 triggered = cq->triggered;
+
+ /*
+ * IPoIB connected mode assumes the callback is from a
+ * soft IRQ. We simulate this by blocking "bottom halves".
+ * See the implementation for ipoib_cm_handle_tx_wc(),
+ * netif_tx_lock_bh() and netif_tx_lock().
+ */
+ local_bh_disable();
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+ local_bh_enable();
+
+ if (cq->triggered == triggered)
+ return;
+ }
+}
+
+/**
+ * rvt_create_cq - create a completion queue
+ * @ibcq: Allocated CQ
+ * @attr: creation attributes
+ * @attrs: uverbs bundle
+ *
+ * Called by ib_create_cq() in the generic verbs code.
+ *
+ * Return: 0 on success
+ */
+int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_udata *udata = &attrs->driver_udata;
+ struct ib_device *ibdev = ibcq->device;
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+ struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
+ struct rvt_cq_wc *u_wc = NULL;
+ struct rvt_k_cq_wc *k_wc = NULL;
+ u32 sz;
+ unsigned int entries = attr->cqe;
+ int comp_vector = attr->comp_vector;
+ int err;
+
+ if (attr->flags)
+ return -EOPNOTSUPP;
+
+ if (entries < 1 || entries > rdi->dparms.props.max_cqe)
+ return -EINVAL;
+
+ if (comp_vector < 0)
+ comp_vector = 0;
+
+ comp_vector = comp_vector % rdi->ibdev.num_comp_vectors;
+
+ /*
+ * Allocate the completion queue entries and head/tail pointers.
+ * This is allocated separately so that it can be resized and
+ * also mapped into user space.
+ * We need to use vmalloc() in order to support mmap and large
+ * numbers of entries.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ sz = sizeof(struct ib_uverbs_wc) * (entries + 1);
+ sz += sizeof(*u_wc);
+ u_wc = vmalloc_user(sz);
+ if (!u_wc)
+ return -ENOMEM;
+ } else {
+ sz = sizeof(struct ib_wc) * (entries + 1);
+ sz += sizeof(*k_wc);
+ k_wc = vzalloc_node(sz, rdi->dparms.node);
+ if (!k_wc)
+ return -ENOMEM;
+ }
+
+ /*
+ * Return the address of the WC as the offset to mmap.
+ * See rvt_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc);
+ if (IS_ERR(cq->ip)) {
+ err = PTR_ERR(cq->ip);
+ goto bail_wc;
+ }
+
+ err = ib_copy_to_udata(udata, &cq->ip->offset,
+ sizeof(cq->ip->offset));
+ if (err)
+ goto bail_ip;
+ }
+
+ spin_lock_irq(&rdi->n_cqs_lock);
+ if (rdi->n_cqs_allocated == rdi->dparms.props.max_cq) {
+ spin_unlock_irq(&rdi->n_cqs_lock);
+ err = -ENOMEM;
+ goto bail_ip;
+ }
+
+ rdi->n_cqs_allocated++;
+ spin_unlock_irq(&rdi->n_cqs_lock);
+
+ if (cq->ip) {
+ spin_lock_irq(&rdi->pending_lock);
+ list_add(&cq->ip->pending_mmaps, &rdi->pending_mmaps);
+ spin_unlock_irq(&rdi->pending_lock);
+ }
+
+ /*
+ * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
+ * The number of entries should be >= the number requested or return
+ * an error.
+ */
+ cq->rdi = rdi;
+ if (rdi->driver_f.comp_vect_cpu_lookup)
+ cq->comp_vector_cpu =
+ rdi->driver_f.comp_vect_cpu_lookup(rdi, comp_vector);
+ else
+ cq->comp_vector_cpu =
+ cpumask_first(cpumask_of_node(rdi->dparms.node));
+
+ cq->ibcq.cqe = entries;
+ cq->notify = RVT_CQ_NONE;
+ spin_lock_init(&cq->lock);
+ INIT_WORK(&cq->comptask, send_complete);
+ if (u_wc)
+ cq->queue = u_wc;
+ else
+ cq->kqueue = k_wc;
+
+ trace_rvt_create_cq(cq, attr);
+ return 0;
+
+bail_ip:
+ kfree(cq->ip);
+bail_wc:
+ vfree(u_wc);
+ vfree(k_wc);
+ return err;
+}
+
+/**
+ * rvt_destroy_cq - destroy a completion queue
+ * @ibcq: the completion queue to destroy.
+ * @udata: user data or NULL for kernel object
+ *
+ * Called by ib_destroy_cq() in the generic verbs code.
+ */
+int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+{
+ struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
+ struct rvt_dev_info *rdi = cq->rdi;
+
+ flush_work(&cq->comptask);
+ spin_lock_irq(&rdi->n_cqs_lock);
+ rdi->n_cqs_allocated--;
+ spin_unlock_irq(&rdi->n_cqs_lock);
+ if (cq->ip)
+ kref_put(&cq->ip->ref, rvt_release_mmap_info);
+ else
+ vfree(cq->kqueue);
+ return 0;
+}
+
+/**
+ * rvt_req_notify_cq - change the notification type for a completion queue
+ * @ibcq: the completion queue
+ * @notify_flags: the type of notification to request
+ *
+ * This may be called from interrupt context. Also called by
+ * ib_req_notify_cq() in the generic verbs code.
+ *
+ * Return: 0 for success.
+ */
+int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
+{
+ struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&cq->lock, flags);
+ /*
+ * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
+ * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
+ */
+ if (cq->notify != IB_CQ_NEXT_COMP)
+ cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
+
+ if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
+ if (cq->queue) {
+ if (RDMA_READ_UAPI_ATOMIC(cq->queue->head) !=
+ RDMA_READ_UAPI_ATOMIC(cq->queue->tail))
+ ret = 1;
+ } else {
+ if (cq->kqueue->head != cq->kqueue->tail)
+ ret = 1;
+ }
+ }
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ return ret;
+}
+
+/*
+ * rvt_resize_cq - change the size of the CQ
+ * @ibcq: the completion queue
+ * @cqe: the new number of completion queue entries
+ * @udata: user data or NULL for a kernel resize
+ *
+ * Return: 0 for success.
+ */
+int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+{
+ struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
+ u32 head, tail, n;
+ int ret;
+ u32 sz;
+ struct rvt_dev_info *rdi = cq->rdi;
+ struct rvt_cq_wc *u_wc = NULL;
+ struct rvt_cq_wc *old_u_wc = NULL;
+ struct rvt_k_cq_wc *k_wc = NULL;
+ struct rvt_k_cq_wc *old_k_wc = NULL;
+
+ if (cqe < 1 || cqe > rdi->dparms.props.max_cqe)
+ return -EINVAL;
+
+ /*
+ * Need to use vmalloc() if we want to support large #s of entries.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ sz = sizeof(struct ib_uverbs_wc) * (cqe + 1);
+ sz += sizeof(*u_wc);
+ u_wc = vmalloc_user(sz);
+ if (!u_wc)
+ return -ENOMEM;
+ } else {
+ sz = sizeof(struct ib_wc) * (cqe + 1);
+ sz += sizeof(*k_wc);
+ k_wc = vzalloc_node(sz, rdi->dparms.node);
+ if (!k_wc)
+ return -ENOMEM;
+ }
+ /* Check that we can write the offset to mmap. */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ __u64 offset = 0;
+
+ ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
+ if (ret)
+ goto bail_free;
+ }
+
+ spin_lock_irq(&cq->lock);
+ /*
+ * Make sure head and tail are sane since they
+ * might be user writable.
+ */
+ if (u_wc) {
+ old_u_wc = cq->queue;
+ head = RDMA_READ_UAPI_ATOMIC(old_u_wc->head);
+ tail = RDMA_READ_UAPI_ATOMIC(old_u_wc->tail);
+ } else {
+ old_k_wc = cq->kqueue;
+ head = old_k_wc->head;
+ tail = old_k_wc->tail;
+ }
+
+ if (head > (u32)cq->ibcq.cqe)
+ head = (u32)cq->ibcq.cqe;
+ if (tail > (u32)cq->ibcq.cqe)
+ tail = (u32)cq->ibcq.cqe;
+ if (head < tail)
+ n = cq->ibcq.cqe + 1 + head - tail;
+ else
+ n = head - tail;
+ if (unlikely((u32)cqe < n)) {
+ ret = -EINVAL;
+ goto bail_unlock;
+ }
+ for (n = 0; tail != head; n++) {
+ if (u_wc)
+ u_wc->uqueue[n] = old_u_wc->uqueue[tail];
+ else
+ k_wc->kqueue[n] = old_k_wc->kqueue[tail];
+ if (tail == (u32)cq->ibcq.cqe)
+ tail = 0;
+ else
+ tail++;
+ }
+ cq->ibcq.cqe = cqe;
+ if (u_wc) {
+ RDMA_WRITE_UAPI_ATOMIC(u_wc->head, n);
+ RDMA_WRITE_UAPI_ATOMIC(u_wc->tail, 0);
+ cq->queue = u_wc;
+ } else {
+ k_wc->head = n;
+ k_wc->tail = 0;
+ cq->kqueue = k_wc;
+ }
+ spin_unlock_irq(&cq->lock);
+
+ if (u_wc)
+ vfree(old_u_wc);
+ else
+ vfree(old_k_wc);
+
+ if (cq->ip) {
+ struct rvt_mmap_info *ip = cq->ip;
+
+ rvt_update_mmap_info(rdi, ip, sz, u_wc);
+
+ /*
+ * Return the offset to mmap.
+ * See rvt_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ ret = ib_copy_to_udata(udata, &ip->offset,
+ sizeof(ip->offset));
+ if (ret)
+ return ret;
+ }
+
+ spin_lock_irq(&rdi->pending_lock);
+ if (list_empty(&ip->pending_mmaps))
+ list_add(&ip->pending_mmaps, &rdi->pending_mmaps);
+ spin_unlock_irq(&rdi->pending_lock);
+ }
+
+ return 0;
+
+bail_unlock:
+ spin_unlock_irq(&cq->lock);
+bail_free:
+ vfree(u_wc);
+ vfree(k_wc);
+
+ return ret;
+}
+
+/**
+ * rvt_poll_cq - poll for work completion entries
+ * @ibcq: the completion queue to poll
+ * @num_entries: the maximum number of entries to return
+ * @entry: pointer to array where work completions are placed
+ *
+ * This may be called from interrupt context. Also called by ib_poll_cq()
+ * in the generic verbs code.
+ *
+ * Return: the number of completion entries polled.
+ */
+int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
+{
+ struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
+ struct rvt_k_cq_wc *wc;
+ unsigned long flags;
+ int npolled;
+ u32 tail;
+
+ /* The kernel can only poll a kernel completion queue */
+ if (cq->ip)
+ return -EINVAL;
+
+ spin_lock_irqsave(&cq->lock, flags);
+
+ wc = cq->kqueue;
+ tail = wc->tail;
+ if (tail > (u32)cq->ibcq.cqe)
+ tail = (u32)cq->ibcq.cqe;
+ for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
+ if (tail == wc->head)
+ break;
+ /* The kernel doesn't need a RMB since it has the lock. */
+ trace_rvt_cq_poll(cq, &wc->kqueue[tail], npolled);
+ *entry = wc->kqueue[tail];
+ if (tail >= cq->ibcq.cqe)
+ tail = 0;
+ else
+ tail++;
+ }
+ wc->tail = tail;
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ return npolled;
+}
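For completeness, the classic consumer loop (hypothetical, not in this patch) that rvt_poll_cq() and rvt_req_notify_cq() above are designed to serve: drain, rearm with IB_CQ_REPORT_MISSED_EVENTS, and drain again if the rearm reports pending entries.

/* Illustrative only: drain-then-rearm completion handling. */
static void example_cq_handler(struct ib_cq *cq, void *cq_context)
{
	struct ib_wc wc;

	do {
		while (ib_poll_cq(cq, 1, &wc) > 0) {
			/* process wc here */
		}
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				      IB_CQ_REPORT_MISSED_EVENTS) > 0);
}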
+
+/**
+ * rvt_driver_cq_init - Init cq resources on behalf of driver
+ *
+ * Return: 0 on success
+ */
+int rvt_driver_cq_init(void)
+{
+ comp_vector_wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_CPU_INTENSIVE,
+ 0, "rdmavt_cq");
+ if (!comp_vector_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * rvt_cq_exit - tear down cq resources
+ */
+void rvt_cq_exit(void)
+{
+ destroy_workqueue(comp_vector_wq);
+ comp_vector_wq = NULL;
+}
diff --git a/drivers/infiniband/sw/rdmavt/cq.h b/drivers/infiniband/sw/rdmavt/cq.h
new file mode 100644
index 000000000000..4028702a7b2f
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/cq.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2016 - 2018 Intel Corporation.
+ */
+
+#ifndef DEF_RVTCQ_H
+#define DEF_RVTCQ_H
+
+#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_cq.h>
+
+int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
+int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
+int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
+int rvt_driver_cq_init(void);
+void rvt_cq_exit(void);
+#endif /* DEF_RVTCQ_H */
diff --git a/drivers/infiniband/sw/rdmavt/mad.c b/drivers/infiniband/sw/rdmavt/mad.c
new file mode 100644
index 000000000000..846e014ecc55
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/mad.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ */
+
+#include <rdma/ib_mad.h>
+#include "mad.h"
+#include "vt.h"
+
+/**
+ * rvt_process_mad - process an incoming MAD packet
+ * @ibdev: the infiniband device this packet came in on
+ * @mad_flags: MAD flags
+ * @port_num: the port number this packet came in on, 1 based from ib core
+ * @in_wc: the work completion entry for this packet
+ * @in_grh: the global route header for this packet
+ * @in: the incoming MAD
+ * @in_mad_size: size of the incoming MAD
+ * @out: any outgoing MAD reply
+ * @out_mad_size: size of the outgoing MAD reply
+ * @out_mad_pkey_index: unused
+ *
+ * Note that the verbs framework has already done the MAD sanity checks,
+ * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
+ * MADs.
+ *
+ * This is called by the ib_mad module.
+ *
+ * Return: IB_MAD_RESULT_SUCCESS or error
+ */
+int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size,
+ u16 *out_mad_pkey_index)
+{
+ /*
+ * MAD processing is quite different between hfi1 and qib. Therefore
+ * this is expected to be provided by the driver. Other drivers in the
+ * future may choose to implement this but it should not be made into a
+ * requirement.
+ */
+ return IB_MAD_RESULT_FAILURE;
+}
+
+static void rvt_send_mad_handler(struct ib_mad_agent *agent,
+ struct ib_mad_send_wc *mad_send_wc)
+{
+ ib_free_send_mad(mad_send_wc->send_buf);
+}
+
+/**
+ * rvt_create_mad_agents - create mad agents
+ * @rdi: rvt dev struct
+ *
+ * If the driver needs to be notified of MAD agent creation, its callback
+ * is invoked for each port.
+ *
+ * Return: 0 on success
+ */
+int rvt_create_mad_agents(struct rvt_dev_info *rdi)
+{
+ struct ib_mad_agent *agent;
+ struct rvt_ibport *rvp;
+ int p;
+ int ret;
+
+ for (p = 0; p < rdi->dparms.nports; p++) {
+ rvp = rdi->ports[p];
+ agent = ib_register_mad_agent(&rdi->ibdev, p + 1,
+ IB_QPT_SMI,
+ NULL, 0, rvt_send_mad_handler,
+ NULL, NULL, 0);
+ if (IS_ERR(agent)) {
+ ret = PTR_ERR(agent);
+ goto err;
+ }
+
+ rvp->send_agent = agent;
+
+ if (rdi->driver_f.notify_create_mad_agent)
+ rdi->driver_f.notify_create_mad_agent(rdi, p);
+ }
+
+ return 0;
+
+err:
+ for (p = 0; p < rdi->dparms.nports; p++) {
+ rvp = rdi->ports[p];
+ if (rvp->send_agent) {
+ agent = rvp->send_agent;
+ rvp->send_agent = NULL;
+ ib_unregister_mad_agent(agent);
+ if (rdi->driver_f.notify_free_mad_agent)
+ rdi->driver_f.notify_free_mad_agent(rdi, p);
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * rvt_free_mad_agents - free up mad agents
+ * @rdi: rvt dev struct
+ *
+ * If the driver needs notification of MAD agent removal, its callback is invoked.
+ */
+void rvt_free_mad_agents(struct rvt_dev_info *rdi)
+{
+ struct ib_mad_agent *agent;
+ struct rvt_ibport *rvp;
+ int p;
+
+ for (p = 0; p < rdi->dparms.nports; p++) {
+ rvp = rdi->ports[p];
+ if (rvp->send_agent) {
+ agent = rvp->send_agent;
+ rvp->send_agent = NULL;
+ ib_unregister_mad_agent(agent);
+ }
+ if (rvp->sm_ah) {
+ rdma_destroy_ah(&rvp->sm_ah->ibah,
+ RDMA_DESTROY_AH_SLEEPABLE);
+ rvp->sm_ah = NULL;
+ }
+
+ if (rdi->driver_f.notify_free_mad_agent)
+ rdi->driver_f.notify_free_mad_agent(rdi, p);
+ }
+}
+
diff --git a/drivers/infiniband/sw/rdmavt/mad.h b/drivers/infiniband/sw/rdmavt/mad.h
new file mode 100644
index 000000000000..705a94537b55
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/mad.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ */
+
+#ifndef DEF_RVTMAD_H
+#define DEF_RVTMAD_H
+
+#include <rdma/rdma_vt.h>
+
+int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size,
+ u16 *out_mad_pkey_index);
+int rvt_create_mad_agents(struct rvt_dev_info *rdi);
+void rvt_free_mad_agents(struct rvt_dev_info *rdi);
+#endif /* DEF_RVTMAD_H */
diff --git a/drivers/infiniband/sw/rdmavt/mcast.c b/drivers/infiniband/sw/rdmavt/mcast.c
new file mode 100644
index 000000000000..59045bdce2a9
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/mcast.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ */
+
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/rculist.h>
+#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_qp.h>
+
+#include "mcast.h"
+
+/**
+ * rvt_driver_mcast_init - init resources for multicast
+ * @rdi: rvt dev struct
+ *
+ * This is per device that registers with rdmavt
+ */
+void rvt_driver_mcast_init(struct rvt_dev_info *rdi)
+{
+ /*
+ * Anything that needs setup for multicast on a per driver or per rdi
+ * basis should be done in here.
+ */
+ spin_lock_init(&rdi->n_mcast_grps_lock);
+}
+
+/**
+ * rvt_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
+ * @qp: the QP to link
+ */
+static struct rvt_mcast_qp *rvt_mcast_qp_alloc(struct rvt_qp *qp)
+{
+ struct rvt_mcast_qp *mqp;
+
+ mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
+ if (!mqp)
+ goto bail;
+
+ mqp->qp = qp;
+ rvt_get_qp(qp);
+
+bail:
+ return mqp;
+}
+
+static void rvt_mcast_qp_free(struct rvt_mcast_qp *mqp)
+{
+ struct rvt_qp *qp = mqp->qp;
+
+ /* Notify hfi1_destroy_qp() if it is waiting. */
+ rvt_put_qp(qp);
+
+ kfree(mqp);
+}
+
+/**
+ * rvt_mcast_alloc - allocate the multicast GID structure
+ * @mgid: the multicast GID
+ * @lid: the multicast LID (host order)
+ *
+ * A list of QPs will be attached to this structure.
+ */
+static struct rvt_mcast *rvt_mcast_alloc(union ib_gid *mgid, u16 lid)
+{
+ struct rvt_mcast *mcast;
+
+ mcast = kzalloc(sizeof(*mcast), GFP_KERNEL);
+ if (!mcast)
+ goto bail;
+
+ mcast->mcast_addr.mgid = *mgid;
+ mcast->mcast_addr.lid = lid;
+
+ INIT_LIST_HEAD(&mcast->qp_list);
+ init_waitqueue_head(&mcast->wait);
+ atomic_set(&mcast->refcount, 0);
+
+bail:
+ return mcast;
+}
+
+static void rvt_mcast_free(struct rvt_mcast *mcast)
+{
+ struct rvt_mcast_qp *p, *tmp;
+
+ list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
+ rvt_mcast_qp_free(p);
+
+ kfree(mcast);
+}
+
+/**
+ * rvt_mcast_find - search the global table for the given multicast GID/LID
+ * @ibp: the IB port structure
+ * @mgid: the multicast GID to search for
+ * @lid: the multicast LID portion of the multicast address (host order)
+ *
+ * NOTE: It is valid to have 1 MLID with multiple MGIDs. It is not valid
+ * to have 1 MGID with multiple MLIDs.
+ *
+ * The caller is responsible for decrementing the reference count if found.
+ *
+ * Return: NULL if not found.
+ */
+struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid,
+ u16 lid)
+{
+ struct rb_node *n;
+ unsigned long flags;
+ struct rvt_mcast *found = NULL;
+
+ spin_lock_irqsave(&ibp->lock, flags);
+ n = ibp->mcast_tree.rb_node;
+ while (n) {
+ int ret;
+ struct rvt_mcast *mcast;
+
+ mcast = rb_entry(n, struct rvt_mcast, rb_node);
+
+ ret = memcmp(mgid->raw, mcast->mcast_addr.mgid.raw,
+ sizeof(*mgid));
+ if (ret < 0) {
+ n = n->rb_left;
+ } else if (ret > 0) {
+ n = n->rb_right;
+ } else {
+ /* MGID/MLID must match */
+ if (mcast->mcast_addr.lid == lid) {
+ atomic_inc(&mcast->refcount);
+ found = mcast;
+ }
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ibp->lock, flags);
+ return found;
+}
+EXPORT_SYMBOL(rvt_mcast_find);
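A hypothetical receive-path sketch (not in this patch): look the group up, walk the attached QPs under RCU, and drop the reference that rvt_mcast_find() took.

/* Illustrative only: deliver a multicast packet to every attached QP. */
static void example_deliver_mcast(struct rvt_ibport *ibp, union ib_gid *mgid,
				  u16 mlid)
{
	struct rvt_mcast *mcast = rvt_mcast_find(ibp, mgid, mlid);
	struct rvt_mcast_qp *p;

	if (!mcast)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(p, &mcast->qp_list, list) {
		/* hand the packet to p->qp here */
	}
	rcu_read_unlock();

	/* drop the reference taken by rvt_mcast_find() */
	if (atomic_dec_return(&mcast->refcount) <= 1)
		wake_up(&mcast->wait);
}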
+
+/*
+ * rvt_mcast_add - insert mcast GID into table and attach QP struct
+ * @rdi: rvt dev struct
+ * @ibp: the IB port structure
+ * @mcast: the mcast group to insert
+ * @mqp: the QP to attach
+ *
+ * Return: zero if both were added. Return EEXIST if the GID was already in
+ * the table but the QP was added. Return ESRCH if the QP was already
+ * attached and neither structure was added. Return EINVAL if the MGID was
+ * found, but the MLID did NOT match.
+ */
+static int rvt_mcast_add(struct rvt_dev_info *rdi, struct rvt_ibport *ibp,
+ struct rvt_mcast *mcast, struct rvt_mcast_qp *mqp)
+{
+ struct rb_node **n = &ibp->mcast_tree.rb_node;
+ struct rb_node *pn = NULL;
+ int ret;
+
+ spin_lock_irq(&ibp->lock);
+
+ while (*n) {
+ struct rvt_mcast *tmcast;
+ struct rvt_mcast_qp *p;
+
+ pn = *n;
+ tmcast = rb_entry(pn, struct rvt_mcast, rb_node);
+
+ ret = memcmp(mcast->mcast_addr.mgid.raw,
+ tmcast->mcast_addr.mgid.raw,
+ sizeof(mcast->mcast_addr.mgid));
+ if (ret < 0) {
+ n = &pn->rb_left;
+ continue;
+ }
+ if (ret > 0) {
+ n = &pn->rb_right;
+ continue;
+ }
+
+ if (tmcast->mcast_addr.lid != mcast->mcast_addr.lid) {
+ ret = EINVAL;
+ goto bail;
+ }
+
+ /* Search the QP list to see if this is already there. */
+ list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
+ if (p->qp == mqp->qp) {
+ ret = ESRCH;
+ goto bail;
+ }
+ }
+ if (tmcast->n_attached ==
+ rdi->dparms.props.max_mcast_qp_attach) {
+ ret = ENOMEM;
+ goto bail;
+ }
+
+ tmcast->n_attached++;
+
+ list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
+ ret = EEXIST;
+ goto bail;
+ }
+
+ spin_lock(&rdi->n_mcast_grps_lock);
+ if (rdi->n_mcast_grps_allocated == rdi->dparms.props.max_mcast_grp) {
+ spin_unlock(&rdi->n_mcast_grps_lock);
+ ret = ENOMEM;
+ goto bail;
+ }
+
+ rdi->n_mcast_grps_allocated++;
+ spin_unlock(&rdi->n_mcast_grps_lock);
+
+ mcast->n_attached++;
+
+ list_add_tail_rcu(&mqp->list, &mcast->qp_list);
+
+ atomic_inc(&mcast->refcount);
+ rb_link_node(&mcast->rb_node, pn, n);
+ rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);
+
+ ret = 0;
+
+bail:
+ spin_unlock_irq(&ibp->lock);
+
+ return ret;
+}
+
+/**
+ * rvt_attach_mcast - attach a qp to a multicast group
+ * @ibqp: Infiniband qp
+ * @gid: multicast GID
+ * @lid: multicast lid
+ *
+ * Return: 0 on success
+ */
+int rvt_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+ struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
+ struct rvt_mcast *mcast;
+ struct rvt_mcast_qp *mqp;
+ int ret = -ENOMEM;
+
+ if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
+ return -EINVAL;
+
+ /*
+ * Allocate data structures since it's better to do this outside of
+ * spin locks and it will most likely be needed.
+ */
+ mcast = rvt_mcast_alloc(gid, lid);
+ if (!mcast)
+ return -ENOMEM;
+
+ mqp = rvt_mcast_qp_alloc(qp);
+ if (!mqp)
+ goto bail_mcast;
+
+ switch (rvt_mcast_add(rdi, ibp, mcast, mqp)) {
+ case ESRCH:
+ /* Neither was used: OK to attach the same QP twice. */
+ ret = 0;
+ goto bail_mqp;
+ case EEXIST: /* The mcast wasn't used */
+ ret = 0;
+ goto bail_mcast;
+ case ENOMEM:
+ /* Exceeded the maximum number of mcast groups. */
+ ret = -ENOMEM;
+ goto bail_mqp;
+ case EINVAL:
+ /* Invalid MGID/MLID pair */
+ ret = -EINVAL;
+ goto bail_mqp;
+ default:
+ break;
+ }
+
+ return 0;
+
+bail_mqp:
+ rvt_mcast_qp_free(mqp);
+
+bail_mcast:
+ rvt_mcast_free(mcast);
+
+ return ret;
+}
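Consumers do not call rvt_attach_mcast() directly; a hedged sketch (not in this patch) of the usual path through the core API, which dispatches to the op above for rdmavt devices.

/* Illustrative only: a kernel ULP joins a group through the core API. */
static int example_join_mcast(struct ib_qp *qp, union ib_gid *mgid, u16 mlid)
{
	int ret = ib_attach_mcast(qp, mgid, mlid);

	if (ret)
		pr_warn("multicast attach failed: %d\n", ret);
	return ret;
}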
+
+/**
+ * rvt_detach_mcast - remove a qp from a multicast group
+ * @ibqp: Infiniband qp
+ * @gid: multicast GID
+ * @lid: multicast lid
+ *
+ * Return: 0 on success
+ */
+int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+ struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
+ struct rvt_mcast *mcast = NULL;
+ struct rvt_mcast_qp *p, *tmp, *delp = NULL;
+ struct rb_node *n;
+ int last = 0;
+ int ret = 0;
+
+ if (ibqp->qp_num <= 1)
+ return -EINVAL;
+
+ spin_lock_irq(&ibp->lock);
+
+ /* Find the GID in the mcast table. */
+ n = ibp->mcast_tree.rb_node;
+ while (1) {
+ if (!n) {
+ spin_unlock_irq(&ibp->lock);
+ return -EINVAL;
+ }
+
+ mcast = rb_entry(n, struct rvt_mcast, rb_node);
+ ret = memcmp(gid->raw, mcast->mcast_addr.mgid.raw,
+ sizeof(*gid));
+ if (ret < 0) {
+ n = n->rb_left;
+ } else if (ret > 0) {
+ n = n->rb_right;
+ } else {
+ /* MGID/MLID must match */
+ if (mcast->mcast_addr.lid != lid) {
+ spin_unlock_irq(&ibp->lock);
+ return -EINVAL;
+ }
+ break;
+ }
+ }
+
+ /* Search the QP list. */
+ list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
+ if (p->qp != qp)
+ continue;
+ /*
+ * We found it, so remove it, but don't poison the forward
+ * link until we are sure there are no list walkers.
+ */
+ list_del_rcu(&p->list);
+ mcast->n_attached--;
+ delp = p;
+
+ /* If this was the last attached QP, remove the GID too. */
+ if (list_empty(&mcast->qp_list)) {
+ rb_erase(&mcast->rb_node, &ibp->mcast_tree);
+ last = 1;
+ }
+ break;
+ }
+
+ spin_unlock_irq(&ibp->lock);
+ /* QP not attached */
+ if (!delp)
+ return -EINVAL;
+
+ /*
+ * Wait for any list walkers to finish before freeing the
+ * list element.
+ */
+ wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
+ rvt_mcast_qp_free(delp);
+
+ if (last) {
+ atomic_dec(&mcast->refcount);
+ wait_event(mcast->wait, !atomic_read(&mcast->refcount));
+ rvt_mcast_free(mcast);
+ spin_lock_irq(&rdi->n_mcast_grps_lock);
+ rdi->n_mcast_grps_allocated--;
+ spin_unlock_irq(&rdi->n_mcast_grps_lock);
+ }
+
+ return 0;
+}
+
+/**
+ * rvt_mcast_tree_empty - determine if any qps are attached to any mcast group
+ * @rdi: rvt dev struct
+ *
+ * Return: the number of ports with at least one mcast group attached
+ */
+int rvt_mcast_tree_empty(struct rvt_dev_info *rdi)
+{
+ int i;
+ int in_use = 0;
+
+ for (i = 0; i < rdi->dparms.nports; i++)
+ if (rdi->ports[i]->mcast_tree.rb_node)
+ in_use++;
+ return in_use;
+}
diff --git a/drivers/infiniband/sw/rdmavt/mcast.h b/drivers/infiniband/sw/rdmavt/mcast.h
new file mode 100644
index 000000000000..7627e0d49d09
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/mcast.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ */
+
+#ifndef DEF_RVTMCAST_H
+#define DEF_RVTMCAST_H
+
+#include <rdma/rdma_vt.h>
+
+void rvt_driver_mcast_init(struct rvt_dev_info *rdi);
+int rvt_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
+int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
+int rvt_mcast_tree_empty(struct rvt_dev_info *rdi);
+
+#endif /* DEF_RVTMCAST_H */
diff --git a/drivers/infiniband/sw/rdmavt/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c
new file mode 100644
index 000000000000..46e3b3e0643a
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/mmap.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ */
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <rdma/uverbs_ioctl.h>
+#include "mmap.h"
+
+/**
+ * rvt_mmap_init - init link list and lock for mem map
+ * @rdi: rvt dev struct
+ */
+void rvt_mmap_init(struct rvt_dev_info *rdi)
+{
+ INIT_LIST_HEAD(&rdi->pending_mmaps);
+ spin_lock_init(&rdi->pending_lock);
+ rdi->mmap_offset = PAGE_SIZE;
+ spin_lock_init(&rdi->mmap_offset_lock);
+}
+
+/**
+ * rvt_release_mmap_info - free mmap info structure
+ * @ref: a pointer to the kref within struct rvt_mmap_info
+ */
+void rvt_release_mmap_info(struct kref *ref)
+{
+ struct rvt_mmap_info *ip =
+ container_of(ref, struct rvt_mmap_info, ref);
+ struct rvt_dev_info *rdi = ib_to_rvt(ip->context->device);
+
+ spin_lock_irq(&rdi->pending_lock);
+ list_del(&ip->pending_mmaps);
+ spin_unlock_irq(&rdi->pending_lock);
+
+ vfree(ip->obj);
+ kfree(ip);
+}
+
+static void rvt_vma_open(struct vm_area_struct *vma)
+{
+ struct rvt_mmap_info *ip = vma->vm_private_data;
+
+ kref_get(&ip->ref);
+}
+
+static void rvt_vma_close(struct vm_area_struct *vma)
+{
+ struct rvt_mmap_info *ip = vma->vm_private_data;
+
+ kref_put(&ip->ref, rvt_release_mmap_info);
+}
+
+static const struct vm_operations_struct rvt_vm_ops = {
+ .open = rvt_vma_open,
+ .close = rvt_vma_close,
+};
+
+/**
+ * rvt_mmap - create a new mmap region
+ * @context: the IB user context of the process making the mmap() call
+ * @vma: the VMA to be initialized
+ *
+ * Return: zero if the mmap is OK. Otherwise, return an errno.
+ */
+int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(context->device);
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long size = vma->vm_end - vma->vm_start;
+ struct rvt_mmap_info *ip, *pp;
+ int ret = -EINVAL;
+
+ /*
+ * Search the device's list of objects waiting for a mmap call.
+ * Normally, this list is very short since a call to create a
+ * CQ, QP, or SRQ is soon followed by a call to mmap().
+ */
+ spin_lock_irq(&rdi->pending_lock);
+ list_for_each_entry_safe(ip, pp, &rdi->pending_mmaps,
+ pending_mmaps) {
+ /* Only the creator is allowed to mmap the object */
+ if (context != ip->context || (__u64)offset != ip->offset)
+ continue;
+ /* Don't allow a mmap larger than the object. */
+ if (size > ip->size)
+ break;
+
+ list_del_init(&ip->pending_mmaps);
+ spin_unlock_irq(&rdi->pending_lock);
+
+ ret = remap_vmalloc_range(vma, ip->obj, 0);
+ if (ret)
+ goto done;
+ vma->vm_ops = &rvt_vm_ops;
+ vma->vm_private_data = ip;
+ rvt_vma_open(vma);
+ goto done;
+ }
+ spin_unlock_irq(&rdi->pending_lock);
+done:
+ return ret;
+}
+
+/**
+ * rvt_create_mmap_info - allocate information for rvt_mmap
+ * @rdi: rvt dev struct
+ * @size: size in bytes to map
+ * @udata: user data (must be valid!)
+ * @obj: opaque pointer to a cq, wq etc
+ *
+ * Return: rvt_mmap struct on success, ERR_PTR on failure
+ */
+struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
+ struct ib_udata *udata, void *obj)
+{
+ struct rvt_mmap_info *ip;
+
+ if (!udata)
+ return ERR_PTR(-EINVAL);
+
+ ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node);
+ if (!ip)
+ return ERR_PTR(-ENOMEM);
+
+ size = PAGE_ALIGN(size);
+
+ spin_lock_irq(&rdi->mmap_offset_lock);
+ if (rdi->mmap_offset == 0)
+ rdi->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
+ ip->offset = rdi->mmap_offset;
+ rdi->mmap_offset += ALIGN(size, SHMLBA);
+ spin_unlock_irq(&rdi->mmap_offset_lock);
+
+ INIT_LIST_HEAD(&ip->pending_mmaps);
+ ip->size = size;
+ ip->context =
+ container_of(udata, struct uverbs_attr_bundle, driver_udata)
+ ->context;
+ ip->obj = obj;
+ kref_init(&ip->ref);
+
+ return ip;
+}
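The offset bookkeeping above is easier to see with concrete numbers. The following is a minimal standalone sketch (not driver code) that mirrors how successive objects receive non-overlapping, SHMLBA-aligned offsets; the PAGE_SZ, SHMLBA_SZ and object sizes used are assumed values, not the kernel's architecture-specific ones.

#include <stdio.h>

/*
 * Illustration only: mirrors the offset hand-out in rvt_create_mmap_info().
 * PAGE_SZ, SHMLBA_SZ and the object sizes are assumptions for this sketch.
 */
#define PAGE_SZ		4096UL
#define SHMLBA_SZ	(4 * PAGE_SZ)
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long mmap_offset = PAGE_SZ;		/* rvt_mmap_init() start */
	unsigned long sizes[] = { 6000, 100, 20000 };	/* made-up object sizes */

	for (int i = 0; i < 3; i++) {
		unsigned long size = ALIGN_UP(sizes[i], PAGE_SZ);
		unsigned long offset = mmap_offset;	/* handed to userspace */

		mmap_offset += ALIGN_UP(size, SHMLBA_SZ);
		printf("obj %d: offset=0x%lx size=%lu\n", i, offset, size);
	}
	return 0;
}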
+
+/**
+ * rvt_update_mmap_info - update a mem map
+ * @rdi: rvt dev struct
+ * @ip: mmap info pointer
+ * @size: new size to map
+ * @obj: opaque pointer to cq, wq, etc.
+ */
+void rvt_update_mmap_info(struct rvt_dev_info *rdi, struct rvt_mmap_info *ip,
+ u32 size, void *obj)
+{
+ size = PAGE_ALIGN(size);
+
+ spin_lock_irq(&rdi->mmap_offset_lock);
+ if (rdi->mmap_offset == 0)
+ rdi->mmap_offset = PAGE_SIZE;
+ ip->offset = rdi->mmap_offset;
+ rdi->mmap_offset += size;
+ spin_unlock_irq(&rdi->mmap_offset_lock);
+
+ ip->size = size;
+ ip->obj = obj;
+}
diff --git a/drivers/infiniband/sw/rdmavt/mmap.h b/drivers/infiniband/sw/rdmavt/mmap.h
new file mode 100644
index 000000000000..29aaca3e8b83
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/mmap.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ */
+
+#ifndef DEF_RDMAVTMMAP_H
+#define DEF_RDMAVTMMAP_H
+
+#include <rdma/rdma_vt.h>
+
+void rvt_mmap_init(struct rvt_dev_info *rdi);
+void rvt_release_mmap_info(struct kref *ref);
+int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
+ struct ib_udata *udata, void *obj);
+void rvt_update_mmap_info(struct rvt_dev_info *rdi, struct rvt_mmap_info *ip,
+ u32 size, void *obj);
+
+#endif /* DEF_RDMAVTMMAP_H */
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
new file mode 100644
index 000000000000..86e482593a85
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -0,0 +1,922 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ */
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <rdma/ib_umem.h>
+#include <rdma/rdma_vt.h>
+#include "vt.h"
+#include "mr.h"
+#include "trace.h"
+
+/**
+ * rvt_driver_mr_init - Init MR resources per driver
+ * @rdi: rvt dev struct
+ *
+ * Do any initialization needed when a driver registers with rdmavt.
+ *
+ * Return: 0 on success or errno on failure
+ */
+int rvt_driver_mr_init(struct rvt_dev_info *rdi)
+{
+ unsigned int lkey_table_size = rdi->dparms.lkey_table_size;
+ unsigned lk_tab_size;
+ int i;
+
+ /*
+ * The top hfi1_lkey_table_size bits are used to index the
+ * table. The lower 8 bits can be owned by the user (copied from
+ * the LKEY). The remaining bits act as a generation number or tag.
+ */
+ if (!lkey_table_size)
+ return -EINVAL;
+
+ spin_lock_init(&rdi->lkey_table.lock);
+
+ /* ensure generation is at least 4 bits */
+ if (lkey_table_size > RVT_MAX_LKEY_TABLE_BITS) {
+ rvt_pr_warn(rdi, "lkey bits %u too large, reduced to %u\n",
+ lkey_table_size, RVT_MAX_LKEY_TABLE_BITS);
+ rdi->dparms.lkey_table_size = RVT_MAX_LKEY_TABLE_BITS;
+ lkey_table_size = rdi->dparms.lkey_table_size;
+ }
+ rdi->lkey_table.max = 1 << lkey_table_size;
+ rdi->lkey_table.shift = 32 - lkey_table_size;
+ lk_tab_size = rdi->lkey_table.max * sizeof(*rdi->lkey_table.table);
+ rdi->lkey_table.table = (struct rvt_mregion __rcu **)
+ vmalloc_node(lk_tab_size, rdi->dparms.node);
+ if (!rdi->lkey_table.table)
+ return -ENOMEM;
+
+ RCU_INIT_POINTER(rdi->dma_mr, NULL);
+ for (i = 0; i < rdi->lkey_table.max; i++)
+ RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);
+
+ rdi->dparms.props.max_mr = rdi->lkey_table.max;
+ return 0;
+}
+
+/**
+ * rvt_mr_exit - clean up MR
+ * @rdi: rvt dev structure
+ *
+ * called when drivers have unregistered or perhaps failed to register with us
+ */
+void rvt_mr_exit(struct rvt_dev_info *rdi)
+{
+ if (rdi->dma_mr)
+ rvt_pr_err(rdi, "DMA MR not null!\n");
+
+ vfree(rdi->lkey_table.table);
+}
+
+static void rvt_deinit_mregion(struct rvt_mregion *mr)
+{
+ int i = mr->mapsz;
+
+ mr->mapsz = 0;
+ while (i)
+ kfree(mr->map[--i]);
+ percpu_ref_exit(&mr->refcount);
+}
+
+static void __rvt_mregion_complete(struct percpu_ref *ref)
+{
+ struct rvt_mregion *mr = container_of(ref, struct rvt_mregion,
+ refcount);
+
+ complete(&mr->comp);
+}
+
+static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd,
+ int count, unsigned int percpu_flags)
+{
+ int m, i = 0;
+ struct rvt_dev_info *dev = ib_to_rvt(pd->device);
+
+ mr->mapsz = 0;
+ m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
+ for (; i < m; i++) {
+ mr->map[i] = kzalloc_node(sizeof(*mr->map[0]), GFP_KERNEL,
+ dev->dparms.node);
+ if (!mr->map[i])
+ goto bail;
+ mr->mapsz++;
+ }
+ init_completion(&mr->comp);
+ /* count returning the ptr to user */
+ if (percpu_ref_init(&mr->refcount, &__rvt_mregion_complete,
+ percpu_flags, GFP_KERNEL))
+ goto bail;
+
+ atomic_set(&mr->lkey_invalid, 0);
+ mr->pd = pd;
+ mr->max_segs = count;
+ return 0;
+bail:
+ rvt_deinit_mregion(mr);
+ return -ENOMEM;
+}
+
+/**
+ * rvt_alloc_lkey - allocate an lkey
+ * @mr: memory region that this lkey protects
+ * @dma_region: 0->normal key, 1->restricted DMA key
+ *
+ * Returns 0 if successful, otherwise returns -errno.
+ *
+ * Increments mr reference count as required.
+ *
+ * Sets the lkey field of mr for non-dma regions.
+ *
+ */
+static int rvt_alloc_lkey(struct rvt_mregion *mr, int dma_region)
+{
+ unsigned long flags;
+ u32 r;
+ u32 n;
+ int ret = 0;
+ struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
+ struct rvt_lkey_table *rkt = &dev->lkey_table;
+
+ rvt_get_mr(mr);
+ spin_lock_irqsave(&rkt->lock, flags);
+
+ /* special case for dma_mr lkey == 0 */
+ if (dma_region) {
+ struct rvt_mregion *tmr;
+
+ tmr = rcu_access_pointer(dev->dma_mr);
+ if (!tmr) {
+ mr->lkey_published = 1;
+ /* Ensure published is written first */
+ rcu_assign_pointer(dev->dma_mr, mr);
+ rvt_get_mr(mr);
+ }
+ goto success;
+ }
+
+ /* Find the next available LKEY */
+ r = rkt->next;
+ n = r;
+ for (;;) {
+ if (!rcu_access_pointer(rkt->table[r]))
+ break;
+ r = (r + 1) & (rkt->max - 1);
+ if (r == n)
+ goto bail;
+ }
+ rkt->next = (r + 1) & (rkt->max - 1);
+ /*
+ * Make sure lkey is never zero which is reserved to indicate an
+ * unrestricted LKEY.
+ */
+ rkt->gen++;
+ /*
+ * bits are capped to ensure enough bits for generation number
+ */
+ mr->lkey = (r << (32 - dev->dparms.lkey_table_size)) |
+ ((((1 << (24 - dev->dparms.lkey_table_size)) - 1) & rkt->gen)
+ << 8);
+ if (mr->lkey == 0) {
+ mr->lkey |= 1 << 8;
+ rkt->gen++;
+ }
+ mr->lkey_published = 1;
+ /* Ensure published is written first */
+ rcu_assign_pointer(rkt->table[r], mr);
+success:
+ spin_unlock_irqrestore(&rkt->lock, flags);
+out:
+ return ret;
+bail:
+ rvt_put_mr(mr);
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ ret = -ENOMEM;
+ goto out;
+}
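The bit packing of the lkey above is easier to follow with concrete numbers. Below is a standalone illustration, not driver code, assuming a 16-bit lkey_table_size; the slot index and generation values are made up.

#include <stdio.h>

/*
 * Illustration only: mirrors the lkey packing in rvt_alloc_lkey() for an
 * assumed lkey_table_size of 16 bits.  The top bits index the table, the
 * middle bits carry the generation tag, the low 8 bits stay available.
 */
int main(void)
{
	unsigned int lkey_table_size = 16;	/* assumed dparms value  */
	unsigned int r = 5;			/* free table slot found */
	unsigned int gen = 3;			/* current generation    */
	unsigned int lkey;

	lkey = (r << (32 - lkey_table_size)) |
	       ((((1u << (24 - lkey_table_size)) - 1) & gen) << 8);

	/* prints lkey=0x00050300: slot 5, generation 3, low byte left clear */
	printf("lkey=0x%08x\n", lkey);
	return 0;
}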
+
+/**
+ * rvt_free_lkey - free an lkey
+ * @mr: mr to free from tables
+ */
+static void rvt_free_lkey(struct rvt_mregion *mr)
+{
+ unsigned long flags;
+ u32 lkey = mr->lkey;
+ u32 r;
+ struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
+ struct rvt_lkey_table *rkt = &dev->lkey_table;
+ int freed = 0;
+
+ spin_lock_irqsave(&rkt->lock, flags);
+ if (!lkey) {
+ if (mr->lkey_published) {
+ mr->lkey_published = 0;
+ /* ensure published is written before pointer */
+ rcu_assign_pointer(dev->dma_mr, NULL);
+ rvt_put_mr(mr);
+ }
+ } else {
+ if (!mr->lkey_published)
+ goto out;
+ r = lkey >> (32 - dev->dparms.lkey_table_size);
+ mr->lkey_published = 0;
+ /* ensure published is written before pointer */
+ rcu_assign_pointer(rkt->table[r], NULL);
+ }
+ freed++;
+out:
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ if (freed)
+ percpu_ref_kill(&mr->refcount);
+}
+
+static struct rvt_mr *__rvt_alloc_mr(int count, struct ib_pd *pd)
+{
+ struct rvt_mr *mr;
+ int rval = -ENOMEM;
+ int m;
+
+ /* Allocate struct plus pointers to first level page tables. */
+ m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
+ mr = kzalloc(struct_size(mr, mr.map, m), GFP_KERNEL);
+ if (!mr)
+ goto bail;
+
+ rval = rvt_init_mregion(&mr->mr, pd, count, 0);
+ if (rval)
+ goto bail;
+ /*
+ * ib_reg_phys_mr() will initialize mr->ibmr except for
+ * lkey and rkey.
+ */
+ rval = rvt_alloc_lkey(&mr->mr, 0);
+ if (rval)
+ goto bail_mregion;
+ mr->ibmr.lkey = mr->mr.lkey;
+ mr->ibmr.rkey = mr->mr.lkey;
+done:
+ return mr;
+
+bail_mregion:
+ rvt_deinit_mregion(&mr->mr);
+bail:
+ kfree(mr);
+ mr = ERR_PTR(rval);
+ goto done;
+}
+
+static void __rvt_free_mr(struct rvt_mr *mr)
+{
+ rvt_free_lkey(&mr->mr);
+ rvt_deinit_mregion(&mr->mr);
+ kfree(mr);
+}
+
+/**
+ * rvt_get_dma_mr - get a DMA memory region
+ * @pd: protection domain for this memory region
+ * @acc: access flags
+ *
+ * Return: the memory region on success, otherwise returns an errno.
+ */
+struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ struct rvt_mr *mr;
+ struct ib_mr *ret;
+ int rval;
+
+ if (ibpd_to_rvtpd(pd)->user)
+ return ERR_PTR(-EPERM);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ rval = rvt_init_mregion(&mr->mr, pd, 0, 0);
+ if (rval) {
+ ret = ERR_PTR(rval);
+ goto bail;
+ }
+
+ rval = rvt_alloc_lkey(&mr->mr, 1);
+ if (rval) {
+ ret = ERR_PTR(rval);
+ goto bail_mregion;
+ }
+
+ mr->mr.access_flags = acc;
+ ret = &mr->ibmr;
+done:
+ return ret;
+
+bail_mregion:
+ rvt_deinit_mregion(&mr->mr);
+bail:
+ kfree(mr);
+ goto done;
+}
+
+/**
+ * rvt_reg_user_mr - register a userspace memory region
+ * @pd: protection domain for this memory region
+ * @start: starting userspace address
+ * @length: length of region to register
+ * @virt_addr: associated virtual address
+ * @mr_access_flags: access flags for this memory region
+ * @dmah: dma handle
+ * @udata: unused by the driver
+ *
+ * Return: the memory region on success, otherwise returns an errno.
+ */
+struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_dmah *dmah,
+ struct ib_udata *udata)
+{
+ struct rvt_mr *mr;
+ struct ib_umem *umem;
+ struct sg_page_iter sg_iter;
+ int n, m;
+ struct ib_mr *ret;
+
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (length == 0)
+ return ERR_PTR(-EINVAL);
+
+ umem = ib_umem_get(pd->device, start, length, mr_access_flags);
+ if (IS_ERR(umem))
+ return ERR_CAST(umem);
+
+ n = ib_umem_num_pages(umem);
+
+ mr = __rvt_alloc_mr(n, pd);
+ if (IS_ERR(mr)) {
+ ret = ERR_CAST(mr);
+ goto bail_umem;
+ }
+
+ mr->mr.user_base = start;
+ mr->mr.iova = virt_addr;
+ mr->mr.length = length;
+ mr->mr.offset = ib_umem_offset(umem);
+ mr->mr.access_flags = mr_access_flags;
+ mr->umem = umem;
+
+ mr->mr.page_shift = PAGE_SHIFT;
+ m = 0;
+ n = 0;
+ for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) {
+ void *vaddr;
+
+ vaddr = page_address(sg_page_iter_page(&sg_iter));
+ if (!vaddr) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail_inval;
+ }
+ mr->mr.map[m]->segs[n].vaddr = vaddr;
+ mr->mr.map[m]->segs[n].length = PAGE_SIZE;
+ trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, PAGE_SIZE);
+ if (++n == RVT_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ return &mr->ibmr;
+
+bail_inval:
+ __rvt_free_mr(mr);
+
+bail_umem:
+ ib_umem_release(umem);
+
+ return ret;
+}
+
+/**
+ * rvt_dereg_clean_qp_cb - callback from iterator
+ * @qp: the qp
+ * @v: the mregion (as u64)
+ *
+ * This routine fields the callback for all QPs and, for QPs in
+ * the same PD as the MR, calls rvt_qp_mr_clean() to potentially
+ * clean up references.
+ */
+static void rvt_dereg_clean_qp_cb(struct rvt_qp *qp, u64 v)
+{
+ struct rvt_mregion *mr = (struct rvt_mregion *)v;
+
+ /* skip PDs that are not ours */
+ if (mr->pd != qp->ibqp.pd)
+ return;
+ rvt_qp_mr_clean(qp, mr->lkey);
+}
+
+/**
+ * rvt_dereg_clean_qps - find QPs for reference cleanup
+ * @mr: the MR that is being deregistered
+ *
+ * This routine iterates RC QPs looking for references
+ * to the lkey noted in mr.
+ */
+static void rvt_dereg_clean_qps(struct rvt_mregion *mr)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device);
+
+ rvt_qp_iter(rdi, (u64)mr, rvt_dereg_clean_qp_cb);
+}
+
+/**
+ * rvt_check_refs - check references
+ * @mr: the mregion
+ * @t: the caller identification
+ *
+ * This routine checks whether the MR still holds references
+ * while it is being de-registered.
+ *
+ * If the count is non-zero, the code calls a clean routine then
+ * waits up to a timeout for the count to reach zero.
+ */
+static int rvt_check_refs(struct rvt_mregion *mr, const char *t)
+{
+ unsigned long timeout;
+ struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device);
+
+ if (mr->lkey) {
+ /* avoid dma mr */
+ rvt_dereg_clean_qps(mr);
+ /* @mr was indexed on rcu protected @lkey_table */
+ synchronize_rcu();
+ }
+
+ timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ);
+ if (!timeout) {
+ rvt_pr_err(rdi,
+ "%s timeout mr %p pd %p lkey %x refcount %ld\n",
+ t, mr, mr->pd, mr->lkey,
+ atomic_long_read(&mr->refcount.data->count));
+ rvt_get_mr(mr);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/**
+ * rvt_mr_has_lkey - does the MR match the lkey
+ * @mr: the mregion
+ * @lkey: the lkey
+ */
+bool rvt_mr_has_lkey(struct rvt_mregion *mr, u32 lkey)
+{
+ return mr && lkey == mr->lkey;
+}
+
+/**
+ * rvt_ss_has_lkey - is an MR with this lkey referenced by the sge state
+ * @ss: the sge state
+ * @lkey: the lkey
+ *
+ * This code tests for an MR in the indicated
+ * sge state.
+ */
+bool rvt_ss_has_lkey(struct rvt_sge_state *ss, u32 lkey)
+{
+ int i;
+ bool rval = false;
+
+ if (!ss->num_sge)
+ return rval;
+ /* first one */
+ rval = rvt_mr_has_lkey(ss->sge.mr, lkey);
+ /* any others */
+ for (i = 0; !rval && i < ss->num_sge - 1; i++)
+ rval = rvt_mr_has_lkey(ss->sg_list[i].mr, lkey);
+ return rval;
+}
+
+/**
+ * rvt_dereg_mr - unregister and free a memory region
+ * @ibmr: the memory region to free
+ * @udata: unused by the driver
+ *
+ * Note that this is called to free MRs created by rvt_get_dma_mr()
+ * or rvt_reg_user_mr().
+ *
+ * Returns 0 on success.
+ */
+int rvt_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+{
+ struct rvt_mr *mr = to_imr(ibmr);
+ int ret;
+
+ rvt_free_lkey(&mr->mr);
+
+ rvt_put_mr(&mr->mr); /* will set completion if last */
+ ret = rvt_check_refs(&mr->mr, __func__);
+ if (ret)
+ goto out;
+ rvt_deinit_mregion(&mr->mr);
+ ib_umem_release(mr->umem);
+ kfree(mr);
+out:
+ return ret;
+}
+
+/**
+ * rvt_alloc_mr - Allocate a memory region usable with fast register work requests
+ * @pd: protection domain for this memory region
+ * @mr_type: mem region type
+ * @max_num_sg: Max number of segments allowed
+ *
+ * Return: the memory region on success, otherwise return an errno.
+ */
+struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ struct rvt_mr *mr;
+
+ if (mr_type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EINVAL);
+
+ mr = __rvt_alloc_mr(max_num_sg, pd);
+ if (IS_ERR(mr))
+ return ERR_CAST(mr);
+
+ return &mr->ibmr;
+}
+
+/**
+ * rvt_set_page - page assignment function called by ib_sg_to_pages
+ * @ibmr: memory region
+ * @addr: dma address of mapped page
+ *
+ * Return: 0 on success
+ */
+static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct rvt_mr *mr = to_imr(ibmr);
+ u32 ps = 1 << mr->mr.page_shift;
+ u32 mapped_segs = mr->mr.length >> mr->mr.page_shift;
+ int m, n;
+
+ if (unlikely(mapped_segs == mr->mr.max_segs))
+ return -ENOMEM;
+
+ m = mapped_segs / RVT_SEGSZ;
+ n = mapped_segs % RVT_SEGSZ;
+ mr->mr.map[m]->segs[n].vaddr = (void *)addr;
+ mr->mr.map[m]->segs[n].length = ps;
+ mr->mr.length += ps;
+ trace_rvt_mr_page_seg(&mr->mr, m, n, (void *)addr, ps);
+
+ return 0;
+}
+
+/**
+ * rvt_map_mr_sg - map an sg list to the memory region
+ * @ibmr: memory region
+ * @sg: dma mapped scatterlist
+ * @sg_nents: number of entries in sg
+ * @sg_offset: offset in bytes into sg
+ *
+ * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
+ *
+ * Return: number of sg elements mapped to the memory region
+ */
+int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset)
+{
+ struct rvt_mr *mr = to_imr(ibmr);
+ int ret;
+
+ mr->mr.length = 0;
+ mr->mr.page_shift = PAGE_SHIFT;
+ ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
+ mr->mr.user_base = ibmr->iova;
+ mr->mr.iova = ibmr->iova;
+ mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
+ mr->mr.length = (size_t)ibmr->length;
+ trace_rvt_map_mr_sg(ibmr, sg_nents, sg_offset);
+ return ret;
+}
+
+/**
+ * rvt_fast_reg_mr - fast register physical MR
+ * @qp: the queue pair where the work request comes from
+ * @ibmr: the memory region to be registered
+ * @key: updated key for this memory region
+ * @access: access flags for this memory region
+ *
+ * Returns 0 on success.
+ */
+int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
+ int access)
+{
+ struct rvt_mr *mr = to_imr(ibmr);
+
+ if (qp->ibqp.pd != mr->mr.pd)
+ return -EACCES;
+
+ /* not applicable to dma MR or user MR */
+ if (!mr->mr.lkey || mr->umem)
+ return -EINVAL;
+
+ if ((key & 0xFFFFFF00) != (mr->mr.lkey & 0xFFFFFF00))
+ return -EINVAL;
+
+ ibmr->lkey = key;
+ ibmr->rkey = key;
+ mr->mr.lkey = key;
+ mr->mr.access_flags = access;
+ mr->mr.iova = ibmr->iova;
+ atomic_set(&mr->mr.lkey_invalid, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL(rvt_fast_reg_mr);
+
+/**
+ * rvt_invalidate_rkey - invalidate an MR rkey
+ * @qp: queue pair associated with the invalidate op
+ * @rkey: rkey to invalidate
+ *
+ * Returns 0 on success.
+ */
+int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey)
+{
+ struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device);
+ struct rvt_lkey_table *rkt = &dev->lkey_table;
+ struct rvt_mregion *mr;
+
+ if (rkey == 0)
+ return -EINVAL;
+
+ rcu_read_lock();
+ mr = rcu_dereference(
+ rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]);
+ if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
+ goto bail;
+
+ atomic_set(&mr->lkey_invalid, 1);
+ rcu_read_unlock();
+ return 0;
+
+bail:
+ rcu_read_unlock();
+ return -EINVAL;
+}
+EXPORT_SYMBOL(rvt_invalidate_rkey);
+
+/**
+ * rvt_sge_adjacent - is isge compressible
+ * @last_sge: last outgoing SGE written
+ * @sge: SGE to check
+ *
+ * If adjacent, last_sge is updated to include the added length.
+ *
+ * Return: true if isge is adjacent to last sge
+ */
+static inline bool rvt_sge_adjacent(struct rvt_sge *last_sge,
+ struct ib_sge *sge)
+{
+ if (last_sge && sge->lkey == last_sge->mr->lkey &&
+ ((uint64_t)(last_sge->vaddr + last_sge->length) == sge->addr)) {
+ if (sge->lkey) {
+ if (unlikely((sge->addr - last_sge->mr->user_base +
+ sge->length > last_sge->mr->length)))
+ return false; /* overrun, caller will catch */
+ } else {
+ last_sge->length += sge->length;
+ }
+ last_sge->sge_length += sge->length;
+ trace_rvt_sge_adjacent(last_sge, sge);
+ return true;
+ }
+ return false;
+}
+
+/**
+ * rvt_lkey_ok - check IB SGE for validity and initialize
+ * @rkt: table containing lkey to check SGE against
+ * @pd: protection domain
+ * @isge: outgoing internal SGE
+ * @last_sge: last outgoing SGE written
+ * @sge: SGE to check
+ * @acc: access flags
+ *
+ * Check the IB SGE for validity and initialize our internal version
+ * of it.
+ *
+ * Increments the reference count when a new sge is stored.
+ *
+ * Return: 0 if compressed, 1 if added, otherwise returns -errno.
+ */
+int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
+ struct rvt_sge *isge, struct rvt_sge *last_sge,
+ struct ib_sge *sge, int acc)
+{
+ struct rvt_mregion *mr;
+ unsigned n, m;
+ size_t off;
+
+ /*
+ * We use LKEY == zero for kernel virtual addresses
+ * (see rvt_get_dma_mr()).
+ */
+ if (sge->lkey == 0) {
+ struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);
+
+ if (pd->user)
+ return -EINVAL;
+ if (rvt_sge_adjacent(last_sge, sge))
+ return 0;
+ rcu_read_lock();
+ mr = rcu_dereference(dev->dma_mr);
+ if (!mr)
+ goto bail;
+ rvt_get_mr(mr);
+ rcu_read_unlock();
+
+ isge->mr = mr;
+ isge->vaddr = (void *)sge->addr;
+ isge->length = sge->length;
+ isge->sge_length = sge->length;
+ isge->m = 0;
+ isge->n = 0;
+ goto ok;
+ }
+ if (rvt_sge_adjacent(last_sge, sge))
+ return 0;
+ rcu_read_lock();
+ mr = rcu_dereference(rkt->table[sge->lkey >> rkt->shift]);
+ if (!mr)
+ goto bail;
+ rvt_get_mr(mr);
+ if (!READ_ONCE(mr->lkey_published))
+ goto bail_unref;
+
+ if (unlikely(atomic_read(&mr->lkey_invalid) ||
+ mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
+ goto bail_unref;
+
+ off = sge->addr - mr->user_base;
+ if (unlikely(sge->addr < mr->user_base ||
+ off + sge->length > mr->length ||
+ (mr->access_flags & acc) != acc))
+ goto bail_unref;
+ rcu_read_unlock();
+
+ off += mr->offset;
+ if (mr->page_shift) {
+ /*
+ * page sizes are uniform power of 2 so no loop is necessary
+ * entries_spanned_by_off is the number of times the loop below
+ * would have executed.
+ */
+ size_t entries_spanned_by_off;
+
+ entries_spanned_by_off = off >> mr->page_shift;
+ off -= (entries_spanned_by_off << mr->page_shift);
+ m = entries_spanned_by_off / RVT_SEGSZ;
+ n = entries_spanned_by_off % RVT_SEGSZ;
+ } else {
+ m = 0;
+ n = 0;
+ while (off >= mr->map[m]->segs[n].length) {
+ off -= mr->map[m]->segs[n].length;
+ n++;
+ if (n >= RVT_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ }
+ isge->mr = mr;
+ isge->vaddr = mr->map[m]->segs[n].vaddr + off;
+ isge->length = mr->map[m]->segs[n].length - off;
+ isge->sge_length = sge->length;
+ isge->m = m;
+ isge->n = n;
+ok:
+ trace_rvt_sge_new(isge, sge);
+ return 1;
+bail_unref:
+ rvt_put_mr(mr);
+bail:
+ rcu_read_unlock();
+ return -EINVAL;
+}
+EXPORT_SYMBOL(rvt_lkey_ok);
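For MRs with a uniform page_shift, the offset-to-segment translation above avoids the per-segment walk. A standalone sketch of that arithmetic follows; the segments-per-map count and the byte offset are assumed values rather than the kernel constants.

#include <stdio.h>

/*
 * Illustration only: reproduces the offset-to-(m, n) arithmetic used in
 * rvt_lkey_ok()/rvt_rkey_ok() for uniformly sized pages.  segsz and the
 * byte offset are assumptions, not the kernel definitions.
 */
int main(void)
{
	unsigned int page_shift = 12;		/* 4 KiB segments         */
	unsigned int segsz = 8;			/* assumed entries per map */
	unsigned long off = 20771;		/* made-up offset (bytes)  */
	unsigned long spanned = off >> page_shift;	/* whole segments  */

	off -= spanned << page_shift;		/* remainder inside segment */
	/* prints m=0 n=5 off=291: sixth segment of the first map block */
	printf("m=%lu n=%lu off=%lu\n",
	       spanned / segsz, spanned % segsz, off);
	return 0;
}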
+
+/**
+ * rvt_rkey_ok - check the IB virtual address, length, and RKEY
+ * @qp: qp for validation
+ * @sge: SGE state
+ * @len: length of data
+ * @vaddr: virtual address to place data
+ * @rkey: rkey to check
+ * @acc: access flags
+ *
+ * Return: 1 if successful, otherwise 0.
+ *
+ * increments the reference count upon success
+ */
+int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
+ u32 len, u64 vaddr, u32 rkey, int acc)
+{
+ struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device);
+ struct rvt_lkey_table *rkt = &dev->lkey_table;
+ struct rvt_mregion *mr;
+ unsigned n, m;
+ size_t off;
+
+ /*
+ * We use RKEY == zero for kernel virtual addresses
+ * (see rvt_get_dma_mr()).
+ */
+ rcu_read_lock();
+ if (rkey == 0) {
+ struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
+ struct rvt_dev_info *rdi = ib_to_rvt(pd->ibpd.device);
+
+ if (pd->user)
+ goto bail;
+ mr = rcu_dereference(rdi->dma_mr);
+ if (!mr)
+ goto bail;
+ rvt_get_mr(mr);
+ rcu_read_unlock();
+
+ sge->mr = mr;
+ sge->vaddr = (void *)vaddr;
+ sge->length = len;
+ sge->sge_length = len;
+ sge->m = 0;
+ sge->n = 0;
+ goto ok;
+ }
+
+ mr = rcu_dereference(rkt->table[rkey >> rkt->shift]);
+ if (!mr)
+ goto bail;
+ rvt_get_mr(mr);
+ /* ensure mr is read before the test */
+ if (!READ_ONCE(mr->lkey_published))
+ goto bail_unref;
+ if (unlikely(atomic_read(&mr->lkey_invalid) ||
+ mr->lkey != rkey || qp->ibqp.pd != mr->pd))
+ goto bail_unref;
+
+ off = vaddr - mr->iova;
+ if (unlikely(vaddr < mr->iova || off + len > mr->length ||
+ (mr->access_flags & acc) == 0))
+ goto bail_unref;
+ rcu_read_unlock();
+
+ off += mr->offset;
+ if (mr->page_shift) {
+ /*
+ * page sizes are uniform power of 2 so no loop is necessary
+ * entries_spanned_by_off is the number of times the loop below
+ * would have executed.
+ */
+ size_t entries_spanned_by_off;
+
+ entries_spanned_by_off = off >> mr->page_shift;
+ off -= (entries_spanned_by_off << mr->page_shift);
+ m = entries_spanned_by_off / RVT_SEGSZ;
+ n = entries_spanned_by_off % RVT_SEGSZ;
+ } else {
+ m = 0;
+ n = 0;
+ while (off >= mr->map[m]->segs[n].length) {
+ off -= mr->map[m]->segs[n].length;
+ n++;
+ if (n >= RVT_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ }
+ sge->mr = mr;
+ sge->vaddr = mr->map[m]->segs[n].vaddr + off;
+ sge->length = mr->map[m]->segs[n].length - off;
+ sge->sge_length = len;
+ sge->m = m;
+ sge->n = n;
+ok:
+ return 1;
+bail_unref:
+ rvt_put_mr(mr);
+bail:
+ rcu_read_unlock();
+ return 0;
+}
+EXPORT_SYMBOL(rvt_rkey_ok);
diff --git a/drivers/infiniband/sw/rdmavt/mr.h b/drivers/infiniband/sw/rdmavt/mr.h
new file mode 100644
index 000000000000..72dab48307b7
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/mr.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ */
+
+#ifndef DEF_RVTMR_H
+#define DEF_RVTMR_H
+
+#include <rdma/rdma_vt.h>
+
+struct rvt_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+ struct rvt_mregion mr; /* must be last */
+};
+
+static inline struct rvt_mr *to_imr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct rvt_mr, ibmr);
+}
+
+int rvt_driver_mr_init(struct rvt_dev_info *rdi);
+void rvt_mr_exit(struct rvt_dev_info *rdi);
+
+/* Mem Regions */
+struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc);
+struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_dmah *dmah,
+ struct ib_udata *udata);
+int rvt_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
+struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg);
+int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset);
+
+#endif /* DEF_RVTMR_H */
diff --git a/drivers/infiniband/sw/rdmavt/pd.c b/drivers/infiniband/sw/rdmavt/pd.c
new file mode 100644
index 000000000000..3af8081dc6c7
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/pd.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ */
+
+#include <linux/slab.h>
+#include "pd.h"
+
+/**
+ * rvt_alloc_pd - allocate a protection domain
+ * @ibpd: PD
+ * @udata: optional user data
+ *
+ * Allocate and keep track of a PD.
+ *
+ * Return: 0 on success
+ */
+int rvt_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct ib_device *ibdev = ibpd->device;
+ struct rvt_dev_info *dev = ib_to_rvt(ibdev);
+ struct rvt_pd *pd = ibpd_to_rvtpd(ibpd);
+ int ret = 0;
+
+ /*
+ * While we could continue allocating protection domains, constrained
+ * only by system resources, the IBTA spec defines a max_pd limit
+ * that can be set, and we need to check for it.
+ */
+
+ spin_lock(&dev->n_pds_lock);
+ if (dev->n_pds_allocated == dev->dparms.props.max_pd) {
+ spin_unlock(&dev->n_pds_lock);
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ dev->n_pds_allocated++;
+ spin_unlock(&dev->n_pds_lock);
+
+ /* ib_alloc_pd() will initialize pd->ibpd. */
+ pd->user = !!udata;
+
+bail:
+ return ret;
+}
+
+/**
+ * rvt_dealloc_pd - Free PD
+ * @ibpd: Free up PD
+ * @udata: Valid user data or NULL for kernel object
+ *
+ * Return: always 0
+ */
+int rvt_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct rvt_dev_info *dev = ib_to_rvt(ibpd->device);
+
+ spin_lock(&dev->n_pds_lock);
+ dev->n_pds_allocated--;
+ spin_unlock(&dev->n_pds_lock);
+ return 0;
+}
diff --git a/drivers/infiniband/sw/rdmavt/pd.h b/drivers/infiniband/sw/rdmavt/pd.h
new file mode 100644
index 000000000000..552adaeb371f
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/pd.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ */
+
+#ifndef DEF_RDMAVTPD_H
+#define DEF_RDMAVTPD_H
+
+#include <rdma/rdma_vt.h>
+
+int rvt_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+int rvt_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+
+#endif /* DEF_RDMAVTPD_H */
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
new file mode 100644
index 000000000000..134a79eecfcb
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -0,0 +1,3218 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2016 - 2020 Intel Corporation.
+ */
+
+#include <linux/hash.h>
+#include <linux/bitops.h>
+#include <linux/lockdep.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_hdrs.h>
+#include <rdma/opa_addr.h>
+#include <rdma/uverbs_ioctl.h>
+#include "qp.h"
+#include "vt.h"
+#include "trace.h"
+
+#define RVT_RWQ_COUNT_THRESHOLD 16
+
+static void rvt_rc_timeout(struct timer_list *t);
+static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ enum ib_qp_type type);
+
+/*
+ * Convert the AETH RNR timeout code into the number of microseconds.
+ */
+static const u32 ib_rvt_rnr_table[32] = {
+ 655360, /* 00: 655.36 */
+ 10, /* 01: .01 */
+ 20, /* 02: .02 */
+ 30, /* 03: .03 */
+ 40, /* 04: .04 */
+ 60, /* 05: .06 */
+ 80, /* 06: .08 */
+ 120, /* 07: .12 */
+ 160, /* 08: .16 */
+ 240, /* 09: .24 */
+ 320, /* 0A: .32 */
+ 480, /* 0B: .48 */
+ 640, /* 0C: .64 */
+ 960, /* 0D: .96 */
+ 1280, /* 0E: 1.28 */
+ 1920, /* 0F: 1.92 */
+ 2560, /* 10: 2.56 */
+ 3840, /* 11: 3.84 */
+ 5120, /* 12: 5.12 */
+ 7680, /* 13: 7.68 */
+ 10240, /* 14: 10.24 */
+ 15360, /* 15: 15.36 */
+ 20480, /* 16: 20.48 */
+ 30720, /* 17: 30.72 */
+ 40960, /* 18: 40.96 */
+ 61440, /* 19: 61.44 */
+ 81920, /* 1A: 81.92 */
+ 122880, /* 1B: 122.88 */
+ 163840, /* 1C: 163.84 */
+ 245760, /* 1D: 245.76 */
+ 327680, /* 1E: 327.68 */
+ 491520 /* 1F: 491.52 */
+};
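The table is indexed directly by the 5-bit RNR timer code carried in the AETH. A small standalone illustration of that lookup follows; the sample code value is made up.

#include <stdio.h>

/*
 * Illustration only: the 32-entry table maps the 5-bit IB RNR NAK timer
 * code to microseconds; indexing simply masks the code to 5 bits.
 */
static const unsigned int rnr_table_us[32] = {
	655360, 10, 20, 30, 40, 60, 80, 120, 160, 240, 320, 480,
	640, 960, 1280, 1920, 2560, 3840, 5120, 7680, 10240, 15360,
	20480, 30720, 40960, 61440, 81920, 122880, 163840, 245760,
	327680, 491520
};

int main(void)
{
	unsigned int aeth_code = 0x0c;		/* example 5-bit timer code */

	/* prints 640 us, i.e. 0.64 ms for code 0x0c */
	printf("%u us\n", rnr_table_us[aeth_code & 0x1f]);
	return 0;
}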
+
+/*
+ * Note that it is OK to post send work requests in the SQE and ERR
+ * states; rvt_do_send() will process them and generate error
+ * completions as per IB 1.2 C10-96.
+ */
+const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
+ [IB_QPS_RESET] = 0,
+ [IB_QPS_INIT] = RVT_POST_RECV_OK,
+ [IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
+ [IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
+ RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
+ RVT_PROCESS_NEXT_SEND_OK,
+ [IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
+ RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
+ [IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
+ RVT_POST_SEND_OK | RVT_FLUSH_SEND,
+ [IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
+ RVT_POST_SEND_OK | RVT_FLUSH_SEND,
+};
+EXPORT_SYMBOL(ib_rvt_state_ops);
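The per-state flag table is consulted on the post and receive paths to decide which operations a QP in a given state may accept. The sketch below shows the same table-driven check in a standalone form; the flag values and states are local stand-ins, not the kernel definitions.

#include <stdio.h>

/*
 * Illustration only: shows how a per-state flag table like
 * ib_rvt_state_ops is consulted.  The flags and states below are
 * assumptions for this sketch.
 */
enum { POST_SEND_OK = 0x1, POST_RECV_OK = 0x2, FLUSH_SEND = 0x4 };
enum { QPS_RESET, QPS_INIT, QPS_RTS };

static const int state_ops[] = {
	[QPS_RESET] = 0,
	[QPS_INIT]  = POST_RECV_OK,
	[QPS_RTS]   = POST_RECV_OK | POST_SEND_OK,
};

int main(void)
{
	int state = QPS_INIT;			/* made-up current state */

	/* a post-send in INIT would be rejected, a post-recv accepted */
	printf("send ok: %d, recv ok: %d\n",
	       !!(state_ops[state] & POST_SEND_OK),
	       !!(state_ops[state] & POST_RECV_OK));
	return 0;
}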
+
+/* platform specific: return the last level cache (llc) size, in KiB */
+static int rvt_wss_llc_size(void)
+{
+ /* assume that the boot CPU value is universal for all CPUs */
+ return boot_cpu_data.x86_cache_size;
+}
+
+/* platform specific: cacheless copy */
+static void cacheless_memcpy(void *dst, void *src, size_t n)
+{
+ /*
+ * Use the only available X64 cacheless copy. Add a __user cast
+ * to quiet sparse. The src argument is already in the kernel so
+ * there are no security issues. The extra fault recovery machinery
+ * is not invoked.
+ */
+ __copy_user_nocache(dst, (void __user *)src, n);
+}
+
+void rvt_wss_exit(struct rvt_dev_info *rdi)
+{
+ struct rvt_wss *wss = rdi->wss;
+
+ if (!wss)
+ return;
+
+ /* coded to handle partially initialized and repeat callers */
+ kfree(wss->entries);
+ wss->entries = NULL;
+ kfree(rdi->wss);
+ rdi->wss = NULL;
+}
+
+/*
+ * rvt_wss_init - Init wss data structures
+ *
+ * Return: 0 on success
+ */
+int rvt_wss_init(struct rvt_dev_info *rdi)
+{
+ unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
+ unsigned int wss_threshold = rdi->dparms.wss_threshold;
+ unsigned int wss_clean_period = rdi->dparms.wss_clean_period;
+ long llc_size;
+ long llc_bits;
+ long table_size;
+ long table_bits;
+ struct rvt_wss *wss;
+ int node = rdi->dparms.node;
+
+ if (sge_copy_mode != RVT_SGE_COPY_ADAPTIVE) {
+ rdi->wss = NULL;
+ return 0;
+ }
+
+ rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node);
+ if (!rdi->wss)
+ return -ENOMEM;
+ wss = rdi->wss;
+
+ /* check for a valid percent range - default to 80 if none or invalid */
+ if (wss_threshold < 1 || wss_threshold > 100)
+ wss_threshold = 80;
+
+ /* reject a wildly large period */
+ if (wss_clean_period > 1000000)
+ wss_clean_period = 256;
+
+ /* reject a zero period */
+ if (wss_clean_period == 0)
+ wss_clean_period = 1;
+
+ /*
+ * Calculate the table size - the next power of 2 larger than the
+ * LLC size. LLC size is in KiB.
+ */
+ llc_size = rvt_wss_llc_size() * 1024;
+ table_size = roundup_pow_of_two(llc_size);
+
+ /* one bit per page in rounded up table */
+ llc_bits = llc_size / PAGE_SIZE;
+ table_bits = table_size / PAGE_SIZE;
+ wss->pages_mask = table_bits - 1;
+ wss->num_entries = table_bits / BITS_PER_LONG;
+
+ wss->threshold = (llc_bits * wss_threshold) / 100;
+ if (wss->threshold == 0)
+ wss->threshold = 1;
+
+ wss->clean_period = wss_clean_period;
+ atomic_set(&wss->clean_counter, wss_clean_period);
+
+ wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries),
+ GFP_KERNEL, node);
+ if (!wss->entries) {
+ rvt_wss_exit(rdi);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
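The sizing arithmetic above turns the LLC size into a bitmap with one bit per page plus a threshold. A standalone reproduction of that math follows, assuming a 30 MiB cache, 4 KiB pages, 64-bit longs and the default 80% threshold; none of these numbers come from real hardware.

#include <stdio.h>

/*
 * Illustration only: reproduces the sizing math in rvt_wss_init() for an
 * assumed 30 MiB last level cache, 4 KiB pages and 64-bit longs.
 */
static long round_up_pow2(long v)
{
	long r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	long llc_size = 30720L * 1024;			/* bytes          */
	long page_size = 4096, bits_per_long = 64;
	long table_size = round_up_pow2(llc_size);
	long llc_bits = llc_size / page_size;
	long table_bits = table_size / page_size;
	long pages_mask = table_bits - 1;		/* 8191            */
	long num_entries = table_bits / bits_per_long;	/* 128 bitmap words */
	long threshold = llc_bits * 80 / 100;		/* 6144 pages       */

	printf("pages_mask=%ld num_entries=%ld threshold=%ld\n",
	       pages_mask, num_entries, threshold);
	return 0;
}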
+
+/*
+ * Advance the clean counter. When the clean period has expired,
+ * clean an entry.
+ *
+ * This is implemented in atomics to avoid locking. Because multiple
+ * variables are involved, it can be racy which can lead to slightly
+ * inaccurate information. Since this is only a heuristic, this is
+ * OK. Any inaccuracies will clean themselves out as the counter
+ * advances. That said, it is unlikely the entry clean operation will
+ * race - the next possible racer will not start until the next clean
+ * period.
+ *
+ * The clean counter is implemented as a decrement to zero. When zero
+ * is reached an entry is cleaned.
+ */
+static void wss_advance_clean_counter(struct rvt_wss *wss)
+{
+ int entry;
+ int weight;
+ unsigned long bits;
+
+ /* become the cleaner if we decrement the counter to zero */
+ if (atomic_dec_and_test(&wss->clean_counter)) {
+ /*
+ * Set, not add, the clean period. This avoids an issue
+ * where the counter could decrement below the clean period.
+ * Doing a set can result in lost decrements, slowing the
+ * clean advance. Since this is a heuristic, this possible
+ * slowdown is OK.
+ *
+ * An alternative is to loop, advancing the counter by a
+ * clean period until the result is > 0. However, this could
+ * lead to several threads keeping another in the clean loop.
+ * This could be mitigated by limiting the number of times
+ * we stay in the loop.
+ */
+ atomic_set(&wss->clean_counter, wss->clean_period);
+
+ /*
+ * Uniquely grab the entry to clean and move to next.
+ * The current entry is always the lower bits of
+ * wss.clean_entry. The table size, wss.num_entries,
+ * is always a power-of-2.
+ */
+ entry = (atomic_inc_return(&wss->clean_entry) - 1)
+ & (wss->num_entries - 1);
+
+ /* clear the entry and count the bits */
+ bits = xchg(&wss->entries[entry], 0);
+ weight = hweight64((u64)bits);
+ /* only adjust the contended total count if needed */
+ if (weight)
+ atomic_sub(weight, &wss->total_count);
+ }
+}
+
+/*
+ * Insert the given address into the working set array.
+ */
+static void wss_insert(struct rvt_wss *wss, void *address)
+{
+ u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask;
+ u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
+ u32 nr = page & (BITS_PER_LONG - 1);
+
+ if (!test_and_set_bit(nr, &wss->entries[entry]))
+ atomic_inc(&wss->total_count);
+
+ wss_advance_clean_counter(wss);
+}
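wss_insert() reduces an address to a page index within the power-of-two table and then to a (word, bit) pair. The standalone sketch below mirrors that mapping with an assumed 8192-page table and a made-up address.

#include <stdio.h>

/*
 * Illustration only: mirrors the address-to-bitmap mapping in wss_insert()
 * with an assumed 8192-page table, 4 KiB pages and 64-bit words.
 */
int main(void)
{
	unsigned long addr = 0x7f3a12345678UL;	/* made-up buffer address */
	unsigned long pages_mask = 8192 - 1;	/* table_bits - 1         */
	unsigned long page = (addr >> 12) & pages_mask;

	/* one bit per page: word index and bit number within that word */
	printf("page=%lu entry=%lu bit=%lu\n", page, page / 64, page % 64);
	return 0;
}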
+
+/*
+ * Is the working set larger than the threshold?
+ */
+static inline bool wss_exceeds_threshold(struct rvt_wss *wss)
+{
+ return atomic_read(&wss->total_count) >= wss->threshold;
+}
+
+static void get_map_page(struct rvt_qpn_table *qpt,
+ struct rvt_qpn_map *map)
+{
+ unsigned long page = get_zeroed_page(GFP_KERNEL);
+
+ /*
+ * Free the page if someone raced with us installing it.
+ */
+
+ spin_lock(&qpt->lock);
+ if (map->page)
+ free_page(page);
+ else
+ map->page = (void *)page;
+ spin_unlock(&qpt->lock);
+}
+
+/**
+ * init_qpn_table - initialize the QP number table for a device
+ * @rdi: rvt dev struct
+ * @qpt: the QPN table
+ */
+static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
+{
+ u32 offset, i;
+ struct rvt_qpn_map *map;
+ int ret = 0;
+
+ if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
+ return -EINVAL;
+
+ spin_lock_init(&qpt->lock);
+
+ qpt->last = rdi->dparms.qpn_start;
+ qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;
+
+ /*
+ * Drivers may want some QPs beyond what we need for verbs; let them use
+ * our qpn table. No need for two. Let's go ahead and mark the bitmaps
+ * for those. The reserved range must be *after* the range which verbs
+ * will pick from.
+ */
+
+ /* Figure out number of bit maps needed before reserved range */
+ qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;
+
+ /* This should always be zero */
+ offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;
+
+ /* Starting with the first reserved bit map */
+ map = &qpt->map[qpt->nmaps];
+
+ rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
+ rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
+ for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
+ if (!map->page) {
+ get_map_page(qpt, map);
+ if (!map->page) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+ set_bit(offset, map->page);
+ offset++;
+ if (offset == RVT_BITS_PER_PAGE) {
+ /* next page */
+ qpt->nmaps++;
+ map++;
+ offset = 0;
+ }
+ }
+ return ret;
+}
+
+/**
+ * free_qpn_table - free the QP number table for a device
+ * @qpt: the QPN table
+ */
+static void free_qpn_table(struct rvt_qpn_table *qpt)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
+ free_page((unsigned long)qpt->map[i].page);
+}
+
+/**
+ * rvt_driver_qp_init - Init driver qp resources
+ * @rdi: rvt dev structure
+ *
+ * Return: 0 on success
+ */
+int rvt_driver_qp_init(struct rvt_dev_info *rdi)
+{
+ int i;
+ int ret = -ENOMEM;
+
+ if (!rdi->dparms.qp_table_size)
+ return -EINVAL;
+
+ /*
+ * If driver is not doing any QP allocation then make sure it is
+ * providing the necessary QP functions.
+ */
+ if (!rdi->driver_f.free_all_qps ||
+ !rdi->driver_f.qp_priv_alloc ||
+ !rdi->driver_f.qp_priv_free ||
+ !rdi->driver_f.notify_qp_reset ||
+ !rdi->driver_f.notify_restart_rc)
+ return -EINVAL;
+
+ /* allocate parent object */
+ rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
+ rdi->dparms.node);
+ if (!rdi->qp_dev)
+ return -ENOMEM;
+
+ /* allocate hash table */
+ rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
+ rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
+ rdi->qp_dev->qp_table =
+ kmalloc_array_node(rdi->qp_dev->qp_table_size,
+ sizeof(*rdi->qp_dev->qp_table),
+ GFP_KERNEL, rdi->dparms.node);
+ if (!rdi->qp_dev->qp_table)
+ goto no_qp_table;
+
+ for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
+ RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);
+
+ spin_lock_init(&rdi->qp_dev->qpt_lock);
+
+ /* initialize qpn map */
+ if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
+ goto fail_table;
+
+ spin_lock_init(&rdi->n_qps_lock);
+
+ return 0;
+
+fail_table:
+ kfree(rdi->qp_dev->qp_table);
+ free_qpn_table(&rdi->qp_dev->qpn_table);
+
+no_qp_table:
+ kfree(rdi->qp_dev);
+
+ return ret;
+}
+
+/**
+ * rvt_free_qp_cb - callback function to reset a qp
+ * @qp: the qp to reset
+ * @v: a 64-bit value
+ *
+ * This function resets the qp and removes it from the
+ * qp hash table.
+ */
+static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v)
+{
+ unsigned int *qp_inuse = (unsigned int *)v;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+
+ /* Reset the qp and remove it from the qp hash list */
+ rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);
+
+ /* Increment the qp_inuse count */
+ (*qp_inuse)++;
+}
+
+/**
+ * rvt_free_all_qps - check for QPs still in use
+ * @rdi: rvt device info structure
+ *
+ * There should not be any QPs still in use.
+ * Free memory for table.
+ * Return the number of QPs still in use.
+ */
+static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
+{
+ unsigned int qp_inuse = 0;
+
+ qp_inuse += rvt_mcast_tree_empty(rdi);
+
+ rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb);
+
+ return qp_inuse;
+}
+
+/**
+ * rvt_qp_exit - clean up qps on device exit
+ * @rdi: rvt dev structure
+ *
+ * Check for qp leaks and free resources.
+ */
+void rvt_qp_exit(struct rvt_dev_info *rdi)
+{
+ u32 qps_inuse = rvt_free_all_qps(rdi);
+
+ if (qps_inuse)
+ rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
+ qps_inuse);
+
+ kfree(rdi->qp_dev->qp_table);
+ free_qpn_table(&rdi->qp_dev->qpn_table);
+ kfree(rdi->qp_dev);
+}
+
+static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
+ struct rvt_qpn_map *map, unsigned off)
+{
+ return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
+}
+
+/**
+ * alloc_qpn - Allocate the next available qpn or zero/one for QP type
+ * IB_QPT_SMI/IB_QPT_GSI
+ * @rdi: rvt device info structure
+ * @qpt: queue pair number table pointer
+ * @type: the QP type
+ * @port_num: IB port number, 1 based, comes from core
+ * @exclude_prefix: prefix of special queue pair number being allocated
+ *
+ * Return: The queue pair number
+ */
+static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
+ enum ib_qp_type type, u8 port_num, u8 exclude_prefix)
+{
+ u32 i, offset, max_scan, qpn;
+ struct rvt_qpn_map *map;
+ int ret;
+ u32 max_qpn = exclude_prefix == RVT_AIP_QP_PREFIX ?
+ RVT_AIP_QPN_MAX : RVT_QPN_MAX;
+
+ if (rdi->driver_f.alloc_qpn)
+ return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);
+
+ if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
+ unsigned n;
+
+ ret = type == IB_QPT_GSI;
+ n = 1 << (ret + 2 * (port_num - 1));
+ spin_lock(&qpt->lock);
+ if (qpt->flags & n)
+ ret = -EINVAL;
+ else
+ qpt->flags |= n;
+ spin_unlock(&qpt->lock);
+
+ return ret;
+ }
+
+ qpn = qpt->last + qpt->incr;
+ if (qpn >= max_qpn)
+ qpn = qpt->incr | ((qpt->last & 1) ^ 1);
+ /* offset carries bit 0 */
+ offset = qpn & RVT_BITS_PER_PAGE_MASK;
+ map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
+ max_scan = qpt->nmaps - !offset;
+ for (i = 0;;) {
+ if (unlikely(!map->page)) {
+ get_map_page(qpt, map);
+ if (unlikely(!map->page))
+ break;
+ }
+ do {
+ if (!test_and_set_bit(offset, map->page)) {
+ qpt->last = qpn;
+ ret = qpn;
+
+ return ret;
+ }
+ offset += qpt->incr;
+ /*
+ * This qpn might be bogus if offset >= BITS_PER_PAGE.
+ * That is OK. It gets re-assigned below
+ */
+ qpn = mk_qpn(qpt, map, offset);
+ } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
+ /*
+ * In order to keep the number of pages allocated to a
+ * minimum, we scan all the existing pages before increasing
+ * the size of the bitmap table.
+ */
+ if (++i > max_scan) {
+ if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
+ break;
+ map = &qpt->map[qpt->nmaps++];
+ /* start at incr with current bit 0 */
+ offset = qpt->incr | (offset & 1);
+ } else if (map < &qpt->map[qpt->nmaps]) {
+ ++map;
+ /* start at incr with current bit 0 */
+ offset = qpt->incr | (offset & 1);
+ } else {
+ map = &qpt->map[0];
+ /* wrap to first map page, invert bit 0 */
+ offset = qpt->incr | ((offset & 1) ^ 1);
+ }
+ /* there can be no set bits in low-order QoS bits */
+ WARN_ON(rdi->dparms.qos_shift > 1 &&
+ offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1));
+ qpn = mk_qpn(qpt, map, offset);
+ }
+
+ return -ENOMEM;
+}
+
+/**
+ * rvt_clear_mr_refs - Drop held mr refs
+ * @qp: rvt qp data structure
+ * @clr_sends: If the send side should be cleared or not
+ */
+static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
+{
+ unsigned n;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+
+ if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
+ rvt_put_ss(&qp->s_rdma_read_sge);
+
+ rvt_put_ss(&qp->r_sge);
+
+ if (clr_sends) {
+ while (qp->s_last != qp->s_head) {
+ struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
+
+ rvt_put_qp_swqe(qp, wqe);
+ if (++qp->s_last >= qp->s_size)
+ qp->s_last = 0;
+ smp_wmb(); /* see qp_set_savail */
+ }
+ if (qp->s_rdma_mr) {
+ rvt_put_mr(qp->s_rdma_mr);
+ qp->s_rdma_mr = NULL;
+ }
+ }
+
+ for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) {
+ struct rvt_ack_entry *e = &qp->s_ack_queue[n];
+
+ if (e->rdma_sge.mr) {
+ rvt_put_mr(e->rdma_sge.mr);
+ e->rdma_sge.mr = NULL;
+ }
+ }
+}
+
+/**
+ * rvt_swqe_has_lkey - return true if lkey is used by swqe
+ * @wqe: the send wqe
+ * @lkey: the lkey
+ *
+ * Test the swqe for using lkey
+ */
+static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
+{
+ int i;
+
+ for (i = 0; i < wqe->wr.num_sge; i++) {
+ struct rvt_sge *sge = &wqe->sg_list[i];
+
+ if (rvt_mr_has_lkey(sge->mr, lkey))
+ return true;
+ }
+ return false;
+}
+
+/**
+ * rvt_qp_sends_has_lkey - return true if qp sends use lkey
+ * @qp: the rvt_qp
+ * @lkey: the lkey
+ */
+static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
+{
+ u32 s_last = qp->s_last;
+
+ while (s_last != qp->s_head) {
+ struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);
+
+ if (rvt_swqe_has_lkey(wqe, lkey))
+ return true;
+
+ if (++s_last >= qp->s_size)
+ s_last = 0;
+ }
+ if (qp->s_rdma_mr)
+ if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey))
+ return true;
+ return false;
+}
+
+/**
+ * rvt_qp_acks_has_lkey - return true if acks have lkey
+ * @qp: the qp
+ * @lkey: the lkey
+ */
+static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
+{
+ int i;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+
+ for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) {
+ struct rvt_ack_entry *e = &qp->s_ack_queue[i];
+
+ if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey))
+ return true;
+ }
+ return false;
+}
+
+/**
+ * rvt_qp_mr_clean - clean up remote ops for lkey
+ * @qp: the qp
+ * @lkey: the lkey that is being de-registered
+ *
+ * This routine checks if the lkey is being used by
+ * the qp.
+ *
+ * If so, the qp is put into an error state to eliminate
+ * any references from the qp.
+ */
+void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey)
+{
+ bool lastwqe = false;
+
+ if (qp->ibqp.qp_type == IB_QPT_SMI ||
+ qp->ibqp.qp_type == IB_QPT_GSI)
+ /* avoid special QPs */
+ return;
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_hlock);
+ spin_lock(&qp->s_lock);
+
+ if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
+ goto check_lwqe;
+
+ if (rvt_ss_has_lkey(&qp->r_sge, lkey) ||
+ rvt_qp_sends_has_lkey(qp, lkey) ||
+ rvt_qp_acks_has_lkey(qp, lkey))
+ lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR);
+check_lwqe:
+ spin_unlock(&qp->s_lock);
+ spin_unlock(&qp->s_hlock);
+ spin_unlock_irq(&qp->r_lock);
+ if (lastwqe) {
+ struct ib_event ev;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+}
+
+/**
+ * rvt_remove_qp - remove qp from table
+ * @rdi: rvt dev struct
+ * @qp: qp to remove
+ *
+ * Remove the QP from the table so it can't be found asynchronously by
+ * the receive routine.
+ */
+static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
+{
+ struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
+ u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
+ unsigned long flags;
+ int removed = 1;
+
+ spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
+
+ if (rcu_dereference_protected(rvp->qp[0],
+ lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
+ RCU_INIT_POINTER(rvp->qp[0], NULL);
+ } else if (rcu_dereference_protected(rvp->qp[1],
+ lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
+ RCU_INIT_POINTER(rvp->qp[1], NULL);
+ } else {
+ struct rvt_qp *q;
+ struct rvt_qp __rcu **qpp;
+
+ removed = 0;
+ qpp = &rdi->qp_dev->qp_table[n];
+ for (; (q = rcu_dereference_protected(*qpp,
+ lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
+ qpp = &q->next) {
+ if (q == qp) {
+ RCU_INIT_POINTER(*qpp,
+ rcu_dereference_protected(qp->next,
+ lockdep_is_held(&rdi->qp_dev->qpt_lock)));
+ removed = 1;
+ trace_rvt_qpremove(qp, n);
+ break;
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
+ if (removed) {
+ synchronize_rcu();
+ rvt_put_qp(qp);
+ }
+}
+
+/**
+ * rvt_alloc_rq - allocate memory for user or kernel buffer
+ * @rq: receive queue data structure
+ * @size: number of request queue entries
+ * @node: The NUMA node
+ * @udata: non-NULL if the buffer is being allocated for a user queue
+ *
+ * Return: 0 on success, -ENOMEM if memory allocation fails.
+ * This function is used by both shared receive
+ * queues and non-shared receive queues to allocate
+ * memory.
+ */
+int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node,
+ struct ib_udata *udata)
+{
+ if (udata) {
+ rq->wq = vmalloc_user(sizeof(struct rvt_rwq) + size);
+ if (!rq->wq)
+ goto bail;
+ /* need kwq with no buffers */
+ rq->kwq = kzalloc_node(sizeof(*rq->kwq), GFP_KERNEL, node);
+ if (!rq->kwq)
+ goto bail;
+ rq->kwq->curr_wq = rq->wq->wq;
+ } else {
+ /* need kwq with buffers */
+ rq->kwq =
+ vzalloc_node(sizeof(struct rvt_krwq) + size, node);
+ if (!rq->kwq)
+ goto bail;
+ rq->kwq->curr_wq = rq->kwq->wq;
+ }
+
+ spin_lock_init(&rq->kwq->p_lock);
+ spin_lock_init(&rq->kwq->c_lock);
+ return 0;
+bail:
+ rvt_free_rq(rq);
+ return -ENOMEM;
+}
+
+/**
+ * rvt_init_qp - initialize the QP state to the reset state
+ * @rdi: rvt dev struct
+ * @qp: the QP to init or reinit
+ * @type: the QP type
+ *
+ * This function is called from both rvt_create_qp() and
+ * rvt_reset_qp(). The difference is that the reset path
+ * takes the necessary locks to protect against concurrent
+ * access.
+ */
+static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ enum ib_qp_type type)
+{
+ qp->remote_qpn = 0;
+ qp->qkey = 0;
+ qp->qp_access_flags = 0;
+ qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
+ qp->s_hdrwords = 0;
+ qp->s_wqe = NULL;
+ qp->s_draining = 0;
+ qp->s_next_psn = 0;
+ qp->s_last_psn = 0;
+ qp->s_sending_psn = 0;
+ qp->s_sending_hpsn = 0;
+ qp->s_psn = 0;
+ qp->r_psn = 0;
+ qp->r_msn = 0;
+ if (type == IB_QPT_RC) {
+ qp->s_state = IB_OPCODE_RC_SEND_LAST;
+ qp->r_state = IB_OPCODE_RC_SEND_LAST;
+ } else {
+ qp->s_state = IB_OPCODE_UC_SEND_LAST;
+ qp->r_state = IB_OPCODE_UC_SEND_LAST;
+ }
+ qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
+ qp->r_nak_state = 0;
+ qp->r_aflags = 0;
+ qp->r_flags = 0;
+ qp->s_head = 0;
+ qp->s_tail = 0;
+ qp->s_cur = 0;
+ qp->s_acked = 0;
+ qp->s_last = 0;
+ qp->s_ssn = 1;
+ qp->s_lsn = 0;
+ qp->s_mig_state = IB_MIG_MIGRATED;
+ qp->r_head_ack_queue = 0;
+ qp->s_tail_ack_queue = 0;
+ qp->s_acked_ack_queue = 0;
+ qp->s_num_rd_atomic = 0;
+ qp->r_sge.num_sge = 0;
+ atomic_set(&qp->s_reserved_used, 0);
+}
+
+/**
+ * _rvt_reset_qp - initialize the QP state to the reset state
+ * @rdi: rvt dev struct
+ * @qp: the QP to reset
+ * @type: the QP type
+ *
+ * r_lock, s_hlock, and s_lock are required to be held by the caller
+ */
+static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ enum ib_qp_type type)
+ __must_hold(&qp->s_lock)
+ __must_hold(&qp->s_hlock)
+ __must_hold(&qp->r_lock)
+{
+ lockdep_assert_held(&qp->r_lock);
+ lockdep_assert_held(&qp->s_hlock);
+ lockdep_assert_held(&qp->s_lock);
+ if (qp->state != IB_QPS_RESET) {
+ qp->state = IB_QPS_RESET;
+
+ /* Let drivers flush their waitlist */
+ rdi->driver_f.flush_qp_waiters(qp);
+ rvt_stop_rc_timers(qp);
+ qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
+ spin_unlock(&qp->s_lock);
+ spin_unlock(&qp->s_hlock);
+ spin_unlock_irq(&qp->r_lock);
+
+ /* Stop the send queue and the retry timer */
+ rdi->driver_f.stop_send_queue(qp);
+ rvt_del_timers_sync(qp);
+ /* Wait for things to stop */
+ rdi->driver_f.quiesce_qp(qp);
+
+ /* take qp out the hash and wait for it to be unused */
+ rvt_remove_qp(rdi, qp);
+
+ /* grab the lock b/c it was locked at call time */
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_hlock);
+ spin_lock(&qp->s_lock);
+
+ rvt_clear_mr_refs(qp, 1);
+ /*
+ * Let the driver do any tear down or re-init it needs to for
+ * a qp that has been reset
+ */
+ rdi->driver_f.notify_qp_reset(qp);
+ }
+ rvt_init_qp(rdi, qp, type);
+ lockdep_assert_held(&qp->r_lock);
+ lockdep_assert_held(&qp->s_hlock);
+ lockdep_assert_held(&qp->s_lock);
+}
+
+/**
+ * rvt_reset_qp - initialize the QP state to the reset state
+ * @rdi: the device info
+ * @qp: the QP to reset
+ * @type: the QP type
+ *
+ * This is the wrapper function to acquire the r_lock, s_hlock, and s_lock
+ * before calling _rvt_reset_qp().
+ */
+static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ enum ib_qp_type type)
+{
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_hlock);
+ spin_lock(&qp->s_lock);
+ _rvt_reset_qp(rdi, qp, type);
+ spin_unlock(&qp->s_lock);
+ spin_unlock(&qp->s_hlock);
+ spin_unlock_irq(&qp->r_lock);
+}
+
+/**
+ * rvt_free_qpn - Free a qpn from the bit map
+ * @qpt: QP table
+ * @qpn: queue pair number to free
+ */
+static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
+{
+ struct rvt_qpn_map *map;
+
+ if ((qpn & RVT_AIP_QP_PREFIX_MASK) == RVT_AIP_QP_BASE)
+ qpn &= RVT_AIP_QP_SUFFIX;
+
+ map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
+ if (map->page)
+ clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
+}
+
+/**
+ * get_allowed_ops - Given a QP type return the appropriate allowed OP
+ * @type: valid, supported, QP type
+ */
+static u8 get_allowed_ops(enum ib_qp_type type)
+{
+ return type == IB_QPT_RC ? IB_OPCODE_RC : type == IB_QPT_UC ?
+ IB_OPCODE_UC : IB_OPCODE_UD;
+}
+
+/**
+ * free_ud_wq_attr - Clean up AH attribute cache for UD QPs
+ * @qp: Valid QP with allowed_ops set
+ *
+ * The rvt_swqe data structure being used is a union, so this is
+ * only valid for UD QPs.
+ */
+static void free_ud_wq_attr(struct rvt_qp *qp)
+{
+ struct rvt_swqe *wqe;
+ int i;
+
+ for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
+ wqe = rvt_get_swqe_ptr(qp, i);
+ kfree(wqe->ud_wr.attr);
+ wqe->ud_wr.attr = NULL;
+ }
+}
+
+/**
+ * alloc_ud_wq_attr - Allocate AH attribute cache for UD QPs
+ * @qp: Valid QP with allowed_ops set
+ * @node: Numa node for allocation
+ *
+ * The rvt_swqe data structure being used is a union, so this is
+ * only valid for UD QPs.
+ */
+static int alloc_ud_wq_attr(struct rvt_qp *qp, int node)
+{
+ struct rvt_swqe *wqe;
+ int i;
+
+ for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
+ wqe = rvt_get_swqe_ptr(qp, i);
+ wqe->ud_wr.attr = kzalloc_node(sizeof(*wqe->ud_wr.attr),
+ GFP_KERNEL, node);
+ if (!wqe->ud_wr.attr) {
+ free_ud_wq_attr(qp);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * rvt_create_qp - create a queue pair for a device
+ * @ibqp: the queue pair
+ * @init_attr: the attributes of the queue pair
+ * @udata: user data for libibverbs.so
+ *
+ * Queue pair creation is mostly an rvt issue. However, drivers have their own
+ * unique idea of what queue pair numbers mean. For instance, there is a reserved
+ * range for PSM.
+ *
+ * Return: 0 on success, otherwise returns an errno.
+ *
+ * Called by the ib_create_qp() core verbs function.
+ */
+int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ int ret = -ENOMEM;
+ struct rvt_swqe *swq = NULL;
+ size_t sz;
+ size_t sg_list_sz = 0;
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+ void *priv = NULL;
+ size_t sqsize;
+ u8 exclude_prefix = 0;
+
+ if (!rdi)
+ return -EINVAL;
+
+ if (init_attr->create_flags & ~IB_QP_CREATE_NETDEV_USE)
+ return -EOPNOTSUPP;
+
+ if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
+ init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr)
+ return -EINVAL;
+
+ /* Check receive queue parameters if no SRQ is specified. */
+ if (!init_attr->srq) {
+ if (init_attr->cap.max_recv_sge >
+ rdi->dparms.props.max_recv_sge ||
+ init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
+ return -EINVAL;
+
+ if (init_attr->cap.max_send_sge +
+ init_attr->cap.max_send_wr +
+ init_attr->cap.max_recv_sge +
+ init_attr->cap.max_recv_wr == 0)
+ return -EINVAL;
+ }
+ sqsize =
+ init_attr->cap.max_send_wr + 1 +
+ rdi->dparms.reserved_operations;
+ switch (init_attr->qp_type) {
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ if (init_attr->port_num == 0 ||
+ init_attr->port_num > ibqp->device->phys_port_cnt)
+ return -EINVAL;
+ fallthrough;
+ case IB_QPT_UC:
+ case IB_QPT_RC:
+ case IB_QPT_UD:
+ sz = struct_size(swq, sg_list, init_attr->cap.max_send_sge);
+ swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
+ if (!swq)
+ return -ENOMEM;
+
+ if (init_attr->srq) {
+ struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
+
+ if (srq->rq.max_sge > 1)
+ sg_list_sz = sizeof(*qp->r_sg_list) *
+ (srq->rq.max_sge - 1);
+ } else if (init_attr->cap.max_recv_sge > 1)
+ sg_list_sz = sizeof(*qp->r_sg_list) *
+ (init_attr->cap.max_recv_sge - 1);
+ qp->r_sg_list =
+ kzalloc_node(sg_list_sz, GFP_KERNEL, rdi->dparms.node);
+ if (!qp->r_sg_list)
+ goto bail_qp;
+ qp->allowed_ops = get_allowed_ops(init_attr->qp_type);
+
+ RCU_INIT_POINTER(qp->next, NULL);
+ if (init_attr->qp_type == IB_QPT_RC) {
+ qp->s_ack_queue =
+ kcalloc_node(rvt_max_atomic(rdi),
+ sizeof(*qp->s_ack_queue),
+ GFP_KERNEL,
+ rdi->dparms.node);
+ if (!qp->s_ack_queue)
+ goto bail_qp;
+ }
+ /* initialize timers needed for rc qp */
+ timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
+ hrtimer_setup(&qp->s_rnr_timer, rvt_rc_rnr_retry, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+
+ /*
+ * Driver needs to set up its private QP structure and do any
+ * initialization that is needed.
+ */
+ priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
+ if (IS_ERR(priv)) {
+ ret = PTR_ERR(priv);
+ goto bail_qp;
+ }
+ qp->priv = priv;
+ qp->timeout_jiffies =
+ usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
+ 1000UL);
+ if (init_attr->srq) {
+ sz = 0;
+ } else {
+ qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
+ qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
+ sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
+ sizeof(struct rvt_rwqe);
+ ret = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz,
+ rdi->dparms.node, udata);
+ if (ret)
+ goto bail_driver_priv;
+ }
+
+ /*
+ * ib_create_qp() will initialize qp->ibqp
+ * except for qp->ibqp.qp_num.
+ */
+ spin_lock_init(&qp->r_lock);
+ spin_lock_init(&qp->s_hlock);
+ spin_lock_init(&qp->s_lock);
+ atomic_set(&qp->refcount, 0);
+ atomic_set(&qp->local_ops_pending, 0);
+ init_waitqueue_head(&qp->wait);
+ INIT_LIST_HEAD(&qp->rspwait);
+ qp->state = IB_QPS_RESET;
+ qp->s_wq = swq;
+ qp->s_size = sqsize;
+ qp->s_avail = init_attr->cap.max_send_wr;
+ qp->s_max_sge = init_attr->cap.max_send_sge;
+ if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
+ qp->s_flags = RVT_S_SIGNAL_REQ_WR;
+ ret = alloc_ud_wq_attr(qp, rdi->dparms.node);
+ if (ret)
+ goto bail_rq_rvt;
+
+ if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
+ exclude_prefix = RVT_AIP_QP_PREFIX;
+
+ ret = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
+ init_attr->qp_type,
+ init_attr->port_num,
+ exclude_prefix);
+ if (ret < 0)
+ goto bail_rq_wq;
+
+ qp->ibqp.qp_num = ret;
+ if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
+ qp->ibqp.qp_num |= RVT_AIP_QP_BASE;
+ qp->port_num = init_attr->port_num;
+ rvt_init_qp(rdi, qp, init_attr->qp_type);
+ if (rdi->driver_f.qp_priv_init) {
+ ret = rdi->driver_f.qp_priv_init(rdi, qp, init_attr);
+ if (ret)
+ goto bail_rq_wq;
+ }
+ break;
+
+ default:
+ /* Don't support raw QPs */
+ return -EOPNOTSUPP;
+ }
+
+ init_attr->cap.max_inline_data = 0;
+
+ /*
+ * Return the address of the RWQ as the offset to mmap.
+ * See rvt_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ if (!qp->r_rq.wq) {
+ __u64 offset = 0;
+
+ ret = ib_copy_to_udata(udata, &offset,
+ sizeof(offset));
+ if (ret)
+ goto bail_qpn;
+ } else {
+ u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
+
+ qp->ip = rvt_create_mmap_info(rdi, s, udata,
+ qp->r_rq.wq);
+ if (IS_ERR(qp->ip)) {
+ ret = PTR_ERR(qp->ip);
+ goto bail_qpn;
+ }
+
+ ret = ib_copy_to_udata(udata, &qp->ip->offset,
+ sizeof(qp->ip->offset));
+ if (ret)
+ goto bail_ip;
+ }
+ qp->pid = current->pid;
+ }
+
+ spin_lock(&rdi->n_qps_lock);
+ if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
+ spin_unlock(&rdi->n_qps_lock);
+ ret = -ENOMEM;
+ goto bail_ip;
+ }
+
+ rdi->n_qps_allocated++;
+ /*
+ * Maintain a busy_jiffies variable that will be added to the timeout
+ * period in mod_retry_timer and add_retry_timer. busy_jiffies is
+ * scaled by the number of RC QPs created for the device to reduce
+ * the number of timeouts occurring when there is a large number of
+ * QPs. busy_jiffies is incremented once per RC QP scaling interval.
+ * The scaling interval is selected based on extensive performance
+ * evaluation of targeted workloads.
+ */
+ if (init_attr->qp_type == IB_QPT_RC) {
+ rdi->n_rc_qps++;
+ rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
+ }
+ spin_unlock(&rdi->n_qps_lock);
+
+ if (qp->ip) {
+ spin_lock_irq(&rdi->pending_lock);
+ list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
+ spin_unlock_irq(&rdi->pending_lock);
+ }
+
+ return 0;
+
+bail_ip:
+ if (qp->ip)
+ kref_put(&qp->ip->ref, rvt_release_mmap_info);
+
+bail_qpn:
+ rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
+
+bail_rq_wq:
+ free_ud_wq_attr(qp);
+
+bail_rq_rvt:
+ rvt_free_rq(&qp->r_rq);
+
+bail_driver_priv:
+ rdi->driver_f.qp_priv_free(rdi, qp);
+
+bail_qp:
+ kfree(qp->s_ack_queue);
+ kfree(qp->r_sg_list);
+ vfree(swq);
+ return ret;
+}
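+
+/*
+ * Illustrative usage sketch (not part of the functional change): a kernel
+ * ULP never calls rvt_create_qp() directly; it reaches it through the
+ * ib_create_qp() core verb, roughly as below. The capability values are
+ * hypothetical placeholders.
+ *
+ *	struct ib_qp_init_attr init_attr = {
+ *		.qp_type     = IB_QPT_RC,
+ *		.sq_sig_type = IB_SIGNAL_REQ_WR,
+ *		.send_cq     = send_cq,
+ *		.recv_cq     = recv_cq,
+ *		.cap = {
+ *			.max_send_wr  = 128,
+ *			.max_recv_wr  = 128,
+ *			.max_send_sge = 4,
+ *			.max_recv_sge = 4,
+ *		},
+ *	};
+ *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
+ *
+ *	if (IS_ERR(qp))
+ *		return PTR_ERR(qp);
+ */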
+
+/**
+ * rvt_error_qp - put a QP into the error state
+ * @qp: the QP to put into the error state
+ * @err: the receive completion error to signal if a RWQE is active
+ *
+ * Flushes both send and receive work queues.
+ *
+ * Return: true if last WQE event should be generated.
+ * The QP r_lock and s_lock should be held and interrupts disabled.
+ * If we are already in error state, just return.
+ */
+int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
+{
+ struct ib_wc wc;
+ int ret = 0;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+
+ lockdep_assert_held(&qp->r_lock);
+ lockdep_assert_held(&qp->s_lock);
+ if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
+ goto bail;
+
+ qp->state = IB_QPS_ERR;
+
+ if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
+ qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
+ timer_delete(&qp->s_timer);
+ }
+
+ if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
+ qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;
+
+ rdi->driver_f.notify_error_qp(qp);
+
+ /* Schedule the sending tasklet to drain the send work queue. */
+ if (READ_ONCE(qp->s_last) != qp->s_head)
+ rdi->driver_f.schedule_send(qp);
+
+ rvt_clear_mr_refs(qp, 0);
+
+ memset(&wc, 0, sizeof(wc));
+ wc.qp = &qp->ibqp;
+ wc.opcode = IB_WC_RECV;
+
+ if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
+ wc.wr_id = qp->r_wr_id;
+ wc.status = err;
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
+ }
+ wc.status = IB_WC_WR_FLUSH_ERR;
+
+ if (qp->r_rq.kwq) {
+ u32 head;
+ u32 tail;
+ struct rvt_rwq *wq = NULL;
+ struct rvt_krwq *kwq = NULL;
+
+ spin_lock(&qp->r_rq.kwq->c_lock);
+ /* qp->ip used to validate if there is a user buffer mmaped */
+ /* qp->ip is used to check whether a user receive queue is mmapped */
+ wq = qp->r_rq.wq;
+ head = RDMA_READ_UAPI_ATOMIC(wq->head);
+ tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
+ } else {
+ kwq = qp->r_rq.kwq;
+ head = kwq->head;
+ tail = kwq->tail;
+ }
+ /* sanity check pointers before trusting them */
+ if (head >= qp->r_rq.size)
+ head = 0;
+ if (tail >= qp->r_rq.size)
+ tail = 0;
+ while (tail != head) {
+ wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
+ if (++tail >= qp->r_rq.size)
+ tail = 0;
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
+ }
+ if (qp->ip)
+ RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
+ else
+ kwq->tail = tail;
+ spin_unlock(&qp->r_rq.kwq->c_lock);
+ } else if (qp->ibqp.event_handler) {
+ ret = 1;
+ }
+
+bail:
+ return ret;
+}
+EXPORT_SYMBOL(rvt_error_qp);
+
+/*
+ * Put the QP into the hash table.
+ * The hash table holds a reference to the QP.
+ */
+static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
+{
+ struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
+ unsigned long flags;
+
+ rvt_get_qp(qp);
+ spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
+
+ if (qp->ibqp.qp_num <= 1) {
+ rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
+ } else {
+ u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
+
+ qp->next = rdi->qp_dev->qp_table[n];
+ rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
+ trace_rvt_qpinsert(qp, n);
+ }
+
+ spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
+}
+
+/**
+ * rvt_modify_qp - modify the attributes of a queue pair
+ * @ibqp: the queue pair whose attributes we're modifying
+ * @attr: the new attributes
+ * @attr_mask: the mask of attributes to modify
+ * @udata: user data for libibverbs.so
+ *
+ * Return: 0 on success, otherwise returns an errno.
+ */
+int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ enum ib_qp_state cur_state, new_state;
+ struct ib_event ev;
+ int lastwqe = 0;
+ int mig = 0;
+ int pmtu = 0; /* for gcc warning only */
+ int opa_ah;
+
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_hlock);
+ spin_lock(&qp->s_lock);
+
+ cur_state = attr_mask & IB_QP_CUR_STATE ?
+ attr->cur_qp_state : qp->state;
+ new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+ opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);
+
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
+ attr_mask))
+ goto inval;
+
+ if (rdi->driver_f.check_modify_qp &&
+ rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
+ goto inval;
+
+ if (attr_mask & IB_QP_AV) {
+ if (opa_ah) {
+ if (rdma_ah_get_dlid(&attr->ah_attr) >=
+ opa_get_mcast_base(OPA_MCAST_NR))
+ goto inval;
+ } else {
+ if (rdma_ah_get_dlid(&attr->ah_attr) >=
+ be16_to_cpu(IB_MULTICAST_LID_BASE))
+ goto inval;
+ }
+
+ if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
+ goto inval;
+ }
+
+ if (attr_mask & IB_QP_ALT_PATH) {
+ if (opa_ah) {
+ if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
+ opa_get_mcast_base(OPA_MCAST_NR))
+ goto inval;
+ } else {
+ if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
+ be16_to_cpu(IB_MULTICAST_LID_BASE))
+ goto inval;
+ }
+
+ if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
+ goto inval;
+ if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
+ goto inval;
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ if (attr->pkey_index >= rvt_get_npkeys(rdi))
+ goto inval;
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER)
+ if (attr->min_rnr_timer > 31)
+ goto inval;
+
+ if (attr_mask & IB_QP_PORT)
+ if (qp->ibqp.qp_type == IB_QPT_SMI ||
+ qp->ibqp.qp_type == IB_QPT_GSI ||
+ attr->port_num == 0 ||
+ attr->port_num > ibqp->device->phys_port_cnt)
+ goto inval;
+
+ if (attr_mask & IB_QP_DEST_QPN)
+ if (attr->dest_qp_num > RVT_QPN_MASK)
+ goto inval;
+
+ if (attr_mask & IB_QP_RETRY_CNT)
+ if (attr->retry_cnt > 7)
+ goto inval;
+
+ if (attr_mask & IB_QP_RNR_RETRY)
+ if (attr->rnr_retry > 7)
+ goto inval;
+
+ /*
+ * Don't allow invalid path_mtu values. It is OK to set it greater
+ * than the active mtu (or even the max_cap, if we have tuned that
+ * to a small mtu). We'll set qp->path_mtu to the lesser of the
+ * requested attribute mtu and the active mtu, for packetizing
+ * messages.
+ * Note that the QP port has to be set in INIT and the MTU in RTR.
+ */
+ if (attr_mask & IB_QP_PATH_MTU) {
+ pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
+ if (pmtu < 0)
+ goto inval;
+ }
+
+ if (attr_mask & IB_QP_PATH_MIG_STATE) {
+ if (attr->path_mig_state == IB_MIG_REARM) {
+ if (qp->s_mig_state == IB_MIG_ARMED)
+ goto inval;
+ if (new_state != IB_QPS_RTS)
+ goto inval;
+ } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
+ if (qp->s_mig_state == IB_MIG_REARM)
+ goto inval;
+ if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
+ goto inval;
+ if (qp->s_mig_state == IB_MIG_ARMED)
+ mig = 1;
+ } else {
+ goto inval;
+ }
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
+ goto inval;
+
+ switch (new_state) {
+ case IB_QPS_RESET:
+ if (qp->state != IB_QPS_RESET)
+ _rvt_reset_qp(rdi, qp, ibqp->qp_type);
+ break;
+
+ case IB_QPS_RTR:
+ /* Allow event to re-trigger if QP set to RTR more than once */
+ qp->r_flags &= ~RVT_R_COMM_EST;
+ qp->state = new_state;
+ break;
+
+ case IB_QPS_SQD:
+ qp->s_draining = qp->s_last != qp->s_cur;
+ qp->state = new_state;
+ break;
+
+ case IB_QPS_SQE:
+ if (qp->ibqp.qp_type == IB_QPT_RC)
+ goto inval;
+ qp->state = new_state;
+ break;
+
+ case IB_QPS_ERR:
+ lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ break;
+
+ default:
+ qp->state = new_state;
+ break;
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ qp->s_pkey_index = attr->pkey_index;
+
+ if (attr_mask & IB_QP_PORT)
+ qp->port_num = attr->port_num;
+
+ if (attr_mask & IB_QP_DEST_QPN)
+ qp->remote_qpn = attr->dest_qp_num;
+
+ if (attr_mask & IB_QP_SQ_PSN) {
+ qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
+ qp->s_psn = qp->s_next_psn;
+ qp->s_sending_psn = qp->s_next_psn;
+ qp->s_last_psn = qp->s_next_psn - 1;
+ qp->s_sending_hpsn = qp->s_last_psn;
+ }
+
+ if (attr_mask & IB_QP_RQ_PSN)
+ qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ qp->qp_access_flags = attr->qp_access_flags;
+
+ if (attr_mask & IB_QP_AV) {
+ rdma_replace_ah_attr(&qp->remote_ah_attr, &attr->ah_attr);
+ qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr);
+ qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
+ }
+
+ if (attr_mask & IB_QP_ALT_PATH) {
+ rdma_replace_ah_attr(&qp->alt_ah_attr, &attr->alt_ah_attr);
+ qp->s_alt_pkey_index = attr->alt_pkey_index;
+ }
+
+ if (attr_mask & IB_QP_PATH_MIG_STATE) {
+ qp->s_mig_state = attr->path_mig_state;
+ if (mig) {
+ qp->remote_ah_attr = qp->alt_ah_attr;
+ qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
+ qp->s_pkey_index = qp->s_alt_pkey_index;
+ }
+ }
+
+ if (attr_mask & IB_QP_PATH_MTU) {
+ qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
+ qp->log_pmtu = ilog2(qp->pmtu);
+ }
+
+ if (attr_mask & IB_QP_RETRY_CNT) {
+ qp->s_retry_cnt = attr->retry_cnt;
+ qp->s_retry = attr->retry_cnt;
+ }
+
+ if (attr_mask & IB_QP_RNR_RETRY) {
+ qp->s_rnr_retry_cnt = attr->rnr_retry;
+ qp->s_rnr_retry = attr->rnr_retry;
+ }
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER)
+ qp->r_min_rnr_timer = attr->min_rnr_timer;
+
+ if (attr_mask & IB_QP_TIMEOUT) {
+ qp->timeout = attr->timeout;
+ qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
+ }
+
+ if (attr_mask & IB_QP_QKEY)
+ qp->qkey = attr->qkey;
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
+ qp->s_max_rd_atomic = attr->max_rd_atomic;
+
+ if (rdi->driver_f.modify_qp)
+ rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);
+
+ spin_unlock(&qp->s_lock);
+ spin_unlock(&qp->s_hlock);
+ spin_unlock_irq(&qp->r_lock);
+
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+ rvt_insert_qp(rdi, qp);
+
+ if (lastwqe) {
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+ if (mig) {
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_PATH_MIG;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+ return 0;
+
+inval:
+ spin_unlock(&qp->s_lock);
+ spin_unlock(&qp->s_hlock);
+ spin_unlock_irq(&qp->r_lock);
+ return -EINVAL;
+}
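+
+/*
+ * Illustrative usage sketch: state transitions arrive here via the
+ * ib_modify_qp() core verb. A minimal RESET -> INIT step for an RC QP
+ * could look like the following (attribute values are hypothetical):
+ *
+ *	struct ib_qp_attr attr = {
+ *		.qp_state        = IB_QPS_INIT,
+ *		.pkey_index      = 0,
+ *		.port_num        = 1,
+ *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
+ *	};
+ *
+ *	ret = ib_modify_qp(qp, &attr,
+ *			   IB_QP_STATE | IB_QP_PKEY_INDEX |
+ *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
+ *
+ * Later INIT -> RTR and RTR -> RTS transitions supply the path, PSN and
+ * retry attributes that are applied above.
+ */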
+
+/**
+ * rvt_destroy_qp - destroy a queue pair
+ * @ibqp: the queue pair to destroy
+ * @udata: unused by the driver
+ *
+ * Note that this can be called while the QP is actively sending or
+ * receiving!
+ *
+ * Return: 0 on success.
+ */
+int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+{
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+
+ rvt_reset_qp(rdi, qp, ibqp->qp_type);
+
+ wait_event(qp->wait, !atomic_read(&qp->refcount));
+ /* qpn is now available for use again */
+ rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
+
+ spin_lock(&rdi->n_qps_lock);
+ rdi->n_qps_allocated--;
+ if (qp->ibqp.qp_type == IB_QPT_RC) {
+ rdi->n_rc_qps--;
+ rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
+ }
+ spin_unlock(&rdi->n_qps_lock);
+
+ if (qp->ip)
+ kref_put(&qp->ip->ref, rvt_release_mmap_info);
+ kvfree(qp->r_rq.kwq);
+ rdi->driver_f.qp_priv_free(rdi, qp);
+ kfree(qp->s_ack_queue);
+ kfree(qp->r_sg_list);
+ rdma_destroy_ah_attr(&qp->remote_ah_attr);
+ rdma_destroy_ah_attr(&qp->alt_ah_attr);
+ free_ud_wq_attr(qp);
+ vfree(qp->s_wq);
+ return 0;
+}
+
+/**
+ * rvt_query_qp - query an ibqp
+ * @ibqp: IB qp to query
+ * @attr: attr struct to fill in
+ * @attr_mask: attr mask ignored
+ * @init_attr: struct to fill in
+ *
+ * Return: always 0
+ */
+int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_qp_init_attr *init_attr)
+{
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+
+ attr->qp_state = qp->state;
+ attr->cur_qp_state = attr->qp_state;
+ attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
+ attr->path_mig_state = qp->s_mig_state;
+ attr->qkey = qp->qkey;
+ attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
+ attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
+ attr->dest_qp_num = qp->remote_qpn;
+ attr->qp_access_flags = qp->qp_access_flags;
+ attr->cap.max_send_wr = qp->s_size - 1 -
+ rdi->dparms.reserved_operations;
+ attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
+ attr->cap.max_send_sge = qp->s_max_sge;
+ attr->cap.max_recv_sge = qp->r_rq.max_sge;
+ attr->cap.max_inline_data = 0;
+ attr->ah_attr = qp->remote_ah_attr;
+ attr->alt_ah_attr = qp->alt_ah_attr;
+ attr->pkey_index = qp->s_pkey_index;
+ attr->alt_pkey_index = qp->s_alt_pkey_index;
+ attr->en_sqd_async_notify = 0;
+ attr->sq_draining = qp->s_draining;
+ attr->max_rd_atomic = qp->s_max_rd_atomic;
+ attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
+ attr->min_rnr_timer = qp->r_min_rnr_timer;
+ attr->port_num = qp->port_num;
+ attr->timeout = qp->timeout;
+ attr->retry_cnt = qp->s_retry_cnt;
+ attr->rnr_retry = qp->s_rnr_retry_cnt;
+ attr->alt_port_num =
+ rdma_ah_get_port_num(&qp->alt_ah_attr);
+ attr->alt_timeout = qp->alt_timeout;
+
+ init_attr->event_handler = qp->ibqp.event_handler;
+ init_attr->qp_context = qp->ibqp.qp_context;
+ init_attr->send_cq = qp->ibqp.send_cq;
+ init_attr->recv_cq = qp->ibqp.recv_cq;
+ init_attr->srq = qp->ibqp.srq;
+ init_attr->cap = attr->cap;
+ if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
+ init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
+ else
+ init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
+ init_attr->qp_type = qp->ibqp.qp_type;
+ init_attr->port_num = qp->port_num;
+ return 0;
+}
+
+/**
+ * rvt_post_recv - post a receive on a QP
+ * @ibqp: the QP to post the receive on
+ * @wr: the WR to post
+ * @bad_wr: the first bad WR is put here
+ *
+ * This may be called from interrupt context.
+ *
+ * Return: 0 on success otherwise errno
+ */
+int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ struct rvt_krwq *wq = qp->r_rq.kwq;
+ unsigned long flags;
+ int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
+ !qp->ibqp.srq;
+
+ /* Check that state is OK to post receive. */
+ if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ for (; wr; wr = wr->next) {
+ struct rvt_rwqe *wqe;
+ u32 next;
+ int i;
+
+ if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&qp->r_rq.kwq->p_lock, flags);
+ next = wq->head + 1;
+ if (next >= qp->r_rq.size)
+ next = 0;
+ if (next == READ_ONCE(wq->tail)) {
+ spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
+ *bad_wr = wr;
+ return -ENOMEM;
+ }
+ if (unlikely(qp_err_flush)) {
+ struct ib_wc wc;
+
+ memset(&wc, 0, sizeof(wc));
+ wc.qp = &qp->ibqp;
+ wc.opcode = IB_WC_RECV;
+ wc.wr_id = wr->wr_id;
+ wc.status = IB_WC_WR_FLUSH_ERR;
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
+ } else {
+ wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
+ wqe->wr_id = wr->wr_id;
+ wqe->num_sge = wr->num_sge;
+ for (i = 0; i < wr->num_sge; i++) {
+ wqe->sg_list[i].addr = wr->sg_list[i].addr;
+ wqe->sg_list[i].length = wr->sg_list[i].length;
+ wqe->sg_list[i].lkey = wr->sg_list[i].lkey;
+ }
+ /*
+ * Make sure queue entry is written
+ * before the head index.
+ */
+ smp_store_release(&wq->head, next);
+ }
+ spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
+ }
+ return 0;
+}
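+
+/*
+ * Illustrative usage sketch: receive buffers are handed to this routine
+ * through the ib_post_recv() verb; the DMA address, length and lkey below
+ * are hypothetical.
+ *
+ *	struct ib_sge sge = {
+ *		.addr   = dma_addr,
+ *		.length = buf_len,
+ *		.lkey   = lkey,
+ *	};
+ *	struct ib_recv_wr wr = {
+ *		.wr_id   = (u64)ctx,
+ *		.sg_list = &sge,
+ *		.num_sge = 1,
+ *	};
+ *	const struct ib_recv_wr *bad_wr;
+ *
+ *	ret = ib_post_recv(qp, &wr, &bad_wr);
+ */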
+
+/**
+ * rvt_qp_valid_operation - validate post send wr request
+ * @qp: the qp
+ * @post_parms: the post send table for the driver
+ * @wr: the work request
+ *
+ * The routine validates the operation based on the
+ * validation table and returns the length of the operation,
+ * which can extend beyond the ib_send_wr.  Operation-
+ * dependent flags key atomic operation validation.
+ *
+ * There is an exception for UD qps that validates the pd and
+ * overrides the length to include the additional UD specific
+ * length.
+ *
+ * Returns a negative error or the length of the work request
+ * for building the swqe.
+ */
+static inline int rvt_qp_valid_operation(
+ struct rvt_qp *qp,
+ const struct rvt_operation_params *post_parms,
+ const struct ib_send_wr *wr)
+{
+ int len;
+
+ if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
+ return -EINVAL;
+ if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
+ return -EINVAL;
+ if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
+ ibpd_to_rvtpd(qp->ibqp.pd)->user)
+ return -EINVAL;
+ if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
+ (wr->num_sge == 0 ||
+ wr->sg_list[0].length < sizeof(u64) ||
+ wr->sg_list[0].addr & (sizeof(u64) - 1)))
+ return -EINVAL;
+ if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
+ !qp->s_max_rd_atomic)
+ return -EINVAL;
+ len = post_parms[wr->opcode].length;
+ /* UD specific */
+ if (qp->ibqp.qp_type != IB_QPT_UC &&
+ qp->ibqp.qp_type != IB_QPT_RC) {
+ if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
+ return -EINVAL;
+ len = sizeof(struct ib_ud_wr);
+ }
+ return len;
+}
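+
+/*
+ * For reference, the post_parms[] table consulted above is supplied by the
+ * driver and indexed by wr->opcode; each entry gives the per-opcode WR
+ * length, a QP-type support bitmask and validation flags. A hypothetical
+ * entry might look like:
+ *
+ *	[IB_WR_RDMA_WRITE] = {
+ *		.length      = sizeof(struct ib_rdma_wr),
+ *		.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+ *	},
+ */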
+
+/**
+ * rvt_qp_is_avail - determine queue capacity
+ * @qp: the qp
+ * @rdi: the rdmavt device
+ * @reserved_op: is reserved operation
+ *
+ * This assumes the s_hlock is held but the s_last
+ * qp variable is uncontrolled.
+ *
+ * For non reserved operations, the qp->s_avail
+ * may be changed.
+ *
+ * Return: 0 on success or -ENOMEM when the queue is full.
+ */
+static inline int rvt_qp_is_avail(
+ struct rvt_qp *qp,
+ struct rvt_dev_info *rdi,
+ bool reserved_op)
+{
+ u32 slast;
+ u32 avail;
+ u32 reserved_used;
+
+ /* see rvt_qp_wqe_unreserve() */
+ smp_mb__before_atomic();
+ if (unlikely(reserved_op)) {
+ /* see rvt_qp_wqe_unreserve() */
+ reserved_used = atomic_read(&qp->s_reserved_used);
+ if (reserved_used >= rdi->dparms.reserved_operations)
+ return -ENOMEM;
+ return 0;
+ }
+ /* non-reserved operations */
+ if (likely(qp->s_avail))
+ return 0;
+ /* See rvt_qp_complete_swqe() */
+ slast = smp_load_acquire(&qp->s_last);
+ if (qp->s_head >= slast)
+ avail = qp->s_size - (qp->s_head - slast);
+ else
+ avail = slast - qp->s_head;
+
+ reserved_used = atomic_read(&qp->s_reserved_used);
+ avail = avail - 1 -
+ (rdi->dparms.reserved_operations - reserved_used);
+ /* ensure we don't assign a negative s_avail */
+ if ((s32)avail <= 0)
+ return -ENOMEM;
+ qp->s_avail = avail;
+ if (WARN_ON(qp->s_avail >
+ (qp->s_size - 1 - rdi->dparms.reserved_operations)))
+ rvt_pr_err(rdi,
+ "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
+ qp->ibqp.qp_num, qp->s_size, qp->s_avail,
+ qp->s_head, qp->s_tail, qp->s_cur,
+ qp->s_acked, qp->s_last);
+ return 0;
+}
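+
+/*
+ * Worked example of the accounting above (numbers are hypothetical): with
+ * s_size = 16, s_head = 3, s_last = 10, reserved_operations = 2 and no
+ * reserved entries in use, avail = (10 - 3) - 1 - (2 - 0) = 4, i.e. four
+ * more non-reserved WQEs may be posted before -ENOMEM is returned.
+ */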
+
+/**
+ * rvt_post_one_wr - post one RC, UC, or UD send work request
+ * @qp: the QP to post on
+ * @wr: the work request to send
+ * @call_send: kick the send engine into gear
+ */
+static int rvt_post_one_wr(struct rvt_qp *qp,
+ const struct ib_send_wr *wr,
+ bool *call_send)
+{
+ struct rvt_swqe *wqe;
+ u32 next;
+ int i;
+ int j;
+ int acc;
+ struct rvt_lkey_table *rkt;
+ struct rvt_pd *pd;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+ u8 log_pmtu;
+ int ret;
+ size_t cplen;
+ bool reserved_op;
+ int local_ops_delayed = 0;
+
+ BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));
+
+ /* IB spec says that num_sge == 0 is OK. */
+ if (unlikely(wr->num_sge > qp->s_max_sge))
+ return -EINVAL;
+
+ ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
+ if (ret < 0)
+ return ret;
+ cplen = ret;
+
+ /*
+ * Local operations include fast register and local invalidate.
+ * Fast register needs to be processed immediately because the
+ * registered lkey may be used by following work requests and the
+ * lkey needs to be valid at the time those requests are posted.
+ * Local invalidate can be processed immediately if fencing is
+ * not required and no previous local invalidate ops are pending.
+ * Signaled local operations that have been processed immediately
+ * need to have requests with "completion only" flags set posted
+ * to the send queue in order to generate completions.
+ */
+ if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
+ switch (wr->opcode) {
+ case IB_WR_REG_MR:
+ ret = rvt_fast_reg_mr(qp,
+ reg_wr(wr)->mr,
+ reg_wr(wr)->key,
+ reg_wr(wr)->access);
+ if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
+ return ret;
+ break;
+ case IB_WR_LOCAL_INV:
+ if ((wr->send_flags & IB_SEND_FENCE) ||
+ atomic_read(&qp->local_ops_pending)) {
+ local_ops_delayed = 1;
+ } else {
+ ret = rvt_invalidate_rkey(
+ qp, wr->ex.invalidate_rkey);
+ if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
+ return ret;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ reserved_op = rdi->post_parms[wr->opcode].flags &
+ RVT_OPERATION_USE_RESERVE;
+ /* check for avail */
+ ret = rvt_qp_is_avail(qp, rdi, reserved_op);
+ if (ret)
+ return ret;
+ next = qp->s_head + 1;
+ if (next >= qp->s_size)
+ next = 0;
+
+ rkt = &rdi->lkey_table;
+ pd = ibpd_to_rvtpd(qp->ibqp.pd);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_head);
+
+ /* cplen has length from above */
+ memcpy(&wqe->ud_wr, wr, cplen);
+
+ wqe->length = 0;
+ j = 0;
+ if (wr->num_sge) {
+ struct rvt_sge *last_sge = NULL;
+
+ acc = wr->opcode >= IB_WR_RDMA_READ ?
+ IB_ACCESS_LOCAL_WRITE : 0;
+ for (i = 0; i < wr->num_sge; i++) {
+ u32 length = wr->sg_list[i].length;
+
+ if (length == 0)
+ continue;
+ ret = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], last_sge,
+ &wr->sg_list[i], acc);
+ if (unlikely(ret < 0))
+ goto bail_inval_free;
+ wqe->length += length;
+ if (ret)
+ last_sge = &wqe->sg_list[j];
+ j += ret;
+ }
+ wqe->wr.num_sge = j;
+ }
+
+ /*
+ * Calculate and set SWQE PSN values prior to handing it off
+ * to the driver's check routine. This gives the driver the
+ * opportunity to adjust PSN values based on internal checks.
+ */
+ log_pmtu = qp->log_pmtu;
+ if (qp->allowed_ops == IB_OPCODE_UD) {
+ struct rvt_ah *ah = rvt_get_swqe_ah(wqe);
+
+ log_pmtu = ah->log_pmtu;
+ rdma_copy_ah_attr(wqe->ud_wr.attr, &ah->attr);
+ }
+
+ if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
+ if (local_ops_delayed)
+ atomic_inc(&qp->local_ops_pending);
+ else
+ wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
+ wqe->ssn = 0;
+ wqe->psn = 0;
+ wqe->lpsn = 0;
+ } else {
+ wqe->ssn = qp->s_ssn++;
+ wqe->psn = qp->s_next_psn;
+ wqe->lpsn = wqe->psn +
+ (wqe->length ?
+ ((wqe->length - 1) >> log_pmtu) :
+ 0);
+ }
+
+ /* general part of wqe valid - allow for driver checks */
+ if (rdi->driver_f.setup_wqe) {
+ ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
+ if (ret < 0)
+ goto bail_inval_free_ref;
+ }
+
+ if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL))
+ qp->s_next_psn = wqe->lpsn + 1;
+
+ if (unlikely(reserved_op)) {
+ wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
+ rvt_qp_wqe_reserve(qp, wqe);
+ } else {
+ wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
+ qp->s_avail--;
+ }
+ trace_rvt_post_one_wr(qp, wqe, wr->num_sge);
+ smp_wmb(); /* see request builders */
+ qp->s_head = next;
+
+ return 0;
+
+bail_inval_free_ref:
+ if (qp->allowed_ops == IB_OPCODE_UD)
+ rdma_destroy_ah_attr(wqe->ud_wr.attr);
+bail_inval_free:
+ /* release mr holds */
+ while (j) {
+ struct rvt_sge *sge = &wqe->sg_list[--j];
+
+ rvt_put_mr(sge->mr);
+ }
+ return ret;
+}
+
+/**
+ * rvt_post_send - post a send on a QP
+ * @ibqp: the QP to post the send on
+ * @wr: the list of work requests to post
+ * @bad_wr: the first bad WR is put here
+ *
+ * This may be called from interrupt context.
+ *
+ * Return: 0 on success else errno
+ */
+int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+ unsigned long flags = 0;
+ bool call_send;
+ unsigned nreq = 0;
+ int err = 0;
+
+ spin_lock_irqsave(&qp->s_hlock, flags);
+
+ /*
+ * Ensure QP state is such that we can send. If not, bail out early;
+ * there is no need to recheck this for every WR that is posted.
+ */
+ if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
+ spin_unlock_irqrestore(&qp->s_hlock, flags);
+ return -EINVAL;
+ }
+
+ /*
+ * If the send queue is empty and we only have a single WR, then just go
+ * ahead and kick the send engine into gear. Otherwise we will always
+ * just schedule the send to happen later.
+ */
+ call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;
+
+ for (; wr; wr = wr->next) {
+ err = rvt_post_one_wr(qp, wr, &call_send);
+ if (unlikely(err)) {
+ *bad_wr = wr;
+ goto bail;
+ }
+ nreq++;
+ }
+bail:
+ spin_unlock_irqrestore(&qp->s_hlock, flags);
+ if (nreq) {
+ /*
+ * Only call do_send if there is exactly one packet, and the
+ * driver said it was ok.
+ */
+ if (nreq == 1 && call_send)
+ rdi->driver_f.do_send(qp);
+ else
+ rdi->driver_f.schedule_send_no_lock(qp);
+ }
+ return err;
+}
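+
+/*
+ * Illustrative usage sketch: sends reach this routine through the
+ * ib_post_send() verb. A single WR posted on an otherwise empty send queue
+ * makes call_send true, so it is normally handed straight to
+ * driver_f.do_send() (the driver may still override call_send in
+ * setup_wqe); a chain of WRs is always deferred to
+ * driver_f.schedule_send_no_lock(). The sge/rkey values are hypothetical:
+ *
+ *	struct ib_rdma_wr wr = {
+ *		.wr = {
+ *			.opcode     = IB_WR_RDMA_WRITE,
+ *			.sg_list    = &sge,
+ *			.num_sge    = 1,
+ *			.send_flags = IB_SEND_SIGNALED,
+ *		},
+ *		.remote_addr = raddr,
+ *		.rkey        = rkey,
+ *	};
+ *	const struct ib_send_wr *bad_wr;
+ *
+ *	ret = ib_post_send(qp, &wr.wr, &bad_wr);
+ */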
+
+/**
+ * rvt_post_srq_recv - post a receive on a shared receive queue
+ * @ibsrq: the SRQ to post the receive on
+ * @wr: the list of work requests to post
+ * @bad_wr: A pointer to the first WR to cause a problem is put here
+ *
+ * This may be called from interrupt context.
+ *
+ * Return: 0 on success else errno
+ */
+int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
+ struct rvt_krwq *wq;
+ unsigned long flags;
+
+ for (; wr; wr = wr->next) {
+ struct rvt_rwqe *wqe;
+ u32 next;
+ int i;
+
+ if ((unsigned)wr->num_sge > srq->rq.max_sge) {
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&srq->rq.kwq->p_lock, flags);
+ wq = srq->rq.kwq;
+ next = wq->head + 1;
+ if (next >= srq->rq.size)
+ next = 0;
+ if (next == READ_ONCE(wq->tail)) {
+ spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
+ *bad_wr = wr;
+ return -ENOMEM;
+ }
+
+ wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
+ wqe->wr_id = wr->wr_id;
+ wqe->num_sge = wr->num_sge;
+ for (i = 0; i < wr->num_sge; i++) {
+ wqe->sg_list[i].addr = wr->sg_list[i].addr;
+ wqe->sg_list[i].length = wr->sg_list[i].length;
+ wqe->sg_list[i].lkey = wr->sg_list[i].lkey;
+ }
+ /* Make sure queue entry is written before the head index. */
+ smp_store_release(&wq->head, next);
+ spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
+ }
+ return 0;
+}
+
+/*
+ * rvt uses the internal kernel struct as part of its ABI; for now, make sure
+ * the kernel struct does not change layout. FIXME: rvt should never cast the
+ * user struct to a kernel struct.
+ */
+static struct ib_sge *rvt_cast_sge(struct rvt_wqe_sge *sge)
+{
+ BUILD_BUG_ON(offsetof(struct ib_sge, addr) !=
+ offsetof(struct rvt_wqe_sge, addr));
+ BUILD_BUG_ON(offsetof(struct ib_sge, length) !=
+ offsetof(struct rvt_wqe_sge, length));
+ BUILD_BUG_ON(offsetof(struct ib_sge, lkey) !=
+ offsetof(struct rvt_wqe_sge, lkey));
+ return (struct ib_sge *)sge;
+}
+
+/*
+ * Validate a RWQE and fill in the SGE state.
+ * Return 1 if OK.
+ */
+static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
+{
+ int i, j, ret;
+ struct ib_wc wc;
+ struct rvt_lkey_table *rkt;
+ struct rvt_pd *pd;
+ struct rvt_sge_state *ss;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+
+ rkt = &rdi->lkey_table;
+ pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
+ ss = &qp->r_sge;
+ ss->sg_list = qp->r_sg_list;
+ qp->r_len = 0;
+ for (i = j = 0; i < wqe->num_sge; i++) {
+ if (wqe->sg_list[i].length == 0)
+ continue;
+ /* Check LKEY */
+ ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
+ NULL, rvt_cast_sge(&wqe->sg_list[i]),
+ IB_ACCESS_LOCAL_WRITE);
+ if (unlikely(ret <= 0))
+ goto bad_lkey;
+ qp->r_len += wqe->sg_list[i].length;
+ j++;
+ }
+ ss->num_sge = j;
+ ss->total_len = qp->r_len;
+ return 1;
+
+bad_lkey:
+ while (j) {
+ struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
+
+ rvt_put_mr(sge->mr);
+ }
+ ss->num_sge = 0;
+ memset(&wc, 0, sizeof(wc));
+ wc.wr_id = wqe->wr_id;
+ wc.status = IB_WC_LOC_PROT_ERR;
+ wc.opcode = IB_WC_RECV;
+ wc.qp = &qp->ibqp;
+ /* Signal solicited completion event. */
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
+ return 0;
+}
+
+/**
+ * get_rvt_head - get the head index of the circular receive buffer
+ * @rq: the receive queue
+ * @ip: the mmap info pointer; non-NULL when the queue is mapped to user space
+ *
+ * Return - head index value
+ */
+static inline u32 get_rvt_head(struct rvt_rq *rq, void *ip)
+{
+ u32 head;
+
+ if (ip)
+ head = RDMA_READ_UAPI_ATOMIC(rq->wq->head);
+ else
+ head = rq->kwq->head;
+
+ return head;
+}
+
+/**
+ * rvt_get_rwqe - copy the next RWQE into the QP's RWQE
+ * @qp: the QP
+ * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
+ *
+ * Return -1 if there is a local error, 0 if no RWQE is available,
+ * otherwise return 1.
+ *
+ * Can be called from interrupt level.
+ */
+int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
+{
+ unsigned long flags;
+ struct rvt_rq *rq;
+ struct rvt_krwq *kwq = NULL;
+ struct rvt_rwq *wq;
+ struct rvt_srq *srq;
+ struct rvt_rwqe *wqe;
+ void (*handler)(struct ib_event *, void *);
+ u32 tail;
+ u32 head;
+ int ret;
+ void *ip = NULL;
+
+ if (qp->ibqp.srq) {
+ srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
+ handler = srq->ibsrq.event_handler;
+ rq = &srq->rq;
+ ip = srq->ip;
+ } else {
+ srq = NULL;
+ handler = NULL;
+ rq = &qp->r_rq;
+ ip = qp->ip;
+ }
+
+ spin_lock_irqsave(&rq->kwq->c_lock, flags);
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
+ ret = 0;
+ goto unlock;
+ }
+ kwq = rq->kwq;
+ if (ip) {
+ wq = rq->wq;
+ tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
+ } else {
+ tail = kwq->tail;
+ }
+
+ /* Validate tail before using it since it is user writable. */
+ if (tail >= rq->size)
+ tail = 0;
+
+ if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
+ head = get_rvt_head(rq, ip);
+ kwq->count = rvt_get_rq_count(rq, head, tail);
+ }
+ if (unlikely(kwq->count == 0)) {
+ ret = 0;
+ goto unlock;
+ }
+ /* Make sure entry is read after the count is read. */
+ smp_rmb();
+ wqe = rvt_get_rwqe_ptr(rq, tail);
+ /*
+ * Even though we update the tail index in memory, the verbs
+ * consumer is not supposed to post more entries until a
+ * completion is generated.
+ */
+ if (++tail >= rq->size)
+ tail = 0;
+ if (ip)
+ RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
+ else
+ kwq->tail = tail;
+ if (!wr_id_only && !init_sge(qp, wqe)) {
+ ret = -1;
+ goto unlock;
+ }
+ qp->r_wr_id = wqe->wr_id;
+
+ kwq->count--;
+ ret = 1;
+ set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
+ if (handler) {
+ /*
+ * Validate head pointer value and compute
+ * the number of remaining WQEs.
+ */
+ if (kwq->count < srq->limit) {
+ kwq->count =
+ rvt_get_rq_count(rq,
+ get_rvt_head(rq, ip), tail);
+ if (kwq->count < srq->limit) {
+ struct ib_event ev;
+
+ srq->limit = 0;
+ spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
+ ev.device = qp->ibqp.device;
+ ev.element.srq = qp->ibqp.srq;
+ ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ handler(&ev, srq->ibsrq.srq_context);
+ goto bail;
+ }
+ }
+ }
+unlock:
+ spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
+bail:
+ return ret;
+}
+EXPORT_SYMBOL(rvt_get_rwqe);
+
+/**
+ * rvt_comm_est - handle trap with QP established
+ * @qp: the QP
+ */
+void rvt_comm_est(struct rvt_qp *qp)
+{
+ qp->r_flags |= RVT_R_COMM_EST;
+ if (qp->ibqp.event_handler) {
+ struct ib_event ev;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_COMM_EST;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+}
+EXPORT_SYMBOL(rvt_comm_est);
+
+void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
+{
+ unsigned long flags;
+ int lastwqe;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ lastwqe = rvt_error_qp(qp, err);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ if (lastwqe) {
+ struct ib_event ev;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+}
+EXPORT_SYMBOL(rvt_rc_error);
+
+/*
+ * rvt_rnr_tbl_to_usec - convert an rnr table index to microseconds
+ * @index - the index into ib_rvt_rnr_table
+ * Return: the RNR delay in usec for the given index
+ */
+unsigned long rvt_rnr_tbl_to_usec(u32 index)
+{
+ return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)];
+}
+EXPORT_SYMBOL(rvt_rnr_tbl_to_usec);
+
+static inline unsigned long rvt_aeth_to_usec(u32 aeth)
+{
+ return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
+ IB_AETH_CREDIT_MASK];
+}
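+
+/*
+ * Worked example (the AETH value is hypothetical): assuming the usual
+ * IB_AETH_CREDIT_SHIFT of 24 and IB_AETH_CREDIT_MASK of 0x1f, an aeth of
+ * 0x07000000 yields index (0x07000000 >> 24) & 0x1f = 7, so the delay used
+ * is ib_rvt_rnr_table[7] microseconds.
+ */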
+
+/*
+ * rvt_add_retry_timer_ext - add/start a retry timer
+ * @qp - the QP
+ * @shift - timeout shift to wait for multiple packets
+ * add a retry timer on the QP
+ */
+void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift)
+{
+ struct ib_qp *ibqp = &qp->ibqp;
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+
+ lockdep_assert_held(&qp->s_lock);
+ qp->s_flags |= RVT_S_TIMER;
+ /* 4.096 usec. * (1 << qp->timeout) */
+ qp->s_timer.expires = jiffies + rdi->busy_jiffies +
+ (qp->timeout_jiffies << shift);
+ add_timer(&qp->s_timer);
+}
+EXPORT_SYMBOL(rvt_add_retry_timer_ext);
+
+/**
+ * rvt_add_rnr_timer - add/start an rnr timer on the QP
+ * @qp: the QP
+ * @aeth: aeth of RNR timeout, simulated aeth for loopback
+ */
+void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
+{
+ u32 to;
+
+ lockdep_assert_held(&qp->s_lock);
+ qp->s_flags |= RVT_S_WAIT_RNR;
+ to = rvt_aeth_to_usec(aeth);
+ trace_rvt_rnrnak_add(qp, to);
+ hrtimer_start(&qp->s_rnr_timer,
+ ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED);
+}
+EXPORT_SYMBOL(rvt_add_rnr_timer);
+
+/**
+ * rvt_stop_rc_timers - stop all timers
+ * @qp: the QP
+ * stop any pending timers
+ */
+void rvt_stop_rc_timers(struct rvt_qp *qp)
+{
+ lockdep_assert_held(&qp->s_lock);
+ /* Remove QP from all timers */
+ if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
+ qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
+ timer_delete(&qp->s_timer);
+ hrtimer_try_to_cancel(&qp->s_rnr_timer);
+ }
+}
+EXPORT_SYMBOL(rvt_stop_rc_timers);
+
+/**
+ * rvt_stop_rnr_timer - stop an rnr timer
+ * @qp: the QP
+ *
+ * Stop the rnr timer if it is pending.
+ */
+static void rvt_stop_rnr_timer(struct rvt_qp *qp)
+{
+ lockdep_assert_held(&qp->s_lock);
+ /* Remove QP from rnr timer */
+ if (qp->s_flags & RVT_S_WAIT_RNR) {
+ qp->s_flags &= ~RVT_S_WAIT_RNR;
+ trace_rvt_rnrnak_stop(qp, 0);
+ }
+}
+
+/**
+ * rvt_del_timers_sync - wait for any timeout routines to exit
+ * @qp: the QP
+ */
+void rvt_del_timers_sync(struct rvt_qp *qp)
+{
+ timer_delete_sync(&qp->s_timer);
+ hrtimer_cancel(&qp->s_rnr_timer);
+}
+EXPORT_SYMBOL(rvt_del_timers_sync);
+
+/*
+ * This is called from s_timer for missing responses.
+ */
+static void rvt_rc_timeout(struct timer_list *t)
+{
+ struct rvt_qp *qp = timer_container_of(qp, t, s_timer);
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->r_lock, flags);
+ spin_lock(&qp->s_lock);
+ if (qp->s_flags & RVT_S_TIMER) {
+ struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
+
+ qp->s_flags &= ~RVT_S_TIMER;
+ rvp->n_rc_timeouts++;
+ timer_delete(&qp->s_timer);
+ trace_rvt_rc_timeout(qp, qp->s_last_psn + 1);
+ if (rdi->driver_f.notify_restart_rc)
+ rdi->driver_f.notify_restart_rc(qp,
+ qp->s_last_psn + 1,
+ 1);
+ rdi->driver_f.schedule_send(qp);
+ }
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irqrestore(&qp->r_lock, flags);
+}
+
+/*
+ * This is called from s_timer for RNR timeouts.
+ */
+enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
+{
+ struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ rvt_stop_rnr_timer(qp);
+ trace_rvt_rnrnak_timeout(qp, 0);
+ rdi->driver_f.schedule_send(qp);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return HRTIMER_NORESTART;
+}
+EXPORT_SYMBOL(rvt_rc_rnr_retry);
+
+/**
+ * rvt_qp_iter_init - initialize a QP iterator
+ * @rdi: rvt devinfo
+ * @v: u64 value
+ * @cb: user-defined callback
+ *
+ * This returns an iterator suitable for iterating QPs
+ * in the system.
+ *
+ * The @cb is a user-defined callback and @v is a 64-bit
+ * value passed to and relevant for processing in the
+ * @cb. An example use case would be to alter QP processing
+ * based on criteria not part of the rvt_qp.
+ *
+ * Use cases that require memory allocation to succeed
+ * must preallocate appropriately.
+ *
+ * Return: a pointer to an rvt_qp_iter or NULL
+ */
+struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
+ u64 v,
+ void (*cb)(struct rvt_qp *qp, u64 v))
+{
+ struct rvt_qp_iter *i;
+
+ i = kzalloc(sizeof(*i), GFP_KERNEL);
+ if (!i)
+ return NULL;
+
+ i->rdi = rdi;
+ /* number of special QPs (SMI/GSI) for device */
+ i->specials = rdi->ibdev.phys_port_cnt * 2;
+ i->v = v;
+ i->cb = cb;
+
+ return i;
+}
+EXPORT_SYMBOL(rvt_qp_iter_init);
+
+/**
+ * rvt_qp_iter_next - return the next QP in iter
+ * @iter: the iterator
+ *
+ * Fine grained QP iterator suitable for use
+ * with debugfs seq_file mechanisms.
+ *
+ * Updates iter->qp with the current QP when the return
+ * value is 0.
+ *
+ * Return: 0 - iter->qp is valid 1 - no more QPs
+ */
+int rvt_qp_iter_next(struct rvt_qp_iter *iter)
+ __must_hold(RCU)
+{
+ int n = iter->n;
+ int ret = 1;
+ struct rvt_qp *pqp = iter->qp;
+ struct rvt_qp *qp;
+ struct rvt_dev_info *rdi = iter->rdi;
+
+ /*
+ * The approach is to consider the special qps
+ * as additional table entries before the
+ * real hash table. Since the qp code sets
+ * the qp->next hash link to NULL, this works just fine.
+ *
+ * iter->specials is 2 * # ports
+ *
+ * n = 0..iter->specials is the special qp indices
+ *
+ * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are
+ * the potential hash bucket entries
+ *
+ */
+ for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) {
+ if (pqp) {
+ qp = rcu_dereference(pqp->next);
+ } else {
+ if (n < iter->specials) {
+ struct rvt_ibport *rvp;
+ int pidx;
+
+ pidx = n % rdi->ibdev.phys_port_cnt;
+ rvp = rdi->ports[pidx];
+ qp = rcu_dereference(rvp->qp[n & 1]);
+ } else {
+ qp = rcu_dereference(
+ rdi->qp_dev->qp_table[
+ (n - iter->specials)]);
+ }
+ }
+ pqp = qp;
+ if (qp) {
+ iter->qp = qp;
+ iter->n = n;
+ return 0;
+ }
+ }
+ return ret;
+}
+EXPORT_SYMBOL(rvt_qp_iter_next);
+
+/**
+ * rvt_qp_iter - iterate all QPs
+ * @rdi: rvt devinfo
+ * @v: a 64-bit value
+ * @cb: a callback
+ *
+ * This provides a way for iterating all QPs.
+ *
+ * The @cb is a user-defined callback and @v is a 64-bit
+ * value passed to and relevant for processing in the
+ * cb. An example use case would be to alter QP processing
+ * based on criteria not part of the rvt_qp.
+ *
+ * The code has an internal iterator to simplify
+ * non seq_file use cases.
+ */
+void rvt_qp_iter(struct rvt_dev_info *rdi,
+ u64 v,
+ void (*cb)(struct rvt_qp *qp, u64 v))
+{
+ int ret;
+ struct rvt_qp_iter i = {
+ .rdi = rdi,
+ .specials = rdi->ibdev.phys_port_cnt * 2,
+ .v = v,
+ .cb = cb
+ };
+
+ rcu_read_lock();
+ do {
+ ret = rvt_qp_iter_next(&i);
+ if (!ret) {
+ rvt_get_qp(i.qp);
+ rcu_read_unlock();
+ i.cb(i.qp, i.v);
+ rcu_read_lock();
+ rvt_put_qp(i.qp);
+ }
+ } while (!ret);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(rvt_qp_iter);
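+
+/*
+ * Illustrative usage sketch: a caller-supplied callback plus rvt_qp_iter()
+ * is enough to walk every QP on a device. The callback name and body below
+ * are hypothetical:
+ *
+ *	static void count_rc_qps_cb(struct rvt_qp *qp, u64 v)
+ *	{
+ *		if (qp->ibqp.qp_type == IB_QPT_RC)
+ *			atomic64_inc((atomic64_t *)(uintptr_t)v);
+ *	}
+ *
+ *	rvt_qp_iter(rdi, (uintptr_t)&rc_count, count_rc_qps_cb);
+ */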
+
+/*
+ * This should be called with s_lock and r_lock held.
+ */
+void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ enum ib_wc_status status)
+{
+ u32 old_last, last;
+ struct rvt_dev_info *rdi;
+
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
+ return;
+ rdi = ib_to_rvt(qp->ibqp.device);
+
+ old_last = qp->s_last;
+ trace_rvt_qp_send_completion(qp, wqe, old_last);
+ last = rvt_qp_complete_swqe(qp, wqe, rdi->wc_opcode[wqe->wr.opcode],
+ status);
+ if (qp->s_acked == old_last)
+ qp->s_acked = last;
+ if (qp->s_cur == old_last)
+ qp->s_cur = last;
+ if (qp->s_tail == old_last)
+ qp->s_tail = last;
+ if (qp->state == IB_QPS_SQD && last == qp->s_cur)
+ qp->s_draining = 0;
+}
+EXPORT_SYMBOL(rvt_send_complete);
+
+/**
+ * rvt_copy_sge - copy data to SGE memory
+ * @qp: associated QP
+ * @ss: the SGE state
+ * @data: the data to copy
+ * @length: the length of the data
+ * @release: boolean to release MR
+ * @copy_last: do a separate copy of the last 8 bytes
+ */
+void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
+ void *data, u32 length,
+ bool release, bool copy_last)
+{
+ struct rvt_sge *sge = &ss->sge;
+ int i;
+ bool in_last = false;
+ bool cacheless_copy = false;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+ struct rvt_wss *wss = rdi->wss;
+ unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
+
+ if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) {
+ cacheless_copy = length >= PAGE_SIZE;
+ } else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) {
+ if (length >= PAGE_SIZE) {
+ /*
+ * NOTE: this *assumes*:
+ * o The first vaddr is the dest.
+ * o If multiple pages, then vaddr is sequential.
+ */
+ wss_insert(wss, sge->vaddr);
+ if (length >= (2 * PAGE_SIZE))
+ wss_insert(wss, (sge->vaddr + PAGE_SIZE));
+
+ cacheless_copy = wss_exceeds_threshold(wss);
+ } else {
+ wss_advance_clean_counter(wss);
+ }
+ }
+
+ if (copy_last) {
+ if (length > 8) {
+ length -= 8;
+ } else {
+ copy_last = false;
+ in_last = true;
+ }
+ }
+
+again:
+ while (length) {
+ u32 len = rvt_get_sge_length(sge, length);
+
+ WARN_ON_ONCE(len == 0);
+ if (unlikely(in_last)) {
+ /* enforce byte transfer ordering */
+ for (i = 0; i < len; i++)
+ ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
+ } else if (cacheless_copy) {
+ cacheless_memcpy(sge->vaddr, data, len);
+ } else {
+ memcpy(sge->vaddr, data, len);
+ }
+ rvt_update_sge(ss, len, release);
+ data += len;
+ length -= len;
+ }
+
+ if (copy_last) {
+ copy_last = false;
+ in_last = true;
+ length = 8;
+ goto again;
+ }
+}
+EXPORT_SYMBOL(rvt_copy_sge);
+
+static enum ib_wc_status loopback_qp_drop(struct rvt_ibport *rvp,
+ struct rvt_qp *sqp)
+{
+ rvp->n_pkt_drops++;
+ /*
+ * For RC, the requester would timeout and retry so
+ * shortcut the timeouts and just signal too many retries.
+ */
+ return sqp->ibqp.qp_type == IB_QPT_RC ?
+ IB_WC_RETRY_EXC_ERR : IB_WC_SUCCESS;
+}
+
+/**
+ * rvt_ruc_loopback - handle UC and RC loopback requests
+ * @sqp: the sending QP
+ *
+ * This is called from rvt_do_send() to forward a WQE addressed to the same HFI
+ * Note that although we are single threaded due to the send engine, we still
+ * have to protect against post_send(). We don't have to worry about
+ * receive interrupts since this is a connected protocol and all packets
+ * will pass through here.
+ */
+void rvt_ruc_loopback(struct rvt_qp *sqp)
+{
+ struct rvt_ibport *rvp = NULL;
+ struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);
+ struct rvt_qp *qp;
+ struct rvt_swqe *wqe;
+ struct rvt_sge *sge;
+ unsigned long flags;
+ struct ib_wc wc;
+ u64 sdata;
+ atomic64_t *maddr;
+ enum ib_wc_status send_status;
+ bool release;
+ int ret;
+ bool copy_last = false;
+ int local_ops = 0;
+
+ rcu_read_lock();
+ rvp = rdi->ports[sqp->port_num - 1];
+
+ /*
+ * Note that we check the responder QP state after
+ * checking the requester's state.
+ */
+
+ qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp,
+ sqp->remote_qpn);
+
+ spin_lock_irqsave(&sqp->s_lock, flags);
+
+ /* Return if we are already busy processing a work request. */
+ if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
+ !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
+ goto unlock;
+
+ sqp->s_flags |= RVT_S_BUSY;
+
+again:
+ if (sqp->s_last == READ_ONCE(sqp->s_head))
+ goto clr_busy;
+ wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
+
+ /* Return if it is not OK to start a new work request. */
+ if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
+ if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
+ goto clr_busy;
+ /* We are in the error state, flush the work request. */
+ send_status = IB_WC_WR_FLUSH_ERR;
+ goto flush_send;
+ }
+
+ /*
+ * We can rely on the entry not changing without the s_lock
+ * being held until we update s_last.
+ * We increment s_cur to indicate s_last is in progress.
+ */
+ if (sqp->s_last == sqp->s_cur) {
+ if (++sqp->s_cur >= sqp->s_size)
+ sqp->s_cur = 0;
+ }
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+
+ if (!qp) {
+ send_status = loopback_qp_drop(rvp, sqp);
+ goto serr_no_r_lock;
+ }
+ spin_lock_irqsave(&qp->r_lock, flags);
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
+ qp->ibqp.qp_type != sqp->ibqp.qp_type) {
+ send_status = loopback_qp_drop(rvp, sqp);
+ goto serr;
+ }
+
+ memset(&wc, 0, sizeof(wc));
+ send_status = IB_WC_SUCCESS;
+
+ release = true;
+ sqp->s_sge.sge = wqe->sg_list[0];
+ sqp->s_sge.sg_list = wqe->sg_list + 1;
+ sqp->s_sge.num_sge = wqe->wr.num_sge;
+ sqp->s_len = wqe->length;
+ switch (wqe->wr.opcode) {
+ case IB_WR_REG_MR:
+ goto send_comp;
+
+ case IB_WR_LOCAL_INV:
+ if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
+ if (rvt_invalidate_rkey(sqp,
+ wqe->wr.ex.invalidate_rkey))
+ send_status = IB_WC_LOC_PROT_ERR;
+ local_ops = 1;
+ }
+ goto send_comp;
+
+ case IB_WR_SEND_WITH_INV:
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_SEND:
+ ret = rvt_get_rwqe(qp, false);
+ if (ret < 0)
+ goto op_err;
+ if (!ret)
+ goto rnr_nak;
+ if (wqe->length > qp->r_len)
+ goto inv_err;
+ switch (wqe->wr.opcode) {
+ case IB_WR_SEND_WITH_INV:
+ if (!rvt_invalidate_rkey(qp,
+ wqe->wr.ex.invalidate_rkey)) {
+ wc.wc_flags = IB_WC_WITH_INVALIDATE;
+ wc.ex.invalidate_rkey =
+ wqe->wr.ex.invalidate_rkey;
+ }
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.ex.imm_data = wqe->wr.ex.imm_data;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto inv_err;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.ex.imm_data = wqe->wr.ex.imm_data;
+ ret = rvt_get_rwqe(qp, true);
+ if (ret < 0)
+ goto op_err;
+ if (!ret)
+ goto rnr_nak;
+ /* skip copy_last set and qp_access_flags recheck */
+ goto do_write;
+ case IB_WR_RDMA_WRITE:
+ copy_last = rvt_is_user_qp(qp);
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto inv_err;
+do_write:
+ if (wqe->length == 0)
+ break;
+ if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
+ wqe->rdma_wr.remote_addr,
+ wqe->rdma_wr.rkey,
+ IB_ACCESS_REMOTE_WRITE)))
+ goto acc_err;
+ qp->r_sge.sg_list = NULL;
+ qp->r_sge.num_sge = 1;
+ qp->r_sge.total_len = wqe->length;
+ break;
+
+ case IB_WR_RDMA_READ:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
+ goto inv_err;
+ if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
+ wqe->rdma_wr.remote_addr,
+ wqe->rdma_wr.rkey,
+ IB_ACCESS_REMOTE_READ)))
+ goto acc_err;
+ release = false;
+ sqp->s_sge.sg_list = NULL;
+ sqp->s_sge.num_sge = 1;
+ qp->r_sge.sge = wqe->sg_list[0];
+ qp->r_sge.sg_list = wqe->sg_list + 1;
+ qp->r_sge.num_sge = wqe->wr.num_sge;
+ qp->r_sge.total_len = wqe->length;
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
+ goto inv_err;
+ if (unlikely(wqe->atomic_wr.remote_addr & (sizeof(u64) - 1)))
+ goto inv_err;
+ if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
+ wqe->atomic_wr.remote_addr,
+ wqe->atomic_wr.rkey,
+ IB_ACCESS_REMOTE_ATOMIC)))
+ goto acc_err;
+ /* Perform atomic OP and save result. */
+ maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
+ sdata = wqe->atomic_wr.compare_add;
+ *(u64 *)sqp->s_sge.sge.vaddr =
+ (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
+ (u64)atomic64_add_return(sdata, maddr) - sdata :
+ (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
+ sdata, wqe->atomic_wr.swap);
+ rvt_put_mr(qp->r_sge.sge.mr);
+ qp->r_sge.num_sge = 0;
+ goto send_comp;
+
+ default:
+ send_status = IB_WC_LOC_QP_OP_ERR;
+ goto serr;
+ }
+
+ sge = &sqp->s_sge.sge;
+ while (sqp->s_len) {
+ u32 len = rvt_get_sge_length(sge, sqp->s_len);
+
+ WARN_ON_ONCE(len == 0);
+ rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
+ len, release, copy_last);
+ rvt_update_sge(&sqp->s_sge, len, !release);
+ sqp->s_len -= len;
+ }
+ if (release)
+ rvt_put_ss(&qp->r_sge);
+
+ if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
+ goto send_comp;
+
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
+ wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ else
+ wc.opcode = IB_WC_RECV;
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.byte_len = wqe->length;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = qp->remote_qpn;
+ wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
+ wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
+ wc.port_num = 1;
+ /* Signal completion event if the solicited bit is set. */
+ rvt_recv_cq(qp, &wc, wqe->wr.send_flags & IB_SEND_SOLICITED);
+
+send_comp:
+ spin_unlock_irqrestore(&qp->r_lock, flags);
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ rvp->n_loop_pkts++;
+flush_send:
+ sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
+ spin_lock(&sqp->r_lock);
+ rvt_send_complete(sqp, wqe, send_status);
+ spin_unlock(&sqp->r_lock);
+ if (local_ops) {
+ atomic_dec(&sqp->local_ops_pending);
+ local_ops = 0;
+ }
+ goto again;
+
+rnr_nak:
+ /* Handle RNR NAK */
+ if (qp->ibqp.qp_type == IB_QPT_UC)
+ goto send_comp;
+ rvp->n_rnr_naks++;
+ /*
+ * Note: we don't need the s_lock held since the BUSY flag
+ * makes this single threaded.
+ */
+ if (sqp->s_rnr_retry == 0) {
+ send_status = IB_WC_RNR_RETRY_EXC_ERR;
+ goto serr;
+ }
+ if (sqp->s_rnr_retry_cnt < 7)
+ sqp->s_rnr_retry--;
+ spin_unlock_irqrestore(&qp->r_lock, flags);
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
+ goto clr_busy;
+ rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
+ IB_AETH_CREDIT_SHIFT);
+ goto clr_busy;
+
+op_err:
+ send_status = IB_WC_REM_OP_ERR;
+ wc.status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
+
+inv_err:
+ send_status =
+ sqp->ibqp.qp_type == IB_QPT_RC ?
+ IB_WC_REM_INV_REQ_ERR :
+ IB_WC_SUCCESS;
+ wc.status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
+
+acc_err:
+ send_status = IB_WC_REM_ACCESS_ERR;
+ wc.status = IB_WC_LOC_PROT_ERR;
+err:
+ /* responder goes to error state */
+ rvt_rc_error(qp, wc.status);
+
+serr:
+ spin_unlock_irqrestore(&qp->r_lock, flags);
+serr_no_r_lock:
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ spin_lock(&sqp->r_lock);
+ rvt_send_complete(sqp, wqe, send_status);
+ spin_unlock(&sqp->r_lock);
+ if (sqp->ibqp.qp_type == IB_QPT_RC) {
+ int lastwqe;
+
+ spin_lock(&sqp->r_lock);
+ lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+ spin_unlock(&sqp->r_lock);
+
+ sqp->s_flags &= ~RVT_S_BUSY;
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+ if (lastwqe) {
+ struct ib_event ev;
+
+ ev.device = sqp->ibqp.device;
+ ev.element.qp = &sqp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
+ }
+ goto done;
+ }
+clr_busy:
+ sqp->s_flags &= ~RVT_S_BUSY;
+unlock:
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+done:
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(rvt_ruc_loopback);
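
A note on the atomic branch in rvt_ruc_loopback() above: IB_WR_ATOMIC_FETCH_AND_ADD must hand back the value the remote address held before the add (hence the "- sdata" after atomic64_add_return()), while IB_WR_ATOMIC_CMP_AND_SWP writes the swap value only when the current contents equal the compare value and always returns the prior contents. A minimal user-space sketch of those two semantics, using plain integers instead of the driver's SGE and MR machinery (every name below is illustrative, not part of rdmavt):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: models the two IB atomic ops on a local u64. */
static uint64_t fetch_and_add(uint64_t *addr, uint64_t add)
{
	uint64_t old = *addr;	/* value returned to the requester */

	*addr = old + add;
	return old;
}

static uint64_t cmp_and_swap(uint64_t *addr, uint64_t compare, uint64_t swap)
{
	uint64_t old = *addr;	/* always returned, swapped or not */

	if (old == compare)
		*addr = swap;
	return old;
}

int main(void)
{
	uint64_t mem = 5;
	uint64_t old;

	old = fetch_and_add(&mem, 3);
	printf("fetch_add returned %llu, mem now %llu\n",
	       (unsigned long long)old, (unsigned long long)mem);	/* 5, 8 */

	old = cmp_and_swap(&mem, 8, 42);
	printf("cmp_swap returned %llu, mem now %llu\n",
	       (unsigned long long)old, (unsigned long long)mem);	/* 8, 42 */
	return 0;
}
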
diff --git a/drivers/infiniband/sw/rdmavt/qp.h b/drivers/infiniband/sw/rdmavt/qp.h
new file mode 100644
index 000000000000..1a201d2bedd6
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/qp.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ */
+
+#ifndef DEF_RVTQP_H
+#define DEF_RVTQP_H
+
+#include <rdma/rdmavt_qp.h>
+
+int rvt_driver_qp_init(struct rvt_dev_info *rdi);
+void rvt_qp_exit(struct rvt_dev_info *rdi);
+int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
+int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_qp_init_attr *init_attr);
+int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+int rvt_wss_init(struct rvt_dev_info *rdi);
+void rvt_wss_exit(struct rvt_dev_info *rdi);
+int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node,
+ struct ib_udata *udata);
+#endif /* DEF_RVTQP_H */
diff --git a/drivers/infiniband/sw/rdmavt/rc.c b/drivers/infiniband/sw/rdmavt/rc.c
new file mode 100644
index 000000000000..7cd473302576
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/rc.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ */
+
+#include <rdma/rdmavt_qp.h>
+#include <rdma/ib_hdrs.h>
+
+/*
+ * Convert the AETH credit code into the number of credits.
+ */
+static const u16 credit_table[31] = {
+ 0, /* 0 */
+ 1, /* 1 */
+ 2, /* 2 */
+ 3, /* 3 */
+ 4, /* 4 */
+ 6, /* 5 */
+ 8, /* 6 */
+ 12, /* 7 */
+ 16, /* 8 */
+ 24, /* 9 */
+ 32, /* A */
+ 48, /* B */
+ 64, /* C */
+ 96, /* D */
+ 128, /* E */
+ 192, /* F */
+ 256, /* 10 */
+ 384, /* 11 */
+ 512, /* 12 */
+ 768, /* 13 */
+ 1024, /* 14 */
+ 1536, /* 15 */
+ 2048, /* 16 */
+ 3072, /* 17 */
+ 4096, /* 18 */
+ 6144, /* 19 */
+ 8192, /* 1A */
+ 12288, /* 1B */
+ 16384, /* 1C */
+ 24576, /* 1D */
+ 32768 /* 1E */
+};
+
+/**
+ * rvt_compute_aeth - compute the AETH (syndrome + MSN)
+ * @qp: the queue pair to compute the AETH for
+ *
+ * Returns the AETH.
+ */
+__be32 rvt_compute_aeth(struct rvt_qp *qp)
+{
+ u32 aeth = qp->r_msn & IB_MSN_MASK;
+
+ if (qp->ibqp.srq) {
+ /*
+ * Shared receive queues don't generate credits.
+ * Set the credit field to the invalid value.
+ */
+ aeth |= IB_AETH_CREDIT_INVAL << IB_AETH_CREDIT_SHIFT;
+ } else {
+ u32 min, max, x;
+ u32 credits;
+ u32 head;
+ u32 tail;
+
+ credits = READ_ONCE(qp->r_rq.kwq->count);
+ if (credits == 0) {
+ /* sanity check pointers before trusting them */
+ if (qp->ip) {
+ head = RDMA_READ_UAPI_ATOMIC(qp->r_rq.wq->head);
+ tail = RDMA_READ_UAPI_ATOMIC(qp->r_rq.wq->tail);
+ } else {
+ head = READ_ONCE(qp->r_rq.kwq->head);
+ tail = READ_ONCE(qp->r_rq.kwq->tail);
+ }
+ if (head >= qp->r_rq.size)
+ head = 0;
+ if (tail >= qp->r_rq.size)
+ tail = 0;
+ /*
+ * Compute the number of credits available (RWQEs).
+ * There is a small chance that the pair of reads are
+ * not atomic, which is OK, since the fuzziness is
+ * resolved as further ACKs go out.
+ */
+ credits = rvt_get_rq_count(&qp->r_rq, head, tail);
+ }
+ /*
+ * Binary search the credit table to find the code to
+ * use.
+ */
+ min = 0;
+ max = 31;
+ for (;;) {
+ x = (min + max) / 2;
+ if (credit_table[x] == credits)
+ break;
+ if (credit_table[x] > credits) {
+ max = x;
+ } else {
+ if (min == x)
+ break;
+ min = x;
+ }
+ }
+ aeth |= x << IB_AETH_CREDIT_SHIFT;
+ }
+ return cpu_to_be32(aeth);
+}
+EXPORT_SYMBOL(rvt_compute_aeth);
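
The table above maps a 5-bit AETH credit code to a roughly exponential credit count, and the loop in rvt_compute_aeth() is a binary search for the largest code whose credit count does not exceed the number of available RWQEs. A self-contained sketch of that encoding, with the values copied from the table above (the helper name is illustrative):

#include <stdio.h>

/* Copy of the AETH credit table above (code -> advertised credits). */
static const unsigned short credit_table[31] = {
	0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144,
	8192, 12288, 16384, 24576, 32768
};

/*
 * Same binary search as rvt_compute_aeth(): find the largest code whose
 * credit count is <= the available credits (or an exact match).
 */
static unsigned int credits_to_code(unsigned int credits)
{
	unsigned int min = 0, max = 31, x;

	for (;;) {
		x = (min + max) / 2;
		if (credit_table[x] == credits)
			break;
		if (credit_table[x] > credits) {
			max = x;
		} else {
			if (min == x)
				break;
			min = x;
		}
	}
	return x;
}

int main(void)
{
	/* 100 free RWQEs encode as code 0xD (96 credits), not 0xE (128). */
	unsigned int code = credits_to_code(100);

	printf("code 0x%x advertises %u credits\n",
	       code, (unsigned int)credit_table[code]);
	return 0;
}
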
+
+/**
+ * rvt_get_credit - process the credit information in an incoming AETH
+ * @qp: the qp whose send work queue credit state to update
+ * @aeth: the Acknowledge Extended Transport Header
+ *
+ * The QP s_lock should be held.
+ */
+void rvt_get_credit(struct rvt_qp *qp, u32 aeth)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+ u32 credit = (aeth >> IB_AETH_CREDIT_SHIFT) & IB_AETH_CREDIT_MASK;
+
+ lockdep_assert_held(&qp->s_lock);
+ /*
+ * If the credit is invalid, we can send
+ * as many packets as we like. Otherwise, we have to
+ * honor the credit field.
+ */
+ if (credit == IB_AETH_CREDIT_INVAL) {
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
+ qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
+ if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
+ qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
+ rdi->driver_f.schedule_send(qp);
+ }
+ }
+ } else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
+ /* Compute new LSN (i.e., MSN + credit) */
+ credit = (aeth + credit_table[credit]) & IB_MSN_MASK;
+ if (rvt_cmp_msn(credit, qp->s_lsn) > 0) {
+ qp->s_lsn = credit;
+ if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
+ qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
+ rdi->driver_f.schedule_send(qp);
+ }
+ }
+ }
+}
+EXPORT_SYMBOL(rvt_get_credit);
+
+/**
+ * rvt_restart_sge - rewind the sge state for a wqe
+ * @ss: the sge state pointer
+ * @wqe: the wqe to rewind
+ * @len: the data length from the start of the wqe in bytes
+ *
+ * Returns the remaining data length.
+ */
+u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len)
+{
+ ss->sge = wqe->sg_list[0];
+ ss->sg_list = wqe->sg_list + 1;
+ ss->num_sge = wqe->wr.num_sge;
+ ss->total_len = wqe->length;
+ rvt_skip_sge(ss, len, false);
+ return wqe->length - len;
+}
+EXPORT_SYMBOL(rvt_restart_sge);
+
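
rvt_restart_sge() resets the SGE state to the first element of the WQE's gather list and then skips forward len bytes, returning how many bytes remain to be sent. A retransmitting sender would typically derive len from the number of packets between the WQE's first PSN and the PSN being resent, multiplied by the path MTU; that arithmetic is an assumption about the caller, not something defined in this file. A standalone sketch of the byte-offset rewind over a toy gather list:

#include <stdio.h>

/* Toy two-entry gather list describing a 3000-byte WQE. */
struct toy_sge { unsigned int length; };

int main(void)
{
	struct toy_sge sg_list[2] = { { 2048 }, { 952 } };
	unsigned int wqe_length = 3000, pmtu = 1024;
	unsigned int first_psn = 100, psn = 102;	/* packet being resent */

	/* Assumed caller arithmetic: bytes already sent before 'psn'. */
	unsigned int len = (psn - first_psn) * pmtu;	/* 2048 */
	unsigned int remaining = wqe_length - len;	/* what the helper returns */

	/* The skip step: walk the gather list to where byte 'len' lands. */
	unsigned int i = 0, off = len;

	while (off >= sg_list[i].length) {
		off -= sg_list[i].length;
		i++;
	}
	printf("resume in sge %u at offset %u, %u bytes left\n",
	       i, off, remaining);	/* sge 1, offset 0, 952 bytes left */
	return 0;
}
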
diff --git a/drivers/infiniband/sw/rdmavt/srq.c b/drivers/infiniband/sw/rdmavt/srq.c
new file mode 100644
index 000000000000..fe125bf85b27
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/srq.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <rdma/uverbs_ioctl.h>
+
+#include "srq.h"
+#include "vt.h"
+#include "qp.h"
+/**
+ * rvt_driver_srq_init - init srq resources on a per driver basis
+ * @rdi: rvt dev structure
+ *
+ * Do any initialization needed when a driver registers with rdmavt.
+ */
+void rvt_driver_srq_init(struct rvt_dev_info *rdi)
+{
+ spin_lock_init(&rdi->n_srqs_lock);
+ rdi->n_srqs_allocated = 0;
+}
+
+/**
+ * rvt_create_srq - create a shared receive queue
+ * @ibsrq: the SRQ to create
+ * @srq_init_attr: the attributes of the SRQ
+ * @udata: data from libibverbs when creating a user SRQ
+ *
+ * Return: 0 on success
+ */
+int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata)
+{
+ struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
+ struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
+ u32 sz;
+ int ret;
+
+ if (srq_init_attr->srq_type != IB_SRQT_BASIC)
+ return -EOPNOTSUPP;
+
+ if (srq_init_attr->attr.max_sge == 0 ||
+ srq_init_attr->attr.max_sge > dev->dparms.props.max_srq_sge ||
+ srq_init_attr->attr.max_wr == 0 ||
+ srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr)
+ return -EINVAL;
+
+ /*
+ * Need to use vmalloc() if we want to support large #s of entries.
+ */
+ srq->rq.size = srq_init_attr->attr.max_wr + 1;
+ srq->rq.max_sge = srq_init_attr->attr.max_sge;
+ sz = sizeof(struct ib_sge) * srq->rq.max_sge +
+ sizeof(struct rvt_rwqe);
+ if (rvt_alloc_rq(&srq->rq, srq->rq.size * sz,
+ dev->dparms.node, udata)) {
+ ret = -ENOMEM;
+ goto bail_srq;
+ }
+
+ /*
+ * Return the address of the RWQ as the offset to mmap.
+ * See rvt_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;
+
+ srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
+ if (IS_ERR(srq->ip)) {
+ ret = PTR_ERR(srq->ip);
+ goto bail_wq;
+ }
+
+ ret = ib_copy_to_udata(udata, &srq->ip->offset,
+ sizeof(srq->ip->offset));
+ if (ret)
+ goto bail_ip;
+ }
+
+ /*
+ * ib_create_srq() will initialize srq->ibsrq.
+ */
+ spin_lock_init(&srq->rq.lock);
+ srq->limit = srq_init_attr->attr.srq_limit;
+
+ spin_lock(&dev->n_srqs_lock);
+ if (dev->n_srqs_allocated == dev->dparms.props.max_srq) {
+ spin_unlock(&dev->n_srqs_lock);
+ ret = -ENOMEM;
+ goto bail_ip;
+ }
+
+ dev->n_srqs_allocated++;
+ spin_unlock(&dev->n_srqs_lock);
+
+ if (srq->ip) {
+ spin_lock_irq(&dev->pending_lock);
+ list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+ }
+
+ return 0;
+
+bail_ip:
+ kfree(srq->ip);
+bail_wq:
+ rvt_free_rq(&srq->rq);
+bail_srq:
+ return ret;
+}
+
+/**
+ * rvt_modify_srq - modify a shared receive queue
+ * @ibsrq: the SRQ to modify
+ * @attr: the new attributes of the SRQ
+ * @attr_mask: indicates which attributes to modify
+ * @udata: user data for libibverbs.so
+ *
+ * Return: 0 on success
+ */
+int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask,
+ struct ib_udata *udata)
+{
+ struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
+ struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
+ struct rvt_rq tmp_rq = {};
+ int ret = 0;
+
+ if (attr_mask & IB_SRQ_MAX_WR) {
+ struct rvt_krwq *okwq = NULL;
+ struct rvt_rwq *owq = NULL;
+ struct rvt_rwqe *p;
+ u32 sz, size, n, head, tail;
+
+ /* Check that the requested sizes are below the limits. */
+ if ((attr->max_wr > dev->dparms.props.max_srq_wr) ||
+ ((attr_mask & IB_SRQ_LIMIT) ?
+ attr->srq_limit : srq->limit) > attr->max_wr)
+ return -EINVAL;
+ sz = sizeof(struct rvt_rwqe) +
+ srq->rq.max_sge * sizeof(struct ib_sge);
+ size = attr->max_wr + 1;
+ if (rvt_alloc_rq(&tmp_rq, size * sz, dev->dparms.node,
+ udata))
+ return -ENOMEM;
+ /* Check that we can write the offset to mmap. */
+ if (udata && udata->inlen >= sizeof(__u64)) {
+ __u64 offset_addr;
+ __u64 offset = 0;
+
+ ret = ib_copy_from_udata(&offset_addr, udata,
+ sizeof(offset_addr));
+ if (ret)
+ goto bail_free;
+ udata->outbuf = (void __user *)
+ (unsigned long)offset_addr;
+ ret = ib_copy_to_udata(udata, &offset,
+ sizeof(offset));
+ if (ret)
+ goto bail_free;
+ }
+
+ spin_lock_irq(&srq->rq.kwq->c_lock);
+ /*
+ * validate head and tail pointer values and compute
+ * the number of remaining WQEs.
+ */
+ if (udata) {
+ owq = srq->rq.wq;
+ head = RDMA_READ_UAPI_ATOMIC(owq->head);
+ tail = RDMA_READ_UAPI_ATOMIC(owq->tail);
+ } else {
+ okwq = srq->rq.kwq;
+ head = okwq->head;
+ tail = okwq->tail;
+ }
+ if (head >= srq->rq.size || tail >= srq->rq.size) {
+ ret = -EINVAL;
+ goto bail_unlock;
+ }
+ n = head;
+ if (n < tail)
+ n += srq->rq.size - tail;
+ else
+ n -= tail;
+ if (size <= n) {
+ ret = -EINVAL;
+ goto bail_unlock;
+ }
+ n = 0;
+ p = tmp_rq.kwq->curr_wq;
+ while (tail != head) {
+ struct rvt_rwqe *wqe;
+ int i;
+
+ wqe = rvt_get_rwqe_ptr(&srq->rq, tail);
+ p->wr_id = wqe->wr_id;
+ p->num_sge = wqe->num_sge;
+ for (i = 0; i < wqe->num_sge; i++)
+ p->sg_list[i] = wqe->sg_list[i];
+ n++;
+ p = (struct rvt_rwqe *)((char *)p + sz);
+ if (++tail >= srq->rq.size)
+ tail = 0;
+ }
+ srq->rq.kwq = tmp_rq.kwq;
+ if (udata) {
+ srq->rq.wq = tmp_rq.wq;
+ RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->head, n);
+ RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->tail, 0);
+ } else {
+ tmp_rq.kwq->head = n;
+ tmp_rq.kwq->tail = 0;
+ }
+ srq->rq.size = size;
+ if (attr_mask & IB_SRQ_LIMIT)
+ srq->limit = attr->srq_limit;
+ spin_unlock_irq(&srq->rq.kwq->c_lock);
+
+ vfree(owq);
+ kvfree(okwq);
+
+ if (srq->ip) {
+ struct rvt_mmap_info *ip = srq->ip;
+ struct rvt_dev_info *dev = ib_to_rvt(srq->ibsrq.device);
+ u32 s = sizeof(struct rvt_rwq) + size * sz;
+
+ rvt_update_mmap_info(dev, ip, s, tmp_rq.wq);
+
+ /*
+ * Return the offset to mmap.
+ * See rvt_mmap() for details.
+ */
+ if (udata && udata->inlen >= sizeof(__u64)) {
+ ret = ib_copy_to_udata(udata, &ip->offset,
+ sizeof(ip->offset));
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Put user mapping info onto the pending list
+ * unless it already is on the list.
+ */
+ spin_lock_irq(&dev->pending_lock);
+ if (list_empty(&ip->pending_mmaps))
+ list_add(&ip->pending_mmaps,
+ &dev->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+ }
+ } else if (attr_mask & IB_SRQ_LIMIT) {
+ spin_lock_irq(&srq->rq.kwq->c_lock);
+ if (attr->srq_limit >= srq->rq.size)
+ ret = -EINVAL;
+ else
+ srq->limit = attr->srq_limit;
+ spin_unlock_irq(&srq->rq.kwq->c_lock);
+ }
+ return ret;
+
+bail_unlock:
+ spin_unlock_irq(&srq->rq.kwq->c_lock);
+bail_free:
+ rvt_free_rq(&tmp_rq);
+ return ret;
+}
+
+/**
+ * rvt_query_srq - query srq data
+ * @ibsrq: srq to query
+ * @attr: return info in attr
+ *
+ * Return: always 0
+ */
+int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+{
+ struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
+
+ attr->max_wr = srq->rq.size - 1;
+ attr->max_sge = srq->rq.max_sge;
+ attr->srq_limit = srq->limit;
+ return 0;
+}
+
+/**
+ * rvt_destroy_srq - destroy an srq
+ * @ibsrq: srq object to destroy
+ * @udata: user data for libibverbs.so
+ */
+int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+{
+ struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
+ struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
+
+ spin_lock(&dev->n_srqs_lock);
+ dev->n_srqs_allocated--;
+ spin_unlock(&dev->n_srqs_lock);
+ if (srq->ip)
+ kref_put(&srq->ip->ref, rvt_release_mmap_info);
+ kvfree(srq->rq.kwq);
+ return 0;
+}
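
The resize path in rvt_modify_srq() above first computes how many WQEs are still live in the old ring (head minus tail, modulo the ring size), refuses the resize unless the new ring is strictly larger than that count, and then copies the live entries to the front of the new ring so the new head becomes the count and the new tail becomes zero. A standalone sketch of the occupancy computation (names are illustrative):

#include <stdio.h>

/*
 * Occupancy of a circular receive queue: head is where the next WQE will
 * be posted, tail is the next WQE to consume, and one of the 'size' slots
 * is always left empty so head == tail means "empty".
 */
static unsigned int rq_occupancy(unsigned int head, unsigned int tail,
				 unsigned int size)
{
	return (head >= tail) ? head - tail : head + size - tail;
}

int main(void)
{
	unsigned int size = 8;	/* an 8-slot ring holds at most 7 WQEs */

	/* Wrapped case: head has lapped around past the end of the ring. */
	unsigned int live = rq_occupancy(2, 6, size);

	printf("live WQEs = %u\n", live);	/* prints 4 */

	/*
	 * A resize succeeds only if the requested size exceeds 'live', so
	 * these 4 WQEs fit in any new ring of 5 or more slots.
	 */
	return 0;
}
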
diff --git a/drivers/infiniband/sw/rdmavt/srq.h b/drivers/infiniband/sw/rdmavt/srq.h
new file mode 100644
index 000000000000..e654a9fa2989
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/srq.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ */
+
+#ifndef DEF_RVTSRQ_H
+#define DEF_RVTSRQ_H
+
+#include <rdma/rdma_vt.h>
+void rvt_driver_srq_init(struct rvt_dev_info *rdi);
+int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata);
+int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask,
+ struct ib_udata *udata);
+int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
+int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
+
+#endif /* DEF_RVTSRQ_H */
diff --git a/drivers/infiniband/sw/rdmavt/trace.c b/drivers/infiniband/sw/rdmavt/trace.c
new file mode 100644
index 000000000000..e31b9f3e752d
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/trace.c
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/infiniband/sw/rdmavt/trace.h b/drivers/infiniband/sw/rdmavt/trace.h
new file mode 100644
index 000000000000..bdb6b9326b64
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/trace.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2016, 2017 Intel Corporation.
+ */
+
+#define RDI_DEV_ENTRY(rdi) __string(dev, rvt_get_ibdev_name(rdi))
+#define RDI_DEV_ASSIGN(rdi) __assign_str(dev)
+
+#include "trace_rvt.h"
+#include "trace_qp.h"
+#include "trace_tx.h"
+#include "trace_mr.h"
+#include "trace_cq.h"
+#include "trace_rc.h"
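
trace.c above works by defining CREATE_TRACE_POINTS and then including trace.h, so each trace_*.h header is expanded once for the event declarations and a second time for the generated bodies; TRACE_HEADER_MULTI_READ is what permits the second pass through the include guard. A miniature single-file analogy of that two-pass expansion, not the real tracepoint machinery:

#include <stdio.h>

/* First pass: the "header" expands to declarations only. */
#define EVENT(name) void trace_##name(const char *msg);
EVENT(rvt_dbg)
#undef EVENT

/* Second pass: redefine the macro (like CREATE_TRACE_POINTS) to emit bodies. */
#define EVENT(name) \
	void trace_##name(const char *msg) { printf(#name ": %s\n", msg); }
EVENT(rvt_dbg)
#undef EVENT

int main(void)
{
	trace_rvt_dbg("Driver attempting registration");
	return 0;
}
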
diff --git a/drivers/infiniband/sw/rdmavt/trace_cq.h b/drivers/infiniband/sw/rdmavt/trace_cq.h
new file mode 100644
index 000000000000..54ce06e10b7f
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/trace_cq.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2016 - 2018 Intel Corporation.
+ */
+#if !defined(__RVT_TRACE_CQ_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RVT_TRACE_CQ_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdmavt_cq.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvt_cq
+
+#define wc_opcode_name(opcode) { IB_WC_##opcode, #opcode }
+#define show_wc_opcode(opcode) \
+__print_symbolic(opcode, \
+ wc_opcode_name(SEND), \
+ wc_opcode_name(RDMA_WRITE), \
+ wc_opcode_name(RDMA_READ), \
+ wc_opcode_name(COMP_SWAP), \
+ wc_opcode_name(FETCH_ADD), \
+ wc_opcode_name(LSO), \
+ wc_opcode_name(LOCAL_INV), \
+ wc_opcode_name(REG_MR), \
+ wc_opcode_name(MASKED_COMP_SWAP), \
+ wc_opcode_name(RECV), \
+ wc_opcode_name(RECV_RDMA_WITH_IMM))
+
+#define CQ_ATTR_PRINT \
+"[%s] user cq %s cqe %u comp_vector %d comp_vector_cpu %d flags %x"
+
+DECLARE_EVENT_CLASS(rvt_cq_template,
+ TP_PROTO(struct rvt_cq *cq,
+ const struct ib_cq_init_attr *attr),
+ TP_ARGS(cq, attr),
+ TP_STRUCT__entry(RDI_DEV_ENTRY(cq->rdi)
+ __field(struct rvt_mmap_info *, ip)
+ __field(unsigned int, cqe)
+ __field(int, comp_vector)
+ __field(int, comp_vector_cpu)
+ __field(u32, flags)
+ ),
+ TP_fast_assign(RDI_DEV_ASSIGN(cq->rdi);
+ __entry->ip = cq->ip;
+ __entry->cqe = attr->cqe;
+ __entry->comp_vector = attr->comp_vector;
+ __entry->comp_vector_cpu =
+ cq->comp_vector_cpu;
+ __entry->flags = attr->flags;
+ ),
+ TP_printk(CQ_ATTR_PRINT, __get_str(dev),
+ __entry->ip ? "true" : "false", __entry->cqe,
+ __entry->comp_vector, __entry->comp_vector_cpu,
+ __entry->flags
+ )
+);
+
+DEFINE_EVENT(rvt_cq_template, rvt_create_cq,
+ TP_PROTO(struct rvt_cq *cq, const struct ib_cq_init_attr *attr),
+ TP_ARGS(cq, attr));
+
+#define CQ_PRN \
+"[%s] idx %u wr_id %llx status %u opcode %u,%s length %u qpn %x flags %x imm %x"
+
+DECLARE_EVENT_CLASS(
+ rvt_cq_entry_template,
+ TP_PROTO(struct rvt_cq *cq, struct ib_wc *wc, u32 idx),
+ TP_ARGS(cq, wc, idx),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(cq->rdi)
+ __field(u64, wr_id)
+ __field(u32, status)
+ __field(u32, opcode)
+ __field(u32, qpn)
+ __field(u32, length)
+ __field(u32, idx)
+ __field(u32, flags)
+ __field(u32, imm)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(cq->rdi);
+ __entry->wr_id = wc->wr_id;
+ __entry->status = wc->status;
+ __entry->opcode = wc->opcode;
+ __entry->length = wc->byte_len;
+ __entry->qpn = wc->qp->qp_num;
+ __entry->idx = idx;
+ __entry->flags = wc->wc_flags;
+ __entry->imm = be32_to_cpu(wc->ex.imm_data);
+ ),
+ TP_printk(
+ CQ_PRN,
+ __get_str(dev),
+ __entry->idx,
+ __entry->wr_id,
+ __entry->status,
+ __entry->opcode, show_wc_opcode(__entry->opcode),
+ __entry->length,
+ __entry->qpn,
+ __entry->flags,
+ __entry->imm
+ )
+);
+
+DEFINE_EVENT(
+ rvt_cq_entry_template, rvt_cq_enter,
+ TP_PROTO(struct rvt_cq *cq, struct ib_wc *wc, u32 idx),
+ TP_ARGS(cq, wc, idx));
+
+DEFINE_EVENT(
+ rvt_cq_entry_template, rvt_cq_poll,
+ TP_PROTO(struct rvt_cq *cq, struct ib_wc *wc, u32 idx),
+ TP_ARGS(cq, wc, idx));
+
+#endif /* __RVT_TRACE_CQ_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_cq
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/sw/rdmavt/trace_mr.h b/drivers/infiniband/sw/rdmavt/trace_mr.h
new file mode 100644
index 000000000000..0cb8e0a0565e
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/trace_mr.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ */
+#if !defined(__RVT_TRACE_MR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RVT_TRACE_MR_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_mr.h>
+
+#include "mr.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvt_mr
+DECLARE_EVENT_CLASS(
+ rvt_mr_template,
+ TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len),
+ TP_ARGS(mr, m, n, v, len),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(mr->pd->device))
+ __field(void *, vaddr)
+ __field(struct page *, page)
+ __field(u64, iova)
+ __field(u64, user_base)
+ __field(size_t, len)
+ __field(size_t, length)
+ __field(u32, lkey)
+ __field(u32, offset)
+ __field(u16, m)
+ __field(u16, n)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(mr->pd->device));
+ __entry->vaddr = v;
+ __entry->page = virt_to_page(v);
+ __entry->iova = mr->iova;
+ __entry->user_base = mr->user_base;
+ __entry->lkey = mr->lkey;
+ __entry->m = m;
+ __entry->n = n;
+ __entry->len = len;
+ __entry->length = mr->length;
+ __entry->offset = mr->offset;
+ ),
+ TP_printk(
+ "[%s] lkey %x iova %llx user_base %llx mr_len %lu vaddr %llx page %p m %u n %u len %lu off %u",
+ __get_str(dev),
+ __entry->lkey,
+ __entry->iova,
+ __entry->user_base,
+ __entry->length,
+ (unsigned long long)__entry->vaddr,
+ __entry->page,
+ __entry->m,
+ __entry->n,
+ __entry->len,
+ __entry->offset
+ )
+);
+
+DEFINE_EVENT(
+ rvt_mr_template, rvt_mr_page_seg,
+ TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len),
+ TP_ARGS(mr, m, n, v, len));
+
+DEFINE_EVENT(
+ rvt_mr_template, rvt_mr_fmr_seg,
+ TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len),
+ TP_ARGS(mr, m, n, v, len));
+
+DEFINE_EVENT(
+ rvt_mr_template, rvt_mr_user_seg,
+ TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len),
+ TP_ARGS(mr, m, n, v, len));
+
+DECLARE_EVENT_CLASS(
+ rvt_sge_template,
+ TP_PROTO(struct rvt_sge *sge, struct ib_sge *isge),
+ TP_ARGS(sge, isge),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(sge->mr->pd->device))
+ __field(struct rvt_mregion *, mr)
+ __field(struct rvt_sge *, sge)
+ __field(struct ib_sge *, isge)
+ __field(void *, vaddr)
+ __field(u64, ivaddr)
+ __field(u32, lkey)
+ __field(u32, sge_length)
+ __field(u32, length)
+ __field(u32, ilength)
+ __field(int, user)
+ __field(u16, m)
+ __field(u16, n)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(sge->mr->pd->device));
+ __entry->mr = sge->mr;
+ __entry->sge = sge;
+ __entry->isge = isge;
+ __entry->vaddr = sge->vaddr;
+ __entry->ivaddr = isge->addr;
+ __entry->lkey = sge->mr->lkey;
+ __entry->sge_length = sge->sge_length;
+ __entry->length = sge->length;
+ __entry->ilength = isge->length;
+ __entry->m = sge->m;
+ __entry->n = sge->m;
+ __entry->user = ibpd_to_rvtpd(sge->mr->pd)->user;
+ ),
+ TP_printk(
+ "[%s] mr %p sge %p isge %p vaddr %p ivaddr %llx lkey %x sge_length %u length %u ilength %u m %u n %u user %u",
+ __get_str(dev),
+ __entry->mr,
+ __entry->sge,
+ __entry->isge,
+ __entry->vaddr,
+ __entry->ivaddr,
+ __entry->lkey,
+ __entry->sge_length,
+ __entry->length,
+ __entry->ilength,
+ __entry->m,
+ __entry->n,
+ __entry->user
+ )
+);
+
+DEFINE_EVENT(
+ rvt_sge_template, rvt_sge_adjacent,
+ TP_PROTO(struct rvt_sge *sge, struct ib_sge *isge),
+ TP_ARGS(sge, isge));
+
+DEFINE_EVENT(
+ rvt_sge_template, rvt_sge_new,
+ TP_PROTO(struct rvt_sge *sge, struct ib_sge *isge),
+ TP_ARGS(sge, isge));
+
+TRACE_EVENT(
+ rvt_map_mr_sg,
+ TP_PROTO(struct ib_mr *ibmr, int sg_nents, unsigned int *sg_offset),
+ TP_ARGS(ibmr, sg_nents, sg_offset),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(to_imr(ibmr)->mr.pd->device))
+ __field(u64, iova)
+ __field(u64, ibmr_iova)
+ __field(u64, user_base)
+ __field(u64, ibmr_length)
+ __field(int, sg_nents)
+ __field(uint, sg_offset)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(to_imr(ibmr)->mr.pd->device));
+ __entry->ibmr_iova = ibmr->iova;
+ __entry->iova = to_imr(ibmr)->mr.iova;
+ __entry->user_base = to_imr(ibmr)->mr.user_base;
+ __entry->ibmr_length = to_imr(ibmr)->mr.length;
+ __entry->sg_nents = sg_nents;
+ __entry->sg_offset = sg_offset ? *sg_offset : 0;
+ ),
+ TP_printk(
+ "[%s] ibmr_iova %llx iova %llx user_base %llx length %llx sg_nents %d sg_offset %u",
+ __get_str(dev),
+ __entry->ibmr_iova,
+ __entry->iova,
+ __entry->user_base,
+ __entry->ibmr_length,
+ __entry->sg_nents,
+ __entry->sg_offset
+ )
+);
+
+#endif /* __RVT_TRACE_MR_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_mr
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/sw/rdmavt/trace_qp.h b/drivers/infiniband/sw/rdmavt/trace_qp.h
new file mode 100644
index 000000000000..fa128f16ca3f
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/trace_qp.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ */
+#if !defined(__RVT_TRACE_QP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RVT_TRACE_QP_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdmavt_qp.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvt_qp
+
+DECLARE_EVENT_CLASS(rvt_qphash_template,
+ TP_PROTO(struct rvt_qp *qp, u32 bucket),
+ TP_ARGS(qp, bucket),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, bucket)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->bucket = bucket;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x bucket %u",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->bucket
+ )
+);
+
+DEFINE_EVENT(rvt_qphash_template, rvt_qpinsert,
+ TP_PROTO(struct rvt_qp *qp, u32 bucket),
+ TP_ARGS(qp, bucket));
+
+DEFINE_EVENT(rvt_qphash_template, rvt_qpremove,
+ TP_PROTO(struct rvt_qp *qp, u32 bucket),
+ TP_ARGS(qp, bucket));
+
+DECLARE_EVENT_CLASS(
+ rvt_rnrnak_template,
+ TP_PROTO(struct rvt_qp *qp, u32 to),
+ TP_ARGS(qp, to),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(void *, hrtimer)
+ __field(u32, s_flags)
+ __field(u32, to)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->hrtimer = &qp->s_rnr_timer;
+ __entry->s_flags = qp->s_flags;
+ __entry->to = to;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x hrtimer 0x%p s_flags 0x%x timeout %u us",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->hrtimer,
+ __entry->s_flags,
+ __entry->to
+ )
+);
+
+DEFINE_EVENT(
+ rvt_rnrnak_template, rvt_rnrnak_add,
+ TP_PROTO(struct rvt_qp *qp, u32 to),
+ TP_ARGS(qp, to));
+
+DEFINE_EVENT(
+ rvt_rnrnak_template, rvt_rnrnak_timeout,
+ TP_PROTO(struct rvt_qp *qp, u32 to),
+ TP_ARGS(qp, to));
+
+DEFINE_EVENT(
+ rvt_rnrnak_template, rvt_rnrnak_stop,
+ TP_PROTO(struct rvt_qp *qp, u32 to),
+ TP_ARGS(qp, to));
+
+#endif /* __RVT_TRACE_QP_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_qp
+#include <trace/define_trace.h>
+
diff --git a/drivers/infiniband/sw/rdmavt/trace_rc.h b/drivers/infiniband/sw/rdmavt/trace_rc.h
new file mode 100644
index 000000000000..9919d66c17c3
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/trace_rc.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2017 Intel Corporation.
+ */
+#if !defined(__RVT_TRACE_RC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RVT_TRACE_RC_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdmavt_qp.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvt_rc
+
+DECLARE_EVENT_CLASS(rvt_rc_template,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, s_flags)
+ __field(u32, psn)
+ __field(u32, s_psn)
+ __field(u32, s_next_psn)
+ __field(u32, s_sending_psn)
+ __field(u32, s_sending_hpsn)
+ __field(u32, r_psn)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->s_flags = qp->s_flags;
+ __entry->psn = psn;
+ __entry->s_psn = qp->s_psn;
+ __entry->s_next_psn = qp->s_next_psn;
+ __entry->s_sending_psn = qp->s_sending_psn;
+ __entry->s_sending_hpsn = qp->s_sending_hpsn;
+ __entry->r_psn = qp->r_psn;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x s_flags 0x%x psn 0x%x s_psn 0x%x s_next_psn 0x%x s_sending_psn 0x%x sending_hpsn 0x%x r_psn 0x%x",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->s_flags,
+ __entry->psn,
+ __entry->s_psn,
+ __entry->s_next_psn,
+ __entry->s_sending_psn,
+ __entry->s_sending_hpsn,
+ __entry->r_psn
+ )
+);
+
+DEFINE_EVENT(rvt_rc_template, rvt_rc_timeout,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+#endif /* __RVT_TRACE_RC_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_rc
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/sw/rdmavt/trace_rvt.h b/drivers/infiniband/sw/rdmavt/trace_rvt.h
new file mode 100644
index 000000000000..a00489e66ddf
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/trace_rvt.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ */
+#if !defined(__RVT_TRACE_RVT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RVT_TRACE_RVT_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_vt.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvt
+
+TRACE_EVENT(rvt_dbg,
+ TP_PROTO(struct rvt_dev_info *rdi,
+ const char *msg),
+ TP_ARGS(rdi, msg),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(rdi)
+ __string(msg, msg)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(rdi);
+ __assign_str(msg);
+ ),
+ TP_printk("[%s]: %s", __get_str(dev), __get_str(msg))
+);
+
+#endif /* __RVT_TRACE_RVT_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_rvt
+#include <trace/define_trace.h>
+
diff --git a/drivers/infiniband/sw/rdmavt/trace_tx.h b/drivers/infiniband/sw/rdmavt/trace_tx.h
new file mode 100644
index 000000000000..dff18baa2765
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/trace_tx.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ */
+#if !defined(__RVT_TRACE_TX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RVT_TRACE_TX_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdmavt_qp.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvt_tx
+
+#define wr_opcode_name(opcode) { IB_WR_##opcode, #opcode }
+#define show_wr_opcode(opcode) \
+__print_symbolic(opcode, \
+ wr_opcode_name(RDMA_WRITE), \
+ wr_opcode_name(RDMA_WRITE_WITH_IMM), \
+ wr_opcode_name(SEND), \
+ wr_opcode_name(SEND_WITH_IMM), \
+ wr_opcode_name(RDMA_READ), \
+ wr_opcode_name(ATOMIC_CMP_AND_SWP), \
+ wr_opcode_name(ATOMIC_FETCH_AND_ADD), \
+ wr_opcode_name(LSO), \
+ wr_opcode_name(SEND_WITH_INV), \
+ wr_opcode_name(RDMA_READ_WITH_INV), \
+ wr_opcode_name(LOCAL_INV), \
+ wr_opcode_name(MASKED_ATOMIC_CMP_AND_SWP), \
+ wr_opcode_name(MASKED_ATOMIC_FETCH_AND_ADD), \
+ wr_opcode_name(RESERVED1), \
+ wr_opcode_name(RESERVED2), \
+ wr_opcode_name(RESERVED3), \
+ wr_opcode_name(RESERVED4), \
+ wr_opcode_name(RESERVED5), \
+ wr_opcode_name(RESERVED6), \
+ wr_opcode_name(RESERVED7), \
+ wr_opcode_name(RESERVED8), \
+ wr_opcode_name(RESERVED9), \
+ wr_opcode_name(RESERVED10))
+
+#define POS_PRN \
+"[%s] wqe %p wr_id %llx send_flags %x qpn %x qpt %u psn %x lpsn %x ssn %x length %u opcode 0x%.2x,%s size %u avail %u head %u last %u pid %u num_sge %u wr_num_sge %u"
+
+TRACE_EVENT(
+ rvt_post_one_wr,
+ TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, int wr_num_sge),
+ TP_ARGS(qp, wqe, wr_num_sge),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
+ __field(u64, wr_id)
+ __field(struct rvt_swqe *, wqe)
+ __field(u32, qpn)
+ __field(u32, qpt)
+ __field(u32, psn)
+ __field(u32, lpsn)
+ __field(u32, length)
+ __field(u32, opcode)
+ __field(u32, size)
+ __field(u32, avail)
+ __field(u32, head)
+ __field(u32, last)
+ __field(u32, ssn)
+ __field(int, send_flags)
+ __field(pid_t, pid)
+ __field(int, num_sge)
+ __field(int, wr_num_sge)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device));
+ __entry->wqe = wqe;
+ __entry->wr_id = wqe->wr.wr_id;
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->qpt = qp->ibqp.qp_type;
+ __entry->psn = wqe->psn;
+ __entry->lpsn = wqe->lpsn;
+ __entry->length = wqe->length;
+ __entry->opcode = wqe->wr.opcode;
+ __entry->size = qp->s_size;
+ __entry->avail = qp->s_avail;
+ __entry->head = qp->s_head;
+ __entry->last = qp->s_last;
+ __entry->pid = qp->pid;
+ __entry->ssn = wqe->ssn;
+ __entry->send_flags = wqe->wr.send_flags;
+ __entry->num_sge = wqe->wr.num_sge;
+ __entry->wr_num_sge = wr_num_sge;
+ ),
+ TP_printk(
+ POS_PRN,
+ __get_str(dev),
+ __entry->wqe,
+ __entry->wr_id,
+ __entry->send_flags,
+ __entry->qpn,
+ __entry->qpt,
+ __entry->psn,
+ __entry->lpsn,
+ __entry->ssn,
+ __entry->length,
+ __entry->opcode, show_wr_opcode(__entry->opcode),
+ __entry->size,
+ __entry->avail,
+ __entry->head,
+ __entry->last,
+ __entry->pid,
+ __entry->num_sge,
+ __entry->wr_num_sge
+ )
+);
+
+TRACE_EVENT(
+ rvt_qp_send_completion,
+ TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 idx),
+ TP_ARGS(qp, wqe, idx),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
+ __field(struct rvt_swqe *, wqe)
+ __field(u64, wr_id)
+ __field(u32, qpn)
+ __field(u32, qpt)
+ __field(u32, length)
+ __field(u32, idx)
+ __field(u32, ssn)
+ __field(enum ib_wr_opcode, opcode)
+ __field(int, send_flags)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device));
+ __entry->wqe = wqe;
+ __entry->wr_id = wqe->wr.wr_id;
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->qpt = qp->ibqp.qp_type;
+ __entry->length = wqe->length;
+ __entry->idx = idx;
+ __entry->ssn = wqe->ssn;
+ __entry->opcode = wqe->wr.opcode;
+ __entry->send_flags = wqe->wr.send_flags;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x qpt %u wqe %p idx %u wr_id %llx length %u ssn %u opcode %x send_flags %x",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->qpt,
+ __entry->wqe,
+ __entry->idx,
+ __entry->wr_id,
+ __entry->length,
+ __entry->ssn,
+ __entry->opcode,
+ __entry->send_flags
+ )
+);
+#endif /* __RVT_TRACE_TX_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_tx
+#include <trace/define_trace.h>
+
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
new file mode 100644
index 000000000000..d22d610c2696
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -0,0 +1,619 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2016 - 2018 Intel Corporation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include "vt.h"
+#include "cq.h"
+#include "trace.h"
+
+#define RVT_UVERBS_ABI_VERSION 2
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("RDMA Verbs Transport Library");
+
+static int __init rvt_init(void)
+{
+ int ret = rvt_driver_cq_init();
+
+ if (ret)
+ pr_err("Error in driver CQ init.\n");
+
+ return ret;
+}
+module_init(rvt_init);
+
+static void __exit rvt_cleanup(void)
+{
+ rvt_cq_exit();
+}
+module_exit(rvt_cleanup);
+
+/**
+ * rvt_alloc_device - allocate rdi
+ * @size: how big of a structure to allocate
+ * @nports: number of ports to allocate array slots for
+ *
+ * Use the IB core device alloc to allocate space for the rdi, which is assumed
+ * to be inside the ib_device. Any extra space that drivers require should be
+ * included in size.
+ *
+ * We also allocate a port array based on the number of ports.
+ *
+ * Return: pointer to allocated rdi
+ */
+struct rvt_dev_info *rvt_alloc_device(size_t size, int nports)
+{
+ struct rvt_dev_info *rdi;
+
+ rdi = container_of(_ib_alloc_device(size, &init_net), struct rvt_dev_info, ibdev);
+ if (!rdi)
+ return rdi;
+
+ rdi->ports = kcalloc(nports, sizeof(*rdi->ports), GFP_KERNEL);
+ if (!rdi->ports)
+ ib_dealloc_device(&rdi->ibdev);
+
+ return rdi;
+}
+EXPORT_SYMBOL(rvt_alloc_device);
+
+/**
+ * rvt_dealloc_device - deallocate rdi
+ * @rdi: structure to free
+ *
+ * Free a structure allocated with rvt_alloc_device()
+ */
+void rvt_dealloc_device(struct rvt_dev_info *rdi)
+{
+ kfree(rdi->ports);
+ ib_dealloc_device(&rdi->ibdev);
+}
+EXPORT_SYMBOL(rvt_dealloc_device);
+
+static int rvt_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props,
+ struct ib_udata *uhw)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+
+ if (uhw->inlen || uhw->outlen)
+ return -EINVAL;
+ /*
+ * Return rvt_dev_info.dparms.props contents
+ */
+ *props = rdi->dparms.props;
+ return 0;
+}
+
+static int rvt_get_numa_node(struct ib_device *ibdev)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+
+ return rdi->dparms.node;
+}
+
+static int rvt_modify_device(struct ib_device *device,
+ int device_modify_mask,
+ struct ib_device_modify *device_modify)
+{
+ /*
+ * There is currently no need to supply this based on qib and hfi1.
+ * Future drivers may need to implement this though.
+ */
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * rvt_query_port - Passes the query port call to the driver
+ * @ibdev: Verbs IB dev
+ * @port_num: port number, 1 based from ib core
+ * @props: structure to hold returned properties
+ *
+ * Return: 0 on success
+ */
+static int rvt_query_port(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attr *props)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+ struct rvt_ibport *rvp;
+ u32 port_index = ibport_num_to_idx(ibdev, port_num);
+
+ rvp = rdi->ports[port_index];
+ /* props being zeroed by the caller, avoid zeroing it here */
+ props->sm_lid = rvp->sm_lid;
+ props->sm_sl = rvp->sm_sl;
+ props->port_cap_flags = rvp->port_cap_flags;
+ props->max_msg_sz = 0x80000000;
+ props->pkey_tbl_len = rvt_get_npkeys(rdi);
+ props->bad_pkey_cntr = rvp->pkey_violations;
+ props->qkey_viol_cntr = rvp->qkey_violations;
+ props->subnet_timeout = rvp->subnet_timeout;
+ props->init_type_reply = 0;
+
+ /* Populate the remaining ib_port_attr elements */
+ return rdi->driver_f.query_port_state(rdi, port_num, props);
+}
+
+/**
+ * rvt_modify_port - modify port
+ * @ibdev: Verbs IB dev
+ * @port_num: Port number, 1 based from ib core
+ * @port_modify_mask: How to change the port
+ * @props: Structure to fill in
+ *
+ * Return: 0 on success
+ */
+static int rvt_modify_port(struct ib_device *ibdev, u32 port_num,
+ int port_modify_mask, struct ib_port_modify *props)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+ struct rvt_ibport *rvp;
+ int ret = 0;
+ u32 port_index = ibport_num_to_idx(ibdev, port_num);
+
+ rvp = rdi->ports[port_index];
+ if (port_modify_mask & IB_PORT_OPA_MASK_CHG) {
+ rvp->port_cap3_flags |= props->set_port_cap_mask;
+ rvp->port_cap3_flags &= ~props->clr_port_cap_mask;
+ } else {
+ rvp->port_cap_flags |= props->set_port_cap_mask;
+ rvp->port_cap_flags &= ~props->clr_port_cap_mask;
+ }
+
+ if (props->set_port_cap_mask || props->clr_port_cap_mask)
+ rdi->driver_f.cap_mask_chg(rdi, port_num);
+ if (port_modify_mask & IB_PORT_SHUTDOWN)
+ ret = rdi->driver_f.shut_down_port(rdi, port_num);
+ if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
+ rvp->qkey_violations = 0;
+
+ return ret;
+}
+
+/**
+ * rvt_query_pkey - Return a pkey from the table at a given index
+ * @ibdev: Verbs IB dev
+ * @port_num: Port number, 1 based from ib core
+ * @index: Index into pkey table
+ * @pkey: returned pkey from the port pkey table
+ *
+ * Return: 0 on success, -EINVAL if the index is out of range
+ */
+static int rvt_query_pkey(struct ib_device *ibdev, u32 port_num, u16 index,
+ u16 *pkey)
+{
+ /*
+ * Driver will be responsible for keeping rvt_dev_info.pkey_table up to
+ * date. This function will just return that value. There is no need to
+ * lock; if a stale value is read and sent to the user, so be it, as there
+ * is no way to protect against that anyway.
+ */
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+ u32 port_index;
+
+ port_index = ibport_num_to_idx(ibdev, port_num);
+
+ if (index >= rvt_get_npkeys(rdi))
+ return -EINVAL;
+
+ *pkey = rvt_get_pkey(rdi, port_index, index);
+ return 0;
+}
+
+/**
+ * rvt_query_gid - Return a gid from the table
+ * @ibdev: Verbs IB dev
+ * @port_num: Port number, 1 based from ib core
+ * @guid_index: Index in table
+ * @gid: Gid to return
+ *
+ * Return: 0 on success
+ */
+static int rvt_query_gid(struct ib_device *ibdev, u32 port_num,
+ int guid_index, union ib_gid *gid)
+{
+ struct rvt_dev_info *rdi;
+ struct rvt_ibport *rvp;
+ u32 port_index;
+
+ /*
+ * The driver is responsible for updating the guid table, which is used
+ * to craft the return value. This works similarly to how query_pkey()
+ * is handled.
+ */
+ port_index = ibport_num_to_idx(ibdev, port_num);
+
+ rdi = ib_to_rvt(ibdev);
+ rvp = rdi->ports[port_index];
+
+ gid->global.subnet_prefix = rvp->gid_prefix;
+
+ return rdi->driver_f.get_guid_be(rdi, rvp, guid_index,
+ &gid->global.interface_id);
+}
+
+/**
+ * rvt_alloc_ucontext - Allocate a user context
+ * @uctx: Verbs context
+ * @udata: User data allocated
+ */
+static int rvt_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
+{
+ return 0;
+}
+
+/**
+ * rvt_dealloc_ucontext - Free a user context
+ * @context: Unused
+ */
+static void rvt_dealloc_ucontext(struct ib_ucontext *context)
+{
+ return;
+}
+
+static int rvt_get_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+ struct ib_port_attr attr;
+ int err;
+
+ immutable->core_cap_flags = rdi->dparms.core_cap_flags;
+
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+ immutable->max_mad_size = rdi->dparms.max_mad_size;
+
+ return 0;
+}
+
+enum {
+ MISC,
+ QUERY_DEVICE,
+ MODIFY_DEVICE,
+ QUERY_PORT,
+ MODIFY_PORT,
+ QUERY_PKEY,
+ QUERY_GID,
+ ALLOC_UCONTEXT,
+ DEALLOC_UCONTEXT,
+ GET_PORT_IMMUTABLE,
+ CREATE_QP,
+ MODIFY_QP,
+ DESTROY_QP,
+ QUERY_QP,
+ POST_SEND,
+ POST_RECV,
+ POST_SRQ_RECV,
+ CREATE_AH,
+ DESTROY_AH,
+ MODIFY_AH,
+ QUERY_AH,
+ CREATE_SRQ,
+ MODIFY_SRQ,
+ DESTROY_SRQ,
+ QUERY_SRQ,
+ ATTACH_MCAST,
+ DETACH_MCAST,
+ GET_DMA_MR,
+ REG_USER_MR,
+ DEREG_MR,
+ ALLOC_MR,
+ MAP_MR_SG,
+ ALLOC_FMR,
+ MAP_PHYS_FMR,
+ UNMAP_FMR,
+ DEALLOC_FMR,
+ MMAP,
+ CREATE_CQ,
+ DESTROY_CQ,
+ POLL_CQ,
+ REQ_NOTFIY_CQ,
+ RESIZE_CQ,
+ ALLOC_PD,
+ DEALLOC_PD,
+ _VERB_IDX_MAX /* Must always be last! */
+};
+
+static const struct ib_device_ops rvt_dev_ops = {
+ .uverbs_abi_ver = RVT_UVERBS_ABI_VERSION,
+
+ .alloc_mr = rvt_alloc_mr,
+ .alloc_pd = rvt_alloc_pd,
+ .alloc_ucontext = rvt_alloc_ucontext,
+ .attach_mcast = rvt_attach_mcast,
+ .create_ah = rvt_create_ah,
+ .create_cq = rvt_create_cq,
+ .create_qp = rvt_create_qp,
+ .create_srq = rvt_create_srq,
+ .create_user_ah = rvt_create_ah,
+ .dealloc_pd = rvt_dealloc_pd,
+ .dealloc_ucontext = rvt_dealloc_ucontext,
+ .dereg_mr = rvt_dereg_mr,
+ .destroy_ah = rvt_destroy_ah,
+ .destroy_cq = rvt_destroy_cq,
+ .destroy_qp = rvt_destroy_qp,
+ .destroy_srq = rvt_destroy_srq,
+ .detach_mcast = rvt_detach_mcast,
+ .get_dma_mr = rvt_get_dma_mr,
+ .get_numa_node = rvt_get_numa_node,
+ .get_port_immutable = rvt_get_port_immutable,
+ .map_mr_sg = rvt_map_mr_sg,
+ .mmap = rvt_mmap,
+ .modify_ah = rvt_modify_ah,
+ .modify_device = rvt_modify_device,
+ .modify_port = rvt_modify_port,
+ .modify_qp = rvt_modify_qp,
+ .modify_srq = rvt_modify_srq,
+ .poll_cq = rvt_poll_cq,
+ .post_recv = rvt_post_recv,
+ .post_send = rvt_post_send,
+ .post_srq_recv = rvt_post_srq_recv,
+ .query_ah = rvt_query_ah,
+ .query_device = rvt_query_device,
+ .query_gid = rvt_query_gid,
+ .query_pkey = rvt_query_pkey,
+ .query_port = rvt_query_port,
+ .query_qp = rvt_query_qp,
+ .query_srq = rvt_query_srq,
+ .reg_user_mr = rvt_reg_user_mr,
+ .req_notify_cq = rvt_req_notify_cq,
+ .resize_cq = rvt_resize_cq,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, rvt_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_cq, rvt_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_pd, rvt_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, rvt_qp, ibqp),
+ INIT_RDMA_OBJ_SIZE(ib_srq, rvt_srq, ibsrq),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, rvt_ucontext, ibucontext),
+};
+
+static noinline int check_support(struct rvt_dev_info *rdi, int verb)
+{
+ switch (verb) {
+ case MISC:
+ /*
+ * These functions are not part of verbs specifically but are
+ * required for rdmavt to function.
+ */
+ if ((!rdi->ibdev.ops.port_groups) ||
+ (!rdi->driver_f.get_pci_dev))
+ return -EINVAL;
+ break;
+
+ case MODIFY_DEVICE:
+ /*
+ * rdmavt does not currently support modify device; drivers must
+ * provide it themselves.
+ */
+ if (!rdi->ibdev.ops.modify_device)
+ return -EOPNOTSUPP;
+ break;
+
+ case QUERY_PORT:
+ if (!rdi->ibdev.ops.query_port)
+ if (!rdi->driver_f.query_port_state)
+ return -EINVAL;
+ break;
+
+ case MODIFY_PORT:
+ if (!rdi->ibdev.ops.modify_port)
+ if (!rdi->driver_f.cap_mask_chg ||
+ !rdi->driver_f.shut_down_port)
+ return -EINVAL;
+ break;
+
+ case QUERY_GID:
+ if (!rdi->ibdev.ops.query_gid)
+ if (!rdi->driver_f.get_guid_be)
+ return -EINVAL;
+ break;
+
+ case CREATE_QP:
+ if (!rdi->ibdev.ops.create_qp)
+ if (!rdi->driver_f.qp_priv_alloc ||
+ !rdi->driver_f.qp_priv_free ||
+ !rdi->driver_f.notify_qp_reset ||
+ !rdi->driver_f.flush_qp_waiters ||
+ !rdi->driver_f.stop_send_queue ||
+ !rdi->driver_f.quiesce_qp)
+ return -EINVAL;
+ break;
+
+ case MODIFY_QP:
+ if (!rdi->ibdev.ops.modify_qp)
+ if (!rdi->driver_f.notify_qp_reset ||
+ !rdi->driver_f.schedule_send ||
+ !rdi->driver_f.get_pmtu_from_attr ||
+ !rdi->driver_f.flush_qp_waiters ||
+ !rdi->driver_f.stop_send_queue ||
+ !rdi->driver_f.quiesce_qp ||
+ !rdi->driver_f.notify_error_qp ||
+ !rdi->driver_f.mtu_from_qp ||
+ !rdi->driver_f.mtu_to_path_mtu)
+ return -EINVAL;
+ break;
+
+ case DESTROY_QP:
+ if (!rdi->ibdev.ops.destroy_qp)
+ if (!rdi->driver_f.qp_priv_free ||
+ !rdi->driver_f.notify_qp_reset ||
+ !rdi->driver_f.flush_qp_waiters ||
+ !rdi->driver_f.stop_send_queue ||
+ !rdi->driver_f.quiesce_qp)
+ return -EINVAL;
+ break;
+
+ case POST_SEND:
+ if (!rdi->ibdev.ops.post_send)
+ if (!rdi->driver_f.schedule_send ||
+ !rdi->driver_f.do_send ||
+ !rdi->post_parms)
+ return -EINVAL;
+ break;
+
+ }
+
+ return 0;
+}
+
+/**
+ * rvt_register_device - register a driver
+ * @rdi: main dev structure for all of rdmavt operations
+ *
+ * It is up to drivers to allocate the rdi and fill in the appropriate
+ * information.
+ *
+ * Return: 0 on success otherwise an errno.
+ */
+int rvt_register_device(struct rvt_dev_info *rdi)
+{
+ int ret = 0, i;
+
+ if (!rdi)
+ return -EINVAL;
+
+ /*
+ * Check to ensure drivers have set up the required helpers for the
+ * verbs they want rdmavt to handle.
+ */
+ for (i = 0; i < _VERB_IDX_MAX; i++)
+ if (check_support(rdi, i)) {
+ pr_err("Driver support req not met at %d\n", i);
+ return -EINVAL;
+ }
+
+ ib_set_device_ops(&rdi->ibdev, &rvt_dev_ops);
+
+ /* Once we get past here we can use rvt_pr macros and tracepoints */
+ trace_rvt_dbg(rdi, "Driver attempting registration");
+ rvt_mmap_init(rdi);
+
+ /* Queue Pairs */
+ ret = rvt_driver_qp_init(rdi);
+ if (ret) {
+ pr_err("Error in driver QP init.\n");
+ return -EINVAL;
+ }
+
+ /* Address Handle */
+ spin_lock_init(&rdi->n_ahs_lock);
+ rdi->n_ahs_allocated = 0;
+
+ /* Shared Receive Queue */
+ rvt_driver_srq_init(rdi);
+
+ /* Multicast */
+ rvt_driver_mcast_init(rdi);
+
+ /* Mem Region */
+ ret = rvt_driver_mr_init(rdi);
+ if (ret) {
+ pr_err("Error in driver MR init.\n");
+ goto bail_no_mr;
+ }
+
+ /* Memory Working Set Size */
+ ret = rvt_wss_init(rdi);
+ if (ret) {
+ rvt_pr_err(rdi, "Error in WSS init.\n");
+ goto bail_mr;
+ }
+
+ /* Completion queues */
+ spin_lock_init(&rdi->n_cqs_lock);
+
+ /* Protection Domain */
+ spin_lock_init(&rdi->n_pds_lock);
+ rdi->n_pds_allocated = 0;
+
+ /*
+ * There are some things which could be set by underlying drivers but
+ * really should be up to rdmavt to set. For instance, drivers can't know
+ * exactly which functions rdmavt supports, nor do they know the ABI
+ * version, so those values are set here.
+ */
+ rdi->ibdev.uverbs_cmd_mask |=
+ (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
+ (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_POST_SEND) |
+ (1ull << IB_USER_VERBS_CMD_POST_RECV) |
+ (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
+ rdi->ibdev.node_type = RDMA_NODE_IB_CA;
+ if (!rdi->ibdev.num_comp_vectors)
+ rdi->ibdev.num_comp_vectors = 1;
+
+ /* We are now good to announce we exist */
+ ret = ib_register_device(&rdi->ibdev, dev_name(&rdi->ibdev.dev), NULL);
+ if (ret) {
+ rvt_pr_err(rdi, "Failed to register driver with ib core.\n");
+ goto bail_wss;
+ }
+
+ rvt_create_mad_agents(rdi);
+
+ rvt_pr_info(rdi, "Registration with rdmavt done.\n");
+ return ret;
+
+bail_wss:
+ rvt_wss_exit(rdi);
+bail_mr:
+ rvt_mr_exit(rdi);
+
+bail_no_mr:
+ rvt_qp_exit(rdi);
+
+ return ret;
+}
+EXPORT_SYMBOL(rvt_register_device);
+
+/**
+ * rvt_unregister_device - remove a driver
+ * @rdi: rvt dev struct
+ */
+void rvt_unregister_device(struct rvt_dev_info *rdi)
+{
+ trace_rvt_dbg(rdi, "Driver is unregistering.");
+ if (!rdi)
+ return;
+
+ rvt_free_mad_agents(rdi);
+
+ ib_unregister_device(&rdi->ibdev);
+ rvt_wss_exit(rdi);
+ rvt_mr_exit(rdi);
+ rvt_qp_exit(rdi);
+}
+EXPORT_SYMBOL(rvt_unregister_device);
+
+/**
+ * rvt_init_port - init internal data for driver port
+ * @rdi: rvt_dev_info struct
+ * @port: rvt port
+ * @port_index: 0 based index of ports, different from IB core port num
+ * @pkey_table: pkey_table for @port
+ *
+ * Keep track of a list of ports. No need to have a detach port.
+ * They persist until the driver goes away.
+ *
+ * Return: always 0
+ */
+int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
+ int port_index, u16 *pkey_table)
+{
+
+ rdi->ports[port_index] = port;
+ rdi->ports[port_index]->pkey_table = pkey_table;
+
+ return 0;
+}
+EXPORT_SYMBOL(rvt_init_port);
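
For orientation, a hedged sketch of how a driver is expected to use the registration API in this file: allocate the rdi sized for the driver's own device structure, fill in the driver_f callbacks that check_support() insists on, describe each port, then register. struct my_dev, my_probe() and the single-entry pkey table are illustrative only, and this is kernel-context code that does not build outside a kernel tree:

#include <rdma/rdma_vt.h>

struct my_dev {
	struct rvt_dev_info rdi;	/* embedded first, as existing rdmavt users do */
	/* driver-private state would follow */
};

static int my_probe(void)
{
	struct rvt_dev_info *rdi;
	struct my_dev *dd;
	static struct rvt_ibport port;
	static u16 pkey_table[1] = { 0xffff };	/* default pkey, illustrative */

	rdi = rvt_alloc_device(sizeof(*dd), 1);	/* one port */
	if (!rdi)
		return -ENOMEM;
	dd = container_of(rdi, struct my_dev, rdi);
	/* dd would be used to initialize driver-private state here */

	/*
	 * Fill the callbacks check_support() requires before registering,
	 * e.g. rdi->driver_f.get_pci_dev, .query_port_state, .schedule_send,
	 * the qp_priv_alloc/free pair, and so on.
	 */

	rvt_init_port(rdi, &port, 0, pkey_table);

	if (rvt_register_device(rdi)) {
		rvt_dealloc_device(rdi);
		return -EINVAL;
	}
	return 0;
}
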
diff --git a/drivers/infiniband/sw/rdmavt/vt.h b/drivers/infiniband/sw/rdmavt/vt.h
new file mode 100644
index 000000000000..4d17333fa90e
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/vt.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ */
+
+#ifndef DEF_RDMAVT_H
+#define DEF_RDMAVT_H
+
+#include <rdma/rdma_vt.h>
+#include <linux/pci.h>
+#include "pd.h"
+#include "qp.h"
+#include "ah.h"
+#include "mr.h"
+#include "srq.h"
+#include "mcast.h"
+#include "mmap.h"
+#include "cq.h"
+#include "mad.h"
+
+#define rvt_pr_info(rdi, fmt, ...) \
+ __rvt_pr_info(rdi->driver_f.get_pci_dev(rdi), \
+ rvt_get_ibdev_name(rdi), \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define rvt_pr_warn(rdi, fmt, ...) \
+ __rvt_pr_warn(rdi->driver_f.get_pci_dev(rdi), \
+ rvt_get_ibdev_name(rdi), \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define rvt_pr_err(rdi, fmt, ...) \
+ __rvt_pr_err(rdi->driver_f.get_pci_dev(rdi), \
+ rvt_get_ibdev_name(rdi), \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define rvt_pr_err_ratelimited(rdi, fmt, ...) \
+ __rvt_pr_err_ratelimited((rdi)->driver_f.get_pci_dev(rdi), \
+ rvt_get_ibdev_name(rdi), \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define __rvt_pr_info(pdev, name, fmt, ...) \
+ dev_info(&pdev->dev, "%s: " fmt, name, ##__VA_ARGS__)
+
+#define __rvt_pr_warn(pdev, name, fmt, ...) \
+ dev_warn(&pdev->dev, "%s: " fmt, name, ##__VA_ARGS__)
+
+#define __rvt_pr_err(pdev, name, fmt, ...) \
+ dev_err(&pdev->dev, "%s: " fmt, name, ##__VA_ARGS__)
+
+#define __rvt_pr_err_ratelimited(pdev, name, fmt, ...) \
+ dev_err_ratelimited(&(pdev)->dev, "%s: " fmt, name, ##__VA_ARGS__)
+
+static inline u32 ibport_num_to_idx(struct ib_device *ibdev, u32 port_num)
+{
+ return port_num - 1; /* IB ports start at 1; our arrays start at 0 */
+}
+
+#endif /* DEF_RDMAVT_H */
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
new file mode 100644
index 000000000000..1ed5b63f8afc
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config RDMA_RXE
+ tristate "Software RDMA over Ethernet (RoCE) driver"
+ depends on INET && PCI && INFINIBAND && 64BIT
+ depends on INFINIBAND_VIRT_DMA
+ select NET_UDP_TUNNEL
+ select CRC32
+ help
+ This driver implements the InfiniBand RDMA transport over
+ the Linux network stack. It enables a system with a
+ standard Ethernet adapter to interoperate with a RoCE
+ adapter or with another system running the RXE driver.
+ Documentation on InfiniBand and RoCE can be downloaded at
+ www.infinibandta.org and www.openfabrics.org. (See also
+ siw which is a similar software driver for iWARP.)
+
+ The driver is split into two layers: one interfaces with the
+ Linux RDMA stack and implements a kernel or user space
+ verbs API. The user space verbs API requires a support
+ library named librxe which is loaded by the generic user
+ space verbs API, libibverbs. The other layer interfaces
+ with the Linux network stack at layer 3.
+
+ To configure and work with the soft-RoCE driver, please see the
+ "configure Soft-RoCE (RXE)" section of the following wiki page:
+
+ https://github.com/linux-rdma/rdma-core/blob/master/Documentation/rxe.md
diff --git a/drivers/infiniband/sw/rxe/Makefile b/drivers/infiniband/sw/rxe/Makefile
new file mode 100644
index 000000000000..93134f1d1d0c
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/Makefile
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_RDMA_RXE) += rdma_rxe.o
+
+rdma_rxe-y := \
+ rxe.o \
+ rxe_comp.o \
+ rxe_req.o \
+ rxe_resp.o \
+ rxe_recv.o \
+ rxe_pool.o \
+ rxe_queue.o \
+ rxe_verbs.o \
+ rxe_av.o \
+ rxe_srq.o \
+ rxe_qp.o \
+ rxe_cq.o \
+ rxe_mr.o \
+ rxe_mw.o \
+ rxe_opcode.o \
+ rxe_mmap.o \
+ rxe_icrc.o \
+ rxe_mcast.o \
+ rxe_task.o \
+ rxe_net.o \
+ rxe_hw_counters.o
+
+rdma_rxe-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += rxe_odp.o
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
new file mode 100644
index 000000000000..e891199cbdef
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#include <rdma/rdma_netlink.h>
+#include <net/addrconf.h>
+#include "rxe.h"
+#include "rxe_loc.h"
+
+MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib");
+MODULE_DESCRIPTION("Soft RDMA transport");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/* free resources for a rxe device; all objects created for this device must
+ * have been destroyed
+ */
+void rxe_dealloc(struct ib_device *ib_dev)
+{
+ struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);
+
+ rxe_pool_cleanup(&rxe->uc_pool);
+ rxe_pool_cleanup(&rxe->pd_pool);
+ rxe_pool_cleanup(&rxe->ah_pool);
+ rxe_pool_cleanup(&rxe->srq_pool);
+ rxe_pool_cleanup(&rxe->qp_pool);
+ rxe_pool_cleanup(&rxe->cq_pool);
+ rxe_pool_cleanup(&rxe->mr_pool);
+ rxe_pool_cleanup(&rxe->mw_pool);
+
+ WARN_ON(!RB_EMPTY_ROOT(&rxe->mcg_tree));
+
+ mutex_destroy(&rxe->usdev_lock);
+}
+
+static const struct ib_device_ops rxe_ib_dev_odp_ops = {
+ .advise_mr = rxe_ib_advise_mr,
+};
+
+/* initialize rxe device parameters */
+static void rxe_init_device_param(struct rxe_dev *rxe, struct net_device *ndev)
+{
+ rxe->max_inline_data = RXE_MAX_INLINE_DATA;
+
+ rxe->attr.vendor_id = RXE_VENDOR_ID;
+ rxe->attr.max_mr_size = RXE_MAX_MR_SIZE;
+ rxe->attr.page_size_cap = RXE_PAGE_SIZE_CAP;
+ rxe->attr.max_qp = RXE_MAX_QP;
+ rxe->attr.max_qp_wr = RXE_MAX_QP_WR;
+ rxe->attr.device_cap_flags = RXE_DEVICE_CAP_FLAGS;
+ rxe->attr.kernel_cap_flags = IBK_ALLOW_USER_UNREG;
+ rxe->attr.max_send_sge = RXE_MAX_SGE;
+ rxe->attr.max_recv_sge = RXE_MAX_SGE;
+ rxe->attr.max_sge_rd = RXE_MAX_SGE_RD;
+ rxe->attr.max_cq = RXE_MAX_CQ;
+ rxe->attr.max_cqe = (1 << RXE_MAX_LOG_CQE) - 1;
+ rxe->attr.max_mr = RXE_MAX_MR;
+ rxe->attr.max_mw = RXE_MAX_MW;
+ rxe->attr.max_pd = RXE_MAX_PD;
+ rxe->attr.max_qp_rd_atom = RXE_MAX_QP_RD_ATOM;
+ rxe->attr.max_res_rd_atom = RXE_MAX_RES_RD_ATOM;
+ rxe->attr.max_qp_init_rd_atom = RXE_MAX_QP_INIT_RD_ATOM;
+ rxe->attr.atomic_cap = IB_ATOMIC_HCA;
+ rxe->attr.max_mcast_grp = RXE_MAX_MCAST_GRP;
+ rxe->attr.max_mcast_qp_attach = RXE_MAX_MCAST_QP_ATTACH;
+ rxe->attr.max_total_mcast_qp_attach = RXE_MAX_TOT_MCAST_QP_ATTACH;
+ rxe->attr.max_ah = RXE_MAX_AH;
+ rxe->attr.max_srq = RXE_MAX_SRQ;
+ rxe->attr.max_srq_wr = RXE_MAX_SRQ_WR;
+ rxe->attr.max_srq_sge = RXE_MAX_SRQ_SGE;
+ rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN;
+ rxe->attr.max_pkeys = RXE_MAX_PKEYS;
+ rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY;
+
+ if (ndev->addr_len) {
+ memcpy(rxe->raw_gid, ndev->dev_addr,
+ min_t(unsigned int, ndev->addr_len, ETH_ALEN));
+ } else {
+ /*
+ * This device does not have a HW address, but
+ * connection management requires a unique gid.
+ */
+ eth_random_addr(rxe->raw_gid);
+ }
+
+ addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
+ rxe->raw_gid);
+
+ rxe->max_ucontext = RXE_MAX_UCONTEXT;
+
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+ rxe->attr.kernel_cap_flags |= IBK_ON_DEMAND_PAGING;
+
+ /* IB_ODP_SUPPORT_IMPLICIT is not supported right now. */
+ rxe->attr.odp_caps.general_caps |= IB_ODP_SUPPORT;
+
+ rxe->attr.odp_caps.per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
+ rxe->attr.odp_caps.per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_RECV;
+ rxe->attr.odp_caps.per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
+
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_FLUSH;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC_WRITE;
+
+ /* set handler for ODP prefetching API - ibv_advise_mr(3) */
+ ib_set_device_ops(&rxe->ib_dev, &rxe_ib_dev_odp_ops);
+ }
+}
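+
+/* Editor's sketch (not part of the patch): with IBK_ON_DEMAND_PAGING
+ * advertised above, a userspace consumer reaches the .advise_mr handler
+ * through the ibv_advise_mr(3) verb from rdma-core. The constants and
+ * variables below (pd, buf, len, mr, the advice value) are assumptions
+ * taken from that man page, not from this driver:
+ *
+ *   struct ibv_sge sge = {
+ *           .addr   = (uintptr_t)buf,   // buf/len/mr: caller-owned ODP MR
+ *           .length = len,
+ *           .lkey   = mr->lkey,
+ *   };
+ *   int ret = ibv_advise_mr(pd, IBV_ADVISE_MR_ADVICE_PREFETCH_WRITE,
+ *                           0, &sge, 1);   // flags = 0; see the man page
+ */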
+
+/* initialize port attributes */
+static void rxe_init_port_param(struct rxe_port *port)
+{
+ port->attr.state = IB_PORT_DOWN;
+ port->attr.max_mtu = IB_MTU_4096;
+ port->attr.active_mtu = IB_MTU_256;
+ port->attr.gid_tbl_len = RXE_PORT_GID_TBL_LEN;
+ port->attr.port_cap_flags = RXE_PORT_PORT_CAP_FLAGS;
+ port->attr.max_msg_sz = RXE_PORT_MAX_MSG_SZ;
+ port->attr.bad_pkey_cntr = RXE_PORT_BAD_PKEY_CNTR;
+ port->attr.qkey_viol_cntr = RXE_PORT_QKEY_VIOL_CNTR;
+ port->attr.pkey_tbl_len = RXE_PORT_PKEY_TBL_LEN;
+ port->attr.lid = RXE_PORT_LID;
+ port->attr.sm_lid = RXE_PORT_SM_LID;
+ port->attr.lmc = RXE_PORT_LMC;
+ port->attr.max_vl_num = RXE_PORT_MAX_VL_NUM;
+ port->attr.sm_sl = RXE_PORT_SM_SL;
+ port->attr.subnet_timeout = RXE_PORT_SUBNET_TIMEOUT;
+ port->attr.init_type_reply = RXE_PORT_INIT_TYPE_REPLY;
+ port->attr.active_width = RXE_PORT_ACTIVE_WIDTH;
+ port->attr.active_speed = RXE_PORT_ACTIVE_SPEED;
+ port->attr.phys_state = RXE_PORT_PHYS_STATE;
+ port->mtu_cap = ib_mtu_enum_to_int(IB_MTU_256);
+ port->subnet_prefix = cpu_to_be64(RXE_PORT_SUBNET_PREFIX);
+}
+
+/* initialize port state, note IB convention that HCA ports are always
+ * numbered from 1
+ */
+static void rxe_init_ports(struct rxe_dev *rxe, struct net_device *ndev)
+{
+ struct rxe_port *port = &rxe->port;
+
+ rxe_init_port_param(port);
+ addrconf_addr_eui48((unsigned char *)&port->port_guid,
+ rxe->raw_gid);
+ spin_lock_init(&port->port_lock);
+}
+
+/* init pools of managed objects */
+static void rxe_init_pools(struct rxe_dev *rxe)
+{
+ rxe_pool_init(rxe, &rxe->uc_pool, RXE_TYPE_UC);
+ rxe_pool_init(rxe, &rxe->pd_pool, RXE_TYPE_PD);
+ rxe_pool_init(rxe, &rxe->ah_pool, RXE_TYPE_AH);
+ rxe_pool_init(rxe, &rxe->srq_pool, RXE_TYPE_SRQ);
+ rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP);
+ rxe_pool_init(rxe, &rxe->cq_pool, RXE_TYPE_CQ);
+ rxe_pool_init(rxe, &rxe->mr_pool, RXE_TYPE_MR);
+ rxe_pool_init(rxe, &rxe->mw_pool, RXE_TYPE_MW);
+}
+
+/* initialize rxe device state */
+static void rxe_init(struct rxe_dev *rxe, struct net_device *ndev)
+{
+ /* init default device parameters */
+ rxe_init_device_param(rxe, ndev);
+
+ rxe_init_ports(rxe, ndev);
+ rxe_init_pools(rxe);
+
+ /* init pending mmap list */
+ spin_lock_init(&rxe->mmap_offset_lock);
+ spin_lock_init(&rxe->pending_lock);
+ INIT_LIST_HEAD(&rxe->pending_mmaps);
+
+ /* init multicast support */
+ spin_lock_init(&rxe->mcg_lock);
+ rxe->mcg_tree = RB_ROOT;
+
+ mutex_init(&rxe->usdev_lock);
+}
+
+void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
+{
+ struct rxe_port *port = &rxe->port;
+ enum ib_mtu mtu;
+
+ mtu = eth_mtu_int_to_enum(ndev_mtu);
+
+ /* Make sure that the new MTU is in range */
+ mtu = mtu ? min_t(enum ib_mtu, mtu, IB_MTU_4096) : IB_MTU_256;
+
+ port->attr.active_mtu = mtu;
+ port->mtu_cap = ib_mtu_enum_to_int(mtu);
+}
+
+/* called by ifc layer to create new rxe device.
+ * The caller should allocate memory for rxe by calling ib_alloc_device.
+ */
+int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name,
+ struct net_device *ndev)
+{
+ rxe_init(rxe, ndev);
+ rxe_set_mtu(rxe, mtu);
+
+ return rxe_register_device(rxe, ibdev_name, ndev);
+}
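+
+/* Editor's sketch (not part of the patch): the calling convention noted in
+ * the comment above, as a caller such as rxe_net_add() in rxe_net.c is
+ * expected to follow it; mtu, ibdev_name and ndev are assumed in scope.
+ *
+ *   struct rxe_dev *rxe = ib_alloc_device(rxe_dev, ib_dev);
+ *
+ *   if (!rxe)
+ *           return -ENOMEM;
+ *
+ *   err = rxe_add(rxe, mtu, ibdev_name, ndev);
+ *   if (err)
+ *           ib_dealloc_device(&rxe->ib_dev);
+ */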
+
+static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
+{
+ struct rxe_dev *rxe;
+ int err = 0;
+
+ if (is_vlan_dev(ndev)) {
+ rxe_err("rxe creation allowed on top of a real device only\n");
+ err = -EPERM;
+ goto err;
+ }
+
+ rxe = rxe_get_dev_from_net(ndev);
+ if (rxe) {
+ ib_device_put(&rxe->ib_dev);
+ rxe_err_dev(rxe, "already configured on %s\n", ndev->name);
+ err = -EEXIST;
+ goto err;
+ }
+
+ err = rxe_net_add(ibdev_name, ndev);
+ if (err) {
+ rxe_err("failed to add %s\n", ndev->name);
+ goto err;
+ }
+err:
+ return err;
+}
+
+static struct rdma_link_ops rxe_link_ops = {
+ .type = "rxe",
+ .newlink = rxe_newlink,
+};
+
+static int __init rxe_module_init(void)
+{
+ int err;
+
+ err = rxe_alloc_wq();
+ if (err)
+ return err;
+
+ err = rxe_net_init();
+ if (err) {
+ rxe_destroy_wq();
+ return err;
+ }
+
+ rdma_link_register(&rxe_link_ops);
+ pr_info("loaded\n");
+ return 0;
+}
+
+static void __exit rxe_module_exit(void)
+{
+ rdma_link_unregister(&rxe_link_ops);
+ ib_unregister_driver(RDMA_DRIVER_RXE);
+ rxe_net_exit();
+ rxe_destroy_wq();
+
+ pr_info("unloaded\n");
+}
+
+late_initcall(rxe_module_init);
+module_exit(rxe_module_exit);
+
+MODULE_ALIAS_RDMA_LINK("rxe");
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
new file mode 100644
index 000000000000..ff8cd53f5f28
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#ifndef RXE_H
+#define RXE_H
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/skbuff.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_pack.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_cache.h>
+#include <rdma/ib_addr.h>
+
+#include "rxe_net.h"
+#include "rxe_opcode.h"
+#include "rxe_hdr.h"
+#include "rxe_param.h"
+#include "rxe_verbs.h"
+#include "rxe_loc.h"
+
+/*
+ * Version 1 and Version 2 are identical on 64 bit machines, but on 32 bit
+ * machines Version 2 has a different struct layout.
+ */
+#define RXE_UVERBS_ABI_VERSION 2
+
+#define RXE_ROCE_V2_SPORT (0xc000)
+
+#define rxe_dbg(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)
+#define rxe_dbg_dev(rxe, fmt, ...) ibdev_dbg(&(rxe)->ib_dev, \
+ "%s: " fmt, __func__, ##__VA_ARGS__)
+#define rxe_dbg_uc(uc, fmt, ...) ibdev_dbg((uc)->ibuc.device, \
+ "uc#%d %s: " fmt, (uc)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_dbg_pd(pd, fmt, ...) ibdev_dbg((pd)->ibpd.device, \
+ "pd#%d %s: " fmt, (pd)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_dbg_ah(ah, fmt, ...) ibdev_dbg((ah)->ibah.device, \
+ "ah#%d %s: " fmt, (ah)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_dbg_srq(srq, fmt, ...) ibdev_dbg((srq)->ibsrq.device, \
+ "srq#%d %s: " fmt, (srq)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_dbg_qp(qp, fmt, ...) ibdev_dbg((qp)->ibqp.device, \
+ "qp#%d %s: " fmt, (qp)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_dbg_cq(cq, fmt, ...) ibdev_dbg((cq)->ibcq.device, \
+ "cq#%d %s: " fmt, (cq)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_dbg_mr(mr, fmt, ...) ibdev_dbg((mr)->ibmr.device, \
+ "mr#%d %s: " fmt, (mr)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_dbg_mw(mw, fmt, ...) ibdev_dbg((mw)->ibmw.device, \
+ "mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
+
+#define rxe_err(fmt, ...) pr_err_ratelimited("%s: " fmt, __func__, \
+ ##__VA_ARGS__)
+#define rxe_err_dev(rxe, fmt, ...) ibdev_err_ratelimited(&(rxe)->ib_dev, \
+ "%s: " fmt, __func__, ##__VA_ARGS__)
+#define rxe_err_uc(uc, fmt, ...) ibdev_err_ratelimited((uc)->ibuc.device, \
+ "uc#%d %s: " fmt, (uc)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_err_pd(pd, fmt, ...) ibdev_err_ratelimited((pd)->ibpd.device, \
+ "pd#%d %s: " fmt, (pd)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_err_ah(ah, fmt, ...) ibdev_err_ratelimited((ah)->ibah.device, \
+ "ah#%d %s: " fmt, (ah)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_err_srq(srq, fmt, ...) ibdev_err_ratelimited((srq)->ibsrq.device, \
+ "srq#%d %s: " fmt, (srq)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_err_qp(qp, fmt, ...) ibdev_err_ratelimited((qp)->ibqp.device, \
+ "qp#%d %s: " fmt, (qp)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_err_cq(cq, fmt, ...) ibdev_err_ratelimited((cq)->ibcq.device, \
+ "cq#%d %s: " fmt, (cq)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_err_mr(mr, fmt, ...) ibdev_err_ratelimited((mr)->ibmr.device, \
+ "mr#%d %s: " fmt, (mr)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_err_mw(mw, fmt, ...) ibdev_err_ratelimited((mw)->ibmw.device, \
+ "mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
+
+#define rxe_info(fmt, ...) pr_info_ratelimited("%s: " fmt, __func__, \
+ ##__VA_ARGS__)
+#define rxe_info_dev(rxe, fmt, ...) ibdev_info_ratelimited(&(rxe)->ib_dev, \
+ "%s: " fmt, __func__, ##__VA_ARGS__)
+#define rxe_info_uc(uc, fmt, ...) ibdev_info_ratelimited((uc)->ibuc.device, \
+ "uc#%d %s: " fmt, (uc)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_info_pd(pd, fmt, ...) ibdev_info_ratelimited((pd)->ibpd.device, \
+ "pd#%d %s: " fmt, (pd)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_info_ah(ah, fmt, ...) ibdev_info_ratelimited((ah)->ibah.device, \
+ "ah#%d %s: " fmt, (ah)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_info_srq(srq, fmt, ...) ibdev_info_ratelimited((srq)->ibsrq.device, \
+ "srq#%d %s: " fmt, (srq)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_info_qp(qp, fmt, ...) ibdev_info_ratelimited((qp)->ibqp.device, \
+ "qp#%d %s: " fmt, (qp)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_info_cq(cq, fmt, ...) ibdev_info_ratelimited((cq)->ibcq.device, \
+ "cq#%d %s: " fmt, (cq)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_info_mr(mr, fmt, ...) ibdev_info_ratelimited((mr)->ibmr.device, \
+ "mr#%d %s: " fmt, (mr)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_info_mw(mw, fmt, ...) ibdev_info_ratelimited((mw)->ibmw.device, \
+ "mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
+
+void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
+
+int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name,
+ struct net_device *ndev);
+
+void rxe_rcv(struct sk_buff *skb);
+
+/* The caller must do a matching ib_device_put(&dev->ib_dev) */
+static inline struct rxe_dev *rxe_get_dev_from_net(struct net_device *ndev)
+{
+ struct ib_device *ibdev =
+ ib_device_get_by_netdev(ndev, RDMA_DRIVER_RXE);
+
+ if (!ibdev)
+ return NULL;
+ return container_of(ibdev, struct rxe_dev, ib_dev);
+}
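+
+/* Illustrative usage (editor's note), honouring the reference rule stated
+ * above; rxe_newlink() in rxe.c follows the same pattern:
+ *
+ *   struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);
+ *
+ *   if (rxe) {
+ *           // ... use rxe ...
+ *           ib_device_put(&rxe->ib_dev);
+ *   }
+ */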
+
+void rxe_port_up(struct rxe_dev *rxe);
+void rxe_port_down(struct rxe_dev *rxe);
+void rxe_set_port_state(struct rxe_dev *rxe);
+
+#endif /* RXE_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
new file mode 100644
index 000000000000..889d7adbd455
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_av.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#include "rxe.h"
+#include "rxe_loc.h"
+
+void rxe_init_av(struct rdma_ah_attr *attr, struct rxe_av *av)
+{
+ rxe_av_from_attr(rdma_ah_get_port_num(attr), av, attr);
+ rxe_av_fill_ip_info(av, attr);
+ memcpy(av->dmac, attr->roce.dmac, ETH_ALEN);
+}
+
+static int chk_attr(void *obj, struct rdma_ah_attr *attr, bool obj_is_ah)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(attr);
+ struct rxe_port *port;
+ struct rxe_dev *rxe;
+ struct rxe_qp *qp;
+ struct rxe_ah *ah;
+ int type;
+
+ if (obj_is_ah) {
+ ah = obj;
+ rxe = to_rdev(ah->ibah.device);
+ } else {
+ qp = obj;
+ rxe = to_rdev(qp->ibqp.device);
+ }
+
+ port = &rxe->port;
+
+ if (rdma_ah_get_ah_flags(attr) & IB_AH_GRH) {
+ if (grh->sgid_index > port->attr.gid_tbl_len) {
+ if (obj_is_ah)
+ rxe_dbg_ah(ah, "invalid sgid index = %d\n",
+ grh->sgid_index);
+ else
+ rxe_dbg_qp(qp, "invalid sgid index = %d\n",
+ grh->sgid_index);
+ return -EINVAL;
+ }
+
+ type = rdma_gid_attr_network_type(grh->sgid_attr);
+ if (type < RDMA_NETWORK_IPV4 ||
+ type > RDMA_NETWORK_IPV6) {
+ if (obj_is_ah)
+ rxe_dbg_ah(ah, "invalid network type for rdma_rxe = %d\n",
+ type);
+ else
+ rxe_dbg_qp(qp, "invalid network type for rdma_rxe = %d\n",
+ type);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int rxe_av_chk_attr(struct rxe_qp *qp, struct rdma_ah_attr *attr)
+{
+ return chk_attr(qp, attr, false);
+}
+
+int rxe_ah_chk_attr(struct rxe_ah *ah, struct rdma_ah_attr *attr)
+{
+ return chk_attr(ah, attr, true);
+}
+
+void rxe_av_from_attr(u8 port_num, struct rxe_av *av,
+ struct rdma_ah_attr *attr)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(attr);
+
+ memset(av, 0, sizeof(*av));
+ memcpy(av->grh.dgid.raw, grh->dgid.raw, sizeof(grh->dgid.raw));
+ av->grh.flow_label = grh->flow_label;
+ av->grh.sgid_index = grh->sgid_index;
+ av->grh.hop_limit = grh->hop_limit;
+ av->grh.traffic_class = grh->traffic_class;
+ av->port_num = port_num;
+}
+
+void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr)
+{
+ struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
+
+ attr->type = RDMA_AH_ATTR_TYPE_ROCE;
+
+ memcpy(grh->dgid.raw, av->grh.dgid.raw, sizeof(av->grh.dgid.raw));
+ grh->flow_label = av->grh.flow_label;
+ grh->sgid_index = av->grh.sgid_index;
+ grh->hop_limit = av->grh.hop_limit;
+ grh->traffic_class = av->grh.traffic_class;
+
+ rdma_ah_set_ah_flags(attr, IB_AH_GRH);
+ rdma_ah_set_port_num(attr, av->port_num);
+}
+
+void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr)
+{
+ const struct ib_gid_attr *sgid_attr = attr->grh.sgid_attr;
+ int ibtype;
+ int type;
+
+ rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
+ rdma_gid2ip((struct sockaddr *)&av->dgid_addr,
+ &rdma_ah_read_grh(attr)->dgid);
+
+ ibtype = rdma_gid_attr_network_type(sgid_attr);
+
+ switch (ibtype) {
+ case RDMA_NETWORK_IPV4:
+ type = RXE_NETWORK_TYPE_IPV4;
+ break;
+ case RDMA_NETWORK_IPV6:
+ type = RXE_NETWORK_TYPE_IPV6;
+ break;
+ default:
+ /* not reached - checked in rxe_av_chk_attr */
+ type = 0;
+ break;
+ }
+
+ av->network_type = type;
+}
+
+struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt, struct rxe_ah **ahp)
+{
+ struct rxe_ah *ah;
+ u32 ah_num;
+
+ if (ahp)
+ *ahp = NULL;
+
+ if (!pkt || !pkt->qp)
+ return NULL;
+
+ if (qp_type(pkt->qp) == IB_QPT_RC || qp_type(pkt->qp) == IB_QPT_UC)
+ return &pkt->qp->pri_av;
+
+ if (!pkt->wqe)
+ return NULL;
+
+ ah_num = pkt->wqe->wr.wr.ud.ah_num;
+ if (ah_num) {
+ /* only new user provider or kernel client */
+ ah = rxe_pool_get_index(&pkt->rxe->ah_pool, ah_num);
+ if (!ah) {
+ rxe_dbg_qp(pkt->qp, "Unable to find AH matching ah_num\n");
+ return NULL;
+ }
+
+ if (rxe_ah_pd(ah) != pkt->qp->pd) {
+ rxe_dbg_qp(pkt->qp, "PDs don't match for AH and QP\n");
+ rxe_put(ah);
+ return NULL;
+ }
+
+ if (ahp)
+ *ahp = ah;
+ else
+ rxe_put(ah);
+
+ return &ah->av;
+ }
+
+ /* only old user provider for UD sends */
+ return &pkt->wqe->wr.wr.ud.av;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
new file mode 100644
index 000000000000..a5b2b62f596b
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -0,0 +1,845 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#include <linux/skbuff.h>
+
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+#include "rxe_task.h"
+
+enum comp_state {
+ COMPST_GET_ACK,
+ COMPST_GET_WQE,
+ COMPST_COMP_WQE,
+ COMPST_COMP_ACK,
+ COMPST_CHECK_PSN,
+ COMPST_CHECK_ACK,
+ COMPST_READ,
+ COMPST_ATOMIC,
+ COMPST_WRITE_SEND,
+ COMPST_UPDATE_COMP,
+ COMPST_ERROR_RETRY,
+ COMPST_RNR_RETRY,
+ COMPST_ERROR,
+ COMPST_EXIT, /* We have an issue, and we want to rerun the completer */
+ COMPST_DONE, /* The completer finished successfully */
+};
+
+static char *comp_state_name[] = {
+ [COMPST_GET_ACK] = "GET ACK",
+ [COMPST_GET_WQE] = "GET WQE",
+ [COMPST_COMP_WQE] = "COMP WQE",
+ [COMPST_COMP_ACK] = "COMP ACK",
+ [COMPST_CHECK_PSN] = "CHECK PSN",
+ [COMPST_CHECK_ACK] = "CHECK ACK",
+ [COMPST_READ] = "READ",
+ [COMPST_ATOMIC] = "ATOMIC",
+ [COMPST_WRITE_SEND] = "WRITE/SEND",
+ [COMPST_UPDATE_COMP] = "UPDATE COMP",
+ [COMPST_ERROR_RETRY] = "ERROR RETRY",
+ [COMPST_RNR_RETRY] = "RNR RETRY",
+ [COMPST_ERROR] = "ERROR",
+ [COMPST_EXIT] = "EXIT",
+ [COMPST_DONE] = "DONE",
+};
+
+static unsigned long rnrnak_usec[32] = {
+ [IB_RNR_TIMER_655_36] = 655360,
+ [IB_RNR_TIMER_000_01] = 10,
+ [IB_RNR_TIMER_000_02] = 20,
+ [IB_RNR_TIMER_000_03] = 30,
+ [IB_RNR_TIMER_000_04] = 40,
+ [IB_RNR_TIMER_000_06] = 60,
+ [IB_RNR_TIMER_000_08] = 80,
+ [IB_RNR_TIMER_000_12] = 120,
+ [IB_RNR_TIMER_000_16] = 160,
+ [IB_RNR_TIMER_000_24] = 240,
+ [IB_RNR_TIMER_000_32] = 320,
+ [IB_RNR_TIMER_000_48] = 480,
+ [IB_RNR_TIMER_000_64] = 640,
+ [IB_RNR_TIMER_000_96] = 960,
+ [IB_RNR_TIMER_001_28] = 1280,
+ [IB_RNR_TIMER_001_92] = 1920,
+ [IB_RNR_TIMER_002_56] = 2560,
+ [IB_RNR_TIMER_003_84] = 3840,
+ [IB_RNR_TIMER_005_12] = 5120,
+ [IB_RNR_TIMER_007_68] = 7680,
+ [IB_RNR_TIMER_010_24] = 10240,
+ [IB_RNR_TIMER_015_36] = 15360,
+ [IB_RNR_TIMER_020_48] = 20480,
+ [IB_RNR_TIMER_030_72] = 30720,
+ [IB_RNR_TIMER_040_96] = 40960,
+ [IB_RNR_TIMER_061_44] = 61440,
+ [IB_RNR_TIMER_081_92] = 81920,
+ [IB_RNR_TIMER_122_88] = 122880,
+ [IB_RNR_TIMER_163_84] = 163840,
+ [IB_RNR_TIMER_245_76] = 245760,
+ [IB_RNR_TIMER_327_68] = 327680,
+ [IB_RNR_TIMER_491_52] = 491520,
+};
+
+static inline unsigned long rnrnak_jiffies(u8 timeout)
+{
+ return max_t(unsigned long,
+ usecs_to_jiffies(rnrnak_usec[timeout]), 1);
+}
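+
+/* Worked example (editor's note): an RNR NAK carrying IB_RNR_TIMER_000_16
+ * maps to 160 usec in the table above, so rnrnak_jiffies() returns
+ * usecs_to_jiffies(160) clamped to a minimum of one jiffy.
+ */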
+
+static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
+{
+ switch (opcode) {
+ case IB_WR_RDMA_WRITE: return IB_WC_RDMA_WRITE;
+ case IB_WR_RDMA_WRITE_WITH_IMM: return IB_WC_RDMA_WRITE;
+ case IB_WR_SEND: return IB_WC_SEND;
+ case IB_WR_SEND_WITH_IMM: return IB_WC_SEND;
+ case IB_WR_RDMA_READ: return IB_WC_RDMA_READ;
+ case IB_WR_ATOMIC_CMP_AND_SWP: return IB_WC_COMP_SWAP;
+ case IB_WR_ATOMIC_FETCH_AND_ADD: return IB_WC_FETCH_ADD;
+ case IB_WR_LSO: return IB_WC_LSO;
+ case IB_WR_SEND_WITH_INV: return IB_WC_SEND;
+ case IB_WR_RDMA_READ_WITH_INV: return IB_WC_RDMA_READ;
+ case IB_WR_LOCAL_INV: return IB_WC_LOCAL_INV;
+ case IB_WR_REG_MR: return IB_WC_REG_MR;
+ case IB_WR_BIND_MW: return IB_WC_BIND_MW;
+ case IB_WR_ATOMIC_WRITE: return IB_WC_ATOMIC_WRITE;
+ case IB_WR_FLUSH: return IB_WC_FLUSH;
+
+ default:
+ return 0xff;
+ }
+}
+
+void retransmit_timer(struct timer_list *t)
+{
+ struct rxe_qp *qp = timer_container_of(qp, t, retrans_timer);
+ unsigned long flags;
+
+ rxe_dbg_qp(qp, "retransmit timer fired\n");
+
+ spin_lock_irqsave(&qp->state_lock, flags);
+ if (qp->valid) {
+ qp->comp.timeout = 1;
+ rxe_sched_task(&qp->send_task);
+ }
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+}
+
+void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
+{
+ rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_SENDER_SCHED);
+ skb_queue_tail(&qp->resp_pkts, skb);
+ rxe_sched_task(&qp->send_task);
+}
+
+static inline enum comp_state get_wqe(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_send_wqe **wqe_p)
+{
+ struct rxe_send_wqe *wqe;
+
+ /* we come here whether or not we found a response packet to see if
+ * there are any posted WQEs
+ */
+ wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT);
+ *wqe_p = wqe;
+
+ /* no WQE or requester has not started it yet */
+ if (!wqe || wqe->state == wqe_state_posted)
+ return pkt ? COMPST_DONE : COMPST_EXIT;
+
+ /* WQE does not require an ack */
+ if (wqe->state == wqe_state_done)
+ return COMPST_COMP_WQE;
+
+ /* WQE caused an error */
+ if (wqe->state == wqe_state_error)
+ return COMPST_ERROR;
+
+ /* we have a WQE, if we also have an ack check its PSN */
+ return pkt ? COMPST_CHECK_PSN : COMPST_EXIT;
+}
+
+static inline void reset_retry_counters(struct rxe_qp *qp)
+{
+ qp->comp.retry_cnt = qp->attr.retry_cnt;
+ qp->comp.rnr_retry = qp->attr.rnr_retry;
+ qp->comp.started_retry = 0;
+}
+
+static inline enum comp_state check_psn(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_send_wqe *wqe)
+{
+ s32 diff;
+
+ /* check to see if response is past the oldest WQE. if it is, complete
+ * send/write or error read/atomic
+ */
+ diff = psn_compare(pkt->psn, wqe->last_psn);
+ if (diff > 0) {
+ if (wqe->state == wqe_state_pending) {
+ if (wqe->mask & WR_ATOMIC_OR_READ_MASK)
+ return COMPST_ERROR_RETRY;
+
+ reset_retry_counters(qp);
+ return COMPST_COMP_WQE;
+ } else {
+ return COMPST_DONE;
+ }
+ }
+
+ /* compare response packet to expected response */
+ diff = psn_compare(pkt->psn, qp->comp.psn);
+ if (diff < 0) {
+ /* the response is most likely a retried packet; if it matches
+ * an uncompleted WQE go complete it, else ignore it
+ */
+ if (pkt->psn == wqe->last_psn)
+ return COMPST_COMP_ACK;
+ else if (pkt->opcode == IB_OPCODE_RC_ACKNOWLEDGE &&
+ (qp->comp.opcode == IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST ||
+ qp->comp.opcode == IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE))
+ return COMPST_CHECK_ACK;
+ else
+ return COMPST_DONE;
+ } else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
+ return COMPST_DONE;
+ } else {
+ return COMPST_CHECK_ACK;
+ }
+}
+
+static inline enum comp_state check_ack(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_send_wqe *wqe)
+{
+ unsigned int mask = pkt->mask;
+ u8 syn;
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+
+ /* Check the sequence only */
+ switch (qp->comp.opcode) {
+ case -1:
+ /* Will catch all *_ONLY cases. */
+ if (!(mask & RXE_START_MASK))
+ return COMPST_ERROR;
+
+ break;
+
+ case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
+ case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
+ /* Check NAK code to handle a remote error */
+ if (pkt->opcode == IB_OPCODE_RC_ACKNOWLEDGE)
+ break;
+
+ if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE &&
+ pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) {
+ /* read retries of partial data may restart from
+ * read response first or response only.
+ */
+ if ((pkt->psn == wqe->first_psn &&
+ pkt->opcode ==
+ IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) ||
+ (wqe->first_psn == wqe->last_psn &&
+ pkt->opcode ==
+ IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY))
+ break;
+
+ return COMPST_ERROR;
+ }
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ /* Check operation validity. */
+ switch (pkt->opcode) {
+ case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
+ case IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST:
+ case IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY:
+ syn = aeth_syn(pkt);
+
+ if ((syn & AETH_TYPE_MASK) != AETH_ACK)
+ return COMPST_ERROR;
+
+ if (wqe->wr.opcode == IB_WR_ATOMIC_WRITE)
+ return COMPST_WRITE_SEND;
+
+ fallthrough;
+ /* (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE doesn't have an AETH)
+ */
+ case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
+ if (wqe->wr.opcode != IB_WR_RDMA_READ &&
+ wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV &&
+ wqe->wr.opcode != IB_WR_FLUSH) {
+ wqe->status = IB_WC_FATAL_ERR;
+ return COMPST_ERROR;
+ }
+ reset_retry_counters(qp);
+ return COMPST_READ;
+
+ case IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE:
+ syn = aeth_syn(pkt);
+
+ if ((syn & AETH_TYPE_MASK) != AETH_ACK)
+ return COMPST_ERROR;
+
+ if (wqe->wr.opcode != IB_WR_ATOMIC_CMP_AND_SWP &&
+ wqe->wr.opcode != IB_WR_ATOMIC_FETCH_AND_ADD)
+ return COMPST_ERROR;
+ reset_retry_counters(qp);
+ return COMPST_ATOMIC;
+
+ case IB_OPCODE_RC_ACKNOWLEDGE:
+ syn = aeth_syn(pkt);
+ switch (syn & AETH_TYPE_MASK) {
+ case AETH_ACK:
+ reset_retry_counters(qp);
+ return COMPST_WRITE_SEND;
+
+ case AETH_RNR_NAK:
+ rxe_counter_inc(rxe, RXE_CNT_RCV_RNR);
+ return COMPST_RNR_RETRY;
+
+ case AETH_NAK:
+ switch (syn) {
+ case AETH_NAK_PSN_SEQ_ERROR:
+ /* a nak implicitly acks all packets with psns
+ * before it
+ */
+ if (psn_compare(pkt->psn, qp->comp.psn) > 0) {
+ rxe_counter_inc(rxe,
+ RXE_CNT_RCV_SEQ_ERR);
+ qp->comp.psn = pkt->psn;
+ if (qp->req.wait_psn) {
+ qp->req.wait_psn = 0;
+ qp->req.again = 1;
+ }
+ }
+ return COMPST_ERROR_RETRY;
+
+ case AETH_NAK_INVALID_REQ:
+ wqe->status = IB_WC_REM_INV_REQ_ERR;
+ return COMPST_ERROR;
+
+ case AETH_NAK_REM_ACC_ERR:
+ wqe->status = IB_WC_REM_ACCESS_ERR;
+ return COMPST_ERROR;
+
+ case AETH_NAK_REM_OP_ERR:
+ wqe->status = IB_WC_REM_OP_ERR;
+ return COMPST_ERROR;
+
+ default:
+ rxe_dbg_qp(qp, "unexpected nak %x\n", syn);
+ wqe->status = IB_WC_REM_OP_ERR;
+ return COMPST_ERROR;
+ }
+
+ default:
+ return COMPST_ERROR;
+ }
+ break;
+
+ default:
+ rxe_dbg_qp(qp, "unexpected opcode\n");
+ }
+
+ return COMPST_ERROR;
+}
+
+static inline enum comp_state do_read(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_send_wqe *wqe)
+{
+ int ret;
+
+ ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
+ &wqe->dma, payload_addr(pkt),
+ payload_size(pkt), RXE_TO_MR_OBJ);
+ if (ret) {
+ wqe->status = IB_WC_LOC_PROT_ERR;
+ return COMPST_ERROR;
+ }
+
+ if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
+ return COMPST_COMP_ACK;
+
+ return COMPST_UPDATE_COMP;
+}
+
+static inline enum comp_state do_atomic(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_send_wqe *wqe)
+{
+ int ret;
+
+ u64 atomic_orig = atmack_orig(pkt);
+
+ ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
+ &wqe->dma, &atomic_orig,
+ sizeof(u64), RXE_TO_MR_OBJ);
+ if (ret) {
+ wqe->status = IB_WC_LOC_PROT_ERR;
+ return COMPST_ERROR;
+ }
+
+ return COMPST_COMP_ACK;
+}
+
+static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ struct rxe_cqe *cqe)
+{
+ struct ib_wc *wc = &cqe->ibwc;
+ struct ib_uverbs_wc *uwc = &cqe->uibwc;
+
+ memset(cqe, 0, sizeof(*cqe));
+
+ if (!qp->is_user) {
+ wc->wr_id = wqe->wr.wr_id;
+ wc->status = wqe->status;
+ wc->qp = &qp->ibqp;
+ } else {
+ uwc->wr_id = wqe->wr.wr_id;
+ uwc->status = wqe->status;
+ uwc->qp_num = qp->ibqp.qp_num;
+ }
+
+ if (wqe->status == IB_WC_SUCCESS) {
+ if (!qp->is_user) {
+ wc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
+ wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->byte_len = wqe->dma.length;
+ } else {
+ uwc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
+ wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
+ uwc->wc_flags = IB_WC_WITH_IMM;
+ uwc->byte_len = wqe->dma.length;
+ }
+ } else {
+ if (wqe->status != IB_WC_WR_FLUSH_ERR)
+ rxe_err_qp(qp, "non-flush error status = %d\n",
+ wqe->status);
+ }
+}
+
+/*
+ * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
+ * ---------8<---------8<-------------
+ * ...Note that if a completion error occurs, a Work Completion
+ * will always be generated, even if the signaling
+ * indicator requests an Unsignaled Completion.
+ * ---------8<---------8<-------------
+ */
+static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct rxe_cqe cqe;
+ bool post;
+
+ /* do we need to post a completion */
+ post = ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
+ (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+ wqe->status != IB_WC_SUCCESS);
+
+ if (post)
+ make_send_cqe(qp, wqe, &cqe);
+
+ queue_advance_consumer(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT);
+
+ if (post)
+ rxe_cq_post(qp->scq, &cqe, 0);
+
+ if (wqe->wr.opcode == IB_WR_SEND ||
+ wqe->wr.opcode == IB_WR_SEND_WITH_IMM ||
+ wqe->wr.opcode == IB_WR_SEND_WITH_INV)
+ rxe_counter_inc(rxe, RXE_CNT_RDMA_SEND);
+
+ /*
+ * we completed something so let req run again
+ * if it is trying to fence
+ */
+ if (qp->req.wait_fence) {
+ qp->req.wait_fence = 0;
+ qp->req.again = 1;
+ }
+}
+
+static void comp_check_sq_drain_done(struct rxe_qp *qp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->state_lock, flags);
+ if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
+ if (qp->attr.sq_draining && qp->comp.psn == qp->req.psn) {
+ qp->attr.sq_draining = 0;
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+
+ if (qp->ibqp.event_handler) {
+ struct ib_event ev;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_SQ_DRAINED;
+ qp->ibqp.event_handler(&ev,
+ qp->ibqp.qp_context);
+ }
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+}
+
+static inline enum comp_state complete_ack(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_send_wqe *wqe)
+{
+ if (wqe->has_rd_atomic) {
+ wqe->has_rd_atomic = 0;
+ atomic_inc(&qp->req.rd_atomic);
+ if (qp->req.need_rd_atomic) {
+ qp->comp.timeout_retry = 0;
+ qp->req.need_rd_atomic = 0;
+ qp->req.again = 1;
+ }
+ }
+
+ comp_check_sq_drain_done(qp);
+
+ do_complete(qp, wqe);
+
+ if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
+ return COMPST_UPDATE_COMP;
+ else
+ return COMPST_DONE;
+}
+
+static inline enum comp_state complete_wqe(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_send_wqe *wqe)
+{
+ if (pkt && wqe->state == wqe_state_pending) {
+ if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) {
+ qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK;
+ qp->comp.opcode = -1;
+ }
+
+ if (qp->req.wait_psn) {
+ qp->req.wait_psn = 0;
+ qp->req.again = 1;
+ }
+ }
+
+ do_complete(qp, wqe);
+
+ return COMPST_GET_WQE;
+}
+
+/* drain incoming response packet queue */
+static void drain_resp_pkts(struct rxe_qp *qp)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&qp->resp_pkts))) {
+ rxe_put(qp);
+ kfree_skb(skb);
+ ib_device_put(qp->ibqp.device);
+ }
+}
+
+/* complete send wqe with flush error */
+static int flush_send_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+ struct rxe_cqe cqe = {};
+ struct ib_wc *wc = &cqe.ibwc;
+ struct ib_uverbs_wc *uwc = &cqe.uibwc;
+ int err;
+
+ if (qp->is_user) {
+ uwc->wr_id = wqe->wr.wr_id;
+ uwc->status = IB_WC_WR_FLUSH_ERR;
+ uwc->qp_num = qp->ibqp.qp_num;
+ } else {
+ wc->wr_id = wqe->wr.wr_id;
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->qp = &qp->ibqp;
+ }
+
+ err = rxe_cq_post(qp->scq, &cqe, 0);
+ if (err)
+ rxe_dbg_cq(qp->scq, "post cq failed, err = %d\n", err);
+
+ return err;
+}
+
+/* drain and optionally complete the send queue.
+ * If unable to complete a wqe, i.e. the cq is full, stop
+ * completing and flush the remaining wqes
+ */
+static void flush_send_queue(struct rxe_qp *qp, bool notify)
+{
+ struct rxe_send_wqe *wqe;
+ struct rxe_queue *q = qp->sq.queue;
+ int err;
+
+ /* send queue never got created. nothing to do. */
+ if (!qp->sq.queue)
+ return;
+
+ while ((wqe = queue_head(q, q->type))) {
+ if (notify) {
+ err = flush_send_wqe(qp, wqe);
+ if (err)
+ notify = 0;
+ }
+ queue_advance_consumer(q, q->type);
+ }
+}
+
+static void free_pkt(struct rxe_pkt_info *pkt)
+{
+ struct sk_buff *skb = PKT_TO_SKB(pkt);
+ struct rxe_qp *qp = pkt->qp;
+ struct ib_device *dev = qp->ibqp.device;
+
+ kfree_skb(skb);
+ rxe_put(qp);
+ ib_device_put(dev);
+}
+
+/* reset the retry timer if
+ * - QP is type RC
+ * - there is a packet sent by the requester that
+ * might be acked (we still might get spurious
+ * timeouts but try to keep them as few as possible)
+ * - the timeout parameter is set
+ * - the QP is alive
+ */
+static void reset_retry_timer(struct rxe_qp *qp)
+{
+ unsigned long flags;
+
+ if (qp_type(qp) == IB_QPT_RC && qp->qp_timeout_jiffies) {
+ spin_lock_irqsave(&qp->state_lock, flags);
+ if (qp_state(qp) >= IB_QPS_RTS &&
+ psn_compare(qp->req.psn, qp->comp.psn) > 0)
+ mod_timer(&qp->retrans_timer,
+ jiffies + qp->qp_timeout_jiffies);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+ }
+}
+
+int rxe_completer(struct rxe_qp *qp)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct rxe_send_wqe *wqe = NULL;
+ struct sk_buff *skb = NULL;
+ struct rxe_pkt_info *pkt = NULL;
+ enum comp_state state;
+ int ret;
+ unsigned long flags;
+
+ qp->req.again = 0;
+
+ spin_lock_irqsave(&qp->state_lock, flags);
+ if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
+ qp_state(qp) == IB_QPS_RESET) {
+ bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR);
+
+ drain_resp_pkts(qp);
+ flush_send_queue(qp, notify);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+ goto exit;
+ }
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+
+ if (qp->comp.timeout) {
+ qp->comp.timeout_retry = 1;
+ qp->comp.timeout = 0;
+ } else {
+ qp->comp.timeout_retry = 0;
+ }
+
+ if (qp->req.need_retry)
+ goto exit;
+
+ state = COMPST_GET_ACK;
+
+ while (1) {
+ rxe_dbg_qp(qp, "state = %s\n", comp_state_name[state]);
+ switch (state) {
+ case COMPST_GET_ACK:
+ skb = skb_dequeue(&qp->resp_pkts);
+ if (skb) {
+ pkt = SKB_TO_PKT(skb);
+ qp->comp.timeout_retry = 0;
+ }
+ state = COMPST_GET_WQE;
+ break;
+
+ case COMPST_GET_WQE:
+ state = get_wqe(qp, pkt, &wqe);
+ break;
+
+ case COMPST_CHECK_PSN:
+ state = check_psn(qp, pkt, wqe);
+ break;
+
+ case COMPST_CHECK_ACK:
+ state = check_ack(qp, pkt, wqe);
+ break;
+
+ case COMPST_READ:
+ state = do_read(qp, pkt, wqe);
+ break;
+
+ case COMPST_ATOMIC:
+ state = do_atomic(qp, pkt, wqe);
+ break;
+
+ case COMPST_WRITE_SEND:
+ if (wqe->state == wqe_state_pending &&
+ wqe->last_psn == pkt->psn)
+ state = COMPST_COMP_ACK;
+ else
+ state = COMPST_UPDATE_COMP;
+ break;
+
+ case COMPST_COMP_ACK:
+ state = complete_ack(qp, pkt, wqe);
+ break;
+
+ case COMPST_COMP_WQE:
+ state = complete_wqe(qp, pkt, wqe);
+ break;
+
+ case COMPST_UPDATE_COMP:
+ if (pkt->mask & RXE_END_MASK)
+ qp->comp.opcode = -1;
+ else
+ qp->comp.opcode = pkt->opcode;
+
+ if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
+ qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+
+ if (qp->req.wait_psn) {
+ qp->req.wait_psn = 0;
+ qp->req.again = 1;
+ }
+
+ state = COMPST_DONE;
+ break;
+
+ case COMPST_DONE:
+ goto done;
+
+ case COMPST_EXIT:
+ if (qp->comp.timeout_retry && wqe) {
+ state = COMPST_ERROR_RETRY;
+ break;
+ }
+
+ reset_retry_timer(qp);
+ goto exit;
+
+ case COMPST_ERROR_RETRY:
+ /* we come here if the retry timer fired and we did
+ * not receive a response packet. try to retry the send
+ * queue if that makes sense and the limits have not
+ * been exceeded. remember that some timeouts are
+ * spurious since we do not reset the timer but kick
+ * it down the road or let it expire
+ */
+
+ /* there is nothing to retry in this case */
+ if (!wqe || (wqe->state == wqe_state_posted))
+ goto exit;
+
+ /* if we've started a retry, don't start another
+ * retry sequence, unless this is a timeout.
+ */
+ if (qp->comp.started_retry &&
+ !qp->comp.timeout_retry)
+ goto done;
+
+ if (qp->comp.retry_cnt > 0) {
+ if (qp->comp.retry_cnt != 7)
+ qp->comp.retry_cnt--;
+
+ /* no point in retrying if we have already
+ * seen the last ack that the requester could
+ * have caused
+ */
+ if (psn_compare(qp->req.psn,
+ qp->comp.psn) > 0) {
+ /* tell the requester to retry the
+ * send queue next time around
+ */
+ rxe_counter_inc(rxe,
+ RXE_CNT_COMP_RETRY);
+ qp->req.need_retry = 1;
+ qp->comp.started_retry = 1;
+ qp->req.again = 1;
+ }
+ goto done;
+
+ } else {
+ rxe_counter_inc(rxe, RXE_CNT_RETRY_EXCEEDED);
+ wqe->status = IB_WC_RETRY_EXC_ERR;
+ state = COMPST_ERROR;
+ }
+ break;
+
+ case COMPST_RNR_RETRY:
+ /* we come here if we received an RNR NAK */
+ if (qp->comp.rnr_retry > 0) {
+ if (qp->comp.rnr_retry != 7)
+ qp->comp.rnr_retry--;
+
+ /* don't start a retry flow until the
+ * rnr timer has fired
+ */
+ qp->req.wait_for_rnr_timer = 1;
+ rxe_dbg_qp(qp, "set rnr nak timer\n");
+ // TODO who protects from destroy_qp??
+ mod_timer(&qp->rnr_nak_timer,
+ jiffies + rnrnak_jiffies(aeth_syn(pkt)
+ & ~AETH_TYPE_MASK));
+ goto exit;
+ } else {
+ rxe_counter_inc(rxe,
+ RXE_CNT_RNR_RETRY_EXCEEDED);
+ wqe->status = IB_WC_RNR_RETRY_EXC_ERR;
+ state = COMPST_ERROR;
+ }
+ break;
+
+ case COMPST_ERROR:
+ WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
+ do_complete(qp, wqe);
+ rxe_qp_error(qp);
+ goto exit;
+ }
+ }
+
+ /* A non-zero return value will cause rxe_do_task to
+ * exit its loop and end the work item. A zero return
+ * will continue looping and return to rxe_completer
+ */
+done:
+ ret = 0;
+ goto out;
+exit:
+ ret = (qp->req.again) ? 0 : -EAGAIN;
+out:
+ qp->req.again = 0;
+ if (pkt)
+ free_pkt(pkt);
+ return ret;
+}
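+
+/* Editor's sketch (not part of the patch): the return contract described
+ * above as a task loop might consume it; the loop shape is illustrative
+ * only, the real one lives in rxe_task.c.
+ *
+ *   int ret;
+ *
+ *   do {
+ *           ret = rxe_completer(qp);   // 0: made progress, run again
+ *   } while (ret == 0);                // non-zero (-EAGAIN): stop the work item
+ */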
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
new file mode 100644
index 000000000000..fffd144d509e
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+#include <linux/vmalloc.h>
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+
+int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
+ int cqe, int comp_vector)
+{
+ int count;
+
+ if (cqe <= 0) {
+ rxe_dbg_dev(rxe, "cqe(%d) <= 0\n", cqe);
+ goto err1;
+ }
+
+ if (cqe > rxe->attr.max_cqe) {
+ rxe_dbg_dev(rxe, "cqe(%d) > max_cqe(%d)\n",
+ cqe, rxe->attr.max_cqe);
+ goto err1;
+ }
+
+ if (cq) {
+ count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);
+ if (cqe < count) {
+ rxe_dbg_cq(cq, "cqe(%d) < current # elements in queue (%d)\n",
+ cqe, count);
+ goto err1;
+ }
+ }
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
+ int comp_vector, struct ib_udata *udata,
+ struct rxe_create_cq_resp __user *uresp)
+{
+ int err;
+ enum queue_type type;
+
+ type = QUEUE_TYPE_TO_CLIENT;
+ cq->queue = rxe_queue_init(rxe, &cqe,
+ sizeof(struct rxe_cqe), type);
+ if (!cq->queue) {
+ rxe_dbg_dev(rxe, "unable to create cq\n");
+ return -ENOMEM;
+ }
+
+ err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
+ cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
+ if (err)
+ return err;
+
+ cq->is_user = uresp;
+
+ spin_lock_init(&cq->cq_lock);
+ cq->ibcq.cqe = cqe;
+ return 0;
+}
+
+int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
+ struct rxe_resize_cq_resp __user *uresp,
+ struct ib_udata *udata)
+{
+ int err;
+
+ err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
+ sizeof(struct rxe_cqe), udata,
+ uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock);
+ if (!err)
+ cq->ibcq.cqe = cqe;
+
+ return err;
+}
+
+/* caller holds reference to cq */
+int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
+{
+ struct ib_event ev;
+ int full;
+ void *addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+
+ full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
+ if (unlikely(full)) {
+ rxe_err_cq(cq, "queue full\n");
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+ if (cq->ibcq.event_handler) {
+ ev.device = cq->ibcq.device;
+ ev.element.cq = &cq->ibcq;
+ ev.event = IB_EVENT_CQ_ERR;
+ cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
+ }
+
+ return -EBUSY;
+ }
+
+ addr = queue_producer_addr(cq->queue, QUEUE_TYPE_TO_CLIENT);
+ memcpy(addr, cqe, sizeof(*cqe));
+
+ queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT);
+
+ if ((cq->notify & IB_CQ_NEXT_COMP) ||
+ (cq->notify & IB_CQ_SOLICITED && solicited)) {
+ cq->notify = 0;
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+ }
+
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ return 0;
+}
+
+void rxe_cq_cleanup(struct rxe_pool_elem *elem)
+{
+ struct rxe_cq *cq = container_of(elem, typeof(*cq), elem);
+
+ if (cq->queue)
+ rxe_queue_cleanup(cq->queue);
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_hdr.h b/drivers/infiniband/sw/rxe/rxe_hdr.h
new file mode 100644
index 000000000000..1f0322491d8c
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_hdr.h
@@ -0,0 +1,977 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#ifndef RXE_HDR_H
+#define RXE_HDR_H
+
+/* extracted information about a packet carried in an sk_buff struct fits in
+ * the skbuff cb array. Must be at most 48 bytes. Stored in the control
+ * block of the sk_buff for received packets.
+ */
+struct rxe_pkt_info {
+ struct rxe_dev *rxe; /* device that owns packet */
+ struct rxe_qp *qp; /* qp that owns packet */
+ struct rxe_send_wqe *wqe; /* send wqe */
+ u8 *hdr; /* points to bth */
+ u32 mask; /* useful info about pkt */
+ u32 psn; /* bth psn of packet */
+ u16 pkey_index; /* partition of pkt */
+ u16 paylen; /* length of bth - icrc */
+ u8 port_num; /* port pkt received on */
+ u8 opcode; /* bth opcode of packet */
+};
+
+/* These helpers should be used only for received skbs */
+static inline struct rxe_pkt_info *SKB_TO_PKT(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct rxe_pkt_info) > sizeof(skb->cb));
+ return (void *)skb->cb;
+}
+
+static inline struct sk_buff *PKT_TO_SKB(struct rxe_pkt_info *pkt)
+{
+ return container_of((void *)pkt, struct sk_buff, cb);
+}
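+
+/* Illustrative round trip (editor's note): the packet info lives in
+ * skb->cb, so the two helpers above are exact inverses for a received skb.
+ *
+ *   struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+ *
+ *   // ... read or fill in pkt fields ...
+ *   WARN_ON(PKT_TO_SKB(pkt) != skb);   // holds by construction
+ */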
+
+/*
+ * IBA header types and methods
+ *
+ * Some of these are for reference and completeness only, since
+ * rxe does not currently support the RD transport.
+ * Most of this could be moved into the IB core; ib_pack.h has
+ * part of this but is incomplete.
+ *
+ * Header specific routines to insert/extract values to/from headers:
+ * the routines named __hhh_(set_)fff() take a pointer to an
+ * hhh header and get (set) the fff field. The routines named
+ * hhh_(set_)fff() take a packet info struct and find the
+ * header and field based on the opcode in the packet.
+ * Conversion between CPU and network byte order is also done.
+ */
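+
+/* Editor's sketch (not part of the patch) of the two accessor flavours
+ * described above, using the BTH PSN helpers defined below; for extension
+ * headers the pkt-level wrappers also add the per-opcode header offset.
+ *
+ *   u32 psn = bth_psn(pkt);           // wrapper: locates the header from pkt
+ *   __bth_set_psn(pkt->hdr, psn + 1); // raw form: caller supplies the pointer
+ */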
+
+#define RXE_ICRC_SIZE (4)
+#define RXE_MAX_HDR_LENGTH (80)
+
+/******************************************************************************
+ * Base Transport Header
+ ******************************************************************************/
+struct rxe_bth {
+ u8 opcode;
+ u8 flags;
+ __be16 pkey;
+ __be32 qpn;
+ __be32 apsn;
+};
+
+#define BTH_TVER (0)
+#define BTH_DEF_PKEY (0xffff)
+
+#define BTH_SE_MASK (0x80)
+#define BTH_MIG_MASK (0x40)
+#define BTH_PAD_MASK (0x30)
+#define BTH_TVER_MASK (0x0f)
+#define BTH_FECN_MASK (0x80000000)
+#define BTH_BECN_MASK (0x40000000)
+#define BTH_RESV6A_MASK (0x3f000000)
+#define BTH_QPN_MASK (0x00ffffff)
+#define BTH_ACK_MASK (0x80000000)
+#define BTH_RESV7_MASK (0x7f000000)
+#define BTH_PSN_MASK (0x00ffffff)
+
+static inline u8 __bth_opcode(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return bth->opcode;
+}
+
+static inline void __bth_set_opcode(void *arg, u8 opcode)
+{
+ struct rxe_bth *bth = arg;
+
+ bth->opcode = opcode;
+}
+
+static inline u8 __bth_se(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return 0 != (BTH_SE_MASK & bth->flags);
+}
+
+static inline void __bth_set_se(void *arg, int se)
+{
+ struct rxe_bth *bth = arg;
+
+ if (se)
+ bth->flags |= BTH_SE_MASK;
+ else
+ bth->flags &= ~BTH_SE_MASK;
+}
+
+static inline u8 __bth_mig(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return 0 != (BTH_MIG_MASK & bth->flags);
+}
+
+static inline void __bth_set_mig(void *arg, u8 mig)
+{
+ struct rxe_bth *bth = arg;
+
+ if (mig)
+ bth->flags |= BTH_MIG_MASK;
+ else
+ bth->flags &= ~BTH_MIG_MASK;
+}
+
+static inline u8 __bth_pad(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return (BTH_PAD_MASK & bth->flags) >> 4;
+}
+
+static inline void __bth_set_pad(void *arg, u8 pad)
+{
+ struct rxe_bth *bth = arg;
+
+ bth->flags = (BTH_PAD_MASK & (pad << 4)) |
+ (~BTH_PAD_MASK & bth->flags);
+}
+
+static inline u8 __bth_tver(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return BTH_TVER_MASK & bth->flags;
+}
+
+static inline void __bth_set_tver(void *arg, u8 tver)
+{
+ struct rxe_bth *bth = arg;
+
+ bth->flags = (BTH_TVER_MASK & tver) |
+ (~BTH_TVER_MASK & bth->flags);
+}
+
+static inline u16 __bth_pkey(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return be16_to_cpu(bth->pkey);
+}
+
+static inline void __bth_set_pkey(void *arg, u16 pkey)
+{
+ struct rxe_bth *bth = arg;
+
+ bth->pkey = cpu_to_be16(pkey);
+}
+
+static inline u32 __bth_qpn(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return BTH_QPN_MASK & be32_to_cpu(bth->qpn);
+}
+
+static inline void __bth_set_qpn(void *arg, u32 qpn)
+{
+ struct rxe_bth *bth = arg;
+ u32 resvqpn = be32_to_cpu(bth->qpn);
+
+ bth->qpn = cpu_to_be32((BTH_QPN_MASK & qpn) |
+ (~BTH_QPN_MASK & resvqpn));
+}
+
+static inline int __bth_fecn(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return 0 != (cpu_to_be32(BTH_FECN_MASK) & bth->qpn);
+}
+
+static inline void __bth_set_fecn(void *arg, int fecn)
+{
+ struct rxe_bth *bth = arg;
+
+ if (fecn)
+ bth->qpn |= cpu_to_be32(BTH_FECN_MASK);
+ else
+ bth->qpn &= ~cpu_to_be32(BTH_FECN_MASK);
+}
+
+static inline int __bth_becn(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return 0 != (cpu_to_be32(BTH_BECN_MASK) & bth->qpn);
+}
+
+static inline void __bth_set_becn(void *arg, int becn)
+{
+ struct rxe_bth *bth = arg;
+
+ if (becn)
+ bth->qpn |= cpu_to_be32(BTH_BECN_MASK);
+ else
+ bth->qpn &= ~cpu_to_be32(BTH_BECN_MASK);
+}
+
+static inline u8 __bth_resv6a(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return (BTH_RESV6A_MASK & be32_to_cpu(bth->qpn)) >> 24;
+}
+
+static inline void __bth_set_resv6a(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ bth->qpn &= cpu_to_be32(~BTH_RESV6A_MASK);
+}
+
+static inline int __bth_ack(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return 0 != (cpu_to_be32(BTH_ACK_MASK) & bth->apsn);
+}
+
+static inline void __bth_set_ack(void *arg, int ack)
+{
+ struct rxe_bth *bth = arg;
+
+ if (ack)
+ bth->apsn |= cpu_to_be32(BTH_ACK_MASK);
+ else
+ bth->apsn &= ~cpu_to_be32(BTH_ACK_MASK);
+}
+
+static inline void __bth_set_resv7(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ bth->apsn &= ~cpu_to_be32(BTH_RESV7_MASK);
+}
+
+static inline u32 __bth_psn(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return BTH_PSN_MASK & be32_to_cpu(bth->apsn);
+}
+
+static inline void __bth_set_psn(void *arg, u32 psn)
+{
+ struct rxe_bth *bth = arg;
+ u32 apsn = be32_to_cpu(bth->apsn);
+
+ bth->apsn = cpu_to_be32((BTH_PSN_MASK & psn) |
+ (~BTH_PSN_MASK & apsn));
+}
+
+static inline u8 bth_opcode(struct rxe_pkt_info *pkt)
+{
+ return __bth_opcode(pkt->hdr);
+}
+
+static inline void bth_set_opcode(struct rxe_pkt_info *pkt, u8 opcode)
+{
+ __bth_set_opcode(pkt->hdr, opcode);
+}
+
+static inline u8 bth_se(struct rxe_pkt_info *pkt)
+{
+ return __bth_se(pkt->hdr);
+}
+
+static inline void bth_set_se(struct rxe_pkt_info *pkt, int se)
+{
+ __bth_set_se(pkt->hdr, se);
+}
+
+static inline u8 bth_mig(struct rxe_pkt_info *pkt)
+{
+ return __bth_mig(pkt->hdr);
+}
+
+static inline void bth_set_mig(struct rxe_pkt_info *pkt, u8 mig)
+{
+ __bth_set_mig(pkt->hdr, mig);
+}
+
+static inline u8 bth_pad(struct rxe_pkt_info *pkt)
+{
+ return __bth_pad(pkt->hdr);
+}
+
+static inline void bth_set_pad(struct rxe_pkt_info *pkt, u8 pad)
+{
+ __bth_set_pad(pkt->hdr, pad);
+}
+
+static inline u8 bth_tver(struct rxe_pkt_info *pkt)
+{
+ return __bth_tver(pkt->hdr);
+}
+
+static inline void bth_set_tver(struct rxe_pkt_info *pkt, u8 tver)
+{
+ __bth_set_tver(pkt->hdr, tver);
+}
+
+static inline u16 bth_pkey(struct rxe_pkt_info *pkt)
+{
+ return __bth_pkey(pkt->hdr);
+}
+
+static inline void bth_set_pkey(struct rxe_pkt_info *pkt, u16 pkey)
+{
+ __bth_set_pkey(pkt->hdr, pkey);
+}
+
+static inline u32 bth_qpn(struct rxe_pkt_info *pkt)
+{
+ return __bth_qpn(pkt->hdr);
+}
+
+static inline void bth_set_qpn(struct rxe_pkt_info *pkt, u32 qpn)
+{
+ __bth_set_qpn(pkt->hdr, qpn);
+}
+
+static inline int bth_fecn(struct rxe_pkt_info *pkt)
+{
+ return __bth_fecn(pkt->hdr);
+}
+
+static inline void bth_set_fecn(struct rxe_pkt_info *pkt, int fecn)
+{
+ __bth_set_fecn(pkt->hdr, fecn);
+}
+
+static inline int bth_becn(struct rxe_pkt_info *pkt)
+{
+ return __bth_becn(pkt->hdr);
+}
+
+static inline void bth_set_becn(struct rxe_pkt_info *pkt, int becn)
+{
+ __bth_set_becn(pkt->hdr, becn);
+}
+
+static inline u8 bth_resv6a(struct rxe_pkt_info *pkt)
+{
+ return __bth_resv6a(pkt->hdr);
+}
+
+static inline void bth_set_resv6a(struct rxe_pkt_info *pkt)
+{
+ __bth_set_resv6a(pkt->hdr);
+}
+
+static inline int bth_ack(struct rxe_pkt_info *pkt)
+{
+ return __bth_ack(pkt->hdr);
+}
+
+static inline void bth_set_ack(struct rxe_pkt_info *pkt, int ack)
+{
+ __bth_set_ack(pkt->hdr, ack);
+}
+
+static inline void bth_set_resv7(struct rxe_pkt_info *pkt)
+{
+ __bth_set_resv7(pkt->hdr);
+}
+
+static inline u32 bth_psn(struct rxe_pkt_info *pkt)
+{
+ return __bth_psn(pkt->hdr);
+}
+
+static inline void bth_set_psn(struct rxe_pkt_info *pkt, u32 psn)
+{
+ __bth_set_psn(pkt->hdr, psn);
+}
+
+static inline void bth_init(struct rxe_pkt_info *pkt, u8 opcode, int se,
+ int mig, int pad, u16 pkey, u32 qpn, int ack_req,
+ u32 psn)
+{
+ struct rxe_bth *bth = (struct rxe_bth *)(pkt->hdr);
+
+ bth->opcode = opcode;
+ bth->flags = (pad << 4) & BTH_PAD_MASK;
+ if (se)
+ bth->flags |= BTH_SE_MASK;
+ if (mig)
+ bth->flags |= BTH_MIG_MASK;
+ bth->pkey = cpu_to_be16(pkey);
+ bth->qpn = cpu_to_be32(qpn & BTH_QPN_MASK);
+ psn &= BTH_PSN_MASK;
+ if (ack_req)
+ psn |= BTH_ACK_MASK;
+ bth->apsn = cpu_to_be32(psn);
+}
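+
+/* Editor's sketch (not part of the patch): building a minimal BTH for an
+ * RC SEND-only request; the opcode, qpn and psn values are illustrative.
+ *
+ *   bth_init(pkt, IB_OPCODE_RC_SEND_ONLY,
+ *            0, 0, 0,          // se, mig, pad
+ *            BTH_DEF_PKEY,     // pkey
+ *            qpn,              // destination QPN
+ *            1,                // ack_req
+ *            psn);
+ */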
+
+/******************************************************************************
+ * Reliable Datagram Extended Transport Header
+ ******************************************************************************/
+struct rxe_rdeth {
+ __be32 een;
+};
+
+#define RDETH_EEN_MASK (0x00ffffff)
+
+static inline u8 __rdeth_een(void *arg)
+{
+ struct rxe_rdeth *rdeth = arg;
+
+ return RDETH_EEN_MASK & be32_to_cpu(rdeth->een);
+}
+
+static inline void __rdeth_set_een(void *arg, u32 een)
+{
+ struct rxe_rdeth *rdeth = arg;
+
+ rdeth->een = cpu_to_be32(RDETH_EEN_MASK & een);
+}
+
+static inline u8 rdeth_een(struct rxe_pkt_info *pkt)
+{
+ return __rdeth_een(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_RDETH]);
+}
+
+static inline void rdeth_set_een(struct rxe_pkt_info *pkt, u32 een)
+{
+ __rdeth_set_een(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_RDETH], een);
+}
+
+/******************************************************************************
+ * Datagram Extended Transport Header
+ ******************************************************************************/
+struct rxe_deth {
+ __be32 qkey;
+ __be32 sqp;
+};
+
+#define GSI_QKEY (0x80010000)
+#define DETH_SQP_MASK (0x00ffffff)
+
+static inline u32 __deth_qkey(void *arg)
+{
+ struct rxe_deth *deth = arg;
+
+ return be32_to_cpu(deth->qkey);
+}
+
+static inline void __deth_set_qkey(void *arg, u32 qkey)
+{
+ struct rxe_deth *deth = arg;
+
+ deth->qkey = cpu_to_be32(qkey);
+}
+
+static inline u32 __deth_sqp(void *arg)
+{
+ struct rxe_deth *deth = arg;
+
+ return DETH_SQP_MASK & be32_to_cpu(deth->sqp);
+}
+
+static inline void __deth_set_sqp(void *arg, u32 sqp)
+{
+ struct rxe_deth *deth = arg;
+
+ deth->sqp = cpu_to_be32(DETH_SQP_MASK & sqp);
+}
+
+static inline u32 deth_qkey(struct rxe_pkt_info *pkt)
+{
+ return __deth_qkey(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_DETH]);
+}
+
+static inline void deth_set_qkey(struct rxe_pkt_info *pkt, u32 qkey)
+{
+ __deth_set_qkey(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_DETH], qkey);
+}
+
+static inline u32 deth_sqp(struct rxe_pkt_info *pkt)
+{
+ return __deth_sqp(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_DETH]);
+}
+
+static inline void deth_set_sqp(struct rxe_pkt_info *pkt, u32 sqp)
+{
+ __deth_set_sqp(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_DETH], sqp);
+}
+
+/******************************************************************************
+ * RDMA Extended Transport Header
+ ******************************************************************************/
+struct rxe_reth {
+ __be64 va;
+ __be32 rkey;
+ __be32 len;
+};
+
+static inline u64 __reth_va(void *arg)
+{
+ struct rxe_reth *reth = arg;
+
+ return be64_to_cpu(reth->va);
+}
+
+static inline void __reth_set_va(void *arg, u64 va)
+{
+ struct rxe_reth *reth = arg;
+
+ reth->va = cpu_to_be64(va);
+}
+
+static inline u32 __reth_rkey(void *arg)
+{
+ struct rxe_reth *reth = arg;
+
+ return be32_to_cpu(reth->rkey);
+}
+
+static inline void __reth_set_rkey(void *arg, u32 rkey)
+{
+ struct rxe_reth *reth = arg;
+
+ reth->rkey = cpu_to_be32(rkey);
+}
+
+static inline u32 __reth_len(void *arg)
+{
+ struct rxe_reth *reth = arg;
+
+ return be32_to_cpu(reth->len);
+}
+
+static inline void __reth_set_len(void *arg, u32 len)
+{
+ struct rxe_reth *reth = arg;
+
+ reth->len = cpu_to_be32(len);
+}
+
+static inline u64 reth_va(struct rxe_pkt_info *pkt)
+{
+ return __reth_va(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
+}
+
+static inline void reth_set_va(struct rxe_pkt_info *pkt, u64 va)
+{
+ __reth_set_va(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_RETH], va);
+}
+
+static inline u32 reth_rkey(struct rxe_pkt_info *pkt)
+{
+ return __reth_rkey(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
+}
+
+static inline void reth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
+{
+ __reth_set_rkey(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey);
+}
+
+static inline u32 reth_len(struct rxe_pkt_info *pkt)
+{
+ return __reth_len(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
+}
+
+static inline void reth_set_len(struct rxe_pkt_info *pkt, u32 len)
+{
+ __reth_set_len(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_RETH], len);
+}
+
+/******************************************************************************
+ * FLUSH Extended Transport Header
+ ******************************************************************************/
+
+struct rxe_feth {
+ __be32 bits;
+};
+
+#define FETH_PLT_MASK (0x0000000f) /* bits 3-0 */
+#define FETH_SEL_MASK (0x00000030) /* bits 5-4 */
+#define FETH_SEL_SHIFT (4U)
+
+static inline u32 __feth_plt(void *arg)
+{
+ struct rxe_feth *feth = arg;
+
+ return be32_to_cpu(feth->bits) & FETH_PLT_MASK;
+}
+
+static inline u32 __feth_sel(void *arg)
+{
+ struct rxe_feth *feth = arg;
+
+ return (be32_to_cpu(feth->bits) & FETH_SEL_MASK) >> FETH_SEL_SHIFT;
+}
+
+static inline u32 feth_plt(struct rxe_pkt_info *pkt)
+{
+ return __feth_plt(pkt->hdr + rxe_opcode[pkt->opcode].offset[RXE_FETH]);
+}
+
+static inline u32 feth_sel(struct rxe_pkt_info *pkt)
+{
+ return __feth_sel(pkt->hdr + rxe_opcode[pkt->opcode].offset[RXE_FETH]);
+}
+
+static inline void feth_init(struct rxe_pkt_info *pkt, u8 type, u8 level)
+{
+ struct rxe_feth *feth = (struct rxe_feth *)
+ (pkt->hdr + rxe_opcode[pkt->opcode].offset[RXE_FETH]);
+ u32 bits = ((level << FETH_SEL_SHIFT) & FETH_SEL_MASK) |
+ (type & FETH_PLT_MASK);
+
+ feth->bits = cpu_to_be32(bits);
+}
+
+/******************************************************************************
+ * Atomic Extended Transport Header
+ ******************************************************************************/
+struct rxe_atmeth {
+ __be64 va;
+ __be32 rkey;
+ __be64 swap_add;
+ __be64 comp;
+} __packed;
+
+static inline u64 __atmeth_va(void *arg)
+{
+ struct rxe_atmeth *atmeth = arg;
+
+ return be64_to_cpu(atmeth->va);
+}
+
+static inline void __atmeth_set_va(void *arg, u64 va)
+{
+ struct rxe_atmeth *atmeth = arg;
+
+ atmeth->va = cpu_to_be64(va);
+}
+
+static inline u32 __atmeth_rkey(void *arg)
+{
+ struct rxe_atmeth *atmeth = arg;
+
+ return be32_to_cpu(atmeth->rkey);
+}
+
+static inline void __atmeth_set_rkey(void *arg, u32 rkey)
+{
+ struct rxe_atmeth *atmeth = arg;
+
+ atmeth->rkey = cpu_to_be32(rkey);
+}
+
+static inline u64 __atmeth_swap_add(void *arg)
+{
+ struct rxe_atmeth *atmeth = arg;
+
+ return be64_to_cpu(atmeth->swap_add);
+}
+
+static inline void __atmeth_set_swap_add(void *arg, u64 swap_add)
+{
+ struct rxe_atmeth *atmeth = arg;
+
+ atmeth->swap_add = cpu_to_be64(swap_add);
+}
+
+static inline u64 __atmeth_comp(void *arg)
+{
+ struct rxe_atmeth *atmeth = arg;
+
+ return be64_to_cpu(atmeth->comp);
+}
+
+static inline void __atmeth_set_comp(void *arg, u64 comp)
+{
+ struct rxe_atmeth *atmeth = arg;
+
+ atmeth->comp = cpu_to_be64(comp);
+}
+
+static inline u64 atmeth_va(struct rxe_pkt_info *pkt)
+{
+ return __atmeth_va(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
+}
+
+static inline void atmeth_set_va(struct rxe_pkt_info *pkt, u64 va)
+{
+ __atmeth_set_va(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], va);
+}
+
+static inline u32 atmeth_rkey(struct rxe_pkt_info *pkt)
+{
+ return __atmeth_rkey(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
+}
+
+static inline void atmeth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
+{
+ __atmeth_set_rkey(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], rkey);
+}
+
+static inline u64 atmeth_swap_add(struct rxe_pkt_info *pkt)
+{
+ return __atmeth_swap_add(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
+}
+
+static inline void atmeth_set_swap_add(struct rxe_pkt_info *pkt, u64 swap_add)
+{
+ __atmeth_set_swap_add(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], swap_add);
+}
+
+static inline u64 atmeth_comp(struct rxe_pkt_info *pkt)
+{
+ return __atmeth_comp(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
+}
+
+static inline void atmeth_set_comp(struct rxe_pkt_info *pkt, u64 comp)
+{
+ __atmeth_set_comp(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], comp);
+}
+
+/******************************************************************************
+ * Ack Extended Transport Header
+ ******************************************************************************/
+struct rxe_aeth {
+ __be32 smsn;
+};
+
+#define AETH_SYN_MASK (0xff000000)
+#define AETH_MSN_MASK (0x00ffffff)
+
+enum aeth_syndrome {
+ AETH_TYPE_MASK = 0xe0,
+ AETH_ACK = 0x00,
+ AETH_RNR_NAK = 0x20,
+ AETH_RSVD = 0x40,
+ AETH_NAK = 0x60,
+ AETH_ACK_UNLIMITED = 0x1f,
+ AETH_NAK_PSN_SEQ_ERROR = 0x60,
+ AETH_NAK_INVALID_REQ = 0x61,
+ AETH_NAK_REM_ACC_ERR = 0x62,
+ AETH_NAK_REM_OP_ERR = 0x63,
+};
+
+static inline u8 __aeth_syn(void *arg)
+{
+ struct rxe_aeth *aeth = arg;
+
+ return (AETH_SYN_MASK & be32_to_cpu(aeth->smsn)) >> 24;
+}
+
+static inline void __aeth_set_syn(void *arg, u8 syn)
+{
+ struct rxe_aeth *aeth = arg;
+ u32 smsn = be32_to_cpu(aeth->smsn);
+
+ aeth->smsn = cpu_to_be32((AETH_SYN_MASK & (syn << 24)) |
+ (~AETH_SYN_MASK & smsn));
+}
+
+static inline u32 __aeth_msn(void *arg)
+{
+ struct rxe_aeth *aeth = arg;
+
+ return AETH_MSN_MASK & be32_to_cpu(aeth->smsn);
+}
+
+static inline void __aeth_set_msn(void *arg, u32 msn)
+{
+ struct rxe_aeth *aeth = arg;
+ u32 smsn = be32_to_cpu(aeth->smsn);
+
+ aeth->smsn = cpu_to_be32((AETH_MSN_MASK & msn) |
+ (~AETH_MSN_MASK & smsn));
+}
+
+static inline u8 aeth_syn(struct rxe_pkt_info *pkt)
+{
+ return __aeth_syn(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_AETH]);
+}
+
+static inline void aeth_set_syn(struct rxe_pkt_info *pkt, u8 syn)
+{
+ __aeth_set_syn(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_AETH], syn);
+}
+
+static inline u32 aeth_msn(struct rxe_pkt_info *pkt)
+{
+ return __aeth_msn(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_AETH]);
+}
+
+static inline void aeth_set_msn(struct rxe_pkt_info *pkt, u32 msn)
+{
+ __aeth_set_msn(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_AETH], msn);
+}
+
+/******************************************************************************
+ * Atomic Ack Extended Transport Header
+ ******************************************************************************/
+struct rxe_atmack {
+ __be64 orig;
+};
+
+static inline u64 __atmack_orig(void *arg)
+{
+ struct rxe_atmack *atmack = arg;
+
+ return be64_to_cpu(atmack->orig);
+}
+
+static inline void __atmack_set_orig(void *arg, u64 orig)
+{
+ struct rxe_atmack *atmack = arg;
+
+ atmack->orig = cpu_to_be64(orig);
+}
+
+static inline u64 atmack_orig(struct rxe_pkt_info *pkt)
+{
+ return __atmack_orig(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMACK]);
+}
+
+static inline void atmack_set_orig(struct rxe_pkt_info *pkt, u64 orig)
+{
+ __atmack_set_orig(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMACK], orig);
+}
+
+/******************************************************************************
+ * Immediate Extended Transport Header
+ ******************************************************************************/
+struct rxe_immdt {
+ __be32 imm;
+};
+
+static inline __be32 __immdt_imm(void *arg)
+{
+ struct rxe_immdt *immdt = arg;
+
+ return immdt->imm;
+}
+
+static inline void __immdt_set_imm(void *arg, __be32 imm)
+{
+ struct rxe_immdt *immdt = arg;
+
+ immdt->imm = imm;
+}
+
+static inline __be32 immdt_imm(struct rxe_pkt_info *pkt)
+{
+ return __immdt_imm(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_IMMDT]);
+}
+
+static inline void immdt_set_imm(struct rxe_pkt_info *pkt, __be32 imm)
+{
+ __immdt_set_imm(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_IMMDT], imm);
+}
+
+/******************************************************************************
+ * Invalidate Extended Transport Header
+ ******************************************************************************/
+struct rxe_ieth {
+ __be32 rkey;
+};
+
+static inline u32 __ieth_rkey(void *arg)
+{
+ struct rxe_ieth *ieth = arg;
+
+ return be32_to_cpu(ieth->rkey);
+}
+
+static inline void __ieth_set_rkey(void *arg, u32 rkey)
+{
+ struct rxe_ieth *ieth = arg;
+
+ ieth->rkey = cpu_to_be32(rkey);
+}
+
+static inline u32 ieth_rkey(struct rxe_pkt_info *pkt)
+{
+ return __ieth_rkey(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_IETH]);
+}
+
+static inline void ieth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
+{
+ __ieth_set_rkey(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_IETH], rkey);
+}
+
+enum rxe_hdr_length {
+ RXE_BTH_BYTES = sizeof(struct rxe_bth),
+ RXE_DETH_BYTES = sizeof(struct rxe_deth),
+ RXE_IMMDT_BYTES = sizeof(struct rxe_immdt),
+ RXE_RETH_BYTES = sizeof(struct rxe_reth),
+ RXE_AETH_BYTES = sizeof(struct rxe_aeth),
+ RXE_ATMACK_BYTES = sizeof(struct rxe_atmack),
+ RXE_ATMETH_BYTES = sizeof(struct rxe_atmeth),
+ RXE_IETH_BYTES = sizeof(struct rxe_ieth),
+ RXE_RDETH_BYTES = sizeof(struct rxe_rdeth),
+ RXE_FETH_BYTES = sizeof(struct rxe_feth),
+};
+
+static inline size_t header_size(struct rxe_pkt_info *pkt)
+{
+ return rxe_opcode[pkt->opcode].length;
+}
+
+static inline void *payload_addr(struct rxe_pkt_info *pkt)
+{
+ return pkt->hdr + rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD];
+}
+
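+/* length of the packet payload, excluding pad bytes and the ICRC */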
+static inline size_t payload_size(struct rxe_pkt_info *pkt)
+{
+ return pkt->paylen - rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD]
+ - bth_pad(pkt) - RXE_ICRC_SIZE;
+}
+
+#endif /* RXE_HDR_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_hw_counters.c b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
new file mode 100644
index 000000000000..437917a7d8f2
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2017 Mellanox Technologies Ltd. All rights reserved.
+ */
+
+#include "rxe.h"
+#include "rxe_hw_counters.h"
+
+static const struct rdma_stat_desc rxe_counter_descs[] = {
+ [RXE_CNT_SENT_PKTS].name = "sent_pkts",
+ [RXE_CNT_RCVD_PKTS].name = "rcvd_pkts",
+ [RXE_CNT_DUP_REQ].name = "duplicate_request",
+ [RXE_CNT_OUT_OF_SEQ_REQ].name = "out_of_seq_request",
+ [RXE_CNT_RCV_RNR].name = "rcvd_rnr_err",
+ [RXE_CNT_SND_RNR].name = "send_rnr_err",
+ [RXE_CNT_RCV_SEQ_ERR].name = "rcvd_seq_err",
+ [RXE_CNT_SENDER_SCHED].name = "ack_deferred",
+ [RXE_CNT_RETRY_EXCEEDED].name = "retry_exceeded_err",
+ [RXE_CNT_RNR_RETRY_EXCEEDED].name = "retry_rnr_exceeded_err",
+ [RXE_CNT_COMP_RETRY].name = "completer_retry_err",
+ [RXE_CNT_SEND_ERR].name = "send_err",
+ [RXE_CNT_LINK_DOWNED].name = "link_downed",
+ [RXE_CNT_RDMA_SEND].name = "rdma_sends",
+ [RXE_CNT_RDMA_RECV].name = "rdma_recvs",
+};
+
+int rxe_ib_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u32 port, int index)
+{
+ struct rxe_dev *dev = to_rdev(ibdev);
+ unsigned int cnt;
+
+ if (!port || !stats)
+ return -EINVAL;
+
+ for (cnt = 0; cnt < ARRAY_SIZE(rxe_counter_descs); cnt++)
+ stats->value[cnt] = atomic64_read(&dev->stats_counters[cnt]);
+
+ return ARRAY_SIZE(rxe_counter_descs);
+}
+
+struct rdma_hw_stats *rxe_ib_alloc_hw_port_stats(struct ib_device *ibdev,
+ u32 port_num)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(rxe_counter_descs) != RXE_NUM_OF_COUNTERS);
+
+ return rdma_alloc_hw_stats_struct(rxe_counter_descs,
+ ARRAY_SIZE(rxe_counter_descs),
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_hw_counters.h b/drivers/infiniband/sw/rxe/rxe_hw_counters.h
new file mode 100644
index 000000000000..051f9e1c3852
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_hw_counters.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2017 Mellanox Technologies Ltd. All rights reserved.
+ */
+
+#ifndef RXE_HW_COUNTERS_H
+#define RXE_HW_COUNTERS_H
+
+/*
+ * when adding counters to enum also add
+ * them to rxe_counter_descs[] vector.
+ */
+enum rxe_counters {
+ RXE_CNT_SENT_PKTS,
+ RXE_CNT_RCVD_PKTS,
+ RXE_CNT_DUP_REQ,
+ RXE_CNT_OUT_OF_SEQ_REQ,
+ RXE_CNT_RCV_RNR,
+ RXE_CNT_SND_RNR,
+ RXE_CNT_RCV_SEQ_ERR,
+ RXE_CNT_SENDER_SCHED,
+ RXE_CNT_RETRY_EXCEEDED,
+ RXE_CNT_RNR_RETRY_EXCEEDED,
+ RXE_CNT_COMP_RETRY,
+ RXE_CNT_SEND_ERR,
+ RXE_CNT_LINK_DOWNED,
+ RXE_CNT_RDMA_SEND,
+ RXE_CNT_RDMA_RECV,
+ RXE_NUM_OF_COUNTERS
+};
+
+struct rdma_hw_stats *rxe_ib_alloc_hw_port_stats(struct ib_device *ibdev,
+ u32 port_num);
+int rxe_ib_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u32 port, int index);
+#endif /* RXE_HW_COUNTERS_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_icrc.c b/drivers/infiniband/sw/rxe/rxe_icrc.c
new file mode 100644
index 000000000000..76d760fbe7ea
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_icrc.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#include <linux/crc32.h>
+
+#include "rxe.h"
+#include "rxe_loc.h"
+
+/**
+ * rxe_crc32() - Compute cumulative crc32 for a contiguous segment
+ * @rxe: rdma_rxe device object
+ * @crc: starting crc32 value from previous segments
+ * @next: starting address of current segment
+ * @len: length of current segment
+ *
+ * Return: the cumulative crc32 checksum
+ */
+static __be32 rxe_crc32(struct rxe_dev *rxe, __be32 crc, void *next, size_t len)
+{
+ return (__force __be32)crc32_le((__force u32)crc, next, len);
+}
+
+/**
+ * rxe_icrc_hdr() - Compute the partial ICRC for the network and transport
+ * headers of a packet.
+ * @skb: packet buffer
+ * @pkt: packet information
+ *
+ * Return: the partial ICRC
+ */
+static __be32 rxe_icrc_hdr(struct sk_buff *skb, struct rxe_pkt_info *pkt)
+{
+ unsigned int bth_offset = 0;
+ struct iphdr *ip4h = NULL;
+ struct ipv6hdr *ip6h = NULL;
+ struct udphdr *udph;
+ struct rxe_bth *bth;
+ __be32 crc;
+ int length;
+ int hdr_size = sizeof(struct udphdr) +
+ (skb->protocol == htons(ETH_P_IP) ?
+ sizeof(struct iphdr) : sizeof(struct ipv6hdr));
+ /* pseudo header buffer size is calculated using the ipv6 header size
+ * since it is bigger than the ipv4 header
+ */
+ u8 pshdr[sizeof(struct udphdr) +
+ sizeof(struct ipv6hdr) +
+ RXE_BTH_BYTES];
+
+ /* This seed is the result of computing a CRC with a seed of
+ * 0xffffffff and 8 bytes of 0xff representing a masked LRH.
+ */
+ crc = (__force __be32)0xdebb20e3;
+
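+ /* Build the pseudo header: copy the IP and UDP headers and replace
+ * the fields excluded from the ICRC with all ones
+ */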
+ if (skb->protocol == htons(ETH_P_IP)) { /* IPv4 */
+ memcpy(pshdr, ip_hdr(skb), hdr_size);
+ ip4h = (struct iphdr *)pshdr;
+ udph = (struct udphdr *)(ip4h + 1);
+
+ ip4h->ttl = 0xff;
+ ip4h->check = CSUM_MANGLED_0;
+ ip4h->tos = 0xff;
+ } else { /* IPv6 */
+ memcpy(pshdr, ipv6_hdr(skb), hdr_size);
+ ip6h = (struct ipv6hdr *)pshdr;
+ udph = (struct udphdr *)(ip6h + 1);
+
+ memset(ip6h->flow_lbl, 0xff, sizeof(ip6h->flow_lbl));
+ ip6h->priority = 0xf;
+ ip6h->hop_limit = 0xff;
+ }
+ udph->check = CSUM_MANGLED_0;
+
+ bth_offset += hdr_size;
+
+ memcpy(&pshdr[bth_offset], pkt->hdr, RXE_BTH_BYTES);
+ bth = (struct rxe_bth *)&pshdr[bth_offset];
+
+ /* exclude bth.resv8a */
+ bth->qpn |= cpu_to_be32(~BTH_QPN_MASK);
+
+ length = hdr_size + RXE_BTH_BYTES;
+ crc = rxe_crc32(pkt->rxe, crc, pshdr, length);
+
+ /* Finish computing the CRC over the remainder of the headers. */
+ crc = rxe_crc32(pkt->rxe, crc, pkt->hdr + RXE_BTH_BYTES,
+ rxe_opcode[pkt->opcode].length - RXE_BTH_BYTES);
+ return crc;
+}
+
+/**
+ * rxe_icrc_check() - Compute ICRC for a packet and compare to the ICRC
+ * delivered in the packet.
+ * @skb: packet buffer
+ * @pkt: packet information
+ *
+ * Return: 0 if the values match else an error
+ */
+int rxe_icrc_check(struct sk_buff *skb, struct rxe_pkt_info *pkt)
+{
+ __be32 *icrcp;
+ __be32 pkt_icrc;
+ __be32 icrc;
+
+ icrcp = (__be32 *)(pkt->hdr + pkt->paylen - RXE_ICRC_SIZE);
+ pkt_icrc = *icrcp;
+
+ icrc = rxe_icrc_hdr(skb, pkt);
+ icrc = rxe_crc32(pkt->rxe, icrc, (u8 *)payload_addr(pkt),
+ payload_size(pkt) + bth_pad(pkt));
+ icrc = ~icrc;
+
+ if (unlikely(icrc != pkt_icrc))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * rxe_icrc_generate() - compute ICRC for a packet.
+ * @skb: packet buffer
+ * @pkt: packet information
+ */
+void rxe_icrc_generate(struct sk_buff *skb, struct rxe_pkt_info *pkt)
+{
+ __be32 *icrcp;
+ __be32 icrc;
+
+ icrcp = (__be32 *)(pkt->hdr + pkt->paylen - RXE_ICRC_SIZE);
+ icrc = rxe_icrc_hdr(skb, pkt);
+ icrc = rxe_crc32(pkt->rxe, icrc, (u8 *)payload_addr(pkt),
+ payload_size(pkt) + bth_pad(pkt));
+ *icrcp = ~icrc;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
new file mode 100644
index 000000000000..7992290886e1
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#ifndef RXE_LOC_H
+#define RXE_LOC_H
+
+/* rxe_av.c */
+void rxe_init_av(struct rdma_ah_attr *attr, struct rxe_av *av);
+int rxe_av_chk_attr(struct rxe_qp *qp, struct rdma_ah_attr *attr);
+int rxe_ah_chk_attr(struct rxe_ah *ah, struct rdma_ah_attr *attr);
+void rxe_av_from_attr(u8 port_num, struct rxe_av *av,
+ struct rdma_ah_attr *attr);
+void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr);
+void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr);
+struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt, struct rxe_ah **ahp);
+
+/* rxe_cq.c */
+int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
+ int cqe, int comp_vector);
+
+int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
+ int comp_vector, struct ib_udata *udata,
+ struct rxe_create_cq_resp __user *uresp);
+
+int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe,
+ struct rxe_resize_cq_resp __user *uresp,
+ struct ib_udata *udata);
+
+int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);
+
+void rxe_cq_cleanup(struct rxe_pool_elem *elem);
+
+/* rxe_mcast.c */
+struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid);
+int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid);
+int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid);
+void rxe_cleanup_mcg(struct kref *kref);
+
+/* rxe_mmap.c */
+struct rxe_mmap_info {
+ struct list_head pending_mmaps;
+ struct ib_ucontext *context;
+ struct kref ref;
+ void *obj;
+
+ struct mminfo info;
+};
+
+void rxe_mmap_release(struct kref *ref);
+
+struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev, u32 size,
+ struct ib_udata *udata, void *obj);
+
+int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+
+/* rxe_mr.c */
+u8 rxe_get_next_key(u32 last_key);
+void rxe_mr_init(int access, struct rxe_mr *mr);
+void rxe_mr_init_dma(int access, struct rxe_mr *mr);
+int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
+ int access, struct rxe_mr *mr);
+int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr);
+int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length);
+int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
+ unsigned int length, enum rxe_mr_copy_dir dir);
+int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
+ void *addr, int length, enum rxe_mr_copy_dir dir);
+int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset);
+enum resp_states rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val);
+enum resp_states rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);
+struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
+ enum rxe_mr_lookup_type type);
+int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
+int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
+int rxe_invalidate_mr(struct rxe_qp *qp, u32 key);
+int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
+void rxe_mr_cleanup(struct rxe_pool_elem *elem);
+
+/* defined in rxe_mr.c; used in rxe_mr.c and rxe_odp.c */
+extern spinlock_t atomic_ops_lock;
+
+/* rxe_mw.c */
+int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata);
+int rxe_dealloc_mw(struct ib_mw *ibmw);
+int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
+int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey);
+struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey);
+void rxe_mw_cleanup(struct rxe_pool_elem *elem);
+
+/* rxe_net.c */
+struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
+ int paylen, struct rxe_pkt_info *pkt);
+int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb);
+int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb);
+const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
+
+/* rxe_qp.c */
+int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);
+int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
+ struct ib_qp_init_attr *init,
+ struct rxe_create_qp_resp __user *uresp,
+ struct ib_pd *ibpd, struct ib_udata *udata);
+int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);
+int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct ib_qp_attr *attr, int mask);
+int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr,
+ int mask, struct ib_udata *udata);
+int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask);
+void rxe_qp_error(struct rxe_qp *qp);
+int rxe_qp_chk_destroy(struct rxe_qp *qp);
+void rxe_qp_cleanup(struct rxe_pool_elem *elem);
+
+static inline int qp_num(struct rxe_qp *qp)
+{
+ return qp->ibqp.qp_num;
+}
+
+static inline enum ib_qp_type qp_type(struct rxe_qp *qp)
+{
+ return qp->ibqp.qp_type;
+}
+
+static inline enum ib_qp_state qp_state(struct rxe_qp *qp)
+{
+ return qp->attr.qp_state;
+}
+
+static inline int qp_mtu(struct rxe_qp *qp)
+{
+ if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
+ return qp->attr.path_mtu;
+ else
+ return IB_MTU_4096;
+}
+
+static inline bool is_odp_mr(struct rxe_mr *mr)
+{
+ return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
+ mr->umem->is_odp;
+}
+
+void free_rd_atomic_resource(struct resp_res *res);
+
+static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
+{
+ qp->resp.res_head++;
+ if (unlikely(qp->resp.res_head == qp->attr.max_dest_rd_atomic))
+ qp->resp.res_head = 0;
+}
+
+void retransmit_timer(struct timer_list *t);
+void rnr_nak_timer(struct timer_list *t);
+
+/* rxe_srq.c */
+int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init);
+int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
+ struct ib_srq_init_attr *init, struct ib_udata *udata,
+ struct rxe_create_srq_resp __user *uresp);
+int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
+ struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);
+int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
+ struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
+ struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata);
+void rxe_srq_cleanup(struct rxe_pool_elem *elem);
+
+void rxe_dealloc(struct ib_device *ib_dev);
+
+int rxe_completer(struct rxe_qp *qp);
+int rxe_requester(struct rxe_qp *qp);
+int rxe_sender(struct rxe_qp *qp);
+int rxe_receiver(struct rxe_qp *qp);
+
+/* rxe_icrc.c */
+int rxe_icrc_check(struct sk_buff *skb, struct rxe_pkt_info *pkt);
+void rxe_icrc_generate(struct sk_buff *skb, struct rxe_pkt_info *pkt);
+
+void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb);
+
+void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb);
+
+static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
+{
+ return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type];
+}
+
+/* rxe_odp.c */
+extern const struct mmu_interval_notifier_ops rxe_mn_ops;
+
+#if defined CONFIG_INFINIBAND_ON_DEMAND_PAGING
+int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
+ u64 iova, int access_flags, struct rxe_mr *mr);
+int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
+ enum rxe_mr_copy_dir dir);
+enum resp_states rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val);
+int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
+ unsigned int length);
+enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);
+int rxe_ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
+ u32 flags, struct ib_sge *sg_list, u32 num_sge,
+ struct uverbs_attr_bundle *attrs);
+#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+static inline int
+rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
+ int access_flags, struct rxe_mr *mr)
+{
+ return -EOPNOTSUPP;
+}
+static inline int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
+ int length, enum rxe_mr_copy_dir dir)
+{
+ return -EOPNOTSUPP;
+}
+static inline enum resp_states
+rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val)
+{
+ return RESPST_ERR_UNSUPPORTED_OPCODE;
+}
+static inline int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
+ unsigned int length)
+{
+ return -EOPNOTSUPP;
+}
+static inline enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr,
+ u64 iova, u64 value)
+{
+ return RESPST_ERR_UNSUPPORTED_OPCODE;
+}
+static inline int rxe_ib_advise_mr(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 flags, struct ib_sge *sg_list,
+ u32 num_sge,
+ struct uverbs_attr_bundle *attrs)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+
+#endif /* RXE_LOC_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c
new file mode 100644
index 000000000000..07ff47bae31d
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2022 Hewlett Packard Enterprise, Inc. All rights reserved.
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+/*
+ * rxe_mcast.c implements driver support for multicast transport.
+ * It is based on two data structures: struct rxe_mcg ('mcg') and
+ * struct rxe_mca ('mca'). An mcg is allocated the first time a qp is
+ * attached to a new mgid. The mcgs are indexed by a red-black tree
+ * keyed on the mgid. This tree is searched
+ * for the mcg when a multicast packet is received and when another
+ * qp is attached to the same mgid. It is cleaned up when the last qp
+ * is detached from the mcg. Each time a qp is attached to an mcg an
+ * mca is created. It holds a pointer to the qp and is added to a list
+ * of qp's that are attached to the mcg. The qp_list is used to replicate
+ * mcast packets in the rxe receive path.
+ */
+
+#include "rxe.h"
+
+/**
+ * rxe_mcast_add - add multicast address to rxe device
+ * @rxe: rxe device object
+ * @mgid: multicast address as a gid
+ *
+ * Returns 0 on success else an error
+ */
+static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
+{
+ unsigned char ll_addr[ETH_ALEN];
+ struct net_device *ndev;
+ int ret;
+
+ ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+ if (!ndev)
+ return -ENODEV;
+
+ ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
+
+ ret = dev_mc_add(ndev, ll_addr);
+ dev_put(ndev);
+
+ return ret;
+}
+
+/**
+ * rxe_mcast_del - delete multicast address from rxe device
+ * @rxe: rxe device object
+ * @mgid: multicast address as a gid
+ *
+ * Returns 0 on success else an error
+ */
+static int rxe_mcast_del(struct rxe_dev *rxe, union ib_gid *mgid)
+{
+ unsigned char ll_addr[ETH_ALEN];
+ struct net_device *ndev;
+ int ret;
+
+ ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+ if (!ndev)
+ return -ENODEV;
+
+ ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
+
+ ret = dev_mc_del(ndev, ll_addr);
+ dev_put(ndev);
+
+ return ret;
+}
+
+/**
+ * __rxe_insert_mcg - insert an mcg into red-black tree (rxe->mcg_tree)
+ * @mcg: mcg object with an embedded red-black tree node
+ *
+ * Context: caller must hold a reference to mcg and rxe->mcg_lock and
+ * is responsible for not adding the same mcg to the tree twice.
+ */
+static void __rxe_insert_mcg(struct rxe_mcg *mcg)
+{
+ struct rb_root *tree = &mcg->rxe->mcg_tree;
+ struct rb_node **link = &tree->rb_node;
+ struct rb_node *node = NULL;
+ struct rxe_mcg *tmp;
+ int cmp;
+
+ while (*link) {
+ node = *link;
+ tmp = rb_entry(node, struct rxe_mcg, node);
+
+ cmp = memcmp(&tmp->mgid, &mcg->mgid, sizeof(mcg->mgid));
+ if (cmp > 0)
+ link = &(*link)->rb_left;
+ else
+ link = &(*link)->rb_right;
+ }
+
+ rb_link_node(&mcg->node, node, link);
+ rb_insert_color(&mcg->node, tree);
+}
+
+/**
+ * __rxe_remove_mcg - remove an mcg from red-black tree holding lock
+ * @mcg: mcast group object with an embedded red-black tree node
+ *
+ * Context: caller must hold a reference to mcg and rxe->mcg_lock
+ */
+static void __rxe_remove_mcg(struct rxe_mcg *mcg)
+{
+ rb_erase(&mcg->node, &mcg->rxe->mcg_tree);
+}
+
+/**
+ * __rxe_lookup_mcg - lookup mcg in rxe->mcg_tree while holding lock
+ * @rxe: rxe device object
+ * @mgid: multicast IP address
+ *
+ * Context: caller must hold rxe->mcg_lock
+ * Returns: mcg on success and takes a ref to mcg else NULL
+ */
+static struct rxe_mcg *__rxe_lookup_mcg(struct rxe_dev *rxe,
+ union ib_gid *mgid)
+{
+ struct rb_root *tree = &rxe->mcg_tree;
+ struct rxe_mcg *mcg;
+ struct rb_node *node;
+ int cmp;
+
+ node = tree->rb_node;
+
+ while (node) {
+ mcg = rb_entry(node, struct rxe_mcg, node);
+
+ cmp = memcmp(&mcg->mgid, mgid, sizeof(*mgid));
+
+ if (cmp > 0)
+ node = node->rb_left;
+ else if (cmp < 0)
+ node = node->rb_right;
+ else
+ break;
+ }
+
+ if (node) {
+ kref_get(&mcg->ref_cnt);
+ return mcg;
+ }
+
+ return NULL;
+}
+
+/**
+ * rxe_lookup_mcg - look up mcg in red-black tree
+ * @rxe: rxe device object
+ * @mgid: multicast IP address
+ *
+ * Returns: mcg if found else NULL
+ */
+struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
+{
+ struct rxe_mcg *mcg;
+
+ spin_lock_bh(&rxe->mcg_lock);
+ mcg = __rxe_lookup_mcg(rxe, mgid);
+ spin_unlock_bh(&rxe->mcg_lock);
+
+ return mcg;
+}
+
+/**
+ * __rxe_init_mcg - initialize a new mcg
+ * @rxe: rxe device
+ * @mgid: multicast address as a gid
+ * @mcg: new mcg object
+ *
+ * Context: caller should hold rxe->mcg_lock
+ */
+static void __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
+ struct rxe_mcg *mcg)
+{
+ kref_init(&mcg->ref_cnt);
+ memcpy(&mcg->mgid, mgid, sizeof(mcg->mgid));
+ INIT_LIST_HEAD(&mcg->qp_list);
+ mcg->rxe = rxe;
+
+ /* caller holds a ref on mcg but that will be
+ * dropped when mcg goes out of scope. We need to take a ref
+ * on the pointer that will be saved in the red-black tree
+ * by __rxe_insert_mcg and used to lookup mcg from mgid later.
+ * Inserting mcg makes it visible to outside so this should
+ * be done last after the object is ready.
+ */
+ kref_get(&mcg->ref_cnt);
+ __rxe_insert_mcg(mcg);
+}
+
+/**
+ * rxe_get_mcg - lookup or allocate a mcg
+ * @rxe: rxe device object
+ * @mgid: multicast IP address as a gid
+ *
+ * Returns: mcg on success else ERR_PTR(error)
+ */
+static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
+{
+ struct rxe_mcg *mcg, *tmp;
+ int err;
+
+ if (rxe->attr.max_mcast_grp == 0)
+ return ERR_PTR(-EINVAL);
+
+ /* check to see if mcg already exists */
+ mcg = rxe_lookup_mcg(rxe, mgid);
+ if (mcg)
+ return mcg;
+
+ /* check to see if we have reached limit */
+ if (atomic_inc_return(&rxe->mcg_num) > rxe->attr.max_mcast_grp) {
+ err = -ENOMEM;
+ goto err_dec;
+ }
+
+ /* speculative alloc of new mcg */
+ mcg = kzalloc(sizeof(*mcg), GFP_KERNEL);
+ if (!mcg) {
+ err = -ENOMEM;
+ goto err_dec;
+ }
+
+ spin_lock_bh(&rxe->mcg_lock);
+ /* re-check to see if someone else just added it */
+ tmp = __rxe_lookup_mcg(rxe, mgid);
+ if (tmp) {
+ spin_unlock_bh(&rxe->mcg_lock);
+ atomic_dec(&rxe->mcg_num);
+ kfree(mcg);
+ return tmp;
+ }
+
+ __rxe_init_mcg(rxe, mgid, mcg);
+ spin_unlock_bh(&rxe->mcg_lock);
+
+ /* add mcast address outside of lock */
+ err = rxe_mcast_add(rxe, mgid);
+ if (!err)
+ return mcg;
+
+ kfree(mcg);
+err_dec:
+ atomic_dec(&rxe->mcg_num);
+ return ERR_PTR(err);
+}
+
+/**
+ * rxe_cleanup_mcg - cleanup mcg for kref_put
+ * @kref: struct kref embedded in mcg
+ */
+void rxe_cleanup_mcg(struct kref *kref)
+{
+ struct rxe_mcg *mcg = container_of(kref, typeof(*mcg), ref_cnt);
+
+ kfree(mcg);
+}
+
+/**
+ * __rxe_destroy_mcg - destroy mcg object holding rxe->mcg_lock
+ * @mcg: the mcg object
+ *
+ * Context: caller is holding rxe->mcg_lock
+ * no qp's are attached to mcg
+ */
+static void __rxe_destroy_mcg(struct rxe_mcg *mcg)
+{
+ struct rxe_dev *rxe = mcg->rxe;
+
+ /* remove mcg from red-black tree then drop ref */
+ __rxe_remove_mcg(mcg);
+ kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
+
+ atomic_dec(&rxe->mcg_num);
+}
+
+/**
+ * rxe_destroy_mcg - destroy mcg object
+ * @mcg: the mcg object
+ *
+ * Context: no qp's are attached to mcg
+ */
+static void rxe_destroy_mcg(struct rxe_mcg *mcg)
+{
+ /* delete mcast address outside of lock */
+ rxe_mcast_del(mcg->rxe, &mcg->mgid);
+
+ spin_lock_bh(&mcg->rxe->mcg_lock);
+ __rxe_destroy_mcg(mcg);
+ spin_unlock_bh(&mcg->rxe->mcg_lock);
+}
+
+/**
+ * __rxe_init_mca - initialize a new mca holding lock
+ * @qp: qp object
+ * @mcg: mcg object
+ * @mca: empty space for new mca
+ *
+ * Context: caller must hold references on qp and mcg, rxe->mcg_lock
+ * and pass memory for new mca
+ *
+ * Returns: 0 on success else an error
+ */
+static int __rxe_init_mca(struct rxe_qp *qp, struct rxe_mcg *mcg,
+ struct rxe_mca *mca)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ int n;
+
+ n = atomic_inc_return(&rxe->mcg_attach);
+ if (n > rxe->attr.max_total_mcast_qp_attach) {
+ atomic_dec(&rxe->mcg_attach);
+ return -ENOMEM;
+ }
+
+ n = atomic_inc_return(&mcg->qp_num);
+ if (n > rxe->attr.max_mcast_qp_attach) {
+ atomic_dec(&mcg->qp_num);
+ atomic_dec(&rxe->mcg_attach);
+ return -ENOMEM;
+ }
+
+ atomic_inc(&qp->mcg_num);
+
+ rxe_get(qp);
+ mca->qp = qp;
+
+ list_add_tail(&mca->qp_list, &mcg->qp_list);
+
+ return 0;
+}
+
+/**
+ * rxe_attach_mcg - attach qp to mcg if not already attached
+ * @mcg: mcg object
+ * @qp: qp object
+ *
+ * Context: caller must hold reference on qp and mcg.
+ * Returns: 0 on success else an error
+ */
+static int rxe_attach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp)
+{
+ struct rxe_dev *rxe = mcg->rxe;
+ struct rxe_mca *mca, *tmp;
+ int err;
+
+ /* check to see if the qp is already a member of the group */
+ spin_lock_bh(&rxe->mcg_lock);
+ list_for_each_entry(mca, &mcg->qp_list, qp_list) {
+ if (mca->qp == qp) {
+ spin_unlock_bh(&rxe->mcg_lock);
+ return 0;
+ }
+ }
+ spin_unlock_bh(&rxe->mcg_lock);
+
+ /* speculative alloc new mca without using GFP_ATOMIC */
+ mca = kzalloc(sizeof(*mca), GFP_KERNEL);
+ if (!mca)
+ return -ENOMEM;
+
+ spin_lock_bh(&rxe->mcg_lock);
+ /* re-check to see if someone else just attached qp */
+ list_for_each_entry(tmp, &mcg->qp_list, qp_list) {
+ if (tmp->qp == qp) {
+ kfree(mca);
+ err = 0;
+ goto out;
+ }
+ }
+
+ err = __rxe_init_mca(qp, mcg, mca);
+ if (err)
+ kfree(mca);
+out:
+ spin_unlock_bh(&rxe->mcg_lock);
+ return err;
+}
+
+/**
+ * __rxe_cleanup_mca - cleanup mca object holding lock
+ * @mca: mca object
+ * @mcg: mcg object
+ *
+ * Context: caller must hold a reference to mcg and rxe->mcg_lock
+ */
+static void __rxe_cleanup_mca(struct rxe_mca *mca, struct rxe_mcg *mcg)
+{
+ list_del(&mca->qp_list);
+
+ atomic_dec(&mcg->qp_num);
+ atomic_dec(&mcg->rxe->mcg_attach);
+ atomic_dec(&mca->qp->mcg_num);
+ rxe_put(mca->qp);
+
+ kfree(mca);
+}
+
+/**
+ * rxe_detach_mcg - detach qp from mcg
+ * @mcg: mcg object
+ * @qp: qp object
+ *
+ * Returns: 0 on success else an error if qp is not attached.
+ */
+static int rxe_detach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp)
+{
+ struct rxe_dev *rxe = mcg->rxe;
+ struct rxe_mca *mca, *tmp;
+
+ spin_lock_bh(&rxe->mcg_lock);
+ list_for_each_entry_safe(mca, tmp, &mcg->qp_list, qp_list) {
+ if (mca->qp == qp) {
+ __rxe_cleanup_mca(mca, mcg);
+
+ /* if the number of qp's attached to the
+ * mcast group falls to zero go ahead and
+ * tear it down. This will not free the
+ * object since we are still holding a ref
+ * from the caller
+ */
+ if (atomic_read(&mcg->qp_num) <= 0)
+ __rxe_destroy_mcg(mcg);
+
+ spin_unlock_bh(&rxe->mcg_lock);
+ return 0;
+ }
+ }
+
+ /* we didn't find the qp on the list */
+ spin_unlock_bh(&rxe->mcg_lock);
+ return -EINVAL;
+}
+
+/**
+ * rxe_attach_mcast - attach qp to multicast group (see IBA-11.3.1)
+ * @ibqp: (IB) qp object
+ * @mgid: multicast IP address
+ * @mlid: multicast LID, ignored for RoCEv2 (see IBA-A17.5.6)
+ *
+ * Returns: 0 on success else an errno
+ */
+int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
+{
+ int err;
+ struct rxe_dev *rxe = to_rdev(ibqp->device);
+ struct rxe_qp *qp = to_rqp(ibqp);
+ struct rxe_mcg *mcg;
+
+ /* takes a ref on mcg if successful */
+ mcg = rxe_get_mcg(rxe, mgid);
+ if (IS_ERR(mcg))
+ return PTR_ERR(mcg);
+
+ err = rxe_attach_mcg(mcg, qp);
+
+ /* if we failed to attach the first qp to mcg tear it down */
+ if (atomic_read(&mcg->qp_num) == 0)
+ rxe_destroy_mcg(mcg);
+
+ kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
+
+ return err;
+}
+
+/**
+ * rxe_detach_mcast - detach qp from multicast group (see IBA-11.3.2)
+ * @ibqp: address of (IB) qp object
+ * @mgid: multicast IP address
+ * @mlid: multicast LID, ignored for RoCEv2 (see IBA-A17.5.6)
+ *
+ * Returns: 0 on success else an errno
+ */
+int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
+{
+ struct rxe_dev *rxe = to_rdev(ibqp->device);
+ struct rxe_qp *qp = to_rqp(ibqp);
+ struct rxe_mcg *mcg;
+ int err;
+
+ mcg = rxe_lookup_mcg(rxe, mgid);
+ if (!mcg)
+ return -EINVAL;
+
+ err = rxe_detach_mcg(mcg, qp);
+ kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
+
+ return err;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
new file mode 100644
index 000000000000..6b7f2bd69879
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <rdma/uverbs_ioctl.h>
+
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+
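+/* kref release: remove the mmap info from the pending list and free its buffer and itself */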
+void rxe_mmap_release(struct kref *ref)
+{
+ struct rxe_mmap_info *ip = container_of(ref,
+ struct rxe_mmap_info, ref);
+ struct rxe_dev *rxe = to_rdev(ip->context->device);
+
+ spin_lock_bh(&rxe->pending_lock);
+
+ if (!list_empty(&ip->pending_mmaps))
+ list_del(&ip->pending_mmaps);
+
+ spin_unlock_bh(&rxe->pending_lock);
+
+ vfree(ip->obj); /* buf */
+ kfree(ip);
+}
+
+/*
+ * open and close keep track of how many times the memory region is mapped,
+ * so that it is not released while it is still mapped.
+ */
+static void rxe_vma_open(struct vm_area_struct *vma)
+{
+ struct rxe_mmap_info *ip = vma->vm_private_data;
+
+ kref_get(&ip->ref);
+}
+
+static void rxe_vma_close(struct vm_area_struct *vma)
+{
+ struct rxe_mmap_info *ip = vma->vm_private_data;
+
+ kref_put(&ip->ref, rxe_mmap_release);
+}
+
+static const struct vm_operations_struct rxe_vm_ops = {
+ .open = rxe_vma_open,
+ .close = rxe_vma_close,
+};
+
+/**
+ * rxe_mmap - create a new mmap region
+ * @context: the IB user context of the process making the mmap() call
+ * @vma: the VMA to be initialized
+ * Return zero if the mmap is OK. Otherwise, return an errno.
+ */
+int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ struct rxe_dev *rxe = to_rdev(context->device);
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long size = vma->vm_end - vma->vm_start;
+ struct rxe_mmap_info *ip, *pp;
+ int ret;
+
+ /*
+ * Search the device's list of objects waiting for a mmap call.
+ * Normally, this list is very short since a call to create a
+ * CQ, QP, or SRQ is soon followed by a call to mmap().
+ */
+ spin_lock_bh(&rxe->pending_lock);
+ list_for_each_entry_safe(ip, pp, &rxe->pending_mmaps, pending_mmaps) {
+ if (context != ip->context || (__u64)offset != ip->info.offset)
+ continue;
+
+ /* Don't allow a mmap larger than the object. */
+ if (size > ip->info.size) {
+ rxe_dbg_dev(rxe, "mmap region is larger than the object!\n");
+ spin_unlock_bh(&rxe->pending_lock);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ goto found_it;
+ }
+ rxe_dbg_dev(rxe, "unable to find pending mmap info\n");
+ spin_unlock_bh(&rxe->pending_lock);
+ ret = -EINVAL;
+ goto done;
+
+found_it:
+ list_del_init(&ip->pending_mmaps);
+ spin_unlock_bh(&rxe->pending_lock);
+
+ ret = remap_vmalloc_range(vma, ip->obj, 0);
+ if (ret) {
+ rxe_dbg_dev(rxe, "err %d from remap_vmalloc_range\n", ret);
+ goto done;
+ }
+
+ vma->vm_ops = &rxe_vm_ops;
+ vma->vm_private_data = ip;
+ rxe_vma_open(vma);
+done:
+ return ret;
+}
+
+/*
+ * Allocate information for rxe_mmap
+ */
+struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size,
+ struct ib_udata *udata, void *obj)
+{
+ struct rxe_mmap_info *ip;
+
+ if (!udata)
+ return ERR_PTR(-EINVAL);
+
+ ip = kmalloc(sizeof(*ip), GFP_KERNEL);
+ if (!ip)
+ return ERR_PTR(-ENOMEM);
+
+ size = PAGE_ALIGN(size);
+
+ spin_lock_bh(&rxe->mmap_offset_lock);
+
+ if (rxe->mmap_offset == 0)
+ rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
+
+ ip->info.offset = rxe->mmap_offset;
+ rxe->mmap_offset += ALIGN(size, SHMLBA);
+
+ spin_unlock_bh(&rxe->mmap_offset_lock);
+
+ INIT_LIST_HEAD(&ip->pending_mmaps);
+ ip->info.size = size;
+ ip->context =
+ container_of(udata, struct uverbs_attr_bundle, driver_udata)
+ ->context;
+ ip->obj = obj;
+ kref_init(&ip->ref);
+
+ return ip;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
new file mode 100644
index 000000000000..bcb97b3ea58a
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -0,0 +1,730 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#include <linux/libnvdimm.h>
+
+#include "rxe.h"
+#include "rxe_loc.h"
+
+/* Return a random 8 bit key value that is
+ * different from last_key. Pass last_key as -1
+ * if this is the first key for an MR or MW.
+ */
+u8 rxe_get_next_key(u32 last_key)
+{
+ u8 key;
+
+ do {
+ get_random_bytes(&key, 1);
+ } while (key == last_key);
+
+ return key;
+}
+
+int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
+{
+ switch (mr->ibmr.type) {
+ case IB_MR_TYPE_DMA:
+ return 0;
+
+ case IB_MR_TYPE_USER:
+ case IB_MR_TYPE_MEM_REG:
+ if (iova < mr->ibmr.iova ||
+ iova + length > mr->ibmr.iova + mr->ibmr.length) {
+ rxe_dbg_mr(mr, "iova/length out of range\n");
+ return -EINVAL;
+ }
+ return 0;
+
+ default:
+ rxe_dbg_mr(mr, "mr type not supported\n");
+ return -EINVAL;
+ }
+}
+
+void rxe_mr_init(int access, struct rxe_mr *mr)
+{
+ u32 key = mr->elem.index << 8 | rxe_get_next_key(-1);
+
+ /* set ibmr->l/rkey and also copy into the private l/rkey.
+ * For user MRs these will always be the same.
+ * For cases where the caller 'owns' the key portion
+ * they may be different until a REG_MR WQE is executed.
+ */
+ mr->lkey = mr->ibmr.lkey = key;
+ mr->rkey = mr->ibmr.rkey = key;
+
+ mr->access = access;
+ mr->ibmr.page_size = PAGE_SIZE;
+ mr->page_mask = PAGE_MASK;
+ mr->page_shift = PAGE_SHIFT;
+ mr->state = RXE_MR_STATE_INVALID;
+}
+
+void rxe_mr_init_dma(int access, struct rxe_mr *mr)
+{
+ rxe_mr_init(access, mr);
+
+ mr->state = RXE_MR_STATE_VALID;
+ mr->ibmr.type = IB_MR_TYPE_DMA;
+}
+
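+/* index into mr->page_list of the page containing iova */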
+static unsigned long rxe_mr_iova_to_index(struct rxe_mr *mr, u64 iova)
+{
+ return (iova >> mr->page_shift) - (mr->ibmr.iova >> mr->page_shift);
+}
+
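+/* byte offset of iova within its page */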
+static unsigned long rxe_mr_iova_to_page_offset(struct rxe_mr *mr, u64 iova)
+{
+ return iova & (mr_page_size(mr) - 1);
+}
+
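+/* true if the page is backed by persistent memory */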
+static bool is_pmem_page(struct page *pg)
+{
+ unsigned long paddr = page_to_phys(pg);
+
+ return REGION_INTERSECTS ==
+ region_intersects(paddr, PAGE_SIZE, IORESOURCE_MEM,
+ IORES_DESC_PERSISTENT_MEMORY);
+}
+
+static int rxe_mr_fill_pages_from_sgt(struct rxe_mr *mr, struct sg_table *sgt)
+{
+ XA_STATE(xas, &mr->page_list, 0);
+ struct sg_page_iter sg_iter;
+ struct page *page;
+ bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT);
+
+ __sg_page_iter_start(&sg_iter, sgt->sgl, sgt->orig_nents, 0);
+ if (!__sg_page_iter_next(&sg_iter))
+ return 0;
+
+ do {
+ xas_lock(&xas);
+ while (true) {
+ page = sg_page_iter_page(&sg_iter);
+
+ if (persistent && !is_pmem_page(page)) {
+ rxe_dbg_mr(mr, "Page can't be persistent\n");
+ xas_set_err(&xas, -EINVAL);
+ break;
+ }
+
+ xas_store(&xas, page);
+ if (xas_error(&xas))
+ break;
+ xas_next(&xas);
+ if (!__sg_page_iter_next(&sg_iter))
+ break;
+ }
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ return xas_error(&xas);
+}
+
+int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
+ int access, struct rxe_mr *mr)
+{
+ struct ib_umem *umem;
+ int err;
+
+ rxe_mr_init(access, mr);
+
+ xa_init(&mr->page_list);
+
+ umem = ib_umem_get(&rxe->ib_dev, start, length, access);
+ if (IS_ERR(umem)) {
+ rxe_dbg_mr(mr, "Unable to pin memory region err = %d\n",
+ (int)PTR_ERR(umem));
+ return PTR_ERR(umem);
+ }
+
+ err = rxe_mr_fill_pages_from_sgt(mr, &umem->sgt_append.sgt);
+ if (err) {
+ ib_umem_release(umem);
+ return err;
+ }
+
+ mr->umem = umem;
+ mr->ibmr.type = IB_MR_TYPE_USER;
+ mr->state = RXE_MR_STATE_VALID;
+
+ return 0;
+}
+
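+/* pre-allocate num_buf empty slots in the MR page list xarray for a fast-reg MR */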
+static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
+{
+ XA_STATE(xas, &mr->page_list, 0);
+ int i = 0;
+ int err;
+
+ xa_init(&mr->page_list);
+
+ do {
+ xas_lock(&xas);
+ while (i != num_buf) {
+ xas_store(&xas, XA_ZERO_ENTRY);
+ if (xas_error(&xas))
+ break;
+ xas_next(&xas);
+ i++;
+ }
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ err = xas_error(&xas);
+ if (err)
+ return err;
+
+ mr->num_buf = num_buf;
+
+ return 0;
+}
+
+int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
+{
+ int err;
+
+ /* always allow remote access for FMRs */
+ rxe_mr_init(RXE_ACCESS_REMOTE, mr);
+
+ err = rxe_mr_alloc(mr, max_pages);
+ if (err)
+ goto err1;
+
+ mr->state = RXE_MR_STATE_FREE;
+ mr->ibmr.type = IB_MR_TYPE_MEM_REG;
+
+ return 0;
+
+err1:
+ return err;
+}
+
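+/* ib_sg_to_pages() callback: store the next page of the MR in the page list */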
+static int rxe_set_page(struct ib_mr *ibmr, u64 dma_addr)
+{
+ struct rxe_mr *mr = to_rmr(ibmr);
+ struct page *page = ib_virt_dma_to_page(dma_addr);
+ bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT);
+ int err;
+
+ if (persistent && !is_pmem_page(page)) {
+ rxe_dbg_mr(mr, "Page cannot be persistent\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(mr->nbuf == mr->num_buf))
+ return -ENOMEM;
+
+ err = xa_err(xa_store(&mr->page_list, mr->nbuf, page, GFP_KERNEL));
+ if (err)
+ return err;
+
+ mr->nbuf++;
+ return 0;
+}
+
+int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sgl,
+ int sg_nents, unsigned int *sg_offset)
+{
+ struct rxe_mr *mr = to_rmr(ibmr);
+ unsigned int page_size = mr_page_size(mr);
+
+ mr->nbuf = 0;
+ mr->page_shift = ilog2(page_size);
+ mr->page_mask = ~((u64)page_size - 1);
+ mr->page_offset = mr->ibmr.iova & (page_size - 1);
+
+ return ib_sg_to_pages(ibmr, sgl, sg_nents, sg_offset, rxe_set_page);
+}
+
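+/* copy data between a kernel buffer and an MR mapped through the page_list xarray */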
+static int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,
+ unsigned int length, enum rxe_mr_copy_dir dir)
+{
+ unsigned int page_offset = rxe_mr_iova_to_page_offset(mr, iova);
+ unsigned long index = rxe_mr_iova_to_index(mr, iova);
+ unsigned int bytes;
+ struct page *page;
+ void *va;
+
+ while (length) {
+ page = xa_load(&mr->page_list, index);
+ if (!page)
+ return -EFAULT;
+
+ bytes = min_t(unsigned int, length,
+ mr_page_size(mr) - page_offset);
+ va = kmap_local_page(page);
+ if (dir == RXE_FROM_MR_OBJ)
+ memcpy(addr, va + page_offset, bytes);
+ else
+ memcpy(va + page_offset, addr, bytes);
+ kunmap_local(va);
+
+ page_offset = 0;
+ addr += bytes;
+ length -= bytes;
+ index++;
+ }
+
+ return 0;
+}
+
+static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 dma_addr, void *addr,
+ unsigned int length, enum rxe_mr_copy_dir dir)
+{
+ unsigned int page_offset = dma_addr & (PAGE_SIZE - 1);
+ unsigned int bytes;
+ struct page *page;
+ u8 *va;
+
+ while (length) {
+ page = ib_virt_dma_to_page(dma_addr);
+ bytes = min_t(unsigned int, length,
+ PAGE_SIZE - page_offset);
+ va = kmap_local_page(page);
+
+ if (dir == RXE_TO_MR_OBJ)
+ memcpy(va + page_offset, addr, bytes);
+ else
+ memcpy(addr, va + page_offset, bytes);
+
+ kunmap_local(va);
+ page_offset = 0;
+ dma_addr += bytes;
+ addr += bytes;
+ length -= bytes;
+ }
+}
+
+int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
+ unsigned int length, enum rxe_mr_copy_dir dir)
+{
+ int err;
+
+ if (length == 0)
+ return 0;
+
+ if (WARN_ON(!mr))
+ return -EINVAL;
+
+ if (mr->ibmr.type == IB_MR_TYPE_DMA) {
+ rxe_mr_copy_dma(mr, iova, addr, length, dir);
+ return 0;
+ }
+
+ err = mr_check_range(mr, iova, length);
+ if (unlikely(err)) {
+ rxe_dbg_mr(mr, "iova out of range\n");
+ return err;
+ }
+
+ if (is_odp_mr(mr))
+ return rxe_odp_mr_copy(mr, iova, addr, length, dir);
+ else
+ return rxe_mr_copy_xarray(mr, iova, addr, length, dir);
+}
+
+/* copy data in or out of a wqe, i.e. sg list
+ * under the control of a dma descriptor
+ */
+int copy_data(
+ struct rxe_pd *pd,
+ int access,
+ struct rxe_dma_info *dma,
+ void *addr,
+ int length,
+ enum rxe_mr_copy_dir dir)
+{
+ int bytes;
+ struct rxe_sge *sge = &dma->sge[dma->cur_sge];
+ int offset = dma->sge_offset;
+ int resid = dma->resid;
+ struct rxe_mr *mr = NULL;
+ u64 iova;
+ int err;
+
+ if (length == 0)
+ return 0;
+
+ if (length > resid) {
+ err = -EINVAL;
+ goto err2;
+ }
+
+ if (sge->length && (offset < sge->length)) {
+ mr = lookup_mr(pd, access, sge->lkey, RXE_LOOKUP_LOCAL);
+ if (!mr) {
+ err = -EINVAL;
+ goto err1;
+ }
+ }
+
+ while (length > 0) {
+ bytes = length;
+
+ if (offset >= sge->length) {
+ if (mr) {
+ rxe_put(mr);
+ mr = NULL;
+ }
+ sge++;
+ dma->cur_sge++;
+ offset = 0;
+
+ if (dma->cur_sge >= dma->num_sge) {
+ err = -ENOSPC;
+ goto err2;
+ }
+
+ if (sge->length) {
+ mr = lookup_mr(pd, access, sge->lkey,
+ RXE_LOOKUP_LOCAL);
+ if (!mr) {
+ err = -EINVAL;
+ goto err1;
+ }
+ } else {
+ continue;
+ }
+ }
+
+ if (bytes > sge->length - offset)
+ bytes = sge->length - offset;
+
+ if (bytes > 0) {
+ iova = sge->addr + offset;
+ err = rxe_mr_copy(mr, iova, addr, bytes, dir);
+ if (err)
+ goto err2;
+
+ offset += bytes;
+ resid -= bytes;
+ length -= bytes;
+ addr += bytes;
+ }
+ }
+
+ dma->sge_offset = offset;
+ dma->resid = resid;
+
+ if (mr)
+ rxe_put(mr);
+
+ return 0;
+
+err2:
+ if (mr)
+ rxe_put(mr);
+err1:
+ return err;
+}
+
+static int rxe_mr_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
+{
+ unsigned int page_offset;
+ unsigned long index;
+ struct page *page;
+ unsigned int bytes;
+ int err;
+ u8 *va;
+
+ err = mr_check_range(mr, iova, length);
+ if (err)
+ return err;
+
+ while (length > 0) {
+ index = rxe_mr_iova_to_index(mr, iova);
+ page = xa_load(&mr->page_list, index);
+ page_offset = rxe_mr_iova_to_page_offset(mr, iova);
+ if (!page)
+ return -EFAULT;
+ bytes = min_t(unsigned int, length,
+ mr_page_size(mr) - page_offset);
+
+ va = kmap_local_page(page);
+ arch_wb_cache_pmem(va + page_offset, bytes);
+ kunmap_local(va);
+
+ length -= bytes;
+ iova += bytes;
+ page_offset = 0;
+ }
+
+ return 0;
+}
+
+int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 start, unsigned int length)
+{
+ int err;
+
+ /* mr must be valid even if length is zero */
+ if (WARN_ON(!mr))
+ return -EINVAL;
+
+ if (length == 0)
+ return 0;
+
+ if (mr->ibmr.type == IB_MR_TYPE_DMA)
+ return -EFAULT;
+
+ if (is_odp_mr(mr))
+ err = rxe_odp_flush_pmem_iova(mr, start, length);
+ else
+ err = rxe_mr_flush_pmem_iova(mr, start, length);
+
+ return err;
+}
+
+/* Guarantee atomicity of atomic operations at the machine level. */
+DEFINE_SPINLOCK(atomic_ops_lock);
+
+enum resp_states rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val)
+{
+ unsigned int page_offset;
+ struct page *page;
+ u64 value;
+ u64 *va;
+
+ if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
+ rxe_dbg_mr(mr, "mr not in valid state\n");
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+
+ if (mr->ibmr.type == IB_MR_TYPE_DMA) {
+ page_offset = iova & (PAGE_SIZE - 1);
+ page = ib_virt_dma_to_page(iova);
+ } else {
+ unsigned long index;
+ int err;
+
+ err = mr_check_range(mr, iova, sizeof(value));
+ if (err) {
+ rxe_dbg_mr(mr, "iova out of range\n");
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+ page_offset = rxe_mr_iova_to_page_offset(mr, iova);
+ index = rxe_mr_iova_to_index(mr, iova);
+ page = xa_load(&mr->page_list, index);
+ if (!page)
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+
+ if (unlikely(page_offset & 0x7)) {
+ rxe_dbg_mr(mr, "iova not aligned\n");
+ return RESPST_ERR_MISALIGNED_ATOMIC;
+ }
+
+ va = kmap_local_page(page);
+
+ spin_lock_bh(&atomic_ops_lock);
+ value = *orig_val = va[page_offset >> 3];
+
+ if (opcode == IB_OPCODE_RC_COMPARE_SWAP) {
+ if (value == compare)
+ va[page_offset >> 3] = swap_add;
+ } else {
+ value += swap_add;
+ va[page_offset >> 3] = value;
+ }
+ spin_unlock_bh(&atomic_ops_lock);
+
+ kunmap_local(va);
+
+ return RESPST_NONE;
+}
+
+enum resp_states rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
+{
+ unsigned int page_offset;
+ struct page *page;
+ u64 *va;
+
+ if (mr->ibmr.type == IB_MR_TYPE_DMA) {
+ page_offset = iova & (PAGE_SIZE - 1);
+ page = ib_virt_dma_to_page(iova);
+ } else {
+ unsigned long index;
+ int err;
+
+ /* See IBA oA19-28 */
+ err = mr_check_range(mr, iova, sizeof(value));
+ if (unlikely(err)) {
+ rxe_dbg_mr(mr, "iova out of range\n");
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+ page_offset = rxe_mr_iova_to_page_offset(mr, iova);
+ index = rxe_mr_iova_to_index(mr, iova);
+ page = xa_load(&mr->page_list, index);
+ if (!page)
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+
+ /* See IBA A19.4.2 */
+ if (unlikely(page_offset & 0x7)) {
+ rxe_dbg_mr(mr, "misaligned address\n");
+ return RESPST_ERR_MISALIGNED_ATOMIC;
+ }
+
+ va = kmap_local_page(page);
+ /* Do atomic write after all prior operations have completed */
+ smp_store_release(&va[page_offset >> 3], value);
+ kunmap_local(va);
+
+ return RESPST_NONE;
+}
+
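+/* advance the sg list in a dma descriptor by length bytes without copying any data */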
+int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
+{
+ struct rxe_sge *sge = &dma->sge[dma->cur_sge];
+ int offset = dma->sge_offset;
+ int resid = dma->resid;
+
+ while (length) {
+ unsigned int bytes;
+
+ if (offset >= sge->length) {
+ sge++;
+ dma->cur_sge++;
+ offset = 0;
+ if (dma->cur_sge >= dma->num_sge)
+ return -ENOSPC;
+ }
+
+ bytes = length;
+
+ if (bytes > sge->length - offset)
+ bytes = sge->length - offset;
+
+ offset += bytes;
+ resid -= bytes;
+ length -= bytes;
+ }
+
+ dma->sge_offset = offset;
+ dma->resid = resid;
+
+ return 0;
+}
+
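+/* look up an MR by lkey or rkey and check the key, pd, access and state;
+ * returns the MR with a reference held or NULL on failure
+ */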
+struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
+ enum rxe_mr_lookup_type type)
+{
+ struct rxe_mr *mr;
+ struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
+ int index = key >> 8;
+
+ mr = rxe_pool_get_index(&rxe->mr_pool, index);
+ if (!mr)
+ return NULL;
+
+ if (unlikely((type == RXE_LOOKUP_LOCAL && mr->lkey != key) ||
+ (type == RXE_LOOKUP_REMOTE && mr->rkey != key) ||
+ mr_pd(mr) != pd || ((access & mr->access) != access) ||
+ mr->state != RXE_MR_STATE_VALID)) {
+ rxe_put(mr);
+ mr = NULL;
+ }
+
+ return mr;
+}
+
+int rxe_invalidate_mr(struct rxe_qp *qp, u32 key)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct rxe_mr *mr;
+ int remote;
+ int ret;
+
+ mr = rxe_pool_get_index(&rxe->mr_pool, key >> 8);
+ if (!mr) {
+ rxe_dbg_qp(qp, "No MR for key %#x\n", key);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ remote = mr->access & RXE_ACCESS_REMOTE;
+ if (remote ? (key != mr->rkey) : (key != mr->lkey)) {
+ rxe_dbg_mr(mr, "wr key (%#x) doesn't match mr key (%#x)\n",
+ key, (remote ? mr->rkey : mr->lkey));
+ ret = -EINVAL;
+ goto err_drop_ref;
+ }
+
+ if (atomic_read(&mr->num_mw) > 0) {
+ rxe_dbg_mr(mr, "Attempt to invalidate an MR while bound to MWs\n");
+ ret = -EINVAL;
+ goto err_drop_ref;
+ }
+
+ if (unlikely(mr->ibmr.type != IB_MR_TYPE_MEM_REG)) {
+ rxe_dbg_mr(mr, "Type (%d) is wrong\n", mr->ibmr.type);
+ ret = -EINVAL;
+ goto err_drop_ref;
+ }
+
+ mr->state = RXE_MR_STATE_FREE;
+ ret = 0;
+
+err_drop_ref:
+ rxe_put(mr);
+err:
+ return ret;
+}
+
+/* user can (re)register fast MR by executing a REG_MR WQE.
+ * user is expected to hold a reference on the ib mr until the
+ * WQE completes.
+ * Once a fast MR is created this is the only way to change the
+ * private keys. It is the responsibility of the user to maintain
+ * the ib mr keys in sync with rxe mr keys.
+ */
+int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+ struct rxe_mr *mr = to_rmr(wqe->wr.wr.reg.mr);
+ u32 key = wqe->wr.wr.reg.key;
+ u32 access = wqe->wr.wr.reg.access;
+
+ /* user can only register MR in free state */
+ if (unlikely(mr->state != RXE_MR_STATE_FREE)) {
+ rxe_dbg_mr(mr, "mr->lkey = 0x%x not free\n", mr->lkey);
+ return -EINVAL;
+ }
+
+ /* user can only register mr with qp in same protection domain */
+ if (unlikely(qp->ibqp.pd != mr->ibmr.pd)) {
+ rxe_dbg_mr(mr, "qp->pd and mr->pd don't match\n");
+ return -EINVAL;
+ }
+
+ /* user is only allowed to change key portion of l/rkey */
+ if (unlikely((mr->lkey & ~0xff) != (key & ~0xff))) {
+ rxe_dbg_mr(mr, "key = 0x%x has wrong index mr->lkey = 0x%x\n",
+ key, mr->lkey);
+ return -EINVAL;
+ }
+
+ mr->access = access;
+ mr->lkey = key;
+ mr->rkey = key;
+ mr->ibmr.iova = wqe->wr.wr.reg.mr->iova;
+ mr->state = RXE_MR_STATE_VALID;
+
+ return 0;
+}
+
+void rxe_mr_cleanup(struct rxe_pool_elem *elem)
+{
+ struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);
+
+ rxe_put(mr_pd(mr));
+ ib_umem_release(mr->umem);
+
+ if (mr->ibmr.type != IB_MR_TYPE_DMA)
+ xa_destroy(&mr->page_list);
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
new file mode 100644
index 000000000000..379e65bfcd49
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_mw.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020 Hewlett Packard Enterprise, Inc. All rights reserved.
+ */
+
+/*
+ * The rdma_rxe driver supports type 1 or type 2B memory windows.
+ * Type 1 MWs are created by ibv_alloc_mw() verbs calls and bound by
+ * ibv_bind_mw() calls. Type 2 MWs are also created by ibv_alloc_mw()
+ * but bound by bind_mw work requests. The ibv_bind_mw() call is converted
+ * by libibverbs to a bind_mw work request.
+ */
+
+#include "rxe.h"
+
+int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
+{
+ struct rxe_mw *mw = to_rmw(ibmw);
+ struct rxe_pd *pd = to_rpd(ibmw->pd);
+ struct rxe_dev *rxe = to_rdev(ibmw->device);
+ int ret;
+
+ rxe_get(pd);
+
+ ret = rxe_add_to_pool(&rxe->mw_pool, mw);
+ if (ret) {
+ rxe_put(pd);
+ return ret;
+ }
+
+ mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1);
+ mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ?
+ RXE_MW_STATE_FREE : RXE_MW_STATE_VALID;
+ spin_lock_init(&mw->lock);
+
+ rxe_finalize(mw);
+
+ return 0;
+}
+
+int rxe_dealloc_mw(struct ib_mw *ibmw)
+{
+ struct rxe_mw *mw = to_rmw(ibmw);
+
+ rxe_cleanup(mw);
+
+ return 0;
+}
+
+static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ struct rxe_mw *mw, struct rxe_mr *mr, int access)
+{
+ if (mw->ibmw.type == IB_MW_TYPE_1) {
+ if (unlikely(mw->state != RXE_MW_STATE_VALID)) {
+ rxe_dbg_mw(mw,
+ "attempt to bind a type 1 MW not in the valid state\n");
+ return -EINVAL;
+ }
+
+ /* o10-36.2.2 */
+ if (unlikely((access & IB_ZERO_BASED))) {
+ rxe_dbg_mw(mw, "attempt to bind a zero based type 1 MW\n");
+ return -EINVAL;
+ }
+ }
+
+ if (mw->ibmw.type == IB_MW_TYPE_2) {
+ /* o10-37.2.30 */
+ if (unlikely(mw->state != RXE_MW_STATE_FREE)) {
+ rxe_dbg_mw(mw,
+ "attempt to bind a type 2 MW not in the free state\n");
+ return -EINVAL;
+ }
+
+ /* C10-72 */
+ if (unlikely(qp->pd != to_rpd(mw->ibmw.pd))) {
+ rxe_dbg_mw(mw,
+ "attempt to bind type 2 MW with qp with different PD\n");
+ return -EINVAL;
+ }
+
+ /* o10-37.2.40 */
+ if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) {
+ rxe_dbg_mw(mw,
+ "attempt to invalidate type 2 MW by binding with NULL or zero length MR\n");
+ return -EINVAL;
+ }
+ }
+
+ /* remaining checks only apply when a non-NULL MR is supplied */
+ if (!mr)
+ return 0;
+
+ if (unlikely(mr->access & IB_ZERO_BASED)) {
+ rxe_dbg_mw(mw, "attempt to bind MW to zero based MR\n");
+ return -EINVAL;
+ }
+
+ /* C10-73 */
+ if (unlikely(!(mr->access & IB_ACCESS_MW_BIND))) {
+ rxe_dbg_mw(mw,
+ "attempt to bind an MW to an MR without bind access\n");
+ return -EINVAL;
+ }
+
+ /* C10-74 */
+ if (unlikely((access &
+ (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC)) &&
+ !(mr->access & IB_ACCESS_LOCAL_WRITE))) {
+ rxe_dbg_mw(mw,
+ "attempt to bind a writable MW to an MR without local write access\n");
+ return -EINVAL;
+ }
+
+ /* C10-75 */
+ if (access & IB_ZERO_BASED) {
+ if (unlikely(wqe->wr.wr.mw.length > mr->ibmr.length)) {
+ rxe_dbg_mw(mw,
+ "attempt to bind a ZB MW outside of the MR\n");
+ return -EINVAL;
+ }
+ } else {
+ if (unlikely((wqe->wr.wr.mw.addr < mr->ibmr.iova) ||
+ ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
+ (mr->ibmr.iova + mr->ibmr.length)))) {
+ rxe_dbg_mw(mw,
+ "attempt to bind a VA MW outside of the MR\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ struct rxe_mw *mw, struct rxe_mr *mr, int access)
+{
+ u32 key = wqe->wr.wr.mw.rkey & 0xff;
+
+ mw->rkey = (mw->rkey & ~0xff) | key;
+ mw->access = access;
+ mw->state = RXE_MW_STATE_VALID;
+ mw->addr = wqe->wr.wr.mw.addr;
+ mw->length = wqe->wr.wr.mw.length;
+
+ if (mw->mr) {
+ rxe_put(mw->mr);
+ atomic_dec(&mw->mr->num_mw);
+ mw->mr = NULL;
+ }
+
+ if (mw->length) {
+ mw->mr = mr;
+ atomic_inc(&mr->num_mw);
+ rxe_get(mr);
+ }
+
+ if (mw->ibmw.type == IB_MW_TYPE_2) {
+ rxe_get(qp);
+ mw->qp = qp;
+ }
+}
+
+int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+ int ret;
+ struct rxe_mw *mw;
+ struct rxe_mr *mr;
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ u32 mw_rkey = wqe->wr.wr.mw.mw_rkey;
+ u32 mr_lkey = wqe->wr.wr.mw.mr_lkey;
+ int access = wqe->wr.wr.mw.access;
+
+ mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8);
+ if (unlikely(!mw)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (unlikely(mw->rkey != mw_rkey)) {
+ ret = -EINVAL;
+ goto err_drop_mw;
+ }
+
+ if (likely(wqe->wr.wr.mw.length)) {
+ mr = rxe_pool_get_index(&rxe->mr_pool, mr_lkey >> 8);
+ if (unlikely(!mr)) {
+ ret = -EINVAL;
+ goto err_drop_mw;
+ }
+
+ if (unlikely(mr->lkey != mr_lkey)) {
+ ret = -EINVAL;
+ goto err_drop_mr;
+ }
+ } else {
+ mr = NULL;
+ }
+
+ if (access & ~RXE_ACCESS_SUPPORTED_MW) {
+ rxe_err_mw(mw, "access %#x not supported\n", access);
+ ret = -EOPNOTSUPP;
+ goto err_drop_mr;
+ }
+
+ spin_lock_bh(&mw->lock);
+
+ ret = rxe_check_bind_mw(qp, wqe, mw, mr, access);
+ if (ret)
+ goto err_unlock;
+
+ rxe_do_bind_mw(qp, wqe, mw, mr, access);
+err_unlock:
+ spin_unlock_bh(&mw->lock);
+err_drop_mr:
+ if (mr)
+ rxe_put(mr);
+err_drop_mw:
+ rxe_put(mw);
+err:
+ return ret;
+}
+
+static int rxe_check_invalidate_mw(struct rxe_qp *qp, struct rxe_mw *mw)
+{
+ if (unlikely(mw->state == RXE_MW_STATE_INVALID))
+ return -EINVAL;
+
+ /* o10-37.2.26 */
+ if (unlikely(mw->ibmw.type == IB_MW_TYPE_1))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void rxe_do_invalidate_mw(struct rxe_mw *mw)
+{
+ struct rxe_qp *qp;
+ struct rxe_mr *mr;
+
+ /* valid type 2 MW will always have a QP pointer */
+ qp = mw->qp;
+ mw->qp = NULL;
+ rxe_put(qp);
+
+ /* valid type 2 MW will always have an MR pointer */
+ mr = mw->mr;
+ mw->mr = NULL;
+ atomic_dec(&mr->num_mw);
+ rxe_put(mr);
+
+ mw->access = 0;
+ mw->addr = 0;
+ mw->length = 0;
+ mw->state = RXE_MW_STATE_FREE;
+}
+
+int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct rxe_mw *mw;
+ int ret;
+
+ mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
+ if (!mw) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (rkey != mw->rkey) {
+ ret = -EINVAL;
+ goto err_drop_ref;
+ }
+
+ spin_lock_bh(&mw->lock);
+
+ ret = rxe_check_invalidate_mw(qp, mw);
+ if (ret)
+ goto err_unlock;
+
+ rxe_do_invalidate_mw(mw);
+err_unlock:
+ spin_unlock_bh(&mw->lock);
+err_drop_ref:
+ rxe_put(mw);
+err:
+ return ret;
+}
+
+struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct rxe_pd *pd = to_rpd(qp->ibqp.pd);
+ struct rxe_mw *mw;
+ int index = rkey >> 8;
+
+ mw = rxe_pool_get_index(&rxe->mw_pool, index);
+ if (!mw)
+ return NULL;
+
+ if (unlikely((mw->rkey != rkey) || rxe_mw_pd(mw) != pd ||
+ (mw->ibmw.type == IB_MW_TYPE_2 && mw->qp != qp) ||
+ (mw->length == 0) || ((access & mw->access) != access) ||
+ mw->state != RXE_MW_STATE_VALID)) {
+ rxe_put(mw);
+ return NULL;
+ }
+
+ return mw;
+}
+
+void rxe_mw_cleanup(struct rxe_pool_elem *elem)
+{
+ struct rxe_mw *mw = container_of(elem, typeof(*mw), elem);
+ struct rxe_pd *pd = to_rpd(mw->ibmw.pd);
+
+ rxe_put(pd);
+
+ if (mw->mr) {
+ struct rxe_mr *mr = mw->mr;
+
+ mw->mr = NULL;
+ atomic_dec(&mr->num_mw);
+ rxe_put(mr);
+ }
+
+ if (mw->qp) {
+ struct rxe_qp *qp = mw->qp;
+
+ mw->qp = NULL;
+ rxe_put(qp);
+ }
+
+ mw->access = 0;
+ mw->addr = 0;
+ mw->length = 0;
+ mw->state = RXE_MW_STATE_INVALID;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
new file mode 100644
index 000000000000..ac0183a2ff7a
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -0,0 +1,693 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <net/udp_tunnel.h>
+#include <net/sch_generic.h>
+#include <linux/netfilter.h>
+#include <rdma/ib_addr.h>
+
+#include "rxe.h"
+#include "rxe_net.h"
+#include "rxe_loc.h"
+
+static struct rxe_recv_sockets recv_sockets;
+
+static struct dst_entry *rxe_find_route4(struct rxe_qp *qp,
+ struct net_device *ndev,
+ struct in_addr *saddr,
+ struct in_addr *daddr)
+{
+ struct rtable *rt;
+ struct flowi4 fl = { { 0 } };
+
+ memset(&fl, 0, sizeof(fl));
+ fl.flowi4_oif = ndev->ifindex;
+ memcpy(&fl.saddr, saddr, sizeof(*saddr));
+ memcpy(&fl.daddr, daddr, sizeof(*daddr));
+ fl.flowi4_proto = IPPROTO_UDP;
+
+ rt = ip_route_output_key(&init_net, &fl);
+ if (IS_ERR(rt)) {
+ rxe_dbg_qp(qp, "no route to %pI4\n", &daddr->s_addr);
+ return NULL;
+ }
+
+ return &rt->dst;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static struct dst_entry *rxe_find_route6(struct rxe_qp *qp,
+ struct net_device *ndev,
+ struct in6_addr *saddr,
+ struct in6_addr *daddr)
+{
+ struct dst_entry *ndst;
+ struct flowi6 fl6 = { { 0 } };
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_oif = ndev->ifindex;
+ memcpy(&fl6.saddr, saddr, sizeof(*saddr));
+ memcpy(&fl6.daddr, daddr, sizeof(*daddr));
+ fl6.flowi6_proto = IPPROTO_UDP;
+
+ ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk),
+ recv_sockets.sk6->sk, &fl6,
+ NULL);
+ if (IS_ERR(ndst)) {
+ rxe_dbg_qp(qp, "no route to %pI6\n", daddr);
+ return NULL;
+ }
+
+ if (unlikely(ndst->error)) {
+ rxe_dbg_qp(qp, "no route to %pI6\n", daddr);
+ goto put;
+ }
+
+ return ndst;
+put:
+ dst_release(ndst);
+ return NULL;
+}
+
+#else
+
+static struct dst_entry *rxe_find_route6(struct rxe_qp *qp,
+ struct net_device *ndev,
+ struct in6_addr *saddr,
+ struct in6_addr *daddr)
+{
+ return NULL;
+}
+
+#endif
+
+static struct dst_entry *rxe_find_route(struct net_device *ndev,
+ struct rxe_qp *qp,
+ struct rxe_av *av)
+{
+ struct dst_entry *dst = NULL;
+
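+ /* RC QPs cache the route in the QP's socket; reuse it unless
+ * dst_check() says the cached entry has gone stale
+ */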
+ if (qp_type(qp) == IB_QPT_RC)
+ dst = sk_dst_get(qp->sk->sk);
+
+ if (!dst || !dst_check(dst, qp->dst_cookie)) {
+ if (dst)
+ dst_release(dst);
+
+ if (av->network_type == RXE_NETWORK_TYPE_IPV4) {
+ struct in_addr *saddr;
+ struct in_addr *daddr;
+
+ saddr = &av->sgid_addr._sockaddr_in.sin_addr;
+ daddr = &av->dgid_addr._sockaddr_in.sin_addr;
+ dst = rxe_find_route4(qp, ndev, saddr, daddr);
+ } else if (av->network_type == RXE_NETWORK_TYPE_IPV6) {
+ struct in6_addr *saddr6;
+ struct in6_addr *daddr6;
+
+ saddr6 = &av->sgid_addr._sockaddr_in6.sin6_addr;
+ daddr6 = &av->dgid_addr._sockaddr_in6.sin6_addr;
+ dst = rxe_find_route6(qp, ndev, saddr6, daddr6);
+#if IS_ENABLED(CONFIG_IPV6)
+ if (dst)
+ qp->dst_cookie =
+ rt6_get_cookie((struct rt6_info *)dst);
+#endif
+ }
+
+ if (dst && (qp_type(qp) == IB_QPT_RC)) {
+ dst_hold(dst);
+ sk_dst_set(qp->sk->sk, dst);
+ }
+ }
+ return dst;
+}
+
+static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+ struct udphdr *udph;
+ struct rxe_dev *rxe;
+ struct net_device *ndev = skb->dev;
+ struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+
+ /* takes a reference on rxe->ib_dev;
+ * dropped when the skb is freed
+ */
+ rxe = rxe_get_dev_from_net(ndev);
+ if (!rxe && is_vlan_dev(ndev))
+ rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev));
+ if (!rxe)
+ goto drop;
+
+ if (skb_linearize(skb)) {
+ ib_device_put(&rxe->ib_dev);
+ goto drop;
+ }
+
+ udph = udp_hdr(skb);
+ pkt->rxe = rxe;
+ pkt->port_num = 1;
+ pkt->hdr = (u8 *)(udph + 1);
+ pkt->mask = RXE_GRH_MASK;
+ pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);
+
+ /* remove udp header */
+ skb_pull(skb, sizeof(struct udphdr));
+
+ rxe_rcv(skb);
+
+ return 0;
+drop:
+ kfree_skb(skb);
+
+ return 0;
+}
+
+static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
+ bool ipv6)
+{
+ int err;
+ struct socket *sock;
+ struct udp_port_cfg udp_cfg = { };
+ struct udp_tunnel_sock_cfg tnl_cfg = { };
+
+ if (ipv6) {
+ udp_cfg.family = AF_INET6;
+ udp_cfg.ipv6_v6only = 1;
+ } else {
+ udp_cfg.family = AF_INET;
+ }
+
+ udp_cfg.local_udp_port = port;
+
+ /* Create UDP socket */
+ err = udp_sock_create(net, &udp_cfg, &sock);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ tnl_cfg.encap_type = 1;
+ tnl_cfg.encap_rcv = rxe_udp_encap_recv;
+
+ /* Setup UDP tunnel */
+ setup_udp_tunnel_sock(net, sock, &tnl_cfg);
+
+ return sock;
+}
+
+static void rxe_release_udp_tunnel(struct socket *sk)
+{
+ if (sk)
+ udp_tunnel_sock_release(sk);
+}
+
+static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
+ __be16 dst_port)
+{
+ struct udphdr *udph;
+
+ __skb_push(skb, sizeof(*udph));
+ skb_reset_transport_header(skb);
+ udph = udp_hdr(skb);
+
+ udph->dest = dst_port;
+ udph->source = src_port;
+ udph->len = htons(skb->len);
+ udph->check = 0;
+}
+
+static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
+ __be32 saddr, __be32 daddr, __u8 proto,
+ __u8 tos, __u8 ttl, __be16 df, bool xnet)
+{
+ struct iphdr *iph;
+
+ skb_scrub_packet(skb, xnet);
+
+ skb_clear_hash(skb);
+ skb_dst_set(skb, dst_clone(dst));
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+
+ skb_push(skb, sizeof(struct iphdr));
+ skb_reset_network_header(skb);
+
+ iph = ip_hdr(skb);
+
+ iph->version = IPVERSION;
+ iph->ihl = sizeof(struct iphdr) >> 2;
+ iph->tot_len = htons(skb->len);
+ iph->frag_off = df;
+ iph->protocol = proto;
+ iph->tos = tos;
+ iph->daddr = daddr;
+ iph->saddr = saddr;
+ iph->ttl = ttl;
+ __ip_select_ident(dev_net(dst->dev), iph,
+ skb_shinfo(skb)->gso_segs ?: 1);
+}
+
+static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
+ struct in6_addr *saddr, struct in6_addr *daddr,
+ __u8 proto, __u8 prio, __u8 ttl)
+{
+ struct ipv6hdr *ip6h;
+
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
+ | IPSKB_REROUTED);
+ skb_dst_set(skb, dst_clone(dst));
+
+ __skb_push(skb, sizeof(*ip6h));
+ skb_reset_network_header(skb);
+ ip6h = ipv6_hdr(skb);
+ ip6_flow_hdr(ip6h, prio, htonl(0));
+ ip6h->payload_len = htons(skb->len);
+ ip6h->nexthdr = proto;
+ ip6h->hop_limit = ttl;
+ ip6h->daddr = *daddr;
+ ip6h->saddr = *saddr;
+ ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
+}
+
+static int prepare4(struct rxe_av *av, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb)
+{
+ struct rxe_qp *qp = pkt->qp;
+ struct dst_entry *dst;
+ bool xnet = false;
+ __be16 df = htons(IP_DF);
+ struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
+ struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;
+
+ dst = rxe_find_route(skb->dev, qp, av);
+ if (!dst) {
+ rxe_dbg_qp(qp, "Host not reachable\n");
+ return -EHOSTUNREACH;
+ }
+
+ prepare_udp_hdr(skb, cpu_to_be16(qp->src_port),
+ cpu_to_be16(ROCE_V2_UDP_DPORT));
+
+ prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP,
+ av->grh.traffic_class, av->grh.hop_limit, df, xnet);
+
+ dst_release(dst);
+ return 0;
+}
+
+static int prepare6(struct rxe_av *av, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb)
+{
+ struct rxe_qp *qp = pkt->qp;
+ struct dst_entry *dst;
+ struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
+ struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;
+
+ dst = rxe_find_route(skb->dev, qp, av);
+ if (!dst) {
+ rxe_dbg_qp(qp, "Host not reachable\n");
+ return -EHOSTUNREACH;
+ }
+
+ prepare_udp_hdr(skb, cpu_to_be16(qp->src_port),
+ cpu_to_be16(ROCE_V2_UDP_DPORT));
+
+ prepare_ipv6_hdr(dst, skb, saddr, daddr, IPPROTO_UDP,
+ av->grh.traffic_class,
+ av->grh.hop_limit);
+
+ dst_release(dst);
+ return 0;
+}
+
+int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb)
+{
+ int err = 0;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ err = prepare4(av, pkt, skb);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ err = prepare6(av, pkt, skb);
+
+ if (ether_addr_equal(skb->dev->dev_addr, av->dmac))
+ pkt->mask |= RXE_LOOPBACK_MASK;
+
+ return err;
+}
+
+static void rxe_skb_tx_dtor(struct sk_buff *skb)
+{
+ struct rxe_qp *qp = skb->sk->sk_user_data;
+ int skb_out;
+
+ skb_out = atomic_dec_return(&qp->skb_out);
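+ /* kick the requester if it was throttled waiting for the number
+ * of in-flight skbs to drop below the low-water mark
+ */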
+ if (unlikely(qp->need_req_skb &&
+ skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
+ rxe_sched_task(&qp->send_task);
+
+ rxe_put(qp);
+ sock_put(skb->sk);
+}
+
+static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
+{
+ int err;
+ struct sock *sk = pkt->qp->sk->sk;
+
+ sock_hold(sk);
+ skb->sk = sk;
+ skb->destructor = rxe_skb_tx_dtor;
+ rxe_get(pkt->qp);
+ atomic_inc(&pkt->qp->skb_out);
+
+ if (skb->protocol == htons(ETH_P_IP))
+ err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
+ else
+ err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
+
+ return err;
+}
+
+/* fix up a send packet to match the packets
+ * received from UDP before looping them back
+ */
+static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt)
+{
+ struct sock *sk = pkt->qp->sk->sk;
+
+ memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
+
+ sock_hold(sk);
+ skb->sk = sk;
+ skb->destructor = rxe_skb_tx_dtor;
+ rxe_get(pkt->qp);
+ atomic_inc(&pkt->qp->skb_out);
+
+ if (skb->protocol == htons(ETH_P_IP))
+ skb_pull(skb, sizeof(struct iphdr));
+ else
+ skb_pull(skb, sizeof(struct ipv6hdr));
+
+ if (WARN_ON(!ib_device_try_get(&pkt->rxe->ib_dev))) {
+ kfree_skb(skb);
+ return -EIO;
+ }
+
+ /* remove udp header */
+ skb_pull(skb, sizeof(struct udphdr));
+
+ rxe_rcv(skb);
+
+ return 0;
+}
+
+int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb)
+{
+ int err;
+ int is_request = pkt->mask & RXE_REQ_MASK;
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->state_lock, flags);
+ if ((is_request && (qp_state(qp) < IB_QPS_RTS)) ||
+ (!is_request && (qp_state(qp) < IB_QPS_RTR))) {
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+ rxe_dbg_qp(qp, "Packet dropped. QP is not in ready state\n");
+ goto drop;
+ }
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+
+ rxe_icrc_generate(skb, pkt);
+
+ if (pkt->mask & RXE_LOOPBACK_MASK)
+ err = rxe_loopback(skb, pkt);
+ else
+ err = rxe_send(skb, pkt);
+ if (err) {
+ rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
+ return err;
+ }
+
+ rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
+ goto done;
+
+drop:
+ kfree_skb(skb);
+ err = 0;
+done:
+ return err;
+}
+
+struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
+ int paylen, struct rxe_pkt_info *pkt)
+{
+ unsigned int hdr_len;
+ struct sk_buff *skb = NULL;
+ struct net_device *ndev;
+ const struct ib_gid_attr *attr;
+ const int port_num = 1;
+
+ attr = rdma_get_gid_attr(&rxe->ib_dev, port_num, av->grh.sgid_index);
+ if (IS_ERR(attr))
+ return NULL;
+
+ if (av->network_type == RXE_NETWORK_TYPE_IPV4)
+ hdr_len = ETH_HLEN + sizeof(struct udphdr) +
+ sizeof(struct iphdr);
+ else
+ hdr_len = ETH_HLEN + sizeof(struct udphdr) +
+ sizeof(struct ipv6hdr);
+
+ rcu_read_lock();
+ ndev = rdma_read_gid_attr_ndev_rcu(attr);
+ if (IS_ERR(ndev)) {
+ rcu_read_unlock();
+ goto out;
+ }
+ skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(ndev),
+ GFP_ATOMIC);
+
+ if (unlikely(!skb)) {
+ rcu_read_unlock();
+ goto out;
+ }
+
+ /* Add time stamp to skb. */
+ skb->tstamp = ktime_get();
+
+ skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev));
+
+ /* FIXME: hold a reference to this netdev for the lifetime of this skb. */
+ skb->dev = ndev;
+ rcu_read_unlock();
+
+ if (av->network_type == RXE_NETWORK_TYPE_IPV4)
+ skb->protocol = htons(ETH_P_IP);
+ else
+ skb->protocol = htons(ETH_P_IPV6);
+
+ pkt->rxe = rxe;
+ pkt->port_num = port_num;
+ pkt->hdr = skb_put(skb, paylen);
+ pkt->mask |= RXE_GRH_MASK;
+
+out:
+ rdma_put_gid_attr(attr);
+ return skb;
+}
+
+/*
+ * this is required by rxe_cfg to match rxe devices in
+ * /sys/class/infiniband up with their underlying ethernet devices
+ */
+const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num)
+{
+ struct net_device *ndev;
+ char *ndev_name;
+
+ ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+ if (!ndev)
+ return NULL;
+ ndev_name = ndev->name;
+ dev_put(ndev);
+
+ return ndev_name;
+}
+
+int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
+{
+ int err;
+ struct rxe_dev *rxe = NULL;
+
+ rxe = ib_alloc_device(rxe_dev, ib_dev);
+ if (!rxe)
+ return -ENOMEM;
+
+ ib_mark_name_assigned_by_user(&rxe->ib_dev);
+
+ err = rxe_add(rxe, ndev->mtu, ibdev_name, ndev);
+ if (err) {
+ ib_dealloc_device(&rxe->ib_dev);
+ return err;
+ }
+
+ return 0;
+}
+
+static void rxe_port_event(struct rxe_dev *rxe,
+ enum ib_event_type event)
+{
+ struct ib_event ev;
+
+ ev.device = &rxe->ib_dev;
+ ev.element.port_num = 1;
+ ev.event = event;
+
+ ib_dispatch_event(&ev);
+}
+
+/* Caller must hold net_info_lock */
+void rxe_port_up(struct rxe_dev *rxe)
+{
+ rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
+ dev_info(&rxe->ib_dev.dev, "set active\n");
+}
+
+/* Caller must hold net_info_lock */
+void rxe_port_down(struct rxe_dev *rxe)
+{
+ rxe_port_event(rxe, IB_EVENT_PORT_ERR);
+ rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED);
+ dev_info(&rxe->ib_dev.dev, "set down\n");
+}
+
+void rxe_set_port_state(struct rxe_dev *rxe)
+{
+ struct net_device *ndev;
+
+ ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+ if (!ndev)
+ return;
+
+ if (ib_get_curr_port_state(ndev) == IB_PORT_ACTIVE)
+ rxe_port_up(rxe);
+ else
+ rxe_port_down(rxe);
+
+ dev_put(ndev);
+}
+
+static int rxe_notify(struct notifier_block *not_blk,
+ unsigned long event,
+ void *arg)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(arg);
+ struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);
+
+ if (!rxe)
+ return NOTIFY_OK;
+
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ ib_unregister_device_queued(&rxe->ib_dev);
+ break;
+ case NETDEV_CHANGEMTU:
+ rxe_dbg_dev(rxe, "%s changed mtu to %d\n", ndev->name, ndev->mtu);
+ rxe_set_mtu(rxe, ndev->mtu);
+ break;
+ case NETDEV_DOWN:
+ case NETDEV_CHANGE:
+ if (ib_get_curr_port_state(ndev) == IB_PORT_DOWN)
+ rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED);
+ break;
+ case NETDEV_REBOOT:
+ case NETDEV_GOING_DOWN:
+ case NETDEV_CHANGEADDR:
+ case NETDEV_CHANGENAME:
+ case NETDEV_FEAT_CHANGE:
+ default:
+ rxe_dbg_dev(rxe, "ignoring netdev event = %ld for %s\n",
+ event, ndev->name);
+ break;
+ }
+
+ ib_device_put(&rxe->ib_dev);
+ return NOTIFY_OK;
+}
+
+static struct notifier_block rxe_net_notifier = {
+ .notifier_call = rxe_notify,
+};
+
+static int rxe_net_ipv4_init(void)
+{
+ recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
+ htons(ROCE_V2_UDP_DPORT), false);
+ if (IS_ERR(recv_sockets.sk4)) {
+ recv_sockets.sk4 = NULL;
+ pr_err("Failed to create IPv4 UDP tunnel\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int rxe_net_ipv6_init(void)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+
+ recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
+ htons(ROCE_V2_UDP_DPORT), true);
+ if (PTR_ERR(recv_sockets.sk6) == -EAFNOSUPPORT) {
+ recv_sockets.sk6 = NULL;
+ pr_warn("IPv6 is not supported, cannot create a UDPv6 socket\n");
+ return 0;
+ }
+
+ if (IS_ERR(recv_sockets.sk6)) {
+ recv_sockets.sk6 = NULL;
+ pr_err("Failed to create IPv6 UDP tunnel\n");
+ return -1;
+ }
+#endif
+ return 0;
+}
+
+void rxe_net_exit(void)
+{
+ rxe_release_udp_tunnel(recv_sockets.sk6);
+ rxe_release_udp_tunnel(recv_sockets.sk4);
+ unregister_netdevice_notifier(&rxe_net_notifier);
+}
+
+int rxe_net_init(void)
+{
+ int err;
+
+ recv_sockets.sk6 = NULL;
+
+ err = rxe_net_ipv4_init();
+ if (err)
+ return err;
+ err = rxe_net_ipv6_init();
+ if (err)
+ goto err_out;
+ err = register_netdevice_notifier(&rxe_net_notifier);
+ if (err) {
+ pr_err("Failed to register netdev notifier\n");
+ goto err_out;
+ }
+ return 0;
+err_out:
+ rxe_net_exit();
+ return err;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.h b/drivers/infiniband/sw/rxe/rxe_net.h
new file mode 100644
index 000000000000..45d80d00f86b
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_net.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#ifndef RXE_NET_H
+#define RXE_NET_H
+
+#include <net/sock.h>
+#include <net/if_inet6.h>
+#include <linux/module.h>
+
+struct rxe_recv_sockets {
+ struct socket *sk4;
+ struct socket *sk6;
+};
+
+int rxe_net_add(const char *ibdev_name, struct net_device *ndev);
+
+int rxe_net_init(void);
+void rxe_net_exit(void);
+
+#endif /* RXE_NET_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_odp.c b/drivers/infiniband/sw/rxe/rxe_odp.c
new file mode 100644
index 000000000000..f58e3ec6252f
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_odp.c
@@ -0,0 +1,578 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2022-2023 Fujitsu Ltd. All rights reserved.
+ */
+
+#include <linux/hmm.h>
+#include <linux/libnvdimm.h>
+
+#include <rdma/ib_umem_odp.h>
+
+#include "rxe.h"
+
+static bool rxe_ib_invalidate_range(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct ib_umem_odp *umem_odp =
+ container_of(mni, struct ib_umem_odp, notifier);
+ unsigned long start, end;
+
+ if (!mmu_notifier_range_blockable(range))
+ return false;
+
+ mutex_lock(&umem_odp->umem_mutex);
+ mmu_interval_set_seq(mni, cur_seq);
+
+ start = max_t(u64, ib_umem_start(umem_odp), range->start);
+ end = min_t(u64, ib_umem_end(umem_odp), range->end);
+
+ /* update umem_odp->map.pfn_list */
+ ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
+
+ mutex_unlock(&umem_odp->umem_mutex);
+ return true;
+}
+
+const struct mmu_interval_notifier_ops rxe_mn_ops = {
+ .invalidate = rxe_ib_invalidate_range,
+};
+
+#define RXE_PAGEFAULT_DEFAULT 0
+#define RXE_PAGEFAULT_RDONLY BIT(0)
+#define RXE_PAGEFAULT_SNAPSHOT BIT(1)
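+/*
+ * Pagefault flags used below: DEFAULT faults pages in and requests
+ * write access when the umem is writable, RDONLY never requests
+ * write access, and SNAPSHOT only picks up pages that are already
+ * present instead of faulting new ones in.
+ */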
+static int rxe_odp_do_pagefault_and_lock(struct rxe_mr *mr, u64 user_va, int bcnt, u32 flags)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ bool fault = !(flags & RXE_PAGEFAULT_SNAPSHOT);
+ u64 access_mask = 0;
+ int np;
+
+ if (umem_odp->umem.writable && !(flags & RXE_PAGEFAULT_RDONLY))
+ access_mask |= HMM_PFN_WRITE;
+
+ /*
+ * ib_umem_odp_map_dma_and_lock() locks umem_mutex on success.
+ * Callers must release the lock later so that the invalidation
+ * handler can run again.
+ */
+ np = ib_umem_odp_map_dma_and_lock(umem_odp, user_va, bcnt,
+ access_mask, fault);
+ return np;
+}
+
+static int rxe_odp_init_pages(struct rxe_mr *mr)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ int ret;
+
+ ret = rxe_odp_do_pagefault_and_lock(mr, mr->umem->address,
+ mr->umem->length,
+ RXE_PAGEFAULT_SNAPSHOT);
+
+ if (ret >= 0)
+ mutex_unlock(&umem_odp->umem_mutex);
+
+ return ret >= 0 ? 0 : ret;
+}
+
+int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
+ u64 iova, int access_flags, struct rxe_mr *mr)
+{
+ struct ib_umem_odp *umem_odp;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+ return -EOPNOTSUPP;
+
+ rxe_mr_init(access_flags, mr);
+
+ if (!start && length == U64_MAX) {
+ if (iova != 0)
+ return -EINVAL;
+ if (!(rxe->attr.odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
+ return -EINVAL;
+
+ /* Never reached: implicit ODP is not implemented. */
+ }
+
+ umem_odp = ib_umem_odp_get(&rxe->ib_dev, start, length, access_flags,
+ &rxe_mn_ops);
+ if (IS_ERR(umem_odp)) {
+ rxe_dbg_mr(mr, "Unable to create umem_odp err = %d\n",
+ (int)PTR_ERR(umem_odp));
+ return PTR_ERR(umem_odp);
+ }
+
+ umem_odp->private = mr;
+
+ mr->umem = &umem_odp->umem;
+ mr->access = access_flags;
+ mr->ibmr.length = length;
+ mr->ibmr.iova = iova;
+ mr->page_offset = ib_umem_offset(&umem_odp->umem);
+
+ err = rxe_odp_init_pages(mr);
+ if (err) {
+ ib_umem_odp_release(umem_odp);
+ return err;
+ }
+
+ mr->state = RXE_MR_STATE_VALID;
+ mr->ibmr.type = IB_MR_TYPE_USER;
+
+ return err;
+}
+
+static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp, u64 iova,
+ int length)
+{
+ bool need_fault = false;
+ u64 addr;
+ int idx;
+
+ addr = iova & (~(BIT(umem_odp->page_shift) - 1));
+
+ /* Skim through all pages that are to be accessed. */
+ while (addr < iova + length) {
+ idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
+
+ if (!(umem_odp->map.pfn_list[idx] & HMM_PFN_VALID)) {
+ need_fault = true;
+ break;
+ }
+
+ addr += BIT(umem_odp->page_shift);
+ }
+ return need_fault;
+}
+
+static unsigned long rxe_odp_iova_to_index(struct ib_umem_odp *umem_odp, u64 iova)
+{
+ return (iova - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
+}
+
+static unsigned long rxe_odp_iova_to_page_offset(struct ib_umem_odp *umem_odp, u64 iova)
+{
+ return iova & (BIT(umem_odp->page_shift) - 1);
+}
+
+static int rxe_odp_map_range_and_lock(struct rxe_mr *mr, u64 iova, int length, u32 flags)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ bool need_fault;
+ int err;
+
+ if (unlikely(length < 1))
+ return -EINVAL;
+
+ mutex_lock(&umem_odp->umem_mutex);
+
+ need_fault = rxe_check_pagefault(umem_odp, iova, length);
+ if (need_fault) {
+ mutex_unlock(&umem_odp->umem_mutex);
+
+ /* umem_mutex is locked on success. */
+ err = rxe_odp_do_pagefault_and_lock(mr, iova, length,
+ flags);
+ if (err < 0)
+ return err;
+
+ need_fault = rxe_check_pagefault(umem_odp, iova, length);
+ if (need_fault)
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int __rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
+ int length, enum rxe_mr_copy_dir dir)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ struct page *page;
+ int idx, bytes;
+ size_t offset;
+ u8 *user_va;
+
+ idx = rxe_odp_iova_to_index(umem_odp, iova);
+ offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
+
+ while (length > 0) {
+ u8 *src, *dest;
+
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
+ user_va = kmap_local_page(page);
+
+ src = (dir == RXE_TO_MR_OBJ) ? addr : user_va;
+ dest = (dir == RXE_TO_MR_OBJ) ? user_va : addr;
+
+ bytes = BIT(umem_odp->page_shift) - offset;
+ if (bytes > length)
+ bytes = length;
+
+ memcpy(dest, src, bytes);
+ kunmap_local(user_va);
+
+ length -= bytes;
+ idx++;
+ offset = 0;
+ }
+
+ return 0;
+}
+
+int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
+ enum rxe_mr_copy_dir dir)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ u32 flags = RXE_PAGEFAULT_DEFAULT;
+ int err;
+
+ if (length == 0)
+ return 0;
+
+ if (unlikely(!mr->umem->is_odp))
+ return -EOPNOTSUPP;
+
+ switch (dir) {
+ case RXE_TO_MR_OBJ:
+ break;
+
+ case RXE_FROM_MR_OBJ:
+ flags |= RXE_PAGEFAULT_RDONLY;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ err = rxe_odp_map_range_and_lock(mr, iova, length, flags);
+ if (err)
+ return err;
+
+ err = __rxe_odp_mr_copy(mr, iova, addr, length, dir);
+
+ mutex_unlock(&umem_odp->umem_mutex);
+
+ return err;
+}
+
+static enum resp_states rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova,
+ int opcode, u64 compare,
+ u64 swap_add, u64 *orig_val)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ unsigned int page_offset;
+ struct page *page;
+ unsigned int idx;
+ u64 value;
+ u64 *va;
+ int err;
+
+ if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
+ rxe_dbg_mr(mr, "mr not in valid state\n");
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+
+ err = mr_check_range(mr, iova, sizeof(value));
+ if (err) {
+ rxe_dbg_mr(mr, "iova out of range\n");
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+
+ page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
+ if (unlikely(page_offset & 0x7)) {
+ rxe_dbg_mr(mr, "iova not aligned\n");
+ return RESPST_ERR_MISALIGNED_ATOMIC;
+ }
+
+ idx = rxe_odp_iova_to_index(umem_odp, iova);
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
+
+ va = kmap_local_page(page);
+
+ spin_lock_bh(&atomic_ops_lock);
+ value = *orig_val = va[page_offset >> 3];
+
+ if (opcode == IB_OPCODE_RC_COMPARE_SWAP) {
+ if (value == compare)
+ va[page_offset >> 3] = swap_add;
+ } else {
+ value += swap_add;
+ va[page_offset >> 3] = value;
+ }
+ spin_unlock_bh(&atomic_ops_lock);
+
+ kunmap_local(va);
+
+ return RESPST_NONE;
+}
+
+enum resp_states rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ int err;
+
+ err = rxe_odp_map_range_and_lock(mr, iova, sizeof(char),
+ RXE_PAGEFAULT_DEFAULT);
+ if (err < 0)
+ return RESPST_ERR_RKEY_VIOLATION;
+
+ err = rxe_odp_do_atomic_op(mr, iova, opcode, compare, swap_add,
+ orig_val);
+ mutex_unlock(&umem_odp->umem_mutex);
+
+ return err;
+}
+
+int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
+ unsigned int length)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ unsigned int page_offset;
+ unsigned long index;
+ struct page *page;
+ unsigned int bytes;
+ int err;
+ u8 *va;
+
+ err = rxe_odp_map_range_and_lock(mr, iova, length,
+ RXE_PAGEFAULT_DEFAULT);
+ if (err)
+ return err;
+
+ while (length > 0) {
+ index = rxe_odp_iova_to_index(umem_odp, iova);
+ page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
+
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
+
+ bytes = min_t(unsigned int, length,
+ mr_page_size(mr) - page_offset);
+
+ va = kmap_local_page(page);
+ arch_wb_cache_pmem(va + page_offset, bytes);
+ kunmap_local(va);
+
+ length -= bytes;
+ iova += bytes;
+ page_offset = 0;
+ }
+
+ mutex_unlock(&umem_odp->umem_mutex);
+
+ return 0;
+}
+
+enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ unsigned int page_offset;
+ unsigned long index;
+ struct page *page;
+ int err;
+ u64 *va;
+
+ /* See IBA oA19-28 */
+ err = mr_check_range(mr, iova, sizeof(value));
+ if (unlikely(err)) {
+ rxe_dbg_mr(mr, "iova out of range\n");
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+
+ err = rxe_odp_map_range_and_lock(mr, iova, sizeof(value),
+ RXE_PAGEFAULT_DEFAULT);
+ if (err)
+ return RESPST_ERR_RKEY_VIOLATION;
+
+ page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
+ /* See IBA A19.4.2 */
+ if (unlikely(page_offset & 0x7)) {
+ mutex_unlock(&umem_odp->umem_mutex);
+ rxe_dbg_mr(mr, "misaligned address\n");
+ return RESPST_ERR_MISALIGNED_ATOMIC;
+ }
+
+ index = rxe_odp_iova_to_index(umem_odp, iova);
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
+
+ va = kmap_local_page(page);
+ /* Do atomic write after all prior operations have completed */
+ smp_store_release(&va[page_offset >> 3], value);
+ kunmap_local(va);
+
+ mutex_unlock(&umem_odp->umem_mutex);
+
+ return RESPST_NONE;
+}
+
+struct prefetch_mr_work {
+ struct work_struct work;
+ u32 pf_flags;
+ u32 num_sge;
+ struct {
+ u64 io_virt;
+ struct rxe_mr *mr;
+ size_t length;
+ } frags[];
+};
+
+static void rxe_ib_prefetch_mr_work(struct work_struct *w)
+{
+ struct prefetch_mr_work *work =
+ container_of(w, struct prefetch_mr_work, work);
+ int ret;
+ u32 i;
+
+ /*
+ * We rely on the IB core to queue this work only
+ * when num_sge != 0.
+ */
+ WARN_ON(!work->num_sge);
+ for (i = 0; i < work->num_sge; ++i) {
+ struct ib_umem_odp *umem_odp;
+
+ ret = rxe_odp_do_pagefault_and_lock(work->frags[i].mr,
+ work->frags[i].io_virt,
+ work->frags[i].length,
+ work->pf_flags);
+ if (ret < 0) {
+ rxe_dbg_mr(work->frags[i].mr,
+ "failed to prefetch the mr\n");
+ goto deref;
+ }
+
+ umem_odp = to_ib_umem_odp(work->frags[i].mr->umem);
+ mutex_unlock(&umem_odp->umem_mutex);
+
+deref:
+ rxe_put(work->frags[i].mr);
+ }
+
+ kvfree(work);
+}
+
+static int rxe_ib_prefetch_sg_list(struct ib_pd *ibpd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 pf_flags, struct ib_sge *sg_list,
+ u32 num_sge)
+{
+ struct rxe_pd *pd = container_of(ibpd, struct rxe_pd, ibpd);
+ int ret = 0;
+ u32 i;
+
+ for (i = 0; i < num_sge; ++i) {
+ struct rxe_mr *mr;
+ struct ib_umem_odp *umem_odp;
+
+ mr = lookup_mr(pd, IB_ACCESS_LOCAL_WRITE,
+ sg_list[i].lkey, RXE_LOOKUP_LOCAL);
+
+ if (!mr) {
+ rxe_dbg_pd(pd, "mr with lkey %x not found\n",
+ sg_list[i].lkey);
+ return -EINVAL;
+ }
+
+ if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
+ !mr->umem->writable) {
+ rxe_dbg_mr(mr, "missing write permission\n");
+ rxe_put(mr);
+ return -EPERM;
+ }
+
+ ret = rxe_odp_do_pagefault_and_lock(
+ mr, sg_list[i].addr, sg_list[i].length, pf_flags);
+ if (ret < 0) {
+ rxe_dbg_mr(mr, "failed to prefetch the mr\n");
+ rxe_put(mr);
+ return ret;
+ }
+
+ umem_odp = to_ib_umem_odp(mr->umem);
+ mutex_unlock(&umem_odp->umem_mutex);
+
+ rxe_put(mr);
+ }
+
+ return 0;
+}
+
+static int rxe_ib_advise_mr_prefetch(struct ib_pd *ibpd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 flags, struct ib_sge *sg_list,
+ u32 num_sge)
+{
+ struct rxe_pd *pd = container_of(ibpd, struct rxe_pd, ibpd);
+ u32 pf_flags = RXE_PAGEFAULT_DEFAULT;
+ struct prefetch_mr_work *work;
+ struct rxe_mr *mr;
+ u32 i;
+
+ if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
+ pf_flags |= RXE_PAGEFAULT_RDONLY;
+
+ if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
+ pf_flags |= RXE_PAGEFAULT_SNAPSHOT;
+
+ /* Synchronous call */
+ if (flags & IB_UVERBS_ADVISE_MR_FLAG_FLUSH)
+ return rxe_ib_prefetch_sg_list(ibpd, advice, pf_flags, sg_list,
+ num_sge);
+
+ /* Asynchronous call is "best-effort" and allowed to fail */
+ work = kvzalloc(struct_size(work, frags, num_sge), GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
+
+ INIT_WORK(&work->work, rxe_ib_prefetch_mr_work);
+ work->pf_flags = pf_flags;
+ work->num_sge = num_sge;
+
+ for (i = 0; i < num_sge; ++i) {
+ /* Takes a reference, which will be released in the queued work */
+ mr = lookup_mr(pd, IB_ACCESS_LOCAL_WRITE,
+ sg_list[i].lkey, RXE_LOOKUP_LOCAL);
+ if (!mr) {
+ mr = ERR_PTR(-EINVAL);
+ goto err;
+ }
+
+ work->frags[i].io_virt = sg_list[i].addr;
+ work->frags[i].length = sg_list[i].length;
+ work->frags[i].mr = mr;
+ }
+
+ queue_work(system_unbound_wq, &work->work);
+
+ return 0;
+
+ err:
+ /* rollback reference counts for the invalid request */
+ while (i > 0) {
+ i--;
+ rxe_put(work->frags[i].mr);
+ }
+
+ kvfree(work);
+
+ return PTR_ERR(mr);
+}
+
+int rxe_ib_advise_mr(struct ib_pd *ibpd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 flags,
+ struct ib_sge *sg_list,
+ u32 num_sge,
+ struct uverbs_attr_bundle *attrs)
+{
+ if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH &&
+ advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
+ advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
+ return -EOPNOTSUPP;
+
+ return rxe_ib_advise_mr_prefetch(ibpd, advice, flags,
+ sg_list, num_sge);
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c
new file mode 100644
index 000000000000..5c0d5c6ffda4
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.c
@@ -0,0 +1,975 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#include <rdma/ib_pack.h>
+#include "rxe_opcode.h"
+#include "rxe_hdr.h"
+
+/* useful information about work request opcodes and pkt opcodes in
+ * table form
+ */
+struct rxe_wr_opcode_info rxe_wr_opcode_info[] = {
+ [IB_WR_RDMA_WRITE] = {
+ .name = "IB_WR_RDMA_WRITE",
+ .mask = {
+ [IB_QPT_RC] = WR_INLINE_MASK | WR_WRITE_MASK,
+ [IB_QPT_UC] = WR_INLINE_MASK | WR_WRITE_MASK,
+ },
+ },
+ [IB_WR_RDMA_WRITE_WITH_IMM] = {
+ .name = "IB_WR_RDMA_WRITE_WITH_IMM",
+ .mask = {
+ [IB_QPT_RC] = WR_INLINE_MASK | WR_WRITE_MASK,
+ [IB_QPT_UC] = WR_INLINE_MASK | WR_WRITE_MASK,
+ },
+ },
+ [IB_WR_SEND] = {
+ .name = "IB_WR_SEND",
+ .mask = {
+ [IB_QPT_GSI] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_RC] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_UC] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_UD] = WR_INLINE_MASK | WR_SEND_MASK,
+ },
+ },
+ [IB_WR_SEND_WITH_IMM] = {
+ .name = "IB_WR_SEND_WITH_IMM",
+ .mask = {
+ [IB_QPT_GSI] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_RC] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_UC] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_UD] = WR_INLINE_MASK | WR_SEND_MASK,
+ },
+ },
+ [IB_WR_RDMA_READ] = {
+ .name = "IB_WR_RDMA_READ",
+ .mask = {
+ [IB_QPT_RC] = WR_READ_MASK,
+ },
+ },
+ [IB_WR_ATOMIC_CMP_AND_SWP] = {
+ .name = "IB_WR_ATOMIC_CMP_AND_SWP",
+ .mask = {
+ [IB_QPT_RC] = WR_ATOMIC_MASK,
+ },
+ },
+ [IB_WR_ATOMIC_FETCH_AND_ADD] = {
+ .name = "IB_WR_ATOMIC_FETCH_AND_ADD",
+ .mask = {
+ [IB_QPT_RC] = WR_ATOMIC_MASK,
+ },
+ },
+ [IB_WR_LSO] = {
+ .name = "IB_WR_LSO",
+ .mask = {
+ /* not supported */
+ },
+ },
+ [IB_WR_SEND_WITH_INV] = {
+ .name = "IB_WR_SEND_WITH_INV",
+ .mask = {
+ [IB_QPT_RC] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_UC] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_UD] = WR_INLINE_MASK | WR_SEND_MASK,
+ },
+ },
+ [IB_WR_RDMA_READ_WITH_INV] = {
+ .name = "IB_WR_RDMA_READ_WITH_INV",
+ .mask = {
+ [IB_QPT_RC] = WR_READ_MASK,
+ },
+ },
+ [IB_WR_LOCAL_INV] = {
+ .name = "IB_WR_LOCAL_INV",
+ .mask = {
+ [IB_QPT_RC] = WR_LOCAL_OP_MASK,
+ },
+ },
+ [IB_WR_REG_MR] = {
+ .name = "IB_WR_REG_MR",
+ .mask = {
+ [IB_QPT_RC] = WR_LOCAL_OP_MASK,
+ },
+ },
+ [IB_WR_BIND_MW] = {
+ .name = "IB_WR_BIND_MW",
+ .mask = {
+ [IB_QPT_RC] = WR_LOCAL_OP_MASK,
+ [IB_QPT_UC] = WR_LOCAL_OP_MASK,
+ },
+ },
+ [IB_WR_FLUSH] = {
+ .name = "IB_WR_FLUSH",
+ .mask = {
+ [IB_QPT_RC] = WR_FLUSH_MASK,
+ },
+ },
+ [IB_WR_ATOMIC_WRITE] = {
+ .name = "IB_WR_ATOMIC_WRITE",
+ .mask = {
+ [IB_QPT_RC] = WR_ATOMIC_WRITE_MASK,
+ },
+ },
+};
+
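+/*
+ * For example (illustrative only), a caller handling an RC send with
+ * immediate data can consult the table below as:
+ *
+ *   struct rxe_opcode_info *info =
+ *           &rxe_opcode[IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE];
+ *   int hdr_len = info->length;                  (BTH + ImmDt bytes)
+ *   int imm_off = info->offset[RXE_IMMDT];       (immediate data offset)
+ *   int pay_off = info->offset[RXE_PAYLOAD];     (start of payload)
+ */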
+struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
+ [IB_OPCODE_RC_SEND_FIRST] = {
+ .name = "IB_OPCODE_RC_SEND_FIRST",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK |
+ RXE_SEND_MASK | RXE_START_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_SEND_MIDDLE] = {
+ .name = "IB_OPCODE_RC_SEND_MIDDLE",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK |
+ RXE_MIDDLE_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_SEND_LAST] = {
+ .name = "IB_OPCODE_RC_SEND_LAST",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
+ RXE_SEND_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE",
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_IMMDT] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_SEND_ONLY] = {
+ .name = "IB_OPCODE_RC_SEND_ONLY",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
+ RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE",
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_IMMDT] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_WRITE_FIRST] = {
+ .name = "IB_OPCODE_RC_RDMA_WRITE_FIRST",
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = {
+ .name = "IB_OPCODE_RC_RDMA_WRITE_MIDDLE",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_MIDDLE_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_WRITE_LAST] = {
+ .name = "IB_OPCODE_RC_RDMA_WRITE_LAST",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE",
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_IMMDT] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_WRITE_ONLY] = {
+ .name = "IB_OPCODE_RC_RDMA_WRITE_ONLY",
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
+ .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RETH] = RXE_BTH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES +
+ RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_READ_REQUEST] = {
+ .name = "IB_OPCODE_RC_RDMA_READ_REQUEST",
+ .mask = RXE_RETH_MASK | RXE_REQ_MASK | RXE_READ_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = {
+ .name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST",
+ .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_START_MASK,
+ .length = RXE_BTH_BYTES + RXE_AETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_AETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = {
+ .name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE",
+ .mask = RXE_PAYLOAD_MASK | RXE_ACK_MASK | RXE_MIDDLE_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = {
+ .name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST",
+ .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_AETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_AETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = {
+ .name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY",
+ .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_AETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_AETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_ACKNOWLEDGE] = {
+ .name = "IB_OPCODE_RC_ACKNOWLEDGE",
+ .mask = RXE_AETH_MASK | RXE_ACK_MASK | RXE_START_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_AETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_AETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = {
+ .name = "IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE",
+ .mask = RXE_AETH_MASK | RXE_ATMACK_MASK | RXE_ACK_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_AETH] = RXE_BTH_BYTES,
+ [RXE_ATMACK] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_ATMACK_BYTES +
+ RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_COMPARE_SWAP] = {
+ .name = "IB_OPCODE_RC_COMPARE_SWAP",
+ .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_ATMETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_ATMETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_FETCH_ADD] = {
+ .name = "IB_OPCODE_RC_FETCH_ADD",
+ .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_ATMETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_ATMETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = {
+ .name = "IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE",
+ .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_IETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = {
+ .name = "IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE",
+ .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_END_MASK | RXE_START_MASK,
+ .length = RXE_BTH_BYTES + RXE_IETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_IETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_FLUSH] = {
+ .name = "IB_OPCODE_RC_FLUSH",
+ .mask = RXE_FETH_MASK | RXE_RETH_MASK | RXE_FLUSH_MASK |
+ RXE_START_MASK | RXE_END_MASK | RXE_REQ_MASK,
+ .length = RXE_BTH_BYTES + RXE_FETH_BYTES + RXE_RETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_FETH] = RXE_BTH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES + RXE_FETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_ATOMIC_WRITE] = {
+ .name = "IB_OPCODE_RC_ATOMIC_WRITE",
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_ATOMIC_WRITE_MASK | RXE_START_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES + RXE_RETH_BYTES,
+ }
+ },
+
+ /* UC */
+ [IB_OPCODE_UC_SEND_FIRST] = {
+ .name = "IB_OPCODE_UC_SEND_FIRST",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK |
+ RXE_SEND_MASK | RXE_START_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_SEND_MIDDLE] = {
+ .name = "IB_OPCODE_UC_SEND_MIDDLE",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK |
+ RXE_MIDDLE_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_SEND_LAST] = {
+ .name = "IB_OPCODE_UC_SEND_LAST",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
+ RXE_SEND_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE",
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_IMMDT] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_SEND_ONLY] = {
+ .name = "IB_OPCODE_UC_SEND_ONLY",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
+ RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE",
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_IMMDT] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_RDMA_WRITE_FIRST] = {
+ .name = "IB_OPCODE_UC_RDMA_WRITE_FIRST",
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = {
+ .name = "IB_OPCODE_UC_RDMA_WRITE_MIDDLE",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_MIDDLE_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_RDMA_WRITE_LAST] = {
+ .name = "IB_OPCODE_UC_RDMA_WRITE_LAST",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE",
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_IMMDT] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_RDMA_WRITE_ONLY] = {
+ .name = "IB_OPCODE_UC_RDMA_WRITE_ONLY",
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
+ .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RETH] = RXE_BTH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES +
+ RXE_IMMDT_BYTES,
+ }
+ },
+
+ /* RD */
+ [IB_OPCODE_RD_SEND_FIRST] = {
+ .name = "IB_OPCODE_RD_SEND_FIRST",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK,
+ .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_SEND_MIDDLE] = {
+ .name = "IB_OPCODE_RD_SEND_MIDDLE",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_SEND_MASK |
+ RXE_MIDDLE_MASK,
+ .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_SEND_LAST] = {
+ .name = "IB_OPCODE_RD_SEND_LAST",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_COMP_MASK | RXE_SEND_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_SEND_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_SEND_ONLY] = {
+ .name = "IB_OPCODE_RD_SEND_ONLY",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_WRITE_FIRST] = {
+ .name = "IB_OPCODE_RD_RDMA_WRITE_FIRST",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_WRITE_MIDDLE] = {
+ .name = "IB_OPCODE_RD_RDMA_WRITE_MIDDLE",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_MIDDLE_MASK,
+ .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_WRITE_LAST] = {
+ .name = "IB_OPCODE_RD_RDMA_WRITE_LAST",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_WRITE_ONLY] = {
+ .name = "IB_OPCODE_RD_RDMA_WRITE_ONLY",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK |
+ RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES +
+ RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RETH_BYTES +
+ RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_READ_REQUEST] = {
+ .name = "IB_OPCODE_RD_RDMA_READ_REQUEST",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK |
+ RXE_REQ_MASK | RXE_READ_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST] = {
+ .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST",
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK |
+ RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_START_MASK,
+ .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_READ_RESPONSE_MIDDLE] = {
+ .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_MIDDLE",
+ .mask = RXE_RDETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_MIDDLE_MASK,
+ .length = RXE_BTH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST] = {
+ .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST",
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_ACK_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY] = {
+ .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY",
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_ACKNOWLEDGE] = {
+ .name = "IB_OPCODE_RD_ACKNOWLEDGE",
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ACK_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE] = {
+ .name = "IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE",
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ATMACK_MASK |
+ RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES +
+ RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_ATMACK] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_COMPARE_SWAP] = {
+ .name = "RD_COMPARE_SWAP",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK |
+ RXE_REQ_MASK | RXE_ATOMIC_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_ATMETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_ATMETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_FETCH_ADD] = {
+ .name = "IB_OPCODE_RD_FETCH_ADD",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK |
+ RXE_REQ_MASK | RXE_ATOMIC_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_ATMETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_ATMETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
+ }
+ },
+
+ /* UD */
+ [IB_OPCODE_UD_SEND_ONLY] = {
+ .name = "IB_OPCODE_UD_SEND_ONLY",
+ .mask = RXE_DETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_DETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_DETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_DETH_BYTES,
+ }
+ },
+ [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE",
+ .mask = RXE_DETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_DETH] = RXE_BTH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_IMMDT_BYTES,
+ }
+ },
+
+};
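
The offsets in the table above are running sums of the preceding header sizes in wire order. A minimal standalone sketch of that accumulation for an RD RDMA WRITE FIRST packet (userspace C, not part of the patch; the *_BYTES values below are assumptions standing in for the RXE_*_BYTES constants defined elsewhere in the driver):

#include <stdio.h>

/* assumed header sizes, standing in for RXE_*_BYTES */
enum { BTH_BYTES = 12, RDETH_BYTES = 4, DETH_BYTES = 8, RETH_BYTES = 16 };

int main(void)
{
	/* RD RDMA WRITE FIRST: BTH | RDETH | DETH | RETH | payload */
	int off_rdeth   = BTH_BYTES;
	int off_deth    = off_rdeth + RDETH_BYTES;
	int off_reth    = off_deth + DETH_BYTES;
	int off_payload = off_reth + RETH_BYTES;

	printf("RDETH@%d DETH@%d RETH@%d payload@%d\n",
	       off_rdeth, off_deth, off_reth, off_payload);
	return 0;
}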
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.h b/drivers/infiniband/sw/rxe/rxe_opcode.h
new file mode 100644
index 000000000000..5686b691d6b8
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#ifndef RXE_OPCODE_H
+#define RXE_OPCODE_H
+
+/*
+ * contains header bit mask definitions and header lengths, plus the
+ * declarations of the rxe_opcode_info and rxe_wr_opcode_info structs
+ */
+
+enum rxe_wr_mask {
+ WR_INLINE_MASK = BIT(0),
+ WR_ATOMIC_MASK = BIT(1),
+ WR_SEND_MASK = BIT(2),
+ WR_READ_MASK = BIT(3),
+ WR_WRITE_MASK = BIT(4),
+ WR_LOCAL_OP_MASK = BIT(5),
+ WR_FLUSH_MASK = BIT(6),
+ WR_ATOMIC_WRITE_MASK = BIT(7),
+
+ WR_READ_OR_WRITE_MASK = WR_READ_MASK | WR_WRITE_MASK,
+ WR_WRITE_OR_SEND_MASK = WR_WRITE_MASK | WR_SEND_MASK,
+ WR_ATOMIC_OR_READ_MASK = WR_ATOMIC_MASK | WR_READ_MASK,
+};
+
+#define WR_MAX_QPT (8)
+
+struct rxe_wr_opcode_info {
+ char *name;
+ enum rxe_wr_mask mask[WR_MAX_QPT];
+};
+
+extern struct rxe_wr_opcode_info rxe_wr_opcode_info[];
+
+enum rxe_hdr_type {
+ RXE_LRH,
+ RXE_GRH,
+ RXE_BTH,
+ RXE_RETH,
+ RXE_AETH,
+ RXE_ATMETH,
+ RXE_ATMACK,
+ RXE_IETH,
+ RXE_RDETH,
+ RXE_DETH,
+ RXE_IMMDT,
+ RXE_FETH,
+ RXE_PAYLOAD,
+ NUM_HDR_TYPES
+};
+
+enum rxe_hdr_mask {
+ RXE_LRH_MASK = BIT(RXE_LRH),
+ RXE_GRH_MASK = BIT(RXE_GRH),
+ RXE_BTH_MASK = BIT(RXE_BTH),
+ RXE_IMMDT_MASK = BIT(RXE_IMMDT),
+ RXE_RETH_MASK = BIT(RXE_RETH),
+ RXE_AETH_MASK = BIT(RXE_AETH),
+ RXE_ATMETH_MASK = BIT(RXE_ATMETH),
+ RXE_ATMACK_MASK = BIT(RXE_ATMACK),
+ RXE_IETH_MASK = BIT(RXE_IETH),
+ RXE_RDETH_MASK = BIT(RXE_RDETH),
+ RXE_DETH_MASK = BIT(RXE_DETH),
+ RXE_FETH_MASK = BIT(RXE_FETH),
+ RXE_PAYLOAD_MASK = BIT(RXE_PAYLOAD),
+
+ RXE_REQ_MASK = BIT(NUM_HDR_TYPES + 0),
+ RXE_ACK_MASK = BIT(NUM_HDR_TYPES + 1),
+ RXE_SEND_MASK = BIT(NUM_HDR_TYPES + 2),
+ RXE_WRITE_MASK = BIT(NUM_HDR_TYPES + 3),
+ RXE_READ_MASK = BIT(NUM_HDR_TYPES + 4),
+ RXE_ATOMIC_MASK = BIT(NUM_HDR_TYPES + 5),
+ RXE_FLUSH_MASK = BIT(NUM_HDR_TYPES + 6),
+
+ RXE_RWR_MASK = BIT(NUM_HDR_TYPES + 7),
+ RXE_COMP_MASK = BIT(NUM_HDR_TYPES + 8),
+
+ RXE_START_MASK = BIT(NUM_HDR_TYPES + 9),
+ RXE_MIDDLE_MASK = BIT(NUM_HDR_TYPES + 10),
+ RXE_END_MASK = BIT(NUM_HDR_TYPES + 11),
+
+ RXE_LOOPBACK_MASK = BIT(NUM_HDR_TYPES + 12),
+
+ RXE_ATOMIC_WRITE_MASK = BIT(NUM_HDR_TYPES + 14),
+
+ RXE_READ_OR_ATOMIC_MASK = (RXE_READ_MASK | RXE_ATOMIC_MASK),
+ RXE_WRITE_OR_SEND_MASK = (RXE_WRITE_MASK | RXE_SEND_MASK),
+ RXE_READ_OR_WRITE_MASK = (RXE_READ_MASK | RXE_WRITE_MASK),
+ RXE_RDMA_OP_MASK = (RXE_READ_MASK | RXE_WRITE_MASK |
+ RXE_ATOMIC_WRITE_MASK | RXE_FLUSH_MASK |
+ RXE_ATOMIC_MASK),
+};
+
+#define OPCODE_NONE (-1)
+#define RXE_NUM_OPCODE 256
+
+struct rxe_opcode_info {
+ char *name;
+ enum rxe_hdr_mask mask;
+ int length;
+ int offset[NUM_HDR_TYPES];
+};
+
+extern struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE];
+
+#endif /* RXE_OPCODE_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
new file mode 100644
index 000000000000..767870568372
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#ifndef RXE_PARAM_H
+#define RXE_PARAM_H
+
+#include <uapi/rdma/rdma_user_rxe.h>
+
+#define DEFAULT_MAX_VALUE (1 << 20)
+
+static inline enum ib_mtu rxe_mtu_int_to_enum(int mtu)
+{
+ if (mtu < 256)
+ return 0;
+ else if (mtu < 512)
+ return IB_MTU_256;
+ else if (mtu < 1024)
+ return IB_MTU_512;
+ else if (mtu < 2048)
+ return IB_MTU_1024;
+ else if (mtu < 4096)
+ return IB_MTU_2048;
+ else
+ return IB_MTU_4096;
+}
+
+/* Find the IB mtu for a given network MTU. */
+static inline enum ib_mtu eth_mtu_int_to_enum(int mtu)
+{
+ mtu -= RXE_MAX_HDR_LENGTH;
+
+ return rxe_mtu_int_to_enum(mtu);
+}
+
+/* default/initial rxe device parameter settings */
+enum rxe_device_param {
+ RXE_MAX_MR_SIZE = -1ull,
+ RXE_PAGE_SIZE_CAP = 0xfffff000,
+ RXE_MAX_QP_WR = DEFAULT_MAX_VALUE,
+ RXE_DEVICE_CAP_FLAGS = IB_DEVICE_BAD_PKEY_CNTR
+ | IB_DEVICE_BAD_QKEY_CNTR
+ | IB_DEVICE_AUTO_PATH_MIG
+ | IB_DEVICE_CHANGE_PHY_PORT
+ | IB_DEVICE_UD_AV_PORT_ENFORCE
+ | IB_DEVICE_PORT_ACTIVE_EVENT
+ | IB_DEVICE_SYS_IMAGE_GUID
+ | IB_DEVICE_RC_RNR_NAK_GEN
+ | IB_DEVICE_SRQ_RESIZE
+ | IB_DEVICE_MEM_MGT_EXTENSIONS
+ | IB_DEVICE_MEM_WINDOW
+ | IB_DEVICE_FLUSH_GLOBAL
+ | IB_DEVICE_FLUSH_PERSISTENT
+ | IB_DEVICE_MEM_WINDOW_TYPE_2B
+ | IB_DEVICE_ATOMIC_WRITE,
+
+ RXE_MAX_SGE = 32,
+ RXE_MAX_WQE_SIZE = sizeof(struct rxe_send_wqe) +
+ sizeof(struct ib_sge) * RXE_MAX_SGE,
+ RXE_MAX_INLINE_DATA = RXE_MAX_WQE_SIZE -
+ sizeof(struct rxe_send_wqe),
+ RXE_MAX_SGE_RD = 32,
+ RXE_MAX_CQ = DEFAULT_MAX_VALUE,
+ RXE_MAX_LOG_CQE = 15,
+ RXE_MAX_PD = DEFAULT_MAX_VALUE,
+ RXE_MAX_QP_RD_ATOM = 128,
+ RXE_MAX_RES_RD_ATOM = 0x3f000,
+ RXE_MAX_QP_INIT_RD_ATOM = 128,
+ RXE_MAX_MCAST_GRP = 8192,
+ RXE_MAX_MCAST_QP_ATTACH = 56,
+ RXE_MAX_TOT_MCAST_QP_ATTACH = 0x70000,
+ RXE_MAX_AH = (1<<15) - 1, /* 32Ki - 1 */
+ RXE_MIN_AH_INDEX = 1,
+ RXE_MAX_AH_INDEX = RXE_MAX_AH,
+ RXE_MAX_SRQ_WR = DEFAULT_MAX_VALUE,
+ RXE_MIN_SRQ_WR = 1,
+ RXE_MAX_SRQ_SGE = 27,
+ RXE_MIN_SRQ_SGE = 1,
+ RXE_MAX_FMR_PAGE_LIST_LEN = 512,
+ RXE_MAX_PKEYS = 64,
+ RXE_LOCAL_CA_ACK_DELAY = 15,
+
+ RXE_MAX_UCONTEXT = DEFAULT_MAX_VALUE,
+
+ RXE_NUM_PORT = 1,
+
+ RXE_MIN_QP_INDEX = 16,
+ RXE_MAX_QP_INDEX = DEFAULT_MAX_VALUE,
+ RXE_MAX_QP = DEFAULT_MAX_VALUE - RXE_MIN_QP_INDEX,
+
+ RXE_MIN_SRQ_INDEX = 0x00020001,
+ RXE_MAX_SRQ_INDEX = DEFAULT_MAX_VALUE,
+ RXE_MAX_SRQ = DEFAULT_MAX_VALUE - RXE_MIN_SRQ_INDEX,
+
+ RXE_MIN_MR_INDEX = 0x00000001,
+ RXE_MAX_MR_INDEX = DEFAULT_MAX_VALUE >> 1,
+ RXE_MAX_MR = RXE_MAX_MR_INDEX - RXE_MIN_MR_INDEX,
+ RXE_MIN_MW_INDEX = RXE_MAX_MR_INDEX + 1,
+ RXE_MAX_MW_INDEX = DEFAULT_MAX_VALUE,
+ RXE_MAX_MW = RXE_MAX_MW_INDEX - RXE_MIN_MW_INDEX,
+
+ RXE_MAX_PKT_PER_ACK = 64,
+
+ RXE_MAX_UNACKED_PSNS = 128,
+
+ /* Max inflight SKBs per queue pair */
+ RXE_INFLIGHT_SKBS_PER_QP_HIGH = 64,
+ RXE_INFLIGHT_SKBS_PER_QP_LOW = 16,
+
+ /* Max number of iterations of each work item
+ * before yielding the cpu to let other
+ * work make progress
+ */
+ RXE_MAX_ITERATIONS = 1024,
+
+ /* Delay before calling arbiter timer */
+ RXE_NSEC_ARB_TIMER_DELAY = 200,
+
+ /* IBTA v1.4 A3.3.1 VENDOR INFORMATION section */
+ RXE_VENDOR_ID = 0XFFFFFF,
+};
+
+/* default/initial rxe port parameters */
+enum rxe_port_param {
+ RXE_PORT_GID_TBL_LEN = 1024,
+ RXE_PORT_PORT_CAP_FLAGS = IB_PORT_CM_SUP,
+ RXE_PORT_MAX_MSG_SZ = (1UL << 31),
+ RXE_PORT_BAD_PKEY_CNTR = 0,
+ RXE_PORT_QKEY_VIOL_CNTR = 0,
+ RXE_PORT_LID = 0,
+ RXE_PORT_SM_LID = 0,
+ RXE_PORT_SM_SL = 0,
+ RXE_PORT_LMC = 0,
+ RXE_PORT_MAX_VL_NUM = 1,
+ RXE_PORT_SUBNET_TIMEOUT = 0,
+ RXE_PORT_INIT_TYPE_REPLY = 0,
+ RXE_PORT_ACTIVE_WIDTH = IB_WIDTH_1X,
+ RXE_PORT_ACTIVE_SPEED = 1,
+ RXE_PORT_PKEY_TBL_LEN = 1,
+ RXE_PORT_PHYS_STATE = IB_PORT_PHYS_STATE_POLLING,
+ RXE_PORT_SUBNET_PREFIX = 0xfe80000000000000ULL,
+};
+
+/* default/initial port info parameters */
+enum rxe_port_info_param {
+ RXE_PORT_INFO_VL_CAP = 4, /* 1-8 */
+ RXE_PORT_INFO_MTU_CAP = 5, /* 4096 */
+ RXE_PORT_INFO_OPER_VL = 1, /* 1 */
+};
+
+#endif /* RXE_PARAM_H */
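
eth_mtu_int_to_enum() above subtracts the worst-case RoCE header overhead from the netdev MTU and then picks the largest IB path MTU that still fits. A minimal standalone sketch of that mapping (userspace C, not part of the patch; MAX_HDR_LENGTH is an assumed placeholder for RXE_MAX_HDR_LENGTH):

#include <stdio.h>

#define MAX_HDR_LENGTH 80	/* assumption, stands in for RXE_MAX_HDR_LENGTH */

static int ib_mtu_for_netdev_mtu(int mtu)
{
	mtu -= MAX_HDR_LENGTH;

	if (mtu < 256)
		return 0;	/* too small for any IB MTU */
	else if (mtu < 512)
		return 256;
	else if (mtu < 1024)
		return 512;
	else if (mtu < 2048)
		return 1024;
	else if (mtu < 4096)
		return 2048;
	else
		return 4096;
}

int main(void)
{
	printf("netdev mtu 1500 -> IB path mtu %d\n", ib_mtu_for_netdev_mtu(1500));
	printf("netdev mtu 9000 -> IB path mtu %d\n", ib_mtu_for_netdev_mtu(9000));
	return 0;
}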
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
new file mode 100644
index 000000000000..d9cb682fd71f
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#include "rxe.h"
+
+#define RXE_POOL_TIMEOUT (200)
+#define RXE_POOL_ALIGN (16)
+
+static const struct rxe_type_info {
+ const char *name;
+ size_t size;
+ size_t elem_offset;
+ void (*cleanup)(struct rxe_pool_elem *elem);
+ u32 min_index;
+ u32 max_index;
+ u32 max_elem;
+} rxe_type_info[RXE_NUM_TYPES] = {
+ [RXE_TYPE_UC] = {
+ .name = "uc",
+ .size = sizeof(struct rxe_ucontext),
+ .elem_offset = offsetof(struct rxe_ucontext, elem),
+ .min_index = 1,
+ .max_index = RXE_MAX_UCONTEXT,
+ .max_elem = RXE_MAX_UCONTEXT,
+ },
+ [RXE_TYPE_PD] = {
+ .name = "pd",
+ .size = sizeof(struct rxe_pd),
+ .elem_offset = offsetof(struct rxe_pd, elem),
+ .min_index = 1,
+ .max_index = RXE_MAX_PD,
+ .max_elem = RXE_MAX_PD,
+ },
+ [RXE_TYPE_AH] = {
+ .name = "ah",
+ .size = sizeof(struct rxe_ah),
+ .elem_offset = offsetof(struct rxe_ah, elem),
+ .min_index = RXE_MIN_AH_INDEX,
+ .max_index = RXE_MAX_AH_INDEX,
+ .max_elem = RXE_MAX_AH,
+ },
+ [RXE_TYPE_SRQ] = {
+ .name = "srq",
+ .size = sizeof(struct rxe_srq),
+ .elem_offset = offsetof(struct rxe_srq, elem),
+ .cleanup = rxe_srq_cleanup,
+ .min_index = RXE_MIN_SRQ_INDEX,
+ .max_index = RXE_MAX_SRQ_INDEX,
+ .max_elem = RXE_MAX_SRQ,
+ },
+ [RXE_TYPE_QP] = {
+ .name = "qp",
+ .size = sizeof(struct rxe_qp),
+ .elem_offset = offsetof(struct rxe_qp, elem),
+ .cleanup = rxe_qp_cleanup,
+ .min_index = RXE_MIN_QP_INDEX,
+ .max_index = RXE_MAX_QP_INDEX,
+ .max_elem = RXE_MAX_QP,
+ },
+ [RXE_TYPE_CQ] = {
+ .name = "cq",
+ .size = sizeof(struct rxe_cq),
+ .elem_offset = offsetof(struct rxe_cq, elem),
+ .cleanup = rxe_cq_cleanup,
+ .min_index = 1,
+ .max_index = RXE_MAX_CQ,
+ .max_elem = RXE_MAX_CQ,
+ },
+ [RXE_TYPE_MR] = {
+ .name = "mr",
+ .size = sizeof(struct rxe_mr),
+ .elem_offset = offsetof(struct rxe_mr, elem),
+ .cleanup = rxe_mr_cleanup,
+ .min_index = RXE_MIN_MR_INDEX,
+ .max_index = RXE_MAX_MR_INDEX,
+ .max_elem = RXE_MAX_MR,
+ },
+ [RXE_TYPE_MW] = {
+ .name = "mw",
+ .size = sizeof(struct rxe_mw),
+ .elem_offset = offsetof(struct rxe_mw, elem),
+ .cleanup = rxe_mw_cleanup,
+ .min_index = RXE_MIN_MW_INDEX,
+ .max_index = RXE_MAX_MW_INDEX,
+ .max_elem = RXE_MAX_MW,
+ },
+};
+
+void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
+ enum rxe_elem_type type)
+{
+ const struct rxe_type_info *info = &rxe_type_info[type];
+
+ memset(pool, 0, sizeof(*pool));
+
+ pool->rxe = rxe;
+ pool->name = info->name;
+ pool->type = type;
+ pool->max_elem = info->max_elem;
+ pool->elem_size = ALIGN(info->size, RXE_POOL_ALIGN);
+ pool->elem_offset = info->elem_offset;
+ pool->cleanup = info->cleanup;
+
+ atomic_set(&pool->num_elem, 0);
+
+ xa_init_flags(&pool->xa, XA_FLAGS_ALLOC);
+ pool->limit.min = info->min_index;
+ pool->limit.max = info->max_index;
+}
+
+void rxe_pool_cleanup(struct rxe_pool *pool)
+{
+ WARN_ON(!xa_empty(&pool->xa));
+}
+
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
+ bool sleepable)
+{
+ int err = -EINVAL;
+ gfp_t gfp_flags;
+
+ if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
+ goto err_cnt;
+
+ elem->pool = pool;
+ elem->obj = (u8 *)elem - pool->elem_offset;
+ kref_init(&elem->ref_cnt);
+ init_completion(&elem->complete);
+
+ /* AH objects are unique in that the create_ah verb
+ * can be called in atomic context. If the create_ah
+ * call is not sleepable use GFP_ATOMIC.
+ */
+ gfp_flags = sleepable ? GFP_KERNEL : GFP_ATOMIC;
+
+ if (sleepable)
+ might_sleep();
+ err = xa_alloc_cyclic(&pool->xa, &elem->index, NULL, pool->limit,
+ &pool->next, gfp_flags);
+ if (err < 0)
+ goto err_cnt;
+
+ return 0;
+
+err_cnt:
+ atomic_dec(&pool->num_elem);
+ return err;
+}
+
+void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
+{
+ struct rxe_pool_elem *elem;
+ struct xarray *xa = &pool->xa;
+ void *obj;
+
+ rcu_read_lock();
+ elem = xa_load(xa, index);
+ if (elem && kref_get_unless_zero(&elem->ref_cnt))
+ obj = elem->obj;
+ else
+ obj = NULL;
+ rcu_read_unlock();
+
+ return obj;
+}
+
+static void rxe_elem_release(struct kref *kref)
+{
+ struct rxe_pool_elem *elem = container_of(kref, typeof(*elem), ref_cnt);
+
+ complete(&elem->complete);
+}
+
+int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
+{
+ struct rxe_pool *pool = elem->pool;
+ struct xarray *xa = &pool->xa;
+ int ret, err = 0;
+ void *xa_ret;
+
+ if (sleepable)
+ might_sleep();
+
+ /* erase xarray entry to prevent looking up
+ * the pool elem from its index
+ */
+ xa_ret = xa_erase(xa, elem->index);
+ WARN_ON(xa_err(xa_ret));
+
+ /* if this is the last call to rxe_put complete the
+ * object. It is safe to touch obj->elem after this since
+ * it is freed below
+ */
+ __rxe_put(elem);
+
+ /* wait until all references to the object have been
+ * dropped before final object specific cleanup and
+ * return to rdma-core
+ */
+ if (sleepable) {
+ if (!completion_done(&elem->complete)) {
+ ret = wait_for_completion_timeout(&elem->complete,
+ msecs_to_jiffies(50000));
+
+ /* Shouldn't happen. There are still references to
+ * the object but, rather than deadlock, free the
+ * object or pass back to rdma-core.
+ */
+ if (WARN_ON(!ret))
+ err = -ETIMEDOUT;
+ }
+ } else {
+ unsigned long until = jiffies + RXE_POOL_TIMEOUT;
+
+ /* AH objects are unique in that the destroy_ah verb
+ * can be called in atomic context. This delay
+ * replaces the wait_for_completion call above
+ * when the destroy_ah call is not sleepable
+ */
+ while (!completion_done(&elem->complete) &&
+ time_before(jiffies, until))
+ mdelay(1);
+
+ if (WARN_ON(!completion_done(&elem->complete)))
+ err = -ETIMEDOUT;
+ }
+
+ if (pool->cleanup)
+ pool->cleanup(elem);
+
+ atomic_dec(&pool->num_elem);
+
+ return err;
+}
+
+int __rxe_get(struct rxe_pool_elem *elem)
+{
+ return kref_get_unless_zero(&elem->ref_cnt);
+}
+
+int __rxe_put(struct rxe_pool_elem *elem)
+{
+ return kref_put(&elem->ref_cnt, rxe_elem_release);
+}
+
+void __rxe_finalize(struct rxe_pool_elem *elem)
+{
+ void *xa_ret;
+
+ xa_ret = xa_store(&elem->pool->xa, elem->index, elem, GFP_KERNEL);
+ WARN_ON(xa_err(xa_ret));
+}
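
rxe_pool_get_index() above only hands out an object when kref_get_unless_zero() succeeds, so a lookup can never resurrect an object whose last reference was already dropped by __rxe_cleanup(). A minimal standalone sketch of that get-unless-zero pattern (userspace C11 atomics, not part of the patch):

#include <stdatomic.h>
#include <stdio.h>

struct elem {
	atomic_int ref_cnt;		/* stands in for struct kref */
};

static int get_unless_zero(struct elem *e)
{
	int old = atomic_load(&e->ref_cnt);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&e->ref_cnt, &old, old + 1))
			return 1;	/* took a new reference */
	}
	return 0;			/* object is being torn down */
}

int main(void)
{
	struct elem e = { .ref_cnt = 1 };
	int ok = get_unless_zero(&e);

	printf("lookup while live: %d (refs now %d)\n",
	       ok, atomic_load(&e.ref_cnt));

	atomic_store(&e.ref_cnt, 0);	/* teardown dropped the last ref */
	printf("lookup after teardown: %d\n", get_unless_zero(&e));
	return 0;
}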
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
new file mode 100644
index 000000000000..b42e26427a70
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#ifndef RXE_POOL_H
+#define RXE_POOL_H
+
+enum rxe_elem_type {
+ RXE_TYPE_UC,
+ RXE_TYPE_PD,
+ RXE_TYPE_AH,
+ RXE_TYPE_SRQ,
+ RXE_TYPE_QP,
+ RXE_TYPE_CQ,
+ RXE_TYPE_MR,
+ RXE_TYPE_MW,
+ RXE_NUM_TYPES, /* keep me last */
+};
+
+struct rxe_pool_elem {
+ struct rxe_pool *pool;
+ void *obj;
+ struct kref ref_cnt;
+ struct list_head list;
+ struct completion complete;
+ u32 index;
+};
+
+struct rxe_pool {
+ struct rxe_dev *rxe;
+ const char *name;
+ void (*cleanup)(struct rxe_pool_elem *elem);
+ enum rxe_elem_type type;
+
+ unsigned int max_elem;
+ atomic_t num_elem;
+ size_t elem_size;
+ size_t elem_offset;
+
+ struct xarray xa;
+ struct xa_limit limit;
+ u32 next;
+};
+
+/* initialize a pool of objects with a given limit on the number of
+ * elements; gets its parameters from rxe_type_info. Elements are
+ * allocated by the caller and connected to the pool below.
+ */
+void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
+ enum rxe_elem_type type);
+
+/* free resources from object pool */
+void rxe_pool_cleanup(struct rxe_pool *pool);
+
+/* connect already allocated object to pool */
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
+ bool sleepable);
+#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->elem, true)
+#define rxe_add_to_pool_ah(pool, obj, sleepable) __rxe_add_to_pool(pool, \
+ &(obj)->elem, sleepable)
+
+/* lookup an indexed object from index. takes a reference on object */
+void *rxe_pool_get_index(struct rxe_pool *pool, u32 index);
+
+int __rxe_get(struct rxe_pool_elem *elem);
+#define rxe_get(obj) __rxe_get(&(obj)->elem)
+
+int __rxe_put(struct rxe_pool_elem *elem);
+#define rxe_put(obj) __rxe_put(&(obj)->elem)
+
+int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable);
+#define rxe_cleanup(obj) __rxe_cleanup(&(obj)->elem, true)
+#define rxe_cleanup_ah(obj, sleepable) __rxe_cleanup(&(obj)->elem, sleepable)
+
+#define rxe_read(obj) kref_read(&(obj)->elem.ref_cnt)
+
+void __rxe_finalize(struct rxe_pool_elem *elem);
+#define rxe_finalize(obj) __rxe_finalize(&(obj)->elem)
+
+#endif /* RXE_POOL_H */
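
The rxe_get()/rxe_put()/rxe_cleanup() wrappers above all pass &(obj)->elem, and the pool maps back from the embedded element to its containing object using the elem_offset recorded in rxe_type_info (the "(u8 *)elem - pool->elem_offset" arithmetic in __rxe_add_to_pool()). A minimal standalone sketch of that mapping, with hypothetical fake_qp/fake_elem types (userspace C, not part of the patch):

#include <stddef.h>
#include <stdio.h>

struct fake_elem {			/* stand-in for rxe_pool_elem */
	unsigned int index;
};

struct fake_qp {			/* stand-in for rxe_qp */
	int state;
	struct fake_elem elem;
};

int main(void)
{
	struct fake_qp qp = { .state = 1 };
	size_t elem_offset = offsetof(struct fake_qp, elem);
	struct fake_elem *elem = &qp.elem;

	/* recover the containing object from the embedded element */
	struct fake_qp *back = (struct fake_qp *)((char *)elem - elem_offset);

	printf("round trip ok: %d\n", back == &qp);
	return 0;
}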
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
new file mode 100644
index 000000000000..95f1c1c2949d
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -0,0 +1,876 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <rdma/uverbs_ioctl.h>
+
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+#include "rxe_task.h"
+
+static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
+ int has_srq)
+{
+ if (cap->max_send_wr > rxe->attr.max_qp_wr) {
+ rxe_dbg_dev(rxe, "invalid send wr = %u > %d\n",
+ cap->max_send_wr, rxe->attr.max_qp_wr);
+ goto err1;
+ }
+
+ if (cap->max_send_sge > rxe->attr.max_send_sge) {
+ rxe_dbg_dev(rxe, "invalid send sge = %u > %d\n",
+ cap->max_send_sge, rxe->attr.max_send_sge);
+ goto err1;
+ }
+
+ if (!has_srq) {
+ if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
+ rxe_dbg_dev(rxe, "invalid recv wr = %u > %d\n",
+ cap->max_recv_wr, rxe->attr.max_qp_wr);
+ goto err1;
+ }
+
+ if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
+ rxe_dbg_dev(rxe, "invalid recv sge = %u > %d\n",
+ cap->max_recv_sge, rxe->attr.max_recv_sge);
+ goto err1;
+ }
+ }
+
+ if (cap->max_inline_data > rxe->max_inline_data) {
+ rxe_dbg_dev(rxe, "invalid max inline data = %u > %d\n",
+ cap->max_inline_data, rxe->max_inline_data);
+ goto err1;
+ }
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
+{
+ struct ib_qp_cap *cap = &init->cap;
+ struct rxe_port *port;
+ int port_num = init->port_num;
+
+ switch (init->qp_type) {
+ case IB_QPT_GSI:
+ case IB_QPT_RC:
+ case IB_QPT_UC:
+ case IB_QPT_UD:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (!init->recv_cq || !init->send_cq) {
+ rxe_dbg_dev(rxe, "missing cq\n");
+ goto err1;
+ }
+
+ if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
+ goto err1;
+
+ if (init->qp_type == IB_QPT_GSI) {
+ if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
+ rxe_dbg_dev(rxe, "invalid port = %d\n", port_num);
+ goto err1;
+ }
+
+ port = &rxe->port;
+
+ if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
+ rxe_dbg_dev(rxe, "GSI QP exists for port %d\n", port_num);
+ goto err1;
+ }
+ }
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
+{
+ qp->resp.res_head = 0;
+ qp->resp.res_tail = 0;
+ qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);
+
+ if (!qp->resp.resources)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void free_rd_atomic_resources(struct rxe_qp *qp)
+{
+ if (qp->resp.resources) {
+ int i;
+
+ for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
+ struct resp_res *res = &qp->resp.resources[i];
+
+ free_rd_atomic_resource(res);
+ }
+ kfree(qp->resp.resources);
+ qp->resp.resources = NULL;
+ }
+}
+
+void free_rd_atomic_resource(struct resp_res *res)
+{
+ res->type = 0;
+}
+
+static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
+{
+ int i;
+ struct resp_res *res;
+
+ if (qp->resp.resources) {
+ for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
+ res = &qp->resp.resources[i];
+ free_rd_atomic_resource(res);
+ }
+ }
+}
+
+static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct ib_qp_init_attr *init)
+{
+ struct rxe_port *port;
+ u32 qpn;
+
+ qp->sq_sig_type = init->sq_sig_type;
+ qp->attr.path_mtu = 1;
+ qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);
+
+ qpn = qp->elem.index;
+ port = &rxe->port;
+
+ switch (init->qp_type) {
+ case IB_QPT_GSI:
+ qp->ibqp.qp_num = 1;
+ port->qp_gsi_index = qpn;
+ qp->attr.port_num = init->port_num;
+ break;
+
+ default:
+ qp->ibqp.qp_num = qpn;
+ break;
+ }
+
+ spin_lock_init(&qp->state_lock);
+
+ spin_lock_init(&qp->sq.sq_lock);
+ spin_lock_init(&qp->rq.producer_lock);
+ spin_lock_init(&qp->rq.consumer_lock);
+
+ skb_queue_head_init(&qp->req_pkts);
+ skb_queue_head_init(&qp->resp_pkts);
+
+ atomic_set(&qp->ssn, 0);
+ atomic_set(&qp->skb_out, 0);
+}
+
+static int rxe_init_sq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
+ struct ib_udata *udata,
+ struct rxe_create_qp_resp __user *uresp)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ int wqe_size;
+ int err;
+
+ qp->sq.max_wr = init->cap.max_send_wr;
+ wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
+ init->cap.max_inline_data);
+ qp->sq.max_sge = wqe_size / sizeof(struct ib_sge);
+ qp->sq.max_inline = wqe_size;
+ wqe_size += sizeof(struct rxe_send_wqe);
+
+ qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size,
+ QUEUE_TYPE_FROM_CLIENT);
+ if (!qp->sq.queue) {
+ rxe_err_qp(qp, "Unable to allocate send queue\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ /* prepare info for caller to mmap send queue if user space qp */
+ err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
+ qp->sq.queue->buf, qp->sq.queue->buf_size,
+ &qp->sq.queue->ip);
+ if (err) {
+ rxe_err_qp(qp, "do_mmap_info failed, err = %d\n", err);
+ goto err_free;
+ }
+
+ /* return the actual capabilities to the caller; they may be
+ * larger than requested
+ */
+ init->cap.max_send_wr = qp->sq.max_wr;
+ init->cap.max_send_sge = qp->sq.max_sge;
+ init->cap.max_inline_data = qp->sq.max_inline;
+
+ return 0;
+
+err_free:
+ vfree(qp->sq.queue->buf);
+ kfree(qp->sq.queue);
+ qp->sq.queue = NULL;
+err_out:
+ return err;
+}
+
+static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct ib_qp_init_attr *init, struct ib_udata *udata,
+ struct rxe_create_qp_resp __user *uresp)
+{
+ int err;
+
+ /* if we don't finish qp create, make sure the queue is valid */
+ skb_queue_head_init(&qp->req_pkts);
+
+ err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
+ if (err < 0)
+ return err;
+ qp->sk->sk->sk_user_data = qp;
+
+ /* pick a source UDP port number for this QP based on
+ * the source QPN. this spreads traffic for different QPs
+ * across different NIC RX queues (while using a single
+ * flow for a given QP to maintain packet order).
+ * the port number must be in the Dynamic Ports range
+ * (0xc000 - 0xffff).
+ */
+ qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);
+
+ err = rxe_init_sq(qp, init, udata, uresp);
+ if (err)
+ return err;
+
+ qp->req.wqe_index = queue_get_producer(qp->sq.queue,
+ QUEUE_TYPE_FROM_CLIENT);
+
+ qp->req.opcode = -1;
+ qp->comp.opcode = -1;
+
+ rxe_init_task(&qp->send_task, qp, rxe_sender);
+
+ qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
+ if (init->qp_type == IB_QPT_RC) {
+ timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
+ timer_setup(&qp->retrans_timer, retransmit_timer, 0);
+ }
+ return 0;
+}
+
+static int rxe_init_rq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
+ struct ib_udata *udata,
+ struct rxe_create_qp_resp __user *uresp)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ int wqe_size;
+ int err;
+
+ qp->rq.max_wr = init->cap.max_recv_wr;
+ qp->rq.max_sge = init->cap.max_recv_sge;
+ wqe_size = sizeof(struct rxe_recv_wqe) +
+ qp->rq.max_sge*sizeof(struct ib_sge);
+
+ qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr, wqe_size,
+ QUEUE_TYPE_FROM_CLIENT);
+ if (!qp->rq.queue) {
+ rxe_err_qp(qp, "Unable to allocate recv queue\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ /* prepare info for caller to mmap recv queue if user space qp */
+ err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
+ qp->rq.queue->buf, qp->rq.queue->buf_size,
+ &qp->rq.queue->ip);
+ if (err) {
+ rxe_err_qp(qp, "do_mmap_info failed, err = %d\n", err);
+ goto err_free;
+ }
+
+ /* return the actual capabilities to the caller; they may be
+ * larger than requested
+ */
+ init->cap.max_recv_wr = qp->rq.max_wr;
+
+ return 0;
+
+err_free:
+ vfree(qp->rq.queue->buf);
+ kfree(qp->rq.queue);
+ qp->rq.queue = NULL;
+err_out:
+ return err;
+}
+
+static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct ib_qp_init_attr *init,
+ struct ib_udata *udata,
+ struct rxe_create_qp_resp __user *uresp)
+{
+ int err;
+
+ /* if we don't finish qp create, make sure the queue is valid */
+ skb_queue_head_init(&qp->resp_pkts);
+
+ if (!qp->srq) {
+ err = rxe_init_rq(qp, init, udata, uresp);
+ if (err)
+ return err;
+ }
+
+ rxe_init_task(&qp->recv_task, qp, rxe_receiver);
+
+ qp->resp.opcode = OPCODE_NONE;
+ qp->resp.msn = 0;
+
+ return 0;
+}
+
+/* called by the create qp verb */
+int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
+ struct ib_qp_init_attr *init,
+ struct rxe_create_qp_resp __user *uresp,
+ struct ib_pd *ibpd,
+ struct ib_udata *udata)
+{
+ int err;
+ struct rxe_cq *rcq = to_rcq(init->recv_cq);
+ struct rxe_cq *scq = to_rcq(init->send_cq);
+ struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
+ unsigned long flags;
+
+ rxe_get(pd);
+ rxe_get(rcq);
+ rxe_get(scq);
+ if (srq)
+ rxe_get(srq);
+
+ qp->pd = pd;
+ qp->rcq = rcq;
+ qp->scq = scq;
+ qp->srq = srq;
+
+ atomic_inc(&rcq->num_wq);
+ atomic_inc(&scq->num_wq);
+
+ rxe_qp_init_misc(rxe, qp, init);
+
+ err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
+ if (err)
+ goto err1;
+
+ err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
+ if (err)
+ goto err2;
+
+ spin_lock_irqsave(&qp->state_lock, flags);
+ qp->attr.qp_state = IB_QPS_RESET;
+ qp->valid = 1;
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+
+ return 0;
+
+err2:
+ rxe_queue_cleanup(qp->sq.queue);
+ qp->sq.queue = NULL;
+err1:
+ atomic_dec(&rcq->num_wq);
+ atomic_dec(&scq->num_wq);
+
+ qp->pd = NULL;
+ qp->rcq = NULL;
+ qp->scq = NULL;
+ qp->srq = NULL;
+
+ if (srq)
+ rxe_put(srq);
+ rxe_put(scq);
+ rxe_put(rcq);
+ rxe_put(pd);
+
+ return err;
+}
+
+/* called by the query qp verb */
+int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
+{
+ init->event_handler = qp->ibqp.event_handler;
+ init->qp_context = qp->ibqp.qp_context;
+ init->send_cq = qp->ibqp.send_cq;
+ init->recv_cq = qp->ibqp.recv_cq;
+ init->srq = qp->ibqp.srq;
+
+ init->cap.max_send_wr = qp->sq.max_wr;
+ init->cap.max_send_sge = qp->sq.max_sge;
+ init->cap.max_inline_data = qp->sq.max_inline;
+
+ if (!qp->srq) {
+ init->cap.max_recv_wr = qp->rq.max_wr;
+ init->cap.max_recv_sge = qp->rq.max_sge;
+ }
+
+ init->sq_sig_type = qp->sq_sig_type;
+
+ init->qp_type = qp->ibqp.qp_type;
+ init->port_num = 1;
+
+ return 0;
+}
+
+int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct ib_qp_attr *attr, int mask)
+{
+ if (mask & IB_QP_PORT) {
+ if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
+ rxe_dbg_qp(qp, "invalid port %d\n", attr->port_num);
+ goto err1;
+ }
+ }
+
+ if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
+ goto err1;
+
+ if (mask & IB_QP_ACCESS_FLAGS) {
+ if (!(qp_type(qp) == IB_QPT_RC || qp_type(qp) == IB_QPT_UC))
+ goto err1;
+ if (attr->qp_access_flags & ~RXE_ACCESS_SUPPORTED_QP)
+ goto err1;
+ }
+
+ if (mask & IB_QP_AV && rxe_av_chk_attr(qp, &attr->ah_attr))
+ goto err1;
+
+ if (mask & IB_QP_ALT_PATH) {
+ if (rxe_av_chk_attr(qp, &attr->alt_ah_attr))
+ goto err1;
+ if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
+ rxe_dbg_qp(qp, "invalid alt port %d\n", attr->alt_port_num);
+ goto err1;
+ }
+ if (attr->alt_timeout > 31) {
+ rxe_dbg_qp(qp, "invalid alt timeout %d > 31\n",
+ attr->alt_timeout);
+ goto err1;
+ }
+ }
+
+ if (mask & IB_QP_PATH_MTU) {
+ struct rxe_port *port = &rxe->port;
+
+ enum ib_mtu max_mtu = port->attr.max_mtu;
+ enum ib_mtu mtu = attr->path_mtu;
+
+ if (mtu > max_mtu) {
+ rxe_dbg_qp(qp, "invalid mtu (%d) > (%d)\n",
+ ib_mtu_enum_to_int(mtu),
+ ib_mtu_enum_to_int(max_mtu));
+ goto err1;
+ }
+ }
+
+ if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
+ if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
+ rxe_dbg_qp(qp, "invalid max_rd_atomic %d > %d\n",
+ attr->max_rd_atomic,
+ rxe->attr.max_qp_rd_atom);
+ goto err1;
+ }
+ }
+
+ if (mask & IB_QP_TIMEOUT) {
+ if (attr->timeout > 31) {
+ rxe_dbg_qp(qp, "invalid timeout %d > 31\n",
+ attr->timeout);
+ goto err1;
+ }
+ }
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+/* move the qp to the reset state */
+static void rxe_qp_reset(struct rxe_qp *qp)
+{
+ /* stop tasks from running */
+ rxe_disable_task(&qp->recv_task);
+ rxe_disable_task(&qp->send_task);
+
+ /* drain work and packet queues */
+ rxe_sender(qp);
+ rxe_receiver(qp);
+
+ if (qp->rq.queue)
+ rxe_queue_reset(qp->rq.queue);
+ if (qp->sq.queue)
+ rxe_queue_reset(qp->sq.queue);
+
+ /* cleanup attributes */
+ atomic_set(&qp->ssn, 0);
+ qp->req.opcode = -1;
+ qp->req.need_retry = 0;
+ qp->req.wait_for_rnr_timer = 0;
+ qp->req.noack_pkts = 0;
+ qp->resp.msn = 0;
+ qp->resp.opcode = -1;
+ qp->resp.drop_msg = 0;
+ qp->resp.goto_error = 0;
+ qp->resp.sent_psn_nak = 0;
+
+ if (qp->resp.mr) {
+ rxe_put(qp->resp.mr);
+ qp->resp.mr = NULL;
+ }
+
+ cleanup_rd_atomic_resources(qp);
+
+ /* reenable tasks */
+ rxe_enable_task(&qp->recv_task);
+ rxe_enable_task(&qp->send_task);
+}
+
+/* move the qp to the error state */
+void rxe_qp_error(struct rxe_qp *qp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->state_lock, flags);
+ qp->attr.qp_state = IB_QPS_ERR;
+
+ /* drain work and packet queues */
+ rxe_sched_task(&qp->recv_task);
+ rxe_sched_task(&qp->send_task);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+}
+
+static void rxe_qp_sqd(struct rxe_qp *qp, struct ib_qp_attr *attr,
+ int mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->state_lock, flags);
+ qp->attr.sq_draining = 1;
+ rxe_sched_task(&qp->send_task);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+}
+
+/* caller should hold qp->state_lock */
+static int __qp_chk_state(struct rxe_qp *qp, struct ib_qp_attr *attr,
+ int mask)
+{
+ enum ib_qp_state cur_state;
+ enum ib_qp_state new_state;
+
+ cur_state = (mask & IB_QP_CUR_STATE) ?
+ attr->cur_qp_state : qp->attr.qp_state;
+ new_state = (mask & IB_QP_STATE) ?
+ attr->qp_state : cur_state;
+
+ if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask))
+ return -EINVAL;
+
+ if (mask & IB_QP_STATE && cur_state == IB_QPS_SQD) {
+ if (qp->attr.sq_draining && new_state != IB_QPS_ERR)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const char *const qps2str[] = {
+ [IB_QPS_RESET] = "RESET",
+ [IB_QPS_INIT] = "INIT",
+ [IB_QPS_RTR] = "RTR",
+ [IB_QPS_RTS] = "RTS",
+ [IB_QPS_SQD] = "SQD",
+ [IB_QPS_SQE] = "SQE",
+ [IB_QPS_ERR] = "ERR",
+};
+
+/* called by the modify qp verb */
+int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
+ struct ib_udata *udata)
+{
+ int err;
+
+ if (mask & IB_QP_CUR_STATE)
+ qp->attr.cur_qp_state = attr->qp_state;
+
+ if (mask & IB_QP_STATE) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->state_lock, flags);
+ err = __qp_chk_state(qp, attr, mask);
+ if (!err) {
+ qp->attr.qp_state = attr->qp_state;
+ rxe_dbg_qp(qp, "state -> %s\n",
+ qps2str[attr->qp_state]);
+ }
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+
+ if (err)
+ return err;
+
+ switch (attr->qp_state) {
+ case IB_QPS_RESET:
+ rxe_qp_reset(qp);
+ break;
+ case IB_QPS_SQD:
+ rxe_qp_sqd(qp, attr, mask);
+ break;
+ case IB_QPS_ERR:
+ rxe_qp_error(qp);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
+ int max_rd_atomic = attr->max_rd_atomic ?
+ roundup_pow_of_two(attr->max_rd_atomic) : 0;
+
+ qp->attr.max_rd_atomic = max_rd_atomic;
+ atomic_set(&qp->req.rd_atomic, max_rd_atomic);
+ }
+
+ if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+ int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
+ roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;
+
+ qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;
+
+ free_rd_atomic_resources(qp);
+
+ err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
+ if (err)
+ return err;
+ }
+
+ if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
+ qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;
+
+ if (mask & IB_QP_ACCESS_FLAGS)
+ qp->attr.qp_access_flags = attr->qp_access_flags;
+
+ if (mask & IB_QP_PKEY_INDEX)
+ qp->attr.pkey_index = attr->pkey_index;
+
+ if (mask & IB_QP_PORT)
+ qp->attr.port_num = attr->port_num;
+
+ if (mask & IB_QP_QKEY)
+ qp->attr.qkey = attr->qkey;
+
+ if (mask & IB_QP_AV)
+ rxe_init_av(&attr->ah_attr, &qp->pri_av);
+
+ if (mask & IB_QP_ALT_PATH) {
+ rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
+ qp->attr.alt_port_num = attr->alt_port_num;
+ qp->attr.alt_pkey_index = attr->alt_pkey_index;
+ qp->attr.alt_timeout = attr->alt_timeout;
+ }
+
+ if (mask & IB_QP_PATH_MTU) {
+ qp->attr.path_mtu = attr->path_mtu;
+ qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
+ }
+
+ if (mask & IB_QP_TIMEOUT) {
+ qp->attr.timeout = attr->timeout;
+ if (attr->timeout == 0) {
+ qp->qp_timeout_jiffies = 0;
+ } else {
+ /* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
+ int j = nsecs_to_jiffies(4096ULL << attr->timeout);
+
+ qp->qp_timeout_jiffies = j ? j : 1;
+ }
+ }
+
+ if (mask & IB_QP_RETRY_CNT) {
+ qp->attr.retry_cnt = attr->retry_cnt;
+ qp->comp.retry_cnt = attr->retry_cnt;
+ rxe_dbg_qp(qp, "set retry count = %d\n", attr->retry_cnt);
+ }
+
+ if (mask & IB_QP_RNR_RETRY) {
+ qp->attr.rnr_retry = attr->rnr_retry;
+ qp->comp.rnr_retry = attr->rnr_retry;
+ rxe_dbg_qp(qp, "set rnr retry count = %d\n", attr->rnr_retry);
+ }
+
+ if (mask & IB_QP_RQ_PSN) {
+ qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
+ qp->resp.psn = qp->attr.rq_psn;
+ rxe_dbg_qp(qp, "set resp psn = 0x%x\n", qp->resp.psn);
+ }
+
+ if (mask & IB_QP_MIN_RNR_TIMER) {
+ qp->attr.min_rnr_timer = attr->min_rnr_timer;
+ rxe_dbg_qp(qp, "set min rnr timer = 0x%x\n",
+ attr->min_rnr_timer);
+ }
+
+ if (mask & IB_QP_SQ_PSN) {
+ qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
+ qp->req.psn = qp->attr.sq_psn;
+ qp->comp.psn = qp->attr.sq_psn;
+ rxe_dbg_qp(qp, "set req psn = 0x%x\n", qp->req.psn);
+ }
+
+ if (mask & IB_QP_PATH_MIG_STATE)
+ qp->attr.path_mig_state = attr->path_mig_state;
+
+ if (mask & IB_QP_DEST_QPN)
+ qp->attr.dest_qp_num = attr->dest_qp_num;
+
+ return 0;
+}
+
+/* called by the query qp verb */
+int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
+{
+ unsigned long flags;
+
+ *attr = qp->attr;
+
+ attr->rq_psn = qp->resp.psn;
+ attr->sq_psn = qp->req.psn;
+
+ attr->cap.max_send_wr = qp->sq.max_wr;
+ attr->cap.max_send_sge = qp->sq.max_sge;
+ attr->cap.max_inline_data = qp->sq.max_inline;
+
+ if (!qp->srq) {
+ attr->cap.max_recv_wr = qp->rq.max_wr;
+ attr->cap.max_recv_sge = qp->rq.max_sge;
+ }
+
+ rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
+ rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);
+
+ /* Applications that get this state typically spin on it.
+ * Yield the processor
+ */
+ spin_lock_irqsave(&qp->state_lock, flags);
+ attr->cur_qp_state = qp_state(qp);
+ if (qp->attr.sq_draining) {
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+ cond_resched();
+ } else {
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+ }
+
+ return 0;
+}
+
+int rxe_qp_chk_destroy(struct rxe_qp *qp)
+{
+ /* See IBA o10-2.2.3
+ * An attempt to destroy a QP while attached to a mcast group
+ * will fail immediately.
+ */
+ if (atomic_read(&qp->mcg_num)) {
+ rxe_dbg_qp(qp, "Attempt to destroy while attached to multicast group\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/* called when the last reference to the qp is dropped */
+static void rxe_qp_do_cleanup(struct work_struct *work)
+{
+ struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->state_lock, flags);
+ qp->valid = 0;
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+ qp->qp_timeout_jiffies = 0;
+
+ /* timer_setup() initializes .function, so a NULL .function means
+ * timer_setup() was never called and the timer was never initialized;
+ * otherwise the timer is valid and must be deleted.
+ */
+ if (qp_type(qp) == IB_QPT_RC && qp->retrans_timer.function &&
+ qp->rnr_nak_timer.function) {
+ timer_delete_sync(&qp->retrans_timer);
+ timer_delete_sync(&qp->rnr_nak_timer);
+ }
+
+ if (qp->recv_task.func)
+ rxe_cleanup_task(&qp->recv_task);
+
+ if (qp->send_task.func)
+ rxe_cleanup_task(&qp->send_task);
+
+ /* flush out any receive wr's or pending requests */
+ rxe_sender(qp);
+ rxe_receiver(qp);
+
+ if (qp->sq.queue)
+ rxe_queue_cleanup(qp->sq.queue);
+
+ if (qp->srq)
+ rxe_put(qp->srq);
+
+ if (qp->rq.queue)
+ rxe_queue_cleanup(qp->rq.queue);
+
+ if (qp->scq) {
+ atomic_dec(&qp->scq->num_wq);
+ rxe_put(qp->scq);
+ }
+
+ if (qp->rcq) {
+ atomic_dec(&qp->rcq->num_wq);
+ rxe_put(qp->rcq);
+ }
+
+ if (qp->pd)
+ rxe_put(qp->pd);
+
+ if (qp->resp.mr)
+ rxe_put(qp->resp.mr);
+
+ free_rd_atomic_resources(qp);
+
+ if (qp->sk) {
+ if (qp_type(qp) == IB_QPT_RC)
+ sk_dst_reset(qp->sk->sk);
+
+ kernel_sock_shutdown(qp->sk, SHUT_RDWR);
+ sock_release(qp->sk);
+ }
+}
+
+/* called when the last reference to the qp is dropped */
+void rxe_qp_cleanup(struct rxe_pool_elem *elem)
+{
+ struct rxe_qp *qp = container_of(elem, typeof(*qp), elem);
+
+ execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
+}
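
The IB_QP_TIMEOUT handling in rxe_qp_from_attr() above follows the IBTA encoding in which a 5-bit timeout value T means 4.096 us * 2^T, i.e. 4096 ns << T, before the conversion to jiffies (with a minimum of one jiffy). A minimal standalone sketch of the encoding (userspace C, not part of the patch):

#include <stdio.h>

int main(void)
{
	for (int t = 0; t <= 31; t += 8) {
		unsigned long long ns = 4096ULL << t;

		printf("timeout = %2d -> %llu ns (%.3f ms)\n",
		       t, ns, ns / 1e6);
	}
	return 0;
}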
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
new file mode 100644
index 000000000000..9611ee191a46
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+
+int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
+ struct ib_udata *udata, struct rxe_queue_buf *buf,
+ size_t buf_size, struct rxe_mmap_info **ip_p)
+{
+ int err;
+ struct rxe_mmap_info *ip = NULL;
+
+ if (outbuf) {
+ ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
+ if (IS_ERR(ip)) {
+ err = PTR_ERR(ip);
+ goto err1;
+ }
+
+ if (copy_to_user(outbuf, &ip->info, sizeof(ip->info))) {
+ err = -EFAULT;
+ goto err2;
+ }
+
+ spin_lock_bh(&rxe->pending_lock);
+ list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
+ spin_unlock_bh(&rxe->pending_lock);
+ }
+
+ *ip_p = ip;
+
+ return 0;
+
+err2:
+ kfree(ip);
+err1:
+ return err;
+}
+
+inline void rxe_queue_reset(struct rxe_queue *q)
+{
+ /* the queue is composed of a management header followed by the
+ * memory of the actual queue; see "struct rxe_queue_buf" in
+ * rxe_queue.h. Reset only the queue itself, not the header.
+ */
+ memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
+}
+
+struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
+ unsigned int elem_size, enum queue_type type)
+{
+ struct rxe_queue *q;
+ size_t buf_size;
+ unsigned int num_slots;
+
+ /* num_elem == 0 is allowed, but uninteresting */
+ if (*num_elem < 0)
+ return NULL;
+
+ q = kzalloc(sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return NULL;
+
+ q->rxe = rxe;
+ q->type = type;
+
+ /* used in resize, only need to copy used part of queue */
+ q->elem_size = elem_size;
+
+ /* pad element up to at least a cacheline and always a power of 2 */
+ if (elem_size < cache_line_size())
+ elem_size = cache_line_size();
+ elem_size = roundup_pow_of_two(elem_size);
+
+ q->log2_elem_size = order_base_2(elem_size);
+
+ num_slots = *num_elem + 1;
+ num_slots = roundup_pow_of_two(num_slots);
+ q->index_mask = num_slots - 1;
+
+ buf_size = sizeof(struct rxe_queue_buf) + num_slots * elem_size;
+
+ q->buf = vmalloc_user(buf_size);
+ if (!q->buf)
+ goto err2;
+
+ q->buf->log2_elem_size = q->log2_elem_size;
+ q->buf->index_mask = q->index_mask;
+
+ q->buf_size = buf_size;
+
+ *num_elem = num_slots - 1;
+ return q;
+
+err2:
+ kfree(q);
+ return NULL;
+}
+
+/* copies elements from original q to new q and then swaps the contents of the
+ * two q headers. This is so that if anyone is holding a pointer to q it will
+ * still work
+ */
+static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
+ unsigned int num_elem)
+{
+ enum queue_type type = q->type;
+ u32 new_prod;
+ u32 prod;
+ u32 cons;
+
+ if (!queue_empty(q, q->type) && (num_elem < queue_count(q, type)))
+ return -EINVAL;
+
+ new_prod = queue_get_producer(new_q, type);
+ prod = queue_get_producer(q, type);
+ cons = queue_get_consumer(q, type);
+
+ while ((prod - cons) & q->index_mask) {
+ memcpy(queue_addr_from_index(new_q, new_prod),
+ queue_addr_from_index(q, cons), new_q->elem_size);
+ new_prod = queue_next_index(new_q, new_prod);
+ cons = queue_next_index(q, cons);
+ }
+
+ new_q->buf->producer_index = new_prod;
+ q->buf->consumer_index = cons;
+
+ /* update private index copies */
+ if (type == QUEUE_TYPE_TO_CLIENT)
+ new_q->index = new_q->buf->producer_index;
+ else
+ q->index = q->buf->consumer_index;
+
+ /* exchange rxe_queue headers */
+ swap(*q, *new_q);
+
+ return 0;
+}
+
+int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
+ unsigned int elem_size, struct ib_udata *udata,
+ struct mminfo __user *outbuf, spinlock_t *producer_lock,
+ spinlock_t *consumer_lock)
+{
+ struct rxe_queue *new_q;
+ unsigned int num_elem = *num_elem_p;
+ int err;
+ unsigned long producer_flags;
+ unsigned long consumer_flags;
+
+ new_q = rxe_queue_init(q->rxe, &num_elem, elem_size, q->type);
+ if (!new_q)
+ return -ENOMEM;
+
+ err = do_mmap_info(new_q->rxe, outbuf, udata, new_q->buf,
+ new_q->buf_size, &new_q->ip);
+ if (err) {
+ vfree(new_q->buf);
+ kfree(new_q);
+ goto err1;
+ }
+
+ spin_lock_irqsave(consumer_lock, consumer_flags);
+
+ if (producer_lock) {
+ spin_lock_irqsave(producer_lock, producer_flags);
+ err = resize_finish(q, new_q, num_elem);
+ spin_unlock_irqrestore(producer_lock, producer_flags);
+ } else {
+ err = resize_finish(q, new_q, num_elem);
+ }
+
+ spin_unlock_irqrestore(consumer_lock, consumer_flags);
+
+ /* on success this frees the old queue (the headers were swapped
+ * by resize_finish); on error it frees the newly allocated one
+ */
+ rxe_queue_cleanup(new_q);
+ if (err)
+ goto err1;
+
+ *num_elem_p = num_elem;
+ return 0;
+
+err1:
+ return err;
+}
+
+void rxe_queue_cleanup(struct rxe_queue *q)
+{
+ if (q->ip)
+ kref_put(&q->ip->ref, rxe_mmap_release);
+ else
+ vfree(q->buf);
+
+ kfree(q);
+}
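
rxe_queue_init() above rounds the element size up to at least a cache line and a power of two, and rounds the slot count up to a power of two after adding one, because one slot is always left unused so that producer == consumer unambiguously means "empty". A minimal standalone sketch of that sizing (userspace C, not part of the patch):

#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int requested = 100;	/* caller asked for 100 elements */
	unsigned int num_slots = roundup_pow_of_two(requested + 1);
	unsigned int index_mask = num_slots - 1;

	printf("slots = %u, index_mask = 0x%x, usable capacity = %u\n",
	       num_slots, index_mask, num_slots - 1);
	return 0;
}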
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
new file mode 100644
index 000000000000..c711cb98b949
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#ifndef RXE_QUEUE_H
+#define RXE_QUEUE_H
+
+/* Implements a simple circular buffer that is shared between user
+ * and the driver and can be resized. The requested element size is
+ * rounded up to a power of 2 and the number of elements in the buffer
+ * is also rounded up to a power of 2. Since the queue is empty when
+ * the producer and consumer indices match the maximum capacity of the
+ * queue is one less than the number of element slots.
+ *
+ * Notes:
+ * - The driver indices are always masked off to q->index_mask
+ *   before storing, so they do not need to be checked on reads.
+ * - The user, whether user space or kernel, is generally
+ *   not trusted, so its parameters are masked to make sure
+ *   they do not access the queue out of bounds on reads.
+ * - The driver indices for queues must not be written
+ *   by the user, so a local copy is used and a shared copy
+ *   is stored when the local copy changes.
+ * - By passing the type in the parameter list separate from q
+ * the compiler can eliminate the switch statement when the
+ * actual queue type is known when the function is called at
+ * compile time.
+ * - These queues are lock free. The user and driver must protect
+ * changes to their end of the queues with locks if more than one
+ * CPU can be accessing it at the same time.
+ */
+
+/**
+ * enum queue_type - type of queue
+ * @QUEUE_TYPE_TO_CLIENT: Queue is written by rxe driver and
+ * read by client which may be a user space
+ * application or a kernel ulp.
+ * Used by rxe internals only.
+ * @QUEUE_TYPE_FROM_CLIENT: Queue is written by client and
+ * read by rxe driver.
+ * Used by rxe internals only.
+ * @QUEUE_TYPE_FROM_ULP: Queue is written by kernel ulp and
+ * read by rxe driver.
+ * Used by kernel verbs APIs only on
+ * behalf of ulps.
+ * @QUEUE_TYPE_TO_ULP: Queue is written by rxe driver and
+ * read by kernel ulp.
+ * Used by kernel verbs APIs only on
+ * behalf of ulps.
+ */
+enum queue_type {
+ QUEUE_TYPE_TO_CLIENT,
+ QUEUE_TYPE_FROM_CLIENT,
+ QUEUE_TYPE_FROM_ULP,
+ QUEUE_TYPE_TO_ULP,
+};
+
+struct rxe_queue_buf;
+
+struct rxe_queue {
+ struct rxe_dev *rxe;
+ struct rxe_queue_buf *buf;
+ struct rxe_mmap_info *ip;
+ size_t buf_size;
+ size_t elem_size;
+ unsigned int log2_elem_size;
+ u32 index_mask;
+ enum queue_type type;
+ /* private copy of index for shared queues between
+ * driver and clients. Driver reads and writes
+ * this copy and then replicates to rxe_queue_buf
+ * for read access by clients.
+ */
+ u32 index;
+};
+
+int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
+ struct ib_udata *udata, struct rxe_queue_buf *buf,
+ size_t buf_size, struct rxe_mmap_info **ip_p);
+
+void rxe_queue_reset(struct rxe_queue *q);
+
+struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
+ unsigned int elem_size, enum queue_type type);
+
+int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
+ unsigned int elem_size, struct ib_udata *udata,
+ struct mminfo __user *outbuf,
+ spinlock_t *producer_lock, spinlock_t *consumer_lock);
+
+void rxe_queue_cleanup(struct rxe_queue *queue);
+
+static inline u32 queue_next_index(struct rxe_queue *q, int index)
+{
+ return (index + 1) & q->index_mask;
+}
+
+static inline u32 queue_get_producer(const struct rxe_queue *q,
+ enum queue_type type)
+{
+ u32 prod;
+
+ switch (type) {
+ case QUEUE_TYPE_FROM_CLIENT:
+ /* used by rxe, client owns the index */
+ prod = smp_load_acquire(&q->buf->producer_index);
+ break;
+ case QUEUE_TYPE_TO_CLIENT:
+ /* used by rxe which owns the index */
+ prod = q->index;
+ break;
+ case QUEUE_TYPE_FROM_ULP:
+ /* used by ulp which owns the index */
+ prod = q->buf->producer_index;
+ break;
+ case QUEUE_TYPE_TO_ULP:
+ /* used by ulp, rxe owns the index */
+ prod = smp_load_acquire(&q->buf->producer_index);
+ break;
+ }
+
+ return prod;
+}
+
+static inline u32 queue_get_consumer(const struct rxe_queue *q,
+ enum queue_type type)
+{
+ u32 cons;
+
+ switch (type) {
+ case QUEUE_TYPE_FROM_CLIENT:
+ /* used by rxe which owns the index */
+ cons = q->index;
+ break;
+ case QUEUE_TYPE_TO_CLIENT:
+ /* used by rxe, client owns the index */
+ cons = smp_load_acquire(&q->buf->consumer_index);
+ break;
+ case QUEUE_TYPE_FROM_ULP:
+ /* used by ulp, rxe owns the index */
+ cons = smp_load_acquire(&q->buf->consumer_index);
+ break;
+ case QUEUE_TYPE_TO_ULP:
+ /* used by ulp which owns the index */
+ cons = q->buf->consumer_index;
+ break;
+ }
+
+ return cons;
+}
+
+static inline int queue_empty(struct rxe_queue *q, enum queue_type type)
+{
+ u32 prod = queue_get_producer(q, type);
+ u32 cons = queue_get_consumer(q, type);
+
+ return ((prod - cons) & q->index_mask) == 0;
+}
+
+static inline int queue_full(struct rxe_queue *q, enum queue_type type)
+{
+ u32 prod = queue_get_producer(q, type);
+ u32 cons = queue_get_consumer(q, type);
+
+ return ((prod + 1 - cons) & q->index_mask) == 0;
+}
+
+static inline u32 queue_count(const struct rxe_queue *q,
+ enum queue_type type)
+{
+ u32 prod = queue_get_producer(q, type);
+ u32 cons = queue_get_consumer(q, type);
+
+ return (prod - cons) & q->index_mask;
+}
+
+static inline void queue_advance_producer(struct rxe_queue *q,
+ enum queue_type type)
+{
+ u32 prod;
+
+ switch (type) {
+ case QUEUE_TYPE_FROM_CLIENT:
+ /* used by rxe, client owns the index */
+ if (WARN_ON(1))
+ pr_warn("%s: attempt to advance client index\n",
+ __func__);
+ break;
+ case QUEUE_TYPE_TO_CLIENT:
+ /* used by rxe which owns the index */
+ prod = q->index;
+ prod = (prod + 1) & q->index_mask;
+ q->index = prod;
+ /* release so client can read it safely */
+ smp_store_release(&q->buf->producer_index, prod);
+ break;
+ case QUEUE_TYPE_FROM_ULP:
+ /* used by ulp which owns the index */
+ prod = q->buf->producer_index;
+ prod = (prod + 1) & q->index_mask;
+ /* release so rxe can read it safely */
+ smp_store_release(&q->buf->producer_index, prod);
+ break;
+ case QUEUE_TYPE_TO_ULP:
+ /* used by ulp, rxe owns the index */
+ if (WARN_ON(1))
+ pr_warn("%s: attempt to advance driver index\n",
+ __func__);
+ break;
+ }
+}
+
+static inline void queue_advance_consumer(struct rxe_queue *q,
+ enum queue_type type)
+{
+ u32 cons;
+
+ switch (type) {
+ case QUEUE_TYPE_FROM_CLIENT:
+ /* used by rxe which owns the index */
+ cons = (q->index + 1) & q->index_mask;
+ q->index = cons;
+ /* release so client can read it safely */
+ smp_store_release(&q->buf->consumer_index, cons);
+ break;
+ case QUEUE_TYPE_TO_CLIENT:
+ /* used by rxe, client owns the index */
+ if (WARN_ON(1))
+ pr_warn("%s: attempt to advance client index\n",
+ __func__);
+ break;
+ case QUEUE_TYPE_FROM_ULP:
+ /* used by ulp, rxe owns the index */
+ if (WARN_ON(1))
+ pr_warn("%s: attempt to advance driver index\n",
+ __func__);
+ break;
+ case QUEUE_TYPE_TO_ULP:
+ /* used by ulp which owns the index */
+ cons = q->buf->consumer_index;
+ cons = (cons + 1) & q->index_mask;
+ /* release so rxe can read it safely */
+ smp_store_release(&q->buf->consumer_index, cons);
+ break;
+ }
+}
+
+static inline void *queue_producer_addr(struct rxe_queue *q,
+ enum queue_type type)
+{
+ u32 prod = queue_get_producer(q, type);
+
+ return q->buf->data + (prod << q->log2_elem_size);
+}
+
+static inline void *queue_consumer_addr(struct rxe_queue *q,
+ enum queue_type type)
+{
+ u32 cons = queue_get_consumer(q, type);
+
+ return q->buf->data + (cons << q->log2_elem_size);
+}
+
+static inline void *queue_addr_from_index(struct rxe_queue *q, u32 index)
+{
+ return q->buf->data + ((index & q->index_mask)
+ << q->log2_elem_size);
+}
+
+static inline u32 queue_index_from_addr(const struct rxe_queue *q,
+ const void *addr)
+{
+ return (((u8 *)addr - q->buf->data) >> q->log2_elem_size)
+ & q->index_mask;
+}
+
+static inline void *queue_head(struct rxe_queue *q, enum queue_type type)
+{
+ return queue_empty(q, type) ? NULL : queue_consumer_addr(q, type);
+}
+
+#endif /* RXE_QUEUE_H */
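
The index scheme described in the comments above amounts to a single-producer/single-consumer ring: each side advances only the index it owns, publishes it with a release store, and reads the other side's index with an acquire load, with all indices wrapped through index_mask. A minimal standalone sketch of that scheme (userspace C11 atomics, not part of the patch):

#include <stdatomic.h>
#include <stdio.h>

#define SLOTS 8				/* must be a power of two */
#define MASK  (SLOTS - 1)

static _Atomic unsigned int prod, cons;
static int ring[SLOTS];

static int ring_push(int v)
{
	unsigned int p = atomic_load_explicit(&prod, memory_order_relaxed);
	unsigned int c = atomic_load_explicit(&cons, memory_order_acquire);

	if (((p + 1 - c) & MASK) == 0)
		return -1;		/* full: one slot stays empty */
	ring[p & MASK] = v;
	atomic_store_explicit(&prod, (p + 1) & MASK, memory_order_release);
	return 0;
}

static int ring_pop(int *v)
{
	unsigned int c = atomic_load_explicit(&cons, memory_order_relaxed);
	unsigned int p = atomic_load_explicit(&prod, memory_order_acquire);

	if (((p - c) & MASK) == 0)
		return -1;		/* empty: prod == cons */
	*v = ring[c & MASK];
	atomic_store_explicit(&cons, (c + 1) & MASK, memory_order_release);
	return 0;
}

int main(void)
{
	int v = 0;

	ring_push(42);
	ring_pop(&v);
	printf("popped %d\n", v);
	return 0;
}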
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
new file mode 100644
index 000000000000..5861e4244049
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#include <linux/skbuff.h>
+
+#include "rxe.h"
+#include "rxe_loc.h"
+
+/* check that QP matches packet opcode type and is in a valid state */
+static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ struct rxe_qp *qp)
+{
+ unsigned int pkt_type;
+ unsigned long flags;
+
+ if (unlikely(!qp->valid))
+ return -EINVAL;
+
+ pkt_type = pkt->opcode & 0xe0;
+
+ switch (qp_type(qp)) {
+ case IB_QPT_RC:
+ if (unlikely(pkt_type != IB_OPCODE_RC))
+ return -EINVAL;
+ break;
+ case IB_QPT_UC:
+ if (unlikely(pkt_type != IB_OPCODE_UC))
+ return -EINVAL;
+ break;
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ if (unlikely(pkt_type != IB_OPCODE_UD))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&qp->state_lock, flags);
+ if (pkt->mask & RXE_REQ_MASK) {
+ if (unlikely(qp_state(qp) < IB_QPS_RTR)) {
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+ return -EINVAL;
+ }
+ } else {
+ if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+ return -EINVAL;
+ }
+ }
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+
+ return 0;
+}
+
+static void set_bad_pkey_cntr(struct rxe_port *port)
+{
+ spin_lock_bh(&port->port_lock);
+ port->attr.bad_pkey_cntr = min((u32)0xffff,
+ port->attr.bad_pkey_cntr + 1);
+ spin_unlock_bh(&port->port_lock);
+}
+
+static void set_qkey_viol_cntr(struct rxe_port *port)
+{
+ spin_lock_bh(&port->port_lock);
+ port->attr.qkey_viol_cntr = min((u32)0xffff,
+ port->attr.qkey_viol_cntr + 1);
+ spin_unlock_bh(&port->port_lock);
+}
+
+static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ u32 qpn, struct rxe_qp *qp)
+{
+ struct rxe_port *port = &rxe->port;
+ u16 pkey = bth_pkey(pkt);
+
+ pkt->pkey_index = 0;
+
+ if (!pkey_match(pkey, IB_DEFAULT_PKEY_FULL)) {
+ set_bad_pkey_cntr(port);
+ return -EINVAL;
+ }
+
+ if (qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) {
+ u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;
+
+ if (unlikely(deth_qkey(pkt) != qkey)) {
+ set_qkey_viol_cntr(port);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ struct rxe_qp *qp)
+{
+ struct sk_buff *skb = PKT_TO_SKB(pkt);
+
+ if (qp_type(qp) != IB_QPT_RC && qp_type(qp) != IB_QPT_UC)
+ return 0;
+
+ if (unlikely(pkt->port_num != qp->attr.port_num))
+ return -EINVAL;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct in_addr *saddr =
+ &qp->pri_av.sgid_addr._sockaddr_in.sin_addr;
+ struct in_addr *daddr =
+ &qp->pri_av.dgid_addr._sockaddr_in.sin_addr;
+
+ if ((ip_hdr(skb)->daddr != saddr->s_addr) ||
+ (ip_hdr(skb)->saddr != daddr->s_addr))
+ return -EINVAL;
+
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ struct in6_addr *saddr =
+ &qp->pri_av.sgid_addr._sockaddr_in6.sin6_addr;
+ struct in6_addr *daddr =
+ &qp->pri_av.dgid_addr._sockaddr_in6.sin6_addr;
+
+ if (memcmp(&ipv6_hdr(skb)->daddr, saddr, sizeof(*saddr)) ||
+ memcmp(&ipv6_hdr(skb)->saddr, daddr, sizeof(*daddr)))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
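+/* validate the BTH fields, look up the destination QP (taking a
+ * reference that is released when the packet is dropped or consumed)
+ * and run the per-QP type, state, address and key checks; multicast
+ * packets only require a GRH here and are resolved to QPs later
+ */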
+static int hdr_check(struct rxe_pkt_info *pkt)
+{
+ struct rxe_dev *rxe = pkt->rxe;
+ struct rxe_port *port = &rxe->port;
+ struct rxe_qp *qp = NULL;
+ u32 qpn = bth_qpn(pkt);
+ int index;
+ int err;
+
+ if (unlikely(bth_tver(pkt) != BTH_TVER))
+ goto err1;
+
+ if (unlikely(qpn == 0))
+ goto err1;
+
+ if (qpn != IB_MULTICAST_QPN) {
+ index = (qpn == 1) ? port->qp_gsi_index : qpn;
+
+ qp = rxe_pool_get_index(&rxe->qp_pool, index);
+ if (unlikely(!qp))
+ goto err1;
+
+ err = check_type_state(rxe, pkt, qp);
+ if (unlikely(err))
+ goto err2;
+
+ err = check_addr(rxe, pkt, qp);
+ if (unlikely(err))
+ goto err2;
+
+ err = check_keys(rxe, pkt, qpn, qp);
+ if (unlikely(err))
+ goto err2;
+ } else {
+ if (unlikely((pkt->mask & RXE_GRH_MASK) == 0))
+ goto err1;
+ }
+
+ pkt->qp = qp;
+ return 0;
+
+err2:
+ rxe_put(qp);
+err1:
+ return -EINVAL;
+}
+
+static inline void rxe_rcv_pkt(struct rxe_pkt_info *pkt, struct sk_buff *skb)
+{
+ if (pkt->mask & RXE_REQ_MASK)
+ rxe_resp_queue_pkt(pkt->qp, skb);
+ else
+ rxe_comp_queue_pkt(pkt->qp, skb);
+}
+
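+/* deliver a multicast packet to every QP attached to the multicast
+ * group for the destination GID; the skb is cloned for all but the
+ * last QP and the original skb is consumed by the last delivery
+ */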
+static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
+{
+ struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+ struct rxe_mcg *mcg;
+ struct rxe_mca *mca;
+ struct rxe_qp *qp;
+ union ib_gid dgid;
+ int err;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
+ (struct in6_addr *)&dgid);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ memcpy(&dgid, &ipv6_hdr(skb)->daddr, sizeof(dgid));
+
+ /* lookup mcast group corresponding to mgid, takes a ref */
+ mcg = rxe_lookup_mcg(rxe, &dgid);
+ if (!mcg)
+ goto drop; /* mcast group not registered */
+
+ spin_lock_bh(&rxe->mcg_lock);
+
+ /* this is an unreliable datagram service, so we allow
+ * delivery of a multicast packet to a single QP to fail
+ * and simply move on to the remaining QPs on the list
+ */
+ list_for_each_entry(mca, &mcg->qp_list, qp_list) {
+ qp = mca->qp;
+
+ /* validate qp for incoming packet */
+ err = check_type_state(rxe, pkt, qp);
+ if (err)
+ continue;
+
+ err = check_keys(rxe, pkt, bth_qpn(pkt), qp);
+ if (err)
+ continue;
+
+ /* for all but the last QP create a new clone of the
+ * skb and pass to the QP. Pass the original skb to
+ * the last QP in the list.
+ */
+ if (mca->qp_list.next != &mcg->qp_list) {
+ struct sk_buff *cskb;
+ struct rxe_pkt_info *cpkt;
+
+ cskb = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!cskb))
+ continue;
+
+ if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) {
+ kfree_skb(cskb);
+ break;
+ }
+
+ cpkt = SKB_TO_PKT(cskb);
+ cpkt->qp = qp;
+ rxe_get(qp);
+ rxe_rcv_pkt(cpkt, cskb);
+ } else {
+ pkt->qp = qp;
+ rxe_get(qp);
+ rxe_rcv_pkt(pkt, skb);
+ skb = NULL; /* mark consumed */
+ }
+ }
+
+ spin_unlock_bh(&rxe->mcg_lock);
+
+ kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
+
+ if (likely(!skb))
+ return;
+
+ /* This only occurs if one of the checks fails on the last
+ * QP in the list above
+ */
+
+drop:
+ kfree_skb(skb);
+ ib_device_put(&rxe->ib_dev);
+}
+
+/**
+ * rxe_chk_dgid - validate destination IP address
+ * @rxe: rxe device that received packet
+ * @skb: the received packet buffer
+ *
+ * Accept any loopback packet. Otherwise extract the destination
+ * IP address from the packet and accept it if it is a multicast
+ * address or if it matches an entry in the SGID table.
+ */
+static int rxe_chk_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
+{
+ struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+ const struct ib_gid_attr *gid_attr;
+ union ib_gid dgid;
+ union ib_gid *pdgid;
+
+ if (pkt->mask & RXE_LOOPBACK_MASK)
+ return 0;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
+ (struct in6_addr *)&dgid);
+ pdgid = &dgid;
+ } else {
+ pdgid = (union ib_gid *)&ipv6_hdr(skb)->daddr;
+ }
+
+ if (rdma_is_multicast_addr((struct in6_addr *)pdgid))
+ return 0;
+
+ gid_attr = rdma_find_gid_by_port(&rxe->ib_dev, pdgid,
+ IB_GID_TYPE_ROCE_UDP_ENCAP,
+ 1, skb->dev);
+ if (IS_ERR(gid_attr))
+ return PTR_ERR(gid_attr);
+
+ rdma_put_gid_attr(gid_attr);
+ return 0;
+}
+
+/* rxe_rcv is called from the interface driver */
+void rxe_rcv(struct sk_buff *skb)
+{
+ int err;
+ struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+ struct rxe_dev *rxe = pkt->rxe;
+
+ if (unlikely(skb->len < RXE_BTH_BYTES))
+ goto drop;
+
+ if (rxe_chk_dgid(rxe, skb) < 0)
+ goto drop;
+
+ pkt->opcode = bth_opcode(pkt);
+ pkt->psn = bth_psn(pkt);
+ pkt->qp = NULL;
+ pkt->mask |= rxe_opcode[pkt->opcode].mask;
+
+ if (unlikely(skb->len < header_size(pkt)))
+ goto drop;
+
+ err = hdr_check(pkt);
+ if (unlikely(err))
+ goto drop;
+
+ err = rxe_icrc_check(skb, pkt);
+ if (unlikely(err))
+ goto drop;
+
+ rxe_counter_inc(rxe, RXE_CNT_RCVD_PKTS);
+
+ if (unlikely(bth_qpn(pkt) == IB_MULTICAST_QPN))
+ rxe_rcv_mcast_pkt(rxe, skb);
+ else
+ rxe_rcv_pkt(pkt, skb);
+
+ return;
+
+drop:
+ if (pkt->qp)
+ rxe_put(pkt->qp);
+
+ kfree_skb(skb);
+ ib_device_put(&rxe->ib_dev);
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
new file mode 100644
index 000000000000..373b03f223be
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -0,0 +1,848 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#include <linux/skbuff.h>
+
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+
+static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ u32 opcode);
+
+static inline void retry_first_write_send(struct rxe_qp *qp,
+ struct rxe_send_wqe *wqe, int npsn)
+{
+ int i;
+
+ for (i = 0; i < npsn; i++) {
+ int to_send = (wqe->dma.resid > qp->mtu) ?
+ qp->mtu : wqe->dma.resid;
+
+ qp->req.opcode = next_opcode(qp, wqe,
+ wqe->wr.opcode);
+
+ if (wqe->wr.send_flags & IB_SEND_INLINE) {
+ wqe->dma.resid -= to_send;
+ wqe->dma.sge_offset += to_send;
+ } else {
+ advance_dma_data(&wqe->dma, to_send);
+ }
+ }
+}
+
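+/* rewind the send queue to the completer's view of the oldest
+ * un-acked wqe and reset the psn, opcode and per-wqe dma state so
+ * the requester resends from that point
+ */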
+static void req_retry(struct rxe_qp *qp)
+{
+ struct rxe_send_wqe *wqe;
+ unsigned int wqe_index;
+ unsigned int mask;
+ int npsn;
+ int first = 1;
+ struct rxe_queue *q = qp->sq.queue;
+ unsigned int cons;
+ unsigned int prod;
+
+ cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
+ prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
+
+ qp->req.wqe_index = cons;
+ qp->req.psn = qp->comp.psn;
+ qp->req.opcode = -1;
+
+ for (wqe_index = cons; wqe_index != prod;
+ wqe_index = queue_next_index(q, wqe_index)) {
+ wqe = queue_addr_from_index(qp->sq.queue, wqe_index);
+ mask = wr_opcode_mask(wqe->wr.opcode, qp);
+
+ if (wqe->state == wqe_state_posted)
+ break;
+
+ if (wqe->state == wqe_state_done)
+ continue;
+
+ wqe->iova = (mask & WR_ATOMIC_MASK) ?
+ wqe->wr.wr.atomic.remote_addr :
+ (mask & WR_READ_OR_WRITE_MASK) ?
+ wqe->wr.wr.rdma.remote_addr :
+ 0;
+
+ if (!first || (mask & WR_READ_MASK) == 0) {
+ wqe->dma.resid = wqe->dma.length;
+ wqe->dma.cur_sge = 0;
+ wqe->dma.sge_offset = 0;
+ }
+
+ if (first) {
+ first = 0;
+
+ if (mask & WR_WRITE_OR_SEND_MASK) {
+ npsn = (qp->comp.psn - wqe->first_psn) &
+ BTH_PSN_MASK;
+ retry_first_write_send(qp, wqe, npsn);
+ }
+
+ if (mask & WR_READ_MASK) {
+ npsn = (wqe->dma.length - wqe->dma.resid) /
+ qp->mtu;
+ wqe->iova += npsn * qp->mtu;
+ }
+ }
+
+ wqe->state = wqe_state_posted;
+ }
+}
+
+void rnr_nak_timer(struct timer_list *t)
+{
+ struct rxe_qp *qp = timer_container_of(qp, t, rnr_nak_timer);
+ unsigned long flags;
+
+ rxe_dbg_qp(qp, "nak timer fired\n");
+
+ spin_lock_irqsave(&qp->state_lock, flags);
+ if (qp->valid) {
+ /* request a send queue retry */
+ qp->req.need_retry = 1;
+ qp->req.wait_for_rnr_timer = 0;
+ rxe_sched_task(&qp->send_task);
+ }
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+}
+
+static void req_check_sq_drain_done(struct rxe_qp *qp)
+{
+ struct rxe_queue *q;
+ unsigned int index;
+ unsigned int cons;
+ struct rxe_send_wqe *wqe;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->state_lock, flags);
+ if (qp_state(qp) == IB_QPS_SQD) {
+ q = qp->sq.queue;
+ index = qp->req.wqe_index;
+ cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
+ wqe = queue_addr_from_index(q, cons);
+
+ /* check to see if we are drained;
+ * state_lock used by requester and completer
+ */
+ do {
+ if (!qp->attr.sq_draining)
+ /* comp just finished */
+ break;
+
+ if (wqe && ((index != cons) ||
+ (wqe->state != wqe_state_posted)))
+ /* comp not done yet */
+ break;
+
+ qp->attr.sq_draining = 0;
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+
+ if (qp->ibqp.event_handler) {
+ struct ib_event ev;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_SQ_DRAINED;
+ qp->ibqp.event_handler(&ev,
+ qp->ibqp.qp_context);
+ }
+ return;
+ } while (0);
+ }
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+}
+
+static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp)
+{
+ struct rxe_queue *q = qp->sq.queue;
+ unsigned int index = qp->req.wqe_index;
+ unsigned int prod;
+
+ prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
+ if (index == prod)
+ return NULL;
+ else
+ return queue_addr_from_index(q, index);
+}
+
+static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
+{
+ struct rxe_send_wqe *wqe;
+ unsigned long flags;
+
+ req_check_sq_drain_done(qp);
+
+ wqe = __req_next_wqe(qp);
+ if (wqe == NULL)
+ return NULL;
+
+ spin_lock_irqsave(&qp->state_lock, flags);
+ if (unlikely((qp_state(qp) == IB_QPS_SQD) &&
+ (wqe->state != wqe_state_processing))) {
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+ return NULL;
+ }
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+
+ wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
+ return wqe;
+}
+
+/**
+ * rxe_wqe_is_fenced - check if next wqe is fenced
+ * @qp: the queue pair
+ * @wqe: the next wqe
+ *
+ * Returns: 1 if wqe needs to wait
+ * 0 if wqe is ready to go
+ */
+static int rxe_wqe_is_fenced(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+ /* Local invalidate fence (LIF) see IBA 10.6.5.1
+ * Requires ALL previous operations on the send queue
+ * are complete. Make mandatory for the rxe driver.
+ */
+ if (wqe->wr.opcode == IB_WR_LOCAL_INV)
+ return qp->req.wqe_index != queue_get_consumer(qp->sq.queue,
+ QUEUE_TYPE_FROM_CLIENT);
+
+ /* Fence see IBA 10.8.3.3
+ * Requires that all previous read and atomic operations
+ * are complete.
+ */
+ return (wqe->wr.send_flags & IB_SEND_FENCE) &&
+ atomic_read(&qp->req.rd_atomic) != qp->attr.max_rd_atomic;
+}
+
+static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
+{
+ switch (opcode) {
+ case IB_WR_RDMA_WRITE:
+ if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
+ qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
+ return fits ?
+ IB_OPCODE_RC_RDMA_WRITE_LAST :
+ IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
+ else
+ return fits ?
+ IB_OPCODE_RC_RDMA_WRITE_ONLY :
+ IB_OPCODE_RC_RDMA_WRITE_FIRST;
+
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
+ qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
+ return fits ?
+ IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
+ IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
+ else
+ return fits ?
+ IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
+ IB_OPCODE_RC_RDMA_WRITE_FIRST;
+
+ case IB_WR_SEND:
+ if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
+ qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
+ return fits ?
+ IB_OPCODE_RC_SEND_LAST :
+ IB_OPCODE_RC_SEND_MIDDLE;
+ else
+ return fits ?
+ IB_OPCODE_RC_SEND_ONLY :
+ IB_OPCODE_RC_SEND_FIRST;
+
+ case IB_WR_SEND_WITH_IMM:
+ if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
+ qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
+ return fits ?
+ IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
+ IB_OPCODE_RC_SEND_MIDDLE;
+ else
+ return fits ?
+ IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
+ IB_OPCODE_RC_SEND_FIRST;
+
+ case IB_WR_FLUSH:
+ return IB_OPCODE_RC_FLUSH;
+
+ case IB_WR_RDMA_READ:
+ return IB_OPCODE_RC_RDMA_READ_REQUEST;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ return IB_OPCODE_RC_COMPARE_SWAP;
+
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ return IB_OPCODE_RC_FETCH_ADD;
+
+ case IB_WR_SEND_WITH_INV:
+ if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
+ qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
+ return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
+ IB_OPCODE_RC_SEND_MIDDLE;
+ else
+ return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
+ IB_OPCODE_RC_SEND_FIRST;
+
+ case IB_WR_ATOMIC_WRITE:
+ return IB_OPCODE_RC_ATOMIC_WRITE;
+
+ case IB_WR_REG_MR:
+ case IB_WR_LOCAL_INV:
+ return opcode;
+ }
+
+ return -EINVAL;
+}
+
+static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
+{
+ switch (opcode) {
+ case IB_WR_RDMA_WRITE:
+ if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
+ qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
+ return fits ?
+ IB_OPCODE_UC_RDMA_WRITE_LAST :
+ IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
+ else
+ return fits ?
+ IB_OPCODE_UC_RDMA_WRITE_ONLY :
+ IB_OPCODE_UC_RDMA_WRITE_FIRST;
+
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
+ qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
+ return fits ?
+ IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
+ IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
+ else
+ return fits ?
+ IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
+ IB_OPCODE_UC_RDMA_WRITE_FIRST;
+
+ case IB_WR_SEND:
+ if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
+ qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
+ return fits ?
+ IB_OPCODE_UC_SEND_LAST :
+ IB_OPCODE_UC_SEND_MIDDLE;
+ else
+ return fits ?
+ IB_OPCODE_UC_SEND_ONLY :
+ IB_OPCODE_UC_SEND_FIRST;
+
+ case IB_WR_SEND_WITH_IMM:
+ if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
+ qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
+ return fits ?
+ IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
+ IB_OPCODE_UC_SEND_MIDDLE;
+ else
+ return fits ?
+ IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
+ IB_OPCODE_UC_SEND_FIRST;
+ }
+
+ return -EINVAL;
+}
+
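+/* pick the wire opcode for the next packet of this wqe from the QP
+ * type, the work request opcode, the previous packet's opcode and
+ * whether the remaining payload fits in a single MTU
+ */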
+static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ u32 opcode)
+{
+ int fits = (wqe->dma.resid <= qp->mtu);
+
+ switch (qp_type(qp)) {
+ case IB_QPT_RC:
+ return next_opcode_rc(qp, opcode, fits);
+
+ case IB_QPT_UC:
+ return next_opcode_uc(qp, opcode, fits);
+
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ switch (opcode) {
+ case IB_WR_SEND:
+ return IB_OPCODE_UD_SEND_ONLY;
+
+ case IB_WR_SEND_WITH_IMM:
+ return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
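+/* reserve one of the QP's outstanding read/atomic slots; returns
+ * -EAGAIN and leaves need_rd_atomic set if none are currently
+ * available
+ */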
+static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+ int depth;
+
+ if (wqe->has_rd_atomic)
+ return 0;
+
+ qp->req.need_rd_atomic = 1;
+ depth = atomic_dec_return(&qp->req.rd_atomic);
+
+ if (depth >= 0) {
+ qp->req.need_rd_atomic = 0;
+ wqe->has_rd_atomic = 1;
+ return 0;
+ }
+
+ atomic_inc(&qp->req.rd_atomic);
+ return -EAGAIN;
+}
+
+static inline int get_mtu(struct rxe_qp *qp)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+
+ if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
+ return qp->mtu;
+
+ return rxe->port.mtu_cap;
+}
+
+static struct sk_buff *init_req_packet(struct rxe_qp *qp,
+ struct rxe_av *av,
+ struct rxe_send_wqe *wqe,
+ int opcode, u32 payload,
+ struct rxe_pkt_info *pkt)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct sk_buff *skb;
+ struct rxe_send_wr *ibwr = &wqe->wr;
+ int pad = (-payload) & 0x3;
+ int paylen;
+ int solicited;
+ u32 qp_num;
+ int ack_req = 0;
+
+ /* length from start of bth to end of icrc */
+ paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
+ pkt->paylen = paylen;
+
+ /* init skb */
+ skb = rxe_init_packet(rxe, av, paylen, pkt);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* init bth */
+ solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
+ (pkt->mask & RXE_END_MASK) &&
+ ((pkt->mask & (RXE_SEND_MASK)) ||
+ (pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
+ (RXE_WRITE_MASK | RXE_IMMDT_MASK));
+
+ qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
+ qp->attr.dest_qp_num;
+
+ if (qp_type(qp) != IB_QPT_UD && qp_type(qp) != IB_QPT_UC)
+ ack_req = ((pkt->mask & RXE_END_MASK) ||
+ (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
+ if (ack_req)
+ qp->req.noack_pkts = 0;
+
+ bth_init(pkt, pkt->opcode, solicited, 0, pad, IB_DEFAULT_PKEY_FULL, qp_num,
+ ack_req, pkt->psn);
+
+ /* init optional headers */
+ if (pkt->mask & RXE_RETH_MASK) {
+ if (pkt->mask & RXE_FETH_MASK)
+ reth_set_rkey(pkt, ibwr->wr.flush.rkey);
+ else
+ reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
+ reth_set_va(pkt, wqe->iova);
+ reth_set_len(pkt, wqe->dma.resid);
+ }
+
+ /* Fill Flush Extension Transport Header */
+ if (pkt->mask & RXE_FETH_MASK)
+ feth_init(pkt, ibwr->wr.flush.type, ibwr->wr.flush.level);
+
+ if (pkt->mask & RXE_IMMDT_MASK)
+ immdt_set_imm(pkt, ibwr->ex.imm_data);
+
+ if (pkt->mask & RXE_IETH_MASK)
+ ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);
+
+ if (pkt->mask & RXE_ATMETH_MASK) {
+ atmeth_set_va(pkt, wqe->iova);
+ if (opcode == IB_OPCODE_RC_COMPARE_SWAP) {
+ atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
+ atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
+ } else {
+ atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
+ }
+ atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
+ }
+
+ if (pkt->mask & RXE_DETH_MASK) {
+ if (qp->ibqp.qp_num == 1)
+ deth_set_qkey(pkt, GSI_QKEY);
+ else
+ deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
+ deth_set_sqp(pkt, qp->ibqp.qp_num);
+ }
+
+ return skb;
+}
+
+static int finish_packet(struct rxe_qp *qp, struct rxe_av *av,
+ struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb, u32 payload)
+{
+ int err;
+
+ err = rxe_prepare(av, pkt, skb);
+ if (err)
+ return err;
+
+ if (pkt->mask & RXE_WRITE_OR_SEND_MASK) {
+ if (wqe->wr.send_flags & IB_SEND_INLINE) {
+ u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];
+
+ memcpy(payload_addr(pkt), tmp, payload);
+
+ wqe->dma.resid -= payload;
+ wqe->dma.sge_offset += payload;
+ } else {
+ err = copy_data(qp->pd, 0, &wqe->dma,
+ payload_addr(pkt), payload,
+ RXE_FROM_MR_OBJ);
+ if (err)
+ return err;
+ }
+ if (bth_pad(pkt)) {
+ u8 *pad = payload_addr(pkt) + payload;
+
+ memset(pad, 0, bth_pad(pkt));
+ }
+ } else if (pkt->mask & RXE_FLUSH_MASK) {
+ /* oA19-2: shall have no payload. */
+ wqe->dma.resid = 0;
+ }
+
+ if (pkt->mask & RXE_ATOMIC_WRITE_MASK) {
+ memcpy(payload_addr(pkt), wqe->dma.atomic_wr, payload);
+ wqe->dma.resid -= payload;
+ }
+
+ return 0;
+}
+
+static void update_wqe_state(struct rxe_qp *qp,
+ struct rxe_send_wqe *wqe,
+ struct rxe_pkt_info *pkt)
+{
+ if (pkt->mask & RXE_END_MASK) {
+ if (qp_type(qp) == IB_QPT_RC)
+ wqe->state = wqe_state_pending;
+ else
+ wqe->state = wqe_state_done;
+ } else {
+ wqe->state = wqe_state_processing;
+ }
+}
+
+static void update_wqe_psn(struct rxe_qp *qp,
+ struct rxe_send_wqe *wqe,
+ struct rxe_pkt_info *pkt,
+ u32 payload)
+{
+ /* number of packets left to send including current one */
+ int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;
+
+ /* handle zero length packet case */
+ if (num_pkt == 0)
+ num_pkt = 1;
+
+ if (pkt->mask & RXE_START_MASK) {
+ wqe->first_psn = qp->req.psn;
+ wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
+ }
+
+ if (pkt->mask & RXE_READ_MASK)
+ qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
+ else
+ qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
+}
+
+static void update_state(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
+{
+ qp->req.opcode = pkt->opcode;
+
+ if (pkt->mask & RXE_END_MASK)
+ qp->req.wqe_index = queue_next_index(qp->sq.queue,
+ qp->req.wqe_index);
+
+ qp->need_req_skb = 0;
+
+ if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
+ mod_timer(&qp->retrans_timer,
+ jiffies + qp->qp_timeout_jiffies);
+}
+
+static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+ u8 opcode = wqe->wr.opcode;
+ u32 rkey;
+ int ret;
+
+ switch (opcode) {
+ case IB_WR_LOCAL_INV:
+ rkey = wqe->wr.ex.invalidate_rkey;
+ if (rkey_is_mw(rkey))
+ ret = rxe_invalidate_mw(qp, rkey);
+ else
+ ret = rxe_invalidate_mr(qp, rkey);
+
+ if (unlikely(ret)) {
+ wqe->status = IB_WC_LOC_QP_OP_ERR;
+ return ret;
+ }
+ break;
+ case IB_WR_REG_MR:
+ ret = rxe_reg_fast_mr(qp, wqe);
+ if (unlikely(ret)) {
+ wqe->status = IB_WC_LOC_QP_OP_ERR;
+ return ret;
+ }
+ break;
+ case IB_WR_BIND_MW:
+ ret = rxe_bind_mw(qp, wqe);
+ if (unlikely(ret)) {
+ wqe->status = IB_WC_MW_BIND_ERR;
+ return ret;
+ }
+ break;
+ default:
+ rxe_dbg_qp(qp, "Unexpected send wqe opcode %d\n", opcode);
+ wqe->status = IB_WC_LOC_QP_OP_ERR;
+ return -EINVAL;
+ }
+
+ wqe->state = wqe_state_done;
+ wqe->status = IB_WC_SUCCESS;
+ qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
+
+ return 0;
+}
+
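+/* one iteration of the requester state machine: fetch the next send
+ * wqe, perform local operations directly or build and transmit one
+ * request packet, then update wqe, psn and queue state; returns 0 to
+ * keep the send task looping or -EAGAIN to stop
+ */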
+int rxe_requester(struct rxe_qp *qp)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct rxe_pkt_info pkt;
+ struct sk_buff *skb;
+ struct rxe_send_wqe *wqe;
+ enum rxe_hdr_mask mask;
+ u32 payload;
+ int mtu;
+ int opcode;
+ int err;
+ int ret;
+ struct rxe_queue *q = qp->sq.queue;
+ struct rxe_ah *ah;
+ struct rxe_av *av;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->state_lock, flags);
+ if (unlikely(!qp->valid)) {
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+ goto exit;
+ }
+
+ if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
+ wqe = __req_next_wqe(qp);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+ if (wqe) {
+ wqe->status = IB_WC_WR_FLUSH_ERR;
+ goto err;
+ } else {
+ goto exit;
+ }
+ }
+
+ if (unlikely(qp_state(qp) == IB_QPS_RESET)) {
+ qp->req.wqe_index = queue_get_consumer(q,
+ QUEUE_TYPE_FROM_CLIENT);
+ qp->req.opcode = -1;
+ qp->req.need_rd_atomic = 0;
+ qp->req.wait_psn = 0;
+ qp->req.need_retry = 0;
+ qp->req.wait_for_rnr_timer = 0;
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+ goto exit;
+ }
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+
+ /* we come here if either the retransmit timer or the
+ * rnr timer has fired. If the retransmit timer fires
+ * while we are processing an RNR NAK, wait until the
+ * rnr timer has fired before starting the retry flow
+ */
+ if (unlikely(qp->req.need_retry && !qp->req.wait_for_rnr_timer)) {
+ req_retry(qp);
+ qp->req.need_retry = 0;
+ }
+
+ wqe = req_next_wqe(qp);
+ if (unlikely(!wqe))
+ goto exit;
+
+ if (rxe_wqe_is_fenced(qp, wqe)) {
+ qp->req.wait_fence = 1;
+ goto exit;
+ }
+
+ if (wqe->mask & WR_LOCAL_OP_MASK) {
+ err = rxe_do_local_ops(qp, wqe);
+ if (unlikely(err))
+ goto err;
+ else
+ goto done;
+ }
+
+ if (unlikely(qp_type(qp) == IB_QPT_RC &&
+ psn_compare(qp->req.psn, (qp->comp.psn +
+ RXE_MAX_UNACKED_PSNS)) > 0)) {
+ qp->req.wait_psn = 1;
+ goto exit;
+ }
+
+ /* Limit the number of inflight SKBs per QP */
+ if (unlikely(atomic_read(&qp->skb_out) >
+ RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
+ qp->need_req_skb = 1;
+ goto exit;
+ }
+
+ opcode = next_opcode(qp, wqe, wqe->wr.opcode);
+ if (unlikely(opcode < 0)) {
+ wqe->status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
+ }
+
+ mask = rxe_opcode[opcode].mask;
+ if (unlikely(mask & (RXE_READ_OR_ATOMIC_MASK |
+ RXE_ATOMIC_WRITE_MASK))) {
+ if (check_init_depth(qp, wqe))
+ goto exit;
+ }
+
+ mtu = get_mtu(qp);
+ payload = (mask & (RXE_WRITE_OR_SEND_MASK | RXE_ATOMIC_WRITE_MASK)) ?
+ wqe->dma.resid : 0;
+ if (payload > mtu) {
+ if (qp_type(qp) == IB_QPT_UD) {
+ /* C10-93.1.1: If the total sum of all the buffer lengths specified for a
+ * UD message exceeds the MTU of the port as returned by QueryHCA, the CI
+ * shall not emit any packets for this message. Further, the CI shall not
+ * generate an error due to this condition.
+ */
+
+ /* fake a successful UD send */
+ wqe->first_psn = qp->req.psn;
+ wqe->last_psn = qp->req.psn;
+ qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
+ qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
+ qp->req.wqe_index = queue_next_index(qp->sq.queue,
+ qp->req.wqe_index);
+ wqe->state = wqe_state_done;
+ wqe->status = IB_WC_SUCCESS;
+ goto done;
+ }
+ payload = mtu;
+ }
+
+ pkt.rxe = rxe;
+ pkt.opcode = opcode;
+ pkt.qp = qp;
+ pkt.psn = qp->req.psn;
+ pkt.mask = rxe_opcode[opcode].mask;
+ pkt.wqe = wqe;
+
+ av = rxe_get_av(&pkt, &ah);
+ if (unlikely(!av)) {
+ rxe_dbg_qp(qp, "Failed no address vector\n");
+ wqe->status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
+ }
+
+ skb = init_req_packet(qp, av, wqe, opcode, payload, &pkt);
+ if (unlikely(!skb)) {
+ rxe_dbg_qp(qp, "Failed allocating skb\n");
+ wqe->status = IB_WC_LOC_QP_OP_ERR;
+ if (ah)
+ rxe_put(ah);
+ goto err;
+ }
+
+ err = finish_packet(qp, av, wqe, &pkt, skb, payload);
+ if (unlikely(err)) {
+ rxe_dbg_qp(qp, "Error during finish packet\n");
+ if (err == -EFAULT)
+ wqe->status = IB_WC_LOC_PROT_ERR;
+ else
+ wqe->status = IB_WC_LOC_QP_OP_ERR;
+ kfree_skb(skb);
+ if (ah)
+ rxe_put(ah);
+ goto err;
+ }
+
+ if (ah)
+ rxe_put(ah);
+
+ err = rxe_xmit_packet(qp, &pkt, skb);
+ if (err) {
+ wqe->status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
+ }
+
+ update_wqe_state(qp, wqe, &pkt);
+ update_wqe_psn(qp, wqe, &pkt, payload);
+ update_state(qp, &pkt);
+
+ /* A non-zero return value will cause rxe_do_task to
+ * exit its loop and end the work item. A zero return
+ * keeps the loop going and re-enters rxe_requester.
+ */
+done:
+ ret = 0;
+ goto out;
+err:
+ /* update wqe_index for each wqe completion */
+ qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
+ wqe->state = wqe_state_error;
+ rxe_qp_error(qp);
+exit:
+ ret = -EAGAIN;
+out:
+ return ret;
+}
+
+int rxe_sender(struct rxe_qp *qp)
+{
+ int req_ret;
+ int comp_ret;
+
+ /* process the send queue */
+ req_ret = rxe_requester(qp);
+
+ /* process the response queue */
+ comp_ret = rxe_completer(qp);
+
+ /* exit the task loop only if both the requester and the
+ * completer have nothing more to do
+ */
+ return (req_ret && comp_ret) ? -EAGAIN : 0;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
new file mode 100644
index 000000000000..711f73e0bbb1
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -0,0 +1,1709 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#include <linux/skbuff.h>
+
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+
+static char *resp_state_name[] = {
+ [RESPST_NONE] = "NONE",
+ [RESPST_GET_REQ] = "GET_REQ",
+ [RESPST_CHK_PSN] = "CHK_PSN",
+ [RESPST_CHK_OP_SEQ] = "CHK_OP_SEQ",
+ [RESPST_CHK_OP_VALID] = "CHK_OP_VALID",
+ [RESPST_CHK_RESOURCE] = "CHK_RESOURCE",
+ [RESPST_CHK_LENGTH] = "CHK_LENGTH",
+ [RESPST_CHK_RKEY] = "CHK_RKEY",
+ [RESPST_EXECUTE] = "EXECUTE",
+ [RESPST_READ_REPLY] = "READ_REPLY",
+ [RESPST_ATOMIC_REPLY] = "ATOMIC_REPLY",
+ [RESPST_ATOMIC_WRITE_REPLY] = "ATOMIC_WRITE_REPLY",
+ [RESPST_PROCESS_FLUSH] = "PROCESS_FLUSH",
+ [RESPST_COMPLETE] = "COMPLETE",
+ [RESPST_ACKNOWLEDGE] = "ACKNOWLEDGE",
+ [RESPST_CLEANUP] = "CLEANUP",
+ [RESPST_DUPLICATE_REQUEST] = "DUPLICATE_REQUEST",
+ [RESPST_ERR_MALFORMED_WQE] = "ERR_MALFORMED_WQE",
+ [RESPST_ERR_UNSUPPORTED_OPCODE] = "ERR_UNSUPPORTED_OPCODE",
+ [RESPST_ERR_MISALIGNED_ATOMIC] = "ERR_MISALIGNED_ATOMIC",
+ [RESPST_ERR_PSN_OUT_OF_SEQ] = "ERR_PSN_OUT_OF_SEQ",
+ [RESPST_ERR_MISSING_OPCODE_FIRST] = "ERR_MISSING_OPCODE_FIRST",
+ [RESPST_ERR_MISSING_OPCODE_LAST_C] = "ERR_MISSING_OPCODE_LAST_C",
+ [RESPST_ERR_MISSING_OPCODE_LAST_D1E] = "ERR_MISSING_OPCODE_LAST_D1E",
+ [RESPST_ERR_TOO_MANY_RDMA_ATM_REQ] = "ERR_TOO_MANY_RDMA_ATM_REQ",
+ [RESPST_ERR_RNR] = "ERR_RNR",
+ [RESPST_ERR_RKEY_VIOLATION] = "ERR_RKEY_VIOLATION",
+ [RESPST_ERR_INVALIDATE_RKEY] = "ERR_INVALIDATE_RKEY_VIOLATION",
+ [RESPST_ERR_LENGTH] = "ERR_LENGTH",
+ [RESPST_ERR_CQ_OVERFLOW] = "ERR_CQ_OVERFLOW",
+ [RESPST_ERROR] = "ERROR",
+ [RESPST_DONE] = "DONE",
+ [RESPST_EXIT] = "EXIT",
+};
+
+/* rxe_recv calls here to add a request packet to the input queue */
+void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
+{
+ skb_queue_tail(&qp->req_pkts, skb);
+ rxe_sched_task(&qp->recv_task);
+}
+
+static inline enum resp_states get_req(struct rxe_qp *qp,
+ struct rxe_pkt_info **pkt_p)
+{
+ struct sk_buff *skb;
+
+ skb = skb_peek(&qp->req_pkts);
+ if (!skb)
+ return RESPST_EXIT;
+
+ *pkt_p = SKB_TO_PKT(skb);
+
+ return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
+}
+
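+/* compare the packet psn with the expected psn; for RC a psn in the
+ * future triggers a single out-of-sequence NAK and a psn in the past
+ * is handled as a duplicate request, while UC simply drops the rest
+ * of the current message until a new first packet arrives
+ */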
+static enum resp_states check_psn(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ int diff = psn_compare(pkt->psn, qp->resp.psn);
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+
+ switch (qp_type(qp)) {
+ case IB_QPT_RC:
+ if (diff > 0) {
+ if (qp->resp.sent_psn_nak)
+ return RESPST_CLEANUP;
+
+ qp->resp.sent_psn_nak = 1;
+ rxe_counter_inc(rxe, RXE_CNT_OUT_OF_SEQ_REQ);
+ return RESPST_ERR_PSN_OUT_OF_SEQ;
+
+ } else if (diff < 0) {
+ rxe_counter_inc(rxe, RXE_CNT_DUP_REQ);
+ return RESPST_DUPLICATE_REQUEST;
+ }
+
+ if (qp->resp.sent_psn_nak)
+ qp->resp.sent_psn_nak = 0;
+
+ break;
+
+ case IB_QPT_UC:
+ if (qp->resp.drop_msg || diff != 0) {
+ if (pkt->mask & RXE_START_MASK) {
+ qp->resp.drop_msg = 0;
+ return RESPST_CHK_OP_SEQ;
+ }
+
+ qp->resp.drop_msg = 1;
+ return RESPST_CLEANUP;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return RESPST_CHK_OP_SEQ;
+}
+
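+/* enforce the first/middle/last opcode ordering within a multi-packet
+ * message; RC violations are reported as missing-opcode errors while
+ * UC violations silently drop the message
+ */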
+static enum resp_states check_op_seq(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ switch (qp_type(qp)) {
+ case IB_QPT_RC:
+ switch (qp->resp.opcode) {
+ case IB_OPCODE_RC_SEND_FIRST:
+ case IB_OPCODE_RC_SEND_MIDDLE:
+ switch (pkt->opcode) {
+ case IB_OPCODE_RC_SEND_MIDDLE:
+ case IB_OPCODE_RC_SEND_LAST:
+ case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
+ case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
+ return RESPST_CHK_OP_VALID;
+ default:
+ return RESPST_ERR_MISSING_OPCODE_LAST_C;
+ }
+
+ case IB_OPCODE_RC_RDMA_WRITE_FIRST:
+ case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
+ switch (pkt->opcode) {
+ case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
+ case IB_OPCODE_RC_RDMA_WRITE_LAST:
+ case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
+ return RESPST_CHK_OP_VALID;
+ default:
+ return RESPST_ERR_MISSING_OPCODE_LAST_C;
+ }
+
+ default:
+ switch (pkt->opcode) {
+ case IB_OPCODE_RC_SEND_MIDDLE:
+ case IB_OPCODE_RC_SEND_LAST:
+ case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
+ case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
+ case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
+ case IB_OPCODE_RC_RDMA_WRITE_LAST:
+ case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
+ return RESPST_ERR_MISSING_OPCODE_FIRST;
+ default:
+ return RESPST_CHK_OP_VALID;
+ }
+ }
+ break;
+
+ case IB_QPT_UC:
+ switch (qp->resp.opcode) {
+ case IB_OPCODE_UC_SEND_FIRST:
+ case IB_OPCODE_UC_SEND_MIDDLE:
+ switch (pkt->opcode) {
+ case IB_OPCODE_UC_SEND_MIDDLE:
+ case IB_OPCODE_UC_SEND_LAST:
+ case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
+ return RESPST_CHK_OP_VALID;
+ default:
+ return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
+ }
+
+ case IB_OPCODE_UC_RDMA_WRITE_FIRST:
+ case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
+ switch (pkt->opcode) {
+ case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
+ case IB_OPCODE_UC_RDMA_WRITE_LAST:
+ case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
+ return RESPST_CHK_OP_VALID;
+ default:
+ return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
+ }
+
+ default:
+ switch (pkt->opcode) {
+ case IB_OPCODE_UC_SEND_MIDDLE:
+ case IB_OPCODE_UC_SEND_LAST:
+ case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
+ case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
+ case IB_OPCODE_UC_RDMA_WRITE_LAST:
+ case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
+ qp->resp.drop_msg = 1;
+ return RESPST_CLEANUP;
+ default:
+ return RESPST_CHK_OP_VALID;
+ }
+ }
+ break;
+
+ default:
+ return RESPST_CHK_OP_VALID;
+ }
+}
+
+static bool check_qp_attr_access(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ if (((pkt->mask & RXE_READ_MASK) &&
+ !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
+ ((pkt->mask & (RXE_WRITE_MASK | RXE_ATOMIC_WRITE_MASK)) &&
+ !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
+ ((pkt->mask & RXE_ATOMIC_MASK) &&
+ !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
+ return false;
+
+ if (pkt->mask & RXE_FLUSH_MASK) {
+ u32 flush_type = feth_plt(pkt);
+
+ if ((flush_type & IB_FLUSH_GLOBAL &&
+ !(qp->attr.qp_access_flags & IB_ACCESS_FLUSH_GLOBAL)) ||
+ (flush_type & IB_FLUSH_PERSISTENT &&
+ !(qp->attr.qp_access_flags & IB_ACCESS_FLUSH_PERSISTENT)))
+ return false;
+ }
+
+ return true;
+}
+
+static enum resp_states check_op_valid(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ switch (qp_type(qp)) {
+ case IB_QPT_RC:
+ if (!check_qp_attr_access(qp, pkt))
+ return RESPST_ERR_UNSUPPORTED_OPCODE;
+
+ break;
+
+ case IB_QPT_UC:
+ if ((pkt->mask & RXE_WRITE_MASK) &&
+ !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
+ qp->resp.drop_msg = 1;
+ return RESPST_CLEANUP;
+ }
+
+ break;
+
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ break;
+
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ return RESPST_CHK_RESOURCE;
+}
+
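+/* take the next receive wqe from the shared receive queue, copying it
+ * out of the shared buffer before use so user space cannot change it
+ * underneath us, and raise the SRQ limit event if the queue drops
+ * below the armed limit
+ */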
+static enum resp_states get_srq_wqe(struct rxe_qp *qp)
+{
+ struct rxe_srq *srq = qp->srq;
+ struct rxe_queue *q = srq->rq.queue;
+ struct rxe_recv_wqe *wqe;
+ struct ib_event ev;
+ unsigned int count;
+ size_t size;
+ unsigned long flags;
+
+ if (srq->error)
+ return RESPST_ERR_RNR;
+
+ spin_lock_irqsave(&srq->rq.consumer_lock, flags);
+
+ wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
+ if (!wqe) {
+ spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
+ return RESPST_ERR_RNR;
+ }
+
+ /* don't trust user space data */
+ if (unlikely(wqe->dma.num_sge > srq->rq.max_sge)) {
+ spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
+ rxe_dbg_qp(qp, "invalid num_sge in SRQ entry\n");
+ return RESPST_ERR_MALFORMED_WQE;
+ }
+ size = sizeof(*wqe) + wqe->dma.num_sge * sizeof(struct rxe_sge);
+ memcpy(&qp->resp.srq_wqe, wqe, size);
+
+ qp->resp.wqe = &qp->resp.srq_wqe.wqe;
+ queue_advance_consumer(q, QUEUE_TYPE_FROM_CLIENT);
+ count = queue_count(q, QUEUE_TYPE_FROM_CLIENT);
+
+ if (srq->limit && srq->ibsrq.event_handler && (count < srq->limit)) {
+ srq->limit = 0;
+ goto event;
+ }
+
+ spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
+ return RESPST_CHK_LENGTH;
+
+event:
+ spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
+ ev.device = qp->ibqp.device;
+ ev.element.srq = qp->ibqp.srq;
+ ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
+ return RESPST_CHK_LENGTH;
+}
+
+static enum resp_states check_resource(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ struct rxe_srq *srq = qp->srq;
+
+ if (pkt->mask & (RXE_READ_OR_ATOMIC_MASK | RXE_ATOMIC_WRITE_MASK)) {
+ /* it is the requester's job not to send too many
+ * read/atomic ops; we just recycle the responder
+ * resource queue
+ */
+ if (likely(qp->attr.max_dest_rd_atomic > 0))
+ return RESPST_CHK_LENGTH;
+ else
+ return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
+ }
+
+ if (pkt->mask & RXE_RWR_MASK) {
+ if (srq)
+ return get_srq_wqe(qp);
+
+ qp->resp.wqe = queue_head(qp->rq.queue,
+ QUEUE_TYPE_FROM_CLIENT);
+ return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
+ }
+
+ return RESPST_CHK_LENGTH;
+}
+
+static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ /*
+ * See IBA C9-92
+ * For UD QPs we only check if the packet will fit in the
+ * receive buffer later. For RDMA operations additional
+ * length checks are performed in check_rkey.
+ */
+ if ((qp_type(qp) == IB_QPT_GSI) || (qp_type(qp) == IB_QPT_UD)) {
+ unsigned int payload = payload_size(pkt);
+ unsigned int recv_buffer_len = 0;
+ int i;
+
+ for (i = 0; i < qp->resp.wqe->dma.num_sge; i++)
+ recv_buffer_len += qp->resp.wqe->dma.sge[i].length;
+ if (payload + sizeof(union rdma_network_hdr) > recv_buffer_len) {
+ rxe_dbg_qp(qp, "The receive buffer is too small for this UD packet.\n");
+ return RESPST_ERR_LENGTH;
+ }
+ }
+
+ if (pkt->mask & RXE_PAYLOAD_MASK && ((qp_type(qp) == IB_QPT_RC) ||
+ (qp_type(qp) == IB_QPT_UC))) {
+ unsigned int mtu = qp->mtu;
+ unsigned int payload = payload_size(pkt);
+
+ if ((pkt->mask & RXE_START_MASK) &&
+ (pkt->mask & RXE_END_MASK)) {
+ if (unlikely(payload > mtu)) {
+ rxe_dbg_qp(qp, "only packet too long\n");
+ return RESPST_ERR_LENGTH;
+ }
+ } else if ((pkt->mask & RXE_START_MASK) ||
+ (pkt->mask & RXE_MIDDLE_MASK)) {
+ if (unlikely(payload != mtu)) {
+ rxe_dbg_qp(qp, "first or middle packet not mtu\n");
+ return RESPST_ERR_LENGTH;
+ }
+ } else if (pkt->mask & RXE_END_MASK) {
+ if (unlikely((payload == 0) || (payload > mtu))) {
+ rxe_dbg_qp(qp, "last packet zero or too long\n");
+ return RESPST_ERR_LENGTH;
+ }
+ }
+ }
+
+ /* See IBA C9-94 */
+ if (pkt->mask & RXE_RETH_MASK) {
+ if (reth_len(pkt) > (1U << 31)) {
+ rxe_dbg_qp(qp, "dma length too long\n");
+ return RESPST_ERR_LENGTH;
+ }
+ }
+
+ if (pkt->mask & RXE_RDMA_OP_MASK)
+ return RESPST_CHK_RKEY;
+ else
+ return RESPST_EXECUTE;
+}
+
+/* if the reth length field is zero we can assume nothing
+ * about the rkey value and should not validate or use it.
+ * Instead set qp->resp.rkey to 0 which is an invalid rkey
+ * value since the minimum index part is 1.
+ */
+static void qp_resp_from_reth(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
+{
+ unsigned int length = reth_len(pkt);
+
+ qp->resp.va = reth_va(pkt);
+ qp->resp.offset = 0;
+ qp->resp.resid = length;
+ qp->resp.length = length;
+ if (pkt->mask & RXE_READ_OR_WRITE_MASK && length == 0)
+ qp->resp.rkey = 0;
+ else
+ qp->resp.rkey = reth_rkey(pkt);
+}
+
+static void qp_resp_from_atmeth(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
+{
+ qp->resp.va = atmeth_va(pkt);
+ qp->resp.offset = 0;
+ qp->resp.rkey = atmeth_rkey(pkt);
+ qp->resp.resid = sizeof(u64);
+}
+
+/* resolve the packet rkey to qp->resp.mr or set qp->resp.mr to NULL
+ * if an invalid rkey is received or the rdma length is zero. For middle
+ * or last packets use the stored value of mr.
+ */
+static enum resp_states check_rkey(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ struct rxe_mr *mr = NULL;
+ struct rxe_mw *mw = NULL;
+ u64 va;
+ u32 rkey;
+ u32 resid;
+ u32 pktlen;
+ int mtu = qp->mtu;
+ enum resp_states state;
+ int access = 0;
+
+ /* parse RETH or ATMETH header for first/only packets
+ * for va, length, rkey, etc. or use current value for
+ * middle/last packets.
+ */
+ if (pkt->mask & (RXE_READ_OR_WRITE_MASK | RXE_ATOMIC_WRITE_MASK)) {
+ if (pkt->mask & RXE_RETH_MASK)
+ qp_resp_from_reth(qp, pkt);
+
+ access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
+ : IB_ACCESS_REMOTE_WRITE;
+ } else if (pkt->mask & RXE_FLUSH_MASK) {
+ u32 flush_type = feth_plt(pkt);
+
+ if (pkt->mask & RXE_RETH_MASK)
+ qp_resp_from_reth(qp, pkt);
+
+ if (flush_type & IB_FLUSH_GLOBAL)
+ access |= IB_ACCESS_FLUSH_GLOBAL;
+ if (flush_type & IB_FLUSH_PERSISTENT)
+ access |= IB_ACCESS_FLUSH_PERSISTENT;
+ } else if (pkt->mask & RXE_ATOMIC_MASK) {
+ qp_resp_from_atmeth(qp, pkt);
+ access = IB_ACCESS_REMOTE_ATOMIC;
+ } else {
+ /* shouldn't happen */
+ WARN_ON(1);
+ }
+
+ /* A zero-byte read or write op is not required to
+ * set an addr or rkey. See C9-88
+ */
+ if ((pkt->mask & RXE_READ_OR_WRITE_MASK) &&
+ (pkt->mask & RXE_RETH_MASK) && reth_len(pkt) == 0) {
+ qp->resp.mr = NULL;
+ return RESPST_EXECUTE;
+ }
+
+ va = qp->resp.va;
+ rkey = qp->resp.rkey;
+ resid = qp->resp.resid;
+ pktlen = payload_size(pkt);
+
+ if (rkey_is_mw(rkey)) {
+ mw = rxe_lookup_mw(qp, access, rkey);
+ if (!mw) {
+ rxe_dbg_qp(qp, "no MW matches rkey %#x\n", rkey);
+ state = RESPST_ERR_RKEY_VIOLATION;
+ goto err;
+ }
+
+ mr = mw->mr;
+ if (!mr) {
+ rxe_dbg_qp(qp, "MW doesn't have an MR\n");
+ state = RESPST_ERR_RKEY_VIOLATION;
+ goto err;
+ }
+
+ if (mw->access & IB_ZERO_BASED)
+ qp->resp.offset = mw->addr;
+
+ rxe_get(mr);
+ rxe_put(mw);
+ mw = NULL;
+ } else {
+ mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE);
+ if (!mr) {
+ rxe_dbg_qp(qp, "no MR matches rkey %#x\n", rkey);
+ state = RESPST_ERR_RKEY_VIOLATION;
+ goto err;
+ }
+ }
+
+ if (pkt->mask & RXE_FLUSH_MASK) {
+ /* FLUSH MR may not set va or resid
+ * no need to check range since we will flush whole mr
+ */
+ if (feth_sel(pkt) == IB_FLUSH_MR)
+ goto skip_check_range;
+ }
+
+ if (mr_check_range(mr, va + qp->resp.offset, resid)) {
+ state = RESPST_ERR_RKEY_VIOLATION;
+ goto err;
+ }
+
+skip_check_range:
+ if (pkt->mask & (RXE_WRITE_MASK | RXE_ATOMIC_WRITE_MASK)) {
+ if (resid > mtu) {
+ if (pktlen != mtu || bth_pad(pkt)) {
+ state = RESPST_ERR_LENGTH;
+ goto err;
+ }
+ } else {
+ if (pktlen != resid) {
+ state = RESPST_ERR_LENGTH;
+ goto err;
+ }
+ if ((bth_pad(pkt) != (0x3 & (-resid)))) {
+ /* This case may not be exactly that
+ * but nothing else fits.
+ */
+ state = RESPST_ERR_LENGTH;
+ goto err;
+ }
+ }
+ }
+
+ WARN_ON_ONCE(qp->resp.mr);
+
+ qp->resp.mr = mr;
+ return RESPST_EXECUTE;
+
+err:
+ qp->resp.mr = NULL;
+ if (mr)
+ rxe_put(mr);
+ if (mw)
+ rxe_put(mw);
+
+ return state;
+}
+
+static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
+ int data_len)
+{
+ int err;
+
+ err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
+ data_addr, data_len, RXE_TO_MR_OBJ);
+ if (unlikely(err))
+ return (err == -ENOSPC) ? RESPST_ERR_LENGTH
+ : RESPST_ERR_MALFORMED_WQE;
+
+ return RESPST_NONE;
+}
+
+static enum resp_states write_data_in(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ enum resp_states rc = RESPST_NONE;
+ int err;
+ int data_len = payload_size(pkt);
+
+ err = rxe_mr_copy(qp->resp.mr, qp->resp.va + qp->resp.offset,
+ payload_addr(pkt), data_len, RXE_TO_MR_OBJ);
+ if (err) {
+ rc = RESPST_ERR_RKEY_VIOLATION;
+ goto out;
+ }
+
+ qp->resp.va += data_len;
+ qp->resp.resid -= data_len;
+
+out:
+ return rc;
+}
+
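+/* claim the next responder resource slot and initialize it for a
+ * read, atomic, atomic write or flush operation so the reply can be
+ * generated and later replayed if the request is duplicated
+ */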
+static struct resp_res *rxe_prepare_res(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ int type)
+{
+ struct resp_res *res;
+ u32 pkts;
+
+ res = &qp->resp.resources[qp->resp.res_head];
+ rxe_advance_resp_resource(qp);
+ free_rd_atomic_resource(res);
+
+ res->type = type;
+ res->replay = 0;
+
+ switch (type) {
+ case RXE_READ_MASK:
+ res->read.va = qp->resp.va + qp->resp.offset;
+ res->read.va_org = qp->resp.va + qp->resp.offset;
+ res->read.resid = qp->resp.resid;
+ res->read.length = qp->resp.resid;
+ res->read.rkey = qp->resp.rkey;
+
+ pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1);
+ res->first_psn = pkt->psn;
+ res->cur_psn = pkt->psn;
+ res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK;
+
+ res->state = rdatm_res_state_new;
+ break;
+ case RXE_ATOMIC_MASK:
+ case RXE_ATOMIC_WRITE_MASK:
+ res->first_psn = pkt->psn;
+ res->last_psn = pkt->psn;
+ res->cur_psn = pkt->psn;
+ break;
+ case RXE_FLUSH_MASK:
+ res->flush.va = qp->resp.va + qp->resp.offset;
+ res->flush.length = qp->resp.length;
+ res->flush.type = feth_plt(pkt);
+ res->flush.level = feth_sel(pkt);
+ }
+
+ return res;
+}
+
+static enum resp_states process_flush(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ u64 length, start;
+ struct rxe_mr *mr = qp->resp.mr;
+ struct resp_res *res = qp->resp.res;
+
+ /* oA19-14, oA19-15 */
+ if (res && res->replay)
+ return RESPST_ACKNOWLEDGE;
+ else if (!res) {
+ res = rxe_prepare_res(qp, pkt, RXE_FLUSH_MASK);
+ qp->resp.res = res;
+ }
+
+ if (res->flush.level == IB_FLUSH_RANGE) {
+ start = res->flush.va;
+ length = res->flush.length;
+ } else { /* level == IB_FLUSH_MR */
+ start = mr->ibmr.iova;
+ length = mr->ibmr.length;
+ }
+
+ if (res->flush.type & IB_FLUSH_PERSISTENT) {
+ if (rxe_flush_pmem_iova(mr, start, length))
+ return RESPST_ERR_RKEY_VIOLATION;
+ /* Make data persistent. */
+ wmb();
+ } else if (res->flush.type & IB_FLUSH_GLOBAL) {
+ /* Make data globally visible. */
+ wmb();
+ }
+
+ qp->resp.msn++;
+
+ /* next expected psn, read handles this separately */
+ qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+ qp->resp.ack_psn = qp->resp.psn;
+
+ qp->resp.opcode = pkt->opcode;
+ qp->resp.status = IB_WC_SUCCESS;
+
+ return RESPST_ACKNOWLEDGE;
+}
+
+static enum resp_states atomic_reply(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ struct rxe_mr *mr = qp->resp.mr;
+ struct resp_res *res = qp->resp.res;
+ int err;
+
+ if (!res) {
+ res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_MASK);
+ qp->resp.res = res;
+ }
+
+ if (!res->replay) {
+ u64 iova = qp->resp.va + qp->resp.offset;
+
+ if (is_odp_mr(mr))
+ err = rxe_odp_atomic_op(mr, iova, pkt->opcode,
+ atmeth_comp(pkt),
+ atmeth_swap_add(pkt),
+ &res->atomic.orig_val);
+ else
+ err = rxe_mr_do_atomic_op(mr, iova, pkt->opcode,
+ atmeth_comp(pkt),
+ atmeth_swap_add(pkt),
+ &res->atomic.orig_val);
+ if (err)
+ return err;
+
+ qp->resp.msn++;
+
+ /* next expected psn, read handles this separately */
+ qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+ qp->resp.ack_psn = qp->resp.psn;
+
+ qp->resp.opcode = pkt->opcode;
+ qp->resp.status = IB_WC_SUCCESS;
+ }
+
+ return RESPST_ACKNOWLEDGE;
+}
+
+static enum resp_states atomic_write_reply(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ struct resp_res *res = qp->resp.res;
+ struct rxe_mr *mr;
+ u64 value;
+ u64 iova;
+ int err;
+
+ if (!res) {
+ res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_WRITE_MASK);
+ qp->resp.res = res;
+ }
+
+ if (res->replay)
+ return RESPST_ACKNOWLEDGE;
+
+ mr = qp->resp.mr;
+ value = *(u64 *)payload_addr(pkt);
+ iova = qp->resp.va + qp->resp.offset;
+
+ /* See IBA oA19-28 */
+ if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
+ rxe_dbg_mr(mr, "mr not in valid state\n");
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+
+ if (is_odp_mr(mr))
+ err = rxe_odp_do_atomic_write(mr, iova, value);
+ else
+ err = rxe_mr_do_atomic_write(mr, iova, value);
+ if (err)
+ return err;
+
+ qp->resp.resid = 0;
+ qp->resp.msn++;
+
+ /* next expected psn, read handles this separately */
+ qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+ qp->resp.ack_psn = qp->resp.psn;
+
+ qp->resp.opcode = pkt->opcode;
+ qp->resp.status = IB_WC_SUCCESS;
+
+ return RESPST_ACKNOWLEDGE;
+}
+
+static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
+ struct rxe_pkt_info *ack,
+ int opcode,
+ int payload,
+ u32 psn,
+ u8 syndrome)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct sk_buff *skb;
+ int paylen;
+ int pad;
+ int err;
+
+ /*
+ * allocate packet
+ */
+ pad = (-payload) & 0x3;
+ paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
+
+ skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
+ if (!skb)
+ return NULL;
+
+ ack->qp = qp;
+ ack->opcode = opcode;
+ ack->mask = rxe_opcode[opcode].mask;
+ ack->paylen = paylen;
+ ack->psn = psn;
+
+ bth_init(ack, opcode, 0, 0, pad, IB_DEFAULT_PKEY_FULL,
+ qp->attr.dest_qp_num, 0, psn);
+
+ if (ack->mask & RXE_AETH_MASK) {
+ aeth_set_syn(ack, syndrome);
+ aeth_set_msn(ack, qp->resp.msn);
+ }
+
+ if (ack->mask & RXE_ATMACK_MASK)
+ atmack_set_orig(ack, qp->resp.res->atomic.orig_val);
+
+ err = rxe_prepare(&qp->pri_av, ack, skb);
+ if (err) {
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ return skb;
+}
+
+/**
+ * rxe_recheck_mr - revalidate MR from rkey and get a reference
+ * @qp: the qp
+ * @rkey: the rkey
+ *
+ * This allows the MR to have been invalidated or deregistered, or
+ * the MW, if one was used, to have been invalidated or deallocated,
+ * since the request started; the recheck then fails and returns NULL.
+ * It is assumed that the access permissions, if originally good, are
+ * still OK and that the mappings are unchanged.
+ *
+ * TODO: If someone reregisters an MR to change its size or
+ * access permissions during the processing of an RDMA read
+ * we should kill the responder resource and complete the
+ * operation with an error.
+ *
+ * Return: mr on success else NULL
+ */
+static struct rxe_mr *rxe_recheck_mr(struct rxe_qp *qp, u32 rkey)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct rxe_mr *mr;
+ struct rxe_mw *mw;
+
+ if (rkey_is_mw(rkey)) {
+ mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
+ if (!mw)
+ return NULL;
+
+ mr = mw->mr;
+ if (mw->rkey != rkey || mw->state != RXE_MW_STATE_VALID ||
+ !mr || mr->state != RXE_MR_STATE_VALID) {
+ rxe_put(mw);
+ return NULL;
+ }
+
+ rxe_get(mr);
+ rxe_put(mw);
+
+ return mr;
+ }
+
+ mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
+ if (!mr)
+ return NULL;
+
+ if (mr->rkey != rkey || mr->state != RXE_MR_STATE_VALID) {
+ rxe_put(mr);
+ return NULL;
+ }
+
+ return mr;
+}
+
+/* RDMA read response. If res is not NULL, then we have a current RDMA request
+ * being processed or replayed.
+ */
+static enum resp_states read_reply(struct rxe_qp *qp,
+ struct rxe_pkt_info *req_pkt)
+{
+ struct rxe_pkt_info ack_pkt;
+ struct sk_buff *skb;
+ int mtu = qp->mtu;
+ enum resp_states state;
+ int payload;
+ int opcode;
+ int err;
+ struct resp_res *res = qp->resp.res;
+ struct rxe_mr *mr;
+
+ if (!res) {
+ res = rxe_prepare_res(qp, req_pkt, RXE_READ_MASK);
+ qp->resp.res = res;
+ }
+
+ if (res->state == rdatm_res_state_new) {
+ if (!res->replay || qp->resp.length == 0) {
+ /* if length == 0 mr will be NULL (which is ok);
+ * otherwise qp->resp.mr holds a ref on the mr
+ * which we transfer to the local mr and drop below.
+ */
+ mr = qp->resp.mr;
+ qp->resp.mr = NULL;
+ } else {
+ mr = rxe_recheck_mr(qp, res->read.rkey);
+ if (!mr)
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+
+ if (res->read.resid <= mtu)
+ opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
+ else
+ opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
+ } else {
+ /* re-lookup mr from rkey on all later packets.
+ * length will be non-zero. This can fail if someone
+ * has modified or destroyed the mr since the first packet.
+ */
+ mr = rxe_recheck_mr(qp, res->read.rkey);
+ if (!mr)
+ return RESPST_ERR_RKEY_VIOLATION;
+
+ if (res->read.resid > mtu)
+ opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
+ else
+ opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
+ }
+
+ res->state = rdatm_res_state_next;
+
+ payload = min_t(int, res->read.resid, mtu);
+
+ skb = prepare_ack_packet(qp, &ack_pkt, opcode, payload,
+ res->cur_psn, AETH_ACK_UNLIMITED);
+ if (!skb) {
+ state = RESPST_ERR_RNR;
+ goto err_out;
+ }
+
+ err = rxe_mr_copy(mr, res->read.va, payload_addr(&ack_pkt),
+ payload, RXE_FROM_MR_OBJ);
+ if (err) {
+ kfree_skb(skb);
+ state = RESPST_ERR_RKEY_VIOLATION;
+ goto err_out;
+ }
+
+ if (bth_pad(&ack_pkt)) {
+ u8 *pad = payload_addr(&ack_pkt) + payload;
+
+ memset(pad, 0, bth_pad(&ack_pkt));
+ }
+
+ /* rxe_xmit_packet always consumes the skb */
+ err = rxe_xmit_packet(qp, &ack_pkt, skb);
+ if (err) {
+ state = RESPST_ERR_RNR;
+ goto err_out;
+ }
+
+ res->read.va += payload;
+ res->read.resid -= payload;
+ res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;
+
+ if (res->read.resid > 0) {
+ state = RESPST_DONE;
+ } else {
+ qp->resp.res = NULL;
+ if (!res->replay)
+ qp->resp.opcode = -1;
+ if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
+ qp->resp.psn = res->cur_psn;
+ state = RESPST_CLEANUP;
+ }
+
+err_out:
+ if (mr)
+ rxe_put(mr);
+ return state;
+}
+
+static int invalidate_rkey(struct rxe_qp *qp, u32 rkey)
+{
+ if (rkey_is_mw(rkey))
+ return rxe_invalidate_mw(qp, rkey);
+ else
+ return rxe_invalidate_mr(qp, rkey);
+}
+
+/* Executes a new request. A retried request never reaches this function
+ * (sends and writes are discarded, and reads and atomics are retried
+ * elsewhere).
+ */
+static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
+{
+ enum resp_states err;
+ struct sk_buff *skb = PKT_TO_SKB(pkt);
+ union rdma_network_hdr hdr;
+
+ if (pkt->mask & RXE_SEND_MASK) {
+ if (qp_type(qp) == IB_QPT_UD ||
+ qp_type(qp) == IB_QPT_GSI) {
+ if (skb->protocol == htons(ETH_P_IP)) {
+ memset(&hdr.reserved, 0,
+ sizeof(hdr.reserved));
+ memcpy(&hdr.roce4grh, ip_hdr(skb),
+ sizeof(hdr.roce4grh));
+ err = send_data_in(qp, &hdr, sizeof(hdr));
+ } else {
+ err = send_data_in(qp, ipv6_hdr(skb),
+ sizeof(hdr));
+ }
+ if (err)
+ return err;
+ }
+ err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
+ if (err)
+ return err;
+ } else if (pkt->mask & RXE_WRITE_MASK) {
+ err = write_data_in(qp, pkt);
+ if (err)
+ return err;
+ } else if (pkt->mask & RXE_READ_MASK) {
+ /* For RDMA Read we can increment the msn now. See C9-148. */
+ qp->resp.msn++;
+ return RESPST_READ_REPLY;
+ } else if (pkt->mask & RXE_ATOMIC_MASK) {
+ return RESPST_ATOMIC_REPLY;
+ } else if (pkt->mask & RXE_ATOMIC_WRITE_MASK) {
+ return RESPST_ATOMIC_WRITE_REPLY;
+ } else if (pkt->mask & RXE_FLUSH_MASK) {
+ return RESPST_PROCESS_FLUSH;
+ } else {
+ /* Unreachable */
+ WARN_ON_ONCE(1);
+ }
+
+ if (pkt->mask & RXE_IETH_MASK) {
+ u32 rkey = ieth_rkey(pkt);
+
+ err = invalidate_rkey(qp, rkey);
+ if (err)
+ return RESPST_ERR_INVALIDATE_RKEY;
+ }
+
+ if (pkt->mask & RXE_END_MASK)
+ /* We successfully processed this new request. */
+ qp->resp.msn++;
+
+ /* next expected psn, read handles this separately */
+ qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+ qp->resp.ack_psn = qp->resp.psn;
+
+ qp->resp.opcode = pkt->opcode;
+ qp->resp.status = IB_WC_SUCCESS;
+
+ if (pkt->mask & RXE_COMP_MASK)
+ return RESPST_COMPLETE;
+ else if (qp_type(qp) == IB_QPT_RC)
+ return RESPST_ACKNOWLEDGE;
+ else
+ return RESPST_CLEANUP;
+}
+
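+/* build a completion (kernel or user format) for the current receive
+ * wqe and post it to the receive CQ, advancing the receive queue
+ * consumer index for non-SRQ QPs
+ */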
+static enum resp_states do_complete(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ struct rxe_cqe cqe;
+ struct ib_wc *wc = &cqe.ibwc;
+ struct ib_uverbs_wc *uwc = &cqe.uibwc;
+ struct rxe_recv_wqe *wqe = qp->resp.wqe;
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ unsigned long flags;
+
+ if (!wqe)
+ goto finish;
+
+ memset(&cqe, 0, sizeof(cqe));
+
+ if (qp->rcq->is_user) {
+ uwc->status = qp->resp.status;
+ uwc->qp_num = qp->ibqp.qp_num;
+ uwc->wr_id = wqe->wr_id;
+ } else {
+ wc->status = qp->resp.status;
+ wc->qp = &qp->ibqp;
+ wc->wr_id = wqe->wr_id;
+ }
+
+ if (wc->status == IB_WC_SUCCESS) {
+ rxe_counter_inc(rxe, RXE_CNT_RDMA_RECV);
+ wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
+ pkt->mask & RXE_WRITE_MASK) ?
+ IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
+ wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
+ pkt->mask & RXE_WRITE_MASK) ?
+ qp->resp.length : wqe->dma.length - wqe->dma.resid;
+
+ /* fields after byte_len are different between kernel and user
+ * space
+ */
+ if (qp->rcq->is_user) {
+ uwc->wc_flags = IB_WC_GRH;
+
+ if (pkt->mask & RXE_IMMDT_MASK) {
+ uwc->wc_flags |= IB_WC_WITH_IMM;
+ uwc->ex.imm_data = immdt_imm(pkt);
+ }
+
+ if (pkt->mask & RXE_IETH_MASK) {
+ uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ uwc->ex.invalidate_rkey = ieth_rkey(pkt);
+ }
+
+ if (pkt->mask & RXE_DETH_MASK)
+ uwc->src_qp = deth_sqp(pkt);
+
+ uwc->port_num = qp->attr.port_num;
+ } else {
+ struct sk_buff *skb = PKT_TO_SKB(pkt);
+
+ wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
+ if (skb->protocol == htons(ETH_P_IP))
+ wc->network_hdr_type = RDMA_NETWORK_IPV4;
+ else
+ wc->network_hdr_type = RDMA_NETWORK_IPV6;
+
+ if (is_vlan_dev(skb->dev)) {
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ wc->vlan_id = vlan_dev_vlan_id(skb->dev);
+ }
+
+ if (pkt->mask & RXE_IMMDT_MASK) {
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ wc->ex.imm_data = immdt_imm(pkt);
+ }
+
+ if (pkt->mask & RXE_IETH_MASK) {
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ wc->ex.invalidate_rkey = ieth_rkey(pkt);
+ }
+
+ if (pkt->mask & RXE_DETH_MASK)
+ wc->src_qp = deth_sqp(pkt);
+
+ wc->port_num = qp->attr.port_num;
+ }
+ } else {
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ rxe_err_qp(qp, "non-flush error status = %d\n",
+ wc->status);
+ }
+
+ /* qp->resp.wqe holds a copy for the srq case and a
+ * reference into the rq queue for the !srq case */
+ if (!qp->srq)
+ queue_advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_CLIENT);
+
+ qp->resp.wqe = NULL;
+
+ if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
+ return RESPST_ERR_CQ_OVERFLOW;
+
+finish:
+ spin_lock_irqsave(&qp->state_lock, flags);
+ if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+ return RESPST_CHK_RESOURCE;
+ }
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+
+ if (unlikely(!pkt))
+ return RESPST_DONE;
+ if (qp_type(qp) == IB_QPT_RC)
+ return RESPST_ACKNOWLEDGE;
+ else
+ return RESPST_CLEANUP;
+}
+
+
+static int send_common_ack(struct rxe_qp *qp, u8 syndrome, u32 psn,
+ int opcode, const char *msg)
+{
+ int err;
+ struct rxe_pkt_info ack_pkt;
+ struct sk_buff *skb;
+
+ skb = prepare_ack_packet(qp, &ack_pkt, opcode, 0, psn, syndrome);
+ if (!skb)
+ return -ENOMEM;
+
+ err = rxe_xmit_packet(qp, &ack_pkt, skb);
+ if (err)
+ rxe_dbg_qp(qp, "Failed sending %s\n", msg);
+
+ return err;
+}
+
+static int send_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
+{
+ return send_common_ack(qp, syndrome, psn,
+ IB_OPCODE_RC_ACKNOWLEDGE, "ACK");
+}
+
+static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
+{
+ int ret = send_common_ack(qp, syndrome, psn,
+ IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, "ATOMIC ACK");
+
+ /* have to clear this since it is used to trigger
+ * long read replies
+ */
+ qp->resp.res = NULL;
+ return ret;
+}
+
+static int send_read_response_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
+{
+ int ret = send_common_ack(qp, syndrome, psn,
+ IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY,
+ "RDMA READ response of length zero ACK");
+
+ /* have to clear this since it is used to trigger
+ * long read replies
+ */
+ qp->resp.res = NULL;
+ return ret;
+}
+
+static enum resp_states acknowledge(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ if (qp_type(qp) != IB_QPT_RC)
+ return RESPST_CLEANUP;
+
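+ /* only RC generates acknowledgements; a NAK syndrome set by
+ * an earlier error state takes precedence over a normal ACK
+ */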
+ if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
+ send_ack(qp, qp->resp.aeth_syndrome, pkt->psn);
+ else if (pkt->mask & RXE_ATOMIC_MASK)
+ send_atomic_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
+ else if (pkt->mask & (RXE_FLUSH_MASK | RXE_ATOMIC_WRITE_MASK))
+ send_read_response_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
+ else if (bth_ack(pkt))
+ send_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
+
+ return RESPST_CLEANUP;
+}
+
+static enum resp_states cleanup(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ struct sk_buff *skb;
+
+ if (pkt) {
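+ /* free the skb and drop the qp and ib_device
+ * references held by the queued packet
+ */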
+ skb = skb_dequeue(&qp->req_pkts);
+ rxe_put(qp);
+ kfree_skb(skb);
+ ib_device_put(qp->ibqp.device);
+ }
+
+ if (qp->resp.mr) {
+ rxe_put(qp->resp.mr);
+ qp->resp.mr = NULL;
+ }
+
+ return RESPST_DONE;
+}
+
+static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
+{
+ int i;
+
+ for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
+ struct resp_res *res = &qp->resp.resources[i];
+
+ if (res->type == 0)
+ continue;
+
+ if (psn_compare(psn, res->first_psn) >= 0 &&
+ psn_compare(psn, res->last_psn) <= 0) {
+ return res;
+ }
+ }
+
+ return NULL;
+}
+
+static enum resp_states duplicate_request(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ enum resp_states rc;
+ u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK;
+
+ if (pkt->mask & RXE_SEND_MASK ||
+ pkt->mask & RXE_WRITE_MASK) {
+ /* SEND or WRITE. Ack again and cleanup. C9-105. */
+ send_ack(qp, AETH_ACK_UNLIMITED, prev_psn);
+ return RESPST_CLEANUP;
+ } else if (pkt->mask & RXE_FLUSH_MASK) {
+ struct resp_res *res;
+
+ /* Find the operation in our list of responder resources. */
+ res = find_resource(qp, pkt->psn);
+ if (res) {
+ res->replay = 1;
+ res->cur_psn = pkt->psn;
+ qp->resp.res = res;
+ rc = RESPST_PROCESS_FLUSH;
+ goto out;
+ }
+
+ /* Resource not found. Class D error. Drop the request. */
+ rc = RESPST_CLEANUP;
+ goto out;
+ } else if (pkt->mask & RXE_READ_MASK) {
+ struct resp_res *res;
+
+ res = find_resource(qp, pkt->psn);
+ if (!res) {
+ /* Resource not found. Class D error. Drop the
+ * request.
+ */
+ rc = RESPST_CLEANUP;
+ goto out;
+ } else {
+ /* Ensure this new request is the same as the previous
+ * one or a subset of it.
+ */
+ u64 iova = reth_va(pkt);
+ u32 resid = reth_len(pkt);
+
+ if (iova < res->read.va_org ||
+ resid > res->read.length ||
+ (iova + resid) > (res->read.va_org +
+ res->read.length)) {
+ rc = RESPST_CLEANUP;
+ goto out;
+ }
+
+ if (reth_rkey(pkt) != res->read.rkey) {
+ rc = RESPST_CLEANUP;
+ goto out;
+ }
+
+ res->cur_psn = pkt->psn;
+ res->state = (pkt->psn == res->first_psn) ?
+ rdatm_res_state_new :
+ rdatm_res_state_replay;
+ res->replay = 1;
+
+ /* Reset the resource, except length. */
+ res->read.va_org = iova;
+ res->read.va = iova;
+ res->read.resid = resid;
+
+ /* Replay the RDMA read reply. */
+ qp->resp.res = res;
+ rc = RESPST_READ_REPLY;
+ goto out;
+ }
+ } else {
+ struct resp_res *res;
+
+ /* Find the operation in our list of responder resources. */
+ res = find_resource(qp, pkt->psn);
+ if (res) {
+ res->replay = 1;
+ res->cur_psn = pkt->psn;
+ qp->resp.res = res;
+ rc = pkt->mask & RXE_ATOMIC_MASK ?
+ RESPST_ATOMIC_REPLY :
+ RESPST_ATOMIC_WRITE_REPLY;
+ goto out;
+ }
+
+ /* Resource not found. Class D error. Drop the request. */
+ rc = RESPST_CLEANUP;
+ goto out;
+ }
+out:
+ return rc;
+}
+
+/* Process a class A or C error. Both are treated the same in this implementation. */
+static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
+ enum ib_wc_status status)
+{
+ qp->resp.aeth_syndrome = syndrome;
+ qp->resp.status = status;
+
+ /* indicate that we should go through the ERROR state */
+ qp->resp.goto_error = 1;
+}
+
+static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
+{
+ /* UC */
+ if (qp->srq) {
+ /* Class E */
+ qp->resp.drop_msg = 1;
+ if (qp->resp.wqe) {
+ qp->resp.status = IB_WC_REM_INV_REQ_ERR;
+ return RESPST_COMPLETE;
+ } else {
+ return RESPST_CLEANUP;
+ }
+ } else {
+ /* Class D1. This packet may be the start of a
+ * new message and could be valid. The previous
+ * message is invalid and ignored. Reset the
+ * recv wr to its original state.
+ */
+ if (qp->resp.wqe) {
+ qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
+ qp->resp.wqe->dma.cur_sge = 0;
+ qp->resp.wqe->dma.sge_offset = 0;
+ qp->resp.opcode = -1;
+ }
+
+ if (qp->resp.mr) {
+ rxe_put(qp->resp.mr);
+ qp->resp.mr = NULL;
+ }
+
+ return RESPST_CLEANUP;
+ }
+}
+
+/* drain incoming request packet queue */
+static void drain_req_pkts(struct rxe_qp *qp)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&qp->req_pkts))) {
+ rxe_put(qp);
+ kfree_skb(skb);
+ ib_device_put(qp->ibqp.device);
+ }
+}
+
+/* complete receive wqe with flush error */
+static int flush_recv_wqe(struct rxe_qp *qp, struct rxe_recv_wqe *wqe)
+{
+ struct rxe_cqe cqe = {};
+ struct ib_wc *wc = &cqe.ibwc;
+ struct ib_uverbs_wc *uwc = &cqe.uibwc;
+ int err;
+
+ if (qp->rcq->is_user) {
+ uwc->wr_id = wqe->wr_id;
+ uwc->status = IB_WC_WR_FLUSH_ERR;
+ uwc->qp_num = qp_num(qp);
+ } else {
+ wc->wr_id = wqe->wr_id;
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->qp = &qp->ibqp;
+ }
+
+ err = rxe_cq_post(qp->rcq, &cqe, 0);
+ if (err)
+ rxe_dbg_cq(qp->rcq, "post cq failed err = %d\n", err);
+
+ return err;
+}
+
+/* Drain and optionally complete the receive queue. If unable to
+ * complete a wqe, stop completing and just flush the remaining
+ * wqes.
+ */
+static void flush_recv_queue(struct rxe_qp *qp, bool notify)
+{
+ struct rxe_queue *q = qp->rq.queue;
+ struct rxe_recv_wqe *wqe;
+ int err;
+
+ if (qp->srq) {
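+ /* the recv queue belongs to the srq and is shared with
+ * other qps, so don't flush it here; just tell the
+ * consumer the last wqe for this qp has been reached
+ */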
+ if (notify && qp->ibqp.event_handler) {
+ struct ib_event ev;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+ return;
+ }
+
+ /* recv queue not created. nothing to do. */
+ if (!qp->rq.queue)
+ return;
+
+ while ((wqe = queue_head(q, q->type))) {
+ if (notify) {
+ err = flush_recv_wqe(qp, wqe);
+ if (err)
+ notify = 0;
+ }
+ queue_advance_consumer(q, q->type);
+ }
+
+ qp->resp.wqe = NULL;
+}
+
+int rxe_receiver(struct rxe_qp *qp)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ enum resp_states state;
+ struct rxe_pkt_info *pkt = NULL;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->state_lock, flags);
+ if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
+ qp_state(qp) == IB_QPS_RESET) {
+ bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR);
+
+ drain_req_pkts(qp);
+ flush_recv_queue(qp, notify);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+ goto exit;
+ }
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+
+ qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;
+
+ state = RESPST_GET_REQ;
+
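+ /* run the responder state machine: pull a request packet,
+ * walk it through the check, execute, complete and
+ * acknowledge states, and loop until a handler returns
+ * DONE, EXIT or ERROR
+ */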
+ while (1) {
+ rxe_dbg_qp(qp, "state = %s\n", resp_state_name[state]);
+ switch (state) {
+ case RESPST_GET_REQ:
+ state = get_req(qp, &pkt);
+ break;
+ case RESPST_CHK_PSN:
+ state = check_psn(qp, pkt);
+ break;
+ case RESPST_CHK_OP_SEQ:
+ state = check_op_seq(qp, pkt);
+ break;
+ case RESPST_CHK_OP_VALID:
+ state = check_op_valid(qp, pkt);
+ break;
+ case RESPST_CHK_RESOURCE:
+ state = check_resource(qp, pkt);
+ break;
+ case RESPST_CHK_LENGTH:
+ state = rxe_resp_check_length(qp, pkt);
+ break;
+ case RESPST_CHK_RKEY:
+ state = check_rkey(qp, pkt);
+ break;
+ case RESPST_EXECUTE:
+ state = execute(qp, pkt);
+ break;
+ case RESPST_COMPLETE:
+ state = do_complete(qp, pkt);
+ break;
+ case RESPST_READ_REPLY:
+ state = read_reply(qp, pkt);
+ break;
+ case RESPST_ATOMIC_REPLY:
+ state = atomic_reply(qp, pkt);
+ break;
+ case RESPST_ATOMIC_WRITE_REPLY:
+ state = atomic_write_reply(qp, pkt);
+ break;
+ case RESPST_PROCESS_FLUSH:
+ state = process_flush(qp, pkt);
+ break;
+ case RESPST_ACKNOWLEDGE:
+ state = acknowledge(qp, pkt);
+ break;
+ case RESPST_CLEANUP:
+ state = cleanup(qp, pkt);
+ break;
+ case RESPST_DUPLICATE_REQUEST:
+ state = duplicate_request(qp, pkt);
+ break;
+ case RESPST_ERR_PSN_OUT_OF_SEQ:
+ /* RC only - Class B. Drop packet. */
+ send_ack(qp, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
+ state = RESPST_CLEANUP;
+ break;
+
+ case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
+ case RESPST_ERR_MISSING_OPCODE_FIRST:
+ case RESPST_ERR_MISSING_OPCODE_LAST_C:
+ case RESPST_ERR_UNSUPPORTED_OPCODE:
+ case RESPST_ERR_MISALIGNED_ATOMIC:
+ /* RC Only - Class C. */
+ do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
+ IB_WC_REM_INV_REQ_ERR);
+ state = RESPST_COMPLETE;
+ break;
+
+ case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
+ state = do_class_d1e_error(qp);
+ break;
+ case RESPST_ERR_RNR:
+ if (qp_type(qp) == IB_QPT_RC) {
+ rxe_counter_inc(rxe, RXE_CNT_SND_RNR);
+ /* RC - class B */
+ send_ack(qp, AETH_RNR_NAK |
+ (~AETH_TYPE_MASK &
+ qp->attr.min_rnr_timer),
+ pkt->psn);
+ } else {
+ /* UD/UC - class D */
+ qp->resp.drop_msg = 1;
+ }
+ state = RESPST_CLEANUP;
+ break;
+
+ case RESPST_ERR_RKEY_VIOLATION:
+ if (qp_type(qp) == IB_QPT_RC) {
+ /* Class C */
+ do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
+ IB_WC_REM_ACCESS_ERR);
+ state = RESPST_COMPLETE;
+ } else {
+ qp->resp.drop_msg = 1;
+ if (qp->srq) {
+ /* UC/SRQ Class D */
+ qp->resp.status = IB_WC_REM_ACCESS_ERR;
+ state = RESPST_COMPLETE;
+ } else {
+ /* UC/non-SRQ Class E. */
+ state = RESPST_CLEANUP;
+ }
+ }
+ break;
+
+ case RESPST_ERR_INVALIDATE_RKEY:
+ /* RC - Class J. */
+ qp->resp.goto_error = 1;
+ qp->resp.status = IB_WC_REM_INV_REQ_ERR;
+ state = RESPST_COMPLETE;
+ break;
+
+ case RESPST_ERR_LENGTH:
+ if (qp_type(qp) == IB_QPT_RC) {
+ /* Class C */
+ do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
+ IB_WC_REM_INV_REQ_ERR);
+ state = RESPST_COMPLETE;
+ } else if (qp->srq) {
+ /* UC/UD - class E */
+ qp->resp.status = IB_WC_REM_INV_REQ_ERR;
+ state = RESPST_COMPLETE;
+ } else {
+ /* UC/UD - class D */
+ qp->resp.drop_msg = 1;
+ state = RESPST_CLEANUP;
+ }
+ break;
+
+ case RESPST_ERR_MALFORMED_WQE:
+ /* All, Class A. */
+ do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
+ IB_WC_LOC_QP_OP_ERR);
+ state = RESPST_COMPLETE;
+ break;
+
+ case RESPST_ERR_CQ_OVERFLOW:
+ /* All - Class G */
+ state = RESPST_ERROR;
+ break;
+
+ case RESPST_DONE:
+ if (qp->resp.goto_error) {
+ state = RESPST_ERROR;
+ break;
+ }
+
+ goto done;
+
+ case RESPST_EXIT:
+ if (qp->resp.goto_error) {
+ state = RESPST_ERROR;
+ break;
+ }
+
+ goto exit;
+
+ case RESPST_ERROR:
+ qp->resp.goto_error = 0;
+ rxe_dbg_qp(qp, "moved to error state\n");
+ rxe_qp_error(qp);
+ goto exit;
+
+ default:
+ WARN_ON_ONCE(1);
+ }
+ }
+
+ /* A non-zero return value will cause do_task to
+ * exit its loop and end the work item. A zero return
+ * will continue looping and do_task will call
+ * rxe_receiver again.
+ */
+done:
+ ret = 0;
+ goto out;
+exit:
+ ret = -EAGAIN;
+out:
+ return ret;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
new file mode 100644
index 000000000000..3661cb627d28
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+#include "rxe.h"
+#include "rxe_queue.h"
+
+int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init)
+{
+ struct ib_srq_attr *attr = &init->attr;
+
+ if (attr->max_wr > rxe->attr.max_srq_wr) {
+ rxe_dbg_dev(rxe, "max_wr(%d) > max_srq_wr(%d)\n",
+ attr->max_wr, rxe->attr.max_srq_wr);
+ goto err1;
+ }
+
+ if (attr->max_wr <= 0) {
+ rxe_dbg_dev(rxe, "max_wr(%d) <= 0\n", attr->max_wr);
+ goto err1;
+ }
+
+ if (attr->max_wr < RXE_MIN_SRQ_WR)
+ attr->max_wr = RXE_MIN_SRQ_WR;
+
+ if (attr->max_sge > rxe->attr.max_srq_sge) {
+ rxe_dbg_dev(rxe, "max_sge(%d) > max_srq_sge(%d)\n",
+ attr->max_sge, rxe->attr.max_srq_sge);
+ goto err1;
+ }
+
+ if (attr->max_sge < RXE_MIN_SRQ_SGE)
+ attr->max_sge = RXE_MIN_SRQ_SGE;
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
+ struct ib_srq_init_attr *init, struct ib_udata *udata,
+ struct rxe_create_srq_resp __user *uresp)
+{
+ struct rxe_queue *q;
+ int wqe_size;
+ int err;
+
+ srq->ibsrq.event_handler = init->event_handler;
+ srq->ibsrq.srq_context = init->srq_context;
+ srq->limit = init->attr.srq_limit;
+ srq->srq_num = srq->elem.index;
+ srq->rq.max_wr = init->attr.max_wr;
+ srq->rq.max_sge = init->attr.max_sge;
+
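+ /* each recv wqe holds the fixed header plus room for
+ * max_sge scatter/gather entries
+ */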
+ wqe_size = sizeof(struct rxe_recv_wqe) +
+ srq->rq.max_sge*sizeof(struct ib_sge);
+
+ spin_lock_init(&srq->rq.producer_lock);
+ spin_lock_init(&srq->rq.consumer_lock);
+
+ q = rxe_queue_init(rxe, &srq->rq.max_wr, wqe_size,
+ QUEUE_TYPE_FROM_CLIENT);
+ if (!q) {
+ rxe_dbg_srq(srq, "Unable to allocate queue\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata, q->buf,
+ q->buf_size, &q->ip);
+ if (err) {
+ rxe_dbg_srq(srq, "Unable to init mmap info for caller\n");
+ goto err_free;
+ }
+
+ srq->rq.queue = q;
+ init->attr.max_wr = srq->rq.max_wr;
+
+ if (uresp) {
+ if (copy_to_user(&uresp->srq_num, &srq->srq_num,
+ sizeof(uresp->srq_num))) {
+ rxe_queue_cleanup(q);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+
+err_free:
+ vfree(q->buf);
+ kfree(q);
+err_out:
+ return err;
+}
+
+int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
+ struct ib_srq_attr *attr, enum ib_srq_attr_mask mask)
+{
+ if (srq->error) {
+ rxe_dbg_srq(srq, "in error state\n");
+ goto err1;
+ }
+
+ if (mask & IB_SRQ_MAX_WR) {
+ if (attr->max_wr > rxe->attr.max_srq_wr) {
+ rxe_dbg_srq(srq, "max_wr(%d) > max_srq_wr(%d)\n",
+ attr->max_wr, rxe->attr.max_srq_wr);
+ goto err1;
+ }
+
+ if (attr->max_wr <= 0) {
+ rxe_dbg_srq(srq, "max_wr(%d) <= 0\n", attr->max_wr);
+ goto err1;
+ }
+
+ if (srq->limit && (attr->max_wr < srq->limit)) {
+ rxe_dbg_srq(srq, "max_wr (%d) < srq->limit (%d)\n",
+ attr->max_wr, srq->limit);
+ goto err1;
+ }
+
+ if (attr->max_wr < RXE_MIN_SRQ_WR)
+ attr->max_wr = RXE_MIN_SRQ_WR;
+ }
+
+ if (mask & IB_SRQ_LIMIT) {
+ if (attr->srq_limit > rxe->attr.max_srq_wr) {
+ rxe_dbg_srq(srq, "srq_limit(%d) > max_srq_wr(%d)\n",
+ attr->srq_limit, rxe->attr.max_srq_wr);
+ goto err1;
+ }
+
+ if (attr->srq_limit > srq->rq.queue->buf->index_mask) {
+ rxe_dbg_srq(srq, "srq_limit (%d) > cur limit(%d)\n",
+ attr->srq_limit,
+ srq->rq.queue->buf->index_mask);
+ goto err1;
+ }
+ }
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
+ struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
+ struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata)
+{
+ struct rxe_queue *q = srq->rq.queue;
+ struct mminfo __user *mi = NULL;
+ int wqe_size;
+ int err;
+
+ if (mask & IB_SRQ_MAX_WR) {
+ /*
+ * This is completely screwed up, the response is supposed to
+ * be in the outbuf not like this.
+ */
+ mi = u64_to_user_ptr(ucmd->mmap_info_addr);
+
+ wqe_size = sizeof(struct rxe_recv_wqe) +
+ srq->rq.max_sge*sizeof(struct ib_sge);
+
+ err = rxe_queue_resize(q, &attr->max_wr, wqe_size,
+ udata, mi, &srq->rq.producer_lock,
+ &srq->rq.consumer_lock);
+ if (err)
+ goto err_free;
+
+ srq->rq.max_wr = attr->max_wr;
+ }
+
+ if (mask & IB_SRQ_LIMIT)
+ srq->limit = attr->srq_limit;
+
+ return 0;
+
+err_free:
+ rxe_queue_cleanup(q);
+ srq->rq.queue = NULL;
+ return err;
+}
+
+void rxe_srq_cleanup(struct rxe_pool_elem *elem)
+{
+ struct rxe_srq *srq = container_of(elem, typeof(*srq), elem);
+
+ if (srq->pd)
+ rxe_put(srq->pd);
+
+ if (srq->rq.queue)
+ rxe_queue_cleanup(srq->rq.queue);
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
new file mode 100644
index 000000000000..f522820b950c
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#include "rxe.h"
+
+static struct workqueue_struct *rxe_wq;
+
+int rxe_alloc_wq(void)
+{
+ rxe_wq = alloc_workqueue("rxe_wq", WQ_UNBOUND, WQ_MAX_ACTIVE);
+ if (!rxe_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void rxe_destroy_wq(void)
+{
+ destroy_workqueue(rxe_wq);
+}
+
+/* Check if the task is idle, i.e. not running, not scheduled in
+ * the work queue and not draining. If so, reserve a slot in
+ * do_task() by setting the state to busy and taking a qp
+ * reference to cover the gap from now until the task finishes.
+ * The state moves out of busy when the task function returns a
+ * non-zero value in do_task(). If the state is already busy it is
+ * raised to armed to indicate to do_task that an additional pass
+ * should be made over the task.
+ * Context: caller should hold task->lock.
+ * Returns: true if the state transitioned from idle to busy,
+ * else false.
+ */
+static bool __reserve_if_idle(struct rxe_task *task)
+{
+ WARN_ON(rxe_read(task->qp) <= 0);
+
+ if (task->state == TASK_STATE_IDLE) {
+ rxe_get(task->qp);
+ task->state = TASK_STATE_BUSY;
+ task->num_sched++;
+ return true;
+ }
+
+ if (task->state == TASK_STATE_BUSY)
+ task->state = TASK_STATE_ARMED;
+
+ return false;
+}
+
+/* Check if the task is idle or drained and not currently
+ * scheduled in the work queue. This routine is called by
+ * rxe_cleanup_task or rxe_disable_task to see if all
+ * scheduled runs of the task have finished.
+ * Context: caller should hold task->lock.
+ * Returns: true if done, else false.
+ */
+static bool __is_done(struct rxe_task *task)
+{
+ if (work_pending(&task->work))
+ return false;
+
+ if (task->state == TASK_STATE_IDLE ||
+ task->state == TASK_STATE_DRAINED) {
+ return true;
+ }
+
+ return false;
+}
+
+/* a locked version of __is_done */
+static bool is_done(struct rxe_task *task)
+{
+ unsigned long flags;
+ int done;
+
+ spin_lock_irqsave(&task->lock, flags);
+ done = __is_done(task);
+ spin_unlock_irqrestore(&task->lock, flags);
+
+ return done;
+}
+
+/* do_task is a wrapper for the three tasks (requester,
+ * completer, responder) and calls them in a loop until
+ * they return a non-zero value. It is called indirectly
+ * when rxe_sched_task schedules the task. Callers must
+ * use __reserve_if_idle to move the task to busy before
+ * calling or scheduling it. The task can also be moved to
+ * drained or invalid by calls to rxe_cleanup_task or
+ * rxe_disable_task; in that case tasks which get here
+ * are not executed but just flushed. Each task function
+ * checks whether there is work to do, performs part of it,
+ * and returns zero until all the work has been consumed,
+ * at which point it returns a non-zero value.
+ * The number of times the task function can be run in one
+ * pass is limited by RXE_MAX_ITERATIONS so one task cannot
+ * hold the cpu forever. If the limit is hit and work
+ * remains, the task is rescheduled.
+ */
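+/* A minimal sketch of a conforming task function (hypothetical,
+ * for illustration only; the responder's real one is rxe_receiver):
+ *
+ *	static int example_task(struct rxe_qp *qp)
+ *	{
+ *		if (!more_work(qp))	// hypothetical helper
+ *			return -EAGAIN;	// done, stop looping
+ *		do_one_unit(qp);	// hypothetical helper
+ *		return 0;		// more work, call again
+ *	}
+ */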
+static void do_task(struct rxe_task *task)
+{
+ unsigned int iterations;
+ unsigned long flags;
+ int resched = 0;
+ int cont;
+ int ret;
+
+ WARN_ON(rxe_read(task->qp) <= 0);
+
+ spin_lock_irqsave(&task->lock, flags);
+ if (task->state >= TASK_STATE_DRAINED) {
+ rxe_put(task->qp);
+ task->num_done++;
+ spin_unlock_irqrestore(&task->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&task->lock, flags);
+
+ do {
+ iterations = RXE_MAX_ITERATIONS;
+ cont = 0;
+
+ do {
+ ret = task->func(task->qp);
+ } while (ret == 0 && iterations-- > 0);
+
+ spin_lock_irqsave(&task->lock, flags);
+ /* we're not done yet but we ran out of iterations.
+ * yield the cpu and reschedule the task
+ */
+ if (!ret) {
+ if (task->state != TASK_STATE_DRAINING) {
+ task->state = TASK_STATE_IDLE;
+ resched = 1;
+ } else {
+ cont = 1;
+ }
+ goto exit;
+ }
+
+ switch (task->state) {
+ case TASK_STATE_BUSY:
+ task->state = TASK_STATE_IDLE;
+ break;
+
+ /* someone tried to schedule the task while we
+ * were running, keep going
+ */
+ case TASK_STATE_ARMED:
+ task->state = TASK_STATE_BUSY;
+ cont = 1;
+ break;
+
+ case TASK_STATE_DRAINING:
+ task->state = TASK_STATE_DRAINED;
+ break;
+
+ default:
+ WARN_ON(1);
+ rxe_dbg_qp(task->qp, "unexpected task state = %d\n",
+ task->state);
+ task->state = TASK_STATE_IDLE;
+ }
+
+exit:
+ if (!cont) {
+ task->num_done++;
+ if (WARN_ON(task->num_done != task->num_sched))
+ rxe_dbg_qp(
+ task->qp,
+ "%ld tasks scheduled, %ld tasks done\n",
+ task->num_sched, task->num_done);
+ }
+ spin_unlock_irqrestore(&task->lock, flags);
+ } while (cont);
+
+ task->ret = ret;
+
+ if (resched)
+ rxe_sched_task(task);
+
+ rxe_put(task->qp);
+}
+
+/* wrapper around do_task to fix argument for work queue */
+static void do_work(struct work_struct *work)
+{
+ do_task(container_of(work, struct rxe_task, work));
+}
+
+int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
+ int (*func)(struct rxe_qp *))
+{
+ WARN_ON(rxe_read(qp) <= 0);
+
+ task->qp = qp;
+ task->func = func;
+ task->state = TASK_STATE_IDLE;
+ spin_lock_init(&task->lock);
+ INIT_WORK(&task->work, do_work);
+
+ return 0;
+}
+
+/* rxe_cleanup_task is only called from rxe_do_qp_cleanup in
+ * process context. The qp has already been completed with no
+ * remaining references. Once the queue is drained the task
+ * is moved to invalid and this function returns. The qp
+ * cleanup code then calls the task functions directly without
+ * using the task struct to drain any late arriving packets
+ * or work requests.
+ */
+void rxe_cleanup_task(struct rxe_task *task)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&task->lock, flags);
+ if (!__is_done(task) && task->state < TASK_STATE_DRAINED) {
+ task->state = TASK_STATE_DRAINING;
+ } else {
+ task->state = TASK_STATE_INVALID;
+ spin_unlock_irqrestore(&task->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&task->lock, flags);
+
+ /* Now the task cannot be scheduled or run; just wait
+ * for the previously scheduled runs to finish.
+ */
+ while (!is_done(task))
+ cond_resched();
+
+ spin_lock_irqsave(&task->lock, flags);
+ task->state = TASK_STATE_INVALID;
+ spin_unlock_irqrestore(&task->lock, flags);
+}
+
+/* Schedule the task to run later as a work queue entry.
+ * queue_work() is safe to call while holding the task lock.
+ */
+void rxe_sched_task(struct rxe_task *task)
+{
+ unsigned long flags;
+
+ WARN_ON(rxe_read(task->qp) <= 0);
+
+ spin_lock_irqsave(&task->lock, flags);
+ if (__reserve_if_idle(task))
+ queue_work(rxe_wq, &task->work);
+ spin_unlock_irqrestore(&task->lock, flags);
+}
+
+/* rxe_disable/enable_task are only called from
+ * rxe_modify_qp in process context. Task is moved
+ * to the drained state by do_task.
+ */
+void rxe_disable_task(struct rxe_task *task)
+{
+ unsigned long flags;
+
+ WARN_ON(rxe_read(task->qp) <= 0);
+
+ spin_lock_irqsave(&task->lock, flags);
+ if (!__is_done(task) && task->state < TASK_STATE_DRAINED) {
+ task->state = TASK_STATE_DRAINING;
+ } else {
+ task->state = TASK_STATE_DRAINED;
+ spin_unlock_irqrestore(&task->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&task->lock, flags);
+
+ while (!is_done(task))
+ cond_resched();
+
+ spin_lock_irqsave(&task->lock, flags);
+ task->state = TASK_STATE_DRAINED;
+ spin_unlock_irqrestore(&task->lock, flags);
+}
+
+void rxe_enable_task(struct rxe_task *task)
+{
+ unsigned long flags;
+
+ WARN_ON(rxe_read(task->qp) <= 0);
+
+ spin_lock_irqsave(&task->lock, flags);
+ if (task->state == TASK_STATE_INVALID) {
+ spin_unlock_irqrestore(&task->lock, flags);
+ return;
+ }
+
+ task->state = TASK_STATE_IDLE;
+ spin_unlock_irqrestore(&task->lock, flags);
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h
new file mode 100644
index 000000000000..a8c9a77b6027
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_task.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#ifndef RXE_TASK_H
+#define RXE_TASK_H
+
+enum {
+ TASK_STATE_IDLE = 0,
+ TASK_STATE_BUSY = 1,
+ TASK_STATE_ARMED = 2,
+ TASK_STATE_DRAINING = 3,
+ TASK_STATE_DRAINED = 4,
+ TASK_STATE_INVALID = 5,
+};
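+
+/* Typical transitions, as implemented in rxe_task.c:
+ * IDLE -> BUSY when the task is scheduled,
+ * BUSY -> ARMED if it is scheduled again while running,
+ * ARMED -> BUSY when do_task makes another pass,
+ * BUSY -> IDLE when all work has been consumed,
+ * DRAINING -> DRAINED while the task is being disabled or
+ * cleaned up, and DRAINED -> INVALID once cleanup completes.
+ */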
+
+/*
+ * data structure to describe a 'task' which is a short
+ * function that returns 0 as long as it needs to be
+ * called again.
+ */
+struct rxe_task {
+ struct work_struct work;
+ int state;
+ spinlock_t lock;
+ struct rxe_qp *qp;
+ int (*func)(struct rxe_qp *qp);
+ int ret;
+ long num_sched;
+ long num_done;
+};
+
+int rxe_alloc_wq(void);
+
+void rxe_destroy_wq(void);
+
+/*
+ * init rxe_task structure
+ * qp => parameter to pass to func
+ * func => function to call until it returns != 0
+ */
+int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
+ int (*func)(struct rxe_qp *));
+
+/* cleanup task */
+void rxe_cleanup_task(struct rxe_task *task);
+
+void rxe_sched_task(struct rxe_task *task);
+
+/* keep a task from being scheduled */
+void rxe_disable_task(struct rxe_task *task);
+
+/* allow task to run */
+void rxe_enable_task(struct rxe_task *task);
+
+#endif /* RXE_TASK_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
new file mode 100644
index 000000000000..38d8c408320f
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -0,0 +1,1565 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#include <linux/dma-mapping.h>
+#include <net/addrconf.h>
+#include <rdma/uverbs_ioctl.h>
+
+#include "rxe.h"
+#include "rxe_queue.h"
+#include "rxe_hw_counters.h"
+
+static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr);
+
+/* dev */
+static int rxe_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *attr,
+ struct ib_udata *udata)
+{
+ struct rxe_dev *rxe = to_rdev(ibdev);
+ int err;
+
+ if (udata->inlen || udata->outlen) {
+ rxe_dbg_dev(rxe, "malformed udata\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ memcpy(attr, &rxe->attr, sizeof(*attr));
+
+ return 0;
+
+err_out:
+ rxe_err_dev(rxe, "returned err = %d\n", err);
+ return err;
+}
+
+static int rxe_query_port(struct ib_device *ibdev,
+ u32 port_num, struct ib_port_attr *attr)
+{
+ struct rxe_dev *rxe = to_rdev(ibdev);
+ struct net_device *ndev;
+ int err, ret;
+
+ if (port_num != 1) {
+ err = -EINVAL;
+ rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
+ goto err_out;
+ }
+
+ ndev = rxe_ib_device_get_netdev(ibdev);
+ if (!ndev) {
+ err = -ENODEV;
+ goto err_out;
+ }
+
+ memcpy(attr, &rxe->port.attr, sizeof(*attr));
+
+ mutex_lock(&rxe->usdev_lock);
+ ret = ib_get_eth_speed(ibdev, port_num, &attr->active_speed,
+ &attr->active_width);
+
+ attr->state = ib_get_curr_port_state(ndev);
+ if (attr->state == IB_PORT_ACTIVE)
+ attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+ else if (netif_get_flags(ndev) & IFF_UP)
+ attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
+ else
+ attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+
+ mutex_unlock(&rxe->usdev_lock);
+
+ dev_put(ndev);
+ return ret;
+
+err_out:
+ rxe_err_dev(rxe, "returned err = %d\n", err);
+ return err;
+}
+
+static int rxe_query_gid(struct ib_device *ibdev, u32 port, int idx,
+ union ib_gid *gid)
+{
+ struct rxe_dev *rxe = to_rdev(ibdev);
+
+ /* subnet_prefix == interface_id == 0; */
+ memset(gid, 0, sizeof(*gid));
+ memcpy(gid->raw, rxe->raw_gid, ETH_ALEN);
+
+ return 0;
+}
+
+static int rxe_query_pkey(struct ib_device *ibdev,
+ u32 port_num, u16 index, u16 *pkey)
+{
+ struct rxe_dev *rxe = to_rdev(ibdev);
+ int err;
+
+ if (index != 0) {
+ err = -EINVAL;
+ rxe_dbg_dev(rxe, "bad pkey index = %d\n", index);
+ goto err_out;
+ }
+
+ *pkey = IB_DEFAULT_PKEY_FULL;
+ return 0;
+
+err_out:
+ rxe_err_dev(rxe, "returned err = %d\n", err);
+ return err;
+}
+
+static int rxe_modify_device(struct ib_device *ibdev,
+ int mask, struct ib_device_modify *attr)
+{
+ struct rxe_dev *rxe = to_rdev(ibdev);
+ int err;
+
+ if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
+ IB_DEVICE_MODIFY_NODE_DESC)) {
+ err = -EOPNOTSUPP;
+ rxe_dbg_dev(rxe, "unsupported mask = 0x%x\n", mask);
+ goto err_out;
+ }
+
+ if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
+ rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);
+
+ if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
+ memcpy(rxe->ib_dev.node_desc,
+ attr->node_desc, sizeof(rxe->ib_dev.node_desc));
+ }
+
+ return 0;
+
+err_out:
+ rxe_err_dev(rxe, "returned err = %d\n", err);
+ return err;
+}
+
+static int rxe_modify_port(struct ib_device *ibdev, u32 port_num,
+ int mask, struct ib_port_modify *attr)
+{
+ struct rxe_dev *rxe = to_rdev(ibdev);
+ struct rxe_port *port;
+ int err;
+
+ if (port_num != 1) {
+ err = -EINVAL;
+ rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
+ goto err_out;
+ }
+
+ //TODO is shutdown useful
+ if (mask & ~(IB_PORT_RESET_QKEY_CNTR)) {
+ err = -EOPNOTSUPP;
+ rxe_dbg_dev(rxe, "unsupported mask = 0x%x\n", mask);
+ goto err_out;
+ }
+
+ port = &rxe->port;
+ port->attr.port_cap_flags |= attr->set_port_cap_mask;
+ port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;
+
+ if (mask & IB_PORT_RESET_QKEY_CNTR)
+ port->attr.qkey_viol_cntr = 0;
+
+ return 0;
+
+err_out:
+ rxe_err_dev(rxe, "returned err = %d\n", err);
+ return err;
+}
+
+static enum rdma_link_layer rxe_get_link_layer(struct ib_device *ibdev,
+ u32 port_num)
+{
+ struct rxe_dev *rxe = to_rdev(ibdev);
+ int err;
+
+ if (port_num != 1) {
+ err = -EINVAL;
+ rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
+ goto err_out;
+ }
+
+ return IB_LINK_LAYER_ETHERNET;
+
+err_out:
+ rxe_err_dev(rxe, "returned err = %d\n", err);
+ return err;
+}
+
+static int rxe_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct rxe_dev *rxe = to_rdev(ibdev);
+ struct ib_port_attr attr = {};
+ int err;
+
+ if (port_num != 1) {
+ err = -EINVAL;
+ rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
+ goto err_out;
+ }
+
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err)
+ goto err_out;
+
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+
+ return 0;
+
+err_out:
+ rxe_err_dev(rxe, "returned err = %d\n", err);
+ return err;
+}
+
+/* uc */
+static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata)
+{
+ struct rxe_dev *rxe = to_rdev(ibuc->device);
+ struct rxe_ucontext *uc = to_ruc(ibuc);
+ int err;
+
+ err = rxe_add_to_pool(&rxe->uc_pool, uc);
+ if (err)
+ rxe_err_dev(rxe, "unable to create uc\n");
+
+ return err;
+}
+
+static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
+{
+ struct rxe_ucontext *uc = to_ruc(ibuc);
+ int err;
+
+ err = rxe_cleanup(uc);
+ if (err)
+ rxe_err_uc(uc, "cleanup failed, err = %d\n", err);
+}
+
+/* pd */
+static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct rxe_dev *rxe = to_rdev(ibpd->device);
+ struct rxe_pd *pd = to_rpd(ibpd);
+ int err;
+
+ err = rxe_add_to_pool(&rxe->pd_pool, pd);
+ if (err) {
+ rxe_dbg_dev(rxe, "unable to alloc pd\n");
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ rxe_err_dev(rxe, "returned err = %d\n", err);
+ return err;
+}
+
+static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct rxe_pd *pd = to_rpd(ibpd);
+ int err;
+
+ err = rxe_cleanup(pd);
+ if (err)
+ rxe_err_pd(pd, "cleanup failed, err = %d\n", err);
+
+ return 0;
+}
+
+/* ah */
+static int rxe_create_ah(struct ib_ah *ibah,
+ struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct rxe_dev *rxe = to_rdev(ibah->device);
+ struct rxe_ah *ah = to_rah(ibah);
+ struct rxe_create_ah_resp __user *uresp = NULL;
+ int err, cleanup_err;
+
+ if (udata) {
+ /* test if new user provider */
+ if (udata->outlen >= sizeof(*uresp))
+ uresp = udata->outbuf;
+ ah->is_user = true;
+ } else {
+ ah->is_user = false;
+ }
+
+ err = rxe_add_to_pool_ah(&rxe->ah_pool, ah,
+ init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
+ if (err) {
+ rxe_dbg_dev(rxe, "unable to create ah\n");
+ goto err_out;
+ }
+
+ /* create index > 0 */
+ ah->ah_num = ah->elem.index;
+
+ err = rxe_ah_chk_attr(ah, init_attr->ah_attr);
+ if (err) {
+ rxe_dbg_ah(ah, "bad attr\n");
+ goto err_cleanup;
+ }
+
+ if (uresp) {
+ /* only if new user provider */
+ err = copy_to_user(&uresp->ah_num, &ah->ah_num,
+ sizeof(uresp->ah_num));
+ if (err) {
+ err = -EFAULT;
+ rxe_dbg_ah(ah, "unable to copy to user\n");
+ goto err_cleanup;
+ }
+ } else if (ah->is_user) {
+ /* only if old user provider */
+ ah->ah_num = 0;
+ }
+
+ rxe_init_av(init_attr->ah_attr, &ah->av);
+ rxe_finalize(ah);
+
+ return 0;
+
+err_cleanup:
+ cleanup_err = rxe_cleanup(ah);
+ if (cleanup_err)
+ rxe_err_ah(ah, "cleanup failed, err = %d\n", cleanup_err);
+err_out:
+ rxe_err_ah(ah, "returned err = %d\n", err);
+ return err;
+}
+
+static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
+{
+ struct rxe_ah *ah = to_rah(ibah);
+ int err;
+
+ err = rxe_ah_chk_attr(ah, attr);
+ if (err) {
+ rxe_dbg_ah(ah, "bad attr\n");
+ goto err_out;
+ }
+
+ rxe_init_av(attr, &ah->av);
+
+ return 0;
+
+err_out:
+ rxe_err_ah(ah, "returned err = %d\n", err);
+ return err;
+}
+
+static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
+{
+ struct rxe_ah *ah = to_rah(ibah);
+
+ memset(attr, 0, sizeof(*attr));
+ attr->type = ibah->type;
+ rxe_av_to_attr(&ah->av, attr);
+
+ return 0;
+}
+
+static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
+{
+ struct rxe_ah *ah = to_rah(ibah);
+ int err;
+
+ err = rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE);
+ if (err)
+ rxe_err_ah(ah, "cleanup failed, err = %d\n", err);
+
+ return 0;
+}
+
+/* srq */
+static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
+ struct ib_udata *udata)
+{
+ struct rxe_dev *rxe = to_rdev(ibsrq->device);
+ struct rxe_pd *pd = to_rpd(ibsrq->pd);
+ struct rxe_srq *srq = to_rsrq(ibsrq);
+ struct rxe_create_srq_resp __user *uresp = NULL;
+ int err, cleanup_err;
+
+ if (udata) {
+ if (udata->outlen < sizeof(*uresp)) {
+ err = -EINVAL;
+ rxe_err_dev(rxe, "malformed udata\n");
+ goto err_out;
+ }
+ uresp = udata->outbuf;
+ }
+
+ if (init->srq_type != IB_SRQT_BASIC) {
+ err = -EOPNOTSUPP;
+ rxe_dbg_dev(rxe, "srq type = %d, not supported\n",
+ init->srq_type);
+ goto err_out;
+ }
+
+ err = rxe_srq_chk_init(rxe, init);
+ if (err) {
+ rxe_dbg_dev(rxe, "invalid init attributes\n");
+ goto err_out;
+ }
+
+ err = rxe_add_to_pool(&rxe->srq_pool, srq);
+ if (err) {
+ rxe_dbg_dev(rxe, "unable to create srq, err = %d\n", err);
+ goto err_out;
+ }
+
+ rxe_get(pd);
+ srq->pd = pd;
+
+ err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
+ if (err) {
+ rxe_dbg_srq(srq, "create srq failed, err = %d\n", err);
+ goto err_cleanup;
+ }
+
+ return 0;
+
+err_cleanup:
+ cleanup_err = rxe_cleanup(srq);
+ if (cleanup_err)
+ rxe_err_srq(srq, "cleanup failed, err = %d\n", cleanup_err);
+err_out:
+ rxe_err_dev(rxe, "returned err = %d\n", err);
+ return err;
+}
+
+static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask mask,
+ struct ib_udata *udata)
+{
+ struct rxe_srq *srq = to_rsrq(ibsrq);
+ struct rxe_dev *rxe = to_rdev(ibsrq->device);
+ struct rxe_modify_srq_cmd cmd = {};
+ int err;
+
+ if (udata) {
+ if (udata->inlen < sizeof(cmd)) {
+ err = -EINVAL;
+ rxe_dbg_srq(srq, "malformed udata\n");
+ goto err_out;
+ }
+
+ err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
+ if (err) {
+ err = -EFAULT;
+ rxe_dbg_srq(srq, "unable to read udata\n");
+ goto err_out;
+ }
+ }
+
+ err = rxe_srq_chk_attr(rxe, srq, attr, mask);
+ if (err) {
+ rxe_dbg_srq(srq, "bad init attributes\n");
+ goto err_out;
+ }
+
+ err = rxe_srq_from_attr(rxe, srq, attr, mask, &cmd, udata);
+ if (err) {
+ rxe_dbg_srq(srq, "bad attr\n");
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ rxe_err_srq(srq, "returned err = %d\n", err);
+ return err;
+}
+
+static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+{
+ struct rxe_srq *srq = to_rsrq(ibsrq);
+ int err;
+
+ if (srq->error) {
+ err = -EINVAL;
+ rxe_dbg_srq(srq, "srq in error state\n");
+ goto err_out;
+ }
+
+ attr->max_wr = srq->rq.queue->buf->index_mask;
+ attr->max_sge = srq->rq.max_sge;
+ attr->srq_limit = srq->limit;
+ return 0;
+
+err_out:
+ rxe_err_srq(srq, "returned err = %d\n", err);
+ return err;
+}
+
+static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ int err = 0;
+ struct rxe_srq *srq = to_rsrq(ibsrq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&srq->rq.producer_lock, flags);
+
+ while (wr) {
+ err = post_one_recv(&srq->rq, wr);
+ if (unlikely(err))
+ break;
+ wr = wr->next;
+ }
+
+ spin_unlock_irqrestore(&srq->rq.producer_lock, flags);
+
+ if (err) {
+ *bad_wr = wr;
+ rxe_err_srq(srq, "returned err = %d\n", err);
+ }
+
+ return err;
+}
+
+static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+{
+ struct rxe_srq *srq = to_rsrq(ibsrq);
+ int err;
+
+ err = rxe_cleanup(srq);
+ if (err)
+ rxe_err_srq(srq, "cleanup failed, err = %d\n", err);
+
+ return 0;
+}
+
+/* qp */
+static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
+ struct ib_udata *udata)
+{
+ struct rxe_dev *rxe = to_rdev(ibqp->device);
+ struct rxe_pd *pd = to_rpd(ibqp->pd);
+ struct rxe_qp *qp = to_rqp(ibqp);
+ struct rxe_create_qp_resp __user *uresp = NULL;
+ int err, cleanup_err;
+
+ if (udata) {
+ if (udata->inlen) {
+ err = -EINVAL;
+ rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
+ goto err_out;
+ }
+
+ if (udata->outlen < sizeof(*uresp)) {
+ err = -EINVAL;
+ rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
+ goto err_out;
+ }
+
+ qp->is_user = true;
+ uresp = udata->outbuf;
+ } else {
+ qp->is_user = false;
+ }
+
+ if (init->create_flags) {
+ err = -EOPNOTSUPP;
+ rxe_dbg_dev(rxe, "unsupported create_flags, err = %d\n", err);
+ goto err_out;
+ }
+
+ err = rxe_qp_chk_init(rxe, init);
+ if (err) {
+ rxe_dbg_dev(rxe, "bad init attr, err = %d\n", err);
+ goto err_out;
+ }
+
+ err = rxe_add_to_pool(&rxe->qp_pool, qp);
+ if (err) {
+ rxe_dbg_dev(rxe, "unable to create qp, err = %d\n", err);
+ goto err_out;
+ }
+
+ err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata);
+ if (err) {
+ rxe_dbg_qp(qp, "create qp failed, err = %d\n", err);
+ goto err_cleanup;
+ }
+
+ rxe_finalize(qp);
+ return 0;
+
+err_cleanup:
+ cleanup_err = rxe_cleanup(qp);
+ if (cleanup_err)
+ rxe_err_qp(qp, "cleanup failed, err = %d\n", cleanup_err);
+err_out:
+ rxe_err_dev(rxe, "returned err = %d\n", err);
+ return err;
+}
+
+static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int mask, struct ib_udata *udata)
+{
+ struct rxe_dev *rxe = to_rdev(ibqp->device);
+ struct rxe_qp *qp = to_rqp(ibqp);
+ int err;
+
+ if (mask & ~IB_QP_ATTR_STANDARD_BITS) {
+ err = -EOPNOTSUPP;
+ rxe_dbg_qp(qp, "unsupported mask = 0x%x, err = %d\n",
+ mask, err);
+ goto err_out;
+ }
+
+ err = rxe_qp_chk_attr(rxe, qp, attr, mask);
+ if (err) {
+ rxe_dbg_qp(qp, "bad mask/attr, err = %d\n", err);
+ goto err_out;
+ }
+
+ err = rxe_qp_from_attr(qp, attr, mask, udata);
+ if (err) {
+ rxe_dbg_qp(qp, "modify qp failed, err = %d\n", err);
+ goto err_out;
+ }
+
+ if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH))
+ qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
+ qp->ibqp.qp_num,
+ qp->attr.dest_qp_num);
+
+ return 0;
+
+err_out:
+ rxe_err_qp(qp, "returned err = %d\n", err);
+ return err;
+}
+
+static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int mask, struct ib_qp_init_attr *init)
+{
+ struct rxe_qp *qp = to_rqp(ibqp);
+
+ rxe_qp_to_init(qp, init);
+ rxe_qp_to_attr(qp, attr, mask);
+
+ return 0;
+}
+
+static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+{
+ struct rxe_qp *qp = to_rqp(ibqp);
+ int err;
+
+ err = rxe_qp_chk_destroy(qp);
+ if (err) {
+ rxe_dbg_qp(qp, "unable to destroy qp, err = %d\n", err);
+ goto err_out;
+ }
+
+ err = rxe_cleanup(qp);
+ if (err)
+ rxe_err_qp(qp, "cleanup failed, err = %d\n", err);
+
+ return 0;
+
+err_out:
+ rxe_err_qp(qp, "returned err = %d\n", err);
+ return err;
+}
+
+/* send wr */
+
+/* sanity check incoming send work request */
+static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
+ unsigned int *maskp, unsigned int *lengthp)
+{
+ int num_sge = ibwr->num_sge;
+ struct rxe_sq *sq = &qp->sq;
+ unsigned int mask = 0;
+ unsigned long length = 0;
+ int err = -EINVAL;
+ int i;
+
+ do {
+ mask = wr_opcode_mask(ibwr->opcode, qp);
+ if (!mask) {
+ rxe_err_qp(qp, "bad wr opcode for qp type\n");
+ break;
+ }
+
+ if (num_sge > sq->max_sge) {
+ rxe_err_qp(qp, "num_sge > max_sge\n");
+ break;
+ }
+
+ length = 0;
+ for (i = 0; i < ibwr->num_sge; i++)
+ length += ibwr->sg_list[i].length;
+
+ if (length > RXE_PORT_MAX_MSG_SZ) {
+ rxe_err_qp(qp, "message length too long\n");
+ break;
+ }
+
+ if (mask & WR_ATOMIC_MASK) {
+ if (length != 8) {
+ rxe_err_qp(qp, "atomic length != 8\n");
+ break;
+ }
+ if (atomic_wr(ibwr)->remote_addr & 0x7) {
+ rxe_err_qp(qp, "misaligned atomic address\n");
+ break;
+ }
+ }
+ if (ibwr->send_flags & IB_SEND_INLINE) {
+ if (!(mask & WR_INLINE_MASK)) {
+ rxe_err_qp(qp, "opcode doesn't support inline data\n");
+ break;
+ }
+ if (length > sq->max_inline) {
+ rxe_err_qp(qp, "inline length too big\n");
+ break;
+ }
+ }
+
+ err = 0;
+ } while (0);
+
+ *maskp = mask;
+ *lengthp = (int)length;
+
+ return err;
+}
+
+static int init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
+ const struct ib_send_wr *ibwr)
+{
+ wr->wr_id = ibwr->wr_id;
+ wr->opcode = ibwr->opcode;
+ wr->send_flags = ibwr->send_flags;
+
+ if (qp_type(qp) == IB_QPT_UD ||
+ qp_type(qp) == IB_QPT_GSI) {
+ struct ib_ah *ibah = ud_wr(ibwr)->ah;
+
+ wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
+ wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
+ wr->wr.ud.ah_num = to_rah(ibah)->ah_num;
+ if (qp_type(qp) == IB_QPT_GSI)
+ wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
+
+ switch (wr->opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ wr->ex.imm_data = ibwr->ex.imm_data;
+ break;
+ case IB_WR_SEND:
+ break;
+ default:
+ rxe_err_qp(qp, "bad wr opcode %d for UD/GSI QP\n",
+ wr->opcode);
+ return -EINVAL;
+ }
+ } else {
+ switch (wr->opcode) {
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ wr->ex.imm_data = ibwr->ex.imm_data;
+ fallthrough;
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_WRITE:
+ wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
+ wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ wr->ex.imm_data = ibwr->ex.imm_data;
+ break;
+ case IB_WR_SEND_WITH_INV:
+ wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
+ break;
+ case IB_WR_RDMA_READ_WITH_INV:
+ wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
+ wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
+ wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
+ break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ wr->wr.atomic.remote_addr =
+ atomic_wr(ibwr)->remote_addr;
+ wr->wr.atomic.compare_add =
+ atomic_wr(ibwr)->compare_add;
+ wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
+ wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
+ break;
+ case IB_WR_LOCAL_INV:
+ wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
+ break;
+ case IB_WR_REG_MR:
+ wr->wr.reg.mr = reg_wr(ibwr)->mr;
+ wr->wr.reg.key = reg_wr(ibwr)->key;
+ wr->wr.reg.access = reg_wr(ibwr)->access;
+ break;
+ case IB_WR_SEND:
+ case IB_WR_BIND_MW:
+ case IB_WR_FLUSH:
+ case IB_WR_ATOMIC_WRITE:
+ break;
+ default:
+ rxe_err_qp(qp, "unsupported wr opcode %d\n",
+ wr->opcode);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
+ const struct ib_send_wr *ibwr)
+{
+ struct ib_sge *sge = ibwr->sg_list;
+ u8 *p = wqe->dma.inline_data;
+ int i;
+
+ for (i = 0; i < ibwr->num_sge; i++, sge++) {
+ memcpy(p, ib_virt_dma_to_ptr(sge->addr), sge->length);
+ p += sge->length;
+ }
+}
+
+static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
+ unsigned int mask, unsigned int length,
+ struct rxe_send_wqe *wqe)
+{
+ int num_sge = ibwr->num_sge;
+ int err;
+
+ err = init_send_wr(qp, &wqe->wr, ibwr);
+ if (err)
+ return err;
+
+ /* local operation */
+ if (unlikely(mask & WR_LOCAL_OP_MASK)) {
+ wqe->mask = mask;
+ wqe->state = wqe_state_posted;
+ return 0;
+ }
+
+ if (unlikely(ibwr->send_flags & IB_SEND_INLINE))
+ copy_inline_data_to_wqe(wqe, ibwr);
+ else
+ memcpy(wqe->dma.sge, ibwr->sg_list,
+ num_sge * sizeof(struct ib_sge));
+
+ wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
+ mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
+ wqe->mask = mask;
+ wqe->dma.length = length;
+ wqe->dma.resid = length;
+ wqe->dma.num_sge = num_sge;
+ wqe->dma.cur_sge = 0;
+ wqe->dma.sge_offset = 0;
+ wqe->state = wqe_state_posted;
+ wqe->ssn = atomic_add_return(1, &qp->ssn);
+
+ return 0;
+}
+
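+/* validate the work request, check for space in the send queue,
+ * fill in the wqe at the producer slot and only then advance the
+ * producer index so the wqe becomes visible to the requester once
+ * it is fully initialized
+ */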
+static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr)
+{
+ int err;
+ struct rxe_sq *sq = &qp->sq;
+ struct rxe_send_wqe *send_wqe;
+ unsigned int mask;
+ unsigned int length;
+ int full;
+
+ err = validate_send_wr(qp, ibwr, &mask, &length);
+ if (err)
+ return err;
+
+ full = queue_full(sq->queue, QUEUE_TYPE_FROM_ULP);
+ if (unlikely(full)) {
+ rxe_err_qp(qp, "send queue full\n");
+ return -ENOMEM;
+ }
+
+ send_wqe = queue_producer_addr(sq->queue, QUEUE_TYPE_FROM_ULP);
+ err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
+ if (!err)
+ queue_advance_producer(sq->queue, QUEUE_TYPE_FROM_ULP);
+
+ return err;
+}
+
+static int rxe_post_send_kernel(struct rxe_qp *qp,
+ const struct ib_send_wr *ibwr,
+ const struct ib_send_wr **bad_wr)
+{
+ int err = 0;
+ unsigned long flags;
+ int good = 0;
+
+ spin_lock_irqsave(&qp->sq.sq_lock, flags);
+ while (ibwr) {
+ err = post_one_send(qp, ibwr);
+ if (err) {
+ *bad_wr = ibwr;
+ break;
+ } else {
+ good++;
+ }
+ ibwr = ibwr->next;
+ }
+ spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+
+ /* kick off processing of any posted wqes */
+ if (good)
+ rxe_sched_task(&qp->send_task);
+
+ return err;
+}
+
+static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ struct rxe_qp *qp = to_rqp(ibqp);
+ int err;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->state_lock, flags);
+ /* caller has already called destroy_qp */
+ if (WARN_ON_ONCE(!qp->valid)) {
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+ rxe_err_qp(qp, "qp has been destroyed\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+ *bad_wr = wr;
+ rxe_err_qp(qp, "qp not ready to send\n");
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+
+ if (qp->is_user) {
+ /* Utilize process context to do protocol processing */
+ rxe_sched_task(&qp->send_task);
+ } else {
+ err = rxe_post_send_kernel(qp, wr, bad_wr);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* recv wr */
+static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
+{
+ int i;
+ unsigned long length;
+ struct rxe_recv_wqe *recv_wqe;
+ int num_sge = ibwr->num_sge;
+ int full;
+ int err;
+
+ full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP);
+ if (unlikely(full)) {
+ err = -ENOMEM;
+ rxe_dbg("queue full\n");
+ goto err_out;
+ }
+
+ if (unlikely(num_sge > rq->max_sge)) {
+ err = -EINVAL;
+ rxe_dbg("bad num_sge > max_sge\n");
+ goto err_out;
+ }
+
+ length = 0;
+ for (i = 0; i < num_sge; i++)
+ length += ibwr->sg_list[i].length;
+
+ if (length > RXE_PORT_MAX_MSG_SZ) {
+ err = -EINVAL;
+ rxe_dbg("message length too long\n");
+ goto err_out;
+ }
+
+ recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_FROM_ULP);
+
+ recv_wqe->wr_id = ibwr->wr_id;
+ recv_wqe->dma.length = length;
+ recv_wqe->dma.resid = length;
+ recv_wqe->dma.num_sge = num_sge;
+ recv_wqe->dma.cur_sge = 0;
+ recv_wqe->dma.sge_offset = 0;
+ memcpy(recv_wqe->dma.sge, ibwr->sg_list,
+ num_sge * sizeof(struct ib_sge));
+
+ queue_advance_producer(rq->queue, QUEUE_TYPE_FROM_ULP);
+
+ return 0;
+
+err_out:
+ rxe_dbg("returned err = %d\n", err);
+ return err;
+}
+
+static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ int err = 0;
+ struct rxe_qp *qp = to_rqp(ibqp);
+ struct rxe_rq *rq = &qp->rq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->state_lock, flags);
+ /* caller has already called destroy_qp */
+ if (WARN_ON_ONCE(!qp->valid)) {
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+ rxe_err_qp(qp, "qp has been destroyed\n");
+ return -EINVAL;
+ }
+
+ /* see C10-97.2.1 */
+ if (unlikely((qp_state(qp) < IB_QPS_INIT))) {
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+ *bad_wr = wr;
+ rxe_dbg_qp(qp, "qp not ready to post recv\n");
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+
+ if (unlikely(qp->srq)) {
+ *bad_wr = wr;
+ rxe_dbg_qp(qp, "qp has srq, use post_srq_recv instead\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&rq->producer_lock, flags);
+
+ while (wr) {
+ err = post_one_recv(rq, wr);
+ if (unlikely(err)) {
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+
+ spin_unlock_irqrestore(&rq->producer_lock, flags);
+
+ spin_lock_irqsave(&qp->state_lock, flags);
+ if (qp_state(qp) == IB_QPS_ERR)
+ rxe_sched_task(&qp->recv_task);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+
+ return err;
+}
+
+/* cq */
+static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_udata *udata = &attrs->driver_udata;
+ struct ib_device *dev = ibcq->device;
+ struct rxe_dev *rxe = to_rdev(dev);
+ struct rxe_cq *cq = to_rcq(ibcq);
+ struct rxe_create_cq_resp __user *uresp = NULL;
+ int err, cleanup_err;
+
+ if (udata) {
+ if (udata->outlen < sizeof(*uresp)) {
+ err = -EINVAL;
+ rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
+ goto err_out;
+ }
+ uresp = udata->outbuf;
+ }
+
+ if (attr->flags) {
+ err = -EOPNOTSUPP;
+ rxe_dbg_dev(rxe, "bad attr->flags, err = %d\n", err);
+ goto err_out;
+ }
+
+ err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
+ if (err) {
+ rxe_dbg_dev(rxe, "bad init attributes, err = %d\n", err);
+ goto err_out;
+ }
+
+ err = rxe_add_to_pool(&rxe->cq_pool, cq);
+ if (err) {
+ rxe_dbg_dev(rxe, "unable to create cq, err = %d\n", err);
+ goto err_out;
+ }
+
+ err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
+ uresp);
+ if (err) {
+ rxe_dbg_cq(cq, "create cq failed, err = %d\n", err);
+ goto err_cleanup;
+ }
+
+ return 0;
+
+err_cleanup:
+ cleanup_err = rxe_cleanup(cq);
+ if (cleanup_err)
+ rxe_err_cq(cq, "cleanup failed, err = %d\n", cleanup_err);
+err_out:
+ rxe_err_dev(rxe, "returned err = %d\n", err);
+ return err;
+}
+
+static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+{
+ struct rxe_cq *cq = to_rcq(ibcq);
+ struct rxe_dev *rxe = to_rdev(ibcq->device);
+ struct rxe_resize_cq_resp __user *uresp = NULL;
+ int err;
+
+ if (udata) {
+ if (udata->outlen < sizeof(*uresp)) {
+ err = -EINVAL;
+ rxe_dbg_cq(cq, "malformed udata\n");
+ goto err_out;
+ }
+ uresp = udata->outbuf;
+ }
+
+ err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
+ if (err) {
+ rxe_dbg_cq(cq, "bad attr, err = %d\n", err);
+ goto err_out;
+ }
+
+ err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
+ if (err) {
+ rxe_dbg_cq(cq, "resize cq failed, err = %d\n", err);
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ rxe_err_cq(cq, "returned err = %d\n", err);
+ return err;
+}
+
+static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ int i;
+ struct rxe_cq *cq = to_rcq(ibcq);
+ struct rxe_cqe *cqe;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ for (i = 0; i < num_entries; i++) {
+ cqe = queue_head(cq->queue, QUEUE_TYPE_TO_ULP);
+ if (!cqe)
+ break; /* queue empty */
+
+ memcpy(wc++, &cqe->ibwc, sizeof(*wc));
+ queue_advance_consumer(cq->queue, QUEUE_TYPE_TO_ULP);
+ }
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ return i;
+}
+
+static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
+{
+ struct rxe_cq *cq = to_rcq(ibcq);
+ int count;
+
+ count = queue_count(cq->queue, QUEUE_TYPE_TO_ULP);
+
+ return (count > wc_cnt) ? wc_cnt : count;
+}
+
+static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct rxe_cq *cq = to_rcq(ibcq);
+ int ret = 0;
+ int empty;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&cq->cq_lock, irq_flags);
+ cq->notify |= flags & IB_CQ_SOLICITED_MASK;
+ empty = queue_empty(cq->queue, QUEUE_TYPE_TO_ULP);
+
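+ /* tell the caller to poll again if it asked to be notified of
+ * missed events and completions are already queued
+ */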
+ if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty)
+ ret = 1;
+
+ spin_unlock_irqrestore(&cq->cq_lock, irq_flags);
+
+ return ret;
+}
+
+static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+{
+ struct rxe_cq *cq = to_rcq(ibcq);
+ int err;
+
+ /* See IBA C11-17: The CI shall return an error if this Verb is
+ * invoked while a Work Queue is still associated with the CQ.
+ */
+ if (atomic_read(&cq->num_wq)) {
+ err = -EINVAL;
+ rxe_dbg_cq(cq, "still in use\n");
+ goto err_out;
+ }
+
+ err = rxe_cleanup(cq);
+ if (err)
+ rxe_err_cq(cq, "cleanup failed, err = %d\n", err);
+
+ return 0;
+
+err_out:
+ rxe_err_cq(cq, "returned err = %d\n", err);
+ return err;
+}
+
+/* mr */
+static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
+{
+ struct rxe_dev *rxe = to_rdev(ibpd->device);
+ struct rxe_pd *pd = to_rpd(ibpd);
+ struct rxe_mr *mr;
+ int err;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ err = rxe_add_to_pool(&rxe->mr_pool, mr);
+ if (err) {
+ rxe_dbg_dev(rxe, "unable to create mr\n");
+ goto err_free;
+ }
+
+ rxe_get(pd);
+ mr->ibmr.pd = ibpd;
+ mr->ibmr.device = ibpd->device;
+
+ rxe_mr_init_dma(access, mr);
+ rxe_finalize(mr);
+ return &mr->ibmr;
+
+err_free:
+ kfree(mr);
+ rxe_err_pd(pd, "returned err = %d\n", err);
+ return ERR_PTR(err);
+}
+
+static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
+ u64 length, u64 iova, int access,
+ struct ib_dmah *dmah,
+ struct ib_udata *udata)
+{
+ struct rxe_dev *rxe = to_rdev(ibpd->device);
+ struct rxe_pd *pd = to_rpd(ibpd);
+ struct rxe_mr *mr;
+ int err, cleanup_err;
+
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (access & ~RXE_ACCESS_SUPPORTED_MR) {
+ rxe_err_pd(pd, "access = %#x not supported (%#x)\n", access,
+ RXE_ACCESS_SUPPORTED_MR);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ err = rxe_add_to_pool(&rxe->mr_pool, mr);
+ if (err) {
+ rxe_dbg_pd(pd, "unable to create mr\n");
+ goto err_free;
+ }
+
+ rxe_get(pd);
+ mr->ibmr.pd = ibpd;
+ mr->ibmr.device = ibpd->device;
+
+ if (access & IB_ACCESS_ON_DEMAND)
+ err = rxe_odp_mr_init_user(rxe, start, length, iova, access, mr);
+ else
+ err = rxe_mr_init_user(rxe, start, length, access, mr);
+ if (err) {
+ rxe_dbg_mr(mr, "reg_user_mr failed, err = %d\n", err);
+ goto err_cleanup;
+ }
+
+ rxe_finalize(mr);
+ return &mr->ibmr;
+
+err_cleanup:
+ cleanup_err = rxe_cleanup(mr);
+ if (cleanup_err)
+ rxe_err_mr(mr, "cleanup failed, err = %d\n", cleanup_err);
+err_free:
+ kfree(mr);
+ rxe_err_pd(pd, "returned err = %d\n", err);
+ return ERR_PTR(err);
+}
+
+static struct ib_mr *rxe_rereg_user_mr(struct ib_mr *ibmr, int flags,
+ u64 start, u64 length, u64 iova,
+ int access, struct ib_pd *ibpd,
+ struct ib_udata *udata)
+{
+ struct rxe_mr *mr = to_rmr(ibmr);
+ struct rxe_pd *old_pd = to_rpd(ibmr->pd);
+ struct rxe_pd *pd = to_rpd(ibpd);
+
+ /* for now only support the two easy cases:
+ * rereg_pd and rereg_access
+ */
+ if (flags & ~RXE_MR_REREG_SUPPORTED) {
+ rxe_err_mr(mr, "flags = %#x not supported\n", flags);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ if (flags & IB_MR_REREG_PD) {
+ rxe_put(old_pd);
+ rxe_get(pd);
+ mr->ibmr.pd = ibpd;
+ }
+
+ if (flags & IB_MR_REREG_ACCESS) {
+ if (access & ~RXE_ACCESS_SUPPORTED_MR) {
+ rxe_err_mr(mr, "access = %#x not supported\n", access);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+ mr->access = access;
+ }
+
+ return NULL;
+}
+
+static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ struct rxe_dev *rxe = to_rdev(ibpd->device);
+ struct rxe_pd *pd = to_rpd(ibpd);
+ struct rxe_mr *mr;
+ int err, cleanup_err;
+
+ if (mr_type != IB_MR_TYPE_MEM_REG) {
+ err = -EINVAL;
+ rxe_dbg_pd(pd, "mr type %d not supported, err = %d\n",
+ mr_type, err);
+ goto err_out;
+ }
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ err = rxe_add_to_pool(&rxe->mr_pool, mr);
+ if (err)
+ goto err_free;
+
+ rxe_get(pd);
+ mr->ibmr.pd = ibpd;
+ mr->ibmr.device = ibpd->device;
+
+ err = rxe_mr_init_fast(max_num_sg, mr);
+ if (err) {
+ rxe_dbg_mr(mr, "alloc_mr failed, err = %d\n", err);
+ goto err_cleanup;
+ }
+
+ rxe_finalize(mr);
+ return &mr->ibmr;
+
+err_cleanup:
+ cleanup_err = rxe_cleanup(mr);
+ if (cleanup_err)
+ rxe_err_mr(mr, "cleanup failed, err = %d\n", cleanup_err);
+err_free:
+ kfree(mr);
+err_out:
+ rxe_err_pd(pd, "returned err = %d\n", err);
+ return ERR_PTR(err);
+}
+
+static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+{
+ struct rxe_mr *mr = to_rmr(ibmr);
+ int err, cleanup_err;
+
+ /* See IBA 10.6.7.2.6 */
+ if (atomic_read(&mr->num_mw) > 0) {
+ err = -EINVAL;
+ rxe_dbg_mr(mr, "mr has mw's bound\n");
+ goto err_out;
+ }
+
+ cleanup_err = rxe_cleanup(mr);
+ if (cleanup_err)
+ rxe_err_mr(mr, "cleanup failed, err = %d\n", cleanup_err);
+
+ kfree_rcu_mightsleep(mr);
+ return 0;
+
+err_out:
+ rxe_err_mr(mr, "returned err = %d\n", err);
+ return err;
+}
+
+static ssize_t parent_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct rxe_dev *rxe =
+ rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);
+
+ return sysfs_emit(buf, "%s\n", rxe_parent_name(rxe, 1));
+}
+
+static DEVICE_ATTR_RO(parent);
+
+static struct attribute *rxe_dev_attributes[] = {
+ &dev_attr_parent.attr,
+ NULL
+};
+
+static const struct attribute_group rxe_attr_group = {
+ .attrs = rxe_dev_attributes,
+};
+
+static int rxe_enable_driver(struct ib_device *ib_dev)
+{
+ struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);
+ struct net_device *ndev;
+
+ ndev = rxe_ib_device_get_netdev(ib_dev);
+ if (!ndev)
+ return -ENODEV;
+
+ rxe_set_port_state(rxe);
+ dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(ndev));
+
+ dev_put(ndev);
+ return 0;
+}
+
+static const struct ib_device_ops rxe_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_RXE,
+ .uverbs_abi_ver = RXE_UVERBS_ABI_VERSION,
+
+ .alloc_hw_port_stats = rxe_ib_alloc_hw_port_stats,
+ .alloc_mr = rxe_alloc_mr,
+ .alloc_mw = rxe_alloc_mw,
+ .alloc_pd = rxe_alloc_pd,
+ .alloc_ucontext = rxe_alloc_ucontext,
+ .attach_mcast = rxe_attach_mcast,
+ .create_ah = rxe_create_ah,
+ .create_cq = rxe_create_cq,
+ .create_qp = rxe_create_qp,
+ .create_srq = rxe_create_srq,
+ .create_user_ah = rxe_create_ah,
+ .dealloc_driver = rxe_dealloc,
+ .dealloc_mw = rxe_dealloc_mw,
+ .dealloc_pd = rxe_dealloc_pd,
+ .dealloc_ucontext = rxe_dealloc_ucontext,
+ .dereg_mr = rxe_dereg_mr,
+ .destroy_ah = rxe_destroy_ah,
+ .destroy_cq = rxe_destroy_cq,
+ .destroy_qp = rxe_destroy_qp,
+ .destroy_srq = rxe_destroy_srq,
+ .detach_mcast = rxe_detach_mcast,
+ .device_group = &rxe_attr_group,
+ .enable_driver = rxe_enable_driver,
+ .get_dma_mr = rxe_get_dma_mr,
+ .get_hw_stats = rxe_ib_get_hw_stats,
+ .get_link_layer = rxe_get_link_layer,
+ .get_port_immutable = rxe_port_immutable,
+ .map_mr_sg = rxe_map_mr_sg,
+ .mmap = rxe_mmap,
+ .modify_ah = rxe_modify_ah,
+ .modify_device = rxe_modify_device,
+ .modify_port = rxe_modify_port,
+ .modify_qp = rxe_modify_qp,
+ .modify_srq = rxe_modify_srq,
+ .peek_cq = rxe_peek_cq,
+ .poll_cq = rxe_poll_cq,
+ .post_recv = rxe_post_recv,
+ .post_send = rxe_post_send,
+ .post_srq_recv = rxe_post_srq_recv,
+ .query_ah = rxe_query_ah,
+ .query_device = rxe_query_device,
+ .query_pkey = rxe_query_pkey,
+ .query_gid = rxe_query_gid,
+ .query_port = rxe_query_port,
+ .query_qp = rxe_query_qp,
+ .query_srq = rxe_query_srq,
+ .reg_user_mr = rxe_reg_user_mr,
+ .req_notify_cq = rxe_req_notify_cq,
+ .rereg_user_mr = rxe_rereg_user_mr,
+ .resize_cq = rxe_resize_cq,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, rxe_qp, ibqp),
+ INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
+ INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
+};
+
+int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name,
+ struct net_device *ndev)
+{
+ int err;
+ struct ib_device *dev = &rxe->ib_dev;
+
+ strscpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
+
+ dev->node_type = RDMA_NODE_IB_CA;
+ dev->phys_port_cnt = 1;
+ dev->num_comp_vectors = num_possible_cpus();
+ dev->local_dma_lkey = 0;
+ addrconf_addr_eui48((unsigned char *)&dev->node_guid,
+ rxe->raw_gid);
+
+ dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND) |
+ BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ);
+
+ ib_set_device_ops(dev, &rxe_dev_ops);
+ err = ib_device_set_netdev(&rxe->ib_dev, ndev, 1);
+ if (err)
+ return err;
+
+ err = ib_register_device(dev, ibdev_name, NULL);
+ if (err)
+ rxe_dbg_dev(rxe, "failed with error %d\n", err);
+
+ /*
+ * Note that rxe may be invalid at this point if another thread
+ * unregistered it.
+ */
+ return err;
+}
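
As an illustrative sketch only (not part of the patch above): the consumer-side poll/re-arm loop that rxe_req_notify_cq() and rxe_poll_cq() are written to support. Returning 1 for IB_CQ_REPORT_MISSED_EVENTS lets the consumer close the race between draining and re-arming without waiting for another event. ib_poll_cq() and ib_req_notify_cq() are the standard RDMA core wrappers; the handler name is hypothetical.

#include <rdma/ib_verbs.h>

static void my_handle_wc(struct ib_wc *wc)
{
	/* hypothetical consumer-specific completion handling */
}

static void my_drain_and_rearm(struct ib_cq *cq)
{
	struct ib_wc wc;

	do {
		/* drain everything currently queued */
		while (ib_poll_cq(cq, 1, &wc) > 0)
			my_handle_wc(&wc);
		/*
		 * Re-arm the CQ. With IB_CQ_REPORT_MISSED_EVENTS the
		 * provider (rxe_req_notify_cq() above) returns 1 when
		 * completions arrived before the arm took effect, so
		 * poll again instead of waiting for an event.
		 */
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				      IB_CQ_REPORT_MISSED_EVENTS) > 0);
}
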
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
new file mode 100644
index 000000000000..fd48075810dd
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -0,0 +1,519 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ */
+
+#ifndef RXE_VERBS_H
+#define RXE_VERBS_H
+
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include "rxe_pool.h"
+#include "rxe_task.h"
+#include "rxe_hw_counters.h"
+
+static inline int pkey_match(u16 key1, u16 key2)
+{
+ return (((key1 & 0x7fff) != 0) &&
+ ((key1 & 0x7fff) == (key2 & 0x7fff)) &&
+ ((key1 & 0x8000) || (key2 & 0x8000))) ? 1 : 0;
+}
+
+/* Return >0 if psn_a > psn_b
+ * 0 if psn_a == psn_b
+ * <0 if psn_a < psn_b
+ */
+static inline int psn_compare(u32 psn_a, u32 psn_b)
+{
+ s32 diff;
+
+ diff = (psn_a - psn_b) << 8;
+ return diff;
+}
+
+struct rxe_ucontext {
+ struct ib_ucontext ibuc;
+ struct rxe_pool_elem elem;
+};
+
+struct rxe_pd {
+ struct ib_pd ibpd;
+ struct rxe_pool_elem elem;
+};
+
+struct rxe_ah {
+ struct ib_ah ibah;
+ struct rxe_pool_elem elem;
+ struct rxe_av av;
+ bool is_user;
+ int ah_num;
+};
+
+struct rxe_cqe {
+ union {
+ struct ib_wc ibwc;
+ struct ib_uverbs_wc uibwc;
+ };
+};
+
+struct rxe_cq {
+ struct ib_cq ibcq;
+ struct rxe_pool_elem elem;
+ struct rxe_queue *queue;
+ spinlock_t cq_lock;
+ u8 notify;
+ bool is_user;
+ atomic_t num_wq;
+};
+
+enum wqe_state {
+ wqe_state_posted,
+ wqe_state_processing,
+ wqe_state_pending,
+ wqe_state_done,
+ wqe_state_error,
+};
+
+struct rxe_sq {
+ int max_wr;
+ int max_sge;
+ int max_inline;
+ spinlock_t sq_lock; /* guard queue */
+ struct rxe_queue *queue;
+};
+
+struct rxe_rq {
+ int max_wr;
+ int max_sge;
+ spinlock_t producer_lock; /* guard queue producer */
+ spinlock_t consumer_lock; /* guard queue consumer */
+ struct rxe_queue *queue;
+};
+
+struct rxe_srq {
+ struct ib_srq ibsrq;
+ struct rxe_pool_elem elem;
+ struct rxe_pd *pd;
+ struct rxe_rq rq;
+ u32 srq_num;
+
+ int limit;
+ int error;
+};
+
+struct rxe_req_info {
+ int wqe_index;
+ u32 psn;
+ int opcode;
+ atomic_t rd_atomic;
+ int wait_fence;
+ int need_rd_atomic;
+ int wait_psn;
+ int need_retry;
+ int wait_for_rnr_timer;
+ int noack_pkts;
+ int again;
+};
+
+struct rxe_comp_info {
+ u32 psn;
+ int opcode;
+ int timeout;
+ int timeout_retry;
+ int started_retry;
+ u32 retry_cnt;
+ u32 rnr_retry;
+};
+
+/* responder states */
+enum resp_states {
+ RESPST_NONE,
+ RESPST_GET_REQ,
+ RESPST_CHK_PSN,
+ RESPST_CHK_OP_SEQ,
+ RESPST_CHK_OP_VALID,
+ RESPST_CHK_RESOURCE,
+ RESPST_CHK_LENGTH,
+ RESPST_CHK_RKEY,
+ RESPST_EXECUTE,
+ RESPST_READ_REPLY,
+ RESPST_ATOMIC_REPLY,
+ RESPST_ATOMIC_WRITE_REPLY,
+ RESPST_PROCESS_FLUSH,
+ RESPST_COMPLETE,
+ RESPST_ACKNOWLEDGE,
+ RESPST_CLEANUP,
+ RESPST_DUPLICATE_REQUEST,
+ RESPST_ERR_MALFORMED_WQE,
+ RESPST_ERR_UNSUPPORTED_OPCODE,
+ RESPST_ERR_MISALIGNED_ATOMIC,
+ RESPST_ERR_PSN_OUT_OF_SEQ,
+ RESPST_ERR_MISSING_OPCODE_FIRST,
+ RESPST_ERR_MISSING_OPCODE_LAST_C,
+ RESPST_ERR_MISSING_OPCODE_LAST_D1E,
+ RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
+ RESPST_ERR_RNR,
+ RESPST_ERR_RKEY_VIOLATION,
+ RESPST_ERR_INVALIDATE_RKEY,
+ RESPST_ERR_LENGTH,
+ RESPST_ERR_CQ_OVERFLOW,
+ RESPST_ERROR,
+ RESPST_DONE,
+ RESPST_EXIT,
+};
+
+enum rdatm_res_state {
+ rdatm_res_state_next,
+ rdatm_res_state_new,
+ rdatm_res_state_replay,
+};
+
+struct resp_res {
+ int type;
+ int replay;
+ u32 first_psn;
+ u32 last_psn;
+ u32 cur_psn;
+ enum rdatm_res_state state;
+
+ union {
+ struct {
+ u64 orig_val;
+ } atomic;
+ struct {
+ u64 va_org;
+ u32 rkey;
+ u32 length;
+ u64 va;
+ u32 resid;
+ } read;
+ struct {
+ u32 length;
+ u64 va;
+ u8 type;
+ u8 level;
+ } flush;
+ };
+};
+
+struct rxe_resp_info {
+ u32 msn;
+ u32 psn;
+ u32 ack_psn;
+ int opcode;
+ int drop_msg;
+ int goto_error;
+ int sent_psn_nak;
+ enum ib_wc_status status;
+ u8 aeth_syndrome;
+
+ /* Receive only */
+ struct rxe_recv_wqe *wqe;
+
+ /* RDMA read / atomic only */
+ u64 va;
+ u64 offset;
+ struct rxe_mr *mr;
+ u32 resid;
+ u32 rkey;
+ u32 length;
+
+ /* SRQ only */
+ struct {
+ struct rxe_recv_wqe wqe;
+ struct ib_sge sge[RXE_MAX_SGE];
+ } srq_wqe;
+
+ /* Responder resources. It's a circular list where the oldest
+ * resource is dropped first.
+ */
+ struct resp_res *resources;
+ unsigned int res_head;
+ unsigned int res_tail;
+ struct resp_res *res;
+};
+
+struct rxe_qp {
+ struct ib_qp ibqp;
+ struct rxe_pool_elem elem;
+ struct ib_qp_attr attr;
+ unsigned int valid;
+ unsigned int mtu;
+ bool is_user;
+
+ struct rxe_pd *pd;
+ struct rxe_srq *srq;
+ struct rxe_cq *scq;
+ struct rxe_cq *rcq;
+
+ enum ib_sig_type sq_sig_type;
+
+ struct rxe_sq sq;
+ struct rxe_rq rq;
+
+ struct socket *sk;
+ u32 dst_cookie;
+ u16 src_port;
+
+ struct rxe_av pri_av;
+ struct rxe_av alt_av;
+
+ atomic_t mcg_num;
+
+ struct sk_buff_head req_pkts;
+ struct sk_buff_head resp_pkts;
+
+ struct rxe_task send_task;
+ struct rxe_task recv_task;
+
+ struct rxe_req_info req;
+ struct rxe_comp_info comp;
+ struct rxe_resp_info resp;
+
+ atomic_t ssn;
+ atomic_t skb_out;
+ int need_req_skb;
+
+ /* Timer for retransmitting packets when ACKs have been lost. RC
+ * only. The requester sets it when it is not already
+ * started. The responder resets it whenever an ack is
+ * received.
+ */
+ struct timer_list retrans_timer;
+ u64 qp_timeout_jiffies;
+
+ /* Timer for handling RNR NAKs. */
+ struct timer_list rnr_nak_timer;
+
+ spinlock_t state_lock; /* guard requester and completer */
+
+ struct execute_work cleanup_work;
+};
+
+enum {
+ RXE_ACCESS_REMOTE = IB_ACCESS_REMOTE_READ
+ | IB_ACCESS_REMOTE_WRITE
+ | IB_ACCESS_REMOTE_ATOMIC,
+ RXE_ACCESS_SUPPORTED_MR = RXE_ACCESS_REMOTE
+ | IB_ACCESS_LOCAL_WRITE
+ | IB_ACCESS_MW_BIND
+ | IB_ACCESS_ON_DEMAND
+ | IB_ACCESS_FLUSH_GLOBAL
+ | IB_ACCESS_FLUSH_PERSISTENT
+ | IB_ACCESS_OPTIONAL,
+ RXE_ACCESS_SUPPORTED_QP = RXE_ACCESS_SUPPORTED_MR,
+ RXE_ACCESS_SUPPORTED_MW = RXE_ACCESS_SUPPORTED_MR
+ | IB_ZERO_BASED,
+};
+
+enum rxe_mr_state {
+ RXE_MR_STATE_INVALID,
+ RXE_MR_STATE_FREE,
+ RXE_MR_STATE_VALID,
+};
+
+enum rxe_mr_copy_dir {
+ RXE_TO_MR_OBJ,
+ RXE_FROM_MR_OBJ,
+};
+
+enum rxe_mr_lookup_type {
+ RXE_LOOKUP_LOCAL,
+ RXE_LOOKUP_REMOTE,
+};
+
+enum rxe_rereg {
+ RXE_MR_REREG_SUPPORTED = IB_MR_REREG_PD
+ | IB_MR_REREG_ACCESS,
+};
+
+static inline int rkey_is_mw(u32 rkey)
+{
+ u32 index = rkey >> 8;
+
+ return (index >= RXE_MIN_MW_INDEX) && (index <= RXE_MAX_MW_INDEX);
+}
+
+struct rxe_mr {
+ struct rxe_pool_elem elem;
+ struct ib_mr ibmr;
+
+ struct ib_umem *umem;
+
+ u32 lkey;
+ u32 rkey;
+ enum rxe_mr_state state;
+ int access;
+ atomic_t num_mw;
+
+ unsigned int page_offset;
+ unsigned int page_shift;
+ u64 page_mask;
+
+ u32 num_buf;
+ u32 nbuf;
+
+ struct xarray page_list;
+};
+
+static inline unsigned int mr_page_size(struct rxe_mr *mr)
+{
+ return mr ? mr->ibmr.page_size : PAGE_SIZE;
+}
+
+enum rxe_mw_state {
+ RXE_MW_STATE_INVALID = RXE_MR_STATE_INVALID,
+ RXE_MW_STATE_FREE = RXE_MR_STATE_FREE,
+ RXE_MW_STATE_VALID = RXE_MR_STATE_VALID,
+};
+
+struct rxe_mw {
+ struct ib_mw ibmw;
+ struct rxe_pool_elem elem;
+ spinlock_t lock;
+ enum rxe_mw_state state;
+ struct rxe_qp *qp; /* Type 2 only */
+ struct rxe_mr *mr;
+ u32 rkey;
+ int access;
+ u64 addr;
+ u64 length;
+};
+
+struct rxe_mcg {
+ struct rb_node node;
+ struct kref ref_cnt;
+ struct rxe_dev *rxe;
+ struct list_head qp_list;
+ union ib_gid mgid;
+ atomic_t qp_num;
+ u32 qkey;
+ u16 pkey;
+};
+
+struct rxe_mca {
+ struct list_head qp_list;
+ struct rxe_qp *qp;
+};
+
+struct rxe_port {
+ struct ib_port_attr attr;
+ __be64 port_guid;
+ __be64 subnet_prefix;
+ spinlock_t port_lock; /* guard port */
+ unsigned int mtu_cap;
+ /* special QPs */
+ u32 qp_gsi_index;
+};
+
+#define RXE_PORT 1
+struct rxe_dev {
+ struct ib_device ib_dev;
+ struct ib_device_attr attr;
+ int max_ucontext;
+ int max_inline_data;
+ struct mutex usdev_lock;
+
+ char raw_gid[ETH_ALEN];
+
+ struct rxe_pool uc_pool;
+ struct rxe_pool pd_pool;
+ struct rxe_pool ah_pool;
+ struct rxe_pool srq_pool;
+ struct rxe_pool qp_pool;
+ struct rxe_pool cq_pool;
+ struct rxe_pool mr_pool;
+ struct rxe_pool mw_pool;
+
+ /* multicast support */
+ spinlock_t mcg_lock;
+ struct rb_root mcg_tree;
+ atomic_t mcg_num;
+ atomic_t mcg_attach;
+
+ spinlock_t pending_lock; /* guard pending_mmaps */
+ struct list_head pending_mmaps;
+
+ spinlock_t mmap_offset_lock; /* guard mmap_offset */
+ u64 mmap_offset;
+
+ atomic64_t stats_counters[RXE_NUM_OF_COUNTERS];
+
+ struct rxe_port port;
+};
+
+static inline struct net_device *rxe_ib_device_get_netdev(struct ib_device *dev)
+{
+ return ib_device_get_netdev(dev, RXE_PORT);
+}
+
+static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)
+{
+ atomic64_inc(&rxe->stats_counters[index]);
+}
+
+static inline struct rxe_dev *to_rdev(struct ib_device *dev)
+{
+ return dev ? container_of(dev, struct rxe_dev, ib_dev) : NULL;
+}
+
+static inline struct rxe_ucontext *to_ruc(struct ib_ucontext *uc)
+{
+ return uc ? container_of(uc, struct rxe_ucontext, ibuc) : NULL;
+}
+
+static inline struct rxe_pd *to_rpd(struct ib_pd *pd)
+{
+ return pd ? container_of(pd, struct rxe_pd, ibpd) : NULL;
+}
+
+static inline struct rxe_ah *to_rah(struct ib_ah *ah)
+{
+ return ah ? container_of(ah, struct rxe_ah, ibah) : NULL;
+}
+
+static inline struct rxe_srq *to_rsrq(struct ib_srq *srq)
+{
+ return srq ? container_of(srq, struct rxe_srq, ibsrq) : NULL;
+}
+
+static inline struct rxe_qp *to_rqp(struct ib_qp *qp)
+{
+ return qp ? container_of(qp, struct rxe_qp, ibqp) : NULL;
+}
+
+static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
+{
+ return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
+}
+
+static inline struct rxe_mr *to_rmr(struct ib_mr *mr)
+{
+ return mr ? container_of(mr, struct rxe_mr, ibmr) : NULL;
+}
+
+static inline struct rxe_mw *to_rmw(struct ib_mw *mw)
+{
+ return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
+}
+
+static inline struct rxe_pd *rxe_ah_pd(struct rxe_ah *ah)
+{
+ return to_rpd(ah->ibah.pd);
+}
+
+static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
+{
+ return to_rpd(mr->ibmr.pd);
+}
+
+static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
+{
+ return to_rpd(mw->ibmw.pd);
+}
+
+int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name,
+ struct net_device *ndev);
+
+#endif /* RXE_VERBS_H */
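
For illustration only (not part of the header above): psn_compare() relies on PSNs being 24-bit values, so shifting the unsigned difference left by 8 and reading the result as a signed 32-bit value makes the comparison wrap correctly. A stand-alone sketch with plain C types:

#include <stdint.h>
#include <stdio.h>

static int demo_psn_compare(uint32_t psn_a, uint32_t psn_b)
{
	/* same trick as psn_compare() above, with userspace types */
	return (int32_t)((psn_a - psn_b) << 8);
}

int main(void)
{
	/* 0x000001 follows 0xffffff once the 24-bit PSN wraps */
	printf("%d\n", demo_psn_compare(0x000001, 0xffffff) > 0);	/* 1 */
	printf("%d\n", demo_psn_compare(0xffffff, 0x000001) < 0);	/* 1 */
	printf("%d\n", demo_psn_compare(0x000010, 0x000010));		/* 0 */
	return 0;
}
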
diff --git a/drivers/infiniband/sw/siw/Kconfig b/drivers/infiniband/sw/siw/Kconfig
new file mode 100644
index 000000000000..186f182b80e7
--- /dev/null
+++ b/drivers/infiniband/sw/siw/Kconfig
@@ -0,0 +1,20 @@
+config RDMA_SIW
+ tristate "Software RDMA over TCP/IP (iWARP) driver"
+ depends on INET && INFINIBAND
+ depends on INFINIBAND_VIRT_DMA
+ select CRC32
+ select NET_CRC32C
+ help
+ This driver implements the iWARP RDMA transport over
+ the Linux TCP/IP network stack. It enables a system with a
+ standard Ethernet adapter to interoperate with an iWARP
+ adapter or with another system running the SIW driver.
+ (See also RXE which is a similar software driver for RoCE.)
+
+ The driver interfaces with the Linux RDMA stack and
+ implements both a kernel and user space RDMA verbs API.
+ The user space verbs API requires a support
+ library named libsiw which is loaded by the generic user
+ space verbs API, libibverbs. To implement RDMA over
+ TCP/IP, the driver further interfaces with the Linux
+ in-kernel TCP socket layer.
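
Once CONFIG_RDMA_SIW is enabled and the module is loaded, a siw device is typically bound to an Ethernet interface from user space with the iproute2 rdma tool, e.g. "rdma link add siw0 type siw netdev eth0" (the link and interface names here are only examples).
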
diff --git a/drivers/infiniband/sw/siw/Makefile b/drivers/infiniband/sw/siw/Makefile
new file mode 100644
index 000000000000..f5f7e3867889
--- /dev/null
+++ b/drivers/infiniband/sw/siw/Makefile
@@ -0,0 +1,11 @@
+obj-$(CONFIG_RDMA_SIW) += siw.o
+
+siw-y := \
+ siw_cm.o \
+ siw_cq.o \
+ siw_main.o \
+ siw_mem.o \
+ siw_qp.o \
+ siw_qp_tx.o \
+ siw_qp_rx.o \
+ siw_verbs.o
diff --git a/drivers/infiniband/sw/siw/iwarp.h b/drivers/infiniband/sw/siw/iwarp.h
new file mode 100644
index 000000000000..8cf69309827d
--- /dev/null
+++ b/drivers/infiniband/sw/siw/iwarp.h
@@ -0,0 +1,367 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+
+#ifndef _IWARP_H
+#define _IWARP_H
+
+#include <rdma/rdma_user_cm.h> /* RDMA_MAX_PRIVATE_DATA */
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+#define RDMAP_VERSION 1
+#define DDP_VERSION 1
+#define MPA_REVISION_1 1
+#define MPA_REVISION_2 2
+#define MPA_MAX_PRIVDATA RDMA_MAX_PRIVATE_DATA
+#define MPA_KEY_REQ "MPA ID Req Frame"
+#define MPA_KEY_REP "MPA ID Rep Frame"
+#define MPA_IRD_ORD_MASK 0x3fff
+
+struct mpa_rr_params {
+ __be16 bits;
+ __be16 pd_len;
+};
+
+/*
+ * MPA request/response header bits & fields
+ */
+enum {
+ MPA_RR_FLAG_MARKERS = cpu_to_be16(0x8000),
+ MPA_RR_FLAG_CRC = cpu_to_be16(0x4000),
+ MPA_RR_FLAG_REJECT = cpu_to_be16(0x2000),
+ MPA_RR_FLAG_ENHANCED = cpu_to_be16(0x1000),
+ MPA_RR_FLAG_GSO_EXP = cpu_to_be16(0x0800),
+ MPA_RR_MASK_REVISION = cpu_to_be16(0x00ff)
+};
+
+/*
+ * MPA request/reply header
+ */
+struct mpa_rr {
+ __u8 key[16];
+ struct mpa_rr_params params;
+};
+
+static inline void __mpa_rr_set_revision(__be16 *bits, u8 rev)
+{
+ *bits = (*bits & ~MPA_RR_MASK_REVISION) |
+ (cpu_to_be16(rev) & MPA_RR_MASK_REVISION);
+}
+
+static inline u8 __mpa_rr_revision(__be16 mpa_rr_bits)
+{
+ __be16 rev = mpa_rr_bits & MPA_RR_MASK_REVISION;
+
+ return be16_to_cpu(rev);
+}
+
+enum mpa_v2_ctrl {
+ MPA_V2_PEER_TO_PEER = cpu_to_be16(0x8000),
+ MPA_V2_ZERO_LENGTH_RTR = cpu_to_be16(0x4000),
+ MPA_V2_RDMA_WRITE_RTR = cpu_to_be16(0x8000),
+ MPA_V2_RDMA_READ_RTR = cpu_to_be16(0x4000),
+ MPA_V2_RDMA_NO_RTR = cpu_to_be16(0x0000),
+ MPA_V2_MASK_IRD_ORD = cpu_to_be16(0x3fff)
+};
+
+struct mpa_v2_data {
+ __be16 ird;
+ __be16 ord;
+};
+
+struct mpa_marker {
+ __be16 rsvd;
+ __be16 fpdu_hmd; /* FPDU header-marker distance (= MPA's FPDUPTR) */
+};
+
+/*
+ * maximum MPA trailer
+ */
+struct mpa_trailer {
+ __u8 pad[4];
+ __be32 crc;
+};
+
+#define MPA_HDR_SIZE 2
+#define MPA_CRC_SIZE 4
+
+/*
+ * Common portion of iWARP headers (MPA, DDP, RDMAP)
+ * for any FPDU
+ */
+struct iwarp_ctrl {
+ __be16 mpa_len;
+ __be16 ddp_rdmap_ctrl;
+};
+
+/*
+ * DDP/RDMAP Hdr bits & fields
+ */
+enum {
+ DDP_FLAG_TAGGED = cpu_to_be16(0x8000),
+ DDP_FLAG_LAST = cpu_to_be16(0x4000),
+ DDP_MASK_RESERVED = cpu_to_be16(0x3C00),
+ DDP_MASK_VERSION = cpu_to_be16(0x0300),
+ RDMAP_MASK_VERSION = cpu_to_be16(0x00C0),
+ RDMAP_MASK_RESERVED = cpu_to_be16(0x0030),
+ RDMAP_MASK_OPCODE = cpu_to_be16(0x000f)
+};
+
+static inline u8 __ddp_get_version(struct iwarp_ctrl *ctrl)
+{
+ return be16_to_cpu(ctrl->ddp_rdmap_ctrl & DDP_MASK_VERSION) >> 8;
+}
+
+static inline u8 __rdmap_get_version(struct iwarp_ctrl *ctrl)
+{
+ __be16 ver = ctrl->ddp_rdmap_ctrl & RDMAP_MASK_VERSION;
+
+ return be16_to_cpu(ver) >> 6;
+}
+
+static inline u8 __rdmap_get_opcode(struct iwarp_ctrl *ctrl)
+{
+ return be16_to_cpu(ctrl->ddp_rdmap_ctrl & RDMAP_MASK_OPCODE);
+}
+
+static inline void __rdmap_set_opcode(struct iwarp_ctrl *ctrl, u8 opcode)
+{
+ ctrl->ddp_rdmap_ctrl = (ctrl->ddp_rdmap_ctrl & ~RDMAP_MASK_OPCODE) |
+ (cpu_to_be16(opcode) & RDMAP_MASK_OPCODE);
+}
+
+struct iwarp_rdma_write {
+ struct iwarp_ctrl ctrl;
+ __be32 sink_stag;
+ __be64 sink_to;
+};
+
+struct iwarp_rdma_rreq {
+ struct iwarp_ctrl ctrl;
+ __be32 rsvd;
+ __be32 ddp_qn;
+ __be32 ddp_msn;
+ __be32 ddp_mo;
+ __be32 sink_stag;
+ __be64 sink_to;
+ __be32 read_size;
+ __be32 source_stag;
+ __be64 source_to;
+};
+
+struct iwarp_rdma_rresp {
+ struct iwarp_ctrl ctrl;
+ __be32 sink_stag;
+ __be64 sink_to;
+};
+
+struct iwarp_send {
+ struct iwarp_ctrl ctrl;
+ __be32 rsvd;
+ __be32 ddp_qn;
+ __be32 ddp_msn;
+ __be32 ddp_mo;
+};
+
+struct iwarp_send_inv {
+ struct iwarp_ctrl ctrl;
+ __be32 inval_stag;
+ __be32 ddp_qn;
+ __be32 ddp_msn;
+ __be32 ddp_mo;
+};
+
+struct iwarp_terminate {
+ struct iwarp_ctrl ctrl;
+ __be32 rsvd;
+ __be32 ddp_qn;
+ __be32 ddp_msn;
+ __be32 ddp_mo;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __be32 layer : 4;
+ __be32 etype : 4;
+ __be32 ecode : 8;
+ __be32 flag_m : 1;
+ __be32 flag_d : 1;
+ __be32 flag_r : 1;
+ __be32 reserved : 13;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __be32 reserved : 13;
+ __be32 flag_r : 1;
+ __be32 flag_d : 1;
+ __be32 flag_m : 1;
+ __be32 ecode : 8;
+ __be32 etype : 4;
+ __be32 layer : 4;
+#else
+#error "undefined byte order"
+#endif
+};
+
+/*
+ * Terminate Hdr bits & fields
+ */
+enum {
+ TERM_MASK_LAYER = cpu_to_be32(0xf0000000),
+ TERM_MASK_ETYPE = cpu_to_be32(0x0f000000),
+ TERM_MASK_ECODE = cpu_to_be32(0x00ff0000),
+ TERM_FLAG_M = cpu_to_be32(0x00008000),
+ TERM_FLAG_D = cpu_to_be32(0x00004000),
+ TERM_FLAG_R = cpu_to_be32(0x00002000),
+ TERM_MASK_RESVD = cpu_to_be32(0x00001fff)
+};
+
+static inline u8 __rdmap_term_layer(struct iwarp_terminate *term)
+{
+ return term->layer;
+}
+
+static inline void __rdmap_term_set_layer(struct iwarp_terminate *term,
+ u8 layer)
+{
+ term->layer = layer & 0xf;
+}
+
+static inline u8 __rdmap_term_etype(struct iwarp_terminate *term)
+{
+ return term->etype;
+}
+
+static inline void __rdmap_term_set_etype(struct iwarp_terminate *term,
+ u8 etype)
+{
+ term->etype = etype & 0xf;
+}
+
+static inline u8 __rdmap_term_ecode(struct iwarp_terminate *term)
+{
+ return term->ecode;
+}
+
+static inline void __rdmap_term_set_ecode(struct iwarp_terminate *term,
+ u8 ecode)
+{
+ term->ecode = ecode;
+}
+
+/*
+ * Common portion of iWARP headers (MPA, DDP, RDMAP)
+ * for an FPDU carrying an untagged DDP segment
+ */
+struct iwarp_ctrl_untagged {
+ struct iwarp_ctrl ctrl;
+ __be32 rsvd;
+ __be32 ddp_qn;
+ __be32 ddp_msn;
+ __be32 ddp_mo;
+};
+
+/*
+ * Common portion of iWARP headers (MPA, DDP, RDMAP)
+ * for an FPDU carrying a tagged DDP segment
+ */
+struct iwarp_ctrl_tagged {
+ struct iwarp_ctrl ctrl;
+ __be32 ddp_stag;
+ __be64 ddp_to;
+};
+
+union iwarp_hdr {
+ struct iwarp_ctrl ctrl;
+ struct iwarp_ctrl_untagged c_untagged;
+ struct iwarp_ctrl_tagged c_tagged;
+ struct iwarp_rdma_write rwrite;
+ struct iwarp_rdma_rreq rreq;
+ struct iwarp_rdma_rresp rresp;
+ struct iwarp_terminate terminate;
+ struct iwarp_send send;
+ struct iwarp_send_inv send_inv;
+};
+
+enum term_elayer {
+ TERM_ERROR_LAYER_RDMAP = 0x00,
+ TERM_ERROR_LAYER_DDP = 0x01,
+ TERM_ERROR_LAYER_LLP = 0x02 /* e.g., MPA */
+};
+
+enum ddp_etype {
+ DDP_ETYPE_CATASTROPHIC = 0x0,
+ DDP_ETYPE_TAGGED_BUF = 0x1,
+ DDP_ETYPE_UNTAGGED_BUF = 0x2,
+ DDP_ETYPE_RSVD = 0x3
+};
+
+enum ddp_ecode {
+ /* unspecified, set to zero */
+ DDP_ECODE_CATASTROPHIC = 0x00,
+ /* Tagged Buffer Errors */
+ DDP_ECODE_T_INVALID_STAG = 0x00,
+ DDP_ECODE_T_BASE_BOUNDS = 0x01,
+ DDP_ECODE_T_STAG_NOT_ASSOC = 0x02,
+ DDP_ECODE_T_TO_WRAP = 0x03,
+ DDP_ECODE_T_VERSION = 0x04,
+ /* Untagged Buffer Errors */
+ DDP_ECODE_UT_INVALID_QN = 0x01,
+ DDP_ECODE_UT_INVALID_MSN_NOBUF = 0x02,
+ DDP_ECODE_UT_INVALID_MSN_RANGE = 0x03,
+ DDP_ECODE_UT_INVALID_MO = 0x04,
+ DDP_ECODE_UT_MSG_TOOLONG = 0x05,
+ DDP_ECODE_UT_VERSION = 0x06
+};
+
+enum rdmap_untagged_qn {
+ RDMAP_UNTAGGED_QN_SEND = 0,
+ RDMAP_UNTAGGED_QN_RDMA_READ = 1,
+ RDMAP_UNTAGGED_QN_TERMINATE = 2,
+ RDMAP_UNTAGGED_QN_COUNT = 3
+};
+
+enum rdmap_etype {
+ RDMAP_ETYPE_CATASTROPHIC = 0x0,
+ RDMAP_ETYPE_REMOTE_PROTECTION = 0x1,
+ RDMAP_ETYPE_REMOTE_OPERATION = 0x2
+};
+
+enum rdmap_ecode {
+ RDMAP_ECODE_INVALID_STAG = 0x00,
+ RDMAP_ECODE_BASE_BOUNDS = 0x01,
+ RDMAP_ECODE_ACCESS_RIGHTS = 0x02,
+ RDMAP_ECODE_STAG_NOT_ASSOC = 0x03,
+ RDMAP_ECODE_TO_WRAP = 0x04,
+ RDMAP_ECODE_VERSION = 0x05,
+ RDMAP_ECODE_OPCODE = 0x06,
+ RDMAP_ECODE_CATASTROPHIC_STREAM = 0x07,
+ RDMAP_ECODE_CATASTROPHIC_GLOBAL = 0x08,
+ RDMAP_ECODE_CANNOT_INVALIDATE = 0x09,
+ RDMAP_ECODE_UNSPECIFIED = 0xff
+};
+
+enum llp_ecode {
+ LLP_ECODE_TCP_STREAM_LOST = 0x01, /* How to transfer this ?? */
+ LLP_ECODE_RECEIVED_CRC = 0x02,
+ LLP_ECODE_FPDU_START = 0x03,
+ LLP_ECODE_INVALID_REQ_RESP = 0x04,
+
+ /* Errors for Enhanced Connection Establishment only */
+ LLP_ECODE_LOCAL_CATASTROPHIC = 0x05,
+ LLP_ECODE_INSUFFICIENT_IRD = 0x06,
+ LLP_ECODE_NO_MATCHING_RTR = 0x07
+};
+
+enum llp_etype { LLP_ETYPE_MPA = 0x00 };
+
+enum rdma_opcode {
+ RDMAP_RDMA_WRITE = 0x0,
+ RDMAP_RDMA_READ_REQ = 0x1,
+ RDMAP_RDMA_READ_RESP = 0x2,
+ RDMAP_SEND = 0x3,
+ RDMAP_SEND_INVAL = 0x4,
+ RDMAP_SEND_SE = 0x5,
+ RDMAP_SEND_SE_INVAL = 0x6,
+ RDMAP_TERMINATE = 0x7,
+ RDMAP_NOT_SUPPORTED = RDMAP_TERMINATE + 1
+};
+
+#endif
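
A brief sketch (not part of the header above) of how these byte-order-aware constants compose: because the enums are stored pre-swapped, a DDP/RDMAP control word can be built with plain bitwise OR on the wire-format value, essentially the way siw fills in its per-opcode header templates. The helper name is illustrative and assumes iwarp.h is included.

/* Stamp the common control word of an untagged SEND frame. */
static inline void example_init_send_ctrl(struct iwarp_ctrl *ctrl)
{
	ctrl->ddp_rdmap_ctrl = DDP_FLAG_LAST |
			       cpu_to_be16(DDP_VERSION << 8) |
			       cpu_to_be16(RDMAP_VERSION << 6) |
			       cpu_to_be16(RDMAP_SEND);

	/* readers recover the fields via the accessors above */
	WARN_ON(__rdmap_get_opcode(ctrl) != RDMAP_SEND);
	WARN_ON(__ddp_get_version(ctrl) != DDP_VERSION);
}
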
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
new file mode 100644
index 000000000000..f5fd71717b80
--- /dev/null
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -0,0 +1,729 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+
+#ifndef _SIW_H
+#define _SIW_H
+
+#include <rdma/ib_verbs.h>
+#include <rdma/restrack.h>
+#include <linux/socket.h>
+#include <linux/skbuff.h>
+#include <linux/crc32.h>
+#include <linux/crc32c.h>
+#include <linux/unaligned.h>
+
+#include <rdma/siw-abi.h>
+#include "iwarp.h"
+
+#define SIW_VENDOR_ID 0x626d74 /* ascii 'bmt' for now */
+#define SIW_VENDORT_PART_ID 0
+#define SIW_MAX_QP (1024 * 100)
+#define SIW_MAX_QP_WR (1024 * 32)
+#define SIW_MAX_ORD_QP 128
+#define SIW_MAX_IRD_QP 128
+#define SIW_MAX_SGE_PBL 256 /* max num sge's for PBL */
+#define SIW_MAX_SGE_RD 1 /* iwarp limitation. we could relax */
+#define SIW_MAX_CQ (1024 * 100)
+#define SIW_MAX_CQE (SIW_MAX_QP_WR * 100)
+#define SIW_MAX_MR (SIW_MAX_QP * 10)
+#define SIW_MAX_PD SIW_MAX_QP
+#define SIW_MAX_MW 0 /* to be set if MW's are supported */
+#define SIW_MAX_SRQ SIW_MAX_QP
+#define SIW_MAX_SRQ_WR (SIW_MAX_QP_WR * 10)
+#define SIW_MAX_CONTEXT SIW_MAX_PD
+
+/* Min number of bytes for using zero copy transmit */
+#define SENDPAGE_THRESH PAGE_SIZE
+
+/* Maximum number of frames which can be sent in one SQ processing */
+#define SQ_USER_MAXBURST 100
+
+/* Maximum number of consecutive IRQ elements which get served
+ * if SQ has pending work. Prevents starving local SQ processing
+ * by serving peer Read Requests.
+ */
+#define SIW_IRQ_MAXBURST_SQ_ACTIVE 4
+
+/* There is always exactly one port (port 1) per siw device */
+#define SIW_PORT 1
+
+struct siw_dev_cap {
+ int max_qp;
+ int max_qp_wr;
+ int max_ord; /* max. outbound read queue depth */
+ int max_ird; /* max. inbound read queue depth */
+ int max_sge;
+ int max_sge_rd;
+ int max_cq;
+ int max_cqe;
+ int max_mr;
+ int max_pd;
+ int max_mw;
+ int max_srq;
+ int max_srq_wr;
+ int max_srq_sge;
+};
+
+struct siw_pd {
+ struct ib_pd base_pd;
+};
+
+struct siw_device {
+ struct ib_device base_dev;
+ struct siw_dev_cap attrs;
+
+ u32 vendor_part_id;
+ int numa_node;
+ char raw_gid[ETH_ALEN];
+
+ spinlock_t lock;
+
+ struct xarray qp_xa;
+ struct xarray mem_xa;
+
+ struct list_head cep_list;
+ struct list_head qp_list;
+
+ /* active objects statistics to enforce limits */
+ atomic_t num_qp;
+ atomic_t num_cq;
+ atomic_t num_pd;
+ atomic_t num_mr;
+ atomic_t num_srq;
+ atomic_t num_ctx;
+};
+
+struct siw_ucontext {
+ struct ib_ucontext base_ucontext;
+ struct siw_device *sdev;
+};
+
+/*
+ * The RDMA core does not define LOCAL_READ access, which is always
+ * enabled implicitly.
+ */
+#define IWARP_ACCESS_MASK \
+ (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | \
+ IB_ACCESS_REMOTE_READ)
+
+/*
+ * siw presentation of user memory registered as source
+ * or target of RDMA operations.
+ */
+
+struct siw_page_chunk {
+ struct page **plist;
+};
+
+struct siw_umem {
+ struct ib_umem *base_mem;
+ struct siw_page_chunk *page_chunk;
+ int num_pages;
+ u64 fp_addr; /* First page base address */
+};
+
+struct siw_pble {
+ dma_addr_t addr; /* Address of assigned buffer */
+ unsigned int size; /* Size of this entry */
+ unsigned long pbl_off; /* Total offset from start of PBL */
+};
+
+struct siw_pbl {
+ unsigned int num_buf;
+ unsigned int max_buf;
+ struct siw_pble pbe[] __counted_by(max_buf);
+};
+
+/*
+ * Generic memory representation for registered siw memory.
+ * Memory lookup is always via the upper 24 bits of the STag (the STag index).
+ */
+struct siw_mem {
+ struct siw_device *sdev;
+ struct kref ref;
+ u64 va; /* VA of memory */
+ u64 len; /* length of the memory buffer in bytes */
+ u32 stag; /* iWarp memory access steering tag */
+ u8 stag_valid; /* VALID or INVALID */
+ u8 is_pbl; /* PBL or user space mem */
+ u8 is_mw; /* Memory Region or Memory Window */
+ enum ib_access_flags perms; /* local/remote READ & WRITE */
+ union {
+ struct siw_umem *umem;
+ struct siw_pbl *pbl;
+ void *mem_obj;
+ };
+ struct ib_pd *pd;
+};
+
+struct siw_mr {
+ struct ib_mr base_mr;
+ struct siw_mem *mem;
+ struct rcu_head rcu;
+};
+
+/*
+ * Error codes for local or remote
+ * access to registered memory
+ */
+enum siw_access_state {
+ E_ACCESS_OK,
+ E_STAG_INVALID,
+ E_BASE_BOUNDS,
+ E_ACCESS_PERM,
+ E_PD_MISMATCH
+};
+
+enum siw_wr_state {
+ SIW_WR_IDLE,
+ SIW_WR_QUEUED, /* processing has not started yet */
+ SIW_WR_INPROGRESS /* initiated processing of the WR */
+};
+
+/* The WQE currently being processed (RX or TX) */
+struct siw_wqe {
+ /* Copy of applications SQE or RQE */
+ union {
+ struct siw_sqe sqe;
+ struct siw_rqe rqe;
+ };
+ struct siw_mem *mem[SIW_MAX_SGE]; /* per sge's resolved mem */
+ enum siw_wr_state wr_status;
+ enum siw_wc_status wc_status;
+ u32 bytes; /* total bytes to process */
+ u32 processed; /* bytes processed */
+};
+
+struct siw_cq {
+ struct ib_cq base_cq;
+ spinlock_t lock;
+ struct siw_cq_ctrl *notify;
+ struct siw_cqe *queue;
+ u32 cq_put;
+ u32 cq_get;
+ u32 num_cqe;
+ struct rdma_user_mmap_entry *cq_entry; /* mmap info for CQE array */
+ u32 id; /* For debugging only */
+};
+
+enum siw_qp_state {
+ SIW_QP_STATE_IDLE,
+ SIW_QP_STATE_RTR,
+ SIW_QP_STATE_RTS,
+ SIW_QP_STATE_CLOSING,
+ SIW_QP_STATE_TERMINATE,
+ SIW_QP_STATE_ERROR,
+ SIW_QP_STATE_COUNT
+};
+
+enum siw_qp_flags {
+ SIW_RDMA_BIND_ENABLED = (1 << 0),
+ SIW_RDMA_WRITE_ENABLED = (1 << 1),
+ SIW_RDMA_READ_ENABLED = (1 << 2),
+ SIW_SIGNAL_ALL_WR = (1 << 3),
+ SIW_MPA_CRC = (1 << 4),
+ SIW_QP_IN_DESTROY = (1 << 5)
+};
+
+enum siw_qp_attr_mask {
+ SIW_QP_ATTR_STATE = (1 << 0),
+ SIW_QP_ATTR_ACCESS_FLAGS = (1 << 1),
+ SIW_QP_ATTR_LLP_HANDLE = (1 << 2),
+ SIW_QP_ATTR_ORD = (1 << 3),
+ SIW_QP_ATTR_IRD = (1 << 4),
+ SIW_QP_ATTR_SQ_SIZE = (1 << 5),
+ SIW_QP_ATTR_RQ_SIZE = (1 << 6),
+ SIW_QP_ATTR_MPA = (1 << 7)
+};
+
+struct siw_srq {
+ struct ib_srq base_srq;
+ spinlock_t lock;
+ u32 max_sge;
+ u32 limit; /* low watermark for async event */
+ struct siw_rqe *recvq;
+ u32 rq_put;
+ u32 rq_get;
+ u32 num_rqe; /* max # of wqe's allowed */
+ struct rdma_user_mmap_entry *srq_entry; /* mmap info for SRQ array */
+ bool armed:1; /* inform user if limit hit */
+ bool is_kernel_res:1; /* true if kernel client */
+};
+
+struct siw_qp_attrs {
+ enum siw_qp_state state;
+ u32 sq_size;
+ u32 rq_size;
+ u32 orq_size;
+ u32 irq_size;
+ u32 sq_max_sges;
+ u32 rq_max_sges;
+ enum siw_qp_flags flags;
+
+ struct socket *sk;
+};
+
+enum siw_tx_ctx {
+ SIW_SEND_HDR, /* start or continue sending HDR */
+ SIW_SEND_DATA, /* start or continue sending DDP payload */
+ SIW_SEND_TRAILER, /* start or continue sending TRAILER */
+ SIW_SEND_SHORT_FPDU/* send whole FPDU hdr|data|trailer at once */
+};
+
+enum siw_rx_state {
+ SIW_GET_HDR, /* await new hdr or within hdr */
+ SIW_GET_DATA_START, /* start of inbound DDP payload */
+ SIW_GET_DATA_MORE, /* continuation of (misaligned) DDP payload */
+ SIW_GET_TRAILER/* await new trailer or within trailer */
+};
+
+struct siw_rx_stream {
+ struct sk_buff *skb;
+ int skb_new; /* pending unread bytes in skb */
+ int skb_offset; /* offset in skb */
+ int skb_copied; /* processed bytes in skb */
+
+ enum siw_rx_state state;
+
+ union iwarp_hdr hdr;
+ struct mpa_trailer trailer;
+ u32 mpa_crc;
+ bool mpa_crc_enabled;
+
+ /*
+ * For each FPDU, main RX loop runs through 3 stages:
+ * Receiving protocol headers, placing DDP payload and receiving
+ * trailer information (CRC + possibly padding).
+ * Next two variables keep state on receive status of the
+ * current FPDU part (hdr, data, trailer).
+ */
+ int fpdu_part_rcvd; /* bytes in pkt part copied */
+ int fpdu_part_rem; /* bytes in pkt part not seen */
+
+ /*
+ * Next expected DDP MSN for each QN +
+ * expected steering tag +
+ * expected DDP tagged offset (all HBO)
+ */
+ u32 ddp_msn[RDMAP_UNTAGGED_QN_COUNT];
+ u32 ddp_stag;
+ u64 ddp_to;
+ u32 inval_stag; /* Stag to be invalidated */
+
+ u8 rx_suspend : 1;
+ u8 pad : 2; /* # of pad bytes expected */
+ u8 rdmap_op : 4; /* opcode of current frame */
+};
+
+struct siw_rx_fpdu {
+ /*
+ * Local destination memory of inbound RDMA operation.
+ * Valid, according to wqe->wr_status
+ */
+ struct siw_wqe wqe_active;
+
+ unsigned int pbl_idx; /* Index into current PBL */
+ unsigned int sge_idx; /* current sge in rx */
+ unsigned int sge_off; /* already rcvd in curr. sge */
+
+ char first_ddp_seg; /* this is the first DDP seg */
+ char more_ddp_segs; /* more DDP segs expected */
+ u8 prev_rdmap_op : 4; /* opcode of prev frame */
+};
+
+/*
+ * Shorthands for short packets w/o payload
+ * to be transmitted more efficiently.
+ */
+struct siw_send_pkt {
+ struct iwarp_send send;
+ __be32 crc;
+};
+
+struct siw_write_pkt {
+ struct iwarp_rdma_write write;
+ __be32 crc;
+};
+
+struct siw_rreq_pkt {
+ struct iwarp_rdma_rreq rreq;
+ __be32 crc;
+};
+
+struct siw_rresp_pkt {
+ struct iwarp_rdma_rresp rresp;
+ __be32 crc;
+};
+
+struct siw_iwarp_tx {
+ union {
+ union iwarp_hdr hdr;
+
+ /* Generic part of FPDU header */
+ struct iwarp_ctrl ctrl;
+ struct iwarp_ctrl_untagged c_untagged;
+ struct iwarp_ctrl_tagged c_tagged;
+
+ /* FPDU headers */
+ struct iwarp_rdma_write rwrite;
+ struct iwarp_rdma_rreq rreq;
+ struct iwarp_rdma_rresp rresp;
+ struct iwarp_terminate terminate;
+ struct iwarp_send send;
+ struct iwarp_send_inv send_inv;
+
+ /* complete short FPDUs */
+ struct siw_send_pkt send_pkt;
+ struct siw_write_pkt write_pkt;
+ struct siw_rreq_pkt rreq_pkt;
+ struct siw_rresp_pkt rresp_pkt;
+ } pkt;
+
+ struct mpa_trailer trailer;
+ /* DDP MSN for untagged messages */
+ u32 ddp_msn[RDMAP_UNTAGGED_QN_COUNT];
+
+ enum siw_tx_ctx state;
+ u16 ctrl_len; /* ddp+rdmap hdr */
+ u16 ctrl_sent;
+ int burst;
+ int bytes_unsent; /* ddp payload bytes */
+
+ u32 mpa_crc;
+ bool mpa_crc_enabled;
+
+ u8 do_crc : 1; /* do crc for segment */
+ u8 use_sendpage : 1; /* send w/o copy */
+ u8 tx_suspend : 1; /* stop sending DDP segs. */
+ u8 pad : 2; /* # pad in current fpdu */
+ u8 orq_fence : 1; /* ORQ full or Send fenced */
+ u8 in_syscall : 1; /* TX out of user context */
+ u8 zcopy_tx : 1; /* Use TCP_SENDPAGE if possible */
+ u8 gso_seg_limit; /* Maximum segments for GSO, 0 = unbound */
+
+ u16 fpdu_len; /* len of FPDU to tx */
+ unsigned int tcp_seglen; /* remaining tcp seg space */
+
+ struct siw_wqe wqe_active;
+
+ int pbl_idx; /* Index into current PBL */
+ int sge_idx; /* current sge in tx */
+ u32 sge_off; /* already sent in curr. sge */
+};
+
+struct siw_qp {
+ struct ib_qp base_qp;
+ struct siw_device *sdev;
+ int tx_cpu;
+ struct kref ref;
+ struct completion qp_free;
+ struct list_head devq;
+ struct siw_qp_attrs attrs;
+
+ struct siw_cep *cep;
+ struct rw_semaphore state_lock;
+
+ struct ib_pd *pd;
+ struct siw_cq *scq;
+ struct siw_cq *rcq;
+ struct siw_srq *srq;
+
+ struct siw_iwarp_tx tx_ctx; /* Transmit context */
+ spinlock_t sq_lock;
+ struct siw_sqe *sendq; /* send queue element array */
+ uint32_t sq_get; /* consumer index into sq array */
+ uint32_t sq_put; /* kernel prod. index into sq array */
+ struct llist_node tx_list;
+
+ struct siw_sqe *orq; /* outbound read queue element array */
+ spinlock_t orq_lock;
+ uint32_t orq_get; /* consumer index into orq array */
+ uint32_t orq_put; /* shared producer index for ORQ */
+
+ struct siw_rx_stream rx_stream;
+ struct siw_rx_fpdu *rx_fpdu;
+ struct siw_rx_fpdu rx_tagged;
+ struct siw_rx_fpdu rx_untagged;
+ spinlock_t rq_lock;
+ struct siw_rqe *recvq; /* recv queue element array */
+ uint32_t rq_get; /* consumer index into rq array */
+ uint32_t rq_put; /* kernel prod. index into rq array */
+
+ struct siw_sqe *irq; /* inbound read queue element array */
+ uint32_t irq_get; /* consumer index into irq array */
+ uint32_t irq_put; /* producer index into irq array */
+ int irq_burst;
+
+ struct { /* information to be carried in TERMINATE pkt, if valid */
+ u8 valid;
+ u8 in_tx;
+ u8 layer : 4, etype : 4;
+ u8 ecode;
+ } term_info;
+ struct rdma_user_mmap_entry *sq_entry; /* mmap info for SQE array */
+ struct rdma_user_mmap_entry *rq_entry; /* mmap info for RQE array */
+};
+
+/* helper macros */
+#define rx_qp(rx) container_of(rx, struct siw_qp, rx_stream)
+#define tx_qp(tx) container_of(tx, struct siw_qp, tx_ctx)
+#define tx_wqe(qp) (&(qp)->tx_ctx.wqe_active)
+#define rx_wqe(rctx) (&(rctx)->wqe_active)
+#define rx_mem(rctx) ((rctx)->wqe_active.mem[0])
+#define tx_type(wqe) ((wqe)->sqe.opcode)
+#define rx_type(wqe) ((wqe)->rqe.opcode)
+#define tx_flags(wqe) ((wqe)->sqe.flags)
+
+struct iwarp_msg_info {
+ int hdr_len;
+ struct iwarp_ctrl ctrl;
+ int (*rx_data)(struct siw_qp *qp);
+};
+
+struct siw_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ void *address;
+};
+
+/* Global siw parameters. Currently set in siw_main.c */
+extern const bool zcopy_tx;
+extern const bool try_gso;
+extern const bool loopback_enabled;
+extern const bool mpa_crc_required;
+extern const bool mpa_crc_strict;
+extern const bool siw_tcp_nagle;
+extern u_char mpa_version;
+extern const bool peer_to_peer;
+extern struct task_struct *siw_tx_thread[];
+
+extern struct iwarp_msg_info iwarp_pktinfo[RDMAP_TERMINATE + 1];
+
+/* QP general functions */
+int siw_qp_modify(struct siw_qp *qp, struct siw_qp_attrs *attr,
+ enum siw_qp_attr_mask mask);
+int siw_qp_mpa_rts(struct siw_qp *qp, enum mpa_v2_ctrl ctrl);
+void siw_qp_llp_close(struct siw_qp *qp);
+void siw_qp_cm_drop(struct siw_qp *qp, int schedule);
+void siw_send_terminate(struct siw_qp *qp);
+
+void siw_qp_get_ref(struct ib_qp *qp);
+void siw_qp_put_ref(struct ib_qp *qp);
+int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp);
+void siw_free_qp(struct kref *ref);
+
+void siw_init_terminate(struct siw_qp *qp, enum term_elayer layer,
+ u8 etype, u8 ecode, int in_tx);
+enum ddp_ecode siw_tagged_error(enum siw_access_state state);
+enum rdmap_ecode siw_rdmap_error(enum siw_access_state state);
+
+void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe);
+int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
+ enum siw_wc_status status);
+int siw_rqe_complete(struct siw_qp *qp, struct siw_rqe *rqe, u32 bytes,
+ u32 inval_stag, enum siw_wc_status status);
+void siw_qp_llp_data_ready(struct sock *sk);
+void siw_qp_llp_write_space(struct sock *sk);
+
+/* QP TX path functions */
+int siw_create_tx_threads(void);
+void siw_stop_tx_threads(void);
+int siw_run_sq(void *arg);
+int siw_qp_sq_process(struct siw_qp *qp);
+int siw_sq_start(struct siw_qp *qp);
+int siw_activate_tx(struct siw_qp *qp);
+int siw_get_tx_cpu(struct siw_device *sdev);
+void siw_put_tx_cpu(int cpu);
+
+/* QP RX path functions */
+int siw_proc_send(struct siw_qp *qp);
+int siw_proc_rreq(struct siw_qp *qp);
+int siw_proc_rresp(struct siw_qp *qp);
+int siw_proc_write(struct siw_qp *qp);
+int siw_proc_terminate(struct siw_qp *qp);
+
+int siw_tcp_rx_data(read_descriptor_t *rd_desc, struct sk_buff *skb,
+ unsigned int off, size_t len);
+
+static inline void set_rx_fpdu_context(struct siw_qp *qp, u8 opcode)
+{
+ if (opcode == RDMAP_RDMA_WRITE || opcode == RDMAP_RDMA_READ_RESP)
+ qp->rx_fpdu = &qp->rx_tagged;
+ else
+ qp->rx_fpdu = &qp->rx_untagged;
+
+ qp->rx_stream.rdmap_op = opcode;
+}
+
+static inline struct siw_ucontext *to_siw_ctx(struct ib_ucontext *base_ctx)
+{
+ return container_of(base_ctx, struct siw_ucontext, base_ucontext);
+}
+
+static inline struct siw_qp *to_siw_qp(struct ib_qp *base_qp)
+{
+ return container_of(base_qp, struct siw_qp, base_qp);
+}
+
+static inline struct siw_cq *to_siw_cq(struct ib_cq *base_cq)
+{
+ return container_of(base_cq, struct siw_cq, base_cq);
+}
+
+static inline struct siw_srq *to_siw_srq(struct ib_srq *base_srq)
+{
+ return container_of(base_srq, struct siw_srq, base_srq);
+}
+
+static inline struct siw_device *to_siw_dev(struct ib_device *base_dev)
+{
+ return container_of(base_dev, struct siw_device, base_dev);
+}
+
+static inline struct siw_mr *to_siw_mr(struct ib_mr *base_mr)
+{
+ return container_of(base_mr, struct siw_mr, base_mr);
+}
+
+static inline struct siw_user_mmap_entry *
+to_siw_mmap_entry(struct rdma_user_mmap_entry *rdma_mmap)
+{
+ return container_of(rdma_mmap, struct siw_user_mmap_entry, rdma_entry);
+}
+
+static inline struct siw_qp *siw_qp_id2obj(struct siw_device *sdev, int id)
+{
+ struct siw_qp *qp;
+
+ rcu_read_lock();
+ qp = xa_load(&sdev->qp_xa, id);
+ if (likely(qp && kref_get_unless_zero(&qp->ref))) {
+ rcu_read_unlock();
+ return qp;
+ }
+ rcu_read_unlock();
+ return NULL;
+}
+
+static inline u32 qp_id(struct siw_qp *qp)
+{
+ return qp->base_qp.qp_num;
+}
+
+static inline void siw_qp_get(struct siw_qp *qp)
+{
+ kref_get(&qp->ref);
+}
+
+static inline void siw_qp_put(struct siw_qp *qp)
+{
+ kref_put(&qp->ref, siw_free_qp);
+}
+
+static inline int siw_sq_empty(struct siw_qp *qp)
+{
+ struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];
+
+ return READ_ONCE(sqe->flags) == 0;
+}
+
+static inline struct siw_sqe *sq_get_next(struct siw_qp *qp)
+{
+ struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];
+
+ if (READ_ONCE(sqe->flags) & SIW_WQE_VALID)
+ return sqe;
+
+ return NULL;
+}
+
+static inline struct siw_sqe *orq_get_current(struct siw_qp *qp)
+{
+ return &qp->orq[qp->orq_get % qp->attrs.orq_size];
+}
+
+static inline struct siw_sqe *orq_get_free(struct siw_qp *qp)
+{
+ struct siw_sqe *orq_e = &qp->orq[qp->orq_put % qp->attrs.orq_size];
+
+ if (READ_ONCE(orq_e->flags) == 0)
+ return orq_e;
+
+ return NULL;
+}
+
+static inline int siw_orq_empty(struct siw_qp *qp)
+{
+ return orq_get_current(qp)->flags == 0 ? 1 : 0;
+}
+
+static inline struct siw_sqe *irq_alloc_free(struct siw_qp *qp)
+{
+ struct siw_sqe *irq_e = &qp->irq[qp->irq_put % qp->attrs.irq_size];
+
+ if (READ_ONCE(irq_e->flags) == 0) {
+ qp->irq_put++;
+ return irq_e;
+ }
+ return NULL;
+}
+
+static inline void siw_crc_init(u32 *crc)
+{
+ *crc = ~0;
+}
+
+static inline void siw_crc_update(u32 *crc, const void *data, size_t len)
+{
+ *crc = crc32c(*crc, data, len);
+}
+
+static inline void siw_crc_final(u32 *crc, u8 out[4])
+{
+ put_unaligned_le32(~*crc, out);
+}
+
+static inline void siw_crc_oneshot(const void *data, size_t len, u8 out[4])
+{
+ u32 crc;
+
+ siw_crc_init(&crc);
+ siw_crc_update(&crc, data, len);
+ return siw_crc_final(&crc, out);
+}
+
+static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
+{
+ srx->mpa_crc = skb_crc32c(srx->skb, srx->skb_offset, len, srx->mpa_crc);
+}
+
+#define siw_dbg(ibdev, fmt, ...) \
+ ibdev_dbg(ibdev, "%s: " fmt, __func__, ##__VA_ARGS__)
+
+#define siw_dbg_qp(qp, fmt, ...) \
+ ibdev_dbg(&qp->sdev->base_dev, "QP[%u] %s: " fmt, qp_id(qp), __func__, \
+ ##__VA_ARGS__)
+
+#define siw_dbg_cq(cq, fmt, ...) \
+ ibdev_dbg(cq->base_cq.device, "CQ[%u] %s: " fmt, cq->id, __func__, \
+ ##__VA_ARGS__)
+
+#define siw_dbg_pd(pd, fmt, ...) \
+ ibdev_dbg(pd->device, "PD[%u] %s: " fmt, pd->res.id, __func__, \
+ ##__VA_ARGS__)
+
+#define siw_dbg_mem(mem, fmt, ...) \
+ ibdev_dbg(&mem->sdev->base_dev, \
+ "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)
+
+#define siw_dbg_cep(cep, fmt, ...) \
+ ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%p] %s: " fmt, \
+ cep, __func__, ##__VA_ARGS__)
+
+void siw_cq_flush(struct siw_cq *cq);
+void siw_sq_flush(struct siw_qp *qp);
+void siw_rq_flush(struct siw_qp *qp);
+int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc);
+
+#endif
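
As a small usage sketch (not part of the header above), the CRC helpers wrap the usual CRC32C pattern: seed with ~0, accumulate over each piece, then store the bitwise complement little-endian. Feeding header and payload incrementally is equivalent to siw_crc_oneshot() over the concatenated bytes; the function name below is illustrative.

static void example_fpdu_crc(const void *hdr, size_t hdr_len,
			     const void *payload, size_t payload_len,
			     u8 out[4])
{
	u32 crc;

	siw_crc_init(&crc);			/* crc = ~0 */
	siw_crc_update(&crc, hdr, hdr_len);	/* accumulate header bytes */
	siw_crc_update(&crc, payload, payload_len);
	siw_crc_final(&crc, out);		/* write ~crc, little-endian */
}
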
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
new file mode 100644
index 000000000000..708b13993fdf
--- /dev/null
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -0,0 +1,1958 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Fredy Neeser */
+/* Greg Joyce <greg@opengridcomputing.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+/* Copyright (c) 2017, Open Grid Computing, Inc. */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/inetdevice.h>
+#include <net/addrconf.h>
+#include <linux/workqueue.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <linux/inet.h>
+#include <linux/tcp.h>
+#include <trace/events/sock.h>
+
+#include <rdma/iw_cm.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "siw.h"
+#include "siw_cm.h"
+
+/*
+ * Set to any combination of
+ * MPA_V2_RDMA_NO_RTR, MPA_V2_RDMA_READ_RTR, MPA_V2_RDMA_WRITE_RTR
+ */
+static __be16 rtr_type = MPA_V2_RDMA_READ_RTR | MPA_V2_RDMA_WRITE_RTR;
+static const bool relaxed_ird_negotiation = true;
+
+static void siw_cm_llp_state_change(struct sock *s);
+static void siw_cm_llp_data_ready(struct sock *s);
+static void siw_cm_llp_write_space(struct sock *s);
+static void siw_cm_llp_error_report(struct sock *s);
+static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason,
+ int status);
+
+static void siw_sk_assign_cm_upcalls(struct sock *sk)
+{
+ struct siw_cep *cep = sk_to_cep(sk);
+
+ write_lock_bh(&sk->sk_callback_lock);
+ cep->sk_state_change = sk->sk_state_change;
+ cep->sk_data_ready = sk->sk_data_ready;
+ cep->sk_write_space = sk->sk_write_space;
+ cep->sk_error_report = sk->sk_error_report;
+
+ sk->sk_state_change = siw_cm_llp_state_change;
+ sk->sk_data_ready = siw_cm_llp_data_ready;
+ sk->sk_write_space = siw_cm_llp_write_space;
+ sk->sk_error_report = siw_cm_llp_error_report;
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void siw_sk_restore_upcalls(struct sock *sk, struct siw_cep *cep)
+{
+ sk->sk_state_change = cep->sk_state_change;
+ sk->sk_data_ready = cep->sk_data_ready;
+ sk->sk_write_space = cep->sk_write_space;
+ sk->sk_error_report = cep->sk_error_report;
+ sk->sk_user_data = NULL;
+}
+
+static void siw_qp_socket_assoc(struct siw_cep *cep, struct siw_qp *qp)
+{
+ struct socket *s = cep->sock;
+ struct sock *sk = s->sk;
+
+ write_lock_bh(&sk->sk_callback_lock);
+
+ qp->attrs.sk = s;
+ sk->sk_data_ready = siw_qp_llp_data_ready;
+ sk->sk_write_space = siw_qp_llp_write_space;
+
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void siw_socket_disassoc(struct socket *s)
+{
+ struct sock *sk = s->sk;
+ struct siw_cep *cep;
+
+ if (sk) {
+ write_lock_bh(&sk->sk_callback_lock);
+ cep = sk_to_cep(sk);
+ if (cep) {
+ siw_sk_restore_upcalls(sk, cep);
+ siw_cep_put(cep);
+ } else {
+ pr_warn("siw: cannot restore sk callbacks: no ep\n");
+ }
+ write_unlock_bh(&sk->sk_callback_lock);
+ } else {
+ pr_warn("siw: cannot restore sk callbacks: no sk\n");
+ }
+}
+
+static void siw_rtr_data_ready(struct sock *sk)
+{
+ struct siw_cep *cep;
+ struct siw_qp *qp = NULL;
+ read_descriptor_t rd_desc;
+
+ trace_sk_data_ready(sk);
+
+ read_lock(&sk->sk_callback_lock);
+
+ cep = sk_to_cep(sk);
+ if (!cep) {
+ WARN(1, "No connection endpoint\n");
+ goto out;
+ }
+ qp = sk_to_qp(sk);
+
+ memset(&rd_desc, 0, sizeof(rd_desc));
+ rd_desc.arg.data = qp;
+ rd_desc.count = 1;
+
+ tcp_read_sock(sk, &rd_desc, siw_tcp_rx_data);
+ /*
+ * Check if first frame was successfully processed.
+ * Signal connection full establishment if yes.
+ * Failed data processing would have already scheduled
+ * connection drop.
+ */
+ if (!qp->rx_stream.rx_suspend)
+ siw_cm_upcall(cep, IW_CM_EVENT_ESTABLISHED, 0);
+out:
+ read_unlock(&sk->sk_callback_lock);
+ if (qp)
+ siw_qp_socket_assoc(cep, qp);
+}
+
+static void siw_sk_assign_rtr_upcalls(struct siw_cep *cep)
+{
+ struct sock *sk = cep->sock->sk;
+
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_data_ready = siw_rtr_data_ready;
+ sk->sk_write_space = siw_qp_llp_write_space;
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void siw_cep_socket_assoc(struct siw_cep *cep, struct socket *s)
+{
+ cep->sock = s;
+ siw_cep_get(cep);
+ s->sk->sk_user_data = cep;
+
+ siw_sk_assign_cm_upcalls(s->sk);
+}
+
+static struct siw_cep *siw_cep_alloc(struct siw_device *sdev)
+{
+ struct siw_cep *cep = kzalloc(sizeof(*cep), GFP_KERNEL);
+ unsigned long flags;
+
+ if (!cep)
+ return NULL;
+
+ INIT_LIST_HEAD(&cep->listenq);
+ INIT_LIST_HEAD(&cep->devq);
+ INIT_LIST_HEAD(&cep->work_freelist);
+
+ kref_init(&cep->ref);
+ cep->state = SIW_EPSTATE_IDLE;
+ init_waitqueue_head(&cep->waitq);
+ spin_lock_init(&cep->lock);
+ cep->sdev = sdev;
+ cep->enhanced_rdma_conn_est = false;
+
+ spin_lock_irqsave(&sdev->lock, flags);
+ list_add_tail(&cep->devq, &sdev->cep_list);
+ spin_unlock_irqrestore(&sdev->lock, flags);
+
+ siw_dbg_cep(cep, "new endpoint\n");
+ return cep;
+}
+
+static void siw_cm_free_work(struct siw_cep *cep)
+{
+ struct list_head *w, *tmp;
+ struct siw_cm_work *work;
+
+ list_for_each_safe(w, tmp, &cep->work_freelist) {
+ work = list_entry(w, struct siw_cm_work, list);
+ list_del(&work->list);
+ kfree(work);
+ }
+}
+
+static void siw_cancel_mpatimer(struct siw_cep *cep)
+{
+ spin_lock_bh(&cep->lock);
+ if (cep->mpa_timer) {
+ if (cancel_delayed_work(&cep->mpa_timer->work)) {
+ siw_cep_put(cep);
+ kfree(cep->mpa_timer); /* not needed again */
+ }
+ cep->mpa_timer = NULL;
+ }
+ spin_unlock_bh(&cep->lock);
+}
+
+static void siw_put_work(struct siw_cm_work *work)
+{
+ INIT_LIST_HEAD(&work->list);
+ spin_lock_bh(&work->cep->lock);
+ list_add(&work->list, &work->cep->work_freelist);
+ spin_unlock_bh(&work->cep->lock);
+}
+
+static void siw_cep_set_inuse(struct siw_cep *cep)
+{
+ unsigned long flags;
+retry:
+ spin_lock_irqsave(&cep->lock, flags);
+
+ if (cep->in_use) {
+ spin_unlock_irqrestore(&cep->lock, flags);
+ wait_event_interruptible(cep->waitq, !cep->in_use);
+ if (signal_pending(current))
+ flush_signals(current);
+ goto retry;
+ } else {
+ cep->in_use = 1;
+ spin_unlock_irqrestore(&cep->lock, flags);
+ }
+}
+
+static void siw_cep_set_free(struct siw_cep *cep)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cep->lock, flags);
+ cep->in_use = 0;
+ spin_unlock_irqrestore(&cep->lock, flags);
+
+ wake_up(&cep->waitq);
+}
+
+static void __siw_cep_dealloc(struct kref *ref)
+{
+ struct siw_cep *cep = container_of(ref, struct siw_cep, ref);
+ struct siw_device *sdev = cep->sdev;
+ unsigned long flags;
+
+ WARN_ON(cep->listen_cep);
+
+ /* kfree(NULL) is safe */
+ kfree(cep->mpa.pdata);
+ spin_lock_bh(&cep->lock);
+ if (!list_empty(&cep->work_freelist))
+ siw_cm_free_work(cep);
+ spin_unlock_bh(&cep->lock);
+
+ spin_lock_irqsave(&sdev->lock, flags);
+ list_del(&cep->devq);
+ spin_unlock_irqrestore(&sdev->lock, flags);
+
+ siw_dbg_cep(cep, "free endpoint\n");
+ kfree(cep);
+}
+
+static struct siw_cm_work *siw_get_work(struct siw_cep *cep)
+{
+ struct siw_cm_work *work = NULL;
+
+ spin_lock_bh(&cep->lock);
+ if (!list_empty(&cep->work_freelist)) {
+ work = list_entry(cep->work_freelist.next, struct siw_cm_work,
+ list);
+ list_del_init(&work->list);
+ }
+ spin_unlock_bh(&cep->lock);
+ return work;
+}
+
+static int siw_cm_alloc_work(struct siw_cep *cep, int num)
+{
+ struct siw_cm_work *work;
+
+ while (num--) {
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work) {
+ if (!(list_empty(&cep->work_freelist)))
+ siw_cm_free_work(cep);
+ return -ENOMEM;
+ }
+ work->cep = cep;
+ INIT_LIST_HEAD(&work->list);
+ list_add(&work->list, &cep->work_freelist);
+ }
+ return 0;
+}
+
+/*
+ * siw_cm_upcall()
+ *
+ * Upcall to IWCM to inform about async connection events
+ */
+static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason,
+ int status)
+{
+ struct iw_cm_event event;
+ struct iw_cm_id *id;
+
+ memset(&event, 0, sizeof(event));
+ event.status = status;
+ event.event = reason;
+
+ if (reason == IW_CM_EVENT_CONNECT_REQUEST) {
+ event.provider_data = cep;
+ id = cep->listen_cep->cm_id;
+ } else {
+ id = cep->cm_id;
+ }
+ /* Signal IRD and ORD */
+ if (reason == IW_CM_EVENT_ESTABLISHED ||
+ reason == IW_CM_EVENT_CONNECT_REPLY) {
+ /* Signal negotiated IRD/ORD values we will use */
+ event.ird = cep->ird;
+ event.ord = cep->ord;
+ } else if (reason == IW_CM_EVENT_CONNECT_REQUEST) {
+ event.ird = cep->ord;
+ event.ord = cep->ird;
+ }
+ /* Signal private data and address information */
+ if (reason == IW_CM_EVENT_CONNECT_REQUEST ||
+ reason == IW_CM_EVENT_CONNECT_REPLY) {
+ u16 pd_len = be16_to_cpu(cep->mpa.hdr.params.pd_len);
+
+ if (pd_len) {
+ /*
+ * hand over MPA private data
+ */
+ event.private_data_len = pd_len;
+ event.private_data = cep->mpa.pdata;
+
+ /* Hide MPA V2 IRD/ORD control */
+ if (cep->enhanced_rdma_conn_est) {
+ event.private_data_len -=
+ sizeof(struct mpa_v2_data);
+ event.private_data +=
+ sizeof(struct mpa_v2_data);
+ }
+ }
+ getname_local(cep->sock, &event.local_addr);
+ getname_peer(cep->sock, &event.remote_addr);
+ }
+ siw_dbg_cep(cep, "[QP %u]: reason=%d, status=%d\n",
+ cep->qp ? qp_id(cep->qp) : UINT_MAX, reason, status);
+
+ return id->event_handler(id, &event);
+}
+
+static void siw_free_cm_id(struct siw_cep *cep)
+{
+ if (!cep->cm_id)
+ return;
+
+ cep->cm_id->rem_ref(cep->cm_id);
+ cep->cm_id = NULL;
+}
+
+static void siw_destroy_cep_sock(struct siw_cep *cep)
+{
+ if (cep->sock) {
+ siw_socket_disassoc(cep->sock);
+ sock_release(cep->sock);
+ cep->sock = NULL;
+ }
+}
+
+/*
+ * siw_qp_cm_drop()
+ *
+ * Drops established LLP connection if present and not already
+ * scheduled for dropping. Called from user context, SQ workqueue
+ * or receive IRQ. Caller signals if socket can be immediately
+ * closed (basically, if not in IRQ).
+ */
+void siw_qp_cm_drop(struct siw_qp *qp, int schedule)
+{
+ struct siw_cep *cep = qp->cep;
+
+ qp->rx_stream.rx_suspend = 1;
+ qp->tx_ctx.tx_suspend = 1;
+
+ if (!qp->cep)
+ return;
+
+ if (schedule) {
+ siw_cm_queue_work(cep, SIW_CM_WORK_CLOSE_LLP);
+ } else {
+ siw_cep_set_inuse(cep);
+
+ if (cep->state == SIW_EPSTATE_CLOSED) {
+ siw_dbg_cep(cep, "already closed\n");
+ goto out;
+ }
+ siw_dbg_cep(cep, "immediate close, state %d\n", cep->state);
+
+ siw_send_terminate(qp);
+
+ if (cep->cm_id) {
+ switch (cep->state) {
+ case SIW_EPSTATE_AWAIT_MPAREP:
+ siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
+ -EINVAL);
+ break;
+
+ case SIW_EPSTATE_RDMA_MODE:
+ siw_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);
+ break;
+
+ case SIW_EPSTATE_IDLE:
+ case SIW_EPSTATE_LISTENING:
+ case SIW_EPSTATE_CONNECTING:
+ case SIW_EPSTATE_AWAIT_MPAREQ:
+ case SIW_EPSTATE_RECVD_MPAREQ:
+ case SIW_EPSTATE_CLOSED:
+ default:
+ break;
+ }
+ siw_free_cm_id(cep);
+ siw_cep_put(cep);
+ }
+ cep->state = SIW_EPSTATE_CLOSED;
+
+ siw_destroy_cep_sock(cep);
+ if (cep->qp) {
+ cep->qp = NULL;
+ siw_qp_put(qp);
+ }
+out:
+ siw_cep_set_free(cep);
+ }
+}
+
+void siw_cep_put(struct siw_cep *cep)
+{
+ WARN_ON(kref_read(&cep->ref) < 1);
+ kref_put(&cep->ref, __siw_cep_dealloc);
+}
+
+static void siw_cep_set_free_and_put(struct siw_cep *cep)
+{
+ siw_cep_set_free(cep);
+ siw_cep_put(cep);
+}
+
+void siw_cep_get(struct siw_cep *cep)
+{
+ kref_get(&cep->ref);
+}
+
+/*
+ * Expects params->pd_len in host byte order
+ */
+static int siw_send_mpareqrep(struct siw_cep *cep, const void *pdata, u8 pd_len)
+{
+ struct socket *s = cep->sock;
+ struct mpa_rr *rr = &cep->mpa.hdr;
+ struct kvec iov[3];
+ struct msghdr msg;
+ int rv;
+ int iovec_num = 0;
+ int mpa_len;
+
+ memset(&msg, 0, sizeof(msg));
+
+ iov[iovec_num].iov_base = rr;
+ iov[iovec_num].iov_len = sizeof(*rr);
+ mpa_len = sizeof(*rr);
+
+ if (cep->enhanced_rdma_conn_est) {
+ iovec_num++;
+ iov[iovec_num].iov_base = &cep->mpa.v2_ctrl;
+ iov[iovec_num].iov_len = sizeof(cep->mpa.v2_ctrl);
+ mpa_len += sizeof(cep->mpa.v2_ctrl);
+ }
+ if (pd_len) {
+ iovec_num++;
+ iov[iovec_num].iov_base = (char *)pdata;
+ iov[iovec_num].iov_len = pd_len;
+ mpa_len += pd_len;
+ }
+ if (cep->enhanced_rdma_conn_est)
+ pd_len += sizeof(cep->mpa.v2_ctrl);
+
+ rr->params.pd_len = cpu_to_be16(pd_len);
+
+ rv = kernel_sendmsg(s, &msg, iov, iovec_num + 1, mpa_len);
+
+ return rv < 0 ? rv : 0;
+}
+
+/*
+ * Receive MPA Request/Reply header.
+ *
+ * Returns 0 if the complete MPA Request/Reply header, including
+ * any private data, was received. Returns -EAGAIN if the header
+ * was only partially received, or a negative error code otherwise.
+ *
+ * Context: May be called in process context only
+ */
+static int siw_recv_mpa_rr(struct siw_cep *cep)
+{
+ struct mpa_rr *hdr = &cep->mpa.hdr;
+ struct socket *s = cep->sock;
+ u16 pd_len;
+ int rcvd, to_rcv;
+
+ if (cep->mpa.bytes_rcvd < sizeof(struct mpa_rr)) {
+ rcvd = ksock_recv(s, (char *)hdr + cep->mpa.bytes_rcvd,
+ sizeof(struct mpa_rr) - cep->mpa.bytes_rcvd,
+ 0);
+ if (rcvd <= 0)
+ return -ECONNABORTED;
+
+ cep->mpa.bytes_rcvd += rcvd;
+
+ if (cep->mpa.bytes_rcvd < sizeof(struct mpa_rr))
+ return -EAGAIN;
+
+ if (be16_to_cpu(hdr->params.pd_len) > MPA_MAX_PRIVDATA)
+ return -EPROTO;
+ }
+ pd_len = be16_to_cpu(hdr->params.pd_len);
+
+ /*
+ * At least the MPA Request/Reply header (frame not including
+ * private data) has been received.
+ * Receive (or continue receiving) any private data.
+ */
+ to_rcv = pd_len - (cep->mpa.bytes_rcvd - sizeof(struct mpa_rr));
+
+ if (!to_rcv) {
+ /*
+ * We must have hdr->params.pd_len == 0 and thus received a
+ * complete MPA Request/Reply frame.
+ * Check against peer protocol violation.
+ */
+ u32 word;
+
+ rcvd = ksock_recv(s, (char *)&word, sizeof(word), MSG_DONTWAIT);
+ if (rcvd == -EAGAIN)
+ return 0;
+
+ if (rcvd == 0) {
+ siw_dbg_cep(cep, "peer EOF\n");
+ return -EPIPE;
+ }
+ if (rcvd < 0) {
+ siw_dbg_cep(cep, "error: %d\n", rcvd);
+ return rcvd;
+ }
+ siw_dbg_cep(cep, "peer sent extra data: %d\n", rcvd);
+
+ return -EPROTO;
+ }
+
+ /*
+ * At this point, we must have hdr->params.pd_len != 0.
+ * A private data buffer gets allocated if hdr->params.pd_len != 0.
+ */
+ if (!cep->mpa.pdata) {
+ cep->mpa.pdata = kmalloc(pd_len + 4, GFP_KERNEL);
+ if (!cep->mpa.pdata)
+ return -ENOMEM;
+ }
+ rcvd = ksock_recv(
+ s, cep->mpa.pdata + cep->mpa.bytes_rcvd - sizeof(struct mpa_rr),
+ to_rcv + 4, MSG_DONTWAIT);
+
+ if (rcvd < 0)
+ return rcvd;
+
+ if (rcvd > to_rcv)
+ return -EPROTO;
+
+ cep->mpa.bytes_rcvd += rcvd;
+
+ if (to_rcv == rcvd) {
+ siw_dbg_cep(cep, "%d bytes private data received\n", pd_len);
+ return 0;
+ }
+ return -EAGAIN;
+}
+
+/*
+ * siw_proc_mpareq()
+ *
+ * Read MPA Request from socket and signal new connection to IWCM
+ * if success. Caller must hold lock on corresponding listening CEP.
+ */
+static int siw_proc_mpareq(struct siw_cep *cep)
+{
+ struct mpa_rr *req;
+ int version, rv;
+ u16 pd_len;
+
+ rv = siw_recv_mpa_rr(cep);
+ if (rv)
+ return rv;
+
+ req = &cep->mpa.hdr;
+
+ version = __mpa_rr_revision(req->params.bits);
+ pd_len = be16_to_cpu(req->params.pd_len);
+
+ if (version > MPA_REVISION_2)
+ /* allow for 0, 1, and 2 only */
+ return -EPROTO;
+
+ if (memcmp(req->key, MPA_KEY_REQ, 16))
+ return -EPROTO;
+
+ /* Prepare for sending MPA reply */
+ memcpy(req->key, MPA_KEY_REP, 16);
+
+ if (version == MPA_REVISION_2 &&
+ (req->params.bits & MPA_RR_FLAG_ENHANCED)) {
+ /*
+ * MPA version 2 must signal IRD/ORD values and P2P mode
+ * in private data if header flag MPA_RR_FLAG_ENHANCED
+ * is set.
+ */
+ if (pd_len < sizeof(struct mpa_v2_data))
+ goto reject_conn;
+
+ cep->enhanced_rdma_conn_est = true;
+ }
+
+ /* MPA Markers: currently not supported. Marker TX to be added. */
+ if (req->params.bits & MPA_RR_FLAG_MARKERS)
+ goto reject_conn;
+
+ if (req->params.bits & MPA_RR_FLAG_CRC) {
+ /*
+ * RFC 5044, page 27: CRC MUST be used if peer requests it.
+ * siw specific: reject the connection if the peer requests
+ * CRC while CRC is locally switched off and strict checking
+ * is enforced by the 'mpa_crc_strict' module parameter.
+ */
+ if (!mpa_crc_required && mpa_crc_strict)
+ goto reject_conn;
+
+ /* Enable CRC if requested by module parameter */
+ if (mpa_crc_required)
+ req->params.bits |= MPA_RR_FLAG_CRC;
+ }
+ if (cep->enhanced_rdma_conn_est) {
+ struct mpa_v2_data *v2 = (struct mpa_v2_data *)cep->mpa.pdata;
+
+ /*
+ * Peer requested ORD becomes requested local IRD,
+ * peer requested IRD becomes requested local ORD.
+ * IRD and ORD get limited by global maximum values.
+ */
+ cep->ord = ntohs(v2->ird) & MPA_IRD_ORD_MASK;
+ cep->ord = min(cep->ord, SIW_MAX_ORD_QP);
+ cep->ird = ntohs(v2->ord) & MPA_IRD_ORD_MASK;
+ cep->ird = min(cep->ird, SIW_MAX_IRD_QP);
+
+ /* May get overwritten by locally negotiated values */
+ cep->mpa.v2_ctrl.ird = htons(cep->ird);
+ cep->mpa.v2_ctrl.ord = htons(cep->ord);
+
+ /*
+ * Support for peer sent zero length Write or Read to
+ * let local side enter RTS. Writes are preferred.
+ * Sends would require pre-posting a Receive and are
+ * not supported.
+ * Propose a zero length Write if neither Read nor Write
+ * is indicated.
+ */
+ if (v2->ird & MPA_V2_PEER_TO_PEER) {
+ cep->mpa.v2_ctrl.ird |= MPA_V2_PEER_TO_PEER;
+
+ if (v2->ord & MPA_V2_RDMA_WRITE_RTR)
+ cep->mpa.v2_ctrl.ord |= MPA_V2_RDMA_WRITE_RTR;
+ else if (v2->ord & MPA_V2_RDMA_READ_RTR)
+ cep->mpa.v2_ctrl.ord |= MPA_V2_RDMA_READ_RTR;
+ else
+ cep->mpa.v2_ctrl.ord |= MPA_V2_RDMA_WRITE_RTR;
+ }
+ }
+
+ cep->state = SIW_EPSTATE_RECVD_MPAREQ;
+
+ /* Keep reference until IWCM accepts/rejects */
+ siw_cep_get(cep);
+ rv = siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REQUEST, 0);
+ if (rv)
+ siw_cep_put(cep);
+
+ return rv;
+
+reject_conn:
+ siw_dbg_cep(cep, "reject: crc %d:%d:%d, m %d:%d\n",
+ req->params.bits & MPA_RR_FLAG_CRC ? 1 : 0,
+ mpa_crc_required, mpa_crc_strict,
+ req->params.bits & MPA_RR_FLAG_MARKERS ? 1 : 0, 0);
+
+ req->params.bits &= ~MPA_RR_FLAG_MARKERS;
+ req->params.bits |= MPA_RR_FLAG_REJECT;
+
+ if (!mpa_crc_required && mpa_crc_strict)
+ req->params.bits &= ~MPA_RR_FLAG_CRC;
+
+ if (pd_len)
+ kfree(cep->mpa.pdata);
+
+ cep->mpa.pdata = NULL;
+
+ siw_send_mpareqrep(cep, NULL, 0);
+
+ return -EOPNOTSUPP;
+}
+
+static int siw_proc_mpareply(struct siw_cep *cep)
+{
+ struct siw_qp_attrs qp_attrs;
+ enum siw_qp_attr_mask qp_attr_mask;
+ struct siw_qp *qp = cep->qp;
+ struct mpa_rr *rep;
+ int rv;
+ u16 rep_ord;
+ u16 rep_ird;
+ bool ird_insufficient = false;
+ enum mpa_v2_ctrl mpa_p2p_mode = MPA_V2_RDMA_NO_RTR;
+
+ rv = siw_recv_mpa_rr(cep);
+ if (rv)
+ goto out_err;
+
+ siw_cancel_mpatimer(cep);
+
+ rep = &cep->mpa.hdr;
+
+ if (__mpa_rr_revision(rep->params.bits) > MPA_REVISION_2) {
+ /* allow for 0, 1, and 2 only */
+ rv = -EPROTO;
+ goto out_err;
+ }
+ if (memcmp(rep->key, MPA_KEY_REP, 16)) {
+ siw_init_terminate(qp, TERM_ERROR_LAYER_LLP, LLP_ETYPE_MPA,
+ LLP_ECODE_INVALID_REQ_RESP, 0);
+ siw_send_terminate(qp);
+ rv = -EPROTO;
+ goto out_err;
+ }
+ if (rep->params.bits & MPA_RR_FLAG_REJECT) {
+ siw_dbg_cep(cep, "got mpa reject\n");
+ siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -ECONNRESET);
+
+ return -ECONNRESET;
+ }
+ if (try_gso && rep->params.bits & MPA_RR_FLAG_GSO_EXP) {
+ siw_dbg_cep(cep, "peer allows GSO on TX\n");
+ qp->tx_ctx.gso_seg_limit = 0;
+ }
+ if ((rep->params.bits & MPA_RR_FLAG_MARKERS) ||
+ (mpa_crc_required && !(rep->params.bits & MPA_RR_FLAG_CRC)) ||
+ (mpa_crc_strict && !mpa_crc_required &&
+ (rep->params.bits & MPA_RR_FLAG_CRC))) {
+ siw_dbg_cep(cep, "reply unsupp: crc %d:%d:%d, m %d:%d\n",
+ rep->params.bits & MPA_RR_FLAG_CRC ? 1 : 0,
+ mpa_crc_required, mpa_crc_strict,
+ rep->params.bits & MPA_RR_FLAG_MARKERS ? 1 : 0, 0);
+
+ siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -ECONNREFUSED);
+
+ return -EINVAL;
+ }
+ if (cep->enhanced_rdma_conn_est) {
+ struct mpa_v2_data *v2;
+
+ if (__mpa_rr_revision(rep->params.bits) < MPA_REVISION_2 ||
+ !(rep->params.bits & MPA_RR_FLAG_ENHANCED)) {
+ /*
+ * Protocol failure: The responder MUST reply with
+ * MPA version 2 and MUST set MPA_RR_FLAG_ENHANCED.
+ */
+ siw_dbg_cep(cep, "mpa reply error: vers %d, enhcd %d\n",
+ __mpa_rr_revision(rep->params.bits),
+ rep->params.bits & MPA_RR_FLAG_ENHANCED ?
+ 1 :
+ 0);
+
+ siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
+ -ECONNRESET);
+ return -EINVAL;
+ }
+ v2 = (struct mpa_v2_data *)cep->mpa.pdata;
+ rep_ird = ntohs(v2->ird) & MPA_IRD_ORD_MASK;
+ rep_ord = ntohs(v2->ord) & MPA_IRD_ORD_MASK;
+
+ if (cep->ird < rep_ord &&
+ (relaxed_ird_negotiation == false ||
+ rep_ord > cep->sdev->attrs.max_ird)) {
+ siw_dbg_cep(cep, "ird %d, rep_ord %d, max_ord %d\n",
+ cep->ird, rep_ord,
+ cep->sdev->attrs.max_ord);
+ ird_insufficient = true;
+ }
+ if (cep->ord > rep_ird && relaxed_ird_negotiation == false) {
+ siw_dbg_cep(cep, "ord %d, rep_ird %d\n", cep->ord,
+ rep_ird);
+ ird_insufficient = true;
+ }
+ /*
+ * Always report negotiated peer values to user,
+ * even if IRD/ORD negotiation failed
+ */
+ cep->ird = rep_ord;
+ cep->ord = rep_ird;
+
+ if (ird_insufficient) {
+ /*
+ * If the initiator IRD is insufficient for the
+ * responder ORD, send a TERM.
+ */
+ siw_init_terminate(qp, TERM_ERROR_LAYER_LLP,
+ LLP_ETYPE_MPA,
+ LLP_ECODE_INSUFFICIENT_IRD, 0);
+ siw_send_terminate(qp);
+ rv = -ENOMEM;
+ goto out_err;
+ }
+ if (cep->mpa.v2_ctrl_req.ird & MPA_V2_PEER_TO_PEER)
+ mpa_p2p_mode =
+ cep->mpa.v2_ctrl_req.ord &
+ (MPA_V2_RDMA_WRITE_RTR | MPA_V2_RDMA_READ_RTR);
+
+ /*
+ * Check if we requested P2P mode, and if peer agrees
+ */
+ if (mpa_p2p_mode != MPA_V2_RDMA_NO_RTR) {
+ if ((mpa_p2p_mode & v2->ord) == 0) {
+ /*
+ * We requested RTR mode(s), but the peer
+ * did not pick any mode we support.
+ */
+ siw_dbg_cep(cep,
+ "rtr mode: req %2x, got %2x\n",
+ mpa_p2p_mode,
+ v2->ord & (MPA_V2_RDMA_WRITE_RTR |
+ MPA_V2_RDMA_READ_RTR));
+
+ siw_init_terminate(qp, TERM_ERROR_LAYER_LLP,
+ LLP_ETYPE_MPA,
+ LLP_ECODE_NO_MATCHING_RTR,
+ 0);
+ siw_send_terminate(qp);
+ rv = -EPROTO;
+ goto out_err;
+ }
+ mpa_p2p_mode = v2->ord & (MPA_V2_RDMA_WRITE_RTR |
+ MPA_V2_RDMA_READ_RTR);
+ }
+ }
+ memset(&qp_attrs, 0, sizeof(qp_attrs));
+
+ if (rep->params.bits & MPA_RR_FLAG_CRC)
+ qp_attrs.flags = SIW_MPA_CRC;
+
+ qp_attrs.irq_size = cep->ird;
+ qp_attrs.orq_size = cep->ord;
+ qp_attrs.sk = cep->sock;
+ qp_attrs.state = SIW_QP_STATE_RTS;
+
+ qp_attr_mask = SIW_QP_ATTR_STATE | SIW_QP_ATTR_LLP_HANDLE |
+ SIW_QP_ATTR_ORD | SIW_QP_ATTR_IRD | SIW_QP_ATTR_MPA;
+
+ /* Move socket RX/TX under QP control */
+ down_write(&qp->state_lock);
+ if (qp->attrs.state > SIW_QP_STATE_RTR) {
+ rv = -EINVAL;
+ up_write(&qp->state_lock);
+ goto out_err;
+ }
+ rv = siw_qp_modify(qp, &qp_attrs, qp_attr_mask);
+
+ siw_qp_socket_assoc(cep, qp);
+
+ up_write(&qp->state_lock);
+
+ /* Send extra RDMA frame to trigger peer RTS if negotiated */
+ if (mpa_p2p_mode != MPA_V2_RDMA_NO_RTR) {
+ rv = siw_qp_mpa_rts(qp, mpa_p2p_mode);
+ if (rv)
+ goto out_err;
+ }
+ if (!rv) {
+ rv = siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, 0);
+ if (!rv)
+ cep->state = SIW_EPSTATE_RDMA_MODE;
+
+ return 0;
+ }
+
+out_err:
+ if (rv != -EAGAIN)
+ siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL);
+
+ return rv;
+}
+
+/*
+ * siw_accept_newconn - accept an incoming pending connection
+ *
+ */
+static void siw_accept_newconn(struct siw_cep *cep)
+{
+ struct socket *s = cep->sock;
+ struct socket *new_s = NULL;
+ struct siw_cep *new_cep = NULL;
+ int rv = 0; /* debug only. should disappear */
+
+ if (cep->state != SIW_EPSTATE_LISTENING)
+ goto error;
+
+ new_cep = siw_cep_alloc(cep->sdev);
+ if (!new_cep)
+ goto error;
+
+ /*
+ * 4: Allocate a sufficient number of work elements
+ * to allow concurrent handling of local + peer close
+ * events, MPA header processing + MPA timeout.
+ */
+ if (siw_cm_alloc_work(new_cep, 4) != 0)
+ goto error;
+
+ /*
+ * Copy saved socket callbacks from listening CEP
+ * and assign new socket with new CEP
+ */
+ new_cep->sk_state_change = cep->sk_state_change;
+ new_cep->sk_data_ready = cep->sk_data_ready;
+ new_cep->sk_write_space = cep->sk_write_space;
+ new_cep->sk_error_report = cep->sk_error_report;
+
+ rv = kernel_accept(s, &new_s, O_NONBLOCK);
+ if (rv != 0) {
+ /*
+ * Connection already aborted by peer..?
+ */
+ siw_dbg_cep(cep, "kernel_accept() error: %d\n", rv);
+ goto error;
+ }
+ new_cep->sock = new_s;
+ siw_cep_get(new_cep);
+ new_s->sk->sk_user_data = new_cep;
+
+ if (siw_tcp_nagle == false)
+ tcp_sock_set_nodelay(new_s->sk);
+ new_cep->state = SIW_EPSTATE_AWAIT_MPAREQ;
+
+ rv = siw_cm_queue_work(new_cep, SIW_CM_WORK_MPATIMEOUT);
+ if (rv)
+ goto error;
+ /*
+ * See siw_proc_mpareq() etc. for the use of new_cep->listen_cep.
+ */
+ new_cep->listen_cep = cep;
+ siw_cep_get(cep);
+
+ if (atomic_read(&new_s->sk->sk_rmem_alloc)) {
+ /*
+ * MPA REQ already queued
+ */
+ siw_dbg_cep(cep, "immediate mpa request\n");
+
+ siw_cep_set_inuse(new_cep);
+ rv = siw_proc_mpareq(new_cep);
+ if (rv != -EAGAIN) {
+ siw_cep_put(cep);
+ new_cep->listen_cep = NULL;
+ if (rv) {
+ siw_cancel_mpatimer(new_cep);
+ siw_cep_set_free(new_cep);
+ goto error;
+ }
+ }
+ siw_cep_set_free(new_cep);
+ }
+ return;
+
+error:
+ if (new_cep)
+ siw_cep_put(new_cep);
+
+ if (new_s) {
+ siw_socket_disassoc(new_s);
+ sock_release(new_s);
+ new_cep->sock = NULL;
+ }
+ siw_dbg_cep(cep, "error %d\n", rv);
+}
+
+static void siw_cm_work_handler(struct work_struct *w)
+{
+ struct siw_cm_work *work;
+ struct siw_cep *cep;
+ int release_cep = 0, rv = 0;
+
+ work = container_of(w, struct siw_cm_work, work.work);
+ cep = work->cep;
+
+ siw_dbg_cep(cep, "[QP %u]: work type: %d, state %d\n",
+ cep->qp ? qp_id(cep->qp) : UINT_MAX,
+ work->type, cep->state);
+
+ siw_cep_set_inuse(cep);
+
+ switch (work->type) {
+ case SIW_CM_WORK_ACCEPT:
+ siw_accept_newconn(cep);
+ break;
+
+ case SIW_CM_WORK_READ_MPAHDR:
+ if (cep->state == SIW_EPSTATE_AWAIT_MPAREQ) {
+ if (cep->listen_cep) {
+ siw_cep_set_inuse(cep->listen_cep);
+
+ if (cep->listen_cep->state ==
+ SIW_EPSTATE_LISTENING)
+ rv = siw_proc_mpareq(cep);
+ else
+ rv = -EFAULT;
+
+ siw_cep_set_free(cep->listen_cep);
+
+ if (rv != -EAGAIN) {
+ siw_cep_put(cep->listen_cep);
+ cep->listen_cep = NULL;
+ if (rv)
+ siw_cep_put(cep);
+ }
+ }
+ } else if (cep->state == SIW_EPSTATE_AWAIT_MPAREP) {
+ rv = siw_proc_mpareply(cep);
+ } else {
+ /*
+ * CEP already moved out of MPA handshake.
+ * Any connection management has already been done.
+ * Silently ignore the MPA packet.
+ */
+ if (cep->state == SIW_EPSTATE_RDMA_MODE) {
+ cep->sock->sk->sk_data_ready(cep->sock->sk);
+ siw_dbg_cep(cep, "already in RDMA mode");
+ } else {
+ siw_dbg_cep(cep, "out of state: %d\n",
+ cep->state);
+ }
+ }
+ if (rv && rv != -EAGAIN)
+ release_cep = 1;
+ break;
+
+ case SIW_CM_WORK_CLOSE_LLP:
+ /*
+ * QP scheduled LLP close
+ */
+ if (cep->qp)
+ siw_send_terminate(cep->qp);
+
+ if (cep->cm_id)
+ siw_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);
+
+ release_cep = 1;
+ break;
+
+ case SIW_CM_WORK_PEER_CLOSE:
+ if (cep->cm_id) {
+ if (cep->state == SIW_EPSTATE_AWAIT_MPAREP) {
+ /*
+ * MPA reply not received, but connection drop
+ */
+ siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
+ -ECONNRESET);
+ } else if (cep->state == SIW_EPSTATE_RDMA_MODE) {
+ /*
+ * NOTE: IW_CM_EVENT_DISCONNECT is given just
+ * to transition IWCM into CLOSING.
+ */
+ siw_cm_upcall(cep, IW_CM_EVENT_DISCONNECT, 0);
+ siw_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);
+ }
+ /*
+ * for other states there is no connection
+ * known to the IWCM.
+ */
+ } else {
+ if (cep->state == SIW_EPSTATE_RECVD_MPAREQ) {
+ /*
+ * Wait for the ulp/CM to call accept/reject
+ */
+ siw_dbg_cep(cep,
+ "mpa req recvd, wait for ULP\n");
+ } else if (cep->state == SIW_EPSTATE_AWAIT_MPAREQ) {
+ /*
+ * Socket close before MPA request received.
+ */
+ if (cep->listen_cep) {
+ siw_dbg_cep(cep,
+ "no mpareq: drop listener\n");
+ siw_cep_put(cep->listen_cep);
+ cep->listen_cep = NULL;
+ }
+ }
+ }
+ release_cep = 1;
+ break;
+
+ case SIW_CM_WORK_MPATIMEOUT:
+ cep->mpa_timer = NULL;
+
+ if (cep->state == SIW_EPSTATE_AWAIT_MPAREP) {
+ /*
+ * MPA request timed out:
+ * Hide any partially received private data and signal
+ * timeout
+ */
+ cep->mpa.hdr.params.pd_len = 0;
+
+ if (cep->cm_id)
+ siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
+ -ETIMEDOUT);
+ release_cep = 1;
+
+ } else if (cep->state == SIW_EPSTATE_AWAIT_MPAREQ) {
+ /*
+ * No MPA request received after peer TCP stream setup.
+ */
+ if (cep->listen_cep) {
+ siw_cep_put(cep->listen_cep);
+ cep->listen_cep = NULL;
+ }
+ release_cep = 1;
+ }
+ break;
+
+ default:
+ WARN(1, "Undefined CM work type: %d\n", work->type);
+ }
+ if (release_cep) {
+ siw_dbg_cep(cep,
+ "release: timer=%s, QP[%u]\n",
+ cep->mpa_timer ? "y" : "n",
+ cep->qp ? qp_id(cep->qp) : UINT_MAX);
+
+ siw_cancel_mpatimer(cep);
+
+ cep->state = SIW_EPSTATE_CLOSED;
+
+ if (cep->qp) {
+ struct siw_qp *qp = cep->qp;
+ /*
+ * Serialize a potential race with application
+ * closing the QP and calling siw_qp_cm_drop()
+ */
+ siw_qp_get(qp);
+ siw_cep_set_free(cep);
+
+ siw_qp_llp_close(qp);
+ siw_qp_put(qp);
+
+ siw_cep_set_inuse(cep);
+ cep->qp = NULL;
+ siw_qp_put(qp);
+ }
+ if (cep->sock) {
+ siw_socket_disassoc(cep->sock);
+ sock_release(cep->sock);
+ cep->sock = NULL;
+ }
+ if (cep->cm_id) {
+ siw_free_cm_id(cep);
+ siw_cep_put(cep);
+ }
+ }
+ siw_cep_set_free(cep);
+ siw_put_work(work);
+ siw_cep_put(cep);
+}
+
+static struct workqueue_struct *siw_cm_wq;
+
+int siw_cm_queue_work(struct siw_cep *cep, enum siw_work_type type)
+{
+ struct siw_cm_work *work = siw_get_work(cep);
+ unsigned long delay = 0;
+
+ if (!work) {
+ siw_dbg_cep(cep, "failed with no work available\n");
+ return -ENOMEM;
+ }
+ work->type = type;
+ work->cep = cep;
+
+ siw_cep_get(cep);
+
+ INIT_DELAYED_WORK(&work->work, siw_cm_work_handler);
+
+ if (type == SIW_CM_WORK_MPATIMEOUT) {
+ cep->mpa_timer = work;
+
+ if (cep->state == SIW_EPSTATE_AWAIT_MPAREP)
+ delay = MPAREQ_TIMEOUT;
+ else
+ delay = MPAREP_TIMEOUT;
+ }
+ siw_dbg_cep(cep, "[QP %u]: work type: %d, timeout %lu\n",
+ cep->qp ? qp_id(cep->qp) : -1, type, delay);
+
+ queue_delayed_work(siw_cm_wq, &work->work, delay);
+
+ return 0;
+}
+
+static void siw_cm_llp_data_ready(struct sock *sk)
+{
+ struct siw_cep *cep;
+
+ trace_sk_data_ready(sk);
+
+ read_lock(&sk->sk_callback_lock);
+
+ cep = sk_to_cep(sk);
+ if (!cep)
+ goto out;
+
+ siw_dbg_cep(cep, "cep state: %d, socket state %d\n",
+ cep->state, sk->sk_state);
+
+ if (sk->sk_state != TCP_ESTABLISHED)
+ goto out;
+
+ switch (cep->state) {
+ case SIW_EPSTATE_RDMA_MODE:
+ case SIW_EPSTATE_LISTENING:
+ break;
+
+ case SIW_EPSTATE_AWAIT_MPAREQ:
+ case SIW_EPSTATE_AWAIT_MPAREP:
+ siw_cm_queue_work(cep, SIW_CM_WORK_READ_MPAHDR);
+ break;
+
+ default:
+ siw_dbg_cep(cep, "unexpected data, state %d\n", cep->state);
+ break;
+ }
+out:
+ read_unlock(&sk->sk_callback_lock);
+}
+
+static void siw_cm_llp_write_space(struct sock *sk)
+{
+ struct siw_cep *cep = sk_to_cep(sk);
+
+ if (cep)
+ siw_dbg_cep(cep, "state: %d\n", cep->state);
+}
+
+static void siw_cm_llp_error_report(struct sock *sk)
+{
+ struct siw_cep *cep = sk_to_cep(sk);
+
+ if (cep) {
+ siw_dbg_cep(cep, "error %d, socket state: %d, cep state: %d\n",
+ sk->sk_err, sk->sk_state, cep->state);
+ cep->sk_error_report(sk);
+ }
+}
+
+static void siw_cm_llp_state_change(struct sock *sk)
+{
+ struct siw_cep *cep;
+ void (*orig_state_change)(struct sock *s);
+
+ read_lock(&sk->sk_callback_lock);
+
+ cep = sk_to_cep(sk);
+ if (!cep) {
+ /* endpoint already disassociated */
+ read_unlock(&sk->sk_callback_lock);
+ return;
+ }
+ orig_state_change = cep->sk_state_change;
+
+ siw_dbg_cep(cep, "state: %d\n", cep->state);
+
+ switch (sk->sk_state) {
+ case TCP_ESTABLISHED:
+ /*
+ * handle accepting socket as special case where only
+ * new connection is possible
+ */
+ siw_cm_queue_work(cep, SIW_CM_WORK_ACCEPT);
+ break;
+
+ case TCP_CLOSE:
+ case TCP_CLOSE_WAIT:
+ if (cep->qp)
+ cep->qp->tx_ctx.tx_suspend = 1;
+ siw_cm_queue_work(cep, SIW_CM_WORK_PEER_CLOSE);
+ break;
+
+ default:
+ siw_dbg_cep(cep, "unexpected socket state %d\n", sk->sk_state);
+ }
+ read_unlock(&sk->sk_callback_lock);
+ orig_state_change(sk);
+}
+
+static int kernel_bindconnect(struct socket *s, struct sockaddr *laddr,
+ struct sockaddr *raddr, bool afonly)
+{
+ int rv, flags = 0;
+ size_t size = laddr->sa_family == AF_INET ?
+ sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);
+
+ /*
+ * Make address available again asap.
+ */
+ sock_set_reuseaddr(s->sk);
+
+ if (afonly) {
+ rv = ip6_sock_set_v6only(s->sk);
+ if (rv)
+ return rv;
+ }
+
+ rv = s->ops->bind(s, laddr, size);
+ if (rv < 0)
+ return rv;
+
+ rv = s->ops->connect(s, raddr, size, flags);
+
+ return rv < 0 ? rv : 0;
+}
+
+int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
+{
+ struct siw_device *sdev = to_siw_dev(id->device);
+ struct siw_qp *qp;
+ struct siw_cep *cep = NULL;
+ struct socket *s = NULL;
+ struct sockaddr *laddr = (struct sockaddr *)&id->local_addr,
+ *raddr = (struct sockaddr *)&id->remote_addr;
+ bool p2p_mode = peer_to_peer, v4 = true;
+ u16 pd_len = params->private_data_len;
+ int version = mpa_version, rv;
+
+ if (pd_len > MPA_MAX_PRIVDATA)
+ return -EINVAL;
+
+ if (params->ird > sdev->attrs.max_ird ||
+ params->ord > sdev->attrs.max_ord)
+ return -ENOMEM;
+
+ if (laddr->sa_family == AF_INET6)
+ v4 = false;
+ else if (laddr->sa_family != AF_INET)
+ return -EAFNOSUPPORT;
+
+ /*
+ * Respect any iwarp port mapping: Use mapped remote address
+ * if valid. Local address must not be mapped, since siw
+ * uses kernel TCP stack.
+ */
+ if ((v4 && to_sockaddr_in(id->remote_addr).sin_port != 0) ||
+ to_sockaddr_in6(id->remote_addr).sin6_port != 0)
+ raddr = (struct sockaddr *)&id->m_remote_addr;
+
+ qp = siw_qp_id2obj(sdev, params->qpn);
+ if (!qp) {
+ WARN(1, "[QP %u] does not exist\n", params->qpn);
+ rv = -EINVAL;
+ goto error;
+ }
+ siw_dbg_qp(qp, "pd_len %d, laddr %pISp, raddr %pISp\n", pd_len, laddr,
+ raddr);
+
+ rv = sock_create(v4 ? AF_INET : AF_INET6, SOCK_STREAM, IPPROTO_TCP, &s);
+ if (rv < 0)
+ goto error;
+
+ /*
+ * NOTE: For simplification, connect() is called in blocking
+ * mode. Might be reconsidered for async connection setup at
+ * TCP level.
+ */
+ rv = kernel_bindconnect(s, laddr, raddr, id->afonly);
+ if (rv != 0) {
+ siw_dbg_qp(qp, "kernel_bindconnect: error %d\n", rv);
+ goto error;
+ }
+ if (siw_tcp_nagle == false)
+ tcp_sock_set_nodelay(s->sk);
+ cep = siw_cep_alloc(sdev);
+ if (!cep) {
+ rv = -ENOMEM;
+ goto error;
+ }
+ siw_cep_set_inuse(cep);
+
+ /* Associate QP with CEP */
+ siw_cep_get(cep);
+ qp->cep = cep;
+
+ /* siw_qp_get(qp) already done by QP lookup */
+ cep->qp = qp;
+
+ id->add_ref(id);
+ cep->cm_id = id;
+
+ /*
+ * 4: Allocate a sufficient number of work elements
+ * to allow concurrent handling of local + peer close
+ * events, MPA header processing + MPA timeout.
+ */
+ rv = siw_cm_alloc_work(cep, 4);
+ if (rv != 0) {
+ rv = -ENOMEM;
+ goto error;
+ }
+ cep->ird = params->ird;
+ cep->ord = params->ord;
+
+ if (p2p_mode && cep->ord == 0)
+ cep->ord = 1;
+
+ cep->state = SIW_EPSTATE_CONNECTING;
+
+ /*
+ * Associate CEP with socket
+ */
+ siw_cep_socket_assoc(cep, s);
+
+ cep->state = SIW_EPSTATE_AWAIT_MPAREP;
+
+ /*
+ * Set MPA Request bits: CRC if required, no MPA Markers,
+ * MPA Rev. according to module parameter 'mpa_version', Key 'Request'.
+ */
+ cep->mpa.hdr.params.bits = 0;
+ if (version > MPA_REVISION_2) {
+ pr_warn("Setting MPA version to %u\n", MPA_REVISION_2);
+ version = MPA_REVISION_2;
+ /* Adjust also module parameter */
+ mpa_version = MPA_REVISION_2;
+ }
+ __mpa_rr_set_revision(&cep->mpa.hdr.params.bits, version);
+
+ if (try_gso)
+ cep->mpa.hdr.params.bits |= MPA_RR_FLAG_GSO_EXP;
+
+ if (mpa_crc_required)
+ cep->mpa.hdr.params.bits |= MPA_RR_FLAG_CRC;
+
+ /*
+ * If MPA version == 2:
+ * o Include ORD and IRD.
+ * o Indicate peer-to-peer mode, if required by module
+ * parameter 'peer_to_peer'.
+ */
+ if (version == MPA_REVISION_2) {
+ cep->enhanced_rdma_conn_est = true;
+ cep->mpa.hdr.params.bits |= MPA_RR_FLAG_ENHANCED;
+
+ cep->mpa.v2_ctrl.ird = htons(cep->ird);
+ cep->mpa.v2_ctrl.ord = htons(cep->ord);
+
+ if (p2p_mode) {
+ cep->mpa.v2_ctrl.ird |= MPA_V2_PEER_TO_PEER;
+ cep->mpa.v2_ctrl.ord |= rtr_type;
+ }
+ /* Remember own P2P mode requested */
+ cep->mpa.v2_ctrl_req.ird = cep->mpa.v2_ctrl.ird;
+ cep->mpa.v2_ctrl_req.ord = cep->mpa.v2_ctrl.ord;
+ }
+ memcpy(cep->mpa.hdr.key, MPA_KEY_REQ, 16);
+
+ rv = siw_send_mpareqrep(cep, params->private_data, pd_len);
+ /*
+ * Reset private data.
+ */
+ cep->mpa.hdr.params.pd_len = 0;
+
+ if (rv >= 0) {
+ rv = siw_cm_queue_work(cep, SIW_CM_WORK_MPATIMEOUT);
+ if (!rv) {
+ siw_dbg_cep(cep, "[QP %u]: exit\n", qp_id(qp));
+ siw_cep_set_free(cep);
+ return 0;
+ }
+ }
+error:
+ siw_dbg(id->device, "failed: %d\n", rv);
+
+ if (cep) {
+ siw_socket_disassoc(s);
+ sock_release(s);
+ cep->sock = NULL;
+
+ cep->qp = NULL;
+
+ cep->cm_id = NULL;
+ id->rem_ref(id);
+
+ qp->cep = NULL;
+ siw_cep_put(cep);
+
+ cep->state = SIW_EPSTATE_CLOSED;
+
+ siw_cep_set_free_and_put(cep);
+
+ } else if (s) {
+ sock_release(s);
+ }
+ if (qp)
+ siw_qp_put(qp);
+
+ return rv;
+}
+
+/*
+ * siw_accept - Let SoftiWARP accept an RDMA connection request
+ *
+ * @id: New connection management id to be used for accepted
+ * connection request
+ * @params: Connection parameters provided by ULP for accepting connection
+ *
+ * Transition QP to RTS state, associate new CM id @id with accepted CEP
+ * and get prepared for TCP input by installing socket callbacks.
+ * Then send MPA Reply and generate the "connection established" event.
+ * Socket callbacks must be installed before sending MPA Reply, because
+ * the latter may cause a first RDMA message to arrive from the RDMA Initiator
+ * side very quickly, at which time the socket callbacks must be ready.
+ */
+int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
+{
+ struct siw_device *sdev = to_siw_dev(id->device);
+ struct siw_cep *cep = (struct siw_cep *)id->provider_data;
+ struct siw_qp *qp;
+ struct siw_qp_attrs qp_attrs;
+ int rv = -EINVAL, max_priv_data = MPA_MAX_PRIVDATA;
+ bool wait_for_peer_rts = false;
+
+ siw_cep_set_inuse(cep);
+ siw_cep_put(cep);
+
+ /* Free lingering inbound private data */
+ if (cep->mpa.hdr.params.pd_len) {
+ cep->mpa.hdr.params.pd_len = 0;
+ kfree(cep->mpa.pdata);
+ cep->mpa.pdata = NULL;
+ }
+ siw_cancel_mpatimer(cep);
+
+ if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
+ siw_dbg_cep(cep, "out of state\n");
+ rv = -ECONNRESET;
+ goto free_cep;
+ }
+ qp = siw_qp_id2obj(sdev, params->qpn);
+ if (!qp) {
+ WARN(1, "[QP %d] does not exist\n", params->qpn);
+ goto free_cep;
+ }
+ down_write(&qp->state_lock);
+ if (qp->attrs.state > SIW_QP_STATE_RTR)
+ goto error_unlock;
+ siw_dbg_cep(cep, "[QP %d]\n", params->qpn);
+
+ if (try_gso && cep->mpa.hdr.params.bits & MPA_RR_FLAG_GSO_EXP) {
+ siw_dbg_cep(cep, "peer allows GSO on TX\n");
+ qp->tx_ctx.gso_seg_limit = 0;
+ }
+ if (params->ord > sdev->attrs.max_ord ||
+ params->ird > sdev->attrs.max_ird) {
+ siw_dbg_cep(
+ cep,
+ "[QP %u]: ord %d (max %d), ird %d (max %d)\n",
+ qp_id(qp), params->ord, sdev->attrs.max_ord,
+ params->ird, sdev->attrs.max_ird);
+ goto error_unlock;
+ }
+ if (cep->enhanced_rdma_conn_est)
+ max_priv_data -= sizeof(struct mpa_v2_data);
+
+ if (params->private_data_len > max_priv_data) {
+ siw_dbg_cep(
+ cep,
+ "[QP %u]: private data length: %d (max %d)\n",
+ qp_id(qp), params->private_data_len, max_priv_data);
+ goto error_unlock;
+ }
+ if (cep->enhanced_rdma_conn_est) {
+ if (params->ord > cep->ord) {
+ if (relaxed_ird_negotiation) {
+ params->ord = cep->ord;
+ } else {
+ cep->ird = params->ird;
+ cep->ord = params->ord;
+ goto error_unlock;
+ }
+ }
+ if (params->ird < cep->ird) {
+ if (relaxed_ird_negotiation &&
+ cep->ird <= sdev->attrs.max_ird)
+ params->ird = cep->ird;
+ else {
+ rv = -ENOMEM;
+ goto error_unlock;
+ }
+ }
+ if (cep->mpa.v2_ctrl.ord &
+ (MPA_V2_RDMA_WRITE_RTR | MPA_V2_RDMA_READ_RTR))
+ wait_for_peer_rts = true;
+ /*
+ * Signal back negotiated IRD and ORD values
+ */
+ cep->mpa.v2_ctrl.ord =
+ htons(params->ord & MPA_IRD_ORD_MASK) |
+ (cep->mpa.v2_ctrl.ord & ~MPA_V2_MASK_IRD_ORD);
+ cep->mpa.v2_ctrl.ird =
+ htons(params->ird & MPA_IRD_ORD_MASK) |
+ (cep->mpa.v2_ctrl.ird & ~MPA_V2_MASK_IRD_ORD);
+ }
+ cep->ird = params->ird;
+ cep->ord = params->ord;
+
+ cep->cm_id = id;
+ id->add_ref(id);
+
+ memset(&qp_attrs, 0, sizeof(qp_attrs));
+ qp_attrs.orq_size = cep->ord;
+ qp_attrs.irq_size = cep->ird;
+ qp_attrs.sk = cep->sock;
+ if (cep->mpa.hdr.params.bits & MPA_RR_FLAG_CRC)
+ qp_attrs.flags = SIW_MPA_CRC;
+ qp_attrs.state = SIW_QP_STATE_RTS;
+
+ siw_dbg_cep(cep, "[QP%u]: moving to rts\n", qp_id(qp));
+
+ /* Associate QP with CEP */
+ siw_cep_get(cep);
+ qp->cep = cep;
+
+ /* siw_qp_get(qp) already done by QP lookup */
+ cep->qp = qp;
+
+ cep->state = SIW_EPSTATE_RDMA_MODE;
+
+ /* Move socket RX/TX under QP control */
+ rv = siw_qp_modify(qp, &qp_attrs,
+ SIW_QP_ATTR_STATE | SIW_QP_ATTR_LLP_HANDLE |
+ SIW_QP_ATTR_ORD | SIW_QP_ATTR_IRD |
+ SIW_QP_ATTR_MPA);
+ up_write(&qp->state_lock);
+ if (rv)
+ goto error;
+
+ siw_dbg_cep(cep, "[QP %u]: send mpa reply, %d byte pdata\n",
+ qp_id(qp), params->private_data_len);
+
+ rv = siw_send_mpareqrep(cep, params->private_data,
+ params->private_data_len);
+ if (rv != 0)
+ goto error;
+
+ if (wait_for_peer_rts) {
+ siw_sk_assign_rtr_upcalls(cep);
+ } else {
+ siw_qp_socket_assoc(cep, qp);
+ rv = siw_cm_upcall(cep, IW_CM_EVENT_ESTABLISHED, 0);
+ if (rv)
+ goto error;
+ }
+ siw_cep_set_free(cep);
+
+ return 0;
+
+error_unlock:
+ up_write(&qp->state_lock);
+error:
+ siw_destroy_cep_sock(cep);
+
+ cep->state = SIW_EPSTATE_CLOSED;
+
+ siw_free_cm_id(cep);
+ if (qp->cep) {
+ siw_cep_put(cep);
+ qp->cep = NULL;
+ }
+ cep->qp = NULL;
+ siw_qp_put(qp);
+free_cep:
+ siw_cep_set_free_and_put(cep);
+ return rv;
+}
+
+/*
+ * siw_reject()
+ *
+ * Local connection reject case. Send private data back to peer,
+ * close connection and dereference connection id.
+ */
+int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len)
+{
+ struct siw_cep *cep = (struct siw_cep *)id->provider_data;
+
+ siw_cep_set_inuse(cep);
+ siw_cep_put(cep);
+
+ siw_cancel_mpatimer(cep);
+
+ if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
+ siw_dbg_cep(cep, "out of state\n");
+
+ siw_cep_set_free_and_put(cep); /* put last reference */
+
+ return -ECONNRESET;
+ }
+ siw_dbg_cep(cep, "cep->state %d, pd_len %d\n", cep->state,
+ pd_len);
+
+ if (__mpa_rr_revision(cep->mpa.hdr.params.bits) >= MPA_REVISION_1) {
+ cep->mpa.hdr.params.bits |= MPA_RR_FLAG_REJECT; /* reject */
+ siw_send_mpareqrep(cep, pdata, pd_len);
+ }
+ siw_destroy_cep_sock(cep);
+
+ cep->state = SIW_EPSTATE_CLOSED;
+
+ siw_cep_set_free_and_put(cep);
+
+ return 0;
+}
+
+/*
+ * siw_create_listen - Create resources for a listener's IWCM ID @id
+ *
+ * Starts listen on the socket address id->local_addr.
+ *
+ */
+int siw_create_listen(struct iw_cm_id *id, int backlog)
+{
+ struct socket *s;
+ struct siw_cep *cep = NULL;
+ struct net_device *ndev = NULL;
+ struct siw_device *sdev = to_siw_dev(id->device);
+ int addr_family = id->local_addr.ss_family;
+ int rv = 0;
+
+ if (addr_family != AF_INET && addr_family != AF_INET6)
+ return -EAFNOSUPPORT;
+
+ rv = sock_create(addr_family, SOCK_STREAM, IPPROTO_TCP, &s);
+ if (rv < 0)
+ return rv;
+
+ /*
+ * Allow binding local port when still in TIME_WAIT from last close.
+ */
+ sock_set_reuseaddr(s->sk);
+
+ if (addr_family == AF_INET) {
+ struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);
+
+ /* For wildcard addr, limit binding to current device only */
+ if (ipv4_is_zeronet(laddr->sin_addr.s_addr)) {
+ ndev = ib_device_get_netdev(id->device, SIW_PORT);
+ if (ndev) {
+ s->sk->sk_bound_dev_if = ndev->ifindex;
+ } else {
+ rv = -ENODEV;
+ goto error;
+ }
+ }
+ rv = s->ops->bind(s, (struct sockaddr *)laddr,
+ sizeof(struct sockaddr_in));
+ } else {
+ struct sockaddr_in6 *laddr = &to_sockaddr_in6(id->local_addr);
+
+ if (id->afonly) {
+ rv = ip6_sock_set_v6only(s->sk);
+ if (rv) {
+ siw_dbg(id->device,
+ "ip6_sock_set_v6only erro: %d\n", rv);
+ goto error;
+ }
+ }
+
+ /* For wildcard addr, limit binding to current device only */
+ if (ipv6_addr_any(&laddr->sin6_addr)) {
+ ndev = ib_device_get_netdev(id->device, SIW_PORT);
+ if (ndev) {
+ s->sk->sk_bound_dev_if = ndev->ifindex;
+ } else {
+ rv = -ENODEV;
+ goto error;
+ }
+ }
+ rv = s->ops->bind(s, (struct sockaddr *)laddr,
+ sizeof(struct sockaddr_in6));
+ }
+ if (rv) {
+ siw_dbg(id->device, "socket bind error: %d\n", rv);
+ goto error;
+ }
+ cep = siw_cep_alloc(sdev);
+ if (!cep) {
+ rv = -ENOMEM;
+ goto error;
+ }
+ siw_cep_socket_assoc(cep, s);
+
+ rv = siw_cm_alloc_work(cep, backlog);
+ if (rv) {
+ siw_dbg(id->device,
+ "alloc_work error %d, backlog %d\n",
+ rv, backlog);
+ goto error;
+ }
+ rv = s->ops->listen(s, backlog);
+ if (rv) {
+ siw_dbg(id->device, "listen error %d\n", rv);
+ goto error;
+ }
+ cep->cm_id = id;
+ id->add_ref(id);
+
+ /*
+ * In case of a wildcard rdma_listen on a multi-homed device,
+ * a listener's IWCM id is associated with more than one listening CEP.
+ *
+ * We currently use id->provider_data in three different ways:
+ *
+ * o For a listener's IWCM id, id->provider_data points to
+ * the list_head of the list of listening CEPs.
+ * Uses: siw_create_listen(), siw_destroy_listen()
+ *
+ * o For each accepted passive-side IWCM id, id->provider_data
+ * points to the CEP itself. This is a consequence of
+ * - siw_cm_upcall() setting event.provider_data = cep and
+ * - the IWCM's cm_conn_req_handler() setting provider_data of the
+ * new passive-side IWCM id equal to event.provider_data
+ * Uses: siw_accept(), siw_reject()
+ *
+ * o For an active-side IWCM id, id->provider_data is not used at all.
+ *
+ */
+ if (!id->provider_data) {
+ id->provider_data =
+ kmalloc(sizeof(struct list_head), GFP_KERNEL);
+ if (!id->provider_data) {
+ rv = -ENOMEM;
+ goto error;
+ }
+ INIT_LIST_HEAD((struct list_head *)id->provider_data);
+ }
+ list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
+ cep->state = SIW_EPSTATE_LISTENING;
+ dev_put(ndev);
+
+ siw_dbg(id->device, "Listen at laddr %pISp\n", &id->local_addr);
+
+ return 0;
+
+error:
+ siw_dbg(id->device, "failed: %d\n", rv);
+
+ if (cep) {
+ siw_cep_set_inuse(cep);
+
+ siw_free_cm_id(cep);
+ cep->sock = NULL;
+ siw_socket_disassoc(s);
+ cep->state = SIW_EPSTATE_CLOSED;
+
+ siw_cep_set_free_and_put(cep);
+ }
+ sock_release(s);
+ dev_put(ndev);
+
+ return rv;
+}
+
+static void siw_drop_listeners(struct iw_cm_id *id)
+{
+ struct list_head *p, *tmp;
+
+ /*
+ * In case of a wildcard rdma_listen on a multi-homed device,
+ * a listener's IWCM id is associated with more than one listening CEP.
+ */
+ list_for_each_safe(p, tmp, (struct list_head *)id->provider_data) {
+ struct siw_cep *cep = list_entry(p, struct siw_cep, listenq);
+
+ list_del(p);
+
+ siw_dbg_cep(cep, "drop cep, state %d\n", cep->state);
+
+ siw_cep_set_inuse(cep);
+
+ siw_free_cm_id(cep);
+ if (cep->sock) {
+ siw_socket_disassoc(cep->sock);
+ sock_release(cep->sock);
+ cep->sock = NULL;
+ }
+ cep->state = SIW_EPSTATE_CLOSED;
+ siw_cep_set_free_and_put(cep);
+ }
+}
+
+int siw_destroy_listen(struct iw_cm_id *id)
+{
+ if (!id->provider_data) {
+ siw_dbg(id->device, "no cep(s)\n");
+ return 0;
+ }
+ siw_drop_listeners(id);
+ kfree(id->provider_data);
+ id->provider_data = NULL;
+
+ return 0;
+}
+
+int siw_cm_init(void)
+{
+ /*
+ * create_singlethread_workqueue for strict ordering
+ */
+ siw_cm_wq = create_singlethread_workqueue("siw_cm_wq");
+ if (!siw_cm_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void siw_cm_exit(void)
+{
+ if (siw_cm_wq)
+ destroy_workqueue(siw_cm_wq);
+}
diff --git a/drivers/infiniband/sw/siw/siw_cm.h b/drivers/infiniband/sw/siw/siw_cm.h
new file mode 100644
index 000000000000..7011c8a8ee7b
--- /dev/null
+++ b/drivers/infiniband/sw/siw/siw_cm.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Greg Joyce <greg@opengridcomputing.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+/* Copyright (c) 2017, Open Grid Computing, Inc. */
+
+#ifndef _SIW_CM_H
+#define _SIW_CM_H
+
+#include <net/sock.h>
+#include <linux/tcp.h>
+
+#include <rdma/iw_cm.h>
+
+enum siw_cep_state {
+ SIW_EPSTATE_IDLE = 1,
+ SIW_EPSTATE_LISTENING,
+ SIW_EPSTATE_CONNECTING,
+ SIW_EPSTATE_AWAIT_MPAREQ,
+ SIW_EPSTATE_RECVD_MPAREQ,
+ SIW_EPSTATE_AWAIT_MPAREP,
+ SIW_EPSTATE_RDMA_MODE,
+ SIW_EPSTATE_CLOSED
+};
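+
+/*
+ * Typical state progressions, as implemented in siw_cm.c:
+ *
+ * passive side: LISTENING -> AWAIT_MPAREQ -> RECVD_MPAREQ -> RDMA_MODE
+ * active side: CONNECTING -> AWAIT_MPAREP -> RDMA_MODE
+ *
+ * Either side ends up in SIW_EPSTATE_CLOSED on connection teardown.
+ */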
+
+struct siw_mpa_info {
+ struct mpa_rr hdr; /* peer mpa hdr in host byte order */
+ struct mpa_v2_data v2_ctrl;
+ struct mpa_v2_data v2_ctrl_req;
+ char *pdata;
+ int bytes_rcvd;
+};
+
+struct siw_device;
+
+struct siw_cep {
+ struct iw_cm_id *cm_id;
+ struct siw_device *sdev;
+ struct list_head devq;
+ spinlock_t lock;
+ struct kref ref;
+ int in_use;
+ wait_queue_head_t waitq;
+ enum siw_cep_state state;
+
+ struct list_head listenq;
+ struct siw_cep *listen_cep;
+
+ struct siw_qp *qp;
+ struct socket *sock;
+
+ struct siw_cm_work *mpa_timer;
+ struct list_head work_freelist;
+
+ struct siw_mpa_info mpa;
+ int ord;
+ int ird;
+ bool enhanced_rdma_conn_est;
+
+ /* Saved upcalls of socket */
+ void (*sk_state_change)(struct sock *sk);
+ void (*sk_data_ready)(struct sock *sk);
+ void (*sk_write_space)(struct sock *sk);
+ void (*sk_error_report)(struct sock *sk);
+};
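+
+/*
+ * The 'in_use'/'waitq' pair serializes access to CEP state: callers
+ * bracket state manipulation with siw_cep_set_inuse()/siw_cep_set_free(),
+ * see e.g. siw_accept() and siw_cm_work_handler() in siw_cm.c.
+ */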
+
+/*
+ * Connection initiator waits 10 seconds to receive an
+ * MPA reply after sending out an MPA request. The responder
+ * waits 5 seconds for an MPA request to arrive after a new
+ * TCP connection was set up.
+ */
+#define MPAREQ_TIMEOUT (HZ * 10)
+#define MPAREP_TIMEOUT (HZ * 5)
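+
+/*
+ * siw_cm_queue_work() in siw_cm.c arms the MPA timer with the timeout
+ * matching the endpoint state, e.g.
+ *
+ * delay = (cep->state == SIW_EPSTATE_AWAIT_MPAREP) ?
+ * MPAREQ_TIMEOUT : MPAREP_TIMEOUT;
+ */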
+
+enum siw_work_type {
+ SIW_CM_WORK_ACCEPT = 1,
+ SIW_CM_WORK_READ_MPAHDR,
+ SIW_CM_WORK_CLOSE_LLP, /* close socket */
+ SIW_CM_WORK_PEER_CLOSE, /* socket indicated peer close */
+ SIW_CM_WORK_MPATIMEOUT
+};
+
+struct siw_cm_work {
+ struct delayed_work work;
+ struct list_head list;
+ enum siw_work_type type;
+ struct siw_cep *cep;
+};
+
+#define to_sockaddr_in(a) (*(struct sockaddr_in *)(&(a)))
+#define to_sockaddr_in6(a) (*(struct sockaddr_in6 *)(&(a)))
+
+static inline int getname_peer(struct socket *s, struct sockaddr_storage *a)
+{
+ return s->ops->getname(s, (struct sockaddr *)a, 1);
+}
+
+static inline int getname_local(struct socket *s, struct sockaddr_storage *a)
+{
+ return s->ops->getname(s, (struct sockaddr *)a, 0);
+}
+
+static inline int ksock_recv(struct socket *sock, char *buf, size_t size,
+ int flags)
+{
+ struct kvec iov = { buf, size };
+ struct msghdr msg = { .msg_name = NULL, .msg_flags = flags };
+
+ return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
+}
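+
+/*
+ * Illustrative use (see siw_recv_mpa_rr() in siw_cm.c): receive the
+ * fixed-size MPA header in possibly multiple chunks and treat a closed
+ * peer socket as a connection abort:
+ *
+ * rcvd = ksock_recv(s, (char *)hdr + cep->mpa.bytes_rcvd,
+ * sizeof(struct mpa_rr) - cep->mpa.bytes_rcvd, 0);
+ * if (rcvd <= 0)
+ * return -ECONNABORTED;
+ */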
+
+int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *parm);
+int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *param);
+int siw_reject(struct iw_cm_id *id, const void *data, u8 len);
+int siw_create_listen(struct iw_cm_id *id, int backlog);
+int siw_destroy_listen(struct iw_cm_id *id);
+
+void siw_cep_get(struct siw_cep *cep);
+void siw_cep_put(struct siw_cep *cep);
+int siw_cm_queue_work(struct siw_cep *cep, enum siw_work_type type);
+
+int siw_cm_init(void);
+void siw_cm_exit(void);
+
+/*
+ * TCP socket interface
+ */
+#define sk_to_qp(sk) (((struct siw_cep *)((sk)->sk_user_data))->qp)
+#define sk_to_cep(sk) ((struct siw_cep *)((sk)->sk_user_data))
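+
+/*
+ * Illustrative use (see siw_cm_llp_data_ready() in siw_cm.c): socket
+ * upcalls resolve the CEP under sk_callback_lock:
+ *
+ * read_lock(&sk->sk_callback_lock);
+ * cep = sk_to_cep(sk);
+ * if (cep)
+ * ...
+ * read_unlock(&sk->sk_callback_lock);
+ */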
+
+#endif
diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c
new file mode 100644
index 000000000000..25b3c741b66b
--- /dev/null
+++ b/drivers/infiniband/sw/siw/siw_cq.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+#include <rdma/ib_verbs.h>
+
+#include "siw.h"
+
+static int map_wc_opcode[SIW_NUM_OPCODES] = {
+ [SIW_OP_WRITE] = IB_WC_RDMA_WRITE,
+ [SIW_OP_SEND] = IB_WC_SEND,
+ [SIW_OP_SEND_WITH_IMM] = IB_WC_SEND,
+ [SIW_OP_READ] = IB_WC_RDMA_READ,
+ [SIW_OP_READ_LOCAL_INV] = IB_WC_RDMA_READ,
+ [SIW_OP_COMP_AND_SWAP] = IB_WC_COMP_SWAP,
+ [SIW_OP_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
+ [SIW_OP_INVAL_STAG] = IB_WC_LOCAL_INV,
+ [SIW_OP_REG_MR] = IB_WC_REG_MR,
+ [SIW_OP_RECEIVE] = IB_WC_RECV,
+ [SIW_OP_READ_RESPONSE] = -1 /* not used */
+};
+
+static struct {
+ enum siw_wc_status siw;
+ enum ib_wc_status ib;
+} map_cqe_status[SIW_NUM_WC_STATUS] = {
+ { SIW_WC_SUCCESS, IB_WC_SUCCESS },
+ { SIW_WC_LOC_LEN_ERR, IB_WC_LOC_LEN_ERR },
+ { SIW_WC_LOC_PROT_ERR, IB_WC_LOC_PROT_ERR },
+ { SIW_WC_LOC_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR },
+ { SIW_WC_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR },
+ { SIW_WC_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR },
+ { SIW_WC_LOC_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR },
+ { SIW_WC_REM_ACCESS_ERR, IB_WC_REM_ACCESS_ERR },
+ { SIW_WC_REM_INV_REQ_ERR, IB_WC_REM_INV_REQ_ERR },
+ { SIW_WC_GENERAL_ERR, IB_WC_GENERAL_ERR }
+};
+
+/*
+ * Reap one CQE from the CQ. Only used by kernel clients
+ * during CQ normal operation. Might be called during CQ
+ * flush for user mapped CQE array as well.
+ */
+int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
+{
+ struct siw_cqe *cqe;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cq->lock, flags);
+
+ cqe = &cq->queue[cq->cq_get % cq->num_cqe];
+ if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) {
+ memset(wc, 0, sizeof(*wc));
+ wc->wr_id = cqe->id;
+ wc->byte_len = cqe->bytes;
+
+ /*
+ * During CQ flush, user-land CQEs may also get
+ * reaped here. Those do not hold a QP reference
+ * and do not qualify for memory extension verbs.
+ */
+ if (likely(rdma_is_kernel_res(&cq->base_cq.res))) {
+ if (cqe->flags & SIW_WQE_REM_INVAL) {
+ wc->ex.invalidate_rkey = cqe->inval_stag;
+ wc->wc_flags = IB_WC_WITH_INVALIDATE;
+ }
+ wc->qp = cqe->base_qp;
+ wc->opcode = map_wc_opcode[cqe->opcode];
+ wc->status = map_cqe_status[cqe->status].ib;
+ siw_dbg_cq(cq,
+ "idx %u, type %d, flags %2x, id 0x%p\n",
+ cq->cq_get % cq->num_cqe, cqe->opcode,
+ cqe->flags, (void *)(uintptr_t)cqe->id);
+ } else {
+ /*
+ * A malicious user may set invalid opcode or
+ * status in the user mmapped CQE array.
+ * Sanity check and correct values in that case
+ * to avoid out-of-bounds access to global arrays
+ * for opcode and status mapping.
+ */
+ u8 opcode = cqe->opcode;
+ u16 status = cqe->status;
+
+ if (opcode >= SIW_NUM_OPCODES) {
+ opcode = 0;
+ status = SIW_WC_GENERAL_ERR;
+ } else if (status >= SIW_NUM_WC_STATUS) {
+ status = SIW_WC_GENERAL_ERR;
+ }
+ wc->opcode = map_wc_opcode[opcode];
+ wc->status = map_cqe_status[status].ib;
+
+ }
+ WRITE_ONCE(cqe->flags, 0);
+ cq->cq_get++;
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ return 1;
+ }
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ return 0;
+}
+
+/*
+ * siw_cq_flush()
+ *
+ * Flush all CQ elements.
+ */
+void siw_cq_flush(struct siw_cq *cq)
+{
+ struct ib_wc wc;
+
+ while (siw_reap_cqe(cq, &wc))
+ ;
+}
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
new file mode 100644
index 000000000000..5168307229a9
--- /dev/null
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -0,0 +1,508 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <net/net_namespace.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_arp.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+
+#include <net/addrconf.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/rdma_netlink.h>
+#include <linux/kthread.h>
+
+#include "siw.h"
+#include "siw_verbs.h"
+
+MODULE_AUTHOR("Bernard Metzler");
+MODULE_DESCRIPTION("Software iWARP Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/* transmit from user buffer, if possible */
+const bool zcopy_tx = true;
+
+/* Restrict usage of GSO if the hardware iWARP peer is unable to process
+ * large packets. try_gso = true lets siw try to use local GSO,
+ * if the peer agrees. Not using GSO severely limits siw maximum tx bandwidth.
+ */
+const bool try_gso;
+
+/* Attach siw also with loopback devices */
+const bool loopback_enabled = true;
+
+/* We try to negotiate CRC on, if true */
+const bool mpa_crc_required;
+
+/* MPA CRC on/off enforced */
+const bool mpa_crc_strict;
+
+/* Control TCP_NODELAY socket option */
+const bool siw_tcp_nagle;
+
+/* Select MPA version to be used during connection setup */
+u_char mpa_version = MPA_REVISION_2;
+
+/* Select MPA P2P mode (additional handshake during connection
+ * setup), if true.
+ */
+const bool peer_to_peer;
+
+struct task_struct *siw_tx_thread[NR_CPUS];
+
+static int siw_device_register(struct siw_device *sdev, const char *name)
+{
+ struct ib_device *base_dev = &sdev->base_dev;
+ static int dev_id = 1;
+ int rv;
+
+ sdev->vendor_part_id = dev_id++;
+
+ rv = ib_register_device(base_dev, name, NULL);
+ if (rv) {
+ pr_warn("siw: device registration error %d\n", rv);
+ return rv;
+ }
+
+ siw_dbg(base_dev, "HWaddr=%pM\n", sdev->raw_gid);
+ return 0;
+}
+
+static void siw_device_cleanup(struct ib_device *base_dev)
+{
+ struct siw_device *sdev = to_siw_dev(base_dev);
+
+ xa_destroy(&sdev->qp_xa);
+ xa_destroy(&sdev->mem_xa);
+}
+
+static int siw_dev_qualified(struct net_device *netdev)
+{
+ /*
+ * Additional hardware support can be added here
+ * (e.g. ARPHRD_FDDI, ARPHRD_ATM, ...) - see
+ * <linux/if_arp.h> for type identifiers.
+ */
+ if (netdev->type == ARPHRD_ETHER || netdev->type == ARPHRD_IEEE802 ||
+ netdev->type == ARPHRD_NONE ||
+ (netdev->type == ARPHRD_LOOPBACK && loopback_enabled))
+ return 1;
+
+ return 0;
+}
+
+static DEFINE_PER_CPU(atomic_t, siw_use_cnt);
+
+static struct {
+ struct cpumask **tx_valid_cpus;
+ int num_nodes;
+} siw_cpu_info;
+
+static void siw_destroy_cpulist(int number)
+{
+ int i = 0;
+
+ while (i < number)
+ kfree(siw_cpu_info.tx_valid_cpus[i++]);
+
+ kfree(siw_cpu_info.tx_valid_cpus);
+ siw_cpu_info.tx_valid_cpus = NULL;
+}
+
+static int siw_init_cpulist(void)
+{
+ int i, num_nodes = nr_node_ids;
+
+ memset(siw_tx_thread, 0, sizeof(siw_tx_thread));
+
+ siw_cpu_info.num_nodes = num_nodes;
+
+ siw_cpu_info.tx_valid_cpus =
+ kcalloc(num_nodes, sizeof(struct cpumask *), GFP_KERNEL);
+ if (!siw_cpu_info.tx_valid_cpus) {
+ siw_cpu_info.num_nodes = 0;
+ return -ENOMEM;
+ }
+ for (i = 0; i < siw_cpu_info.num_nodes; i++) {
+ siw_cpu_info.tx_valid_cpus[i] =
+ kzalloc(sizeof(struct cpumask), GFP_KERNEL);
+ if (!siw_cpu_info.tx_valid_cpus[i])
+ goto out_err;
+
+ cpumask_clear(siw_cpu_info.tx_valid_cpus[i]);
+ }
+ for_each_possible_cpu(i)
+ cpumask_set_cpu(i, siw_cpu_info.tx_valid_cpus[cpu_to_node(i)]);
+
+ return 0;
+
+out_err:
+ siw_cpu_info.num_nodes = 0;
+ siw_destroy_cpulist(i);
+
+ return -ENOMEM;
+}
+
+/*
+ * Choose the CPU with the least number of active QPs from the
+ * NUMA node of the TX interface.
+ */
+int siw_get_tx_cpu(struct siw_device *sdev)
+{
+ const struct cpumask *tx_cpumask;
+ int i, num_cpus, cpu, min_use, node = sdev->numa_node, tx_cpu = -1;
+
+ if (node < 0)
+ tx_cpumask = cpu_online_mask;
+ else
+ tx_cpumask = siw_cpu_info.tx_valid_cpus[node];
+
+ num_cpus = cpumask_weight(tx_cpumask);
+ if (!num_cpus) {
+ /* no CPU on this NUMA node */
+ tx_cpumask = cpu_online_mask;
+ num_cpus = cpumask_weight(tx_cpumask);
+ }
+ if (!num_cpus)
+ goto out;
+
+ cpu = cpumask_first(tx_cpumask);
+
+ for (i = 0, min_use = SIW_MAX_QP; i < num_cpus;
+ i++, cpu = cpumask_next(cpu, tx_cpumask)) {
+ int usage;
+
+ /* Skip any cores which have no TX thread */
+ if (!siw_tx_thread[cpu])
+ continue;
+
+ usage = atomic_read(&per_cpu(siw_use_cnt, cpu));
+ if (usage <= min_use) {
+ tx_cpu = cpu;
+ min_use = usage;
+ }
+ }
+ siw_dbg(&sdev->base_dev,
+ "tx cpu %d, node %d, %d qp's\n", tx_cpu, node, min_use);
+
+out:
+ if (tx_cpu >= 0)
+ atomic_inc(&per_cpu(siw_use_cnt, tx_cpu));
+ else
+ pr_warn("siw: no tx cpu found\n");
+
+ return tx_cpu;
+}
+
+void siw_put_tx_cpu(int cpu)
+{
+ atomic_dec(&per_cpu(siw_use_cnt, cpu));
+}
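+
+/*
+ * Hedged usage sketch (callers live outside this file): a QP needing a
+ * TX core is expected to take one at creation time and release it again
+ * when the QP is freed; the 'tx_cpu' field name is shown for illustration
+ * only:
+ *
+ * qp->tx_cpu = siw_get_tx_cpu(sdev);
+ * ...
+ * siw_put_tx_cpu(qp->tx_cpu);
+ */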
+
+static struct ib_qp *siw_get_base_qp(struct ib_device *base_dev, int id)
+{
+ struct siw_qp *qp = siw_qp_id2obj(to_siw_dev(base_dev), id);
+
+ if (qp) {
+ /*
+ * siw_qp_id2obj() increments object reference count
+ */
+ siw_qp_put(qp);
+ return &qp->base_qp;
+ }
+ return NULL;
+}
+
+static const struct ib_device_ops siw_device_ops = {
+ .owner = THIS_MODULE,
+ .uverbs_abi_ver = SIW_ABI_VERSION,
+ .driver_id = RDMA_DRIVER_SIW,
+
+ .alloc_mr = siw_alloc_mr,
+ .alloc_pd = siw_alloc_pd,
+ .alloc_ucontext = siw_alloc_ucontext,
+ .create_cq = siw_create_cq,
+ .create_qp = siw_create_qp,
+ .create_srq = siw_create_srq,
+ .dealloc_driver = siw_device_cleanup,
+ .dealloc_pd = siw_dealloc_pd,
+ .dealloc_ucontext = siw_dealloc_ucontext,
+ .dereg_mr = siw_dereg_mr,
+ .destroy_cq = siw_destroy_cq,
+ .destroy_qp = siw_destroy_qp,
+ .destroy_srq = siw_destroy_srq,
+ .get_dma_mr = siw_get_dma_mr,
+ .get_port_immutable = siw_get_port_immutable,
+ .iw_accept = siw_accept,
+ .iw_add_ref = siw_qp_get_ref,
+ .iw_connect = siw_connect,
+ .iw_create_listen = siw_create_listen,
+ .iw_destroy_listen = siw_destroy_listen,
+ .iw_get_qp = siw_get_base_qp,
+ .iw_reject = siw_reject,
+ .iw_rem_ref = siw_qp_put_ref,
+ .map_mr_sg = siw_map_mr_sg,
+ .mmap = siw_mmap,
+ .mmap_free = siw_mmap_free,
+ .modify_qp = siw_verbs_modify_qp,
+ .modify_srq = siw_modify_srq,
+ .poll_cq = siw_poll_cq,
+ .post_recv = siw_post_receive,
+ .post_send = siw_post_send,
+ .post_srq_recv = siw_post_srq_recv,
+ .query_device = siw_query_device,
+ .query_gid = siw_query_gid,
+ .query_port = siw_query_port,
+ .query_qp = siw_query_qp,
+ .query_srq = siw_query_srq,
+ .req_notify_cq = siw_req_notify_cq,
+ .reg_user_mr = siw_reg_user_mr,
+
+ INIT_RDMA_OBJ_SIZE(ib_cq, siw_cq, base_cq),
+ INIT_RDMA_OBJ_SIZE(ib_pd, siw_pd, base_pd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, siw_qp, base_qp),
+ INIT_RDMA_OBJ_SIZE(ib_srq, siw_srq, base_srq),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, siw_ucontext, base_ucontext),
+};
+
+static struct siw_device *siw_device_create(struct net_device *netdev)
+{
+ struct siw_device *sdev = NULL;
+ struct ib_device *base_dev;
+ int rv;
+
+ sdev = ib_alloc_device(siw_device, base_dev);
+ if (!sdev)
+ return NULL;
+
+ base_dev = &sdev->base_dev;
+
+ if (netdev->addr_len) {
+ memcpy(sdev->raw_gid, netdev->dev_addr,
+ min_t(unsigned int, netdev->addr_len, ETH_ALEN));
+ } else {
+ /*
+ * This device does not have a HW address, but
+ * connection management requires a unique gid.
+ */
+ eth_random_addr(sdev->raw_gid);
+ }
+ addrconf_addr_eui48((u8 *)&base_dev->node_guid, sdev->raw_gid);
+
+ base_dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND);
+
+ base_dev->node_type = RDMA_NODE_RNIC;
+ memcpy(base_dev->node_desc, SIW_NODE_DESC_COMMON,
+ sizeof(SIW_NODE_DESC_COMMON));
+
+ /*
+ * Current model (one-to-one device association):
+ * One Softiwarp device per net_device or, equivalently,
+ * per physical port.
+ */
+ base_dev->phys_port_cnt = 1;
+ base_dev->num_comp_vectors = num_possible_cpus();
+
+ xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);
+ xa_init_flags(&sdev->mem_xa, XA_FLAGS_ALLOC1);
+
+ ib_set_device_ops(base_dev, &siw_device_ops);
+ rv = ib_device_set_netdev(base_dev, netdev, 1);
+ if (rv)
+ goto error;
+
+ memcpy(base_dev->iw_ifname, netdev->name,
+ sizeof(base_dev->iw_ifname));
+
+ /* Disable TCP port mapping */
+ base_dev->iw_driver_flags = IW_F_NO_PORT_MAP;
+
+ sdev->attrs.max_qp = SIW_MAX_QP;
+ sdev->attrs.max_qp_wr = SIW_MAX_QP_WR;
+ sdev->attrs.max_ord = SIW_MAX_ORD_QP;
+ sdev->attrs.max_ird = SIW_MAX_IRD_QP;
+ sdev->attrs.max_sge = SIW_MAX_SGE;
+ sdev->attrs.max_sge_rd = SIW_MAX_SGE_RD;
+ sdev->attrs.max_cq = SIW_MAX_CQ;
+ sdev->attrs.max_cqe = SIW_MAX_CQE;
+ sdev->attrs.max_mr = SIW_MAX_MR;
+ sdev->attrs.max_pd = SIW_MAX_PD;
+ sdev->attrs.max_mw = SIW_MAX_MW;
+ sdev->attrs.max_srq = SIW_MAX_SRQ;
+ sdev->attrs.max_srq_wr = SIW_MAX_SRQ_WR;
+ sdev->attrs.max_srq_sge = SIW_MAX_SGE;
+
+ INIT_LIST_HEAD(&sdev->cep_list);
+ INIT_LIST_HEAD(&sdev->qp_list);
+
+ atomic_set(&sdev->num_ctx, 0);
+ atomic_set(&sdev->num_srq, 0);
+ atomic_set(&sdev->num_qp, 0);
+ atomic_set(&sdev->num_cq, 0);
+ atomic_set(&sdev->num_mr, 0);
+ atomic_set(&sdev->num_pd, 0);
+
+ sdev->numa_node = dev_to_node(&netdev->dev);
+ spin_lock_init(&sdev->lock);
+
+ return sdev;
+error:
+ ib_dealloc_device(base_dev);
+
+ return NULL;
+}
+
+static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
+ void *arg)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(arg);
+ struct ib_device *base_dev;
+ struct siw_device *sdev;
+
+ dev_dbg(&netdev->dev, "siw: event %lu\n", event);
+
+ base_dev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_SIW);
+ if (!base_dev)
+ return NOTIFY_OK;
+
+ sdev = to_siw_dev(base_dev);
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ /*
+ * Device registration now handled only by
+ * rdma netlink commands. So it shall be impossible
+ * to end up here with a valid siw device.
+ */
+ siw_dbg(base_dev, "unexpected NETDEV_REGISTER event\n");
+ break;
+
+ case NETDEV_UNREGISTER:
+ ib_unregister_device_queued(&sdev->base_dev);
+ break;
+
+ case NETDEV_CHANGEADDR:
+ siw_port_event(sdev, 1, IB_EVENT_LID_CHANGE);
+ break;
+ /*
+ * All other events are not handled
+ */
+ default:
+ break;
+ }
+ ib_device_put(&sdev->base_dev);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block siw_netdev_nb = {
+ .notifier_call = siw_netdev_event,
+};
+
+static int siw_newlink(const char *basedev_name, struct net_device *netdev)
+{
+ struct ib_device *base_dev;
+ struct siw_device *sdev = NULL;
+ int rv = -ENOMEM;
+
+ if (!siw_dev_qualified(netdev))
+ return -EINVAL;
+
+ base_dev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_SIW);
+ if (base_dev) {
+ ib_device_put(base_dev);
+ return -EEXIST;
+ }
+ sdev = siw_device_create(netdev);
+ if (sdev) {
+ dev_dbg(&netdev->dev, "siw: new device\n");
+ ib_mark_name_assigned_by_user(&sdev->base_dev);
+ rv = siw_device_register(sdev, basedev_name);
+ if (rv)
+ ib_dealloc_device(&sdev->base_dev);
+ }
+ return rv;
+}
+
+static struct rdma_link_ops siw_link_ops = {
+ .type = "siw",
+ .newlink = siw_newlink,
+};
+
+/*
+ * siw_init_module - Initialize Softiwarp module and register with netdev
+ * subsystem.
+ */
+static __init int siw_init_module(void)
+{
+ int rv;
+
+ if (SENDPAGE_THRESH < SIW_MAX_INLINE) {
+ pr_info("siw: sendpage threshold too small: %u\n",
+ (int)SENDPAGE_THRESH);
+ rv = -EINVAL;
+ goto out_error;
+ }
+ rv = siw_init_cpulist();
+ if (rv)
+ goto out_error;
+
+ rv = siw_cm_init();
+ if (rv)
+ goto out_error;
+
+ if (!siw_create_tx_threads()) {
+ pr_info("siw: Could not start any TX thread\n");
+ rv = -ENOMEM;
+ goto out_error;
+ }
+
+ rv = register_netdevice_notifier(&siw_netdev_nb);
+ if (rv)
+ goto out_error;
+
+ rdma_link_register(&siw_link_ops);
+
+ pr_info("SoftiWARP attached\n");
+ return 0;
+
+out_error:
+ siw_stop_tx_threads();
+
+ pr_info("SoftIWARP attach failed. Error: %d\n", rv);
+
+ siw_cm_exit();
+ siw_destroy_cpulist(siw_cpu_info.num_nodes);
+
+ return rv;
+}
+
+static void __exit siw_exit_module(void)
+{
+ siw_stop_tx_threads();
+
+ unregister_netdevice_notifier(&siw_netdev_nb);
+ rdma_link_unregister(&siw_link_ops);
+ ib_unregister_driver(RDMA_DRIVER_SIW);
+
+ siw_cm_exit();
+
+ siw_destroy_cpulist(siw_cpu_info.num_nodes);
+
+ pr_info("SoftiWARP detached\n");
+}
+
+module_init(siw_init_module);
+module_exit(siw_exit_module);
+
+MODULE_ALIAS_RDMA_LINK("siw");
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
new file mode 100644
index 000000000000..d5ddeb17bd22
--- /dev/null
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -0,0 +1,400 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+
+#include <linux/gfp.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_umem.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/sched/mm.h>
+#include <linux/resource.h>
+
+#include "siw.h"
+#include "siw_mem.h"
+
+/* Stag lookup is based on its index part only (24 bits). */
+#define SIW_STAG_MAX_INDEX 0x00ffffff
+
+/*
+ * siw_mem_id2obj()
+ *
+ * Resolves memory from the STag given by its index. Might be called from:
+ * o process context before sending out of sgl, or
+ * o in softirq when resolving target memory
+ */
+struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index)
+{
+ struct siw_mem *mem;
+
+ rcu_read_lock();
+ mem = xa_load(&sdev->mem_xa, stag_index);
+ if (likely(mem && kref_get_unless_zero(&mem->ref))) {
+ rcu_read_unlock();
+ return mem;
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+void siw_umem_release(struct siw_umem *umem)
+{
+ int i, num_pages = umem->num_pages;
+
+ if (umem->base_mem)
+ ib_umem_release(umem->base_mem);
+
+ for (i = 0; num_pages > 0; i++) {
+ kfree(umem->page_chunk[i].plist);
+ num_pages -= PAGES_PER_CHUNK;
+ }
+ kfree(umem->page_chunk);
+ kfree(umem);
+}
+
+int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
+ u64 start, u64 len, int rights)
+{
+ struct siw_device *sdev = to_siw_dev(pd->device);
+ struct siw_mem *mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ struct xa_limit limit = XA_LIMIT(1, SIW_STAG_MAX_INDEX);
+ u32 id, next;
+
+ if (!mem)
+ return -ENOMEM;
+
+ mem->mem_obj = mem_obj;
+ mem->stag_valid = 0;
+ mem->sdev = sdev;
+ mem->va = start;
+ mem->len = len;
+ mem->pd = pd;
+ mem->perms = rights & IWARP_ACCESS_MASK;
+ kref_init(&mem->ref);
+
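+ /* Start cyclic STag index allocation at a random position */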
+ get_random_bytes(&next, 4);
+ next &= SIW_STAG_MAX_INDEX;
+
+ if (xa_alloc_cyclic(&sdev->mem_xa, &id, mem, limit, &next,
+ GFP_KERNEL) < 0) {
+ kfree(mem);
+ return -ENOMEM;
+ }
+
+ mr->mem = mem;
+ /* Set the STag index part */
+ mem->stag = id << 8;
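+ /* The 8-bit STag key (low bits) remains zero here */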
+ mr->base_mr.lkey = mr->base_mr.rkey = mem->stag;
+
+ return 0;
+}
+
+void siw_mr_drop_mem(struct siw_mr *mr)
+{
+ struct siw_mem *mem = mr->mem, *found;
+
+ mem->stag_valid = 0;
+
+ /* make STag invalid visible asap */
+ smp_mb();
+
+ found = xa_erase(&mem->sdev->mem_xa, mem->stag >> 8);
+ WARN_ON(found != mem);
+ siw_mem_put(mem);
+}
+
+void siw_free_mem(struct kref *ref)
+{
+ struct siw_mem *mem = container_of(ref, struct siw_mem, ref);
+
+ siw_dbg_mem(mem, "free mem, pbl: %s\n", mem->is_pbl ? "y" : "n");
+
+ if (!mem->is_mw && mem->mem_obj) {
+ if (mem->is_pbl == 0)
+ siw_umem_release(mem->umem);
+ else
+ kfree(mem->pbl);
+ }
+ kfree(mem);
+}
+
+/*
+ * siw_check_mem()
+ *
+ * Check protection domain, STAG state, access permissions and
+ * address range for memory object.
+ *
+ * @pd: Protection Domain memory should belong to
+ * @mem: memory to be checked
+ * @addr: starting addr of mem
+ * @perms: requested access permissions
+ * @len: len of memory interval to be checked
+ *
+ */
+int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
+ enum ib_access_flags perms, int len)
+{
+ if (!mem->stag_valid) {
+ siw_dbg_pd(pd, "STag 0x%08x invalid\n", mem->stag);
+ return -E_STAG_INVALID;
+ }
+ if (mem->pd != pd) {
+ siw_dbg_pd(pd, "STag 0x%08x: PD mismatch\n", mem->stag);
+ return -E_PD_MISMATCH;
+ }
+ /*
+ * check access permissions
+ */
+ if ((mem->perms & perms) < perms) {
+ siw_dbg_pd(pd, "permissions 0x%08x < 0x%08x\n",
+ mem->perms, perms);
+ return -E_ACCESS_PERM;
+ }
+ /*
+ * Check if access falls into valid memory interval.
+ */
+ if (addr < mem->va || addr + len > mem->va + mem->len) {
+ siw_dbg_pd(pd, "MEM interval len %d\n", len);
+ siw_dbg_pd(pd, "[0x%p, 0x%p] out of bounds\n",
+ (void *)(uintptr_t)addr,
+ (void *)(uintptr_t)(addr + len));
+ siw_dbg_pd(pd, "[0x%p, 0x%p] STag=0x%08x\n",
+ (void *)(uintptr_t)mem->va,
+ (void *)(uintptr_t)(mem->va + mem->len),
+ mem->stag);
+
+ return -E_BASE_BOUNDS;
+ }
+ return E_ACCESS_OK;
+}
+
+/*
+ * siw_check_sge()
+ *
+ * Check SGE for access rights in given interval
+ *
+ * @pd: Protection Domain memory should belong to
+ * @sge: SGE to be checked
+ * @mem: location of memory reference within array
+ * @perms: requested access permissions
+ * @off: starting offset in SGE
+ * @len: len of memory interval to be checked
+ *
+ * NOTE: Function references the SGE's memory object (mem->obj)
+ * if not yet done. The new reference is kept if the check went ok,
+ * and released if it failed. If mem->obj is already valid, no new
+ * lookup is done and mem is not released if the check fails.
+ */
+int siw_check_sge(struct ib_pd *pd, struct siw_sge *sge, struct siw_mem *mem[],
+ enum ib_access_flags perms, u32 off, int len)
+{
+ struct siw_device *sdev = to_siw_dev(pd->device);
+ struct siw_mem *new = NULL;
+ int rv = E_ACCESS_OK;
+
+ if (len + off > sge->length) {
+ rv = -E_BASE_BOUNDS;
+ goto fail;
+ }
+ if (*mem == NULL) {
+ new = siw_mem_id2obj(sdev, sge->lkey >> 8);
+ if (unlikely(!new)) {
+ siw_dbg_pd(pd, "STag unknown: 0x%08x\n", sge->lkey);
+ rv = -E_STAG_INVALID;
+ goto fail;
+ }
+ *mem = new;
+ }
+ /* Check if user re-registered with different STag key */
+ if (unlikely((*mem)->stag != sge->lkey)) {
+ siw_dbg_mem((*mem), "STag mismatch: 0x%08x\n", sge->lkey);
+ rv = -E_STAG_INVALID;
+ goto fail;
+ }
+ rv = siw_check_mem(pd, *mem, sge->laddr + off, perms, len);
+ if (unlikely(rv))
+ goto fail;
+
+ return 0;
+
+fail:
+ if (new) {
+ *mem = NULL;
+ siw_mem_put(new);
+ }
+ return rv;
+}
+
+void siw_wqe_put_mem(struct siw_wqe *wqe, enum siw_opcode op)
+{
+ switch (op) {
+ case SIW_OP_SEND:
+ case SIW_OP_WRITE:
+ case SIW_OP_SEND_WITH_IMM:
+ case SIW_OP_SEND_REMOTE_INV:
+ case SIW_OP_READ:
+ case SIW_OP_READ_LOCAL_INV:
+ if (!(wqe->sqe.flags & SIW_WQE_INLINE))
+ siw_unref_mem_sgl(wqe->mem, wqe->sqe.num_sge);
+ break;
+
+ case SIW_OP_RECEIVE:
+ siw_unref_mem_sgl(wqe->mem, wqe->rqe.num_sge);
+ break;
+
+ case SIW_OP_READ_RESPONSE:
+ siw_unref_mem_sgl(wqe->mem, 1);
+ break;
+
+ default:
+ /*
+ * SIW_OP_INVAL_STAG and SIW_OP_REG_MR
+ * do not hold memory references
+ */
+ break;
+ }
+}
+
+int siw_invalidate_stag(struct ib_pd *pd, u32 stag)
+{
+ struct siw_device *sdev = to_siw_dev(pd->device);
+ struct siw_mem *mem = siw_mem_id2obj(sdev, stag >> 8);
+ int rv = 0;
+
+ if (unlikely(!mem)) {
+ siw_dbg_pd(pd, "STag 0x%08x unknown\n", stag);
+ return -EINVAL;
+ }
+ if (unlikely(mem->pd != pd)) {
+ siw_dbg_pd(pd, "PD mismatch for STag 0x%08x\n", stag);
+ rv = -EACCES;
+ goto out;
+ }
+ /*
+ * Per RDMA verbs definition, an STag may already be in invalid
+ * state if invalidation is requested. So no state check here.
+ */
+ mem->stag_valid = 0;
+
+ siw_dbg_pd(pd, "STag 0x%08x now invalid\n", stag);
+out:
+ siw_mem_put(mem);
+ return rv;
+}
+
+/*
+ * Gets physical address backed by PBL element. Address is referenced
+ * by linear byte offset into list of variably sized PB elements.
+ * Optionally, provides remaining len within current element, and
+ * current PBL index for later resume at same element.
+ */
+dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx)
+{
+ int i = idx ? *idx : 0;
+
+ while (i < pbl->num_buf) {
+ struct siw_pble *pble = &pbl->pbe[i];
+
+ if (pble->pbl_off + pble->size > off) {
+ u64 pble_off = off - pble->pbl_off;
+
+ if (len)
+ *len = pble->size - pble_off;
+ if (idx)
+ *idx = i;
+
+ return pble->addr + pble_off;
+ }
+ i++;
+ }
+ if (len)
+ *len = 0;
+ return 0;
+}
+
+struct siw_pbl *siw_pbl_alloc(u32 num_buf)
+{
+ struct siw_pbl *pbl;
+
+ if (num_buf == 0)
+ return ERR_PTR(-EINVAL);
+
+ pbl = kzalloc(struct_size(pbl, pbe, num_buf), GFP_KERNEL);
+ if (!pbl)
+ return ERR_PTR(-ENOMEM);
+
+ pbl->max_buf = num_buf;
+
+ return pbl;
+}
+
+struct siw_umem *siw_umem_get(struct ib_device *base_dev, u64 start,
+ u64 len, int rights)
+{
+ struct siw_umem *umem;
+ struct ib_umem *base_mem;
+ struct sg_page_iter sg_iter;
+ struct sg_table *sgt;
+ u64 first_page_va;
+ int num_pages, num_chunks, i, rv = 0;
+
+ if (!len)
+ return ERR_PTR(-EINVAL);
+
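+ /* Pages spanned by [start, start + len) and chunks for their pointers */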
+ first_page_va = start & PAGE_MASK;
+ num_pages = PAGE_ALIGN(start + len - first_page_va) >> PAGE_SHIFT;
+ num_chunks = (num_pages >> CHUNK_SHIFT) + 1;
+
+ umem = kzalloc(sizeof(*umem), GFP_KERNEL);
+ if (!umem)
+ return ERR_PTR(-ENOMEM);
+
+ umem->page_chunk =
+ kcalloc(num_chunks, sizeof(struct siw_page_chunk), GFP_KERNEL);
+ if (!umem->page_chunk) {
+ rv = -ENOMEM;
+ goto err_out;
+ }
+ base_mem = ib_umem_get(base_dev, start, len, rights);
+ if (IS_ERR(base_mem)) {
+ rv = PTR_ERR(base_mem);
+ siw_dbg(base_dev, "Cannot pin user memory: %d\n", rv);
+ goto err_out;
+ }
+ umem->fp_addr = first_page_va;
+ umem->base_mem = base_mem;
+
+ sgt = &base_mem->sgt_append.sgt;
+ __sg_page_iter_start(&sg_iter, sgt->sgl, sgt->orig_nents, 0);
+
+ if (!__sg_page_iter_next(&sg_iter)) {
+ rv = -EINVAL;
+ goto err_out;
+ }
+ for (i = 0; num_pages > 0; i++) {
+ int nents = min_t(int, num_pages, PAGES_PER_CHUNK);
+ struct page **plist =
+ kcalloc(nents, sizeof(struct page *), GFP_KERNEL);
+
+ if (!plist) {
+ rv = -ENOMEM;
+ goto err_out;
+ }
+ umem->page_chunk[i].plist = plist;
+ while (nents--) {
+ *plist = sg_page_iter_page(&sg_iter);
+ umem->num_pages++;
+ num_pages--;
+ plist++;
+ if (!__sg_page_iter_next(&sg_iter))
+ break;
+ }
+ }
+ return umem;
+err_out:
+ siw_umem_release(umem);
+
+ return ERR_PTR(rv);
+}
diff --git a/drivers/infiniband/sw/siw/siw_mem.h b/drivers/infiniband/sw/siw/siw_mem.h
new file mode 100644
index 000000000000..8e769d30e2ac
--- /dev/null
+++ b/drivers/infiniband/sw/siw/siw_mem.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+
+#ifndef _SIW_MEM_H
+#define _SIW_MEM_H
+
+struct siw_umem *siw_umem_get(struct ib_device *base_dev, u64 start,
+ u64 len, int rights);
+void siw_umem_release(struct siw_umem *umem);
+struct siw_pbl *siw_pbl_alloc(u32 num_buf);
+dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
+struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index);
+int siw_invalidate_stag(struct ib_pd *pd, u32 stag);
+int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
+ enum ib_access_flags perms, int len);
+int siw_check_sge(struct ib_pd *pd, struct siw_sge *sge,
+ struct siw_mem *mem[], enum ib_access_flags perms,
+ u32 off, int len);
+void siw_wqe_put_mem(struct siw_wqe *wqe, enum siw_opcode op);
+int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
+ u64 start, u64 len, int rights);
+void siw_mr_drop_mem(struct siw_mr *mr);
+void siw_free_mem(struct kref *ref);
+
+static inline void siw_mem_put(struct siw_mem *mem)
+{
+ kref_put(&mem->ref, siw_free_mem);
+}
+
+static inline void siw_unref_mem_sgl(struct siw_mem **mem, unsigned int num_sge)
+{
+ while (num_sge) {
+ if (*mem == NULL)
+ break;
+
+ siw_mem_put(*mem);
+ *mem = NULL;
+ mem++;
+ num_sge--;
+ }
+}
+
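+/*
+ * User page pointers are stored in chunks of 2^CHUNK_SHIFT (512) entries,
+ * so each plist allocation is PAGE_CHUNK_SIZE bytes (4 KiB with 8-byte
+ * page pointers).
+ */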
+#define CHUNK_SHIFT 9 /* sets number of pages per chunk */
+#define PAGES_PER_CHUNK (_AC(1, UL) << CHUNK_SHIFT)
+#define CHUNK_MASK (~(PAGES_PER_CHUNK - 1))
+#define PAGE_CHUNK_SIZE (PAGES_PER_CHUNK * sizeof(struct page *))
+
+/*
+ * siw_get_upage()
+ *
+ * Get page pointer for address on given umem.
+ *
+ * @umem: two dimensional list of page pointers
+ * @addr: user virtual address
+ */
+static inline struct page *siw_get_upage(struct siw_umem *umem, u64 addr)
+{
+ unsigned int page_idx = (addr - umem->fp_addr) >> PAGE_SHIFT,
+ chunk_idx = page_idx >> CHUNK_SHIFT,
+ page_in_chunk = page_idx & ~CHUNK_MASK;
+
+ if (likely(page_idx < umem->num_pages))
+ return umem->page_chunk[chunk_idx].plist[page_in_chunk];
+
+ return NULL;
+}
+#endif
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
new file mode 100644
index 000000000000..c1e6e7d6e32f
--- /dev/null
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -0,0 +1,1318 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/scatterlist.h>
+#include <linux/llist.h>
+#include <asm/barrier.h>
+#include <net/tcp.h>
+#include <trace/events/sock.h>
+
+#include "siw.h"
+#include "siw_verbs.h"
+#include "siw_mem.h"
+
+static char siw_qp_state_to_string[SIW_QP_STATE_COUNT][sizeof "TERMINATE"] = {
+ [SIW_QP_STATE_IDLE] = "IDLE",
+ [SIW_QP_STATE_RTR] = "RTR",
+ [SIW_QP_STATE_RTS] = "RTS",
+ [SIW_QP_STATE_CLOSING] = "CLOSING",
+ [SIW_QP_STATE_TERMINATE] = "TERMINATE",
+ [SIW_QP_STATE_ERROR] = "ERROR"
+};
+
+/*
+ * iWARP (RDMAP, DDP and MPA) parameters as well as Softiwarp settings on a
+ * per-RDMAP message basis. Please keep the order of initializers. The MPA
+ * length is initialized to the minimum packet size.
+ */
+struct iwarp_msg_info iwarp_pktinfo[RDMAP_TERMINATE + 1] = {
+ { /* RDMAP_RDMA_WRITE */
+ .hdr_len = sizeof(struct iwarp_rdma_write),
+ .ctrl.mpa_len = htons(sizeof(struct iwarp_rdma_write) - 2),
+ .ctrl.ddp_rdmap_ctrl = DDP_FLAG_TAGGED | DDP_FLAG_LAST |
+ cpu_to_be16(DDP_VERSION << 8) |
+ cpu_to_be16(RDMAP_VERSION << 6) |
+ cpu_to_be16(RDMAP_RDMA_WRITE),
+ .rx_data = siw_proc_write },
+ { /* RDMAP_RDMA_READ_REQ */
+ .hdr_len = sizeof(struct iwarp_rdma_rreq),
+ .ctrl.mpa_len = htons(sizeof(struct iwarp_rdma_rreq) - 2),
+ .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) |
+ cpu_to_be16(RDMAP_VERSION << 6) |
+ cpu_to_be16(RDMAP_RDMA_READ_REQ),
+ .rx_data = siw_proc_rreq },
+ { /* RDMAP_RDMA_READ_RESP */
+ .hdr_len = sizeof(struct iwarp_rdma_rresp),
+ .ctrl.mpa_len = htons(sizeof(struct iwarp_rdma_rresp) - 2),
+ .ctrl.ddp_rdmap_ctrl = DDP_FLAG_TAGGED | DDP_FLAG_LAST |
+ cpu_to_be16(DDP_VERSION << 8) |
+ cpu_to_be16(RDMAP_VERSION << 6) |
+ cpu_to_be16(RDMAP_RDMA_READ_RESP),
+ .rx_data = siw_proc_rresp },
+ { /* RDMAP_SEND */
+ .hdr_len = sizeof(struct iwarp_send),
+ .ctrl.mpa_len = htons(sizeof(struct iwarp_send) - 2),
+ .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) |
+ cpu_to_be16(RDMAP_VERSION << 6) |
+ cpu_to_be16(RDMAP_SEND),
+ .rx_data = siw_proc_send },
+ { /* RDMAP_SEND_INVAL */
+ .hdr_len = sizeof(struct iwarp_send_inv),
+ .ctrl.mpa_len = htons(sizeof(struct iwarp_send_inv) - 2),
+ .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) |
+ cpu_to_be16(RDMAP_VERSION << 6) |
+ cpu_to_be16(RDMAP_SEND_INVAL),
+ .rx_data = siw_proc_send },
+ { /* RDMAP_SEND_SE */
+ .hdr_len = sizeof(struct iwarp_send),
+ .ctrl.mpa_len = htons(sizeof(struct iwarp_send) - 2),
+ .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) |
+ cpu_to_be16(RDMAP_VERSION << 6) |
+ cpu_to_be16(RDMAP_SEND_SE),
+ .rx_data = siw_proc_send },
+ { /* RDMAP_SEND_SE_INVAL */
+ .hdr_len = sizeof(struct iwarp_send_inv),
+ .ctrl.mpa_len = htons(sizeof(struct iwarp_send_inv) - 2),
+ .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) |
+ cpu_to_be16(RDMAP_VERSION << 6) |
+ cpu_to_be16(RDMAP_SEND_SE_INVAL),
+ .rx_data = siw_proc_send },
+ { /* RDMAP_TERMINATE */
+ .hdr_len = sizeof(struct iwarp_terminate),
+ .ctrl.mpa_len = htons(sizeof(struct iwarp_terminate) - 2),
+ .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) |
+ cpu_to_be16(RDMAP_VERSION << 6) |
+ cpu_to_be16(RDMAP_TERMINATE),
+ .rx_data = siw_proc_terminate }
+};
+
+void siw_qp_llp_data_ready(struct sock *sk)
+{
+ struct siw_qp *qp;
+
+ trace_sk_data_ready(sk);
+
+ read_lock(&sk->sk_callback_lock);
+
+ if (unlikely(!sk->sk_user_data || !sk_to_qp(sk)))
+ goto done;
+
+ qp = sk_to_qp(sk);
+
+ if (likely(!qp->rx_stream.rx_suspend &&
+ down_read_trylock(&qp->state_lock))) {
+ read_descriptor_t rd_desc = { .arg.data = qp, .count = 1 };
+
+ if (likely(qp->attrs.state == SIW_QP_STATE_RTS))
+ /*
+ * Implements data receive operation during
+ * socket callback. TCP gracefully catches
+ * the case where there is nothing to receive
+ * (not calling siw_tcp_rx_data() then).
+ */
+ tcp_read_sock(sk, &rd_desc, siw_tcp_rx_data);
+
+ up_read(&qp->state_lock);
+ } else {
+ siw_dbg_qp(qp, "unable to process RX, suspend: %d\n",
+ qp->rx_stream.rx_suspend);
+ }
+done:
+ read_unlock(&sk->sk_callback_lock);
+}
+
+void siw_qp_llp_close(struct siw_qp *qp)
+{
+ siw_dbg_qp(qp, "enter llp close, state = %s\n",
+ siw_qp_state_to_string[qp->attrs.state]);
+
+ down_write(&qp->state_lock);
+
+ qp->rx_stream.rx_suspend = 1;
+ qp->tx_ctx.tx_suspend = 1;
+ qp->attrs.sk = NULL;
+
+ switch (qp->attrs.state) {
+ case SIW_QP_STATE_RTS:
+ case SIW_QP_STATE_RTR:
+ case SIW_QP_STATE_IDLE:
+ case SIW_QP_STATE_TERMINATE:
+ qp->attrs.state = SIW_QP_STATE_ERROR;
+ break;
+ /*
+ * SIW_QP_STATE_CLOSING:
+ *
+ * This is a forced close. Shall the QP be moved to
+ * ERROR or IDLE?
+ */
+ case SIW_QP_STATE_CLOSING:
+ if (tx_wqe(qp)->wr_status == SIW_WR_IDLE)
+ qp->attrs.state = SIW_QP_STATE_ERROR;
+ else
+ qp->attrs.state = SIW_QP_STATE_IDLE;
+ break;
+
+ default:
+ siw_dbg_qp(qp, "llp close: no state transition needed: %s\n",
+ siw_qp_state_to_string[qp->attrs.state]);
+ break;
+ }
+ siw_sq_flush(qp);
+ siw_rq_flush(qp);
+
+ /*
+ * Dereference closing CEP
+ */
+ if (qp->cep) {
+ siw_cep_put(qp->cep);
+ qp->cep = NULL;
+ }
+
+ up_write(&qp->state_lock);
+
+ siw_dbg_qp(qp, "llp close exit: state %s\n",
+ siw_qp_state_to_string[qp->attrs.state]);
+}
+
+/*
+ * socket callback routine informing about newly available send space.
+ * Function schedules SQ work for processing SQ items.
+ */
+void siw_qp_llp_write_space(struct sock *sk)
+{
+ struct siw_cep *cep;
+
+ read_lock(&sk->sk_callback_lock);
+
+ cep = sk_to_cep(sk);
+ if (cep) {
+ cep->sk_write_space(sk);
+
+ if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
+ (void)siw_sq_start(cep->qp);
+ }
+
+ read_unlock(&sk->sk_callback_lock);
+}
+
+static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size)
+{
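+ /* Read queue sizes are rounded up to a power of two */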
+ if (irq_size) {
+ irq_size = roundup_pow_of_two(irq_size);
+ qp->irq = vcalloc(irq_size, sizeof(struct siw_sqe));
+ if (!qp->irq) {
+ qp->attrs.irq_size = 0;
+ return -ENOMEM;
+ }
+ }
+ if (orq_size) {
+ orq_size = roundup_pow_of_two(orq_size);
+ qp->orq = vcalloc(orq_size, sizeof(struct siw_sqe));
+ if (!qp->orq) {
+ qp->attrs.orq_size = 0;
+ qp->attrs.irq_size = 0;
+ vfree(qp->irq);
+ return -ENOMEM;
+ }
+ }
+ qp->attrs.irq_size = irq_size;
+ qp->attrs.orq_size = orq_size;
+ siw_dbg_qp(qp, "ORD %d, IRD %d\n", orq_size, irq_size);
+ return 0;
+}
+
+/*
+ * Send a non-signalled READ or WRITE to the peer side as negotiated
+ * with MPAv2 P2P setup protocol. The work request is only created
+ * as a current active WR and does not consume Send Queue space.
+ *
+ * Caller must hold QP state lock.
+ */
+int siw_qp_mpa_rts(struct siw_qp *qp, enum mpa_v2_ctrl ctrl)
+{
+ struct siw_wqe *wqe = tx_wqe(qp);
+ unsigned long flags;
+ int rv = 0;
+
+ spin_lock_irqsave(&qp->sq_lock, flags);
+
+ if (unlikely(wqe->wr_status != SIW_WR_IDLE)) {
+ spin_unlock_irqrestore(&qp->sq_lock, flags);
+ return -EIO;
+ }
+ memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
+
+ wqe->wr_status = SIW_WR_QUEUED;
+ wqe->sqe.flags = 0;
+ wqe->sqe.num_sge = 1;
+ wqe->sqe.sge[0].length = 0;
+ wqe->sqe.sge[0].laddr = 0;
+ wqe->sqe.sge[0].lkey = 0;
+ /*
+ * While it must not be checked for inbound zero length
+ * READ/WRITE, some HW may treat STag 0 special.
+ */
+ wqe->sqe.rkey = 1;
+ wqe->sqe.raddr = 0;
+ wqe->processed = 0;
+
+ if (ctrl & MPA_V2_RDMA_WRITE_RTR)
+ wqe->sqe.opcode = SIW_OP_WRITE;
+ else if (ctrl & MPA_V2_RDMA_READ_RTR) {
+ struct siw_sqe *rreq = NULL;
+
+ wqe->sqe.opcode = SIW_OP_READ;
+
+ spin_lock(&qp->orq_lock);
+
+ if (qp->attrs.orq_size)
+ rreq = orq_get_free(qp);
+ if (rreq) {
+ siw_read_to_orq(rreq, &wqe->sqe);
+ qp->orq_put++;
+ } else
+ rv = -EIO;
+
+ spin_unlock(&qp->orq_lock);
+ } else
+ rv = -EINVAL;
+
+ if (rv)
+ wqe->wr_status = SIW_WR_IDLE;
+
+ spin_unlock_irqrestore(&qp->sq_lock, flags);
+
+ if (!rv)
+ rv = siw_sq_start(qp);
+
+ return rv;
+}
+
+/*
+ * Map memory access error to DDP tagged error
+ */
+enum ddp_ecode siw_tagged_error(enum siw_access_state state)
+{
+ switch (state) {
+ case E_STAG_INVALID:
+ return DDP_ECODE_T_INVALID_STAG;
+ case E_BASE_BOUNDS:
+ return DDP_ECODE_T_BASE_BOUNDS;
+ case E_PD_MISMATCH:
+ return DDP_ECODE_T_STAG_NOT_ASSOC;
+ case E_ACCESS_PERM:
+ /*
+ * RFC 5041 (DDP) lacks an ecode for insufficient access
+ * permissions. 'Invalid STag' seems to be the closest
+ * match, though.
+ */
+ return DDP_ECODE_T_INVALID_STAG;
+ default:
+ WARN_ON(1);
+ return DDP_ECODE_T_INVALID_STAG;
+ }
+}
+
+/*
+ * Map memory access error to RDMAP protection error
+ */
+enum rdmap_ecode siw_rdmap_error(enum siw_access_state state)
+{
+ switch (state) {
+ case E_STAG_INVALID:
+ return RDMAP_ECODE_INVALID_STAG;
+ case E_BASE_BOUNDS:
+ return RDMAP_ECODE_BASE_BOUNDS;
+ case E_PD_MISMATCH:
+ return RDMAP_ECODE_STAG_NOT_ASSOC;
+ case E_ACCESS_PERM:
+ return RDMAP_ECODE_ACCESS_RIGHTS;
+ default:
+ return RDMAP_ECODE_UNSPECIFIED;
+ }
+}
+
+void siw_init_terminate(struct siw_qp *qp, enum term_elayer layer, u8 etype,
+ u8 ecode, int in_tx)
+{
+ if (!qp->term_info.valid) {
+ memset(&qp->term_info, 0, sizeof(qp->term_info));
+ qp->term_info.layer = layer;
+ qp->term_info.etype = etype;
+ qp->term_info.ecode = ecode;
+ qp->term_info.in_tx = in_tx;
+ qp->term_info.valid = 1;
+ }
+ siw_dbg_qp(qp, "init TERM: layer %d, type %d, code %d, in tx %s\n",
+ layer, etype, ecode, in_tx ? "yes" : "no");
+}
+
+/*
+ * Send a TERMINATE message, as defined in RFCs 5040/5041/5044/6581.
+ * Sending TERMINATE messages is best effort - such messages
+ * can only be sent if the QP is still connected and does
+ * not have another outbound message in progress, i.e. the
+ * TERMINATE message must not interfere with an incomplete current
+ * transmit operation.
+ */
+void siw_send_terminate(struct siw_qp *qp)
+{
+ struct kvec iov[3];
+ struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
+ struct iwarp_terminate *term = NULL;
+ union iwarp_hdr *err_hdr = NULL;
+ struct socket *s = qp->attrs.sk;
+ struct siw_rx_stream *srx = &qp->rx_stream;
+ union iwarp_hdr *rx_hdr = &srx->hdr;
+ u32 crc = 0;
+ int num_frags, len_terminate, rv;
+
+ if (!qp->term_info.valid)
+ return;
+
+ qp->term_info.valid = 0;
+
+ if (tx_wqe(qp)->wr_status == SIW_WR_INPROGRESS) {
+ siw_dbg_qp(qp, "cannot send TERMINATE: op %d in progress\n",
+ tx_type(tx_wqe(qp)));
+ return;
+ }
+ if (!s && qp->cep)
+ /* QP not yet in RTS. Take socket from connection end point */
+ s = qp->cep->sock;
+
+ if (!s) {
+ siw_dbg_qp(qp, "cannot send TERMINATE: not connected\n");
+ return;
+ }
+
+ term = kzalloc(sizeof(*term), GFP_KERNEL);
+ if (!term)
+ return;
+
+ term->ddp_qn = cpu_to_be32(RDMAP_UNTAGGED_QN_TERMINATE);
+ term->ddp_mo = 0;
+ term->ddp_msn = cpu_to_be32(1);
+
+ iov[0].iov_base = term;
+ iov[0].iov_len = sizeof(*term);
+
+ if ((qp->term_info.layer == TERM_ERROR_LAYER_DDP) ||
+ ((qp->term_info.layer == TERM_ERROR_LAYER_RDMAP) &&
+ (qp->term_info.etype != RDMAP_ETYPE_CATASTROPHIC))) {
+ err_hdr = kzalloc(sizeof(*err_hdr), GFP_KERNEL);
+ if (!err_hdr) {
+ kfree(term);
+ return;
+ }
+ }
+ memcpy(&term->ctrl, &iwarp_pktinfo[RDMAP_TERMINATE].ctrl,
+ sizeof(struct iwarp_ctrl));
+
+ __rdmap_term_set_layer(term, qp->term_info.layer);
+ __rdmap_term_set_etype(term, qp->term_info.etype);
+ __rdmap_term_set_ecode(term, qp->term_info.ecode);
+
+ switch (qp->term_info.layer) {
+ case TERM_ERROR_LAYER_RDMAP:
+ if (qp->term_info.etype == RDMAP_ETYPE_CATASTROPHIC)
+ /* No additional DDP/RDMAP header to be included */
+ break;
+
+ if (qp->term_info.etype == RDMAP_ETYPE_REMOTE_PROTECTION) {
+ /*
+ * Complete RDMAP frame will get attached, and
+ * DDP segment length is valid
+ */
+ term->flag_m = 1;
+ term->flag_d = 1;
+ term->flag_r = 1;
+
+ if (qp->term_info.in_tx) {
+ struct iwarp_rdma_rreq *rreq;
+ struct siw_wqe *wqe = tx_wqe(qp);
+
+ /* Inbound RREQ error, detected during
+ * RRESP creation. Take state from
+ * current TX work queue element to
+ * reconstruct peers RREQ.
+ */
+ rreq = (struct iwarp_rdma_rreq *)err_hdr;
+
+ memcpy(&rreq->ctrl,
+ &iwarp_pktinfo[RDMAP_RDMA_READ_REQ].ctrl,
+ sizeof(struct iwarp_ctrl));
+
+ rreq->rsvd = 0;
+ rreq->ddp_qn =
+ htonl(RDMAP_UNTAGGED_QN_RDMA_READ);
+
+ /* Provide RREQ's MSN as kept aside */
+ rreq->ddp_msn = htonl(wqe->sqe.sge[0].length);
+
+ rreq->ddp_mo = htonl(wqe->processed);
+ rreq->sink_stag = htonl(wqe->sqe.rkey);
+ rreq->sink_to = cpu_to_be64(wqe->sqe.raddr);
+ rreq->read_size = htonl(wqe->sqe.sge[0].length);
+ rreq->source_stag = htonl(wqe->sqe.sge[0].lkey);
+ rreq->source_to =
+ cpu_to_be64(wqe->sqe.sge[0].laddr);
+
+ iov[1].iov_base = rreq;
+ iov[1].iov_len = sizeof(*rreq);
+
+ rx_hdr = (union iwarp_hdr *)rreq;
+ } else {
+ /* Take RDMAP/DDP information from
+ * current (failed) inbound frame.
+ */
+ iov[1].iov_base = rx_hdr;
+
+ if (__rdmap_get_opcode(&rx_hdr->ctrl) ==
+ RDMAP_RDMA_READ_REQ)
+ iov[1].iov_len =
+ sizeof(struct iwarp_rdma_rreq);
+ else /* SEND type */
+ iov[1].iov_len =
+ sizeof(struct iwarp_send);
+ }
+ } else {
+ /* Do not report DDP hdr information if packet
+ * layout is unknown
+ */
+ if ((qp->term_info.ecode == RDMAP_ECODE_VERSION) ||
+ (qp->term_info.ecode == RDMAP_ECODE_OPCODE))
+ break;
+
+ iov[1].iov_base = rx_hdr;
+
+ /* Only DDP frame will get attached */
+ if (rx_hdr->ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED)
+ iov[1].iov_len =
+ sizeof(struct iwarp_rdma_write);
+ else
+ iov[1].iov_len = sizeof(struct iwarp_send);
+
+ term->flag_m = 1;
+ term->flag_d = 1;
+ }
+ term->ctrl.mpa_len = cpu_to_be16(iov[1].iov_len);
+ break;
+
+ case TERM_ERROR_LAYER_DDP:
+ /* Report error encountered while DDP processing.
+ * This can only happen as a result of inbound
+ * DDP processing
+ */
+
+ /* Do not report DDP hdr information if packet
+ * layout is unknown
+ */
+ if (((qp->term_info.etype == DDP_ETYPE_TAGGED_BUF) &&
+ (qp->term_info.ecode == DDP_ECODE_T_VERSION)) ||
+ ((qp->term_info.etype == DDP_ETYPE_UNTAGGED_BUF) &&
+ (qp->term_info.ecode == DDP_ECODE_UT_VERSION)))
+ break;
+
+ iov[1].iov_base = rx_hdr;
+
+ if (rx_hdr->ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED)
+ iov[1].iov_len = sizeof(struct iwarp_ctrl_tagged);
+ else
+ iov[1].iov_len = sizeof(struct iwarp_ctrl_untagged);
+
+ term->flag_m = 1;
+ term->flag_d = 1;
+ break;
+
+ default:
+ break;
+ }
+ if (term->flag_m || term->flag_d || term->flag_r) {
+ iov[2].iov_base = &crc;
+ iov[2].iov_len = sizeof(crc);
+ len_terminate = sizeof(*term) + iov[1].iov_len + MPA_CRC_SIZE;
+ num_frags = 3;
+ } else {
+ iov[1].iov_base = &crc;
+ iov[1].iov_len = sizeof(crc);
+ len_terminate = sizeof(*term) + MPA_CRC_SIZE;
+ num_frags = 2;
+ }
+
+ /* Adjust DDP Segment Length parameter, if valid */
+ if (term->flag_m) {
+ u32 real_ddp_len = be16_to_cpu(rx_hdr->ctrl.mpa_len);
+ enum rdma_opcode op = __rdmap_get_opcode(&rx_hdr->ctrl);
+
+ real_ddp_len -= iwarp_pktinfo[op].hdr_len - MPA_HDR_SIZE;
+ rx_hdr->ctrl.mpa_len = cpu_to_be16(real_ddp_len);
+ }
+
+ term->ctrl.mpa_len =
+ cpu_to_be16(len_terminate - (MPA_HDR_SIZE + MPA_CRC_SIZE));
+ if (qp->tx_ctx.mpa_crc_enabled) {
+ siw_crc_init(&qp->tx_ctx.mpa_crc);
+ siw_crc_update(&qp->tx_ctx.mpa_crc,
+ iov[0].iov_base, iov[0].iov_len);
+ if (num_frags == 3) {
+ siw_crc_update(&qp->tx_ctx.mpa_crc,
+ iov[1].iov_base, iov[1].iov_len);
+ }
+ siw_crc_final(&qp->tx_ctx.mpa_crc, (u8 *)&crc);
+ }
+
+ rv = kernel_sendmsg(s, &msg, iov, num_frags, len_terminate);
+ siw_dbg_qp(qp, "sent TERM: %s, layer %d, type %d, code %d (%d bytes)\n",
+ rv == len_terminate ? "success" : "failure",
+ __rdmap_term_layer(term), __rdmap_term_etype(term),
+ __rdmap_term_ecode(term), rv);
+ kfree(term);
+ kfree(err_hdr);
+}
+
+/*
+ * Handle all attrs other than state
+ */
+static void siw_qp_modify_nonstate(struct siw_qp *qp,
+ struct siw_qp_attrs *attrs,
+ enum siw_qp_attr_mask mask)
+{
+ if (mask & SIW_QP_ATTR_ACCESS_FLAGS) {
+ if (attrs->flags & SIW_RDMA_BIND_ENABLED)
+ qp->attrs.flags |= SIW_RDMA_BIND_ENABLED;
+ else
+ qp->attrs.flags &= ~SIW_RDMA_BIND_ENABLED;
+
+ if (attrs->flags & SIW_RDMA_WRITE_ENABLED)
+ qp->attrs.flags |= SIW_RDMA_WRITE_ENABLED;
+ else
+ qp->attrs.flags &= ~SIW_RDMA_WRITE_ENABLED;
+
+ if (attrs->flags & SIW_RDMA_READ_ENABLED)
+ qp->attrs.flags |= SIW_RDMA_READ_ENABLED;
+ else
+ qp->attrs.flags &= ~SIW_RDMA_READ_ENABLED;
+ }
+}
+
+static int siw_qp_nextstate_from_idle(struct siw_qp *qp,
+ struct siw_qp_attrs *attrs,
+ enum siw_qp_attr_mask mask)
+{
+ int rv = 0;
+
+ switch (attrs->state) {
+ case SIW_QP_STATE_RTS:
+ if (attrs->flags & SIW_MPA_CRC) {
+ siw_crc_init(&qp->tx_ctx.mpa_crc);
+ qp->tx_ctx.mpa_crc_enabled = true;
+ siw_crc_init(&qp->rx_stream.mpa_crc);
+ qp->rx_stream.mpa_crc_enabled = true;
+ }
+ if (!(mask & SIW_QP_ATTR_LLP_HANDLE)) {
+ siw_dbg_qp(qp, "no socket\n");
+ rv = -EINVAL;
+ break;
+ }
+ if (!(mask & SIW_QP_ATTR_MPA)) {
+ siw_dbg_qp(qp, "no MPA\n");
+ rv = -EINVAL;
+ break;
+ }
+ /*
+ * Initialize iWARP TX state
+ */
+ qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_SEND] = 0;
+ qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ] = 0;
+ qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_TERMINATE] = 0;
+
+ /*
+ * Initialize iWARP RX state
+ */
+ qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_SEND] = 1;
+ qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ] = 1;
+ qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_TERMINATE] = 1;
+
+ /*
+ * Initialize the inbound/outbound read queues; the caller
+ * has already checked the limits.
+ */
+ rv = siw_qp_readq_init(qp, attrs->irq_size,
+ attrs->orq_size);
+ if (rv)
+ break;
+
+ qp->attrs.sk = attrs->sk;
+ qp->attrs.state = SIW_QP_STATE_RTS;
+
+ siw_dbg_qp(qp, "enter RTS: crc=%s, ord=%u, ird=%u\n",
+ attrs->flags & SIW_MPA_CRC ? "y" : "n",
+ qp->attrs.orq_size, qp->attrs.irq_size);
+ break;
+
+ case SIW_QP_STATE_ERROR:
+ siw_rq_flush(qp);
+ qp->attrs.state = SIW_QP_STATE_ERROR;
+ if (qp->cep) {
+ siw_cep_put(qp->cep);
+ qp->cep = NULL;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return rv;
+}
+
+static int siw_qp_nextstate_from_rts(struct siw_qp *qp,
+ struct siw_qp_attrs *attrs)
+{
+ int drop_conn = 0;
+
+ switch (attrs->state) {
+ case SIW_QP_STATE_CLOSING:
+ /*
+ * Verbs: move to IDLE if SQ and ORQ are empty.
+ * Move to ERROR otherwise. But first of all we must
+ * close the connection. So we keep CLOSING or ERROR
+ * as a transient state, schedule connection drop work
+ * and wait for the socket state change upcall to
+ * come back closed.
+ */
+ if (tx_wqe(qp)->wr_status == SIW_WR_IDLE) {
+ qp->attrs.state = SIW_QP_STATE_CLOSING;
+ } else {
+ qp->attrs.state = SIW_QP_STATE_ERROR;
+ siw_sq_flush(qp);
+ }
+ siw_rq_flush(qp);
+
+ drop_conn = 1;
+ break;
+
+ case SIW_QP_STATE_TERMINATE:
+ qp->attrs.state = SIW_QP_STATE_TERMINATE;
+
+ siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP,
+ RDMAP_ETYPE_CATASTROPHIC,
+ RDMAP_ECODE_UNSPECIFIED, 1);
+ drop_conn = 1;
+ break;
+
+ case SIW_QP_STATE_ERROR:
+ /*
+ * This is an emergency close.
+ *
+ * Any in progress transmit operation will get
+ * cancelled.
+ * This will likely result in a protocol failure,
+ * if a TX operation is in transit. The caller
+ * could unconditionally wait to give the current
+ * operation a chance to complete.
+ * Especially, how to handle the non-empty IRQ case?
+ * The peer was asking for data transfer at a valid
+ * point in time.
+ */
+ siw_sq_flush(qp);
+ siw_rq_flush(qp);
+ qp->attrs.state = SIW_QP_STATE_ERROR;
+ drop_conn = 1;
+ break;
+
+ default:
+ break;
+ }
+ return drop_conn;
+}
+
+static void siw_qp_nextstate_from_term(struct siw_qp *qp,
+ struct siw_qp_attrs *attrs)
+{
+ switch (attrs->state) {
+ case SIW_QP_STATE_ERROR:
+ siw_rq_flush(qp);
+ qp->attrs.state = SIW_QP_STATE_ERROR;
+
+ if (tx_wqe(qp)->wr_status != SIW_WR_IDLE)
+ siw_sq_flush(qp);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int siw_qp_nextstate_from_close(struct siw_qp *qp,
+ struct siw_qp_attrs *attrs)
+{
+ int rv = 0;
+
+ switch (attrs->state) {
+ case SIW_QP_STATE_IDLE:
+ WARN_ON(tx_wqe(qp)->wr_status != SIW_WR_IDLE);
+ qp->attrs.state = SIW_QP_STATE_IDLE;
+ break;
+
+ case SIW_QP_STATE_CLOSING:
+ /*
+ * The LLP may have already moved the QP to CLOSING
+ * due to a graceful peer close initiation.
+ */
+ break;
+
+ case SIW_QP_STATE_ERROR:
+ /*
+ * QP was moved to CLOSING by LLP event
+ * not yet seen by user.
+ */
+ qp->attrs.state = SIW_QP_STATE_ERROR;
+
+ if (tx_wqe(qp)->wr_status != SIW_WR_IDLE)
+ siw_sq_flush(qp);
+
+ siw_rq_flush(qp);
+ break;
+
+ default:
+ siw_dbg_qp(qp, "state transition undefined: %s => %s\n",
+ siw_qp_state_to_string[qp->attrs.state],
+ siw_qp_state_to_string[attrs->state]);
+
+ rv = -ECONNABORTED;
+ }
+ return rv;
+}
+
+/*
+ * Caller must hold qp->state_lock
+ */
+int siw_qp_modify(struct siw_qp *qp, struct siw_qp_attrs *attrs,
+ enum siw_qp_attr_mask mask)
+{
+ int drop_conn = 0, rv = 0;
+
+ if (!mask)
+ return 0;
+
+ siw_dbg_qp(qp, "state: %s => %s\n",
+ siw_qp_state_to_string[qp->attrs.state],
+ siw_qp_state_to_string[attrs->state]);
+
+ if (mask != SIW_QP_ATTR_STATE)
+ siw_qp_modify_nonstate(qp, attrs, mask);
+
+ if (!(mask & SIW_QP_ATTR_STATE))
+ return 0;
+
+ switch (qp->attrs.state) {
+ case SIW_QP_STATE_IDLE:
+ case SIW_QP_STATE_RTR:
+ rv = siw_qp_nextstate_from_idle(qp, attrs, mask);
+ break;
+
+ case SIW_QP_STATE_RTS:
+ drop_conn = siw_qp_nextstate_from_rts(qp, attrs);
+ break;
+
+ case SIW_QP_STATE_TERMINATE:
+ siw_qp_nextstate_from_term(qp, attrs);
+ break;
+
+ case SIW_QP_STATE_CLOSING:
+ siw_qp_nextstate_from_close(qp, attrs);
+ break;
+ default:
+ break;
+ }
+ if (drop_conn)
+ siw_qp_cm_drop(qp, 0);
+
+ return rv;
+}
+
+void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe)
+{
+ rreq->id = sqe->id;
+ rreq->opcode = sqe->opcode;
+ rreq->sge[0].laddr = sqe->sge[0].laddr;
+ rreq->sge[0].length = sqe->sge[0].length;
+ rreq->sge[0].lkey = sqe->sge[0].lkey;
+ rreq->sge[1].lkey = sqe->sge[1].lkey;
+ rreq->flags = sqe->flags | SIW_WQE_VALID;
+ rreq->num_sge = 1;
+}
+
+static int siw_activate_tx_from_sq(struct siw_qp *qp)
+{
+ struct siw_sqe *sqe;
+ struct siw_wqe *wqe = tx_wqe(qp);
+ int rv = 1;
+
+ sqe = sq_get_next(qp);
+ if (!sqe)
+ return 0;
+
+ memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
+ wqe->wr_status = SIW_WR_QUEUED;
+
+ /* First copy SQE to kernel private memory */
+ memcpy(&wqe->sqe, sqe, sizeof(*sqe));
+
+ if (wqe->sqe.opcode >= SIW_NUM_OPCODES) {
+ rv = -EINVAL;
+ goto out;
+ }
+ if (wqe->sqe.flags & SIW_WQE_INLINE) {
+ if (wqe->sqe.opcode != SIW_OP_SEND &&
+ wqe->sqe.opcode != SIW_OP_WRITE) {
+ rv = -EINVAL;
+ goto out;
+ }
+ if (wqe->sqe.sge[0].length > SIW_MAX_INLINE) {
+ rv = -EINVAL;
+ goto out;
+ }
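+ /* Inline data is stored within the SQE, starting at sge[1] */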
+ wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
+ wqe->sqe.sge[0].lkey = 0;
+ wqe->sqe.num_sge = 1;
+ }
+ if (wqe->sqe.flags & SIW_WQE_READ_FENCE) {
+ /* A READ cannot be fenced */
+ if (unlikely(wqe->sqe.opcode == SIW_OP_READ ||
+ wqe->sqe.opcode ==
+ SIW_OP_READ_LOCAL_INV)) {
+ siw_dbg_qp(qp, "cannot fence read\n");
+ rv = -EINVAL;
+ goto out;
+ }
+ spin_lock(&qp->orq_lock);
+
+ if (qp->attrs.orq_size && !siw_orq_empty(qp)) {
+ qp->tx_ctx.orq_fence = 1;
+ rv = 0;
+ }
+ spin_unlock(&qp->orq_lock);
+
+ } else if (wqe->sqe.opcode == SIW_OP_READ ||
+ wqe->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
+ struct siw_sqe *rreq;
+
+ if (unlikely(!qp->attrs.orq_size)) {
+ /* We negotiated not to send READ req's */
+ rv = -EINVAL;
+ goto out;
+ }
+ wqe->sqe.num_sge = 1;
+
+ spin_lock(&qp->orq_lock);
+
+ rreq = orq_get_free(qp);
+ if (rreq) {
+ /*
+ * Make an immediate copy in ORQ to be ready
+ * to process loopback READ reply
+ */
+ siw_read_to_orq(rreq, &wqe->sqe);
+ qp->orq_put++;
+ } else {
+ qp->tx_ctx.orq_fence = 1;
+ rv = 0;
+ }
+ spin_unlock(&qp->orq_lock);
+ }
+
+ /* Clear SQE, can be re-used by application */
+ smp_store_mb(sqe->flags, 0);
+ qp->sq_get++;
+out:
+ if (unlikely(rv < 0)) {
+ siw_dbg_qp(qp, "error %d\n", rv);
+ wqe->wr_status = SIW_WR_IDLE;
+ }
+ return rv;
+}
+
+/*
+ * Must be called with SQ locked.
+ * To avoid complete SQ starvation by constant inbound READ requests,
+ * the active IRQ will not be served after qp->irq_burst, if the
+ * SQ has pending work.
+ */
+int siw_activate_tx(struct siw_qp *qp)
+{
+ struct siw_sqe *irqe;
+ struct siw_wqe *wqe = tx_wqe(qp);
+
+ if (!qp->attrs.irq_size)
+ return siw_activate_tx_from_sq(qp);
+
+ irqe = &qp->irq[qp->irq_get % qp->attrs.irq_size];
+
+ if (!(irqe->flags & SIW_WQE_VALID))
+ return siw_activate_tx_from_sq(qp);
+
+ /*
+ * Avoid local WQE processing starvation in case
+ * of constant inbound READ request stream
+ */
+ if (sq_get_next(qp) && ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) {
+ qp->irq_burst = 0;
+ return siw_activate_tx_from_sq(qp);
+ }
+ memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
+ wqe->wr_status = SIW_WR_QUEUED;
+
+ /* start READ RESPONSE */
+ wqe->sqe.opcode = SIW_OP_READ_RESPONSE;
+ wqe->sqe.flags = 0;
+ if (irqe->num_sge) {
+ wqe->sqe.num_sge = 1;
+ wqe->sqe.sge[0].length = irqe->sge[0].length;
+ wqe->sqe.sge[0].laddr = irqe->sge[0].laddr;
+ wqe->sqe.sge[0].lkey = irqe->sge[0].lkey;
+ } else {
+ wqe->sqe.num_sge = 0;
+ }
+
+ /* Retain original RREQ's message sequence number for
+ * potential error reporting cases.
+ */
+ wqe->sqe.sge[1].length = irqe->sge[1].length;
+
+ wqe->sqe.rkey = irqe->rkey;
+ wqe->sqe.raddr = irqe->raddr;
+
+ wqe->processed = 0;
+ qp->irq_get++;
+
+ /* mark current IRQ entry free */
+ smp_store_mb(irqe->flags, 0);
+
+ return 1;
+}
+
+/*
+ * Check if current CQ state qualifies for calling CQ completion
+ * handler. Must be called with CQ lock held.
+ */
+static bool siw_cq_notify_now(struct siw_cq *cq, u32 flags)
+{
+ u32 cq_notify;
+
+ if (!cq->base_cq.comp_handler)
+ return false;
+
+ /* Read application shared notification state */
+ cq_notify = READ_ONCE(cq->notify->flags);
+
+ if ((cq_notify & SIW_NOTIFY_NEXT_COMPLETION) ||
+ ((cq_notify & SIW_NOTIFY_SOLICITED) &&
+ (flags & SIW_WQE_SOLICITED))) {
+ /*
+ * CQ notification is one-shot: Since the
+ * current CQE causes user notification,
+ * the CQ gets disarmed and must be re-armed
+ * by the user for a new notification.
+ */
+ WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT);
+
+ return true;
+ }
+ return false;
+}
+
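+/*
+ * Post a work completion for a Send Queue element to the send CQ,
+ * or just recycle the SQE if the QP has no send CQ attached.
+ */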
+int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
+ enum siw_wc_status status)
+{
+ struct siw_cq *cq = qp->scq;
+ int rv = 0;
+
+ if (cq) {
+ u32 sqe_flags = sqe->flags;
+ struct siw_cqe *cqe;
+ u32 idx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cq->lock, flags);
+
+ idx = cq->cq_put % cq->num_cqe;
+ cqe = &cq->queue[idx];
+
+ if (!READ_ONCE(cqe->flags)) {
+ bool notify;
+
+ cqe->id = sqe->id;
+ cqe->opcode = sqe->opcode;
+ cqe->status = status;
+ cqe->imm_data = 0;
+ cqe->bytes = bytes;
+
+ if (rdma_is_kernel_res(&cq->base_cq.res))
+ cqe->base_qp = &qp->base_qp;
+ else
+ cqe->qp_id = qp_id(qp);
+
+ /* mark CQE valid for application */
+ WRITE_ONCE(cqe->flags, SIW_WQE_VALID);
+ /* recycle SQE */
+ smp_store_mb(sqe->flags, 0);
+
+ cq->cq_put++;
+ notify = siw_cq_notify_now(cq, sqe_flags);
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ if (notify) {
+ siw_dbg_cq(cq, "Call completion handler\n");
+ cq->base_cq.comp_handler(&cq->base_cq,
+ cq->base_cq.cq_context);
+ }
+ } else {
+ spin_unlock_irqrestore(&cq->lock, flags);
+ rv = -ENOMEM;
+ siw_cq_event(cq, IB_EVENT_CQ_ERR);
+ }
+ } else {
+ /* recycle SQE */
+ smp_store_mb(sqe->flags, 0);
+ }
+ return rv;
+}
+
+int siw_rqe_complete(struct siw_qp *qp, struct siw_rqe *rqe, u32 bytes,
+ u32 inval_stag, enum siw_wc_status status)
+{
+ struct siw_cq *cq = qp->rcq;
+ int rv = 0;
+
+ if (cq) {
+ struct siw_cqe *cqe;
+ u32 idx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cq->lock, flags);
+
+ idx = cq->cq_put % cq->num_cqe;
+ cqe = &cq->queue[idx];
+
+ if (!READ_ONCE(cqe->flags)) {
+ bool notify;
+ u8 cqe_flags = SIW_WQE_VALID;
+
+ cqe->id = rqe->id;
+ cqe->opcode = SIW_OP_RECEIVE;
+ cqe->status = status;
+ cqe->imm_data = 0;
+ cqe->bytes = bytes;
+
+ if (rdma_is_kernel_res(&cq->base_cq.res)) {
+ cqe->base_qp = &qp->base_qp;
+ if (inval_stag) {
+ cqe_flags |= SIW_WQE_REM_INVAL;
+ cqe->inval_stag = inval_stag;
+ }
+ } else {
+ cqe->qp_id = qp_id(qp);
+ }
+ /* mark CQE valid for application */
+ WRITE_ONCE(cqe->flags, cqe_flags);
+ /* recycle RQE */
+ smp_store_mb(rqe->flags, 0);
+
+ cq->cq_put++;
+ notify = siw_cq_notify_now(cq, SIW_WQE_SIGNALLED);
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ if (notify) {
+ siw_dbg_cq(cq, "Call completion handler\n");
+ cq->base_cq.comp_handler(&cq->base_cq,
+ cq->base_cq.cq_context);
+ }
+ } else {
+ spin_unlock_irqrestore(&cq->lock, flags);
+ rv = -ENOMEM;
+ siw_cq_event(cq, IB_EVENT_CQ_ERR);
+ }
+ } else {
+ /* recycle RQE */
+ smp_store_mb(rqe->flags, 0);
+ }
+ return rv;
+}
+
+/*
+ * siw_sq_flush()
+ *
+ * Flush SQ and ORQ entries to CQ.
+ *
+ * Must be called with QP state write lock held.
+ * Therefore, SQ and ORQ lock must not be taken.
+ */
+void siw_sq_flush(struct siw_qp *qp)
+{
+ struct siw_sqe *sqe;
+ struct siw_wqe *wqe = tx_wqe(qp);
+ int async_event = 0;
+
+ /*
+ * Start with completing any work currently on the ORQ
+ */
+ while (qp->attrs.orq_size) {
+ sqe = &qp->orq[qp->orq_get % qp->attrs.orq_size];
+ if (!READ_ONCE(sqe->flags))
+ break;
+
+ if (siw_sqe_complete(qp, sqe, 0, SIW_WC_WR_FLUSH_ERR) != 0)
+ break;
+
+ WRITE_ONCE(sqe->flags, 0);
+ qp->orq_get++;
+ }
+ /*
+ * Flush an in-progress WQE if present
+ */
+ if (wqe->wr_status != SIW_WR_IDLE) {
+ siw_dbg_qp(qp, "flush current SQE, type %d, status %d\n",
+ tx_type(wqe), wqe->wr_status);
+
+ siw_wqe_put_mem(wqe, tx_type(wqe));
+
+ if (tx_type(wqe) != SIW_OP_READ_RESPONSE &&
+ ((tx_type(wqe) != SIW_OP_READ &&
+ tx_type(wqe) != SIW_OP_READ_LOCAL_INV) ||
+ wqe->wr_status == SIW_WR_QUEUED))
+ /*
+ * An in-progress Read Request is already in
+ * the ORQ
+ */
+ siw_sqe_complete(qp, &wqe->sqe, wqe->bytes,
+ SIW_WC_WR_FLUSH_ERR);
+
+ wqe->wr_status = SIW_WR_IDLE;
+ }
+ /*
+ * Flush the Send Queue
+ */
+ while (qp->attrs.sq_size) {
+ sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];
+ if (!READ_ONCE(sqe->flags))
+ break;
+
+ async_event = 1;
+ if (siw_sqe_complete(qp, sqe, 0, SIW_WC_WR_FLUSH_ERR) != 0)
+ /*
+ * Shall IB_EVENT_SQ_DRAINED be suppressed if work
+ * completion fails?
+ */
+ break;
+
+ WRITE_ONCE(sqe->flags, 0);
+ qp->sq_get++;
+ }
+ if (async_event)
+ siw_qp_event(qp, IB_EVENT_SQ_DRAINED);
+}
+
+/*
+ * siw_rq_flush()
+ *
+ * Flush recv queue entries to CQ. Also
+ * takes care of pending active tagged and untagged
+ * inbound transfers, which have target memory
+ * referenced.
+ *
+ * Must be called with QP state write lock held.
+ * Therefore, RQ lock must not be taken.
+ */
+void siw_rq_flush(struct siw_qp *qp)
+{
+ struct siw_wqe *wqe = &qp->rx_untagged.wqe_active;
+
+ /*
+ * Flush an in-progress untagged operation if present
+ */
+ if (wqe->wr_status != SIW_WR_IDLE) {
+ siw_dbg_qp(qp, "flush current rqe, type %d, status %d\n",
+ rx_type(wqe), wqe->wr_status);
+
+ siw_wqe_put_mem(wqe, rx_type(wqe));
+
+ if (rx_type(wqe) == SIW_OP_RECEIVE) {
+ siw_rqe_complete(qp, &wqe->rqe, wqe->bytes,
+ 0, SIW_WC_WR_FLUSH_ERR);
+ } else if (rx_type(wqe) != SIW_OP_READ &&
+ rx_type(wqe) != SIW_OP_READ_RESPONSE &&
+ rx_type(wqe) != SIW_OP_WRITE) {
+ siw_sqe_complete(qp, &wqe->sqe, 0, SIW_WC_WR_FLUSH_ERR);
+ }
+ wqe->wr_status = SIW_WR_IDLE;
+ }
+ wqe = &qp->rx_tagged.wqe_active;
+
+ if (wqe->wr_status != SIW_WR_IDLE) {
+ siw_wqe_put_mem(wqe, rx_type(wqe));
+ wqe->wr_status = SIW_WR_IDLE;
+ }
+ /*
+ * Flush the Receive Queue
+ */
+ while (qp->attrs.rq_size) {
+ struct siw_rqe *rqe =
+ &qp->recvq[qp->rq_get % qp->attrs.rq_size];
+
+ if (!READ_ONCE(rqe->flags))
+ break;
+
+ if (siw_rqe_complete(qp, rqe, 0, 0, SIW_WC_WR_FLUSH_ERR) != 0)
+ break;
+
+ WRITE_ONCE(rqe->flags, 0);
+ qp->rq_get++;
+ }
+}
+
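+/* Register the QP with its device and assign the QP number */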
+int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp)
+{
+ int rv = xa_alloc(&sdev->qp_xa, &qp->base_qp.qp_num, qp, xa_limit_32b,
+ GFP_KERNEL);
+
+ if (!rv) {
+ kref_init(&qp->ref);
+ qp->sdev = sdev;
+ siw_dbg_qp(qp, "new QP\n");
+ }
+ return rv;
+}
+
+void siw_free_qp(struct kref *ref)
+{
+ struct siw_qp *found, *qp = container_of(ref, struct siw_qp, ref);
+ struct siw_device *sdev = qp->sdev;
+ unsigned long flags;
+
+ if (qp->cep)
+ siw_cep_put(qp->cep);
+
+ found = xa_erase(&sdev->qp_xa, qp_id(qp));
+ WARN_ON(found != qp);
+ spin_lock_irqsave(&sdev->lock, flags);
+ list_del(&qp->devq);
+ spin_unlock_irqrestore(&sdev->lock, flags);
+
+ vfree(qp->sendq);
+ vfree(qp->recvq);
+ vfree(qp->irq);
+ vfree(qp->orq);
+
+ siw_put_tx_cpu(qp->tx_cpu);
+ complete(&qp->qp_free);
+ atomic_dec(&sdev->num_qp);
+}
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
new file mode 100644
index 000000000000..a10820e33887
--- /dev/null
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -0,0 +1,1455 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/scatterlist.h>
+#include <linux/highmem.h>
+
+#include <rdma/iw_cm.h>
+#include <rdma/ib_verbs.h>
+
+#include "siw.h"
+#include "siw_verbs.h"
+#include "siw_mem.h"
+
+/*
+ * siw_rx_umem()
+ *
+ * Receive data of @len into target referenced by @dest_addr.
+ *
+ * @srx: Receive Context
+ * @umem: siw representation of target memory
+ * @dest_addr: user virtual address
+ * @len: number of bytes to place
+ */
+static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
+ u64 dest_addr, int len)
+{
+ int copied = 0;
+
+ while (len) {
+ struct page *p;
+ int pg_off, bytes, rv;
+ void *dest;
+
+ p = siw_get_upage(umem, dest_addr);
+ if (unlikely(!p)) {
+ pr_warn("siw: %s: [QP %u]: bogus addr: %p, %p\n",
+ __func__, qp_id(rx_qp(srx)),
+ (void *)(uintptr_t)dest_addr,
+ (void *)(uintptr_t)umem->fp_addr);
+ /* siw internal error */
+ srx->skb_copied += copied;
+ srx->skb_new -= copied;
+
+ return -EFAULT;
+ }
+ pg_off = dest_addr & ~PAGE_MASK;
+ bytes = min(len, (int)PAGE_SIZE - pg_off);
+
+ siw_dbg_qp(rx_qp(srx), "page %p, bytes=%u\n", p, bytes);
+
+ dest = kmap_atomic(p);
+ rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off,
+ bytes);
+
+ if (unlikely(rv)) {
+ kunmap_atomic(dest);
+ srx->skb_copied += copied;
+ srx->skb_new -= copied;
+
+ pr_warn("siw: [QP %u]: %s, len %d, page %p, rv %d\n",
+ qp_id(rx_qp(srx)), __func__, len, p, rv);
+
+ return -EFAULT;
+ }
+ if (srx->mpa_crc_enabled) {
+ if (rdma_is_kernel_res(&rx_qp(srx)->base_qp.res)) {
+ siw_crc_update(&srx->mpa_crc, dest + pg_off,
+ bytes);
+ kunmap_atomic(dest);
+ } else {
+ kunmap_atomic(dest);
+ /*
+ * Do CRC on original, not target buffer.
+ * Some user land applications may
+ * concurrently write the target buffer,
+ * which would yield a broken CRC.
+ * Walking the skb twice is very inefficient.
+ * Folding the CRC into skb_copy_bits()
+ * would be much better, but is currently
+ * not supported.
+ */
+ siw_crc_skb(srx, bytes);
+ }
+ } else {
+ kunmap_atomic(dest);
+ }
+ srx->skb_offset += bytes;
+ copied += bytes;
+ len -= bytes;
+ dest_addr += bytes;
+ pg_off = 0;
+ }
+ srx->skb_copied += copied;
+ srx->skb_new -= copied;
+
+ return copied;
+}
+
+static int siw_rx_kva(struct siw_rx_stream *srx, void *kva, int len)
+{
+ int rv;
+
+ siw_dbg_qp(rx_qp(srx), "kva: 0x%p, len: %u\n", kva, len);
+
+ rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len);
+ if (unlikely(rv)) {
+ pr_warn("siw: [QP %u]: %s, len %d, kva 0x%p, rv %d\n",
+ qp_id(rx_qp(srx)), __func__, len, kva, rv);
+
+ return rv;
+ }
+ if (srx->mpa_crc_enabled)
+ siw_crc_update(&srx->mpa_crc, kva, len);
+
+ srx->skb_offset += len;
+ srx->skb_copied += len;
+ srx->skb_new -= len;
+
+ return len;
+}
+
+static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
+ struct siw_mem *mem, u64 addr, int len)
+{
+ struct siw_pbl *pbl = mem->pbl;
+ u64 offset = addr - mem->va;
+ int copied = 0;
+
+ while (len) {
+ int bytes;
+ dma_addr_t buf_addr =
+ siw_pbl_get_buffer(pbl, offset, &bytes, pbl_idx);
+ if (!buf_addr)
+ break;
+
+ bytes = min(bytes, len);
+ if (siw_rx_kva(srx, ib_virt_dma_to_ptr(buf_addr), bytes) ==
+ bytes) {
+ copied += bytes;
+ offset += bytes;
+ len -= bytes;
+ } else {
+ break;
+ }
+ }
+ return copied;
+}
+
+/*
+ * siw_rresp_check_ntoh()
+ *
+ * Check incoming RRESP fragment header against expected
+ * header values and update expected values for potential next
+ * fragment.
+ *
+ * NOTE: This function must be called only if a RRESP DDP segment
+ * starts but not for fragmented consecutive pieces of an
+ * already started DDP segment.
+ */
+static int siw_rresp_check_ntoh(struct siw_rx_stream *srx,
+ struct siw_rx_fpdu *frx)
+{
+ struct iwarp_rdma_rresp *rresp = &srx->hdr.rresp;
+ struct siw_wqe *wqe = &frx->wqe_active;
+ enum ddp_ecode ecode;
+
+ u32 sink_stag = be32_to_cpu(rresp->sink_stag);
+ u64 sink_to = be64_to_cpu(rresp->sink_to);
+
+ if (frx->first_ddp_seg) {
+ srx->ddp_stag = wqe->sqe.sge[0].lkey;
+ srx->ddp_to = wqe->sqe.sge[0].laddr;
+ frx->pbl_idx = 0;
+ }
+ /* Below checks extend beyond the semantics of DDP, and
+ * into RDMAP:
+ * We check if the read response matches exactly the
+ * read request which was sent to the remote peer to
+ * trigger this read response. RFC5040/5041 do not
+ * always have a proper error code for the detected
+ * error cases. We choose 'base or bounds error' for
+ * cases where the inbound STag is valid, but offset
+ * or length do not match our response receive state.
+ */
+ if (unlikely(srx->ddp_stag != sink_stag)) {
+ pr_warn("siw: [QP %u]: rresp stag: %08x != %08x\n",
+ qp_id(rx_qp(srx)), sink_stag, srx->ddp_stag);
+ ecode = DDP_ECODE_T_INVALID_STAG;
+ goto error;
+ }
+ if (unlikely(srx->ddp_to != sink_to)) {
+ pr_warn("siw: [QP %u]: rresp off: %016llx != %016llx\n",
+ qp_id(rx_qp(srx)), (unsigned long long)sink_to,
+ (unsigned long long)srx->ddp_to);
+ ecode = DDP_ECODE_T_BASE_BOUNDS;
+ goto error;
+ }
+ if (unlikely(!frx->more_ddp_segs &&
+ (wqe->processed + srx->fpdu_part_rem != wqe->bytes))) {
+ pr_warn("siw: [QP %u]: rresp len: %d != %d\n",
+ qp_id(rx_qp(srx)),
+ wqe->processed + srx->fpdu_part_rem, wqe->bytes);
+ ecode = DDP_ECODE_T_BASE_BOUNDS;
+ goto error;
+ }
+ return 0;
+error:
+ siw_init_terminate(rx_qp(srx), TERM_ERROR_LAYER_DDP,
+ DDP_ETYPE_TAGGED_BUF, ecode, 0);
+ return -EINVAL;
+}
+
+/*
+ * siw_write_check_ntoh()
+ *
+ * Check incoming WRITE fragment header against expected
+ * header values and update expected values for potential next
+ * fragment
+ *
+ * NOTE: This function must be called only if a WRITE DDP segment
+ * starts but not for fragmented consecutive pieces of an
+ * already started DDP segment.
+ */
+static int siw_write_check_ntoh(struct siw_rx_stream *srx,
+ struct siw_rx_fpdu *frx)
+{
+ struct iwarp_rdma_write *write = &srx->hdr.rwrite;
+ enum ddp_ecode ecode;
+
+ u32 sink_stag = be32_to_cpu(write->sink_stag);
+ u64 sink_to = be64_to_cpu(write->sink_to);
+
+ if (frx->first_ddp_seg) {
+ srx->ddp_stag = sink_stag;
+ srx->ddp_to = sink_to;
+ frx->pbl_idx = 0;
+ } else {
+ if (unlikely(srx->ddp_stag != sink_stag)) {
+ pr_warn("siw: [QP %u]: write stag: %08x != %08x\n",
+ qp_id(rx_qp(srx)), sink_stag,
+ srx->ddp_stag);
+ ecode = DDP_ECODE_T_INVALID_STAG;
+ goto error;
+ }
+ if (unlikely(srx->ddp_to != sink_to)) {
+ pr_warn("siw: [QP %u]: write off: %016llx != %016llx\n",
+ qp_id(rx_qp(srx)),
+ (unsigned long long)sink_to,
+ (unsigned long long)srx->ddp_to);
+ ecode = DDP_ECODE_T_BASE_BOUNDS;
+ goto error;
+ }
+ }
+ return 0;
+error:
+ siw_init_terminate(rx_qp(srx), TERM_ERROR_LAYER_DDP,
+ DDP_ETYPE_TAGGED_BUF, ecode, 0);
+ return -EINVAL;
+}
+
+/*
+ * siw_send_check_ntoh()
+ *
+ * Check incoming SEND fragment header against expected
+ * header values and update expected MSN if no next
+ * fragment expected
+ *
+ * NOTE: This function must be called only if a SEND DDP segment
+ * starts but not for fragmented consecutive pieces of an
+ * already started DDP segment.
+ */
+static int siw_send_check_ntoh(struct siw_rx_stream *srx,
+ struct siw_rx_fpdu *frx)
+{
+ struct iwarp_send_inv *send = &srx->hdr.send_inv;
+ struct siw_wqe *wqe = &frx->wqe_active;
+ enum ddp_ecode ecode;
+
+ u32 ddp_msn = be32_to_cpu(send->ddp_msn);
+ u32 ddp_mo = be32_to_cpu(send->ddp_mo);
+ u32 ddp_qn = be32_to_cpu(send->ddp_qn);
+
+ if (unlikely(ddp_qn != RDMAP_UNTAGGED_QN_SEND)) {
+ pr_warn("siw: [QP %u]: invalid ddp qn %d for send\n",
+ qp_id(rx_qp(srx)), ddp_qn);
+ ecode = DDP_ECODE_UT_INVALID_QN;
+ goto error;
+ }
+ if (unlikely(ddp_msn != srx->ddp_msn[RDMAP_UNTAGGED_QN_SEND])) {
+ pr_warn("siw: [QP %u]: send msn: %u != %u\n",
+ qp_id(rx_qp(srx)), ddp_msn,
+ srx->ddp_msn[RDMAP_UNTAGGED_QN_SEND]);
+ ecode = DDP_ECODE_UT_INVALID_MSN_RANGE;
+ goto error;
+ }
+ if (unlikely(ddp_mo != wqe->processed)) {
+ pr_warn("siw: [QP %u], send mo: %u != %u\n",
+ qp_id(rx_qp(srx)), ddp_mo, wqe->processed);
+ ecode = DDP_ECODE_UT_INVALID_MO;
+ goto error;
+ }
+ if (frx->first_ddp_seg) {
+ /* initialize user memory write position */
+ frx->sge_idx = 0;
+ frx->sge_off = 0;
+ frx->pbl_idx = 0;
+
+ /* only valid for SEND_INV and SEND_SE_INV operations */
+ srx->inval_stag = be32_to_cpu(send->inval_stag);
+ }
+ if (unlikely(wqe->bytes < wqe->processed + srx->fpdu_part_rem)) {
+ siw_dbg_qp(rx_qp(srx), "receive space short: %d - %d < %d\n",
+ wqe->bytes, wqe->processed, srx->fpdu_part_rem);
+ wqe->wc_status = SIW_WC_LOC_LEN_ERR;
+ ecode = DDP_ECODE_UT_INVALID_MSN_NOBUF;
+ goto error;
+ }
+ return 0;
+error:
+ siw_init_terminate(rx_qp(srx), TERM_ERROR_LAYER_DDP,
+ DDP_ETYPE_UNTAGGED_BUF, ecode, 0);
+ return -EINVAL;
+}
+
+static struct siw_wqe *siw_rqe_get(struct siw_qp *qp)
+{
+ struct siw_rqe *rqe;
+ struct siw_srq *srq;
+ struct siw_wqe *wqe = NULL;
+ bool srq_event = false;
+ unsigned long flags;
+
+ srq = qp->srq;
+ if (srq) {
+ spin_lock_irqsave(&srq->lock, flags);
+ if (unlikely(!srq->num_rqe))
+ goto out;
+
+ rqe = &srq->recvq[srq->rq_get % srq->num_rqe];
+ } else {
+ if (unlikely(!qp->recvq))
+ goto out;
+
+ rqe = &qp->recvq[qp->rq_get % qp->attrs.rq_size];
+ }
+ if (likely(rqe->flags == SIW_WQE_VALID)) {
+ int num_sge = rqe->num_sge;
+
+ if (likely(num_sge <= SIW_MAX_SGE)) {
+ int i = 0;
+
+ wqe = rx_wqe(&qp->rx_untagged);
+ rx_type(wqe) = SIW_OP_RECEIVE;
+ wqe->wr_status = SIW_WR_INPROGRESS;
+ wqe->bytes = 0;
+ wqe->processed = 0;
+
+ wqe->rqe.id = rqe->id;
+ wqe->rqe.num_sge = num_sge;
+
+ while (i < num_sge) {
+ wqe->rqe.sge[i].laddr = rqe->sge[i].laddr;
+ wqe->rqe.sge[i].lkey = rqe->sge[i].lkey;
+ wqe->rqe.sge[i].length = rqe->sge[i].length;
+ wqe->bytes += wqe->rqe.sge[i].length;
+ wqe->mem[i] = NULL;
+ i++;
+ }
+ /* can be re-used by appl */
+ smp_store_mb(rqe->flags, 0);
+ } else {
+ siw_dbg_qp(qp, "too many sge's: %d\n", rqe->num_sge);
+ if (srq)
+ spin_unlock_irqrestore(&srq->lock, flags);
+ return NULL;
+ }
+ if (!srq) {
+ qp->rq_get++;
+ } else {
+ if (srq->armed) {
+ /* Test SRQ limit */
+ u32 off = (srq->rq_get + srq->limit) %
+ srq->num_rqe;
+ struct siw_rqe *rqe2 = &srq->recvq[off];
+
+ if (!(rqe2->flags & SIW_WQE_VALID)) {
+ srq->armed = false;
+ srq_event = true;
+ }
+ }
+ srq->rq_get++;
+ }
+ }
+out:
+ if (srq) {
+ spin_unlock_irqrestore(&srq->lock, flags);
+ if (srq_event)
+ siw_srq_event(srq, IB_EVENT_SRQ_LIMIT_REACHED);
+ }
+ return wqe;
+}
+
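+/*
+ * Place received data into the target memory, which is either a
+ * kernel buffer (kva), pinned user memory, or a physical buffer list.
+ */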
+static int siw_rx_data(struct siw_mem *mem_p, struct siw_rx_stream *srx,
+ unsigned int *pbl_idx, u64 addr, int bytes)
+{
+ int rv;
+
+ if (mem_p->mem_obj == NULL)
+ rv = siw_rx_kva(srx, ib_virt_dma_to_ptr(addr), bytes);
+ else if (!mem_p->is_pbl)
+ rv = siw_rx_umem(srx, mem_p->umem, addr, bytes);
+ else
+ rv = siw_rx_pbl(srx, pbl_idx, mem_p, addr, bytes);
+ return rv;
+}
+
+/*
+ * siw_proc_send:
+ *
+ * Process one incoming SEND and place data into memory referenced by
+ * receive wqe.
+ *
+ * Function supports partially received sends (suspending/resuming
+ * current receive wqe processing)
+ *
+ * return value:
+ * 0: reached the end of a DDP segment
+ * -EAGAIN: to be called again to finish the DDP segment
+ */
+int siw_proc_send(struct siw_qp *qp)
+{
+ struct siw_rx_stream *srx = &qp->rx_stream;
+ struct siw_rx_fpdu *frx = &qp->rx_untagged;
+ struct siw_wqe *wqe;
+ u32 data_bytes; /* all data bytes available */
+ u32 rcvd_bytes; /* sum of data bytes rcvd */
+ int rv = 0;
+
+ if (frx->first_ddp_seg) {
+ wqe = siw_rqe_get(qp);
+ if (unlikely(!wqe)) {
+ siw_init_terminate(qp, TERM_ERROR_LAYER_DDP,
+ DDP_ETYPE_UNTAGGED_BUF,
+ DDP_ECODE_UT_INVALID_MSN_NOBUF, 0);
+ return -ENOENT;
+ }
+ } else {
+ wqe = rx_wqe(frx);
+ }
+ if (srx->state == SIW_GET_DATA_START) {
+ rv = siw_send_check_ntoh(srx, frx);
+ if (unlikely(rv)) {
+ siw_qp_event(qp, IB_EVENT_QP_FATAL);
+ return rv;
+ }
+ if (!srx->fpdu_part_rem) /* zero length SEND */
+ return 0;
+ }
+ data_bytes = min(srx->fpdu_part_rem, srx->skb_new);
+ rcvd_bytes = 0;
+
+ /* A zero length SEND will skip below loop */
+ while (data_bytes) {
+ struct ib_pd *pd;
+ struct siw_mem **mem, *mem_p;
+ struct siw_sge *sge;
+ u32 sge_bytes; /* data bytes avail for SGE */
+
+ sge = &wqe->rqe.sge[frx->sge_idx];
+
+ if (!sge->length) {
+ /* just skip empty sge's */
+ frx->sge_idx++;
+ frx->sge_off = 0;
+ frx->pbl_idx = 0;
+ continue;
+ }
+ sge_bytes = min(data_bytes, sge->length - frx->sge_off);
+ mem = &wqe->mem[frx->sge_idx];
+
+ /*
+ * check with QP's PD if no SRQ present, SRQ's PD otherwise
+ */
+ pd = qp->srq == NULL ? qp->pd : qp->srq->base_srq.pd;
+
+ rv = siw_check_sge(pd, sge, mem, IB_ACCESS_LOCAL_WRITE,
+ frx->sge_off, sge_bytes);
+ if (unlikely(rv)) {
+ siw_init_terminate(qp, TERM_ERROR_LAYER_DDP,
+ DDP_ETYPE_CATASTROPHIC,
+ DDP_ECODE_CATASTROPHIC, 0);
+
+ siw_qp_event(qp, IB_EVENT_QP_ACCESS_ERR);
+ break;
+ }
+ mem_p = *mem;
+ rv = siw_rx_data(mem_p, srx, &frx->pbl_idx,
+ sge->laddr + frx->sge_off, sge_bytes);
+ if (unlikely(rv != sge_bytes)) {
+ wqe->processed += rcvd_bytes;
+
+ siw_init_terminate(qp, TERM_ERROR_LAYER_DDP,
+ DDP_ETYPE_CATASTROPHIC,
+ DDP_ECODE_CATASTROPHIC, 0);
+ return -EINVAL;
+ }
+ frx->sge_off += rv;
+
+ if (frx->sge_off == sge->length) {
+ frx->sge_idx++;
+ frx->sge_off = 0;
+ frx->pbl_idx = 0;
+ }
+ data_bytes -= rv;
+ rcvd_bytes += rv;
+
+ srx->fpdu_part_rem -= rv;
+ srx->fpdu_part_rcvd += rv;
+ }
+ wqe->processed += rcvd_bytes;
+
+ if (!srx->fpdu_part_rem)
+ return 0;
+
+ return (rv < 0) ? rv : -EAGAIN;
+}
+
+/*
+ * siw_proc_write:
+ *
+ * Place incoming WRITE after referencing and checking target buffer
+ *
+ * Function supports partially received WRITEs (suspending/resuming
+ * current receive processing)
+ *
+ * return value:
+ * 0: reached the end of a DDP segment
+ * -EAGAIN: to be called again to finish the DDP segment
+ */
+int siw_proc_write(struct siw_qp *qp)
+{
+ struct siw_rx_stream *srx = &qp->rx_stream;
+ struct siw_rx_fpdu *frx = &qp->rx_tagged;
+ struct siw_mem *mem;
+ int bytes, rv;
+
+ if (srx->state == SIW_GET_DATA_START) {
+ if (!srx->fpdu_part_rem) /* zero length WRITE */
+ return 0;
+
+ rv = siw_write_check_ntoh(srx, frx);
+ if (unlikely(rv)) {
+ siw_qp_event(qp, IB_EVENT_QP_FATAL);
+ return rv;
+ }
+ }
+ bytes = min(srx->fpdu_part_rem, srx->skb_new);
+
+ if (frx->first_ddp_seg) {
+ struct siw_wqe *wqe = rx_wqe(frx);
+
+ rx_mem(frx) = siw_mem_id2obj(qp->sdev, srx->ddp_stag >> 8);
+ if (unlikely(!rx_mem(frx))) {
+ siw_dbg_qp(qp,
+ "sink stag not found/invalid, stag 0x%08x\n",
+ srx->ddp_stag);
+
+ siw_init_terminate(qp, TERM_ERROR_LAYER_DDP,
+ DDP_ETYPE_TAGGED_BUF,
+ DDP_ECODE_T_INVALID_STAG, 0);
+ return -EINVAL;
+ }
+ wqe->rqe.num_sge = 1;
+ rx_type(wqe) = SIW_OP_WRITE;
+ wqe->wr_status = SIW_WR_INPROGRESS;
+ }
+ mem = rx_mem(frx);
+
+ /*
+ * Check if application re-registered memory with different
+ * key field of STag.
+ */
+ if (unlikely(mem->stag != srx->ddp_stag)) {
+ siw_init_terminate(qp, TERM_ERROR_LAYER_DDP,
+ DDP_ETYPE_TAGGED_BUF,
+ DDP_ECODE_T_INVALID_STAG, 0);
+ return -EINVAL;
+ }
+ rv = siw_check_mem(qp->pd, mem, srx->ddp_to + srx->fpdu_part_rcvd,
+ IB_ACCESS_REMOTE_WRITE, bytes);
+ if (unlikely(rv)) {
+ siw_init_terminate(qp, TERM_ERROR_LAYER_DDP,
+ DDP_ETYPE_TAGGED_BUF, siw_tagged_error(-rv),
+ 0);
+
+ siw_qp_event(qp, IB_EVENT_QP_ACCESS_ERR);
+
+ return -EINVAL;
+ }
+
+ rv = siw_rx_data(mem, srx, &frx->pbl_idx,
+ srx->ddp_to + srx->fpdu_part_rcvd, bytes);
+ if (unlikely(rv != bytes)) {
+ siw_init_terminate(qp, TERM_ERROR_LAYER_DDP,
+ DDP_ETYPE_CATASTROPHIC,
+ DDP_ECODE_CATASTROPHIC, 0);
+ return -EINVAL;
+ }
+ srx->fpdu_part_rem -= rv;
+ srx->fpdu_part_rcvd += rv;
+
+ if (!srx->fpdu_part_rem) {
+ srx->ddp_to += srx->fpdu_part_rcvd;
+ return 0;
+ }
+ return -EAGAIN;
+}
+
+/*
+ * Inbound RREQ's cannot carry user data.
+ */
+int siw_proc_rreq(struct siw_qp *qp)
+{
+ struct siw_rx_stream *srx = &qp->rx_stream;
+
+ if (!srx->fpdu_part_rem)
+ return 0;
+
+ pr_warn("siw: [QP %u]: rreq with mpa len %d\n", qp_id(qp),
+ be16_to_cpu(srx->hdr.ctrl.mpa_len));
+
+ return -EPROTO;
+}
+
+/*
+ * siw_init_rresp:
+ *
+ * Process inbound RDMA READ REQ. Produce a pseudo READ RESPONSE WQE.
+ * Put it at the tail of the IRQ, if there is another WQE currently in
+ * transmit processing. If not, make it the current WQE to be processed
+ * and schedule transmit processing.
+ *
+ * Can be called from softirq context and from process
+ * context (RREAD socket loopback case!)
+ *
+ * return value:
+ * 0: success,
+ * failure code otherwise
+ */
+
+static int siw_init_rresp(struct siw_qp *qp, struct siw_rx_stream *srx)
+{
+ struct siw_wqe *tx_work = tx_wqe(qp);
+ struct siw_sqe *resp;
+
+ uint64_t raddr = be64_to_cpu(srx->hdr.rreq.sink_to),
+ laddr = be64_to_cpu(srx->hdr.rreq.source_to);
+ uint32_t length = be32_to_cpu(srx->hdr.rreq.read_size),
+ lkey = be32_to_cpu(srx->hdr.rreq.source_stag),
+ rkey = be32_to_cpu(srx->hdr.rreq.sink_stag),
+ msn = be32_to_cpu(srx->hdr.rreq.ddp_msn);
+
+ int run_sq = 1, rv = 0;
+ unsigned long flags;
+
+ if (unlikely(msn != srx->ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ])) {
+ siw_init_terminate(qp, TERM_ERROR_LAYER_DDP,
+ DDP_ETYPE_UNTAGGED_BUF,
+ DDP_ECODE_UT_INVALID_MSN_RANGE, 0);
+ return -EPROTO;
+ }
+ spin_lock_irqsave(&qp->sq_lock, flags);
+
+ if (unlikely(!qp->attrs.irq_size)) {
+ run_sq = 0;
+ goto error_irq;
+ }
+ if (tx_work->wr_status == SIW_WR_IDLE) {
+ /*
+ * immediately schedule READ response w/o
+ * consuming IRQ entry: IRQ must be empty.
+ */
+ tx_work->processed = 0;
+ tx_work->mem[0] = NULL;
+ tx_work->wr_status = SIW_WR_QUEUED;
+ resp = &tx_work->sqe;
+ } else {
+ resp = irq_alloc_free(qp);
+ run_sq = 0;
+ }
+ if (likely(resp)) {
+ resp->opcode = SIW_OP_READ_RESPONSE;
+
+ resp->sge[0].length = length;
+ resp->sge[0].laddr = laddr;
+ resp->sge[0].lkey = lkey;
+
+ /* Keep aside message sequence number for potential
+ * error reporting during Read Response generation.
+ */
+ resp->sge[1].length = msn;
+
+ resp->raddr = raddr;
+ resp->rkey = rkey;
+ resp->num_sge = length ? 1 : 0;
+
+ /* RRESP now valid as current TX wqe or placed into IRQ */
+ smp_store_mb(resp->flags, SIW_WQE_VALID);
+ } else {
+error_irq:
+ pr_warn("siw: [QP %u]: IRQ exceeded or null, size %d\n",
+ qp_id(qp), qp->attrs.irq_size);
+
+ siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP,
+ RDMAP_ETYPE_REMOTE_OPERATION,
+ RDMAP_ECODE_CATASTROPHIC_STREAM, 0);
+ rv = -EPROTO;
+ }
+
+ spin_unlock_irqrestore(&qp->sq_lock, flags);
+
+ if (run_sq)
+ rv = siw_sq_start(qp);
+
+ return rv;
+}
+
+/*
+ * Only called at start of Read.Response processing.
+ * Transfer pending Read from tip of ORQ into current rx wqe,
+ * but keep ORQ entry valid until Read.Response processing done.
+ * No Queue locking needed.
+ */
+static int siw_orqe_start_rx(struct siw_qp *qp)
+{
+ struct siw_sqe *orqe;
+ struct siw_wqe *wqe = NULL;
+
+ if (unlikely(!qp->attrs.orq_size))
+ return -EPROTO;
+
+ /* make sure ORQ indices are current */
+ smp_mb();
+
+ orqe = orq_get_current(qp);
+ if (READ_ONCE(orqe->flags) & SIW_WQE_VALID) {
+ /* RRESP is a TAGGED RDMAP operation */
+ wqe = rx_wqe(&qp->rx_tagged);
+ wqe->sqe.id = orqe->id;
+ wqe->sqe.opcode = orqe->opcode;
+ wqe->sqe.sge[0].laddr = orqe->sge[0].laddr;
+ wqe->sqe.sge[0].lkey = orqe->sge[0].lkey;
+ wqe->sqe.sge[0].length = orqe->sge[0].length;
+ wqe->sqe.flags = orqe->flags;
+ wqe->sqe.num_sge = 1;
+ wqe->bytes = orqe->sge[0].length;
+ wqe->processed = 0;
+ wqe->mem[0] = NULL;
+ /* make sure WQE is completely written before valid */
+ smp_wmb();
+ wqe->wr_status = SIW_WR_INPROGRESS;
+
+ return 0;
+ }
+ return -EPROTO;
+}
+
+/*
+ * siw_proc_rresp:
+ *
+ * Place incoming RRESP data into memory referenced by RREQ WQE
+ * which is at the tip of the ORQ
+ *
+ * Function supports partially received RRESP's (suspending/resuming
+ * current receive processing)
+ */
+int siw_proc_rresp(struct siw_qp *qp)
+{
+ struct siw_rx_stream *srx = &qp->rx_stream;
+ struct siw_rx_fpdu *frx = &qp->rx_tagged;
+ struct siw_wqe *wqe = rx_wqe(frx);
+ struct siw_mem **mem, *mem_p;
+ struct siw_sge *sge;
+ int bytes, rv;
+
+ if (frx->first_ddp_seg) {
+ if (unlikely(wqe->wr_status != SIW_WR_IDLE)) {
+ pr_warn("siw: [QP %u]: proc RRESP: status %d, op %d\n",
+ qp_id(qp), wqe->wr_status, wqe->sqe.opcode);
+ rv = -EPROTO;
+ goto error_term;
+ }
+ /*
+ * fetch pending RREQ from orq
+ */
+ rv = siw_orqe_start_rx(qp);
+ if (rv) {
+ pr_warn("siw: [QP %u]: ORQ empty, size %d\n",
+ qp_id(qp), qp->attrs.orq_size);
+ goto error_term;
+ }
+ rv = siw_rresp_check_ntoh(srx, frx);
+ if (unlikely(rv)) {
+ siw_qp_event(qp, IB_EVENT_QP_FATAL);
+ return rv;
+ }
+ } else {
+ if (unlikely(wqe->wr_status != SIW_WR_INPROGRESS)) {
+ pr_warn("siw: [QP %u]: resume RRESP: status %d\n",
+ qp_id(qp), wqe->wr_status);
+ rv = -EPROTO;
+ goto error_term;
+ }
+ }
+ if (!srx->fpdu_part_rem) /* zero length RRESPONSE */
+ return 0;
+
+ sge = wqe->sqe.sge; /* there is only one */
+ mem = &wqe->mem[0];
+
+ if (!(*mem)) {
+ /*
+ * check target memory which resolves memory on first fragment
+ */
+ rv = siw_check_sge(qp->pd, sge, mem, IB_ACCESS_LOCAL_WRITE, 0,
+ wqe->bytes);
+ if (unlikely(rv)) {
+ siw_dbg_qp(qp, "target mem check: %d\n", rv);
+ wqe->wc_status = SIW_WC_LOC_PROT_ERR;
+
+ siw_init_terminate(qp, TERM_ERROR_LAYER_DDP,
+ DDP_ETYPE_TAGGED_BUF,
+ siw_tagged_error(-rv), 0);
+
+ siw_qp_event(qp, IB_EVENT_QP_ACCESS_ERR);
+
+ return -EINVAL;
+ }
+ }
+ mem_p = *mem;
+
+ bytes = min(srx->fpdu_part_rem, srx->skb_new);
+ rv = siw_rx_data(mem_p, srx, &frx->pbl_idx,
+ sge->laddr + wqe->processed, bytes);
+ if (rv != bytes) {
+ wqe->wc_status = SIW_WC_GENERAL_ERR;
+ rv = -EINVAL;
+ goto error_term;
+ }
+ srx->fpdu_part_rem -= rv;
+ srx->fpdu_part_rcvd += rv;
+ wqe->processed += rv;
+
+ if (!srx->fpdu_part_rem) {
+ srx->ddp_to += srx->fpdu_part_rcvd;
+ return 0;
+ }
+ return -EAGAIN;
+
+error_term:
+ siw_init_terminate(qp, TERM_ERROR_LAYER_DDP, DDP_ETYPE_CATASTROPHIC,
+ DDP_ECODE_CATASTROPHIC, 0);
+ return rv;
+}
+
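+/* Account for 'length' bytes consumed from the current skb. */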
+static void siw_update_skb_rcvd(struct siw_rx_stream *srx, u16 length)
+{
+ srx->skb_offset += length;
+ srx->skb_new -= length;
+ srx->skb_copied += length;
+}
+
+int siw_proc_terminate(struct siw_qp *qp)
+{
+ struct siw_rx_stream *srx = &qp->rx_stream;
+ struct sk_buff *skb = srx->skb;
+ struct iwarp_terminate *term = &srx->hdr.terminate;
+ union iwarp_hdr term_info;
+ u8 *infop = (u8 *)&term_info;
+ enum rdma_opcode op;
+ u16 to_copy = sizeof(struct iwarp_ctrl);
+
+ pr_warn("siw: got TERMINATE. layer %d, type %d, code %d\n",
+ __rdmap_term_layer(term), __rdmap_term_etype(term),
+ __rdmap_term_ecode(term));
+
+ if (be32_to_cpu(term->ddp_qn) != RDMAP_UNTAGGED_QN_TERMINATE ||
+ be32_to_cpu(term->ddp_msn) !=
+ qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_TERMINATE] ||
+ be32_to_cpu(term->ddp_mo) != 0) {
+ pr_warn("siw: rx bogus TERM [QN x%08x, MSN x%08x, MO x%08x]\n",
+ be32_to_cpu(term->ddp_qn), be32_to_cpu(term->ddp_msn),
+ be32_to_cpu(term->ddp_mo));
+ return -ECONNRESET;
+ }
+ /*
+ * Receive remaining pieces of TERM if indicated
+ */
+ if (!term->flag_m)
+ return -ECONNRESET;
+
+ /* Do not bother to reassemble a network-fragmented
+ * TERM message
+ */
+ if (srx->skb_new < sizeof(struct iwarp_ctrl_tagged))
+ return -ECONNRESET;
+
+ memset(infop, 0, sizeof(term_info));
+
+ skb_copy_bits(skb, srx->skb_offset, infop, to_copy);
+
+ op = __rdmap_get_opcode(&term_info.ctrl);
+ if (op >= RDMAP_TERMINATE)
+ goto out;
+
+ infop += to_copy;
+ siw_update_skb_rcvd(srx, to_copy);
+ srx->fpdu_part_rcvd += to_copy;
+ srx->fpdu_part_rem -= to_copy;
+
+ to_copy = iwarp_pktinfo[op].hdr_len - to_copy;
+
+ /* Again, no network fragmented TERM's */
+ if (to_copy + MPA_CRC_SIZE > srx->skb_new)
+ return -ECONNRESET;
+
+ skb_copy_bits(skb, srx->skb_offset, infop, to_copy);
+
+ if (term->flag_r) {
+ siw_dbg_qp(qp, "TERM reports RDMAP hdr type %u, len %u (%s)\n",
+ op, be16_to_cpu(term_info.ctrl.mpa_len),
+ term->flag_m ? "valid" : "invalid");
+ } else if (term->flag_d) {
+ siw_dbg_qp(qp, "TERM reports DDP hdr type %u, len %u (%s)\n",
+ op, be16_to_cpu(term_info.ctrl.mpa_len),
+ term->flag_m ? "valid" : "invalid");
+ }
+out:
+ siw_update_skb_rcvd(srx, to_copy);
+ srx->fpdu_part_rcvd += to_copy;
+ srx->fpdu_part_rem -= to_copy;
+
+ return -ECONNRESET;
+}
+
+static int siw_get_trailer(struct siw_qp *qp, struct siw_rx_stream *srx)
+{
+ struct sk_buff *skb = srx->skb;
+ int avail = min(srx->skb_new, srx->fpdu_part_rem);
+ u8 *tbuf = (u8 *)&srx->trailer.crc - srx->pad;
+ __wsum crc_in, crc_own = 0;
+
+ siw_dbg_qp(qp, "expected %d, available %d, pad %u\n",
+ srx->fpdu_part_rem, srx->skb_new, srx->pad);
+
+ skb_copy_bits(skb, srx->skb_offset, tbuf, avail);
+
+ siw_update_skb_rcvd(srx, avail);
+ srx->fpdu_part_rem -= avail;
+
+ if (srx->fpdu_part_rem)
+ return -EAGAIN;
+
+ if (!srx->mpa_crc_enabled)
+ return 0;
+
+ if (srx->pad)
+ siw_crc_update(&srx->mpa_crc, tbuf, srx->pad);
+ /*
+ * CRC32 is computed, transmitted and received directly in NBO,
+ * so there's never a reason to convert byte order.
+ */
+ siw_crc_final(&srx->mpa_crc, (u8 *)&crc_own);
+ crc_in = (__force __wsum)srx->trailer.crc;
+
+ if (unlikely(crc_in != crc_own)) {
+ pr_warn("siw: crc error. in: %08x, own %08x, op %u\n",
+ crc_in, crc_own, qp->rx_stream.rdmap_op);
+
+ siw_init_terminate(qp, TERM_ERROR_LAYER_LLP,
+ LLP_ETYPE_MPA,
+ LLP_ECODE_RECEIVED_CRC, 0);
+ return -EINVAL;
+ }
+ return 0;
+}
+
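+/*
+ * Minimum-sized DDP/RDMAP header: the control part of a tagged frame.
+ * siw_get_hdr() copies this much first, validates version and opcode
+ * on it, and only then pulls in the remainder of longer headers.
+ */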
+#define MIN_DDP_HDR sizeof(struct iwarp_ctrl_tagged)
+
+static int siw_get_hdr(struct siw_rx_stream *srx)
+{
+ struct sk_buff *skb = srx->skb;
+ struct siw_qp *qp = rx_qp(srx);
+ struct iwarp_ctrl *c_hdr = &srx->hdr.ctrl;
+ struct siw_rx_fpdu *frx;
+ u8 opcode;
+ int bytes;
+
+ if (srx->fpdu_part_rcvd < MIN_DDP_HDR) {
+ /*
+ * copy a minimum sized (tagged) DDP frame control part
+ */
+ bytes = min_t(int, srx->skb_new,
+ MIN_DDP_HDR - srx->fpdu_part_rcvd);
+
+ skb_copy_bits(skb, srx->skb_offset,
+ (char *)c_hdr + srx->fpdu_part_rcvd, bytes);
+
+ siw_update_skb_rcvd(srx, bytes);
+ srx->fpdu_part_rcvd += bytes;
+ if (srx->fpdu_part_rcvd < MIN_DDP_HDR)
+ return -EAGAIN;
+
+ if (unlikely(__ddp_get_version(c_hdr) != DDP_VERSION)) {
+ enum ddp_etype etype;
+ enum ddp_ecode ecode;
+
+ pr_warn("siw: received ddp version unsupported %d\n",
+ __ddp_get_version(c_hdr));
+
+ if (c_hdr->ddp_rdmap_ctrl & DDP_FLAG_TAGGED) {
+ etype = DDP_ETYPE_TAGGED_BUF;
+ ecode = DDP_ECODE_T_VERSION;
+ } else {
+ etype = DDP_ETYPE_UNTAGGED_BUF;
+ ecode = DDP_ECODE_UT_VERSION;
+ }
+ siw_init_terminate(rx_qp(srx), TERM_ERROR_LAYER_DDP,
+ etype, ecode, 0);
+ return -EINVAL;
+ }
+ if (unlikely(__rdmap_get_version(c_hdr) != RDMAP_VERSION)) {
+ pr_warn("siw: received rdmap version unsupported %d\n",
+ __rdmap_get_version(c_hdr));
+
+ siw_init_terminate(rx_qp(srx), TERM_ERROR_LAYER_RDMAP,
+ RDMAP_ETYPE_REMOTE_OPERATION,
+ RDMAP_ECODE_VERSION, 0);
+ return -EINVAL;
+ }
+ opcode = __rdmap_get_opcode(c_hdr);
+
+ if (opcode > RDMAP_TERMINATE) {
+ pr_warn("siw: received unknown packet type %u\n",
+ opcode);
+
+ siw_init_terminate(rx_qp(srx), TERM_ERROR_LAYER_RDMAP,
+ RDMAP_ETYPE_REMOTE_OPERATION,
+ RDMAP_ECODE_OPCODE, 0);
+ return -EINVAL;
+ }
+ siw_dbg_qp(rx_qp(srx), "new header, opcode %u\n", opcode);
+ } else {
+ opcode = __rdmap_get_opcode(c_hdr);
+ }
+ set_rx_fpdu_context(qp, opcode);
+ frx = qp->rx_fpdu;
+
+ /*
+ * Figure out len of current hdr: variable length of
+ * iwarp hdr may force us to copy hdr information in
+ * two steps. Only tagged DDP messages are already
+ * completely received.
+ */
+ if (iwarp_pktinfo[opcode].hdr_len > sizeof(struct iwarp_ctrl_tagged)) {
+ int hdrlen = iwarp_pktinfo[opcode].hdr_len;
+
+ bytes = min_t(int, hdrlen - MIN_DDP_HDR, srx->skb_new);
+
+ skb_copy_bits(skb, srx->skb_offset,
+ (char *)c_hdr + srx->fpdu_part_rcvd, bytes);
+
+ siw_update_skb_rcvd(srx, bytes);
+ srx->fpdu_part_rcvd += bytes;
+ if (srx->fpdu_part_rcvd < hdrlen)
+ return -EAGAIN;
+ }
+
+ /*
+ * DDP/RDMAP header receive completed. Check if the current
+ * DDP segment starts a new RDMAP message or continues a previously
+ * started RDMAP message.
+ *
+ * Alternating reception of DDP segments (or FPDUs) from incomplete
+ * tagged and untagged RDMAP messages is supported, as long as
+ * the current tagged or untagged message gets eventually completed
+ * w/o intersection from another message of the same type
+ * (tagged/untagged). E.g., a WRITE can get intersected by a SEND,
+ * but not by a READ RESPONSE etc.
+ */
+ if (srx->mpa_crc_enabled) {
+ /*
+ * Restart CRC computation
+ */
+ siw_crc_init(&srx->mpa_crc);
+ siw_crc_update(&srx->mpa_crc, c_hdr, srx->fpdu_part_rcvd);
+ }
+ if (frx->more_ddp_segs) {
+ frx->first_ddp_seg = 0;
+ if (frx->prev_rdmap_op != opcode) {
+ pr_warn("siw: packet intersection: %u : %u\n",
+ frx->prev_rdmap_op, opcode);
+ /*
+ * The last inbound RDMA operation of same type
+ * (tagged or untagged) is left unfinished.
+ * To complete it in error, make it the current
+ * operation again, even with the header already
+ * overwritten. For error handling, only the opcode
+ * and current rx context are relevant.
+ */
+ set_rx_fpdu_context(qp, frx->prev_rdmap_op);
+ __rdmap_set_opcode(c_hdr, frx->prev_rdmap_op);
+ return -EPROTO;
+ }
+ } else {
+ frx->prev_rdmap_op = opcode;
+ frx->first_ddp_seg = 1;
+ }
+ frx->more_ddp_segs = c_hdr->ddp_rdmap_ctrl & DDP_FLAG_LAST ? 0 : 1;
+
+ return 0;
+}
+
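+/*
+ * Runs after a Read.Response completed: retire the ORQ entry at the
+ * current get index and, if SQ processing was fenced (full ORQ or an
+ * explicitly fenced work request), resume it once the ORQ has room
+ * or has drained.
+ */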
+static int siw_check_tx_fence(struct siw_qp *qp)
+{
+ struct siw_wqe *tx_waiting = tx_wqe(qp);
+ struct siw_sqe *rreq;
+ int resume_tx = 0, rv = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->orq_lock, flags);
+
+ /* free current orq entry */
+ rreq = orq_get_current(qp);
+ WRITE_ONCE(rreq->flags, 0);
+
+ qp->orq_get++;
+
+ if (qp->tx_ctx.orq_fence) {
+ if (unlikely(tx_waiting->wr_status != SIW_WR_QUEUED)) {
+ pr_warn("siw: [QP %u]: fence resume: bad status %d\n",
+ qp_id(qp), tx_waiting->wr_status);
+ rv = -EPROTO;
+ goto out;
+ }
+ /* resume SQ processing, if possible */
+ if (tx_waiting->sqe.opcode == SIW_OP_READ ||
+ tx_waiting->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
+
+ /* SQ processing was stopped because of a full ORQ */
+ rreq = orq_get_free(qp);
+ if (unlikely(!rreq)) {
+ pr_warn("siw: [QP %u]: no ORQE\n", qp_id(qp));
+ rv = -EPROTO;
+ goto out;
+ }
+ siw_read_to_orq(rreq, &tx_waiting->sqe);
+
+ qp->orq_put++;
+ qp->tx_ctx.orq_fence = 0;
+ resume_tx = 1;
+
+ } else if (siw_orq_empty(qp)) {
+ /*
+ * SQ processing was stopped by fenced work request.
+ * Resume since all previous Read's are now completed.
+ */
+ qp->tx_ctx.orq_fence = 0;
+ resume_tx = 1;
+ }
+ }
+out:
+ spin_unlock_irqrestore(&qp->orq_lock, flags);
+
+ if (resume_tx)
+ rv = siw_sq_start(qp);
+
+ return rv;
+}
+
+/*
+ * siw_rdmap_complete()
+ *
+ * Complete processing of an RDMA message after receiving all
+ * DDP segments, or abort processing after encountering an error.
+ *
+ * o SENDs + RRESPs need completion,
+ * o RREQs need READ RESPONSE initialization,
+ * o WRITEs need memory dereferencing
+ *
+ * TODO: Failed WRITEs need local error to be surfaced.
+ */
+static int siw_rdmap_complete(struct siw_qp *qp, int error)
+{
+ struct siw_rx_stream *srx = &qp->rx_stream;
+ struct siw_wqe *wqe = rx_wqe(qp->rx_fpdu);
+ enum siw_wc_status wc_status = wqe->wc_status;
+ u8 opcode = __rdmap_get_opcode(&srx->hdr.ctrl);
+ int rv = 0;
+
+ switch (opcode) {
+ case RDMAP_SEND_SE:
+ case RDMAP_SEND_SE_INVAL:
+ wqe->rqe.flags |= SIW_WQE_SOLICITED;
+ fallthrough;
+
+ case RDMAP_SEND:
+ case RDMAP_SEND_INVAL:
+ if (wqe->wr_status == SIW_WR_IDLE)
+ break;
+
+ srx->ddp_msn[RDMAP_UNTAGGED_QN_SEND]++;
+
+ if (error != 0 && wc_status == SIW_WC_SUCCESS)
+ wc_status = SIW_WC_GENERAL_ERR;
+ /*
+ * Handle STag invalidation request
+ */
+ if (wc_status == SIW_WC_SUCCESS &&
+ (opcode == RDMAP_SEND_INVAL ||
+ opcode == RDMAP_SEND_SE_INVAL)) {
+ rv = siw_invalidate_stag(qp->pd, srx->inval_stag);
+ if (rv) {
+ siw_init_terminate(
+ qp, TERM_ERROR_LAYER_RDMAP,
+ rv == -EACCES ?
+ RDMAP_ETYPE_REMOTE_PROTECTION :
+ RDMAP_ETYPE_REMOTE_OPERATION,
+ RDMAP_ECODE_CANNOT_INVALIDATE, 0);
+
+ wc_status = SIW_WC_REM_INV_REQ_ERR;
+ }
+ rv = siw_rqe_complete(qp, &wqe->rqe, wqe->processed,
+ rv ? 0 : srx->inval_stag,
+ wc_status);
+ } else {
+ rv = siw_rqe_complete(qp, &wqe->rqe, wqe->processed,
+ 0, wc_status);
+ }
+ siw_wqe_put_mem(wqe, SIW_OP_RECEIVE);
+ break;
+
+ case RDMAP_RDMA_READ_RESP:
+ if (wqe->wr_status == SIW_WR_IDLE)
+ break;
+
+ if (error != 0) {
+ if ((srx->state == SIW_GET_HDR &&
+ qp->rx_fpdu->first_ddp_seg) || error == -ENODATA)
+ /* possible RREQ in ORQ left untouched */
+ break;
+
+ if (wc_status == SIW_WC_SUCCESS)
+ wc_status = SIW_WC_GENERAL_ERR;
+ } else if (rdma_is_kernel_res(&qp->base_qp.res) &&
+ rx_type(wqe) == SIW_OP_READ_LOCAL_INV) {
+ /*
+ * Handle any STag invalidation request
+ */
+ rv = siw_invalidate_stag(qp->pd, wqe->sqe.sge[0].lkey);
+ if (rv) {
+ siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP,
+ RDMAP_ETYPE_CATASTROPHIC,
+ RDMAP_ECODE_UNSPECIFIED, 0);
+
+ if (wc_status == SIW_WC_SUCCESS) {
+ wc_status = SIW_WC_GENERAL_ERR;
+ error = rv;
+ }
+ }
+ }
+ /*
+ * All errors turn the wqe into signalled.
+ */
+ if ((wqe->sqe.flags & SIW_WQE_SIGNALLED) || error != 0)
+ rv = siw_sqe_complete(qp, &wqe->sqe, wqe->processed,
+ wc_status);
+ siw_wqe_put_mem(wqe, SIW_OP_READ);
+
+ if (!error) {
+ rv = siw_check_tx_fence(qp);
+ } else {
+ /* Disable current ORQ element */
+ if (qp->attrs.orq_size)
+ WRITE_ONCE(orq_get_current(qp)->flags, 0);
+ }
+ break;
+
+ case RDMAP_RDMA_READ_REQ:
+ if (!error) {
+ rv = siw_init_rresp(qp, srx);
+ srx->ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ]++;
+ }
+ break;
+
+ case RDMAP_RDMA_WRITE:
+ if (wqe->wr_status == SIW_WR_IDLE)
+ break;
+
+ /*
+ * Drop the reference to the memory object if one is
+ * attached to the receive context (inbound WRITE).
+ * A zero-length WRITE is allowed but creates no
+ * memory reference.
+ */
+ if (rx_mem(&qp->rx_tagged)) {
+ siw_mem_put(rx_mem(&qp->rx_tagged));
+ rx_mem(&qp->rx_tagged) = NULL;
+ }
+ break;
+
+ default:
+ break;
+ }
+ wqe->wr_status = SIW_WR_IDLE;
+
+ return rv;
+}
+
+/*
+ * siw_tcp_rx_data()
+ *
+ * Main routine to consume inbound TCP payload
+ *
+ * @rd_desc: read descriptor
+ * @skb: socket buffer
+ * @off: offset in skb
+ * @len: skb->len - offset : payload in skb
+ */
+int siw_tcp_rx_data(read_descriptor_t *rd_desc, struct sk_buff *skb,
+ unsigned int off, size_t len)
+{
+ struct siw_qp *qp = rd_desc->arg.data;
+ struct siw_rx_stream *srx = &qp->rx_stream;
+ int rv;
+
+ srx->skb = skb;
+ srx->skb_new = skb->len - off;
+ srx->skb_offset = off;
+ srx->skb_copied = 0;
+
+ siw_dbg_qp(qp, "new data, len %d\n", srx->skb_new);
+
+ while (srx->skb_new) {
+ int run_completion = 1;
+
+ if (unlikely(srx->rx_suspend)) {
+ /* Do not process any more data */
+ srx->skb_copied += srx->skb_new;
+ break;
+ }
+ switch (srx->state) {
+ case SIW_GET_HDR:
+ rv = siw_get_hdr(srx);
+ if (!rv) {
+ srx->fpdu_part_rem =
+ be16_to_cpu(srx->hdr.ctrl.mpa_len) -
+ srx->fpdu_part_rcvd + MPA_HDR_SIZE;
+
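+ /*
+ * mpa_len counts the FPDU contents excluding the
+ * 2-byte MPA length field, pad and CRC. Adding
+ * MPA_HDR_SIZE and subtracting the header bytes
+ * already consumed leaves the DDP payload still
+ * expected. That payload is padded to a 4-byte
+ * boundary before the CRC, e.g. 14 remaining bytes
+ * need 2 pad bytes (-14 & 0x3 == 2).
+ */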
+ if (srx->fpdu_part_rem)
+ srx->pad = -srx->fpdu_part_rem & 0x3;
+ else
+ srx->pad = 0;
+
+ srx->state = SIW_GET_DATA_START;
+ srx->fpdu_part_rcvd = 0;
+ }
+ break;
+
+ case SIW_GET_DATA_MORE:
+ /*
+ * Another data fragment of the same DDP segment.
+ * Setting first_ddp_seg = 0 avoids repeating
+ * initializations that shall occur only once per
+ * DDP segment.
+ */
+ qp->rx_fpdu->first_ddp_seg = 0;
+ fallthrough;
+
+ case SIW_GET_DATA_START:
+ /*
+ * Headers will be checked by the opcode-specific
+ * data receive function below.
+ */
+ rv = iwarp_pktinfo[qp->rx_stream.rdmap_op].rx_data(qp);
+ if (!rv) {
+ int mpa_len =
+ be16_to_cpu(srx->hdr.ctrl.mpa_len)
+ + MPA_HDR_SIZE;
+
+ srx->fpdu_part_rem = (-mpa_len & 0x3)
+ + MPA_CRC_SIZE;
+ srx->fpdu_part_rcvd = 0;
+ srx->state = SIW_GET_TRAILER;
+ } else {
+ if (unlikely(rv == -ECONNRESET))
+ run_completion = 0;
+ else
+ srx->state = SIW_GET_DATA_MORE;
+ }
+ break;
+
+ case SIW_GET_TRAILER:
+ /*
+ * read CRC + any padding
+ */
+ rv = siw_get_trailer(qp, srx);
+ if (likely(!rv)) {
+ /*
+ * FPDU completed.
+ * complete RDMAP message if last fragment
+ */
+ srx->state = SIW_GET_HDR;
+ srx->fpdu_part_rcvd = 0;
+
+ if (!(srx->hdr.ctrl.ddp_rdmap_ctrl &
+ DDP_FLAG_LAST))
+ /* more frags */
+ break;
+
+ rv = siw_rdmap_complete(qp, 0);
+ run_completion = 0;
+ }
+ break;
+
+ default:
+ pr_warn("QP[%u]: RX out of state\n", qp_id(qp));
+ rv = -EPROTO;
+ run_completion = 0;
+ }
+ if (unlikely(rv != 0 && rv != -EAGAIN)) {
+ if ((srx->state > SIW_GET_HDR ||
+ qp->rx_fpdu->more_ddp_segs) && run_completion)
+ siw_rdmap_complete(qp, rv);
+
+ siw_dbg_qp(qp, "rx error %d, rx state %d\n", rv,
+ srx->state);
+
+ siw_qp_cm_drop(qp, 1);
+
+ break;
+ }
+ if (rv) {
+ siw_dbg_qp(qp, "fpdu fragment, state %d, missing %d\n",
+ srx->state, srx->fpdu_part_rem);
+ break;
+ }
+ }
+ return srx->skb_copied;
+}
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
new file mode 100644
index 000000000000..f7dd32c6e5ba
--- /dev/null
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -0,0 +1,1306 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/scatterlist.h>
+#include <linux/highmem.h>
+#include <net/tcp.h>
+
+#include <rdma/iw_cm.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "siw.h"
+#include "siw_verbs.h"
+#include "siw_mem.h"
+
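+/*
+ * Largest payload that can be carried inline with the header: the
+ * space left in struct siw_rreq_pkt beyond a SEND header, rounded
+ * down to a multiple of 8 bytes by the 0xF8 mask.
+ */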
+#define MAX_HDR_INLINE \
+ (((uint32_t)(sizeof(struct siw_rreq_pkt) - \
+ sizeof(struct iwarp_send))) & 0xF8)
+
+static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
+{
+ struct siw_pbl *pbl = mem->pbl;
+ u64 offset = addr - mem->va;
+ dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
+
+ if (paddr)
+ return ib_virt_dma_to_page(paddr);
+
+ return NULL;
+}
+
+static struct page *siw_get_page(struct siw_mem *mem, struct siw_sge *sge,
+ unsigned long offset, int *pbl_idx)
+{
+ if (!mem->is_pbl)
+ return siw_get_upage(mem->umem, sge->laddr + offset);
+ else
+ return siw_get_pblpage(mem, sge->laddr + offset, pbl_idx);
+}
+
+/*
+ * Copy short payload at provided destination payload address
+ */
+static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
+{
+ struct siw_wqe *wqe = &c_tx->wqe_active;
+ struct siw_sge *sge = &wqe->sqe.sge[0];
+ u32 bytes = sge->length;
+
+ if (bytes > MAX_HDR_INLINE || wqe->sqe.num_sge != 1)
+ return MAX_HDR_INLINE + 1;
+
+ if (!bytes)
+ return 0;
+
+ if (tx_flags(wqe) & SIW_WQE_INLINE) {
+ memcpy(paddr, &wqe->sqe.sge[1], bytes);
+ } else {
+ struct siw_mem *mem = wqe->mem[0];
+
+ if (!mem->mem_obj) {
+ /* Kernel client using kva */
+ memcpy(paddr, ib_virt_dma_to_ptr(sge->laddr), bytes);
+ } else if (c_tx->in_syscall) {
+ if (copy_from_user(paddr, u64_to_user_ptr(sge->laddr),
+ bytes))
+ return -EFAULT;
+ } else {
+ unsigned int off = sge->laddr & ~PAGE_MASK;
+ struct page *p;
+ char *buffer;
+ int pbl_idx = 0;
+
+ p = siw_get_page(mem, sge, 0, &pbl_idx);
+ if (unlikely(!p))
+ return -EFAULT;
+
+ buffer = kmap_local_page(p);
+
+ if (likely(PAGE_SIZE - off >= bytes)) {
+ memcpy(paddr, buffer + off, bytes);
+ } else {
+ unsigned long part = bytes - (PAGE_SIZE - off);
+
+ memcpy(paddr, buffer + off, part);
+ kunmap_local(buffer);
+
+ p = siw_get_page(mem, sge, part, &pbl_idx);
+ if (unlikely(!p))
+ return -EFAULT;
+
+ buffer = kmap_local_page(p);
+ memcpy(paddr + part, buffer, bytes - part);
+ }
+ kunmap_local(buffer);
+ }
+ }
+ return (int)bytes;
+}
+
+#define PKT_FRAGMENTED 1
+#define PKT_COMPLETE 0
+
+/*
+ * siw_qp_prepare_tx()
+ *
+ * Prepare tx state for sending out one fpdu. Builds complete pkt
+ * if no user data or only immediate data are present.
+ *
+ * returns PKT_COMPLETE if complete pkt built, PKT_FRAGMENTED otherwise.
+ */
+static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
+{
+ struct siw_wqe *wqe = &c_tx->wqe_active;
+ char *crc = NULL;
+ int data = 0;
+
+ switch (tx_type(wqe)) {
+ case SIW_OP_READ:
+ case SIW_OP_READ_LOCAL_INV:
+ memcpy(&c_tx->pkt.ctrl,
+ &iwarp_pktinfo[RDMAP_RDMA_READ_REQ].ctrl,
+ sizeof(struct iwarp_ctrl));
+
+ c_tx->pkt.rreq.rsvd = 0;
+ c_tx->pkt.rreq.ddp_qn = htonl(RDMAP_UNTAGGED_QN_RDMA_READ);
+ c_tx->pkt.rreq.ddp_msn =
+ htonl(++c_tx->ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ]);
+ c_tx->pkt.rreq.ddp_mo = 0;
+ c_tx->pkt.rreq.sink_stag = htonl(wqe->sqe.sge[0].lkey);
+ c_tx->pkt.rreq.sink_to =
+ cpu_to_be64(wqe->sqe.sge[0].laddr);
+ c_tx->pkt.rreq.source_stag = htonl(wqe->sqe.rkey);
+ c_tx->pkt.rreq.source_to = cpu_to_be64(wqe->sqe.raddr);
+ c_tx->pkt.rreq.read_size = htonl(wqe->sqe.sge[0].length);
+
+ c_tx->ctrl_len = sizeof(struct iwarp_rdma_rreq);
+ crc = (char *)&c_tx->pkt.rreq_pkt.crc;
+ break;
+
+ case SIW_OP_SEND:
+ if (tx_flags(wqe) & SIW_WQE_SOLICITED)
+ memcpy(&c_tx->pkt.ctrl,
+ &iwarp_pktinfo[RDMAP_SEND_SE].ctrl,
+ sizeof(struct iwarp_ctrl));
+ else
+ memcpy(&c_tx->pkt.ctrl, &iwarp_pktinfo[RDMAP_SEND].ctrl,
+ sizeof(struct iwarp_ctrl));
+
+ c_tx->pkt.send.ddp_qn = RDMAP_UNTAGGED_QN_SEND;
+ c_tx->pkt.send.ddp_msn =
+ htonl(++c_tx->ddp_msn[RDMAP_UNTAGGED_QN_SEND]);
+ c_tx->pkt.send.ddp_mo = 0;
+
+ c_tx->pkt.send_inv.inval_stag = 0;
+
+ c_tx->ctrl_len = sizeof(struct iwarp_send);
+
+ crc = (char *)&c_tx->pkt.send_pkt.crc;
+ data = siw_try_1seg(c_tx, crc);
+ break;
+
+ case SIW_OP_SEND_REMOTE_INV:
+ if (tx_flags(wqe) & SIW_WQE_SOLICITED)
+ memcpy(&c_tx->pkt.ctrl,
+ &iwarp_pktinfo[RDMAP_SEND_SE_INVAL].ctrl,
+ sizeof(struct iwarp_ctrl));
+ else
+ memcpy(&c_tx->pkt.ctrl,
+ &iwarp_pktinfo[RDMAP_SEND_INVAL].ctrl,
+ sizeof(struct iwarp_ctrl));
+
+ c_tx->pkt.send.ddp_qn = RDMAP_UNTAGGED_QN_SEND;
+ c_tx->pkt.send.ddp_msn =
+ htonl(++c_tx->ddp_msn[RDMAP_UNTAGGED_QN_SEND]);
+ c_tx->pkt.send.ddp_mo = 0;
+
+ c_tx->pkt.send_inv.inval_stag = cpu_to_be32(wqe->sqe.rkey);
+
+ c_tx->ctrl_len = sizeof(struct iwarp_send_inv);
+
+ crc = (char *)&c_tx->pkt.send_pkt.crc;
+ data = siw_try_1seg(c_tx, crc);
+ break;
+
+ case SIW_OP_WRITE:
+ memcpy(&c_tx->pkt.ctrl, &iwarp_pktinfo[RDMAP_RDMA_WRITE].ctrl,
+ sizeof(struct iwarp_ctrl));
+
+ c_tx->pkt.rwrite.sink_stag = htonl(wqe->sqe.rkey);
+ c_tx->pkt.rwrite.sink_to = cpu_to_be64(wqe->sqe.raddr);
+ c_tx->ctrl_len = sizeof(struct iwarp_rdma_write);
+
+ crc = (char *)&c_tx->pkt.write_pkt.crc;
+ data = siw_try_1seg(c_tx, crc);
+ break;
+
+ case SIW_OP_READ_RESPONSE:
+ memcpy(&c_tx->pkt.ctrl,
+ &iwarp_pktinfo[RDMAP_RDMA_READ_RESP].ctrl,
+ sizeof(struct iwarp_ctrl));
+
+ /* NBO */
+ c_tx->pkt.rresp.sink_stag = cpu_to_be32(wqe->sqe.rkey);
+ c_tx->pkt.rresp.sink_to = cpu_to_be64(wqe->sqe.raddr);
+
+ c_tx->ctrl_len = sizeof(struct iwarp_rdma_rresp);
+
+ crc = (char *)&c_tx->pkt.write_pkt.crc;
+ data = siw_try_1seg(c_tx, crc);
+ break;
+
+ default:
+ siw_dbg_qp(tx_qp(c_tx), "stale wqe type %d\n", tx_type(wqe));
+ return -EOPNOTSUPP;
+ }
+ if (unlikely(data < 0))
+ return data;
+
+ c_tx->ctrl_sent = 0;
+
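+ /*
+ * Any short payload was already copied right behind the header
+ * by siw_try_1seg(); pad and CRC complete the FPDU, which then
+ * goes out as a single control-type packet (PKT_COMPLETE).
+ */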
+ if (data <= MAX_HDR_INLINE) {
+ if (data) {
+ wqe->processed = data;
+
+ c_tx->pkt.ctrl.mpa_len =
+ htons(c_tx->ctrl_len + data - MPA_HDR_SIZE);
+
+ /* Add pad, if needed */
+ data += -(int)data & 0x3;
+ /* advance CRC location after payload */
+ crc += data;
+ c_tx->ctrl_len += data;
+
+ if (!(c_tx->pkt.ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED))
+ c_tx->pkt.c_untagged.ddp_mo = 0;
+ else
+ c_tx->pkt.c_tagged.ddp_to =
+ cpu_to_be64(wqe->sqe.raddr);
+ }
+
+ *(u32 *)crc = 0;
+ /*
+ * Do complete CRC if enabled and short packet
+ */
+ if (c_tx->mpa_crc_enabled)
+ siw_crc_oneshot(&c_tx->pkt, c_tx->ctrl_len, (u8 *)crc);
+ c_tx->ctrl_len += MPA_CRC_SIZE;
+
+ return PKT_COMPLETE;
+ }
+ c_tx->ctrl_len += MPA_CRC_SIZE;
+ c_tx->sge_idx = 0;
+ c_tx->sge_off = 0;
+ c_tx->pbl_idx = 0;
+
+ /*
+ * Allow direct sending out of user buffer if WR is non signalled
+ * and payload is over threshold.
+ * Per RDMA verbs, the application should not change the send buffer
+ * until the work is completed. In iWARP, work completion only means
+ * local delivery to TCP. TCP may reuse the buffer for
+ * retransmission. Changing unsent data also breaks the CRC,
+ * if applied.
+ */
+ if (c_tx->zcopy_tx && wqe->bytes >= SENDPAGE_THRESH &&
+ !(tx_flags(wqe) & SIW_WQE_SIGNALLED))
+ c_tx->use_sendpage = 1;
+ else
+ c_tx->use_sendpage = 0;
+
+ return PKT_FRAGMENTED;
+}
+
+static noinline_for_stack int
+siw_sendmsg(struct socket *sock, unsigned int msg_flags,
+ struct kvec *vec, size_t num, size_t len)
+{
+ struct msghdr msg = { .msg_flags = msg_flags };
+
+ return kernel_sendmsg(sock, &msg, vec, num, len);
+}
+
+/*
+ * Send out one complete control type FPDU, or header of FPDU carrying
+ * data. Used for fixed sized packets like Read.Requests or zero length
+ * SENDs, WRITEs, READ.Responses, or header only.
+ */
+static int siw_tx_ctrl(struct siw_iwarp_tx *c_tx, struct socket *s,
+ int flags)
+{
+ struct kvec iov = { .iov_base =
+ (char *)&c_tx->pkt.ctrl + c_tx->ctrl_sent,
+ .iov_len = c_tx->ctrl_len - c_tx->ctrl_sent };
+
+ int rv = siw_sendmsg(s, flags, &iov, 1, iov.iov_len);
+
+ if (rv >= 0) {
+ c_tx->ctrl_sent += rv;
+
+ if (c_tx->ctrl_sent == c_tx->ctrl_len)
+ rv = 0;
+ else
+ rv = -EAGAIN;
+ }
+ return rv;
+}
+
+/*
+ * 0copy TCP transmit interface: Use MSG_SPLICE_PAGES.
+ *
+ * Using sendpage to push page by page appears to be less efficient
+ * than using sendmsg, even if data are copied.
+ *
+ * A general performance limitation might be the extra four-byte
+ * trailer checksum segment to be pushed after user data.
+ */
+static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset,
+ size_t size)
+{
+ struct bio_vec bvec;
+ struct msghdr msg = {
+ .msg_flags = (MSG_MORE | MSG_DONTWAIT | MSG_SPLICE_PAGES),
+ };
+ struct sock *sk = s->sk;
+ int i = 0, rv = 0, sent = 0;
+
+ while (size) {
+ size_t bytes = min_t(size_t, PAGE_SIZE - offset, size);
+
+ if (size + offset <= PAGE_SIZE)
+ msg.msg_flags &= ~MSG_MORE;
+
+ tcp_rate_check_app_limited(sk);
+ if (!sendpage_ok(page[i]))
+ msg.msg_flags &= ~MSG_SPLICE_PAGES;
+ bvec_set_page(&bvec, page[i], bytes, offset);
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, bytes);
+
+try_page_again:
+ lock_sock(sk);
+ rv = tcp_sendmsg_locked(sk, &msg, bytes);
+ release_sock(sk);
+
+ if (rv > 0) {
+ size -= rv;
+ sent += rv;
+ if (rv != bytes) {
+ bytes -= rv;
+ goto try_page_again;
+ }
+ offset = 0;
+ } else {
+ if (rv == -EAGAIN || rv == 0)
+ break;
+ return rv;
+ }
+ i++;
+ }
+ return sent;
+}
+
+/*
+ * siw_0copy_tx()
+ *
+ * Pushes list of pages to TCP socket. If pages from multiple
+ * SGE's, all referenced pages of each SGE are pushed in one
+ * shot.
+ */
+static int siw_0copy_tx(struct socket *s, struct page **page,
+ struct siw_sge *sge, unsigned int offset,
+ unsigned int size)
+{
+ int i = 0, sent = 0, rv;
+ int sge_bytes = min(sge->length - offset, size);
+
+ offset = (sge->laddr + offset) & ~PAGE_MASK;
+
+ while (sent != size) {
+ rv = siw_tcp_sendpages(s, &page[i], offset, sge_bytes);
+ if (rv >= 0) {
+ sent += rv;
+ if (size == sent || sge_bytes > rv)
+ break;
+
+ i += PAGE_ALIGN(sge_bytes + offset) >> PAGE_SHIFT;
+ sge++;
+ sge_bytes = min(sge->length, size - sent);
+ offset = sge->laddr & ~PAGE_MASK;
+ } else {
+ sent = rv;
+ break;
+ }
+ }
+ return sent;
+}
+
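+/*
+ * Size of the tx context trailer buffer: a 4-byte pad area (of which
+ * at most 3 bytes are ever sent) followed by the 4-byte MPA CRC.
+ */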
+#define MAX_TRAILER (MPA_CRC_SIZE + 4)
+
+static void siw_unmap_pages(struct kvec *iov, unsigned long kmap_mask, int len)
+{
+ int i;
+
+ /*
+ * Work backwards through the array to honor the kmap_local_page()
+ * ordering requirements.
+ */
+ for (i = (len-1); i >= 0; i--) {
+ if (kmap_mask & BIT(i)) {
+ unsigned long addr = (unsigned long)iov[i].iov_base;
+
+ kunmap_local((void *)(addr & PAGE_MASK));
+ }
+ }
+}
+
+/*
+ * siw_tx_hdt() tries to push a complete packet to TCP where all
+ * packet fragments are referenced by the elements of one iovec.
+ * For the data portion, each involved page must be referenced by
+ * one extra element. All sge's data can be non-aligned to page
+ * boundaries. Two more elements reference the iWARP header
+ * and trailer:
+ * MAX_ARRAY = 64KB/PAGE_SIZE + 1 + 2 * (SIW_MAX_SGE - 1) + HDR + TRL
+ */
+#define MAX_ARRAY ((0xffff / PAGE_SIZE) + 1 + (2 * (SIW_MAX_SGE - 1) + 2))
+
+/*
+ * Write out iov referencing hdr, data and trailer of current FPDU.
+ * Update transmit state dependent on write return status
+ */
+static noinline_for_stack int siw_tx_hdt(struct siw_iwarp_tx *c_tx,
+ struct socket *s)
+{
+ struct siw_wqe *wqe = &c_tx->wqe_active;
+ struct siw_sge *sge = &wqe->sqe.sge[c_tx->sge_idx];
+ struct kvec iov[MAX_ARRAY];
+ struct page *page_array[MAX_ARRAY];
+
+ int seg = 0, do_crc = c_tx->do_crc, is_kva = 0, rv;
+ unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0,
+ sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx,
+ pbl_idx = c_tx->pbl_idx;
+ unsigned long kmap_mask = 0L;
+
+ if (c_tx->state == SIW_SEND_HDR) {
+ if (c_tx->use_sendpage) {
+ rv = siw_tx_ctrl(c_tx, s, MSG_DONTWAIT | MSG_MORE);
+ if (rv)
+ goto done;
+
+ c_tx->state = SIW_SEND_DATA;
+ } else {
+ iov[0].iov_base =
+ (char *)&c_tx->pkt.ctrl + c_tx->ctrl_sent;
+ iov[0].iov_len = hdr_len =
+ c_tx->ctrl_len - c_tx->ctrl_sent;
+ seg = 1;
+ }
+ }
+
+ wqe->processed += data_len;
+
+ while (data_len) { /* walk the list of SGE's */
+ unsigned int sge_len = min(sge->length - sge_off, data_len);
+ unsigned int fp_off = (sge->laddr + sge_off) & ~PAGE_MASK;
+ struct siw_mem *mem;
+
+ if (!(tx_flags(wqe) & SIW_WQE_INLINE)) {
+ mem = wqe->mem[sge_idx];
+ is_kva = mem->mem_obj == NULL ? 1 : 0;
+ } else {
+ is_kva = 1;
+ }
+ if (is_kva && !c_tx->use_sendpage) {
+ /*
+ * tx from kernel virtual address: either inline data
+ * or memory region with assigned kernel buffer
+ */
+ iov[seg].iov_base =
+ ib_virt_dma_to_ptr(sge->laddr + sge_off);
+ iov[seg].iov_len = sge_len;
+
+ if (do_crc)
+ siw_crc_update(&c_tx->mpa_crc,
+ iov[seg].iov_base, sge_len);
+ sge_off += sge_len;
+ data_len -= sge_len;
+ seg++;
+ goto sge_done;
+ }
+
+ while (sge_len) {
+ size_t plen = min((int)PAGE_SIZE - fp_off, sge_len);
+ void *kaddr;
+
+ if (!is_kva) {
+ struct page *p;
+
+ p = siw_get_page(mem, sge, sge_off, &pbl_idx);
+ if (unlikely(!p)) {
+ siw_unmap_pages(iov, kmap_mask, seg);
+ wqe->processed -= c_tx->bytes_unsent;
+ rv = -EFAULT;
+ goto done_crc;
+ }
+ page_array[seg] = p;
+
+ if (!c_tx->use_sendpage) {
+ void *kaddr = kmap_local_page(p);
+
+ /* Remember for later kunmap() */
+ kmap_mask |= BIT(seg);
+ iov[seg].iov_base = kaddr + fp_off;
+ iov[seg].iov_len = plen;
+
+ if (do_crc)
+ siw_crc_update(
+ &c_tx->mpa_crc,
+ iov[seg].iov_base,
+ plen);
+ } else if (do_crc) {
+ kaddr = kmap_local_page(p);
+ siw_crc_update(&c_tx->mpa_crc,
+ kaddr + fp_off, plen);
+ kunmap_local(kaddr);
+ }
+ } else {
+ /*
+ * Use the full 64-bit value of sge->laddr when
+ * resolving the page for this kernel virtual address.
+ */
+ u64 va = sge->laddr + sge_off;
+
+ page_array[seg] = ib_virt_dma_to_page(va);
+ if (do_crc)
+ siw_crc_update(&c_tx->mpa_crc,
+ ib_virt_dma_to_ptr(va),
+ plen);
+ }
+
+ sge_len -= plen;
+ sge_off += plen;
+ data_len -= plen;
+ fp_off = 0;
+
+ if (++seg >= (int)MAX_ARRAY) {
+ siw_dbg_qp(tx_qp(c_tx), "too many fragments\n");
+ siw_unmap_pages(iov, kmap_mask, seg-1);
+ wqe->processed -= c_tx->bytes_unsent;
+ rv = -EMSGSIZE;
+ goto done_crc;
+ }
+ }
+sge_done:
+ /* Update SGE variables at end of SGE */
+ if (sge_off == sge->length &&
+ (data_len != 0 || wqe->processed < wqe->bytes)) {
+ sge_idx++;
+ sge++;
+ sge_off = 0;
+ }
+ }
+ /* trailer */
+ if (likely(c_tx->state != SIW_SEND_TRAILER)) {
+ iov[seg].iov_base = &c_tx->trailer.pad[4 - c_tx->pad];
+ iov[seg].iov_len = trl_len = MAX_TRAILER - (4 - c_tx->pad);
+ } else {
+ iov[seg].iov_base = &c_tx->trailer.pad[c_tx->ctrl_sent];
+ iov[seg].iov_len = trl_len = MAX_TRAILER - c_tx->ctrl_sent;
+ }
+
+ if (c_tx->pad) {
+ *(u32 *)c_tx->trailer.pad = 0;
+ if (do_crc)
+ siw_crc_update(&c_tx->mpa_crc,
+ (u8 *)&c_tx->trailer.crc - c_tx->pad,
+ c_tx->pad);
+ }
+ if (!c_tx->mpa_crc_enabled)
+ c_tx->trailer.crc = 0;
+ else if (do_crc)
+ siw_crc_final(&c_tx->mpa_crc, (u8 *)&c_tx->trailer.crc);
+
+ data_len = c_tx->bytes_unsent;
+
+ if (c_tx->use_sendpage) {
+ rv = siw_0copy_tx(s, page_array, &wqe->sqe.sge[c_tx->sge_idx],
+ c_tx->sge_off, data_len);
+ if (rv == data_len) {
+
+ rv = siw_sendmsg(s, MSG_DONTWAIT | MSG_EOR, &iov[seg],
+ 1, trl_len);
+ if (rv > 0)
+ rv += data_len;
+ else
+ rv = data_len;
+ }
+ } else {
+ rv = siw_sendmsg(s, MSG_DONTWAIT | MSG_EOR, iov, seg + 1,
+ hdr_len + data_len + trl_len);
+ siw_unmap_pages(iov, kmap_mask, seg);
+ }
+ if (rv < (int)hdr_len) {
+ /* Not even complete hdr pushed or negative rv */
+ wqe->processed -= data_len;
+ if (rv >= 0) {
+ c_tx->ctrl_sent += rv;
+ rv = -EAGAIN;
+ }
+ goto done_crc;
+ }
+ rv -= hdr_len;
+
+ if (rv >= (int)data_len) {
+ /* all user data pushed to TCP or no data to push */
+ if (data_len > 0 && wqe->processed < wqe->bytes) {
+ /* Save the current state for next tx */
+ c_tx->sge_idx = sge_idx;
+ c_tx->sge_off = sge_off;
+ c_tx->pbl_idx = pbl_idx;
+ }
+ rv -= data_len;
+
+ if (rv == trl_len) /* all pushed */
+ rv = 0;
+ else {
+ c_tx->state = SIW_SEND_TRAILER;
+ c_tx->ctrl_len = MAX_TRAILER;
+ c_tx->ctrl_sent = rv + 4 - c_tx->pad;
+ c_tx->bytes_unsent = 0;
+ rv = -EAGAIN;
+ }
+
+ } else if (data_len > 0) {
+ /* Maybe some user data pushed to TCP */
+ c_tx->state = SIW_SEND_DATA;
+ wqe->processed -= data_len - rv;
+
+ if (rv) {
+ /*
+ * Some bytes out. Recompute tx state based
+ * on old state and bytes pushed
+ */
+ unsigned int sge_unsent;
+
+ c_tx->bytes_unsent -= rv;
+ sge = &wqe->sqe.sge[c_tx->sge_idx];
+ sge_unsent = sge->length - c_tx->sge_off;
+
+ while (sge_unsent <= rv) {
+ rv -= sge_unsent;
+ c_tx->sge_idx++;
+ c_tx->sge_off = 0;
+ sge++;
+ sge_unsent = sge->length;
+ }
+ c_tx->sge_off += rv;
+ }
+ rv = -EAGAIN;
+ }
+done_crc:
+ c_tx->do_crc = 0;
+done:
+ return rv;
+}
+
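+/*
+ * Derive the FPDU size budget from the current TCP send unit: with
+ * GSO active this is mss_cache times the (optionally capped) number
+ * of GSO segments, otherwise a single MSS, rounded down to a
+ * multiple of 8.
+ */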
+static void siw_update_tcpseg(struct siw_iwarp_tx *c_tx,
+ struct socket *s)
+{
+ struct tcp_sock *tp = tcp_sk(s->sk);
+
+ if (tp->gso_segs) {
+ if (c_tx->gso_seg_limit == 0)
+ c_tx->tcp_seglen = tp->mss_cache * tp->gso_segs;
+ else
+ c_tx->tcp_seglen =
+ tp->mss_cache *
+ min_t(u16, c_tx->gso_seg_limit, tp->gso_segs);
+ } else {
+ c_tx->tcp_seglen = tp->mss_cache;
+ }
+ /* Loopback may give odd numbers */
+ c_tx->tcp_seglen &= 0xfffffff8;
+}
+
+/*
+ * siw_prepare_fpdu()
+ *
+ * Prepares transmit context to send out one FPDU if FPDU will contain
+ * user data and user data are not immediate data.
+ * Computes maximum FPDU length to fill up TCP MSS if possible.
+ *
+ * @qp: QP from which to transmit
+ * @wqe: Current WQE causing transmission
+ *
+ * TODO: Take into account real available send space on socket
+ * to avoid header misalignment due to send pausing within
+ * FPDU transmission
+ */
+static void siw_prepare_fpdu(struct siw_qp *qp, struct siw_wqe *wqe)
+{
+ struct siw_iwarp_tx *c_tx = &qp->tx_ctx;
+ int data_len;
+
+ c_tx->ctrl_len =
+ iwarp_pktinfo[__rdmap_get_opcode(&c_tx->pkt.ctrl)].hdr_len;
+ c_tx->ctrl_sent = 0;
+
+ /*
+ * Update target buffer offset if any
+ */
+ if (!(c_tx->pkt.ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED))
+ /* Untagged message */
+ c_tx->pkt.c_untagged.ddp_mo = cpu_to_be32(wqe->processed);
+ else /* Tagged message */
+ c_tx->pkt.c_tagged.ddp_to =
+ cpu_to_be64(wqe->sqe.raddr + wqe->processed);
+
+ data_len = wqe->bytes - wqe->processed;
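+ /*
+ * Fit header + payload + CRC into the TCP segment budget. An
+ * oversized payload is trimmed and DDP_FLAG_LAST cleared;
+ * otherwise the payload is padded to a 4-byte boundary,
+ * e.g. 5 data bytes take 3 pad bytes (-5 & 0x3 == 3).
+ */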
+ if (data_len + c_tx->ctrl_len + MPA_CRC_SIZE > c_tx->tcp_seglen) {
+ /* Trim DDP payload to fit into current TCP segment */
+ data_len = c_tx->tcp_seglen - (c_tx->ctrl_len + MPA_CRC_SIZE);
+ c_tx->pkt.ctrl.ddp_rdmap_ctrl &= ~DDP_FLAG_LAST;
+ c_tx->pad = 0;
+ } else {
+ c_tx->pkt.ctrl.ddp_rdmap_ctrl |= DDP_FLAG_LAST;
+ c_tx->pad = -data_len & 0x3;
+ }
+ c_tx->bytes_unsent = data_len;
+
+ c_tx->pkt.ctrl.mpa_len =
+ htons(c_tx->ctrl_len + data_len - MPA_HDR_SIZE);
+
+ /*
+ * Init MPA CRC computation
+ */
+ if (c_tx->mpa_crc_enabled) {
+ siw_crc_init(&c_tx->mpa_crc);
+ siw_crc_update(&c_tx->mpa_crc, &c_tx->pkt, c_tx->ctrl_len);
+ c_tx->do_crc = 1;
+ }
+}
+
+/*
+ * siw_check_sgl_tx()
+ *
+ * Check permissions for a list of SGE's (SGL).
+ * A successful check will have all memory referenced
+ * for transmission resolved and assigned to the WQE.
+ *
+ * @pd: Protection Domain SGL should belong to
+ * @wqe: WQE to be checked
+ * @perms: requested access permissions
+ *
+ */
+
+static int siw_check_sgl_tx(struct ib_pd *pd, struct siw_wqe *wqe,
+ enum ib_access_flags perms)
+{
+ struct siw_sge *sge = &wqe->sqe.sge[0];
+ int i, len, num_sge = wqe->sqe.num_sge;
+
+ if (unlikely(num_sge > SIW_MAX_SGE))
+ return -EINVAL;
+
+ for (i = 0, len = 0; num_sge; num_sge--, i++, sge++) {
+ /*
+ * rdma verbs: do not check stag for a zero length sge
+ */
+ if (sge->length) {
+ int rv = siw_check_sge(pd, sge, &wqe->mem[i], perms, 0,
+ sge->length);
+
+ if (unlikely(rv != E_ACCESS_OK))
+ return rv;
+ }
+ len += sge->length;
+ }
+ return len;
+}
+
+/*
+ * siw_qp_sq_proc_tx()
+ *
+ * Process one WQE which needs transmission on the wire.
+ */
+static int siw_qp_sq_proc_tx(struct siw_qp *qp, struct siw_wqe *wqe)
+{
+ struct siw_iwarp_tx *c_tx = &qp->tx_ctx;
+ struct socket *s = qp->attrs.sk;
+ int rv = 0, burst_len = qp->tx_ctx.burst;
+ enum rdmap_ecode ecode = RDMAP_ECODE_CATASTROPHIC_STREAM;
+
+ if (unlikely(wqe->wr_status == SIW_WR_IDLE))
+ return 0;
+
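+ /*
+ * The burst credit bounds the number of FPDUs pushed per call,
+ * so a single QP cannot monopolize the tx thread; when it is
+ * used up, -EINPROGRESS reschedules the SQ.
+ */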
+ if (!burst_len)
+ burst_len = SQ_USER_MAXBURST;
+
+ if (wqe->wr_status == SIW_WR_QUEUED) {
+ if (!(wqe->sqe.flags & SIW_WQE_INLINE)) {
+ if (tx_type(wqe) == SIW_OP_READ_RESPONSE)
+ wqe->sqe.num_sge = 1;
+
+ if (tx_type(wqe) != SIW_OP_READ &&
+ tx_type(wqe) != SIW_OP_READ_LOCAL_INV) {
+ /*
+ * Reference memory to be tx'd w/o checking
+ * access for LOCAL_READ permission, since
+ * not defined in RDMA core.
+ */
+ rv = siw_check_sgl_tx(qp->pd, wqe, 0);
+ if (rv < 0) {
+ if (tx_type(wqe) ==
+ SIW_OP_READ_RESPONSE)
+ ecode = siw_rdmap_error(-rv);
+ rv = -EINVAL;
+ goto tx_error;
+ }
+ wqe->bytes = rv;
+ } else {
+ wqe->bytes = 0;
+ }
+ } else {
+ wqe->bytes = wqe->sqe.sge[0].length;
+ if (!rdma_is_kernel_res(&qp->base_qp.res)) {
+ if (wqe->bytes > SIW_MAX_INLINE) {
+ rv = -EINVAL;
+ goto tx_error;
+ }
+ wqe->sqe.sge[0].laddr =
+ (u64)(uintptr_t)&wqe->sqe.sge[1];
+ }
+ }
+ wqe->wr_status = SIW_WR_INPROGRESS;
+ wqe->processed = 0;
+
+ siw_update_tcpseg(c_tx, s);
+
+ rv = siw_qp_prepare_tx(c_tx);
+ if (rv == PKT_FRAGMENTED) {
+ c_tx->state = SIW_SEND_HDR;
+ siw_prepare_fpdu(qp, wqe);
+ } else if (rv == PKT_COMPLETE) {
+ c_tx->state = SIW_SEND_SHORT_FPDU;
+ } else {
+ goto tx_error;
+ }
+ }
+
+next_segment:
+ siw_dbg_qp(qp, "wr type %d, state %d, data %u, sent %u, id %llx\n",
+ tx_type(wqe), wqe->wr_status, wqe->bytes, wqe->processed,
+ wqe->sqe.id);
+
+ if (--burst_len == 0) {
+ rv = -EINPROGRESS;
+ goto tx_done;
+ }
+ if (c_tx->state == SIW_SEND_SHORT_FPDU) {
+ enum siw_opcode tx_type = tx_type(wqe);
+ unsigned int msg_flags;
+
+ if (siw_sq_empty(qp) || !siw_tcp_nagle || burst_len == 1)
+ /*
+ * End current TCP segment, if SQ runs empty,
+ * or siw_tcp_nagle is not set, or we bail out
+ * soon due to no burst credit left.
+ */
+ msg_flags = MSG_DONTWAIT;
+ else
+ msg_flags = MSG_DONTWAIT | MSG_MORE;
+
+ rv = siw_tx_ctrl(c_tx, s, msg_flags);
+
+ if (!rv && tx_type != SIW_OP_READ &&
+ tx_type != SIW_OP_READ_LOCAL_INV)
+ wqe->processed = wqe->bytes;
+
+ goto tx_done;
+
+ } else {
+ rv = siw_tx_hdt(c_tx, s);
+ }
+ if (!rv) {
+ /*
+ * One segment sent. Processing completed if last
+ * segment; do next segment otherwise.
+ */
+ if (unlikely(c_tx->tx_suspend)) {
+ /*
+ * Verbs 6.4: try to stop sending after a full
+ * DDP segment if the connection goes down
+ * (== peer half-close)
+ */
+ rv = -ECONNABORTED;
+ goto tx_done;
+ }
+ if (c_tx->pkt.ctrl.ddp_rdmap_ctrl & DDP_FLAG_LAST) {
+ siw_dbg_qp(qp, "WQE completed\n");
+ goto tx_done;
+ }
+ c_tx->state = SIW_SEND_HDR;
+
+ siw_update_tcpseg(c_tx, s);
+
+ siw_prepare_fpdu(qp, wqe);
+ goto next_segment;
+ }
+tx_done:
+ qp->tx_ctx.burst = burst_len;
+ return rv;
+
+tx_error:
+ if (ecode != RDMAP_ECODE_CATASTROPHIC_STREAM)
+ siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP,
+ RDMAP_ETYPE_REMOTE_PROTECTION, ecode, 1);
+ else
+ siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP,
+ RDMAP_ETYPE_CATASTROPHIC,
+ RDMAP_ECODE_UNSPECIFIED, 1);
+ return rv;
+}
+
+static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
+{
+ struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr;
+ struct siw_device *sdev = to_siw_dev(pd->device);
+ struct siw_mem *mem;
+ int rv = 0;
+
+ siw_dbg_pd(pd, "STag 0x%08x\n", sqe->rkey);
+
+ if (unlikely(!base_mr)) {
+ pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
+ return -EINVAL;
+ }
+
+ if (unlikely(base_mr->rkey >> 8 != sqe->rkey >> 8)) {
+ pr_warn("siw: fastreg: STag 0x%08x: bad MR\n", sqe->rkey);
+ return -EINVAL;
+ }
+
+ mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);
+ if (unlikely(!mem)) {
+ pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
+ return -EINVAL;
+ }
+
+ if (unlikely(mem->pd != pd)) {
+ pr_warn("siw: fastreg: PD mismatch\n");
+ rv = -EINVAL;
+ goto out;
+ }
+ if (unlikely(mem->stag_valid)) {
+ pr_warn("siw: fastreg: STag 0x%08x already valid\n", sqe->rkey);
+ rv = -EINVAL;
+ goto out;
+ }
+ /* Refresh STag since user may have changed key part */
+ mem->stag = sqe->rkey;
+ mem->perms = sqe->access;
+
+ siw_dbg_mem(mem, "STag 0x%08x now valid\n", sqe->rkey);
+ mem->va = base_mr->iova;
+ mem->stag_valid = 1;
+out:
+ siw_mem_put(mem);
+ return rv;
+}
+
+static int siw_qp_sq_proc_local(struct siw_qp *qp, struct siw_wqe *wqe)
+{
+ int rv;
+
+ switch (tx_type(wqe)) {
+ case SIW_OP_REG_MR:
+ rv = siw_fastreg_mr(qp->pd, &wqe->sqe);
+ break;
+
+ case SIW_OP_INVAL_STAG:
+ rv = siw_invalidate_stag(qp->pd, wqe->sqe.rkey);
+ break;
+
+ default:
+ rv = -EINVAL;
+ }
+ return rv;
+}
+
+/*
+ * siw_qp_sq_process()
+ *
+ * Core TX path routine for RDMAP/DDP/MPA using a TCP kernel socket.
+ * Sends RDMAP payload for the current SQ WR @wqe of @qp in one or more
+ * MPA FPDUs, each containing a DDP segment.
+ *
+ * SQ processing may occur in user context as a result of posting
+ * new WQE's or from siw_tx_thread context. Processing in
+ * user context is limited to non-kernel verbs users.
+ *
+ * SQ processing may get paused anytime, possibly in the middle of a WR
+ * or FPDU, if insufficient send space is available. SQ processing
+ * gets resumed from siw_tx_thread, if send space becomes available again.
+ *
+ * Must be called with the QP state read-locked.
+ *
+ * Note:
+ * An outbound RREQ can be satisfied by the corresponding RRESP
+ * _before_ it gets assigned to the ORQ. This happens regularly
+ * in the RDMA READ via loopback case. Since both outbound RREQ and
+ * inbound RRESP can be handled by the same CPU, locking the ORQ
+ * is dead-lock prone and thus not an option. With that, the
+ * RREQ gets assigned to the ORQ _before_ being sent - see
+ * siw_activate_tx() - and pulled back in case of send failure.
+ */
+int siw_qp_sq_process(struct siw_qp *qp)
+{
+ struct siw_wqe *wqe = tx_wqe(qp);
+ enum siw_opcode tx_type;
+ unsigned long flags;
+ int rv = 0;
+
+ siw_dbg_qp(qp, "enter for type %d\n", tx_type(wqe));
+
+next_wqe:
+ /*
+ * Stop QP processing if SQ state changed
+ */
+ if (unlikely(qp->tx_ctx.tx_suspend)) {
+ siw_dbg_qp(qp, "tx suspended\n");
+ goto done;
+ }
+ tx_type = tx_type(wqe);
+
+ if (tx_type <= SIW_OP_READ_RESPONSE)
+ rv = siw_qp_sq_proc_tx(qp, wqe);
+ else
+ rv = siw_qp_sq_proc_local(qp, wqe);
+
+ if (!rv) {
+ /*
+ * WQE processing done
+ */
+ switch (tx_type) {
+ case SIW_OP_SEND:
+ case SIW_OP_SEND_REMOTE_INV:
+ case SIW_OP_WRITE:
+ siw_wqe_put_mem(wqe, tx_type);
+ fallthrough;
+
+ case SIW_OP_INVAL_STAG:
+ case SIW_OP_REG_MR:
+ if (tx_flags(wqe) & SIW_WQE_SIGNALLED)
+ siw_sqe_complete(qp, &wqe->sqe, wqe->bytes,
+ SIW_WC_SUCCESS);
+ break;
+
+ case SIW_OP_READ:
+ case SIW_OP_READ_LOCAL_INV:
+ /*
+ * already enqueued to ORQ queue
+ */
+ break;
+
+ case SIW_OP_READ_RESPONSE:
+ siw_wqe_put_mem(wqe, tx_type);
+ break;
+
+ default:
+ WARN(1, "undefined WQE type %d\n", tx_type);
+ rv = -EINVAL;
+ goto done;
+ }
+
+ spin_lock_irqsave(&qp->sq_lock, flags);
+ wqe->wr_status = SIW_WR_IDLE;
+ rv = siw_activate_tx(qp);
+ spin_unlock_irqrestore(&qp->sq_lock, flags);
+
+ if (rv <= 0)
+ goto done;
+
+ goto next_wqe;
+
+ } else if (rv == -EAGAIN) {
+ siw_dbg_qp(qp, "sq paused: hd/tr %d of %d, data %d\n",
+ qp->tx_ctx.ctrl_sent, qp->tx_ctx.ctrl_len,
+ qp->tx_ctx.bytes_unsent);
+ rv = 0;
+ goto done;
+ } else if (rv == -EINPROGRESS) {
+ rv = siw_sq_start(qp);
+ goto done;
+ } else {
+ /*
+ * WQE processing failed.
+ * Verbs 8.3.2:
+ * o It turns any WQE into a signalled WQE.
+ * o Local catastrophic error must be surfaced
+ * o QP must be moved into Terminate state: done by code
+ * doing socket state change processing
+ *
+ * o TODO: Termination message must be sent.
+ * o TODO: Implement more precise work completion errors,
+ * see enum ib_wc_status in ib_verbs.h
+ */
+ siw_dbg_qp(qp, "wqe type %d processing failed: %d\n",
+ tx_type(wqe), rv);
+
+ spin_lock_irqsave(&qp->sq_lock, flags);
+ /*
+ * RREQ may have already been completed by inbound RRESP!
+ */
+ if ((tx_type == SIW_OP_READ ||
+ tx_type == SIW_OP_READ_LOCAL_INV) && qp->attrs.orq_size) {
+ /* Cleanup pending entry in ORQ */
+ qp->orq_put--;
+ qp->orq[qp->orq_put % qp->attrs.orq_size].flags = 0;
+ }
+ spin_unlock_irqrestore(&qp->sq_lock, flags);
+ /*
+ * immediately suspends further TX processing
+ */
+ if (!qp->tx_ctx.tx_suspend)
+ siw_qp_cm_drop(qp, 0);
+
+ switch (tx_type) {
+ case SIW_OP_SEND:
+ case SIW_OP_SEND_REMOTE_INV:
+ case SIW_OP_SEND_WITH_IMM:
+ case SIW_OP_WRITE:
+ case SIW_OP_READ:
+ case SIW_OP_READ_LOCAL_INV:
+ siw_wqe_put_mem(wqe, tx_type);
+ fallthrough;
+
+ case SIW_OP_INVAL_STAG:
+ case SIW_OP_REG_MR:
+ siw_sqe_complete(qp, &wqe->sqe, wqe->bytes,
+ SIW_WC_LOC_QP_OP_ERR);
+
+ siw_qp_event(qp, IB_EVENT_QP_FATAL);
+
+ break;
+
+ case SIW_OP_READ_RESPONSE:
+ siw_dbg_qp(qp, "proc. read.response failed: %d\n", rv);
+
+ siw_qp_event(qp, IB_EVENT_QP_REQ_ERR);
+
+ siw_wqe_put_mem(wqe, SIW_OP_READ_RESPONSE);
+
+ break;
+
+ default:
+ WARN(1, "undefined WQE type %d\n", tx_type);
+ rv = -EINVAL;
+ }
+ wqe->wr_status = SIW_WR_IDLE;
+ }
+done:
+ return rv;
+}
+
+static void siw_sq_resume(struct siw_qp *qp)
+{
+ if (down_read_trylock(&qp->state_lock)) {
+ if (likely(qp->attrs.state == SIW_QP_STATE_RTS &&
+ !qp->tx_ctx.tx_suspend)) {
+ int rv = siw_qp_sq_process(qp);
+
+ up_read(&qp->state_lock);
+
+ if (unlikely(rv < 0)) {
+ siw_dbg_qp(qp, "SQ task failed: err %d\n", rv);
+
+ if (!qp->tx_ctx.tx_suspend)
+ siw_qp_cm_drop(qp, 0);
+ }
+ } else {
+ up_read(&qp->state_lock);
+ }
+ } else {
+ siw_dbg_qp(qp, "Resume SQ while QP locked\n");
+ }
+ siw_qp_put(qp);
+}
+
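+/*
+ * Per-CPU tx worker state: a lock-free llist of QPs with pending SQ
+ * work plus the wait queue its "siw_tx/<cpu>" kthread sleeps on.
+ */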
+struct tx_task_t {
+ struct llist_head active;
+ wait_queue_head_t waiting;
+};
+
+static DEFINE_PER_CPU(struct tx_task_t, siw_tx_task_g);
+
+int siw_create_tx_threads(void)
+{
+ int cpu, assigned = 0;
+
+ for_each_online_cpu(cpu) {
+ struct tx_task_t *tx_task;
+
+ /* Skip HT cores */
+ if (cpu % cpumask_weight(topology_sibling_cpumask(cpu)))
+ continue;
+
+ tx_task = &per_cpu(siw_tx_task_g, cpu);
+ init_llist_head(&tx_task->active);
+ init_waitqueue_head(&tx_task->waiting);
+
+ siw_tx_thread[cpu] =
+ kthread_run_on_cpu(siw_run_sq,
+ (unsigned long *)(long)cpu,
+ cpu, "siw_tx/%u");
+ if (IS_ERR(siw_tx_thread[cpu])) {
+ siw_tx_thread[cpu] = NULL;
+ continue;
+ }
+ assigned++;
+ }
+ return assigned;
+}
+
+void siw_stop_tx_threads(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ if (siw_tx_thread[cpu]) {
+ kthread_stop(siw_tx_thread[cpu]);
+ wake_up(&per_cpu(siw_tx_task_g, cpu).waiting);
+ siw_tx_thread[cpu] = NULL;
+ }
+ }
+}
+
+int siw_run_sq(void *data)
+{
+ const int nr_cpu = (unsigned int)(long)data;
+ struct llist_node *active;
+ struct siw_qp *qp;
+ struct tx_task_t *tx_task = &per_cpu(siw_tx_task_g, nr_cpu);
+
+ while (1) {
+ struct llist_node *fifo_list = NULL;
+
+ wait_event_interruptible(tx_task->waiting,
+ !llist_empty(&tx_task->active) ||
+ kthread_should_stop());
+
+ if (kthread_should_stop())
+ break;
+
+ active = llist_del_all(&tx_task->active);
+ /*
+ * llist_del_all returns a list with newest entry first.
+ * Re-order list for fairness among QP's.
+ */
+ fifo_list = llist_reverse_order(active);
+ while (fifo_list) {
+ qp = container_of(fifo_list, struct siw_qp, tx_list);
+ fifo_list = llist_next(fifo_list);
+ qp->tx_list.next = NULL;
+
+ siw_sq_resume(qp);
+ }
+ }
+ active = llist_del_all(&tx_task->active);
+ if (active) {
+ llist_for_each_entry(qp, active, tx_list) {
+ qp->tx_list.next = NULL;
+ siw_sq_resume(qp);
+ }
+ }
+ return 0;
+}
+
+int siw_sq_start(struct siw_qp *qp)
+{
+ if (tx_wqe(qp)->wr_status == SIW_WR_IDLE)
+ return 0;
+
+ if (unlikely(!cpu_online(qp->tx_cpu))) {
+ siw_put_tx_cpu(qp->tx_cpu);
+ qp->tx_cpu = siw_get_tx_cpu(qp->sdev);
+ if (qp->tx_cpu < 0) {
+ pr_warn("siw: no tx cpu available\n");
+
+ return -EIO;
+ }
+ }
+ siw_qp_get(qp);
+
+ llist_add(&qp->tx_list, &per_cpu(siw_tx_task_g, qp->tx_cpu).active);
+
+ wake_up(&per_cpu(siw_tx_task_g, qp->tx_cpu).waiting);
+
+ return 0;
+}
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
new file mode 100644
index 000000000000..efa2f097b582
--- /dev/null
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -0,0 +1,1891 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/xarray.h>
+#include <net/addrconf.h>
+
+#include <rdma/iw_cm.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/uverbs_ioctl.h>
+
+#include "siw.h"
+#include "siw_verbs.h"
+#include "siw_mem.h"
+
+static int siw_qp_state_to_ib_qp_state[SIW_QP_STATE_COUNT] = {
+ [SIW_QP_STATE_IDLE] = IB_QPS_INIT,
+ [SIW_QP_STATE_RTR] = IB_QPS_RTR,
+ [SIW_QP_STATE_RTS] = IB_QPS_RTS,
+ [SIW_QP_STATE_CLOSING] = IB_QPS_SQD,
+ [SIW_QP_STATE_TERMINATE] = IB_QPS_SQE,
+ [SIW_QP_STATE_ERROR] = IB_QPS_ERR
+};
+
+static int ib_qp_state_to_siw_qp_state[IB_QPS_ERR + 1] = {
+ [IB_QPS_RESET] = SIW_QP_STATE_IDLE,
+ [IB_QPS_INIT] = SIW_QP_STATE_IDLE,
+ [IB_QPS_RTR] = SIW_QP_STATE_RTR,
+ [IB_QPS_RTS] = SIW_QP_STATE_RTS,
+ [IB_QPS_SQD] = SIW_QP_STATE_CLOSING,
+ [IB_QPS_SQE] = SIW_QP_STATE_TERMINATE,
+ [IB_QPS_ERR] = SIW_QP_STATE_ERROR
+};
+
+static char ib_qp_state_to_string[IB_QPS_ERR + 1][sizeof("RESET")] = {
+ [IB_QPS_RESET] = "RESET", [IB_QPS_INIT] = "INIT", [IB_QPS_RTR] = "RTR",
+ [IB_QPS_RTS] = "RTS", [IB_QPS_SQD] = "SQD", [IB_QPS_SQE] = "SQE",
+ [IB_QPS_ERR] = "ERR"
+};
+
+void siw_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct siw_user_mmap_entry *entry = to_siw_mmap_entry(rdma_entry);
+
+ kfree(entry);
+}
+
+int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
+{
+ struct siw_ucontext *uctx = to_siw_ctx(ctx);
+ size_t size = vma->vm_end - vma->vm_start;
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct siw_user_mmap_entry *entry;
+ int rv = -EINVAL;
+
+ /*
+ * Must be page aligned
+ */
+ if (vma->vm_start & (PAGE_SIZE - 1)) {
+ pr_warn("siw: mmap not page aligned\n");
+ return -EINVAL;
+ }
+ rdma_entry = rdma_user_mmap_entry_get(&uctx->base_ucontext, vma);
+ if (!rdma_entry) {
+ siw_dbg(&uctx->sdev->base_dev, "mmap lookup failed: %lu, %#zx\n",
+ vma->vm_pgoff, size);
+ return -EINVAL;
+ }
+ entry = to_siw_mmap_entry(rdma_entry);
+
+ rv = remap_vmalloc_range(vma, entry->address, 0);
+ if (rv)
+ pr_warn("remap_vmalloc_range failed: %lu, %zu\n", vma->vm_pgoff,
+ size);
+ rdma_user_mmap_entry_put(rdma_entry);
+
+ return rv;
+}
+
+int siw_alloc_ucontext(struct ib_ucontext *base_ctx, struct ib_udata *udata)
+{
+ struct siw_device *sdev = to_siw_dev(base_ctx->device);
+ struct siw_ucontext *ctx = to_siw_ctx(base_ctx);
+ struct siw_uresp_alloc_ctx uresp = {};
+ int rv;
+
+ if (atomic_inc_return(&sdev->num_ctx) > SIW_MAX_CONTEXT) {
+ rv = -ENOMEM;
+ goto err_out;
+ }
+ ctx->sdev = sdev;
+
+ uresp.dev_id = sdev->vendor_part_id;
+
+ if (udata->outlen < sizeof(uresp)) {
+ rv = -EINVAL;
+ goto err_out;
+ }
+ rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rv)
+ goto err_out;
+
+ siw_dbg(base_ctx->device, "success. now %d context(s)\n",
+ atomic_read(&sdev->num_ctx));
+
+ return 0;
+
+err_out:
+ atomic_dec(&sdev->num_ctx);
+ siw_dbg(base_ctx->device, "failure %d. now %d context(s)\n", rv,
+ atomic_read(&sdev->num_ctx));
+
+ return rv;
+}
+
+void siw_dealloc_ucontext(struct ib_ucontext *base_ctx)
+{
+ struct siw_ucontext *uctx = to_siw_ctx(base_ctx);
+
+ atomic_dec(&uctx->sdev->num_ctx);
+}
+
+int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
+ struct ib_udata *udata)
+{
+ struct siw_device *sdev = to_siw_dev(base_dev);
+
+ if (udata->inlen || udata->outlen)
+ return -EINVAL;
+
+ memset(attr, 0, sizeof(*attr));
+
+ /* Revisit atomic caps if RFC 7306 gets supported */
+ attr->atomic_cap = 0;
+ attr->device_cap_flags = IB_DEVICE_MEM_MGT_EXTENSIONS;
+ attr->kernel_cap_flags = IBK_ALLOW_USER_UNREG;
+ attr->max_cq = sdev->attrs.max_cq;
+ attr->max_cqe = sdev->attrs.max_cqe;
+ attr->max_fast_reg_page_list_len = SIW_MAX_SGE_PBL;
+ attr->max_mr = sdev->attrs.max_mr;
+ attr->max_mw = sdev->attrs.max_mw;
+ attr->max_mr_size = ~0ull;
+ attr->max_pd = sdev->attrs.max_pd;
+ attr->max_qp = sdev->attrs.max_qp;
+ attr->max_qp_init_rd_atom = sdev->attrs.max_ird;
+ attr->max_qp_rd_atom = sdev->attrs.max_ord;
+ attr->max_qp_wr = sdev->attrs.max_qp_wr;
+ attr->max_recv_sge = sdev->attrs.max_sge;
+ attr->max_res_rd_atom = sdev->attrs.max_qp * sdev->attrs.max_ird;
+ attr->max_send_sge = sdev->attrs.max_sge;
+ attr->max_sge_rd = sdev->attrs.max_sge_rd;
+ attr->max_srq = sdev->attrs.max_srq;
+ attr->max_srq_sge = sdev->attrs.max_srq_sge;
+ attr->max_srq_wr = sdev->attrs.max_srq_wr;
+ attr->page_size_cap = PAGE_SIZE;
+ attr->vendor_id = SIW_VENDOR_ID;
+ attr->vendor_part_id = sdev->vendor_part_id;
+
+ addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
+ sdev->raw_gid);
+
+ return 0;
+}
+
+int siw_query_port(struct ib_device *base_dev, u32 port,
+ struct ib_port_attr *attr)
+{
+ struct net_device *ndev;
+ int rv;
+
+ memset(attr, 0, sizeof(*attr));
+
+ rv = ib_get_eth_speed(base_dev, port, &attr->active_speed,
+ &attr->active_width);
+ if (rv)
+ return rv;
+
+ ndev = ib_device_get_netdev(base_dev, SIW_PORT);
+ if (!ndev)
+ return -ENODEV;
+
+ attr->gid_tbl_len = 1;
+ attr->max_msg_sz = -1;
+ attr->max_mtu = ib_mtu_int_to_enum(ndev->max_mtu);
+ attr->active_mtu = ib_mtu_int_to_enum(READ_ONCE(ndev->mtu));
+ attr->state = ib_get_curr_port_state(ndev);
+ attr->phys_state = attr->state == IB_PORT_ACTIVE ?
+ IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
+ attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
+ /*
+ * All zero
+ *
+ * attr->lid = 0;
+ * attr->bad_pkey_cntr = 0;
+ * attr->qkey_viol_cntr = 0;
+ * attr->sm_lid = 0;
+ * attr->lmc = 0;
+ * attr->max_vl_num = 0;
+ * attr->sm_sl = 0;
+ * attr->subnet_timeout = 0;
+ * attr->init_type_reply = 0;
+ */
+ dev_put(ndev);
+ return rv;
+}
+
+int siw_get_port_immutable(struct ib_device *base_dev, u32 port,
+ struct ib_port_immutable *port_immutable)
+{
+ struct ib_port_attr attr;
+ int rv = siw_query_port(base_dev, port, &attr);
+
+ if (rv)
+ return rv;
+
+ port_immutable->gid_tbl_len = attr.gid_tbl_len;
+ port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+
+ return 0;
+}
+
+int siw_query_gid(struct ib_device *base_dev, u32 port, int idx,
+ union ib_gid *gid)
+{
+ struct siw_device *sdev = to_siw_dev(base_dev);
+
+ /* subnet_prefix == interface_id == 0; */
+ memset(gid, 0, sizeof(*gid));
+ memcpy(gid->raw, sdev->raw_gid, ETH_ALEN);
+
+ return 0;
+}
+
+int siw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
+{
+ struct siw_device *sdev = to_siw_dev(pd->device);
+
+ if (atomic_inc_return(&sdev->num_pd) > SIW_MAX_PD) {
+ atomic_dec(&sdev->num_pd);
+ return -ENOMEM;
+ }
+ siw_dbg_pd(pd, "now %d PD's(s)\n", atomic_read(&sdev->num_pd));
+
+ return 0;
+}
+
+int siw_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
+{
+ struct siw_device *sdev = to_siw_dev(pd->device);
+
+ siw_dbg_pd(pd, "free PD\n");
+ atomic_dec(&sdev->num_pd);
+ return 0;
+}
+
+void siw_qp_get_ref(struct ib_qp *base_qp)
+{
+ siw_qp_get(to_siw_qp(base_qp));
+}
+
+void siw_qp_put_ref(struct ib_qp *base_qp)
+{
+ siw_qp_put(to_siw_qp(base_qp));
+}
+
+static struct rdma_user_mmap_entry *
+siw_mmap_entry_insert(struct siw_ucontext *uctx,
+ void *address, size_t length,
+ u64 *offset)
+{
+ struct siw_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ int rv;
+
+ *offset = SIW_INVAL_UOBJ_KEY;
+ if (!entry)
+ return NULL;
+
+ entry->address = address;
+
+ rv = rdma_user_mmap_entry_insert(&uctx->base_ucontext,
+ &entry->rdma_entry,
+ length);
+ if (rv) {
+ kfree(entry);
+ return NULL;
+ }
+
+ *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+ return &entry->rdma_entry;
+}
+
+/*
+ * siw_create_qp()
+ *
+ * Create QP of requested size on given device.
+ *
+ * @qp: Queue pair
+ * @attrs: Initial QP attributes.
+ * @udata: used to provide QP ID, SQ and RQ size back to user.
+ */
+
+int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata)
+{
+ struct ib_pd *pd = ibqp->pd;
+ struct siw_qp *qp = to_siw_qp(ibqp);
+ struct ib_device *base_dev = pd->device;
+ struct siw_device *sdev = to_siw_dev(base_dev);
+ struct siw_ucontext *uctx =
+ rdma_udata_to_drv_context(udata, struct siw_ucontext,
+ base_ucontext);
+ unsigned long flags;
+ int num_sqe, num_rqe, rv = 0;
+ size_t length;
+
+ siw_dbg(base_dev, "create new QP\n");
+
+ if (attrs->create_flags)
+ return -EOPNOTSUPP;
+
+ if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
+ siw_dbg(base_dev, "too many QP's\n");
+ rv = -ENOMEM;
+ goto err_atomic;
+ }
+ if (attrs->qp_type != IB_QPT_RC) {
+ siw_dbg(base_dev, "only RC QP's supported\n");
+ rv = -EOPNOTSUPP;
+ goto err_atomic;
+ }
+ if ((attrs->cap.max_send_wr > SIW_MAX_QP_WR) ||
+ (attrs->cap.max_recv_wr > SIW_MAX_QP_WR) ||
+ (attrs->cap.max_send_sge > SIW_MAX_SGE) ||
+ (attrs->cap.max_recv_sge > SIW_MAX_SGE)) {
+ siw_dbg(base_dev, "QP size error\n");
+ rv = -EINVAL;
+ goto err_atomic;
+ }
+ if (attrs->cap.max_inline_data > SIW_MAX_INLINE) {
+ siw_dbg(base_dev, "max inline send: %d > %d\n",
+ attrs->cap.max_inline_data, (int)SIW_MAX_INLINE);
+ rv = -EINVAL;
+ goto err_atomic;
+ }
+ /*
+ * NOTE: we do not allow a QP which cannot hold any SQ WQE
+ */
+ if (attrs->cap.max_send_wr == 0) {
+ siw_dbg(base_dev, "QP must have send queue\n");
+ rv = -EINVAL;
+ goto err_atomic;
+ }
+
+ if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {
+ siw_dbg(base_dev, "send CQ or receive CQ invalid\n");
+ rv = -EINVAL;
+ goto err_atomic;
+ }
+
+ init_rwsem(&qp->state_lock);
+ spin_lock_init(&qp->sq_lock);
+ spin_lock_init(&qp->rq_lock);
+ spin_lock_init(&qp->orq_lock);
+
+ rv = siw_qp_add(sdev, qp);
+ if (rv)
+ goto err_atomic;
+
+
+ /* All queue indices are derived from modulo operations
+ * on a free running 'get' (consumer) and 'put' (producer)
+ * unsigned counter. Having queue sizes at power of two
+ * avoids handling counter wrap around.
+ */
+ num_sqe = roundup_pow_of_two(attrs->cap.max_send_wr);
+ num_rqe = attrs->cap.max_recv_wr;
+ if (num_rqe)
+ num_rqe = roundup_pow_of_two(num_rqe);
+
+ if (udata)
+ qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));
+ else
+ qp->sendq = vcalloc(num_sqe, sizeof(struct siw_sqe));
+
+ if (qp->sendq == NULL) {
+ rv = -ENOMEM;
+ goto err_out_xa;
+ }
+ if (attrs->sq_sig_type != IB_SIGNAL_REQ_WR) {
+ if (attrs->sq_sig_type == IB_SIGNAL_ALL_WR)
+ qp->attrs.flags |= SIW_SIGNAL_ALL_WR;
+ else {
+ rv = -EINVAL;
+ goto err_out_xa;
+ }
+ }
+ qp->pd = pd;
+ qp->scq = to_siw_cq(attrs->send_cq);
+ qp->rcq = to_siw_cq(attrs->recv_cq);
+
+ if (attrs->srq) {
+ /*
+ * SRQ support.
+ * Verbs 6.3.7: ignore RQ size, if SRQ present
+ * Verbs 6.3.5: do not check PD of SRQ against PD of QP
+ */
+ qp->srq = to_siw_srq(attrs->srq);
+ qp->attrs.rq_size = 0;
+ siw_dbg(base_dev, "QP [%u]: SRQ attached\n",
+ qp->base_qp.qp_num);
+ } else if (num_rqe) {
+ if (udata)
+ qp->recvq =
+ vmalloc_user(num_rqe * sizeof(struct siw_rqe));
+ else
+ qp->recvq = vcalloc(num_rqe, sizeof(struct siw_rqe));
+
+ if (qp->recvq == NULL) {
+ rv = -ENOMEM;
+ goto err_out_xa;
+ }
+ qp->attrs.rq_size = num_rqe;
+ }
+ qp->attrs.sq_size = num_sqe;
+ qp->attrs.sq_max_sges = attrs->cap.max_send_sge;
+ qp->attrs.rq_max_sges = attrs->cap.max_recv_sge;
+
+ /* Make those two tunables fixed for now. */
+ qp->tx_ctx.gso_seg_limit = 1;
+ qp->tx_ctx.zcopy_tx = zcopy_tx;
+
+ qp->attrs.state = SIW_QP_STATE_IDLE;
+
+ if (udata) {
+ struct siw_uresp_create_qp uresp = {};
+
+ uresp.num_sqe = num_sqe;
+ uresp.num_rqe = num_rqe;
+ uresp.qp_id = qp_id(qp);
+
+ if (qp->sendq) {
+ length = num_sqe * sizeof(struct siw_sqe);
+ qp->sq_entry =
+ siw_mmap_entry_insert(uctx, qp->sendq,
+ length, &uresp.sq_key);
+ if (!qp->sq_entry) {
+ rv = -ENOMEM;
+ goto err_out_xa;
+ }
+ }
+
+ if (qp->recvq) {
+ length = num_rqe * sizeof(struct siw_rqe);
+ qp->rq_entry =
+ siw_mmap_entry_insert(uctx, qp->recvq,
+ length, &uresp.rq_key);
+ if (!qp->rq_entry) {
+ uresp.sq_key = SIW_INVAL_UOBJ_KEY;
+ rv = -ENOMEM;
+ goto err_out_xa;
+ }
+ }
+
+ if (udata->outlen < sizeof(uresp)) {
+ rv = -EINVAL;
+ goto err_out_xa;
+ }
+ rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rv)
+ goto err_out_xa;
+ }
+ qp->tx_cpu = siw_get_tx_cpu(sdev);
+ if (qp->tx_cpu < 0) {
+ rv = -EINVAL;
+ goto err_out_xa;
+ }
+ INIT_LIST_HEAD(&qp->devq);
+ spin_lock_irqsave(&sdev->lock, flags);
+ list_add_tail(&qp->devq, &sdev->qp_list);
+ spin_unlock_irqrestore(&sdev->lock, flags);
+
+ init_completion(&qp->qp_free);
+
+ return 0;
+
+err_out_xa:
+ xa_erase(&sdev->qp_xa, qp_id(qp));
+ if (uctx) {
+ rdma_user_mmap_entry_remove(qp->sq_entry);
+ rdma_user_mmap_entry_remove(qp->rq_entry);
+ }
+ vfree(qp->sendq);
+ vfree(qp->recvq);
+
+err_atomic:
+ atomic_dec(&sdev->num_qp);
+ return rv;
+}
+
+/*
+ * Minimum siw_query_qp() verb interface.
+ *
+ * @qp_attr_mask is not used but all available information is provided
+ */
+int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
+{
+ struct siw_qp *qp;
+ struct net_device *ndev;
+
+ if (base_qp && qp_attr && qp_init_attr)
+ qp = to_siw_qp(base_qp);
+ else
+ return -EINVAL;
+
+ ndev = ib_device_get_netdev(base_qp->device, SIW_PORT);
+ if (!ndev)
+ return -ENODEV;
+
+ qp_attr->qp_state = siw_qp_state_to_ib_qp_state[qp->attrs.state];
+ qp_attr->cap.max_inline_data = SIW_MAX_INLINE;
+ qp_attr->cap.max_send_wr = qp->attrs.sq_size;
+ qp_attr->cap.max_send_sge = qp->attrs.sq_max_sges;
+ qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
+ qp_attr->cap.max_recv_sge = qp->attrs.rq_max_sges;
+ qp_attr->path_mtu = ib_mtu_int_to_enum(READ_ONCE(ndev->mtu));
+ qp_attr->max_rd_atomic = qp->attrs.irq_size;
+ qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;
+
+ qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ;
+
+ qp_init_attr->qp_type = base_qp->qp_type;
+ qp_init_attr->send_cq = base_qp->send_cq;
+ qp_init_attr->recv_cq = base_qp->recv_cq;
+ qp_init_attr->srq = base_qp->srq;
+
+ qp_init_attr->cap = qp_attr->cap;
+
+ dev_put(ndev);
+ return 0;
+}
+
+int siw_verbs_modify_qp(struct ib_qp *base_qp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct siw_qp_attrs new_attrs;
+ enum siw_qp_attr_mask siw_attr_mask = 0;
+ struct siw_qp *qp = to_siw_qp(base_qp);
+ int rv = 0;
+
+ if (!attr_mask)
+ return 0;
+
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
+ memset(&new_attrs, 0, sizeof(new_attrs));
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS) {
+ siw_attr_mask = SIW_QP_ATTR_ACCESS_FLAGS;
+
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
+ new_attrs.flags |= SIW_RDMA_READ_ENABLED;
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
+ new_attrs.flags |= SIW_RDMA_WRITE_ENABLED;
+ if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
+ new_attrs.flags |= SIW_RDMA_BIND_ENABLED;
+ }
+ if (attr_mask & IB_QP_STATE) {
+ siw_dbg_qp(qp, "desired IB QP state: %s\n",
+ ib_qp_state_to_string[attr->qp_state]);
+
+ new_attrs.state = ib_qp_state_to_siw_qp_state[attr->qp_state];
+
+ if (new_attrs.state > SIW_QP_STATE_RTS)
+ qp->tx_ctx.tx_suspend = 1;
+
+ siw_attr_mask |= SIW_QP_ATTR_STATE;
+ }
+ if (!siw_attr_mask)
+ goto out;
+
+ down_write(&qp->state_lock);
+
+ rv = siw_qp_modify(qp, &new_attrs, siw_attr_mask);
+
+ up_write(&qp->state_lock);
+out:
+ return rv;
+}
+
+int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
+{
+ struct siw_qp *qp = to_siw_qp(base_qp);
+ struct siw_ucontext *uctx =
+ rdma_udata_to_drv_context(udata, struct siw_ucontext,
+ base_ucontext);
+ struct siw_qp_attrs qp_attrs;
+
+ siw_dbg_qp(qp, "state %d\n", qp->attrs.state);
+
+ /*
+ * Mark the QP as being destroyed to prevent any async
+ * callbacks to the RDMA core
+ */
+ qp->attrs.flags |= SIW_QP_IN_DESTROY;
+ qp->rx_stream.rx_suspend = 1;
+
+ if (uctx) {
+ rdma_user_mmap_entry_remove(qp->sq_entry);
+ rdma_user_mmap_entry_remove(qp->rq_entry);
+ }
+
+ down_write(&qp->state_lock);
+
+ qp_attrs.state = SIW_QP_STATE_ERROR;
+ siw_qp_modify(qp, &qp_attrs, SIW_QP_ATTR_STATE);
+
+ if (qp->cep) {
+ siw_cep_put(qp->cep);
+ qp->cep = NULL;
+ }
+ up_write(&qp->state_lock);
+
+ qp->scq = qp->rcq = NULL;
+
+ siw_qp_put(qp);
+ wait_for_completion(&qp->qp_free);
+
+ return 0;
+}
+
+/*
+ * siw_copy_inline_sgl()
+ *
+ * Prepare sgl of inlined data for sending. For userland callers,
+ * the function checks whether the given buffer addresses and lengths
+ * are within process context bounds.
+ * Data from all provided sge's is copied together into the wqe,
+ * referenced by a single sge.
+ */
+static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
+ struct siw_sqe *sqe)
+{
+ struct ib_sge *core_sge = core_wr->sg_list;
+ void *kbuf = &sqe->sge[1];
+ int num_sge = core_wr->num_sge, bytes = 0;
+
+ sqe->sge[0].laddr = (uintptr_t)kbuf;
+ sqe->sge[0].lkey = 0;
+
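+ /* Inline data is copied into the SQE itself, into the space behind sge[0] */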
+ while (num_sge--) {
+ if (!core_sge->length) {
+ core_sge++;
+ continue;
+ }
+ bytes += core_sge->length;
+ if (bytes > SIW_MAX_INLINE) {
+ bytes = -EINVAL;
+ break;
+ }
+ memcpy(kbuf, ib_virt_dma_to_ptr(core_sge->addr),
+ core_sge->length);
+
+ kbuf += core_sge->length;
+ core_sge++;
+ }
+ sqe->sge[0].length = max(bytes, 0);
+ sqe->num_sge = bytes > 0 ? 1 : 0;
+
+ return bytes;
+}
+
+/* Complete SQ WR's without processing */
+static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ int rv = 0;
+
+ while (wr) {
+ struct siw_sqe sqe = {};
+
+ switch (wr->opcode) {
+ case IB_WR_RDMA_WRITE:
+ sqe.opcode = SIW_OP_WRITE;
+ break;
+ case IB_WR_RDMA_READ:
+ sqe.opcode = SIW_OP_READ;
+ break;
+ case IB_WR_RDMA_READ_WITH_INV:
+ sqe.opcode = SIW_OP_READ_LOCAL_INV;
+ break;
+ case IB_WR_SEND:
+ sqe.opcode = SIW_OP_SEND;
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ sqe.opcode = SIW_OP_SEND_WITH_IMM;
+ break;
+ case IB_WR_SEND_WITH_INV:
+ sqe.opcode = SIW_OP_SEND_REMOTE_INV;
+ break;
+ case IB_WR_LOCAL_INV:
+ sqe.opcode = SIW_OP_INVAL_STAG;
+ break;
+ case IB_WR_REG_MR:
+ sqe.opcode = SIW_OP_REG_MR;
+ break;
+ default:
+ rv = -EINVAL;
+ break;
+ }
+ if (!rv) {
+ sqe.id = wr->wr_id;
+ rv = siw_sqe_complete(qp, &sqe, 0,
+ SIW_WC_WR_FLUSH_ERR);
+ }
+ if (rv) {
+ if (bad_wr)
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+ return rv;
+}
+
+/* Complete RQ WR's without processing */
+static int siw_rq_flush_wr(struct siw_qp *qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct siw_rqe rqe = {};
+ int rv = 0;
+
+ while (wr) {
+ rqe.id = wr->wr_id;
+ rv = siw_rqe_complete(qp, &rqe, 0, 0, SIW_WC_WR_FLUSH_ERR);
+ if (rv) {
+ if (bad_wr)
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+ return rv;
+}
+
+/*
+ * siw_post_send()
+ *
+ * Post a list of S-WR's to an SQ.
+ *
+ * @base_qp: Base QP contained in siw QP
+ * @wr: Null terminated list of user WR's
+ * @bad_wr: Points to failing WR in case of synchronous failure.
+ */
+int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ struct siw_qp *qp = to_siw_qp(base_qp);
+ struct siw_wqe *wqe = tx_wqe(qp);
+
+ unsigned long flags;
+ int rv = 0, imm_err = 0;
+
+ if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) {
+ siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ /*
+ * Try to acquire QP state lock. Must be non-blocking
+ * to accommodate kernel clients' needs.
+ */
+ if (!down_read_trylock(&qp->state_lock)) {
+ if (qp->attrs.state == SIW_QP_STATE_ERROR) {
+ /*
+ * ERROR state is final, so we can be sure
+ * this state will not change as long as the QP
+ * exists.
+ *
+ * This handles an ib_drain_sq() call with
+ * a concurrent request to set the QP state
+ * to ERROR.
+ */
+ rv = siw_sq_flush_wr(qp, wr, bad_wr);
+ } else {
+ siw_dbg_qp(qp, "QP locked, state %d\n",
+ qp->attrs.state);
+ *bad_wr = wr;
+ rv = -ENOTCONN;
+ }
+ return rv;
+ }
+ if (unlikely(qp->attrs.state != SIW_QP_STATE_RTS)) {
+ if (qp->attrs.state == SIW_QP_STATE_ERROR) {
+ /*
+ * Immediately flush this WR to CQ, if QP
+ * is in ERROR state. SQ is guaranteed to
+ * be empty, so the WR completes in order.
+ *
+ * Typically triggered by ib_drain_sq().
+ */
+ rv = siw_sq_flush_wr(qp, wr, bad_wr);
+ } else {
+ siw_dbg_qp(qp, "QP out of state %d\n",
+ qp->attrs.state);
+ *bad_wr = wr;
+ rv = -ENOTCONN;
+ }
+ up_read(&qp->state_lock);
+ return rv;
+ }
+ spin_lock_irqsave(&qp->sq_lock, flags);
+
+ while (wr) {
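+ /* sq_size is a power of two, so the free-running sq_put counter wraps cleanly */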
+ u32 idx = qp->sq_put % qp->attrs.sq_size;
+ struct siw_sqe *sqe = &qp->sendq[idx];
+
+ if (sqe->flags) {
+ siw_dbg_qp(qp, "sq full\n");
+ rv = -ENOMEM;
+ break;
+ }
+ if (wr->num_sge > qp->attrs.sq_max_sges) {
+ siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge);
+ rv = -EINVAL;
+ break;
+ }
+ sqe->id = wr->wr_id;
+
+ if ((wr->send_flags & IB_SEND_SIGNALED) ||
+ (qp->attrs.flags & SIW_SIGNAL_ALL_WR))
+ sqe->flags |= SIW_WQE_SIGNALLED;
+
+ if (wr->send_flags & IB_SEND_FENCE)
+ sqe->flags |= SIW_WQE_READ_FENCE;
+
+ switch (wr->opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_INV:
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ sqe->flags |= SIW_WQE_SOLICITED;
+
+ if (!(wr->send_flags & IB_SEND_INLINE)) {
+ siw_copy_sgl(wr->sg_list, sqe->sge,
+ wr->num_sge);
+ sqe->num_sge = wr->num_sge;
+ } else {
+ rv = siw_copy_inline_sgl(wr, sqe);
+ if (rv <= 0) {
+ rv = -EINVAL;
+ break;
+ }
+ sqe->flags |= SIW_WQE_INLINE;
+ sqe->num_sge = 1;
+ }
+ if (wr->opcode == IB_WR_SEND)
+ sqe->opcode = SIW_OP_SEND;
+ else {
+ sqe->opcode = SIW_OP_SEND_REMOTE_INV;
+ sqe->rkey = wr->ex.invalidate_rkey;
+ }
+ break;
+
+ case IB_WR_RDMA_READ_WITH_INV:
+ case IB_WR_RDMA_READ:
+ /*
+ * iWarp restricts RREAD sink to SGL containing
+ * 1 SGE only. We could relax to SGL with multiple
+ * elements referring to the SAME ltag, or even sending
+ * a private per-rreq tag referring to a checked
+ * local sgl with MULTIPLE ltag's.
+ */
+ if (unlikely(wr->num_sge != 1)) {
+ rv = -EINVAL;
+ break;
+ }
+ siw_copy_sgl(wr->sg_list, &sqe->sge[0], 1);
+ /*
+ * NOTE: zero length RREAD is allowed!
+ */
+ sqe->raddr = rdma_wr(wr)->remote_addr;
+ sqe->rkey = rdma_wr(wr)->rkey;
+ sqe->num_sge = 1;
+
+ if (wr->opcode == IB_WR_RDMA_READ)
+ sqe->opcode = SIW_OP_READ;
+ else
+ sqe->opcode = SIW_OP_READ_LOCAL_INV;
+ break;
+
+ case IB_WR_RDMA_WRITE:
+ if (!(wr->send_flags & IB_SEND_INLINE)) {
+ siw_copy_sgl(wr->sg_list, &sqe->sge[0],
+ wr->num_sge);
+ sqe->num_sge = wr->num_sge;
+ } else {
+ rv = siw_copy_inline_sgl(wr, sqe);
+ if (unlikely(rv < 0)) {
+ rv = -EINVAL;
+ break;
+ }
+ sqe->flags |= SIW_WQE_INLINE;
+ sqe->num_sge = 1;
+ }
+ sqe->raddr = rdma_wr(wr)->remote_addr;
+ sqe->rkey = rdma_wr(wr)->rkey;
+ sqe->opcode = SIW_OP_WRITE;
+ break;
+
+ case IB_WR_REG_MR:
+ sqe->base_mr = (uintptr_t)reg_wr(wr)->mr;
+ sqe->rkey = reg_wr(wr)->key;
+ sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK;
+ sqe->opcode = SIW_OP_REG_MR;
+ break;
+
+ case IB_WR_LOCAL_INV:
+ sqe->rkey = wr->ex.invalidate_rkey;
+ sqe->opcode = SIW_OP_INVAL_STAG;
+ break;
+
+ default:
+ siw_dbg_qp(qp, "ib wr type %d unsupported\n",
+ wr->opcode);
+ rv = -EINVAL;
+ break;
+ }
+ siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%p\n",
+ sqe->opcode, sqe->flags,
+ (void *)(uintptr_t)sqe->id);
+
+ if (unlikely(rv < 0))
+ break;
+
+ /* make SQE only valid after completely written */
+ smp_wmb();
+ sqe->flags |= SIW_WQE_VALID;
+
+ qp->sq_put++;
+ wr = wr->next;
+ }
+
+ /*
+ * Send directly if SQ processing is not in progress.
+ * Possible immediate errors (rv < 0) do not affect the involved
+ * RI resources (Verbs, 8.3.1) and thus do not prevent SQ
+ * processing, if new work is already pending. But rv and a pointer
+ * to the failed work request must be passed back to the caller.
+ */
+ if (unlikely(rv < 0)) {
+ /*
+ * Immediate error
+ */
+ siw_dbg_qp(qp, "Immediate error %d\n", rv);
+ imm_err = rv;
+ *bad_wr = wr;
+ }
+ if (wqe->wr_status != SIW_WR_IDLE) {
+ spin_unlock_irqrestore(&qp->sq_lock, flags);
+ goto skip_direct_sending;
+ }
+ rv = siw_activate_tx(qp);
+ spin_unlock_irqrestore(&qp->sq_lock, flags);
+
+ if (rv <= 0)
+ goto skip_direct_sending;
+
+ if (rdma_is_kernel_res(&qp->base_qp.res)) {
+ rv = siw_sq_start(qp);
+ } else {
+ qp->tx_ctx.in_syscall = 1;
+
+ if (siw_qp_sq_process(qp) != 0 && !(qp->tx_ctx.tx_suspend))
+ siw_qp_cm_drop(qp, 0);
+
+ qp->tx_ctx.in_syscall = 0;
+ }
+skip_direct_sending:
+
+ up_read(&qp->state_lock);
+
+ if (unlikely(imm_err))
+ return imm_err;
+
+ return (rv >= 0) ? 0 : rv;
+}
+
+/*
+ * siw_post_receive()
+ *
+ * Post a list of R-WR's to an RQ.
+ *
+ * @base_qp: Base QP contained in siw QP
+ * @wr: Null terminated list of user WR's
+ * @bad_wr: Points to failing WR in case of synchronous failure.
+ */
+int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct siw_qp *qp = to_siw_qp(base_qp);
+ unsigned long flags;
+ int rv = 0;
+
+ if (qp->srq || qp->attrs.rq_size == 0) {
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+ if (!rdma_is_kernel_res(&qp->base_qp.res)) {
+ siw_dbg_qp(qp, "no kernel post_recv for user mapped rq\n");
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ /*
+ * Try to acquire QP state lock. Must be non-blocking
+ * to accommodate kernel clients' needs.
+ */
+ if (!down_read_trylock(&qp->state_lock)) {
+ if (qp->attrs.state == SIW_QP_STATE_ERROR) {
+ /*
+ * ERROR state is final, so we can be sure
+ * this state will not change as long as the QP
+ * exists.
+ *
+ * This handles an ib_drain_rq() call with
+ * a concurrent request to set the QP state
+ * to ERROR.
+ */
+ rv = siw_rq_flush_wr(qp, wr, bad_wr);
+ } else {
+ siw_dbg_qp(qp, "QP locked, state %d\n",
+ qp->attrs.state);
+ *bad_wr = wr;
+ rv = -ENOTCONN;
+ }
+ return rv;
+ }
+ if (qp->attrs.state > SIW_QP_STATE_RTS) {
+ if (qp->attrs.state == SIW_QP_STATE_ERROR) {
+ /*
+ * Immediately flush this WR to CQ, if QP
+ * is in ERROR state. RQ is guaranteed to
+ * be empty, so the WR completes in order.
+ *
+ * Typically triggered by ib_drain_rq().
+ */
+ rv = siw_rq_flush_wr(qp, wr, bad_wr);
+ } else {
+ siw_dbg_qp(qp, "QP out of state %d\n",
+ qp->attrs.state);
+ *bad_wr = wr;
+ rv = -ENOTCONN;
+ }
+ up_read(&qp->state_lock);
+ return rv;
+ }
+ /*
+ * Serialize potentially multiple producers.
+ * Not needed for single threaded consumer side.
+ */
+ spin_lock_irqsave(&qp->rq_lock, flags);
+
+ while (wr) {
+ u32 idx = qp->rq_put % qp->attrs.rq_size;
+ struct siw_rqe *rqe = &qp->recvq[idx];
+
+ if (rqe->flags) {
+ siw_dbg_qp(qp, "RQ full\n");
+ rv = -ENOMEM;
+ break;
+ }
+ if (wr->num_sge > qp->attrs.rq_max_sges) {
+ siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge);
+ rv = -EINVAL;
+ break;
+ }
+ rqe->id = wr->wr_id;
+ rqe->num_sge = wr->num_sge;
+ siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge);
+
+ /* make sure RQE is completely written before valid */
+ smp_wmb();
+
+ rqe->flags = SIW_WQE_VALID;
+
+ qp->rq_put++;
+ wr = wr->next;
+ }
+ spin_unlock_irqrestore(&qp->rq_lock, flags);
+
+ up_read(&qp->state_lock);
+
+ if (rv < 0) {
+ siw_dbg_qp(qp, "error %d\n", rv);
+ *bad_wr = wr;
+ }
+ return rv;
+}
+
+int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
+{
+ struct siw_cq *cq = to_siw_cq(base_cq);
+ struct siw_device *sdev = to_siw_dev(base_cq->device);
+ struct siw_ucontext *ctx =
+ rdma_udata_to_drv_context(udata, struct siw_ucontext,
+ base_ucontext);
+
+ siw_dbg_cq(cq, "free CQ resources\n");
+
+ siw_cq_flush(cq);
+
+ if (ctx)
+ rdma_user_mmap_entry_remove(cq->cq_entry);
+
+ atomic_dec(&sdev->num_cq);
+
+ vfree(cq->queue);
+ return 0;
+}
+
+/*
+ * siw_create_cq()
+ *
+ * Populate CQ of requested size
+ *
+ * @base_cq: CQ as allocated by RDMA midlayer
+ * @attr: Initial CQ attributes
+ * @attrs: uverbs bundle
+ */
+
+int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_udata *udata = &attrs->driver_udata;
+ struct siw_device *sdev = to_siw_dev(base_cq->device);
+ struct siw_cq *cq = to_siw_cq(base_cq);
+ int rv, size = attr->cqe;
+
+ if (attr->flags)
+ return -EOPNOTSUPP;
+
+ if (atomic_inc_return(&sdev->num_cq) > SIW_MAX_CQ) {
+ siw_dbg(base_cq->device, "too many CQ's\n");
+ rv = -ENOMEM;
+ goto err_out;
+ }
+ if (size < 1 || size > sdev->attrs.max_cqe) {
+ siw_dbg(base_cq->device, "CQ size error: %d\n", size);
+ rv = -EINVAL;
+ goto err_out;
+ }
+ size = roundup_pow_of_two(size);
+ cq->base_cq.cqe = size;
+ cq->num_cqe = size;
+
+ if (udata)
+ cq->queue = vmalloc_user(size * sizeof(struct siw_cqe) +
+ sizeof(struct siw_cq_ctrl));
+ else
+ cq->queue = vzalloc(size * sizeof(struct siw_cqe) +
+ sizeof(struct siw_cq_ctrl));
+
+ if (cq->queue == NULL) {
+ rv = -ENOMEM;
+ goto err_out;
+ }
+ get_random_bytes(&cq->id, 4);
+ siw_dbg(base_cq->device, "new CQ [%u]\n", cq->id);
+
+ spin_lock_init(&cq->lock);
+
+ cq->notify = (struct siw_cq_ctrl *)&cq->queue[size];
+
+ if (udata) {
+ struct siw_uresp_create_cq uresp = {};
+ struct siw_ucontext *ctx =
+ rdma_udata_to_drv_context(udata, struct siw_ucontext,
+ base_ucontext);
+ size_t length = size * sizeof(struct siw_cqe) +
+ sizeof(struct siw_cq_ctrl);
+
+ cq->cq_entry =
+ siw_mmap_entry_insert(ctx, cq->queue,
+ length, &uresp.cq_key);
+ if (!cq->cq_entry) {
+ rv = -ENOMEM;
+ goto err_out;
+ }
+
+ uresp.cq_id = cq->id;
+ uresp.num_cqe = size;
+
+ if (udata->outlen < sizeof(uresp)) {
+ rv = -EINVAL;
+ goto err_out;
+ }
+ rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rv)
+ goto err_out;
+ }
+ return 0;
+
+err_out:
+ siw_dbg(base_cq->device, "CQ creation failed: %d", rv);
+
+ if (cq->queue) {
+ struct siw_ucontext *ctx =
+ rdma_udata_to_drv_context(udata, struct siw_ucontext,
+ base_ucontext);
+ if (ctx)
+ rdma_user_mmap_entry_remove(cq->cq_entry);
+ vfree(cq->queue);
+ }
+ atomic_dec(&sdev->num_cq);
+
+ return rv;
+}
+
+/*
+ * siw_poll_cq()
+ *
+ * Reap CQ entries if available and copy work completion status into
+ * array of WC's provided by caller. Returns number of reaped CQE's.
+ *
+ * @base_cq: Base CQ contained in siw CQ.
+ * @num_cqe: Maximum number of CQE's to reap.
+ * @wc: Array of work completions to be filled by siw.
+ */
+int siw_poll_cq(struct ib_cq *base_cq, int num_cqe, struct ib_wc *wc)
+{
+ struct siw_cq *cq = to_siw_cq(base_cq);
+ int i;
+
+ for (i = 0; i < num_cqe; i++) {
+ if (!siw_reap_cqe(cq, wc))
+ break;
+ wc++;
+ }
+ return i;
+}
+
+/*
+ * siw_req_notify_cq()
+ *
+ * Request notification for new CQE's added to that CQ.
+ * Defined flags:
+ * o SIW_CQ_NOTIFY_SOLICITED lets siw trigger a notification
+ * event if a WQE with notification flag set enters the CQ
+ * o SIW_CQ_NOTIFY_NEXT_COMP lets siw trigger a notification
+ * event if a WQE enters the CQ.
+ * o IB_CQ_REPORT_MISSED_EVENTS: the return value provides the
+ * number of CQE's not yet reaped, regardless of their notification
+ * type and the current or new CQ notification settings.
+ *
+ * @base_cq: Base CQ contained in siw CQ.
+ * @flags: Requested notification flags.
+ */
+int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags)
+{
+ struct siw_cq *cq = to_siw_cq(base_cq);
+
+ siw_dbg_cq(cq, "flags: 0x%02x\n", flags);
+
+ if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
+ /*
+ * Enable CQ event for next solicited completion
+ * and make it visible to all associated producers.
+ */
+ smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED);
+ else
+ /*
+ * Enable CQ event for any signalled completion
+ * and make it visible to all associated producers.
+ */
+ smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL);
+
+ if (flags & IB_CQ_REPORT_MISSED_EVENTS)
+ return cq->cq_put - cq->cq_get;
+
+ return 0;
+}
+
+/*
+ * siw_dereg_mr()
+ *
+ * Release Memory Region.
+ *
+ * @base_mr: Base MR contained in siw MR.
+ * @udata: points to user context, unused.
+ */
+int siw_dereg_mr(struct ib_mr *base_mr, struct ib_udata *udata)
+{
+ struct siw_mr *mr = to_siw_mr(base_mr);
+ struct siw_device *sdev = to_siw_dev(base_mr->device);
+
+ siw_dbg_mem(mr->mem, "deregister MR\n");
+
+ atomic_dec(&sdev->num_mr);
+
+ siw_mr_drop_mem(mr);
+ kfree_rcu(mr, rcu);
+
+ return 0;
+}
+
+/*
+ * siw_reg_user_mr()
+ *
+ * Register Memory Region.
+ *
+ * @pd: Protection Domain
+ * @start: starting address of MR (virtual address)
+ * @len: len of MR
+ * @rnic_va: not used by siw
+ * @rights: MR access rights
+ * @dmah: dma handle
+ * @udata: user buffer to communicate STag and Key.
+ */
+struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
+ u64 rnic_va, int rights, struct ib_dmah *dmah,
+ struct ib_udata *udata)
+{
+ struct siw_mr *mr = NULL;
+ struct siw_umem *umem = NULL;
+ struct siw_ureq_reg_mr ureq;
+ struct siw_device *sdev = to_siw_dev(pd->device);
+ int rv;
+
+ siw_dbg_pd(pd, "start: 0x%p, va: 0x%p, len: %llu\n",
+ (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
+ (unsigned long long)len);
+
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
+ siw_dbg_pd(pd, "too many mr's\n");
+ rv = -ENOMEM;
+ goto err_out;
+ }
+ if (!len) {
+ rv = -EINVAL;
+ goto err_out;
+ }
+ umem = siw_umem_get(pd->device, start, len, rights);
+ if (IS_ERR(umem)) {
+ rv = PTR_ERR(umem);
+ siw_dbg_pd(pd, "getting user memory failed: %d\n", rv);
+ umem = NULL;
+ goto err_out;
+ }
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ rv = -ENOMEM;
+ goto err_out;
+ }
+ rv = siw_mr_add_mem(mr, pd, umem, start, len, rights);
+ if (rv)
+ goto err_out;
+
+ if (udata) {
+ struct siw_uresp_reg_mr uresp = {};
+ struct siw_mem *mem = mr->mem;
+
+ if (udata->inlen < sizeof(ureq)) {
+ rv = -EINVAL;
+ goto err_out;
+ }
+ rv = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
+ if (rv)
+ goto err_out;
+
+ mr->base_mr.lkey |= ureq.stag_key;
+ mr->base_mr.rkey |= ureq.stag_key;
+ mem->stag |= ureq.stag_key;
+ uresp.stag = mem->stag;
+
+ if (udata->outlen < sizeof(uresp)) {
+ rv = -EINVAL;
+ goto err_out;
+ }
+ rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rv)
+ goto err_out;
+ }
+ mr->mem->stag_valid = 1;
+
+ return &mr->base_mr;
+
+err_out:
+ atomic_dec(&sdev->num_mr);
+ if (mr) {
+ if (mr->mem)
+ siw_mr_drop_mem(mr);
+ kfree_rcu(mr, rcu);
+ } else {
+ if (umem)
+ siw_umem_release(umem);
+ }
+ return ERR_PTR(rv);
+}
+
+struct ib_mr *siw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_sge)
+{
+ struct siw_device *sdev = to_siw_dev(pd->device);
+ struct siw_mr *mr = NULL;
+ struct siw_pbl *pbl = NULL;
+ int rv;
+
+ if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
+ siw_dbg_pd(pd, "too many mr's\n");
+ rv = -ENOMEM;
+ goto err_out;
+ }
+ if (mr_type != IB_MR_TYPE_MEM_REG) {
+ siw_dbg_pd(pd, "mr type %d unsupported\n", mr_type);
+ rv = -EOPNOTSUPP;
+ goto err_out;
+ }
+ if (max_sge > SIW_MAX_SGE_PBL) {
+ siw_dbg_pd(pd, "too many sge's: %d\n", max_sge);
+ rv = -ENOMEM;
+ goto err_out;
+ }
+ pbl = siw_pbl_alloc(max_sge);
+ if (IS_ERR(pbl)) {
+ rv = PTR_ERR(pbl);
+ siw_dbg_pd(pd, "pbl allocation failed: %d\n", rv);
+ pbl = NULL;
+ goto err_out;
+ }
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ rv = -ENOMEM;
+ goto err_out;
+ }
+ rv = siw_mr_add_mem(mr, pd, pbl, 0, max_sge * PAGE_SIZE, 0);
+ if (rv)
+ goto err_out;
+
+ mr->mem->is_pbl = 1;
+
+ siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag);
+
+ return &mr->base_mr;
+
+err_out:
+ atomic_dec(&sdev->num_mr);
+
+ if (!mr) {
+ kfree(pbl);
+ } else {
+ if (mr->mem)
+ siw_mr_drop_mem(mr);
+ kfree_rcu(mr, rcu);
+ }
+ siw_dbg_pd(pd, "failed: %d\n", rv);
+
+ return ERR_PTR(rv);
+}
+
+/* Just used to count number of pages being mapped */
+static int siw_set_pbl_page(struct ib_mr *base_mr, u64 buf_addr)
+{
+ return 0;
+}
+
+int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
+ unsigned int *sg_off)
+{
+ struct scatterlist *slp;
+ struct siw_mr *mr = to_siw_mr(base_mr);
+ struct siw_mem *mem = mr->mem;
+ struct siw_pbl *pbl = mem->pbl;
+ struct siw_pble *pble;
+ unsigned long pbl_size;
+ int i, rv;
+
+ if (!pbl) {
+ siw_dbg_mem(mem, "no PBL allocated\n");
+ return -EINVAL;
+ }
+ pble = pbl->pbe;
+
+ if (pbl->max_buf < num_sle) {
+ siw_dbg_mem(mem, "too many SGE's: %d > %d\n",
+ num_sle, pbl->max_buf);
+ return -ENOMEM;
+ }
+ for_each_sg(sl, slp, num_sle, i) {
+ if (sg_dma_len(slp) == 0) {
+ siw_dbg_mem(mem, "empty SGE\n");
+ return -EINVAL;
+ }
+ if (i == 0) {
+ pble->addr = sg_dma_address(slp);
+ pble->size = sg_dma_len(slp);
+ pble->pbl_off = 0;
+ pbl_size = pble->size;
+ pbl->num_buf = 1;
+ } else {
+ /* Merge PBL entries if adjacent */
+ if (pble->addr + pble->size == sg_dma_address(slp)) {
+ pble->size += sg_dma_len(slp);
+ } else {
+ pble++;
+ pbl->num_buf++;
+ pble->addr = sg_dma_address(slp);
+ pble->size = sg_dma_len(slp);
+ pble->pbl_off = pbl_size;
+ }
+ pbl_size += sg_dma_len(slp);
+ }
+ siw_dbg_mem(mem,
+ "sge[%d], size %u, addr 0x%p, total %lu\n",
+ i, pble->size, ib_virt_dma_to_ptr(pble->addr),
+ pbl_size);
+ }
+ rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);
+ if (rv > 0) {
+ mem->len = base_mr->length;
+ mem->va = base_mr->iova;
+ siw_dbg_mem(mem,
+ "%llu bytes, start 0x%p, %u SLE to %u entries\n",
+ mem->len, (void *)(uintptr_t)mem->va, num_sle,
+ pbl->num_buf);
+ }
+ return rv;
+}
+
+/*
+ * siw_get_dma_mr()
+ *
+ * Create an (empty) DMA memory region with no umem attached.
+ */
+struct ib_mr *siw_get_dma_mr(struct ib_pd *pd, int rights)
+{
+ struct siw_device *sdev = to_siw_dev(pd->device);
+ struct siw_mr *mr = NULL;
+ int rv;
+
+ if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
+ siw_dbg_pd(pd, "too many mr's\n");
+ rv = -ENOMEM;
+ goto err_out;
+ }
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ rv = -ENOMEM;
+ goto err_out;
+ }
+ rv = siw_mr_add_mem(mr, pd, NULL, 0, ULONG_MAX, rights);
+ if (rv)
+ goto err_out;
+
+ mr->mem->stag_valid = 1;
+
+ siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag);
+
+ return &mr->base_mr;
+
+err_out:
+ if (rv)
+ kfree(mr);
+
+ atomic_dec(&sdev->num_mr);
+
+ return ERR_PTR(rv);
+}
+
+/*
+ * siw_create_srq()
+ *
+ * Create Shared Receive Queue of attributes @init_attrs
+ * within protection domain given by @pd.
+ *
+ * @base_srq: Base SRQ contained in siw SRQ.
+ * @init_attrs: SRQ init attributes.
+ * @udata: points to user context
+ */
+int siw_create_srq(struct ib_srq *base_srq,
+ struct ib_srq_init_attr *init_attrs, struct ib_udata *udata)
+{
+ struct siw_srq *srq = to_siw_srq(base_srq);
+ struct ib_srq_attr *attrs = &init_attrs->attr;
+ struct siw_device *sdev = to_siw_dev(base_srq->device);
+ struct siw_ucontext *ctx =
+ rdma_udata_to_drv_context(udata, struct siw_ucontext,
+ base_ucontext);
+ int rv;
+
+ if (init_attrs->srq_type != IB_SRQT_BASIC)
+ return -EOPNOTSUPP;
+
+ if (atomic_inc_return(&sdev->num_srq) > SIW_MAX_SRQ) {
+ siw_dbg_pd(base_srq->pd, "too many SRQ's\n");
+ rv = -ENOMEM;
+ goto err_out;
+ }
+ if (attrs->max_wr == 0 || attrs->max_wr > SIW_MAX_SRQ_WR ||
+ attrs->max_sge > SIW_MAX_SGE || attrs->srq_limit > attrs->max_wr) {
+ rv = -EINVAL;
+ goto err_out;
+ }
+ srq->max_sge = attrs->max_sge;
+ srq->num_rqe = roundup_pow_of_two(attrs->max_wr);
+ srq->limit = attrs->srq_limit;
+ if (srq->limit)
+ srq->armed = true;
+
+ srq->is_kernel_res = !udata;
+
+ if (udata)
+ srq->recvq =
+ vmalloc_user(srq->num_rqe * sizeof(struct siw_rqe));
+ else
+ srq->recvq = vcalloc(srq->num_rqe, sizeof(struct siw_rqe));
+
+ if (srq->recvq == NULL) {
+ rv = -ENOMEM;
+ goto err_out;
+ }
+ if (udata) {
+ struct siw_uresp_create_srq uresp = {};
+ size_t length = srq->num_rqe * sizeof(struct siw_rqe);
+
+ srq->srq_entry =
+ siw_mmap_entry_insert(ctx, srq->recvq,
+ length, &uresp.srq_key);
+ if (!srq->srq_entry) {
+ rv = -ENOMEM;
+ goto err_out;
+ }
+
+ uresp.num_rqe = srq->num_rqe;
+
+ if (udata->outlen < sizeof(uresp)) {
+ rv = -EINVAL;
+ goto err_out;
+ }
+ rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rv)
+ goto err_out;
+ }
+ spin_lock_init(&srq->lock);
+
+ siw_dbg_pd(base_srq->pd, "[SRQ]: success\n");
+
+ return 0;
+
+err_out:
+ if (srq->recvq) {
+ if (ctx)
+ rdma_user_mmap_entry_remove(srq->srq_entry);
+ vfree(srq->recvq);
+ }
+ atomic_dec(&sdev->num_srq);
+
+ return rv;
+}
+
+/*
+ * siw_modify_srq()
+ *
+ * Modify SRQ. The caller may resize SRQ and/or set/reset notification
+ * limit and (re)arm IB_EVENT_SRQ_LIMIT_REACHED notification.
+ *
+ * NOTE: it is unclear if RDMA core allows for changing the MAX_SGE
+ * parameter. siw_modify_srq() does not check the attrs->max_sge param.
+ */
+int siw_modify_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs,
+ enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
+{
+ struct siw_srq *srq = to_siw_srq(base_srq);
+ unsigned long flags;
+ int rv = 0;
+
+ spin_lock_irqsave(&srq->lock, flags);
+
+ if (attr_mask & IB_SRQ_MAX_WR) {
+ /* resize request not yet supported */
+ rv = -EOPNOTSUPP;
+ goto out;
+ }
+ if (attr_mask & IB_SRQ_LIMIT) {
+ if (attrs->srq_limit) {
+ if (unlikely(attrs->srq_limit > srq->num_rqe)) {
+ rv = -EINVAL;
+ goto out;
+ }
+ srq->armed = true;
+ } else {
+ srq->armed = false;
+ }
+ srq->limit = attrs->srq_limit;
+ }
+out:
+ spin_unlock_irqrestore(&srq->lock, flags);
+
+ return rv;
+}
+
+/*
+ * siw_query_srq()
+ *
+ * Query SRQ attributes.
+ */
+int siw_query_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs)
+{
+ struct siw_srq *srq = to_siw_srq(base_srq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&srq->lock, flags);
+
+ attrs->max_wr = srq->num_rqe;
+ attrs->max_sge = srq->max_sge;
+ attrs->srq_limit = srq->limit;
+
+ spin_unlock_irqrestore(&srq->lock, flags);
+
+ return 0;
+}
+
+/*
+ * siw_destroy_srq()
+ *
+ * Destroy SRQ.
+ * It is assumed that the SRQ is not referenced by any
+ * QP anymore - the code trusts the RDMA core environment to keep track
+ * of QP references.
+ */
+int siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata)
+{
+ struct siw_srq *srq = to_siw_srq(base_srq);
+ struct siw_device *sdev = to_siw_dev(base_srq->device);
+ struct siw_ucontext *ctx =
+ rdma_udata_to_drv_context(udata, struct siw_ucontext,
+ base_ucontext);
+
+ if (ctx)
+ rdma_user_mmap_entry_remove(srq->srq_entry);
+ vfree(srq->recvq);
+ atomic_dec(&sdev->num_srq);
+ return 0;
+}
+
+/*
+ * siw_post_srq_recv()
+ *
+ * Post a list of receive queue elements to SRQ.
+ * NOTE: The function does not check or lock a certain SRQ state
+ * during the post operation. The code simply trusts the
+ * RDMA core environment.
+ *
+ * @base_srq: Base SRQ contained in siw SRQ
+ * @wr: List of R-WR's
+ * @bad_wr: Updated to failing WR if posting fails.
+ */
+int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct siw_srq *srq = to_siw_srq(base_srq);
+ unsigned long flags;
+ int rv = 0;
+
+ if (unlikely(!srq->is_kernel_res)) {
+ siw_dbg_pd(base_srq->pd,
+ "[SRQ]: no kernel post_recv for mapped srq\n");
+ rv = -EINVAL;
+ goto out;
+ }
+ /*
+ * Serialize potentially multiple producers.
+ * Also needed to serialize potentially multiple
+ * consumers.
+ */
+ spin_lock_irqsave(&srq->lock, flags);
+
+ while (wr) {
+ u32 idx = srq->rq_put % srq->num_rqe;
+ struct siw_rqe *rqe = &srq->recvq[idx];
+
+ if (rqe->flags) {
+ siw_dbg_pd(base_srq->pd, "SRQ full\n");
+ rv = -ENOMEM;
+ break;
+ }
+ if (unlikely(wr->num_sge > srq->max_sge)) {
+ siw_dbg_pd(base_srq->pd,
+ "[SRQ]: too many sge's: %d\n", wr->num_sge);
+ rv = -EINVAL;
+ break;
+ }
+ rqe->id = wr->wr_id;
+ rqe->num_sge = wr->num_sge;
+ siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge);
+
+ /* Make sure S-RQE is completely written before valid */
+ smp_wmb();
+
+ rqe->flags = SIW_WQE_VALID;
+
+ srq->rq_put++;
+ wr = wr->next;
+ }
+ spin_unlock_irqrestore(&srq->lock, flags);
+out:
+ if (unlikely(rv < 0)) {
+ siw_dbg_pd(base_srq->pd, "[SRQ]: error %d\n", rv);
+ *bad_wr = wr;
+ }
+ return rv;
+}
+
+void siw_qp_event(struct siw_qp *qp, enum ib_event_type etype)
+{
+ struct ib_event event;
+ struct ib_qp *base_qp = &qp->base_qp;
+
+ /*
+ * Do not report asynchronous errors on a QP which gets
+ * destroyed via the verbs interface (siw_destroy_qp())
+ */
+ if (qp->attrs.flags & SIW_QP_IN_DESTROY)
+ return;
+
+ event.event = etype;
+ event.device = base_qp->device;
+ event.element.qp = base_qp;
+
+ if (base_qp->event_handler) {
+ siw_dbg_qp(qp, "reporting event %d\n", etype);
+ base_qp->event_handler(&event, base_qp->qp_context);
+ }
+}
+
+void siw_cq_event(struct siw_cq *cq, enum ib_event_type etype)
+{
+ struct ib_event event;
+ struct ib_cq *base_cq = &cq->base_cq;
+
+ event.event = etype;
+ event.device = base_cq->device;
+ event.element.cq = base_cq;
+
+ if (base_cq->event_handler) {
+ siw_dbg_cq(cq, "reporting CQ event %d\n", etype);
+ base_cq->event_handler(&event, base_cq->cq_context);
+ }
+}
+
+void siw_srq_event(struct siw_srq *srq, enum ib_event_type etype)
+{
+ struct ib_event event;
+ struct ib_srq *base_srq = &srq->base_srq;
+
+ event.event = etype;
+ event.device = base_srq->device;
+ event.element.srq = base_srq;
+
+ if (base_srq->event_handler) {
+ siw_dbg_pd(srq->base_srq.pd,
+ "reporting SRQ event %d\n", etype);
+ base_srq->event_handler(&event, base_srq->srq_context);
+ }
+}
+
+void siw_port_event(struct siw_device *sdev, u32 port, enum ib_event_type etype)
+{
+ struct ib_event event;
+
+ event.event = etype;
+ event.device = &sdev->base_dev;
+ event.element.port_num = port;
+
+ siw_dbg(&sdev->base_dev, "reporting port event %d\n", etype);
+
+ ib_dispatch_event(&event);
+}
diff --git a/drivers/infiniband/sw/siw/siw_verbs.h b/drivers/infiniband/sw/siw/siw_verbs.h
new file mode 100644
index 000000000000..e9f4463aecdc
--- /dev/null
+++ b/drivers/infiniband/sw/siw/siw_verbs.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+
+#ifndef _SIW_VERBS_H
+#define _SIW_VERBS_H
+
+#include <linux/errno.h>
+
+#include <rdma/iw_cm.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "siw.h"
+#include "siw_cm.h"
+
+/*
+ * siw_copy_sgl()
+ *
+ * Copy SGL from RDMA core representation to local
+ * representation.
+ */
+static inline void siw_copy_sgl(struct ib_sge *sge, struct siw_sge *siw_sge,
+ int num_sge)
+{
+ while (num_sge--) {
+ siw_sge->laddr = sge->addr;
+ siw_sge->length = sge->length;
+ siw_sge->lkey = sge->lkey;
+
+ siw_sge++;
+ sge++;
+ }
+}
+
+int siw_alloc_ucontext(struct ib_ucontext *base_ctx, struct ib_udata *udata);
+void siw_dealloc_ucontext(struct ib_ucontext *base_ctx);
+int siw_query_port(struct ib_device *base_dev, u32 port,
+ struct ib_port_attr *attr);
+int siw_get_port_immutable(struct ib_device *base_dev, u32 port,
+ struct ib_port_immutable *port_immutable);
+int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
+ struct ib_udata *udata);
+int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+int siw_query_port(struct ib_device *base_dev, u32 port,
+ struct ib_port_attr *attr);
+int siw_query_gid(struct ib_device *base_dev, u32 port, int idx,
+ union ib_gid *gid);
+int siw_alloc_pd(struct ib_pd *base_pd, struct ib_udata *udata);
+int siw_dealloc_pd(struct ib_pd *base_pd, struct ib_udata *udata);
+int siw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attr,
+ struct ib_udata *udata);
+int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
+int siw_verbs_modify_qp(struct ib_qp *base_qp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata);
+int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata);
+int siw_poll_cq(struct ib_cq *base_cq, int num_entries, struct ib_wc *wc);
+int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags);
+struct ib_mr *siw_reg_user_mr(struct ib_pd *base_pd, u64 start, u64 len,
+ u64 rnic_va, int rights, struct ib_dmah *dmah,
+ struct ib_udata *udata);
+struct ib_mr *siw_alloc_mr(struct ib_pd *base_pd, enum ib_mr_type mr_type,
+ u32 max_sge);
+struct ib_mr *siw_get_dma_mr(struct ib_pd *base_pd, int rights);
+int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
+ unsigned int *sg_off);
+int siw_dereg_mr(struct ib_mr *base_mr, struct ib_udata *udata);
+int siw_create_srq(struct ib_srq *base_srq, struct ib_srq_init_attr *attr,
+ struct ib_udata *udata);
+int siw_modify_srq(struct ib_srq *base_srq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask mask, struct ib_udata *udata);
+int siw_query_srq(struct ib_srq *base_srq, struct ib_srq_attr *attr);
+int siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata);
+int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma);
+void siw_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
+void siw_qp_event(struct siw_qp *qp, enum ib_event_type type);
+void siw_cq_event(struct siw_cq *cq, enum ib_event_type type);
+void siw_srq_event(struct siw_srq *srq, enum ib_event_type type);
+void siw_port_event(struct siw_device *dev, u32 port, enum ib_event_type type);
+
+#endif
diff --git a/drivers/infiniband/ulp/Makefile b/drivers/infiniband/ulp/Makefile
index f3c7dcf03098..4d0004b58377 100644
--- a/drivers/infiniband/ulp/Makefile
+++ b/drivers/infiniband/ulp/Makefile
@@ -1,5 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INFINIBAND_IPOIB) += ipoib/
obj-$(CONFIG_INFINIBAND_SRP) += srp/
obj-$(CONFIG_INFINIBAND_SRPT) += srpt/
obj-$(CONFIG_INFINIBAND_ISER) += iser/
obj-$(CONFIG_INFINIBAND_ISERT) += isert/
+obj-$(CONFIG_INFINIBAND_OPA_VNIC) += opa_vnic/
+obj-$(CONFIG_INFINIBAND_RTRS) += rtrs/
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig
index cda8eac55fff..254e31a90a66 100644
--- a/drivers/infiniband/ulp/ipoib/Kconfig
+++ b/drivers/infiniband/ulp/ipoib/Kconfig
@@ -1,18 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_IPOIB
tristate "IP-over-InfiniBand"
depends on NETDEVICES && INET && (IPV6 || IPV6=n)
- ---help---
+ help
Support for the IP-over-InfiniBand protocol (IPoIB). This
transports IP packets over InfiniBand so you can use your IB
device as a fancy NIC.
- See Documentation/infiniband/ipoib.txt for more information
+ See Documentation/infiniband/ipoib.rst for more information
config INFINIBAND_IPOIB_CM
bool "IP-over-InfiniBand Connected Mode support"
depends on INFINIBAND_IPOIB
default n
- ---help---
+ help
This option enables support for IPoIB connected mode. After
enabling this option, you need to switch to connected mode
through /sys/class/net/ibXXX/mode to actually create
@@ -27,7 +28,7 @@ config INFINIBAND_IPOIB_DEBUG
bool "IP-over-InfiniBand debugging" if EXPERT
depends on INFINIBAND_IPOIB
default y
- ---help---
+ help
This option causes debugging code to be compiled into the
IPoIB driver. The output can be turned on via the
debug_level and mcast_debug_level module parameters (which
@@ -41,7 +42,7 @@ config INFINIBAND_IPOIB_DEBUG
config INFINIBAND_IPOIB_DEBUG_DATA
bool "IP-over-InfiniBand data path debugging"
depends on INFINIBAND_IPOIB_DEBUG
- ---help---
+ help
This option compiles debugging code into the data path
of the IPoIB driver. The output can be turned on via the
data_debug_level module parameter; however, even with output
diff --git a/drivers/infiniband/ulp/ipoib/Makefile b/drivers/infiniband/ulp/ipoib/Makefile
index e5430dd50764..6ece857ed262 100644
--- a/drivers/infiniband/ulp/ipoib/Makefile
+++ b/drivers/infiniband/ulp/ipoib/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INFINIBAND_IPOIB) += ib_ipoib.o
ib_ipoib-y := ipoib_main.o \
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index ca2873698d75..91f866e3fb8b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -52,7 +52,6 @@
#include <rdma/ib_pack.h>
#include <rdma/ib_sa.h>
#include <linux/sched.h>
-
/* constants */
enum ipoib_flush_level {
@@ -63,6 +62,8 @@ enum ipoib_flush_level {
enum {
IPOIB_ENCAP_LEN = 4,
+ IPOIB_PSEUDO_LEN = 20,
+ IPOIB_HARD_LEN = IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN,
IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */
@@ -80,7 +81,7 @@ enum {
IPOIB_NUM_WC = 4,
IPOIB_MAX_PATH_REC_QUEUE = 3,
- IPOIB_MAX_MCAST_QUEUE = 3,
+ IPOIB_MAX_MCAST_QUEUE = 64,
IPOIB_FLAG_OPER_UP = 0,
IPOIB_FLAG_INITIALIZED = 1,
@@ -90,8 +91,9 @@ enum {
IPOIB_STOP_REAPER = 7,
IPOIB_FLAG_ADMIN_CM = 9,
IPOIB_FLAG_UMCAST = 10,
- IPOIB_STOP_NEIGH_GC = 11,
IPOIB_NEIGH_TBL_FLUSH = 12,
+ IPOIB_FLAG_DEV_ADDR_SET = 13,
+ IPOIB_FLAG_DEV_ADDR_CTRL = 14,
IPOIB_MAX_BACKOFF_SECONDS = 16,
@@ -107,7 +109,7 @@ enum {
IPOIB_MCAST_FLAG_BUSY = 2,
IPOIB_MCAST_FLAG_ATTACHED = 3,
- MAX_SEND_CQE = 16,
+ MAX_SEND_CQE = 64,
IPOIB_CM_COPYBREAK = 256,
IPOIB_NON_CHILD = 0,
@@ -131,15 +133,28 @@ struct ipoib_header {
u16 reserved;
};
-struct ipoib_cb {
- struct qdisc_skb_cb qdisc_cb;
- u8 hwaddr[INFINIBAND_ALEN];
+struct ipoib_pseudo_header {
+ u8 hwaddr[INFINIBAND_ALEN];
};
-static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb)
+static inline void skb_add_pseudo_hdr(struct sk_buff *skb)
{
- BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb));
- return (struct ipoib_cb *)skb->cb;
+ char *data = skb_push(skb, IPOIB_PSEUDO_LEN);
+
+ /*
+ * only the ipoib header is present now, make room for a dummy
+ * pseudo header and set skb field accordingly
+ */
+ memset(data, 0, IPOIB_PSEUDO_LEN);
+ skb_reset_mac_header(skb);
+ skb_pull(skb, IPOIB_HARD_LEN);
+}
+
+static inline struct ipoib_dev_priv *ipoib_priv(const struct net_device *dev)
+{
+ struct rdma_netdev *rn = netdev_priv(dev);
+
+ return rn->clnt_priv;
}
/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
@@ -176,11 +191,6 @@ struct ipoib_tx_buf {
u64 mapping[MAX_SKB_FRAGS + 1];
};
-struct ipoib_cm_tx_buf {
- struct sk_buff *skb;
- u64 mapping;
-};
-
struct ib_cm_id;
struct ipoib_cm_data {
@@ -238,12 +248,12 @@ struct ipoib_cm_tx {
struct list_head list;
struct net_device *dev;
struct ipoib_neigh *neigh;
- struct ipoib_path *path;
struct ipoib_tx_buf *tx_ring;
- unsigned tx_head;
- unsigned tx_tail;
+ unsigned int tx_head;
+ unsigned int tx_tail;
unsigned long flags;
u32 mtu;
+ unsigned int max_send_sge;
};
struct ipoib_cm_rx_buf {
@@ -312,12 +322,14 @@ struct ipoib_dev_priv {
spinlock_t lock;
struct net_device *dev;
+ void (*next_priv_destructor)(struct net_device *dev);
- struct napi_struct napi;
+ struct napi_struct send_napi;
+ struct napi_struct recv_napi;
unsigned long flags;
- struct rw_semaphore vlan_rwsem;
+ struct mutex mcast_mutex;
struct rb_root path_tree;
struct list_head path_list;
@@ -331,10 +343,12 @@ struct ipoib_dev_priv {
struct workqueue_struct *wq;
struct delayed_work mcast_task;
struct work_struct carrier_on_task;
+ struct work_struct reschedule_napi_work;
struct work_struct flush_light;
struct work_struct flush_normal;
struct work_struct flush_heavy;
struct work_struct restart_task;
+ struct work_struct tx_timeout_work;
struct delayed_work ah_reap_task;
struct delayed_work neigh_reap_task;
struct ib_device *ca;
@@ -348,7 +362,7 @@ struct ipoib_dev_priv {
u32 qkey;
union ib_gid local_gid;
- u16 local_lid;
+ u32 local_lid;
unsigned int admin_mtu;
unsigned int mcast_mtu;
@@ -357,11 +371,14 @@ struct ipoib_dev_priv {
struct ipoib_rx_buf *rx_ring;
struct ipoib_tx_buf *tx_ring;
- unsigned tx_head;
- unsigned tx_tail;
+ /* cyclic ring variables for managing tx_ring, for UD only */
+ unsigned int tx_head;
+ unsigned int tx_tail;
+ /* cyclic ring variables for counting overall outstanding send WRs */
+ unsigned int global_tx_head;
+ unsigned int global_tx_tail;
struct ib_sge tx_sge[MAX_SKB_FRAGS + 1];
- struct ib_send_wr tx_wr;
- unsigned tx_outstanding;
+ struct ib_ud_wr tx_wr;
struct ib_wc send_wc[MAX_SEND_CQE];
struct ib_recv_wr rx_wr;
@@ -374,6 +391,9 @@ struct ipoib_dev_priv {
struct ib_event_handler event_handler;
struct net_device *parent;
+ /* 'child_intfs' and 'list' membership of all child devices are
+ * protected by the netdev instance lock of 'dev'.
+ */
struct list_head child_intfs;
struct list_head list;
int child_type;
@@ -387,9 +407,11 @@ struct ipoib_dev_priv {
struct dentry *mcg_dentry;
struct dentry *path_dentry;
#endif
- int hca_caps;
+ u64 hca_caps;
+ u64 kernel_caps;
struct ipoib_ethtool_st ethtool;
- struct timer_list poll_timer;
+ unsigned int max_send_sge;
+ const struct net_device_ops *rn_ops;
};
struct ipoib_ah {
@@ -397,12 +419,13 @@ struct ipoib_ah {
struct ib_ah *ah;
struct list_head list;
struct kref ref;
- unsigned last_send;
+ unsigned int last_send;
+ int valid;
};
struct ipoib_path {
struct net_device *dev;
- struct ib_sa_path_rec pathrec;
+ struct sa_path_rec pathrec;
struct ipoib_ah *ah;
struct sk_buff_head queue;
@@ -414,7 +437,6 @@ struct ipoib_path {
struct rb_node rb_node;
struct list_head list;
- int valid;
};
struct ipoib_neigh {
@@ -430,7 +452,7 @@ struct ipoib_neigh {
struct list_head list;
struct ipoib_neigh __rcu *hnext;
struct rcu_head rcu;
- atomic_t refcnt;
+ refcount_t refcnt;
unsigned long alive;
};
@@ -440,7 +462,7 @@ struct ipoib_neigh {
void ipoib_neigh_dtor(struct ipoib_neigh *neigh);
static inline void ipoib_neigh_put(struct ipoib_neigh *neigh)
{
- if (atomic_dec_and_test(&neigh->refcnt))
+ if (refcount_dec_and_test(&neigh->refcnt))
ipoib_neigh_dtor(neigh);
}
struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr);
@@ -453,60 +475,67 @@ extern struct workqueue_struct *ipoib_workqueue;
/* functions */
-int ipoib_poll(struct napi_struct *napi, int budget);
-void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
-void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
+int ipoib_rx_poll(struct napi_struct *napi, int budget);
+int ipoib_tx_poll(struct napi_struct *napi, int budget);
+void ipoib_ib_rx_completion(struct ib_cq *cq, void *ctx_ptr);
+void ipoib_ib_tx_completion(struct ib_cq *cq, void *ctx_ptr);
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
- struct ib_pd *pd, struct ib_ah_attr *attr);
+ struct ib_pd *pd, struct rdma_ah_attr *attr);
void ipoib_free_ah(struct kref *kref);
static inline void ipoib_put_ah(struct ipoib_ah *ah)
{
kref_put(&ah->ref, ipoib_free_ah);
}
int ipoib_open(struct net_device *dev);
+void ipoib_intf_free(struct net_device *dev);
int ipoib_add_pkey_attr(struct net_device *dev);
int ipoib_add_umcast_attr(struct net_device *dev);
-void ipoib_send(struct net_device *dev, struct sk_buff *skb,
- struct ipoib_ah *address, u32 qpn);
+int ipoib_send(struct net_device *dev, struct sk_buff *skb,
+ struct ib_ah *address, u32 dqpn);
void ipoib_reap_ah(struct work_struct *work);
+void ipoib_napi_schedule_work(struct work_struct *work);
+struct ipoib_path *__path_find(struct net_device *dev, void *gid);
void ipoib_mark_paths_invalid(struct net_device *dev);
void ipoib_flush_paths(struct net_device *dev);
-struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
-
-int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
+struct net_device *ipoib_intf_alloc(struct ib_device *hca, u32 port,
+ const char *format);
+int ipoib_intf_init(struct ib_device *hca, u32 port, const char *format,
+ struct net_device *dev);
void ipoib_ib_dev_flush_light(struct work_struct *work);
void ipoib_ib_dev_flush_normal(struct work_struct *work);
void ipoib_ib_dev_flush_heavy(struct work_struct *work);
-void ipoib_pkey_event(struct work_struct *work);
+void ipoib_queue_work(struct ipoib_dev_priv *priv,
+ enum ipoib_flush_level level);
+void ipoib_ib_tx_timeout_work(struct work_struct *work);
void ipoib_ib_dev_cleanup(struct net_device *dev);
+int ipoib_ib_dev_open_default(struct net_device *dev);
int ipoib_ib_dev_open(struct net_device *dev);
-int ipoib_ib_dev_up(struct net_device *dev);
-int ipoib_ib_dev_down(struct net_device *dev);
-int ipoib_ib_dev_stop(struct net_device *dev);
+void ipoib_ib_dev_stop(struct net_device *dev);
+void ipoib_ib_dev_up(struct net_device *dev);
+void ipoib_ib_dev_down(struct net_device *dev);
+int ipoib_ib_dev_stop_default(struct net_device *dev);
void ipoib_pkey_dev_check_presence(struct net_device *dev);
-int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
-void ipoib_dev_cleanup(struct net_device *dev);
-
void ipoib_mcast_join_task(struct work_struct *work);
void ipoib_mcast_carrier_on_task(struct work_struct *work);
void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
void ipoib_mcast_restart_task(struct work_struct *work);
-int ipoib_mcast_start_thread(struct net_device *dev);
-int ipoib_mcast_stop_thread(struct net_device *dev);
+void ipoib_mcast_start_thread(struct net_device *dev);
+void ipoib_mcast_stop_thread(struct net_device *dev);
-void ipoib_mcast_dev_down(struct net_device *dev);
void ipoib_mcast_dev_flush(struct net_device *dev);
int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req);
void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
struct ipoib_tx_buf *tx_req);
+struct rtnl_link_ops *ipoib_get_link_ops(void);
+
static inline void ipoib_build_sge(struct ipoib_dev_priv *priv,
struct ipoib_tx_buf *tx_req)
{
@@ -527,7 +556,7 @@ static inline void ipoib_build_sge(struct ipoib_dev_priv *priv,
priv->tx_sge[i + off].addr = mapping[i + off];
priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
}
- priv->tx_wr.num_sge = nr_frags + off;
+ priv->tx_wr.wr.num_sge = nr_frags + off;
}
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
@@ -546,8 +575,13 @@ void ipoib_path_iter_read(struct ipoib_path_iter *iter,
struct ipoib_path *path);
#endif
-int ipoib_mcast_attach(struct net_device *dev, u16 mlid,
- union ib_gid *mgid, int set_qkey);
+int ipoib_mcast_attach(struct net_device *dev, struct ib_device *hca,
+ union ib_gid *mgid, u16 mlid, int set_qkey, u32 qkey);
+int ipoib_mcast_detach(struct net_device *dev, struct ib_device *hca,
+ union ib_gid *mgid, u16 mlid);
+void ipoib_mcast_remove_list(struct list_head *remove_list);
+void ipoib_check_and_add_mcast_sendonly(struct ipoib_dev_priv *priv, u8 *mgid,
+ struct list_head *remove_list);
int ipoib_init_qp(struct net_device *dev);
int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca);
@@ -568,13 +602,11 @@ void __exit ipoib_netlink_fini(void);
void ipoib_set_umcast(struct net_device *ndev, int umcast_val);
int ipoib_set_mode(struct net_device *dev, const char *buf);
-void ipoib_setup(struct net_device *dev);
+void ipoib_setup_common(struct net_device *dev);
-void ipoib_pkey_open(struct ipoib_dev_priv *priv);
void ipoib_drain_cq(struct net_device *dev);
void ipoib_set_ethtool_ops(struct net_device *dev);
-int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca);
#define IPOIB_FLAGS_RC 0x80
#define IPOIB_FLAGS_UC 0x40
@@ -588,14 +620,14 @@ extern int ipoib_max_conn_qp;
static inline int ipoib_cm_admin_enabled(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
return IPOIB_CM_SUPPORTED(dev->dev_addr) &&
test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
}
static inline int ipoib_cm_enabled(struct net_device *dev, u8 *hwaddr)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
return IPOIB_CM_SUPPORTED(hwaddr) &&
test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
}
@@ -618,13 +650,13 @@ static inline void ipoib_cm_set(struct ipoib_neigh *neigh, struct ipoib_cm_tx *t
static inline int ipoib_cm_has_srq(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
return !!priv->cm.srq;
}
static inline unsigned int ipoib_cm_max_mtu(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
return priv->cm.max_cm_mtu;
}
@@ -643,8 +675,6 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc);
void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc);
#else
-struct ipoib_cm_tx;
-
#define ipoib_max_conn_qp 0
static inline int ipoib_cm_admin_enabled(struct net_device *dev)
@@ -703,7 +733,7 @@ void ipoib_cm_dev_stop(struct net_device *dev)
static inline
int ipoib_cm_dev_init(struct net_device *dev)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline
@@ -749,19 +779,25 @@ static inline void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *w
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
void ipoib_create_debug_files(struct net_device *dev);
void ipoib_delete_debug_files(struct net_device *dev);
-int ipoib_register_debugfs(void);
+void ipoib_register_debugfs(void);
void ipoib_unregister_debugfs(void);
#else
static inline void ipoib_create_debug_files(struct net_device *dev) { }
static inline void ipoib_delete_debug_files(struct net_device *dev) { }
-static inline int ipoib_register_debugfs(void) { return 0; }
+static inline void ipoib_register_debugfs(void) { }
static inline void ipoib_unregister_debugfs(void) { }
#endif
#define ipoib_printk(level, priv, format, arg...) \
printk(level "%s: " format, ((struct ipoib_dev_priv *) priv)->dev->name , ## arg)
#define ipoib_warn(priv, format, arg...) \
- ipoib_printk(KERN_WARNING, priv, format , ## arg)
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ 10 * HZ /* 10 seconds */, \
+ 100); \
+ if (__ratelimit(&_rs)) \
+ ipoib_printk(KERN_WARNING, priv, format , ## arg);\
+} while (0)
extern int ipoib_sendq_size;
extern int ipoib_recvq_size;
@@ -801,6 +837,4 @@ extern int ipoib_debug_level;
#define IPOIB_QPN(ha) (be32_to_cpup((__be32 *) ha) & 0xffffff)
-extern const char ipoib_driver_version[];
-
#endif /* _IPOIB_H */
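
The new ipoib.h comments describe tx_head/tx_tail and global_tx_head/global_tx_tail as free-running cyclic ring counters, and the rest of the series uses their difference to decide when to stop and wake the TX queue. As a side illustration only (not part of this patch), the stand-alone sketch below models that accounting with hypothetical names; it assumes a power-of-two ring size, which the driver also relies on when it masks with ipoib_sendq_size - 1.

/*
 * Illustrative sketch (not part of the patch): unsigned cyclic head/tail
 * counters tracking ring occupancy. All names here are hypothetical.
 */
#include <assert.h>
#include <stdio.h>

#define RING_SIZE 128u			/* must be a power of two */

static unsigned int head, tail;		/* only ever incremented */

static unsigned int occupancy(void)
{
	/* Unsigned subtraction stays correct even after wraparound. */
	return head - tail;
}

static int ring_full(void)
{
	/* Mirrors the "== ipoib_sendq_size - 1" stop-queue test. */
	return occupancy() == RING_SIZE - 1;
}

static int can_wake(void)
{
	/* Mirrors the "<= ipoib_sendq_size >> 1" wake-queue test. */
	return occupancy() <= RING_SIZE / 2;
}

int main(void)
{
	unsigned int i;

	/* Post until the ring is one short of full, then drain half. */
	while (!ring_full()) {
		unsigned int slot = head & (RING_SIZE - 1); /* slot index */

		(void)slot;
		head++;
	}
	for (i = 0; i < RING_SIZE / 2; i++)
		tail++;
	assert(can_wake());

	/* Force the counters across UINT_MAX to show wraparound safety. */
	head = 0xfffffff0u;
	tail = 0xffffffe0u;
	printf("occupancy before wrap: %u\n", occupancy());
	head += 0x40;			/* head wraps past zero */
	printf("occupancy after wrap:  %u\n", occupancy());
	return 0;
}

Because the counters are never reset, head - tail is the only arithmetic needed; the size - 1 (stop) and size / 2 (wake) thresholds mirror the tests added in ipoib_cm.c and ipoib_ib.c below.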
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index c78dc1638030..b610d36295bb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -38,6 +38,8 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
#include "ipoib.h"
@@ -63,6 +65,8 @@ MODULE_PARM_DESC(cm_data_debug_level,
#define IPOIB_CM_RX_DELAY (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)
+#define IPOIB_CM_RX_RESERVE (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)
+
static struct ib_qp_attr ipoib_cm_err_attr = {
.qp_state = IB_QPS_ERR
};
@@ -70,12 +74,11 @@ static struct ib_qp_attr ipoib_cm_err_attr = {
#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff
static struct ib_send_wr ipoib_cm_rx_drain_wr = {
- .wr_id = IPOIB_CM_RX_DRAIN_WRID,
.opcode = IB_WR_SEND,
};
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
- struct ib_cm_event *event);
+ const struct ib_cm_event *event);
static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
u64 mapping[IPOIB_CM_RX_SG])
@@ -90,8 +93,7 @@ static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
- struct ib_recv_wr *bad_wr;
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
int i, ret;
priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
@@ -99,7 +101,7 @@ static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
for (i = 0; i < priv->cm.num_frags; ++i)
priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];
- ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
+ ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, NULL);
if (unlikely(ret)) {
ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
@@ -116,8 +118,7 @@ static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
struct ib_recv_wr *wr,
struct ib_sge *sge, int id)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
- struct ib_recv_wr *bad_wr;
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
int i, ret;
wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
@@ -125,7 +126,7 @@ static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
for (i = 0; i < IPOIB_CM_RX_SG; ++i)
sge[i].addr = rx->rx_ring[id].mapping[i];
- ret = ib_post_recv(rx->qp, wr, &bad_wr);
+ ret = ib_post_recv(rx->qp, wr, NULL);
if (unlikely(ret)) {
ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
@@ -143,19 +144,19 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
u64 mapping[IPOIB_CM_RX_SG],
gfp_t gfp)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct sk_buff *skb;
int i;
- skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
+ skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
if (unlikely(!skb))
return NULL;
/*
- * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
+ * IPoIB adds an IPOIB_ENCAP_LEN byte header; this will align the
+ * IP header to a multiple of 16.
*/
- skb_reserve(skb, 12);
+ skb_reserve(skb, IPOIB_CM_RX_RESERVE);
mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
DMA_FROM_DEVICE);
@@ -194,7 +195,7 @@ partial_error:
static void ipoib_cm_free_rx_ring(struct net_device *dev,
struct ipoib_cm_rx_buf *rx_ring)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
int i;
for (i = 0; i < ipoib_recvq_size; ++i)
@@ -209,7 +210,6 @@ static void ipoib_cm_free_rx_ring(struct net_device *dev,
static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
- struct ib_send_wr *bad_wr;
struct ipoib_cm_rx *p;
/* We only reserved 1 extra slot in CQ for drain WRs, so
@@ -223,7 +223,8 @@ static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
* error" WC will be immediately generated for each WR we post.
*/
p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
- if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
+ ipoib_cm_rx_drain_wr.wr_id = IPOIB_CM_RX_DRAIN_WRID;
+ if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, NULL))
ipoib_warn(priv, "failed to post drain wr\n");
list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
@@ -232,7 +233,7 @@ static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
struct ipoib_cm_rx *p = ctx;
- struct ipoib_dev_priv *priv = netdev_priv(p->dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
unsigned long flags;
if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
@@ -248,7 +249,7 @@ static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
struct ipoib_cm_rx *p)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ib_qp_init_attr attr = {
.event_handler = ipoib_cm_rx_event_handler,
.send_cq = priv->recv_cq, /* For drain WR */
@@ -271,9 +272,9 @@ static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
static int ipoib_cm_modify_rx_qp(struct net_device *dev,
struct ib_cm_id *cm_id, struct ib_qp *qp,
- unsigned psn)
+ unsigned int psn)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ib_qp_attr qp_attr;
int qp_attr_mask, ret;
@@ -328,7 +329,7 @@ static void ipoib_cm_init_rx_wr(struct net_device *dev,
struct ib_recv_wr *wr,
struct ib_sge *sge)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
int i;
for (i = 0; i < priv->cm.num_frags; ++i)
@@ -346,7 +347,7 @@ static void ipoib_cm_init_rx_wr(struct net_device *dev,
static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
struct ipoib_cm_rx *rx)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct {
struct ib_recv_wr wr;
struct ib_sge sge[IPOIB_CM_RX_SG];
@@ -354,17 +355,15 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
int ret;
int i;
- rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
- if (!rx->rx_ring) {
- printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
- priv->ca->name, ipoib_recvq_size);
+ rx->rx_ring = vzalloc(array_size(ipoib_recvq_size,
+ sizeof(*rx->rx_ring)));
+ if (!rx->rx_ring)
return -ENOMEM;
- }
- t = kmalloc(sizeof *t, GFP_KERNEL);
+ t = kmalloc(sizeof(*t), GFP_KERNEL);
if (!t) {
ret = -ENOMEM;
- goto err_free;
+ goto err_free_1;
}
ipoib_cm_init_rx_wr(dev, &t->wr, t->sge);
@@ -411,16 +410,19 @@ err_count:
err_free:
kfree(t);
+
+err_free_1:
ipoib_cm_free_rx_ring(dev, rx->rx_ring);
return ret;
}
static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
- struct ib_qp *qp, struct ib_cm_req_event_param *req,
- unsigned psn)
+ struct ib_qp *qp,
+ const struct ib_cm_req_event_param *req,
+ unsigned int psn)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_cm_data data = {};
struct ib_cm_rep_param rep = {};
@@ -428,7 +430,7 @@ static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);
rep.private_data = &data;
- rep.private_data_len = sizeof data;
+ rep.private_data_len = sizeof(data);
rep.flow_control = 0;
rep.rnr_retry_count = req->rnr_retry_count;
rep.srq = ipoib_cm_has_srq(dev);
@@ -437,16 +439,17 @@ static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
return ib_send_cm_rep(cm_id, &rep);
}
-static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+static int ipoib_cm_req_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event)
{
struct net_device *dev = cm_id->context;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_cm_rx *p;
- unsigned psn;
+ unsigned int psn;
int ret;
ipoib_dbg(priv, "REQ arrived\n");
- p = kzalloc(sizeof *p, GFP_KERNEL);
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return -ENOMEM;
p->dev = dev;
@@ -462,7 +465,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
goto err_qp;
}
- psn = prandom_u32() & 0xffffff;
+ psn = get_random_u32() & 0xffffff;
ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
if (ret)
goto err_modify;
@@ -499,7 +502,7 @@ err_qp:
}
static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
- struct ib_cm_event *event)
+ const struct ib_cm_event *event)
{
struct ipoib_cm_rx *p;
struct ipoib_dev_priv *priv;
@@ -508,15 +511,14 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
case IB_CM_REQ_RECEIVED:
return ipoib_cm_req_handler(cm_id, event);
case IB_CM_DREQ_RECEIVED:
- p = cm_id->context;
ib_send_cm_drep(cm_id, NULL, 0);
- /* Fall through */
+ fallthrough;
case IB_CM_REJ_RECEIVED:
p = cm_id->context;
- priv = netdev_priv(p->dev);
+ priv = ipoib_priv(p->dev);
if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
ipoib_warn(priv, "unable to move qp to error state\n");
- /* Fall through */
+ fallthrough;
default:
return 0;
}
@@ -544,7 +546,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
0, PAGE_SIZE);
--skb_shinfo(skb)->nr_frags;
} else {
- size = min(length, (unsigned) PAGE_SIZE);
+ size = min_t(unsigned int, length, PAGE_SIZE);
skb_frag_size_set(frag, size);
skb->data_len += size;
@@ -557,7 +559,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_cm_rx_buf *rx_ring;
unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
struct sk_buff *skb, *newskb;
@@ -592,9 +594,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
skb = rx_ring[wr_id].skb;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
- ipoib_dbg(priv, "cm recv error "
- "(status=%d, wrid=%d vend_err %x)\n",
- wc->status, wr_id, wc->vendor_err);
+ ipoib_dbg(priv,
+ "cm recv error (status=%d, wrid=%d vend_err %#x)\n",
+ wc->status, wr_id, wc->vendor_err);
++dev->stats.rx_dropped;
if (has_srq)
goto repost;
@@ -624,9 +626,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
if (wc->byte_len < IPOIB_CM_COPYBREAK) {
int dlen = wc->byte_len;
- small_skb = dev_alloc_skb(dlen + 12);
+ small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
if (small_skb) {
- skb_reserve(small_skb, 12);
+ skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
dlen, DMA_FROM_DEVICE);
skb_copy_from_linear_data(skb, small_skb->data, dlen);
@@ -638,8 +640,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
}
}
- frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
- (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
+ frags = PAGE_ALIGN(wc->byte_len -
+ min_t(u32, wc->byte_len, IPOIB_CM_HEAD_SIZE)) /
+ PAGE_SIZE;
newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags,
mapping, GFP_ATOMIC);
@@ -654,7 +657,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
}
ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
- memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);
+ memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof(*mapping));
ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
wc->byte_len, wc->slid);
@@ -663,8 +666,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
copied:
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
- skb_reset_mac_header(skb);
- skb_pull(skb, IPOIB_ENCAP_LEN);
+ skb_add_pseudo_hdr(skb);
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
@@ -696,20 +698,19 @@ static inline int post_send(struct ipoib_dev_priv *priv,
unsigned int wr_id,
struct ipoib_tx_buf *tx_req)
{
- struct ib_send_wr *bad_wr;
-
ipoib_build_sge(priv, tx_req);
- priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM;
+ priv->tx_wr.wr.wr_id = wr_id | IPOIB_OP_CM;
- return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
+ return ib_post_send(tx->qp, &priv->tx_wr.wr, NULL);
}
void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_tx_buf *tx_req;
int rc;
+ unsigned int usable_sge = tx->max_send_sge - !!skb_headlen(skb);
if (unlikely(skb->len > tx->mtu)) {
ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
@@ -719,7 +720,23 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
return;
}
-
+ if (skb_shinfo(skb)->nr_frags > usable_sge) {
+ if (skb_linearize(skb) < 0) {
+ ipoib_warn(priv, "skb could not be linearized\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ /* Does skb_linearize return ok without reducing nr_frags? */
+ if (skb_shinfo(skb)->nr_frags > usable_sge) {
+ ipoib_warn(priv, "too many frags after skb linearize\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ }
ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
tx->tx_head, skb->len, tx->qp->qp_num);
@@ -739,36 +756,44 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
return;
}
+ if ((priv->global_tx_head - priv->global_tx_tail) ==
+ ipoib_sendq_size - 1) {
+ ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
+ tx->qp->qp_num);
+ netif_stop_queue(dev);
+ }
+
skb_orphan(skb);
skb_dst_drop(skb);
+ if (netif_queue_stopped(dev)) {
+ rc = ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
+ IB_CQ_REPORT_MISSED_EVENTS);
+ if (unlikely(rc < 0))
+ ipoib_warn(priv, "IPoIB/CM:request notify on send CQ failed\n");
+ else if (rc)
+ napi_schedule(&priv->send_napi);
+ }
+
rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
if (unlikely(rc)) {
- ipoib_warn(priv, "post_send failed, error %d\n", rc);
+ ipoib_warn(priv, "IPoIB/CM:post_send failed, error %d\n", rc);
++dev->stats.tx_errors;
ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(skb);
+
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
} else {
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
++tx->tx_head;
-
- if (++priv->tx_outstanding == ipoib_sendq_size) {
- ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
- tx->qp->qp_num);
- netif_stop_queue(dev);
- rc = ib_req_notify_cq(priv->send_cq,
- IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
- if (rc < 0)
- ipoib_warn(priv, "request notify on send CQ failed\n");
- else if (rc)
- ipoib_send_comp_handler(priv->send_cq, dev);
- }
+ ++priv->global_tx_head;
}
}
void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_cm_tx *tx = wc->qp->qp_context;
unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
struct ipoib_tx_buf *tx_req;
@@ -796,18 +821,30 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
netif_tx_lock(dev);
++tx->tx_tail;
- if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
- netif_queue_stopped(dev) &&
- test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+ ++priv->global_tx_tail;
+
+ if (unlikely(netif_queue_stopped(dev) &&
+ ((priv->global_tx_head - priv->global_tx_tail) <=
+ ipoib_sendq_size >> 1) &&
+ test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
netif_wake_queue(dev);
if (wc->status != IB_WC_SUCCESS &&
wc->status != IB_WC_WR_FLUSH_ERR) {
struct ipoib_neigh *neigh;
- ipoib_dbg(priv, "failed cm send event "
- "(status=%d, wrid=%d vend_err %x)\n",
- wc->status, wr_id, wc->vendor_err);
+ /* IB_WC[_RNR]_RETRY_EXC_ERR error is part of the life cycle,
+ * so don't make waves.
+ */
+ if (wc->status == IB_WC_RNR_RETRY_EXC_ERR ||
+ wc->status == IB_WC_RETRY_EXC_ERR)
+ ipoib_dbg(priv,
+ "%s: failed cm send event (status=%d, wrid=%d vend_err %#x)\n",
+ __func__, wc->status, wr_id, wc->vendor_err);
+ else
+ ipoib_warn(priv,
+ "%s: failed cm send event (status=%d, wrid=%d vend_err %#x)\n",
+ __func__, wc->status, wr_id, wc->vendor_err);
spin_lock_irqsave(&priv->lock, flags);
neigh = tx->neigh;
@@ -834,7 +871,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
int ipoib_cm_dev_open(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
int ret;
if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
@@ -842,16 +879,16 @@ int ipoib_cm_dev_open(struct net_device *dev)
priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
if (IS_ERR(priv->cm.id)) {
- printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
+ pr_warn("%s: failed to create CM ID\n", priv->ca->name);
ret = PTR_ERR(priv->cm.id);
goto err_cm;
}
- ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
- 0);
+ ret = ib_cm_listen(priv->cm.id,
+ cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num));
if (ret) {
- printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
- IPOIB_CM_IETF_ID | priv->qp->qp_num);
+ pr_warn("%s: failed to listen on ID 0x%llx\n", priv->ca->name,
+ IPOIB_CM_IETF_ID | priv->qp->qp_num);
goto err_listen;
}
@@ -866,7 +903,7 @@ err_cm:
static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_cm_rx *rx, *n;
LIST_HEAD(list);
@@ -889,7 +926,7 @@ static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
void ipoib_cm_dev_stop(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_cm_rx *p;
unsigned long begin;
int ret;
@@ -933,7 +970,7 @@ void ipoib_cm_dev_stop(struct net_device *dev)
break;
}
spin_unlock_irq(&priv->lock);
- msleep(1);
+ usleep_range(1000, 2000);
ipoib_drain_cq(dev);
spin_lock_irq(&priv->lock);
}
@@ -945,10 +982,11 @@ void ipoib_cm_dev_stop(struct net_device *dev)
cancel_delayed_work(&priv->cm.stale_task);
}
-static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event)
{
struct ipoib_cm_tx *p = cm_id->context;
- struct ipoib_dev_priv *priv = netdev_priv(p->dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
struct ipoib_cm_data *data = event->private_data;
struct sk_buff_head skqueue;
struct ib_qp_attr qp_attr;
@@ -991,18 +1029,21 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
skb_queue_head_init(&skqueue);
+ netif_tx_lock_bh(p->dev);
spin_lock_irq(&priv->lock);
set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
if (p->neigh)
while ((skb = __skb_dequeue(&p->neigh->queue)))
__skb_queue_tail(&skqueue, skb);
spin_unlock_irq(&priv->lock);
+ netif_tx_unlock_bh(p->dev);
while ((skb = __skb_dequeue(&skqueue))) {
skb->dev = p->dev;
- if (dev_queue_xmit(skb))
- ipoib_warn(priv, "dev_queue_xmit failed "
- "to requeue packet\n");
+ ret = dev_queue_xmit(skb);
+ if (ret)
+ ipoib_warn(priv, "%s:dev_queue_xmit failed to re-queue packet, ret:%d\n",
+ __func__, ret);
}
ret = ib_send_cm_rtu(cm_id, NULL, 0);
@@ -1015,9 +1056,9 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ib_qp_init_attr attr = {
- .send_cq = priv->recv_cq,
+ .send_cq = priv->send_cq,
.recv_cq = priv->recv_cq,
.srq = priv->cm.srq,
.cap.max_send_wr = ipoib_sendq_size,
@@ -1025,30 +1066,25 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
.sq_sig_type = IB_SIGNAL_ALL_WR,
.qp_type = IB_QPT_RC,
.qp_context = tx,
- .create_flags = IB_QP_CREATE_USE_GFP_NOIO
+ .create_flags = 0
};
-
struct ib_qp *tx_qp;
if (dev->features & NETIF_F_SG)
- attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
+ attr.cap.max_send_sge = min_t(u32, priv->ca->attrs.max_send_sge,
+ MAX_SKB_FRAGS + 1);
tx_qp = ib_create_qp(priv->pd, &attr);
- if (PTR_ERR(tx_qp) == -EINVAL) {
- ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n",
- priv->ca->name);
- attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
- tx_qp = ib_create_qp(priv->pd, &attr);
- }
+ tx->max_send_sge = attr.cap.max_send_sge;
return tx_qp;
}
static int ipoib_cm_send_req(struct net_device *dev,
struct ib_cm_id *id, struct ib_qp *qp,
u32 qpn,
- struct ib_sa_path_rec *pathrec)
+ struct sa_path_rec *pathrec)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_cm_data data = {};
struct ib_cm_req_param req = {};
@@ -1061,7 +1097,7 @@ static int ipoib_cm_send_req(struct net_device *dev,
req.qp_num = qp->qp_num;
req.qp_type = qp->qp_type;
req.private_data = &data;
- req.private_data_len = sizeof data;
+ req.private_data_len = sizeof(data);
req.flow_control = 0;
req.starting_psn = 0; /* FIXME */
@@ -1083,15 +1119,11 @@ static int ipoib_cm_send_req(struct net_device *dev,
static int ipoib_cm_modify_tx_init(struct net_device *dev,
struct ib_cm_id *cm_id, struct ib_qp *qp)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ib_qp_attr qp_attr;
int qp_attr_mask, ret;
- ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
- if (ret) {
- ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
- return ret;
- }
+ qp_attr.pkey_index = priv->pkey_index;
qp_attr.qp_state = IB_QPS_INIT;
qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
qp_attr.port_num = priv->port;
@@ -1106,24 +1138,25 @@ static int ipoib_cm_modify_tx_init(struct net_device *dev,
}
static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
- struct ib_sa_path_rec *pathrec)
+ struct sa_path_rec *pathrec)
{
- struct ipoib_dev_priv *priv = netdev_priv(p->dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
+ unsigned int noio_flag;
int ret;
- p->tx_ring = __vmalloc(ipoib_sendq_size * sizeof *p->tx_ring,
- GFP_NOIO, PAGE_KERNEL);
+ noio_flag = memalloc_noio_save();
+ p->tx_ring = vzalloc(array_size(ipoib_sendq_size, sizeof(*p->tx_ring)));
if (!p->tx_ring) {
- ipoib_warn(priv, "failed to allocate tx ring\n");
+ memalloc_noio_restore(noio_flag);
ret = -ENOMEM;
goto err_tx;
}
- memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
p->qp = ipoib_cm_create_tx_qp(p->dev, p);
+ memalloc_noio_restore(noio_flag);
if (IS_ERR(p->qp)) {
ret = PTR_ERR(p->qp);
- ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
+ ipoib_warn(priv, "failed to create tx qp: %d\n", ret);
goto err_qp;
}
@@ -1137,13 +1170,13 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
if (ret) {
ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
- goto err_modify;
+ goto err_modify_send;
}
ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
if (ret) {
ipoib_warn(priv, "failed to send cm req: %d\n", ret);
- goto err_send_cm;
+ goto err_modify_send;
}
ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
@@ -1151,8 +1184,7 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
return 0;
-err_send_cm:
-err_modify:
+err_modify_send:
ib_destroy_cm_id(p->id);
err_id:
p->id = NULL;
@@ -1166,7 +1198,7 @@ err_tx:
static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
- struct ipoib_dev_priv *priv = netdev_priv(p->dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
struct ipoib_tx_buf *tx_req;
unsigned long begin;
@@ -1186,7 +1218,7 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
goto timeout;
}
- msleep(1);
+ usleep_range(1000, 2000);
}
}
@@ -1196,9 +1228,11 @@ timeout:
tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(tx_req->skb);
- ++p->tx_tail;
netif_tx_lock_bh(p->dev);
- if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
+ ++p->tx_tail;
+ ++priv->global_tx_tail;
+ if (unlikely((priv->global_tx_head - priv->global_tx_tail) <=
+ ipoib_sendq_size >> 1) &&
netif_queue_stopped(p->dev) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
netif_wake_queue(p->dev);
@@ -1213,10 +1247,10 @@ timeout:
}
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
- struct ib_cm_event *event)
+ const struct ib_cm_event *event)
{
struct ipoib_cm_tx *tx = cm_id->context;
- struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(tx->dev);
struct net_device *dev = priv->dev;
struct ipoib_neigh *neigh;
unsigned long flags;
@@ -1267,16 +1301,15 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
struct ipoib_neigh *neigh)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_cm_tx *tx;
- tx = kzalloc(sizeof *tx, GFP_ATOMIC);
+ tx = kzalloc(sizeof(*tx), GFP_ATOMIC);
if (!tx)
return NULL;
neigh->cm = tx;
tx->neigh = neigh;
- tx->path = path;
tx->dev = dev;
list_add(&tx->list, &priv->cm.start_list);
set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
@@ -1286,7 +1319,7 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
- struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(tx->dev);
unsigned long flags;
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
spin_lock_irqsave(&priv->lock, flags);
@@ -1299,6 +1332,8 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
}
}
+#define QPN_AND_OPTIONS_OFFSET 4
+
static void ipoib_cm_tx_start(struct work_struct *work)
{
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
@@ -1307,9 +1342,10 @@ static void ipoib_cm_tx_start(struct work_struct *work)
struct ipoib_neigh *neigh;
struct ipoib_cm_tx *p;
unsigned long flags;
+ struct ipoib_path *path;
int ret;
- struct ib_sa_path_rec pathrec;
+ struct sa_path_rec pathrec;
u32 qpn;
netif_tx_lock_bh(dev);
@@ -1319,8 +1355,20 @@ static void ipoib_cm_tx_start(struct work_struct *work)
p = list_entry(priv->cm.start_list.next, typeof(*p), list);
list_del_init(&p->list);
neigh = p->neigh;
+
qpn = IPOIB_QPN(neigh->daddr);
- memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
+ /*
+ * As long as the search is done under these two locks, the
+ * path's existence indicates that it is still valid.
+ */
+ path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
+ if (!path) {
+ pr_info("%s: ignoring invalid path %pI6\n",
+ __func__,
+ neigh->daddr + QPN_AND_OPTIONS_OFFSET);
+ goto free_neigh;
+ }
+ memcpy(&pathrec, &path->pathrec, sizeof(pathrec));
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
@@ -1331,6 +1379,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
spin_lock_irqsave(&priv->lock, flags);
if (ret) {
+free_neigh:
neigh = p->neigh;
if (neigh) {
neigh->cm = NULL;
@@ -1358,7 +1407,7 @@ static void ipoib_cm_tx_reap(struct work_struct *work)
while (!list_empty(&priv->cm.reap_list)) {
p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
- list_del(&p->list);
+ list_del_init(&p->list);
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
ipoib_cm_tx_destroy(p);
@@ -1377,7 +1426,7 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
struct net_device *dev = priv->dev;
struct sk_buff *skb;
unsigned long flags;
- unsigned mtu = priv->mcast_mtu;
+ unsigned int mtu = priv->mcast_mtu;
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
@@ -1386,11 +1435,15 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
- if (skb->protocol == htons(ETH_P_IP))
+ if (skb->protocol == htons(ETH_P_IP)) {
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
+ }
#if IS_ENABLED(CONFIG_IPV6)
- else if (skb->protocol == htons(ETH_P_IPV6))
+ else if (skb->protocol == htons(ETH_P_IPV6)) {
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+ }
#endif
dev_kfree_skb_any(skb);
@@ -1405,11 +1458,10 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
unsigned int mtu)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
int e = skb_queue_empty(&priv->cm.skb_queue);
- if (skb_dst(skb))
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+ skb_dst_update_pmtu(skb, mtu);
skb_queue_tail(&priv->cm.skb_queue, skb);
if (e)
@@ -1451,37 +1503,46 @@ static void ipoib_cm_stale_task(struct work_struct *work)
spin_unlock_irq(&priv->lock);
}
-static ssize_t show_mode(struct device *d, struct device_attribute *attr,
+static ssize_t mode_show(struct device *d, struct device_attribute *attr,
char *buf)
{
- struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));
+ struct net_device *dev = to_net_dev(d);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
- return sprintf(buf, "connected\n");
+ return sysfs_emit(buf, "connected\n");
else
- return sprintf(buf, "datagram\n");
+ return sysfs_emit(buf, "datagram\n");
}
-static ssize_t set_mode(struct device *d, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t mode_store(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct net_device *dev = to_net_dev(d);
int ret;
- if (!rtnl_trylock())
+ if (!rtnl_trylock()) {
return restart_syscall();
+ }
- ret = ipoib_set_mode(dev, buf);
+ if (dev->reg_state != NETREG_REGISTERED) {
+ rtnl_unlock();
+ return -EPERM;
+ }
- rtnl_unlock();
+ ret = ipoib_set_mode(dev, buf);
- if (!ret)
- return count;
+ /* ipoib_set_mode() is expected to return with the rtnl lock
+ * still held; if it returned -EBUSY it has already dropped the
+ * lock, so there is no need to call rtnl_unlock() here.
+ */
+ if (ret != -EBUSY)
+ rtnl_unlock();
- return ret;
+ return (!ret || ret == -EBUSY) ? count : ret;
}
-static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);
+static DEVICE_ATTR_RW(mode);
int ipoib_cm_add_mode_attr(struct net_device *dev)
{
@@ -1490,7 +1551,7 @@ int ipoib_cm_add_mode_attr(struct net_device *dev)
static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ib_srq_init_attr srq_init_attr = {
.srq_type = IB_SRQT_BASIC,
.attr = {
@@ -1501,17 +1562,16 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
if (IS_ERR(priv->cm.srq)) {
- if (PTR_ERR(priv->cm.srq) != -ENOSYS)
- printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
+ if (PTR_ERR(priv->cm.srq) != -EOPNOTSUPP)
+ pr_warn("%s: failed to allocate SRQ, error %ld\n",
priv->ca->name, PTR_ERR(priv->cm.srq));
priv->cm.srq = NULL;
return;
}
- priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
+ priv->cm.srq_ring = vzalloc(array_size(ipoib_recvq_size,
+ sizeof(*priv->cm.srq_ring)));
if (!priv->cm.srq_ring) {
- printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
- priv->ca->name, ipoib_recvq_size);
ib_destroy_srq(priv->cm.srq);
priv->cm.srq = NULL;
return;
@@ -1521,9 +1581,9 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
int ipoib_cm_dev_init(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
- int i, ret;
- struct ib_device_attr attr;
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int max_srq_sge, i;
+ u8 addr;
INIT_LIST_HEAD(&priv->cm.passive_ids);
INIT_LIST_HEAD(&priv->cm.reap_list);
@@ -1540,19 +1600,13 @@ int ipoib_cm_dev_init(struct net_device *dev)
skb_queue_head_init(&priv->cm.skb_queue);
- ret = ib_query_device(priv->ca, &attr);
- if (ret) {
- printk(KERN_WARNING "ib_query_device() failed with %d\n", ret);
- return ret;
- }
-
- ipoib_dbg(priv, "max_srq_sge=%d\n", attr.max_srq_sge);
+ ipoib_dbg(priv, "max_srq_sge=%d\n", priv->ca->attrs.max_srq_sge);
- attr.max_srq_sge = min_t(int, IPOIB_CM_RX_SG, attr.max_srq_sge);
- ipoib_cm_create_srq(dev, attr.max_srq_sge);
+ max_srq_sge = min_t(int, IPOIB_CM_RX_SG, priv->ca->attrs.max_srq_sge);
+ ipoib_cm_create_srq(dev, max_srq_sge);
if (ipoib_cm_has_srq(dev)) {
- priv->cm.max_cm_mtu = attr.max_srq_sge * PAGE_SIZE - 0x10;
- priv->cm.num_frags = attr.max_srq_sge;
+ priv->cm.max_cm_mtu = max_srq_sge * PAGE_SIZE - 0x10;
+ priv->cm.num_frags = max_srq_sge;
ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
priv->cm.max_cm_mtu, priv->cm.num_frags);
} else {
@@ -1583,24 +1637,21 @@ int ipoib_cm_dev_init(struct net_device *dev)
}
}
- priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
+ addr = IPOIB_FLAGS_RC;
+ dev_addr_mod(dev, 0, &addr, 1);
return 0;
}
void ipoib_cm_dev_cleanup(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
- int ret;
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
if (!priv->cm.srq)
return;
ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");
- ret = ib_destroy_srq(priv->cm.srq);
- if (ret)
- ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);
-
+ ib_destroy_srq(priv->cm.srq);
priv->cm.srq = NULL;
if (!priv->cm.srq_ring)
return;
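
The CM receive path above replaces the old hard-coded 12-byte reserve with IPOIB_CM_RX_RESERVE, defined as ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN. As an aside (not part of the patch), the self-contained sketch below walks through that arithmetic with assumed constant values (IPOIB_ENCAP_LEN = 4 and IPOIB_PSEUDO_LEN = 20, hence IPOIB_HARD_LEN = 24) to show that the IP header, which sits right after the 4-byte encapsulation header, lands on a 16-byte boundary.

/*
 * Illustrative sketch (not part of the patch): the alignment arithmetic
 * behind IPOIB_CM_RX_RESERVE, using assumed values for the constants.
 */
#include <assert.h>
#include <stdio.h>

/* Standard power-of-two round-up, as the kernel's ALIGN() does. */
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

/* Assumed values; in the driver these come from ipoib.h. */
#define IPOIB_ENCAP_LEN		4
#define IPOIB_PSEUDO_LEN	20
#define IPOIB_HARD_LEN		(IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN)

#define IPOIB_CM_RX_RESERVE	(ALIGN_UP(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)

int main(void)
{
	/* Reserving this much, then receiving the 4-byte IPoIB header ... */
	unsigned int encap_off = IPOIB_CM_RX_RESERVE;
	/* ... puts the IP header at encap_off + IPOIB_ENCAP_LEN. */
	unsigned int ip_off = encap_off + IPOIB_ENCAP_LEN;

	printf("reserve=%u, IP header offset=%u\n", encap_off, ip_off);
	assert(ip_off % 16 == 0);	/* 28 + 4 == 32, a multiple of 16 */
	return 0;
}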
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 078cadd6c797..4feb7170535c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -36,33 +36,47 @@
#include "ipoib.h"
+struct ipoib_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_offset;
+};
+
+#define IPOIB_NETDEV_STAT(m) { \
+ .stat_string = #m, \
+ .stat_offset = offsetof(struct rtnl_link_stats64, m) }
+
+static const struct ipoib_stats ipoib_gstrings_stats[] = {
+ IPOIB_NETDEV_STAT(rx_packets),
+ IPOIB_NETDEV_STAT(tx_packets),
+ IPOIB_NETDEV_STAT(rx_bytes),
+ IPOIB_NETDEV_STAT(tx_bytes),
+ IPOIB_NETDEV_STAT(tx_errors),
+ IPOIB_NETDEV_STAT(rx_dropped),
+ IPOIB_NETDEV_STAT(tx_dropped),
+ IPOIB_NETDEV_STAT(multicast),
+};
+
+#define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats)
+
static void ipoib_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- struct ipoib_dev_priv *priv = netdev_priv(netdev);
- struct ib_device_attr *attr;
-
- attr = kmalloc(sizeof(*attr), GFP_KERNEL);
- if (attr && !ib_query_device(priv->ca, attr))
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%d.%d.%d", (int)(attr->fw_ver >> 32),
- (int)(attr->fw_ver >> 16) & 0xffff,
- (int)attr->fw_ver & 0xffff);
- kfree(attr);
-
- strlcpy(drvinfo->bus_info, dev_name(priv->ca->dma_device),
- sizeof(drvinfo->bus_info));
+ struct ipoib_dev_priv *priv = ipoib_priv(netdev);
+
+ ib_get_device_fw_str(priv->ca, drvinfo->fw_version);
- strlcpy(drvinfo->version, ipoib_driver_version,
- sizeof(drvinfo->version));
+ strscpy(drvinfo->bus_info, dev_name(priv->ca->dev.parent),
+ sizeof(drvinfo->bus_info));
- strlcpy(drvinfo->driver, "ib_ipoib", sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, "ib_ipoib", sizeof(drvinfo->driver));
}
static int ipoib_get_coalesce(struct net_device *dev,
- struct ethtool_coalesce *coal)
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
coal->rx_coalesce_usecs = priv->ethtool.coalesce_usecs;
coal->rx_max_coalesced_frames = priv->ethtool.max_coalesced_frames;
@@ -71,9 +85,11 @@ static int ipoib_get_coalesce(struct net_device *dev,
}
static int ipoib_set_coalesce(struct net_device *dev,
- struct ethtool_coalesce *coal)
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
int ret;
/*
@@ -84,9 +100,10 @@ static int ipoib_set_coalesce(struct net_device *dev,
coal->rx_max_coalesced_frames > 0xffff)
return -EINVAL;
- ret = ib_modify_cq(priv->recv_cq, coal->rx_max_coalesced_frames,
- coal->rx_coalesce_usecs);
- if (ret && ret != -ENOSYS) {
+ ret = rdma_set_cq_moderation(priv->recv_cq,
+ coal->rx_max_coalesced_frames,
+ coal->rx_coalesce_usecs);
+ if (ret && ret != -EOPNOTSUPP) {
ipoib_warn(priv, "failed modifying CQ (%d)\n", ret);
return ret;
}
@@ -96,11 +113,120 @@ static int ipoib_set_coalesce(struct net_device *dev,
return 0;
}
+static void ipoib_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats __always_unused *stats,
+ u64 *data)
+{
+ int i;
+ struct net_device_stats *net_stats = &dev->stats;
+ u8 *p = (u8 *)net_stats;
+
+ for (i = 0; i < IPOIB_GLOBAL_STATS_LEN; i++)
+ data[i] = *(u64 *)(p + ipoib_gstrings_stats[i].stat_offset);
+
+}
+static void ipoib_get_strings(struct net_device __always_unused *dev,
+ u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < IPOIB_GLOBAL_STATS_LEN; i++)
+ ethtool_puts(&data,
+ ipoib_gstrings_stats[i].stat_string);
+ break;
+ default:
+ break;
+ }
+}
+static int ipoib_get_sset_count(struct net_device __always_unused *dev,
+ int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return IPOIB_GLOBAL_STATS_LEN;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+/* Return the lane speed in units of 1e6 bit/sec */
+static inline int ib_speed_enum_to_int(int speed)
+{
+ switch (speed) {
+ case IB_SPEED_SDR:
+ return SPEED_2500;
+ case IB_SPEED_DDR:
+ return SPEED_5000;
+ case IB_SPEED_QDR:
+ case IB_SPEED_FDR10:
+ return SPEED_10000;
+ case IB_SPEED_FDR:
+ return SPEED_14000;
+ case IB_SPEED_EDR:
+ return SPEED_25000;
+ case IB_SPEED_HDR:
+ return SPEED_50000;
+ case IB_SPEED_NDR:
+ return SPEED_100000;
+ case IB_SPEED_XDR:
+ return SPEED_200000;
+ }
+
+ return SPEED_UNKNOWN;
+}
+
+static int ipoib_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(netdev);
+ struct ib_port_attr attr;
+ int ret, speed, width;
+
+ if (!netif_carrier_ok(netdev)) {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ return 0;
+ }
+
+ ret = ib_query_port(priv->ca, priv->port, &attr);
+ if (ret < 0)
+ return -EINVAL;
+
+ speed = ib_speed_enum_to_int(attr.active_speed);
+ width = ib_width_enum_to_int(attr.active_width);
+
+ if (speed < 0 || width < 0)
+ return -EINVAL;
+
+ /* Apart from the fields set below, the remaining members of
+ * struct ethtool_link_settings are initialized to zero by
+ * __ethtool_get_link_ksettings().
+ */
+ cmd->base.speed = speed * width;
+ cmd->base.duplex = DUPLEX_FULL;
+
+ cmd->base.phy_address = 0xFF;
+
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ cmd->base.port = PORT_OTHER;
+
+ return 0;
+}
static const struct ethtool_ops ipoib_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES,
+ .get_link_ksettings = ipoib_get_link_ksettings,
.get_drvinfo = ipoib_get_drvinfo,
.get_coalesce = ipoib_get_coalesce,
.set_coalesce = ipoib_set_coalesce,
+ .get_strings = ipoib_get_strings,
+ .get_ethtool_stats = ipoib_get_ethtool_stats,
+ .get_sset_count = ipoib_get_sset_count,
+ .get_link = ethtool_op_get_link,
};
void ipoib_set_ethtool_ops(struct net_device *dev)
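
The ethtool changes above export per-device counters through a static {name, offset} table built with offsetof() (IPOIB_NETDEV_STAT), which ipoib_get_ethtool_stats() then walks to copy one u64 per entry. The stand-alone sketch below (not part of the patch; the struct and field names are hypothetical) shows the same table-driven pattern; it uses memcpy() rather than the driver's direct u64 cast purely to keep the example strict-aliasing clean.

/*
 * Illustrative sketch (not part of the patch): an offsetof-based stats
 * table, reduced to a self-contained example with hypothetical names.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_stats {
	uint64_t rx_packets;
	uint64_t tx_packets;
	uint64_t tx_errors;
};

struct stat_desc {
	char name[32];
	size_t offset;
};

#define DEMO_STAT(m) { .name = #m, .offset = offsetof(struct demo_stats, m) }

static const struct stat_desc demo_strings[] = {
	DEMO_STAT(rx_packets),
	DEMO_STAT(tx_packets),
	DEMO_STAT(tx_errors),
};

#define DEMO_STATS_LEN (sizeof(demo_strings) / sizeof(demo_strings[0]))

/* Equivalent of ipoib_get_ethtool_stats(): copy values out by offset. */
static void demo_get_stats(const struct demo_stats *stats, uint64_t *data)
{
	const uint8_t *p = (const uint8_t *)stats;
	size_t i;

	for (i = 0; i < DEMO_STATS_LEN; i++)
		memcpy(&data[i], p + demo_strings[i].offset, sizeof(data[i]));
}

int main(void)
{
	struct demo_stats stats = { .rx_packets = 10, .tx_packets = 7, .tx_errors = 1 };
	uint64_t data[DEMO_STATS_LEN];
	size_t i;

	demo_get_stats(&stats, data);
	for (i = 0; i < DEMO_STATS_LEN; i++)
		printf("%-12s %llu\n", demo_strings[i].name,
		       (unsigned long long)data[i]);
	return 0;
}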
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
index 6bd5740e2691..12ba7a0fe0b5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -124,35 +124,14 @@ static int ipoib_mcg_seq_show(struct seq_file *file, void *iter_ptr)
return 0;
}
-static const struct seq_operations ipoib_mcg_seq_ops = {
+static const struct seq_operations ipoib_mcg_sops = {
.start = ipoib_mcg_seq_start,
.next = ipoib_mcg_seq_next,
.stop = ipoib_mcg_seq_stop,
.show = ipoib_mcg_seq_show,
};
-static int ipoib_mcg_open(struct inode *inode, struct file *file)
-{
- struct seq_file *seq;
- int ret;
-
- ret = seq_open(file, &ipoib_mcg_seq_ops);
- if (ret)
- return ret;
-
- seq = file->private_data;
- seq->private = inode->i_private;
-
- return 0;
-}
-
-static const struct file_operations ipoib_mcg_fops = {
- .owner = THIS_MODULE,
- .open = ipoib_mcg_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release
-};
+DEFINE_SEQ_ATTRIBUTE(ipoib_mcg);
static void *ipoib_path_seq_start(struct seq_file *file, loff_t *pos)
{
@@ -210,16 +189,16 @@ static int ipoib_path_seq_show(struct seq_file *file, void *iter_ptr)
seq_printf(file,
"GID: %s\n"
" complete: %6s\n",
- gid_buf, path.pathrec.dlid ? "yes" : "no");
+ gid_buf, sa_path_get_dlid(&path.pathrec) ? "yes" : "no");
- if (path.pathrec.dlid) {
+ if (sa_path_get_dlid(&path.pathrec)) {
rate = ib_rate_to_mbps(path.pathrec.rate);
seq_printf(file,
" DLID: 0x%04x\n"
" SL: %12d\n"
" rate: %8d.%d Gb/sec\n",
- be16_to_cpu(path.pathrec.dlid),
+ be32_to_cpu(sa_path_get_dlid(&path.pathrec)),
path.pathrec.sl,
rate / 1000, rate % 1000);
}
@@ -229,66 +208,41 @@ static int ipoib_path_seq_show(struct seq_file *file, void *iter_ptr)
return 0;
}
-static const struct seq_operations ipoib_path_seq_ops = {
+static const struct seq_operations ipoib_path_sops = {
.start = ipoib_path_seq_start,
.next = ipoib_path_seq_next,
.stop = ipoib_path_seq_stop,
.show = ipoib_path_seq_show,
};
-static int ipoib_path_open(struct inode *inode, struct file *file)
-{
- struct seq_file *seq;
- int ret;
-
- ret = seq_open(file, &ipoib_path_seq_ops);
- if (ret)
- return ret;
-
- seq = file->private_data;
- seq->private = inode->i_private;
-
- return 0;
-}
-
-static const struct file_operations ipoib_path_fops = {
- .owner = THIS_MODULE,
- .open = ipoib_path_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release
-};
+DEFINE_SEQ_ATTRIBUTE(ipoib_path);
void ipoib_create_debug_files(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
- char name[IFNAMSIZ + sizeof "_path"];
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ char name[IFNAMSIZ + sizeof("_path")];
- snprintf(name, sizeof name, "%s_mcg", dev->name);
+ snprintf(name, sizeof(name), "%s_mcg", dev->name);
priv->mcg_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
ipoib_root, dev, &ipoib_mcg_fops);
- if (!priv->mcg_dentry)
- ipoib_warn(priv, "failed to create mcg debug file\n");
- snprintf(name, sizeof name, "%s_path", dev->name);
+ snprintf(name, sizeof(name), "%s_path", dev->name);
priv->path_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
ipoib_root, dev, &ipoib_path_fops);
- if (!priv->path_dentry)
- ipoib_warn(priv, "failed to create path debug file\n");
}
void ipoib_delete_debug_files(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
debugfs_remove(priv->mcg_dentry);
debugfs_remove(priv->path_dentry);
+ priv->mcg_dentry = priv->path_dentry = NULL;
}
-int ipoib_register_debugfs(void)
+void ipoib_register_debugfs(void)
{
ipoib_root = debugfs_create_dir("ipoib", NULL);
- return ipoib_root ? 0 : -ENOMEM;
}
void ipoib_unregister_debugfs(void)
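
ipoib_fs.c now declares ipoib_mcg_sops and ipoib_path_sops and lets DEFINE_SEQ_ATTRIBUTE() generate the open/read/release boilerplate that was previously written by hand. As an illustration only (not part of the patch, and not the seq_file API itself), the sketch below drives the same start/next/stop/show callback shape with a trivial userspace loop, which is roughly what the seq_file core does when the debugfs file is read.

/*
 * Illustrative sketch (not part of the patch): the start/next/stop/show
 * callback shape that ipoib_mcg_sops and ipoib_path_sops follow.
 */
#include <stdio.h>

struct demo_seq_ops {
	void *(*start)(long *pos);
	void *(*next)(void *iter, long *pos);
	void (*stop)(void *iter);
	int (*show)(void *iter);
};

static const char *items[] = { "broadcast", "ipv4-mcast", "ipv6-mcast" };
#define N_ITEMS (sizeof(items) / sizeof(items[0]))

static void *demo_start(long *pos)
{
	return (*pos < (long)N_ITEMS) ? (void *)&items[*pos] : NULL;
}

static void *demo_next(void *iter, long *pos)
{
	(void)iter;
	++*pos;
	return demo_start(pos);
}

static void demo_stop(void *iter)
{
	(void)iter;		/* nothing to release in this sketch */
}

static int demo_show(void *iter)
{
	printf("GID: %s\n", *(const char **)iter);
	return 0;
}

static const struct demo_seq_ops demo_sops = {
	.start	= demo_start,
	.next	= demo_next,
	.stop	= demo_stop,
	.show	= demo_show,
};

int main(void)
{
	long pos = 0;
	void *iter;

	/* The seq_file core performs an equivalent start/show/next/stop walk. */
	for (iter = demo_sops.start(&pos); iter; iter = demo_sops.next(iter, &pos))
		demo_sops.show(iter);
	demo_sops.stop(iter);
	return 0;
}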
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index d266667ca9b8..10b0dbda6cd5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -40,6 +40,8 @@
#include <linux/ip.h>
#include <linux/tcp.h>
+#include <net/netdev_lock.h>
+#include <rdma/ib_cache.h>
#include "ipoib.h"
@@ -51,15 +53,13 @@ MODULE_PARM_DESC(data_debug_level,
"Enable data path debug tracing if > 0");
#endif
-static DEFINE_MUTEX(pkey_mutex);
-
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
- struct ib_pd *pd, struct ib_ah_attr *attr)
+ struct ib_pd *pd, struct rdma_ah_attr *attr)
{
struct ipoib_ah *ah;
struct ib_ah *vah;
- ah = kmalloc(sizeof *ah, GFP_KERNEL);
+ ah = kmalloc(sizeof(*ah), GFP_KERNEL);
if (!ah)
return ERR_PTR(-ENOMEM);
@@ -67,13 +67,13 @@ struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
ah->last_send = 0;
kref_init(&ah->ref);
- vah = ib_create_ah(pd, attr);
+ vah = rdma_create_ah(pd, attr, RDMA_CREATE_AH_SLEEPABLE);
if (IS_ERR(vah)) {
kfree(ah);
ah = (struct ipoib_ah *)vah;
} else {
ah->ah = vah;
- ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
+ ipoib_dbg(ipoib_priv(dev), "Created ah %p\n", ah->ah);
}
return ah;
@@ -82,7 +82,7 @@ struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
void ipoib_free_ah(struct kref *kref)
{
struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
- struct ipoib_dev_priv *priv = netdev_priv(ah->dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(ah->dev);
unsigned long flags;
@@ -101,8 +101,7 @@ static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
- struct ib_recv_wr *bad_wr;
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
int ret;
priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
@@ -110,7 +109,7 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];
- ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
+ ret = ib_post_recv(priv->qp, &priv->rx_wr, NULL);
if (unlikely(ret)) {
ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
@@ -123,23 +122,22 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct sk_buff *skb;
int buf_size;
u64 *mapping;
buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
- skb = dev_alloc_skb(buf_size + IPOIB_ENCAP_LEN);
+ skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
if (unlikely(!skb))
return NULL;
/*
- * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
- * header. So we need 4 more bytes to get to 48 and align the
- * IP header to a multiple of 16.
+ * The IP header will be at IPOIB_HARD_LEN + IB_GRH_BYTES, i.e. at
+ * offset 64, which keeps it 16-byte aligned.
*/
- skb_reserve(skb, 4);
+ skb_reserve(skb, sizeof(struct ipoib_pseudo_header));
mapping = priv->rx_ring[id].mapping;
mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
@@ -156,7 +154,7 @@ error:
static int ipoib_ib_post_receives(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
int i;
for (i = 0; i < ipoib_recvq_size; ++i) {
@@ -175,11 +173,12 @@ static int ipoib_ib_post_receives(struct net_device *dev)
static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
struct sk_buff *skb;
u64 mapping[IPOIB_UD_RX_SG];
union ib_gid *dgid;
+ union ib_gid *sgid;
ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
wr_id, wc->status);
@@ -194,8 +193,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
if (unlikely(wc->status != IB_WC_SUCCESS)) {
if (wc->status != IB_WC_WR_FLUSH_ERR)
- ipoib_warn(priv, "failed recv event "
- "(status=%d, wrid=%d vend_err %x)\n",
+ ipoib_warn(priv,
+ "failed recv event (status=%d, wrid=%d vend_err %#x)\n",
wc->status, wr_id, wc->vendor_err);
ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
dev_kfree_skb_any(skb);
@@ -203,15 +202,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
return;
}
- /*
- * Drop packets that this interface sent, ie multicast packets
- * that the HCA has replicated.
- */
- if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
- goto repost;
-
memcpy(mapping, priv->rx_ring[wr_id].mapping,
- IPOIB_UD_RX_SG * sizeof *mapping);
+ IPOIB_UD_RX_SG * sizeof(*mapping));
/*
* If we can't allocate a new RX buffer, dump
@@ -239,23 +231,41 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
else
skb->pkt_type = PACKET_MULTICAST;
+ sgid = &((struct ib_grh *)skb->data)->sgid;
+
+ /*
+	 * Drop packets that this interface sent, i.e. multicast packets
+ * that the HCA has replicated.
+ */
+ if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) {
+ int need_repost = 1;
+
+ if ((wc->wc_flags & IB_WC_GRH) &&
+ sgid->global.interface_id != priv->local_gid.global.interface_id)
+ need_repost = 0;
+
+ if (need_repost) {
+ dev_kfree_skb_any(skb);
+ goto repost;
+ }
+ }
+
skb_pull(skb, IB_GRH_BYTES);
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
- skb_reset_mac_header(skb);
- skb_pull(skb, IPOIB_ENCAP_LEN);
-
- skb->truesize = SKB_TRUESIZE(skb->len);
+ skb_add_pseudo_hdr(skb);
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
+ if (skb->pkt_type == PACKET_MULTICAST)
+ dev->stats.multicast++;
skb->dev = dev;
if ((dev->features & NETIF_F_RXCSUM) &&
likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
skb->ip_summed = CHECKSUM_UNNECESSARY;
- napi_gro_receive(&priv->napi, skb);
+ napi_gro_receive(&priv->recv_napi, skb);
repost:
if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
@@ -284,7 +294,8 @@ int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
mapping[i + off] = ib_dma_map_page(ca,
skb_frag_page(frag),
- frag->page_offset, skb_frag_size(frag),
+ skb_frag_off(frag),
+ skb_frag_size(frag),
DMA_TO_DEVICE);
if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
goto partial_error;
@@ -374,7 +385,7 @@ free_res:
static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
unsigned int wr_id = wc->wr_id;
struct ipoib_tx_buf *tx_req;
@@ -397,23 +408,23 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
dev_kfree_skb_any(tx_req->skb);
++priv->tx_tail;
- if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
- netif_queue_stopped(dev) &&
- test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+ ++priv->global_tx_tail;
+
+ if (unlikely(netif_queue_stopped(dev) &&
+ ((priv->global_tx_head - priv->global_tx_tail) <=
+ ipoib_sendq_size >> 1) &&
+ test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
netif_wake_queue(dev);
if (wc->status != IB_WC_SUCCESS &&
wc->status != IB_WC_WR_FLUSH_ERR) {
struct ipoib_qp_state_validate *qp_work;
- ipoib_warn(priv, "failed send event "
- "(status=%d, wrid=%d vend_err %x)\n",
+ ipoib_warn(priv,
+ "failed send event (status=%d, wrid=%d vend_err %#x)\n",
wc->status, wr_id, wc->vendor_err);
qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
- if (!qp_work) {
- ipoib_warn(priv, "%s Failed alloc ipoib_qp_state_validate for qp: 0x%x\n",
- __func__, priv->qp->qp_num);
+ if (!qp_work)
return;
- }
INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
qp_work->priv = priv;
@@ -424,17 +435,23 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
static int poll_tx(struct ipoib_dev_priv *priv)
{
int n, i;
+ struct ib_wc *wc;
n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
- for (i = 0; i < n; ++i)
- ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);
-
+ for (i = 0; i < n; ++i) {
+ wc = priv->send_wc + i;
+ if (wc->wr_id & IPOIB_OP_CM)
+ ipoib_cm_handle_tx_wc(priv->dev, priv->send_wc + i);
+ else
+ ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);
+ }
return n == MAX_SEND_CQE;
}
-int ipoib_poll(struct napi_struct *napi, int budget)
+int ipoib_rx_poll(struct napi_struct *napi, int budget)
{
- struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
+ struct ipoib_dev_priv *priv =
+ container_of(napi, struct ipoib_dev_priv, recv_napi);
struct net_device *dev = priv->dev;
int done;
int t;
@@ -458,8 +475,9 @@ poll_more:
ipoib_cm_handle_rx_wc(dev, wc);
else
ipoib_ib_handle_rx_wc(dev, wc);
- } else
- ipoib_cm_handle_tx_wc(priv->dev, wc);
+ } else {
+ pr_warn("%s: Got unexpected wqe id\n", __func__);
+ }
}
if (n != t)
@@ -471,85 +489,123 @@ poll_more:
if (unlikely(ib_req_notify_cq(priv->recv_cq,
IB_CQ_NEXT_COMP |
IB_CQ_REPORT_MISSED_EVENTS)) &&
- napi_reschedule(napi))
+ napi_schedule(napi))
goto poll_more;
}
return done;
}
-void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
+int ipoib_tx_poll(struct napi_struct *napi, int budget)
{
- struct net_device *dev = dev_ptr;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv,
+ send_napi);
+ struct net_device *dev = priv->dev;
+ int n, i;
+ struct ib_wc *wc;
- napi_schedule(&priv->napi);
+poll_more:
+ n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
+
+ for (i = 0; i < n; i++) {
+ wc = priv->send_wc + i;
+ if (wc->wr_id & IPOIB_OP_CM)
+ ipoib_cm_handle_tx_wc(dev, wc);
+ else
+ ipoib_ib_handle_tx_wc(dev, wc);
+ }
+
+ if (n < budget) {
+ napi_complete(napi);
+ if (unlikely(ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
+ IB_CQ_REPORT_MISSED_EVENTS)) &&
+ napi_schedule(napi))
+ goto poll_more;
+ }
+ return n < 0 ? 0 : n;
}
-static void drain_tx_cq(struct net_device *dev)
+void ipoib_ib_rx_completion(struct ib_cq *cq, void *ctx_ptr)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ctx_ptr;
- netif_tx_lock(dev);
- while (poll_tx(priv))
- ; /* nothing */
+ napi_schedule(&priv->recv_napi);
+}
- if (netif_queue_stopped(dev))
- mod_timer(&priv->poll_timer, jiffies + 1);
+/* The function will force napi_schedule */
+void ipoib_napi_schedule_work(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, reschedule_napi_work);
+ bool ret;
- netif_tx_unlock(dev);
+ do {
+ ret = napi_schedule(&priv->send_napi);
+ if (!ret)
+ msleep(3);
+ } while (!ret && netif_queue_stopped(priv->dev) &&
+ test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags));
}
-void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
+void ipoib_ib_tx_completion(struct ib_cq *cq, void *ctx_ptr)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev_ptr);
+ struct ipoib_dev_priv *priv = ctx_ptr;
+ bool ret;
- mod_timer(&priv->poll_timer, jiffies);
+ ret = napi_schedule(&priv->send_napi);
+ /*
+	 * If the queue is stopped, the driver must be able to schedule
+	 * NAPI; otherwise the queue can stay stopped forever, because there
+	 * are no new packets to send and the NAPI callback might not get a
+	 * new event after its re-arm.
+ */
+ if (!ret && netif_queue_stopped(priv->dev))
+ schedule_work(&priv->reschedule_napi_work);
}
static inline int post_send(struct ipoib_dev_priv *priv,
unsigned int wr_id,
- struct ib_ah *address, u32 qpn,
+ struct ib_ah *address, u32 dqpn,
struct ipoib_tx_buf *tx_req,
void *head, int hlen)
{
- struct ib_send_wr *bad_wr;
struct sk_buff *skb = tx_req->skb;
ipoib_build_sge(priv, tx_req);
- priv->tx_wr.wr_id = wr_id;
- priv->tx_wr.wr.ud.remote_qpn = qpn;
- priv->tx_wr.wr.ud.ah = address;
+ priv->tx_wr.wr.wr_id = wr_id;
+ priv->tx_wr.remote_qpn = dqpn;
+ priv->tx_wr.ah = address;
if (head) {
- priv->tx_wr.wr.ud.mss = skb_shinfo(skb)->gso_size;
- priv->tx_wr.wr.ud.header = head;
- priv->tx_wr.wr.ud.hlen = hlen;
- priv->tx_wr.opcode = IB_WR_LSO;
+ priv->tx_wr.mss = skb_shinfo(skb)->gso_size;
+ priv->tx_wr.header = head;
+ priv->tx_wr.hlen = hlen;
+ priv->tx_wr.wr.opcode = IB_WR_LSO;
} else
- priv->tx_wr.opcode = IB_WR_SEND;
+ priv->tx_wr.wr.opcode = IB_WR_SEND;
- return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
+ return ib_post_send(priv->qp, &priv->tx_wr.wr, NULL);
}
-void ipoib_send(struct net_device *dev, struct sk_buff *skb,
- struct ipoib_ah *address, u32 qpn)
+int ipoib_send(struct net_device *dev, struct sk_buff *skb,
+ struct ib_ah *address, u32 dqpn)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_tx_buf *tx_req;
int hlen, rc;
void *phead;
+ unsigned int usable_sge = priv->max_send_sge - !!skb_headlen(skb);
if (skb_is_gso(skb)) {
- hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hlen = skb_tcp_all_headers(skb);
phead = skb->data;
if (unlikely(!skb_pull(skb, hlen))) {
ipoib_warn(priv, "linear data too small\n");
++dev->stats.tx_dropped;
++dev->stats.tx_errors;
dev_kfree_skb_any(skb);
- return;
+ return -1;
}
} else {
if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
@@ -558,14 +614,32 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
++dev->stats.tx_dropped;
++dev->stats.tx_errors;
ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
- return;
+ return -1;
}
phead = NULL;
hlen = 0;
}
+ if (skb_shinfo(skb)->nr_frags > usable_sge) {
+ if (skb_linearize(skb) < 0) {
+ ipoib_warn(priv, "skb could not be linearized\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return -1;
+ }
+ /* Does skb_linearize return ok without reducing nr_frags? */
+ if (skb_shinfo(skb)->nr_frags > usable_sge) {
+ ipoib_warn(priv, "too many frags after skb linearize\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return -1;
+ }
+ }
- ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
- skb->len, address, qpn);
+ ipoib_dbg_data(priv,
+ "sending packet, length=%d address=%p dqpn=0x%06x\n",
+ skb->len, address, dqpn);
/*
* We put the skb into the tx_ring _before_ we call post_send()
@@ -579,115 +653,232 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
++dev->stats.tx_errors;
dev_kfree_skb_any(skb);
- return;
+ return -1;
}
if (skb->ip_summed == CHECKSUM_PARTIAL)
- priv->tx_wr.send_flags |= IB_SEND_IP_CSUM;
+ priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
else
- priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
-
- if (++priv->tx_outstanding == ipoib_sendq_size) {
+ priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
+ /* increase the tx_head after send success, but use it for queue state */
+ if ((priv->global_tx_head - priv->global_tx_tail) ==
+ ipoib_sendq_size - 1) {
ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
- if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
- ipoib_warn(priv, "request notify on send CQ failed\n");
netif_stop_queue(dev);
}
skb_orphan(skb);
skb_dst_drop(skb);
+ if (netif_queue_stopped(dev))
+ if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
+ IB_CQ_REPORT_MISSED_EVENTS) < 0)
+ ipoib_warn(priv, "request notify on send CQ failed\n");
+
rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
- address->ah, qpn, tx_req, phead, hlen);
+ address, dqpn, tx_req, phead, hlen);
if (unlikely(rc)) {
ipoib_warn(priv, "post_send failed, error %d\n", rc);
++dev->stats.tx_errors;
- --priv->tx_outstanding;
ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(skb);
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
+ rc = 0;
} else {
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
- address->last_send = priv->tx_head;
+ rc = priv->tx_head;
++priv->tx_head;
+ ++priv->global_tx_head;
}
-
- if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
- while (poll_tx(priv))
- ; /* nothing */
+ return rc;
}
-static void __ipoib_reap_ah(struct net_device *dev)
+static void ipoib_reap_dead_ahs(struct ipoib_dev_priv *priv)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_ah *ah, *tah;
- LIST_HEAD(remove_list);
unsigned long flags;
- netif_tx_lock_bh(dev);
+ netif_tx_lock_bh(priv->dev);
spin_lock_irqsave(&priv->lock, flags);
list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
list_del(&ah->list);
- ib_destroy_ah(ah->ah);
+ rdma_destroy_ah(ah->ah, 0);
kfree(ah);
}
spin_unlock_irqrestore(&priv->lock, flags);
- netif_tx_unlock_bh(dev);
+ netif_tx_unlock_bh(priv->dev);
}
void ipoib_reap_ah(struct work_struct *work)
{
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
- struct net_device *dev = priv->dev;
- __ipoib_reap_ah(dev);
+ ipoib_reap_dead_ahs(priv);
if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
queue_delayed_work(priv->wq, &priv->ah_reap_task,
round_jiffies_relative(HZ));
}
-static void ipoib_flush_ah(struct net_device *dev)
+static void ipoib_start_ah_reaper(struct ipoib_dev_priv *priv)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ clear_bit(IPOIB_STOP_REAPER, &priv->flags);
+ queue_delayed_work(priv->wq, &priv->ah_reap_task,
+ round_jiffies_relative(HZ));
+}
+static void ipoib_stop_ah_reaper(struct ipoib_dev_priv *priv)
+{
+ set_bit(IPOIB_STOP_REAPER, &priv->flags);
cancel_delayed_work(&priv->ah_reap_task);
- flush_workqueue(priv->wq);
- ipoib_reap_ah(&priv->ah_reap_task.work);
+ /*
+ * After ipoib_stop_ah_reaper() we always go through
+	 * ipoib_reap_dead_ahs(), which ensures the work is really stopped
+	 * and does a final flush of the dead_ahs list.
+ */
}
-static void ipoib_stop_ah(struct net_device *dev)
+static int recvs_pending(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int pending = 0;
+ int i;
- set_bit(IPOIB_STOP_REAPER, &priv->flags);
- ipoib_flush_ah(dev);
+ for (i = 0; i < ipoib_recvq_size; ++i)
+ if (priv->rx_ring[i].skb)
+ ++pending;
+
+ return pending;
+}
+
+static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,
+ struct ib_qp *qp,
+ enum ib_qp_state new_state)
+{
+ struct ib_qp_attr qp_attr;
+ struct ib_qp_init_attr query_init_attr;
+ int ret;
+
+ ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr);
+ if (ret) {
+ ipoib_warn(priv, "%s: Failed to query QP\n", __func__);
+ return;
+ }
+	/* print according to the new state and the previous state. */
+ if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET)
+ ipoib_dbg(priv, "Failed modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n");
+ else
+ ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n",
+ new_state, qp_attr.qp_state);
}
-static void ipoib_ib_tx_timer_func(unsigned long ctx)
+static void ipoib_napi_enable(struct net_device *dev)
{
- drain_tx_cq((struct net_device *)ctx);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ netdev_lock_ops_to_full(dev);
+ napi_enable_locked(&priv->recv_napi);
+ napi_enable_locked(&priv->send_napi);
+ netdev_unlock_full_to_ops(dev);
}
-int ipoib_ib_dev_open(struct net_device *dev)
+static void ipoib_napi_disable(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
- int ret;
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
- ipoib_pkey_dev_check_presence(dev);
+ netdev_lock_ops_to_full(dev);
+ napi_disable_locked(&priv->recv_napi);
+ napi_disable_locked(&priv->send_napi);
+ netdev_unlock_full_to_ops(dev);
+}
- if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
- ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey,
- (!(priv->pkey & 0x7fff) ? "Invalid" : "not found"));
- return -1;
+int ipoib_ib_dev_stop_default(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ib_qp_attr qp_attr;
+ unsigned long begin;
+ struct ipoib_tx_buf *tx_req;
+ int i;
+
+ if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+ ipoib_napi_disable(dev);
+
+ ipoib_cm_dev_stop(dev);
+
+ /*
+	 * Move our QP to the error state and then reinitialize it when
+	 * all work requests have completed or have been flushed.
+ */
+ qp_attr.qp_state = IB_QPS_ERR;
+ if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
+ check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);
+
+ /* Wait for all sends and receives to complete */
+ begin = jiffies;
+
+ while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
+ if (time_after(jiffies, begin + 5 * HZ)) {
+ ipoib_warn(priv,
+ "timing out; %d sends %d receives not completed\n",
+ priv->tx_head - priv->tx_tail,
+ recvs_pending(dev));
+
+ /*
+ * assume the HW is wedged and just free up
+ * all our pending work requests.
+ */
+ while ((int)priv->tx_tail - (int)priv->tx_head < 0) {
+ tx_req = &priv->tx_ring[priv->tx_tail &
+ (ipoib_sendq_size - 1)];
+ ipoib_dma_unmap_tx(priv, tx_req);
+ dev_kfree_skb_any(tx_req->skb);
+ ++priv->tx_tail;
+ ++priv->global_tx_tail;
+ }
+
+ for (i = 0; i < ipoib_recvq_size; ++i) {
+ struct ipoib_rx_buf *rx_req;
+
+ rx_req = &priv->rx_ring[i];
+ if (!rx_req->skb)
+ continue;
+ ipoib_ud_dma_unmap_rx(priv,
+ priv->rx_ring[i].mapping);
+ dev_kfree_skb_any(rx_req->skb);
+ rx_req->skb = NULL;
+ }
+
+ goto timeout;
+ }
+
+ ipoib_drain_cq(dev);
+
+ usleep_range(1000, 2000);
}
+ ipoib_dbg(priv, "All sends and receives done.\n");
+
+timeout:
+ qp_attr.qp_state = IB_QPS_RESET;
+ if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
+ ipoib_warn(priv, "Failed to modify QP to RESET state\n");
+
+ ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
+
+ return 0;
+}
+
+int ipoib_ib_dev_open_default(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int ret;
+
ret = ipoib_init_qp(dev);
if (ret) {
ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
@@ -697,61 +888,95 @@ int ipoib_ib_dev_open(struct net_device *dev)
ret = ipoib_ib_post_receives(dev);
if (ret) {
ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
- goto dev_stop;
+ goto out;
}
ret = ipoib_cm_dev_open(dev);
if (ret) {
ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
- goto dev_stop;
+ goto out;
}
- clear_bit(IPOIB_STOP_REAPER, &priv->flags);
- queue_delayed_work(priv->wq, &priv->ah_reap_task,
- round_jiffies_relative(HZ));
+ if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+ ipoib_napi_enable(dev);
+
+ return 0;
+out:
+ return -1;
+}
+
+int ipoib_ib_dev_open(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_pkey_dev_check_presence(dev);
- if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
- napi_enable(&priv->napi);
+ if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
+ ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey,
+ (!(priv->pkey & 0x7fff) ? "Invalid" : "not found"));
+ return -1;
+ }
+
+ ipoib_start_ah_reaper(priv);
+ if (priv->rn_ops->ndo_open(dev)) {
+ pr_warn("%s: Failed to open dev\n", dev->name);
+ goto dev_stop;
+ }
+
+ set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
return 0;
+
dev_stop:
- if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
- napi_enable(&priv->napi);
- ipoib_ib_dev_stop(dev);
+ ipoib_stop_ah_reaper(priv);
return -1;
}
+void ipoib_ib_dev_stop(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ priv->rn_ops->ndo_stop(dev);
+
+ clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ ipoib_stop_ah_reaper(priv);
+}
+
void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
if (!(priv->pkey & 0x7fff) ||
ib_find_pkey(priv->ca, priv->port, priv->pkey,
- &priv->pkey_index))
+ &priv->pkey_index)) {
clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
- else
+ } else {
+ if (rn->set_id)
+ rn->set_id(dev, priv->pkey_index);
set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+ }
}
-int ipoib_ib_dev_up(struct net_device *dev)
+void ipoib_ib_dev_up(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
ipoib_pkey_dev_check_presence(dev);
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
ipoib_dbg(priv, "PKEY is not assigned.\n");
- return 0;
+ return;
}
set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
- return ipoib_mcast_start_thread(dev);
+ ipoib_mcast_start_thread(dev);
}
-int ipoib_ib_dev_down(struct net_device *dev)
+void ipoib_ib_dev_down(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
ipoib_dbg(priv, "downing ib_dev\n");
@@ -762,26 +987,11 @@ int ipoib_ib_dev_down(struct net_device *dev)
ipoib_mcast_dev_flush(dev);
ipoib_flush_paths(dev);
-
- return 0;
-}
-
-static int recvs_pending(struct net_device *dev)
-{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
- int pending = 0;
- int i;
-
- for (i = 0; i < ipoib_recvq_size; ++i)
- if (priv->rx_ring[i].skb)
- ++pending;
-
- return pending;
}
void ipoib_drain_cq(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
int i, n;
/*
@@ -807,8 +1017,9 @@ void ipoib_drain_cq(struct net_device *dev)
ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
else
ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
- } else
- ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
+ } else {
+ pr_warn("%s: Got unexpected wqe id\n", __func__);
+ }
}
} while (n == IPOIB_NUM_WC);
@@ -818,109 +1029,6 @@ void ipoib_drain_cq(struct net_device *dev)
local_bh_enable();
}
-int ipoib_ib_dev_stop(struct net_device *dev)
-{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
- struct ib_qp_attr qp_attr;
- unsigned long begin;
- struct ipoib_tx_buf *tx_req;
- int i;
-
- if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
- napi_disable(&priv->napi);
-
- ipoib_cm_dev_stop(dev);
-
- /*
- * Move our QP to the error state and then reinitialize in
- * when all work requests have completed or have been flushed.
- */
- qp_attr.qp_state = IB_QPS_ERR;
- if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
- ipoib_warn(priv, "Failed to modify QP to ERROR state\n");
-
- /* Wait for all sends and receives to complete */
- begin = jiffies;
-
- while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
- if (time_after(jiffies, begin + 5 * HZ)) {
- ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
- priv->tx_head - priv->tx_tail, recvs_pending(dev));
-
- /*
- * assume the HW is wedged and just free up
- * all our pending work requests.
- */
- while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
- tx_req = &priv->tx_ring[priv->tx_tail &
- (ipoib_sendq_size - 1)];
- ipoib_dma_unmap_tx(priv, tx_req);
- dev_kfree_skb_any(tx_req->skb);
- ++priv->tx_tail;
- --priv->tx_outstanding;
- }
-
- for (i = 0; i < ipoib_recvq_size; ++i) {
- struct ipoib_rx_buf *rx_req;
-
- rx_req = &priv->rx_ring[i];
- if (!rx_req->skb)
- continue;
- ipoib_ud_dma_unmap_rx(priv,
- priv->rx_ring[i].mapping);
- dev_kfree_skb_any(rx_req->skb);
- rx_req->skb = NULL;
- }
-
- goto timeout;
- }
-
- ipoib_drain_cq(dev);
-
- msleep(1);
- }
-
- ipoib_dbg(priv, "All sends and receives done.\n");
-
-timeout:
- del_timer_sync(&priv->poll_timer);
- qp_attr.qp_state = IB_QPS_RESET;
- if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
- ipoib_warn(priv, "Failed to modify QP to RESET state\n");
-
- ipoib_flush_ah(dev);
-
- ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
-
- return 0;
-}
-
-int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
-{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
-
- priv->ca = ca;
- priv->port = port;
- priv->qp = NULL;
-
- if (ipoib_transport_dev_init(dev, ca)) {
- printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
- return -ENODEV;
- }
-
- setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
- (unsigned long) dev);
-
- if (dev->flags & IFF_UP) {
- if (ipoib_ib_dev_open(dev)) {
- ipoib_transport_dev_cleanup(dev);
- return -ENODEV;
- }
- }
-
- return 0;
-}
-
/*
* Takes whatever value which is in pkey index 0 and updates priv->pkey
* returns 0 if the pkey value was changed.
@@ -970,27 +1078,115 @@ static inline int update_child_pkey(struct ipoib_dev_priv *priv)
return 0;
}
-static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
- enum ipoib_flush_level level,
- int nesting)
+/*
+ * Returns true if the device address of the ipoib interface has changed and
+ * the new address is a valid one (i.e. present in the gid table), false otherwise.
+ */
+static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
{
- struct ipoib_dev_priv *cpriv;
- struct net_device *dev = priv->dev;
- int result;
+ union ib_gid search_gid;
+ union ib_gid gid0;
+ int err;
+ u16 index;
+ u32 port;
+ bool ret = false;
- down_read_nested(&priv->vlan_rwsem, nesting);
+ if (rdma_query_gid(priv->ca, priv->port, 0, &gid0))
+ return false;
- /*
- * Flush any child interfaces too -- they might be up even if
- * the parent is down.
+ netif_addr_lock_bh(priv->dev);
+
+ /* The subnet prefix may have changed, update it now so we won't have
+ * to do it later
*/
- list_for_each_entry(cpriv, &priv->child_intfs, list)
- __ipoib_ib_dev_flush(cpriv, level, nesting + 1);
+ priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix;
+ dev_addr_mod(priv->dev, 4, (u8 *)&gid0.global.subnet_prefix,
+ sizeof(gid0.global.subnet_prefix));
+ search_gid.global.subnet_prefix = gid0.global.subnet_prefix;
+
+ search_gid.global.interface_id = priv->local_gid.global.interface_id;
+
+ netif_addr_unlock_bh(priv->dev);
+
+ err = ib_find_gid(priv->ca, &search_gid, &port, &index);
+
+ netif_addr_lock_bh(priv->dev);
+
+ if (search_gid.global.interface_id !=
+ priv->local_gid.global.interface_id)
+ /* There was a change while we were looking up the gid, bail
+ * here and let the next work sort this out
+ */
+ goto out;
+
+ /* The next section of code needs some background:
+	 * Per the IB spec the port GUID can't change while the HCA is powered on.
+	 * The port GUID is the basis for the GID at index 0, which is the basis
+	 * for the default device address of an ipoib interface.
+ *
+ * so it seems the flow should be:
+ * if user_changed_dev_addr && gid in gid tbl
+ * set bit dev_addr_set
+ * return true
+ * else
+ * return false
+ *
+	 * The issue is that there are devices that don't follow the spec:
+	 * they change the port GUID when the HCA is powered. In order not
+	 * to break userspace applications, we need to check whether the
+	 * user wanted to control the device address, and we assume that if
+	 * the user sets the device address back to be based on GID index 0,
+	 * the user no longer wishes to control it.
+	 *
+	 * If the user doesn't control the device address,
+	 * IPOIB_FLAG_DEV_ADDR_SET is set, and ib_find_gid failed, it means
+	 * the port GUID has changed and the GID at index 0 has changed,
+	 * so we need to change priv->local_gid and priv->dev->dev_addr
+	 * to reflect the new GID.
+ */
+ if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
+ if (!err && port == priv->port) {
+ set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
+ if (index == 0)
+ clear_bit(IPOIB_FLAG_DEV_ADDR_CTRL,
+ &priv->flags);
+ else
+ set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags);
+ ret = true;
+ } else {
+ ret = false;
+ }
+ } else {
+ if (!err && port == priv->port) {
+ ret = true;
+ } else {
+ if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) {
+ memcpy(&priv->local_gid, &gid0,
+ sizeof(priv->local_gid));
+ dev_addr_mod(priv->dev, 4, (u8 *)&gid0,
+ sizeof(priv->local_gid));
+ ret = true;
+ }
+ }
+ }
+
+out:
+ netif_addr_unlock_bh(priv->dev);
+
+ return ret;
+}
- up_read(&priv->vlan_rwsem);
+static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
+ enum ipoib_flush_level level)
+{
+ struct net_device *dev = priv->dev;
+ int result;
if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
level != IPOIB_FLUSH_HEAVY) {
+ /* Make sure the dev_addr is set even if not flushing */
+ if (level == IPOIB_FLUSH_LIGHT)
+ ipoib_dev_addr_changed_valid(priv);
ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
return;
}
@@ -1002,7 +1198,8 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
update_parent_pkey(priv);
else
update_child_pkey(priv);
- }
+ } else if (level == IPOIB_FLUSH_LIGHT)
+ ipoib_dev_addr_changed_valid(priv);
ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
return;
}
@@ -1030,19 +1227,34 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
}
if (level == IPOIB_FLUSH_LIGHT) {
+ int oper_up;
ipoib_mark_paths_invalid(dev);
+		/* Set the IPoIB operation as down to prevent races between
+		 * the flush flow, which leaves the MCG, and on-the-fly joins
+		 * that can happen during that time. The mcast restart task
+		 * should deal with join requests we missed.
+ */
+ oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
ipoib_mcast_dev_flush(dev);
- ipoib_flush_ah(dev);
+ if (oper_up)
+ set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
+ ipoib_reap_dead_ahs(priv);
}
if (level >= IPOIB_FLUSH_NORMAL)
ipoib_ib_dev_down(dev);
if (level == IPOIB_FLUSH_HEAVY) {
+ netdev_lock_ops(dev);
if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
ipoib_ib_dev_stop(dev);
- if (ipoib_ib_dev_open(dev) != 0)
+
+ result = ipoib_ib_dev_open(dev);
+ netdev_unlock_ops(dev);
+
+ if (result)
return;
+
if (netif_queue_stopped(dev))
netif_start_queue(dev);
}
@@ -1054,7 +1266,8 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
if (level >= IPOIB_FLUSH_NORMAL)
ipoib_ib_dev_up(dev);
- ipoib_mcast_restart_task(&priv->restart_task);
+ if (ipoib_dev_addr_changed_valid(priv))
+ ipoib_mcast_restart_task(&priv->restart_task);
}
}
@@ -1063,7 +1276,7 @@ void ipoib_ib_dev_flush_light(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_light);
- __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);
}
void ipoib_ib_dev_flush_normal(struct work_struct *work)
@@ -1071,7 +1284,7 @@ void ipoib_ib_dev_flush_normal(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_normal);
- __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
}
void ipoib_ib_dev_flush_heavy(struct work_struct *work)
@@ -1079,12 +1292,39 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_heavy);
- __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
+ rtnl_lock();
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
+ rtnl_unlock();
+}
+
+void ipoib_queue_work(struct ipoib_dev_priv *priv,
+ enum ipoib_flush_level level)
+{
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+ struct ipoib_dev_priv *cpriv;
+
+ netdev_lock(priv->dev);
+ list_for_each_entry(cpriv, &priv->child_intfs, list)
+ ipoib_queue_work(cpriv, level);
+ netdev_unlock(priv->dev);
+ }
+
+ switch (level) {
+ case IPOIB_FLUSH_LIGHT:
+ queue_work(ipoib_workqueue, &priv->flush_light);
+ break;
+ case IPOIB_FLUSH_NORMAL:
+ queue_work(ipoib_workqueue, &priv->flush_normal);
+ break;
+ case IPOIB_FLUSH_HEAVY:
+ queue_work(ipoib_workqueue, &priv->flush_heavy);
+ break;
+ }
}
void ipoib_ib_dev_cleanup(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
ipoib_dbg(priv, "cleaning up ib_dev\n");
/*
@@ -1102,9 +1342,15 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
* the neighbor garbage collection is stopped and reaped.
* That should all be done now, so make a final ah flush.
*/
- ipoib_stop_ah(dev);
+ ipoib_reap_dead_ahs(priv);
- ipoib_transport_dev_cleanup(dev);
-}
+ clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+
+ priv->rn_ops->ndo_uninit(dev);
+ if (priv->pd) {
+ ib_dealloc_pd(priv->pd);
+ priv->pd = NULL;
+ }
+}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 36536ce5a3e2..5b4d76e97437 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -49,17 +49,14 @@
#include <linux/jhash.h>
#include <net/arp.h>
#include <net/addrconf.h>
+#include <net/netdev_lock.h>
+#include <net/pkt_sched.h>
#include <linux/inetdevice.h>
#include <rdma/ib_cache.h>
-#define DRV_VERSION "1.0.0"
-
-const char ipoib_driver_version[] = DRV_VERSION;
-
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;
@@ -91,13 +88,16 @@ struct workqueue_struct *ipoib_workqueue;
struct ib_sa_client ipoib_sa_client;
-static void ipoib_add_one(struct ib_device *device);
+static int ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device, void *client_data);
static void ipoib_neigh_reclaim(struct rcu_head *rp);
static struct net_device *ipoib_get_net_dev_by_params(
- struct ib_device *dev, u8 port, u16 pkey,
+ struct ib_device *dev, u32 port, u16 pkey,
const union ib_gid *gid, const struct sockaddr *addr,
void *client_data);
+static int ipoib_set_mac(struct net_device *dev, void *addr);
+static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd);
static struct ib_client ipoib_client = {
.name = "ipoib",
@@ -106,9 +106,82 @@ static struct ib_client ipoib_client = {
.get_net_dev_by_params = ipoib_get_net_dev_by_params,
};
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+static int ipoib_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct netdev_notifier_info *ni = ptr;
+ struct net_device *dev = ni->dev;
+
+ if (dev->netdev_ops->ndo_open != ipoib_open)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ ipoib_create_debug_files(dev);
+ break;
+ case NETDEV_CHANGENAME:
+ ipoib_delete_debug_files(dev);
+ ipoib_create_debug_files(dev);
+ break;
+ case NETDEV_UNREGISTER:
+ ipoib_delete_debug_files(dev);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+#endif
+
+struct ipoib_ifupdown_work {
+ struct work_struct work;
+ struct net_device *dev;
+ netdevice_tracker dev_tracker;
+ bool up;
+};
+
+static void ipoib_ifupdown_task(struct work_struct *work)
+{
+ struct ipoib_ifupdown_work *pwork =
+ container_of(work, struct ipoib_ifupdown_work, work);
+ struct net_device *dev = pwork->dev;
+ unsigned int flags;
+
+ rtnl_lock();
+ flags = dev->flags;
+ if (pwork->up)
+ flags |= IFF_UP;
+ else
+ flags &= ~IFF_UP;
+
+ if (dev->flags != flags)
+ dev_change_flags(dev, flags, NULL);
+ rtnl_unlock();
+ netdev_put(dev, &pwork->dev_tracker);
+ kfree(pwork);
+}
+
+static void ipoib_schedule_ifupdown_task(struct net_device *dev, bool up)
+{
+ struct ipoib_ifupdown_work *work;
+
+ if ((up && (dev->flags & IFF_UP)) ||
+ (!up && !(dev->flags & IFF_UP)))
+ return;
+
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return;
+ work->dev = dev;
+ netdev_hold(dev, &work->dev_tracker, GFP_KERNEL);
+ work->up = up;
+ INIT_WORK(&work->work, ipoib_ifupdown_task);
+ queue_work(ipoib_workqueue, &work->work);
+}
+
int ipoib_open(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
ipoib_dbg(priv, "bringing up interface\n");
@@ -122,33 +195,27 @@ int ipoib_open(struct net_device *dev)
goto err_disable;
}
- if (ipoib_ib_dev_up(dev))
- goto err_stop;
+ ipoib_ib_dev_up(dev);
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
struct ipoib_dev_priv *cpriv;
/* Bring up any child interfaces too */
- down_read(&priv->vlan_rwsem);
- list_for_each_entry(cpriv, &priv->child_intfs, list) {
- int flags;
-
- flags = cpriv->dev->flags;
- if (flags & IFF_UP)
- continue;
-
- dev_change_flags(cpriv->dev, flags | IFF_UP);
- }
- up_read(&priv->vlan_rwsem);
+ netdev_lock_ops_to_full(dev);
+ list_for_each_entry(cpriv, &priv->child_intfs, list)
+ ipoib_schedule_ifupdown_task(cpriv->dev, true);
+ netdev_unlock_full_to_ops(dev);
+ } else if (priv->parent) {
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+ if (!test_bit(IPOIB_FLAG_ADMIN_UP, &ppriv->flags))
+			ipoib_dbg(priv, "parent device %s is not up, so child device may not be functioning.\n",
+ ppriv->dev->name);
}
-
netif_start_queue(dev);
return 0;
-err_stop:
- ipoib_ib_dev_stop(dev);
-
err_disable:
clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
@@ -157,7 +224,7 @@ err_disable:
static int ipoib_stop(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
ipoib_dbg(priv, "stopping interface\n");
@@ -172,30 +239,18 @@ static int ipoib_stop(struct net_device *dev)
struct ipoib_dev_priv *cpriv;
/* Bring down any child interfaces too */
- down_read(&priv->vlan_rwsem);
- list_for_each_entry(cpriv, &priv->child_intfs, list) {
- int flags;
-
- flags = cpriv->dev->flags;
- if (!(flags & IFF_UP))
- continue;
-
- dev_change_flags(cpriv->dev, flags & ~IFF_UP);
- }
- up_read(&priv->vlan_rwsem);
+ netdev_lock_ops_to_full(dev);
+ list_for_each_entry(cpriv, &priv->child_intfs, list)
+ ipoib_schedule_ifupdown_task(cpriv->dev, false);
+ netdev_unlock_full_to_ops(dev);
}
return 0;
}
-static void ipoib_uninit(struct net_device *dev)
-{
- ipoib_dev_cleanup(dev);
-}
-
static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
@@ -205,7 +260,8 @@ static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_featu
static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int ret = 0;
/* dev->mtu > 2K ==> connected mode */
if (ipoib_cm_admin_enabled(dev)) {
@@ -216,18 +272,48 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
priv->mcast_mtu);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
- if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
+ if (new_mtu < (ETH_MIN_MTU + IPOIB_ENCAP_LEN) ||
+ new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
return -EINVAL;
priv->admin_mtu = new_mtu;
- dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
+ if (priv->mcast_mtu < priv->admin_mtu)
+ ipoib_dbg(priv, "MTU must be smaller than the underlying "
+ "link layer MTU - 4 (%u)\n", priv->mcast_mtu);
- return 0;
+ new_mtu = min(priv->mcast_mtu, priv->admin_mtu);
+
+ if (priv->rn_ops->ndo_change_mtu) {
+ bool carrier_status = netif_carrier_ok(dev);
+
+ netif_carrier_off(dev);
+
+ /* notify lower level on the real mtu */
+ ret = priv->rn_ops->ndo_change_mtu(dev, new_mtu);
+
+ if (carrier_status)
+ netif_carrier_on(dev);
+ } else {
+ WRITE_ONCE(dev->mtu, new_mtu);
+ }
+
+ return ret;
+}
+
+static void ipoib_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (priv->rn_ops->ndo_get_stats64)
+ priv->rn_ops->ndo_get_stats64(dev, stats);
+ else
+ netdev_stats_to_stats64(stats, &dev->stats);
}
/* Called with an RCU read lock taken */
@@ -264,33 +350,54 @@ static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
return false;
}
-/**
- * Find the master net_device on top of the given net_device.
+/*
+ * Find the L2 master net_device on top of the given net_device.
* @dev: base IPoIB net_device
*
- * Returns the master net_device with a reference held, or the same net_device
- * if no master exists.
+ * Returns the L2 master net_device with reference held if the L2 master
+ * exists (such as bond netdevice), or returns same netdev with reference
+ * held when master does not exist or when L3 master (such as VRF netdev).
*/
static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
{
struct net_device *master;
rcu_read_lock();
+
master = netdev_master_upper_dev_get_rcu(dev);
- if (master)
- dev_hold(master);
+ if (!master || netif_is_l3_master(master))
+ master = dev;
+
+ dev_hold(master);
rcu_read_unlock();
- if (master)
- return master;
+ return master;
+}
- dev_hold(dev);
- return dev;
+struct ipoib_walk_data {
+ const struct sockaddr *addr;
+ struct net_device *result;
+};
+
+static int ipoib_upper_walk(struct net_device *upper,
+ struct netdev_nested_priv *priv)
+{
+ struct ipoib_walk_data *data = (struct ipoib_walk_data *)priv->data;
+ int ret = 0;
+
+ if (ipoib_is_dev_match_addr_rcu(data->addr, upper)) {
+ dev_hold(upper);
+ data->result = upper;
+ ret = 1;
+ }
+
+ return ret;
}
/**
- * Find a net_device matching the given address, which is an upper device of
- * the given net_device.
+ * ipoib_get_net_dev_match_addr - Find a net_device matching
+ * the given address, which is an upper device of the given net_device.
+ *
* @addr: IP address to look for.
* @dev: base IPoIB net_device
*
@@ -300,27 +407,23 @@ static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
static struct net_device *ipoib_get_net_dev_match_addr(
const struct sockaddr *addr, struct net_device *dev)
{
- struct net_device *upper,
- *result = NULL;
- struct list_head *iter;
+ struct netdev_nested_priv priv;
+ struct ipoib_walk_data data = {
+ .addr = addr,
+ };
+ priv.data = (void *)&data;
rcu_read_lock();
if (ipoib_is_dev_match_addr_rcu(addr, dev)) {
dev_hold(dev);
- result = dev;
+ data.result = dev;
goto out;
}
- netdev_for_each_all_upper_dev_rcu(dev, upper, iter) {
- if (ipoib_is_dev_match_addr_rcu(addr, upper)) {
- dev_hold(upper);
- result = upper;
- break;
- }
- }
+ netdev_walk_all_upper_dev_rcu(dev, ipoib_upper_walk, &priv);
out:
rcu_read_unlock();
- return result;
+ return data.result;
}
/* returns the number of IPoIB netdevs on top a given ipoib device matching a
@@ -357,17 +460,20 @@ static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
}
}
+ if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
+ return matches;
+
/* Check child interfaces */
- down_read_nested(&priv->vlan_rwsem, nesting);
+ netdev_lock(priv->dev);
list_for_each_entry(child_priv, &priv->child_intfs, list) {
matches += ipoib_match_gid_pkey_addr(child_priv, gid,
- pkey_index, addr,
- nesting + 1,
- found_net_dev);
+ pkey_index, addr,
+ nesting + 1,
+ found_net_dev);
if (matches > 1)
break;
}
- up_read(&priv->vlan_rwsem);
+ netdev_unlock(priv->dev);
return matches;
}
@@ -375,7 +481,7 @@ static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
/* Returns the number of matching net_devs found (between 0 and 2). Also
* return the matching net_device in the @net_dev parameter, holding a
* reference to the net_device, if the number of matches >= 1 */
-static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port,
+static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u32 port,
u16 pkey_index,
const union ib_gid *gid,
const struct sockaddr *addr,
@@ -400,7 +506,7 @@ static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port,
}
static struct net_device *ipoib_get_net_dev_by_params(
- struct ib_device *dev, u8 port, u16 pkey,
+ struct ib_device *dev, u32 port, u16 pkey,
const union ib_gid *gid, const struct sockaddr *addr,
void *client_data)
{
@@ -417,10 +523,7 @@ static struct net_device *ipoib_get_net_dev_by_params(
if (ret)
return NULL;
- if (!dev_list)
- return NULL;
-
- /* See if we can find a unique device matching the L2 parameters */
+ /* See if we can find a unique device matching the pkey and GID */
matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
gid, NULL, &net_dev);
@@ -433,7 +536,7 @@ static struct net_device *ipoib_get_net_dev_by_params(
dev_put(net_dev);
- /* Couldn't find a unique device with L2 parameters only. Use L3
+ /* Couldn't find a unique device with pkey and GID only. Use L3
* address to uniquely match the net device */
matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
gid, addr, &net_dev);
@@ -443,7 +546,7 @@ static struct net_device *ipoib_get_net_dev_by_params(
default:
dev_warn_ratelimited(&dev->dev,
"duplicate IP address detected\n");
- /* Fall through */
+ fallthrough;
case 1:
return net_dev;
}
@@ -451,39 +554,50 @@ static struct net_device *ipoib_get_net_dev_by_params(
int ipoib_set_mode(struct net_device *dev, const char *buf)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if ((test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
+ !strcmp(buf, "connected\n")) ||
+ (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
+ !strcmp(buf, "datagram\n"))) {
+ return 0;
+ }
/* flush paths if we switch modes so that connections are restarted */
if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
ipoib_warn(priv, "enabling connected mode "
"will cause multicast packet drops\n");
+ netdev_lock_ops(dev);
netdev_update_features(dev);
- dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
+ netif_set_mtu(dev, ipoib_cm_max_mtu(dev));
+ netif_set_real_num_tx_queues(dev, 1);
+ netdev_unlock_ops(dev);
rtnl_unlock();
- priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
+ priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
ipoib_flush_paths(dev);
- rtnl_lock();
- return 0;
+ return (!rtnl_trylock()) ? -EBUSY : 0;
}
if (!strcmp(buf, "datagram\n")) {
clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+ netdev_lock_ops(dev);
netdev_update_features(dev);
- dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
+ netif_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
+ netif_set_real_num_tx_queues(dev, dev->num_tx_queues);
+ netdev_unlock_ops(dev);
rtnl_unlock();
ipoib_flush_paths(dev);
- rtnl_lock();
- return 0;
+ return (!rtnl_trylock()) ? -EBUSY : 0;
}
return -EINVAL;
}
-static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
+struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct rb_node *n = priv->path_tree.rb_node;
struct ipoib_path *path;
int ret;
@@ -507,7 +621,7 @@ static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct rb_node **n = &priv->path_tree.rb_node;
struct rb_node *pn = NULL;
struct ipoib_path *tpath;
@@ -542,7 +656,7 @@ static void path_free(struct net_device *dev, struct ipoib_path *path)
while ((skb = __skb_dequeue(&path->queue)))
dev_kfree_skb_irq(skb);
- ipoib_dbg(netdev_priv(dev), "path_free\n");
+ ipoib_dbg(ipoib_priv(dev), "%s\n", __func__);
/* remove all neigh connected to this path */
ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
@@ -559,7 +673,7 @@ struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
struct ipoib_path_iter *iter;
- iter = kmalloc(sizeof *iter, GFP_KERNEL);
+ iter = kmalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return NULL;
@@ -576,7 +690,7 @@ struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
- struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
struct rb_node *n;
struct ipoib_path *path;
int ret = 1;
@@ -613,24 +727,33 @@ void ipoib_path_iter_read(struct ipoib_path_iter *iter,
void ipoib_mark_paths_invalid(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_path *path, *tp;
spin_lock_irq(&priv->lock);
list_for_each_entry_safe(path, tp, &priv->path_list, list) {
- ipoib_dbg(priv, "mark path LID 0x%04x GID %pI6 invalid\n",
- be16_to_cpu(path->pathrec.dlid),
- path->pathrec.dgid.raw);
- path->valid = 0;
+ ipoib_dbg(priv, "mark path LID 0x%08x GID %pI6 invalid\n",
+ be32_to_cpu(sa_path_get_dlid(&path->pathrec)),
+ path->pathrec.dgid.raw);
+ if (path->ah)
+ path->ah->valid = 0;
}
spin_unlock_irq(&priv->lock);
}
+static void push_pseudo_header(struct sk_buff *skb, const char *daddr)
+{
+ struct ipoib_pseudo_header *phdr;
+
+ phdr = skb_push(skb, sizeof(*phdr));
+ memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
+}
+
void ipoib_flush_paths(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_path *path, *tp;
LIST_HEAD(remove_list);
unsigned long flags;
@@ -659,12 +782,12 @@ void ipoib_flush_paths(struct net_device *dev)
}
static void path_rec_completion(int status,
- struct ib_sa_path_rec *pathrec,
- void *path_ptr)
+ struct sa_path_rec *pathrec,
+ unsigned int num_prs, void *path_ptr)
{
struct ipoib_path *path = path_ptr;
struct net_device *dev = path->dev;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_ah *ah = NULL;
struct ipoib_ah *old_ah = NULL;
struct ipoib_neigh *neigh, *tn;
@@ -674,7 +797,8 @@ static void path_rec_completion(int status,
if (!status)
ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
- be16_to_cpu(pathrec->dlid), pathrec->dgid.raw);
+ be32_to_cpu(sa_path_get_dlid(pathrec)),
+ pathrec->dgid.raw);
else
ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
status, path->pathrec.dgid.raw);
@@ -682,22 +806,42 @@ static void path_rec_completion(int status,
skb_queue_head_init(&skqueue);
if (!status) {
- struct ib_ah_attr av;
+ struct rdma_ah_attr av;
- if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
+ if (!ib_init_ah_attr_from_path(priv->ca, priv->port,
+ pathrec, &av, NULL)) {
ah = ipoib_create_ah(dev, priv->pd, &av);
+ rdma_destroy_ah_attr(&av);
+ }
}
spin_lock_irqsave(&priv->lock, flags);
if (!IS_ERR_OR_NULL(ah)) {
+ /*
+ * pathrec.dgid is used as the database key from the LLADDR,
+ * it must remain unchanged even if the SA returns a different
+ * GID to use in the AH.
+ */
+ if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw,
+ sizeof(union ib_gid))) {
+ ipoib_dbg(
+ priv,
+ "%s got PathRec for gid %pI6 while asked for %pI6\n",
+ dev->name, pathrec->dgid.raw,
+ path->pathrec.dgid.raw);
+ memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw,
+ sizeof(union ib_gid));
+ }
+
path->pathrec = *pathrec;
old_ah = path->ah;
path->ah = ah;
ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
- ah, be16_to_cpu(pathrec->dlid), pathrec->sl);
+ ah, be32_to_cpu(sa_path_get_dlid(pathrec)),
+ pathrec->sl);
while ((skb = __skb_dequeue(&path->queue)))
__skb_queue_tail(&skqueue, skb);
@@ -731,7 +875,7 @@ static void path_rec_completion(int status,
while ((skb = __skb_dequeue(&neigh->queue)))
__skb_queue_tail(&skqueue, skb);
}
- path->valid = 1;
+ path->ah->valid = 1;
}
path->query = NULL;
@@ -746,36 +890,49 @@ static void path_rec_completion(int status,
ipoib_put_ah(old_ah);
while ((skb = __skb_dequeue(&skqueue))) {
+ int ret;
skb->dev = dev;
- if (dev_queue_xmit(skb))
- ipoib_warn(priv, "dev_queue_xmit failed "
- "to requeue packet\n");
+ ret = dev_queue_xmit(skb);
+ if (ret)
+ ipoib_warn(priv, "%s: dev_queue_xmit failed to re-queue packet, ret:%d\n",
+ __func__, ret);
}
}
+static void init_path_rec(struct ipoib_dev_priv *priv, struct ipoib_path *path,
+ void *gid)
+{
+ path->dev = priv->dev;
+
+ if (rdma_cap_opa_ah(priv->ca, priv->port))
+ path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA;
+ else
+ path->pathrec.rec_type = SA_PATH_REC_TYPE_IB;
+
+ memcpy(path->pathrec.dgid.raw, gid, sizeof(union ib_gid));
+ path->pathrec.sgid = priv->local_gid;
+ path->pathrec.pkey = cpu_to_be16(priv->pkey);
+ path->pathrec.numb_path = 1;
+ path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
+}
+
static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_path *path;
if (!priv->broadcast)
return NULL;
- path = kzalloc(sizeof *path, GFP_ATOMIC);
+ path = kzalloc(sizeof(*path), GFP_ATOMIC);
if (!path)
return NULL;
- path->dev = dev;
-
skb_queue_head_init(&path->queue);
INIT_LIST_HEAD(&path->neigh_list);
- memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
- path->pathrec.sgid = priv->local_gid;
- path->pathrec.pkey = cpu_to_be16(priv->pkey);
- path->pathrec.numb_path = 1;
- path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
+ init_path_rec(priv, path, gid);
return path;
}
@@ -783,7 +940,7 @@ static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
static int path_rec_start(struct net_device *dev,
struct ipoib_path *path)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
ipoib_dbg(priv, "Start path record lookup for %pI6\n",
path->pathrec.dgid.raw);
@@ -811,10 +968,29 @@ static int path_rec_start(struct net_device *dev,
return 0;
}
-static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
- struct net_device *dev)
+static void neigh_refresh_path(struct ipoib_neigh *neigh, u8 *daddr,
+ struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_path *path;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ path = __path_find(dev, daddr + 4);
+ if (!path)
+ goto out;
+ if (!path->query)
+ path_rec_start(dev, path);
+out:
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
+ struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
struct ipoib_path *path;
struct ipoib_neigh *neigh;
unsigned long flags;
@@ -825,7 +1001,15 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
spin_unlock_irqrestore(&priv->lock, flags);
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
- return;
+ return NULL;
+ }
+
+	/* To avoid a race condition, make sure that the
+ * neigh will be added only once.
+ */
+ if (unlikely(!list_empty(&neigh->list))) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return neigh;
}
path = __path_find(dev, daddr + 4);
@@ -839,7 +1023,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
list_add_tail(&neigh->list, &path->neigh_list);
- if (path->ah) {
+ if (path->ah && path->ah->valid) {
kref_get(&path->ah->ref);
neigh->ah = path->ah;
@@ -850,33 +1034,38 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
ipoib_neigh_free(neigh);
goto err_drop;
}
- if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
+ if (skb_queue_len(&neigh->queue) <
+ IPOIB_MAX_PATH_REC_QUEUE) {
+ push_pseudo_header(skb, neigh->daddr);
__skb_queue_tail(&neigh->queue, skb);
- else {
+ } else {
ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
skb_queue_len(&neigh->queue));
goto err_drop;
}
} else {
spin_unlock_irqrestore(&priv->lock, flags);
- ipoib_send(dev, skb, path->ah, IPOIB_QPN(daddr));
+ path->ah->last_send = rn->send(dev, skb, path->ah->ah,
+ IPOIB_QPN(daddr));
ipoib_neigh_put(neigh);
- return;
+ return NULL;
}
} else {
neigh->ah = NULL;
if (!path->query && path_rec_start(dev, path))
goto err_path;
- if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
+ if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ push_pseudo_header(skb, neigh->daddr);
__skb_queue_tail(&neigh->queue, skb);
- else
+ } else {
goto err_drop;
+ }
}
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_neigh_put(neigh);
- return;
+ return NULL;
err_path:
ipoib_neigh_free(neigh);
@@ -886,78 +1075,79 @@ err_drop:
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_neigh_put(neigh);
+
+ return NULL;
}
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
- struct ipoib_cb *cb)
+ struct ipoib_pseudo_header *phdr)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
struct ipoib_path *path;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
- path = __path_find(dev, cb->hwaddr + 4);
- if (!path || !path->valid) {
- int new_path = 0;
+ /* no broadcast means that all paths are (going to be) not valid */
+ if (!priv->broadcast)
+ goto drop_and_unlock;
+ path = __path_find(dev, phdr->hwaddr + 4);
+ if (!path || !path->ah || !path->ah->valid) {
if (!path) {
- path = path_rec_create(dev, cb->hwaddr + 4);
- new_path = 1;
+ path = path_rec_create(dev, phdr->hwaddr + 4);
+ if (!path)
+ goto drop_and_unlock;
+ __path_add(dev, path);
+ } else {
+ /*
+ * make sure there are no changes in the existing
+ * path record
+ */
+ init_path_rec(priv, path, phdr->hwaddr + 4);
+ }
+ if (!path->query && path_rec_start(dev, path)) {
+ goto drop_and_unlock;
}
- if (path) {
- if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
- __skb_queue_tail(&path->queue, skb);
- } else {
- ++dev->stats.tx_dropped;
- dev_kfree_skb_any(skb);
- }
- if (!path->query && path_rec_start(dev, path)) {
- spin_unlock_irqrestore(&priv->lock, flags);
- if (new_path)
- path_free(dev, path);
- return;
- } else
- __path_add(dev, path);
+ if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ push_pseudo_header(skb, phdr->hwaddr);
+ __skb_queue_tail(&path->queue, skb);
+ goto unlock;
} else {
- ++dev->stats.tx_dropped;
- dev_kfree_skb_any(skb);
+ goto drop_and_unlock;
}
-
- spin_unlock_irqrestore(&priv->lock, flags);
- return;
}
- if (path->ah) {
- ipoib_dbg(priv, "Send unicast ARP to %04x\n",
- be16_to_cpu(path->pathrec.dlid));
-
- spin_unlock_irqrestore(&priv->lock, flags);
- ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
- return;
- } else if ((path->query || !path_rec_start(dev, path)) &&
- skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
- __skb_queue_tail(&path->queue, skb);
- } else {
- ++dev->stats.tx_dropped;
- dev_kfree_skb_any(skb);
- }
+ spin_unlock_irqrestore(&priv->lock, flags);
+ ipoib_dbg(priv, "Send unicast ARP to %08x\n",
+ be32_to_cpu(sa_path_get_dlid(&path->pathrec)));
+ path->ah->last_send = rn->send(dev, skb, path->ah->ah,
+ IPOIB_QPN(phdr->hwaddr));
+ return;
+drop_and_unlock:
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+unlock:
spin_unlock_irqrestore(&priv->lock, flags);
}
-static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
struct ipoib_neigh *neigh;
- struct ipoib_cb *cb = ipoib_skb_cb(skb);
+ struct ipoib_pseudo_header *phdr;
struct ipoib_header *header;
unsigned long flags;
+ phdr = (struct ipoib_pseudo_header *) skb->data;
+ skb_pull(skb, sizeof(*phdr));
header = (struct ipoib_header *) skb->data;
- if (unlikely(cb->hwaddr[4] == 0xff)) {
+ if (unlikely(phdr->hwaddr[4] == 0xff)) {
/* multicast, arrange "if" according to probability */
if ((header->proto != htons(ETH_P_IP)) &&
(header->proto != htons(ETH_P_IPV6)) &&
@@ -970,13 +1160,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
/* Add in the P_Key for multicast*/
- cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
- cb->hwaddr[9] = priv->pkey & 0xff;
+ phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
+ phdr->hwaddr[9] = priv->pkey & 0xff;
- neigh = ipoib_neigh_get(dev, cb->hwaddr);
+ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
if (likely(neigh))
goto send_using_neigh;
- ipoib_mcast_send(dev, cb->hwaddr, skb);
+ ipoib_mcast_send(dev, phdr->hwaddr, skb);
return NETDEV_TX_OK;
}
@@ -985,16 +1175,17 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
case htons(ETH_P_IP):
case htons(ETH_P_IPV6):
case htons(ETH_P_TIPC):
- neigh = ipoib_neigh_get(dev, cb->hwaddr);
+ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
if (unlikely(!neigh)) {
- neigh_add_path(skb, cb->hwaddr, dev);
- return NETDEV_TX_OK;
+ neigh = neigh_add_path(skb, phdr->hwaddr, dev);
+ if (likely(!neigh))
+ return NETDEV_TX_OK;
}
break;
case htons(ETH_P_ARP):
case htons(ETH_P_RARP):
/* for unicast ARP and RARP, always perform a path lookup */
- unicast_arp_send(skb, dev, cb);
+ unicast_arp_send(skb, dev, phdr);
return NETDEV_TX_OK;
default:
/* ethertype not supported by IPoIB */
@@ -1010,12 +1201,16 @@ send_using_neigh:
ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
goto unref;
}
- } else if (neigh->ah) {
- ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr));
+ } else if (neigh->ah && neigh->ah->valid) {
+ neigh->ah->last_send = rn->send(dev, skb, neigh->ah->ah,
+ IPOIB_QPN(phdr->hwaddr));
goto unref;
+ } else if (neigh->ah) {
+ neigh_refresh_path(neigh, phdr->hwaddr, dev);
}
if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ push_pseudo_header(skb, phdr->hwaddr);
spin_lock_irqsave(&priv->lock, flags);
__skb_queue_tail(&neigh->queue, skb);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1030,44 +1225,81 @@ unref:
return NETDEV_TX_OK;
}
-static void ipoib_timeout(struct net_device *dev)
+static void ipoib_timeout(struct net_device *dev, unsigned int txqueue)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
+ if (rn->tx_timeout) {
+ rn->tx_timeout(dev, txqueue);
+ return;
+ }
ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
- jiffies_to_msecs(jiffies - dev->trans_start));
- ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
- netif_queue_stopped(dev),
- priv->tx_head, priv->tx_tail);
- /* XXX reset QP, etc. */
+ jiffies_to_msecs(jiffies - dev_trans_start(dev)));
+ ipoib_warn(priv,
+ "queue stopped %d, tx_head %u, tx_tail %u, global_tx_head %u, global_tx_tail %u\n",
+ netif_queue_stopped(dev), priv->tx_head, priv->tx_tail,
+ priv->global_tx_head, priv->global_tx_tail);
+
+
+ schedule_work(&priv->tx_timeout_work);
+}
+
+void ipoib_ib_tx_timeout_work(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv = container_of(work,
+ struct ipoib_dev_priv,
+ tx_timeout_work);
+ int err;
+
+ rtnl_lock();
+ netdev_lock_ops(priv->dev);
+
+ if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+ goto unlock;
+
+ ipoib_stop(priv->dev);
+ err = ipoib_open(priv->dev);
+ if (err) {
+ ipoib_warn(priv, "ipoib_open failed recovering from a tx_timeout, err(%d).\n",
+ err);
+ goto unlock;
+ }
+
+ netif_tx_wake_all_queues(priv->dev);
+unlock:
+ netdev_unlock_ops(priv->dev);
+ rtnl_unlock();
+
}
static int ipoib_hard_header(struct sk_buff *skb,
struct net_device *dev,
unsigned short type,
- const void *daddr, const void *saddr, unsigned len)
+ const void *daddr,
+ const void *saddr,
+ unsigned int len)
{
struct ipoib_header *header;
- struct ipoib_cb *cb = ipoib_skb_cb(skb);
- header = (struct ipoib_header *) skb_push(skb, sizeof *header);
+ header = skb_push(skb, sizeof(*header));
header->proto = htons(type);
header->reserved = 0;
/*
* we don't rely on dst_entry structure, always stuff the
- * destination address into skb->cb so we can figure out where
+ * destination address into skb hard header so we can figure out where
* to send the packet later.
*/
- memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
+ push_pseudo_header(skb, daddr);
- return sizeof *header;
+ return IPOIB_HARD_LEN;
}
static void ipoib_set_mcast_list(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
@@ -1079,14 +1311,14 @@ static void ipoib_set_mcast_list(struct net_device *dev)
static int ipoib_get_iflink(const struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
/* parent interface */
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
- return dev->ifindex;
+ return READ_ONCE(dev->ifindex);
/* child/vlan interface */
- return priv->parent->ifindex;
+ return READ_ONCE(priv->parent->ifindex);
}
static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
@@ -1107,7 +1339,7 @@ static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_neigh_table *ntbl = &priv->ntbl;
struct ipoib_neigh_hash *htbl;
struct ipoib_neigh *neigh = NULL;
@@ -1126,12 +1358,14 @@ struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
neigh = rcu_dereference_bh(neigh->hnext)) {
if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
/* found, take one ref on behalf of the caller */
- if (!atomic_inc_not_zero(&neigh->refcnt)) {
+ if (!refcount_inc_not_zero(&neigh->refcnt)) {
/* deleted */
neigh = NULL;
goto out_unlock;
}
- neigh->alive = jiffies;
+
+ if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
+ neigh->alive = jiffies;
goto out_unlock;
}
}
@@ -1149,9 +1383,7 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
unsigned long dt;
unsigned long flags;
int i;
-
- if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
- return;
+ LIST_HEAD(remove_list);
spin_lock_irqsave(&priv->lock, flags);
@@ -1164,9 +1396,6 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
/* neigh is obsolete if it was idle for two GC periods */
dt = 2 * arp_tbl.gc_interval;
neigh_obsolete = jiffies - dt;
- /* handle possible race condition */
- if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
- goto out_unlock;
for (i = 0; i < htbl->size; i++) {
struct ipoib_neigh *neigh;
@@ -1176,11 +1405,14 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
lockdep_is_held(&priv->lock))) != NULL) {
/* was the neigh idle for two GC periods */
if (time_after(neigh_obsolete, neigh->alive)) {
+
+ ipoib_check_and_add_mcast_sendonly(priv, neigh->daddr + 4, &remove_list);
+
rcu_assign_pointer(*np,
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
/* remove from path/mc list */
- list_del(&neigh->list);
+ list_del_init(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
} else {
np = &neigh->hnext;
@@ -1191,6 +1423,7 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
out_unlock:
spin_unlock_irqrestore(&priv->lock, flags);
+ ipoib_mcast_remove_list(&remove_list);
}
static void ipoib_reap_neigh(struct work_struct *work)
@@ -1200,9 +1433,8 @@ static void ipoib_reap_neigh(struct work_struct *work)
__ipoib_reap_neigh(priv);
- if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
- queue_delayed_work(priv->wq, &priv->neigh_reap_task,
- arp_tbl.gc_interval);
+ queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+ arp_tbl.gc_interval);
}
@@ -1211,7 +1443,7 @@ static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
{
struct ipoib_neigh *neigh;
- neigh = kzalloc(sizeof *neigh, GFP_ATOMIC);
+ neigh = kzalloc(sizeof(*neigh), GFP_ATOMIC);
if (!neigh)
return NULL;
@@ -1221,7 +1453,7 @@ static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
INIT_LIST_HEAD(&neigh->list);
ipoib_cm_set(neigh, NULL);
/* one ref on behalf of the caller */
- atomic_set(&neigh->refcnt, 1);
+ refcount_set(&neigh->refcnt, 1);
return neigh;
}
@@ -1229,7 +1461,7 @@ static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_neigh_table *ntbl = &priv->ntbl;
struct ipoib_neigh_hash *htbl;
struct ipoib_neigh *neigh;
@@ -1253,7 +1485,7 @@ struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
lockdep_is_held(&priv->lock))) {
if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
/* found, take one ref on behalf of the caller */
- if (!atomic_inc_not_zero(&neigh->refcnt)) {
+ if (!refcount_inc_not_zero(&neigh->refcnt)) {
/* deleted */
neigh = NULL;
break;
@@ -1268,7 +1500,7 @@ struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
goto out_unlock;
/* one ref on behalf of the hash table */
- atomic_inc(&neigh->refcnt);
+ refcount_inc(&neigh->refcnt);
neigh->alive = jiffies;
/* put in hash */
rcu_assign_pointer(neigh->hnext,
@@ -1286,7 +1518,7 @@ void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
{
/* neigh reference count was dropped to zero */
struct net_device *dev = neigh->dev;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct sk_buff *skb;
if (neigh->ah)
ipoib_put_ah(neigh->ah);
@@ -1296,7 +1528,7 @@ void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
}
if (ipoib_cm_get(neigh))
ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
- ipoib_dbg(netdev_priv(dev),
+ ipoib_dbg(ipoib_priv(dev),
"neigh free for %06x %pI6\n",
IPOIB_QPN(neigh->daddr),
neigh->daddr + 4);
@@ -1318,7 +1550,7 @@ static void ipoib_neigh_reclaim(struct rcu_head *rp)
void ipoib_neigh_free(struct ipoib_neigh *neigh)
{
struct net_device *dev = neigh->dev;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_neigh_table *ntbl = &priv->ntbl;
struct ipoib_neigh_hash *htbl;
struct ipoib_neigh __rcu **np;
@@ -1343,7 +1575,7 @@ void ipoib_neigh_free(struct ipoib_neigh *neigh)
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
/* remove from parent list */
- list_del(&neigh->list);
+ list_del_init(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
return;
} else {
@@ -1364,9 +1596,8 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
if (!htbl)
return -ENOMEM;
- set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
size = roundup_pow_of_two(arp_tbl.gc_thresh3);
- buckets = kzalloc(size * sizeof(*buckets), GFP_KERNEL);
+ buckets = kvcalloc(size, sizeof(*buckets), GFP_KERNEL);
if (!buckets) {
kfree(htbl);
return -ENOMEM;
@@ -1379,7 +1610,6 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
atomic_set(&ntbl->entries, 0);
/* start garbage collection */
- clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
queue_delayed_work(priv->wq, &priv->neigh_reap_task,
arp_tbl.gc_interval);
@@ -1394,14 +1624,14 @@ static void neigh_hash_free_rcu(struct rcu_head *head)
struct ipoib_neigh __rcu **buckets = htbl->buckets;
struct ipoib_neigh_table *ntbl = htbl->ntbl;
- kfree(buckets);
+ kvfree(buckets);
kfree(htbl);
complete(&ntbl->deleted);
}
void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_neigh_table *ntbl = &priv->ntbl;
struct ipoib_neigh_hash *htbl;
unsigned long flags;
@@ -1428,7 +1658,7 @@ void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
/* remove from parent list */
- list_del(&neigh->list);
+ list_del_init(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
} else {
np = &neigh->hnext;
@@ -1448,6 +1678,7 @@ static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
int i, wait_flushed = 0;
init_completion(&priv->ntbl.flushed);
+ set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
spin_lock_irqsave(&priv->lock, flags);
@@ -1470,7 +1701,7 @@ static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
/* remove from path/mc list */
- list_del(&neigh->list);
+ list_del_init(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
}
}
@@ -1487,110 +1718,442 @@ out_unlock:
static void ipoib_neigh_hash_uninit(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
- int stopped;
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
- ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
+ ipoib_dbg(priv, "%s\n", __func__);
init_completion(&priv->ntbl.deleted);
- set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
- /* Stop GC if called at init fail need to cancel work */
- stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
- if (!stopped)
- cancel_delayed_work(&priv->neigh_reap_task);
+ cancel_delayed_work_sync(&priv->neigh_reap_task);
ipoib_flush_neighs(priv);
wait_for_completion(&priv->ntbl.deleted);
}
+static void ipoib_napi_add(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ netif_napi_add_weight(dev, &priv->recv_napi, ipoib_rx_poll,
+ IPOIB_NUM_WC);
+ netif_napi_add_weight(dev, &priv->send_napi, ipoib_tx_poll,
+ MAX_SEND_CQE);
+}
-int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
+static void ipoib_napi_del(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ netif_napi_del(&priv->recv_napi);
+ netif_napi_del(&priv->send_napi);
+}
+
+static void ipoib_dev_uninit_default(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_transport_dev_cleanup(dev);
+
+ ipoib_napi_del(dev);
+
+ ipoib_cm_dev_cleanup(dev);
+
+ kfree(priv->rx_ring);
+ vfree(priv->tx_ring);
+
+ priv->rx_ring = NULL;
+ priv->tx_ring = NULL;
+}
+
+static int ipoib_dev_init_default(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ u8 addr_mod[3];
+
+ ipoib_napi_add(dev);
/* Allocate RX/TX "rings" to hold queued skbs */
- priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
- GFP_KERNEL);
- if (!priv->rx_ring) {
- printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
- ca->name, ipoib_recvq_size);
+ priv->rx_ring = kcalloc(ipoib_recvq_size,
+ sizeof(*priv->rx_ring),
+ GFP_KERNEL);
+ if (!priv->rx_ring)
goto out;
- }
- priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
+ priv->tx_ring = vzalloc(array_size(ipoib_sendq_size,
+ sizeof(*priv->tx_ring)));
if (!priv->tx_ring) {
- printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
- ca->name, ipoib_sendq_size);
+ pr_warn("%s: failed to allocate TX ring (%d entries)\n",
+ priv->ca->name, ipoib_sendq_size);
goto out_rx_ring_cleanup;
}
- /* priv->tx_head, tx_tail & tx_outstanding are already 0 */
+ /* priv->tx_head, tx_tail and global_tx_tail/head are already 0 */
- if (ipoib_ib_dev_init(dev, ca, port))
+ if (ipoib_transport_dev_init(dev, priv->ca)) {
+ pr_warn("%s: ipoib_transport_dev_init failed\n",
+ priv->ca->name);
goto out_tx_ring_cleanup;
+ }
+
+ /* after qp created set dev address */
+ addr_mod[0] = (priv->qp->qp_num >> 16) & 0xff;
+ addr_mod[1] = (priv->qp->qp_num >> 8) & 0xff;
+ addr_mod[2] = (priv->qp->qp_num) & 0xff;
+ dev_addr_mod(priv->dev, 1, addr_mod, sizeof(addr_mod));
+
+ return 0;
+
+out_tx_ring_cleanup:
+ vfree(priv->tx_ring);
+
+out_rx_ring_cleanup:
+ kfree(priv->rx_ring);
+
+out:
+ ipoib_napi_del(dev);
+ return -ENOMEM;
+}
+
+static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (!priv->rn_ops->ndo_eth_ioctl)
+ return -EOPNOTSUPP;
+
+ return priv->rn_ops->ndo_eth_ioctl(dev, ifr, cmd);
+}
+
+static int ipoib_dev_init(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int ret = -ENOMEM;
+
+ priv->qp = NULL;
/*
- * Must be after ipoib_ib_dev_init so we can allocate a per
- * device wq there and use it here
+ * the various IPoIB tasks assume they will never race against
+ * themselves, so always use a single thread workqueue
*/
- if (ipoib_neigh_hash_init(priv) < 0)
+ priv->wq = alloc_ordered_workqueue("ipoib_wq", WQ_MEM_RECLAIM);
+ if (!priv->wq) {
+ pr_warn("%s: failed to allocate device WQ\n", dev->name);
+ goto out;
+ }
+
+ /* create pd, which is used both for control and datapath */
+ priv->pd = ib_alloc_pd(priv->ca, 0);
+ if (IS_ERR(priv->pd)) {
+ pr_warn("%s: failed to allocate PD\n", priv->ca->name);
+ goto clean_wq;
+ }
+
+ ret = priv->rn_ops->ndo_init(dev);
+ if (ret) {
+ pr_warn("%s failed to init HW resource\n", dev->name);
+ goto out_free_pd;
+ }
+
+ ret = ipoib_neigh_hash_init(priv);
+ if (ret) {
+ pr_warn("%s failed to init neigh hash\n", dev->name);
goto out_dev_uninit;
+ }
+
+ if (dev->flags & IFF_UP) {
+ if (ipoib_ib_dev_open(dev)) {
+ pr_warn("%s failed to open device\n", dev->name);
+ ret = -ENODEV;
+ goto out_hash_uninit;
+ }
+ }
return 0;
+out_hash_uninit:
+ ipoib_neigh_hash_uninit(dev);
+
out_dev_uninit:
ipoib_ib_dev_cleanup(dev);
-out_tx_ring_cleanup:
- vfree(priv->tx_ring);
+out_free_pd:
+ if (priv->pd) {
+ ib_dealloc_pd(priv->pd);
+ priv->pd = NULL;
+ }
-out_rx_ring_cleanup:
- kfree(priv->rx_ring);
+clean_wq:
+ if (priv->wq) {
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+ }
out:
- return -ENOMEM;
+ return ret;
+}
+
+/*
+ * This must be called before doing an unregister_netdev on a parent device to
+ * shutdown the IB event handler.
+ */
+static void ipoib_parent_unregister_pre(struct net_device *ndev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+
+ /*
+ * ipoib_set_mac checks netif_running before pushing work, clearing
+ * running ensures it will not add more work.
+ */
+ rtnl_lock();
+ dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP, NULL);
+ rtnl_unlock();
+
+ /* ipoib_event() cannot be running once this returns */
+ ib_unregister_event_handler(&priv->event_handler);
+
+ /*
+ * Work on the queue grabs the rtnl lock, so this cannot be done while
+ * also holding it.
+ */
+ flush_workqueue(ipoib_workqueue);
+}
+
+static void ipoib_set_dev_features(struct ipoib_dev_priv *priv)
+{
+ priv->hca_caps = priv->ca->attrs.device_cap_flags;
+ priv->kernel_caps = priv->ca->attrs.kernel_cap_flags;
+
+ if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
+ priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+
+ if (priv->kernel_caps & IBK_UD_TSO)
+ priv->dev->hw_features |= NETIF_F_TSO;
+
+ priv->dev->features |= priv->dev->hw_features;
+ }
}
-void ipoib_dev_cleanup(struct net_device *dev)
+static int ipoib_parent_init(struct net_device *ndev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;
- LIST_HEAD(head);
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+ struct ib_port_attr attr;
+ int result;
+
+ result = ib_query_port(priv->ca, priv->port, &attr);
+ if (result) {
+ pr_warn("%s: ib_query_port %d failed\n", priv->ca->name,
+ priv->port);
+ return result;
+ }
+ priv->max_ib_mtu = rdma_mtu_from_attr(priv->ca, priv->port, &attr);
+
+ result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
+ if (result) {
+ pr_warn("%s: ib_query_pkey port %d failed (ret = %d)\n",
+ priv->ca->name, priv->port, result);
+ return result;
+ }
+
+ result = rdma_query_gid(priv->ca, priv->port, 0, &priv->local_gid);
+ if (result) {
+ pr_warn("%s: rdma_query_gid port %d failed (ret = %d)\n",
+ priv->ca->name, priv->port, result);
+ return result;
+ }
+ dev_addr_mod(priv->dev, 4, priv->local_gid.raw, sizeof(union ib_gid));
+
+ SET_NETDEV_DEV(priv->dev, priv->ca->dev.parent);
+ priv->dev->dev_port = priv->port - 1;
+ /* Let's set this one too for backwards compatibility. */
+ priv->dev->dev_id = priv->port - 1;
+
+ return 0;
+}
- ASSERT_RTNL();
+static void ipoib_child_init(struct net_device *ndev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+ priv->max_ib_mtu = ppriv->max_ib_mtu;
+ set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
+ if (memchr_inv(priv->dev->dev_addr, 0, INFINIBAND_ALEN))
+ memcpy(&priv->local_gid, priv->dev->dev_addr + 4,
+ sizeof(priv->local_gid));
+ else {
+ __dev_addr_set(priv->dev, ppriv->dev->dev_addr,
+ INFINIBAND_ALEN);
+ memcpy(&priv->local_gid, &ppriv->local_gid,
+ sizeof(priv->local_gid));
+ }
+}
- ipoib_delete_debug_files(dev);
+static int ipoib_ndo_init(struct net_device *ndev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+ int rc;
+ struct rdma_netdev *rn = netdev_priv(ndev);
- /* Delete any child interfaces first */
- list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
- /* Stop GC on child */
- set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags);
- cancel_delayed_work(&cpriv->neigh_reap_task);
- unregister_netdevice_queue(cpriv->dev, &head);
+ if (priv->parent) {
+ ipoib_child_init(ndev);
+ } else {
+ rc = ipoib_parent_init(ndev);
+ if (rc)
+ return rc;
}
- unregister_netdevice_many(&head);
+
+ /* MTU will be reset when mcast join happens */
+ ndev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
+ priv->mcast_mtu = priv->admin_mtu = ndev->mtu;
+ rn->mtu = priv->mcast_mtu;
+ ndev->max_mtu = IPOIB_CM_MTU;
+
+ ndev->neigh_priv_len = sizeof(struct ipoib_neigh);
/*
- * Must be before ipoib_ib_dev_cleanup or we delete an in use
- * work queue
+ * Set the full membership bit, so that we join the right
+ * broadcast group, etc.
*/
+ priv->pkey |= 0x8000;
+
+ ndev->broadcast[8] = priv->pkey >> 8;
+ ndev->broadcast[9] = priv->pkey & 0xff;
+ set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
+
+ ipoib_set_dev_features(priv);
+
+ rc = ipoib_dev_init(ndev);
+ if (rc) {
+ pr_warn("%s: failed to initialize device: %s port %d (ret = %d)\n",
+ priv->ca->name, priv->dev->name, priv->port, rc);
+ return rc;
+ }
+
+ if (priv->parent) {
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+ dev_hold(priv->parent);
+
+ netdev_lock(priv->parent);
+ list_add_tail(&priv->list, &ppriv->child_intfs);
+ netdev_unlock(priv->parent);
+ }
+
+ return 0;
+}
+
+static void ipoib_ndo_uninit(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ /*
+ * ipoib_remove_one guarantees the children are removed before the
+ * parent, and that is the only place where a parent can be removed.
+ */
+ WARN_ON(!list_empty(&priv->child_intfs));
+
+ if (priv->parent) {
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+ netdev_lock(ppriv->dev);
+ list_del(&priv->list);
+ netdev_unlock(ppriv->dev);
+ }
+
ipoib_neigh_hash_uninit(dev);
ipoib_ib_dev_cleanup(dev);
- kfree(priv->rx_ring);
- vfree(priv->tx_ring);
+ /* no more works over the priv->wq */
+ if (priv->wq) {
+ /* See ipoib_mcast_carrier_on_task() */
+ WARN_ON(test_bit(IPOIB_FLAG_OPER_UP, &priv->flags));
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+ }
- priv->rx_ring = NULL;
- priv->tx_ring = NULL;
+ dev_put(priv->parent);
+}
+
+static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state);
+}
+
+static int ipoib_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivf)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int err;
+
+ err = ib_get_vf_config(priv->ca, vf, priv->port, ivf);
+ if (err)
+ return err;
+
+ ivf->vf = vf;
+ memcpy(ivf->mac, dev->dev_addr, dev->addr_len);
+
+ return 0;
+}
+
+static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (type != IFLA_VF_IB_NODE_GUID && type != IFLA_VF_IB_PORT_GUID)
+ return -EINVAL;
+
+ return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
+}
+
+static int ipoib_get_vf_guid(struct net_device *dev, int vf,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ return ib_get_vf_guid(priv->ca, vf, priv->port, node_guid, port_guid);
+}
+
+static int ipoib_get_vf_stats(struct net_device *dev, int vf,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ return ib_get_vf_stats(priv->ca, vf, priv->port, vf_stats);
}
static const struct header_ops ipoib_header_ops = {
.create = ipoib_hard_header,
};
-static const struct net_device_ops ipoib_netdev_ops = {
- .ndo_uninit = ipoib_uninit,
+static const struct net_device_ops ipoib_netdev_ops_pf = {
+ .ndo_init = ipoib_ndo_init,
+ .ndo_uninit = ipoib_ndo_uninit,
+ .ndo_open = ipoib_open,
+ .ndo_stop = ipoib_stop,
+ .ndo_change_mtu = ipoib_change_mtu,
+ .ndo_fix_features = ipoib_fix_features,
+ .ndo_start_xmit = ipoib_start_xmit,
+ .ndo_tx_timeout = ipoib_timeout,
+ .ndo_set_rx_mode = ipoib_set_mcast_list,
+ .ndo_get_iflink = ipoib_get_iflink,
+ .ndo_set_vf_link_state = ipoib_set_vf_link_state,
+ .ndo_get_vf_config = ipoib_get_vf_config,
+ .ndo_get_vf_stats = ipoib_get_vf_stats,
+ .ndo_get_vf_guid = ipoib_get_vf_guid,
+ .ndo_set_vf_guid = ipoib_set_vf_guid,
+ .ndo_set_mac_address = ipoib_set_mac,
+ .ndo_get_stats64 = ipoib_get_stats,
+ .ndo_eth_ioctl = ipoib_ioctl,
+};
+
+static const struct net_device_ops ipoib_netdev_ops_vf = {
+ .ndo_init = ipoib_ndo_init,
+ .ndo_uninit = ipoib_ndo_uninit,
.ndo_open = ipoib_open,
.ndo_stop = ipoib_stop,
.ndo_change_mtu = ipoib_change_mtu,
@@ -1599,38 +2162,53 @@ static const struct net_device_ops ipoib_netdev_ops = {
.ndo_tx_timeout = ipoib_timeout,
.ndo_set_rx_mode = ipoib_set_mcast_list,
.ndo_get_iflink = ipoib_get_iflink,
+ .ndo_get_stats64 = ipoib_get_stats,
+ .ndo_eth_ioctl = ipoib_ioctl,
};
-void ipoib_setup(struct net_device *dev)
-{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+static const struct net_device_ops ipoib_netdev_default_pf = {
+ .ndo_init = ipoib_dev_init_default,
+ .ndo_uninit = ipoib_dev_uninit_default,
+ .ndo_open = ipoib_ib_dev_open_default,
+ .ndo_stop = ipoib_ib_dev_stop_default,
+};
- dev->netdev_ops = &ipoib_netdev_ops;
+void ipoib_setup_common(struct net_device *dev)
+{
dev->header_ops = &ipoib_header_ops;
+ dev->netdev_ops = &ipoib_netdev_default_pf;
ipoib_set_ethtool_ops(dev);
- netif_napi_add(dev, &priv->napi, ipoib_poll, NAPI_POLL_WEIGHT);
-
- dev->watchdog_timeo = HZ;
+ dev->watchdog_timeo = 10 * HZ;
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
- dev->hard_header_len = IPOIB_ENCAP_LEN;
+ dev->hard_header_len = IPOIB_HARD_LEN;
dev->addr_len = INFINIBAND_ALEN;
dev->type = ARPHRD_INFINIBAND;
- dev->tx_queue_len = ipoib_sendq_size * 2;
+ dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
dev->features = (NETIF_F_VLAN_CHALLENGED |
NETIF_F_HIGHDMA);
netif_keep_dst(dev);
memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
- priv->dev = dev;
+ /*
+ * unregister_netdev always frees the netdev, we use this mode
+ * consistently to unify all the various unregister paths, including
+ * those connected to rtnl_link_ops which require it.
+ */
+ dev->needs_free_netdev = true;
+}
- spin_lock_init(&priv->lock);
+static void ipoib_build_priv(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
- init_rwsem(&priv->vlan_rwsem);
+ priv->dev = dev;
+ spin_lock_init(&priv->lock);
+ mutex_init(&priv->mcast_mutex);
INIT_LIST_HEAD(&priv->path_list);
INIT_LIST_HEAD(&priv->child_intfs);
@@ -1639,46 +2217,160 @@ void ipoib_setup(struct net_device *dev)
INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
+ INIT_WORK(&priv->reschedule_napi_work, ipoib_napi_schedule_work);
INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
+ INIT_WORK(&priv->tx_timeout_work, ipoib_ib_tx_timeout_work);
INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
}
-struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
+static struct net_device *ipoib_alloc_netdev(struct ib_device *hca, u32 port,
+ const char *name)
{
struct net_device *dev;
- dev = alloc_netdev((int)sizeof(struct ipoib_dev_priv), name,
- NET_NAME_UNKNOWN, ipoib_setup);
+ dev = rdma_alloc_netdev(hca, port, RDMA_NETDEV_IPOIB, name,
+ NET_NAME_UNKNOWN, ipoib_setup_common);
+ if (!IS_ERR(dev) || PTR_ERR(dev) != -EOPNOTSUPP)
+ return dev;
+
+ dev = alloc_netdev(sizeof(struct rdma_netdev), name, NET_NAME_UNKNOWN,
+ ipoib_setup_common);
if (!dev)
- return NULL;
+ return ERR_PTR(-ENOMEM);
+ return dev;
+}
+
+int ipoib_intf_init(struct ib_device *hca, u32 port, const char *name,
+ struct net_device *dev)
+{
+ struct rdma_netdev *rn = netdev_priv(dev);
+ struct ipoib_dev_priv *priv;
+ int rc;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->ca = hca;
+ priv->port = port;
+
+ rc = rdma_init_netdev(hca, port, RDMA_NETDEV_IPOIB, name,
+ NET_NAME_UNKNOWN, ipoib_setup_common, dev);
+ if (rc) {
+ if (rc != -EOPNOTSUPP)
+ goto out;
+
+ rn->send = ipoib_send;
+ rn->attach_mcast = ipoib_mcast_attach;
+ rn->detach_mcast = ipoib_mcast_detach;
+ rn->hca = hca;
+
+ rc = netif_set_real_num_tx_queues(dev, 1);
+ if (rc)
+ goto out;
+
+ rc = netif_set_real_num_rx_queues(dev, 1);
+ if (rc)
+ goto out;
+ }
+
+ priv->rn_ops = dev->netdev_ops;
+
+ if (hca->attrs.kernel_cap_flags & IBK_VIRTUAL_FUNCTION)
+ dev->netdev_ops = &ipoib_netdev_ops_vf;
+ else
+ dev->netdev_ops = &ipoib_netdev_ops_pf;
+
+ rn->clnt_priv = priv;
+ /*
+ * Only the child register_netdev flows can handle priv_destructor
+ * being set, so we force it to NULL here and handle manually until it
+ * is safe to turn on.
+ */
+ priv->next_priv_destructor = dev->priv_destructor;
+ dev->priv_destructor = NULL;
+
+ ipoib_build_priv(dev);
+
+ return 0;
+
+out:
+ kfree(priv);
+ return rc;
+}
+
+struct net_device *ipoib_intf_alloc(struct ib_device *hca, u32 port,
+ const char *name)
+{
+ struct net_device *dev;
+ int rc;
+
+ dev = ipoib_alloc_netdev(hca, port, name);
+ if (IS_ERR(dev))
+ return dev;
- return netdev_priv(dev);
+ rc = ipoib_intf_init(hca, port, name, dev);
+ if (rc) {
+ free_netdev(dev);
+ return ERR_PTR(rc);
+ }
+
+ /*
+ * Upon success the caller must ensure ipoib_intf_free is called or
+ * register_netdevice succeeded and priv_destructor is set to
+ * ipoib_intf_free.
+ */
+ return dev;
}
-static ssize_t show_pkey(struct device *dev,
- struct device_attribute *attr, char *buf)
+void ipoib_intf_free(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
+
+ dev->priv_destructor = priv->next_priv_destructor;
+ if (dev->priv_destructor)
+ dev->priv_destructor(dev);
+
+ /*
+ * There are some error flows around register_netdev failing that may
+ * attempt to call priv_destructor twice, prevent that from happening.
+ */
+ dev->priv_destructor = NULL;
- return sprintf(buf, "0x%04x\n", priv->pkey);
+ /* unregister/destroy is very complicated. Make bugs more obvious. */
+ rn->clnt_priv = NULL;
+
+ kfree(priv);
}
-static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
-static ssize_t show_umcast(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pkey_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+
+ return sysfs_emit(buf, "0x%04x\n", priv->pkey);
+}
+static DEVICE_ATTR_RO(pkey);
+
+static ssize_t umcast_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));
+ struct net_device *ndev = to_net_dev(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
- return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
+ return sysfs_emit(buf, "%d\n",
+ test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}
void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
{
- struct ipoib_dev_priv *priv = netdev_priv(ndev);
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
if (umcast_val > 0) {
set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
@@ -1688,9 +2380,8 @@ void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
}
-static ssize_t set_umcast(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t umcast_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
unsigned long umcast_val = simple_strtoul(buf, NULL, 0);
@@ -1698,16 +2389,88 @@ static ssize_t set_umcast(struct device *dev,
return count;
}
-static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);
+static DEVICE_ATTR_RW(umcast);
int ipoib_add_umcast_attr(struct net_device *dev)
{
return device_create_file(&dev->dev, &dev_attr_umcast);
}
-static ssize_t create_child(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
+{
+ struct ipoib_dev_priv *child_priv;
+ struct net_device *netdev = priv->dev;
+
+ netif_addr_lock_bh(netdev);
+
+ memcpy(&priv->local_gid.global.interface_id,
+ &gid->global.interface_id,
+ sizeof(gid->global.interface_id));
+ dev_addr_mod(netdev, 4, (u8 *)&priv->local_gid, sizeof(priv->local_gid));
+ clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
+
+ netif_addr_unlock_bh(netdev);
+
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+ netdev_lock_ops_to_full(priv->dev);
+ list_for_each_entry(child_priv, &priv->child_intfs, list)
+ set_base_guid(child_priv, gid);
+ netdev_unlock_full_to_ops(priv->dev);
+ }
+}
+
+static int ipoib_check_lladdr(struct net_device *dev,
+ struct sockaddr_storage *ss)
+{
+ union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
+ int ret = 0;
+
+ netif_addr_lock_bh(dev);
+
+ /* Make sure the QPN, reserved and subnet prefix match the current
+ * lladdr, it also makes sure the lladdr is unicast.
+ */
+ if (memcmp(dev->dev_addr, ss->__data,
+ 4 + sizeof(gid->global.subnet_prefix)) ||
+ gid->global.interface_id == 0)
+ ret = -EINVAL;
+
+ netif_addr_unlock_bh(dev);
+
+ return ret;
+}
+
+static int ipoib_set_mac(struct net_device *dev, void *addr)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct sockaddr_storage *ss = addr;
+ int ret;
+
+ if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
+ return -EBUSY;
+
+ ret = ipoib_check_lladdr(dev, ss);
+ if (ret)
+ return ret;
+
+ set_base_guid(priv, (union ib_gid *)(ss->__data + 4));
+
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+ struct ipoib_dev_priv *cpriv;
+
+ netdev_lock_ops_to_full(dev);
+ list_for_each_entry(cpriv, &priv->child_intfs, list)
+ queue_work(ipoib_workqueue, &cpriv->flush_light);
+ netdev_unlock_full_to_ops(dev);
+ }
+ queue_work(ipoib_workqueue, &priv->flush_light);
+
+ return 0;
+}
+
+static ssize_t create_child_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int pkey;
int ret;
@@ -1718,21 +2481,15 @@ static ssize_t create_child(struct device *dev,
if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
return -EINVAL;
- /*
- * Set the full membership bit, so that we join the right
- * broadcast group, etc.
- */
- pkey |= 0x8000;
-
ret = ipoib_vlan_add(to_net_dev(dev), pkey);
return ret ? ret : count;
}
-static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);
+static DEVICE_ATTR_WO(create_child);
-static ssize_t delete_child(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t delete_child_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int pkey;
int ret;
@@ -1748,187 +2505,150 @@ static ssize_t delete_child(struct device *dev,
return ret ? ret : count;
}
-static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);
+static DEVICE_ATTR_WO(delete_child);
int ipoib_add_pkey_attr(struct net_device *dev)
{
return device_create_file(&dev->dev, &dev_attr_pkey);
}
-int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
+/*
+ * We erroneously exposed the iface's port number in the dev_id
+ * sysfs field long after dev_port was introduced for that purpose[1],
+ * and we need to stop everyone from relying on that.
+ * Let's overload the shower routine for the dev_id file here
+ * to gently bring the issue up.
+ *
+ * [1] https://www.spinics.net/lists/netdev/msg272123.html
+ */
+static ssize_t dev_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct ib_device_attr *device_attr;
- int result = -ENOMEM;
-
- device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
- if (!device_attr) {
- printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
- hca->name, sizeof *device_attr);
- return result;
- }
+ struct net_device *ndev = to_net_dev(dev);
- result = ib_query_device(hca, device_attr);
- if (result) {
- printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
- hca->name, result);
- kfree(device_attr);
- return result;
- }
- priv->hca_caps = device_attr->device_cap_flags;
-
- kfree(device_attr);
-
- if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
- priv->dev->hw_features = NETIF_F_SG |
- NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
-
- if (priv->hca_caps & IB_DEVICE_UD_TSO)
- priv->dev->hw_features |= NETIF_F_TSO;
+ /*
+ * ndev->dev_port will be equal to 0 in old kernel prior to commit
+ * 9b8b2a323008 ("IB/ipoib: Use dev_port to expose network interface
+ * port numbers") Zero was chosen as special case for user space
+ * applications to fallback and query dev_id to check if it has
+ * different value or not.
+ *
+ * Don't print warning in such scenario.
+ *
+ * https://github.com/systemd/systemd/blob/master/src/udev/udev-builtin-net_id.c#L358
+ */
+ if (ndev->dev_port && ndev->dev_id == ndev->dev_port)
+ netdev_info_once(ndev,
+ "\"%s\" wants to know my dev_id. Should it look at dev_port instead? See Documentation/ABI/testing/sysfs-class-net for more info.\n",
+ current->comm);
- priv->dev->features |= priv->dev->hw_features;
- }
+ return sysfs_emit(buf, "%#x\n", ndev->dev_id);
+}
+static DEVICE_ATTR_RO(dev_id);
- return 0;
+static int ipoib_intercept_dev_id_attr(struct net_device *dev)
+{
+ device_remove_file(&dev->dev, &dev_attr_dev_id);
+ return device_create_file(&dev->dev, &dev_attr_dev_id);
}
static struct net_device *ipoib_add_port(const char *format,
- struct ib_device *hca, u8 port)
+ struct ib_device *hca, u32 port)
{
+ struct rtnl_link_ops *ops = ipoib_get_link_ops();
+ struct rdma_netdev_alloc_params params;
struct ipoib_dev_priv *priv;
- struct ib_port_attr attr;
- int result = -ENOMEM;
-
- priv = ipoib_intf_alloc(format);
- if (!priv)
- goto alloc_mem_failed;
-
- SET_NETDEV_DEV(priv->dev, hca->dma_device);
- priv->dev->dev_id = port - 1;
-
- result = ib_query_port(hca, port, &attr);
- if (!result)
- priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
- else {
- printk(KERN_WARNING "%s: ib_query_port %d failed\n",
- hca->name, port);
- goto device_init_failed;
+ struct net_device *ndev;
+ int result;
+
+ ndev = ipoib_intf_alloc(hca, port, format);
+ if (IS_ERR(ndev)) {
+ pr_warn("%s, %d: ipoib_intf_alloc failed %ld\n", hca->name, port,
+ PTR_ERR(ndev));
+ return ndev;
}
+ priv = ipoib_priv(ndev);
- /* MTU will be reset when mcast join happens */
- priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
- priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
-
- priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);
-
- result = ib_query_pkey(hca, port, 0, &priv->pkey);
- if (result) {
- printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
- hca->name, port, result);
- goto device_init_failed;
- }
+ INIT_IB_EVENT_HANDLER(&priv->event_handler,
+ priv->ca, ipoib_event);
+ ib_register_event_handler(&priv->event_handler);
- result = ipoib_set_dev_features(priv, hca);
- if (result)
- goto device_init_failed;
+ /* call event handler to ensure pkey in sync */
+ ipoib_queue_work(priv, IPOIB_FLUSH_HEAVY);
- /*
- * Set the full membership bit, so that we join the right
- * broadcast group, etc.
- */
- priv->pkey |= 0x8000;
+ ndev->rtnl_link_ops = ipoib_get_link_ops();
- priv->dev->broadcast[8] = priv->pkey >> 8;
- priv->dev->broadcast[9] = priv->pkey & 0xff;
+ dev_net_set(ndev, rdma_dev_net(hca));
- result = ib_query_gid(hca, port, 0, &priv->local_gid);
+ result = register_netdev(ndev);
if (result) {
- printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
- hca->name, port, result);
- goto device_init_failed;
- } else
- memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
+ pr_warn("%s: couldn't register ipoib port %d; error %d\n",
+ hca->name, port, result);
- result = ipoib_dev_init(priv->dev, hca, port);
- if (result < 0) {
- printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
- hca->name, port, result);
- goto device_init_failed;
- }
+ ipoib_parent_unregister_pre(ndev);
+ ipoib_intf_free(ndev);
+ free_netdev(ndev);
- INIT_IB_EVENT_HANDLER(&priv->event_handler,
- priv->ca, ipoib_event);
- result = ib_register_event_handler(&priv->event_handler);
- if (result < 0) {
- printk(KERN_WARNING "%s: ib_register_event_handler failed for "
- "port %d (ret = %d)\n",
- hca->name, port, result);
- goto event_failed;
+ return ERR_PTR(result);
}
- result = register_netdev(priv->dev);
- if (result) {
- printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
- hca->name, port, result);
- goto register_failed;
- }
+ if (hca->ops.rdma_netdev_get_params) {
+ int rc = hca->ops.rdma_netdev_get_params(hca, port,
+ RDMA_NETDEV_IPOIB,
+ &params);
- ipoib_create_debug_files(priv->dev);
+ if (!rc && ops->priv_size < params.sizeof_priv)
+ ops->priv_size = params.sizeof_priv;
+ }
+ /*
+ * We cannot set priv_destructor before register_netdev because we
+ * need priv to be always valid during the error flow to execute
+ * ipoib_parent_unregister_pre(). Instead handle it manually and only
+ * enter priv_destructor mode once we are completely registered.
+ */
+ ndev->priv_destructor = ipoib_intf_free;
- if (ipoib_cm_add_mode_attr(priv->dev))
+ if (ipoib_intercept_dev_id_attr(ndev))
goto sysfs_failed;
- if (ipoib_add_pkey_attr(priv->dev))
+ if (ipoib_cm_add_mode_attr(ndev))
goto sysfs_failed;
- if (ipoib_add_umcast_attr(priv->dev))
+ if (ipoib_add_pkey_attr(ndev))
goto sysfs_failed;
- if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
+ if (ipoib_add_umcast_attr(ndev))
goto sysfs_failed;
- if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
+ if (device_create_file(&ndev->dev, &dev_attr_create_child))
+ goto sysfs_failed;
+ if (device_create_file(&ndev->dev, &dev_attr_delete_child))
goto sysfs_failed;
- return priv->dev;
+ return ndev;
sysfs_failed:
- ipoib_delete_debug_files(priv->dev);
- unregister_netdev(priv->dev);
-
-register_failed:
- ib_unregister_event_handler(&priv->event_handler);
- flush_workqueue(ipoib_workqueue);
- /* Stop GC if started before flush */
- set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
- cancel_delayed_work(&priv->neigh_reap_task);
- flush_workqueue(priv->wq);
-
-event_failed:
- ipoib_dev_cleanup(priv->dev);
-
-device_init_failed:
- free_netdev(priv->dev);
-
-alloc_mem_failed:
- return ERR_PTR(result);
+ ipoib_parent_unregister_pre(ndev);
+ unregister_netdev(ndev);
+ return ERR_PTR(-ENOMEM);
}
-static void ipoib_add_one(struct ib_device *device)
+static int ipoib_add_one(struct ib_device *device)
{
struct list_head *dev_list;
struct net_device *dev;
struct ipoib_dev_priv *priv;
- int p;
+ unsigned int p;
int count = 0;
- dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
+ dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL);
if (!dev_list)
- return;
+ return -ENOMEM;
INIT_LIST_HEAD(dev_list);
- for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
+ rdma_for_each_port (device, p) {
if (!rdma_protocol_ib(device, p))
continue;
dev = ipoib_add_port("ib%d", device, p);
if (!IS_ERR(dev)) {
- priv = netdev_priv(dev);
+ priv = ipoib_priv(dev);
list_add_tail(&priv->list, dev_list);
count++;
}
@@ -1936,40 +2656,44 @@ static void ipoib_add_one(struct ib_device *device)
if (!count) {
kfree(dev_list);
- return;
+ return -EOPNOTSUPP;
}
ib_set_client_data(device, &ipoib_client, dev_list);
+ return 0;
}
static void ipoib_remove_one(struct ib_device *device, void *client_data)
{
- struct ipoib_dev_priv *priv, *tmp;
+ struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
struct list_head *dev_list = client_data;
- if (!dev_list)
- return;
-
list_for_each_entry_safe(priv, tmp, dev_list, list) {
- ib_unregister_event_handler(&priv->event_handler);
- flush_workqueue(ipoib_workqueue);
+ LIST_HEAD(head);
+ ipoib_parent_unregister_pre(priv->dev);
rtnl_lock();
- dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
- rtnl_unlock();
- /* Stop GC */
- set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
- cancel_delayed_work(&priv->neigh_reap_task);
- flush_workqueue(priv->wq);
+ netdev_lock(priv->dev);
+ list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs,
+ list)
+ unregister_netdevice_queue(cpriv->dev, &head);
+ netdev_unlock(priv->dev);
+ unregister_netdevice_queue(priv->dev, &head);
+ unregister_netdevice_many(&head);
- unregister_netdev(priv->dev);
- free_netdev(priv->dev);
+ rtnl_unlock();
}
kfree(dev_list);
}
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+static struct notifier_block ipoib_netdev_notifier = {
+ .notifier_call = ipoib_netdev_event,
+};
+#endif
+
static int __init ipoib_init_module(void)
{
int ret;
@@ -1983,6 +2707,7 @@ static int __init ipoib_init_module(void)
ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
+ ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);
#endif
/*
@@ -1991,9 +2716,7 @@ static int __init ipoib_init_module(void)
*/
BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);
- ret = ipoib_register_debugfs();
- if (ret)
- return ret;
+ ipoib_register_debugfs();
/*
* We create a global workqueue here that is used for all flush
@@ -2005,7 +2728,7 @@ static int __init ipoib_init_module(void)
* its private workqueue, and we only queue up flush events
* on our global flush workqueue. This avoids the deadlocks.
*/
- ipoib_workqueue = create_singlethread_workqueue("ipoib_flush");
+ ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush", 0);
if (!ipoib_workqueue) {
ret = -ENOMEM;
goto err_fs;
@@ -2021,6 +2744,9 @@ static int __init ipoib_init_module(void)
if (ret)
goto err_client;
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+ register_netdevice_notifier(&ipoib_netdev_notifier);
+#endif
return 0;
err_client:
@@ -2038,6 +2764,9 @@ err_fs:
static void __exit ipoib_cleanup_module(void)
{
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+ unregister_netdevice_notifier(&ipoib_netdev_notifier);
+#endif
ipoib_netlink_fini();
ib_unregister_client(&ipoib_client);
ib_sa_unregister_client(&ipoib_sa_client);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 09a1748f9d13..8a4ab9ff0a68 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -64,6 +64,9 @@ struct ipoib_mcast_iter {
unsigned int send_only;
};
+/* join state that allows creating mcg with sendonly member request */
+#define SENDONLY_FULLMEMBER_JOIN 8
+
/*
* This should be called with the priv->lock held
*/
@@ -111,7 +114,7 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
struct net_device *dev = mcast->dev;
int tx_dropped = 0;
- ipoib_dbg_mcast(netdev_priv(dev), "deleting multicast group %pI6\n",
+ ipoib_dbg_mcast(ipoib_priv(dev), "deleting multicast group %pI6\n",
mcast->mcmember.mgid.raw);
/* remove all neigh connected to this mcast */
@@ -132,12 +135,11 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
kfree(mcast);
}
-static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
- int can_sleep)
+static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev)
{
struct ipoib_mcast *mcast;
- mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
+ mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
if (!mcast)
return NULL;
@@ -155,7 +157,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct rb_node *n = priv->multicast_tree.rb_node;
while (n) {
@@ -179,7 +181,7 @@ static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid
static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL;
while (*n) {
@@ -209,10 +211,13 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
struct ib_sa_mcmember_rec *mcmember)
{
struct net_device *dev = mcast->dev;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
struct ipoib_ah *ah;
+ struct rdma_ah_attr av;
int ret;
int set_qkey = 0;
+ int mtu;
mcast->mcmember = *mcmember;
@@ -235,17 +240,16 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
priv->broadcast->mcmember.flow_label = mcmember->flow_label;
priv->broadcast->mcmember.hop_limit = mcmember->hop_limit;
/* assume if the admin and the mcast are the same both can be changed */
+ mtu = rdma_mtu_enum_to_int(priv->ca, priv->port,
+ priv->broadcast->mcmember.mtu);
if (priv->mcast_mtu == priv->admin_mtu)
- priv->admin_mtu =
- priv->mcast_mtu =
- IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
- else
- priv->mcast_mtu =
- IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
+ priv->admin_mtu = IPOIB_UD_MTU(mtu);
+ priv->mcast_mtu = IPOIB_UD_MTU(mtu);
+ rn->mtu = priv->mcast_mtu;
priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
spin_unlock_irq(&priv->lock);
- priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
+ priv->tx_wr.remote_qkey = priv->qkey;
set_qkey = 1;
}
@@ -257,8 +261,9 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
return 0;
}
- ret = ipoib_mcast_attach(dev, be16_to_cpu(mcast->mcmember.mlid),
- &mcast->mcmember.mgid, set_qkey);
+ ret = rn->attach_mcast(dev, priv->ca, &mcast->mcmember.mgid,
+ be16_to_cpu(mcast->mcmember.mlid),
+ set_qkey, priv->qkey);
if (ret < 0) {
ipoib_warn(priv, "couldn't attach QP to multicast group %pI6\n",
mcast->mcmember.mgid.raw);
@@ -268,40 +273,33 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
}
}
- {
- struct ib_ah_attr av = {
- .dlid = be16_to_cpu(mcast->mcmember.mlid),
- .port_num = priv->port,
- .sl = mcast->mcmember.sl,
- .ah_flags = IB_AH_GRH,
- .static_rate = mcast->mcmember.rate,
- .grh = {
- .flow_label = be32_to_cpu(mcast->mcmember.flow_label),
- .hop_limit = mcast->mcmember.hop_limit,
- .sgid_index = 0,
- .traffic_class = mcast->mcmember.traffic_class
- }
- };
- av.grh.dgid = mcast->mcmember.mgid;
-
- ah = ipoib_create_ah(dev, priv->pd, &av);
- if (IS_ERR(ah)) {
- ipoib_warn(priv, "ib_address_create failed %ld\n",
- -PTR_ERR(ah));
- /* use original error */
- return PTR_ERR(ah);
- } else {
- spin_lock_irq(&priv->lock);
- mcast->ah = ah;
- spin_unlock_irq(&priv->lock);
-
- ipoib_dbg_mcast(priv, "MGID %pI6 AV %p, LID 0x%04x, SL %d\n",
- mcast->mcmember.mgid.raw,
- mcast->ah->ah,
- be16_to_cpu(mcast->mcmember.mlid),
- mcast->mcmember.sl);
- }
+ memset(&av, 0, sizeof(av));
+ av.type = rdma_ah_find_type(priv->ca, priv->port);
+ rdma_ah_set_dlid(&av, be16_to_cpu(mcast->mcmember.mlid));
+ rdma_ah_set_port_num(&av, priv->port);
+ rdma_ah_set_sl(&av, mcast->mcmember.sl);
+ rdma_ah_set_static_rate(&av, mcast->mcmember.rate);
+
+ rdma_ah_set_grh(&av, &mcast->mcmember.mgid,
+ be32_to_cpu(mcast->mcmember.flow_label),
+ 0, mcast->mcmember.hop_limit,
+ mcast->mcmember.traffic_class);
+
+ ah = ipoib_create_ah(dev, priv->pd, &av);
+ if (IS_ERR(ah)) {
+ ipoib_warn(priv, "ib_address_create failed %pe\n", ah);
+ /* use original error */
+ return PTR_ERR(ah);
}
+ spin_lock_irq(&priv->lock);
+ mcast->ah = ah;
+ spin_unlock_irq(&priv->lock);
+
+ ipoib_dbg_mcast(priv, "MGID %pI6 AV %p, LID 0x%04x, SL %d\n",
+ mcast->mcmember.mgid.raw,
+ mcast->ah->ah,
+ be16_to_cpu(mcast->mcmember.mlid),
+ mcast->mcmember.sl);
/* actually send any queued packets */
netif_tx_lock_bh(dev);
@@ -311,9 +309,11 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
netif_tx_unlock_bh(dev);
skb->dev = dev;
- if (dev_queue_xmit(skb))
- ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
+ ret = dev_queue_xmit(skb);
+ if (ret)
+ ipoib_warn(priv, "%s:dev_queue_xmit failed to re-queue packet, ret:%d\n",
+ __func__, ret);
netif_tx_lock_bh(dev);
}
netif_tx_unlock_bh(dev);
@@ -332,7 +332,6 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work)
ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
return;
}
-
/*
* Take rtnl_lock to avoid racing with ipoib_stop() and
* turning the carrier back on while a device is being
@@ -359,7 +358,7 @@ static int ipoib_mcast_join_complete(int status,
{
struct ipoib_mcast *mcast = multicast->context;
struct net_device *dev = mcast->dev;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
ipoib_dbg_mcast(priv, "%sjoin completion for %pI6 (status %d)\n",
test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ?
@@ -456,9 +455,12 @@ out_locked:
return status;
}
-static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
+/*
+ * Caller must hold 'priv->lock'
+ */
+static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ib_sa_multicast *multicast;
struct ib_sa_mcmember_rec rec = {
.join_state = 1
@@ -466,6 +468,13 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
ib_sa_comp_mask comp_mask;
int ret = 0;
+ if (!priv->broadcast ||
+ !test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ return -EINVAL;
+
+ init_completion(&mcast->done);
+ set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+
ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);
rec.mgid = mcast->mcmember.mgid;
@@ -508,35 +517,33 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
rec.hop_limit = priv->broadcast->mcmember.hop_limit;
/*
- * Historically Linux IPoIB has never properly supported SEND
- * ONLY join. It emulated it by not providing all the required
- * attributes, which is enough to prevent group creation and
- * detect if there are full members or not. A major problem
- * with supporting SEND ONLY is detecting when the group is
- * auto-destroyed as IPoIB will cache the MLID..
+ * Send-only IB Multicast joins work at the core IB layer but
+ * require specific SM support.
+ * We can use such joins here only if the current SM supports that feature.
+ * However, if not, we emulate an Ethernet multicast send,
+ * which does not require a multicast subscription and will
+ * still send properly. The most appropriate thing to
+ * do is to create the group if it doesn't exist as that
+ * most closely emulates the behavior, from a user space
+ * application perspective, of Ethernet multicast operation.
*/
-#if 1
- if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
- comp_mask &= ~IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
-#else
if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
- rec.join_state = 4;
-#endif
+ rec.join_state = SENDONLY_FULLMEMBER_JOIN;
}
multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
- &rec, comp_mask, GFP_KERNEL,
+ &rec, comp_mask, GFP_ATOMIC,
ipoib_mcast_join_complete, mcast);
if (IS_ERR(multicast)) {
ret = PTR_ERR(multicast);
ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
- spin_lock_irq(&priv->lock);
/* Requeue this join task with a backoff delay */
__ipoib_mcast_schedule_join_thread(priv, mcast, 1);
clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
- spin_unlock_irq(&priv->lock);
complete(&mcast->done);
+ return ret;
}
+ return 0;
}
void ipoib_mcast_join_task(struct work_struct *work)
@@ -551,18 +558,23 @@ void ipoib_mcast_join_task(struct work_struct *work)
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
return;
- if (ib_query_port(priv->ca, priv->port, &port_attr) ||
- port_attr.state != IB_PORT_ACTIVE) {
+ if (ib_query_port(priv->ca, priv->port, &port_attr)) {
+ ipoib_dbg(priv, "ib_query_port() failed\n");
+ return;
+ }
+ if (port_attr.state != IB_PORT_ACTIVE) {
ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n",
port_attr.state);
return;
}
priv->local_lid = port_attr.lid;
+ netif_addr_lock_bh(dev);
- if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
- ipoib_warn(priv, "ib_query_gid() failed\n");
- else
- memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
+ if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
+ netif_addr_unlock_bh(dev);
+ return;
+ }
+ netif_addr_unlock_bh(dev);
spin_lock_irq(&priv->lock);
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
@@ -571,7 +583,7 @@ void ipoib_mcast_join_task(struct work_struct *work)
if (!priv->broadcast) {
struct ipoib_mcast *broadcast;
- broadcast = ipoib_mcast_alloc(dev, 0);
+ broadcast = ipoib_mcast_alloc(dev);
if (!broadcast) {
ipoib_warn(priv, "failed to allocate broadcast group\n");
/*
@@ -616,11 +628,10 @@ void ipoib_mcast_join_task(struct work_struct *work)
if (mcast->backoff == 1 ||
time_after_eq(jiffies, mcast->delay_until)) {
/* Found the next unjoined group */
- init_completion(&mcast->done);
- set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
- spin_unlock_irq(&priv->lock);
- ipoib_mcast_join(dev, mcast);
- spin_lock_irq(&priv->lock);
+ if (ipoib_mcast_join(dev, mcast)) {
+ spin_unlock_irq(&priv->lock);
+ return;
+ }
} else if (!delay_until ||
time_before(mcast->delay_until, delay_until))
delay_until = mcast->delay_until;
@@ -636,18 +647,15 @@ out:
queue_delayed_work(priv->wq, &priv->mcast_task,
delay_until - jiffies);
}
- if (mcast) {
- init_completion(&mcast->done);
- set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
- }
- spin_unlock_irq(&priv->lock);
if (mcast)
ipoib_mcast_join(dev, mcast);
+
+ spin_unlock_irq(&priv->lock);
}
-int ipoib_mcast_start_thread(struct net_device *dev)
+void ipoib_mcast_start_thread(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
unsigned long flags;
ipoib_dbg_mcast(priv, "starting multicast thread\n");
@@ -655,29 +663,21 @@ int ipoib_mcast_start_thread(struct net_device *dev)
spin_lock_irqsave(&priv->lock, flags);
__ipoib_mcast_schedule_join_thread(priv, NULL, 0);
spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
}
-int ipoib_mcast_stop_thread(struct net_device *dev)
+void ipoib_mcast_stop_thread(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
- unsigned long flags;
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
ipoib_dbg_mcast(priv, "stopping multicast thread\n");
- spin_lock_irqsave(&priv->lock, flags);
- cancel_delayed_work(&priv->mcast_task);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- flush_workqueue(priv->wq);
-
- return 0;
+ cancel_delayed_work_sync(&priv->mcast_task);
}
static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
int ret = 0;
if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
@@ -691,8 +691,8 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
mcast->mcmember.mgid.raw);
/* Remove ourselves from the multicast group */
- ret = ib_detach_mcast(priv->qp, &mcast->mcmember.mgid,
- be16_to_cpu(mcast->mcmember.mlid));
+ ret = rn->detach_mcast(dev, priv->ca, &mcast->mcmember.mgid,
+ be16_to_cpu(mcast->mcmember.mlid));
if (ret)
ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
} else if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
@@ -702,9 +702,47 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
return 0;
}
+/*
+ * Check if the multicast group is sendonly. If so remove it from the maps
+ * and add to the remove list
+ */
+void ipoib_check_and_add_mcast_sendonly(struct ipoib_dev_priv *priv, u8 *mgid,
+ struct list_head *remove_list)
+{
+ /* Is this multicast ? */
+ if (*mgid == 0xff) {
+ struct ipoib_mcast *mcast = __ipoib_mcast_find(priv->dev, mgid);
+
+ if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
+ list_del(&mcast->list);
+ rb_erase(&mcast->rb_node, &priv->multicast_tree);
+ list_add_tail(&mcast->list, remove_list);
+ }
+ }
+}
+
+void ipoib_mcast_remove_list(struct list_head *remove_list)
+{
+ struct ipoib_mcast *mcast, *tmcast;
+
+ /*
+ * make sure the in-flight joins have finished before we attempt
+ * to leave
+ */
+ list_for_each_entry_safe(mcast, tmcast, remove_list, list)
+ if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+ wait_for_completion(&mcast->done);
+
+ list_for_each_entry_safe(mcast, tmcast, remove_list, list) {
+ ipoib_mcast_leave(mcast->dev, mcast);
+ ipoib_mcast_free(mcast);
+ }
+}
+
void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
struct ipoib_mcast *mcast;
unsigned long flags;
void *mgid = daddr + 4;
@@ -726,7 +764,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n",
mgid);
- mcast = ipoib_mcast_alloc(dev, 0);
+ mcast = ipoib_mcast_alloc(dev);
if (!mcast) {
ipoib_warn(priv, "unable to allocate memory "
"for multicast structure\n");
@@ -741,9 +779,11 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
__ipoib_mcast_add(dev, mcast);
list_add_tail(&mcast->list, &priv->multicast_list);
}
- if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
+ if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, sizeof(struct ipoib_pseudo_header));
skb_queue_tail(&mcast->pkt_queue, skb);
- else {
+ } else {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
}
@@ -758,14 +798,19 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
spin_lock_irqsave(&priv->lock, flags);
if (!neigh) {
neigh = ipoib_neigh_alloc(daddr, dev);
- if (neigh) {
+ /* Make sure that the neigh will be added only
+ * once to mcast list.
+ */
+ if (neigh && list_empty(&neigh->list)) {
kref_get(&mcast->ah->ref);
neigh->ah = mcast->ah;
+ neigh->ah->valid = 1;
list_add_tail(&neigh->list, &mcast->neigh_list);
}
}
spin_unlock_irqrestore(&priv->lock, flags);
- ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
+ mcast->ah->last_send = rn->send(dev, skb, mcast->ah->ah,
+ IB_MULTICAST_QPN);
if (neigh)
ipoib_neigh_put(neigh);
return;
@@ -777,11 +822,12 @@ unlock:
void ipoib_mcast_dev_flush(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
LIST_HEAD(remove_list);
struct ipoib_mcast *mcast, *tmcast;
unsigned long flags;
+ mutex_lock(&priv->mcast_mutex);
ipoib_dbg_mcast(priv, "flushing multicast list\n");
spin_lock_irqsave(&priv->lock, flags);
@@ -800,18 +846,8 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
- /*
- * make sure the in-flight joins have finished before we attempt
- * to leave
- */
- list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
- if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
- wait_for_completion(&mcast->done);
-
- list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
- ipoib_mcast_leave(dev, mcast);
- ipoib_mcast_free(mcast);
- }
+ ipoib_mcast_remove_list(&remove_list);
+ mutex_unlock(&priv->mcast_mutex);
}
static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
@@ -833,7 +869,6 @@ void ipoib_mcast_restart_task(struct work_struct *work)
struct netdev_hw_addr *ha;
struct ipoib_mcast *mcast, *tmcast;
LIST_HEAD(remove_list);
- unsigned long flags;
struct ib_sa_mcmember_rec rec;
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
@@ -845,9 +880,8 @@ void ipoib_mcast_restart_task(struct work_struct *work)
ipoib_dbg_mcast(priv, "restarting multicast task\n");
- local_irq_save(flags);
- netif_addr_lock(dev);
- spin_lock(&priv->lock);
+ netif_addr_lock_bh(dev);
+ spin_lock_irq(&priv->lock);
/*
* Unfortunately, the networking core only gives us a list of all of
@@ -866,7 +900,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
if (!ipoib_mcast_addr_is_valid(ha->addr, dev->broadcast))
continue;
- memcpy(mgid.raw, ha->addr + 4, sizeof mgid);
+ memcpy(mgid.raw, ha->addr + 4, sizeof(mgid));
mcast = __ipoib_mcast_find(dev, &mgid);
if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
@@ -884,7 +918,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
ipoib_dbg_mcast(priv, "adding multicast entry for mgid %pI6\n",
mgid.raw);
- nmcast = ipoib_mcast_alloc(dev, 0);
+ nmcast = ipoib_mcast_alloc(dev);
if (!nmcast) {
ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
continue;
@@ -925,30 +959,18 @@ void ipoib_mcast_restart_task(struct work_struct *work)
}
}
- spin_unlock(&priv->lock);
- netif_addr_unlock(dev);
- local_irq_restore(flags);
-
- /*
- * make sure the in-flight joins have finished before we attempt
- * to leave
- */
- list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
- if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
- wait_for_completion(&mcast->done);
+ spin_unlock_irq(&priv->lock);
+ netif_addr_unlock_bh(dev);
- list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
- ipoib_mcast_leave(mcast->dev, mcast);
- ipoib_mcast_free(mcast);
- }
+ ipoib_mcast_remove_list(&remove_list);
/*
* Double check that we are still up
*/
if (test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irq(&priv->lock);
__ipoib_mcast_schedule_join_thread(priv, NULL, 0);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irq(&priv->lock);
}
}
@@ -958,7 +980,7 @@ struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
{
struct ipoib_mcast_iter *iter;
- iter = kmalloc(sizeof *iter, GFP_KERNEL);
+ iter = kmalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return NULL;
@@ -975,7 +997,7 @@ struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
{
- struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
struct rb_node *n;
struct ipoib_mcast *mcast;
int ret = 1;
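
For reference, the two-phase teardown that ipoib_mcast_dev_flush() and ipoib_mcast_restart_task() now delegate to ipoib_mcast_remove_list() boils down to the pattern sketched below: unlink the groups from the driver's list and rb-tree while holding priv->lock, then do the blocking wait/leave/free work with the lock dropped. This is a condensed illustration of the hunks above, not code from the patch; example_flush_mcast() is a hypothetical name.

static void example_flush_mcast(struct ipoib_dev_priv *priv)
{
	LIST_HEAD(remove_list);
	struct ipoib_mcast *mcast, *tmcast;
	unsigned long flags;

	/* phase 1: unlink the groups while holding the spinlock */
	spin_lock_irqsave(&priv->lock, flags);
	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		list_del(&mcast->list);
		rb_erase(&mcast->rb_node, &priv->multicast_tree);
		list_add_tail(&mcast->list, &remove_list);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	/* phase 2: may sleep - waits for in-flight joins, then leaves/frees */
	ipoib_mcast_remove_list(&remove_list);
}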
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index cdc7df4fdb8a..53db7c8191e3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -32,7 +32,6 @@
#include <linux/netdevice.h>
#include <linux/if_arp.h> /* For ARPHRD_xxx */
-#include <linux/module.h>
#include <net/rtnetlink.h>
#include "ipoib.h"
@@ -42,9 +41,14 @@ static const struct nla_policy ipoib_policy[IFLA_IPOIB_MAX + 1] = {
[IFLA_IPOIB_UMCAST] = { .type = NLA_U16 },
};
+static unsigned int ipoib_get_max_num_queues(void)
+{
+ return min_t(unsigned int, num_possible_cpus(), 128);
+}
+
static int ipoib_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
u16 val;
if (nla_put_u16(skb, IFLA_IPOIB_PKEY, priv->pkey))
@@ -64,8 +68,9 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int ipoib_changelink(struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+static int ipoib_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
u16 mode, umcast;
int ret = 0;
@@ -92,9 +97,13 @@ out_err:
return ret;
}
-static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+static int ipoib_new_child_link(struct net_device *dev,
+ struct rtnl_newlink_params *params,
+ struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct net_device *pdev;
struct ipoib_dev_priv *ppriv;
u16 child_pkey;
@@ -103,11 +112,11 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
if (!tb[IFLA_LINK])
return -EINVAL;
- pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ pdev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
if (!pdev || pdev->type != ARPHRD_INFINIBAND)
return -ENODEV;
- ppriv = netdev_priv(pdev);
+ ppriv = ipoib_priv(pdev);
if (test_bit(IPOIB_FLAG_SUBINTERFACE, &ppriv->flags)) {
ipoib_warn(ppriv, "child creation disallowed for child devices\n");
@@ -120,33 +129,36 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
} else
child_pkey = nla_get_u16(data[IFLA_IPOIB_PKEY]);
- if (child_pkey == 0 || child_pkey == 0x8000)
- return -EINVAL;
-
- /*
- * Set the full membership bit, so that we join the right
- * broadcast group, etc.
- */
- child_pkey |= 0x8000;
+ err = ipoib_intf_init(ppriv->ca, ppriv->port, dev->name, dev);
+ if (err) {
+ ipoib_warn(ppriv, "failed to initialize pkey device\n");
+ return err;
+ }
- err = __ipoib_vlan_add(ppriv, netdev_priv(dev), child_pkey, IPOIB_RTNL_CHILD);
+ err = __ipoib_vlan_add(ppriv, ipoib_priv(dev),
+ child_pkey, IPOIB_RTNL_CHILD);
+ if (err)
+ return err;
+
+ if (data) {
+ err = ipoib_changelink(dev, tb, data, extack);
+ if (err) {
+ unregister_netdevice(dev);
+ return err;
+ }
+ }
- if (!err && data)
- err = ipoib_changelink(dev, tb, data);
- return err;
+ return 0;
}
-static void ipoib_unregister_child_dev(struct net_device *dev, struct list_head *head)
+static void ipoib_del_child_link(struct net_device *dev, struct list_head *head)
{
- struct ipoib_dev_priv *priv, *ppriv;
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
- priv = netdev_priv(dev);
- ppriv = netdev_priv(priv->parent);
+ if (!priv->parent)
+ return;
- down_write(&ppriv->vlan_rwsem);
unregister_netdevice_queue(dev, head);
- list_del(&priv->list);
- up_write(&ppriv->vlan_rwsem);
}
static size_t ipoib_get_size(const struct net_device *dev)
@@ -158,17 +170,25 @@ static size_t ipoib_get_size(const struct net_device *dev)
static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
.kind = "ipoib",
+ .netns_refund = true,
.maxtype = IFLA_IPOIB_MAX,
.policy = ipoib_policy,
.priv_size = sizeof(struct ipoib_dev_priv),
- .setup = ipoib_setup,
+ .setup = ipoib_setup_common,
.newlink = ipoib_new_child_link,
+ .dellink = ipoib_del_child_link,
.changelink = ipoib_changelink,
- .dellink = ipoib_unregister_child_dev,
.get_size = ipoib_get_size,
.fill_info = ipoib_fill_info,
+ .get_num_rx_queues = ipoib_get_max_num_queues,
+ .get_num_tx_queues = ipoib_get_max_num_queues,
};
+struct rtnl_link_ops *ipoib_get_link_ops(void)
+{
+ return &ipoib_link_ops;
+}
+
int __init ipoib_netlink_init(void)
{
return rtnl_link_register(&ipoib_link_ops);
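
The new ->newlink() signature above packs its arguments into struct rtnl_newlink_params. A minimal sketch of how a handler unpacks them, using only the accessors visible in this hunk (rtnl_newlink_link_net(), params->tb, params->data); example_newlink() itself is a hypothetical handler, not part of the patch.

static int example_newlink(struct net_device *dev,
			   struct rtnl_newlink_params *params,
			   struct netlink_ext_ack *extack)
{
	struct net *link_net = rtnl_newlink_link_net(params);
	struct nlattr **tb = params->tb;
	struct net_device *pdev;

	if (!tb[IFLA_LINK])
		return -EINVAL;

	/* resolve the parent device in the link's netns, as IPoIB does */
	pdev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
	if (!pdev)
		return -ENODEV;

	/* driver-specific attributes, if any, would be read from params->data */
	return 0;
}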
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 78845b6e8b81..86983080d28b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -35,9 +35,10 @@
#include "ipoib.h"
-int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid, int set_qkey)
+int ipoib_mcast_attach(struct net_device *dev, struct ib_device *hca,
+ union ib_gid *mgid, u16 mlid, int set_qkey, u32 qkey)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ib_qp_attr *qp_attr = NULL;
int ret;
u16 pkey_index;
@@ -51,12 +52,12 @@ int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid, int
if (set_qkey) {
ret = -ENOMEM;
- qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
+ qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
if (!qp_attr)
goto out;
/* set correct QKey for QP */
- qp_attr->qkey = priv->qkey;
+ qp_attr->qkey = qkey;
ret = ib_modify_qp(priv->qp, qp_attr, IB_QP_QKEY);
if (ret) {
ipoib_warn(priv, "failed to modify QP, ret = %d\n", ret);
@@ -74,9 +75,20 @@ out:
return ret;
}
+int ipoib_mcast_detach(struct net_device *dev, struct ib_device *hca,
+ union ib_gid *mgid, u16 mlid)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int ret;
+
+ ret = ib_detach_mcast(priv->qp, mgid, mlid);
+
+ return ret;
+}
+
int ipoib_init_qp(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
int ret;
struct ib_qp_attr qp_attr;
int attr_mask;
@@ -130,12 +142,13 @@ out_fail:
int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ib_qp_init_attr init_attr = {
.cap = {
.max_send_wr = ipoib_sendq_size,
.max_recv_wr = ipoib_recvq_size,
- .max_send_sge = 1,
+ .max_send_sge = min_t(u32, priv->ca->attrs.max_send_sge,
+ MAX_SKB_FRAGS + 1),
.max_recv_sge = IPOIB_UD_RX_SG
},
.sq_sig_type = IB_SIGNAL_ALL_WR,
@@ -143,24 +156,9 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
};
struct ib_cq_init_attr cq_attr = {};
- int ret, size;
+ int ret, size, req_vec;
int i;
-
- priv->pd = ib_alloc_pd(priv->ca);
- if (IS_ERR(priv->pd)) {
- printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name);
- return -ENODEV;
- }
-
- /*
- * the various IPoIB tasks assume they will never race against
- * themselves, so always use a single thread workqueue
- */
- priv->wq = create_singlethread_workqueue("ipoib_wq");
- if (!priv->wq) {
- printk(KERN_WARNING "ipoib: failed to allocate device WQ\n");
- goto out_free_pd;
- }
+ static atomic_t counter;
size = ipoib_recvq_size + 1;
ret = ipoib_cm_dev_init(dev);
@@ -171,22 +169,25 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
else
size += ipoib_recvq_size * ipoib_max_conn_qp;
} else
- if (ret != -ENOSYS)
- goto out_free_wq;
+ if (ret != -EOPNOTSUPP)
+ return ret;
+ req_vec = atomic_inc_return(&counter) * 2;
cq_attr.cqe = size;
- priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL,
- dev, &cq_attr);
+ cq_attr.comp_vector = req_vec % priv->ca->num_comp_vectors;
+ priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_rx_completion, NULL,
+ priv, &cq_attr);
if (IS_ERR(priv->recv_cq)) {
- printk(KERN_WARNING "%s: failed to create receive CQ\n", ca->name);
+ pr_warn("%s: failed to create receive CQ\n", ca->name);
goto out_cm_dev_cleanup;
}
cq_attr.cqe = ipoib_sendq_size;
- priv->send_cq = ib_create_cq(priv->ca, ipoib_send_comp_handler, NULL,
- dev, &cq_attr);
+ cq_attr.comp_vector = (req_vec + 1) % priv->ca->num_comp_vectors;
+ priv->send_cq = ib_create_cq(priv->ca, ipoib_ib_tx_completion, NULL,
+ priv, &cq_attr);
if (IS_ERR(priv->send_cq)) {
- printk(KERN_WARNING "%s: failed to create send CQ\n", ca->name);
+ pr_warn("%s: failed to create send CQ\n", ca->name);
goto out_free_recv_cq;
}
@@ -196,34 +197,33 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
init_attr.send_cq = priv->send_cq;
init_attr.recv_cq = priv->recv_cq;
- if (priv->hca_caps & IB_DEVICE_UD_TSO)
+ if (priv->kernel_caps & IBK_UD_TSO)
init_attr.create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;
- if (priv->hca_caps & IB_DEVICE_BLOCK_MULTICAST_LOOPBACK)
+ if (priv->kernel_caps & IBK_BLOCK_MULTICAST_LOOPBACK)
init_attr.create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
if (priv->hca_caps & IB_DEVICE_MANAGED_FLOW_STEERING)
init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;
- if (dev->features & NETIF_F_SG)
- init_attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
+ if (priv->kernel_caps & IBK_RDMA_NETDEV_OPA)
+ init_attr.create_flags |= IB_QP_CREATE_NETDEV_USE;
priv->qp = ib_create_qp(priv->pd, &init_attr);
if (IS_ERR(priv->qp)) {
- printk(KERN_WARNING "%s: failed to create QP\n", ca->name);
+ pr_warn("%s: failed to create QP\n", ca->name);
goto out_free_send_cq;
}
- priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff;
- priv->dev->dev_addr[2] = (priv->qp->qp_num >> 8) & 0xff;
- priv->dev->dev_addr[3] = (priv->qp->qp_num ) & 0xff;
+ if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
+ goto out_free_send_cq;
for (i = 0; i < MAX_SKB_FRAGS + 1; ++i)
priv->tx_sge[i].lkey = priv->pd->local_dma_lkey;
- priv->tx_wr.opcode = IB_WR_SEND;
- priv->tx_wr.sg_list = priv->tx_sge;
- priv->tx_wr.send_flags = IB_SEND_SIGNALED;
+ priv->tx_wr.wr.opcode = IB_WR_SEND;
+ priv->tx_wr.wr.sg_list = priv->tx_sge;
+ priv->tx_wr.wr.send_flags = IB_SEND_SIGNALED;
priv->rx_sge[0].lkey = priv->pd->local_dma_lkey;
@@ -233,6 +233,11 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->rx_wr.next = NULL;
priv->rx_wr.sg_list = priv->rx_sge;
+ if (init_attr.cap.max_send_sge > 1)
+ dev->features |= NETIF_F_SG;
+
+ priv->max_send_sge = init_attr.cap.max_send_sge;
+
return 0;
out_free_send_cq:
@@ -244,43 +249,22 @@ out_free_recv_cq:
out_cm_dev_cleanup:
ipoib_cm_dev_cleanup(dev);
-out_free_wq:
- destroy_workqueue(priv->wq);
- priv->wq = NULL;
-
-out_free_pd:
- ib_dealloc_pd(priv->pd);
-
return -ENODEV;
}
void ipoib_transport_dev_cleanup(struct net_device *dev)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
if (priv->qp) {
if (ib_destroy_qp(priv->qp))
ipoib_warn(priv, "ib_qp_destroy failed\n");
priv->qp = NULL;
- clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
- }
-
- if (ib_destroy_cq(priv->send_cq))
- ipoib_warn(priv, "ib_cq_destroy (send) failed\n");
-
- if (ib_destroy_cq(priv->recv_cq))
- ipoib_warn(priv, "ib_cq_destroy (recv) failed\n");
-
- ipoib_cm_dev_cleanup(dev);
-
- if (priv->wq) {
- flush_workqueue(priv->wq);
- destroy_workqueue(priv->wq);
- priv->wq = NULL;
}
- ib_dealloc_pd(priv->pd);
+ ib_destroy_cq(priv->send_cq);
+ ib_destroy_cq(priv->recv_cq);
}
void ipoib_event(struct ib_event_handler *handler,
@@ -293,16 +277,18 @@ void ipoib_event(struct ib_event_handler *handler,
return;
ipoib_dbg(priv, "Event %d on device %s port %d\n", record->event,
- record->device->name, record->element.port_num);
+ dev_name(&record->device->dev), record->element.port_num);
- if (record->event == IB_EVENT_SM_CHANGE ||
- record->event == IB_EVENT_CLIENT_REREGISTER) {
- queue_work(ipoib_workqueue, &priv->flush_light);
+ if (record->event == IB_EVENT_CLIENT_REREGISTER) {
+ ipoib_queue_work(priv, IPOIB_FLUSH_LIGHT);
} else if (record->event == IB_EVENT_PORT_ERR ||
record->event == IB_EVENT_PORT_ACTIVE ||
record->event == IB_EVENT_LID_CHANGE) {
- queue_work(ipoib_workqueue, &priv->flush_normal);
+ ipoib_queue_work(priv, IPOIB_FLUSH_NORMAL);
} else if (record->event == IB_EVENT_PKEY_CHANGE) {
- queue_work(ipoib_workqueue, &priv->flush_heavy);
+ ipoib_queue_work(priv, IPOIB_FLUSH_HEAVY);
+ } else if (record->event == IB_EVENT_GID_CHANGE &&
+ !test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
+ ipoib_queue_work(priv, IPOIB_FLUSH_LIGHT);
}
}
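
The req_vec arithmetic in ipoib_transport_dev_init() above spreads CQ interrupts across the HCA's completion vectors: each new interface takes the next even/odd pair, modulo ca->num_comp_vectors, so RX and TX completions land on adjacent vectors. A condensed sketch of that selection; example_pick_comp_vectors() is an illustrative helper, not driver code.

static void example_pick_comp_vectors(struct ib_device *ca,
				      int *rx_vec, int *tx_vec)
{
	static atomic_t counter;
	int req_vec = atomic_inc_return(&counter) * 2;

	/* RX and TX CQs land on adjacent vectors, wrapping per device */
	*rx_vec = req_vec % ca->num_comp_vectors;
	*tx_vec = (req_vec + 1) % ca->num_comp_vectors;
}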
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index fca1a882de27..243e8f555eca 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -30,90 +30,138 @@
* SOFTWARE.
*/
-#include <linux/module.h>
+#include <linux/sched/signal.h>
#include <linux/init.h>
#include <linux/seq_file.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "ipoib.h"
-static ssize_t show_parent(struct device *d, struct device_attribute *attr,
+static ssize_t parent_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct net_device *dev = to_net_dev(d);
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
- return sprintf(buf, "%s\n", priv->parent->name);
+ return sysfs_emit(buf, "%s\n", priv->parent->name);
}
-static DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL);
+static DEVICE_ATTR_RO(parent);
+static bool is_child_unique(struct ipoib_dev_priv *ppriv,
+ struct ipoib_dev_priv *priv)
+{
+ struct ipoib_dev_priv *tpriv;
+ bool result = true;
+
+ /*
+ * Since the legacy sysfs interface uses pkey for deletion it cannot
+ * support more than one interface with the same pkey, it creates
+ * ambiguity. The RTNL interface deletes using the netdev so it does
+ * not have a problem to support duplicated pkeys.
+ */
+ if (priv->child_type != IPOIB_LEGACY_CHILD)
+ return true;
+
+ /*
+ * First ensure this isn't a duplicate. We check the parent device and
+ * then all of the legacy child interfaces to make sure the Pkey
+ * doesn't match.
+ */
+ if (ppriv->pkey == priv->pkey)
+ return false;
+
+ netdev_lock(ppriv->dev);
+ list_for_each_entry(tpriv, &ppriv->child_intfs, list) {
+ if (tpriv->pkey == priv->pkey &&
+ tpriv->child_type == IPOIB_LEGACY_CHILD) {
+ result = false;
+ break;
+ }
+ }
+ netdev_unlock(ppriv->dev);
+
+ return result;
+}
+
+/*
+ * NOTE: If this function fails then the priv->dev will remain valid, however
+ * priv will have been freed and must not be touched by caller in the error
+ * case.
+ *
+ * If (ndev->reg_state == NETREG_UNINITIALIZED) then it is up to the caller to
+ * free the net_device (just as rtnl_newlink does) otherwise the net_device
+ * will be freed when the rtnl is unlocked.
+ */
int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
u16 pkey, int type)
{
+ struct net_device *ndev = priv->dev;
int result;
+ struct rdma_netdev *rn = netdev_priv(ndev);
- priv->max_ib_mtu = ppriv->max_ib_mtu;
- /* MTU will be reset when mcast join happens */
- priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
- priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
- priv->parent = ppriv->dev;
- set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
+ /*
+ * We do not need to touch priv if register_netdevice fails, so just
+ * always use this flow.
+ */
+ ndev->priv_destructor = ipoib_intf_free;
- result = ipoib_set_dev_features(priv, ppriv->ca);
- if (result)
- goto err;
+ /*
+ * Racing with unregister of the parent must be prevented by the
+ * caller.
+ */
+ WARN_ON(ppriv->dev->reg_state != NETREG_REGISTERED);
- priv->pkey = pkey;
+ if (pkey == 0 || pkey == 0x8000) {
+ result = -EINVAL;
+ goto out_early;
+ }
- memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
- priv->dev->broadcast[8] = pkey >> 8;
- priv->dev->broadcast[9] = pkey & 0xff;
+ rn->mtu = priv->mcast_mtu;
- result = ipoib_dev_init(priv->dev, ppriv->ca, ppriv->port);
- if (result < 0) {
- ipoib_warn(ppriv, "failed to initialize subinterface: "
- "device %s, port %d",
- ppriv->ca->name, ppriv->port);
- goto err;
+ priv->parent = ppriv->dev;
+ priv->pkey = pkey;
+ priv->child_type = type;
+
+ if (!is_child_unique(ppriv, priv)) {
+ result = -ENOTUNIQ;
+ goto out_early;
}
- result = register_netdevice(priv->dev);
+ result = register_netdevice(ndev);
if (result) {
ipoib_warn(priv, "failed to initialize; error %i", result);
- goto register_failed;
- }
- ipoib_create_debug_files(priv->dev);
+ /*
+ * register_netdevice sometimes calls priv_destructor,
+ * sometimes not. Make sure it was done.
+ */
+ goto out_early;
+ }
/* RTNL childs don't need proprietary sysfs entries */
if (type == IPOIB_LEGACY_CHILD) {
- if (ipoib_cm_add_mode_attr(priv->dev))
+ if (ipoib_cm_add_mode_attr(ndev))
goto sysfs_failed;
- if (ipoib_add_pkey_attr(priv->dev))
+ if (ipoib_add_pkey_attr(ndev))
goto sysfs_failed;
- if (ipoib_add_umcast_attr(priv->dev))
+ if (ipoib_add_umcast_attr(ndev))
goto sysfs_failed;
- if (device_create_file(&priv->dev->dev, &dev_attr_parent))
+ if (device_create_file(&ndev->dev, &dev_attr_parent))
goto sysfs_failed;
}
- priv->child_type = type;
- list_add_tail(&priv->list, &ppriv->child_intfs);
-
return 0;
sysfs_failed:
- result = -ENOMEM;
- ipoib_delete_debug_files(priv->dev);
unregister_netdevice(priv->dev);
+ return -ENOMEM;
-register_failed:
- ipoib_dev_cleanup(priv->dev);
-
-err:
+out_early:
+ if (ndev->priv_destructor)
+ ndev->priv_destructor(ndev);
return result;
}
@@ -121,87 +169,130 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
{
struct ipoib_dev_priv *ppriv, *priv;
char intf_name[IFNAMSIZ];
- struct ipoib_dev_priv *tpriv;
+ struct net_device *ndev;
int result;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- ppriv = netdev_priv(pdev);
-
- snprintf(intf_name, sizeof intf_name, "%s.%04x",
- ppriv->dev->name, pkey);
- priv = ipoib_intf_alloc(intf_name);
- if (!priv)
- return -ENOMEM;
-
if (!rtnl_trylock())
return restart_syscall();
- down_write(&ppriv->vlan_rwsem);
+ if (pdev->reg_state != NETREG_REGISTERED) {
+ rtnl_unlock();
+ return -EPERM;
+ }
- /*
- * First ensure this isn't a duplicate. We check the parent device and
- * then all of the legacy child interfaces to make sure the Pkey
- * doesn't match.
+ ppriv = ipoib_priv(pdev);
+
+ /* If you increase IFNAMSIZ, update snprintf below
+ * to allow longer names.
*/
- if (ppriv->pkey == pkey) {
- result = -ENOTUNIQ;
+ BUILD_BUG_ON(IFNAMSIZ != 16);
+ snprintf(intf_name, sizeof(intf_name), "%.10s.%04x", ppriv->dev->name,
+ pkey);
+
+ ndev = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
+ if (IS_ERR(ndev)) {
+ result = PTR_ERR(ndev);
goto out;
}
+ priv = ipoib_priv(ndev);
- list_for_each_entry(tpriv, &ppriv->child_intfs, list) {
- if (tpriv->pkey == pkey &&
- tpriv->child_type == IPOIB_LEGACY_CHILD) {
- result = -ENOTUNIQ;
- goto out;
- }
- }
+ ndev->rtnl_link_ops = ipoib_get_link_ops();
result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
+ if (result && ndev->reg_state == NETREG_UNINITIALIZED)
+ free_netdev(ndev);
+
out:
- up_write(&ppriv->vlan_rwsem);
+ rtnl_unlock();
+
+ return result;
+}
+
+struct ipoib_vlan_delete_work {
+ struct work_struct work;
+ struct net_device *dev;
+};
- if (result)
- free_netdev(priv->dev);
+/*
+ * sysfs callbacks of a netdevice cannot obtain the rtnl lock as
+ * unregister_netdev ultimately deletes the sysfs files while holding the rtnl
+ * lock. This deadlocks the system.
+ *
+ * A callback can use rtnl_trylock to avoid the deadlock but it cannot call
+ * unregister_netdev as that internally takes and releases the rtnl_lock. So
+ * instead we find the netdev to unregister and then do the actual unregister
+ * from the global work queue where we can obtain the rtnl_lock safely.
+ */
+static void ipoib_vlan_delete_task(struct work_struct *work)
+{
+ struct ipoib_vlan_delete_work *pwork =
+ container_of(work, struct ipoib_vlan_delete_work, work);
+ struct net_device *dev = pwork->dev;
+
+ rtnl_lock();
+
+ /* Unregistering tasks can race with another task or parent removal */
+ if (dev->reg_state == NETREG_REGISTERED) {
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+ ipoib_dbg(ppriv, "delete child vlan %s\n", dev->name);
+ unregister_netdevice(dev);
+ }
rtnl_unlock();
- return result;
+ kfree(pwork);
}
int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
{
struct ipoib_dev_priv *ppriv, *priv, *tpriv;
- struct net_device *dev = NULL;
+ int rc;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- ppriv = netdev_priv(pdev);
-
if (!rtnl_trylock())
return restart_syscall();
- down_write(&ppriv->vlan_rwsem);
+ if (pdev->reg_state != NETREG_REGISTERED) {
+ rtnl_unlock();
+ return -EPERM;
+ }
+
+ ppriv = ipoib_priv(pdev);
+
+ rc = -ENODEV;
+ netdev_lock(ppriv->dev);
list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey &&
priv->child_type == IPOIB_LEGACY_CHILD) {
- unregister_netdevice(priv->dev);
- list_del(&priv->list);
- dev = priv->dev;
+ struct ipoib_vlan_delete_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ list_del_init(&priv->list);
+ work->dev = priv->dev;
+ INIT_WORK(&work->work, ipoib_vlan_delete_task);
+ queue_work(ipoib_workqueue, &work->work);
+
+ rc = 0;
break;
}
}
- up_write(&ppriv->vlan_rwsem);
+out:
+ netdev_unlock(ppriv->dev);
rtnl_unlock();
- if (dev) {
- free_netdev(dev);
- return 0;
- }
-
- return -ENODEV;
+ return rc;
}
diff --git a/drivers/infiniband/ulp/iser/Kconfig b/drivers/infiniband/ulp/iser/Kconfig
index d00af71a2cfc..3016a0c9a9f0 100644
--- a/drivers/infiniband/ulp/iser/Kconfig
+++ b/drivers/infiniband/ulp/iser/Kconfig
@@ -1,11 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_ISER
tristate "iSCSI Extensions for RDMA (iSER)"
depends on SCSI && INET && INFINIBAND_ADDR_TRANS
select SCSI_ISCSI_ATTRS
- ---help---
+ help
Support for the iSCSI Extensions for RDMA (iSER) Protocol
- over InfiniBand. This allows you to access storage devices
- that speak iSCSI over iSER over InfiniBand.
+ over InfiniBand. This allows you to access storage devices
+ that speak iSCSI over iSER over InfiniBand.
The iSER protocol is defined by IETF.
See <http://www.ietf.org/rfc/rfc5046.txt>
diff --git a/drivers/infiniband/ulp/iser/Makefile b/drivers/infiniband/ulp/iser/Makefile
index fe6cd15f2317..2f3e788638d4 100644
--- a/drivers/infiniband/ulp/iser/Makefile
+++ b/drivers/infiniband/ulp/iser/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_INFINIBAND_ISER) += ib_iser.o
ib_iser-y := iser_verbs.o iser_initiator.o iser_memory.o \
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 1ace5d83a4d7..2e3c0516ce8f 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -62,7 +62,7 @@
#include <net/sock.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -77,36 +77,56 @@
MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Alex Nezhinsky, Dan Bar Dov, Or Gerlitz");
-MODULE_VERSION(DRV_VER);
-static struct scsi_host_template iscsi_iser_sht;
+static const struct scsi_host_template iscsi_iser_sht;
static struct iscsi_transport iscsi_iser_transport;
static struct scsi_transport_template *iscsi_iser_scsi_transport;
static struct workqueue_struct *release_wq;
+static DEFINE_MUTEX(unbind_iser_conn_mutex);
struct iser_global ig;
int iser_debug_level = 0;
module_param_named(debug_level, iser_debug_level, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)");
+static int iscsi_iser_set(const char *val, const struct kernel_param *kp);
+static const struct kernel_param_ops iscsi_iser_size_ops = {
+ .set = iscsi_iser_set,
+ .get = param_get_uint,
+};
+
static unsigned int iscsi_max_lun = 512;
-module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
-MODULE_PARM_DESC(max_lun, "Max LUNs to allow per session (default:512");
+module_param_cb(max_lun, &iscsi_iser_size_ops, &iscsi_max_lun, S_IRUGO);
+MODULE_PARM_DESC(max_lun, "Max LUNs to allow per session, should > 0 (default:512)");
unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS;
-module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024");
+module_param_cb(max_sectors, &iscsi_iser_size_ops, &iser_max_sectors,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command, should > 0 (default:1024)");
+
+bool iser_always_reg = true;
+module_param_named(always_register, iser_always_reg, bool, S_IRUGO);
+MODULE_PARM_DESC(always_register,
+ "Always register memory, even for continuous memory regions (default:true)");
bool iser_pi_enable = false;
module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO);
MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");
-int iser_pi_guard;
-module_param_named(pi_guard, iser_pi_guard, int, S_IRUGO);
-MODULE_PARM_DESC(pi_guard, "T10-PI guard_type [deprecated]");
+static int iscsi_iser_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+ unsigned int n = 0;
+
+ ret = kstrtouint(val, 10, &n);
+ if (ret != 0 || n == 0)
+ return -EINVAL;
+
+ return param_set_uint(val, kp);
+}
/*
- * iscsi_iser_recv() - Process a successfull recv completion
+ * iscsi_iser_recv() - Process a successful recv completion
* @conn: iscsi connection
* @hdr: iscsi header
* @rx_data: buffer containing receive data payload
@@ -115,13 +135,11 @@ MODULE_PARM_DESC(pi_guard, "T10-PI guard_type [deprecated]");
* Notes: In case of data length errors or iscsi PDU completion failures
* this routine will signal iscsi layer of connection failure.
*/
-void
-iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
- char *rx_data, int rx_data_len)
+void iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *rx_data, int rx_data_len)
{
int rc = 0;
int datalen;
- int ahslen;
/* verify PDU length */
datalen = ntoh24(hdr->dlength);
@@ -136,9 +154,6 @@ iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
iser_dbg("aligned datalen (%d) hdr, %d (IB)\n",
datalen, rx_data_len);
- /* read AHS */
- ahslen = hdr->hlength * 4;
-
rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
if (rc && rc != ISCSI_ERR_NO_SCSI_CMD)
goto error;
@@ -156,8 +171,7 @@ error:
* Netes: This routine can't fail, just assign iscsi task
* hdr and max hdr size.
*/
-static int
-iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
+static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
{
struct iscsi_iser_task *iser_task = task->dd_data;
@@ -178,33 +192,24 @@ iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
* state mutex to avoid dereferencing the IB device which
* may have already been terminated.
*/
-int
-iser_initialize_task_headers(struct iscsi_task *task,
- struct iser_tx_desc *tx_desc)
+int iser_initialize_task_headers(struct iscsi_task *task,
+ struct iser_tx_desc *tx_desc)
{
struct iser_conn *iser_conn = task->conn->dd_data;
struct iser_device *device = iser_conn->ib_conn.device;
struct iscsi_iser_task *iser_task = task->dd_data;
u64 dma_addr;
- const bool mgmt_task = !task->sc && !in_interrupt();
- int ret = 0;
- if (unlikely(mgmt_task))
- mutex_lock(&iser_conn->state_mutex);
-
- if (unlikely(iser_conn->state != ISER_CONN_UP)) {
- ret = -ENODEV;
- goto out;
- }
+ if (unlikely(iser_conn->state != ISER_CONN_UP))
+ return -ENODEV;
dma_addr = ib_dma_map_single(device->ib_device, (void *)tx_desc,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
- if (ib_dma_mapping_error(device->ib_device, dma_addr)) {
- ret = -ENOMEM;
- goto out;
- }
+ if (ib_dma_mapping_error(device->ib_device, dma_addr))
+ return -ENOMEM;
- tx_desc->wr_idx = 0;
+ tx_desc->inv_wr.next = NULL;
+ tx_desc->reg_wr.wr.next = NULL;
tx_desc->mapped = true;
tx_desc->dma_addr = dma_addr;
tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
@@ -212,11 +217,8 @@ iser_initialize_task_headers(struct iscsi_task *task,
tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
iser_task->iser_conn = iser_conn;
-out:
- if (unlikely(mgmt_task))
- mutex_unlock(&iser_conn->state_mutex);
- return ret;
+ return 0;
}
/**
@@ -228,8 +230,7 @@ out:
* Return: Returns zero on success or -ENOMEM when failing
* to init task headers (dma mapping error).
*/
-static int
-iscsi_iser_task_init(struct iscsi_task *task)
+static int iscsi_iser_task_init(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
int ret;
@@ -263,27 +264,22 @@ iscsi_iser_task_init(struct iscsi_task *task)
* xmit.
*
**/
-static int
-iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+static int iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
+ struct iscsi_task *task)
{
- int error = 0;
-
iser_dbg("mtask xmit [cid %d itt 0x%x]\n", conn->id, task->itt);
- error = iser_send_control(conn, task);
-
/* since iser xmits control with zero copy, tasks can not be recycled
* right after sending them.
* The recycling scheme is based on whether a response is expected
* - if yes, the task is recycled at iscsi_complete_pdu
* - if no, the task is recycled at iser_snd_completion
*/
- return error;
+ return iser_send_control(conn, task);
}
-static int
-iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
- struct iscsi_task *task)
+static int iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
+ struct iscsi_task *task)
{
struct iscsi_r2t_info *r2t = &task->unsol_r2t;
struct iscsi_data hdr;
@@ -317,8 +313,7 @@ iscsi_iser_task_xmit_unsol_data_exit:
*
* Return: zero on success or escalates $error on failure.
*/
-static int
-iscsi_iser_task_xmit(struct iscsi_task *task)
+static int iscsi_iser_task_xmit(struct iscsi_task *task)
{
struct iscsi_conn *conn = task->conn;
struct iscsi_iser_task *iser_task = task->dd_data;
@@ -394,24 +389,20 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
* @task: iscsi task
* @sector: error sector if exsists (output)
*
- * Return: zero if no data-integrity errors have occured
- * 0x1: data-integrity error occured in the guard-block
- * 0x2: data-integrity error occured in the reference tag
- * 0x3: data-integrity error occured in the application tag
+ * Return: zero if no data-integrity errors have occurred
+ * 0x1: data-integrity error occurred in the guard-block
+ * 0x2: data-integrity error occurred in the reference tag
+ * 0x3: data-integrity error occurred in the application tag
*
* In addition the error sector is marked.
*/
-static u8
-iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector)
+static u8 iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector)
{
struct iscsi_iser_task *iser_task = task->dd_data;
+ enum iser_data_dir dir = iser_task->dir[ISER_DIR_IN] ?
+ ISER_DIR_IN : ISER_DIR_OUT;
- if (iser_task->dir[ISER_DIR_IN])
- return iser_check_task_pi_status(iser_task, ISER_DIR_IN,
- sector);
- else
- return iser_check_task_pi_status(iser_task, ISER_DIR_OUT,
- sector);
+ return iser_check_task_pi_status(iser_task, dir, sector);
}
/**
@@ -451,14 +442,12 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session,
* @is_leading: indicate if this is the session leading connection (MCS)
*
* Return: zero on success, $error if iscsi_conn_bind fails and
- * -EINVAL in case end-point doesn't exsits anymore or iser connection
+ * -EINVAL in case end-point doesn't exists anymore or iser connection
* state is not UP (teardown already started).
*/
-static int
-iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
- struct iscsi_cls_conn *cls_conn,
- uint64_t transport_eph,
- int is_leading)
+static int iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn,
+ uint64_t transport_eph, int is_leading)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iser_conn *iser_conn;
@@ -500,6 +489,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
iser_conn->iscsi_conn = conn;
out:
+ iscsi_put_endpoint(ep);
mutex_unlock(&iser_conn->state_mutex);
return error;
}
@@ -512,8 +502,7 @@ out:
* from this point iscsi must call conn_stop in session/connection
* teardown so iser transport must wait for it.
*/
-static int
-iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
+static int iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *iscsi_conn;
struct iser_conn *iser_conn;
@@ -535,8 +524,7 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
* handle, so we call it under iser the state lock to protect against
* this kind of race.
*/
-static void
-iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+static void iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iser_conn *iser_conn = conn->dd_data;
@@ -549,12 +537,14 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
*/
if (iser_conn) {
mutex_lock(&iser_conn->state_mutex);
+ mutex_lock(&unbind_iser_conn_mutex);
iser_conn_terminate(iser_conn);
iscsi_conn_stop(cls_conn, flag);
/* unbind */
iser_conn->iscsi_conn = NULL;
conn->dd_data = NULL;
+ mutex_unlock(&unbind_iser_conn_mutex);
complete(&iser_conn->stop_completion);
mutex_unlock(&iser_conn->state_mutex);
@@ -569,26 +559,31 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
*
* Removes and free iscsi host.
*/
-static void
-iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
+static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
{
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
iscsi_session_teardown(cls_session);
- iscsi_host_remove(shost);
+ iscsi_host_remove(shost, false);
iscsi_host_free(shost);
}
-static inline unsigned int
-iser_dif_prot_caps(int prot_caps)
+static inline unsigned int iser_dif_prot_caps(int prot_caps)
{
- return ((prot_caps & IB_PROT_T10DIF_TYPE_1) ?
- SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE0_PROTECTION |
- SHOST_DIX_TYPE1_PROTECTION : 0) |
- ((prot_caps & IB_PROT_T10DIF_TYPE_2) ?
- SHOST_DIF_TYPE2_PROTECTION | SHOST_DIX_TYPE2_PROTECTION : 0) |
- ((prot_caps & IB_PROT_T10DIF_TYPE_3) ?
- SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE3_PROTECTION : 0);
+ int ret = 0;
+
+ if (prot_caps & IB_PROT_T10DIF_TYPE_1)
+ ret |= SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE0_PROTECTION |
+ SHOST_DIX_TYPE1_PROTECTION;
+ if (prot_caps & IB_PROT_T10DIF_TYPE_2)
+ ret |= SHOST_DIF_TYPE2_PROTECTION |
+ SHOST_DIX_TYPE2_PROTECTION;
+ if (prot_caps & IB_PROT_T10DIF_TYPE_3)
+ ret |= SHOST_DIF_TYPE3_PROTECTION |
+ SHOST_DIX_TYPE3_PROTECTION;
+
+ return ret;
}
/**
@@ -607,11 +602,11 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
uint32_t initial_cmdsn)
{
struct iscsi_cls_session *cls_session;
- struct iscsi_session *session;
struct Scsi_Host *shost;
struct iser_conn *iser_conn = NULL;
struct ib_conn *ib_conn;
- u16 max_cmds;
+ struct ib_device *ib_dev;
+ u32 max_fr_sectors;
shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
if (!shost)
@@ -629,9 +624,8 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
*/
if (ep) {
iser_conn = ep->dd_data;
- max_cmds = iser_conn->max_cmds;
shost->sg_tablesize = iser_conn->scsi_sg_tablesize;
- shost->max_sectors = iser_conn->scsi_max_sectors;
+ shost->can_queue = min_t(u16, cmds_max, iser_conn->max_cmds);
mutex_lock(&iser_conn->state_mutex);
if (iser_conn->state != ISER_CONN_UP) {
@@ -642,62 +636,59 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
}
ib_conn = &iser_conn->ib_conn;
+ ib_dev = ib_conn->device->ib_device;
if (ib_conn->pi_support) {
- u32 sig_caps = ib_conn->device->dev_attr.sig_prot_cap;
+ u32 sig_caps = ib_dev->attrs.sig_prot_cap;
+ shost->sg_prot_tablesize = shost->sg_tablesize;
scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP |
SHOST_DIX_GUARD_CRC);
}
- /*
- * Limit the sg_tablesize and max_sectors based on the device
- * max fastreg page list length.
- */
- shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
- ib_conn->device->dev_attr.max_fast_reg_page_list_len);
- shost->max_sectors = min_t(unsigned int,
- 1024, (shost->sg_tablesize * PAGE_SIZE) >> 9);
-
- if (iscsi_host_add(shost,
- ib_conn->device->ib_device->dma_device)) {
+ if (!(ib_dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG))
+ shost->virt_boundary_mask = SZ_4K - 1;
+
+ if (iscsi_host_add(shost, ib_dev->dev.parent)) {
mutex_unlock(&iser_conn->state_mutex);
goto free_host;
}
mutex_unlock(&iser_conn->state_mutex);
} else {
- max_cmds = ISER_DEF_XMIT_CMDS_MAX;
+ shost->can_queue = min_t(u16, cmds_max, ISER_DEF_XMIT_CMDS_MAX);
if (iscsi_host_add(shost, NULL))
goto free_host;
}
- if (cmds_max > max_cmds) {
- iser_info("cmds_max changed from %u to %u\n",
- cmds_max, max_cmds);
- cmds_max = max_cmds;
- }
+ max_fr_sectors = (shost->sg_tablesize * PAGE_SIZE) >> 9;
+ shost->max_sectors = min(iser_max_sectors, max_fr_sectors);
+
+ iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
+ iser_conn, shost->sg_tablesize,
+ shost->max_sectors);
+
+ if (shost->max_sectors < iser_max_sectors)
+ iser_warn("max_sectors was reduced from %u to %u\n",
+ iser_max_sectors, shost->max_sectors);
cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
- cmds_max, 0,
+ shost->can_queue, 0,
sizeof(struct iscsi_iser_task),
initial_cmdsn, 0);
if (!cls_session)
goto remove_host;
- session = cls_session->dd_data;
- shost->can_queue = session->scsi_cmds_max;
return cls_session;
remove_host:
- iscsi_host_remove(shost);
+ iscsi_host_remove(shost, false);
free_host:
iscsi_host_free(shost);
return NULL;
}
-static int
-iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
- enum iscsi_param param, char *buf, int buflen)
+static int iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen)
{
int value;
@@ -741,14 +732,14 @@ iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
}
/**
- * iscsi_iser_set_param() - set class connection parameter
+ * iscsi_iser_conn_get_stats() - get iscsi connection statistics
* @cls_conn: iscsi class connection
* @stats: iscsi stats to output
*
* Output connection statistics.
*/
-static void
-iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
+static void iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats)
{
struct iscsi_conn *conn = cls_conn->dd_data;
@@ -761,16 +752,13 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
stats->r2t_pdus = conn->r2t_pdus_cnt; /* always 0 */
stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
- stats->custom_length = 1;
- strcpy(stats->custom[0].desc, "fmr_unalign_cnt");
- stats->custom[0].value = conn->fmr_unalign_cnt;
+ stats->custom_length = 0;
}
static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
enum iscsi_param param, char *buf)
{
struct iser_conn *iser_conn = ep->dd_data;
- int len;
switch (param) {
case ISCSI_PARAM_CONN_PORT:
@@ -781,19 +769,17 @@ static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
return iscsi_conn_get_addr_param((struct sockaddr_storage *)
&iser_conn->ib_conn.cma_id->route.addr.dst_addr,
param, buf);
- break;
default:
- return -ENOSYS;
+ break;
}
-
- return len;
+ return -ENOSYS;
}
/**
* iscsi_iser_ep_connect() - Initiate iSER connection establishment
* @shost: scsi_host
* @dst_addr: destination address
- * @non-blocking: indicate if routine can block
+ * @non_blocking: indicate if routine can block
*
* Allocate an iscsi endpoint, an iser_conn structure and bind them.
* After that start RDMA connection establishment via rdma_cm. We
@@ -804,9 +790,9 @@ static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
* Return: iscsi_endpoint created by iscsi layer or ERR_PTR(error)
* if fails.
*/
-static struct iscsi_endpoint *
-iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
- int non_blocking)
+static struct iscsi_endpoint *iscsi_iser_ep_connect(struct Scsi_Host *shost,
+ struct sockaddr *dst_addr,
+ int non_blocking)
{
int err;
struct iser_conn *iser_conn;
@@ -849,8 +835,7 @@ failure:
* or more likely iser connection state transitioned to TEMINATING or
* DOWN during the wait period.
*/
-static int
-iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+static int iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
struct iser_conn *iser_conn = ep->dd_data;
int rc;
@@ -869,7 +854,7 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
iser_info("iser conn %p rc = %d\n", iser_conn, rc);
if (rc > 0)
- return 1; /* success, this is the equivalent of POLLOUT */
+ return 1; /* success, this is the equivalent of EPOLLOUT */
else if (!rc)
return 0; /* timeout */
else
@@ -885,8 +870,7 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
* and cleanup or actually call it immediately in case we didn't pass
* iscsi conn bind/start stage, thus it is safe.
*/
-static void
-iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
+static void iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
{
struct iser_conn *iser_conn = ep->dd_data;
@@ -968,22 +952,22 @@ static umode_t iser_attr_is_visible(int param_type, int param)
return 0;
}
-static struct scsi_host_template iscsi_iser_sht = {
+static const struct scsi_host_template iscsi_iser_sht = {
.module = THIS_MODULE,
.name = "iSCSI Initiator over iSER",
.queuecommand = iscsi_queuecommand,
.change_queue_depth = scsi_change_queue_depth,
.sg_tablesize = ISCSI_ISER_DEF_SG_TABLESIZE,
- .max_sectors = ISER_DEF_MAX_SECTORS,
.cmd_per_lun = ISER_DEF_CMD_PER_LUN,
+ .eh_timed_out = iscsi_eh_cmd_timed_out,
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler= iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
.target_alloc = iscsi_target_alloc,
- .use_clustering = DISABLE_CLUSTERING,
.proc_name = "iscsi_iser",
.this_id = -1,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct iscsi_cmd),
};
static struct iscsi_transport iscsi_iser_transport = {
@@ -996,6 +980,7 @@ static struct iscsi_transport iscsi_iser_transport = {
/* connection management */
.create_conn = iscsi_iser_conn_create,
.bind_conn = iscsi_iser_conn_bind,
+ .unbind_conn = iscsi_conn_unbind,
.destroy_conn = iscsi_conn_teardown,
.attr_is_visible = iser_attr_is_visible,
.set_param = iscsi_iser_set_param,
@@ -1029,11 +1014,6 @@ static int __init iser_init(void)
iser_dbg("Starting iSER datamover...\n");
- if (iscsi_max_lun < 1) {
- iser_err("Invalid max_lun value of %u\n", iscsi_max_lun);
- return -EINVAL;
- }
-
memset(&ig, 0, sizeof(struct iser_global));
ig.desc_cache = kmem_cache_create("iser_descriptors",
@@ -1052,7 +1032,8 @@ static int __init iser_init(void)
release_wq = alloc_workqueue("release workqueue", 0, 0);
if (!release_wq) {
iser_err("failed to allocate release workqueue\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_alloc_wq;
}
iscsi_iser_scsi_transport = iscsi_register_transport(
@@ -1060,12 +1041,14 @@ static int __init iser_init(void)
if (!iscsi_iser_scsi_transport) {
iser_err("iscsi_register_transport failed\n");
err = -EINVAL;
- goto register_transport_failure;
+ goto err_reg;
}
return 0;
-register_transport_failure:
+err_reg:
+ destroy_workqueue(release_wq);
+err_alloc_wq:
kmem_cache_destroy(ig.desc_cache);
return err;
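
The max_lun and max_sectors parameters above switch from module_param_named() to module_param_cb() so a custom .set handler can reject zero before delegating to param_set_uint(), replacing the runtime check that was removed from iser_init(). A self-contained sketch of that pattern with a hypothetical parameter (example_size); only the standard moduleparam helpers shown in the hunk are used.

static unsigned int example_size = 512;

static int example_size_set(const char *val, const struct kernel_param *kp)
{
	unsigned int n;

	/* reject non-numeric and zero values before storing */
	if (kstrtouint(val, 10, &n) || n == 0)
		return -EINVAL;

	return param_set_uint(val, kp);
}

static const struct kernel_param_ops example_size_ops = {
	.set = example_size_set,
	.get = param_get_uint,
};

module_param_cb(example_size, &example_size_ops, &example_size, S_IRUGO);
MODULE_PARM_DESC(example_size, "Illustrative size parameter, must be > 0 (default:512)");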
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 86f6583485ef..1d7ac24c4c00 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -48,6 +48,7 @@
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
+#include <scsi/iser.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
@@ -64,7 +65,6 @@
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
-#include <rdma/ib_fmr_pool.h>
#include <rdma/rdma_cm.h>
#define DRV_NAME "iser"
@@ -95,15 +95,12 @@
#define iser_err(fmt, arg...) \
pr_err(PFX "%s: " fmt, __func__ , ## arg)
-#define SHIFT_4K 12
-#define SIZE_4K (1ULL << SHIFT_4K)
-#define MASK_4K (~(SIZE_4K-1))
-
/* Default support is 512KB I/O size */
#define ISER_DEF_MAX_SECTORS 1024
-#define ISCSI_ISER_DEF_SG_TABLESIZE ((ISER_DEF_MAX_SECTORS * 512) >> SHIFT_4K)
-/* Maximum support is 8MB I/O size */
-#define ISCSI_ISER_MAX_SG_TABLESIZE ((16384 * 512) >> SHIFT_4K)
+#define ISCSI_ISER_DEF_SG_TABLESIZE \
+ ((ISER_DEF_MAX_SECTORS * SECTOR_SIZE) >> ilog2(SZ_4K))
+/* Maximum support is 16MB I/O size */
+#define ISCSI_ISER_MAX_SG_TABLESIZE ((32768 * SECTOR_SIZE) >> ilog2(SZ_4K))
#define ISER_DEF_XMIT_CMDS_DEFAULT 512
#if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT
@@ -122,8 +119,6 @@
#define ISER_QP_MAX_RECV_DTOS (ISER_DEF_XMIT_CMDS_MAX)
-#define ISER_MIN_POSTED_RX (ISER_DEF_XMIT_CMDS_MAX >> 2)
-
/* the max TX (send) WR supported by the iSER QP is defined by *
* max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect *
* to have at max for SCSI command. The tx posting & completion handling code *
@@ -151,46 +146,8 @@
- ISER_MAX_RX_MISC_PDUS) / \
(1 + ISER_INFLIGHT_DATAOUTS))
-#define ISER_WC_BATCH_COUNT 16
-#define ISER_SIGNAL_CMD_COUNT 32
-
-#define ISER_VER 0x10
-#define ISER_WSV 0x08
-#define ISER_RSV 0x04
-
-#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
-#define ISER_BEACON_WRID 0xfffffffffffffffeULL
-
-/**
- * struct iser_hdr - iSER header
- *
- * @flags: flags support (zbva, remote_inv)
- * @rsvd: reserved
- * @write_stag: write rkey
- * @write_va: write virtual address
- * @reaf_stag: read rkey
- * @read_va: read virtual address
- */
-struct iser_hdr {
- u8 flags;
- u8 rsvd[3];
- __be32 write_stag;
- __be64 write_va;
- __be32 read_stag;
- __be64 read_va;
-} __attribute__((packed));
-
-
-#define ISER_ZBVA_NOT_SUPPORTED 0x80
-#define ISER_SEND_W_INV_NOT_SUPPORTED 0x40
-
-struct iser_cm_hdr {
- u8 flags;
- u8 rsvd[3];
-} __packed;
-
/* Constant PDU lengths calculations */
-#define ISER_HEADERS_LEN (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))
+#define ISER_HEADERS_LEN (sizeof(struct iser_ctrl) + sizeof(struct iscsi_hdr))
#define ISER_RECV_DATA_SEG_LEN 128
#define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
@@ -225,20 +182,15 @@ enum iser_data_dir {
*
* @sg: pointer to the sg list
* @size: num entries of this sg
- * @data_len: total beffer byte len
+ * @data_len: total buffer byte len
* @dma_nents: returned by dma_map_sg
- * @orig_sg: pointer to the original sg list (in case
- * we used a copy)
- * @orig_size: num entris of orig sg list
*/
struct iser_data_buf {
struct scatterlist *sg;
- unsigned int size;
+ int size;
unsigned long data_len;
- unsigned int dma_nents;
- struct scatterlist *orig_sg;
- unsigned int orig_size;
- };
+ int dma_nents;
+};
/* fwd declarations */
struct iser_device;
@@ -251,12 +203,12 @@ struct iser_reg_resources;
*
* @sge: memory region sg element
* @rkey: memory region remote key
- * @mem_h: pointer to registration context (FMR/Fastreg)
+ * @desc: pointer to fast registration context
*/
struct iser_mem_reg {
- struct ib_sge sge;
- u32 rkey;
- void *mem_h;
+ struct ib_sge sge;
+ u32 rkey;
+ struct iser_fr_desc *desc;
};
enum iser_desc_type {
@@ -265,183 +217,109 @@ enum iser_desc_type {
ISCSI_TX_DATAOUT
};
-/* Maximum number of work requests per task:
- * Data memory region local invalidate + fast registration
- * Protection memory region local invalidate + fast registration
- * Signature memory region local invalidate + fast registration
- * PDU send
- */
-#define ISER_MAX_WRS 7
-
/**
- * struct iser_tx_desc - iSER TX descriptor (for send wr_id)
+ * struct iser_tx_desc - iSER TX descriptor
*
* @iser_header: iser header
* @iscsi_header: iscsi header
* @type: command/control/dataout
- * @dam_addr: header buffer dma_address
+ * @dma_addr: header buffer dma_address
* @tx_sg: sg[0] points to iser/iscsi headers
* sg[1] optionally points to either of immediate data
* unsolicited data-out or control
* @num_sge: number sges used on this TX task
+ * @cqe: completion handler
* @mapped: Is the task header mapped
- * @wr_idx: Current WR index
- * @wrs: Array of WRs per task
- * @data_reg: Data buffer registration details
- * @prot_reg: Protection buffer registration details
- * @sig_attrs: Signature attributes
+ * @reg_wr: registration WR
+ * @send_wr: send WR
+ * @inv_wr: invalidate WR
*/
struct iser_tx_desc {
- struct iser_hdr iser_header;
+ struct iser_ctrl iser_header;
struct iscsi_hdr iscsi_header;
enum iser_desc_type type;
u64 dma_addr;
struct ib_sge tx_sg[2];
int num_sge;
+ struct ib_cqe cqe;
bool mapped;
- u8 wr_idx;
- struct ib_send_wr wrs[ISER_MAX_WRS];
- struct iser_mem_reg data_reg;
- struct iser_mem_reg prot_reg;
- struct ib_sig_attrs sig_attrs;
+ struct ib_reg_wr reg_wr;
+ struct ib_send_wr send_wr;
+ struct ib_send_wr inv_wr;
};
#define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \
- sizeof(u64) + sizeof(struct ib_sge)))
+ sizeof(u64) + sizeof(struct ib_sge) + \
+ sizeof(struct ib_cqe)))
/**
- * struct iser_rx_desc - iSER RX descriptor (for recv wr_id)
+ * struct iser_rx_desc - iSER RX descriptor
*
* @iser_header: iser header
* @iscsi_header: iscsi header
* @data: received data segment
* @dma_addr: receive buffer dma address
* @rx_sg: ib_sge of receive buffer
+ * @cqe: completion handler
* @pad: for sense data TODO: Modify to maximum sense length supported
*/
struct iser_rx_desc {
- struct iser_hdr iser_header;
+ struct iser_ctrl iser_header;
struct iscsi_hdr iscsi_header;
char data[ISER_RECV_DATA_SEG_LEN];
u64 dma_addr;
struct ib_sge rx_sg;
+ struct ib_cqe cqe;
char pad[ISER_RX_PAD_SIZE];
-} __attribute__((packed));
-
-struct iser_conn;
-struct ib_conn;
-struct iscsi_iser_task;
+} __packed;
/**
- * struct iser_comp - iSER completion context
+ * struct iser_login_desc - iSER login descriptor
*
- * @device: pointer to device handle
- * @cq: completion queue
- * @wcs: work completion array
- * @tasklet: Tasklet handle
- * @active_qps: Number of active QPs attached
- * to completion context
+ * @req: pointer to login request buffer
+ * @rsp: pointer to login response buffer
+ * @req_dma: DMA address of login request buffer
+ * @rsp_dma: DMA address of login response buffer
+ * @sge: IB sge for login post recv
+ * @cqe: completion handler
*/
-struct iser_comp {
- struct iser_device *device;
- struct ib_cq *cq;
- struct ib_wc wcs[ISER_WC_BATCH_COUNT];
- struct tasklet_struct tasklet;
- int active_qps;
-};
+struct iser_login_desc {
+ void *req;
+ void *rsp;
+ u64 req_dma;
+ u64 rsp_dma;
+ struct ib_sge sge;
+ struct ib_cqe cqe;
+} __packed;
-/**
- * struct iser_device - Memory registration operations
- * per-device registration schemes
- *
- * @alloc_reg_res: Allocate registration resources
- * @free_reg_res: Free registration resources
- * @fast_reg_mem: Register memory buffers
- * @unreg_mem: Un-register memory buffers
- * @reg_desc_get: Get a registration descriptor for pool
- * @reg_desc_put: Get a registration descriptor to pool
- */
-struct iser_reg_ops {
- int (*alloc_reg_res)(struct ib_conn *ib_conn,
- unsigned cmds_max,
- unsigned int size);
- void (*free_reg_res)(struct ib_conn *ib_conn);
- int (*reg_mem)(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- struct iser_reg_resources *rsc,
- struct iser_mem_reg *reg);
- void (*unreg_mem)(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir);
- struct iser_fr_desc * (*reg_desc_get)(struct ib_conn *ib_conn);
- void (*reg_desc_put)(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc);
-};
+struct iser_conn;
+struct ib_conn;
/**
* struct iser_device - iSER device handle
*
* @ib_device: RDMA device
* @pd: Protection Domain for this device
- * @dev_attr: Device attributes container
- * @mr: Global DMA memory region
* @event_handler: IB events handle routine
* @ig_list: entry in devices list
* @refcount: Reference counter, dominated by open iser connections
- * @comps_used: Number of completion contexts used, Min between online
- * cpus and device max completion vectors
- * @comps: Dinamically allocated array of completion handlers
- * @reg_ops: Registration ops
*/
struct iser_device {
struct ib_device *ib_device;
struct ib_pd *pd;
- struct ib_device_attr dev_attr;
- struct ib_mr *mr;
struct ib_event_handler event_handler;
struct list_head ig_list;
int refcount;
- int comps_used;
- struct iser_comp *comps;
- struct iser_reg_ops *reg_ops;
};
-#define ISER_CHECK_GUARD 0xc0
-#define ISER_CHECK_REFTAG 0x0f
-#define ISER_CHECK_APPTAG 0x30
-
/**
- * struct iser_reg_resources - Fast registration recources
+ * struct iser_reg_resources - Fast registration resources
*
* @mr: memory region
- * @fmr_pool: pool of fmrs
- * @frpl: fast reg page list used by frwrs
- * @page_vec: fast reg page list used by fmr pool
- * @mr_valid: is mr valid indicator
+ * @sig_mr: signature memory region
*/
struct iser_reg_resources {
- union {
- struct ib_mr *mr;
- struct ib_fmr_pool *fmr_pool;
- };
- union {
- struct ib_fast_reg_page_list *frpl;
- struct iser_page_vec *page_vec;
- };
- u8 mr_valid:1;
-};
-
-/**
- * struct iser_pi_context - Protection information context
- *
- * @rsc: protection buffer registration resources
- * @sig_mr: signature enable memory region
- * @sig_mr_valid: is sig_mr valid indicator
- * @sig_protected: is region protected indicator
- */
-struct iser_pi_context {
- struct iser_reg_resources rsc;
- struct ib_mr *sig_mr;
- u8 sig_mr_valid:1;
- u8 sig_protected:1;
+ struct ib_mr *mr;
+ struct ib_mr *sig_mr;
};
/**
@@ -449,25 +327,29 @@ struct iser_pi_context {
*
* @list: entry in connection fastreg pool
* @rsc: data buffer registration resources
- * @pi_ctx: protection information context
+ * @sig_protected: is region protected indicator
+ * @all_list: first and last list members
*/
struct iser_fr_desc {
struct list_head list;
struct iser_reg_resources rsc;
- struct iser_pi_context *pi_ctx;
+ bool sig_protected;
+ struct list_head all_list;
};
/**
- * struct iser_fr_pool: connection fast registration pool
+ * struct iser_fr_pool - connection fast registration pool
*
* @list: list of fastreg descriptors
- * @lock: protects fmr/fastreg pool
+ * @lock: protects fastreg pool
* @size: size of the pool
+ * @all_list: first and last list members
*/
struct iser_fr_pool {
struct list_head list;
spinlock_t lock;
int size;
+ struct list_head all_list;
};
/**
@@ -475,28 +357,22 @@ struct iser_fr_pool {
*
* @cma_id: rdma_cm connection manager handle
* @qp: Connection Queue-pair
- * @post_recv_buf_count: post receive counter
- * @sig_count: send work request signal count
- * @rx_wr: receive work request for batch posts
+ * @cq: Connection completion queue
+ * @cq_size: The number of max outstanding completions
* @device: reference to iser device
- * @comp: iser completion context
+ * @fr_pool: connection fast registration pool
* @pi_support: Indicate device T10-PI support
- * @beacon: beacon send wr to signal all flush errors were drained
- * @flush_comp: completes when all connection completions consumed
- * @fr_pool: connection fast registration poool
+ * @reg_cqe: completion handler
*/
struct ib_conn {
struct rdma_cm_id *cma_id;
struct ib_qp *qp;
- int post_recv_buf_count;
- u8 sig_count;
- struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
+ struct ib_cq *cq;
+ u32 cq_size;
struct iser_device *device;
- struct iser_comp *comp;
- bool pi_support;
- struct ib_send_wr beacon;
- struct completion flush_comp;
struct iser_fr_pool fr_pool;
+ bool pi_support;
+ struct ib_cqe reg_cqe;
};
/**
@@ -508,27 +384,21 @@ struct ib_conn {
* @state: connection logical state
* @qp_max_recv_dtos: maximum number of data outs, corresponds
* to max number of post recvs
- * @qp_max_recv_dtos_mask: (qp_max_recv_dtos - 1)
- * @min_posted_rx: (qp_max_recv_dtos >> 2)
* @max_cmds: maximum cmds allowed for this connection
* @name: connection peer portal
- * @release_work: deffered work for release job
+ * @release_work: deferred work for release job
* @state_mutex: protects iser connection state
* @stop_completion: conn_stop completion
* @ib_completion: RDMA cleanup completion
* @up_completion: connection establishment completed
* (state is ISER_CONN_UP)
* @conn_list: entry in ig conn list
- * @login_buf: login data buffer (stores login parameters)
- * @login_req_buf: login request buffer
- * @login_req_dma: login request buffer dma address
- * @login_resp_buf: login response buffer
- * @login_resp_dma: login response buffer dma address
- * @rx_desc_head: head of rx_descs cyclic buffer
+ * @login_desc: login descriptor
* @rx_descs: rx buffers array (cyclic buffer)
* @num_rx_descs: number of rx descriptors
* @scsi_sg_tablesize: scsi host sg_tablesize
- * @scsi_max_sectors: scsi host max sectors
+ * @pages_per_mr: maximum pages available for registration
+ * @snd_w_inv: connection uses remote invalidation
*/
struct iser_conn {
struct ib_conn ib_conn;
@@ -536,8 +406,6 @@ struct iser_conn {
struct iscsi_endpoint *ep;
enum iser_conn_state state;
unsigned qp_max_recv_dtos;
- unsigned qp_max_recv_dtos_mask;
- unsigned min_posted_rx;
u16 max_cmds;
char name[ISER_OBJECT_NAME_SIZE];
struct work_struct release_work;
@@ -546,15 +414,12 @@ struct iser_conn {
struct completion ib_completion;
struct completion up_completion;
struct list_head conn_list;
-
- char *login_buf;
- char *login_req_buf, *login_resp_buf;
- u64 login_req_dma, login_resp_dma;
- unsigned int rx_desc_head;
+ struct iser_login_desc login_desc;
struct iser_rx_desc *rx_descs;
u32 num_rx_descs;
unsigned short scsi_sg_tablesize;
- unsigned int scsi_max_sectors;
+ unsigned short pages_per_mr;
+ bool snd_w_inv;
};
/**
@@ -582,15 +447,8 @@ struct iscsi_iser_task {
struct iser_data_buf prot[ISER_DIRS_NUM];
};
-struct iser_page_vec {
- u64 *pages;
- int length;
- int offset;
- int data_size;
-};
-
/**
- * struct iser_global: iSER global context
+ * struct iser_global - iSER global context
*
* @device_list_mutex: protects device_list
* @device_list: iser devices global list
@@ -609,10 +467,8 @@ struct iser_global {
extern struct iser_global ig;
extern int iser_debug_level;
extern bool iser_pi_enable;
-extern int iser_pi_guard;
extern unsigned int iser_max_sectors;
-
-int iser_assign_reg_ops(struct iser_device *device);
+extern bool iser_always_reg;
int iser_send_control(struct iscsi_conn *conn,
struct iscsi_task *task);
@@ -637,12 +493,13 @@ int iser_conn_terminate(struct iser_conn *iser_conn);
void iser_release_work(struct work_struct *work);
-void iser_rcv_completion(struct iser_rx_desc *desc,
- unsigned long dto_xfer_len,
- struct ib_conn *ib_conn);
-
-void iser_snd_completion(struct iser_tx_desc *desc,
- struct ib_conn *ib_conn);
+void iser_err_comp(struct ib_wc *wc, const char *type);
+void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc);
+void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc);
+void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc);
+void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc);
+void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc);
+void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_task_rdma_init(struct iscsi_iser_task *task);
@@ -650,77 +507,63 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *task);
void iser_free_rx_descriptors(struct iser_conn *iser_conn);
-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- enum iser_data_dir cmd_dir);
-
-int iser_reg_rdma_mem(struct iscsi_iser_task *task,
- enum iser_data_dir dir);
-void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
- enum iser_data_dir dir);
+int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
+ enum iser_data_dir dir,
+ bool all_imm);
+void iser_unreg_mem_fastreg(struct iscsi_iser_task *task,
+ enum iser_data_dir dir);
int iser_connect(struct iser_conn *iser_conn,
struct sockaddr *src_addr,
struct sockaddr *dst_addr,
int non_blocking);
-void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir);
-void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir);
-
int iser_post_recvl(struct iser_conn *iser_conn);
-int iser_post_recvm(struct iser_conn *iser_conn, int count);
-int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
- bool signal);
+int iser_post_recvm(struct iser_conn *iser_conn,
+ struct iser_rx_desc *rx_desc);
+int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc);
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *data,
enum iser_data_dir iser_dir,
enum dma_data_direction dma_dir);
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *data,
- enum dma_data_direction dir);
+ enum iser_data_dir iser_dir,
+ enum dma_data_direction dma_dir);
int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc);
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
struct iscsi_session *session);
-int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
- unsigned cmds_max,
- unsigned int size);
-void iser_free_fmr_pool(struct ib_conn *ib_conn);
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
unsigned cmds_max,
unsigned int size);
void iser_free_fastreg_pool(struct ib_conn *ib_conn);
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir, sector_t *sector);
-struct iser_fr_desc *
-iser_reg_desc_get_fr(struct ib_conn *ib_conn);
-void
-iser_reg_desc_put_fr(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc);
-struct iser_fr_desc *
-iser_reg_desc_get_fmr(struct ib_conn *ib_conn);
-void
-iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc);
-
-static inline struct ib_send_wr *
-iser_tx_next_wr(struct iser_tx_desc *tx_desc)
+
+static inline struct iser_conn *
+to_iser_conn(struct ib_conn *ib_conn)
+{
+ return container_of(ib_conn, struct iser_conn, ib_conn);
+}
+
+static inline struct iser_rx_desc *
+iser_rx(struct ib_cqe *cqe)
{
- struct ib_send_wr *cur_wr = &tx_desc->wrs[tx_desc->wr_idx];
- struct ib_send_wr *last_wr;
+ return container_of(cqe, struct iser_rx_desc, cqe);
+}
- if (tx_desc->wr_idx) {
- last_wr = &tx_desc->wrs[tx_desc->wr_idx - 1];
- last_wr->next = cur_wr;
- }
- tx_desc->wr_idx++;
+static inline struct iser_tx_desc *
+iser_tx(struct ib_cqe *cqe)
+{
+ return container_of(cqe, struct iser_tx_desc, cqe);
+}
- return cur_wr;
+static inline struct iser_login_desc *
+iser_login(struct ib_cqe *cqe)
+{
+ return container_of(cqe, struct iser_login_desc, cqe);
}
#endif
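
The new iser_rx()/iser_tx()/iser_login() helpers above are plain container_of() accessors from the embedded ib_cqe back to the enclosing descriptor. A self-contained illustration of that pattern (simplified container_of and illustrative types, not the kernel's definitions):

#include <stddef.h>
#include <stdio.h>

/*
 * Given a pointer to an embedded member (the completion entry), recover
 * the enclosing descriptor. The macro below is a simplified stand-in for
 * the kernel's container_of().
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cqe {
	void (*done)(struct cqe *cqe);
};

struct rx_desc {
	int id;
	struct cqe cqe;      /* embedded, like ib_cqe in iser_rx_desc */
};

static struct rx_desc *to_rx_desc(struct cqe *cqe)
{
	return container_of(cqe, struct rx_desc, cqe);
}

int main(void)
{
	struct rx_desc desc = { .id = 42 };

	/* A completion handler only sees &desc.cqe, yet gets the whole desc. */
	printf("id = %d\n", to_rx_desc(&desc.cqe)->id);   /* prints 42 */
	return 0;
}

This is what lets the ib_cqe-based completion handlers replace the old wr_id bookkeeping: the work completion carries wr_cqe, and the descriptor is recovered by pointer arithmetic alone.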
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index d511879d8cdf..f5f090dc4f1e 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -51,31 +51,18 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_mem_reg *mem_reg;
int err;
- struct iser_hdr *hdr = &iser_task->desc.iser_header;
- struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
+ struct iser_ctrl *hdr = &iser_task->desc.iser_header;
err = iser_dma_map_task_data(iser_task,
- buf_in,
ISER_DIR_IN,
DMA_FROM_DEVICE);
if (err)
return err;
- if (scsi_prot_sg_count(iser_task->sc)) {
- struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN];
-
- err = iser_dma_map_task_data(iser_task,
- pbuf_in,
- ISER_DIR_IN,
- DMA_FROM_DEVICE);
- if (err)
- return err;
- }
-
- err = iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
+ err = iser_reg_mem_fastreg(iser_task, ISER_DIR_IN, false);
if (err) {
iser_err("Failed to set up Data-IN RDMA\n");
- return err;
+ goto out_err;
}
mem_reg = &iser_task->rdma_reg[ISER_DIR_IN];
@@ -88,6 +75,10 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
(unsigned long long)mem_reg->sge.addr);
return 0;
+
+out_err:
+ iser_dma_unmap_task_data(iser_task, ISER_DIR_IN, DMA_FROM_DEVICE);
+ return err;
}
/* Register user buffer memory and initialize passive rdma
@@ -95,52 +86,39 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
* task->data[ISER_DIR_OUT].data_len, Protection size
* is stored at task->prot[ISER_DIR_OUT].data_len
*/
-static int
-iser_prepare_write_cmd(struct iscsi_task *task,
- unsigned int imm_sz,
- unsigned int unsol_sz,
- unsigned int edtl)
+static int iser_prepare_write_cmd(struct iscsi_task *task, unsigned int imm_sz,
+ unsigned int unsol_sz, unsigned int edtl)
{
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_mem_reg *mem_reg;
int err;
- struct iser_hdr *hdr = &iser_task->desc.iser_header;
+ struct iser_ctrl *hdr = &iser_task->desc.iser_header;
struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];
err = iser_dma_map_task_data(iser_task,
- buf_out,
ISER_DIR_OUT,
DMA_TO_DEVICE);
if (err)
return err;
- if (scsi_prot_sg_count(iser_task->sc)) {
- struct iser_data_buf *pbuf_out = &iser_task->prot[ISER_DIR_OUT];
-
- err = iser_dma_map_task_data(iser_task,
- pbuf_out,
- ISER_DIR_OUT,
- DMA_TO_DEVICE);
- if (err)
- return err;
- }
-
- err = iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
- if (err != 0) {
+ err = iser_reg_mem_fastreg(iser_task, ISER_DIR_OUT,
+ buf_out->data_len == imm_sz);
+ if (err) {
iser_err("Failed to register write cmd RDMA mem\n");
- return err;
+ goto out_err;
}
mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
if (unsol_sz < edtl) {
hdr->flags |= ISER_WSV;
- hdr->write_stag = cpu_to_be32(mem_reg->rkey);
- hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
+ if (buf_out->data_len > imm_sz) {
+ hdr->write_stag = cpu_to_be32(mem_reg->rkey);
+ hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
+ }
- iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
- "VA:%#llX + unsol:%d\n",
+ iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X VA:%#llX + unsol:%d\n",
task->itt, mem_reg->rkey,
(unsigned long long)mem_reg->sge.addr, unsol_sz);
}
@@ -155,18 +133,26 @@ iser_prepare_write_cmd(struct iscsi_task *task,
}
return 0;
+
+out_err:
+ iser_dma_unmap_task_data(iser_task, ISER_DIR_OUT, DMA_TO_DEVICE);
+ return err;
}
/* creates a new tx descriptor and adds header regd buffer */
-static void iser_create_send_desc(struct iser_conn *iser_conn,
- struct iser_tx_desc *tx_desc)
+static void iser_create_send_desc(struct iser_conn *iser_conn,
+ struct iser_tx_desc *tx_desc, enum iser_desc_type type,
+ void (*done)(struct ib_cq *cq, struct ib_wc *wc))
{
struct iser_device *device = iser_conn->ib_conn.device;
+ tx_desc->type = type;
+ tx_desc->cqe.done = done;
+
ib_dma_sync_single_for_cpu(device->ib_device,
tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
- memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
+ memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
tx_desc->iser_header.flags = ISER_VER;
tx_desc->num_sge = 1;
}
@@ -174,73 +160,63 @@ static void iser_create_send_desc(struct iser_conn *iser_conn,
static void iser_free_login_buf(struct iser_conn *iser_conn)
{
struct iser_device *device = iser_conn->ib_conn.device;
+ struct iser_login_desc *desc = &iser_conn->login_desc;
- if (!iser_conn->login_buf)
+ if (!desc->req)
return;
- if (iser_conn->login_req_dma)
- ib_dma_unmap_single(device->ib_device,
- iser_conn->login_req_dma,
- ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
+ ib_dma_unmap_single(device->ib_device, desc->req_dma,
+ ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
- if (iser_conn->login_resp_dma)
- ib_dma_unmap_single(device->ib_device,
- iser_conn->login_resp_dma,
- ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
+ ib_dma_unmap_single(device->ib_device, desc->rsp_dma,
+ ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
- kfree(iser_conn->login_buf);
+ kfree(desc->req);
+ kfree(desc->rsp);
/* make sure we never redo any unmapping */
- iser_conn->login_req_dma = 0;
- iser_conn->login_resp_dma = 0;
- iser_conn->login_buf = NULL;
+ desc->req = NULL;
+ desc->rsp = NULL;
}
static int iser_alloc_login_buf(struct iser_conn *iser_conn)
{
struct iser_device *device = iser_conn->ib_conn.device;
- int req_err, resp_err;
+ struct iser_login_desc *desc = &iser_conn->login_desc;
+
+ desc->req = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
+ if (!desc->req)
+ return -ENOMEM;
- BUG_ON(device == NULL);
+ desc->req_dma = ib_dma_map_single(device->ib_device, desc->req,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(device->ib_device,
+ desc->req_dma))
+ goto free_req;
- iser_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
- ISER_RX_LOGIN_SIZE, GFP_KERNEL);
- if (!iser_conn->login_buf)
- goto out_err;
+ desc->rsp = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
+ if (!desc->rsp)
+ goto unmap_req;
+
+ desc->rsp_dma = ib_dma_map_single(device->ib_device, desc->rsp,
+ ISER_RX_LOGIN_SIZE,
+ DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(device->ib_device,
+ desc->rsp_dma))
+ goto free_rsp;
- iser_conn->login_req_buf = iser_conn->login_buf;
- iser_conn->login_resp_buf = iser_conn->login_buf +
- ISCSI_DEF_MAX_RECV_SEG_LEN;
-
- iser_conn->login_req_dma = ib_dma_map_single(device->ib_device,
- iser_conn->login_req_buf,
- ISCSI_DEF_MAX_RECV_SEG_LEN,
- DMA_TO_DEVICE);
-
- iser_conn->login_resp_dma = ib_dma_map_single(device->ib_device,
- iser_conn->login_resp_buf,
- ISER_RX_LOGIN_SIZE,
- DMA_FROM_DEVICE);
-
- req_err = ib_dma_mapping_error(device->ib_device,
- iser_conn->login_req_dma);
- resp_err = ib_dma_mapping_error(device->ib_device,
- iser_conn->login_resp_dma);
-
- if (req_err || resp_err) {
- if (req_err)
- iser_conn->login_req_dma = 0;
- if (resp_err)
- iser_conn->login_resp_dma = 0;
- goto free_login_buf;
- }
return 0;
-free_login_buf:
- iser_free_login_buf(iser_conn);
+free_rsp:
+ kfree(desc->rsp);
+unmap_req:
+ ib_dma_unmap_single(device->ib_device, desc->req_dma,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ DMA_TO_DEVICE);
+free_req:
+ kfree(desc->req);
-out_err:
- iser_err("unable to alloc or map login buf\n");
return -ENOMEM;
}
@@ -255,19 +231,18 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
struct iser_device *device = ib_conn->device;
iser_conn->qp_max_recv_dtos = session->cmds_max;
- iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
- iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;
- if (device->reg_ops->alloc_reg_res(ib_conn, session->scsi_cmds_max,
- iser_conn->scsi_sg_tablesize))
+ if (iser_alloc_fastreg_pool(ib_conn, session->scsi_cmds_max,
+ iser_conn->pages_per_mr))
goto create_rdma_reg_res_failed;
if (iser_alloc_login_buf(iser_conn))
goto alloc_login_buf_fail;
iser_conn->num_rx_descs = session->cmds_max;
- iser_conn->rx_descs = kmalloc(iser_conn->num_rx_descs *
- sizeof(struct iser_rx_desc), GFP_KERNEL);
+ iser_conn->rx_descs = kmalloc_array(iser_conn->num_rx_descs,
+ sizeof(struct iser_rx_desc),
+ GFP_KERNEL);
if (!iser_conn->rx_descs)
goto rx_desc_alloc_fail;
@@ -280,14 +255,13 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
goto rx_desc_dma_map_failed;
rx_desc->dma_addr = dma_addr;
-
+ rx_desc->cqe.done = iser_task_rsp;
rx_sg = &rx_desc->rx_sg;
- rx_sg->addr = rx_desc->dma_addr;
+ rx_sg->addr = rx_desc->dma_addr;
rx_sg->length = ISER_RX_PAYLOAD_SIZE;
- rx_sg->lkey = device->pd->local_dma_lkey;
+ rx_sg->lkey = device->pd->local_dma_lkey;
}
- iser_conn->rx_desc_head = 0;
return 0;
rx_desc_dma_map_failed:
@@ -300,7 +274,7 @@ rx_desc_dma_map_failed:
rx_desc_alloc_fail:
iser_free_login_buf(iser_conn);
alloc_login_buf_fail:
- device->reg_ops->free_reg_res(ib_conn);
+ iser_free_fastreg_pool(ib_conn);
create_rdma_reg_res_failed:
iser_err("failed allocating rx descriptors / data buffers\n");
return -ENOMEM;
@@ -313,8 +287,7 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
- if (device->reg_ops->free_reg_res)
- device->reg_ops->free_reg_res(ib_conn);
+ iser_free_fastreg_pool(ib_conn);
rx_desc = iser_conn->rx_descs;
for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
@@ -330,44 +303,43 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
struct iser_conn *iser_conn = conn->dd_data;
- struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iscsi_session *session = conn->session;
+ int err = 0;
+ int i;
iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
/* check if this is the last login - going to full feature phase */
if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
- return 0;
-
- /*
- * Check that there is one posted recv buffer
- * (for the last login response).
- */
- WARN_ON(ib_conn->post_recv_buf_count != 1);
+ goto out;
if (session->discovery_sess) {
iser_info("Discovery session, re-using login RX buffer\n");
- return 0;
- } else
- iser_info("Normal session, posting batch of RX %d buffers\n",
- iser_conn->min_posted_rx);
+ goto out;
+ }
- /* Initial post receive buffers */
- if (iser_post_recvm(iser_conn, iser_conn->min_posted_rx))
- return -ENOMEM;
+ iser_info("Normal session, posting batch of RX %d buffers\n",
+ iser_conn->qp_max_recv_dtos - 1);
- return 0;
-}
-
-static inline bool iser_signal_comp(u8 sig_count)
-{
- return ((sig_count % ISER_SIGNAL_CMD_COUNT) == 0);
+ /*
+ * Initial post receive buffers.
+ * There is one already posted recv buffer (for the last login
+ * response). Therefore, the first recv buffer is skipped here.
+ */
+ for (i = 1; i < iser_conn->qp_max_recv_dtos; i++) {
+ err = iser_post_recvm(iser_conn, &iser_conn->rx_descs[i]);
+ if (err)
+ goto out;
+ }
+out:
+ return err;
}
/**
* iser_send_command - send command PDU
+ * @conn: link to matching iscsi connection
+ * @task: SCSI command task
*/
-int iser_send_command(struct iscsi_conn *conn,
- struct iscsi_task *task)
+int iser_send_command(struct iscsi_conn *conn, struct iscsi_task *task)
{
struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
@@ -377,13 +349,12 @@ int iser_send_command(struct iscsi_conn *conn,
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
struct scsi_cmnd *sc = task->sc;
struct iser_tx_desc *tx_desc = &iser_task->desc;
- u8 sig_count = ++iser_conn->ib_conn.sig_count;
edtl = ntohl(hdr->data_length);
/* build the tx desc regd header and add it to the tx desc dto */
- tx_desc->type = ISCSI_TX_SCSI_COMMAND;
- iser_create_send_desc(iser_conn, tx_desc);
+ iser_create_send_desc(iser_conn, tx_desc, ISCSI_TX_SCSI_COMMAND,
+ iser_cmd_comp);
if (hdr->flags & ISCSI_FLAG_CMD_READ) {
data_buf = &iser_task->data[ISER_DIR_IN];
@@ -423,8 +394,7 @@ int iser_send_command(struct iscsi_conn *conn,
iser_task->status = ISER_TASK_STATUS_STARTED;
- err = iser_post_send(&iser_conn->ib_conn, tx_desc,
- iser_signal_comp(sig_count));
+ err = iser_post_send(&iser_conn->ib_conn, tx_desc);
if (!err)
return 0;
@@ -435,14 +405,16 @@ send_command_error:
/**
* iser_send_data_out - send data out PDU
+ * @conn: link to matching iscsi connection
+ * @task: SCSI command task
+ * @hdr: pointer to the LLD's iSCSI message header
*/
-int iser_send_data_out(struct iscsi_conn *conn,
- struct iscsi_task *task,
+int iser_send_data_out(struct iscsi_conn *conn, struct iscsi_task *task,
struct iscsi_data *hdr)
{
struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
- struct iser_tx_desc *tx_desc = NULL;
+ struct iser_tx_desc *tx_desc;
struct iser_mem_reg *mem_reg;
unsigned long buf_offset;
unsigned long data_seg_len;
@@ -458,12 +430,11 @@ int iser_send_data_out(struct iscsi_conn *conn,
__func__,(int)itt,(int)data_seg_len,(int)buf_offset);
tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
- if (tx_desc == NULL) {
- iser_err("Failed to alloc desc for post dataout\n");
+ if (!tx_desc)
return -ENOMEM;
- }
tx_desc->type = ISCSI_TX_DATAOUT;
+ tx_desc->cqe.done = iser_dataout_comp;
tx_desc->iser_header.flags = ISER_VER;
memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));
@@ -480,8 +451,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
tx_desc->num_sge = 2;
if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
- iser_err("Offset:%ld & DSL:%ld in Data-Out "
- "inconsistent with total len:%ld, itt:%d\n",
+ iser_err("Offset:%ld & DSL:%ld in Data-Out inconsistent with total len:%ld, itt:%d\n",
buf_offset, data_seg_len,
iser_task->data[ISER_DIR_OUT].data_len, itt);
err = -EINVAL;
@@ -490,8 +460,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
itt, buf_offset, data_seg_len);
-
- err = iser_post_send(&iser_conn->ib_conn, tx_desc, true);
+ err = iser_post_send(&iser_conn->ib_conn, tx_desc);
if (!err)
return 0;
@@ -501,8 +470,7 @@ send_data_out_error:
return err;
}
-int iser_send_control(struct iscsi_conn *conn,
- struct iscsi_task *task)
+int iser_send_control(struct iscsi_conn *conn, struct iscsi_task *task)
{
struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
@@ -512,33 +480,33 @@ int iser_send_control(struct iscsi_conn *conn,
struct iser_device *device;
/* build the tx desc regd header and add it to the tx desc dto */
- mdesc->type = ISCSI_TX_CONTROL;
- iser_create_send_desc(iser_conn, mdesc);
+ iser_create_send_desc(iser_conn, mdesc, ISCSI_TX_CONTROL,
+ iser_ctrl_comp);
device = iser_conn->ib_conn.device;
data_seg_len = ntoh24(task->hdr->dlength);
if (data_seg_len > 0) {
+ struct iser_login_desc *desc = &iser_conn->login_desc;
struct ib_sge *tx_dsg = &mdesc->tx_sg[1];
+
if (task != conn->login_task) {
iser_err("data present on non login task!!!\n");
goto send_control_error;
}
- ib_dma_sync_single_for_cpu(device->ib_device,
- iser_conn->login_req_dma, task->data_count,
- DMA_TO_DEVICE);
+ ib_dma_sync_single_for_cpu(device->ib_device, desc->req_dma,
+ task->data_count, DMA_TO_DEVICE);
- memcpy(iser_conn->login_req_buf, task->data, task->data_count);
+ memcpy(desc->req, task->data, task->data_count);
- ib_dma_sync_single_for_device(device->ib_device,
- iser_conn->login_req_dma, task->data_count,
- DMA_TO_DEVICE);
+ ib_dma_sync_single_for_device(device->ib_device, desc->req_dma,
+ task->data_count, DMA_TO_DEVICE);
- tx_dsg->addr = iser_conn->login_req_dma;
- tx_dsg->length = task->data_count;
- tx_dsg->lkey = device->pd->local_dma_lkey;
+ tx_dsg->addr = desc->req_dma;
+ tx_dsg->length = task->data_count;
+ tx_dsg->lkey = device->pd->local_dma_lkey;
mdesc->num_sge = 2;
}
@@ -553,7 +521,7 @@ int iser_send_control(struct iscsi_conn *conn,
goto send_control_error;
}
- err = iser_post_send(&iser_conn->ib_conn, mdesc, true);
+ err = iser_post_send(&iser_conn->ib_conn, mdesc);
if (!err)
return 0;
@@ -562,81 +530,181 @@ send_control_error:
return err;
}
-/**
- * iser_rcv_dto_completion - recv DTO completion
- */
-void iser_rcv_completion(struct iser_rx_desc *rx_desc,
- unsigned long rx_xfer_len,
- struct ib_conn *ib_conn)
+void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
- struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
- ib_conn);
+ struct ib_conn *ib_conn = wc->qp->qp_context;
+ struct iser_conn *iser_conn = to_iser_conn(ib_conn);
+ struct iser_login_desc *desc = iser_login(wc->wr_cqe);
struct iscsi_hdr *hdr;
- u64 rx_dma;
- int rx_buflen, outstanding, count, err;
+ char *data;
+ int length;
+ bool full_feature_phase;
- /* differentiate between login to all other PDUs */
- if ((char *)rx_desc == iser_conn->login_resp_buf) {
- rx_dma = iser_conn->login_resp_dma;
- rx_buflen = ISER_RX_LOGIN_SIZE;
- } else {
- rx_dma = rx_desc->dma_addr;
- rx_buflen = ISER_RX_PAYLOAD_SIZE;
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ iser_err_comp(wc, "login_rsp");
+ return;
}
- ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma,
- rx_buflen, DMA_FROM_DEVICE);
+ ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
+ desc->rsp_dma, ISER_RX_LOGIN_SIZE,
+ DMA_FROM_DEVICE);
- hdr = &rx_desc->iscsi_header;
+ hdr = desc->rsp + sizeof(struct iser_ctrl);
+ data = desc->rsp + ISER_HEADERS_LEN;
+ length = wc->byte_len - ISER_HEADERS_LEN;
+ full_feature_phase = ((hdr->flags & ISCSI_FULL_FEATURE_PHASE) ==
+ ISCSI_FULL_FEATURE_PHASE) &&
+ (hdr->flags & ISCSI_FLAG_CMD_FINAL);
iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
- hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN));
+ hdr->itt, length);
- iscsi_iser_recv(iser_conn->iscsi_conn, hdr, rx_desc->data,
- rx_xfer_len - ISER_HEADERS_LEN);
+ iscsi_iser_recv(iser_conn->iscsi_conn, hdr, data, length);
- ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
- rx_buflen, DMA_FROM_DEVICE);
+ ib_dma_sync_single_for_device(ib_conn->device->ib_device,
+ desc->rsp_dma, ISER_RX_LOGIN_SIZE,
+ DMA_FROM_DEVICE);
- /* decrementing conn->post_recv_buf_count only --after-- freeing the *
- * task eliminates the need to worry on tasks which are completed in *
- * parallel to the execution of iser_conn_term. So the code that waits *
- * for the posted rx bufs refcount to become zero handles everything */
- ib_conn->post_recv_buf_count--;
+ if (!full_feature_phase ||
+ iser_conn->iscsi_conn->session->discovery_sess)
+ return;
- if (rx_dma == iser_conn->login_resp_dma)
+ /* Post the first RX buffer that is skipped in iser_post_rx_bufs() */
+ iser_post_recvm(iser_conn, iser_conn->rx_descs);
+}
+
+static inline int iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
+{
+ if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) ||
+ (desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) {
+ iser_err("Bogus remote invalidation for rkey %#x\n", rkey);
+ return -EINVAL;
+ }
+
+ if (desc->sig_protected)
+ desc->rsc.sig_mr->need_inval = false;
+ else
+ desc->rsc.mr->need_inval = false;
+
+ return 0;
+}
+
+static int iser_check_remote_inv(struct iser_conn *iser_conn, struct ib_wc *wc,
+ struct iscsi_hdr *hdr)
+{
+ if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
+ struct iscsi_task *task;
+ u32 rkey = wc->ex.invalidate_rkey;
+
+ iser_dbg("conn %p: remote invalidation for rkey %#x\n",
+ iser_conn, rkey);
+
+ if (unlikely(!iser_conn->snd_w_inv)) {
+ iser_err("conn %p: unexpected remote invalidation, terminating connection\n",
+ iser_conn);
+ return -EPROTO;
+ }
+
+ task = iscsi_itt_to_ctask(iser_conn->iscsi_conn, hdr->itt);
+ if (likely(task)) {
+ struct iscsi_iser_task *iser_task = task->dd_data;
+ struct iser_fr_desc *desc;
+
+ if (iser_task->dir[ISER_DIR_IN]) {
+ desc = iser_task->rdma_reg[ISER_DIR_IN].desc;
+ if (unlikely(iser_inv_desc(desc, rkey)))
+ return -EINVAL;
+ }
+
+ if (iser_task->dir[ISER_DIR_OUT]) {
+ desc = iser_task->rdma_reg[ISER_DIR_OUT].desc;
+ if (unlikely(iser_inv_desc(desc, rkey)))
+ return -EINVAL;
+ }
+ } else {
+ iser_err("failed to get task for itt=%d\n", hdr->itt);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+
+void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct ib_conn *ib_conn = wc->qp->qp_context;
+ struct iser_conn *iser_conn = to_iser_conn(ib_conn);
+ struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);
+ struct iscsi_hdr *hdr;
+ int length, err;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ iser_err_comp(wc, "task_rsp");
return;
+ }
- outstanding = ib_conn->post_recv_buf_count;
- if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) {
- count = min(iser_conn->qp_max_recv_dtos - outstanding,
- iser_conn->min_posted_rx);
- err = iser_post_recvm(iser_conn, count);
- if (err)
- iser_err("posting %d rx bufs err %d\n", count, err);
+ ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
+ desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
+ DMA_FROM_DEVICE);
+
+ hdr = &desc->iscsi_header;
+ length = wc->byte_len - ISER_HEADERS_LEN;
+
+ iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
+ hdr->itt, length);
+
+ if (iser_check_remote_inv(iser_conn, wc, hdr)) {
+ iscsi_conn_failure(iser_conn->iscsi_conn,
+ ISCSI_ERR_CONN_FAILED);
+ return;
}
+
+ iscsi_iser_recv(iser_conn->iscsi_conn, hdr, desc->data, length);
+
+ ib_dma_sync_single_for_device(ib_conn->device->ib_device,
+ desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
+ DMA_FROM_DEVICE);
+
+ err = iser_post_recvm(iser_conn, desc);
+ if (err)
+ iser_err("posting rx buffer err %d\n", err);
+}
+
+void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc)
+{
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ iser_err_comp(wc, "command");
}
-void iser_snd_completion(struct iser_tx_desc *tx_desc,
- struct ib_conn *ib_conn)
+void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc)
{
+ struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
struct iscsi_task *task;
- struct iser_device *device = ib_conn->device;
- if (tx_desc->type == ISCSI_TX_DATAOUT) {
- ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
- ISER_HEADERS_LEN, DMA_TO_DEVICE);
- kmem_cache_free(ig.desc_cache, tx_desc);
- tx_desc = NULL;
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ iser_err_comp(wc, "control");
+ return;
}
- if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
- /* this arithmetic is legal by libiscsi dd_data allocation */
- task = (void *) ((long)(void *)tx_desc -
- sizeof(struct iscsi_task));
- if (task->hdr->itt == RESERVED_ITT)
- iscsi_put_task(task);
- }
+ /* this arithmetic is legal by libiscsi dd_data allocation */
+ task = (void *)desc - sizeof(struct iscsi_task);
+ if (task->hdr->itt == RESERVED_ITT)
+ iscsi_put_task(task);
+}
+
+void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
+ struct ib_conn *ib_conn = wc->qp->qp_context;
+ struct iser_device *device = ib_conn->device;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ iser_err_comp(wc, "dataout");
+
+ ib_dma_unmap_single(device->ib_device, desc->dma_addr,
+ ISER_HEADERS_LEN, DMA_TO_DEVICE);
+ kmem_cache_free(ig.desc_cache, desc);
}
void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
@@ -653,6 +721,9 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
iser_task->prot[ISER_DIR_IN].data_len = 0;
iser_task->prot[ISER_DIR_OUT].data_len = 0;
+ iser_task->prot[ISER_DIR_IN].dma_nents = 0;
+ iser_task->prot[ISER_DIR_OUT].dma_nents = 0;
+
memset(&iser_task->rdma_reg[ISER_DIR_IN], 0,
sizeof(struct iser_mem_reg));
memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0,
@@ -661,62 +732,16 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
- int is_rdma_data_aligned = 1;
- int is_rdma_prot_aligned = 1;
- int prot_count = scsi_prot_sg_count(iser_task->sc);
-
- /* if we were reading, copy back to unaligned sglist,
- * anyway dma_unmap and free the copy
- */
- if (iser_task->data[ISER_DIR_IN].orig_sg) {
- is_rdma_data_aligned = 0;
- iser_finalize_rdma_unaligned_sg(iser_task,
- &iser_task->data[ISER_DIR_IN],
- ISER_DIR_IN);
- }
-
- if (iser_task->data[ISER_DIR_OUT].orig_sg) {
- is_rdma_data_aligned = 0;
- iser_finalize_rdma_unaligned_sg(iser_task,
- &iser_task->data[ISER_DIR_OUT],
- ISER_DIR_OUT);
- }
-
- if (iser_task->prot[ISER_DIR_IN].orig_sg) {
- is_rdma_prot_aligned = 0;
- iser_finalize_rdma_unaligned_sg(iser_task,
- &iser_task->prot[ISER_DIR_IN],
- ISER_DIR_IN);
- }
-
- if (iser_task->prot[ISER_DIR_OUT].orig_sg) {
- is_rdma_prot_aligned = 0;
- iser_finalize_rdma_unaligned_sg(iser_task,
- &iser_task->prot[ISER_DIR_OUT],
- ISER_DIR_OUT);
- }
if (iser_task->dir[ISER_DIR_IN]) {
- iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
- if (is_rdma_data_aligned)
- iser_dma_unmap_task_data(iser_task,
- &iser_task->data[ISER_DIR_IN],
- DMA_FROM_DEVICE);
- if (prot_count && is_rdma_prot_aligned)
- iser_dma_unmap_task_data(iser_task,
- &iser_task->prot[ISER_DIR_IN],
- DMA_FROM_DEVICE);
+ iser_unreg_mem_fastreg(iser_task, ISER_DIR_IN);
+ iser_dma_unmap_task_data(iser_task, ISER_DIR_IN,
+ DMA_FROM_DEVICE);
}
if (iser_task->dir[ISER_DIR_OUT]) {
- iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
- if (is_rdma_data_aligned)
- iser_dma_unmap_task_data(iser_task,
- &iser_task->data[ISER_DIR_OUT],
- DMA_TO_DEVICE);
- if (prot_count && is_rdma_prot_aligned)
- iser_dma_unmap_task_data(iser_task,
- &iser_task->prot[ISER_DIR_OUT],
- DMA_TO_DEVICE);
+ iser_unreg_mem_fastreg(iser_task, ISER_DIR_OUT);
+ iser_dma_unmap_task_data(iser_task, ISER_DIR_OUT,
+ DMA_TO_DEVICE);
}
}
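
Taken together, the prepare/finalize changes above enforce a simple pairing per direction: the buffer is mapped and fast-registered once, and unregistered/unmapped exactly once, either on the prepare error path or later in iser_task_rdma_finalize(). A minimal sketch of that contract, with hypothetical names (not the driver's functions):

#include <stdio.h>

/*
 * prepare() maps and then registers a direction; if registration fails,
 * prepare() itself unmaps on its error path, otherwise finalize() performs
 * the matching unregister + unmap exactly once.
 */
static int  map_dir(const char *d)            { printf("map   %s\n", d); return 0; }
static void unmap_dir(const char *d)          { printf("unmap %s\n", d); }
static int  reg_dir(const char *d, int fail)  { printf("reg   %s\n", d); return fail ? -1 : 0; }
static void unreg_dir(const char *d)          { printf("unreg %s\n", d); }

static int prepare(const char *d, int fail_reg)
{
	int err = map_dir(d);
	if (err)
		return err;
	err = reg_dir(d, fail_reg);
	if (err)
		unmap_dir(d);        /* error path undoes only what prepare did */
	return err;
}

static void finalize(const char *d)
{
	unreg_dir(d);                /* normal completion path */
	unmap_dir(d);
}

int main(void)
{
	if (!prepare("ISER_DIR_IN", 0))
		finalize("ISER_DIR_IN");     /* success: cleanup happens in finalize */
	prepare("ISER_DIR_OUT", 1);          /* failure: cleanup happens in prepare  */
	return 0;
}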
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 2493cc748db8..6efcb79c8efe 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -30,7 +30,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
@@ -38,165 +37,13 @@
#include <linux/scatterlist.h>
#include "iscsi_iser.h"
-static
-int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- struct iser_reg_resources *rsc,
- struct iser_mem_reg *mem_reg);
-static
-int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- struct iser_reg_resources *rsc,
- struct iser_mem_reg *mem_reg);
-
-static struct iser_reg_ops fastreg_ops = {
- .alloc_reg_res = iser_alloc_fastreg_pool,
- .free_reg_res = iser_free_fastreg_pool,
- .reg_mem = iser_fast_reg_mr,
- .unreg_mem = iser_unreg_mem_fastreg,
- .reg_desc_get = iser_reg_desc_get_fr,
- .reg_desc_put = iser_reg_desc_put_fr,
-};
-
-static struct iser_reg_ops fmr_ops = {
- .alloc_reg_res = iser_alloc_fmr_pool,
- .free_reg_res = iser_free_fmr_pool,
- .reg_mem = iser_fast_reg_fmr,
- .unreg_mem = iser_unreg_mem_fmr,
- .reg_desc_get = iser_reg_desc_get_fmr,
- .reg_desc_put = iser_reg_desc_put_fmr,
-};
-
-int iser_assign_reg_ops(struct iser_device *device)
-{
- struct ib_device_attr *dev_attr = &device->dev_attr;
-
- /* Assign function handles - based on FMR support */
- if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
- device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
- iser_info("FMR supported, using FMR for registration\n");
- device->reg_ops = &fmr_ops;
- } else
- if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
- iser_info("FastReg supported, using FastReg for registration\n");
- device->reg_ops = &fastreg_ops;
- } else {
- iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
- return -1;
- }
- return 0;
-}
-
-static void
-iser_free_bounce_sg(struct iser_data_buf *data)
+void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc)
{
- struct scatterlist *sg;
- int count;
-
- for_each_sg(data->sg, sg, data->size, count)
- __free_page(sg_page(sg));
-
- kfree(data->sg);
-
- data->sg = data->orig_sg;
- data->size = data->orig_size;
- data->orig_sg = NULL;
- data->orig_size = 0;
-}
-
-static int
-iser_alloc_bounce_sg(struct iser_data_buf *data)
-{
- struct scatterlist *sg;
- struct page *page;
- unsigned long length = data->data_len;
- int i = 0, nents = DIV_ROUND_UP(length, PAGE_SIZE);
-
- sg = kcalloc(nents, sizeof(*sg), GFP_ATOMIC);
- if (!sg)
- goto err;
-
- sg_init_table(sg, nents);
- while (length) {
- u32 page_len = min_t(u32, length, PAGE_SIZE);
-
- page = alloc_page(GFP_ATOMIC);
- if (!page)
- goto err;
-
- sg_set_page(&sg[i], page, page_len, 0);
- length -= page_len;
- i++;
- }
-
- data->orig_sg = data->sg;
- data->orig_size = data->size;
- data->sg = sg;
- data->size = nents;
-
- return 0;
-
-err:
- for (; i > 0; i--)
- __free_page(sg_page(&sg[i - 1]));
- kfree(sg);
-
- return -ENOMEM;
+ iser_err_comp(wc, "memreg");
}
-static void
-iser_copy_bounce(struct iser_data_buf *data, bool to_buffer)
-{
- struct scatterlist *osg, *bsg = data->sg;
- void *oaddr, *baddr;
- unsigned int left = data->data_len;
- unsigned int bsg_off = 0;
- int i;
-
- for_each_sg(data->orig_sg, osg, data->orig_size, i) {
- unsigned int copy_len, osg_off = 0;
-
- oaddr = kmap_atomic(sg_page(osg)) + osg->offset;
- copy_len = min(left, osg->length);
- while (copy_len) {
- unsigned int len = min(copy_len, bsg->length - bsg_off);
-
- baddr = kmap_atomic(sg_page(bsg)) + bsg->offset;
- if (to_buffer)
- memcpy(baddr + bsg_off, oaddr + osg_off, len);
- else
- memcpy(oaddr + osg_off, baddr + bsg_off, len);
-
- kunmap_atomic(baddr - bsg->offset);
- osg_off += len;
- bsg_off += len;
- copy_len -= len;
-
- if (bsg_off >= bsg->length) {
- bsg = sg_next(bsg);
- bsg_off = 0;
- }
- }
- kunmap_atomic(oaddr - osg->offset);
- left -= osg_off;
- }
-}
-
-static inline void
-iser_copy_from_bounce(struct iser_data_buf *data)
-{
- iser_copy_bounce(data, false);
-}
-
-static inline void
-iser_copy_to_bounce(struct iser_data_buf *data)
-{
- iser_copy_bounce(data, true);
-}
-
-struct iser_fr_desc *
-iser_reg_desc_get_fr(struct ib_conn *ib_conn)
+static struct iser_fr_desc *iser_reg_desc_get_fr(struct ib_conn *ib_conn)
{
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
struct iser_fr_desc *desc;
@@ -211,9 +58,8 @@ iser_reg_desc_get_fr(struct ib_conn *ib_conn)
return desc;
}
-void
-iser_reg_desc_put_fr(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc)
+static void iser_reg_desc_put_fr(struct ib_conn *ib_conn,
+ struct iser_fr_desc *desc)
{
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
unsigned long flags;
@@ -223,387 +69,116 @@ iser_reg_desc_put_fr(struct ib_conn *ib_conn,
spin_unlock_irqrestore(&fr_pool->lock, flags);
}
-struct iser_fr_desc *
-iser_reg_desc_get_fmr(struct ib_conn *ib_conn)
-{
- struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
-
- return list_first_entry(&fr_pool->list,
- struct iser_fr_desc, list);
-}
-
-void
-iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc)
-{
-}
-
-/**
- * iser_start_rdma_unaligned_sg
- */
-static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *data,
- enum iser_data_dir cmd_dir)
-{
- struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
- int rc;
-
- rc = iser_alloc_bounce_sg(data);
- if (rc) {
- iser_err("Failed to allocate bounce for data len %lu\n",
- data->data_len);
- return rc;
- }
-
- if (cmd_dir == ISER_DIR_OUT)
- iser_copy_to_bounce(data);
-
- data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size,
- (cmd_dir == ISER_DIR_OUT) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- if (!data->dma_nents) {
- iser_err("Got dma_nents %d, something went wrong...\n",
- data->dma_nents);
- rc = -ENOMEM;
- goto err;
- }
-
- return 0;
-err:
- iser_free_bounce_sg(data);
- return rc;
-}
-
-/**
- * iser_finalize_rdma_unaligned_sg
- */
-
-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *data,
- enum iser_data_dir cmd_dir)
-{
- struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
-
- ib_dma_unmap_sg(dev, data->sg, data->size,
- (cmd_dir == ISER_DIR_OUT) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
-
- if (cmd_dir == ISER_DIR_IN)
- iser_copy_from_bounce(data);
-
- iser_free_bounce_sg(data);
-}
-
-#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
-
-/**
- * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
- * and returns the length of resulting physical address array (may be less than
- * the original due to possible compaction).
- *
- * we build a "page vec" under the assumption that the SG meets the RDMA
- * alignment requirements. Other then the first and last SG elements, all
- * the "internal" elements can be compacted into a list whose elements are
- * dma addresses of physical pages. The code supports also the weird case
- * where --few fragments of the same page-- are present in the SG as
- * consecutive elements. Also, it handles one entry SG.
- */
-
-static int iser_sg_to_page_vec(struct iser_data_buf *data,
- struct ib_device *ibdev, u64 *pages,
- int *offset, int *data_size)
-{
- struct scatterlist *sg, *sgl = data->sg;
- u64 start_addr, end_addr, page, chunk_start = 0;
- unsigned long total_sz = 0;
- unsigned int dma_len;
- int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;
-
- /* compute the offset of first element */
- *offset = (u64) sgl[0].offset & ~MASK_4K;
-
- new_chunk = 1;
- cur_page = 0;
- for_each_sg(sgl, sg, data->dma_nents, i) {
- start_addr = ib_sg_dma_address(ibdev, sg);
- if (new_chunk)
- chunk_start = start_addr;
- dma_len = ib_sg_dma_len(ibdev, sg);
- end_addr = start_addr + dma_len;
- total_sz += dma_len;
-
- /* collect page fragments until aligned or end of SG list */
- if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
- new_chunk = 0;
- continue;
- }
- new_chunk = 1;
-
- /* address of the first page in the contiguous chunk;
- masking relevant for the very first SG entry,
- which might be unaligned */
- page = chunk_start & MASK_4K;
- do {
- pages[cur_page++] = page;
- page += SIZE_4K;
- } while (page < end_addr);
- }
-
- *data_size = total_sz;
- iser_dbg("page_vec->data_size:%d cur_page %d\n",
- *data_size, cur_page);
- return cur_page;
-}
-
-
-/**
- * iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned
- * for RDMA sub-list of a scatter-gather list of memory buffers, and returns
- * the number of entries which are aligned correctly. Supports the case where
- * consecutive SG elements are actually fragments of the same physcial page.
- */
-static int iser_data_buf_aligned_len(struct iser_data_buf *data,
- struct ib_device *ibdev,
- unsigned sg_tablesize)
-{
- struct scatterlist *sg, *sgl, *next_sg = NULL;
- u64 start_addr, end_addr;
- int i, ret_len, start_check = 0;
-
- if (data->dma_nents == 1)
- return 1;
-
- sgl = data->sg;
- start_addr = ib_sg_dma_address(ibdev, sgl);
-
- if (unlikely(sgl[0].offset &&
- data->data_len >= sg_tablesize * PAGE_SIZE)) {
- iser_dbg("can't register length %lx with offset %x "
- "fall to bounce buffer\n", data->data_len,
- sgl[0].offset);
- return 0;
- }
-
- for_each_sg(sgl, sg, data->dma_nents, i) {
- if (start_check && !IS_4K_ALIGNED(start_addr))
- break;
-
- next_sg = sg_next(sg);
- if (!next_sg)
- break;
-
- end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
- start_addr = ib_sg_dma_address(ibdev, next_sg);
-
- if (end_addr == start_addr) {
- start_check = 0;
- continue;
- } else
- start_check = 1;
-
- if (!IS_4K_ALIGNED(end_addr))
- break;
- }
- ret_len = (next_sg) ? i : i+1;
-
- if (unlikely(ret_len != data->dma_nents))
- iser_warn("rdma alignment violation (%d/%d aligned)\n",
- ret_len, data->dma_nents);
-
- return ret_len;
-}
-
-static void iser_data_buf_dump(struct iser_data_buf *data,
- struct ib_device *ibdev)
-{
- struct scatterlist *sg;
- int i;
-
- for_each_sg(data->sg, sg, data->dma_nents, i)
- iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
- "off:0x%x sz:0x%x dma_len:0x%x\n",
- i, (unsigned long)ib_sg_dma_address(ibdev, sg),
- sg_page(sg), sg->offset,
- sg->length, ib_sg_dma_len(ibdev, sg));
-}
-
-static void iser_dump_page_vec(struct iser_page_vec *page_vec)
-{
- int i;
-
- iser_err("page vec length %d data size %d\n",
- page_vec->length, page_vec->data_size);
- for (i = 0; i < page_vec->length; i++)
- iser_err("%d %lx\n",i,(unsigned long)page_vec->pages[i]);
-}
-
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *data,
- enum iser_data_dir iser_dir,
- enum dma_data_direction dma_dir)
+ enum iser_data_dir iser_dir,
+ enum dma_data_direction dma_dir)
{
+ struct iser_data_buf *data = &iser_task->data[iser_dir];
struct ib_device *dev;
iser_task->dir[iser_dir] = 1;
dev = iser_task->iser_conn->ib_conn.device->ib_device;
data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
- if (data->dma_nents == 0) {
+ if (unlikely(data->dma_nents == 0)) {
iser_err("dma_map_sg failed!!!\n");
return -EINVAL;
}
- return 0;
-}
-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *data,
- enum dma_data_direction dir)
-{
- struct ib_device *dev;
+ if (scsi_prot_sg_count(iser_task->sc)) {
+ struct iser_data_buf *pdata = &iser_task->prot[iser_dir];
- dev = iser_task->iser_conn->ib_conn.device->ib_device;
- ib_dma_unmap_sg(dev, data->sg, data->size, dir);
-}
-
-static int
-iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
- struct iser_mem_reg *reg)
-{
- struct scatterlist *sg = mem->sg;
-
- reg->sge.lkey = device->pd->local_dma_lkey;
- reg->rkey = device->mr->rkey;
- reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
- reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);
-
- iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
- " length=0x%x\n", reg->sge.lkey, reg->rkey,
- reg->sge.addr, reg->sge.length);
+ pdata->dma_nents = ib_dma_map_sg(dev, pdata->sg, pdata->size, dma_dir);
+ if (unlikely(pdata->dma_nents == 0)) {
+ iser_err("protection dma_map_sg failed!!!\n");
+ goto out_unmap;
+ }
+ }
return 0;
-}
-static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- enum iser_data_dir cmd_dir)
-{
- struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
- struct iser_device *device = iser_task->iser_conn->ib_conn.device;
+out_unmap:
+ ib_dma_unmap_sg(dev, data->sg, data->size, dma_dir);
+ return -EINVAL;
+}
- iscsi_conn->fmr_unalign_cnt++;
- if (iser_debug_level > 0)
- iser_data_buf_dump(mem, device->ib_device);
+void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir iser_dir,
+ enum dma_data_direction dma_dir)
+{
+ struct iser_data_buf *data = &iser_task->data[iser_dir];
+ struct ib_device *dev;
- /* unmap the command data before accessing it */
- iser_dma_unmap_task_data(iser_task, mem,
- (cmd_dir == ISER_DIR_OUT) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ dev = iser_task->iser_conn->ib_conn.device->ib_device;
+ ib_dma_unmap_sg(dev, data->sg, data->size, dma_dir);
- /* allocate copy buf, if we are writing, copy the */
- /* unaligned scatterlist, dma map the copy */
- if (iser_start_rdma_unaligned_sg(iser_task, mem, cmd_dir) != 0)
- return -ENOMEM;
+ if (scsi_prot_sg_count(iser_task->sc)) {
+ struct iser_data_buf *pdata = &iser_task->prot[iser_dir];
- return 0;
+ ib_dma_unmap_sg(dev, pdata->sg, pdata->size, dma_dir);
+ }
}
-/**
- * iser_reg_page_vec - Register physical memory
- *
- * returns: 0 on success, errno code on failure
- */
-static
-int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- struct iser_reg_resources *rsc,
- struct iser_mem_reg *reg)
+static int iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
+ struct iser_mem_reg *reg)
{
- struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
- struct iser_device *device = ib_conn->device;
- struct iser_page_vec *page_vec = rsc->page_vec;
- struct ib_fmr_pool *fmr_pool = rsc->fmr_pool;
- struct ib_pool_fmr *fmr;
- int ret, plen;
-
- plen = iser_sg_to_page_vec(mem, device->ib_device,
- page_vec->pages,
- &page_vec->offset,
- &page_vec->data_size);
- page_vec->length = plen;
- if (plen * SIZE_4K < page_vec->data_size) {
- iser_err("page vec too short to hold this SG\n");
- iser_data_buf_dump(mem, device->ib_device);
- iser_dump_page_vec(page_vec);
- return -EINVAL;
- }
+ struct scatterlist *sg = mem->sg;
- fmr = ib_fmr_pool_map_phys(fmr_pool,
- page_vec->pages,
- page_vec->length,
- page_vec->pages[0]);
- if (IS_ERR(fmr)) {
- ret = PTR_ERR(fmr);
- iser_err("ib_fmr_pool_map_phys failed: %d\n", ret);
- return ret;
- }
+ reg->sge.lkey = device->pd->local_dma_lkey;
+ /*
+ * FIXME: rework the registration code path to differentiate
+ * rkey/lkey use cases
+ */
- reg->sge.lkey = fmr->fmr->lkey;
- reg->rkey = fmr->fmr->rkey;
- reg->sge.addr = page_vec->pages[0] + page_vec->offset;
- reg->sge.length = page_vec->data_size;
- reg->mem_h = fmr;
+ if (device->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
+ reg->rkey = device->pd->unsafe_global_rkey;
+ else
+ reg->rkey = 0;
+ reg->sge.addr = sg_dma_address(&sg[0]);
+ reg->sge.length = sg_dma_len(&sg[0]);
- iser_dbg("fmr reg: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
+ iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
" length=0x%x\n", reg->sge.lkey, reg->rkey,
reg->sge.addr, reg->sge.length);
return 0;
}
-/**
- * Unregister (previosuly registered using FMR) memory.
- * If memory is non-FMR does nothing.
- */
-void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir)
-{
- struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
- int ret;
-
- if (!reg->mem_h)
- return;
-
- iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);
-
- ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
- if (ret)
- iser_err("ib_fmr_pool_unmap failed %d\n", ret);
-
- reg->mem_h = NULL;
-}
-
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir)
{
- struct iser_device *device = iser_task->iser_conn->ib_conn.device;
struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
+ struct iser_fr_desc *desc;
+ struct ib_mr_status mr_status;
- if (!reg->mem_h)
+ desc = reg->desc;
+ if (!desc)
return;
- device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn,
- reg->mem_h);
- reg->mem_h = NULL;
+ /*
+ * The signature MR cannot be invalidated and reused without checking.
+ * libiscsi calls the check_protection transport handler only if
+ * SCSI-Response is received. And the signature MR is not checked if
+ * the task is completed for some other reason like a timeout or error
+ * handling. That's why we must check the signature MR here before
+ * putting it to the free pool.
+ */
+ if (unlikely(desc->sig_protected)) {
+ desc->sig_protected = false;
+ ib_check_mr_status(desc->rsc.sig_mr, IB_MR_CHECK_SIG_STATUS,
+ &mr_status);
+ }
+ iser_reg_desc_put_fr(&iser_task->iser_conn->ib_conn, reg->desc);
+ reg->desc = NULL;
}
-static void
-iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
- struct ib_sig_domain *domain)
+static void iser_set_dif_domain(struct scsi_cmnd *sc,
+ struct ib_sig_domain *domain)
{
domain->sig_type = IB_SIG_TYPE_T10_DIF;
domain->sig.dif.pi_interval = scsi_prot_interval(sc);
- domain->sig.dif.ref_tag = scsi_prot_ref_tag(sc);
+ domain->sig.dif.ref_tag = t10_pi_ref_tag(scsi_cmd_to_rq(sc));
/*
* At the moment we hard code those, but in the future
* we will take them from sc.
@@ -613,30 +188,30 @@ iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
domain->sig.dif.ref_escape = true;
if (sc->prot_flags & SCSI_PROT_REF_INCREMENT)
domain->sig.dif.ref_remap = true;
-};
+}
-static int
-iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
+static int iser_set_sig_attrs(struct scsi_cmnd *sc,
+ struct ib_sig_attrs *sig_attrs)
{
switch (scsi_get_prot_op(sc)) {
case SCSI_PROT_WRITE_INSERT:
case SCSI_PROT_READ_STRIP:
sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
- iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
+ iser_set_dif_domain(sc, &sig_attrs->wire);
sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
break;
case SCSI_PROT_READ_INSERT:
case SCSI_PROT_WRITE_STRIP:
sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
- iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
+ iser_set_dif_domain(sc, &sig_attrs->mem);
sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
IB_T10DIF_CSUM : IB_T10DIF_CRC;
break;
case SCSI_PROT_READ_PASS:
case SCSI_PROT_WRITE_PASS:
- iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
+ iser_set_dif_domain(sc, &sig_attrs->wire);
sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
- iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
+ iser_set_dif_domain(sc, &sig_attrs->mem);
sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
IB_T10DIF_CSUM : IB_T10DIF_CRC;
break;
@@ -649,41 +224,37 @@ iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
return 0;
}
-static inline void
-iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
+static inline void iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
*mask = 0;
if (sc->prot_flags & SCSI_PROT_REF_CHECK)
- *mask |= ISER_CHECK_REFTAG;
+ *mask |= IB_SIG_CHECK_REFTAG;
if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
- *mask |= ISER_CHECK_GUARD;
+ *mask |= IB_SIG_CHECK_GUARD;
}
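For example, a SCSI_PROT_WRITE_PASS command with SCSI_PROT_REF_CHECK and SCSI_PROT_GUARD_CHECK set ends up with both the wire and memory domains configured as T10-DIF (CRC guard on the wire, CRC or IP-CSUM in memory depending on SCSI_PROT_IP_CHECKSUM), and a check_mask of IB_SIG_CHECK_REFTAG | IB_SIG_CHECK_GUARD.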
-static void
-iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
+static inline void iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr,
+ struct ib_cqe *cqe, struct ib_send_wr *next_wr)
{
- u32 rkey;
-
inv_wr->opcode = IB_WR_LOCAL_INV;
- inv_wr->wr_id = ISER_FASTREG_LI_WRID;
+ inv_wr->wr_cqe = cqe;
inv_wr->ex.invalidate_rkey = mr->rkey;
inv_wr->send_flags = 0;
inv_wr->num_sge = 0;
-
- rkey = ib_inc_rkey(mr->rkey);
- ib_update_fast_reg_key(mr, rkey);
+ inv_wr->next = next_wr;
}
-static int
-iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
- struct iser_pi_context *pi_ctx,
- struct iser_mem_reg *data_reg,
- struct iser_mem_reg *prot_reg,
- struct iser_mem_reg *sig_reg)
+static int iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
+ struct iser_data_buf *mem,
+ struct iser_data_buf *sig_mem,
+ struct iser_reg_resources *rsc,
+ struct iser_mem_reg *sig_reg)
{
struct iser_tx_desc *tx_desc = &iser_task->desc;
- struct ib_sig_attrs *sig_attrs = &tx_desc->sig_attrs;
- struct ib_send_wr *wr;
+ struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
+ struct ib_mr *mr = rsc->sig_mr;
+ struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
+ struct ib_reg_wr *wr = &tx_desc->reg_wr;
int ret;
memset(sig_attrs, 0, sizeof(*sig_attrs));
@@ -693,34 +264,38 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);
- if (!pi_ctx->sig_mr_valid) {
- wr = iser_tx_next_wr(tx_desc);
- iser_inv_rkey(wr, pi_ctx->sig_mr);
+ if (rsc->sig_mr->need_inval)
+ iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
+
+ ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
+
+ ret = ib_map_mr_sg_pi(mr, mem->sg, mem->dma_nents, NULL,
+ sig_mem->sg, sig_mem->dma_nents, NULL, SZ_4K);
+ if (unlikely(ret)) {
+ iser_err("failed to map PI sg (%d)\n",
+ mem->dma_nents + sig_mem->dma_nents);
+ goto err;
}
- wr = iser_tx_next_wr(tx_desc);
- wr->opcode = IB_WR_REG_SIG_MR;
- wr->wr_id = ISER_FASTREG_LI_WRID;
- wr->sg_list = &data_reg->sge;
- wr->num_sge = 1;
- wr->send_flags = 0;
- wr->wr.sig_handover.sig_attrs = sig_attrs;
- wr->wr.sig_handover.sig_mr = pi_ctx->sig_mr;
- if (scsi_prot_sg_count(iser_task->sc))
- wr->wr.sig_handover.prot = &prot_reg->sge;
- else
- wr->wr.sig_handover.prot = NULL;
- wr->wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_READ |
- IB_ACCESS_REMOTE_WRITE;
- pi_ctx->sig_mr_valid = 0;
-
- sig_reg->sge.lkey = pi_ctx->sig_mr->lkey;
- sig_reg->rkey = pi_ctx->sig_mr->rkey;
- sig_reg->sge.addr = 0;
- sig_reg->sge.length = scsi_transfer_length(iser_task->sc);
-
- iser_dbg("sig reg: lkey: 0x%x, rkey: 0x%x, addr: 0x%llx, length: %u\n",
+ memset(wr, 0, sizeof(*wr));
+ wr->wr.next = &tx_desc->send_wr;
+ wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
+ wr->wr.wr_cqe = cqe;
+ wr->wr.num_sge = 0;
+ wr->wr.send_flags = 0;
+ wr->mr = mr;
+ wr->key = mr->rkey;
+ wr->access = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE;
+ rsc->sig_mr->need_inval = true;
+
+ sig_reg->sge.lkey = mr->lkey;
+ sig_reg->rkey = mr->rkey;
+ sig_reg->sge.addr = mr->iova;
+ sig_reg->sge.length = mr->length;
+
+ iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=%u\n",
sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
sig_reg->sge.length);
err:
@@ -732,166 +307,85 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
struct iser_reg_resources *rsc,
struct iser_mem_reg *reg)
{
- struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
- struct iser_device *device = ib_conn->device;
- struct ib_mr *mr = rsc->mr;
- struct ib_fast_reg_page_list *frpl = rsc->frpl;
struct iser_tx_desc *tx_desc = &iser_task->desc;
- struct ib_send_wr *wr;
- int offset, size, plen;
+ struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
+ struct ib_mr *mr = rsc->mr;
+ struct ib_reg_wr *wr = &tx_desc->reg_wr;
+ int n;
- plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list,
- &offset, &size);
- if (plen * SIZE_4K < size) {
- iser_err("fast reg page_list too short to hold this SG\n");
- return -EINVAL;
- }
+ if (rsc->mr->need_inval)
+ iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
+
+ ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
- if (!rsc->mr_valid) {
- wr = iser_tx_next_wr(tx_desc);
- iser_inv_rkey(wr, mr);
+ n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SZ_4K);
+ if (unlikely(n != mem->dma_nents)) {
+ iser_err("failed to map sg (%d/%d)\n",
+ n, mem->dma_nents);
+ return n < 0 ? n : -EINVAL;
}
- wr = iser_tx_next_wr(tx_desc);
- wr->opcode = IB_WR_FAST_REG_MR;
- wr->wr_id = ISER_FASTREG_LI_WRID;
- wr->send_flags = 0;
- wr->wr.fast_reg.iova_start = frpl->page_list[0] + offset;
- wr->wr.fast_reg.page_list = frpl;
- wr->wr.fast_reg.page_list_len = plen;
- wr->wr.fast_reg.page_shift = SHIFT_4K;
- wr->wr.fast_reg.length = size;
- wr->wr.fast_reg.rkey = mr->rkey;
- wr->wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ);
- rsc->mr_valid = 0;
+ wr->wr.next = &tx_desc->send_wr;
+ wr->wr.opcode = IB_WR_REG_MR;
+ wr->wr.wr_cqe = cqe;
+ wr->wr.send_flags = 0;
+ wr->wr.num_sge = 0;
+ wr->mr = mr;
+ wr->key = mr->rkey;
+ wr->access = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ;
+
+ rsc->mr->need_inval = true;
reg->sge.lkey = mr->lkey;
reg->rkey = mr->rkey;
- reg->sge.addr = frpl->page_list[0] + offset;
- reg->sge.length = size;
+ reg->sge.addr = mr->iova;
+ reg->sge.length = mr->length;
- iser_dbg("fast reg: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
- " length=0x%x\n", reg->sge.lkey, reg->rkey,
- reg->sge.addr, reg->sge.length);
+ iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=0x%x\n",
+ reg->sge.lkey, reg->rkey, reg->sge.addr, reg->sge.length);
return 0;
}
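The same fast-registration flow, condensed into a generic sketch for reference (not part of the patch; the helper name is hypothetical):

/*
 * FRWR in one place: refresh the key, map the SG list into the MR,
 * and build the IB_WR_REG_MR work request that the send path will
 * chain in front of the actual SEND.
 */
static int example_frwr_build(struct ib_mr *mr, struct scatterlist *sg,
			      int sg_nents, struct ib_reg_wr *rwr)
{
	int n;

	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));

	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, SZ_4K);
	if (n != sg_nents)
		return n < 0 ? n : -EINVAL;

	memset(rwr, 0, sizeof(*rwr));
	rwr->wr.opcode = IB_WR_REG_MR;
	rwr->mr = mr;
	rwr->key = mr->rkey;
	rwr->access = IB_ACCESS_LOCAL_WRITE |
		      IB_ACCESS_REMOTE_WRITE |
		      IB_ACCESS_REMOTE_READ;
	return 0;
}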
-static int
-iser_handle_unaligned_buf(struct iscsi_iser_task *task,
- struct iser_data_buf *mem,
- enum iser_data_dir dir)
-{
- struct iser_conn *iser_conn = task->iser_conn;
- struct iser_device *device = iser_conn->ib_conn.device;
- int err, aligned_len;
-
- aligned_len = iser_data_buf_aligned_len(mem, device->ib_device,
- iser_conn->scsi_sg_tablesize);
- if (aligned_len != mem->dma_nents) {
- err = fall_to_bounce_buf(task, mem, dir);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int
-iser_reg_prot_sg(struct iscsi_iser_task *task,
- struct iser_data_buf *mem,
- struct iser_fr_desc *desc,
- struct iser_mem_reg *reg)
-{
- struct iser_device *device = task->iser_conn->ib_conn.device;
-
- if (mem->dma_nents == 1)
- return iser_reg_dma(device, mem, reg);
-
- return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
-}
-
-static int
-iser_reg_data_sg(struct iscsi_iser_task *task,
- struct iser_data_buf *mem,
- struct iser_fr_desc *desc,
- struct iser_mem_reg *reg)
-{
- struct iser_device *device = task->iser_conn->ib_conn.device;
-
- if (mem->dma_nents == 1)
- return iser_reg_dma(device, mem, reg);
-
- return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
-}
-
-int iser_reg_rdma_mem(struct iscsi_iser_task *task,
- enum iser_data_dir dir)
+int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
+ enum iser_data_dir dir,
+ bool all_imm)
{
struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
struct iser_data_buf *mem = &task->data[dir];
struct iser_mem_reg *reg = &task->rdma_reg[dir];
- struct iser_mem_reg *data_reg;
- struct iser_fr_desc *desc = NULL;
+ struct iser_fr_desc *desc;
+ bool use_dma_key;
int err;
- err = iser_handle_unaligned_buf(task, mem, dir);
- if (unlikely(err))
- return err;
-
- if (mem->dma_nents != 1 ||
- scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
- desc = device->reg_ops->reg_desc_get(ib_conn);
- reg->mem_h = desc;
- }
-
- if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL)
- data_reg = reg;
- else
- data_reg = &task->desc.data_reg;
-
- err = iser_reg_data_sg(task, mem, desc, data_reg);
- if (unlikely(err))
- goto err_reg;
-
- if (scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
- struct iser_mem_reg *prot_reg = &task->desc.prot_reg;
-
- if (scsi_prot_sg_count(task->sc)) {
- mem = &task->prot[dir];
- err = iser_handle_unaligned_buf(task, mem, dir);
- if (unlikely(err))
- goto err_reg;
-
- err = iser_reg_prot_sg(task, mem, desc, prot_reg);
- if (unlikely(err))
- goto err_reg;
- }
+ use_dma_key = mem->dma_nents == 1 && (all_imm || !iser_always_reg) &&
+ scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL;
+ if (use_dma_key)
+ return iser_reg_dma(device, mem, reg);
- err = iser_reg_sig_mr(task, desc->pi_ctx, data_reg,
- prot_reg, reg);
+ desc = iser_reg_desc_get_fr(ib_conn);
+ if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL) {
+ err = iser_fast_reg_mr(task, mem, &desc->rsc, reg);
+ if (unlikely(err))
+ goto err_reg;
+ } else {
+ err = iser_reg_sig_mr(task, mem, &task->prot[dir],
+ &desc->rsc, reg);
if (unlikely(err))
goto err_reg;
- desc->pi_ctx->sig_protected = 1;
+ desc->sig_protected = true;
}
+ reg->desc = desc;
+
return 0;
err_reg:
- if (desc)
- device->reg_ops->reg_desc_put(ib_conn, desc);
+ iser_reg_desc_put_fr(ib_conn, desc);
return err;
}
-
-void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
- enum iser_data_dir dir)
-{
- struct iser_device *device = task->iser_conn->ib_conn.device;
-
- device->reg_ops->unreg_mem(task, dir);
-}
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index ae70cc1463ac..6801b70dc9e0 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -32,29 +32,11 @@
* SOFTWARE.
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include "iscsi_iser.h"
-#define ISCSI_ISER_MAX_CONN 8
-#define ISER_MAX_RX_LEN (ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
-#define ISER_MAX_TX_LEN (ISER_QP_MAX_REQ_DTOS * ISCSI_ISER_MAX_CONN)
-#define ISER_MAX_CQ_LEN (ISER_MAX_RX_LEN + ISER_MAX_TX_LEN + \
- ISCSI_ISER_MAX_CONN)
-
-static int iser_cq_poll_limit = 512;
-
-static void iser_cq_tasklet_fn(unsigned long data);
-static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
-
-static void iser_cq_event_callback(struct ib_event *cause, void *context)
-{
- iser_err("cq event %s (%d)\n",
- ib_event_msg(cause->event), cause->event);
-}
-
static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
iser_err("qp event %s (%d)\n",
@@ -66,343 +48,118 @@ static void iser_event_handler(struct ib_event_handler *handler,
{
iser_err("async event %s (%d) on device %s port %d\n",
ib_event_msg(event->event), event->event,
- event->device->name, event->element.port_num);
+ dev_name(&event->device->dev), event->element.port_num);
}
-/**
+/*
* iser_create_device_ib_res - creates Protection Domain (PD), Completion
* Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
- * the adapator.
+ * the adaptor.
*
- * returns 0 on success, -1 on failure
+ * Return: 0 on success, -1 on failure
*/
static int iser_create_device_ib_res(struct iser_device *device)
{
- struct ib_device_attr *dev_attr = &device->dev_attr;
- int ret, i, max_cqe;
+ struct ib_device *ib_dev = device->ib_device;
- ret = ib_query_device(device->ib_device, dev_attr);
- if (ret) {
- pr_warn("Query device failed for %s\n", device->ib_device->name);
- return ret;
+ if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) {
+ iser_err("IB device does not support memory registrations\n");
+ return -1;
}
- ret = iser_assign_reg_ops(device);
- if (ret)
- return ret;
-
- device->comps_used = min_t(int, num_online_cpus(),
- device->ib_device->num_comp_vectors);
-
- device->comps = kcalloc(device->comps_used, sizeof(*device->comps),
- GFP_KERNEL);
- if (!device->comps)
- goto comps_err;
-
- max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);
-
- iser_info("using %d CQs, device %s supports %d vectors max_cqe %d\n",
- device->comps_used, device->ib_device->name,
- device->ib_device->num_comp_vectors, max_cqe);
-
- device->pd = ib_alloc_pd(device->ib_device);
+ device->pd = ib_alloc_pd(ib_dev,
+ iser_always_reg ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
if (IS_ERR(device->pd))
goto pd_err;
- for (i = 0; i < device->comps_used; i++) {
- struct ib_cq_init_attr cq_attr = {};
- struct iser_comp *comp = &device->comps[i];
-
- comp->device = device;
- cq_attr.cqe = max_cqe;
- cq_attr.comp_vector = i;
- comp->cq = ib_create_cq(device->ib_device,
- iser_cq_callback,
- iser_cq_event_callback,
- (void *)comp,
- &cq_attr);
- if (IS_ERR(comp->cq)) {
- comp->cq = NULL;
- goto cq_err;
- }
-
- if (ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP))
- goto cq_err;
-
- tasklet_init(&comp->tasklet, iser_cq_tasklet_fn,
- (unsigned long)comp);
- }
-
- device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ);
- if (IS_ERR(device->mr))
- goto dma_mr_err;
-
- INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
- iser_event_handler);
- if (ib_register_event_handler(&device->event_handler))
- goto handler_err;
-
+ INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev,
+ iser_event_handler);
+ ib_register_event_handler(&device->event_handler);
return 0;
-handler_err:
- ib_dereg_mr(device->mr);
-dma_mr_err:
- for (i = 0; i < device->comps_used; i++)
- tasklet_kill(&device->comps[i].tasklet);
-cq_err:
- for (i = 0; i < device->comps_used; i++) {
- struct iser_comp *comp = &device->comps[i];
-
- if (comp->cq)
- ib_destroy_cq(comp->cq);
- }
- ib_dealloc_pd(device->pd);
pd_err:
- kfree(device->comps);
-comps_err:
iser_err("failed to allocate an IB resource\n");
return -1;
}
-/**
+/*
* iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
- * CQ and PD created with the device associated with the adapator.
+ * CQ and PD created with the device associated with the adaptor.
*/
static void iser_free_device_ib_res(struct iser_device *device)
{
- int i;
- BUG_ON(device->mr == NULL);
-
- for (i = 0; i < device->comps_used; i++) {
- struct iser_comp *comp = &device->comps[i];
-
- tasklet_kill(&comp->tasklet);
- ib_destroy_cq(comp->cq);
- comp->cq = NULL;
- }
-
- (void)ib_unregister_event_handler(&device->event_handler);
- (void)ib_dereg_mr(device->mr);
+ ib_unregister_event_handler(&device->event_handler);
ib_dealloc_pd(device->pd);
- kfree(device->comps);
- device->comps = NULL;
-
- device->mr = NULL;
device->pd = NULL;
}
-/**
- * iser_alloc_fmr_pool - Creates FMR pool and page_vector
- *
- * returns 0 on success, or errno code on failure
- */
-int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
- unsigned cmds_max,
- unsigned int size)
-{
- struct iser_device *device = ib_conn->device;
- struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
- struct iser_page_vec *page_vec;
- struct iser_fr_desc *desc;
- struct ib_fmr_pool *fmr_pool;
- struct ib_fmr_pool_param params;
- int ret;
-
- INIT_LIST_HEAD(&fr_pool->list);
- spin_lock_init(&fr_pool->lock);
-
- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- page_vec = kmalloc(sizeof(*page_vec) + (sizeof(u64) * size),
- GFP_KERNEL);
- if (!page_vec) {
- ret = -ENOMEM;
- goto err_frpl;
- }
-
- page_vec->pages = (u64 *)(page_vec + 1);
-
- params.page_shift = SHIFT_4K;
- params.max_pages_per_fmr = size;
- /* make the pool size twice the max number of SCSI commands *
- * the ML is expected to queue, watermark for unmap at 50% */
- params.pool_size = cmds_max * 2;
- params.dirty_watermark = cmds_max;
- params.cache = 0;
- params.flush_function = NULL;
- params.access = (IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ);
-
- fmr_pool = ib_create_fmr_pool(device->pd, &params);
- if (IS_ERR(fmr_pool)) {
- ret = PTR_ERR(fmr_pool);
- iser_err("FMR allocation failed, err %d\n", ret);
- goto err_fmr;
- }
-
- desc->rsc.page_vec = page_vec;
- desc->rsc.fmr_pool = fmr_pool;
- list_add(&desc->list, &fr_pool->list);
-
- return 0;
-
-err_fmr:
- kfree(page_vec);
-err_frpl:
- kfree(desc);
-
- return ret;
-}
-
-/**
- * iser_free_fmr_pool - releases the FMR pool and page vec
- */
-void iser_free_fmr_pool(struct ib_conn *ib_conn)
-{
- struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
- struct iser_fr_desc *desc;
-
- desc = list_first_entry(&fr_pool->list,
- struct iser_fr_desc, list);
- list_del(&desc->list);
-
- iser_info("freeing conn %p fmr pool %p\n",
- ib_conn, desc->rsc.fmr_pool);
-
- ib_destroy_fmr_pool(desc->rsc.fmr_pool);
- kfree(desc->rsc.page_vec);
- kfree(desc);
-}
-
-static int
-iser_alloc_reg_res(struct ib_device *ib_device,
- struct ib_pd *pd,
- struct iser_reg_resources *res,
- unsigned int size)
-{
- int ret;
-
- res->frpl = ib_alloc_fast_reg_page_list(ib_device, size);
- if (IS_ERR(res->frpl)) {
- ret = PTR_ERR(res->frpl);
- iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n",
- ret);
- return PTR_ERR(res->frpl);
- }
-
- res->mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, size);
- if (IS_ERR(res->mr)) {
- ret = PTR_ERR(res->mr);
- iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
- goto fast_reg_mr_failure;
- }
- res->mr_valid = 1;
-
- return 0;
-
-fast_reg_mr_failure:
- ib_free_fast_reg_page_list(res->frpl);
-
- return ret;
-}
-
-static void
-iser_free_reg_res(struct iser_reg_resources *rsc)
-{
- ib_dereg_mr(rsc->mr);
- ib_free_fast_reg_page_list(rsc->frpl);
-}
-
-static int
-iser_alloc_pi_ctx(struct ib_device *ib_device,
- struct ib_pd *pd,
- struct iser_fr_desc *desc,
- unsigned int size)
-{
- struct iser_pi_context *pi_ctx = NULL;
- int ret;
-
- desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
- if (!desc->pi_ctx)
- return -ENOMEM;
-
- pi_ctx = desc->pi_ctx;
-
- ret = iser_alloc_reg_res(ib_device, pd, &pi_ctx->rsc, size);
- if (ret) {
- iser_err("failed to allocate reg_resources\n");
- goto alloc_reg_res_err;
- }
-
- pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
- if (IS_ERR(pi_ctx->sig_mr)) {
- ret = PTR_ERR(pi_ctx->sig_mr);
- goto sig_mr_failure;
- }
- pi_ctx->sig_mr_valid = 1;
- desc->pi_ctx->sig_protected = 0;
-
- return 0;
-
-sig_mr_failure:
- iser_free_reg_res(&pi_ctx->rsc);
-alloc_reg_res_err:
- kfree(desc->pi_ctx);
-
- return ret;
-}
-
-static void
-iser_free_pi_ctx(struct iser_pi_context *pi_ctx)
-{
- iser_free_reg_res(&pi_ctx->rsc);
- ib_dereg_mr(pi_ctx->sig_mr);
- kfree(pi_ctx);
-}
-
static struct iser_fr_desc *
-iser_create_fastreg_desc(struct ib_device *ib_device,
+iser_create_fastreg_desc(struct iser_device *device,
struct ib_pd *pd,
bool pi_enable,
unsigned int size)
{
struct iser_fr_desc *desc;
+ struct ib_device *ib_dev = device->ib_device;
+ enum ib_mr_type mr_type;
int ret;
desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
return ERR_PTR(-ENOMEM);
- ret = iser_alloc_reg_res(ib_device, pd, &desc->rsc, size);
- if (ret)
- goto reg_res_alloc_failure;
+ if (ib_dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
+ mr_type = IB_MR_TYPE_SG_GAPS;
+ else
+ mr_type = IB_MR_TYPE_MEM_REG;
+
+ desc->rsc.mr = ib_alloc_mr(pd, mr_type, size);
+ if (IS_ERR(desc->rsc.mr)) {
+ ret = PTR_ERR(desc->rsc.mr);
+ iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
+ goto err_alloc_mr;
+ }
if (pi_enable) {
- ret = iser_alloc_pi_ctx(ib_device, pd, desc, size);
- if (ret)
- goto pi_ctx_alloc_failure;
+ desc->rsc.sig_mr = ib_alloc_mr_integrity(pd, size, size);
+ if (IS_ERR(desc->rsc.sig_mr)) {
+ ret = PTR_ERR(desc->rsc.sig_mr);
+ iser_err("Failed to allocate sig_mr err=%d\n", ret);
+ goto err_alloc_mr_integrity;
+ }
}
return desc;
-pi_ctx_alloc_failure:
- iser_free_reg_res(&desc->rsc);
-reg_res_alloc_failure:
+err_alloc_mr_integrity:
+ ib_dereg_mr(desc->rsc.mr);
+err_alloc_mr:
kfree(desc);
return ERR_PTR(ret);
}
+static void iser_destroy_fastreg_desc(struct iser_fr_desc *desc)
+{
+ struct iser_reg_resources *res = &desc->rsc;
+
+ ib_dereg_mr(res->mr);
+ if (res->sig_mr) {
+ ib_dereg_mr(res->sig_mr);
+ res->sig_mr = NULL;
+ }
+ kfree(desc);
+}
+
/**
* iser_alloc_fastreg_pool - Creates pool of fast_reg descriptors
* for fast registration work requests.
- * returns 0 on success, or errno code on failure
+ * @ib_conn: connection RDMA resources
+ * @cmds_max: max number of SCSI commands for this connection
+ * @size: max number of pages per map request
+ *
+ * Return: 0 on success, or errno code on failure
*/
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
unsigned cmds_max,
@@ -414,10 +171,11 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
int i, ret;
INIT_LIST_HEAD(&fr_pool->list);
+ INIT_LIST_HEAD(&fr_pool->all_list);
spin_lock_init(&fr_pool->lock);
fr_pool->size = 0;
for (i = 0; i < cmds_max; i++) {
- desc = iser_create_fastreg_desc(device->ib_device, device->pd,
+ desc = iser_create_fastreg_desc(device, device->pd,
ib_conn->pi_support, size);
if (IS_ERR(desc)) {
ret = PTR_ERR(desc);
@@ -425,6 +183,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
}
list_add_tail(&desc->list, &fr_pool->list);
+ list_add_tail(&desc->all_list, &fr_pool->all_list);
fr_pool->size++;
}
@@ -437,6 +196,7 @@ err:
/**
* iser_free_fastreg_pool - releases the pool of fast_reg descriptors
+ * @ib_conn: connection RDMA resources
*/
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
@@ -444,17 +204,14 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
struct iser_fr_desc *desc, *tmp;
int i = 0;
- if (list_empty(&fr_pool->list))
+ if (list_empty(&fr_pool->all_list))
return;
iser_info("freeing conn %p fr pool\n", ib_conn);
- list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) {
- list_del(&desc->list);
- iser_free_reg_res(&desc->rsc);
- if (desc->pi_ctx)
- iser_free_pi_ctx(desc->pi_ctx);
- kfree(desc);
+ list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) {
+ list_del(&desc->all_list);
+ iser_destroy_fastreg_desc(desc);
++i;
}
@@ -463,88 +220,76 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
fr_pool->size - i);
}
-/**
+/*
* iser_create_ib_conn_res - Queue-Pair (QP)
*
- * returns 0 on success, -1 on failure
+ * Return: 0 on success, -1 on failure
*/
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
- struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
- ib_conn);
+ struct iser_conn *iser_conn = to_iser_conn(ib_conn);
struct iser_device *device;
- struct ib_device_attr *dev_attr;
+ struct ib_device *ib_dev;
struct ib_qp_init_attr init_attr;
int ret = -ENOMEM;
- int index, min_index = 0;
+ unsigned int max_send_wr, cq_size;
BUG_ON(ib_conn->device == NULL);
device = ib_conn->device;
- dev_attr = &device->dev_attr;
+ ib_dev = device->ib_device;
- memset(&init_attr, 0, sizeof init_attr);
+ /* +1 for drain */
+ if (ib_conn->pi_support)
+ max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
+ else
+ max_send_wr = ISER_QP_MAX_REQ_DTOS + 1;
+ max_send_wr = min_t(unsigned int, max_send_wr,
+ (unsigned int)ib_dev->attrs.max_qp_wr);
- mutex_lock(&ig.connlist_mutex);
- /* select the CQ with the minimal number of usages */
- for (index = 0; index < device->comps_used; index++) {
- if (device->comps[index].active_qps <
- device->comps[min_index].active_qps)
- min_index = index;
+ cq_size = max_send_wr + ISER_QP_MAX_RECV_DTOS;
+ ib_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_SOFTIRQ);
+ if (IS_ERR(ib_conn->cq)) {
+ ret = PTR_ERR(ib_conn->cq);
+ goto cq_err;
}
- ib_conn->comp = &device->comps[min_index];
- ib_conn->comp->active_qps++;
- mutex_unlock(&ig.connlist_mutex);
- iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);
+ ib_conn->cq_size = cq_size;
+
+ memset(&init_attr, 0, sizeof(init_attr));
init_attr.event_handler = iser_qp_event_callback;
- init_attr.qp_context = (void *)ib_conn;
- init_attr.send_cq = ib_conn->comp->cq;
- init_attr.recv_cq = ib_conn->comp->cq;
- init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
+ init_attr.qp_context = (void *)ib_conn;
+ init_attr.send_cq = ib_conn->cq;
+ init_attr.recv_cq = ib_conn->cq;
+ /* +1 for drain */
+ init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS + 1;
init_attr.cap.max_send_sge = 2;
init_attr.cap.max_recv_sge = 1;
- init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
- init_attr.qp_type = IB_QPT_RC;
- if (ib_conn->pi_support) {
- init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
- init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
- iser_conn->max_cmds =
- ISER_GET_MAX_XMIT_CMDS(ISER_QP_SIG_MAX_REQ_DTOS);
- } else {
- if (dev_attr->max_qp_wr > ISER_QP_MAX_REQ_DTOS) {
- init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS + 1;
- iser_conn->max_cmds =
- ISER_GET_MAX_XMIT_CMDS(ISER_QP_MAX_REQ_DTOS);
- } else {
- init_attr.cap.max_send_wr = dev_attr->max_qp_wr;
- iser_conn->max_cmds =
- ISER_GET_MAX_XMIT_CMDS(dev_attr->max_qp_wr);
- iser_dbg("device %s supports max_send_wr %d\n",
- device->ib_device->name, dev_attr->max_qp_wr);
- }
- }
+ init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+ init_attr.qp_type = IB_QPT_RC;
+ init_attr.cap.max_send_wr = max_send_wr;
+ if (ib_conn->pi_support)
+ init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
+ iser_conn->max_cmds = ISER_GET_MAX_XMIT_CMDS(max_send_wr - 1);
ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
if (ret)
goto out_err;
ib_conn->qp = ib_conn->cma_id->qp;
- iser_info("setting conn %p cma_id %p qp %p\n",
- ib_conn, ib_conn->cma_id,
- ib_conn->cma_id->qp);
+ iser_info("setting conn %p cma_id %p qp %p max_send_wr %d\n", ib_conn,
+ ib_conn->cma_id, ib_conn->cma_id->qp, max_send_wr);
return ret;
out_err:
- mutex_lock(&ig.connlist_mutex);
- ib_conn->comp->active_qps--;
- mutex_unlock(&ig.connlist_mutex);
+ ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);
+cq_err:
iser_err("unable to alloc mem or create resource, err %d\n", ret);
return ret;
}
-/**
+/*
* based on the resolved device node GUID see if there already allocated
* device for this device. If there's no such, create one.
*/
@@ -561,7 +306,7 @@ struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
goto inc_refcnt;
device = kzalloc(sizeof *device, GFP_KERNEL);
- if (device == NULL)
+ if (!device)
goto out;
/* assign this device to the device */
@@ -595,22 +340,6 @@ static void iser_device_try_release(struct iser_device *device)
mutex_unlock(&ig.device_list_mutex);
}
-/**
- * Called with state mutex held
- **/
-static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
- enum iser_conn_state comp,
- enum iser_conn_state exch)
-{
- int ret;
-
- ret = (iser_conn->state == comp);
- if (ret)
- iser_conn->state = exch;
-
- return ret;
-}
-
void iser_release_work(struct work_struct *work)
{
struct iser_conn *iser_conn;
@@ -640,8 +369,7 @@ void iser_release_work(struct work_struct *work)
* so the cm_id removal is out of here. It is Safe to
* be invoked multiple times.
*/
-static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
- bool destroy)
+static void iser_free_ib_conn_res(struct iser_conn *iser_conn, bool destroy)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
@@ -649,9 +377,9 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
iser_info("freeing conn %p cma_id %p qp %p\n",
iser_conn, ib_conn->cma_id, ib_conn->qp);
- if (ib_conn->qp != NULL) {
- ib_conn->comp->active_qps--;
+ if (ib_conn->qp) {
rdma_destroy_qp(ib_conn->cma_id);
+ ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);
ib_conn->qp = NULL;
}
@@ -659,7 +387,7 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
if (iser_conn->rx_descs)
iser_free_rx_descriptors(iser_conn);
- if (device != NULL) {
+ if (device) {
iser_device_try_release(device);
ib_conn->device = NULL;
}
@@ -667,7 +395,8 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
}
/**
- * Frees all conn objects and deallocs conn descriptor
+ * iser_conn_release - Frees all conn objects and deallocs conn descriptor
+ * @iser_conn: iSER connection context
*/
void iser_conn_release(struct iser_conn *iser_conn)
{
@@ -692,7 +421,7 @@ void iser_conn_release(struct iser_conn *iser_conn)
iser_free_ib_conn_res(iser_conn, true);
mutex_unlock(&iser_conn->state_mutex);
- if (ib_conn->cma_id != NULL) {
+ if (ib_conn->cma_id) {
rdma_destroy_id(ib_conn->cma_id);
ib_conn->cma_id = NULL;
}
@@ -701,20 +430,24 @@ void iser_conn_release(struct iser_conn *iser_conn)
}
/**
- * triggers start of the disconnect procedures and wait for them to be done
+ * iser_conn_terminate - triggers start of the disconnect procedures and
+ * waits for them to be done
+ * @iser_conn: iSER connection context
+ *
* Called with state mutex held
*/
int iser_conn_terminate(struct iser_conn *iser_conn)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
- struct ib_send_wr *bad_wr;
int err = 0;
+ lockdep_assert_held(&iser_conn->state_mutex);
+
/* terminate the iser conn only if the conn state is UP */
- if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
- ISER_CONN_TERMINATING))
+ if (iser_conn->state != ISER_CONN_UP)
return 0;
+ iser_conn->state = ISER_CONN_TERMINATING;
iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state);
/* suspend queuing of new iscsi commands */
@@ -732,66 +465,70 @@ int iser_conn_terminate(struct iser_conn *iser_conn)
iser_err("Failed to disconnect, conn: 0x%p err %d\n",
iser_conn, err);
- /* post an indication that all flush errors were consumed */
- err = ib_post_send(ib_conn->qp, &ib_conn->beacon, &bad_wr);
- if (err) {
- iser_err("conn %p failed to post beacon", ib_conn);
- return 1;
- }
-
- wait_for_completion(&ib_conn->flush_comp);
+ /* block until all flush errors are consumed */
+ ib_drain_qp(ib_conn->qp);
}
return 1;
}
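Note on the replacement above: ib_drain_qp() moves the queue pair to the error state if needed and blocks until all outstanding send and receive work requests have been flushed, which is also why the QP is sized with the extra "+1 for drain" slots earlier in this patch.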
-/**
+/*
* Called with state mutex held
- **/
+ */
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
- struct iser_conn *iser_conn;
+ struct iser_conn *iser_conn = cma_id->context;
+
+ lockdep_assert_held(&iser_conn->state_mutex);
- iser_conn = (struct iser_conn *)cma_id->context;
iser_conn->state = ISER_CONN_TERMINATING;
}
-static void
-iser_calc_scsi_params(struct iser_conn *iser_conn,
- unsigned int max_sectors)
+static void iser_calc_scsi_params(struct iser_conn *iser_conn,
+ unsigned int max_sectors)
{
struct iser_device *device = iser_conn->ib_conn.device;
+ struct ib_device_attr *attr = &device->ib_device->attrs;
unsigned short sg_tablesize, sup_sg_tablesize;
+ unsigned short reserved_mr_pages;
+ u32 max_num_sg;
- sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
- sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
- device->dev_attr.max_fast_reg_page_list_len);
-
- if (sg_tablesize > sup_sg_tablesize) {
- sg_tablesize = sup_sg_tablesize;
- iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512;
- } else {
- iser_conn->scsi_max_sectors = max_sectors;
- }
+ /*
+ * FRs without SG_GAPS can only map up to a (device) page per entry,
+ * but if the first entry is misaligned we'll end up using two entries
+ * (head and tail) for a single page's worth of data, so one additional
+ * entry is required.
+ */
+ if (attr->kernel_cap_flags & IBK_SG_GAPS_REG)
+ reserved_mr_pages = 0;
+ else
+ reserved_mr_pages = 1;
- iser_conn->scsi_sg_tablesize = sg_tablesize;
+ if (iser_conn->ib_conn.pi_support)
+ max_num_sg = attr->max_pi_fast_reg_page_list_len;
+ else
+ max_num_sg = attr->max_fast_reg_page_list_len;
- iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
- iser_conn, iser_conn->scsi_sg_tablesize,
- iser_conn->scsi_max_sectors);
+ sg_tablesize = DIV_ROUND_UP(max_sectors * SECTOR_SIZE, SZ_4K);
+ sup_sg_tablesize = min_t(uint, ISCSI_ISER_MAX_SG_TABLESIZE,
+ max_num_sg - reserved_mr_pages);
+ iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
+ iser_conn->pages_per_mr =
+ iser_conn->scsi_sg_tablesize + reserved_mr_pages;
}
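Worked example for the sizing above (numbers assumed for illustration): with max_sectors = 1024, sg_tablesize = DIV_ROUND_UP(1024 * 512, 4096) = 128. On a device without IBK_SG_GAPS_REG, reserved_mr_pages = 1, so pages_per_mr = 128 + 1 = 129, provided max_fast_reg_page_list_len (or the PI variant) allows that many entries.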
-/**
+/*
* Called with state mutex held
- **/
+ */
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
+ struct iser_conn *iser_conn = cma_id->context;
struct iser_device *device;
- struct iser_conn *iser_conn;
- struct ib_conn *ib_conn;
+ struct ib_conn *ib_conn;
int ret;
- iser_conn = (struct iser_conn *)cma_id->context;
+ lockdep_assert_held(&iser_conn->state_mutex);
+
if (iser_conn->state != ISER_CONN_PENDING)
/* bailout */
return;
@@ -808,11 +545,11 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
/* connection T10-PI support */
if (iser_pi_enable) {
- if (!(device->dev_attr.device_cap_flags &
- IB_DEVICE_SIGNATURE_HANDOVER)) {
+ if (!(device->ib_device->attrs.kernel_cap_flags &
+ IBK_INTEGRITY_HANDOVER)) {
iser_warn("T10-PI requested but not supported on %s, "
"continue without T10-PI\n",
- ib_conn->device->ib_device->name);
+ dev_name(&ib_conn->device->ib_device->dev));
ib_conn->pi_support = false;
} else {
ib_conn->pi_support = true;
@@ -829,17 +566,19 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
}
}
-/**
+/*
* Called with state mutex held
- **/
+ */
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
struct rdma_conn_param conn_param;
- int ret;
+ int ret;
struct iser_cm_hdr req_hdr;
- struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
+ struct iser_conn *iser_conn = cma_id->context;
struct ib_conn *ib_conn = &iser_conn->ib_conn;
- struct iser_device *device = ib_conn->device;
+ struct ib_device *ib_dev = ib_conn->device->ib_device;
+
+ lockdep_assert_held(&iser_conn->state_mutex);
if (iser_conn->state != ISER_CONN_PENDING)
/* bailout */
@@ -850,18 +589,19 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
goto failure;
memset(&conn_param, 0, sizeof conn_param);
- conn_param.responder_resources = device->dev_attr.max_qp_rd_atom;
- conn_param.initiator_depth = 1;
- conn_param.retry_count = 7;
- conn_param.rnr_retry_count = 6;
+ conn_param.responder_resources = ib_dev->attrs.max_qp_rd_atom;
+ conn_param.initiator_depth = 1;
+ conn_param.retry_count = 7;
+ conn_param.rnr_retry_count = 6;
memset(&req_hdr, 0, sizeof(req_hdr));
- req_hdr.flags = (ISER_ZBVA_NOT_SUPPORTED |
- ISER_SEND_W_INV_NOT_SUPPORTED);
- conn_param.private_data = (void *)&req_hdr;
- conn_param.private_data_len = sizeof(struct iser_cm_hdr);
+ req_hdr.flags = ISER_ZBVA_NOT_SUP;
+ if (!iser_always_reg)
+ req_hdr.flags |= ISER_SEND_W_INV_NOT_SUP;
+ conn_param.private_data = (void *)&req_hdr;
+ conn_param.private_data_len = sizeof(struct iser_cm_hdr);
- ret = rdma_connect(cma_id, &conn_param);
+ ret = rdma_connect_locked(cma_id, &conn_param);
if (ret) {
iser_err("failure connecting: %d\n", ret);
goto failure;
@@ -872,13 +612,18 @@ failure:
iser_connect_error(cma_id);
}
-static void iser_connected_handler(struct rdma_cm_id *cma_id)
+/*
+ * Called with state mutex held
+ */
+static void iser_connected_handler(struct rdma_cm_id *cma_id,
+ const void *private_data)
{
- struct iser_conn *iser_conn;
+ struct iser_conn *iser_conn = cma_id->context;
struct ib_qp_attr attr;
struct ib_qp_init_attr init_attr;
- iser_conn = (struct iser_conn *)cma_id->context;
+ lockdep_assert_held(&iser_conn->state_mutex);
+
if (iser_conn->state != ISER_CONN_PENDING)
/* bailout */
return;
@@ -886,44 +631,51 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id)
(void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);
- iser_conn->state = ISER_CONN_UP;
- complete(&iser_conn->up_completion);
-}
-
-static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
-{
- struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
+ if (private_data) {
+ u8 flags = *(u8 *)private_data;
- if (iser_conn_terminate(iser_conn)) {
- if (iser_conn->iscsi_conn)
- iscsi_conn_failure(iser_conn->iscsi_conn,
- ISCSI_ERR_CONN_FAILED);
- else
- iser_err("iscsi_iser connection isn't bound\n");
+ iser_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP);
}
+
+ iser_info("conn %p: negotiated %s invalidation\n",
+ iser_conn, iser_conn->snd_w_inv ? "remote" : "local");
+
+ iser_conn->state = ISER_CONN_UP;
+ complete(&iser_conn->up_completion);
}
+/*
+ * Called with state mutex held
+ */
static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
bool destroy)
{
- struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
+ struct iser_conn *iser_conn = cma_id->context;
+ lockdep_assert_held(&iser_conn->state_mutex);
/*
* We are not guaranteed that we visited disconnected_handler
* by now, call it here to be safe that we handle CM drep
* and flush errors.
*/
- iser_disconnected_handler(cma_id);
+ if (iser_conn_terminate(iser_conn)) {
+ if (iser_conn->iscsi_conn)
+ iscsi_conn_failure(iser_conn->iscsi_conn,
+ ISCSI_ERR_CONN_FAILED);
+ else
+ iser_err("iscsi_iser connection isn't bound\n");
+ }
iser_free_ib_conn_res(iser_conn, destroy);
complete(&iser_conn->ib_completion);
-};
+}
-static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+static int iser_cma_handler(struct rdma_cm_id *cma_id,
+ struct rdma_cm_event *event)
{
struct iser_conn *iser_conn;
int ret = 0;
- iser_conn = (struct iser_conn *)cma_id->context;
+ iser_conn = cma_id->context;
iser_info("%s (%d): status %d conn %p id %p\n",
rdma_event_msg(event->event), event->event,
event->status, cma_id->context, cma_id);
@@ -937,13 +689,16 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
iser_route_handler(cma_id);
break;
case RDMA_CM_EVENT_ESTABLISHED:
- iser_connected_handler(cma_id);
+ iser_connected_handler(cma_id, event->param.conn.private_data);
break;
+ case RDMA_CM_EVENT_REJECTED:
+ iser_info("Connection rejected: %s\n",
+ rdma_reject_msg(cma_id, event->status));
+ fallthrough;
case RDMA_CM_EVENT_ADDR_ERROR:
case RDMA_CM_EVENT_ROUTE_ERROR:
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
- case RDMA_CM_EVENT_REJECTED:
iser_connect_error(cma_id);
break;
case RDMA_CM_EVENT_DISCONNECTED:
@@ -976,24 +731,24 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
void iser_conn_init(struct iser_conn *iser_conn)
{
+ struct ib_conn *ib_conn = &iser_conn->ib_conn;
+
iser_conn->state = ISER_CONN_INIT;
- iser_conn->ib_conn.post_recv_buf_count = 0;
- init_completion(&iser_conn->ib_conn.flush_comp);
init_completion(&iser_conn->stop_completion);
init_completion(&iser_conn->ib_completion);
init_completion(&iser_conn->up_completion);
INIT_LIST_HEAD(&iser_conn->conn_list);
mutex_init(&iser_conn->state_mutex);
+
+ ib_conn->reg_cqe.done = iser_reg_comp;
}
- /**
+/*
* starts the process of connecting to the target
* sleeps until the connection is established or rejected
*/
-int iser_connect(struct iser_conn *iser_conn,
- struct sockaddr *src_addr,
- struct sockaddr *dst_addr,
- int non_blocking)
+int iser_connect(struct iser_conn *iser_conn, struct sockaddr *src_addr,
+ struct sockaddr *dst_addr, int non_blocking)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
int err = 0;
@@ -1009,12 +764,8 @@ int iser_connect(struct iser_conn *iser_conn,
iser_conn->state = ISER_CONN_PENDING;
- ib_conn->beacon.wr_id = ISER_BEACON_WRID;
- ib_conn->beacon.opcode = IB_WR_SEND;
-
- ib_conn->cma_id = rdma_create_id(iser_cma_handler,
- (void *)iser_conn,
- RDMA_PS_TCP, IB_QPT_RC);
+ ib_conn->cma_id = rdma_create_id(&init_net, iser_cma_handler,
+ iser_conn, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(ib_conn->cma_id)) {
err = PTR_ERR(ib_conn->cma_id);
iser_err("rdma_create_id failed: %d\n", err);
@@ -1054,258 +805,114 @@ connect_failure:
int iser_post_recvl(struct iser_conn *iser_conn)
{
- struct ib_recv_wr rx_wr, *rx_wr_failed;
struct ib_conn *ib_conn = &iser_conn->ib_conn;
- struct ib_sge sge;
- int ib_ret;
-
- sge.addr = iser_conn->login_resp_dma;
- sge.length = ISER_RX_LOGIN_SIZE;
- sge.lkey = ib_conn->device->pd->local_dma_lkey;
-
- rx_wr.wr_id = (uintptr_t)iser_conn->login_resp_buf;
- rx_wr.sg_list = &sge;
- rx_wr.num_sge = 1;
- rx_wr.next = NULL;
-
- ib_conn->post_recv_buf_count++;
- ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
- if (ib_ret) {
- iser_err("ib_post_recv failed ret=%d\n", ib_ret);
- ib_conn->post_recv_buf_count--;
- }
- return ib_ret;
+ struct iser_login_desc *desc = &iser_conn->login_desc;
+ struct ib_recv_wr wr;
+ int ret;
+
+ desc->sge.addr = desc->rsp_dma;
+ desc->sge.length = ISER_RX_LOGIN_SIZE;
+ desc->sge.lkey = ib_conn->device->pd->local_dma_lkey;
+
+ desc->cqe.done = iser_login_rsp;
+ wr.wr_cqe = &desc->cqe;
+ wr.sg_list = &desc->sge;
+ wr.num_sge = 1;
+ wr.next = NULL;
+
+ ret = ib_post_recv(ib_conn->qp, &wr, NULL);
+ if (unlikely(ret))
+ iser_err("ib_post_recv login failed ret=%d\n", ret);
+
+ return ret;
}
-int iser_post_recvm(struct iser_conn *iser_conn, int count)
+int iser_post_recvm(struct iser_conn *iser_conn, struct iser_rx_desc *rx_desc)
{
- struct ib_recv_wr *rx_wr, *rx_wr_failed;
- int i, ib_ret;
struct ib_conn *ib_conn = &iser_conn->ib_conn;
- unsigned int my_rx_head = iser_conn->rx_desc_head;
- struct iser_rx_desc *rx_desc;
-
- for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
- rx_desc = &iser_conn->rx_descs[my_rx_head];
- rx_wr->wr_id = (uintptr_t)rx_desc;
- rx_wr->sg_list = &rx_desc->rx_sg;
- rx_wr->num_sge = 1;
- rx_wr->next = rx_wr + 1;
- my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
- }
+ struct ib_recv_wr wr;
+ int ret;
+
+ rx_desc->cqe.done = iser_task_rsp;
+ wr.wr_cqe = &rx_desc->cqe;
+ wr.sg_list = &rx_desc->rx_sg;
+ wr.num_sge = 1;
+ wr.next = NULL;
+
+ ret = ib_post_recv(ib_conn->qp, &wr, NULL);
+ if (unlikely(ret))
+ iser_err("ib_post_recv failed ret=%d\n", ret);
- rx_wr--;
- rx_wr->next = NULL; /* mark end of work requests list */
-
- ib_conn->post_recv_buf_count += count;
- ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
- if (ib_ret) {
- iser_err("ib_post_recv failed ret=%d\n", ib_ret);
- ib_conn->post_recv_buf_count -= count;
- } else
- iser_conn->rx_desc_head = my_rx_head;
- return ib_ret;
+ return ret;
}
/**
- * iser_start_send - Initiate a Send DTO operation
+ * iser_post_send - Initiate a Send DTO operation
+ * @ib_conn: connection RDMA resources
+ * @tx_desc: iSER TX descriptor
*
- * returns 0 on success, -1 on failure
+ * Return: 0 on success, -1 on failure
*/
-int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
- bool signal)
+int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
- struct ib_send_wr *bad_wr, *wr = iser_tx_next_wr(tx_desc);
- int ib_ret;
+ struct ib_send_wr *wr = &tx_desc->send_wr;
+ struct ib_send_wr *first_wr;
+ int ret;
ib_dma_sync_single_for_device(ib_conn->device->ib_device,
tx_desc->dma_addr, ISER_HEADERS_LEN,
DMA_TO_DEVICE);
wr->next = NULL;
- wr->wr_id = (uintptr_t)tx_desc;
+ wr->wr_cqe = &tx_desc->cqe;
wr->sg_list = tx_desc->tx_sg;
wr->num_sge = tx_desc->num_sge;
wr->opcode = IB_WR_SEND;
- wr->send_flags = signal ? IB_SEND_SIGNALED : 0;
-
- ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0], &bad_wr);
- if (ib_ret)
- iser_err("ib_post_send failed, ret:%d opcode:%d\n",
- ib_ret, bad_wr->opcode);
-
- return ib_ret;
-}
-
-/**
- * is_iser_tx_desc - Indicate if the completion wr_id
- * is a TX descriptor or not.
- * @iser_conn: iser connection
- * @wr_id: completion WR identifier
- *
- * Since we cannot rely on wc opcode in FLUSH errors
- * we must work around it by checking if the wr_id address
- * falls in the iser connection rx_descs buffer. If so
- * it is an RX descriptor, otherwize it is a TX.
- */
-static inline bool
-is_iser_tx_desc(struct iser_conn *iser_conn, void *wr_id)
-{
- void *start = iser_conn->rx_descs;
- int len = iser_conn->num_rx_descs * sizeof(*iser_conn->rx_descs);
-
- if (wr_id >= start && wr_id < start + len)
- return false;
-
- return true;
-}
-
-/**
- * iser_handle_comp_error() - Handle error completion
- * @ib_conn: connection RDMA resources
- * @wc: work completion
- *
- * Notes: We may handle a FLUSH error completion and in this case
- * we only cleanup in case TX type was DATAOUT. For non-FLUSH
- * error completion we should also notify iscsi layer that
- * connection is failed (in case we passed bind stage).
- */
-static void
-iser_handle_comp_error(struct ib_conn *ib_conn,
- struct ib_wc *wc)
-{
- void *wr_id = (void *)(uintptr_t)wc->wr_id;
- struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
- ib_conn);
-
- if (wc->status != IB_WC_WR_FLUSH_ERR)
- if (iser_conn->iscsi_conn)
- iscsi_conn_failure(iser_conn->iscsi_conn,
- ISCSI_ERR_CONN_FAILED);
-
- if (wc->wr_id == ISER_FASTREG_LI_WRID)
- return;
-
- if (is_iser_tx_desc(iser_conn, wr_id)) {
- struct iser_tx_desc *desc = wr_id;
-
- if (desc->type == ISCSI_TX_DATAOUT)
- kmem_cache_free(ig.desc_cache, desc);
- } else {
- ib_conn->post_recv_buf_count--;
- }
-}
-
-/**
- * iser_handle_wc - handle a single work completion
- * @wc: work completion
- *
- * Soft-IRQ context, work completion can be either
- * SEND or RECV, and can turn out successful or
- * with error (or flush error).
- */
-static void iser_handle_wc(struct ib_wc *wc)
-{
- struct ib_conn *ib_conn;
- struct iser_tx_desc *tx_desc;
- struct iser_rx_desc *rx_desc;
-
- ib_conn = wc->qp->qp_context;
- if (likely(wc->status == IB_WC_SUCCESS)) {
- if (wc->opcode == IB_WC_RECV) {
- rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
- iser_rcv_completion(rx_desc, wc->byte_len,
- ib_conn);
- } else
- if (wc->opcode == IB_WC_SEND) {
- tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
- iser_snd_completion(tx_desc, ib_conn);
- } else {
- iser_err("Unknown wc opcode %d\n", wc->opcode);
- }
- } else {
- if (wc->status != IB_WC_WR_FLUSH_ERR)
- iser_err("%s (%d): wr id %llx vend_err %x\n",
- ib_wc_status_msg(wc->status), wc->status,
- wc->wr_id, wc->vendor_err);
- else
- iser_dbg("%s (%d): wr id %llx\n",
- ib_wc_status_msg(wc->status), wc->status,
- wc->wr_id);
-
- if (wc->wr_id == ISER_BEACON_WRID)
- /* all flush errors were consumed */
- complete(&ib_conn->flush_comp);
- else
- iser_handle_comp_error(ib_conn, wc);
- }
-}
-
-/**
- * iser_cq_tasklet_fn - iSER completion polling loop
- * @data: iSER completion context
- *
- * Soft-IRQ context, polling connection CQ until
- * either CQ was empty or we exausted polling budget
- */
-static void iser_cq_tasklet_fn(unsigned long data)
-{
- struct iser_comp *comp = (struct iser_comp *)data;
- struct ib_cq *cq = comp->cq;
- struct ib_wc *const wcs = comp->wcs;
- int i, n, completed = 0;
-
- while ((n = ib_poll_cq(cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
- for (i = 0; i < n; i++)
- iser_handle_wc(&wcs[i]);
-
- completed += n;
- if (completed >= iser_cq_poll_limit)
- break;
- }
-
- /*
- * It is assumed here that arming CQ only once its empty
- * would not cause interrupts to be missed.
- */
- ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+ wr->send_flags = IB_SEND_SIGNALED;
- iser_dbg("got %d completions\n", completed);
-}
+ if (tx_desc->inv_wr.next)
+ first_wr = &tx_desc->inv_wr;
+ else if (tx_desc->reg_wr.wr.next)
+ first_wr = &tx_desc->reg_wr.wr;
+ else
+ first_wr = wr;
-static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
-{
- struct iser_comp *comp = cq_context;
+ ret = ib_post_send(ib_conn->qp, first_wr, NULL);
+ if (unlikely(ret))
+ iser_err("ib_post_send failed, ret:%d opcode:%d\n",
+ ret, wr->opcode);
- tasklet_schedule(&comp->tasklet);
+ return ret;
}
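For reference, the send chain assembled here (shape taken from the code above; the iser_tx_desc members are defined elsewhere in iscsi_iser.h):

/*
 *   tx_desc->inv_wr       (IB_WR_LOCAL_INV, only when a key must be invalidated)
 *     -> tx_desc->reg_wr.wr (IB_WR_REG_MR or IB_WR_REG_MR_INTEGRITY, only when registering)
 *       -> tx_desc->send_wr (IB_WR_SEND, always last)
 *
 * first_wr points at the left-most populated entry, so a single
 * ib_post_send() submits the whole chain in order.
 */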
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir, sector_t *sector)
{
struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
- struct iser_fr_desc *desc = reg->mem_h;
+ struct iser_fr_desc *desc = reg->desc;
unsigned long sector_size = iser_task->sc->device->sector_size;
struct ib_mr_status mr_status;
int ret;
- if (desc && desc->pi_ctx->sig_protected) {
- desc->pi_ctx->sig_protected = 0;
- ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
+ if (desc && desc->sig_protected) {
+ desc->sig_protected = false;
+ ret = ib_check_mr_status(desc->rsc.sig_mr,
IB_MR_CHECK_SIG_STATUS, &mr_status);
if (ret) {
- pr_err("ib_check_mr_status failed, ret %d\n", ret);
- goto err;
+ iser_err("ib_check_mr_status failed, ret %d\n", ret);
+ /* Not a lot we can do, return ambiguous guard error */
+ *sector = 0;
+ return 0x1;
}
if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
sector_t sector_off = mr_status.sig_err.sig_err_offset;
- do_div(sector_off, sector_size + 8);
- *sector = scsi_get_lba(iser_task->sc) + sector_off;
+ sector_div(sector_off, sector_size + 8);
+ *sector = scsi_get_sector(iser_task->sc) + sector_off;
- pr_err("PI error found type %d at sector %llx "
+ iser_err("PI error found type %d at sector %llx "
"expected %x vs actual %x\n",
mr_status.sig_err.err_type,
(unsigned long long)*sector,
@@ -1324,7 +931,22 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
}
return 0;
-err:
- /* Not alot we can do here, return ambiguous guard error */
- return 0x1;
+}
+
+void iser_err_comp(struct ib_wc *wc, const char *type)
+{
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ struct iser_conn *iser_conn = to_iser_conn(wc->qp->qp_context);
+
+ iser_err("%s failure: %s (%d) vend_err %#x\n", type,
+ ib_wc_status_msg(wc->status), wc->status,
+ wc->vendor_err);
+
+ if (iser_conn->iscsi_conn)
+ iscsi_conn_failure(iser_conn->iscsi_conn,
+ ISCSI_ERR_CONN_FAILED);
+ } else {
+ iser_dbg("%s failure: %s (%d)\n", type,
+ ib_wc_status_msg(wc->status), wc->status);
+ }
}
diff --git a/drivers/infiniband/ulp/isert/Kconfig b/drivers/infiniband/ulp/isert/Kconfig
index 02f9759ebb1a..798147adbef1 100644
--- a/drivers/infiniband/ulp/isert/Kconfig
+++ b/drivers/infiniband/ulp/isert/Kconfig
@@ -1,5 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_ISERT
tristate "iSCSI Extensions for RDMA (iSER) target support"
depends on INET && INFINIBAND_ADDR_TRANS && TARGET_CORE && ISCSI_TARGET
- ---help---
+ help
Support for iSCSI Extensions for RDMA (iSER) Target on Infiniband fabrics.
diff --git a/drivers/infiniband/ulp/isert/Makefile b/drivers/infiniband/ulp/isert/Makefile
index c8bf2421f5bc..e19b16cafda7 100644
--- a/drivers/infiniband/ulp/isert/Makefile
+++ b/drivers/infiniband/ulp/isert/Makefile
@@ -1,2 +1,2 @@
-ccflags-y := -Idrivers/target -Idrivers/target/iscsi
+# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_INFINIBAND_ISERT) += ib_isert.o
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index aa59037d7504..42977a5326ee 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains iSCSI extensions for RDMA (iSER) Verbs
*
@@ -5,15 +6,6 @@
*
* Nicholas A. Bellinger <nab@linux-iscsi.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
****************************************************************************/
#include <linux/string.h>
@@ -23,49 +15,62 @@
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
+#include <rdma/ib_cm.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <linux/semaphore.h>
-#include "isert_proto.h"
#include "ib_isert.h"
-#define ISERT_MAX_CONN 8
-#define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
-#define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)
-#define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
- ISERT_MAX_CONN)
-
static int isert_debug_level;
module_param_named(debug_level, isert_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
+static int isert_sg_tablesize_set(const char *val,
+ const struct kernel_param *kp);
+static const struct kernel_param_ops sg_tablesize_ops = {
+ .set = isert_sg_tablesize_set,
+ .get = param_get_int,
+};
+
+static int isert_sg_tablesize = ISCSI_ISER_MIN_SG_TABLESIZE;
+module_param_cb(sg_tablesize, &sg_tablesize_ops, &isert_sg_tablesize, 0644);
+MODULE_PARM_DESC(sg_tablesize,
+ "Number of gather/scatter entries in a single scsi command, should >= 128 (default: 128, max: 4096)");
+
static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
+static struct workqueue_struct *isert_login_wq;
static struct workqueue_struct *isert_comp_wq;
static struct workqueue_struct *isert_release_wq;
-static void
-isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
-isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
- struct isert_rdma_wr *wr);
-static void
-isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
-static int
-isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
- struct isert_rdma_wr *wr);
-static int
-isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
+isert_put_response(struct iscsit_conn *conn, struct iscsit_cmd *cmd);
static int
-isert_rdma_post_recvl(struct isert_conn *isert_conn);
+isert_login_post_recv(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
static void isert_release_work(struct work_struct *work);
+static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
+static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc);
+
+static int isert_sg_tablesize_set(const char *val, const struct kernel_param *kp)
+{
+ int n = 0, ret;
+
+ ret = kstrtoint(val, 10, &n);
+ if (ret != 0 || n < ISCSI_ISER_MIN_SG_TABLESIZE ||
+ n > ISCSI_ISER_MAX_SG_TABLESIZE)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
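
The sg_tablesize tunable above is registered through module_param_cb() with custom kernel_param_ops so that out-of-range writes are rejected with -EINVAL at write time instead of being accepted and clamped later. A generic sketch of that bounded-parameter pattern follows, assuming illustrative names (my_value, MY_MIN, MY_MAX); module_param_cb(), param_set_int() and param_get_int() are the kernel interfaces actually used above.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

#define MY_MIN 128	/* assumed bounds, for illustration only */
#define MY_MAX 4096

static int my_value = MY_MIN;

static int my_value_set(const char *val, const struct kernel_param *kp)
{
	int n, ret;

	ret = kstrtoint(val, 10, &n);
	if (ret || n < MY_MIN || n > MY_MAX)
		return -EINVAL;

	return param_set_int(val, kp);	/* store via the stock setter */
}

static const struct kernel_param_ops my_value_ops = {
	.set = my_value_set,
	.get = param_get_int,
};

module_param_cb(my_value, &my_value_ops, &my_value, 0644);
MODULE_PARM_DESC(my_value, "Bounded tunable (128..4096), sketch only");
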
static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
@@ -74,7 +79,6 @@ isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
cmd->prot_op != TARGET_PROT_NORMAL);
}
-
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
@@ -87,95 +91,51 @@ isert_qp_event_callback(struct ib_event *e, void *context)
case IB_EVENT_COMM_EST:
rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
break;
- case IB_EVENT_QP_LAST_WQE_REACHED:
- isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
- break;
default:
break;
}
}
-static int
-isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
-{
- int ret;
-
- ret = ib_query_device(ib_dev, devattr);
- if (ret) {
- isert_err("ib_query_device() failed: %d\n", ret);
- return ret;
- }
- isert_dbg("devattr->max_sge: %d\n", devattr->max_sge);
- isert_dbg("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
-
- return 0;
-}
-
-static struct isert_comp *
-isert_comp_get(struct isert_conn *isert_conn)
-{
- struct isert_device *device = isert_conn->device;
- struct isert_comp *comp;
- int i, min = 0;
-
- mutex_lock(&device_list_mutex);
- for (i = 0; i < device->comps_used; i++)
- if (device->comps[i].active_qps <
- device->comps[min].active_qps)
- min = i;
- comp = &device->comps[min];
- comp->active_qps++;
- mutex_unlock(&device_list_mutex);
-
- isert_info("conn %p, using comp %p min_index: %d\n",
- isert_conn, comp, min);
-
- return comp;
-}
-
-static void
-isert_comp_put(struct isert_comp *comp)
-{
- mutex_lock(&device_list_mutex);
- comp->active_qps--;
- mutex_unlock(&device_list_mutex);
-}
-
static struct ib_qp *
isert_create_qp(struct isert_conn *isert_conn,
- struct isert_comp *comp,
struct rdma_cm_id *cma_id)
{
+ u32 cq_size = ISERT_QP_MAX_REQ_DTOS + ISERT_QP_MAX_RECV_DTOS + 2;
struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
struct ib_qp_init_attr attr;
- int ret;
+ int ret, factor;
+
+ isert_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_WORKQUEUE);
+ if (IS_ERR(isert_conn->cq)) {
+ isert_err("Unable to allocate cq\n");
+ ret = PTR_ERR(isert_conn->cq);
+ return ERR_PTR(ret);
+ }
+ isert_conn->cq_size = cq_size;
memset(&attr, 0, sizeof(struct ib_qp_init_attr));
attr.event_handler = isert_qp_event_callback;
attr.qp_context = isert_conn;
- attr.send_cq = comp->cq;
- attr.recv_cq = comp->cq;
- attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
+ attr.send_cq = isert_conn->cq;
+ attr.recv_cq = isert_conn->cq;
+ attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
- /*
- * FIXME: Use devattr.max_sge - 2 for max_send_sge as
- * work-around for RDMA_READs with ConnectX-2.
- *
- * Also, still make sure to have at least two SGEs for
- * outgoing control PDU responses.
- */
- attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
- isert_conn->max_sge = attr.cap.max_send_sge;
-
+ factor = rdma_rw_mr_factor(device->ib_device, cma_id->port_num,
+ isert_sg_tablesize);
+ attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX * factor;
+ attr.cap.max_send_sge = device->ib_device->attrs.max_send_sge;
attr.cap.max_recv_sge = 1;
attr.sq_sig_type = IB_SIGNAL_REQ_WR;
attr.qp_type = IB_QPT_RC;
if (device->pi_capable)
- attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
+ attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
ret = rdma_create_qp(cma_id, device->pd, &attr);
if (ret) {
isert_err("rdma_create_qp failed for cma_id %d\n", ret);
+ ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size);
+
return ERR_PTR(ret);
}
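
isert_create_qp() now draws a per-connection completion queue from the shared CQ pool and sizes max_rdma_ctxs from rdma_rw_mr_factor() and the configurable sg_tablesize, so the error path must return the CQ to the pool with the same size it reserved. A hedged sketch of that get/put pairing follows; my_get_cq()/my_put_cq() are illustrative wrappers around the in-kernel ib_cq_pool_get()/ib_cq_pool_put() helpers used above.

#include <rdma/ib_verbs.h>

static struct ib_cq *my_get_cq(struct ib_device *dev, u32 cq_size)
{
	struct ib_cq *cq;

	/* -1 lets the pool pick any completion vector. */
	cq = ib_cq_pool_get(dev, cq_size, -1, IB_POLL_WORKQUEUE);
	if (IS_ERR(cq))
		return cq;

	/* ...create a QP with send_cq == recv_cq == cq... */
	return cq;
}

static void my_put_cq(struct ib_cq *cq, u32 cq_size)
{
	/* Must pass back the same size that was reserved on get. */
	ib_cq_pool_put(cq, cq_size);
}
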
@@ -183,31 +143,6 @@ isert_create_qp(struct isert_conn *isert_conn,
}
static int
-isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
-{
- struct isert_comp *comp;
- int ret;
-
- comp = isert_comp_get(isert_conn);
- isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id);
- if (IS_ERR(isert_conn->qp)) {
- ret = PTR_ERR(isert_conn->qp);
- goto err;
- }
-
- return 0;
-err:
- isert_comp_put(comp);
- return ret;
-}
-
-static void
-isert_cq_event_callback(struct ib_event *e, void *context)
-{
- isert_dbg("event: %d\n", e->event);
-}
-
-static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
struct isert_device *device = isert_conn->device;
@@ -217,25 +152,27 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
u64 dma_addr;
int i, j;
- isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
- sizeof(struct iser_rx_desc), GFP_KERNEL);
+ isert_conn->rx_descs = kcalloc(ISERT_QP_MAX_RECV_DTOS,
+ sizeof(struct iser_rx_desc),
+ GFP_KERNEL);
if (!isert_conn->rx_descs)
- goto fail;
+ return -ENOMEM;
rx_desc = isert_conn->rx_descs;
for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
- dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ dma_addr = ib_dma_map_single(ib_dev, rx_desc->buf,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
if (ib_dma_mapping_error(ib_dev, dma_addr))
goto dma_map_fail;
rx_desc->dma_addr = dma_addr;
rx_sg = &rx_desc->rx_sg;
- rx_sg->addr = rx_desc->dma_addr;
+ rx_sg->addr = rx_desc->dma_addr + isert_get_hdr_offset(rx_desc);
rx_sg->length = ISER_RX_PAYLOAD_SIZE;
rx_sg->lkey = device->pd->local_dma_lkey;
+ rx_desc->rx_cqe.done = isert_recv_done;
}
return 0;
@@ -244,13 +181,11 @@ dma_map_fail:
rx_desc = isert_conn->rx_descs;
for (j = 0; j < i; j++, rx_desc++) {
ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
}
kfree(isert_conn->rx_descs);
isert_conn->rx_descs = NULL;
-fail:
isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
-
return -ENOMEM;
}
@@ -267,131 +202,38 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
rx_desc = isert_conn->rx_descs;
for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
}
kfree(isert_conn->rx_descs);
isert_conn->rx_descs = NULL;
}
-static void isert_cq_work(struct work_struct *);
-static void isert_cq_callback(struct ib_cq *, void *);
-
-static void
-isert_free_comps(struct isert_device *device)
-{
- int i;
-
- for (i = 0; i < device->comps_used; i++) {
- struct isert_comp *comp = &device->comps[i];
-
- if (comp->cq) {
- cancel_work_sync(&comp->work);
- ib_destroy_cq(comp->cq);
- }
- }
- kfree(device->comps);
-}
-
-static int
-isert_alloc_comps(struct isert_device *device,
- struct ib_device_attr *attr)
-{
- int i, max_cqe, ret = 0;
-
- device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
- device->ib_device->num_comp_vectors));
-
- isert_info("Using %d CQs, %s supports %d vectors support "
- "Fast registration %d pi_capable %d\n",
- device->comps_used, device->ib_device->name,
- device->ib_device->num_comp_vectors, device->use_fastreg,
- device->pi_capable);
-
- device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
- GFP_KERNEL);
- if (!device->comps) {
- isert_err("Unable to allocate completion contexts\n");
- return -ENOMEM;
- }
-
- max_cqe = min(ISER_MAX_CQ_LEN, attr->max_cqe);
-
- for (i = 0; i < device->comps_used; i++) {
- struct ib_cq_init_attr cq_attr = {};
- struct isert_comp *comp = &device->comps[i];
-
- comp->device = device;
- INIT_WORK(&comp->work, isert_cq_work);
- cq_attr.cqe = max_cqe;
- cq_attr.comp_vector = i;
- comp->cq = ib_create_cq(device->ib_device,
- isert_cq_callback,
- isert_cq_event_callback,
- (void *)comp,
- &cq_attr);
- if (IS_ERR(comp->cq)) {
- isert_err("Unable to allocate cq\n");
- ret = PTR_ERR(comp->cq);
- comp->cq = NULL;
- goto out_cq;
- }
-
- ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
- if (ret)
- goto out_cq;
- }
-
- return 0;
-out_cq:
- isert_free_comps(device);
- return ret;
-}
-
static int
isert_create_device_ib_res(struct isert_device *device)
{
- struct ib_device_attr *dev_attr;
+ struct ib_device *ib_dev = device->ib_device;
int ret;
- dev_attr = &device->dev_attr;
- ret = isert_query_device(device->ib_device, dev_attr);
- if (ret)
- return ret;
-
- /* asign function handlers */
- if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
- dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
- device->use_fastreg = 1;
- device->reg_rdma_mem = isert_reg_rdma;
- device->unreg_rdma_mem = isert_unreg_rdma;
- } else {
- device->use_fastreg = 0;
- device->reg_rdma_mem = isert_map_rdma;
- device->unreg_rdma_mem = isert_unmap_cmd;
- }
+ isert_dbg("devattr->max_send_sge: %d devattr->max_recv_sge %d\n",
+ ib_dev->attrs.max_send_sge, ib_dev->attrs.max_recv_sge);
+ isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd);
- ret = isert_alloc_comps(device, dev_attr);
- if (ret)
- return ret;
-
- device->pd = ib_alloc_pd(device->ib_device);
+ device->pd = ib_alloc_pd(ib_dev, 0);
if (IS_ERR(device->pd)) {
ret = PTR_ERR(device->pd);
isert_err("failed to allocate pd, device %p, ret=%d\n",
device, ret);
- goto out_cq;
+ return ret;
}
/* Check signature cap */
- device->pi_capable = dev_attr->device_cap_flags &
- IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
+ if (ib_dev->attrs.kernel_cap_flags & IBK_INTEGRITY_HANDOVER)
+ device->pi_capable = true;
+ else
+ device->pi_capable = false;
return 0;
-
-out_cq:
- isert_free_comps(device);
- return ret;
}
static void
@@ -400,7 +242,6 @@ isert_free_device_ib_res(struct isert_device *device)
isert_info("device %p\n", device);
ib_dealloc_pd(device->pd);
- isert_free_comps(device);
}
static void
@@ -460,186 +301,15 @@ isert_device_get(struct rdma_cm_id *cma_id)
}
static void
-isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
-{
- struct fast_reg_descriptor *fr_desc, *tmp;
- int i = 0;
-
- if (list_empty(&isert_conn->fr_pool))
- return;
-
- isert_info("Freeing conn %p fastreg pool", isert_conn);
-
- list_for_each_entry_safe(fr_desc, tmp,
- &isert_conn->fr_pool, list) {
- list_del(&fr_desc->list);
- ib_free_fast_reg_page_list(fr_desc->data_frpl);
- ib_dereg_mr(fr_desc->data_mr);
- if (fr_desc->pi_ctx) {
- ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
- ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
- ib_dereg_mr(fr_desc->pi_ctx->sig_mr);
- kfree(fr_desc->pi_ctx);
- }
- kfree(fr_desc);
- ++i;
- }
-
- if (i < isert_conn->fr_pool_size)
- isert_warn("Pool still has %d regions registered\n",
- isert_conn->fr_pool_size - i);
-}
-
-static int
-isert_create_pi_ctx(struct fast_reg_descriptor *desc,
- struct ib_device *device,
- struct ib_pd *pd)
-{
- struct pi_context *pi_ctx;
- int ret;
-
- pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
- if (!pi_ctx) {
- isert_err("Failed to allocate pi context\n");
- return -ENOMEM;
- }
-
- pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
- ISCSI_ISER_SG_TABLESIZE);
- if (IS_ERR(pi_ctx->prot_frpl)) {
- isert_err("Failed to allocate prot frpl err=%ld\n",
- PTR_ERR(pi_ctx->prot_frpl));
- ret = PTR_ERR(pi_ctx->prot_frpl);
- goto err_pi_ctx;
- }
-
- pi_ctx->prot_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
- ISCSI_ISER_SG_TABLESIZE);
- if (IS_ERR(pi_ctx->prot_mr)) {
- isert_err("Failed to allocate prot frmr err=%ld\n",
- PTR_ERR(pi_ctx->prot_mr));
- ret = PTR_ERR(pi_ctx->prot_mr);
- goto err_prot_frpl;
- }
- desc->ind |= ISERT_PROT_KEY_VALID;
-
- pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
- if (IS_ERR(pi_ctx->sig_mr)) {
- isert_err("Failed to allocate signature enabled mr err=%ld\n",
- PTR_ERR(pi_ctx->sig_mr));
- ret = PTR_ERR(pi_ctx->sig_mr);
- goto err_prot_mr;
- }
-
- desc->pi_ctx = pi_ctx;
- desc->ind |= ISERT_SIG_KEY_VALID;
- desc->ind &= ~ISERT_PROTECTED;
-
- return 0;
-
-err_prot_mr:
- ib_dereg_mr(pi_ctx->prot_mr);
-err_prot_frpl:
- ib_free_fast_reg_page_list(pi_ctx->prot_frpl);
-err_pi_ctx:
- kfree(pi_ctx);
-
- return ret;
-}
-
-static int
-isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
- struct fast_reg_descriptor *fr_desc)
-{
- int ret;
-
- fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
- ISCSI_ISER_SG_TABLESIZE);
- if (IS_ERR(fr_desc->data_frpl)) {
- isert_err("Failed to allocate data frpl err=%ld\n",
- PTR_ERR(fr_desc->data_frpl));
- return PTR_ERR(fr_desc->data_frpl);
- }
-
- fr_desc->data_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
- ISCSI_ISER_SG_TABLESIZE);
- if (IS_ERR(fr_desc->data_mr)) {
- isert_err("Failed to allocate data frmr err=%ld\n",
- PTR_ERR(fr_desc->data_mr));
- ret = PTR_ERR(fr_desc->data_mr);
- goto err_data_frpl;
- }
- fr_desc->ind |= ISERT_DATA_KEY_VALID;
-
- isert_dbg("Created fr_desc %p\n", fr_desc);
-
- return 0;
-
-err_data_frpl:
- ib_free_fast_reg_page_list(fr_desc->data_frpl);
-
- return ret;
-}
-
-static int
-isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
-{
- struct fast_reg_descriptor *fr_desc;
- struct isert_device *device = isert_conn->device;
- struct se_session *se_sess = isert_conn->conn->sess->se_sess;
- struct se_node_acl *se_nacl = se_sess->se_node_acl;
- int i, ret, tag_num;
- /*
- * Setup the number of FRMRs based upon the number of tags
- * available to session in iscsi_target_locate_portal().
- */
- tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
- tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
-
- isert_conn->fr_pool_size = 0;
- for (i = 0; i < tag_num; i++) {
- fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
- if (!fr_desc) {
- isert_err("Failed to allocate fast_reg descriptor\n");
- ret = -ENOMEM;
- goto err;
- }
-
- ret = isert_create_fr_desc(device->ib_device,
- device->pd, fr_desc);
- if (ret) {
- isert_err("Failed to create fastreg descriptor err=%d\n",
- ret);
- kfree(fr_desc);
- goto err;
- }
-
- list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
- isert_conn->fr_pool_size++;
- }
-
- isert_dbg("Creating conn %p fastreg pool size=%d",
- isert_conn, isert_conn->fr_pool_size);
-
- return 0;
-
-err:
- isert_conn_free_fastreg_pool(isert_conn);
- return ret;
-}
-
-static void
isert_init_conn(struct isert_conn *isert_conn)
{
isert_conn->state = ISER_CONN_INIT;
INIT_LIST_HEAD(&isert_conn->node);
init_completion(&isert_conn->login_comp);
init_completion(&isert_conn->login_req_comp);
- init_completion(&isert_conn->wait);
+ init_waitqueue_head(&isert_conn->rem_wait);
kref_init(&isert_conn->kref);
mutex_init(&isert_conn->mutex);
- spin_lock_init(&isert_conn->pool_lock);
- INIT_LIST_HEAD(&isert_conn->fr_pool);
INIT_WORK(&isert_conn->release_work, isert_release_work);
}
@@ -649,11 +319,12 @@ isert_free_login_buf(struct isert_conn *isert_conn)
struct ib_device *ib_dev = isert_conn->device->ib_device;
ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
- ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
- ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
- ISCSI_DEF_MAX_RECV_SEG_LEN,
- DMA_FROM_DEVICE);
- kfree(isert_conn->login_buf);
+ ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
+ kfree(isert_conn->login_rsp_buf);
+
+ ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
+ kfree(isert_conn->login_desc);
}
static int
@@ -662,53 +333,82 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,
{
int ret;
- isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
- ISER_RX_LOGIN_SIZE, GFP_KERNEL);
- if (!isert_conn->login_buf) {
- isert_err("Unable to allocate isert_conn->login_buf\n");
+ isert_conn->login_desc = kzalloc(sizeof(*isert_conn->login_desc),
+ GFP_KERNEL);
+ if (!isert_conn->login_desc)
return -ENOMEM;
- }
-
- isert_conn->login_req_buf = isert_conn->login_buf;
- isert_conn->login_rsp_buf = isert_conn->login_buf +
- ISCSI_DEF_MAX_RECV_SEG_LEN;
-
- isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
- isert_conn->login_buf, isert_conn->login_req_buf,
- isert_conn->login_rsp_buf);
-
- isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
- (void *)isert_conn->login_req_buf,
- ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
- ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
+ isert_conn->login_desc->dma_addr = ib_dma_map_single(ib_dev,
+ isert_conn->login_desc->buf,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
+ ret = ib_dma_mapping_error(ib_dev, isert_conn->login_desc->dma_addr);
if (ret) {
- isert_err("login_req_dma mapping error: %d\n", ret);
- isert_conn->login_req_dma = 0;
- goto out_login_buf;
+ isert_err("login_desc dma mapping error: %d\n", ret);
+ isert_conn->login_desc->dma_addr = 0;
+ goto out_free_login_desc;
}
- isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
- (void *)isert_conn->login_rsp_buf,
- ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
+ isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
+ if (!isert_conn->login_rsp_buf) {
+ ret = -ENOMEM;
+ goto out_unmap_login_desc;
+ }
+ isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
+ isert_conn->login_rsp_buf,
+ ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
if (ret) {
isert_err("login_rsp_dma mapping error: %d\n", ret);
isert_conn->login_rsp_dma = 0;
- goto out_req_dma_map;
+ goto out_free_login_rsp_buf;
}
return 0;
-out_req_dma_map:
- ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
- ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
-out_login_buf:
- kfree(isert_conn->login_buf);
+out_free_login_rsp_buf:
+ kfree(isert_conn->login_rsp_buf);
+out_unmap_login_desc:
+ ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
+out_free_login_desc:
+ kfree(isert_conn->login_desc);
return ret;
}
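
The reworked login-buffer setup allocates the RX descriptor and the response buffer separately, DMA-maps each one, and unwinds in strict reverse order on failure. A small sketch of that allocate/map/unwind shape, assuming placeholder names (my_alloc_mapped_buf, MY_SIZE); ib_dma_map_single() and ib_dma_mapping_error() are the verbs helpers used above.

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

#define MY_SIZE 8192	/* placeholder buffer size */

static int my_alloc_mapped_buf(struct ib_device *dev, void **buf, u64 *dma)
{
	int ret;

	*buf = kzalloc(MY_SIZE, GFP_KERNEL);
	if (!*buf)
		return -ENOMEM;

	*dma = ib_dma_map_single(dev, *buf, MY_SIZE, DMA_FROM_DEVICE);
	ret = ib_dma_mapping_error(dev, *dma);
	if (ret)
		goto out_free;	/* unwind only what succeeded so far */

	return 0;

out_free:
	kfree(*buf);
	*buf = NULL;
	return ret;
}
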
+static void
+isert_set_nego_params(struct isert_conn *isert_conn,
+ struct rdma_conn_param *param)
+{
+ struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs;
+
+ /* Set max inflight RDMA READ requests */
+ isert_conn->initiator_depth = min_t(u8, param->initiator_depth,
+ attr->max_qp_init_rd_atom);
+ isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);
+
+ if (param->private_data) {
+ u8 flags = *(u8 *)param->private_data;
+
+ /*
+ * use remote invalidation if the both initiator
+ * and the HCA support it
+ */
+ isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) &&
+ (attr->device_cap_flags &
+ IB_DEVICE_MEM_MGT_EXTENSIONS);
+ if (isert_conn->snd_w_inv)
+ isert_info("Using remote invalidation\n");
+ }
+}
+
+static void
+isert_destroy_qp(struct isert_conn *isert_conn)
+{
+ ib_destroy_qp(isert_conn->qp);
+ ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size);
+}
+
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
@@ -722,7 +422,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
if (!np->enabled) {
spin_unlock_bh(&np->np_thread_lock);
isert_dbg("iscsi_np is not enabled, reject connect request\n");
- return rdma_reject(cma_id, NULL, 0);
+ return rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
}
spin_unlock_bh(&np->np_thread_lock);
@@ -736,34 +436,32 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
isert_init_conn(isert_conn);
isert_conn->cm_id = cma_id;
- ret = isert_alloc_login_buf(isert_conn, cma_id->device);
- if (ret)
- goto out;
-
device = isert_device_get(cma_id);
if (IS_ERR(device)) {
ret = PTR_ERR(device);
- goto out_rsp_dma_map;
+ goto out;
}
isert_conn->device = device;
- /* Set max inflight RDMA READ requests */
- isert_conn->initiator_depth = min_t(u8,
- event->param.conn.initiator_depth,
- device->dev_attr.max_qp_init_rd_atom);
- isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);
-
- ret = isert_conn_setup_qp(isert_conn, cma_id);
+ ret = isert_alloc_login_buf(isert_conn, cma_id->device);
if (ret)
goto out_conn_dev;
- ret = isert_rdma_post_recvl(isert_conn);
+ isert_set_nego_params(isert_conn, &event->param.conn);
+
+ isert_conn->qp = isert_create_qp(isert_conn, cma_id);
+ if (IS_ERR(isert_conn->qp)) {
+ ret = PTR_ERR(isert_conn->qp);
+ goto out_rsp_dma_map;
+ }
+
+ ret = isert_login_post_recv(isert_conn);
if (ret)
- goto out_conn_dev;
+ goto out_destroy_qp;
ret = isert_rdma_accept(isert_conn);
if (ret)
- goto out_conn_dev;
+ goto out_destroy_qp;
mutex_lock(&isert_np->mutex);
list_add_tail(&isert_conn->node, &isert_np->accepted);
@@ -771,13 +469,15 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
return 0;
-out_conn_dev:
- isert_device_put(device);
+out_destroy_qp:
+ isert_destroy_qp(isert_conn);
out_rsp_dma_map:
isert_free_login_buf(isert_conn);
+out_conn_dev:
+ isert_device_put(device);
out:
kfree(isert_conn);
- rdma_reject(cma_id, NULL, 0);
+ rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
return ret;
}
@@ -790,26 +490,23 @@ isert_connect_release(struct isert_conn *isert_conn)
BUG_ON(!device);
- if (device->use_fastreg)
- isert_conn_free_fastreg_pool(isert_conn);
-
isert_free_rx_descriptors(isert_conn);
- if (isert_conn->cm_id)
+ if (isert_conn->cm_id &&
+ !isert_conn->dev_removed)
rdma_destroy_id(isert_conn->cm_id);
- if (isert_conn->qp) {
- struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context;
+ if (isert_conn->qp)
+ isert_destroy_qp(isert_conn);
- isert_comp_put(comp);
- ib_destroy_qp(isert_conn->qp);
- }
-
- if (isert_conn->login_buf)
+ if (isert_conn->login_desc)
isert_free_login_buf(isert_conn);
isert_device_put(device);
- kfree(isert_conn);
+ if (isert_conn->dev_removed)
+ wake_up_interruptible(&isert_conn->rem_wait);
+ else
+ kfree(isert_conn);
}
static void
@@ -851,12 +548,30 @@ isert_put_conn(struct isert_conn *isert_conn)
kref_put(&isert_conn->kref, isert_release_kref);
}
+static void
+isert_handle_unbound_conn(struct isert_conn *isert_conn)
+{
+ struct isert_np *isert_np = isert_conn->cm_id->context;
+
+ mutex_lock(&isert_np->mutex);
+ if (!list_empty(&isert_conn->node)) {
+ /*
+ * This means iscsi doesn't know this connection
+ * so schedule a cleanup ourselves
+ */
+ list_del_init(&isert_conn->node);
+ isert_put_conn(isert_conn);
+ queue_work(isert_release_wq, &isert_conn->release_work);
+ }
+ mutex_unlock(&isert_np->mutex);
+}
+
/**
* isert_conn_terminate() - Initiate connection termination
* @isert_conn: isert connection struct
*
* Notes:
- * In case the connection state is FULL_FEATURE, move state
+ * In case the connection state is BOUND, move state
* to TEMINATING and start teardown sequence (rdma_disconnect).
* In case the connection state is UP, complete flush as well.
*
@@ -868,23 +583,16 @@ isert_conn_terminate(struct isert_conn *isert_conn)
{
int err;
- switch (isert_conn->state) {
- case ISER_CONN_TERMINATING:
- break;
- case ISER_CONN_UP:
- case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
- isert_info("Terminating conn %p state %d\n",
- isert_conn, isert_conn->state);
- isert_conn->state = ISER_CONN_TERMINATING;
- err = rdma_disconnect(isert_conn->cm_id);
- if (err)
- isert_warn("Failed rdma_disconnect isert_conn %p\n",
- isert_conn);
- break;
- default:
- isert_warn("conn %p teminating in state %d\n",
- isert_conn, isert_conn->state);
- }
+ if (isert_conn->state >= ISER_CONN_TERMINATING)
+ return;
+
+ isert_info("Terminating conn %p state %d\n",
+ isert_conn, isert_conn->state);
+ isert_conn->state = ISER_CONN_TERMINATING;
+ err = rdma_disconnect(isert_conn->cm_id);
+ if (err)
+ isert_warn("Failed rdma_disconnect isert_conn %p\n",
+ isert_conn);
}
static int
@@ -918,35 +626,27 @@ static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
enum rdma_cm_event_type event)
{
- struct isert_np *isert_np = cma_id->context;
- struct isert_conn *isert_conn;
- bool terminating = false;
-
- if (isert_np->cm_id == cma_id)
- return isert_np_cma_handler(cma_id->context, event);
-
- isert_conn = cma_id->qp->qp_context;
+ struct isert_conn *isert_conn = cma_id->qp->qp_context;
mutex_lock(&isert_conn->mutex);
- terminating = (isert_conn->state == ISER_CONN_TERMINATING);
- isert_conn_terminate(isert_conn);
- mutex_unlock(&isert_conn->mutex);
-
- isert_info("conn %p completing wait\n", isert_conn);
- complete(&isert_conn->wait);
-
- if (terminating)
- goto out;
-
- mutex_lock(&isert_np->mutex);
- if (!list_empty(&isert_conn->node)) {
- list_del_init(&isert_conn->node);
- isert_put_conn(isert_conn);
- queue_work(isert_release_wq, &isert_conn->release_work);
+ switch (isert_conn->state) {
+ case ISER_CONN_TERMINATING:
+ break;
+ case ISER_CONN_UP:
+ isert_conn_terminate(isert_conn);
+ ib_drain_qp(isert_conn->qp);
+ isert_handle_unbound_conn(isert_conn);
+ break;
+ case ISER_CONN_BOUND:
+ case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ break;
+ default:
+ isert_warn("conn %p terminating in state %d\n",
+ isert_conn, isert_conn->state);
}
- mutex_unlock(&isert_np->mutex);
+ mutex_unlock(&isert_conn->mutex);
-out:
return 0;
}
@@ -954,8 +654,13 @@ static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
struct isert_conn *isert_conn = cma_id->qp->qp_context;
+ struct isert_np *isert_np = cma_id->context;
+
+ ib_drain_qp(isert_conn->qp);
+ mutex_lock(&isert_np->mutex);
list_del_init(&isert_conn->node);
+ mutex_unlock(&isert_np->mutex);
isert_conn->cm_id = NULL;
isert_put_conn(isert_conn);
@@ -965,12 +670,17 @@ isert_connect_error(struct rdma_cm_id *cma_id)
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
+ struct isert_np *isert_np = cma_id->context;
+ struct isert_conn *isert_conn;
int ret = 0;
isert_info("%s (%d): status %d id %p np %p\n",
rdma_event_msg(event->event), event->event,
event->status, cma_id, cma_id->context);
+ if (isert_np->cm_id == cma_id)
+ return isert_np_cma_handler(cma_id->context, event->event);
+
switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST:
ret = isert_connect_request(cma_id, event);
@@ -980,14 +690,28 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
case RDMA_CM_EVENT_ESTABLISHED:
isert_connected_handler(cma_id);
break;
- case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
- case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
- case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
ret = isert_disconnected_handler(cma_id, event->event);
break;
- case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
- case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ isert_conn = cma_id->qp->qp_context;
+ isert_conn->dev_removed = true;
+ isert_disconnected_handler(cma_id, event->event);
+ wait_event_interruptible(isert_conn->rem_wait,
+ isert_conn->state == ISER_CONN_DOWN);
+ kfree(isert_conn);
+ /*
+ * return non-zero from the callback to destroy
+ * the rdma cm id
+ */
+ return 1;
+ case RDMA_CM_EVENT_REJECTED:
+ isert_info("Connection rejected: %s\n",
+ rdma_reject_msg(cma_id, event->status));
+ fallthrough;
+ case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_CONNECT_ERROR:
ret = isert_connect_error(cma_id);
break;
@@ -1002,27 +726,25 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
static int
isert_post_recvm(struct isert_conn *isert_conn, u32 count)
{
- struct ib_recv_wr *rx_wr, *rx_wr_failed;
+ struct ib_recv_wr *rx_wr;
int i, ret;
struct iser_rx_desc *rx_desc;
for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
rx_desc = &isert_conn->rx_descs[i];
- rx_wr->wr_id = (uintptr_t)rx_desc;
+
+ rx_wr->wr_cqe = &rx_desc->rx_cqe;
rx_wr->sg_list = &rx_desc->rx_sg;
rx_wr->num_sge = 1;
rx_wr->next = rx_wr + 1;
+ rx_desc->in_use = false;
}
rx_wr--;
rx_wr->next = NULL; /* mark end of work requests list */
- isert_conn->post_recv_buf_count += count;
- ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
- &rx_wr_failed);
- if (ret) {
+ ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, NULL);
+ if (ret)
isert_err("ib_post_recv() failed with ret: %d\n", ret);
- isert_conn->post_recv_buf_count -= count;
- }
return ret;
}
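
This hunk is part of the series-wide move from opaque wr_id cookies to the ib_cqe completion model: each receive descriptor embeds a struct ib_cqe whose ->done callback the CQ handler invokes directly, and the callback recovers its descriptor with container_of(). A minimal sketch of that pattern, with my_desc/my_recv_done/my_post_recv as illustrative names rather than anything from this driver:

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

struct my_desc {
	struct ib_cqe	cqe;	/* embedded completion cookie */
	struct ib_sge	sge;
	/* ...payload buffer, DMA address, etc... */
};

static void my_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_desc *desc = container_of(wc->wr_cqe, struct my_desc, cqe);

	if (wc->status != IB_WC_SUCCESS)
		return;	/* error/flush handling elided in this sketch */

	/* process the data described by desc->sge here */
}

static int my_post_recv(struct ib_qp *qp, struct my_desc *desc)
{
	struct ib_recv_wr wr = {};

	desc->cqe.done = my_recv_done;
	wr.wr_cqe = &desc->cqe;		/* replaces the old wr_id cookie */
	wr.sg_list = &desc->sge;
	wr.num_sge = 1;

	return ib_post_recv(qp, &wr, NULL);
}
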
@@ -1030,42 +752,50 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
static int
isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
{
- struct ib_recv_wr *rx_wr_failed, rx_wr;
+ struct ib_recv_wr rx_wr;
int ret;
- rx_wr.wr_id = (uintptr_t)rx_desc;
+ if (!rx_desc->in_use) {
+ /*
+ * if the descriptor is not in-use we already reposted it
+ * for recv, so just silently return
+ */
+ return 0;
+ }
+
+ rx_desc->in_use = false;
+ rx_wr.wr_cqe = &rx_desc->rx_cqe;
rx_wr.sg_list = &rx_desc->rx_sg;
rx_wr.num_sge = 1;
rx_wr.next = NULL;
- isert_conn->post_recv_buf_count++;
- ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
- if (ret) {
+ ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL);
+ if (ret)
isert_err("ib_post_recv() failed with ret: %d\n", ret);
- isert_conn->post_recv_buf_count--;
- }
return ret;
}
static int
-isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
+isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
struct ib_device *ib_dev = isert_conn->cm_id->device;
- struct ib_send_wr send_wr, *send_wr_failed;
+ struct ib_send_wr send_wr;
int ret;
ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
+ tx_desc->tx_cqe.done = isert_login_send_done;
+
send_wr.next = NULL;
- send_wr.wr_id = (uintptr_t)tx_desc;
+ send_wr.wr_cqe = &tx_desc->tx_cqe;
send_wr.sg_list = tx_desc->tx_sg;
send_wr.num_sge = tx_desc->num_sge;
send_wr.opcode = IB_WR_SEND;
send_wr.send_flags = IB_SEND_SIGNALED;
- ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed);
+ ret = ib_post_send(isert_conn->qp, &send_wr, NULL);
if (ret)
isert_err("ib_post_send() failed, ret: %d\n", ret);
@@ -1073,6 +803,22 @@ isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
}
static void
+__isert_create_send_desc(struct isert_device *device,
+ struct iser_tx_desc *tx_desc)
+{
+
+ memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
+ tx_desc->iser_header.flags = ISCSI_CTRL;
+
+ tx_desc->num_sge = 1;
+
+ if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
+ tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
+ isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
+ }
+}
+
+static void
isert_create_send_desc(struct isert_conn *isert_conn,
struct isert_cmd *isert_cmd,
struct iser_tx_desc *tx_desc)
@@ -1083,16 +829,7 @@ isert_create_send_desc(struct isert_conn *isert_conn,
ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
- memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
- tx_desc->iser_header.flags = ISER_VER;
-
- tx_desc->num_sge = 1;
- tx_desc->isert_cmd = isert_cmd;
-
- if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
- tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
- isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
- }
+ __isert_create_send_desc(device, tx_desc);
}
static int
@@ -1128,46 +865,53 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
{
struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
- isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
- send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
- send_wr->opcode = IB_WR_SEND;
+ tx_desc->tx_cqe.done = isert_send_done;
+ send_wr->wr_cqe = &tx_desc->tx_cqe;
+
+ if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) {
+ send_wr->opcode = IB_WR_SEND_WITH_INV;
+ send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey;
+ } else {
+ send_wr->opcode = IB_WR_SEND;
+ }
+
send_wr->sg_list = &tx_desc->tx_sg[0];
send_wr->num_sge = isert_cmd->tx_desc.num_sge;
send_wr->send_flags = IB_SEND_SIGNALED;
}
static int
-isert_rdma_post_recvl(struct isert_conn *isert_conn)
+isert_login_post_recv(struct isert_conn *isert_conn)
{
- struct ib_recv_wr rx_wr, *rx_wr_fail;
+ struct ib_recv_wr rx_wr;
struct ib_sge sge;
int ret;
memset(&sge, 0, sizeof(struct ib_sge));
- sge.addr = isert_conn->login_req_dma;
- sge.length = ISER_RX_LOGIN_SIZE;
+ sge.addr = isert_conn->login_desc->dma_addr +
+ isert_get_hdr_offset(isert_conn->login_desc);
+ sge.length = ISER_RX_PAYLOAD_SIZE;
sge.lkey = isert_conn->device->pd->local_dma_lkey;
isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
sge.addr, sge.length, sge.lkey);
+ isert_conn->login_desc->rx_cqe.done = isert_login_recv_done;
+
memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
- rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf;
+ rx_wr.wr_cqe = &isert_conn->login_desc->rx_cqe;
rx_wr.sg_list = &sge;
rx_wr.num_sge = 1;
- isert_conn->post_recv_buf_count++;
- ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
- if (ret) {
+ ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL);
+ if (ret)
isert_err("ib_post_recv() failed: %d\n", ret);
- isert_conn->post_recv_buf_count--;
- }
return ret;
}
static int
-isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
+isert_put_login_tx(struct iscsit_conn *conn, struct iscsi_login *login,
u32 length)
{
struct isert_conn *isert_conn = conn->context;
@@ -1176,7 +920,7 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
int ret;
- isert_create_send_desc(isert_conn, NULL, tx_desc);
+ __isert_create_send_desc(device, tx_desc);
memcpy(&tx_desc->iscsi_header, &login->rsp[0],
sizeof(struct iscsi_hdr));
@@ -1201,16 +945,6 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
}
if (!login->login_failed) {
if (login->login_complete) {
- if (!conn->sess->sess_ops->SessionType &&
- isert_conn->device->use_fastreg) {
- ret = isert_conn_create_fastreg_pool(isert_conn);
- if (ret) {
- isert_err("Conn: %p failed to create"
- " fastreg pool\n", isert_conn);
- return ret;
- }
- }
-
ret = isert_alloc_rx_descriptors(isert_conn);
if (ret)
return ret;
@@ -1227,12 +961,12 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
goto post_send;
}
- ret = isert_rdma_post_recvl(isert_conn);
+ ret = isert_login_post_recv(isert_conn);
if (ret)
return ret;
}
post_send:
- ret = isert_post_send(isert_conn, tx_desc);
+ ret = isert_login_post_send(isert_conn, tx_desc);
if (ret)
return ret;
@@ -1242,9 +976,9 @@ post_send:
static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
- struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
+ struct iser_rx_desc *rx_desc = isert_conn->login_desc;
int rx_buflen = isert_conn->login_req_len;
- struct iscsi_conn *conn = isert_conn->conn;
+ struct iscsit_conn *conn = isert_conn->conn;
struct iscsi_login *login = conn->conn_login;
int size;
@@ -1254,15 +988,14 @@ isert_rx_login_req(struct isert_conn *isert_conn)
if (login->first_request) {
struct iscsi_login_req *login_req =
- (struct iscsi_login_req *)&rx_desc->iscsi_header;
+ (struct iscsi_login_req *)isert_get_iscsi_hdr(rx_desc);
/*
* Setup the initial iscsi_login values from the leading
* login request PDU.
*/
login->leading_connection = (!login_req->tsih) ? 1 : 0;
- login->current_stage =
- (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
- >> 2;
+ login->current_stage = ISCSI_LOGIN_CURRENT_STAGE(
+ login_req->flags);
login->version_min = login_req->min_version;
login->version_max = login_req->max_version;
memcpy(login->isid, login_req->isid, 6);
@@ -1273,36 +1006,36 @@ isert_rx_login_req(struct isert_conn *isert_conn)
login->tsih = be16_to_cpu(login_req->tsih);
}
- memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
+ memcpy(&login->req[0], isert_get_iscsi_hdr(rx_desc), ISCSI_HDR_LEN);
size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
isert_dbg("Using login payload size: %d, rx_buflen: %d "
"MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
MAX_KEY_VALUE_PAIRS);
- memcpy(login->req_buf, &rx_desc->data[0], size);
+ memcpy(login->req_buf, isert_get_data(rx_desc), size);
if (login->first_request) {
complete(&isert_conn->login_comp);
return;
}
- schedule_delayed_work(&conn->login_work, 0);
+ queue_delayed_work(isert_login_wq, &conn->login_work, 0);
}
-static struct iscsi_cmd
-*isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc)
+static struct iscsit_cmd
+*isert_allocate_cmd(struct iscsit_conn *conn, struct iser_rx_desc *rx_desc)
{
struct isert_conn *isert_conn = conn->context;
struct isert_cmd *isert_cmd;
- struct iscsi_cmd *cmd;
+ struct iscsit_cmd *cmd;
cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd) {
- isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
+ isert_err("Unable to allocate iscsit_cmd + isert_cmd\n");
return NULL;
}
isert_cmd = iscsit_priv_cmd(cmd);
isert_cmd->conn = isert_conn;
- isert_cmd->iscsi_cmd = cmd;
+ isert_cmd->iscsit_cmd = cmd;
isert_cmd->rx_desc = rx_desc;
return cmd;
@@ -1310,10 +1043,10 @@ static struct iscsi_cmd
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
- struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
+ struct isert_cmd *isert_cmd, struct iscsit_cmd *cmd,
struct iser_rx_desc *rx_desc, unsigned char *buf)
{
- struct iscsi_conn *conn = isert_conn->conn;
+ struct iscsit_conn *conn = isert_conn->conn;
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
int imm_data, imm_data_len, unsol_data, sg_nents, rc;
bool dump_payload = false;
@@ -1344,14 +1077,15 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
if (imm_data_len != data_len) {
sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
- &rx_desc->data[0], imm_data_len);
+ isert_get_data(rx_desc), imm_data_len);
isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
sg_nents, imm_data_len);
} else {
sg_init_table(&isert_cmd->sg, 1);
cmd->se_cmd.t_data_sg = &isert_cmd->sg;
cmd->se_cmd.t_data_nents = 1;
- sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len);
+ sg_set_buf(&isert_cmd->sg, isert_get_data(rx_desc),
+ imm_data_len);
isert_dbg("Transfer Immediate imm_data_len: %d\n",
imm_data_len);
}
@@ -1368,8 +1102,8 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
sequence_cmd:
rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
- if (!rc && dump_payload == false && unsol_data)
- iscsit_set_unsoliticed_dataout(cmd);
+ if (!rc && !dump_payload && unsol_data)
+ iscsit_set_unsolicited_dataout(cmd);
else if (dump_payload && imm_data)
target_put_sess_cmd(&cmd->se_cmd);
@@ -1381,8 +1115,8 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
struct iser_rx_desc *rx_desc, unsigned char *buf)
{
struct scatterlist *sg_start;
- struct iscsi_conn *conn = isert_conn->conn;
- struct iscsi_cmd *cmd = NULL;
+ struct iscsit_conn *conn = isert_conn->conn;
+ struct iscsit_cmd *cmd = NULL;
struct iscsi_data *hdr = (struct iscsi_data *)buf;
u32 unsol_data_len = ntoh24(hdr->dlength);
int rc, sg_nents, sg_off, page_off;
@@ -1420,9 +1154,9 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
}
isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
"sg_nents: %u from %p %u\n", sg_start, sg_off,
- sg_nents, &rx_desc->data[0], unsol_data_len);
+ sg_nents, isert_get_data(rx_desc), unsol_data_len);
- sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
+ sg_copy_from_buffer(sg_start, sg_nents, isert_get_data(rx_desc),
unsol_data_len);
rc = iscsit_check_dataout_payload(cmd, hdr, false);
@@ -1433,20 +1167,15 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
* multiple data-outs on the same command can arrive -
* so post the buffer before hand
*/
- rc = isert_post_recv(isert_conn, rx_desc);
- if (rc) {
- isert_err("ib_post_recv failed with %d\n", rc);
- return rc;
- }
- return 0;
+ return isert_post_recv(isert_conn, rx_desc);
}
static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
- struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
+ struct iscsit_cmd *cmd, struct iser_rx_desc *rx_desc,
unsigned char *buf)
{
- struct iscsi_conn *conn = isert_conn->conn;
+ struct iscsit_conn *conn = isert_conn->conn;
struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
int rc;
@@ -1462,10 +1191,10 @@ isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
- struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
+ struct iscsit_cmd *cmd, struct iser_rx_desc *rx_desc,
struct iscsi_text *hdr)
{
- struct iscsi_conn *conn = isert_conn->conn;
+ struct iscsit_conn *conn = isert_conn->conn;
u32 payload_length = ntoh24(hdr->dlength);
int rc;
unsigned char *text_in = NULL;
@@ -1476,15 +1205,12 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd
if (payload_length) {
text_in = kzalloc(payload_length, GFP_KERNEL);
- if (!text_in) {
- isert_err("Unable to allocate text_in of payload_length: %u\n",
- payload_length);
+ if (!text_in)
return -ENOMEM;
- }
}
cmd->text_in_ptr = text_in;
- memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);
+ memcpy(cmd->text_in_ptr, isert_get_data(rx_desc), payload_length);
return iscsit_process_text_cmd(conn, cmd, hdr);
}
@@ -1494,9 +1220,9 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
uint32_t read_stag, uint64_t read_va,
uint32_t write_stag, uint64_t write_va)
{
- struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
- struct iscsi_conn *conn = isert_conn->conn;
- struct iscsi_cmd *cmd;
+ struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
+ struct iscsit_conn *conn = isert_conn->conn;
+ struct iscsit_cmd *cmd;
struct isert_cmd *isert_cmd;
int ret = -EINVAL;
u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
@@ -1519,6 +1245,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
isert_cmd->read_va = read_va;
isert_cmd->write_stag = write_stag;
isert_cmd->write_va = write_va;
+ isert_cmd->inv_rkey = read_stag ? read_stag : write_stag;
ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
rx_desc, (unsigned char *)hdr);
@@ -1574,24 +1301,55 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
}
static void
-isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
+isert_print_wc(struct ib_wc *wc, const char *type)
{
- struct iser_hdr *iser_hdr = &rx_desc->iser_header;
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ isert_err("%s failure: %s (%d) vend_err %x\n", type,
+ ib_wc_status_msg(wc->status), wc->status,
+ wc->vendor_err);
+ else
+ isert_dbg("%s failure: %s (%d)\n", type,
+ ib_wc_status_msg(wc->status), wc->status);
+}
+
+static void
+isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct isert_conn *isert_conn = wc->qp->qp_context;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
+ struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
+ struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
+ struct iser_ctrl *iser_ctrl = isert_get_iser_hdr(rx_desc);
uint64_t read_va = 0, write_va = 0;
uint32_t read_stag = 0, write_stag = 0;
- int rc;
- switch (iser_hdr->flags & 0xF0) {
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ isert_print_wc(wc, "recv");
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ return;
+ }
+
+ rx_desc->in_use = true;
+
+ ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
+
+ isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
+ rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
+ (int)(wc->byte_len - ISER_HEADERS_LEN));
+
+ switch (iser_ctrl->flags & 0xF0) {
case ISCSI_CTRL:
- if (iser_hdr->flags & ISER_RSV) {
- read_stag = be32_to_cpu(iser_hdr->read_stag);
- read_va = be64_to_cpu(iser_hdr->read_va);
+ if (iser_ctrl->flags & ISER_RSV) {
+ read_stag = be32_to_cpu(iser_ctrl->read_stag);
+ read_va = be64_to_cpu(iser_ctrl->read_va);
isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
read_stag, (unsigned long long)read_va);
}
- if (iser_hdr->flags & ISER_WSV) {
- write_stag = be32_to_cpu(iser_hdr->write_stag);
- write_va = be64_to_cpu(iser_hdr->write_va);
+ if (iser_ctrl->flags & ISER_WSV) {
+ write_stag = be32_to_cpu(iser_ctrl->write_stag);
+ write_va = be64_to_cpu(iser_ctrl->write_va);
isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
write_stag, (unsigned long long)write_va);
}
@@ -1602,168 +1360,76 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
isert_err("iSER Hello message\n");
break;
default:
- isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
+ isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags);
break;
}
- rc = isert_rx_opcode(isert_conn, rx_desc,
- read_stag, read_va, write_stag, write_va);
-}
-
-static void
-isert_rcv_completion(struct iser_rx_desc *desc,
- struct isert_conn *isert_conn,
- u32 xfer_len)
-{
- struct ib_device *ib_dev = isert_conn->cm_id->device;
- struct iscsi_hdr *hdr;
- u64 rx_dma;
- int rx_buflen;
-
- if ((char *)desc == isert_conn->login_req_buf) {
- rx_dma = isert_conn->login_req_dma;
- rx_buflen = ISER_RX_LOGIN_SIZE;
- isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
- rx_dma, rx_buflen);
- } else {
- rx_dma = desc->dma_addr;
- rx_buflen = ISER_RX_PAYLOAD_SIZE;
- isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
- rx_dma, rx_buflen);
- }
-
- ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);
-
- hdr = &desc->iscsi_header;
- isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
- hdr->opcode, hdr->itt, hdr->flags,
- (int)(xfer_len - ISER_HEADERS_LEN));
-
- if ((char *)desc == isert_conn->login_req_buf) {
- isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
- if (isert_conn->conn) {
- struct iscsi_login *login = isert_conn->conn->conn_login;
-
- if (login && !login->first_request)
- isert_rx_login_req(isert_conn);
- }
- mutex_lock(&isert_conn->mutex);
- complete(&isert_conn->login_req_comp);
- mutex_unlock(&isert_conn->mutex);
- } else {
- isert_rx_do_work(desc, isert_conn);
- }
-
- ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
- DMA_FROM_DEVICE);
+ isert_rx_opcode(isert_conn, rx_desc,
+ read_stag, read_va, write_stag, write_va);
- isert_conn->post_recv_buf_count--;
-}
-
-static int
-isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
- struct scatterlist *sg, u32 nents, u32 length, u32 offset,
- enum iser_ib_op_code op, struct isert_data_buf *data)
-{
- struct ib_device *ib_dev = isert_conn->cm_id->device;
-
- data->dma_dir = op == ISER_IB_RDMA_WRITE ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
- data->len = length - offset;
- data->offset = offset;
- data->sg_off = data->offset / PAGE_SIZE;
-
- data->sg = &sg[data->sg_off];
- data->nents = min_t(unsigned int, nents - data->sg_off,
- ISCSI_ISER_SG_TABLESIZE);
- data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
- PAGE_SIZE);
-
- data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
- data->dma_dir);
- if (unlikely(!data->dma_nents)) {
- isert_err("Cmd: unable to dma map SGs %p\n", sg);
- return -EINVAL;
- }
-
- isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
- isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
-
- return 0;
+ ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
}
static void
-isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
+isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct ib_device *ib_dev = isert_conn->cm_id->device;
-
- ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
- memset(data, 0, sizeof(*data));
-}
+ struct isert_conn *isert_conn = wc->qp->qp_context;
+ struct ib_device *ib_dev = isert_conn->device->ib_device;
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ isert_print_wc(wc, "login recv");
+ return;
+ }
+ ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_desc->dma_addr,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
-static void
-isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
-{
- struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+ isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;
- isert_dbg("Cmd %p\n", isert_cmd);
+ if (isert_conn->conn) {
+ struct iscsi_login *login = isert_conn->conn->conn_login;
- if (wr->data.sg) {
- isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
- isert_unmap_data_buf(isert_conn, &wr->data);
+ if (login && !login->first_request)
+ isert_rx_login_req(isert_conn);
}
- if (wr->send_wr) {
- isert_dbg("Cmd %p free send_wr\n", isert_cmd);
- kfree(wr->send_wr);
- wr->send_wr = NULL;
- }
+ mutex_lock(&isert_conn->mutex);
+ complete(&isert_conn->login_req_comp);
+ mutex_unlock(&isert_conn->mutex);
- if (wr->ib_sge) {
- isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
- kfree(wr->ib_sge);
- wr->ib_sge = NULL;
- }
+ ib_dma_sync_single_for_device(ib_dev, isert_conn->login_desc->dma_addr,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
}
static void
-isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
+isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn)
{
- struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+ struct se_cmd *se_cmd = &cmd->iscsit_cmd->se_cmd;
+ enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
- isert_dbg("Cmd %p\n", isert_cmd);
-
- if (wr->fr_desc) {
- isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc);
- if (wr->fr_desc->ind & ISERT_PROTECTED) {
- isert_unmap_data_buf(isert_conn, &wr->prot);
- wr->fr_desc->ind &= ~ISERT_PROTECTED;
- }
- spin_lock_bh(&isert_conn->pool_lock);
- list_add_tail(&wr->fr_desc->list, &isert_conn->fr_pool);
- spin_unlock_bh(&isert_conn->pool_lock);
- wr->fr_desc = NULL;
- }
+ if (!cmd->rw.nr_ops)
+ return;
- if (wr->data.sg) {
- isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
- isert_unmap_data_buf(isert_conn, &wr->data);
+ if (isert_prot_cmd(conn, se_cmd)) {
+ rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp,
+ conn->cm_id->port_num, se_cmd->t_data_sg,
+ se_cmd->t_data_nents, se_cmd->t_prot_sg,
+ se_cmd->t_prot_nents, dir);
+ } else {
+ rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num,
+ se_cmd->t_data_sg, se_cmd->t_data_nents, dir);
}
- wr->ib_sge = NULL;
- wr->send_wr = NULL;
+ cmd->rw.nr_ops = 0;
}
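
isert_rdma_rw_ctx_destroy() reflects the replacement of the driver's hand-rolled fast-registration pool with the generic RDMA R/W API: each data transfer is described by an rdma_rw_ctx that is set up per command and torn down here, using the signature variant when T10-PI is in effect. A hedged sketch of the plain (non-PI) init/destroy pairing, with my_rw_setup()/my_rw_teardown() as illustrative wrappers; the rdma_rw_ctx_* calls are the in-kernel API this patch relies on.

#include <rdma/rw.h>

static int my_rw_setup(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port,
		       struct scatterlist *sg, u32 sg_cnt,
		       u64 raddr, u32 rkey, enum dma_data_direction dir)
{
	/*
	 * Maps the SG list and prepares the RDMA READ/WRITE work requests;
	 * a negative return value indicates failure.
	 */
	return rdma_rw_ctx_init(ctx, qp, port, sg, sg_cnt, 0, raddr, rkey, dir);
}

static void my_rw_teardown(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port,
			   struct scatterlist *sg, u32 sg_cnt,
			   enum dma_data_direction dir)
{
	/* Must pass back the same SG list and direction used at init time. */
	rdma_rw_ctx_destroy(ctx, qp, port, sg, sg_cnt, dir);
}
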
static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
- struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+ struct iscsit_cmd *cmd = isert_cmd->iscsit_cmd;
struct isert_conn *isert_conn = isert_cmd->conn;
- struct iscsi_conn *conn = isert_conn->conn;
- struct isert_device *device = isert_conn->device;
+ struct iscsit_conn *conn = isert_conn->conn;
struct iscsi_text_rsp *hdr;
isert_dbg("Cmd %p\n", isert_cmd);
@@ -1791,7 +1457,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
}
}
- device->unreg_rdma_mem(isert_cmd, isert_conn);
+ isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
transport_generic_free_cmd(&cmd->se_cmd, 0);
break;
case ISCSI_OP_SCSI_TMFUNC:
@@ -1826,9 +1492,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
transport_generic_free_cmd(&cmd->se_cmd, 0);
break;
}
- /*
- * Fall-through
- */
+ fallthrough;
default:
iscsit_release_cmd(cmd);
break;
@@ -1890,12 +1554,12 @@ isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
}
sec_offset_err = mr_status.sig_err.sig_err_offset;
do_div(sec_offset_err, block_size);
- se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
+ se_cmd->sense_info = sec_offset_err + se_cmd->t_task_lba;
isert_err("PI error found type %d at sector 0x%llx "
"expected 0x%x vs actual 0x%x\n",
mr_status.sig_err.err_type,
- (unsigned long long)se_cmd->bad_sector,
+ (unsigned long long)se_cmd->sense_info,
mr_status.sig_err.expected,
mr_status.sig_err.actual);
ret = 1;
@@ -1906,52 +1570,74 @@ fail_mr_status:
}
static void
-isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
- struct isert_cmd *isert_cmd)
+isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
- struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
- struct se_cmd *se_cmd = &cmd->se_cmd;
- struct isert_conn *isert_conn = isert_cmd->conn;
+ struct isert_conn *isert_conn = wc->qp->qp_context;
struct isert_device *device = isert_conn->device;
+ struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
+ struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
+ struct se_cmd *cmd = &isert_cmd->iscsit_cmd->se_cmd;
int ret = 0;
- if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
- ret = isert_check_pi_status(se_cmd,
- wr->fr_desc->pi_ctx->sig_mr);
- wr->fr_desc->ind &= ~ISERT_PROTECTED;
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ isert_print_wc(wc, "rdma write");
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ isert_completion_put(desc, isert_cmd, device->ib_device, true);
+ return;
}
- device->unreg_rdma_mem(isert_cmd, isert_conn);
- wr->send_wr_num = 0;
- if (ret)
- transport_send_check_condition_and_sense(se_cmd,
- se_cmd->pi_err, 0);
- else
- isert_put_response(isert_conn->conn, cmd);
+ isert_dbg("Cmd %p\n", isert_cmd);
+
+ ret = isert_check_pi_status(cmd, isert_cmd->rw.reg->mr);
+ isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
+
+ if (ret) {
+ /*
+ * transport_generic_request_failure() expects to have
+ * plus two references to handle queue-full, so re-add
+ * one here as target-core will have already dropped
+ * it after the first isert_put_datain() callback.
+ */
+ kref_get(&cmd->cmd_kref);
+ transport_generic_request_failure(cmd, cmd->pi_err);
+ } else {
+ /*
+ * XXX: isert_put_response() failure is not retried.
+ */
+ ret = isert_put_response(isert_conn->conn, isert_cmd->iscsit_cmd);
+ if (ret)
+ pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
+ }
}
static void
-isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
- struct isert_cmd *isert_cmd)
+isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
- struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
- struct se_cmd *se_cmd = &cmd->se_cmd;
- struct isert_conn *isert_conn = isert_cmd->conn;
+ struct isert_conn *isert_conn = wc->qp->qp_context;
struct isert_device *device = isert_conn->device;
+ struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
+ struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
+ struct iscsit_cmd *cmd = isert_cmd->iscsit_cmd;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
int ret = 0;
- if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
- ret = isert_check_pi_status(se_cmd,
- wr->fr_desc->pi_ctx->sig_mr);
- wr->fr_desc->ind &= ~ISERT_PROTECTED;
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ isert_print_wc(wc, "rdma read");
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ isert_completion_put(desc, isert_cmd, device->ib_device, true);
+ return;
}
+ isert_dbg("Cmd %p\n", isert_cmd);
+
iscsit_stop_dataout_timer(cmd);
- device->unreg_rdma_mem(isert_cmd, isert_conn);
- cmd->write_data_done = wr->data.len;
- wr->send_wr_num = 0;
+
+ if (isert_prot_cmd(isert_conn, se_cmd))
+ ret = isert_check_pi_status(se_cmd, isert_cmd->rw.reg->mr);
+ isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
+ cmd->write_data_done = 0;
isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
spin_lock_bh(&cmd->istate_lock);
@@ -1959,13 +1645,15 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
spin_unlock_bh(&cmd->istate_lock);
- if (ret) {
- target_put_sess_cmd(se_cmd);
- transport_send_check_condition_and_sense(se_cmd,
- se_cmd->pi_err, 0);
- } else {
+ /*
+ * transport_generic_request_failure() will drop the extra
+ * se_cmd->cmd_kref reference after T10-PI error, and handle
+ * any non-zero ->queue_status() callback error retries.
+ */
+ if (ret)
+ transport_generic_request_failure(se_cmd, se_cmd->pi_err);
+ else
target_execute_cmd(se_cmd);
- }
}
static void
@@ -1975,15 +1663,16 @@ isert_do_control_comp(struct work_struct *work)
struct isert_cmd, comp_work);
struct isert_conn *isert_conn = isert_cmd->conn;
struct ib_device *ib_dev = isert_conn->cm_id->device;
- struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+ struct iscsit_cmd *cmd = isert_cmd->iscsit_cmd;
isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);
switch (cmd->i_state) {
case ISTATE_SEND_TASKMGTRSP:
iscsit_tmr_post_handler(cmd, cmd->conn);
- case ISTATE_SEND_REJECT: /* FALLTHRU */
- case ISTATE_SEND_TEXTRSP: /* FALLTHRU */
+ fallthrough;
+ case ISTATE_SEND_REJECT:
+ case ISTATE_SEND_TEXTRSP:
cmd->i_state = ISTATE_SENT_STATUS;
isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
ib_dev, false);
@@ -1999,184 +1688,66 @@ isert_do_control_comp(struct work_struct *work)
}
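
The removed /* FALLTHRU */ comments are replaced by the kernel's fallthrough pseudo-keyword, which expands to the compiler's fall-through statement attribute so -Wimplicit-fallthrough can check the intent. A self-contained illustration, assuming a GCC/Clang toolchain; classify() is a made-up function, not driver code:

/* fallthrough as the kernel defines it for compilers with the attribute. */
#ifndef fallthrough
#define fallthrough	__attribute__((__fallthrough__))
#endif

static int classify(int state)
{
	int flags = 0;

	switch (state) {
	case 1:
		flags |= 0x1;	/* extra work only for state 1 */
		fallthrough;	/* then deliberately share the path below */
	case 2:
	case 3:
		flags |= 0x2;
		break;
	default:
		break;
	}
	return flags;
}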
static void
-isert_response_completion(struct iser_tx_desc *tx_desc,
- struct isert_cmd *isert_cmd,
- struct isert_conn *isert_conn,
- struct ib_device *ib_dev)
+isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
-
- if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
- cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
- cmd->i_state == ISTATE_SEND_REJECT ||
- cmd->i_state == ISTATE_SEND_TEXTRSP) {
- isert_unmap_tx_desc(tx_desc, ib_dev);
+ struct isert_conn *isert_conn = wc->qp->qp_context;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
+ struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
- INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
- queue_work(isert_comp_wq, &isert_cmd->comp_work);
- return;
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ isert_print_wc(wc, "login send");
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
}
- cmd->i_state = ISTATE_SENT_STATUS;
- isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
+ isert_unmap_tx_desc(tx_desc, ib_dev);
}
static void
-isert_snd_completion(struct iser_tx_desc *tx_desc,
- struct isert_conn *isert_conn)
+isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
+ struct isert_conn *isert_conn = wc->qp->qp_context;
struct ib_device *ib_dev = isert_conn->cm_id->device;
- struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
- struct isert_rdma_wr *wr;
+ struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
+ struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc);
- if (!isert_cmd) {
- isert_unmap_tx_desc(tx_desc, ib_dev);
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ isert_print_wc(wc, "send");
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
return;
}
- wr = &isert_cmd->rdma_wr;
- isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op);
+ isert_dbg("Cmd %p\n", isert_cmd);
- switch (wr->iser_ib_op) {
- case ISER_IB_SEND:
- isert_response_completion(tx_desc, isert_cmd,
- isert_conn, ib_dev);
- break;
- case ISER_IB_RDMA_WRITE:
- isert_completion_rdma_write(tx_desc, isert_cmd);
- break;
- case ISER_IB_RDMA_READ:
- isert_completion_rdma_read(tx_desc, isert_cmd);
- break;
+ switch (isert_cmd->iscsit_cmd->i_state) {
+ case ISTATE_SEND_TASKMGTRSP:
+ case ISTATE_SEND_LOGOUTRSP:
+ case ISTATE_SEND_REJECT:
+ case ISTATE_SEND_TEXTRSP:
+ isert_unmap_tx_desc(tx_desc, ib_dev);
+
+ INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
+ queue_work(isert_comp_wq, &isert_cmd->comp_work);
+ return;
default:
- isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op);
- dump_stack();
+ isert_cmd->iscsit_cmd->i_state = ISTATE_SENT_STATUS;
+ isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
break;
}
}
-/**
- * is_isert_tx_desc() - Indicate if the completion wr_id
- * is a TX descriptor or not.
- * @isert_conn: iser connection
- * @wr_id: completion WR identifier
- *
- * Since we cannot rely on wc opcode in FLUSH errors
- * we must work around it by checking if the wr_id address
- * falls in the iser connection rx_descs buffer. If so
- * it is an RX descriptor, otherwize it is a TX.
- */
-static inline bool
-is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
-{
- void *start = isert_conn->rx_descs;
- int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);
-
- if (wr_id >= start && wr_id < start + len)
- return false;
-
- return true;
-}
-
-static void
-isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
-{
- if (wc->wr_id == ISER_BEACON_WRID) {
- isert_info("conn %p completing wait_comp_err\n",
- isert_conn);
- complete(&isert_conn->wait_comp_err);
- } else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
- struct ib_device *ib_dev = isert_conn->cm_id->device;
- struct isert_cmd *isert_cmd;
- struct iser_tx_desc *desc;
-
- desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
- isert_cmd = desc->isert_cmd;
- if (!isert_cmd)
- isert_unmap_tx_desc(desc, ib_dev);
- else
- isert_completion_put(desc, isert_cmd, ib_dev, true);
- } else {
- isert_conn->post_recv_buf_count--;
- if (!isert_conn->post_recv_buf_count)
- iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
- }
-}
-
-static void
-isert_handle_wc(struct ib_wc *wc)
-{
- struct isert_conn *isert_conn;
- struct iser_tx_desc *tx_desc;
- struct iser_rx_desc *rx_desc;
-
- isert_conn = wc->qp->qp_context;
- if (likely(wc->status == IB_WC_SUCCESS)) {
- if (wc->opcode == IB_WC_RECV) {
- rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
- isert_rcv_completion(rx_desc, isert_conn, wc->byte_len);
- } else {
- tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
- isert_snd_completion(tx_desc, isert_conn);
- }
- } else {
- if (wc->status != IB_WC_WR_FLUSH_ERR)
- isert_err("%s (%d): wr id %llx vend_err %x\n",
- ib_wc_status_msg(wc->status), wc->status,
- wc->wr_id, wc->vendor_err);
- else
- isert_dbg("%s (%d): wr id %llx\n",
- ib_wc_status_msg(wc->status), wc->status,
- wc->wr_id);
-
- if (wc->wr_id != ISER_FASTREG_LI_WRID)
- isert_cq_comp_err(isert_conn, wc);
- }
-}
-
-static void
-isert_cq_work(struct work_struct *work)
-{
- enum { isert_poll_budget = 65536 };
- struct isert_comp *comp = container_of(work, struct isert_comp,
- work);
- struct ib_wc *const wcs = comp->wcs;
- int i, n, completed = 0;
-
- while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
- for (i = 0; i < n; i++)
- isert_handle_wc(&wcs[i]);
-
- completed += n;
- if (completed >= isert_poll_budget)
- break;
- }
-
- ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
-}
-
-static void
-isert_cq_callback(struct ib_cq *cq, void *context)
-{
- struct isert_comp *comp = context;
-
- queue_work(isert_comp_wq, &comp->work);
-}
-
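
The deleted isert_cq_work()/isert_cq_callback() machinery is what the IB core's CQ API now provides: ib_alloc_cq() owns the poll loop, budget and re-arming, and dispatches each completion to wc->wr_cqe->done. A rough sketch of how a consumer sets that up; everything except the ib_* calls is a placeholder name:

/* Sketch: let the core CQ API do the polling (placeholder names). */
struct my_conn {
	struct ib_cq	*cq;
	struct ib_qp	*qp;
};

static int my_conn_alloc_cq(struct my_conn *conn, struct ib_device *dev,
			    int cq_size)
{
	/* IB_POLL_WORKQUEUE: completions run from a core-managed workqueue,
	 * so no driver-private ib_poll_cq()/ib_req_notify_cq() loop.
	 */
	conn->cq = ib_alloc_cq(dev, conn, cq_size, 0 /* comp_vector */,
			       IB_POLL_WORKQUEUE);
	return PTR_ERR_OR_ZERO(conn->cq);
}

static void my_conn_free_cq(struct my_conn *conn)
{
	ib_drain_qp(conn->qp);		/* flush any outstanding WRs first */
	ib_free_cq(conn->cq);
}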
static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
- struct ib_send_wr *wr_failed;
int ret;
ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
- if (ret) {
- isert_err("ib_post_recv failed with %d\n", ret);
+ if (ret)
return ret;
- }
- ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr,
- &wr_failed);
+ ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, NULL);
if (ret) {
isert_err("ib_post_send failed with %d\n", ret);
return ret;
@@ -2185,7 +1756,7 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
}
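
Passing NULL instead of &wr_failed leans on the post verbs in recent kernels, where ib_post_send() takes const WR pointers and treats the bad_wr argument as optional when the caller does not care which WR failed. A hedged usage sketch; my_post_send is a placeholder:

/* Sketch: post one signaled SEND without tracking the failed WR. */
static int my_post_send(struct ib_qp *qp, struct ib_cqe *cqe,
			struct ib_sge *sge)
{
	struct ib_send_wr wr = {
		.wr_cqe     = cqe,
		.opcode     = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
		.sg_list    = sge,
		.num_sge    = 1,
	};

	/* bad_wr == NULL: on error we only learn that "something" failed. */
	return ib_post_send(qp, &wr, NULL);
}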
static int
-isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+isert_put_response(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
@@ -2218,6 +1789,8 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
(void *)cmd->sense_buffer, pdu_len,
DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
+ return -ENOMEM;
isert_cmd->pdu_buf_len = pdu_len;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
@@ -2234,11 +1807,10 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
}
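
The added ib_dma_mapping_error() check follows the DMA-API rule that every streaming mapping must be validated before the handle is used; the same pattern recurs below for the reject and text-response PDUs. A small hedged sketch of the map/check/unmap lifecycle with placeholder names:

/* Sketch: map a response buffer for the device, fail fast on error. */
static int my_map_pdu(struct ib_device *dev, void *buf, size_t len,
		      u64 *dma_out)
{
	u64 dma = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (ib_dma_mapping_error(dev, dma))
		return -ENOMEM;		/* never hand a bad handle to the HCA */

	*dma_out = dma;
	return 0;
}

static void my_unmap_pdu(struct ib_device *dev, u64 dma, size_t len)
{
	ib_dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
}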
static void
-isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+isert_aborted_task(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
- struct isert_device *device = isert_conn->device;
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
@@ -2247,12 +1819,11 @@ isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
if (cmd->data_direction == DMA_TO_DEVICE)
iscsit_stop_dataout_timer(cmd);
-
- device->unreg_rdma_mem(isert_cmd, isert_conn);
+ isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
}
static enum target_prot_op
-isert_get_sup_prot_ops(struct iscsi_conn *conn)
+isert_get_sup_prot_ops(struct iscsit_conn *conn)
{
struct isert_conn *isert_conn = conn->context;
struct isert_device *device = isert_conn->device;
@@ -2272,7 +1843,7 @@ isert_get_sup_prot_ops(struct iscsi_conn *conn)
}
static int
-isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+isert_put_nopin(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
bool nopout_response)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
@@ -2292,7 +1863,7 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
}
static int
-isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+isert_put_logout_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
@@ -2310,7 +1881,7 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
}
static int
-isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+isert_put_tm_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
@@ -2328,7 +1899,7 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
}
static int
-isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+isert_put_reject(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
@@ -2347,6 +1918,8 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
+ return -ENOMEM;
isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
tx_dsg->length = ISCSI_HDR_LEN;
@@ -2361,7 +1934,7 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
}
static int
-isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+isert_put_text_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
@@ -2387,6 +1960,8 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
+ return -ENOMEM;
isert_cmd->pdu_buf_len = txt_rsp_len;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
@@ -2401,279 +1976,8 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
return isert_post_response(isert_conn, isert_cmd);
}
-static int
-isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
- struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
- u32 data_left, u32 offset)
-{
- struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
- struct scatterlist *sg_start, *tmp_sg;
- struct isert_device *device = isert_conn->device;
- struct ib_device *ib_dev = device->ib_device;
- u32 sg_off, page_off;
- int i = 0, sg_nents;
-
- sg_off = offset / PAGE_SIZE;
- sg_start = &cmd->se_cmd.t_data_sg[sg_off];
- sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
- page_off = offset % PAGE_SIZE;
-
- send_wr->sg_list = ib_sge;
- send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
- /*
- * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
- */
- for_each_sg(sg_start, tmp_sg, sg_nents, i) {
- isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, "
- "page_off: %u\n",
- (unsigned long long)tmp_sg->dma_address,
- tmp_sg->length, page_off);
-
- ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
- ib_sge->length = min_t(u32, data_left,
- ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
- ib_sge->lkey = device->pd->local_dma_lkey;
-
- isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
- ib_sge->addr, ib_sge->length, ib_sge->lkey);
- page_off = 0;
- data_left -= ib_sge->length;
- if (!data_left)
- break;
- ib_sge++;
- isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
- }
-
- send_wr->num_sge = ++i;
- isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
- send_wr->sg_list, send_wr->num_sge);
-
- return send_wr->num_sge;
-}
-
-static int
-isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
- struct isert_rdma_wr *wr)
-{
- struct se_cmd *se_cmd = &cmd->se_cmd;
- struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = conn->context;
- struct isert_data_buf *data = &wr->data;
- struct ib_send_wr *send_wr;
- struct ib_sge *ib_sge;
- u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
- int ret = 0, i, ib_sge_cnt;
-
- isert_cmd->tx_desc.isert_cmd = isert_cmd;
-
- offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
- ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
- se_cmd->t_data_nents, se_cmd->data_length,
- offset, wr->iser_ib_op, &wr->data);
- if (ret)
- return ret;
-
- data_left = data->len;
- offset = data->offset;
-
- ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
- if (!ib_sge) {
- isert_warn("Unable to allocate ib_sge\n");
- ret = -ENOMEM;
- goto unmap_cmd;
- }
- wr->ib_sge = ib_sge;
-
- wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
- wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
- GFP_KERNEL);
- if (!wr->send_wr) {
- isert_dbg("Unable to allocate wr->send_wr\n");
- ret = -ENOMEM;
- goto unmap_cmd;
- }
-
- wr->isert_cmd = isert_cmd;
- rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
-
- for (i = 0; i < wr->send_wr_num; i++) {
- send_wr = &isert_cmd->rdma_wr.send_wr[i];
- data_len = min(data_left, rdma_write_max);
-
- send_wr->send_flags = 0;
- if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
- send_wr->opcode = IB_WR_RDMA_WRITE;
- send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
- send_wr->wr.rdma.rkey = isert_cmd->read_stag;
- if (i + 1 == wr->send_wr_num)
- send_wr->next = &isert_cmd->tx_desc.send_wr;
- else
- send_wr->next = &wr->send_wr[i + 1];
- } else {
- send_wr->opcode = IB_WR_RDMA_READ;
- send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
- send_wr->wr.rdma.rkey = isert_cmd->write_stag;
- if (i + 1 == wr->send_wr_num)
- send_wr->send_flags = IB_SEND_SIGNALED;
- else
- send_wr->next = &wr->send_wr[i + 1];
- }
-
- ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
- send_wr, data_len, offset);
- ib_sge += ib_sge_cnt;
-
- offset += data_len;
- va_offset += data_len;
- data_left -= data_len;
- }
-
- return 0;
-unmap_cmd:
- isert_unmap_data_buf(isert_conn, data);
-
- return ret;
-}
-
-static int
-isert_map_fr_pagelist(struct ib_device *ib_dev,
- struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
-{
- u64 start_addr, end_addr, page, chunk_start = 0;
- struct scatterlist *tmp_sg;
- int i = 0, new_chunk, last_ent, n_pages;
-
- n_pages = 0;
- new_chunk = 1;
- last_ent = sg_nents - 1;
- for_each_sg(sg_start, tmp_sg, sg_nents, i) {
- start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
- if (new_chunk)
- chunk_start = start_addr;
- end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
-
- isert_dbg("SGL[%d] dma_addr: 0x%llx len: %u\n",
- i, (unsigned long long)tmp_sg->dma_address,
- tmp_sg->length);
-
- if ((end_addr & ~PAGE_MASK) && i < last_ent) {
- new_chunk = 0;
- continue;
- }
- new_chunk = 1;
-
- page = chunk_start & PAGE_MASK;
- do {
- fr_pl[n_pages++] = page;
- isert_dbg("Mapped page_list[%d] page_addr: 0x%llx\n",
- n_pages - 1, page);
- page += PAGE_SIZE;
- } while (page < end_addr);
- }
-
- return n_pages;
-}
-
static inline void
-isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
-{
- u32 rkey;
-
- memset(inv_wr, 0, sizeof(*inv_wr));
- inv_wr->wr_id = ISER_FASTREG_LI_WRID;
- inv_wr->opcode = IB_WR_LOCAL_INV;
- inv_wr->ex.invalidate_rkey = mr->rkey;
-
- /* Bump the key */
- rkey = ib_inc_rkey(mr->rkey);
- ib_update_fast_reg_key(mr, rkey);
-}
-
-static int
-isert_fast_reg_mr(struct isert_conn *isert_conn,
- struct fast_reg_descriptor *fr_desc,
- struct isert_data_buf *mem,
- enum isert_indicator ind,
- struct ib_sge *sge)
-{
- struct isert_device *device = isert_conn->device;
- struct ib_device *ib_dev = device->ib_device;
- struct ib_mr *mr;
- struct ib_fast_reg_page_list *frpl;
- struct ib_send_wr fr_wr, inv_wr;
- struct ib_send_wr *bad_wr, *wr = NULL;
- int ret, pagelist_len;
- u32 page_off;
-
- if (mem->dma_nents == 1) {
- sge->lkey = device->pd->local_dma_lkey;
- sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
- sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
- isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
- sge->addr, sge->length, sge->lkey);
- return 0;
- }
-
- if (ind == ISERT_DATA_KEY_VALID) {
- /* Registering data buffer */
- mr = fr_desc->data_mr;
- frpl = fr_desc->data_frpl;
- } else {
- /* Registering protection buffer */
- mr = fr_desc->pi_ctx->prot_mr;
- frpl = fr_desc->pi_ctx->prot_frpl;
- }
-
- page_off = mem->offset % PAGE_SIZE;
-
- isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
- fr_desc, mem->nents, mem->offset);
-
- pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
- &frpl->page_list[0]);
-
- if (!(fr_desc->ind & ind)) {
- isert_inv_rkey(&inv_wr, mr);
- wr = &inv_wr;
- }
-
- /* Prepare FASTREG WR */
- memset(&fr_wr, 0, sizeof(fr_wr));
- fr_wr.wr_id = ISER_FASTREG_LI_WRID;
- fr_wr.opcode = IB_WR_FAST_REG_MR;
- fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
- fr_wr.wr.fast_reg.page_list = frpl;
- fr_wr.wr.fast_reg.page_list_len = pagelist_len;
- fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
- fr_wr.wr.fast_reg.length = mem->len;
- fr_wr.wr.fast_reg.rkey = mr->rkey;
- fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
-
- if (!wr)
- wr = &fr_wr;
- else
- wr->next = &fr_wr;
-
- ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
- if (ret) {
- isert_err("fast registration failed, ret:%d\n", ret);
- return ret;
- }
- fr_desc->ind &= ~ind;
-
- sge->lkey = mr->lkey;
- sge->addr = frpl->page_list[0] + page_off;
- sge->length = mem->len;
-
- isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
- sge->addr, sge->length, sge->lkey);
-
- return ret;
-}
-
-static inline void
-isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
- struct ib_sig_domain *domain)
+isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_domain *domain)
{
domain->sig_type = IB_SIG_TYPE_T10_DIF;
domain->sig.dif.bg_type = IB_T10DIF_CRC;
@@ -2690,273 +1994,117 @@ isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
domain->sig.dif.ref_remap = true;
-};
+}
static int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
{
+ memset(sig_attrs, 0, sizeof(*sig_attrs));
+
switch (se_cmd->prot_op) {
case TARGET_PROT_DIN_INSERT:
case TARGET_PROT_DOUT_STRIP:
sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
- isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
+ isert_set_dif_domain(se_cmd, &sig_attrs->wire);
break;
case TARGET_PROT_DOUT_INSERT:
case TARGET_PROT_DIN_STRIP:
sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
- isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
+ isert_set_dif_domain(se_cmd, &sig_attrs->mem);
break;
case TARGET_PROT_DIN_PASS:
case TARGET_PROT_DOUT_PASS:
- isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
- isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
+ isert_set_dif_domain(se_cmd, &sig_attrs->wire);
+ isert_set_dif_domain(se_cmd, &sig_attrs->mem);
break;
default:
isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
return -EINVAL;
}
- return 0;
-}
+ if (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD)
+ sig_attrs->check_mask |= IB_SIG_CHECK_GUARD;
+ if (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG)
+ sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG;
+ if (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG)
+ sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG;
-static inline u8
-isert_set_prot_checks(u8 prot_checks)
-{
- return (prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
- (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
- (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
+ return 0;
}
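
The dropped isert_set_prot_checks() helper built the mask from magic constants and, as its last line shows, tested TARGET_DIF_CHECK_REFTAG twice where APPTAG was presumably intended; the replacement above spells the mapping out with the core's named IB_SIG_CHECK_* flags. The same mapping, isolated into a hedged helper sketch (my_dif_check_mask is not a driver symbol):

/* Sketch: translate target-core DIF checks into an ib_sig_attrs mask. */
static u8 my_dif_check_mask(u8 prot_checks)
{
	u8 mask = 0;

	if (prot_checks & TARGET_DIF_CHECK_GUARD)
		mask |= IB_SIG_CHECK_GUARD;
	if (prot_checks & TARGET_DIF_CHECK_APPTAG)
		mask |= IB_SIG_CHECK_APPTAG;
	if (prot_checks & TARGET_DIF_CHECK_REFTAG)
		mask |= IB_SIG_CHECK_REFTAG;

	return mask;
}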
static int
-isert_reg_sig_mr(struct isert_conn *isert_conn,
- struct se_cmd *se_cmd,
- struct isert_rdma_wr *rdma_wr,
- struct fast_reg_descriptor *fr_desc)
-{
- struct ib_send_wr sig_wr, inv_wr;
- struct ib_send_wr *bad_wr, *wr = NULL;
- struct pi_context *pi_ctx = fr_desc->pi_ctx;
- struct ib_sig_attrs sig_attrs;
+isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
+ struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
+{
+ struct se_cmd *se_cmd = &cmd->iscsit_cmd->se_cmd;
+ enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
+ u8 port_num = conn->cm_id->port_num;
+ u64 addr;
+ u32 rkey, offset;
int ret;
- memset(&sig_attrs, 0, sizeof(sig_attrs));
- ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
- if (ret)
- goto err;
-
- sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
+ if (cmd->ctx_init_done)
+ goto rdma_ctx_post;
- if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
- isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
- wr = &inv_wr;
+ if (dir == DMA_FROM_DEVICE) {
+ addr = cmd->write_va;
+ rkey = cmd->write_stag;
+ offset = cmd->iscsit_cmd->write_data_done;
+ } else {
+ addr = cmd->read_va;
+ rkey = cmd->read_stag;
+ offset = 0;
}
- memset(&sig_wr, 0, sizeof(sig_wr));
- sig_wr.opcode = IB_WR_REG_SIG_MR;
- sig_wr.wr_id = ISER_FASTREG_LI_WRID;
- sig_wr.sg_list = &rdma_wr->ib_sg[DATA];
- sig_wr.num_sge = 1;
- sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
- sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
- sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
- if (se_cmd->t_prot_sg)
- sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT];
-
- if (!wr)
- wr = &sig_wr;
- else
- wr->next = &sig_wr;
+ if (isert_prot_cmd(conn, se_cmd)) {
+ struct ib_sig_attrs sig_attrs;
- ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
- if (ret) {
- isert_err("fast registration failed, ret:%d\n", ret);
- goto err;
- }
- fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
-
- rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
- rdma_wr->ib_sg[SIG].addr = 0;
- rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
- if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
- se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
- /*
- * We have protection guards on the wire
- * so we need to set a larget transfer
- */
- rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;
-
- isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
- rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
- rdma_wr->ib_sg[SIG].lkey);
-err:
- return ret;
-}
-
-static int
-isert_handle_prot_cmd(struct isert_conn *isert_conn,
- struct isert_cmd *isert_cmd,
- struct isert_rdma_wr *wr)
-{
- struct isert_device *device = isert_conn->device;
- struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
- int ret;
-
- if (!wr->fr_desc->pi_ctx) {
- ret = isert_create_pi_ctx(wr->fr_desc,
- device->ib_device,
- device->pd);
- if (ret) {
- isert_err("conn %p failed to allocate pi_ctx\n",
- isert_conn);
- return ret;
- }
- }
-
- if (se_cmd->t_prot_sg) {
- ret = isert_map_data_buf(isert_conn, isert_cmd,
- se_cmd->t_prot_sg,
- se_cmd->t_prot_nents,
- se_cmd->prot_length,
- 0, wr->iser_ib_op, &wr->prot);
- if (ret) {
- isert_err("conn %p failed to map protection buffer\n",
- isert_conn);
+ ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
+ if (ret)
return ret;
- }
- memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
- ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
- ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
- if (ret) {
- isert_err("conn %p failed to fast reg mr\n",
- isert_conn);
- goto unmap_prot_cmd;
- }
- }
-
- ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
- if (ret) {
- isert_err("conn %p failed to fast reg mr\n",
- isert_conn);
- goto unmap_prot_cmd;
+ WARN_ON_ONCE(offset);
+ ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num,
+ se_cmd->t_data_sg, se_cmd->t_data_nents,
+ se_cmd->t_prot_sg, se_cmd->t_prot_nents,
+ &sig_attrs, addr, rkey, dir);
+ } else {
+ ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num,
+ se_cmd->t_data_sg, se_cmd->t_data_nents,
+ offset, addr, rkey, dir);
}
- wr->fr_desc->ind |= ISERT_PROTECTED;
- return 0;
-
-unmap_prot_cmd:
- if (se_cmd->t_prot_sg)
- isert_unmap_data_buf(isert_conn, &wr->prot);
-
- return ret;
-}
-
-static int
-isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
- struct isert_rdma_wr *wr)
-{
- struct se_cmd *se_cmd = &cmd->se_cmd;
- struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = conn->context;
- struct fast_reg_descriptor *fr_desc = NULL;
- struct ib_send_wr *send_wr;
- struct ib_sge *ib_sg;
- u32 offset;
- int ret = 0;
- unsigned long flags;
-
- isert_cmd->tx_desc.isert_cmd = isert_cmd;
-
- offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
- ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
- se_cmd->t_data_nents, se_cmd->data_length,
- offset, wr->iser_ib_op, &wr->data);
- if (ret)
+ if (ret < 0) {
+ isert_err("Cmd: %p failed to prepare RDMA res\n", cmd);
return ret;
-
- if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
- spin_lock_irqsave(&isert_conn->pool_lock, flags);
- fr_desc = list_first_entry(&isert_conn->fr_pool,
- struct fast_reg_descriptor, list);
- list_del(&fr_desc->list);
- spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
- wr->fr_desc = fr_desc;
}
- ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
- ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
- if (ret)
- goto unmap_cmd;
-
- if (isert_prot_cmd(isert_conn, se_cmd)) {
- ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
- if (ret)
- goto unmap_cmd;
-
- ib_sg = &wr->ib_sg[SIG];
- } else {
- ib_sg = &wr->ib_sg[DATA];
- }
-
- memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
- wr->ib_sge = &wr->s_ib_sge;
- wr->send_wr_num = 1;
- memset(&wr->s_send_wr, 0, sizeof(*send_wr));
- wr->send_wr = &wr->s_send_wr;
- wr->isert_cmd = isert_cmd;
-
- send_wr = &isert_cmd->rdma_wr.s_send_wr;
- send_wr->sg_list = &wr->s_ib_sge;
- send_wr->num_sge = 1;
- send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
- if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
- send_wr->opcode = IB_WR_RDMA_WRITE;
- send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
- send_wr->wr.rdma.rkey = isert_cmd->read_stag;
- send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
- 0 : IB_SEND_SIGNALED;
- } else {
- send_wr->opcode = IB_WR_RDMA_READ;
- send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
- send_wr->wr.rdma.rkey = isert_cmd->write_stag;
- send_wr->send_flags = IB_SEND_SIGNALED;
- }
-
- return 0;
-
-unmap_cmd:
- if (fr_desc) {
- spin_lock_irqsave(&isert_conn->pool_lock, flags);
- list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
- spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
- }
- isert_unmap_data_buf(isert_conn, &wr->data);
+ cmd->ctx_init_done = true;
+rdma_ctx_post:
+ ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
+ if (ret < 0)
+ isert_err("Cmd: %p failed to post RDMA res\n", cmd);
return ret;
}
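
isert_rdma_rw_ctx_post() is now the whole data path: rdma_rw_ctx_init() (or the _signature_init variant when T10-PI is active) builds the MR registrations and RDMA work requests from the scatterlist, and rdma_rw_ctx_post() posts them, optionally chained in front of chain_wr. A condensed, hedged sketch of the same call sequence for a data-out (RDMA READ from the initiator); my_rdma_read and its arguments are placeholders:

/* Sketch: one rdma_rw round trip pulling data from the remote side. */
static int my_rdma_read(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port,
			struct scatterlist *sg, u32 sg_cnt,
			u64 remote_addr, u32 rkey, struct ib_cqe *done_cqe)
{
	int ret;

	ret = rdma_rw_ctx_init(ctx, qp, port, sg, sg_cnt, 0 /* sg_offset */,
			       remote_addr, rkey, DMA_FROM_DEVICE);
	if (ret < 0)
		return ret;

	/* done_cqe->done() runs when the last WR of the chain completes. */
	ret = rdma_rw_ctx_post(ctx, qp, port, done_cqe, NULL /* chain_wr */);
	if (ret < 0)
		rdma_rw_ctx_destroy(ctx, qp, port, sg, sg_cnt,
				    DMA_FROM_DEVICE);
	return ret;
}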
static int
-isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+isert_put_datain(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
struct isert_conn *isert_conn = conn->context;
- struct isert_device *device = isert_conn->device;
- struct ib_send_wr *wr_failed;
+ struct ib_cqe *cqe = NULL;
+ struct ib_send_wr *chain_wr = NULL;
int rc;
isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
isert_cmd, se_cmd->data_length);
- wr->iser_ib_op = ISER_IB_RDMA_WRITE;
- rc = device->reg_rdma_mem(conn, cmd, wr);
- if (rc) {
- isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
- return rc;
- }
-
- if (!isert_prot_cmd(isert_conn, se_cmd)) {
+ if (isert_prot_cmd(isert_conn, se_cmd)) {
+ isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
+ cqe = &isert_cmd->tx_desc.tx_cqe;
+ } else {
/*
* Build isert_conn->tx_desc for iSCSI response PDU and attach
*/
@@ -2967,62 +2115,40 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
isert_init_send_wr(isert_conn, isert_cmd,
&isert_cmd->tx_desc.send_wr);
- isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
- wr->send_wr_num += 1;
rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
- if (rc) {
- isert_err("ib_post_recv failed with %d\n", rc);
+ if (rc)
return rc;
- }
- }
-
- rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
- if (rc)
- isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
- if (!isert_prot_cmd(isert_conn, se_cmd))
- isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
- "READ\n", isert_cmd);
- else
- isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
- isert_cmd);
+ chain_wr = &isert_cmd->tx_desc.send_wr;
+ }
- return 1;
+ rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
+ isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n",
+ isert_cmd, rc);
+ return rc;
}
static int
-isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
+isert_get_dataout(struct iscsit_conn *conn, struct iscsit_cmd *cmd, bool recovery)
{
- struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
- struct isert_conn *isert_conn = conn->context;
- struct isert_device *device = isert_conn->device;
- struct ib_send_wr *wr_failed;
- int rc;
+ int ret;
isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
- isert_cmd, se_cmd->data_length, cmd->write_data_done);
- wr->iser_ib_op = ISER_IB_RDMA_READ;
- rc = device->reg_rdma_mem(conn, cmd, wr);
- if (rc) {
- isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
- return rc;
- }
+ isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);
- rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
- if (rc)
- isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
+ isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
+ ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context,
+ &isert_cmd->tx_desc.tx_cqe, NULL);
- isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
- isert_cmd);
-
- return 0;
+ isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n",
+ isert_cmd, ret);
+ return ret;
}
static int
-isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
+isert_immediate_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
int ret = 0;
@@ -3047,7 +2173,7 @@ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
}
static int
-isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
+isert_response_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
{
struct isert_conn *isert_conn = conn->context;
int ret;
@@ -3097,7 +2223,7 @@ isert_setup_id(struct isert_np *isert_np)
sa = (struct sockaddr *)&np->np_sockaddr;
isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
- id = rdma_create_id(isert_cma_handler, isert_np,
+ id = rdma_create_id(&init_net, isert_cma_handler, isert_np,
RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(id)) {
isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
@@ -3106,6 +2232,16 @@ isert_setup_id(struct isert_np *isert_np)
}
isert_dbg("id %p context %p\n", id, id->context);
+ /*
+ * Allow both IPv4 and IPv6 sockets to bind a single port
+ * at the same time.
+ */
+ ret = rdma_set_afonly(id, 1);
+ if (ret) {
+ isert_err("rdma_set_afonly() failed: %d\n", ret);
+ goto out_id;
+ }
+
ret = rdma_bind_addr(id, sa);
if (ret) {
isert_err("rdma_bind_addr() failed: %d\n", ret);
@@ -3134,10 +2270,9 @@ isert_setup_np(struct iscsi_np *np,
int ret;
isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
- if (!isert_np) {
- isert_err("Unable to allocate struct isert_np\n");
+ if (!isert_np)
return -ENOMEM;
- }
+
sema_init(&isert_np->sem, 0);
mutex_init(&isert_np->mutex);
INIT_LIST_HEAD(&isert_np->accepted);
@@ -3174,12 +2309,20 @@ isert_rdma_accept(struct isert_conn *isert_conn)
struct rdma_cm_id *cm_id = isert_conn->cm_id;
struct rdma_conn_param cp;
int ret;
+ struct iser_cm_hdr rsp_hdr;
memset(&cp, 0, sizeof(struct rdma_conn_param));
cp.initiator_depth = isert_conn->initiator_depth;
cp.retry_count = 7;
cp.rnr_retry_count = 7;
+ memset(&rsp_hdr, 0, sizeof(rsp_hdr));
+ rsp_hdr.flags = ISERT_ZBVA_NOT_USED;
+ if (!isert_conn->snd_w_inv)
+ rsp_hdr.flags = rsp_hdr.flags | ISERT_SEND_W_INV_NOT_USED;
+ cp.private_data = (void *)&rsp_hdr;
+ cp.private_data_len = sizeof(rsp_hdr);
+
ret = rdma_accept(cm_id, &cp);
if (ret) {
isert_err("rdma_accept() failed with: %d\n", ret);
@@ -3190,7 +2333,7 @@ isert_rdma_accept(struct isert_conn *isert_conn)
}
static int
-isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
+isert_get_login_rx(struct iscsit_conn *conn, struct iscsi_login *login)
{
struct isert_conn *isert_conn = conn->context;
int ret;
@@ -3206,9 +2349,9 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
/*
* For login requests after the first PDU, isert_rx_login_req() will
- * kick schedule_delayed_work(&conn->login_work) as the packet is
- * received, which turns this callback from iscsi_target_do_login_rx()
- * into a NOP.
+ * kick queue_delayed_work(isert_login_wq, &conn->login_work) as
+ * the packet is received, which turns this callback from
+ * iscsi_target_do_login_rx() into a NOP.
*/
if (!login->first_request)
return 0;
@@ -3226,7 +2369,7 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
}
static void
-isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
+isert_set_conn_info(struct iscsi_np *np, struct iscsit_conn *conn,
struct isert_conn *isert_conn)
{
struct rdma_cm_id *cm_id = isert_conn->cm_id;
@@ -3239,7 +2382,7 @@ isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
}
static int
-isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
+isert_accept_np(struct iscsi_np *np, struct iscsit_conn *conn)
{
struct isert_np *isert_np = np->np_context;
struct isert_conn *isert_conn;
@@ -3255,10 +2398,10 @@ accept_wait:
spin_unlock_bh(&np->np_thread_lock);
isert_dbg("np_thread_state %d\n",
np->np_thread_state);
- /**
+ /*
* No point in stalling here when np_thread
* is in state RESET/SHUTDOWN/EXIT - bail
- **/
+ */
return -ENODEV;
}
spin_unlock_bh(&np->np_thread_lock);
@@ -3275,6 +2418,7 @@ accept_wait:
conn->context = isert_conn;
isert_conn->conn = conn;
+ isert_conn->state = ISER_CONN_BOUND;
isert_set_conn_info(np, conn, isert_conn);
@@ -3288,6 +2432,7 @@ isert_free_np(struct iscsi_np *np)
{
struct isert_np *isert_np = np->np_context;
struct isert_conn *isert_conn, *n;
+ LIST_HEAD(drop_conn_list);
if (isert_np->cm_id)
rdma_destroy_id(isert_np->cm_id);
@@ -3307,7 +2452,7 @@ isert_free_np(struct iscsi_np *np)
node) {
isert_info("cleaning isert_conn %p state (%d)\n",
isert_conn, isert_conn->state);
- isert_connect_release(isert_conn);
+ list_move_tail(&isert_conn->node, &drop_conn_list);
}
}
@@ -3318,11 +2463,16 @@ isert_free_np(struct iscsi_np *np)
node) {
isert_info("cleaning isert_conn %p state (%d)\n",
isert_conn, isert_conn->state);
- isert_connect_release(isert_conn);
+ list_move_tail(&isert_conn->node, &drop_conn_list);
}
}
mutex_unlock(&isert_np->mutex);
+ list_for_each_entry_safe(isert_conn, n, &drop_conn_list, node) {
+ list_del_init(&isert_conn->node);
+ isert_connect_release(isert_conn);
+ }
+
np->np_context = NULL;
kfree(isert_np);
}
@@ -3335,8 +2485,6 @@ static void isert_release_work(struct work_struct *work)
isert_info("Starting release conn %p\n", isert_conn);
- wait_for_completion(&isert_conn->wait);
-
mutex_lock(&isert_conn->mutex);
isert_conn->state = ISER_CONN_DOWN;
mutex_unlock(&isert_conn->mutex);
@@ -3348,7 +2496,7 @@ static void isert_release_work(struct work_struct *work)
static void
isert_wait4logout(struct isert_conn *isert_conn)
{
- struct iscsi_conn *conn = isert_conn->conn;
+ struct iscsit_conn *conn = isert_conn->conn;
isert_info("conn %p\n", isert_conn);
@@ -3360,34 +2508,16 @@ isert_wait4logout(struct isert_conn *isert_conn)
}
static void
-isert_wait4cmds(struct iscsi_conn *conn)
+isert_wait4cmds(struct iscsit_conn *conn)
{
- isert_info("iscsi_conn %p\n", conn);
+ isert_info("iscsit_conn %p\n", conn);
if (conn->sess) {
- target_sess_cmd_list_set_waiting(conn->sess->se_sess);
- target_wait_for_sess_cmds(conn->sess->se_sess);
+ target_stop_cmd_counter(conn->cmd_cnt);
+ target_wait_for_cmds(conn->cmd_cnt);
}
}
-static void
-isert_wait4flush(struct isert_conn *isert_conn)
-{
- struct ib_recv_wr *bad_wr;
-
- isert_info("conn %p\n", isert_conn);
-
- init_completion(&isert_conn->wait_comp_err);
- isert_conn->beacon.wr_id = ISER_BEACON_WRID;
- /* post an indication that all flush errors were consumed */
- if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) {
- isert_err("conn %p failed to post beacon", isert_conn);
- return;
- }
-
- wait_for_completion(&isert_conn->wait_comp_err);
-}
-
/**
* isert_put_unsol_pending_cmds() - Drop commands waiting for
* unsolicited dataout
@@ -3398,9 +2528,9 @@ isert_wait4flush(struct isert_conn *isert_conn)
* before blocking on the target_wait_for_session_cmds
*/
static void
-isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
+isert_put_unsol_pending_cmds(struct iscsit_conn *conn)
{
- struct iscsi_cmd *cmd, *tmp;
+ struct iscsit_cmd *cmd, *tmp;
static LIST_HEAD(drop_cmd_list);
spin_lock_bh(&conn->cmd_lock);
@@ -3423,25 +2553,17 @@ isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
}
}
-static void isert_wait_conn(struct iscsi_conn *conn)
+static void isert_wait_conn(struct iscsit_conn *conn)
{
struct isert_conn *isert_conn = conn->context;
isert_info("Starting conn %p\n", isert_conn);
mutex_lock(&isert_conn->mutex);
- /*
- * Only wait for wait_comp_err if the isert_conn made it
- * into full feature phase..
- */
- if (isert_conn->state == ISER_CONN_INIT) {
- mutex_unlock(&isert_conn->mutex);
- return;
- }
isert_conn_terminate(isert_conn);
mutex_unlock(&isert_conn->mutex);
- isert_wait4flush(isert_conn);
+ ib_drain_qp(isert_conn->qp);
isert_put_unsol_pending_cmds(conn);
isert_wait4cmds(conn);
isert_wait4logout(isert_conn);
@@ -3449,17 +2571,27 @@ static void isert_wait_conn(struct iscsi_conn *conn)
queue_work(isert_release_wq, &isert_conn->release_work);
}
-static void isert_free_conn(struct iscsi_conn *conn)
+static void isert_free_conn(struct iscsit_conn *conn)
{
struct isert_conn *isert_conn = conn->context;
- isert_wait4flush(isert_conn);
+ ib_drain_qp(isert_conn->qp);
isert_put_conn(isert_conn);
}
+static void isert_get_rx_pdu(struct iscsit_conn *conn)
+{
+ struct completion comp;
+
+ init_completion(&comp);
+
+ wait_for_completion_interruptible(&comp);
+}
+
static struct iscsit_transport iser_target_transport = {
.name = "IB/iSER",
.transport_type = ISCSI_INFINIBAND,
+ .rdma_shutdown = true,
.priv_size = sizeof(struct isert_cmd),
.owner = THIS_MODULE,
.iscsit_setup_np = isert_setup_np,
@@ -3475,26 +2607,29 @@ static struct iscsit_transport iser_target_transport = {
.iscsit_queue_data_in = isert_put_datain,
.iscsit_queue_status = isert_put_response,
.iscsit_aborted_task = isert_aborted_task,
+ .iscsit_get_rx_pdu = isert_get_rx_pdu,
.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};
static int __init isert_init(void)
{
- int ret;
+ isert_login_wq = alloc_workqueue("isert_login_wq", 0, 0);
+ if (!isert_login_wq) {
+ isert_err("Unable to allocate isert_login_wq\n");
+ return -ENOMEM;
+ }
isert_comp_wq = alloc_workqueue("isert_comp_wq",
WQ_UNBOUND | WQ_HIGHPRI, 0);
if (!isert_comp_wq) {
isert_err("Unable to allocate isert_comp_wq\n");
- ret = -ENOMEM;
- return -ENOMEM;
+ goto destroy_login_wq;
}
isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
WQ_UNBOUND_MAX_ACTIVE);
if (!isert_release_wq) {
isert_err("Unable to allocate isert_release_wq\n");
- ret = -ENOMEM;
goto destroy_comp_wq;
}
@@ -3505,21 +2640,23 @@ static int __init isert_init(void)
destroy_comp_wq:
destroy_workqueue(isert_comp_wq);
+destroy_login_wq:
+ destroy_workqueue(isert_login_wq);
- return ret;
+ return -ENOMEM;
}
static void __exit isert_exit(void)
{
- flush_scheduled_work();
+ flush_workqueue(isert_login_wq);
destroy_workqueue(isert_release_wq);
destroy_workqueue(isert_comp_wq);
iscsit_unregister_transport(&iser_target_transport);
isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
+ destroy_workqueue(isert_login_wq);
}
MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
-MODULE_VERSION("1.0");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index c5b99bcecbcf..0b2dfd6e7e27 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -1,8 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
+#include <rdma/rw.h>
+#include <scsi/iser.h>
+
#define DRV_NAME "isert"
#define PFX DRV_NAME ": "
@@ -31,177 +35,164 @@
#define isert_err(fmt, arg...) \
pr_err(PFX "%s: " fmt, __func__ , ## arg)
-#define ISCSI_ISER_SG_TABLESIZE 256
-#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
-#define ISER_BEACON_WRID 0xfffffffffffffffeULL
+/* Constant PDU length calculations */
+#define ISER_HEADERS_LEN (sizeof(struct iser_ctrl) + \
+ sizeof(struct iscsi_hdr))
+#define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
+
+/* QP settings */
+/* Maximal bounds on received asynchronous PDUs */
+#define ISERT_MAX_TX_MISC_PDUS 4 /* NOOP_IN(2) , ASYNC_EVENT(2) */
+
+#define ISERT_MAX_RX_MISC_PDUS 6 /*
+ * NOOP_OUT(2), TEXT(1),
+ * SCSI_TMFUNC(2), LOGOUT(1)
+ */
+
+#define ISCSI_DEF_XMIT_CMDS_MAX 128 /* from libiscsi.h, must be power of 2 */
+
+#define ISERT_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX)
+
+#define ISERT_MIN_POSTED_RX (ISCSI_DEF_XMIT_CMDS_MAX >> 2)
+
+#define ISERT_QP_MAX_REQ_DTOS (ISCSI_DEF_XMIT_CMDS_MAX + \
+ ISERT_MAX_TX_MISC_PDUS + \
+ ISERT_MAX_RX_MISC_PDUS)
+
+/*
+ * RX size is default of 8k plus headers, but data needs to align to
+ * 512 boundary, so use 1024 to have the extra space for alignment.
+ */
+#define ISER_RX_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 1024)
+
+/* Minimum I/O size is 512KB */
+#define ISCSI_ISER_MIN_SG_TABLESIZE 128
+
+/* Maximum support is 16MB I/O size */
+#define ISCSI_ISER_MAX_SG_TABLESIZE 4096
enum isert_desc_type {
ISCSI_TX_CONTROL,
ISCSI_TX_DATAIN
};
-enum iser_ib_op_code {
- ISER_IB_RECV,
- ISER_IB_SEND,
- ISER_IB_RDMA_WRITE,
- ISER_IB_RDMA_READ,
-};
-
enum iser_conn_state {
ISER_CONN_INIT,
ISER_CONN_UP,
+ ISER_CONN_BOUND,
ISER_CONN_FULL_FEATURE,
ISER_CONN_TERMINATING,
ISER_CONN_DOWN,
};
struct iser_rx_desc {
- struct iser_hdr iser_header;
- struct iscsi_hdr iscsi_header;
- char data[ISER_RECV_DATA_SEG_LEN];
+ char buf[ISER_RX_SIZE];
u64 dma_addr;
struct ib_sge rx_sg;
- char pad[ISER_RX_PAD_SIZE];
-} __packed;
+ struct ib_cqe rx_cqe;
+ bool in_use;
+};
+
+static inline struct iser_rx_desc *cqe_to_rx_desc(struct ib_cqe *cqe)
+{
+ return container_of(cqe, struct iser_rx_desc, rx_cqe);
+}
+
+static void *isert_get_iser_hdr(struct iser_rx_desc *desc)
+{
+ return PTR_ALIGN(desc->buf + ISER_HEADERS_LEN, 512) - ISER_HEADERS_LEN;
+}
+
+static size_t isert_get_hdr_offset(struct iser_rx_desc *desc)
+{
+ return isert_get_iser_hdr(desc) - (void *)desc->buf;
+}
+
+static void *isert_get_iscsi_hdr(struct iser_rx_desc *desc)
+{
+ return isert_get_iser_hdr(desc) + sizeof(struct iser_ctrl);
+}
+
+static void *isert_get_data(struct iser_rx_desc *desc)
+{
+ void *data = isert_get_iser_hdr(desc) + ISER_HEADERS_LEN;
+
+ WARN_ON((uintptr_t)data & 511);
+ return data;
+}
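+
The single buf[ISER_RX_SIZE] replaces the old fixed-layout descriptor: the headers are placed immediately below a 512-byte-aligned payload, which the helpers above recover with PTR_ALIGN arithmetic. The same arithmetic as a standalone, userspace-style sketch; HDRS_LEN is a hypothetical stand-in for ISER_HEADERS_LEN:

/* Standalone sketch of "headers just below a 512-aligned payload". */
#include <assert.h>
#include <stdint.h>

#define HDRS_LEN 76	/* hypothetical combined iSER + iSCSI header length */

static inline void *align_up(void *p, uintptr_t a)
{
	return (void *)(((uintptr_t)p + a - 1) & ~(a - 1));
}

static void layout(char *buf, void **hdr, void **data)
{
	/* Align the end of the headers, then step back over them. */
	*hdr  = (char *)align_up(buf + HDRS_LEN, 512) - HDRS_LEN;
	*data = (char *)*hdr + HDRS_LEN;
	assert(((uintptr_t)*data & 511) == 0);
}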
struct iser_tx_desc {
- struct iser_hdr iser_header;
+ struct iser_ctrl iser_header;
struct iscsi_hdr iscsi_header;
enum isert_desc_type type;
u64 dma_addr;
struct ib_sge tx_sg[2];
+ struct ib_cqe tx_cqe;
int num_sge;
- struct isert_cmd *isert_cmd;
struct ib_send_wr send_wr;
} __packed;
-enum isert_indicator {
- ISERT_PROTECTED = 1 << 0,
- ISERT_DATA_KEY_VALID = 1 << 1,
- ISERT_PROT_KEY_VALID = 1 << 2,
- ISERT_SIG_KEY_VALID = 1 << 3,
-};
-
-struct pi_context {
- struct ib_mr *prot_mr;
- struct ib_fast_reg_page_list *prot_frpl;
- struct ib_mr *sig_mr;
-};
-
-struct fast_reg_descriptor {
- struct list_head list;
- struct ib_mr *data_mr;
- struct ib_fast_reg_page_list *data_frpl;
- u8 ind;
- struct pi_context *pi_ctx;
-};
-
-struct isert_data_buf {
- struct scatterlist *sg;
- int nents;
- u32 sg_off;
- u32 len; /* cur_rdma_length */
- u32 offset;
- unsigned int dma_nents;
- enum dma_data_direction dma_dir;
-};
-
-enum {
- DATA = 0,
- PROT = 1,
- SIG = 2,
-};
-
-struct isert_rdma_wr {
- struct isert_cmd *isert_cmd;
- enum iser_ib_op_code iser_ib_op;
- struct ib_sge *ib_sge;
- struct ib_sge s_ib_sge;
- int send_wr_num;
- struct ib_send_wr *send_wr;
- struct ib_send_wr s_send_wr;
- struct ib_sge ib_sg[3];
- struct isert_data_buf data;
- struct isert_data_buf prot;
- struct fast_reg_descriptor *fr_desc;
-};
+static inline struct iser_tx_desc *cqe_to_tx_desc(struct ib_cqe *cqe)
+{
+ return container_of(cqe, struct iser_tx_desc, tx_cqe);
+}
struct isert_cmd {
uint32_t read_stag;
uint32_t write_stag;
uint64_t read_va;
uint64_t write_va;
+ uint32_t inv_rkey;
u64 pdu_buf_dma;
u32 pdu_buf_len;
struct isert_conn *conn;
- struct iscsi_cmd *iscsi_cmd;
+ struct iscsit_cmd *iscsit_cmd;
struct iser_tx_desc tx_desc;
struct iser_rx_desc *rx_desc;
- struct isert_rdma_wr rdma_wr;
+ struct rdma_rw_ctx rw;
struct work_struct comp_work;
struct scatterlist sg;
+ bool ctx_init_done;
};
+static inline struct isert_cmd *tx_desc_to_cmd(struct iser_tx_desc *desc)
+{
+ return container_of(desc, struct isert_cmd, tx_desc);
+}
+
struct isert_device;
struct isert_conn {
enum iser_conn_state state;
- int post_recv_buf_count;
u32 responder_resources;
u32 initiator_depth;
bool pi_support;
- u32 max_sge;
- char *login_buf;
- char *login_req_buf;
+ struct iser_rx_desc *login_desc;
char *login_rsp_buf;
- u64 login_req_dma;
int login_req_len;
u64 login_rsp_dma;
struct iser_rx_desc *rx_descs;
struct ib_recv_wr rx_wr[ISERT_QP_MAX_RECV_DTOS];
- struct iscsi_conn *conn;
+ struct iscsit_conn *conn;
struct list_head node;
struct completion login_comp;
struct completion login_req_comp;
struct iser_tx_desc login_tx_desc;
struct rdma_cm_id *cm_id;
struct ib_qp *qp;
+ struct ib_cq *cq;
+ u32 cq_size;
struct isert_device *device;
struct mutex mutex;
- struct completion wait;
- struct completion wait_comp_err;
struct kref kref;
- struct list_head fr_pool;
- int fr_pool_size;
- /* lock to protect fastreg pool */
- spinlock_t pool_lock;
struct work_struct release_work;
- struct ib_recv_wr beacon;
bool logout_posted;
-};
-
-#define ISERT_MAX_CQ 64
-
-/**
- * struct isert_comp - iSER completion context
- *
- * @device: pointer to device handle
- * @cq: completion queue
- * @wcs: work completion array
- * @active_qps: Number of active QPs attached
- * to completion context
- * @work: completion work handle
- */
-struct isert_comp {
- struct isert_device *device;
- struct ib_cq *cq;
- struct ib_wc wcs[16];
- int active_qps;
- struct work_struct work;
+ bool snd_w_inv;
+ wait_queue_head_t rem_wait;
+ bool dev_removed;
};
struct isert_device {
- int use_fastreg;
bool pi_capable;
int refcount;
struct ib_device *ib_device;
@@ -209,12 +200,6 @@ struct isert_device {
struct isert_comp *comps;
int comps_used;
struct list_head dev_node;
- struct ib_device_attr dev_attr;
- int (*reg_rdma_mem)(struct iscsi_conn *conn,
- struct iscsi_cmd *cmd,
- struct isert_rdma_wr *wr);
- void (*unreg_rdma_mem)(struct isert_cmd *isert_cmd,
- struct isert_conn *isert_conn);
};
struct isert_np {
diff --git a/drivers/infiniband/ulp/isert/isert_proto.h b/drivers/infiniband/ulp/isert/isert_proto.h
deleted file mode 100644
index 4dccd313b777..000000000000
--- a/drivers/infiniband/ulp/isert/isert_proto.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* From iscsi_iser.h */
-
-struct iser_hdr {
- u8 flags;
- u8 rsvd[3];
- __be32 write_stag; /* write rkey */
- __be64 write_va;
- __be32 read_stag; /* read rkey */
- __be64 read_va;
-} __packed;
-
-/*Constant PDU lengths calculations */
-#define ISER_HEADERS_LEN (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))
-
-#define ISER_RECV_DATA_SEG_LEN 8192
-#define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
-#define ISER_RX_LOGIN_SIZE (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
-
-/* QP settings */
-/* Maximal bounds on received asynchronous PDUs */
-#define ISERT_MAX_TX_MISC_PDUS 4 /* NOOP_IN(2) , ASYNC_EVENT(2) */
-
-#define ISERT_MAX_RX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), *
- * SCSI_TMFUNC(2), LOGOUT(1) */
-
-#define ISCSI_DEF_XMIT_CMDS_MAX 128 /* from libiscsi.h, must be power of 2 */
-
-#define ISERT_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX)
-
-#define ISERT_MIN_POSTED_RX (ISCSI_DEF_XMIT_CMDS_MAX >> 2)
-
-#define ISERT_INFLIGHT_DATAOUTS 8
-
-#define ISERT_QP_MAX_REQ_DTOS (ISCSI_DEF_XMIT_CMDS_MAX * \
- (1 + ISERT_INFLIGHT_DATAOUTS) + \
- ISERT_MAX_TX_MISC_PDUS + \
- ISERT_MAX_RX_MISC_PDUS)
-
-#define ISER_RX_PAD_SIZE (ISER_RECV_DATA_SEG_LEN + 4096 - \
- (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge)))
-
-#define ISER_VER 0x10
-#define ISER_WSV 0x08
-#define ISER_RSV 0x04
-#define ISCSI_CTRL 0x10
-#define ISER_HELLO 0x20
-#define ISER_HELLORPLY 0x30
diff --git a/drivers/infiniband/ulp/opa_vnic/Kconfig b/drivers/infiniband/ulp/opa_vnic/Kconfig
new file mode 100644
index 000000000000..4d43d055fa8e
--- /dev/null
+++ b/drivers/infiniband/ulp/opa_vnic/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INFINIBAND_OPA_VNIC
+ tristate "Cornelis OPX VNIC support"
+ depends on X86_64 && INFINIBAND
+ help
+ This is the Omni-Path Express (OPX) Virtual Network Interface Controller
+ (VNIC) driver for the Ethernet over Omni-Path feature. It implements the
+ HW-independent VNIC functionality and interfaces with the Linux network
+ stack for the data path and with IB MAD for the control path.
diff --git a/drivers/infiniband/ulp/opa_vnic/Makefile b/drivers/infiniband/ulp/opa_vnic/Makefile
new file mode 100644
index 000000000000..196183817cdc
--- /dev/null
+++ b/drivers/infiniband/ulp/opa_vnic/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Makefile - Cornelis Omni-Path Express Virtual Network Controller driver
+# Copyright(c) 2017, Intel Corporation.
+# Copyright(c) 2021, Cornelis Networks.
+#
+obj-$(CONFIG_INFINIBAND_OPA_VNIC) += opa_vnic.o
+
+opa_vnic-y := opa_vnic_netdev.o opa_vnic_encap.o opa_vnic_ethtool.o \
+ opa_vnic_vema.o opa_vnic_vema_iface.o
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
new file mode 100644
index 000000000000..31cd361416ac
--- /dev/null
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
@@ -0,0 +1,513 @@
+/*
+ * Copyright(c) 2017 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This file contains OPA VNIC encapsulation/decapsulation function.
+ */
+
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+
+#include "opa_vnic_internal.h"
+
+/* OPA 16B Header fields */
+#define OPA_16B_LID_MASK 0xFFFFFull
+#define OPA_16B_SLID_HIGH_SHFT 8
+#define OPA_16B_SLID_MASK 0xF00ull
+#define OPA_16B_DLID_MASK 0xF000ull
+#define OPA_16B_DLID_HIGH_SHFT 12
+#define OPA_16B_LEN_SHFT 20
+#define OPA_16B_SC_SHFT 20
+#define OPA_16B_RC_SHFT 25
+#define OPA_16B_PKEY_SHFT 16
+
+#define OPA_VNIC_L4_HDR_SHFT 16
+
+/* L2+L4 hdr len is 20 bytes (5 quad words) */
+#define OPA_VNIC_HDR_QW_LEN 5
+
+static inline void opa_vnic_make_header(u8 *hdr, u32 slid, u32 dlid, u16 len,
+ u16 pkey, u16 entropy, u8 sc, u8 rc,
+ u8 l4_type, u16 l4_hdr)
+{
+ /* h[1]: LT=1, 16B L2=10 */
+ u32 h[OPA_VNIC_HDR_QW_LEN] = {0, 0xc0000000, 0, 0, 0};
+
+ h[2] = l4_type;
+ h[3] = entropy;
+ h[4] = l4_hdr << OPA_VNIC_L4_HDR_SHFT;
+
+ /* Extract and set 4 upper bits and 20 lower bits of the lids */
+ h[0] |= (slid & OPA_16B_LID_MASK);
+ h[2] |= ((slid >> (20 - OPA_16B_SLID_HIGH_SHFT)) & OPA_16B_SLID_MASK);
+
+ h[1] |= (dlid & OPA_16B_LID_MASK);
+ h[2] |= ((dlid >> (20 - OPA_16B_DLID_HIGH_SHFT)) & OPA_16B_DLID_MASK);
+
+ h[0] |= (len << OPA_16B_LEN_SHFT);
+ h[1] |= (rc << OPA_16B_RC_SHFT);
+ h[1] |= (sc << OPA_16B_SC_SHFT);
+ h[2] |= ((u32)pkey << OPA_16B_PKEY_SHFT);
+
+ memcpy(hdr, h, OPA_VNIC_HDR_LEN);
+}
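+
A hedged usage sketch for the helper above; every value is a placeholder chosen only to show which argument carries what, and example_build_16b_hdr() is not part of the driver:

/* Hypothetical caller of opa_vnic_make_header(); all values are dummies. */
static void example_build_16b_hdr(u8 *hdr)
{
	u32 slid = 0x10001, dlid = 0x10002;	/* 20-bit extended LIDs */
	u16 len = 0x40, pkey = 0xffff, entropy = 0x1234, l4_hdr = 0x1;
	u8 sc = 0, rc = 0, l4_type = 0x78;	/* placeholder L4 type */

	opa_vnic_make_header(hdr, slid, dlid, len, pkey, entropy, sc, rc,
			     l4_type, l4_hdr);
}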
+
+/*
+ * Use a simple hash table for the mac table implementation, with the last
+ * octet of the mac address as the key.
+ */
+static void opa_vnic_free_mac_tbl(struct hlist_head *mactbl)
+{
+ struct opa_vnic_mac_tbl_node *node;
+ struct hlist_node *tmp;
+ int bkt;
+
+ if (!mactbl)
+ return;
+
+ vnic_hash_for_each_safe(mactbl, bkt, tmp, node, hlist) {
+ hash_del(&node->hlist);
+ kfree(node);
+ }
+ kfree(mactbl);
+}
+
+static struct hlist_head *opa_vnic_alloc_mac_tbl(void)
+{
+ u32 size = sizeof(struct hlist_head) * OPA_VNIC_MAC_TBL_SIZE;
+ struct hlist_head *mactbl;
+
+ mactbl = kzalloc(size, GFP_KERNEL);
+ if (!mactbl)
+ return ERR_PTR(-ENOMEM);
+
+ vnic_hash_init(mactbl);
+ return mactbl;
+}
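+
With the last MAC octet as the hash key (see the comment above), a lookup amounts to selecting the bucket for that octet and walking it. A minimal hedged sketch of such a lookup with a placeholder node type; the driver's own vnic_hash_* wrappers and node layout live in opa_vnic_internal.h and may differ:

/* Sketch: bucket lookup keyed on the last MAC octet (placeholder types). */
#define MY_MAC_TBL_SIZE	256	/* one bucket per possible last octet */

struct my_mac_node {
	struct hlist_node hlist;
	u8  mac[ETH_ALEN];
	u32 dlid_sd;
};

static struct my_mac_node *my_mac_lookup(struct hlist_head *tbl,
					 const u8 *mac)
{
	struct my_mac_node *node;
	u8 key = mac[ETH_ALEN - 1];	/* last octet selects the bucket */

	hlist_for_each_entry(node, &tbl[key], hlist)
		if (ether_addr_equal(node->mac, mac))
			return node;
	return NULL;
}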
+
+/* opa_vnic_release_mac_tbl - empty and free the mac table */
+void opa_vnic_release_mac_tbl(struct opa_vnic_adapter *adapter)
+{
+ struct hlist_head *mactbl;
+
+ mutex_lock(&adapter->mactbl_lock);
+ mactbl = rcu_access_pointer(adapter->mactbl);
+ rcu_assign_pointer(adapter->mactbl, NULL);
+ synchronize_rcu();
+ opa_vnic_free_mac_tbl(mactbl);
+ adapter->info.vport.mac_tbl_digest = 0;
+ mutex_unlock(&adapter->mactbl_lock);
+}
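+
+/*
+ * Note: readers access the mac table under rcu_read_lock(), so clearing the
+ * pointer and calling synchronize_rcu() before freeing guarantees that no
+ * reader still holds a reference to the old table.
+ */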
+
+/*
+ * opa_vnic_query_mac_tbl - query the mac table for a section
+ *
+ * This function queries a specific section of the mac table.
+ * The function also expects the requested range to be valid.
+ */
+void opa_vnic_query_mac_tbl(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_mactable *tbl)
+{
+ struct opa_vnic_mac_tbl_node *node;
+ struct hlist_head *mactbl;
+ int bkt;
+ u16 loffset, lnum_entries;
+
+ rcu_read_lock();
+ mactbl = rcu_dereference(adapter->mactbl);
+ if (!mactbl)
+ goto get_mac_done;
+
+ loffset = be16_to_cpu(tbl->offset);
+ lnum_entries = be16_to_cpu(tbl->num_entries);
+
+ vnic_hash_for_each(mactbl, bkt, node, hlist) {
+ struct __opa_vnic_mactable_entry *nentry = &node->entry;
+ struct opa_veswport_mactable_entry *entry;
+
+ if ((node->index < loffset) ||
+ (node->index >= (loffset + lnum_entries)))
+ continue;
+
+ /* populate entry in the tbl corresponding to the index */
+ entry = &tbl->tbl_entries[node->index - loffset];
+ memcpy(entry->mac_addr, nentry->mac_addr,
+ ARRAY_SIZE(entry->mac_addr));
+ memcpy(entry->mac_addr_mask, nentry->mac_addr_mask,
+ ARRAY_SIZE(entry->mac_addr_mask));
+ entry->dlid_sd = cpu_to_be32(nentry->dlid_sd);
+ }
+ tbl->mac_tbl_digest = cpu_to_be32(adapter->info.vport.mac_tbl_digest);
+get_mac_done:
+ rcu_read_unlock();
+}
+
+/*
+ * opa_vnic_update_mac_tbl - update mac table section
+ *
+ * This function updates the specified section of the mac table.
+ * The procedure includes the following steps.
+ * - Allocate a new mac (hash) table.
+ * - Add the specified entries to the new table.
+ * (except the ones that are requested to be deleted).
+ * - Add all the other entries from the old mac table.
+ * - If there is a failure, free the new table and return.
+ * - Switch to the new table.
+ * - Free the old table and return.
+ *
+ * The function also expects the requested range to be valid.
+ */
+int opa_vnic_update_mac_tbl(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_mactable *tbl)
+{
+ struct opa_vnic_mac_tbl_node *node, *new_node;
+ struct hlist_head *new_mactbl, *old_mactbl;
+ int i, bkt, rc = 0;
+ u8 key;
+ u16 loffset, lnum_entries;
+
+ mutex_lock(&adapter->mactbl_lock);
+ /* allocate new mac table */
+ new_mactbl = opa_vnic_alloc_mac_tbl();
+ if (IS_ERR(new_mactbl)) {
+ mutex_unlock(&adapter->mactbl_lock);
+ return PTR_ERR(new_mactbl);
+ }
+
+ loffset = be16_to_cpu(tbl->offset);
+ lnum_entries = be16_to_cpu(tbl->num_entries);
+
+ /* add updated entries to the new mac table */
+ for (i = 0; i < lnum_entries; i++) {
+ struct __opa_vnic_mactable_entry *nentry;
+ struct opa_veswport_mactable_entry *entry =
+ &tbl->tbl_entries[i];
+ u8 *mac_addr = entry->mac_addr;
+ u8 empty_mac[ETH_ALEN] = { 0 };
+
+ v_dbg("new mac entry %4d: %02x:%02x:%02x:%02x:%02x:%02x %x\n",
+ loffset + i, mac_addr[0], mac_addr[1], mac_addr[2],
+ mac_addr[3], mac_addr[4], mac_addr[5],
+ entry->dlid_sd);
+
+ /* if the entry is being removed, do not add it */
+ if (!memcmp(mac_addr, empty_mac, ARRAY_SIZE(empty_mac)))
+ continue;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ rc = -ENOMEM;
+ goto updt_done;
+ }
+
+ node->index = loffset + i;
+ nentry = &node->entry;
+ memcpy(nentry->mac_addr, entry->mac_addr,
+ ARRAY_SIZE(nentry->mac_addr));
+ memcpy(nentry->mac_addr_mask, entry->mac_addr_mask,
+ ARRAY_SIZE(nentry->mac_addr_mask));
+ nentry->dlid_sd = be32_to_cpu(entry->dlid_sd);
+ key = node->entry.mac_addr[OPA_VNIC_MAC_HASH_IDX];
+ vnic_hash_add(new_mactbl, &node->hlist, key);
+ }
+
+ /* add other entries from current mac table to new mac table */
+ old_mactbl = rcu_access_pointer(adapter->mactbl);
+ if (!old_mactbl)
+ goto switch_tbl;
+
+ vnic_hash_for_each(old_mactbl, bkt, node, hlist) {
+ if ((node->index >= loffset) &&
+ (node->index < (loffset + lnum_entries)))
+ continue;
+
+ new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
+ if (!new_node) {
+ rc = -ENOMEM;
+ goto updt_done;
+ }
+
+ new_node->index = node->index;
+ memcpy(&new_node->entry, &node->entry, sizeof(node->entry));
+ key = new_node->entry.mac_addr[OPA_VNIC_MAC_HASH_IDX];
+ vnic_hash_add(new_mactbl, &new_node->hlist, key);
+ }
+
+switch_tbl:
+ /* switch to new table */
+ rcu_assign_pointer(adapter->mactbl, new_mactbl);
+ synchronize_rcu();
+
+ adapter->info.vport.mac_tbl_digest = be32_to_cpu(tbl->mac_tbl_digest);
+updt_done:
+ /* upon failure, free the new table; otherwise, free the old table */
+ if (rc)
+ opa_vnic_free_mac_tbl(new_mactbl);
+ else
+ opa_vnic_free_mac_tbl(old_mactbl);
+
+ mutex_unlock(&adapter->mactbl_lock);
+ return rc;
+}
+
+/* opa_vnic_chk_mac_tbl - check mac table for dlid */
+static uint32_t opa_vnic_chk_mac_tbl(struct opa_vnic_adapter *adapter,
+ struct ethhdr *mac_hdr)
+{
+ struct opa_vnic_mac_tbl_node *node;
+ struct hlist_head *mactbl;
+ u32 dlid = 0;
+ u8 key;
+
+ rcu_read_lock();
+ mactbl = rcu_dereference(adapter->mactbl);
+ if (unlikely(!mactbl))
+ goto chk_done;
+
+ key = mac_hdr->h_dest[OPA_VNIC_MAC_HASH_IDX];
+ vnic_hash_for_each_possible(mactbl, node, hlist, key) {
+ struct __opa_vnic_mactable_entry *entry = &node->entry;
+
+ /* if related to source mac, skip */
+ if (unlikely(OPA_VNIC_DLID_SD_IS_SRC_MAC(entry->dlid_sd)))
+ continue;
+
+ if (!memcmp(node->entry.mac_addr, mac_hdr->h_dest,
+ ARRAY_SIZE(node->entry.mac_addr))) {
+ /* mac address found */
+ dlid = OPA_VNIC_DLID_SD_GET_DLID(node->entry.dlid_sd);
+ break;
+ }
+ }
+
+chk_done:
+ rcu_read_unlock();
+ return dlid;
+}
+
+/* opa_vnic_get_dlid - find and return the DLID */
+static uint32_t opa_vnic_get_dlid(struct opa_vnic_adapter *adapter,
+ struct sk_buff *skb, u8 def_port)
+{
+ struct __opa_veswport_info *info = &adapter->info;
+ struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
+ u32 dlid;
+
+ dlid = opa_vnic_chk_mac_tbl(adapter, mac_hdr);
+ if (dlid)
+ return dlid;
+
+ if (is_multicast_ether_addr(mac_hdr->h_dest)) {
+ dlid = info->vesw.u_mcast_dlid;
+ } else {
+ if (is_local_ether_addr(mac_hdr->h_dest)) {
+ dlid = ((uint32_t)mac_hdr->h_dest[5] << 16) |
+ ((uint32_t)mac_hdr->h_dest[4] << 8) |
+ mac_hdr->h_dest[3];
+ if (unlikely(!dlid))
+ v_warn("Null dlid in MAC address\n");
+ } else if (def_port != OPA_VNIC_INVALID_PORT) {
+ if (def_port < OPA_VESW_MAX_NUM_DEF_PORT)
+ dlid = info->vesw.u_ucast_dlid[def_port];
+ }
+ }
+
+ return dlid;
+}
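+
+/*
+ * For locally administered addresses the DLID is carried in the last three
+ * octets of the destination MAC; e.g. a destination of xx:xx:xx:34:56:78
+ * yields a dlid of 0x785634.
+ */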
+
+/* opa_vnic_get_sc - return the service class */
+static u8 opa_vnic_get_sc(struct __opa_veswport_info *info,
+ struct sk_buff *skb)
+{
+ struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
+ u16 vlan_tci;
+ u8 sc;
+
+ if (!__vlan_get_tag(skb, &vlan_tci)) {
+ u8 pcp = OPA_VNIC_VLAN_PCP(vlan_tci);
+
+ if (is_multicast_ether_addr(mac_hdr->h_dest))
+ sc = info->vport.pcp_to_sc_mc[pcp];
+ else
+ sc = info->vport.pcp_to_sc_uc[pcp];
+ } else {
+ if (is_multicast_ether_addr(mac_hdr->h_dest))
+ sc = info->vport.non_vlan_sc_mc;
+ else
+ sc = info->vport.non_vlan_sc_uc;
+ }
+
+ return sc;
+}
+
+u8 opa_vnic_get_vl(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
+{
+ struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
+ struct __opa_veswport_info *info = &adapter->info;
+ u8 vl;
+
+ if (skb_vlan_tag_present(skb)) {
+ u8 pcp = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
+
+ if (is_multicast_ether_addr(mac_hdr->h_dest))
+ vl = info->vport.pcp_to_vl_mc[pcp];
+ else
+ vl = info->vport.pcp_to_vl_uc[pcp];
+ } else {
+ if (is_multicast_ether_addr(mac_hdr->h_dest))
+ vl = info->vport.non_vlan_vl_mc;
+ else
+ vl = info->vport.non_vlan_vl_uc;
+ }
+
+ return vl;
+}
+
+/* opa_vnic_get_rc - return the routing control */
+static u8 opa_vnic_get_rc(struct __opa_veswport_info *info,
+ struct sk_buff *skb)
+{
+ u8 proto, rout_ctrl;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IPV6):
+ proto = ipv6_hdr(skb)->nexthdr;
+ if (proto == IPPROTO_TCP)
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc,
+ IPV6_TCP);
+ else if (proto == IPPROTO_UDP)
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc,
+ IPV6_UDP);
+ else
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, IPV6);
+ break;
+ case htons(ETH_P_IP):
+ proto = ip_hdr(skb)->protocol;
+ if (proto == IPPROTO_TCP)
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc,
+ IPV4_TCP);
+ else if (proto == IPPROTO_UDP)
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc,
+ IPV4_UDP);
+ else
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, IPV4);
+ break;
+ default:
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, DEFAULT);
+ }
+
+ return rout_ctrl;
+}
+
+/* opa_vnic_calc_entropy - calculate the packet entropy */
+u8 opa_vnic_calc_entropy(struct sk_buff *skb)
+{
+ u32 hash = skb_get_hash(skb);
+
+ /* store XOR of all bytes in lower 8 bits */
+ hash ^= hash >> 8;
+ hash ^= hash >> 16;
+
+ /* return lower 8 bits as entropy */
+ return (u8)(hash & 0xFF);
+}
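+
+/*
+ * E.g. a flow hash of 0x12345678 folds to 0x12 ^ 0x34 ^ 0x56 ^ 0x78 = 0x08.
+ */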
+
+/* opa_vnic_get_def_port - get default port based on entropy */
+static inline u8 opa_vnic_get_def_port(struct opa_vnic_adapter *adapter,
+ u8 entropy)
+{
+ u8 flow_id;
+
+ /* Add the upper and lower 4 bits of entropy to get the flow id */
+ flow_id = ((entropy & 0xf) + (entropy >> 4));
+ return adapter->flow_tbl[flow_id & (OPA_VNIC_FLOW_TBL_SIZE - 1)];
+}
+
+/* Calculate packet length including OPA header, crc and padding */
+static inline int opa_vnic_wire_length(struct sk_buff *skb)
+{
+ u32 pad_len;
+
+ /* padding for 8-byte size alignment */
+ pad_len = -(skb->len + OPA_VNIC_ICRC_TAIL_LEN) & 0x7;
+ pad_len += OPA_VNIC_ICRC_TAIL_LEN;
+
+ return (skb->len + pad_len) >> 3;
+}
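+
+/*
+ * The "-(x) & 0x7" idiom computes the number of bytes needed to round
+ * (skb->len + OPA_VNIC_ICRC_TAIL_LEN) up to a multiple of 8, so the
+ * returned value is the padded wire length in 8-byte units.
+ */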
+
+/* opa_vnic_encap_skb - encapsulate skb packet with OPA header and meta data */
+void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
+{
+ struct __opa_veswport_info *info = &adapter->info;
+ struct opa_vnic_skb_mdata *mdata;
+ u8 def_port, sc, rc, entropy, *hdr;
+ u16 len, l4_hdr;
+ u32 dlid;
+
+ hdr = skb_push(skb, OPA_VNIC_HDR_LEN);
+
+ entropy = opa_vnic_calc_entropy(skb);
+ def_port = opa_vnic_get_def_port(adapter, entropy);
+ len = opa_vnic_wire_length(skb);
+ dlid = opa_vnic_get_dlid(adapter, skb, def_port);
+ sc = opa_vnic_get_sc(info, skb);
+ rc = opa_vnic_get_rc(info, skb);
+ l4_hdr = info->vesw.vesw_id;
+
+ mdata = skb_push(skb, sizeof(*mdata));
+ mdata->vl = opa_vnic_get_vl(adapter, skb);
+ mdata->entropy = entropy;
+ mdata->flags = 0;
+ if (unlikely(!dlid)) {
+ mdata->flags = OPA_VNIC_SKB_MDATA_ENCAP_ERR;
+ return;
+ }
+
+ opa_vnic_make_header(hdr, info->vport.encap_slid, dlid, len,
+ info->vesw.pkey, entropy, sc, rc,
+ OPA_VNIC_L4_ETHR, l4_hdr);
+}
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
new file mode 100644
index 000000000000..012fc27c5c93
--- /dev/null
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
@@ -0,0 +1,524 @@
+#ifndef _OPA_VNIC_ENCAP_H
+#define _OPA_VNIC_ENCAP_H
+/*
+ * Copyright(c) 2017 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This file contains all OPA VNIC declarations required for encapsulation
+ * and decapsulation of Ethernet packets.
+ */
+
+#include <linux/types.h>
+#include <rdma/ib_mad.h>
+
+/* EMA class version */
+#define OPA_EMA_CLASS_VERSION 0x80
+
+/*
+ * Define the Intel vendor management class for OPA
+ * ETHERNET MANAGEMENT
+ */
+#define OPA_MGMT_CLASS_INTEL_EMA 0x34
+
+/* EM attribute IDs */
+#define OPA_EM_ATTR_CLASS_PORT_INFO 0x0001
+#define OPA_EM_ATTR_VESWPORT_INFO 0x0011
+#define OPA_EM_ATTR_VESWPORT_MAC_ENTRIES 0x0012
+#define OPA_EM_ATTR_IFACE_UCAST_MACS 0x0013
+#define OPA_EM_ATTR_IFACE_MCAST_MACS 0x0014
+#define OPA_EM_ATTR_DELETE_VESW 0x0015
+#define OPA_EM_ATTR_VESWPORT_SUMMARY_COUNTERS 0x0020
+#define OPA_EM_ATTR_VESWPORT_ERROR_COUNTERS 0x0022
+
+/* VNIC configured and operational state values */
+#define OPA_VNIC_STATE_DROP_ALL 0x1
+#define OPA_VNIC_STATE_FORWARDING 0x3
+
+#define OPA_VESW_MAX_NUM_DEF_PORT 16
+#define OPA_VNIC_MAX_NUM_PCP 8
+
+#define OPA_VNIC_EMA_DATA (OPA_MGMT_MAD_SIZE - IB_MGMT_VENDOR_HDR)
+
+/* Defines for vendor specific notice (trap) attributes */
+#define OPA_INTEL_EMA_NOTICE_TYPE_INFO 0x04
+
+/* INTEL OUI */
+#define INTEL_OUI_1 0x00
+#define INTEL_OUI_2 0x06
+#define INTEL_OUI_3 0x6a
+
+/* Trap opcodes sent from VNIC */
+#define OPA_VESWPORT_TRAP_IFACE_UCAST_MAC_CHANGE 0x1
+#define OPA_VESWPORT_TRAP_IFACE_MCAST_MAC_CHANGE 0x2
+#define OPA_VESWPORT_TRAP_ETH_LINK_STATUS_CHANGE 0x3
+
+#define OPA_VNIC_DLID_SD_IS_SRC_MAC(dlid_sd) (!!((dlid_sd) & 0x20))
+#define OPA_VNIC_DLID_SD_GET_DLID(dlid_sd) ((dlid_sd) >> 8)
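+
+/*
+ * dlid_sd packs the DLID in the upper 24 bits and side data in the low
+ * byte; bit 5 of the side data flags an entry associated with a source
+ * MAC address (such entries are skipped on destination lookups).
+ */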
+
+/* VNIC Ethernet link status */
+#define OPA_VNIC_ETH_LINK_UP 1
+#define OPA_VNIC_ETH_LINK_DOWN 2
+
+/* routing control */
+#define OPA_VNIC_ENCAP_RC_DEFAULT 0
+#define OPA_VNIC_ENCAP_RC_IPV4 4
+#define OPA_VNIC_ENCAP_RC_IPV4_UDP 8
+#define OPA_VNIC_ENCAP_RC_IPV4_TCP 12
+#define OPA_VNIC_ENCAP_RC_IPV6 16
+#define OPA_VNIC_ENCAP_RC_IPV6_TCP 20
+#define OPA_VNIC_ENCAP_RC_IPV6_UDP 24
+
+#define OPA_VNIC_ENCAP_RC_EXT(w, b) (((w) >> OPA_VNIC_ENCAP_RC_ ## b) & 0x7)
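+
+/*
+ * The routing control word holds one 3-bit field per traffic class at the
+ * bit offsets defined above; e.g. OPA_VNIC_ENCAP_RC_EXT(rc, IPV4_UDP)
+ * extracts bits 8-10 of rc.
+ */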
+
+/**
+ * struct opa_vesw_info - OPA vnic switch information
+ * @fabric_id: 10-bit fabric id
+ * @vesw_id: 12-bit virtual ethernet switch id
+ * @rsvd0: reserved bytes
+ * @def_port_mask: bitmask of default ports
+ * @rsvd1: reserved bytes
+ * @pkey: partition key
+ * @rsvd2: reserved bytes
+ * @u_mcast_dlid: unknown multicast dlid
+ * @u_ucast_dlid: array of unknown unicast dlids
+ * @rsvd3: reserved bytes
+ * @rc: routing control
+ * @eth_mtu: Ethernet MTU
+ * @rsvd4: reserved bytes
+ */
+struct opa_vesw_info {
+ __be16 fabric_id;
+ __be16 vesw_id;
+
+ u8 rsvd0[6];
+ __be16 def_port_mask;
+
+ u8 rsvd1[2];
+ __be16 pkey;
+
+ u8 rsvd2[4];
+ __be32 u_mcast_dlid;
+ __be32 u_ucast_dlid[OPA_VESW_MAX_NUM_DEF_PORT];
+
+ __be32 rc;
+
+ u8 rsvd3[56];
+ __be16 eth_mtu;
+ u8 rsvd4[2];
+} __packed;
+
+/**
+ * struct opa_per_veswport_info - OPA vnic per port information
+ * @port_num: port number
+ * @eth_link_status: current ethernet link state
+ * @rsvd0: reserved bytes
+ * @base_mac_addr: base mac address
+ * @config_state: configured port state
+ * @oper_state: operational port state
+ * @max_mac_tbl_ent: max number of mac table entries
+ * @max_smac_ent: max smac entries in mac table
+ * @mac_tbl_digest: mac table digest
+ * @rsvd1: reserved bytes
+ * @encap_slid: base slid for the port
+ * @pcp_to_sc_uc: sc by pcp index for unicast ethernet packets
+ * @pcp_to_vl_uc: vl by pcp index for unicast ethernet packets
+ * @pcp_to_sc_mc: sc by pcp index for multicast ethernet packets
+ * @pcp_to_vl_mc: vl by pcp index for multicast ethernet packets
+ * @non_vlan_sc_uc: sc for non-vlan unicast ethernet packets
+ * @non_vlan_vl_uc: vl for non-vlan unicast ethernet packets
+ * @non_vlan_sc_mc: sc for non-vlan multicast ethernet packets
+ * @non_vlan_vl_mc: vl for non-vlan multicast ethernet packets
+ * @rsvd2: reserved bytes
+ * @uc_macs_gen_count: generation count for unicast macs list
+ * @mc_macs_gen_count: generation count for multicast macs list
+ * @rsvd3: reserved bytes
+ */
+struct opa_per_veswport_info {
+ __be32 port_num;
+
+ u8 eth_link_status;
+ u8 rsvd0[3];
+
+ u8 base_mac_addr[ETH_ALEN];
+ u8 config_state;
+ u8 oper_state;
+
+ __be16 max_mac_tbl_ent;
+ __be16 max_smac_ent;
+ __be32 mac_tbl_digest;
+ u8 rsvd1[4];
+
+ __be32 encap_slid;
+
+ u8 pcp_to_sc_uc[OPA_VNIC_MAX_NUM_PCP];
+ u8 pcp_to_vl_uc[OPA_VNIC_MAX_NUM_PCP];
+ u8 pcp_to_sc_mc[OPA_VNIC_MAX_NUM_PCP];
+ u8 pcp_to_vl_mc[OPA_VNIC_MAX_NUM_PCP];
+
+ u8 non_vlan_sc_uc;
+ u8 non_vlan_vl_uc;
+ u8 non_vlan_sc_mc;
+ u8 non_vlan_vl_mc;
+
+ u8 rsvd2[48];
+
+ __be16 uc_macs_gen_count;
+ __be16 mc_macs_gen_count;
+
+ u8 rsvd3[8];
+} __packed;
+
+/**
+ * struct opa_veswport_info - OPA vnic port information
+ * @vesw: OPA vnic switch information
+ * @vport: OPA vnic per port information
+ *
+ * On the host, each of the virtual ethernet ports belongs
+ * to a different virtual ethernet switch.
+ */
+struct opa_veswport_info {
+ struct opa_vesw_info vesw;
+ struct opa_per_veswport_info vport;
+};
+
+/**
+ * struct opa_veswport_mactable_entry - single entry in the forwarding table
+ * @mac_addr: MAC address
+ * @mac_addr_mask: MAC address bit mask
+ * @dlid_sd: Matching DLID and side data
+ *
+ * On the host, each virtual ethernet port has a
+ * forwarding table. These tables are used to
+ * map a MAC to a LID and other data. For more
+ * details see struct opa_veswport_mactable.
+ * This is the structure of a single mac table entry.
+ */
+struct opa_veswport_mactable_entry {
+ u8 mac_addr[ETH_ALEN];
+ u8 mac_addr_mask[ETH_ALEN];
+ __be32 dlid_sd;
+} __packed;
+
+/**
+ * struct opa_veswport_mactable - Forwarding table array
+ * @offset: mac table starting offset
+ * @num_entries: Number of entries to get or set
+ * @mac_tbl_digest: mac table digest
+ * @tbl_entries: Array of table entries
+ *
+ * The EM sends down this structure in a MAD indicating
+ * the starting offset in the forwarding table that these
+ * entries are to be loaded into and the number of entries
+ * that this MAD instance contains.
+ * The mac_tbl_digest has been added to this MAD structure. It is set by
+ * the EM and used by the EM to check for any discrepancies between this
+ * value and the value it maintains, in case the VNIC port is deleted or
+ * unloaded. A new instantiation of a VNIC will always have a digest of zero.
+ * This value is stored as part of the vnic adapter structure and is
+ * accessed by the GET and SET routines for both the mac table entries and
+ * the veswport info.
+ */
+struct opa_veswport_mactable {
+ __be16 offset;
+ __be16 num_entries;
+ __be32 mac_tbl_digest;
+ struct opa_veswport_mactable_entry tbl_entries[];
+} __packed;
+
+/**
+ * struct opa_veswport_summary_counters - summary counters
+ * @vp_instance: vport instance on the OPA port
+ * @vesw_id: virtual ethernet switch id
+ * @veswport_num: virtual ethernet switch port number
+ * @tx_errors: transmit errors
+ * @rx_errors: receive errors
+ * @tx_packets: transmit packets
+ * @rx_packets: receive packets
+ * @tx_bytes: transmit bytes
+ * @rx_bytes: receive bytes
+ * @tx_unicast: unicast packets transmitted
+ * @tx_mcastbcast: multicast/broadcast packets transmitted
+ * @tx_untagged: non-vlan packets transmitted
+ * @tx_vlan: vlan packets transmitted
+ * @tx_64_size: transmit packet length is 64 bytes
+ * @tx_65_127: transmit packet length is >= 65 and <= 127 bytes
+ * @tx_128_255: transmit packet length is >= 128 and <= 255 bytes
+ * @tx_256_511: transmit packet length is >= 256 and <= 511 bytes
+ * @tx_512_1023: transmit packet length is >= 512 and <= 1023 bytes
+ * @tx_1024_1518: transmit packet length is >= 1024 and <= 1518 bytes
+ * @tx_1519_max: transmit packet length >= 1519 bytes
+ * @rx_unicast: unicast packets received
+ * @rx_mcastbcast: multicast/broadcast packets received
+ * @rx_untagged: non-vlan packets received
+ * @rx_vlan: vlan packets received
+ * @rx_64_size: received packet length is 64 bytes
+ * @rx_65_127: received packet length is >= 65 and <= 127 bytes
+ * @rx_128_255: received packet length is >= 128 and <= 255 bytes
+ * @rx_256_511: received packet length is >= 256 and <= 511 bytes
+ * @rx_512_1023: received packet length is >= 512 and <= 1023 bytes
+ * @rx_1024_1518: received packet length is >= 1024 and <= 1518 bytes
+ * @rx_1519_max: received packet length >= 1519 bytes
+ * @reserved: reserved bytes
+ *
+ * All the above are counters of corresponding conditions.
+ */
+struct opa_veswport_summary_counters {
+ __be16 vp_instance;
+ __be16 vesw_id;
+ __be32 veswport_num;
+
+ __be64 tx_errors;
+ __be64 rx_errors;
+ __be64 tx_packets;
+ __be64 rx_packets;
+ __be64 tx_bytes;
+ __be64 rx_bytes;
+
+ __be64 tx_unicast;
+ __be64 tx_mcastbcast;
+
+ __be64 tx_untagged;
+ __be64 tx_vlan;
+
+ __be64 tx_64_size;
+ __be64 tx_65_127;
+ __be64 tx_128_255;
+ __be64 tx_256_511;
+ __be64 tx_512_1023;
+ __be64 tx_1024_1518;
+ __be64 tx_1519_max;
+
+ __be64 rx_unicast;
+ __be64 rx_mcastbcast;
+
+ __be64 rx_untagged;
+ __be64 rx_vlan;
+
+ __be64 rx_64_size;
+ __be64 rx_65_127;
+ __be64 rx_128_255;
+ __be64 rx_256_511;
+ __be64 rx_512_1023;
+ __be64 rx_1024_1518;
+ __be64 rx_1519_max;
+
+ __be64 reserved[16];
+} __packed;
+
+/**
+ * struct opa_veswport_error_counters - error counters
+ * @vp_instance: vport instance on the OPA port
+ * @vesw_id: virtual ethernet switch id
+ * @veswport_num: virtual ethernet switch port number
+ * @tx_errors: transmit errors
+ * @rx_errors: receive errors
+ * @rsvd0: reserved bytes
+ * @tx_smac_filt: smac filter errors
+ * @rsvd1: reserved bytes
+ * @rsvd2: reserved bytes
+ * @rsvd3: reserved bytes
+ * @tx_dlid_zero: transmit packets with invalid dlid
+ * @rsvd4: reserved bytes
+ * @tx_logic: other transmit errors
+ * @rsvd5: reserved bytes
+ * @tx_drop_state: packet transmission in non-forwarding port state
+ * @rx_bad_veswid: received packet with invalid vesw id
+ * @rsvd6: reserved bytes
+ * @rx_runt: received ethernet packet with length < 64 bytes
+ * @rx_oversize: received ethernet packet with length > MTU size
+ * @rsvd7: reserved bytes
+ * @rx_eth_down: received packets when interface is down
+ * @rx_drop_state: received packets in non-forwarding port state
+ * @rx_logic: other receive errors
+ * @rsvd8: reserved bytes
+ * @rsvd9: reserved bytes
+ *
+ * All the above are counters of corresponding error conditions.
+ */
+struct opa_veswport_error_counters {
+ __be16 vp_instance;
+ __be16 vesw_id;
+ __be32 veswport_num;
+
+ __be64 tx_errors;
+ __be64 rx_errors;
+
+ __be64 rsvd0;
+ __be64 tx_smac_filt;
+ __be64 rsvd1;
+ __be64 rsvd2;
+ __be64 rsvd3;
+ __be64 tx_dlid_zero;
+ __be64 rsvd4;
+ __be64 tx_logic;
+ __be64 rsvd5;
+ __be64 tx_drop_state;
+
+ __be64 rx_bad_veswid;
+ __be64 rsvd6;
+ __be64 rx_runt;
+ __be64 rx_oversize;
+ __be64 rsvd7;
+ __be64 rx_eth_down;
+ __be64 rx_drop_state;
+ __be64 rx_logic;
+ __be64 rsvd8;
+
+ __be64 rsvd9[16];
+} __packed;
+
+/**
+ * struct opa_veswport_trap - Trap message sent to EM by VNIC
+ * @fabric_id: 10 bit fabric id
+ * @veswid: 12 bit virtual ethernet switch id
+ * @veswportnum: logical port number on the Virtual switch
+ * @opaportnum: physical port num (redundant on host)
+ * @veswportindex: switch port index on opa port 0 based
+ * @opcode: operation
+ * @reserved: 32 bit for alignment
+ *
+ * The VNIC will send trap messages to the Ethernet manager to
+ * inform it about changes to the VNIC config, behaviour, etc.
+ * This is the format of the trap payload.
+ */
+struct opa_veswport_trap {
+ __be16 fabric_id;
+ __be16 veswid;
+ __be32 veswportnum;
+ __be16 opaportnum;
+ u8 veswportindex;
+ u8 opcode;
+ __be32 reserved;
+} __packed;
+
+/**
+ * struct opa_vnic_iface_mac_entry - single entry in the mac list
+ * @mac_addr: MAC address
+ */
+struct opa_vnic_iface_mac_entry {
+ u8 mac_addr[ETH_ALEN];
+};
+
+/**
+ * struct opa_veswport_iface_macs - Msg to set globally administered MAC
+ * @start_idx: position of first entry (0 based)
+ * @num_macs_in_msg: number of MACs in this message
+ * @tot_macs_in_lst: The total number of MACs the agent has
+ * @gen_count: gen_count to indicate change
+ * @entry: The mac list entry
+ *
+ * The same attribute IDs and attribute modifiers used for locally
+ * administered addresses are used to set globally administered addresses.
+ */
+struct opa_veswport_iface_macs {
+ __be16 start_idx;
+ __be16 num_macs_in_msg;
+ __be16 tot_macs_in_lst;
+ __be16 gen_count;
+ struct opa_vnic_iface_mac_entry entry[];
+} __packed;
+
+/**
+ * struct opa_vnic_vema_mad - Generic VEMA MAD
+ * @mad_hdr: Generic MAD header
+ * @rmpp_hdr: RMPP header for vendor specific MADs
+ * @reserved: reserved bytes
+ * @oui: Unique org identifier
+ * @data: MAD data
+ */
+struct opa_vnic_vema_mad {
+ struct ib_mad_hdr mad_hdr;
+ struct ib_rmpp_hdr rmpp_hdr;
+ u8 reserved;
+ u8 oui[3];
+ u8 data[OPA_VNIC_EMA_DATA];
+};
+
+/**
+ * struct opa_vnic_notice_attr - Generic Notice MAD
+ * @gen_type: Generic/Specific bit and type of notice
+ * @oui_1: Vendor ID byte 1
+ * @oui_2: Vendor ID byte 2
+ * @oui_3: Vendor ID byte 3
+ * @trap_num: Trap number
+ * @toggle_count: Notice toggle bit and count value
+ * @issuer_lid: Trap issuer's lid
+ * @reserved: reserved bytes
+ * @issuer_gid: Issuer GID (only if Report method)
+ * @raw_data: Trap message body
+ */
+struct opa_vnic_notice_attr {
+ u8 gen_type;
+ u8 oui_1;
+ u8 oui_2;
+ u8 oui_3;
+ __be16 trap_num;
+ __be16 toggle_count;
+ __be32 issuer_lid;
+ __be32 reserved;
+ u8 issuer_gid[16];
+ u8 raw_data[64];
+} __packed;
+
+/**
+ * struct opa_vnic_vema_mad_trap - Generic VEMA MAD Trap
+ * @mad_hdr: Generic MAD header
+ * @rmpp_hdr: RMPP header for vendor specific MADs
+ * @reserved: reserved bytes
+ * @oui: Unique org identifier
+ * @notice: Notice structure
+ */
+struct opa_vnic_vema_mad_trap {
+ struct ib_mad_hdr mad_hdr;
+ struct ib_rmpp_hdr rmpp_hdr;
+ u8 reserved;
+ u8 oui[3];
+ struct opa_vnic_notice_attr notice;
+};
+
+#endif /* _OPA_VNIC_ENCAP_H */
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
new file mode 100644
index 000000000000..316959940d2f
--- /dev/null
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright(c) 2017 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This file contains OPA VNIC ethtool functions
+ */
+
+#include <linux/ethtool.h>
+
+#include "opa_vnic_internal.h"
+
+enum {NETDEV_STATS, VNIC_STATS};
+
+struct vnic_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ struct {
+ int sizeof_stat;
+ int stat_offset;
+ };
+};
+
+#define VNIC_STAT(m) { sizeof_field(struct opa_vnic_stats, m), \
+ offsetof(struct opa_vnic_stats, m) }
+
+static struct vnic_stats vnic_gstrings_stats[] = {
+ /* NETDEV stats */
+ {"rx_packets", VNIC_STAT(netstats.rx_packets)},
+ {"tx_packets", VNIC_STAT(netstats.tx_packets)},
+ {"rx_bytes", VNIC_STAT(netstats.rx_bytes)},
+ {"tx_bytes", VNIC_STAT(netstats.tx_bytes)},
+ {"rx_errors", VNIC_STAT(netstats.rx_errors)},
+ {"tx_errors", VNIC_STAT(netstats.tx_errors)},
+ {"rx_dropped", VNIC_STAT(netstats.rx_dropped)},
+ {"tx_dropped", VNIC_STAT(netstats.tx_dropped)},
+
+ /* SUMMARY counters */
+ {"tx_unicast", VNIC_STAT(tx_grp.unicast)},
+ {"tx_mcastbcast", VNIC_STAT(tx_grp.mcastbcast)},
+ {"tx_untagged", VNIC_STAT(tx_grp.untagged)},
+ {"tx_vlan", VNIC_STAT(tx_grp.vlan)},
+
+ {"tx_64_size", VNIC_STAT(tx_grp.s_64)},
+ {"tx_65_127", VNIC_STAT(tx_grp.s_65_127)},
+ {"tx_128_255", VNIC_STAT(tx_grp.s_128_255)},
+ {"tx_256_511", VNIC_STAT(tx_grp.s_256_511)},
+ {"tx_512_1023", VNIC_STAT(tx_grp.s_512_1023)},
+ {"tx_1024_1518", VNIC_STAT(tx_grp.s_1024_1518)},
+ {"tx_1519_max", VNIC_STAT(tx_grp.s_1519_max)},
+
+ {"rx_unicast", VNIC_STAT(rx_grp.unicast)},
+ {"rx_mcastbcast", VNIC_STAT(rx_grp.mcastbcast)},
+ {"rx_untagged", VNIC_STAT(rx_grp.untagged)},
+ {"rx_vlan", VNIC_STAT(rx_grp.vlan)},
+
+ {"rx_64_size", VNIC_STAT(rx_grp.s_64)},
+ {"rx_65_127", VNIC_STAT(rx_grp.s_65_127)},
+ {"rx_128_255", VNIC_STAT(rx_grp.s_128_255)},
+ {"rx_256_511", VNIC_STAT(rx_grp.s_256_511)},
+ {"rx_512_1023", VNIC_STAT(rx_grp.s_512_1023)},
+ {"rx_1024_1518", VNIC_STAT(rx_grp.s_1024_1518)},
+ {"rx_1519_max", VNIC_STAT(rx_grp.s_1519_max)},
+
+ /* ERROR counters */
+ {"rx_fifo_errors", VNIC_STAT(netstats.rx_fifo_errors)},
+ {"rx_length_errors", VNIC_STAT(netstats.rx_length_errors)},
+
+ {"tx_fifo_errors", VNIC_STAT(netstats.tx_fifo_errors)},
+ {"tx_carrier_errors", VNIC_STAT(netstats.tx_carrier_errors)},
+
+ {"tx_dlid_zero", VNIC_STAT(tx_dlid_zero)},
+ {"tx_drop_state", VNIC_STAT(tx_drop_state)},
+ {"rx_drop_state", VNIC_STAT(rx_drop_state)},
+ {"rx_oversize", VNIC_STAT(rx_oversize)},
+ {"rx_runt", VNIC_STAT(rx_runt)},
+};
+
+#define VNIC_STATS_LEN ARRAY_SIZE(vnic_gstrings_stats)
+
+/* vnic_get_drvinfo - get driver info */
+static void vnic_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strscpy(drvinfo->driver, opa_vnic_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, dev_name(netdev->dev.parent),
+ sizeof(drvinfo->bus_info));
+}
+
+/* vnic_get_sset_count - get string set count */
+static int vnic_get_sset_count(struct net_device *netdev, int sset)
+{
+ return (sset == ETH_SS_STATS) ? VNIC_STATS_LEN : -EOPNOTSUPP;
+}
+
+/* vnic_get_ethtool_stats - get statistics */
+static void vnic_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
+ struct opa_vnic_stats vstats;
+ int i;
+
+ memset(&vstats, 0, sizeof(vstats));
+ spin_lock(&adapter->stats_lock);
+ adapter->rn_ops->ndo_get_stats64(netdev, &vstats.netstats);
+ spin_unlock(&adapter->stats_lock);
+ for (i = 0; i < VNIC_STATS_LEN; i++) {
+ char *p = (char *)&vstats + vnic_gstrings_stats[i].stat_offset;
+
+ data[i] = (vnic_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+}
+
+/* vnic_get_strings - get strings */
+static void vnic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < VNIC_STATS_LEN; i++)
+ ethtool_puts(&data, vnic_gstrings_stats[i].stat_string);
+}
+
+/* ethtool ops */
+static const struct ethtool_ops opa_vnic_ethtool_ops = {
+ .get_drvinfo = vnic_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = vnic_get_strings,
+ .get_sset_count = vnic_get_sset_count,
+ .get_ethtool_stats = vnic_get_ethtool_stats,
+};
+
+/* opa_vnic_set_ethtool_ops - set ethtool ops */
+void opa_vnic_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &opa_vnic_ethtool_ops;
+}
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
new file mode 100644
index 000000000000..dd942dd642bd
--- /dev/null
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
@@ -0,0 +1,329 @@
+#ifndef _OPA_VNIC_INTERNAL_H
+#define _OPA_VNIC_INTERNAL_H
+/*
+ * Copyright(c) 2017 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This file contains OPA VNIC driver internal declarations
+ */
+
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/hashtable.h>
+#include <linux/sizes.h>
+#include <rdma/opa_vnic.h>
+
+#include "opa_vnic_encap.h"
+
+#define OPA_VNIC_VLAN_PCP(vlan_tci) \
+ (((vlan_tci) & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT)
+
+/* Flow to default port redirection table size */
+#define OPA_VNIC_FLOW_TBL_SIZE 32
+
+/* Invalid port number */
+#define OPA_VNIC_INVALID_PORT 0xff
+
+struct opa_vnic_adapter;
+
+/*
+ * struct __opa_vesw_info - OPA vnic virtual switch info
+ *
+ * Same as opa_vesw_info without bitwise attribute.
+ */
+struct __opa_vesw_info {
+ u16 fabric_id;
+ u16 vesw_id;
+
+ u8 rsvd0[6];
+ u16 def_port_mask;
+
+ u8 rsvd1[2];
+ u16 pkey;
+
+ u8 rsvd2[4];
+ u32 u_mcast_dlid;
+ u32 u_ucast_dlid[OPA_VESW_MAX_NUM_DEF_PORT];
+
+ u32 rc;
+
+ u8 rsvd3[56];
+ u16 eth_mtu;
+ u8 rsvd4[2];
+} __packed;
+
+/*
+ * struct __opa_per_veswport_info - OPA vnic per port info
+ *
+ * Same as opa_per_veswport_info without bitwise attribute.
+ */
+struct __opa_per_veswport_info {
+ u32 port_num;
+
+ u8 eth_link_status;
+ u8 rsvd0[3];
+
+ u8 base_mac_addr[ETH_ALEN];
+ u8 config_state;
+ u8 oper_state;
+
+ u16 max_mac_tbl_ent;
+ u16 max_smac_ent;
+ u32 mac_tbl_digest;
+ u8 rsvd1[4];
+
+ u32 encap_slid;
+
+ u8 pcp_to_sc_uc[OPA_VNIC_MAX_NUM_PCP];
+ u8 pcp_to_vl_uc[OPA_VNIC_MAX_NUM_PCP];
+ u8 pcp_to_sc_mc[OPA_VNIC_MAX_NUM_PCP];
+ u8 pcp_to_vl_mc[OPA_VNIC_MAX_NUM_PCP];
+
+ u8 non_vlan_sc_uc;
+ u8 non_vlan_vl_uc;
+ u8 non_vlan_sc_mc;
+ u8 non_vlan_vl_mc;
+
+ u8 rsvd2[48];
+
+ u16 uc_macs_gen_count;
+ u16 mc_macs_gen_count;
+
+ u8 rsvd3[8];
+} __packed;
+
+/*
+ * struct __opa_veswport_info - OPA vnic port info
+ *
+ * Same as opa_veswport_info without bitwise attribute.
+ */
+struct __opa_veswport_info {
+ struct __opa_vesw_info vesw;
+ struct __opa_per_veswport_info vport;
+};
+
+/*
+ * struct __opa_veswport_trap - OPA vnic trap info
+ *
+ * Same as opa_veswport_trap without bitwise attribute.
+ */
+struct __opa_veswport_trap {
+ u16 fabric_id;
+ u16 veswid;
+ u32 veswportnum;
+ u16 opaportnum;
+ u8 veswportindex;
+ u8 opcode;
+ u32 reserved;
+} __packed;
+
+/**
+ * struct opa_vnic_ctrl_port - OPA virtual NIC control port
+ * @ibdev: pointer to ib device
+ * @ops: opa vnic control operations
+ * @num_ports: number of opa ports
+ */
+struct opa_vnic_ctrl_port {
+ struct ib_device *ibdev;
+ struct opa_vnic_ctrl_ops *ops;
+ u8 num_ports;
+};
+
+/**
+ * struct opa_vnic_adapter - OPA VNIC netdev private data structure
+ * @netdev: pointer to associated netdev
+ * @ibdev: ib device
+ * @cport: pointer to opa vnic control port
+ * @rn_ops: rdma netdev's net_device_ops
+ * @port_num: OPA port number
+ * @vport_num: vesw port number
+ * @lock: adapter lock
+ * @info: virtual ethernet switch port information
+ * @vema_mac_addr: mac address configured by vema
+ * @umac_hash: unicast maclist hash
+ * @mmac_hash: multicast maclist hash
+ * @mactbl: hash table of MAC entries
+ * @mactbl_lock: mac table lock
+ * @stats_lock: statistics lock
+ * @flow_tbl: flow to default port redirection table
+ * @trap_timeout: trap timeout
+ * @trap_count: no. of traps allowed within timeout period
+ */
+struct opa_vnic_adapter {
+ struct net_device *netdev;
+ struct ib_device *ibdev;
+ struct opa_vnic_ctrl_port *cport;
+ const struct net_device_ops *rn_ops;
+
+ u8 port_num;
+ u8 vport_num;
+
+ /* Lock used around concurrent updates to netdev */
+ struct mutex lock;
+
+ struct __opa_veswport_info info;
+ u8 vema_mac_addr[ETH_ALEN];
+ u32 umac_hash;
+ u32 mmac_hash;
+ struct hlist_head __rcu *mactbl;
+
+ /* Lock used to protect updates to mac table */
+ struct mutex mactbl_lock;
+
+ /* Lock used to protect access to vnic counters */
+ spinlock_t stats_lock;
+
+ u8 flow_tbl[OPA_VNIC_FLOW_TBL_SIZE];
+
+ unsigned long trap_timeout;
+ u8 trap_count;
+};
+
+/* Same as opa_veswport_mactable_entry, but without bitwise attribute */
+struct __opa_vnic_mactable_entry {
+ u8 mac_addr[ETH_ALEN];
+ u8 mac_addr_mask[ETH_ALEN];
+ u32 dlid_sd;
+} __packed;
+
+/**
+ * struct opa_vnic_mac_tbl_node - OPA VNIC mac table node
+ * @hlist: hash list handle
+ * @index: index of entry in the mac table
+ * @entry: entry in the table
+ */
+struct opa_vnic_mac_tbl_node {
+ struct hlist_node hlist;
+ u16 index;
+ struct __opa_vnic_mactable_entry entry;
+};
+
+#define v_dbg(format, arg...) \
+ netdev_dbg(adapter->netdev, format, ## arg)
+#define v_err(format, arg...) \
+ netdev_err(adapter->netdev, format, ## arg)
+#define v_info(format, arg...) \
+ netdev_info(adapter->netdev, format, ## arg)
+#define v_warn(format, arg...) \
+ netdev_warn(adapter->netdev, format, ## arg)
+
+#define c_err(format, arg...) \
+ dev_err(&cport->ibdev->dev, format, ## arg)
+#define c_info(format, arg...) \
+ dev_info(&cport->ibdev->dev, format, ## arg)
+#define c_dbg(format, arg...) \
+ dev_dbg(&cport->ibdev->dev, format, ## arg)
+
+/* The maximum allowed entries in the mac table */
+#define OPA_VNIC_MAC_TBL_MAX_ENTRIES 2048
+/* Limit of smac entries in mac table */
+#define OPA_VNIC_MAX_SMAC_LIMIT 256
+
+/* The last octet of the MAC address is used as the key to the hash table */
+#define OPA_VNIC_MAC_HASH_IDX 5
+
+/* The VNIC MAC hash table is of size 2^8 */
+#define OPA_VNIC_MAC_TBL_HASH_BITS 8
+#define OPA_VNIC_MAC_TBL_SIZE BIT(OPA_VNIC_MAC_TBL_HASH_BITS)
+
+/* VNIC HASH MACROS */
+#define vnic_hash_init(hashtable) __hash_init(hashtable, OPA_VNIC_MAC_TBL_SIZE)
+
+#define vnic_hash_add(hashtable, node, key) \
+ hlist_add_head(node, \
+ &hashtable[hash_min(key, ilog2(OPA_VNIC_MAC_TBL_SIZE))])
+
+#define vnic_hash_for_each_safe(name, bkt, tmp, obj, member) \
+ for ((bkt) = 0, obj = NULL; \
+ !obj && (bkt) < OPA_VNIC_MAC_TBL_SIZE; (bkt)++) \
+ hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
+
+#define vnic_hash_for_each_possible(name, obj, member, key) \
+ hlist_for_each_entry(obj, \
+ &name[hash_min(key, ilog2(OPA_VNIC_MAC_TBL_SIZE))], member)
+
+#define vnic_hash_for_each(name, bkt, obj, member) \
+ for ((bkt) = 0, obj = NULL; \
+ !obj && (bkt) < OPA_VNIC_MAC_TBL_SIZE; (bkt)++) \
+ hlist_for_each_entry(obj, &name[bkt], member)
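+
+/*
+ * These helpers mirror the hash_init/hash_add/hash_for_each macros from
+ * <linux/hashtable.h>, but operate on a dynamically allocated bucket array
+ * of OPA_VNIC_MAC_TBL_SIZE entries (the standard macros expect a statically
+ * sized table).
+ */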
+
+extern char opa_vnic_driver_name[];
+
+struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev,
+ u8 port_num, u8 vport_num);
+void opa_vnic_rem_netdev(struct opa_vnic_adapter *adapter);
+void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb);
+u8 opa_vnic_get_vl(struct opa_vnic_adapter *adapter, struct sk_buff *skb);
+u8 opa_vnic_calc_entropy(struct sk_buff *skb);
+void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter);
+void opa_vnic_release_mac_tbl(struct opa_vnic_adapter *adapter);
+void opa_vnic_query_mac_tbl(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_mactable *tbl);
+int opa_vnic_update_mac_tbl(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_mactable *tbl);
+void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_iface_macs *macs);
+void opa_vnic_query_mcast_macs(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_iface_macs *macs);
+void opa_vnic_get_summary_counters(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_summary_counters *cntrs);
+void opa_vnic_get_error_counters(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_error_counters *cntrs);
+void opa_vnic_get_vesw_info(struct opa_vnic_adapter *adapter,
+ struct opa_vesw_info *info);
+void opa_vnic_set_vesw_info(struct opa_vnic_adapter *adapter,
+ struct opa_vesw_info *info);
+void opa_vnic_get_per_veswport_info(struct opa_vnic_adapter *adapter,
+ struct opa_per_veswport_info *info);
+void opa_vnic_set_per_veswport_info(struct opa_vnic_adapter *adapter,
+ struct opa_per_veswport_info *info);
+void opa_vnic_vema_report_event(struct opa_vnic_adapter *adapter, u8 event);
+void opa_vnic_set_ethtool_ops(struct net_device *netdev);
+void opa_vnic_vema_send_trap(struct opa_vnic_adapter *adapter,
+ struct __opa_veswport_trap *data, u32 lid);
+
+#endif /* _OPA_VNIC_INTERNAL_H */
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
new file mode 100644
index 000000000000..071f35711468
--- /dev/null
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
@@ -0,0 +1,400 @@
+/*
+ * Copyright(c) 2017 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This file contains OPA Virtual Network Interface Controller (VNIC) driver
+ * netdev functionality.
+ */
+
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+
+#include "opa_vnic_internal.h"
+
+#define OPA_TX_TIMEOUT_MS 1000
+
+#define OPA_VNIC_SKB_HEADROOM \
+ ALIGN((OPA_VNIC_HDR_LEN + OPA_VNIC_SKB_MDATA_LEN), 8)
+
+/* This function is overloaded for opa_vnic specific implementation */
+static void opa_vnic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
+ struct opa_vnic_stats vstats;
+
+ memset(&vstats, 0, sizeof(vstats));
+ spin_lock(&adapter->stats_lock);
+ adapter->rn_ops->ndo_get_stats64(netdev, &vstats.netstats);
+ spin_unlock(&adapter->stats_lock);
+ memcpy(stats, &vstats.netstats, sizeof(*stats));
+}
+
+/* opa_netdev_start_xmit - transmit function */
+static netdev_tx_t opa_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
+
+ v_dbg("xmit: queue %d skb len %d\n", skb->queue_mapping, skb->len);
+ /* pad to ensure minimum ethernet packet length */
+ if (unlikely(skb->len < ETH_ZLEN)) {
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+
+ skb_put(skb, ETH_ZLEN - skb->len);
+ }
+
+ opa_vnic_encap_skb(adapter, skb);
+ return adapter->rn_ops->ndo_start_xmit(skb, netdev);
+}
+
+static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
+ struct opa_vnic_skb_mdata *mdata;
+ int rc;
+
+ /* pass entropy and vl as metadata in skb */
+ mdata = skb_push(skb, sizeof(*mdata));
+ mdata->entropy = opa_vnic_calc_entropy(skb);
+ mdata->vl = opa_vnic_get_vl(adapter, skb);
+ rc = adapter->rn_ops->ndo_select_queue(netdev, skb, sb_dev);
+ skb_pull(skb, sizeof(*mdata));
+ return rc;
+}
+
+static void opa_vnic_update_state(struct opa_vnic_adapter *adapter, bool up)
+{
+ struct __opa_veswport_info *info = &adapter->info;
+
+ mutex_lock(&adapter->lock);
+ /* Operational state can only be DROP_ALL or FORWARDING */
+ if ((info->vport.config_state == OPA_VNIC_STATE_FORWARDING) && up) {
+ info->vport.oper_state = OPA_VNIC_STATE_FORWARDING;
+ info->vport.eth_link_status = OPA_VNIC_ETH_LINK_UP;
+ } else {
+ info->vport.oper_state = OPA_VNIC_STATE_DROP_ALL;
+ info->vport.eth_link_status = OPA_VNIC_ETH_LINK_DOWN;
+ }
+
+ if (info->vport.config_state == OPA_VNIC_STATE_FORWARDING)
+ netif_dormant_off(adapter->netdev);
+ else
+ netif_dormant_on(adapter->netdev);
+ mutex_unlock(&adapter->lock);
+}
+
+/* opa_vnic_process_vema_config - process vema configuration updates */
+void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter)
+{
+ struct __opa_veswport_info *info = &adapter->info;
+ struct rdma_netdev *rn = netdev_priv(adapter->netdev);
+ u8 port_num[OPA_VESW_MAX_NUM_DEF_PORT] = { 0 };
+ struct net_device *netdev = adapter->netdev;
+ u8 i, port_count = 0;
+ u16 port_mask;
+
+ /* If the base_mac_addr is changed, update the interface mac address */
+ if (memcmp(info->vport.base_mac_addr, adapter->vema_mac_addr,
+ ARRAY_SIZE(info->vport.base_mac_addr))) {
+ struct sockaddr saddr;
+
+ memcpy(saddr.sa_data, info->vport.base_mac_addr,
+ ARRAY_SIZE(info->vport.base_mac_addr));
+ mutex_lock(&adapter->lock);
+ eth_commit_mac_addr_change(netdev, &saddr);
+ memcpy(adapter->vema_mac_addr,
+ info->vport.base_mac_addr, ETH_ALEN);
+ mutex_unlock(&adapter->lock);
+ }
+
+ rn->set_id(netdev, info->vesw.vesw_id);
+
+ /* Handle MTU limit change */
+ rtnl_lock();
+ netdev->max_mtu = max_t(unsigned int, info->vesw.eth_mtu,
+ netdev->min_mtu);
+ if (netdev->mtu > netdev->max_mtu)
+ dev_set_mtu(netdev, netdev->max_mtu);
+ rtnl_unlock();
+
+ /* Update flow to default port redirection table */
+ port_mask = info->vesw.def_port_mask;
+ for (i = 0; i < OPA_VESW_MAX_NUM_DEF_PORT; i++) {
+ if (port_mask & 1)
+ port_num[port_count++] = i;
+ port_mask >>= 1;
+ }
+
+ /*
+ * Build the flow table. The flow table is required when the destination
+ * LID is not available. Up to OPA_VNIC_FLOW_TBL_SIZE flows are supported.
+ * Each flow needs a default port number to get its dlid from the
+ * u_ucast_dlid array.
+ */
+ for (i = 0; i < OPA_VNIC_FLOW_TBL_SIZE; i++)
+ adapter->flow_tbl[i] = port_count ? port_num[i % port_count] :
+ OPA_VNIC_INVALID_PORT;
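+
+ /*
+ * E.g. a def_port_mask of 0x5 enables default ports 0 and 2, so the
+ * flow table alternates between them: 0, 2, 0, 2, ...
+ */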
+
+ /* update state */
+ opa_vnic_update_state(adapter, !!(netdev->flags & IFF_UP));
+}
+
+/*
+ * Set the power-on default values in the adapter's vema interface structure.
+ */
+static inline void opa_vnic_set_pod_values(struct opa_vnic_adapter *adapter)
+{
+ adapter->info.vport.max_mac_tbl_ent = OPA_VNIC_MAC_TBL_MAX_ENTRIES;
+ adapter->info.vport.max_smac_ent = OPA_VNIC_MAX_SMAC_LIMIT;
+ adapter->info.vport.config_state = OPA_VNIC_STATE_DROP_ALL;
+ adapter->info.vport.eth_link_status = OPA_VNIC_ETH_LINK_DOWN;
+ adapter->info.vesw.eth_mtu = ETH_DATA_LEN;
+}
+
+/* opa_vnic_set_mac_addr - change mac address */
+static int opa_vnic_set_mac_addr(struct net_device *netdev, void *addr)
+{
+ struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
+ struct sockaddr *sa = addr;
+ int rc;
+
+ if (!memcmp(netdev->dev_addr, sa->sa_data, ETH_ALEN))
+ return 0;
+
+ mutex_lock(&adapter->lock);
+ rc = eth_mac_addr(netdev, addr);
+ mutex_unlock(&adapter->lock);
+ if (rc)
+ return rc;
+
+ adapter->info.vport.uc_macs_gen_count++;
+ opa_vnic_vema_report_event(adapter,
+ OPA_VESWPORT_TRAP_IFACE_UCAST_MAC_CHANGE);
+ return 0;
+}
+
+/*
+ * opa_vnic_mac_send_event - post event on possible mac list exchange
+ * Send a trap when the digest of the uc/mc mac list differs from the
+ * previous run. The digest is computed in a checksum-like manner.
+ */
+static void opa_vnic_mac_send_event(struct net_device *netdev, u8 event)
+{
+ struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
+ struct netdev_hw_addr *ha;
+ struct netdev_hw_addr_list *hw_list;
+ u32 *ref_crc;
+ u32 l, crc = 0;
+
+ switch (event) {
+ case OPA_VESWPORT_TRAP_IFACE_UCAST_MAC_CHANGE:
+ hw_list = &netdev->uc;
+ adapter->info.vport.uc_macs_gen_count++;
+ ref_crc = &adapter->umac_hash;
+ break;
+ case OPA_VESWPORT_TRAP_IFACE_MCAST_MAC_CHANGE:
+ hw_list = &netdev->mc;
+ adapter->info.vport.mc_macs_gen_count++;
+ ref_crc = &adapter->mmac_hash;
+ break;
+ default:
+ return;
+ }
+ netdev_hw_addr_list_for_each(ha, hw_list) {
+ crc = crc32_le(crc, ha->addr, ETH_ALEN);
+ }
+ l = netdev_hw_addr_list_count(hw_list) * ETH_ALEN;
+ crc = ~crc32_le(crc, (void *)&l, sizeof(l));
+
+ if (crc != *ref_crc) {
+ *ref_crc = crc;
+ opa_vnic_vema_report_event(adapter, event);
+ }
+}
+
+/* opa_vnic_set_rx_mode - handle uc/mc mac list change */
+static void opa_vnic_set_rx_mode(struct net_device *netdev)
+{
+ opa_vnic_mac_send_event(netdev,
+ OPA_VESWPORT_TRAP_IFACE_UCAST_MAC_CHANGE);
+
+ opa_vnic_mac_send_event(netdev,
+ OPA_VESWPORT_TRAP_IFACE_MCAST_MAC_CHANGE);
+}
+
+/* opa_netdev_open - activate network interface */
+static int opa_netdev_open(struct net_device *netdev)
+{
+ struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
+ int rc;
+
+ rc = adapter->rn_ops->ndo_open(adapter->netdev);
+ if (rc) {
+ v_dbg("open failed %d\n", rc);
+ return rc;
+ }
+
+ /* Update status and send trap */
+ opa_vnic_update_state(adapter, true);
+ opa_vnic_vema_report_event(adapter,
+ OPA_VESWPORT_TRAP_ETH_LINK_STATUS_CHANGE);
+ return 0;
+}
+
+/* opa_netdev_close - disable network interface */
+static int opa_netdev_close(struct net_device *netdev)
+{
+ struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
+ int rc;
+
+ rc = adapter->rn_ops->ndo_stop(adapter->netdev);
+ if (rc) {
+ v_dbg("close failed %d\n", rc);
+ return rc;
+ }
+
+ /* Update status and send trap */
+ opa_vnic_update_state(adapter, false);
+ opa_vnic_vema_report_event(adapter,
+ OPA_VESWPORT_TRAP_ETH_LINK_STATUS_CHANGE);
+ return 0;
+}
+
+/* netdev ops */
+static const struct net_device_ops opa_netdev_ops = {
+ .ndo_open = opa_netdev_open,
+ .ndo_stop = opa_netdev_close,
+ .ndo_start_xmit = opa_netdev_start_xmit,
+ .ndo_get_stats64 = opa_vnic_get_stats64,
+ .ndo_set_rx_mode = opa_vnic_set_rx_mode,
+ .ndo_select_queue = opa_vnic_select_queue,
+ .ndo_set_mac_address = opa_vnic_set_mac_addr,
+};
+
+/* opa_vnic_add_netdev - create vnic netdev interface */
+struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev,
+ u8 port_num, u8 vport_num)
+{
+ struct opa_vnic_adapter *adapter;
+ struct net_device *netdev;
+ struct rdma_netdev *rn;
+ int rc;
+
+ netdev = ibdev->ops.alloc_rdma_netdev(ibdev, port_num,
+ RDMA_NETDEV_OPA_VNIC,
+ "veth%d", NET_NAME_UNKNOWN,
+ ether_setup);
+ if (!netdev)
+ return ERR_PTR(-ENOMEM);
+ else if (IS_ERR(netdev))
+ return ERR_CAST(netdev);
+
+ rn = netdev_priv(netdev);
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter) {
+ rc = -ENOMEM;
+ goto adapter_err;
+ }
+
+ rn->clnt_priv = adapter;
+ rn->hca = ibdev;
+ rn->port_num = port_num;
+ adapter->netdev = netdev;
+ adapter->ibdev = ibdev;
+ adapter->port_num = port_num;
+ adapter->vport_num = vport_num;
+ adapter->rn_ops = netdev->netdev_ops;
+
+ netdev->netdev_ops = &opa_netdev_ops;
+ netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ netdev->hard_header_len += OPA_VNIC_SKB_HEADROOM;
+ mutex_init(&adapter->lock);
+ mutex_init(&adapter->mactbl_lock);
+ spin_lock_init(&adapter->stats_lock);
+
+ SET_NETDEV_DEV(netdev, ibdev->dev.parent);
+
+ opa_vnic_set_ethtool_ops(netdev);
+
+ opa_vnic_set_pod_values(adapter);
+
+ rc = register_netdev(netdev);
+ if (rc)
+ goto netdev_err;
+
+ netif_carrier_off(netdev);
+ netif_dormant_on(netdev);
+ v_info("initialized\n");
+
+ return adapter;
+netdev_err:
+ mutex_destroy(&adapter->lock);
+ mutex_destroy(&adapter->mactbl_lock);
+ kfree(adapter);
+adapter_err:
+ rn->free_rdma_netdev(netdev);
+
+ return ERR_PTR(rc);
+}
+
+/* opa_vnic_rem_netdev - remove vnic netdev interface */
+void opa_vnic_rem_netdev(struct opa_vnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct rdma_netdev *rn = netdev_priv(netdev);
+
+ v_info("removing\n");
+ unregister_netdev(netdev);
+ opa_vnic_release_mac_tbl(adapter);
+ mutex_destroy(&adapter->lock);
+ mutex_destroy(&adapter->mactbl_lock);
+ kfree(adapter);
+ rn->free_rdma_netdev(netdev);
+}
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
new file mode 100644
index 000000000000..21c6cea8b1db
--- /dev/null
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -0,0 +1,1056 @@
+/*
+ * Copyright(c) 2017 Intel Corporation.
+ * Copyright(c) 2021 Cornelis Networks.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This file contains OPX Virtual Network Interface Controller (VNIC)
+ * Ethernet Management Agent (EMA) driver
+ */
+
+#include <linux/module.h>
+#include <linux/xarray.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/opa_smi.h>
+#include <rdma/opa_port_info.h>
+
+#include "opa_vnic_internal.h"
+
+char opa_vnic_driver_name[] = "opa_vnic";
+
+/*
+ * The trap service level is kept in bits 3 to 7 in the trap_sl_rsvd
+ * field in the class port info MAD.
+ */
+#define GET_TRAP_SL_FROM_CLASS_PORT_INFO(x) (((x) >> 3) & 0x1f)
+
+/* Cap trap bursts to a reasonable limit good for normal cases */
+#define OPA_VNIC_TRAP_BURST_LIMIT 4
+
+/*
+ * VNIC trap rate-limit timeout, in microseconds.
+ * Derived from the cap_mask2 response timeout of 4.096 usec * 2^18
+ * (~1.0737 secs); see IB spec 13.4.6.2.1 PortInfoSubnetTimeout and
+ * 13.4.9 Traps.
+ */
+#define OPA_VNIC_TRAP_TIMEOUT ((4096 * (1UL << 18)) / 1000)
+
+#define OPA_VNIC_UNSUP_ATTR \
+ cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB)
+
+#define OPA_VNIC_INVAL_ATTR \
+ cpu_to_be16(IB_MGMT_MAD_STATUS_INVALID_ATTRIB_VALUE)
+
+#define OPA_VNIC_CLASS_CAP_TRAP 0x1
+
+/* Maximum number of VNIC ports supported */
+#define OPA_VNIC_MAX_NUM_VPORT 255
+
+/**
+ * struct opa_vnic_vema_port -- VNIC VEMA port details
+ * @cport: pointer to port
+ * @mad_agent: pointer to mad agent for port
+ * @class_port_info: Class port info information.
+ * @tid: Transaction id
+ * @port_num: OPA port number
+ * @vports: vnic ports
+ * @event_handler: ib event handler
+ * @lock: adapter interface lock
+ */
+struct opa_vnic_vema_port {
+ struct opa_vnic_ctrl_port *cport;
+ struct ib_mad_agent *mad_agent;
+ struct opa_class_port_info class_port_info;
+ u64 tid;
+ u8 port_num;
+ struct xarray vports;
+ struct ib_event_handler event_handler;
+
+ /* Lock to query/update network adapter */
+ struct mutex lock;
+};
+
+static int opa_vnic_vema_add_one(struct ib_device *device);
+static void opa_vnic_vema_rem_one(struct ib_device *device,
+ void *client_data);
+
+static struct ib_client opa_vnic_client = {
+ .name = opa_vnic_driver_name,
+ .add = opa_vnic_vema_add_one,
+ .remove = opa_vnic_vema_rem_one,
+};
+
+/**
+ * vema_get_vport_num -- Get the vnic port number from the mad
+ * @recvd_mad: Received mad
+ *
+ * Return: the vnic port number
+ */
+static inline u8 vema_get_vport_num(struct opa_vnic_vema_mad *recvd_mad)
+{
+ return be32_to_cpu(recvd_mad->mad_hdr.attr_mod) & 0xff;
+}
+
+/**
+ * vema_get_vport_adapter -- Get vnic port adapter from recvd mad
+ * @recvd_mad: received mad
+ * @port: ptr to port struct on which MAD was recvd
+ *
+ * Return: vnic adapter
+ */
+static inline struct opa_vnic_adapter *
+vema_get_vport_adapter(struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_port *port)
+{
+ u8 vport_num = vema_get_vport_num(recvd_mad);
+
+ return xa_load(&port->vports, vport_num);
+}
+
+/**
+ * vema_mac_tbl_req_ok -- Check if mac request has correct values
+ * @mac_tbl: mac table
+ *
+ * This function checks for the validity of the offset and number of
+ * entries required.
+ *
+ * Return: true if offset and num_entries are valid
+ */
+static inline bool vema_mac_tbl_req_ok(struct opa_veswport_mactable *mac_tbl)
+{
+ u16 offset, num_entries;
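+ /* max number of mac table entries that fit in one EMA MAD data payload */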
+ u16 req_entries = ((OPA_VNIC_EMA_DATA - sizeof(*mac_tbl)) /
+ sizeof(mac_tbl->tbl_entries[0]));
+
+ offset = be16_to_cpu(mac_tbl->offset);
+ num_entries = be16_to_cpu(mac_tbl->num_entries);
+
+ return ((num_entries <= req_entries) &&
+ (offset + num_entries <= OPA_VNIC_MAC_TBL_MAX_ENTRIES));
+}
+
+/*
+ * Return the power on default values in the port info structure
+ * in big endian format as required by MAD.
+ */
+static inline void vema_get_pod_values(struct opa_veswport_info *port_info)
+{
+ memset(port_info, 0, sizeof(*port_info));
+ port_info->vport.max_mac_tbl_ent =
+ cpu_to_be16(OPA_VNIC_MAC_TBL_MAX_ENTRIES);
+ port_info->vport.max_smac_ent =
+ cpu_to_be16(OPA_VNIC_MAX_SMAC_LIMIT);
+ port_info->vport.oper_state = OPA_VNIC_STATE_DROP_ALL;
+ port_info->vport.config_state = OPA_VNIC_STATE_DROP_ALL;
+ port_info->vesw.eth_mtu = cpu_to_be16(ETH_DATA_LEN);
+}
+
+/**
+ * vema_add_vport -- Add a new vnic port
+ * @port: ptr to opa_vnic_vema_port struct
+ * @vport_num: vnic port number (to be added)
+ *
+ * Return a pointer to the vnic adapter structure
+ */
+static struct opa_vnic_adapter *vema_add_vport(struct opa_vnic_vema_port *port,
+ u8 vport_num)
+{
+ struct opa_vnic_ctrl_port *cport = port->cport;
+ struct opa_vnic_adapter *adapter;
+
+ adapter = opa_vnic_add_netdev(cport->ibdev, port->port_num, vport_num);
+ if (!IS_ERR(adapter)) {
+ int rc;
+
+ adapter->cport = cport;
+ rc = xa_insert(&port->vports, vport_num, adapter, GFP_KERNEL);
+ if (rc < 0) {
+ opa_vnic_rem_netdev(adapter);
+ adapter = ERR_PTR(rc);
+ }
+ }
+
+ return adapter;
+}
+
+/**
+ * vema_get_class_port_info -- Get class info for port
+ * @port: Port on which MAD was received
+ * @recvd_mad: pointer to the received mad
+ * @rsp_mad: pointer to response mad
+ *
+ * This function copies the latest class port info value set for the
+ * port and stores it for generating traps
+ */
+static void vema_get_class_port_info(struct opa_vnic_vema_port *port,
+ struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_mad *rsp_mad)
+{
+ struct opa_class_port_info *port_info;
+
+ port_info = (struct opa_class_port_info *)rsp_mad->data;
+ memcpy(port_info, &port->class_port_info, sizeof(*port_info));
+ port_info->base_version = OPA_MGMT_BASE_VERSION;
+ port_info->class_version = OPA_EMA_CLASS_VERSION;
+
+ /*
+ * Set capability mask bit indicating agent generates traps,
+ * and set the maximum number of VNIC ports supported.
+ */
+ port_info->cap_mask = cpu_to_be16((OPA_VNIC_CLASS_CAP_TRAP |
+ (OPA_VNIC_MAX_NUM_VPORT << 8)));
+
+ /*
+ * Since a get routine is always sent by the EM first we
+ * set the expected response time to
+ * 4.096 usec * 2^18 == 1.0737 sec here.
+ */
+ port_info->cap_mask2_resp_time = cpu_to_be32(18);
+}
+
+/**
+ * vema_set_class_port_info -- Set class info for port
+ * @port: Port on which MAD was received
+ * @recvd_mad: pointer to the received mad
+ * @rsp_mad: pointer to response mad
+ *
+ * This function updates the port class info for the specific vnic
+ * and sets up the response mad data
+ */
+static void vema_set_class_port_info(struct opa_vnic_vema_port *port,
+ struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_mad *rsp_mad)
+{
+ memcpy(&port->class_port_info, recvd_mad->data,
+ sizeof(port->class_port_info));
+
+ vema_get_class_port_info(port, recvd_mad, rsp_mad);
+}
+
+/**
+ * vema_get_veswport_info -- Get veswport info
+ * @port: source port on which MAD was received
+ * @recvd_mad: pointer to the received mad
+ * @rsp_mad: pointer to response mad
+ */
+static void vema_get_veswport_info(struct opa_vnic_vema_port *port,
+ struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_mad *rsp_mad)
+{
+ struct opa_veswport_info *port_info =
+ (struct opa_veswport_info *)rsp_mad->data;
+ struct opa_vnic_adapter *adapter;
+
+ adapter = vema_get_vport_adapter(recvd_mad, port);
+ if (adapter) {
+ memset(port_info, 0, sizeof(*port_info));
+ opa_vnic_get_vesw_info(adapter, &port_info->vesw);
+ opa_vnic_get_per_veswport_info(adapter,
+ &port_info->vport);
+ } else {
+ vema_get_pod_values(port_info);
+ }
+}
+
+/**
+ * vema_set_veswport_info -- Set veswport info
+ * @port: source port on which MAD was received
+ * @recvd_mad: pointer to the received mad
+ * @rsp_mad: pointer to response mad
+ *
+ * This function sets the veswport info for the specified vnic and builds
+ * the response mad
+ */
+static void vema_set_veswport_info(struct opa_vnic_vema_port *port,
+ struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_mad *rsp_mad)
+{
+ struct opa_vnic_ctrl_port *cport = port->cport;
+ struct opa_veswport_info *port_info;
+ struct opa_vnic_adapter *adapter;
+ u8 vport_num;
+
+ vport_num = vema_get_vport_num(recvd_mad);
+
+ adapter = vema_get_vport_adapter(recvd_mad, port);
+ if (!adapter) {
+ adapter = vema_add_vport(port, vport_num);
+ if (IS_ERR(adapter)) {
+ c_err("failed to add vport %d: %ld\n",
+ vport_num, PTR_ERR(adapter));
+ goto err_exit;
+ }
+ }
+
+ port_info = (struct opa_veswport_info *)recvd_mad->data;
+ opa_vnic_set_vesw_info(adapter, &port_info->vesw);
+ opa_vnic_set_per_veswport_info(adapter, &port_info->vport);
+
+ /* Process the new config settings */
+ opa_vnic_process_vema_config(adapter);
+
+ vema_get_veswport_info(port, recvd_mad, rsp_mad);
+ return;
+
+err_exit:
+ rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
+}
+
+/**
+ * vema_get_mac_entries -- Get MAC entries in VNIC MAC table
+ * @port: source port on which MAD was received
+ * @recvd_mad: pointer to the received mad
+ * @rsp_mad: pointer to response mad
+ *
+ * This function gets the MAC entries that are programmed into
+ * the VNIC MAC forwarding table. It checks for the validity of
+ * the index into the MAC table and the number of entries that
+ * are to be retrieved.
+ */
+static void vema_get_mac_entries(struct opa_vnic_vema_port *port,
+ struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_mad *rsp_mad)
+{
+ struct opa_veswport_mactable *mac_tbl_in, *mac_tbl_out;
+ struct opa_vnic_adapter *adapter;
+
+ adapter = vema_get_vport_adapter(recvd_mad, port);
+ if (!adapter) {
+ rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
+ return;
+ }
+
+ mac_tbl_in = (struct opa_veswport_mactable *)recvd_mad->data;
+ mac_tbl_out = (struct opa_veswport_mactable *)rsp_mad->data;
+
+ if (vema_mac_tbl_req_ok(mac_tbl_in)) {
+ mac_tbl_out->offset = mac_tbl_in->offset;
+ mac_tbl_out->num_entries = mac_tbl_in->num_entries;
+ opa_vnic_query_mac_tbl(adapter, mac_tbl_out);
+ } else {
+ rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
+ }
+}
+
+/**
+ * vema_set_mac_entries -- Set MAC entries in VNIC MAC table
+ * @port: source port on which MAD was received
+ * @recvd_mad: pointer to the received mad
+ * @rsp_mad: pointer to response mad
+ *
+ * This function sets the MAC entries in the VNIC forwarding table.
+ * It checks for the validity of the index and the number of forwarding
+ * table entries to be programmed.
+ */
+static void vema_set_mac_entries(struct opa_vnic_vema_port *port,
+ struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_mad *rsp_mad)
+{
+ struct opa_veswport_mactable *mac_tbl;
+ struct opa_vnic_adapter *adapter;
+
+ adapter = vema_get_vport_adapter(recvd_mad, port);
+ if (!adapter) {
+ rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
+ return;
+ }
+
+ mac_tbl = (struct opa_veswport_mactable *)recvd_mad->data;
+ if (vema_mac_tbl_req_ok(mac_tbl)) {
+ if (opa_vnic_update_mac_tbl(adapter, mac_tbl))
+ rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
+ } else {
+ rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
+ }
+ vema_get_mac_entries(port, recvd_mad, rsp_mad);
+}
+
+/**
+ * vema_set_delete_vesw -- Reset VESW info to POD values
+ * @port: source port on which MAD was received
+ * @recvd_mad: pointer to the received mad
+ * @rsp_mad: pointer to response mad
+ *
+ * This function clears all the fields of veswport info for the requested vesw
+ * and sets them back to the power-on default values. It does not delete the
+ * vesw.
+ */
+static void vema_set_delete_vesw(struct opa_vnic_vema_port *port,
+ struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_mad *rsp_mad)
+{
+ struct opa_veswport_info *port_info =
+ (struct opa_veswport_info *)rsp_mad->data;
+ struct opa_vnic_adapter *adapter;
+
+ adapter = vema_get_vport_adapter(recvd_mad, port);
+ if (!adapter) {
+ rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
+ return;
+ }
+
+ vema_get_pod_values(port_info);
+ opa_vnic_set_vesw_info(adapter, &port_info->vesw);
+ opa_vnic_set_per_veswport_info(adapter, &port_info->vport);
+
+ /* Process the new config settings */
+ opa_vnic_process_vema_config(adapter);
+
+ opa_vnic_release_mac_tbl(adapter);
+
+ vema_get_veswport_info(port, recvd_mad, rsp_mad);
+}
+
+/**
+ * vema_get_mac_list -- Get the unicast/multicast macs.
+ * @port: source port on which MAD was received
+ * @recvd_mad: Received mad contains fields to set vnic parameters
+ * @rsp_mad: Response mad to be built
+ * @attr_id: Attribute ID indicating multicast or unicast mac list
+ */
+static void vema_get_mac_list(struct opa_vnic_vema_port *port,
+ struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_mad *rsp_mad,
+ u16 attr_id)
+{
+ struct opa_veswport_iface_macs *macs_in, *macs_out;
+ int max_entries = (OPA_VNIC_EMA_DATA - sizeof(*macs_out)) / ETH_ALEN;
+ struct opa_vnic_adapter *adapter;
+
+ adapter = vema_get_vport_adapter(recvd_mad, port);
+ if (!adapter) {
+ rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
+ return;
+ }
+
+ macs_in = (struct opa_veswport_iface_macs *)recvd_mad->data;
+ macs_out = (struct opa_veswport_iface_macs *)rsp_mad->data;
+
+ macs_out->start_idx = macs_in->start_idx;
+ if (macs_in->num_macs_in_msg)
+ macs_out->num_macs_in_msg = macs_in->num_macs_in_msg;
+ else
+ macs_out->num_macs_in_msg = cpu_to_be16(max_entries);
+
+ if (attr_id == OPA_EM_ATTR_IFACE_MCAST_MACS)
+ opa_vnic_query_mcast_macs(adapter, macs_out);
+ else
+ opa_vnic_query_ucast_macs(adapter, macs_out);
+}
+
+/**
+ * vema_get_summary_counters -- Gets summary counters.
+ * @port: source port on which MAD was received
+ * @recvd_mad: Received mad contains fields to set vnic parameters
+ * @rsp_mad: Response mad to be built
+ */
+static void vema_get_summary_counters(struct opa_vnic_vema_port *port,
+ struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_mad *rsp_mad)
+{
+ struct opa_veswport_summary_counters *cntrs;
+ struct opa_vnic_adapter *adapter;
+
+ adapter = vema_get_vport_adapter(recvd_mad, port);
+ if (adapter) {
+ cntrs = (struct opa_veswport_summary_counters *)rsp_mad->data;
+ opa_vnic_get_summary_counters(adapter, cntrs);
+ } else {
+ rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
+ }
+}
+
+/**
+ * vema_get_error_counters -- Gets error counters.
+ * @port: source port on which MAD was received
+ * @recvd_mad: Received mad contains fields to set vnic parameters
+ * @rsp_mad: Response mad to be built
+ */
+static void vema_get_error_counters(struct opa_vnic_vema_port *port,
+ struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_mad *rsp_mad)
+{
+ struct opa_veswport_error_counters *cntrs;
+ struct opa_vnic_adapter *adapter;
+
+ adapter = vema_get_vport_adapter(recvd_mad, port);
+ if (adapter) {
+ cntrs = (struct opa_veswport_error_counters *)rsp_mad->data;
+ opa_vnic_get_error_counters(adapter, cntrs);
+ } else {
+ rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
+ }
+}
+
+/**
+ * vema_get -- Process received get MAD
+ * @port: source port on which MAD was received
+ * @recvd_mad: Received mad
+ * @rsp_mad: Response mad to be built
+ */
+static void vema_get(struct opa_vnic_vema_port *port,
+ struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_mad *rsp_mad)
+{
+ u16 attr_id = be16_to_cpu(recvd_mad->mad_hdr.attr_id);
+
+ switch (attr_id) {
+ case OPA_EM_ATTR_CLASS_PORT_INFO:
+ vema_get_class_port_info(port, recvd_mad, rsp_mad);
+ break;
+ case OPA_EM_ATTR_VESWPORT_INFO:
+ vema_get_veswport_info(port, recvd_mad, rsp_mad);
+ break;
+ case OPA_EM_ATTR_VESWPORT_MAC_ENTRIES:
+ vema_get_mac_entries(port, recvd_mad, rsp_mad);
+ break;
+ case OPA_EM_ATTR_IFACE_UCAST_MACS:
+ case OPA_EM_ATTR_IFACE_MCAST_MACS:
+ vema_get_mac_list(port, recvd_mad, rsp_mad, attr_id);
+ break;
+ case OPA_EM_ATTR_VESWPORT_SUMMARY_COUNTERS:
+ vema_get_summary_counters(port, recvd_mad, rsp_mad);
+ break;
+ case OPA_EM_ATTR_VESWPORT_ERROR_COUNTERS:
+ vema_get_error_counters(port, recvd_mad, rsp_mad);
+ break;
+ default:
+ rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
+ break;
+ }
+}
+
+/**
+ * vema_set -- Process received set MAD
+ * @port: source port on which MAD was received
+ * @recvd_mad: Received mad contains fields to set vnic parameters
+ * @rsp_mad: Response mad to be built
+ */
+static void vema_set(struct opa_vnic_vema_port *port,
+ struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_mad *rsp_mad)
+{
+ u16 attr_id = be16_to_cpu(recvd_mad->mad_hdr.attr_id);
+
+ switch (attr_id) {
+ case OPA_EM_ATTR_CLASS_PORT_INFO:
+ vema_set_class_port_info(port, recvd_mad, rsp_mad);
+ break;
+ case OPA_EM_ATTR_VESWPORT_INFO:
+ vema_set_veswport_info(port, recvd_mad, rsp_mad);
+ break;
+ case OPA_EM_ATTR_VESWPORT_MAC_ENTRIES:
+ vema_set_mac_entries(port, recvd_mad, rsp_mad);
+ break;
+ case OPA_EM_ATTR_DELETE_VESW:
+ vema_set_delete_vesw(port, recvd_mad, rsp_mad);
+ break;
+ default:
+ rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
+ break;
+ }
+}
+
+/**
+ * vema_send -- Send handler for VEMA MAD agent
+ * @mad_agent: pointer to the mad agent
+ * @mad_wc: pointer to mad send work completion information
+ *
+ * Free all the data structures associated with the sent MAD
+ */
+static void vema_send(struct ib_mad_agent *mad_agent,
+ struct ib_mad_send_wc *mad_wc)
+{
+ rdma_destroy_ah(mad_wc->send_buf->ah, RDMA_DESTROY_AH_SLEEPABLE);
+ ib_free_send_mad(mad_wc->send_buf);
+}
+
+/**
+ * vema_recv -- Recv handler for VEMA MAD agent
+ * @mad_agent: pointer to the mad agent
+ * @send_buf: Send buffer if found, else NULL
+ * @mad_wc: pointer to mad send work completion information
+ *
+ * Handle only set and get methods and respond to other methods
+ * as unsupported. Allocate response buffer and address handle
+ * for the response MAD.
+ */
+static void vema_recv(struct ib_mad_agent *mad_agent,
+ struct ib_mad_send_buf *send_buf,
+ struct ib_mad_recv_wc *mad_wc)
+{
+ struct opa_vnic_vema_port *port;
+ struct ib_ah *ah;
+ struct ib_mad_send_buf *rsp;
+ struct opa_vnic_vema_mad *vema_mad;
+
+ if (!mad_wc || !mad_wc->recv_buf.mad)
+ return;
+
+ port = mad_agent->context;
+ ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
+ mad_wc->recv_buf.grh, mad_agent->port_num);
+ if (IS_ERR(ah))
+ goto free_recv_mad;
+
+ rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
+ mad_wc->wc->pkey_index, 0,
+ IB_MGMT_VENDOR_HDR, OPA_VNIC_EMA_DATA,
+ GFP_KERNEL, OPA_MGMT_BASE_VERSION);
+ if (IS_ERR(rsp))
+ goto err_rsp;
+
+ rsp->ah = ah;
+ vema_mad = rsp->mad;
+ memcpy(vema_mad, mad_wc->recv_buf.mad, IB_MGMT_VENDOR_HDR);
+ vema_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
+ vema_mad->mad_hdr.status = 0;
+
+ /* Lock ensures network adapter is not removed */
+ mutex_lock(&port->lock);
+
+ switch (mad_wc->recv_buf.mad->mad_hdr.method) {
+ case IB_MGMT_METHOD_GET:
+ vema_get(port, (struct opa_vnic_vema_mad *)mad_wc->recv_buf.mad,
+ vema_mad);
+ break;
+ case IB_MGMT_METHOD_SET:
+ vema_set(port, (struct opa_vnic_vema_mad *)mad_wc->recv_buf.mad,
+ vema_mad);
+ break;
+ default:
+ vema_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
+ break;
+ }
+ mutex_unlock(&port->lock);
+
+ if (!ib_post_send_mad(rsp, NULL)) {
+ /*
+ * with post send successful ah and send mad
+ * will be destroyed in send handler
+ */
+ goto free_recv_mad;
+ }
+
+ ib_free_send_mad(rsp);
+
+err_rsp:
+ rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
+free_recv_mad:
+ ib_free_recv_mad(mad_wc);
+}
+
+/**
+ * vema_get_port -- Gets the opa_vnic_vema_port
+ * @cport: pointer to control dev
+ * @port_num: Port number
+ *
+ * This function returns the opa_vnic_vema port structure
+ * that is associated with the given OPA port number
+ *
+ * Return: ptr to requested opa_vnic_vema_port structure
+ * on success, NULL if not found
+ */
+static struct opa_vnic_vema_port *
+vema_get_port(struct opa_vnic_ctrl_port *cport, u8 port_num)
+{
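+ /*
+ * The per-port structures are allocated contiguously right after the
+ * control port structure; see the allocation in opa_vnic_vema_add_one().
+ */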
+ struct opa_vnic_vema_port *port = (void *)cport + sizeof(*cport);
+
+ if (port_num > cport->num_ports)
+ return NULL;
+
+ return port + (port_num - 1);
+}
+
+/**
+ * opa_vnic_vema_send_trap -- This function sends a trap to the EM
+ * @adapter: pointer to vnic adapter
+ * @data: pointer to trap data filled by calling function
+ * @lid: issuer's lid (encap_slid from vesw_port_info)
+ *
+ * This function is called from the VNIC driver to send a trap if there
+ * is something the EM should be notified about. These events currently
+ * are
+ * 1) UNICAST INTERFACE MACADDRESS changes
+ * 2) MULTICAST INTERFACE MACADDRESS changes
+ * 3) ETHERNET LINK STATUS changes
+ * While allocating the send mad, the remote side qpn used is 1
+ * as this is the well-known QP (GSI).
+ *
+ */
+void opa_vnic_vema_send_trap(struct opa_vnic_adapter *adapter,
+ struct __opa_veswport_trap *data, u32 lid)
+{
+ struct opa_vnic_ctrl_port *cport = adapter->cport;
+ struct ib_mad_send_buf *send_buf;
+ struct opa_vnic_vema_port *port;
+ struct ib_device *ibp;
+ struct opa_vnic_vema_mad_trap *trap_mad;
+ struct opa_class_port_info *class;
+ struct rdma_ah_attr ah_attr;
+ struct ib_ah *ah;
+ struct opa_veswport_trap *trap;
+ u32 trap_lid;
+ u16 pkey_idx;
+
+ if (!cport)
+ goto err_exit;
+ ibp = cport->ibdev;
+ port = vema_get_port(cport, data->opaportnum);
+ if (!port || !port->mad_agent)
+ goto err_exit;
+
+ if (time_before(jiffies, adapter->trap_timeout)) {
+ if (adapter->trap_count == OPA_VNIC_TRAP_BURST_LIMIT) {
+ v_warn("Trap rate exceeded\n");
+ goto err_exit;
+ } else {
+ adapter->trap_count++;
+ }
+ } else {
+ adapter->trap_count = 0;
+ }
+
+ class = &port->class_port_info;
+ /* Set up address handle */
+ memset(&ah_attr, 0, sizeof(ah_attr));
+ ah_attr.type = rdma_ah_find_type(ibp, port->port_num);
+ rdma_ah_set_sl(&ah_attr,
+ GET_TRAP_SL_FROM_CLASS_PORT_INFO(class->trap_sl_rsvd));
+ rdma_ah_set_port_num(&ah_attr, port->port_num);
+ trap_lid = be32_to_cpu(class->trap_lid);
+ /*
+ * Check for trap lid validity; it must not be zero.
+ * The trap sink could change after we fashion the MAD but since traps
+ * are not guaranteed we won't use a lock as anyway the change will take
+ * place even with locking.
+ */
+ if (!trap_lid) {
+ c_err("%s: Invalid dlid\n", __func__);
+ goto err_exit;
+ }
+
+ rdma_ah_set_dlid(&ah_attr, trap_lid);
+ ah = rdma_create_ah(port->mad_agent->qp->pd, &ah_attr, 0);
+ if (IS_ERR(ah)) {
+ c_err("%s:Couldn't create new AH = %p\n", __func__, ah);
+ c_err("%s:dlid = %d, sl = %d, port = %d\n", __func__,
+ rdma_ah_get_dlid(&ah_attr), rdma_ah_get_sl(&ah_attr),
+ rdma_ah_get_port_num(&ah_attr));
+ goto err_exit;
+ }
+
+ if (ib_find_pkey(ibp, data->opaportnum, IB_DEFAULT_PKEY_FULL,
+ &pkey_idx) < 0) {
+ c_err("%s:full key not found, defaulting to partial\n",
+ __func__);
+ if (ib_find_pkey(ibp, data->opaportnum, IB_DEFAULT_PKEY_PARTIAL,
+ &pkey_idx) < 0)
+ pkey_idx = 1;
+ }
+
+ send_buf = ib_create_send_mad(port->mad_agent, 1, pkey_idx, 0,
+ IB_MGMT_VENDOR_HDR, IB_MGMT_MAD_DATA,
+ GFP_ATOMIC, OPA_MGMT_BASE_VERSION);
+ if (IS_ERR(send_buf)) {
+ c_err("%s:Couldn't allocate send buf\n", __func__);
+ goto err_sndbuf;
+ }
+
+ send_buf->ah = ah;
+
+ /* Set up common MAD hdr */
+ trap_mad = send_buf->mad;
+ trap_mad->mad_hdr.base_version = OPA_MGMT_BASE_VERSION;
+ trap_mad->mad_hdr.mgmt_class = OPA_MGMT_CLASS_INTEL_EMA;
+ trap_mad->mad_hdr.class_version = OPA_EMA_CLASS_VERSION;
+ trap_mad->mad_hdr.method = IB_MGMT_METHOD_TRAP;
+ port->tid++;
+ trap_mad->mad_hdr.tid = cpu_to_be64(port->tid);
+ trap_mad->mad_hdr.attr_id = IB_SMP_ATTR_NOTICE;
+
+ /* Set up vendor OUI */
+ trap_mad->oui[0] = INTEL_OUI_1;
+ trap_mad->oui[1] = INTEL_OUI_2;
+ trap_mad->oui[2] = INTEL_OUI_3;
+
+ /* Setup notice attribute portion */
+ trap_mad->notice.gen_type = OPA_INTEL_EMA_NOTICE_TYPE_INFO << 1;
+ trap_mad->notice.oui_1 = INTEL_OUI_1;
+ trap_mad->notice.oui_2 = INTEL_OUI_2;
+ trap_mad->notice.oui_3 = INTEL_OUI_3;
+ trap_mad->notice.issuer_lid = cpu_to_be32(lid);
+
+ /* copy the actual trap data */
+ trap = (struct opa_veswport_trap *)trap_mad->notice.raw_data;
+ trap->fabric_id = cpu_to_be16(data->fabric_id);
+ trap->veswid = cpu_to_be16(data->veswid);
+ trap->veswportnum = cpu_to_be32(data->veswportnum);
+ trap->opaportnum = cpu_to_be16(data->opaportnum);
+ trap->veswportindex = data->veswportindex;
+ trap->opcode = data->opcode;
+
+ /* If the send is successful, set up the rate-limit timeout, else bail */
+ if (ib_post_send_mad(send_buf, NULL)) {
+ ib_free_send_mad(send_buf);
+ } else {
+ if (adapter->trap_count)
+ return;
+ adapter->trap_timeout = jiffies +
+ usecs_to_jiffies(OPA_VNIC_TRAP_TIMEOUT);
+ return;
+ }
+
+err_sndbuf:
+ rdma_destroy_ah(ah, 0);
+err_exit:
+ v_err("Aborting trap\n");
+}
+
+static void opa_vnic_event(struct ib_event_handler *handler,
+ struct ib_event *record)
+{
+ struct opa_vnic_vema_port *port =
+ container_of(handler, struct opa_vnic_vema_port, event_handler);
+ struct opa_vnic_ctrl_port *cport = port->cport;
+ struct opa_vnic_adapter *adapter;
+ unsigned long index;
+
+ if (record->element.port_num != port->port_num)
+ return;
+
+ c_dbg("OPA_VNIC received event %d on device %s port %d\n",
+ record->event, dev_name(&record->device->dev),
+ record->element.port_num);
+
+ if (record->event != IB_EVENT_PORT_ERR &&
+ record->event != IB_EVENT_PORT_ACTIVE)
+ return;
+
+ xa_for_each(&port->vports, index, adapter) {
+ if (record->event == IB_EVENT_PORT_ACTIVE)
+ netif_carrier_on(adapter->netdev);
+ else
+ netif_carrier_off(adapter->netdev);
+ }
+}
+
+/**
+ * vema_unregister -- Unregisters agent
+ * @cport: pointer to control port
+ *
+ * This deletes the registration by VEMA for MADs
+ */
+static void vema_unregister(struct opa_vnic_ctrl_port *cport)
+{
+ struct opa_vnic_adapter *adapter;
+ unsigned long index;
+ int i;
+
+ for (i = 1; i <= cport->num_ports; i++) {
+ struct opa_vnic_vema_port *port = vema_get_port(cport, i);
+
+ if (!port->mad_agent)
+ continue;
+
+ /* Lock ensures no MAD is being processed */
+ mutex_lock(&port->lock);
+ xa_for_each(&port->vports, index, adapter)
+ opa_vnic_rem_netdev(adapter);
+ mutex_unlock(&port->lock);
+
+ ib_unregister_mad_agent(port->mad_agent);
+ port->mad_agent = NULL;
+ mutex_destroy(&port->lock);
+ xa_destroy(&port->vports);
+ ib_unregister_event_handler(&port->event_handler);
+ }
+}
+
+/**
+ * vema_register -- Registers agent
+ * @cport: pointer to control port
+ *
+ * This function registers the handlers for the VEMA MADs
+ *
+ * Return: 0 on success, non-zero otherwise
+ */
+static int vema_register(struct opa_vnic_ctrl_port *cport)
+{
+ struct ib_mad_reg_req reg_req = {
+ .mgmt_class = OPA_MGMT_CLASS_INTEL_EMA,
+ .mgmt_class_version = OPA_MGMT_BASE_VERSION,
+ .oui = { INTEL_OUI_1, INTEL_OUI_2, INTEL_OUI_3 }
+ };
+ int i;
+
+ set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
+ set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
+
+ /* register ib event handler and mad agent for each port on dev */
+ for (i = 1; i <= cport->num_ports; i++) {
+ struct opa_vnic_vema_port *port = vema_get_port(cport, i);
+ int ret;
+
+ port->cport = cport;
+ port->port_num = i;
+
+ INIT_IB_EVENT_HANDLER(&port->event_handler,
+ cport->ibdev, opa_vnic_event);
+ ib_register_event_handler(&port->event_handler);
+
+ xa_init(&port->vports);
+ mutex_init(&port->lock);
+ port->mad_agent = ib_register_mad_agent(cport->ibdev, i,
+ IB_QPT_GSI, &reg_req,
+ IB_MGMT_RMPP_VERSION,
+ vema_send, vema_recv,
+ port, 0);
+ if (IS_ERR(port->mad_agent)) {
+ ret = PTR_ERR(port->mad_agent);
+ port->mad_agent = NULL;
+ mutex_destroy(&port->lock);
+ vema_unregister(cport);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * opa_vnic_ctrl_config_dev -- This function sends a trap to the EM
+ * by way of ib_modify_port to indicate support for ethernet on the
+ * fabric.
+ * @cport: pointer to control port
+ * @en: enable or disable ethernet on fabric support
+ */
+static void opa_vnic_ctrl_config_dev(struct opa_vnic_ctrl_port *cport, bool en)
+{
+ struct ib_port_modify pm = { 0 };
+ int i;
+
+ if (en)
+ pm.set_port_cap_mask = OPA_CAP_MASK3_IsEthOnFabricSupported;
+ else
+ pm.clr_port_cap_mask = OPA_CAP_MASK3_IsEthOnFabricSupported;
+
+ for (i = 1; i <= cport->num_ports; i++)
+ ib_modify_port(cport->ibdev, i, IB_PORT_OPA_MASK_CHG, &pm);
+}
+
+/**
+ * opa_vnic_vema_add_one -- Handle new ib device
+ * @device: ib device pointer
+ *
+ * Allocate the vnic control port and initialize it.
+ */
+static int opa_vnic_vema_add_one(struct ib_device *device)
+{
+ struct opa_vnic_ctrl_port *cport;
+ int rc, size = sizeof(*cport);
+
+ if (!rdma_cap_opa_vnic(device))
+ return -EOPNOTSUPP;
+
+ size += device->phys_port_cnt * sizeof(struct opa_vnic_vema_port);
+ cport = kzalloc(size, GFP_KERNEL);
+ if (!cport)
+ return -ENOMEM;
+
+ cport->num_ports = device->phys_port_cnt;
+ cport->ibdev = device;
+
+ /* Initialize opa vnic management agent (vema) */
+ rc = vema_register(cport);
+ if (!rc)
+ c_info("VNIC client initialized\n");
+
+ ib_set_client_data(device, &opa_vnic_client, cport);
+ opa_vnic_ctrl_config_dev(cport, true);
+ return 0;
+}
+
+/**
+ * opa_vnic_vema_rem_one -- Handle ib device removal
+ * @device: ib device pointer
+ * @client_data: ib client data
+ *
+ * Uninitialize and free the vnic control port.
+ */
+static void opa_vnic_vema_rem_one(struct ib_device *device,
+ void *client_data)
+{
+ struct opa_vnic_ctrl_port *cport = client_data;
+
+ c_info("removing VNIC client\n");
+ opa_vnic_ctrl_config_dev(cport, false);
+ vema_unregister(cport);
+ kfree(cport);
+}
+
+static int __init opa_vnic_init(void)
+{
+ int rc;
+
+ rc = ib_register_client(&opa_vnic_client);
+ if (rc)
+ pr_err("VNIC driver register failed %d\n", rc);
+
+ return rc;
+}
+module_init(opa_vnic_init);
+
+static void opa_vnic_deinit(void)
+{
+ ib_unregister_client(&opa_vnic_client);
+}
+module_exit(opa_vnic_deinit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Cornelis Networks");
+MODULE_DESCRIPTION("Cornelis OPX Virtual Network driver");
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
new file mode 100644
index 000000000000..292c037aa239
--- /dev/null
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
@@ -0,0 +1,390 @@
+/*
+ * Copyright(c) 2017 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This file contains OPA VNIC EMA Interface functions.
+ */
+
+#include "opa_vnic_internal.h"
+
+/**
+ * opa_vnic_vema_report_event - send trap to report the specified event
+ * @adapter: vnic port adapter
+ * @event: event to be reported
+ *
+ * This function calls the vema api to send a trap for the given event.
+ */
+void opa_vnic_vema_report_event(struct opa_vnic_adapter *adapter, u8 event)
+{
+ struct __opa_veswport_info *info = &adapter->info;
+ struct __opa_veswport_trap trap_data;
+
+ trap_data.fabric_id = info->vesw.fabric_id;
+ trap_data.veswid = info->vesw.vesw_id;
+ trap_data.veswportnum = info->vport.port_num;
+ trap_data.opaportnum = adapter->port_num;
+ trap_data.veswportindex = adapter->vport_num;
+ trap_data.opcode = event;
+
+ opa_vnic_vema_send_trap(adapter, &trap_data, info->vport.encap_slid);
+}
+
+/**
+ * opa_vnic_get_summary_counters - get summary counters
+ * @adapter: vnic port adapter
+ * @cntrs: pointer to destination summary counters structure
+ *
+ * This function copies the summary counters maintained by the
+ * given adapter to the destination address provided.
+ */
+void opa_vnic_get_summary_counters(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_summary_counters *cntrs)
+{
+ struct opa_vnic_stats vstats;
+ __be64 *dst;
+ u64 *src;
+
+ memset(&vstats, 0, sizeof(vstats));
+ spin_lock(&adapter->stats_lock);
+ adapter->rn_ops->ndo_get_stats64(adapter->netdev, &vstats.netstats);
+ spin_unlock(&adapter->stats_lock);
+
+ cntrs->vp_instance = cpu_to_be16(adapter->vport_num);
+ cntrs->vesw_id = cpu_to_be16(adapter->info.vesw.vesw_id);
+ cntrs->veswport_num = cpu_to_be32(adapter->port_num);
+
+ cntrs->tx_errors = cpu_to_be64(vstats.netstats.tx_errors);
+ cntrs->rx_errors = cpu_to_be64(vstats.netstats.rx_errors);
+ cntrs->tx_packets = cpu_to_be64(vstats.netstats.tx_packets);
+ cntrs->rx_packets = cpu_to_be64(vstats.netstats.rx_packets);
+ cntrs->tx_bytes = cpu_to_be64(vstats.netstats.tx_bytes);
+ cntrs->rx_bytes = cpu_to_be64(vstats.netstats.rx_bytes);
+
+ /*
+ * This loop depends on the layouts of the
+ * opa_veswport_summary_counters and opa_vnic_stats structures
+ * matching field for field from tx_unicast onwards.
+ */
+ for (dst = &cntrs->tx_unicast, src = &vstats.tx_grp.unicast;
+ dst < &cntrs->reserved[0]; dst++, src++) {
+ *dst = cpu_to_be64(*src);
+ }
+}
+
+/**
+ * opa_vnic_get_error_counters - get error counters
+ * @adapter: vnic port adapter
+ * @cntrs: pointer to destination error counters structure
+ *
+ * This function copies the error counters maintained by the
+ * given adapter to the destination address provided.
+ */
+void opa_vnic_get_error_counters(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_error_counters *cntrs)
+{
+ struct opa_vnic_stats vstats;
+
+ memset(&vstats, 0, sizeof(vstats));
+ spin_lock(&adapter->stats_lock);
+ adapter->rn_ops->ndo_get_stats64(adapter->netdev, &vstats.netstats);
+ spin_unlock(&adapter->stats_lock);
+
+ cntrs->vp_instance = cpu_to_be16(adapter->vport_num);
+ cntrs->vesw_id = cpu_to_be16(adapter->info.vesw.vesw_id);
+ cntrs->veswport_num = cpu_to_be32(adapter->port_num);
+
+ cntrs->tx_errors = cpu_to_be64(vstats.netstats.tx_errors);
+ cntrs->rx_errors = cpu_to_be64(vstats.netstats.rx_errors);
+ cntrs->tx_dlid_zero = cpu_to_be64(vstats.tx_dlid_zero);
+ cntrs->tx_drop_state = cpu_to_be64(vstats.tx_drop_state);
+ cntrs->tx_logic = cpu_to_be64(vstats.netstats.tx_fifo_errors +
+ vstats.netstats.tx_carrier_errors);
+
+ cntrs->rx_bad_veswid = cpu_to_be64(vstats.netstats.rx_nohandler);
+ cntrs->rx_runt = cpu_to_be64(vstats.rx_runt);
+ cntrs->rx_oversize = cpu_to_be64(vstats.rx_oversize);
+ cntrs->rx_drop_state = cpu_to_be64(vstats.rx_drop_state);
+ cntrs->rx_logic = cpu_to_be64(vstats.netstats.rx_fifo_errors);
+}
+
+/**
+ * opa_vnic_get_vesw_info -- Get the vesw information
+ * @adapter: vnic port adapter
+ * @info: pointer to destination vesw info structure
+ *
+ * This function copies the vesw info that is maintained by the
+ * given adapter to the destination address provided.
+ */
+void opa_vnic_get_vesw_info(struct opa_vnic_adapter *adapter,
+ struct opa_vesw_info *info)
+{
+ struct __opa_vesw_info *src = &adapter->info.vesw;
+ int i;
+
+ info->fabric_id = cpu_to_be16(src->fabric_id);
+ info->vesw_id = cpu_to_be16(src->vesw_id);
+ memcpy(info->rsvd0, src->rsvd0, ARRAY_SIZE(src->rsvd0));
+ info->def_port_mask = cpu_to_be16(src->def_port_mask);
+ memcpy(info->rsvd1, src->rsvd1, ARRAY_SIZE(src->rsvd1));
+ info->pkey = cpu_to_be16(src->pkey);
+
+ memcpy(info->rsvd2, src->rsvd2, ARRAY_SIZE(src->rsvd2));
+ info->u_mcast_dlid = cpu_to_be32(src->u_mcast_dlid);
+ for (i = 0; i < OPA_VESW_MAX_NUM_DEF_PORT; i++)
+ info->u_ucast_dlid[i] = cpu_to_be32(src->u_ucast_dlid[i]);
+
+ info->rc = cpu_to_be32(src->rc);
+
+ memcpy(info->rsvd3, src->rsvd3, ARRAY_SIZE(src->rsvd3));
+ info->eth_mtu = cpu_to_be16(src->eth_mtu);
+ memcpy(info->rsvd4, src->rsvd4, ARRAY_SIZE(src->rsvd4));
+}
+
+/**
+ * opa_vnic_set_vesw_info -- Set the vesw information
+ * @adapter: vnic port adapter
+ * @info: pointer to vesw info structure
+ *
+ * This function updates the vesw info that is maintained by the
+ * given adapter with vesw info provided. Reserved fields are stored
+ * and returned back to EM as is.
+ */
+void opa_vnic_set_vesw_info(struct opa_vnic_adapter *adapter,
+ struct opa_vesw_info *info)
+{
+ struct __opa_vesw_info *dst = &adapter->info.vesw;
+ int i;
+
+ dst->fabric_id = be16_to_cpu(info->fabric_id);
+ dst->vesw_id = be16_to_cpu(info->vesw_id);
+ memcpy(dst->rsvd0, info->rsvd0, ARRAY_SIZE(info->rsvd0));
+ dst->def_port_mask = be16_to_cpu(info->def_port_mask);
+ memcpy(dst->rsvd1, info->rsvd1, ARRAY_SIZE(info->rsvd1));
+ dst->pkey = be16_to_cpu(info->pkey);
+
+ memcpy(dst->rsvd2, info->rsvd2, ARRAY_SIZE(info->rsvd2));
+ dst->u_mcast_dlid = be32_to_cpu(info->u_mcast_dlid);
+ for (i = 0; i < OPA_VESW_MAX_NUM_DEF_PORT; i++)
+ dst->u_ucast_dlid[i] = be32_to_cpu(info->u_ucast_dlid[i]);
+
+ dst->rc = be32_to_cpu(info->rc);
+
+ memcpy(dst->rsvd3, info->rsvd3, ARRAY_SIZE(info->rsvd3));
+ dst->eth_mtu = be16_to_cpu(info->eth_mtu);
+ memcpy(dst->rsvd4, info->rsvd4, ARRAY_SIZE(info->rsvd4));
+}
+
+/**
+ * opa_vnic_get_per_veswport_info -- Get the vesw per port information
+ * @adapter: vnic port adapter
+ * @info: pointer to destination vport info structure
+ *
+ * This function copies the vesw per port info that is maintained by the
+ * given adapter to the destination address provided.
+ * Note that the read only fields are not copied.
+ */
+void opa_vnic_get_per_veswport_info(struct opa_vnic_adapter *adapter,
+ struct opa_per_veswport_info *info)
+{
+ struct __opa_per_veswport_info *src = &adapter->info.vport;
+
+ info->port_num = cpu_to_be32(src->port_num);
+ info->eth_link_status = src->eth_link_status;
+ memcpy(info->rsvd0, src->rsvd0, ARRAY_SIZE(src->rsvd0));
+
+ memcpy(info->base_mac_addr, src->base_mac_addr,
+ ARRAY_SIZE(info->base_mac_addr));
+ info->config_state = src->config_state;
+ info->oper_state = src->oper_state;
+ info->max_mac_tbl_ent = cpu_to_be16(src->max_mac_tbl_ent);
+ info->max_smac_ent = cpu_to_be16(src->max_smac_ent);
+ info->mac_tbl_digest = cpu_to_be32(src->mac_tbl_digest);
+ memcpy(info->rsvd1, src->rsvd1, ARRAY_SIZE(src->rsvd1));
+
+ info->encap_slid = cpu_to_be32(src->encap_slid);
+ memcpy(info->pcp_to_sc_uc, src->pcp_to_sc_uc,
+ ARRAY_SIZE(info->pcp_to_sc_uc));
+ memcpy(info->pcp_to_vl_uc, src->pcp_to_vl_uc,
+ ARRAY_SIZE(info->pcp_to_vl_uc));
+ memcpy(info->pcp_to_sc_mc, src->pcp_to_sc_mc,
+ ARRAY_SIZE(info->pcp_to_sc_mc));
+ memcpy(info->pcp_to_vl_mc, src->pcp_to_vl_mc,
+ ARRAY_SIZE(info->pcp_to_vl_mc));
+ info->non_vlan_sc_uc = src->non_vlan_sc_uc;
+ info->non_vlan_vl_uc = src->non_vlan_vl_uc;
+ info->non_vlan_sc_mc = src->non_vlan_sc_mc;
+ info->non_vlan_vl_mc = src->non_vlan_vl_mc;
+ memcpy(info->rsvd2, src->rsvd2, ARRAY_SIZE(src->rsvd2));
+
+ info->uc_macs_gen_count = cpu_to_be16(src->uc_macs_gen_count);
+ info->mc_macs_gen_count = cpu_to_be16(src->mc_macs_gen_count);
+ memcpy(info->rsvd3, src->rsvd3, ARRAY_SIZE(src->rsvd3));
+}
+
+/**
+ * opa_vnic_set_per_veswport_info -- Set vesw per port information
+ * @adapter: vnic port adapter
+ * @info: pointer to vport info structure
+ *
+ * This function updates the vesw per port info that is maintained by the
+ * given adapter with vesw per port info provided. Reserved fields are
+ * stored and returned back to EM as is.
+ */
+void opa_vnic_set_per_veswport_info(struct opa_vnic_adapter *adapter,
+ struct opa_per_veswport_info *info)
+{
+ struct __opa_per_veswport_info *dst = &adapter->info.vport;
+
+ dst->port_num = be32_to_cpu(info->port_num);
+ memcpy(dst->rsvd0, info->rsvd0, ARRAY_SIZE(info->rsvd0));
+
+ memcpy(dst->base_mac_addr, info->base_mac_addr,
+ ARRAY_SIZE(dst->base_mac_addr));
+ dst->config_state = info->config_state;
+ memcpy(dst->rsvd1, info->rsvd1, ARRAY_SIZE(info->rsvd1));
+
+ dst->encap_slid = be32_to_cpu(info->encap_slid);
+ memcpy(dst->pcp_to_sc_uc, info->pcp_to_sc_uc,
+ ARRAY_SIZE(dst->pcp_to_sc_uc));
+ memcpy(dst->pcp_to_vl_uc, info->pcp_to_vl_uc,
+ ARRAY_SIZE(dst->pcp_to_vl_uc));
+ memcpy(dst->pcp_to_sc_mc, info->pcp_to_sc_mc,
+ ARRAY_SIZE(dst->pcp_to_sc_mc));
+ memcpy(dst->pcp_to_vl_mc, info->pcp_to_vl_mc,
+ ARRAY_SIZE(dst->pcp_to_vl_mc));
+ dst->non_vlan_sc_uc = info->non_vlan_sc_uc;
+ dst->non_vlan_vl_uc = info->non_vlan_vl_uc;
+ dst->non_vlan_sc_mc = info->non_vlan_sc_mc;
+ dst->non_vlan_vl_mc = info->non_vlan_vl_mc;
+ memcpy(dst->rsvd2, info->rsvd2, ARRAY_SIZE(info->rsvd2));
+ memcpy(dst->rsvd3, info->rsvd3, ARRAY_SIZE(info->rsvd3));
+}
+
+/**
+ * opa_vnic_query_mcast_macs - query multicast mac list
+ * @adapter: vnic port adapter
+ * @macs: pointer to mac list
+ *
+ * This function populates the provided mac list with the configured
+ * multicast addresses in the adapter.
+ */
+void opa_vnic_query_mcast_macs(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_iface_macs *macs)
+{
+ u16 start_idx, num_macs, idx = 0, count = 0;
+ struct netdev_hw_addr *ha;
+
+ start_idx = be16_to_cpu(macs->start_idx);
+ num_macs = be16_to_cpu(macs->num_macs_in_msg);
+ netdev_for_each_mc_addr(ha, adapter->netdev) {
+ struct opa_vnic_iface_mac_entry *entry = &macs->entry[count];
+
+ if (start_idx > idx++)
+ continue;
+ else if (num_macs == count)
+ break;
+ memcpy(entry, ha->addr, sizeof(*entry));
+ count++;
+ }
+
+ macs->tot_macs_in_lst = cpu_to_be16(netdev_mc_count(adapter->netdev));
+ macs->num_macs_in_msg = cpu_to_be16(count);
+ macs->gen_count = cpu_to_be16(adapter->info.vport.mc_macs_gen_count);
+}
+
+/**
+ * opa_vnic_query_ucast_macs - query unicast mac list
+ * @adapter: vnic port adapter
+ * @macs: pointer to mac list
+ *
+ * This function populates the provided mac list with the configured
+ * unicast addresses in the adapter.
+ */
+void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_iface_macs *macs)
+{
+ u16 start_idx, tot_macs, num_macs, idx = 0, count = 0, em_macs = 0;
+ struct netdev_hw_addr *ha;
+
+ start_idx = be16_to_cpu(macs->start_idx);
+ num_macs = be16_to_cpu(macs->num_macs_in_msg);
+ /* loop through dev_addrs list first */
+ for_each_dev_addr(adapter->netdev, ha) {
+ struct opa_vnic_iface_mac_entry *entry = &macs->entry[count];
+
+ /* Do not include EM specified MAC address */
+ if (!memcmp(adapter->info.vport.base_mac_addr, ha->addr,
+ ARRAY_SIZE(adapter->info.vport.base_mac_addr))) {
+ em_macs++;
+ continue;
+ }
+
+ if (start_idx > idx++)
+ continue;
+ else if (num_macs == count)
+ break;
+ memcpy(entry, ha->addr, sizeof(*entry));
+ count++;
+ }
+
+ /* loop through uc list */
+ netdev_for_each_uc_addr(ha, adapter->netdev) {
+ struct opa_vnic_iface_mac_entry *entry = &macs->entry[count];
+
+ if (start_idx > idx++)
+ continue;
+ else if (num_macs == count)
+ break;
+ memcpy(entry, ha->addr, sizeof(*entry));
+ count++;
+ }
+
+ tot_macs = netdev_hw_addr_list_count(&adapter->netdev->dev_addrs) +
+ netdev_uc_count(adapter->netdev) - em_macs;
+ macs->tot_macs_in_lst = cpu_to_be16(tot_macs);
+ macs->num_macs_in_msg = cpu_to_be16(count);
+ macs->gen_count = cpu_to_be16(adapter->info.vport.uc_macs_gen_count);
+}
diff --git a/drivers/infiniband/ulp/rtrs/Kconfig b/drivers/infiniband/ulp/rtrs/Kconfig
new file mode 100644
index 000000000000..9092b62e6dc8
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/Kconfig
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+config INFINIBAND_RTRS
+ tristate
+ depends on INFINIBAND_ADDR_TRANS
+
+config INFINIBAND_RTRS_CLIENT
+ tristate "RTRS client module"
+ depends on INFINIBAND_ADDR_TRANS
+ select INFINIBAND_RTRS
+ help
+ RDMA transport client module.
+
+ RDMA Transport (RTRS) client implements a reliable transport layer
+ with multipathing functionality and is intended to be
+ the base layer for a block storage initiator over RDMA.
+
+config INFINIBAND_RTRS_SERVER
+ tristate "RTRS server module"
+ depends on INFINIBAND_ADDR_TRANS
+ select INFINIBAND_RTRS
+ help
+ RDMA transport server module.
+
+ RDMA Transport (RTRS) server module processes connection and IO
+ requests received from the RTRS client module and passes the
+ IO requests to its user, e.g. RNBD_server.
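+
+# Illustrative .config fragment (not from any particular defconfig) showing
+# how the symbols above combine; INFINIBAND_RTRS has no prompt and is pulled
+# in only through the "select" statements:
+#
+#   CONFIG_INFINIBAND_ADDR_TRANS=y
+#   CONFIG_INFINIBAND_RTRS_CLIENT=m
+#   CONFIG_INFINIBAND_RTRS_SERVER=m
+#   CONFIG_INFINIBAND_RTRS=m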
diff --git a/drivers/infiniband/ulp/rtrs/Makefile b/drivers/infiniband/ulp/rtrs/Makefile
new file mode 100644
index 000000000000..5227e7788e1f
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+CFLAGS_rtrs-clt-trace.o = -I$(src)
+
+rtrs-client-y := rtrs-clt.o \
+ rtrs-clt-stats.o \
+ rtrs-clt-sysfs.o \
+ rtrs-clt-trace.o
+
+CFLAGS_rtrs-srv-trace.o = -I$(src)
+
+rtrs-server-y := rtrs-srv.o \
+ rtrs-srv-stats.o \
+ rtrs-srv-sysfs.o \
+ rtrs-srv-trace.o
+
+rtrs-core-y := rtrs.o
+
+obj-$(CONFIG_INFINIBAND_RTRS) += rtrs-core.o
+obj-$(CONFIG_INFINIBAND_RTRS_CLIENT) += rtrs-client.o
+obj-$(CONFIG_INFINIBAND_RTRS_SERVER) += rtrs-server.o
diff --git a/drivers/infiniband/ulp/rtrs/README b/drivers/infiniband/ulp/rtrs/README
new file mode 100644
index 000000000000..5d9ea142e5dd
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/README
@@ -0,0 +1,213 @@
+****************************
+RDMA Transport (RTRS)
+****************************
+
+RTRS (RDMA Transport) is a reliable high speed transport library
+which provides support for establishing an optimal number of connections
+between client and server machines using RDMA (InfiniBand, RoCE, iWarp)
+transport. It is optimized to transfer (read/write) IO blocks.
+
+In its core interface it follows the BIO semantics of providing the
+possibility to either write data from an sg list to the remote side
+or to request ("read") data transfer from the remote side into a given
+sg list.
+
+RTRS provides I/O fail-over and load-balancing capabilities by using
+multipath I/O (see "add_path" and "mp_policy" configuration entries in
+Documentation/ABI/testing/sysfs-class-rtrs-client).
+
+RTRS is used by the RNBD (RDMA Network Block Device) modules.
+
+==================
+Transport protocol
+==================
+
+Overview
+--------
+An established connection between a client and a server is called rtrs
+session. A session is associated with a set of memory chunks reserved on the
+server side for a given client for rdma transfer. A session
+consists of multiple paths, each representing a separate physical link
+between client and server. Those are used for load balancing and failover.
+Each path consists of as many connections (QPs) as there are cpus on
+the client.
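+
+For example (numbers purely illustrative): a client with 16 CPUs that can
+reach the server over two physical links would establish one session with
+two paths, each path consisting of 16 QP connections.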
+
+When processing an incoming write or read request, the rtrs client uses memory
+chunks reserved for it on the server side. Their number, size and addresses
+need to be exchanged between client and server during the connection
+establishment phase. Apart from the memory-related information, the client
+needs to inform the server about the session name and to identify each path
+and connection individually.
+
+On an established session the client sends write or read messages to the
+server. The server uses the immediate field to tell the client which request
+is being acknowledged and to carry the errno. The client uses the immediate
+field to tell the server which of the memory chunks has been accessed and at
+which offset the message can be found.
+
+Module parameter always_invalidate is introduced for the security problem
+discussed in LPC RDMA MC 2019. When always_invalidate=Y, on the server side we
+invalidate each rdma buffer before we hand it over to RNBD server and
+then pass it to the block layer. A new rkey is generated and registered for the
+buffer after it returns back from the block layer and RNBD server.
+The new rkey is sent back to the client along with the IO result.
+The procedure is the default behaviour of the driver. This invalidation and
+registration on each IO causes performance drop of up to 20%. A user of the
+driver may choose to load the modules with this mechanism switched off
+(always_invalidate=N), if they understand and can accept the risk of a
+malicious client being able to corrupt memory of the server it is connected
+to. This might
+be a reasonable option in a scenario where all the clients and all the servers
+are located within a secure datacenter.
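+
+For example, assuming the server module is built as rtrs-server (see the
+Makefile in this directory), the mechanism could be switched off at load time
+with something like "modprobe rtrs-server always_invalidate=N"; the exact
+spelling is best confirmed with modinfo.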
+
+
+Connection establishment
+------------------------
+
+1. Client starts establishing connections belonging to a path of a session one
+by one via attaching RTRS_MSG_CON_REQ messages to the rdma_connect requests.
+Those include uuid of the session and uuid of the path to be
+established. They are used by the server to find a persisting session/path or
+to create a new one when necessary. The message also contains the protocol
+version and magic for compatibility, total number of connections per session
+(as many as cpus on the client), the id of the current connection and
+the reconnect counter, which is used to resolve the situations where
+client is trying to reconnect a path, while server is still destroying the old
+one.
+
+2. Server accepts the connection requests one by one and attaches
+RTRS_MSG_CONN_RSP messages to the rdma_accept. Apart from magic and
+protocol version, the messages include error code, queue depth supported by
+the server (number of memory chunks which are going to be allocated for that
+session) and the maximum size of one IO. The RTRS_MSG_NEW_RKEY_F flag is set
+when always_invalidate=Y.
+
+3. After all connections of a path are established client sends to server the
+RTRS_MSG_INFO_REQ message, containing the name of the session. This message
+requests the address information from the server.
+
+4. Server replies to the session info request message with RTRS_MSG_INFO_RSP,
+which contains the addresses and keys of the RDMA buffers allocated for that
+session.
+
+5. Session becomes connected after all paths to be established are connected
+(i.e. steps 1-4 finished for all paths requested for a session)
+
+6. Server and client periodically exchange heartbeat messages (empty rdma
+messages with an immediate field) which are used to detect a crash on the
+remote side or a network outage in the absence of IO.
+
+7. On any RDMA related error or in the case of a heartbeat timeout, the
+corresponding path is disconnected, all the inflight IO are failed over to a
+healthy path, if any, and the reconnect mechanism is triggered.
+
+CLT SRV
+*for each connection belonging to a path and for each path:
+RTRS_MSG_CON_REQ ------------------->
+ <------------------- RTRS_MSG_CON_RSP
+...
+*after all connections are established:
+RTRS_MSG_INFO_REQ ------------------->
+ <------------------- RTRS_MSG_INFO_RSP
+*heartbeat is started from both sides:
+ -------------------> [RTRS_HB_MSG_IMM]
+[RTRS_HB_MSG_ACK] <-------------------
+[RTRS_HB_MSG_IMM] <-------------------
+ -------------------> [RTRS_HB_MSG_ACK]
+
+IO path
+-------
+
+* Write (always_invalidate=N) *
+
+1. When processing a write request client selects one of the memory chunks
+on the server side and rdma writes there the user data, user header and the
+RTRS_MSG_RDMA_WRITE message. Apart from the type (write), the message only
+contains size of the user header. The client tells the server which chunk has
+been accessed and at what offset the RTRS_MSG_RDMA_WRITE can be found by
+using the IMM field.
+
+2. When confirming a write request server sends an "empty" rdma message with
+an immediate field. The 32 bit field is used to specify the outstanding
+inflight IO and for the error code.
+
+CLT SRV
+usr_data + usr_hdr + rtrs_msg_rdma_write -----------------> [RTRS_IO_REQ_IMM]
+[RTRS_IO_RSP_IMM] <----------------- (id + errno)
+
+* Write (always_invalidate=Y) *
+
+1. When processing a write request client selects one of the memory chunks
+on the server side and rdma writes there the user data, user header and the
+RTRS_MSG_RDMA_WRITE message. Apart from the type (write), the message only
+contains size of the user header. The client tells the server which chunk has
+been accessed and at what offset the RTRS_MSG_RDMA_WRITE can be found by
+using the IMM field. The server first invalidates the rkey associated with the
+memory chunk and, when that finishes, passes the IO to the RNBD server module.
+
+2. When confirming a write request server sends an "empty" rdma message with
+an immediate field. The 32 bit field is used to specify the outstanding
+inflight IO and for the error code. The new rkey is sent back using a
+SEND_WITH_IMM WR. When the client receives the new rkey message, it validates
+the message, updates the rkey for the rbuffer, finishes the IO and then posts
+the recv buffer back for later use.
+
+CLT SRV
+usr_data + usr_hdr + rtrs_msg_rdma_write -----------------> [RTRS_IO_REQ_IMM]
+[RTRS_MSG_RKEY_RSP] <----------------- (RTRS_MSG_RKEY_RSP)
+[RTRS_IO_RSP_IMM] <----------------- (id + errno)
+
+
+* Read (always_invalidate=N)*
+
+1. When processing a read request client selects one of the memory chunks
+on the server side and rdma writes there the user header and the
+RTRS_MSG_RDMA_READ message. This message contains the type (read), size of
+the user header, flags (specifying if memory invalidation is necessary) and the
+list of addresses along with keys for the data to be read into.
+
+2. When confirming a read request server transfers the requested data first,
+attaches an invalidation message if requested and finally an "empty" rdma
+message with an immediate field. The 32 bit field is used to specify the
+outstanding inflight IO and the error code.
+
+CLT SRV
+usr_hdr + rtrs_msg_rdma_read --------------> [RTRS_IO_REQ_IMM]
+[RTRS_IO_RSP_IMM] <-------------- usr_data + (id + errno)
+or in case the client requested invalidation:
+[RTRS_IO_RSP_IMM_W_INV] <-------------- usr_data + (INV) + (id + errno)
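+
+The "list of addresses along with keys" is a small array of descriptors placed
+after the user header. A stripped-down sketch of filling one entry from the
+client's registered MR is shown below (the field names follow the
+rtrs_sg_desc usage visible later in this patch; the struct itself is
+simplified and endianness conversion is omitted):
+
+  #include <stdint.h>
+
+  /* Simplified wire descriptor: where the server should RDMA-write the data. */
+  struct sg_desc {
+          uint64_t addr;  /* IO address on the client side (MR iova) */
+          uint32_t key;   /* rkey the server uses for the RDMA write */
+          uint32_t len;   /* number of bytes to transfer */
+  };
+
+  static void fill_read_desc(struct sg_desc *d, uint64_t mr_iova,
+                             uint32_t mr_rkey, uint32_t mr_length)
+  {
+          d->addr = mr_iova;
+          d->key  = mr_rkey;
+          d->len  = mr_length;
+  }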
+
+* Read (always_invalidate=Y) *
+
+1. When processing a read request the client selects one of the memory chunks
+on the server side and rdma writes there the user header and the
+RTRS_MSG_RDMA_READ message. This message contains the type (read), the size of
+the user header, flags (specifying if memory invalidation is necessary) and the
+list of addresses along with keys for the data to be read into.
+The server first invalidates the rkey associated with the memory chunk; once
+that is done, it passes the IO to the RNBD server module.
+
+2. When confirming a read request the server transfers the requested data
+first, attaches an invalidation message if requested and finally sends an
+"empty" rdma message with an immediate field. The 32 bit field is used to
+specify the outstanding inflight IO and the error code. The new rkey is sent
+back using a SEND_WITH_IMM WR. When the client receives the new rkey message,
+it validates the message, finishes the IO after updating the rkey for the
+rbuffer, and then posts the recv buffer back for later use.
+
+CLT SRV
+usr_hdr + rtrs_msg_rdma_read --------------> [RTRS_IO_REQ_IMM]
+[RTRS_IO_RSP_IMM] <-------------- usr_data + (id + errno)
+[RTRS_MSG_RKEY_RSP] <----------------- (RTRS_MSG_RKEY_RSP)
+or in case the client requested invalidation:
+[RTRS_IO_RSP_IMM_W_INV] <-------------- usr_data + (INV) + (id + errno)
+=========================================
+Contributors List (in alphabetical order)
+=========================================
+Danil Kipnis <danil.kipnis@profitbricks.com>
+Fabian Holler <mail@fholler.de>
+Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
+Jack Wang <jinpu.wang@profitbricks.com>
+Kleber Souza <kleber.souza@profitbricks.com>
+Lutz Pogrell <lutz.pogrell@cloud.ionos.com>
+Milind Dumbare <Milind.dumbare@gmail.com>
+Roman Penyaev <roman.penyaev@profitbricks.com>
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
new file mode 100644
index 000000000000..1e6ffafa2db3
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rtrs-clt.h"
+
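+/*
+ * Count completions handled on a different CPU than the one the connection
+ * was created on: the handling CPU's "to" counter and the connection CPU's
+ * "from" counter are both incremented.
+ */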
+void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_clt_stats *stats = clt_path->stats;
+ struct rtrs_clt_stats_pcpu *s;
+ int cpu;
+
+ cpu = raw_smp_processor_id();
+ s = get_cpu_ptr(stats->pcpu_stats);
+ if (con->cpu != cpu) {
+ s->cpu_migr.to++;
+
+ /* Careful here, override s pointer */
+ s = per_cpu_ptr(stats->pcpu_stats, con->cpu);
+ atomic_inc(&s->cpu_migr.from);
+ }
+ put_cpu_ptr(stats->pcpu_stats);
+}
+
+void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
+{
+ this_cpu_inc(stats->pcpu_stats->rdma.failover_cnt);
+}
+
+int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
+{
+ struct rtrs_clt_stats_pcpu *s;
+
+ size_t used;
+ int cpu;
+
+ used = 0;
+ for_each_possible_cpu(cpu) {
+ s = per_cpu_ptr(stats->pcpu_stats, cpu);
+ used += sysfs_emit_at(buf, used, "%d ",
+ atomic_read(&s->cpu_migr.from));
+ }
+
+ used += sysfs_emit_at(buf, used, "\n");
+
+ return used;
+}
+
+int rtrs_clt_stats_migration_to_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
+{
+ struct rtrs_clt_stats_pcpu *s;
+
+ size_t used;
+ int cpu;
+
+ used = 0;
+ for_each_possible_cpu(cpu) {
+ s = per_cpu_ptr(stats->pcpu_stats, cpu);
+ used += sysfs_emit_at(buf, used, "%d ", s->cpu_migr.to);
+ }
+
+ used += sysfs_emit_at(buf, used, "\n");
+
+ return used;
+}
+
+int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf)
+{
+ return sysfs_emit(buf, "%d %d\n", stats->reconnects.successful_cnt,
+ stats->reconnects.fail_cnt);
+}
+
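+/*
+ * Emits a single line of counters aggregated over all CPUs:
+ * <read cnt> <read bytes> <write cnt> <write bytes> <inflight> <failover cnt>
+ */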
+ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats, char *page)
+{
+ struct rtrs_clt_stats_rdma sum;
+ struct rtrs_clt_stats_rdma *r;
+ int cpu;
+
+ memset(&sum, 0, sizeof(sum));
+
+ for_each_possible_cpu(cpu) {
+ r = &per_cpu_ptr(stats->pcpu_stats, cpu)->rdma;
+
+ sum.dir[READ].cnt += r->dir[READ].cnt;
+ sum.dir[READ].size_total += r->dir[READ].size_total;
+ sum.dir[WRITE].cnt += r->dir[WRITE].cnt;
+ sum.dir[WRITE].size_total += r->dir[WRITE].size_total;
+ sum.failover_cnt += r->failover_cnt;
+ }
+
+ return sysfs_emit(page, "%llu %llu %llu %llu %u %llu\n",
+ sum.dir[READ].cnt, sum.dir[READ].size_total,
+ sum.dir[WRITE].cnt, sum.dir[WRITE].size_total,
+ atomic_read(&stats->inflight), sum.failover_cnt);
+}
+
+ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *s, char *page)
+{
+ return sysfs_emit(page, "echo 1 to reset all statistics\n");
+}
+
+int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable)
+{
+ struct rtrs_clt_stats_pcpu *s;
+ int cpu;
+
+ if (!enable)
+ return -EINVAL;
+
+ for_each_possible_cpu(cpu) {
+ s = per_cpu_ptr(stats->pcpu_stats, cpu);
+ memset(&s->rdma, 0, sizeof(s->rdma));
+ }
+
+ return 0;
+}
+
+int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable)
+{
+ struct rtrs_clt_stats_pcpu *s;
+ int cpu;
+
+ if (!enable)
+ return -EINVAL;
+
+ for_each_possible_cpu(cpu) {
+ s = per_cpu_ptr(stats->pcpu_stats, cpu);
+ memset(&s->cpu_migr, 0, sizeof(s->cpu_migr));
+ }
+
+ return 0;
+}
+
+int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable)
+{
+ if (!enable)
+ return -EINVAL;
+
+ memset(&stats->reconnects, 0, sizeof(stats->reconnects));
+
+ return 0;
+}
+
+int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *s, bool enable)
+{
+ if (enable) {
+ rtrs_clt_reset_rdma_stats(s, enable);
+ rtrs_clt_reset_cpu_migr_stats(s, enable);
+ rtrs_clt_reset_reconnects_stat(s, enable);
+ atomic_set(&s->inflight, 0);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
+ size_t size, int d)
+{
+ this_cpu_inc(stats->pcpu_stats->rdma.dir[d].cnt);
+ this_cpu_add(stats->pcpu_stats->rdma.dir[d].size_total, size);
+}
+
+void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_clt_stats *stats = clt_path->stats;
+ unsigned int len;
+
+ len = req->usr_len + req->data_len;
+ rtrs_clt_update_rdma_stats(stats, len, dir);
+ if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ atomic_inc(&stats->inflight);
+}
+
+int rtrs_clt_init_stats(struct rtrs_clt_stats *stats)
+{
+ stats->pcpu_stats = alloc_percpu(typeof(*stats->pcpu_stats));
+ if (!stats->pcpu_stats)
+ return -ENOMEM;
+
+ /*
+ * successful_cnt will be set to 0 after session
+ * is established for the first time
+ */
+ stats->reconnects.successful_cnt = -1;
+
+ return 0;
+}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
new file mode 100644
index 000000000000..4aa80c9388f0
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
@@ -0,0 +1,514 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rtrs-pri.h"
+#include "rtrs-clt.h"
+#include "rtrs-log.h"
+
+#define MIN_MAX_RECONN_ATT -1
+#define MAX_MAX_RECONN_ATT 9999
+
+static void rtrs_clt_path_release(struct kobject *kobj)
+{
+ struct rtrs_clt_path *clt_path;
+
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+
+ free_path(clt_path);
+}
+
+static struct kobj_type ktype_sess = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rtrs_clt_path_release
+};
+
+static void rtrs_clt_path_stats_release(struct kobject *kobj)
+{
+ struct rtrs_clt_stats *stats;
+
+ stats = container_of(kobj, struct rtrs_clt_stats, kobj_stats);
+
+ free_percpu(stats->pcpu_stats);
+
+ kfree(stats);
+}
+
+static struct kobj_type ktype_stats = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rtrs_clt_path_stats_release,
+};
+
+static ssize_t max_reconnect_attempts_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
+ dev);
+
+ return sysfs_emit(page, "%d\n",
+ rtrs_clt_get_max_reconnect_attempts(clt));
+}
+
+static ssize_t max_reconnect_attempts_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int value;
+ int ret;
+ struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
+ dev);
+
+ ret = kstrtoint(buf, 10, &value);
+ if (ret) {
+ rtrs_err(clt, "%s: failed to convert string '%s' to int\n",
+ attr->attr.name, buf);
+ return ret;
+ }
+ if (value > MAX_MAX_RECONN_ATT ||
+ value < MIN_MAX_RECONN_ATT) {
+ rtrs_err(clt,
+ "%s: invalid range (provided: '%s', accepted: min: %d, max: %d)\n",
+ attr->attr.name, buf, MIN_MAX_RECONN_ATT,
+ MAX_MAX_RECONN_ATT);
+ return -EINVAL;
+ }
+ rtrs_clt_set_max_reconnect_attempts(clt, value);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(max_reconnect_attempts);
+
+static ssize_t mpath_policy_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_sess *clt;
+
+ clt = container_of(dev, struct rtrs_clt_sess, dev);
+
+ switch (clt->mp_policy) {
+ case MP_POLICY_RR:
+ return sysfs_emit(page, "round-robin (RR: %d)\n",
+ clt->mp_policy);
+ case MP_POLICY_MIN_INFLIGHT:
+ return sysfs_emit(page, "min-inflight (MI: %d)\n",
+ clt->mp_policy);
+ case MP_POLICY_MIN_LATENCY:
+ return sysfs_emit(page, "min-latency (ML: %d)\n",
+ clt->mp_policy);
+ default:
+ return sysfs_emit(page, "Unknown (%d)\n", clt->mp_policy);
+ }
+}
+
+static ssize_t mpath_policy_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct rtrs_clt_sess *clt;
+ int value;
+ int ret;
+ size_t len = 0;
+
+ clt = container_of(dev, struct rtrs_clt_sess, dev);
+
+ ret = kstrtoint(buf, 10, &value);
+ if (!ret && (value == MP_POLICY_RR ||
+ value == MP_POLICY_MIN_INFLIGHT ||
+ value == MP_POLICY_MIN_LATENCY)) {
+ clt->mp_policy = value;
+ return count;
+ }
+
+ /* distinguish "mi" and "min-latency" with length */
+ len = strnlen(buf, NAME_MAX);
+ if (len && buf[len - 1] == '\n')
+ len--;
+
+ if (!strncasecmp(buf, "round-robin", 11) ||
+ (len == 2 && !strncasecmp(buf, "rr", 2)))
+ clt->mp_policy = MP_POLICY_RR;
+ else if (!strncasecmp(buf, "min-inflight", 12) ||
+ (len == 2 && !strncasecmp(buf, "mi", 2)))
+ clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
+ else if (!strncasecmp(buf, "min-latency", 11) ||
+ (len == 2 && !strncasecmp(buf, "ml", 2)))
+ clt->mp_policy = MP_POLICY_MIN_LATENCY;
+ else
+ return -EINVAL;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(mpath_policy);
+
+static ssize_t add_path_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ return sysfs_emit(page,
+ "Usage: echo [<source addr>@]<destination addr> > %s\n\n*addr ::= [ ip:<ipv4|ipv6> | gid:<gid> ]\n",
+ attr->attr.name);
+}
+
+static ssize_t add_path_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sockaddr_storage srcaddr, dstaddr;
+ struct rtrs_addr addr = {
+ .src = &srcaddr,
+ .dst = &dstaddr
+ };
+ struct rtrs_clt_sess *clt;
+ const char *nl;
+ size_t len;
+ int err;
+
+ clt = container_of(dev, struct rtrs_clt_sess, dev);
+
+ nl = strchr(buf, '\n');
+ if (nl)
+ len = nl - buf;
+ else
+ len = count;
+ err = rtrs_addr_to_sockaddr(buf, len, clt->port, &addr);
+ if (err)
+ return -EINVAL;
+
+ err = rtrs_clt_create_path_from_sysfs(clt, &addr);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(add_path);
+
+static ssize_t rtrs_clt_state_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ struct rtrs_clt_path *clt_path;
+
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+ if (clt_path->state == RTRS_CLT_CONNECTED)
+ return sysfs_emit(page, "connected\n");
+
+ return sysfs_emit(page, "disconnected\n");
+}
+
+static struct kobj_attribute rtrs_clt_state_attr =
+ __ATTR(state, 0444, rtrs_clt_state_show, NULL);
+
+static ssize_t rtrs_clt_reconnect_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Usage: echo 1 > %s\n", attr->attr.name);
+}
+
+static ssize_t rtrs_clt_reconnect_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rtrs_clt_path *clt_path;
+ int ret;
+
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+ if (!sysfs_streq(buf, "1")) {
+ rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+ ret = rtrs_clt_reconnect_from_sysfs(clt_path);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static struct kobj_attribute rtrs_clt_reconnect_attr =
+ __ATTR(reconnect, 0644, rtrs_clt_reconnect_show,
+ rtrs_clt_reconnect_store);
+
+static ssize_t rtrs_clt_disconnect_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Usage: echo 1 > %s\n", attr->attr.name);
+}
+
+static ssize_t rtrs_clt_disconnect_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rtrs_clt_path *clt_path;
+
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+ if (!sysfs_streq(buf, "1")) {
+ rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+ rtrs_clt_close_conns(clt_path, true);
+
+ return count;
+}
+
+static struct kobj_attribute rtrs_clt_disconnect_attr =
+ __ATTR(disconnect, 0644, rtrs_clt_disconnect_show,
+ rtrs_clt_disconnect_store);
+
+static ssize_t rtrs_clt_remove_path_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Usage: echo 1 > %s\n", attr->attr.name);
+}
+
+static ssize_t rtrs_clt_remove_path_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rtrs_clt_path *clt_path;
+ int ret;
+
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+ if (!sysfs_streq(buf, "1")) {
+ rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+ ret = rtrs_clt_remove_path_from_sysfs(clt_path, &attr->attr);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static struct kobj_attribute rtrs_clt_remove_path_attr =
+ __ATTR(remove_path, 0644, rtrs_clt_remove_path_show,
+ rtrs_clt_remove_path_store);
+
+STAT_ATTR(struct rtrs_clt_stats, cpu_migration_from,
+ rtrs_clt_stats_migration_from_cnt_to_str,
+ rtrs_clt_reset_cpu_migr_stats);
+
+STAT_ATTR(struct rtrs_clt_stats, cpu_migration_to,
+ rtrs_clt_stats_migration_to_cnt_to_str,
+ rtrs_clt_reset_cpu_migr_stats);
+
+STAT_ATTR(struct rtrs_clt_stats, reconnects,
+ rtrs_clt_stats_reconnects_to_str,
+ rtrs_clt_reset_reconnects_stat);
+
+STAT_ATTR(struct rtrs_clt_stats, rdma,
+ rtrs_clt_stats_rdma_to_str,
+ rtrs_clt_reset_rdma_stats);
+
+STAT_ATTR(struct rtrs_clt_stats, reset_all,
+ rtrs_clt_reset_all_help,
+ rtrs_clt_reset_all_stats);
+
+static struct attribute *rtrs_clt_stats_attrs[] = {
+ &cpu_migration_from_attr.attr,
+ &cpu_migration_to_attr.attr,
+ &reconnects_attr.attr,
+ &rdma_attr.attr,
+ &reset_all_attr.attr,
+ NULL
+};
+
+static const struct attribute_group rtrs_clt_stats_attr_group = {
+ .attrs = rtrs_clt_stats_attrs,
+};
+
+static ssize_t rtrs_clt_hca_port_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_path *clt_path;
+
+ clt_path = container_of(kobj, typeof(*clt_path), kobj);
+
+ return sysfs_emit(page, "%u\n", clt_path->hca_port);
+}
+
+static struct kobj_attribute rtrs_clt_hca_port_attr =
+ __ATTR(hca_port, 0444, rtrs_clt_hca_port_show, NULL);
+
+static ssize_t rtrs_clt_hca_name_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_path *clt_path;
+
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+
+ return sysfs_emit(page, "%s\n", clt_path->hca_name);
+}
+
+static struct kobj_attribute rtrs_clt_hca_name_attr =
+ __ATTR(hca_name, 0444, rtrs_clt_hca_name_show, NULL);
+
+static ssize_t rtrs_clt_cur_latency_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_path *clt_path;
+
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+
+ return sysfs_emit(page, "%lld ns\n",
+ ktime_to_ns(clt_path->s.hb_cur_latency));
+}
+
+static struct kobj_attribute rtrs_clt_cur_latency_attr =
+ __ATTR(cur_latency, 0444, rtrs_clt_cur_latency_show, NULL);
+
+static ssize_t rtrs_clt_src_addr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_path *clt_path;
+ int len;
+
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+ len = sockaddr_to_str((struct sockaddr *)&clt_path->s.src_addr, page,
+ PAGE_SIZE);
+ len += sysfs_emit_at(page, len, "\n");
+ return len;
+}
+
+static struct kobj_attribute rtrs_clt_src_addr_attr =
+ __ATTR(src_addr, 0444, rtrs_clt_src_addr_show, NULL);
+
+static ssize_t rtrs_clt_dst_addr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_path *clt_path;
+ int len;
+
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+ len = sockaddr_to_str((struct sockaddr *)&clt_path->s.dst_addr, page,
+ PAGE_SIZE);
+ len += sysfs_emit_at(page, len, "\n");
+ return len;
+}
+
+static struct kobj_attribute rtrs_clt_dst_addr_attr =
+ __ATTR(dst_addr, 0444, rtrs_clt_dst_addr_show, NULL);
+
+static struct attribute *rtrs_clt_path_attrs[] = {
+ &rtrs_clt_hca_name_attr.attr,
+ &rtrs_clt_hca_port_attr.attr,
+ &rtrs_clt_src_addr_attr.attr,
+ &rtrs_clt_dst_addr_attr.attr,
+ &rtrs_clt_state_attr.attr,
+ &rtrs_clt_reconnect_attr.attr,
+ &rtrs_clt_disconnect_attr.attr,
+ &rtrs_clt_remove_path_attr.attr,
+ &rtrs_clt_cur_latency_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group rtrs_clt_path_attr_group = {
+ .attrs = rtrs_clt_path_attrs,
+};
+
+int rtrs_clt_create_path_files(struct rtrs_clt_path *clt_path)
+{
+ struct rtrs_clt_sess *clt = clt_path->clt;
+ char str[NAME_MAX];
+ int err;
+ struct rtrs_addr path = {
+ .src = &clt_path->s.src_addr,
+ .dst = &clt_path->s.dst_addr,
+ };
+
+ rtrs_addr_to_str(&path, str, sizeof(str));
+ err = kobject_init_and_add(&clt_path->kobj, &ktype_sess,
+ clt->kobj_paths,
+ "%s", str);
+ if (err) {
+ pr_err("kobject_init_and_add: %d\n", err);
+ kobject_put(&clt_path->kobj);
+ return err;
+ }
+ err = sysfs_create_group(&clt_path->kobj, &rtrs_clt_path_attr_group);
+ if (err) {
+ pr_err("sysfs_create_group(): %d\n", err);
+ goto put_kobj;
+ }
+ err = kobject_init_and_add(&clt_path->stats->kobj_stats, &ktype_stats,
+ &clt_path->kobj, "stats");
+ if (err) {
+ pr_err("kobject_init_and_add: %d\n", err);
+ kobject_put(&clt_path->stats->kobj_stats);
+ goto remove_group;
+ }
+
+ err = sysfs_create_group(&clt_path->stats->kobj_stats,
+ &rtrs_clt_stats_attr_group);
+ if (err) {
+ pr_err("failed to create stats sysfs group, err: %d\n", err);
+ goto put_kobj_stats;
+ }
+
+ return 0;
+
+put_kobj_stats:
+ kobject_del(&clt_path->stats->kobj_stats);
+ kobject_put(&clt_path->stats->kobj_stats);
+remove_group:
+ sysfs_remove_group(&clt_path->kobj, &rtrs_clt_path_attr_group);
+put_kobj:
+ kobject_del(&clt_path->kobj);
+ kobject_put(&clt_path->kobj);
+
+ return err;
+}
+
+void rtrs_clt_destroy_path_files(struct rtrs_clt_path *clt_path,
+ const struct attribute *sysfs_self)
+{
+ kobject_del(&clt_path->stats->kobj_stats);
+ kobject_put(&clt_path->stats->kobj_stats);
+ if (sysfs_self)
+ sysfs_remove_file_self(&clt_path->kobj, sysfs_self);
+ kobject_del(&clt_path->kobj);
+}
+
+static struct attribute *rtrs_clt_attrs[] = {
+ &dev_attr_max_reconnect_attempts.attr,
+ &dev_attr_mpath_policy.attr,
+ &dev_attr_add_path.attr,
+ NULL,
+};
+
+static const struct attribute_group rtrs_clt_attr_group = {
+ .attrs = rtrs_clt_attrs,
+};
+
+int rtrs_clt_create_sysfs_root_files(struct rtrs_clt_sess *clt)
+{
+ return sysfs_create_group(&clt->dev.kobj, &rtrs_clt_attr_group);
+}
+
+void rtrs_clt_destroy_sysfs_root(struct rtrs_clt_sess *clt)
+{
+ sysfs_remove_group(&clt->dev.kobj, &rtrs_clt_attr_group);
+
+ if (clt->kobj_paths) {
+ kobject_del(clt->kobj_paths);
+ kobject_put(clt->kobj_paths);
+ }
+}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-trace.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-trace.c
new file mode 100644
index 000000000000..f14fa1f36ce8
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-trace.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
+ */
+#include "rtrs.h"
+#include "rtrs-clt.h"
+
+/*
+ * We include this last to have the helpers above available for the trace
+ * event implementations.
+ */
+#define CREATE_TRACE_POINTS
+#include "rtrs-clt-trace.h"
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-trace.h b/drivers/infiniband/ulp/rtrs/rtrs-clt-trace.h
new file mode 100644
index 000000000000..7738e2676855
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-trace.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rtrs_clt
+
+#if !defined(_TRACE_RTRS_CLT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RTRS_CLT_H
+
+#include <linux/tracepoint.h>
+
+struct rtrs_clt_path;
+struct rtrs_clt_sess;
+
+TRACE_DEFINE_ENUM(RTRS_CLT_CONNECTING);
+TRACE_DEFINE_ENUM(RTRS_CLT_CONNECTING_ERR);
+TRACE_DEFINE_ENUM(RTRS_CLT_RECONNECTING);
+TRACE_DEFINE_ENUM(RTRS_CLT_CONNECTED);
+TRACE_DEFINE_ENUM(RTRS_CLT_CLOSING);
+TRACE_DEFINE_ENUM(RTRS_CLT_CLOSED);
+TRACE_DEFINE_ENUM(RTRS_CLT_DEAD);
+
+#define show_rtrs_clt_state(x) \
+ __print_symbolic(x, \
+ { RTRS_CLT_CONNECTING, "CONNECTING" }, \
+ { RTRS_CLT_CONNECTING_ERR, "CONNECTING_ERR" }, \
+ { RTRS_CLT_RECONNECTING, "RECONNECTING" }, \
+ { RTRS_CLT_CONNECTED, "CONNECTED" }, \
+ { RTRS_CLT_CLOSING, "CLOSING" }, \
+ { RTRS_CLT_CLOSED, "CLOSED" }, \
+ { RTRS_CLT_DEAD, "DEAD" })
+
+DECLARE_EVENT_CLASS(rtrs_clt_conn_class,
+ TP_PROTO(struct rtrs_clt_path *clt_path),
+
+ TP_ARGS(clt_path),
+
+ TP_STRUCT__entry(
+ __field(int, state)
+ __field(int, reconnect_attempts)
+ __field(int, max_reconnect_attempts)
+ __field(int, fail_cnt)
+ __field(int, success_cnt)
+ __array(char, sessname, NAME_MAX)
+ ),
+
+ TP_fast_assign(
+ struct rtrs_clt_sess *clt = clt_path->clt;
+
+ __entry->state = clt_path->state;
+ __entry->reconnect_attempts = clt_path->reconnect_attempts;
+ __entry->max_reconnect_attempts = clt->max_reconnect_attempts;
+ __entry->fail_cnt = clt_path->stats->reconnects.fail_cnt;
+ __entry->success_cnt = clt_path->stats->reconnects.successful_cnt;
+ memcpy(__entry->sessname, kobject_name(&clt_path->kobj), NAME_MAX);
+ ),
+
+ TP_printk("RTRS-CLT: sess='%s' state=%s attempts='%d' max-attempts='%d' fail='%d' success='%d'",
+ __entry->sessname,
+ show_rtrs_clt_state(__entry->state),
+ __entry->reconnect_attempts,
+ __entry->max_reconnect_attempts,
+ __entry->fail_cnt,
+ __entry->success_cnt
+ )
+);
+
+#define DEFINE_CLT_CONN_EVENT(name) \
+DEFINE_EVENT(rtrs_clt_conn_class, rtrs_##name, \
+ TP_PROTO(struct rtrs_clt_path *clt_path), \
+ TP_ARGS(clt_path))
+
+DEFINE_CLT_CONN_EVENT(clt_reconnect_work);
+DEFINE_CLT_CONN_EVENT(clt_close_conns);
+DEFINE_CLT_CONN_EVENT(rdma_error_recovery);
+
+#endif /* _TRACE_RTRS_CLT_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE rtrs-clt-trace
+#include <trace/define_trace.h>
+
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
new file mode 100644
index 000000000000..71387811b281
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -0,0 +1,3207 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/module.h>
+#include <linux/rculist.h>
+#include <linux/random.h>
+
+#include "rtrs-clt.h"
+#include "rtrs-log.h"
+#include "rtrs-clt-trace.h"
+
+#define RTRS_CONNECT_TIMEOUT_MS 30000
+/*
+ * Wait a bit before trying to reconnect after a failure,
+ * in order to give the server time to finish its cleanup;
+ * otherwise reconnect attempts fail as "false positives"
+ */
+#define RTRS_RECONNECT_BACKOFF 1000
+/*
+ * Wait for additional random time between 0 and 8 seconds
+ * before starting to reconnect to avoid clients reconnecting
+ * all at once in case of a major network outage
+ */
+#define RTRS_RECONNECT_SEED 8
+
+#define FIRST_CONN 0x01
+/* limit to 128 * 4k = 512k max IO */
+#define RTRS_MAX_SEGMENTS 128
+
+MODULE_DESCRIPTION("RDMA Transport Client");
+MODULE_LICENSE("GPL");
+
+static const struct rtrs_rdma_dev_pd_ops dev_pd_ops;
+static struct rtrs_rdma_dev_pd dev_pd = {
+ .ops = &dev_pd_ops
+};
+
+static struct workqueue_struct *rtrs_wq;
+static const struct class rtrs_clt_dev_class = {
+ .name = "rtrs-client",
+};
+
+static inline bool rtrs_clt_is_connected(const struct rtrs_clt_sess *clt)
+{
+ struct rtrs_clt_path *clt_path;
+ bool connected = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry)
+ if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED) {
+ connected = true;
+ break;
+ }
+ rcu_read_unlock();
+
+ return connected;
+}
+
+static struct rtrs_permit *
+__rtrs_get_permit(struct rtrs_clt_sess *clt, enum rtrs_clt_con_type con_type)
+{
+ size_t max_depth = clt->queue_depth;
+ struct rtrs_permit *permit;
+ int bit;
+
+ /*
+ * Adapted from null_blk get_tag(). Callers from different cpus may
+ * grab the same bit, since find_first_zero_bit is not atomic.
+ * But then the test_and_set_bit_lock will fail for all the
+ * callers but one, so that they will loop again.
+ * This way an explicit spinlock is not required.
+ */
+ do {
+ bit = find_first_zero_bit(clt->permits_map, max_depth);
+ if (bit >= max_depth)
+ return NULL;
+ } while (test_and_set_bit_lock(bit, clt->permits_map));
+
+ permit = get_permit(clt, bit);
+ WARN_ON(permit->mem_id != bit);
+ permit->cpu_id = raw_smp_processor_id();
+ permit->con_type = con_type;
+
+ return permit;
+}
+
+static inline void __rtrs_put_permit(struct rtrs_clt_sess *clt,
+ struct rtrs_permit *permit)
+{
+ clear_bit_unlock(permit->mem_id, clt->permits_map);
+}
+
+/**
+ * rtrs_clt_get_permit() - allocates permit for future RDMA operation
+ * @clt: Current session
+ * @con_type: Type of connection to use with the permit
+ * @can_wait: Wait type
+ *
+ * Description:
+ * Allocates permit for the following RDMA operation. Permit is used
+ * to preallocate all resources and to propagate memory pressure
+ * up earlier.
+ *
+ * Context:
+ * Can sleep if @can_wait == RTRS_PERMIT_WAIT
+ */
+struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt_sess *clt,
+ enum rtrs_clt_con_type con_type,
+ enum wait_type can_wait)
+{
+ struct rtrs_permit *permit;
+ DEFINE_WAIT(wait);
+
+ permit = __rtrs_get_permit(clt, con_type);
+ if (permit || !can_wait)
+ return permit;
+
+ do {
+ prepare_to_wait(&clt->permits_wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+ permit = __rtrs_get_permit(clt, con_type);
+ if (permit)
+ break;
+
+ io_schedule();
+ } while (1);
+
+ finish_wait(&clt->permits_wait, &wait);
+
+ return permit;
+}
+EXPORT_SYMBOL(rtrs_clt_get_permit);
+
+/**
+ * rtrs_clt_put_permit() - puts allocated permit
+ * @clt: Current session
+ * @permit: Permit to be freed
+ *
+ * Context:
+ * Does not matter
+ */
+void rtrs_clt_put_permit(struct rtrs_clt_sess *clt,
+ struct rtrs_permit *permit)
+{
+ if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map)))
+ return;
+
+ __rtrs_put_permit(clt, permit);
+
+ /*
+ * rtrs_clt_get_permit() adds itself to the &clt->permits_wait list
+ * before calling schedule(). So if rtrs_clt_get_permit() is sleeping
+ * it must have added itself to &clt->permits_wait before
+ * __rtrs_put_permit() finished.
+ * Hence it is safe to guard wake_up() with a waitqueue_active() test.
+ */
+ if (waitqueue_active(&clt->permits_wait))
+ wake_up(&clt->permits_wait);
+}
+EXPORT_SYMBOL(rtrs_clt_put_permit);
+
+/**
+ * rtrs_permit_to_clt_con() - returns RDMA connection pointer by the permit
+ * @clt_path: client path pointer
+ * @permit: permit for the allocation of the RDMA buffer
+ * Note:
+ * IO connections start from 1.
+ * Connection 0 is reserved for user messages.
+ */
+static
+struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_path *clt_path,
+ struct rtrs_permit *permit)
+{
+ int id = 0;
+
+ if (permit->con_type == RTRS_IO_CON)
+ id = (permit->cpu_id % (clt_path->s.irq_con_num - 1)) + 1;
+
+ return to_clt_con(clt_path->s.con[id]);
+}
+
+/**
+ * rtrs_clt_change_state() - change the session state through session state
+ * machine.
+ *
+ * @clt_path: client path to change the state of.
+ * @new_state: state to change to.
+ *
+ * Returns true if the path's state is changed to the new state, otherwise returns false.
+ *
+ * Locks:
+ * state_wq lock must be held.
+ */
+static bool rtrs_clt_change_state(struct rtrs_clt_path *clt_path,
+ enum rtrs_clt_state new_state)
+{
+ enum rtrs_clt_state old_state;
+ bool changed = false;
+
+ lockdep_assert_held(&clt_path->state_wq.lock);
+
+ old_state = clt_path->state;
+ switch (new_state) {
+ case RTRS_CLT_CONNECTING:
+ switch (old_state) {
+ case RTRS_CLT_RECONNECTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_RECONNECTING:
+ switch (old_state) {
+ case RTRS_CLT_CONNECTED:
+ case RTRS_CLT_CONNECTING_ERR:
+ case RTRS_CLT_CLOSED:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_CONNECTED:
+ switch (old_state) {
+ case RTRS_CLT_CONNECTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_CONNECTING_ERR:
+ switch (old_state) {
+ case RTRS_CLT_CONNECTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_CLOSING:
+ switch (old_state) {
+ case RTRS_CLT_CONNECTING:
+ case RTRS_CLT_CONNECTING_ERR:
+ case RTRS_CLT_RECONNECTING:
+ case RTRS_CLT_CONNECTED:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_CLOSED:
+ switch (old_state) {
+ case RTRS_CLT_CLOSING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_DEAD:
+ switch (old_state) {
+ case RTRS_CLT_CLOSED:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ if (changed) {
+ clt_path->state = new_state;
+ wake_up_locked(&clt_path->state_wq);
+ }
+
+ return changed;
+}
+
+static bool rtrs_clt_change_state_from_to(struct rtrs_clt_path *clt_path,
+ enum rtrs_clt_state old_state,
+ enum rtrs_clt_state new_state)
+{
+ bool changed = false;
+
+ spin_lock_irq(&clt_path->state_wq.lock);
+ if (clt_path->state == old_state)
+ changed = rtrs_clt_change_state(clt_path, new_state);
+ spin_unlock_irq(&clt_path->state_wq.lock);
+
+ return changed;
+}
+
+static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path);
+static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+
+ trace_rtrs_rdma_error_recovery(clt_path);
+
+ if (rtrs_clt_change_state_from_to(clt_path,
+ RTRS_CLT_CONNECTED,
+ RTRS_CLT_RECONNECTING)) {
+ queue_work(rtrs_wq, &clt_path->err_recovery_work);
+ } else {
+ /*
+ * An error can happen just while establishing a new connection,
+ * so notify the waiter with the error state; the waiter is
+ * responsible for cleaning up the rest and reconnecting if needed.
+ */
+ rtrs_clt_change_state_from_to(clt_path,
+ RTRS_CLT_CONNECTING,
+ RTRS_CLT_CONNECTING_ERR);
+ }
+}
+
+static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
+
+ if (wc->status != IB_WC_SUCCESS) {
+ rtrs_err_rl(con->c.path, "Failed IB_WR_REG_MR: %s\n",
+ ib_wc_status_msg(wc->status));
+ rtrs_rdma_error_recovery(con);
+ }
+}
+
+static struct ib_cqe fast_reg_cqe = {
+ .done = rtrs_clt_fast_reg_done
+};
+
+static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
+ bool notify, bool can_wait);
+
+static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_io_req *req =
+ container_of(wc->wr_cqe, typeof(*req), inv_cqe);
+ struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
+
+ if (wc->status != IB_WC_SUCCESS) {
+ rtrs_err_rl(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n",
+ ib_wc_status_msg(wc->status));
+ rtrs_rdma_error_recovery(con);
+ }
+ req->mr->need_inval = false;
+ if (req->need_inv_comp)
+ complete(&req->inv_comp);
+ else
+ /* Complete request from INV callback */
+ complete_rdma_req(req, req->inv_errno, true, false);
+}
+
+static int rtrs_inv_rkey(struct rtrs_clt_io_req *req)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct ib_send_wr wr = {
+ .opcode = IB_WR_LOCAL_INV,
+ .wr_cqe = &req->inv_cqe,
+ .send_flags = IB_SEND_SIGNALED,
+ .ex.invalidate_rkey = req->mr->rkey,
+ };
+ req->inv_cqe.done = rtrs_clt_inv_rkey_done;
+
+ return ib_post_send(con->c.qp, &wr, NULL);
+}
+
+static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
+ bool notify, bool can_wait)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct rtrs_clt_path *clt_path;
+ int err;
+
+ if (!req->in_use)
+ return;
+ if (WARN_ON(!req->con))
+ return;
+ clt_path = to_clt_path(con->c.path);
+
+ if (req->sg_cnt) {
+ if (req->mr->need_inval) {
+ /*
+ * We are here to invalidate read/write requests
+ * ourselves. In the normal scenario the server sends
+ * an INV for all read requests and we do the local
+ * invalidate for write requests ourselves, but
+ * we are here, thus three things could happen:
+ *
+ * 1. this is failover, when errno != 0
+ * and can_wait == 1,
+ *
+ * 2. something totally bad happened and
+ * server forgot to send INV, so we
+ * should do that ourselves.
+ *
+ * 3. write request finishes, we need to do local
+ * invalidate
+ */
+
+ if (can_wait) {
+ req->need_inv_comp = true;
+ } else {
+ /* This should be IO path, so always notify */
+ WARN_ON(!notify);
+ /* Save errno for INV callback */
+ req->inv_errno = errno;
+ }
+
+ refcount_inc(&req->ref);
+ err = rtrs_inv_rkey(req);
+ if (err) {
+ rtrs_err_rl(con->c.path, "Send INV WR key=%#x: %d\n",
+ req->mr->rkey, err);
+ } else if (can_wait) {
+ wait_for_completion(&req->inv_comp);
+ }
+ if (!refcount_dec_and_test(&req->ref))
+ return;
+ }
+ ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ }
+ if (!refcount_dec_and_test(&req->ref))
+ return;
+ if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ atomic_dec(&clt_path->stats->inflight);
+
+ req->in_use = false;
+ req->con = NULL;
+
+ if (errno) {
+ rtrs_err_rl(con->c.path,
+ "IO %s request failed: error=%d path=%s [%s:%u] notify=%d\n",
+ req->dir == DMA_TO_DEVICE ? "write" : "read", errno,
+ kobject_name(&clt_path->kobj), clt_path->hca_name,
+ clt_path->hca_port, notify);
+ }
+
+ if (notify)
+ req->conf(req->priv, errno);
+}
+
+static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
+ struct rtrs_clt_io_req *req,
+ struct rtrs_rbuf *rbuf, u32 off,
+ u32 imm, struct ib_send_wr *wr)
+{
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ enum ib_send_flags flags;
+ struct ib_sge sge;
+
+ if (!req->sg_size) {
+ rtrs_wrn(con->c.path,
+ "Doing RDMA Write failed, no data supplied\n");
+ return -EINVAL;
+ }
+
+ /* user data and user message in the first list element */
+ sge.addr = req->iu->dma_addr;
+ sge.length = req->sg_size;
+ sge.lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
+
+ /*
+ * From time to time we have to post signalled sends,
+ * or the send queue will fill up and only a QP reset can help.
+ */
+ flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
+ 0 : IB_SEND_SIGNALED;
+
+ ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
+ req->iu->dma_addr,
+ req->sg_size, DMA_TO_DEVICE);
+
+ return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1,
+ rbuf->rkey, rbuf->addr + off,
+ imm, flags, wr, NULL);
+}
+
+static void process_io_rsp(struct rtrs_clt_path *clt_path, u32 msg_id,
+ s16 errno, bool w_inval)
+{
+ struct rtrs_clt_io_req *req;
+
+ if (WARN_ON(msg_id >= clt_path->queue_depth))
+ return;
+
+ req = &clt_path->reqs[msg_id];
+ /* Drop need_inv if server responded with send with invalidation */
+ req->mr->need_inval &= !w_inval;
+ complete_rdma_req(req, errno, true, false);
+}
+
+static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
+{
+ struct rtrs_iu *iu;
+ int err;
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+
+ WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0);
+ iu = container_of(wc->wr_cqe, struct rtrs_iu,
+ cqe);
+ err = rtrs_iu_post_recv(&con->c, iu);
+ if (err) {
+ rtrs_err(con->c.path, "post iu failed %d\n", err);
+ rtrs_rdma_error_recovery(con);
+ }
+}
+
+static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
+{
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_msg_rkey_rsp *msg;
+ u32 imm_type, imm_payload;
+ bool w_inval = false;
+ struct rtrs_iu *iu;
+ u32 buf_id;
+ int err;
+
+ WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0);
+
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+
+ if (wc->byte_len < sizeof(*msg)) {
+ rtrs_err(con->c.path, "rkey response is malformed: size %d\n",
+ wc->byte_len);
+ goto out;
+ }
+ ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr,
+ iu->size, DMA_FROM_DEVICE);
+ msg = iu->buf;
+ if (le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP) {
+ rtrs_err(clt_path->clt,
+ "rkey response is malformed: type %d\n",
+ le16_to_cpu(msg->type));
+ goto out;
+ }
+ buf_id = le16_to_cpu(msg->buf_id);
+ if (WARN_ON(buf_id >= clt_path->queue_depth))
+ goto out;
+
+ rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload);
+ if (imm_type == RTRS_IO_RSP_IMM ||
+ imm_type == RTRS_IO_RSP_W_INV_IMM) {
+ u32 msg_id;
+
+ w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
+ rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
+
+ if (WARN_ON(buf_id != msg_id))
+ goto out;
+ clt_path->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey);
+ process_io_rsp(clt_path, msg_id, err, w_inval);
+ }
+ ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, iu->dma_addr,
+ iu->size, DMA_FROM_DEVICE);
+ return rtrs_clt_recv_done(con, wc);
+out:
+ rtrs_rdma_error_recovery(con);
+}
+
+static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
+
+static struct ib_cqe io_comp_cqe = {
+ .done = rtrs_clt_rdma_done
+};
+
+/*
+ * Post x2 empty WRs: first is for this RDMA with IMM,
+ * second is for RECV with INV, which happened earlier.
+ */
+static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
+{
+ struct ib_recv_wr wr_arr[2], *wr;
+ int i;
+
+ memset(wr_arr, 0, sizeof(wr_arr));
+ for (i = 0; i < ARRAY_SIZE(wr_arr); i++) {
+ wr = &wr_arr[i];
+ wr->wr_cqe = cqe;
+ if (i)
+ /* Chain backwards */
+ wr->next = &wr_arr[i - 1];
+ }
+
+ return ib_post_recv(con->qp, wr, NULL);
+}
+
+static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ u32 imm_type, imm_payload;
+ bool w_inval = false;
+ int err;
+
+ if (wc->status != IB_WC_SUCCESS) {
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ rtrs_err(clt_path->clt, "RDMA failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ rtrs_rdma_error_recovery(con);
+ }
+ return;
+ }
+ rtrs_clt_update_wc_stats(con);
+
+ switch (wc->opcode) {
+ case IB_WC_RECV_RDMA_WITH_IMM:
+ /*
+ * post_recv() RDMA write completions of IO reqs (read/write)
+ * and hb
+ */
+ if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done))
+ return;
+ clt_path->s.hb_missed_cnt = 0;
+ rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
+ &imm_type, &imm_payload);
+ if (imm_type == RTRS_IO_RSP_IMM ||
+ imm_type == RTRS_IO_RSP_W_INV_IMM) {
+ u32 msg_id;
+
+ w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
+ rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
+
+ process_io_rsp(clt_path, msg_id, err, w_inval);
+ } else if (imm_type == RTRS_HB_MSG_IMM) {
+ WARN_ON(con->c.cid);
+ rtrs_send_hb_ack(&clt_path->s);
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
+ return rtrs_clt_recv_done(con, wc);
+ } else if (imm_type == RTRS_HB_ACK_IMM) {
+ WARN_ON(con->c.cid);
+ clt_path->s.hb_cur_latency =
+ ktime_sub(ktime_get(), clt_path->s.hb_last_sent);
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
+ return rtrs_clt_recv_done(con, wc);
+ } else {
+ rtrs_wrn(con->c.path, "Unknown IMM type %u\n",
+ imm_type);
+ }
+ if (w_inval)
+ /*
+ * Post x2 empty WRs: first is for this RDMA with IMM,
+ * second is for RECV with INV, which happened earlier.
+ */
+ err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe);
+ else
+ err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
+ if (err) {
+ rtrs_err(con->c.path, "rtrs_post_recv_empty(): %d\n",
+ err);
+ rtrs_rdma_error_recovery(con);
+ }
+ break;
+ case IB_WC_RECV:
+ /*
+ * Key invalidations from server side
+ */
+ clt_path->s.hb_missed_cnt = 0;
+ WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
+ wc->wc_flags & IB_WC_WITH_IMM));
+ WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) {
+ if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
+ return rtrs_clt_recv_done(con, wc);
+
+ return rtrs_clt_rkey_rsp_done(con, wc);
+ }
+ break;
+ case IB_WC_RDMA_WRITE:
+ /*
+ * post_send() RDMA write completions of IO reqs (read/write)
+ * and hb.
+ */
+ break;
+
+ default:
+ rtrs_wrn(clt_path->clt, "Unexpected WC type: %d\n", wc->opcode);
+ return;
+ }
+}
+
+static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
+{
+ int err, i;
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+
+ for (i = 0; i < q_size; i++) {
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) {
+ struct rtrs_iu *iu = &con->rsp_ius[i];
+
+ err = rtrs_iu_post_recv(&con->c, iu);
+ } else {
+ err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
+ }
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int post_recv_path(struct rtrs_clt_path *clt_path)
+{
+ size_t q_size = 0;
+ int err, cid;
+
+ for (cid = 0; cid < clt_path->s.con_num; cid++) {
+ if (cid == 0)
+ q_size = SERVICE_CON_QUEUE_DEPTH;
+ else
+ q_size = clt_path->queue_depth;
+
+ /*
+ * x2 for RDMA read responses + FR key invalidations,
+ * RDMA writes do not require any FR registrations.
+ */
+ q_size *= 2;
+
+ err = post_recv_io(to_clt_con(clt_path->s.con[cid]), q_size);
+ if (err) {
+ rtrs_err(clt_path->clt, "post_recv_io(), err: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+struct path_it {
+ int i;
+ struct list_head skip_list;
+ struct rtrs_clt_sess *clt;
+ struct rtrs_clt_path *(*next_path)(struct path_it *it);
+};
+
+/*
+ * rtrs_clt_get_next_path_or_null - get clt path from the list or return NULL
+ * @head: the head for the list.
+ * @clt_path: The element to take the next clt_path from.
+ *
+ * The next clt path is returned in round-robin fashion, i.e. the head will be
+ * skipped; if the list is observed as empty, NULL will be returned.
+ *
+ * This function may safely run concurrently with the _rcu list-mutation
+ * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
+ */
+static inline struct rtrs_clt_path *
+rtrs_clt_get_next_path_or_null(struct list_head *head, struct rtrs_clt_path *clt_path)
+{
+ return list_next_or_null_rcu(head, &clt_path->s.entry, typeof(*clt_path), s.entry) ?:
+ list_next_or_null_rcu(head,
+ READ_ONCE((&clt_path->s.entry)->next),
+ typeof(*clt_path), s.entry);
+}
+
+/**
+ * get_next_path_rr() - Returns path in round-robin fashion.
+ * @it: the path pointer
+ *
+ * Related to @MP_POLICY_RR
+ *
+ * Locks:
+ * rcu_read_lock() must be held.
+ */
+static struct rtrs_clt_path *get_next_path_rr(struct path_it *it)
+{
+ struct rtrs_clt_path __rcu **ppcpu_path;
+ struct rtrs_clt_path *path;
+ struct rtrs_clt_sess *clt;
+
+ /*
+ * Assert that rcu lock must be held
+ */
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu read lock held");
+
+ clt = it->clt;
+
+ /*
+ * Here we use two RCU objects: @paths_list and @pcpu_path
+ * pointer. See rtrs_clt_remove_path_from_arr() for details
+ * how that is handled.
+ */
+
+ ppcpu_path = this_cpu_ptr(clt->pcpu_path);
+ path = rcu_dereference(*ppcpu_path);
+ if (!path)
+ path = list_first_or_null_rcu(&clt->paths_list,
+ typeof(*path), s.entry);
+ else
+ path = rtrs_clt_get_next_path_or_null(&clt->paths_list, path);
+
+ rcu_assign_pointer(*ppcpu_path, path);
+
+ return path;
+}
+
+/**
+ * get_next_path_min_inflight() - Returns path with minimal inflight count.
+ * @it: the path pointer
+ *
+ * Related to @MP_POLICY_MIN_INFLIGHT
+ *
+ * Locks:
+ * rcu_read_lock() must be held.
+ */
+static struct rtrs_clt_path *get_next_path_min_inflight(struct path_it *it)
+{
+ struct rtrs_clt_path *min_path = NULL;
+ struct rtrs_clt_sess *clt = it->clt;
+ struct rtrs_clt_path *clt_path;
+ int min_inflight = INT_MAX;
+ int inflight;
+
+ list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) {
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
+ continue;
+
+ if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry)))
+ continue;
+
+ inflight = atomic_read(&clt_path->stats->inflight);
+
+ if (inflight < min_inflight) {
+ min_inflight = inflight;
+ min_path = clt_path;
+ }
+ }
+
+ /*
+ * add the path to the skip list, so that next time we can get
+ * a different one
+ */
+ if (min_path)
+ list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
+
+ return min_path;
+}
+
+/**
+ * get_next_path_min_latency() - Returns path with minimal latency.
+ * @it: the path pointer
+ *
+ * Return: a path with the lowest latency or NULL if all paths are tried
+ *
+ * Locks:
+ * rcu_read_lock() must be held.
+ *
+ * Related to @MP_POLICY_MIN_LATENCY
+ *
+ * This DOES skip an already-tried path.
+ * There is a skip-list to skip a path if the path has already been tried and
+ * failed. It will try the minimum latency path and then the second minimum
+ * latency path and so on. Finally it will return NULL if all paths have been
+ * tried. Therefore the caller MUST check whether the returned path is NULL
+ * and trigger the IO error.
+ */
+static struct rtrs_clt_path *get_next_path_min_latency(struct path_it *it)
+{
+ struct rtrs_clt_path *min_path = NULL;
+ struct rtrs_clt_sess *clt = it->clt;
+ struct rtrs_clt_path *clt_path;
+ ktime_t min_latency = KTIME_MAX;
+ ktime_t latency;
+
+ list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) {
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
+ continue;
+
+ if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry)))
+ continue;
+
+ latency = clt_path->s.hb_cur_latency;
+
+ if (latency < min_latency) {
+ min_latency = latency;
+ min_path = clt_path;
+ }
+ }
+
+ /*
+ * add the path to the skip list, so that next time we can get
+ * a different one
+ */
+ if (min_path)
+ list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
+
+ return min_path;
+}
+
+static inline void path_it_init(struct path_it *it, struct rtrs_clt_sess *clt)
+{
+ INIT_LIST_HEAD(&it->skip_list);
+ it->clt = clt;
+ it->i = 0;
+
+ if (clt->mp_policy == MP_POLICY_RR)
+ it->next_path = get_next_path_rr;
+ else if (clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ it->next_path = get_next_path_min_inflight;
+ else
+ it->next_path = get_next_path_min_latency;
+}
+
+static inline void path_it_deinit(struct path_it *it)
+{
+ struct list_head *skip, *tmp;
+ /*
+ * The skip_list is used only for the MIN_INFLIGHT and MIN_LATENCY policies.
+ * We need to remove paths from it, so that the next IO can insert
+ * paths (->mp_skip_entry) into the skip_list again.
+ */
+ list_for_each_safe(skip, tmp, &it->skip_list)
+ list_del_init(skip);
+}
+
+/**
+ * rtrs_clt_init_req() - Initialize an rtrs_clt_io_req holding information
+ * about an inflight IO.
+ * The user buffer holding user control message (not data) is copied into
+ * the corresponding buffer of rtrs_iu (req->iu->buf), which later on will
+ * also hold the control message of rtrs.
+ * @req: an io request holding information about IO.
+ * @clt_path: client path
+ * @conf: confirmation callback function to notify the upper layer.
+ * @permit: permit for allocation of RDMA remote buffer
+ * @priv: private pointer
+ * @vec: kernel vector containing control message
+ * @usr_len: length of the user message
+ * @sg: scatter list for IO data
+ * @sg_cnt: number of scatter list entries
+ * @data_len: length of the IO data
+ * @dir: direction of the IO.
+ */
+static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
+ struct rtrs_clt_path *clt_path,
+ void (*conf)(void *priv, int errno),
+ struct rtrs_permit *permit, void *priv,
+ const struct kvec *vec, size_t usr_len,
+ struct scatterlist *sg, size_t sg_cnt,
+ size_t data_len, int dir)
+{
+ struct iov_iter iter;
+ size_t len;
+
+ req->permit = permit;
+ req->in_use = true;
+ req->usr_len = usr_len;
+ req->data_len = data_len;
+ req->sglist = sg;
+ req->sg_cnt = sg_cnt;
+ req->priv = priv;
+ req->dir = dir;
+ req->con = rtrs_permit_to_clt_con(clt_path, permit);
+ req->conf = conf;
+ req->mr->need_inval = false;
+ req->need_inv_comp = false;
+ req->inv_errno = 0;
+ refcount_set(&req->ref, 1);
+ req->mp_policy = clt_path->clt->mp_policy;
+
+ iov_iter_kvec(&iter, ITER_SOURCE, vec, 1, usr_len);
+ len = _copy_from_iter(req->iu->buf, usr_len, &iter);
+ WARN_ON(len != usr_len);
+
+ reinit_completion(&req->inv_comp);
+}
+
+static struct rtrs_clt_io_req *
+rtrs_clt_get_req(struct rtrs_clt_path *clt_path,
+ void (*conf)(void *priv, int errno),
+ struct rtrs_permit *permit, void *priv,
+ const struct kvec *vec, size_t usr_len,
+ struct scatterlist *sg, size_t sg_cnt,
+ size_t data_len, int dir)
+{
+ struct rtrs_clt_io_req *req;
+
+ req = &clt_path->reqs[permit->mem_id];
+ rtrs_clt_init_req(req, clt_path, conf, permit, priv, vec, usr_len,
+ sg, sg_cnt, data_len, dir);
+ return req;
+}
+
+static struct rtrs_clt_io_req *
+rtrs_clt_get_copy_req(struct rtrs_clt_path *alive_path,
+ struct rtrs_clt_io_req *fail_req)
+{
+ struct rtrs_clt_io_req *req;
+ struct kvec vec = {
+ .iov_base = fail_req->iu->buf,
+ .iov_len = fail_req->usr_len
+ };
+
+ req = &alive_path->reqs[fail_req->permit->mem_id];
+ rtrs_clt_init_req(req, alive_path, fail_req->conf, fail_req->permit,
+ fail_req->priv, &vec, fail_req->usr_len,
+ fail_req->sglist, fail_req->sg_cnt,
+ fail_req->data_len, fail_req->dir);
+ return req;
+}
+
+static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
+ struct rtrs_clt_io_req *req,
+ struct rtrs_rbuf *rbuf, bool fr_en,
+ u32 count, u32 size, u32 imm,
+ struct ib_send_wr *wr,
+ struct ib_send_wr *tail)
+{
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct ib_sge *sge = req->sge;
+ enum ib_send_flags flags;
+ struct scatterlist *sg;
+ size_t num_sge;
+ int i;
+ struct ib_send_wr *ptail = NULL;
+
+ if (fr_en) {
+ i = 0;
+ sge[i].addr = req->mr->iova;
+ sge[i].length = req->mr->length;
+ sge[i].lkey = req->mr->lkey;
+ i++;
+ num_sge = 2;
+ ptail = tail;
+ } else {
+ for_each_sg(req->sglist, sg, count, i) {
+ sge[i].addr = sg_dma_address(sg);
+ sge[i].length = sg_dma_len(sg);
+ sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
+ }
+ num_sge = 1 + count;
+ }
+ sge[i].addr = req->iu->dma_addr;
+ sge[i].length = size;
+ sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
+
+ /*
+ * From time to time we have to post signalled sends,
+ * or the send queue will fill up and only a QP reset can help.
+ */
+ flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
+ 0 : IB_SEND_SIGNALED;
+
+ ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
+ req->iu->dma_addr,
+ size, DMA_TO_DEVICE);
+
+ return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge,
+ rbuf->rkey, rbuf->addr, imm,
+ flags, wr, ptail);
+}
+
+static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count)
+{
+ int nr;
+
+ /* Align the MR to a 4K page size to match the block virt boundary */
+ nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K);
+ if (nr != count)
+ return nr < 0 ? nr : -EINVAL;
+ ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
+
+ return nr;
+}
+
+static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_clt_path *clt_path = to_clt_path(s);
+ struct rtrs_msg_rdma_write *msg;
+
+ struct rtrs_rbuf *rbuf;
+ int ret, count = 0;
+ u32 imm, buf_id;
+ struct ib_reg_wr rwr;
+ struct ib_send_wr *wr = NULL;
+ bool fr_en = false;
+
+ const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
+
+ if (tsize > clt_path->chunk_size) {
+ rtrs_wrn(s, "Write request failed, size too big %zu > %d\n",
+ tsize, clt_path->chunk_size);
+ return -EMSGSIZE;
+ }
+ if (req->sg_cnt) {
+ count = ib_dma_map_sg(clt_path->s.dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ if (!count) {
+ rtrs_wrn(s, "Write request failed, map failed\n");
+ return -EINVAL;
+ }
+ }
+ /* put rtrs msg after sg and user message */
+ msg = req->iu->buf + req->usr_len;
+ msg->type = cpu_to_le16(RTRS_MSG_WRITE);
+ msg->usr_len = cpu_to_le16(req->usr_len);
+
+ /* rtrs message on server side will be after user data and message */
+ imm = req->permit->mem_off + req->data_len + req->usr_len;
+ imm = rtrs_to_io_req_imm(imm);
+ buf_id = req->permit->mem_id;
+ req->sg_size = tsize;
+ rbuf = &clt_path->rbufs[buf_id];
+
+ if (count) {
+ ret = rtrs_map_sg_fr(req, count);
+ if (ret < 0) {
+ rtrs_err_rl(s,
+ "Write request failed, failed to map fast reg. data, err: %d\n",
+ ret);
+ ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ return ret;
+ }
+ rwr = (struct ib_reg_wr) {
+ .wr.opcode = IB_WR_REG_MR,
+ .wr.wr_cqe = &fast_reg_cqe,
+ .mr = req->mr,
+ .key = req->mr->rkey,
+ .access = (IB_ACCESS_LOCAL_WRITE),
+ };
+ wr = &rwr.wr;
+ fr_en = true;
+ req->mr->need_inval = true;
+ }
+ /*
+ * Update stats now, after request is successfully sent it is not
+ * safe anymore to touch it.
+ */
+ rtrs_clt_update_all_stats(req, WRITE);
+
+ ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, count,
+ req->usr_len + sizeof(*msg),
+ imm, wr, NULL);
+ if (ret) {
+ rtrs_err_rl(s,
+ "Write request failed: error=%d path=%s [%s:%u]\n",
+ ret, kobject_name(&clt_path->kobj), clt_path->hca_name,
+ clt_path->hca_port);
+ if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ atomic_dec(&clt_path->stats->inflight);
+ if (req->mr->need_inval) {
+ req->mr->need_inval = false;
+ refcount_dec(&req->ref);
+ }
+ if (req->sg_cnt)
+ ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ }
+
+ return ret;
+}
+
+static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_clt_path *clt_path = to_clt_path(s);
+ struct rtrs_msg_rdma_read *msg;
+ struct rtrs_ib_dev *dev = clt_path->s.dev;
+
+ struct ib_reg_wr rwr;
+ struct ib_send_wr *wr = NULL;
+
+ int ret, count = 0;
+ u32 imm, buf_id;
+
+ const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
+
+ if (tsize > clt_path->chunk_size) {
+ rtrs_wrn(s,
+ "Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n",
+ tsize, clt_path->chunk_size);
+ return -EMSGSIZE;
+ }
+
+ if (req->sg_cnt) {
+ count = ib_dma_map_sg(dev->ib_dev, req->sglist, req->sg_cnt,
+ req->dir);
+ if (!count) {
+ rtrs_wrn(s,
+ "Read request failed, dma map failed\n");
+ return -EINVAL;
+ }
+ }
+ /* put our message into req->buf after the user message */
+ msg = req->iu->buf + req->usr_len;
+ msg->type = cpu_to_le16(RTRS_MSG_READ);
+ msg->usr_len = cpu_to_le16(req->usr_len);
+
+ if (count) {
+ ret = rtrs_map_sg_fr(req, count);
+ if (ret < 0) {
+ rtrs_err_rl(s,
+ "Read request failed, failed to map fast reg. data, err: %d\n",
+ ret);
+ ib_dma_unmap_sg(dev->ib_dev, req->sglist, req->sg_cnt,
+ req->dir);
+ return ret;
+ }
+ rwr = (struct ib_reg_wr) {
+ .wr.opcode = IB_WR_REG_MR,
+ .wr.wr_cqe = &fast_reg_cqe,
+ .mr = req->mr,
+ .key = req->mr->rkey,
+ .access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE),
+ };
+ wr = &rwr.wr;
+
+ msg->sg_cnt = cpu_to_le16(1);
+ msg->flags = cpu_to_le16(RTRS_MSG_NEED_INVAL_F);
+
+ msg->desc[0].addr = cpu_to_le64(req->mr->iova);
+ msg->desc[0].key = cpu_to_le32(req->mr->rkey);
+ msg->desc[0].len = cpu_to_le32(req->mr->length);
+
+ /* Further invalidation is required */
+ req->mr->need_inval = !!RTRS_MSG_NEED_INVAL_F;
+
+ } else {
+ msg->sg_cnt = 0;
+ msg->flags = 0;
+ }
+ /*
+ * rtrs message will be after the space reserved for disk data and
+ * user message
+ */
+ imm = req->permit->mem_off + req->data_len + req->usr_len;
+ imm = rtrs_to_io_req_imm(imm);
+ buf_id = req->permit->mem_id;
+
+ req->sg_size = sizeof(*msg);
+ req->sg_size += le16_to_cpu(msg->sg_cnt) * sizeof(struct rtrs_sg_desc);
+ req->sg_size += req->usr_len;
+
+ /*
+ * Update stats now, after request is successfully sent it is not
+ * safe anymore to touch it.
+ */
+ rtrs_clt_update_all_stats(req, READ);
+
+ ret = rtrs_post_send_rdma(req->con, req, &clt_path->rbufs[buf_id],
+ req->data_len, imm, wr);
+ if (ret) {
+ rtrs_err_rl(s,
+ "Read request failed: error=%d path=%s [%s:%u]\n",
+ ret, kobject_name(&clt_path->kobj), clt_path->hca_name,
+ clt_path->hca_port);
+ if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ atomic_dec(&clt_path->stats->inflight);
+ req->mr->need_inval = false;
+ if (req->sg_cnt)
+ ib_dma_unmap_sg(dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ }
+
+ return ret;
+}
+
+/**
+ * rtrs_clt_failover_req() - Try to find an active path for a failed request
+ * @clt: clt context
+ * @fail_req: a failed io request.
+ */
+static int rtrs_clt_failover_req(struct rtrs_clt_sess *clt,
+ struct rtrs_clt_io_req *fail_req)
+{
+ struct rtrs_clt_path *alive_path;
+ struct rtrs_clt_io_req *req;
+ int err = -ECONNABORTED;
+ struct path_it it;
+
+ rcu_read_lock();
+ for (path_it_init(&it, clt);
+ (alive_path = it.next_path(&it)) && it.i < it.clt->paths_num;
+ it.i++) {
+ if (READ_ONCE(alive_path->state) != RTRS_CLT_CONNECTED)
+ continue;
+ req = rtrs_clt_get_copy_req(alive_path, fail_req);
+ if (req->dir == DMA_TO_DEVICE)
+ err = rtrs_clt_write_req(req);
+ else
+ err = rtrs_clt_read_req(req);
+ if (err) {
+ req->in_use = false;
+ continue;
+ }
+ /* Success path */
+ rtrs_clt_inc_failover_cnt(alive_path->stats);
+ break;
+ }
+ path_it_deinit(&it);
+ rcu_read_unlock();
+
+ return err;
+}
+
+static void fail_all_outstanding_reqs(struct rtrs_clt_path *clt_path)
+{
+ struct rtrs_clt_sess *clt = clt_path->clt;
+ struct rtrs_clt_io_req *req;
+ int i, err;
+
+ if (!clt_path->reqs)
+ return;
+ for (i = 0; i < clt_path->queue_depth; ++i) {
+ req = &clt_path->reqs[i];
+ if (!req->in_use)
+ continue;
+
+ /*
+ * Safely (without notification) complete failed request.
+		 * After completion this request is still usable and can
+		 * be failed over to another path.
+ */
+ complete_rdma_req(req, -ECONNABORTED, false, true);
+
+ err = rtrs_clt_failover_req(clt, req);
+ if (err)
+ /* Failover failed, notify anyway */
+ req->conf(req->priv, err);
+ }
+}
+
+static void free_path_reqs(struct rtrs_clt_path *clt_path)
+{
+ struct rtrs_clt_io_req *req;
+ int i;
+
+ if (!clt_path->reqs)
+ return;
+ for (i = 0; i < clt_path->queue_depth; ++i) {
+ req = &clt_path->reqs[i];
+ if (req->mr)
+ ib_dereg_mr(req->mr);
+ kfree(req->sge);
+ rtrs_iu_free(req->iu, clt_path->s.dev->ib_dev, 1);
+ }
+ kfree(clt_path->reqs);
+ clt_path->reqs = NULL;
+}
+
+static int alloc_path_reqs(struct rtrs_clt_path *clt_path)
+{
+ struct rtrs_clt_io_req *req;
+ int i, err = -ENOMEM;
+
+ clt_path->reqs = kcalloc(clt_path->queue_depth,
+ sizeof(*clt_path->reqs),
+ GFP_KERNEL);
+ if (!clt_path->reqs)
+ return -ENOMEM;
+
+ for (i = 0; i < clt_path->queue_depth; ++i) {
+ req = &clt_path->reqs[i];
+ req->iu = rtrs_iu_alloc(1, clt_path->max_hdr_size, GFP_KERNEL,
+ clt_path->s.dev->ib_dev,
+ DMA_TO_DEVICE,
+ rtrs_clt_rdma_done);
+ if (!req->iu)
+ goto out;
+
+ req->sge = kcalloc(2, sizeof(*req->sge), GFP_KERNEL);
+ if (!req->sge)
+ goto out;
+
+ req->mr = ib_alloc_mr(clt_path->s.dev->ib_pd,
+ IB_MR_TYPE_MEM_REG,
+ clt_path->max_pages_per_mr);
+ if (IS_ERR(req->mr)) {
+ err = PTR_ERR(req->mr);
+ pr_err("Failed to alloc clt_path->max_pages_per_mr %d: %pe\n",
+ clt_path->max_pages_per_mr, req->mr);
+ req->mr = NULL;
+ goto out;
+ }
+
+ init_completion(&req->inv_comp);
+ }
+
+ return 0;
+
+out:
+ free_path_reqs(clt_path);
+
+ return err;
+}
+
+static int alloc_permits(struct rtrs_clt_sess *clt)
+{
+ unsigned int chunk_bits;
+ int err, i;
+
+ clt->permits_map = bitmap_zalloc(clt->queue_depth, GFP_KERNEL);
+ if (!clt->permits_map) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+ clt->permits = kcalloc(clt->queue_depth, permit_size(clt), GFP_KERNEL);
+ if (!clt->permits) {
+ err = -ENOMEM;
+ goto err_map;
+ }
+ chunk_bits = ilog2(clt->queue_depth - 1) + 1;
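+	/*
+	 * Rough example: with a queue depth of 128, chunk_bits is 7, so the
+	 * chunk id takes the top 7 bits of the immediate payload and the
+	 * remaining low bits are left for the offset inside the chunk.
+	 */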
+ for (i = 0; i < clt->queue_depth; i++) {
+ struct rtrs_permit *permit;
+
+ permit = get_permit(clt, i);
+ permit->mem_id = i;
+ permit->mem_off = i << (MAX_IMM_PAYL_BITS - chunk_bits);
+ }
+
+ return 0;
+
+err_map:
+ bitmap_free(clt->permits_map);
+ clt->permits_map = NULL;
+out_err:
+ return err;
+}
+
+static void free_permits(struct rtrs_clt_sess *clt)
+{
+ if (clt->permits_map)
+ wait_event(clt->permits_wait,
+ bitmap_empty(clt->permits_map, clt->queue_depth));
+
+ bitmap_free(clt->permits_map);
+ clt->permits_map = NULL;
+ kfree(clt->permits);
+ clt->permits = NULL;
+}
+
+static void query_fast_reg_mode(struct rtrs_clt_path *clt_path)
+{
+ struct ib_device *ib_dev;
+ u64 max_pages_per_mr;
+ int mr_page_shift;
+
+ ib_dev = clt_path->s.dev->ib_dev;
+
+ /*
+ * Use the smallest page size supported by the HCA, down to a
+ * minimum of 4096 bytes. We're unlikely to build large sglists
+ * out of smaller entries.
+ */
+ mr_page_shift = max(12, ffs(ib_dev->attrs.page_size_cap) - 1);
+ max_pages_per_mr = ib_dev->attrs.max_mr_size;
+ do_div(max_pages_per_mr, (1ull << mr_page_shift));
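+	/* max_mr_size divided by the MR page size bounds the pages one MR can cover. */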
+ clt_path->max_pages_per_mr =
+ min3(clt_path->max_pages_per_mr, (u32)max_pages_per_mr,
+ ib_dev->attrs.max_fast_reg_page_list_len);
+ clt_path->clt->max_segments =
+ min(clt_path->max_pages_per_mr, clt_path->clt->max_segments);
+}
+
+static bool rtrs_clt_change_state_get_old(struct rtrs_clt_path *clt_path,
+ enum rtrs_clt_state new_state,
+ enum rtrs_clt_state *old_state)
+{
+ bool changed;
+
+ spin_lock_irq(&clt_path->state_wq.lock);
+ if (old_state)
+ *old_state = clt_path->state;
+ changed = rtrs_clt_change_state(clt_path, new_state);
+ spin_unlock_irq(&clt_path->state_wq.lock);
+
+ return changed;
+}
+
+static void rtrs_clt_hb_err_handler(struct rtrs_con *c)
+{
+ struct rtrs_clt_con *con = container_of(c, typeof(*con), c);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+
+ rtrs_err(con->c.path, "HB err handler for path=%s\n", kobject_name(&clt_path->kobj));
+ rtrs_rdma_error_recovery(con);
+}
+
+static void rtrs_clt_init_hb(struct rtrs_clt_path *clt_path)
+{
+ rtrs_init_hb(&clt_path->s, &io_comp_cqe,
+ RTRS_HB_INTERVAL_MS,
+ RTRS_HB_MISSED_MAX,
+ rtrs_clt_hb_err_handler,
+ rtrs_wq);
+}
+
+static void rtrs_clt_reconnect_work(struct work_struct *work);
+static void rtrs_clt_close_work(struct work_struct *work);
+
+static void rtrs_clt_err_recovery_work(struct work_struct *work)
+{
+ struct rtrs_clt_path *clt_path;
+ struct rtrs_clt_sess *clt;
+ int delay_ms;
+
+ clt_path = container_of(work, struct rtrs_clt_path, err_recovery_work);
+ clt = clt_path->clt;
+ delay_ms = clt->reconnect_delay_sec * 1000;
+ rtrs_clt_stop_and_destroy_conns(clt_path);
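+	/*
+	 * Add random jitter below RTRS_RECONNECT_SEED to the delay, which
+	 * helps keep paths that failed together from all reconnecting at
+	 * the same instant.
+	 */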
+ queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
+ msecs_to_jiffies(delay_ms +
+ get_random_u32_below(RTRS_RECONNECT_SEED)));
+}
+
+static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt,
+ const struct rtrs_addr *path,
+ size_t con_num, u32 nr_poll_queues)
+{
+ struct rtrs_clt_path *clt_path;
+ int err = -ENOMEM;
+ int cpu;
+ size_t total_con;
+
+ clt_path = kzalloc(sizeof(*clt_path), GFP_KERNEL);
+ if (!clt_path)
+ goto err;
+
+ /*
+	 * con_num IRQ-mode connections plus nr_poll_queues polling connections,
+	 * +1: extra connection for user messages
+ */
+ total_con = con_num + nr_poll_queues + 1;
+ clt_path->s.con = kcalloc(total_con, sizeof(*clt_path->s.con),
+ GFP_KERNEL);
+ if (!clt_path->s.con)
+ goto err_free_path;
+
+ clt_path->s.con_num = total_con;
+ clt_path->s.irq_con_num = con_num + 1;
+
+ clt_path->stats = kzalloc(sizeof(*clt_path->stats), GFP_KERNEL);
+ if (!clt_path->stats)
+ goto err_free_con;
+
+ mutex_init(&clt_path->init_mutex);
+ uuid_gen(&clt_path->s.uuid);
+ memcpy(&clt_path->s.dst_addr, path->dst,
+ rdma_addr_size((struct sockaddr *)path->dst));
+
+ /*
+ * rdma_resolve_addr() passes src_addr to cma_bind_addr, which
+	 * checks that the sa_family is non-zero. If the user passed src_addr=NULL,
+	 * the sess->src_addr will contain only zeros, which is fine.
+ */
+ if (path->src)
+ memcpy(&clt_path->s.src_addr, path->src,
+ rdma_addr_size((struct sockaddr *)path->src));
+ strscpy(clt_path->s.sessname, clt->sessname,
+ sizeof(clt_path->s.sessname));
+ clt_path->clt = clt;
+ clt_path->max_pages_per_mr = RTRS_MAX_SEGMENTS;
+ init_waitqueue_head(&clt_path->state_wq);
+ clt_path->state = RTRS_CLT_CONNECTING;
+ atomic_set(&clt_path->connected_cnt, 0);
+ INIT_WORK(&clt_path->close_work, rtrs_clt_close_work);
+ INIT_WORK(&clt_path->err_recovery_work, rtrs_clt_err_recovery_work);
+ INIT_DELAYED_WORK(&clt_path->reconnect_dwork, rtrs_clt_reconnect_work);
+ rtrs_clt_init_hb(clt_path);
+
+ clt_path->mp_skip_entry = alloc_percpu(typeof(*clt_path->mp_skip_entry));
+ if (!clt_path->mp_skip_entry)
+ goto err_free_stats;
+
+ for_each_possible_cpu(cpu)
+ INIT_LIST_HEAD(per_cpu_ptr(clt_path->mp_skip_entry, cpu));
+
+ err = rtrs_clt_init_stats(clt_path->stats);
+ if (err)
+ goto err_free_percpu;
+
+ return clt_path;
+
+err_free_percpu:
+ free_percpu(clt_path->mp_skip_entry);
+err_free_stats:
+ kfree(clt_path->stats);
+err_free_con:
+ kfree(clt_path->s.con);
+err_free_path:
+ kfree(clt_path);
+err:
+ return ERR_PTR(err);
+}
+
+void free_path(struct rtrs_clt_path *clt_path)
+{
+ free_percpu(clt_path->mp_skip_entry);
+ mutex_destroy(&clt_path->init_mutex);
+ kfree(clt_path->s.con);
+ kfree(clt_path->rbufs);
+ kfree(clt_path);
+}
+
+static int create_con(struct rtrs_clt_path *clt_path, unsigned int cid)
+{
+ struct rtrs_clt_con *con;
+
+ con = kzalloc(sizeof(*con), GFP_KERNEL);
+ if (!con)
+ return -ENOMEM;
+
+ /* Map first two connections to the first CPU */
+ con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids;
+ con->c.cid = cid;
+ con->c.path = &clt_path->s;
+ /* Align with srv, init as 1 */
+ atomic_set(&con->c.wr_cnt, 1);
+ mutex_init(&con->con_mutex);
+
+ clt_path->s.con[cid] = &con->c;
+
+ return 0;
+}
+
+static void destroy_con(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+
+ clt_path->s.con[con->c.cid] = NULL;
+ mutex_destroy(&con->con_mutex);
+ kfree(con);
+}
+
+static int create_con_cq_qp(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ u32 max_send_wr, max_recv_wr, cq_num, max_send_sge, wr_limit;
+ int err, cq_vector;
+ struct rtrs_msg_rkey_rsp *rsp;
+
+ lockdep_assert_held(&con->con_mutex);
+ if (con->c.cid == 0) {
+ max_send_sge = 1;
+ /* We must be the first here */
+ if (WARN_ON(clt_path->s.dev))
+ return -EINVAL;
+
+ /*
+ * The whole session uses device from user connection.
+ * Be careful not to close user connection before ib dev
+ * is gracefully put.
+ */
+ clt_path->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
+ &dev_pd);
+ if (!clt_path->s.dev) {
+ rtrs_wrn(clt_path->clt,
+ "rtrs_ib_dev_find_get_or_add(): no memory\n");
+ return -ENOMEM;
+ }
+ clt_path->s.dev_ref = 1;
+ query_fast_reg_mode(clt_path);
+ wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr;
+ /*
+		 * Two (request + registration) completions for send,
+		 * two for recv if always_invalidate is set on the server,
+		 * or one for recv otherwise,
+		 * + 2 for drain and heartbeat,
+		 * in case the qp gets into an error state.
+ */
+ max_send_wr =
+ min_t(int, wr_limit, SERVICE_CON_QUEUE_DEPTH * 2 + 2);
+ max_recv_wr = max_send_wr;
+ } else {
+ /*
+ * Here we assume that session members are correctly set.
+ * This is always true if user connection (cid == 0) is
+ * established first.
+ */
+ if (WARN_ON(!clt_path->s.dev))
+ return -EINVAL;
+ if (WARN_ON(!clt_path->queue_depth))
+ return -EINVAL;
+
+ wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr;
+ /* Shared between connections */
+ clt_path->s.dev_ref++;
+ max_send_wr = min_t(int, wr_limit,
+ /* QD * (REQ + RSP + FR REGS or INVS) + drain */
+ clt_path->queue_depth * 4 + 1);
+ max_recv_wr = min_t(int, wr_limit,
+ clt_path->queue_depth * 3 + 1);
+ max_send_sge = 2;
+ }
+ atomic_set(&con->c.sq_wr_avail, max_send_wr);
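+	/* One CQ is used for both send and recv completions, hence the sum. */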
+ cq_num = max_send_wr + max_recv_wr;
+	/* alloc iu to receive the new rkey reply when the server reports the flag set */
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
+ con->rsp_ius = rtrs_iu_alloc(cq_num, sizeof(*rsp),
+ GFP_KERNEL,
+ clt_path->s.dev->ib_dev,
+ DMA_FROM_DEVICE,
+ rtrs_clt_rdma_done);
+ if (!con->rsp_ius)
+ return -ENOMEM;
+ con->queue_num = cq_num;
+ }
+ cq_vector = con->cpu % clt_path->s.dev->ib_dev->num_comp_vectors;
+ if (con->c.cid >= clt_path->s.irq_con_num)
+ err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
+ cq_vector, cq_num, max_send_wr,
+ max_recv_wr, IB_POLL_DIRECT);
+ else
+ err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
+ cq_vector, cq_num, max_send_wr,
+ max_recv_wr, IB_POLL_SOFTIRQ);
+ /*
+ * In case of error we do not bother to clean previous allocations,
+ * since destroy_con_cq_qp() must be called.
+ */
+ return err;
+}
+
+static void destroy_con_cq_qp(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+
+ /*
+	 * Be careful here: destroy_con_cq_qp() can be called even if
+	 * create_con_cq_qp() failed, see comments there.
+ */
+ lockdep_assert_held(&con->con_mutex);
+ rtrs_cq_qp_destroy(&con->c);
+ if (con->rsp_ius) {
+ rtrs_iu_free(con->rsp_ius, clt_path->s.dev->ib_dev,
+ con->queue_num);
+ con->rsp_ius = NULL;
+ con->queue_num = 0;
+ }
+ if (clt_path->s.dev_ref && !--clt_path->s.dev_ref) {
+ rtrs_ib_dev_put(clt_path->s.dev);
+ clt_path->s.dev = NULL;
+ }
+}
+
+static void stop_cm(struct rtrs_clt_con *con)
+{
+ rdma_disconnect(con->c.cm_id);
+ if (con->c.qp)
+ ib_drain_qp(con->c.qp);
+}
+
+static void destroy_cm(struct rtrs_clt_con *con)
+{
+ rdma_destroy_id(con->c.cm_id);
+ con->c.cm_id = NULL;
+}
+
+static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con)
+{
+ struct rtrs_path *s = con->c.path;
+ int err;
+
+ mutex_lock(&con->con_mutex);
+ err = create_con_cq_qp(con);
+ mutex_unlock(&con->con_mutex);
+ if (err) {
+ rtrs_err(s, "create_con_cq_qp(), err: %d\n", err);
+ return err;
+ }
+ err = rdma_resolve_route(con->c.cm_id, RTRS_CONNECT_TIMEOUT_MS);
+ if (err)
+ rtrs_err(s, "Resolving route failed, err: %d\n", err);
+
+ return err;
+}
+
+static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_clt_sess *clt = clt_path->clt;
+ struct rtrs_msg_conn_req msg;
+ struct rdma_conn_param param;
+
+ int err;
+
+ param = (struct rdma_conn_param) {
+ .retry_count = 7,
+ .rnr_retry_count = 7,
+ .private_data = &msg,
+ .private_data_len = sizeof(msg),
+ };
+
+ msg = (struct rtrs_msg_conn_req) {
+ .magic = cpu_to_le16(RTRS_MAGIC),
+ .version = cpu_to_le16(RTRS_PROTO_VER),
+ .cid = cpu_to_le16(con->c.cid),
+ .cid_num = cpu_to_le16(clt_path->s.con_num),
+ .recon_cnt = cpu_to_le16(clt_path->s.recon_cnt),
+ };
+ msg.first_conn = clt_path->for_new_clt ? FIRST_CONN : 0;
+ uuid_copy(&msg.sess_uuid, &clt_path->s.uuid);
+ uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
+
+ err = rdma_connect_locked(con->c.cm_id, &param);
+ if (err)
+ rtrs_err(clt, "rdma_connect_locked(): %d\n", err);
+
+ return err;
+}
+
+static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
+ struct rdma_cm_event *ev)
+{
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_clt_sess *clt = clt_path->clt;
+ const struct rtrs_msg_conn_rsp *msg;
+ u16 version, queue_depth;
+ int errno;
+ u8 len;
+
+ msg = ev->param.conn.private_data;
+ len = ev->param.conn.private_data_len;
+ if (len < sizeof(*msg)) {
+ rtrs_err(clt, "Invalid RTRS connection response\n");
+ return -ECONNRESET;
+ }
+ if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
+ rtrs_err(clt, "Invalid RTRS magic\n");
+ return -ECONNRESET;
+ }
+ version = le16_to_cpu(msg->version);
+ if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
+ rtrs_err(clt, "Unsupported major RTRS version: %d, expected %d\n",
+ version >> 8, RTRS_PROTO_VER_MAJOR);
+ return -ECONNRESET;
+ }
+ errno = le16_to_cpu(msg->errno);
+ if (errno) {
+ rtrs_err(clt, "Invalid RTRS message: errno %d\n",
+ errno);
+ return -ECONNRESET;
+ }
+ if (con->c.cid == 0) {
+ queue_depth = le16_to_cpu(msg->queue_depth);
+
+ if (clt_path->queue_depth > 0 && queue_depth != clt_path->queue_depth) {
+ rtrs_err(clt, "Error: queue depth changed\n");
+
+ /*
+ * Stop any more reconnection attempts
+ */
+ clt_path->reconnect_attempts = -1;
+ rtrs_err(clt,
+ "Disabling auto-reconnect. Trigger a manual reconnect after issue is resolved\n");
+ return -ECONNRESET;
+ }
+
+ if (!clt_path->rbufs) {
+ clt_path->rbufs = kcalloc(queue_depth,
+ sizeof(*clt_path->rbufs),
+ GFP_KERNEL);
+ if (!clt_path->rbufs)
+ return -ENOMEM;
+ }
+ clt_path->queue_depth = queue_depth;
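+		/*
+		 * signal_interval roughly controls how often a signalled
+		 * completion is requested so the send queue gets flushed;
+		 * cap it at the service connection queue depth.
+		 */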
+ clt_path->s.signal_interval = min_not_zero(queue_depth,
+ (unsigned short) SERVICE_CON_QUEUE_DEPTH);
+ clt_path->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
+ clt_path->max_io_size = le32_to_cpu(msg->max_io_size);
+ clt_path->flags = le32_to_cpu(msg->flags);
+ clt_path->chunk_size = clt_path->max_io_size + clt_path->max_hdr_size;
+
+ /*
+		 * Global IO size is always a minimum.
+		 * If during a reconnection the server sends us a slightly
+		 * higher value, the client does not care and keeps using
+		 * the cached minimum.
+		 *
+		 * Since we can have several sessions (paths) re-establishing
+		 * connections in parallel, use a lock.
+ */
+ mutex_lock(&clt->paths_mutex);
+ clt->queue_depth = clt_path->queue_depth;
+ clt->max_io_size = min_not_zero(clt_path->max_io_size,
+ clt->max_io_size);
+ mutex_unlock(&clt->paths_mutex);
+
+ /*
+ * Cache the hca_port and hca_name for sysfs
+ */
+ clt_path->hca_port = con->c.cm_id->port_num;
+ scnprintf(clt_path->hca_name, sizeof(clt_path->hca_name),
+ clt_path->s.dev->ib_dev->name);
+ clt_path->s.src_addr = con->c.cm_id->route.addr.src_addr;
+ /* set for_new_clt, to allow future reconnect on any path */
+ clt_path->for_new_clt = 1;
+ }
+
+ return 0;
+}
+
+static inline void flag_success_on_conn(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+
+ atomic_inc(&clt_path->connected_cnt);
+ con->cm_err = 1;
+}
+
+static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
+ struct rdma_cm_event *ev)
+{
+ struct rtrs_path *s = con->c.path;
+ const struct rtrs_msg_conn_rsp *msg;
+ const char *rej_msg;
+ int status, errno;
+ u8 data_len;
+
+ status = ev->status;
+ rej_msg = rdma_reject_msg(con->c.cm_id, status);
+ msg = rdma_consumer_reject_data(con->c.cm_id, ev, &data_len);
+
+ if (msg && data_len >= sizeof(*msg)) {
+ errno = (int16_t)le16_to_cpu(msg->errno);
+ if (errno == -EBUSY)
+ rtrs_err(s,
+				"A previous session still exists on the server, please reconnect later\n");
+ else
+ rtrs_err(s,
+ "Connect rejected: status %d (%s), rtrs errno %d\n",
+ status, rej_msg, errno);
+ } else {
+ rtrs_err(s,
+ "Connect rejected but with malformed message: status %d (%s)\n",
+ status, rej_msg);
+ }
+
+ return -ECONNRESET;
+}
+
+void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait)
+{
+ trace_rtrs_clt_close_conns(clt_path);
+
+ if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSING, NULL))
+ queue_work(rtrs_wq, &clt_path->close_work);
+ if (wait)
+ flush_work(&clt_path->close_work);
+}
+
+static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err)
+{
+ if (con->cm_err == 1) {
+ struct rtrs_clt_path *clt_path;
+
+ clt_path = to_clt_path(con->c.path);
+ if (atomic_dec_and_test(&clt_path->connected_cnt))
+
+ wake_up(&clt_path->state_wq);
+ }
+ con->cm_err = cm_err;
+}
+
+static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *ev)
+{
+ struct rtrs_clt_con *con = cm_id->context;
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_clt_path *clt_path = to_clt_path(s);
+ int cm_err = 0;
+
+ switch (ev->event) {
+ case RDMA_CM_EVENT_ADDR_RESOLVED:
+ cm_err = rtrs_rdma_addr_resolved(con);
+ break;
+ case RDMA_CM_EVENT_ROUTE_RESOLVED:
+ cm_err = rtrs_rdma_route_resolved(con);
+ break;
+ case RDMA_CM_EVENT_ESTABLISHED:
+ cm_err = rtrs_rdma_conn_established(con, ev);
+ if (!cm_err) {
+ /*
+ * Report success and wake up. Here we abuse state_wq,
+ * i.e. wake up without state change, but we set cm_err.
+ */
+ flag_success_on_conn(con);
+ wake_up(&clt_path->state_wq);
+ return 0;
+ }
+ break;
+ case RDMA_CM_EVENT_REJECTED:
+ cm_err = rtrs_rdma_conn_rejected(con, ev);
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ /* No message for disconnecting */
+ cm_err = -ECONNRESET;
+ break;
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ rtrs_wrn(s, "CM error (CM event: %s, err: %d)\n",
+ rdma_event_msg(ev->event), ev->status);
+ cm_err = -ECONNRESET;
+ break;
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ rtrs_wrn(s, "CM error (CM event: %s, err: %d)\n",
+ rdma_event_msg(ev->event), ev->status);
+ cm_err = -EHOSTUNREACH;
+ break;
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ /*
+ * Device removal is a special case. Queue close and return 0.
+ */
+ rtrs_wrn_rl(s, "CM event: %s, status: %d\n", rdma_event_msg(ev->event),
+ ev->status);
+ rtrs_clt_close_conns(clt_path, false);
+ return 0;
+ default:
+ rtrs_err(s, "Unexpected RDMA CM error (CM event: %s, err: %d)\n",
+ rdma_event_msg(ev->event), ev->status);
+ cm_err = -ECONNRESET;
+ break;
+ }
+
+ if (cm_err) {
+ /*
+		 * A CM error makes sense only while a connection is being
+		 * established; in other cases we rely on the normal
+		 * reconnect procedure.
+ */
+ flag_error_on_conn(con, cm_err);
+ rtrs_rdma_error_recovery(con);
+ }
+
+ return 0;
+}
+
+/* The caller should do the cleanup in case of error */
+static int create_cm(struct rtrs_clt_con *con)
+{
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_clt_path *clt_path = to_clt_path(s);
+ struct rdma_cm_id *cm_id;
+ int err;
+
+ cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con,
+ clt_path->s.dst_addr.ss_family == AF_IB ?
+ RDMA_PS_IB : RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(cm_id)) {
+ rtrs_err(s, "Failed to create CM ID, err: %pe\n", cm_id);
+ return PTR_ERR(cm_id);
+ }
+ con->c.cm_id = cm_id;
+ con->cm_err = 0;
+ /* allow the port to be reused */
+ err = rdma_set_reuseaddr(cm_id, 1);
+ if (err != 0) {
+ rtrs_err(s, "Set address reuse failed, err: %d\n", err);
+ return err;
+ }
+ err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr,
+ (struct sockaddr *)&clt_path->s.dst_addr,
+ RTRS_CONNECT_TIMEOUT_MS);
+ if (err) {
+ rtrs_err(s, "Failed to resolve address, err: %d\n", err);
+ return err;
+ }
+ /*
+	 * Combine connection status and session events. This is needed
+	 * for waiting on two possible cases: cm_err holds something
+	 * meaningful or the session state was really changed to an error
+	 * by device removal.
+ */
+ err = wait_event_interruptible_timeout(
+ clt_path->state_wq,
+ con->cm_err || clt_path->state != RTRS_CLT_CONNECTING,
+ msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
+ if (err == 0 || err == -ERESTARTSYS) {
+ if (err == 0)
+ err = -ETIMEDOUT;
+ /* Timedout or interrupted */
+ return err;
+ }
+ if (con->cm_err < 0)
+ return con->cm_err;
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING)
+ /* Device removal */
+ return -ECONNABORTED;
+
+ return 0;
+}
+
+static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path)
+{
+ struct rtrs_clt_sess *clt = clt_path->clt;
+ int up;
+
+ /*
+ * We can fire RECONNECTED event only when all paths were
+ * connected on rtrs_clt_open(), then each was disconnected
+ * and the first one connected again. That's why this nasty
+	 * game with the counter value.
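+	 *
+	 * Rough example with two paths: paths_up starts at MAX_PATHS_NUM,
+	 * reaches MAX_PATHS_NUM + 2 once both initial connects succeed and
+	 * is then folded back to 2 below; only after both paths go down can
+	 * a later reconnect observe up == 1 and fire RECONNECTED again.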
+ */
+
+ mutex_lock(&clt->paths_ev_mutex);
+ up = ++clt->paths_up;
+ /*
+ * Here it is safe to access paths num directly since up counter
+ * is greater than MAX_PATHS_NUM only while rtrs_clt_open() is
+ * in progress, thus paths removals are impossible.
+ */
+ if (up > MAX_PATHS_NUM && up == MAX_PATHS_NUM + clt->paths_num)
+ clt->paths_up = clt->paths_num;
+ else if (up == 1)
+ clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_RECONNECTED);
+ mutex_unlock(&clt->paths_ev_mutex);
+
+ /* Mark session as established */
+ clt_path->established = true;
+ clt_path->reconnect_attempts = 0;
+ clt_path->stats->reconnects.successful_cnt++;
+}
+
+static void rtrs_clt_path_down(struct rtrs_clt_path *clt_path)
+{
+ struct rtrs_clt_sess *clt = clt_path->clt;
+
+ if (!clt_path->established)
+ return;
+
+ clt_path->established = false;
+ mutex_lock(&clt->paths_ev_mutex);
+ WARN_ON(!clt->paths_up);
+ if (--clt->paths_up == 0)
+ clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_DISCONNECTED);
+ mutex_unlock(&clt->paths_ev_mutex);
+}
+
+static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path)
+{
+ struct rtrs_clt_con *con;
+ unsigned int cid;
+
+ WARN_ON(READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED);
+
+ /*
+ * Possible race with rtrs_clt_open(), when DEVICE_REMOVAL comes
+ * exactly in between. Start destroying after it finishes.
+ */
+ mutex_lock(&clt_path->init_mutex);
+ mutex_unlock(&clt_path->init_mutex);
+
+ /*
+ * All IO paths must observe !CONNECTED state before we
+ * free everything.
+ */
+ synchronize_rcu();
+
+ rtrs_stop_hb(&clt_path->s);
+
+ /*
+	 * The order is utterly crucial: first disconnect and complete all
+	 * rdma requests with an error (thus setting in_use=false for requests),
+	 * then fail outstanding requests, checking in_use for each, and
+	 * finally notify the upper layer about the session disconnection.
+ */
+
+ for (cid = 0; cid < clt_path->s.con_num; cid++) {
+ if (!clt_path->s.con[cid])
+ break;
+ con = to_clt_con(clt_path->s.con[cid]);
+ stop_cm(con);
+ }
+ fail_all_outstanding_reqs(clt_path);
+ free_path_reqs(clt_path);
+ rtrs_clt_path_down(clt_path);
+
+ /*
+	 * Wait for a graceful shutdown, namely when the peer side invokes
+	 * rdma_disconnect(). 'connected_cnt' is decremented only on
+	 * CM events, thus if the other side has crashed and hb has detected
+	 * that something is wrong, we will be stuck here for exactly the
+	 * timeout, since CM does not fire anything. That is fine, we are
+	 * not in a hurry.
+ */
+ wait_event_timeout(clt_path->state_wq,
+ !atomic_read(&clt_path->connected_cnt),
+ msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
+
+ for (cid = 0; cid < clt_path->s.con_num; cid++) {
+ if (!clt_path->s.con[cid])
+ break;
+ con = to_clt_con(clt_path->s.con[cid]);
+ mutex_lock(&con->con_mutex);
+ destroy_con_cq_qp(con);
+ mutex_unlock(&con->con_mutex);
+ destroy_cm(con);
+ destroy_con(con);
+ }
+}
+
+static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path)
+{
+ struct rtrs_clt_sess *clt = clt_path->clt;
+ struct rtrs_clt_path *next;
+ bool wait_for_grace = false;
+ int cpu;
+
+ mutex_lock(&clt->paths_mutex);
+ list_del_rcu(&clt_path->s.entry);
+
+ /* Make sure everybody observes path removal. */
+ synchronize_rcu();
+
+ /*
+ * At this point nobody sees @sess in the list, but still we have
+ * dangling pointer @pcpu_path which _can_ point to @sess. Since
+ * nobody can observe @sess in the list, we guarantee that IO path
+ * will not assign @sess to @pcpu_path, i.e. @pcpu_path can be equal
+ * to @sess, but can never again become @sess.
+ */
+
+ /*
+	 * Decrement the paths number only after the grace period, because
+	 * a caller of do_each_path() must first observe the list without
+	 * the path and only then the decremented paths number.
+ *
+ * Otherwise there can be the following situation:
+ * o Two paths exist and IO is coming.
+ * o One path is removed:
+ * CPU#0 CPU#1
+ * do_each_path(): rtrs_clt_remove_path_from_arr():
+ * path = get_next_path()
+ * ^^^ list_del_rcu(path)
+ * [!CONNECTED path] clt->paths_num--
+ * ^^^^^^^^^
+ * load clt->paths_num from 2 to 1
+ * ^^^^^^^^^
+ * sees 1
+ *
+ * path is observed as !CONNECTED, but do_each_path() loop
+ * ends, because expression i < clt->paths_num is false.
+ */
+ clt->paths_num--;
+
+ /*
+ * Get @next connection from current @sess which is going to be
+ * removed. If @sess is the last element, then @next is NULL.
+ */
+ rcu_read_lock();
+ next = rtrs_clt_get_next_path_or_null(&clt->paths_list, clt_path);
+ rcu_read_unlock();
+
+ /*
+ * @pcpu paths can still point to the path which is going to be
+ * removed, so change the pointer manually.
+ */
+ for_each_possible_cpu(cpu) {
+ struct rtrs_clt_path __rcu **ppcpu_path;
+
+ ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu);
+ if (rcu_dereference_protected(*ppcpu_path,
+ lockdep_is_held(&clt->paths_mutex)) != clt_path)
+ /*
+ * synchronize_rcu() was called just after deleting
+ * entry from the list, thus IO code path cannot
+ * change pointer back to the pointer which is going
+ * to be removed, we are safe here.
+ */
+ continue;
+
+ /*
+ * We race with IO code path, which also changes pointer,
+ * thus we have to be careful not to overwrite it.
+ */
+ if (try_cmpxchg((struct rtrs_clt_path **)ppcpu_path, &clt_path,
+ next))
+ /*
+			 * @ppcpu_path was successfully replaced with @next,
+			 * which means that someone could also have picked up
+			 * @sess and be dereferencing it right now, so waiting
+			 * for a grace period is required.
+ */
+ wait_for_grace = true;
+ }
+ if (wait_for_grace)
+ synchronize_rcu();
+
+ mutex_unlock(&clt->paths_mutex);
+}
+
+static void rtrs_clt_add_path_to_arr(struct rtrs_clt_path *clt_path)
+{
+ struct rtrs_clt_sess *clt = clt_path->clt;
+
+ mutex_lock(&clt->paths_mutex);
+ clt->paths_num++;
+
+ list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list);
+ mutex_unlock(&clt->paths_mutex);
+}
+
+static void rtrs_clt_close_work(struct work_struct *work)
+{
+ struct rtrs_clt_path *clt_path;
+
+ clt_path = container_of(work, struct rtrs_clt_path, close_work);
+
+ cancel_work_sync(&clt_path->err_recovery_work);
+ cancel_delayed_work_sync(&clt_path->reconnect_dwork);
+ rtrs_clt_stop_and_destroy_conns(clt_path);
+ rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSED, NULL);
+}
+
+static int init_conns(struct rtrs_clt_path *clt_path)
+{
+ unsigned int cid;
+ int err, i;
+
+ /*
+	 * On every new set of session connections increase the reconnect
+	 * counter to avoid clashes with previous, not yet closed, sessions
+	 * on the server side.
+ */
+ clt_path->s.recon_cnt++;
+
+ /* Establish all RDMA connections */
+ for (cid = 0; cid < clt_path->s.con_num; cid++) {
+ err = create_con(clt_path, cid);
+ if (err)
+ goto destroy;
+
+ err = create_cm(to_clt_con(clt_path->s.con[cid]));
+ if (err)
+ goto destroy;
+ }
+
+ /*
+ * Set the cid to con_num - 1, since if we fail later, we want to stay in bounds.
+ */
+ cid = clt_path->s.con_num - 1;
+
+ err = alloc_path_reqs(clt_path);
+ if (err)
+ goto destroy;
+
+ return 0;
+
+destroy:
+	/* Make sure we do the cleanup in the order the connections were created */
+ for (i = 0; i <= cid; i++) {
+ struct rtrs_clt_con *con;
+
+ if (!clt_path->s.con[i])
+ break;
+
+ con = to_clt_con(clt_path->s.con[i]);
+ if (con->c.cm_id) {
+ stop_cm(con);
+ mutex_lock(&con->con_mutex);
+ destroy_con_cq_qp(con);
+ mutex_unlock(&con->con_mutex);
+ destroy_cm(con);
+ }
+ destroy_con(con);
+ }
+ /*
+	 * If we've never taken the async path and got an error, say, while
+	 * doing rdma_resolve_addr(), switch to the CONNECTING_ERR state
+	 * manually to keep reconnecting.
+ */
+ rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL);
+
+ return err;
+}
+
+static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_iu *iu;
+
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+ rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1);
+
+ if (wc->status != IB_WC_SUCCESS) {
+ rtrs_err(clt_path->clt, "Path info request send failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL);
+ return;
+ }
+
+ rtrs_clt_update_wc_stats(con);
+}
+
+static int process_info_rsp(struct rtrs_clt_path *clt_path,
+ const struct rtrs_msg_info_rsp *msg)
+{
+ unsigned int sg_cnt, total_len;
+ int i, sgi;
+
+ sg_cnt = le16_to_cpu(msg->sg_cnt);
+ if (!sg_cnt || (clt_path->queue_depth % sg_cnt)) {
+ rtrs_err(clt_path->clt,
+			 "Incorrect sg_cnt %d, queue depth is not a multiple of it\n",
+ sg_cnt);
+ return -EINVAL;
+ }
+
+ /*
+ * Check if IB immediate data size is enough to hold the mem_id and
+ * the offset inside the memory chunk.
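+	 * Rough example: 64 chunks of 64 KiB need 6 + 16 = 22 bits.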
+ */
+ if ((ilog2(sg_cnt - 1) + 1) + (ilog2(clt_path->chunk_size - 1) + 1) >
+ MAX_IMM_PAYL_BITS) {
+ rtrs_err(clt_path->clt,
+ "RDMA immediate size (%db) not enough to encode %d buffers of size %dB\n",
+ MAX_IMM_PAYL_BITS, sg_cnt, clt_path->chunk_size);
+ return -EINVAL;
+ }
+ total_len = 0;
+ for (sgi = 0, i = 0; sgi < sg_cnt && i < clt_path->queue_depth; sgi++) {
+ const struct rtrs_sg_desc *desc = &msg->desc[sgi];
+ u32 len, rkey;
+ u64 addr;
+
+ addr = le64_to_cpu(desc->addr);
+ rkey = le32_to_cpu(desc->key);
+ len = le32_to_cpu(desc->len);
+
+ total_len += len;
+
+ if (!len || (len % clt_path->chunk_size)) {
+ rtrs_err(clt_path->clt, "Incorrect [%d].len %d\n",
+ sgi,
+ len);
+ return -EINVAL;
+ }
+ for ( ; len && i < clt_path->queue_depth; i++) {
+ clt_path->rbufs[i].addr = addr;
+ clt_path->rbufs[i].rkey = rkey;
+
+ len -= clt_path->chunk_size;
+ addr += clt_path->chunk_size;
+ }
+ }
+ /* Sanity check */
+ if (sgi != sg_cnt || i != clt_path->queue_depth) {
+ rtrs_err(clt_path->clt,
+ "Incorrect sg vector, not fully mapped\n");
+ return -EINVAL;
+ }
+ if (total_len != clt_path->chunk_size * clt_path->queue_depth) {
+ rtrs_err(clt_path->clt, "Incorrect total_len %d\n", total_len);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_msg_info_rsp *msg;
+ enum rtrs_clt_state state;
+ struct rtrs_iu *iu;
+ size_t rx_sz;
+ int err;
+
+ state = RTRS_CLT_CONNECTING_ERR;
+
+ WARN_ON(con->c.cid);
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+ if (wc->status != IB_WC_SUCCESS) {
+ rtrs_err(clt_path->clt, "Path info response recv failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ goto out;
+ }
+ WARN_ON(wc->opcode != IB_WC_RECV);
+
+ if (wc->byte_len < sizeof(*msg)) {
+ rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n",
+ wc->byte_len);
+ goto out;
+ }
+ ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr,
+ iu->size, DMA_FROM_DEVICE);
+ msg = iu->buf;
+ if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP) {
+ rtrs_err(clt_path->clt, "Path info response is malformed: type %d\n",
+ le16_to_cpu(msg->type));
+ goto out;
+ }
+ rx_sz = sizeof(*msg);
+ rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt);
+ if (wc->byte_len < rx_sz) {
+ rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n",
+ wc->byte_len);
+ goto out;
+ }
+ err = process_info_rsp(clt_path, msg);
+ if (err)
+ goto out;
+
+ err = post_recv_path(clt_path);
+ if (err)
+ goto out;
+
+ state = RTRS_CLT_CONNECTED;
+
+out:
+ rtrs_clt_update_wc_stats(con);
+ rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1);
+ rtrs_clt_change_state_get_old(clt_path, state, NULL);
+}
+
+static int rtrs_send_path_info(struct rtrs_clt_path *clt_path)
+{
+ struct rtrs_clt_con *usr_con = to_clt_con(clt_path->s.con[0]);
+ struct rtrs_msg_info_req *msg;
+ struct rtrs_iu *tx_iu, *rx_iu;
+ size_t rx_sz;
+ int err;
+
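+	/*
+	 * Size the response buffer for the worst case, where the server
+	 * describes every chunk with its own descriptor.
+	 */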
+ rx_sz = sizeof(struct rtrs_msg_info_rsp);
+ rx_sz += sizeof(struct rtrs_sg_desc) * clt_path->queue_depth;
+
+ tx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL,
+ clt_path->s.dev->ib_dev, DMA_TO_DEVICE,
+ rtrs_clt_info_req_done);
+ rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, clt_path->s.dev->ib_dev,
+ DMA_FROM_DEVICE, rtrs_clt_info_rsp_done);
+ if (!tx_iu || !rx_iu) {
+ err = -ENOMEM;
+ goto out;
+ }
+ /* Prepare for getting info response */
+ err = rtrs_iu_post_recv(&usr_con->c, rx_iu);
+ if (err) {
+ rtrs_err(clt_path->clt, "rtrs_iu_post_recv(), err: %d\n", err);
+ goto out;
+ }
+ rx_iu = NULL;
+
+ msg = tx_iu->buf;
+ msg->type = cpu_to_le16(RTRS_MSG_INFO_REQ);
+ memcpy(msg->pathname, clt_path->s.sessname, sizeof(msg->pathname));
+
+ ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
+ tx_iu->dma_addr,
+ tx_iu->size, DMA_TO_DEVICE);
+
+ /* Send info request */
+ err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL);
+ if (err) {
+ rtrs_err(clt_path->clt, "rtrs_iu_post_send(), err: %d\n", err);
+ goto out;
+ }
+ tx_iu = NULL;
+
+ /* Wait for state change */
+ wait_event_interruptible_timeout(clt_path->state_wq,
+ clt_path->state != RTRS_CLT_CONNECTING,
+ msecs_to_jiffies(
+ RTRS_CONNECT_TIMEOUT_MS));
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) {
+ if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTING_ERR)
+ err = -ECONNRESET;
+ else
+ err = -ETIMEDOUT;
+ }
+
+out:
+ if (tx_iu)
+ rtrs_iu_free(tx_iu, clt_path->s.dev->ib_dev, 1);
+ if (rx_iu)
+ rtrs_iu_free(rx_iu, clt_path->s.dev->ib_dev, 1);
+ if (err)
+ /* If we've never taken async path because of malloc problems */
+ rtrs_clt_change_state_get_old(clt_path,
+ RTRS_CLT_CONNECTING_ERR, NULL);
+
+ return err;
+}
+
+/**
+ * init_path() - establishes all path connections and does handshake
+ * @clt_path: client path.
+ * In case of an error a full close or reconnect procedure should be taken,
+ * because reconnect or close async work may already have been started.
+ */
+static int init_path(struct rtrs_clt_path *clt_path)
+{
+ int err;
+ char str[NAME_MAX];
+ struct rtrs_addr path = {
+ .src = &clt_path->s.src_addr,
+ .dst = &clt_path->s.dst_addr,
+ };
+
+ rtrs_addr_to_str(&path, str, sizeof(str));
+
+ mutex_lock(&clt_path->init_mutex);
+ err = init_conns(clt_path);
+ if (err) {
+ rtrs_err(clt_path->clt,
+ "init_conns() failed: err=%d path=%s [%s:%u]\n", err,
+ str, clt_path->hca_name, clt_path->hca_port);
+ goto out;
+ }
+ err = rtrs_send_path_info(clt_path);
+ if (err) {
+ rtrs_err(clt_path->clt,
+ "rtrs_send_path_info() failed: err=%d path=%s [%s:%u]\n",
+ err, str, clt_path->hca_name, clt_path->hca_port);
+ goto out;
+ }
+ rtrs_clt_path_up(clt_path);
+ rtrs_start_hb(&clt_path->s);
+out:
+ mutex_unlock(&clt_path->init_mutex);
+
+ return err;
+}
+
+static void rtrs_clt_reconnect_work(struct work_struct *work)
+{
+ struct rtrs_clt_path *clt_path;
+ struct rtrs_clt_sess *clt;
+ int err;
+
+ clt_path = container_of(to_delayed_work(work), struct rtrs_clt_path,
+ reconnect_dwork);
+ clt = clt_path->clt;
+
+ trace_rtrs_clt_reconnect_work(clt_path);
+
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_RECONNECTING)
+ return;
+
+ if (clt_path->reconnect_attempts >= clt->max_reconnect_attempts) {
+ /* Close a path completely if max attempts is reached */
+ rtrs_clt_close_conns(clt_path, false);
+ return;
+ }
+ clt_path->reconnect_attempts++;
+
+ msleep(RTRS_RECONNECT_BACKOFF);
+ if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING, NULL)) {
+ err = init_path(clt_path);
+ if (err)
+ goto reconnect_again;
+ }
+
+ return;
+
+reconnect_again:
+ if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_RECONNECTING, NULL)) {
+ clt_path->stats->reconnects.fail_cnt++;
+ queue_work(rtrs_wq, &clt_path->err_recovery_work);
+ }
+}
+
+static void rtrs_clt_dev_release(struct device *dev)
+{
+ struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
+ dev);
+
+ mutex_destroy(&clt->paths_ev_mutex);
+ mutex_destroy(&clt->paths_mutex);
+ kfree(clt);
+}
+
+static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num,
+ u16 port, size_t pdu_sz, void *priv,
+ void (*link_ev)(void *priv,
+ enum rtrs_clt_link_ev ev),
+ unsigned int reconnect_delay_sec,
+ unsigned int max_reconnect_attempts)
+{
+ struct rtrs_clt_sess *clt;
+ int err;
+
+ if (!paths_num || paths_num > MAX_PATHS_NUM)
+ return ERR_PTR(-EINVAL);
+
+ if (strlen(sessname) >= sizeof(clt->sessname))
+ return ERR_PTR(-EINVAL);
+
+ clt = kzalloc(sizeof(*clt), GFP_KERNEL);
+ if (!clt)
+ return ERR_PTR(-ENOMEM);
+
+ clt->pcpu_path = alloc_percpu(typeof(*clt->pcpu_path));
+ if (!clt->pcpu_path) {
+ kfree(clt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ clt->dev.class = &rtrs_clt_dev_class;
+ clt->dev.release = rtrs_clt_dev_release;
+ uuid_gen(&clt->paths_uuid);
+ INIT_LIST_HEAD_RCU(&clt->paths_list);
+ clt->paths_num = paths_num;
+ clt->paths_up = MAX_PATHS_NUM;
+ clt->port = port;
+ clt->pdu_sz = pdu_sz;
+ clt->max_segments = RTRS_MAX_SEGMENTS;
+ clt->reconnect_delay_sec = reconnect_delay_sec;
+ clt->max_reconnect_attempts = max_reconnect_attempts;
+ clt->priv = priv;
+ clt->link_ev = link_ev;
+ clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
+ strscpy(clt->sessname, sessname, sizeof(clt->sessname));
+ init_waitqueue_head(&clt->permits_wait);
+ mutex_init(&clt->paths_ev_mutex);
+ mutex_init(&clt->paths_mutex);
+ device_initialize(&clt->dev);
+
+ err = dev_set_name(&clt->dev, "%s", sessname);
+ if (err)
+ goto err_put;
+
+ /*
+ * Suppress user space notification until
+ * sysfs files are created
+ */
+ dev_set_uevent_suppress(&clt->dev, true);
+ err = device_add(&clt->dev);
+ if (err)
+ goto err_put;
+
+ clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
+ if (!clt->kobj_paths) {
+ err = -ENOMEM;
+ goto err_del;
+ }
+ err = rtrs_clt_create_sysfs_root_files(clt);
+ if (err) {
+ kobject_del(clt->kobj_paths);
+ kobject_put(clt->kobj_paths);
+ goto err_del;
+ }
+ dev_set_uevent_suppress(&clt->dev, false);
+ kobject_uevent(&clt->dev.kobj, KOBJ_ADD);
+
+ return clt;
+err_del:
+ device_del(&clt->dev);
+err_put:
+ free_percpu(clt->pcpu_path);
+ put_device(&clt->dev);
+ return ERR_PTR(err);
+}
+
+static void free_clt(struct rtrs_clt_sess *clt)
+{
+ free_percpu(clt->pcpu_path);
+
+ /*
+	 * The release callback will free clt and destroy the mutexes on the last put.
+ */
+ device_unregister(&clt->dev);
+}
+
+/**
+ * rtrs_clt_open() - Open a path to an RTRS server
+ * @ops: holds the link event callback and the private pointer.
+ * @pathname: name of the path to an RTRS server
+ * @paths: Paths to be established defined by their src and dst addresses
+ * @paths_num: Number of elements in the @paths array
+ * @port: port to be used by the RTRS session
+ * @pdu_sz: Size of extra payload which can be accessed after permit allocation.
+ * @reconnect_delay_sec: time between reconnect tries
+ * @max_reconnect_attempts: Number of times to reconnect on error before giving
+ *			    up, 0 for disabled, -1 for forever
+ * @nr_poll_queues: number of polling mode connections using the IB_POLL_DIRECT flag
+ *
+ * Starts session establishment with the rtrs_server. The function can block
+ * up to ~2000ms before it returns.
+ *
+ * Return a valid pointer on success, otherwise an ERR_PTR().
+ */
+struct rtrs_clt_sess *rtrs_clt_open(struct rtrs_clt_ops *ops,
+ const char *pathname,
+ const struct rtrs_addr *paths,
+ size_t paths_num, u16 port,
+ size_t pdu_sz, u8 reconnect_delay_sec,
+ s16 max_reconnect_attempts, u32 nr_poll_queues)
+{
+ struct rtrs_clt_path *clt_path, *tmp;
+ struct rtrs_clt_sess *clt;
+ int err, i;
+
+ if (strchr(pathname, '/') || strchr(pathname, '.')) {
+		pr_err("pathname cannot contain '/' or '.'\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ clt = alloc_clt(pathname, paths_num, port, pdu_sz, ops->priv,
+ ops->link_ev,
+ reconnect_delay_sec,
+ max_reconnect_attempts);
+ if (IS_ERR(clt)) {
+ err = PTR_ERR(clt);
+ goto out;
+ }
+ for (i = 0; i < paths_num; i++) {
+ struct rtrs_clt_path *clt_path;
+
+ clt_path = alloc_path(clt, &paths[i], nr_cpu_ids,
+ nr_poll_queues);
+ if (IS_ERR(clt_path)) {
+ err = PTR_ERR(clt_path);
+ goto close_all_path;
+ }
+ if (!i)
+ clt_path->for_new_clt = 1;
+ list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list);
+
+ err = init_path(clt_path);
+ if (err) {
+ list_del_rcu(&clt_path->s.entry);
+ rtrs_clt_close_conns(clt_path, true);
+ free_percpu(clt_path->stats->pcpu_stats);
+ kfree(clt_path->stats);
+ free_path(clt_path);
+ goto close_all_path;
+ }
+
+ err = rtrs_clt_create_path_files(clt_path);
+ if (err) {
+ list_del_rcu(&clt_path->s.entry);
+ rtrs_clt_close_conns(clt_path, true);
+ free_percpu(clt_path->stats->pcpu_stats);
+ kfree(clt_path->stats);
+ free_path(clt_path);
+ goto close_all_path;
+ }
+ }
+ err = alloc_permits(clt);
+ if (err)
+ goto close_all_path;
+
+ return clt;
+
+close_all_path:
+ list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) {
+ rtrs_clt_destroy_path_files(clt_path, NULL);
+ rtrs_clt_close_conns(clt_path, true);
+ kobject_put(&clt_path->kobj);
+ }
+ rtrs_clt_destroy_sysfs_root(clt);
+ free_clt(clt);
+
+out:
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(rtrs_clt_open);
+
+/**
+ * rtrs_clt_close() - Close a session
+ * @clt: Session handle. Session is freed upon return.
+ */
+void rtrs_clt_close(struct rtrs_clt_sess *clt)
+{
+ struct rtrs_clt_path *clt_path, *tmp;
+
+ /* Firstly forbid sysfs access */
+ rtrs_clt_destroy_sysfs_root(clt);
+
+ /* Now it is safe to iterate over all paths without locks */
+ list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) {
+ rtrs_clt_close_conns(clt_path, true);
+ rtrs_clt_destroy_path_files(clt_path, NULL);
+ kobject_put(&clt_path->kobj);
+ }
+ free_permits(clt);
+ free_clt(clt);
+}
+EXPORT_SYMBOL(rtrs_clt_close);
+
+int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *clt_path)
+{
+ enum rtrs_clt_state old_state;
+ int err = -EBUSY;
+ bool changed;
+
+ changed = rtrs_clt_change_state_get_old(clt_path,
+ RTRS_CLT_RECONNECTING,
+ &old_state);
+ if (changed) {
+ clt_path->reconnect_attempts = 0;
+ rtrs_clt_stop_and_destroy_conns(clt_path);
+ queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, 0);
+ }
+ if (changed || old_state == RTRS_CLT_RECONNECTING) {
+ /*
+ * flush_delayed_work() queues pending work for immediate
+ * execution, so do the flush if we have queued something
+ * right now or work is pending.
+ */
+ flush_delayed_work(&clt_path->reconnect_dwork);
+ err = (READ_ONCE(clt_path->state) ==
+ RTRS_CLT_CONNECTED ? 0 : -ENOTCONN);
+ }
+
+ return err;
+}
+
+int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *clt_path,
+ const struct attribute *sysfs_self)
+{
+ enum rtrs_clt_state old_state;
+ bool changed;
+
+ /*
+	 * Continue stopping the path until its state has been changed to
+	 * DEAD or has been observed as DEAD:
+ * 1. State was changed to DEAD - we were fast and nobody
+ * invoked rtrs_clt_reconnect(), which can again start
+ * reconnecting.
+ * 2. State was observed as DEAD - we have someone in parallel
+ * removing the path.
+ */
+ do {
+ rtrs_clt_close_conns(clt_path, true);
+ changed = rtrs_clt_change_state_get_old(clt_path,
+ RTRS_CLT_DEAD,
+ &old_state);
+ } while (!changed && old_state != RTRS_CLT_DEAD);
+
+ if (changed) {
+ rtrs_clt_remove_path_from_arr(clt_path);
+ rtrs_clt_destroy_path_files(clt_path, sysfs_self);
+ kobject_put(&clt_path->kobj);
+ }
+
+ return 0;
+}
+
+void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value)
+{
+ clt->max_reconnect_attempts = (unsigned int)value;
+}
+
+int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt)
+{
+ return (int)clt->max_reconnect_attempts;
+}
+
+/**
+ * rtrs_clt_request() - Request data transfer to/from server via RDMA.
+ *
+ * @dir: READ/WRITE
+ * @ops: callback function to be called as confirmation and the private pointer.
+ * @clt: Session
+ * @permit: Preallocated permit
+ * @vec: Message that is sent to server together with the request.
+ *	 The sum of the lengths of all @vec elements is limited to <= IO_MSG_SIZE.
+ *	 Since the message is copied internally it can be allocated on the stack.
+ * @nr: Number of elements in @vec.
+ * @data_len: length of data sent to/from server
+ * @sg: Pages to be sent/received to/from server.
+ * @sg_cnt: Number of elements in the @sg
+ *
+ * Return:
+ * 0: Success
+ * <0: Error
+ *
+ * On dir=READ the rtrs client will request a data transfer from the server to
+ * the client. The data that the server responds with will be stored in @sg
+ * when the user receives an %RTRS_CLT_RDMA_EV_RDMA_REQUEST_WRITE_COMPL event.
+ * On dir=WRITE the rtrs client will RDMA-write the data in @sg to the server side.
+ */
+int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
+ struct rtrs_clt_sess *clt, struct rtrs_permit *permit,
+ const struct kvec *vec, size_t nr, size_t data_len,
+ struct scatterlist *sg, unsigned int sg_cnt)
+{
+ struct rtrs_clt_io_req *req;
+ struct rtrs_clt_path *clt_path;
+
+ enum dma_data_direction dma_dir;
+ int err = -ECONNABORTED, i;
+ size_t usr_len, hdr_len;
+ struct path_it it;
+
+ /* Get kvec length */
+ for (i = 0, usr_len = 0; i < nr; i++)
+ usr_len += vec[i].iov_len;
+
+ if (dir == READ) {
+ hdr_len = sizeof(struct rtrs_msg_rdma_read) +
+ sg_cnt * sizeof(struct rtrs_sg_desc);
+ dma_dir = DMA_FROM_DEVICE;
+ } else {
+ hdr_len = sizeof(struct rtrs_msg_rdma_write);
+ dma_dir = DMA_TO_DEVICE;
+ }
+
+ rcu_read_lock();
+ for (path_it_init(&it, clt);
+ (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
+ continue;
+
+ if (usr_len + hdr_len > clt_path->max_hdr_size) {
+ rtrs_wrn_rl(clt_path->clt,
+ "%s request failed, user message size is %zu and header length %zu, but max size is %u\n",
+ dir == READ ? "Read" : "Write",
+ usr_len, hdr_len, clt_path->max_hdr_size);
+ err = -EMSGSIZE;
+ break;
+ }
+ req = rtrs_clt_get_req(clt_path, ops->conf_fn, permit, ops->priv,
+ vec, usr_len, sg, sg_cnt, data_len,
+ dma_dir);
+ if (dir == READ)
+ err = rtrs_clt_read_req(req);
+ else
+ err = rtrs_clt_write_req(req);
+ if (err) {
+ req->in_use = false;
+ continue;
+ }
+ /* Success path */
+ break;
+ }
+ path_it_deinit(&it);
+ rcu_read_unlock();
+
+ return err;
+}
+EXPORT_SYMBOL(rtrs_clt_request);
+
+int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index)
+{
+	/* If there is no path, return -1 so the block layer does not try again */
+ int cnt = -1;
+ struct rtrs_con *con;
+ struct rtrs_clt_path *clt_path;
+ struct path_it it;
+
+ rcu_read_lock();
+ for (path_it_init(&it, clt);
+ (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
+ continue;
+
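+		/* con[0] is the service connection; poll queues start at index 1. */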
+ con = clt_path->s.con[index + 1];
+ cnt = ib_process_cq_direct(con->cq, -1);
+ if (cnt)
+ break;
+ }
+ path_it_deinit(&it);
+ rcu_read_unlock();
+
+ return cnt;
+}
+EXPORT_SYMBOL(rtrs_clt_rdma_cq_direct);
+
+/**
+ * rtrs_clt_query() - queries RTRS session attributes
+ * @clt: session pointer
+ * @attr: query results for session attributes.
+ * Return:
+ * 0 on success
+ * -ECOMM no connection to the server
+ */
+int rtrs_clt_query(struct rtrs_clt_sess *clt, struct rtrs_attrs *attr)
+{
+ if (!rtrs_clt_is_connected(clt))
+ return -ECOMM;
+
+ attr->queue_depth = clt->queue_depth;
+ attr->max_segments = clt->max_segments;
+ /* Cap max_io_size to min of remote buffer size and the fr pages */
+ attr->max_io_size = min_t(int, clt->max_io_size,
+ clt->max_segments * SZ_4K);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtrs_clt_query);
+
+int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt,
+ struct rtrs_addr *addr)
+{
+ struct rtrs_clt_path *clt_path;
+ int err;
+
+ clt_path = alloc_path(clt, addr, nr_cpu_ids, 0);
+ if (IS_ERR(clt_path))
+ return PTR_ERR(clt_path);
+
+ mutex_lock(&clt->paths_mutex);
+ if (clt->paths_num == 0) {
+ /*
+ * When all the paths are removed for a session,
+ * the addition of the first path is like a new session for
+ * the storage server
+ */
+ clt_path->for_new_clt = 1;
+ }
+
+ mutex_unlock(&clt->paths_mutex);
+
+ /*
+	 * It is totally safe to add a path in the CONNECTING state: incoming
+	 * IO will never grab it. It is also very important to add the
+	 * path before init, since init fires the LINK_CONNECTED event.
+ */
+ rtrs_clt_add_path_to_arr(clt_path);
+
+ err = init_path(clt_path);
+ if (err)
+ goto close_path;
+
+ err = rtrs_clt_create_path_files(clt_path);
+ if (err)
+ goto close_path;
+
+ return 0;
+
+close_path:
+ rtrs_clt_remove_path_from_arr(clt_path);
+ rtrs_clt_close_conns(clt_path, true);
+ free_percpu(clt_path->stats->pcpu_stats);
+ kfree(clt_path->stats);
+ free_path(clt_path);
+
+ return err;
+}
+
+void rtrs_clt_ib_event_handler(struct ib_event_handler *handler,
+ struct ib_event *ibevent)
+{
+ pr_info("Handling event: %s (%d).\n", ib_event_msg(ibevent->event),
+ ibevent->event);
+}
+
+
+static int rtrs_clt_ib_dev_init(struct rtrs_ib_dev *dev)
+{
+ INIT_IB_EVENT_HANDLER(&dev->event_handler, dev->ib_dev,
+ rtrs_clt_ib_event_handler);
+ ib_register_event_handler(&dev->event_handler);
+
+ if (!(dev->ib_dev->attrs.device_cap_flags &
+ IB_DEVICE_MEM_MGT_EXTENSIONS)) {
+ pr_err("Memory registrations not supported.\n");
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static void rtrs_clt_ib_dev_deinit(struct rtrs_ib_dev *dev)
+{
+ ib_unregister_event_handler(&dev->event_handler);
+}
+
+
+static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = {
+ .init = rtrs_clt_ib_dev_init,
+ .deinit = rtrs_clt_ib_dev_deinit
+};
+
+static int __init rtrs_client_init(void)
+{
+ int ret = 0;
+
+ rtrs_rdma_dev_pd_init(0, &dev_pd);
+ ret = class_register(&rtrs_clt_dev_class);
+ if (ret) {
+ pr_err("Failed to create rtrs-client dev class\n");
+ return ret;
+ }
+ rtrs_wq = alloc_workqueue("rtrs_client_wq", 0, 0);
+ if (!rtrs_wq) {
+ class_unregister(&rtrs_clt_dev_class);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void __exit rtrs_client_exit(void)
+{
+ destroy_workqueue(rtrs_wq);
+ class_unregister(&rtrs_clt_dev_class);
+ rtrs_rdma_dev_pd_deinit(&dev_pd);
+}
+
+module_init(rtrs_client_init);
+module_exit(rtrs_client_exit);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
new file mode 100644
index 000000000000..0f57759b3080
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#ifndef RTRS_CLT_H
+#define RTRS_CLT_H
+
+#include <linux/device.h>
+#include "rtrs-pri.h"
+
+/**
+ * enum rtrs_clt_state - Client states.
+ */
+enum rtrs_clt_state {
+ RTRS_CLT_CONNECTING,
+ RTRS_CLT_CONNECTING_ERR,
+ RTRS_CLT_RECONNECTING,
+ RTRS_CLT_CONNECTED,
+ RTRS_CLT_CLOSING,
+ RTRS_CLT_CLOSED,
+ RTRS_CLT_DEAD,
+};
+
+enum rtrs_mp_policy {
+ MP_POLICY_RR,
+ MP_POLICY_MIN_INFLIGHT,
+ MP_POLICY_MIN_LATENCY,
+};
+
+/* see Documentation/ABI/testing/sysfs-class-rtrs-client for details */
+struct rtrs_clt_stats_reconnects {
+ int successful_cnt;
+ int fail_cnt;
+};
+
+/* see Documentation/ABI/testing/sysfs-class-rtrs-client for details */
+struct rtrs_clt_stats_cpu_migr {
+ atomic_t from;
+ int to;
+};
+
+/* stats for Read and write operation.
+ * see Documentation/ABI/testing/sysfs-class-rtrs-client for details
+ */
+struct rtrs_clt_stats_rdma {
+ struct {
+ u64 cnt;
+ u64 size_total;
+ } dir[2];
+
+ u64 failover_cnt;
+};
+
+struct rtrs_clt_stats_pcpu {
+ struct rtrs_clt_stats_cpu_migr cpu_migr;
+ struct rtrs_clt_stats_rdma rdma;
+};
+
+struct rtrs_clt_stats {
+ struct kobject kobj_stats;
+ struct rtrs_clt_stats_pcpu __percpu *pcpu_stats;
+ struct rtrs_clt_stats_reconnects reconnects;
+ atomic_t inflight;
+};
+
+struct rtrs_clt_con {
+ struct rtrs_con c;
+ struct rtrs_iu *rsp_ius;
+ u32 queue_num;
+ unsigned int cpu;
+ struct mutex con_mutex;
+ int cm_err;
+};
+
+/**
+ * rtrs_permit - permits the memory allocation for a future RDMA operation.
+ * Combine with IRQ pinning to keep IO on the same CPU.
+ */
+struct rtrs_permit {
+ enum rtrs_clt_con_type con_type;
+ unsigned int cpu_id;
+ unsigned int mem_id;
+ unsigned int mem_off;
+};
+
+/**
+ * rtrs_clt_io_req - describes one inflight IO request
+ */
+struct rtrs_clt_io_req {
+ struct list_head list;
+ struct rtrs_iu *iu;
+ struct scatterlist *sglist; /* list holding user data */
+ unsigned int sg_cnt;
+ unsigned int sg_size;
+ unsigned int data_len;
+ unsigned int usr_len;
+ void *priv;
+ bool in_use;
+ enum rtrs_mp_policy mp_policy;
+ struct rtrs_clt_con *con;
+ struct rtrs_sg_desc *desc;
+ struct ib_sge *sge;
+ struct rtrs_permit *permit;
+ enum dma_data_direction dir;
+ void (*conf)(void *priv, int errno);
+ unsigned long start_jiffies;
+
+ struct ib_mr *mr;
+ struct ib_cqe inv_cqe;
+ struct completion inv_comp;
+ int inv_errno;
+ bool need_inv_comp;
+ refcount_t ref;
+};
+
+struct rtrs_rbuf {
+ u64 addr;
+ u32 rkey;
+};
+
+struct rtrs_clt_path {
+ struct rtrs_path s;
+ struct rtrs_clt_sess *clt;
+ wait_queue_head_t state_wq;
+ enum rtrs_clt_state state;
+ atomic_t connected_cnt;
+ struct mutex init_mutex;
+ struct rtrs_clt_io_req *reqs;
+ struct delayed_work reconnect_dwork;
+ struct work_struct close_work;
+ struct work_struct err_recovery_work;
+ unsigned int reconnect_attempts;
+ bool established;
+ struct rtrs_rbuf *rbufs;
+ size_t max_io_size;
+ u32 max_hdr_size;
+ u32 chunk_size;
+ size_t queue_depth;
+ u32 max_pages_per_mr;
+ u32 flags;
+ struct kobject kobj;
+ u8 for_new_clt;
+ struct rtrs_clt_stats *stats;
+ /* cache hca_port and hca_name to display in sysfs */
+ u8 hca_port;
+ char hca_name[IB_DEVICE_NAME_MAX];
+ struct list_head __percpu
+ *mp_skip_entry;
+};
+
+struct rtrs_clt_sess {
+ struct list_head paths_list; /* rcu protected list */
+ size_t paths_num;
+ struct rtrs_clt_path
+ __rcu * __percpu *pcpu_path;
+ uuid_t paths_uuid;
+ int paths_up;
+ struct mutex paths_mutex;
+ struct mutex paths_ev_mutex;
+ char sessname[NAME_MAX];
+ u16 port;
+ unsigned int max_reconnect_attempts;
+ unsigned int reconnect_delay_sec;
+ unsigned int max_segments;
+ void *permits;
+ unsigned long *permits_map;
+ size_t queue_depth;
+ size_t max_io_size;
+ wait_queue_head_t permits_wait;
+ size_t pdu_sz;
+ void *priv;
+ void (*link_ev)(void *priv,
+ enum rtrs_clt_link_ev ev);
+ struct device dev;
+ struct kobject *kobj_paths;
+ enum rtrs_mp_policy mp_policy;
+};
+
+static inline struct rtrs_clt_con *to_clt_con(struct rtrs_con *c)
+{
+ return container_of(c, struct rtrs_clt_con, c);
+}
+
+static inline struct rtrs_clt_path *to_clt_path(struct rtrs_path *s)
+{
+ return container_of(s, struct rtrs_clt_path, s);
+}
+
+static inline int permit_size(struct rtrs_clt_sess *clt)
+{
+ return sizeof(struct rtrs_permit) + clt->pdu_sz;
+}
+
+static inline struct rtrs_permit *get_permit(struct rtrs_clt_sess *clt,
+ int idx)
+{
+ return (struct rtrs_permit *)(clt->permits + permit_size(clt) * idx);
+}
+
+int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *path);
+void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait);
+int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt,
+ struct rtrs_addr *addr);
+int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *path,
+ const struct attribute *sysfs_self);
+
+void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value);
+int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt);
+void free_path(struct rtrs_clt_path *clt_path);
+void rtrs_clt_ib_event_handler(struct ib_event_handler *handler,
+ struct ib_event *ibevent);
+
+/* rtrs-clt-stats.c */
+
+int rtrs_clt_init_stats(struct rtrs_clt_stats *stats);
+
+void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *s);
+
+void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con);
+void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir);
+
+int rtrs_clt_reset_rdma_lat_distr_stats(struct rtrs_clt_stats *stats,
+ bool enable);
+ssize_t rtrs_clt_stats_rdma_lat_distr_to_str(struct rtrs_clt_stats *stats,
+ char *page);
+int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable);
+int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats *stats, char *buf);
+int rtrs_clt_stats_migration_to_cnt_to_str(struct rtrs_clt_stats *stats, char *buf);
+int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable);
+int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf);
+int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable);
+ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats,
+ char *page);
+int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *stats, bool enable);
+ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *stats,
+ char *page);
+
+/* rtrs-clt-sysfs.c */
+
+int rtrs_clt_create_sysfs_root_files(struct rtrs_clt_sess *clt);
+void rtrs_clt_destroy_sysfs_root(struct rtrs_clt_sess *clt);
+
+int rtrs_clt_create_path_files(struct rtrs_clt_path *clt_path);
+void rtrs_clt_destroy_path_files(struct rtrs_clt_path *clt_path,
+ const struct attribute *sysfs_self);
+
+#endif /* RTRS_CLT_H */
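
For orientation, the permit pool addressed by get_permit() above is a single flat allocation: each slot is a struct rtrs_permit immediately followed by clt->pdu_sz bytes of per-IO payload, so slot i begins at clt->permits + permit_size(clt) * i. A minimal standalone sketch of that layout arithmetic (pool, slot and depth are illustrative names, not driver symbols):

#include <stdio.h>
#include <stdlib.h>

struct permit {				/* stands in for struct rtrs_permit */
	unsigned int cpu_id;
	unsigned int mem_id;
	unsigned int mem_off;
};

int main(void)
{
	size_t pdu_sz = 64;			/* per-IO private area, like clt->pdu_sz */
	size_t slot = sizeof(struct permit) + pdu_sz;	/* what permit_size() returns */
	size_t depth = 8;			/* queue_depth */
	char *pool = calloc(depth, slot);	/* like clt->permits */

	if (!pool)
		return 1;

	/* slot 3 starts at pool + slot * 3, exactly as get_permit() computes */
	struct permit *p = (struct permit *)(pool + slot * 3);
	void *pdu = p + 1;			/* payload sits right behind the header */

	printf("slot 3 at offset %zu, its pdu at offset %zu\n",
	       (size_t)((char *)p - pool), (size_t)((char *)pdu - pool));
	free(pool);
	return 0;
}
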
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-log.h b/drivers/infiniband/ulp/rtrs/rtrs-log.h
new file mode 100644
index 000000000000..53c785b992f2
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-log.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#ifndef RTRS_LOG_H
+#define RTRS_LOG_H
+
+#define rtrs_log(fn, obj, fmt, ...) \
+ fn("<%s>: " fmt, obj->sessname, ##__VA_ARGS__)
+
+#define rtrs_err(obj, fmt, ...) \
+ rtrs_log(pr_err, obj, fmt, ##__VA_ARGS__)
+#define rtrs_err_rl(obj, fmt, ...) \
+ rtrs_log(pr_err_ratelimited, obj, fmt, ##__VA_ARGS__)
+#define rtrs_wrn(obj, fmt, ...) \
+ rtrs_log(pr_warn, obj, fmt, ##__VA_ARGS__)
+#define rtrs_wrn_rl(obj, fmt, ...) \
+ rtrs_log(pr_warn_ratelimited, obj, fmt, ##__VA_ARGS__)
+#define rtrs_info(obj, fmt, ...) \
+ rtrs_log(pr_info, obj, fmt, ##__VA_ARGS__)
+#define rtrs_info_rl(obj, fmt, ...) \
+ rtrs_log(pr_info_ratelimited, obj, fmt, ##__VA_ARGS__)
+
+#endif /* RTRS_LOG_H */
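
These wrappers only prepend the session name to the message, so any object with a sessname[] member (both rtrs_path and rtrs_clt_sess above carry one) can be logged. A small userspace approximation, with pr_err mapped to fprintf purely for illustration:

#include <stdio.h>

#define pr_err(fmt, ...)	fprintf(stderr, fmt, ##__VA_ARGS__)

#define rtrs_log(fn, obj, fmt, ...) \
	fn("<%s>: " fmt, (obj)->sessname, ##__VA_ARGS__)
#define rtrs_err(obj, fmt, ...)	rtrs_log(pr_err, obj, fmt, ##__VA_ARGS__)

struct fake_path {
	const char *sessname;	/* mirrors the sessname[] member used by the driver */
};

int main(void)
{
	struct fake_path p = { .sessname = "client@server" };

	/* prints: <client@server>: reconnect failed: -110 */
	rtrs_err(&p, "reconnect failed: %d\n", -110);
	return 0;
}
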
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
new file mode 100644
index 000000000000..ef29bd483b5a
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -0,0 +1,408 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#ifndef RTRS_PRI_H
+#define RTRS_PRI_H
+
+#include <linux/uuid.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib.h>
+
+#include "rtrs.h"
+
+#define RTRS_PROTO_VER_MAJOR 2
+#define RTRS_PROTO_VER_MINOR 0
+
+#define RTRS_PROTO_VER_STRING __stringify(RTRS_PROTO_VER_MAJOR) "." \
+ __stringify(RTRS_PROTO_VER_MINOR)
+
+/*
+ * Max IB immediate data size is 2^28 (MAX_IMM_PAYL_BITS)
+ * and the minimum chunk size is 4096 (2^12).
+ * So the maximum sess_queue_depth is 65535 (2^16 - 1) in theory
+ * since queue_depth in rtrs_msg_conn_rsp is defined as le16.
+ * Therefore the practical maximum of sess_queue_depth lies
+ * somewhere between 1 and 65535, depending on the system.
+ */
+#define MAX_SESS_QUEUE_DEPTH 65535
+
+enum rtrs_imm_const {
+ MAX_IMM_TYPE_BITS = 4,
+ MAX_IMM_TYPE_MASK = ((1 << MAX_IMM_TYPE_BITS) - 1),
+ MAX_IMM_PAYL_BITS = 28,
+ MAX_IMM_PAYL_MASK = ((1 << MAX_IMM_PAYL_BITS) - 1),
+};
+
+enum rtrs_imm_type {
+ RTRS_IO_REQ_IMM = 0, /* client to server */
+ RTRS_IO_RSP_IMM = 1, /* server to client */
+ RTRS_IO_RSP_W_INV_IMM = 2, /* server to client */
+
+ RTRS_HB_MSG_IMM = 8, /* HB: HeartBeat */
+ RTRS_HB_ACK_IMM = 9,
+
+ RTRS_LAST_IMM,
+};
+
+enum {
+ SERVICE_CON_QUEUE_DEPTH = 512,
+
+ MAX_PATHS_NUM = 128,
+
+ MIN_CHUNK_SIZE = 8192,
+
+ RTRS_HB_INTERVAL_MS = 5000,
+ RTRS_HB_MISSED_MAX = 5,
+
+ RTRS_MAGIC = 0x1BBD,
+ RTRS_PROTO_VER = (RTRS_PROTO_VER_MAJOR << 8) | RTRS_PROTO_VER_MINOR,
+};
+
+struct rtrs_ib_dev;
+
+struct rtrs_rdma_dev_pd_ops {
+ int (*init)(struct rtrs_ib_dev *dev);
+ void (*deinit)(struct rtrs_ib_dev *dev);
+};
+
+struct rtrs_rdma_dev_pd {
+ struct mutex mutex;
+ struct list_head list;
+ enum ib_pd_flags pd_flags;
+ const struct rtrs_rdma_dev_pd_ops *ops;
+};
+
+struct rtrs_ib_dev {
+ struct ib_device *ib_dev;
+ struct ib_pd *ib_pd;
+ struct kref ref;
+ struct list_head entry;
+ struct rtrs_rdma_dev_pd *pool;
+ struct ib_event_handler event_handler;
+};
+
+struct rtrs_con {
+ struct rtrs_path *path;
+ struct ib_qp *qp;
+ struct ib_cq *cq;
+ struct rdma_cm_id *cm_id;
+ unsigned int cid;
+ int nr_cqe;
+ atomic_t wr_cnt;
+ atomic_t sq_wr_avail;
+};
+
+struct rtrs_path {
+ struct list_head entry;
+ struct sockaddr_storage dst_addr;
+ struct sockaddr_storage src_addr;
+ char sessname[NAME_MAX];
+ uuid_t uuid;
+ struct rtrs_con **con;
+ unsigned int con_num;
+ unsigned int irq_con_num;
+ unsigned int recon_cnt;
+ unsigned int signal_interval;
+ struct rtrs_ib_dev *dev;
+ int dev_ref;
+ struct ib_cqe *hb_cqe;
+ void (*hb_err_handler)(struct rtrs_con *con);
+ struct workqueue_struct *hb_wq;
+ struct delayed_work hb_dwork;
+ unsigned int hb_interval_ms;
+ unsigned int hb_missed_cnt;
+ unsigned int hb_missed_max;
+ ktime_t hb_last_sent;
+ ktime_t hb_cur_latency;
+};
+
+/* rtrs information unit */
+struct rtrs_iu {
+ struct ib_cqe cqe;
+ dma_addr_t dma_addr;
+ void *buf;
+ size_t size;
+ enum dma_data_direction direction;
+};
+
+/**
+ * enum rtrs_msg_types - RTRS message types, see also rtrs/README
+ * @RTRS_MSG_INFO_REQ: Client additional info request to the server
+ * @RTRS_MSG_INFO_RSP: Server additional info response to the client
+ * @RTRS_MSG_WRITE: Client writes data per RDMA to server
+ * @RTRS_MSG_READ: Client requests data transfer from server
+ * @RTRS_MSG_RKEY_RSP: Server refreshed rkey for rbuf
+ */
+enum rtrs_msg_types {
+ RTRS_MSG_INFO_REQ,
+ RTRS_MSG_INFO_RSP,
+ RTRS_MSG_WRITE,
+ RTRS_MSG_READ,
+ RTRS_MSG_RKEY_RSP,
+};
+
+/**
+ * enum rtrs_msg_flags - RTRS message flags.
+ * @RTRS_MSG_NEED_INVAL_F: Send invalidation in response.
+ * @RTRS_MSG_NEW_RKEY_F: Send refreshed rkey in response.
+ */
+enum rtrs_msg_flags {
+ RTRS_MSG_NEED_INVAL_F = 1 << 0,
+ RTRS_MSG_NEW_RKEY_F = 1 << 1,
+};
+
+/**
+ * struct rtrs_sg_desc - RDMA-Buffer entry description
+ * @addr: Address of RDMA destination buffer
+ * @key: Authorization rkey to write to the buffer
+ * @len: Size of the buffer
+ */
+struct rtrs_sg_desc {
+ __le64 addr;
+ __le32 key;
+ __le32 len;
+};
+
+/**
+ * struct rtrs_msg_conn_req - Client connection request to the server
+ * @magic: RTRS magic
+ * @version: RTRS protocol version
+ * @cid: Current connection id
+ * @cid_num: Number of connections per session
+ * @recon_cnt: Reconnections counter
+ * @sess_uuid: UUID of a session (path)
+ * @paths_uuid: UUID of a group of sessions (paths)
+ *
+ * NOTE: max size 56 bytes, see man rdma_connect().
+ */
+struct rtrs_msg_conn_req {
+ /* Is set to 0 by cma.c in case of AF_IB, do not touch that.
+ * see https://www.spinics.net/lists/linux-rdma/msg22397.html
+ */
+ u8 __cma_version;
+ /* On the sender side this must be set to 0, otherwise cma_save_ip_info()
+ * extracts garbage and will fail.
+ */
+ u8 __ip_version;
+ __le16 magic;
+ __le16 version;
+ __le16 cid;
+ __le16 cid_num;
+ __le16 recon_cnt;
+ uuid_t sess_uuid;
+ uuid_t paths_uuid;
+ u8 first_conn : 1;
+ u8 reserved_bits : 7;
+ u8 reserved[11];
+};
+
+/**
+ * struct rtrs_msg_conn_rsp - Server connection response to the client
+ * @magic: RTRS magic
+ * @version: RTRS protocol version
+ * @errno: 0 if the connection was accepted via rdma_accept(), otherwise the error passed to rdma_reject()
+ * @queue_depth: max inflight messages (queue-depth) in this session
+ * @max_io_size: max io size server supports
+ * @max_hdr_size: max msg header size server supports
+ *
+ * NOTE: size is 56 bytes, max possible is 136 bytes, see man rdma_accept().
+ */
+struct rtrs_msg_conn_rsp {
+ __le16 magic;
+ __le16 version;
+ __le16 errno;
+ __le16 queue_depth;
+ __le32 max_io_size;
+ __le32 max_hdr_size;
+ __le32 flags;
+ u8 reserved[36];
+};
+
+/**
+ * struct rtrs_msg_info_req
+ * @type: @RTRS_MSG_INFO_REQ
+ * @pathname: Path name chosen by client
+ */
+struct rtrs_msg_info_req {
+ __le16 type;
+ u8 pathname[NAME_MAX];
+ u8 reserved[15];
+};
+
+/**
+ * struct rtrs_msg_info_rsp
+ * @type: @RTRS_MSG_INFO_RSP
+ * @sg_cnt: Number of @desc entries
+ * @desc: RDMA buffers where the client can write to server
+ */
+struct rtrs_msg_info_rsp {
+ __le16 type;
+ __le16 sg_cnt;
+ u8 reserved[4];
+ struct rtrs_sg_desc desc[];
+};
+
+/**
+ * struct rtrs_msg_rkey_rsp
+ * @type: @RTRS_MSG_RKEY_RSP
+ * @buf_id: RDMA buf_id of the new rkey
+ * @rkey: new remote key for RDMA buffers id from server
+ */
+struct rtrs_msg_rkey_rsp {
+ __le16 type;
+ __le16 buf_id;
+ __le32 rkey;
+};
+
+/**
+ * struct rtrs_msg_rdma_read - RDMA data transfer request from client
+ * @type: always @RTRS_MSG_READ
+ * @usr_len: length of user payload
+ * @sg_cnt: number of @desc entries
+ * @desc: RDMA buffers where the server can write the result to
+ */
+struct rtrs_msg_rdma_read {
+ __le16 type;
+ __le16 usr_len;
+ __le16 flags;
+ __le16 sg_cnt;
+ struct rtrs_sg_desc desc[];
+};
+
+/**
+ * struct rtrs_msg_rdma_write - Message transferred to server with RDMA-Write
+ * @type: always @RTRS_MSG_WRITE
+ * @usr_len: length of user payload
+ */
+struct rtrs_msg_rdma_write {
+ __le16 type;
+ __le16 usr_len;
+};
+
+/**
+ * struct rtrs_msg_rdma_hdr - header of a read or write request
+ * @type: @RTRS_MSG_WRITE | @RTRS_MSG_READ
+ */
+struct rtrs_msg_rdma_hdr {
+ __le16 type;
+};
+
+/* rtrs.c */
+
+struct rtrs_iu *rtrs_iu_alloc(u32 queue_num, size_t size, gfp_t t,
+ struct ib_device *dev, enum dma_data_direction,
+ void (*done)(struct ib_cq *cq, struct ib_wc *wc));
+void rtrs_iu_free(struct rtrs_iu *iu, struct ib_device *dev, u32 queue_num);
+int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu);
+int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
+ struct ib_send_wr *head);
+int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
+ struct ib_sge *sge, unsigned int num_sge,
+ u32 rkey, u64 rdma_addr, u32 imm_data,
+ enum ib_send_flags flags,
+ struct ib_send_wr *head,
+ struct ib_send_wr *tail);
+
+int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe);
+
+int rtrs_cq_qp_create(struct rtrs_path *path, struct rtrs_con *con,
+ u32 max_send_sge, int cq_vector, int nr_cqe,
+ u32 max_send_wr, u32 max_recv_wr,
+ enum ib_poll_context poll_ctx);
+void rtrs_cq_qp_destroy(struct rtrs_con *con);
+
+void rtrs_init_hb(struct rtrs_path *path, struct ib_cqe *cqe,
+ unsigned int interval_ms, unsigned int missed_max,
+ void (*err_handler)(struct rtrs_con *con),
+ struct workqueue_struct *wq);
+void rtrs_start_hb(struct rtrs_path *path);
+void rtrs_stop_hb(struct rtrs_path *path);
+void rtrs_send_hb_ack(struct rtrs_path *path);
+
+void rtrs_rdma_dev_pd_init(enum ib_pd_flags pd_flags,
+ struct rtrs_rdma_dev_pd *pool);
+void rtrs_rdma_dev_pd_deinit(struct rtrs_rdma_dev_pd *pool);
+
+struct rtrs_ib_dev *rtrs_ib_dev_find_or_add(struct ib_device *ib_dev,
+ struct rtrs_rdma_dev_pd *pool);
+int rtrs_ib_dev_put(struct rtrs_ib_dev *dev);
+
+static inline u32 rtrs_to_imm(u32 type, u32 payload)
+{
+ BUILD_BUG_ON(MAX_IMM_PAYL_BITS + MAX_IMM_TYPE_BITS != 32);
+ BUILD_BUG_ON(RTRS_LAST_IMM > (1<<MAX_IMM_TYPE_BITS));
+ return ((type & MAX_IMM_TYPE_MASK) << MAX_IMM_PAYL_BITS) |
+ (payload & MAX_IMM_PAYL_MASK);
+}
+
+static inline void rtrs_from_imm(u32 imm, u32 *type, u32 *payload)
+{
+ *payload = imm & MAX_IMM_PAYL_MASK;
+ *type = imm >> MAX_IMM_PAYL_BITS;
+}
+
+static inline u32 rtrs_to_io_req_imm(u32 addr)
+{
+ return rtrs_to_imm(RTRS_IO_REQ_IMM, addr);
+}
+
+static inline u32 rtrs_to_io_rsp_imm(u32 msg_id, int errno, bool w_inval)
+{
+ enum rtrs_imm_type type;
+ u32 payload;
+
+ /* 9 bits for errno, 19 bits for msg_id */
+ payload = (abs(errno) & 0x1ff) << 19 | (msg_id & 0x7ffff);
+ type = w_inval ? RTRS_IO_RSP_W_INV_IMM : RTRS_IO_RSP_IMM;
+
+ return rtrs_to_imm(type, payload);
+}
+
+static inline void rtrs_from_io_rsp_imm(u32 payload, u32 *msg_id, int *errno)
+{
+ /* 9 bits for errno, 19 bits for msg_id */
+ *msg_id = payload & 0x7ffff;
+ *errno = -(int)((payload >> 19) & 0x1ff);
+}
+
+#define STAT_STORE_FUNC(type, set_value, reset) \
+static ssize_t set_value##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ int ret = -EINVAL; \
+ type *stats = container_of(kobj, type, kobj_stats); \
+ \
+ if (sysfs_streq(buf, "1")) \
+ ret = reset(stats, true); \
+ else if (sysfs_streq(buf, "0")) \
+ ret = reset(stats, false); \
+ if (ret) \
+ return ret; \
+ \
+ return count; \
+}
+
+#define STAT_SHOW_FUNC(type, get_value, print) \
+static ssize_t get_value##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ char *page) \
+{ \
+ type *stats = container_of(kobj, type, kobj_stats); \
+ \
+ return print(stats, page); \
+}
+
+#define STAT_ATTR(type, stat, print, reset) \
+STAT_STORE_FUNC(type, stat, reset) \
+STAT_SHOW_FUNC(type, stat, print) \
+static struct kobj_attribute stat##_attr = __ATTR_RW(stat)
+
+#endif /* RTRS_PRI_H */
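
To see the immediate-data encoding end to end: rtrs_to_imm() packs a 4-bit type and a 28-bit payload, and for IO responses the payload itself carries a 9-bit absolute errno plus a 19-bit msg_id. A standalone round-trip check of that scheme (the constants are copied from this header; everything else is illustrative):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_IMM_TYPE_BITS	4
#define MAX_IMM_TYPE_MASK	((1u << MAX_IMM_TYPE_BITS) - 1)
#define MAX_IMM_PAYL_BITS	28
#define MAX_IMM_PAYL_MASK	((1u << MAX_IMM_PAYL_BITS) - 1)

static uint32_t to_imm(uint32_t type, uint32_t payload)
{
	return ((type & MAX_IMM_TYPE_MASK) << MAX_IMM_PAYL_BITS) |
	       (payload & MAX_IMM_PAYL_MASK);
}

static void from_imm(uint32_t imm, uint32_t *type, uint32_t *payload)
{
	*payload = imm & MAX_IMM_PAYL_MASK;
	*type = imm >> MAX_IMM_PAYL_BITS;
}

int main(void)
{
	uint32_t msg_id = 12345, type, payload;
	int err_in = -5, err_out;	/* e.g. -EIO */

	/* 9 bits for errno, 19 bits for msg_id, as in rtrs_to_io_rsp_imm() */
	uint32_t imm = to_imm(2 /* RTRS_IO_RSP_W_INV_IMM */,
			      (abs(err_in) & 0x1ff) << 19 | (msg_id & 0x7ffff));

	from_imm(imm, &type, &payload);
	err_out = -(int)((payload >> 19) & 0x1ff);

	assert(type == 2 && (payload & 0x7ffff) == msg_id && err_out == err_in);
	printf("imm=0x%08x type=%u msg_id=%u errno=%d\n",
	       imm, type, payload & 0x7ffff, err_out);
	return 0;
}
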
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
new file mode 100644
index 000000000000..2aff1213a19d
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rtrs-srv.h"
+
+int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable)
+{
+ if (enable) {
+ int cpu;
+ struct rtrs_srv_stats_rdma_stats *r;
+
+ for_each_possible_cpu(cpu) {
+ r = per_cpu_ptr(stats->rdma_stats, cpu);
+ memset(r, 0, sizeof(*r));
+ }
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats, char *page)
+{
+ int cpu;
+ struct rtrs_srv_stats_rdma_stats sum;
+ struct rtrs_srv_stats_rdma_stats *r;
+
+ memset(&sum, 0, sizeof(sum));
+
+ for_each_possible_cpu(cpu) {
+ r = per_cpu_ptr(stats->rdma_stats, cpu);
+
+ sum.dir[READ].cnt += r->dir[READ].cnt;
+ sum.dir[READ].size_total += r->dir[READ].size_total;
+ sum.dir[WRITE].cnt += r->dir[WRITE].cnt;
+ sum.dir[WRITE].size_total += r->dir[WRITE].size_total;
+ }
+
+ return sysfs_emit(page, "%llu %llu %llu %llu\n",
+ sum.dir[READ].cnt, sum.dir[READ].size_total,
+ sum.dir[WRITE].cnt, sum.dir[WRITE].size_total);
+}
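
The read side above sums lock-free per-CPU counters over all possible CPUs; the write side is expected to bump only the local CPU's counters. A hedged sketch of such an update helper (the function name is illustrative; the driver's real helper lives in rtrs-srv.h, which is not part of this hunk):

/* Illustrative update side: writers touch only their own CPU's counters,
 * so no lock is taken; readers aggregate with for_each_possible_cpu()
 * as rtrs_srv_stats_rdma_to_str() does above.
 */
static inline void srv_stats_account_rdma(struct rtrs_srv_stats *stats,
					  int dir, size_t size)
{
	this_cpu_inc(stats->rdma_stats->dir[dir].cnt);
	this_cpu_add(stats->rdma_stats->dir[dir].size_total, size);
}
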
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
new file mode 100644
index 000000000000..3f305e694fe8
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rtrs-pri.h"
+#include "rtrs-srv.h"
+#include "rtrs-log.h"
+
+static void rtrs_srv_release(struct kobject *kobj)
+{
+ struct rtrs_srv_path *srv_path;
+
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+ kfree(srv_path);
+}
+
+static struct kobj_type ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rtrs_srv_release,
+};
+
+static ssize_t rtrs_srv_disconnect_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Usage: echo 1 > %s\n", attr->attr.name);
+}
+
+static ssize_t rtrs_srv_disconnect_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rtrs_srv_path *srv_path;
+ struct rtrs_path *s;
+ char str[MAXHOSTNAMELEN];
+
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+ s = &srv_path->s;
+ if (!sysfs_streq(buf, "1")) {
+ rtrs_err(s, "%s: invalid value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+
+ sockaddr_to_str((struct sockaddr *)&srv_path->s.dst_addr, str,
+ sizeof(str));
+
+ rtrs_info(s, "disconnect for path %s requested\n", str);
+ /* first remove sysfs itself to avoid deadlock */
+ sysfs_remove_file_self(&srv_path->kobj, &attr->attr);
+ close_path(srv_path);
+
+ return count;
+}
+
+static struct kobj_attribute rtrs_srv_disconnect_attr =
+ __ATTR(disconnect, 0644,
+ rtrs_srv_disconnect_show, rtrs_srv_disconnect_store);
+
+static ssize_t rtrs_srv_hca_port_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_srv_path *srv_path;
+ struct rtrs_con *usr_con;
+
+ srv_path = container_of(kobj, typeof(*srv_path), kobj);
+ usr_con = srv_path->s.con[0];
+
+ return sysfs_emit(page, "%u\n", usr_con->cm_id->port_num);
+}
+
+static struct kobj_attribute rtrs_srv_hca_port_attr =
+ __ATTR(hca_port, 0444, rtrs_srv_hca_port_show, NULL);
+
+static ssize_t rtrs_srv_hca_name_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_srv_path *srv_path;
+
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+
+ return sysfs_emit(page, "%s\n", srv_path->s.dev->ib_dev->name);
+}
+
+static struct kobj_attribute rtrs_srv_hca_name_attr =
+ __ATTR(hca_name, 0444, rtrs_srv_hca_name_show, NULL);
+
+static ssize_t rtrs_srv_src_addr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_srv_path *srv_path;
+ int cnt;
+
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+ cnt = sockaddr_to_str((struct sockaddr *)&srv_path->s.dst_addr,
+ page, PAGE_SIZE);
+ return cnt + sysfs_emit_at(page, cnt, "\n");
+}
+
+static struct kobj_attribute rtrs_srv_src_addr_attr =
+ __ATTR(src_addr, 0444, rtrs_srv_src_addr_show, NULL);
+
+static ssize_t rtrs_srv_dst_addr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_srv_path *srv_path;
+ int len;
+
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+ len = sockaddr_to_str((struct sockaddr *)&srv_path->s.src_addr, page,
+ PAGE_SIZE);
+ len += sysfs_emit_at(page, len, "\n");
+ return len;
+}
+
+static struct kobj_attribute rtrs_srv_dst_addr_attr =
+ __ATTR(dst_addr, 0444, rtrs_srv_dst_addr_show, NULL);
+
+static struct attribute *rtrs_srv_path_attrs[] = {
+ &rtrs_srv_hca_name_attr.attr,
+ &rtrs_srv_hca_port_attr.attr,
+ &rtrs_srv_src_addr_attr.attr,
+ &rtrs_srv_dst_addr_attr.attr,
+ &rtrs_srv_disconnect_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group rtrs_srv_path_attr_group = {
+ .attrs = rtrs_srv_path_attrs,
+};
+
+STAT_ATTR(struct rtrs_srv_stats, rdma,
+ rtrs_srv_stats_rdma_to_str,
+ rtrs_srv_reset_rdma_stats);
+
+static struct attribute *rtrs_srv_stats_attrs[] = {
+ &rdma_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group rtrs_srv_stats_attr_group = {
+ .attrs = rtrs_srv_stats_attrs,
+};
+
+static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_path *srv_path)
+{
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ int err = 0;
+
+ mutex_lock(&srv->paths_mutex);
+ if (srv->dev_ref++) {
+ /*
+ * Device needs to be registered only on the first session
+ */
+ goto unlock;
+ }
+ srv->dev.class = &rtrs_dev_class;
+ err = dev_set_name(&srv->dev, "%s", srv_path->s.sessname);
+ if (err)
+ goto unlock;
+
+ /*
+ * Suppress user space notification until
+ * sysfs files are created
+ */
+ dev_set_uevent_suppress(&srv->dev, true);
+ err = device_add(&srv->dev);
+ if (err) {
+ pr_err("device_add(): %d\n", err);
+ put_device(&srv->dev);
+ goto unlock;
+ }
+ srv->kobj_paths = kobject_create_and_add("paths", &srv->dev.kobj);
+ if (!srv->kobj_paths) {
+ err = -ENOMEM;
+ pr_err("kobject_create_and_add(): %d\n", err);
+ device_del(&srv->dev);
+ put_device(&srv->dev);
+ goto unlock;
+ }
+ dev_set_uevent_suppress(&srv->dev, false);
+ kobject_uevent(&srv->dev.kobj, KOBJ_ADD);
+unlock:
+ mutex_unlock(&srv->paths_mutex);
+
+ return err;
+}
+
+static void
+rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_path *srv_path)
+{
+ struct rtrs_srv_sess *srv = srv_path->srv;
+
+ mutex_lock(&srv->paths_mutex);
+ if (!--srv->dev_ref) {
+ kobject_put(srv->kobj_paths);
+ mutex_unlock(&srv->paths_mutex);
+ device_del(&srv->dev);
+ put_device(&srv->dev);
+ } else {
+ put_device(&srv->dev);
+ mutex_unlock(&srv->paths_mutex);
+ }
+}
+
+static void rtrs_srv_path_stats_release(struct kobject *kobj)
+{
+ struct rtrs_srv_stats *stats;
+
+ stats = container_of(kobj, struct rtrs_srv_stats, kobj_stats);
+
+ free_percpu(stats->rdma_stats);
+
+ kfree(stats);
+}
+
+static struct kobj_type ktype_stats = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rtrs_srv_path_stats_release,
+};
+
+static int rtrs_srv_create_stats_files(struct rtrs_srv_path *srv_path)
+{
+ int err;
+ struct rtrs_path *s = &srv_path->s;
+
+ err = kobject_init_and_add(&srv_path->stats->kobj_stats, &ktype_stats,
+ &srv_path->kobj, "stats");
+ if (err) {
+ rtrs_err(s, "kobject_init_and_add(): %d\n", err);
+ kobject_put(&srv_path->stats->kobj_stats);
+ return err;
+ }
+ err = sysfs_create_group(&srv_path->stats->kobj_stats,
+ &rtrs_srv_stats_attr_group);
+ if (err) {
+ rtrs_err(s, "sysfs_create_group(): %d\n", err);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ kobject_del(&srv_path->stats->kobj_stats);
+ kobject_put(&srv_path->stats->kobj_stats);
+
+ return err;
+}
+
+int rtrs_srv_create_path_files(struct rtrs_srv_path *srv_path)
+{
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_path *s = &srv_path->s;
+ char str[NAME_MAX];
+ int err;
+ struct rtrs_addr path = {
+ .src = &srv_path->s.dst_addr,
+ .dst = &srv_path->s.src_addr,
+ };
+
+ rtrs_addr_to_str(&path, str, sizeof(str));
+ err = rtrs_srv_create_once_sysfs_root_folders(srv_path);
+ if (err)
+ return err;
+
+ err = kobject_init_and_add(&srv_path->kobj, &ktype, srv->kobj_paths,
+ "%s", str);
+ if (err) {
+ rtrs_err(s, "kobject_init_and_add(): %d\n", err);
+ goto destroy_root;
+ }
+ err = sysfs_create_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
+ if (err) {
+ rtrs_err(s, "sysfs_create_group(): %d\n", err);
+ goto put_kobj;
+ }
+ err = rtrs_srv_create_stats_files(srv_path);
+ if (err)
+ goto remove_group;
+
+ return 0;
+
+remove_group:
+ sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
+put_kobj:
+ kobject_del(&srv_path->kobj);
+destroy_root:
+ kobject_put(&srv_path->kobj);
+ rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
+
+ return err;
+}
+
+void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path)
+{
+ if (srv_path->stats->kobj_stats.state_in_sysfs) {
+ sysfs_remove_group(&srv_path->stats->kobj_stats,
+ &rtrs_srv_stats_attr_group);
+ kobject_del(&srv_path->stats->kobj_stats);
+ kobject_put(&srv_path->stats->kobj_stats);
+ }
+
+ if (srv_path->kobj.state_in_sysfs) {
+ sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
+ kobject_put(&srv_path->kobj);
+ rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
+ }
+
+}
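
The rdma attribute wired into rtrs_srv_stats_attrs[] above comes from the STAT_ATTR() invocation earlier in this file; expanded by hand, STAT_ATTR(struct rtrs_srv_stats, rdma, rtrs_srv_stats_rdma_to_str, rtrs_srv_reset_rdma_stats) produces roughly the following, shown here only to make the generated sysfs hooks visible:

static ssize_t rdma_store(struct kobject *kobj, struct kobj_attribute *attr,
			  const char *buf, size_t count)
{
	int ret = -EINVAL;
	struct rtrs_srv_stats *stats =
		container_of(kobj, struct rtrs_srv_stats, kobj_stats);

	/* writing "1" resets the counters, writing "0" is rejected by the reset op */
	if (sysfs_streq(buf, "1"))
		ret = rtrs_srv_reset_rdma_stats(stats, true);
	else if (sysfs_streq(buf, "0"))
		ret = rtrs_srv_reset_rdma_stats(stats, false);
	if (ret)
		return ret;

	return count;
}

static ssize_t rdma_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *page)
{
	struct rtrs_srv_stats *stats =
		container_of(kobj, struct rtrs_srv_stats, kobj_stats);

	return rtrs_srv_stats_rdma_to_str(stats, page);
}

static struct kobj_attribute rdma_attr = __ATTR_RW(rdma);
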
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-trace.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-trace.c
new file mode 100644
index 000000000000..29ca59ceb0dd
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-trace.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
+ */
+#include "rtrs.h"
+#include "rtrs-pri.h"
+#include "rtrs-srv.h"
+
+/*
+ * We include this last to have the helpers above available for the trace
+ * event implementations.
+ */
+#define CREATE_TRACE_POINTS
+#include "rtrs-srv-trace.h"
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-trace.h b/drivers/infiniband/ulp/rtrs/rtrs-srv-trace.h
new file mode 100644
index 000000000000..587d3e033081
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-trace.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rtrs_srv
+
+#if !defined(_TRACE_RTRS_SRV_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RTRS_SRV_H
+
+#include <linux/tracepoint.h>
+
+struct rtrs_srv_op;
+struct rtrs_srv_con;
+struct rtrs_srv_path;
+
+TRACE_DEFINE_ENUM(RTRS_SRV_CONNECTING);
+TRACE_DEFINE_ENUM(RTRS_SRV_CONNECTED);
+TRACE_DEFINE_ENUM(RTRS_SRV_CLOSING);
+TRACE_DEFINE_ENUM(RTRS_SRV_CLOSED);
+
+#define show_rtrs_srv_state(x) \
+ __print_symbolic(x, \
+ { RTRS_SRV_CONNECTING, "CONNECTING" }, \
+ { RTRS_SRV_CONNECTED, "CONNECTED" }, \
+ { RTRS_SRV_CLOSING, "CLOSING" }, \
+ { RTRS_SRV_CLOSED, "CLOSED" })
+
+TRACE_EVENT(send_io_resp_imm,
+ TP_PROTO(struct rtrs_srv_op *id,
+ bool need_inval,
+ bool always_invalidate,
+ int errno),
+
+ TP_ARGS(id, need_inval, always_invalidate, errno),
+
+ TP_STRUCT__entry(
+ __field(u8, dir)
+ __field(bool, need_inval)
+ __field(bool, always_invalidate)
+ __field(u32, msg_id)
+ __field(int, wr_cnt)
+ __field(u32, signal_interval)
+ __field(int, state)
+ __field(int, errno)
+ __array(char, sessname, NAME_MAX)
+ ),
+
+ TP_fast_assign(
+ struct rtrs_srv_con *con = id->con;
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+
+ __entry->dir = id->dir;
+ __entry->state = srv_path->state;
+ __entry->errno = errno;
+ __entry->need_inval = need_inval;
+ __entry->always_invalidate = always_invalidate;
+ __entry->msg_id = id->msg_id;
+ __entry->wr_cnt = atomic_read(&con->c.wr_cnt);
+ __entry->signal_interval = s->signal_interval;
+ memcpy(__entry->sessname, kobject_name(&srv_path->kobj), NAME_MAX);
+ ),
+
+ TP_printk("sess='%s' state='%s' dir=%s err='%d' inval='%d' glob-inval='%d' msgid='%u' wrcnt='%d' sig-interval='%u'",
+ __entry->sessname,
+ show_rtrs_srv_state(__entry->state),
+ __print_symbolic(__entry->dir,
+ { READ, "READ" },
+ { WRITE, "WRITE" }),
+ __entry->errno,
+ __entry->need_inval,
+ __entry->always_invalidate,
+ __entry->msg_id,
+ __entry->wr_cnt,
+ __entry->signal_interval
+ )
+);
+
+#endif /* _TRACE_RTRS_SRV_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE rtrs-srv-trace
+#include <trace/define_trace.h>
+
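
TRACE_EVENT(send_io_resp_imm, ...) generates a trace_send_io_resp_imm() helper that the server is expected to call from its IO-response path; once the module is loaded the event appears under events/rtrs_srv/ in tracefs. A hedged sketch of a call site (the surrounding function is only outlined and is not the driver's actual code):

/* Illustrative only: record the response parameters before posting the
 * RDMA work; the tracepoint is a no-op unless the event has been enabled.
 */
static void note_io_resp(struct rtrs_srv_op *id, bool need_inval,
			 bool always_invalidate, int errno)
{
	trace_send_io_resp_imm(id, need_inval, always_invalidate, errno);
}
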
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
new file mode 100644
index 000000000000..ef4abdea3c2d
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -0,0 +1,2346 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/module.h>
+
+#include "rtrs-srv.h"
+#include "rtrs-log.h"
+#include <rdma/ib_cm.h>
+#include <rdma/ib_verbs.h>
+#include "rtrs-srv-trace.h"
+
+MODULE_DESCRIPTION("RDMA Transport Server");
+MODULE_LICENSE("GPL");
+
+/* Must be power of 2, see mask from mr->page_size in ib_sg_to_pages() */
+#define DEFAULT_MAX_CHUNK_SIZE (128 << 10)
+#define DEFAULT_SESS_QUEUE_DEPTH 512
+#define MAX_HDR_SIZE PAGE_SIZE
+
+static const struct rtrs_rdma_dev_pd_ops dev_pd_ops;
+static struct rtrs_rdma_dev_pd dev_pd = {
+ .ops = &dev_pd_ops
+};
+const struct class rtrs_dev_class = {
+ .name = "rtrs-server",
+};
+static struct rtrs_srv_ib_ctx ib_ctx;
+
+static int __read_mostly max_chunk_size = DEFAULT_MAX_CHUNK_SIZE;
+static int __read_mostly sess_queue_depth = DEFAULT_SESS_QUEUE_DEPTH;
+
+static bool always_invalidate = true;
+module_param(always_invalidate, bool, 0444);
+MODULE_PARM_DESC(always_invalidate,
+ "Invalidate memory registration for contiguous memory regions before accessing.");
+
+module_param_named(max_chunk_size, max_chunk_size, int, 0444);
+MODULE_PARM_DESC(max_chunk_size,
+ "Max size for each IO request, when change the unit is in byte (default: "
+ __stringify(DEFAULT_MAX_CHUNK_SIZE) "KB)");
+
+module_param_named(sess_queue_depth, sess_queue_depth, int, 0444);
+MODULE_PARM_DESC(sess_queue_depth,
+ "Number of buffers for pending I/O requests to allocate per session. Maximum: "
+ __stringify(MAX_SESS_QUEUE_DEPTH) " (default: "
+ __stringify(DEFAULT_SESS_QUEUE_DEPTH) ")");
+
+static cpumask_t cq_affinity_mask = { CPU_BITS_ALL };
+
+static struct workqueue_struct *rtrs_wq;
+
+static inline struct rtrs_srv_con *to_srv_con(struct rtrs_con *c)
+{
+ return container_of(c, struct rtrs_srv_con, c);
+}
+
+static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
+ enum rtrs_srv_state new_state)
+{
+ enum rtrs_srv_state old_state;
+ bool changed = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&srv_path->state_lock, flags);
+ old_state = srv_path->state;
+ switch (new_state) {
+ case RTRS_SRV_CONNECTED:
+ if (old_state == RTRS_SRV_CONNECTING)
+ changed = true;
+ break;
+ case RTRS_SRV_CLOSING:
+ if (old_state == RTRS_SRV_CONNECTING ||
+ old_state == RTRS_SRV_CONNECTED)
+ changed = true;
+ break;
+ case RTRS_SRV_CLOSED:
+ if (old_state == RTRS_SRV_CLOSING)
+ changed = true;
+ break;
+ default:
+ break;
+ }
+ if (changed)
+ srv_path->state = new_state;
+ spin_unlock_irqrestore(&srv_path->state_lock, flags);
+
+ return changed;
+}
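
rtrs_srv_change_state() is the single gate for the path state machine; the switch admits only CONNECTING->CONNECTED, CONNECTING/CONNECTED->CLOSING and CLOSING->CLOSED, and the return value tells the caller whether it won the transition. A hedged sketch of how a close request would typically build on that (close_work is a member the full driver declares in rtrs-srv.h; the body here is illustrative):

/* Illustrative: only the caller that actually moves the path to CLOSING
 * schedules the teardown work, so concurrent close requests are harmless.
 */
static void try_close_path(struct rtrs_srv_path *srv_path)
{
	if (rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSING))
		queue_work(rtrs_wq, &srv_path->close_work);
}
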
+
+static void free_id(struct rtrs_srv_op *id)
+{
+ if (!id)
+ return;
+ kfree(id);
+}
+
+static void rtrs_srv_free_ops_ids(struct rtrs_srv_path *srv_path)
+{
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ int i;
+
+ if (srv_path->ops_ids) {
+ for (i = 0; i < srv->queue_depth; i++)
+ free_id(srv_path->ops_ids[i]);
+ kfree(srv_path->ops_ids);
+ srv_path->ops_ids = NULL;
+ }
+}
+
+static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
+
+static struct ib_cqe io_comp_cqe = {
+ .done = rtrs_srv_rdma_done
+};
+
+static inline void rtrs_srv_inflight_ref_release(struct percpu_ref *ref)
+{
+ struct rtrs_srv_path *srv_path = container_of(ref,
+ struct rtrs_srv_path,
+ ids_inflight_ref);
+
+ percpu_ref_exit(&srv_path->ids_inflight_ref);
+ complete(&srv_path->complete_done);
+}
+
+static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_path *srv_path)
+{
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_srv_op *id;
+ int i, ret;
+
+ srv_path->ops_ids = kcalloc(srv->queue_depth,
+ sizeof(*srv_path->ops_ids),
+ GFP_KERNEL);
+ if (!srv_path->ops_ids)
+ goto err;
+
+ for (i = 0; i < srv->queue_depth; ++i) {
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ goto err;
+
+ srv_path->ops_ids[i] = id;
+ }
+
+ ret = percpu_ref_init(&srv_path->ids_inflight_ref,
+ rtrs_srv_inflight_ref_release, 0, GFP_KERNEL);
+ if (ret) {
+ pr_err("Percpu reference init failed\n");
+ goto err;
+ }
+ init_completion(&srv_path->complete_done);
+
+ return 0;
+
+err:
+ rtrs_srv_free_ops_ids(srv_path);
+ return -ENOMEM;
+}
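
The percpu_ref initialised here acts as the in-flight IO counter: rtrs_srv_get_ops_ids()/rtrs_srv_put_ops_ids() below take and drop a reference per operation, and rtrs_srv_inflight_ref_release() completes complete_done once the final reference is gone. A hedged sketch of the drain step that teardown is expected to perform (the function name is illustrative):

/* Illustrative drain: after percpu_ref_kill() no new references can be
 * taken, and the wait returns once the last in-flight IO has dropped its
 * reference and the release callback has fired.
 */
static void drain_inflight_ids(struct rtrs_srv_path *srv_path)
{
	percpu_ref_kill(&srv_path->ids_inflight_ref);
	wait_for_completion(&srv_path->complete_done);
}
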
+
+static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_path *srv_path)
+{
+ percpu_ref_get(&srv_path->ids_inflight_ref);
+}
+
+static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_path *srv_path)
+{
+ percpu_ref_put(&srv_path->ids_inflight_ref);
+}
+
+static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+
+ if (wc->status != IB_WC_SUCCESS) {
+ rtrs_err(s, "REG MR failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ close_path(srv_path);
+ return;
+ }
+}
+
+static struct ib_cqe local_reg_cqe = {
+ .done = rtrs_srv_reg_mr_done
+};
+
+static int rdma_write_sg(struct rtrs_srv_op *id)
+{
+ struct rtrs_path *s = id->con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ dma_addr_t dma_addr = srv_path->dma_addr[id->msg_id];
+ struct rtrs_srv_mr *srv_mr;
+ struct ib_send_wr inv_wr;
+ struct ib_rdma_wr imm_wr;
+ struct ib_rdma_wr *wr = NULL;
+ enum ib_send_flags flags;
+ size_t sg_cnt;
+ int err, offset;
+ bool need_inval;
+ u32 rkey = 0;
+ struct ib_reg_wr rwr;
+ struct ib_sge *plist;
+ struct ib_sge list;
+
+ sg_cnt = le16_to_cpu(id->rd_msg->sg_cnt);
+ need_inval = le16_to_cpu(id->rd_msg->flags) & RTRS_MSG_NEED_INVAL_F;
+ if (sg_cnt != 1)
+ return -EINVAL;
+
+ offset = 0;
+
+ wr = &id->tx_wr;
+ plist = &id->tx_sg;
+ plist->addr = dma_addr + offset;
+ plist->length = le32_to_cpu(id->rd_msg->desc[0].len);
+
+ /* The WR will fail with a length error if this is 0 */
+ if (plist->length == 0) {
+ rtrs_err(s, "Invalid RDMA-Write sg list length 0\n");
+ return -EINVAL;
+ }
+
+ plist->lkey = srv_path->s.dev-